'''test pysftp.Connection.readlink - uses py.test'''
from __future__ import print_function
from common import SKIP_IF_CI
from io import BytesIO
@SKIP_IF_CI
def test_readlink(lsftp):
'''test the readlink method'''
rfile = 'readme.txt'
rlink = 'readme.sym'
buf = b'I will not buy this record, it is scratched\nMy hovercraft'\
b' is full of eels.'
flo = BytesIO(buf)
print(lsftp.listdir())
lsftp.putfo(flo, rfile)
lsftp.symlink(rfile, rlink)
assert lsftp.readlink(rlink) == '/home/test/readme.txt'
lsftp.remove(rlink)
lsftp.remove(rfile)
|
import numpy as np
# sigmoid function
def nonlin(x):
return 1 / (1 + np.exp(-x))
def get_rand_weights(num_inputs, num_outputs, hidden_layer_nodes):
# Generate weights with values between -1 and 1 so that the overall mean is 0
weights_1 = 2 * np.random.random((num_inputs, hidden_layer_nodes)) - 1
weights_2 = 2 * np.random.random((hidden_layer_nodes, num_outputs)) - 1
return weights_1, weights_2
def forward_prop(inputs, layer1_weights, layer2_weights):
layer1 = nonlin(np.dot(inputs, layer1_weights))
layer2 = nonlin(np.dot(layer1, layer2_weights))
return layer2
def flatten(layer1_weights, layer2_weights):
weights = []
# flatten the weight arrays so they can be stored in the individual
flat1 = layer1_weights.flatten()
flat2 = layer2_weights.flatten()
for x in flat1:
weights.append(x)
for x in flat2:
weights.append(x)
return weights
def un_flatten(num_inputs, num_hidden, num_outputs, weights):
# find split point of flat_weights
num_first_layer_weights = num_inputs * num_hidden
layer1_w = np.reshape(weights[:num_first_layer_weights], (num_inputs, num_hidden))
layer2_w = np.reshape(weights[num_first_layer_weights:], (num_hidden, num_outputs))
return layer1_w, layer2_w
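# Usage sketch (my addition; the layer sizes are arbitrary and purely illustrative):
# build random weights, flatten them into one gene list as flatten() intends,
# restore them with un_flatten, and run a single forward pass.
example_w1, example_w2 = get_rand_weights(3, 2, 4)        # 3 inputs, 2 outputs, 4 hidden nodes
example_genes = flatten(example_w1, example_w2)           # 3*4 + 4*2 = 20 flat weights
example_r1, example_r2 = un_flatten(3, 4, 2, example_genes)
example_x = np.array([[0.1, 0.5, 0.9]])                   # one sample with 3 features
print(forward_prop(example_x, example_r1, example_r2))    # same output as with the original weights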
|
#!/usr/bin/python
import collections
import fnmatch
import os
import re
import subprocess
import sys
import time
import traceback
from pytools.common.common import *
def remove_repeated_spaces(line):
return " ".join(line.split())
def chunk_list(array_to_chunk, chunk_size):
"""Yield successive n-sized chunks from l.
Divides an array into a number of equally sized chunks"""
for i in xrange(0, len(array_to_chunk), chunk_size):
yield array_to_chunk[i:i+chunk_size]
def chunk_list_by_nsets(array_to_chunk, number_of_sublists):
chunk_size = len(array_to_chunk) / number_of_sublists
if len(array_to_chunk) % number_of_sublists > 0:
chunk_size += 1
chunk_size = max(1, chunk_size)
for i in xrange(0, len(array_to_chunk), chunk_size):
yield array_to_chunk[i:i+chunk_size]
def get_folders_matching_mask(where, mask):
matching_folders = []
# print where, mask
for folder_entry in os.listdir(where):
if fnmatch.fnmatch(folder_entry, mask):
if os.path.isdir(os.path.join(where, folder_entry)):
matching_folders.append(folder_entry)
return matching_folders
def get_files_matching_mask(where, mask):
matching_files = []
# print where, mask
for folder_entry in os.listdir(where):
if fnmatch.fnmatch(folder_entry, mask):
if os.path.isfile(os.path.join(where, folder_entry)):
matching_files.append(folder_entry)
return matching_files
def find_files_matching_mask(where, mask):
p = subprocess.Popen(["find", where, "-name", mask], stdout=subprocess.PIPE)
out, _ = p.communicate()
for line in out.splitlines():
print line
def yeld_iterative_folders(folder_entry, index_start=1, index_end=0, replace_pattern=""):
''' Iterator over folders that contain an index number. For example:
fldr_1_test, fldr_2_test, fldr_3_test. etc.
the folder entry can be in a form "fldr_{}_test" or "fldr_REPLACE_PATTERN_test"
If no replace pattern is specified and there are no curly brackets in the folder_entry, then
the input folder_entry is returned as the only folder, if it exists.
'''
index = index_start
while True:
if len(replace_pattern):
fldr_name = folder_entry.replace(replace_pattern, str(index))
elif "{" in folder_entry:
fldr_name = folder_entry.format(index)
else:
# Nothing to iterate over, try to return the input.
fldr_name = folder_entry
index_end = 1
if os.path.isdir(fldr_name):
yield fldr_name
else:
break
index += 1
if index_end > 0 and index > index_end:
break
def wait_with_message(message, seconds):
_now = time.time()
_end = _now + seconds
while _end > _now:
sys.stdout.write("{0} ({1} seconds) \r".format(message, int(_end - _now)))
sys.stdout.flush()
time.sleep(1)
_now = time.time()
print ""
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes":True, "y":True, "ye":True,
"no":False, "n":False}
if default == None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
def get_duplicate_values_in_list(l):
return [item for item, count in collections.Counter(l).items() if count > 1]
def merge_dicts(dst_dic, src_dic, overwrite=True):
""" Takes src_dic and adds everything into dst_dict. Can either add new elements only
or overwrite old ones"""
if not src_dic:
return dst_dic
for k, v in src_dic.iteritems():
# print k, isinstance(dst_dic[k], dict), isinstance(v, dict)
if k in dst_dic and isinstance(dst_dic[k], dict) and isinstance(v, dict):
merge_dicts(dst_dic[k], v)
elif k in dst_dic and overwrite:
dst_dic[k] = v
continue
if k not in dst_dic:
dst_dic[k] = v
return dst_dic
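# Worked example (my addition) showing the nested-merge behaviour:
# merge_dicts({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3})
# -> {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}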
def is_strin_anIP(s):
a = s.split('.')
if len(a) != 4:
return False
for x in a:
if not x.isdigit():
return False
i = int(x)
if i < 0 or i > 255:
return False
return True
def print_generic_exception(msg="Generic Exception"):
exec_info = sys.exc_info()
print "%s: %s" % (msg, sys.exc_info())
traceback.print_exception(*exec_info)
def remove_files(flist):
for f in flist:
if os.path.isfile(f):
os.remove(f)
def remove_non_alphanumerics(instr):
regex = re.compile('[^a-zA-Z0-9]')
return regex.sub('', instr)
|
import msvcrt # Windows only
def getch():
if msvcrt.kbhit():
return ord(msvcrt.getch())
return 0
'''
while True: # test
ch = getch()
if ch != 0: # a key was pressed
if ch == 27: # ASCII Esc
print("exit\n")
break
else:
print(ch)
'''
|
n = int(input())
print(n)
for i in range(n):
print(1)
|
import socket
import cv2
import numpy
TCP_IP = 'localhost'
TCP_PORT = 5001
sock = socket.socket()
sock.connect((TCP_IP, TCP_PORT))
frame = cv2.imread('scene.jpeg')
encode_param=[int(cv2.IMWRITE_JPEG_QUALITY),90]
result, imgencode = cv2.imencode('.jpg', frame, encode_param)
data = numpy.array(imgencode)
stringData = data.tobytes()
sock.send(str(len(stringData)).ljust(16).encode())  # 16-byte ASCII length header
sock.send(stringData)
sock.close()
# decimg=cv2.imdecode(data,1)
# cv2.imshow('CLIENT',decimg)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
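# A matching receiver sketch for the simple protocol above (a 16-byte ASCII length
# header followed by the JPEG bytes). This is my own illustration; the host, port and
# function name are assumptions, not part of the original client.
def receive_one_frame(listen_port=5001):
    server = socket.socket()
    server.bind(('localhost', listen_port))
    server.listen(1)
    conn, _ = server.accept()
    length = int(conn.recv(16).decode())            # parse the 16-byte length header
    payload = b''
    while len(payload) < length:                    # keep reading until the full JPEG has arrived
        payload += conn.recv(length - len(payload))
    conn.close()
    server.close()
    buf = numpy.frombuffer(payload, dtype=numpy.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)      # decoded BGR image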
|
from abc import ABC, abstractmethod
class VestDevice(ABC):
@abstractmethod
def set_pin(self, index, intensity):
pass
@abstractmethod
def set_pins_batched(self, values: dict):
pass
@abstractmethod
def set_frequency(self, frequency):
pass
@abstractmethod
def mute(self):
pass
class DummyVestDevice(VestDevice):
def set_pin(self, index, intensity):
pass
def set_frequency(self, frequency):
pass
def mute(self):
pass
def set_pins_batched(self, values: dict):
pass
|
# Number of Connected Components in an Undirected Graph: https://leetcode.com/problems/number-of-connected-components-in-an-undirected-graph/
# You have a graph of n nodes. You are given an integer n and an array edges where edges[i] = [ai, bi] indicates that there is an edge between ai and bi in the graph.
# Return the number of connected components in the graph.
from collections import deque
from typing import List
class Solution:
def countComponents(self, n: int, edges: List[List[int]]) -> int:
count = 0
ourGraph = {}
for source, dest in edges:
if source not in ourGraph:
ourGraph[source] = []
ourGraph[source].append(dest)
if dest not in ourGraph:
ourGraph[dest] = []
ourGraph[dest].append(source)
# Simple DFS or BFS; BFS is used here since it is naturally iterative
seen = set()
q = deque()
for i in range(n):
if i not in seen:
count += 1
if i not in ourGraph:
continue
q.appendleft(i)
while q:
node = q.pop()
if node in seen:
continue
seen.add(node)
for edge in ourGraph[node]:
q.appendleft(edge)
return count
# This turns out to be the optimal solution, though I did have one bug where I didn't count nodes that were on their own, like 0 in the example edges = [[2,3],[1,2],[1,3]].
# The time complexity is O(E+V): O(E) for building ourGraph and O(E+V) for BFS. Space is O(E+V), as we keep an adjacency list of edges and a set of visited vertices.
# That being said, there is apparently an optimization using a disjoint set union (union-find), although I didn't have enough time to try implementing it; a sketch follows below.
# Score Card
# Did I need hints? N (But the second solution did)
# Did you finish within 30 min? Y
# Was the solution optimal? It is optimal enough; technically you can improve from O(E+V) to roughly O(E * α(N)) (α being the inverse Ackermann function), and space could be just O(V) instead.
# Were there any bugs? I did have one bug where I didn't count nodes that were on their own, like 0 in the example edges = [[2,3],[1,2],[1,3]].
# 4 5 3 3 = 3.75
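# For reference, a minimal union-find (disjoint set union) sketch of the same problem.
# This is my own illustration, not part of the original submission: every union of two
# previously separate components reduces the component count by one.
def count_components_union_find(n, edges):
    parent = list(range(n))

    def find(x):
        # Path compression: point nodes at their grandparent while walking up.
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    count = n
    for a, b in edges:
        root_a, root_b = find(a), find(b)
        if root_a != root_b:
            parent[root_a] = root_b  # merge the two components
            count -= 1
    return count
# count_components_union_find(5, [[2, 3], [1, 2], [1, 3]]) == 3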
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.com.h21lab.TS29510_Nnrf_NFDiscovery.handler.base_model_ import Model
from openapi_server.com.h21lab.TS29510_Nnrf_NFDiscovery.handler.ipv6_addr import Ipv6Addr
from openapi_server import util
from openapi_server.com.h21lab.TS29510_Nnrf_NFDiscovery.handler.ipv6_addr import Ipv6Addr # noqa: E501
class N2InterfaceAmfInfo(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, ipv4_endpoint_address=None, ipv6_endpoint_address=None, amf_name=None): # noqa: E501
"""N2InterfaceAmfInfo - a model defined in OpenAPI
:param ipv4_endpoint_address: The ipv4_endpoint_address of this N2InterfaceAmfInfo. # noqa: E501
:type ipv4_endpoint_address: List[str]
:param ipv6_endpoint_address: The ipv6_endpoint_address of this N2InterfaceAmfInfo. # noqa: E501
:type ipv6_endpoint_address: List[Ipv6Addr]
:param amf_name: The amf_name of this N2InterfaceAmfInfo. # noqa: E501
:type amf_name: str
"""
self.openapi_types = {
'ipv4_endpoint_address': List[str],
'ipv6_endpoint_address': List[Ipv6Addr],
'amf_name': str
}
self.attribute_map = {
'ipv4_endpoint_address': 'ipv4EndpointAddress',
'ipv6_endpoint_address': 'ipv6EndpointAddress',
'amf_name': 'amfName'
}
self._ipv4_endpoint_address = ipv4_endpoint_address
self._ipv6_endpoint_address = ipv6_endpoint_address
self._amf_name = amf_name
@classmethod
def from_dict(cls, dikt) -> 'N2InterfaceAmfInfo':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The N2InterfaceAmfInfo of this N2InterfaceAmfInfo. # noqa: E501
:rtype: N2InterfaceAmfInfo
"""
return util.deserialize_model(dikt, cls)
@property
def ipv4_endpoint_address(self):
"""Gets the ipv4_endpoint_address of this N2InterfaceAmfInfo.
:return: The ipv4_endpoint_address of this N2InterfaceAmfInfo.
:rtype: List[str]
"""
return self._ipv4_endpoint_address
@ipv4_endpoint_address.setter
def ipv4_endpoint_address(self, ipv4_endpoint_address):
"""Sets the ipv4_endpoint_address of this N2InterfaceAmfInfo.
:param ipv4_endpoint_address: The ipv4_endpoint_address of this N2InterfaceAmfInfo.
:type ipv4_endpoint_address: List[str]
"""
self._ipv4_endpoint_address = ipv4_endpoint_address
@property
def ipv6_endpoint_address(self):
"""Gets the ipv6_endpoint_address of this N2InterfaceAmfInfo.
:return: The ipv6_endpoint_address of this N2InterfaceAmfInfo.
:rtype: List[Ipv6Addr]
"""
return self._ipv6_endpoint_address
@ipv6_endpoint_address.setter
def ipv6_endpoint_address(self, ipv6_endpoint_address):
"""Sets the ipv6_endpoint_address of this N2InterfaceAmfInfo.
:param ipv6_endpoint_address: The ipv6_endpoint_address of this N2InterfaceAmfInfo.
:type ipv6_endpoint_address: List[Ipv6Addr]
"""
self._ipv6_endpoint_address = ipv6_endpoint_address
@property
def amf_name(self):
"""Gets the amf_name of this N2InterfaceAmfInfo.
:return: The amf_name of this N2InterfaceAmfInfo.
:rtype: str
"""
return self._amf_name
@amf_name.setter
def amf_name(self, amf_name):
"""Sets the amf_name of this N2InterfaceAmfInfo.
:param amf_name: The amf_name of this N2InterfaceAmfInfo.
:type amf_name: str
"""
self._amf_name = amf_name
|
#!/usr/bin/env python3
""" This script creates histogram of which txout types are present in a snapshot.
Namely, this script creates two CSV files in the given --target-folder:
1. Histogram CSV file containing the counts for each txout type that occurred.
2. Another CSV file, which contains the output scripts of all txouts classified as "others". """
import argparse
import glob
import progressbar
from parse_chunk_file import parse_chunk_file
from lib import utxo as utxo_handler
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('folder', type=str, help='Folder holding all snapshot chunks')
argparser.add_argument('snapshot_height', type=int, help='Block height of the snapshot to extract')
argparser.add_argument('--target-folder', type=str, help='Target folder for output', default='.')
argparser.add_argument('--target-prefix', type=str, help='Prefix of output file', default='utxo_hist_')
argparser.add_argument('--obfuscated-snapshot', action='store_true', help='Use if you are analysing an obfuscated snapshot')
args = argparser.parse_args()
f_histogram = open(f'{args.target_folder}/{args.target_prefix}{args.snapshot_height:010d}_histogram.csv', 'w')
f_other = open(f'{args.target_folder}/{args.target_prefix}{args.snapshot_height:010d}_others.csv', 'w')
utxo_handler.print_utxo_histogram_header(f_histogram)
utxo_handler.print_utxo_other_header(f_other)
filenames = glob.glob(f'{args.folder}/chunks/{args.snapshot_height:010d}_**.chunk')
bar = progressbar.ProgressBar(max_value=len(filenames), redirect_stdout=True)
for i, chunk_filename in enumerate(sorted(filenames)):
chunk_height, chunk_offset, chunk_hash, utxos = parse_chunk_file(chunk_filename, is_obfuscated_snapshot=args.obfuscated_snapshot)
histogram, other = utxo_handler.get_utxo_histogram(utxos, is_obfuscated_snapshot=args.obfuscated_snapshot)
utxo_handler.print_utxo_histogram(histogram, chunk_height, chunk_offset, f_histogram, machine=True)
utxo_handler.print_other_utxos(other, chunk_height, chunk_offset, f_other, machine=True)
bar.update(i)
f_histogram.close()
f_other.close()
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dirichlet."""
import collections
import itertools
from absl.testing import parameterized
import tensorflow as tf
from generalization.synthesization import dirichlet
class DirichletTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
(f'num_clients={num_clients}, rotate={rotate}', num_clients, rotate)
for num_clients, rotate in itertools.product([1, 2, 3], [True, False]))
def test_synthesize_by_dirichlet_over_labels(self, num_clients, rotate):
test_dataset = tf.data.Dataset.from_tensor_slices(
collections.OrderedDict(
x=list(range(9)), foo=['a', 'b', 'c'] * 3, label=[0, 1, 6] * 3))
cd = dirichlet.synthesize_by_dirichlet_over_labels(
dataset=test_dataset, num_clients=num_clients, use_rotate_draw=rotate)
self.assertCountEqual(cd.client_ids, map(str, range(num_clients)))
expected_num_elements_per_client = (9 // num_clients)
for client_id in cd.client_ids:
local_ds = cd.create_tf_dataset_for_client(client_id)
self.assertLen(list(local_ds), expected_num_elements_per_client)
if __name__ == '__main__':
tf.test.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# hsh
# Copyright 2016 Christopher Simpkins
# MIT license
# ------------------------------------------------------------------------------
import sys
from commandlines import Command
from hsh.library.hash import Hasher, HashChecker, file_exists
from hsh.settings import help as hsh_help
from hsh.settings import usage as hsh_usage
from hsh.settings import app_name, major_version, minor_version, patch_version
def main():
c = Command()
if c.does_not_validate_missing_args():
print(hsh_usage)
sys.exit(1)
if c.is_help_request(): # User requested hsh help information
print(hsh_help)
sys.exit(0)
elif c.is_usage_request(): # User requested hsh usage information
print(hsh_usage)
sys.exit(0)
elif c.is_version_request(): # User requested hsh version information
version_display_string = app_name + ' ' + major_version + '.' + minor_version + '.' + patch_version
print(version_display_string)
sys.exit(0)
primary_command = c.subcmd.lower() # make the subcommand case-independent
if primary_command == "sha1":
if c.argc > 1:
file_list = c.argv[1:]
for file in file_list:
if file_exists(file):
hasher = Hasher()
sha_hash = hasher.sha1(file)
print("SHA1 (" + file + ") :")
print(sha_hash)
else:
sys.stderr.write(file + " does not appear to be an existing file path.\n")
else:
sys.stderr.write("You did not include a file in your command. Please try again.\n")
sys.exit(1)
elif primary_command == "sha224":
if c.argc > 1:
file_list = c.argv[1:]
for file in file_list:
if file_exists(file):
hasher = Hasher()
sha_hash = hasher.sha224(file)
print("SHA224 (" + file + ") :")
print(sha_hash)
else:
sys.stderr.write(file + " does not appear to be an existing file path.\n")
else:
sys.stderr.write("You did not include a file in your command. Please try again.\n")
sys.exit(1)
elif primary_command == "sha256":
if c.argc > 1:
file_list = c.argv[1:]
for file in file_list:
if file_exists(file):
hasher = Hasher()
sha_hash = hasher.sha256(file)
print("SHA256 (" + file + ") :")
print(sha_hash)
else:
sys.stderr.write(file + " does not appear to be an existing file path.\n")
else:
sys.stderr.write("You did not include a file in your command. Please try again.\n")
sys.exit(1)
elif primary_command == "sha384":
if c.argc > 1:
file_list = c.argv[1:]
for file in file_list:
if file_exists(file):
hasher = Hasher()
sha_hash = hasher.sha384(file)
print("SHA384 (" + file + ") :")
print(sha_hash)
else:
sys.stderr.write(file + " does not appear to be an existing file path.\n")
else:
sys.stderr.write("You did not include a file in your command. Please try again.\n")
sys.exit(1)
elif primary_command == "sha512":
if c.argc > 1:
file_list = c.argv[1:]
for file in file_list:
if file_exists(file):
hasher = Hasher()
sha_hash = hasher.sha512(file)
print("SHA512 (" + file + ") :")
print(sha_hash)
else:
sys.stderr.write(file + " does not appear to be an existing file path.\n")
else:
sys.stderr.write("You did not include a file in your command. Please try again.\n")
sys.exit(1)
elif primary_command == "md5":
if c.argc > 1:
file_list = c.argv[1:]
for file in file_list:
if file_exists(file):
hasher = Hasher()
sha_hash = hasher.md5(file)
print("MD5 (" + file + ") :")
print(sha_hash)
else:
sys.stderr.write(file + " does not appear to be an existing file path.\n")
else:
sys.stderr.write("You did not include a file in your command. Please try again.\n")
sys.exit(1)
elif primary_command == "check":
if c.argc == 3: # primary command + 2 arguments
hc = HashChecker()
hc.compare(c.argv[1:]) # pass the argument list excluding the primary command
elif c.argc < 3:
sys.stderr.write("You did not include a file or hash digest for comparison. Please try again.\n")
sys.exit(1)
elif c.argc > 3:
sys.stderr.write("Too many arguments. Please include two arguments for comparison.\n")
sys.exit(1)
elif c.argc == 1: # single file hash digest request with default SHA256 settings
file = c.arg0
if file_exists(file):
hasher = Hasher()
sha_hash = hasher.sha256(file)
print("SHA256 (" + file + ") :")
print(sha_hash)
else:
sys.stderr.write(c.arg0 + " does not appear to be an existing file path. Please try again.\n")
sys.exit(1)
elif c.argc == 2: # exactly two arguments, perform comparison between them by default
hc = HashChecker()
hc.compare(c.argv) # pass the entire argument list because there is no primary command
else:
print("Could not complete the command that you entered. Please try again.")
sys.exit(1)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
import os
from sys import platform
def Lynx(busqueda):
buscador = f'lynx -source {busqueda} > prueba.html'
os.system(buscador)
buscador = f'lynx {busqueda}'
os.system(buscador)
def Mensaje():
print('Allowed search types:')
print('1. Simple search')
print('2. Search by "site:"')
print('3. Search by "inurl:"')
print('4. Search by "intitle:"')
print('5. Search by "intext:"')
print('6. Search by "filetype:"')
def Query():
Mensaje()
respuesta = input("Enter an option: ").lower()
if respuesta == '1':
termino = input("Enter the search term: ").replace(" ","+")
busqueda = f'http://www.google.com/search?q="{termino}"'
Lynx(busqueda)
elif respuesta == '2':
termino = input("Enter the search term: ").replace(" ","+")
parametro = input("Enter the site: ")
dork = f'site:{parametro}+'
busqueda =f'http://www.google.com/search?q={dork}"{termino}"'
Lynx(busqueda)
elif respuesta == '3':
termino = input("Enter the search term: ").replace(" ","+")
busqueda =f'http://www.google.com/search?q=inurl:"{termino}"'
Lynx(busqueda)
elif respuesta == '4':
termino = input("Enter the search term: ").replace(" ","+")
parametro = input("Enter the title: ")
dork = f'intitle:"{parametro}"+'
busqueda =f'http://www.google.com/search?q={dork}"{termino}"'
Lynx(busqueda)
elif respuesta == '5':
termino = input("Enter the search term: ").replace(" ","+")
busqueda =f'http://www.google.com/search?q=intext:"{termino}"'
Lynx(busqueda)
elif respuesta == '6':
termino = input("Enter the search term: ").replace(" ","+")
parametro = input("Enter the file extension: ")
dork = f'filetype:{parametro}+'
busqueda =f'http://www.google.com/search?q={dork}"{termino}"'
Lynx(busqueda)
else:
print('Invalid option')
def Inicio():
print('Google Query Manager')
print('Warning: this program uses the text browser "LYNX"')
print('to view and save the queries made')
continuar = input('Do you want to continue (N/n to quit, any other key to continue): ').lower()
while continuar != 'n':
Query()
continuar = input('Do you want to continue (N/n to quit, any other key to continue): ').lower()
print('Goodbye')
Inicio()
|
import itertools
import numpy
from ..cg import voronoi_frames
from ..io.fileio import FileIO
from ._contW_lists import ContiguityWeightsLists
from .util import get_ids, get_points_array
from .weights import WSP, W
from .raster import da2W, da2WSP
try:
from shapely.geometry import Point as shapely_point
from ..cg.shapes import Point as pysal_point
point_type = (shapely_point, pysal_point)
except ImportError:
from ..cg.shapes import Point as point_type
WT_TYPE = {"rook": 2, "queen": 1} # for _contW_Binning
__author__ = "Sergio J. Rey <srey@asu.edu> , Levi John Wolf <levi.john.wolf@gmail.com>"
__all__ = ["Rook", "Queen", "Voronoi"]
class Rook(W):
"""
Construct a weights object from a collection of pysal polygons that share at least one edge.
Parameters
----------
polygons : list
a collection of PySAL shapes to build weights from
ids : list
a list of names to use to build the weights
**kw : keyword arguments
optional arguments for :class:`pysal.weights.W`
See Also
---------
:class:`libpysal.weights.weights.W`
"""
def __init__(self, polygons, **kw):
criterion = "rook"
ids = kw.pop("ids", None)
polygons, backup = itertools.tee(polygons)
first_shape = next(iter(backup))
if isinstance(first_shape, point_type):
polygons, vertices = voronoi_frames(get_points_array(polygons))
polygons = list(polygons.geometry)
neighbors, ids = _build(polygons, criterion=criterion, ids=ids)
W.__init__(self, neighbors, ids=ids, **kw)
@classmethod
def from_shapefile(cls, filepath, idVariable=None, full=False, **kwargs):
"""
Rook contiguity weights from a polygon shapefile.
Parameters
----------
shapefile : string
name of polygon shapefile including suffix.
sparse : boolean
If True return WSP instance
If False return W instance
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> from libpysal.weights import Rook
>>> import libpysal
>>> wr=Rook.from_shapefile(libpysal.examples.get_path("columbus.shp"), "POLYID")
>>> "%.3f"%wr.pct_nonzero
'8.330'
>>> wr=Rook.from_shapefile(libpysal.examples.get_path("columbus.shp"), sparse=True)
>>> pct_sp = wr.sparse.nnz *1. / wr.n**2
>>> "%.3f"%pct_sp
'0.083'
Notes
-----
Rook contiguity defines as neighbors any pair of polygons that share a
common edge in their polygon definitions.
See Also
--------
:class:`libpysal.weights.weights.W`
:class:`libpysal.weights.contiguity.Rook`
"""
sparse = kwargs.pop("sparse", False)
if idVariable is not None:
ids = get_ids(filepath, idVariable)
else:
ids = None
w = cls(FileIO(filepath), ids=ids, **kwargs)
w.set_shapefile(filepath, idVariable=idVariable, full=full)
if sparse:
w = w.to_WSP()
return w
@classmethod
def from_iterable(cls, iterable, sparse=False, **kwargs):
"""
Construct a weights object from a collection of arbitrary polygons. This
will cast the polygons to PySAL polygons, then build the W.
Parameters
----------
iterable : iterable
a collection of shapes to be cast to PySAL shapes. Must
support iteration. Can be either Shapely or PySAL shapes.
**kw : keyword arguments
optional arguments for :class:`pysal.weights.W`
See Also
--------
:class:`libpysal.weights.weights.W`
:class:`libpysal.weights.contiguity.Rook`
"""
new_iterable = iter(iterable)
w = cls(new_iterable, **kwargs)
if sparse:
w = WSP.from_W(w)
return w
@classmethod
def from_dataframe(
cls, df, geom_col=None, idVariable=None, ids=None, id_order=None, **kwargs
):
"""
Construct a weights object from a pandas dataframe with a geometry
column. This will cast the polygons to PySAL polygons, then build the W
using ids from the dataframe.
Parameters
----------
df : DataFrame
a :class: `pandas.DataFrame` containing geometries to use
for spatial weights
geom_col : string
the name of the column in `df` that contains the
geometries. Defaults to active geometry column.
idVariable : string
the name of the column to use as IDs. If nothing is
provided, the dataframe index is used
ids : list
a list of ids to use to index the spatial weights object.
Order is not respected from this list.
id_order : list
an ordered list of ids to use to index the spatial weights
object. If used, the resulting weights object will iterate
over results in the order of the names provided in this
argument.
See Also
--------
:class:`libpysal.weights.weights.W`
:class:`libpysal.weights.contiguity.Rook`
"""
if geom_col is None:
geom_col = df.geometry.name
if id_order is not None:
if id_order is True and ((idVariable is not None) or (ids is not None)):
# if idVariable is None, we want ids. Otherwise, we want the
# idVariable column
id_order = list(df.get(idVariable, ids))
else:
id_order = df.get(id_order, ids)
elif idVariable is not None:
ids = df.get(idVariable).tolist()
elif isinstance(ids, str):
ids = df.get(ids).tolist()
return cls.from_iterable(
df[geom_col].tolist(), ids=ids, id_order=id_order, **kwargs
)
@classmethod
def from_xarray(
cls,
da,
z_value=None,
coords_labels={},
k=1,
include_nodata=False,
n_jobs=1,
sparse=True,
**kwargs,
):
"""
Construct a weights object from a xarray.DataArray with an additional
attribute index containing coordinate values of the raster
in the form of Pandas.Index/MultiIndex.
Parameters
----------
da : xarray.DataArray
Input 2D or 3D DataArray with shape=(z, y, x)
z_value : int/string/float
Select the z_value of 3D DataArray with multiple layers.
coords_labels : dictionary
Pass dimension labels for coordinates and layers if they do not
belong to default dimensions, which are (band/time, y/lat, x/lon)
e.g. coords_labels = {"y_label": "latitude", "x_label": "longitude", "z_label": "year"}
Default is {} empty dictionary.
sparse : boolean
type of weight object. Default is True. For libpysal.weights.W, sparse = False
k : int
Order of contiguity; this will select all neighbors up to kth order.
Default is 1.
include_nodata : boolean
If True, missing values will be assumed as non-missing when
selecting higher_order neighbors, Default is False
n_jobs : int
Number of cores to be used in the sparse weight construction. If -1,
all available cores are used. Default is 1.
**kwargs : keyword arguments
optional arguments passed when sparse = False
Returns
-------
w : libpysal.weights.W/libpysal.weights.WSP
instance of spatial weights class W or WSP with an index attribute
Notes
-----
1. Lower order contiguities are also selected.
2. Returned object contains `index` attribute that includes a
`Pandas.MultiIndex` object from the DataArray.
See Also
--------
:class:`libpysal.weights.weights.W`
:class:`libpysal.weights.weights.WSP`
"""
if sparse:
w = da2WSP(da, "rook", z_value, coords_labels, k, include_nodata)
else:
w = da2W(da, "rook", z_value, coords_labels, k, include_nodata, **kwargs)
return w
class Queen(W):
"""
Construct a weights object from a collection of pysal polygons that share at least one vertex.
Parameters
----------
polygons : list
a collection of PySAL shapes to build weights from
ids : list
a list of names to use to build the weights
**kw : keyword arguments
optional arguments for :class:`pysal.weights.W`
See Also
--------
:class:`libpysal.weights.weights.W`
"""
def __init__(self, polygons, **kw):
criterion = "queen"
ids = kw.pop("ids", None)
polygons, backup = itertools.tee(polygons)
first_shape = next(iter(backup))
if isinstance(first_shape, point_type):
polygons, vertices = voronoi_frames(get_points_array(polygons))
polygons = list(polygons.geometry)
neighbors, ids = _build(polygons, criterion=criterion, ids=ids)
W.__init__(self, neighbors, ids=ids, **kw)
@classmethod
def from_shapefile(cls, filepath, idVariable=None, full=False, **kwargs):
"""
Queen contiguity weights from a polygon shapefile.
Parameters
----------
shapefile : string
name of polygon shapefile including suffix.
idVariable : string
name of a column in the shapefile's DBF to use for ids.
sparse : boolean
If True return WSP instance
If False return W instance
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> from libpysal.weights import Queen
>>> import libpysal
>>> wq=Queen.from_shapefile(libpysal.examples.get_path("columbus.shp"))
>>> "%.3f"%wq.pct_nonzero
'9.829'
>>> wq=Queen.from_shapefile(libpysal.examples.get_path("columbus.shp"),"POLYID")
>>> "%.3f"%wq.pct_nonzero
'9.829'
>>> wq=Queen.from_shapefile(libpysal.examples.get_path("columbus.shp"), sparse=True)
>>> pct_sp = wq.sparse.nnz *1. / wq.n**2
>>> "%.3f"%pct_sp
'0.098'
Notes
-----
Queen contiguity defines as neighbors any pair of polygons that share at
least one vertex in their polygon definitions.
See Also
--------
:class:`libpysal.weights.weights.W`
:class:`libpysal.weights.contiguity.Queen`
"""
sparse = kwargs.pop("sparse", False)
if idVariable is not None:
ids = get_ids(filepath, idVariable)
else:
ids = None
w = cls(FileIO(filepath), ids=ids, **kwargs)
w.set_shapefile(filepath, idVariable=idVariable, full=full)
if sparse:
w = w.to_WSP()
return w
@classmethod
def from_iterable(cls, iterable, sparse=False, **kwargs):
"""
Construct a weights object from a collection of arbitrary polygons. This
will cast the polygons to PySAL polygons, then build the W.
Parameters
----------
iterable : iterable
a collection of shapes to be cast to PySAL shapes. Must
support iteration. Contents may either be a shapely or PySAL shape.
**kw : keyword arguments
optional arguments for :class:`pysal.weights.W`
See Also
---------
:class:`libpysal.weights.weights.W`
:class:`libpysal.weights.contiguity.Queen`
"""
new_iterable = iter(iterable)
w = cls(new_iterable, **kwargs)
if sparse:
w = WSP.from_W(w)
return w
@classmethod
def from_dataframe(cls, df, geom_col=None, **kwargs):
"""
Construct a weights object from a pandas dataframe with a geometry
column. This will cast the polygons to PySAL polygons, then build the W
using ids from the dataframe.
Parameters
----------
df : DataFrame
a :class: `pandas.DataFrame` containing geometries to use
for spatial weights
geom_col : string
the name of the column in `df` that contains the
geometries. Defaults to active geometry column
idVariable : string
the name of the column to use as IDs. If nothing is
provided, the dataframe index is used
ids : list
a list of ids to use to index the spatial weights object.
Order is not respected from this list.
id_order : list
an ordered list of ids to use to index the spatial weights
object. If used, the resulting weights object will iterate
over results in the order of the names provided in this
argument.
See Also
--------
:class:`libpysal.weights.weights.W`
:class:`libpysal.weights.contiguity.Queen`
"""
idVariable = kwargs.pop("idVariable", None)
ids = kwargs.pop("ids", None)
id_order = kwargs.pop("id_order", None)
if geom_col is None:
geom_col = df.geometry.name
if id_order is not None:
if id_order is True and ((idVariable is not None) or (ids is not None)):
# if idVariable is None, we want ids. Otherwise, we want the
# idVariable column
ids = list(df.get(idVariable, ids))
id_order = ids
elif isinstance(id_order, str):
ids = df.get(id_order, ids)
id_order = ids
elif idVariable is not None:
ids = df.get(idVariable).tolist()
elif isinstance(ids, str):
ids = df.get(ids).tolist()
w = cls.from_iterable(
df[geom_col].tolist(), ids=ids, id_order=id_order, **kwargs
)
return w
@classmethod
def from_xarray(
cls,
da,
z_value=None,
coords_labels={},
k=1,
include_nodata=False,
n_jobs=1,
sparse=True,
**kwargs,
):
"""
Construct a weights object from a xarray.DataArray with an additional
attribute index containing coordinate values of the raster
in the form of Pandas.Index/MultiIndex.
Parameters
----------
da : xarray.DataArray
Input 2D or 3D DataArray with shape=(z, y, x)
z_value : int/string/float
Select the z_value of 3D DataArray with multiple layers.
coords_labels : dictionary
Pass dimension labels for coordinates and layers if they do not
belong to default dimensions, which are (band/time, y/lat, x/lon)
e.g. coords_labels = {"y_label": "latitude", "x_label": "longitude", "z_label": "year"}
Default is {} empty dictionary.
sparse : boolean
type of weight object. Default is True. For libpysal.weights.W, sparse = False
k : int
Order of contiguity; this will select all neighbors up to kth order.
Default is 1.
include_nodata : boolean
If True, missing values will be assumed as non-missing when
selecting higher_order neighbors, Default is False
n_jobs : int
Number of cores to be used in the sparse weight construction. If -1,
all available cores are used. Default is 1.
**kwargs : keyword arguments
optional arguments passed when sparse = False
Returns
-------
w : libpysal.weights.W/libpysal.weights.WSP
instance of spatial weights class W or WSP with an index attribute
Notes
-----
1. Lower order contiguities are also selected.
2. Returned object contains `index` attribute that includes a
`Pandas.MultiIndex` object from the DataArray.
See Also
--------
:class:`libpysal.weights.weights.W`
:class:`libpysal.weights.weights.WSP`
"""
if sparse:
w = da2WSP(da, "queen", z_value, coords_labels, k, include_nodata)
else:
w = da2W(da, "queen", z_value, coords_labels, k, include_nodata, **kwargs)
return w
def Voronoi(points, criterion="rook", clip="ahull", **kwargs):
"""
Voronoi weights for a 2-d point set
Points are Voronoi neighbors if their polygons share an edge or vertex.
Parameters
----------
points : array
(n,2)
coordinates for point locations
kwargs : arguments to pass to Rook, the underlying contiguity class.
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> import numpy as np
>>> from libpysal.weights import Voronoi
>>> np.random.seed(12345)
>>> points= np.random.random((5,2))*10 + 10
>>> w = Voronoi(points)
>>> w.neighbors
{0: [2, 3, 4], 1: [2], 2: [0, 1, 4], 3: [0, 4], 4: [0, 2, 3]}
"""
from ..cg.voronoi import voronoi_frames
region_df, _ = voronoi_frames(points, clip=clip)
if criterion.lower() == "queen":
cls = Queen
elif criterion.lower() == "rook":
cls = Rook
else:
raise ValueError(
"Contiguity criterion {} not supported. "
'Only "rook" and "queen" are supported.'.format(criterion)
)
return cls.from_dataframe(region_df, **kwargs)
def _from_dataframe(df, **kwargs):
"""
Construct a voronoi contiguity weight directly from a dataframe.
Note that if criterion='rook', this is identical to the delaunay
graph for the points.
If the input dataframe is of any other geometry type than "Point",
a value error is raised.
Arguments
---------
df : pandas.DataFrame
dataframe containing point geometries for a
voronoi diagram.
Returns
-------
w : W
instance of spatial weights.
"""
try:
x, y = df.geometry.x.values, df.geometry.y.values
except ValueError:
raise NotImplementedError(
"Voronoi weights are only"
" implemented for point geometries. "
"You may consider using df.centroid."
)
coords = numpy.column_stack((x, y))
return Voronoi(coords, **kwargs)
Voronoi.from_dataframe = _from_dataframe
def _build(polygons, criterion="rook", ids=None):
"""
This is a developer-facing function to construct a spatial weights object.
Parameters
---------
polygons : list
list of pysal polygons to use to build contiguity
criterion : string
option of which kind of contiguity to build. Is either "rook" or "queen"
ids : list
list of ids to use to index the neighbor dictionary
Returns
-------
tuple containing (neighbors, ids), where neighbors is a dictionary
describing contiguity relations and ids is the list of ids used to index
that dictionary.
NOTE: this is different from the prior behavior of buildContiguity, which
returned an actual weights object. Since this just dispatches for the
classes above, this returns the raw ingredients for a spatial weights
object, not the object itself.
"""
if ids and len(ids) != len(set(ids)):
raise ValueError(
"The argument to the ids parameter contains duplicate entries."
)
wttype = WT_TYPE[criterion.lower()]
geo = polygons
if issubclass(type(geo), FileIO):
geo.seek(0) # Make sure we read from the beginning of the file.
neighbor_data = ContiguityWeightsLists(polygons, wttype=wttype).w
neighbors = {}
# weights={}
if ids:
for key in neighbor_data:
ida = ids[key]
if ida not in neighbors:
neighbors[ida] = set()
neighbors[ida].update([ids[x] for x in neighbor_data[key]])
for key in neighbors:
neighbors[key] = set(neighbors[key])
else:
for key in neighbor_data:
neighbors[key] = set(neighbor_data[key])
return (
dict(
list(zip(list(neighbors.keys()), list(map(list, list(neighbors.values())))))
),
ids,
)
def buildContiguity(polygons, criterion="rook", ids=None):
"""
This is a deprecated function.
It builds a contiguity W from the polygons provided. As such, it is now
identical to calling the class constructors for Rook or Queen.
"""
# Warn('This function is deprecated. Please use the Rook or Queen classes',
# UserWarning)
if criterion.lower() == "rook":
return Rook(polygons, ids=ids)
elif criterion.lower() == "queen":
return Queen(polygons, ids=ids)
else:
raise Exception('Weights criterion "{}" was not found.'.format(criterion))
|
from datetime import date, timedelta
import json
from django.conf import settings
from mock import patch
from nose.tools import eq_
from lib.buyers.models import Buyer, BuyerPaypal
from solitude.base import APITest
@patch('lib.paypal.client.Client.get_preapproval_key')
@patch.object(settings, 'PAYPAL_USE_SANDBOX', True)
class TestPreapprovalPaypal(APITest):
def setUp(self):
self.api_name = 'paypal'
self.uuid = 'sample:uid'
self.list_url = self.get_list_url('preapproval')
self.buyer = Buyer.objects.create(uuid=self.uuid)
def get_data(self):
return {'start': date.today().strftime('%Y-%m-%d'),
'end': (date.today() +
timedelta(days=30)).strftime('%Y-%m-%d'),
'return_url': 'http://foo.com/return.url',
'cancel_url': 'http://foo.com/cancel.url',
'uuid': self.uuid}
def test_post(self, key):
key.return_value = {'key': 'foo'}
res = self.client.post(self.list_url, data=self.get_data())
eq_(res.status_code, 201, res.content)
# Note: the key needs to be disclosed here so it can be passed
# on to client to ask PayPal. This is the only time it should
# be disclosed however.
data = json.loads(res.content)
eq_(data['key'], 'foo')
def test_post_empty(self, key):
res = self.client.post(self.list_url, data={})
eq_(res.status_code, 400)
data = json.loads(res.content)
for k in ['start', 'end', 'return_url', 'cancel_url']:
eq_(data[k], [u'This field is required.'])
def test_post_not_date(self, key):
data = self.get_data()
data['start'] = '2012'
res = self.client.post(self.list_url, data=data)
eq_(res.status_code, 400)
eq_(json.loads(res.content)['start'], [u'Enter a valid date.'])
def test_post_not_url(self, key):
data = self.get_data()
data['return_url'] = 'blargh'
res = self.client.post(self.list_url, data=data)
eq_(res.status_code, 400)
eq_(json.loads(res.content)['return_url'], [u'Enter a valid URL.'])
def create(self):
res = self.client.post(self.list_url, data=self.get_data())
return json.loads(res.content)['pk']
def test_get(self, key):
key.return_value = {'key': 'foo'}
uuid = self.create()
url = self.get_detail_url('preapproval', uuid)
res = self.client.get(url)
assert 'foo' not in res # Just check we didn't leak the key.
def test_put(self, key):
key.return_value = {'key': 'foo'}
paypal = BuyerPaypal.objects.create(buyer=self.buyer)
eq_(paypal.key, None)
uuid = self.create()
url = self.get_detail_url('preapproval', uuid)
res = self.client.put(url)
eq_(res.status_code, 202)
eq_(BuyerPaypal.objects.get(buyer=self.buyer).key, 'foo')
def test_put_no_buyer(self, key):
key.return_value = {'key': 'foo'}
uuid = self.create()
url = self.get_detail_url('preapproval', uuid)
eq_(BuyerPaypal.objects.count(), 0)
res = self.client.put(url)
eq_(res.status_code, 202)
eq_(BuyerPaypal.objects.all()[0].key, 'foo')
def test_put_partial(self, key):
key.return_value = {'key': 'foo'}
paypal = BuyerPaypal.objects.create(buyer=self.buyer, currency='BRL')
eq_(paypal.key, None)
uuid = self.create()
url = self.get_detail_url('preapproval', uuid)
res = self.client.put(url)
eq_(res.status_code, 202)
eq_(BuyerPaypal.objects.get(buyer=self.buyer).currency, 'BRL')
def test_put_fails(self, key):
url = self.get_detail_url('preapproval', 'asd')
res = self.client.put(url)
eq_(res.status_code, 404, res.content)
def test_put_no_cache(self, key):
key.return_value = {'key': 'foo'}
paypal = BuyerPaypal.objects.create(buyer=self.buyer)
eq_(paypal.key, None)
url = self.get_detail_url('preapproval', '123')
res = self.client.put(url)
eq_(res.status_code, 404)
def test_delete(self, key):
key.return_value = {'key': 'foo'}
BuyerPaypal.objects.create(buyer=self.buyer)
uuid = self.create()
url = self.get_detail_url('preapproval', uuid)
eq_(self.client.delete(url).status_code, 204)
eq_(self.client.put(url).status_code, 404)
|
#!/usr/bin/env python3
import streamlit as st
showWarningOnDirectExecution = False
import pandas as pd
import numpy as np
import gzip
import gensim.downloader as api
from gensim import corpora
from gensim.similarities import WordEmbeddingSimilarityIndex
from gensim.similarities import SparseTermSimilarityMatrix
import os
import itertools
from collections import Counter
import re
import string
from textblob import Word
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from nltk.stem.lancaster import LancasterStemmer
from nltk.tokenize import TweetTokenizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from spellchecker import SpellChecker
@st.cache(suppress_st_warning=True,allow_output_mutation=True)
def importish():
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
importish()
tknzr = TweetTokenizer()
lemmatizer = WordNetLemmatizer()
spell=SpellChecker()
# Creating our tokenizer and lemmatizer
@st.cache(suppress_st_warning=True,allow_output_mutation=True)
def remove_emoji(string):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
u"\U00002500-\U00002BEF" # chinese char
u"\U000024C2-\U0001F251"
u"\U0001f926-\U0001f937"
u"\U00010000-\U0010ffff"
u"\u2640-\u2642"
u"\u2600-\u2B55"
u"\u200d"
u"\u23cf"
u"\u23e9"
u"\u231a"
u"\ufe0f" # dingbats
u"\u3030"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', string)
@st.cache(suppress_st_warning=True,allow_output_mutation=True)
def clean_text(text):
# remove numbers
text=remove_emoji(text)
text_nonum = re.sub(r'\d+', '', text)
text_no_misplacedstop = text_nonum.replace('.',' ')
text_no_forslash = text_no_misplacedstop.replace('/',' ')
answer = tknzr.tokenize(text_no_forslash)
SpellChecked=[spell.correction(word) for word in answer]
answer =' '.join(SpellChecked)
# remove punctuations and convert characters to lower case
punc = string.punctuation.replace('.','')
punc = punc.replace('/','')
text_no_punc = "".join([char.lower() for char in answer if char not in punc])
# substitute multiple whitespace with single whitespace
# removes leading and trailing whitespaces
# remove forward slash with space
text_no_doublespace = re.sub(r'\s+', ' ', text_no_punc).strip()
return text_no_doublespace
stpwrd = nltk.corpus.stopwords.words('english')
stpwrd.extend(string.punctuation)
keepwords="don't,does,no,not,can,should,will,aren't,couldn't,doesn't,isn't,shouldn't,won't,is".split(',')
for word in keepwords:
stpwrd.remove(word)
def lem_data(data):
tknzr = TweetTokenizer()
data = tknzr.tokenize(data)
data = [word for word in data if word not in stpwrd]
data = [lemmatizer.lemmatize(x) for x in data]
return data
@st.cache(allow_output_mutation=True)
def questiontype(df):
YeeeNooo=df[df['question'].str.contains('does|can|will|would',flags=re.IGNORECASE)].index.to_list()
df['qtype']='open-ended'
df.at[YeeeNooo,'qtype']='yes/no'
return df
@st.cache(suppress_st_warning=True,allow_output_mutation=True)
def yes_no(df):
# yes/no helpful replies
Yes_No=df[df['answer'].str.contains('definitely|absolutely|positively|suppose so|believe so|think so',flags=re.IGNORECASE,regex=True)].index.to_list()
Yes=df[df['answer'].str.contains('yes',flags=re.IGNORECASE,regex=False)].index.to_list()
Yep=df[df['answer'].str.contains('yep',flags=re.IGNORECASE,regex=False)].index.to_list()
No=df[df['answer'].str.contains('no',flags=re.IGNORECASE,regex=False)].index.to_list()
Nah=df[df['answer'].str.contains('nah',flags=re.IGNORECASE,regex=False)].index.to_list()
Nope=df[df['answer'].str.contains('nope',flags=re.IGNORECASE,regex=False)].index.to_list()
Not=df[df['answer'].str.contains('not',flags=re.IGNORECASE,regex=False)].index.to_list()
df.at[Yes_No+Yes+No+Not+Yep+Nah+Nope,'Helpful-Definitive']=1
definitively_definitive=df[((df['answerType']=='Y')|(df['answerType']=='N'))&(df['Helpful-Definitive']==0)].index.to_list()
for x in definitively_definitive:
df.at[x,'Helpful-Definitive']=1
return df
@st.cache(suppress_st_warning=True,allow_output_mutation=True)
def specboyQ(df):
# definitively unhelpful replies
idk=df[df['answer'].str.contains("don't know|not sure|do not know|can't help|not arrived|gift",flags=re.IGNORECASE)].index.to_list()
df['Unhelpful']=0
df.at[idk,'Unhelpful']=1
size=df[(df['answer'].str.contains('"|width|height|wide|long|tall|high|inch|measures|inch|metre|meter|feet|cm|centimetre|millimetre|big|small|large|tiny',flags=re.IGNORECASE))&(df['question'].str.contains('size|big|high|wide|diameter|clearance|clearence|dimension|dimention|depth|height|width|high|wide|measure',flags=re.IGNORECASE))].index.to_list()
where=df[(df['answer'].str.contains('under|behind|top|bottom|left|right|side|front|back|over|below|inside|outside',flags=re.IGNORECASE))&(df['question'].str.contains('where',flags=re.IGNORECASE))].index.to_list()
itis=df[(df['answer'].str.contains("its|it's|it is",flags=re.IGNORECASE))&(df['question'].str.contains('is this|is it',flags=re.IGNORECASE))].index.to_list()
how=df[(df['answer'].str.contains("use|using|have to",flags=re.IGNORECASE))&(df['question'].str.contains('how',flags=re.IGNORECASE))].index.to_list()
can=df[(df['answer'].str.contains("can",flags=re.IGNORECASE))&(df['question'].str.contains('can',flags=re.IGNORECASE))].index.to_list()
inclusive=df[(df['answer'].str.contains('came with|comes with|include',flags=re.IGNORECASE))&(df['question'].str.contains('come with|include',flags=re.IGNORECASE))].index.to_list()
QAnswered=inclusive+where+itis+size+how+can
df['Helpful-QAnswered']=0
df.at[QAnswered,'Helpful-QAnswered']=1
return df
@st.cache(suppress_st_warning=True,allow_output_mutation=True)
def slow_funcs():
w2v_model = api.load("glove-wiki-gigaword-50")
similarity_index = WordEmbeddingSimilarityIndex(w2v_model)
return similarity_index
@st.cache(suppress_st_warning=True)
def SCM(q, a):
"""Function that calculates Soft Cosine Similarity between a Question and its Answer
references: https://devopedia.org/question-similarity
https://notebook.community/gojomo/gensim/docs/notebooks/soft_cosine_tutorial
"""
similarity_index = slow_funcs()
q_lem = lem_data(q)
a_lem = lem_data(a)
documents = [q_lem,a_lem]
dictionary = corpora.Dictionary(documents)
# Convert the sentences into bag-of-words vectors.
q_bag = dictionary.doc2bow(q_lem)
a_bag = dictionary.doc2bow(a_lem)
# Prepare the similarity matrix
similarity_matrix = SparseTermSimilarityMatrix(similarity_index, dictionary)
# compute SCM using the inner_product method
similarity = similarity_matrix.inner_product(q_bag, a_bag, normalized=(True, True))
# convert SCM score to percentage
percentage_similarity= round(similarity * 100,2)
cut_off=0.2
if similarity > cut_off:
pred = f'This answer is likely helpful! Q&A similarity = {percentage_similarity:.1f}%'
else:
pred = f'This answer is probably unhelpful, Q&A similarity = {percentage_similarity:.1f}%'
return pred
def is_useful(q, a, questionType, answerType):
"""Function that evaluates the usefulness of the answer to a question
in the Amazon reviews section"""
d={'question':[q],'answer':[a],'questionType':[questionType],'answerType':[answerType]}
df= pd.DataFrame(data=d)
df['question'] = df['question'].apply(clean_text)
df['answer'] = df['answer'].apply(clean_text)
df=questiontype(df)
df['Helpful-Definitive']=0
df['Unhelpful']=0
df=specboyQ(df)
if (df['questionType'].iloc[0]=='yes/no')|(df['qtype'][0]=='yes/no'):
df=yes_no(df)
if len(df)==df['Unhelpful'].sum():
return 'This answer is unhelpful'
elif len(df)==df['Helpful-QAnswered'].sum()+df['Helpful-Definitive'].sum():
return 'This answer is helpful'
return SCM(q=df['question'].iloc[0],a=df['answer'].iloc[0])
st.title("How useful is the answer?")
st.text("Fire away!")
q = st.text_input("Ask a question", value="", max_chars=None, key=None, type="default", help=None, autocomplete=None, on_change=None, args=None, kwargs=None, placeholder=None)
qtype = st.radio("What type of question is this?",["yes/no","open-ended"])
a = st.text_input("Answer the question", value="", max_chars=None, key=None, type="default", help=None, autocomplete=None, on_change=None, args=None, kwargs=None, placeholder=None)
atype = st.radio("What type of answer is this?",["Y","N","Other"])
if st.button("Am I useful? 🥺", key=None, help=None, on_click=None, args=None, kwargs=None):
answer=is_useful(q,a,qtype,atype)
st.text(answer)
|
import os, csv, platform, argparse
from tqdm import tqdm
from bs4 import BeautifulSoup
from selenium import webdriver
def main(args):
# To do
# make csv file checker
# Check Platform
if platform.system() == 'Windows':
print('Detected OS : Windows')
executable = './webdriver/chromedriver.exe'
elif platform.system() == 'Linux':
print('Detected OS : Linux')
executable = './webdriver/chromedriver_linux'
elif platform.system() == 'Darwin':
print('Detected OS : Mac')
executable = './webdriver/chromedriver_mac'
options = webdriver.ChromeOptions()
options.add_argument('window-size=1920x1080')
options.add_argument("disable-gpu")
options.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")
options.add_argument("lang=ko_KR") # 한국어!
print("Pega's Post Crawling Start ! ")
mode="headless"
if mode == "headless":
options.add_argument(mode)
driver = webdriver.Chrome(executable, options=options)
driver.implicitly_wait(1.5)
category = f"{args.category}"
tag = f"{args.tag}"
dst = './_posts'
os.makedirs(dst, exist_ok=True)
root = "https://jehyunlee.github.io"
main_page = f"https://jehyunlee.github.io/categories/{category}/{tag}/"
driver.get(main_page)
last_page = driver.find_element_by_xpath('//*[@id="page-nav"]/span[1]').text.split(' ')[-1]
print(f"Number of pages: {last_page}")
num_fieldnames = None
try:
csv_file_read = open('crawler_checker_pega.csv', mode='r')
reader = csv.DictReader(csv_file_read)
num_fieldnames = reader.fieldnames
print("Checker file is already exist!")
except:
print("Checker file is Not exist !")
fieldnames = ['date', 'title', 'link']
csv_file_write = open('crawler_checker_pega.csv', mode='a')
writer = csv.DictWriter(csv_file_write, fieldnames=fieldnames)
title_in_csv = []
if num_fieldnames:
for row in reader:
title_in_csv.append(row["title"])
else:
writer.writeheader()
dates = []
titles = []
links = []
excerpts = []
thumbnails = []
for page_num in tqdm(range(int(last_page), 0, -1)):
if page_num != 1:
driver.get(f"{main_page}/page/{page_num}/")
else:
driver.get(f"{main_page}")
html = driver.page_source
soup = BeautifulSoup(html, "html.parser")
#article article-summary
for i in soup.find_all(class_="article article-summary"):
date = i.find(class_="date").text
dates.append(date)
title_herf = i.find(class_="article-title")
title = title_herf.contents[1].text
titles.append(title)
if title in title_in_csv: continue
print(f"New Post ! ({title})")
link = root+title_herf.contents[1].get("href")
links.append(link)
excerpt = i.find(class_="article-excerpt").text
excerpts.append(excerpt)
thumbnail = root+i.find(class_="thumbnail-image").get("style")[:-1].split("(")[1]
thumbnails.append(thumbnail)
writer.writerow({'date': date, 'title': title, 'link': link})
filename = os.path.join(dst, f"{date}-{link.split('/')[-2]}.md")
full_text = f'---\nlayout: post\ntitle: {title_herf.contents[1].text}\ncategory: Pega\ntag:\n- Data Science\n---\n\n\n\n\n[]({link})'
with open(f"{filename}", "w") as f:
f.write(full_text)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--category", default='Python', type=str, help="Main Category")
parser.add_argument("--tag", default='Data-Science', type=str, help="Tag")
args = parser.parse_args()
main(args)
|
# Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from tempest.common import rest_client
from tempest.common import xml_utils
from tempest import config
CONF = config.CONF
class InstanceUsagesAuditLogClientXML(rest_client.RestClient):
TYPE = "xml"
def __init__(self, auth_provider):
super(InstanceUsagesAuditLogClientXML, self).__init__(
auth_provider)
self.service = CONF.compute.catalog_type
def list_instance_usage_audit_logs(self):
url = 'os-instance_usage_audit_log'
resp, body = self.get(url)
instance_usage_audit_logs = xml_utils.xml_to_json(
etree.fromstring(body))
return resp, instance_usage_audit_logs
def get_instance_usage_audit_log(self, time_before):
url = 'os-instance_usage_audit_log/%s' % time_before
resp, body = self.get(url)
instance_usage_audit_log = xml_utils.xml_to_json(
etree.fromstring(body))
return resp, instance_usage_audit_log
|
# There are exactly ten ways of selecting three from five, 12345:
# 123, 124, 125, 134, 135, 145, 234, 235, 245, and 345
# In combinatorics, we use the notation, 5C3 = 10.
# In general,
# nCr = n! / r!(n-r)! ,where r <= n, n! = n*(n-1)*...*3*2*1, and 0! = 1.
# It is not until n = 23, that a value exceeds one-million: 23C10 = 1144066.
# How many, not necessarily distinct, values of nCr, for 1 <= n <= 100, are
# greater than one-million?
from operator import mul
from fractions import Fraction
def nCk(n,k):
return int(reduce(mul, (Fraction(n-i, i+1) for i in range(k)), 1))
total = 0
for n in xrange(1,101):
for k in xrange(2,n):
if nCk(n,k) > 1000000:
total += 1
print total
# 4075
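# Quick sanity check of the helper against the values quoted in the problem
# statement above: 5C3 = 10 and 23C10 = 1144066.
assert nCk(5, 3) == 10
assert nCk(23, 10) == 1144066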
|
import requests
from flask import request, abort, jsonify
from .settings import config
import datetime
ERROR_CODES = {
400: {"errorCode": "INVALID_JSON"},
403: {"errorCode": "INVALID_API_KEY"},
404: {"errorCode": "NOT_FOUND"},
410: {"errorCode": "INVALID_USER_TOKEN"},
429: {"errorCode": "RATE_LIMIT_EXCEEDED"},
500: {"errorCode": "INTERNAL_SERVER_ERROR"}
}
ISO_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
ISO_FORMAT_MSEC = '%Y-%m-%dT%H:%M:%S.%fZ'
# Copied from https://github.com/pebble-dev/rebble-appstore-api/blob/master/appstore/utils.py
# Really should be in common library
def get_access_token():
access_token = request.args.get('access_token')
if not access_token:
header = request.headers.get('Authorization')
if header:
auth = header.split(' ')
if len(auth) == 2 and auth[0] == 'Bearer':
access_token = auth[1]
if not access_token:
abort(401)
return access_token
def authed_request(method, url, **kwargs):
headers = kwargs.setdefault('headers', {})
headers['Authorization'] = f'Bearer {get_access_token()}'
return requests.request(method, url, **kwargs)
def get_uid():
result = authed_request('GET', f"{config['REBBLE_AUTH_URL']}/api/v1/me")
if result.status_code != 200:
abort(401)
return result.json()['uid']
def api_error(code):
response = jsonify(ERROR_CODES[code])
response.status_code = code
return response
def parse_time(time_str):
try:
return datetime.datetime.strptime(time_str, ISO_FORMAT)
except ValueError:
pass
return datetime.datetime.strptime(time_str, ISO_FORMAT_MSEC)
def time_to_str(time):
return time.strftime(ISO_FORMAT)
def time_valid(time):
now = datetime.datetime.utcnow()
if (time < now and (now - time).days > 2) or (time > now and (time - now).days > 366):
return False # Time must not be more than two days in the past, or a year in the future.
return True
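# For example, a timestamp from yesterday passes the check above, while one
# from five days ago, or more than a year in the future, is rejected.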
def pin_valid(pin_id, pin_json):
try:
if pin_json is None or pin_json.get('id') != pin_id:
return False
if not time_valid(parse_time(pin_json['time'])):
return False
        if 'createNotification' in pin_json and 'time' in pin_json['createNotification']:
            return False  # The createNotification type must not include a time attribute.
if 'updateNotification' in pin_json and not time_valid(parse_time(pin_json['updateNotification']['time'])):
return False
if 'reminders' in pin_json:
if len(pin_json['reminders']) > 3:
return False # Max 3 reminders
for reminder in pin_json['reminders']:
if not time_valid(parse_time(reminder['time'])):
return False
except (KeyError, ValueError):
return False
return True
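# A minimal pin that should pass the validation above (illustrative values only):
#   pin_valid("pin-1", {"id": "pin-1", "time": time_to_str(datetime.datetime.utcnow())})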
|
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
from sqlalchemy.dialects.oracle.base import OracleDialect
from sqlalchemy.sql import sqltypes
from sqlalchemy import util, exc
from .base import MixedBinary, BaseDialect
colspecs = util.update_copy(
OracleDialect.colspecs, {sqltypes.LargeBinary: MixedBinary,},
)
class OracleJDBCDialect(BaseDialect, OracleDialect):
jdbc_db_name = "oracle"
jdbc_driver_name = "oracle.jdbc.OracleDriver"
colspecs = colspecs
def initialize(self, connection):
super(OracleJDBCDialect, self).initialize(connection)
def _driver_kwargs(self):
return {}
def create_connect_args(self, url):
if url is None:
return
# dialects expect jdbc url e.g.
# "jdbc:oracle:thin@example.com:1521/db"
# if sqlalchemy create_engine() url is passed e.g.
# "oracle://scott:tiger@example.com/db"
# it is parsed wrong
# restore original url
s: str = str(url)
# get jdbc url
jdbc_url: str = s.split("//", 1)[-1]
# add driver information
if not jdbc_url.startswith("jdbc"):
jdbc_url = f"jdbc:oracle:thin:@{jdbc_url}"
kwargs = {
"jclassname": self.jdbc_driver_name,
"url": jdbc_url,
# pass driver args via JVM System settings
"driver_args": []
}
return ((), kwargs)
def _get_server_version_info(self, connection):
try:
banner = connection.execute(
"SELECT BANNER FROM v$version"
).scalar()
        except exc.DBAPIError:
            banner = None
        match = re.search(r"Release ([\d\.]+)", banner or "")
        if not match:
            return None
        return tuple(int(x) for x in match.group(1).split("."))
dialect = OracleJDBCDialect
|
from systemcheck.checks.models.checks import Check
from systemcheck.models.meta.orm_choices import choices
from systemcheck.models.meta import Base, ChoiceType, Column, ForeignKey, Integer, QtModelMixin, String, qtRelationship, \
relationship, RichString, generic_repr, Boolean, RestrictionsMixin, OperatorMixin, Date, DateTime, Time, BaseMixin
from systemcheck.systems.ABAP.models import ActionAbapIsNotClientSpecificMixin
from systemcheck import models
pluginName='ActionAbapJobSchedulingValidation'
@generic_repr
class ActionAbapJobSchedulingValidation(Check, ActionAbapIsNotClientSpecificMixin):
__tablename__ = pluginName
id = Column(Integer, ForeignKey('checks_metadata.id'), primary_key=True)
params = relationship(pluginName+'__params', cascade="all, delete-orphan")
__mapper_args__ = {
'polymorphic_identity':pluginName,
}
__qtmap__ = [Check.name, Check.description, Check.failcriteria, Check.criticality]
@choices
class ActionAbapJobSchedulingValidationIntervalType:
class Meta:
HOUR = ['H', 'Hours']
DAY = ['D', 'Days']
MIN = ['M', 'Minutes']
WEEK = ['W', 'Weeks']
YEAR = ['Y', 'Year']
@choices
class ActionAbapJobSchedulingValidationComparisonOperator:
class Meta:
LE = ['LE', 'lower or equal']
LT = ['LT', 'lower than']
EQ = ['EQ', 'equal']
HE = ['HE', 'higher or equal']
HT = ['HT', 'higher than']
@generic_repr
class ActionAbapJobSchedulingValidation__params(QtModelMixin, Base, OperatorMixin, BaseMixin):
""" Job Scheduling Validation:
"""
__tablename__ = pluginName+'__params'
check = relationship(pluginName, back_populates="params")
id = Column(Integer, ForeignKey('checks_metadata.id'), primary_key=True)
param_set_name = Column(String,
qt_label='Parameter Set Name',
qt_description='Parameter Set Description')
expected_count = Column(Integer,
qt_label='Expected Executions')
operator = Column(String,
qt_label='Comparison Operator',
choices=ActionAbapJobSchedulingValidationComparisonOperator.CHOICES,
default=ActionAbapJobSchedulingValidationComparisonOperator.LE)
interval = Column(Integer,
qt_label='Interval',
qt_description='Interval, for example 3 for 3 hours')
interval_type=Column(String,
qt_label='Interval Type',
qt_description='Interval Type',
choices=ActionAbapJobSchedulingValidationIntervalType.CHOICES,
default=ActionAbapJobSchedulingValidationIntervalType.HOUR)
#executionclient = Column(String, qt_label='Client', qt_description='Client')
abapname = Column(String, qt_label='ABAP Program Name', qt_description='Name of the ABAP Program')
sel_jobname = Column(String, qt_label='Job Name', qt_description='Name of the Job')
sel_jobcount = Column(String, qt_label='Job Count', qt_description='Internal Number of the job')
sel_jobgroup = Column(String, qt_label='Job Group', qt_description='Job Group')
sel_username = Column(String, qt_label='Username', qt_description='Job Scheduler')
sel_from_date = Column(Date, qt_label='From Date', qt_description='From Date')
sel_from_time = Column(Time, qt_label='From Time', qt_description='From Time')
sel_to_date = Column(Date, qt_label='To Date', qt_description='To Date')
sel_to_time = Column(Time, qt_label='To Time', qt_description='To Time')
sel_no_date = Column(Boolean,
qt_label='Without Date',
qt_description='No Date',
choices=models.meta.YesNoChoice.CHOICES)
sel_with_pred = Column(Boolean,
qt_label='With Predecessor',
qt_description='With Predecessor',
choices=models.meta.YesNoChoice.CHOICES)
sel_eventid = Column(String, qt_label='Event ID', qt_description='Event ID')
sel_eventpara = Column(String, qt_label='Event Parameter', qt_description='Event Parameter')
sel_prelim = Column(Boolean,
qt_label='Status Preliminary',
qt_description='Status Preliminary',
choices=models.meta.YesNoChoice.CHOICES)
sel_schedul= Column(Boolean,
qt_label='Status Scheduled',
qt_description='Status Scheduled',
choices=models.meta.YesNoChoice.CHOICES)
sel_ready = Column(Boolean,
qt_label='Status Ready',
qt_description='Status Ready',
choices=models.meta.YesNoChoice.CHOICES)
sel_running = Column(Boolean,
qt_label='Status Running',
qt_description='Status Running',
choices=models.meta.YesNoChoice.CHOICES)
sel_finished = Column(Boolean,
qt_label='Status Finished',
qt_description='Status Finished',
choices=models.meta.YesNoChoice.CHOICES)
sel_aborted = Column(Boolean,
qt_label='Status Aborted',
qt_description='Status Aborted',
choices=models.meta.YesNoChoice.CHOICES)
#TODO: At some point, add the selection options for dates and time
__qtmap__ = [param_set_name, expected_count, operator, interval, interval_type, abapname, sel_jobname, sel_jobcount,
sel_jobgroup, sel_username, sel_with_pred, sel_eventid, sel_prelim, sel_schedul, sel_ready, sel_running, sel_finished, sel_aborted]
|
from __future__ import print_function
# Uncomment to run this module directly. TODO comment out.
#import sys, os
#sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
# End of uncomment.
import unittest
import subprocess
import runserver
from views import neo4j_driver
import helper
from passlib.hash import argon2
class Neo4jTestCase(unittest.TestCase):
def setUp(self):
helper.load_neo4j_test_data()
runserver.app.config['TESTING'] = True
self.app = runserver.app.test_client()
def tearDown(self):
helper.delete_neo4j_test_data()
def test_users_data(self):
with neo4j_driver.session() as neo4j_session:
# Test unknown user
results = neo4j_session.run("MATCH (u:User {user : 'xxx'}) RETURN u.user AS user, u.argon_password AS argon_password")
result = results.single()
assert(not result)
# Test known user
results = neo4j_session.run("MATCH (u:User {user : 'testSuite'}) RETURN u.user AS user, u.argon_password AS argon_password")
result = results.single()
assert(result)
assert result['user'] == 'testSuite'
assert(argon2.verify('demo123', result['argon_password']))
def test_login_logout(self):
rv = self.login('Testx', 'demo123')
assert rv.status_code == 401
assert 'Invalid Credentials. Please try again.' in rv.data
rv = self.login('testSuite', 'demo123x')
assert rv.status_code == 401
assert 'Invalid Credentials. Please try again' in rv.data
rv = self.login('testSuite', 'demo123')
assert rv.status_code == 200
assert 'Authenticated' in rv.data
rv = self.logout()
assert rv.status_code == 200
assert 'Please login' and 'username' and 'password' in rv.data
def test_change_password(self):
rv = self.login('testSuite', 'demo123')
assert rv.status_code == 200
assert 'Authenticated' in rv.data
rv = self.change_password('testSuite', 'demo123', 'demo456')
assert rv.status_code == 200
print(rv.data)
assert 'Password for username \'testSuite\' changed' in rv.data
rv = self.login('testSuite', 'demo456')
assert rv.status_code == 200
rv = self.login('testSuite', 'demo123')
assert rv.status_code == 401
rv = self.change_password('testSuite', 'demo456', 'demo123')
assert rv.status_code == 200
rv = self.change_password('x', 'demo123', 'demo456')
assert rv.status_code == 401
rv = self.change_password('testSuite', 'x', 'demo456')
assert rv.status_code == 401
def login(self, username, password):
return self.app.post('/login', data=dict(
name=username,
password=password
), follow_redirects=True)
def logout(self):
return self.app.get('/logout', follow_redirects=True)
def change_password(self, username, password, new_pass_1):
return self.app.post('/change_password', data=dict(
change_pwd_name=username,
current_password=password,
new_password_1=new_pass_1,
), follow_redirects=True)
if __name__ == '__main__':
unittest.main()
|
#########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import shutil
import tempfile
import testtools
from cloudify import context
from cloudify.workflows import local
from cloudify.decorators import operation
from cloudify.decorators import workflow
from cloudify import ctx as operation_ctx
from cloudify.workflows import ctx as workflow_ctx
class TestLocalWorkflowGetAttribute(testtools.TestCase):
def test_in_memory_storage(self):
self._test()
def test_file_storage(self):
tempdir = tempfile.mkdtemp()
storage = local.FileStorage(tempdir)
try:
self._test(storage)
finally:
shutil.rmtree(tempdir)
def test_file_storage_payload(self):
tempdir = tempfile.mkdtemp()
storage = local.FileStorage(tempdir)
try:
self._test(storage)
# update payload
with storage.payload() as payload:
payload['payload_key'] = 'payload_key_value'
# read payload
storage2 = local.FileStorage(tempdir)
local.load_env(self.env.name, storage=storage2)
with storage2.payload() as payload:
self.assertEqual(payload['payload_key'], 'payload_key_value')
finally:
shutil.rmtree(tempdir)
def _test(self, storage=None):
blueprint_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'resources/blueprints/get_attribute.yaml')
self.env = local.init_env(blueprint_path, storage=storage)
self.env.execute('setup', task_retries=0)
self.env.execute('run', task_retries=0)
@workflow
def populate_runtime_properties(**_):
for node in workflow_ctx.nodes:
for instance in node.instances:
instance.execute_operation('test.setup')
@workflow
def run_all_operations(**_):
node = workflow_ctx.get_node('node1')
instance = next(node.instances)
instance.execute_operation('test.op')
relationship = next(instance.relationships)
relationship.execute_source_operation('test.op')
relationship.execute_target_operation('test.op')
@operation
def populate(**_):
operation_ctx.instance.runtime_properties.update({
'self_ref_property': 'self_ref_value',
'node_ref_property': 'node_ref_value',
'source_ref_property': 'source_ref_value',
'target_ref_property': 'target_ref_value',
})
@operation
def op(self_ref=None,
node_ref=None,
source_ref=None,
target_ref=None,
static=None,
**_):
if operation_ctx.type == context.NODE_INSTANCE:
assert self_ref == 'self_ref_value', \
'self: {0}'.format(self_ref)
        assert node_ref == 'node_ref_value', \
            'node: {0}'.format(node_ref)
assert source_ref is None, \
'source: {0}'.format(source_ref)
        assert target_ref is None, \
            'target: {0}'.format(target_ref)
assert static == 'static_property_value', \
'static: {0}'.format(static)
else:
assert self_ref is None, \
'self: {0}'.format(self_ref)
        assert node_ref is None, \
            'node: {0}'.format(node_ref)
assert source_ref == 'source_ref_value', \
'source: {0}'.format(source_ref)
assert target_ref == 'target_ref_value', \
'target: {0}'.format(target_ref)
|
import sys
import os
import sqlite3
import json
import subprocess
from pprint import pprint
INDEX_FILE_NAME = 'index.json'
def get_database_path():
if len(sys.argv) < 2:
raise ValueError('Usage: requiem path/to/database/directory')
return os.path.join(
os.path.dirname(sys.argv[0]),
sys.argv[1])
def get_schema_file():
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'schema.sql')
with open(filename) as f:
return f.read()
class Database:
_database_path = None
_sqlite_connection = None
_cursor = None
_set_order = None
_link_order = None
_requirement_order = None
_metadata_order = None
def __init__(self, database_path=None):
if not database_path:
database_path = get_database_path()
self._database_path = database_path
self._set_order = 1
self._link_order = 1
self._requirement_order = 1
self._metadata_order = 1
self._initialize_sqlite()
self._read_index()
self._read_requirement_sets()
def get_set_number(self):
number = self._set_order
self._set_order = self._set_order + 1
return number
def get_link_number(self):
number = self._link_order
self._link_order = self._link_order + 1
return number
def get_requirement_number(self):
number = self._requirement_order
self._requirement_order = self._requirement_order + 1
return number
def get_metadata_number(self):
number = self._metadata_order
self._metadata_order = self._metadata_order + 1
return number
def _initialize_sqlite(self):
self._sqlite_connection = sqlite3.connect(':memory:')
self._cursor = self._sqlite_connection.cursor()
self._cursor.executescript(get_schema_file())
def _read_index(self):
index_filename = os.path.join(self._database_path, INDEX_FILE_NAME)
with open(index_filename) as f:
index = json.load(f)
for req_set in index.get('requirement_sets'):
self.insert_requirement_set(req_set.get('id'), req_set.get('name'), req_set.get('filename'), save=False)
def insert_requirement_set(self, set_id, name, filename, save=True):
statement = """
INSERT INTO requirement_sets (id, name, filename, placement_order)
VALUES (:id, :name, :filename, :placement_order)
"""
args = {
'id': set_id,
'name': name,
'filename': filename,
'placement_order': self.get_set_number()
}
self._cursor.execute(statement, args)
if save:
            self.save('Add requirement set {} {}'.format(set_id, name))
def insert_link(self, from_req_set_id, from_req_id, to_req_set_id, to_req_id, save=True):
statement = """
INSERT INTO links (
from_set_id,
from_id,
to_set_id,
to_id,
placement_order
)
VALUES (
:from_set_id,
:from_id,
:to_set_id,
:to_id,
:placement_order
)
ON CONFLICT DO NOTHING;
"""
args = {
'from_set_id': from_req_set_id,
'from_id': from_req_id,
'to_set_id': to_req_set_id,
'to_id': to_req_id,
'placement_order': self.get_link_number()
}
self._cursor.execute(statement, args)
if save:
self.save('Add link from {}:{} to {}:{}'.format(
from_req_set_id, from_req_id, to_req_set_id, to_req_id))
def insert_requirement(self, set_id, requirement_id, contents):
statement = """
INSERT INTO requirements (set_id, id, key, value, placement_order)
VALUES (:set_id, :id, :key, :value, :placement_order);
"""
args = {
'set_id': set_id,
'id': requirement_id,
'key': 'contents',
'value': contents,
'placement_order': self.get_requirement_number()
}
self._cursor.execute(statement, args)
def insert_requirement_set_metadata(self, set_id, key, value):
statement = """
INSERT INTO requirement_set_metadata (id, key, value, placement_order)
VALUES (:id, :key, :value, :placement_order);
"""
args = {
'id': set_id,
'key': key,
'value': value,
'placement_order': self.get_metadata_number()
}
self._cursor.execute(statement, args)
def _read_requirement_sets(self):
self._cursor.execute('SELECT id, name, filename from requirement_sets')
link_insert_order = 1
for index_entry in self._cursor.fetchall():
(set_id, name, filename) = index_entry
with open(os.path.join(self._database_path, filename)) as f:
requirement_set = json.load(f)
# read requirements
for requirement in requirement_set.get('requirements'):
self.insert_requirement(set_id, requirement.get('id'), requirement.get('contents'))
# links
for link in requirement.get('from_links'):
to_requirement_set_id, to_requirement_id = link.split(':')
self.insert_link(set_id, requirement.get('id'), to_requirement_set_id, to_requirement_id, save=False)
for link in requirement.get('to_links'):
from_requirement_set_id, from_requirement_id = link.split(':')
self.insert_link(from_requirement_set_id, from_requirement_id, set_id, requirement.get('id'), save=False)
# read metadata
for key, value in requirement_set.items():
if key in ['name', 'filename', 'requirements']:
continue
self.insert_requirement_set_metadata(set_id, key, value)
def get_requirement_sets(self):
statement = 'SELECT id, name, filename FROM requirement_sets;'
self._cursor.execute(statement)
sets = []
for req_set in self._cursor.fetchall():
(set_id, name, filename) = req_set
sets.append({
'id': set_id,
'name': name,
'filename': filename
})
return sets
def get_requirement_set(self, set_id):
statement = 'SELECT id, name, filename FROM requirement_sets WHERE id = :set_id;'
self._cursor.execute(statement, {'set_id': set_id})
for req_set in self._cursor.fetchall():
(set_id, name, filename) = req_set
return {
'id': set_id,
'name': name,
'filename': filename
}
return None
def get_links_with_content(self, set_id, req_id):
statement = """
SELECT links.from_set_id, links.from_id, links.to_set_id, links.to_id,
r_from.key, r_from.value, r_to.key, r_to.value
FROM links
JOIN requirements AS r_from ON (links.from_set_id = r_from.set_id AND links.from_id = r_from.id)
JOIN requirements AS r_to ON (links.to_set_id = r_to.set_id AND links.to_id = r_to.id)
WHERE (
(links.from_set_id = :set_id AND links.from_id = :req_id)
OR
(links.to_set_id = :set_id AND links.to_id = :req_id)
)
AND r_from.key = 'contents'
AND r_to.key = 'contents'
ORDER BY links.placement_order ASC;
"""
self._cursor.execute(statement, {'set_id': set_id, 'req_id': req_id})
return [
{
'from_set_id': from_set_id,
'from_id': from_id,
'from_content': from_value,
'to_set_id': to_set_id,
'to_id': to_id,
'to_content': to_value
}
for (from_set_id, from_id, to_set_id, to_id, from_key, from_value, to_key, to_value)
in self._cursor.fetchall()
]
def get_requirement(self, set_id, req_id):
statement = """
SELECT key, value
FROM requirements
WHERE set_id = :set_id AND id = :req_id
"""
self._cursor.execute(statement, {'set_id': set_id, 'req_id': req_id})
rows = self._cursor.fetchall()
requirement = {
'id': req_id,
'set_id': set_id
}
for row in rows:
(key, value) = row
requirement[key] = value
return requirement
def get_requirements(self, set_id):
requirements = []
statement = """
SELECT id, key, value
FROM requirements
WHERE set_id = :set_id
ORDER BY placement_order ASC
"""
self._cursor.execute(statement, {'set_id': set_id})
current_id = None
requirement = {}
rows = self._cursor.fetchall()
for row in rows:
(req_id, key, value) = row
if current_id != None and current_id != req_id:
requirements.append(requirement)
current_id = req_id
requirement = {}
current_id = req_id
requirement['id'] = req_id
requirement[key] = value
links = self.get_links_with_content(set_id, req_id)
requirement['from_links'] = [
'{}:{}'.format(link.get('to_set_id'), link.get('to_id'))
for link in links if link.get('from_id') == req_id
]
requirement['to_links'] = [
'{}:{}'.format(link.get('from_set_id'), link.get('from_id'))
for link in links if link.get('to_id') == req_id
]
        # when there are no requirements in the set we don't want to add an
        # empty dict
if len(requirement) > 0:
requirements.append(requirement)
return requirements
def remove_requirement(self, set_id, req_id, save=True):
statement = 'DELETE FROM requirements WHERE set_id = :set_id AND id = :req_id'
self._cursor.execute(statement, {'set_id': set_id, 'req_id': req_id})
if save:
self.save('Remove requirement {}'.format(req_id))
def update_requirement(self, set_id, req_id, contents, save=True):
statement = 'UPDATE requirements SET value=:value WHERE set_id=:set_id AND id=:req_id AND key=:key'
self._cursor.execute(statement, {'set_id': set_id, 'req_id': req_id, 'key': 'contents', 'value': contents})
if save:
self.save('Update requirement {}'.format(req_id))
    # This function is a bit tricky and it is likely that the exact numbering of
    # the placement_order is not continuous. However, what matters is that the
    # placement order is strictly increasing so that it can be used to sort the
    # requirements, which holds for this implementation of the move. Also, the
    # specific placement order numbers do not matter as they only exist in the
    # in-memory database.
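    # Worked example (hypothetical orders): with requirements A, B, C at
    # placement orders 1, 2, 3, moving C to index 0 computes a new placement
    # order of 1, shifts the existing rows up by one (A -> 2, B -> 3, C -> 4),
    # and then assigns C the order 1, so the sorted result is C, A, B.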
def move_requirement(self, set_id, req_id, new_index, placement_order=None, save=True):
# first find old placement_order
statement = 'SELECT placement_order FROM requirements WHERE set_id = :set_id and id = :req_id'
self._cursor.execute(statement, {'set_id': set_id, 'req_id': req_id})
old_placement_order = self._cursor.fetchone()[0]
if placement_order:
new_placement_order = placement_order
else:
# find the first placement order in the set, can then calculate the new placement order
# since new_index is the same as the offset from the beginning of the set
statement = 'SELECT placement_order FROM requirements WHERE set_id = :set_id ORDER BY placement_order ASC LIMIT 1'
self._cursor.execute(statement, {'set_id': set_id, 'req_id': req_id})
first_placement_order = self._cursor.fetchone()[0]
new_placement_order = first_placement_order + new_index
if old_placement_order < new_placement_order:
# moved down
# move all requirements before the new placement_order up one step
# move all requirements after new_placement_order down one step
statement = 'UPDATE requirements SET placement_order = placement_order - 1 WHERE placement_order <= :new_placement_order'
self._cursor.execute(statement, {'new_placement_order': new_placement_order})
statement = 'UPDATE requirements SET placement_order = placement_order + 1 WHERE placement_order > :new_placement_order'
self._cursor.execute(statement, {'new_placement_order': new_placement_order})
else:
# moved up
# move all requirements at or after new_placement_order
statement = 'UPDATE requirements SET placement_order = placement_order + 1 WHERE placement_order >= :new_placement_order'
self._cursor.execute(statement, {'new_placement_order': new_placement_order})
# move the requirement to its new position
statement = 'UPDATE requirements SET placement_order = :new_placement_order WHERE set_id = :set_id and id = :req_id'
self._cursor.execute(statement, {'new_placement_order': new_placement_order, 'set_id': set_id, 'req_id': req_id})
if save:
self.save(comment='Move requirement {} to index {}'.format(req_id, new_index))
def find_requirement_placement_order(self, set_id, req_id):
statement = 'SELECT placement_order FROM requirements WHERE set_id = :set_id and id = :req_id'
self._cursor.execute(statement, {'set_id': set_id, 'req_id': req_id})
placement_order = self._cursor.fetchone()[0]
return placement_order
def rename_requirement_set(self, set_id, new_name, new_filename, old_filename, save=True):
statement = 'UPDATE requirement_sets SET name=:name, filename=:filename WHERE id=:id'
self._cursor.execute(statement, {'name': new_name, 'filename': new_filename, 'id': set_id})
subprocess.run(['git', 'mv', old_filename, new_filename], cwd=self._database_path)
if save:
self.save('Rename requirement set {} to {}'.format(set_id, new_name))
def save(self, comment):
# save index
statement = 'SELECT id, name, filename FROM requirement_sets ORDER BY placement_order ASC'
self._cursor.execute(statement)
index = []
for row in self._cursor.fetchall():
(set_id, name, filename) = row
index.append({
'id': set_id,
'name': name,
'filename': filename
})
with open(os.path.join(self._database_path, INDEX_FILE_NAME), 'w') as f:
json.dump({'requirement_sets': index}, f, indent=4, sort_keys=True)
subprocess.run(['git', 'add', INDEX_FILE_NAME], cwd=self._database_path)
# save requirement sets
for req_set in index:
requirements = self.get_requirements(req_set.get('id'))
data = {
'name': req_set.get('name'),
'id': req_set.get('id'),
'requirements': requirements
}
with open(os.path.join(self._database_path, req_set.get('filename')), 'w') as f:
json.dump(data, f, indent=4, sort_keys=True)
subprocess.run(['git', 'add', req_set.get('filename')], cwd=self._database_path)
subprocess.run(['git', 'commit', '-m', comment], cwd=self._database_path)
|
import numpy as np
import torch
from PIL import Image
# NOTE: detect_edge_batch below relies on torch and an edge_detect helper; the
# import path for edge_detect is assumed to be the project's edge_detector.py
# referenced in the comment inside that function.
from edge_detector import edge_detect
def is_image_file(filename):
return any(filename.endswith(extension) for extension in [".png", ".jpg", ".jpeg"])
def load_img(filepath):
img = Image.open(filepath).convert('RGB')
img = img.resize((256, 256), Image.BICUBIC)
return img
def save_img(image_tensor, filename):
image_numpy = image_tensor.float().numpy()
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
image_numpy = image_numpy.clip(0, 255)
image_numpy = image_numpy.astype(np.uint8)
image_pil = Image.fromarray(image_numpy)
image_pil.save(filename)
print("Image saved as {}".format(filename))
def detect_edge_batch(imgs):
# YOU MAY NEED TO MODIFY THIS FUNCTION IN ORDER TO CHOOSE THE BEST EDGE DETECTION THAT WORKS ON YOUR DATA
# FOR THAT, YOU MAY ALSO NEED TO CHANGE THE SOME PARAMETERS; SEE EDGE_DETECTOR.PY
# import pdb; pdb.set_trace()
for im in imgs:
edge_map = edge_detect(im)
# edge_map = edge_map/255.
if (edge_map.max() - edge_map.min()) > 0:
edge_map = (edge_map - edge_map.min()) / (edge_map.max() - edge_map.min())
edge_map = torch.tensor(edge_map, dtype=torch.float32)
im[-1] = edge_map # replace the last map
return imgs
|
from pydantic.main import BaseModel
from pydantic.networks import EmailStr
from app.models.rwmodel import MongoModel
class TokenPayload(MongoModel):
email: EmailStr = ""
class Token(BaseModel):
    access_token: str
token_type: str
|
import os
import requests
import datetime as dt
from twilio.rest import Client
STOCK = "TSLA"
COMPANY_NAME = "Tesla Inc"
now = dt.datetime.now()
# Alpha Vantage keys its daily series by zero-padded ISO dates; use timedelta
# so month and year boundaries are handled correctly.
yesterday = (now - dt.timedelta(days=2)).strftime("%Y-%m-%d")
day_before = (now - dt.timedelta(days=3)).strftime("%Y-%m-%d")
# STEP 1: Use https://www.alphavantage.co
# When STOCK price increase/decreases by 5% between yesterday and the day before yesterday then print("Get News").
alpha_api = os.environ.get("ALPHA_API")
alpha_endpoint = "https://www.alphavantage.co/query"
params = {
"function": "TIME_SERIES_DAILY",
"symbol": STOCK,
"apikey": alpha_api,
}
res = requests.get(url=alpha_endpoint, params=params)
res.raise_for_status()
stock = res.json()
stock_yesterday = float(stock["Time Series (Daily)"][yesterday]["4. close"])
stock_day_before = float(stock["Time Series (Daily)"][day_before]["4. close"])
stock_change = round((stock_yesterday - stock_day_before) / stock_day_before * 100, 2)
if stock_change < 0:
stock_move = f"{STOCK}: 🔻{stock_change * -1}%"
elif stock_change > 0:
stock_move = f"{STOCK}: 🔺{stock_change}%"
else:
stock_move = f"{STOCK}: {stock_change}%"
# STEP 2: Use https://newsapi.org
# Instead of printing ("Get News"), actually get the first 3 news pieces for the COMPANY_NAME.
news_api = os.environ.get("NEWS_API")
news_endpoint = "http://newsapi.org/v2/top-headlines"
params = {
"q": COMPANY_NAME,
"from": yesterday,
"apiKey": news_api,
}
res = requests.get(url=news_endpoint, params=params)
res.raise_for_status()
news_data = res.json()
top_three = news_data["articles"][:3]
news = [f'{stock_move}\nHeadline: {news["title"]}\nBrief: {news["description"]}' for news in top_three]
for item in news:
# STEP 3: Use https://www.twilio.com
# Send a separate message with the percentage change and each article's title and description to your phone number.
# Optional: Format the SMS message like this:
"""
TSLA: 🔺2%
Headline: Were Hedge Funds Right About Piling Into Tesla Inc. (TSLA)?.
Brief: We at Insider Monkey have gone over 821 13F filings that hedge funds and prominent investors are required to
file by the SEC The 13F filings show the funds' and investors' portfolio positions as of March 31st, near the height
of the coronavirus market crash.
or
"TSLA: 🔻5%
Headline: Were Hedge Funds Right About Piling Into Tesla Inc. (TSLA)?.
Brief: We at Insider Monkey have gone over 821 13F filings that hedge funds and prominent investors are required to
file by the SEC The 13F filings show the funds' and investors' portfolio positions as of March 31st, near the height
of the coronavirus market crash.
"""
account_sid = os.environ.get("ACCOUNT_SID")
auth_token = os.environ.get("AUTH_TOKEN")
receiver = os.environ.get("RECEIVER")
client = Client(account_sid, auth_token)
message = client.messages\
.create(
body=f"{item}",
from_="+14156349707",
to=receiver,
)
print(message.status)
|
import pytest
import sys
import io
from adjust_driver import Ec2WinDriver, DESC, HAS_CANCEL, VERSION
adjust_json_stdin = '''\
{
"application": {
"components": {
"web": {
"settings": {
"UriEnableCache": {"value": 1},
"UriScavengerPeriod": {"value": 260},
"WebConfigCacheEnabled": {"value": 0},
"WebConfigEnableKernelCache": {"value": 1},
"inst_type": {"value": "t2.micro"}
}
}
}
}
}
'''
def test_version(monkeypatch):
with monkeypatch.context() as m:
# replicate command line arguments fed in by servo
m.setattr(sys, 'argv', ['', '--version', '1234'])
driver = Ec2WinDriver(cli_desc=DESC, supports_cancel=HAS_CANCEL, version=VERSION)
with pytest.raises(SystemExit) as exit_exception:
driver.run()
assert exit_exception.type == SystemExit
assert exit_exception.value.code == 0
def test_info(monkeypatch):
with monkeypatch.context() as m:
# replicate command line arguments fed in by servo
m.setattr(sys, 'argv', ['', '--info', '1234'])
driver = Ec2WinDriver(cli_desc=DESC, supports_cancel=HAS_CANCEL, version=VERSION)
with pytest.raises(SystemExit) as exit_exception:
driver.run()
assert exit_exception.type == SystemExit
assert exit_exception.value.code == 0
def test_query(monkeypatch):
with monkeypatch.context() as m:
# replicate command line arguments fed in by servo
m.setattr(sys, 'argv', ['', '--query', '1234'])
driver = Ec2WinDriver(cli_desc=DESC, supports_cancel=HAS_CANCEL, version=VERSION)
with pytest.raises(SystemExit) as exit_exception:
driver.run()
assert exit_exception.type == SystemExit
assert exit_exception.value.code == 0
def test_adjust(monkeypatch):
with monkeypatch.context() as m:
# replicate command line arguments fed in by servo
m.setattr(sys, 'argv', ['', '1234'])
m.setattr(sys, 'stdin', io.StringIO(adjust_json_stdin))
driver = Ec2WinDriver(cli_desc=DESC, supports_cancel=HAS_CANCEL, version=VERSION)
driver.run()
assert True
|
import pytest
from habr_challenge.site_config import Selector, SiteConfig
@pytest.fixture(scope="module")
def selector():
return Selector('div', class_='test', id_='me')
@pytest.fixture(scope="module")
def site_config():
SiteConfig.SITE_CONFIG['test'] = {
'url': 'http://test.com/',
'pagination': 'pages{page}/',
'div': Selector('div', class_='test_me'),
}
return SiteConfig('test')
|
# Copyright 2016 Chr. Hansen A/S and The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import numpy as np
from cachetools import LRUCache
from marsi.chemistry.common_ext import tanimoto_coefficient, tanimoto_distance, rmsd, monte_carlo_volume
from scipy.spatial import ConvexHull
from scipy.spatial.qhull import QhullError
__all__ = ["rmsd", "tanimoto_coefficient", "tanimoto_distance", "monte_carlo_volume",
"INCHI_KEY_REGEX", 'SOLUBILITY']
inchi_key_lru_cache = LRUCache(maxsize=512)
SOLUBILITY = {
"high": lambda sol: sol > 0.00006,
"medium": lambda sol: 0.00001 <= sol <= 0.00006,
"low": lambda sol: sol < 0.00001,
"all": lambda sol: True
}
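# Usage sketch (thresholds taken from the mapping above):
# SOLUBILITY["high"](0.0001) and SOLUBILITY["medium"](0.00003) are both True.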
INCHI_KEY_REGEX = re.compile(r"[0-9A-Z]{14}-[0-9A-Z]{8,10}-[0-9A-Z]")
def convex_hull_volume(xyz):
try:
return ConvexHull(xyz).volume
except (QhullError, ValueError):
return np.nan
def dynamic_fingerprint_cut(n_atoms):
return min(0.017974 * n_atoms + 0.008239, 0.75)
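# Example (values follow from the formula above): for a 20-atom molecule the
# cut is min(0.017974 * 20 + 0.008239, 0.75) ~= 0.368; the 0.75 cap applies
# from roughly 42 atoms upward.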
|
import sys, logging, os, random, math, arcade, open_color
#check to make sure we are running the right version of Python
version = (3,7)
assert sys.version_info >= version, "This script requires at least Python {0}.{1}".format(version[0],version[1])
#turn on logging, in case we have to leave ourselves debugging messages
logging.basicConfig(format='[%(filename)s:%(lineno)d] %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
MARGIN = 20
SCREEN_TITLE = "Invasion"
NUM_ENEMIES = 8
STARTING_LOCATION = (400,100)
BULLET_DAMAGE = 10
BULLET_SPEED = 1
ENEMY_HP = 10
HIT_SCORE = 10
KILL_SCORE = 100
Initial_Velocity = 5
class Bullet(arcade.Sprite):
def __init__(self, position, velocity, damage):
'''
initializes the bullet
Parameters: position: (x,y) tuple
velocity: (dx, dy) tuple
damage: int (or float)
'''
super().__init__("assets/laserRed.png", 0.5)
(self.center_x, self.center_y) = position
(self.dx, self.dy) = velocity
self.damage = damage
def update(self):
'''
Moves the bullet
'''
self.center_x += self.dx
self.center_y += self.dy
class Player(arcade.Sprite):
def __init__(self):
super().__init__("assets/player.png", 0.5)
(self.center_x, self.center_y) = STARTING_LOCATION
class Enemy(arcade.Sprite):
def __init__(self, position, velocity):
super().__init__("assets/enemyShip.png", 0.5)
self.hp = ENEMY_HP
(self.center_x,self.center_y) = position
(self.dx, self.dy) = velocity
class Window(arcade.Window):
def __init__(self, width, height, title):
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
self.set_mouse_visible(True)
self.bullet_list = arcade.SpriteList()
self.enemy_list = arcade.SpriteList()
self.player = Player()
self.score = 0
def setup(self):
for i in range(NUM_ENEMIES):
x = 120 * (i+1) + 40
y = 500
dx = random.uniform(-Initial_Velocity, Initial_Velocity)
dy = random.uniform(-Initial_Velocity, Initial_Velocity)
enemy = Enemy((x,y), (dx,dy))
self.enemy_list.append(enemy)
def update(self, delta_time):
self.bullet_list.update()
for e in self.enemy_list:
missle = arcade.check_for_collision_with_list(e,self.bullet_list)
for b in missle:
e.hp = e.hp - b.damage
b.kill()
if e.hp <=0:
e.kill()
self.score = self.score + KILL_SCORE
else:
self.score = self.score + HIT_SCORE
self.enemy_list.update()
for e in self.enemy_list:
e.center_x = e.center_x + e.dx
            e.center_y = e.center_y + e.dy
if e.center_x <= 200:
e.dx = abs(e.dx)
if e.center_x >= SCREEN_WIDTH:
                e.dx = abs(e.dx) * -1
if e.center_y <= 200:
e.dy = abs(e.dy)
if e.center_y >= SCREEN_HEIGHT:
                e.dy = abs(e.dy) * -1
def on_draw(self):
arcade.start_render()
arcade.draw_text(str(self.score), 20, SCREEN_HEIGHT - 40, open_color.white, 16)
self.player.draw()
self.bullet_list.draw()
self.enemy_list.draw()
def on_mouse_motion(self, x, y, dx, dy):
'''
The player moves left and right with the mouse
'''
self.player.center_x = x
def on_mouse_press(self, x, y, button, modifiers):
if button == arcade.MOUSE_BUTTON_LEFT:
x = self.player.center_x
y = self.player.center_y + 15
bullet = Bullet((x,y),(0,10),BULLET_DAMAGE)
self.bullet_list.append(bullet)
def main():
window = Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
|
from versions.default import *
class Magma(Server):
    @staticmethod
    def getlink(version_number: str) -> str:
versions = {
'1.12':'https://www.mediafire.com/file/0t95jcbgint6bod/Magma-761933c-STABLE-server.jar/file',
'1.12.2':'https://www.mediafire.com/file/un8i590pylhxx3r/Magma-dd991e7-DEV-server.jar/file',
}
versions['stable'] = versions['1.12']
versions['dev'] = versions['1.12.2']
        if version_number in versions:
            download_url = versions[version_number]
return download_url
else:
print()
print(f'The version "{version_number}" is not found!')
print()
return "about:blank"
|
from dearpygui.core import *
import os
def set_style(heron_path):
set_style_window_padding(8.00, 8.00)
set_style_frame_padding(4.00, 3.00)
set_style_item_spacing(8.00, 4.00)
set_style_item_inner_spacing(4.00, 4.00)
set_style_touch_extra_padding(0.00, 0.00)
set_style_indent_spacing(21.00)
set_style_scrollbar_size(14.00)
set_style_grab_min_size(10.00)
set_style_window_border_size(1.00)
set_style_child_border_size(1.00)
set_style_popup_border_size(1.00)
set_style_frame_border_size(1.00)
set_style_tab_border_size(0.00)
set_style_window_rounding(7.00)
set_style_child_rounding(0.00)
set_style_frame_rounding(2.30)
set_style_popup_rounding(0.00)
set_style_scrollbar_rounding(0.00)
set_style_grab_rounding(0.00)
set_style_tab_rounding(4.00)
set_style_window_title_align(0.00, 0.50)
set_style_window_menu_button_position(mvDir_Left)
set_style_color_button_position(mvDir_Right)
set_style_button_text_align(0.50, 0.50)
set_style_selectable_text_align(0.00, 0.00)
set_style_display_safe_area_padding(3.00, 3.00)
set_style_global_alpha(1.00)
set_style_antialiased_lines(True)
set_style_antialiased_fill(True)
set_style_curve_tessellation_tolerance(1.25)
set_style_circle_segment_max_error(1.60)
add_additional_font(os.path.join(heron_path, 'resources', 'fonts', 'SF-Pro-Rounded-Regular.ttf'), 18)
|
# A node in a tree: keeps a list of children, optional leaf data, and a reference to its parent.
class Node:
def __init__(self, parent):
self._children = []
self._leaf_data = None
self._parent = parent
    # Return the list of child nodes.
def get_children(self):
return self._children
    # Return the parent node (None for the root).
def get_parent(self):
return self._parent
    # A node is a leaf when it carries leaf data.
def is_leaf(self) -> bool:
return self._leaf_data is not None
    # Attach leaf data to this node.
def set_leaf_data(self, leaf_data):
self._leaf_data = leaf_data
    # Return the leaf data attached to this node, if any.
def get_leaf_data(self):
return self._leaf_data
    # Collapse this node: fold the children into the leaf data and drop them.
def prune(self):
self._leaf_data.combine(self, self._children)
self._children = []
    # Split this node into children produced by the given leaf-data factory.
def split(self, leaf_data_factory):
self._children = self.split_into_children(leaf_data_factory)
|
import json
import csv
import script
import numpy
input_file = open("migrantData.json",'r', encoding='utf-8')
input_data = json.load(input_file)
input_file.close()
causeList = []
uniqueCause = []
causesNewDefList = []
#generalizing causes of death
def deathCauseReplacement(input_data):
for i in input_data:
cause = i["Cause of Death"]
#Health Condition
if "Sickness" in cause or "sickness" in cause or "cancer" in cause or "bleeding" in cause or "Organ" in cause or "Coronary" in cause or "Envenomation" in cause or "Post-partum" in cause or "Respiratory" in cause or "Hypoglycemia" in cause:
cause = "Health Condition"
#Harsh Conditions
elif "Harsh conditions""harsh conditions" in cause or "Harsh weather" in cause or "hearsh weather" in cause or "Exhaustion" in cause or "Heat stroke" in cause:
cause = "Harsh Conditions"
#Unknown
elif "Unknown" in cause or "unknown" in cause:
cause = "Unknown"
#Starvation
elif "Starvation" in cause:
cause = "Starvation"
#Dehydration
elif "Dehydration" in cause:
cause = "Dehydration"
#Drowning
elif "Drowning" in cause or "drowning" in cause or "Pulmonary" in cause or "Respiratory" in cause or "Pneumonia" in cause:
cause = "Drowning"
#Hyperthermia
elif "Hyperthermia" in cause or "hyperthermia" in cause:
cause = "Hyperthermia"
#Hypothermia
elif "Hypothermia" in cause or "hypothermia" in cause:
cause = "Hypothermia"
#Asphyxiation
elif "Asphyxiation" in cause or "asphyxiation" in cause or "Suffocation" in cause:
cause = "Asphyxiation"
#Vehicle Accident
elif "Train" in cause or "train" in cause or "Bus" in cause or "bus" in cause or "vehicle" in cause or "Vehicle" in cause or "truck" in cause or "Truck" in cause or "boat" in cause or "Boat" in cause or "Plane" in cause or "Car" in cause or "car" in cause or "helicopter" in cause:
cause = "Vehicle Accident"
#Murdered
elif "Murder" in cause or "murder" in cause or "murdered" in cause or "Murdered" in cause or "shot" in cause or "Shot" in cause or "Violence" in cause or "violence" in cause or "Hanging" in cause or "mortar" in cause or "landmine" in cause or "Rape" in cause or "Gassed" in cause:
cause = "Murdered"
#Crushed
elif "Rockslide" in cause or "Crushed" in cause or "Crush" in cause:
cause = "Crushed"
#Burned
elif "burns" in cause or "Burned" in cause or "Suffocation" in cause or "fire" in cause or "Fire" in cause:
cause = "Burned"
#Electrocution
elif "Electrocution" in cause:
cause = "Electrocution"
#Fallen
elif "Fall" in cause:
cause = "Fallen"
#Killed by animals
elif "hippopotamus" in cause or "crocodile" in cause:
cause = "Killed by animals"
#Exposure
elif "Exposure" in cause:
cause = "Exposure"
i["Cause of Death"] = cause
deathCauseReplacement(input_data)
#Causes of Death
for i in input_data:
region = i["Region of Incident"]
year = i["Reported Year"]
cause = i["Cause of Death"]
#print(cause)
if year == 2018:
causeList.append(cause)
#collect the unique causes of death seen in the 2018 data
for cause in causeList:
    if cause not in uniqueCause:
        uniqueCause.append(cause)
countList = []
for ucause in uniqueCause:
countList.append(0)
for cause in causeList:
if ucause==cause:
countList[uniqueCause.index(ucause)]+=1
causesDict = dict(zip(uniqueCause,countList))
list1 = sorted(causesDict, key=causesDict.__getitem__, reverse=True)
list2 = sorted(causesDict.values(), reverse=True)
causeDict = dict(zip(list1,list2))
#print(sorted(countList, reverse = True))
#print(causeDict)
#concatenating values to paste in c# for unity
code = "public Dictionary<string, int> causes = new Dictionary<string, int>();\n"
for thing in causeDict:
code+= "causes.Add(\"" + thing + "\"," + str(causeDict[thing]) + ");\n"
# print(code)
### Total dead and missing by gender and age
childrenList = []
femaleList = []
maleList = []
for i in input_data:
totalDeadAndMissing = i["Total Dead and Missing"]
numFemales = i["Number of Females"]
numMales = i["Number of Males"]
numChildren = i["Number of Children"]
if totalDeadAndMissing != "" and numFemales != "" and numMales != "" and numChildren != "":
childrenList.append(numChildren)
femaleList.append(numFemales)
maleList.append(numMales)
sumChild = sum(childrenList)
sumFemale = sum(femaleList)
sumMale = sum(maleList)
|
import unittest
from il2fb.ds.events.definitions.base import Event
from il2fb.ds.events.definitions.cheating import CheatingInfo
from il2fb.ds.events.definitions.cheating import CheatingDetectedEvent
from il2fb.ds.events.definitions import registry
class CheatingDetectedEventTestCase(unittest.TestCase):
def test_derives_from_Event(self):
self.assertTrue(issubclass(CheatingDetectedEvent, Event))
def test_is_registered(self):
self.assertEqual(
registry.get_class_by_name("CheatingDetectedEvent"),
CheatingDetectedEvent,
)
def test_to_primitive(self):
testee = CheatingDetectedEvent(CheatingInfo(
channel_no=207,
cheat_code=8,
cheat_details="Cheat-Engine",
))
self.assertEqual(testee.to_primitive(), {
'category': 'cheating',
'name': 'CheatingDetectedEvent',
'verbose_name': 'Cheating detected',
'help_text': None,
'data': {
'channel_no': 207,
'cheat_code': 8,
'cheat_details': 'Cheat-Engine',
},
})
def test_from_primitive(self):
testee = CheatingDetectedEvent(CheatingInfo(
channel_no=207,
cheat_code=8,
cheat_details="Cheat-Engine",
))
self.assertEqual(
testee,
CheatingDetectedEvent.from_primitive(testee.to_primitive()),
)
|
from __future__ import absolute_import
from django.test import TestCase
from .models import Tournament, Organiser, Pool, PoolStyle
class ExistingRelatedInstancesTests(TestCase):
fixtures = ['tournament.json']
def test_foreign_key(self):
with self.assertNumQueries(2):
tournament = Tournament.objects.get(pk=1)
pool = tournament.pool_set.all()[0]
self.assertIs(tournament, pool.tournament)
def test_foreign_key_prefetch_related(self):
with self.assertNumQueries(2):
tournament = (Tournament.objects.prefetch_related('pool_set').get(pk=1))
pool = tournament.pool_set.all()[0]
self.assertIs(tournament, pool.tournament)
def test_foreign_key_multiple_prefetch(self):
with self.assertNumQueries(2):
tournaments = list(Tournament.objects.prefetch_related('pool_set').order_by('pk'))
pool1 = tournaments[0].pool_set.all()[0]
self.assertIs(tournaments[0], pool1.tournament)
pool2 = tournaments[1].pool_set.all()[0]
self.assertIs(tournaments[1], pool2.tournament)
def test_queryset_or(self):
tournament_1 = Tournament.objects.get(pk=1)
tournament_2 = Tournament.objects.get(pk=2)
with self.assertNumQueries(1):
pools = tournament_1.pool_set.all() | tournament_2.pool_set.all()
related_objects = set(pool.tournament for pool in pools)
self.assertEqual(related_objects, set((tournament_1, tournament_2)))
def test_queryset_or_different_cached_items(self):
tournament = Tournament.objects.get(pk=1)
organiser = Organiser.objects.get(pk=1)
with self.assertNumQueries(1):
pools = tournament.pool_set.all() | organiser.pool_set.all()
first = pools.filter(pk=1)[0]
self.assertIs(first.tournament, tournament)
self.assertIs(first.organiser, organiser)
def test_queryset_or_only_one_with_precache(self):
tournament_1 = Tournament.objects.get(pk=1)
tournament_2 = Tournament.objects.get(pk=2)
# 2 queries here as pool id 3 has tournament 2, which is not cached
with self.assertNumQueries(2):
pools = tournament_1.pool_set.all() | Pool.objects.filter(pk=3)
related_objects = set(pool.tournament for pool in pools)
self.assertEqual(related_objects, set((tournament_1, tournament_2)))
# and the other direction
with self.assertNumQueries(2):
pools = Pool.objects.filter(pk=3) | tournament_1.pool_set.all()
related_objects = set(pool.tournament for pool in pools)
self.assertEqual(related_objects, set((tournament_1, tournament_2)))
def test_queryset_and(self):
tournament = Tournament.objects.get(pk=1)
organiser = Organiser.objects.get(pk=1)
with self.assertNumQueries(1):
pools = tournament.pool_set.all() & organiser.pool_set.all()
first = pools.filter(pk=1)[0]
self.assertIs(first.tournament, tournament)
self.assertIs(first.organiser, organiser)
def test_one_to_one(self):
with self.assertNumQueries(2):
style = PoolStyle.objects.get(pk=1)
pool = style.pool
self.assertIs(style, pool.poolstyle)
def test_one_to_one_select_related(self):
with self.assertNumQueries(1):
style = PoolStyle.objects.select_related('pool').get(pk=1)
pool = style.pool
self.assertIs(style, pool.poolstyle)
def test_one_to_one_multi_select_related(self):
with self.assertNumQueries(1):
poolstyles = list(PoolStyle.objects.select_related('pool').order_by('pk'))
self.assertIs(poolstyles[0], poolstyles[0].pool.poolstyle)
self.assertIs(poolstyles[1], poolstyles[1].pool.poolstyle)
def test_one_to_one_prefetch_related(self):
with self.assertNumQueries(2):
style = PoolStyle.objects.prefetch_related('pool').get(pk=1)
pool = style.pool
self.assertIs(style, pool.poolstyle)
def test_one_to_one_multi_prefetch_related(self):
with self.assertNumQueries(2):
poolstyles = list(PoolStyle.objects.prefetch_related('pool').order_by('pk'))
self.assertIs(poolstyles[0], poolstyles[0].pool.poolstyle)
self.assertIs(poolstyles[1], poolstyles[1].pool.poolstyle)
def test_reverse_one_to_one(self):
with self.assertNumQueries(2):
pool = Pool.objects.get(pk=2)
style = pool.poolstyle
self.assertIs(pool, style.pool)
def test_reverse_one_to_one_select_related(self):
with self.assertNumQueries(1):
pool = Pool.objects.select_related('poolstyle').get(pk=2)
style = pool.poolstyle
self.assertIs(pool, style.pool)
def test_reverse_one_to_one_prefetch_related(self):
with self.assertNumQueries(2):
pool = Pool.objects.prefetch_related('poolstyle').get(pk=2)
style = pool.poolstyle
self.assertIs(pool, style.pool)
def test_reverse_one_to_one_multi_select_related(self):
with self.assertNumQueries(1):
pools = list(Pool.objects.select_related('poolstyle').order_by('pk'))
self.assertIs(pools[1], pools[1].poolstyle.pool)
self.assertIs(pools[2], pools[2].poolstyle.pool)
def test_reverse_one_to_one_multi_prefetch_related(self):
with self.assertNumQueries(2):
pools = list(Pool.objects.prefetch_related('poolstyle').order_by('pk'))
self.assertIs(pools[1], pools[1].poolstyle.pool)
self.assertIs(pools[2], pools[2].poolstyle.pool)
|
from collections import defaultdict
class FrequencyList(list):
def __init__(self, members):
super().__init__(members)
def frequency(self):
counts = defaultdict(int)
for item in self:
counts[item] += 1
return counts
class BinaryNode:
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
class Count:
def __init__(self):
self.value = 0
def increment(self):
self.value += 1
class IndexableNode(BinaryNode):
    def _search(self, count, index):
        # in-order traversal: left subtree, then this node, then right subtree
        if self.left:
            found = self.left._search(count, index)
            if found is not None:
                return found
        if count.value == index:
            return self.value
        count.increment()
        if self.right:
            return self.right._search(count, index)
        return None
    def __getitem__(self, index):
        found = self._search(Count(), index)
        if found is None:
            raise IndexError('Index out of range')
        return found
def main():
foo = FrequencyList(['a', 'b', 'a', 'c', 'b', 'a', 'd'])
assert len(foo) == 7
foo.pop()
assert foo.frequency() == {'a': 3, 'b': 2, 'c': 1}
tree = IndexableNode(
10,
left=IndexableNode(
5,
left=IndexableNode(2),
right=IndexableNode(
6, right=IndexableNode(7))),
right=IndexableNode(
15, left=IndexableNode(11)))
print('Index 0: ', tree[0])
if __name__ == '__main__':
main()
|
import re
import jax.numpy as jnp
import numpy as np
import pytest
from pgmax import fgroup, vgroup
def test_single_factor():
with pytest.raises(ValueError, match="Cannot create a FactorGroup with no Factor."):
fgroup.ORFactorGroup(variables_for_factors=[])
A = vgroup.NDVarArray(num_states=2, shape=(10,))
B = vgroup.NDVarArray(num_states=2, shape=(10,))
variables0 = (A[0], B[0])
variables1 = (A[1], B[1])
ORFactor0 = fgroup.ORFactorGroup(variables_for_factors=[variables0])
with pytest.raises(
ValueError, match="SingleFactorGroup should only contain one factor. Got 2"
):
fgroup.SingleFactorGroup(
variables_for_factors=[variables0, variables1],
factor=ORFactor0,
)
ORFactor1 = fgroup.ORFactorGroup(variables_for_factors=[variables1])
ORFactor0 < ORFactor1
def test_enumeration_factor_group():
vg = vgroup.NDVarArray(shape=(2, 2), num_states=3)
with pytest.raises(
ValueError,
match=re.escape("Expected log potentials shape: (1,) or (2, 1). Got (3, 2)"),
):
enumeration_factor_group = fgroup.EnumFactorGroup(
variables_for_factors=[
[vg[0, 0], vg[0, 1], vg[1, 1]],
[vg[0, 1], vg[1, 0], vg[1, 1]],
],
factor_configs=np.zeros((1, 3), dtype=int),
log_potentials=np.zeros((3, 2)),
)
with pytest.raises(ValueError, match=re.escape("Potentials should be floats")):
enumeration_factor_group = fgroup.EnumFactorGroup(
variables_for_factors=[
[vg[0, 0], vg[0, 1], vg[1, 1]],
[vg[0, 1], vg[1, 0], vg[1, 1]],
],
factor_configs=np.zeros((1, 3), dtype=int),
log_potentials=np.zeros((2, 1), dtype=int),
)
enumeration_factor_group = fgroup.EnumFactorGroup(
variables_for_factors=[
[vg[0, 0], vg[0, 1], vg[1, 1]],
[vg[0, 1], vg[1, 0], vg[1, 1]],
],
factor_configs=np.zeros((1, 3), dtype=int),
)
name = [vg[0, 0], vg[1, 1]]
with pytest.raises(
ValueError,
match=re.escape(
f"The queried factor connected to the set of variables {frozenset(name)} is not present in the factor group."
),
):
enumeration_factor_group[name]
assert (
enumeration_factor_group[[vg[0, 1], vg[1, 0], vg[1, 1]]]
== enumeration_factor_group.factors[1]
)
with pytest.raises(
ValueError,
match=re.escape(
"data should be of shape (2, 1) or (2, 9) or (1,). Got (4, 5)."
),
):
enumeration_factor_group.flatten(np.zeros((4, 5)))
assert jnp.all(enumeration_factor_group.flatten(np.ones(1)) == jnp.ones(2))
assert jnp.all(enumeration_factor_group.flatten(np.ones((2, 9))) == jnp.ones(18))
with pytest.raises(
ValueError, match=re.escape("Can only unflatten 1D array. Got a 3D array.")
):
enumeration_factor_group.unflatten(jnp.ones((1, 2, 3)))
with pytest.raises(
ValueError,
match=re.escape(
"flat_data should be compatible with shape (2, 1) or (2, 9). Got (30,)"
),
):
enumeration_factor_group.unflatten(jnp.zeros(30))
assert jnp.all(
enumeration_factor_group.unflatten(jnp.arange(2)) == jnp.array([[0], [1]])
)
assert jnp.all(enumeration_factor_group.unflatten(jnp.ones(18)) == jnp.ones((2, 9)))
def test_pairwise_factor_group():
vg = vgroup.NDVarArray(shape=(2, 2), num_states=3)
with pytest.raises(
ValueError, match=re.escape("log_potential_matrix should be either a 2D array")
):
fgroup.PairwiseFactorGroup([[vg[0, 0], vg[1, 1]]], np.zeros((1,), dtype=float))
with pytest.raises(
ValueError, match=re.escape("Potential matrix should be floats")
):
fgroup.PairwiseFactorGroup([[vg[0, 0], vg[1, 1]]], np.zeros((3, 3), dtype=int))
with pytest.raises(
ValueError,
match=re.escape(
"Expected log_potential_matrix for 1 factors. Got log_potential_matrix for 2 factors."
),
):
fgroup.PairwiseFactorGroup(
[[vg[0, 0], vg[1, 1]]], np.zeros((2, 3, 3), dtype=float)
)
with pytest.raises(
ValueError,
match=re.escape(
"All pairwise factors should connect to exactly 2 variables. Got a factor connecting to 3 variables"
),
):
fgroup.PairwiseFactorGroup(
[[vg[0, 0], vg[1, 1], vg[0, 1]]], np.zeros((3, 3), dtype=float)
)
name = [vg[0, 0], vg[1, 1]]
with pytest.raises(
ValueError,
match=re.escape(f"The specified pairwise factor {name}"),
):
fgroup.PairwiseFactorGroup([name], np.zeros((4, 4), dtype=float))
pairwise_factor_group = fgroup.PairwiseFactorGroup(
[[vg[0, 0], vg[1, 1]], [vg[1, 0], vg[0, 1]]],
)
with pytest.raises(
ValueError,
match=re.escape(
"data should be of shape (2, 3, 3) or (2, 6) or (3, 3). Got (4, 4)."
),
):
pairwise_factor_group.flatten(np.zeros((4, 4)))
assert jnp.all(
pairwise_factor_group.flatten(np.zeros((3, 3))) == jnp.zeros(2 * 3 * 3)
)
assert jnp.all(pairwise_factor_group.flatten(np.zeros((2, 6))) == jnp.zeros(12))
with pytest.raises(ValueError, match="Can only unflatten 1D array. Got a 2D array"):
pairwise_factor_group.unflatten(np.zeros((10, 20)))
assert jnp.all(
pairwise_factor_group.unflatten(np.zeros(2 * 3 * 3)) == jnp.zeros((2, 3, 3))
)
assert jnp.all(
pairwise_factor_group.unflatten(np.zeros(2 * 6)) == jnp.zeros((2, 6))
)
with pytest.raises(
ValueError,
match=re.escape(
"flat_data should be compatible with shape (2, 3, 3) or (2, 6). Got (10,)."
),
):
pairwise_factor_group.unflatten(np.zeros(10))
|
import time
from WVPoster import WVPoster
wv = WVPoster()
wv.postToSIO("chat", {'id': str(time.time()),
'name': 'Zeno',
't': time.time(),
'text': 'Hello.....'})
|
from Tkinter import *
from PIL import Image, ImageTk
import tkFileDialog
from __main__ import *
import os,sys
def reset():
os.remove("current.txt")
sys.exit()
master = Tk()
master.title("DCM - Home Cinema")
top = Label(master, text="Cinema!", font=("Helvetica", 50))
top.grid()
def adverts():
global advertselector
advertselector = tkFileDialog.askopenfilename(filetypes = (("Digital Cinema Advertising Package", "*.dca.mp4"),("All files", "*.*") ))
def hdmi():
master.destroy()
c = open('current.txt', 'a')
c.close()
w = open('current.txt', 'w')
w.write('/home/pi/Desktop/majestic-pi/' + dim.get() + '/intro.mp4\r\n')
w.write('/home/pi/Desktop/majestic-pi/' + dim.get() + '/glasses.mp4\r\n')
w.write('/home/pi/Desktop/majestic-pi/' + dim.get() + '/mimics/' + cinema.get() + '/1.mp4\r\n')
w.write(dcp.get() + 'trailer.mp4' + '\r\n')
w.write('/home/pi/Desktop/majestic-pi/' + dim.get() + '/mimics/' + cinema.get() + '/2.mp4\r\n')
w.write(advertselector + '\r\n')
w.write('/home/pi/Desktop/majestic-pi/' + dim.get() + '/mimics/' + cinema.get() + '/3.mp4\r\n')
w.write('/home/pi/Desktop/majestic-pi/' + dim.get() + '/mimics/' + cinema.get() + '/soundintro.mp4\r\n')
w.write(dcp.get() + 'filmintro.mp4' + '\r\n')
w.write(dcp.get() + 'film.mp4' + '\r\n')
w.write('/home/pi/Desktop/majestic-pi/' + dim.get() + '/mimics/' + cinema.get() + '/4.mp4\r\n')
w.close()
os.system("python prepare.py & sudo python play.py")
dcp = StringVar(master)
dcp.set("Choose")
drivelabel = Label(master, text="Select DCP:")
drive = OptionMenu(master, dcp, "C:/", "F:/", "G:/", "D:/")
drivelabel.grid(row=1)
drive.grid(row=1, column=1)
advertlabel = Label(master, text="Select Advert Package:")
advertbutton = Button(master, text="Browse", command=adverts)
advertlabel.grid(row=2)
advertbutton.grid(row=2, column=1)
cinema = StringVar(master)
cinema.set("Choose")
cinemalabel = Label(master, text="Choose Cinema Mimic:")
pick = OptionMenu(master, cinema, "Odeon", "Vue", "Cineworld")
cinemalabel.grid(row=4)
pick.grid(row=4, column=1)
dim = StringVar(master)
dim.set("Choose")
drivelabel = Label(master, text="Enable 3D:")
drive = OptionMenu(master, dim, "3D", "2D")
drivelabel.grid(row=5)
drive.grid(row=5, column=1)
img = Image.open("go.png")
go = ImageTk.PhotoImage(img)
label = Button(master, image=go, command=hdmi)
label.grid()
mainloop()
|
from __future__ import print_function
import sys
import os
import csv
def parse_homeowners(csv_file):
with open(csv_file, 'r') as f:
reader = csv.DictReader(f)
homeowners = [x for x in reader]
return homeowners
if __name__ == "__main__":
parse_homeowners(sys.argv[1])
|
#!/usr/bin/env python3
import re
def calc_ore_count(reaction_graph, product, amount, stash):
count, dependencies = reaction_graph.get(product)
reserve = stash.get(product, 0)
while amount > 0 and reserve > 0:
reserve -= 1
amount -= 1
stash[product] = reserve
ore_count = 0
while amount > 0:
amount -= count
for n, chem in dependencies:
if chem == "ORE":
ore_count += n
continue
ore_count += calc_ore_count(reaction_graph, chem, n, stash)
stash[product] += abs(amount)
return ore_count
def construct_graph(reactions):
reaction_graph = {}
for r in reactions:
in_chems, out_amount, out_chem = r[:-1], r[-1][0], r[-1][1]
reaction_graph[out_chem] = (out_amount, in_chems)
return reaction_graph
def load_input():
    for row in [re.findall(r"\d+ [A-Z]+", x) for x in open("input")]:
yield [(int(a), b) for a, b in (r.split(" ") for r in row)]
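# Illustrative note (an assumption, not in the original file): the "input"
# file is expected to follow the Advent of Code 2019 day 14 reaction format,
# one reaction per line, e.g.
#   10 ORE => 10 A
#   7 A, 1 B => 1 C
# load_input() keeps every "<amount> <CHEMICAL>" pair matched by the regex,
# and construct_graph() treats the last pair on each line as the output.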
graph = construct_graph(load_input())
print(calc_ore_count(graph, "FUEL", 1, {}))
|
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill, intent_handler
from mycroft.util.log import LOG
from random import randint
import re
__author__ = "HellCatVN"
class RandomSkill(MycroftSkill):
def __init__(self):
super(RandomSkill, self).__init__(name="RandomSkill")
@intent_handler(IntentBuilder("").require("RandomKeyword"))
def handle_random_intent(self, message):
utterance=message.data.get('utterance')
keyword_search = re.search('random ?([0-9]*) from (?P<From>[0-9]*) to (?P<To>[0-9]*)', utterance, re.IGNORECASE)
random_array = []
if(keyword_search.group(1) != ''):
while (len(random_array)< int(keyword_search.group(1))):
random_array.append(randint(int(keyword_search.group(2)),int(keyword_search.group(3))))
print(random_array)
random_array = list(set(random_array))
print(random_array)
random_string = ''
self.speak_dialog("multi.random.res", data={"time": keyword_search.group(1)})
for i in random_array:
self.speak(str(i))
else:
random_number=randint(int(keyword_search.group(2)),int(keyword_search.group(3)))
self.speak_dialog("single.random.res", data={"number": random_number})
def create_skill():
return RandomSkill()
|
class HostNetworkAddress(object):
def read_get(self, name, idx_name, unity_client):
return unity_client.get_host_network_address(idx_name)
class HostNetworkAddressColumn(object):
def get_idx(self, name, idx, unity_client):
return unity_client.get_hosts()
|
from typing import List, Mapping, Optional
import torch
from ._translation_base import _TranslationBase, NullableTensor
class Field(_TranslationBase):
"""
    A field is not actually a translator; this is experimental
    and will later be refactored to inherit from another interface.
"""
def batch_tensor_by_key(self,
tensors_by_keys: Mapping[str, List[NullableTensor]]
) -> Mapping[str, torch.Tensor]:
"""
        A normal translator accepts a batch of tensors,
        i.e. an iterable collection of the instances produced by the Translator.to_tensor interface.
        A field only handles the tensor list for its own predefined key,
        but it receives the full batch dict keyed by every available key.
:param tensors_by_keys:
:return:
"""
raise NotImplementedError
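# --- Illustrative sketch (not part of the original interface) -------------
# A hypothetical concrete field showing one way batch_tensor_by_key could be
# implemented: this field is assumed to own a single key, "tokens", and it
# zero-pads the per-instance tensors for that key to a common length before
# stacking them.  The key name and the padding policy are illustrative
# assumptions only.
class _ExamplePaddedTokenField(Field):
    def batch_tensor_by_key(self,
                            tensors_by_keys: Mapping[str, List[NullableTensor]]
                            ) -> Mapping[str, torch.Tensor]:
        # keep only the tensors for the key this field cares about
        tensors = [t for t in tensors_by_keys.get("tokens", []) if t is not None]
        if not tensors:
            return {}
        max_len = max(t.size(0) for t in tensors)
        padded = [torch.nn.functional.pad(t, (0, max_len - t.size(0))) for t in tensors]
        return {"tokens": torch.stack(padded)}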
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from local_utils import detect_lp
from os.path import splitext,basename
from keras.models import model_from_json
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.mobilenet_v2 import preprocess_input
from sklearn.preprocessing import LabelEncoder
import glob
def load_model(path):
try:
path = splitext(path)[0]
with open(path +'.json', 'r') as json_file:
model_json = json_file.read()
model = model_from_json(model_json, custom_objects={})
model.load_weights(path +'.h5')
        print('Model loaded successfully')
return model
except Exception as e:
print(e)
wpod_net_path = "wpod-net.json"
wpod_net = load_model(wpod_net_path)
mobileNet_json_path = 'MobileNets_character_recognition.json'
mobileNet_weight_path = 'License_character_recognition_weight.h5'
mobileNet_classes_path = 'license_character_classes.npy'
def load_mobileNet_model(json_path,weight_path,classes_path):
json_file = open(json_path, 'r')
loaded_model_json = json_file.read()
r_model = model_from_json(loaded_model_json)
r_model.load_weights(weight_path)
print("[INFO] Model loaded successfully...")
labels = LabelEncoder()
labels.classes_ = np.load(classes_path)
print("[INFO] Labels loaded successfully...")
return r_model,labels
recognition_model,class_labels = load_mobileNet_model(mobileNet_json_path,mobileNet_weight_path,mobileNet_classes_path)
def predict_from_model(image,model,labels):
image = cv2.resize(image,(80,80))
image = np.stack((image,)*3, axis=-1)
prediction = labels.inverse_transform([np.argmax(model.predict(image[np.newaxis,:]))])
return prediction
Dmin = 256
Dmax = 608
video_path = 'bike_number_plate_video.mp4'
try:
vid = cv2.VideoCapture(int(video_path))
except:
vid = cv2.VideoCapture(video_path)
while True:
return_value, frame = vid.read()
    if not return_value:
        break  # stop once the video stream is exhausted
    if return_value:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
vehicle = frame / 255
ratio = float(max(vehicle.shape[:2])) / min(vehicle.shape[:2])
side = int(ratio * Dmin)
bound_dim = min(side, Dmax)
_,LpImg,_,cor = detect_lp(wpod_net, vehicle, bound_dim,lp_threshold=0.5)
plate_image = cv2.convertScaleAbs(LpImg[0],alpha=(255.0))
        # convert to grayscale
gray = cv2.cvtColor(plate_image,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(7,7),0)
#Applied inversed thresh_binary where the pixel value less than threshold will be converted to 255 and vice versa
binary = cv2.threshold(blur,180,255,cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
## Applied dilation
kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
thre_mor = cv2.morphologyEx(binary, cv2.MORPH_DILATE, kernel3)
cont, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
sorted_contours = sorted(cont, key=lambda ctr: cv2.boundingRect(ctr)[0])
        # create a copy "test_roi" of plate_image to draw bounding boxes on
test_roi = plate_image.copy()
        # Initialize a list which will be used to collect the cropped character images
crop_characters = []
# define standard width and height of character
digit_w, digit_h = 30, 60
for c in sorted_contours:
(x,y,w,h) = cv2.boundingRect(c)
ratio = h/w
if 1<=ratio<=3.5:
if h/plate_image.shape[0]>=0.5:
cv2.rectangle(test_roi,(x,y),(x+w,y+h),(0,255,0),2)
cv2.imshow('kuch',test_roi)
cv2.waitKey(0)
curr_num = thre_mor[y-5:y+h+5,x-5:x+w+5]
curr_num = cv2.resize(curr_num, dsize=(digit_w, digit_h))
_, curr_num = cv2.threshold(curr_num, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
crop_characters.append(curr_num)
print("Detect {} letters...".format(len(crop_characters)))
final_string = ''
for i, character in enumerate(crop_characters):
title = np.array2string(predict_from_model(character,recognition_model,class_labels))
final_string+=title.strip("'[]")
print(final_string)
|
"""
Exposes regular shell commands as services.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/shell_command/
"""
import logging
import subprocess
import shlex
import voluptuous as vol
from homeassistant.helpers import template
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
DOMAIN = 'shell_command'
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
cv.slug: cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up the shell_command component."""
conf = config.get(DOMAIN, {})
cache = {}
def service_handler(call):
"""Execute a shell command service."""
cmd = conf[call.service]
if cmd in cache:
prog, args, args_compiled = cache[cmd]
elif ' ' not in cmd:
prog = cmd
args = None
args_compiled = None
cache[cmd] = prog, args, args_compiled
else:
prog, args = cmd.split(' ', 1)
args_compiled = template.Template(args, hass)
cache[cmd] = prog, args, args_compiled
if args_compiled:
try:
rendered_args = args_compiled.render(call.data)
except TemplateError as ex:
_LOGGER.exception("Error rendering command template: %s", ex)
return
else:
rendered_args = None
if rendered_args == args:
# No template used. default behavior
shell = True
else:
# Template used. Break into list and use shell=False for security
cmd = [prog] + shlex.split(rendered_args)
shell = False
try:
subprocess.call(cmd, shell=shell,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
except subprocess.SubprocessError:
_LOGGER.exception("Error running command: %s", cmd)
for name in conf.keys():
hass.services.register(DOMAIN, name, service_handler)
return True
|
from hypernets.conf import configure, Configurable, Bool, Float, Int, Enum, List
from hypernets.tabular.sklearn_ex import DatetimeEncoder
@configure()
class HyperGBMCfg(Configurable):
# numeric
numeric_pipeline_mode = \
Enum(['simple', 'complex'], default_value='complex',
config=True,
help='Feature scaling mode, simple (standard only) or '
'complex (search in standard, logstandard, minmax, maxabs and robust).'
)
# category
category_pipeline_enabled = \
Bool(True,
config=True,
help='detect and encode category feature from training data or not.'
)
category_pipeline_mode = \
Enum(['simple', 'complex'], default_value='simple',
config=True,
help='Feature encoding mode, simple (SafeOrdinalEncoder) or '
'complex (search in SafeOrdinalEncoder and SafeOneHot+Optional(SVD)).'
)
category_pipeline_auto_detect = \
Bool(False,
config=True,
help='detect category feature from numeric and datetime columns or not.'
)
category_pipeline_auto_detect_exponent = \
Float(0.5,
config=True,
help=''
)
# datetime
datetime_pipeline_enabled = \
Bool(False,
config=True,
help='detect and encode datetime feature from training data or not.'
)
datetime_pipeline_encoder_include = \
List(DatetimeEncoder.default_include, allow_none=True, config=True,
help='include items when encoding datetime feature.')
datetime_pipeline_encoder_exclude = \
List(allow_none=True,
config=True,
help='exclude items when encoding datetime feature.')
# text
text_pipeline_enabled = \
Bool(False,
config=True,
help='detect and encode text feature from training data or not.'
)
text_pipeline_word_count_threshold = \
Int(3,
config=True,
help='')
# estimators
estimator_lightgbm_enabled = \
Bool(True,
config=True,
help='enable lightgbm or not.'
)
estimator_xgboost_enabled = \
Bool(True,
config=True,
help='enable xgboost or not.'
)
estimator_catboost_enabled = \
Bool(True,
config=True,
help='enable catboost or not.'
)
estimator_histgb_enabled = \
Bool(False,
config=True,
help='enable HistGradientBoosting or not.'
)
straightforward_excluded = \
List(['TruncatedSVD', 'OneHot'],
allow_none=True,
config=True,
help='no-straightforward transformer name list.')
|
import os
from django.apps import apps
from django.conf import settings
from .utils import get_tailwind_src_path
class ValidationError(Exception):
pass
class Validations:
def acceptable_label(self, label):
if label not in ["init", "install", "npm", "start", "build", "deploy"]:
raise ValidationError(f"Subcommand {label} doesn't exist")
def is_installed(self, app_name):
if not apps.is_installed(app_name):
raise ValidationError(f"{app_name} is not in INSTALLED_APPS")
def is_tailwind_app(self, app_name):
if not os.path.isfile(
os.path.join(get_tailwind_src_path(app_name), "tailwind.config.js")
):
raise ValidationError(f"'{app_name}' isn't a Tailwind app")
def has_settings(self):
if not hasattr(settings, "TAILWIND_APP_NAME"):
raise ValidationError("TAILWIND_APP_NAME isn't set in settings.py")
|
"""
Tool for visualizing OVAL rules from
ARF XML generated with SCAP scanners.
"""
__version__ = '1.3.2'
|
import os
shuffle_f = open('/data/Datasets/DiOR/DeepFashionDX/deepfashion_test_up_no_shuffle.txt','r')
lines = shuffle_f.readlines()
std_txt = open('/data/Datasets/DiOR/DeepFashionDX_noshuffle/standard_test_anns.txt', 'w')
std_txt.write('pose\n')
std_txt.write('v0200c120000bs0leo700hftjqr66jg0-frmaes_0132.jpg\n') # an arbitrarily chosen pose image
std_txt.write('attr\n')
for i, line in enumerate(lines):
source = line.rstrip().strip().split()[0].replace('/','-') # for deepfashiondx
# source = line.rstrip().strip().split()[0] # for dance50k
# source = source.split('/')[2] + '-' + source.split('/')[3]
std_txt.write(str(i) + ', ' + source + '\n')
target = line.rstrip().strip().split()[1].replace('/','-')
# target = line.rstrip().strip().split()[1]
# target = target.split('/')[2] + '-' + target.split('/')[3]
    std_txt.write(str(i) + ', ' + target + '\n')
std_txt.close()
shuffle_f.close()
|
from sklearn.model_selection import KFold
import tensorflow
import numpy
import os
import sys
# --- Machine Learning Hyper Parameters [START] --- #
BATCH_SIZE = 1000
LEARNING_RATE = 0.001
EPOCHS = 100
CLASSES = 2 # Buggy vs Non-Buggy
LAYER_NEURONS = 128
FEATURE_NUM = 130 # 34 + (16 * 6)
DROPOUT_RATE = 0.5
# --- Machine Learning Hyper Parameters [END] --- #
# --- OTHER [START] --- #
# 278 buggy methods in dataset (excluding Math-66)
# --- OTHER [END] --- #
# Process specific columns of the feature dataset
def processNumFeats(data):
result = []
for line in data:
line = line.strip()
line_data = line.split(",")[1:] # Skip the first column (it contains the method's signature)
row_data = []
for item in line_data:
            if float(item) == 10000000:
                item = 1000  # cap the sentinel max value to improve results
row_data.append(float(item))
result.append(row_data)
return result
# Process specific rows of the feature dataset
def readNumFeats(dir, project):
data = {}
for f in os.listdir(dir):
if f.endswith(".numFeats") and not "Math-66" in f and (project is "*" or project in f):
line_data = open(os.path.join(dir, f),"r").readlines()
processed_data = processNumFeats(line_data)
id = f.split(".")[0]
data[id] = []
data[id].extend(numpy.array(processed_data))
return data # Data is as values data[project-id][index] = <various features> e.g. data["Chart-1"][0] = 0,0,0,1,...,0.8984,0.1351
# Process specific columns of the class dataset (should only ever be 2 columns in data and 1 column retrieved)
def processClassFeats(data):
result_data = []
result_sig = []
for line in data:
line = line.strip().split(",")
methodSig = line[0]
line_data = line[1:]
row_data = []
for item in line_data:
row_data.append(int(item)) # 0 = non-buggy method, 1 = buggy method
result_data.append(row_data)
result_sig.append(methodSig) # methodSig is used elsewhere to associate new susipicious values to methods
return (result_data, result_sig)
def readClassFeats(dir, project):
data = {}
data_sigs = {}
for f in os.listdir(dir):
if f.endswith(".classFeats") and not "Math-66" in f and (project is "*" or project in f):
(processed_data, sigs) = processClassFeats(open(os.path.join(dir, f),"r").readlines()[1:])
id = f.split(".")[0]
data[id] = []
data[id].extend(numpy.array(processed_data))
data_sigs[id] = []
data_sigs[id].extend(sigs)
return (data, data_sigs)
def makeNeuralNetwork():
# copy / paste snippet below
# activation='relu', kernel_regularizer='l1_l2', activity_regularizer='l1_l2', bias_regularizer='l1_l2'
model = tensorflow.keras.models.Sequential()
# --- HIDDEN LAYER 1 --- #
model.add(tensorflow.keras.layers.Dense(LAYER_NEURONS, kernel_regularizer='l2', input_shape=(FEATURE_NUM,)))
model.add(tensorflow.keras.layers.LeakyReLU())
model.add(tensorflow.keras.layers.Dropout(DROPOUT_RATE))
# --- HIDDEN LAYER 2 --- #
model.add(tensorflow.keras.layers.Dense(LAYER_NEURONS, kernel_regularizer='l2'))
model.add(tensorflow.keras.layers.LeakyReLU())
model.add(tensorflow.keras.layers.Dropout(DROPOUT_RATE))
# --- OUTPUT LAYER --- #
model.add(tensorflow.keras.layers.Dense(CLASSES, kernel_regularizer='l2', activation='sigmoid'))
model.summary() # Output summary of neural network
model.compile(loss=tensorflow.keras.losses.BinaryCrossentropy(from_logits=True), optimizer=tensorflow.keras.optimizers.Adagrad(),
metrics=[
tensorflow.keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy'),
'accuracy'
])
return model
def model():
# --- DATA PROCESSING --- #
print("Executing data preprocessing")
print("Importing raw data")
# projects = ["*"] # For inter-project prediction
projects = ["Chart", "Time", "Lang", "Math"] # For intra-project prediction
print(tensorflow.config.list_physical_devices('GPU'))
for project in projects:
# Import raw data #
raw_output_train, sigs = readClassFeats(sys.argv[1], project)
raw_input_train = readNumFeats(sys.argv[1], project)
# Iterate through each project (Chart-1 / Time-10 / Lang-20 / Math-30 / etc)
for project_prediction_id in raw_input_train.keys():
print("Processing", project_prediction_id)
# --- Create Neural Network --- #
network_model = makeNeuralNetwork()
# --- Create Training / Test Data --- #
training_data_input = []
training_data_output = []
# Create training data from input by EXCLUDING project we want to predict (this becomes test_data)
for other_project in raw_input_train.keys():
                if project_prediction_id != other_project:
training_data_input.extend(raw_input_train[other_project])
training_data_output.extend(raw_output_train[other_project])
training_data_input = tensorflow.keras.utils.normalize(numpy.array(training_data_input)) # Normalize data
training_data_output_raw = numpy.array(training_data_output)
# Test data is the project we are trying to predict (e.g. Chart-1)
test_data_input = tensorflow.keras.utils.normalize(numpy.array(raw_input_train[project_prediction_id]))
test_data_output = numpy.array(raw_output_train[project_prediction_id])
# Since class distribution is not reasonably symmetric, manually set weights of classes to be the same
class_weights = {}
class_weights[0] = 1
# class_weights[1] = setClassWeight(training_data_output)
training_data_output = tensorflow.keras.utils.to_categorical(training_data_output_raw, CLASSES)
print("--- Training Results ---")
folds = 10
if(folds > 1):
# Create validation subset from training data
validation_data_splitter = KFold(n_splits=folds)
# Train model using training data, validating on validation data,
# using callbacks to stop training early as needed with the aforementioned class weights
for index, (indicies_train, indicies_validate) in enumerate(validation_data_splitter.split(training_data_input, y=training_data_output)):
tempWeights = []
for item in indicies_validate:
tempWeights.append(training_data_output_raw[item])
class_weights[1] = setClassWeight(tempWeights)
network_model.fit(class_weight=class_weights,
x=training_data_input[indicies_train], y=training_data_output[indicies_train],
batch_size=BATCH_SIZE, epochs=EPOCHS, verbose=1,
validation_data=(training_data_input[indicies_validate], training_data_output[indicies_validate]),
callbacks=getCallback(),
)
else:
class_weights[1] = setClassWeight(training_data_output_raw)
network_model.fit(class_weight=class_weights,
x=training_data_input,
y=training_data_output,
batch_size=BATCH_SIZE,
epochs=EPOCHS
)
print("--- Prediction Results ---")
#output_file = open("sbfl-feats-only_" + project_prediction_id + ".susValues", "w")
#output_file = open("profl-feats-only_" + project_prediction_id + ".susValues", "w")
output_file = open(project_prediction_id + ".susValues", "w")
for k, (i) in enumerate(network_model.predict(test_data_input, verbose=1)):
prediction = numpy.argmax(i)
print('\t-', i, "->" , prediction, prediction == test_data_output[k], sigs[project_prediction_id][k])
# Save new suspicious values to local file
output_file.write(",".join([sigs[project_prediction_id][k], str(i[prediction])]))
output_file.write("\n")
print("\n", "----------------", "\n")
output_file.close()
# Equalizes weights between the positive (1) classes and negative (0) classes
def setClassWeight(data):
other = 0
positive = 0
for row in data:
if(row == 1):
positive += 1
else:
other += 1
weight = other / max(1, positive)
return weight * 4
# These terminate training during a given K-fold
# if validation accuracy or validation loss
# fail to improve within a given epoch timespan
def getCallback():
return [
tensorflow.keras.callbacks.TerminateOnNaN(),
#tensorflow.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=EPOCHS / 4, restore_best_weights=True),
#tensorflow.keras.callbacks.EarlyStopping(monitor='val_binary_crossentropy', patience=EPOCHS / 20, restore_best_weights=True, min_delta=0.001)
]
model()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#=======================================================================
#
# sha256.py
# ---------
# Simple, pure Python model of the SHA-256 hash function. Used as a
# reference for the HW implementation. The code follows the structure
# of the HW implementation as much as possible.
#
#
# Author: Joachim Strömbergson
# Copyright (c) 2013 Secworks Sweden AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#=======================================================================
#-------------------------------------------------------------------
# Python module imports.
#-------------------------------------------------------------------
import sys
#-------------------------------------------------------------------
# Constants.
#-------------------------------------------------------------------
VERBOSE = True
HUGE = False
#-------------------------------------------------------------------
# SHA256()
#-------------------------------------------------------------------
class SHA256():
def __init__(self, mode="sha256", verbose = 0):
if mode not in ["sha224", "sha256"]:
print("Error: Given %s is not a supported mode." % mode)
return 0
self.mode = mode
self.verbose = verbose
self.H = [0] * 8
self.t1 = 0
self.t2 = 0
self.a = 0
self.b = 0
self.c = 0
self.d = 0
self.e = 0
self.f = 0
self.g = 0
self.h = 0
self.w = 0
self.W = [0] * 16
self.k = 0
self.K = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2]
def init(self):
if self.mode == "sha256":
self.H = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19]
else:
self.H = [0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939,
0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4]
def next(self, block):
self._W_schedule(block)
self._copy_digest()
if self.verbose:
print("State after init:")
self._print_state(0)
for i in range(64):
self._sha256_round(i)
if self.verbose:
self._print_state(i)
self._update_digest()
def get_digest(self):
return self.H
def _copy_digest(self):
self.a = self.H[0]
self.b = self.H[1]
self.c = self.H[2]
self.d = self.H[3]
self.e = self.H[4]
self.f = self.H[5]
self.g = self.H[6]
self.h = self.H[7]
def _update_digest(self):
self.H[0] = (self.H[0] + self.a) & 0xffffffff
self.H[1] = (self.H[1] + self.b) & 0xffffffff
self.H[2] = (self.H[2] + self.c) & 0xffffffff
self.H[3] = (self.H[3] + self.d) & 0xffffffff
self.H[4] = (self.H[4] + self.e) & 0xffffffff
self.H[5] = (self.H[5] + self.f) & 0xffffffff
self.H[6] = (self.H[6] + self.g) & 0xffffffff
self.H[7] = (self.H[7] + self.h) & 0xffffffff
def _print_state(self, round):
print("State at round 0x%02x:" % round)
print("t1 = 0x%08x, t2 = 0x%08x" % (self.t1, self.t2))
print("k = 0x%08x, w = 0x%08x" % (self.k, self.w))
print("a = 0x%08x, b = 0x%08x" % (self.a, self.b))
print("c = 0x%08x, d = 0x%08x" % (self.c, self.d))
print("e = 0x%08x, f = 0x%08x" % (self.e, self.f))
print("g = 0x%08x, h = 0x%08x" % (self.g, self.h))
print("")
def _sha256_round(self, round):
self.k = self.K[round]
self.w = self._next_w(round)
self.t1 = self._T1(self.e, self.f, self.g, self.h, self.k, self.w)
self.t2 = self._T2(self.a, self.b, self.c)
self.h = self.g
self.g = self.f
self.f = self.e
self.e = (self.d + self.t1) & 0xffffffff
self.d = self.c
self.c = self.b
self.b = self.a
self.a = (self.t1 + self.t2) & 0xffffffff
def _next_w(self, round):
if (round < 16):
return self.W[round]
else:
tmp_w = (self._delta1(self.W[14]) +
self.W[9] +
self._delta0(self.W[1]) +
self.W[0]) & 0xffffffff
for i in range(15):
self.W[i] = self.W[(i+1)]
self.W[15] = tmp_w
return tmp_w
def _W_schedule(self, block):
for i in range(16):
self.W[i] = block[i]
def _Ch(self, x, y, z):
return (x & y) ^ (~x & z)
def _Maj(self, x, y, z):
return (x & y) ^ (x & z) ^ (y & z)
def _sigma0(self, x):
return (self._rotr32(x, 2) ^ self._rotr32(x, 13) ^ self._rotr32(x, 22))
def _sigma1(self, x):
return (self._rotr32(x, 6) ^ self._rotr32(x, 11) ^ self._rotr32(x, 25))
def _delta0(self, x):
return (self._rotr32(x, 7) ^ self._rotr32(x, 18) ^ self._shr32(x, 3))
def _delta1(self, x):
return (self._rotr32(x, 17) ^ self._rotr32(x, 19) ^ self._shr32(x, 10))
def _T1(self, e, f, g, h, k, w):
return (h + self._sigma1(e) + self._Ch(e, f, g) + k + w) & 0xffffffff
def _T2(self, a, b, c):
return (self._sigma0(a) + self._Maj(a, b, c)) & 0xffffffff
def _rotr32(self, n, r):
return ((n >> r) | (n << (32 - r))) & 0xffffffff
def _shr32(self, n, r):
return (n >> r)
#-------------------------------------------------------------------
# print_digest()
#
# Print the given digest.
#-------------------------------------------------------------------
def print_digest(digest, length = 8):
print("0x%08x, 0x%08x, 0x%08x, 0x%08x" %\
(digest[0], digest[1], digest[2], digest[3]))
if length == 8:
print("0x%08x, 0x%08x, 0x%08x, 0x%08x" %\
(digest[4], digest[5], digest[6], digest[7]))
else:
print("0x%08x, 0x%08x, 0x%08x" %\
(digest[4], digest[5], digest[6]))
print("")
#-------------------------------------------------------------------
# compare_digests()
#
# Check that the given digest matches the expected digest.
#-------------------------------------------------------------------
def compare_digests(digest, expected, length = 8):
correct = True
for i in range(length):
if digest[i] != expected[i]:
correct = False
if (not correct):
print("Error:")
print("Got:")
print_digest(digest, length)
print("Expected:")
print_digest(expected, length)
else:
print("Test case ok.")
#-------------------------------------------------------------------
# sha224_tests()
#
# Tests for the SHA224 mode.
#-------------------------------------------------------------------
def sha224_tests():
print("Running test cases for SHA224:")
my_sha224 = SHA256(mode="sha224", verbose=0)
# TC1: NIST testcase with message "abc"
print("TC1: Single block message test specified by NIST.")
TC1_block = [0x61626380, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000018]
TC1_expected = [0x23097D22, 0x3405D822, 0x8642A477, 0xBDA255B3,
0x2AADBCE4, 0xBDA0B3F7, 0xE36C9DA7]
my_sha224.init()
my_sha224.next(TC1_block)
my_digest = my_sha224.get_digest()
compare_digests(my_digest, TC1_expected, 7)
print("")
# TC2: NIST testcase with double block message."
print("TC2: Double block message test specified by NIST.")
TC2_1_block = [0x61626364, 0x62636465, 0x63646566, 0x64656667,
0x65666768, 0x66676869, 0x6768696A, 0x68696A6B,
0x696A6B6C, 0x6A6B6C6D, 0x6B6C6D6E, 0x6C6D6E6F,
0x6D6E6F70, 0x6E6F7071, 0x80000000, 0x00000000]
TC2_2_block = [0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x000001C0]
TC2_1_expected = [0x8250e65d, 0xbcf62f84, 0x66659c33, 0x33e5e91a,
0x10c8b7b0, 0x95392769, 0x1f1419c3]
TC2_2_expected = [0x75388b16, 0x512776cc, 0x5dba5da1, 0xfd890150,
0xb0c6455c, 0xb4f58b19, 0x52522525]
my_sha224.init()
my_sha224.next(TC2_1_block)
my_digest = my_sha224.get_digest()
compare_digests(my_digest, TC2_1_expected, 7)
my_sha224.next(TC2_2_block)
my_digest = my_sha224.get_digest()
compare_digests(my_digest, TC2_2_expected, 7)
print("")
if (HUGE):
# TC3: Huge message with n blocks
n = 1000
print("TC3: Huge message with %d blocks test case." % n)
TC3_block = [0xaa55aa55, 0xdeadbeef, 0x55aa55aa, 0xf00ff00f,
0xaa55aa55, 0xdeadbeef, 0x55aa55aa, 0xf00ff00f,
0xaa55aa55, 0xdeadbeef, 0x55aa55aa, 0xf00ff00f,
0xaa55aa55, 0xdeadbeef, 0x55aa55aa, 0xf00ff00f]
TC3_expected = [0x7638f3bc, 0x500dd1a6, 0x586dd4d0, 0x1a1551af,
0xd821d235, 0x2f919e28, 0xd5842fab, 0x03a40f2a]
my_sha224.init()
for i in range(n):
my_sha224.next(TC3_block)
my_digest = my_sha224.get_digest()
if (VERBOSE):
print("Digest for block %d:" % i)
print_digest(my_digest, 7)
compare_digests(my_digest, TC3_expected)
print("")
#-------------------------------------------------------------------
# sha256_tests()
#
# Tests for the SHA256 mode.
#-------------------------------------------------------------------
def sha256_tests():
print("Running test cases for SHA256:")
my_sha256 = SHA256(verbose=0)
# TC1: NIST testcase with message "abc"
print("TC1: Single block message test specified by NIST.")
TC1_block = [0x61626380, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000018]
TC1_expected = [0xBA7816BF, 0x8F01CFEA, 0x414140DE, 0x5DAE2223,
0xB00361A3, 0x96177A9C, 0xB410FF61, 0xF20015AD]
my_sha256.init()
my_sha256.next(TC1_block)
my_digest = my_sha256.get_digest()
compare_digests(my_digest, TC1_expected)
print("")
# TC2: NIST testcase with double block message."
print("TC2: Double block message test specified by NIST.")
TC2_1_block = [0x61626364, 0x62636465, 0x63646566, 0x64656667,
0x65666768, 0x66676869, 0x6768696A, 0x68696A6B,
0x696A6B6C, 0x6A6B6C6D, 0x6B6C6D6E, 0x6C6D6E6F,
0x6D6E6F70, 0x6E6F7071, 0x80000000, 0x00000000]
TC2_2_block = [0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x000001C0]
TC2_1_expected = [0x85E655D6, 0x417A1795, 0x3363376A, 0x624CDE5C,
0x76E09589, 0xCAC5F811, 0xCC4B32C1, 0xF20E533A]
TC2_2_expected = [0x248D6A61, 0xD20638B8, 0xE5C02693, 0x0C3E6039,
0xA33CE459, 0x64FF2167, 0xF6ECEDD4, 0x19DB06C1]
my_sha256.init()
my_sha256.next(TC2_1_block)
my_digest = my_sha256.get_digest()
compare_digests(my_digest, TC2_1_expected)
my_sha256.next(TC2_2_block)
my_digest = my_sha256.get_digest()
compare_digests(my_digest, TC2_2_expected)
print("")
if (HUGE):
# TC3: Huge message with n blocks
n = 1000
print("TC3: Huge message with %d blocks test case." % n)
TC3_block = [0xaa55aa55, 0xdeadbeef, 0x55aa55aa, 0xf00ff00f,
0xaa55aa55, 0xdeadbeef, 0x55aa55aa, 0xf00ff00f,
0xaa55aa55, 0xdeadbeef, 0x55aa55aa, 0xf00ff00f,
0xaa55aa55, 0xdeadbeef, 0x55aa55aa, 0xf00ff00f]
TC3_expected = [0x7638f3bc, 0x500dd1a6, 0x586dd4d0, 0x1a1551af,
0xd821d235, 0x2f919e28, 0xd5842fab, 0x03a40f2a]
my_sha256.init()
for i in range(n):
my_sha256.next(TC3_block)
my_digest = my_sha256.get_digest()
if (VERBOSE):
print("Digest for block %d:" % i)
print_digest(my_digest)
compare_digests(my_digest, TC3_expected)
print("")
#-------------------------------------------------------------------
# sha256_issue_test()
# Testcase to test and drive debugging of issue with messages
# that span more than eight blocks.
#-------------------------------------------------------------------
def sha256_issue_test():
block0 = [0x6b900001, 0x496e2074, 0x68652061, 0x72656120,
0x6f662049, 0x6f542028, 0x496e7465, 0x726e6574,
0x206f6620, 0x5468696e, 0x6773292c, 0x206d6f72,
0x6520616e, 0x64206d6f, 0x7265626f, 0x6f6d2c20]
block1 = [0x69742068, 0x61732062, 0x65656e20, 0x6120756e,
0x69766572, 0x73616c20, 0x636f6e73, 0x656e7375,
0x73207468, 0x61742064, 0x61746120, 0x69732074,
0x69732061, 0x206e6577, 0x20746563, 0x686e6f6c]
block2 = [0x6f677920, 0x74686174, 0x20696e74, 0x65677261,
0x74657320, 0x64656365, 0x6e747261, 0x6c697a61,
0x74696f6e, 0x2c496e20, 0x74686520, 0x61726561,
0x206f6620, 0x496f5420, 0x28496e74, 0x65726e65]
block3 = [0x74206f66, 0x20546869, 0x6e677329, 0x2c206d6f,
0x72652061, 0x6e64206d, 0x6f726562, 0x6f6f6d2c,
0x20697420, 0x68617320, 0x6265656e, 0x20612075,
0x6e697665, 0x7273616c, 0x20636f6e, 0x73656e73]
block4 = [0x75732074, 0x68617420, 0x64617461, 0x20697320,
0x74697320, 0x61206e65, 0x77207465, 0x63686e6f,
0x6c6f6779, 0x20746861, 0x7420696e, 0x74656772,
0x61746573, 0x20646563, 0x656e7472, 0x616c697a]
block5 = [0x6174696f, 0x6e2c496e, 0x20746865, 0x20617265,
0x61206f66, 0x20496f54, 0x2028496e, 0x7465726e,
0x6574206f, 0x66205468, 0x696e6773, 0x292c206d,
0x6f726520, 0x616e6420, 0x6d6f7265, 0x626f6f6d]
block6 = [0x2c206974, 0x20686173, 0x20626565, 0x6e206120,
0x756e6976, 0x65727361, 0x6c20636f, 0x6e73656e,
0x73757320, 0x74686174, 0x20646174, 0x61206973,
0x20746973, 0x2061206e, 0x65772074, 0x6563686e]
block7 = [0x6f6c6f67, 0x79207468, 0x61742069, 0x6e746567,
0x72617465, 0x73206465, 0x63656e74, 0x72616c69,
0x7a617469, 0x6f6e2c49, 0x6e207468, 0x65206172,
0x6561206f, 0x6620496f, 0x54202849, 0x6e746572]
# Padding calculation:
# Length: 8 * 512 + 7 * 32 + 8 = 0x10e8
block8 = [0x6e657420, 0x6f662054, 0x68696e67, 0x73292c20,
0x6d6f7265, 0x20616e64, 0x206d6f72, 0x65800000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x000010e8]
expected = [0x7758a30b, 0xbdfc9cd9, 0x2b284b05, 0xe9be9ca3,
0xd269d3d1, 0x49e7e82a, 0xb4a9ed5e, 0x81fbcf9d]
print("Running test for issue:")
my_sha256 = SHA256(verbose=0)
my_sha256.init()
my_sha256.next(block0)
my_sha256.next(block1)
my_sha256.next(block2)
my_sha256.next(block3)
my_sha256.next(block4)
my_sha256.next(block5)
my_sha256.next(block6)
my_sha256.next(block7)
my_sha256.next(block8)
my_digest = my_sha256.get_digest()
print("Digest for message:")
print_digest(my_digest)
compare_digests(my_digest, expected)
print("")
#-------------------------------------------------------------------
# main()
#
# If executed tests the sha256 class using known test vectors.
#-------------------------------------------------------------------
def main():
print("Testing the SHA-256 Python model.")
print("---------------------------------")
print("")
sha224_tests()
sha256_tests()
sha256_issue_test()
#-------------------------------------------------------------------
# __name__
# Python thingy which allows the file to be run standalone as
# well as parsed from within a Python interpreter.
#-------------------------------------------------------------------
if __name__=="__main__":
# Run the main function.
sys.exit(main())
#=======================================================================
# EOF sha256.py
#=======================================================================
|
class ComplexNumber:
def __init__(self, r=0, i=0):
self.real = r
self.imag = i
def get_data(self):
print(f'{self.real}+{self.imag}j')
c1 = ComplexNumber(2, 3)
c1.holy = 'This attr used to not exist but it does now'
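# Illustrative note (an addition, not in the original snippet): plain Python
# classes keep attributes in a per-instance __dict__, which is why assigning
# c1.holy above succeeds even though __init__ never defined it.  Declaring
# __slots__ = ('real', 'imag') on the class would make such ad-hoc
# assignments raise AttributeError instead.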
|
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
class BackendSession(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
print("Backend session joined: {}".format(details))
        def make_onhello(topic):
            # Bind the topic per handler so each subscription reports
            # the topic it was actually registered for.
            def onhello(msg=None):
                print("event received on {}: {}".format(topic, msg))
            return onhello
## SUBSCRIBE to a few topics we are allowed to subscribe to.
##
for topic in [
'com.example.topic1',
'com.foobar.topic1',
'com.foobar.topic2']:
try:
                sub = yield self.subscribe(make_onhello(topic), topic)
print("ok, subscribed to topic {}".format(topic))
except Exception as e:
print("could not subscribe to {}: {}".format(topic, e))
## (try to) SUBSCRIBE to a topic we are not allowed to subscribe to (so this should fail).
##
try:
            sub = yield self.subscribe(make_onhello('com.example.topic2'), 'com.example.topic2')
except Exception as e:
print("subscription failed (this is expected!) {}".format(e))
## REGISTER a procedure for remote calling
##
def add2(x, y):
print("add2() called with {} and {}".format(x, y))
return x + y
try:
reg = yield self.register(add2, 'com.example.add2')
print("procedure add2() registered")
except Exception as e:
print("could not register procedure: {}".format(e))
|
"""Validation"""
import re
from . import Instance
class Validator:
"""Factory for a misc.validation.MultiValidator
that guesses which validator to use
"""
def __new__(cls, *checks):
"""Create a new mis.validation.MultiValidator guessing
which validator to use
`checks` are the arguments passed to the respective validator
which one is determined automatically.
"""
checks = checks + (None,)
validators = []
current = None
stored = []
for check in checks:
if isinstance(check, (MultiValidator, TransformValidator, ConditionValidator)):
validators.append(check)
continue
if check is None:
val = None
elif callable(check):
val = TransformValidator
elif isinstance(check[0], Instance):
val = ConditionValidator
elif isinstance(check[0], str):
val = RegexValidator
else:
val = TransformValidator
if val is not current:
if current is not None:
validators.append(current(*stored))
current = val
stored = []
stored.append(check)
return MultiValidator(*validators)
class MultiValidator:
"""Chain multiple validators"""
def __init__(self, *validators):
"""Create a new MultiValidator
`validators` are single validators to be chained"""
self.validators = validators
def __call__(self, value):
for validator in self.validators:
good, value = validator(value)
if not good:
return False, value
return True, value
class ConditionValidator:
"""validate based on a user-defined condition"""
def __init__(self, *conditions):
"""Create a new ConditionValidator.
`conditions` are iterables of the form
(<condition>, <error message>), where <condition>
is a delayed evaluation using `misc.Instance`
        that yields a value to be used in boolean context
indicating whether the value passed is valid
e.g. Instance().attr['key'] == 'someval'
<error message> is the error message to return
if the condition resolves to a falsey value
"""
self.conditions = conditions
def __call__(self, value):
for cond, msg in self.conditions:
if not Instance.lookup(cond, value):
return False, msg
return True, value
class TransformValidator:
"""validate based on transformation"""
def __init__(self, *transformations):
"""Create a new TransformValidator
`transformations` are callables taking as single argument
the user input and returning the transformed input.
They may also be tuples of the form (<callable>, <config>)
        where <callable> is the callable described above
and <config> is a mapping from exceptions that may occur
during the transformation to error messages or tuples of
multiple such exceptions.
The default maps (ValueError, TypeError) to
'Must be of type <name>' with <name> replaced by
the callables __name__, making it suitable for types.
Note: if a config (even if empty) is supplied,
it overrides the default.
"""
default_config = {(ValueError, TypeError): 'Must be of type {__name__}'}
self.trans = []
self.configs = []
for t in transformations:
if isinstance(t, tuple):
t, new_cnf = t
self.configs.append(new_cnf)
else:
self.configs.append(default_config)
self.trans.append(t)
def __call__(self, value):
for trans, cnf in zip(self.trans, self.configs):
try:
value = trans(value)
except Exception as e:
for exc, msg in cnf.items():
if isinstance(e, exc):
return False, msg.format(__name__=trans.__name__,
value=value,
)
raise
return True, value
class RegexValidator:
"""Factory for TransformValidators validating by regex"""
class Error(Exception):
pass
def __new__(cls, *conditions):
"""Create a new TransformValidator validating
        the passed regular expressions.
`conditions` are (<regex>, <error>, [<group>]), <group> being
optional, where <regex> is a regular expression in string form
and <error> is the error message to display on failure
of matching. <group> is the regex group to return.
The default (if not given) is 0, returning the whole match.
Note: while the verb "match" is used in this docstring,
the re.search functionality is actually used for the validation
"""
def creator(regex, error, group=0):
def trans(value, _re=re.compile(regex), _group=group):
try:
                    return _re.search(value).group(_group)
except AttributeError:
raise RegexValidator.Error from None
return trans, {RegexValidator.Error: error}
return TransformValidator(*[creator(*c) for c in conditions])
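# --- Illustrative usage (an addition, not part of the original module) ----
# A minimal sketch of how the Validator factory above can chain a regex
# check with a type conversion; the check tuple, message and values are
# made up for this example and follow the branches in Validator.__new__:
# a (str, message) tuple is routed to RegexValidator, a bare callable to
# TransformValidator.
#
#     validate_port = Validator(
#         (r'^\d+$', 'Must be digits only'),
#         int,
#     )
#     validate_port('8080')   # -> (True, 8080)
#     validate_port('http')   # -> (False, 'Must be digits only')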
|
import psycopg2 as dbapi2
from flask import current_app
from flask_login import UserMixin
from passlib.apps import custom_app_context as pwd_context
class EventRestaurants():
def __init__(self, eventId, userId):
self.Id = ""
self.eventId = eventId
self.userId = userId
with dbapi2.connect(current_app.config['dsn']) as connection:
cursor = connection.cursor()
query = """
INSERT INTO EVENT_RESTAURANTS (EVENT_ID, USER_ID)
VALUES (%s,%s)"""
cursor.execute(query, [self.eventId, self.userId])
            connection.commit()
with dbapi2.connect(current_app.config['dsn']) as connection:
cursor = connection.cursor()
statement = """SELECT * FROM EVENT_RESTAURANTS WHERE (EVENT_ID = %s)
AND (USER_ID = %s)"""
cursor.execute(statement,[self.eventId, self.userId])
IdofCurrent = cursor.fetchone()[0]
self.Id = IdofCurrent
def select_comers_all(eventId):
#Select name from user table who comes to that event.
with dbapi2.connect(current_app.config['dsn']) as connection:
cursor = connection.cursor()
statement = """SELECT USERS.FIRSTNAME, USERS.LASTNAME FROM EVENT_RESTAURANTS,USERS
WHERE USERS.ID = EVENT_RESTAURANTS.USER_ID
AND EVENT_RESTAURANTS.EVENT_ID = %s"""
cursor.execute(statement,[eventId])
comers = cursor.fetchall()
return comers
def delete_comers_by_Id(eventId,userId):
with dbapi2.connect(current_app.config['dsn']) as connection:
cursor = connection.cursor()
query = """
DELETE FROM EVENT_RESTAURANTS WHERE EVENT_ID = %s AND
USER_ID = %s"""
cursor.execute(query, [eventId,userId])
connection.commit()
def does_user_come(userId,eventId):
with dbapi2.connect(current_app.config['dsn']) as connection:
cursor = connection.cursor()
statement = """SELECT * FROM EVENT_RESTAURANTS
WHERE USER_ID = %s
AND EVENT_ID = %s """
cursor.execute(statement,[userId,eventId])
comers = cursor.fetchall()
return comers
def delete_unnecessary_rows():
with dbapi2.connect(current_app.config['dsn']) as connection:
cursor = connection.cursor()
query = """
DELETE FROM EVENT_RESTAURANTS WHERE EVENT_ID IS NULL OR
USER_ID IS NULL"""
cursor.execute(query)
connection.commit()
|
import numpy as np
import torch
from .base_model import BaseModel
from . import networks
from .nce import PatchNCELoss
import util.util as util
class CUTModel(BaseModel):
""" This class implements CUT and FastCUT model
The code borrows heavily from the PyTorch implementation of CycleGAN
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
""" Configures options specific for CUT model
"""
parser.add_argument('--CUT_mode', type=str, default="CUT", choices='(CUT, cut, FastCUT, fastcut)')
parser.add_argument('--lambda_GAN', type=float, default=1.0, help='weight for GAN loss:GAN(G(X))')
parser.add_argument('--lambda_NCE', type=float, default=1.0, help='weight for NCE loss: NCE(G(X), X)')
parser.add_argument('--nce_idt', type=util.str2bool, nargs='?', const=True, default=False, help='use NCE loss for identity mapping: NCE(G(Y), Y))')
parser.add_argument('--nce_layers', type=str, default='0,4,8,12,16', help='compute NCE loss on which layers')
parser.add_argument('--netF', type=str, default='mlp_sample', help='downsample the feature map: sample | reshape | mlp_sample')
parser.add_argument('--netF_nc', type=int, default=256)
parser.add_argument('--nce_T', type=float, default=0.07, help='temperature for NCE loss')
parser.add_argument('--num_patches', type=int, default=256, help='number of patches per layer')
parser.add_argument('--flip_equivariance',
type=util.str2bool, nargs='?', const=True, default=False,
help="Enforce flip-equivariance as additional regularization. It's used by FastCUT, but not CUT")
parser.set_defaults(pool_size=0) # no image pooling
opt, _ = parser.parse_known_args()
# Set default parameters for CUT and FastCUT
if opt.CUT_mode.lower() == "cut":
parser.set_defaults(nce_idt=True, lambda_NCE=1.0)
elif opt.CUT_mode.lower() == "fastcut":
parser.set_defaults(
nce_idt=False, lambda_NCE=10.0, flip_equivariance=True,
n_epochs=150, n_epochs_decay=50
)
else:
raise ValueError(opt.CUT_mode)
return parser
def __init__(self, opt):
BaseModel.__init__(self, opt)
# specify the training losses you want to print out.
# The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'G', 'NCE']
self.visual_names = ['real_A', 'fake_B', 'real_B']
self.nce_layers = [int(i) for i in self.opt.nce_layers.split(',')]
if opt.nce_idt and self.isTrain:
self.loss_names += ['NCE_Y']
self.visual_names += ['idt_B']
if self.isTrain:
self.model_names = ['G', 'F', 'D']
else: # during test time, only load G
self.model_names = ['G']
# define networks (both generator and discriminator)
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt)
self.netF = networks.define_F(opt.input_nc, opt.netF, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
if self.isTrain:
self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionNCE = []
for nce_layer in self.nce_layers:
self.criterionNCE.append(PatchNCELoss(opt).to(self.device))
self.criterionIdt = torch.nn.L1Loss().to(self.device)
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def data_dependent_initialize(self):
"""
The feature network netF is defined in terms of the shape of the intermediate, extracted
features of the encoder portion of netG. Because of this, the weights of netF are
initialized at the first feedforward pass with some input images.
Please also see PatchSampleF.create_mlp(), which is called at the first forward() call.
"""
bs_per_gpu = self.real_A.size(0) // len(self.opt.gpu_ids)
self.real_A = self.real_A[:bs_per_gpu]
self.real_B = self.real_B[:bs_per_gpu]
self.forward() # compute fake images: G(A)
if self.opt.isTrain:
self.backward_D() # calculate gradients for D
            self.backward_G()                  # calculate gradients for G
if self.opt.lambda_NCE > 0.0:
self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2))
self.optimizers.append(self.optimizer_F)
def optimize_parameters(self):
# forward
self.forward() # compute fake images: G(A)
# update D
self.set_requires_grad(self.netD, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
self.optimizer_D.step() # update D's weights
# update G
self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
if self.opt.netF == 'mlp_sample':
self.optimizer_F.zero_grad()
        self.backward_G()                  # calculate gradients for G
        self.optimizer_G.step()             # update G's weights
if self.opt.netF == 'mlp_sample':
self.optimizer_F.step()
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.real = torch.cat((self.real_A, self.real_B), dim=0) if self.opt.nce_idt else self.real_A
if self.opt.flip_equivariance:
self.flipped_for_equivariance = self.opt.isTrain and (np.random.random() < 0.5)
if self.flipped_for_equivariance:
self.real = torch.flip(self.real, [3])
self.fake = self.netG(self.real)
self.fake_B = self.fake[:self.real_A.size(0)]
if self.opt.nce_idt:
self.idt_B = self.fake[self.real_A.size(0):]
def backward_D(self):
if self.opt.lambda_GAN > 0.0:
"""Calculate GAN loss for the discriminator"""
fake = self.fake_B.detach()
# Fake; stop backprop to the generator by detaching fake_B
pred_fake = self.netD(fake)
self.loss_D_fake = self.criterionGAN(pred_fake, False).mean()
# Real
pred_real = self.netD(self.real_B)
loss_D_real_unweighted = self.criterionGAN(pred_real, True)
self.loss_D_real = loss_D_real_unweighted.mean()
# combine loss and calculate gradients
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
else:
self.loss_D_real, self.loss_D_fake, self.loss_D = 0.0, 0.0, 0.0
def backward_G(self):
"""Calculate GAN and NCE loss for the generator"""
fake = self.fake_B
# First, G(A) should fake the discriminator
if self.opt.lambda_GAN > 0.0:
pred_fake = self.netD(fake)
self.loss_G_GAN = self.criterionGAN(pred_fake, True).mean() * self.opt.lambda_GAN
else:
self.loss_G_GAN = 0.0
if self.opt.lambda_NCE > 0.0:
self.loss_NCE = self.calculate_NCE_loss(self.real_A, self.fake_B)
else:
self.loss_NCE, self.loss_NCE_bd = 0.0, 0.0
if self.opt.nce_idt and self.opt.lambda_NCE > 0.0:
self.loss_NCE_Y = self.calculate_NCE_loss(self.real_B, self.idt_B)
loss_NCE_both = (self.loss_NCE + self.loss_NCE_Y) * 0.5
else:
loss_NCE_both = self.loss_NCE
self.loss_G = self.loss_G_GAN + loss_NCE_both
self.loss_G.backward()
def calculate_NCE_loss(self, src, tgt):
n_layers = len(self.nce_layers)
feat_q = self.netG(tgt, self.nce_layers, encode_only=True)
if self.opt.flip_equivariance and self.flipped_for_equivariance:
feat_q = [torch.flip(fq, [3]) for fq in feat_q]
feat_k = self.netG(src, self.nce_layers, encode_only=True)
feat_k_pool, sample_ids = self.netF(feat_k, self.opt.num_patches, None)
feat_q_pool, _ = self.netF(feat_q, self.opt.num_patches, sample_ids)
total_nce_loss = 0.0
for f_q, f_k, crit, nce_layer in zip(feat_q_pool, feat_k_pool, self.criterionNCE, self.nce_layers):
loss = crit(f_q, f_k) * self.opt.lambda_NCE
total_nce_loss += loss.mean()
return total_nce_loss / n_layers
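# --- Illustrative usage (an assumption, not from the original repository) --
# A rough sketch of the order in which a training script is expected to
# drive the methods above; the `opt` and `dataset` objects are hypothetical
# placeholders.
#
#     model = CUTModel(opt)
#     for i, data in enumerate(dataset):
#         model.set_input(data)
#         if i == 0:
#             model.data_dependent_initialize()   # lazily builds netF's MLP
#         model.optimize_parameters()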
|
from django.urls import path
from blog import views
#TEMPLATE TAGGING
app_name = 'blog'
urlpatterns = [
path('<int:year>/', views.archeive_posts, name='archeive_post'),
path('<int:year>/<int:month>/<int:day>/', views.archeive_posts_by_date, name='archeive_date'),
path('tag/<str:tag>/', views.archeive_posts_by_tag, name='archeive_tag'),
path('category/<str:category>/', views.archeive_posts_by_category, name='archeive_category'),
path('author/<str:username>/', views.archeive_posts_by_author, name='archeive_author'),
path('posts/<int:pk>/', views.post_details, name='post_details'),
path('posts/search_result', views.search_view, name='search_view'),
path('submit_comment', views.submit_comment, name='submit_comment'),
path('submit_reply', views.submit_reply, name='submit_reply'),
]
|
# This code is provided by Foroozan Karimzadeh- PhD student at Gatech
# coding: utf-8
# In[155]:
import numpy as np
import json
from sklearn.feature_extraction import text
x = open('fedpapers_split.txt').read()
papers = json.loads(x)
papersH = papers[0] # papers by Hamilton
papersM = papers[1] # papers by Madison
papersD = papers[2] # disputed papers
nH, nM, nD = len(papersH), len(papersM), len(papersD)
# This allows you to ignore certain common words in English
# You may want to experiment by choosing the second option or your own
# list of stop words, but be sure to keep 'HAMILTON' and 'MADISON' in
# this list at a minimum, as their names appear in the text of the papers
# and leaving them in could lead to unpredictable results
# stop_words = text.ENGLISH_STOP_WORDS.union({'HAMILTON','MADISON'})
stop_words = {'HAMILTON', 'MADISON'}
## Form bag of words model using words used at least 10 times
vectorizer = text.CountVectorizer(stop_words=stop_words,min_df=10)
X = vectorizer.fit_transform(papersH+papersM+papersD).toarray()
# Uncomment this line to see the full list of words remaining after filtering out
# stop words and words used less than min_df times
# vectorizer.vocabulary_
# Split word counts into separate matrices
XH, XM, XD = X[:nH,:], X[nH:nH+nM,:], X[nH+nM:,:]
# Estimate probability of each word in vocabulary being used by Hamilton (this is P(word|class=Hamilton))
# Applying Laplace smoothing
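# With add-one (Laplace) smoothing: P(w|class) = (count(w, class) + 1) / (total words in class + |V|),
# where |V| = XH.shape[1] is the vocabulary size.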
fH = [ (XH.sum(axis = 0, dtype='float')+1)/(XH.sum(dtype='float')+XH.shape[1]) ]
print('#########################################################################')
print('P(word|class=Hamilton)=', fH)
# Estimate probability of each word in vocabulary being used by Madison (this is P(word|class=Madison))
# Applying Laplace smoothing
fM = [ (XM.sum(axis = 0, dtype='float')+1)/(XM.sum(dtype='float')+XM.shape[1]) ]
print('P(word|class=Madison)=', fM)
# Compute ratio of these probabilities
fH_num=np.array(fH, dtype='float')
fM_denum=np.array(fM, dtype='float')
fratio = fH_num/fM_denum
print('#########################################################################')
print('fratio=', fratio)
print('#########################################################################')
# Compute prior probabilities
piH = nH/(nH+nM)
piM = nM/(nH+nM)
piratio = piH/piM
threshold = 0.0001
for doc in range(len(XD)): # Iterate over disputed documents
element_power = fratio**XD[doc, :]
# Compute likelihood ratio for Naive Bayes model
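    # LR = (P(H)/P(M)) * prod_w (P(w|H)/P(w|M))**count(w, doc); classify as Hamilton when LR exceeds the threshold.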
LR = piratio*np.prod(element_power)
    print('LR of doc (%d) is =' % doc, LR)
if LR > threshold:
print('This document is by Hamilton')
else:
print('This document is by Madison')
print('#########################################################################')
|
from .modules import * # noqa: F403
|
#! /usr/bin/env python
from .device_base import Device_Base
class Device_Dimmer(Device_Base):
def __init__(self, container, name, address):
Device_Base.__init__(self,container,'dimmer', name, address)
self.add_property('level',0) #in percent
def set_level(self,level):
pass
|
#!/usr/bin/env python3
"""
Usage:
eval.py [options] SAVE_FOLDER TRAIN_DATA_PATH VALID_DATA_PATH TEST_DATA_PATH
eval.py [options] [SAVE_FOLDER]
*_DATA_PATH arguments may either accept (1) directory filled with .jsonl.gz files that we use as data,
or a (2) plain text file containing a list of such directories (used for multi-language training).
In the case that you supply a (2) plain text file, all directory names must be separated by a newline.
For example, if you want to read from multiple directories you might have a plain text file called
data_dirs_train.txt with the below contents:
> cat ~/src/data_dirs_train.txt
azure://semanticcodesearch/pythondata/Processed_Data/jsonl/train
azure://semanticcodesearch/csharpdata/split/csharpCrawl-train
Options:
-h --help Show this screen.
--restore DIR specify restoration dir. [optional]
--debug Enable debug routines. [default: False]
"""
import os
import torch
from docopt import docopt
from dpu_utils.utils import run_and_debug
from loguru import logger
from tqdm import tqdm
from torch.utils.data import DataLoader
# from codenets.codesearchnet.single_branch_ctx import SingleBranchTrainingContext
from codenets.codesearchnet.dataset_utils import BalancedBatchSchedulerSampler, DatasetType
from codenets.codesearchnet.training_ctx import (
CodeSearchTrainingContext,
compute_loss_mrr,
TotalLoss,
TotalMrr,
TotalSize,
BatchSize,
BatchLoss,
)
def run(args, tag_in_vcs=False) -> None:
os.environ["WANDB_MODE"] = "dryrun"
logger.debug("Building Training Context")
training_ctx: CodeSearchTrainingContext
restore_dir = args["--restore"]
logger.info(f"Restoring Training Context from directory{restore_dir}")
training_ctx = CodeSearchTrainingContext.build_context_from_dir(restore_dir)
# Build Val Dataloader
# val_dataset = training_ctx.build_lang_dataset(DatasetType.VAL)
# val_dataloader = DataLoader(
# dataset=val_dataset,
# batch_size=training_ctx.val_batch_size,
# sampler=BalancedBatchSchedulerSampler(dataset=val_dataset, batch_size=training_ctx.val_batch_size),
# )
# logger.info(f"Built val_dataloader [Length:{len(val_dataloader)} x Batch:{training_ctx.val_batch_size}]")
# Build Test Dataloader
test_dataset = training_ctx.build_lang_dataset(DatasetType.TEST)
test_dataloader = DataLoader(
dataset=test_dataset,
        batch_size=training_ctx.test_batch_size,
sampler=BalancedBatchSchedulerSampler(dataset=test_dataset, batch_size=training_ctx.test_batch_size),
)
logger.info(f"Built test_dataloader [Length:{len(test_dataloader)} x Batch:{training_ctx.test_batch_size}]")
total_loss = TotalLoss(0.0)
total_size = TotalSize(0)
total_mrr = TotalMrr(0.0)
training_ctx.eval_mode()
with torch.no_grad():
training_ctx.zero_grad()
with tqdm(total=len(test_dataloader)) as t_batch:
for batch_idx, batch in enumerate(test_dataloader):
languages, similarity, query_tokens, query_tokens_mask, code_tokens, code_tokens_mask = [
t.to(training_ctx.device) for t in batch
]
batch_total_loss, similarity_scores = training_ctx.forward(batch, batch_idx)
batch_size = BatchSize(batch[0].size()[0])
batch_loss = BatchLoss(batch_total_loss.item())
total_loss, avg_loss, total_mrr, avg_mrr, total_size = compute_loss_mrr(
similarity_scores, batch_loss, batch_size, total_loss, total_mrr, total_size
)
# languages=languages,
# query_tokens=query_tokens,
# query_tokens_mask=query_tokens_mask,
# code_tokens=code_tokens,
# code_tokens_mask=code_tokens_mask,
# )
# batch_total_losses, similarity_scores = training_ctx.losses_scores_fn(
# query_embedding, code_embedding, similarity
# )
# batch_total_loss = torch.mean(batch_total_losses)
# nb_samples = batch[0].size()[0]
# # compute MRR
# # extract the logits from the diagonal of the matrix, which are the logits corresponding to the ground-truth
# correct_scores = similarity_scores.diagonal()
# # compute how many queries have bigger logits than the ground truth (the diagonal)
# # the elements that are incorrectly ranked
# compared_scores = similarity_scores.ge(correct_scores.unsqueeze(dim=-1)).float()
# compared_scores_nb = torch.sum(compared_scores, dim=1)
# per_sample_mrr = torch.div(1.0, compared_scores_nb)
# per_batch_mrr = torch.sum(per_sample_mrr) / nb_samples
# epoch_samples += nb_samples
# epoch_loss += batch_total_loss.item() * nb_samples
# loss = epoch_loss / max(1, epoch_samples)
# mrr_sum += per_batch_mrr.item() * nb_samples
# mrr = mrr_sum / max(1, epoch_samples)
t_batch.set_postfix({f"loss": f"{batch_total_loss.item():10}"})
t_batch.update(1)
logger.info(
f"total_loss:{total_loss}, avg_loss:{avg_loss}, total_mrr:{total_mrr}, avg_mrr:{avg_mrr}, total_size:{total_size}"
)
if __name__ == "__main__":
args = docopt(__doc__)
run_and_debug(lambda: run(args), args["--debug"])
|
from .blueprint import GraphQL
from .graphqlview import GraphQLView
__all__ = ['GraphQL', 'GraphQLView']
|
from __future__ import annotations
import dataclasses
import unittest
from datetime import date, datetime
import pydantic
from cl_sii.dte.constants import TipoDte
from cl_sii.dte.data_models import DteDataL1, DteNaturalKey, DteXmlData
from cl_sii.libs import encoding_utils, tz_utils
from cl_sii.rtc.data_models import CesionAltNaturalKey, CesionL2, CesionNaturalKey
from cl_sii.rtc.data_models_aec import AecXml, CesionAecXml
from cl_sii.rut import Rut
from .utils import read_test_file_bytes
class CesionAecXmlTest(unittest.TestCase):
"""
Tests for :class:`CesionAecXml`.
"""
def _set_obj_1(self) -> None:
obj = CesionAecXml(
dte=DteDataL1(
emisor_rut=Rut('76354771-K'),
tipo_dte=TipoDte.FACTURA_ELECTRONICA,
folio=170,
fecha_emision_date=date(2019, 4, 1),
receptor_rut=Rut('96790240-3'),
monto_total=2996301,
),
seq=1,
cedente_rut=Rut('76354771-K'),
cesionario_rut=Rut('76389992-6'),
monto_cesion=2996301,
fecha_cesion_dt=tz_utils.convert_naive_dt_to_tz_aware(
dt=datetime(2019, 4, 1, 10, 22, 2),
tz=CesionAecXml.DATETIME_FIELDS_TZ,
),
fecha_ultimo_vencimiento=date(2019, 5, 1),
cedente_razon_social='SERVICIOS BONILLA Y LOPEZ Y COMPAÑIA LIMITADA',
cedente_direccion='MERCED 753 16 ARBOLEDA DE QUIILOTA',
cedente_email='enaconltda@gmail.com',
cedente_persona_autorizada_rut=Rut('76354771-K'),
cedente_persona_autorizada_nombre='SERVICIOS BONILLA Y LOPEZ Y COMPAÑIA LIM',
cesionario_razon_social='ST CAPITAL S.A.',
cesionario_direccion='Isidora Goyenechea 2939 Oficina 602',
cesionario_email='fynpal-app-notif-st-capital@fynpal.com',
dte_deudor_email=None,
cedente_declaracion_jurada=(
'Se declara bajo juramento que SERVICIOS BONILLA Y LOPEZ Y COMPAÑIA '
'LIMITADA, RUT 76354771-K ha puesto a disposición del cesionario ST '
'CAPITAL S.A., RUT 76389992-6, el o los documentos donde constan los '
'recibos de las mercaderías entregadas o servicios prestados, entregados '
'por parte del deudor de la factura MINERA LOS PELAMBRES, RUT 96790240-3, '
'deacuerdo a lo establecido en la Ley N°19.983.'
),
)
self.assertIsInstance(obj, CesionAecXml)
self.obj_1 = obj
def _set_obj_2(self) -> None:
obj = CesionAecXml(
dte=DteDataL1(
emisor_rut=Rut('76354771-K'),
tipo_dte=TipoDte.FACTURA_ELECTRONICA,
folio=170,
fecha_emision_date=date(2019, 4, 1),
receptor_rut=Rut('96790240-3'),
monto_total=2996301,
),
seq=2,
cedente_rut=Rut('76389992-6'),
cesionario_rut=Rut('76598556-0'),
monto_cesion=2996301,
fecha_cesion_dt=tz_utils.convert_naive_dt_to_tz_aware(
dt=datetime(2019, 4, 5, 12, 57, 32),
tz=CesionAecXml.DATETIME_FIELDS_TZ,
),
fecha_ultimo_vencimiento=date(2019, 5, 1),
cedente_razon_social='ST CAPITAL S.A.',
cedente_direccion='Isidora Goyenechea 2939 Oficina 602',
cedente_email='APrat@Financiaenlinea.com',
cedente_persona_autorizada_rut=Rut('16360379-9'),
cedente_persona_autorizada_nombre='ANDRES PRATS VIAL',
cesionario_razon_social='Fondo de Inversión Privado Deuda y Facturas',
cesionario_direccion='Arrayan 2750 Oficina 703 Providencia',
cesionario_email='solicitudes@stcapital.cl',
dte_deudor_email=None,
cedente_declaracion_jurada=(
'Se declara bajo juramento que ST CAPITAL S.A., RUT 76389992-6 ha puesto '
'a disposicion del cesionario Fondo de Inversión Privado Deuda y Facturas, '
'RUT 76598556-0, el documento validamente emitido al deudor MINERA LOS '
'PELAMBRES, RUT 96790240-3.'
),
)
self.assertIsInstance(obj, CesionAecXml)
self.obj_2 = obj
def test_create_new_empty_instance(self) -> None:
with self.assertRaises(TypeError):
CesionAecXml()
def test_natural_key(self) -> None:
self._set_obj_1()
self._set_obj_2()
obj = self.obj_1
expected_output = CesionNaturalKey(
dte_key=DteNaturalKey(
emisor_rut=Rut('76354771-K'),
tipo_dte=TipoDte.FACTURA_ELECTRONICA,
folio=170,
),
seq=1,
)
self.assertEqual(obj.natural_key, expected_output)
obj = self.obj_2
expected_output = CesionNaturalKey(
dte_key=DteNaturalKey(
emisor_rut=Rut('76354771-K'),
tipo_dte=TipoDte.FACTURA_ELECTRONICA,
folio=170,
),
seq=2,
)
self.assertEqual(obj.natural_key, expected_output)
def test_alt_natural_key(self) -> None:
self._set_obj_1()
self._set_obj_2()
obj = self.obj_1
expected_output = CesionAltNaturalKey(
dte_key=DteNaturalKey(
emisor_rut=Rut('76354771-K'),
tipo_dte=TipoDte.FACTURA_ELECTRONICA,
folio=170,
),
cedente_rut=Rut('76354771-K'),
cesionario_rut=Rut('76389992-6'),
fecha_cesion_dt=tz_utils.convert_naive_dt_to_tz_aware(
dt=datetime(2019, 4, 1, 10, 22),
tz=CesionAltNaturalKey.DATETIME_FIELDS_TZ,
),
)
self.assertEqual(obj.alt_natural_key, expected_output)
obj = self.obj_2
expected_output = CesionAltNaturalKey(
dte_key=DteNaturalKey(
emisor_rut=Rut('76354771-K'),
tipo_dte=TipoDte.FACTURA_ELECTRONICA,
folio=170,
),
cedente_rut=Rut('76389992-6'),
cesionario_rut=Rut('76598556-0'),
fecha_cesion_dt=tz_utils.convert_naive_dt_to_tz_aware(
dt=datetime(2019, 4, 5, 12, 57),
tz=CesionAltNaturalKey.DATETIME_FIELDS_TZ,
),
)
self.assertEqual(obj.alt_natural_key, expected_output)
def test_as_cesion_l2(self) -> None:
self._set_obj_1()
obj = self.obj_1
expected_output = CesionL2(
dte_key=DteNaturalKey(
emisor_rut=Rut('76354771-K'),
tipo_dte=TipoDte.FACTURA_ELECTRONICA,
folio=170,
),
seq=1,
cedente_rut=Rut('76354771-K'),
cesionario_rut=Rut('76389992-6'),
fecha_cesion_dt=tz_utils.convert_naive_dt_to_tz_aware(
dt=datetime(2019, 4, 1, 10, 22, 2),
tz=CesionL2.DATETIME_FIELDS_TZ,
),
monto_cedido=2996301,
dte_receptor_rut=Rut('96790240-3'),
dte_fecha_emision=date(2019, 4, 1),
dte_monto_total=2996301,
fecha_ultimo_vencimiento=date(2019, 5, 1),
cedente_razon_social='SERVICIOS BONILLA Y LOPEZ Y COMPAÑIA LIMITADA',
cedente_email='enaconltda@gmail.com',
cesionario_razon_social='ST CAPITAL S.A.',
cesionario_email='fynpal-app-notif-st-capital@fynpal.com',
dte_deudor_email=None,
cedente_declaracion_jurada=(
'Se declara bajo juramento que SERVICIOS BONILLA Y LOPEZ Y COMPAÑIA '
'LIMITADA, RUT 76354771-K ha puesto a disposición del cesionario ST '
'CAPITAL S.A., RUT 76389992-6, el o los documentos donde constan los '
'recibos de las mercaderías entregadas o servicios prestados, entregados '
'por parte del deudor de la factura MINERA LOS PELAMBRES, RUT 96790240-3, '
'deacuerdo a lo establecido en la Ley N°19.983.'
),
)
obj_cesion_l2 = obj.as_cesion_l2()
self.assertEqual(obj_cesion_l2, expected_output)
self.assertEqual(obj_cesion_l2.natural_key, obj.natural_key)
self.assertEqual(obj_cesion_l2.alt_natural_key, obj.alt_natural_key)
self.assertEqual(obj_cesion_l2.dte_key, obj.dte.natural_key)
class AecXmlTest(unittest.TestCase):
"""
Tests for :class:`AecXml`.
"""
def _set_obj_1(self) -> None:
obj_dte_signature_value = encoding_utils.decode_base64_strict(
read_test_file_bytes(
'test_data/sii-crypto/DTE--76354771-K--33--170-signature-value-base64.txt',
),
)
obj_dte_signature_x509_cert_der = read_test_file_bytes(
'test_data/sii-crypto/DTE--76354771-K--33--170-cert.der',
)
obj_dte = DteXmlData(
emisor_rut=Rut('76354771-K'),
tipo_dte=TipoDte.FACTURA_ELECTRONICA,
folio=170,
fecha_emision_date=date(2019, 4, 1),
receptor_rut=Rut('96790240-3'),
monto_total=2996301,
emisor_razon_social='INGENIERIA ENACON SPA',
receptor_razon_social='MINERA LOS PELAMBRES',
fecha_vencimiento_date=None,
firma_documento_dt=tz_utils.convert_naive_dt_to_tz_aware(
dt=datetime(2019, 4, 1, 1, 36, 40),
tz=DteXmlData.DATETIME_FIELDS_TZ,
),
signature_value=obj_dte_signature_value,
signature_x509_cert_der=obj_dte_signature_x509_cert_der,
emisor_giro='Ingenieria y Construccion',
emisor_email='ENACONLTDA@GMAIL.COM',
receptor_email=None,
)
obj_cesion_1 = CesionAecXml(
dte=DteDataL1(
emisor_rut=Rut('76354771-K'),
tipo_dte=TipoDte.FACTURA_ELECTRONICA,
folio=170,
fecha_emision_date=date(2019, 4, 1),
receptor_rut=Rut('96790240-3'),
monto_total=2996301,
),
seq=1,
cedente_rut=Rut('76354771-K'),
cesionario_rut=Rut('76389992-6'),
monto_cesion=2996301,
fecha_cesion_dt=tz_utils.convert_naive_dt_to_tz_aware(
dt=datetime(2019, 4, 1, 10, 22, 2),
tz=CesionAecXml.DATETIME_FIELDS_TZ,
),
fecha_ultimo_vencimiento=date(2019, 5, 1),
cedente_razon_social='SERVICIOS BONILLA Y LOPEZ Y COMPAÑIA LIMITADA',
cedente_direccion='MERCED 753 16 ARBOLEDA DE QUIILOTA',
cedente_email='enaconltda@gmail.com',
cedente_persona_autorizada_rut=Rut('76354771-K'),
cedente_persona_autorizada_nombre='SERVICIOS BONILLA Y LOPEZ Y COMPAÑIA LIM',
cesionario_razon_social='ST CAPITAL S.A.',
cesionario_direccion='Isidora Goyenechea 2939 Oficina 602',
cesionario_email='fynpal-app-notif-st-capital@fynpal.com',
dte_deudor_email=None,
cedente_declaracion_jurada=(
'Se declara bajo juramento que SERVICIOS BONILLA Y LOPEZ Y COMPAÑIA '
'LIMITADA, RUT 76354771-K ha puesto a disposición del cesionario ST '
'CAPITAL S.A., RUT 76389992-6, el o los documentos donde constan los '
'recibos de las mercaderías entregadas o servicios prestados, entregados '
'por parte del deudor de la factura MINERA LOS PELAMBRES, RUT 96790240-3, '
'deacuerdo a lo establecido en la Ley N°19.983.'
),
)
obj_cesion_2 = CesionAecXml(
dte=DteDataL1(
emisor_rut=Rut('76354771-K'),
tipo_dte=TipoDte.FACTURA_ELECTRONICA,
folio=170,
fecha_emision_date=date(2019, 4, 1),
receptor_rut=Rut('96790240-3'),
monto_total=2996301,
),
seq=2,
cedente_rut=Rut('76389992-6'),
cesionario_rut=Rut('76598556-0'),
monto_cesion=2996301,
fecha_cesion_dt=tz_utils.convert_naive_dt_to_tz_aware(
dt=datetime(2019, 4, 5, 12, 57, 32),
tz=CesionAecXml.DATETIME_FIELDS_TZ,
),
fecha_ultimo_vencimiento=date(2019, 5, 1),
cedente_razon_social='ST CAPITAL S.A.',
cedente_direccion='Isidora Goyenechea 2939 Oficina 602',
cedente_email='APrat@Financiaenlinea.com',
cedente_persona_autorizada_rut=Rut('16360379-9'),
cedente_persona_autorizada_nombre='ANDRES PRATS VIAL',
cesionario_razon_social='Fondo de Inversión Privado Deuda y Facturas',
cesionario_direccion='Arrayan 2750 Oficina 703 Providencia',
cesionario_email='solicitudes@stcapital.cl',
dte_deudor_email=None,
cedente_declaracion_jurada=(
'Se declara bajo juramento que ST CAPITAL S.A., RUT 76389992-6 ha puesto '
'a disposicion del cesionario Fondo de Inversión Privado Deuda y Facturas, '
'RUT 76598556-0, el documento validamente emitido al deudor MINERA LOS '
'PELAMBRES, RUT 96790240-3.'
),
)
obj_signature_value = encoding_utils.decode_base64_strict(
read_test_file_bytes(
'test_data/sii-crypto/AEC--76354771-K--33--170--SEQ-2-signature-value-base64.txt',
),
)
obj_signature_x509_cert_der = read_test_file_bytes(
'test_data/sii-crypto/AEC--76354771-K--33--170--SEQ-2-cert.der',
)
obj = AecXml(
dte=obj_dte,
cedente_rut=Rut('76389992-6'),
cesionario_rut=Rut('76598556-0'),
fecha_firma_dt=tz_utils.convert_naive_dt_to_tz_aware(
dt=datetime(2019, 4, 5, 12, 57, 32),
tz=AecXml.DATETIME_FIELDS_TZ,
),
signature_value=obj_signature_value,
signature_x509_cert_der=obj_signature_x509_cert_der,
cesiones=[
obj_cesion_1,
obj_cesion_2,
],
contacto_nombre='ST Capital Servicios Financieros',
contacto_telefono=None,
contacto_email='APrat@Financiaenlinea.com',
)
self.assertIsInstance(obj, AecXml)
self.obj_1 = obj
self.obj_1_dte = obj_dte
self.obj_1_cesion_1 = obj_cesion_1
self.obj_1_cesion_2 = obj_cesion_2
def test_create_new_empty_instance(self) -> None:
with self.assertRaises(TypeError):
AecXml()
def test_natural_key(self) -> None:
self._set_obj_1()
obj = self.obj_1
expected_output = CesionNaturalKey(
dte_key=DteNaturalKey(
emisor_rut=Rut('76354771-K'),
tipo_dte=TipoDte.FACTURA_ELECTRONICA,
folio=170,
),
seq=2,
)
self.assertEqual(obj.natural_key, expected_output)
def test_alt_natural_key(self) -> None:
self._set_obj_1()
obj = self.obj_1
expected_output = CesionAltNaturalKey(
dte_key=DteNaturalKey(
emisor_rut=Rut('76354771-K'),
tipo_dte=TipoDte.FACTURA_ELECTRONICA,
folio=170,
),
cedente_rut=Rut('76389992-6'),
cesionario_rut=Rut('76598556-0'),
fecha_cesion_dt=tz_utils.convert_naive_dt_to_tz_aware(
dt=datetime(2019, 4, 5, 12, 57),
tz=CesionAltNaturalKey.DATETIME_FIELDS_TZ,
),
)
self.assertEqual(obj.alt_natural_key, expected_output)
def test_slug(self) -> None:
self._set_obj_1()
obj = self.obj_1
expected_output = '76354771-K--33--170--2'
self.assertEqual(obj.slug, expected_output)
def test_last_cesion(self) -> None:
self._set_obj_1()
obj = self.obj_1
obj_cesion_2 = self.obj_1_cesion_2
self.assertEqual(obj.cesiones[-1], obj_cesion_2)
self.assertEqual(obj._last_cesion, obj.cesiones[-1])
def test_as_cesion_l2(self) -> None:
self._set_obj_1()
obj = self.obj_1
expected_output = CesionL2(
dte_key=DteNaturalKey(
emisor_rut=Rut('76354771-K'),
tipo_dte=TipoDte.FACTURA_ELECTRONICA,
folio=170,
),
seq=2,
cedente_rut=Rut('76389992-6'),
cesionario_rut=Rut('76598556-0'),
fecha_cesion_dt=tz_utils.convert_naive_dt_to_tz_aware(
dt=datetime(2019, 4, 5, 12, 57, 32),
tz=CesionL2.DATETIME_FIELDS_TZ,
),
monto_cedido=2996301,
fecha_firma_dt=tz_utils.convert_naive_dt_to_tz_aware(
dt=datetime(2019, 4, 5, 12, 57, 32),
tz=CesionL2.DATETIME_FIELDS_TZ,
),
dte_receptor_rut=Rut('96790240-3'),
dte_fecha_emision=date(2019, 4, 1),
dte_monto_total=2996301,
fecha_ultimo_vencimiento=date(2019, 5, 1),
cedente_razon_social='ST CAPITAL S.A.',
cedente_email='APrat@Financiaenlinea.com',
cesionario_razon_social='Fondo de Inversión Privado Deuda y Facturas',
cesionario_email='solicitudes@stcapital.cl',
dte_emisor_razon_social='INGENIERIA ENACON SPA',
dte_receptor_razon_social='MINERA LOS PELAMBRES',
dte_deudor_email=None,
cedente_declaracion_jurada=(
'Se declara bajo juramento que ST CAPITAL S.A., RUT 76389992-6 ha puesto '
'a disposicion del cesionario Fondo de Inversión Privado Deuda y Facturas, '
'RUT 76598556-0, el documento validamente emitido al deudor MINERA LOS '
'PELAMBRES, RUT 96790240-3.'
),
dte_fecha_vencimiento=None,
contacto_nombre='ST Capital Servicios Financieros',
contacto_telefono=None,
contacto_email='APrat@Financiaenlinea.com',
)
obj_cesion_l2 = obj.as_cesion_l2()
self.assertEqual(obj_cesion_l2, expected_output)
self.assertEqual(obj_cesion_l2.natural_key, obj.natural_key)
self.assertEqual(obj_cesion_l2.alt_natural_key, obj.alt_natural_key)
self.assertEqual(obj_cesion_l2.dte_key, obj.dte.natural_key)
def test_validate_dte_tipo_dte(self) -> None:
self._set_obj_1()
obj = self.obj_1
expected_validation_errors = [
{
'loc': ('dte',),
'msg': """('Value is not "cedible".', <TipoDte.NOTA_CREDITO_ELECTRONICA: 61>)""",
'type': 'value_error',
},
]
with self.assertRaises(pydantic.ValidationError) as assert_raises_cm:
dataclasses.replace(
obj,
dte=dataclasses.replace(
obj.dte,
tipo_dte=TipoDte.NOTA_CREDITO_ELECTRONICA,
),
)
validation_errors = assert_raises_cm.exception.errors()
self.assertEqual(len(validation_errors), len(expected_validation_errors))
for expected_validation_error in expected_validation_errors:
self.assertIn(expected_validation_error, validation_errors)
def test_validate_datetime_tz(self) -> None:
self._set_obj_1()
obj = self.obj_1
# Test TZ-awareness:
expected_validation_errors = [
{
'loc': ('fecha_firma_dt',),
'msg': 'Value must be a timezone-aware datetime object.',
'type': 'value_error',
},
]
with self.assertRaises(pydantic.ValidationError) as assert_raises_cm:
dataclasses.replace(
obj,
fecha_firma_dt=datetime(2019, 4, 5, 12, 57, 32),
)
validation_errors = assert_raises_cm.exception.errors()
self.assertEqual(len(validation_errors), len(expected_validation_errors))
for expected_validation_error in expected_validation_errors:
self.assertIn(expected_validation_error, validation_errors)
# Test TZ-value:
expected_validation_errors = [
{
'loc': ('fecha_firma_dt',),
'msg': (
'('
'''"Timezone of datetime value must be 'America/Santiago'.",'''
' datetime.datetime(2019, 4, 5, 12, 57, 32, tzinfo=<UTC>)'
')'
),
'type': 'value_error',
},
]
with self.assertRaises(pydantic.ValidationError) as assert_raises_cm:
dataclasses.replace(
obj,
fecha_firma_dt=tz_utils.convert_naive_dt_to_tz_aware(
dt=datetime(2019, 4, 5, 12, 57, 32),
tz=tz_utils.TZ_UTC,
),
)
validation_errors = assert_raises_cm.exception.errors()
self.assertEqual(len(validation_errors), len(expected_validation_errors))
for expected_validation_error in expected_validation_errors:
self.assertIn(expected_validation_error, validation_errors)
def test_validate_cesiones_min_items(self) -> None:
self._set_obj_1()
obj = self.obj_1
expected_validation_errors = [
{
'loc': ('cesiones',),
'msg': 'must contain at least one item',
'type': 'value_error',
},
]
with self.assertRaises(pydantic.ValidationError) as assert_raises_cm:
dataclasses.replace(
obj,
cesiones=[],
)
validation_errors = assert_raises_cm.exception.errors()
self.assertEqual(len(validation_errors), len(expected_validation_errors))
for expected_validation_error in expected_validation_errors:
self.assertIn(expected_validation_error, validation_errors)
def test_validate_cesiones_seq_order(self) -> None:
self._set_obj_1()
obj = self.obj_1
expected_validation_errors = [
{
'loc': ('cesiones',),
'msg': "items must be ordered according to their 'seq'",
'type': 'value_error',
},
]
with self.assertRaises(pydantic.ValidationError) as assert_raises_cm:
dataclasses.replace(
obj,
cesiones=list(reversed(obj.cesiones)),
)
validation_errors = assert_raises_cm.exception.errors()
self.assertEqual(len(validation_errors), len(expected_validation_errors))
for expected_validation_error in expected_validation_errors:
self.assertIn(expected_validation_error, validation_errors)
# def test_validate_cesiones_monto_cesion_must_not_increase(self) -> None:
# self._set_obj_1()
# obj = self.obj_1
# expected_validation_errors = [
# {
# 'loc': ('cesiones',),
# 'msg':
# "items must have a 'monto_cesion'"
# " that does not exceed the previous item's 'monto_cesion'.",
# 'type': 'value_error',
# },
# ]
# with self.assertRaises(pydantic.ValidationError) as assert_raises_cm:
# dataclasses.replace(
# obj,
# cesiones=[
# dataclasses.replace(
# obj.cesiones[0],
# monto_cesion=obj.cesiones[1].monto_cesion - 1,
# ),
# obj.cesiones[1],
# ],
# )
# validation_errors = assert_raises_cm.exception.errors()
# self.assertEqual(len(validation_errors), len(expected_validation_errors))
# for expected_validation_error in expected_validation_errors:
# self.assertIn(expected_validation_error, validation_errors)
def test_validate_dte_matches_cesiones_dtes(self) -> None:
self._set_obj_1()
obj = self.obj_1
expected_validation_errors = [
{
'loc': ('__root__',),
'msg': (
"'dte' of CesionAecXml with CesionNaturalKey("
"dte_key=DteNaturalKey("
"emisor_rut=Rut('76354771-K'),"
" tipo_dte=<TipoDte.FACTURA_ELECTRONICA: 33>,"
" folio=171),"
" seq=1"
")"
" must match DteDataL1 with DteNaturalKey("
"emisor_rut=Rut('76354771-K'),"
" tipo_dte=<TipoDte.FACTURA_ELECTRONICA: 33>,"
" folio=170"
")."
),
'type': 'value_error',
},
]
with self.assertRaises(pydantic.ValidationError) as assert_raises_cm:
dataclasses.replace(
obj,
cesiones=[
dataclasses.replace(
obj.cesiones[0],
dte=dataclasses.replace(
obj.cesiones[0].dte,
folio=obj.cesiones[0].dte.folio + 1,
),
),
obj.cesiones[1],
],
)
validation_errors = assert_raises_cm.exception.errors()
self.assertEqual(len(validation_errors), len(expected_validation_errors))
for expected_validation_error in expected_validation_errors:
self.assertIn(expected_validation_error, validation_errors)
def test_validate_last_cesion_matches_some_fields(self) -> None:
self._set_obj_1()
obj = self.obj_1
expected_validation_errors = [
{
'loc': ('__root__',),
'msg': (
"'cedente_rut' of last 'cesion' must match 'cedente_rut':"
" Rut('76389992-6')"
" !="
" Rut('76598556-0')."
),
'type': 'value_error',
},
]
with self.assertRaises(pydantic.ValidationError) as assert_raises_cm:
dataclasses.replace(
obj,
cedente_rut=obj.cesionario_rut,
)
validation_errors = assert_raises_cm.exception.errors()
self.assertEqual(len(validation_errors), len(expected_validation_errors))
for expected_validation_error in expected_validation_errors:
self.assertIn(expected_validation_error, validation_errors)
|
import fcntl
class LockedException (Exception):
pass
class flock (object):
__slots__ = [ "file", "shared", "locked", "block" ]
def lock (self, *, shared=None):
shared = self.shared if shared is None else shared
flg = fcntl.LOCK_SH if shared else fcntl.LOCK_EX
fcntl.flock(self.file, flg)
self.locked = True
def try_lock (self, *, shared=None, throw=False):
shared = self.shared if shared is None else shared
flg = fcntl.LOCK_SH if shared else fcntl.LOCK_EX
try:
fcntl.flock(self.file, flg | fcntl.LOCK_NB)
self.locked = True
except OSError:
if throw:
raise LockedException
return False
return True
def unlock (self):
if self.locked:
fcntl.flock(self.file, fcntl.LOCK_UN)
self.locked = False
def __init__ (self, file, shared=False, block=True):
self.file = file
self.shared = shared
self.block = block
self.locked = False
def __del__ (self):
self.unlock()
    def __enter__ (self):
        if self.block:
            self.lock()
        else:
            self.try_lock(throw=True)
        return self
def __exit__ (self, *_):
self.unlock()
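# Minimal usage sketch (illustrative; the file object and path are hypothetical):
#
#     with open("state.dat", "rb") as fh:
#         with flock(fh):                              # exclusive lock, blocks until acquired
#             data = fh.read()
#         try:
#             with flock(fh, shared=True, block=False):  # non-blocking shared lock
#                 data = fh.read()
#         except LockedException:
#             pass                                     # someone else holds an exclusive lock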
|
from ffov.setters.base import FovSetter
from ffov.setters.nv import NewVegasFovSetter
|
import random
numero = random.randint(1, 99)
def pedir_numero():
    # Keep asking until the input is within the valid range.
    while True:
        intento = int(input("Give me a number from 1 to 99: "))
        if 1 <= intento <= 99:
            return intento
        print("The number must be between 1 and 99.")
print("WELCOME TO THE GUESSING GAME")
intento = pedir_numero()
contador = 1
while numero != intento:
    if numero > intento:
        print("too small")
    else:
        print("too big")
    intento = pedir_numero()
    contador += 1
print("CONGRATULATIONS, CHAMPION")
if contador < 5:
    print("You used", contador, "attempts")
else:
    print("You used", contador, "attempts, you need to improve")
|
from urllib.parse import urlencode
from django import http
from django.apps import apps
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils.html import escape
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
from django.views.generic.edit import FormView
from django.shortcuts import render, resolve_url
from ..compat import url_has_allowed_host_and_scheme
import django_comments
from django_comments import signals
from django_comments.views.utils import confirmation_view
class CommentPostBadRequest(http.HttpResponseBadRequest):
"""
Response returned when a comment post is invalid. If ``DEBUG`` is on a
nice-ish error message will be displayed (for debugging purposes), but in
production mode a simple opaque 400 page will be displayed.
"""
def __init__(self, why):
super().__init__()
if settings.DEBUG:
self.content = render_to_string("comments/400-debug.html", {"why": why})
class BadRequest(Exception):
"""
Exception raised for a bad post request holding the CommentPostBadRequest
object.
"""
def __init__(self, why):
self.response = CommentPostBadRequest(why)
class CommentPostView(FormView):
http_method_names = ['post']
def get_target_object(self, data):
# Look up the object we're trying to comment about
ctype = data.get("content_type")
object_pk = data.get("object_pk")
if ctype is None or object_pk is None:
raise BadRequest("Missing content_type or object_pk field.")
try:
model = apps.get_model(*ctype.split(".", 1))
return model._default_manager.using(self.kwargs.get('using')).get(pk=object_pk)
except TypeError:
raise BadRequest("Invalid content_type value: %r" % escape(ctype))
except AttributeError:
raise BadRequest("The given content-type %r does not resolve to a valid model." % escape(ctype))
except ObjectDoesNotExist:
raise BadRequest("No object matching content-type %r and object PK %r exists." % (
escape(ctype), escape(object_pk)))
except (ValueError, ValidationError) as e:
raise BadRequest("Attempting to get content-type %r and object PK %r raised %s" % (
escape(ctype), escape(object_pk), e.__class__.__name__))
def get_form_kwargs(self):
data = self.request.POST.copy()
if self.request.user.is_authenticated:
if not data.get('name', ''):
data["name"] = self.request.user.get_full_name() or self.request.user.get_username()
if not data.get('email', ''):
data["email"] = self.request.user.email
return data
def get_form_class(self):
"""Return the form class to use."""
return django_comments.get_form()
def get_form(self, form_class=None):
"""Return an instance of the form to be used in this view."""
if form_class is None:
form_class = self.get_form_class()
return form_class(self.target_object, data=self.data)
def get_success_url(self):
"""Return the URL to redirect to after processing a valid form."""
next = self.data.get('next')
fallback = self.kwargs.get('next') or 'comments-comment-done'
get_kwargs = dict(c=self.object._get_pk_val())
if not url_has_allowed_host_and_scheme(url=next, allowed_hosts={self.request.get_host()}):
next = resolve_url(fallback)
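        # Rebuild the redirect URL: append ?c=<comment pk> while keeping any #fragment at the end.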
if '#' in next:
tmp = next.rsplit('#', 1)
next = tmp[0]
anchor = '#' + tmp[1]
else:
anchor = ''
        joiner = '&' if '?' in next else '?'
next += joiner + urlencode(get_kwargs) + anchor
return next
def create_comment(self, form):
comment = form.get_comment_object(site_id=get_current_site(self.request).id)
comment.ip_address = self.request.META.get("REMOTE_ADDR", None) or None
if self.request.user.is_authenticated:
comment.user = self.request.user
# Signal that the comment is about to be saved
responses = signals.comment_will_be_posted.send(
sender=comment.__class__,
comment=comment,
request=self.request
)
for (receiver, response) in responses:
if response is False:
raise BadRequest("comment_will_be_posted receiver %r killed the comment" % receiver.__name__)
# Save the comment and signal that it was saved
comment.save()
signals.comment_was_posted.send(
sender=comment.__class__,
comment=comment,
request=self.request
)
return comment
def get_template_names(self):
if self.template_name is None:
model = type(self.target_object)
return [
# These first two exist for purely historical reasons.
# Django v1.0 and v1.1 allowed the underscore format for
# preview templates, so we have to preserve that format.
"comments/%s_%s_preview.html" % (model._meta.app_label, model._meta.model_name),
"comments/%s_preview.html" % model._meta.app_label,
# Now the usual directory based template hierarchy.
"comments/%s/%s/preview.html" % (model._meta.app_label, model._meta.model_name),
"comments/%s/preview.html" % model._meta.app_label,
"comments/preview.html",
]
else:
return [self.template_name]
def get_context_data(self, form):
return dict(
form=form,
comment=form.data.get("comment", ""),
next=self.data.get("next", self.kwargs.get('next')),
)
@method_decorator(csrf_protect)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request, **kwargs):
self.object = None
self.target_object = None
self.data = self.get_form_kwargs()
try:
self.target_object = self.get_target_object(self.data)
except BadRequest as exc:
return exc.response
form = self.get_form()
# Check security information
if form.security_errors():
return CommentPostBadRequest(
"The comment form failed security verification: %s" % escape(str(form.security_errors())))
if not form.is_valid() or "preview" in self.data:
return self.form_invalid(form)
else:
try:
self.object = self.create_comment(form)
except BadRequest as exc:
return exc.response
else:
return self.form_valid(form)
comment_done = confirmation_view(
template="comments/posted.html",
doc="""Display a "comment was posted" success page."""
)
|
#!/usr/bin/python
# coding: utf-8
# Copyright (c) 2016 Mountainstorm
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals, print_function
from pdb.bitaccess import BitAccess
from pdb.pdbfileheader import PdbFileHeader
from pdb.pdbstreamhelper import PdbStreamHelper
from pdb.msfdirectory import MsfDirectory
from pdb.pdbfile import PdbFile
from pdb.pdbexception import PdbException
from pdb.pdbdebugexception import PdbDebugException
import os
class PdbError(Exception):
pass
class PdbInvalidError(PdbError):
pass
class PdbUnsupportedError(PdbError):
pass
class PdbMissingNameStreamError(PdbError):
pass
class PdbMissingDBIError(PdbError):
pass
class NameStream(object):
def __init__(self, reader, directory):
if directory.streams[1].content_size == 0:
raise PdbMissingNameStreamError()
bits = BitAccess(512 * 1024)
directory.streams[1].read(reader, bits)
(self.name_index,
self.ver,
self.sig,
self.age,
self.guid) = PdbFile.load_name_index(bits)
class DbiStream(object):
def __init__(self, reader, directory, ext=PdbFile.EXT_MODULE_FILES):
if directory.streams[3].content_size == 0:
raise PdbMissingDBIError()
bits = BitAccess(512 * 1024)
directory.streams[3].read(reader, bits)
(self.modules,
self.header,
self.dbghdr,
self.module_files) = PdbFile.load_dbi_stream(
bits, True, ext
)
class PDB(object):
'''Helper for retrieving information from PDB file'''
def __init__(self, path, filename=None, ignore_dbi=False):
bits = BitAccess(512 * 1024)
self.path = path
self.filename = filename
if filename is None:
self.filename = os.path.basename(path)
self.pdb_stream = open(path, 'rb')
self.check_format()
self.header = PdbFileHeader(self.pdb_stream, bits)
self.reader = PdbStreamHelper(self.pdb_stream, self.header.page_size)
self.directory = MsfDirectory(self.reader, self.header, bits)
# streams
self.name_stream = NameStream(self.reader, self.directory)
self.dbi_stream = None
age = None
try:
self.dbi_stream = DbiStream(self.reader, self.directory)
except PdbDebugException:
# try without files
self.dbi_stream = DbiStream(
self.reader, self.directory, PdbFile.EXT_DBIHEADER
)
        except PdbMissingDBIError:
            if not ignore_dbi:
                raise
# generate the symbol id which will match the one from the PE file
age = self.name_stream.age
if self.dbi_stream is not None:
age = self.dbi_stream.dbghdr.age
self.symbol_id = '%s/%s%X' % (
self.filename.lower(),
str(self.name_stream.guid).replace('-', '').upper(),
age
)
def __del__(self):
self.close()
def close(self):
if self.pdb_stream is not None:
self.pdb_stream.close()
self.pdb_stream = None
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
def check_format(self):
pdb7 = b'Microsoft C/C++ MSF 7.00\r\n\x1ADS\0\0\0'
magic = self.pdb_stream.read(len(pdb7))
self.pdb_stream.seek(0)
if magic != pdb7:
pdb2 = b'Microsoft C/C++ program database 2.00\r\n\032JG\0\0'
magic = self.pdb_stream.read(len(pdb2))
self.pdb_stream.seek(0)
if magic != pdb2:
raise PdbInvalidError('File not a PDB or contains an invalid header')
else:
raise PdbUnsupportedError('File is an unsupported PDB2 symbol file')
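# Minimal usage sketch (illustrative; the path is hypothetical):
#
#     with PDB('/tmp/example.pdb') as pdb:
#         # symbol_id matches the "<name>/<GUID><age>" identifier referenced by the PE file
#         print(pdb.symbol_id)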
|
import datetime
from bx_py_utils.test_utils.datetime import parse_dt
from django.test import SimpleTestCase
from django.utils import translation
from bx_django_utils.templatetags.humanize_time import human_duration
class HumanizeTimeTestCase(SimpleTestCase):
def test_basic(self):
with translation.override('en'):
result = human_duration(
parse_dt('2000-01-01T12:00:00+0000'),
parse_dt('2000-01-01T12:10:00+0000'),
)
assert result == '<span title="Jan. 1, 2000, noon">10.0\xa0minutes</span>'
with translation.override('en'):
result = human_duration(
parse_dt('2000-01-01T12:00:00+0000'),
parse_dt('2000-01-01T12:00:02+0000'),
)
assert result == '<span title="Jan. 1, 2000, noon">2.0\xa0seconds</span>'
with translation.override('de'):
result = human_duration(
parse_dt('2000-01-01T12:00:00+0000'),
parse_dt('2000-01-01T12:10:00+0000'),
)
assert result == '<span title="1. Januar 2000 12:00">10.0\xa0minutes</span>'
with translation.override('en'):
result = human_duration(
parse_dt('2000-01-01T12:32:12+0000'),
parse_dt('2000-01-01T12:20:10+0000'),
)
assert result == '<span title="Jan. 1, 2000, 12:32 p.m.">-12.0\xa0minutes</span>'
with translation.override('en'):
years_back = datetime.datetime.now() - datetime.timedelta(days=5 * 365)
result = human_duration(years_back)
assert result.endswith('>5.0\xa0years</span>')
assert human_duration(None) == ''
assert human_duration(value=object) == ''
with translation.override('en'):
result = human_duration(
datetime.date(2000, 1, 1),
datetime.date(2000, 6, 15),
)
assert result == '<span title="Jan. 1, 2000, midnight">5.5\xa0months</span>'
|
from . import vk
def get_mutual_friends(user_id: int):
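    # Fetch the user's friend list, then resolve the ids of mutual friends
    # for all of them in a single batched call.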
friends = vk.get_friends(user_id)
mutual_ids = vk.get_mutual_friends_ids_batch([user.id for user in
friends], user_id)
return friends, mutual_ids
|
from .apmeter import APMeter
from .aucmeter import AUCMeter
from .averagevaluemeter import AverageValueMeter
from .classerrormeter import ClassErrorMeter
from .confusionmeter import ConfusionMeter
from .mapmeter import mAPMeter
from .movingaveragevaluemeter import MovingAverageValueMeter
from .msemeter import MSEMeter
from .retrievalmeter import RetrievalMAPMeter, CrossRetrievalMAPMeter
from .timemeter import TimeMeter
|
#
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style,utf8strings
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Provider(object):
"""
Attributes:
- uuid
- name
- hash
- options
- time
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'uuid', None, None, ), # 1
(2, TType.STRING, 'name', None, None, ), # 2
(3, TType.STRING, 'hash', None, None, ), # 3
(4, TType.MAP, 'options', (TType.STRING,None,TType.STRING,None), None, ), # 4
(5, TType.I32, 'time', None, None, ), # 5
)
def __init__(self, uuid=None, name=None, hash=None, options=None, time=None,):
self.uuid = uuid
self.name = name
self.hash = hash
self.options = options
self.time = time
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.uuid = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.hash = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.options = {}
(_ktype1, _vtype2, _size0 ) = iprot.readMapBegin()
for _i4 in xrange(_size0):
_key5 = iprot.readString().decode('utf-8')
_val6 = iprot.readString().decode('utf-8')
self.options[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.time = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Provider')
if self.uuid is not None:
oprot.writeFieldBegin('uuid', TType.STRING, 1)
oprot.writeString(self.uuid.encode('utf-8'))
oprot.writeFieldEnd()
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 2)
oprot.writeString(self.name.encode('utf-8'))
oprot.writeFieldEnd()
if self.hash is not None:
oprot.writeFieldBegin('hash', TType.STRING, 3)
oprot.writeString(self.hash.encode('utf-8'))
oprot.writeFieldEnd()
if self.options is not None:
oprot.writeFieldBegin('options', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.options))
for kiter7,viter8 in self.options.items():
oprot.writeString(kiter7.encode('utf-8'))
oprot.writeString(viter8.encode('utf-8'))
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.time is not None:
oprot.writeFieldBegin('time', TType.I32, 5)
oprot.writeI32(self.time)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.uuid is None:
raise TProtocol.TProtocolException(message='Required field uuid is unset!')
if self.options is None:
raise TProtocol.TProtocolException(message='Required field options is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.uuid)
value = (value * 31) ^ hash(self.name)
value = (value * 31) ^ hash(self.hash)
value = (value * 31) ^ hash(self.options)
value = (value * 31) ^ hash(self.time)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Identity(object):
"""
Attributes:
- uuid
- name
- hash
- options
- time
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'uuid', None, None, ), # 1
(2, TType.STRING, 'name', None, None, ), # 2
(3, TType.STRING, 'hash', None, None, ), # 3
(4, TType.MAP, 'options', (TType.STRING,None,TType.STRING,None), None, ), # 4
(5, TType.I32, 'time', None, None, ), # 5
)
def __init__(self, uuid=None, name=None, hash=None, options=None, time=None,):
self.uuid = uuid
self.name = name
self.hash = hash
self.options = options
self.time = time
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.uuid = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.hash = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.options = {}
(_ktype10, _vtype11, _size9 ) = iprot.readMapBegin()
for _i13 in xrange(_size9):
_key14 = iprot.readString().decode('utf-8')
_val15 = iprot.readString().decode('utf-8')
self.options[_key14] = _val15
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.time = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Identity')
if self.uuid is not None:
oprot.writeFieldBegin('uuid', TType.STRING, 1)
oprot.writeString(self.uuid.encode('utf-8'))
oprot.writeFieldEnd()
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 2)
oprot.writeString(self.name.encode('utf-8'))
oprot.writeFieldEnd()
if self.hash is not None:
oprot.writeFieldBegin('hash', TType.STRING, 3)
oprot.writeString(self.hash.encode('utf-8'))
oprot.writeFieldEnd()
if self.options is not None:
oprot.writeFieldBegin('options', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.options))
for kiter16,viter17 in self.options.items():
oprot.writeString(kiter16.encode('utf-8'))
oprot.writeString(viter17.encode('utf-8'))
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.time is not None:
oprot.writeFieldBegin('time', TType.I32, 5)
oprot.writeI32(self.time)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.uuid is None:
raise TProtocol.TProtocolException(message='Required field uuid is unset!')
if self.options is None:
raise TProtocol.TProtocolException(message='Required field options is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.uuid)
value = (value * 31) ^ hash(self.name)
value = (value * 31) ^ hash(self.hash)
value = (value * 31) ^ hash(self.options)
value = (value * 31) ^ hash(self.time)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Instance(object):
"""
Attributes:
- uuid
- machine_uuid
- name
- public_addresses
- private_addresses
- extra
- project_id
- provider_hash
- identity_hash
- time
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'uuid', None, None, ), # 1
(2, TType.STRING, 'machine_uuid', None, None, ), # 2
(3, TType.STRING, 'name', None, None, ), # 3
(4, TType.LIST, 'public_addresses', (TType.STRING,None), None, ), # 4
(5, TType.LIST, 'private_addresses', (TType.STRING,None), None, ), # 5
(6, TType.STRING, 'extra', None, None, ), # 6
(7, TType.STRING, 'project_id', None, None, ), # 7
(8, TType.STRING, 'provider_hash', None, None, ), # 8
(9, TType.STRING, 'identity_hash', None, None, ), # 9
(10, TType.I32, 'time', None, None, ), # 10
)
def __init__(self, uuid=None, machine_uuid=None, name=None, public_addresses=None, private_addresses=None, extra=None, project_id=None, provider_hash=None, identity_hash=None, time=None,):
self.uuid = uuid
self.machine_uuid = machine_uuid
self.name = name
self.public_addresses = public_addresses
self.private_addresses = private_addresses
self.extra = extra
self.project_id = project_id
self.provider_hash = provider_hash
self.identity_hash = identity_hash
self.time = time
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.uuid = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.machine_uuid = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.public_addresses = []
(_etype21, _size18) = iprot.readListBegin()
for _i22 in xrange(_size18):
_elem23 = iprot.readString().decode('utf-8')
self.public_addresses.append(_elem23)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.private_addresses = []
(_etype27, _size24) = iprot.readListBegin()
for _i28 in xrange(_size24):
_elem29 = iprot.readString().decode('utf-8')
self.private_addresses.append(_elem29)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.extra = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.project_id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRING:
self.provider_hash = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.STRING:
self.identity_hash = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.I32:
self.time = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Instance')
if self.uuid is not None:
oprot.writeFieldBegin('uuid', TType.STRING, 1)
oprot.writeString(self.uuid.encode('utf-8'))
oprot.writeFieldEnd()
if self.machine_uuid is not None:
oprot.writeFieldBegin('machine_uuid', TType.STRING, 2)
oprot.writeString(self.machine_uuid.encode('utf-8'))
oprot.writeFieldEnd()
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 3)
oprot.writeString(self.name.encode('utf-8'))
oprot.writeFieldEnd()
if self.public_addresses is not None:
oprot.writeFieldBegin('public_addresses', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.public_addresses))
for iter30 in self.public_addresses:
oprot.writeString(iter30.encode('utf-8'))
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.private_addresses is not None:
oprot.writeFieldBegin('private_addresses', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.private_addresses))
for iter31 in self.private_addresses:
oprot.writeString(iter31.encode('utf-8'))
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.extra is not None:
oprot.writeFieldBegin('extra', TType.STRING, 6)
oprot.writeString(self.extra.encode('utf-8'))
oprot.writeFieldEnd()
if self.project_id is not None:
oprot.writeFieldBegin('project_id', TType.STRING, 7)
oprot.writeString(self.project_id.encode('utf-8'))
oprot.writeFieldEnd()
if self.provider_hash is not None:
oprot.writeFieldBegin('provider_hash', TType.STRING, 8)
oprot.writeString(self.provider_hash.encode('utf-8'))
oprot.writeFieldEnd()
if self.identity_hash is not None:
oprot.writeFieldBegin('identity_hash', TType.STRING, 9)
oprot.writeString(self.identity_hash.encode('utf-8'))
oprot.writeFieldEnd()
if self.time is not None:
oprot.writeFieldBegin('time', TType.I32, 10)
oprot.writeI32(self.time)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.uuid is None:
raise TProtocol.TProtocolException(message='Required field uuid is unset!')
if self.machine_uuid is None:
raise TProtocol.TProtocolException(message='Required field machine_uuid is unset!')
if self.provider_hash is None:
raise TProtocol.TProtocolException(message='Required field provider_hash is unset!')
if self.time is None:
raise TProtocol.TProtocolException(message='Required field time is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.uuid)
value = (value * 31) ^ hash(self.machine_uuid)
value = (value * 31) ^ hash(self.name)
value = (value * 31) ^ hash(self.public_addresses)
value = (value * 31) ^ hash(self.private_addresses)
value = (value * 31) ^ hash(self.extra)
value = (value * 31) ^ hash(self.project_id)
value = (value * 31) ^ hash(self.provider_hash)
value = (value * 31) ^ hash(self.identity_hash)
value = (value * 31) ^ hash(self.time)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Instances(object):
"""
Attributes:
- instances
- provider_hash
- identity_hash
- time
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'instances', (TType.STRUCT,(Instance, Instance.thrift_spec)), None, ), # 1
(2, TType.STRING, 'provider_hash', None, None, ), # 2
(3, TType.STRING, 'identity_hash', None, None, ), # 3
(4, TType.I32, 'time', None, None, ), # 4
)
def __init__(self, instances=None, provider_hash=None, identity_hash=None, time=None,):
self.instances = instances
self.provider_hash = provider_hash
self.identity_hash = identity_hash
self.time = time
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.instances = []
(_etype35, _size32) = iprot.readListBegin()
for _i36 in xrange(_size32):
_elem37 = Instance()
_elem37.read(iprot)
self.instances.append(_elem37)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.provider_hash = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.identity_hash = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.time = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Instances')
if self.instances is not None:
oprot.writeFieldBegin('instances', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.instances))
for iter38 in self.instances:
iter38.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.provider_hash is not None:
oprot.writeFieldBegin('provider_hash', TType.STRING, 2)
oprot.writeString(self.provider_hash.encode('utf-8'))
oprot.writeFieldEnd()
if self.identity_hash is not None:
oprot.writeFieldBegin('identity_hash', TType.STRING, 3)
oprot.writeString(self.identity_hash.encode('utf-8'))
oprot.writeFieldEnd()
if self.time is not None:
oprot.writeFieldBegin('time', TType.I32, 4)
oprot.writeI32(self.time)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.instances is None:
raise TProtocol.TProtocolException(message='Required field instances is unset!')
if self.provider_hash is None:
raise TProtocol.TProtocolException(message='Required field provider_hash is unset!')
if self.time is None:
raise TProtocol.TProtocolException(message='Required field time is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.instances)
value = (value * 31) ^ hash(self.provider_hash)
value = (value * 31) ^ hash(self.identity_hash)
value = (value * 31) ^ hash(self.time)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class OpenStackException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('OpenStackException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocol.TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.message)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ConnectionException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ConnectionException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocol.TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.message)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class DeployException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('DeployException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocol.TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.message)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
from numpy import load
from numpy import zeros
from numpy import ones
from numpy.random import randint
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Concatenate
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import BatchNormalization
from matplotlib import pyplot
from tensorflow.keras.losses import binary_crossentropy
from skimage.transform import resize
from skimage.exposure import rescale_intensity
import numpy as np
import os
import tensorflow as tf
import albumentations as A
import math
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import History
smooth = 1.
img_rows = int(192)
img_cols = int(192)
def load_test_data():
imgs_test = np.load('./masks_test.npy')
return imgs_test
def preprocess(imgs):
imgs_p = np.ndarray((imgs.shape[0], img_rows, img_cols), dtype=np.float32)
for i in range(imgs.shape[0]):
imgs_p[i] = resize(imgs[i], (img_cols, img_rows), preserve_range=True)
imgs_p = imgs_p[..., np.newaxis]
return imgs_p
def dice_coef(y_true, y_pred):
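"""Soerensen-Dice coefficient: 2 * |intersection| / (|y_true| + |y_pred|),
with additive smoothing so the score stays defined when both masks are empty."""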
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
#########################################################################
# define an encoder block
def define_encoder_block(layer_in, n_filters, block_name='name', batchnorm=True, trainable=True):
# weight initialization
init = RandomNormal(stddev=0.02)
# add downsampling layer
g = Conv2D(n_filters, (4, 4), strides=(2, 2), padding='same', trainable=trainable,
kernel_initializer=init, name='conv_'+block_name)(layer_in)
# conditionally add batch normalization
if batchnorm:
g = BatchNormalization(name='batch_'+block_name, trainable=trainable)(g, training=True)
# leaky relu activation
g = LeakyReLU(alpha=0.2)(g)
return g
# define a decoder block
def decoder_block(layer_in, skip_in, n_filters, block_name='name', dropout=True):
# weight initialization
init = RandomNormal(stddev=0.02)
# add upsampling layer
g = Conv2DTranspose(n_filters, (4, 4), strides=(
2, 2), padding='same', kernel_initializer=init, name='conv_'+block_name)(layer_in)
# add batch normalization
g = BatchNormalization(name='batch_'+block_name)(g, training=True)
# conditionally add dropout
if dropout:
g = Dropout(0.5)(g, training=True)
# merge with skip connection
g = Concatenate()([g, skip_in])
# relu activation
g = Activation('relu')(g)
return g
# GENERATOR WITH ORIGINAL ENCODER PART
def define_original_generator(image_shape=(192, 192, 1)):
# weight initialization
init = RandomNormal(stddev=0.02)
# image input
in_image = Input(shape=image_shape)
# encoder model
e1 = define_encoder_block(in_image, 64, block_name='e1', batchnorm=False)
e2 = define_encoder_block(e1, 128, block_name='e2')
e3 = define_encoder_block(e2, 256, block_name='e3')
e4 = define_encoder_block(e3, 512, block_name='e4')
e5 = define_encoder_block(e4, 512, block_name='e5')
# bottleneck, no batch norm and relu
b = Conv2D(512, (4, 4), strides=(2, 2), padding='same',
kernel_initializer=init, name='bottleneck')(e5)
b = Activation('relu')(b)
# decoder model
d3 = decoder_block(b, e5, 512, block_name='d1')
d4 = decoder_block(d3, e4, 512, block_name='d2', dropout=False)
d5 = decoder_block(d4, e3, 256, block_name='d3', dropout=False)
d6 = decoder_block(d5, e2, 128, block_name='d4', dropout=False)
d7 = decoder_block(d6, e1, 64, block_name='d5', dropout=False)
# output
g = Conv2DTranspose(32, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(d7)
g2 = Conv2D(1, (1, 1), padding='same')(g)
out_image = Activation('sigmoid')(g2)
# define model
model = Model(in_image, out_image)
return model
# GENERATOR WITH RENAMED DECODER PART
def define_transfer_generator(image_shape=(192, 192, 1)):
# weight initialization
init = RandomNormal(stddev=0.02)
# image input
in_image = Input(shape=image_shape)
# encoder model
e1 = define_encoder_block(in_image, 64, block_name='e1', batchnorm=False)
e2 = define_encoder_block(e1, 128, block_name='e2')
e3 = define_encoder_block(e2, 256, block_name='e3')
e4 = define_encoder_block(e3, 512, block_name='e4')
e5 = define_encoder_block(e4, 512, block_name='e5')
# bottleneck, no batch norm and relu
b = Conv2D(512, (4, 4), strides=(2, 2), padding='same',
kernel_initializer=init, name='bottleneck_RENAMED')(e5)
b = Activation('relu')(b)
# decoder model
d3 = decoder_block(b, e5, 512, block_name='d1_RENAMED')
d4 = decoder_block(d3, e4, 512, block_name='d2_RENAMED', dropout=False)
d5 = decoder_block(d4, e3, 256, block_name='d3_RENAMED', dropout=False)
d6 = decoder_block(d5, e2, 128, block_name='d4_RENAMED', dropout=False)
d7 = decoder_block(d6, e1, 64, block_name='d5_RENAMED', dropout=False)
# output
g = Conv2DTranspose(32, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(d7)
g2 = Conv2D(1, (1, 1), padding='same')(g)
out_image = Activation('sigmoid')(g2)
# define model
model = Model(in_image, out_image)
return model
# GENERATOR WITH FROZEN ENCODER PART
def define_transfer_generator_freezed(image_shape=(192, 192, 1)):
# weight initialization
init = RandomNormal(stddev=0.02)
# image input
in_image = Input(shape=image_shape)
# encoder model
e1 = define_encoder_block(in_image, 64, block_name='e1', batchnorm=False, trainable=False)
e2 = define_encoder_block(e1, 128, block_name='e2', trainable=False)
e3 = define_encoder_block(e2, 256, block_name='e3', trainable=False)
e4 = define_encoder_block(e3, 512, block_name='e4', trainable=False)
e5 = define_encoder_block(e4, 512, block_name='e5', trainable=False)
# the encoder blocks above are frozen via trainable=False passed to define_encoder_block
# bottleneck, no batch norm and relu
b = Conv2D(512, (4, 4), strides=(2, 2), padding='same',
kernel_initializer=init, name='bottleneck_RENAMED')(e5)
b = Activation('relu')(b)
# decoder model
d3 = decoder_block(b, e5, 512, block_name='d1_RENAMED')
d4 = decoder_block(d3, e4, 512, block_name='d2_RENAMED', dropout=False)
d5 = decoder_block(d4, e3, 256, block_name='d3_RENAMED', dropout=False)
d6 = decoder_block(d5, e2, 128, block_name='d4_RENAMED', dropout=False)
d7 = decoder_block(d6, e1, 64, block_name='d5_RENAMED', dropout=False)
# output
g = Conv2DTranspose(32, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(d7)
g2 = Conv2D(1, (1, 1), padding='same')(g)
out_image = Activation('sigmoid')(g2)
# define model
model = Model(in_image, out_image)
return model
#########################################################################
# LOAD TEST DATA
imgs_test = load_test_data()
imgs_test = preprocess(imgs_test)
imgs_test = rescale_intensity(imgs_test, out_range=(0, 1))
# CREATING THE MODEL
model = define_transfer_generator()
# LOAD PRE-TRAINED WEIGHTS
model.load_weights('weights.h5')
# PREDICT FROM THE MODEL
imgs_mask_test = model.predict(imgs_test, verbose=1)
np.save('predicted_masks.npy', imgs_mask_test)
|
from sqlalchemy.sql.expression import func
from hokuto_flask.extensions import db
from ..relationships import character_category_association
class CategoryModel(db.Model):
"""Database model for a category in the Hokuto no Ken universe.
"""
__tablename__ = "categories"
id = db.Column(db.String(32), primary_key=True, autoincrement=False)
name = db.Column(db.String(64), index=True, unique=True, nullable=False)
url = db.Column(db.String(128), nullable=True)
characters = db.relationship(
"CharacterModel",
secondary=character_category_association,
back_populates="categories",
)
def __repr__(self):
return f"<{self.__class__.__name__} {self.id} {self.name}>"
@classmethod
def find_all(cls):
return cls.query.all()
@classmethod
def find_by_id(cls, _id):
return cls.query.filter_by(id=_id).first()
@classmethod
def random(cls):
return cls.query.order_by(func.random()).first()
|
def collapse(nu_fe_sa):
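"""Collapse rows that share the same (level-0) index label into a single row
by taking the element-wise median, reporting the shape before and after."""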
print("Before collapsing: {}".format(nu_fe_sa.shape))
nu_fe_sa = nu_fe_sa.groupby(level=0).median()
print("After: {}".format(nu_fe_sa.shape))
return nu_fe_sa
|
import pickle
import shelve
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.screenmanager import Screen
from kivy.uix.scrollview import ScrollView
class SketchyLoad(Screen):
root = None
def __init__(self, **kwargs):
super(SketchyLoad, self).__init__(**kwargs)
self.sketchyloadbox = SketchyBox()
self.sketchyloadbox.screen = self
self.add_widget(self.sketchyloadbox)
def on_enter(self, *args):
super(SketchyLoad, self).on_enter(*args)
self.sketchyloadbox.open_load()
class SketchyBox(BoxLayout):
isopen = False
sketchybook = []
def __init__(self, **kwargs):
super(SketchyBox, self).__init__(**kwargs)
self.orientation = 'vertical'
self.topbox = BoxLayout()
self.topbox.orientation = 'vertical'
self.topbox.size_hint_y = None
self.topbox.bind(minimum_height=self.topbox.setter('height'))
self.scrollview = ScrollView(size_hint=(1, 0.9), do_scroll_x=False, do_scroll_y=True)
self.scrollview.add_widget(self.topbox)
self.bottombox = BoxLayout()
self.bottombox.orientation = 'horizontal'
self.bottombox.size_hint_y = 0.1
self.bottombox.load_examples_button = Button()
self.bottombox.load_examples_button.text = 'Load Examples'
self.bottombox.load_examples_button.bind(on_press=self.examples_load)
self.bottombox.load_examples_button.size_hint_x = .20
self.bottombox.load_autosaves_button = Button()
self.bottombox.load_autosaves_button.text = 'Load Auto-Saves'
self.bottombox.load_autosaves_button.bind(on_press=self.auto_saves_load)
self.bottombox.load_autosaves_button.size_hint_x = .20
self.bottombox.b1 = Button()
self.bottombox.b1.text = 'Close'
self.bottombox.b1.bind(on_press=self.close_load)
self.bottombox.b1.size_hint_x = .60
self.bottombox.add_widget(self.bottombox.load_examples_button)
self.bottombox.add_widget(self.bottombox.load_autosaves_button)
self.bottombox.add_widget(self.bottombox.b1)
self.add_widget(self.scrollview)
self.add_widget(self.bottombox)
self.bind(height=self.height_callback)
def height_callback(self, target, value):
self.bottombox.size_hint_y = 60 / value
def close_load(self, target):
if target.text != 'Close':
data = pickle.loads(self.sketchybook[target.text])
self.parent.parent.load_data(data)
if self.isopen:
self.sketchybook.close()
self.topbox.clear_widgets()
self.isopen = False
self.parent.parent.current = 'main'
def open_load(self, load_type='normal'):
if not self.isopen:
if load_type == 'examples':
self.sketchybook = shelve.open('data/SketchyExamples')
elif load_type == 'autosaves':
self.sketchybook = shelve.open('data/AutoSaves')
else:
self.sketchybook = shelve.open('data/SketchyBook')
i = 0
for save in self.sketchybook:
save_line = BoxLayout(size_hint_y=None, height=40)
load_button = Button(text=save, size_hint_x=.9)
delete_button = Button(text='Delete', size_hint_x=.1)
save_line.add_widget(load_button)
save_line.add_widget(delete_button)
self.topbox.add_widget(save_line, i)
delete_button.save = save
delete_button.save_line = save_line
load_button.bind(on_press=self.close_load)
delete_button.bind(on_release=self.delete_save)
i += 1
self.isopen = True
def examples_load(self, target):
if self.isopen:
self.sketchybook.close()
self.isopen = False
self.topbox.clear_widgets()
self.open_load('examples')
def auto_saves_load(self, target):
if self.isopen:
self.sketchybook.close()
self.isopen = False
self.topbox.clear_widgets()
self.open_load('autosaves')
def delete_save(self, target):
del self.sketchybook[target.save]
self.topbox.remove_widget(target.save_line)
|
import setuptools
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
setuptools.setup(
name="thompyson",
version="1.0.0",
author="William Angus",
author_email="william@ngus.co.uk",
description="Utility for computations in R J Thompson's groups F and T.",
long_description=long_description,
long_description_content_type="text/markdown",
url = "https://github.com/WilliamAngus/thompyson",
project_urls={
"Bug Tracker" : "https://github.com/WilliamAngus/thompyson/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: The Unlicense (Unlicense)",
"Operating System :: OS Independent",
],
package_dir={"" : "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.6",
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-11-22 01:52
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comments', '0003_comment_offset'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='downvoters',
field=models.ManyToManyField(blank=True, related_name='downvoted_comments', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='comment',
name='upvoters',
field=models.ManyToManyField(blank=True, related_name='upvoted_comments', to=settings.AUTH_USER_MODEL),
),
]
|
from flask_socketio import SocketIO, emit
lastDetection = None
def initSocketIO(app):
global socketio
socketio = SocketIO(app, cors_allowed_origins="*")
return socketio
def getSocketIO():
return socketio
def emitEvent(event, message):
socketio.emit(event, message)
print(event, message)
def emitDetectionEvent(jumpmasterDetection=False, championDetection=False):
global lastDetection
currentDetection = 'champion' if championDetection else 'jumpmaster'
# if last detection matches current detection, do nothing
if jumpmasterDetection is True and currentDetection != lastDetection:
emitEvent("detection_log",
{"message": "Looking for jumpmaster...", "type": "jumpmaster", "loading": True})
lastDetection = currentDetection
if championDetection is True and currentDetection != lastDetection:
emitEvent("detection_log",
{"message": "Looking for champion selection...", "type": "champion", "loading": True})
lastDetection = currentDetection
def resetDetection():
global lastDetection
lastDetection = None
|
from neo4j.v1 import GraphDatabase, basic_auth
from .neo4j_wrapper import Neo4jWrapper
from .neo4j_init_wrapper import Neo4jInitWrapper
class WrapperFactory:
@staticmethod
def build_neo4j_wrapper(host, port, username, password):
connect = f'bolt://{host}:{port}'
auth = basic_auth(username, password)
return Neo4jWrapper(GraphDatabase.driver, connect, auth)
@staticmethod
def build_neo4j_init_wrapper(host, port, username, password):
connect = f'bolt://{host}:{port}'
auth = basic_auth(username, password)
return Neo4jInitWrapper(GraphDatabase.driver, connect, auth)
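# Example usage (a sketch; the host, port, and credentials below are
# placeholders, not values defined anywhere in this project):
#
#   wrapper = WrapperFactory.build_neo4j_wrapper('localhost', 7687, 'neo4j', 'secret')
#   init_wrapper = WrapperFactory.build_neo4j_init_wrapper('localhost', 7687, 'neo4j', 'secret')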
|
from streamer import *
from time import sleep
from helper import *
import os
import re
from threadclient import ThreadClient
from ftplib import error_perm
from zfec import filefec
import random
import urllib2
import csv
import math
import sys
import thread
import string
import Queue  # used by cache_chunks_to_request
# Debugging MSG
DEBUGGING_MSG = True
VLC_PLAYER_USE = False
# Topology
USER_TOPOLOGY_UPDATE = True
T_choke = 1 # Choke period
T_choke2 = 2 # Choke period
eps_choke = 1 # Choke parameter
# Global parameters
CACHE_DOWNLOAD_DURATION = 8 # sec
SERVER_DOWNLOAD_DURATION = 2 # sec
DECODE_WAIT_DURATION = 0.1 # sec
tracker_address = load_tracker_address()
num_of_caches = 5
class P2PUser():
#def __init__(self, tracker_address, video_name, packet_size):
def __init__(self, tracker_address, video_name, user_name):
""" Create a new P2PUser. Set the packet size, instantiate the manager,
and establish clients. Currently, the clients are static but will
become dynamic when the tracker is implemented.
"""
self.packet_size = 1000
self.user_name = user_name
self.my_ip = user_name
self.my_port = 0
register_to_tracker_as_user(tracker_address, self.my_ip, self.my_port, video_name)
# Connect to the server.
# A response is expected for each chunk downloaded from the server, so the
# respond flag is set on the server connection. Note that this flag should
# **NOT** be set for the caches, as cache downloads are simply aborted after
# 8 seconds with no response expected. After the cache download period, the
# files themselves are checked to see what remains to be downloaded from
# the server.
server_ip_address = retrieve_server_address_from_tracker(tracker_address)
self.server_client = ThreadClient(server_ip_address, self.packet_size)
self.server_client.set_respond_RETR(True)
self.tracker_address = tracker_address
self.clients = []
self.num_of_caches = num_of_caches
self.manager = None # TODO: create the manager class to decode/play
def VLC_start_video(self, video_path):
# Put the file into the queue and play it
url = 'http://127.0.0.1:8080/requests/status.xml?command=in_play&input=file://'
url = url + video_path
print '[user.py] ', url
urllib2.urlopen(url).read()
def VLC_pause_video(self):
# Pause or play it
url = 'http://127.0.0.1:8080/requests/status.xml?command=pl_pause'
print '[user.py] ', url
urllib2.urlopen(url).read()
def play(self, video_name, frame_number):
""" Starts playing the video as identified by either name or number and
begins handling the data connections necessary to play the video,
starting at frame_number (the 10-second section of time within the
vid).
"""
# inform the web browser we have started playing
if not self.manager.playing():
self.manager.start_playing()
# TODO: add decoding.
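# NOTE: the nested helper below appears to be leftover, unreachable code; it
# references names (config_file, self.movies_LUT) that are not defined in this
# scope and is never called.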
def connected_caches():
f = open(config_file)
fs = csv.reader(f, delimiter = ' ')
for row in fs:
if (DEBUGGING_MSG): print '[server.py] Loading movie : ', row
movie_name = row[0]
self.movies_LUT[movie_name] = (int(row[1]), int(row[2]), int(row[3]), int(row[4]), int(row[5]), int(row[6]))
def download(self, video_name, start_frame):
connected_caches = []
not_connected_caches = []
# Connect to the caches
cache_ip_addr = retrieve_caches_address_from_tracker(self.tracker_address, 100, self.user_name)
self.cache_ip_addr = cache_ip_addr
#connected_caches = set([])
self.num_of_caches = min(self.num_of_caches, len(cache_ip_addr))
#connected_caches_index = [0] * self.num_of_caches
#not_connected_caches = set(range(len(cache_ip_addr)))
choke_state = 0 # 0 : usual state, 1 : overhead state
choke_ct = 0
for i in range(self.num_of_caches):
each_client = ThreadClient(cache_ip_addr[i], self.packet_size, i)
self.clients.append(each_client)
connected_caches.append(each_client)
print '[user.py] ', i, 'th connection is CONNECTED : ' , cache_ip_addr[i]
for i in range(self.num_of_caches, len(cache_ip_addr)):
each_client = (cache_ip_addr[i], self.packet_size, i)
not_connected_caches.append(each_client)
print '[user.py] ', i, 'th connection is RESERVED: ' , cache_ip_addr[i]
available_chunks = set([])
self.clients[0].put_instruction('VLEN file-%s' % (video_name))
vlen_str = self.clients[0].get_response().split('\n')[0]
vlen_items = vlen_str.split('&')
print "VLEN: ", vlen_items
num_frames = int(vlen_items[0])
base_file_name = video_name + '.flv'
try:
os.mkdir('video-' + video_name)
except:
pass
# Set internal chunk_size through putting an internal instruction into
# the queue.
base_file = open('video-' + video_name + '/' + base_file_name, 'ab')
base_file_full_path = os.path.abspath('video-' + video_name + '/' + base_file_name)
frame_number = 1
for i in range(30):
sys.stdout.flush()
effective_rates = [0]*len(self.clients)
assigned_chunks = [0]*len(self.clients)
if frame_number < num_frames: # Usual frames
inst_INTL = 'INTL ' + 'CNKN ' + vlen_items[2] # chunk size of typical frame (not last one)
for client in self.clients:
client.put_instruction(inst_INTL)
self.server_client.put_instruction(inst_INTL)
else: # Last frame
inst_INTL = 'INTL ' + 'CNKN ' + vlen_items[3] # chunk size of last frame
for client in self.clients:
client.put_instruction(inst_INTL)
self.server_client.put_instruction(inst_INTL)
print '[user.py] frame_number : ', frame_number
filename = 'file-' + video_name + '.' + str(frame_number)
# directory for this frame
folder_name = 'video-' + video_name + '/' + video_name + '.' + str(frame_number) + '.dir/'
# FAKE USER is deleting all chunks and receiving them again
chunk_delete_all_in_frame_dir(folder_name)
print '[user.py] deleting successfully...'
# get available chunks lists from cache A and B.
inst_CNKS = 'CNKS ' + filename
inst_RETR = 'RETR ' + filename
inst_NOOP = 'NOOP'
###### DECIDING WHICH CHUNKS TO DOWNLOAD FROM CACHES: TIME 0 ######
available_chunks = [0]*len(self.clients) # available_chunks[i] = cache i's available chunks
rates = [0]*len(self.clients) # rates[i] = cache i's offered rate
union_chunks = [] # union of all available indices
for i in range(len(self.clients)):
client = self.clients[i]
client.put_instruction(inst_CNKS)
return_str = client.get_response().split('&')
if return_str[0] == '':
available_chunks[i] = []
else:
available_chunks[i] = map(str, return_str[0].split('%'))
for j in range(len(available_chunks[i])):
available_chunks[i][j] = available_chunks[i][j].zfill(2)
rates[i] = int(return_str[1])
union_chunks = list( set(union_chunks) | set(available_chunks[i]) )
## index assignment here
# Assign chunks to cache using cache_chunks_to_request.
print '[user.py] Rates ', rates
print '[user.py] Available chunks', available_chunks
assigned_chunks = cache_chunks_to_request(available_chunks, rates)
effective_rates = [0]*len(rates)
for i in range(len(rates)):
effective_rates[i] = len(assigned_chunks[i])
chosen_chunks = [j for i in assigned_chunks for j in i]
flag_deficit = (sum(effective_rates) < 20) # True if user needs more rate from caches
# request assigned chunks
for i in range(len(self.clients)):
client = self.clients[i]
client_request_string = '%'.join(assigned_chunks[i])
client_request_string = client_request_string + '&' + str(int(flag_deficit))
print "[user.py] [Client " + str(i) + "] flag_deficit: ", int(flag_deficit), \
", Assigned chunks: ", assigned_chunks[i], \
", Request string: ", client_request_string
client.put_instruction(inst_RETR + '.' + client_request_string)
###### DECIDING CHUNKS THAT HAVE TO BE DOWNLOADED FROM CACHE: TIME 0 ######
# Before CACHE_DOWNLOAD_DURATION, also start requesting chunks from server.
server_request = []
chosen_chunks = list(chosen_chunks)
num_chunks_rx_predicted = len(chosen_chunks)
server_request = chunks_to_request(chosen_chunks, range(0, 40), 20 - num_chunks_rx_predicted)
num_of_chks_from_server = len(server_request)
if num_of_chks_from_server == 0:
self.server_client.put_instruction(inst_NOOP)
print '[user.py] Caches handling 20 chunks, so no request to server. Sending a NOOP'
else:
server_request_string = '%'.join(server_request)
server_request_string = server_request_string + '&' + str(1) ## DOWNLOAD FROM SERVER : binary_g = 1
self.server_client.put_instruction(inst_RETR + '.' + server_request_string)
if(DEBUGGING_MSG):
print "[user.py] Requesting from server: ", server_request, ", Request string: ", server_request_string
#update_server_load(tracker_address, video_name, num_of_chks_from_server)
sleep(CACHE_DOWNLOAD_DURATION)
###### STOPPING CACHE DOWNLOADS: TIME 8 (CACHE_DOWNLOAD_DURATION) ######
# immediately stop cache downloads.
for client in self.clients:
try:
client.client.abort()
except:
print "[user.py] Cache connections suddenly aborted. Stopping all download."
return
print "[user.py] Cache connections aborted for frame %d" % (frame_number)
###### REQUEST ADDITIONAL CHUNKS FROM SERVER: TIME 8 (CACHE_DOWNLOAD_DURATION) ######
# Request from server remaining chunks missing
# Look up the download directory and count the downloaded chunks
chunk_nums_rx = chunk_nums_in_frame_dir(folder_name)
if (DEBUGGING_MSG):
print "%d chunks received so far for frame %d: " % (len(chunk_nums_rx), frame_number)
print chunk_nums_rx
# Add the chunks that have already been requested from server
chunk_nums_rx = list (set(chunk_nums_in_frame_dir(folder_name)) | set(server_request))
addtl_server_request = []
num_chunks_rx = len(chunk_nums_rx)
if (num_chunks_rx >= 20):
print "[user.py] No additional chunks to download from the server. Sending a NOOP"
self.server_client.put_instruction(inst_NOOP)
else:
addtl_server_request = chunks_to_request(chunk_nums_rx, range(0, 40), 20 - num_chunks_rx)
if addtl_server_request:
addtl_server_request_string = '%'.join(addtl_server_request)
# for the server request the appended flag is binary_g = 1 (download from server)
addtl_server_request_string = addtl_server_request_string + '&' + str(1) ## DOWNLOAD FROM SERVER : binary_g = 1
self.server_client.put_instruction(inst_RETR + '.' + addtl_server_request_string)
if(DEBUGGING_MSG):
print "[user.py] Requesting from server: ", addtl_server_request
elif (DEBUGGING_MSG):
print "No unique chunks from server requested."
###### WAIT FOR CHUNKS FROM SERVER TO FINISH DOWNLOADING: TIME 10 ######
sleep(SERVER_DOWNLOAD_DURATION)
if (DEBUGGING_MSG):
print "[user.py] Waiting to receive all elements from server."
if frame_number > start_frame and (server_request or addtl_server_request) and VLC_PLAYER_USE:
# Need to pause it!
self.VLC_pause_video()
if server_request:
resp_RETR = self.server_client.get_response()
parsed_form = parse_chunks(resp_RETR)
fname, framenum, binary_g, chunks = parsed_form
print "[user.py] Downloaded chunks from server: ", chunks
if addtl_server_request:
resp_RETR = self.server_client.get_response()
parsed_form = parse_chunks(resp_RETR)
fname, framenum, binary_g, chunks = parsed_form
print "[user.py] Downloaded chunks from server: ", chunks
# Now play it
if frame_number > start_frame and (server_request or addtl_server_request) and VLC_PLAYER_USE:
self.VLC_pause_video()
chunk_nums = chunk_nums_in_frame_dir(folder_name)
num_chunks_rx = len(chunk_nums)
if num_chunks_rx >= 20 and DEBUGGING_MSG:
print "[user.py] Received 20 packets"
elif num_chunks_rx < 20:
print "[user.py] Did not receive 20 packets for this frame."
# abort the connection to the server
self.server_client.client.abort()
# put together chunks into single frame; then concatenate onto original file.
print 'about to decode...'
chunksList = chunk_files_in_frame_dir(folder_name)
if frame_number != start_frame:
print 'size of base file:', os.path.getsize('video-' + video_name + '/' + base_file_name)
print 'trying to decode'
#filefec.decode_from_files(base_file, chunksList)
print 'decoded. Size of base file =', os.path.getsize('video-' + video_name + '/' + base_file_name)
if frame_number == 1 and VLC_PLAYER_USE:
self.VLC_start_video(base_file_full_path)
if USER_TOPOLOGY_UPDATE:
if choke_state == 0: # Normal state
print '[user.py] Normal state : ', choke_ct
choke_ct += 1
if choke_ct == T_choke:
choke_ct = 0
if len(not_connected_caches) == 0:
pass
else: # Add a new cache temporarily
new_cache_index = random.sample(range(len(not_connected_caches)), 1)
if new_cache_index:  # random.sample returned a candidate index
new_cache_meta = not_connected_caches[new_cache_index[0]]
new_cache = ThreadClient(*new_cache_meta)
self.clients.append(new_cache)
connected_caches.append(new_cache)
not_connected_caches.remove(new_cache_meta)
print '[user.py] Topology Update : Temporarily added ', new_cache.address
choke_state = 1 # Now, move to transitional state
choke_ct = 0
print '[user.py] Topology Update : Now the state is changed to overhead state'
#print '[user.py]', connected_caches, not_connected_caches, self.clients
print '[user.py] connected caches', self.clients
elif choke_state == 1: # Overhead state
print '[user.py] Overhead state : ', choke_ct
choke_ct += 1
if choke_ct == T_choke2: # Temporary period to spend with temporarily added node
rate_vector = [0] * len(self.clients)
p_vector = [0] * len(self.clients)
for i in range(len(self.clients)):
rate_vector[i] = len(assigned_chunks[i])
p_vector[i] = math.exp( -eps_choke * rate_vector[i])
# >>> cdf = [(1, 0), (2,0.1), (3,0.15), (4,0.2), (5,0.4), (6,0.8)]
p_sum = sum(p_vector)
for i in range(len(self.clients)):
p_vector[i] /= p_sum
cdf = [(0,0)] * len(self.clients)
cdf[0] = (0, 0)
for i in range(1, len(self.clients)):
cdf[i] = (i, cdf[i-1][1] + p_vector[i-1])
print '[user.py] cdf :', cdf
client_index = max(i for r in [random.random()] for i,c in cdf if c <= r) # http://stackoverflow.com/questions/4265988/generate-random-numbers-with-a-given-numerical-distribution
# client_index = rate_vector.index(min(rate_vector))
removed_cache = self.clients[client_index]
removed_cache.put_instruction('QUIT')
self.clients.remove(removed_cache)
connected_caches.remove(removed_cache)
new_cache_meta = (self.cache_ip_addr[client_index], 1000, client_index)
not_connected_caches.append(new_cache_meta)
print '[user.py] Topology Update : ', removed_cache.address, 'is choked.'
choke_state = 0 # Now, move to normal state
choke_ct = 0
def disconnect(self, tracker_address, video_name, user_name):
for client in self.clients:
client.put_instruction('QUIT')
self.server_client.put_instruction('QUIT')
print "[user.py] Closed all connections."
my_ip = user_name
my_port = 0
my_video_name = video_name
deregister_to_tracker_as_user(tracker_address, my_ip, my_port, video_name)
print "[user.py] BYE"
sys.stdout.flush()
def cache_chunks_to_request(available_chunks, rates):
"""
(a) Sort the packets by their rarity (defined by the presence of packets in multiple caches).
(b) Starting with the rarest first, assign the rarest packet to the cache with the lowest used BW ratio currently. Pop off the rarest packet and decrement the bandwidth of the assigned cache.
(c) Repeat until all packets have been assigned a cache source, or until 20 chunks have been assigned.
"""
# index assignment here
chunk_locs = {}
assigned_chunks = [[] for _ in range(len(rates))]  # one independent list per cache
for i in range(len(rates)):
for j in available_chunks[i]:
if rates[i]:
if j in chunk_locs:
(chunk_locs[j]).append(i)
else:
chunk_locs[j] = [i]
# sort chunks by rarest first
chunk_freqs = Queue.PriorityQueue()
for chunk in chunk_locs:
chunk_freqs.put((len(chunk_locs[chunk]), chunk))
# from rarest first, make chunk request list by assigning next available chunk
# to carrier with the lowest ratio of used cache rate so far (fairness)
for i in range(20):
if chunk_freqs.empty():
break
freq, chunk = chunk_freqs.get()
best_location = -1
for cache in chunk_locs[chunk]:
ratio_bw_used = float(len(assigned_chunks[cache]))/rates[cache]
if best_location == -1:
if ratio_bw_used < 1:
best_location = cache
# print "No best_location set for chunk %s, as ratio bw is %d: %f" % (chunk, cache, ratio_bw_used)
else:
best_locations_ratio_bw = float(len(assigned_chunks[best_location]))/rates[best_location]
# print "%d:%f vs. %d:current best %f" % (cache, ratio_bw_used, best_location, best_locations_ratio_bw)
if ratio_bw_used < best_locations_ratio_bw:
best_location = cache
# print "best location for chunk %s determined to be %d" % (chunk, best_location)
if best_location == -1:
continue
if not assigned_chunks[best_location]:
assigned_chunks[best_location] = [str(chunk)]
else:
(assigned_chunks[best_location]).append(str(chunk))
return assigned_chunks
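# Illustrative sketch of cache_chunks_to_request (the inputs below are made-up
# values, not data from a real run). With two caches offering
#   available_chunks = [['01', '02', '03'], ['02', '03', '04']]
#   rates            = [2, 2]
# chunk '01' is held only by cache 0 and '04' only by cache 1, so these rarest
# chunks are pinned to their single holders first; the shared chunks '02' and
# '03' then go to whichever cache has the lower used-bandwidth ratio, giving
#   assigned_chunks == [['01', '02'], ['04', '03']]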
def chunks_to_request(A, B, num_ret):
""" Find the elements in B that are not in A. From these elements, return a
randomized set that has maximum num_ret elements.
Example: A = {1, 3, 5, 7, 9}, B = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14},
num_ret = 5 possible element sets: {2, 4, 6, 8, 10}, {2, 4, 6, 8, 12}, and
so on.
For now, it may just be easiest to take the first num_ret elements of the
non-overlapping set instead of randomizing the elements to choose from the
non-overlapping set. """
str_A = map(str, A)
str_B = map(str, B)
for i in range(len(str_A)):
str_A[i] = str_A[i].zfill(2)
for i in range(len(str_B)):
str_B[i] = str_B[i].zfill(2)
#print str_A
#print str_B
set_A, set_B = set(str_A), set(str_B) # map all elts to str
list_diff = list(set_B - set_A)
list_diff.sort()
return list_diff[:min(len(set_B - set_A), num_ret)]
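# For example (values made up for illustration): A = [1, 3, 5, 7, 9],
# B = range(0, 15), num_ret = 5 returns the five smallest zero-padded chunk
# ids present in B but not in A, i.e. ['00', '02', '04', '06', '08'].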
def thread_duration_control(test_user, tracker_address, video_name, user_name):
# call using:
# thread.start_new_thread(thread_duration_control, (test_user, tracker_address, video_name, user_name))
mu = 20
close_time = random.expovariate(1/float(mu))
print "Waiting %f until close." % close_time
sleep(close_time)
print "Countdown finished. Closing connection."
test_user.disconnect(tracker_address, video_name, user_name)
def normListSumTo(L, sumTo=1):
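"""Scale the entries of L so that they sum to sumTo (1 by default)."""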
total = reduce(lambda x, y: x + y, L)
return [x / (total * 1.0) * sumTo for x in L]
def zipfCDF(n, zipf_param=1):
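"""Build the CDF of a Zipf-like popularity distribution over n items:
item i+1 gets probability proportional to (i+1) ** -zipf_param, and the
returned list holds (index, cumulative probability of all lower-ranked items)
pairs, suitable for inverse-CDF sampling."""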
a = [0]*n
for i in range(0,n):
a[i] = pow(i+1, -zipf_param)
b = normListSumTo(a)
c = [(0,0)]*n
print b
print c
for i in range(1,n):
c[i] = (i, c[i-1][1] + b[i-1])
return c
def main():
mu = 20
# Create unique user ID
user_id = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(6))
print '[user.py]', tracker_address
# Discover movies.
movie_LUT = retrieve_MovieLUT_from_tracker(tracker_address)
movies = movie_LUT.movies_LUT.keys()
number_of_videos = 20
movies = []
movies.append('hyunah')
for i in range(2, number_of_videos + 1):
movies.append('hyunah' + str(i))
zipf_param = 1
cdf = zipfCDF(len(movies), zipf_param) # Popularity CDF
print '[user.py] Popularity cdf', cdf
for i in range(2):
wait_time = random.expovariate(1/float(mu))
print '[user.py] i == ', i
sleep(wait_time)
os.system("rm -r video*")
video_index = max(i for r in [random.random()] for i,c in cdf if c <= r) # http://stackoverflow.com/questions/4265988/generate-random-numbers-with-a-given-numerical-distribution
if i == 0:
video_name = movies[video_index]
elif i == 1:
video_name = movies[19-video_index]
user_name = 'user-' + user_id
print '[user.py] Starting to watch video %s' % video_name
sys.stdout.flush()
test_user = P2PUser(tracker_address, video_name, user_name)
test_user.download(video_name, 1)
test_user.disconnect(tracker_address, video_name, user_name)
print '[user.py] Download of video %s finished.' % video_name
sys.stdout.flush()
if __name__ == "__main__":
main()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ReplicationInstanceArgs', 'ReplicationInstance']
@pulumi.input_type
class ReplicationInstanceArgs:
def __init__(__self__, *,
replication_instance_class: pulumi.Input[str],
allocated_storage: Optional[pulumi.Input[int]] = None,
allow_major_version_upgrade: Optional[pulumi.Input[bool]] = None,
auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
multi_az: Optional[pulumi.Input[bool]] = None,
preferred_maintenance_window: Optional[pulumi.Input[str]] = None,
publicly_accessible: Optional[pulumi.Input[bool]] = None,
replication_instance_identifier: Optional[pulumi.Input[str]] = None,
replication_subnet_group_identifier: Optional[pulumi.Input[str]] = None,
resource_identifier: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationInstanceTagArgs']]]] = None,
vpc_security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ReplicationInstance resource.
"""
pulumi.set(__self__, "replication_instance_class", replication_instance_class)
if allocated_storage is not None:
pulumi.set(__self__, "allocated_storage", allocated_storage)
if allow_major_version_upgrade is not None:
pulumi.set(__self__, "allow_major_version_upgrade", allow_major_version_upgrade)
if auto_minor_version_upgrade is not None:
pulumi.set(__self__, "auto_minor_version_upgrade", auto_minor_version_upgrade)
if availability_zone is not None:
pulumi.set(__self__, "availability_zone", availability_zone)
if engine_version is not None:
pulumi.set(__self__, "engine_version", engine_version)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if multi_az is not None:
pulumi.set(__self__, "multi_az", multi_az)
if preferred_maintenance_window is not None:
pulumi.set(__self__, "preferred_maintenance_window", preferred_maintenance_window)
if publicly_accessible is not None:
pulumi.set(__self__, "publicly_accessible", publicly_accessible)
if replication_instance_identifier is not None:
pulumi.set(__self__, "replication_instance_identifier", replication_instance_identifier)
if replication_subnet_group_identifier is not None:
pulumi.set(__self__, "replication_subnet_group_identifier", replication_subnet_group_identifier)
if resource_identifier is not None:
pulumi.set(__self__, "resource_identifier", resource_identifier)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if vpc_security_group_ids is not None:
pulumi.set(__self__, "vpc_security_group_ids", vpc_security_group_ids)
@property
@pulumi.getter(name="replicationInstanceClass")
def replication_instance_class(self) -> pulumi.Input[str]:
return pulumi.get(self, "replication_instance_class")
@replication_instance_class.setter
def replication_instance_class(self, value: pulumi.Input[str]):
pulumi.set(self, "replication_instance_class", value)
@property
@pulumi.getter(name="allocatedStorage")
def allocated_storage(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "allocated_storage")
@allocated_storage.setter
def allocated_storage(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "allocated_storage", value)
@property
@pulumi.getter(name="allowMajorVersionUpgrade")
def allow_major_version_upgrade(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "allow_major_version_upgrade")
@allow_major_version_upgrade.setter
def allow_major_version_upgrade(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_major_version_upgrade", value)
@property
@pulumi.getter(name="autoMinorVersionUpgrade")
def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "auto_minor_version_upgrade")
@auto_minor_version_upgrade.setter
def auto_minor_version_upgrade(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_minor_version_upgrade", value)
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "availability_zone")
@availability_zone.setter
def availability_zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "availability_zone", value)
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "engine_version")
@engine_version.setter
def engine_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "engine_version", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter(name="multiAZ")
def multi_az(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "multi_az")
@multi_az.setter
def multi_az(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "multi_az", value)
@property
@pulumi.getter(name="preferredMaintenanceWindow")
def preferred_maintenance_window(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "preferred_maintenance_window")
@preferred_maintenance_window.setter
def preferred_maintenance_window(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "preferred_maintenance_window", value)
@property
@pulumi.getter(name="publiclyAccessible")
def publicly_accessible(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "publicly_accessible")
@publicly_accessible.setter
def publicly_accessible(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "publicly_accessible", value)
@property
@pulumi.getter(name="replicationInstanceIdentifier")
def replication_instance_identifier(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "replication_instance_identifier")
@replication_instance_identifier.setter
def replication_instance_identifier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "replication_instance_identifier", value)
@property
@pulumi.getter(name="replicationSubnetGroupIdentifier")
def replication_subnet_group_identifier(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "replication_subnet_group_identifier")
@replication_subnet_group_identifier.setter
def replication_subnet_group_identifier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "replication_subnet_group_identifier", value)
@property
@pulumi.getter(name="resourceIdentifier")
def resource_identifier(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resource_identifier")
@resource_identifier.setter
def resource_identifier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_identifier", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationInstanceTagArgs']]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationInstanceTagArgs']]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="vpcSecurityGroupIds")
def vpc_security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "vpc_security_group_ids")
@vpc_security_group_ids.setter
def vpc_security_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "vpc_security_group_ids", value)
warnings.warn("""ReplicationInstance is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
class ReplicationInstance(pulumi.CustomResource):
warnings.warn("""ReplicationInstance is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allocated_storage: Optional[pulumi.Input[int]] = None,
allow_major_version_upgrade: Optional[pulumi.Input[bool]] = None,
auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
multi_az: Optional[pulumi.Input[bool]] = None,
preferred_maintenance_window: Optional[pulumi.Input[str]] = None,
publicly_accessible: Optional[pulumi.Input[bool]] = None,
replication_instance_class: Optional[pulumi.Input[str]] = None,
replication_instance_identifier: Optional[pulumi.Input[str]] = None,
replication_subnet_group_identifier: Optional[pulumi.Input[str]] = None,
resource_identifier: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ReplicationInstanceTagArgs']]]]] = None,
vpc_security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
Resource Type definition for AWS::DMS::ReplicationInstance
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ReplicationInstanceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::DMS::ReplicationInstance
:param str resource_name: The name of the resource.
:param ReplicationInstanceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ReplicationInstanceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allocated_storage: Optional[pulumi.Input[int]] = None,
allow_major_version_upgrade: Optional[pulumi.Input[bool]] = None,
auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
multi_az: Optional[pulumi.Input[bool]] = None,
preferred_maintenance_window: Optional[pulumi.Input[str]] = None,
publicly_accessible: Optional[pulumi.Input[bool]] = None,
replication_instance_class: Optional[pulumi.Input[str]] = None,
replication_instance_identifier: Optional[pulumi.Input[str]] = None,
replication_subnet_group_identifier: Optional[pulumi.Input[str]] = None,
resource_identifier: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ReplicationInstanceTagArgs']]]]] = None,
vpc_security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
pulumi.log.warn("""ReplicationInstance is deprecated: ReplicationInstance is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""")
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ReplicationInstanceArgs.__new__(ReplicationInstanceArgs)
__props__.__dict__["allocated_storage"] = allocated_storage
__props__.__dict__["allow_major_version_upgrade"] = allow_major_version_upgrade
__props__.__dict__["auto_minor_version_upgrade"] = auto_minor_version_upgrade
__props__.__dict__["availability_zone"] = availability_zone
__props__.__dict__["engine_version"] = engine_version
__props__.__dict__["kms_key_id"] = kms_key_id
__props__.__dict__["multi_az"] = multi_az
__props__.__dict__["preferred_maintenance_window"] = preferred_maintenance_window
__props__.__dict__["publicly_accessible"] = publicly_accessible
if replication_instance_class is None and not opts.urn:
raise TypeError("Missing required property 'replication_instance_class'")
__props__.__dict__["replication_instance_class"] = replication_instance_class
__props__.__dict__["replication_instance_identifier"] = replication_instance_identifier
__props__.__dict__["replication_subnet_group_identifier"] = replication_subnet_group_identifier
__props__.__dict__["resource_identifier"] = resource_identifier
__props__.__dict__["tags"] = tags
__props__.__dict__["vpc_security_group_ids"] = vpc_security_group_ids
__props__.__dict__["replication_instance_private_ip_addresses"] = None
__props__.__dict__["replication_instance_public_ip_addresses"] = None
super(ReplicationInstance, __self__).__init__(
'aws-native:dms:ReplicationInstance',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ReplicationInstance':
"""
Get an existing ReplicationInstance resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ReplicationInstanceArgs.__new__(ReplicationInstanceArgs)
__props__.__dict__["allocated_storage"] = None
__props__.__dict__["allow_major_version_upgrade"] = None
__props__.__dict__["auto_minor_version_upgrade"] = None
__props__.__dict__["availability_zone"] = None
__props__.__dict__["engine_version"] = None
__props__.__dict__["kms_key_id"] = None
__props__.__dict__["multi_az"] = None
__props__.__dict__["preferred_maintenance_window"] = None
__props__.__dict__["publicly_accessible"] = None
__props__.__dict__["replication_instance_class"] = None
__props__.__dict__["replication_instance_identifier"] = None
__props__.__dict__["replication_instance_private_ip_addresses"] = None
__props__.__dict__["replication_instance_public_ip_addresses"] = None
__props__.__dict__["replication_subnet_group_identifier"] = None
__props__.__dict__["resource_identifier"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["vpc_security_group_ids"] = None
return ReplicationInstance(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allocatedStorage")
def allocated_storage(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "allocated_storage")
@property
@pulumi.getter(name="allowMajorVersionUpgrade")
def allow_major_version_upgrade(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "allow_major_version_upgrade")
@property
@pulumi.getter(name="autoMinorVersionUpgrade")
def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "auto_minor_version_upgrade")
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "engine_version")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter(name="multiAZ")
def multi_az(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "multi_az")
@property
@pulumi.getter(name="preferredMaintenanceWindow")
def preferred_maintenance_window(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "preferred_maintenance_window")
@property
@pulumi.getter(name="publiclyAccessible")
def publicly_accessible(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "publicly_accessible")
@property
@pulumi.getter(name="replicationInstanceClass")
def replication_instance_class(self) -> pulumi.Output[str]:
return pulumi.get(self, "replication_instance_class")
@property
@pulumi.getter(name="replicationInstanceIdentifier")
def replication_instance_identifier(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "replication_instance_identifier")
@property
@pulumi.getter(name="replicationInstancePrivateIpAddresses")
def replication_instance_private_ip_addresses(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "replication_instance_private_ip_addresses")
@property
@pulumi.getter(name="replicationInstancePublicIpAddresses")
def replication_instance_public_ip_addresses(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "replication_instance_public_ip_addresses")
@property
@pulumi.getter(name="replicationSubnetGroupIdentifier")
def replication_subnet_group_identifier(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "replication_subnet_group_identifier")
@property
@pulumi.getter(name="resourceIdentifier")
def resource_identifier(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "resource_identifier")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.ReplicationInstanceTag']]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="vpcSecurityGroupIds")
def vpc_security_group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "vpc_security_group_ids")
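# A minimal usage sketch (the resource name and ID below are placeholders, not
# values taken from this module): look up an existing replication instance via
# the `get` classmethod defined above and export one of its output properties.
#
# import pulumi
#
# existing = ReplicationInstance.get(
#     "existing-replication-instance",
#     id="example-replication-instance-id")
# pulumi.export("privateIps", existing.replication_instance_private_ip_addresses)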
|
import gym
import random
import logging
import numpy as np
from tqdm import trange
from agents.imt_agent import IMTAgent
from agents.qtable_agent import QAgent
class GoWrapper(gym.ObservationWrapper):
def __init__(self, env=None):
super(GoWrapper, self).__init__(env)
self.observation_space = gym.spaces.Tuple(
spaces=(gym.spaces.Discrete(3),)*25)
def observation(self, obs):
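        # The gym_go observation stacks several 5x5 planes; assuming the usual
        # plane ordering (black stones first, then white stones), the first 25
        # flattened values mark black stones and the next 25 mark white stones.
        # Collapse them into a single 25-cell board: 1 = black, -1 = white, 0 = empty.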
new_obs = np.zeros(25)
# print(obs.flatten())
for ind, ob in enumerate(obs.flatten()[:25]):
if new_obs[ind] == 0 and ob == 1:
new_obs[ind] = 1
for ind, ob in enumerate(obs.flatten()[25:50]):
if new_obs[ind] == 0 and ob == 1:
new_obs[ind] = -1
return new_obs
def run(Black, White, SEED = 42069, EPISODES = 10000):
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
SIZE = 5
RENDER = False
np.random.seed(SEED)
env = GoWrapper(gym.make('gym_go:go-v0', size=SIZE,
komi=0, reward_method='heuristic'))
env.seed(SEED)
env.action_space.seed(SEED)
obs_size = len(env.observation_space)
action_size = env.action_space.n
black = Black(obs_size, action_size, alpha=0.1, gamma=0.9, epsilon=0.1)
black_rewards = []
white = White(obs_size, action_size, alpha=0.1, gamma=0.9, epsilon=0.1)
white_rewards = []
game_status = []
progress_bar = trange(EPISODES)
for _ in progress_bar:
state = env.reset()
done = False
while not done:
state, reward, done, _ = black.step(env, state)
black_rewards.append(reward)
if done:
break
state, reward, done, _ = white.step(env, state)
white_rewards.append(reward)
game_status.append(reward)
black_win = [x for x in game_status if x > 0]
tie = [x for x in game_status if x == 0]
white_win = [x for x in game_status if x < 0]
progress_bar.set_description(f'BLACK WIN%: {len(black_win)/len(game_status):.2f}, TIE%: {len(tie)/len(game_status):.2f}, WHITE WIN%: {len(white_win)/len(game_status):.2f}')
if RENDER:
env.render('terminal')
# if isinstance(black, IMTAgent):
# imt = black
# qtable = white
# else:
# qtable = black
# imt = white
# print('IMT REVISIT %:', imt.experiential_model.revisit_counter / imt.experiential_model.update_counter)
# print('QTABLE REVISIT %:', qtable.model.revisit_counter / qtable.model.update_counter)
print('BLACK QTABLE REVISIT %:', black.model.revisit_counter / black.model.update_counter)
print('WHITE QTABLE REVISIT %:', white.model.revisit_counter / white.model.update_counter)
black_win = [x for x in game_status if x > 0]
tie = [x for x in game_status if x == 0]
white_win = [x for x in game_status if x < 0]
return len(black_win), len(tie), len(white_win)
if __name__ == '__main__':
wins, ties, losses = 0, 0, 0
ITERATIONS = 2
EPISODES = 1000000
SEED = 240
    # first run: both players are QAgent here (the IMT-agent matchup is disabled below)
    black_wins, tie, white_wins = run(QAgent, QAgent, SEED, EPISODES)
wins += black_wins
ties += tie
losses += white_wins
# run with imt agent second
# black_wins, tie, white_wins = run(QAgent, IMTAgent, SEED, EPISODES)
# wins += white_wins
# ties += tie
# losses += black_wins
    # only one run() call is active above, so normalize by the games actually
    # played rather than by EPISODES*ITERATIONS
    total = wins + ties + losses
print()
print(f'IMT_AGENT WIN%: {wins/total:.2f}')
print(f'TIE%: {ties/total:.2f}')
print(f'IMT_AGENT LOSS%: {losses/total:.2f}')
|
"""xancestry URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.views.generic import RedirectView
from django.conf.urls import url
import django.views.static
import django.contrib.auth.views
import os.path
from . import views
urlpatterns = [
    url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name='xancestry.index'),
url(r'^person/(?P<person_id>\d+)/$', views.person, name='person'),
url(r'^person/(?P<person_id>\d+)/edit/$', views.edit_person, name='edit_person'),
url(r'^person/(?P<person_id>\d+)/relatives/$', views.relatives, name='relatives'),
url(r'^person/(?P<person_id>\d+)/relatives/map/$', views.relatives_map, name='relatives_map'),
url(r'^person/(?P<person_id>\d+)/descendants/$', views.descendants, name='descendants'),
url(r'^person/(?P<person_id>\d+)/descendants/map/$', views.descendants_map, name='descendants_map'),
url(r'^person/(?P<person_id>\d+)/descendants/tree/$', views.descendants_tree, name='descendants_tree'),
url(r'^person/(?P<person_id>\d+)/descendants/tree/svg/$', views.descendants_tree_svg, name='descendants_tree_svg'),
url(r'^person/(?P<person_id>\d+)/ancestors/$', views.ancestors, name='ancestors'),
url(r'^person/(?P<person_id>\d+)/ancestors/report/$', views.ancestors_report, name='report'),
url(r'^person/(?P<person_id>\d+)/ancestors/report/undead/$',
views.ancestors_report_undead,
name='report_undead'),
url(r'^person/(?P<person_id>\d+)/ancestors/report/maiden-names/$',
views.ancestors_report_maiden_names,
name='report_maiden_names'),
url(r'^report/alive/(?P<year>\d+)/$', views.alive_in_year, name='alive_in_year'),
url(r'^person/(?P<person_id>\d+)/ancestors/map/$', views.ancestors_map, name='ancestors_map'),
url(r'^person/(?P<person_id>\d+)/ancestors/ringchart/$', views.ring_chart, name='ring_chart'),
url(r'^person/(?P<person_id>\d+)/ancestors/ringchart/svg/$', views.ring_chart_svg, name='ring_chart_svg'),
url(r'^location/(?P<location_id>\d+)/$', views.location, name='location'),
url(r'^region/(?P<region_name>[\w\W]+)/$', views.region, name='region'),
url(r'^surname/(?P<surname>[\w\W]+)/$', views.surname, name='surname'),
url(r'^forename/(?P<forename>[\w\W]+)/$', views.forename, name='forename'),
url(r'^tag/(?P<slug>[\w-]+)/$', views.tag, name='tag'),
url(r'^person/add/$', views.add_person, name='add_person'),
url(r'^location/add/$', views.add_location, name='add_location'),
url(r'^public/surnames/$', views.surnames, name='surnames'),
]
|
import pytz
from django.utils import timezone
from django.utils.deprecation import MiddlewareMixin
class TimezoneMiddleware(MiddlewareMixin):
tzname = None
def process_request(self, request):
self.tzname = request.session.get('django_timezone')
if self.tzname:
timezone.activate(pytz.timezone(self.tzname))
else:
timezone.activate(pytz.timezone('Asia/Dhaka'))
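# A minimal sketch (hypothetical view, not part of this middleware) of how the
# 'django_timezone' session key read above could be populated:
#
# from django.shortcuts import redirect
#
# def set_timezone(request):
#     request.session['django_timezone'] = request.POST.get('timezone', 'Asia/Dhaka')
#     return redirect('/')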
|
"""
LeetCode Problem: 1302. Deepest Leaves Sum
Link: https://leetcode.com/problems/deepest-leaves-sum/
Language: Python
Written by: Mostofa Adib Shakib
Time Complexity: O(n)
Space Complexity: O(n)
"""
from collections import deque
class Solution:
    # 'TreeNode' is quoted in the annotation so this module also imports outside
    # LeetCode, where the TreeNode class is normally provided by the judge.
    def deepestLeavesSum(self, root: 'TreeNode') -> int:
        # BFS level by level; when the loop ends, `ans` holds the sum of the
        # last (deepest) level visited. A deque keeps each pop O(1), matching
        # the O(n) time complexity stated above.
        queue = deque([root])
        ans = 0
        while queue:
            length = len(queue)
            ans = 0
            for _ in range(length):
                node = queue.popleft()
                ans += node.val
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
        return ans
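# A minimal local check (sketch): on LeetCode, TreeNode is supplied by the
# judge; the stand-in class below exists only so this example runs standalone.
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val, self.left, self.right = val, left, right
    #        1
    #       / \
    #      2   3
    #     /     \
    #    4       5    -> deepest leaves are 4 and 5, so the expected sum is 9
    root = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3, None, TreeNode(5)))
    assert Solution().deepestLeavesSum(root) == 9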
|
# Generated by Django 3.1 on 2020-08-25 10:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0013_set_minimum_datetime_timezone'),
]
operations = [
migrations.RemoveField(
model_name='board',
name='is_kaist',
),
migrations.AddField(
model_name='board',
name='access_mask',
field=models.IntegerField(default=2, verbose_name='접근 권한 값'),
),
]
|
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing graph optimization passes
"""
import logging
from pyxir.graph.passing.base_pass import XGraphBasePass
logger = logging.getLogger("pyxir")
class XGraphOptimizationPass(XGraphBasePass):
"""
Responsible for optimizing XGraph models through graph passes
Attributes
----------
"""
def __init__(self,
name='XGraphOptimization',
output_png=None,
repeat_until_stable=False):
super(XGraphOptimizationPass, self).__init__(name,
output_png=output_png)
self.repeat_until_stable = repeat_until_stable
self.optimizations = []
def add_optimization(self,
condition_func,
opt_func,
name,
**kwargs):
self.optimizations.append({
'condition_func': condition_func,
'opt_func': opt_func,
'name': name,
'kwargs': kwargs
})
def execute(self, xgraph):
# type: (XGraph) -> XGraph
"""
"""
condition_funcs = [opt['condition_func'] for opt in self.optimizations]
opt_funcs = [opt['opt_func'] for opt in self.optimizations]
names = [opt['name'] for opt in self.optimizations]
opt_kwargs_lst = [opt['kwargs'] for opt in self.optimizations]
# Execute all optimization passes
# for opt_params in self.optimizations:
# logger.debug("-- opt: {}".format(opt_params['name']))
xgraph = self._optimization_layer_pass(
xgraph=xgraph,
condition_funcs=condition_funcs,
opt_funcs=opt_funcs,
opt_names=names,
opt_kwargs_lst=opt_kwargs_lst,
repeat_until_stable=self.repeat_until_stable,
name=self.name,
output_png=self.output_png
)
return xgraph
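# A minimal usage sketch (the functions below are hypothetical placeholders,
# not part of pyxir) showing how optimizations registered via
# `add_optimization` are applied by `execute`:
#
# opt_pass = XGraphOptimizationPass(name='MyOptimization', repeat_until_stable=True)
# opt_pass.add_optimization(
#     condition_func=my_condition_func,  # decides whether the optimization applies
#     opt_func=my_opt_func,              # performs the actual graph rewrite
#     name='my-optimization')
# optimized_xgraph = opt_pass.execute(xgraph)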
|
# Suppose an array sorted in ascending order is rotated at some pivot unknown
# to you beforehand.
#
# (e.g., the array [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).
#
# Find the minimum element.
#
# You may assume no duplicate exists in the array.
#
# Example 1:
#
# Input: [3,4,5,1,2]
# Output: 1
#
# Example 2:
#
# Input: [4,5,6,7,0,1,2]
# Output: 0
# Related Topics: Array, Binary Search
# leetcode submit region begin(Prohibit modification and deletion)
class Solution(object):
def findMin(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
left, right = 0, len(nums) - 1
while left < right:
mid = left + (right - left) // 2
if nums[mid] < nums[right]:
right = mid
else:
left = mid + 1
return nums[left]
# leetcode submit region end(Prohibit modification and deletion)
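# Quick sanity check using the examples from the problem statement above: the
# binary search always keeps the half that must still contain the rotation
# point, and therefore the minimum element.
if __name__ == '__main__':
    assert Solution().findMin([3, 4, 5, 1, 2]) == 1
    assert Solution().findMin([4, 5, 6, 7, 0, 1, 2]) == 0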
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-04 19:51
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('document', models.FileField(default='document', upload_to='documents/')),
('name', models.CharField(default='name', max_length=100)),
],
),
migrations.CreateModel(
name='folder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, unique=True)),
],
),
migrations.CreateModel(
name='report',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='title', max_length=200, unique=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('short_description', models.CharField(max_length=30)),
('detailed_description', models.CharField(max_length=200)),
                ('is_private', models.BooleanField(default=False)),
                ('location', models.CharField(default='Virginia', max_length=100)),
                ('is_encrypted', models.BooleanField(default=False)),
('username_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='folder',
name='added_reports',
field=models.ManyToManyField(to='reports.report'),
),
migrations.AddField(
model_name='folder',
name='username_id',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='document',
name='report_document',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reports.report'),
),
]
|
# Generated by Django 2.0.7 on 2018-07-14 11:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('road', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='situation',
name='endTime',
field=models.DateTimeField(),
),
]
|
"""InVEST Seasonal water yield model tests that use the InVEST sample data"""
import glob
import unittest
import tempfile
import shutil
import os
import numpy
from osgeo import ogr
from natcap.invest.pygeoprocessing_0_3_3.testing import scm
SAMPLE_DATA = os.path.join(
os.path.dirname(__file__), '..', 'data', 'invest-data',
'seasonal_water_yield')
REGRESSION_DATA = os.path.join(
os.path.dirname(__file__), '..', 'data', 'invest-test-data',
'seasonal_water_yield')
class SeasonalWaterYieldUnusualDataTests(unittest.TestCase):
"""Tests for InVEST Seasonal Water Yield model that cover cases where
input data are in an unusual corner case"""
def setUp(self):
        # this lets us delete the workspace after it's done no matter the
        # test result
self.workspace_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.workspace_dir)
@scm.skip_if_data_missing(SAMPLE_DATA)
@scm.skip_if_data_missing(REGRESSION_DATA)
def test_ambiguous_precip_data(self):
"""SWY test case where there are more than 12 precipitation files"""
from natcap.invest.seasonal_water_yield import seasonal_water_yield
test_precip_dir = os.path.join(self.workspace_dir, 'test_precip_dir')
shutil.copytree(
os.path.join(SAMPLE_DATA, 'precip_dir'), test_precip_dir)
shutil.copy(
os.path.join(test_precip_dir, 'precip_mm_3.tif'),
os.path.join(test_precip_dir, 'bonus_precip_mm_3.tif'))
# A placeholder args that has the property that the aoi_path will be
# the same name as the output aggregate vector
args = {
'workspace_dir': self.workspace_dir,
'aoi_path': os.path.join(SAMPLE_DATA, 'watersheds.shp'),
'alpha_m': '1/12',
'beta_i': '1.0',
'biophysical_table_path': os.path.join(
SAMPLE_DATA, 'biophysical_table.csv'),
'dem_raster_path': os.path.join(SAMPLE_DATA, 'dem.tif'),
'et0_dir': os.path.join(SAMPLE_DATA, 'eto_dir'),
'gamma': '1.0',
'lulc_raster_path': os.path.join(SAMPLE_DATA, 'lulc.tif'),
'precip_dir': test_precip_dir, # test constructed one
'rain_events_table_path': os.path.join(
SAMPLE_DATA, 'rain_events_table.csv'),
'soil_group_path': os.path.join(SAMPLE_DATA, 'soil_group.tif'),
'threshold_flow_accumulation': '1000',
'user_defined_climate_zones': False,
'user_defined_local_recharge': False,
'monthly_alpha': False,
}
with self.assertRaises(ValueError):
seasonal_water_yield.execute(args)
@scm.skip_if_data_missing(SAMPLE_DATA)
@scm.skip_if_data_missing(REGRESSION_DATA)
def test_precip_data_missing(self):
"""SWY test case where there is a missing precipitation file"""
from natcap.invest.seasonal_water_yield import seasonal_water_yield
test_precip_dir = os.path.join(self.workspace_dir, 'test_precip_dir')
shutil.copytree(
os.path.join(SAMPLE_DATA, 'precip_dir'), test_precip_dir)
os.remove(os.path.join(test_precip_dir, 'precip_mm_3.tif'))
# A placeholder args that has the property that the aoi_path will be
# the same name as the output aggregate vector
args = {
'workspace_dir': self.workspace_dir,
'aoi_path': os.path.join(SAMPLE_DATA, 'watersheds.shp'),
'alpha_m': '1/12',
'beta_i': '1.0',
'biophysical_table_path': os.path.join(
SAMPLE_DATA, 'biophysical_table.csv'),
'dem_raster_path': os.path.join(SAMPLE_DATA, 'dem.tif'),
'et0_dir': os.path.join(SAMPLE_DATA, 'eto_dir'),
'gamma': '1.0',
'lulc_raster_path': os.path.join(SAMPLE_DATA, 'lulc.tif'),
'precip_dir': test_precip_dir, # test constructed one
'rain_events_table_path': os.path.join(
SAMPLE_DATA, 'rain_events_table.csv'),
'soil_group_path': os.path.join(SAMPLE_DATA, 'soil_group.tif'),
'threshold_flow_accumulation': '1000',
'user_defined_climate_zones': False,
'user_defined_local_recharge': False,
'monthly_alpha': False,
}
with self.assertRaises(ValueError):
seasonal_water_yield.execute(args)
@scm.skip_if_data_missing(SAMPLE_DATA)
@scm.skip_if_data_missing(REGRESSION_DATA)
def test_aggregate_vector_preexists(self):
"""SWY test that model deletes a preexisting aggregate output result"""
from natcap.invest.seasonal_water_yield import seasonal_water_yield
# Set up data so there is enough code to do an aggregate over the
# rasters but the output vector already exists
for file_path in glob.glob(os.path.join(SAMPLE_DATA, "watershed.*")):
shutil.copy(file_path, self.workspace_dir)
aoi_path = os.path.join(SAMPLE_DATA, 'watershed.shp')
l_path = os.path.join(REGRESSION_DATA, 'L.tif')
aggregate_vector_path = os.path.join(
self.workspace_dir, 'watershed.shp')
seasonal_water_yield._aggregate_recharge(
aoi_path, l_path, l_path, aggregate_vector_path)
# test if aggregate is expected
tolerance_places = 1 # this was an experimentally acceptable value
agg_results_base_path = os.path.join(
REGRESSION_DATA, 'l_agg_results.csv')
result_vector = ogr.Open(aggregate_vector_path)
result_layer = result_vector.GetLayer()
incorrect_value_list = []
with open(agg_results_base_path, 'rb') as agg_result_file:
for line in agg_result_file:
fid, vri_sum, qb_val = [float(x) for x in line.split(',')]
feature = result_layer.GetFeature(int(fid))
for field, value in [('vri_sum', vri_sum), ('qb', qb_val)]:
if not numpy.isclose(
feature.GetField(field), value, rtol=1e-6):
incorrect_value_list.append(
'Unexpected value on feature %d, '
'expected %f got %f' % (
fid, value, feature.GetField(field)))
ogr.Feature.__swig_destroy__(feature)
feature = None
result_layer = None
ogr.DataSource.__swig_destroy__(result_vector)
result_vector = None
if incorrect_value_list:
raise AssertionError('\n' + '\n'.join(incorrect_value_list))
@scm.skip_if_data_missing(SAMPLE_DATA)
def test_duplicate_aoi_assertion(self):
"""SWY ensure model halts when AOI path identical to output vector"""
from natcap.invest.seasonal_water_yield import seasonal_water_yield
# A placeholder args that has the property that the aoi_path will be
# the same name as the output aggregate vector
args = {
'workspace_dir': self.workspace_dir,
'aoi_path': os.path.join(
self.workspace_dir, 'aggregated_results_foo.shp'),
'results_suffix': 'foo',
'alpha_m': '1/12',
'beta_i': '1.0',
'biophysical_table_path': os.path.join(
SAMPLE_DATA, 'biophysical_table.csv'),
'dem_raster_path': os.path.join(SAMPLE_DATA, 'dem.tif'),
'et0_dir': os.path.join(SAMPLE_DATA, 'eto_dir'),
'gamma': '1.0',
'lulc_raster_path': os.path.join(SAMPLE_DATA, 'lulc.tif'),
'precip_dir': os.path.join(SAMPLE_DATA, 'precip_dir'),
'rain_events_table_path': os.path.join(
SAMPLE_DATA, 'rain_events_table.csv'),
'soil_group_path': os.path.join(SAMPLE_DATA, 'soil_group.tif'),
'threshold_flow_accumulation': '1000',
'user_defined_climate_zones': False,
'user_defined_local_recharge': False,
'monthly_alpha': False,
}
with self.assertRaises(ValueError):
seasonal_water_yield.execute(args)
class SeasonalWaterYieldRegressionTests(unittest.TestCase):
"""Regression tests for InVEST Seasonal Water Yield model"""
def setUp(self):
        # this lets us delete the workspace after it's done no matter the
        # test result
self.workspace_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.workspace_dir)
@staticmethod
def generate_base_args(workspace_dir):
"""Generate an args list that is consistent across all three regression
tests"""
args = {
'alpha_m': '1/12',
'aoi_path': os.path.join(SAMPLE_DATA, 'watershed.shp'),
'beta_i': '1.0',
'biophysical_table_path': os.path.join(
SAMPLE_DATA, 'biophysical_table.csv'),
'dem_raster_path': os.path.join(SAMPLE_DATA, 'dem.tif'),
'et0_dir': os.path.join(SAMPLE_DATA, 'eto_dir'),
'gamma': '1.0',
'lulc_raster_path': os.path.join(SAMPLE_DATA, 'lulc.tif'),
'precip_dir': os.path.join(SAMPLE_DATA, 'precip_dir'),
'rain_events_table_path': os.path.join(
SAMPLE_DATA, 'rain_events_table.csv'),
'results_suffix': '',
'soil_group_path': os.path.join(SAMPLE_DATA, 'soil_group.tif'),
'threshold_flow_accumulation': '1000',
'workspace_dir': workspace_dir,
}
return args
@scm.skip_if_data_missing(SAMPLE_DATA)
@scm.skip_if_data_missing(REGRESSION_DATA)
def test_base_regression(self):
"""SWY base regression test on sample data
Executes SWY in default mode and checks that the output files are
generated and that the aggregate shapefile fields are the same as the
regression case."""
from natcap.invest.seasonal_water_yield import seasonal_water_yield
# use predefined directory so test can clean up files during teardown
args = SeasonalWaterYieldRegressionTests.generate_base_args(
self.workspace_dir)
# make args explicit that this is a base run of SWY
args['user_defined_climate_zones'] = False
args['user_defined_local_recharge'] = False
args['monthly_alpha'] = False
args['results_suffix'] = ''
seasonal_water_yield.execute(args)
SeasonalWaterYieldRegressionTests._assert_regression_results_equal(
args['workspace_dir'],
os.path.join(REGRESSION_DATA, 'file_list_base.txt'),
os.path.join(args['workspace_dir'], 'aggregated_results.shp'),
os.path.join(REGRESSION_DATA, 'agg_results_base.csv'))
@scm.skip_if_data_missing(SAMPLE_DATA)
@scm.skip_if_data_missing(REGRESSION_DATA)
def test_monthly_alpha_regression(self):
"""SWY monthly alpha values regression test on sample data
Executes SWY using the monthly alpha table and checks that the output
files are generated and that the aggregate shapefile fields are the
same as the regression case."""
from natcap.invest.seasonal_water_yield import seasonal_water_yield
# use predefined directory so test can clean up files during teardown
args = SeasonalWaterYieldRegressionTests.generate_base_args(
self.workspace_dir)
# make args explicit that this is a base run of SWY
args['user_defined_climate_zones'] = False
args['user_defined_local_recharge'] = False
args['monthly_alpha'] = True
args['monthly_alpha_path'] = os.path.join(
SAMPLE_DATA, 'monthly_alpha.csv')
args['results_suffix'] = ''
seasonal_water_yield.execute(args)
SeasonalWaterYieldRegressionTests._assert_regression_results_equal(
args['workspace_dir'],
os.path.join(REGRESSION_DATA, 'file_list_base.txt'),
os.path.join(args['workspace_dir'], 'aggregated_results.shp'),
os.path.join(REGRESSION_DATA, 'agg_results_base.csv'))
@scm.skip_if_data_missing(SAMPLE_DATA)
@scm.skip_if_data_missing(REGRESSION_DATA)
def test_climate_zones_regression(self):
"""SWY climate zone regression test on sample data
Executes SWY in climate zones mode and checks that the output files are
generated and that the aggregate shapefile fields are the same as the
regression case."""
from natcap.invest.seasonal_water_yield import seasonal_water_yield
# use predefined directory so test can clean up files during teardown
args = SeasonalWaterYieldRegressionTests.generate_base_args(
self.workspace_dir)
# modify args to account for climate zones defined
args['climate_zone_raster_path'] = os.path.join(
SAMPLE_DATA, 'climate_zones.tif')
args['climate_zone_table_path'] = os.path.join(
SAMPLE_DATA, 'climate_zone_events.csv')
args['user_defined_climate_zones'] = True
args['user_defined_local_recharge'] = False
args['monthly_alpha'] = False
args['results_suffix'] = 'cz'
seasonal_water_yield.execute(args)
SeasonalWaterYieldRegressionTests._assert_regression_results_equal(
args['workspace_dir'],
os.path.join(REGRESSION_DATA, 'file_list_cz.txt'),
os.path.join(
args['workspace_dir'], 'aggregated_results_cz.shp'),
os.path.join(REGRESSION_DATA, 'agg_results_cz.csv'))
@scm.skip_if_data_missing(SAMPLE_DATA)
@scm.skip_if_data_missing(REGRESSION_DATA)
def test_user_recharge(self):
"""SWY user recharge regression test on sample data
Executes SWY in user defined local recharge mode and checks that the
output files are generated and that the aggregate shapefile fields
are the same as the regression case."""
from natcap.invest.seasonal_water_yield import seasonal_water_yield
# use predefined directory so test can clean up files during teardown
args = SeasonalWaterYieldRegressionTests.generate_base_args(
self.workspace_dir)
# modify args to account for user recharge
args['user_defined_climate_zones'] = False
args['user_defined_local_recharge'] = True
args['monthly_alpha'] = False
args['results_suffix'] = ''
args['l_path'] = os.path.join(REGRESSION_DATA, 'L.tif')
seasonal_water_yield.execute(args)
SeasonalWaterYieldRegressionTests._assert_regression_results_equal(
args['workspace_dir'],
os.path.join(REGRESSION_DATA, 'file_list_user_recharge.txt'),
os.path.join(args['workspace_dir'], 'aggregated_results.shp'),
os.path.join(REGRESSION_DATA, 'agg_results_base.csv'))
@staticmethod
def _assert_regression_results_equal(
workspace_dir, file_list_path, result_vector_path,
agg_results_path):
"""Test the state of the workspace against the expected list of files
and aggregated results.
Parameters:
workspace_dir (string): path to the completed model workspace
file_list_path (string): path to a file that has a list of all
the expected files relative to the workspace base
result_vector_path (string): path to the summary shapefile
produced by the SWY model.
agg_results_path (string): path to a csv file that has the
expected aggregated_results.shp table in the form of
fid,vri_sum,qb_val per line
Returns:
None
Raises:
AssertionError if any files are missing or results are out of
range by `tolerance_places`
"""
# Test that the workspace has the same files as we expect
SeasonalWaterYieldRegressionTests._test_same_files(
file_list_path, workspace_dir)
# we expect a file called 'aggregated_results.shp'
result_vector = ogr.Open(result_vector_path)
result_layer = result_vector.GetLayer()
# The tolerance of 3 digits after the decimal was determined by
# experimentation on the application with the given range of numbers.
# This is an apparently reasonable approach as described by ChrisF:
# http://stackoverflow.com/a/3281371/42897
# and even more reading about picking numerical tolerance (it's hard):
# https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
tolerance_places = 3
with open(agg_results_path, 'rb') as agg_result_file:
for line in agg_result_file:
fid, vri_sum, qb_val = [float(x) for x in line.split(',')]
feature = result_layer.GetFeature(int(fid))
for field, value in [('vri_sum', vri_sum), ('qb', qb_val)]:
numpy.testing.assert_almost_equal(
feature.GetField(field), value,
decimal=tolerance_places)
ogr.Feature.__swig_destroy__(feature)
feature = None
result_layer = None
ogr.DataSource.__swig_destroy__(result_vector)
result_vector = None
@staticmethod
def _test_same_files(base_list_path, directory_path):
"""Assert that the files listed in `base_list_path` are also in the
directory pointed to by `directory_path`.
Parameters:
base_list_path (string): a path to a file that has one relative
file path per line.
directory_path (string): a path to a directory whose contents will
be checked against the files listed in `base_list_file`
Returns:
None
Raises:
AssertionError when there are files listed in `base_list_file`
that don't exist in the directory indicated by `path`"""
missing_files = []
with open(base_list_path, 'r') as file_list:
            for file_path in file_list:
                file_path = file_path.rstrip()
                if file_path == '':
                    # skip blank lines
                    continue
                full_path = os.path.join(directory_path, file_path)
                if not os.path.isfile(full_path):
                    missing_files.append(full_path)
if len(missing_files) > 0:
raise AssertionError(
"The following files were expected but not found: " +
'\n'.join(missing_files))
|
inputfile = open('day-8/day8_input.txt', 'r')
def getDigitsList(inputfile):
digitsList = []
with inputfile as f:
for line in f.readlines():
lineEntry = []
line = line.strip()
pattern, output = line.split(' | ')
pattern = pattern.split()
output = output.split()
lineEntry.append({'pattern': pattern, 'output': output})
digitsList.append(lineEntry)
return digitsList
def getEasyDigits(digitsList):
    # Digits 1, 4, 7 and 8 use a unique number of lit segments (2, 4, 3 and 7
    # respectively), so they can be counted from the output lengths alone.
    results = [0] * 10
    for entry in digitsList:
        output = entry[0]["output"]
        for digit in output:
            digitlength = len(digit)
            if digitlength == 2:
                results[1] += 1
            elif digitlength == 4:
                results[4] += 1
            elif digitlength == 3:
                results[7] += 1
            elif digitlength == 7:
                results[8] += 1
    return sum(results)
digitsList = getDigitsList(inputfile)
print(digitsList)
print(getEasyDigits(digitsList))
|
from __future__ import unicode_literals
from django.db import models, transaction
from django.core.validators import MinValueValidator, MaxValueValidator
from django.utils import timezone
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.core.exceptions import ObjectDoesNotExist
import os.path, csv, logging, socket
import json, time, datetime, pytz
from typing import Optional
from constance import config
from fermentrack_django import settings
import re
from decimal import Decimal
from . import udev_integration
from lib.ftcircus.client import CircusMgr, CircusException
from fermentrack_django.settings import USE_DOCKER
logger = logging.getLogger(__name__)
# BrewPiDevice
# |
# |--Beer,Beer,...
# | |
# | |- BeerLog Point,Beer Log Point...
# |
# |--OldControlConstants/NewControlConstants
# |
# |--PinDevice,PinDevice...
#
# Fermentation Profile
# |
# |--FermentationProfilePoint,FermentationProfilePoint,...
#
class PinDevice(models.Model):
class Meta:
managed = False
text = models.CharField(max_length=16, default="")
type = models.CharField(max_length=8, default="")
pin = models.IntegerField(default=-1) # 'val' in the dict
def __str__(self):
return self.text
# This factory method is used to allow us to quickly create an instance from a dict loaded in the firmware
@classmethod
def create_from_dict(cls, device_dict):
# If the pin definition has " text" rather than "text" we assume the pin should be excluded
if " text" in device_dict and "text" not in device_dict:
return None
new_device = cls(text=device_dict['text'], type=device_dict['type'], pin=device_dict['val'])
return new_device
# load_all_from_pinlist returns a list of available pin objects
@classmethod
def load_all_from_pinlist(cls, pinlist):
all_pins = []
for this_pin in pinlist:
next_pin = cls.create_from_dict(this_pin)
if next_pin is not None:
all_pins.append(next_pin)
return all_pins
# SensorDevice is a "sensor" (onewire addressable?) device
class SensorDevice(models.Model):
class Meta:
managed = False
verbose_name = "Sensor Device"
verbose_name_plural = "Sensor Devices"
# DEVICE_NONE = 0, // End of chamber device list
# DEVICE_CHAMBER_DOOR = 1, // switch sensor
# DEVICE_CHAMBER_HEAT = 2,
# DEVICE_CHAMBER_COOL = 3,
# DEVICE_CHAMBER_LIGHT = 4, // actuator
# DEVICE_CHAMBER_TEMP = 5,
# DEVICE_CHAMBER_ROOM_TEMP = 6, // temp sensors
# DEVICE_CHAMBER_FAN = 7, // a fan in the chamber
# DEVICE_CHAMBER_RESERVED1 = 8, // reserved for future use
# // carboy devices
# DEVICE_BEER_FIRST = 9,
# DEVICE_BEER_TEMP = DEVICE_BEER_FIRST, // primary beer temp sensor
# DEVICE_BEER_TEMP2 = 10, // secondary beer temp sensor
# DEVICE_BEER_HEAT = 11, DEVICE_BEER_COOL = 12, // individual actuators
# DEVICE_BEER_SG = 13, // SG sensor
# DEVICE_BEER_RESERVED1 = 14, DEVICE_BEER_RESERVED2 = 15, // reserved
# DEVICE_MAX = 16
# };
# Define the options for the choices below
DEVICE_FUNCTION_NONE = 0
DEVICE_FUNCTION_CHAMBER_DOOR = 1
DEVICE_FUNCTION_CHAMBER_HEAT = 2
DEVICE_FUNCTION_CHAMBER_COOL = 3
DEVICE_FUNCTION_CHAMBER_LIGHT = 4
DEVICE_FUNCTION_CHAMBER_TEMP = 5
DEVICE_FUNCTION_CHAMBER_ROOM_TEMP = 6
DEVICE_FUNCTION_CHAMBER_FAN = 7
DEVICE_FUNCTION_MANUAL_ACTUATOR = 8
DEVICE_FUNCTION_BEER_TEMP = 9
DEVICE_FUNCTION_BEER_TEMP2 = 10
DEVICE_FUNCTION_BEER_HEAT = 11
DEVICE_FUNCTION_BEER_COOL = 12
DEVICE_FUNCTION_BEER_SG = 13
DEVICE_FUNCTION_BEER_RESERVED1 = 14
DEVICE_FUNCTION_BEER_RESERVED2 = 15
DEVICE_FUNCTION_MAX = 16
INVERT_NOT_INVERTED = 0
INVERT_INVERTED = 1
DEVICE_FUNCTION_CHOICES = (
(DEVICE_FUNCTION_NONE, 'NONE'),
(DEVICE_FUNCTION_CHAMBER_DOOR, 'Chamber Door'), # CHAMBER_DOOR
(DEVICE_FUNCTION_CHAMBER_HEAT, 'Heating Relay'), # CHAMBER_HEAT
(DEVICE_FUNCTION_CHAMBER_COOL, 'Cooling Relay'), # CHAMBER_COOL
(DEVICE_FUNCTION_CHAMBER_LIGHT, 'Chamber Light'), # CHAMBER_LIGHT
(DEVICE_FUNCTION_CHAMBER_TEMP, 'Chamber Temp'), # CHAMBER_TEMP
(DEVICE_FUNCTION_CHAMBER_ROOM_TEMP, 'Room (outside) Temp'), # CHAMBER_ROOM_TEMP
(DEVICE_FUNCTION_CHAMBER_FAN, 'Chamber Fan'), # CHAMBER_FAN
# (DEVICE_FUNCTION_MANUAL_ACTUATOR, 'CHAMBER_RESERVED1'), # Unused, reserved for future use - Tagged as "Manual Actuator" in develop www
(DEVICE_FUNCTION_BEER_TEMP, 'Beer Temp'), # Primary beer temp sensor
# The rest of these are available in the code, but appear to have no implemented functionality.
# Commenting them out for the time being.
# (DEVICE_FUNCTION_BEER_TEMP2, 'BEER_TEMP2'), # Secondary beer temp sensor (unimplemented)
# (DEVICE_FUNCTION_BEER_HEAT, 'BEER_HEAT'),
# (DEVICE_FUNCTION_BEER_COOL, 'BEER_COOL'),
# (DEVICE_FUNCTION_BEER_SG, 'BEER_SG'),
# (DEVICE_FUNCTION_BEER_RESERVED1, 'BEER_RESERVED1'),
# (DEVICE_FUNCTION_BEER_RESERVED2, 'BEER_RESERVED2'),
# (DEVICE_FUNCTION_MAX, 'MAX'),
)
# DEVICE_HARDWARE_NONE = 0,
# DEVICE_HARDWARE_PIN = 1, // a digital pin, either input or output
# DEVICE_HARDWARE_ONEWIRE_TEMP = 2, // a onewire temperature sensor
# DEVICE_HARDWARE_ONEWIRE_2413 = 3 // a onewire 2 - channel PIO input or output.
DEVICE_HARDWARE_CHOICES = (
(0, 'NONE'),
(1, 'PIN'),
(2, 'ONEWIRE_TEMP'),
(3, 'ONEWIRE_2413'),
(4, 'ONEWIRE_2408/Valve'),
)
DEVICE_TYPE_CHOICES = (
(0, 'None'),
(1, 'Temp Sensor'),
(2, 'Switch Sensor'),
(3, 'Switch Actuator'),
(4, 'PWM Actuator'),
(5, 'Manual Actuator'),
)
INVERT_CHOICES = (
(INVERT_NOT_INVERTED, 'Not Inverted'),
(INVERT_INVERTED, 'Inverted'),
)
address = models.CharField(max_length=16, blank=True, default="")
device_index = models.IntegerField(default=-1)
type = models.IntegerField(default=0)
chamber = models.IntegerField(default=0)
beer = models.IntegerField(default=0)
device_function = models.IntegerField(default=0, choices=DEVICE_FUNCTION_CHOICES)
hardware = models.IntegerField(default=2, choices=DEVICE_HARDWARE_CHOICES)
deactivated = models.IntegerField(default=0)
pin = models.IntegerField(default=0)
calibrate_adjust = models.FloatField(default=0.0)
pio = models.IntegerField(null=True, default=None)
invert = models.IntegerField(default=1, choices=INVERT_CHOICES)
sensor_value = models.FloatField(default=0.0)
# For the two ForeignKey fields, due to the fact that managed=False, we don't want Django attempting to enforce
# referential integrity when a controller/PinDevice is deleted as there is no database table to enforce upon.
# (You'll get a 'no_such_table' error)
pin_data = models.ForeignKey(PinDevice, null=True, blank=True, default=None, on_delete=models.DO_NOTHING)
controller = models.ForeignKey('BrewPiDevice', null=True, default=None, on_delete=models.DO_NOTHING)
# Defining the name as something readable for debugging
def __str__(self):
if self.hardware == 1:
return "Pin {}".format(self.pin)
elif self.hardware == 2:
return "TempSensor " + self.address
elif self.hardware == 3:
return "OneWire 2413 " + self.address
elif self.hardware == 4:
return "OneWire 2408 " + self.address
# This factory method is used to allow us to quickly create an instance from a dict loaded from the firmware
@classmethod
def create_from_dict(cls, device_dict, pinlist_dict=None):
new_device = cls()
# An example string is as below (from one of my (unconfigured) onewire temperature sensors)
# {u'a': u'28FF93A7A4150307', u'c': 1, u'b': 0, u'd': 0, u'f': 0, u'i': -1, u'h': 2, u'j': 0.0, u'p': 12, u't': 0}
# and here is an example string from one of the 'pin' devices:
# {u'c': 1, u'b': 0, u'd': 0, u'f': 0, u'i': -1, u'h': 1, u'p': 16, u't': 0, u'x': 1}
# The following are defined in the code, but aren't interpreted here (for now)
# const char DEVICE_ATTRIB_VALUE = 'v'; // print current values
# const char DEVICE_ATTRIB_WRITE = 'w'; // write value to device
if 'a' in device_dict: # const char DEVICE_ATTRIB_ADDRESS = 'a';
new_device.address = device_dict['a']
if 'c' in device_dict: # const char DEVICE_ATTRIB_CHAMBER = 'c';
new_device.chamber = device_dict['c']
if 'b' in device_dict: # const char DEVICE_ATTRIB_BEER = 'b';
new_device.beer = device_dict['b']
if 'd' in device_dict: # const char DEVICE_ATTRIB_DEACTIVATED = 'd';
new_device.deactivated = device_dict['d']
if 'f' in device_dict: # const char DEVICE_ATTRIB_FUNCTION = 'f';
new_device.device_function = device_dict['f']
if 'i' in device_dict: # const char DEVICE_ATTRIB_INDEX = 'i';
new_device.device_index = device_dict['i']
# Not allowing defaulting of new_device.hardware
# if 'h' in device_dict: # const char DEVICE_ATTRIB_HARDWARE = 'h';
new_device.hardware = device_dict['h']
if 'j' in device_dict: # const char DEVICE_ATTRIB_CALIBRATEADJUST = 'j'; // value to add to temp sensors to bring to correct temperature
new_device.calibrate_adjust = device_dict['j']
# TODO - Determine if I should error out if we don't receive 'p' back in the dict, or should allow defaulting
if 'p' in device_dict: # const char DEVICE_ATTRIB_PIN = 'p';
new_device.pin = device_dict['p']
# TODO - Determine if I should error out if we don't receive 't' back in the dict, or should allow defaulting
if 't' in device_dict: # const char DEVICE_ATTRIB_TYPE = 't';
new_device.type = device_dict['t']
# pio is only set if BREWPI_DS2413 is enabled (OneWire actuator support)
if 'n' in device_dict: # const char DEVICE_ATTRIB_PIO = 'n';
new_device.pio = device_dict['n']
if 'x' in device_dict: # const char DEVICE_ATTRIB_INVERT = 'x';
new_device.invert = int(device_dict['x'])
if 'v' in device_dict: # Temperature value (if we read values when we queried devices from the controller)
new_device.sensor_value = device_dict['v']
if pinlist_dict:
for this_pin in pinlist_dict:
if this_pin.pin == new_device.pin:
new_device.pin_data = this_pin
return new_device
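    # A minimal sketch of using the factory above, mirroring the example dicts
    # in the comments (values are illustrative only):
    #
    #   pins = PinDevice.load_all_from_pinlist([{'text': 'heat', 'type': 'act', 'val': 12}])
    #   sensor = SensorDevice.create_from_dict(
    #       {'a': '28FF93A7A4150307', 'c': 1, 'b': 0, 'd': 0, 'f': 0, 'i': -1,
    #        'h': 2, 'j': 0.0, 'p': 12, 't': 0}, pins)
    #   # sensor.address == '28FF93A7A4150307'; sensor.pin_data is the PinDevice with val 12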
@classmethod
def load_all_from_devicelist(cls, device_list, pinlist_dict=None, controller=None):
all_devices = []
for this_device in device_list:
# This gets wrapped in a try/except block as if the controller returns a malformed device list (e.g. missing
# one of the required parameters, like 'h') we want to skip it.
try:
next_device = cls.create_from_dict(this_device, pinlist_dict)
next_device.controller = controller
all_devices.append(next_device)
except:
pass
return all_devices
def get_next_available_device_index(self):
try:
if not self.controller: # If we can't load the controller (it's optional, after all) return None
return None
if len(self.controller.installed_devices) == 0:
return 0
except:
return None
indices = {}
for i in range(0,25): # Prepopulate indices as false
indices[i] = False
for this_device in self.controller.installed_devices: # Iterate over installed_devices & update indices
indices[this_device.device_index] = True
for key in indices: # Find the first unused index
if not indices[key]:
return key # ...and return it
return None # If we used all indices, return None
def set_defaults_for_device_function(self):
# In the current state of the BrewPi firmware, a number of options that are available on the controller are
# either confusing or unnecessary. Rather than force the user to figure them out, let's default them.
# Device index is what is used internally by the BrewPi firmware to track installed devices. Once a device
# is on the "installed" list, we need to always address it by the index. If it doesn't have an index assigned
# yet, however, then we need to get the next available one and use it.
if self.device_index < 0:
self.device_index = self.get_next_available_device_index()
if self.device_function > 0: # If the device has a function, set the default chamber/beer
# For the ESP8266 implementation, this same logic is enforced on the controller, as well
self.chamber = 1 # Default the chamber to 1
self.beer = 0 # Default the beer to 0
if self.device_function >= 9 and self.device_function <=15:
self.beer = 1 # ...unless this is an actual beer device, in which case default the beer to 1
# This uses the "updateDevice" message. There is also a "writeDevice" message which is used to -create- devices.
# (Used for "manual" actuators, aka buttons)
def write_config_to_controller(self, uninstall=False):
self.set_defaults_for_device_function() # Bring the configuration to a consistent state
# U:{"i":"0","c":"1","b":"0","f":"5","h":"2","p":"12","a":"28FF93A7A4150307"}
config_dict = {}
# The following options are universal for all hardware types
config_dict['i'] = self.device_index
config_dict['c'] = self.chamber
config_dict['b'] = self.beer
config_dict['f'] = self.device_function
config_dict['h'] = self.hardware
config_dict['p'] = self.pin
# config_dict['d'] = self.deactivated
if self.hardware == 1: # Set options that are specific to pin devices
config_dict['x'] = self.invert
elif self.hardware == 2: # Set options that are specific to OneWire temp sensors
config_dict['j'] = self.calibrate_adjust
config_dict['a'] = self.address
sent_message = self.controller.send_message("applyDevice", json.dumps(config_dict))
time.sleep(3) # There's a 2.5 second delay in re-reading values within BrewPi Script - We'll give it 0.5s more
self.controller.load_sensors_from_device()
try:
updated_device = SensorDevice.find_device_from_address_or_pin(self.controller.installed_devices, address=self.address, pin=self.pin)
except ValueError:
if uninstall:
# If we were -trying- to uninstall the device, it's a good thing it doesn't show up in installed_devices
return True
else:
return False
if updated_device.device_index != self.device_index:
return False
elif updated_device.chamber != self.chamber:
return False
elif updated_device.beer != self.beer:
return False
elif updated_device.device_function != self.device_function:
return False
elif updated_device.hardware != self.hardware:
return False
elif updated_device.pin != self.pin:
return False
elif self.hardware == 1 and updated_device.invert != int(self.invert):
return False
elif self.hardware == 2 and updated_device.address != self.address:
return False
else:
return True
# Uninstall basically just sets device_function to 0
def uninstall(self):
self.device_function = SensorDevice.DEVICE_FUNCTION_NONE
# Technically, the next 2 are overwritten in write_config_to_controller, but explicitly breaking them out here
self.chamber = 0
self.beer = 0
return self.write_config_to_controller(uninstall=True)
@staticmethod
def find_device_from_address_or_pin(device_list, address=None, pin=None):
if device_list is None:
raise ValueError('No sensors/pins are available for this device')
if address is not None and len(address) > 0:
for this_device in device_list:
if this_device.address == address:
return this_device
# We weren't able to find a device with that address
raise ValueError('Unable to find address in device_list')
elif pin is not None:
for this_device in device_list:
if this_device.pin == pin:
return this_device
# We weren't able to find a device with that pin number
raise ValueError('Unable to find pin in device_list')
else:
# We weren't passed an address or pin number
raise ValueError('Neither address nor pin passed to function')
#{"beerName": "Sample Data", "tempFormat": "C", "profileName": "Sample Profile", "dateTimeFormat": "yy-mm-dd", "dateTimeFormatDisplay": "mm/dd/yy" }
class BrewPiDevice(models.Model):
"""
BrewPiDevice is the rough equivalent to an individual installation of brewpi-www
"""
class Meta:
verbose_name = "BrewPi Device"
verbose_name_plural = "BrewPi Devices"
TEMP_FORMAT_CHOICES = (('C', 'Celsius'), ('F', 'Fahrenheit'))
DATA_LOGGING_ACTIVE = 'active'
DATA_LOGGING_PAUSED = 'paused'
DATA_LOGGING_STOPPED = 'stopped'
DATA_LOGGING_CHOICES = (
(DATA_LOGGING_ACTIVE, 'Active'),
(DATA_LOGGING_PAUSED, 'Paused'),
(DATA_LOGGING_STOPPED, 'Stopped')
)
DATA_POINT_TIME_CHOICES = (
(10, '10 Seconds'),
(30, '30 Seconds'),
(60, '1 Minute'),
(60*2, '2 Minutes'),
(60*5, '5 Minutes'),
(60*10, '10 Minutes'),
(60*30, '30 Minutes'),
(60*60, '1 Hour'),
)
BOARD_TYPE_CHOICES = (
('uno', 'Arduino Uno (or compatible)'),
('esp8266', 'ESP8266'),
('leonardo', 'Arduino Leonardo'),
('core', 'Core'),
('photon', 'Photon'),
)
CONNECTION_SERIAL = 'serial'
CONNECTION_WIFI = 'wifi'
CONNECTION_TYPE_CHOICES = (
(CONNECTION_SERIAL, 'Serial (Arduino and others)'),
(CONNECTION_WIFI, 'WiFi (ESP8266)'),
)
STATUS_ACTIVE = 'active'
STATUS_UNMANAGED = 'unmanaged'
STATUS_DISABLED = 'disabled'
STATUS_UPDATING = 'updating'
STATUS_CHOICES = (
(STATUS_ACTIVE, 'Active, Managed by Circus'),
(STATUS_UNMANAGED, 'Active, NOT managed by Circus'),
(STATUS_DISABLED, 'Explicitly disabled, cannot be launched'),
(STATUS_UPDATING, 'Disabled, pending an update'),
)
device_name = models.CharField(max_length=48, help_text="Unique name for this device", unique=True)
# This is set at the device level, and should probably be read from the device as well. Going to include here
# to cache it.
temp_format = models.CharField(max_length=1, choices=TEMP_FORMAT_CHOICES, default='C', help_text="Temperature units")
data_point_log_interval = models.IntegerField(default=30, choices=DATA_POINT_TIME_CHOICES,
help_text="Time between logged data points")
######## The following are used if we are loading the configuration directly from the database.
useInetSocket = models.BooleanField(default=False, help_text="Whether or not to use an internet socket (rather than local)")
socketPort = models.IntegerField(default=2222, validators=[MinValueValidator(10,"Port must be 10 or higher"),
MaxValueValidator(65535, "Port must be 65535 or lower")],
help_text="The internet socket to use (only used if useInetSocket above is "
"\"True\")")
socketHost = models.CharField(max_length=128, default="localhost", help_text="The interface to bind for the "
"internet socket (only used if "
"useInetSocket above is \"True\")")
logging_status = models.CharField(max_length=10, choices=DATA_LOGGING_CHOICES, default='stopped', help_text="Data logging status")
serial_port = models.CharField(max_length=255, help_text="Serial port to which the BrewPi device is connected",
default="auto")
serial_alt_port = models.CharField(max_length=255, help_text="Alternate serial port to which the BrewPi device is connected (??)",
default="None")
udev_serial_number = models.CharField(max_length=255, blank=True,
help_text="USB Serial ID number for autodetection of serial port", default="")
prefer_connecting_via_udev = models.BooleanField(default=True, help_text="Prefer to connect to the device with the correct serial number instead of the serial_port")
board_type = models.CharField(max_length=10, default="uno", choices=BOARD_TYPE_CHOICES, help_text="Board type to which BrewPi is connected")
# Replaces the 'do not run' file used by brewpi-script
status = models.CharField(max_length=15, default=STATUS_ACTIVE, choices=STATUS_CHOICES)
socket_name = models.CharField(max_length=25, default="BEERSOCKET",
help_text="Name of the file-based socket (Only used if useInetSocket is False)")
connection_type = models.CharField(max_length=15, default='serial', choices=CONNECTION_TYPE_CHOICES,
help_text="Type of connection between the Raspberry Pi and the hardware")
wifi_host = models.CharField(max_length=40, default='None',
help_text="mDNS host name or IP address for WiFi connected hardware (only used if " +
"connection_type is wifi)")
wifi_host_ip = models.CharField(max_length=46, blank=True, default='', help_text="Cached IP address in case of mDNS issues (only used if connection_type is wifi)")
wifi_port = models.IntegerField(default=23, validators=[MinValueValidator(10,"Port must be 10 or higher"),
MaxValueValidator(65535, "Port must be 65535 or lower")],
help_text="The internet socket to use (only used if connection_type is wifi)")
# The beer that is currently active & being logged
active_beer = models.ForeignKey('Beer', null=True, blank=True, default=None, on_delete=models.SET_NULL)
# The active fermentation profile (if any!)
active_profile = models.ForeignKey('FermentationProfile', null=True, blank=True, default=None, on_delete=models.SET_NULL)
# The time the fermentation profile was applied (all our math is based on this)
time_profile_started = models.DateTimeField(null=True, blank=True, default=None)
def is_temp_controller(self): # This is a hack used in the site template so we can display relevant functionality
return True
    def get_profile_temp(self) -> Optional[float]:
# If the object is inconsistent, don't return anything
if self.active_profile is None:
return None
if self.time_profile_started is None:
return None
# self.sync_temp_format() # Before we update the profile temp, make sure our math is consistent
return self.active_profile.profile_temp(self.time_profile_started, self.temp_format)
def is_past_end_of_profile(self):
if self.active_profile is None:
return None
if self.time_profile_started is None:
return None
# self.sync_temp_format() # Before we update the profile temp, make sure our math is consistent
return self.active_profile.past_end_of_profile(self.time_profile_started)
# Other things that aren't persisted in the database
# available_devices = []
# installed_devices = []
# devices_are_loaded = False
def __str__(self):
# TODO - Make this test if the name is unicode, and return a default name if that is the case
return self.device_name
def __unicode__(self):
return self.device_name
def read_lcd_from_device(self):
pass
def get_active_beer_name(self):
if self.active_beer:
return self.active_beer.name
else:
return ""
# I'm torn as to whether or not to move all of this out to another class. Leaving everything socket-related here
# for now.
def open_socket(self):
if self.useInetSocket:
this_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.SOL_TCP)
else:
this_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if this_socket:
try:
if self.useInetSocket:
this_socket.connect((self.socketHost, self.socketPort))
else:
this_socket.connect(self.socket_name)
            except:
                # Connection failed - close the socket and return None so that
                # callers' `if this_socket:` checks fail cleanly.
                this_socket.close()
                this_socket = None
        return this_socket
@staticmethod
def write_to_socket(this_socket, message):
try:
# Python 3 readiness
encoded_message=message.encode(encoding="cp437")
this_socket.sendall(encoded_message)
return True
except:
return False
@staticmethod
def read_from_socket(this_socket):
try:
encoded_message = this_socket.recv(65536)
return encoded_message.decode(encoding="cp437")
except:
return None
def send_message(self, message, message_extended=None, read_response=False):
message_to_send = message
if message_extended is not None:
message_to_send += "=" + message_extended
this_socket = self.open_socket()
if this_socket:
if self.write_to_socket(this_socket, message_to_send):
if read_response:
return self.read_from_socket(this_socket)
else:
return True
return False
def read_lcd(self):
try:
lcd_text = json.loads(self.send_message("lcd", read_response=True))
except:
lcd_text = ["Cannot receive", "LCD text from", "Controller/Script"]
# Due to the various codepage swaps, we're now receiving the raw degree symbol (0xB0) back when we poll the
# LCD under Python 3. Let's replace it with "°" for display in HTML
deg_symbol = bytes([0xB0]).decode(encoding="cp437")
sanitized_text = [n.replace(deg_symbol, "°") for n in lcd_text]
return sanitized_text
def is_connected(self):
# Tests if we're connected to the device via BrewPi-Script
try:
_ = json.loads(self.send_message("lcd", read_response=True))
except:
return False
return True
def retrieve_version(self):
try:
version_data = json.loads(self.send_message("getVersion", read_response=True))
except:
return None
return version_data
def is_legacy(self, version=None):
        if version is None:
version = self.retrieve_version()
if not version:
# If we weren't passed a version & can't load from the device itself, return None
return None # There's probably a better way of doing this.
if version['version'][:3] == "0.2":
return True
return False
def retrieve_control_constants(self):
version = self.retrieve_version()
if version:
if self.is_legacy(version):
# If we're dealing with a legacy controller, we need to work with the old control constants.
control_constants = OldControlConstants()
control_constants.load_from_controller(self)
else:
# Otherwise, we need to work with the NEW control constants
control_constants = NewControlConstants()
control_constants.load_from_controller(self)
# Returning both the control constants structure as well as which structure we ended up using
control_constants.controller = self
return control_constants, self.is_legacy(version=version)
return None, None
def request_device_refresh(self):
self.send_message("refreshDeviceList") # refreshDeviceList refreshes the cache within brewpi-script
time.sleep(0.1)
# We don't persist the "sensor" (onewire/pin) list in the database, so we always have to load it from the
# controller
def load_sensors_from_device(self):
# Note - getDeviceList actually is reading the cache from brewpi-script - not the firmware itself
loop_number = 1
device_response = self.send_message("getDeviceList", read_response=True)
# If the cache wasn't up to date, request that brewpi-script refresh it
if device_response == "device-list-not-up-to-date":
self.request_device_refresh()
# This can take a few seconds. Periodically poll brewpi-script to try to get a response.
while device_response == "device-list-not-up-to-date" and loop_number <= 4:
time.sleep(5)
device_response = self.send_message("getDeviceList", read_response=True)
loop_number += 1
if not device_response or device_response == "device-list-not-up-to-date":
self.all_pins = None
self.available_devices = None
self.installed_devices = None
if not device_response:
# We weren't able to reach brewpi-script
self.error_message = "Unable to reach brewpi-script. Try restarting brewpi-script."
else:
# We were able to reach brewpi-script, but it wasn't able to reach the controller
self.error_message = "BrewPi-script wasn't able to load sensors from the controller. "
self.error_message += "Try restarting brewpi-script. If that fails, try restarting the controller."
return False # False
# Devices loaded
devices = json.loads(device_response)
self.all_pins = PinDevice.load_all_from_pinlist(devices['pinList'])
self.available_devices = SensorDevice.load_all_from_devicelist(devices['deviceList']['available'], self.all_pins, self)
self.installed_devices = SensorDevice.load_all_from_devicelist(devices['deviceList']['installed'], self.all_pins, self)
# Loop through the installed devices to set up the special links to the key ones
for this_device in self.installed_devices:
if this_device.device_function == SensorDevice.DEVICE_FUNCTION_CHAMBER_DOOR: # (1, 'CHAMBER_DOOR'),
self.door_pin = this_device
elif this_device.device_function == SensorDevice.DEVICE_FUNCTION_CHAMBER_HEAT: # (2, 'CHAMBER_HEAT'),
self.heat_pin = this_device
elif this_device.device_function == SensorDevice.DEVICE_FUNCTION_CHAMBER_COOL: # (3, 'CHAMBER_COOL'),
self.cool_pin = this_device
elif this_device.device_function == SensorDevice.DEVICE_FUNCTION_CHAMBER_TEMP: # (5, 'CHAMBER_TEMP'),
self.chamber_sensor = this_device
elif this_device.device_function == SensorDevice.DEVICE_FUNCTION_CHAMBER_ROOM_TEMP: # (6, 'CHAMBER_ROOM_TEMP'),
self.room_sensor = this_device
elif this_device.device_function == SensorDevice.DEVICE_FUNCTION_BEER_TEMP: # (9, 'BEER_TEMP'),
self.beer_sensor = this_device
return True
# TODO - Determine if we care about controlSettings
# # Retrieve the control settings from the controller
# def retrieve_control_settings(self):
# version = self.retrieve_version()
# if version:
# if self.is_legacy(version):
# # If we're dealing with a legacy controller, we need to work with the old control constants.
# control_settings = OldControlSettings()
# control_settings.load_from_controller(self)
# else:
# # Otherwise, we need to work with the NEW control constants
# control_settings = OldControlSettings()
# control_settings.load_from_controller(self)
#
# # Returning both the control constants structure as well as which structure we ended up using
# control_settings.controller = self
# return control_settings, self.is_legacy(version=version)
# return None, None
def sync_temp_format(self) -> bool:
# This queries the controller to see if we have the correct tempFormat set (If it matches what is specified
# in the device definition above). If it doesn't, we overwrite what is on the device to match what is in the
# device definition.
control_constants, legacy_mode = self.retrieve_control_constants()
if control_constants is None:
return False
if control_constants.tempFormat != self.temp_format: # The device has the wrong tempFormat - We need to update
control_constants.tempFormat = self.temp_format
if legacy_mode:
if self.temp_format == 'C':
control_constants.tempSetMax = 35.0
control_constants.tempSetMin = -8.0
elif self.temp_format == 'F':
control_constants.tempSetMax = 90.0
control_constants.tempSetMin = 20.0
else:
return False # If we can't define a good max/min, don't do anything
else:
# TODO - Fix/expand this when we add "modern" controller support
return False
control_constants.save_to_controller(self, "tempFormat")
if legacy_mode:
control_constants.save_to_controller(self, "tempSetMax")
control_constants.save_to_controller(self, "tempSetMin")
return True
return False
def get_temp_control_status(self):
device_mode = self.send_message("getMode", read_response=True)
control_status = {}
if not device_mode: # We were unable to read from the device (this also covers None)
control_status['device_mode'] = "unable_to_connect" # Not sure if I want to pass the message back this way
return control_status
# If we could connect to the device, force-sync the temp format
self.sync_temp_format()
if device_mode == 'o': # Device mode is off
control_status['device_mode'] = "off"
elif device_mode == 'b': # Device mode is beer constant
control_status['device_mode'] = "beer_constant"
control_status['set_temp'] = self.send_message("getBeer", read_response=True)
elif device_mode == 'f': # Device mode is fridge constant
control_status['device_mode'] = "fridge_constant"
control_status['set_temp'] = self.send_message("getFridge", read_response=True)
elif device_mode == 'p': # Device mode is beer profile
control_status['device_mode'] = "beer_profile"
else:
# No idea what the device mode is
logger.error("Invalid device mode '{}'".format(device_mode))
return control_status
def reset_profile(self):
if self.active_profile is not None:
self.active_profile = None
if self.time_profile_started is not None:
self.time_profile_started = None
self.save()
def set_temp_control(self, method, set_temp=None, profile=None, profile_startat=None):
if method == "off":
self.reset_profile()
self.send_message("setOff")
elif method == "beer_constant":
if set_temp is not None:
self.reset_profile()
self.send_message("setBeer", str(set_temp))
else:
error_message = "Device {} set to beer_constant without a setpoint".format(self.device_name)
logger.error(error_message)
raise ValueError(error_message)
elif method == "fridge_constant":
if set_temp is not None:
self.reset_profile()
self.send_message("setFridge", str(set_temp))
else:
error_message = "Device {} set to fridge_constant without a setpoint".format(self.device_name)
logger.error(error_message)
raise ValueError(error_message)
elif method == "beer_profile":
try:
ferm_profile = FermentationProfile.objects.get(id=profile)
except:
error_message = "Device {} set to beer_profile {} but the profile could not be located".format(
self.device_name, profile)
logger.error(error_message)
raise ValueError(error_message)
if not ferm_profile.is_assignable():
error_message = "Device {} set to beer_profile {} but the profile isn't assignable".format(
self.device_name, profile)
logger.error(error_message)
raise ValueError(error_message)
if profile_startat is not None:
start_at = profile_startat
else:
start_at = datetime.timedelta(seconds=0) # Set start_at to have no effect
self.active_profile = ferm_profile
timezone_obj = pytz.timezone(getattr(settings, 'TIME_ZONE', 'UTC'))
# We're subtracting start_at because we want to start in the past
self.time_profile_started = timezone.now() - start_at
self.save()
transaction.on_commit(lambda: self.send_message("setActiveProfile", str(self.active_profile.id)))
return True # If we made it here, return True (we did our job)
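# Illustrative usage of set_temp_control (a hypothetical sketch, not taken from the original module):
#   device.set_temp_control(method="beer_constant", set_temp=20.0)
#   device.set_temp_control(method="beer_profile", profile=some_profile.id,
#                           profile_startat=datetime.timedelta(days=1))  # start the profile one day "in"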
def start_new_brew(self, active_beer):
self.logging_status = self.DATA_LOGGING_ACTIVE
self.active_beer = active_beer
self.save()
transaction.on_commit(lambda: self.send_message("startNewBrew", message_extended=active_beer.name, read_response=False))
def manage_logging(self, status):
if status == 'stop':
if hasattr(self, 'gravity_sensor') and self.gravity_sensor is not None:
# If there is a linked gravity log, stop that as well
self.gravity_sensor.active_log = None
self.gravity_sensor.save()
self.active_beer = None
self.logging_status = self.DATA_LOGGING_STOPPED
self.save()
transaction.on_commit(lambda: self.send_message("stopLogging", read_response=False))
elif status == 'resume':
self.logging_status = self.DATA_LOGGING_ACTIVE
self.save()
transaction.on_commit(lambda: self.send_message("resumeLogging", read_response=False))
elif status == 'pause':
self.logging_status = self.DATA_LOGGING_PAUSED
self.save()
transaction.on_commit(lambda: self.send_message("pauseLogging", read_response=False))
def reset_eeprom(self):
response = self.send_message("resetController") # Reset the controller
time.sleep(1) # Give it 1 second to complete
synced = self.sync_temp_format() # ...then resync the temp format
return synced
def reset_wifi(self) -> bool:
response = self.send_message("resetWiFi") # Reset the controller WiFi settings
time.sleep(1) # Give it 1 second to complete
return True
def restart(self) -> bool:
response = self.send_message("restartController") # Restart the controller
time.sleep(1) # Give it 1 second to complete
return True
def get_control_constants(self):
return json.loads(self.send_message("getControlConstants", read_response=True))
def set_parameters(self, parameters):
return self.send_message("setParameters", json.dumps(parameters))
def get_dashpanel_info(self):
try: # This is apparently failing when being called in a loop for external_push - Wrapping in a try/except so the loop doesn't die
return json.loads(self.send_message("getDashInfo", read_response=True))
except TypeError:
return None
def circus_parameter(self) -> int:
"""Returns the parameter used by Circus to track this device's processes"""
return self.id
def _get_circusmgr(self) -> CircusMgr:
if USE_DOCKER:
return CircusMgr(circus_endpoint="tcp://127.0.0.1:7555")
else:
return CircusMgr()
def start_process(self):
"""Start this device process, raises CircusException if error"""
fc = self._get_circusmgr()
circus_process_name = u"dev-{}".format(self.circus_parameter())
fc.start(name=circus_process_name)
def remove_process(self):
"""Remove this device process, raises CircusException if error"""
fc = self._get_circusmgr()
circus_process_name = u"dev-{}".format(self.circus_parameter())
fc.remove(name=circus_process_name)
def stop_process(self):
"""Stop this device process, raises CircusException if error"""
fc = self._get_circusmgr()
circus_process_name = u"dev-{}".format(self.circus_parameter())
fc.stop(name=circus_process_name)
def restart_process(self):
"""Restart the device process, raises CircusException if error"""
fc = self._get_circusmgr()
circus_process_name = u"dev-{}".format(self.circus_parameter())
fc.restart(name=circus_process_name)
def status_process(self):
"""Status this device process, raises CircusException if error"""
fc = self._get_circusmgr()
circus_process_name = u"dev-{}".format(self.circus_parameter())
status = fc.application_status(name=circus_process_name)
return status
def get_cached_ip(self):
# This only gets called from within BrewPi-script
# I really hate the name of the function, but I can't think of anything else. This basically does three things:
# 1. Looks up the mDNS hostname (if any) set as self.wifi_host and gets the IP address
# 2. Saves that IP address to self.wifi_host_ip (if we were successful in step 1)
# 3. Returns the found IP address (if step 1 was successful), the cached (self.wifi_host_ip) address if it
# wasn't, or 'None' if we don't have a cached address and we weren't able to resolve the hostname
if len(self.wifi_host) > 4:
try:
ip_list = []
ipv6_list = []
ais = socket.getaddrinfo(self.wifi_host, 0, 0, 0, 0)
for result in ais:
if result[0] == socket.AddressFamily.AF_INET:
# IPv4 only
ip_list.append(result[-1][0])
elif result[0] == socket.AddressFamily.AF_INET6:
ipv6_list.append(result[-1][0])
ip_list = list(set(ip_list))
ipv6_list = list(set(ipv6_list))
if len(ip_list) > 0:
resolved_address = ip_list[0]
else:
resolved_address = ipv6_list[0]
# If we were able to find an IP address, save it to the cache
self.wifi_host_ip = resolved_address
self.save()
return resolved_address
except:
# TODO - Add an error message here
if self.wifi_host_ip and len(self.wifi_host_ip) > 6:
# We weren't able to resolve the hostname (self.wifi_host) but we DID have a cached IP address.
# Return that.
return self.wifi_host_ip
else:
return None
# In case of error (or we have no wifi_host)
return None
def get_port_from_udev(self):
# This only gets called from within BrewPi-script
# get_port_from_udev() looks for a USB device connected which matches self.udev_serial_number. If one is found,
# it returns the associated device port. If one isn't found, it returns None (to prevent the cached port from
# being used, and potentially pointing to another, unrelated device)
if self.connection_type != self.CONNECTION_SERIAL:
return self.serial_port # If we're connecting via WiFi, don't attempt autodetection
# If the user elected to not use udev to get the port, just return self.serial_port
if not self.prefer_connecting_via_udev:
return self.serial_port
# If the platform doesn't support udev (isn't Linux) then return self.serial_port as well.
if not udev_integration.valid_platform_for_udev():
return self.serial_port
# TODO - Detect if this is a Fuscus board and return self.serial_port (as well as setting prefer_connecting_via_udev)
# If the udev_serial_number isn't yet set, try setting it
if self.udev_serial_number == "":
if not self.set_udev_from_port():
# If we can't set it (device isn't connected, etc.) then return None
return None
udev_node = udev_integration.get_node_from_serial(self.udev_serial_number)
if udev_node is not None:
# The udev lookup found a device! Return the appropriate serial port.
if self.serial_port != udev_node:
# If the serial port changed, cache it.
self.serial_port = udev_node
self.save()
return udev_node
else:
# The udev lookup failed - return None
return None
def set_udev_from_port(self):
# set_udev_from_port() quickly scans the device connected at self.serial_port and - if found - saves the
# associated udev serial number to the object.
udev_serial_number = udev_integration.get_serial_from_node(self.serial_port)
if udev_serial_number is not None:
self.udev_serial_number = udev_serial_number
self.save()
return True
# We failed to look up the udev serial number.
return False
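# Illustrative connection lookup flow (hypothetical sketch based on the methods above):
#   device = BrewPiDevice.objects.get(device_name="Fermenter 1")  # hypothetical device name
#   port = device.get_port_from_udev()   # serial devices: udev lookup, with the fallbacks described above
#   ip = device.get_cached_ip()          # WiFi devices: mDNS lookup, falling back to the cached wifi_host_ip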
class Beer(models.Model):
# Beers are unique based on the combination of their name & the original device
name = models.CharField(max_length=255, db_index=True,
help_text='Name of the beer being logged (must be unique)')
device = models.ForeignKey(BrewPiDevice, db_index=True, on_delete=models.SET_NULL, null=True,
help_text='The linked temperature control device from which data is logged')
created = models.DateTimeField(default=timezone.now, help_text='When the beer log was initially created')
# format generally should be equal to device.temp_format. We're caching it here specifically so that if the user
# updates the device temp format somehow we will continue to log in the OLD format. We'll need to make a giant
# button that allows the user to convert the log files to the new format if they're different.
format = models.CharField(max_length=1, default='F', help_text='Temperature format to write the logs in')
# model_version is the revision number of the "Beer" and "BeerLogPoint" models, designed to be iterated when any
# change is made to the format/content of the flatfiles that would be written out. The idea is that a separate
# converter could then be written moving between each iteration of model_version that could then be sequentially
# applied to bring a beer log in line with what the model then expects.
# Version 1: Original version
# Version 2: Adds 'state' to 'base_csv' for state plotting
model_version = models.IntegerField(default=2, help_text='Version # used for the logged file format')
gravity_enabled = models.BooleanField(default=False, help_text='Is gravity logging enabled for this beer log?')
def __str__(self):
return self.name
def __unicode__(self):
return self.__str__()
def column_headers(self, which='base_csv', human_readable=False):
if which == 'base_csv':
if human_readable:
headers = ['Log Time', 'Beer Temp', 'Beer Setting', 'Fridge Temp', 'Fridge Setting', 'Room Temp']
else:
headers = ['log_time', 'beer_temp', 'beer_set', 'fridge_temp', 'fridge_set', 'room_temp']
elif which == 'full_csv':
if human_readable:
# Currently unused
headers = ['log_time', 'beer_temp', 'beer_set', 'beer_ann', 'fridge_temp', 'fridge_set', 'fridge_ann',
'room_temp', 'state', 'temp_format', 'associated_beer_id']
else:
headers = ['log_time', 'beer_temp', 'beer_set', 'beer_ann', 'fridge_temp', 'fridge_set', 'fridge_ann',
'room_temp', 'state', 'temp_format', 'associated_beer_id']
else:
return None
# This works because we're appending the gravity data to both logs
if self.gravity_enabled:
if human_readable:
headers.append('Gravity')
headers.append('Gravity Sensor Temp')
else:
headers.append('gravity')
headers.append('grav_temp')
if which == 'base_csv' and self.model_version > 1:
# For model versions 2 and greater, we are appending "state" to the base CSV.
if human_readable:
headers.append('State') # I don't think this gets used anywhere...
else:
headers.append('state')
return headers
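# For example (illustrative), a model_version 2, non-gravity beer produces a base_csv header row of:
#   log_time, beer_temp, beer_set, fridge_temp, fridge_set, room_temp, state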
def base_column_visibility(self):
# TODO - Determine if we want to take some kind of user setting into account (auto-hide room temp, for example)
# headers = [x, 'beer_temp', 'beer_set', 'fridge_temp', 'fridge_set', 'room_temp']
visibility = "[true, true, true, true, true"
# This works because we're appending the gravity data to both logs
if self.gravity_enabled:
visibility += ", true, true"
if self.model_version > 1: # "state" is only appended to the base CSV for model version 2+
visibility += ", false" # Literally the whole point of this code block is to hide "state"
visibility += "]"
return visibility
def column_headers_to_graph_string(self, which='base_csv'):
col_headers = self.column_headers(which, True)
graph_string = ""
for this_header in col_headers:
graph_string += "'" + this_header + "', "
if len(graph_string) > 2:
return graph_string[:-2]
else:
return ""
@staticmethod
def name_is_valid(proposed_name):
# Since we're using self.name in a file path, want to make sure no injection-type attacks can occur.
return bool(re.match("^[a-zA-Z0-9 _-]*$", proposed_name))
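# For example (illustrative values): name_is_valid("Pale Ale 01") is truthy, while
# name_is_valid("../etc/passwd") is falsy because "/" and "." are outside the allowed character class.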
def base_filename(self): # This is the "base" filename used in all the files saved out
# Including the beer ID in the file name to ensure uniqueness (if the user duplicates the name, for example)
if self.name_is_valid(self.name):
return "Device " + str(self.device_id) + " - B" + str(self.id) + " - " + self.name
else:
return "Device " + str(self.device_id) + " - B" + str(self.id) + " - NAME ERROR - "
def full_filename(self, which_file):
base_name = self.base_filename()
if which_file == 'base_csv':
return base_name + "_graph.csv"
elif which_file == 'full_csv':
return base_name + "_full.csv"
elif which_file == 'annotation_json':
return base_name + "_annotations.almost_json"
else:
return None
def data_file_url(self, which_file):
return settings.DATA_URL + self.full_filename(which_file)
def full_csv_url(self):
return self.data_file_url('full_csv')
def full_csv_exists(self) -> bool:
# This is so that we can test if the log exists before presenting the user the option to download it
file_name_base = settings.ROOT_DIR / settings.DATA_ROOT
full_csv_file = file_name_base / self.full_filename('full_csv')
return os.path.isfile(full_csv_file)
def can_log_gravity(self):
if self.gravity_enabled is False:
return False
if self.device.gravity_sensor is None:
return False
return True
# def base_csv_url(self):
# return self.data_file_url('base_csv')
# TODO - Add function to allow conversion of log files between temp formats
# When the user attempts to delete a beer, also delete the log files associated with it.
@receiver(pre_delete, sender=Beer)
def delete_beer(sender, instance, **kwargs):
file_name_base = settings.ROOT_DIR / settings.DATA_ROOT
base_csv_file = file_name_base / instance.full_filename('base_csv')
full_csv_file = file_name_base / instance.full_filename('full_csv')
annotation_json = file_name_base / instance.full_filename('annotation_json')
for this_filepath in [base_csv_file, full_csv_file, annotation_json]:
try:
os.remove(this_filepath)
except OSError:
pass
class BeerLogPoint(models.Model):
"""
BeerLogPoint contains the individual temperature log points we're saving
"""
class Meta:
managed = False # Since we're using flatfiles rather than a database
verbose_name = "Beer Log Point"
verbose_name_plural = "Beer Log Points"
ordering = ['log_time']
STATE_CHOICES = (
(0, 'IDLE'),
(1, 'STATE_OFF'),
(2, 'DOOR_OPEN'),
(3, 'HEATING'),
(4, 'COOLING'),
(5, 'WAITING_TO_COOL'),
(6, 'WAITING_TO_HEAT'),
(7, 'WAITING_FOR_PEAK_DETECT'),
(8, 'COOLING_MIN_TIME'),
(9, 'HEATING_MIN_TIME'),
)
TEMP_FORMAT_CHOICES = (('C', 'Celsius'), ('F', 'Fahrenheit'))
beer_temp = models.DecimalField(max_digits=13, decimal_places=10, null=True)
beer_set = models.DecimalField(max_digits=5, decimal_places=2, null=True)
beer_ann = models.CharField(max_length=255, null=True)
fridge_temp = models.DecimalField(max_digits=13, decimal_places=10, null=True)
fridge_set = models.DecimalField(max_digits=5, decimal_places=2, null=True)
fridge_ann = models.CharField(max_length=255, null=True)
room_temp = models.DecimalField(max_digits=13, decimal_places=10, null=True)
state = models.IntegerField(choices=STATE_CHOICES, default=0)
log_time = models.DateTimeField(default=timezone.now, db_index=True)
# Adding temp_format here so we can do conversions later on if we want to
temp_format = models.CharField(max_length=1, choices=TEMP_FORMAT_CHOICES, default='C')
associated_beer = models.ForeignKey(Beer, db_index=True, on_delete=models.DO_NOTHING)
gravity = models.DecimalField(max_digits=5, decimal_places=3, null=True)
gravity_temp = models.DecimalField(max_digits=13, decimal_places=10, null=True)
def has_gravity_enabled(self):
# Just punting this upstream
if self.associated_beer_id is not None:
return self.associated_beer.gravity_enabled
else:
return False
def can_log_gravity(self):
if self.associated_beer_id is not None:
return self.associated_beer.can_log_gravity()
else:
return False
def enrich_gravity_data(self):
# enrich_gravity_data is called to enrich this data point with the relevant gravity data
# Only relevant if self.has_gravity_enabled is true (The associated_beer has gravity logging enabled)
if self.has_gravity_enabled():
if not self.can_log_gravity():
# We have gravity enabled, but we can't actually log gravity. Stop logging, as this is an issue.
self.associated_beer.device.manage_logging(status='stop')
raise RuntimeError("Gravity enabled, but gravity sensor doesn't exist")
self.gravity = self.associated_beer.device.gravity_sensor.retrieve_loggable_gravity()
temp, temp_format = self.associated_beer.device.gravity_sensor.retrieve_loggable_temp()
if self.temp_format != temp_format:
if temp_format is None:
# No data exists in redis yet for this sensor
temp = None
elif self.temp_format == 'C' and temp_format == 'F':
# Convert Fahrenheit to Celsius
temp = (temp-32) * 5 / 9
elif self.temp_format == 'F' and temp_format == 'C':
# Convert Celsius to Fahrenheit
temp = (temp*9/5) + 32
else:
logger.error("BeerLogPoint.enrich_gravity_data called with unsupported temp format {}".format(self.temp_format))
self.gravity_temp = temp
def data_point(self, data_format='base_csv', set_defaults=True):
# Everything gets stored in UTC and then converted back on the fly
utc_tz = pytz.timezone("UTC")
time_value = self.log_time.astimezone(utc_tz).strftime('%Y/%m/%d %H:%M:%SZ') # Adding 'Zulu' designation
if set_defaults:
beerTemp = self.beer_temp or 0
fridgeTemp = self.fridge_temp or 0
roomTemp = self.room_temp or 0
beerSet = self.beer_set or 0
fridgeSet = self.fridge_set or 0
gravity_log = self.gravity or 0 # We'll set this just in case
gravity_temp = self.gravity_temp or 0 # We'll set this just in case
else:
beerTemp = self.beer_temp or None
fridgeTemp = self.fridge_temp or None
roomTemp = self.room_temp or None
beerSet = self.beer_set or None
fridgeSet = self.fridge_set or None
gravity_log = self.gravity or None # We'll set this just in case
gravity_temp = self.gravity_temp or None # We'll set this just in case
if self.beer_ann is not None:
combined_annotation = self.beer_ann
elif self.fridge_ann is not None:
combined_annotation = self.fridge_ann
else:
combined_annotation = ""
if data_format == 'base_csv':
if not self.has_gravity_enabled():
if self.associated_beer.model_version > 1:
return [time_value, beerTemp, beerSet, fridgeTemp, fridgeSet, roomTemp, self.state]
else:
return [time_value, beerTemp, beerSet, fridgeTemp, fridgeSet, roomTemp]
else:
if self.associated_beer.model_version > 1:
return [time_value, beerTemp, beerSet, fridgeTemp, fridgeSet, roomTemp, gravity_log, gravity_temp,
self.state]
else:
return [time_value, beerTemp, beerSet, fridgeTemp, fridgeSet, roomTemp, gravity_log, gravity_temp]
elif data_format == 'full_csv':
if not self.has_gravity_enabled():
return [time_value, beerTemp, beerSet, self.beer_ann, fridgeTemp, fridgeSet, self.fridge_ann,
roomTemp, self.state, self.temp_format, self.associated_beer_id]
else:
return [time_value, beerTemp, beerSet, self.beer_ann, fridgeTemp, fridgeSet, self.fridge_ann,
roomTemp, self.state, self.temp_format, self.associated_beer_id, gravity_log, gravity_temp]
elif data_format == 'annotation_json':
retval = []
if self.beer_ann is not None:
retval.append({'series': 'beer_temp', 'x': time_value, 'shortText': self.beer_ann[:1],
'text': self.beer_ann})
if self.fridge_ann is not None:
retval.append({'series': 'beer_temp', 'x': time_value, 'shortText': self.fridge_ann[:1],
'text': self.fridge_ann})
return retval
else:
# Should never hit this
logger.warning("Invalid data format '{}' provided to BeerLogPoint.data_point".format(data_format))
def save(self, *args, **kwargs):
# Don't repeat yourself
def check_and_write_headers(path, col_headers):
if not os.path.exists(path):
with open(path, 'w') as f:
writer = csv.writer(f)
writer.writerow(col_headers)
def write_data(path, row_data):
with open(path, 'a') as f:
writer = csv.writer(f)
writer.writerow(row_data)
def check_and_write_annotation_json_head(path):
if not os.path.exists(path):
with open(path, 'w') as f:
f.write("[\r\n")
return False
else:
return True
def write_annotation_json(path, annotation_data, write_comma=True):
# annotation_data is actually an array of potential annotations. We'll loop through them & write them out
with open(path, 'a') as f:
for this_annotation in annotation_data:
if write_comma: # We only want to do this once per run, regardless of the size of annotation_data
f.write(',\r\n')
write_comma = False
f.write(' {')
f.write('"series": "{}", "x": "{}",'.format(this_annotation['series'], this_annotation['x']))
f.write(' "shortText": "{}", "text": "{}"'.format(this_annotation['shortText'],
this_annotation['text']))
f.write('}')
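# Each annotation entry written by write_annotation_json ends up looking like the following
# (illustrative values only, matching the f.write() calls above):
#   {"series": "beer_temp", "x": "2021/01/01 12:00:00Z", "shortText": "S", "text": "Set beer temp to 18.0"}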
# This really isn't the right place to do this, but I don't know of anywhere else to add this check.
# TODO - Figure out if there is somewhere better to do this
if self.has_gravity_enabled() and self.associated_beer.device.gravity_sensor is None:
# We're logging a gravity enabled beer, but there is no gravity sensor to pull data from. Stop logging.
if self.associated_beer.device.active_beer == self.associated_beer:
logger.error('Gravity sensor was deleted without cessation of logging on device {}. Logging has been force-stopped within BeerLogPoint.save()'.format(self.associated_beer.device_id))
self.associated_beer.device.manage_logging(status='stop')
return False
if self.associated_beer_id is not None:
file_name_base = settings.ROOT_DIR / settings.DATA_ROOT
base_csv_file = file_name_base / self.associated_beer.full_filename('base_csv')
full_csv_file = file_name_base / self.associated_beer.full_filename('full_csv')
annotation_json = file_name_base / self.associated_beer.full_filename('annotation_json')
# Write out headers (if the files don't exist)
check_and_write_headers(base_csv_file, self.associated_beer.column_headers('base_csv'))
check_and_write_headers(full_csv_file, self.associated_beer.column_headers('full_csv'))
# And then write out the data
write_data(base_csv_file, self.data_point('base_csv'))
write_data(full_csv_file, self.data_point('full_csv'))
# Next, do the json file
annotation_data = self.data_point('annotation_json')
if len(annotation_data) > 0: # Not all log points come with annotation data
json_existed = check_and_write_annotation_json_head(annotation_json)
write_annotation_json(annotation_json, annotation_data, json_existed)
# super(BeerLogPoint, self).save(*args, **kwargs)
# A model representing the fermentation profile as a whole
class FermentationProfile(models.Model):
# Status Choices
STATUS_ACTIVE = 1
STATUS_PENDING_DELETE = 2
STATUS_CHOICES = (
(STATUS_ACTIVE, 'Active'),
(STATUS_PENDING_DELETE, 'Pending Delete'),
)
# Profile Type Choices
PROFILE_STANDARD = "Standard Profile"
PROFILE_SMART = "Smart Profile"
PROFILE_TYPE_CHOICES = (
(PROFILE_STANDARD, 'Standard Profile'),
(PROFILE_SMART, 'Smart Profile (Unimplemented)'),
)
# Export/Import Strings
EXPORT_LEFT_WALL = "| "
EXPORT_RIGHT_WALL = " |"
EXPORT_COL_SEPARATOR = " | "
EXPORT_COL_SEPARATOR_REGEX = r" \| "
EXPORT_ROW_SEPARATOR = "="
# Fields
name = models.CharField(max_length=128)
status = models.IntegerField(choices=STATUS_CHOICES, default=STATUS_ACTIVE)
profile_type = models.CharField(max_length=32, default=PROFILE_STANDARD, help_text="Type of temperature profile")
notes = models.TextField(default="", blank=True, null=False,
help_text="Notes about the fermentation profile (Optional)")
def __str__(self):
return self.name
def __unicode__(self):
return self.name
# Test if this fermentation profile is currently being used by a beer
def currently_in_use(self) -> bool:
try:
num_devices_currently_using = BrewPiDevice.objects.filter(active_profile=self).count()
except:
num_devices_currently_using = 0
if num_devices_currently_using > 0:
return True
else:
return False
def is_pending_delete(self):
return self.status == self.STATUS_PENDING_DELETE
# An assignable profile needs to be active and have setpoints
def is_assignable(self) -> bool:
if self.status != self.STATUS_ACTIVE:
return False
else:
if not self.fermentationprofilepoint_set.exists(): # A related manager is never None - check for missing points instead
return False
return True
# If we attempt to delete a profile that is in use, we instead change the status. This runs through profiles in
# this status and deletes those that are no longer in use.
@classmethod
def cleanup_pending_delete(cls):
profiles_pending_delete = cls.objects.filter(status=cls.STATUS_PENDING_DELETE)
for profile in profiles_pending_delete:
if not profile.currently_in_use():
profile.delete()
# This function is designed to create a more "human readable" version of a temperature profile (to help people
# better understand what a given profile is actually going to do).
# I would prefer to implement this as part of a template (given that it's honestly display logic) but the Django
# template language doesn't provide quite what I would need to pull it off.
def to_english(self):
profile_points = self.fermentationprofilepoint_set.order_by('ttl')
description = []
past_first_point = False # There's guaranteed to be a better way to do this
previous_setpoint = 0.0
previous_ttl = 0.0
if len(profile_points) < 1:
description.append("This profile contains no setpoints and cannot be assigned.")
# TODO - Make the timedelta objects more human readable (I don't like the 5:20:30 format that much)
for this_point in profile_points:
if not past_first_point:
desc_text = "Start off by heating/cooling to {}° {}".format(this_point.temp_to_preferred(), config.TEMPERATURE_FORMAT)
if this_point.ttl == datetime.timedelta(seconds=0):
desc_text += "."
else:
desc_text += " and hold this temperature for {}".format(this_point.ttl)
description.append(desc_text)
previous_setpoint = this_point.temp_to_preferred()
previous_ttl = this_point.ttl
past_first_point = True
else:
if previous_setpoint == this_point.temp_to_preferred(): # Compare in the same (preferred) format as previous_setpoint
desc_text = "Hold this temperature for {} ".format((this_point.ttl - previous_ttl))
desc_text += "(until {} after the profile was assigned).".format(this_point.ttl)
else:
if previous_setpoint > this_point.temp_to_preferred():
desc_text = "Cool to"
else: # If previous_setpoint is less than the current setpoint
desc_text = "Heat to"
# Breaking this up to reduce line length
desc_text += " {}° {} ".format(this_point.temp_to_preferred(), config.TEMPERATURE_FORMAT)
desc_text += "over the next {} ".format(this_point.ttl - previous_ttl)
desc_text += "(reaching this temperature {}".format(this_point.ttl)
desc_text += " after the profile was assigned)."
description.append(desc_text)
previous_setpoint = this_point.temp_to_preferred()
previous_ttl = this_point.ttl
if past_first_point:
desc_text = "Finally, permanently hold the temperature at {}° {}.".format(previous_setpoint, config.TEMPERATURE_FORMAT)
description.append(desc_text)
return description
# profile_temp replaces brewpi-script/temperatureProfile.py, and is intended to be called by
# get_profile_temp from BrewPiDevice
def profile_temp(self, time_started, temp_format) -> float:
# temp_format in this case is the temperature format active on BrewPiDevice. This will force conversion from
# the profile point's format to the device's format.
profile_points = self.fermentationprofilepoint_set.order_by('ttl')
past_first_point = False # There's guaranteed to be a better way to do this
previous_setpoint = Decimal("0.0")
previous_ttl = datetime.timedelta(seconds=0) # Keep this a timedelta so arithmetic with time_started/ttl stays valid
current_time = timezone.now()
for this_point in profile_points:
if not past_first_point:
# If we haven't hit the first TTL yet, we are in the initial lag period where we hold a constant
# temperature. Return the temperature setting
if current_time < (time_started + this_point.ttl):
return float(this_point.convert_temp(temp_format))
past_first_point = True
else:
# Test if we are in this period
if current_time < (time_started + this_point.ttl):
# We are - Check if we need to interpolate, or if we can just use the static temperature
if this_point.convert_temp(temp_format) == previous_setpoint: # We can just use the static temperature
return float(this_point.convert_temp(temp_format))
else: # We have to interpolate
duration = this_point.ttl.total_seconds() - previous_ttl.total_seconds()
delta = (this_point.convert_temp(temp_format) - previous_setpoint)
slope = float(delta) / duration
seconds_into_point = (current_time - (time_started + previous_ttl)).total_seconds()
return round(seconds_into_point * slope + float(previous_setpoint), 1)
previous_setpoint = this_point.convert_temp(temp_format)
previous_ttl = this_point.ttl
# If we hit this point, we looped through all the setpoints & aren't between two (or on the first one)
# That is to say - we're at the end. Just return the last setpoint.
return float(previous_setpoint) # Match the declared float return type
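# Worked example of the interpolation above (hypothetical numbers, assuming the device and profile share
# the same temp format): with a previous point at ttl=1 day / 20.0 and the current point at ttl=3 days / 10.0,
# evaluated 2 days after time_started:
#   duration = (3 days - 1 day) = 172800 s, delta = 10.0 - 20.0 = -10.0, slope = -10.0 / 172800
#   seconds_into_point = (2 days - 1 day) = 86400 s
#   returned setpoint = round(86400 * slope + 20.0, 1) = 15.0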
# past_end_of_profile allows us to test if we're in the last stage of a profile (which is effectively beer constant
# mode) so we can switch to explicitly be in beer constant mode
def past_end_of_profile(self, time_started):
current_time = timezone.now()
last_profile_point = self.fermentationprofilepoint_set.order_by('-ttl')[:1]
if last_profile_point:
if current_time >= (time_started + last_profile_point[0].ttl):
return True
else:
return False
else:
# There are no profile points for us to test against
return None
def to_export(self):
# to_export generates a somewhat readable, machine interpretable representation of a fermentation profile
def pad_to_width(string, width):
if len(string) < width:
for i in range(len(string), width):
string += " "
return string
def add_row_separator(width, no_walls=False):
ret_string = ""
if not no_walls:
separator_width = width + len(self.EXPORT_LEFT_WALL) + len(self.EXPORT_RIGHT_WALL)
else:
separator_width = width
for i in range(separator_width):
ret_string += self.EXPORT_ROW_SEPARATOR
return ret_string + "\r\n"
profile_type = self.profile_type # For future compatibility
export_string = ""
max_ttl_string = "" # To enable me being lazy
max_ttl_length = 0 # max_ttl_length is the maximum size of the left (ttl) column of the profile
interior_width = 0 # Interior width is the interior size that can be occupied by data (wall to wall)
point_set = self.fermentationprofilepoint_set.order_by('ttl')
# We need to check there are any point_set yet
if len(point_set) > 0:
max_ttl_string = max([x.ttl_to_string(True) for x in point_set], key=len)
max_ttl_length = len(max_ttl_string)
# Set interior_width to the maximum interior width that we might need. This can be one of four things:
# The length of the profile_type
# The length of the profile.name
# The maximum length of the two columns (max_ttl_length + self.EXPORT_COL_SEPARATOR + "-100.00 C"
# The minimum table width (currently 30)
interior_width = len(max([profile_type, self.name, (max_ttl_string + self.EXPORT_COL_SEPARATOR + "-100.00 C"),
add_row_separator(30,no_walls=True)], key=len))
# The header looks like this:
# ===============================
# | Profile Name |
# | Profile Type |
# ===============================
export_string += add_row_separator(interior_width)
export_string += self.EXPORT_LEFT_WALL + pad_to_width(self.name, interior_width) + self.EXPORT_RIGHT_WALL + "\r\n"
export_string += self.EXPORT_LEFT_WALL + pad_to_width(profile_type, interior_width) + self.EXPORT_RIGHT_WALL + "\r\n"
export_string += add_row_separator(interior_width)
if profile_type == self.PROFILE_STANDARD:
# For PROFILE_STANDARD profiles the body looks like this:
# ===============================
# | 3d4h | 72.00 F |
# | 6d | 64.00 F |
# ===============================
for this_point in point_set:
point_string = pad_to_width(this_point.ttl_to_string(short_code=True), max_ttl_length)
point_string += self.EXPORT_COL_SEPARATOR + str(this_point.temperature_setting) + " " + this_point.temp_format
export_string += self.EXPORT_LEFT_WALL + pad_to_width(point_string, interior_width) + self.EXPORT_RIGHT_WALL + "\r\n"
export_string += add_row_separator(interior_width)
return export_string
@classmethod
def import_from_text(cls, import_string):
# Since we're going to loop through the entire profile in one go, track what parts of the profile we've captured
found_initial_separator = False
profile_name = u""
profile_type = u""
found_row_split = False
profile_points = []
found_profile_terminator = False
for this_row in iter(import_string.splitlines()):
if not found_initial_separator:
if this_row.strip()[:10] == u"==========":
found_initial_separator = True
elif profile_name == u"":
profile_name = this_row.strip()[len(cls.EXPORT_LEFT_WALL):(len(this_row)-len(cls.EXPORT_RIGHT_WALL))].strip()
if len(profile_name) > 128:
raise ValueError("Imported profile name is too long")
elif profile_type == u"":
profile_type = this_row.strip()[len(cls.EXPORT_LEFT_WALL):(len(this_row)-len(cls.EXPORT_RIGHT_WALL))].strip()
if profile_type not in [x for x, _ in cls.PROFILE_TYPE_CHOICES]:
raise ValueError("Invalid profile type specified, or missing initial row separator")
elif not found_row_split:
if this_row.strip()[:10] == u"==========":
found_row_split = True
else:
raise ValueError("Unable to locate divider between header & profile point list")
elif not found_profile_terminator:
if this_row.strip()[:10] == u"==========":
# We've found the profile terminator - tag found_profile_terminator and break
found_profile_terminator = True
break
else:
# Before we do anything else, strip out the actual data (remove left/right wall & whitespace)
profile_data = this_row.strip()[len(cls.EXPORT_LEFT_WALL):(len(this_row)-len(cls.EXPORT_RIGHT_WALL))].strip()
try:
if profile_type == cls.PROFILE_STANDARD:
# For PROFILE_STANDARD profiles the body looks like this:
# ===============================
# | 3d 4h | 72.00 F |
# | 6d | 64.00 F |
# ===============================
point_pattern = r"(?P<time_str>[0-9ywdhms]+)[ ]*" + cls.EXPORT_COL_SEPARATOR_REGEX + \
r"(?P<temp_str>[0-9\.]+) (?P<temp_fmt>[CF]{1})"
else:
raise ValueError("Unsupported profile type specified")
point_regex = re.compile(point_pattern)
point_matches = point_regex.finditer(this_row)
except:
raise ValueError("{} isn't in a valid format for conversion".format(profile_data))
for this_match in point_matches:
if profile_type == cls.PROFILE_STANDARD:
try:
ttl = FermentationProfilePoint.string_to_ttl(this_match.group('time_str'))
except ValueError:
raise ValueError("Invalid time string for row {}".format(profile_data))
profile_points.append({'ttl': ttl,
'temperature_setting': float(this_match.group('temp_str')),
'temp_format': this_match.group('temp_fmt')})
else:
raise ValueError("Unsupported profile type specified")
if found_profile_terminator:
# At this point, we've imported the full profile. If there are no profile points, raise an error. Otherwise,
# attempt to create the various objects
if len(profile_points) <= 0:
raise ValueError("No points in provided profile")
if profile_type == cls.PROFILE_STANDARD:
new_profile = cls(name=profile_name, profile_type=profile_type)
new_profile.save()
for this_point in profile_points:
new_point = FermentationProfilePoint(profile=new_profile, ttl=this_point['ttl'],
temperature_setting=this_point['temperature_setting'],
temp_format=this_point['temp_format'])
new_point.save()
# And we're done. Return the new_profile object
return new_profile
else:
raise ValueError("Unsupported profile type specified")
else:
raise ValueError("No profile terminator found")
def copy_to_new(self, name):
# This copies the current fermentation profile to a new profile
if len(name) <= 0:
raise ValueError("Name provided is too short")
new_profile = FermentationProfile(name=name, profile_type=self.profile_type)
new_profile.save()
for this_point in self.fermentationprofilepoint_set.all():
new_point = FermentationProfilePoint(profile=new_profile, temp_format=this_point.temp_format,
ttl=this_point.ttl, temperature_setting=this_point.temperature_setting)
new_point.save()
return new_profile
class FermentationProfilePoint(models.Model):
TEMP_FORMAT_CHOICES = (('C', 'Celsius'), ('F', 'Fahrenheit'))
profile = models.ForeignKey(FermentationProfile, on_delete=models.CASCADE)
ttl = models.DurationField(help_text="Time at which we should arrive at this temperature")
temperature_setting = models.DecimalField(max_digits=5, decimal_places=2, null=True,
help_text="The temperature the beer should be when TTL has passed")
temp_format = models.CharField(max_length=1, default='F')
def temp_to_f(self) -> Decimal:
if self.temp_format == 'F':
return self.temperature_setting
else:
return (self.temperature_setting*9/5) + 32
def temp_to_c(self) -> Decimal:
if self.temp_format == 'C':
return self.temperature_setting
else:
return (self.temperature_setting-32) * 5 / 9
def temp_to_preferred(self) -> Decimal:
# Converts the point to whatever the preferred temperature format is per Constance
if config.TEMPERATURE_FORMAT == 'F':
return self.temp_to_f()
elif config.TEMPERATURE_FORMAT == 'C':
return self.temp_to_c()
pass
def convert_temp(self, desired_temp_format) -> Decimal:
if self.temp_format == desired_temp_format:
return self.temperature_setting
elif self.temp_format == 'F' and desired_temp_format == 'C':
return self.temp_to_c()
elif self.temp_format == 'C' and desired_temp_format == 'F':
return self.temp_to_f()
else:
logger.error("Invalid temperature format {} specified".format(desired_temp_format))
return self.temperature_setting
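# Worked example (illustrative): a point stored as 68.00 F returns (68 - 32) * 5 / 9 = 20.00 for
# convert_temp('C'), and is returned unchanged for convert_temp('F').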
def ttl_to_string(self, short_code=False):
# This function returns self.ttl in the "5d 3h 4m 15s" format we use to key it in
ttl_string = ""
remainder = self.ttl.total_seconds()
days, remainder = divmod(remainder, 60*60*24)
hours, remainder = divmod(remainder, 60*60)
minutes, seconds = divmod(remainder, 60)
if short_code:
day_string = "d"
hour_string = "h"
minute_string = "m"
second_string = "s"
else:
day_string = " days, "
hour_string = " hours, "
minute_string = " minutes, "
second_string = " seconds, "
if days > 0:
ttl_string += str(int(days)) + day_string
if hours > 0:
ttl_string += str(int(hours)) + hour_string
if minutes > 0:
ttl_string += str(int(minutes)) + minute_string
if seconds > 0:
ttl_string += str(int(seconds)) + second_string
if len(ttl_string) <= 0: # Default to 0 seconds
ttl_string = "0" + second_string
return ttl_string.rstrip(", ")
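# For example (illustrative): a ttl of 3 days, 4 hours renders as "3d4h" with short_code=True and as
# "3 days, 4 hours" with short_code=False (trailing separators are removed by the rstrip above).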
@staticmethod
def string_to_ttl(string):
if len(string) <= 1:
raise ValueError("No string provided to convert")
# Split out the d/h/m/s of the timer
try:
timer_pattern = r"(?P<time_amt>[0-9]+)[ ]*(?P<ywdhms>[ywdhms]{1})"
timer_regex = re.compile(timer_pattern)
timer_matches = timer_regex.finditer(string)
except:
raise ValueError("{} isn't in a valid format for conversion".format(string))
# timer_time is equal to now + the time delta
time_delta = datetime.timedelta(seconds=0)
for this_match in timer_matches:
dhms = this_match.group('ywdhms')
delta_amt = int(this_match.group('time_amt'))
if dhms == 'y': # This doesn't account for leap years, but whatever.
time_delta = time_delta + datetime.timedelta(days=(365*delta_amt))
elif dhms == 'w':
time_delta = time_delta + datetime.timedelta(weeks=delta_amt)
elif dhms == 'd':
time_delta = time_delta + datetime.timedelta(days=delta_amt)
elif dhms == 'h':
time_delta = time_delta + datetime.timedelta(hours=delta_amt)
elif dhms == 'm':
time_delta = time_delta + datetime.timedelta(minutes=delta_amt)
elif dhms == 's':
time_delta = time_delta + datetime.timedelta(seconds=delta_amt)
return time_delta
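# For example (illustrative): string_to_ttl("3d 4h") matches the ("3", "d") and ("4", "h") groups and
# returns datetime.timedelta(days=3, hours=4); characters outside [ywdhms] are simply not matched.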
# The old (0.2.x/Arduino) Control Constants Model
class OldControlConstants(models.Model):
tempSetMin = models.FloatField(
verbose_name="Min Temperature",
help_text="The fridge and beer temperatures cannot go below this value. Units are specified by 'Temperature " +
"format' below.",
)
tempSetMax = models.FloatField(
verbose_name="Max Temperature",
help_text="The fridge and beer temperatures cannot go above this value. Units are specified by 'Temperature " +
"format' below.",
)
Kp = models.FloatField(
verbose_name="PID: Kp",
help_text="The beer temperature error is multiplied by Kp to give the proportional of the PID value"
)
Ki = models.FloatField(
verbose_name="PID: Ki",
help_text="When the integral is active, the error is added to the integral every 30 sec. The result is " +
"multiplied by Ki to give the integral part."
)
Kd = models.FloatField(
verbose_name="PID: Kd",
help_text="The derivative of the beer temperature is multiplied by " +
"Kd to give the derivative part of the PID value"
)
pidMax = models.FloatField(
verbose_name="PID: maximum",
help_text="Defines the maximum difference between the beer temp setting and fridge temp setting. The fridge " +
"setting will be clipped to this range."
)
iMaxErr = models.FloatField(
verbose_name="Integrator: Max temp error C",
help_text="The integral is only active when the temperature is close to the target temperature. This is the " +
"maximum error for which the integral is active."
)
idleRangeH = models.FloatField(
verbose_name="Temperature idle range top",
help_text="When the fridge temperature is within this range, it " +
"will not heat or cool, regardless of other settings"
)
idleRangeL = models.FloatField(
verbose_name="Temperature idle range bottom",
help_text="When the fridge temperature is within this range, it " +
"will not heat or cool, regardless of other settings"
)
heatTargetH = models.FloatField(
verbose_name="Heating target upper bound",
help_text="When the overshoot lands under this value, the peak " +
"is within the target range and the estimator is not adjusted"
)
heatTargetL = models.FloatField(
verbose_name="Heating target lower bound",
help_text="When the overshoot lands above this value, the peak " +
"is within the target range and the estimator is not adjusted"
)
coolTargetH = models.FloatField(
verbose_name="Cooling target upper bound",
help_text="When the overshoot lands under this value, the peak " +
"is within the target range and the estimator is not adjusted"
)
coolTargetL = models.FloatField(
verbose_name="Cooling target lower bound",
help_text="When the overshoot lands above this value, the peak " +
"is within the target range and the estimator is not adjusted"
)
maxHeatTimeForEst = models.IntegerField(
verbose_name="Maximum time in seconds for heating overshoot estimator",
help_text="The time the fridge has been heating is used to estimate overshoot. " +
"This is the maximum time that is taken into account."
)
maxCoolTimeForEst = models.IntegerField(
verbose_name="Maximum time in seconds for cooling overshoot estimator",
help_text="Maximum time the fridge has been cooling is used to estimate " +
"overshoot. This is the maximum time that is taken into account."
)
beerFastFilt = models.IntegerField(
verbose_name="Beer fast filter delay time",
help_text="The beer fast filter is used for display and data logging. " +
"More filtering gives a smoother line but also more delay."
)
beerSlowFilt = models.IntegerField(
verbose_name="Beer slow filter delay time",
help_text="The beer slow filter is used for the control algorithm. " +
"The fridge temperature setting is calculated from this filter. " +
"Because a small difference in beer temperature cases a large " +
"adjustment in the fridge temperature, more smoothing is needed."
)
beerSlopeFilt = models.IntegerField(
verbose_name="Beer slope filter delay time",
help_text="The slope is calculated every 30 sec and fed to this filter. " +
"More filtering means a smoother fridge setting."
)
fridgeFastFilt = models.IntegerField(
verbose_name="Fridge fast filter delay time",
help_text="The fridge fast filter is used for on-off control, display, " +
"and logging. It needs to have a small delay."
)
fridgeSlowFilt = models.IntegerField(
verbose_name="Fridge slow filter delay time",
help_text="The fridge slow filter is used for peak detection to adjust " +
"the overshoot estimators. More smoothing is needed to prevent " +
"small fluctuations from being recognized as peaks."
)
fridgeSlopeFilt = models.IntegerField(
verbose_name="Fridge slope filter delay time",
help_text="Fridge slope filter is not used in this revision of the firmware."
)
lah = models.IntegerField(
verbose_name="Using light as heater?",
help_text="If set to yes the chamber light (if assigned a pin) will be used in place of the heat pin",
choices=(
(1, "YES"),
(0, "No")
),
default=0
)
hs = models.IntegerField(
verbose_name="Use half steps for rotary encoder?",
help_text="If this option is set to yes, the rotary encoder will use half steps",
choices=(
(1, "YES"),
(0, "No")
),
default=0
)
tempFormat = models.CharField(
verbose_name="Temperature format",
help_text="This is the temperature format that will be used by the device",
max_length=1,
choices=(
("F", "Fahrenheit"),
("C", "Celsius")
),
default='F'
)
# In a lot of cases we're selectively loading/sending/comparing the fields that are known by the firmware
# To make it easy to iterate over those fields, going to list them out here
firmware_field_list = ['tempSetMin', 'tempSetMax', 'Kp', 'Ki', 'Kd', 'pidMax', 'iMaxErr', 'idleRangeH',
'idleRangeL', 'heatTargetH', 'heatTargetL', 'coolTargetH', 'coolTargetL',
'maxHeatTimeForEst', 'maxCoolTimeForEst', 'beerFastFilt', 'beerSlowFilt', 'beerSlopeFilt',
'fridgeFastFilt', 'fridgeSlowFilt', 'fridgeSlopeFilt', 'lah', 'hs', 'tempFormat']
# preset_name is only used if we want to save the preset to the database to be reapplied later
preset_name = models.CharField(max_length=255, null=True, blank=True, default="")
def load_from_controller(self, controller):
"""
:param controller: models.BrewPiDevice
:type controller: BrewPiDevice
:return: boolean
"""
# try:
# Load the control constants dict from the controller
cc = controller.get_control_constants()
for this_field in self.firmware_field_list:
try:
# In case we don't get every field back
setattr(self, this_field, cc[this_field])
except:
pass
return True
# except:
# return False
def save_to_controller(self, controller, attribute):
"""
:param controller: models.BrewPiDevice
:type controller: BrewPiDevice
:return:
"""
value_to_send = {attribute: getattr(self, attribute)}
return controller.set_parameters(value_to_send)
def save_all_to_controller(self, controller, prior_control_constants=None):
"""
:param controller: models.BrewPiDevice
:type controller: BrewPiDevice
:return: boolean
"""
if prior_control_constants is None:
# Load the preexisting control constants from the controller
prior_control_constants = OldControlConstants()
prior_control_constants.load_from_controller(controller)
for this_field in self.firmware_field_list:
# Now loop through and check each field to find out what changed
if getattr(self, this_field) != getattr(prior_control_constants, this_field):
# ...and only update those fields
self.save_to_controller(controller, this_field)
return True
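# Illustrative usage (hypothetical sketch): push a single changed constant back to the controller.
#   cc, legacy_mode = device.retrieve_control_constants()
#   if legacy_mode:
#       cc.Kp = 10.0                        # hypothetical new proportional gain
#       cc.save_to_controller(device, "Kp")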
# The new (0.4.x/Spark) Control Constants Model
class NewControlConstants(models.Model):
# class Meta:
# managed = False
tempFormat = models.CharField(max_length=1,default="C")
# settings for heater 1
heater1_kp = models.FloatField(help_text="Actuator output in % = Kp * input error")
heater1_ti = models.IntegerField()
heater1_td = models.IntegerField()
heater1_infilt = models.IntegerField()
heater1_dfilt = models.IntegerField()
# settings for heater 2
heater2_kp = models.FloatField()
heater2_ti = models.IntegerField()
heater2_td = models.IntegerField()
heater2_infilt = models.IntegerField()
heater2_dfilt = models.IntegerField()
# settings for cooler
cooler_kp = models.FloatField()
cooler_ti = models.IntegerField()
cooler_td = models.IntegerField()
cooler_infilt = models.IntegerField()
cooler_dfilt = models.IntegerField()
# settings for beer2fridge PID
beer2fridge_kp = models.FloatField()
beer2fridge_ti = models.IntegerField()
beer2fridge_td = models.IntegerField()
beer2fridge_infilt = models.IntegerField()
beer2fridge_dfilt = models.IntegerField()
beer2fridge_pidMax = models.FloatField()
minCoolTime = models.IntegerField()
minCoolIdleTime = models.IntegerField()
heater1PwmPeriod = models.IntegerField()
heater2PwmPeriod = models.IntegerField()
coolerPwmPeriod = models.IntegerField()
mutexDeadTime = models.IntegerField()
# preset_name is only used if we want to save the preset to the database to be reapplied later
preset_name = models.CharField(max_length=255, null=True, blank=True, default="")
# In a lot of cases we're selectively loading/sending/comparing the fields that are known by the firmware
# To make it easy to iterate over those fields, going to list them out here
firmware_field_list = ['tempFormat', 'heater1_kp', 'heater1_ti', 'heater1_td', 'heater1_infilt', 'heater1_dfilt',
'heater2_kp', 'heater2_ti', 'heater2_td', 'heater2_infilt', 'heater2_dfilt',
'cooler_kp', 'cooler_ti', 'cooler_td', 'cooler_infilt', 'cooler_dfilt',
'beer2fridge_kp', 'beer2fridge_ti', 'beer2fridge_td', 'beer2fridge_infilt',
'beer2fridge_dfilt', 'beer2fridge_pidMax', 'minCoolTime', 'minCoolIdleTime',
'heater1PwmPeriod', 'heater2PwmPeriod', 'coolerPwmPeriod', 'mutexDeadTime',]
def load_from_controller(self, controller):
"""
:param controller: models.BrewPiDevice
:type controller: BrewPiDevice
:return: boolean
"""
try:
# Load the control constants dict from the controller
cc = controller.get_control_constants()
for this_field in self.firmware_field_list:
setattr(self, this_field, cc[this_field])
return True
except:
return False
def save_to_controller(self, controller, attribute):
"""
:param controller: models.BrewPiDevice
:type controller: BrewPiDevice
:return:
"""
value_to_send = {attribute: getattr(self, attribute)}
return controller.set_parameters(value_to_send)
def save_all_to_controller(self, controller, prior_control_constants=None):
"""
:param controller: models.BrewPiDevice
:type controller: BrewPiDevice
:return: boolean
"""
try:
for this_field in self.firmware_field_list:
self.save_to_controller(controller, this_field)
return True
except:
return False
# TODO - Determine if we care about controlSettings
# # There may only be a single control settings object between both revisions of the firmware, but I'll break it out
# # for now just in case.
# class OldControlSettings(models.Model):
# class Meta:
# managed = False
#
# firmware_field_list = ['tempSetMin', 'tempSetMax', 'Kp', 'Ki', 'Kd', 'pidMax', 'iMaxErr', 'idleRangeH',
# 'idleRangeL', 'heatTargetH', 'heatTargetL', 'coolTargetH', 'coolTargetL',
# 'maxHeatTimeForEst', 'maxCoolTimeForEst', 'beerFastFilt', 'beerSlowFilt', 'beerSlopeFilt',
# 'fridgeFastFilt', 'fridgeSlowFilt', 'fridgeSlopeFilt', 'lah', 'hs',]
#
# controller = models.ForeignKey(BrewPiDevice)
#
# def load_from_controller(self, controller=None):
# """
# :param controller: models.BrewPiDevice
# :type controller: BrewPiDevice
# :return: boolean
# """
# if controller is not None:
# self.controller = controller
# try:
# cc = json.loads(self.controller.send_message("getControlSettings", read_response=True))
#
# for this_field in self.firmware_field_list:
# setattr(self, this_field, cc[this_field])
# return True
#
# except:
# return False
#
# def save_to_controller(self, controller, attribute):
# """
# :param controller: models.BrewPiDevice
# :type controller: BrewPiDevice
# :return:
# """
#
# value_to_send = {attribute, getattr(self, attribute)}
# return controller.send_message("setParameters", json.dumps(value_to_send))
#
# def save_all_to_controller(self, controller, prior_control_constants=None):
# """
# :param controller: models.BrewPiDevice
# :type controller: BrewPiDevice
# :return: boolean
# """
# try:
# for this_field in self.firmware_field_list:
# self.save_to_controller(controller, this_field)
# return True
# except:
# return False
#
|
import numpy as np
import pprint
import sys
import gym.spaces
if "../" not in sys.path:
sys.path.append("../")
from lib.envs.gridworld import GridworldEnv
pp = pprint.PrettyPrinter(indent=2)
env = GridworldEnv()
def value_iteration(env, epsilon=0.0001, discount_factor=1.0):
"""
Value Iteration Algorithm.
Args:
env: OpenAI env. env.P represents the transition probabilities of the environment.
env.P[s][a] is a list of transition tuples (prob, next_state, reward, done).
env.nS is a number of states in the environment.
env.nA is a number of actions in the environment.
epsilon: We stop evaluation once our value function change is less than epsilon for all states.
discount_factor: Gamma discount factor.
Returns:
A tuple (policy, V) of the optimal policy and the optimal value function.
"""
def one_step_lookahead(V, a, s):
# GridworldEnv is deterministic, so env.P[s][a] holds exactly one (prob, next_state, reward, done) tuple
[(prob, next_state, reward, done)] = env.P[s][a]
v = prob * (reward + discount_factor * V[next_state])
return v
# start with initial value function and initial policy
V = np.zeros(env.nS)
policy = np.zeros([env.nS, env.nA])
#while not the optimal policy
while True:
delta = 0
#loop over state space
for s in range(env.nS):
actions_values = np.zeros(env.nA)
#loop over possible actions
for a in range(env.nA):
#apply bellman eqn to get actions values
actions_values[a] = one_step_lookahead(V, a, s)
#pick the best action
best_action_value = max(actions_values)
#get the biggest difference between best action value and our old value function
delta = max(delta, abs(best_action_value - V[s]))
#apply bellman optimality eqn
V[s] = best_action_value
#to update the policy
best_action = np.argmax(actions_values)
#update the policy
policy[s] = np.eye(env.nA)[best_action]
#if optimal value function
if delta < epsilon:
break
return policy, V
policy, v = value_iteration(env)
print("Policy Probability Distribution:")
print(policy)
print("")
print("Reshaped Grid Policy (0=up, 1=right, 2=down, 3=left):")
print(np.reshape(np.argmax(policy, axis=1), env.shape))
print("")
print("Value Function:")
print(v)
print("")
print("Reshaped Grid Value Function:")
print(v.reshape(env.shape))
print("")
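# A more general one-step lookahead (a hedged sketch, not used above): GridworldEnv is deterministic, so
# one_step_lookahead unpacks a single transition tuple. For stochastic MDPs the Bellman optimality backup
# V(s) = max_a sum_s' P(s'|s,a) * (R(s,a,s') + gamma * V(s')) sums over every transition tuple instead:
def one_step_lookahead_general(V, s, a, P, discount_factor=1.0):
    """Expected value of taking action a in state s under value function V."""
    return sum(prob * (reward + discount_factor * V[next_state])
               for prob, next_state, reward, _done in P[s][a])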
|
from office365.sharepoint.base_entity import BaseEntity
class UserResource(BaseEntity):
"""An object representing user-defined localizable resources."""
pass
|