#!/usr/bin/env python3
# Building tool for cpp and hpp files
# @Author Leonardo Montagner https://github.com/leomonta/Python_cpp_builder
#
# Build only the modified files on a cpp project
# Link and compile using the appropriate library and include path
# Print out error and warning messages
# add the args for the link and the compile process
#
# Done: compile and link files
# Done: check for newer version of source files
# Done: skip compilation or linking if there are no new or modified files
# TODO: check for newer version of header files (check in every file if that header is included, if it has to be rebuilt)
# TODO: identify each header and figure out which source file include which
# Done: error and warning coloring in the console
# Done: if error occurs stop compilation and return 1
# Done: if error occurs stop linking and return 1
# Done: retrieve include dirs, libs and args from a file
# Done: retrieve target directories for exe, objects, include and source files
# Done: support for debug and optimization compilation, compiler flag and libraries
import subprocess # execute command on the cmd / bash / whatever
import os # get directories file names
import json # parse cpp_builder_config.json
from colorama import Fore, init
import hashlib # for calculating hashes
import sys # for arguments parsing
includes_variable = {
# names of all includes dir + name
"all_includes" : [],
# file: list of references (indices) to include files
"src_references": {}
}
compilation_variables = {
# arguments to feed to the compiler and the linker
"compiler_args": "",
"linker_args":"",
# the string composed by the names of the libraries -> "-lpthread -lm ..."
"libraries_names" : "",
# the string composed by the path of the libraries -> "-L./path/to/lib -L..."
"libraries_paths": "",
# the string composed by the path of the includes -> "-I./include -I./ext/include -I..."
"includes_paths": "",
"includes_dirs": [],
    # list of the source directories
    "src_paths": [],
# name of the compiler executable
"compiler_exec": "",
# base directory of the project
"project_path":"",
# directory where to leave the compiled object files
"objects_path": "",
# path to the directory where to leave the final executable
"exe_path": "",
# name of the final executable
"exe_name" : ""
}
sha1 = hashlib.sha1()
old_hashes = {}
new_hashes = {}
source_files_extensions = ["c", "cpp", "cxx", "c++", "cc", "C"]
def print_stdout(message: tuple) -> bool:
    """
    Print the compiler / linker output with colors and return False if an error was reported
    """
    out = message[1].split("\n")[0:-1]
    res = True
    for line in out:
        if "error" in line:
            print(Fore.RED, line)
            res = False  # an error was reported, signal the failure to the caller
        elif "warning" in line:
            print(Fore.BLUE, line)
        elif "note" in line:
            print(Fore.CYAN, line)
        else:
            print(line)
    print(Fore.WHITE)
    return res
def exe_command(command: str) -> tuple:
"""
    execute the given command and return its (stdout, stderr) tuple; only stderr is captured
"""
stream = subprocess.Popen(command.split(" "), stderr=subprocess.PIPE, universal_newlines=True)
return stream.communicate() # execute the command and get the result
def parse_config_json(optimization: bool) -> None:
"""
    Set the global variables by reading them from cpp_builder_config.json
"""
global compilation_variables
for k in compilation_variables:
compilation_variables[k] = ""
# load and parse the file
config_file = json.load(open("cpp_builder_config.json"))
# base directory for ALL the other directories and files
compilation_variables["project_path"] = config_file["projectDir"]
# get the compiler executable {gcc, g++, clang, etc}
compilation_variables["compiler_exec"] = config_file["compilerExe"]
# --- Libraries path and names ---
# create the library args -> -lSomelib -lSomelib2 -l...
if optimization:
for lname in config_file["libraries"]["Release"]:
compilation_variables["libraries_names"] += " -l" + lname
else:
for lname in config_file["libraries"]["Debug"]:
compilation_variables["libraries_names"] += " -l" + lname
compilation_variables["libraries_names"] = compilation_variables["libraries_names"][1:] # remove first whitespace
# create the libraries path args -> -LSomelibrary/lib -L...
for Lname in config_file["Directories"]["libraryDir"]:
compilation_variables["libraries_paths"] += " -L" + Lname
compilation_variables["libraries_paths"] = compilation_variables["libraries_paths"][1:] # remove first whitespace
# --- Include and Source Directories
compilation_variables["includes_dirs"] = config_file["Directories"]["includeDir"]
# create the includes args -> -IInclude -ISomelibrary/include -I...
for Idir in config_file["Directories"]["includeDir"]:
compilation_variables["includes_paths"] += " -I" + Idir
compilation_variables["includes_paths"] = compilation_variables["includes_paths"][1:] # remove first whitespace
    # source dirs where the source code files are located
compilation_variables["src_paths"] = config_file["Directories"]["sourceDir"]
compilation_variables["objects_path"] = config_file["Directories"]["objectsDir"]
compilation_variables["exe_path"] = config_file["Directories"]["exeDir"]
compilation_variables["exe_name"] = config_file["exeName"]
    # --- Compiling and linking arguments ---
    # compiler and linker arguments
if optimization:
compilation_variables["compiler_args"] = config_file["Arguments"]["Release"]["Compiler"]
compilation_variables["linker_args"] = config_file["Arguments"]["Release"]["Linker"]
else:
compilation_variables["compiler_args"] = config_file["Arguments"]["Debug"]["Compiler"]
compilation_variables["linker_args"] = config_file["Arguments"]["Debug"]["Linker"]
def is_modified(filename: str) -> bool:
"""
    Given a filename, return whether it has been modified since the stored hash was taken
"""
global new_hashes
global old_hashes
if filename in old_hashes.keys():
if old_hashes[filename] == new_hashes[filename]:
return False
return True
def calculate_new_hashes() -> None:
"""
Calculate the hashes for all the source files
"""
global compilation_variables, sha1, includes_variable
    for source_directory in compilation_variables["src_paths"]:  # loop through all the source file directories
        for file in os.listdir(source_directory):  # loop through every file of each directory
# sha1 hash calculation
with open(f"{source_directory}/{file}", "r+b") as f:
sha1.update(f.read())
# insert in the new_hashes dict the key filename with the value hash
new_hashes[source_directory + file] = sha1.hexdigest() # create the new hash
# i need to re-instantiate the object to empty it
sha1 = hashlib.sha1()
def load_old_hashes() -> None:
"""
Load in old_hashes the hashes present in files_hash.txt
"""
global old_hashes
    # read the hashes from the file and add them to the old_hashes dict
with open("files_hash.txt", "r") as f:
while True:
data = f.readline()
if not data:
break
temp = data.split(":")
# remove trailing newline
temp[1] = temp[1].replace("\n", "")
old_hashes[temp[0]] = temp[1]
def get_to_compile() -> list:
"""
return a list of files and their directories that need to be compiled
"""
global compilation_variables
to_compile = [] # contains directory and filename
# checking which file need to be compiled
    for source_directory in compilation_variables["src_paths"]:  # loop through all the source file directories
        for file in os.listdir(source_directory):  # loop through every file of each directory
            # I need to differentiate the different parts:
            # extension: to decide whether it has to be compiled and how to name it
            # filename: the rest of the file name without the extension, used to name the compiled object files
            # source dir: needed to tell apart same-named files living in different dirs
if is_modified(source_directory + file):
temp = file.split(".")
ext = temp.pop(-1)
file_name = "".join(temp)
if (ext in source_files_extensions): # check if it is a source file
to_compile.append([source_directory, file_name, ext])
return to_compile
def save_new_hashes() -> None:
"""
    Write all the new hashes to files_hash.txt
"""
global new_hashes
with open("files_hash.txt", "w") as f:
for i in new_hashes.keys():
f.write(i + ":")
f.write(new_hashes[i] + "\n")
def compile(to_compile: list) -> bool:
"""
    Compile all the given source files with the specified arguments
"""
global compilation_variables
errors = 0
compiler_exec = compilation_variables["compiler_exec"]
includes = compilation_variables["includes_paths"]
compiler_args = compilation_variables["compiler_args"]
obj_dir = compilation_variables["objects_path"]
for file in to_compile:
command = f"{compiler_exec} {compiler_args} {includes} -c -o {obj_dir}/{file[0]}{file[1]}.o {file[0]}/{file[1]}.{file[2]}"
print(command)
errors += not print_stdout(exe_command(command))
return errors > 0
def link() -> bool:
"""
Link together all the files that have been compiled with the specified libraries and arguments
"""
global compilation_variables
    to_link = []  # contains directory and filename of every source file whose object gets linked
    # collect every source file, since all of their object files get linked together
    for source_directory in compilation_variables["src_paths"]:  # loop through all the source file directories
        for file in os.listdir(source_directory):  # loop through every file of each directory
            # I need to differentiate the different parts:
            # extension: to decide whether it is a source file
            # filename: the rest of the file name without the extension, used to name the object files
            # source dir: needed to tell apart same-named files living in different dirs
temp = file.split(".")
ext = temp.pop(-1)
file_name = "".join(temp)
if (ext in source_files_extensions): # check if it is a source file
to_link.append([source_directory, file_name, ext])
compiler_exec = compilation_variables["compiler_exec"]
linker_args = compilation_variables["linker_args"]
exe_path = compilation_variables["exe_path"]
exe_name = compilation_variables["exe_name"]
libraries_paths = compilation_variables["libraries_paths"]
obj_dir = compilation_variables["objects_path"]
Link_cmd = f"{compiler_exec} {linker_args} -o {exe_path}/{exe_name} {libraries_paths}"
for file in to_link:
Link_cmd += f" {obj_dir}/{file[0]}{file[1]}.o"
Link_cmd += " " + compilation_variables["libraries_names"]
print(Link_cmd)
return print_stdout(exe_command(Link_cmd))
def create_makefile():
# first debug options
parse_config_json(False)
make_file = ""
# variables
make_file += f'CC={compilation_variables["compiler_exec"]}\n'
make_file += f'BinName={compilation_variables["exe_path"]}/{compilation_variables["exe_name"]}\n'
make_file += f'ObjsDir={compilation_variables["objects_path"]}\n'
make_file += '\n# Debug variables\n'
make_file += f'DCompilerArgs={compilation_variables["compiler_args"]}\n'
make_file += f'DLinkerArgs={compilation_variables["linker_args"]}\n'
make_file += f'DLibrariesPaths={compilation_variables["libraries_paths"]}\n'
make_file += f'DLibrariesNames={compilation_variables["libraries_names"]}\n'
    # then release options
parse_config_json(True)
make_file += '\n# Release variables\n'
make_file += f'RCompilerArgs={compilation_variables["compiler_args"]}\n'
make_file += f'RLinkerArgs={compilation_variables["linker_args"]}\n'
make_file += f'RLibrariesPaths={compilation_variables["libraries_paths"]}\n'
make_file += f'RLibrariesNames={compilation_variables["libraries_names"]}\n'
make_file += '\n# includes\n'
make_file += f'Includes={compilation_variables["includes_paths"]}\n'
make_file += '\n\n'
# targets
os.chdir(compilation_variables["project_path"])
# obtain new hashes
calculate_new_hashes()
# get the file needed to compile
to_compile = get_to_compile()
make_file += 'debug: DCompile DLink\n\n'
make_file += 'release: RCompile RLink\n\n'
# Debug commands
make_file += '\nDCompile: \n'
for file in to_compile:
make_file += f" $(CC) $(DCompilerArgs) $(Includes) -c -o $(ObjsDir)/{file[0]}{file[1]}.o {file[0]}/{file[1]}.{file[2]}\n"
make_file += '\nDLink: \n'
make_file += f' $(CC) $(DLinkerArgs) -o $(BinName) $(DLibrariesPaths)'
for file in to_compile:
make_file += f" $(ObjsDir)/{file[0]}{file[1]}.o"
make_file += f' $(DLibrariesNames)\n'
# Release commands
make_file += '\nRCompile: \n'
for file in to_compile:
make_file += f" $(CC) $(RCompilerArgs) $(Includes) -c -o $(ObjsDir)/{file[0]}{file[1]}.o {file[0]}/{file[1]}.{file[2]}\n"
make_file += '\nRLink: \n'
make_file += f' $(CC) $(RLinkerArgs) -o $(BinName) $(RLibrariesPaths)'
for file in to_compile:
make_file += f" $(ObjsDir)/{file[0]}{file[1]}.o"
make_file += f' $(RLibrariesNames)\n'
make_file += '\nclean:\n'
make_file += ' rm -r -f objs/*\n'
make_file += ' rm -r -f $(BinName)\n'
with open("Makefile", "w+") as mf:
mf.write(make_file)
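# Command line flags handled by main() below:
#   -e  generate a Makefile from the config and exit
#   -o  build with the Release (optimized) configuration instead of Debug
#   -a  ignore the stored hashes and rebuild every source file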
def main():
global compilation_variables
if "-e" in sys.argv:
create_makefile()
return
if "-o" in sys.argv:
parse_config_json(True)
else:
parse_config_json(False)
os.chdir(compilation_variables["project_path"])
#init colorama
init()
# create file if it does not exist
if not os.path.exists("files_hash.txt"):
f = open("files_hash.txt", "w")
f.close()
if "-a" not in sys.argv:
# load old hashes
load_old_hashes()
# obtain new hashes
calculate_new_hashes()
# get the file needed to compile
to_compile = get_to_compile()
# --- Compiling ---
print(Fore.GREEN, " --- Compiling ---", Fore.WHITE)
if not to_compile:
print(" --- Compilation and linking skipped due to no new or modified files ---")
return
# compile each file and show the output,
# and check for errors
if compile(to_compile):
print(f"\n{Fore.RED} --- Linking skipped due to errors in compilation process! ---")
sys.exit(1)
# --- Linking ---
print(Fore.GREEN, " --- Linking ---", Fore.WHITE)
if not link():
print(f"\n{Fore.RED} --- Errors in linking process! ---")
sys.exit(1)
save_new_hashes()
if __name__ == "__main__":
    main()
|
from django.http import HttpResponse
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from comments.forms import CommentForm, ReplyForm, BattleCommentForm
from comments.models import Comment
from posts.models.post import Post
from bookmarks.models import PostBookmark
from posts.models.subscriptions import PostSubscription
from posts.models.votes import PostVote
POSSIBLE_COMMENT_ORDERS = {"created_at", "-created_at", "-upvotes"}
def render_post(request, post, context=None):
# render "raw" newsletters
if post.type == Post.TYPE_WEEKLY_DIGEST:
return HttpResponse(post.html)
# select votes and comments
if request.me:
comments = Comment.objects_for_user(request.me).filter(post=post).all()
is_bookmark = PostBookmark.objects.filter(post=post, user=request.me).exists()
is_voted = PostVote.objects.filter(post=post, user=request.me).exists()
upvoted_at = int(PostVote.objects.filter(post=post, user=request.me).first().created_at.timestamp() * 1000) if is_voted else None
subscription = PostSubscription.get(request.me, post)
else:
comments = Comment.visible_objects().filter(post=post).all()
is_voted = False
is_bookmark = False
upvoted_at = None
subscription = None
# order comments
comment_order = request.GET.get("comment_order") or "-upvotes"
if comment_order in POSSIBLE_COMMENT_ORDERS:
        comments = comments.order_by(comment_order, "created_at")  # additionally sort by time to keep the ordering stable
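    # an unknown comment_order value silently falls back to the queryset's default ordering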
# hide deleted comments for battle (visual junk)
if post.type == Post.TYPE_BATTLE:
comments = comments.filter(is_deleted=False)
context = {
**(context or {}),
"post": post,
"comments": comments,
"comment_form": CommentForm(),
"comment_order": comment_order,
"reply_form": ReplyForm(),
"is_bookmark": is_bookmark,
"is_voted": is_voted,
"upvoted_at": upvoted_at,
"subscription": subscription,
}
# TODO: make a proper mapping here in future
if post.type == Post.TYPE_BATTLE:
context["comment_form"] = BattleCommentForm()
try:
return render(request, f"posts/show/{post.type}.html", context)
except TemplateDoesNotExist:
return render(request, "posts/show/post.html", context)
|
from django.conf.urls import include, url
from django.core import urlresolvers
from django.utils.translation import ugettext_lazy as _
from wagtail.wagtailcore import hooks
from wagtail.wagtailadmin.menu import MenuItem
from wagtail.wagtailusers import urls
def register_admin_urls():
return [
url(r'^users/', include(urls)),
]
hooks.register('register_admin_urls', register_admin_urls)
def construct_main_menu(request, menu_items):
if request.user.has_module_perms('auth'):
menu_items.append(
MenuItem(_('Users'), urlresolvers.reverse('wagtailusers_index'), classnames='icon icon-user', order=600)
)
hooks.register('construct_main_menu', construct_main_menu)
|
"""
###############################################################################
# Copyright 2019, by the California Institute of Technology.
# ALL RIGHTS RESERVED.
#
# United States Government Sponsorship acknowledged. Any commercial use
# must be negotiated with the Office of Technology Transfer at the
# California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export licenses,
# or other export authority as may be required before exporting such
# information to foreign countries or providing access to foreign persons.
#
# file: controller.py
# author: S. Felipe Fregoso
# brief:  Controls telemetry and signal reconstruction; dispatches
#         commands to the executing components.
#
#
###############################################################################
"""
import sys
import time
import copy
import numpy as np
from . import interface as Iface
from . import telemetry_iface_ag
from . import metadata_classes
from .dhm_cmd_client_server import (DhmCmdServer)
from .dhmcommands import CommandDictionary
from .heartbeat import Heartbeat as HBeat
from .component_abc import ComponentABC
NUMBER_OF_COMPONENTS = 5
class Controller(ComponentABC):
"""
Controller Component Class
"""
def __init__(self, identifier, inq, pub, _events, configfile=None, verbose=False):
ComponentABC.__init__(self,
identifier,
inq,
pub,
_events,
configfile=configfile,
verbose=verbose)
self._module_init_count = 0
self._cmd_dict = CommandDictionary()
### Command server handler
self._cmd_server = None
### Telemetry Objects
self._reconst_telem = None
self._session_telem = None
self._heartbeat_telem = None
self._holo_telem = None
self._framesource_telem = None
self._datalogger_telem = None
self._guiserver_telem = None
self._fouriermask_telem = None
self._reconst_meta = self._allmeta.metadata['RECONSTRUCTION']
self._holo_meta = self._allmeta.metadata['HOLOGRAM']
self._framesource_meta = self._allmeta.metadata['FRAMESOURCE']
self._datalogger_meta = self._allmeta.metadata['DATALOGGER']
self._guiserver_meta = self._allmeta.metadata['GUISERVER']
self._session_meta = self._allmeta.metadata['SESSION']
self._fouriermask_meta = self._allmeta.metadata['FOURIERMASK']
def publish_status(self, status_msg=None):
"""
Publish component status
"""
def holometa_to_telem(self, meta):
"""
Convert holo metadata to a telemetry object
"""
self._holo_telem.set_values(len(meta.wavelength),
meta.wavelength,
meta.dx,
meta.dy,
meta.crop_fraction,
meta.rebin_factor,
meta.bgd_sub,
meta.bgd_file)
return self._holo_telem.pack()
def framesourcemeta_to_telem(self, meta):
"""
Framesource metadata to telemetry object
"""
self._framesource_telem.set_values(meta.state,
meta.mode,
meta.file['datadir'],
meta.file['currentfile'],
meta.status_msg)
return self._framesource_telem.pack()
def dataloggermeta_to_telem(self, meta):
"""
Datalogger metadata to telemetry object
"""
self._datalogger_telem.set_values(meta.enabled,
"",
meta.status_msg)
return self._datalogger_telem.pack()
def guiservermeta_to_telem(self, meta):
"""
Gui server metadata to telemetry object
"""
portlist = [meta.ports['fourier'],\
meta.ports['reconst_amp'],\
meta.ports['raw_frames'],\
meta.ports['telemetry'],\
]
self._guiserver_telem.set_values(portlist,
meta.connection_status[0:4],
meta.status_msg)
return self._guiserver_telem.pack()
def sessionmeta_to_telem(self, meta):
"""
Session metadata to telemetry object
"""
self._session_telem.set_values(meta.name,
meta.description,
len(meta.holo.wavelength),
meta.holo.wavelength,
meta.holo.dx,
meta.holo.dy,
int(meta.holo.crop_fraction),
int(meta.holo.rebin_factor),
meta.lens.focal_length,
meta.lens.numerical_aperture,
meta.lens.system_magnification,
meta.status_msg,
)
return self._session_telem.pack()
def fouriermaskmeta_to_telem(self, meta):
"""
Fourier mask metadata to telemetry object
"""
x_peak = [circ.get_params[0] for circ in meta.mask.circle_list]
num_x_peak = len(x_peak)
y_peak = [circ.get_params[1] for circ in meta.mask.circle_list]
num_y_peak = len(y_peak)
mask = np.any(meta.mask.mask, axis=2).flatten()
self._fouriermask_telem.set_values(num_x_peak,
x_peak,
num_y_peak,
y_peak,
mask,
)
return self._fouriermask_telem.pack()
def reconstmeta_to_telem(self, meta):
"""
Convert reconstruction metadata to telemetry object
"""
self._reconst_telem.set_values(
len(meta.propagation_distance),
meta.propagation_distance,
meta.compute_spectral_peak,
meta.compute_digital_phase_mask,
meta.processing_mode,
len(meta.chromatic_shift),
meta.chromatic_shift,
meta.ref_holo.path,
meta.ref_holo.enabled,
meta.ref_holo.averaging_sec,
meta.ref_holo.averaging_enabled,
meta.phase_unwrapping.enabled,
meta.phase_unwrapping.algorithm,
meta.fitting.mode,
meta.fitting.method,
meta.fitting.order,
meta.fitting.applied,
meta.phase_mask_reset,
meta.roi_x.offset,
meta.roi_y.offset,
meta.roi_x.size,
meta.roi_y.size,
meta.store_files,
meta.center_image.center,
meta.center_image.center_and_tilt,
meta.center_image.max_value,
meta.center_image.wide_spectrum,
meta.status_msg)
return self._reconst_telem.pack()
def send_telemetry(self, telem_bin_str, srcid):
"""
Send telemetry to the GUI server component
"""
msg_pkt = Iface.MessagePkt(Iface.TELEMETRY_TYPE, srcid)
msg_pkt.append(telem_bin_str)
msg_pkt.complete_packet()
gui_pkt = Iface.GuiPacket('telemetry', msg_pkt.to_bytes())
self._inq['guiserver_inq'].put_nowait(gui_pkt)
def publish_session_status(self, status_msg=None):
"""
Publish the session metadata as status
"""
if status_msg:
self._session_meta.status_msg = status_msg
self._pub.publish('session_status',
Iface.MetadataPacket(self._session_meta)
)
def process_session_cmd(self, arg):
"""
Process 'session' commands
"""
tempsession = copy.copy(self._session_meta)
tempsession.holo = copy.copy(self._holo_meta)
validcmd = True
        if not arg: ### Empty parameter list, send session status
self.publish_session_status(status_msg="SUCCESS")
else:
for param, value in arg.items():
if param == 'name':
tempsession.name = value
elif param == 'description':
tempsession.description = value
elif param == 'wavelength':
tempsession.holo.wavelength = value
elif param == 'dx':
tempsession.holo.dx = value
elif param == 'dy':
tempsession.holo.dy = value
elif param == 'crop_fraction':
tempsession.holo.crop_fraction = value
elif param == 'rebin_factor':
tempsession.holo.rebin_factor = value
elif param == 'focal_length':
tempsession.lens.focal_length = value
elif param == 'numerical_aperture':
tempsession.lens.numerical_aperture = value
elif param == 'system_magnification':
tempsession.lens.system_magnification = value
else:
validcmd = False
if validcmd:
self._session_meta = copy.copy(tempsession)
#self._holo_meta = copy.copy(tempsession.holo)
# Send holo data to
#self._inq['reconstructor_inq'].put_nowait(self._holo_meta)
self._inq['reconstructor_inq'].put_nowait(self._session_meta)
self.publish_session_status(status_msg="SUCCESS")
def process_shutdown_cmd(self, data):
"""
Process the shutdown command
Send 'None' to all component message queues to exit its
execution loop.
"""
for _, value in self._inq.items():
value.put_nowait(None)
def dispatch_commands(self, modid, data, arg):
"""
        Dispatch the command data to the executing component.
"""
switcher = {
'reconst':self._inq['reconstructor_inq'].put_nowait,
'holo':self._inq['reconstructor_inq'].put_nowait,
'fouriermask':self._inq['reconstructor_inq'].put_nowait,
'framesource':self._inq['framesource_inq'].put_nowait,
'guiserver':self._inq['guiserver_inq'].put_nowait,
'datalogger':self._inq['datalogger_inq'].put_nowait,
'session':self.process_session_cmd,
'shutdown':self.process_shutdown_cmd,
}
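        # 'session' and 'shutdown' are handled by this controller directly;
        # every other module id is forwarded to that component's input queue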
func = switcher.get(modid, None)
if func == self.process_session_cmd:
func(arg)
else:
func(data)
def _command_dispatcher(self, data):
"""
Command Dispatcher. Sends commands to the assigned component.
"""
cmd = data.get_cmd()
for modid, arg in cmd.items():
if modid in self._cmd_dict.get_dict().keys():
self.dispatch_commands(modid, data, arg)
else:
pass
def _process_meta(self, data):
"""
Process the metadata received by the component
"""
meta = None
if isinstance(data, Iface.MetadataPacket):
meta = data.meta
else:
meta = data
if isinstance(meta, metadata_classes.ReconstructionMetadata):
self._reconst_meta = data.meta
self.send_telemetry(self.reconstmeta_to_telem(data.meta),
Iface.SRCID_TELEMETRY_RECONSTRUCTION,
)
elif isinstance(meta, metadata_classes.FouriermaskMetadata):
print('Received FouriermaskMetadata')
self._fouriermask_meta = data.meta
#self.send_telemetry(self.fouriermaskmeta_to_telem(data.meta),
# Iface.SRCID_TELEMETRY_FOURIERMASK)
elif isinstance(meta, metadata_classes.SessionMetadata):
#print('Received SessionMetadata')
self._session_meta = data.meta
self.send_telemetry(self.sessionmeta_to_telem(data.meta),
Iface.SRCID_TELEMETRY_SESSION,
)
elif isinstance(meta, metadata_classes.HologramMetadata):
#print('Received HologramMetadata')
self._holo_meta = data.meta
self.send_telemetry(self.holometa_to_telem(data.meta),
Iface.SRCID_TELEMETRY_HOLOGRAM,
)
elif isinstance(meta, metadata_classes.FramesourceMetadata):
#print('Received FramesourceMetadata')
self._framesource_meta = data.meta
self.send_telemetry(self.framesourcemeta_to_telem(data.meta),
Iface.SRCID_TELEMETRY_FRAMESOURCE,
)
elif isinstance(meta, metadata_classes.DataloggerMetadata):
#print('Received DataloggerMetadata')
self._datalogger_meta = data.meta
self.send_telemetry(self.dataloggermeta_to_telem(data.meta),
Iface.SRCID_TELEMETRY_DATALOGGER,
)
elif isinstance(meta, metadata_classes.GuiserverMetadata):
#print('Received GuiserverMetadata')
self._guiserver_meta = data.meta
self.send_telemetry(self.guiservermeta_to_telem(data.meta),
Iface.SRCID_TELEMETRY_GUISERVER,
)
elif isinstance(meta, metadata_classes.ReconstructionDoneMetadata):
print("************* RECONST DONE EVENT ***********", time.time())
self._events['reconst']['done'].set()
else:
print('Unknown metadata type')
def create_heartbeat(self):
"""
Create the heartbeat object
"""
self._hbeat = HBeat(self._pub, self._id.lower())
def start_heartbeat(self):
"""
Start the heartbeat
"""
self._hbeat.start()
def terminate_heartbeat(self):
"""
        End the execution of this component's heartbeat
"""
self._hbeat.terminate()
def _init_for_component_execute(self):
"""
        Initialize variables within the run thread
"""
self._reconst_telem = telemetry_iface_ag.Reconstruction_Telemetry()
self._session_telem = telemetry_iface_ag.Session_Telemetry()
self._heartbeat_telem = telemetry_iface_ag.Heartbeat_Telemetry()
self._holo_telem = telemetry_iface_ag.Hologram_Telemetry()
self._framesource_telem = telemetry_iface_ag.Framesource_Telemetry()
self._datalogger_telem = telemetry_iface_ag.Datalogger_Telemetry()
self._guiserver_telem = telemetry_iface_ag.Guiserver_Telemetry()
self._fouriermask_telem = telemetry_iface_ag.Fouriermask_Telemetry()
### Create heartbeat thread
### Start the HBeat thread
def _process_initdone(self, data):
"""
Process the "init_done" messages sent by the other components
"""
if data.get_errorcode() == 0:
self._module_init_count += 1
if self._module_init_count >= NUMBER_OF_COMPONENTS:
self._events['controller']['start'].set()
if self._cmd_server is None:
control_q = self._inq['controller_inq']
validate_func = self._cmd_dict.validate_command
hostname = self._meta.cmd_hostname
self._cmd_server = DhmCmdServer(q=control_q,
validate_func=validate_func,
hostname=hostname,
port=self._meta.cmd_port,
)
self._cmd_server.start()
print('Controller: Starting command server...')
def _process_component_messages(self, data):
"""
Process the messages received by the component
"""
if isinstance(data, Iface.InitDonePkt):
self._process_initdone(data)
### Process Metadata Packets by converting them to telemetry
elif isinstance(data, Iface.MetadataPacket):
self._process_meta(data)
elif isinstance(data, Iface.Command):
print('CONTROLLER: %f: Got command!'%(time.time()))
### Dispatch the command to the responsible module
self._command_dispatcher(data)
### Process image (from camera streamer)
elif isinstance(data, Iface.Image):
pass
elif isinstance(data, Iface.ReconstructorProduct):
print("CONTROLLER: %f: Got reconstructor data!"%(time.time()))
else:
print('Controller: Unknown data type')
def run(self):
"""
Component execution loop
"""
try:
self._init_for_component_execute()
self.create_heartbeat()
self.start_heartbeat()
print('[%s] Consumer thread started'%(self._id))
while True:
data = self._inq['controller_inq'].get()
if data is None:
print('Exiting Controller')
break
self._process_component_messages(data)
## End of While
self.end_component()
except Exception as err:
self.handle_component_exception(err)
finally:
pass
def end_component(self):
"""
End execution of component
"""
self.terminate_heartbeat()
if self._hbeat.isAlive():
self._hbeat.join(timeout=5)
print('[%s]: End'%(self._id))
def handle_component_exception(self, err):
"""
Send Heartbeat error and raise the error
"""
print('[%s] Exception caught: %s'%(self._id, repr(err)))
exc_type, exc_obj, t_b = sys.exc_info()
lineno = t_b.tb_lineno
print('{} EXCEPTION IN (LINE {}): {}'.format(self._id, lineno, exc_obj))
self._hbeat.set_update(err)
if self._hbeat.isAlive():
self._hbeat.join(timeout=5)
raise err
|
"""
There is another implementation of this using stack please look,
in stack folder
"""
def rightmost_greater(arr, left, right):
if left <= right:
if arr[left] > arr[right]:
rightmost_greater(arr, left, right-1)
else:
lst.append(arr[right])
rightmost_greater(arr, left+1, n-1)
return lst
arr = [16, 17, 4, 3, 5, 2]
lst = []
n = len(arr)
print(rightmost_greater(arr, 0, n-1))
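# With the -1 sentinel above, the expected output for this input is:
# [17, -1, 5, 5, -1, -1]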
|
''' data_handler.py '''
import os
from sorter.lib.db import DB
from sorter.lib.book_utils import get_by_id, get_by_isbn
from sorter.lib.parse_xml import parse_isbn13_response, parse_id_response
def store_data(books, db_file):
'''
Store the book data in the provided database
'''
database = DB(db_file)
database.create_connection()
query = '''INSERT INTO rankings(id, isbn, isbn13, title,
image_url, publication_year, ratings_count, average_rating,
author, link) VALUES(?,?,?,?,?,?,?,?,?,?)'''
for book in books:
database.insertupdate(query, book)
database.close_connection()
def get_books(db_file):
'''
Get the previously stored books data
'''
database = DB(db_file)
database.create_connection()
books = database.query('select * from rankings')
database.close_connection()
return books
def get_books_with_missing_data(db_file):
'''
    Get previously stored books that are missing data (publication_year is null)
'''
database = DB(db_file)
database.create_connection()
books = database.query('select * from rankings where publication_year is null')
database.close_connection()
return books
def dump_data(db_file):
'''
Delete the provided data file
'''
if os.path.isfile(db_file):
os.remove(db_file)
def clean_data(db_name, defaults):
'''
Plug in missing data:
book[0] = ID
book[1] = ISBN
book[2] = ISBN13
book[3] = title
book[4] = image url
book[5] = pub year
book[6] = Total Ratings
book[7] = avg rating
book[8] = author
book[9] = link
'''
db_file = os.path.abspath(db_name)
if os.path.isfile(db_file):
books = get_books_with_missing_data(db_file)
        # use an explicit loop; map() is lazy on Python 3, so its side effects would never run
        for book in books:
            update_book(book, db_file, defaults)
def update_book(book, db_file, defaults):
'''
Add the missing book data
'''
qry = None
if book[2] is not None:
xml_response = get_by_isbn(book[2], defaults)
new_book = parse_isbn13_response(xml_response)
qry = 'UPDATE rankings set publication_year = ? where isbn13 = ?'
vals = [new_book[5], book[2]]
elif book[0] is not None:
xml_response = get_by_id(book[0], defaults)
new_book = parse_id_response(xml_response)
qry = 'UPDATE rankings set publication_year = ?, isbn = ?, isbn13 = ? where id = ?'
vals = [new_book[5], new_book[1], new_book[2], book[0]]
if qry is not None:
database = DB(db_file)
database.create_connection()
database.insertupdate(qry, vals)
database.close_connection()
def manually_update_books(data, db_file):
'''
Update books based on parsed POST data
'''
database = DB(db_file)
database.create_connection()
for book in data:
if book['attr'] == 'id':
continue
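        # note: book['attr'] is interpolated straight into the SQL string, so it
        # should be restricted to known column names before reaching this point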
qry = 'UPDATE rankings set %s = ? where id = ?' % book['attr']
vals = [book['value'], int(book['book_id'])]
database.insertupdate(qry, vals)
database.close_connection()
|
import typing
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
from scipy.stats import wilcoxon
from cacp.util import to_latex
def bold_large_p_value(data: float, format_string="%.4f") -> str:
"""
    Makes large p-values (p > 0.05) bold in the LaTeX table
:param data: value
:param format_string:
:return: bolded values string
"""
if data > 0.05:
return "\\textbf{%s}" % format_string % data
return "%s" % format_string % data
def process_wilcoxon_for_metric(current_algorithm: str, metric: str, result_dir: Path) -> pd.DataFrame:
"""
    Calculates the Wilcoxon signed-rank test for a single metric of the comparison results.
:param current_algorithm: current algorithm
:param metric: comparison metric {auc, accuracy, precision, recall, f1}
:param result_dir: results directory
    :return: DataFrame with Wilcoxon p-values for the metric
"""
wilcoxon_dir = result_dir.joinpath('wilcoxon')
wilcoxon_dir.mkdir(exist_ok=True, parents=True)
metric_dir = wilcoxon_dir.joinpath(metric)
metric_dir.mkdir(exist_ok=True, parents=True)
records = []
df = pd.read_csv(result_dir.joinpath('comparison.csv'))
algorithms = list(df.algorithm.unique())
algorithms.remove(current_algorithm)
current_alg_df = df[df.algorithm == current_algorithm]
for algorithm in algorithms:
a_df = df[df.algorithm == algorithm]
alg1_values = current_alg_df[metric].values
alg2_values = a_df[metric].values
diff = alg1_values - alg2_values
if np.all(diff == 0):
w, p = 'invalid-data', 1
else:
w, p = wilcoxon(alg1_values, alg2_values)
row = {
current_algorithm: current_algorithm,
'Algorithm': algorithm,
# 'w': w,
'p-value': p,
}
records.append(row)
df_r = pd.DataFrame(records)
df_r.reset_index(drop=True, inplace=True)
df_r.index += 1
df_r.to_csv(metric_dir.joinpath(f'comparison_{current_algorithm}_result.csv'), index=True)
    with metric_dir.joinpath(f'comparison_{current_algorithm}_result.tex').open('w') as f:
        f.write(
            to_latex(df_r,
                     caption=f"Comparison of classifiers and {current_algorithm} "
                             f"using Wilcoxon signed-rank test for {metric}",
                     label=f'tab:{current_algorithm}_wilcoxon_{metric}_comparison',
                     )
        )
return df_r
def process_wilcoxon(classifiers: typing.List[typing.Tuple[str, typing.Callable]], result_dir: Path):
"""
Calculates the Wilcoxon signed-rank test for comparison results.
:param classifiers: classifiers collection
:param result_dir: results directory
"""
for current_algorithm, _ in classifiers:
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=UserWarning)
acc_df = process_wilcoxon_for_metric(current_algorithm, 'accuracy', result_dir).sort_values(
by=['Algorithm'])
auc_df = process_wilcoxon_for_metric(current_algorithm, 'auc', result_dir).sort_values(by=['Algorithm'])
r_df = acc_df[['Algorithm']].copy()
for c in acc_df.columns[2:]:
r_df['accuracy ' + c] = acc_df[c].values
for c in auc_df.columns[2:]:
r_df['auc ' + c] = auc_df[c].values
wilcoxon_dir = result_dir.joinpath('wilcoxon')
wilcoxon_dir.mkdir(exist_ok=True, parents=True)
r_df.reset_index(drop=True, inplace=True)
r_df.index += 1
r_df.to_csv(wilcoxon_dir.joinpath(f'comparison_{current_algorithm}.csv'), index=True)
for col in ['accuracy p-value', 'auc p-value']:
r_df[col] = r_df[col].apply(lambda data: bold_large_p_value(data))
wilcoxon_dir.joinpath(f'comparison_{current_algorithm}.tex').open('w').write(
to_latex(r_df,
caption=f"Comparison of classifiers and {current_algorithm} using Wilcoxon signed-rank test",
label='tab:wilcoxon_comparison',
))
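# Minimal usage sketch (an assumption, not part of the original module): it expects
# result_dir to already contain a comparison.csv with 'algorithm', 'accuracy' and 'auc'
# columns, and only the classifier names are used here, so the callables are placeholders:
#
#   from pathlib import Path
#   classifiers = [('SVC', None), ('RandomForest', None)]
#   process_wilcoxon(classifiers, Path('./results'))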
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import textwrap
import pretend
import pytest
from .utils import (
load_nist_vectors, load_vectors_from_file, load_cryptrec_vectors,
load_hash_vectors, check_for_iface, check_backend_support,
select_backends, load_pkcs1_vectors
)
class FakeInterface(object):
pass
def test_select_one_backend():
b1 = pretend.stub(name="b1")
b2 = pretend.stub(name="b2")
b3 = pretend.stub(name="b3")
backends = [b1, b2, b3]
name = "b2"
selected_backends = select_backends(name, backends)
assert len(selected_backends) == 1
assert selected_backends[0] == b2
def test_select_no_backend():
b1 = pretend.stub(name="b1")
b2 = pretend.stub(name="b2")
b3 = pretend.stub(name="b3")
backends = [b1, b2, b3]
name = "back!"
with pytest.raises(ValueError):
select_backends(name, backends)
def test_select_backends_none():
b1 = pretend.stub(name="b1")
b2 = pretend.stub(name="b2")
b3 = pretend.stub(name="b3")
backends = [b1, b2, b3]
name = None
selected_backends = select_backends(name, backends)
assert len(selected_backends) == 3
def test_select_two_backends():
b1 = pretend.stub(name="b1")
b2 = pretend.stub(name="b2")
b3 = pretend.stub(name="b3")
backends = [b1, b2, b3]
name = "b2 ,b1 "
selected_backends = select_backends(name, backends)
assert len(selected_backends) == 2
assert selected_backends == [b1, b2]
def test_check_for_iface():
item = pretend.stub(keywords=["fake_name"], funcargs={"backend": True})
with pytest.raises(pytest.skip.Exception) as exc_info:
check_for_iface("fake_name", FakeInterface, item)
assert exc_info.value.args[0] == "True backend does not support fake_name"
item = pretend.stub(
keywords=["fake_name"],
funcargs={"backend": FakeInterface()}
)
check_for_iface("fake_name", FakeInterface, item)
def test_check_backend_support_skip():
supported = pretend.stub(
kwargs={"only_if": lambda backend: False, "skip_message": "Nope"}
)
item = pretend.stub(keywords={"supported": supported},
funcargs={"backend": True})
with pytest.raises(pytest.skip.Exception) as exc_info:
check_backend_support(item)
assert exc_info.value.args[0] == "Nope (True)"
def test_check_backend_support_no_skip():
supported = pretend.stub(
kwargs={"only_if": lambda backend: True, "skip_message": "Nope"}
)
item = pretend.stub(keywords={"supported": supported},
funcargs={"backend": True})
assert check_backend_support(item) is None
def test_check_backend_support_no_backend():
supported = pretend.stub(
kwargs={"only_if": "notalambda", "skip_message": "Nope"}
)
item = pretend.stub(keywords={"supported": supported},
funcargs={})
with pytest.raises(ValueError):
check_backend_support(item)
def test_load_nist_vectors():
vector_data = textwrap.dedent("""
# CAVS 11.1
# Config info for aes_values
# AESVS GFSbox test data for CBC
# State : Encrypt and Decrypt
# Key Length : 128
# Generated on Fri Apr 22 15:11:33 2011
[ENCRYPT]
COUNT = 0
KEY = 00000000000000000000000000000000
IV = 00000000000000000000000000000000
PLAINTEXT = f34481ec3cc627bacd5dc3fb08f273e6
CIPHERTEXT = 0336763e966d92595a567cc9ce537f5e
COUNT = 1
KEY = 00000000000000000000000000000000
IV = 00000000000000000000000000000000
PLAINTEXT = 9798c4640bad75c7c3227db910174e72
CIPHERTEXT = a9a1631bf4996954ebc093957b234589
[DECRYPT]
COUNT = 0
KEY = 00000000000000000000000000000000
IV = 00000000000000000000000000000000
CIPHERTEXT = 0336763e966d92595a567cc9ce537f5e
PLAINTEXT = f34481ec3cc627bacd5dc3fb08f273e6
COUNT = 1
KEY = 00000000000000000000000000000000
IV = 00000000000000000000000000000000
CIPHERTEXT = a9a1631bf4996954ebc093957b234589
PLAINTEXT = 9798c4640bad75c7c3227db910174e72
""").splitlines()
assert load_nist_vectors(vector_data) == [
{
"key": b"00000000000000000000000000000000",
"iv": b"00000000000000000000000000000000",
"plaintext": b"f34481ec3cc627bacd5dc3fb08f273e6",
"ciphertext": b"0336763e966d92595a567cc9ce537f5e",
},
{
"key": b"00000000000000000000000000000000",
"iv": b"00000000000000000000000000000000",
"plaintext": b"9798c4640bad75c7c3227db910174e72",
"ciphertext": b"a9a1631bf4996954ebc093957b234589",
},
{
"key": b"00000000000000000000000000000000",
"iv": b"00000000000000000000000000000000",
"plaintext": b"f34481ec3cc627bacd5dc3fb08f273e6",
"ciphertext": b"0336763e966d92595a567cc9ce537f5e",
},
{
"key": b"00000000000000000000000000000000",
"iv": b"00000000000000000000000000000000",
"plaintext": b"9798c4640bad75c7c3227db910174e72",
"ciphertext": b"a9a1631bf4996954ebc093957b234589",
},
]
def test_load_nist_vectors_with_null_chars():
vector_data = textwrap.dedent("""
COUNT = 0
KEY = thing\\0withnulls
COUNT = 1
KEY = 00000000000000000000000000000000
""").splitlines()
assert load_nist_vectors(vector_data) == [
{
"key": b"thing\x00withnulls",
},
{
"key": b"00000000000000000000000000000000",
},
]
def test_load_cryptrec_vectors():
vector_data = textwrap.dedent("""
# Vectors taken from http://info.isl.ntt.co.jp/crypt/eng/camellia/
# Download is t_camelia.txt
# Camellia with 128-bit key
K No.001 : 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
P No.001 : 80 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
C No.001 : 07 92 3A 39 EB 0A 81 7D 1C 4D 87 BD B8 2D 1F 1C
P No.002 : 40 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
C No.002 : 48 CD 64 19 80 96 72 D2 34 92 60 D8 9A 08 D3 D3
K No.002 : 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
P No.001 : 80 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
C No.001 : 07 92 3A 39 EB 0A 81 7D 1C 4D 87 BD B8 2D 1F 1C
""").splitlines()
assert load_cryptrec_vectors(vector_data) == [
{
"key": b"00000000000000000000000000000000",
"plaintext": b"80000000000000000000000000000000",
"ciphertext": b"07923A39EB0A817D1C4D87BDB82D1F1C",
},
{
"key": b"00000000000000000000000000000000",
"plaintext": b"40000000000000000000000000000000",
"ciphertext": b"48CD6419809672D2349260D89A08D3D3",
},
{
"key": b"10000000000000000000000000000000",
"plaintext": b"80000000000000000000000000000000",
"ciphertext": b"07923A39EB0A817D1C4D87BDB82D1F1C",
},
]
def test_load_cryptrec_vectors_invalid():
vector_data = textwrap.dedent("""
# Vectors taken from http://info.isl.ntt.co.jp/crypt/eng/camellia/
# Download is t_camelia.txt
# Camellia with 128-bit key
E No.001 : 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
""").splitlines()
with pytest.raises(ValueError):
load_cryptrec_vectors(vector_data)
def test_load_hash_vectors():
vector_data = textwrap.dedent("""
# http://tools.ietf.org/html/rfc1321
[irrelevant]
Len = 0
Msg = 00
MD = d41d8cd98f00b204e9800998ecf8427e
Len = 8
Msg = 61
MD = 0cc175b9c0f1b6a831c399e269772661
Len = 24
Msg = 616263
MD = 900150983cd24fb0d6963f7d28e17f72
Len = 112
Msg = 6d65737361676520646967657374
MD = f96b697d7cb7938d525a2f31aaf161d0
""").splitlines()
assert load_hash_vectors(vector_data) == [
(b"", "d41d8cd98f00b204e9800998ecf8427e"),
(b"61", "0cc175b9c0f1b6a831c399e269772661"),
(b"616263", "900150983cd24fb0d6963f7d28e17f72"),
(b"6d65737361676520646967657374", "f96b697d7cb7938d525a2f31aaf161d0"),
]
def test_load_hmac_vectors():
vector_data = textwrap.dedent("""
Len = 224
# "Jefe"
Key = 4a656665
# "what do ya want for nothing?"
Msg = 7768617420646f2079612077616e7420666f72206e6f7468696e673f
MD = 750c783e6ab0b503eaa86e310a5db738
""").splitlines()
assert load_hash_vectors(vector_data) == [
(b"7768617420646f2079612077616e7420666f72206e6f7468696e673f",
"750c783e6ab0b503eaa86e310a5db738",
b"4a656665"),
]
def test_load_hash_vectors_bad_data():
vector_data = textwrap.dedent("""
# http://tools.ietf.org/html/rfc1321
Len = 0
Msg = 00
UNKNOWN=Hello World
""").splitlines()
with pytest.raises(ValueError):
load_hash_vectors(vector_data)
def test_load_vectors_from_file():
vectors = load_vectors_from_file(
os.path.join("ciphers", "Blowfish", "bf-cfb.txt"),
load_nist_vectors,
)
assert vectors == [
{
"key": b"0123456789ABCDEFF0E1D2C3B4A59687",
"iv": b"FEDCBA9876543210",
"plaintext": (
b"37363534333231204E6F77206973207468652074696D6520666F722000"
),
"ciphertext": (
b"E73214A2822139CAF26ECF6D2EB9E76E3DA3DE04D1517200519D57A6C3"
),
}
]
def test_load_nist_gcm_vectors():
vector_data = textwrap.dedent("""
[Keylen = 128]
[IVlen = 96]
[PTlen = 0]
[AADlen = 0]
[Taglen = 128]
Count = 0
Key = 11754cd72aec309bf52f7687212e8957
IV = 3c819d9a9bed087615030b65
PT =
AAD =
CT =
Tag = 250327c674aaf477aef2675748cf6971
Count = 1
Key = 272f16edb81a7abbea887357a58c1917
IV = 794ec588176c703d3d2a7a07
PT =
AAD =
CT =
Tag = b6e6f197168f5049aeda32dafbdaeb
Count = 2
Key = a49a5e26a2f8cb63d05546c2a62f5343
IV = 907763b19b9b4ab6bd4f0281
CT =
AAD =
Tag = a2be08210d8c470a8df6e8fbd79ec5cf
FAIL
Count = 3
Key = 5c1155084cc0ede76b3bc22e9f7574ef
IV = 9549e4ba69a61cad7856efc1
PT = d1448fa852b84408e2dad8381f363de7
AAD = e98e9d9c618e46fef32660976f854ee3
CT = f78b60ca125218493bea1c50a2e12ef4
Tag = d72da7f5c6cf0bca7242c71835809449
[Keylen = 128]
[IVlen = 96]
[PTlen = 0]
[AADlen = 0]
[Taglen = 120]
Count = 0
Key = eac258e99c55e6ae8ef1da26640613d7
IV = 4e8df20faaf2c8eebe922902
CT =
AAD =
Tag = e39aeaebe86aa309a4d062d6274339
PT =
Count = 1
Key = 3726cf02fcc6b8639a5497652c94350d
IV = 55fef82cde693ce76efcc193
CT =
AAD =
Tag = 3d68111a81ed22d2ef5bccac4fc27f
FAIL
Count = 2
Key = f202299d5fd74f03b12d2119a6c4c038
IV = eec51e7958c3f20a1bb71815
CT =
AAD =
Tag = a81886b3fb26e51fca87b267e1e157
FAIL
Count = 3
Key = fd52925f39546b4c55ffb6b20c59898c
IV = f5cf3227444afd905a5f6dba
CT =
AAD =
Tag = 1665b0f1a0b456e1664cfd3de08ccd
PT =
[Keylen = 128]
[IVlen = 8]
[PTlen = 104]
[AADlen = 0]
[Taglen = 128]
Count = 0
Key = 58fab7632bcf10d2bcee58520bf37414
IV = 3c
CT = 15c4db4cbb451211179d57017f
AAD =
Tag = eae841d4355feeb3f786bc86625f1e5b
FAIL
""").splitlines()
assert load_nist_vectors(vector_data) == [
{'aad': b'',
'pt': b'',
'iv': b'3c819d9a9bed087615030b65',
'tag': b'250327c674aaf477aef2675748cf6971',
'key': b'11754cd72aec309bf52f7687212e8957',
'ct': b''},
{'aad': b'',
'pt': b'',
'iv': b'794ec588176c703d3d2a7a07',
'tag': b'b6e6f197168f5049aeda32dafbdaeb',
'key': b'272f16edb81a7abbea887357a58c1917',
'ct': b''},
{'aad': b'',
'iv': b'907763b19b9b4ab6bd4f0281',
'tag': b'a2be08210d8c470a8df6e8fbd79ec5cf',
'key': b'a49a5e26a2f8cb63d05546c2a62f5343',
'ct': b'',
'fail': True},
{'aad': b'e98e9d9c618e46fef32660976f854ee3',
'pt': b'd1448fa852b84408e2dad8381f363de7',
'iv': b'9549e4ba69a61cad7856efc1',
'tag': b'd72da7f5c6cf0bca7242c71835809449',
'key': b'5c1155084cc0ede76b3bc22e9f7574ef',
'ct': b'f78b60ca125218493bea1c50a2e12ef4'},
{'aad': b'',
'pt': b'',
'iv': b'4e8df20faaf2c8eebe922902',
'tag': b'e39aeaebe86aa309a4d062d6274339',
'key': b'eac258e99c55e6ae8ef1da26640613d7',
'ct': b''},
{'aad': b'',
'iv': b'55fef82cde693ce76efcc193',
'tag': b'3d68111a81ed22d2ef5bccac4fc27f',
'key': b'3726cf02fcc6b8639a5497652c94350d',
'ct': b'',
'fail': True},
{'aad': b'',
'iv': b'eec51e7958c3f20a1bb71815',
'tag': b'a81886b3fb26e51fca87b267e1e157',
'key': b'f202299d5fd74f03b12d2119a6c4c038',
'ct': b'',
'fail': True},
{'aad': b'',
'pt': b'',
'iv': b'f5cf3227444afd905a5f6dba',
'tag': b'1665b0f1a0b456e1664cfd3de08ccd',
'key': b'fd52925f39546b4c55ffb6b20c59898c',
'ct': b''},
{'aad': b'',
'iv': b'3c',
'tag': b'eae841d4355feeb3f786bc86625f1e5b',
'key': b'58fab7632bcf10d2bcee58520bf37414',
'ct': b'15c4db4cbb451211179d57017f',
'fail': True},
]
def test_load_pkcs1_vectors():
vector_data = textwrap.dedent("""
Test vectors for RSA-PSS
========================
This file contains an extract of the original pss-vect.txt
Key lengths:
Key 8: 1031 bits
Key 9: 1536 bits
===========================================================================
<snip>
# Example 8: A 1031-bit RSA key pair
# -----------------------------------
# Public key
# ----------
# Modulus:
49 53 70 a1 fb 18 54 3c 16 d3 63 1e 31 63 25 5d
f6 2b e6 ee e8 90 d5 f2 55 09 e4 f7 78 a8 ea 6f
bb bc df 85 df f6 4e 0d 97 20 03 ab 36 81 fb ba
6d d4 1f d5 41 82 9b 2e 58 2d e9 f2 a4 a4 e0 a2
d0 90 0b ef 47 53 db 3c ee 0e e0 6c 7d fa e8 b1
d5 3b 59 53 21 8f 9c ce ea 69 5b 08 66 8e de aa
dc ed 94 63 b1 d7 90 d5 eb f2 7e 91 15 b4 6c ad
4d 9a 2b 8e fa b0 56 1b 08 10 34 47 39 ad a0 73
3f
# Exponent:
01 00 01
# Private key
# -----------
# Modulus:
49 53 70 a1 fb 18 54 3c 16 d3 63 1e 31 63 25 5d
f6 2b e6 ee e8 90 d5 f2 55 09 e4 f7 78 a8 ea 6f
bb bc df 85 df f6 4e 0d 97 20 03 ab 36 81 fb ba
6d d4 1f d5 41 82 9b 2e 58 2d e9 f2 a4 a4 e0 a2
d0 90 0b ef 47 53 db 3c ee 0e e0 6c 7d fa e8 b1
d5 3b 59 53 21 8f 9c ce ea 69 5b 08 66 8e de aa
dc ed 94 63 b1 d7 90 d5 eb f2 7e 91 15 b4 6c ad
4d 9a 2b 8e fa b0 56 1b 08 10 34 47 39 ad a0 73
3f
# Public exponent:
01 00 01
# Exponent:
6c 66 ff e9 89 80 c3 8f cd ea b5 15 98 98 83 61
65 f4 b4 b8 17 c4 f6 a8 d4 86 ee 4e a9 13 0f e9
b9 09 2b d1 36 d1 84 f9 5f 50 4a 60 7e ac 56 58
46 d2 fd d6 59 7a 89 67 c7 39 6e f9 5a 6e ee bb
45 78 a6 43 96 6d ca 4d 8e e3 de 84 2d e6 32 79
c6 18 15 9c 1a b5 4a 89 43 7b 6a 61 20 e4 93 0a
fb 52 a4 ba 6c ed 8a 49 47 ac 64 b3 0a 34 97 cb
e7 01 c2 d6 26 6d 51 72 19 ad 0e c6 d3 47 db e9
# Prime 1:
08 da d7 f1 13 63 fa a6 23 d5 d6 d5 e8 a3 19 32
8d 82 19 0d 71 27 d2 84 6c 43 9b 0a b7 26 19 b0
a4 3a 95 32 0e 4e c3 4f c3 a9 ce a8 76 42 23 05
bd 76 c5 ba 7b e9 e2 f4 10 c8 06 06 45 a1 d2 9e
db
# Prime 2:
08 47 e7 32 37 6f c7 90 0f 89 8e a8 2e b2 b0 fc
41 85 65 fd ae 62 f7 d9 ec 4c e2 21 7b 97 99 0d
d2 72 db 15 7f 99 f6 3c 0d cb b9 fb ac db d4 c4
da db 6d f6 77 56 35 8c a4 17 48 25 b4 8f 49 70
6d
# Prime exponent 1:
05 c2 a8 3c 12 4b 36 21 a2 aa 57 ea 2c 3e fe 03
5e ff 45 60 f3 3d de bb 7a da b8 1f ce 69 a0 c8
c2 ed c1 65 20 dd a8 3d 59 a2 3b e8 67 96 3a c6
5f 2c c7 10 bb cf b9 6e e1 03 de b7 71 d1 05 fd
85
# Prime exponent 2:
04 ca e8 aa 0d 9f aa 16 5c 87 b6 82 ec 14 0b 8e
d3 b5 0b 24 59 4b 7a 3b 2c 22 0b 36 69 bb 81 9f
98 4f 55 31 0a 1a e7 82 36 51 d4 a0 2e 99 44 79
72 59 51 39 36 34 34 e5 e3 0a 7e 7d 24 15 51 e1
b9
# Coefficient:
07 d3 e4 7b f6 86 60 0b 11 ac 28 3c e8 8d bb 3f
60 51 e8 ef d0 46 80 e4 4c 17 1e f5 31 b8 0b 2b
7c 39 fc 76 63 20 e2 cf 15 d8 d9 98 20 e9 6f f3
0d c6 96 91 83 9c 4b 40 d7 b0 6e 45 30 7d c9 1f
3f
# RSA-PSS signing of 6 random messages with random salts
# -------------------------------------------------------
# PSS Example 8.1
# -----------------
# Message to be signed:
81 33 2f 4b e6 29 48 41 5e a1 d8 99 79 2e ea cf
6c 6e 1d b1 da 8b e1 3b 5c ea 41 db 2f ed 46 70
92 e1 ff 39 89 14 c7 14 25 97 75 f5 95 f8 54 7f
73 56 92 a5 75 e6 92 3a f7 8f 22 c6 99 7d db 90
fb 6f 72 d7 bb 0d d5 74 4a 31 de cd 3d c3 68 58
49 83 6e d3 4a ec 59 63 04 ad 11 84 3c 4f 88 48
9f 20 97 35 f5 fb 7f da f7 ce c8 ad dc 58 18 16
8f 88 0a cb f4 90 d5 10 05 b7 a8 e8 4e 43 e5 42
87 97 75 71 dd 99 ee a4 b1 61 eb 2d f1 f5 10 8f
12 a4 14 2a 83 32 2e db 05 a7 54 87 a3 43 5c 9a
78 ce 53 ed 93 bc 55 08 57 d7 a9 fb
# Salt:
1d 65 49 1d 79 c8 64 b3 73 00 9b e6 f6 f2 46 7b
ac 4c 78 fa
# Signature:
02 62 ac 25 4b fa 77 f3 c1 ac a2 2c 51 79 f8 f0
40 42 2b 3c 5b af d4 0a 8f 21 cf 0f a5 a6 67 cc
d5 99 3d 42 db af b4 09 c5 20 e2 5f ce 2b 1e e1
e7 16 57 7f 1e fa 17 f3 da 28 05 2f 40 f0 41 9b
23 10 6d 78 45 aa f0 11 25 b6 98 e7 a4 df e9 2d
39 67 bb 00 c4 d0 d3 5b a3 55 2a b9 a8 b3 ee f0
7c 7f ec db c5 42 4a c4 db 1e 20 cb 37 d0 b2 74
47 69 94 0e a9 07 e1 7f bb ca 67 3b 20 52 23 80
c5
# PSS Example 8.2
# -----------------
# Message to be signed:
e2 f9 6e af 0e 05 e7 ba 32 6e cc a0 ba 7f d2 f7
c0 23 56 f3 ce de 9d 0f aa bf 4f cc 8e 60 a9 73
e5 59 5f d9 ea 08
# Salt:
43 5c 09 8a a9 90 9e b2 37 7f 12 48 b0 91 b6 89
87 ff 18 38
# Signature:
27 07 b9 ad 51 15 c5 8c 94 e9 32 e8 ec 0a 28 0f
56 33 9e 44 a1 b5 8d 4d dc ff 2f 31 2e 5f 34 dc
fe 39 e8 9c 6a 94 dc ee 86 db bd ae 5b 79 ba 4e
08 19 a9 e7 bf d9 d9 82 e7 ee 6c 86 ee 68 39 6e
8b 3a 14 c9 c8 f3 4b 17 8e b7 41 f9 d3 f1 21 10
9b f5 c8 17 2f ad a2 e7 68 f9 ea 14 33 03 2c 00
4a 8a a0 7e b9 90 00 0a 48 dc 94 c8 ba c8 aa be
2b 09 b1 aa 46 c0 a2 aa 0e 12 f6 3f bb a7 75 ba
7e
# <snip>
# =============================================
# Example 9: A 1536-bit RSA key pair
# -----------------------------------
# Public key
# ----------
# Modulus:
e6 bd 69 2a c9 66 45 79 04 03 fd d0 f5 be b8 b9
bf 92 ed 10 00 7f c3 65 04 64 19 dd 06 c0 5c 5b
5b 2f 48 ec f9 89 e4 ce 26 91 09 97 9c bb 40 b4
a0 ad 24 d2 24 83 d1 ee 31 5a d4 cc b1 53 42 68
35 26 91 c5 24 f6 dd 8e 6c 29 d2 24 cf 24 69 73
ae c8 6c 5b f6 b1 40 1a 85 0d 1b 9a d1 bb 8c bc
ec 47 b0 6f 0f 8c 7f 45 d3 fc 8f 31 92 99 c5 43
3d db c2 b3 05 3b 47 de d2 ec d4 a4 ca ef d6 14
83 3d c8 bb 62 2f 31 7e d0 76 b8 05 7f e8 de 3f
84 48 0a d5 e8 3e 4a 61 90 4a 4f 24 8f b3 97 02
73 57 e1 d3 0e 46 31 39 81 5c 6f d4 fd 5a c5 b8
17 2a 45 23 0e cb 63 18 a0 4f 14 55 d8 4e 5a 8b
# Exponent:
01 00 01
# Private key
# -----------
# Modulus:
e6 bd 69 2a c9 66 45 79 04 03 fd d0 f5 be b8 b9
bf 92 ed 10 00 7f c3 65 04 64 19 dd 06 c0 5c 5b
5b 2f 48 ec f9 89 e4 ce 26 91 09 97 9c bb 40 b4
a0 ad 24 d2 24 83 d1 ee 31 5a d4 cc b1 53 42 68
35 26 91 c5 24 f6 dd 8e 6c 29 d2 24 cf 24 69 73
ae c8 6c 5b f6 b1 40 1a 85 0d 1b 9a d1 bb 8c bc
ec 47 b0 6f 0f 8c 7f 45 d3 fc 8f 31 92 99 c5 43
3d db c2 b3 05 3b 47 de d2 ec d4 a4 ca ef d6 14
83 3d c8 bb 62 2f 31 7e d0 76 b8 05 7f e8 de 3f
84 48 0a d5 e8 3e 4a 61 90 4a 4f 24 8f b3 97 02
73 57 e1 d3 0e 46 31 39 81 5c 6f d4 fd 5a c5 b8
17 2a 45 23 0e cb 63 18 a0 4f 14 55 d8 4e 5a 8b
# Public exponent:
01 00 01
# Exponent:
6a 7f d8 4f b8 5f ad 07 3b 34 40 6d b7 4f 8d 61
a6 ab c1 21 96 a9 61 dd 79 56 5e 9d a6 e5 18 7b
ce 2d 98 02 50 f7 35 95 75 35 92 70 d9 15 90 bb
0e 42 7c 71 46 0b 55 d5 14 10 b1 91 bc f3 09 fe
a1 31 a9 2c 8e 70 27 38 fa 71 9f 1e 00 41 f5 2e
40 e9 1f 22 9f 4d 96 a1 e6 f1 72 e1 55 96 b4 51
0a 6d ae c2 61 05 f2 be bc 53 31 6b 87 bd f2 13
11 66 60 70 e8 df ee 69 d5 2c 71 a9 76 ca ae 79
c7 2b 68 d2 85 80 dc 68 6d 9f 51 29 d2 25 f8 2b
3d 61 55 13 a8 82 b3 db 91 41 6b 48 ce 08 88 82
13 e3 7e eb 9a f8 00 d8 1c ab 32 8c e4 20 68 99
03 c0 0c 7b 5f d3 1b 75 50 3a 6d 41 96 84 d6 29
# Prime 1:
f8 eb 97 e9 8d f1 26 64 ee fd b7 61 59 6a 69 dd
cd 0e 76 da ec e6 ed 4b f5 a1 b5 0a c0 86 f7 92
8a 4d 2f 87 26 a7 7e 51 5b 74 da 41 98 8f 22 0b
1c c8 7a a1 fc 81 0c e9 9a 82 f2 d1 ce 82 1e dc
ed 79 4c 69 41 f4 2c 7a 1a 0b 8c 4d 28 c7 5e c6
0b 65 22 79 f6 15 4a 76 2a ed 16 5d 47 de e3 67
# Prime 2:
ed 4d 71 d0 a6 e2 4b 93 c2 e5 f6 b4 bb e0 5f 5f
b0 af a0 42 d2 04 fe 33 78 d3 65 c2 f2 88 b6 a8
da d7 ef e4 5d 15 3e ef 40 ca cc 7b 81 ff 93 40
02 d1 08 99 4b 94 a5 e4 72 8c d9 c9 63 37 5a e4
99 65 bd a5 5c bf 0e fe d8 d6 55 3b 40 27 f2 d8
62 08 a6 e6 b4 89 c1 76 12 80 92 d6 29 e4 9d 3d
# Prime exponent 1:
2b b6 8b dd fb 0c 4f 56 c8 55 8b ff af 89 2d 80
43 03 78 41 e7 fa 81 cf a6 1a 38 c5 e3 9b 90 1c
8e e7 11 22 a5 da 22 27 bd 6c de eb 48 14 52 c1
2a d3 d6 1d 5e 4f 77 6a 0a b5 56 59 1b ef e3 e5
9e 5a 7f dd b8 34 5e 1f 2f 35 b9 f4 ce e5 7c 32
41 4c 08 6a ec 99 3e 93 53 e4 80 d9 ee c6 28 9f
# Prime exponent 2:
4f f8 97 70 9f ad 07 97 46 49 45 78 e7 0f d8 54
61 30 ee ab 56 27 c4 9b 08 0f 05 ee 4a d9 f3 e4
b7 cb a9 d6 a5 df f1 13 a4 1c 34 09 33 68 33 f1
90 81 6d 8a 6b c4 2e 9b ec 56 b7 56 7d 0f 3c 9c
69 6d b6 19 b2 45 d9 01 dd 85 6d b7 c8 09 2e 77
e9 a1 cc cd 56 ee 4d ba 42 c5 fd b6 1a ec 26 69
# Coefficient:
77 b9 d1 13 7b 50 40 4a 98 27 29 31 6e fa fc 7d
fe 66 d3 4e 5a 18 26 00 d5 f3 0a 0a 85 12 05 1c
56 0d 08 1d 4d 0a 18 35 ec 3d 25 a6 0f 4e 4d 6a
a9 48 b2 bf 3d bb 5b 12 4c bb c3 48 92 55 a3 a9
48 37 2f 69 78 49 67 45 f9 43 e1 db 4f 18 38 2c
ea a5 05 df c6 57 57 bb 3f 85 7a 58 dc e5 21 56
# PKCS#1 v1.5 Signature Example 2.17
# -----------------
# Message to be signed:
06 ad d7 5a b6 89 de 06 77 44 e6 9a 2e bd 4b 90
fa 93 83 00 3c d0 5f f5 36 cb f2 94 cd 21 5f 09
23 b7 fc 90 04 f0 aa 18 52 71 a1 d0 06 1f d0 e9
77 7a d1 ec 0c 71 59 1f 57 8b f7 b8 e5 a1
# Signature:
45 14 21 0e 54 1d 5b ad 7d d6 0a e5 49 b9 43 ac
c4 4f 21 39 0d f5 b6 13 18 45 5a 17 61 0d f5 b7
4d 84 ae d2 32 f1 7e 59 d9 1d d2 65 99 22 f8 12
db d4 96 81 69 03 84 b9 54 e9 ad fb 9b 1a 96 8c
0c bf f7 63 ec ee d6 27 50 c5 91 64 b5 e0 80 a8
fe f3 d5 5b fe 2a cf ad 27 52 a6 a8 45 9f a1 fa
b4 9a d3 78 c6 96 4b 23 ee 97 fd 10 34 61 0c 5c
c1 4c 61 e0 eb fb 17 11 f8 ad e9 6f e6 55 7b 38
# <snip>
# =============================================
# <snip>
""").splitlines()
vectors = tuple(load_pkcs1_vectors(vector_data))
expected = (
(
{
'modulus': int(
'495370a1fb18543c16d3631e3163255df62be6eee890d5f25509e4f77'
'8a8ea6fbbbcdf85dff64e0d972003ab3681fbba6dd41fd541829b2e58'
'2de9f2a4a4e0a2d0900bef4753db3cee0ee06c7dfae8b1d53b5953218'
'f9cceea695b08668edeaadced9463b1d790d5ebf27e9115b46cad4d9a'
'2b8efab0561b0810344739ada0733f', 16),
'public_exponent': int('10001', 16),
'private_exponent': int(
'6c66ffe98980c38fcdeab5159898836165f4b4b817c4f6a8d486ee4ea'
'9130fe9b9092bd136d184f95f504a607eac565846d2fdd6597a8967c7'
'396ef95a6eeebb4578a643966dca4d8ee3de842de63279c618159c1ab'
'54a89437b6a6120e4930afb52a4ba6ced8a4947ac64b30a3497cbe701'
'c2d6266d517219ad0ec6d347dbe9', 16),
'p': int(
'8dad7f11363faa623d5d6d5e8a319328d82190d7127d2846c439b0ab7'
'2619b0a43a95320e4ec34fc3a9cea876422305bd76c5ba7be9e2f410c'
'8060645a1d29edb', 16),
'q': int(
'847e732376fc7900f898ea82eb2b0fc418565fdae62f7d9ec4ce2217b'
'97990dd272db157f99f63c0dcbb9fbacdbd4c4dadb6df67756358ca41'
'74825b48f49706d', 16),
'dmp1': int(
'05c2a83c124b3621a2aa57ea2c3efe035eff4560f33ddebb7adab81fc'
'e69a0c8c2edc16520dda83d59a23be867963ac65f2cc710bbcfb96ee1'
'03deb771d105fd85', 16),
'dmq1': int(
'04cae8aa0d9faa165c87b682ec140b8ed3b50b24594b7a3b2c220b366'
'9bb819f984f55310a1ae7823651d4a02e99447972595139363434e5e3'
'0a7e7d241551e1b9', 16),
'iqmp': int(
'07d3e47bf686600b11ac283ce88dbb3f6051e8efd04680e44c171ef53'
'1b80b2b7c39fc766320e2cf15d8d99820e96ff30dc69691839c4b40d7'
'b06e45307dc91f3f', 16),
'examples': [
{
'message': b'81332f4be62948415ea1d899792eeacf6c6e1db1d'
b'a8be13b5cea41db2fed467092e1ff398914c71425'
b'9775f595f8547f735692a575e6923af78f22c6997'
b'ddb90fb6f72d7bb0dd5744a31decd3dc368584983'
b'6ed34aec596304ad11843c4f88489f209735f5fb7'
b'fdaf7cec8addc5818168f880acbf490d51005b7a8'
b'e84e43e54287977571dd99eea4b161eb2df1f5108'
b'f12a4142a83322edb05a75487a3435c9a78ce53ed'
b'93bc550857d7a9fb',
'salt': b'1d65491d79c864b373009be6f6f2467bac4c78fa',
'signature': b'0262ac254bfa77f3c1aca22c5179f8f040422b3'
b'c5bafd40a8f21cf0fa5a667ccd5993d42dbafb4'
b'09c520e25fce2b1ee1e716577f1efa17f3da280'
b'52f40f0419b23106d7845aaf01125b698e7a4df'
b'e92d3967bb00c4d0d35ba3552ab9a8b3eef07c7'
b'fecdbc5424ac4db1e20cb37d0b2744769940ea9'
b'07e17fbbca673b20522380c5'
}, {
'message': b'e2f96eaf0e05e7ba326ecca0ba7fd2f7c02356f3c'
b'ede9d0faabf4fcc8e60a973e5595fd9ea08',
'salt': b'435c098aa9909eb2377f1248b091b68987ff1838',
'signature': b'2707b9ad5115c58c94e932e8ec0a280f56339e4'
b'4a1b58d4ddcff2f312e5f34dcfe39e89c6a94dc'
b'ee86dbbdae5b79ba4e0819a9e7bfd9d982e7ee6'
b'c86ee68396e8b3a14c9c8f34b178eb741f9d3f1'
b'21109bf5c8172fada2e768f9ea1433032c004a8'
b'aa07eb990000a48dc94c8bac8aabe2b09b1aa46'
b'c0a2aa0e12f63fbba775ba7e'
}
]
},
{
'modulus': int(
'495370a1fb18543c16d3631e3163255df62be6eee890d5f25509e4f77'
'8a8ea6fbbbcdf85dff64e0d972003ab3681fbba6dd41fd541829b2e58'
'2de9f2a4a4e0a2d0900bef4753db3cee0ee06c7dfae8b1d53b5953218'
'f9cceea695b08668edeaadced9463b1d790d5ebf27e9115b46cad4d9a'
'2b8efab0561b0810344739ada0733f', 16),
'public_exponent': int('10001', 16)
}
),
(
{
'modulus': int(
'e6bd692ac96645790403fdd0f5beb8b9bf92ed10007fc365046419dd0'
'6c05c5b5b2f48ecf989e4ce269109979cbb40b4a0ad24d22483d1ee31'
'5ad4ccb1534268352691c524f6dd8e6c29d224cf246973aec86c5bf6b'
'1401a850d1b9ad1bb8cbcec47b06f0f8c7f45d3fc8f319299c5433ddb'
'c2b3053b47ded2ecd4a4caefd614833dc8bb622f317ed076b8057fe8d'
'e3f84480ad5e83e4a61904a4f248fb397027357e1d30e463139815c6f'
'd4fd5ac5b8172a45230ecb6318a04f1455d84e5a8b', 16),
'public_exponent': int('10001', 16),
'private_exponent': int(
'6a7fd84fb85fad073b34406db74f8d61a6abc12196a961dd79565e9da'
'6e5187bce2d980250f7359575359270d91590bb0e427c71460b55d514'
'10b191bcf309fea131a92c8e702738fa719f1e0041f52e40e91f229f4'
'd96a1e6f172e15596b4510a6daec26105f2bebc53316b87bdf2131166'
'6070e8dfee69d52c71a976caae79c72b68d28580dc686d9f5129d225f'
'82b3d615513a882b3db91416b48ce08888213e37eeb9af800d81cab32'
'8ce420689903c00c7b5fd31b75503a6d419684d629', 16),
'p': int(
'f8eb97e98df12664eefdb761596a69ddcd0e76daece6ed4bf5a1b50ac'
'086f7928a4d2f8726a77e515b74da41988f220b1cc87aa1fc810ce99a'
'82f2d1ce821edced794c6941f42c7a1a0b8c4d28c75ec60b652279f61'
'54a762aed165d47dee367', 16),
'q': int(
'ed4d71d0a6e24b93c2e5f6b4bbe05f5fb0afa042d204fe3378d365c2f'
'288b6a8dad7efe45d153eef40cacc7b81ff934002d108994b94a5e472'
'8cd9c963375ae49965bda55cbf0efed8d6553b4027f2d86208a6e6b48'
'9c176128092d629e49d3d', 16),
'dmp1': int(
'2bb68bddfb0c4f56c8558bffaf892d8043037841e7fa81cfa61a38c5e'
'39b901c8ee71122a5da2227bd6cdeeb481452c12ad3d61d5e4f776a0a'
'b556591befe3e59e5a7fddb8345e1f2f35b9f4cee57c32414c086aec9'
'93e9353e480d9eec6289f', 16),
'dmq1': int(
'4ff897709fad079746494578e70fd8546130eeab5627c49b080f05ee4'
'ad9f3e4b7cba9d6a5dff113a41c3409336833f190816d8a6bc42e9bec'
'56b7567d0f3c9c696db619b245d901dd856db7c8092e77e9a1cccd56e'
'e4dba42c5fdb61aec2669', 16),
'iqmp': int(
'77b9d1137b50404a982729316efafc7dfe66d34e5a182600d5f30a0a8'
'512051c560d081d4d0a1835ec3d25a60f4e4d6aa948b2bf3dbb5b124c'
'bbc3489255a3a948372f6978496745f943e1db4f18382ceaa505dfc65'
'757bb3f857a58dce52156', 16),
'examples': [
{
'message': b'06add75ab689de067744e69a2ebd4b90fa9383003'
b'cd05ff536cbf294cd215f0923b7fc9004f0aa1852'
b'71a1d0061fd0e9777ad1ec0c71591f578bf7b8e5a'
b'1',
'signature': b'4514210e541d5bad7dd60ae549b943acc44f213'
b'90df5b61318455a17610df5b74d84aed232f17e'
b'59d91dd2659922f812dbd49681690384b954e9a'
b'dfb9b1a968c0cbff763eceed62750c59164b5e0'
b'80a8fef3d55bfe2acfad2752a6a8459fa1fab49'
b'ad378c6964b23ee97fd1034610c5cc14c61e0eb'
b'fb1711f8ade96fe6557b38'
}
]
},
{
'modulus': int(
'e6bd692ac96645790403fdd0f5beb8b9bf92ed10007fc365046419dd0'
'6c05c5b5b2f48ecf989e4ce269109979cbb40b4a0ad24d22483d1ee31'
'5ad4ccb1534268352691c524f6dd8e6c29d224cf246973aec86c5bf6b'
'1401a850d1b9ad1bb8cbcec47b06f0f8c7f45d3fc8f319299c5433ddb'
'c2b3053b47ded2ecd4a4caefd614833dc8bb622f317ed076b8057fe8d'
'e3f84480ad5e83e4a61904a4f248fb397027357e1d30e463139815c6f'
'd4fd5ac5b8172a45230ecb6318a04f1455d84e5a8b', 16),
'public_exponent': int('10001', 16)
}
)
)
assert vectors == expected
def test_load_hotp_vectors():
vector_data = textwrap.dedent("""
# HOTP Test Vectors
# RFC 4226 Appendix D
COUNT = 0
COUNTER = 0
INTERMEDIATE = cc93cf18508d94934c64b65d8ba7667fb7cde4b0
TRUNCATED = 4c93cf18
HOTP = 755224
SECRET = 12345678901234567890
COUNT = 1
COUNTER = 1
INTERMEDIATE = 75a48a19d4cbe100644e8ac1397eea747a2d33ab
TRUNCATED = 41397eea
HOTP = 287082
SECRET = 12345678901234567890
COUNT = 2
COUNTER = 2
INTERMEDIATE = 0bacb7fa082fef30782211938bc1c5e70416ff44
TRUNCATED = 82fef30
HOTP = 359152
SECRET = 12345678901234567890
COUNT = 3
COUNTER = 3
INTERMEDIATE = 66c28227d03a2d5529262ff016a1e6ef76557ece
TRUNCATED = 66ef7655
HOTP = 969429
SECRET = 12345678901234567890
""").splitlines()
assert load_nist_vectors(vector_data) == [
{
"counter": b"0",
"intermediate": b"cc93cf18508d94934c64b65d8ba7667fb7cde4b0",
"truncated": b"4c93cf18",
"hotp": b"755224",
"secret": b"12345678901234567890",
},
{
"counter": b"1",
"intermediate": b"75a48a19d4cbe100644e8ac1397eea747a2d33ab",
"truncated": b"41397eea",
"hotp": b"287082",
"secret": b"12345678901234567890",
},
{
"counter": b"2",
"intermediate": b"0bacb7fa082fef30782211938bc1c5e70416ff44",
"truncated": b"82fef30",
"hotp": b"359152",
"secret": b"12345678901234567890",
},
{
"counter": b"3",
"intermediate": b"66c28227d03a2d5529262ff016a1e6ef76557ece",
"truncated": b"66ef7655",
"hotp": b"969429",
"secret": b"12345678901234567890",
},
]
def test_load_totp_vectors():
vector_data = textwrap.dedent("""
# TOTP Test Vectors
# RFC 6238 Appendix B
COUNT = 0
TIME = 59
TOTP = 94287082
MODE = SHA1
SECRET = 12345678901234567890
COUNT = 1
TIME = 59
TOTP = 46119246
MODE = SHA256
SECRET = 12345678901234567890
COUNT = 2
TIME = 59
TOTP = 90693936
MODE = SHA512
SECRET = 12345678901234567890
""").splitlines()
assert load_nist_vectors(vector_data) == [
{
"time": b"59",
"totp": b"94287082",
"mode": b"SHA1",
"secret": b"12345678901234567890",
},
{
"time": b"59",
"totp": b"46119246",
"mode": b"SHA256",
"secret": b"12345678901234567890",
},
{
"time": b"59",
"totp": b"90693936",
"mode": b"SHA512",
"secret": b"12345678901234567890",
},
]
|
import asyncio
import functools
import random
import time
from testing import Client
from testing import default_test_setup
from testing import gen_data
from testing import gen_points
from testing import gen_series
from testing import InsertError
from testing import PoolError
from testing import QueryError
from testing import run_test
from testing import Series
from testing import Server
from testing import ServerError
from testing import SiriDB
from testing import TestBase
from testing import UserAuthError
from testing import parse_args
TIME_PRECISION = 's'
class TestExpiration(TestBase):
title = 'Test shard expiration'
GEN_POINTS = functools.partial(
gen_points, n=1, time_precision=TIME_PRECISION)
async def _test_series(self, client):
result = await client.query('select * from "series float"')
self.assertEqual(result['series float'], self.series_float)
result = await client.query('select * from "series int"')
self.assertEqual(result['series int'], self.series_int)
result = await client.query(
'list series name, length, type, start, end')
result['series'].sort()
self.assertEqual(
result,
{
'columns': ['name', 'length', 'type', 'start', 'end'],
'series': [
[
'series float',
10000, 'float',
self.series_float[0][0],
self.series_float[-1][0]],
[
'series int', 10000,
'integer',
self.series_int[0][0],
self.series_int[-1][0]],
]
})
async def insert(self, client, series, n, timeout=1):
for _ in range(n):
await client.insert_some_series(
series, timeout=timeout, points=self.GEN_POINTS)
await asyncio.sleep(1.0)
@default_test_setup(
2,
time_precision=TIME_PRECISION,
compression=True,
optimize_interval=20)
async def run(self):
await self.client0.connect()
await self.db.add_replica(self.server1, 0, sleep=30)
# await self.db.add_pool(self.server1, sleep=30)
await self.assertIsRunning(self.db, self.client0, timeout=30)
await self.client1.connect()
self.series_float = gen_points(
tp=float, n=10000, time_precision=TIME_PRECISION, ts_gap='10m')
random.shuffle(self.series_float)
self.series_int = gen_points(
tp=int, n=10000, time_precision=TIME_PRECISION, ts_gap='10m')
random.shuffle(self.series_int)
self.assertEqual(
await self.client0.insert({
'series float': self.series_float,
'series int': self.series_int
}), {'success_msg': 'Successfully inserted 20000 point(s).'})
self.series_float.sort()
self.series_int.sort()
await self._test_series(self.client0)
total = (await self.client0.query('count shards'))['shards']
rest = (
await self.client0.query('count shards where end > now - 3w')
)['shards']
self.assertGreater(total, rest)
await self.client0.query('alter database set expiration_num 3w')
await asyncio.sleep(50) # wait for optimize to complete
total = (await self.client0.query('count shards'))['shards']
self.assertEqual(total, rest)
await self.client0.query('alter database set expiration_log 2w')
await self.client0.insert({
'series_log': [
[int(time.time()) - 3600*24*15, "expired_log"]
]
})
res = await self.client0.query('list series name, length "series_log"')
self.assertEqual(len(res['series']), 0)
await self.client0.insert({
'series_log': [
[int(time.time()) - 3600*24*15, "expired_log"],
[int(time.time()) - 3600*24*7, "valid_log"],
]
})
res = await self.client0.query('list series name, length "series_log"')
self.assertEqual(len(res['series']), 1)
self.assertEqual(res['series'], [['series_log', 1]])
await self.client0.query('alter database set drop_threshold 0.1')
with self.assertRaisesRegex(
QueryError,
"This query would drop .*"):
result = await self.client0.query(
'alter database set expiration_num 1w')
total = (await self.client0.query('count shards'))['shards']
rest = (
await self.client0.query('count shards where end > now - 1w')
)['shards']
result = await self.client0.query(
'alter database set expiration_num 1w '
'set ignore_threshold true')
await asyncio.sleep(40) # wait for optimize to complete
total = (await self.client0.query('count shards'))['shards']
self.assertEqual(total, rest)
self.client0.close()
self.client1.close()
if __name__ == '__main__':
random.seed(1)
parse_args()
run_test(TestExpiration())
|
#!/usr/bin/env python3
import xdrlib
p = xdrlib.Packer()
p.pack_double(3.2)
p.pack_int(5)
# pack an array; the 2nd arg is the function used to pack each element
p.pack_array([1.0, 0.1, 0.001], p.pack_double)
# XDR data is binary, so the file must be opened in binary mode
with open('tmp.dat', 'wb') as f:
    f.write(p.get_buffer())
with open('tmp.dat', 'rb') as f:
    u = xdrlib.Unpacker(f.read())
some_double = u.unpack_double()
some_int = u.unpack_int()
# unpack_array takes the function used to unpack each element
some_list = u.unpack_array(u.unpack_double)
print(some_double, some_int, some_list)
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .models import Site,SocialMedia,Position,Pages,Carousel,CarouselImage,Widget, FAQ
class SocialMediaInline(admin.StackedInline):
model = SocialMedia
extra = 1
class SiteAdmin(admin.ModelAdmin):
inlines = [SocialMediaInline,]
class CarouselImageInline(admin.StackedInline):
model = CarouselImage
extra = 1
class CarouselAdmin(admin.ModelAdmin):
inlines = [CarouselImageInline,]
# admin.site.register(Position)
# admin.site.register(Pages)
admin.site.register(FAQ)
# admin.site.register(CarouselImage)
admin.site.register(Site, SiteAdmin)
admin.site.register(Carousel, CarouselAdmin)
|
"""Add Point
Revision ID: 71de7d079c37
Revises: e6d7560692fe
Create Date: 2016-01-03 13:26:52.977321
"""
# revision identifiers, used by Alembic.
revision = '71de7d079c37'
down_revision = 'e6d7560692fe'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
# commands auto generated by Alembic - please adjust!
op.create_table(
'point',
sa.Column('pid', sa.Integer(), nullable=False),
sa.Column('score', sa.Integer(), nullable=True),
sa.Column('reason', sa.Text(), nullable=True),
sa.Column('group_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['group_id'], ['group.gid'], ),
sa.PrimaryKeyConstraint('pid'))
# end Alembic commands
def downgrade():
# commands auto generated by Alembic - please adjust!
op.drop_table('point')
# end Alembic commands
|
# -*- coding: utf-8 -*-
import logging
import multiprocessing
import time
from ..utils import timing
logger = logging.getLogger(__name__)
def run_sequential(tasks, name=None):
"""
Args:
tasks(list(NXtask))
Returns:
bool
"""
with timing.timeit_logger(logger, name=name):
for task in tasks:
task.run()
if not task.done:
return False
return True
def run_task(task, result_queue):
"""
Args:
task(NXtask)
result_queue(Queue)
"""
try:
task.run()
finally:
result_queue.put(task.done)
def run_parallel(tasks, name, nproc=2):
"""
Args:
tasks(list(NXtask))
Returns:
bool
"""
with timing.timeit_logger(logger, name=name):
        # a Manager queue can be shared with pool workers; a plain
        # multiprocessing.Queue cannot be passed as an argument to apply_async
        result_queue = multiprocessing.Manager().Queue()
        results = []
        with multiprocessing.Pool(nproc) as pool:
            try:
                while tasks:
                    tasks_copy = list(tasks)
                    tasks = []
                    for task in tasks_copy:
                        if task.ready_to_run:
                            # apply_async takes the positional arguments as a tuple
                            results.append(
                                pool.apply_async(run_task, (task, result_queue))
                            )
                        else:
                            tasks.append(task)
                    # block until one task finishes; a falsy value means it failed
                    if not result_queue.get():
                        for result in results:
                            result.wait()
                        return False
                return True
            finally:
                for result in results:
                    result.wait()
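# Usage sketch (illustrative, assuming task objects that expose run(), done
# and ready_to_run as used above):
#   ok = run_sequential(tasks, name="reduce")
#   ok = run_parallel(tasks, name="reduce", nproc=4)
# run_parallel returns False as soon as one task reports failure through the
# shared result queue, and True once every task has run.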
|
import time
import numpy as np
from numba import jit
from matplotlib import colors
from matplotlib import pyplot as plt
@jit
def mandelbrot(creal, cimag, maxiter, horizon, log_horizon):
real = creal
imag = cimag
for n in range(maxiter):
real2 = real*real
imag2 = imag*imag
if real2 + imag2 > horizon:
return n - np.log(np.log(real2+imag2))/np.log(2) + log_horizon
imag = 2 * real*imag + cimag
real = real2 - imag2 + creal
return 0
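# Note: the value returned above is the "smooth" (continuous) escape count,
# n - log2(log(|z|^2)) + log2(log(horizon)), which avoids the colour banding
# that a plain integer iteration count produces.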
@jit
def mandelbrot_set(xmin, xmax, ymin, ymax, width, height, maxiter):
horizon = 2.0 ** 40
log_horizon = np.log(np.log(horizon))/np.log(2)
r1 = np.linspace(xmin, xmax, width)
r2 = np.linspace(ymin, ymax, height)
n3 = np.empty((width, height))
for i in range(width):
for j in range(height):
n3[i, j] = mandelbrot(r1[i], r2[j], maxiter, horizon, log_horizon)
return (r1, r2, n3)
def mandelbrot_image(xmin, xmax, ymin, ymax, width=10, height=10,
maxiter=256, cmap="jet", gamma=0.3):
dpi = 72
img_width = dpi * width
img_height = dpi * height
x, y, z = mandelbrot_set(xmin, xmax, ymin, ymax,
img_width, img_height, maxiter)
fig, ax = plt.subplots(figsize=(width, height), dpi=dpi)
plt.axis("off")
norm = colors.PowerNorm(gamma)
plt.imshow(z.T, cmap=cmap, origin='lower', norm=norm)
extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
filename = "images\\mandelbrot_%s_iter%s_%s.png" % (
cmap, maxiter, time.strftime("%H-%M-%S"))
fig.savefig(filename, bbox_inches=extent, pad_inches=0,)
plt.close()
def make_fractal(x, y, radius, width=10, height=10, maxiter=512, cmap="jet"):
    xmin = x - radius
    xmax = x + radius
    ymin = y - radius
    ymax = y + radius
mandelbrot_image(xmin, xmax, ymin, ymax, maxiter=maxiter,
width=width, height=height, cmap=cmap)
def animation():
for n in range(500, 2048, 10):
make_fractal(-0.748, 0.1, 0.0014, maxiter=n, cmap='inferno')
print("Frame %i complete" % n)
# cmaps = ['viridis', 'plasma', 'inferno', 'magma',
# ('Sequential (2)', [
# 'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',
# 'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',
# 'hot', 'afmhot', 'gist_heat', 'copper']),
# ('Diverging', [
# 'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
# 'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']),
# 'ocean', 'gist_earth', 'terrain',
# 'gnuplot', 'gnuplot2', 'CMRmap', 'brg', 'hsv',
# 'gist_rainbow', 'jet', 'nipy_spectral', 'gist_ncar'])]
start = time.time()
make_fractal( 0.3750001200618655, -0.2166393884377127,
0.000000000002, maxiter=1000000000, cmap='jet')
end = time.time()
print("Total time: %i seconds" % (end-start))
|
from regression_tests import *
class Test(Test):
"""Checks that fileinfo does not crash when analyzing a PE sample for which
we are unable to find a signer or counter-signer.
https://github.com/avast/retdec/issues/87
"""
settings=TestSettings(
tool='fileinfo',
args='--verbose',
input=[
'74DB92D527624055DC928DFC7DC19DDA7FA257B2BC9F5539CEAFE2E8B4FFD6F3.dat',
'E7B7C4B486676CF535F9CFE1A84F991E8544E220EB0B06D33A0D808635DA3713.dat',
]
)
def test_fileinfo_does_not_crash(self):
assert self.fileinfo.succeeded
|
import numpy as np
import cv2
class Sample(object):
def __init__(self, img_path):
self.img_path = img_path
def read_features(self):
img = cv2.imread(self.img_path[0], cv2.IMREAD_COLOR)
return img
class Sample_Architecture_V1(Sample):
def __init__(self, img_path, label_path):
super(Sample_Architecture_V1, self).__init__(img_path)
self.depth_gt = label_path[0]
self.obstacles_gt = label_path[1]
def read_labels(self):
depth_label = cv2.imread(self.depth_gt[0], cv2.IMREAD_GRAYSCALE)
# Read obstacles
with open(self.obstacles_gt[0],'r') as f:
obstacles = f.readlines()
obstacles = [x.strip() for x in obstacles]
        # Label obstacles
obstacles_label = np.zeros(shape=(5, 8, 7))
for obs in obstacles:
parsed_str_obs = obs.split()
parsed_obs = np.zeros(shape=(8))
i = 0
for n in parsed_str_obs:
if i < 2:
parsed_obs[i] = int(n)
else:
parsed_obs[i] = float(n)
i += 1
# Save labels
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), 0] = 1.0 #confidence
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), 1] = parsed_obs[2] # x
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), 2] = parsed_obs[3] # y
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), 3] = parsed_obs[4] # w
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), 4] = parsed_obs[5] # h
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), 5] = parsed_obs[6] # m
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), 6] = np.abs(parsed_obs[7]) / 68.759 # v
        # YOLO output + depth output
labels = {}
labels["depth"] = np.expand_dims(depth_label, 2)
labels["obstacles"] = np.reshape(obstacles_label,(40,7))
return labels
class Sample_Architecture_V2(Sample):
def __init__(self, img_path, label_path):
super(Sample_Architecture_V2, self).__init__(img_path)
self.depth_gt = label_path[0]
self.obstacles_gt = label_path[1]
def numpy_overlap(self, x1, w1, x2, w2):
l1 = (x1) - w1 / 2
l2 = (x2) - w2 / 2
left = np.where(l1 > l2, l1, l2)
r1 = (x1) + w1 / 2
r2 = (x2) + w2 / 2
right = np.where(r1 > r2, r2, r1)
result = (right - left)
return result
def numpy_iou(self, centroid_gt, centroid_p, dims_gt, dims_p):
ow = self.numpy_overlap(centroid_p[0], dims_p[0], centroid_gt[0] , dims_gt[0])
oh = self.numpy_overlap(centroid_p[1], dims_p[1], centroid_gt[1] , dims_gt[1])
ow = np.where(ow > 0, ow, 0)
oh = np.where(oh > 0, oh, 0)
intersection = float(ow) * float(oh)
area_p = dims_p[0] * dims_p[1]
area_gt = dims_gt[0] * dims_gt[1]
union = area_p + area_gt - intersection
pred_iou = intersection / (float(union) + 0.000001) # prevent div 0
return pred_iou
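    # Illustrative example (numbers chosen for illustration): both boxes are
    # centred at the origin, so only width/height matter. With
    # dims_gt=[1.0, 1.0] and dims_p=[0.5, 0.5] the overlap is 0.5 per axis,
    # intersection = 0.25, union = 0.25 + 1.0 - 0.25 = 1.0, so IoU ~= 0.25.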
def read_labels(self):
depth_label = cv2.imread(self.depth_gt[0], cv2.IMREAD_GRAYSCALE)
# Read obstacles
with open(self.obstacles_gt[0],'r') as f:
obstacles = f.readlines()
obstacles = [x.strip() for x in obstacles]
        # Label obstacles
obstacles_label = np.zeros(shape=(5,8,2,7))
# Anchors
anchors = np.array([[0.34755122, 0.84069513],
[0.14585618, 0.25650666]])
for obs in obstacles:
parsed_str_obs = obs.split()
parsed_obs = np.zeros(shape=(8))
i = 0
for n in parsed_str_obs:
if i < 2:
parsed_obs[i] = int(n)
else:
parsed_obs[i] = float(n)
i += 1
# Compute centroid and size bounding box
w = parsed_obs[4]
h = parsed_obs[5]
best_iou = -1.
best_iou_index = 0
index = 0
for anchor in anchors:
# Compute iou
pred_iou = self.numpy_iou([0., 0.], [0., 0.], anchor, [w, h])
if pred_iou > best_iou:
best_iou = pred_iou
best_iou_index = index
index += 1
# Save labels
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), best_iou_index, 0] = 1.0 #confidence
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), best_iou_index, 1] = parsed_obs[2] # x
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), best_iou_index, 2] = parsed_obs[3] # y
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), best_iou_index, 3] = w # w
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), best_iou_index, 4] = h # h
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), best_iou_index, 5] = parsed_obs[6] # m
obstacles_label[int(parsed_obs[1]), int(parsed_obs[0]), best_iou_index, 6] = parsed_obs[7] / 100. # v
        # YOLO output + depth output
labels = {}
labels["depth"] = np.expand_dims(depth_label, 2)
labels["obstacles"] = obstacles_label
return labels
|
from moderngl_window.context.tk.window import Window # noqa
from moderngl_window.context.tk.keys import Keys # noqa
|
from flask import render_template, url_for, request, redirect, flash, abort
from Tetris import app, db, login_manager
from flask_login import current_user, logout_user, login_user, login_required
from .models import User, Game
from .forms import SignupForm, LoginForm
@login_manager.user_loader
def load_user(userid):
return User.query.get(int(userid))
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html')
@app.route('/play')
def play():
return render_template('play.html')
@app.route('/signup', methods=['GET', 'POST'])
def signup():
form = SignupForm()
if form.validate_on_submit():
user = User(username=form.username.data,
password=form.password.data,
email=form.email.data)
db.session.add(user)
db.session.commit()
flash('Welcome {}! Please log in'.format(user.username))
return redirect(url_for('login'))
return render_template('signup.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.get_by_username(form.username.data)
if user is not None and user.check_password(form.password.data):
login_user(user, form.remember_me.data)
flash('Logged in as {}. Welcome back!'.format(user.username))
return redirect(request.args.get('next') or url_for('index'))
flash('Incorrect username or password')
return render_template('login.html', form=form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/user/<username>')
def user(username):
user = User.query.filter_by(username=username).first_or_404()
return render_template('user.html', user=user)
|
#!/usr/bin/env python3
from src.util import *
# tag::starOne[]
passwords = read_file_to_list("input.txt")
result = 0
for (rf, rt, ch, pw) in passwords:
    c = count(pw, ch)
    if rf <= c <= rt:
        result += 1
print(result)
# end::starOne[]
# tag::starTwo[]
passwords = read_file_to_list("input.txt")
result = 0
for (rf, rt, ch, pw) in passwords:
    c = check(pw, rf, ch) + check(pw, rt, ch)
    if c == 1:
        result += 1
print(result)
# end::starTwo[]
|
#!/usr/bin/env python3
import unittest
from data import Data, Q, Var
d = Data()
d.load("supermorphgnt.txt")
class DepQueryTestCase(unittest.TestCase):
def test_match_query(self):
"""
how many times is the 'lemma' καθώς?
"""
self.assertEqual(
len(list(d.query(Q(lemma="καθώς")))),
182
)
def test_match_and_variable_count(self):
"""
what values of 'rel' does καθώς have?
"""
self.assertEqual(
d.count(
Q(lemma="καθώς", rel=Var("rel"))
), {
(("rel", "conj"),): 182
}
)
def test_match_and_multiple_variables_count(self):
"""
what values of 'rel' and 'pos' does καθώς have?
"""
self.assertEqual(
d.count(
Q(lemma="καθώς", rel=Var("rel"), pos=Var("pos"))
), {
(("pos", "C-"), ("rel", "conj")): 182
}
)
def test_multiple_matches_count(self):
"""
how many times does καθώς have a 'pos' of 'C-'?
"""
self.assertEqual(
d.count(
Q(lemma="καθώς", pos="C-")
), {
(): 182
}
)
def test_multiple_matches_with_no_hits_count(self):
"""
how many times does καθώς have a 'pos' of 'D-'?
"""
self.assertEqual(
d.count(
Q(lemma="καθώς", pos="D-")
), {
}
)
def test_match_count(self):
"""
how many times does a word have a 'pos' of 'N-'?
"""
self.assertEqual(
d.count(
Q(pos="N-")
), {
(): 28237
}
)
def test_match_and_variable_and_subquery_match_count(self):
"""
what is the 'rel' of καθώς when the head has a 'pos' of 'V-'?
"""
self.assertEqual(
d.count(
Q(lemma="καθώς", rel=Var("rel"), head=Q(pos="V-"))
), {
(("rel", "conj"),): 148
}
)
def test_match_and_variable_and_subquery_variable_count(self):
"""
when 'lemma' is 'καθώς',
what are the values of 'rel' and the head's 'pos' ?
"""
self.assertEqual(
d.count(
Q(lemma="καθώς", rel=Var("rel"), head=Q(pos=Var("h_pos")))
), {
(("h_pos", "A-"), ("rel", "conj")): 9,
(("h_pos", "RP"), ("rel", "conj")): 8,
(("h_pos", "N-"), ("rel", "conj")): 14,
(("h_pos", "V-"), ("rel", "conj")): 148,
(("h_pos", "D-"), ("rel", "conj")): 2,
(("h_pos", "RD"), ("rel", "conj")): 1,
}
)
def test_subquery_variable_and_subsubquery_variable_count(self):
"""
        when the 'lemma' is 'καθώς', what are the values of 'rel',
the head's 'pos', the head's 'rel' and the head's head's 'pos'?
"""
self.assertEqual(
d.count(
Q(
lemma="καθώς",
rel=Var(1),
head=Q(
pos=Var(2),
rel=Var(3),
head=Q(pos=Var(4))
)
)
), {
((1, 'conj'), (2, 'RP'), (3, 'CL'), (4, 'A-')): 2,
((1, 'conj'), (2, 'A-'), (3, 'CL'), (4, 'V-')): 4,
((1, 'conj'), (2, 'V-'), (3, 'CL'), (4, 'N-')): 12,
((1, 'conj'), (2, 'N-'), (3, 'CL'), (4, 'RP')): 1,
((1, 'conj'), (2, 'V-'), (3, 'CL'), (4, 'D-')): 2,
((1, 'conj'), (2, 'RD'), (3, 'CL'), (4, 'N-')): 1,
((1, 'conj'), (2, 'V-'), (3, 'ADV'), (4, 'V-')): 3,
((1, 'conj'), (2, 'V-'), (3, 'CL'), (4, 'RP')): 2,
((1, 'conj'), (2, 'V-'), (3, 'CL'), (4, 'V-')): 113,
((1, 'conj'), (2, 'D-'), (3, 'CL'), (4, None)): 1,
((1, 'conj'), (2, 'N-'), (3, 'CL'), (4, 'V-')): 8,
((1, 'conj'), (2, 'A-'), (3, 'CL'), (4, 'A-')): 3,
((1, 'conj'), (2, 'N-'), (3, 'CL'), (4, 'D-')): 1,
((1, 'conj'), (2, 'N-'), (3, 'CL'), (4, 'N-')): 3,
((1, 'conj'), (2, 'A-'), (3, 'CL'), (4, 'N-')): 1,
((1, 'conj'), (2, 'V-'), (3, 'CL'), (4, 'RD')): 1,
((1, 'conj'), (2, 'N-'), (3, 'ADV'), (4, 'V-')): 1,
((1, 'conj'), (2, 'V-'), (3, 'CL'), (4, None)): 10,
((1, 'conj'), (2, 'RP'), (3, 'CL'), (4, 'V-')): 4,
((1, 'conj'), (2, 'D-'), (3, 'CL'), (4, 'N-')): 1,
((1, 'conj'), (2, 'RP'), (3, 'CL'), (4, 'N-')): 2,
((1, 'conj'), (2, 'A-'), (3, 'CL'), (4, None)): 1,
((1, 'conj'), (2, 'V-'), (3, 'CL'), (4, 'A-')): 5,
}
)
def test_match_and_variable_and_subquery_match_count2(self):
"""
what are the possible relationships of a noun to a verb head?
"""
self.assertEqual(
d.count(
Q(pos="N-", rel=Var("rel"), head=Q(pos="V-"))
), {
(('rel', 'ADV'),): 6390,
(('rel', 'S'),): 4311,
(('rel', 'O2'),): 134,
(('rel', 'IO'),): 478,
(('rel', 'CL'),): 1426,
(('rel', 'np'),): 33,
(('rel', 'pp'),): 8,
(('rel', 'P'),): 2,
(('rel', 'O'),): 4618
}
)
if __name__ == "__main__":
unittest.main()
|
import weakref
import numpy as np
class float64(np.float64):
r"""
Examples
--------
.. doctest::
>>> from ndarray_listener import ndl, float64
>>>
>>> print(float64(1.5))
1.5
>>> print(ndl(1.5))
1.5
"""
def __new__(cls, *args):
return np.float64.__new__(cls, *args)
def talk_to(self, me):
r"""Not implemented.
Array-scalars are immutable.
"""
pass
class ndl(np.ndarray):
r"""
Examples
--------
A scalar is stored as a zero-dimensional array much like a NumPy scalar:
.. doctest::
>>> from __future__ import print_function
>>> from ndarray_listener import ndl
>>> from numpy import atleast_1d
>>>
>>> class Watcher(object):
... def __init__(self, msg):
... self._msg = msg
...
... def __call__(self):
... print(self._msg + " called me")
...
>>> scalar = ndl(-0.5)
>>>
>>> you0 = Watcher("First guy")
>>> you1 = Watcher("Second guy")
>>>
>>> scalar.talk_to(you0)
>>> scalar.itemset(-1.0)
First guy called me
>>> s0 = scalar.copy()
>>> s0.itemset(-0.5)
First guy called me
>>> s0.talk_to(you1)
>>> scalar.itemset(0.0)
First guy called me
Second guy called me
>>>
>>> s1 = atleast_1d(scalar)
>>> s1[0] = 1.0
First guy called me
Second guy called me
    One-dimensional arrays are also supported:
.. doctest::
>>> from ndarray_listener import ndl
>>> from numpy import atleast_1d
>>> from numpy import set_printoptions
>>>
>>> set_printoptions(precision=2, suppress=True)
>>>
>>> vector = ndl([-0.5, 0.1])
>>>
>>> you0 = Watcher("First guy")
>>> you1 = Watcher("Second guy")
>>>
>>> vector.talk_to(you0)
>>>
>>> vector[0] = 0.0
First guy called me
>>> vector[:] = 1.0
First guy called me
>>>
>>> v0 = vector.copy()
>>> v0.itemset(0, 1.1)
First guy called me
>>>
>>> v0.itemset(1, 2.2)
First guy called me
>>>
>>> v1 = v0.ravel()
>>>
>>> v1.talk_to(you1)
>>> vector[-1] = 9.9
First guy called me
Second guy called me
"""
def __new__(cls, input_array):
obj = np.asarray(input_array).view(cls)
if hasattr(input_array, "_listeners"):
obj._listeners = input_array._listeners
else:
obj._listeners = []
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self._listeners = getattr(obj, "_listeners", [])
def __setitem__(self, *args, **kwargs):
super(ndl, self).__setitem__(*args, **kwargs)
self.__notify()
def __setattr__(self, *args, **kwargs):
super(ndl, self).__setattr__(*args, **kwargs)
if len(args) > 0 and args[0] == "_listeners":
return
self.__notify()
def __getitem__(self, *args, **kwargs):
v = super(ndl, self).__getitem__(*args, **kwargs)
if isinstance(v, ndl):
return v
if np.isscalar(v):
v = float64(v)
else:
v = ndl(v)
for k in self._listeners:
v.talk_to(k)
return v
def talk_to(self, me):
self._listeners.append(_create_callback(me))
def __notify(self):
dirty = False
for k in self._listeners:
cb = k()
if cb is None:
dirty = True
else:
cb()
if dirty:
self.__flush()
def __flush(self):
self._listeners = [k for k in self._listeners if k() is not None]
def itemset(self, *args, **kwargs):
super(ndl, self).itemset(*args, **kwargs)
self.__notify()
def _create_callback(cb):
try:
return weakref.WeakMethod(cb)
except TypeError:
def _callback():
try:
return weakref.proxy(cb)
except ReferenceError:
return None
return _callback
|
# SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
from decisionengine.framework.modules import Publisher
class PublisherWithMissingConsumes(Publisher.Publisher):
pass
|
# import requests
# from typing import Dict, Any, List, Union
#
#
# class CompaniesMatch:
#
# __baseurl: str = 'http://url.com.br'
# _companies: Union[List[Dict[str, Any]], None] = None
#
# def __init__(self):
# self._companies = None
#
# def get(self) -> None:
# """Return companies"""
# response = self._request()
# if self._is_request_ok(response):
# self._companies = response.json()
#
# @classmethod
# def save(cls, companies):
# pass
#
# @staticmethod
# def _is_request_ok(response: requests.Response) -> bool:
# return response.ok
#
# def _request(self) -> requests.Response:
# """Make the request for D-Legal."""
#
# response = requests.get(self.__baseurl)
# return response
#
import requests
from typing import List, Union, Dict, Any
class CompaniesMatch:
_companies: Union[List, None] = None
_baseurl: str = 'http://url.example'
def __init__(self):
self._companies = None
def get(self) -> None:
"""Return companies"""
response = self._request()
self._companies = self._get_companies(response)
@staticmethod
def _get_companies(response: requests.Response) -> List[Dict[str, Any]]:
data = []
if response.ok:
data = response.json()
return data
def _request(self) -> requests.Response:
return requests.get(self._baseurl)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from .mongo_storage import MongoDBStorage
from .mongo_handler import MongoHandler
|
# Copyright 2020 The Caer Authors. All Rights Reserved.
#
# Licensed under the MIT License (see LICENSE);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at <https://opensource.org/licenses/MIT>
#
# ==============================================================================
#pylint: disable=bare-except
import numpy as np
from ._split import train_test_split
from .path import listdir
def median(arr, axis=None):
return np.median(arr, axis=axis)
def npmean(arr):
return np.mean(arr)
def array(obj, dtype=None, order='K'):
return np.array(obj, dtype=dtype, order=order)
def to_array(obj, dtype=None, order='K'):
return np.array(obj, dtype=dtype, order=order)
def asarray(obj, dtype=None, order=None):
return np.asarray(obj, dtype=dtype, order=order)
def load(filename, allow_pickle=False):
return np.load(filename, allow_pickle=allow_pickle)
def get_classes_from_dir(DIR):
if len(listdir(DIR)) == 0:
raise ValueError('The specified directory does not seem to have any folders in it')
else:
classes = [i for i in listdir(DIR)]
return classes
def saveNumpy(base_name, data):
"""
Saves an array to a .npy file
Converts to Numpy (if not already)
"""
if not (isinstance(data, list) or isinstance(data, np.ndarray)):
raise ValueError('data needs to be a Python list or a Numpy array')
data = np.array(data)
if '.npy' in base_name:
np.save(base_name, data)
print(f'[INFO] {base_name} saved!')
elif '.npz' in base_name:
np.savez_compressed(base_name, data)
print(f'[INFO] {base_name} saved!')
def train_val_split(X, y, val_ratio=.2):
"""
Do not use if mean subtraction is being employed
Returns X_train, X_val, y_train, y_val
"""
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=val_ratio)
return X_train, y_train, X_val, y_val
def sort_dict(unsorted_dict, descending=False):
"""
Sorts a dictionary in ascending order (if descending = False) or descending order (if descending = True)
"""
    if not isinstance(descending, bool):
        raise ValueError('`descending` must be a boolean')
return sorted(unsorted_dict.items(), key=lambda x:x[1], reverse=descending)
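# Note: sort_dict returns a list of (key, value) tuples rather than a dict,
# e.g. sort_dict({'a': 3, 'b': 1}) -> [('b', 1), ('a', 3)] and
# sort_dict({'a': 3, 'b': 1}, descending=True) -> [('a', 3), ('b', 1)].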
# def plotAcc(histories):
# """
# Plots the model accuracies as 2 graphs
# """
# pass
# import matplotlib.pyplot as plt
# acc = histories.history['acc']
# val_acc = histories.history['val_acc']
# loss = histories.history['loss']
# val_loss = histories.history['val_loss']
# epochs = range(1, len(acc)+1)
# # Plotting Accuracy
# plt.plot(epochs, acc, 'b', label='Training Accuracy')
# plt.plot(epochs, val_acc, 'r', label='Validation Accuracy')
# plt.title('Training and Validation Accuracy')
# plt.legend()
# # Plotting Loss
# plt.plot(epochs, loss, 'b', label='Training Loss')
# plt.plot(epochs, val_loss, 'r', label='Validation Loss')
# plt.title('Training and Validation Loss')
# plt.legend()
# plt.show()
__all__ = [
'get_classes_from_dir',
'median',
'npmean',
'asarray',
'to_array',
'array',
'saveNumpy',
'train_val_split',
'sort_dict'
# 'plotAcc'
]
|
"""
@author: Shubham Shantaram Pawar
"""
# importing all the required libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
# function to plot the training data
def plotTrainingData(X, y):
versicolor = np.where(y==0)
    virginica = np.where(y==1)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_title('Scatter Plot of Training Data')
ax.scatter(X[versicolor][:,0], X[versicolor][:,1], color='blue', label='versicolor', marker='o')
    ax.scatter(X[virginica][:,0], X[virginica][:,1], color='red', label='virginica', marker='+')
ax.set_aspect('equal', 'box')
ax.set_xlabel('petal length (cm)')
ax.set_ylabel('petal width (cm)')
ax.legend()
fig.set_size_inches(10, 6)
fig.show()
# function to plot cost vs iterations
def plotCostVsIterations(J_history, iterations):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_title('cost vs iterations')
ax.set_xlabel(r'iterations')
ax.set_ylabel(r'$J(\theta)$')
ax.scatter(range(iterations), J_history, color='blue', s=10)
fig.set_size_inches(8, 5)
fig.show()
# function to initialize parameters to be uniformly distributed random numbers
# between -0.22 and 0.22
def randInitializeWeights(L_in, L_out):
epsilon_init = 0.22
W = np.random.rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init
return W
# function to calculate sigmoid of activity
def sigmoid(z):
return 1 / (1 + np.exp(-z))
# function to calculate sigmoid gradient
def sigmoidGradient(z):
return np.multiply(sigmoid(z), 1 - sigmoid(z))
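# Note: sigmoidGradient uses the identity
# d/dz sigmoid(z) = sigmoid(z) * (1 - sigmoid(z)),
# which is exactly the element-wise product computed above.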
# function to compute cost and gradients
def computeCost(X, y, Theta1, Theta2):
m, n = X.shape
J = 0
Theta1_grad = np.zeros(Theta1.shape)
Theta2_grad = np.zeros(Theta2.shape)
# Forward Propagation:
# input layer values (with bias unit)
a1 = X
# calculating activity of hidden layer
z2 = np.dot(a1, Theta1.T)
a, b = z2.shape
# calculating activation of hidden layer (with bias unit)
a2 = np.concatenate((np.ones((a, 1)), sigmoid(z2)), axis=1)
# calculating activity of output layer
z3 = np.dot(a2, Theta2.T)
# calculating activation of output layer
a3 = sigmoid(z3)
# hypothesis
h = a3
    # calculating cross-entropy cost
J = (1 / m) * np.sum(np.sum(-1 * np.multiply(y, np.log10(h)) - np.multiply(1 - y, np.log10(1 - h)), axis=0))
# Backpropagation:
# calculating gradients
d3 = h - y
d2 = np.multiply(d3 * Theta2, sigmoidGradient(np.concatenate((np.ones((a, 1)), z2), axis=1)))
c, d = d2.shape
d2 = d2[:, [1, d-1]]
delta1 = d2.T * a1
delta2 = d3.T * a2
Theta1_grad = delta1 / m
Theta2_grad = delta2 / m
return J, Theta1_grad, Theta2_grad
# function for gradient descent
def gradientDescent(x, y, Theta1, Theta2, alpha, num_iters):
# initializing matrix to store cost history
J_history = np.zeros((num_iters,1))
for iter in range(0, num_iters):
J, Theta1_grad, Theta2_grad = computeCost(x, y, Theta1, Theta2)
#updating parameters/thetas
Theta1 = np.subtract(Theta1, alpha * Theta1_grad)
Theta2 = np.subtract(Theta2, alpha * Theta2_grad)
J_history[iter] = J
return J_history, Theta1, Theta2
# function to split the data into 100 folds for leave-one-out analysis
def leaveOneOut_split(X, y):
k_folds = 100
data_splits = []
for i in range(k_folds):
temp = []
train_data = {}
index = list(range(k_folds))
index.pop(i)
train_data['X'] = X[index]
train_data['y'] = y[index]
test_data = {}
        test_data['X'] = X[i].reshape(1, -1)  # keep the test sample 2-D for the forward pass
test_data['y'] = y[i]
temp.append(train_data)
temp.append(test_data)
data_splits.append(temp)
return data_splits
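# Note: fold i keeps sample i as the single test example and trains on the
# remaining 99, so averaging the per-fold errors (see leaveOneOutAnalysis)
# gives the leave-one-out estimate of the error rate.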
# function to perform leave-one-out analysis
def leaveOneOutAnalysis(X, y, alpha, iterations, input_layer_size, hidden_layer_size, output_layer_size):
total_error = 0
data_splits = leaveOneOut_split(X, y)
for i, data_split in enumerate(data_splits):
print('\nTraining with fold ' + str(i+1) + '...')
X_train = data_split[0]['X']
y_train = data_split[0]['y']
X_test = data_split[1]['X']
y_test = data_split[1]['y'][0, 0]
# initializing parameters/thetas
theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
theta2 = randInitializeWeights(hidden_layer_size, output_layer_size)
J_history, Theta1, Theta2 = gradientDescent(X_train, y_train, theta1, theta2, alpha, iterations)
# forward propagation for prediction
a1 = X_test
z2 = np.dot(a1, Theta1.T)
a, b = z2.shape
a2 = np.concatenate((np.ones((a, 1)), sigmoid(z2)), axis=1)
z3 = np.dot(a2, Theta2.T)
a3 = sigmoid(z3)
h = a3
# predicting class label for the test data
if h >= 0.5:
y_predict = 1.0
else:
y_predict = 0.0
# comparing predicted class label with the test/actual class label
# if not equal, increase total error by 1
if y_predict != y_test:
total_error += 1
return total_error/100
def main():
input_layer_size = 2
hidden_layer_size = 2
output_layer_size = 1
# loading iris dataset
iris = load_iris()
# selecting indices for samples corresponding to versicolor and virginica classes respectively
versicolor_target = np.where(iris.target==1)
virginica_target = np.where(iris.target==2)
# extracting training dataset corresponding to versicolor and virginica classes
X_train = iris.data[np.concatenate((versicolor_target[0], virginica_target[0]), axis = 0)][:, [2, 3]]
y_train = iris.target[0:100]
    # plotting training data
plotTrainingData(X_train, y_train)
# min-max normalization/scaling
X_train[:, 0] = (X_train[:, 0] - np.min(X_train[:, 0])) / (np.max(X_train[:, 0]) - np.min(X_train[:, 0]))
X_train[:, 1] = (X_train[:, 1] - np.min(X_train[:, 1])) / (np.max(X_train[:, 1]) - np.min(X_train[:, 1]))
m, n = X_train.shape
# adding one's for the bias term
X = np.concatenate((np.ones((m, 1)), X_train), axis=1)
y = np.matrix(y_train).reshape(100,1)
initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
initial_Theta2 = randInitializeWeights(hidden_layer_size, output_layer_size)
# no. of iterations
iterations = 5000
# learning rate
alpha = 0.1
print('\nPerforming logistic regression using an ANN on the entire dataset...')
J_history, Theta1, Theta2 = gradientDescent(X, y, initial_Theta1, initial_Theta2, alpha, iterations)
# plotting cost vs iterations
plotCostVsIterations(J_history, iterations)
print('\nTheta 1:')
print(Theta1)
print('\nTheta 2:')
print(Theta2)
# computing average error rate for the model using leave-one-out analysis
avg_error = leaveOneOutAnalysis(X, y, alpha, iterations, input_layer_size, hidden_layer_size, output_layer_size)
print('\nThe average error rate for the ANN model after performing leave-one-out analysis is ' + str(avg_error) +'.')
if __name__ == '__main__':
main()
|
import logging
import pathlib
BACKUP_DIR = pathlib.Path("/backup/")
CHUNK_SIZE = 4 * 1024 * 1024
class RemoteInitializationError(Exception):
pass
class Remote(object):
def __init__(self, remote_dir, use_filename=False):
# directory in which to store snapshot files
self.remote_dir = remote_dir
# use display name instead of slug for remote file path
self.use_filename = use_filename
self.LOG = logging.getLogger("snapshot_manager")
def upload(self, snapshot):
raise NotImplementedError
def clean_remote(self, snapshot):
# remove certain snapshot from remote location
# Maybe keep_last instead
raise NotImplementedError
def remote_path(self, snapshot):
if self.use_filename:
return self.remote_dir / f"{snapshot['name']}.tar"
else:
return self.remote_dir / f"{snapshot['slug']}.tar"
def bytes_to_human(nbytes):
suffixes = ["B", "KB", "MB", "GB", "TB", "PB"]
i = 0
while nbytes >= 1024 and i < len(suffixes) - 1:
nbytes /= 1024.
i += 1
f = ("%.2f" % nbytes).rstrip("0").rstrip(".")
return "%s %s" % (f, suffixes[i])
def local_path(snapshot):
return BACKUP_DIR / f"{snapshot['slug']}.tar"
|
from datesplitter import tokenize
import unittest
class TestTokenizing(unittest.TestCase) :
def test_split_on_punc(self) :
assert tokenize('foo,bar') == ['foo,', 'bar']
def test_spaces(self) :
assert tokenize('foo bar') == ['foo', 'bar']
assert tokenize('foo bar') == ['foo', 'bar']
assert tokenize('foo bar ') == ['foo', 'bar']
assert tokenize(' foo bar') == ['foo', 'bar']
if __name__ == '__main__' :
unittest.main()
|
"""Python math functions"""
import math
def convert_to_base(decimal_number, base, digits):
"""Converts decimal numbers to strings of a custom base using custom digits."""
if decimal_number == 0:
return ''
return digits[decimal_number % base] + convert_to_base(decimal_number // base, base, digits)
def base_to_dec(string, base, digits):
"""Converts strings to decimal numbers via a custom base and digits."""
if string == '':
return 0
return digits.index(string[0]) + base_to_dec(string[1:], base, digits) * base
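# Note: the two helpers above are inverses of each other and work with the
# *least significant* digit first; for example, with digits '01' (binary):
#   convert_to_base(10, 2, '01') -> '0101'
#   base_to_dec('0101', 2, '01') -> 10
# (convert_to_base(0, ...) returns the empty string.)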
def sigmoid(x):
"""A Python function for the Sigmoid function."""
return 1 / (1 + math.exp(-x))
def inverse_sigmoid(y):
"""A Python function for the inverse of the Sigmoid function."""
return math.log(y / (1 - y))
|
import string
import pandas as pd
from keras.optimizers import Adam
from keras.utils import np_utils
import numpy as np
from config import *
import json
from keras import backend as K
from keras.layers import Dense, Dropout
from keras.models import Model, load_model
from sys import argv
from custom_layers import *
from collections import Counter
import os
metadata_dict = {}
#------------------------------------------------------------------------------
def read_metadata(labels):
global metadata_dict
data = pd.read_csv(ocr_file, sep=' ')
ocr_dict = {}
#"{0:05b}".format(10)
alpha_dict = {i.upper():j/35 for j,i in enumerate(string.ascii_uppercase + string.digits)}
data.fillna(0, inplace=True)
for i in data.index:
key = "/".join(data.loc[i,"file"].split("/")[-5:])
ocrs = []
for char1 in data.loc[i,'pred']:
ocrs.append(alpha_dict[char1])
if len(ocrs)<7:
ocrs+=[0]*(7-len(ocrs))
for j in range(1,8):
ocrs.append(data.loc[i,'char%d' % j])
ocr_dict[key] = ocrs
for i in labels:
key = "/".join(i.split("/")[-5:])
if key in ocr_dict:
metadata_dict[i] = ocr_dict[key]
else:
metadata_dict[i] = [0] * 14
del ocr_dict, data, alpha_dict
return metadata_dict
#------------------------------------------------------------------------------
def siamese_model(input1, input2):
left_input_P = Input(input1)
right_input_P = Input(input1)
left_input_C = Input(input2)
right_input_C = Input(input2)
convnet_plate = small_vgg_plate(input1)
encoded_l_P = convnet_plate(left_input_P)
encoded_r_P = convnet_plate(right_input_P)
convnet_car = small_vgg_car(input2)
encoded_l_C = convnet_car(left_input_C)
encoded_r_C = convnet_car(right_input_C)
auxiliary_input = Input(shape=(metadata_length,), name='aux_input')
inputs = [left_input_P, right_input_P, left_input_C, right_input_C, auxiliary_input]
# Add the distance function to the network
L1_distanceP = L1_layer([encoded_l_P, encoded_r_P])
L1_distanceC = L1_layer([encoded_l_C, encoded_r_C])
x = Concatenate()([L1_distanceP, L1_distanceC, auxiliary_input])
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(1024, kernel_initializer='normal',activation='relu')(x)
x = Dropout(0.5)(x)
predF2 = Dense(2,kernel_initializer='normal',activation='softmax', name='class_output')(x)
regF2 = Dense(1,kernel_initializer='normal',activation='sigmoid', name='reg_output')(x)
optimizer = Adam(0.0001)
losses = {
'class_output': 'binary_crossentropy',
'reg_output': 'mean_squared_error'
}
lossWeights = {"class_output": 1.0, "reg_output": 1.0}
model = Model(inputs=inputs, outputs=[predF2, regF2])
model.compile(loss=losses, loss_weights=lossWeights,optimizer=optimizer, metrics=kmetrics)
return model
#------------------------------------------------------------------------------
if __name__ == '__main__':
data = json.load(open('%s/dataset_1.json' % (path)))
labels = []
for k in keys:
for img in data[k]:
labels += [img[0][0], img[2][0]]
labels = list(set(labels))
read_metadata(labels)
input1 = (image_size_h_p,image_size_w_p,nchannels)
input2 = (image_size_h_c,image_size_w_c,nchannels)
type1 = argv[1]
if type1=='train':
for k,val_idx in enumerate(keys):
K.clear_session()
idx = fold(keys,k, train=True)
val = data[val_idx]
trn = data[idx[0]] + data[idx[1]]
trnGen = SiameseSequence(trn, train_augs, metadata_dict=metadata_dict,metadata_length=metadata_length)
tstGen = SiameseSequence(val, test_augs, metadata_dict=metadata_dict,metadata_length=metadata_length)
siamese_net = siamese_model(input1, input2)
f1 = 'model_three_stream_%d.h5' % (k)
#fit model
history = siamese_net.fit_generator(trnGen,
epochs=NUM_EPOCHS,
validation_data=tstGen)
#validate plate model
tstGen2 = SiameseSequence(val, test_augs, metadata_dict=metadata_dict,metadata_length=metadata_length, with_paths = True)
test_report('validation_three_stream_%d' % (k),siamese_net, tstGen2)
siamese_net.save(f1)
elif type1 == 'test':
folder = argv[3]
for k in range(len(keys)):
idx = fold(keys,k, train=False)
tst = data[idx[0]] + data[idx[1]]
tstGen2 = SiameseSequence(tst, test_augs, metadata_dict=metadata_dict,metadata_length=metadata_length, with_paths = True)
f1 = os.path.join(folder,'model_three_stream_%d.h5' % (k))
siamese_net = load_model(f1, custom_objects=customs_func)
test_report('test_three_stream_%d' % (k),siamese_net, tstGen2)
elif type1 == 'predict':
results = []
data = json.load(open(argv[2]))
alpha_dict = {i.upper():j/35 for j,i in enumerate(string.ascii_uppercase + string.digits)}
img1 = (process_load(data['img1_plate'], input1)/255.0).reshape(1,input1[0],input1[1],input1[2])
img2 = (process_load(data['img2_plate'], input1)/255.0).reshape(1,input1[0],input1[1],input1[2])
img3 = (process_load(data['img1_shape'], input2)/255.0).reshape(1,input2[0],input2[1],input2[2])
img4 = (process_load(data['img2_shape'], input2)/255.0).reshape(1,input2[0],input2[1],input2[2])
aux1 = []
for str1 in data['ocr1']:
for c in str1:
aux1.append(alpha_dict[c])
aux1 += data['probs1']
aux2 = []
for str1 in data['ocr2']:
for c in str1:
aux2.append(alpha_dict[c])
aux2 += data['probs2']
diff = abs(np.array(aux1[:7]) - np.array(aux2[:7])).tolist()
for j in range(len(diff)):
diff[j] = 1 if diff[j] else 0
metadata = aux1 + aux2 + diff
metadata = np.array(metadata).reshape(1,-1)
X = [img1, img2, img3, img4, metadata]
folder = argv[2]
for k in range(len(keys)):
K.clear_session()
f1 = os.path.join(folder,'model_three_stream_%d.h5' % (k))
model = load_model(f1)
Y_ = model.predict(X)
results.append(np.argmax(Y_[0]))
print("model %d: %s" % (k+1,"positive" if results[k]==POS else "negative"))
print("final result: %s" % ("positive" if Counter(results).most_common(1)[0][0]==POS else "negative"))
|
import argparse
import json
import os
import cv2 as cv
import keras.backend as K
import numpy as np
from tqdm import tqdm
from config import img_size, image_folder, eval_path, best_model
from model import build_model
from utils import random_crop, preprocess_input, psnr
if __name__ == '__main__':
names_file = 'valid_names.txt'
with open(names_file, 'r') as f:
names = f.read().splitlines()
# Parse arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--scale", help="scale")
args = vars(ap.parse_args())
scale = int(args["scale"])
scale_key = 'x{}'.format(scale)
model_weights_path = os.path.join('models', best_model[scale_key])
model = build_model(scale=scale)
model.load_weights(model_weights_path)
h, w = img_size * scale, img_size * scale
psnr_list = []
bicubic_list = []
for i in tqdm(range(len(names))):
name = names[i]
filename = os.path.join(image_folder, name)
image_bgr = cv.imread(filename)
gt = random_crop(image_bgr, scale)
        # interpolation must be given as a keyword argument; the third
        # positional argument of cv.resize is dst, not the interpolation flag
        input = cv.resize(gt, (img_size, img_size), interpolation=cv.INTER_CUBIC)
        bicubic = cv.resize(input, (img_size * scale, img_size * scale), interpolation=cv.INTER_CUBIC)
x = input.copy()
x = preprocess_input(x.astype(np.float32))
x_test = np.empty((1, img_size, img_size, 3), dtype=np.float32)
x_test[0] = x
out = model.predict(x_test)
out = out.reshape((h, w, 3))
out = np.clip(out, 0.0, 255.0)
out = out.astype(np.uint8)
bicubic_list.append(psnr(bicubic, gt))
psnr_list.append(psnr(out, gt))
print('num_valid_samples: ' + str(len(names)))
print('scale: ' + str(scale))
print('PSNR(avg): {0:.5f}'.format(np.mean(psnr_list)))
print('Bicubic(avg): {0:.5f}'.format(np.mean(bicubic_list)))
if os.path.isfile(eval_path):
with open(eval_path) as file:
eval_result = json.load(file)
else:
eval_result = {}
eval_result['psnr_avg_x{}'.format(scale)] = np.mean(psnr_list)
eval_result['bicubic_avg_x{}'.format(scale)] = np.mean(bicubic_list)
with open(eval_path, 'w') as file:
json.dump(eval_result, file, indent=4)
K.clear_session()
|
import uuid
from djongo import models
from node.blockchain.inner_models import Block as PydanticBlock
from node.core.models import CustomModel
class PendingBlock(CustomModel):
_id = models.UUIDField(primary_key=True, default=uuid.uuid4)
number = models.PositiveBigIntegerField()
hash = models.CharField(max_length=128) # noqa: A003
signer = models.CharField(max_length=64)
body = models.BinaryField()
def get_block(self) -> PydanticBlock:
return PydanticBlock.parse_raw(self.body)
class Meta:
unique_together = ('number', 'signer')
ordering = unique_together
def __str__(self):
return f'block_number={self.number}, hash={self.hash}'
|
from math import floor
import numpy as np
from skimage.metrics import peak_signal_noise_ratio as psnr
from .metric_computer import MetricComputer
from ..common_util.image import get_comp_frame, get_mask_frame, pil_binary_to_numpy, pil_rgb_to_numpy
class PConsPSNRMaskComputer(MetricComputer):
def compute_metric(self):
self.send_work_count_msg(len(self.opts.video_names))
pcons_psnr_mask_all = np.zeros(len(self.opts.video_names))
for v in range(len(self.opts.video_names)):
video_name = self.opts.video_names[v]
num_frames = self.opts.video_frame_counts[v]
comp_frame_a = get_comp_frame(self.opts.gt_root, self.opts.pred_root, video_name, 0)
comp_frame_a = pil_rgb_to_numpy(comp_frame_a)
mask_frame_a = get_mask_frame(self.opts.gt_root, video_name, 0)
mask_frame_a = pil_binary_to_numpy(mask_frame_a)
for t in range(1, num_frames):
comp_frame_b = get_comp_frame(self.opts.gt_root, self.opts.pred_root, video_name, t)
comp_frame_b = pil_rgb_to_numpy(comp_frame_b)
mask_frame_b = get_mask_frame(self.opts.gt_root, video_name, t)
mask_frame_b = pil_binary_to_numpy(mask_frame_b)
# Extract a patch around the center of mass of the missing region (or a corner of the image if the
# center is too close to the edge)
rows, cols = np.where(mask_frame_a == 0)
a_sy = floor(rows.mean() - self.opts.sim_cons_ps / 2)
a_sx = floor(cols.mean() - self.opts.sim_cons_ps / 2)
a_sy = np.clip(a_sy, 0, comp_frame_a.shape[0] - self.opts.sim_cons_ps)
a_sx = np.clip(a_sx, 0, comp_frame_a.shape[1] - self.opts.sim_cons_ps)
### measure video consistency by finding patch in next frame
comp_frame_a_patch = comp_frame_a[a_sy:a_sy + self.opts.sim_cons_ps, a_sx:a_sx + self.opts.sim_cons_ps]
best_patch_psnr = 0.0
best_b_sy = None
best_b_sx = None
for b_sy in range(a_sy - self.opts.sim_cons_sw, a_sy + self.opts.sim_cons_sw):
for b_sx in range(a_sx - self.opts.sim_cons_sw, a_sx + self.opts.sim_cons_sw):
comp_frame_b_patch = comp_frame_b[b_sy:b_sy + self.opts.sim_cons_ps,
b_sx:b_sx + self.opts.sim_cons_ps]
if comp_frame_a_patch.shape != comp_frame_b_patch.shape:
# Invalid patch at given location in comp_frame_b, so skip
continue
patch_psnr = psnr(comp_frame_a_patch, comp_frame_b_patch)
if patch_psnr > best_patch_psnr:
best_patch_psnr = patch_psnr
best_b_sy = b_sy
best_b_sx = b_sx
best_comp_frame_b_patch = comp_frame_b[best_b_sy:best_b_sy + self.opts.sim_cons_ps,
best_b_sx:best_b_sx + self.opts.sim_cons_ps]
pcons_psnr_mask_all[v] += psnr(comp_frame_a_patch, best_comp_frame_b_patch) / (num_frames - 1)
comp_frame_a = comp_frame_b
mask_frame_a = mask_frame_b
self.send_update_msg(1)
self.send_result_msg(pcons_psnr_mask_all)
|
from sklearn.metrics.pairwise import cosine_similarity
import pickle
import json
import numpy as np
from tqdm import tqdm
import jsonlines
import argparse
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument('--claim_file', type=str)
argparser.add_argument('--corpus_file', type=str)
argparser.add_argument('--k_retrieval', type=int)
argparser.add_argument('--claim_retrieved_file', type=str)
    argparser.add_argument('--scifact_abstract_retrieval_file', type=str, help="abstract retrieval in scifact format")
argparser.add_argument('--corpus_embedding_pickle', type=str, default="corpus_paragraph_biosentvec.pkl")
argparser.add_argument('--claim_embedding_pickle', type=str, default="claim_biosentvec.pkl")
args = argparser.parse_args()
with open(args.corpus_embedding_pickle,"rb") as f:
corpus_embeddings = pickle.load(f)
with open(args.claim_embedding_pickle,"rb") as f:
claim_embeddings = pickle.load(f)
claim_file = args.claim_file
claims = []
with open(claim_file) as f:
for line in f:
claim = json.loads(line)
claims.append(claim)
claims_by_id = {claim['id']:claim for claim in claims}
all_similarities = {}
for claim_id, claim_embedding in tqdm(claim_embeddings.items()):
this_claim = {}
for abstract_id, abstract_embedding in corpus_embeddings.items():
claim_similarity = cosine_similarity(claim_embedding,abstract_embedding)
this_claim[abstract_id] = claim_similarity
all_similarities[claim_id] = this_claim
ordered_corpus = {}
for claim_id, claim_similarities in tqdm(all_similarities.items()):
corpus_ids = []
max_similarity = []
for abstract_id, similarity in claim_similarities.items():
corpus_ids.append(abstract_id)
max_similarity.append(np.max(similarity))
corpus_ids = np.array(corpus_ids)
sorted_order = np.argsort(max_similarity)[::-1]
ordered_corpus[claim_id] = corpus_ids[sorted_order]
k = args.k_retrieval
retrieved_corpus = {ID:v[:k] for ID,v in ordered_corpus.items()}
with jsonlines.open(args.claim_retrieved_file, 'w') as output:
claim_ids = sorted(list(claims_by_id.keys()))
for id in claim_ids:
claims_by_id[id]["retrieved_doc_ids"] = retrieved_corpus[id].tolist()
output.write(claims_by_id[id])
with jsonlines.open(args.scifact_abstract_retrieval_file, 'w') as output:
claim_ids = sorted(list(claims_by_id.keys()))
for id in claim_ids:
doc_ids = retrieved_corpus[id].tolist()
doc_ids = [int(id) for id in doc_ids]
output.write({"claim_id": id, "doc_ids": doc_ids})
|
"""trident layers"""
|
'''
Advent of Code - 2018
--- Day 5: Alchemical Reduction ---
Released under the MIT License <http://opensource.org/licenses/mit-license.php>
'''
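# Two units react when they are the same letter in opposite cases (their ASCII codes differ by exactly 32)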
def react(unit1, unit2):
return abs(ord(unit1) - ord(unit2)) == 32
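# Stack-based reduction: push each unit and pop whenever the incoming unit reacts with the top of the stack; the remaining stack length is the fully reacted polymer length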
def react_polymer(polymer):
res = []
res.append(polymer[0])
for p in polymer[1:]:
if len(res) > 0 and react(p, res[-1]):
res.pop()
else:
res.append(p)
return len(res)
def part1(polymer):
return react_polymer(polymer)
def part2(polymer):
units = set([u.lower() for u in polymer])
return min([react_polymer(polymer.replace(u, '').replace(u.upper(), ''))
for u in units])
if __name__ == '__main__':
with open('../input/d05.txt', mode='r') as f:
_input = f.read().strip()
print('Part One: {}'.format(part1(_input)))
print('Part Two: {}'.format(part2(_input)))
|
import os
from iconsdk.builder.transaction_builder import (
DeployTransactionBuilder,
CallTransactionBuilder,
)
from iconsdk.builder.call_builder import CallBuilder
from iconsdk.icon_service import IconService
from iconsdk.libs.in_memory_zip import gen_deploy_data_content
from iconsdk.providers.http_provider import HTTPProvider
from iconsdk.signed_transaction import SignedTransaction
from tbears.libs.icon_integrate_test import IconIntegrateTestBase, SCORE_INSTALL_ADDRESS
DIR_PATH = os.path.abspath(os.path.dirname(__file__))
class TestSampleToken(IconIntegrateTestBase):
TEST_HTTP_ENDPOINT_URI_V3 = "http://127.0.0.1:9000/api/v3"
SCORE_PROJECT = os.path.abspath(os.path.join(DIR_PATH, '..'))
def setUp(self):
super().setUp()
self.icon_service = None
# if you want to send request to network, uncomment next line
# self.icon_service = IconService(HTTPProvider(self.TEST_HTTP_ENDPOINT_URI_V3))
# install SCORE
self.name = 'MySampleToken'
self.symbol = 'MST'
self.initial_supply = 1
self.decimals = 6
params = {
'_name': self.name,
'_symbol': self.symbol,
'_decimals': self.decimals,
'_initialSupply': self.initial_supply
}
self._score_address = self._deploy_score(params=params)['scoreAddress']
def _deploy_score(self, to: str = SCORE_INSTALL_ADDRESS, params: dict = None) -> dict:
# Generates an instance of transaction for deploying SCORE.
transaction = DeployTransactionBuilder() \
.from_(self._test1.get_address()) \
.to(to) \
.step_limit(100_000_000_000) \
.nid(3) \
.nonce(100) \
.content_type("application/zip") \
.content(gen_deploy_data_content(self.SCORE_PROJECT)) \
.params(params) \
.build()
# Returns the signed transaction object having a signature
signed_transaction = SignedTransaction(transaction, self._test1)
# process the transaction
tx_result = self.process_transaction(signed_transaction, self.icon_service)
self.assertTrue('status' in tx_result)
self.assertEqual(1, tx_result['status'])
self.assertTrue('scoreAddress' in tx_result)
return tx_result
def test_score_update(self):
# update SCORE
tx_result = self._deploy_score(to=self._score_address)
self.assertEqual(self._score_address, tx_result['scoreAddress'])
def test_call_name(self):
# Generates a call instance using the CallBuilder
call = CallBuilder().from_(self._test1.get_address()) \
.to(self._score_address) \
.method("name") \
.build()
# Sends the call request
response = self.process_call(call, self.icon_service)
self.assertEqual(self.name, response)
def test_call_symbol(self):
# Generates a call instance using the CallBuilder
call = CallBuilder().from_(self._test1.get_address()) \
.to(self._score_address) \
.method("symbol") \
.build()
# Sends the call request
response = self.process_call(call, self.icon_service)
self.assertEqual(self.symbol, response)
def test_call_decimals(self):
# Generates a call instance using the CallBuilder
call = CallBuilder().from_(self._test1.get_address()) \
.to(self._score_address) \
.method("decimals") \
.build()
# Sends the call request
response = self.process_call(call, self.icon_service)
self.assertEqual(hex(self.decimals), response)
def test_call_totalSupply(self):
# Generates a call instance using the CallBuilder
call = CallBuilder().from_(self._test1.get_address()) \
.to(self._score_address) \
.method("totalSupply") \
.build()
# Sends the call request
response = self.process_call(call, self.icon_service)
self.assertEqual(hex(self.initial_supply * 10 ** self.decimals), response)
def test_call_balanceOf(self):
# Make params of balanceOf method
params = {
# token owner
'_owner': self._test1.get_address()
}
# Generates a call instance using the CallBuilder
call = CallBuilder().from_(self._test1.get_address()) \
.to(self._score_address) \
.method("balanceOf") \
.params(params) \
.build()
# Sends the call request
response = self.process_call(call, self.icon_service)
self.assertEqual(hex(self.initial_supply * 10 ** self.decimals), response)
def test_token_transfer(self):
# Make params of transfer method
to = self._wallet_array[0].get_address()
value = 100
params = {
'_to': to,
'_value': value,
}
# Generates an instance of transaction for calling method in SCORE.
transaction = CallTransactionBuilder() \
.from_(self._test1.get_address()) \
.to(self._score_address) \
.step_limit(10_000_000) \
.nid(3) \
.nonce(100) \
.method("transfer") \
.params(params) \
.build()
# Returns the signed transaction object having a signature
signed_transaction = SignedTransaction(transaction, self._test1)
# Sends the transaction to the network
tx_result = self.process_transaction(signed_transaction, self.icon_service)
self.assertTrue('status' in tx_result)
self.assertEqual(1, tx_result['status'])
# Make params of balanceOf method
params = {
'_owner': to
}
# Generates a call instance using the CallBuilder
call = CallBuilder().from_(self._test1.get_address()) \
.to(self._score_address) \
.method("balanceOf") \
.params(params) \
.build()
# Sends the call request
response = self.process_call(call, self.icon_service)
# check balance of receiver
self.assertEqual(hex(value), response)
|
__author__ = 'Kalyan'
# this is a sample module for the understanding_modules assignment.
def greet(name):
return "module1 says hi to " + name
def _private_func():
pass
|
#!/usr/bin/env python
""" LearnedLeague Luck
This is an implementation of SheahanJ's algorithm for computing
"luck" in the [LearnedLeague](http://learnedleague.com), as described
by his post in the [LearnedLeague Forum](http://www.learnedleague.com/viewtopic.php?f=3&t=5250)
The program expects to receive a CSV of player records on STDIN,
in the format of the "all players" file from the LL site.
It generates three output files in the current directory:
lucky.csv - a CSV with player names, records, Rundle,
expected points, luck, and strength-of-schedule as defined by SheahanJ
lucky.bbcode - a BBCode table showing the same information for the
luckiest 100 LLamas (all output rounded to 2 decimal places)
unlucky.bbcode - a BBCode table showing the same information for the
unluckiest 100 LLamas
"""
import csv, os, sys, math
def normalize(s):
s = s.replace(' ','')
if not s[0].isalpha():
s = '_' + s
abbrevs = {'Wins': 'W', 'Losses': 'L', 'Ties': 'T'}
if s in abbrevs:
s = abbrevs[s]
return s
def mean(l):
return sum(l) / len(l)
class Rundle(object):
rundles = {}
@classmethod
def get(cls, name):
if name not in cls.rundles:
cls.rundles[name] = cls(name)
return cls.rundles[name]
def __init__(self, name):
self.name = name
self.players = {}
self.tFL = 0
self.tFW = 0
self.tTCA = 0
self.tQ = 0
self.pcount = 0
def addPlayer(self, player):
self.players[player.Player] = player
self.tFL += player.FL
self.tFW += player.FW
self.tTCA += player.TCA
self.tQ += player.Q
self.pcount += 1
class Player(object):
players = {}
@classmethod
def get(cls, name):
return cls.players[name]
def __init__(self, dict):
# Clean up the data from the spreadsheet and put it into the object
for key in dict:
newkey = normalize(key)
value = dict[key]
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
pass
self.__dict__[newkey] = value
# Compute additional stats
self.played = self.W + self.L + self.T
self.forfeitRate = self.FL / self.played
self.Q = 6 * (self.played - self.FL)
self.MPA = self.TMP - self.MPD
# Link into the rundle
self.realRundle = Rundle.get(self.Rundle)
self.realRundle.addPlayer(self)
# And save
self.players[self.Player] = self
def computeStats(self):
rr = self.realRundle
oFL = rr.tFL - self.FL # Games forfeited by others
aoFL = oFL / (rr.pcount - 1) # Average games forfeited by others
self.oFR = (1.0 * oFL) / (self.played * (rr.pcount - 1)) # Other forfeit rate
self.xFW = self.oFR * (self.played - self.FL) # Expected Forfeit Wins
self.rQPCT = 1.0 * (rr.tTCA - self.TCA)/(rr.tQ - self.Q)
self.xCAA = self.rQPCT * 6 * (25-self.xFW-self.FL)
self.xMPA = self.PCAA * self.xCAA
self.SOS = (self.CAA / (6.0 * (self.played - self.FW))) / self.rQPCT
try:
self.xTMP = self.TMP*(25.0-self.xFW-self.FL)/(25.0-self.FL)
except ZeroDivisionError:
self.xTMP = 0
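# Pythagorean expectation: predicted win probability from expected match points scored (xTMP) versus allowed (xMPA), with exponent 1.93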
try:
self.pwp = 1/(1+(self.xMPA/self.xTMP)**1.93)
except ZeroDivisionError:
self.pwp = 0
self.xPts = 2*self.pwp*(25-self.xFW-self.FL) + (2*self.xFW) - self.FL
self.luck = (self.Pts- self.xPts) / 2
def out(self, stats):
ans = [self.__dict__[normalize(t)] for t in stats]
return ans
def output(filename, data, stats, fmts):
# Unfortunately, the BBCode implementation on LearnedLeague doesn't properly
# handle tables (it puts a huge amount of whitespace above the table), so we
# have to use pre-formatted text instead. We replace spaces with underscores
# for purposes of clarity.
outfile = open(filename, 'w')
outfile.write('[code]\n')
res = []
outline = ['Rank']
# We need to figure out how wide to make each column.
# Start by leaving enough room for the item names
widths = {}
for item in stats:
outline.append(item)
widths[item] = len(item)
res.append(outline)
linenum = 0
for line in data:
linenum += 1
outline = ['%4d' % linenum]
for item in stats:
if 's' in fmts[item]:
value = fmts[item] % line.__dict__[item].strip()
else:
value = fmts[item] % line.__dict__[item]
outline.append(value)
widths[item] = max(widths[item],len(value))
res.append(outline)
# Now, we have all of the results in outline, and widths tells us how wide each
# column must be, so we can output the results
fmtline = '%4s | ' + ' | '.join([('%%%ds' if 's' not in fmts[item] else '%%-%ds') % widths[item] for item in stats])
for line in res:
outfile.write(fmtline % tuple(line))
outfile.write('\n')
outfile.write('[/code]\n')
outfile.close()
if __name__ == '__main__':
# Get the data
reader = csv.DictReader(sys.stdin)
for row in reader:
Player(row)
# Now we can compute all statistics
for p in Player.players.values():
p.computeStats()
# Sort the players from luckiest to least lucky
sortedlist = sorted(Player.players.values(), key=lambda x:0-x.luck)
# Establish columns for output
stats = [
'Player', 'W', 'L', 'T', 'Pts', 'MPD', 'Rundle', 'xPts', 'luck', 'SOS'
]
fmts = {'Player': '%s', 'W': '%2d', 'L': '%2d', 'T':'%2d', 'Pts':'%3d',
'MPD': '%4d', 'Rundle': '%s', 'xPts': '%7.2f', 'luck': '%7.2f', 'SOS' : '%5.2f'}
# The spreadsheet gets more information
ssstats = ['Player', 'Rundle', 'Wins', 'Losses', 'Ties', 'Pts', 'xPts', 'MPD', 'FL', 'FW', 'xFW', 'pwp', 'QPct', 'rQPCT', 'PCAA', 'CAA', 'xCAA', 'MPA', 'xMPA', 'TMP', 'xTMP', 'luck', 'SOS']
# Generate the total list
writer = csv.writer(open('lucky.csv','wb'))
writer.writerow(ssstats)
for p in sortedlist:
writer.writerow(p.out(ssstats))
# And now, let's generate the luckiest and unluckiest 100 LLamas
output('lucky.bbcode', sortedlist[0:100], stats, fmts)
# For unlucky LLamas, we need to ignore Pavanos.
index = -1
while sortedlist[index].FL == 25:
index -= 1
output('unlucky.bbcode', sortedlist[index:index-100:-1], stats, fmts)
|
# MIT License
#
# Copyright (c) 2021 Richard Mah (richard@geometrylabs.io) & Geometry Labs (geometrylabs.io)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from blockchainetl_common.jobs.exporters.console_item_exporter import (
ConsoleItemExporter,
)
def create_item_exporter(output, kafka_settings):
item_exporter_type = determine_item_exporter_type(output)
if item_exporter_type == ItemExporterType.PUBSUB:
from blockchainetl_common.jobs.exporters.google_pubsub_item_exporter import (
GooglePubSubItemExporter,
)
item_exporter = GooglePubSubItemExporter(
item_type_to_topic_mapping={
"block": output + ".blocks",
"transaction": output + ".transactions",
"log": output + ".logs",
}
)
elif item_exporter_type == ItemExporterType.POSTGRES:
from iconetl.jobs.exporters.converters.int_to_decimal_item_converter import (
IntToDecimalItemConverter,
)
from iconetl.jobs.exporters.converters.list_field_item_converter import (
ListFieldItemConverter,
)
from iconetl.jobs.exporters.converters.unix_timestamp_item_converter import (
UnixTimestampItemConverter,
)
from iconetl.jobs.exporters.postgres_item_exporter import PostgresItemExporter
from iconetl.streaming.postgres_tables import (
BLOCKS,
LOGS,
RECEIPTS,
TRANSACTIONS,
)
from iconetl.streaming.utils.postgres_item_exporter import (
create_insert_statement_for_table,
)
item_exporter = PostgresItemExporter(
output,
item_type_to_insert_stmt_mapping={
"block": create_insert_statement_for_table(BLOCKS),
"transaction": create_insert_statement_for_table(TRANSACTIONS),
"log": create_insert_statement_for_table(LOGS),
"receipts": create_insert_statement_for_table(RECEIPTS),
},
converters=[
UnixTimestampItemConverter(),
IntToDecimalItemConverter(),
ListFieldItemConverter("topics", "topic", fill=4),
],
)
elif item_exporter_type == ItemExporterType.KAFKA:
from confluent_kafka import Producer
from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka.schema_registry.protobuf import ProtobufSerializer
from iconetl.jobs.exporters.kafka_item_exporter import KafkaItemExporter
from iconetl.schemas.protobuf_compiled import blocks_raw_pb2 as blocks_raw
from iconetl.schemas.protobuf_compiled import logs_raw_pb2 as logs_raw
from iconetl.schemas.protobuf_compiled import (
transactions_raw_pb2 as transactions_raw,
)
if kafka_settings["enable_schema_registry"]:
registry_client = SchemaRegistryClient(
{"url": kafka_settings["schema_registry_url"]}
)
serializers = {
"block": ProtobufSerializer(
blocks_raw.BlockRaw,
registry_client,
conf={"auto.register.schemas": True},
),
"log": ProtobufSerializer(
logs_raw.LogRaw,
registry_client,
conf={"auto.register.schemas": True},
),
"transaction": ProtobufSerializer(
transactions_raw.TransactionRaw,
registry_client,
conf={"auto.register.schemas": True},
),
}
else:
serializers = None
producer = Producer(
{
"bootstrap.servers": output,
"compression.codec": kafka_settings["compression_type"],
}
)
item_exporter = KafkaItemExporter(
producer,
kafka_settings["topic_map"],
serializers,
)
elif item_exporter_type == ItemExporterType.CONSOLE:
item_exporter = ConsoleItemExporter()
else:
raise ValueError("Unable to determine item exporter type for output " + output)
return item_exporter
def determine_item_exporter_type(output):
if output is not None and output.startswith("projects"):
return ItemExporterType.PUBSUB
elif output is not None and output.startswith("postgresql"):
return ItemExporterType.POSTGRES
elif output is None or output == "console":
return ItemExporterType.CONSOLE
else:
return ItemExporterType.KAFKA
class ItemExporterType:
PUBSUB = "pubsub"
POSTGRES = "postgres"
CONSOLE = "console"
KAFKA = "kafka"
UNKNOWN = "unknown"
|
from easygraphics.turtle import *
create_world(800, 600)
set_speed(400)
for i in range(6):
for j in range(60):
fd(3)
rt(1)
rt(120)
for j in range(60):
fd(3)
rt(1)
rt(120)
rt(60)
pause()
close_world()
|
# -*- python -*-
"""@file
@brief Basic stuff for constructing Pato requests
Copyright (c) 2014-2015 Dimitry Kloper <kloper@users.sf.net>.
All rights reserved.
@page License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation
are those of the authors and should not be interpreted as representing
official policies, either expressed or implied, of the Pato Project.
"""
#pylint: disable=signature-differs,arguments-differ
from pato.protocol.packet import Request
from pato.protocol import Cmd, Direct
class Ping(Request):
"""
@brief Ping request packet compiler
@see PATO_CMD_PING
"""
@classmethod
def compile(cls, value):
cls.assert_true(value >= 0 and value <= 0xff,
"value must be in range [0..0xff]")
return super(Ping, cls).compile([0, value])
Ping.register(Cmd.PING)
class ClearScreen(Request):
"""
@brief DIRECT Clear Screen request packet compiler
@see PATO_DIRECT_CLR
"""
@classmethod
def compile(cls):
return super(ClearScreen, cls).compile([Direct.CLR, 0])
ClearScreen.register(Cmd.DIRECT, Direct.CLR)
class Home(Request):
"""
@brief DIRECT Home request packet compiler
@see PATO_DIRECT_HOME
"""
@classmethod
def compile(cls):
return super(Home, cls).compile([Direct.HOME, 0])
Home.register(Cmd.DIRECT, Direct.HOME)
class EntryModeSet(Request):
"""
@brief DIRECT Entry Mode Set request packet compiler
@see PATO_DIRECT_EMS
"""
@classmethod
def compile(cls, shift_direction, shift_subject):
param = (2 if shift_direction else 0) | (1 if shift_subject else 0)
return super(EntryModeSet, cls).compile([Direct.EMS, param])
EntryModeSet.register(Cmd.DIRECT, Direct.EMS)
class DisplayControl(Request):
"""
@brief DIRECT Display Control request packet compiler
@see PATO_DIRECT_DCTRL
"""
@classmethod
def compile(cls, display_on, cursor_on, cursor_blink):
param = (4 if display_on else 0) | \
(2 if cursor_on else 0) | \
(1 if cursor_blink else 0)
return super(DisplayControl, cls).compile([Direct.DCTRL, param])
DisplayControl.register(Cmd.DIRECT, Direct.DCTRL)
class Shift(Request):
"""
@brief DIRECT Shift request packet compiler
@see PATO_DIRECT_SHIFT
"""
@classmethod
def compile(cls, display_shift, right_shift):
param = (8 if display_shift else 0) | \
(4 if right_shift else 0)
return super(Shift, cls).compile([Direct.SHIFT, param])
Shift.register(Cmd.DIRECT, Direct.SHIFT)
class FunctionSet(Request):
"""
@brief DIRECT Function Set request packet compiler
@see PATO_DIRECT_FUNC
"""
@classmethod
def compile(cls, wide_bus, two_lines, large_font):
param = (16 if wide_bus else 0) | \
(8 if two_lines else 0) | \
(4 if large_font else 0)
return super(FunctionSet, cls).compile([Direct.FUNC, param])
FunctionSet.register(Cmd.DIRECT, Direct.FUNC)
class SetCGRAMAddr(Request):
"""
@brief DIRECT Set CGRAM Address request packet compiler
@see PATO_DIRECT_CGADDR
"""
@classmethod
def compile(cls, addr):
return super(SetCGRAMAddr, cls).compile([Direct.CGADDR, addr & 0x3F])
SetCGRAMAddr.register(Cmd.DIRECT, Direct.CGADDR)
class SetDDRAMAddr(Request):
"""
@brief DIRECT Set DDRAM Address request packet compiler
@see PATO_DIRECT_DDADDR
"""
@classmethod
def compile(cls, addr):
return super(SetDDRAMAddr, cls).compile([Direct.DDADDR, addr & 0x7F])
SetDDRAMAddr.register(Cmd.DIRECT, Direct.DDADDR)
class Wait(Request):
"""
@brief DIRECT Busy Wait request packet compiler
@see PATO_DIRECT_BUSY_WAIT
"""
@classmethod
def compile(cls):
return super(Wait, cls).compile([Direct.BUSY_WAIT, 0])
Wait.register(Cmd.DIRECT, Direct.BUSY_WAIT)
class Write(Request):
"""
@brief DIRECT Write request packet compiler
@see PATO_DIRECT_WRITE
"""
@classmethod
def compile(cls, val):
return super(Write, cls).compile([Direct.WRITE, val & 0xff])
Write.register(Cmd.DIRECT, Direct.WRITE)
class Read(Request):
"""
@brief DIRECT Read request packet compiler
@see PATO_DIRECT_READ
"""
@classmethod
def compile(cls):
return super(Read, cls).compile([Direct.READ, 0])
Read.register(Cmd.DIRECT, Direct.READ)
class Reset(Request):
"""
@brief Reset Display request packet compiler
@see PATO_CMD_RESET
"""
@classmethod
def compile(cls, param, value=0):
cls.assert_true(param in [0, 1, 2], \
"Reset parameter must be 0, 1 or 2")
return super(Reset, cls).compile([param, value])
Reset.register(Cmd.RESET)
class PrintSetAddr(Request):
"""
@brief Print Set Address request packet compiler
@see PATO_CMD_PRINT_SETADDR
"""
@classmethod
def compile(cls, addr):
return super(PrintSetAddr, cls).compile([addr & 0xff,
(addr >> 8) & 0xff])
PrintSetAddr.register(Cmd.PRINT_SETADDR)
class PrintGetAddr(Request):
"""
@brief Print Get Address request packet compiler
@see PATO_CMD_PRINT_GETADDR
"""
@classmethod
def compile(cls):
return super(PrintGetAddr, cls).compile([0, 0])
PrintGetAddr.register(Cmd.PRINT_GETADDR)
class PrintPut(Request):
"""
@brief Print Put request packet compiler
@see PATO_CMD_PRINT_PUT
"""
@classmethod
def compile(cls, b0, b1):
return super(PrintPut, cls).compile([b0 & 0xff, b1 & 0xff])
PrintPut.register(Cmd.PRINT_PUT)
class PrintPutPtr(Request):
"""
@brief Print Put Pointer request packet compiler
@see PATO_CMD_PRINT_PUT_PTR
"""
@classmethod
def compile(cls, offset):
return super(PrintPutPtr, cls).compile([offset & 0xff,
(offset >> 8) & 0xff])
PrintPutPtr.register(Cmd.PRINT_PUT_PTR)
class PrintCommit(Request):
"""
@brief Print Commit request packet compiler
@see PATO_CMD_PRINT_COMMIT
"""
@classmethod
def compile(cls, offset):
return super(PrintCommit, cls).compile([offset & 0xff,
(offset >> 8) & 0xff])
PrintCommit.register(Cmd.PRINT_COMMIT)
|
from common.caching import cached, read_log_dir
from common.math import sigmoid, log_loss
from . import body_zone_models
from . import tf_models
from . import dataio
import tensorflow as tf
import numpy as np
import os
import datetime
import time
import tqdm
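# Convert an integer body-zone label image (values 1..17) into 17 stacked binary heatmap channels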
def _heatmap(z):
return np.stack([z == i for i in range(1, 18)], axis=-1).astype('float32')
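# Accumulate examples into chunks of chunk_size, shuffle and preprocess each chunk as a whole, then yield (image, label, zone-heatmap) minibatches of batch_size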
def _train_data_generator(x, y, z, num_angles, random_angles, preproc, batch_size, chunk_size):
x_batch, y_batch, z_batch = [], [], []
for i in range(len(x)):
num_slices = x.shape[1]//num_angles
for j in range(num_slices):
if random_angles:
angles = sorted(np.random.choice(x.shape[1], num_angles, replace=False))
x_batch.append(x[i, angles])
z_batch.append(_heatmap(z[i, angles]))
else:
x_batch.append(x[i, j::num_slices])
z_batch.append(_heatmap(z[i, j::num_slices]))
y_batch.append(y[i])
if len(x_batch) >= chunk_size or (len(x_batch) > 0 and i+1 == len(x)):
x_batch, y_batch, z_batch = np.stack(x_batch), np.stack(y_batch), np.stack(z_batch)
perm = np.random.permutation(len(x_batch))
x_batch, y_batch, z_batch = x_batch[perm], y_batch[perm], z_batch[perm]
x_batch, z_batch = preproc(x_batch, z_batch)
for j in range(0, len(x_batch), batch_size):
yield x_batch[j:j+batch_size], y_batch[j:j+batch_size], z_batch[j:j+batch_size]
x_batch, y_batch, z_batch = [], [], []
def _test_data_generator(x, z, num_angles, random_angles, preproc):
for i in range(len(x)):
x_batch, z_batch = [], []
num_slices = x.shape[1]//num_angles
for j in range(num_slices):
if random_angles:
angles = sorted(np.random.choice(x.shape[1], num_angles, replace=False))
x_batch.append(x[i, angles])
z_batch.append(_heatmap(z[i, angles]))
else:
x_batch.append(x[i, j::num_slices])
z_batch.append(_heatmap(z[i, j::num_slices]))
x_batch, z_batch = np.stack(x_batch), np.stack(z_batch)
x_batch, z_batch = preproc(x_batch, z_batch)
yield x_batch, z_batch
def _train_basic_multiview_cnn(mode, model, train_preproc, test_preproc, num_angles, model_mode,
batch_size, learning_rate, duration, random_angles):
assert mode in ('sample_train', 'train')
image_size = 256
output_size = 64
chunk_size = 256
tf.reset_default_graph()
images = tf.placeholder(tf.float32, [None, num_angles, image_size, image_size])
labels = tf.placeholder(tf.float32, [None, 17])
zones = tf.placeholder(tf.float32, [None, num_angles, output_size, output_size, 17])
logits = tf_models.simple_multiview_cnn(images, zones, model, model_mode)
train_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
logits=logits))
train_summary = tf.summary.scalar('train_loss', train_loss)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_step = optimizer.minimize(train_loss)
saver = tf.train.Saver()
model_path = os.getcwd() + '/model.ckpt'
def predict_batch(sess, x_batch, z_batch):
preds = []
for i in range(0, len(x_batch), batch_size):
feed_dict = {
images: x_batch[i:i+batch_size],
zones: z_batch[i:i+batch_size]
}
cur_preds = sess.run([logits], feed_dict=feed_dict)[0]
preds.append(cur_preds)
preds = np.mean(np.concatenate(preds), axis=0)
return preds
def predict(x, z):
with tf.Session() as sess:
saver.restore(sess, model_path)
preds = []
gen = _test_data_generator(x, z, num_angles, random_angles, test_preproc)
for x_batch, z_batch in tqdm.tqdm(gen, total=len(x)):
preds.append(predict_batch(sess, x_batch, z_batch))
preds = np.stack(preds)
return preds
if os.path.exists('done'):
return predict
valid_mode = mode.replace('train', 'valid')
_, x_train, y_train = dataio.get_data_hdf5(mode)
z_train = body_zone_models.get_body_zone_heatmaps(mode)
_, x_valid, y_valid = dataio.get_data_hdf5(valid_mode)
z_valid = body_zone_models.get_body_zone_heatmaps(valid_mode)
train_gen = lambda: _train_data_generator(x_train, y_train, z_train, num_angles, random_angles,
train_preproc, batch_size, chunk_size)
valid_gen = lambda: _test_data_generator(x_valid, z_valid, num_angles, random_angles,
test_preproc)
with read_log_dir():
writer = tf.summary.FileWriter(os.getcwd())
def eval_model(sess):
losses = []
for (x_batch, z_batch), y_batch in zip(valid_gen(), y_valid):
preds = predict_batch(sess, x_batch, z_batch)
loss = log_loss(sigmoid(preds), y_batch)
losses.append(loss)
return np.mean(losses)
def train_model(sess, duration):
it = 0
t0 = time.time()
best_valid_loss = None
while time.time() - t0 < duration:
num_batches = len(x_train)*x_train.shape[1]//(num_angles*batch_size)
for x_batch, y_batch, z_batch in tqdm.tqdm(train_gen(), total=num_batches):
feed_dict = {
images: x_batch,
labels: y_batch,
zones: z_batch
}
_, cur_train_summary = sess.run([train_step, train_summary], feed_dict=feed_dict)
writer.add_summary(cur_train_summary, it)
it += 1
valid_loss = eval_model(sess)
cur_valid_summary = tf.Summary()
cur_valid_summary.value.add(tag='valid_loss', simple_value=valid_loss)
writer.add_summary(cur_valid_summary, it)
if best_valid_loss is None or valid_loss < best_valid_loss:
best_valid_loss = valid_loss
saver.save(sess, model_path)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_model(sess, duration)
open('done', 'w').close()
return predict
@cached(version=2)
def train_simple_cnn(mode, num_angles=4, num_features=64, model_mode='default', train_hours=10,
random_angles=False):
def train_preproc(x_batch, z_batch):
x_batch = x_batch + 0.025 * np.random.randn(*x_batch.shape)
if np.random.randint(2):
return x_batch[..., ::-1], z_batch[..., ::-1, :]
return x_batch, z_batch
def test_preproc(x_batch, z_batch):
x_batch = x_batch + 0.025 * np.random.randn(*x_batch.shape)
x_batch = np.concatenate([x_batch, x_batch[..., ::-1]])
z_batch = np.concatenate([z_batch, z_batch[..., ::-1, :]])
return x_batch, z_batch
model = lambda x: tf_models.simple_cnn(x, num_features, [1, 3, 3], tf_models.leaky_relu)
duration = 10 if mode.startswith('sample') else train_hours * 3600
batch_size = 16*4*64 // (num_angles*num_features)
return _train_basic_multiview_cnn(mode, model, train_preproc, test_preproc,
num_angles=num_angles, model_mode=model_mode,
batch_size=batch_size, learning_rate=1e-4, duration=duration,
random_angles=random_angles)
@cached(version=0)
def get_simple_cnn_predictions(mode):
if not os.path.exists('done'):
_, x, y = dataio.get_data_hdf5(mode)
z = body_zone_models.get_body_zone_heatmaps(mode)
predict = train_simple_cnn('train')
preds = predict(x, z)
np.save('preds.npy', preds)
open('done', 'w').close()
else:
preds = np.load('preds.npy')
return preds
@cached(version=2)
def train_simple_meta_model(mode, train_minutes=1, reg_amt=1):
assert mode in ('sample_train', 'train')
valid_mode = mode.replace('train', 'valid')
_, _, y_train = dataio.get_data_hdf5(mode)
_, _, y_valid = dataio.get_data_hdf5(valid_mode)
train_logits = get_simple_cnn_predictions(mode)
valid_logits = get_simple_cnn_predictions(valid_mode)
tf.reset_default_graph()
logits_in = tf.placeholder(tf.float32, [None, 17])
labels = tf.placeholder(tf.float32, [None, 17])
W = tf.get_variable('W', [17, 17])
logits_out = logits_in + tf.matmul(logits_in, W)
reg = tf.reduce_mean(tf.abs(W)) * reg_amt
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits_out))
train_step = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss + reg)
train_summary = tf.summary.scalar('train_loss', loss)
model_path = os.getcwd() + '/model.ckpt'
saver = tf.train.Saver()
with read_log_dir():
writer = tf.summary.FileWriter(os.getcwd())
def train_model(sess, duration):
it = 0
t0 = time.time()
best_valid_loss = None
while time.time() - t0 < duration:
for _ in tqdm.trange(100):
feed_dict = {
logits_in: train_logits,
labels: y_train
}
_, cur_train_summary = sess.run([train_step, train_summary], feed_dict=feed_dict)
writer.add_summary(cur_train_summary, it)
it += 1
feed_dict = {
logits_in: valid_logits,
labels: y_valid
}
cur_valid_loss = sess.run([loss], feed_dict=feed_dict)[0]
cur_valid_summary = tf.Summary()
cur_valid_summary.value.add(tag='valid_loss', simple_value=cur_valid_loss)
writer.add_summary(cur_valid_summary, it)
if best_valid_loss is None or cur_valid_loss < best_valid_loss:
best_valid_loss = cur_valid_loss
saver.save(sess, model_path)
duration = 10 if mode.startswith('sample') else train_minutes * 60
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_model(sess, duration)
@cached(version=0)
def write_simple_cnn_predictions(mode):
# get_data_hdf5 is assumed to return (names, images, labels); only the example names are needed here to key the answers
names, _, _ = dataio.get_data_hdf5(mode)
preds = sigmoid(get_simple_cnn_predictions(mode))
ans_dict = {name: pred for name, pred in zip(names, preds)}
dataio.write_answer_csv(ans_dict)
|
class MovingAverage:
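"""Fixed-size circular buffer that tracks the running average of the most recent windowSize values."""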
def __init__(self, windowSize) -> None:
self._windowSize = windowSize
self._historyWindow = [0] * windowSize
self._historyIndex = 0
self._historyCount = 0
def recordValue(self, value):
self._historyWindow[self._historyIndex] = value
self._historyIndex += 1
if self._historyIndex >= self._windowSize:
self._historyIndex = 0
self._historyCount += 1
def getAverage(self):
endIndex = self._windowSize
if self._historyCount < self._windowSize:
endIndex = self._historyCount
total = 0
if endIndex == 0:
return 0
for i in range(endIndex):
total += self._historyWindow[i]
return total / endIndex
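# Illustrative usage: ma = MovingAverage(3); ma.recordValue(1); ma.recordValue(2); ma.getAverage() returns 1.5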
|
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import argparse
from torchvision import datasets
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"
import scipy.io
from models import Resnet50_ft
from utils.utils import extract_feature, get_id, evaluate
from utils.dataloader import preprocess
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, default='')
parser.add_argument('--dataset_path', type=str, default='/mnt/disk2/data_sht/Market-1501/pytorch')
parser.add_argument('--num_classes', type=int, default=751)
parser.add_argument('--backbone', type=str, default='resnet50')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--cuda', type=bool, default=True)
parser.add_argument('--h', type=int, default=256)
parser.add_argument('--w', type=int, default=128)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
print(args)
num_classes = args.num_classes
dataset_path = args.dataset_path
h, w = args.h, args.w
batch_size = args.batch_size
checkpoint = args.checkpoint
# Load the data
_, transform = preprocess(args.h, args.w, 0)
gallery_dataset = datasets.ImageFolder(os.path.join(dataset_path, 'gallery'), transform)
query_dataset = datasets.ImageFolder(os.path.join(dataset_path, 'query'), transform)
gallery_loader = DataLoader(gallery_dataset, batch_size=batch_size, shuffle=False, num_workers=8)
query_loader = DataLoader(query_dataset, batch_size=batch_size, shuffle=False, num_workers=8)
class_names = query_dataset.classes
# Load the model
model = Resnet50_ft(num_classes)
model.load_state_dict(torch.load(checkpoint))
# Remove the classification head so the model outputs embedding features
model.classifier.classifier = nn.Sequential()
model = model.eval()
if args.cuda:
model = model.cuda()
with torch.no_grad():
gallery_feature = extract_feature(model, gallery_loader)
query_feature = extract_feature(model, query_loader)
# Other information needed for evaluation (camera IDs and labels)
gallery_cam, gallery_label = get_id(gallery_dataset.imgs)
query_cam, query_label = get_id(query_dataset.imgs)
# Run the evaluation
result = {
'gallery_f': gallery_feature.numpy(),
'gallery_label': gallery_label,
'gallery_cam': gallery_cam,
'query_f': query_feature.numpy(),
'query_label': query_label,
'query_cam': query_cam
}
scipy.io.savemat('result.mat', result)
os.system('python evaluate.py')
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.modeling.utils import cat
class FastRCNNPredictor(nn.Module):
def __init__(self, config, pretrained=None):
super(FastRCNNPredictor, self).__init__()
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = config.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES
self.avgpool = nn.AvgPool2d(kernel_size=7, stride=7)
self.cls_score = nn.Linear(num_inputs, num_classes)
self.bbox_pred = nn.Linear(num_inputs, num_classes * 4)
nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
nn.init.constant_(self.cls_score.bias, 0)
nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)
nn.init.constant_(self.bbox_pred.bias, 0)
def forward(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
cls_logit = self.cls_score(x)
bbox_pred = self.bbox_pred(x)
return cls_logit, bbox_pred
class FPNPredictor(nn.Module):
def __init__(self, cfg):
super(FPNPredictor, self).__init__()
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
self.return_feats = cfg.MODEL.ROI_BOX_HEAD.RETURN_FC_FEATS
self.has_attributes = cfg.MODEL.ROI_BOX_HEAD.ATTR
self.cls_score = nn.Linear(representation_size, num_classes)
self.bbox_pred = nn.Linear(representation_size, num_classes * 4)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for l in [self.cls_score, self.bbox_pred]:
nn.init.constant_(l.bias, 0)
if self.has_attributes:
self.cls_embed = nn.Embedding(num_classes, 256)
self.attr_linear1 = nn.Linear(representation_size + 256, 512)
self.attr_linear2 = nn.Linear(512, 400)
nn.init.normal_(self.cls_embed.weight, std=0.01)
nn.init.normal_(self.attr_linear1.weight, std=0.01)
nn.init.normal_(self.attr_linear2.weight, std=0.01)
nn.init.constant_(self.attr_linear1.bias, 0)
nn.init.constant_(self.attr_linear2.bias, 0)
def forward(self, x, proposals=None):
if isinstance(x, dict):
in_feat = x["fc7"]
else:
in_feat = x
scores = self.cls_score(in_feat)
bbox_deltas = self.bbox_pred(in_feat)
if self.return_feats:
x["scores"] = scores
x["bbox_deltas"] = bbox_deltas
if self.has_attributes:
assert proposals is not None, "Proposals are None while attr=True"
# get labels and indices of proposals with foreground
all_labels = cat([prop.get_field("labels") for prop in proposals], dim=0)
fg_idx = all_labels > 0
fg_labels = all_labels[fg_idx]
# slice fc7 for those indices
fc7_fg = in_feat[fg_idx]
# get embeddings of indices using gt cls labels
cls_embed_out = self.cls_embed(fg_labels)
# concat with fc7 feats
concat_attr = cat([fc7_fg, cls_embed_out], dim=1)
# pass through attr head layers
fc_attr = self.attr_linear1(concat_attr)
attr_score = F.relu(self.attr_linear2(fc_attr))
x["attr_score"] = attr_score
return x
return scores, bbox_deltas
_ROI_BOX_PREDICTOR = {"FastRCNNPredictor": FastRCNNPredictor, "FPNPredictor": FPNPredictor}
def make_roi_box_predictor(cfg):
func = _ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]
return func(cfg)
|
left, top, right, bottom = 166, 56, 303, 84
import tkinter as tk
from turtle import RawTurtle, TurtleScreen, ScrolledCanvas
root = tk.Tk()
width, height = root.winfo_screenwidth(), root.winfo_screenheight()
root.overrideredirect(True)
root.attributes('-alpha', 0.08)
canvas = ScrolledCanvas(root)
canvas.pack(fill=tk.BOTH, expand=tk.YES)
screen = TurtleScreen(canvas)
root.state('zoomed')
turtle = RawTurtle(screen)
turtle.color('red', 'red')
turtle.speed('fastest')
turtle.pensize(4)
# left, top, right, bottom = 100, 100, 900, 900
# def draw_rect(left, top, right, bottom):
# turtle.penup()
# turtle.goto(left, top) # left, top
# turtle.pendown()
# turtle.goto(right, top)
# turtle.goto(right, bottom)
# turtle.goto(left, bottom)
# turtle.goto(left, top)
# turtle.penup()
#
#
# draw_rect(left, top, right, bottom)
# screen.mainloop()
|
import bs4
from bs4 import BeautifulSoup, NavigableString, Tag
import time
import requests
import sys
from config import create_api
def getLyrics():
print("Enter a J. Cole song you'd like to return the lyrics for.")
print("Keep in mind, if the song has spaces, it must be one entirely lowercase string.")
print("For example, Goin' Off would be : goingoff")
song = input(">> ")
url = "https://www.azlyrics.com/lyrics/jcole/" + song + ".html"
rawHTML = requests.get(url).text
soup = BeautifulSoup(rawHTML, "lxml")
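# azlyrics typically renders lyric lines as text nodes between <br> tags; keep a text sibling only when it is followed by another <br>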
for br in soup.findAll('br'):
lyrics = br.nextSibling
if not (lyrics and isinstance(lyrics, NavigableString)):
continue
next_lyric = lyrics.nextSibling
if next_lyric and isinstance(next_lyric,Tag) and next_lyric.name == 'br':
text = str(lyrics).strip()
if text:
with open("stored_lyrics.txt", "a") as storage:
print(text,file=storage)
|
class SwimmingPoolPayDesk:
def calculate_admission_fee(self, age):
""" Calulate admission fee
:param age: age of visitor
:return: fee for visitor
"""
if not isinstance(age, (int, float)) : raise TypeError
if age < 0 : raise ValueError
elif 0 <= age <= 6 : return 1.00
elif 6 < age <= 16 : return 2.00
elif 16 < age <= 65 : return 2.50 # must be 2.50
else : return 1.50
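# Illustrative example: SwimmingPoolPayDesk().calculate_admission_fee(30) -> 2.50; ages up to 6 pay 1.00, over 6 up to 16 pay 2.00, over 16 up to 65 pay 2.50, older visitors pay 1.50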
|
from keras import applications
from keras.models import Model, Sequential
from keras.layers import Dense, Input, BatchNormalization
from keras.layers.pooling import GlobalAveragePooling2D, GlobalAveragePooling1D
from keras.layers.recurrent import LSTM
from keras.layers.wrappers import TimeDistributed
from keras.optimizers import Nadam, SGD, Adam
#from keras.preprocessing.image import ImageDataGenerator
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers import Conv3D, Conv2D, MaxPool2D, Flatten, Dropout, Lambda
import os
import numpy as np
import keras.backend as K
from keras.preprocessing.image import img_to_array, load_img
import pandas as pd
from ImageGenerator_v2 import ImageDataGenerator
train_data_dir = "/home/saireddy/Action/TrainImages"
validation_data_dir = "/home/saireddy/Action/ValImages"
def obtain_datagen(datagen, train_path):
return datagen.flow_from_directory(train_path, class_mode='binary',
target_size=(320, 180), classes = ['goal', 'FreeKick'],
batch_size=8, frames_per_step=2)
datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2)
train_generator = obtain_datagen(datagen, train_data_dir)
validation_generator = obtain_datagen(datagen, validation_data_dir)
frames = 2
img_width = 320
img_height = 180
channels = 3
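# Architecture sketch: a TimeDistributed Conv2D extracts per-frame features, which are flattened and fed through an LSTM over the frame sequence, ending in a 2-unit dense output for the two classes (goal vs. FreeKick)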
model = Sequential()
model.add(TimeDistributed(Conv2D(5, 2, 2,activation='relu'
,border_mode='valid'),
input_shape = (frames, img_width, img_height, channels)))
print(model.output_shape)
model.add(TimeDistributed(Flatten()))
print(model.output_shape)
#model.add(Dropout(0.5, input_shape = (16, 30)))
model.add(LSTM(3, return_sequences=False))
print(model.output_shape)
model.add(Dense(2, activation = 'sigmoid'))
model.summary()
optimizer = SGD(lr=0.01)
loss = 'sparse_categorical_crossentropy'
model.compile(loss=loss,optimizer=optimizer, metrics=['accuracy'])
history = model.fit_generator(train_generator,
steps_per_epoch=80,
epochs=50,
validation_data=validation_generator,
validation_steps=40)
model.save("/home/saireddy/Action/LRCNN2.h5")
######Testing-----------------*****************
# No held-out test set is loaded here, so evaluate on the validation generator instead
score = model.evaluate_generator(validation_generator, steps=10)
print("Loss:", score[0])
print("Accuracy:", score[1])
import matplotlib.pyplot as plt
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
|
import copy
import os
import re
import time
from Constants import PrintOpts
from check_environment import check_lapack, check_python_environment
from import_user_input import get_user_input
from system_functions import print_it, get_p3can_version, in_d
class Sim:
# TODO: add class attribute description
"""Simulation information"""
def __init__(self, in_file, out_dir):
self.ui, self.invalid_input = get_user_input(in_file)
os.environ["AUTO_PRINT"] = str(self.ui['AUTO_PRINT'])
print_it("p3can simulation started")
print_it("this is p3can version {}".format(get_p3can_version()))
print_it("checking python and fortran libraries")
check_python_environment()
check_lapack()
print_it("setting up simulation")
print_it("validating user input", PrintOpts.lvl1.value)
for key, value in self.invalid_input.items():
print_it(
"parameter '{} = {}' seems to be invalid. "
"trying to go on anyway.".format(key, value),
PrintOpts.lvl2.value)
self.simulation_type = self.ui['simulation_type']
self.simulation_name = self.ui['simulation_name']
self.auto_print = self.ui['auto_print']
self.auto_plot = self.ui['auto_plot']
self.auto_report = self.ui['auto_report']
self.start_time = time.time()
self.calculation_input = ["simulation_type"]
self.required_user_input = None
self.optional_user_input = None
self.positive_vars = None
self.res_dir = None
self.odd_vars = None
self.alphanumeric_vars = None
self.parameter_id = None
self.infl_db_file_hand = None
self.mk_uniq_res_dir(self.ui['simulation_name'], out_dir)
print_it("results folder created: {}".format(self.res_dir),
PrintOpts.lvl1.value)
self.input_file_name = self.save_user_input()
print_it(
"saved calculation input to file: {}".format(self.input_file_name),
PrintOpts.lvl1.value)
def print_time_elapsed(self):
"""Prints time elapsed since start of program"""
print_it(("time elapsed: " "%d s" % (time.time() - self.start_time)))
def get_time_elapsed(self):
"""Gets time elapsed since start of program"""
return time.time() - self.start_time
def finished(self):
"""Prints total run time at end of program execution"""
print_it("\np3can finished")
print_it("you can find all calculation outputs in {}".format(
self.res_dir.replace('..{}'.format(os.sep), '')))
self.print_time_elapsed()
def mk_uniq_res_dir(self, simulation_name, out_dir):
"""Makes unique results folder based on 'simulation_name' and current
date and time"""
out_path = os.sep.join([out_dir, 'results/'])
if os.path.isdir(out_path) is False:
os.mkdir(out_path)
valid_file_name = "".join(
[c for c in simulation_name if re.match(r'\w|-', c)])
results_folder = out_path + valid_file_name + '--' + time.strftime(
"%Y-%m-%d--%H-%M-%S")
unique_results_folder = copy.copy(results_folder)
counter = 1
while os.path.isdir('/'.join([os.getcwd(), unique_results_folder])):
unique_results_folder = results_folder + '-{}'.format(counter)
counter += 1
self.res_dir = unique_results_folder
os.mkdir(self.res_dir)
def save_user_input(self):
"""Creates text file in results folder that contains the user input
required for the current simulation. Variables that are not required
for the simulation but were defined by the user are not saved to the
file"""
input_file_name = os.sep.join([self.res_dir, "simulation_input.txt"])
with open(input_file_name, "a") as results_file:
print_str = "# list of relevant user input.\n" \
"# variables that were not used for the calculation " \
"are not listed here.\n\n"
results_file.write(print_str)
for key, value in self.ui.items():
if isinstance(value, str):
print_str = key + " = '" + value + "'\n"
else:
print_str = key + " = {}\n".format(value)
results_file.write(print_str)
return input_file_name
def mk_uniq_parameter_id(self):
"""Make a unique parameter id (basically one very long string
containing all inputs) based on the user input. This is used to
identify identical calculation inputs (when the strings match) and
allows influence matrices to be reused"""
self.parameter_id = '-'.join(
[str(in_d('simulation_type', self.ui, 'x')),
str(in_d('radial_clearance', self.ui, 'x')),
str(in_d('radial_clearance', self.ui, 'x')),
str(in_d('number_balls', self.ui, 'x')),
str(in_d('number_rollers', self.ui, 'x')),
str(in_d('number_pins', self.ui, 'x')),
str(in_d('number_planets', self.ui, 'x')),
str(in_d('e_cb1', self.ui, 'x')),
str(in_d('ny_cb1', self.ui, 'x')),
str(in_d('diameter_cb1', self.ui, 'x')),
str(in_d('length_cb1', self.ui, 'x')),
str(in_d('type_profile_cb1', self.ui, 'x')),
str(in_d('path_profile_cb1', self.ui, 'x')),
str(in_d('profile_radius_cb1', self.ui, 'x')),
str(in_d('e_cb2', self.ui, 'x')),
str(in_d('ny_cb2', self.ui, 'x')),
str(in_d('diameter_cb2', self.ui, 'x')),
str(in_d('type_profile_cb2', self.ui, 'x')),
str(in_d('profile_radius_cb2', self.ui, 'x')),
str(in_d('length_cb2', self.ui, 'x')),
str(in_d('path_profile_cb2', self.ui, 'x')),
str(in_d('e_cb3', self.ui, 'x')),
str(in_d('ny_cb3', self.ui, 'x')),
str(in_d('diameter_cb3', self.ui, 'x')),
str(in_d('type_profile_cb3', self.ui, 'x')),
str(in_d('length_cb3', self.ui, 'x')),
str(in_d('profile_radius_cb3', self.ui, 'x')),
str(in_d('path_profile_cb3', self.ui, 'x')),
str(in_d('global_force', self.ui, 'x')),
str(in_d('res_x', self.ui, 'x')),
str(in_d('res_y', self.ui, 'x')),
str(in_d('res_pol', self.ui, 'x'))
]).replace('/', 'Slash')
self.ui.update({'parameter_id': self.parameter_id})
|
"""
Definition of forms.
"""
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext_lazy as _
from .models import Comment
from .models import Blog
class BootstrapAuthenticationForm(AuthenticationForm):
"""Authentication form which uses boostrap CSS."""
username = forms.CharField(
max_length=254,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'User name'}))
password = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput({
'class': 'form-control',
'placeholder':'Password'}))
class contactForm(forms.Form):
"""Contact form which uses boostrap CSS classes."""
name = forms.CharField(
label='Name',
min_length=2,
max_length=100,
widget=forms.TextInput(attrs={'class': 'form-control'}))
email = forms.EmailField(
label='E-mail',
widget=forms.TextInput(attrs={'class': 'form-control'}))
message = forms.CharField(
label='Message',
widget=forms.Textarea(attrs={
'rows': 12,
'cols': 20,
'class': 'form-control'}))
class BlogForm(forms.ModelForm):
class Meta:
model = Blog
fields = ('title', 'description', 'content',)
labels = {'title': 'title', 'description': 'description', 'content': 'content'}
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('text',)
labels = {'text': 'Comment'}
|
from typing import Dict
from abc import ABC, abstractmethod
from selenium_base import SeleniumBase
import logging
class TaxerDriverBase(ABC):
def __init__(self):
self._base_url = 'https://taxer.ua'
self._token = None
self._cookies = dict()
def get_url(self, path):
return '{}/{}'.format(self._base_url, path)
@property
def token(self) -> str: return self._token
@token.setter
def token(self, value): self._token = value
@property
def cookies(self) -> Dict[str, str]: return self._cookies
@cookies.setter
def cookies(self, value): self._cookies = value
@property
def base_url(self) -> str: return self._base_url
class TaxerDriver(SeleniumBase, TaxerDriverBase):
def __init__(self, username, password, driver=None, headless:bool=False):
SeleniumBase.__init__(self, driver=driver, headless=headless)
TaxerDriverBase.__init__(self)
self.logger = logging.getLogger(__class__.__name__)
self.username = username
self.password = password
self.cookies = None
self.login_url = self.get_url('ru/login')
def goto_main(self):
self.set_location(self.base_url)
self.random_sleep(2, 1)
def check_logged_in(self):
try:
e = self.driver.find_element_by_xpath('//a[contains(@class, "tt-accountant-account-name")]')
return not e.is_displayed()
except:
pass
return False
def login(self, username=None, password=None):
self.username = username or self.username
self.password = password or self.password
self.logger.info('Logging in {0} via url {1}'.format(self.username, self.login_url))
self.driver.get(self.login_url)
self.wait_for_element_by_formcontrolname('email').send_keys(self.username)
self.wait_for_element_by_attribute('ppassword').send_keys(self.password)
self.wait_for_element_by_xpath('//button[@type="submit"]').click()
self.wait_for_element_by_css_selector('routerlink', '/my/settings/account')
self.cookies = dict((c['name'], c['value']) for c in self.driver.get_cookies())
self.token = self.cookies['XSRF-TOKEN']
self.logger.debug('Current location {0}'.format(self.driver.current_url))
def initialize(self):
self.goto_main()
r = self.check_logged_in()
if not r:
self.login()
r = self.check_logged_in()
return r
def quit(self):
self.driver.quit()
from http.cookies import SimpleCookie
class TaxerTokenDriver(TaxerDriverBase):
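"""Driver variant that reuses an existing session: it parses a raw Cookie header string and extracts the XSRF-TOKEN from it, so no Selenium login is needed."""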
def __init__(self, cookies:str):
super().__init__()
self.logger = logging.getLogger(__class__.__name__)
if cookies is not None and isinstance(cookies, str):
cookie = SimpleCookie()
cookie.load(cookies)
self.cookies = {key:morsel.value for (key,morsel) in cookie.items()}
self.token = self.cookies['XSRF-TOKEN']
self.logger.debug(f'Using token {self.token}')
|
dia = float(input('How many days was the car rented? '))
km = float(input('How many kilometers were driven? '))
x = 60 * dia + 0.15 * km
print('\033[7;30m The total to be paid is R${:.2f}\033[m'.format(x))
|
from django.db import models
class Owl(models.Model):
ioc_sequence = models.IntegerField()
common_name = models.CharField(max_length=64)
binomial_name = models.CharField(max_length=64)
def __str__(self):
return self.common_name
|
import pandas as pd
import openpyxl
import json
from datetime import datetime, date
from pymongo import MongoClient
import math
# Required paths
path_ayuntamiento = "C:\\Users\\crist\\OneDrive - UPV\\TFM - Cristian Villarroya\\datos\\datos_vehiculos\\ayuntamiento\\"
path_web = "C:\\Users\\crist\\OneDrive - UPV\\TFM - Cristian Villarroya\\datos\\datos_vehiculos\\web\\"
# Given a date (dd/mm/YYYY), check whether it is a holiday
def definir_festivo(fecha, dia):
festivos_2019 = ["01/01/2019", "22/01/2019", "19/03/2019", "19/04/2019", "22/04/2019", "29/04/2019", "01/05/2019", "24/06/2019", "15/08/2019", "09/10/2019", "12/10/2019",
"01/11/2019", "06/12/2019"
]
fallas_2019 = ["01/03/2019", "02/03/2019", "03/03/2019", "04/03/2019", "05/03/2019", "06/03/2019", "07/03/2019", "08/03/2019", "09/03/2019", "10/03/2019", "11/03/2019",
"12/03/2019", "13/03/2019", "14/03/2019", "15/03/2019", "16/03/2019" ,"17/03/2019" ,"18/03/2019" ,"19/03/2019"
]
festivos_2020 = ["01/01/2020", "06/01/2020", "22/01/2020", "19/03/2020", "10/04/2020", "13/04/2020", "20/04/2020", "01/05/2020", "24/06/2020", "15/08/2020", "09/09/2020", "12/10/2020",
"08/12/2020", "25/12/2020"
]
festivos_2021 = ["01/01/2021", "06/01/2021", "22/01/2021", "19/03/2021", "02/04/2021", "05/04/2021", "12/01/2021", "01/05/2021", "24/06/2021", "09/10/2021",
"12/10/2021", "01/11/2021", "06/12/2021", "08/12/2021", "25/12/2021"
]
festivo = 0
if (fecha in festivos_2019 or fecha in festivos_2020 or fecha in festivos_2021 or fecha in fallas_2019 or dia == "Domingo"):
festivo = 1
return festivo
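# e.g. definir_festivo("19/03/2019", "Martes") -> 1 (Fallas / San Jose), definir_festivo("20/03/2019", "Miercoles") -> 0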
# Given a day in the csv format (or a weekday index), return the corresponding day name
def definir_dia(dia):
if dia == "(lu.)" or dia == 0:
return "Lunes"
elif dia == "(ma.)" or dia == 1:
return "Martes"
elif dia == "(mi.)" or dia == 2:
return "Miercoles"
elif dia == "(ju.)" or dia == 3:
return "Jueves"
elif dia == "(vi.)" or dia == 4:
return "Viernes"
elif dia == "(sá.)" or dia == 5:
return "Sabado"
elif dia == "(do.)" or dia == 6:
return "Domingo"
# Read the street information from the file and return it as a dictionary
def leer_info_calles(fichero):
df = pd.read_csv(fichero)
"""
FORMATO CSV:
ATA, DESCRIPCION, TIPO, BARRIO, CP, SENTIDO, CARRILES, VELOCIDAD LIMITE, COMENTARIO
Tipo => primaria, secundaria, terciaria, resdiencial, entrada o salida.
SENTIDO => 1 unico, 2 doble
COMENTARIO => Calle pequeña, calle larga, avenida, puente, entrada, salida
"""
ATAS = {}
barrios = []
for row in df.itertuples():
ATA = row[1]
desc = row[2]
tipo = row[3]
barrio = row[4]
cp = row[5]
sentido = row[6]
carriles = row[7]
velocidad = row[8]
comentario = row[9]
if (barrio not in barrios):
barrios.append(barrio)
ATAS[ATA] = [desc, tipo, barrio, cp, sentido, carriles, velocidad, comentario]
print(barrios)
return ATAS
"""
Lee el csv con la informacion del conteo de vehiculos propocionado por el ayuntamiento.
A dicho csv antes hay que hacer un preproceso:
- Por simplicidad, cambiar formato a csv
- Quitar cabeceras
- Quitar lineas en blanco (;;;...)
- Volver a fichero xlsx
"""
def leer_csv_atas(fichero):
libro = openpyxl.load_workbook(fichero)
hoja = libro.active
ATA = ""
ATAS = []
inserts = 0
errores = 0
info_ATAS = leer_info_calles("infocalles.csv")
for fila in hoja.iter_rows(min_row= 1, min_col = 1):
dia = ""
fecha = ""
intensidades = []
horas = ["00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14" , "15", "16", "17", "18", "19", "20", "21", "22", "23"]
vacios = 0
for columna in fila:
if (isinstance(columna.value, str)):
strings = columna.value.split(' ')
# Detect the ATA (traffic counter id)
if "ATA" in columna.value:
ATA = strings[1]
ATAS.append(ATA)
if(ATA not in info_ATAS.keys()):
print(ATA)
# Read the daily data rows
if ("/2020" in columna.value or "/2019" in columna.value):
dia = definir_dia(strings[0])
fecha = strings[1]
elif (isinstance(columna.value, int) or isinstance(columna.value, float)):
intensidad = round(columna.value)
intensidades.append(intensidad)
else:
# Missing value; store -1 as a sentinel
vacios += 1
intensidades.append(-1)
# Once the whole day (24 hourly values) has been read
if (len(intensidades) == len(horas)):
# Read and write every hour
# ATA, DESCRIPCION, TIPO, BARRIO, CP, SENTIDO, CARRILES, VELOCIDAD LIMITE, COMENTARIO
if ATA in info_ATAS.keys():
desc = info_ATAS[ATA][0]
tipo = info_ATAS[ATA][1]
barrio = info_ATAS[ATA][2]
cp = info_ATAS[ATA][3]
sentido = info_ATAS[ATA][4]
carriles = info_ATAS[ATA][5]
velocidad = info_ATAS[ATA][6]
comentario = info_ATAS[ATA][7]
for i in range(len(horas)):
# If the value is missing (stored as -1 above), reuse the previous hour's value
if i != 0:
if intensidades[i] == -1:
intensidades[i] = intensidades[i-1]
obj = {
'ATA' : ATA,
'desc' : desc,
'tipo' : tipo,
'barrio' : barrio,
'cp' : cp,
'sentido' : sentido,
'carriles' : carriles,
'velocidad' : velocidad,
'comentario' : comentario,
'fecha' : fecha,
'dia' : dia,
'hora' : horas[i],
'festivo' : definir_festivo(fecha, dia),
'coches' : intensidades[i]
}
# If the row was not just a blank line, write it out
if vacios < 24:
# Write the result to a file
with open(path_ayuntamiento + 'datos_horarios4.json', 'a') as json_file:
json.dump(obj, json_file)
json_file.write('\n')
inserts += 1
json_file.close()
else:
with open(path_ayuntamiento + 'errores.json', 'a') as file:
json.dump(obj, file)
file.write('\n')
errores += 1
file.close()
print("Insertados : " + str(inserts))
print("Dias : " + str(inserts/24))
print("Errores : " + str(errores))
with open('ATAS.txt', 'w') as file:
for ata in ATAS:
file.write(ata)
file.write('\n')
# Reads the JSON obtained from the city council website with the 15-minute data
def leer_json_web(fichero):
with open(fichero, encoding='iso-8859-1') as json_file:
info_ATAS = leer_info_calles("infocalles.csv")
inserts = 0
valor_previo = 0
ATA_previo = ""
for data in json_file:
obj = json.loads(data)
ATA = obj['ATA'].upper()
fecha = obj['fecha']
time = datetime.strptime(fecha, '%Y:%m:%d')
fecha = time.strftime('%d/%m/%Y')
dia = time.weekday()
dia = definir_dia(dia)
intensidades = []
horas = ["00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14" , "15", "16", "17", "18", "19", "20", "21", "22", "23"]
if ATA in info_ATAS.keys():
desc = info_ATAS[ATA][0]
tipo = info_ATAS[ATA][1]
barrio = info_ATAS[ATA][2]
cp = info_ATAS[ATA][3]
sentido = info_ATAS[ATA][4]
carriles = info_ATAS[ATA][5]
velocidad = info_ATAS[ATA][6]
comentario = info_ATAS[ATA][7]
# There are erroneous, abnormally high readings; treat them as missing (NaN)
if int(obj['coches']) > 15000 or int(obj['coches']) < 0:
if ATA_previo == ATA:
obj['coches'] = float("NaN")
else:
obj['coches'] = float("NaN")
obj = {
'ATA' : ATA,
'desc' : desc,
'tipo' : tipo,
'barrio' : barrio,
'cp' : cp,
'sentido' : sentido,
'carriles' : carriles,
'velocidad' : velocidad,
'comentario' : comentario,
'fecha' : fecha,
'dia' : dia,
'hora' : obj['hora'],
'festivo' : definir_festivo(fecha, dia),
'coches' : float(obj['coches'])
}
valor_previo = float(obj['coches'])
ATA_previo = ATA
# Write the result to a file
with open(path_web + 'datos_web_completo.json', 'a', encoding='utf-8') as out_file:
json.dump(obj, out_file, ensure_ascii=False)
out_file.write('\n')
inserts += 1
print("Insertados : " + str(inserts))
print("Dias : " + str(inserts/24))
#ficheros = ["enejun2019.xlsx", "juldic2019.xlsx", "enejun2020.xlsx", "juldic2020.xlsx" ]
#ficheros = ["enejun2020.xlsx"]
#for fichero in ficheros:
# leer_csv_atas(path_ayuntamiento + fichero)
#leer_info_calles("a")
#ficheros_web = ["data_web_bueno.json", "datos_web_bueno_2.json"]
ficheros_web = ["data_web_bueno.json", "datav2.json"]
for fichero_web in ficheros_web:
leer_json_web(path_web + fichero_web)
"""
with open('ATAS.txt', 'r') as file:
ATAS = []
for ata in file:
if ata in ATAS:
print("SI")
else:
ATAS.append(ata)
"""
|
"""
Copyright Government of Canada 2020-2021
Written by: Xia Liu, National Microbiology Laboratory,
Public Health Agency of Canada
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this work except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import shutil
import os
from contextlib import closing
import urllib.request as request
from gnali.exceptions import ReferenceDownloadError
def download_file(url, dest_path, max_time):
"""Download a file from a url.
Args:
url: url for a file
dest_path: where to save file
max_time: maximum time to wait for
download. An exception is
raised if download doesn't
complete in this time.
"""
file_type = url.split(":")[0]
try:
# max_time is applied as the socket timeout for the request
if file_type == 'ftp':
with closing(request.urlopen(url, timeout=max_time)) as resp:
with open(dest_path, 'wb') as fh:
shutil.copyfileobj(resp, fh)
else:
with request.urlopen(url, timeout=max_time) as resp:
with open(dest_path, 'wb') as fh:
shutil.copyfileobj(resp, fh)
except Exception:
if os.path.exists(dest_path):
os.remove(dest_path)
raise ReferenceDownloadError("Error downloading {}".format(url))
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def deleteNode(self, root: Optional[TreeNode], key: int) -> Optional[TreeNode]:
if root is None:
return None
if root.val > key:
root.left = self.deleteNode(root.left, key)
elif root.val < key:
root.right = self.deleteNode(root.right, key)
else:
if root.right is None:
if root.left is None:
root = None
else:
root = root.left
else:
if root.left is None:
root = root.right
else:
# Two children: remove the in-order predecessor from the left subtree,
# then store its value in the current node
iop = self.inOrderPredecesor(root)
self.deleteNode(root, iop)
root.val = iop
return root
def inOrderPredecesor(self, root):
# The in-order predecessor is the rightmost node of the left subtree
root = root.left
while root.right is not None:
root = root.right
return root.val
|
"""Support for the Hive switches."""
from homeassistant.components.switch import SwitchDevice
from . import DATA_HIVE, DOMAIN
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Hive switches."""
if discovery_info is None:
return
session = hass.data.get(DATA_HIVE)
add_entities([HiveDevicePlug(session, discovery_info)])
class HiveDevicePlug(SwitchDevice):
"""Hive Active Plug."""
def __init__(self, hivesession, hivedevice):
"""Initialize the Switch device."""
self.node_id = hivedevice["Hive_NodeID"]
self.node_name = hivedevice["Hive_NodeName"]
self.device_type = hivedevice["HA_DeviceType"]
self.session = hivesession
self.attributes = {}
self.data_updatesource = "{}.{}".format(self.device_type, self.node_id)
self._unique_id = "{}-{}".format(self.node_id, self.device_type)
self.session.entities.append(self)
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id
@property
def device_info(self):
"""Return device information."""
return {"identifiers": {(DOMAIN, self.unique_id)}, "name": self.name}
def handle_update(self, updatesource):
"""Handle the new update request."""
if "{}.{}".format(self.device_type, self.node_id) not in updatesource:
self.schedule_update_ha_state()
@property
def name(self):
"""Return the name of this Switch device if any."""
return self.node_name
@property
def device_state_attributes(self):
"""Show Device Attributes."""
return self.attributes
@property
def current_power_w(self):
"""Return the current power usage in W."""
return self.session.switch.get_power_usage(self.node_id)
@property
def is_on(self):
"""Return true if switch is on."""
return self.session.switch.get_state(self.node_id)
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.session.switch.turn_on(self.node_id)
for entity in self.session.entities:
entity.handle_update(self.data_updatesource)
def turn_off(self, **kwargs):
"""Turn the device off."""
self.session.switch.turn_off(self.node_id)
for entity in self.session.entities:
entity.handle_update(self.data_updatesource)
def update(self):
"""Update all Node data from Hive."""
self.session.core.update_data(self.node_id)
self.attributes = self.session.attributes.state_attributes(self.node_id)
|
import os
config = os.environ.setdefault("CELERY_FUNTEST_CONFIG_MODULE",
"celery.tests.functional.config")
os.environ["CELERY_CONFIG_MODULE"] = config
os.environ["CELERY_LOADER"] = "default"
|
import os
import sys
import socket
import subprocess
import uuid
import time
import platform
import tempfile
import secrets
from pgrok import pgrok
from pyngrok import ngrok
from http import HTTPStatus
from urllib.request import urlopen
import logging
from kafka_logger import init_kafka_logger, OutputLogger
import gdrivefs.config
from gdrivefs import gdfuse
from gdrivefs import oauth_authorize
DEFAULT_DOWNLOAD_TIMEOUT = 6
DEFAULT_RETRY_COUNT = 0
PRINT_PROGRESS_ENABLE = True
logger = logging.getLogger(__name__)
EXTENSIONS = ["ms-python.python", "ms-toolsai.jupyter", "mechatroner.rainbow-csv", "vscode-icons-team.vscode-icons"]
CODESERVER_VERSION = "3.11.1"
class ColabCode:
def __init__(
self,
port=10000,
password=None,
authtoken=None,
subdomain=None,
mount_drive=False,
lab=False,
interactive=False,
tunnel_backend='pgrok',
logger_name="kafka.logger",
settings_ini=None,
):
global logger
self.port = port
self.password = password
self.authtoken = authtoken
self._mount = mount_drive
self._lab = lab
self.auth_storage_filepath = gdrivefs.config.DEFAULT_CREDENTIALS_FILEPATH
self.subdomain = subdomain if subdomain else secrets.token_hex(4)
if settings_ini:
logger = init_kafka_logger(logger_name, settings_ini)
else:
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
stdout_handler.setFormatter(logging.Formatter(
"%(asctime)s %(name)-12s %(levelname)-8s %(message)s", "%Y-%m-%dT%H:%M:%S"
))
logger.addHandler(stdout_handler)
self.redirector = OutputLogger(logger)
# Auth Google drive, interactive mode in notebook/shell env
if self._mount and interactive and platform.system().lower() == 'linux':
self._handle_auth_url()
authcode = input("Enter Auth code from your browser\n")
self._auth_write(authcode)
# Install and start server
if not self._lab:
self._install_code()
self._install_extensions()
if tunnel_backend == 'pgrok':
self._start_pgrok_server()
else:
self._start_server()
if self._lab:
self._run_lab()
else:
self._run_code()
def mount_gdrive(self):
gdfuse.mount(
auth_storage_filepath=self.auth_storage_filepath,
mountpoint=self._mount,
debug=gdrivefs.config.IS_DEBUG,
nothreads=gdrivefs.config.IS_DEBUG,
option_string=self.option_string
)
def _get_url(self):
# This won't actually be needed so set it to the default.
gdfuse.set_auth_cache_filepath(self.auth_storage_filepath)
oa = oauth_authorize.get_auth()
return oa.step1_get_auth_url()
def _handle_auth_url(self):
url = self._get_url()
with self.redirector:
print(
"To authorize FUSE to use your Google Drive account, visit the "
"following URL to produce an authorization code:\n\n%s\n" % (url,)
)
def _auth_write(self, authcode):
gdfuse.set_auth_cache_filepath(self.auth_storage_filepath)
oa = oauth_authorize.get_auth()
oa.step2_doexchange(authcode)
with self.redirector:
print("Authorization code recorded.")
def _print_progress(self, line):
if PRINT_PROGRESS_ENABLE:
sys.stdout.write("{}\r".format(line))
sys.stdout.flush()
def _clear_progress(self, spaces=100):
if PRINT_PROGRESS_ENABLE:
sys.stdout.write((" " * spaces) + "\r")
sys.stdout.flush()
def _download_code(self, url, retries=0, **kwargs):
"""
Given the url download the code binaries from the given url
Args:
url ([type]): [description]
retries ([type], optional): [description]. Defaults to 0.
Raises:
e: [description]
Returns:
[type]: [description]
"""
kwargs["timeout"] = kwargs.get("timeout", DEFAULT_DOWNLOAD_TIMEOUT)
try:
with self.redirector:
self._print_progress("Downloading code ...")
print("Downloading code from {} ...".format(url))
local_filename = url.split("/")[-1]
response = urlopen(url, timeout=kwargs["timeout"])
status_code = response.getcode()
if status_code != HTTPStatus.OK:
with self.redirector:
print("Response status code: {}".format(status_code))
return None
length = response.getheader("Content-Length")
if length:
length = int(length)
chunk_size = max(4096, length // 100)
else:
chunk_size = 64 * 1024
download_path = os.path.join(tempfile.gettempdir(), local_filename)
with open(download_path, "wb") as f, self.redirector:
size = 0
while True:
buffer = response.read(chunk_size)
if not buffer:
break
f.write(buffer)
size += len(buffer)
if length:
percent_done = int((float(size) / float(length)) * 100)
self._print_progress("Downloading code: {}%".format(percent_done))
self._clear_progress()
return download_path
except socket.timeout as e:
if retries < DEFAULT_RETRY_COUNT:
time.sleep(0.5)
return self._download_code(url, retries + 1, **kwargs)
else:
raise e
def _install_code(self):
downloded_path = self._download_code('https://raw.githubusercontent.com/cdr/code-server/main/install.sh')
os.chmod(downloded_path, int("777", 8))
# Run the downloaded install script directly; passing the args as a list without
# shell=True ensures the --version argument is actually forwarded to the script.
subprocess.run(
[downloded_path, "--version", f"{CODESERVER_VERSION}"],
stdout=subprocess.PIPE
)
def _install_extensions(self):
for ext in EXTENSIONS:
subprocess.run(["code-server", "--install-extension", f"{ext}"])
def _start_server(self):
if self.authtoken:
ngrok.set_auth_token(self.authtoken)
active_tunnels = ngrok.get_tunnels()
for tunnel in active_tunnels:
public_url = tunnel.public_url
ngrok.disconnect(public_url)
url = ngrok.connect(addr=self.port, bind_tls=True)
with self.redirector:
if not self._lab:
print(f"Code Server can be accessed on: {url}")
else:
print(f"Public URL: {url}")
def _start_pgrok_server(self):
active_tunnels = pgrok.get_tunnels()
for tunnel in active_tunnels:
public_url = tunnel.public_url
pgrok.disconnect(public_url)
tunnel = pgrok.connect(addr=self.port, name=self.subdomain)
with self.redirector:
if not self._lab:
print(f"Code Server can be accessed on: {tunnel.proto}://{tunnel.public_url}")
else:
print(f"Public URL: {tunnel.proto}://{tunnel.public_url}")
def _run_lab(self):
token = str(uuid.uuid1())
logger.info(f"Jupyter lab token: {token}")
base_cmd = "jupyter-lab --ip='localhost' --allow-root --ServerApp.allow_remote_access=True --no-browser"
# TODO: GdriveFS only works for Linux now. Support for Mac will be added later
if self._mount and platform.system().lower() == 'linux':
os.system(f"fuser -n tcp -k {self.port}")
self.mount_gdrive()
if self.password:
lab_cmd = f" --ServerApp.token='{token}' --ServerApp.password='{self.password}' --port {self.port}"
else:
lab_cmd = f" --ServerApp.token='{token}' --ServerApp.password='' --port {self.port}"
lab_cmd = base_cmd + lab_cmd
with subprocess.Popen(
[lab_cmd],
shell=True,
stdout=subprocess.PIPE,
bufsize=1,
universal_newlines=True,
) as proc, self.redirector:
for line in proc.stdout:
print(line, end="")
def _run_code(self):
# TODO: GdriveFS only works for Linux now. Support for Mac will be added later
if self._mount and platform.system().lower() == 'linux':
os.system(f"fuser -n tcp -k {self.port}")
self.mount_gdrive()
if self.password:
code_cmd = f"PASSWORD={self.password} code-server --port {self.port} --disable-telemetry"
else:
code_cmd = f"code-server --port {self.port} --auth none --disable-telemetry"
with subprocess.Popen(
[code_cmd],
shell=True,
stdout=subprocess.PIPE,
bufsize=1,
universal_newlines=True,
) as proc, self.redirector:
for line in proc.stdout:
print(line, end="")
|
# Code modified from https://github.com/clcarwin/focal_loss_pytorch/blob/master/focalloss.py
from strategies.strategy_template import StrategyTemplate
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(StrategyTemplate):
@staticmethod
def get_parser(parser=None):
if parser is None: parser = argparse.ArgumentParser()
parser.add_argument('--gamma', type=int, default=2,
help='Adjusts the rate at which easy examples are downweighted. At gamma=0, equivalent to CE')
parser.add_argument('--alpha', type=bool, default=False,
help='If False, the weighting factor alpha=1 for all classes, otherwise alpha=<inverse support class freq>')
parser.add_argument('--size_average', type=bool, default=False,
help='If true, averages the loss, else sums the loss')
return parser
def __init__(self, args, device, seed):
super(FocalLoss, self).__init__(args, device, seed)
self.focal_loss = FocalLossFunc(args, device)
self.inv_clas_freq = []
def update_support_set(self, support):
support = super(FocalLoss, self).update_support_set(support)
x, y = support
uniq, counts = torch.unique(y, return_counts=True)
self.inv_clas_freq = 1 / counts.float().to(self.device)
return support
def apply_inner_loss(self, loss_fn, *args):
if type(loss_fn) == nn.CrossEntropyLoss:
return self.focal_loss.forward(*args, weights=self.inv_clas_freq)
else:
raise Exception("Focal Loss not compatible with {}".format(type(loss_fn)))
def apply_outer_loss(self, loss_fn, *args):
if type(loss_fn) == nn.CrossEntropyLoss:
return self.focal_loss.forward(*args, weights=self.inv_clas_freq)
else:
raise Exception("Focal Loss not compatible with {}".format(type(loss_fn)))
class FocalLossFunc(nn.Module):
def __init__(self, args, device):
super(FocalLossFunc, self).__init__()
self.args = args
self.device = device
def forward(self, input, target, weights=None):
gamma = self.args.gamma
is_alpha = self.args.alpha
size_average = self.args.size_average
if input.dim()>2:
input = input.view(input.size(0),input.size(1),-1) # N,C,H,W => N,C,H*W
input = input.transpose(1,2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1,input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1,1)
logpt = F.log_softmax(input, dim=1)
logpt = logpt.gather(1,target)
logpt = logpt.view(-1)
pt = Variable(logpt.data.exp()) #.to(self.device)
if is_alpha:
if weights is None:
uniq, count = torch.unique(target, return_counts=True)
n = input.shape[1]
weights = torch.zeros((n,)).type_as(input.data)
weights[uniq] = count.sum() / (n * count.type_as(input.data))
at = weights.gather(0,target.data.view(-1))
logpt = logpt * Variable(at) #.to(self.device)
loss = -1 * (1-pt)**gamma * logpt
if size_average: return loss.mean()
else: return loss.sum()
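# A minimal, self-contained sketch of calling FocalLossFunc directly, outside the
# StrategyTemplate workflow above. The argparse.Namespace only mimics the three
# attributes forward() reads (gamma, alpha, size_average), and device='cpu' is an
# assumption; this is illustrative, not how the surrounding framework wires things up.
if __name__ == '__main__':
    _args = argparse.Namespace(gamma=2, alpha=True, size_average=True)
    _loss_fn = FocalLossFunc(_args, device='cpu')
    _logits = torch.randn(8, 5)            # 8 samples, 5 classes
    _labels = torch.randint(0, 5, (8,))    # integer class targets
    print(_loss_fn(_logits, _labels))      # scalar focal loss (mean over the batch)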
|
from .data import *
from .plot import *
from .data_acquisition import *
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'liuzhijun'
import os
import sys
import logging
import time
from datetime import timedelta
from tornado import queues
from tornado import gen
from tornado import ioloop
from core import crawl_detail_info, crawl_base_info
PROJECT_PATH = os.path.realpath(os.path.join("..", os.path.dirname(__file__)))
if PROJECT_PATH not in sys.path:
sys.path.append(PROJECT_PATH)
from models import JavaPost, PythonPost
import config
base_concurrency_num = 10
detail_concurrency_num = 50
@gen.coroutine
def main(category, page_count):
start = time.time()
base_info_queue = queues.Queue() # queue of listing pages whose basic info still has to be parsed
detail_info_queue = queues.Queue() # queue of posts whose full content still has to be parsed
url = config.java_source if category == 'java' else config.python_source
for page in range(1, page_count + 1):
# Build each page URL from the base url instead of reusing the mutated value,
# otherwise page 3 would become .../page/2/page/3
page_url = url if page == 1 else url + "/page/%d" % page
base_info_queue.put(page_url)
@gen.coroutine
def base_info_work():
"""
解析基本的信息的进程
:return:
"""
while True:
current_url = yield base_info_queue.get()
posts = yield crawl_base_info(current_url)
for post in posts:
yield detail_info_queue.put(post)
base_info_queue.task_done()
@gen.coroutine
def detail_info_work():
while True:
raw_post = yield detail_info_queue.get()
post = yield crawl_detail_info(raw_post)
obj = JavaPost(**post) if category == 'java' else PythonPost(**post)
obj.save()
detail_info_queue.task_done()
for _ in range(base_concurrency_num):
# start base_concurrency_num worker coroutines
base_info_work()
for _ in range(detail_concurrency_num):
detail_info_work()
yield base_info_queue.join(timeout=timedelta(seconds=300))
yield detail_info_queue.join(timeout=timedelta(seconds=300))
print('Done in %d seconds' % (time.time() - start))
if __name__ == '__main__':
logging.basicConfig()
io_loop = ioloop.IOLoop.current()
io_loop.run_sync(lambda: main('python', 1))
io_loop.run_sync(lambda: main("java", 1))
|
#
# Copyright (c) 2012 Will Page <compenguy@gmail.com>
# See the file LICENSE.txt for your full rights.
#
# Derivative of vantage.py and wmr100.py, credit to Tom Keffer
"""Classes and functions for interfacing with Oregon Scientific WM-918, WMR9x8 and WMR-968 weather stations
See http://wx200.planetfall.com/wx200.txt or http://www.qsl.net/zl1vfo/wx200/wx200.txt or
http://ed.toton.org/projects/weather/station-protocol.txt for documentation on the WM-918 / WX-200 serial protocol
See http://www.netsky.org/WMR/Protocol.htm for documentation on the WMR9x8 serial protocol,
and http://code.google.com/p/wmr968/source/browse/trunk/src/edu/washington/apl/weather/packet/
for sample (java) code.
"""
import time
import operator
import syslog
import serial
import weewx.drivers
from math import exp
DRIVER_NAME = 'WMR9x8'
DRIVER_VERSION = "3.0"
def loader(config_dict, engine): # @UnusedVariable
return WMR9x8(**config_dict[DRIVER_NAME])
def confeditor_loader():
return WMR9x8ConfEditor()
DEFAULT_PORT = '/dev/ttyS0'
class WMR9x8ProtocolError(weewx.WeeWxIOError):
"""Used to signal a protocol error condition"""
def channel_decoder(chan):
if 1 <= chan <= 2:
outchan = chan
elif chan == 4:
outchan = 3
else:
raise WMR9x8ProtocolError("Bad channel number %d" % chan)
return outchan
# Dictionary that maps a measurement code, to a function that can decode it:
# packet_type_decoder_map and packet_type_size_map are filled out using the @<type>_registerpackettype
# decorator below
wmr9x8_packet_type_decoder_map = {}
wmr9x8_packet_type_size_map = {}
wm918_packet_type_decoder_map = {}
wm918_packet_type_size_map = {}
def wmr9x8_registerpackettype(typecode, size):
""" Function decorator that registers the function as a handler
for a particular packet type. Parameters to the decorator
are typecode and size (in bytes). """
def wrap(dispatcher):
wmr9x8_packet_type_decoder_map[typecode] = dispatcher
wmr9x8_packet_type_size_map[typecode] = size
return wrap
def wm918_registerpackettype(typecode, size):
""" Function decorator that registers the function as a handler
for a particular packet type. Parameters to the decorator
are typecode and size (in bytes). """
def wrap(dispatcher):
wm918_packet_type_decoder_map[typecode] = dispatcher
wm918_packet_type_size_map[typecode] = size
return wrap
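# For example, the handler decorated below with
# @wmr9x8_registerpackettype(typecode=0x00, size=11) ends up stored as
# wmr9x8_packet_type_decoder_map[0x00], with wmr9x8_packet_type_size_map[0x00] == 11.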
class SerialWrapper(object):
"""Wraps a serial connection returned from package serial"""
def __init__(self, port):
self.port = port
# WMR9x8 specific settings
self.serialconfig = {
"bytesize": serial.EIGHTBITS,
"parity": serial.PARITY_NONE,
"stopbits": serial.STOPBITS_ONE,
"timeout": None,
"rtscts": 1
}
def flush_input(self):
self.serial_port.flushInput()
def queued_bytes(self):
return self.serial_port.inWaiting()
def read(self, chars=1):
_buffer = self.serial_port.read(chars)
N = len(_buffer)
if N != chars:
raise weewx.WeeWxIOError("Expected to read %d chars; got %d instead" % (chars, N))
return _buffer
def openPort(self):
# Open up the port and store it
self.serial_port = serial.Serial(self.port, **self.serialconfig)
syslog.syslog(syslog.LOG_DEBUG, "wmr9x8: Opened up serial port %s" % self.port)
def closePort(self):
self.serial_port.close()
#==============================================================================
# Class WMR9x8
#==============================================================================
class WMR9x8(weewx.drivers.AbstractDevice):
"""Class that represents a connection to a Oregon Scientific WMR9x8 console.
The connection to the console will be open after initialization"""
def __init__(self, **stn_dict):
"""Initialize an object of type WMR9x8.
NAMED ARGUMENTS:
model: Which station model is this?
[Optional. Default is 'WMR968']
port: The serial port of the WM918/WMR918/WMR968.
[Required if serial communication]
baudrate: Baudrate of the port. [Optional. Default 9600]
timeout: How long to wait before giving up on a response from the
serial port. [Optional. Default is 5]
"""
self.model = stn_dict.get('model', 'WMR968')
self.last_totalRain = None
# Create the specified port
self.port = WMR9x8._port_factory(stn_dict)
# Open it up:
self.port.openPort()
@property
def hardware_name(self):
return self.model
def openPort(self):
"""Open up the connection to the console"""
self.port.openPort()
def closePort(self):
"""Close the connection to the console. """
self.port.closePort()
def genLoopPackets(self):
"""Generator function that continuously returns loop packets"""
buf = []
# We keep a buffer the size of the largest supported packet
wmr9x8max = max(wmr9x8_packet_type_size_map.items(), key=operator.itemgetter(1))[1]
wm918max = max(wm918_packet_type_size_map.items(), key=operator.itemgetter(1))[1]
preBufferSize = max(wmr9x8max, wm918max)
while True:
buf.extend(map(ord, self.port.read(preBufferSize - len(buf))))
# WMR-9x8/968 packets are framed by 0xFF characters
if buf[0] == 0xFF and buf[1] == 0xFF and buf[2] in wmr9x8_packet_type_size_map:
# Look up packet type, the expected size of this packet type
ptype = buf[2]
psize = wmr9x8_packet_type_size_map[ptype]
# Capture only the data belonging to this packet
pdata = buf[0:psize]
if weewx.debug >= 2:
self.log_packet(pdata)
# Validate the checksum
sent_checksum = pdata[-1]
calc_checksum = reduce(operator.add, pdata[0:-1]) & 0xFF
if sent_checksum == calc_checksum:
syslog.syslog(syslog.LOG_DEBUG, "wmr9x8: Received WMR9x8 data packet.")
payload = pdata[2:-1]
_record = wmr9x8_packet_type_decoder_map[ptype](self, payload)
if _record is not None:
yield _record
# Eliminate all packet data from the buffer
buf = buf[psize:]
else:
syslog.syslog(syslog.LOG_DEBUG, "wmr9x8: Invalid data packet (%s)." % pdata)
# Drop the first byte of the buffer and start scanning again
buf.pop(0)
# WM-918 packets have no framing
elif buf[0] in wm918_packet_type_size_map:
# Look up packet type, the expected size of this packet type
ptype = buf[0]
psize = wm918_packet_type_size_map[ptype]
# Capture only the data belonging to this packet
pdata = buf[0:psize]
# Validate the checksum
sent_checksum = pdata[-1]
calc_checksum = reduce(operator.add, pdata[0:-1]) & 0xFF
if sent_checksum == calc_checksum:
syslog.syslog(syslog.LOG_DEBUG, "wmr9x8: Received WM-918 data packet.")
payload = pdata[0:-1] #send all of packet but crc
_record = wm918_packet_type_decoder_map[ptype](self, payload)
if _record is not None:
yield _record
# Eliminate all packet data from the buffer
buf = buf[psize:]
else:
syslog.syslog(syslog.LOG_DEBUG, "wmr9x8: Invalid data packet (%s)." % pdata)
# Drop the first byte of the buffer and start scanning again
buf.pop(0)
else:
syslog.syslog(syslog.LOG_DEBUG, "wmr9x8: Advancing buffer by one for the next potential packet")
buf.pop(0)
#==========================================================================
# Oregon Scientific WMR9x8 utility functions
#==========================================================================
@staticmethod
def _port_factory(stn_dict):
"""Produce a serial port object"""
# Get the connection type. If it is not specified, assume 'serial':
connection_type = stn_dict.get('type', 'serial').lower()
if connection_type == "serial":
port = stn_dict['port']
return SerialWrapper(port)
raise weewx.UnsupportedFeature(stn_dict['type'])
@staticmethod
def _get_nibble_data(packet):
nibbles = bytearray()
for byte in packet:
nibbles.extend([(byte & 0x0F), (byte & 0xF0) >> 4])
return nibbles
def log_packet(self, packet):
packet_str = ','.join(["x%x" % v for v in packet])
print "%d, %s, %s" % (int(time.time()+0.5), time.asctime(), packet_str)
@wmr9x8_registerpackettype(typecode=0x00, size=11)
def _wmr9x8_wind_packet(self, packet):
"""Decode a wind packet. Wind speed will be in kph"""
null, status, dir1, dir10, dir100, gust10th, gust1, gust10, avg10th, avg1, avg10, chillstatus, chill1, chill10 = self._get_nibble_data(packet[1:]) # @UnusedVariable
battery = (status & 0x04) >> 2
# The console returns wind speeds in m/s. Our metric system requires kph,
# so the result needs to be multiplied by 3.6
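# For example, an average reading of 2.5 m/s is stored as 2.5 * 3.6 = 9.0 kph.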
_record = {
'windBatteryStatus' : battery,
'windSpeed' : ((avg10th / 10.0) + avg1 + (avg10 * 10)) * 3.6,
'windDir' : dir1 + (dir10 * 10) + (dir100 * 100),
'dateTime' : int(time.time() + 0.5),
'usUnits' : weewx.METRIC
}
# Sometimes the station emits a wind gust that is less than the average wind.
# Ignore it if this is the case.
windGustSpeed = ((gust10th / 10.0) + gust1 + (gust10 * 10)) * 3.6
if windGustSpeed >= _record['windSpeed']:
_record['windGust'] = windGustSpeed
# Bit 1 of chillstatus is on if there is no wind chill data;
# Bit 2 is on if it has overflowed. Check them both:
if chillstatus & 0x6 == 0:
chill = chill1 + (10 * chill10)
if chillstatus & 0x8:
chill = -chill
_record['windchill'] = chill
else:
_record['windchill'] = None
return _record
@wmr9x8_registerpackettype(typecode=0x01, size=16)
def _wmr9x8_rain_packet(self, packet):
null, status, cur1, cur10, cur100, tot10th, tot1, tot10, tot100, tot1000, yest1, yest10, yest100, yest1000, totstartmin1, totstartmin10, totstarthr1, totstarthr10, totstartday1, totstartday10, totstartmonth1, totstartmonth10, totstartyear1, totstartyear10 = self._get_nibble_data(packet[1:]) # @UnusedVariable
battery = (status & 0x04) >> 2
# station units are mm and mm/hr while the internal metric units are cm and cm/hr
# It is reported that total rainfall is biased by +0.5 mm
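# For example, a cumulative total of 123.4 mm read from the nibbles is stored as 12.34 cm after the division by 10.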
_record = {
'rainBatteryStatus' : battery,
'rainRate' : (cur1 + (cur10 * 10) + (cur100 * 100)) / 10.0,
'yesterdayRain' : (yest1 + (yest10 * 10) + (yest100 * 100) + (yest1000 * 1000)) / 10.0,
'totalRain' : (tot10th / 10.0 + tot1 + 10.0 * tot10 + 100.0 * tot100 + 1000.0 * tot1000) / 10.0,
'dateTime' : int(time.time() + 0.5),
'usUnits' : weewx.METRIC
}
# Because the WMR does not offer anything like bucket tips, we must
# calculate it by looking for the change in total rain. Of course, this
# won't work for the very first rain packet.
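# For example, if the previous total was 12.30 cm and this packet reports 12.50 cm, 'rain' comes out as 0.20 cm.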
_record['rain'] = (_record['totalRain'] - self.last_totalRain) if self.last_totalRain is not None else None
self.last_totalRain = _record['totalRain']
return _record
@wmr9x8_registerpackettype(typecode=0x02, size=9)
def _wmr9x8_thermohygro_packet(self, packet):
chan, status, temp10th, temp1, temp10, temp100etc, hum1, hum10, dew1, dew10 = self._get_nibble_data(packet[1:])
chan = channel_decoder(chan)
battery = (status & 0x04) >> 2
_record = {
'dateTime' : int(time.time() + 0.5),
'usUnits' : weewx.METRIC,
'batteryStatusTH%d' % chan : battery
}
_record['extraHumid%d' % chan] = hum1 + (hum10 * 10)
tempoverunder = temp100etc & 0x04
if not tempoverunder:
temp = (temp10th / 10.0) + temp1 + (temp10 * 10) + ((temp100etc & 0x03) * 100)
if temp100etc & 0x08:
temp = -temp
_record['extraTemp%d' % chan] = temp
else:
_record['extraTemp%d' % chan] = None
dewunder = bool(status & 0x01)
# If dew point is valid, save it.
if not dewunder:
_record['dewpoint%d' % chan] = dew1 + (dew10 * 10)
return _record
@wmr9x8_registerpackettype(typecode=0x03, size=9)
def _wmr9x8_mushroom_packet(self, packet):
_, status, temp10th, temp1, temp10, temp100etc, hum1, hum10, dew1, dew10 = self._get_nibble_data(packet[1:])
battery = (status & 0x04) >> 2
_record = {
'dateTime' : int(time.time() + 0.5),
'usUnits' : weewx.METRIC,
'outTempBatteryStatus' : battery,
'outHumidity' : hum1 + (hum10 * 10)
}
tempoverunder = temp100etc & 0x04
if not tempoverunder:
temp = (temp10th / 10.0) + temp1 + (temp10 * 10) + ((temp100etc & 0x03) * 100)
if temp100etc & 0x08:
temp = -temp
_record['outTemp'] = temp
else:
_record['outTemp'] = None
dewunder = bool(status & 0x01)
# If dew point is valid, save it.
if not dewunder:
_record['dewpoint'] = dew1 + (dew10 * 10)
return _record
@wmr9x8_registerpackettype(typecode=0x04, size=7)
def _wmr9x8_therm_packet(self, packet):
chan, status, temp10th, temp1, temp10, temp100etc = self._get_nibble_data(packet[1:])
chan = channel_decoder(chan)
battery = (status & 0x04) >> 2
_record = {'dateTime' : int(time.time() + 0.5),
'usUnits' : weewx.METRIC,
'batteryStatusT%d' % chan : battery}
temp = temp10th / 10.0 + temp1 + 10.0 * temp10 + 100.0 * (temp100etc & 0x03)
if temp100etc & 0x08:
temp = -temp
tempoverunder = temp100etc & 0x04
_record['extraTemp%d' % chan] = temp if not tempoverunder else None
return _record
@wmr9x8_registerpackettype(typecode=0x05, size=13)
def _wmr9x8_in_thermohygrobaro_packet(self, packet):
null, status, temp10th, temp1, temp10, temp100etc, hum1, hum10, dew1, dew10, baro1, baro10, wstatus, null2, slpoff10th, slpoff1, slpoff10, slpoff100 = self._get_nibble_data(packet[1:]) # @UnusedVariable
battery = (status & 0x04) >> 2
hum = hum1 + (hum10 * 10)
tempoverunder = bool(temp100etc & 0x04)
if not tempoverunder:
temp = (temp10th / 10.0) + temp1 + (temp10 * 10) + ((temp100etc & 0x03) * 100)
if temp100etc & 0x08:
temp = -temp
else:
temp = None
dewunder = bool(status & 0x01)
if not dewunder:
dew = dew1 + (dew10 * 10)
else:
dew = None
rawsp = ((baro10 & 0xF) << 4) | baro1
sp = rawsp + 795
pre_slpoff = (slpoff10th / 10.0) + slpoff1 + (slpoff10 * 10) + (slpoff100 * 100)
slpoff = (1000 + pre_slpoff) if pre_slpoff < 400.0 else pre_slpoff
_record = {
'inTempBatteryStatus' : battery,
'inHumidity' : hum,
'inTemp' : temp,
'dewpoint' : dew,
'barometer' : rawsp + slpoff,
'pressure' : sp,
'dateTime' : int(time.time() + 0.5),
'usUnits' : weewx.METRIC
}
return _record
@wmr9x8_registerpackettype(typecode=0x06, size=14)
def _wmr9x8_in_ext_thermohygrobaro_packet(self, packet):
null, status, temp10th, temp1, temp10, temp100etc, hum1, hum10, dew1, dew10, baro1, baro10, baro100, wstatus, null2, slpoff10th, slpoff1, slpoff10, slpoff100, slpoff1000 = self._get_nibble_data(packet[1:]) # @UnusedVariable
battery = (status & 0x04) >> 2
hum = hum1 + (hum10 * 10)
tempoverunder = bool(temp100etc & 0x04)
if not tempoverunder:
temp = (temp10th / 10.0) + temp1 + (temp10 * 10) + ((temp100etc & 0x03) * 100)
if temp100etc & 0x08:
temp = -temp
else:
temp = None
dewunder = bool(status & 0x01)
if not dewunder:
dew = dew1 + (dew10 * 10)
else:
dew = None
rawsp = ((baro100 & 0x01) << 8) | ((baro10 & 0xF) << 4) | baro1
sp = rawsp + 600
slpoff = (slpoff10th / 10.0) + slpoff1 + (slpoff10 * 10) + (slpoff100 * 100) + (slpoff1000 * 1000)
_record = {
'inTempBatteryStatus' : battery,
'inHumidity' : hum,
'inTemp' : temp,
'inDewpoint' : dew,
'barometer' : rawsp+slpoff,
'pressure' : sp,
'dateTime' : int(time.time() + 0.5),
'usUnits' : weewx.METRIC
}
return _record
@wmr9x8_registerpackettype(typecode=0x0e, size=5)
def _wmr9x8_time_packet(self, packet):
"""The (partial) time packet is not used by weewx.
However, the last time is saved in case getTime() is called."""
min1, min10 = self._get_nibble_data(packet[1:])
minutes = min1 + ((min10 & 0x07) * 10)
cur = time.gmtime()
self.last_time = time.mktime(
(cur.tm_year, cur.tm_mon, cur.tm_mday,
cur.tm_hour, minutes, 0,
cur.tm_wday, cur.tm_yday, cur.tm_isdst))
return None
@wmr9x8_registerpackettype(typecode=0x0f, size=9)
def _wmr9x8_clock_packet(self, packet):
"""The clock packet is not used by weewx.
However, the last time is saved in case getTime() is called."""
min1, min10, hour1, hour10, day1, day10, month1, month10, year1, year10 = self._get_nibble_data(packet[1:])
year = year1 + (year10 * 10)
# The station initializes itself to "1999" as the first year
# Thus 99 = 1999, 00 = 2000, 01 = 2001, etc.
year += 1900 if year == 99 else 2000
month = month1 + (month10 * 10)
day = day1 + (day10 * 10)
hour = hour1 + (hour10 * 10)
minutes = min1 + ((min10 & 0x07) * 10)
cur = time.gmtime()
# TODO: not sure if using tm_isdst is correct here
self.last_time = time.mktime(
(year, month, day,
hour, minutes, 0,
cur.tm_wday, cur.tm_yday, cur.tm_isdst))
return None
@wm918_registerpackettype(typecode=0xcf, size=27)
def _wm918_wind_packet(self, packet):
"""Decode a wind packet. Wind speed will be in m/s"""
gust10th, gust1, gust10, dir1, dir10, dir100, avg10th, avg1, avg10, avgdir1, avgdir10, avgdir100 = self._get_nibble_data(packet[1:7])
_chill10, _chill1 = self._get_nibble_data(packet[16:17])
# The console returns wind speeds in m/s. Our metric system requires kph,
# so the result needs to be multiplied by 3.6
_record = {
'windSpeed' : ((avg10th / 10.0) + avg1 + (avg10*10)) * 3.6,
'windDir' : avgdir1 + (avgdir10 * 10) + (avgdir100 * 100),
'windGust' : ((gust10th / 10.0) + gust1 + (gust10 * 10)) * 3.6,
'windGustDir' : dir1 + (dir10 * 10) + (dir100 * 100),
'dateTime' : int(time.time() + 0.5),
'usUnits' : weewx.METRIC
}
# Sometimes the station emits a wind gust that is less than the average wind.
# If so, clamp the gust to the average wind speed.
if _record['windGust'] < _record['windSpeed']:
_record['windGust'] = _record['windSpeed']
# Save the windspeed to be used for windchill and apparent temperature
self.last_windSpeed = _record['windSpeed']
return _record
@wm918_registerpackettype(typecode=0xbf, size=14)
def _wm918_rain_packet(self, packet):
cur1, cur10, cur100, _stat, yest1, yest10, yest100, yest1000, tot1, tot10, tot100, tot1000 = self._get_nibble_data(packet[1:7])
# It is reported that total rainfall is biased by +0.5 mm
_record = {
'rainRate' : (cur1 + (cur10 * 10) + (cur100 * 100)) / 10.0,
'yesterdayRain' : (yest1 + (yest10 * 10) + (yest100 * 100) + (yest1000 * 1000)) / 10.0,
'totalRain' : (tot1 + (tot10 * 10) + (tot100 * 100) + (tot1000 * 1000)) / 10.0,
'dateTime' : int(time.time() + 0.5),
'usUnits' : weewx.METRIC
}
# Because the WM does not offer anything like bucket tips, we must
# calculate it by looking for the change in total rain. Of course, this
# won't work for the very first rain packet.
# the WM reports rain rate as rain_rate, rain yesterday (updated by wm at midnight) and total rain since last reset
# weewx needs rain since last packet we need to divide by 10 to mimic Vantage reading
_record['rain'] = (_record['totalRain'] - self.last_totalRain) if self.last_totalRain is not None else None
self.last_totalRain = _record['totalRain']
return _record
@wm918_registerpackettype(typecode=0x8f, size=35)
def _wm918_humidity_packet(self, packet):
hum1, hum10 = self._get_nibble_data(packet[8:9])
humout1, humout10 = self._get_nibble_data(packet[20:21])
hum = hum1 + (hum10 * 10)
humout = humout1 + (humout10 * 10)
_record = {
'outHumidity' : humout,
'inHumidity' : hum,
'dateTime' : int(time.time() + 0.5),
'usUnits' : weewx.METRIC
}
self.last_outHumidity = _record['outHumidity'] # save the humidity for the heat index and apparent temp calculation
return _record
@wm918_registerpackettype(typecode=0x9f, size=34)
def _wm918_therm_packet(self, packet):
temp10th, temp1, temp10, null = self._get_nibble_data(packet[1:3]) # @UnusedVariable
tempout10th, tempout1, tempout10, null = self._get_nibble_data(packet[16:18]) # @UnusedVariable
temp = (temp10th / 10.0) + temp1 + ((temp10 & 0x7) * 10)
temp *= -1 if (temp10 & 0x08) else 1
tempout = (tempout10th / 10.0) + tempout1 + ((tempout10 & 0x7) * 10)
tempout *= -1 if (tempout10 & 0x08) else 1
_record = {
'inTemp' : temp,
'outTemp' : tempout
}
try:
_record['apparentTemp'] = tempout + 0.33 * ((self.last_outHumidity / 100.0) * 6.105 * exp(17.27 * tempout / (237.7 + tempout))) -0.70 * (self.last_windSpeed / 3.6) - 4.00
except AttributeError:
_record['apparentTemp'] = None
_record['dateTime'] = int(time.time() + 0.5)
_record['usUnits'] = weewx.METRIC
return _record
@wm918_registerpackettype(typecode=0xaf, size=31)
def _wm918_baro_dew_packet(self, packet):
baro1, baro10, baro100, baro1000, slp10th, slp1, slp10, slp100, slp1000, fmt, prediction, trend, dewin1, dewin10 = self._get_nibble_data(packet[1:8]) # @UnusedVariable
dewout1, dewout10 = self._get_nibble_data(packet[18:19]) # @UnusedVariable
#dew = dewin1 + (dewin10 * 10)
#dewout = dewout1 + (dewout10 *10)
sp = baro1 + (baro10 * 10) + (baro100 * 100) + (baro1000 * 1000)
slp = (slp10th / 10.0) + slp1 + (slp10 * 10) + (slp100 * 100) + (slp1000 * 1000)
_record = {
'barometer' : slp,
'pressure' : sp,
#'inDewpoint' : dew,
#'outDewpoint' : dewout,
#'dewpoint' : dewout,
'dateTime' : int(time.time() + 0.5),
'usUnits' : weewx.METRIC
}
return _record
class WMR9x8ConfEditor(weewx.drivers.AbstractConfEditor):
@property
def default_stanza(self):
return """
[WMR9x8]
# This section is for the Oregon Scientific WMR918/968
# Connection type. For now, 'serial' is the only option.
type = serial
# Serial port such as /dev/ttyS0, /dev/ttyUSB0, or /dev/cuaU0
port = /dev/ttyUSB0
# The station model, e.g., WMR918, Radio Shack 63-1016
model = WMR968
# The driver to use:
driver = weewx.drivers.wmr9x8
"""
def prompt_for_settings(self):
print "Specify the serial port on which the station is connected, for"
print "example /dev/ttyUSB0 or /dev/ttyS0."
port = self._prompt('port', '/dev/ttyUSB0')
return {'port': port}
def modify_config(self, config_dict):
print """
Setting rainRate, windchill, and dewpoint calculations to hardware."""
config_dict.setdefault('StdWXCalculate', {})
config_dict['StdWXCalculate'].setdefault('Calculations', {})
config_dict['StdWXCalculate']['Calculations']['rainRate'] = 'hardware'
config_dict['StdWXCalculate']['Calculations']['windchill'] = 'hardware'
config_dict['StdWXCalculate']['Calculations']['dewpoint'] = 'hardware'
# Define a main entry point for basic testing without the weewx engine.
# Invoke this as follows from the weewx root dir:
#
# PYTHONPATH=bin python bin/weewx/drivers/wmr9x8.py
if __name__ == '__main__':
import optparse
usage = """Usage: %prog --help
%prog --version
%prog --gen-packets [--port=PORT]"""
syslog.openlog('wmr9x8', syslog.LOG_PID | syslog.LOG_CONS)
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
weewx.debug = 2
parser = optparse.OptionParser(usage=usage)
parser.add_option('--version', dest='version', action='store_true',
help='Display driver version')
parser.add_option('--port', dest='port', metavar='PORT',
help='The port to use. Default is %s' % DEFAULT_PORT,
default=DEFAULT_PORT)
parser.add_option('--gen-packets', dest='gen_packets', action='store_true',
help="Generate packets indefinitely")
(options, args) = parser.parse_args()
if options.version:
print "WMR9x8 driver version %s" % DRIVER_VERSION
exit(0)
if options.gen_packets:
syslog.syslog(syslog.LOG_DEBUG, "wmr9x8: Running genLoopPackets()")
stn_dict={'port': options.port}
stn = WMR9x8(**stn_dict)
for packet in stn.genLoopPackets():
print packet
|
# Reads a list of integers and prints the sum of the values that appear exactly once.
o=[int(x) for x in input('Enter numbers separated by space: ').split()]
o.sort()
r=[]
for i in range(0,len(o)-1):
if o[i]==o[i+1]:
r.append(o[i])
r=list(set(r))
o=set(o)
while(len(r)>0):
o.remove(r[0])
r.pop(0)
print(sum(o))
|
from dsame.trees.BinaryTreeNode import *
def search_element(ele, root: BinaryTreeNode):
if not root:
return False
if ele == root.data:
return True
if search_element(ele, root.left):
return True
else:
return search_element(ele, root.right)
print(search_element(4, initializeBinaryTree()))
|
# Copyright 2017 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import syntribos.signal
def validate_content(test):
"""Checks if the API is responding to TRACE requests
Checks if the response body contains the request header
"TRACE_THIS".
:returns: SynSignal
"""
check_name = "VALID_CONTENT"
strength = 1.0
tags = []
if not test.init_signals.ran_check(check_name):
resp = test.init_resp
else:
resp = test.test_resp
data = {"response_content": resp.text}
# vulnerable to XST if response body has the request header
xst_header = "TRACE_THIS: XST_Vuln"
if "Content-type" in resp.headers:
content_type = resp.headers["Content-type"]
data["content_type"] = content_type
if data["response_content"]:
if data["response_content"].find(xst_header) != -1:
text = "Request header in response: {}".format(xst_header)
slug = "HEADER_XST"
return syntribos.signal.SynSignal(
data=data,
tags=tags,
text=text,
slug=slug,
strength=strength,
check_name=check_name)
|
# Fixes relative to the old code kept below:
# - print the number itself, not a one-element list ('[]')
# - the first 'if' needs 'and' instead of 'or'
# - the following branches must be 'elif'/'else' rather than independent 'if's
for number in range(1, 101):
if number % 3 == 0 and number % 5 == 0:
print("FizzBuzz")
elif number % 3 == 0:
print("Fizz")
elif number % 5 == 0:
print("Buzz")
else:
print(number)
#old code, includes bugs
#for number in range(1, 101):
# if number % 3 == 0 or number % 5 == 0:
# print("FizzBuzz")
# if number % 3 == 0:
# print("Fizz")
# if number % 5 == 0:
# print("Buzz")
# else:
# print([number])
|
import datetime
import json
import typing
import pytest
from starlette.testclient import TestClient
from beaconator.backend import dao
from beaconator.backend.utils.images import image_queries
def test_index(client: TestClient):
response = client.get("/")
assert response.status_code == 200
def test_login_wrong_password(client: TestClient):
response = client.post("/api/login", data=json.dumps({"password": "wrongpass"}))
assert response.status_code == 401
def test_login_right_password(client: TestClient):
response = client.post(
"/api/login", data=json.dumps({"password": "tDkZMLvsao93Cm5I9FZbwqPH"})
)
assert response.status_code == 200
assert "token" in response.json()
def test_get_codes_unauth(client: TestClient):
response = client.get("/api/codes")
assert response.status_code == 401
def test_get_codes(auth_client: TestClient, monkeypatch):
"""
Test GET codes endpoint
"""
test_data = [
{
"id": 1,
"name": "Test Code",
"code": "UA-TEST-CODE-123",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": True,
},
{
"id": 2,
"name": "Test Code",
"code": "UA-TEST-CODE-123",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": True,
},
]
def mock_get(db=None, id=None, skip=None, limit=None):
return test_data
monkeypatch.setattr(dao, "get_ga_codes", mock_get)
response = auth_client.get("/api/codes")
assert response.status_code == 200
assert len(response.json()) == len(test_data)
def test_get_code_unauth(client: TestClient):
response = client.get("/api/codes/1")
assert response.status_code == 401
def test_get_code(auth_client: TestClient, monkeypatch):
"""
Test GET code endpoint
"""
test_data = {
"id": 1,
"name": "Test Code",
"code": "UA-TEST-CODE-123",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": True,
}
def mock_get(db=None, id=None):
return test_data
monkeypatch.setattr(dao, "get_ga_code", mock_get)
response = auth_client.get("/api/codes/1")
assert response.status_code == 200
assert response.json()["id"] == test_data["id"]
def test_get_code_incorrect_id(auth_client: TestClient, monkeypatch):
def mock_get(db=None, id=None):
return None
monkeypatch.setattr(dao, "get_ga_code", mock_get)
response = auth_client.get("/api/codes/999")
assert response.status_code == 404
assert response.json()["detail"] == "Code not found"
def test_create_code_unauth(client: TestClient):
response = client.post("/api/codes", data=json.dumps({"title": "something"}))
assert response.status_code == 401
def test_create_code(auth_client: TestClient, monkeypatch):
test_request_payload = {"name": "Test Code", "code": "UA-TEST-CODE-123"}
test_response_payload = {
"id": 1,
"name": "Test Code",
"code": "UA-TEST-CODE-123",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": True,
}
def mock_post(db=None, item=None):
return test_response_payload
monkeypatch.setattr(dao, "create_ga_code", mock_post)
response = auth_client.post("/api/codes", data=json.dumps(test_request_payload),)
assert response.status_code == 201
assert "id" in response.json()
def test_create_code_invalid_json(auth_client: TestClient):
response = auth_client.post("/api/codes", data=json.dumps({"title": "something"}))
assert response.status_code == 422
def test_update_code_unauth(client: TestClient):
response = client.patch("/api/codes/1", data=json.dumps({"title": "something"}))
assert response.status_code == 401
def test_update_code(auth_client: TestClient, monkeypatch):
test_update_data = {"name": "HelloThere", "active": False}
def mock_put(db=None, id=None, item=None):
return 1
monkeypatch.setattr(dao, "update_ga_code", mock_put)
test_data = {
"id": 1,
"name": "HelloThere",
"code": "UA-TEST-CODE-123",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": False,
}
def mock_get(db=None, id=None):
return test_data
monkeypatch.setattr(dao, "get_ga_code", mock_get)
response = auth_client.patch("/api/codes/1", data=json.dumps(test_update_data))
assert response.status_code == 200
assert "id" in response.json()
def test_update_code_invalid(auth_client: TestClient, monkeypatch):
test_update_data = {"created_at": "Hello"}
def mock_put(db=None, id=None, item=None):
return 1
monkeypatch.setattr(dao, "update_ga_code", mock_put)
response = auth_client.patch("/api/codes/1", data=json.dumps(test_update_data))
assert response.status_code == 422
def test_delete_code_unauth(client: TestClient):
response = client.delete("/api/codes/1")
assert response.status_code == 401
def test_delete_code(auth_client: TestClient, monkeypatch):
def mock_delete(db=None, id=None):
return 1
monkeypatch.setattr(dao, "delete_ga_code", mock_delete)
response = auth_client.delete("/api/codes/1")
assert response.status_code == 204
assert response.json() is None
def test_delete_code_incorrect_id(auth_client: TestClient, monkeypatch):
def mock_get(db=None, id=None):
return 0
monkeypatch.setattr(dao, "delete_ga_code", mock_get)
response = auth_client.delete("/api/codes/999")
assert response.status_code == 404
assert response.json()["detail"] == "Code not found"
def test_get_properties_unauth(client: TestClient):
response = client.get("/api/properties")
assert response.status_code == 401
def test_get_properties(auth_client: TestClient, monkeypatch):
"""
Test GET codes endpoint
"""
test_data = [
{
"id": 1,
"name": "Test Property",
"code": "testcode",
"ga_code_id": 1,
"ga_code": {
"id": 1,
"name": "Test Code",
"code": "UA-TEST-CODE-123",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": True,
},
"image": "pixel",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": True,
},
{
"id": 2,
"name": "Test Property",
"code": "testcode",
"ga_code_id": 1,
"ga_code": {
"id": 1,
"name": "Test Code",
"code": "UA-TEST-CODE-123",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": True,
},
"image": "pixel",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": True,
},
]
def mock_get(db=None, id=None, skip=None, limit=None):
return test_data
monkeypatch.setattr(dao, "get_properties", mock_get)
response = auth_client.get("/api/properties")
assert response.status_code == 200
assert len(response.json()) == len(test_data)
def test_get_property_unauth(client: TestClient):
response = client.get("/api/properties/1")
assert response.status_code == 401
def test_get_property(auth_client: TestClient, monkeypatch):
"""
Test GET code endpoint
"""
test_data = {
"id": 1,
"name": "Test Property",
"code": "testcode",
"ga_code_id": 1,
"ga_code": {
"id": 1,
"name": "Test Code",
"code": "UA-TEST-CODE-123",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": True,
},
"image": "pixel",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": True,
}
def mock_get(db=None, id=None):
return test_data
monkeypatch.setattr(dao, "get_property", mock_get)
response = auth_client.get("/api/properties/1")
assert response.status_code == 200
assert response.json()["id"] == test_data["id"]
def test_get_property_incorrect_id(auth_client: TestClient, monkeypatch):
def mock_get(db=None, id=None):
return None
monkeypatch.setattr(dao, "get_property", mock_get)
response = auth_client.get("/api/properties/999")
assert response.status_code == 404
assert response.json()["detail"] == "Property not found"
def test_create_property_unauth(client: TestClient):
response = client.post("/api/properties", data=json.dumps({"title": "something"}))
assert response.status_code == 401
def test_create_property(auth_client: TestClient, monkeypatch):
test_request_payload = {"name": "Test Property", "ga_code_id": 1, "image": "pixel"}
test_response_payload = {
"id": 1,
"name": "Test Property",
"code": "testcode",
"ga_code_id": 1,
"ga_code": {
"id": 1,
"name": "Test Code",
"code": "UA-TEST-CODE-123",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": True,
},
"image": "pixel",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": True,
}
def mock_post(db=None, item=None):
return test_response_payload
monkeypatch.setattr(dao, "create_property", mock_post)
response = auth_client.post(
"/api/properties", data=json.dumps(test_request_payload),
)
assert response.status_code == 201
assert "id" in response.json()
def test_create_property_invalid_json(auth_client: TestClient):
response = auth_client.post(
"/api/properties", data=json.dumps({"title": "something"})
)
assert response.status_code == 422
# need to build out
def test_update_property_unauth(client: TestClient):
response = client.patch(
"/api/properties/1", data=json.dumps({"title": "something"})
)
assert response.status_code == 401
def test_update_property(auth_client: TestClient, monkeypatch):
test_update_data = {"name": "HelloThere", "active": False}
def mock_put(db=None, id=None, item=None):
return 1
monkeypatch.setattr(dao, "update_property", mock_put)
test_data = {
"id": 1,
"name": "HelloThere",
"code": "testcode",
"ga_code_id": 1,
"ga_code": {
"id": 1,
"name": "Test Code",
"code": "UA-TEST-CODE-123",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": True,
},
"image": "pixel",
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
"active": False,
}
def mock_get(db=None, id=None):
return test_data
monkeypatch.setattr(dao, "get_property", mock_get)
response = auth_client.patch("/api/properties/1", data=json.dumps(test_update_data))
assert response.status_code == 200
assert "id" in response.json()
def test_update_property_invalid(auth_client: TestClient, monkeypatch):
test_update_data = {"created_at": "Hello"}
def mock_put(db=None, id=None, item=None):
return 1
monkeypatch.setattr(dao, "update_property", mock_put)
response = auth_client.patch("/api/properties/1", data=json.dumps(test_update_data))
assert response.status_code == 422
def test_delete_property_unauth(client: TestClient):
response = client.delete("/api/properties/1")
assert response.status_code == 401
def test_delete_property(auth_client: TestClient, monkeypatch):
def mock_delete(db=None, id=None):
return 1
monkeypatch.setattr(dao, "delete_property", mock_delete)
response = auth_client.delete("/api/properties/1")
assert response.status_code == 204
assert response.json() is None
def test_delete_property_incorrect_id(auth_client: TestClient, monkeypatch):
def mock_get(db=None, id=None):
return 0
monkeypatch.setattr(dao, "delete_property", mock_get)
response = auth_client.delete("/api/properties/999")
assert response.status_code == 404
assert response.json()["detail"] == "Property not found"
@pytest.mark.parametrize(
"name, info", [[name, info] for name, info in image_queries.items()],
)
def test_api_images(client: TestClient, name: str, info: typing.Dict):
response = client.get(f"/api/other/images?type={name}")
assert response.status_code == 200
assert response.headers["content-type"] == info.get("media_type")
# get_image
|
class InitFromSlots(type):
def __new__(meta, name, bases, bodydict):
slots = bodydict['__slots__']
if slots and '__init__' not in bodydict:
parts = ['def __init__(self, %s):' % ', '.join(slots)]
for slot in slots:
parts.append(' self.%s = %s' % (slot, slot))
exec '\n'.join(parts) in bodydict
super_new = super(InitFromSlots, meta).__new__
return super_new(meta, name, bases, bodydict)
class Record(object):
__metaclass__ = InitFromSlots
__slots__ = ()
def _items(self):
for name in self.__slots__:
yield name, getattr(self, name)
def __repr__(self):
args = ', '.join('%s=%r' % tup for tup in self._items())
return '%s(%s)' % (type(self).__name__, args)
def __iter__(self):
for name in self.__slots__:
yield getattr(self, name)
def __getstate__(self):
return dict(self._items())
def __setstate__(self, statedict):
self.__init__(**statedict)
# =========================
# At the interactive prompt
# =========================
>>> class Point(Record):
... __slots__ = 'x', 'y'
...
>>> Point(3, 4)
Point(x=3, y=4)
>>> Point(y=5, x=2)
Point(x=2, y=5)
>>> point = Point(-1, 42)
>>> point.x, point.y
(-1, 42)
>>> x, y = point
>>> x, y
(-1, 42)
>>> class Badger(Record):
... __slots__ = 'large', 'wooden'
...
>>> badger = Badger('spam', 'eggs')
>>> import pickle
>>> pickle.loads(pickle.dumps(badger))
Badger(large='spam', wooden='eggs')
>>> class Answer(Record):
... __slots__ = 'life', 'universe', 'everything'
...
>>> eval(repr(Answer(42, 42, 42)))
Answer(life=42, universe=42, everything=42)
|
# -*- coding: utf-8 -*-
import os
import cv2
import csv
import numpy as np
import pandas as pd
import progressbar
from scripts import Timer
from scripts import Retrievor
from scripts import Extractor
from scripts import mean_reciprocal_rank
from scripts import mean_mean_average_precision
from scripts import rank1_accuracy
from preprocessors import AspectAwarePreprocessor
from preprocessors import ImageToArrayPreprocessor
# data
data = pd.read_csv('./outputs/test.csv')
# initialize process
iap = ImageToArrayPreprocessor()
aap = AspectAwarePreprocessor(224, 224)
# evaluation.csv
if not os.path.isfile('./outputs/evaluation.csv'):
with open('./outputs/evaluation.csv', 'w') as file:
writer = csv.writer(file)
writer.writerow([
'extractor', 'distance', 'mrr', 'mmap', 'accuracy', 'average_time', 'errors', 'comparison_item'
])
# type of extractor
types = [
'ORB', 'SURF', 'AKAZE',
'VGG16', 'VGG19', 'MobileNet',
'autoencoder'
]
# distances
distances = [
'cosinus', 'manhattan', 'euclidean'
]
# comparison items (label sets to evaluate retrieval against)
items = [
'color', 'style', 'both'
]
for dType in types:
db, errors = [], 0
colors, styles = [], []
both = []
extractor = Extractor(dType)
timer = Timer()
print('[INFO]: Working on {} ...'.format(dType))
widgets = [
"Evaluation: PART 1 - Extraction ", progressbar.Percentage(), " ",
progressbar.Bar(), " ", progressbar.ETA()
]
pbar = progressbar.ProgressBar(maxval=len(data), widgets=widgets).start()
# loop over data
for index, row in data.iterrows():
# preprocessing
timer.tic()
image = cv2.imread(row.path)
image = aap.preprocess(image)
if dType in ['VGG16', 'VGG19', 'MobileNet', 'autoencoder']:
image = iap.preprocess(image)
features = extractor.extract(image)
timer.toc()
if not isinstance(features, np.ndarray):
errors += 1
continue
db.append(features)
colors.append(row.color)
styles.append(row.type)
both.append(row.color + '_' + row.type)
pbar.update(index)
pbar.finish()
average_time_extraction = timer.average_time
for distance in distances:
timer.clear()
        # variables
retrievor = Retrievor('./features/' + dType + '_features.pck')
retrievals_color, retrievals_style = [], []
retrievals_both = []
widgets = [
"Evaluation: PART 2 - Search ", progressbar.Percentage(), " ",
progressbar.Bar(), " ", progressbar.ETA()
]
pbar = progressbar.ProgressBar(maxval=len(data), widgets=widgets).start()
for i, features in enumerate(db):
timer.tic()
_colors, _styles, _ = retrievor.search(features, distance, depth=5)
timer.toc()
retrievals_color.append(_colors)
retrievals_style.append(_styles)
retrievals_both.append([
c + '_' + s for c, s in zip(_colors, _styles)
])
pbar.update(i)
pbar.finish()
# summary
average_time_search = timer.average_time
for item in items:
if item == 'color':
mrr = mean_reciprocal_rank(retrievals_color, colors)
mmap = mean_mean_average_precision(retrievals_color, colors)
rank_1 = rank1_accuracy(retrievals_color, colors)
            elif item == 'style':
mrr = mean_reciprocal_rank(retrievals_style, styles)
mmap = mean_mean_average_precision(retrievals_style, styles)
rank_1 = rank1_accuracy(retrievals_style, styles)
else:
mrr = mean_reciprocal_rank(retrievals_both, both)
mmap = mean_mean_average_precision(retrievals_both, both)
rank_1 = rank1_accuracy(retrievals_both, both)
with open('./outputs/evaluation.csv', 'a+', newline='') as file:
writer = csv.writer(file)
writer.writerow([
dType, distance, mrr, mmap, rank_1, average_time_search + average_time_extraction, errors, item
])
|
import unittest
#from nose.plugins.attrib import attr
class DummyTestL(unittest.TestCase):
"""Today is brought to you by the letter L"""
def test_something(self):
"""
Lizards love testing
Lizards have very long tongues.
"""
self.assertTrue(True, 'example assertion')
#Attributes don't work in subclasses of unittest.TestCase
#@attr(description="Lanky developers are not lanky anymore")
def test_something_else(self):
"""
Lanky developers love testing.
"""
self.assertTrue(True, 'example assertion')
|
#!/usr/bin/python3
import argparse
from http.server import HTTPServer, SimpleHTTPRequestHandler
def main(port):
SimpleHTTPRequestHandler.extensions_map[".wasm"] = "application/wasm"
server = HTTPServer(("localhost", port), SimpleHTTPRequestHandler)
server.serve_forever()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int, default=8000, help="port to bind to", required=False)
args = parser.parse_args()
main(args.port)
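# Usage sketch (the script file name is not given here): run
#   python3 <this_script>.py --port 8000
# and open http://localhost:8000/. The explicit "application/wasm" mapping matters
# because browsers only accept WebAssembly.instantiateStreaming() when the .wasm
# file is served with that Content-Type.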
|
# Link --> https://www.hackerrank.com/challenges/30-binary-numbers/problem
# Code:
import math
import os
import random
import re
import sys
if __name__ == '__main__':
n = int(input().strip())
binary = bin(n).replace("0b", "")
answer = 0
current_answer = 0
for i in range(len(binary)):
if binary[i] == '1':
current_answer += 1
else:
answer = max(answer, current_answer)
current_answer = 0
print(max(answer, current_answer))
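# Worked example: n = 13 -> binary "1101"; the runs of consecutive 1s have lengths
# 2 and 1, so the program prints 2.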
|
# ==================================================================================
#
# Copyright (c) 2018, Evangelos G. Karakasis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ==================================================================================
# MovingWindow
# ============
#
# In this module, a moving (or sliding, or rolling) window algorithm for
# filtering/processing signals is implemented. It has been created in
# order to serve as a tool in 1D signal processing. There are many
# different situations (find envelopes, trends, smooth or even normalize a
# signal) where a sliding window along with a properly selected metric
# (mean, max, min, rms, std, etc.) will do a great job.
#
# Dependencies
# ~~~~~~~~~~~~
#
# This module depends on three different packages:
#
# * NumPy
# * SciPy
# * InputCheck
#
# The first two packages are known to everyone interested in data science.
# Something like:
#
# `pip install <packageName>`
#
# or
#
# `conda install <packageName>`
#
# if you use Anaconda or Miniconda will do the job.
#
# For the installation of the third package please read the corresponding
# `README.md <https://github.com/ekarakasis/InputCheck/blob/master/README.md>`__
#
# Installation
# ~~~~~~~~~~~~
#
# To install this package just download the repository from GitHub or by
# using the following command line:
#
# `git clone https://github.com/ekarakasis/MovingWindow`
#
# Afterwards, go to the local root folder, open a command line and run:
#
# `pip install .`
#
# **NOTE:** *Do not forget the dot punctuation mark (“.”) in the end of
# the “pip install .” command*
#
# Run the Tests
# -------------
#
# To run the tests just go to the *root/MovingWindow/tests* folder, open a
# command line and write:
#
# `python test_all.py`
#
# License
# -------
#
# This project is licensed under the MIT License.
import sys
sys.path.append('../')
sys.path.append('../../')
from MovingWindow import allowedWindows
from MovingWindow import get_window, np
from MovingWindow import acceptedTypes, acceptedValues, functionType, enableChecks, chkCmds
def SlidingWindow(signal, windowSize, step=1):
"""A generator, which results in the sliding window values
Parameters
----------
signal : numpy.ndarray
The actual signal we want to process.
windowSize : int
The size of the moving/sliding window.
step : int
The step that determines the overlap percentage of
two consecutive windows.
Yields
------
numpy.ndarray
The moving window values.
"""
# a pad is created in order to deal with potential edges issues
signal = np.pad(signal, (int((windowSize-1)/2),
int((windowSize-1)/2)), 'reflect')
numOfChunks = int(((len(signal)-windowSize)/step)+1)
for i in range(0, numOfChunks*step, step):
yield signal[i:i+windowSize]
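# Example (sketch): for signal [1, 2, 3] with windowSize=3 and step=1, the
# reflect-padded signal is [2, 1, 2, 3, 2] and the generator yields the windows
# [2, 1, 2], [1, 2, 3], [2, 3, 2], i.e. one window per original sample.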
@acceptedTypes(np.ndarray, int, int, functionType, str, bool, typesCheckEnabled = enableChecks)
@acceptedValues(
chkCmds.noCheck,
chkCmds.MinV(2),
chkCmds.MinV(1),
chkCmds.noCheck,
chkCmds.SetV(allowedWindows),
chkCmds.noCheck,
valueCheckEnabled = enableChecks,
)
def MovingWindow(signal, windowSize=16, step=1, metric=np.mean, window='box', normalizedWindow=False):
"""Applies a moving window-based processing on the input signal.
In this function, a moving (or sliding, or rolling) window algorithm for
filtering/processing signals is implemented. It has been created in
order to serve as a tool in 1D signal processing.
Parameters
----------
signal : numpy.ndarray
The actual signal we want to process.
windowSize : int
The size of the moving window. This input must have value greater
than or equal to 2.
step : int
Determines the overlap percentage of two consecutive windows.
This input must have value greater than or equal to 1.
metric : <class 'function'>
A function which is applied to each window
(e.g. for a *moving average* the metric must be <np.mean\>).
window : str
The window type we want to apply. The allowed window types are:
* box
* gaussian
* nuttall
* hanning
* hann
* hamming
* blackman
* blackmanharris
normalizedWindow : bool
When this flag is True, the selected window (e.g. hann) is
normalized so as the sum of its elements to be equal to 1.
Raises
------
TypeError
If any input has different type.
ValueError
If any input has value different than the expected.
Returns
-------
numpy.ndarray
The function returns a moving window-based processed signal.
"""
# we want the window size to be odd number like 3, 5, 7, etc.
if windowSize % 2 == 0:
windowSize = windowSize + 1
if window == 'box':
WT = get_window('boxcar', windowSize, fftbins=False)
elif window == 'gaussian':
WT = get_window(('gaussian', windowSize/6), windowSize, fftbins=False)
else:
WT = get_window(window, windowSize, fftbins=False)
if normalizedWindow:
WT = WT / np.sum(WT)
out = []
outa = out.append
for item in SlidingWindow(signal, windowSize, step):
# here we apply the window and then the metric
outa(metric(item*WT))
return np.array(out)
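# ---------------------------------------------------------------------------
# Minimal usage sketch (the sample signal below is illustrative): a plain moving
# average is obtained with metric=np.mean and the default 'box' window, exactly
# as suggested in the docstring above.
if __name__ == '__main__':
    t = np.linspace(0.0, 2.0 * np.pi, 500)
    noisy = np.sin(t) + 0.2 * np.random.randn(t.size)
    smoothed = MovingWindow(noisy, windowSize=21, step=1, metric=np.mean, window='box')
    print(noisy.shape, smoothed.shape)  # both (500,): one output value per input sample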
|
from netforce.model import get_model
from netforce import migration
class Migration(migration.Migration):
_name="cms.theme"
_version="1.183.0"
def migrate(self):
res=get_model("cms.theme").search([["state","=","active"]])
if res:
return
vals={
"name": "olsonkart",
}
get_model("cms.theme").create(vals)
vals={
"name": "ecom",
}
theme_id=get_model("cms.theme").create(vals)
obj=get_model("cms.theme").browse([theme_id])
obj.activate()
Migration.register()
|
#!/usr/bin/env python3
def find_matching(L, pattern):
indices = []
for idx, value in enumerate(L):
if pattern in value:
indices.append(idx)
return indices
def main():
indices = find_matching(["english", "finnish", "book", "cat", "ticklish"], "ish")
print(indices)
if __name__ == "__main__":
main()
|
# Task 4
# Find the product of the numbers from 1 to 10 and print the result to the screen.
n = 1
for i in range(1,11):
n *= i
print(n)
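# The product of the numbers 1 through 10 is 10! = 3628800.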
|
from django.db.models import Q
from accounts.models import User
from accounts.serializers import UserSerializer
from chanel.models import Chanel
from django.http import JsonResponse
from rest_framework.status import HTTP_200_OK
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from chanel.serializers import ChanelSerializer
from posts.models import Post
from posts.serializers import PostSerializer
class Search(APIView):
permission_classes = (IsAuthenticated,)
def post(self, request):
body = request.data.get('body')
result = {}
post_records = Post.objects.filter(Q(title__icontains=body) | Q(body__icontains=body))
result['post'] = PostSerializer(post_records, many=True).data
chanel_records = Chanel.objects.filter(Q(description__icontains=body) | Q(identifier__icontains=body))
result['chanel'] = ChanelSerializer(chanel_records, many=True).data
user_records = User.objects.filter(Q(first_name__icontains=body) | Q(last_name__icontains=body) |
Q(email__icontains=body) | Q(username__icontains=body))
result['user'] = UserSerializer(user_records, many=True).data
return JsonResponse(data={'data': result, 'success': True}, status=HTTP_200_OK)
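# Example request sketch (the URL this view is routed to is defined elsewhere):
# an authenticated POST with JSON body {"body": "django"} returns
#   {"data": {"post": [...], "chanel": [...], "user": [...]}, "success": true}
# where each list holds the serialized records whose searched fields contain "django".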
|
# -*- coding: utf-8 -*-
"""
The watch command line is not tested because of limitations with
``CliRunner``, which cannot communicate with the invoked command, so we
are unable to send an interrupt ("CTRL+C") to stop watching at the end of
the tests.
"""
|
#!/usr/bin/env python3.6
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
return 'Employees Service'
if __name__ == '__main__':
app.run(ssl_context=('../ca2/server-cert.pem', '../ca2/server-key.pem'))
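# Run sketch: with the certificate and key present at the paths above, the app is
# served over HTTPS on Flask's default port, i.e. https://localhost:5000/.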
|
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from config import chains, isValidAppId, APP_ID
import json, base64
from database import client
from binascii import hexlify,unhexlify
from utils.base58 import burn_address
def BlockchainDaemon(app):
try:
uri = "http://%s:%s@%s:%s"%(chains[chains['active']]["user"], chains[chains['active']]["password"], chains[chains['active']]["server"], chains[chains['active']]["port"])
client.UpdateChainStatus('XTO','OK')
a = True
return AuthServiceProxy(uri)
    except Exception:
a = False
if not a:
app.logger.error("Cannot connect to %s daemon.", chains[chains['active']]["abbr"])
client.UpdateChainStatus('XTO','OFFLINE')
def ProcessBlock(app,block):
app.logger.info("Processing block #%s: %s", block["height"], block["hash"])
td = BlockchainDaemon(app)
for tx in block['tx']:
rawtx = td.getrawtransaction(tx)
transaction = td.decoderawtransaction(rawtx)
for vout in transaction["vout"]:
if "OP_RETURN" in vout["scriptPubKey"]["asm"]:
tag = vout["scriptPubKey"]["asm"].replace("OP_RETURN","").strip()
                if tag == hexlify(hexlify(APP_ID[0]))[0:40] or tag == hexlify(hexlify(APP_ID[1]))[0:40]:
app.logger.info('Data found @ %s in block %s', tx, block['height'])
# It's OP_RETURN, but is there a valid JSON data block with it?
txdata = base64.b64decode(transaction["data"])
# We're pulling down all IP data, we don't care what app it's for yet
# Just store it in the SQL table
if not client.SaveTXData(chains[chains['active']]["abbr"],tx,txdata,block['height']):
app.logger.warning("Data found in TX %s but malformed, skipping.", tx)
def ScanMempool(app):
td = BlockchainDaemon(app)
txdata = td.getrawmempool()
for tx in txdata:
rawtx = td.getrawtransaction(tx)
transaction = td.decoderawtransaction(rawtx)
for vout in transaction["vout"]:
if "OP_RETURN" in vout["scriptPubKey"]["asm"]:
tag = vout["scriptPubKey"]["asm"].replace("OP_RETURN","").strip()
                if tag == hexlify(hexlify(APP_ID[0]))[0:40] or tag == hexlify(hexlify(APP_ID[1]))[0:40]:
# It's OP_RETURN, but is there a valid JSON data block with it?
txdata = base64.b64decode(transaction["data"])
# We're pulling down all IP data, we don't care what app it's for yet
# Just store it in the SQL table
if not client.SaveTXData(chains[chains['active']]["abbr"],tx,txdata,None):
app.logger.warning("Data found in TX %s but malformed, skipping.", tx)
def SyncChain(app,rescan=True):
# The process of validating a blockchain by the hash values is VITAL to the trust relationships possible
# through the mathematics. This process is similar to the operation of Electrum and other Simple Payment
# Verification schemas. Since we are not validating payments, this is Simple Data Verification (or SDV).
# Start at the genesis block
td = BlockchainDaemon(app)
lb = hexlify(client.GetLastBlockProcessed())
if rescan:
data=td.getblock(chains[chains['active']]["genesis"])
client.TruncateBlockDataTable()
else:
data=td.getblock(lb)
client.UpdateChainStatus('XTO','SYNC')
    while 'nextblockhash' in data:
try:
prev_hash=data['hash']
data=td.getblock(data['nextblockhash'])
if prev_hash != data['previousblockhash']:
app.logger.info("Hash match sanity check failed: %s", data['hash'])
ProcessBlock(app,data)
except Exception as inst:
app.logger.error(type(inst)) # the exception instance
app.logger.error(inst.args) # arguments stored in .args
app.logger.error(inst)
break
if not client.UpdateLastBlockHash(chains[chains['active']]["abbr"],data['hash']):
app.logger.error("Error updating last block hash.")
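# Note on SyncChain above: the loop walks the chain via each block's 'nextblockhash',
# starting from the genesis block on a full rescan or from the last processed block
# otherwise, and the prev_hash comparison checks that every block's 'previousblockhash'
# matches the hash just visited, logging a sanity-check failure if it does not
# (the SDV analogue of SPV header chaining described in the comment above).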
def CheckBalance(app):
td = BlockchainDaemon(app)
derp=td.getinfo()
return derp['balance']
def commit_object(app,msg):
address = burn_address()
try:
#data = BlockchainDaemon(app).sendtoaddress(address,0.0001,base64.b64encode(msg),hexlify(APP_ID[1])[0:40]) # Always APP_ID 1
return True
    except JSONRPCException as e:
app.logger.error(type(e)) # the exception instance
app.logger.error(e.args) # arguments stored in .args
app.logger.error(e)
return False
|
class Solution:
def solve(self, matrix, r, c, target):
dfs = [[r,c]]
original_color = matrix[r][c]
seen = {(r,c)}
while dfs:
cr,cc = dfs.pop()
matrix[cr][cc] = target
for nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]:
if 0<=nr<len(matrix) and 0<=nc<len(matrix[0]) and (nr,nc) not in seen and matrix[nr][nc]==original_color:
dfs.append([nr,nc])
seen.add((nr,nc))
return matrix
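# Minimal usage sketch (the grid below is illustrative): flood-fill the region
# connected to cell (0, 0) with the new color 2.
if __name__ == '__main__':
    grid = [[1, 1, 0],
            [1, 0, 0],
            [1, 1, 1]]
    print(Solution().solve(grid, 0, 0, 2))  # [[2, 2, 0], [2, 0, 0], [2, 2, 2]]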
|
# import sys
# input = sys.stdin.readline
N, *a = [int(x) for x in open(0)]
a.insert(0, None)
first = 1
pres = first
count = 0
nex = a[pres] # press the button
count += 1
for i in range(N - 1): # operations are at most N-1 times.
if nex == 2:
break
prev = pres
pres = nex
nex = a[pres] # press the button
count += 1
if nex == prev or nex == pres:
break
if nex == 2:
print(count)
else:
print(-1)
|
from __future__ import print_function
import os
from tqdm import tqdm
import time
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.autograd import Variable
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
from model import get_model_for_attack
parser = argparse.ArgumentParser(description='PyTorch CIFAR PGD Attack Evaluation with Output Diversified Initialization')
parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
help='input batch size for testing (default: 100)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--num-restarts', default=6, type=int,
help='number of restarts')
parser.add_argument('--epsilon', default=8/255, type=float,
help='perturbation')
parser.add_argument('--num-steps', default=20, type=int,
help='perturb number of steps')
parser.add_argument('--step-size', default=2/255, type=float,
help='perturb step size')
parser.add_argument('--ODI-num-steps', default=2, type=int,
help='ODI perturb number of steps')
parser.add_argument('--ODI-step-size', default=8/255, type=float,
help='ODI perturb step size')
parser.add_argument('--lossFunc', help='loss function for PGD',
type=str, default='margin', choices=['xent','margin'])
parser.add_argument('--random',
default=True,
help='random initialization for PGD')
parser.add_argument('--data-dir',
default='./data',
help='path to data folder')
'''
parser.add_argument('--model-path',
default='./checkpoints/model-latest.pt',
help='model for white-box attack evaluation')
parser.add_argument('--source-model-path',
default='./checkpoints/model-latest.pt',
help='source model for black-box attack evaluation')
parser.add_argument('--target-model-path',
default='./checkpoints/model-latest.pt',
help='target model for black-box attack evaluation')
'''
parser.add_argument('--model_name', type=str, default='model2')
parser.add_argument('--white-box-attack', default=1,type=int,
help='whether perform white-box attack')
parser.add_argument('--arch', help='architectures',
type=str, default='ResNet', choices=['ResNet','WideResNet'])
parser.add_argument('--archTarget', help='architectures of target model',
type=str, default='ResNet', choices=['ResNet','WideResNet'])
parser.add_argument('--archSource', help='architectures of source model',
type=str, default='ResNet', choices=['ResNet','WideResNet'])
parser.add_argument('--gpu_id', type=str, default="2,3")
args = parser.parse_args()
print(args)
# settings
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
# set up data loader
transform_test = transforms.Compose([transforms.ToTensor(),])
testset = torchvision.datasets.CIFAR10(root=args.data_dir, train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
def margin_loss(logits,y):
logit_org = logits.gather(1,y.view(-1,1))
logit_target = logits.gather(1,(logits - torch.eye(10)[y].to("cuda") * 9999).argmax(1, keepdim=True))
loss = -logit_org + logit_target
loss = torch.sum(loss)
return loss
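# Note on margin_loss above: for each sample it gathers the true-class logit and the
# largest other-class logit (the true class is masked out by subtracting 9999 from its
# column before the argmax), then sums (other - true) over the batch. Maximizing this
# margin is the alternative attack objective to cross-entropy selected via --lossFunc.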
def _pgd_whitebox(model,
X,
y,
epsilon=args.epsilon,
num_steps=args.num_steps,
step_size=args.step_size,
ODI_num_steps=args.ODI_num_steps,
ODI_step_size=args.ODI_step_size
):
out = model(X)
acc_clean = (out.data.max(1)[1] == y.data).float().sum()
X_pgd = Variable(X.data, requires_grad=True)
randVector_ = torch.FloatTensor(*model(X_pgd).shape).uniform_(-1.,1.).to(device)
if args.random:
random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).to(device)
X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)
for i in range(ODI_num_steps + num_steps):
opt = optim.SGD([X_pgd], lr=1e-3)
opt.zero_grad()
with torch.enable_grad():
if i < ODI_num_steps:
loss = (model(X_pgd) * randVector_).sum()
elif args.lossFunc == 'xent':
loss = nn.CrossEntropyLoss()(model(X_pgd), y)
else:
loss = margin_loss(model(X_pgd),y)
loss.backward()
if i < ODI_num_steps:
eta = ODI_step_size * X_pgd.grad.data.sign()
else:
eta = step_size * X_pgd.grad.data.sign()
X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
X_pgd = Variable(X.data + eta, requires_grad=True)
X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
acc_each = (model(X_pgd).data.max(1)[1] == y.data).detach().cpu().numpy()
acc_pgd = (model(X_pgd).data.max(1)[1] == y.data).float().sum()
return acc_clean, acc_pgd, acc_each
def _pgd_blackbox(model_target,
model_source,
X,
y,
epsilon=args.epsilon,
num_steps=args.num_steps,
step_size=args.step_size,
ODI_num_steps=args.ODI_num_steps,
ODI_step_size=args.ODI_step_size
):
out = model_target(X)
acc_clean = (out.data.max(1)[1] == y.data).float().sum()
X_pgd = Variable(X.data, requires_grad=True)
randVector_ = torch.FloatTensor(*out.shape).uniform_(-1.,1.).to(device)
if args.random:
random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).to(device)
X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)
for i in range(ODI_num_steps + num_steps):
opt = optim.SGD([X_pgd], lr=1e-3)
opt.zero_grad()
with torch.enable_grad():
if i < ODI_num_steps:
loss = (model_source(X_pgd) * randVector_).sum()
elif args.lossFunc == 'xent':
loss = nn.CrossEntropyLoss()(model_source(X_pgd), y)
else:
loss = margin_loss(model_source(X_pgd),y)
loss.backward()
if i < ODI_num_steps:
eta = ODI_step_size * X_pgd.grad.data.sign()
else:
eta = step_size * X_pgd.grad.data.sign()
X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
X_pgd = Variable(X.data + eta, requires_grad=True)
X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
acc_each = (model_target(X_pgd).data.max(1)[1] == y.data).detach().cpu().numpy()
acc_pgd = (model_target(X_pgd).data.max(1)[1] == y.data).float().sum()
return acc_clean, acc_pgd, acc_each
def eval_adv_test_whitebox(model, device, test_loader):
"""
evaluate model by white-box attack
"""
model.eval()
acc_total = np.ones(10000)
acc_curve = []
for _ in tqdm(range(args.num_restarts)):
natural_acc_oneshot = 0
robust_acc_oneshot = 0
for i, [data, target] in enumerate(test_loader):
bstart = i * args.test_batch_size
bend = (i+1) * args.test_batch_size
data, target = data.to(device), target.to(device)
# pgd attack
X, y = Variable(data, requires_grad=True), Variable(target)
acc_natural, acc_robust, acc_each = _pgd_whitebox(model, X, y)
acc_total[bstart:bend] = acc_total[bstart:bend] * acc_each
natural_acc_oneshot += acc_natural
robust_acc_oneshot += acc_robust
print('natural_acc_oneshot: ', natural_acc_oneshot)
print('robust_acc_oneshot: ', robust_acc_oneshot)
print('accuracy_total: ', acc_total.sum())
acc_curve.append(acc_total.sum())
print('accuracy_curve: ', acc_curve)
def eval_adv_test_blackbox(model_target, model_source, device, test_loader):
"""
evaluate model by black-box attack
"""
model_target.eval()
model_source.eval()
acc_total = np.ones(10000)
acc_curve = []
for _ in range(args.num_restarts):
natural_acc_oneshot = 0
robust_acc_oneshot = 0
for i, [data, target] in enumerate(test_loader):
bstart = i * args.test_batch_size
bend = (i+1) * args.test_batch_size
data, target = data.to(device), target.to(device)
# pgd attack
X, y = Variable(data, requires_grad=True), Variable(target)
acc_natural, acc_robust,acc_each = _pgd_blackbox(model_target, model_source, X, y)
acc_total[bstart:bend] = acc_total[bstart:bend] * acc_each
natural_acc_oneshot += acc_natural
robust_acc_oneshot += acc_robust
print('natural_acc_oneshot: ', natural_acc_oneshot)
print('robust_acc_oneshot: ', robust_acc_oneshot)
print('accuracy_total: ', acc_total.sum())
acc_curve.append(acc_total.sum())
print('accuracy_curve: ', acc_curve)
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id  # select which GPU cards to use on a multi-GPU machine
gpu_num = max(len(args.gpu_id.split(',')), 1)
if args.white_box_attack:
'''
model = ResNet18().to(device) if args.arch=='ResNet' else WideResNet().to(device)
model.load_state_dict(torch.load(args.model_path))
'''
        model = get_model_for_attack(args.model_name).to(device)  # switch the model under attack according to model_name
model = nn.DataParallel(model, device_ids=[i for i in range(gpu_num)])
eval_adv_test_whitebox(model, device, test_loader)
else:
# black-box attack
print('pgd black-box attack')
model_target = ResNet18().to(device) if args.archTarget=='ResNet' else WideResNet().to(device)
model_target.load_state_dict(torch.load(args.target_model_path))
model_source = ResNet18().to(device) if args.archSource=='ResNet' else WideResNet().to(device)
model_source.load_state_dict(torch.load(args.source_model_path))
eval_adv_test_blackbox(model_target, model_source, device, test_loader)
if __name__ == '__main__':
st = time.time()
main()
ed = time.time()
print((ed - st) / 60 )
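# Example invocation (the script file name is an assumption):
#   python pgd_attack_odi.py --model_name model2 --lossFunc margin --num-restarts 6 --gpu_id 0
# acc_total keeps one flag per test image across restarts, so an image only counts
# towards the final robust accuracy if it withstands the ODI-initialized PGD attack
# in every restart.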
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# code mostly stolen from Dennis Kaarsemaker (see https://github.com/rthalley/dnspython/blob/master/examples/zonediff.py)
# i use this to verify my zones are imported correctly
# run sync.py, afterwards login to inwx web interface, go to nameservers, and you can download an axfr dump / zonefile
# usage: python3 diff.py example.com zones/example.com ~/Downloads/example.com_zone.txt
import dns.zone
import sys
def diff_zones(zone1, zone2, ignore_ttl=False, ignore_soa=False):
"""diff_zones(zone1, zone2, ignore_ttl=False, ignore_soa=False) -> changes
Compares two dns.zone.Zone objects and returns a list of all changes
in the format (name, oldnode, newnode).
If ignore_ttl is true, a node will not be added to this list if the
only change is its TTL.
If ignore_soa is true, a node will not be added to this list if the
only changes is a change in a SOA Rdata set.
The returned nodes do include all Rdata sets, including unchanged ones.
"""
changes = []
for name in zone1:
name = str(name)
n1 = zone1.get_node(name)
n2 = zone2.get_node(name)
if not n2:
changes.append((str(name), n1, n2))
elif _nodes_differ(n1, n2, ignore_ttl, ignore_soa):
changes.append((str(name), n1, n2))
for name in zone2:
n1 = zone1.get_node(name)
if not n1:
n2 = zone2.get_node(name)
changes.append((str(name), n1, n2))
return changes
def _nodes_differ(n1, n2, ignore_ttl, ignore_soa):
if ignore_soa or not ignore_ttl:
# Compare datasets directly
for r in n1.rdatasets:
if ignore_soa and r.rdtype == dns.rdatatype.SOA:
continue
if r not in n2.rdatasets:
return True
if not ignore_ttl:
return r.ttl != n2.find_rdataset(r.rdclass, r.rdtype).ttl
for r in n2.rdatasets:
if ignore_soa and r.rdtype == dns.rdatatype.SOA:
continue
if r not in n1.rdatasets:
return True
else:
return n1 != n2
def format_changes_plain(oldf, newf, changes, ignore_ttl=False):
"""format_changes(oldfile, newfile, changes, ignore_ttl=False) -> str
Given 2 filenames and a list of changes from diff_zones, produce diff-like
output. If ignore_ttl is True, TTL-only changes are not displayed"""
ret = "--- %s\n+++ %s\n" % (oldf, newf)
for name, old, new in changes:
ret += "@ %s\n" % name
if not old:
for r in new.rdatasets:
ret += "+ %s\n" % str(r).replace('\n', '\n+ ')
elif not new:
for r in old.rdatasets:
ret += "- %s\n" % str(r).replace('\n', '\n+ ')
else:
for r in old.rdatasets:
if r not in new.rdatasets or (
r.ttl != new.find_rdataset(r.rdclass, r.rdtype).ttl and
not ignore_ttl
):
ret += "- %s\n" % str(r).replace('\n', '\n+ ')
for r in new.rdatasets:
if r not in old.rdatasets or (
r.ttl != old.find_rdataset(r.rdclass, r.rdtype).ttl and
not ignore_ttl
):
ret += "+ %s\n" % str(r).replace('\n', '\n+ ')
return ret
origin = sys.argv[1]
revafile = sys.argv[2]
revbfile = sys.argv[3]
reva = dns.zone.from_file(revafile, origin=origin)
revb = dns.zone.from_file(revbfile, origin=origin)
changes = diff_zones(reva, revb, ignore_soa=True)
if not changes:
print("all good!")
else:
print(format_changes_plain(revafile, revbfile, changes))
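# Output sketch (record values are illustrative): a changed A record is rendered by
# format_changes_plain() roughly as
#   --- zones/example.com
#   +++ example.com_zone.txt
#   @ www
#   - 3600 IN A 192.0.2.1
#   + 3600 IN A 192.0.2.2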
|
import uuid
import time
from sqlalchemy import Column, Integer, String
from anarcho import db
ANDR = 'andr'
IOS = 'ios'
class Application(db.Model):
__tablename__ = "apps"
id = Column('id', Integer, primary_key=True)
name = Column('name', String)
package = Column('package', String)
app_key = Column('app_key', String, unique=True)
created_on = Column('created_on', Integer)
app_type = Column('app_type', String(4))
def __init__(self, name):
self.name = name
self.app_key = uuid.uuid1().__str__()
self.created_on = time.time()
def __repr__(self):
return '<Application %r>' % self.name
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import clone_principals
from indico.core.db.sqlalchemy.util.models import get_simple_column_attrs
from indico.modules.events.cloning import EventCloner
from indico.modules.events.models.events import EventType
from indico.modules.events.models.persons import EventPerson, EventPersonLink
from indico.modules.events.models.principals import EventPrincipal
from indico.modules.events.sessions import session_settings
from indico.util.i18n import _
class EventLocationCloner(EventCloner):
name = 'event_location'
friendly_name = _('Venue/Room')
is_default = True
@property
def is_available(self):
return self.old_event.has_location_info
def run(self, new_event, cloners, shared_data):
with db.session.no_autoflush:
self._clone_location(new_event)
db.session.flush()
def _clone_location(self, new_event):
new_event.location_data = self.old_event.location_data
class EventPersonCloner(EventCloner):
name = 'event_persons'
friendly_name = _('Persons')
is_internal = True
is_default = True
# We do not override `is_available` as we have cloners depending
# on this internal cloner even if it won't clone anything.
def run(self, new_event, cloners, shared_data):
self._person_map = {}
with db.session.no_autoflush:
self._clone_persons(new_event)
db.session.flush()
return {'person_map': self._person_map}
def _clone_persons(self, new_event):
attrs = get_simple_column_attrs(EventPerson) | {'user'}
for old_person in self.old_event.persons:
person = EventPerson(event=new_event)
person.populate_from_attrs(old_person, attrs)
assert person not in db.session
self._person_map[old_person] = person
class EventPersonLinkCloner(EventCloner):
name = 'event_person_links'
requires = {'event_persons'}
is_default = True
@property
def friendly_name(self):
if self.old_event.type_ == EventType.lecture:
return _('Speakers')
else:
return _('Chairpersons')
@property
def is_available(self):
return bool(self.old_event.person_links)
def run(self, new_event, cloners, shared_data):
self._person_map = shared_data['event_persons']['person_map']
with db.session.no_autoflush:
self._clone_person_links(new_event)
db.session.flush()
def _clone_person_links(self, new_event):
attrs = get_simple_column_attrs(EventPersonLink)
for old_link in self.old_event.person_links:
link = EventPersonLink()
link.populate_from_attrs(old_link, attrs)
link.person = self._person_map[old_link.person]
new_event.person_links.append(link)
class EventProtectionCloner(EventCloner):
name = 'event_protection'
friendly_name = _('ACLs and protection settings')
is_default = True
uses = {'event_roles'}
def run(self, new_event, cloners, shared_data):
self._event_role_map = shared_data['event_roles']['event_role_map'] if 'event_roles' in cloners else None
with db.session.no_autoflush:
self._clone_protection(new_event)
self._clone_session_coordinator_privs(new_event)
self._clone_acl(new_event)
self._clone_visibility(new_event)
db.session.flush()
def _clone_protection(self, new_event):
new_event.protection_mode = self.old_event.protection_mode
new_event.access_key = self.old_event.access_key
def _clone_visibility(self, new_event):
new_event.visibility = self.old_event.visibility if new_event.category == self.old_event.category else None
def _clone_session_coordinator_privs(self, new_event):
session_settings_data = session_settings.get_all(self.old_event)
session_settings.set_multi(new_event, {
'coordinators_manage_contributions': session_settings_data['coordinators_manage_contributions'],
'coordinators_manage_blocks': session_settings_data['coordinators_manage_blocks']
})
def _clone_acl(self, new_event):
new_event.acl_entries = clone_principals(EventPrincipal, self.old_event.acl_entries, self._event_role_map)
|
import os
from pathlib import Path
import unittest, pytest
import numpy as np
import torch as th
import dgl_graphloader
def create_category_node_feat(tmpdir, file_name, separator='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node{}feat1{}feat2{}feat3\n".format(separator,separator,separator))
node_feat_f.write("node1{}A{}B{}A,B\n".format(separator,separator,separator))
node_feat_f.write("node2{}A{}{}A\n".format(separator,separator,separator))
node_feat_f.write("node3{}C{}B{}C,B\n".format(separator,separator,separator))
node_feat_f.write("node3{}A{}C{}A,C\n".format(separator,separator,separator))
node_feat_f.close()
def create_numerical_node_feat(tmpdir, file_name, sep='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node{}feat1{}feat2{}feat3{}feat4\n".format(sep,sep,sep,sep))
node_feat_f.write("node1{}1.{}2.{}0.{}1.,2.,0.\n".format(sep,sep,sep,sep))
node_feat_f.write("node2{}2.{}-1.{}0.{}2.,-1.,0.\n".format(sep,sep,sep,sep))
node_feat_f.write("node3{}0.{}0.{}0.{}0.,0.,0.\n".format(sep,sep,sep,sep))
node_feat_f.write("node3{}4.{}-2.{}0.{}4.,-2.,0.\n".format(sep,sep,sep,sep))
node_feat_f.close()
def create_numerical_bucket_node_feat(tmpdir, file_name, sep='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node{}feat1{}feat2\n".format(sep,sep,sep))
node_feat_f.write("node1{}0{}0.\n".format(sep,sep,sep))
node_feat_f.write("node1{}0{}5.\n".format(sep,sep,sep))
node_feat_f.write("node1{}0{}15.\n".format(sep,sep,sep))
node_feat_f.write("node1{}0{}20.\n".format(sep,sep,sep))
node_feat_f.write("node1{}0{}10.1\n".format(sep,sep,sep))
node_feat_f.write("node1{}0{}25.\n".format(sep,sep,sep))
node_feat_f.write("node1{}0{}30.1\n".format(sep,sep,sep))
node_feat_f.write("node1{}0{}40.\n".format(sep,sep,sep))
node_feat_f.close()
def create_numerical_edge_feat(tmpdir, file_name, sep='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node_s{}node_d{}feat1\n".format(sep,sep))
node_feat_f.write("node1{}node4{}1.\n".format(sep,sep))
node_feat_f.write("node2{}node5{}2.\n".format(sep,sep))
node_feat_f.write("node3{}node6{}0.\n".format(sep,sep))
node_feat_f.write("node3{}node3{}4.\n".format(sep,sep))
node_feat_f.close()
def create_word_node_feat(tmpdir, file_name, separator='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node{}feat1{}feat2{}feat3\n".format(separator,separator,separator))
node_feat_f.write("node1{}A{}B{}24\n".format(separator,separator,separator))
node_feat_f.write("node2{}A{}{}1\n".format(separator,separator,separator))
node_feat_f.write("node3{}C{}B{}12\n".format(separator,separator,separator))
node_feat_f.write("node3{}A{}C{}13\n".format(separator,separator,separator))
node_feat_f.close()
def create_multiple_node_feat(tmpdir, file_name, separator='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node{}feat1{}feat2{}feat3\n".format(separator,separator,separator))
node_feat_f.write("node1{}A{}0.1{}A,B\n".format(separator,separator,separator))
node_feat_f.write("node2{}A{}0.3{}A\n".format(separator,separator,separator))
node_feat_f.write("node3{}C{}0.2{}C,B\n".format(separator,separator,separator))
node_feat_f.write("node4{}A{}-1.1{}A,C\n".format(separator,separator,separator))
node_feat_f.close()
def create_multiple_edge_feat(tmpdir, file_name, sep='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node_s{}node_d{}feat1{}feat2{}feat3\n".format(sep,sep,sep,sep))
node_feat_f.write("node1{}node_a{}0.2{}0.1{}1.1\n".format(sep,sep,sep,sep))
node_feat_f.write("node2{}node_b{}-0.3{}0.3{}1.2\n".format(sep,sep,sep,sep))
node_feat_f.write("node3{}node_c{}0.3{}0.2{}-1.2\n".format(sep,sep,sep,sep))
node_feat_f.write("node4{}node_d{}-0.2{}-1.1{}0.9\n".format(sep,sep,sep,sep))
node_feat_f.close()
def create_node_feats(tmpdir, file_name, separator='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node{}label1{}label2\n".format(separator,separator))
node_feat_f.write("node1{}A{}D,A\n".format(separator,separator))
node_feat_f.write("node2{}A{}E,C,D\n".format(separator,separator))
node_feat_f.write("node3{}C{}F,A,B\n".format(separator,separator))
node_feat_f.write("node4{}A{}G,E\n".format(separator,separator))
node_feat_f.write("node5{}A{}D,A\n".format(separator,separator))
node_feat_f.write("node6{}C{}E,C,D\n".format(separator,separator))
node_feat_f.write("node7{}A{}D,A\n".format(separator,separator))
node_feat_f.write("node8{}A{}E,C,D\n".format(separator,separator))
node_feat_f.close()
def create_node_labels(tmpdir, file_name, separator='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node{}label1{}label2\n".format(separator,separator))
node_feat_f.write("node1{}A{}D,A\n".format(separator,separator))
node_feat_f.write("node2{}A{}E,C,D\n".format(separator,separator))
node_feat_f.write("node3{}C{}F,A,B\n".format(separator,separator))
node_feat_f.write("node4{}A{}G,E\n".format(separator,separator))
node_feat_f.close()
def create_node_valid_labels(tmpdir, file_name, separator='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node{}label1{}label2\n".format(separator,separator))
node_feat_f.write("node5{}A{}D,A\n".format(separator,separator))
node_feat_f.write("node6{}C{}E,C,D\n".format(separator,separator))
node_feat_f.close()
def create_node_test_labels(tmpdir, file_name, separator='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node{}label1{}label2\n".format(separator,separator))
node_feat_f.write("node7{}A{}D,A\n".format(separator,separator))
node_feat_f.write("node8{}A{}E,C,D\n".format(separator,separator))
node_feat_f.close()
def create_edge_labels(tmpdir, file_name, sep='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node_0{}node_1{}label1{}label2\n".format(sep,sep,sep))
node_feat_f.write("node1{}node4{}A{}D,A\n".format(sep,sep,sep))
node_feat_f.write("node2{}node3{}A{}E,C,D\n".format(sep,sep,sep))
node_feat_f.write("node3{}node2{}C{}F,A,B\n".format(sep,sep,sep))
node_feat_f.write("node4{}node1{}A{}G,E\n".format(sep,sep,sep))
node_feat_f.close()
def create_train_edge_labels(tmpdir, file_name, sep='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node_0{}node_1{}label1{}label2\n".format(sep,sep,sep))
node_feat_f.write("node4{}node2{}A{}D,A\n".format(sep,sep,sep))
node_feat_f.write("node3{}node3{}A{}E,C,D\n".format(sep,sep,sep))
node_feat_f.close()
def create_graph_edges(tmpdir, file_name, sep='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node_0{}node_1{}rel_1{}rel_2\n".format(sep,sep,sep))
node_feat_f.write("node1{}node2{}A{}C\n".format(sep,sep,sep))
node_feat_f.write("node2{}node1{}A{}C\n".format(sep,sep,sep))
node_feat_f.write("node3{}node1{}A{}C\n".format(sep,sep,sep))
node_feat_f.write("node4{}node3{}A{}B\n".format(sep,sep,sep))
node_feat_f.write("node4{}node4{}A{}A\n".format(sep,sep,sep))
node_feat_f.close()
def create_graph_feat_edges(tmpdir, file_name, sep='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write("node_0{}node_1{}feat_1\n".format(sep,sep))
node_feat_f.write("node1{}node4{}0.1\n".format(sep,sep))
node_feat_f.write("node2{}node3{}0.2\n".format(sep,sep))
node_feat_f.write("node3{}node2{}0.3\n".format(sep,sep))
node_feat_f.write("node4{}node1{}0.4\n".format(sep,sep))
node_feat_f.write("node1{}node2{}0.5\n".format(sep,sep))
node_feat_f.write("node2{}node1{}0.6\n".format(sep,sep))
node_feat_f.write("node3{}node1{}0.7\n".format(sep,sep))
node_feat_f.write("node4{}node3{}0.8\n".format(sep,sep))
node_feat_f.write("node4{}node4{}0.9\n".format(sep,sep))
node_feat_f.close()
def create_multiple_label(tmpdir, file_name, sep='\t'):
node_feat_f = open(os.path.join(tmpdir, file_name), "w")
node_feat_f.write(
"node{}label1{}label2{}label3{}label4{}label5{}node_d{}node_d2{}node_d3\n".format(
sep,sep,sep,sep,sep,sep,sep,sep))
node_feat_f.write("node1{}A{}A{}C{}A,B{}A,C{}node3{}node1{}node4\n".format(
sep,sep,sep,sep,sep,sep,sep,sep))
node_feat_f.write("node2{}B{}B{}B{}A{}B{}node4{}node2{}node5\n".format(
sep,sep,sep,sep,sep,sep,sep,sep))
node_feat_f.write("node3{}C{}C{}A{}C,B{}A{}node5{}node1{}node6\n".format(
sep,sep,sep,sep,sep,sep,sep,sep))
node_feat_f.write("node4{}A{}A{}A{}A,C{}A,B{}node6{}node2{}node7\n".format(
sep,sep,sep,sep,sep,sep,sep,sep))
node_feat_f.close()
def test_node_category_feature_loader():
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_category_node_feat(Path(tmpdirname), 'node_category_feat.csv')
feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname,
'node_category_feat.csv'))
feat_loader.addCategoryFeature([0, 1], feat_name='tf')
feat_loader.addCategoryFeature(['node', 'feat1'], norm='row', node_type='node')
feat_loader.addCategoryFeature(['node', 'feat1'], norm='col', node_type='node')
f_1 = feat_loader._raw_features[0]
f_2 = feat_loader._raw_features[1]
f_3 = feat_loader._raw_features[2]
assert f_1[0] == 'tf'
assert f_2[0] == 'nf'
assert f_3[0] == 'nf'
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(np.array([[1,0],[1,0],[0,1],[1,0]]),
f_1[3])
assert np.allclose(np.array([[1,0],[1,0],[0,1],[1,0]]),
f_2[3])
assert np.allclose(np.array([[1./3.,0],[1./3.,0],[0,1],[1./3.,0]]),
f_3[3])
feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname,
'node_category_feat.csv'))
feat_loader.addCategoryFeature([0, 1, 2])
feat_loader.addCategoryFeature(['node', 'feat1', 'feat2'], norm='row', node_type='node')
feat_loader.addCategoryFeature(['node', 'feat1', 'feat2'], norm='col', node_type='node')
f_1 = feat_loader._raw_features[0]
f_2 = feat_loader._raw_features[1]
f_3 = feat_loader._raw_features[2]
assert f_1[0] == 'nf'
assert f_2[0] == 'nf'
assert f_3[0] == 'nf'
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(np.array([[1,1,0],[1,0,0],[0,1,1],[1,0,1]]),
f_1[3])
assert np.allclose(np.array([[0.5,0.5,0],[1,0,0],[0,0.5,0.5],[0.5,0,0.5]]),
f_2[3])
assert np.allclose(np.array([[1./3.,1./2.,0],
[1./3.,0, 0],
[0, 1./2.,1./2.],
[1./3.,0, 1./2.]]),
f_3[3])
feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname,
'node_category_feat.csv'))
feat_loader.addCategoryFeature([0, 1, 2], rows=[0,1,3])
feat_loader.addCategoryFeature(['node', 'feat1', 'feat2'],
rows=[0,1,3], norm='row', node_type='node')
feat_loader.addCategoryFeature(['node', 'feat1', 'feat2'],
rows=[0,1,3], norm='col', node_type='node')
f_1 = feat_loader._raw_features[0]
f_2 = feat_loader._raw_features[1]
f_3 = feat_loader._raw_features[2]
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(np.array([[1,1,0],[1,0,0],[1,0,1]]),
f_1[3])
assert np.allclose(np.array([[0.5,0.5,0],[1,0,0],[0.5,0,0.5]]),
f_2[3])
assert np.allclose(np.array([[1./3.,1.,0.],
[1./3.,0.,0.],
[1./3.,0.,1.]]),
f_3[3])
feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname,
'node_category_feat.csv'))
feat_loader.addMultiCategoryFeature([0, 3], separator=',')
feat_loader.addMultiCategoryFeature(['node', 'feat3'], separator=',', norm='row', node_type='node')
feat_loader.addMultiCategoryFeature(['node', 'feat3'], separator=',', norm='col', node_type='node')
f_1 = feat_loader._raw_features[0]
f_2 = feat_loader._raw_features[1]
f_3 = feat_loader._raw_features[2]
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(np.array([[1,1,0],[1,0,0],[0,1,1],[1,0,1]]),
f_1[3])
assert np.allclose(np.array([[0.5,0.5,0],[1,0,0],[0,0.5,0.5],[0.5,0,0.5]]),
f_2[3])
assert np.allclose(np.array([[1./3.,1./2.,0],
[1./3.,0, 0],
[0, 1./2.,1./2.],
[1./3.,0, 1./2.]]),
f_3[3])
feat_loader.addMultiCategoryFeature([0, 3], rows=[0,1,3], separator=',')
feat_loader.addMultiCategoryFeature(['node', 'feat3'], separator=',',
rows=[0,1,3], norm='row', node_type='node')
feat_loader.addMultiCategoryFeature(['node', 'feat3'], separator=',',
rows=[0,1,3], norm='col', node_type='node')
f_1 = feat_loader._raw_features[3]
f_2 = feat_loader._raw_features[4]
f_3 = feat_loader._raw_features[5]
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(np.array([[1,1,0],[1,0,0],[1,0,1]]),
f_1[3])
assert np.allclose(np.array([[0.5,0.5,0],[1,0,0],[0.5,0,0.5]]),
f_2[3])
assert np.allclose(np.array([[1./3.,1.,0.],
[1./3.,0.,0.],
[1./3.,0.,1.]]),
f_3[3])
def test_node_numerical_feature_loader():
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_numerical_node_feat(Path(tmpdirname), 'node_numerical_feat.csv')
feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname,
'node_numerical_feat.csv'))
feat_loader.addNumericalFeature([0, 1])
feat_loader.addNumericalFeature(['node', 'feat1'], norm='standard', node_type='node')
feat_loader.addNumericalFeature(['node', 'feat1'], norm='min-max', node_type='node')
f_1 = feat_loader._raw_features[0]
f_2 = feat_loader._raw_features[1]
f_3 = feat_loader._raw_features[2]
assert f_1[0] == 'nf'
assert f_2[0] == 'nf'
assert f_3[0] == 'nf'
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(np.array([[1.],[2.],[0.],[4.]]),
f_1[3])
assert np.allclose(np.array([[1./7.],[2./7.],[0.],[4./7.]]),
f_2[3])
assert np.allclose(np.array([[1./4.],[2./4],[0.],[1.]]),
f_3[3])
feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname,
'node_numerical_feat.csv'))
feat_loader.addNumericalFeature([0,1,2,3],feat_name='tf')
feat_loader.addNumericalFeature(['node', 'feat1','feat2','feat3'],
norm='standard',
node_type='node')
feat_loader.addNumericalFeature(['node', 'feat1','feat2','feat3'],
norm='min-max',
node_type='node')
f_1 = feat_loader._raw_features[0]
f_2 = feat_loader._raw_features[1]
f_3 = feat_loader._raw_features[2]
assert f_1[0] == 'tf'
assert f_2[0] == 'nf'
assert f_3[0] == 'nf'
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(np.array([[1.,2.,0.],[2.,-1.,0.],[0.,0.,0.],[4.,-2.,0.]]),
f_1[3])
assert np.allclose(np.array([[1./7.,2./5.,0.],[2./7.,-1./5.,0.],[0.,0.,0.],[4./7.,-2./5.,0.]]),
f_2[3])
assert np.allclose(np.array([[1./4.,1.,0.],[2./4,1./4.,0.],[0.,2./4.,0.],[1.,0.,0.]]),
f_3[3])
feat_loader.addNumericalFeature([0,1,2,3],rows=[1,2,3])
feat_loader.addNumericalFeature(['node', 'feat1','feat2','feat3'],
rows=[1,2,3],
norm='standard',
node_type='node')
feat_loader.addNumericalFeature(['node', 'feat1','feat2','feat3'],
rows=[1,2,3],
norm='min-max',
node_type='node')
f_1 = feat_loader._raw_features[3]
f_2 = feat_loader._raw_features[4]
f_3 = feat_loader._raw_features[5]
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(np.array([[2.,-1.,0.],[0.,0.,0.],[4.,-2.,0.]]),
f_1[3])
assert np.allclose(np.array([[2./6.,-1./3.,0.],[0.,0.,0.],[4./6.,-2./3.,0.]]),
f_2[3])
assert np.allclose(np.array([[2./4.,1./2.,0.],[0.,1.,0.],[1.,0.,0.]]),
f_3[3])
feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname,
'node_numerical_feat.csv'))
feat_loader.addMultiNumericalFeature([0,4], separator=',')
feat_loader.addMultiNumericalFeature(['node', 'feat4'],
separator=',',
norm='standard',
node_type='node')
feat_loader.addMultiNumericalFeature(['node', 'feat4'],
separator=',',
norm='min-max',
node_type='node')
f_1 = feat_loader._raw_features[0]
f_2 = feat_loader._raw_features[1]
f_3 = feat_loader._raw_features[2]
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(np.array([[1.,2.,0.],[2.,-1.,0.],[0.,0.,0.],[4.,-2.,0.]]),
f_1[3])
assert np.allclose(np.array([[1./7.,2./5.,0.],[2./7.,-1./5.,0.],[0.,0.,0.],[4./7.,-2./5.,0.]]),
f_2[3])
assert np.allclose(np.array([[1./4.,1.,0.],[2./4,1./4.,0.],[0.,2./4.,0.],[1.,0.,0.]]),
f_3[3])
feat_loader.addMultiNumericalFeature([0,4], separator=',', rows=[1,2,3])
feat_loader.addMultiNumericalFeature(['node', 'feat4'],
separator=',',
rows=[1,2,3],
norm='standard',
node_type='node')
feat_loader.addMultiNumericalFeature(['node', 'feat4'],
separator=',',
rows=[1,2,3],
norm='min-max',
node_type='node')
f_1 = feat_loader._raw_features[3]
f_2 = feat_loader._raw_features[4]
f_3 = feat_loader._raw_features[5]
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(np.array([[2.,-1.,0.],[0.,0.,0.],[4.,-2.,0.]]),
f_1[3])
assert np.allclose(np.array([[2./6.,-1./3.,0.],[0.,0.,0.],[4./6.,-2./3.,0.]]),
f_2[3])
assert np.allclose(np.array([[2./4.,1./2.,0.],[0.,1.,0.],[1.,0.,0.]]),
f_3[3])
with tempfile.TemporaryDirectory() as tmpdirname:
create_numerical_bucket_node_feat(Path(tmpdirname), 'node_numerical_bucket_feat.csv')
feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname,
'node_numerical_bucket_feat.csv'))
feat_loader.addNumericalBucketFeature([0, 2],
feat_name='tf',
range=[10,30],
bucket_cnt=2)
feat_loader.addNumericalBucketFeature(['node', 'feat2'],
range=[10,30],
bucket_cnt=2,
norm='row', node_type='node')
feat_loader.addNumericalBucketFeature(['node', 'feat2'],
range=[10,30],
bucket_cnt=2,
norm='col', node_type='node')
f_1 = feat_loader._raw_features[0]
f_2 = feat_loader._raw_features[1]
f_3 = feat_loader._raw_features[2]
assert f_1[0] == 'tf'
assert f_2[0] == 'nf'
assert f_3[0] == 'nf'
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(np.array([[1., 0.], [1., 0.], [1., 0.], [0., 1.],
[1., 0.], [0., 1.], [0., 1.], [0., 1.]]),
f_1[3])
assert np.allclose(np.array([[1., 0.], [1., 0.], [1., 0.], [0., 1.],
[1., 0.], [0., 1.], [0., 1.], [0., 1.]]),
f_2[3])
assert np.allclose(np.array([[1./4., 0.], [1./4., 0.], [1./4., 0.], [0., 1./4],
[1./4., 0.], [0., 1./4.], [0., 1./4.], [0., 1./4.]]),
f_3[3])
feat_loader.addNumericalBucketFeature([0, 2],
rows=[0,2,3,4,5,6],
range=[10,30],
bucket_cnt=2)
feat_loader.addNumericalBucketFeature(['node', 'feat2'],
rows=[0,2,3,4,5,6],
range=[10,30],
bucket_cnt=2,
norm='row', node_type='node')
feat_loader.addNumericalBucketFeature(['node', 'feat2'],
rows=[0,2,3,4,5,6],
range=[10,30],
bucket_cnt=2,
norm='col', node_type='node')
f_1 = feat_loader._raw_features[3]
f_2 = feat_loader._raw_features[4]
f_3 = feat_loader._raw_features[5]
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(np.array([[1., 0.], [1., 0.], [0., 1.],
[1., 0.], [0., 1.], [0., 1.]]),
f_1[3])
assert np.allclose(np.array([[1., 0.], [1., 0.], [0., 1.],
[1., 0.], [0., 1.], [0., 1.]]),
f_2[3])
assert np.allclose(np.array([[1./3., 0.], [1./3., 0.], [0., 1./3],
[1./3., 0.], [0., 1./3.], [0., 1./3.]]),
f_3[3])
feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname,
'node_numerical_bucket_feat.csv'))
feat_loader.addNumericalBucketFeature([0, 2],
feat_name='tf',
range=[10,30],
bucket_cnt=4,
slide_window_size=10.)
feat_loader.addNumericalBucketFeature(['node', 'feat2'],
range=[10,30],
bucket_cnt=4,
slide_window_size=10.,
norm='row', node_type='node')
feat_loader.addNumericalBucketFeature(['node', 'feat2'],
range=[10,30],
bucket_cnt=4,
slide_window_size=10.,
norm='col', node_type='node')
f_1 = feat_loader._raw_features[0]
f_2 = feat_loader._raw_features[1]
f_3 = feat_loader._raw_features[2]
assert f_1[0] == 'tf'
assert f_2[0] == 'nf'
assert f_3[0] == 'nf'
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(np.array([[1., 0., 0., 0],
[1., 0., 0., 0],
[1., 1., 1., 0.],
[0., 1., 1., 1.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 0., 1.],
[0., 0., 0., 1.]]),
f_1[3])
assert np.allclose(np.array([[1., 0., 0., 0],
[1., 0., 0., 0],
[1./3., 1./3., 1./3., 0.],
[0., 1./3., 1./3., 1./3.],
[1./2., 1./2., 0., 0.],
[0., 0., 1./2., 1./2.],
[0., 0., 0., 1.],
[0., 0., 0., 1.]]),
f_2[3])
assert np.allclose(np.array([[1./4., 0., 0., 0],
[1./4., 0., 0., 0],
[1./4., 1./3., 1./3., 0.],
[0., 1./3., 1./3., 1./4.],
[1./4., 1./3., 0., 0.],
[0., 0., 1./3., 1./4.],
[0., 0., 0., 1./4.],
[0., 0., 0., 1./4.]]),
f_3[3])
def test_edge_numerical_feature_loader():
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_numerical_edge_feat(Path(tmpdirname), 'edge_numerical_feat.csv')
feat_loader = dgl_graphloader.EdgeFeatureLoader(os.path.join(tmpdirname,
'edge_numerical_feat.csv'))
feat_loader.addNumericalFeature([0, 1, 2], feat_name='tf')
feat_loader.addNumericalFeature(['node_s', 'node_d', 'feat1'],
norm='standard',
edge_type=('src', 'rel', 'dst'))
feat_loader.addNumericalFeature(['node_d', 'node_s', 'feat1'],
norm='min-max',
edge_type=('dst', 'rev-rel', 'src'))
f_1 = feat_loader._raw_features[0]
f_2 = feat_loader._raw_features[1]
f_3 = feat_loader._raw_features[2]
assert f_1[0] == 'tf'
assert f_2[0] == 'ef'
assert f_3[0] == 'ef'
assert f_1[1] is None
assert f_2[1] == ('src', 'rel', 'dst')
assert f_3[1] == ('dst', 'rev-rel', 'src')
assert f_1[2] == f_2[2]
assert f_1[2] == ['node1','node2','node3','node3']
assert f_3[2] == ['node4','node5','node6','node3']
assert f_1[3] == f_2[3]
assert f_1[3] == ['node4','node5','node6','node3']
assert f_3[3] == ['node1','node2','node3','node3']
assert np.allclose(np.array([[1.],[2.],[0.],[4.]]),
f_1[4])
assert np.allclose(np.array([[1./7.],[2./7.],[0.],[4./7.]]),
f_2[4])
assert np.allclose(np.array([[1./4.],[2./4],[0.],[1.]]),
f_3[4])
feat_loader.addNumericalFeature(['node_s', 'node_d', 'feat1'],
rows=[1,2,3],
norm='standard',
edge_type=('src', 'rel', 'dst'))
feat_loader.addNumericalFeature(['node_d', 'node_s', 'feat1'],
rows=[1,2,3],
norm='min-max',
edge_type=('dst', 'rev-rel', 'src'))
f_1 = feat_loader._raw_features[3]
f_2 = feat_loader._raw_features[4]
assert f_1[1] == ('src', 'rel', 'dst')
assert f_2[1] == ('dst', 'rev-rel', 'src')
assert f_1[2] == ['node2','node3','node3']
assert f_2[2] == ['node5','node6','node3']
assert f_1[3] == ['node5','node6','node3']
assert f_2[3] == ['node2','node3','node3']
assert np.allclose(np.array([[2./6.],[0.],[4./6.]]),
f_1[4])
assert np.allclose(np.array([[2./4],[0.],[1.]]),
f_2[4])
def test_node_word2vec_feature_loader():
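    # Verify NodeFeatureLoader.addWord2VecFeature against spaCy vectors for a
    # single language model, concatenated models, and row subsets.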
import tempfile
import spacy
with tempfile.TemporaryDirectory() as tmpdirname:
create_word_node_feat(Path(tmpdirname), 'node_word_feat.csv')
feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname,
'node_word_feat.csv'))
feat_loader.addWord2VecFeature([0, 1], languages=['en_core_web_lg'], feat_name='tf')
feat_loader.addWord2VecFeature(['node', 'feat1'],
languages=['en_core_web_lg'],
node_type='node')
feat_loader.addWord2VecFeature(['node', 'feat1'],
languages=['en_core_web_lg'],
node_type='node')
f_1 = feat_loader._raw_features[0]
f_2 = feat_loader._raw_features[1]
f_3 = feat_loader._raw_features[2]
assert f_1[0] == 'tf'
assert f_2[0] == 'nf'
assert f_3[0] == 'nf'
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(f_1[3], f_2[3])
assert np.allclose(f_1[3], f_3[3])
nlp = spacy.load('en_core_web_lg')
assert np.allclose(np.array([nlp("A").vector,
nlp("A").vector,
nlp("C").vector,
nlp("A").vector]),
f_1[3])
feat_loader.addWord2VecFeature([0, 3], languages=['en_core_web_lg', 'fr_core_news_lg'])
feat_loader.addWord2VecFeature(['node', 'feat3'],
languages=['en_core_web_lg', 'fr_core_news_lg'],
node_type='node')
feat_loader.addWord2VecFeature(['node', 'feat3'],
languages=['en_core_web_lg', 'fr_core_news_lg'],
node_type='node')
f_1 = feat_loader._raw_features[3]
f_2 = feat_loader._raw_features[4]
f_3 = feat_loader._raw_features[5]
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(f_1[3], f_2[3])
assert np.allclose(f_1[3], f_3[3])
nlp1 = spacy.load('fr_core_news_lg')
assert np.allclose(np.array([np.concatenate((nlp("24").vector, nlp1("24").vector)),
np.concatenate((nlp("1").vector, nlp1("1").vector)),
np.concatenate((nlp("12").vector, nlp1("12").vector)),
np.concatenate((nlp("13").vector, nlp1("13").vector))]),
f_1[3])
feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname,
'node_word_feat.csv'))
feat_loader.addWord2VecFeature([0, 3],
rows=[1,2],
languages=['en_core_web_lg', 'fr_core_news_lg'])
feat_loader.addWord2VecFeature(['node', 'feat3'],
rows=[1,2],
languages=['en_core_web_lg', 'fr_core_news_lg'],
node_type='node')
feat_loader.addWord2VecFeature(['node', 'feat3'],
rows=[1,2],
languages=['en_core_web_lg', 'fr_core_news_lg'],
node_type='node')
f_1 = feat_loader._raw_features[0]
f_2 = feat_loader._raw_features[1]
f_3 = feat_loader._raw_features[2]
assert f_1[1] is None
assert f_2[1] == 'node'
assert f_3[1] == 'node'
assert f_1[2] == f_2[2]
assert f_1[2] == f_3[2]
assert np.allclose(f_1[3], f_2[3])
assert np.allclose(f_1[3], f_3[3])
nlp1 = spacy.load('fr_core_news_lg')
assert np.allclose(np.array([np.concatenate((nlp("1").vector, nlp1("1").vector)),
np.concatenate((nlp("12").vector, nlp1("12").vector))]),
f_1[3])
def test_node_label_loader():
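    # Verify NodeLabelLoader train/valid/test/custom-split sets for single-label
    # and multi-label columns, including the split-rate warning case.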
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_node_labels(Path(tmpdirname), 'labels.csv')
label_loader = dgl_graphloader.NodeLabelLoader(os.path.join(tmpdirname,
'labels.csv'))
label_loader.addTrainSet([0,1])
label_loader.addValidSet(['node','label1'], node_type='node')
label_loader.addTestSet(['node','label1'], rows=[0,2], node_type='node')
label_loader.addSet(['node','label1'], [0.5, 0.25, 0.25], rows=[0,1,2,3], node_type='nt')
l_1 = label_loader._labels[0]
l_2 = label_loader._labels[1]
l_3 = label_loader._labels[2]
l_4 = label_loader._labels[3]
        assert l_1[0] is None
assert l_2[0] == 'node'
assert l_3[0] == 'node'
assert l_4[0] == 'nt'
assert l_1[1] == l_2[1]
assert l_1[1] == ['node1', 'node2', 'node3', 'node4']
assert l_3[1] == ['node1', 'node3']
assert l_4[1] == l_1[1]
assert l_1[2] == l_2[2]
assert l_1[2] == ['A','A','C','A']
assert l_3[2] == ['A','C']
assert l_4[2] == l_1[2]
assert l_1[3] == (1., 0., 0.)
assert l_2[3] == (0., 1., 0.)
assert l_3[3] == (0., 0., 1.)
assert l_4[3] == (0.5, 0.25, 0.25)
label_loader = dgl_graphloader.NodeLabelLoader(os.path.join(tmpdirname,
'labels.csv'))
label_loader.addTrainSet([0,2], multilabel=True, separator=',')
label_loader.addValidSet(['node','label2'],
multilabel=True,
separator=',',
node_type='node')
label_loader.addTestSet(['node','label2'],
multilabel=True,
separator=',',
rows=[0,2],
node_type='node')
label_loader.addSet(['node','label2'],
[0.5, 0.25, 0.25],
multilabel=True,
separator=',', rows=[0,1,2,3], node_type='nt')
l_1 = label_loader._labels[0]
l_2 = label_loader._labels[1]
l_3 = label_loader._labels[2]
l_4 = label_loader._labels[3]
        assert l_1[0] is None
assert l_2[0] == 'node'
assert l_3[0] == 'node'
assert l_4[0] == 'nt'
assert l_1[1] == l_2[1]
assert l_1[1] == ['node1', 'node2', 'node3', 'node4']
assert l_3[1] == ['node1', 'node3']
assert l_4[1] == l_1[1]
assert l_1[2] == l_2[2]
assert l_1[2] == [['D','A'],['E','C','D'],['F','A','B'],['G','E']]
assert l_3[2] == [['D','A'],['F','A','B']]
assert l_4[2] == l_1[2]
assert l_1[3] == (1., 0., 0.)
assert l_2[3] == (0., 1., 0.)
assert l_3[3] == (0., 0., 1.)
assert l_4[3] == (0.5, 0.25, 0.25)
# check warning
label_loader.addSet(['node','label2'],
[0.51, 0.25, 0.25],
multilabel=True,
separator=',', rows=[0,1,2,3], node_type='nt')
def test_edge_label_loader():
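    # Verify EdgeLabelLoader train/valid/test/custom-split sets for single-label
    # and multi-label columns, including the split-rate warning case.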
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_edge_labels(Path(tmpdirname), 'edge_labels.csv')
label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname,
'edge_labels.csv'))
label_loader.addTrainSet([0,1,2])
label_loader.addValidSet(['node_0','node_1','label1'],
edge_type=('src','rel','dst'))
label_loader.addTestSet(['node_0','node_1','label1'],
rows=[0,2],
edge_type=('src','rel','dst'))
label_loader.addSet(['node_0','node_1','label1'],
[0.5, 0.25, 0.25],
rows=[0,1,2,3],
edge_type=('src_n','rel_r','dst_n'))
l_1 = label_loader._labels[0]
l_2 = label_loader._labels[1]
l_3 = label_loader._labels[2]
l_4 = label_loader._labels[3]
        assert l_1[0] is None
assert l_2[0] == ('src','rel','dst')
assert l_3[0] == ('src','rel','dst')
assert l_4[0] == ('src_n','rel_r','dst_n')
assert l_1[1] == l_2[1]
assert l_1[1] == ['node1', 'node2', 'node3', 'node4']
assert l_3[1] == ['node1', 'node3']
assert l_4[1] == l_1[1]
assert l_1[2] == l_2[2]
assert l_1[2] == ['node4', 'node3', 'node2', 'node1']
assert l_3[2] == ['node4', 'node2']
assert l_4[2] == l_1[2]
assert l_1[3] == l_2[3]
assert l_1[3] == ['A','A','C','A']
assert l_3[3] == ['A','C']
assert l_4[3] == l_1[3]
assert l_1[4] == (1., 0., 0.)
assert l_2[4] == (0., 1., 0.)
assert l_3[4] == (0., 0., 1.)
assert l_4[4] == (0.5, 0.25, 0.25)
label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname,
'edge_labels.csv'))
label_loader.addTrainSet([0,1,3], multilabel=True, separator=',')
label_loader.addValidSet(['node_0','node_1','label2'],
multilabel=True,
separator=',',
edge_type=('src','rel','dst'))
label_loader.addTestSet(['node_0','node_1','label2'],
multilabel=True,
separator=',',
rows=[0,2],
edge_type=('src','rel','dst'))
label_loader.addSet(['node_0','node_1','label2'],
[0.5, 0.25, 0.25],
multilabel=True,
separator=',',
rows=[0,1,2,3],
edge_type=('src_n','rel_r','dst_n'))
l_1 = label_loader._labels[0]
l_2 = label_loader._labels[1]
l_3 = label_loader._labels[2]
l_4 = label_loader._labels[3]
        assert l_1[0] is None
assert l_2[0] == ('src','rel','dst')
assert l_3[0] == ('src','rel','dst')
assert l_4[0] == ('src_n','rel_r','dst_n')
assert l_1[1] == l_2[1]
assert l_1[1] == ['node1', 'node2', 'node3', 'node4']
assert l_3[1] == ['node1', 'node3']
assert l_4[1] == l_1[1]
assert l_1[2] == l_2[2]
assert l_1[2] == ['node4', 'node3', 'node2', 'node1']
assert l_3[2] == ['node4', 'node2']
assert l_4[2] == l_1[2]
assert l_1[3] == l_2[3]
assert l_1[3] == [['D','A'],['E','C','D'],['F','A','B'],['G','E']]
assert l_3[3] == [['D','A'],['F','A','B']]
assert l_4[3] == l_1[3]
assert l_1[4] == (1., 0., 0.)
assert l_2[4] == (0., 1., 0.)
assert l_3[4] == (0., 0., 1.)
assert l_4[4] == (0.5, 0.25, 0.25)
# check warning
label_loader.addSet(['node_0','node_1','label2'],
[0.5, 0.25, 0.26],
multilabel=True,
separator=',',
rows=[0,1,2,3],
edge_type=('src_n','rel_r','dst_n'))
def test_edge_loader():
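    # Verify EdgeLoader.addEdges and addCategoryRelationEdge: edge types,
    # per-relation grouping of category columns, and row subsets.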
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_graph_edges(Path(tmpdirname), 'graphs.csv')
edge_loader = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname,
'graphs.csv'))
edge_loader.addEdges([0,1])
edge_loader.addEdges(['node_0','node_1'])
edge_loader.addEdges(['node_0','node_1'],
rows=np.array([1,2,3,4]),
edge_type=('src', 'edge', 'dst'))
e_1 = edge_loader._edges[0]
e_2 = edge_loader._edges[1]
e_3 = edge_loader._edges[2]
        assert e_1[0] is None
        assert e_2[0] is None
assert e_3[0] == ('src','edge','dst')
assert e_1[1] == e_2[1]
assert e_1[1] == ['node1', 'node2', 'node3', 'node4', 'node4']
assert e_3[1] == ['node2', 'node3', 'node4', 'node4']
assert e_1[2] == e_2[2]
assert e_1[2] == ['node2', 'node1', 'node1', 'node3', 'node4']
assert e_3[2] == ['node1', 'node1', 'node3', 'node4']
edge_loader = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname,
'graphs.csv'))
edge_loader.addCategoryRelationEdge([0,1,2],
src_type='src_t',
dst_type='dst_t')
edge_loader.addCategoryRelationEdge(['node_0','node_1','rel_1'],
src_type='src_t',
dst_type='dst_t')
edge_loader.addCategoryRelationEdge(['node_0','node_1','rel_1'],
rows=np.array([1,2,3,4]),
src_type='src',
dst_type='dst')
e_1 = edge_loader._edges[0]
e_2 = edge_loader._edges[1]
e_3 = edge_loader._edges[2]
assert e_1[0] == ('src_t','A','dst_t')
assert e_2[0] == ('src_t','A','dst_t')
assert e_3[0] == ('src','A','dst')
assert e_1[1] == e_2[1]
assert e_1[1] == ['node1', 'node2', 'node3', 'node4', 'node4']
assert e_3[1] == ['node2', 'node3', 'node4', 'node4']
assert e_1[2] == e_2[2]
assert e_1[2] == ['node2', 'node1', 'node1', 'node3', 'node4']
assert e_3[2] == ['node1', 'node1', 'node3', 'node4']
edge_loader = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname,
'graphs.csv'))
edge_loader.addCategoryRelationEdge([0,1,3],
src_type='src_t',
dst_type='dst_t')
edge_loader.addCategoryRelationEdge(['node_0','node_1','rel_2'],
src_type='src_t',
dst_type='dst_t')
edge_loader.addCategoryRelationEdge(['node_0','node_1','rel_2'],
rows=np.array([1,2,3,4]),
src_type='src',
dst_type='dst')
e_1 = edge_loader._edges[0]
e_2 = edge_loader._edges[1]
e_3 = edge_loader._edges[2]
assert e_1[0] == ('src_t','C','dst_t')
assert e_2[0] == ('src_t','B','dst_t')
assert e_3[0] == ('src_t','A','dst_t')
e_4 = edge_loader._edges[3]
e_5 = edge_loader._edges[4]
e_6 = edge_loader._edges[5]
assert e_4[0] == ('src_t','C','dst_t')
assert e_5[0] == ('src_t','B','dst_t')
assert e_6[0] == ('src_t','A','dst_t')
assert e_1[1] == e_4[1]
assert e_2[1] == e_5[1]
assert e_3[1] == e_6[1]
assert e_1[1] == ['node1', 'node2', 'node3']
assert e_2[1] == ['node4']
assert e_3[1] == ['node4']
assert e_1[2] == e_4[2]
assert e_2[2] == e_5[2]
assert e_3[2] == e_6[2]
assert e_1[2] == ['node2', 'node1', 'node1']
assert e_2[2] == ['node3']
assert e_3[2] == ['node4']
e_7 = edge_loader._edges[6]
e_8 = edge_loader._edges[7]
e_9 = edge_loader._edges[8]
assert e_7[0] == ('src','C','dst')
assert e_8[0] == ('src','B','dst')
assert e_9[0] == ('src','A','dst')
assert e_7[1] == ['node2', 'node3']
assert e_8[1] == ['node4']
assert e_9[1] == ['node4']
assert e_7[2] == ['node1', 'node1']
assert e_8[2] == ['node3']
assert e_9[2] == ['node4']
def test_node_feature_process():
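    # Verify NodeFeatureLoader.process: per-node-type feature concatenation and
    # reuse of externally supplied node id mappings.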
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_multiple_node_feat(Path(tmpdirname), 'node_feat.csv')
feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname,
'node_feat.csv'))
feat_loader.addNumericalFeature([0,2],norm='standard')
feat_loader.addCategoryFeature([0,1])
feat_loader.addMultiCategoryFeature([0,3], separator=',')
node_dicts = {}
result = feat_loader.process(node_dicts)
assert len(result) == 1
nids, feats = result[None]['nf']
assert np.allclose(np.array([0,1,2,3]), nids)
assert np.allclose(np.concatenate([np.array([[0.1/1.7],[0.3/1.7],[0.2/1.7],[-1.1/1.7]]),
np.array([[1.,0.],[1.,0.],[0.,1.],[1.,0.]]),
np.array([[1.,1.,0.],[1.,0.,0.],[0.,1.,1.],[1.,0.,1.]])],
axis=1),
feats)
assert node_dicts[None]['node1'] == 0
assert node_dicts[None]['node2'] == 1
assert node_dicts[None]['node3'] == 2
assert node_dicts[None]['node4'] == 3
node_dicts = {None: {'node1':3,
'node2':2,
'node3':1,
'node4':0}}
result = feat_loader.process(node_dicts)
nids, feats = result[None]['nf']
assert np.allclose(np.array([3,2,1,0]), nids)
assert np.allclose(np.concatenate([np.array([[0.1/1.7],[0.3/1.7],[0.2/1.7],[-1.1/1.7]]),
np.array([[1.,0.],[1.,0.],[0.,1.],[1.,0.]]),
np.array([[1.,1.,0.],[1.,0.,0.],[0.,1.,1.],[1.,0.,1.]])],
axis=1),
feats)
feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname,
'node_feat.csv'))
feat_loader.addCategoryFeature(['node','feat1'], node_type='n1')
feat_loader.addMultiCategoryFeature(['node','feat3'], separator=',', node_type='n1')
feat_loader.addNumericalFeature(['node','feat2'], norm='standard', node_type='n2')
node_dicts = {'n2':{'node1':3,
'node2':2,
'node3':1,
'node4':0}}
result = feat_loader.process(node_dicts)
assert len(result) == 2
assert len(node_dicts) == 2
nids, feats = result['n1']['nf']
assert np.allclose(np.array([0,1,2,3]), nids)
assert np.allclose(np.concatenate([np.array([[1.,0.],[1.,0.],[0.,1.],[1.,0.]]),
np.array([[1.,1.,0.],[1.,0.,0.],[0.,1.,1.],[1.,0.,1.]])],
axis=1),
feats)
nids, feats = result['n2']['nf']
assert np.allclose(np.array([3,2,1,0]), nids)
assert np.allclose(np.array([[0.1/1.7],[0.3/1.7],[0.2/1.7],[-1.1/1.7]]),
feats)
def test_edge_feature_process():
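    # Verify EdgeFeatureLoader.process: per-edge-type feature concatenation and
    # reuse of externally supplied node id mappings.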
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_multiple_edge_feat(Path(tmpdirname), 'edge_feat.csv')
feat_loader = dgl_graphloader.EdgeFeatureLoader(os.path.join(tmpdirname,
'edge_feat.csv'))
feat_loader.addNumericalFeature([0,1,2],norm='standard')
feat_loader.addNumericalFeature([0,1,3],norm='min-max')
feat_loader.addNumericalFeature([0,1,4])
node_dicts = {}
result = feat_loader.process(node_dicts)
assert len(result) == 1
snids, dnids, feats = result[None]['ef']
assert np.allclose(np.array([0,1,2,3]), snids)
assert np.allclose(np.array([4,5,6,7]), dnids)
assert np.allclose(np.concatenate([np.array([[0.2/1.0],[-0.3/1.0],[0.3/1.0],[-0.2/1.0]]),
np.array([[1.2/1.4],[1.0],[1.3/1.4],[0.]]),
np.array([[1.1],[1.2],[-1.2],[0.9]])],
axis=1),
feats)
assert node_dicts[None]['node1'] == 0
assert node_dicts[None]['node2'] == 1
assert node_dicts[None]['node3'] == 2
assert node_dicts[None]['node4'] == 3
node_dicts = {None: {'node1':3,
'node2':2,
'node3':1,
'node4':0}}
result = feat_loader.process(node_dicts)
snids, dnids, feats = result[None]['ef']
assert np.allclose(np.array([3,2,1,0]), snids)
assert np.allclose(np.array([4,5,6,7]), dnids)
assert np.allclose(np.concatenate([np.array([[0.2/1.0],[-0.3/1.0],[0.3/1.0],[-0.2/1.0]]),
np.array([[1.2/1.4],[1.0],[1.3/1.4],[0.]]),
np.array([[1.1],[1.2],[-1.2],[0.9]])],
axis=1),
feats)
feat_loader = dgl_graphloader.EdgeFeatureLoader(os.path.join(tmpdirname,
'edge_feat.csv'))
feat_loader.addNumericalFeature([0,1,2],norm='standard',edge_type=('n0','r0','n1'))
feat_loader.addNumericalFeature([0,1,3],norm='min-max',edge_type=('n0','r0','n1'))
feat_loader.addNumericalFeature([0,1,4],edge_type=('n1','r1','n0'))
node_dicts = {'n0':{'node1':3,
'node2':2,
'node3':1,
'node4':0}}
result = feat_loader.process(node_dicts)
assert len(result) == 2
snids, dnids, feats = result[('n0','r0','n1')]['ef']
assert np.allclose(np.array([3,2,1,0]), snids)
assert np.allclose(np.array([0,1,2,3]), dnids)
assert np.allclose(np.concatenate([np.array([[0.2/1.0],[-0.3/1.0],[0.3/1.0],[-0.2/1.0]]),
np.array([[1.2/1.4],[1.0],[1.3/1.4],[0.]])],
axis=1),
feats)
snids, dnids, feats = result[('n1','r1','n0')]['ef']
assert np.allclose(np.array([4,5,6,7]), snids)
assert np.allclose(np.array([4,5,6,7]), dnids)
assert np.allclose(np.array([[1.1],[1.2],[-1.2],[0.9]]),
feats)
def test_node_label_process():
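    # Verify NodeLabelLoader.process: train/valid/test node ids with one-hot
    # labels, including multilabel columns and random splits.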
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_multiple_label(Path(tmpdirname), 'node_label.csv')
label_loader = dgl_graphloader.NodeLabelLoader(os.path.join(tmpdirname,
'node_label.csv'))
label_loader.addTrainSet([0,1])
node_dicts = {}
result = label_loader.process(node_dicts)
assert len(result) == 1
train_nids, train_labels, valid_nids, valid_labels, test_nids, test_labels = result[None]
assert np.array_equal(np.array([0,1,2,3]), train_nids)
assert valid_nids is None
assert test_nids is None
assert np.array_equal(np.array([[1,0,0],[0,1,0],[0,0,1],[1,0,0]]), train_labels)
assert valid_labels is None
assert test_labels is None
label_loader.addValidSet([0,2])
label_loader.addTestSet([0,3])
node_dicts = {}
result = label_loader.process(node_dicts)
train_nids, train_labels, valid_nids, valid_labels, test_nids, test_labels = result[None]
assert np.array_equal(np.array([0,1,2,3]), train_nids)
assert np.array_equal(np.array([0,1,2,3]), valid_nids)
assert np.array_equal(np.array([0,1,2,3]), test_nids)
assert np.array_equal(np.array([[1,0,0],[0,1,0],[0,0,1],[1,0,0]]), train_labels)
assert np.array_equal(np.array([[1,0,0],[0,1,0],[0,0,1],[1,0,0]]), valid_labels)
assert np.array_equal(np.array([[0,0,1],[0,1,0],[1,0,0],[1,0,0]]), test_labels)
# test with node type
label_loader = dgl_graphloader.NodeLabelLoader(os.path.join(tmpdirname,
'node_label.csv'))
label_loader.addTrainSet([0,1], node_type='n1')
node_dicts = {'n1':{'node1':3,
'node2':2,
'node3':1,
'node4':0}}
label_loader.addValidSet([0,2], rows=[1,2,3], node_type='n1')
label_loader.addTestSet([0,3], rows=[0,1,2], node_type='n1')
result = label_loader.process(node_dicts)
assert len(result) == 1
assert 'n1' in result
train_nids, train_labels, valid_nids, valid_labels, test_nids, test_labels = result['n1']
assert np.array_equal(np.array([3,2,1,0]), train_nids)
assert np.array_equal(np.array([2,1,0]), valid_nids)
assert np.array_equal(np.array([3,2,1]), test_nids)
assert np.array_equal(np.array([[1,0,0],[0,1,0],[0,0,1],[1,0,0]]), train_labels)
assert np.array_equal(np.array([[0,1,0],[0,0,1],[1,0,0]]), valid_labels)
assert np.array_equal(np.array([[0,0,1],[0,1,0],[1,0,0]]), test_labels)
# test multilabel
# test with node type
label_loader = dgl_graphloader.NodeLabelLoader(os.path.join(tmpdirname,
'node_label.csv'))
label_loader.addTrainSet(['node','label4'],
multilabel=True,
separator=',',
node_type='n1')
label_loader.addSet(['node', 'label5'],
split_rate=[0.,0.5,0.5],
multilabel=True,
separator=',',
node_type='n1')
node_dicts = {'n1':{'node1':3,
'node2':2,
'node3':1,
'node4':0}}
np.random.seed(0)
result = label_loader.process(node_dicts)
assert len(result) == 1
assert 'n1' in result
train_nids, train_labels, valid_nids, valid_labels, test_nids, test_labels = result['n1']
label_map = label_loader.label_map
rev_map = {val:key for key,val in label_map['n1'].items()}
vl_truth = np.zeros((2,3),dtype='int32')
vl_truth[0][rev_map['A']] = 1
vl_truth[1][rev_map['A']] = 1
vl_truth[1][rev_map['B']] = 1
tl_truth = np.zeros((2,3),dtype='int32')
tl_truth[0][rev_map['B']] = 1
tl_truth[1][rev_map['A']] = 1
tl_truth[1][rev_map['C']] = 1
assert np.array_equal(np.array([3,2,1,0]), train_nids)
assert np.array_equal(np.array([1,0]), valid_nids)
assert np.array_equal(np.array([2,3]), test_nids)
assert np.array_equal(np.array([[1,1,0],[1,0,0],[0,1,1],[1,0,1]]), train_labels)
assert np.array_equal(vl_truth, valid_labels)
assert np.array_equal(tl_truth, test_labels)
def test_edge_label_process():
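    # Verify EdgeLabelLoader.process: existence-only edges, labeled edges and
    # multilabel columns with random splits.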
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_multiple_label(Path(tmpdirname), 'edge_label.csv')
label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname,
'edge_label.csv'))
# only existence of the edge
label_loader.addTrainSet([0,6])
node_dicts = {}
result = label_loader.process(node_dicts)
assert len(result) == 1
train_snids, train_dnids, train_labels, \
valid_snids, valid_dnids, valid_labels, \
test_snids, test_dnids, test_labels = result[None]
assert np.array_equal(np.array([0,1,2,3]), train_snids)
assert np.array_equal(np.array([2,3,4,5]), train_dnids)
assert valid_snids is None
assert valid_dnids is None
assert test_snids is None
assert test_dnids is None
assert train_labels is None
assert valid_labels is None
assert test_labels is None
label_loader.addValidSet([0,7])
label_loader.addTestSet([6,8])
node_dicts = {}
result = label_loader.process(node_dicts)
assert len(result) == 1
train_snids, train_dnids, train_labels, \
valid_snids, valid_dnids, valid_labels, \
test_snids, test_dnids, test_labels = result[None]
assert np.array_equal(np.array([0,1,2,3]), train_snids)
assert np.array_equal(np.array([2,3,4,5]), train_dnids)
assert np.array_equal(np.array([0,1,2,3]), valid_snids)
assert np.array_equal(np.array([0,1,0,1]), valid_dnids)
assert np.array_equal(np.array([2,3,4,5]), test_snids)
assert np.array_equal(np.array([3,4,5,6]), test_dnids)
# with labels
label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname,
'edge_label.csv'))
label_loader.addTrainSet([0,6,1], edge_type=('n1', 'like', 'n1'))
node_dicts = {'n1':{'node1':3,
'node2':2,
'node3':1,
'node4':0}}
label_loader.addValidSet(['node', 'node_d2', 'label2'], rows=[1,2,3], edge_type=('n1', 'like', 'n1'))
label_loader.addTestSet(['node_d', 'node_d3', 'label3'], rows=[0,1,2], edge_type=('n1', 'like', 'n1'))
result = label_loader.process(node_dicts)
assert len(result) == 1
assert ('n1', 'like', 'n1') in result
train_snids, train_dnids, train_labels, \
valid_snids, valid_dnids, valid_labels, \
test_snids, test_dnids, test_labels = result[('n1', 'like', 'n1')]
assert np.array_equal(np.array([3,2,1,0]), train_snids)
assert np.array_equal(np.array([1,0,4,5]), train_dnids)
assert np.array_equal(np.array([2,1,0]), valid_snids)
assert np.array_equal(np.array([2,3,2]), valid_dnids)
assert np.array_equal(np.array([1,0,4]), test_snids)
assert np.array_equal(np.array([0,4,5]), test_dnids)
assert np.array_equal(np.array([[1,0,0],[0,1,0],[0,0,1],[1,0,0]]), train_labels)
assert np.array_equal(np.array([[0,1,0],[0,0,1],[1,0,0]]), valid_labels)
assert np.array_equal(np.array([[0,0,1],[0,1,0],[1,0,0]]), test_labels)
# with multiple labels
label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname,
'edge_label.csv'))
label_loader.addTrainSet(['node','node_d','label4'],
multilabel=True,
separator=',',
edge_type=('n1', 'like', 'n2'))
node_dicts = {'n1':{'node1':3,
'node2':2,
'node3':1,
'node4':0}}
label_loader.addSet(['node_d2', 'node_d3', 'label5'],
split_rate=[0.,0.5,0.5],
multilabel=True,
separator=',',
edge_type=('n1', 'like', 'n2'))
np.random.seed(0)
result = label_loader.process(node_dicts)
assert len(result) == 1
assert ('n1', 'like', 'n2') in result
train_snids, train_dnids, train_labels, \
valid_snids, valid_dnids, valid_labels, \
test_snids, test_dnids, test_labels = result[('n1', 'like', 'n2')]
label_map = label_loader.label_map
rev_map = {val:key for key,val in label_map[('n1', 'like', 'n2')].items()}
vl_truth = np.zeros((2,3),dtype='int32')
vl_truth[0][rev_map['A']] = 1
vl_truth[1][rev_map['A']] = 1
vl_truth[1][rev_map['B']] = 1
tl_truth = np.zeros((2,3),dtype='int32')
tl_truth[0][rev_map['B']] = 1
tl_truth[1][rev_map['A']] = 1
tl_truth[1][rev_map['C']] = 1
assert np.array_equal(np.array([3,2,1,0]), train_snids)
assert np.array_equal(np.array([0,1,2,3]), train_dnids)
assert np.array_equal(np.array([3,2]), valid_snids)
assert np.array_equal(np.array([3,4]), valid_dnids)
assert np.array_equal(np.array([2,3]), test_snids)
assert np.array_equal(np.array([2,1]), test_dnids)
assert np.array_equal(np.array([[1,1,0],[1,0,0],[0,1,1],[1,0,1]]), train_labels)
assert np.array_equal(vl_truth, valid_labels)
assert np.array_equal(tl_truth, test_labels)
def test_relation_edge_label_process():
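    # Verify the relational label APIs: addRelationalTrainSet/ValidSet/TestSet
    # and addRelationalSet produce per-relation train/valid/test splits.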
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_graph_edges(Path(tmpdirname), 'edge_label.csv')
label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname,
'edge_label.csv'))
# only existence of the edge
label_loader.addRelationalTrainSet([0,1,2])
node_dicts = {}
result = label_loader.process(node_dicts)
assert len(result) == 1
train_snids, train_dnids, train_labels, \
valid_snids, valid_dnids, valid_labels, \
test_snids, test_dnids, test_labels = result[('node','A','node')]
assert np.array_equal(np.array([0,1,2,3,3]), train_snids)
assert np.array_equal(np.array([1,0,0,2,3]), train_dnids)
assert valid_snids is None
assert valid_dnids is None
assert test_snids is None
assert test_dnids is None
assert train_labels is None
assert valid_labels is None
assert test_labels is None
label_loader.addRelationalValidSet([0,1,3],rows=[0,3])
label_loader.addRelationalTestSet([0,1,3],rows=[1,2,4])
result = label_loader.process(node_dicts)
assert len(result) == 3
train_snids, train_dnids, train_labels, \
valid_snids, valid_dnids, valid_labels, \
test_snids, test_dnids, test_labels = result[('node','A','node')]
assert np.array_equal(np.array([0,1,2,3,3]), train_snids)
assert np.array_equal(np.array([1,0,0,2,3]), train_dnids)
assert valid_snids is None
assert valid_dnids is None
assert np.array_equal(np.array([3]), test_snids)
assert np.array_equal(np.array([3]), test_dnids)
assert train_labels is None
assert valid_labels is None
assert test_labels is None
train_snids, train_dnids, train_labels, \
valid_snids, valid_dnids, valid_labels, \
test_snids, test_dnids, test_labels = result[('node','C','node')]
assert train_snids is None
assert train_dnids is None
assert np.array_equal(np.array([0]), valid_snids)
assert np.array_equal(np.array([1]), valid_dnids)
assert np.array_equal(np.array([1,2]), test_snids)
assert np.array_equal(np.array([0,0]), test_dnids)
assert train_labels is None
assert valid_labels is None
assert test_labels is None
train_snids, train_dnids, train_labels, \
valid_snids, valid_dnids, valid_labels, \
test_snids, test_dnids, test_labels = result[('node','B','node')]
assert train_snids is None
assert train_dnids is None
assert np.array_equal(np.array([3]), valid_snids)
assert np.array_equal(np.array([2]), valid_dnids)
assert test_snids is None
assert test_dnids is None
assert train_labels is None
assert valid_labels is None
assert test_labels is None
np.random.seed(0)
label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname,
'edge_label.csv'))
label_loader.addRelationalTrainSet([0,1,2])
label_loader.addRelationalSet([0,1,3], split_rate=[0.,0.4,0.6])
result = label_loader.process(node_dicts)
assert len(result) == 3
train_snids, train_dnids, train_labels, \
valid_snids, valid_dnids, valid_labels, \
test_snids, test_dnids, test_labels = result[('node','A','node')]
assert np.array_equal(np.array([0,1,2,3,3]), train_snids)
assert np.array_equal(np.array([1,0,0,2,3]), train_dnids)
assert np.array_equal(np.array([3]), test_snids)
assert np.array_equal(np.array([3]), test_dnids)
assert valid_snids is None
assert valid_dnids is None
assert train_labels is None
assert valid_labels is None
assert test_labels is None
train_snids, train_dnids, train_labels, \
valid_snids, valid_dnids, valid_labels, \
test_snids, test_dnids, test_labels = result[('node','C','node')]
assert train_snids is None
assert train_dnids is None
assert np.array_equal(np.array([2]), valid_snids)
assert np.array_equal(np.array([0]), valid_dnids)
assert np.array_equal(np.array([1,0]), test_snids)
assert np.array_equal(np.array([0,1]), test_dnids)
assert train_labels is None
assert valid_labels is None
assert test_labels is None
train_snids, train_dnids, train_labels, \
valid_snids, valid_dnids, valid_labels, \
test_snids, test_dnids, test_labels = result[('node','B','node')]
assert train_snids is None
assert train_dnids is None
assert valid_snids is None
assert valid_dnids is None
assert np.array_equal(np.array([3]), test_snids)
assert np.array_equal(np.array([2]), test_dnids)
assert train_labels is None
assert valid_labels is None
assert test_labels is None
#test warning
label_loader.addRelationalSet([0,1,3], split_rate=[0.01,0.4,0.6])
def test_edge_process():
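    # Verify EdgeLoader.process: plain edges and category-relation edges mapped
    # through shared node id dictionaries.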
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_graph_edges(Path(tmpdirname), 'graphs.csv')
edge_loader = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname,
'graphs.csv'))
edge_loader.addEdges([0,1])
edge_loader.addEdges(['node_0','node_1'])
edge_loader.addEdges(['node_0','node_1'],
rows=np.array([1,2,3,4]),
edge_type=('src', 'edge', 'src'))
node_dicts = {}
result = edge_loader.process(node_dicts)
assert len(result) == 2
snids, dnids = result[None]
assert np.array_equal(np.array([0,1,2,3,3,0,1,2,3,3]), snids)
assert np.array_equal(np.array([1,0,0,2,3,1,0,0,2,3]), dnids)
snids, dnids = result[('src', 'edge', 'src')]
assert np.array_equal(np.array([0,1,2,2]), snids)
assert np.array_equal(np.array([3,3,1,2]), dnids)
# with categorical relation
edge_loader = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname,
'graphs.csv'))
edge_loader.addCategoryRelationEdge([0,1,2],
src_type='src_t',
dst_type='dst_t')
edge_loader.addCategoryRelationEdge(['node_0','node_1','rel_2'],
src_type='src_t',
dst_type='dst_t')
edge_loader.addCategoryRelationEdge(['node_0','node_1','rel_1'],
rows=np.array([1,2,3,4]),
src_type='src',
dst_type='dst')
node_dicts = {'src_t':{'node1':3,
'node2':2,
'node3':1,
'node4':0}}
result = edge_loader.process(node_dicts)
assert len(result) == 4
snids, dnids = result[('src_t','A','dst_t')]
assert np.array_equal(np.array([3,2,1,0,0,0]), snids)
assert np.array_equal(np.array([0,1,1,2,3,3]), dnids)
snids, dnids = result[('src_t','B','dst_t')]
assert np.array_equal(np.array([0]), snids)
assert np.array_equal(np.array([2]), dnids)
snids, dnids = result[('src_t','C','dst_t')]
assert np.array_equal(np.array([3,2,1]), snids)
assert np.array_equal(np.array([0,1,1]), dnids)
snids, dnids = result[('src','A','dst')]
assert np.array_equal(np.array([0,1,2,2]), snids)
assert np.array_equal(np.array([0,0,1,2]), dnids)
def test_build_graph():
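    # Verify GraphLoader.process end to end: homogeneous and heterogeneous
    # graphs with node/edge features, labels, train/valid/test masks and
    # node id mappings.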
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_graph_edges(Path(tmpdirname), 'edges.csv')
create_edge_labels(Path(tmpdirname), 'edge_labels.csv')
create_node_labels(Path(tmpdirname), 'node_labels.csv')
# homogeneous graph loader (edge labels)
node_feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname, 'node_labels.csv'))
node_feat_loader.addCategoryFeature([0,1])
node_feat_loader.addMultiCategoryFeature([0,2], separator=',')
edge_label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname, 'edge_labels.csv'))
edge_label_loader.addSet([0,1,2],split_rate=[0.5,0.25,0.25])
edge_loader = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname, 'edges.csv'))
edge_loader.addEdges([0,1])
np.random.seed(0)
graphloader = dgl_graphloader.GraphLoader(name='example')
graphloader.appendEdge(edge_loader)
graphloader.appendLabel(edge_label_loader)
graphloader.appendFeature(node_feat_loader)
graphloader.process()
node_id_map = graphloader.node2id
assert None in node_id_map
assert len(node_id_map[None]) == 4
for idx, key in enumerate(['node1', 'node2', 'node3', 'node4']):
assert node_id_map[None][key] == idx
id_node_map = graphloader.id2node
assert None in id_node_map
assert len(id_node_map[None]) == 4
for idx, key in enumerate(['node1', 'node2', 'node3', 'node4']):
assert id_node_map[None][idx] == key
label_map = graphloader.label_map
assert len(label_map[None]) == 2
assert label_map[None][0] == 'A'
assert label_map[None][1] == 'C'
g = graphloader.graph
assert g.num_edges() == 9
assert np.array_equal(g.edata['labels'].long().numpy(),
np.array([[-1,-1],[-1,-1],[-1,-1],[-1,-1],[-1,-1],[0,1],[1,0],[1,0],[1,0]]))
assert th.nonzero(g.edata['train_mask']).shape[0] == 2
assert th.nonzero(g.edata['valid_mask']).shape[0] == 1
assert th.nonzero(g.edata['test_mask']).shape[0] == 1
assert np.allclose(g.ndata['nf'].numpy(),
np.array([[1,0,1,0,0,1,0,0,0],[1,0,0,0,1,1,1,0,0],[0,1,1,1,0,0,0,1,0],[1,0,0,0,0,0,1,0,1]]))
# heterogeneous graph loader (edge labels)
create_train_edge_labels(Path(tmpdirname), 'edge_train_labels.csv')
node_feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname, 'node_labels.csv'))
node_feat_loader.addCategoryFeature([0,1], node_type='a')
node_feat_loader.addMultiCategoryFeature([0,2], separator=',', node_type='a')
edge_label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname, 'edge_labels.csv'))
edge_label_loader.addSet([0,1,2],split_rate=[0.5,0.25,0.25], edge_type=('a', 'follow', 'b'))
edge_train_label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname, 'edge_train_labels.csv'))
edge_train_label_loader.addTrainSet([0,1,2], edge_type=('a', 'follow', 'b'))
edge_loader = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname, 'edges.csv'))
edge_loader.addEdges([0,1], edge_type=('a', 'follow', 'b'))
node_feat_loader2 = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname, 'node_labels.csv'))
node_feat_loader2.addCategoryFeature([0,1], node_type='b')
edge_loader2 = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname, 'edges.csv'))
edge_loader2.addEdges([0,1], edge_type=('b', 'follow', 'a'))
np.random.seed(0)
graphloader = dgl_graphloader.GraphLoader(name='example')
graphloader.appendEdge(edge_loader)
graphloader.appendEdge(edge_loader2)
graphloader.appendLabel(edge_label_loader)
graphloader.appendLabel(edge_train_label_loader)
graphloader.appendFeature(node_feat_loader)
graphloader.appendFeature(node_feat_loader2)
graphloader.process()
node_id_map = graphloader.node2id
assert 'a' in node_id_map
assert len(node_id_map['a']) == 4
for idx, key in enumerate(['node1', 'node2', 'node3', 'node4']):
assert node_id_map['a'][key] == idx
id_node_map = graphloader.id2node
assert 'a' in id_node_map
assert len(id_node_map['a']) == 4
for idx, key in enumerate(['node1', 'node2', 'node3', 'node4']):
assert id_node_map['a'][idx] == key
assert 'b' in node_id_map
assert len(node_id_map['b']) == 4
for idx, key in enumerate(['node2', 'node1', 'node3', 'node4']):
assert node_id_map['b'][key] == idx
assert 'b' in id_node_map
assert len(id_node_map['b']) == 4
for idx, key in enumerate(['node2', 'node1', 'node3', 'node4']):
assert id_node_map['b'][idx] == key
label_map = graphloader.label_map
assert len(label_map[('a', 'follow', 'b')]) == 2
assert label_map[('a', 'follow', 'b')][0] == 'A'
assert label_map[('a', 'follow', 'b')][1] == 'C'
g = graphloader.graph
assert g.num_edges(('a', 'follow', 'b')) == 11
assert g.num_edges(('b', 'follow', 'a')) == 5
assert np.array_equal(g.edges[('a', 'follow', 'b')].data['labels'].long().numpy(),
np.array([[-1,-1],[-1,-1],[-1,-1],[-1,-1],[-1,-1],[0,1],[1,0],[1,0],[1,0],[1,0],[1,0]]))
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['train_mask']).shape[0] == 4
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['valid_mask']).shape[0] == 1
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['test_mask']).shape[0] == 1
assert np.allclose(g.nodes['a'].data['nf'].numpy(),
np.array([[1,0,1,0,0,1,0,0,0],[1,0,0,0,1,1,1,0,0],[0,1,1,1,0,0,0,1,0],[1,0,0,0,0,0,1,0,1]]))
assert np.allclose(g.nodes['b'].data['nf'].numpy(),
np.array([[1.,0.,],[1.,0.],[0.,1.],[1.,0.]]))
# edge feat with edge labels
create_graph_feat_edges(Path(tmpdirname), 'edges_feats.csv')
node_feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname, 'node_labels.csv'))
node_feat_loader.addCategoryFeature([0,1], node_type='a')
node_feat_loader.addMultiCategoryFeature([0,2], separator=',', node_type='a')
edge_label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname, 'edge_labels.csv'))
edge_label_loader.addSet([0,1,2],split_rate=[0.5,0.25,0.25], edge_type=('a', 'follow', 'b'))
edge_feat_loader = dgl_graphloader.EdgeFeatureLoader(os.path.join(tmpdirname, 'edges_feats.csv'))
edge_feat_loader.addNumericalFeature([0,1,2], edge_type=('a', 'follow', 'b'))
node_feat_loader2 = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname, 'node_labels.csv'))
node_feat_loader2.addCategoryFeature([0,1], node_type='b')
edge_loader2 = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname, 'edges.csv'))
edge_loader2.addEdges([0,1], edge_type=('b', 'follow', 'a'))
np.random.seed(0)
graphloader = dgl_graphloader.GraphLoader(name='example')
graphloader.appendEdge(edge_loader2)
graphloader.appendLabel(edge_label_loader)
graphloader.appendFeature(edge_feat_loader)
graphloader.appendFeature(node_feat_loader)
graphloader.appendFeature(node_feat_loader2)
graphloader.process()
node_id_map = graphloader.node2id
assert 'b' in node_id_map
assert len(node_id_map['b']) == 4
for idx, key in enumerate(['node1', 'node2', 'node3', 'node4']):
assert node_id_map['b'][key] == idx
id_node_map = graphloader.id2node
assert 'b' in id_node_map
assert len(id_node_map['b']) == 4
for idx, key in enumerate(['node1', 'node2', 'node3', 'node4']):
assert id_node_map['b'][idx] == key
assert 'a' in node_id_map
assert len(node_id_map['a']) == 4
for idx, key in enumerate(['node2', 'node1', 'node3', 'node4']):
assert node_id_map['a'][key] == idx
assert 'a' in id_node_map
assert len(id_node_map['a']) == 4
for idx, key in enumerate(['node2', 'node1', 'node3', 'node4']):
assert id_node_map['a'][idx] == key
g = graphloader.graph
assert g.num_edges(('a', 'follow', 'b')) == 9
assert g.num_edges(('b', 'follow', 'a')) == 5
assert np.array_equal(g.edges[('a', 'follow', 'b')].data['labels'].long().numpy(),
np.array([[1,0],[1,0],[0,1],[1,0],[-1,-1],[-1,-1],[-1,-1],[-1,-1],[-1,-1]]))
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['train_mask']).shape[0] == 2
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['valid_mask']).shape[0] == 1
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['test_mask']).shape[0] == 1
assert np.allclose(g.edges[('a', 'follow', 'b')].data['ef'].numpy(),
np.array([[0.1],[0.2],[0.3],[0.4],[0.5],[0.6],[0.7],[0.8],[0.9]]))
# heterogeneous graph loader (edge no labels)
node_feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname, 'node_labels.csv'))
node_feat_loader.addCategoryFeature([0,1], node_type='a')
node_feat_loader.addMultiCategoryFeature([0,2], separator=',', node_type='a')
edge_label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname, 'edge_labels.csv'))
edge_label_loader.addSet([0,1],split_rate=[0.5,0.25,0.25], edge_type=('a', 'follow', 'b'))
edge_loader = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname, 'edges.csv'))
edge_loader.addEdges([0,1], edge_type=('a', 'follow', 'b'))
node_feat_loader2 = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname, 'node_labels.csv'))
node_feat_loader2.addCategoryFeature([0,1], node_type='b')
edge_loader2 = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname, 'edges.csv'))
edge_loader2.addEdges([0,1], edge_type=('b', 'follow', 'a'))
np.random.seed(0)
graphloader = dgl_graphloader.GraphLoader(name='example')
graphloader.appendEdge(edge_loader)
graphloader.appendEdge(edge_loader2)
graphloader.appendLabel(edge_label_loader)
graphloader.appendFeature(node_feat_loader)
graphloader.appendFeature(node_feat_loader2)
graphloader.process()
label_map = graphloader.label_map
assert len(label_map) == 0
g = graphloader.graph
assert g.num_edges(('a', 'follow', 'b')) == 9
assert g.num_edges(('b', 'follow', 'a')) == 5
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['train_mask']).shape[0] == 2
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['valid_mask']).shape[0] == 1
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['test_mask']).shape[0] == 1
assert np.allclose(g.nodes['a'].data['nf'].numpy(),
np.array([[1,0,1,0,0,1,0,0,0],[1,0,0,0,1,1,1,0,0],[0,1,1,1,0,0,0,1,0],[1,0,0,0,0,0,1,0,1]]))
assert np.allclose(g.nodes['b'].data['nf'].numpy(),
np.array([[1.,0.,],[1.,0.],[0.,1.],[1.,0.]]))
# heterogeneous graph loader (node labels)
create_node_valid_labels(Path(tmpdirname), 'node_valid.csv')
create_node_test_labels(Path(tmpdirname), 'node_test.csv')
create_node_feats(Path(tmpdirname), 'node_feat.csv')
node_label_loader = dgl_graphloader.NodeLabelLoader(os.path.join(tmpdirname, 'node_labels.csv'))
node_label_loader.addTrainSet([0,1], node_type='a')
valid_label_loader = dgl_graphloader.NodeLabelLoader(os.path.join(tmpdirname, 'node_valid.csv'))
valid_label_loader.addValidSet([0,1], node_type='a')
test_label_loader = dgl_graphloader.NodeLabelLoader(os.path.join(tmpdirname, 'node_test.csv'))
test_label_loader.addTestSet([0,1], node_type='a')
edge_feat_loader = dgl_graphloader.EdgeFeatureLoader(os.path.join(tmpdirname, 'edges_feats.csv'))
edge_feat_loader.addNumericalFeature([0,1,2], edge_type=('a', 'in', 'aa'))
edge_loader = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname, 'edges.csv'))
edge_loader.addEdges([0,1], edge_type=('a', 'follow', 'a'))
node_feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname, 'node_feat.csv'))
node_feat_loader.addCategoryFeature([0,1], node_type='a')
node_feat_loader.addMultiCategoryFeature([0,2], separator=',', node_type='a')
graphloader = dgl_graphloader.GraphLoader(name='example')
graphloader.appendEdge(edge_loader)
graphloader.appendLabel(node_label_loader)
graphloader.appendLabel(valid_label_loader)
graphloader.appendLabel(test_label_loader)
graphloader.appendFeature(edge_feat_loader)
graphloader.appendFeature(node_feat_loader)
graphloader.process()
node_id_map = graphloader.node2id
assert 'a' in node_id_map
assert len(node_id_map['a']) == 8
for idx, key in enumerate(['node1', 'node2', 'node3', 'node4', 'node5', 'node6', 'node7', 'node8']):
assert node_id_map['a'][key] == idx
id_node_map = graphloader.id2node
assert 'a' in id_node_map
assert len(id_node_map['a']) == 8
for idx, key in enumerate(['node1', 'node2', 'node3', 'node4', 'node5', 'node6', 'node7', 'node8']):
assert id_node_map['a'][idx] == key
assert 'aa' in node_id_map
assert len(node_id_map['aa']) == 4
for idx, key in enumerate(['node4', 'node3', 'node2', 'node1']):
assert node_id_map['aa'][key] == idx
assert 'aa' in id_node_map
assert len(id_node_map['aa']) == 4
for idx, key in enumerate(['node4', 'node3', 'node2', 'node1']):
assert id_node_map['aa'][idx] == key
label_map = graphloader.label_map
assert len(label_map['a']) == 2
assert label_map['a'][0] == 'A'
assert label_map['a'][1] == 'C'
g = graphloader.graph
assert g.num_edges(('a', 'in', 'aa')) == 9
assert g.num_edges(('a', 'follow', 'a')) == 5
assert np.array_equal(g.nodes['a'].data['train_mask'].long().numpy(), np.array([1,1,1,1,0,0,0,0]))
assert np.array_equal(g.nodes['a'].data['valid_mask'].long().numpy(), np.array([0,0,0,0,1,1,0,0]))
assert np.array_equal(g.nodes['a'].data['test_mask'].long().numpy(), np.array([0,0,0,0,0,0,1,1]))
assert np.allclose(g.nodes['a'].data['nf'].numpy(),
np.array([[1,0,1,0,0,1,0,0,0],[1,0,0,0,1,1,1,0,0],
[0,1,1,1,0,0,0,1,0],[1,0,0,0,0,0,1,0,1],
[1,0,1,0,0,1,0,0,0],[0,1,0,0,1,1,1,0,0],
[1,0,1,0,0,1,0,0,0],[1,0,0,0,1,1,1,0,0]]))
assert np.allclose(g.edges[('a', 'in', 'aa')].data['ef'].numpy(),
np.array([[0.1],[0.2],[0.3],[0.4],[0.5],[0.6],[0.7],[0.8],[0.9]]))
def test_add_reverse_edge():
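    # Verify GraphLoader.addReverseEdge: rev-* relations are added with
    # rev_train/valid/test masks while labels stay on the original relation.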
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
create_graph_edges(Path(tmpdirname), 'edges.csv')
create_edge_labels(Path(tmpdirname), 'edge_labels.csv')
create_node_labels(Path(tmpdirname), 'node_labels.csv')
create_train_edge_labels(Path(tmpdirname), 'edge_train_labels.csv')
node_feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname, 'node_labels.csv'))
node_feat_loader.addCategoryFeature([0,1], node_type='a')
node_feat_loader.addMultiCategoryFeature([0,2], separator=',', node_type='a')
edge_label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname, 'edge_labels.csv'))
edge_label_loader.addSet([0,1,2],split_rate=[0.5,0.25,0.25], edge_type=('a', 'follow', 'b'))
edge_train_label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname, 'edge_train_labels.csv'))
edge_train_label_loader.addTrainSet([0,1,2], edge_type=('a', 'follow', 'b'))
edge_loader = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname, 'edges.csv'))
edge_loader.addEdges([0,1], edge_type=('a', 'follow', 'b'))
node_feat_loader2 = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname, 'node_labels.csv'))
node_feat_loader2.addCategoryFeature([0,1], node_type='b')
edge_loader2 = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname, 'edges.csv'))
edge_loader2.addEdges([0,1], edge_type=('b', 'follow', 'a'))
np.random.seed(0)
graphloader = dgl_graphloader.GraphLoader(name='example')
graphloader.appendEdge(edge_loader)
graphloader.appendEdge(edge_loader2)
graphloader.appendLabel(edge_label_loader)
graphloader.appendLabel(edge_train_label_loader)
graphloader.appendFeature(node_feat_loader)
graphloader.appendFeature(node_feat_loader2)
graphloader.addReverseEdge()
graphloader.process()
node_id_map = graphloader.node2id
assert 'a' in node_id_map
assert len(node_id_map['a']) == 4
for idx, key in enumerate(['node1', 'node2', 'node3', 'node4']):
assert node_id_map['a'][key] == idx
id_node_map = graphloader.id2node
assert 'a' in id_node_map
assert len(id_node_map['a']) == 4
for idx, key in enumerate(['node1', 'node2', 'node3', 'node4']):
assert id_node_map['a'][idx] == key
assert 'b' in node_id_map
assert len(node_id_map['b']) == 4
for idx, key in enumerate(['node2', 'node1', 'node3', 'node4']):
assert node_id_map['b'][key] == idx
assert 'b' in id_node_map
assert len(id_node_map['b']) == 4
for idx, key in enumerate(['node2', 'node1', 'node3', 'node4']):
assert id_node_map['b'][idx] == key
label_map = graphloader.label_map
assert len(label_map[('a', 'follow', 'b')]) == 2
assert label_map[('a', 'follow', 'b')][0] == 'A'
assert label_map[('a', 'follow', 'b')][1] == 'C'
g = graphloader.graph
assert g.num_edges(('a', 'follow', 'b')) == 11
assert g.num_edges(('b', 'follow', 'a')) == 5
assert g.num_edges(('b', 'rev-follow', 'a')) == 11
assert g.num_edges(('a', 'rev-follow', 'b')) == 5
assert 'labels' in g.edges[('a', 'follow', 'b')].data
assert 'labels' not in g.edges[('b', 'rev-follow', 'a')].data
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['train_mask']).shape[0] == 4
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['valid_mask']).shape[0] == 1
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['test_mask']).shape[0] == 1
assert th.nonzero(g.edges[('b', 'rev-follow', 'a')].data['rev_train_mask']).shape[0] == 4
assert th.nonzero(g.edges[('b', 'rev-follow', 'a')].data['rev_valid_mask']).shape[0] == 1
assert th.nonzero(g.edges[('b', 'rev-follow', 'a')].data['rev_test_mask']).shape[0] == 1
assert np.allclose(g.nodes['a'].data['nf'].numpy(),
np.array([[1,0,1,0,0,1,0,0,0],[1,0,0,0,1,1,1,0,0],[0,1,1,1,0,0,0,1,0],[1,0,0,0,0,0,1,0,1]]))
assert np.allclose(g.nodes['b'].data['nf'].numpy(),
np.array([[1.,0.,],[1.,0.],[0.,1.],[1.,0.]]))
# heterogeneous graph loader (edge no labels)
node_feat_loader = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname, 'node_labels.csv'))
node_feat_loader.addCategoryFeature([0,1], node_type='a')
node_feat_loader.addMultiCategoryFeature([0,2], separator=',', node_type='a')
edge_label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname, 'edge_labels.csv'))
edge_label_loader.addSet([0,1],split_rate=[0.5,0.25,0.25], edge_type=('a', 'follow', 'b'))
edge_loader = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname, 'edges.csv'))
edge_loader.addEdges([0,1], edge_type=('a', 'follow', 'b'))
node_feat_loader2 = dgl_graphloader.NodeFeatureLoader(os.path.join(tmpdirname, 'node_labels.csv'))
node_feat_loader2.addCategoryFeature([0,1], node_type='b')
edge_loader2 = dgl_graphloader.EdgeLoader(os.path.join(tmpdirname, 'edges.csv'))
edge_loader2.addEdges([0,1], edge_type=('b', 'follow', 'a'))
np.random.seed(0)
graphloader = dgl_graphloader.GraphLoader(name='example')
graphloader.appendEdge(edge_loader)
graphloader.appendEdge(edge_loader2)
graphloader.appendLabel(edge_label_loader)
graphloader.appendFeature(node_feat_loader)
graphloader.appendFeature(node_feat_loader2)
graphloader.addReverseEdge()
graphloader.process()
label_map = graphloader.label_map
assert len(label_map) == 0
g = graphloader.graph
assert g.num_edges(('a', 'follow', 'b')) == 9
assert g.num_edges(('b', 'follow', 'a')) == 5
assert g.num_edges(('b', 'rev-follow', 'a')) == 9
assert g.num_edges(('a', 'rev-follow', 'b')) == 5
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['train_mask']).shape[0] == 2
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['valid_mask']).shape[0] == 1
assert th.nonzero(g.edges[('a', 'follow', 'b')].data['test_mask']).shape[0] == 1
assert th.nonzero(g.edges[('b', 'rev-follow', 'a')].data['rev_train_mask']).shape[0] == 2
assert th.nonzero(g.edges[('b', 'rev-follow', 'a')].data['rev_valid_mask']).shape[0] == 1
assert th.nonzero(g.edges[('b', 'rev-follow', 'a')].data['rev_test_mask']).shape[0] == 1
assert np.allclose(g.nodes['a'].data['nf'].numpy(),
np.array([[1,0,1,0,0,1,0,0,0],[1,0,0,0,1,1,1,0,0],[0,1,1,1,0,0,0,1,0],[1,0,0,0,0,0,1,0,1]]))
assert np.allclose(g.nodes['b'].data['nf'].numpy(),
np.array([[1.,0.,],[1.,0.],[0.,1.],[1.,0.]]))
create_graph_edges(Path(tmpdirname), 'edge_label.csv')
label_loader = dgl_graphloader.EdgeLabelLoader(os.path.join(tmpdirname,
'edge_label.csv'))
# only existence of the edge
label_loader.addRelationalTrainSet([0,1,2],rows=[0,1,2,3])
label_loader.addRelationalTrainSet([0,1,3],rows=[2,3])
label_loader.addRelationalValidSet([0,1,3],rows=[0])
label_loader.addRelationalTestSet([0,1,3],rows=[1,4])
graphloader = dgl_graphloader.GraphLoader(name='example')
graphloader.appendLabel(label_loader)
graphloader.addReverseEdge()
graphloader.process()
label_map = graphloader.label_map
assert len(label_map) == 0
g = graphloader.graph
assert th.nonzero(g.edges[('node','A','node')].data['train_mask']).shape[0] == 4
assert th.nonzero(g.edges[('node','A','node')].data['valid_mask']).shape[0] == 0
assert th.nonzero(g.edges[('node','A','node')].data['test_mask']).shape[0] == 1
assert th.nonzero(g.edges[('node','rev-A','node')].data['rev_train_mask']).shape[0] == 4
assert th.nonzero(g.edges[('node','rev-A','node')].data['rev_valid_mask']).shape[0] == 0
assert th.nonzero(g.edges[('node','rev-A','node')].data['rev_test_mask']).shape[0] == 1
assert th.nonzero(g.edges[('node','B','node')].data['train_mask']).shape[0] == 1
assert th.nonzero(g.edges[('node','B','node')].data['valid_mask']).shape[0] == 0
assert th.nonzero(g.edges[('node','B','node')].data['test_mask']).shape[0] == 0
assert th.nonzero(g.edges[('node','rev-B','node')].data['rev_train_mask']).shape[0] == 1
assert th.nonzero(g.edges[('node','rev-B','node')].data['rev_valid_mask']).shape[0] == 0
assert th.nonzero(g.edges[('node','rev-B','node')].data['rev_test_mask']).shape[0] == 0
assert th.nonzero(g.edges[('node','C','node')].data['train_mask']).shape[0] == 1
assert th.nonzero(g.edges[('node','C','node')].data['valid_mask']).shape[0] == 1
assert th.nonzero(g.edges[('node','C','node')].data['test_mask']).shape[0] == 1
assert th.nonzero(g.edges[('node','rev-C','node')].data['rev_train_mask']).shape[0] == 1
assert th.nonzero(g.edges[('node','rev-C','node')].data['rev_valid_mask']).shape[0] == 1
assert th.nonzero(g.edges[('node','rev-C','node')].data['rev_test_mask']).shape[0] == 1
if __name__ == '__main__':
# test Feature Loader
test_node_category_feature_loader()
test_node_numerical_feature_loader()
test_node_word2vec_feature_loader()
test_edge_numerical_feature_loader()
# test Label Loader
test_node_label_loader()
test_edge_label_loader()
# test Edge Loader
test_edge_loader()
# test feature process
test_node_feature_process()
test_edge_feature_process()
# test label process
test_node_label_process()
test_edge_label_process()
test_relation_edge_label_process()
# test edge process
test_edge_process()
test_build_graph()
test_add_reverse_edge()
|
from datetime import date, timedelta
from allocation.domain import events
from allocation.domain.model import Product, OrderLine, Batch
today = date.today()
tomorrow = today + timedelta(days=1)
later = tomorrow + timedelta(days=10)
def test_prefers_warehouse_batches_to_shipments():
in_stock_batch = Batch("in-stock-batch", "RETRO-CLOCK", 100, eta=None)
shipment_batch = Batch("shipment-batch", "RETRO-CLOCK", 100, eta=tomorrow)
product = Product(sku="RETRO-CLOCK", batches=[in_stock_batch, shipment_batch])
line = OrderLine("oref", "RETRO-CLOCK", 10)
product.allocate(line)
assert in_stock_batch.available_quantity == 90
assert shipment_batch.available_quantity == 100
def test_prefers_earlier_batches():
earliest = Batch("speedy-batch", "MINIMALIST-SPOON", 100, eta=today)
medium = Batch("normal-batch", "MINIMALIST-SPOON", 100, eta=tomorrow)
latest = Batch("slow-batch", "MINIMALIST-SPOON", 100, eta=later)
product = Product(sku="MINIMALIST-SPOON", batches=[medium, earliest, latest])
line = OrderLine("order1", "MINIMALIST-SPOON", 10)
product.allocate(line)
assert earliest.available_quantity == 90
assert medium.available_quantity == 100
assert latest.available_quantity == 100
def test_returns_allocated_batch_ref():
in_stock_batch = Batch("in-stock-batch-ref", "HIGHBROW-POSTER", 100, eta=None)
shipment_batch = Batch("shipment-batch-ref", "HIGHBROW-POSTER", 100, eta=tomorrow)
line = OrderLine("oref", "HIGHBROW-POSTER", 10)
product = Product(sku="HIGHBROW-POSTER", batches=[in_stock_batch, shipment_batch])
allocation = product.allocate(line)
assert allocation == in_stock_batch.reference
def test_outputs_allocated_event():
batch = Batch("batchref", "RETRO-LAMPSHADE", 100, eta=None)
line = OrderLine("oref", "RETRO-LAMPSHADE", 10)
product = Product(sku="RETRO-LAMPSHADE", batches=[batch])
product.allocate(line)
expected = events.Allocated(
orderid="oref", sku="RETRO-LAMPSHADE", qty=10, batchref=batch.reference
)
assert product.events[-1] == expected
def test_records_out_of_stock_event_if_cannot_allocate():
batch = Batch('batch1', 'SMALL-FORK', 10, eta=today)
product = Product(sku="SMALL-FORK", batches=[batch])
product.allocate(OrderLine('order1', 'SMALL-FORK', 10))
allocation = product.allocate(OrderLine('order2', 'SMALL-FORK', 1))
assert product.events[-1] == events.OutOfStock(sku="SMALL-FORK")
assert allocation is None
def test_increments_version_number():
line = OrderLine('oref', "SCANDI-PEN", 10)
product = Product(sku="SCANDI-PEN", batches=[Batch('b1', "SCANDI-PEN", 100, eta=None)])
product.version_number = 7
product.allocate(line)
assert product.version_number == 8
|
from unittest import TestCase
from unittest.mock import MagicMock
from src.Core import Core
from src.Processor import Processor
from src.Queue import Queue
class TestProcessor(TestCase):
def setUp(self):
mock_core = Core()
mock_core.canHostEntity = MagicMock(return_value=True)
mock_core.nextArrival = MagicMock()
mock_core.decreaseEntitiesSystem = MagicMock()
self.processorObj = Processor(mock_core)
def tearDown(self):
self.processorObj = None
def test_isIdle(self):
self.assertTrue(self.processorObj.isIdle(), "The processor should be idle")
def test_isIdle_notIdle(self):
self.processorObj.hostedEntity = 1
self.assertFalse(self.processorObj.isIdle(), "The processor shouldn't be idle")
def test_endService_empty_queue(self):
mock_input = Queue()
mock_input.getQueueLength = MagicMock(return_value=0)
self.processorObj.addInput(mock_input)
self.processorObj.hostedEntity = 1
self.assertFalse(self.processorObj.isIdle(), "The processor shouldn't be idle")
self.processorObj.endService()
self.assertTrue(self.processorObj.isIdle(), "The processor should be idle")
def test_endService_non_empty_queue(self):
mock_input = Queue()
mock_input.getQueueLength = MagicMock(return_value=1)
mock_input.getEntity = MagicMock()
self.processorObj.addInput(mock_input)
self.processorObj.hostedEntity = 1
self.assertFalse(self.processorObj.isIdle(), "The processor shouldn't be idle")
self.processorObj.endService()
self.assertFalse(self.processorObj.isIdle(), "The processor shouldn't be idle")
|
#!/usr/bin/env python
#Run this .py file from PowerShell (Windows) or a console (Linux). Don't edit the code unless you know what you're doing; it may (and most likely will) cause issues later on.
#SINGLE call to libwide.ini
######Importing dependencies
import platform
import time
import os
import subprocess
import base64
import sys
absdir = os.path.dirname(sys.argv[0])
fnm = "/libwide.ini"
fpt = str(absdir)+fnm
######FUNCTIONS
def wincheck():
os.startfile(fpt)
print "Close this program if you are happy with the settings in the configuration file."
print "Rerun this program if you aren't happy with the settings in the configuration file."
time.sleep(5)
exit()
def unixcheck():
prog = "leafpad"
subprocess.Popen([prog, fpt])
time.sleep(5)
y = raw_input("Press <Enter> to close")
exit()
######Setting Variables
configfile = open(fpt, "w+")
URL = raw_input("What is the Internet Address of this library's catalogue? \n")
server = raw_input("What is the IP address of the Pi-Lib-server?\n")
password = raw_input("Give the master-password for Pi-Lib-software, use only numbers and letters!!.\n")
password = password.strip()
#Cipher to hide the password from viewers.
#The goal is to obscure the password instead of storing it as a plain-text string.
#Code attribution (DATE: 25/11/2016): I modified the code shared by Stack Overflow user smehmood in an answer to Stack Overflow user RexE (see: http://stackoverflow.com/questions/2490334/simple-way-to-encode-a-string-according-to-a-password)
encoded_chars = []
for i in xrange(len(password)):
key = "freepilibsoftwareforeveryoneonthislovelyplanetofours"
key_c = key[i % len(key)]
    encoded_c = chr((ord(password[i]) + ord(key_c)) % 256)  # parentheses keep the result within chr()'s 0-255 range
encoded_chars.append(encoded_c)
encoded_string = "".join(encoded_chars)
password = base64.urlsafe_b64encode(encoded_string)
#End of code attribution
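#Illustrative sketch (not used by this script): the obfuscation above is reversible.
#A hypothetical helper such as the one below could recover the stored password,
#assuming the same key string; it is defined but never called, so behaviour is unchanged.
def decode_password(encoded):
    key = "freepilibsoftwareforeveryoneonthislovelyplanetofours"
    data = base64.urlsafe_b64decode(encoded)
    decoded_chars = []
    for i in range(len(data)):
        key_c = key[i % len(key)]
        decoded_chars.append(chr((ord(data[i]) - ord(key_c)) % 256))
    return "".join(decoded_chars)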
URL = URL.strip()
y = "http://"
if URL[0:4] != "http":
URL = y +URL
server = server.strip()
plf = platform.system()
######Writing to file
configfile.write("Do not modify this configuration file on your own, use the 'FIRST Libwide configuration.py' script to modify if needed.\nThis file is needed to store the Pi-Lib-Password, the URL of the catalogue and the IP of the Pi-Lib-server.")
configfile.write("\n")
configfile.write("What follows are your variables:\n\n")
#Variables are stored on lines 6, 10 and 14
configfile.write("""The Internet Address of this library's catalogue (Variable stored below this line): \n%s\n\n
The IP address of the Pi-Lib-server (Variable stored below this line): \n%s\n\n
The master-password for Pi-Lib-software. (Variable stored below this line): \n%s\n\n""" % (URL, server, password))
configfile.close()
#######Verification
print "Do you wish to verify the settings by opening the configuration file?"
answer = raw_input("Answer with Y or N: ")
answer = answer.lower()
if answer == "n":
print "The program will terminate, you can close this window."
time.sleep(5)
exit()
elif answer == "y":
if plf == "Windows":
wincheck()
elif plf == "Linux":
unixcheck()
else:
print "The Pi-Lib-software package wasn't designed to handle this OS, the program will terminate."
time.sleep(5)
exit()
|
from math import pi
from approxeng.chassis.simulation import Simulation, SimulationDrive
from approxeng.chassis.simulationplot import show_plot
from approxeng.chassis.util import get_regular_triangular_chassis
simulation = Simulation()
drive = SimulationDrive(simulation=simulation,
chassis=get_regular_triangular_chassis(wheel_distance=204,
wheel_radius=29.5,
max_rotations_per_second=500 / 60))
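# Note: 500 / 60 expresses 500 revolutions per minute as revolutions per second (about 8.33),
# which is what max_rotations_per_second expects.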
show_plot(simulation).start()
simulation.start()
drive.drive_at(x=70, y=150, speed=200, turn_speed=pi)
while True:
    pass  # block indefinitely so the script keeps running while the simulation and plot are active
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from operator import add
from operator import mul
from functools import partial
def test():
add1 = partial(add, 1)
mul100 = partial(mul, 100)
print(add1(10))
print(mul100(2))
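# Note: partial(add, 1) pre-binds 1 as the first positional argument, so
# add1(10) evaluates add(1, 10) == 11 and mul100(2) evaluates mul(100, 2) == 200.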
if __name__ == '__main__':
test()
|
import gym
import gym_grand_prix
import numpy as np
from math import pi
class Agent:
def __init__(self):
self.prev_sin = None
self.prev_cos = None
self.prev_steering = None
self.prev_values_valid = False
def optimal_action(self, sensor_info, p1=0.45, p2=1.0):
"""
Найти оптимальное управление action = (steering, acceleration)
как функцию данных датчиков
"""
collision = False
v = sensor_info[0]
sin = -sensor_info[1]
ladar = np.array(sensor_info[2:])
l = len(ladar)
        n = int((l - 1) / 2)  # index in the ladar array for the forward direction
        s = ladar[n]  # distance to the wall straight ahead
        # top-priority action: if a wall is close straight ahead, brake!
        if v > pow(p1 * s, 0.5):
            action = (0, -.75)
            # print("Emergency braking!")
return action
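        # Worked example (illustrative): with the default p1 = 0.45 and a wall
        # s = 20 units straight ahead, emergency braking triggers once v exceeds
        # sqrt(0.45 * 20) = 3.0.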
        # determine the cosine of the angle, knowing the sine and the steering history
        if self.prev_values_valid:
            cos = pow(1 - sin * sin, 0.5)  # cosine of the angle between the velocity vector and the direction
            # to the centre (sign still unknown)
            if self.prev_steering == 0 and sin > self.prev_sin:  # the sine increased, so we are approaching the centre
                # and the cosine is negative
                cos *= -1.
            else:
                if sin * self.prev_sin < 0 and abs(sin) > .1:  # the sine changed sign abruptly, probably due to
                    # a bounce off the wall.
                    if self.prev_cos is None:
                        cos = None
                    else:
                        cos *= -np.sign(self.prev_cos)  # so the cosine changed sign as well.
                    collision = True
                    # print("Collision detected!")
                else:
                    cos = None
        else:
            # take a step forward to determine the orientation (the sign of the cosine)
action = (0, .75)
# print("sin=%.3f" % (sin))
# print("action: ", action)
self.prev_sin = sin
self.prev_steering = action[0]
self.prev_values_valid = True
return action
        fi = np.linspace(-pi / 2, pi / 2, l)  # angles between the velocity vector and the ladar directions
        if cos is None:
            dist = ladar * (sin * np.cos(fi))
        else:
            dist = ladar * (
                sin * np.cos(fi) + cos * np.sin(fi))  # projections of the ladar distances onto the "optimal"
            # direction (perpendicular to the direction towards the centre)
        i = np.argmax(dist)  # index of the maximum projection value
        if i == n:  # we are already moving in the right direction, so either accelerate or brake
            action = (0, .75) if v < pow(p1 * max(s - p2, 0), 0.5) else (0, -.75)
        else:  # there is a better direction, steer that way
            action = (1, .75) if i > n else (-1, .75)
# print(dist, i)
# if cos is None:
# print("sin=%.3f cos Unknown" % sin)
# else:
# print("sin=%.3f cos=%.3f" % (sin, cos))
# print("action: ", action)
# input()
self.prev_sin = sin
self.prev_cos = cos
self.prev_steering = action[0]
self.prev_values_valid = True
return action
env = gym.make('GrandPrix-v0')
done = False
vision = env.reset()
a = Agent()
while not done:
action = a.optimal_action(vision)
vision, reward, done, _ = env.step(action)
env.render()
# print(f"New state ={vision}")
# print(f"reward={reward} , Done={done}")
env.close()
|
import string
class City:
def __init__(self, name):
self.name = string.capwords(name, sep = None)
def __str__(self):
return self.name
def __bool__(self):
if self.name[-1] in ['a','e','i','o','u']:
return False
return True
p1 = City('new york')
print(p1)  # prints "New York"
print(bool(p1))  # prints "True"
p2 = City('SaN frANCISco')
print(p2)  # prints "San Francisco"
print(p2 == True)  # prints "False"
|
from flexget.components.pending_approval.db import PendingEntry
from flexget.manager import Session
class TestPendingApproval:
config = """
tasks:
test:
mock:
- {title: 'title 1', url: 'http://localhost/title1', other_attribute: 'bla'}
pending_approval: yes
"""
def test_pending_approval(self, execute_task, manager):
task = execute_task('test')
assert len(task.all_entries) == 1
assert len(task.rejected) == 1
assert len(task.accepted) == 0
# Mark entry as approved, this will be done by CLI/API
with Session() as session:
pnd_entry = session.query(PendingEntry).first()
pnd_entry.approved = True
task = execute_task('test')
assert len(task.all_entries) == 2
assert len(task.rejected) == 0
assert len(task.accepted) == 1
assert task.find_entry(other_attribute='bla')
task = execute_task('test')
assert len(task.all_entries) == 1
assert len(task.rejected) == 1
assert len(task.accepted) == 0
|
import numpy as np
import matplotlib.pyplot as plt
from uncertainties import correlated_values
from scipy.optimize import curve_fit
from uncertainties.unumpy import nominal_values as noms
from uncertainties.unumpy import std_devs as stds
from lmfit import Model
from scipy.special import wofz
from scipy.special import erf
from lmfit.models import ConstantModel
from lmfit.model import save_modelresult
from lmfit.model import load_modelresult
from uncertainties import ufloat
def voigt(x, x0, sigma, gamma):
"""
Voigt function as model for the peaks. Calculated numerically
with the complex errorfunction wofz
(https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.special.wofz.html)
"""
return np.real(wofz(((x - x0) + 1j*gamma) / sigma / np.sqrt(2)))
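# Illustrative note (not used below): with gamma = 0 the expression reduces to a
# unit-height Gaussian exp(-(x - x0)**2 / (2 * sigma**2)), and at the line centre
# voigt(x0, x0, sigma, 0) == wofz(0).real == 1.0, which is a quick sanity check.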
def poly(x, a):
"""
Constant function as model for the background.
"""
return [a for i in x]
class ramanspectrum(object):
def __init__(self, data_file, label, peakfile = None, baselinefile = None, fitfile = None):
self.x, self.y = np.genfromtxt(data_file, unpack = True)
self.maxyvalue = np.max(self.y)
self.y = self.y / self.maxyvalue #norm the intensity for faster fit
self.label = label
self.peakfile = peakfile
self.baselinefile = baselinefile
self.fitfile = fitfile
def PlotRawData(self, show = True, ax = None):
"""
Creates a plot of the raw data. show = True will show the plot, show = False will return a matplotlib object
"""
        if ax is not None:
            return ax.plot(self.x, self.y, 'kx', label = 'Messdaten', linewidth = 0.5)
        if show:
            plt.plot(self.x, self.y, 'k-', label = 'Messdaten')
            plt.show()
        else:
            return plt.plot(self.x, self.y, 'bx', label = 'Messdaten', linewidth = 0.5)
def SelectPeaks(self):
"""
Function opens a Window with the data, you can choose initial values for the peaks by clicking on the plot.
"""
fig, ax = plt.subplots()
ax.plot(self.x, self.y)
polyparams = self.Fitbaseline()
ax.plot(self.x, poly(self.x, *noms(polyparams)), 'r-')
x = []
y = []
def onclickpeaks(event):
if event.button:
x.append(event.xdata)
y.append(event.ydata)
plt.plot(event.xdata, event.ydata, 'ko')
fig.canvas.draw()
cid = fig.canvas.mpl_connect('button_press_event', onclickpeaks)
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.show()
np.savetxt(self.label + '/locpeak_' + self.label + '.txt', np.transpose([np.array(x), np.array(y)])) # store the chosen initial values
self.peakfile = self.label + '/locpeak_' + self.label + '.txt'
def SelectBaseline(self):
"""
Function opens a window with the data, you can select the regions that do not belong to background signal by clicking.
"""
fig, ax = plt.subplots()
ax.plot(self.x, self.y)
ax.set_title('Baseline-Fit')
ax.set_ylim(bottom = 0)
x = []
def onclickbase(event):
if event.button:
x.append(event.xdata)
plt.vlines(x = event.xdata, color = 'r', linestyle = '--', ymin = 0, ymax = np.max(self.y))
                if len(x) % 2 == 0 and len(x) != 1:
barx0 = np.array([(x[-1] - x[-2])/2])
height = np.array([np.max(self.y)])
width = np.array([x[-1] - x[-2]])
plt.bar(x[-2], height = height, width = width, align = 'edge',facecolor="red", alpha=0.2, edgecolor="black",linewidth = 5, ecolor = 'black', bottom = 0)
fig.canvas.draw()
cid = fig.canvas.mpl_connect('button_press_event', onclickbase)
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.show()
np.savetxt(self.label + '/baseline_' + self.label + '.txt', np.array(x))
self.baselinefile = self.label + '/baseline_'+ self.label + '.txt'
def SelectSpectrum(self):
"""
Select the interesting region in the spectrum.
"""
fig, ax = plt.subplots()
ax.plot(self.x, self.y)
ax.set_title('Select Spectrum')
ax.set_ylim(bottom = 0)
x = []
def onclickbase(event):
if event.button:
x.append(event.xdata)
plt.vlines(x = event.xdata, color = 'g', linestyle = '--', ymin = 0, ymax = np.max(self.y))
                if len(x) % 2 == 0 and len(x) != 1:
barx0 = np.array([(x[-1] - x[-2])/2])
height = np.array([np.max(self.y)])
width = np.array([x[-1] - x[-2]])
plt.bar(x[-2], height = height, width = width, align = 'edge',facecolor="green", alpha=0.2, edgecolor="black",linewidth = 5, ecolor = 'black', bottom = 0)
fig.canvas.draw()
cid = fig.canvas.mpl_connect('button_press_event', onclickbase)
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.show()
self.y = self.y[(self.x > x[0]) & (self.x < x[-1])]
self.x = self.x[(self.x > x[0]) & (self.x < x[-1])]
np.savetxt(self.label + '/spectrumborders_' + self.label + '.txt', np.array(x))
def FitSpectrumInit(self, label):
"""
Fit the spectrum with the fit params of another spectrum (given by label) as initial values. Useful when you fit big number of similar spectra.
"""
borders = np.genfromtxt(label + '/spectrumborders_' + label + '.txt', unpack = True)
np.savetxt(self.label + '/spectrumborders_' + self.label + '.txt', borders)
self.y = self.y[(self.x > borders[0]) & (self.x < borders[-1])]
self.x = self.x[(self.x > borders[0]) & (self.x < borders[-1])]
FitData = np.load(label + '/fitparams_' + label + '.npz')
baseline = FitData['c'] / self.maxyvalue
ctr = FitData['x0']
sigma = FitData['sigma']
gamma = FitData['gamma']
ramanmodel = ConstantModel()
ramanmodel.set_param_hint('c', value = baseline[0], min = 0)
for i in range(len(sigma)):
prefix = 'p' + str(i + 1)
tempvoigt = Model(func = voigt, prefix = prefix)
tempvoigt.set_param_hint(prefix + 'x0', value = ctr[i], min = 0)
tempvoigt.set_param_hint(prefix + 'sigma', value = sigma[i], min = 0)
tempvoigt.set_param_hint(prefix + 'gamma', value = gamma[i], min = 0)
tempvoigt.set_param_hint(prefix + 'height', expr = 'wofz(((0) + 1j*'+ prefix + 'gamma) / '+ prefix + 'sigma / sqrt(2)).real')
tempvoigt.set_param_hint(prefix + 'fwhm', expr = '0.5346 * 2 *' + prefix + 'gamma + sqrt(0.2166 * (2*' + prefix + 'gamma)**2 + (2 * ' + prefix + 'sigma * sqrt(2 * log(2) ) )**2 )')
ramanmodel += tempvoigt
pars = ramanmodel.make_params()
fitresult = ramanmodel.fit(self.y, pars, x = self.x, scale_covar = True)
plt.clf()
comps = fitresult.eval_components()
xplot = np.linspace(self.x[0], self.x[-1], 1000)
plt.plot(self.x, self.y* self.maxyvalue, 'r-')
plt.plot(self.x, fitresult.best_fit* self.maxyvalue)
for i in range(0, len(sigma)):
plt.plot(self.x, comps['p' + str(i+1)]* self.maxyvalue + comps['constant']* self.maxyvalue, 'k-')
plt.savefig(self.label + '/rawplot_' + self.label + '.pdf')
save_modelresult(fitresult, self.label + '/modelresult_' + self.label + '.sav')
plt.clf()
def FitSpectrum(self):
"""
Fit Spectrum with initial values provided by SelectBaseline() and SelectPeaks()
"""
        polyparams = self.Fitbaseline()
base = polyparams[0].n
ramanmodel = ConstantModel()
ramanmodel.set_param_hint('c', value = base, min = 0)
globwidth = 1
xpeak, ypeak = np.genfromtxt(self.peakfile, unpack = True)
if type(xpeak) == np.float64:
xpeak = [xpeak]
ypeak = [ypeak]
for i in range(0, len(xpeak)):
prefix = 'p' + str(i + 1)
tempvoigt = Model(func = voigt, prefix = prefix)
tempvoigt.set_param_hint(prefix + 'x0', value = xpeak[i], min = 0)
tempvoigt.set_param_hint(prefix + 'sigma', value = globwidth, min = 0)
tempvoigt.set_param_hint(prefix + 'gamma', value = globwidth, min = 0)
tempvoigt.set_param_hint(prefix + 'height',value = ypeak[i], expr = 'wofz(((0) + 1j*'+ prefix + 'gamma) / '+ prefix + 'sigma / sqrt(2)).real')
tempvoigt.set_param_hint(prefix + 'fwhm', expr = '0.5346 * 2 *' + prefix + 'gamma + sqrt(0.2166 * (2*' + prefix + 'gamma)**2 + (2 * ' + prefix + 'sigma * sqrt(2 * log(2) ) )**2 )')
ramanmodel += tempvoigt
pars = ramanmodel.make_params()
fitresult = ramanmodel.fit(self.y, pars, x = self.x, scale_covar = True)
print(fitresult.fit_report(min_correl=0.5))
comps = fitresult.eval_components()
xplot = np.linspace(self.x[0], self.x[-1], 1000)
plt.plot(self.x, self.y* self.maxyvalue, 'rx')
plt.plot(self.x, fitresult.best_fit* self.maxyvalue)
for i in range(0, len(xpeak)):
plt.plot(self.x, comps['p' + str(i+1)]* self.maxyvalue + comps['constant']* self.maxyvalue, 'k-')
plt.show()
plt.savefig(self.label + '/rawplot_' + self.label + '.pdf')
save_modelresult(fitresult, self.label + '/modelresult_' + self.label + '.sav')
def SaveFitParams(self):
"""
Save the Results of the fit in a .zip file using numpy.savez().
"""
fitresult = load_modelresult(self.label + '/modelresult_' + self.label + '.sav')
fitparams = fitresult.params
c, stdc, x0, stdx0, height, stdheight, sigma, stdsigma, gamma, stdgamma, fwhm, stdfwhm = ([] for i in range(12))
for name in list(fitparams.keys()):
par = fitparams[name]
param = ufloat(par.value, par.stderr)
if ('c' in name):
param = param * self.maxyvalue
c.append(param.n)
stdc.append(param.s)
elif ('height' in name):
param = param * self.maxyvalue
height.append(param.n)
stdheight.append(param.s)
elif ('x0' in name):
x0.append(param.n)
stdx0.append(param.s)
elif ('sigma' in name):
sigma.append(param.n)
stdsigma.append(param.s)
elif ('gamma' in name):
gamma.append(param.n)
stdgamma.append(param.s)
elif ('fwhm' in name):
fwhm.append(param.n)
stdfwhm.append(param.s)
        np.savez(self.label + '/fitparams_' + self.label, x0 = x0, stdx0 = stdx0, c = c, stdc = stdc, height = height, stdheight = stdheight, sigma = sigma, stdsigma = stdsigma, gamma = gamma, stdgamma = stdgamma, fwhm = fwhm, stdfwhm = stdfwhm)
def Fitbaseline(self, show = False):
bed = np.genfromtxt(self.baselinefile, unpack = True) # load the data from SelectBaseline
#generate mask for baseline fit
bgndx = (self.x <= bed[0])
for i in range(1, len(bed) - 2, 2):
bgndx = bgndx | ((self.x >= bed[i]) & (self.x <= bed[i + 1]))
bgndx = bgndx | (self.x >= bed[-1])
#FIT Baseline
polyparams, cov = curve_fit(poly, self.x[bgndx], self.y[bgndx])
        if show:
self.PlotRawData(False)
xplot = np.linspace(self.x[0], self.x[-1], 100)
plt.plot(xplot, poly(xplot, *polyparams), 'r-')
plt.show()
self.base = polyparams[0]
return correlated_values(polyparams, cov)
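# Minimal usage sketch (illustrative; 'sample1/data.txt' and the label are hypothetical,
# a directory named after the label must already exist because results are written into it,
# and the Select* steps need an interactive matplotlib backend):
#
#   spec = ramanspectrum('sample1/data.txt', label = 'sample1')
#   spec.SelectSpectrum()    # crop to the region of interest
#   spec.SelectBaseline()    # mark the background regions by clicking
#   spec.SelectPeaks()       # click initial peak positions
#   spec.FitSpectrum()       # fit constant background + Voigt peaks
#   spec.SaveFitParams()     # store the results via numpy.savez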
|
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import time
import re
class HobbyStats(object):
"""
书影音数据
"""
def __init__(self, hobby, do=0, wish=0, collect=0):
"""
初始化数据
:param hobby: 喜好的类型:book, movie, music
:param do: 对应正在的数量
:param wish: 对应想要的数量
:param collect: 对应做完的数量
:type hobby: str
:type do: int
:type wish: int
:type collect: int
"""
super(HobbyStats, self).__init__()
self.hobby = hobby
self.do = do
self.wish = wish
self.collect = collect
def print_stats(self):
"""
打印喜好信息
"""
print('喜好: {}\t do: {}\t wish: {}\t collect: {}'.format(self.hobby, self.do, self.wish, self.collect))
class DoubanUser(object):
"""
豆瓣用户
"""
def __init__(self, usr_id):
"""
初始化普通豆瓣用户
:param usr_id: 豆瓣ID
:type usr_id: str
"""
super(DoubanUser, self).__init__()
self.usr_id = usr_id
self.register_date = time.strptime(time.strftime("%Y-%m-%d", time.localtime()), "%Y-%m-%d")
self.com_likes = 0
self.group_num = 0
self.contacts_num = 0
self.rev_contacts_num = 0
self.stats_book = HobbyStats('book')
self.stats_movie = HobbyStats('movie')
self.stats_music = HobbyStats('music')
self.usr_page_buffer = ''
def set_register_date(self, register_date=time.strptime(time.strftime("%Y-%m-%d", time.localtime()), "%Y-%m-%d")):
"""
设置注册日期
:param register_date: 注册日期
:type register_date: time.struct_time
"""
self.register_date = register_date
def set_common_likes(self, com_likes=0):
"""
设置共同喜好数目
:param com_likes: 共同喜好数目
:type com_likes: int
"""
self.com_likes = com_likes
def set_group_num(self, group_num=0):
"""
设置关注的小组数目
:param group_num: 关注的小组数目
:type group_num: int
"""
self.group_num = group_num
def set_contacts_num(self, contacts_num=0):
"""
设置关注的用户数目
:param contacts_num: 关注的用户数目
:type contacts_num: int
"""
self.contacts_num = contacts_num
def set_rev_contacts_num(self, rev_contacts_num=0):
"""
设置被关注用户的数目
:param rev_contacts_num: 被关注用户的数目
:type rev_contacts_num: int
"""
self.rev_contacts_num = rev_contacts_num
def set_stats_book(self, stats):
"""
设置书数据
:param stats: 书数据
:type stats: HobbyStats
"""
self.stats_book = stats
def set_stats_movie(self, stats):
"""
设置影数据
:param stats: 影数据
:type stats: HobbyStats
"""
self.stats_movie = stats
def set_stats_music(self, stats):
"""
设置音数据
:param stats: 音数据
:type stats: HobbyStats
"""
self.stats_music = stats
def set_cur_member_page(self, s):
"""
获取当前成员的首页内容
:param s: 一次连接会话
:type s: requests.sessions.Session
"""
usr_page_url = 'https://www.douban.com/people/{}'.format(self.usr_id)
usr_page = s.get(usr_page_url)
usr_page_buffer = usr_page.text
self.usr_page_buffer = usr_page_buffer
def get_register_date(self):
"""
从当前成员的首页获得其加入的时间
:return: 注册日期
:rtype: time.struct_time
"""
usr_page_buffer = self.usr_page_buffer
register_date_str = re.search(r'(\d{4}-\d{1,2}-\d{1,2})加入', usr_page_buffer)
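        # e.g. a page containing the text "2015-03-08加入" would yield a match whose
        # group(1) is "2015-03-08" (hypothetical date, for illustration only)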
        if register_date_str:
return time.strptime(register_date_str.group(1), "%Y-%m-%d")
return time.strptime(time.strftime("%Y-%m-%d", time.localtime()), "%Y-%m-%d")
def get_common_likes(self):
"""
从当前成员的首页获得与对方的共同喜好数目
:return: 共同喜好数目
:rtype: int
"""
usr_page_buffer = self.usr_page_buffer
common_like_str = re.search(r'共同的喜好\((\d*)\)', usr_page_buffer)
if common_like_str:
return int(common_like_str.group(1))
else:
soup = BeautifulSoup(usr_page_buffer, features="lxml")
win = soup.find('ul', {"id": "win"})
if win:
li = win.findAll('li', {"class": "aob"})
if li:
return len(li)
return 0
def get_group_num(self):
"""
从当前成员的首页获得其关注的小组数
:return: 关注的小组数目
:rtype: int
"""
usr_page_buffer = self.usr_page_buffer
group_num_str = re.search(r'常去的小组\((\d*)\)', usr_page_buffer)
        if group_num_str and group_num_str.group(1) != '':
return int(group_num_str.group(1))
return 0
def get_contacts_num(self):
"""
从当前成员的首页获得其关注的用户数
:return: 关注的用户数目
:rtype: int
"""
usr_page_buffer = self.usr_page_buffer
contacts_num_str = re.search(r'成员(\d*)', usr_page_buffer)
        if contacts_num_str and contacts_num_str.group(1) != '':
return int(contacts_num_str.group(1))
return 0
def get_rev_contacts_num(self):
"""
从当前成员的首页获得关注其的用户数
:return: 被关注用户的数目
:rtype: int
"""
usr_page_buffer = self.usr_page_buffer
rev_contacts_num_str = re.search(r'被(\d*)人关注', usr_page_buffer)
        if rev_contacts_num_str and rev_contacts_num_str.group(1) != '':
return int(rev_contacts_num_str.group(1))
return 0
def get_stats_book(self):
"""
从当前成员的首页获得书的数据
:return: 书数据
:rtype: HobbyStats
"""
usr_page_buffer = self.usr_page_buffer
soup = BeautifulSoup(usr_page_buffer, features="lxml")
div_book = soup.find('div', {'id': 'book'})
do, wish, collect = 0, 0, 0
if div_book:
span_pl = div_book.find('span', {'class': 'pl'})
if span_pl:
                do_str = re.search(r'(\d*)本在读', span_pl.text)
                if do_str and do_str.group(1) != '':
                    do = int(do_str.group(1))
                wish_str = re.search(r'(\d*)本想读', span_pl.text)
                if wish_str and wish_str.group(1) != '':
                    wish = int(wish_str.group(1))
                collect_str = re.search(r'(\d*)本读过', span_pl.text)
                if collect_str and collect_str.group(1) != '':
                    collect = int(collect_str.group(1))
return HobbyStats('book', do, wish, collect)
def get_stats_movie(self):
"""
从当前成员的首页获得影的数据
:return: 影数据
:rtype: HobbyStats
"""
usr_page_buffer = self.usr_page_buffer
soup = BeautifulSoup(usr_page_buffer, features="lxml")
div_book = soup.find('div', {'id': 'movie'})
do, wish, collect = 0, 0, 0
if div_book:
span_pl = div_book.find('span', {'class': 'pl'})
if span_pl:
                do_str = re.search(r'(\d*)部在看', span_pl.text)
                if do_str and do_str.group(1) != '':
                    do = int(do_str.group(1))
                wish_str = re.search(r'(\d*)部想看', span_pl.text)
                if wish_str and wish_str.group(1) != '':
                    wish = int(wish_str.group(1))
                collect_str = re.search(r'(\d*)部看过', span_pl.text)
                if collect_str and collect_str.group(1) != '':
                    collect = int(collect_str.group(1))
return HobbyStats('movie', do, wish, collect)
def get_stats_music(self):
"""
从当前成员的首页获得音的数据
:return: 音数据
:rtype: HobbyStats
"""
usr_page_buffer = self.usr_page_buffer
soup = BeautifulSoup(usr_page_buffer, features="lxml")
div_book = soup.find('div', {'id': 'music'})
do, wish, collect = 0, 0, 0
if div_book:
span_pl = div_book.find('span', {'class': 'pl'})
if span_pl:
                do_str = re.search(r'(\d*)张在听', span_pl.text)
                if do_str and do_str.group(1) != '':
                    do = int(do_str.group(1))
                wish_str = re.search(r'(\d*)张想听', span_pl.text)
                if wish_str and wish_str.group(1) != '':
                    wish = int(wish_str.group(1))
                collect_str = re.search(r'(\d*)张听过', span_pl.text)
                if collect_str and collect_str.group(1) != '':
                    collect = int(collect_str.group(1))
return HobbyStats('music', do, wish, collect)
class GroupMember(DoubanUser):
"""
小组成员
"""
def __init__(self, usr_id, usr_name, usr_addr, url_icon):
"""
初始化基本信息
:param usr_id: 豆瓣ID
:param usr_name: 用户名
:param usr_addr: 常居地地址
:param url_icon: 头像url地址
:type usr_id: str
:type usr_name: str
:type usr_addr: str
:type url_icon: str
"""
super(GroupMember, self).__init__(usr_id)
self.usr_id = usr_id
self.usr_name = usr_name
self.usr_addr = usr_addr
self.url_icon = url_icon
def update_infos(self, data):
"""
用于查询数据库时更新其它信息,方便打印
:param data: 数据库查询得到的其它信息
:type data: Tuple
"""
        # registration date
if data[0] is None:
self.register_date = time.strptime(time.strftime("%Y-%m-%d", time.localtime()), "%Y-%m-%d")
else:
self.register_date = time.strptime(data[0], "%Y-%m-%d")
        # other data
stats_num = lambda num: 0 if num is None else num
self.com_likes = stats_num(data[1])
self.group_num = stats_num(data[2])
self.contacts_num = stats_num(data[3])
self.rev_contacts_num = stats_num(data[4])
        # book / movie / music statistics
self.stats_book = HobbyStats('book', data[5], data[6], data[7])
self.stats_movie = HobbyStats('movie', data[8], data[9], data[10])
self.stats_music = HobbyStats('music', data[11], data[12], data[13])
def print_basic_infos(self):
"""
打印从小组成员页面可以获得的基本信息:昵称,常居地,用户ID(主页地址),头像
"""
usr_addr = (lambda addr: 'None' if addr is None else addr)(self.usr_addr)
print('----------------------------------------------------------------------------------')
print('主页: https://www.douban.com/people/{}'.format(self.usr_id))
print('昵称: {:30s}\t 常居地: {:6s}'.format(self.usr_name, usr_addr))
print('头像: {}'.format(self.url_icon))
print('----------------------------------------------------------------------------------')
def print_infos(self):
"""
打印基本信息与通过访问用户的个人主页获得的剩余信息
"""
self.print_basic_infos()
print("加入: ", time.strftime("%Y-%m-%d", self.register_date))
print('共同喜好数: {}\t 关注小组数: {}\t 关注用户数: {}\t 被关注用户数: {}'.format(
self.com_likes, self.group_num, self.contacts_num, self.rev_contacts_num))
self.stats_book.print_stats()
self.stats_movie.print_stats()
self.stats_music.print_stats()
print('----------------------------------------------------------------------------------')
class ContactsMember(DoubanUser):
"""
我的关注列表用户
"""
def __init__(self, usr_id, usr_name, usr_addr, url_icon, usr_sign, usr_rs):
"""
初始化基本信息
:param usr_id: 豆瓣ID
:param usr_name: 用户名
:param usr_addr: 常居地地址
:param url_icon: 头像url地址
:param usr_sign: 签名
:param usr_rs: 所属标签组
:type usr_id: str
:type usr_name: str
:type usr_addr: str
:type url_icon: str
:type usr_sign: str
:type usr_rs: str
"""
super(ContactsMember, self).__init__(usr_id)
self.usr_id = usr_id
self.usr_name = usr_name
self.usr_addr = usr_addr
self.url_icon = url_icon
self.usr_sign = usr_sign
self.usr_rs = usr_rs
def print_basic_infos(self):
"""
打印从关注列表页面可以获得的基本信息:昵称,常居地,用户ID(主页地址),头像,签名,所属标签组
"""
usr_addr = (lambda addr: 'None' if addr is None else addr)(self.usr_addr)
print('-----------------------------------------------------------------------------------------')
print('主页: https://www.douban.com/people/{}'.format(self.usr_id))
print('昵称: {:30s}\t 所属标签组: {}\t 常居地: {:6s}'.format(self.usr_name, self.usr_rs, usr_addr))
print('签名: {}'.format(self.usr_sign))
print('头像: {}'.format(self.url_icon))
print('-----------------------------------------------------------------------------------------')
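# Minimal usage sketch (illustrative; the ID, name and URL below are hypothetical
# and a logged-in requests.Session is assumed):
#
#   import requests
#   s = requests.Session()
#   member = GroupMember('example_id', 'example_name', 'Beijing', 'https://example.org/icon.jpg')
#   member.set_cur_member_page(s)
#   member.set_register_date(member.get_register_date())
#   member.set_stats_book(member.get_stats_book())
#   member.print_infos()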
|