content stringlengths 5 1.05M |
|---|
import time
import random
import statsd

# Emit test metrics to a local statsd daemon once per second for ten
# iterations: a gauge with a random value, a timer around a burst of
# counter increments of the same size.
counter_name = 'lmm.test'
wait_s = 1

# FIX: create the UDP client once, not on every loop iteration — the
# original rebuilt a StatsClient per pass for no benefit.
c = statsd.StatsClient('127.0.0.1', 8125)
for i in range(10):
    random_count = random.randrange(1, 100)
    print("Count=(%d)" % (random_count))
    c.gauge(counter_name, random_count)
    t = c.timer(counter_name)
    t.start()
    # Send `random_count` individual increments so the timer measures the
    # cost of the whole burst.
    while random_count > 0:
        c.incr(counter_name, 1)
        random_count -= 1
    t.stop()
    time.sleep(wait_s)
|
# random module
from random import choice
from datetime import date

# ANSI color escape codes
cores = {'limpa': '\033[m',
         'titulo': '\033[1;4;35m',
         'preto': '\033[1;30m',
         # BUG FIX: this entry was '\01933[1;31m' — a stray "19" corrupted
         # the escape sequence, printing garbage instead of switching to red.
         'vermelho': '\033[1;31m',
         'verde': '\033[1;32m',
         'amarelo': '\033[1;33m',
         'azul': '\033[1;34m',
         'roxo': '\033[1;35m',
         'ciano': '\033[1;36m'}
# color names for random selection (kept for use with `choice`)
cor = ['preto', 'vermelho', 'verde', 'amarelo', 'azul', 'roxo', 'ciano', 'cinza']
# banner width: title length plus padding
t = len('Pesquisa') + 35
# centered, styled title
print('{}{:^{}}{}'.format(cores['titulo'], 'Pesquisa', t, cores['limpa']))
anoatual = date.today().year
maioridade = 0  # adults counted
menoridade = 0  # minors counted
# Ask seven interviewees for their birth year and tally adults vs minors.
for c in range(1, 8):
    anonasc = int(input('Em que ano o {}º estrevistado nasceu?: '.format(c)))
    idade = anoatual - anonasc
    # NOTE(review): majority threshold used here is 21 — confirm intended age.
    if idade >= 21:
        maioridade += 1
    else:
        menoridade += 1
print('{} entrevistados são {}Maiores de Idade{} \n{} entrevistados são {}Menores de Idade{}'
      .format(maioridade, cores['azul'], cores['limpa'], menoridade, cores['vermelho'], cores['limpa']))
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for generating FHE-C++."""
# Labels of the tools used by the transpilation pipeline: the FHE
# transpiler itself, the XLS frontend/optimizer/codegen tools, and the
# Yosys/abc synthesis flow.
_FHE_TRANSPILER = "//transpiler"
_STRUCT_HEADER_GENERATOR = "//transpiler/struct_transpiler:struct_transpiler"
_XLSCC = "@com_google_xls//xls/contrib/xlscc:xlscc"
_XLS_BOOLEANIFY = "@com_google_xls//xls/tools:booleanify_main"
_XLS_OPT = "@com_google_xls//xls/tools:opt_main"
_GET_TOP_FUNC_FROM_PROTO = "@com_google_xls//xls/contrib/xlscc:get_top_func_from_proto"
_XLS_CODEGEN = "@com_google_xls//xls/tools:codegen_main"
_YOSYS = "@yosys//:yosys_bin"
_ABC = "@abc//:abc_bin"

# Liberty cell libraries, one per supported FHE backend.
_TFHE_CELLS_LIBERTY = "//transpiler:tfhe_cells.liberty"
_PALISADE_CELLS_LIBERTY = "//transpiler:palisade_cells.liberty"
def _run(ctx, inputs, out_ext, tool, args, entry = None):
    """A helper to run a shell script and capture the output.

    Args:
      ctx: The blaze context.
      inputs: A list of files used by the shell.
      out_ext: An extension to add to the current label for the output file.
      tool: What tool to run.
      args: A list of arguments to pass to the tool.
      entry: If specified, it points to a file containing the entry point; that
        information is extracted and provided as value to the --top
        command-line switch.

    Returns:
      The File output.
    """
    library_name = ctx.attr.library_name or ctx.label.name
    out = ctx.actions.declare_file("%s%s" % (library_name, out_ext))
    arguments = " ".join(args)
    if entry != None:
        # $(cat ...) defers reading the entry-point name until action time.
        arguments += " --top $(cat {})".format(entry.path)
    # The tool writes its result to stdout; redirect it into the output file.
    ctx.actions.run_shell(
        inputs = inputs,
        outputs = [out],
        tools = [tool],
        command = "%s %s > %s" % (tool.path, arguments, out.path),
    )
    return out
def _get_top_func(ctx, metadata_file):
    """Extract the name of the entry function from the metadata file."""
    extractor = ctx.executable._get_top_func_from_proto
    return _run(ctx, [metadata_file], ".entry", extractor, [metadata_file.path])
def _build_ir(ctx):
    """Build the XLS IR from a C++ source.

    Args:
      ctx: The Blaze context.

    Returns:
      A File containing the generated IR and one containing metadata about
      the translated function (signature, etc.).
    """
    library_name = ctx.attr.library_name or ctx.label.name
    ir_file = ctx.actions.declare_file("%s.ir" % library_name)
    metadata_file = ctx.actions.declare_file("%s_meta.proto" % library_name)
    # xlscc writes the IR to stdout and the metadata proto to -meta_out.
    ctx.actions.run_shell(
        inputs = [ctx.file.src] + ctx.files.hdrs,
        outputs = [ir_file, metadata_file],
        tools = [ctx.executable._xlscc],
        command = "%s %s -meta_out %s > %s" % (
            ctx.executable._xlscc.path,
            ctx.file.src.path,
            metadata_file.path,
            ir_file.path,
        ),
    )
    return (ir_file, metadata_file, _get_top_func(ctx, metadata_file))
def _optimize_ir(ctx, src, extension, entry, options = []):
    """Optimize XLS IR with opt_main, forwarding any extra pass options."""
    return _run(ctx, [src, entry], extension, ctx.executable._xls_opt, [src.path] + options, entry)
def _booleanify_ir(ctx, src, extension, entry):
    """Booleanify XLS IR with booleanify_main."""
    return _run(ctx, [src, entry], extension, ctx.executable._xls_booleanify, ["--ir_path", src.path], entry)
def _optimize_and_booleanify_repeatedly(ctx, ir_file, entry):
    """Runs several passes of optimization followed by booleanification.

    Returns [%.opt.ir, %.opt.bool.ir, %.opt.bool.opt.ir, %.opt.bool.opt.bool.ir, ...]
    """
    results = [ir_file]
    suffix = ""

    # With zero optimization passes, we still want to run the optimizer with an
    # inlining pass, as the booleanifier expects a single function.
    # BUG FIX: this previously tested `== 0`, but num_opt_passes is documented
    # as "Values <= 0 will skip optimization"; a negative value fell into the
    # else branch, produced an empty result list, and crashed
    # _pick_last_bool_file() downstream.
    if ctx.attr.num_opt_passes <= 0:
        suffix += ".opt"
        results.append(_optimize_ir(ctx, results[-1], suffix + ".ir", entry, ["--run_only_passes=inlining"]))
        suffix += ".bool"
        results.append(_booleanify_ir(ctx, results[-1], suffix + ".ir", entry))
    else:
        for _ in range(ctx.attr.num_opt_passes):
            suffix += ".opt"
            results.append(_optimize_ir(ctx, results[-1], suffix + ".ir", entry))
            suffix += ".bool"
            results.append(_booleanify_ir(ctx, results[-1], suffix + ".ir", entry))

    # Drop the raw input file; callers only want the produced stages.
    return results[1:]
def _pick_last_bool_file(optimized_files):
    """Pick the last booleanified IR file from a list of files produced by
    _optimize_and_booleanify_repeatedly().

    The last %.*.bool.ir file may or may not be the smallest one. For some IR
    inputs, an additional optimization/booleanification pass results in a
    larger file. This is why we have num_opt_passes.
    """

    # The list alternates [%.opt.ir, %.opt.bool.ir, %.opt.bool.opt.ir,
    # %.opt.bool.opt.bool.ir, ...], so every other entry is the result of an
    # optimization + booleanification pass and the final entry is always a
    # booleanified file.
    last_index = len(optimized_files) - 1
    return optimized_files[last_index]
def _fhe_transpile_ir(ctx, src, metadata, optimizer, encryption, interpreter):
    """Transpile XLS IR into C++ source.

    Args:
      ctx: The Blaze context.
      src: The boolean IR (or, for the yosys optimizer, the netlist) file.
      metadata: The function-metadata proto emitted by xlscc.
      optimizer: "xls" or "yosys"; yosys additionally passes a Liberty file.
      interpreter: Whether to emit interpreter-based execution.
      encryption: The FHE backend to generate code for.

    Returns:
      [out_cc, out_h], the generated source and header files.
    """
    library_name = ctx.attr.library_name or ctx.label.name
    out_cc = ctx.actions.declare_file("%s.cc" % library_name)
    out_h = ctx.actions.declare_file("%s.h" % library_name)
    args = [
        "-ir_path",
        src.path,
        "-metadata_path",
        metadata.path,
        "-cc_path",
        out_cc.path,
        "-header_path",
        out_h.path,
        "-optimizer",
        optimizer,
        "-encryption",
        encryption,
    ]
    if interpreter:
        args.append("-interpreter")
    if optimizer == "yosys":
        # The netlist path is interpreted against the Liberty cell library.
        args += ["-liberty_path", ctx.file.cell_library.path]
    ctx.actions.run(
        inputs = [src, metadata, ctx.file.cell_library],
        outputs = [out_cc, out_h],
        executable = ctx.executable._fhe_transpiler,
        arguments = args,
    )
    return [out_cc, out_h]
def _generate_struct_header(ctx, metadata, encryption):
    """Generate struct-encoding headers from the metadata proto.

    Runs the struct header generator twice: first to emit a generic
    (backend-independent) header, then to specialize it for the given
    encryption backend.

    Returns:
      [generic_struct_h, specific_struct_h].
    """
    library_name = ctx.attr.library_name or ctx.label.name
    generic_struct_h = ctx.actions.declare_file("%s.generic.types.h" % library_name)
    specific_struct_h = ctx.actions.declare_file("%s.types.h" % library_name)
    # Pass 1: generic header derived from the metadata and original headers.
    args = [
        "-metadata_path",
        metadata.path,
        "-original_headers",
        ",".join([hdr.path for hdr in ctx.files.hdrs]),
        "-output_path",
        generic_struct_h.path,
    ]
    ctx.actions.run(
        inputs = [metadata],
        outputs = [generic_struct_h],
        executable = ctx.executable._struct_header_generator,
        arguments = args,
    )
    # Pass 2: specialize the generic header for the chosen backend.
    args = [
        "-metadata_path",
        metadata.path,
        "-output_path",
        specific_struct_h.path,
        "-generic_header_path",
        generic_struct_h.path,
        "-backend_type",
        encryption,
    ]
    ctx.actions.run(
        inputs = [metadata, generic_struct_h],
        outputs = [specific_struct_h],
        executable = ctx.executable._struct_header_generator,
        arguments = args,
    )
    return [generic_struct_h, specific_struct_h]
def _generate_verilog(ctx, src, extension, entry):
    """Convert optimized XLS IR to Verilog."""
    return _run(
        ctx,
        [src, entry],
        extension,
        ctx.executable._xls_codegen,
        [
            src.path,
            "--delay_model=unit",
            "--clock_period_ps=1000",
            "--generator=combinational",
            "--use_system_verilog=false",  # edit the YS script if this changes
        ],
        entry,
    )
def _generate_yosys_script(ctx, verilog, netlist_path, entry, cell_library):
    """Write a Yosys synthesis script mapping *verilog* onto *cell_library*.

    The script is produced via a shell heredoc so that $(cat {entry}) is
    expanded to the entry-point name at action time.
    """
    library_name = ctx.attr.library_name or ctx.label.name
    ys_script = ctx.actions.declare_file("%s.ys" % library_name)
    sh_cmd = """cat>{script}<<EOF
# read_verilog -sv {verilog} # if we want to use SV
read_verilog {verilog}
hierarchy -check -top $(cat {entry})
proc; opt;
flatten; opt;
fsm; opt;
memory; opt
techmap; opt
dfflibmap -liberty {cell_library}
abc -liberty {cell_library}
opt_clean -purge
clean
write_verilog {netlist_path}
EOF
""".format(
        script = ys_script.path,
        verilog = verilog.path,
        entry = entry.path,
        cell_library = cell_library.path,
        netlist_path = netlist_path,
    )
    ctx.actions.run_shell(
        inputs = [entry],
        outputs = [ys_script],
        command = sh_cmd,
    )
    return ys_script
def _generate_netlist(ctx, verilog, entry):
    """Run Yosys over *verilog*; returns (netlist File, generated .ys script)."""
    library_name = ctx.attr.library_name or ctx.label.name
    netlist = ctx.actions.declare_file("%s.netlist.v" % library_name)
    script = _generate_yosys_script(ctx, verilog, netlist.path, entry, ctx.file.cell_library)

    # Yosys locates its shared data via YOSYS_DATDIR inside its runfiles tree.
    yosys_runfiles_dir = ctx.executable._yosys.path + ".runfiles"
    args = ctx.actions.args()
    args.add("-q")  # quiet mode only errors printed to stderr
    args.add("-q")  # second q don't print warnings
    args.add("-Q")  # Don't print header
    args.add("-T")  # Don't print footer
    args.add_all("-s", [script.path])  # command execution
    ctx.actions.run(
        inputs = [verilog, script],
        outputs = [netlist],
        arguments = [args],
        executable = ctx.executable._yosys,
        tools = [ctx.file.cell_library, ctx.executable._abc],
        env = {
            "YOSYS_DATDIR": yosys_runfiles_dir + "/yosys/share/yosys",
        },
    )
    return (netlist, script)
def _fhe_transpile_impl(ctx):
    """Rule implementation: C++ -> XLS IR -> (netlist | boolean IR) -> FHE C++."""
    ir_file, metadata_file, metadata_entry_file = _build_ir(ctx)
    optimizer = ctx.attr.optimizer
    encryption = ctx.attr.encryption
    interpreter = ctx.attr.interpreter

    outputs = []
    optimized_files = []
    netlist_file = None
    if optimizer == "yosys":
        # Yosys flow: optimize once, lower to Verilog, then synthesize a
        # netlist against the configured Liberty cell library.
        optimized_ir_file = _optimize_ir(ctx, ir_file, ".opt.ir", metadata_entry_file)
        optimized_files.append(optimized_ir_file)
        verilog_ir_file = _generate_verilog(ctx, optimized_ir_file, ".v", metadata_entry_file)
        netlist_file, yosys_script_file = _generate_netlist(ctx, verilog_ir_file, metadata_entry_file)
        outputs.extend([verilog_ir_file, netlist_file, yosys_script_file])
        ir_input = netlist_file
    else:
        # XLS flow: alternate optimize/booleanify passes, keep the last result.
        optimized_files = _optimize_and_booleanify_repeatedly(ctx, ir_file, metadata_entry_file)
        ir_input = _pick_last_bool_file(optimized_files)
    hdrs = _generate_struct_header(ctx, metadata_file, encryption)
    out_cc, out_h = _fhe_transpile_ir(
        ctx,
        ir_input,
        metadata_file,
        optimizer,
        encryption,
        interpreter,
    )
    hdrs.append(out_h)
    outputs = [
        ir_file,
        metadata_file,
        metadata_entry_file,
        out_cc,
    ] + optimized_files + hdrs + outputs
    return [
        DefaultInfo(files = depset(outputs)),
        # Output groups let downstream filegroups select just sources/headers.
        OutputGroupInfo(
            sources = depset([out_cc]),
            headers = depset(hdrs),
        ),
    ]
def _executable_attr(label):
    """A helper for declaring internal executable dependencies."""

    # cfg = "exec" builds the tool for the execution platform, as required
    # for anything invoked from an action.
    return attr.label(
        default = Label(label),
        allow_single_file = True,
        executable = True,
        cfg = "exec",
    )
# The transpilation rule proper; usually invoked through fhe_cc_library below.
fhe_transpile = rule(
    doc = """
This rule produces transpiled C++ code that can be included in other
libraries and binaries.
""",
    implementation = _fhe_transpile_impl,
    attrs = {
        "src": attr.label(
            doc = "A single C++ source file to transpile.",
            allow_single_file = [".cc"],
        ),
        "hdrs": attr.label_list(
            doc = "Any headers necessary for conversion to XLS IR.",
            allow_files = [".h"],
        ),
        "library_name": attr.string(
            doc = """
The name used for the output files (<library_name>.cc and <library_name>.h);
defaults to the name of this target.
""",
        ),
        "num_opt_passes": attr.int(
            doc = """
The number of optimization passes to run on XLS IR (default 1).
Values <= 0 will skip optimization altogether.
(Only affects the XLS optimizer.)
""",
            default = 1,
        ),
        "encryption": attr.string(
            doc = """
FHE encryption scheme used by the resulting program. Choices are
{tfhe, palisade, cleartext}. 'cleartext' means the program runs in cleartext,
skipping encryption; this has zero security, but is useful for debugging.
""",
            values = [
                "tfhe",
                "palisade",
                "cleartext",
            ],
            default = "tfhe",
        ),
        "optimizer": attr.string(
            doc = """
Optimizing/lowering pipeline to use in transpilation. Choices are {xls, yosys}.
'xls' uses the built-in XLS tools to transform the program into an optimized
Boolean circuit; 'yosys' uses Yosys to synthesize a circuit that targets the
given backend.
""",
            values = [
                "xls",
                "yosys",
            ],
            default = "xls",
        ),
        "interpreter": attr.bool(
            doc = """
Controls whether the resulting program executes directly (single-threaded C++),
or invokes a multi-threaded interpreter.
""",
            default = False,
        ),
        "cell_library": attr.label(
            doc = "A single cell-definition library in Liberty format.",
            allow_single_file = [".liberty"],
        ),
        # Implicit tool dependencies, built for the execution platform.
        "_xlscc": _executable_attr(_XLSCC),
        "_xls_booleanify": _executable_attr(_XLS_BOOLEANIFY),
        "_xls_opt": _executable_attr(_XLS_OPT),
        "_fhe_transpiler": _executable_attr(_FHE_TRANSPILER),
        "_struct_header_generator": _executable_attr(_STRUCT_HEADER_GENERATOR),
        # NOTE(review): declared inline (no allow_single_file) unlike the
        # _executable_attr helper — presumably intentional; verify.
        "_get_top_func_from_proto": attr.label(
            default = Label(_GET_TOP_FUNC_FROM_PROTO),
            executable = True,
            cfg = "exec",
        ),
        "_yosys": _executable_attr(_YOSYS),
        "_abc": _executable_attr(_ABC),
        "_xls_codegen": _executable_attr(_XLS_CODEGEN),
    },
)
def fhe_cc_library(
        name,
        src,
        hdrs,
        copts = [],
        num_opt_passes = 1,
        encryption = "tfhe",
        optimizer = "xls",
        interpreter = False,
        **kwargs):
    """A rule for building FHE-based cc_libraries.

    Example usage:
        fhe_cc_library(
            name = "secret_computation",
            src = "secret_computation.cc",
            hdrs = ["secret_computation.h"],
            num_opt_passes = 2,
            encryption = "cleartext",
            optimizer = "xls",
        )
        cc_binary(
            name = "secret_computation_consumer",
            srcs = ["main.cc"],
            deps = [":secret_computation"],
        )

    To generate just the transpiled sources, you can build the
    "<TARGET>.transpiled_files" subtarget; in the above example, you could run:
        blaze build :secret_computation.transpiled_files

    Args:
      name: The name of the cc_library target.
      src: The transpiler-compatible C++ file that are processed to create the library.
      hdrs: The list of header files required while transpiling the `src`.
      copts: The list of options to the C++ compilation command.
      num_opt_passes: The number of optimization passes to run on XLS IR (default 1).
        Values <= 0 will skip optimization altogether.
        (Only affects the XLS optimizer.)
      encryption: Defaults to "tfhe"; FHE encryption scheme used by the resulting program.
        Choices are {tfhe, palisade, cleartext}. 'cleartext' means the program runs in
        cleartext, skipping encryption; this has zero security, but is useful for
        debugging.
      optimizer: Defaults to "xls"; optimizing/lowering pipeline to use in transpilation.
        Choices are {xls, yosys}. 'xls' uses the built-in XLS tools to transform the
        program into an optimized Boolean circuit; 'yosys' uses Yosys to synthesize
        a circuit that targets the given backend. (In most cases, Yosys's optimizer
        is more powerful.)
      interpreter: Defaults to False; controls whether the resulting program executes
        directly (single-threaded C++), or invokes a multi-threaded interpreter.
      **kwargs: Keyword arguments to pass through to the cc_library target.
    """
    tags = kwargs.pop("tags", None)
    transpiled_files = "{}.transpiled_files".format(name)

    # Pick the Liberty cell library matching the encryption backend.
    cell_library = _TFHE_CELLS_LIBERTY
    if encryption == "palisade":
        cell_library = _PALISADE_CELLS_LIBERTY
    fhe_transpile(
        name = transpiled_files,
        src = src,
        hdrs = hdrs,
        library_name = name,
        num_opt_passes = num_opt_passes,
        encryption = encryption,
        optimizer = optimizer,
        interpreter = interpreter,
        tags = tags,
        cell_library = cell_library,
    )

    # Filegroups exposing the transpiled sources and headers via the
    # output groups declared in _fhe_transpile_impl.
    transpiled_source = "{}.srcs".format(name)
    native.filegroup(
        name = transpiled_source,
        srcs = [":" + transpiled_files],
        output_group = "sources",
        tags = tags,
    )
    transpiled_headers = "{}.hdrs".format(name)
    native.filegroup(
        name = transpiled_headers,
        srcs = [":" + transpiled_files],
        output_group = "headers",
        tags = tags,
    )

    # Runtime deps common to every configuration, extended per pipeline/backend.
    deps = [
        "@com_google_absl//absl/status",
        "@com_google_absl//absl/types:span",
        "//transpiler:common_runner",
    ]
    if optimizer == "yosys":
        if not interpreter:
            fail("The Yosys pipeline only implements interpreter execution.")
        if encryption == "cleartext":
            deps.extend([
                "@com_google_absl//absl/status:statusor",
                "//transpiler:yosys_cleartext_runner",
                "//transpiler/data:boolean_data",
                "@com_google_xls//xls/common/status:status_macros",
            ])
        elif encryption == "tfhe":
            deps.extend([
                "@com_google_absl//absl/status:statusor",
                "//transpiler:yosys_tfhe_runner",
                "//transpiler/data:boolean_data",
                "//transpiler/data:tfhe_data",
                "@tfhe//:libtfhe",
                "@com_google_xls//xls/common/status:status_macros",
            ])
        elif encryption == "palisade":
            deps.extend([
                "@com_google_absl//absl/status:statusor",
                "//transpiler:yosys_palisade_runner",
                "//transpiler/data:boolean_data",
                "//transpiler/data:palisade_data",
                "@palisade//:binfhe",
                "@com_google_xls//xls/common/status:status_macros",
            ])
    elif optimizer == "xls":
        if encryption == "cleartext":
            if interpreter:
                fail("No XLS interpreter for cleartext is currently implemented.")
            deps.extend([
                "//transpiler/data:boolean_data",
            ])
        elif encryption == "tfhe":
            deps.extend([
                "@tfhe//:libtfhe",
                "//transpiler/data:boolean_data",
                "//transpiler/data:tfhe_data",
            ])
            if interpreter:
                deps.extend([
                    "@com_google_absl//absl/status:statusor",
                    "//transpiler:tfhe_runner",
                    "@com_google_xls//xls/common/status:status_macros",
                ])
        elif encryption == "palisade":
            deps.extend([
                "//transpiler/data:boolean_data",
                "//transpiler/data:palisade_data",
                "@palisade//:binfhe",
            ])
            if interpreter:
                deps.extend([
                    "@com_google_absl//absl/status:statusor",
                    "//transpiler:palisade_runner",
                    "@com_google_xls//xls/common/status:status_macros",
                ])
    native.cc_library(
        name = name,
        srcs = [":" + transpiled_source],
        hdrs = [":" + transpiled_headers] + hdrs,
        copts = ["-O0"] + copts,
        tags = tags,
        deps = deps,
        **kwargs
    )
|
# File used by pyinstaller to create the executable
from PFERD.__main__ import main

# Delegate straight to PFERD's CLI entry point.
if __name__ == "__main__":
    main()
|
'''Entry point to the Flask application'''
# from .app import create_app
# APP = create_app() |
def giveWord(phrase):
    """Print the first element of each item in *phrase*.

    When *phrase* is a string, each item is a one-character string, so this
    prints every character on its own line. Returns None.
    """
    for item in phrase:
        # BUG FIX: the original line was `print(item[0]))` — an unmatched
        # closing parenthesis that made the file a SyntaxError.
        print(item[0])


# NOTE(review): giveWord returns None, so this also prints "None".
print(giveWord("tallyhoo"))
#!/usr/bin/env python3
from importlib import import_module
import sys
# Box-drawing fragments used to render the class tree. Each connector is
# exactly 4 columns wide, which render_lines() relies on when slicing the
# prefix by level (4 * (level - 1)).
SP = '\N{SPACE}'
HLIN = '\N{BOX DRAWINGS LIGHT HORIZONTAL}' * 2 + SP  # ──
VLIN = '\N{BOX DRAWINGS LIGHT VERTICAL}' + SP * 3  # │
TEE = '\N{BOX DRAWINGS LIGHT VERTICAL AND RIGHT}' + HLIN  # ├──
ELBOW = '\N{BOX DRAWINGS LIGHT UP AND RIGHT}' + HLIN  # └──
def subclasses(cls):
    """Return the direct subclasses of *cls*, including the `type` type."""
    try:
        result = cls.__subclasses__()
    except TypeError:
        # `type` itself needs the class passed explicitly.
        result = cls.__subclasses__(cls)
    return result
def tree(cls, level=0, last_sibling=True):
    """Yield (class, depth, is_last_sibling) for *cls* and all descendants."""
    yield cls, level, last_sibling
    children = subclasses(cls)
    if not children:
        return
    *leading, final = children
    for child in leading:
        yield from tree(child, level + 1, False)
    yield from tree(final, level + 1, True)
def render_lines(tree_generator):
    """Turn (cls, level, last) tuples from tree() into indented text lines."""
    # The root is emitted bare, with no connector prefix.
    cls, _, _ = next(tree_generator)
    yield cls.__name__
    prefix = ''
    for cls, level, last in tree_generator:
        # Keep one 4-column connector per ancestor level (connectors are all
        # 4 characters wide), dropping anything deeper than this node.
        prefix = prefix[: 4 * (level - 1)]
        # Earlier connectors become continuation bars; an elbow (last child)
        # becomes blank space, since that branch is finished.
        prefix = prefix.replace(TEE, VLIN).replace(ELBOW, SP * 4)
        prefix += ELBOW if last else TEE
        yield prefix + cls.__name__
def draw(cls):
    """Print the subclass tree of *cls*, one rendered line at a time."""
    rendered = render_lines(tree(cls))
    for line in rendered:
        print(line)
def parse(name):
    """Split a dotted name into (module, class); bare names map to builtins."""
    if '.' not in name:
        return 'builtins', name
    return name.rsplit('.', 1)
def main(name):
    """Import the named class and draw its subclass tree, reporting errors."""
    module_name, cls_name = parse(name)
    try:
        module = import_module(module_name)
        cls = getattr(module, cls_name)
    except ModuleNotFoundError:
        print(f'*** Could not import {module_name!r}.')
        return
    except AttributeError:
        print(f'*** {cls_name!r} not found in {module_name!r}.')
        return
    if isinstance(cls, type):
        draw(cls)
    else:
        print(f'*** {cls_name!r} is not a class.')
if __name__ == '__main__':
    # Expect exactly one argument: the class name to draw.
    if len(sys.argv) == 2:
        main(sys.argv[1])
    else:
        # NOTE(review): 'Usage:' is concatenated directly with the '\t' of the
        # next literal (no newline between them), so the header and the first
        # option share a line — confirm whether a trailing '\n' was intended.
        print('Usage:'
              f'\t{sys.argv[0]} Class # for builtin classes\n'
              f'\t{sys.argv[0]} package.Class # for other classes'
              )
|
#!/usr/bin/python
#import
import lcd
import time
import subprocess
def main():
    """Initialise the LCD and show this host's IP address on line 1.

    Assumes the `lcd` helper module defines lcd_init, lcd_byte, lcd_string
    and the LCD_LINE_1 / LCD_CMD constants — TODO confirm against lcd.py.
    """
    # BUG FIX: lcd_init/lcd_byte/lcd_string and the LCD_* constants were
    # referenced unqualified, but only `import lcd` is in scope, so every
    # call raised NameError. Qualify them through the lcd module.
    lcd.lcd_init()
    # Run hostname directly with an argument list instead of shell=True.
    ip = subprocess.check_output(['hostname', '-I']).decode('utf-8')
    # Send some right justified text
    lcd.lcd_byte(lcd.LCD_LINE_1, lcd.LCD_CMD)
    lcd.lcd_string(ip, 1)


if __name__ == '__main__':
    main()
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import falcon
from oslo_config import cfg
from oslo_log import log
from werkzeug.contrib.profiler import ProfilerMiddleware
from deckhand.control import base
from deckhand.control import buckets
from deckhand.control import health
from deckhand.control import middleware
from deckhand.control import revision_diffing
from deckhand.control import revision_documents
from deckhand.control import revision_tags
from deckhand.control import revisions
from deckhand.control import rollback
from deckhand.control import validations
from deckhand.control import versions
from deckhand import errors
# Module-level oslo.config and oslo.log handles.
CONF = cfg.CONF
LOG = log.getLogger(__name__)
def configure_app(app, version=''):
    """Register Deckhand's v1.0 routes and error handling on *app*.

    :param app: A falcon API instance.
    :param version: API version segment used in the /api/<version>/ prefix.
    :returns: The same *app*, with routes and handlers attached.
    """
    # (route template, resource instance) pairs mounted under /api/<version>.
    v1_0_routes = [
        ('buckets/{bucket_name}/documents', buckets.BucketsResource()),
        ('health', health.HealthResource()),
        ('revisions', revisions.RevisionsResource()),
        ('revisions/{revision_id}', revisions.RevisionsResource()),
        ('revisions/{revision_id}/diff/{comparison_revision_id}',
         revision_diffing.RevisionDiffingResource()),
        ('revisions/{revision_id}/documents',
         revision_documents.RevisionDocumentsResource()),
        ('revisions/{revision_id}/rendered-documents',
         revision_documents.RenderedDocumentsResource()),
        ('revisions/{revision_id}/tags', revision_tags.RevisionTagsResource()),
        ('revisions/{revision_id}/tags/{tag}',
         revision_tags.RevisionTagsResource()),
        ('revisions/{revision_id}/validations',
         validations.ValidationsResource()),
        ('revisions/{revision_id}/validations/{validation_name}',
         validations.ValidationsResource()),
        ('revisions/{revision_id}/validations/{validation_name}'
         '/entries/{entry_id}',
         validations.ValidationsResource()),
        ('rollback/{revision_id}', rollback.RollbackResource())
    ]
    for path, res in v1_0_routes:
        app.add_route(os.path.join('/api/%s' % version, path), res)
    # /versions lives outside the versioned prefix.
    app.add_route('/versions', versions.VersionsResource())
    # Error handlers (FILO handling).
    app.add_error_handler(Exception, errors.default_exception_handler)
    # Built-in error serializer.
    app.set_error_serializer(errors.default_exception_serializer)
    return app
def deckhand_app_factory(global_config, **local_config):
    """Paste app factory: build the Deckhand falcon app, optionally profiled."""
    # The order of the middleware is important because the `process_response`
    # method for `YAMLTranslator` should execute after that of any other
    # middleware to convert the response to YAML format.
    middleware_list = [middleware.YAMLTranslator(),
                       middleware.ContextMiddleware()]
    app = falcon.API(request_type=base.DeckhandRequest,
                     middleware=middleware_list)
    if CONF.profiler:
        LOG.warning("Profiler ENABLED. Expect significant "
                    "performance overhead.")
        profile_dir = "/tmp/profiles"  # nosec w/o profile data
        if not os.path.isdir(profile_dir):
            os.mkdir(profile_dir)
        LOG.debug("Profiler artifacts will be saved to %s.", profile_dir)
        # Wrap the WSGI app so each request dumps a profile into profile_dir.
        return ProfilerMiddleware(
            configure_app(app, version='v1.0'),
            profile_dir=profile_dir)
    else:
        return configure_app(app, version='v1.0')
|
#!/usr/bin/env python3
# To find catkin python3 build of tf2_py
import sys
sys.path.insert(0, '/home/smb/catkin_ws/devel/lib/python3/dist-packages')
import json
import rospy
import message_filters
import torch
import numpy as np
import cv2
import tf2_ros
from cv_bridge import CvBridge
from scipy.spatial.transform import Rotation
from geometry_msgs.msg import PointStamped, PoseStamped
from perception.utils import ros as ros_utils
from sensor_msgs.msg import Image
from perception import pipeline
from perception.utils import camera_utils
from matplotlib import cm
from vision_msgs.msg import BoundingBox3D
from . import utils
def _to_msg(keypoint, time, frame):
    """Pack a 3-element keypoint into a stamped PointStamped message."""
    msg = PointStamped()
    msg.header.stamp = time
    msg.header.frame_id = frame
    msg.point.x = keypoint[0]
    msg.point.y = keypoint[1]
    msg.point.z = keypoint[2]
    return msg
class ObjectKeypointPipeline:
    """Stereo keypoint-detection ROS node.

    Subscribes to the left/right camera images, runs the learned keypoint
    pipeline on each fresh stereo pair, and publishes keypoints, heatmaps,
    the object pose and (when an object mesh is configured) a 3D bounding box.
    """

    def __init__(self):
        left_image_topic = rospy.get_param("object_keypoints_ros/left_image_topic", "/zedm/zed_node/left_raw/image_raw_color")
        right_image_topic = rospy.get_param("object_keypoints_ros/right_image_topic", "/zedm/zed_node/right_raw/image_raw_color")
        self.left_camera_frame = rospy.get_param('object_keypoints_ros/left_camera_frame')
        # BUG FIX: the two callbacks were swapped (the left topic fed
        # _right_image_callback and vice versa), so each image landed in the
        # wrong slot before inference.
        self.left_sub = rospy.Subscriber(left_image_topic, Image, callback=self._left_image_callback, queue_size=1)
        self.right_sub = rospy.Subscriber(right_image_topic, Image, callback=self._right_image_callback, queue_size=1)
        # Latest images and timestamps; consumed (reset to None) by step().
        self.left_image = None
        self.left_image_ts = None
        self.right_image = None
        self.right_image_ts = None
        self.bridge = CvBridge()
        self.input_size = (360, 640)
        model = rospy.get_param('object_keypoints_ros/load_model', "/home/ken/Hack/catkin_ws/src/object_keypoints/model/modelv2.pt")
        if rospy.get_param('object_keypoints_ros/pnp', False):
            self.pipeline = pipeline.PnPKeypointPipeline(model, self._read_keypoints(), torch.cuda.is_available())
        else:
            self.pipeline = pipeline.KeypointPipeline(model, self._read_keypoints(), torch.cuda.is_available())
        # Per-channel normalization constants applied in _preprocess_image.
        self.rgb_mean = torch.tensor([0.5, 0.5, 0.5], requires_grad=False, dtype=torch.float32)[:, None, None]
        self.rgb_std = torch.tensor([0.25, 0.25, 0.25], requires_grad=False, dtype=torch.float32)[:, None, None]
        self._read_calibration()
        self.prediction_size = (90, 160)
        # Maps prediction-resolution coordinates back to full-image pixels.
        scaling_factor = np.array(self.image_size) / np.array(self.prediction_size)
        self.pipeline.reset(self.K, self.Kp, self.D, self.Dp, self.T_RL, scaling_factor)
        self._compute_bbox_dimensions()
        # TF
        self.tf_buffer = tf2_ros.Buffer()
        self.listener = tf2_ros.TransformListener(self.tf_buffer)
        # Publishers
        self.center_point_publisher = rospy.Publisher("object_keypoints_ros/center", PointStamped, queue_size=1)
        self.point0_pub = rospy.Publisher("object_keypoints_ros/0", PointStamped, queue_size=1)
        self.point1_pub = rospy.Publisher("object_keypoints_ros/1", PointStamped, queue_size=1)
        self.point2_pub = rospy.Publisher("object_keypoints_ros/2", PointStamped, queue_size=1)
        self.point3_pub = rospy.Publisher("object_keypoints_ros/3", PointStamped, queue_size=1)
        self.left_heatmap_pub = rospy.Publisher("object_keypoints_ros/heatmap_left", Image, queue_size=1)
        self.right_heatmap_pub = rospy.Publisher("object_keypoints_ros/heatmap_right", Image, queue_size=1)
        self.pose_pub = rospy.Publisher("object_keypoints_ros/pose", PoseStamped, queue_size=1)
        # Only used if an object mesh is set.
        if self.bbox_size is not None:
            self.bbox_pub = rospy.Publisher("object_keypoints_ros/bbox", BoundingBox3D, queue_size=1)
        else:
            self.bbox_pub = None

    def _read_calibration(self):
        """Load stereo calibration (intrinsics, distortion, extrinsics)."""
        path = rospy.get_param('object_keypoints_ros/calibration')
        params = camera_utils.load_calibration_params(path)
        self.K = params['K']
        self.Kp = params['Kp']
        self.D = params['D']
        self.Dp = params['Dp']
        self.T_RL = params['T_RL']
        self.image_size = params['image_size']

    def _read_keypoints(self):
        """Load the reference 3D keypoints from the configured JSON file."""
        path = rospy.get_param('object_keypoints_ros/keypoints')
        with open(path, 'rt') as f:
            return np.array(json.loads(f.read())['3d_points'])

    def _compute_bbox_dimensions(self):
        """Derive the half-extent bounding-box size from the object mesh, if any."""
        mesh_file = rospy.get_param('object_keypoints_ros/object_mesh', None)
        if mesh_file is not None:
            bounding_box = utils.compute_bounding_box(mesh_file)
            # Size is in both directions, surrounding the object from the object center.
            self.bbox_size = (bounding_box.max(axis=0) - bounding_box.min(axis=0)) * 0.5
        else:
            self.bbox_size = None

    def _right_image_callback(self, image):
        """Store the latest right image and its timestamp."""
        img = self.bridge.imgmsg_to_cv2(image, 'rgb8')
        self.right_image = img
        self.right_image_ts = image.header.stamp

    def _left_image_callback(self, image):
        """Store the latest left image and its timestamp."""
        img = self.bridge.imgmsg_to_cv2(image, 'rgb8')
        self.left_image = img
        self.left_image_ts = image.header.stamp

    def _preprocess_image(self, image):
        """Normalize an HxWxC uint8 image and resize it to the network input size."""
        image = image.transpose([2, 0, 1])
        image = torch.tensor(image / 255.0, dtype=torch.float32)
        image -= self.rgb_mean
        image /= self.rgb_std
        image = image[None]
        return torch.nn.functional.interpolate(image, size=self.input_size, mode='bilinear', align_corners=False).detach()

    def _publish_keypoints(self, keypoints, time):
        """Publish up to four keypoints as PointStamped messages."""
        # NOTE(review): messages are stamped rospy.Time(0) rather than the
        # `time` argument — confirm whether the image timestamp was intended.
        for i in range(min(keypoints.shape[0], 4)):
            msg = _to_msg(keypoints[i], rospy.Time(0), self.left_camera_frame)
            getattr(self, f'point{i}_pub').publish(msg)

    def _publish_pose(self, T, time):
        """Publish the object pose derived from transform *T*, plus the bbox."""
        # BUG FIX: the body previously referenced an undefined name `T`
        # while the parameter was called `pose_msg`; the passed-in transform
        # is now used directly.
        pose_msg = ros_utils.transform_to_pose(T, self.left_camera_frame, rospy.Time(0))
        self.pose_pub.publish(pose_msg)
        self._publish_bounding_box(pose_msg)

    def _publish_heatmaps(self, left, right, left_keypoints, right_keypoints):
        """Render heatmaps with the inferno colormap, overlay keypoints, publish."""
        left = ((left + 1.0) * 0.5).sum(axis=0)
        right = ((right + 1.0) * 0.5).sum(axis=0)
        left = np.clip(cm.inferno(left) * 255.0, 0, 255.0).astype(np.uint8)
        right = np.clip(cm.inferno(right) * 255.0, 0, 255.0).astype(np.uint8)
        for kp in left_keypoints:
            kp = kp.round().astype(int)
            left = cv2.circle(left, (kp[0], kp[1]), radius=2, color=(0, 255, 0), thickness=-1)
        left_msg = self.bridge.cv2_to_imgmsg(left[:, :, :3], encoding='passthrough')
        self.left_heatmap_pub.publish(left_msg)
        for kp in right_keypoints:
            kp = kp.round().astype(int)
            right = cv2.circle(right, (kp[0], kp[1]), radius=1, color=(0, 255, 0, 100), thickness=-1)
        right_msg = self.bridge.cv2_to_imgmsg(right[:, :, :3], encoding='passthrough')
        self.right_heatmap_pub.publish(right_msg)

    def _publish_bounding_box(self, pose_msg):
        """Publish a BoundingBox3D at *pose_msg* if an object mesh was set."""
        # BUG FIX: this was declared as (self, T, pose_msg) but called with a
        # single argument, raising TypeError; the unused T parameter is gone.
        if self.bbox_size is not None:
            msg = BoundingBox3D()
            msg.pose = pose_msg.pose
            msg.size.x = self.bbox_size[0]
            msg.size.y = self.bbox_size[1]
            msg.size.z = self.bbox_size[2]
            self.bbox_pub.publish(msg)

    def step(self):
        """Run one inference pass if a fresh stereo pair is available."""
        # (The unused `I = torch.eye(4)[None]` temp was removed.)
        if self.left_image is not None and self.right_image is not None:
            left_image = self._preprocess_image(self.left_image)
            right_image = self._preprocess_image(self.right_image)
            out = self.pipeline(left_image, right_image)
            # Consume the pair so stale images are not re-processed.
            self.left_image = None
            self.right_image = None
            self._publish_keypoints(out['keypoints'][0], self.left_image_ts)
            self._publish_pose(out['pose'][0], self.left_image_ts)
            self._publish_heatmaps(out['heatmap_left'][0], out['heatmap_right'][0], out['keypoints_left'][0], out['keypoints_right'][0])
if __name__ == "__main__":
with torch.no_grad():
rospy.init_node("object_keypoints_ros")
keypoint_pipeline = ObjectKeypointPipeline()
rate = rospy.Rate(10)
with torch.no_grad():
while not rospy.is_shutdown():
keypoint_pipeline.step()
rate.sleep()
|
from modules.lexer.position import Position
from modules.lexer.token_types import TT
from modules.visitor import errors as v_errors
from .ast_node import ASTNode
class TernaryOperationNode(ASTNode):
    """AST node for ternary operations (e.g. `left if middle else right`)."""

    def __init__(self, operations, values):
        self.left, self.middle, self.right = values
        self.operations = operations
        # Token payloads (e.g. ("if", "else")) and token types; both serve
        # as hashable keys into the dispatch tables below.
        self.operation_values = tuple(map(lambda x: x.value, self.operations))
        self.operators = tuple(map(lambda x: x.type, self.operations))
        super().__init__(Position(self.left.position.start, self.right.position.end))

    def __repr__(self):
        left = f"({self.left}, {self.operations[0]}, "
        return left + f"{self.middle}, {self.operations[1]}, {self.right})"

    def interpret(self, interpreter):
        """Evaluate the three operands and dispatch on the operator pair."""
        left = self.left.interpret(interpreter)
        middle = self.middle.interpret(interpreter)
        right = self.right.interpret(interpreter)
        inputs = (left, middle, right)
        if self.operators == (TT.KEYWORD, TT.KEYWORD):
            return OPERATOR_KEYWORD_FUNC[self.operation_values](*inputs)
        # BUG FIX: this previously indexed OPERATOR_FUNC with self.operations
        # (the raw token objects) rather than the operator-type tuple used
        # everywhere else as the dispatch key.
        return OPERATOR_FUNC[self.operators](*inputs)

    def transpile(self, transpiler):
        """Emit C-style `cond ? a : b` for the `a if cond else b` form."""
        left = self.left.transpile(transpiler)
        middle = self.middle.transpile(transpiler)
        right = self.right.transpile(transpiler)
        # Only the if/else keyword ternary is supported for transpilation.
        if self.operators != (TT.KEYWORD, TT.KEYWORD) or self.operation_values != ("if", "else"):
            raise v_errors.UnimplementedOperationError(self)
        return f"({middle} ? {left} : {right})"
# Dispatch tables mapping ternary-operator signatures to implementations.

# Non-keyword ternary operators: none implemented yet.
OPERATOR_FUNC = {}

# Keyword ternary operators, keyed by their (value, value) token tuple.
OPERATOR_KEYWORD_FUNC = {
    ("if", "else"): lambda x, y, z: x if y else z,
}
|
import numpy as np
import pandas as pd
import os
import dotenv
import matplotlib.pyplot as plt
# Resolve the project root (two directory levels above this file) and load
# environment variables from its `.env` file, if one exists.
project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
dotenv_path = os.path.join(project_dir, '.env')
dotenv.load_dotenv(dotenv_path)
def ks_gini(loss, score):
    """Compute the KS statistic and Gini coefficient of `score` against `loss`.

    Records are ranked by ascending score; KS is the maximum gap between the
    cumulative population share and the cumulative loss share, and Gini is
    twice the average gap.
    """
    frame = pd.DataFrame({'loss': loss, 'score': score}).sort_values(by=['score'])
    n_rows = frame.shape[0]
    # Cumulative population share: 1/n, 2/n, ..., 1.
    population_share = np.cumsum(np.ones(n_rows)) / n_rows
    # Cumulative share of total loss in score order.
    loss_share = np.cumsum(frame['loss']) / np.sum(frame['loss'])
    gap = population_share - loss_share
    return {'ks': np.max(np.abs(gap)), 'gini': 2 * np.sum(gap) / n_rows}
def lorenz_curve(loss_pred, score_pred, loss_valid, score_valid, title='Lorenz Curve'):
    """Plot Lorenz curves for train and validation predictions.

    Draws the cumulative-loss curve (ordered by ascending score) for the
    training data, a 45-degree base line, the "perfect predictions" curve,
    and the validation curve, annotated with train/valid Gini values.

    Args:
        loss_pred: actual losses for the training set.
        score_pred: predicted scores for the training set.
        loss_valid: actual losses for the validation set.
        score_valid: predicted scores for the validation set.
        title: figure suptitle.
    """
    n = len(loss_pred)
    df = pd.DataFrame({'loss_pred': loss_pred, 'score_pred': score_pred})
    df = df.sort_values(by='score_pred')
    total_loss_pred = np.sum(df['loss_pred'])
    cum_loss_pred = np.cumsum(df['loss_pred']) / total_loss_pred
    base_line = np.cumsum(np.ones(n)) / n
    # Best possible curve: losses themselves sorted ascending.
    best_line = np.cumsum(df['loss_pred'].sort_values()) / total_loss_pred
    plt.plot(np.arange(n)/n, cum_loss_pred, label='Train')
    plt.plot(np.arange(n)/n, base_line, label='Base Line')
    plt.plot(np.arange(n)/n, best_line, label='Perfect Predictions')
    loss_valid = pd.Series(loss_valid)
    score_valid = pd.Series(score_valid)
    n = len(loss_valid)
    # BUG FIX: the score column was previously named 'score_pred' while the
    # sort below referenced 'score_valid', which raised KeyError.
    df = pd.DataFrame({'loss_valid': loss_valid, 'score_valid': score_valid})
    df = df.sort_values(by='score_valid')
    total_loss_valid = np.sum(df['loss_valid'])
    cum_loss_valid = np.cumsum(df['loss_valid']) / total_loss_valid
    plt.plot(np.arange(n)/n, cum_loss_valid, label='Validation')
    plt.xlabel('% of Records')
    plt.ylabel('% of Total Actual')
    train_gini = "{:.3f}".format(ks_gini(loss_pred, score_pred)['gini'])
    valid_gini = "{:.3f}".format(ks_gini(loss_valid, score_valid)['gini'])
    plt.suptitle(title)
    plt.title('Train gini: ' + train_gini + ', Valid gini: ' + valid_gini)
    plt.legend(bbox_to_anchor=(1.15, 1), loc=2, borderaxespad=0.)
    plt.show()
    plt.close()
def gains_chart(loss, score, num_bins=10, title='Gains Chart', return_table=True, include_scores=True):
    """Show gains chart of binned scores along with KS and Gini.

    Records are bucketed into `num_bins` equal-width score bins; each bin's
    average actual loss (and optionally average score) is plotted, with bin
    counts shown as bars on a secondary axis.

    Args:
        loss: actual outcomes.
        score: model scores.
        num_bins: number of equal-width score bins.
        title: chart title (Gini is appended).
        return_table: if True, return the per-bin summary DataFrame.
        include_scores: if True, also plot average score per bin.

    Returns:
        The per-bin summary DataFrame when `return_table` is True, else None.
    """
    df = pd.DataFrame({'loss': loss, 'score': score})
    df = df.sort_values(by='score')
    # Per-bin aggregates.
    bin_scores = np.zeros(num_bins)
    bin_losses = np.zeros(num_bins)
    bin_sizes = np.zeros(num_bins)
    min_score = np.min(score)
    max_score = np.max(score)
    # Equal-width bin edges spanning [min_score, max_score].
    breaks = np.linspace(min_score, max_score, num_bins + 1)
    for i in np.arange(num_bins):
        # BUG FIX: close the first bin on the left so records whose score
        # equals min_score are counted; the previous strict `>` dropped
        # them from every bin.
        if i == 0:
            in_bin = (df['score'] >= breaks[i]) & (df['score'] <= breaks[i+1])
        else:
            in_bin = (df['score'] > breaks[i]) & (df['score'] <= breaks[i+1])
        bin_scores[i] = np.mean(df['score'][in_bin])
        bin_losses[i] = np.mean(df['loss'][in_bin])
        bin_sizes[i] = np.sum(in_bin)
    # Convert gains chart table to DataFrame
    gains_chart_df = pd.DataFrame({'bin': np.arange(1, num_bins + 1),
                                   'count': bin_sizes,
                                   'avg_score': bin_scores,
                                   'avg_loss': bin_losses}, index=np.arange(1, num_bins + 1))
    gains_chart_df = gains_chart_df[['bin', 'count', 'avg_score', 'avg_loss']]
    gini = ks_gini(loss, score)
    gini = "{:.3f}".format(gini['gini'])
    if include_scores:
        chart_min = min(np.min(gains_chart_df['avg_score']), np.min(gains_chart_df['avg_loss'])) - .1
        chart_max = max(np.max(gains_chart_df['avg_score']), np.max(gains_chart_df['avg_loss'])) + .1
    else:
        chart_min = np.min(gains_chart_df['avg_loss']) - .1
        chart_max = np.max(gains_chart_df['avg_loss']) + .1
    # Create the plot
    plt.ylim(min(chart_min, 0.0), max(1.0, chart_max))
    plt.title(title + ', Gini: ' + gini)
    plt.grid()
    # FIX: respect num_bins instead of the hard-coded np.arange(1, 11).
    plt.xticks(np.arange(1, num_bins + 1))
    plt.plot(gains_chart_df['bin'], gains_chart_df['avg_loss'], zorder=15)
    plt.scatter(gains_chart_df['bin'], gains_chart_df['avg_loss'], s=15, label='_nolegend_', zorder=14)
    if include_scores:
        plt.plot(gains_chart_df['bin'], gains_chart_df['avg_score'], zorder=8)
        # Label normalized to '_nolegend_' for consistency; matplotlib hides
        # any label starting with an underscore, so behavior is unchanged.
        plt.scatter(gains_chart_df['bin'], gains_chart_df['avg_score'], s=15, label='_nolegend_', zorder=7)
    plt.legend(bbox_to_anchor=(1.15, 1), loc=2, borderaxespad=0.)
    plt.ylabel('Avg Actual')
    plt.xlabel('Bin')
    axes2 = plt.twinx()
    axes2.set_ylabel('Count')
    axes2.bar(gains_chart_df['bin'], gains_chart_df['count'], zorder=2, color='grey')
    axes2.set_ylim(0, 3*np.max(gains_chart_df['count']))
    plt.show()
    plt.close()
    if return_table:
        return gains_chart_df
    return
def mse(loss, score):
    """Return the mean squared error between actual `loss` and predicted `score`."""
    squared_error = (loss - score) ** 2
    return np.mean(squared_error)
def correct_prediction_rate(loss, score, threshold=0.5):
    """Return the fraction of games whose outcome is correctly predicted.

    A score at or above `threshold` predicts a blue-team win (loss == 1);
    a score below it predicts a red-team win (loss == 0).
    """
    predicted_blue_win = score >= threshold
    blue_hits = np.sum(predicted_blue_win & (loss == 1))
    red_hits = np.sum(~predicted_blue_win & (loss == 0))
    return (blue_hits + red_hits) / len(loss)
def best_threshold(loss, score, step_size = .01):
    """Scan candidate thresholds in [0, 1) and return the most accurate one.

    Ties keep the earliest (lowest) threshold, since only a strictly
    better accuracy replaces the current best.
    """
    best = {'threshold': 0, 'accuracy': 0}
    for candidate in np.arange(0, 1, step_size):
        accuracy = correct_prediction_rate(loss, score, candidate)
        if accuracy > best['accuracy']:
            best = {'threshold': candidate, 'accuracy': accuracy}
    return best
def record_gbm_performance(description='', learning_rate='', max_depth='', n_estimators='', min_samples_split='',
                           min_samples_leaf='', random_state='', best_iter='', num_vars='', train_rows='',
                           valid_rows='', correct_pred_train='', correct_pred_validation='', ks_train='', ks_valid='',
                           gini_train='', gini_valid='', mse_train='', mse_valid='', train_time='',
                           file=None):
    """Append a one-row record of GBM hyper-parameters and metrics to a CSV log.

    Args:
        description ... train_time: values to record (free-form).
        file: path of the CSV log. Defaults to
            ``$MODEL_PERF_DATA_DIR + 'gbm_eval.csv'`` resolved at call time.

    Returns:
        The one-row DataFrame that was written.

    Raises:
        ValueError: if `file` is omitted and MODEL_PERF_DATA_DIR is unset.
    """
    # BUG FIX: the default path used to be computed in the signature
    # (os.getenv(...) + 'gbm_eval.csv'), which is evaluated at import time
    # and raises TypeError whenever MODEL_PERF_DATA_DIR is unset — even if
    # the function is never called. Resolve it lazily instead.
    if file is None:
        data_dir = os.getenv('MODEL_PERF_DATA_DIR')
        if data_dir is None:
            raise ValueError("MODEL_PERF_DATA_DIR is not set; pass `file` explicitly")
        file = data_dir + 'gbm_eval.csv'
    columns = ['description', 'learning_rate', 'max_depth', 'n_estimators', 'min_samples_split',
               'min_samples_leaf', 'random_state', 'best_iter', 'num_vars', 'train_rows',
               'valid_rows', 'correct_pred_train', 'correct_pred_validation', 'ks_train',
               'ks_valid', 'gini_train', 'gini_valid', 'mse_train', 'mse_valid', 'train_time']
    results = pd.DataFrame(index=np.arange(0, 1), columns=columns)
    results.iloc[0, :] = [description, learning_rate, max_depth, n_estimators, min_samples_split, min_samples_leaf,
                          random_state, best_iter, num_vars, train_rows, valid_rows, correct_pred_train,
                          correct_pred_validation, ks_train, ks_valid, gini_train, gini_valid, mse_train, mse_valid,
                          train_time]
    # Append without repeating the header when the log already exists.
    if os.path.isfile(file):
        results.to_csv(file, mode='a', header=False)
    else:
        results.to_csv(file, mode='w')
    return results
def gbm_best_iter(model, validation, actual, evaluation_metric='ks'):
    """Determine the value of n_estimators that performed best in a tree ensemble.

    Args:
        model: fitted ensemble exposing `staged_predict`.
        validation: validation feature matrix.
        actual: actual outcomes for the validation set.
        evaluation_metric: 'mse' or anything else for KS (default).

    Returns:
        dict with 'scores' (per-stage metric values) and 'best_iter'
        (1-based index of the best stage).
    """
    scores = []
    if evaluation_metric == 'mse':
        for staged_pred in model.staged_predict(validation):
            # BUG FIX: list.append returns None, so the previous
            # `scores = scores.append(...)` destroyed the list after the
            # first iteration and crashed on the second.
            scores.append(mse(actual, staged_pred))
        # BUG FIX: lower MSE is better — take the argmin, not the argmax.
        best_iter = np.argmin(scores) + 1
    else:
        for staged_pred in model.staged_predict(validation):
            scores.append(ks_gini(actual, staged_pred)['ks'])
        best_iter = np.argmax(scores) + 1
    return {'scores': scores, 'best_iter': best_iter}
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../"))
sys.path.insert(0, os.path.abspath("../src"))
# sys.path.insert(0, os.path.abspath(os.path.join("..", "..", "src")))
# -- Project information -----------------------------------------------------
project = "systa"
copyright = "2021, Dustin Wyatt"
author = "Dustin Wyatt"
# The full version, including alpha/beta/rc tags
release = "0.0.1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.autosectionlabel",
    "sphinx.ext.intersphinx",
    "sphinx_multiversion",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
# sphinx-book-theme options: link the GitHub repo and enable the
# repository / issues / edit-page buttons in the page header.
html_theme_options = {
    "repository_url": "https://github.com/dmwyatt/systa",
    "use_repository_button": True,
    "use_issues_button": True,
    "use_edit_page_button": True,
    "repository_branch": "dev",
    "path_to_docs": "docs/source",
    "home_page_in_toc": True,
}
# Sidebar template stack applied to every page ("**"), including the
# sphinx-multiversion version navigation.
html_sidebars = {
    "**": [
        "search-field.html",
        "sbt-sidebar-nav.html",
        "versions-nav.html",
        "changelog-link.html",
        "sbt-sidebar-footer.html",
    ]
}
def get_latest_version():
    """Return the highest version tag in the repository, or None if there are no tags."""
    import subprocess

    # BUG FIX: without shell=True, subprocess.run requires an argument list;
    # passing the whole command as a single string makes the OS look for a
    # program literally named "git tag -l --sort=version:refname" and raises
    # FileNotFoundError on POSIX systems.
    results = subprocess.run(
        ["git", "tag", "-l", "--sort=version:refname"], capture_output=True
    )
    try:
        # Tags are sorted ascending by version, so the last non-empty line wins.
        return [v for v in results.stdout.decode("utf8").split("\n") if v][-1]
    except IndexError:
        return None
smv_latest_version = get_latest_version()
smv_remote_whitelist = None
smv_released_pattern = r"^refs/tags/v?\d.*$"
smv_branch_whitelist = r"^main$|^dev$"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
rst_prolog = """
.. role:: python(code)
:language: python
"""
autodoc_typehints = "description"
autoclass_content = "both"
autodoc_type_aliases = {
"EventRangesType": "systa.events.types.EventRangesType",
"EventRangeType": "systa.events.types.EventRangeType",
"EventType": "systa.events.types.EventType",
"EventsTypes": "systa.events.types.EventsType",
"EventTypeNamesType": "systa.events.types.EventTypeNamesType",
"WindowLookupType": "systa.windows.WindowLookupType",
"ObjIdType": "systa.events.types.ObjIdType",
}
doctest_global_setup = """
import subprocess
import time
from systa.windows import current_windows
current_notepads = current_windows["Untitled - Notepad"]
for np in current_notepads:
np.exists = False
notepad_process = subprocess.Popen(["notepad.exe"])
time.sleep(.3)
"""
doctest_global_cleanup = """
notepad_process.kill()
from systa.events.store import callback_store
callback_store.clear_store()
"""
autosectionlabel_prefix_document = True
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"pynput": ("https://pynput.readthedocs.io/en/latest", None),
}
|
import re
import numpy as np
import warnings
import copy
from .utils import is_pos_int, is_non_neg_int, \
is_proportion, is_positive, is_non_negative, \
inherits
class layout:
    # Stores the arrangement specification (design matrix / nrow / ncol /
    # relative column widths and row heights) that `cow.patch` uses to place
    # patches. The design may be given explicitly or inferred lazily.
    def __init__(self,
                 ncol=None,
                 nrow=None,
                 byrow=None,
                 rel_widths=None,
                 rel_heights=None,
                 design=None
                 ):
        """
        layout class to store information about arangement of patches found
        in `cow.patch`.

        Arguments
        ---------
        ncol : integer
            Integer for the number of columns to arrange the the patches in.
            The default is None (which avoids conflicts if a value for
            `design` is provided). If ``ncol`` is None but ``nrow`` is not,
            then ``ncol`` will default to the minimum number of columns to
            make sure that all patches can be visualized.
        nrow : integer
            Integer for the number of rows to arrange the the patches in.
            The default is None (which avoids conflicts if a value for
            ``design`` is provided). If ``nrow`` is None but ``ncol`` is not,
            then ``nrow`` will default to the minimum number of rows to make
            sure that all patches can be visualized.
        byrow : boolean
            If ``ncol`` and/or ``nrow`` is included, then this boolean
            indicates if the patches should be ordered by row (default if
            ``byrow`` is None or when parameter is ``True``) or by column (if
            ``byrow`` was ``False``).
        design : np.array (float based) or str
            Specification of the location of each patch in the arrangement.
            Can either be a float numpy array with integers between 0 and
            the number of patches to arrange, or a text string that captures
            similar ideas to the array approach but uses capital alphabetical
            characters (A-Z) to indicate each figure. More information is in
            Notes.
        rel_widths : list, np vector or tuple
            Numerical vector of relative columns widths. This not required,
            the default would be ``np.ones(ncol)`` or
            ``np.ones(design.shape[0])``. Note that this is a relative sizing
            and the values are only required to be non-negative, non-zero
            values, for example ``[1,2]`` would would make the first column
            twice as wide as the second column.
        rel_heights : list or tuple
            Numerical vector of relative row heights. This not required,
            the default would be ``np.ones(nrow)`` or
            ``np.ones(design.shape[1])``. Note that this is a relative sizing
            and the values are only required to be non-negative, non-zero
            values, for example ``[1,2]`` would would make the first row twice
            as tall as the second row.

        Notes
        -----
        *Design*

        The ``design`` parameter expects specific input.

        1. If the ``design`` is input as a numpy array, we expect it to have
        integers only (0 to # patches-1). It is allowed to have ``np.nan``
        values if certain "squares" of the layout are not covered by others
        (the covering is defined by the value ordering). Note that we won't
        check for overlap and ``np.nan`` is not enforced if another patches'
        relative (min-x,min-y) and (max-x, max-y) define a box over that
        ``np.nan``'s area.

        An example of a design of the numpy array form could look like

        >>> my_np_design = np.array([[1,1,2],
        ...                          [3,3,2],
        ...                          [3,3,np.nan]])

        2. if the ``design`` parameter takes in a string, we expect it to have
        a structure such that each line (pre ``\\\\n``) contains the same number
        of characters, and these characters must come from the first
        (number of patches) capital alphabetical characters or the ``\#`` or
        ``.`` sign to indicate an empty square. Similar arguments w.r.t.
        overlap and the lack of real enforcement for empty squares applies
        (as in 1.).

        An example of a design of the string form could look like

        >>> my_str_design = \"\"\"
        ... AAB
        ... CCB
        ... CC\#
        ... \"\"\"

        or

        >>> my_str_design = \"\"\"
        ... AAB
        ... CCB
        ... CC.
        ... \"\"\"

        See the `Layout guide`_ for more detailed examples of functionality.

        .. _Layout guide: https://benjaminleroy.github.io/cowpatch/guides/Layout.html

        *Similarities to our `R` cousins:*

        This layout function is similar to `patchwork\:\:plot_layout <https://patchwork.data-imaginist.com/reference/plot_layout.html>`_
        (with a special node to ``design`` parameter) and helps perform similar
        ideas to `gridExtra\:\:arrangeGrob <https://cran.r-project.org/web/packages/gridExtra/vignettes/arrangeGrob.html>`_'s
        ``layout_matrix`` parameter, and `cowplot\:\:plot_grid <https://wilkelab.org/cowplot/reference/plot_grid.html>`_'s
        ``rel_widths`` and ``rel_heights`` parameters.

        Examples
        --------
        >>> # Necessary libraries for example
        >>> import numpy as np
        >>> import cowpatch as cow
        >>> import plotnine as p9
        >>> import plotnine.data as p9_data

        >>> g0 = p9.ggplot(p9_data.mpg) +\\
        ...     p9.geom_bar(p9.aes(x="hwy")) +\\
        ...     p9.labs(title = 'Plot 0')
        >>> g1 = p9.ggplot(p9_data.mpg) +\\
        ...     p9.geom_point(p9.aes(x="hwy", y = "displ")) +\\
        ...     p9.labs(title = 'Plot 1')
        >>> g2 = p9.ggplot(p9_data.mpg) +\\
        ...     p9.geom_point(p9.aes(x="hwy", y = "displ", color="class")) +\\
        ...     p9.labs(title = 'Plot 2')
        >>> g3 = p9.ggplot(p9_data.mpg[p9_data.mpg["class"].isin(["compact",
        ...                                                      "suv",
        ...                                                      "pickup"])]) +\\
        ...     p9.geom_histogram(p9.aes(x="hwy"),bins=10) +\\
        ...     p9.facet_wrap("class")

        >>> # design matrix
        >>> vis_obj = cow.patch(g1,g2,g3)
        >>> vis_obj += cow.layout(design = np.array([[0,1],
        ...                                          [2,2]]))
        >>> vis_obj.show()

        >>> # design string
        >>> vis_obj2 = cow.patch(g1,g2,g3)
        >>> vis_obj2 += cow.layout(design = \"\"\"
        ... AB
        ... CC
        ... \"\"\")
        >>> vis_obj2.show()

        >>> # nrow, ncol, byrow
        >>> vis_obj3 = cow.patch(g0,g1,g2,g3)
        >>> vis_obj3 += cow.layout(nrow=2, byrow=False)
        >>> vis_obj3.show()

        >>> # rel_widths/heights
        >>> vis_obj = cow.patch(g1,g2,g3)
        >>> vis_obj += cow.layout(design = np.array([[0,1],
        ...                                          [2,2]]),
        ...                       rel_widths = np.array([1,2]))
        >>> vis_obj.show()

        See also
        --------
        area : object class that helps ``layout`` define where plots will go
        in the arangement
        patch : fundamental object class which is combined with ``layout`` to
        defin the overall arangement of plots
        """
        # An explicit design takes precedence: derive nrow/ncol from its shape
        # and warn if the caller also supplied conflicting ncol/nrow.
        if design is not None:
            if ncol is not None or nrow is not None:
                warnings.warn("ncol and nrow are overridden"+\
                              " by the design parameter")
            if isinstance(design, np.ndarray):
                # A 1d array is interpreted as a single-row design.
                if len(design.shape) == 1:
                    warnings.warn("design matrix is 1d,"+\
                                  " will be seen as a 1-row design")
                    nrow, ncol = 1, design.shape[0]
                    design = design.reshape((1,-1))
                else:
                    nrow, ncol = design.shape
            if isinstance(design, str):
                # convert design to desirable structure matrix structure
                design = self._design_string_to_mat(design)
                nrow, ncol = design.shape
        # If ncol/nrow were not given, fall back to the lengths of the
        # relative width/height vectors (converting lists/tuples to arrays).
        if ncol is None:
            if rel_widths is not None:
                if isinstance(rel_widths, np.ndarray):
                    ncol = rel_widths.shape[0]
                if isinstance(rel_widths, list) or \
                    isinstance(rel_widths, tuple):
                    ncol = len(rel_widths)
                    rel_widths = np.array(rel_widths)
        if nrow is None:
            if rel_heights is not None:
                if isinstance(rel_heights, np.ndarray):
                    nrow = rel_heights.shape[0]
                if isinstance(rel_heights, list) or \
                    isinstance(rel_heights, tuple):
                    nrow = len(rel_heights)
                    rel_heights= np.array(rel_heights)
        if rel_widths is None and rel_heights is None:
            assert not (ncol is None and nrow is None), \
                "need some parameters to not be none in design initialization"
        # Default relative sizes to uniform 1s when the grid dimension is known.
        if rel_widths is None and ncol is not None:
            rel_widths = np.ones(ncol)
        if rel_heights is None and nrow is not None:
            rel_heights = np.ones(nrow)
        if rel_heights is not None:
            rel_heights = np.array(rel_heights)
        if rel_widths is not None:
            rel_widths = np.array(rel_widths)
        # if design is None:
        #     if byrow is None or byrow:
        #         order_str = "C"
        #     else:
        #         order_str = "F"
        #     design = np.arange(ncol*nrow,dtype = int).reshape((nrow, ncol),
        #                                                       order = order_str)
        # byrow only matters when the design is generated lazily, so an
        # explicit design clears it.
        if design is not None:
            byrow = None
        # ncol/nrow and rel_widths/rel_heights correct alignment
        if ncol is not None and rel_widths is not None:
            if ncol != rel_widths.shape[0]:
                raise ValueError("ncol (potentially from the design) and "+\
                                 "rel_widths disagree on size of layout")
        if nrow is not None and rel_heights is not None:
            if nrow != rel_heights.shape[0]:
                raise ValueError("nrow (potentially from the design) and "+\
                                 "rel_heights disagree on size of layout")
        self.ncol = ncol
        self.nrow = nrow
        self.__design = design
        self.byrow = byrow
        self.rel_widths = rel_widths
        self.rel_heights = rel_heights
        # num_grobs is None when no explicit design was given; it is then
        # resolved later via the num_grobs arguments of other methods.
        self.num_grobs = self._assess_mat(design)

    def _design_string_to_mat(self, design):
        """
        Internal function to convert design string into a matrix

        Arguments
        ---------
        design : str
            design in a string format

        Returns
        -------
        design : np.array integer
            design in np.array format
        """
        design_clean = re.sub(" *\t*", "", design) # removing spaces and tabs
        design_clean = re.sub("^\n*", "", design_clean) # remove leading nl
        design_clean = re.sub("\n*$", "", design_clean) # remove following nl
        row_info = re.split("\n", design_clean)
        ncol_lengths = np.unique([len(x) for x in row_info])
        if ncol_lengths.shape != (1,):
            raise ValueError("expect all rows in design to have the same "+\
                             "number of entries, use # for an empty space "+\
                             "if using a string format.")
        # NOTE(review): int() on a 1-element ndarray is deprecated in newer
        # numpy releases — confirm the targeted numpy version.
        ncol = int(ncol_lengths)
        nrow = len(re.findall("\n", design)) + 1
        # 'A' -> 0, 'B' -> 1, ...; '#' and '.' mark empty squares (np.nan).
        design = np.array([[ ord(val)-65
                                 if not np.any([val == x for x in ["#","."]])
                                 else np.nan
                             for val in r]
                           for r in row_info])
        return design

    def _get_design(self, num_grobs=None):
        """
        create a design matrix if not explicit design has been provided
        """
        if self.__design is not None:
            return self.__design
        if num_grobs is None:
            if self.num_grobs is None:
                raise ValueError("unclear number of grobs in layout...")
            else:
                num_grobs = self.num_grobs
        # byrow=None is treated the same as byrow=True (row-major fill).
        if self.byrow is None or self.byrow:
            order_str = "C"
        else:
            order_str = "F"
        # if only ncol or nrow is defined...
        ncol = self.ncol
        nrow = self.nrow
        if ncol is None:
            ncol = int(np.ceil(num_grobs / nrow))
        if nrow is None:
            nrow = int(np.ceil(num_grobs / ncol))
        inner_design = np.arange(ncol*nrow,
                                 dtype = float).reshape((nrow, ncol),
                                                        order = order_str)
        # Cells beyond the number of grobs are left empty.
        inner_design[inner_design >= num_grobs] = np.nan
        _ = self._assess_mat(inner_design) # should pass since we just built it...
        return inner_design

    # property
    design = property(_get_design)
    """
    defines underlying ``design`` attribute (potentially defined relative to a
    ``cow.patch`` object if certain structure are not extremely specific.
    """

    def _assess_mat(self, design):
        """
        Assesses if the design matrix includes at least 1 box for patches
        indexed 0 to (number of patches - 1). This doesn't actually assume to know
        the number of patches.

        Arguments
        ---------
        design : np.array (integer)
            design in numpy array format

        Returns
        -------
        int
            number of patches expected in the overall matrix.

        Raises
        ------
        ValueError
            if design matrix doesn't include at least at least 1 box for all
            indices between 0 to (number of patches - 1)
        """
        if design is None:
            return None # to identify later that we don't have a design matrix
        unique_vals = np.unique(design)
        unique_vals = np.sort(
            unique_vals[np.logical_not(np.isnan(unique_vals))])
        num_unique = unique_vals.shape[0]
        # The non-nan values must be exactly 0..num_unique-1 (no gaps).
        if not np.allclose(unique_vals, np.arange(num_unique)):
            raise ValueError("design input requires values starting "+\
                             "with 0/A and through integer/alphabetical "+\
                             "value expected for the number of patches "+\
                             "provided")
        return num_unique

    def _rel_structure(self, num_grobs=None):
        """
        provide rel_structure (rel_widths, rel_heights) if missing

        Arguments
        ---------
        num_grobs : int
            if not None, then this value will be used to understand the number
            of grobs to be laid out

        Returns
        -------
        rel_widths : np.array vector
            a vector of relative widths of the columns of the layout design
        rel_heights : np.array vector
            a vector of relative heights of the rows of the layout design
        """
        # Without num_grobs we need either both grid dimensions or both
        # relative-size vectors to be already known.
        if num_grobs is None:
            if not (self.ncol is not None and \
                    self.nrow is not None) and \
                not (self.rel_widths is not None and \
                     self.rel_heights is not None):
                raise ValueError("unclear number of grobs in layout -> "+\
                                 "unable to identify relative width and height")
        rel_widths = self.rel_widths
        rel_heights = self.rel_heights
        ncol = self.ncol
        nrow = self.nrow
        if rel_widths is not None and ncol is None:
            ncol = rel_widths.shape[0]
        if rel_heights is not None and nrow is None:
            nrow = rel_heights.shape[0]
        if ncol is None:
            ncol = int(np.ceil(num_grobs/nrow))
        if rel_widths is None:
            rel_widths = np.ones(ncol)
        if nrow is None:
            nrow = int(np.ceil(num_grobs/ncol))
        if rel_heights is None:
            rel_heights = np.ones(nrow)
        return rel_widths, rel_heights

    def _element_locations(self, width_pt, height_pt, num_grobs=None):
        """
        create a list of ``area`` objects associated with the location of
        each of the layout's grobs w.r.t. a given points width and height

        Arguments
        ---------
        width_pt : float
            global width (in points) of the full arangement of patches
        height_pt : float
            global height (in points) of the full arangement of patches
        num_grobs : integer
            if not ``None``, then this value will be used to understand the
            number of grobs to be laid out

        Returns
        -------
        list
            list of ``area`` objects describing the location for each of the
            layout's grobs (in the order of the index in the self.design)
        """
        if self.num_grobs is None and num_grobs is None:
            raise ValueError("unclear number of grobs in layout...")
        if self.num_grobs is not None:
            if num_grobs is not None and num_grobs != self.num_grobs:
                warnings.warn("_element_locations overrides num_grobs "+\
                              "with self.num_grobs")
            num_grobs = self.num_grobs
        rel_widths, rel_heights = self._rel_structure(num_grobs=num_grobs)
        areas = []
        for p_idx in np.arange(num_grobs):
            # Bounding box of all design cells labeled p_idx: the covered
            # column range gives x/width, the covered row range gives y/height.
            dmat_logic = self._get_design(num_grobs=num_grobs) == p_idx
            r_logic = dmat_logic.sum(axis=1) > 0
            c_logic = dmat_logic.sum(axis=0) > 0
            inner_x_where = np.argwhere(c_logic)
            inner_x_left = np.min(inner_x_where)
            inner_x_right = np.max(inner_x_where)
            inner_width = inner_x_right - inner_x_left + 1
            inner_x_where = np.argwhere(r_logic)
            inner_y_top = np.min(inner_x_where)
            inner_y_bottom = np.max(inner_x_where)
            inner_height = inner_y_bottom - inner_y_top + 1
            inner_design_area = area(x_left = inner_x_left,
                                     y_top = inner_y_top,
                                     width = inner_width,
                                     height = inner_height,
                                     _type = "design")
            # Convert design-grid units to point units before returning.
            areas.append(inner_design_area.pt(rel_widths=rel_widths,
                                              rel_heights=rel_heights,
                                              width_pt=width_pt,
                                              height_pt=height_pt))
        return areas

    def _yokogaki_ordering(self, num_grobs=None):
        """
        calculates the yokogaki (left to right, top to bottom) ordering
        the the patches

        Arguments
        ---------
        num_grobs : integer
            if not ``None``, then this value will be used to understand the
            number of grobs to be laid out

        Returns
        -------
        numpy array (vector) of integer index of plots in yokogaki ordering

        Notes
        -----
        Yokogaki is a Japanese word that concisely describes the left to right,
        top to bottom writing format. We'd like to thank `stack overflow`_.
        for pointing this out.

        .. _stack overflow:
            https://english.stackexchange.com/questions/81520/is-there-a-word-for-left-to-right-and-top-to-bottom
        """
        if self.num_grobs is None and num_grobs is None:
            raise ValueError("unclear number of grobs in layout...")
        if self.num_grobs is not None:
            if num_grobs is not None and num_grobs != self.num_grobs:
                warnings.warn("_element_locations overrides num_grobs "+\
                              "with self.num_grobs")
            num_grobs = self.num_grobs
        # NOTE(review): num_grobs is not forwarded to _element_locations here;
        # if self.num_grobs is None this call raises even though a num_grobs
        # argument was supplied — confirm whether that is intended.
        areas = self._element_locations(1,1) # basically getting relative positions (doesn't matter) - nor does it matter about rel_height and width, but ah well
        all_x_left = np.array([a.x_left for a in areas])
        all_y_top = np.array([a.y_top for a in areas])
        index_list = np.arange(num_grobs)
        yokogaki_ordering = []
        # remember y_tops are w.r.t top axis
        for y_val in np.sort(np.unique(all_y_top)):
            given_row_logic = all_y_top == y_val
            inner_index = index_list[given_row_logic]
            inner_x_left = all_x_left[given_row_logic]
            # Within a row, order the patches left to right.
            row_ids = inner_index[np.argsort(inner_x_left)]
            yokogaki_ordering += list(row_ids)
        return np.array(yokogaki_ordering)

    def __hash__(self):
        """
        Creates a 'unique' hash for the object to help with identification

        Returns
        -------
        hash integer
        """
        if self.num_grobs is None:
            design_list = [None]
        else:
            design_list = list(self.design.ravel())
        rw_list = [None]
        if self.rel_widths is not None:
            rw_list = list(self.rel_widths)
        rh_list = [None]
        if self.rel_heights is not None:
            rh_list = list(self.rel_heights)
        info_list = design_list + \
            rw_list + rh_list +\
            [self.ncol, self.nrow, self.num_grobs]
        return abs(hash(tuple(info_list)))

    def __str__(self):
        return "<layout (%d)>" % self.__hash__()

    def __repr__(self):
        # Unknown (None) attributes are rendered as "unk"/"*unk*".
        nrow_str = str(self.nrow)
        if self.nrow is None:
            nrow_str = "unk"
        ncol_str = str(self.ncol)
        if self.ncol is None:
            ncol_str = "unk"
        if self.num_grobs is None:
            design_str = "*unk*"
        else:
            design_str = self.design.__str__()
        rw_str = "unk"
        if self.rel_widths is not None:
            rw_str = self.rel_widths.__str__()
        rh_str = "unk"
        if self.rel_heights is not None:
            rh_str = self.rel_heights.__str__()
        out = "design (%s, %s):\n\n"% (nrow_str, ncol_str) +\
            design_str +\
            "\n\nwidths:\n" +\
            rw_str +\
            "\nheights:\n" +\
            rh_str
        return self.__str__() + "\n" + out

    def __eq__(self, value):
        """
        checks if object is equal to another object (value)

        Arguments
        ---------
        value : object
            another object (that major or may not be of the layout class)

        Returns
        -------
        boolean
            if current object and other object (value) are equal
        """
        # if value is not a layout...
        if not inherits(value, layout):
            return False
        # if __design hasn't been specified on 1 but is on another
        if (self.__design is None and value.__design is not None) or\
            (self.__design is not None and value.__design is None):
            return False
        # accounting for lack of __design specification
        design_logic = True
        if self.__design is not None:
            design_logic = np.allclose(self.design,value.design,equal_nan=True)
        # Relative sizes compare equal up to a constant scale factor (the
        # element-wise ratio must be a single unique value).
        # NOTE(review): this division raises if rel_widths/rel_heights are
        # None on either object — confirm whether that can occur here.
        return design_logic and \
            self.ncol == value.ncol and \
            self.nrow == value.nrow and \
            np.unique(self.rel_heights/value.rel_heights).shape[0] == 1 and \
            np.unique(self.rel_widths/value.rel_widths).shape[0] == 1
class area:
def __init__(self,
x_left, y_top,
width, height,
_type):
"""
object that stores information about what area a ``patch`` will fill
Arguments
---------
x_left : float
scalar of where the left-most point of the patch is located (impacted
by the ``_type`` parameter)
y_top : float
scalar of where the top-most point of the patch is located (impacted
by the ``_type`` parameter)
width : float
scalar of the width of the patch (impacted by the ``_type``
parameter)
height : float
scalar of the height of the patch (impacted by the ``_type``
parameter)
_type : str {"design", "relative", "pt"}
describes how the parameters are stored. See Notes for more
information between the options.
Notes
-----
These objects provide structural information about where in the overall
arangement individual plots / sub arangments lie.
The ``_type`` parameter informs how to understand the other parameters:
1. "design" means that the values are w.r.t. to a design matrix
relative to the `layout` class, and values are relative to the rows
and columns units.
2. "relative" means the values are defined relative to the full size of
the canvas and taking values between 0-1 (inclusive).
3. "pt" means that values are defined relative to point values
See also
--------
layout : object that incorporates multiple area definitions to define
layouts.
"""
# some structure check:
self._check_info_wrt_type(x_left, y_top, width, height, _type)
self.x_left = x_left
self.y_top = y_top
self.width = width
self.height = height
self._type = _type
    def _check_info_wrt_type(self, x_left, y_top, width, height, _type):
        """
        some logic checks of inputs relative to ``_type`` parameter

        Arguments
        ---------
        x_left : float
            scalar of where the left-most point of the patch is located
            (impacted by the ``_type`` parameter)
        y_top : float
            scalar of where the top-most point of the patch is located
            (impacted by the ``_type`` parameter)
        width : float
            scalar of the width of the patch (impacted by the ``_type``
            parameter)
        height : float
            scalar of the height of the patch (impacted by the ``_type``
            parameter)
        _type : str {"design", "relative", "pt"}
            describes how the parameters are stored. Options include
            ["design", "relative", "pt"]. See class docstring for more info

        Raises
        ------
        ValueError
            if any of the first four parameters don't make sense with respect
            to the ``_type`` parameter
        """
        if _type not in ["design", "relative", "pt"]:
            raise ValueError("_type parameter not an acceptable option, see"+\
                             " documentation")
        # "design": grid coordinates — positions are non-negative integers,
        # sizes are strictly positive integers.
        if _type == "design" and \
            not np.all([is_non_neg_int(val) for val in [x_left,y_top]] +\
                       [is_pos_int(val) for val in [width,height]]) :
            raise ValueError("with _type=\"design\", all parameters must be "+\
                             "positive integers")
        # "relative": all values are proportions in [0,1]; sizes must also
        # be strictly positive.
        elif _type == "relative" and \
            not np.all([is_proportion(val) for val in [x_left,y_top,
                                                       width,height]] +\
                       [is_positive(val) for val in [width,height]]):
            raise ValueError("with _type=\"relative\", all parameters should"+\
                             " be between 0 and 1 (inclusive) and width and"+\
                             " height cannot be 0")
        # "pt": positions non-negative, sizes strictly positive (floats ok).
        elif _type == "pt" and \
            not np.all([is_non_negative(val) for val in [x_left,y_top]] +\
                       [is_positive(val) for val in [width,height]]):
            raise ValueError("with _type=\"pt\", all x_left and y_top should"+\
                             " be non-negative and width and height should"+\
                             " be strictly positive")
def _design_to_relative(self, rel_widths, rel_heights):
"""
translates an area object with ``_type`` = "design" to area object
with ``_type`` = "relative".
Arguments
---------
rel_widths : np.array (vector)
list of relative widths of each column of the layout matrix
rel_heights : np.array (vector)
list of relative heights of each row of the layout matrix
Returns
-------
area object
area object of ``_type`` = "relative"
"""
rel_widths = rel_widths/np.sum(rel_widths)
rel_heights = rel_heights/np.sum(rel_heights)
x_left = np.sum(rel_widths[:(self.x_left)])
y_top = np.sum(rel_heights[:(self.y_top)])
width = np.sum(rel_widths[self.x_left:(self.x_left + self.width)])
height = np.sum(rel_heights[self.y_top:(self.y_top + self.height)])
rel_area = area(x_left=x_left,
y_top=y_top,
width=width,
height=height,
_type="relative")
return rel_area
def _relative_to_pt(self, width_pt, height_pt):
"""
translates an area object with ``_type`` = "relative" to area object
with ``_type`` = "pt".
Arguments
---------
width_pt : float
width in points
height_pt : float
height in points
Returns
-------
area object
area object of ``_type`` = "pt"
"""
return area(x_left = self.x_left * width_pt,
y_top = self.y_top * height_pt,
width = self.width * width_pt,
height = self.height * height_pt,
_type = "pt")
def pt(self,
width_pt=None,
height_pt=None,
rel_widths=None,
rel_heights=None
):
"""
Translates area object to ``_type`` = "pt"
Arguments
---------
width_pt : float
width in points (required if ``_type`` is not "pt")
height_pt : float
height in points (required if ``_type`` is not "pt")
rel_widths : np.array (vector)
list of relative widths of each column of the layout matrix
(required if ``_type`` is "design")
rel_heights : np.array (vector)
list of relative heights of each row of the layout matrix
(required if ``_type`` is "design")
Returns
-------
area object
area object of ``_type`` = "pt"
"""
if self._type == "design":
rel_area = self._design_to_relative(rel_widths = rel_widths,
rel_heights = rel_heights)
return rel_area.pt(width_pt = width_pt, height_pt = height_pt)
elif self._type == "relative":
return self._relative_to_pt(width_pt = width_pt,
height_pt = height_pt)
elif self._type == "pt":
return copy.deepcopy(self)
else:
raise ValueError("_type attributes altered to a non-acceptable"+\
" value")
def _hash(self):
"""
replacement function for ``__hash__`` due to equality conflicts
Notes
-----
required since we defined ``__eq__`` and this conflicts with the
standard ``__hash__``
"""
return hash((self.x_left, self.y_top,
self.width, self.height,
self._type))
def __str__(self):
return "<area (%d)>" % self._hash()
def __repr__(self):
out = "_type: " + self._type +\
"\n\nx_left: " +\
self.x_left.__str__() +\
"\ny_top: " +\
self.y_top.__str__() +\
"\nwidth: " +\
self.width.__str__() +\
"\nheight: " +\
self.height.__str__()
return self.__str__() + "\n" + out
def __eq__(self, value):
return type(self) == type(value) and \
np.allclose(np.array([self.x_left, self.y_top,
self.width, self.height]),
np.array([value.x_left, value.y_top,
value.width, value.height])) and \
self._type == value._type
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from .analysis import polyfit
#for task 2E
def plot_water_levels(station, dates, levels):
    """Plot the water level history for a station, with horizontal guides at
    the station's typical low and high levels."""
    plt.plot(dates, levels)
    # horizontal guides at the typical low/high levels
    rng = station.typical_range
    plt.axhline(y=rng[0])
    plt.axhline(y=rng[1])
    # axis labels, rotated date ticks, and the station name as title
    plt.xlabel('date')
    plt.ylabel('water level (m)')
    plt.xticks(rotation=45)
    plt.title(station.name)
    # tight_layout keeps the rotated date labels from being clipped
    plt.tight_layout()
    plt.show()
#for task 2F
def plot_water_level_with_fit(station, dates, levels, p):
    """Plot measured water levels together with a degree-``p`` best-fit
    polynomial and the station's typical low/high range."""
    poly, d0 = polyfit(dates, levels, p)
    # shift dates into the same offset frame the polynomial was fitted in
    shifted = matplotlib.dates.date2num(dates) - d0
    xs = np.linspace(shifted[0], shifted[-1], 30)
    # raw data as dots, fitted polynomial as a line
    plt.plot(shifted, levels, '.')
    plt.plot(xs, poly(xs))
    # typical low/high guides drawn as red horizontal lines
    low = np.full(30, station.typical_range[0])
    high = np.full(30, station.typical_range[1])
    plt.plot(xs, low, "-r")
    plt.plot(xs, high, "-r")
    # titles and labels
    plt.xlabel("days ago")
    plt.ylabel("water level(m)")
    plt.title(station.name)
    plt.show()
#!/usr/bin/python3
# coding: utf-8
import gensim
import logging
import sys
from evaluate_lemmas import evaluate_synsets
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
# Loading model and semantic similarity dataset
# Expects exactly three CLI arguments: model file, wordnet pairs file, static pairs file.
modelfile, wordnet_scores, static_scores = sys.argv[1:]
model = gensim.models.KeyedVectors.load_word2vec_format(modelfile, binary=False)
# Pre-calculating vector norms
# NOTE(review): init_sims() is deprecated in gensim 4.x -- confirm the pinned gensim version.
model.init_sims(replace=True)
# dummy4unknown=True scores out-of-vocabulary pairs as 0 instead of skipping them
wordnet_synset_score = model.evaluate_word_pairs(wordnet_scores, dummy4unknown=True)
static_synset_score = model.evaluate_word_pairs(static_scores, dummy4unknown=True)
dynamic_synset_score = evaluate_synsets(model, 'simlex/simlex_original.tsv', logger,
                                        dummy4unknown=True)
# Short model name: collapse the marker and strip the trailing extension
# (presumably ".txt.gz" given the 7-char slice -- TODO confirm).
name = modelfile.replace('_embeddings_', '_')[:-7]
# evaluate_word_pairs returns (pearson, spearman, oov_ratio); [1][0] is the Spearman rho
print('Model\tWordnet\tStatic\tDynamic')
print(name + '\t' + str(round(wordnet_synset_score[1][0], 4)) + '\t'
      + str(round(static_synset_score[1][0], 4)) + '\t'
      + str(round(dynamic_synset_score[1][0], 4)))
|
"""
Here: f(x) = x^2 - A
We've got to minimize f(x) to find the square root of A, which is x.
Newton-Raphson's method -
Primer:
For a curve, tangent at point x_k is:
a) y = f'(x)*(x - x_k) + f(x_k) (slope is f'(x), f(x_k) is "c")
The method now says, let's start from an initial condition: x_k
The next best estimate of the root: x_k+1 is the x-intercept of tangent at x_k; which means y=0.
Plugging this at a),
0 = f'(x_k)*(x_k+1 - x_k) + f(x_k)
x_k+1 = x_k - f(x_k) / f'(x_k)
Now x_k+1 = x_k for the next iteration.
side note: it is mesmerizing to see how newton thought algorithmically. Math is pretty much algorithms with constraints.
This also made me read about quasi-newton methods, like BFGS, which I had used in camera calibration. The appreciation for mathematics isn't enough.
"""
import numpy as np
def square_root(number, accuracy_tolerance=1e-4, num_iterations=1000):
    """Estimate the square root of `number` via Newton-Raphson iteration.

    Args:
        number: non-negative number whose square root is wanted.
        accuracy_tolerance: stop once two successive estimates differ by
            less than this amount.
        num_iterations: hard cap on iterations in case the tolerance is
            never reached.

    Returns:
        the estimated square root

    Raises:
        ValueError: If number is negative.
    """
    if number < 0:
        raise ValueError("Cannot estimate the square root of a negative number.")
    # BUG FIX: seeding the iteration with `number` itself made number == 0
    # divide by zero on the first Newton step; sqrt(0) is simply 0.
    if number == 0:
        return 0.0
    estimated_root = number
    for _ in range(num_iterations):
        # Newton step for f(x) = x^2 - number:  x <- (x + number/x) / 2
        new_estimated_root = 0.5 * (estimated_root + (float(number) / estimated_root))
        if np.abs(new_estimated_root - estimated_root) < accuracy_tolerance:
            break
        estimated_root = new_estimated_root
    return estimated_root
if __name__ == "__main__":
    print(f"Square root for 3 is: {square_root(3)}")
    print(f"Square root for 100 is: {square_root(100)}")
    print(f"Square root for 17 is: {square_root(17)}")
    # BUG FIX: square_root(-1) raises ValueError; catch it so the demo
    # reports the error instead of dying with an uncaught traceback.
    try:
        print(f"Square root for -1 is: {square_root(-1)}")
    except ValueError as err:
        print(f"Square root for -1 failed: {err}")
|
# MIT License
#
# Copyright (c) 2018 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import logging
from ethereumetl.executors.batch_work_executor import BatchWorkExecutor
from blockchainetl.jobs.base_job import BaseJob
from ethereumetl.jobs.export_receipts_job import ExportReceiptsJob
from ethereumetl.json_rpc_requests import generate_get_block_by_number_json_rpc, generate_get_block_uncle_count_by_number_json_rpc, generate_get_receipt_json_rpc
from ethereumetl.mappers.block_extra_mapper import EthBlockMapper
from ethereumetl.mappers.transaction_mapper import EthTransactionMapper
from ethereumetl.utils import hex_to_dec, rpc_response_batch_to_results, validate_range
# BROKEN_TXS = {
# "0x7ac4949d21b5679af6a2041f470e2ed004431efefabc823ddd64f457cb65897a",
# "0xa5465e01d820a8f2dcbe55e5fa5c36986cdc864a36bf9316c23f4762236f3941",
# "0x58a1524a8d872504b6900351e49c5ee6fe33272884ac881ba54f9190565a9f84",
# "0xd1f022ffa5b75ef96d0fb7379043c2d0b91bda29a48fdde925bb135ba49a1323",
# "0xc12bd04fba091eed3161975008b69813a719f1248b0b9c74dce6a99c457a6233",
# "0xa2d5ccc614570c8006b8615f4bdc709cea35ebf4f5738e842e701ff901ac1c5c",
# "0xccdcc97f3139c8ed54b0c361b211dc1fc4d52bcabf715ea9a4bedd30ef0d4dbd",
# "0xf37510b264972109a17a1a70289bd4bb097bfce60dd04dd1fa672d94b0bd1341",
# "0x31a9cc433a2b8bd90c00ce048d90fea21406de750217a0788e22053606ecb670",
# "0x93fb3bd9147e214b877ecacb1d5b5f93e8e8e2864d83aafe6ca133ac6816fb0a",
# "0x846a51a0f7ee288474f60ca046b3ffbc86fbd93ce2bd9def14c3fb9e9e1ffb12",
# "0x1e5eef53d170ce48bf418da8e46760b258b9fd1248d5439bb4d43ebd6e451646",
# "0x46599ad372fc26e03bac8cc3601818e30b1aa2d1232e04ed24740846b37d0d99",
# "0xc8f760283ae9c8bca685bdab0162236bc5473fb50f7202b799304792afdcb6c1",
# "0xef39dac21b863f63f70d992dd18c449c9429679470954a15538ff29410a6711b",
# "0x4d46501c20a8e37a174b6f58704d0d404cc4ac5fc2ca0a329643d785433a20ee",
# "0xb6dcf129d147ab8a646dde2cbe8bd039d6687a362264429d84ea2025cab94474",
# "0x6e7fd5d96916067c964b79f4d23a1a64b09c21ff5118f295534a6553e12fad6e",
# "0xe3d541c1a6477bd1ba58c762b6f409bd9d0f981facdb12f5f35f1bb7962de9ea",
# "0x70a0f7ac05ec3102ff7fe3161446b1b69e5fc737a69a2272eaff37135e695edc",
# "0x17d6b93fb01419247ae22bba0b8caca97ec20cfcbafb4d499881c59fd18480d5",
# "0x7c5789a050b85d6fcd131d1f7f85f3ede17c73d4f8095865fa3e97fb726ba513",
# }
# Transaction hashes to skip when fetching receipts. Currently empty; the
# commented-out list above documents hashes that were problematic in the past.
BROKEN_TXS = set()
# BROKEN_BLOCKS = {
#     165804,
#     165805,
#     187794,
#     187795,
# }
# Exports blocks and transactions
class ExportBlocksExtraJob(BaseJob):
    """Export blocks over [start_block, end_block] enriched with extra data:
    total transaction fees (computed from per-tx receipts) and uncle counts,
    all fetched via batched JSON-RPC requests."""
    def __init__(
            self,
            start_block,
            end_block,
            batch_size,
            batch_web3_provider,
            max_workers,
            item_exporter,
            export_blocks=True,
            export_transactions=True):
        validate_range(start_block, end_block)
        self.start_block = start_block
        self.end_block = end_block
        self.batch_web3_provider = batch_web3_provider
        self.batch_work_executor = BatchWorkExecutor(batch_size, max_workers)
        self.item_exporter = item_exporter
        self.export_blocks = export_blocks
        self.export_transactions = export_transactions
        if not self.export_blocks and not self.export_transactions:
            raise ValueError('At least one of export_blocks or export_transactions must be True')
        self.block_mapper = EthBlockMapper()
        self.transaction_mapper = EthTransactionMapper()
    def _start(self):
        # Open the exporter before any batch work begins.
        self.item_exporter.open()
    def _export(self):
        # Fan the inclusive block range out to the batch executor.
        self.batch_work_executor.execute(
            range(self.start_block, self.end_block + 1),
            self._export_batch,
            total_items=self.end_block - self.start_block + 1
        )
    def _export_batch(self, block_number_batch):
        """Fetch one batch of blocks, compute tx fees and uncle counts, export."""
        # logging.warning(f"{block_number_batch.__class__} {block_number_batch}")
        # block_number_batch = list(n for n in block_number_batch if not (164_000 <= n < 166_000 or n in BROKEN_BLOCKS))
        if len(block_number_batch) == 0:
            return
        blocks_rpc = list(generate_get_block_by_number_json_rpc(block_number_batch, self.export_transactions))
        # print(blocks_rpc[:2])
        response = self.batch_web3_provider.make_batch_request(json.dumps(blocks_rpc))
        # print(response)
        results = list(rpc_response_batch_to_results(response))
        # print(results[:2])
        blocks = [self.block_mapper.json_dict_to_block(result) for result in results]
        # handle transaction fees
        for block in blocks:
            # Skip hashes listed in BROKEN_TXS (currently the set is empty).
            tx_hashes = list(tx.hash for tx in block.transactions if tx.hash not in BROKEN_TXS)
            # if we have no txids then shortcut the rest
            if len(tx_hashes) == 0:
                block.tx_fees = 0
                continue
            txr_requests = list(generate_get_receipt_json_rpc(tx_hashes))
            response = self.batch_web3_provider.make_batch_request(json.dumps(txr_requests))
            # NOTE(review): the request=/block= kwargs are not part of the stock
            # ethereum-etl helper signature -- presumably a locally patched
            # version; confirm against ethereumetl.utils.
            results = rpc_response_batch_to_results(response, request=txr_requests, block=block)
            # raise Exception(str(list(results))[:2000])
            # NOTE(review): results are paired with block.transactions by index;
            # if BROKEN_TXS ever filtered a hash out, the indices would shift.
            for i,res in enumerate(results):
                if res:
                    block.transactions[i].gas_used = hex_to_dec(res.get('gasUsed'))
            # logging.warn(f"{block.transactions[i].gas_used} == {res.get('gasUsed')} ? (yes)")
            # logging.info(f"{block.transactions[-1].gas_used}")
            # total fee = sum of gas_price * gas_used over the block's transactions
            block.tx_fees = sum(tx.gas_price * tx.gas_used for tx in block.transactions)
            # logging.info(f"{block.number} >> {block.tx_fees}")
        # update with uncle count
        uncles_rpc = list(generate_get_block_uncle_count_by_number_json_rpc(block_number_batch))
        response = self.batch_web3_provider.make_batch_request(json.dumps(uncles_rpc))
        results = list(rpc_response_batch_to_results(response))
        for (block, uncle_count) in zip(blocks, results):
            block.uncle_count = hex_to_dec(uncle_count)
        for block in blocks:
            self._export_block(block)
    def _export_block(self, block):
        # Transactions are intentionally not exported here even when
        # export_transactions is True (see the commented-out code below).
        if self.export_blocks:
            self.item_exporter.export_item(self.block_mapper.block_to_dict(block))
        # if self.export_transactions:
        #     for tx in block.transactions:
        #         self.item_exporter.export_item(self.transaction_mapper.transaction_to_dict(tx))
    def _end(self):
        # Drain workers before closing the exporter.
        self.batch_work_executor.shutdown()
        self.item_exporter.close()
|
#!/usr/bin/env python3
# GCP project / bucket / region used when running on Dataflow.
PROJECT = 'ai-analytics-solutions'
BUCKET = 'ai-analytics-solutions-kfpdemo'
REGION = 'us-central1'
# Local file in/out and the Beam runner (DirectRunner executes locally).
INPUT = 'input.json'
RUNNER = 'DirectRunner'
OUTPUT = 'output.json'
# to try it in streaming mode, write one json message at a time to pub/sub
# and change the input to beam.io.ReadFromPubSub(topic=input_topic)
# and change the output to beam.io.WriteStringsToPubSub(output_topic)
from datetime import datetime
import apache_beam as beam
class Inventory:
    """Stock carried forward between optimization periods.

    Only dye & concentrate can be carried forward in time, not labor or water.
    """
    def __init__(self, leftover=()):
        # BUG FIX: the default was a mutable list ([]); an immutable tuple
        # avoids the shared-mutable-default pitfall while accepting the same
        # 4-element leftover vectors callers already pass.
        if len(leftover) == 4:
            self.dye = leftover[0]
            self.concentrate = leftover[3]
        else:
            # anything other than a full 4-element vector means no carry-over
            self.dye = self.concentrate = 0
    def update(self, leftover):
        """Replace carried stock from a 4-element leftover vector
        (dye at index 0, concentrate at index 3)."""
        self.dye = leftover[0]
        self.concentrate = leftover[3]
def linopt(materials, inventory):
    """Solve one period's production plan with linear programming.

    materials : dict with 'dye', 'labor', 'water', 'concentrate' amounts
        available in this period.
    inventory : Inventory carried over from the previous period; updated
        in place with this period's leftovers.

    Returns the (floored) optimal production quantities for the 4 products.
    """
    import numpy as np
    from scipy.optimize import linprog
    from scipy.optimize import OptimizeResult
    # coefficients of optimization function to *minimize*
    # (unit profits negated because linprog minimizes)
    c = -1 * np.array([50, 100, 125, 40])
    # constraints A_ub @x <= b_ub (could also use a_eq, b_eq, etc.)
    # rows: dye, labor, water, concentrate consumed per unit of each product
    A_ub = [
        [50, 60, 100, 50],
        [5, 25, 10, 5],
        [300, 400, 800, 200],
        [30, 75, 50, 20]
    ]
    # available amounts; dye and concentrate include carried-over inventory
    b_ub = [
        materials['dye'] + inventory.dye,
        materials['labor'],
        materials['water'],
        materials['concentrate'] + inventory.concentrate
    ]
    # per-product quantity bounds (products 2 and 3 are capped)
    bounds = [
        (0, np.inf),
        (0, 25),
        (0, 10),
        (0, np.inf)
    ]
    def log_info(status):
        # progress callback: iteration count and current objective value
        print(status.nit, status.fun)
    # NOTE(review): newer scipy defaults to method='highs', which rejects a
    # callback -- confirm the scipy version pinned for this pipeline.
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, callback=log_info)
    # round to 1 decimal first to absorb solver noise, then floor to integers
    qty = np.floor(np.round(res.x, 1))
    # leftovers feed the next period's inventory (numpy broadcasting over list)
    leftover = b_ub - np.matmul(A_ub, qty)
    print("{} --> {} --> {} + {}".format(materials, b_ub, qty, list(np.round(leftover))))
    inventory.update(leftover)
    return qty
def get_latest_inventory(pvalue):
    # NOTE(review): beam.pvalue.AsSingleton normally wraps a PCollection, not a
    # lambda; this helper looks broken and appears unused by run() -- confirm
    # before relying on it.
    return Inventory(beam.pvalue.AsSingleton(lambda x: x[-1])) # last value
def run():
    """Build and run the Beam pipeline: read orders, optimize each, write plans."""
    import json
    # standard pipeline-options boilerplate
    options = beam.options.pipeline_options.PipelineOptions()
    setup_options = options.view_as(beam.options.pipeline_options.SetupOptions)
    setup_options.save_main_session = True
    google_cloud_options = options.view_as(beam.options.pipeline_options.GoogleCloudOptions)
    google_cloud_options.project = PROJECT
    google_cloud_options.region = REGION
    google_cloud_options.job_name = 'linearopt-{}'.format(datetime.now().strftime("%Y%m%d-%H%M%S"))
    google_cloud_options.staging_location = 'gs://{}/staging'.format(BUCKET)
    google_cloud_options.temp_location = 'gs://{}/temp'.format(BUCKET)
    std_options = options.view_as(beam.options.pipeline_options.StandardOptions)
    std_options.runner = RUNNER
    p = beam.Pipeline(options=options)
    # NOTE(review): `inventory` is a plain object mutated inside a Map; this
    # presumably only behaves as intended on a single-worker DirectRunner --
    # confirm before switching runners.
    inventory = Inventory()
    (p
     | 'ingest' >> beam.io.ReadFromText(INPUT)
     | 'parse' >> beam.Map(lambda x: json.loads(x))
     | 'with_ts' >> beam.Map(lambda x: beam.window.TimestampedValue(x, x['timestamp']))
     # | 'windowed' >> beam.WindowInto(beam.window.FixedWindows(60)) # 1-minute windows
     # | 'materials' >> beam.CombinePerKey(sum)
     | 'optimize' >> beam.Map(lambda x: linopt(x, inventory))
     | 'output' >> beam.io.WriteToText(OUTPUT)
    )
    result = p.run()
    result.wait_until_finish()
if __name__ == '__main__':
    run()
|
"""Connector stuffs."""
import random
from asyncio import sleep as asyncio_sleep
from asyncio import wait_for
from ssl import SSLContext
from typing import Coroutine
from urllib.parse import ParseResult
# import h2.connection (unused)
from hyperframe.frame import SettingsFrame
# from concurrent import futures (unused)
from aiosonic.exceptions import (ConnectionPoolAcquireTimeout, ConnectTimeout,
HttpParsingError, TimeoutException)
from aiosonic.pools import SmartPool
from aiosonic.resolver import DefaultResolver
from aiosonic.timeout import Timeouts
from aiosonic.utils import ExpirableCache
class TCPConnector:
    """TCPConnector.

    Holds the main logic for making connections to destination hosts.

    Params:
    * **pool_size**: size for pool of connections
    * **timeouts**: global timeouts to use for connections with this connector. default: :class:`aiosonic.timeout.Timeouts` instance with default args.
    * **connection_cls**: connection class to be used. default: :class:`aiosonic.connection.Connection`
    * **pool_cls**: pool class to be used. default: :class:`aiosonic.pools.SmartPool`
    * **resolver**: resolver to be used. default: :class:`aiosonic.resolver.DefaultResolver`
    * **ttl_dns_cache**: ttl in milliseconds for dns cache. default: `10000` 10 seconds
    * **use_dns_cache**: Flag to indicate usage of dns cache. default: `True`
    """
    def __init__(self,
                 pool_size: int = 25,
                 timeouts: Timeouts = None,
                 connection_cls=None,
                 pool_cls=None,
                 resolver=None,
                 ttl_dns_cache=10000,
                 use_dns_cache=True):
        from aiosonic.connection import Connection  # avoid circular dependency
        self.pool_size = pool_size
        connection_cls = connection_cls or Connection
        pool_cls = pool_cls or SmartPool
        self.pool = pool_cls(self, pool_size, connection_cls)
        self.timeouts = timeouts or Timeouts()
        self.resolver = resolver or DefaultResolver()
        self.use_dns_cache = use_dns_cache
        if self.use_dns_cache:
            # DNS cache keyed by "host-port"; entries expire after ttl_dns_cache ms
            self.cache = ExpirableCache(512, ttl_dns_cache)

    async def acquire(self, urlparsed: ParseResult, verify, ssl, timeouts, http2):
        """Acquire connection."""
        if not urlparsed.hostname:
            raise HttpParsingError('missing hostname')
        # Faster without timeout
        if not self.timeouts.pool_acquire:
            conn = await self.pool.acquire(urlparsed)
            return await self.after_acquire(
                urlparsed, conn, verify, ssl, timeouts, http2)
        try:
            conn = await wait_for(self.pool.acquire(urlparsed),
                                  self.timeouts.pool_acquire)
            return await self.after_acquire(
                urlparsed, conn, verify, ssl, timeouts, http2)
        except TimeoutException:
            raise ConnectionPoolAcquireTimeout()

    async def after_acquire(self, urlparsed, conn, verify, ssl, timeouts, http2):
        """Resolve DNS and connect the freshly acquired connection."""
        dns_info = await self.__resolve_dns(
            urlparsed.hostname, urlparsed.port)
        try:
            await wait_for(conn.connect(
                urlparsed, dns_info, verify, ssl, http2
            ), timeout=timeouts.sock_connect)
        except TimeoutException:
            raise ConnectTimeout()
        return conn

    async def release(self, conn):
        """Release connection."""
        res = self.pool.release(conn)
        if isinstance(res, Coroutine):
            await res

    async def wait_free_pool(self):
        """Wait until free pool."""
        while True:
            if self.pool.is_all_free():
                return True
            # BUG FIX: the sleep coroutine was created but never awaited, so
            # this loop busy-spun without yielding to the event loop.
            await asyncio_sleep(0.02)  # pragma: no cover

    async def cleanup(self):
        """Cleanup connector connections."""
        await self.pool.cleanup()

    async def __resolve_dns(self, host: str, port: int):
        """Resolve host/port and return one resolved address at random,
        consulting the DNS cache when enabled."""
        # BUG FIX: when use_dns_cache is False, self.cache does not exist;
        # resolve directly instead of raising AttributeError.
        if not self.use_dns_cache:
            dns_data = await self.resolver.resolve(host, port)
            return random.choice(dns_data)
        key = f'{host}-{port}'
        dns_data = self.cache.get(key)
        if not dns_data:
            dns_data = await self.resolver.resolve(host, port)
            self.cache.set(key, dns_data)
        return random.choice(dns_data)
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RUuid(RPackage):
    """Tools for generating and handling of UUIDs (Universally Unique
    Identifiers)."""
    # upstream project page and release tarball location
    homepage = "http://www.rforge.net/uuid"
    url = "https://cran.rstudio.com/src/contrib/uuid_0.1-2.tar.gz"
    # second argument is the checksum of the tarball (md5-length hex digest)
    version('0.1-2', 'f97d000c0b16bca455fb5bf2cd668ddf')
|
import logging
import click
import haascli
from haascli import Defaults
from haascli import cluster as haascli_cluster
from haascli import stack as haascli_stack
from haascli import data as haascli_data
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
@click.option('--debug/--no-debug', default=False)
@click.option('--test/--no-test', default=False)
@click.option('--haas_dir',
              type=click.Path(exists=True, resolve_path=True),
              default=Defaults['haas_dir'],
              help="The haas configuration directory (default: {})".format(
                  Defaults['haas_dir']))
@click.option('-L', '--log-file',
              help='set log file; default "{}"; "-" for stdout'
              .format(Defaults['log_file']))
@click.option('-i', '--identity', help="PEM file")
@click.option('-u', '--username',
              help="user name in AMI (default: {})".format(
                  Defaults['username']))
@click.option('-r', '--region',
              help='AWS region name (default: {})'.format(Defaults['region']))
@click.option('-k', '--key', help='AWS key')
@click.option('-s', '--secret', help='AWS secret key')
@click.pass_context
def cli(ctx, **kwargs):
    """This is a command line tool for HPCC-as-a-Service (HaaS)
    """
    # seed obj dict with defaults; then overwrite with rcfile; last
    # overwrite with cmd-line params
    try:
        ctx.obj.update(Defaults)
    except AttributeError:
        # BUG FIX: obj is None -- start from a *copy* of Defaults; assigning
        # the module-level dict itself meant every later update mutated
        # Defaults for the rest of the process. (A stray debug assignment
        # ctx.obj['d'] = 'f' was removed here for the same reason.)
        ctx.obj = dict(Defaults)
    # conditional update; do not update if none
    for k, v in kwargs.items():
        if v:
            ctx.obj[k] = v
    # rc file: one "key = value" per line; unknown keys are fatal
    try:
        f = open(haascli.RCFILE)
        lines = f.readlines()
        f.close()
        for n, line in enumerate(lines):
            try:
                key, val = line.split('=', 1)
                key = key.strip()
                if key in Defaults:
                    val = val.strip()
                    # accept common truthy/falsy spellings for booleans
                    if val.lower() in ['t', 'true', 'yes', 'y']:
                        ctx.obj[key] = True
                    elif val.lower() in ['f', 'false', 'no', 'n']:
                        ctx.obj[key] = False
                    else:
                        ctx.obj[key] = val
                else:
                    print(click.style('unknown parameter {} in rc file {} '
                                      'at line {}'.format(
                                          key, haascli.RCFILE, n+1), fg='red'))
                    ctx.abort()
            except ValueError:
                print(click.style('error in rc file {} at line {}'.format(
                    haascli.RCFILE, n+1), fg='red'))
                ctx.abort()
    except IOError:
        # no rcfile; go on
        pass
    haascli.setup_logging(
        level=logging.DEBUG if kwargs['debug'] else logging.INFO,
        file=ctx.obj['log_file'])
# Register the sub-command groups on the top-level CLI.
cli.add_command(haascli_stack.cli, name='stack')
cli.add_command(haascli_cluster.cli, name='cluster')
cli.add_command(haascli_data.cli, name='data')
|
# -*- coding: utf-8 -*-
"""
@author: Yi Zhang
@contact: zhangyi_aero@hotmail.com
@time:
"""
import sys
if './' not in sys.path: sys.path.append('./')
from screws.freeze.base import FrozenOnly
class _2nCSCG_CellIS(FrozenOnly):
    """Boolean "is"-queries about a 2nCSCG cell: whether it is a root cell and
    whether (and on which side) it touches its level-0 cell's boundary."""
    def __init__(self, cell):
        """Store the cell; boundary flags are computed lazily and cached."""
        self._cell_ = cell
        # cached flags: _atb_ = attached-to-boundary; _U/_D/_L/_R = which sides
        self._atb_ = None
        self._U = None
        self._D = None
        self._L = None
        self._R = None
        self._freeze_self_()
    @property
    def root(self):
        # Delegates to the cell's own root marker.
        return self._cell_.___isroot___
    @property
    def attached_to_Lv0cell_boundary(self):
        """If this cell is attached to the cscg-element(level-0-cell)-boundary (lv0-trace-element).

        Side effect: also fills the cached per-side flags (_U, _D, _L, _R)
        that the four side-specific properties below rely on.
        """
        if self._atb_ is None:
            indices = self._cell_.indices
            LEN = len(indices)
            if LEN <= 2:
                # level-0 (the element itself) or level-1 cells always touch
                # the level-0 cell boundary
                self._atb_ = True
                if LEN == 1:
                    # the level-0 cell touches all four of its own sides
                    self._U = self._D = self._L = self._R = True
                else:
                    # level-1 cell: the quadrant index fixes which two sides
                    i1 = indices[1]
                    if i1 == 0:
                        self._U = self._L = True
                        self._D = self._R = False
                    elif i1 == 1:
                        self._D = self._L = True
                        self._U = self._R = False
                    elif i1 == 2:
                        self._U = self._R = True
                        self._D = self._L = False
                    elif i1 == 3:
                        self._D = self._R = True
                        self._U = self._L = False
                    else:
                        raise Exception()
            else:
                # deeper cells: locate the cell in the reference domain;
                # coordinates presumably span [-1, 1] -- boundary contact is
                # origin touching -1 or origin+delta touching 1
                origin, delta = self._cell_.mesh.do.find.origin_and_delta(indices)
                end = [_ + delta for _ in origin]
                if -1 in origin:
                    self._atb_ = True
                else:
                    if 1 in end:
                        self._atb_ = True
                    else:
                        self._atb_ = False
                # per-side flags from each coordinate independently
                if origin[0] == -1:
                    self._U = True
                else:
                    self._U = False
                if end[0] == 1:
                    self._D = True
                else:
                    self._D = False
                if origin[1] == -1:
                    self._L = True
                else:
                    self._L = False
                if end[1] == 1:
                    self._R = True
                else:
                    self._R = False
        return self._atb_
    @property
    def attached_to_Lv0cell_U_boundary(self):
        # Lazily trigger the boundary computation if not yet cached.
        if self._U is None:
            _ = self.attached_to_Lv0cell_boundary
        return self._U
    @property
    def attached_to_Lv0cell_D_boundary(self):
        # Lazily trigger the boundary computation if not yet cached.
        if self._D is None:
            _ = self.attached_to_Lv0cell_boundary
        return self._D
    @property
    def attached_to_Lv0cell_L_boundary(self):
        # Lazily trigger the boundary computation if not yet cached.
        if self._L is None:
            _ = self.attached_to_Lv0cell_boundary
        return self._L
    @property
    def attached_to_Lv0cell_R_boundary(self):
        # Lazily trigger the boundary computation if not yet cached.
        if self._R is None:
            _ = self.attached_to_Lv0cell_boundary
        return self._R
if __name__ == '__main__':
    # mpiexec -n 4 python
    # Module is import-only; nothing to run directly.
    pass
|
import os
import sys
import tempfile
import argparse
from generate_complex_files import generate_complexes
from pymol import cmd
def main():
    """Generate receptor/ligand complex files, load them into PyMOL, and run
    the RNAPosers scoring script on the result.

    CLI arguments (positional): receptor, poses, score-output path, rmsd,
    eta, and optionally a stop frame.
    """
    # get form data and initialize parameters
    receptor = sys.argv[1]  # receptor
    poses = sys.argv[2]  # ligand poses
    score = sys.argv[3]  # output file path
    rmsd = sys.argv[4]
    eta = sys.argv[5]
    try:
        stop_frame = int(sys.argv[6])
    except (IndexError, ValueError):
        # BUG FIX: was a bare except; argument missing or non-integer means
        # "process all frames"
        stop_frame = -1
    complex_name = "complex"
    # some debugging feedback
    print('[RNAPosers Debugging] Parameters:', receptor, poses, score, rmsd, eta, stop_frame)
    # build the complex pdb/dcd/mol2 in a throwaway directory
    with tempfile.TemporaryDirectory() as tmpDir:
        mol2 = tmpDir + "/lig.mol2"
        pdb = tmpDir + "/complex.pdb"
        dcd = tmpDir + "/complexes.dcd"
        generate_complexes(receptor, poses, dcd, pdb, mol2)
        cmd.delete(complex_name)
        cmd.load(pdb, complex_name)
        cmd.load_traj(dcd, complex_name, state=1, stop=stop_frame)
        featureFile = tmpDir + "/features"
        # BUG FIX: the script-directory fallback was computed and then
        # unconditionally overwritten by the environment variable, which may
        # be unset (None) and would break the path concatenation below. Use
        # the env var only when it is actually set.
        dir_path = os.path.dirname(os.path.realpath(__file__))
        dir_path = os.environ.get('RNAPOSERS_PATH', dir_path)
        rnaposers_cmd = " ".join(["bash", dir_path + "/src/rna_poser.sh", pdb, mol2, dcd, rmsd, eta, featureFile, score, str(stop_frame)])
        print('[RNAPosers Debugging]',rnaposers_cmd)
        os.system(rnaposers_cmd)
    return 0
if __name__ == '__main__':
    main()
|
# -*-coding:utf-8-*-
import os
from xml.etree.ElementTree import dump
import json
import pprint
import argparse
from Format import VOC, COCO, KITTI, YOLO
# CLI options shared by all dataset converters (VOC/COCO/KITTI -> YOLO).
parser = argparse.ArgumentParser(description='label Converting example.')
parser.add_argument('--datasets', type=str, help='type of datasets')
parser.add_argument('--img_path', type=str, help='directory of image folder')
parser.add_argument('--label', type=str,
                    help='directory of label folder or label file path')
parser.add_argument('--convert_output_path', type=str,
                    help='directory of label folder')
parser.add_argument('--img_type', type=str, help='type of image')
parser.add_argument('--manifest_path', type=str,
                    help='directory of manipast file', default="./")
parser.add_argument('--cls_list_file', type=str,
                    help='directory of *.names file', default="./")
args = parser.parse_args()
def main(config):
    """Dispatch on ``config["datasets"]`` and convert that dataset's labels
    to YOLO format, printing a status message on any failure."""
    if config["datasets"] == "VOC":
        voc = VOC()
        yolo = YOLO(os.path.abspath(config["cls_list"]))
        flag, data = voc.parse(config["label"])
        _generate_and_save(yolo, flag, data, config, "VOC")
    elif config["datasets"] == "COCO":
        coco = COCO()
        flag, data, cls_hierarchy = coco.parse(
            config["label"], config["img_path"])
        yolo = YOLO(os.path.abspath(
            config["cls_list"]), cls_hierarchy=cls_hierarchy)
        _generate_and_save(yolo, flag, data, config, "COCO")
    elif config["datasets"] == "KITTI":
        kitti = KITTI()
        yolo = YOLO(os.path.abspath(config["cls_list"]))
        flag, data = kitti.parse(
            config["label"], config["img_path"], img_type=config["img_type"])
        _generate_and_save(yolo, flag, data, config, "KITTI")
    else:
        # BUG FIX: message typo ("Unkwon" -> "Unknown")
        print("Unknown Datasets")


def _generate_and_save(yolo, flag, data, config, fmt_name):
    """Shared tail of every dataset branch: generate YOLO labels from the
    parsed data and save them, reporting failures at each stage.

    fmt_name is only used to label the parsing-failure message.
    """
    if flag == True:
        flag, data = yolo.generate(data)
        if flag == True:
            flag, data = yolo.save(data, config["output_path"], config["img_path"],
                                   config["img_type"], config["manifest_path"])
            if flag == False:
                print("Saving Result : {}, msg : {}".format(flag, data))
        else:
            print("YOLO Generating Result : {}, msg : {}".format(flag, data))
    else:
        print("{} Parsing Result : {}, msg : {}".format(fmt_name, flag, data))
if __name__ == '__main__':
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists()/makedirs() pair
    os.makedirs(args.convert_output_path, exist_ok=True)
    # collect CLI arguments into the single dict main() expects
    config = {
        "datasets": args.datasets,
        "img_path": args.img_path,
        "label": args.label,
        "img_type": args.img_type,
        "manifest_path": args.manifest_path,
        "output_path": args.convert_output_path,
        "cls_list": args.cls_list_file,
    }
    main(config)
|
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
import bottle
from bottle import request, route, hook
import beaker.middleware
from setting import session_path
# Beaker session configuration: file-backed sessions, auto-saved on response.
session_opts = {
    'session.type': 'file',
    'session.data_dir': session_path,
    'session.auto': True,
}
# Wrap the bottle WSGI app so every request carries a beaker session.
app_middlware = beaker.middleware.SessionMiddleware(bottle.app(), session_opts)
#app_session = app.request.environ.get('beaker.session')
@hook('before_request')
def setup_request():
    # Expose the beaker session as request.session for route handlers.
    request.session = request.environ['beaker.session']
# Extremely hard.
import collections
import itertools
# Parse the puzzle input: blocks of "Tile NNNN:" followed by a square grid.
inp = open("Day20.txt").read()
tilesLines = inp.strip().split("\n\n")
tilesSides = {}  # name -> (top, right, bottom, left) edge strings
tiles = {}  # name -> list of row strings
for tilelines in tilesLines:
    lt = ""
    rt = ""
    # "[5:]" drops the "Tile " prefix; the name is the numeric id as a string
    name, lines = tilelines[5:].split(":\n")
    lines = lines.split()
    for line in lines:
        # right edge = last char of each row; left edge = first char
        rt += line[-1]
        lt += line[0]
    tilesSides[name] = (lines[0], rt, lines[-1], lt)
    tiles[name] = lines
# Convert to binary
def toNumber(s):
    """Interpret a '#'/'.' string as a binary number ('#' -> 1, '.' -> 0)."""
    return int(s.translate(str.maketrans("#.", "10")), 2)
# Numeric form of every tile's four edges (same order: top, right, bottom, left).
tilesnum = {}
for name, strs in tilesSides.items():
    tilesnum[name] = tuple(map(toNumber, strs))
def sideReverse(side):
    """Value of a 10-bit edge read in the opposite direction."""
    bits = format(side, "010b")
    return int(bits[::-1], 2)
def _find(base):
    """Names of tiles sharing at least one edge (possibly reversed) with base."""
    # all edge values of the base tile, in both reading directions
    base_edges = set(tilesnum[base])
    base_edges.update(sideReverse(s) for s in tilesnum[base])
    neighbours = set()
    for name, sides in tilesnum.items():
        if name == base:
            continue
        candidate_edges = set(sides)
        candidate_edges.update(sideReverse(s) for s in sides)
        # any shared edge value means the two tiles can be adjacent
        if base_edges & candidate_edges:
            neighbours.add(name)
    return neighbours
# adjcnt[name] = set of tile names that can sit next to `name`.
adjcnt = {}
for name, sides in tilesnum.items():
    adjcnt[name] = _find(name)
# All of them are right
# Classify tiles by neighbour count: 2 -> corner piece, 3 -> edge piece.
corners = set()
sides = set()
for name in sorted(adjcnt.keys()):
    numadj = len(adjcnt[name])
    if numadj == 2:
        corners.add(name)
    elif numadj == 3:
        sides.add(name)
# Part 1 answer: product of the four corner tile ids.
x = 1
for name in corners:
    x *= int(name)
print("Q1:", x)
def findSide(sideName, _opposite):
    """Next edge tile after sideName, excluding _opposite (the tile we came
    from): prefer the neighbour that is not an interior tile (4 neighbours)."""
    candidates = list(adjcnt[sideName] - {_opposite})
    first = candidates[0]
    if len(adjcnt[first]) != 4:
        return first
    return candidates[1]
# Build the grid of tile names ("image"), starting from an arbitrary corner.
image = []
row = []
cur = list(corners)[0]
# First row: walk along edge tiles until the opposite corner is reached.
while True:
    row.append(cur)
    adj = adjcnt[cur]
    if len(adj) == 2 and len(row) == 1:
        # starting corner: pick either of its two neighbours arbitrarily
        nxt = list(adj)[0]
        cur = nxt
    elif len(adj) == 2 and len(row) > 1:
        # reached the far corner of the first row
        break
    else:
        cur = findSide(cur, row[-2])
image.append(row)
# Left side
sideLenght = len(sides) // 4 + 2
cur = list(adjcnt[row[0]] - {row[1]})[0]
while True:
    image.append([cur])
    if len(adjcnt[cur]) == 2:
        break
    cur = findSide(cur, image[-2][0])
# fill the image
for r in range(1, sideLenght):
    for c in range(1, sideLenght):
        # tile at (r, c) = the unique common neighbour of the tiles to its
        # left and above, excluding the diagonal tile
        p = list(
            adjcnt[image[r][c-1]].intersection(adjcnt[image[r-1][c]]) - {image[r-1][c-1]})
        if len(p) != 1:
            raise Exception("%d %d %s" % (r, c, p))
        image[r].append(p[0])
def _rotate(tile):
new_tile = []
for k in range(len(tile[0])):
row = ""
for r in range(len(tile) - 1, -1, -1):
row += tile[r][k]
new_tile.append(row)
return new_tile
def flip(tile):
    """Mirror a tile horizontally (reverse every row)."""
    return [row[::-1] for row in tile]
def M(tile):
    """Yield all 8 orientations of a tile: the 4 rotations of the tile
    itself, then the 4 rotations of its mirror image."""
    for variant in (tile, flip(tile)):
        current = variant
        for _ in range(4):
            yield current
            current = _rotate(current)
# Side indices, clockwise from the top edge.
TOP, RT, BOT, LT = 0, 1, 2, 3
def getSide(tile, side):
    """Return one border of a tile as a string.

    TOP/BOT are read left-to-right, RT/LT top-to-bottom.
    """
    if side == TOP:
        return tile[0]
    if side == BOT:
        return tile[-1]
    column = -1 if side == RT else 0
    return "".join(row[column] for row in tile)
# Orient every tile so that adjacent borders match, stitch the borderless
# interiors into one big image, then count sea monsters in it.
# NOTE(review): relies on `itertools` being imported earlier in the file.
imagespositioned = []
# Fix the top-left corner and its bottom neighbour in compatible orientations.
for corner, bottom in itertools.product(
        M(tiles[image[0][0]]),
        M(tiles[image[1][0]])):
    if getSide(corner, BOT) == getSide(bottom, TOP):
        break
match = False
for right in M(tiles[image[0][1]]):
    if getSide(corner, RT) == getSide(right, LT):
        match = True
        break
if not match:
    # Wrong mirror image: flip corner and bottom together and retry.
    corner = flip(corner)
    bottom = flip(bottom)
    assert getSide(corner, BOT) == getSide(bottom, TOP)
    for right in M(tiles[image[0][1]]):
        if getSide(corner, RT) == getSide(right, LT):
            match = True
            break
assert match
tiles[image[0][0]] = corner
tiles[image[0][1]] = right
tiles[image[1][0]] = bottom
# Left
# Orient the rest of the left column against the tile above.
for r in range(2, sideLenght):
    top = tiles[image[r-1][0]]
    for bottom in M(tiles[image[r][0]]):
        if getSide(top, BOT) == getSide(bottom, TOP):
            tiles[image[r][0]] = bottom
# Orient every remaining tile against its left neighbour.
for r in range(sideLenght):
    for c in range(1, sideLenght):
        left = tiles[image[r][c-1]]
        for right in M(tiles[image[r][c]]):
            if getSide(left, RT) == getSide(right, LT):
                tiles[image[r][c]] = right
                break
# Stitch rows 1..8 of each tile (borders stripped) into the full image.
fullimage = []
for r in range(sideLenght):
    for l in range(1, 9):
        fullimage.append("".join(tiles[tilenum][l][1:-1]
                                 for tilenum in image[r]))
# Count sea monsters.
MONSTER = ["                  # ",
           "#    ##    ##    ###",
           " #  #  #  #  #  #   "]
def hasmonster(monster, r, c):
    # True if the monster pattern fits with its top-left corner at (r, c);
    # ' ' cells are wildcards, '#' cells must be '#'.
    for mr, mrow in enumerate(monster):
        for mc, ch in enumerate(mrow):
            if ch == "#":
                if fullimage[r+mr][c+mc] != "#":
                    return False
    return True
# Try all 8 orientations of the monster; only one yields matches.
for monster in M(MONSTER):
    cnt = 0
    for r in range(len(fullimage) - len(monster) + 1):
        for c in range(len(fullimage[0]) - len(monster[0]) + 1):
            if hasmonster(monster, r, c):
                cnt += 1
    if cnt != 0:
        # Part 2 answer: '#' cells that belong to no monster.
        totalpounds = sum(row.count("#") for row in fullimage)
        monsterpounds = sum(row.count("#") for row in monster)
        print("Q2:", totalpounds - (monsterpounds * cnt))
        break
|
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
import pandas as pd
import scipy.interpolate
import plotly.express as px
from math import sqrt
import os, random
from scipy.optimize import curve_fit
from numpy import exp, log
# """Extracting all files"""
# files = os.listdir(".")
# done_zip_files = []
# for file in files:
# if file[-3:] == "zip":
# flag = False
# with zipfile.ZipFile(file, 'r') as zipref:
# os.mkdir(file[:-4])
# zipref.extractall(file[:-4])
# done_zip_files.append(file)
""""File Paths"""
def return_folder_paths(file_name):
    """Build the raw and calibrated data folder paths for a ch2 XSM dataset.

    The year, month and day are sliced out of the folder name at character
    positions 8:12, 12:14 and 14:16 respectively.
    """
    year, month, day = file_name[8:12], file_name[12:14], file_name[14:16]
    base = f'./{file_name}/xsm/data/{year}/{month}/{day}'
    return f'{base}/raw', f'{base}/calibrated'
"""Returning a Light Curve from a file"""
def lightcurve(file, should_plot=False):
    """Read the RATE column from a light-curve file and optionally plot it.

    Returns the rate column; when `should_plot` is True, also prints the
    sample count and scatter-plots rate vs. sample index with matplotlib.
    """
    t = Table.read(file)
    tmp = t["RATE"]
    if should_plot:
        print(len(tmp))
        plt.figure(figsize=(25, 7))
        plt.scatter(range(len(tmp)), tmp, s=1)
    return tmp
"""Plotting the curve using Plotly"""
def plot_as_plotly(_x, _y, _title):
    """Show the given x/y series as an interactive Plotly line chart."""
    frame = pd.DataFrame(zip(_x, _y), columns=['X-Axis', 'Y-Axis'])
    px.line(frame, x='X-Axis', y='Y-Axis', title=_title).show()
    return
"""Return the slope calculated between two data points in 2-Dimensional space"""
def get_slope(x1, x2, y1, y2):
    """Slope of the line segment joining (x1, y1) and (x2, y2)."""
    rise = y1 - y2
    run = x1 - x2
    return rise / run
"""Return the Eucleidean distance between two points"""
def pythagorean(x1, x2, y1, y2):
    """Euclidean distance between (x1, y1) and (x2, y2)."""
    return sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
"""Helper functions to fit on the detected flares"""
# Exponent of the stretched-exponential decay model a * exp(-b * x**k).
k = 0.5
def exp_fit_func(x, ln_a, b):
    """Linearised decay model ln(y) = ln_a - b * x**k, for curve_fit."""
    return ln_a - b * (x ** k)
def exp_func(x, a, b):
    """Stretched-exponential decay a * exp(-b * x**k)."""
    return a * np.exp(-b * (x ** k))
def inverse_exp_func(y, a, b):
    """Invert exp_func: the (truncated integer) x at which the decay reaches y."""
    t = (log(y) - log(a)) / -b
    return int(t ** (1. / k))
"""Main function"""
def main():
    """Detect flares in a randomly chosen ch2 XSM light curve, fit an
    exponential decay to each flare, and write the results to a CSV.

    Naming convention: `_s*` lists hold flare-start candidates (minima),
    `_p*` hold peak candidates (maxima), `_e*` hold flare-end candidates;
    each numbered stage refines the previous one.
    """
    # Stage 0: pick a random ch2 data directory (seeded for reproducibility).
    files = os.listdir()
    flag = True
    valid_dirs = []
    for file in files:
        if file[:3] == 'ch2':
            valid_dirs.append(file)
    random.seed(0)
    file = random.choice(valid_dirs)
    raw, calib = return_folder_paths(file)
    path_to_lc = calib + '/' + file + '_level2.lc'
    rand_lc = lightcurve(path_to_lc, should_plot=True)
    print(file)
    # Stage 1: smooth with windowed means, then spline back to full length.
    window_sz = 40
    new_norm = []
    new_norm_data_points = []
    for i in range(1, len(rand_lc), window_sz):
        tmp = np.mean(rand_lc[i:i+window_sz])
        new_norm.append(tmp)
        new_norm_data_points.append(i)
    new_norm = np.array(new_norm)
    new_norm_data_points = np.array(new_norm_data_points)
    sz = new_norm.shape[0]
    tck, u = scipy.interpolate.splprep([new_norm_data_points, new_norm], s=0)
    xnew, ynew = scipy.interpolate.splev(np.linspace(0, 1, len(rand_lc)), tck, der=0)
    # Stage 2: local maxima (_p0) and minima (_s0) of the smoothed curve.
    _s0 = []
    _p0 = []
    for i in range(1, len(ynew)-1):
        if (ynew[i]>ynew[i-1]) and (ynew[i]>ynew[i+1]):
            _p0.append(i)
        elif (ynew[i]<ynew[i-1]) and (ynew[i]<ynew[i+1]):
            _s0.append(i)
    max_before_min = 0
    if (xnew[_s0[0]] < xnew[_p0[0]]):
        max_before_min = 1
    # Stage 3: _p1 keeps only the highest peak of each rising run of peaks.
    _p1 = []
    l = len(_p0)
    is_increasing = True
    for i in range(l-1):
        if is_increasing:
            while (i < l-1) and (ynew[_p0[i]] < ynew[_p0[i+1]]):
                i += 1
            _p1.append(_p0[i])
            is_increasing = False
        else:
            if (i < l-1):
                if (ynew[_p0[i]] < ynew[_p0[i+1]]):
                    is_increasing = True
    # Stage 4: _s1 pairs each peak with the steepest preceding minimum.
    _s1 = []
    for i in range(1, len(_p1)):
        k = 0
        for j in range(len(_s0)):
            if (xnew[_s0[j]] > xnew[_p1[i]]):
                k = j
                break
        tmp_sl = get_slope(xnew[_s0[k]], xnew[_p1[i]], ynew[_s0[k]], ynew[_p1[i]])
        tmp_val = _s0[k]
        while (xnew[_s0[k]] > xnew[_p1[i-1]]):
            t_sl = get_slope(xnew[_s0[k]], xnew[_p1[i]], ynew[_s0[k]], ynew[_p1[i]])
            if (t_sl > tmp_sl):
                tmp_sl = t_sl
                tmp_val = _s0[k]
            k -= 1
            if k<0:
                break
        _s1.append(tmp_val)
    # Stage 5: align (start, peak) pairs so each start precedes its peak.
    _s2 = []
    _p2 = []
    for i in range(len(_p1)):
        for j in range(len(_s1)-1):
            if (xnew[_s1[j+1]] > xnew[_p1[i]]):
                _s2.append(_s1[j])
                _p2.append(_p1[i])
                break
    # Stage 6: discard rises smaller than one std-dev of peak heights.
    _s3 = []
    _p3 = []
    s = np.std(np.array([ynew[_p2[i]] for i in range(len(_p2))]))
    threshold_height = s
    threshold_time = 0 #! extra tunable parameter, currently disabled (0)
    for i in range(len(_p2)):
        # x = pythagorean(xnew[_s2[i]], xnew[_p2[i]], ynew[_s2[i]], ynew[_p2[i]])
        y__ = ynew[_p2[i]]-ynew[_s2[i]]
        if (y__ > threshold_height):
            if (xnew[_p2[i]]-xnew[_s2[i]] > threshold_time) or (y__ > s):
                _s3.append(_s2[i])
                _p3.append(_p2[i])
    # Stage 7: drop duplicate flares that share the same start index.
    _s4 = []
    _p4 = []
    s = set()
    for i in range(len(_s3)):
        if _s3[i] in s:
            continue
        s.add(_s3[i])
        _s4.append(_s3[i])
        _p4.append(_p3[i])
    # Stage 8: end candidate = first point after the peak that drops below the
    # half-rise level, bounded by the next flare's start.
    _e0 = []
    for i in range(len(_p4)-1):
        for j in range(len(xnew)):
            if (xnew[j] > xnew[_p4[i]]):
                if (ynew[j] < (ynew[_p4[i]] + ynew[_s4[i]])/2):
                    _e0.append(j)
                    break
                if (xnew[j] > xnew[_s4[i+1]]):
                    _e0.append(j-1)
                    break
    _e0.append(_s2[-1])
    # Stage 9: repair end points that lie above their own peak.
    _e1 = []
    for i in range(len(_e0)):
        if (ynew[_e0[i]] < ynew[_p4[i]]):
            _e1.append(_e0[i])
        else:
            for j in range(len(_s0)):
                if (xnew[_s0[j]] > xnew[_p4[i]]):
                    if (ynew[_s0[j+1]] > ynew[_s0[j]]):
                        _e1.append(_s0[j])
                        break
    # Stage 10: keep only well-ordered start < peak < end triples.
    start_times = []
    peak_times = []
    end_times = []
    peak_intensities = []
    for i in range(len(_s4)):
        if (_p4[i]-_s4[i] > 0) and (_e1[i]-_p4[i] > 0):
            start_times.append(_s4[i])
            peak_times.append(_p4[i])
            end_times.append(_e1[i])
            peak_intensities.append(ynew[_p4[i]])
    _zip = pd.DataFrame(zip(start_times, peak_times, end_times, peak_intensities))
    _zip.columns = ['start_time', 'peak_time', 'end_time', 'peak_intensity']
    st = _zip['start_time']
    pt = _zip['peak_time']
    et = _zip['end_time']
    pi = _zip['peak_intensity']
    y_min = np.min(ynew)
    # Stage 11: fit ln(y) = ln_a - b*x**k to each decay; estimate when the
    # flare returns to its starting intensity and a normalised fit error.
    final_st = []
    final_pt = []
    final_et = []
    est_et = []
    final_pi = []
    final_err = []
    for i in range(len(st)):
        x_range = [int(xnew[j]-xnew[pt[i]]) for j in range(pt[i], et[i])]
        ln_y_range = [np.log(ynew[j]) for j in range(pt[i], et[i])]
        popc, pcov = curve_fit(exp_fit_func, x_range, ln_y_range)
        ln_a, b = popc
        a = np.exp(ln_a)
        if (b < 0):
            # A negative decay rate means the fit failed; skip this flare.
            continue
        _calc_et = inverse_exp_func(ynew[st[i]], a, b)
        final_st.append(st[i])
        final_pt.append(pt[i])
        final_et.append(et[i])
        final_pi.append(pi[i])
        est_et.append(_calc_et + pt[i])
        y_dash = []
        y_diff = []
        y_proj = []
        x_proj = []
        for _i, j in enumerate(x_range):
            __y = exp_func(xnew[j], a, b)
            y_dash.append(__y)
            y_diff.append(abs(exp(ln_y_range[_i]) - __y))
        # Project the fitted decay beyond the observed end time.
        for j in range(et[i]-pt[i], _calc_et):
            if ((j + pt[i]) < len(xnew)):
                x_proj.append(xnew[j + pt[i]])
                y_proj.append(exp_func(xnew[j], a, b))
        _y_ = np.array(y_diff)
        # Mean absolute error normalised by the flare's dynamic range.
        final_err.append((np.sum(_y_)) / ((pi[i] - y_min) * (len(x_range))))
    final_zip = pd.DataFrame(zip(final_st, final_pt, final_et, est_et, final_pi, final_err))
    final_zip.columns = ['start_time', 'peak_time', 'end_time', 'est_end_time', 'peak_intensity', 'error']
    # final_zip.sort_values(by=['error'], ascending=True, inplace=True)
    final_zip.to_csv('results_{}.csv'.format(file))
# Script entry point.
if __name__ == '__main__':
    main()
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_batching.py
DESCRIPTION:
These samples demonstrate how to use the batch transaction API to perform multiple
operations within a single request
USAGE:
python sample_batching.py
Set the environment variables with your own values before running the sample:
1) AZURE_STORAGE_CONNECTION_STRING - the connection string to your storage account
"""
from datetime import datetime, timedelta
import os
from time import sleep
import asyncio
from dotenv import find_dotenv, load_dotenv
class CreateClients(object):
    """Demonstrates a batch transaction against Azure Table storage.

    Reads the storage account credentials from environment variables (via a
    .env file) and builds a connection string for the async TableClient.
    """
    def __init__(self):
        load_dotenv(find_dotenv())
        self.access_key = os.getenv("TABLES_PRIMARY_STORAGE_ACCOUNT_KEY")
        self.endpoint_suffix = os.getenv("TABLES_STORAGE_ENDPOINT_SUFFIX")
        self.account_name = os.getenv("TABLES_STORAGE_ACCOUNT_NAME")
        self.endpoint = "{}.table.{}".format(self.account_name, self.endpoint_suffix)
        self.connection_string = "DefaultEndpointsProtocol=https;AccountName={};AccountKey={};EndpointSuffix={}".format(
            self.account_name, self.access_key, self.endpoint_suffix
        )
        self.table_name = "sampleTransactionAsync"
    async def _create_entities(self):
        """Pre-create entities rk002-rk004 so the transaction's delete/upsert/
        update operations have something to act on; rk001 is left for the
        transaction's create operation."""
        from azure.core.exceptions import ResourceExistsError
        self.entity1 = {"PartitionKey": "pk001", "RowKey": "rk001", "Value": 4, "day": "Monday", "float": 4.003}
        self.entity2 = {"PartitionKey": "pk001", "RowKey": "rk002", "Value": 4, "day": "Tuesday", "float": 4.003}
        self.entity3 = {"PartitionKey": "pk001", "RowKey": "rk003", "Value": 4, "day": "Wednesday", "float": 4.003}
        self.entity4 = {"PartitionKey": "pk001", "RowKey": "rk004", "Value": 4, "day": "Thursday", "float": 4.003}
        entities = [self.entity2, self.entity3, self.entity4]
        for entity in entities:
            try:
                await self.table_client.create_entity(entity)
            except ResourceExistsError:
                print("entity already exists")
                pass
    async def sample_transaction(self):
        """Create the table and entities, then submit one batch transaction
        containing a create, a delete, an upsert and a replace-update."""
        # Instantiate a TableServiceClient using a connection string
        # [START batching]
        from azure.data.tables.aio import TableClient
        from azure.data.tables import TableTransactionError
        from azure.core.exceptions import ResourceExistsError
        self.table_client = TableClient.from_connection_string(
            conn_str=self.connection_string, table_name=self.table_name
        )
        try:
            await self.table_client.create_table()
            print("Created table")
        except ResourceExistsError:
            print("Table already exists")
        await self._create_entities()
        operations = [
            ("create", self.entity1),
            ("delete", self.entity2),
            ("upsert", self.entity3),
            ("update", self.entity4, {"mode": "replace"}),
        ]
        try:
            await self.table_client.submit_transaction(operations)
        except TableTransactionError as e:
            print("There was an error with the transaction operation")
            print("Error: {}".format(e))
        # [END batching]
    async def clean_up(self):
        """Delete the sample table and close the client."""
        await self.table_client.delete_table()
        await self.table_client.__aexit__()
async def main():
    # Run the full sample: create table and entities, submit one batch
    # transaction, then delete the table again.
    sample = CreateClients()
    await sample.sample_transaction()
    await sample.clean_up()
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
#!/usr/bin/env python3
import pathlib
class TestSet1:
    """Tests for cryptopals set 1 (challenges 1-8).

    Each test imports its challenge module lazily so that a broken
    challenge only fails its own test.
    """

    @staticmethod
    def _data_file(name):
        # Fixture files live in the `data` directory two levels above this file.
        return pathlib.Path(__file__).absolute().parent.parent / "data" / name

    def test_challenge01(self):
        from cryptopals.set1.challenge01 import challenge01, BYTES, RESULT
        assert challenge01(BYTES) == RESULT, "The result does not match the expected value"

    def test_challenge02(self):
        from cryptopals.set1.challenge02 import challenge02, BYTES_A, BYTES_B, RESULT
        assert challenge02(BYTES_A, BYTES_B) == RESULT, "The result does not match the expected value"

    def test_challenge03(self):
        from cryptopals.set1.challenge03 import challenge03, CIPHER_TEXT, RESULT
        assert challenge03(CIPHER_TEXT) == RESULT, "The result does not match the expected value"

    def test_challenge04(self):
        from cryptopals.set1.challenge04 import challenge04, RESULT
        assert challenge04(self._data_file("4.txt")) == RESULT, "The result does not match the expected value"

    def test_challenge05(self):
        from cryptopals.set1.challenge05 import challenge05, TEXT, KEY, RESULT
        assert challenge05(TEXT, KEY) == RESULT, "The result does not match the expected value"

    def test_challenge06(self):
        from cryptopals.set1.challenge06 import challenge06, RESULT
        assert challenge06(self._data_file("6.txt")) == RESULT, "The result does not match the expected value"

    def test_challenge07(self):
        from cryptopals.set1.challenge07 import challenge07, RESULT
        assert challenge07(self._data_file("7.txt")) == RESULT, "The result does not match the expected value"

    def test_challenge08(self):
        from cryptopals.set1.challenge08 import challenge08, RESULT
        assert challenge08(self._data_file("8.txt")) == RESULT, "The result does not match the expected value"
|
from dataclasses import dataclass, field
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-NCName-pattern-5-NS"
@dataclass
class NistschemaSvIvAtomicNcnamePattern5:
    """Generated binding for a NIST schema sample: an atomic NCName value
    restricted by pattern 5."""
    class Meta:
        # XSD type name and namespace used by the (de)serialiser.
        name = "NISTSchema-SV-IV-atomic-NCName-pattern-5"
        namespace = "NISTSchema-SV-IV-atomic-NCName-pattern-5-NS"
    value: str = field(
        default="",
        metadata={
            "required": True,
            # XSD regex: a colon-free name character set, 64 characters total.
            "pattern": r"[\i-[:]][\c-[:]]{63}",
        }
    )
|
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from segmentation.models import Image, UserImage, AssignedImage
import sys
# usage: python manage.py assignImage 1 COCO_val2014_000000000042.jpg
class Command(BaseCommand):
    """Assign an existing Image to a user's AssignedImage set.

    usage: python manage.py assignImage <userid> <image-name>
    """
    def handle(self, *args, **kwargs):
        userid = int(args[0])
        newImage = args[1]
        # NOTE(review): res[userid] indexes the queryset by position, not by
        # user primary key — confirm the intended ordering.
        res = AssignedImage.objects.all()
        try:
            imageObject = Image.objects.get(name=newImage)
        except Image.DoesNotExist:
            # The previous bare `except:` swallowed every error (including
            # programming mistakes) and exited with status 0. CommandError is
            # the idiomatic way for a management command to abort: the message
            # goes to stderr and the exit code is non-zero.
            raise CommandError("image name: " + newImage + " not exist")
        # if deleting ..
        # res[userid].images.remove(imageObject)
        res[userid].images.add(imageObject)
        print("now user " + str(userid) + " has " + str(len(res[userid].images.all())) + " images")
|
### Takes in a Caffe log output from training, and extracts
### the training and testing loss at each iteration (multiple of 100).
### Dumps this to a .csv for local visualization.
import re
import pandas as pd
# Parse the Caffe training log and dump per-iteration losses to CSV.
# Fixed for Python 3: the file was opened in binary mode ('rb') and then
# split with a str separator, which raises TypeError on Python 3; `xrange`
# does not exist on Python 3 either. Both fixes remain Python 2 compatible.
with open('lstm_np_03/lstm_np_03_log.txt', 'r') as f:
    lines = f.read().split('\n')
train_losses = []
test_losses = []
for line in lines:
    # Loss lines look like "... loss = 1.2345 (...)"; slice out the number.
    if 'Test net output #0' in line:
        test_losses.append(float(
            line[line.index('loss = ') + 7: line.index('(') - 1]))
    elif 'Train net output #0' in line:
        train_losses.append(float(
            line[line.index('loss = ') + 7: line.index('(') - 1]))
df = pd.DataFrame({
    'iteration': range(501, 1001),
    'training_loss': train_losses,
    'testing_loss': test_losses[:-1]  # Extra last testing iteration
})
df.to_csv('lstm_np_03/lstm_np_03_loss.csv', index=False)
def sliding_window(image, window_size, step_size):
    """Yield (x, y, crop) windows scanning `image` left-to-right, top-to-bottom.

    `window_size` and `step_size` are (width, height) pairs; crops at the
    right/bottom edge may be smaller than `window_size`.
    """
    step_x, step_y = step_size
    win_w, win_h = window_size
    for y in range(0, image.shape[0], step_y):
        for x in range(0, image.shape[1], step_x):
            yield x, y, image[y:y + win_h, x:x + win_w]
# mimic ffequity.py in process but do it for carbon allocation
from utils.dataframefile import DataFrameFile
from processors.validator import Validator
from processors.analyst import Analyst
# Data folders the validator reads from.
folderNames = ['assessment', 'financial_data']
def main():
    """Validate the assessment/financial data and run the carbon analysis."""
    # create object instance of DataFrameFile
    # have it read in the assessment data files and the financial data
    # run the fair share allocation for each year
    # write the final data to .csv in benchmark
    # create object instance of dataframefile and validator
    dataframefile = DataFrameFile()
    # read in the assessment datafiles and the financial datafiles
    validator = Validator(folderNames)
    # tell validator to use dataframefile to validate all data and read into dfs
    dfs = validator.validate(dataframefile)
    # create analyst object and pass in dfs to be written out to master spreadsheets
    analyst = Analyst(dfs)
    analyst.analyze_carbon(dataframefile)
    print("Congratulations, the tool has completed the analysis!")
if __name__ == "__main__":
    main()
|
import numpy as np
import pandas as pd
def merge(path_to_mf_lf_1, path_to_mf_lf_2, path_to_mf_hf_1, path_to_mf_hf_2, path_to_sf,
          path_to_mf_lf, path_to_mf_hf, path_to_sf_copy):
    """Merge multi fidelity results to be able to assess differences between scenarios.

    The first five paths are inputs (CSV, first column is the index); the
    last three are outputs: merged low-fidelity, merged high-fidelity, and a
    verbatim copy of the single-fidelity results.
    """
    mf_lf = merge_one_case(pd.read_csv(path_to_mf_lf_1, index_col=0), pd.read_csv(path_to_mf_lf_2, index_col=0))
    mf_hf = merge_one_case(pd.read_csv(path_to_mf_hf_1, index_col=0), pd.read_csv(path_to_mf_hf_2, index_col=0))
    sf = pd.read_csv(path_to_sf, index_col=0) # only bypassed
    mf_lf.to_csv(path_to_mf_lf, index=True, header=True)
    mf_hf.to_csv(path_to_mf_hf, index=True, header=True)
    sf.to_csv(path_to_sf_copy, index=True, header=True)
def merge_one_case(mf1, mf2):
    """Combine a large-scale (mf1) and small-scale (mf2) result frame into a
    single comparison frame.

    NOTE: mutates both inputs in place by adding the derived
    "y-supply-gw" and "y-balancing-gw" columns before merging.
    """
    for df in [mf1, mf2]:
        # Total supply capacity: wind + pv + hydro.
        df["y-supply-gw"] = (
            df["y-wind-gw"]
            + df["y-pv-gw"]
            + df["y-hydro-gw"]
        )
        # Balancing capacity: biofuel + storage.
        df["y-balancing-gw"] = (
            df["y-biofuel-gw"]
            + df["y-storage-gw"]
        )
    return pd.DataFrame(
        index=mf1.index,
        data={
            'd_r': mf1['d_r'],
            'c_util': mf1['c_util'],
            'c_roof': mf1['c_roof'],
            'c_wind': mf1['c_wind'],
            'c_offshore': mf1['c_offshore'],
            'c_sts_p': mf1['c_sts_p'],
            'c_sts_e': mf1['c_sts_e'],
            'c_lts_p': mf1['c_lts_p'],
            'c_lts_e': mf1['c_lts_e'],
            'c_ntc': mf1['c_ntc'],
            'c_bio': mf1['c_bio'],
            'ac_bio': mf1['ac_bio'],
            "y-large-scale-cost-eur": mf1["y-cost-eur"],
            "y-small-scale-cost-eur": mf2["y-cost-eur"],
            "y-cost-diff-eur": mf2["y-cost-eur"] - mf1["y-cost-eur"],
            "y-cost-diff-relative": relative_measure(mf1, mf2, "y-cost-eur"),
            "y-supply-diff-relative": relative_measure(mf1, mf2, "y-supply-gw"),
            "y-wind-diff-relative": relative_measure(mf1, mf2, "y-wind-gw"),
            "y-balancing-diff-relative": relative_measure(mf1, mf2, "y-balancing-gw"),
            "y-large-scale-pv-gw": mf1["y-pv-gw"],
            "y-small-scale-pv-gw": mf2["y-pv-gw"],
            "y-large-scale-wind-gw": mf1["y-wind-gw"],
            "y-small-scale-wind-gw": mf2["y-wind-gw"],
            "y-large-scale-hydro-gw": mf1["y-hydro-gw"],
            "y-small-scale-hydro-gw": mf2["y-hydro-gw"],
            "y-large-scale-biofuel-gw": mf1["y-biofuel-gw"],
            "y-small-scale-biofuel-gw": mf2["y-biofuel-gw"],
            "y-large-scale-storage-gw": mf1["y-storage-gw"],
            "y-small-scale-storage-gw": mf2["y-storage-gw"],
            "y-large-scale-storage-gwh": mf1["y-storage-gwh"],
            "y-small-scale-storage-gwh": mf2["y-storage-gwh"],
            "y-large-scale-transmission-gwkm": mf1["y-transmission-gwkm"],
            "y-small-scale-transmission-gwkm": mf2["y-transmission-gwkm"],
        }
    )
def relative_measure(mf1, mf2, measure):
    """Relative difference (mf2 - mf1) / mf1 for column `measure`.

    Degenerate denominators are neutralised: 0/0 produces NaN -> 0, and a
    non-zero numerator over a zero denominator produces ±inf -> 0. The
    previous version replaced only +inf, so a negative numerator over a zero
    denominator leaked -inf into the output.
    """
    diff = (mf2[measure] - mf1[measure]) / mf1[measure]
    # special case treatment not necessary for final runs
    return diff.fillna(0).replace([np.inf, -np.inf], 0)
if __name__ == "__main__":
merge(
path_to_mf_lf_1=snakemake.input.mf_lf_1,
path_to_mf_lf_2=snakemake.input.mf_lf_2,
path_to_mf_hf_1=snakemake.input.mf_hf_1,
path_to_mf_hf_2=snakemake.input.mf_hf_2,
path_to_sf=snakemake.input.sf,
path_to_mf_lf=snakemake.output.mf_lf,
path_to_mf_hf=snakemake.output.mf_hf,
path_to_sf_copy=snakemake.output.sf
)
|
# Copyright 2016 Oursky Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import tempfile
from pathlib import Path
from urllib.error import URLError
from urllib.request import urlretrieve
import jinja2
logger = logging.getLogger(__name__)
class TemplateNotFound(Exception):
    """Raised when no template with the requested name exists."""

    def __init__(self, template_name):
        self._template_name = template_name

    def __str__(self):
        return 'Cannot find template: {}'.format(self._template_name)

    @property
    def template_name(self):
        """The template name that failed to resolve."""
        return self._template_name
class FileTemplateDownloadError(Exception):
    """Raised when fetching a remote template file fails."""

    def __init__(self, template_name, url, reason):
        self._template_name = template_name
        self._url = url
        self._reason = reason

    def __str__(self):
        return 'Cannot download {} from {}: {}'.format(self.template_name,
                                                       self.url,
                                                       self.reason)

    @property
    def template_name(self):
        """Name of the template that could not be downloaded."""
        return self._template_name

    @property
    def url(self):
        """URL the download was attempted from."""
        return self._url

    @property
    def reason(self):
        """Underlying failure reason reported by urllib."""
        return self._reason
class BaseTemplate:
    """Named template whose content is supplied by subclasses via `get`."""

    def __init__(self, name):
        self._name = name

    @property
    def name(self):
        """Identifier used to look this template up in a provider."""
        return self._name

    def get(self):
        """
        Get the template content.
        This method is expected to be overridden by subclasses.
        """
        return None

    def render(self, **kwargs):
        """
        Render template content.
        Returns None when `get` yields no (truthy) content.
        """
        content = self.get()
        if not content:
            return None
        return content.render(**kwargs)
class FileTemplate(BaseTemplate):
    """Template backed by a file, optionally downloaded from a URL.

    Lookup order for the file: the temp download directory, a local
    `templates/forgot_password` directory, then the package's bundled
    templates.
    """
    @classmethod
    def get_download_dir_path(cls):
        # Downloaded templates are cached under the system temp directory.
        return Path(tempfile.gettempdir()).joinpath('forgot_password',
                                                    'templates')
    @classmethod
    def get_jinja_env(cls):
        # ChoiceLoader tries each loader in order; first hit wins.
        return jinja2.Environment(loader=jinja2.ChoiceLoader([
            jinja2.FileSystemLoader(str(cls.get_download_dir_path())),
            jinja2.FileSystemLoader(
                os.path.abspath("templates/forgot_password")),
            jinja2.PackageLoader('forgot_password', 'templates'),
        ]))
    def __init__(self, name, file_name, download_url=None, required=True):
        super(FileTemplate, self).__init__(name)
        self._file_name = file_name
        self._download_url = download_url
        self._required = required
    @property
    def file_name(self):
        """File name the template is stored under."""
        return self._file_name
    @property
    def download_url(self):
        """Optional URL to fetch the template from; None means local only."""
        return self._download_url
    @property
    def required(self):
        """If True, a missing template raises TemplateNotFound in get()."""
        return self._required
    def download(self):
        """
        Download template file from the URL.
        Raises FileTemplateDownloadError when the fetch fails.
        """
        dir_path = self.get_download_dir_path()
        file_path = dir_path.joinpath(self.file_name)
        dir_path.mkdir(parents=True, exist_ok=True)
        try:
            logger.info('Downloading {} from {}'.format(self.file_name,
                                                        self.download_url))
            urlretrieve(self.download_url, str(file_path))
        except URLError as ex:
            logger.error('Failed to download {} from {}: {}'.format(
                self.file_name, self.download_url, ex.reason))
            raise FileTemplateDownloadError(self.file_name,
                                            self.download_url,
                                            ex.reason)
    def get(self):
        """
        Get the template content.
        Downloads the file first if a URL is set and no cached copy exists.
        Returns None for a missing, non-required template.
        """
        dir_path = self.get_download_dir_path()
        file_path = dir_path.joinpath(self.file_name)
        if self.download_url and not file_path.exists():
            self.download()
        try:
            return self.get_jinja_env().get_template(self.file_name)
        except jinja2.TemplateNotFound:
            if self.required:
                raise TemplateNotFound(self.name)
            return None
class StringTemplate(BaseTemplate):
    """Template whose content is an in-memory string."""
    @classmethod
    def get_jinja_env(cls):
        # BaseLoader suffices: templates are compiled from strings directly.
        return jinja2.Environment(loader=jinja2.BaseLoader())
    def __init__(self, name, content):
        super(StringTemplate, self).__init__(name)
        self._content = content
    @property
    def content(self):
        """Raw template source string (may be empty/None)."""
        return self._content
    def get(self):
        """
        Get the template content.
        Returns None when the source string is empty or None.
        """
        if not self.content:
            return None
        return self.get_jinja_env().from_string(self.content)
class TemplateProvider:
    """Registry mapping template names to template objects."""

    def __init__(self, *args):
        self._templates = {}
        for template in args:
            self.add_template(template)

    def add_template(self, template):
        """Register `template` under its own name, replacing any previous entry."""
        self._templates[template.name] = template

    def get_template(self, name):
        """Return the template registered as `name`; raise TemplateNotFound otherwise."""
        if name not in self._templates:
            raise TemplateNotFound(name)
        return self._templates[name]
|
from unittest.case import TestCase
from probability.distributions import Lomax
class TestLomax(TestCase):
    """Round-trip test: fit a Lomax distribution to samples drawn from
    known parameters and check the estimates come back close."""
    def setUp(self) -> None:
        pass
    def test_fit(self):
        # For each (lambda_, alpha) pair, draw 100k samples and require the
        # fitted parameters to be within 0.2 of the originals.
        for lambda_, alpha in zip(
                (1, 2, 4, 6),
                (2, 2, 1, 1)
        ):
            lomax_orig = Lomax(lambda_=lambda_, alpha=alpha)
            lomax_fit = Lomax.fit(data=lomax_orig.rvs(100_000))
            self.assertAlmostEqual(lomax_orig.lambda_,
                                   lomax_fit.lambda_, delta=0.2)
            self.assertAlmostEqual(lomax_orig.alpha,
                                   lomax_fit.alpha, delta=0.2)
|
from rest_framework import serializers
from books.serializers import UserSerializer, BookSerializer, LibrarySerializer
from waitlist.models import WaitlistItem
class WaitlistItemSerializer(serializers.ModelSerializer):
    """Serializer for WaitlistItem with nested user/library/book objects."""
    # Nested read representations instead of bare primary keys.
    user = UserSerializer()
    library = LibrarySerializer()
    book = BookSerializer()
    added_date = serializers.DateTimeField()
    class Meta:
        model = WaitlistItem
        fields = '__all__'
"""
This file enumerates JSON-LD Framing options to be used in serialising Irish Spatial Data Exchange metadata objects
"""
from enum import Enum
class JSONLDFraming(Enum):
    """JSON-LD framing options used when serialising ISDE metadata objects."""
    # Frame for a Schema.org Dataset: maps the default vocabulary to
    # schema.org and leaves @id unconstrained.
    # NOTE(review): the trailing triple-quoted string below is a bare
    # expression, not a docstring attached to this member.
    DATASET_SCHEMA_ORG = {"@context":
                          {
                              "@vocab": "https://schema.org/"
                          },
                          "@id": None
                          }
    """
    Framing for a Schema.org Dataset
    """
# Compatible with ranger 1.6.0 through ranger 1.7.*
#
# This plugin serves as an example for adding key bindings through a plugin.
# It could replace the ten lines in the rc.conf that create the key bindings
# for the "chmod" command.
import ranger.api
# Chain onto ranger's init hook: keep a reference to the previous hook so
# multiple plugins can coexist.
old_hook_init = ranger.api.hook_init
def hook_init(fm):
    # Run any previously-installed hook first.
    old_hook_init(fm)
    # Generate key bindings for the chmod command
    # e.g. "-ur" -> "shell -d chmod u-r %s"; the empty mode covers bare perms.
    command = "map {0}{1}{2} shell -d chmod {1}{0}{2} %s"
    for mode in list('ugoa') + ['']:
        for perm in "rwxXst":
            fm.execute_console(command.format('-', mode, perm))
            fm.execute_console(command.format('+', mode, perm))
ranger.api.hook_init = hook_init
|
"""
Environments
* Bind names to values, allowing lookup and modification by trampling
* Chain off of one another, allowing lookups and assignments to other
environments
"""
import json
class Environment:
    """Name-to-value binding scope that can chain to an upstream scope.

    Lookups fall back upstream; assignments prefer to trample an existing
    upstream binding before creating a local one.
    """
    def __init__(self, name = "(?)", upstream = None, default_value = "",
                 initial_bindings = None):
        self.__name = name
        self.__bindings = ({} if initial_bindings is None else initial_bindings)
        # Default value to give when something isn't found
        self.__default = default_value
        if type(upstream) not in [type(None), Environment]:
            raise RuntimeError("Upstream environment must be None or an " +
                               "environment")
        self.__upstream = upstream
    @property
    def name(self):
        """Human-readable label for this environment (used in listings)."""
        return self.__name
    def copy(self):
        """Shallow copy: bindings dict is copied, upstream is shared."""
        return Environment(
            name = self.__name,
            upstream = self.__upstream,
            default_value = self.__default,
            initial_bindings = self.__bindings.copy()
        )
    # Bindings search up as far as possible for something to trample, but
    # otherwise stay at this height
    def bind(self, name, value):
        if self.update_upstream(name, value) is None:
            self.__bindings[name] = value
    def bind_here(self, name, value):
        """Bind in this environment only, never touching upstream scopes."""
        self.__bindings [name] = value
    # Return name of environment where binding was updated, or None if no
    # matching binding was found
    def update_upstream(self, name, value):
        up = None
        # Recurse upstream first so the outermost matching scope wins.
        if self.__upstream is not None:
            up = self.__upstream.update_upstream(name, value)
        if up is None and self.__bindings.get(name, None) is not None:
            self.__bindings[name] = value
            return self.__name
        return up
    def bind_no_trample(self, name, value):
        """Bind locally; raise KeyError if the name already exists here."""
        if name in self.__bindings.keys():
            raise KeyError("Key {} already present in environment {}"
                           .format(name, self.__name))
        self.bind_here(name, value)
    def unbind(self, name):
        try:
            del self.__bindings[name]
        except KeyError as e:
            # Unbinding something we don't have is fine
            pass
    # Don't catch the exception when unbinding something we don't have
    def unbind_strict(self, name):
        del self.__bindings[name]
    # Downstream environments have priority
    def get(self, name):
        """Look `name` up here, then upstream; fall back to the default value."""
        mine = self.__bindings.get(name, None)
        if mine is None:
            if self.__upstream is None:
                return self.__default
            else:
                return self.__upstream.get(name)
        else:
            return mine
    def __getitem__(self, name):
        return self.get(name)
    def load_from(self, file_like):
        """Replace this environment's bindings with JSON read from `file_like`."""
        self.__bindings = json.load(file_like)
    def write_to(self, file_like):
        """Dump this environment's bindings (not upstream ones) as JSON."""
        json.dump(self.__bindings, file_like, indent = 4, sort_keys = True)
    def list(self):
        """Formatted listing of local bindings only."""
        return [ "* {} -> {}".format(k, v) for k, v in self.__bindings.items() ]
    def list_tree(self):
        """Formatted listing of this environment and every upstream one."""
        finger = self.__upstream
        accum = ["==========\n{}\n==========".format(self.__name),
                 "\n".join(self.list())]
        while finger is not None:
            accum.append("==========\n{}\n==========".format(finger.__name))
            accum.append("\n".join(finger.list()))
            finger = finger.__upstream
        return accum
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
from secretpy import ColumnarTransposition
from secretpy import alphabets
import unittest
class TestColumnarTransposition(unittest.TestCase):
    """Encrypt/decrypt round trips for the columnar transposition cipher.

    The tuples below are parallel: test case i uses alphabet[i], key[i],
    plaintext[i] and expects ciphertext[i].
    """
    alphabet = (
        alphabets.ENGLISH,
        alphabets.ENGLISH,
        alphabets.ENGLISH,
        alphabets.GERMAN,
        alphabets.SPANISH,
    )
    key = (
        u"zebras",
        u"zebras",
        u"german",
        u"schlüssel",
        u"clave",
    )
    plaintext = (
        u"wearediscoveredfleeatonceqkjeu",
        u"wearediscoveredfleeatonce",
        u"defendtheeastwallofthecastlexx",
        u"textnachtricht",
        u"unmensaiedetexto",
    )
    ciphertext = (
        u"evlneacdtkeseaqrofojdeecuwiree",
        u"evlnacdtesearofodeecwiree",
        u"nalcxehwttdttfseeleedsoaxfeahl",
        u"eihxcthttracnt",
        u"mieuseondtnateex",
    )
    cipher = ColumnarTransposition()
    def test_encrypt(self):
        for i, alphabet in enumerate(self.alphabet):
            enc = self.cipher.encrypt(
                self.plaintext[i], self.key[i], alphabet)
            self.assertEqual(enc, self.ciphertext[i])
    def test_decrypt(self):
        for i, alphabet in enumerate(self.alphabet):
            dec = self.cipher.decrypt(
                self.ciphertext[i], self.key[i], alphabet)
            self.assertEqual(dec, self.plaintext[i])
if __name__ == '__main__':
    unittest.main()
|
# Compound 7% annual growth: find how many years until tuition doubles.
year, tuition = 0, 10000  # Year 0; year 1 tuition is $10,000
while tuition < 20000:
    tuition = tuition * 1.07
    year += 1
print("Tuition will be doubled in", year, "years")
print("Tuition will be $" + format(tuition, ".2f"),
      "in", year, "years")
|
#!/usr/bin/python
from __future__ import absolute_import
from roberta.ev3 import Hal
from ev3dev import ev3 as ev3dev
import math
import os
import time
class BreakOutOfALoop(Exception):
    """Control-flow signal: break out of the current loop."""
    pass


class ContinueLoop(Exception):
    """Control-flow signal: continue with the next loop iteration."""
    pass
# EV3 brick configuration: wheel geometry in cm; no actors/sensors attached.
_brickConfiguration = {
    'wheel-diameter': 5.6,
    'track-width': 18.0,
    'actors': {
    },
    'sensors': {
    },
}
hal = Hal(_brickConfiguration)
# Test fixtures shared with run(): a sample list, an empty list, and the
# accumulator that run() builds up.
___input = [1, 2, 3, 4, 3]
___input2 = []
___result = 0
def run():
    """Exercise list operations on ___input/___input2, accumulating into
    ___result.

    The bare numeric comments record the expected running value of
    ___result at that point in the sequence.
    """
    global ___input, ___input2, ___result
    ___result = len( ___input)
    ___result = ___result + len( ___input2)
    # Empty-list truthiness checks.
    if not ___input:
        ___result = ___result + 1
    else:
        ___result = ___result + 2
    if not ___input2:
        ___result = ___result + 1
    else:
        ___result = ___result + 2
    # 8
    # First and last index of value 3.
    ___result = ___result + ___input.index(3)
    ___result = ___result + (len(___input) - 1) - ___input[::-1].index(3)
    # 14
    # Indexing from both ends.
    ___result = ___result + ___input[1]
    ___result = ___result + ___input[-1 -1]
    ___result = ___result + ___input[0]
    ___result = ___result + ___input[-1]
    # 24
    # pop() returns the removed element.
    ___result = ___result + ___input.pop(1)
    ___result = ___result + ___input.pop(-1 -1)
    ___result = ___result + ___input.pop(0)
    ___result = ___result + ___input.pop(-1)
    ___result = ___result + len( ___input)
    # 35
    ___input.insert(0, 1)
    ___input.insert(-1 -1, 2)
    ___input.insert(0, 0)
    ___input.insert(-1, 4)
    ___result = ___result + len( ___input)
    # 40
    ___input.pop(1)
    ___input.pop(-1 -1)
    ___input.pop(0)
    ___input.pop(-1)
    ___result = ___result + len( ___input)
    ___result = ___result + ___input[-1]
    # 42
    ___input.insert(0, 1)
    ___input.insert(-1 -1, 2)
    ___input.insert(0, 0)
    ___input.insert(-1, 4)
    # Overwrite items at positions from both ends.
    ___input[2] = 3
    ___input[1] = 2
    ___input[-1 -1] = 4
    ___input[0] = 1
    ___input[-1] = 5
    ___result = ___result + sum(___input)
    # 57
    # Slices with positive indices.
    ___result = ___result + sum(___input[1:3])
    ___result = ___result + sum(___input[1:-1 -1])
    ___result = ___result + sum(___input[1:])
    # 89
    # Slices with negative indices.
    ___result = ___result + sum(___input[-1 -3:4])
    ___result = ___result + sum(___input[-1 -4:-1 -3])
    ___result = ___result + sum(___input[-1 -3:])
    # 120
    ___result = ___result + sum(___input[0:3])
    ___result = ___result + sum(___input[0:-1 -3])
    ___result = ___result + sum(___input[0:])
    # 148
    ___result = ___result + min(___input)
    ___result = ___result + max(___input)
    # 154
    # Mean, median and standard deviation.
    ___result = ___result + float(sum(___input))/len(___input)
    ___result = ___result + _median(___input)
    ___result = ___result + _standard_deviation(___input)
    # 161.414...
    # 161.414 - sim, 161.5 - board, OK
def main():
    """Run the benchmark; on any failure, show the error on the EV3 display.

    ``hal`` is expected to be provided by the surrounding EV3/Open-Roberta
    runtime -- it is not defined anywhere in this file (TODO confirm).
    """
    try:
        run()
    except Exception as e:
        hal.drawText('Fehler im EV3', 0, 0)
        hal.drawText(e.__class__.__name__, 0, 1)
        hal.drawText(str(e), 0, 2)
        hal.drawText('Press any key', 0, 4)
        # Block until a key press so the message stays readable, then re-raise.
        while not hal.isKeyPressed('any'): hal.waitFor(500)
        raise
def _median(l):
l = sorted(l)
l_len = len(l)
if l_len < 1:
return None
if l_len % 2 == 0:
return (l[int((l_len - 1) / 2)] + l[int((l_len + 1) / 2)] ) / 2.0
else:
return l[int((l_len - 1) / 2)]
def _standard_deviation(l):
mean = float(sum(l)) / len(l)
sd = 0
for i in l:
sd += (i - mean)*(i - mean)
return math.sqrt(sd / len(l))
# Script entry point: run the benchmark and report errors on the EV3 screen.
if __name__ == "__main__":
    main()
"""
Django settings for feder project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import sys
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path("feder")
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# Useful template tags:
"django.contrib.sitemaps",
"django.contrib.humanize",
# Admin
"django.contrib.admin",
)
THIRD_PARTY_APPS = (
"crispy_forms", # Form layouts
"allauth", # registration
"allauth.account", # registration
"allauth.socialaccount", # registration
"dal",
"dal_select2",
"formtools",
"mptt",
"atom",
"guardian",
"teryt_tree",
"bootstrap_pagination",
"rest_framework",
"reversion",
"django_filters",
"background_task",
"corsheaders",
)
# Local apps which should be put before any other apps
# allowing for example to override third party app's templates.
PRIORITY_LOCAL_APPS = ("feder.main",)
# Apps specific for this project go here.
LOCAL_APPS = (
"feder.teryt",
"feder.users",
"feder.institutions",
"feder.monitorings",
"feder.cases",
"feder.cases_tags",
"feder.letters",
"feder.alerts",
"feder.letters.logs",
"feder.domains",
"feder.records.apps.RecordsConfig",
"feder.parcels.apps.ParcelsConfig",
"feder.virus_scan",
"feder.organisations",
"feder.es_search.apps.EsSearchConfig",
# Your stuff: custom apps go here
)
ALLAUTH_PROVIDERS_APPS = ("allauth.socialaccount.providers.google",)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = (
PRIORITY_LOCAL_APPS
+ DJANGO_APPS
+ THIRD_PARTY_APPS
+ ALLAUTH_PROVIDERS_APPS
+ LOCAL_APPS
+ ("django_cleanup.apps.CleanupConfig",) # should be placed after all other apps
)
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
"django.contrib.sessions.middleware.SessionMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"reversion.middleware.RevisionMiddleware",
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {"sites": "feder.contrib.sites.migrations"}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (str(APPS_DIR.path("fixtures")),)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Karol Breguła""", "karol.bregula@siecobywatelska.pl"),
("""Jakub Stanclik""", "jakub.stanclik@siecobywatelska.pl"),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
"default": env.db("DATABASE_URL", default="mysql:///feder")
}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = "UTC"
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "pl"
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# Default format for datetime.strftime.method
STRFTIME_FORMAT = "%Y-%m-%d %H:%M:%S"
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"OPTIONS": {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
"debug": DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
# Your stuff: custom template context processors go here
],
},
}
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap3"
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR("staticfiles"))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (str(APPS_DIR.path("static")),)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR("media"))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = "feder.main.urls"
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
"guardian.backends.ObjectPermissionBackend",
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
SOCIALACCOUNT_EMAIL_VERIFICATION = "optional"
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "users:redirect"
LOGIN_URL = "account_login"
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = "feder.main.slugifier.ascii_slugify"
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# Console-only logging; django.request errors propagate to the root logger.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
    "handlers": {"console": {"class": "logging.StreamHandler"}},
    "loggers": {
        "django.request": {"handlers": [], "level": "ERROR", "propagate": True},
        "feder.letters.models": {
            # Silence console output during test runs. Uses the `sys` module
            # imported at the top of this file instead of reaching into the
            # `environ` package's internal `environ.sys` attribute, which is
            # an implementation detail of django-environ.
            "handlers": ["console"] if "test" not in sys.argv else [],
            "level": "INFO",
        },
    },
}
# Your common stuff: Below this line define 3rd party library settings
ANONYMOUS_USER_ID = -1
GUARDIAN_RAISE_403 = True
CASE_EMAIL_TEMPLATE = env("CASE_EMAIL_TEMPLATE", default="sprawa-{pk}@{domain}")
DJANGO_MAILBOX_STORE_ORIGINAL_MESSAGE = True
DJANGO_MAILBOX_COMPRESS_ORIGINAL_MESSAGE = True
FILTERS_HELP_TEXT_FILTER = False
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly"
],
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
),
"DEFAULT_PAGINATION_CLASS": "feder.main.paginator.DefaultPagination",
"PAGE_SIZE": 100,
"DEFAULT_FILTER_BACKENDS": ("django_filters.rest_framework.DjangoFilterBackend",),
}
# Per-provider django-allauth settings.
# NOTE(review): only the Google provider app is listed in
# ALLAUTH_PROVIDERS_APPS above; these entries take effect only if the
# matching provider apps are also installed -- confirm intent.
SOCIALACCOUNT_PROVIDERS = {
    "github": {"SCOPE": ["user"]},
    # Fixed typo: the allauth provider key is "gitlab", not "gilab".
    "gitlab": {"SCOPE": ["read_user", "openid"]},
}
EMAIL_NOTIFICATION = env("EMAIL_NOTIFICATION", default="no-reply@siecobywatelska.pl")
EMAILLABS_APP_KEY = env("EMAILLABS_APP_KEY", default="Dummy")
EMAILLABS_SECRET_KEY = env("EMAILLABS_SECRET_KEY", default="Dummy")
INSTALLED_APPS += ("github_revision",)
GITHUB_REVISION_REPO_URL = "https://github.com/watchdogpolska/feder"
SENDFILE_BACKEND = "django_sendfile.backends.development"
SENDFILE_ROOT = MEDIA_ROOT
DATA_UPLOAD_MAX_MEMORY_SIZE = 200000000 # 200MB
NECESSARY_FILES = {
"letters.Letter": {"path": "record__case__monitoring", "fields": ["eml"]},
"letters.Attachment": {
"path": "letter__record__case__monitoring",
"fields": ["attachment"],
},
"parcels.IncomingParcelPost": {
"path": "record__case__monitoring",
"fields": ["content"],
},
"parcels.OutgoingParcelPost": {
"path": "record__case__monitoring",
"fields": ["content"],
},
}
VIRUSTOTAL_API_KEY = env("VIRUSTOTAL_API_KEY", default=None)
ATTACHMENTSCANNER_API_KEY = env("ATTACHMENTSCANNER_API_KEY", default=None)
ATTACHMENTSCANNER_API_URL = env(
"ATTACHMENTSCANNER_API_URL", default="https://beta-eu.attachmentscanner.com"
)
METADEFENDER_API_KEY = env("METADEFENDER_API_KEY", default=None)
METADEFENDER_API_URL = env(
"METADEFENDER_API_URL", default="https://api.metadefender.com"
)
CORS_ALLOWED_ORIGINS = [
"https://sprawdzamyjakjest.pl",
"https://demo.sprawdzamyjakjest.pl",
"https://sjj.127.0.0.1.nip.io",
]
CORS_URLS_REGEX = r"^/api/.*$"
ELASTICSEARCH_URL = env("ELASTICSEARCH_URL", default=None)
APACHE_TIKA_URL = env("APACHE_TIKA_URL", default="http://localhost:9998/tika")
ELASTICSEARCH_SHOW_SIMILAR = env("ELASTICSEARCH_SHOW_SIMILAR", default=False)
|
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
from network.mine import Miner, cite_graph
from sklearn.feature_extraction import text
from utils.lib import O
from collections import OrderedDict
from classify.model import read_papers, make_heatmap, vectorize
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from classify.predict import Metrics
GRAPH_CSV = "data/citemap_v4.csv"
CLASSIFY_CSV = "classify/data.csv"
ACCEPTED = 0
REJECTED = 1
RANDOM_STATE = 1
GROUP_CONFERENCE_MAP = {
15: 'A', 16: 'A',
5: 'B', 8: 'B', 2: 'B', 3: 'B',
10: 'C', 4: 'C', 12: 'C',
14: 'D',
13: 'E', 1: 'E', 9: 'E', 11: 'E',
6: 'F', 7: 'F'
}
# For 11 TOPICS
N_TOPICS = 11
ALPHA = 0.22359
BETA = 0.53915
ITERATIONS = 100
TOPICS = ["Design", "Testing", "Modelling", "Mobile", "Energy", "Defects",
"SourceCode", "WebApps", "Configuration", "Developer", "Mining"]
TOPIC_THRESHOLD = 3
DELIMITER = '|'
STOP_WORDS = text.ENGLISH_STOP_WORDS.union(['software', 'engineering'])
TOKEN_PATTERN = r"(?u)\b\w\w\w+\b"
K_BEST_RATE = 0.2
IS_INDEPENDENT_CONFERENCE = True
STUDIED_CONFERENCES = ['FSE', 'MSR']
def top_authors(graph):
    """Rank authors by the total citation count of their papers.

    Returns an OrderedDict mapping author name -> (author_id, cite_count,
    name), ordered by descending citation count.
    """
    totals = []
    for author_id, papers in graph.get_papers_by_authors().items():
        total = 0
        for paper_id, year, __ in papers:
            cited = graph.paper_nodes[paper_id].cited_count
            if cited:
                total += cited
        totals.append((author_id, total, graph.author_nodes[author_id].name))
    ranked = OrderedDict()
    for a_id, cites, name in sorted(totals, key=lambda t: t[1], reverse=True):
        ranked[name] = (a_id, cites, name)
    return ranked
def format_conf_acceptance(papers):
    """Group papers into a dict keyed by '<conference>-<year>'."""
    grouped = {}
    for paper in papers:
        # (A filter on STUDIED_CONFERENCES used to live here, disabled.)
        key = "%s-%s" % (paper.conference, paper.year)
        grouped.setdefault(key, []).append(paper)
    return grouped
def desk_rejects():
    """Plot, per conference edition, how the mean topic weights of accepted
    papers differ from those of desk-rejected papers.

    Reads submissions via read_papers(), groups them by conference-year and
    writes one heatmap PNG per group into classify/figs/.
    """
    papers = read_papers()
    vectorize(papers)
    submissions = format_conf_acceptance(papers)
    for conf_id, papers in submissions.items():
        # Per-decision topic-weight accumulators and counters:
        # a=accept, r=reject, da=desk-accept, dr=desk-reject.
        a_topics, a_count = np.array([0] * N_TOPICS), 0
        r_topics, r_count = np.array([0] * N_TOPICS), 0
        da_topics, da_count = np.array([0] * N_TOPICS), 0
        dr_topics, dr_count = np.array([0] * N_TOPICS), 0
        for paper in papers:
            if paper.raw_decision == 'pre-reject':
                dr_topics = np.add(dr_topics, paper.transformed)
                dr_count += 1
            elif paper.raw_decision == 'pre-accept':
                da_topics = np.add(da_topics, paper.transformed)
                da_count += 1
            elif paper.decision == 'reject':
                r_topics = np.add(r_topics, paper.transformed)
                r_count += 1
            elif paper.decision == 'accept':
                a_topics = np.add(a_topics, paper.transformed)
                a_count += 1
        # Normalise sums to means, guarding against empty groups.
        if dr_count > 0: dr_topics = dr_topics / float(dr_count)
        if da_count > 0: da_topics = da_topics / float(da_count)
        if r_count > 0: r_topics = r_topics / float(r_count)
        if a_count > 0: a_topics = a_topics / float(a_count)
        col_labels = TOPICS
        row_labels = ["Accept - Desk Rejects"]
        # Fixed: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `int` is the documented replacement.
        heatmap_arr = np.array([[int(round(100 * (a - dr), 0)) for dr, a in zip(dr_topics, a_topics)]], int)
        cmap = mpl.colors.ListedColormap(['red', 'lightsalmon', 'white', 'palegreen', 'lime'])
        bounds = [-10, -8, -2, 2, 8, 10]
        norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
        cax = plt.matshow(heatmap_arr, interpolation='nearest', cmap=cmap, norm=norm)
        # Print the magnitude of each cell on top of the heatmap.
        for (i, j), z in np.ndenumerate(heatmap_arr):
            plt.text(j, i, abs(z), ha='center', va='center', fontsize=11)
        ticks = [-20, -10, 0, 10, 20]
        plt.colorbar(cax, cmap=cmap, norm=norm, boundaries=bounds, ticks=ticks)
        plt.xticks(np.arange(len(list(col_labels))), list(col_labels), rotation="vertical")
        plt.yticks(np.arange(len(list(row_labels))), list(row_labels))
        plt.savefig("classify/figs/desks-%s.png" % conf_id, bbox_inches='tight')
def reputation(only_first=False):
    """Print citation-count statistics for authors of accepted vs rejected
    submissions, per conference edition and overall, in Markdown format.

    :param only_first: when True, only the first author of each paper is
        considered.

    NOTE(review): min()/max() below raise ValueError if a conference has no
    accepted (or no rejected) papers -- presumably that never happens in
    this dataset; confirm.
    """
    if only_first:
        print("## First Authors Only")
    papers = read_papers()
    submissions = format_conf_acceptance(papers)
    # Citation totals per author, computed from the full citation graph.
    author_map = top_authors(cite_graph(GRAPH_CSV))
    accepteds, rejecteds = [], []
    for conf_id, papers in submissions.items():
        accepted, rejected = [], []
        for paper in papers:
            for i, author in enumerate(paper.authors):
                if only_first and i > 0: break
                # Authors absent from the citation graph count as 0 citations.
                cites = 0
                if author in author_map:
                    cites = author_map[author][1]
                if paper.decision == 'accept':
                    accepted.append(cites)
                else:
                    rejected.append(cites)
        print("#### %s" % conf_id)
        print("**Accepted** => Med: %0.2f, IQR: %0.2f, Min: %d, Max: %d" %
              (Metrics.median(accepted), Metrics.iqr(accepted), min(accepted), max(accepted)))
        print("**Rejected** => Med: %0.2f, IQR: %0.2f, Min: %d, Max: %d" %
              (Metrics.median(rejected), Metrics.iqr(rejected), min(rejected), max(rejected)))
        accepteds += accepted
        rejecteds += rejected
    print("#### All")
    print("**Accepted** => Med: %0.2f, IQR: %0.2f, Min: %d, Max: %d" %
          (Metrics.median(accepteds), Metrics.iqr(accepteds), min(accepteds), max(accepteds)))
    print("**Rejected** => Med: %0.2f, IQR: %0.2f, Min: %d, Max: %d" %
          (Metrics.median(rejecteds), Metrics.iqr(rejecteds), min(rejecteds), max(rejecteds)))
if __name__ == "__main__":
# desk_rejects()
reputation(True)
|
# `Iterable` lives in collections.abc since Python 3.3 and was removed from
# the `collections` namespace in Python 3.10.
try:
    from collections.abc import Iterable
except ImportError:  # Python 2 fallback
    from collections import Iterable
import numpy as np
def get_item(collection, item_path):
    """Retrieve an item from a component's collection, possibly going through
    multiple levels of nesting.

    :param collection: the collection where the item resides.
    :param item_path: a string with the name of the item, or an iterable of
        strings giving its location in a nested hierarchy.
    :returns: the value of the specified item in the collection.
    """
    if isinstance(item_path, str):
        # A plain string names a single top-level item.
        return collection[item_path]
    assert isinstance(item_path, Iterable)
    # Descend one nesting level per path component.
    node = collection
    for part in item_path:
        node = node[part]
    return node
def with_units(func):
    """Wrap a NumPy function so that it can be run on iterables of quantities."""
    def aux(values, *args, **kwargs):
        # Convert every quantity to the units of the first value, apply the
        # wrapped function to the bare magnitudes, and re-attach the units by
        # multiplying the list with `values[0].units` (pint-style quantities
        # support list * unit -- TODO confirm which units library is in use).
        try:
            return func([v.to(values[0].units).magnitude for v in values] *
                        values[0].units, *args, **kwargs)
        except AttributeError:  # in case of non-quantity (eg boolean) value
            return np.nan
    return aux
|
#from decimal import decimal
from django.conf import settings
from django.urls import reverse
from django.shortcuts import render, get_object_or_404
from paypal.standard.forms import PayPalPaymentsForm
from orders.models import Order
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def payment_done(request):
    """Landing page PayPal redirects to after a successful payment.

    CSRF is exempted because PayPal POSTs here from an external origin.
    """
    return render(request,'payment/done.html')
@csrf_exempt
def payment_canceled(request):
    """Landing page PayPal redirects to when the payment is canceled."""
    return render(request,'payment/canceled.html')
def payment_process(request):
    """Build the PayPal payment form for the order stored in the session.

    Renders payment/process.html with the order and a PayPalPaymentsForm
    pre-filled with the order amount and the IPN/return/cancel URLs.
    """
    # Local import: the module-level import at the top of this file is
    # commented out (and misspelled `decimal` instead of `Decimal`).
    from decimal import Decimal
    order_id = request.session.get('order_id')
    order = get_object_or_404(Order, id=order_id)
    host = request.get_host()
    paypal_dict = {
        'business': settings.PAYPAL_RECEIVER_EMAIL,
        # Fixed: previously called the undefined name `order_get_total_cost()`
        # instead of the model method `order.get_total_cost()`.
        'amount': '%.2f' % order.get_total_cost().quantize(Decimal('.01')),
        'item_name': ' Order {}'.format(order.id),
        'invoice': str(order.id),
        'currency_code': 'USD',
        # Fixed: the URLs were missing the ':' after 'http'.
        'notify_url': 'http://{}{}'.format(host, reverse('paypal-ipn')),
        'return_url': 'http://{}{}'.format(host, reverse('payment_done')),
        # Fixed: django-paypal expects 'cancel_return', not 'cancel-return'.
        'cancel_return': 'http://{}{}'.format(host, reverse('payment_canceled')),
    }
    form = PayPalPaymentsForm(initial=paypal_dict)
    return render(request, 'payment/process.html', {'order': order, 'form': form})
|
from selenium import webdriver
from bs4 import BeautifulSoup
import time
import pickle
import os
# Hard-coded path to the ChromeDriver binary -- adjust per machine.
driver = webdriver.Chrome('D:\py\chromedriver_win32\chromedriver.exe')
link_list = []    # profile links discovered so far
link_list_v = []  # links already visited
#load profile links from file links.txt
# NOTE(review): os.path.getsize raises FileNotFoundError if the file does not
# exist -- links.txt/links_v.txt are assumed to be created beforehand.
if os.path.getsize('links.txt') > 0:
    with open('links.txt', 'rb') as fp:
        link_list = pickle.load(fp)
#load Already Visited Links from file links_v.txt
if os.path.getsize('links_v.txt') > 0:
    with open('links_v.txt', 'rb') as fp:
        link_list_v = pickle.load(fp)
def login_and_search():
    """Log in to LinkedIn in the module-level Chrome driver.

    Fills the hard-coded credentials into the login form and submits it.
    Returns the driver for convenience.
    """
    #open login page for linkedin
    driver.get('https://www.linkedin.com/uas/login?goback=&trk=hb_signin')
    #maximize the window
    driver.maximize_window()
    #enter username
    email = driver.find_element_by_xpath('//*[@id="username"]')
    email.send_keys('YOUREMAIL') # change it to your username
    #enter password
    password = driver.find_element_by_xpath('//*[@id="password"]')
    password.send_keys('YOURPASSWORD') #change it to your password
    #click login
    login = driver.find_element_by_xpath('//*[@id="app__container"]/main/div/form/div[3]/button')
    login.click()
    return driver
def get_page_links(linkz):
    """Visit a LinkedIn search-results page and harvest new profile links.

    Appends unseen profile links to the global `link_list`, records `linkz`
    as visited in `link_list_v`, persists both lists to disk, and returns
    the updated `link_list`.
    """
    global link_list
    global link_list_v
    driver.get(linkz)
    soup = BeautifulSoup(driver.page_source, 'lxml')
    print ('OPEN: '+driver.current_url)
    link_list_v.append(linkz)
    # The result list appears either as ul.results-list or ul.mt4 depending
    # on the page variant; both are scanned (deduplicated from two nearly
    # identical copy-pasted blocks). The last two anchors of each list are
    # pagination controls and are skipped.
    for css_class in ('results-list', 'mt4'):
        ul = soup.find('ul', class_=css_class)
        if ul:
            anchors = ul.find_all('a')
            for a in anchors[0:len(anchors) - 2]:
                new_link = 'https://www.linkedin.com' + a['href']
                if new_link not in link_list and new_link not in link_list_v:
                    link_list.append(new_link)
                    print (new_link)
        else:
            print('Not List found')
    # Persist progress after every page so a crash loses at most one page.
    with open('links.txt', 'wb') as fp:
        pickle.dump(link_list, fp)
    with open('links_v.txt', 'wb') as fp:
        pickle.dump(link_list_v, fp)
    print( str(len(link_list_v)) + ' Visited out of ' + str(len(link_list)))
    return link_list
########################################### MAIN ###################################
login_and_search()
if link_list:
    # NOTE(review): get_page_links appends to link_list while this loop
    # iterates it, so newly discovered links are crawled in the same pass --
    # presumably intentional (breadth-first crawl); confirm.
    for linkz in link_list:
        if linkz not in link_list_v or not link_list_v:
            get_page_links(linkz)
            # Throttle requests to avoid triggering LinkedIn rate limiting.
            time.sleep(5)
else:
    # Cold start: seed the crawl from a fixed people-search page.
    get_page_links('https://www.linkedin.com/search/results/people/?keyword=python')
#driver.quit()
####################################################################################
|
import re
# Each record looks like |Name|dd/dd/dd|calories| or #Name#dd/dd/dd#calories#
# (the \1 backreference forces a consistent delimiter within one record).
pattern = r'(\||#)([A-Za-z]+ *[A-Za-z]+)\1([0-9]{2}\/[0-9]{2}\/[0-9]{2})\1([0-9]+)\1'
text = input()
calories_count = 0
CALORY_FOR_DAY = 2000
matches = re.findall(pattern, text)
days = 0
for match in matches:
    # NOTE(review): re.findall yields tuples, never lists, so this condition
    # is always true -- it can be dropped.
    if match != []:
        food = match[1]
        time = match[2]
        calories = int(match[3])
        calories_count += calories
        #print(calories_count)
        # Convert the accumulated calories into whole days of food.
        while calories_count >= CALORY_FOR_DAY:
            # NOTE(review): the literal 2000 duplicates CALORY_FOR_DAY.
            calories_count -= 2000
            days += 1
#print(matches)
print(f'You have food to last you for: {days} days!')
for match in matches:
    print(f'Item: {match[1]}, Best before: {match[2]}, Nutrition: {match[3]}')
"""Compare codon and amino-acid substitution-context counts against the
COCOPUTS baseline and write scatter/residual figures for each flank position.

Reads context.tsv and baseline.tsv (codon-indexed count tables), drops rows
with low counts, and saves several PNG figures per delta position.
"""
import pandas as pd
import numpy as np
from itertools import product
from scipy.stats import chisquare
import matplotlib.pyplot as plt
from Bio.Seq import Seq
import logomaker
from scipy.stats import rankdata
# NOTE(review): window, codons, chisquare, logomaker and rankdata are unused
# in this script -- kept for now, candidates for removal.
window = 30
codons = [''.join(i) for i in product('AGTC', repeat=3)]
context_2 = pd.read_csv("context.tsv", sep="\t", header=0, index_col=0)
baseline_2 = pd.read_csv("baseline.tsv", sep="\t", header=0, index_col=0)
# Drop the centre position column; only the '-1' and '1' flanks are compared.
context_2 = context_2.drop(labels=['0'], axis=1)
baseline_2 = baseline_2.drop(labels=['0'], axis=1)
# Keep only rows where every count exceeds 5, and drop the same rows from the
# baseline so the two frames stay row-aligned.
tmp = context_2[context_2 > 5]
r, _ = np.where(tmp.isna())
r = list(set(r))
tmp = tmp.dropna()
tmp2 = baseline_2.drop(labels=baseline_2.index[r], axis=0)
for d in ['-1', '1']:
    # Raw codon counts: observed vs baseline.
    plt.figure(figsize=(10, 5))
    plt.scatter(tmp.index, tmp.loc[:, d], c='r', label="LOEUF<0.35 (vals>5)")
    plt.scatter(tmp2.index, tmp2.loc[:, d], c='g', label="COCOPUTS")
    plt.xticks(rotation=90)
    plt.legend()
    plt.grid(axis='x', linestyle=':')
    plt.title("delta = " + d)
    plt.savefig(d+".png", bbox_inches='tight')
    # Residuals (Pearson-style: (obs - exp) / sqrt(exp)).
    plt.figure(figsize=(10, 5))
    plt.scatter(tmp.index, (tmp.loc[:, d] - tmp2.loc[:, d])/np.sqrt(tmp2.loc[:, d]), c='r', label="residuals")
    plt.xticks(rotation=90)
    plt.legend()
    plt.grid(axis='x', linestyle=':')
    plt.title("delta = " + d)
    plt.savefig(d+"_residuals.png", bbox_inches='tight')
    # Aggregate codon counts per encoded amino acid.
    freq = dict()
    freq2 = dict()
    seq = tmp.loc[:, d]
    seq2 = tmp2.loc[:, d]
    # Fixed: Series.iteritems() was removed in pandas 2.0; .items() is the
    # drop-in replacement and works on older pandas as well.
    for (codon, val) in seq.items():
        q = Seq(codon).translate()
        if q not in freq:
            freq[q] = 0
        freq[q] += val
    for (codon, val) in seq2.items():
        q = Seq(codon).translate()
        if q not in freq2:
            freq2[q] = 0
        freq2[q] += val
    if freq2.keys() != freq.keys():
        print("BAD")
    rng = range(len(freq2.keys()))
    arr = dict(zip(rng, freq2.keys()))
    # Amino-acid counts: observed vs baseline.
    plt.figure(figsize=(10, 5))
    plt.scatter(rng, [freq[arr[i]] for i in rng], c='r', label="LOEUF<0.35 (vals>5)")
    plt.scatter(rng, [freq2[arr[i]] for i in rng], c='g', label="COCOPUTS")
    plt.xticks(ticks=rng, labels=[arr[i] for i in rng])
    plt.legend()
    plt.grid(axis='x', linestyle=':')
    plt.title("delta = " + d)
    plt.savefig(d+"_aa.png", bbox_inches='tight')
    # Amino-acid residuals.
    plt.figure(figsize=(10, 5))
    obs = np.array([freq[arr[i]] for i in rng])
    exp = np.array([freq2[arr[i]] for i in rng])
    plt.scatter(rng, (obs-exp)/np.sqrt(exp), c='r', label="residuals")
    plt.xticks(ticks=rng, labels=[arr[i] for i in rng])
    plt.legend()
    plt.grid(axis='x', linestyle=':')
    plt.title("delta = " + d)
    plt.savefig(d+"_aa_residuals.png", bbox_inches='tight')
import pytest
from django.core import mail
from grandchallenge.subdomains.utils import reverse
from grandchallenge.participants.models import RegistrationRequest
from tests.factories import UserFactory, RegistrationRequestFactory
from tests.utils import get_view_for_user
@pytest.mark.django_db
@pytest.mark.parametrize("participant_review", [True, False])
def test_new_registration_email(participant_review, client, ChallengeSet):
    """POSTing a registration request succeeds and, iff the challenge
    requires participant review, emails the admin with an approval link.
    """
    user = UserFactory()
    ChallengeSet.challenge.require_participant_review = participant_review
    ChallengeSet.challenge.save()
    # Sanity check: no pre-existing request for this user/challenge pair.
    assert not RegistrationRequest.objects.filter(
        user=user, challenge=ChallengeSet.challenge
    ).exists()
    response = get_view_for_user(
        viewname="participants:registration-create",
        client=client,
        method=client.post,
        user=user,
        challenge=ChallengeSet.challenge,
    )
    # Successful creation redirects and persists the request.
    assert response.status_code == 302
    assert RegistrationRequest.objects.filter(
        user=user, challenge=ChallengeSet.challenge
    ).exists()
    if participant_review:
        email = mail.outbox[-1]
        approval_link = reverse(
            "participants:registration-list",
            kwargs={"challenge_short_name": ChallengeSet.challenge.short_name},
        )
        assert ChallengeSet.admin.email in email.to
        assert "new participation request" in email.subject.lower()
        assert ChallengeSet.challenge.short_name in email.subject
        # The approval link is in the HTML alternative of the message.
        assert approval_link in email.alternatives[0][0]
    else:
        with pytest.raises(IndexError):
            # No emails if no review
            # noinspection PyStatementEffect
            mail.outbox[-1]
@pytest.mark.django_db
@pytest.mark.parametrize(
    "new_state", [RegistrationRequest.REJECTED, RegistrationRequest.ACCEPTED]
)
def test_registration_updated_email(new_state, client, ChallengeSet):
    """Updating a registration request's status emails the requesting user
    with an accepted/rejected subject that names the challenge.
    """
    rr = RegistrationRequestFactory(challenge=ChallengeSet.challenge)
    response = get_view_for_user(
        viewname="participants:registration-update",
        client=client,
        method=client.post,
        user=ChallengeSet.admin,
        challenge=ChallengeSet.challenge,
        reverse_kwargs={"pk": rr.pk},
        data={"status": new_state},
    )
    assert response.status_code == 302
    email = mail.outbox[-1]
    assert rr.user.email in email.to
    if new_state == RegistrationRequest.ACCEPTED:
        assert "request accepted" in email.subject.lower()
    else:
        assert "request rejected" in email.subject.lower()
    assert ChallengeSet.challenge.short_name in email.body
|
#!/usr/bin/env python3
import sys
import math
import csv
import itertools
from pprint import pprint
import func
INPUTFILE = './task05.input'
def main():
with open(file=INPUTFILE, mode='r') as fileinput:
lines = list(map(int, fileinput.readlines()))
steps = 0
index = 0
reading = None
# hvis index går under 0 eller over lengden på input, så er vi ute av labyrinten
while index >= 0 and index < len(lines):
reading = lines[index]
if reading >= 3:
lines[index] -= 1
else:
lines[index] += 1
index += reading
steps += 1
print(steps)
if __name__ == '__main__':
if len(sys.argv) == 2:
INPUTFILE = sys.argv[1]
main()
|
import random
import string
import re
import os.path
import jsonpickle
import getopt
import sys
from models.contact import Contact
# Command-line options: -n <number of contacts>, -f <output file>.
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
    # Fixed: the previous handler called the non-existent getopt.usage(),
    # which raised AttributeError and hid the real parsing error; the bare
    # except also swallowed unrelated failures.
    print(err, file=sys.stderr)
    print("Usage: %s [-n NUMBER_OF_CONTACTS] [-f OUTPUT_FILE]" % sys.argv[0], file=sys.stderr)
    sys.exit(2)
n = 5                      # number of random contacts to generate
f = "data/contacts.json"   # output file, relative to the project root
for o, a in opts:
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a
def random_string(max_length):
    """Return 3..max_length-1 random letters/digits/spaces, stripped and
    with internal whitespace collapsed to single spaces."""
    alphabet = string.ascii_letters + string.digits + " "*10
    size = random.randrange(3, max_length)
    raw = "".join([random.choice(alphabet) for _ in range(size)])
    return re.sub(r'\s+', ' ', raw.strip())
def random_email():
    """Return a random local@domain address with a random TLD."""
    tlds = [".com", ".org", ".ua", ".de", ".co.uk", ".gov", ".eu", ".tv"]
    local = re.sub(r'\s+', '', random_string(10))
    host = re.sub(r'\s+', '', random_string(10))
    return local + "@" + host + random.choice(tlds)
def random_phone():
    """Return a '+'-prefixed number of 6..11 random digits."""
    length = random.randrange(6, 12)
    digits = [random.choice(string.digits) for _ in range(length)]
    return "+" + "".join(digits)
def random_address():
    """Return three random lines joined by newlines."""
    return "\n".join([random_string(20), random_string(20), random_string(20)])
# Test data: one deliberately empty contact plus n fully populated random
# contacts.
testdata = [Contact(first_name="", last_name="", email_1="", mobile_phone="", address="")] + [
    Contact(first_name=random_string(10), last_name=random_string(15),
            mobile_phone=random_phone(), work_phone=random_phone(), home_phone=random_phone(), fax=random_phone(),
            email_1=random_email(), email_2=random_email(), email_3=random_email(), address=random_address())
    for i in range(n)
]
# Resolve the output path relative to this script's parent directory and
# serialize the contacts as pretty-printed JSON via jsonpickle.
data_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(data_file, "w") as f_out:
    jsonpickle.set_encoder_options("json", indent=2)
    f_out.write(jsonpickle.encode(testdata))
import datetime
import time
import re
from email.utils import formatdate # rfc822
import markdown
from wmk_utils import slugify
__all__ = [
'date_to_iso',
'date_to_rfc822',
'date',
'date_short',
'date_short_us',
'date_long',
'date_long_us',
'slugify',
'markdownify',
'truncate',
'truncatewords',
'p_unwrap',
'strip_html',
'cleanurl',
]
def _ensure_datetime(d):
"""
Converts dates, unix time stamps and ISO strings to datetime.
Also handles the special strings 'now' and 'today'.
"""
if isinstance(d, datetime.datetime):
return d
elif isinstance(d, datetime.date):
return datetime.datetime(d.year, d.month, d.day)
if isinstance(d, str) and d.isdigit():
d = int(d)
if isinstance(d, (int, float)):
return datetime.datetime.fromtimestamp(d)
elif isinstance(d, str):
if d.lower() == 'now':
return datetime.datetime.now()
elif d.lower() == 'today':
today = datetime.date.today()
return datetime.datetime(today.year, today.month, today.day)
try:
return datetime.datetime.fromisoformat(d)
except:
pass
return None
def date_to_iso(s=None, sep='T', upto='sec', with_tz=False):
    """
    Similar to Jekyll's date_to_xmlschema but more flexible.
    Examples:
    - 2008-11-07T13:07:54-08:00 (with sep='T', upto='sec', with_tz=True)
    - 2008-11-07 13:07 (with sep=' ', upto='min')
    Called without a value it returns a partial, for use in filter pipelines.
    Inputs that cannot be parsed as dates are returned unchanged.
    """
    def inner(s):
        no_tz = not with_tz
        d = _ensure_datetime(s)
        if d is None:
            return s
        # Work on the str() form of the datetime ('YYYY-MM-DD HH:MM:SS[+TZ]').
        d = str(d)
        if sep and sep != ' ' and len(sep) == 1:
            d = d.replace(' ', sep, 1)
        # Default UTC offset, overridden if the string carries its own.
        tz = '+00:00'
        found = re.search(r'([-+]\d\d:\d\d)$', d)
        if found:
            tz = found.group(1)
        # Trim to the requested precision; day precision never carries a tz.
        if upto.startswith('day'):
            no_tz = True
            d = d[:10]
        elif upto.startswith('min'):
            d = d[:16]
        elif upto.startswith('sec'):
            d = d[:19]
        if not no_tz:
            d += tz
        return d
    return inner if s is None else inner(s)
def date_to_rfc822(s):
    """
    Format a date/datetime-like value as an RFC 2822 date string.
    Example: Thu, 5 Apr 2012 23:47:37 +0200
    Inputs that cannot be parsed as dates are returned unchanged.
    """
    d = _ensure_datetime(s)
    if d is None:
        return s
    # Fixed: email.utils.formatdate() takes a unix timestamp, not a datetime
    # -- passing the datetime directly raised a TypeError. localtime=True
    # keeps the local UTC offset shown in the example above.
    return formatdate(d.timestamp(), localtime=True)
def date_short(s):
"""
E.g. 7 Nov 2008
"""
d = _ensure_datetime(s)
if d is None:
return s
return d.strftime('%-d %b %Y')
def date_short_us(s):
"""
E.g. Nov 7th, 2008
"""
d = _ensure_datetime(s)
if d is None:
return s
if d.day in (1, 21, 31):
return d.strftime('%b %-dst, %Y')
elif d.day in (2, 22):
return d.strftime('%b %-dnd, %Y')
elif d.day in (3, 23):
return d.strftime('%b %-drd, %Y')
else:
return d.strftime('%b %-dth, %Y')
def date_long(s):
    """
    E.g. 7 November 2008
    """
    parsed = _ensure_datetime(s)
    # %B spells the month out in full; %-d drops the leading zero
    return s if parsed is None else parsed.strftime('%-d %B %Y')
def date_long_us(s):
    """
    E.g. November 7th, 2008 (full month name, unlike date_short_us).
    """
    d = _ensure_datetime(s)
    if d is None:
        return s
    # pick the English ordinal suffix for the day of the month
    if d.day in (1, 21, 31):
        return d.strftime('%B %-dst, %Y')
    elif d.day in (2, 22):
        return d.strftime('%B %-dnd, %Y')
    elif d.day in (3, 23):
        return d.strftime('%B %-drd, %Y')
    else:
        return d.strftime('%B %-dth, %Y')
def date(s=None, fmt=None):
    """
    Strftime filter. The default format is '%c'.

    Called with s=None it returns the formatter itself (filter-factory
    style). Unparsable input is returned unchanged, consistent with the
    other date filters in this module.
    """
    if not fmt:
        fmt = '%c'
    def inner(s):
        d = _ensure_datetime(s)
        if d is None:
            # bug fix: previously raised AttributeError (None.strftime)
            return s
        return d.strftime(fmt)
    return inner if s is None else inner(s)
def markdownify(s=None, extensions=None):
    """
    Convert markdown to HTML.

    Called with s=None it returns the converter itself (filter-factory
    style). extensions defaults to ['extra'].
    """
    exts = ['extra'] if extensions is None else extensions
    def inner(text):
        return markdown.markdown(text, extensions=exts)
    return inner(s) if s is not None else inner
def truncate(s=None, length=200, ellipsis='…'):
    """
    Truncate to given number of characters. If any shortening occurs,
    an ellipsis will be appended. HTML tags will be stripped.

    Bug fixes vs the original: no IndexError when the stripped text is
    exactly `length` characters long, and the word-boundary check looks
    at the first character actually cut (index length-1 of the slice
    boundary), not one past it.
    """
    def inner(text):
        plain = strip_html(text)
        if len(plain) <= length:
            # nothing to cut - return as-is, no ellipsis
            return plain
        ret = plain[:length - 1]
        # if the cut falls mid-word (the first dropped character is not a
        # separator), drop the dangling partial word as well
        if plain[length - 1] not in (' ', '.', '!', '?', ';', ':'):
            ret = re.sub(r' [^ ]*$', '', ret)
        return ret + ellipsis
    return inner if s is None else inner(s)
def truncatewords(s=None, length=25, ellipsis='…'):
    """
    Truncate to given number of words. If any shortening occurs,
    an ellipsis will be appended. HTML tags will be stripped.
    """
    def inner(text):
        words = strip_html(text).split(' ')
        if len(words) > length:
            return ' '.join(words[:length]) + ellipsis
        return ' '.join(words)
    return inner if s is None else inner(s)
def p_unwrap(s):
    """
    Remove wrapping <p> tag - iff there is only one.
    Typically used like this: `${ short_text | markdownify,p_unwrap }`,
    so as to keep inline tags inside the paragraph but not the wrapping
    p tag.

    Bug fix: the original implicitly returned None for any input that
    was not a single wrapped paragraph; now the (stripped) input is
    returned unchanged in that case.
    """
    s = s.strip()
    if s.startswith('<p>') and s.count('<p>') == 1:
        return s.replace('<p>', '').replace('</p>', '').strip()
    return s
def strip_html(s):
    """
    Remove all html tags (converting markdown to html beforehand).
    TODO: handle entity and character references.
    """
    # render markdown first so raw markdown input also comes out tag-free
    ret = markdownify()(s)
    # hidden tags: remove the tags AND everything between them
    for tag in ('script', 'style', 'template', 'iframe', 'object'):
        rx = r'<' + tag + r'[^>]*>.*?</' + tag + r'[^>]*>'
        ret = re.sub(rx, '', ret, flags=re.IGNORECASE)
    # block tags (at least for our purposes): replaced by a space so words
    # from adjacent blocks don't get glued together
    blocktags = ['address', 'article', 'blockquote', 'details', 'dialog',
                 'dd', 'div', 'dl', 'dt', 'fieldset', 'figcaption', 'figure',
                 'footer', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                 'header', 'img', 'hgroup', 'hr', 'li', 'main', 'nav', 'ol',
                 'p', 'pre', 'section', 'table', 'td', 'th', 'ul']
    # NB: '<+' also swallows an accidentally doubled '<' before the tag name
    rx = r'<+/?(?:' + '|'.join(blocktags) + r')[^>]*>'
    ret = re.sub(rx, ' ', ret, flags=re.IGNORECASE)
    # Inline tags are removed without leaving a space
    ret = re.sub(r'<[^>]+>', '', ret, flags=re.IGNORECASE)
    return ret.strip()
def cleanurl(s):
    """
    Change /path/index.html to /path/.
    """
    if s.endswith('/index.html'):
        # keep the trailing slash: drop only the 'index.html' part
        return s[:-len('index.html')]
    return s
|
from .database import db
from .tokens import Tokens
from .user import User
# Explicit public API of this package: the names re-exported above.
__all__ = [
    'db',
    'Tokens',
    'User'
]
|
"""
An example of mixed python/c code fuzzing. In this case, it is assumed that the Pillow
package in use has been compiled with e.g. afl's llvm_mode..
"""
from cpytraceafl.rewriter import install_rewriter
install_rewriter()
from cpytraceafl import fuzz_from_here, DEFAULT_MAP_SIZE_BITS, get_map_size_bits_env
# must ensure the tracehook module gets imported *before* any instrumented native modules,
# so that the __afl_area_ptr and __afl_prev_loc global symbols have been loaded
from cpytraceafl.tracehook import set_map_start
import sysv_ipc
# if we're going to "warm up" the code under test in a way that executes native instrumented
# code *before* we do the fork & start tracing, we need to provide a dummy memory area for
# __afl_area_ptr to point to. here, use some fresh sysv shared memory because it's what we
# have to hand.
map_size_bits = get_map_size_bits_env() or DEFAULT_MAP_SIZE_BITS
dummy_sm = sysv_ipc.SharedMemory(None, size=1<<map_size_bits, flags=sysv_ipc.IPC_CREX)
set_map_start(dummy_sm.address)
import PIL
# we only want to exercise the PCX code for now: unregister all other plugins so our input
# doesn't get recognized as those formats. not getting recognized as a PCX should just lead to
# a single boring path that doesn't distract the fuzzing process.
PIL._plugins[:] = ["PcxImagePlugin"]
from PIL import Image
import codecs
from io import BytesIO
import sys
# warm up code under test, ensure lazy imports are performed and internal caches are populated.
Image.open(BytesIO(codecs.decode(
"0A05010100000000570033004001C800000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF00010C0000003400000000000000"
"000000000000000000000000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000CB0077CC77CC77CB7700CB0077CC77CC77CB7700CB0077CC77CC77CB"
"7700CB0077CC77CC77CB7700CB0077CC77CC77CB7700CB0077CC77CC77CB7700CB0077CC77CC77"
"CB7700CB0077CC77CC77CB7700CB0077CC77CC77CB7700CB0077CC77CC77CB7700CB0077CC77CC"
"77CB7700CB0077CC77CC77CB7700CB0077CC77CC77CB7776CBFF77",
"hex",
))).getdata()
fuzz_from_here()
with open(sys.argv[1], "rb") as f:
try:
Image.open(f).getdata()
except Exception:
# in this case, a python exception isn't the end of the world - we're after crashers
pass
|
import base64
import json
import os
import time
import cv2
import numpy as np
import requests
from django.core.files.uploadedfile import InMemoryUploadedFile, TemporaryUploadedFile
from django.http import HttpResponse
from cv.controllers.log import logger
from cv.models.plant_disease_recognizer import pdr
URL_PORT = 'http://localhost:8001'
def upload_and_rec_plant_disease(request):
    """
    Upload an image and recognize plant disease with the ``pdr`` model.

    Accepts POST with an ``image`` field that is either a multipart file
    upload, an http(s) URL, or a base64-encoded image string.

    :param request: Django HttpRequest
    :return: HttpResponse with a JSON body:
        ``code`` (0 ok, 1 invalid image, 2 invalid method), ``msg``,
        ``data`` (model output) and ``elapse`` (seconds spent).
    """
    tik = time.time()
    image_dir = 'cv/static/PDRUpload'
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    result = {'data': {}}
    image_url_record = ''
    if request.method != "POST":
        result['code'] = 2
        result['msg'] = 'Invalid HTTP Method'
        result['data'] = None
        result['elapse'] = 0
        return HttpResponse(json.dumps(result, ensure_ascii=False))
    img_f = request.FILES.get("image", None)
    image = img_f if img_f is not None else request.POST.get("image", None)
    if image is None:
        result['code'] = 1
        result['msg'] = 'Invalid Image'
        result['elapse'] = 0
        return HttpResponse(json.dumps(result, ensure_ascii=False))
    network_latency_tik = time.time()
    # Bug fix: default the "tok" mark so the latency log below cannot
    # raise NameError when an exception fires before download/parse ends.
    network_latency_tok = network_latency_tik
    if not isinstance(image, (InMemoryUploadedFile, TemporaryUploadedFile)):
        imgstr = image
        if 'http://' in imgstr or 'https://' in imgstr:
            image_url_record = imgstr
            # in case of bad image URL
            try:
                response = requests.get(image_url_record)
                image = np.asarray(bytearray(response.content), dtype="uint8")
                network_latency_tok = time.time()
                image = cv2.imdecode(image, cv2.IMREAD_COLOR)
                result['data'] = pdr.infer(image)
                result['code'] = 0
                result['msg'] = 'success'
            except Exception:
                result['code'] = 1
                result['msg'] = 'Invalid Image'
        else:  # base64
            # in case of broken base64 image
            try:
                img_bytes = base64.b64decode(imgstr)
                # Bug fix: decode the bytes to a BGR image exactly like the
                # URL branch does. The previous np.float64 view of the raw
                # compressed bytes was never a decodable image.
                image = np.frombuffer(img_bytes, dtype=np.uint8)
                network_latency_tok = time.time()
                image = cv2.imdecode(image, cv2.IMREAD_COLOR)
                result['data'] = pdr.infer(image)
                result['code'] = 0
                result['msg'] = 'success'
            except Exception:
                result['code'] = 1
                result['msg'] = 'Invalid Image'
    else:  # used in browser: persist the upload, then infer from the path
        upload_path = os.path.join(image_dir, image.name)
        with open(upload_path, 'wb+') as destination:
            for chunk in image.chunks():
                destination.write(chunk)
        network_latency_tok = time.time()
        result['data'] = pdr.infer(upload_path)
        result['code'] = 0
        result['msg'] = 'success'
    result['elapse'] = time.time() - tik
    logger.debug("NetworkLatency {}".format(network_latency_tok - network_latency_tik))
    return HttpResponse(json.dumps(result, ensure_ascii=False))
|
import pandas as pd
from ai4netmon.Analysis.bias import generate_distribution_plots as gdp
from matplotlib import pyplot as plt
from matplotlib import cm, colors
# Input/output locations for the bias-vs-improvement analysis.
AGGREGATE_DATA_FNAME = 'https://raw.githubusercontent.com/sermpezis/ai4netmon/dev/data/aggregate_data/asn_aggregate_data_20211201.csv'
BIAS_DF = './data/bias_df__no_stubs.csv'
IMPROVEMENTS = '../../data/misc/improvements20210601.txt'
SAVE_CSV_FNAME = './data/df_bias_vs_improvement_detailed.csv'
RENAME_COLUMNS = True
## load network data
df = pd.read_csv(AGGREGATE_DATA_FNAME, header=0, index_col=0)
df['is_personal_AS'].fillna(0, inplace=True)
## load improvement data (keep only the GLOBAL / IPv4 rows)
df_imp = pd.read_csv(IMPROVEMENTS, sep=" ", names=['loc', 'IPv', 'ASN', 'improvement'])
df_imp.set_index('ASN', inplace=True)
df_imp = df_imp[(df_imp['loc']=='GLOBAL') & (df_imp['IPv']==4)]
# drop AS-set rows (indices like '{A,B}'), then normalize index to int;
# the float->int two-step also handles '123.0'-style strings
df_imp = df_imp.loc[~df_imp.index.str.contains('{'),:]
df_imp.index = [int(i) for i in df_imp.index]
df_imp.index = df_imp.index.astype(float)
df_imp.index = df_imp.index.astype(int)
## load detailed bias data (transposed so rows = ASNs, cols = bias dims)
df_bias = pd.read_csv(BIAS_DF, header=0, index_col=0).T
df_bias.index.name = 'ASN'
# keep the RIPE RIS row aside as the reference, remove it from the table
ripe_bias_df = df_bias.loc['RIPE RIS']
df_bias.drop(index=['RIPE RIS'], inplace=True)
df_bias.index = df_bias.index.astype(float)
df_bias.index = df_bias.index.astype(int)
for c in df_bias.columns:
    # df_bias[c] = (df_bias[c] - ripe_bias_df[c])/ripe_bias_df[c] * 100
    # absolute difference from the RIPE RIS reference bias
    df_bias[c] = df_bias[c] - ripe_bias_df[c]
df_bias = df_bias.add_prefix('bias_')
# create dataframe from existing info for plotting
df_plot = pd.merge(df_imp,df_bias, left_index=True, right_index=True)
df_plot = pd.merge(df_plot,df[['AS_rank_continent','AS_rank_iso']], how='left', left_index=True, right_index=True)
df_plot.drop(columns=['loc', 'IPv'], inplace=True)
print(df_plot)
# colors_property = 'AS_rank_continent'
# unique_values = pd.unique(df_plot[colors_property]).tolist()
# dict_colors = {v:i for i,v in enumerate(unique_values)}
# df_plot['Colors'] = [dict_colors.get(v) for v in df_plot[colors_property]]
df_plot['ASN'] = df_plot.index
if RENAME_COLUMNS:
    # human-readable column names for the exported CSV
    new_names = {
        'bias_AS_rank_source': 'RIR region',
        'bias_AS_rank_iso': 'Country',
        'bias_AS_rank_continent': 'Continent',
        'bias_AS_rank_numberAsns': 'Customer cone (#ASNs)',
        'bias_AS_rank_numberPrefixes': 'Customer cone (#prefixes)',
        'bias_AS_rank_numberAddresses': 'Customer cone (#addresses)',
        'bias_AS_hegemony': 'AS hegemony',
        'bias_AS_rank_total': '#neighbors (total)',
        'bias_AS_rank_peer': '#neighbors (peers)',
        'bias_AS_rank_customer': '#neighbors (customers)',
        'bias_AS_rank_provider': '#neighbors (providers)',
        'bias_peeringDB_ix_count': '#IXPs',
        'bias_peeringDB_fac_count': '#facilities',
        'bias_peeringDB_policy_general': 'Peering policy',
        'bias_peeringDB_info_type': 'Network type',
        'bias_peeringDB_info_ratio': 'Traffic ratio',
        'bias_peeringDB_info_traffic': 'Traffic volume',
        'bias_peeringDB_info_scope': 'Scope',
        'bias_is_personal_AS': 'Personal ASN'}
    df_plot.rename(new_names, axis=1, inplace=True)
print(df_plot.columns)
df_plot.to_csv(SAVE_CSV_FNAME, index=False)
print(df_plot)
import os
from Jumpscale import j
from .OauthInstance import OauthClient
# alias for the jumpscale config-collection base class
JSConfigs = j.baseclasses.object_config_collection
class OauthFactory(JSConfigs):
    """Collection of OauthClient config instances, mounted at j.clients.oauth."""
    __jslocation__ = "j.clients.oauth"
    _CHILDCLASS = OauthClient
|
from backend.database.models import StaffToken
from backend.site import StaffSite
import cognitojwt
from cryptography.fernet import Fernet
from pyramid.httpexceptions import HTTPBadRequest
from pyramid.httpexceptions import HTTPSeeOther
from pyramid.view import view_config
import datetime
import hashlib
import json
import logging
import requests
import urllib.parse
log = logging.getLogger(__name__)
def get_redirect_uri(request, staff_site):
    """Return the absolute URL Cognito should redirect back to after login."""
    callback_name = 'cognito-cb'
    return request.resource_url(staff_site, callback_name)
@view_config(
    name='login',
    context=StaffSite)
def login(staff_site, request):
    """Redirect the browser to the Amazon Cognito hosted login page."""
    settings = request.ferlysettings
    params = urllib.parse.urlencode([
        ('response_type', 'code'),
        ('client_id', settings.cognito_client_id),
        ('redirect_uri', get_redirect_uri(request, staff_site)),
    ])
    return HTTPSeeOther('https://%s/login?%s' % (settings.cognito_domain, params))
@view_config(
    name='cognito-cb',
    context=StaffSite,
    renderer='templates/cognito_cb.pt')
def cognito_callback(staff_site, request):
    """Receive an OAuth code, add a StaffToken, and redirect.

    Flow: exchange the authorization code for tokens at Cognito, verify
    the ID token, then store the tokens encrypted with a fresh Fernet
    key whose plaintext lives only in the browser cookie (the DB keeps
    just the key's SHA-256, so a DB leak alone cannot decrypt them).
    """
    settings = request.ferlysettings
    code = request.params.get('code')
    # basic sanity check on the authorization code before using it
    if not code or len(code) > 100:
        raise HTTPBadRequest()
    token_url = 'https://%s/oauth2/token' % settings.cognito_domain
    token_data = [
        ('grant_type', 'authorization_code'),
        ('client_id', settings.cognito_client_id),
        ('code', code),
        ('redirect_uri', get_redirect_uri(request, staff_site)),
    ]
    resp = requests.post(
        token_url,
        auth=(settings.cognito_client_id, settings.cognito_client_secret),
        data=token_data)
    resp.raise_for_status()
    tokens_json = resp.json()
    assert 'access_token' in tokens_json
    assert 'refresh_token' in tokens_json
    # validates signature/claims of the ID token against the user pool
    verified_claims = cognitojwt.decode(
        tokens_json['id_token'],
        settings.cognito_region,
        settings.cognito_userpool_id,
        app_client_id=settings.cognito_client_id,
    )
    # access_token = tokens_json['access_token']
    # info_url = 'https://%s/oauth2/userInfo' % settings.cognito_domain
    # resp = requests.get(
    #     info_url, headers={
    #         'Authorization': 'Bearer %s' % access_token,
    #     })
    # resp.raise_for_status()
    # user_info = resp.json()
    # log.info("Staff login user_info: %s", user_info)
    username = verified_claims['cognito:username']
    groups = verified_claims['cognito:groups']
    secret = Fernet.generate_key()
    secret_sha256 = hashlib.sha256(secret).hexdigest()
    tokens_encoded = json.dumps(tokens_json).encode('utf-8')
    tokens_fernet = Fernet(secret).encrypt(tokens_encoded).decode('ascii')
    now = datetime.datetime.utcnow()
    st = StaffToken(
        secret_sha256=secret_sha256,
        tokens_fernet=tokens_fernet,
        update_ts=now + datetime.timedelta(
            seconds=settings.token_trust_duration),
        expires=now + datetime.timedelta(
            seconds=settings.token_duration),
        user_agent=request.user_agent,
        remote_addr=request.remote_addr,
        username=username,
        groups=groups,
        id_claims=verified_claims,
    )
    request.dbsession.add(st)
    request.dbsession.flush()  # Assign st.id
    # the cookie is the only place the Fernet key exists in plaintext
    cookie_value = '%s-%s' % (st.id, secret.decode('ascii'))
    location = request.resource_url(staff_site)
    request.response.set_cookie(
        'staff_token',
        cookie_value,
        path=request.resource_path(staff_site),
        secure=settings.secure_cookie,
        httponly=True,
        max_age=datetime.timedelta(days=365))
    delete_old_tokens(request)
    return {'location': location}
@view_config(
    name='logout',
    context=StaffSite,
    renderer='templates/logout.pt')
def logout(staff_site, request):
    """Unset the staff_token cookie."""
    cookie_kwargs = dict(
        path=request.resource_path(staff_site),
        secure=request.ferlysettings.secure_cookie,
        httponly=True)
    request.response.set_cookie('staff_token', '', **cookie_kwargs)
    return {}
def delete_old_tokens(request):
    """Purge StaffToken rows whose expiry is older than the retention window."""
    retention = datetime.timedelta(days=request.ferlysettings.token_delete_days)
    cutoff = datetime.datetime.utcnow() - retention
    (request.dbsession.query(StaffToken)
        .filter(StaffToken.expires <= cutoff)
        .delete())
|
"""
Here we'll define the dropdown with
all the information options.
"""
from __future__ import unicode_literals
import re
import subprocess
import sys
import click
import isort # noqa: F401
import questionary
# import snoop
from questionary import Separator, Style
subprocess.run(["isort", __file__])
# @snoop
def dropdown():
"""
We'll use Questionary's multiple choice option, to ask what information he wants.
It was used variables to identify the questions strings, because this allows for a
value, dependent on a series of 'if' statements, to be chosen from them. When I did
the same without the loop, the value was always the last if clause value. It was also
added the 'path' and 'units' values to their respective 'app' and 'resposta' variables,
so that, when running 'main', all the necessary information is already processed.
"""
custom_style_monitor = Style(
[
("qmark", "fg:#8E806A bold"),
("question", "fg:#E0DDAA bold"),
("answer", "fg:#eeedde"),
("pointer", "fg:#BB6464 bold"),
("highlighted", "fg:#E5E3C9 bold"),
("selected", "fg:#94B49F bold"),
("separator", "fg:#ff5c8d"),
("instruction", "fg:#E4CDA7"),
("text", "fg:#F1E0AC bold"),
]
)
ambit = questionary.confirm(
"Is your question about a specific service?",
qmark="[x]",
default=False,
auto_enter=False,
).ask()
if ambit:
app = questionary.select(
"What app do you want to use?",
qmark="[x]",
pointer="++",
use_indicator=True,
style=custom_style_monitor,
choices=["Backups Service", "Yay Service", "Flower", "Pip", Separator("----- EXIT -----"), "Exit"],
).ask()
resposta = questionary.checkbox(
"What do you want to see?",
qmark="[x]",
pointer="++",
style=custom_style_monitor,
choices=[
Separator("----- CELERY INFORMATION -----"),
"See: Clock",
"See: Scheduled",
"See: Stats",
"See: Reports",
"See: Events",
Separator("----- SYSTEMD INFORMATION -----"),
"See: Service_Status",
"See: Service_Logs",
Separator("----- SYSTEMD ACTIONS -----"),
"See: Delete_Service",
"See: Create_Service",
"See: Stop_Service",
"See: Edit_Service",
"See: Start_Service",
"See: Daemon_Reload",
"See: Reset_Failed",
Separator("----- EXIT -----"),
"Exit",
],
).ask()
print(click.style(f"app: {app}, resposta: {resposta}", fg="bright_white", bold=True))
response = [app, resposta]
return response
if ambit is False:
generalist = questionary.checkbox(
"What do you want to see?",
qmark="[x]",
pointer="++",
style=custom_style_monitor,
choices=[
Separator("----- CELERY INFORMATION -----"),
"See: Active_Nodes",
Separator("----- SYSTEMD INFORMATION -----"),
"See: Timers",
"See: Active_Services",
"See: Service_Logs",
Separator("----- SYSTEMD ACTIONS -----"),
"See: Delete_Service",
"See: Create_Service",
Separator("----- EXIT -----"),
"Exit",
],
).ask()
print(click.style(f"generalist: {generalist}", fg="bright_white", bold=True))
general = ["dummy_app", generalist, "dummy_service"]
return general
if __name__ == "__main__":
dropdown()
|
# Copyright (c) Fahad Ahammed 2021.
|
# -*- encoding:utf-8 -*-
"""
company:IT
author:pengjinfu
project:migu
time:2020.5.4
"""
from info import user, pwd
import requests
import execjs
import asyncio
class Login():
    """Log in to app.etongdai.com: hash the password with the site's own
    JS `toMd5` routine, then POST the login form with the shared session."""

    def __init__(self):
        # one session so cookies persist across requests
        self.session = requests.Session()
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3314.0 Safari/537.36 SE 2.X MetaSr 1.0'}

    async def get_js_info(self):
        """Run toMd5 from yitongdai.js on the configured password `pwd`
        and return the digest string."""
        # NOTE(review): file opened without an explicit encoding - relies
        # on the platform default; confirm the JS file is ASCII/UTF-8.
        with open('yitongdai.js', 'r') as file:
            js = file.read()
        password = execjs.compile(js).call('toMd5', pwd)
        return password

    async def handle_request(self, url, data=None, headers=None):
        """POST and return the parsed JSON body on HTTP 200.
        Implicitly returns None for any other status code."""
        response = self.session.post(url, data=data, headers=headers)
        if response.status_code == 200:
            return response.json()

    async def login(self):
        """Submit the login form; returns the server's JSON response (or None)."""
        # local `pwd` (the MD5 digest) deliberately shadows the imported plaintext
        pwd = await self.get_js_info()
        url = 'https://app.etongdai.com/login/verifylogin'
        data = {
            'loginName': user,
            'password': pwd,
            'check': 'on',
            'next': 'null'
        }
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Host': 'app.etongdai.com',
            'Origin': 'https://app.etongdai.com',
            'Referer': 'https://app.etongdai.com/login/index',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3314.0 Safari/537.36 SE 2.X MetaSr 1.0',
        }
        response = await self.handle_request(url, data, headers)
        return response

    async def run(self):
        # print the login result for manual inspection
        print(await self.login())
if __name__ == '__main__':
    login = Login()
    task = login.run()
    # NOTE(review): asyncio.get_event_loop() is deprecated for this use since
    # Python 3.10; asyncio.run(task) is the modern equivalent.
    asyncio.get_event_loop().run_until_complete(task)
|
import os
# Build paths (PROJECT_DIR = sandbox and BASE_DIR = root)
# PROJECT_DIR: two levels above this settings file (the sandbox package)
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# BASE_DIR: repository root, one level above PROJECT_DIR
BASE_DIR = os.path.dirname(PROJECT_DIR)
|
import os
from pathlib import Path
import pytest
from pipeline.execution import _patch_subprocess_environment
@pytest.mark.unit
def test_patch_subprocess_environment(monkeypatch):
    """The project directory must be exposed on PYTHONPATH, starting from
    an empty environment."""
    monkeypatch.setattr(os, "environ", {})
    project_dir = Path("C:/dummy_project_directory")
    patched = _patch_subprocess_environment({"project_directory": str(project_dir)})
    assert patched == {"PYTHONPATH": f"{project_dir};"}
|
/usr/local/lib/python3.6/copyreg.py |
from fila_da_creche.queries.dt_atualizacao import get_dt_atualizacao
from fila_da_creche.queries.fila_por_escolas import get_fila_por_escolas
from fila_da_creche.queries.espera import get_espera
from rest_framework.response import Response
from rest_framework.views import APIView
from utils.get_raio import get_raio
class GetFilaByEscola(APIView):
    """Daycare waiting-list endpoint: queue data around a lat/lon point."""

    def get(self, request, cd_serie, lat, lon):
        # only known grade codes are accepted
        if cd_serie not in (1, 4, 27, 28):
            return Response('Série invalida')
        raio = get_raio(cd_serie)
        # Queries no Banco do Fila da Creche
        return Response({
            'espera': get_espera(cd_serie, lon, lat, raio),
            'escolas': get_fila_por_escolas(cd_serie, lon, lat, raio),
            'dt_atualizacao': get_dt_atualizacao(),
        })
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenAppTestapiiSyncModel(object):
    """Request model for alipay.open.app.testapii.sync with fields in_1, in_2."""

    # the two request fields handled by the (de)serializers below
    _FIELDS = ('in_1', 'in_2')

    def __init__(self):
        self._in_1 = None
        self._in_2 = None

    @property
    def in_1(self):
        return self._in_1

    @in_1.setter
    def in_1(self, value):
        self._in_1 = value

    @property
    def in_2(self):
        return self._in_2

    @in_2.setter
    def in_2(self, value):
        self._in_2 = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a plain dict; nested models recurse."""
        params = {}
        for name in self._FIELDS:
            value = getattr(self, name)
            # NB: falsy values (0, '', None) are skipped - SDK convention
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; a falsy input yields None."""
        if not d:
            return None
        o = AlipayOpenAppTestapiiSyncModel()
        for name in AlipayOpenAppTestapiiSyncModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
|
import pandas
import requests
import re
from .utils import \
last_seven_days, get_str_day, \
strip_irc_chars, strip_channel_name, \
get_nick_from_message
# close "A value is trying to be set on a copy of a slice from a DataFrame." warning
pandas.options.mode.chained_assignment = None
def get_log(base_url, channel, date):
    """Fetch one day of IRC logs from the HTTP API and return the raw log list."""
    [year, month, day] = get_str_day(date)
    url = (base_url + '/api/irc-logs?'
           + 'channel=' + strip_channel_name(channel)
           + '&year=' + year
           + '&month=' + month
           + '&day=' + day
           + '&limit=0&offset=0')
    response = requests.get(url)
    return response.json()['data']['logs']
def last_week_data(base_url, channel, bot_list = [], mask_dict = None):
    """Return a DataFrame of the last 7 days of logs for `channel` with
    columns [channel, date, time, nick, message].

    bot_list: relay-bot nicks whose messages embed the real nick as '[nick] msg'.
    mask_dict: optional mapping of masked nick -> real nick.
    NOTE(review): the mutable default bot_list=[] is only iterated here,
    never mutated, so it is harmless.
    """
    def to_df(date):
        return pandas.DataFrame(get_log(base_url, channel, date))
    # add extra info date, channel to DataFrame
    def add_extra(df, date):
        [year, month, day] = get_str_day(date)
        df['date'] = pandas.Series(year + '-' + month + '-' + day, index=df.index)
        df['channel'] = pandas.Series(channel, index=df.index)
        return df
    def db_append(host, nd):
        # concatenate, re-numbering the combined index
        result = host.append(nd, ignore_index=True)
        return result
    def convert_bot_log(df, bot_name):
        # rows relayed by the bot: recover the real nick and strip it from the text
        tdf = df[df['nick'] == bot_name]
        tdf['nick'] = tdf['message'].map(lambda message: get_nick_from_message(message)) # real nick
        tdf['message'] = tdf['message'].map(lambda message: re.sub(R'^\[(\w+)\]', '', message).strip()) # remove nick from message
        # write data back
        df['nick'].update(tdf['nick'])
        df['message'].update(tdf['message'])
        return df
    days_list = last_seven_days()
    df = None
    for date in days_list:
        try:
            tmp_df = add_extra(to_df(date), date)
        except:
            # a day with no logs (HTTP/JSON failure) becomes an empty frame
            tmp_df = pandas.DataFrame([])
        df = db_append(df, tmp_df) if df is not None else tmp_df
    # strip special irc chars
    df['message'] = df['message'].map(lambda message: strip_irc_chars(message))
    # convert bot log
    if len(bot_list):
        for name in bot_list:
            df = convert_bot_log(df, name)
    # replace mask to real nick
    if mask_dict:
        df['nick'] = df['nick'].map(lambda nick: mask_dict[nick] if nick in mask_dict else nick)
    return df[['channel', 'date', 'time', 'nick', 'message']]
|
import numpy
import noodles
import time
from copy import copy
@noodles.schedule(call_by_ref=['gobble'])
def mul(x, y, gobble):
    # 'gobble' is a deliberately large payload; call_by_ref keeps it out of
    # the serialized workflow graph
    return x*y
@noodles.schedule(call_by_ref=['gobble'])
def factorial(x, gobble):
    # small delay so scheduling overhead is observable in the demo
    time.sleep(0.1)
    if numpy.all(x == 0):
        return numpy.ones_like(x)
    else:
        # recursive definition: x! = (x-1)! * x; gobble is copied per level
        return mul(factorial(x - 1, copy(gobble)), x, gobble)
# ~80 MB of zeros to make the call-by-ref benefit measurable
gobble_size = 10000000
gobble = numpy.zeros(gobble_size)
# build the promise graph for 10! and evaluate it single-threaded
result = noodles.run_single(factorial(10, gobble))
|
# power_of_2.py
#
# print a list of powers of 2
# also includes input validation
# CSC 110
# Fall 2011
# prompt once, then keep re-prompting until the value is in range
top = int(input('Enter a value between 1 and 20: '))
# an indefinite loop for input validation
while not (1 <= top <= 20):
    print(f'Error. {top} is not between 1 and 20. Please try again.')
    top = int(input('Enter a value between 1 and 20 '))
# a count controlled loop printing 2**0 .. 2**top
for exp in range(top + 1):
    print(f'2**{exp} = {2 ** exp}')
|
# -*- coding: utf-8 -*-
"""
discopy error messages.
"""
IGNORE_WARNINGS = [
    "No GPU/TPU found, falling back to CPU.",
    "Casting complex values to real discards the imaginary part"]


def empty_name(got):
    """ Empty name error. """
    return f"Expected non-empty name, got {repr(got)}."


def type_err(expected, got):
    """ Type error. """
    return (f"Expected {expected.__module__}.{expected.__name__}, "
            f"got {repr(got)} of type {type(got).__name__} instead.")


def does_not_compose(left, right):
    """ Composition error. """
    return f"{left} does not compose with {right}."


def is_not_connected(diagram):
    """ Disconnected error. """
    return f"{diagram} is not connected."


def boxes_and_offsets_must_have_same_len():
    """ Disconnected diagram error. """
    return "Boxes and offsets must have the same length."


def are_not_adjoints(left, right):
    """ Adjunction error. """
    return f"{left} and {right} are not adjoints."


def pivotal_not_implemented():
    """ Pivotal error. """
    return "Pivotal categories are not implemented."


def cup_vs_cups(left, right):
    """ Simple type error. """
    return ("Cup can only witness adjunctions between simple types. "
            f"Use Diagram.cups({left}, {right}) instead.")


def cap_vs_caps(left, right):
    """ Simple type error, derived from the Cup message. """
    return cup_vs_cups(left, right).replace('up', 'ap')


def cannot_add(left, right):
    """ Addition error. """
    return f"Cannot add {left} and {right}."


def expected_pregroup():
    """ pregroup.draw error. """
    return "Expected a pregroup diagram, use diagram.draw() instead."


def expected_input_length(function, values):
    """ Arity mismatch error. """
    return (f"Expected input of length {len(function.dom)}, "
            f"got {len(values)} instead.")
|
# a tiny dictionary-access demo followed by a sanity-check conditional
alien_0 = {'color': 'Green', 'points': 5}
for key in ('color', 'points'):
    print(alien_0[key])
answer = 17
print('ok' if answer == 17 else "error")
|
from ntptime import settime
from micropython import const, alloc_emergency_exception_buf
from uctypes import addressof
from machine import Pin, Timer, I2C, WDT
from gc import collect
from esp32 import RMT
from wifiman import get_connection
import time, dst
# set to 1 to enable the SSD1306 status display
disp=const(0)
# indices into sendtime[]
cyear=const(0)
cmonth=const(1)
cday=const(2)
chour=const(3)
cminute=const(4)
csecond=const(5)
cweekday=const(6)
cdst=const(7)
# time Y-M-D h:m:s weekday dst
sendtime=bytearray(8)
# second counter 0-59
second=bytearray(1)
# TX bits for one minute
minute=bytearray(59)
# index for writing to minute[]
index=bytearray(1)
# 1-second timer
timer=Timer(3)
# last day NTP was set
ntpday=0
led=Pin(2,Pin.OUT)
antena=Pin(15,Pin.OUT)
# RMT peripheral generates the 77.5 kHz carrier on the antenna pin
ask=RMT(0,pin=antena,carrier_freq=0,clock_div=1) # 80 MHz
ask.loop(True)
if disp:
    import ssd1306
    i2c = I2C(scl=Pin(4), sda=Pin(5))
    oled = ssd1306.SSD1306_I2C(128, 64, i2c, 0x3c)
    oled.fill(0)
    oled.text("DCF77", 0, 0)
    oled.show()
    weekdaystr = ["MO","TU","WE","TH","FR","SA","SU"]
# desired carrier frequency
freq=77500
# tuning parameters - adjust with scope
# coarse tuning, about 75 Hz per step
tuning_coarse=0
# fine tuning 0-16, about 5 Hz per step
tuning_fine=2
period=int(ask.source_freq())//freq-tuning_coarse
print("period", period)
# coarse tuned for 77.5 kHz
# power level 2 (50% DTC)
on2=period//2
off2=period-on2
# power level 1 (adjust 25% amplitude on scope)
on1=on2//8
off1=period-on1
# debug - no level change
#on1=on2
#off1=off2
#on2=on1
#off2=off1
def tuning(t):
    """Build the RMT pulse lists pwr1 (low power) and pwr2 (full power),
    applying `t` steps of fine tuning by shortening `t` of the off-phases
    by one clock tick each."""
    global pwr1, pwr2
    m=16 # levels of fine tuning
    pwr1=[]
    pwr2=[]
    # alternating off/on durations: m carrier periods per list
    for i in range(m):
        pwr1.append(off1)
        pwr1.append(on1)
        pwr2.append(off2)
        pwr2.append(on2)
    for i in range(t):
        pwr1[i*2]-=1
        pwr2[i*2]-=1
    # print tuning results, should be around 77500 for both
    print("tuning", int(ask.source_freq())*m//sum(pwr1), int(ask.source_freq())*m//sum(pwr2), "Hz")
# write n bits of val at ith position, LSB first
# (bits are stored as ASCII '0'/'1', hence the 48+ offset)
@micropython.viper
def to_binary(i:int, val:int, n:int):
    m=ptr8(addressof(minute))
    for j in range(n):
        m[i+j]=48+(val&1)
        val >>= 1
# write n bits of bcd starting from minute[i]
@micropython.viper
def bcd(val:int, n:int):
    x=ptr8(addressof(index))
    i=x[0]
    if n>4:
        # BCD: low nibble = ones digit, remaining bits = tens digit
        to_binary(i,val%10,4)
        to_binary(i+4,val//10,n-4)
    else:
        to_binary(i,val,n)
    x[0]+=n
# write parity for previous n bits (even parity, as DCF77 requires)
@micropython.viper
def parity(n:int):
    x=ptr8(addressof(index))
    m=ptr8(addressof(minute))
    p=0
    j=x[0]-1
    for i in range(n):
        p^=m[j-i]
    m[x[0]]=48+(p&1)
    x[0]+=1
def generate_time():
    """Fill sendtime[] with the local time one minute ahead: the DCF77
    frame sent during minute N encodes the time of minute N+1."""
    t=time.time()+60
    lt=dst.localtime(t)
    # store Y,M,D,h,m,s,weekday; %100 keeps the year to two digits
    for i in range(7):
        sendtime[i]=lt[i]%100
    if dst.is_dst(t):
        sendtime[cdst]=1
    else:
        sendtime[cdst]=0
# convert timebcd to minute bits
@micropython.viper
def generate_minute():
    """Encode sendtime[] into the 59 per-second bits of minute[]."""
    m=ptr8(addressof(minute))
    x=ptr8(addressof(index))
    # bits 0-16 are transmitted as zeros
    for i in range(17):
        m[i]=48
    # skip first 17 bits
    x[0]=17
    #to_binary(0,0x55,8)
    # DST announcement bits
    if sendtime[cdst]:
        bcd(1,2)
    else:
        bcd(2,2)
    # start time code
    bcd(2,2)
    # minutes + parity
    bcd(sendtime[cminute],7)
    parity(7)
    # hours + parity bit
    bcd(sendtime[chour],6)
    parity(6)
    # day of month
    bcd(sendtime[cday],6)
    # day of week (1=Monday .. 7=Sunday)
    bcd(int(sendtime[cweekday])+1,3)
    # month
    bcd(sendtime[cmonth],5)
    # year (0-99) + parity for all 22 date bits
    bcd(sendtime[cyear],8)
    parity(22)
def set_ntp():
    """Sync the RTC from NTP; on failure set ntpday=0, which mutes the
    transmitter until a sync succeeds."""
    global ntpday
    try:
        settime()
        ntpday=time.localtime()[cday]
    except:
        # NOTE(review): bare except is deliberate best-effort here -
        # network failures on MicroPython raise various exception types
        ntpday=0
def second_tick(t):
    """1 Hz timer callback: transmit one DCF77 bit per second (seconds
    0-58), then regenerate the next minute frame on second 59."""
    #global pwr1, pwr2
    p=memoryview(second)
    m=memoryview(minute)
    # display coordinates for the current bit (15 bits per row)
    xd=(p[0]%15)*8
    yd=(p[0]//15)*12+17
    if p[0]<59:
        if ntpday>0:
            bit=m[p[0]]&1
            led.on()
            # amplitude drop of 100 ms encodes 0, 200 ms encodes 1
            ask.write_pulses(pwr1,start=0)
            time.sleep_ms(100*(bit+1))
            ask.write_pulses(pwr2,start=0)
            led.off()
            if disp:
                oled.text("%d"%bit,xd,yd)
                oled.hline(xd,yd+10,(bit+1),1)
                oled.hline(xd+bit+1,yd+8,7-bit,1)
        if disp:
            oled.show()
        p[0]+=1
    else:
        # second 59: housekeeping - resync NTP once per day at minute 30
        if ntpday==0 or (sendtime[cday]!=ntpday and sendtime[cminute]==30):
            set_ntp()
            if ntpday==0:
                ask.write_pulses([4000],start=0) # turn off transmitter
                get_connection() # if not connected, reconnect to internet
        generate_time()
        generate_minute()
        # every 10 minutes synchronize seconds
        if sendtime[cminute]%10==5:
            p[0]=sendtime[csecond]
        else:
            p[0]=0
        if disp:
            oled.hline(xd,yd+8,8,1)
            oled.show()
            oled.fill(0)
            oled.text("DST%d %02d:%02d NTP%d" %
                (sendtime[cdst],sendtime[chour],sendtime[cminute],ntpday),0,0)
            oled.text("20%02d-%02d-%02d %2s" %
                (sendtime[cyear],sendtime[cmonth],sendtime[cday],weekdaystr[sendtime[cweekday]]),0,8)
        collect()
        wdt.feed()
def run():
    # 1 Hz tick drives both the bit transmission and housekeeping
    timer.init(mode=Timer.PERIODIC, period=1000, callback=second_tick)
# watchdog: reboot if the tick handler stalls for 2 minutes
wdt=WDT(timeout=120000)
set_ntp()
tuning(tuning_fine)
generate_time()
generate_minute()
# start transmitting at the current second within the minute
second[0]=sendtime[csecond]
print(minute)
wdt.feed()
run()
|
from settings import *
def nearby_cells_count(cells: list, x: int, y: int) -> int:
    """
    Return how many of the 8 Moore neighbours of (x, y) are alive (== 1).

    Cells on the grid border always report 0 neighbours, which keeps the
    indexing below in range (same behaviour as the original).
    """
    neighbours = []
    if 0 < x < cell_count - 1 and 0 < y < cell_count - 1:
        for i in range(x - 1, x + 2):
            for j in range(y - 1, y + 2):
                if (i, j) != (x, y):
                    neighbours.append(cells[i][j])
    # Bug fix: list.count() always returns an int, so the original
    # `res.count(1) == None` branch was dead code.
    return neighbours.count(1)
def nearby_cell_data(cells: list):
    """Build a cell_count x cell_count grid of live-neighbour counts."""
    data = [[0] * cell_count for _ in range(cell_count)]
    for x in range(len(cells)):
        for y in range(len(cells[x])):
            data[x][y] = nearby_cells_count(cells, x, y)
    return data
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
import numpy as np
import tvm
from tvm.contrib.hexagon.session import Session
# use pytest -sv to observe gtest output
# use --gtest_args to pass arguments to gtest
# for example to run all "foo" tests twice and observe gtest output run
# pytest -sv <this file> --gtests_args="--gtest_filter=*foo* --gtest_repeat=2"
@tvm.testing.requires_hexagon
def test_run_unit_tests(hexagon_session: Session, gtest_args):
    """Run the on-device Hexagon gtest suite over RPC and fail if any gtest fails."""
    try:
        func = hexagon_session._rpc.get_function("hexagon.run_unit_tests")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; the build hint is printed and the original error re-raised.
        print(
            "This test requires TVM Runtime to be built with a Hexagon gtest version using Hexagon API cmake flag -DUSE_HEXAGON_GTEST=/path/to/hexagon/sdk/utils/googletest/gtest"
        )
        raise
    gtest_error_code_and_output = func(gtest_args)
    # The first line of the RPC payload is the gtest return code; the remainder is the log.
    gtest_error_code = int(gtest_error_code_and_output.splitlines()[0])
    gtest_output = gtest_error_code_and_output.split("\n", 1)[-1]
    print(gtest_output)
    np.testing.assert_equal(gtest_error_code, 0)
|
import json

# Merge two COCO-style annotation files into one, renumbering image and
# annotation ids so they remain unique across both inputs.
# NOTE: the merged result is written back over the FIRST input file.
annotation_filename_1 = 'data/images/annotations/instances.json'
annotation_filename_2 = 'data/images/annotations/instances_remaining.json'


def _renumber_images(images, next_id):
    """Assign sequential ids to `images` in place, starting at `next_id`.

    Returns (old_id -> new_id mapping, next unused id).
    """
    id_map = {}
    for image in images:
        id_map[image['id']] = next_id
        image['id'] = next_id
        next_id += 1
    return id_map, next_id


def _renumber_annotations(annotations, image_id_map, next_id):
    """Remap each annotation's image_id and assign sequential annotation ids in place.

    Returns the next unused annotation id.
    """
    for annotation in annotations:
        annotation['image_id'] = image_id_map[annotation['image_id']]
        annotation['id'] = next_id
        next_id += 1
    return next_id


with open(annotation_filename_1, 'r') as f:
    annotations_1 = json.load(f)
with open(annotation_filename_2, 'r') as f:
    annotations_2 = json.load(f)

# Renumber images from both files into one contiguous id space.
map_1, image_nb = _renumber_images(annotations_1['images'], 0)
map_2, image_nb = _renumber_images(annotations_2['images'], image_nb)
new_images = annotations_1['images'] + annotations_2['images']

# Renumber annotations the same way, pointing them at the new image ids.
annotation_nb = _renumber_annotations(annotations_1['annotations'], map_1, 0)
annotation_nb = _renumber_annotations(annotations_2['annotations'], map_2, annotation_nb)
new_annotations = annotations_1['annotations'] + annotations_2['annotations']

annotations = {'annotations': new_annotations, 'images': new_images,
               'categories': annotations_1['categories']}
with open('data/images/annotations/instances.json', 'w') as f:
    json.dump(annotations, f)
import pymoo.problems as mop
from pymoo.factory import get_problem
import numpy as np
import time
import math
import mabs.utils.reproblems as reprob
class optimization_test_functions(object):
    """Dispatcher over a family of multi-objective benchmark problems.

    The benchmark is chosen by name at construction time and evaluated via
    `predict`, which returns a dict with objective values under 'F' and, for
    constrained problems, constraint values under 'G'.
    """

    # Distance matrix shared by both travelling-salesperson variants (4 cities).
    _TSP_GRAPH = [[0, 10, 15, 20], [10, 0, 35, 25],
                  [15, 35, 0, 30], [20, 25, 30, 0]]

    def __init__(self, test):
        # Benchmark name, e.g. 'sf1', 'zdt1', 'dtlz2', 'tsp', 'welded_beam', 're21', 'cre21'.
        self.test_name = test

    def predict(self, test_name, x, num_vars=2, num_objs=None):
        """Evaluate solution `x` on the configured benchmark.

        Note: dispatch uses self.test_name (set in __init__); the `test_name`
        parameter is ignored and kept only for backward compatibility.
        Ordering matters: 'cre' must be matched before the more general 're'.
        """
        if self.test_name == 'sf1':
            return self.schaffer_func_1(x)
        elif self.test_name == 'sf2':
            return self.schaffer_func_2(x)
        elif 'zdt' in self.test_name:
            return self.zdt(x, num_vars)
        elif self.test_name == 'tsp':
            return self.tsp(x)
        elif self.test_name == 'tsp_perm':
            return self.tsp_perm(x)
        elif self.test_name == 'welded_beam':
            return self.welded_beam(x)
        elif self.test_name == 'truss2d':
            return self.truss2d(x)
        elif 'dtlz' in self.test_name:
            return self.dtlz(x, num_vars, num_objs)
        elif 'cre' in self.test_name:
            return self.crep(x)
        elif 're' in self.test_name:
            return self.rep(x)

    def schaffer_func_1(self, x):
        """Schaffer function N.1.

        x[0] varies between -A and A; A from 10 to 10^5 is typically used,
        higher values increase the difficulty of the problem.
        """
        f1 = x[0] ** 2
        f2 = (x[0] - 2) ** 2
        return {'F': [float(f1), float(f2)]}

    def schaffer_func_2(self, x):
        """Schaffer function N.2; x[0] varies between -5 and 10."""
        v = x[0]
        if v <= 1:
            f1 = -v
        elif v <= 3:
            f1 = v - 2
        elif v <= 4:
            f1 = 4 - v
        else:
            f1 = v - 4
        f2 = (v - 5) ** 2
        return {'F': [float(f1), float(f2)]}

    def zdt(self, x, num_vars):
        """ZDT benchmark family, delegated to pymoo."""
        problem = get_problem(self.test_name, n_var=num_vars)
        return problem.evaluate(x, return_values_of=['F', 'G'], return_as_dictionary=True)

    def dtlz(self, x, num_vars, num_objs):
        """DTLZ benchmark family, delegated to pymoo."""
        problem = get_problem(self.test_name, n_var=num_vars, n_obj=num_objs)
        return problem.evaluate(x, return_values_of=['F', 'G'], return_as_dictionary=True)

    def _tsp_path_length(self, tour):
        """Length of the round trip starting at city 0, visiting `tour` in order, back to 0."""
        length = 0
        current = 0
        for city in tour:
            length += self._TSP_GRAPH[current][city]
            current = city
        return length + self._TSP_GRAPH[current][0]

    def tsp(self, x):
        """Travelling salesperson; x is a flat sequence of city indices (ints or strings)."""
        if isinstance(x[0], str):  # isinstance replaces the non-idiomatic type() == str
            x = [int(y) for y in x]
        return {'F': [self._tsp_path_length(x)]}

    def tsp_perm(self, x):
        """Travelling salesperson, permutation encoding; the tour is nested in x[0]."""
        # Always unwrap the nested tour: the original only unwrapped when the
        # entries were strings and crashed on an integer permutation.
        if isinstance(x[0][0], str):
            tour = [int(y) for y in x[0]]
        else:
            tour = list(x[0])
        return {'F': [self._tsp_path_length(tour)]}

    def welded_beam(self, x):
        """Constrained welded-beam design problem, delegated to pymoo."""
        problem = get_problem('welded_beam')
        return problem.evaluate(x, return_values_of=['F', 'G'], return_as_dictionary=True)

    def truss2d(self, x):
        """2D truss design problem, delegated to pymoo."""
        problem = get_problem('truss2d')
        return problem.evaluate(x, return_values_of=['F', 'G'], return_as_dictionary=True)

    def rep(self, x):
        """Unconstrained real-world (RE) problem from mabs.utils.reproblems."""
        problem = reprob.get_problem(self.test_name)
        objs = problem.evaluate(x)
        return {'F': objs.tolist()}

    def crep(self, x):
        """Constrained real-world (CRE) problem from mabs.utils.reproblems."""
        problem = reprob.get_problem(self.test_name)
        objs, consts = problem.evaluate(x)
        return {'F': objs.tolist(), 'G': consts.tolist()}
# Package/service identifiers used by the cockroachdb integration tests.
PACKAGE_NAME = 'cockroachdb'
SERVICE_NAME = 'cockroachdb'
# Default number of cockroachdb tasks (nodes) deployed by the service.
DEFAULT_TASK_COUNT = 3
DEFAULT_POD_TYPE = 'cockroachdb'
DEFAULT_TASK_NAME = 'node'
|
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import render, redirect
# Create your views here.
from login.models import User
def login(request):
    """Render the login page, pre-filling the remembered username.

    A user who already has a logged-in session is redirected straight to /main.
    """
    # `in` replaces the legacy `has_key` method, which is gone in Python 3 /
    # modern Django session backends.
    if 'islogin' in request.session:
        return redirect('/main')
    else:
        # Pre-fill the form with the username remembered in the cookie, if any.
        if 'username' in request.COOKIES:
            username = request.COOKIES['username']
        else:
            username = ''
        return render(request, 'login/login.html', {'username': username})
def login_check(request):
    """AJAX login check: respond '1' on success (remembering the user on request), '0' on failure."""
    query_dict = request.POST
    username = query_dict.get('username')
    password = query_dict.get('password')
    remember = query_dict.get('remember')
    # The old debug `print(username + remember)` raised TypeError whenever either
    # field was missing (None); format defensively instead.
    print('login attempt: username=%r remember=%r' % (username, remember))
    # NOTE(review): passwords are compared in plain text against the DB —
    # consider django.contrib.auth / hashing before production use.
    user = User.objects.filter(Q(username__exact=username) & Q(password__exact=password))
    if user:
        # Successful login.
        response = JsonResponse({'res': '1'})
        if remember == 'true':
            # Remember the username client-side for one week.
            response.set_cookie('username', username, max_age=7 * 24 * 3600)
        # Mark the session as logged in for one week.
        request.session['islogin'] = True
        request.session['username'] = username
        request.session.set_expiry(7 * 24 * 3600)
        return response
    else:
        # Wrong credentials.
        return JsonResponse({'res': '0'})
def main(request):
    """Render the main page shown after a successful login."""
    return render(request, 'login/main.html')
def ajax_test(request):
    """Render the AJAX demo page."""
    return render(request, 'login/ajax_test.html')
def ajax_handler(request):
    """Answer the demo AJAX request with a constant success payload."""
    return JsonResponse({'res': '1'})
# ---------------------CSRF跨域请求伪造----------------------------
def login_required(function):
    """Decorator that redirects to the login page unless the session is logged in."""
    from functools import wraps  # local import keeps the file's import block untouched

    @wraps(function)  # preserve the wrapped view's __name__/__doc__ for introspection
    def wrapper(request, *args, **kwargs):
        # `in` replaces the legacy `has_key` method (removed in Python 3).
        if 'islogin' in request.session:
            return function(request, *args, **kwargs)
        else:
            return redirect(reverse('login:login'))
    return wrapper
@login_required
def change_pwd(request):
    """Render the change-password page (login required)."""
    return render(request, 'login/change_pwd.html')
@login_required
def handler_change_pwd(request):
    """Handle the change-password form submission.

    NOTE(review): currently a stub — it always reports success because the
    actual database update below is commented out.  Confirm before relying
    on this endpoint.
    """
    # New password from the form.
    query_dict = request.POST
    pwd = query_dict.get('pwd')
    # Username of the logged-in user, taken from the session.
    username = request.session.get('username')
    return JsonResponse({'res': '1'})
    # Update this user's password in the database (disabled):
    # user = User.objects.get(username=username)
    # if user:
    #     user.password = pwd
    #     user.save()
    #     return JsonResponse({'res': '1'})
    # else:
    #     return JsonResponse({'res': '0'})
|
import ijmfttxt
# Initialize and set up the connection to the TXT controller.
txt = ijmfttxt.TXT()
# NOTE(review): hl/vl/vr/hr presumably abbreviate rear/front left/right wheels
# (German hinten/vorne links/rechts) — confirm against the hardware wiring.
# Get Motor connected to Output 1
motor_hl = txt.motor(1)
# Get Motor connected to Output 2
motor_vl = txt.motor(2)
# Get Motor connected to Output 3
motor_vr = txt.motor(3)
# Get Motor connected to Output 4
motor_hr = txt.motor(4)
# Get the gesture/proximity sensor connected via the i2c connection.
gesture_sensor = txt.gestureSensor()
# Turn gesture sensor on
gesture_sensor.turnOn()
# Set the four drive motors according to the requested direction.
def controll_robot(ud, lr):
    """Drive the robot: ud>0 forward / ud<0 backward, lr>0 left / lr<0 right, both zero stops."""
    if ud != 0:
        # Forward/backward: all four wheels run in the same direction.
        for motor in (motor_hl, motor_vl, motor_vr, motor_hr):
            motor.setSpeed(8 * ud)
    elif lr != 0:
        # Strafe left/right: the right-hand wheels turn opposite to the left-hand ones.
        motor_hl.setSpeed(8 * lr)
        motor_vr.setSpeed(8 * -lr)
        motor_vl.setSpeed(8 * lr)
        motor_hr.setSpeed(8 * -lr)
    else:
        # No direction requested: stop every motor.
        for motor in (motor_hl, motor_vl, motor_vr, motor_hr):
            motor.stop()
# Accumulated drive command: each UP/DOWN gesture speeds up the forward/backward
# motion by one step, each LEFT/RIGHT gesture the strafing motion; switching the
# axis resets the other one.
# Direction for forwards/backwards
up_down = 0
# Direction for left/right
left_right = 0
# Repeat forever
while True:
    # Get latest detected Motion
    gesture = gesture_sensor.getGesture()
    if gesture == "UP":
        # Set direction values for forward motion
        up_down += 1
        left_right = 0
    elif gesture == "DOWN":
        # Set direction values for backward motion
        up_down -= 1
        left_right = 0
    elif gesture == "LEFT":
        # Set direction values for left-strave motion
        left_right += 1
        up_down = 0
    elif gesture == "RIGHT":
        # Set direction values for right-strave motion
        left_right -= 1
        up_down = 0
    controll_robot(up_down, left_right)
|
import os
import json
import codecs
import sys
from keras import layers
from keras import models
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
# This is module with image preprocessing utilities
from keras.applications import VGG16
from keras.callbacks import Callback
from keras.callbacks import ModelCheckpoint
import numpy as np
class LossHistory(Callback):
    """Keras callback that appends each finished epoch's metrics to the on-disk history JSON."""
    # https://stackoverflow.com/a/53653154/852795
    def on_epoch_end(self, epoch, logs=None):
        new_history = {}
        for k, v in logs.items(): # compile new history from logs
            new_history[k] = [v] # convert values into lists
        current_history = loadHist(history_filename) # load history from current training
        current_history = appendHist(current_history, new_history) # append the logs
        saveHist(history_filename, current_history) # save history from current training
def saveHist(path, history):
    """Persist the training-history dict to `path` as pretty-printed UTF-8 JSON."""
    # Built-in open() with an explicit encoding replaces the legacy codecs.open().
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(history, f, separators=(',', ':'), sort_keys=True, indent=4)
def loadHist(path):
    """Load a previously saved training-history JSON; return {} when the file does not exist."""
    if not os.path.exists(path):
        return {}
    # Built-in open() + json.load replaces codecs.open + json.loads(f.read()).
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)
def appendHist(h1, h2):
    """Concatenate the per-metric lists of two history dicts (h1's values first).

    Unlike the original, metrics present in only one of the dicts are now kept:
    the old code raised KeyError for h1-only keys and silently dropped h2-only keys.
    """
    if not h1:
        return h2
    merged = {}
    for key in set(h1) | set(h2):
        merged[key] = h1.get(key, []) + h2.get(key, [])
    return merged
# def extract_features(directory, sample_count):
# # features is the last layer
# # since we have 5 MaxPooling2D, the last layer shape should be (None, 6, 6, 512)
# features = np.zeros(shape=(sample_count, 6, 6, 512))
# labels = np.zeros(shape=(sample_count))
# generator = datagen.flow_from_directory(
# directory,
# target_size=(img_height, img_width),
# batch_size=batch_size,
# class_mode='binary')
# i = 0
# for inputs_batch, labels_batch in generator:
# # print('i = ', i)
# features_batch = conv_base.predict(inputs_batch)
# # print('inputs_batch.shape = ', inputs_batch.shape) # (20, 200, 200, 3)
# features[i*batch_size: (i + 1)*batch_size] = features_batch
# labels[i*batch_size: (i + 1)*batch_size] = labels_batch
# i += 1
# if i * batch_size >= sample_count:
# # Note that since generators yield data indefinitely in a loop,
# # we must `break` after every image has been seen once.
# break
# return features, labels
def smooth_curve(points, factor=0.8):
    """Exponentially smooth a series: each output mixes the previous smoothed
    value (weight `factor`) with the current point (weight 1 - factor)."""
    smoothed = []
    for value in points:
        if not smoothed:
            # First point passes through unchanged.
            smoothed.append(value)
        else:
            smoothed.append(smoothed[-1] * factor + value * (1 - factor))
    return smoothed
# The directory where we store our augmented dataset
# (we still do data augmentation during the training)
base_dir = '../data/images/5AugmentedImages_2Classes_split'
if not os.path.exists(base_dir):
    print(base_dir, 'does NOT exists!')
    sys.exit()
# Set parameters
img_height = 200
img_width = 200
train_sample_num = 624
val_sample_num = 208
test_sample_num = 208
batch_size = 20
# NOTE(review): steps_per_epoch is computed here but never passed to
# fit_generator below — confirm whether it is still needed.
steps_per_epoch = train_sample_num//batch_size + 1
epochs = 50
smooth_factor = 0 # For plotting the accuracy and loss
# All model/history/result artifacts are written under output_dir.
output_dir = '../data/output/VGG16_2classes_all_new'
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
model_filename = output_dir + '/foodsafety_VGG16_2classes.h5'
history_filename = output_dir + '/model-history.json'
test_result_filename = output_dir + '/test_result.json'
# Directories for our training, validation and testing splits
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'val')
test_dir = os.path.join(base_dir, 'test')
# VGG16 has 13 Convolutional Layer and 3 Fully connected Layer. Thus 16=13+3
conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(img_height, img_width, 3))
conv_base.summary()
# # Generate data inputs for training, validation and testing
# datagen = ImageDataGenerator(rescale=1./255)
#
# train_features, train_labels = extract_features(train_dir, train_sample_num)
# print('train_features.shape = ', train_features.shape)
# print('train_labels.shape = ', train_labels.shape)
# validation_features, validation_labels = extract_features(validation_dir, val_sample_num)
# print('validation_features.shape = ', validation_features.shape)
# print('validation_labels.shape = ', validation_labels.shape)
# test_features, test_labels = extract_features(test_dir, test_sample_num)
# print('test_features.shape = ', test_features.shape)
# print('test_labels.shape = ', test_labels.shape)
#
# train_features = np.reshape(train_features, (train_sample_num, 6 * 6 * 512))
# validation_features = np.reshape(validation_features, (val_sample_num, 6 * 6 * 512))
# test_features = np.reshape(test_features, (test_sample_num, 6 * 6 * 512))
# Construct model layers
if os.path.exists(model_filename):
    # Load the model if a saved model already exists (resume training).
    model = models.load_model(model_filename)
    # summarize model.
    model.summary()
else:
    # Fresh model: VGG16 convolutional base + small binary classification head.
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.summary()
print('This is the number of trainable weights '
      'before conv_base.trainable = True:', len(model.trainable_weights))
# Unfreeze the whole convolutional base for fine-tuning.
# NOTE(review): when the model was loaded from disk, conv_base is a separate
# object and this flag only affects it if it is part of the loaded graph — confirm.
conv_base.trainable = True
# # if we want to finetune some layers
# # set the layers before block5_conv1 to untrainable
# # set the layers equal or after block5_conv1 to trainable
# set_trainable = False
# for layer in conv_base.layers:
# print('layer.name = ', layer.name)
# if layer.name == 'block5_conv1':
# set_trainable = True
# if set_trainable:
# layer.trainable = True
# else:
# layer.trainable = False
# print('This is the number of trainable weights '
# 'after conv_base.trainable = True:', len(model.trainable_weights))
# Augment training data (rotation + horizontal flips only; other transforms disabled).
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=180,
    # width_shift_range=0.2,
    # height_shift_range=0.2,
    # shear_range=0.2,
    # zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
# Note that the validation data should not be augmented!
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    # This is the target directory
    train_dir,
    # All images will be resized to img_height x img_width
    target_size=(img_height, img_width),
    batch_size=batch_size,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')
# Compile model
print('Compile model')
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.Adam(lr=1e-5),
              # optimizer=optimizers.RMSprop(lr=1e-5),
              metrics=['acc'])
# Create checkpoints: save the best model after every epoch, plus append metrics
# to the on-disk history JSON via LossHistory.
print('Create checkpoints')
# NOTE(review): mode='min' with ModelCheckpoint's default monitor (val_loss) —
# confirm that is the intended selection criterion.
model_checkpoint = ModelCheckpoint(model_filename, verbose=1, period=1, save_best_only=True, mode='min')
history_checkpoint = LossHistory()
callbacks_list = [model_checkpoint, history_checkpoint]
print('Now training starts!')
vgg16_history = model.fit_generator(
    train_generator,
    epochs=epochs,
    validation_data=validation_generator,
    callbacks=callbacks_list)
# In model.fit_generator(), there are many parameters we can tune
# steps_per_epoch= ?? # It is the number of steps per epoch. By default it should be queals to data_num/batchsize, which is len(DataGenerator).
# If we set this number smaller than the default, then each epoch will end earlier.
# validation_steps=?? # It is the number of data we want to evaluate in each evaluation epoch. By default it should be queals to len(DataGenerator).
# And there are many more others...
# print('appendHist ...')
# history = appendHist(history, new_history.history)
# Plot accuracy and loss from the cumulative on-disk history (covers resumed runs,
# not only the epochs of this process).
historical_history = loadHist(history_filename) # load history from current training
print('Plot accuracy and loss ...')
# acc = vgg16_history.history['acc']
# val_acc = vgg16_history.history['val_acc']
# loss = vgg16_history.history['loss']
# val_loss = vgg16_history.history['val_loss']
acc = historical_history['acc']
val_acc = historical_history['val_acc']
loss = historical_history['loss']
val_loss = historical_history['val_loss']
# NOTE(review): this rebinds the `epochs` config value (50) to a range object.
epochs = range(len(acc))
# Accuracy figure: solid line + markers for training, dashed + circles for validation.
plt.figure()
plt.plot(epochs, acc, 'b-', label='Training acc')
plt.plot(epochs, acc, 'b*', label='Training acc')
plt.plot(epochs, val_acc, 'r--', label='Validation acc')
plt.plot(epochs, val_acc, 'ro', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
# plt.show()
image_name = output_dir + '/accuracy_smooth_'+str(smooth_factor)+'.png'
plt.savefig(image_name)
plt.close()
# Loss figure, same styling.
plt.figure()
plt.plot(epochs, loss, 'b-', label='Training loss')
plt.plot(epochs, loss, 'b*', label='Training loss')
plt.plot(epochs, val_loss, 'r--', label='Validation loss')
plt.plot(epochs, val_loss, 'ro', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
image_name = output_dir + '/loss_smooth_'+str(smooth_factor)+'.png'
plt.savefig(image_name)
plt.close()
# Final evaluation on the held-out test split; results are saved as JSON.
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')
test_loss, test_acc = model.evaluate_generator(test_generator, steps=test_sample_num//batch_size)
print('test loss:', test_loss)
print('test acc:', test_acc)
test_result = {}
test_result['test acc'] = [test_acc]
test_result['test loss'] = [test_loss]
saveHist(test_result_filename, test_result)
|
from settings import TOKEN, headers
import telebot
from telebot import types
import requests
from bs4 import BeautifulSoup
import sqlite3
# Single bot instance shared by every handler below.
bot = telebot.TeleBot(TOKEN)
@bot.message_handler(commands=["start"])
def start(message):
    """Show the main reply keyboard (currency rates, weather, joke)."""
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
    # One button per row, in the same order as before.
    for caption in ("Курс валют", "Погода", "Анекдот"):
        markup.add(types.KeyboardButton(caption))
    bot.send_message(message.chat.id, 'Чего желаете?', reply_markup=markup)
@bot.message_handler(content_types=["text"])
def handle_text(message):
    """Route plain-text messages to the matching feature handler."""
    text = message.text.strip()
    if text == 'Курс валют':
        course(message)
    elif text == 'Погода':
        a = bot.send_message(message.chat.id, "Введите название города: ")
        bot.register_next_step_handler(a, weather)
    elif text == 'Анекдот':
        joke(message)
    elif text == '/help':
        answer = 'Напишите /start чтобы вызвать меню или напишите:\nПогода\nКурс валют\nАнекдот '
        bot.send_message(message.chat.id, answer)
    elif text == 'Привет':
        # Bug fix: parse_mode was previously passed to str.format() (where it was
        # silently ignored, together with an unused bot.get_me()) instead of to
        # send_message().
        bot.send_message(message.chat.id,
                         'Привет, {0.first_name}!'.format(message.from_user),
                         parse_mode='html')
    else:
        bot.send_message(message.chat.id, 'Извините, я не понимаю, напишите /help, чтобы посмотреть команды')
def weather(message):
    """Scrape Google's weather widget for the city the user typed and send a summary."""
    try:
        city = message.text
        city = city + " weather"
        city = city.replace(" ", "+")
        res = requests.get(
            f'https://www.google.com/search?q={city}&oq={city}&aqs=chrome.0.35i39l2j0l4j46j69i60.6128j1j7&sourceid'
            f'=chrome&ie=UTF-8', headers=headers)
        # The #wob_* ids belong to Google's weather widget — brittle if Google
        # changes its markup; any failure falls through to the except branch.
        soup = BeautifulSoup(res.text, 'html.parser')
        location = soup.select('#wob_loc')[0].getText().strip()
        time = soup.select('#wob_dts')[0].getText().strip()
        info = soup.select('#wob_dc')[0].getText().strip()
        wind = soup.select('#wob_ws')[0].getText().strip()
        weather = soup.select('#wob_tm')[0].getText().strip()
        answer = location + '\n' + time + '\n' + info + '\n' + wind + '\n' + weather + "°C"
    except Exception:
        answer = "Город не найден, попробуйте еще раз"
    bot.send_message(message.chat.id, answer)
    # Re-show the main menu after answering.
    start(message)
def course(message):
    """Scrape current USD/RUB and EUR/RUB rates from ru.investing.com and send them."""
    # NOTE(review): scraping by CSS class "text-2xl" is brittle; find() returns
    # None if the page layout changes, which would raise on .text below.
    response = requests.get('https://ru.investing.com/currencies/usd-rub')
    bs = BeautifulSoup(response.text, "lxml")
    dollar = bs.find('span', class_="text-2xl")
    response2 = requests.get('https://ru.investing.com/currencies/eur-rub')
    bs2 = BeautifulSoup(response2.text, "lxml")
    euro = bs2.find('span', class_="text-2xl")
    ans = 'Курс доллара: ' + str(dollar.text) + '\n' + 'Курс евро: ' + str(euro.text)
    bot.send_message(message.chat.id, ans)
def joke(message):
    """Send one random joke from the local anekdot.db SQLite database."""
    connection = sqlite3.connect('anekdot.db')
    try:
        cursor = connection.cursor()
        cursor.execute('SELECT anekdot FROM anekdot ORDER BY RANDOM() LIMIT 1;')
        row = cursor.fetchone()
    finally:
        # sqlite3 connections are NOT closed by `with`; close explicitly to
        # avoid leaking a handle per request.
        connection.close()
    # fetchone() returns a 1-tuple (or None on an empty table); send the text
    # itself rather than the tuple's repr.
    bot.send_message(message.chat.id, row[0] if row else 'Анекдоты закончились :(')
@bot.message_handler(content_types=['sticker'])
def get_text_messages(message):
    """Reply to stickers asking the user to type text instead."""
    bot.send_message(message.from_user.id, "Предпочитаю общаться текстом :)")

# Start long polling; none_stop keeps the bot alive through transient API errors.
bot.polling(none_stop=True, interval=0)
|
# Copyright 2020 Francesco Ceccon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import pyomo.environ as pe
from coramin.relaxations.mccormick import PWMcCormickRelaxation
from pooling_network.network import Network
from pooling_network.pooling import (
compute_beta_kl_bounds,
compute_gamma_ijk,
compute_gamma_kl_bounds,
problem_pool_output_qualities,
index_set_lj,
index_set_jkl
)
def _generate_pooling_inequalities(block: pe.Block, parent: pe.Block, pool_name: str, output_name: str,
                                   quality_name: str, problem: Network, violation_threshold: Optional[float] = None):
    """Yield (constraint_expr, cut_info) pairs for the pooling cuts (Eq. 28 / Eq. 22)
    of one (pool, output, quality) triple.

    When `violation_threshold` is given, only cuts whose current violation
    exceeds the threshold are yielded; otherwise every applicable cut is.
    """
    gamma_lower, gamma_upper = compute_gamma_kl_bounds(
        pool_name, output_name, quality_name, problem
    )
    beta_lower, beta_upper = compute_beta_kl_bounds(
        pool_name, output_name, quality_name, problem
    )
    if beta_lower is None or beta_upper is None:
        return
    assert gamma_lower is not None and gamma_upper is not None

    # Shared, immutable part of every cut's metadata.
    base_info = {
        'pool': pool_name,
        'output': output_name,
        'quality': quality_name,
        'gamma_lower': gamma_lower,
        'gamma_upper': gamma_upper,
        'beta_lower': beta_lower,
        'beta_upper': beta_upper,
    }

    y = block.y[output_name, quality_name, pool_name]
    x = block.s[pool_name, output_name]
    u = block.u[output_name, quality_name, pool_name]
    t = block.t[output_name, quality_name, pool_name]

    def _check(expr, cut_type):
        """Return (violated?, fresh per-cut info dict) for `expr`."""
        # Bug fix: each yielded cut now gets its OWN info dict.  The original
        # mutated one shared dict between the two yields, so a consumer keeping
        # the first cut's info saw it silently overwritten by the second's.
        info = dict(base_info)
        info['type'] = cut_type
        info['viol'] = 0.0
        violated = True
        if violation_threshold is not None:
            expr_value = pe.value(expr, exception=False)
            if expr_value is not None:
                violated = expr_value > violation_threshold
                info['viol'] = expr_value
        return violated, info

    if beta_lower < 0:
        # Eq 28
        expr = (
            (gamma_lower - beta_lower)*(gamma_upper*x - u)
            + beta_lower*(gamma_upper - t)
        )
        violated, cut_info = _check(expr, 'inequality_28')
        if violated:
            yield expr <= 0, cut_info

    if beta_upper > 0:
        # Eq 22
        expr = (
            (gamma_upper - gamma_lower)*y
            + gamma_lower*(gamma_upper*x - u)
            + beta_upper*(u - gamma_lower * x)
            - beta_upper*(t - gamma_lower)
        )
        violated, cut_info = _check(expr, 'inequality_22')
        if violated:
            yield expr <= 0, cut_info
def add_pooling_inequalities(block: pe.Block, parent: pe.Block, pool_name: str, output_name: str, quality_name: str,
                             problem: Network):
    """Materialize every generated pooling inequality for one (pool, output, quality) triple."""
    for inequality, _ in _generate_pooling_inequalities(block, parent, pool_name, output_name, quality_name, problem):
        block._inequalities.add(inequality)
def _t_bounds(problem):
def _bounds(m, j, k, l):
return compute_gamma_kl_bounds(l, j, k, problem)
return _bounds
def add_all_pooling_inequalities_variables(block: pe.Block, parent: pe.Block, problem: Network):
    """Declare the auxiliary variables (z, s, u, y, t) used by the pooling cuts,
    together with the constraints defining them from `parent`'s flow variables.

    All of z, s, u, y are scaled by the output's upper capacity; t is not.
    """
    block.z = pe.Var(index_set_lj(problem), bounds=(0, None))
    # s is the scaled parent.y[l, j]
    block.s = pe.Var(index_set_lj(problem), bounds=(0, None))
    block.u = pe.Var(index_set_jkl(problem))
    block.y = pe.Var(index_set_jkl(problem), initialize=1.0)
    block.t = pe.Var(index_set_jkl(problem), bounds=_t_bounds(problem))
    @block.Constraint(index_set_lj(problem))
    def z_def(b, l, j):
        # Total (scaled) flow into output j that does NOT pass through pool l.
        # NOTE(review): only the input-arc sum is scaled by 1/cap_upper; the
        # pool-arc sum is presumably already scaled — confirm.
        output = problem.nodes[j]
        cap_lower, cap_upper = output.capacity
        return b.z[l, j] == (1/cap_upper) * sum(
            parent.z[input.name, j] for input in problem.predecessors(j, layer=0)
        ) + sum(
            parent.y[pool_.name, j] for pool_ in problem.predecessors(j, layer=1)
            if l != pool_.name
        )
    @block.Constraint(index_set_lj(problem))
    def s_def(b, l, j):
        # Scaled flow from pool l to output j, summed over the pool's inputs.
        output = problem.nodes[j]
        cap_lower, cap_upper = output.capacity
        return b.s[l, j] == (1 / cap_upper) * sum(
            parent.v[input.name, l, j]
            for input in problem.predecessors(l, layer=0)
        )
    @block.Constraint(index_set_jkl(problem))
    def u_def(b, j, k, l):
        # Scaled quality-k load carried from pool l into output j.
        output = problem.nodes[j]
        cap_lower, cap_upper = output.capacity
        return b.u[j, k, l] == (1/cap_upper) * sum(
            compute_gamma_ijk(input, output, k) * parent.v[input.name, l, j]
            for input in problem.predecessors(l, layer=0)
        )
    @block.Constraint(index_set_jkl(problem))
    def y_def(b, j, k, l):
        # Scaled quality-k load into output j via every path that avoids pool l.
        output = problem.nodes[j]
        cap_lower, cap_upper = output.capacity
        return b.y[j, k, l] == (1/cap_upper) * (
            sum(
                compute_gamma_ijk(input, output, k) * parent.z[input.name, j]
                for input in problem.predecessors(j, layer=0)
            ) + sum(
                compute_gamma_ijk(input, output, k) * parent.v[input.name, pool_.name, j]
                for pool_ in problem.predecessors(j, layer=1)
                for input in problem.predecessors(pool_.name, layer=0)
                if pool_.name != l
            )
        )
    @block.Constraint(index_set_jkl(problem))
    def t_def(b, j, k, l):
        # NOTE: this variable is not scaled.
        output = problem.nodes[j]
        return b.t[j, k, l] == sum(
            compute_gamma_ijk(i, output, k) * parent.q[i.name, l]
            for i in problem.predecessors(l, layer=0)
        )
def add_all_pooling_cuts_variables(block: pe.Block, parent: pe.Block, problem: Network):
    """Declare the cut helper variables and the constraints that define them
    from the u/s/t variables added by add_all_pooling_inequalities_variables."""
    block.cut_var_v = pe.Var(index_set_jkl(problem))
    block.cut_var_cone = pe.Var(index_set_jkl(problem))
    @block.Constraint(index_set_jkl(problem))
    def cut_var_v_def(b, j, k, l):
        # cut_var_v = u - gamma_lower * s
        gamma_lower, _ = compute_gamma_kl_bounds(l, j, k, problem)
        return b.cut_var_v[j, k, l] == b.u[j, k, l] - gamma_lower*b.s[l, j]
    @block.Constraint(index_set_jkl(problem))
    def cut_var_cone_def(b, j, k, l):
        beta_lower, beta_upper = compute_beta_kl_bounds(l, j, k, problem)
        gamma_lower, gamma_upper = compute_gamma_kl_bounds(l, j, k, problem)
        # The cone variable is only defined where beta_lower < 0 (cf. Eq 28 cuts).
        if beta_lower is None or beta_lower >= 0:
            return pe.Constraint.Skip
        return b.cut_var_cone[j, k, l] == (
            - beta_lower*(b.t[j, k, l] - gamma_lower)
            + (beta_lower - gamma_lower)*(b.u[j, k, l] - gamma_lower*b.s[l, j])
        )
def add_all_ust_equations(block: pe.Block, parent: pe.Block, problem: Network):
    """Attach a piecewise McCormick relaxation of u = s * t for every
    (pool, output, quality) triple, stored as block.uxt.rel1, rel2, ..."""
    block.uxt = pe.Block()
    uxt_count = 1
    for pool_name, output_name, quality_name in problem_pool_output_qualities(problem):
        u = block.u[output_name, quality_name, pool_name]
        s = block.s[pool_name, output_name]
        t = block.t[output_name, quality_name, pool_name]
        rel = PWMcCormickRelaxation()
        rel.set_input(aux_var=u, x1=s, x2=t)
        # The relaxation must be attached to the block before rebuild()
        # generates its constraints.
        setattr(block.uxt, 'rel' + str(uxt_count), rel)
        rel.rebuild()
        uxt_count += 1
def add_all_pooling_inequalities(block: pe.Block, parent: pe.Block, problem: Network, add_variables=True,
                                 add_inequalities=True, add_uxt=True):
    """Top-level driver: add the auxiliary variables, the pooling inequalities,
    and the u = s*t relaxations; each stage can be toggled independently."""
    if add_variables:
        add_all_pooling_inequalities_variables(block, parent, problem)
        add_all_pooling_cuts_variables(block, parent, problem)
        block._inequalities = pe.ConstraintList()
        block._cuts = pe.ConstraintList()
    if add_inequalities:
        for pool_name, output_name, quality_name in problem_pool_output_qualities(problem):
            add_pooling_inequalities(
                block, parent, pool_name, output_name, quality_name, problem
            )
    if add_uxt:
        add_all_ust_equations(block, parent, problem)
from datetime import date, datetime
# Timestamp layout applied to every serialized date/datetime value.
dateformat = "%b %d %Y %H:%M:%S"


def json_serial(obj):
    """JSON `default=` hook: format date/datetime objects, reject everything else."""
    if not isinstance(obj, (datetime, date)):
        raise TypeError("Type %s not serializable" % type(obj))
    return obj.strftime(dateformat)
from flask import Flask, redirect, render_template, request, url_for
import sys
import os
import helpers
from analyzer import Analyzer
# Number of tweets requested from a user's timeline per search.
N_TWEETS = 100

app = Flask(__name__)
def get_scores(tweets):
    """Classify each tweet's sentiment and return (positive, negative, neutral) fractions."""
    # absolute paths to the word lists shipped next to the app
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)

    # tally each tweet into one of three buckets
    n_pos = 0.0
    n_neg = 0.0
    n_neut = 0.0
    for t in tweets:
        score = analyzer.analyze(t)
        if score > 0.0:
            n_pos += 1
        elif score < 0.0:
            n_neg += 1
        else:
            n_neut += 1

    # Normalize by the number of tweets actually analyzed rather than by the
    # requested N_TWEETS: the timeline may return fewer tweets, in which case
    # the old code produced fractions that did not sum to 1.
    total = len(tweets) or 1  # guard against an empty timeline
    return n_pos / total, n_neg / total, n_neut / total
@app.route("/")
def index():
    """Render the landing page with the search form."""
    return render_template("index.html")
@app.route("/search")
def search():
    """Analyze the sentiment of a user's recent tweets and render a pie chart."""
    # validate screen_name (a leading @ is tolerated and stripped)
    screen_name = request.args.get("screen_name", "").lstrip("@")
    if not screen_name:
        return redirect(url_for("index"))
    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, count=N_TWEETS)
    # TODO
    # color print
    positive, negative, neutral = get_scores(tweets)
    # generate chart
    chart = helpers.chart(positive, negative, neutral)
    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
|
def case_lookup(request):
    """Show the case-lookup form; on POST, redirect to the submitted case's detail page."""
    if request.method == 'POST':
        # .get avoids the KeyError (HTTP 500) the old direct indexing raised
        # when the form field was missing; re-show the form instead.
        case_id = request.POST.get('case-id')
        if not case_id:
            return render(request, 'case_lookup.html')
        url = reverse('case-details', kwargs={'case_id': case_id})
        return redirect(url)
    else:
        return render(request, 'case_lookup.html')
@login_required
def case_details(request, case_id=None):
    '''
    Removed from main app because it's not clear whether a Client would need to login
    to see Case details. They probably know what's going on through their correspondence
    with their Attorney.
    '''
    if request.method == 'GET':
        try:
            c = Case.objects.get(case_id=case_id)
        except Case.DoesNotExist:
            # Unknown case id: send the user back to the lookup form instead of a 500.
            return redirect(reverse('case-lookup'))
        return render(
            request,
            'case.html',
            {
                'opened_on': c.opened_on,
                'attorney': c.attorney.name,
                'case_id': c.case_id,
                'is_open': c.is_open,
                'closed_on': c.closed_on
            }
        )
    else:
        # Any non-GET request falls back to the lookup form.
        url = reverse('case-lookup')
        return redirect(url)
import datetime
from modules.Module import Module
class Date(Module):
    """Voice module that speaks the current date, e.g. 'It is Monday January 11th, 2021'."""

    def __init__(self, pipe):
        # Bug fix: super().__init__ was called with an extra explicit `self`
        # (`super().__init__(self, pipe)`), shifting every argument by one.
        super().__init__(pipe)

    def run(self, command: str, regex) -> None:
        """Speak today's date; exceptions are logged rather than propagated."""
        try:
            now = datetime.datetime.now()
            day = self._parse_day(now)
            self.say('It is {0:%A} {0:%B} {1}, {0:%Y}'.format(now, day))
        except Exception as e:
            self.log_exception(e)

    def _parse_day(self, now: datetime.date) -> str:
        """Return the day of month with its English ordinal suffix (1st, 2nd, ..., 11th)."""
        day = f'{now:%d}'.lstrip('0')
        # 11/12/13 take 'th' despite ending in 1/2/3 (bug fix: was 11st/12nd/13rd).
        if day in ('11', '12', '13'):
            return f'{day}th'
        if day.endswith('1'):
            return f'{day}st'
        if day.endswith('2'):
            return f'{day}nd'
        if day.endswith('3'):
            return f'{day}rd'
        return f'{day}th'
|
class Region(object):
    """Enum-like constants, presumably the left/middle/right lighting regions — confirm."""
    LEFT = 1
    MIDDLE = 2
    RIGHT = 3
    # Filled in by _enum_vals() at the bottom of this file:
    # _names: name -> value, _values: value -> name.
    _names = None
    _values = None
    @classmethod
    def names(cls):
        return cls._names
    @classmethod
    def values(cls):
        return cls._values
class Color(object):
    """Enum-like color constants (OFF plus eight colors)."""
    OFF = 0
    RED = 1
    ORANGE = 2
    YELLOW = 3
    GREEN = 4
    SKY = 5
    BLUE = 6
    PURPLE = 7
    WHITE = 8
    # Filled in by _enum_vals() at the bottom of this file.
    _names = None
    _values = None
    @classmethod
    def names(cls):
        return cls._names
    @classmethod
    def values(cls):
        return cls._values
class Level(object):
    """Enum-like intensity levels; note the values DEcrease as brightness increases."""
    LIGHT = 3
    LOW = 2
    MED = 1
    HIGH = 0
    # Filled in by _enum_vals() at the bottom of this file.
    _names = None
    _values = None
    @classmethod
    def names(cls):
        return cls._names
    @classmethod
    def values(cls):
        return cls._values
class Mode(object):
    """Enum-like constants for the backlight animation modes."""

    NORMAL = 1
    GAMING = 2
    BREATHE = 3
    DEMO = 4
    WAVE = 5

    # name->value / value->name lookup maps; None until populated by
    # _enum_vals() at module import time.
    _names = None
    _values = None

    @classmethod
    def names(cls):
        # Mapping of constant name -> value (None until populated).
        return cls._names

    @classmethod
    def values(cls):
        # Mapping of value -> constant name (None until populated).
        return cls._values
def _enum_vals(enums):
for enum in enums:
d = dict(enum.__dict__)
for k, v in enum.__dict__.items():
if k[0] == '_' or type(v) == classmethod:
del d[k]
enum._names = d
enum._values = {v: k for k, v in d.items()}
# Populate the name/value lookup tables on every enum-like class above.
_enum_vals([Region, Color, Level, Mode])
|
# Read the source text to analyze.
f = open('text.txt')
text = f.read()
# Dictionary (lemmatizer setup)
import pymorphy2
morph = pymorphy2.MorphAnalyzer()
def LEG(word):
    """Return the lemma (normal form) of *word* via the pymorphy2 analyzer."""
    best_parse = morph.parse(word)[0]
    return best_parse.normal_form
# Smoke-check the lemmatizer on a sample word.
LEG('звери')

# Punctuation marks to strip from the text.
e = ",", ".", "!", "-", "?", "»", "«", "—"
for mark in e:
    text = text.replace(mark, "")

# Text without punctuation.
print(text)

# Text as a list of words.
lll = text.split()
print(lll)
print(type(lll))

# Lower-case every word first, then reduce it to its lemma.
# BUG FIX: the original built `low_text = list(map(str.lower, lll))` and then
# immediately overwrote it with `list(map(LEG, lll))`, so the lower-casing was
# silently discarded. Lower-case *before* lemmatizing instead.
low_text = [LEG(word.lower()) for word in lll]
print(low_text)

# Count word frequencies.
dictt = {}
for word in low_text:
    dictt[word] = dictt.get(word, 0) + 1

# Print the words sorted by ascending frequency.
list_d = list(dictt.items())
list_d.sort(key=lambda item: item[1])
for word, count in list_d:
    print(word, ':', count)
|
# Exercise 060 solved with the standard-library factorial helper.
from math import factorial

# Read the number, compute its factorial, and report the result.
numero = int(input('Digite um número para cálculo do fatorial: '))
resultado = factorial(numero)
print('O fatorial do número {}! é: {}'.format(numero, resultado))
# Re-export the reproducibility and embedding helpers at package level.
from .reproducibility import kth_diag_indices, pairwise_distances
from .embedding import PCA, MDS, tSNE, SpectralEmbedding, PHATE

# Public API exposed by `from <package> import *`.
__all__ = ["kth_diag_indices",
           "pairwise_distances",
           "PCA",
           "MDS",
           "tSNE",
           "SpectralEmbedding",
           "PHATE"
           ]
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""This script takes a directory of meshes and generates a (D)SIF dataset.
The dataset can be used for training, evaluation, and inference on ldif models.
"""
import glob
import random
import os
from absl import app
from absl import flags
# pylint: disable=g-multiple-import
from joblib import Parallel, delayed
# pylint: enable=g-multiple-import
import tqdm
import tensorflow as tf
# LDIF is local code, should be imported last.
# pylint: disable=g-bad-import-order
from ldif.datasets import process_element
from ldif.scripts import make_example
from ldif.util import file_util
from ldif.util import path_util
from ldif.util.file_util import log
# pylint: enable=g-bad-import-order
# Command-line interface for the dataset-generation script.
FLAGS = flags.FLAGS

flags.DEFINE_string('mesh_directory', '', 'Path to meshes. This folder should'
                    ' have the structure <root>/{train,test,val}/<class>/*.ply')

flags.DEFINE_string('dataset_directory', '', 'Path to output dataset.')

flags.DEFINE_boolean(
    'skip_existing', True, 'Whether to skip process examples'
    ' that are already written into the output dataset. True'
    ' enables completing a processing run that crashed, or '
    ' adding examples to a dataset that already exists with'
    ' partial overlap. False enables updating a dataset'
    ' in-place.')

flags.DEFINE_integer(
    'max_threads', -1, 'The maximum number of threads to use.'
    ' If -1, will allocate all available threads on CPU.')

flags.DEFINE_string('log_level', 'INFO',
                    'One of VERBOSE, INFO, WARNING, ERROR. Sets logs to print '
                    'only at or above the specified level.')

flags.DEFINE_boolean(
    'optimize', True, 'Whether to create an optimized tfrecords '
    'dataset. This will substantially improve IO throughput, at '
    'the expense of approximately doubling disk usage and adding '
    'a moderate amount of additional dataset creation time. '
    'Recommended unless disk space is very tight or data is stored '
    'on a local NVMe drive or similar.')

flags.DEFINE_boolean(
    'trample_optimized', True, 'Whether to erase and re-create '
    'optimized files. Set True if changes have been made to the '
    'dataset since the last time meshes2dataset was run; set '
    'False to complete optimization if it was halted midway.')

flags.DEFINE_boolean(
    'optimize_only', False, 'Whether to skip dataset creation '
    'and only write tfrecords files.')
def process_one(f, mesh_directory, dataset_directory, skip_existing, log_level):
    """Processes a single mesh, adding it to the dataset.

    Args:
        f: Path to the input .ply mesh.
        mesh_directory: Root directory containing the input meshes.
        dataset_directory: Root directory of the output dataset.
        skip_existing: Whether to skip meshes whose output already exists.
        log_level: Log level forwarded to the example writer.

    Returns:
        The directory the example was written to.

    Raises:
        ValueError: If `f` does not have a supported extension.
    """
    relpath = f.replace(mesh_directory, '')
    # Replaced a leftover debug print with verbose logging, consistent with
    # the split/synset message below.
    log.verbose(f'relpath: {relpath}')
    assert relpath[0] == '/'
    relpath = relpath[1:]
    split, synset = relpath.split('/')[:2]
    log.verbose(f'The split is {split} and the synset is {synset}')
    name, extension = os.path.splitext(os.path.basename(f))
    valid_extensions = ['.ply']
    if extension not in valid_extensions:
        raise ValueError(f'File with unsupported extension {extension} found: {f}.'
                         f' Only {valid_extensions} are supported.')
    output_dir = f'{dataset_directory}/{split}/{synset}/{name}/'
    make_example.mesh_to_example(
        os.path.join(path_util.get_path_to_ldif_parent(), 'ldif'), f,
        output_dir, skip_existing, log_level)
    return output_dir
def serialize(example_dir, log_level):
    """Load one example directory and return it serialized as a tf.Example string."""
    example_dict = process_element.load_example_dict(example_dir, log_level)
    return process_element.make_tf_example(example_dict)
def main(argv):
    """Entry point: converts a mesh directory into a (D)SIF dataset."""
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    # Fixed seed so shard shuffling is reproducible across runs.
    random.seed(2077)
    log.set_level(FLAGS.log_level)

    # Default to all available cores; FLAGS.max_threads overrides when positive.
    n_jobs = os.cpu_count()
    assert FLAGS.max_threads != 0
    if FLAGS.max_threads > 0:
        n_jobs = FLAGS.max_threads

    # Strip a trailing slash so relpath computation below is stable.
    mesh_directory = FLAGS.mesh_directory
    if mesh_directory[-1] == '/':
        mesh_directory = mesh_directory[:-1]

    files = glob.glob(f'{mesh_directory}/*/*/*.ply')

    if not files and not FLAGS.optimize_only:
        raise ValueError(f"Didn't find any ply files in {mesh_directory}. "
                         "Please make sure the directory structure is "
                         "[mesh_directory]/[splits]/[class names]/[ply files]")

    # Make the directories first because it's not threadsafe and also might fail.
    if files and not FLAGS.optimize_only:
        log.info('Creating directories...')
        for i, f in tqdm.tqdm(enumerate(files)):
            relpath = f.replace(mesh_directory, '')
            # log.info(f'Relpath: {relpath}')
            assert relpath[0] == '/'
            relpath = relpath[1:]
            split, synset = relpath.split('/')[:2]
            if not os.path.isdir(f'{FLAGS.dataset_directory}/{split}'):
                os.makedirs(f'{FLAGS.dataset_directory}/{split}')
            if not os.path.isdir(f'{FLAGS.dataset_directory}/{split}/{synset}'):
                os.mkdir(f'{FLAGS.dataset_directory}/{split}/{synset}')
        log.info('Making dataset...')
        # Flags can't be pickled:
        output_dirs = Parallel(n_jobs=n_jobs)(
            delayed(process_one)(f, mesh_directory, FLAGS.dataset_directory,
                                 FLAGS.skip_existing, FLAGS.log_level) for f in tqdm.tqdm(files))
        log.info('Making dataset registry...')
    else:
        # optimize_only: recover the example directories from what is on disk.
        output_dirs = glob.glob(f'{FLAGS.dataset_directory}/*/*/*/surface_samples_from_dodeca.pts')
        output_dirs = [os.path.dirname(f) + '/' for f in output_dirs]
    output_dirs.sort()  # So randomize with a fixed seed always results in the same order

    # The split name is the 4th path component from the end of each output dir.
    splits = {x.split('/')[-4] for x in output_dirs}
    if 'optimized' in splits:
        raise ValueError(f'The keyword "optimized" cannot be used for a split name, it is reserved.')
    # Write one registry file per split, listing its example directories.
    for split in splits:
        elements_of_split = [x for x in output_dirs if x.split('/')[-4] == split]
        with open(f'{FLAGS.dataset_directory}/{split}.txt', 'wt') as f:
            f.write('\n'.join(elements_of_split) + '\n')
    log.info('Done!')

    if FLAGS.optimize:
        log.info('Precomputing optimized tfrecord files...')
        opt_dir = f'{FLAGS.dataset_directory}/optimized'
        # Erase stale shards first when trampling is requested.
        if FLAGS.trample_optimized and os.path.isdir(opt_dir):
            for f in os.listdir(opt_dir):
                if f.endswith('.tfrecords'):
                    os.remove(os.path.join(opt_dir, f))
        if not os.path.isdir(opt_dir):
            os.mkdir(opt_dir)
        for split in splits:
            log.info(f'Optimizing split {split}...')
            elements_of_split = [x for x in output_dirs if x.split('/')[-4] == split]
            examples_per_shard = 64
            # Make sure shards are totally random:
            random.shuffle(elements_of_split)
            # Round up so a partial final shard is still written.
            n_shards = int(len(elements_of_split) / examples_per_shard)
            if len(elements_of_split) % examples_per_shard:
                n_shards += 1
            shard_dir = f'{FLAGS.dataset_directory}/optimized/{split}'
            if not os.path.isdir(shard_dir):
                os.mkdir(shard_dir)
            for shard_idx in tqdm.tqdm(range(n_shards)):
                shard_name = f'{shard_dir}/{split}-%.5d-of-%.5d.tfrecords' % (shard_idx, n_shards)
                # Keep existing shards unless trampling was requested.
                if not FLAGS.trample_optimized and os.path.isfile(shard_name):
                    continue
                start_idx = shard_idx * examples_per_shard
                end_idx = (shard_idx + 1) * examples_per_shard
                options = tf.io.TFRecordOptions(tf.compat.v1.io.TFRecordCompressionType.GZIP)
                with tf.io.TFRecordWriter(shard_name, options=options) as writer:
                    to_process = elements_of_split[start_idx:end_idx]
                    # Serialization runs in parallel; writing stays sequential.
                    serialized = Parallel(n_jobs=n_jobs)(delayed(serialize)(d, FLAGS.log_level)
                                                         for d in to_process)
                    for s in serialized:
                        writer.write(s)


if __name__ == '__main__':
    app.run(main)
|
"""The rtorrent component."""
|
import collections
import logging
from functools import partial
from Qt import QtWidgets, QtCore
import qtawesome
from bson.objectid import ObjectId
from avalon import io
from openpype import style
from openpype.pipeline import (
HeroVersionType,
update_container,
remove_container,
discover_inventory_actions,
)
from openpype.modules import ModulesManager
from openpype.tools.utils.lib import (
get_progress_for_repre,
iter_model_rows,
format_version
)
from .switch_dialog import SwitchAssetDialog
from .model import InventoryModel
# Default accent color used for context-menu action icons.
DEFAULT_COLOR = "#fb9c15"

# Module-level logger for the scene inventory tool.
log = logging.getLogger("SceneInventory")
class SceneInventoryView(QtWidgets.QTreeView):
    """Tree view of loaded containers with update/switch/remove context actions."""

    # Emitted whenever container data changed and models should refresh.
    data_changed = QtCore.Signal()
    # Emitted with the new state when hierarchy (cherry-pick) view is toggled.
    hierarchy_view_changed = QtCore.Signal(bool)

    def __init__(self, parent=None):
        super(SceneInventoryView, self).__init__(parent=parent)

        # view settings
        self.setIndentation(12)
        self.setAlternatingRowColors(True)
        self.setSortingEnabled(True)
        self.setSelectionMode(self.ExtendedSelection)
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.customContextMenuRequested.connect(self._show_right_mouse_menu)

        # Cherry-pick (hierarchy) view state and the object names picked into it.
        self._hierarchy_view = False
        self._selected = None

        # Sync server module drives the optional download/upload menu actions.
        manager = ModulesManager()
        self.sync_server = manager.modules_by_name["sync_server"]
        self.sync_enabled = self.sync_server.enabled

    def _set_hierarchy_view(self, enabled):
        """Store the hierarchy-view state and notify listeners on change."""
        if enabled == self._hierarchy_view:
            return
        self._hierarchy_view = enabled
        self.hierarchy_view_changed.emit(enabled)

    def _enter_hierarchy(self, items):
        """Enter cherry-picked hierarchy view for the given container items."""
        self._selected = set(i["objectName"] for i in items)
        self._set_hierarchy_view(True)
        self.data_changed.emit()
        self.expandToDepth(1)
        # Accent the view border while cherry-pick mode is active.
        self.setStyleSheet("""
            QTreeView {
                border-color: #fb9c15;
            }
        """)

    def _leave_hierarchy(self):
        """Leave cherry-pick mode and restore the default styling."""
        self._set_hierarchy_view(False)
        self.data_changed.emit()
        self.setStyleSheet("QTreeView {}")

    def _build_item_menu_for_selection(self, items, menu):
        """Add version/switch/remove actions for the selected items to *menu*."""
        if not items:
            return

        # Collect unique representation ids from the selection.
        repre_ids = []
        for item in items:
            item_id = ObjectId(item["representation"])
            if item_id not in repre_ids:
                repre_ids.append(item_id)

        repre_docs = io.find(
            {
                "type": "representation",
                "_id": {"$in": repre_ids}
            },
            {"parent": 1}
        )

        # Versions the representations belong to.
        version_ids = []
        for repre_doc in repre_docs:
            version_id = repre_doc["parent"]
            if version_id not in version_ids:
                version_ids.append(version_id)

        loaded_versions = io.find({
            "_id": {"$in": version_ids},
            "type": {"$in": ["version", "hero_version"]}
        })

        # Separate loaded hero versions from regular ones and remember parents.
        loaded_hero_versions = []
        versions_by_parent_id = collections.defaultdict(list)
        version_parents = []
        for version in loaded_versions:
            if version["type"] == "hero_version":
                loaded_hero_versions.append(version)
            else:
                parent_id = version["parent"]
                versions_by_parent_id[parent_id].append(version)
                if parent_id not in version_parents:
                    version_parents.append(parent_id)

        # All sibling versions under the same parents, to detect newer ones.
        all_versions = io.find({
            "type": {"$in": ["hero_version", "version"]},
            "parent": {"$in": version_parents}
        })
        hero_versions = []
        versions = []
        for version in all_versions:
            if version["type"] == "hero_version":
                hero_versions.append(version)
            else:
                versions.append(version)

        has_loaded_hero_versions = len(loaded_hero_versions) > 0
        has_available_hero_version = len(hero_versions) > 0
        has_outdated = False

        # Outdated when any loaded version has a sibling with a higher name.
        for version in versions:
            parent_id = version["parent"]
            current_versions = versions_by_parent_id[parent_id]
            for current_version in current_versions:
                if current_version["name"] < version["name"]:
                    has_outdated = True
                    break
            if has_outdated:
                break

        switch_to_versioned = None
        if has_loaded_hero_versions:
            def _on_switch_to_versioned(items):
                # Resolve, per representation, the concrete version its
                # loaded hero version points to, then update to that version.
                repre_ids = []
                for item in items:
                    item_id = ObjectId(item["representation"])
                    if item_id not in repre_ids:
                        repre_ids.append(item_id)

                repre_docs = io.find(
                    {
                        "type": "representation",
                        "_id": {"$in": repre_ids}
                    },
                    {"parent": 1}
                )
                version_ids = []
                version_id_by_repre_id = {}
                for repre_doc in repre_docs:
                    version_id = repre_doc["parent"]
                    version_id_by_repre_id[repre_doc["_id"]] = version_id
                    if version_id not in version_ids:
                        version_ids.append(version_id)

                hero_versions = io.find(
                    {
                        "_id": {"$in": version_ids},
                        "type": "hero_version"
                    },
                    {"version_id": 1}
                )
                # Replace hero version ids with the version id they point to.
                version_ids = set()
                for hero_version in hero_versions:
                    version_id = hero_version["version_id"]
                    version_ids.add(version_id)
                    hero_version_id = hero_version["_id"]
                    for _repre_id, current_version_id in (
                        version_id_by_repre_id.items()
                    ):
                        if current_version_id == hero_version_id:
                            version_id_by_repre_id[_repre_id] = version_id

                version_docs = io.find(
                    {
                        "_id": {"$in": list(version_ids)},
                        "type": "version"
                    },
                    {"name": 1}
                )
                version_name_by_id = {}
                for version_doc in version_docs:
                    version_name_by_id[version_doc["_id"]] = \
                        version_doc["name"]

                for item in items:
                    repre_id = ObjectId(item["representation"])
                    version_id = version_id_by_repre_id.get(repre_id)
                    version_name = version_name_by_id.get(version_id)
                    if version_name is not None:
                        try:
                            update_container(item, version_name)
                        except AssertionError:
                            self._show_version_error_dialog(
                                version_name, [item]
                            )
                            log.warning("Update failed", exc_info=True)
                self.data_changed.emit()

            update_icon = qtawesome.icon(
                "fa.asterisk",
                color=DEFAULT_COLOR
            )
            switch_to_versioned = QtWidgets.QAction(
                update_icon,
                "Switch to versioned",
                menu
            )
            switch_to_versioned.triggered.connect(
                lambda: _on_switch_to_versioned(items)
            )

        update_to_latest_action = None
        if has_outdated or has_loaded_hero_versions:
            # update to latest version
            def _on_update_to_latest(items):
                for item in items:
                    try:
                        update_container(item, -1)
                    except AssertionError:
                        self._show_version_error_dialog(None, [item])
                        log.warning("Update failed", exc_info=True)
                self.data_changed.emit()

            update_icon = qtawesome.icon(
                "fa.angle-double-up",
                color=DEFAULT_COLOR
            )
            update_to_latest_action = QtWidgets.QAction(
                update_icon,
                "Update to latest",
                menu
            )
            update_to_latest_action.triggered.connect(
                lambda: _on_update_to_latest(items)
            )

        change_to_hero = None
        if has_available_hero_version:
            # change to hero version
            def _on_update_to_hero(items):
                for item in items:
                    try:
                        update_container(item, HeroVersionType(-1))
                    except AssertionError:
                        self._show_version_error_dialog('hero', [item])
                        log.warning("Update failed", exc_info=True)
                self.data_changed.emit()

            # TODO change icon
            change_icon = qtawesome.icon(
                "fa.asterisk",
                color="#00b359"
            )
            change_to_hero = QtWidgets.QAction(
                change_icon,
                "Change to hero",
                menu
            )
            change_to_hero.triggered.connect(
                lambda: _on_update_to_hero(items)
            )

        # set version
        set_version_icon = qtawesome.icon("fa.hashtag", color=DEFAULT_COLOR)
        set_version_action = QtWidgets.QAction(
            set_version_icon,
            "Set version",
            menu
        )
        set_version_action.triggered.connect(
            lambda: self._show_version_dialog(items))

        # switch asset
        switch_asset_icon = qtawesome.icon("fa.sitemap", color=DEFAULT_COLOR)
        switch_asset_action = QtWidgets.QAction(
            switch_asset_icon,
            "Switch Asset",
            menu
        )
        switch_asset_action.triggered.connect(
            lambda: self._show_switch_dialog(items))

        # remove
        remove_icon = qtawesome.icon("fa.remove", color=DEFAULT_COLOR)
        remove_action = QtWidgets.QAction(remove_icon, "Remove items", menu)
        remove_action.triggered.connect(
            lambda: self._show_remove_warning_dialog(items))

        # add the actions
        if switch_to_versioned:
            menu.addAction(switch_to_versioned)

        if update_to_latest_action:
            menu.addAction(update_to_latest_action)

        if change_to_hero:
            menu.addAction(change_to_hero)

        menu.addAction(set_version_action)
        menu.addAction(switch_asset_action)

        menu.addSeparator()

        menu.addAction(remove_action)

        self._handle_sync_server(menu, repre_ids)

    def _handle_sync_server(self, menu, repre_ids):
        """
        Adds actions for download/upload when SyncServer is enabled

        Args:
            menu (OptionMenu)
            repre_ids (list) of object_ids

        Returns:
            (OptionMenu)
        """
        if not self.sync_enabled:
            return

        menu.addSeparator()

        download_icon = qtawesome.icon("fa.download", color=DEFAULT_COLOR)
        download_active_action = QtWidgets.QAction(
            download_icon,
            "Download",
            menu
        )
        download_active_action.triggered.connect(
            lambda: self._add_sites(repre_ids, 'active_site'))

        upload_icon = qtawesome.icon("fa.upload", color=DEFAULT_COLOR)
        upload_remote_action = QtWidgets.QAction(
            upload_icon,
            "Upload",
            menu
        )
        upload_remote_action.triggered.connect(
            lambda: self._add_sites(repre_ids, 'remote_site'))

        menu.addAction(download_active_action)
        menu.addAction(upload_remote_action)

    def _add_sites(self, repre_ids, side):
        """
        (Re)sync all 'repre_ids' to specific site.

        It checks if opposite site has fully available content to limit
        accidents. (ReSync active when no remote >> losing active content)

        Args:
            repre_ids (list)
            side (str): 'active_site'|'remote_site'
        """
        project_name = io.Session["AVALON_PROJECT"]
        active_site = self.sync_server.get_active_site(project_name)
        remote_site = self.sync_server.get_remote_site(project_name)

        repre_docs = io.find({
            "type": "representation",
            "_id": {"$in": repre_ids}
        })
        repre_docs_by_id = {
            repre_doc["_id"]: repre_doc
            for repre_doc in repre_docs
        }
        for repre_id in repre_ids:
            repre_doc = repre_docs_by_id.get(repre_id)
            if not repre_doc:
                continue

            progress = get_progress_for_repre(
                repre_doc,
                active_site,
                remote_site
            )
            if side == "active_site":
                # check opposite from added site, must be 1 or unable to sync
                check_progress = progress[remote_site]
                site = active_site
            else:
                check_progress = progress[active_site]
                site = remote_site

            # Only force a (re)sync when the opposite side is fully present.
            if check_progress == 1:
                self.sync_server.add_site(
                    project_name, repre_id, site, force=True
                )

        self.data_changed.emit()

    def _build_item_menu(self, items=None):
        """Create menu for the selected items"""
        if not items:
            items = []

        menu = QtWidgets.QMenu(self)

        # add the actions
        self._build_item_menu_for_selection(items, menu)

        # These two actions should be able to work without selection
        # expand all items
        expandall_action = QtWidgets.QAction(menu, text="Expand all items")
        expandall_action.triggered.connect(self.expandAll)

        # collapse all items
        collapse_action = QtWidgets.QAction(menu, text="Collapse all items")
        collapse_action.triggered.connect(self.collapseAll)

        menu.addAction(expandall_action)
        menu.addAction(collapse_action)

        custom_actions = self._get_custom_actions(containers=items)
        if custom_actions:
            submenu = QtWidgets.QMenu("Actions", self)
            for action in custom_actions:
                color = action.color or DEFAULT_COLOR
                icon = qtawesome.icon("fa.%s" % action.icon, color=color)
                action_item = QtWidgets.QAction(icon, action.label, submenu)
                action_item.triggered.connect(
                    partial(self._process_custom_action, action, items))

                submenu.addAction(action_item)

            menu.addMenu(submenu)

        # go back to flat view
        if self._hierarchy_view:
            back_to_flat_icon = qtawesome.icon("fa.list", color=DEFAULT_COLOR)
            back_to_flat_action = QtWidgets.QAction(
                back_to_flat_icon,
                "Back to Full-View",
                menu
            )
            back_to_flat_action.triggered.connect(self._leave_hierarchy)

        # send items to hierarchy view
        enter_hierarchy_icon = qtawesome.icon("fa.indent", color="#d8d8d8")
        enter_hierarchy_action = QtWidgets.QAction(
            enter_hierarchy_icon,
            "Cherry-Pick (Hierarchy)",
            menu
        )
        enter_hierarchy_action.triggered.connect(
            lambda: self._enter_hierarchy(items))

        if items:
            menu.addAction(enter_hierarchy_action)

        if self._hierarchy_view:
            menu.addAction(back_to_flat_action)

        return menu

    def _get_custom_actions(self, containers):
        """Get the registered Inventory Actions

        Args:
            containers(list): collection of containers

        Returns:
            list: collection of filter and initialized actions
        """

        def sorter(Plugin):
            """Sort based on order attribute of the plugin"""
            return Plugin.order

        # Feed an empty dict if no selection, this will ensure the compat
        # lookup always work, so plugin can interact with Scene Inventory
        # reversely.
        containers = containers or [dict()]

        # Check which action will be available in the menu
        Plugins = discover_inventory_actions()
        compatible = [p() for p in Plugins if
                      any(p.is_compatible(c) for c in containers)]

        return sorted(compatible, key=sorter)

    def _process_custom_action(self, action, containers):
        """Run action and if results are returned positive update the view

        If the result is list or dict, will select view items by the result.

        Args:
            action (InventoryAction): Inventory Action instance
            containers (list): Data of currently selected items

        Returns:
            None
        """
        result = action.process(containers)
        if result:
            self.data_changed.emit()

            if isinstance(result, (list, set)):
                self._select_items_by_action(result)

            if isinstance(result, dict):
                self._select_items_by_action(
                    result["objectNames"], result["options"]
                )

    def _select_items_by_action(self, object_names, options=None):
        """Select view items by the result of action

        Args:
            object_names (list or set): A list/set of container object name
            options (dict): GUI operation options.

        Returns:
            None
        """
        options = options or dict()

        if options.get("clear", True):
            self.clearSelection()

        object_names = set(object_names)
        if (
            self._hierarchy_view
            and not self._selected.issuperset(object_names)
        ):
            # If any container not in current cherry-picked view, update
            # view before selecting them.
            self._selected.update(object_names)
            self.data_changed.emit()

        model = self.model()
        selection_model = self.selectionModel()

        select_mode = {
            "select": selection_model.Select,
            "deselect": selection_model.Deselect,
            "toggle": selection_model.Toggle,
        }[options.get("mode", "select")]

        for item in iter_model_rows(model, 0):
            # NOTE(review): `item` is rebound here to the ItemRole data, and
            # then passed to scrollTo()/select() below, which normally expect
            # an index — confirm against what iter_model_rows yields.
            item = item.data(InventoryModel.ItemRole)
            if item.get("isGroupNode"):
                continue

            name = item.get("objectName")
            if name in object_names:
                self.scrollTo(item)  # Ensure item is visible
                flags = select_mode | selection_model.Rows
                selection_model.select(item, flags)

                object_names.remove(name)

            if len(object_names) == 0:
                break

    def _show_right_mouse_menu(self, pos):
        """Display the menu when at the position of the item clicked"""

        globalpos = self.viewport().mapToGlobal(pos)

        if not self.selectionModel().hasSelection():
            print("No selection")
            # Build menu without selection, feed an empty list
            menu = self._build_item_menu()
            menu.exec_(globalpos)
            return

        active = self.currentIndex()  # index under mouse
        active = active.sibling(active.row(), 0)  # get first column

        # move index under mouse
        indices = self.get_indices()
        if active in indices:
            indices.remove(active)

        indices.append(active)

        # Extend to the sub-items
        all_indices = self._extend_to_children(indices)
        items = [dict(i.data(InventoryModel.ItemRole)) for i in all_indices
                 if i.parent().isValid()]

        if self._hierarchy_view:
            # Ensure no group item
            items = [n for n in items if not n.get("isGroupNode")]

        menu = self._build_item_menu(items)
        menu.exec_(globalpos)

    def get_indices(self):
        """Get the selected rows"""
        selection_model = self.selectionModel()
        return selection_model.selectedRows()

    def _extend_to_children(self, indices):
        """Extend the indices to the children indices.

        Top-level indices are extended to its children indices. Sub-items
        are kept as is.

        Args:
            indices (list): The indices to extend.

        Returns:
            list: The children indices
        """

        def get_children(i):
            model = i.model()
            rows = model.rowCount(parent=i)
            for row in range(rows):
                child = model.index(row, 0, parent=i)
                yield child

        subitems = set()
        for i in indices:
            valid_parent = i.parent().isValid()
            if valid_parent and i not in subitems:
                subitems.add(i)

                if self._hierarchy_view:
                    # Assume this is a group item
                    for child in get_children(i):
                        subitems.add(child)
            else:
                # is top level item
                for child in get_children(i):
                    subitems.add(child)

        return list(subitems)

    def _show_version_dialog(self, items):
        """Create a dialog with the available versions for the selected file

        Args:
            items (list): list of items to run the "set_version" for

        Returns:
            None
        """
        active = items[-1]

        # Get available versions for active representation
        representation_id = ObjectId(active["representation"])
        representation = io.find_one({"_id": representation_id})
        version = io.find_one({
            "_id": representation["parent"]
        })

        versions = list(io.find(
            {
                "parent": version["parent"],
                "type": "version"
            },
            sort=[("name", 1)]
        ))

        hero_version = io.find_one({
            "parent": version["parent"],
            "type": "hero_version"
        })
        if hero_version:
            # Borrow name/data from the version the hero points to.
            _version_id = hero_version["version_id"]
            for _version in versions:
                if _version["_id"] != _version_id:
                    continue

                hero_version["name"] = HeroVersionType(
                    _version["name"]
                )
                hero_version["data"] = _version["data"]
                break

        # Get index among the listed versions
        current_item = None
        current_version = active["version"]
        if isinstance(current_version, HeroVersionType):
            current_item = hero_version
        else:
            for version in versions:
                if version["name"] == current_version:
                    current_item = version
                    break

        # Hero first, then regular versions newest-to-oldest.
        all_versions = []
        if hero_version:
            all_versions.append(hero_version)
        all_versions.extend(reversed(versions))

        if current_item:
            index = all_versions.index(current_item)
        else:
            index = 0

        versions_by_label = dict()
        labels = []
        for version in all_versions:
            is_hero = version["type"] == "hero_version"
            label = format_version(version["name"], is_hero)
            labels.append(label)
            versions_by_label[label] = version["name"]

        label, state = QtWidgets.QInputDialog.getItem(
            self,
            "Set version..",
            "Set version number to",
            labels,
            current=index,
            editable=False
        )
        if not state:
            return

        if label:
            version = versions_by_label[label]
            for item in items:
                try:
                    update_container(item, version)
                except AssertionError:
                    self._show_version_error_dialog(version, [item])
                    log.warning("Update failed", exc_info=True)
            # refresh model when done
            self.data_changed.emit()

    def _show_switch_dialog(self, items):
        """Display Switch dialog"""
        dialog = SwitchAssetDialog(self, items)
        dialog.switched.connect(self.data_changed.emit)
        dialog.show()

    def _show_remove_warning_dialog(self, items):
        """Prompt a dialog to inform the user the action will remove items"""
        accept = QtWidgets.QMessageBox.Ok
        buttons = accept | QtWidgets.QMessageBox.Cancel

        state = QtWidgets.QMessageBox.question(
            self,
            "Are you sure?",
            "Are you sure you want to remove {} item(s)".format(len(items)),
            buttons=buttons,
            defaultButton=accept
        )
        if state != accept:
            return

        for item in items:
            remove_container(item)
        self.data_changed.emit()

    def _show_version_error_dialog(self, version, items):
        """Shows QMessageBox when version switch doesn't work

        Args:
            version: str or int or None
        """
        # Map the failed version to a human-readable label.
        if not version:
            version_str = "latest"
        elif version == "hero":
            version_str = "hero"
        elif isinstance(version, int):
            version_str = "v{:03d}".format(version)
        else:
            version_str = version

        dialog = QtWidgets.QMessageBox()
        dialog.setIcon(QtWidgets.QMessageBox.Warning)
        dialog.setStyleSheet(style.load_stylesheet())
        dialog.setWindowTitle("Update failed")

        switch_btn = dialog.addButton(
            "Switch Asset",
            QtWidgets.QMessageBox.ActionRole
        )
        switch_btn.clicked.connect(lambda: self._show_switch_dialog(items))

        dialog.addButton(QtWidgets.QMessageBox.Cancel)

        msg = (
            "Version update to '{}' failed as representation doesn't exist."
            "\n\nPlease update to version with a valid representation"
            " OR \n use 'Switch Asset' button to change asset."
        ).format(version_str)
        dialog.setText(msg)
        dialog.exec_()

    def update_all(self):
        """Update all items that are currently 'outdated' in the view"""
        # Get the source model through the proxy model
        model = self.model().sourceModel()

        # Get all items from outdated groups
        outdated_items = []
        for index in iter_model_rows(model,
                                     column=0,
                                     include_root=False):
            item = index.data(model.ItemRole)

            if not item.get("isGroupNode"):
                continue

            # Only the group nodes contain the "highest_version" data and as
            # such we find only the groups and take its children.
            if not model.outdated(item):
                continue

            # Collect all children which we want to update
            children = item.children()
            outdated_items.extend(children)

        if not outdated_items:
            log.info("Nothing to update.")
            return

        # Trigger update to latest
        for item in outdated_items:
            try:
                update_container(item, -1)
            except AssertionError:
                self._show_version_error_dialog(None, [item])
                log.warning("Update failed", exc_info=True)
        self.data_changed.emit()
|
from tkinter import *
from tkinter import ttk
class Calculator:
    """Simple four-function calculator GUI built on tkinter/ttk."""

    # Stored left-hand operand of the pending operation.
    calc_value = 0.0

    # At most one of these is True while an operator is armed.
    div_trigger = False
    mult_trigger = False
    add_trigger = False
    sub_trigger = False
def button_press(self, value):
if value == "AC":
self.calc_value = 0.0
self.div_trigger = False
self.mult_trigger = False
self.add_trigger = False
self.sub_trigger = False
self.number_entry.delete(0, "end")
entry_val = 0
# self.number_entry.insert(0, entry_val)
else:
entry_val = self.number_entry.get()
entry_val += value
self.number_entry.delete(0, "end")
self.number_entry.insert(0, entry_val)
def is_float(self, value):
try:
float(value)
return True
except ValueError:
return False
def math_button_press(self, value):
if self.is_float(str(self.number_entry.get())):
self.div_trigger = False
self.mult_trigger = False
self.add_trigger = False
self.sub_trigger = False
self.calc_value = float(self.entry_value.get())
if value == '/':
self.div_trigger = True
print("/ Pressed")
elif value == '*':
self.mult_trigger = True
print("* Pressed")
elif value == '+':
self.add_trigger = True
print("+ Pressed")
elif value == '-':
self.sub_trigger = True
print("- Pressed")
self.number_entry.delete(0, "end")
def equal_button_pressed(self):
if self.add_trigger or self.sub_trigger or self.mult_trigger or self.div_trigger:
if self.add_trigger:
solution = self.calc_value + float(self.entry_value.get())
elif self.sub_trigger:
solution = self.calc_value - float(self.entry_value.get())
elif self.div_trigger:
solution = self.calc_value / float(self.entry_value.get())
elif self.mult_trigger:
solution = self.calc_value * float(self.entry_value.get())
self.number_entry.delete(0, "end")
self.number_entry.insert(0, solution)
def __init__(self, root):
self.entry_value = StringVar(root, value="")
root.title("Calculator")
# root.geometry("523x220")
root.resizable(width=False, height=False)
style = ttk.Style()
style.configure("TButton", font="Serif 15", paddind=10)
style.configure("TEntry", font="Serif 18", padding=10)
self.number_entry = ttk.Entry(root, textvariable=self.entry_value, width=80)
self.number_entry.grid(row=0, columnspan=4, sticky=(W, E))
self.button7 = ttk.Button(root, text="7", command=lambda: self.button_press('7'))
self.button7.grid(row=1, column=0,
sticky=(W, E))
self.button8 = ttk.Button(root, text="8", command=lambda: self.button_press('8'))
self.button8.grid(row=1, column=1, sticky=(W, E))
self.button9 = ttk.Button(root, text="9", command=lambda: self.button_press('9'))
self.button9.grid(row=1, column=2, sticky=(W, E))
self.button_div = ttk.Button(root, text="/", command=lambda: self.math_button_press('/'))
self.button_div.grid(row=1, column=3, sticky=(W, E))
# Row2
self.button4 = ttk.Button(root, text="4", command=lambda: self.button_press('4'))
self.button4.grid(row=2, column=0, sticky=(W, E))
self.button5 = ttk.Button(root, text="5", command=lambda: self.button_press('5'))
self.button5.grid(row=2, column=1, sticky=(W, E))
self.button6 = ttk.Button(root, text="6", command=lambda: self.button_press('6'))
self.button6.grid(row=2, column=2, sticky=(W, E))
self.button_mult = ttk.Button(root, text="*", command=lambda: self.math_button_press('*'))
self.button_mult.grid(row=2, column=3, sticky=(W, E))
# Row3
self.button1 = ttk.Button(root, text="1", command=lambda: self.button_press('1'))
self.button1.grid(row=3, column=0, sticky=(W, E))
self.button2 = ttk.Button(root, text="2", command=lambda: self.button_press('2'))
self.button2.grid(row=3, column=1, sticky=(W, E))
self.button3 = ttk.Button(root, text="3", command=lambda: self.button_press('3'))
self.button3.grid(row=3, column=2, sticky=(W, E))
self.button_add = ttk.Button(root, text="+", command=lambda: self.math_button_press('+'))
self.button_add.grid(row=3, column=3, sticky=(W, E))
# Row4
self.button_clear = ttk.Button(root, text="AC", command=lambda: self.button_press('AC'))
self.button_clear.grid(row=4, column=0, sticky=(W, E))
self.button0 = ttk.Button(root, text="0", command=lambda: self.button_press('0'))
self.button0.grid(row=4, column=1, sticky=(W, E))
self.button_equal = ttk.Button(root, text="=", command=lambda: self.equal_button_pressed())
self.button_equal.grid(row=4, column=2, sticky=(W, E))
self.button_sub = ttk.Button(root, text="-", command=lambda: self.math_button_press('-'))
self.button_sub.grid(row=4, column=3, sticky=(W, E))
# Launch the GUI only when executed as a script, not on import.
if __name__ == "__main__":
    root = Tk()
    calc = Calculator(root)
    root.mainloop()
|
# -*- coding: utf-8 -*-
"""Vector classification algorithms, not designed specifically for time series."""
|
# flake8: noqa
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# https://seaborn.pydata.org/generated/seaborn.set_context.html
# https://seaborn.pydata.org/generated/seaborn.set_style.html
# Plot styling: white background, "paper"-context font sizes.
sns.set_style("white")
sns.set_context("paper", font_scale=1)
# Fix the global NumPy RNG so the synthetic corpus is reproducible.
np.random.seed(12345)
from numpy_ml.lda import LDA
def generate_corpus():
    """Generate a synthetic corpus from the LDA generative model.

    Builds D=300 documents over a V=30-word vocabulary and T=10 topics.
    Documents fall into three groups, each drawn from a different Dirichlet
    document-topic prior; each topic has three highly diagnostic words.

    Returns:
        corpus: list of D float64 arrays, each holding that document's
            word ids (length drawn uniformly from [150, 200)).
        T: the number of topics (10).
    """
    D = 300  # number of documents
    T = 10   # number of topics
    V = 30   # vocabulary size
    # Per-document lengths.
    N = np.random.randint(150, 200, size=D)
    # Document-topic Dirichlet priors for the 3 document groups.
    alpha1 = np.array((20, 15, 10, 1, 1, 1, 1, 1, 1, 1))
    alpha2 = np.array((1, 1, 1, 10, 15, 20, 1, 1, 1, 1))
    alpha3 = np.array((1, 1, 1, 1, 1, 1, 10, 12, 15, 18))
    # Arbitrarily choose each topic to have 3 very common, diagnostic words
    # These words are barely shared with any other topic
    beta_probs = (
        np.ones((V, T)) + np.array([np.arange(V) % T == t for t in range(T)]).T * 19
    )
    beta_gen = np.array(list(map(lambda x: np.random.dirichlet(x), beta_probs.T))).T
    corpus = []
    theta = np.empty((D, T))
    # Generate each document from the LDA model
    for d in range(D):
        # Draw the document's topic distribution from its group's prior.
        if d < (D / 3):
            theta[d, :] = np.random.dirichlet(alpha1, 1)[0]
        elif d < 2 * (D / 3):
            theta[d, :] = np.random.dirichlet(alpha2, 1)[0]
        else:
            theta[d, :] = np.random.dirichlet(alpha3, 1)[0]
        # Collect word ids in a Python list and convert once at the end:
        # np.append inside the loop reallocated the array per word (O(n^2)).
        words = []
        for n in range(N[d]):
            # Draw a topic according to the document's topic distribution
            z_n = np.random.choice(np.arange(T), p=theta[d, :])
            # Draw a word according to the topic-word distribution
            w_n = np.random.choice(np.arange(V), p=beta_gen[:, z_n])
            words.append(w_n)
        # dtype=np.float64 preserves the dtype np.append produced before.
        corpus.append(np.array(words, dtype=np.float64))
    return corpus, T
def plot_unsmoothed():
    """Train an (unsmoothed) LDA model on the synthetic corpus and save
    heatmaps of the recovered topic-word (beta) and document-topic (gamma)
    distributions to img/plot_unsmoothed.png.
    """
    corpus, T = generate_corpus()
    L = LDA(T)
    L.train(corpus, verbose=False)
    fig, axes = plt.subplots(1, 2)
    # Recovered topic-word distribution (words x topics).
    ax1 = sns.heatmap(L.beta, xticklabels=[], yticklabels=[], ax=axes[0])
    ax1.set_xlabel("Topics")
    ax1.set_ylabel("Words")
    ax1.set_title("Recovered topic-word distribution")
    # Recovered document-topic distribution (documents x topics).
    ax2 = sns.heatmap(L.gamma, xticklabels=[], yticklabels=[], ax=axes[1])
    ax2.set_xlabel("Topics")
    ax2.set_ylabel("Documents")
    ax2.set_title("Recovered document-topic distribution")
    # savefig does not create directories; make sure img/ exists first.
    import os
    os.makedirs("img", exist_ok=True)
    plt.savefig("img/plot_unsmoothed.png", dpi=300)
    plt.close("all")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.