| max_stars_repo_path (stringlengths 3–269) | max_stars_repo_name (stringlengths 4–119) | max_stars_count (int64 0–191k) | id (stringlengths 1–7) | content (stringlengths 6–1.05M) | score (float64 0.23–5.13) | int_score (int64 0–5) |
|---|---|---|---|---|---|---|
belphegor/utils/modding.py
|
nguuuquaaa/Belphegor
| 16
|
12777351
|
<reponame>nguuuquaaa/Belphegor<gh_stars>10-100
import discord
from discord.ext import commands
from . import checks, string_utils
import multidict
import functools
from yarl import URL
#==================================================================================================================================================
class SupressAttributeError(str):
@property
def name(self):
return self
class BadValue(commands.CommandError):
def __init__(self, key, value):
self.key = key
self.value = value
#==================================================================================================================================================
class MultiDict(multidict.MultiDict):
def geteither(self, *keys, default=None):
for key in keys:
try:
value = self.getone(key)
except KeyError:
continue
else:
return value
else:
return default
def getalltext(self, key, *, default="", delimiter=" "):
try:
temp = self.getall(key)
except KeyError:
return default
else:
return delimiter.join((str(t) for t in temp))
def to_default_dict(self):
ret = {}
for key, value in self.items():
rv = ret.get(key, [])
rv.append(value)
ret[key] = rv
return ret
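# Illustrative behaviour of the helpers above (not part of the original file; values are assumed):
#   md = MultiDict([("a", 1), ("a", 2)])
#   md.geteither("missing", "a")   -> 1  (value of the first key that exists)
#   md.getalltext("a")             -> "1 2"
#   md.to_default_dict()           -> {"a": [1, 2]}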
EMPTY = MultiDict()
#==================================================================================================================================================
_quotes = commands.view._quotes
_all_quotes = set((*_quotes.keys(), *_quotes.values()))
def _greater_than(number):
try:
return number.set_positive_sign(True)
except AttributeError:
raise commands.BadArgument("Input <{number}> cannot be compared.")
def _less_than(number):
try:
return number.set_positive_sign(False)
except AttributeError:
raise commands.BadArgument("Input <{number}> cannot be compared.")
def _equal(anything):
return anything
_standard_comparison = {
">": _greater_than,
"<": _less_than,
"=": _equal
}
_equality = {
"=": _equal
}
_delimiters = _all_quotes | _standard_comparison.keys()
def _check_char(c):
return c.isspace() or c in _delimiters
#==================================================================================================================================================
class Equality:
def __init__(self, number):
self.number = number
self.positive_sign = None
def set_positive_sign(self, positive_sign):
self.positive_sign = positive_sign
return self
def to_query(self):
if self.positive_sign is True:
return {"$gt": self.number}
elif self.positive_sign is False:
return {"$lt": self.number}
else:
return self.number
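# Illustrative mapping (not part of the original file; the value 5 is assumed):
#   Equality(5).set_positive_sign(True).to_query()   -> {"$gt": 5}
#   Equality(5).set_positive_sign(False).to_query()  -> {"$lt": 5}
#   Equality(5).to_query()                           -> 5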
class Comparison(commands.Converter):
def __init__(self, type):
self.type = type
def get_comparison(self):
return _standard_comparison
async def convert(self, ctx, argument):
value = await ctx.command._actual_conversion(ctx, self.type, argument, SupressAttributeError("type_conv"))
return Equality(value)
#==================================================================================================================================================
class KeyValue(commands.Converter):
def __init__(self, conversion={}, *, escape=False, clean=False, multiline=False):
self.escape = escape
if clean:
self.clean = string_utils.clean_codeblock
else:
self.clean = str.strip
self.multiline = multiline
self.conversion = {}
self.comparisons = {}
for key, value in conversion.items():
try:
c = value.get_comparison()
except AttributeError:
c = _equality
if isinstance(key, tuple):
for k in key:
self.conversion[k] = value
self.comparisons[k] = c
else:
self.conversion[key] = value
self.comparisons[key] = c
async def convert(self, ctx, argument):
text = self.clean(argument)
ret = MultiDict()
empty = {}
async def resolve(key, value, handle):
key = key.lower()
orig_value = value
if self.escape:
value = value.encode("raw_unicode_escape").decode("unicode_escape")
conv = self.conversion.get(key)
if conv:
try:
value = await ctx.command._actual_conversion(ctx, conv, value, SupressAttributeError(key))
value = handle(value)
except commands.BadArgument:
raise BadValue(key, orig_value)
ret.add(key, value)
if self.multiline:
for line in text.splitlines():
line = line.strip()
if line:
value = ""
for i, c in enumerate(line):
comparison = self.comparisons.get(value, _equality)
if c in comparison:
handle = comparison[c]
key = value
value = line[i+1:]
break
else:
value = value + c
else:
handle = _equal
key = ""
value = line
key, value = key.strip(), value.strip()
await resolve(key, value, handle)
else:
wi = string_utils.split_iter(text, check=_check_char)
key = ""
value = ""
handle = _equal
while True:
try:
word = next(wi)
except StopIteration:
break
if key:
comparison = empty
else:
comparison = self.comparisons.get(value, _equality)
if word in comparison:
key = value
value = ""
handle = comparison[word]
elif word in _quotes:
if value:
raise commands.BadArgument("Quote character must be placed at the start.")
quote_close = _quotes[word]
quote_words = []
escape = False
while True:
try:
w = next(wi)
except StopIteration:
raise commands.BadArgument("No closing quote.")
else:
if escape:
quote_words.append(w)
escape = False
elif w == quote_close:
value = "".join(quote_words)
break
else:
if w == "\\":
escape = True
quote_words.append(w)
elif not word.isspace():
value = value + word
else:
await resolve(key, value, handle)
key = ""
value = ""
handle = _equal
if key or value:
await resolve(key, value, handle)
return ret
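# Illustrative sketch of how this converter behaves (not part of the original file; the
# argument string and conversion mapping below are assumed, and the exact tokenisation
# depends on string_utils.split_iter). Given
#   KeyValue({"level": Comparison(int)})
# and the argument "name=foo level>3 tag", convert() would roughly produce a MultiDict of
#   {"name": "foo", "level": Equality(3) with positive_sign=True, "": "tag"}
# where tokens without an explicit key are stored under the empty-string key.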
#==================================================================================================================================================
class URLConverter(commands.Converter):
def __init__(self, schemes=["http", "https"]):
self.schemes = schemes
self._accept_string = "/".join(schemes)
async def convert(self, ctx, argument):
argument = argument.lstrip("<").rstrip(">")
url = URL(argument)
if url.scheme in self.schemes:
if url.scheme and url.host and url.path:
return url
else:
raise checks.CustomError("Malformed URL.")
else:
raise checks.CustomError(f"This command accepts url with scheme {self._accept_string} only.")
#==================================================================================================================================================
def _transfer_modding(from_, to_):
try:
to_.category = from_.category
except AttributeError:
return
else:
to_.brief = from_.brief
to_.field = from_.field
to_.paragraph = from_.paragraph
# modding.help hack: preserve the new attributes when a command is copied (e.g. when creating a commands.Cog instance)
def _wrap_transfer(func):
@functools.wraps(func)
def new_func(self):
ret = func(self)
_transfer_modding(self, ret)
return ret
return new_func
commands.Command.copy = _wrap_transfer(commands.Command.copy)
# end hack
def help(**kwargs):
def wrapper(command):
command.brief = kwargs.pop("brief", None)
command.category = kwargs.pop("category", None)
command.field = kwargs.pop("field", "Commands")
command.paragraph = kwargs.pop("paragraph", 0)
return command
return wrapper
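# Illustrative usage of the decorator above (not part of the original file; the command
# name and keyword values are assumed):
# @help(brief="Do a thing", category="Misc", field="Commands", paragraph=0)
# @commands.command()
# async def thing(ctx):
#     ...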
| 2.578125
| 3
|
pavement.py
|
robotframework/HTMLChecker
| 9
|
12777352
|
import os
from os.path import join as _join
from subprocess import call
from paver.easy import *
from paver.setuputils import setup
BASEDIR = os.path.dirname(__file__)
VERSION = '0.3'
setup(
name='robotframework-htmlchecker',
package_dir = {'': 'src'},
packages=['HTMLChecker', 'HTMLChecker.lib'],
version=VERSION,
url='https://github.com/robotframework/HTMLChecker',
author='Robot Framework developers',
author_email='<EMAIL>'
)
@task
@needs('generate_setup', 'minilib', 'setuptools.command.sdist')
def sdist():
"""Overrides sdist to make sure that our setup.py is generated."""
pass
@task
def robot():
testdir = _join(BASEDIR, 'test', 'robot')
_sh(['pybot', '-d', _join(testdir, 'results'), testdir])
@task
def version():
version_path = _join(BASEDIR, 'src', 'HTMLChecker', 'version.py')
with open(version_path, 'w') as verfile:
verfile.write('''"This file is updated by running `paver version`."
VERSION="%s"
''' % VERSION)
@task
@needs('version')
def doc():
libdoc = _join(BASEDIR, 'lib', 'libdoc.py')
docdir = _get_dir('doc')
_sh(['python', libdoc , '-o', '%s/HTMLChecker-%s.html' % (docdir, VERSION),
'HTMLChecker'])
@task
@needs('version', 'sdist', 'doc')
def release():
_sh(['git', 'ci', '-a', '-m', 'updated version'])
_sh(['git', 'tag', VERSION])
print('Created git tag for %s' % VERSION)
print('Windows installer has to be created with `paver bdist_wininst`')
print('Remember to `git push --tags` and upload sdist & doc to GitHub')
def _sh(cmd):
env = os.environ
env.update({'PYTHONPATH': 'src'})
call(cmd, shell=(os.sep=='\\'), env=env)
def _get_dir(name):
dirname = _join(BASEDIR, name)
if not os.path.exists(dirname):
os.makedirs(dirname)
return dirname
| 2.078125
| 2
|
dev/results/half_wing_swept_45_deg/machline_iterator.py
|
usuaero/MachLine
| 2
|
12777353
|
# This script is to run automate running machline for the Weber and Brebner results
import numpy as np
import json
import subprocess
import time
import multiprocessing as mp
import os
# Record and print the time required to run MachLine
start_time = time.time()
def mach_iter(AoA, Node, formulation, freestream):
if formulation == "source-free":
formulation_adjusted = "source_free"
else:
formulation_adjusted = formulation
# Modify freestream velocities based on angle of attack
AoA_rad = float(AoA)*np.pi/180
x_flow = freestream * np.cos(AoA_rad)
z_flow = freestream * np.sin(AoA_rad)
# Identify filebases used throughout iterator
filebase = "dev/results/half_wing_swept_45_deg/"
output_filebase = filebase + "MachLine_Results/" + AoA + "_degrees_AoA/half_wing_A_" + Node + "_nodes_" + AoA + "_deg_AoA_" + formulation_adjusted
# Rewrite the input files based on angle of attack and node densities
dict1 = {
"flow": {
"freestream_velocity": [
x_flow,
0.0,
z_flow
]
},
"geometry": {
"file": filebase + "half_wing_A_meshes/half_wing_A_" + Node + "_nodes.vtk",
"mirror_about": "xz",
"singularity_order": {
"doublet": 1,
"source": 0
},
"wake_model": {
"wake_shedding_angle": 90.0,
"trefftz_distance": 10000.0,
"N_panels": 1
},
"reference": {
"area": 1.0
}
},
"solver": {
"formulation": formulation,
"control_point_offset": 1.1e-05
},
"post_processing" : {
},
"output": {
"body_file": output_filebase + "_formulation.vtk",
"wake_file": output_filebase + "_formulation_wake.vtk",
"control_point_file": output_filebase + "_control_points.vtk",
"report_file": "../../report.txt"
}
}
# Identify output file location
filename = AoA + "_deg_angle_of_attack_input.json"
inputfile = filebase + 'half_wing_A_swept_inputs/' + filename
# file_location = "dev/results/half_wing_swept_45deg/test/" + AoA + "_degree_AoA_test_file_" + Node + "_nodes.json"
with open(inputfile, "w") as output_file:
json.dump(dict1, output_file, indent=4)
print("\n***",Node, "node input file saved successfully ***\n")
# Run machline with current input file
# machline_command = "./machline.exe {0}".format(inputfile)
subprocess.call(["./machline.exe", inputfile])
## Main
input_conditions = "Swept_half_wing_conditions_input.json"
json_string = open(input_conditions).read()
json_vals = json.loads(json_string)
# Identify values to pass from input conditions file
Nodes_input = json_vals["geometry"]["nodes"]
AoA_list_input = json_vals["geometry"]["AoA list"]
freestream_velocity = json_vals["flow conditions"]["freestream velocity"]
formulation_input = json_vals["solver"]["formulation"]
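# Illustrative sketch (not part of the original file) of the structure the code above expects
# in Swept_half_wing_conditions_input.json; the values shown are assumed examples only:
# {
#     "geometry": {"nodes": ["20", "40"], "AoA list": ["0", "4.2"]},
#     "flow conditions": {"freestream velocity": 10.0},
#     "solver": {"formulation": ["source-free"]}
# }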
# Identify number of CPU available to work with
# n_processors = mp.cpu_count()
n_processors = 8
Arguments = []
# Change the working directory to the main MachLine directory for execution
os.chdir("../../../")
# Call the machline iterator with the desired inputs
with mp.Pool(n_processors) as pool:
for form in formulation_input:
for AoA in AoA_list_input:
for node in Nodes_input:
Arguments.append((AoA, node, form, freestream_velocity))
pool.starmap(mach_iter, Arguments)
# close() must be called before join(); calling join() on a running pool raises ValueError
pool.close()
pool.join()
# mach_iter(AoA_list_input, Nodes_input, formulation_input, freestream_velocity)
print("MachLine Iterator executed successfully in %s seconds" % "{:.4f}".format(time.time()-start_time))
| 2.3125
| 2
|
Lib/site-packages/hackedit/vendor/coloredlogs/tests.py
|
fochoao/cpython
| 0
|
12777354
|
# Automated tests for the `coloredlogs' package.
#
# Author: <NAME> <<EMAIL>>
# Last Change: November 14, 2015
# URL: https://coloredlogs.readthedocs.org
"""Automated tests for the `coloredlogs` package."""
# Standard library modules.
import logging
import logging.handlers
import os
import random
import re
import string
import sys
import tempfile
import unittest
# External dependencies.
from humanfriendly.terminal import ansi_wrap
# The module we're testing.
import coloredlogs
import coloredlogs.cli
import coloredlogs.demo
from coloredlogs import (
CHROOT_FILES,
decrease_verbosity,
find_defined_levels,
find_handler,
find_hostname,
find_program_name,
get_level,
increase_verbosity,
install,
is_verbose,
level_to_number,
NameNormalizer,
parse_encoded_styles,
set_level,
walk_propagation_tree,
)
from coloredlogs.syslog import SystemLogging
from coloredlogs.converter import capture, convert
# External test dependencies.
from capturer import CaptureOutput
from verboselogs import VerboseLogger
from humanfriendly.compat import StringIO
# Compiled regular expression that matches a single line of output produced by
# the default log format (does not include matching of ANSI escape sequences).
PLAIN_TEXT_PATTERN = re.compile(r'''
(?P<date> \d{4}-\d{2}-\d{2} )
\s (?P<time> \d{2}:\d{2}:\d{2} )
\s (?P<hostname> \S+ )
\s (?P<logger_name> \w+ )
\[ (?P<process_id> \d+ ) \]
\s (?P<severity> [A-Z]+ )
\s (?P<message> .* )
''', re.VERBOSE)
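# Illustrative example of a line this pattern is meant to match (hostname, logger name and
# PID below are assumed):
#   2015-11-14 21:13:45 peter-macbook coloredlogs[28468] DEBUG message with severity 'debug'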
def setUpModule():
"""Speed up the tests by disabling the demo's artificial delay."""
os.environ['COLOREDLOGS_DEMO_DELAY'] = '0'
coloredlogs.demo.DEMO_DELAY = 0
class ColoredLogsTestCase(unittest.TestCase):
"""Container for the `coloredlogs` tests."""
def test_level_to_number(self):
"""Make sure :func:`level_to_number()` works as intended."""
# Make sure the default levels are translated as expected.
assert level_to_number('debug') == logging.DEBUG
assert level_to_number('info') == logging.INFO
assert level_to_number('warn') == logging.WARNING
assert level_to_number('error') == logging.ERROR
assert level_to_number('fatal') == logging.FATAL
# Make sure bogus level names don't blow up.
assert level_to_number('bogus-level') == logging.INFO
def test_find_hostname(self):
"""Make sure :func:`~find_hostname()` works correctly."""
assert find_hostname()
# Create a temporary file as a placeholder for e.g. /etc/debian_chroot.
fd, temporary_file = tempfile.mkstemp()
try:
with open(temporary_file, 'w') as handle:
handle.write('first line\n')
handle.write('second line\n')
CHROOT_FILES.insert(0, temporary_file)
# Make sure the chroot file is being read.
assert find_hostname() == 'first line'
finally:
# Clean up.
CHROOT_FILES.pop(0)
os.unlink(temporary_file)
# Test that unreadable chroot files don't break coloredlogs.
try:
CHROOT_FILES.insert(0, temporary_file)
# Make sure that a usable value is still produced.
assert find_hostname()
finally:
# Clean up.
CHROOT_FILES.pop(0)
def test_host_name_filter(self):
"""Make sure :func:`install()` integrates with :class:`~coloredlogs.HostNameFilter()`."""
install(fmt='%(hostname)s')
with CaptureOutput() as capturer:
logging.info("A truly insignificant message ..")
output = capturer.get_text()
assert find_hostname() in output
def test_program_name_filter(self):
"""Make sure :func:`install()` integrates with :class:`~coloredlogs.ProgramNameFilter()`."""
install(fmt='%(programname)s')
with CaptureOutput() as capturer:
logging.info("A truly insignificant message ..")
output = capturer.get_text()
assert find_program_name() in output
def test_system_logging(self):
"""Make sure the :mod:`coloredlogs.syslog` module works."""
expected_message = random_string(50)
with SystemLogging(programname='coloredlogs-test-suite') as syslog:
logging.info("%s", expected_message)
if syslog and os.path.isfile('/var/log/syslog'):
with open('/var/log/syslog') as handle:
assert any(expected_message in line for line in handle)
def test_name_normalization(self):
"""Make sure :class:`~coloredlogs.NameNormalizer` works as intended."""
nn = NameNormalizer()
for canonical_name in ['debug', 'info', 'warning', 'error', 'critical']:
assert nn.normalize_name(canonical_name) == canonical_name
assert nn.normalize_name(canonical_name.upper()) == canonical_name
assert nn.normalize_name('warn') == 'warning'
assert nn.normalize_name('fatal') == 'critical'
def test_style_parsing(self):
"""Make sure :func:`~coloredlogs.parse_encoded_styles()` works as intended."""
encoded_styles = 'debug=green;warning=yellow;error=red;critical=red,bold'
decoded_styles = parse_encoded_styles(encoded_styles, normalize_key=lambda k: k.upper())
assert sorted(decoded_styles.keys()) == sorted(['debug', 'warning', 'error', 'critical'])
assert decoded_styles['debug']['color'] == 'green'
assert decoded_styles['warning']['color'] == 'yellow'
assert decoded_styles['error']['color'] == 'red'
assert decoded_styles['critical']['color'] == 'red'
assert decoded_styles['critical']['bold'] is True
def test_is_verbose(self):
"""Make sure is_verbose() does what it should :-)."""
set_level(logging.INFO)
assert not is_verbose()
set_level(logging.DEBUG)
assert is_verbose()
set_level(logging.VERBOSE)
assert is_verbose()
def test_increase_verbosity(self):
"""Make sure increase_verbosity() respects default and custom levels."""
# Start from a known state.
set_level(logging.INFO)
assert get_level() == logging.INFO
# INFO -> VERBOSE.
increase_verbosity()
assert get_level() == logging.VERBOSE
# VERBOSE -> DEBUG.
increase_verbosity()
assert get_level() == logging.DEBUG
# DEBUG -> NOTSET.
increase_verbosity()
assert get_level() == logging.NOTSET
# NOTSET -> NOTSET.
increase_verbosity()
assert get_level() == logging.NOTSET
def test_decrease_verbosity(self):
"""Make sure decrease_verbosity() respects default and custom levels."""
# Start from a known state.
set_level(logging.INFO)
assert get_level() == logging.INFO
# INFO -> WARNING.
decrease_verbosity()
assert get_level() == logging.WARNING
# WARNING -> ERROR.
decrease_verbosity()
assert get_level() == logging.ERROR
# ERROR -> CRITICAL.
decrease_verbosity()
assert get_level() == logging.CRITICAL
# CRITICAL -> CRITICAL.
decrease_verbosity()
assert get_level() == logging.CRITICAL
def test_level_discovery(self):
"""Make sure find_defined_levels() always reports the levels defined in Python's standard library."""
defined_levels = find_defined_levels()
level_values = defined_levels.values()
for number in (0, 10, 20, 30, 40, 50):
assert number in level_values
def test_walk_propagation_tree(self):
"""Make sure walk_propagation_tree() properly walks the tree of loggers."""
root, parent, child, grand_child = self.get_logger_tree()
# Check the default mode of operation.
loggers = list(walk_propagation_tree(grand_child))
assert loggers == [grand_child, child, parent, root]
# Now change the propagation (non-default mode of operation).
child.propagate = False
loggers = list(walk_propagation_tree(grand_child))
assert loggers == [grand_child, child]
def test_find_handler(self):
"""Make sure find_handler() works as intended."""
root, parent, child, grand_child = self.get_logger_tree()
# Add some handlers to the tree.
stream_handler = logging.StreamHandler()
syslog_handler = logging.handlers.SysLogHandler()
child.addHandler(stream_handler)
parent.addHandler(syslog_handler)
# Make sure the first matching handler is returned.
matched_handler, matched_logger = find_handler(grand_child, lambda h: isinstance(h, logging.Handler))
assert matched_handler is stream_handler
# Make sure the first matching handler of the given type is returned.
matched_handler, matched_logger = find_handler(child, lambda h: isinstance(h, logging.handlers.SysLogHandler))
assert matched_handler is syslog_handler
def get_logger_tree(self):
"""Create and return a tree of loggers."""
# Get the root logger.
root = logging.getLogger()
# Create a top level logger for ourselves.
parent_name = random_string()
parent = logging.getLogger(parent_name)
# Create a child logger.
child_name = '%s.%s' % (parent_name, random_string())
child = logging.getLogger(child_name)
# Create a grand child logger.
grand_child_name = '%s.%s' % (child_name, random_string())
grand_child = logging.getLogger(grand_child_name)
return root, parent, child, grand_child
def test_plain_text_output_format(self):
"""Inspect the plain text output of coloredlogs."""
logger = VerboseLogger(random_string(25))
stream = StringIO()
install(level=logging.NOTSET, logger=logger, stream=stream)
# Test that filtering on severity works.
logger.setLevel(logging.INFO)
logger.debug("No one should see this message.")
assert len(stream.getvalue().strip()) == 0
# Test that the default output format looks okay in plain text.
logger.setLevel(logging.NOTSET)
for method, severity in ((logger.debug, 'DEBUG'),
(logger.info, 'INFO'),
(logger.verbose, 'VERBOSE'),
(logger.warning, 'WARN'),
(logger.error, 'ERROR'),
(logger.critical, 'CRITICAL')):
# Prepare the text.
text = "This is a message with severity %r." % severity.lower()
# Log the message with the given severity.
method(text)
# Get the line of output generated by the handler.
output = stream.getvalue()
lines = output.splitlines()
last_line = lines[-1]
assert text in last_line
assert severity in last_line
assert PLAIN_TEXT_PATTERN.match(last_line)
def test_html_conversion(self):
"""Check the conversion from ANSI escape sequences to HTML."""
ansi_encoded_text = 'I like %s - www.eelstheband.com' % ansi_wrap('birds', bold=True, color='blue')
assert ansi_encoded_text == 'I like \x1b[1;34mbirds\x1b[0m - www.eelstheband.com'
html_encoded_text = convert(ansi_encoded_text)
assert html_encoded_text == (
'I like <span style="font-weight: bold; color: blue;">birds</span> - '
'<a href="http://www.eelstheband.com" style="color: inherit;">www.eelstheband.com</a>'
)
def test_output_interception(self):
"""Test capturing of output from external commands."""
expected_output = 'testing, 1, 2, 3 ..'
assert capture(['sh', '-c', 'echo -n %s' % expected_output]) == expected_output
def test_cli_demo(self):
"""Test the command line colored logging demonstration."""
with CaptureOutput() as capturer:
main('coloredlogs', '--demo')
output = capturer.get_text()
# Make sure the output contains all of the expected logging level names.
for name in 'debug', 'info', 'warning', 'error', 'critical':
assert name.upper() in output
def test_cli_conversion(self):
"""Test the command line HTML conversion."""
output = main('coloredlogs', '--convert', 'coloredlogs', '--demo', capture=True)
# Make sure the output is encoded as HTML.
assert '<span' in output
def test_implicit_usage_message(self):
"""Test that the usage message is shown when no actions are given."""
assert 'Usage:' in main('coloredlogs', capture=True)
def test_explicit_usage_message(self):
"""Test that the usage message is shown when ``--help`` is given."""
assert 'Usage:' in main('coloredlogs', '--help', capture=True)
def main(*arguments, **options):
"""Simple wrapper to run the command line interface."""
capture = options.get('capture', False)
saved_argv = sys.argv
saved_stdout = sys.stdout
try:
sys.argv = arguments
if capture:
sys.stdout = StringIO()
coloredlogs.cli.main()
if capture:
return sys.stdout.getvalue()
finally:
sys.argv = saved_argv
sys.stdout = saved_stdout
def random_string(length=25):
"""Generate a random string."""
return ''.join(random.choice(string.ascii_letters) for i in range(length))
| 2.0625
| 2
|
nesi/devices/softbox/api/models/portprofile_models.py
|
inexio/NESi
| 30
|
12777355
|
# This file is part of the NESi software.
#
# Copyright (c) 2020
# Original Software Design by <NAME> <https://github.com/etingof>.
#
# Software adapted by inexio <https://github.com/inexio>.
# - <NAME> <https://github.com/unkn0wn-user>
# - <NAME> <https://github.com/Connyko65>
# - <NAME> <https://github.com/Dinker1996>
#
# License: https://github.com/inexio/NESi/LICENSE.rst
import uuid
from nesi.devices.softbox.api import db
class PortProfile(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(64))
description = db.Column(db.String())
box_id = db.Column(db.Integer, db.ForeignKey('box.id'))
type = db.Column(db.Enum('service', 'spectrum', 'dpbo', 'rtx', 'vect', 'sos', 'ghs', 'qos', 'policer', 'vce',
'data-rate', 'noise-margin', 'inp-delay', 'mode-specific-psd'))
# Alcatel Data
up_policer = db.Column(db.String(), default=None, nullable=True)
down_policer = db.Column(db.String(), default=None, nullable=True)
committed_info_rate = db.Column(db.Integer(), default=0, nullable=False)
committed_burst_size = db.Column(db.Integer(), default=0, nullable=False)
logical_flow_type = db.Column(db.Enum('generic'), default='generic')
# Huawei data
maximum_bit_error_ratio = db.Column(db.Integer(), default=None)
path_mode = db.Column(db.Integer(), default=None)
rate = db.Column(db.String(), default=None)
etr_max = db.Column(db.Integer(), default=None)
etr_min = db.Column(db.Integer(), default=None)
ndr_max = db.Column(db.Integer(), default=None)
working_mode = db.Column(db.Integer(), default=None)
eside_electrical_length = db.Column(db.String(), default=None)
assumed_exchange_psd = db.Column(db.String(), default=None)
eside_cable_model = db.Column(db.String(), default=None)
min_usable_signal = db.Column(db.Integer(), default=None)
span_frequency = db.Column(db.String(), default=None)
dpbo_calculation = db.Column(db.Integer(), default=None)
snr_margin = db.Column(db.String(), default=None)
rate_adapt = db.Column(db.String(), default=None)
snr_mode = db.Column(db.String(), default=None)
inp_4khz = db.Column(db.String(), default=None)
inp_8khz = db.Column(db.String(), default=None)
interleaved_delay = db.Column(db.String(), default=None)
delay_variation = db.Column(db.Integer(), default=None)
channel_policy = db.Column(db.Integer(), default=None)
nominal_transmit_PSD_ds = db.Column(db.Integer(), default=None)
nominal_transmit_PSD_us = db.Column(db.Integer(), default=None)
aggregate_transmit_power_ds = db.Column(db.Integer(), default=None)
aggregate_transmit_power_us = db.Column(db.Integer(), default=None)
aggregate_receive_power_us = db.Column(db.Integer(), default=None)
upstream_psd_mask_selection = db.Column(db.Integer(), default=None)
psd_class_mask = db.Column(db.Integer(), default=None)
psd_limit_mask = db.Column(db.Integer(), default=None)
l0_time = db.Column(db.Integer(), default=None)
l2_time = db.Column(db.Integer(), default=None)
l3_time = db.Column(db.Integer(), default=None)
max_transmite_power_reduction = db.Column(db.Integer(), default=None)
total_max_power_reduction = db.Column(db.Integer(), default=None)
bit_swap_ds = db.Column(db.Integer(), default=None)
bit_swap_us = db.Column(db.Integer(), default=None)
overhead_datarate_us = db.Column(db.Integer(), default=None)
overhead_datarate_ds = db.Column(db.Integer(), default=None)
allow_transitions_to_idle = db.Column(db.Integer(), default=None)
allow_transitions_to_lowpower = db.Column(db.Integer(), default=None)
reference_clock = db.Column(db.String(), default=None)
cyclic_extension_flag = db.Column(db.Integer(), default=None)
force_inp_ds = db.Column(db.Integer(), default=None)
force_inp_us = db.Column(db.Integer(), default=None)
g_993_2_profile = db.Column(db.Integer(), default=None)
mode_specific = db.Column(db.String(), default=None)
transmode = db.Column(db.String(), default=None)
T1_413 = db.Column(db.String(), default=None)
G_992_1 = db.Column(db.String(), default=None)
G_992_2 = db.Column(db.String(), default=None)
G_992_3 = db.Column(db.String(), default=None)
G_992_4 = db.Column(db.String(), default=None)
G_992_5 = db.Column(db.String(), default=None)
AnnexB_G_993_2 = db.Column(db.String(), default=None)
ETSI = db.Column(db.String(), default=None)
us0_psd_mask = db.Column(db.Integer(), default=None)
vdsltoneblackout = db.Column(db.String(), default=None)
internal_id = db.Column(db.Integer(), default=None)
vmac_ipoe = db.Column(db.Enum('enable', 'disable'), default=None)
vmac_pppoe = db.Column(db.Enum('enable', 'disable'), default=None)
vmac_pppoa = db.Column(db.Enum('enable', 'disable'), default=None)
vlan_mac = db.Column(db.Enum('forwarding', 'discard'), default=None)
packet_policy_multicast = db.Column(db.Enum('forward', 'discard'), default=None)
packet_policy_unicast = db.Column(db.Enum('forward', 'discard'), default=None)
security_anti_ipspoofing = db.Column(db.Enum('enable', 'disable'), default=None)
security_anti_macspoofing = db.Column(db.Enum('enable', 'disable'), default=None)
igmp_mismatch = db.Column(db.Enum('transparent'), default=None)
commit = db.Column(db.Boolean(), default=False)
number = db.Column(db.Integer, default=None)
| 1.648438
| 2
|
train_softmax_clean.py
|
ad349/fashionmnist
| 0
|
12777356
|
<filename>train_softmax_clean.py
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import numpy as np
import tensorflow as tf
import argparse
from utils import normalize, decode
from model import graph
def input_pipeline(trainpath, validationpath, buffer_size, batch_size):
# Skip the header and filter any comments.
training_dataset = tf.data.TextLineDataset(trainpath).skip(1).filter(lambda line: tf.not_equal(tf.substr(line, 0, 1), "#"))
validation_dataset = tf.data.TextLineDataset(validationpath).skip(1).filter(lambda line: tf.not_equal(tf.substr(line, 0, 1), "#"))
default_values = [[0.0] for _ in range(785)]
# The dataset api reads the csv as text.
# Using the below function we can split the text into labels and pixels.
training_dataset = (training_dataset.cache().map(lambda x: decode(x, default_values)))
validation_dataset = (validation_dataset.cache().map(lambda x: decode(x, default_values)))
# Normalize the dataset to 0-1 range
training_dataset = training_dataset.map(lambda label, pixel: tf.py_func(normalize, [label, pixel], [tf.float32, tf.float32]))
validation_dataset = validation_dataset.map(lambda label, pixel: tf.py_func(normalize, [label, pixel], [tf.float32, tf.float32]))
# Randomly shuffles the dataset
training_dataset = training_dataset.shuffle(buffer_size=buffer_size)
# Creating batches here for training
training_dataset = training_dataset.batch(batch_size)
validation_dataset = validation_dataset.batch(batch_size)
# A feedable iterator is defined by a handle placeholder and its structure. We
# could use the `output_types` and `output_shapes` properties of either
# `training_dataset` or `validation_dataset` here, because they have
# identical structure.
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(handle, training_dataset.output_types, training_dataset.output_shapes)
next_element = iterator.get_next()
# You can use feedable iterators with a variety of different kinds of iterator
# (such as one-shot and initializable iterators).
training_iterator = training_dataset.make_initializable_iterator()
validation_iterator = validation_dataset.make_initializable_iterator()
return next_element, handle, training_iterator, validation_iterator
def train(batch_size, learning_rate, x, y):
logits = graph(x)
_y = tf.one_hot(indices=tf.cast(y, tf.int32), depth=10)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=_y, logits=logits), axis=0)
acc = tf.equal(tf.argmax(logits, 1), tf.argmax(_y, 1))
acc = tf.reduce_mean(tf.cast(acc, tf.float32))
global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="global_step")
optimizer = tf.train.AdamOptimizer(learning_rate)
# Use the explicitly created global_step variable directly (rather than looking it up
# via tf.train.get_global_step()) so the step counter is unambiguous.
train_op = optimizer.minimize(loss=loss, global_step=global_step)
return loss, acc, train_op, global_step
def print_results(iteration, losses, accuracy):
print("Batch: {0:5d} loss: {1:0.3f} accuracy: {2:0.3f}"
.format(iteration, np.mean(losses), np.mean(accuracy)))
def print_results_epoch(iteration, losses, accuracy):
print("Epoch: {0:5d} loss: {1:0.3f} accuracy {2:0.3f}"
.format(iteration+1, np.mean(losses), np.mean(accuracy)))
def print_results_val(losses, accuracy):
print("Validation loss: {0:0.3f} accuracy {1:0.3f}"
.format(np.mean(losses), np.mean(accuracy)))
# def print_accuracy(iteration, acc):
# print("Batch: {0:5d} Accuracy: {1:0.3f}"
# .format(iteration, np.mean(acc)))
# def print_accuracy_epoch(iteration, acc):
# print("Epoch: {0:5d} Accuracy: {1:0.3f}"
# .format(iteration+1, np.mean(acc)))
def parser(argv):
parser = argparse.ArgumentParser(description='Trains a Deep Neural Network on Fashion MNIST Data')
parser.add_argument('--train_csv', default='training.csv', type=str, required=True, help='Path to the training csv.')
parser.add_argument('--validation_csv', default='validation.csv', type=str, help='Path to the validation csv.')
parser.add_argument('--batch_size', default=100, type=int, help='Batch Size of one iteration.')
parser.add_argument('--buffer_size', default=10000, type=int, help='Buffer Size for random selection of images.')
parser.add_argument('--lr', default=0.01, type=float, help='Learning Rate.')
parser.add_argument('--nrof_epochs', default=20, type=int, help='Number of Epochs for training.')
parser.add_argument('--log_dir', default='./log', type=str, help='Location of log.')
parser.add_argument('--model_dir', default='./model', type=str, help='Location of saved model.')
args = parser.parse_args()
return args
def main(args):
trainpath = args.train_csv
validationpath = args.validation_csv
batch_size = args.batch_size
buffer_size = args.buffer_size
learning_rate = args.lr
nepochs = args.nrof_epochs
logdir = args.log_dir
savepath = args.model_dir
if not os.path.exists(trainpath):
raise IOError('Training file does not exist')
if not buffer_size or not batch_size:
raise ValueError('Please provide valid value for buffer_size and batch_size')
if not os.path.exists(savepath):
os.makedirs(savepath)
x = tf.placeholder('float32',shape=[batch_size,None])
y = tf.placeholder('int32',shape=[batch_size])
next_element, handle, training_iterator, validation_iterator = input_pipeline(trainpath, validationpath, buffer_size, batch_size)
loss, acc, train_op, global_step = train(batch_size, learning_rate, x, y)
tf.summary.scalar('loss', loss)
tf.summary.scalar('accuracy', acc)
merged = tf.summary.merge_all()
training_loss = []
epoch_loss = []
train_acc = []
epoch_acc = []
val_loss = []
val_acc = []
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer()))
if os.path.exists(os.path.join(savepath,"checkpoint")):
print("="*30)
print("Restoring existing model..")
print("="*30)
print()
saver.restore(sess, os.path.join(savepath, "model.ckpt"))
train_writer = tf.summary.FileWriter(logdir, graph=tf.get_default_graph())
# train_writer.add_graph(tf.get_default_graph())
# The `Iterator.string_handle()` method returns a tensor that can be evaluated
# and used to feed the `handle` placeholder.
training_handle = sess.run(training_iterator.string_handle())
validation_handle = sess.run(validation_iterator.string_handle())
for i in range(nepochs):
sess.run(training_iterator.initializer)
while True:
try:
label_batch, image_batch = sess.run(next_element, feed_dict={handle: training_handle})
summary, _loss, _acc, _, g = sess.run([merged, loss, acc, train_op, global_step], feed_dict = {x:image_batch, y:label_batch})
training_loss.append(_loss)
epoch_loss.append(_loss)
train_acc.append(_acc)
epoch_acc.append(_acc)
if tf.train.global_step(sess, global_step)%10==0:
train_writer.add_summary(summary, g)
print_results(g, training_loss, train_acc)
training_loss = []
train_acc = []
except tf.errors.OutOfRangeError:
print('='*60)
print('Epoch {} Finished !'.format(i+1))
print_results_epoch(i, epoch_loss, epoch_acc)
print('='*60)
print()
print('Running forward pass on validation set..')
sess.run(validation_iterator.initializer)
while True:
try:
val_label_batch, val_image_batch = sess.run(next_element, feed_dict={handle: validation_handle})
_val_loss, _val_acc = sess.run([loss, acc], feed_dict = {x:val_image_batch, y:val_label_batch})
val_loss.append(_val_loss)
val_acc.append(_val_acc)
except tf.errors.OutOfRangeError:
break
print('='*60)
print_results_val(val_loss, val_acc)
print('='*60)
print()
break
# print_results_epoch(i, epoch_loss, epoch_acc)
epoch_loss = []
epoch_acc = []
if int(nepochs - i) <= 2:
saver.save(sess, os.path.join(savepath,"model.ckpt"))
print("Model saved in %s" % (savepath))
print()
return
if __name__ == '__main__':
main(parser(sys.argv[1:]))
| 2.859375
| 3
|
qord/core/shard.py
|
TheFarGG/qord
| 0
|
12777357
|
<filename>qord/core/shard.py<gh_stars>0
# MIT License
# Copyright (c) 2022 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from qord.exceptions import MissingPrivilegedIntents, ShardCloseException
import asyncio
import zlib
import json
import sys
import time
import logging
import typing
if typing.TYPE_CHECKING:
from qord.core.client import Client
class GatewayOP:
DISPATCH = 0
HEARTBEAT = 1
IDENTIFY = 2
PRESENCE_UPDATE = 3
VOICE_STATE_UPDATE = 4
RESUME = 6
RECONNECT = 7
REQUEST_GUILD_MEMBERS = 8
INVALID_SESSION = 9
HELLO = 10
HEARTBEAT_ACK = 11
_LOGGER = logging.getLogger(__name__)
_ZLIB_SUFFIX = b'\x00\x00\xff\xff'
_UNHANDLEABLE_CODES = (4004, 4010, 4012, 4013, 4014)
class _SignalResume(Exception):
def __init__(self, resume: bool = True, delay: float = None) -> None:
self.resume = resume
self.delay = delay
class Shard:
r"""Represents a shard that connects to Discord gateway.
A shard is simply a separate websocket connection to Discord gateway. In
bots that are in less then 1000 guilds, There is generally only one shard
that maintains all the guilds. However when this limit is exceeded, Discord
requires the bots to shard their connection to equally divide the workload
of guilds in multiple shards.
Sharding is handled transparently by the library automatically and requires
no user interaction. This class mostly is documented for completeness and
is not usually not relevant to a general use case.
You should not instansiate this class yourself. Instead, Consider using one
of the following ways to obtain it:
- :attr:`Client.shards`
- :attr:`Client.get_shard`
"""
if typing.TYPE_CHECKING:
_worker_task: typing.Optional[asyncio.Task]
_heartbeat_task: typing.Optional[asyncio.Task]
_last_heartbeat: typing.Optional[float]
_heartbeat_interval: typing.Optional[float]
_session_id: typing.Optional[str]
_sequence: typing.Optional[int]
_latency: float
def __init__(self, id: int, client: Client) -> None:
self._id = id
self._client = client
# For faster attribute accessing
self._rest = client._rest
self._handle_dispatch = client._dispatch.handle
self._running = False
self._worker_task = None
self._heartbeat_task = None
self._inflator = None
self._buffer = bytearray()
self._identified = asyncio.Event()
self._resume_on_connect = False
self._clear_gateway_data()
def _clear_gateway_data(self):
self._last_heartbeat = None
self._latency = float("inf")
self._heartbeat_interval = None
self._session_id = None
self._sequence = None
@property
def id(self) -> int:
r"""The ID of the shard. This starts from 0 and for each shard maintained by
a client, This ID increments till :attr:`Client.shards_count`.
In theory, If a client is running 5 shards for example. All shard IDs can
be obtained by::
>>> shard_ids = list(range(client.shards_count)) # shards_count is 5
[0, 1, 2, 3, 4]
Returns
-------
:class:`builtins.int`
"""
return self._id
@property
def client(self) -> Client:
r"""The client that instansiated the client.
Returns
-------
:class:`Client`
"""
return self._client
@property
def latency(self) -> float:
r"""The latency of this shard. This is measured on the basis of delay between
a heartbeat sent by the shard and its acknowledgement sent by the Discord gateway.
Returns
-------
:class:`builtins.float`
"""
return self._latency
@property
def heartbeat_interval(self) -> typing.Optional[float]:
r"""The heartbeat interval for this shard. This is only available after
shard has done the initial websocket handshake.
Returns
-------
:class:`builtins.float`
"""
return self._heartbeat_interval
@property
def session_id(self) -> typing.Optional[str]:
r"""The current session ID for the shard. This is only available
after shard has successfully connected to gateway.
The session ID is not same for all shards. Furthermore, The session
ID is not guaranteed to be same through the shard lifetime as shard
may start new sessions for reconnection purposes.
Returns
-------
:class:`builtins.str`
"""
return self._session_id
@property
def sequence(self) -> typing.Optional[int]:
r"""The current dispatch sequence number of the shard. This may be None.
Returns
-------
:class:`builtins.int`
"""
return self._sequence
def _log(self, level: int, message: typing.Any, *args: typing.Any) -> None:
_LOGGER.log(level, f"[Shard {self._id}] {message}", *args)
def _decompress_message(self, message: bytes) -> typing.Any:
self._buffer.extend(message)
decomp = self._inflator.decompress(self._buffer) # type: ignore
self._buffer = bytearray()
return decomp.decode()
def _notify_waiters(self):
# This is a hack to prevent timeout error when initially
# starting shards.
self._identified.set()
self._identified.clear()
async def _receive(self) -> typing.Any:
message = await self._websocket.receive() # type: ignore
message = message.data
if isinstance(message, bytes):
if len(message) > 4 and message[-4:] != _ZLIB_SUFFIX:
return
message = self._decompress_message(message)
if isinstance(message, int):
# More than likely a close code.
return message
elif isinstance(message, str):
try:
ret = json.loads(message)
except json.JSONDecodeError:
# message is not a valid JSON?
return message
else:
return ret
return False
async def _heartbeat_handler(self, interval: float):
self._heartbeat_interval = interval
self._log(logging.INFO, f"HEARTBEAT task started with interval of {interval} seconds.")
while True:
await self._send_heartbeat_packet()
self._last_heartbeat = time.time()
await asyncio.sleep(interval)
async def _handle_recv(self) -> typing.Any:
packet = await self._receive()
if not packet:
return
if isinstance(packet, int):
# Close code is sent.
if packet not in _UNHANDLEABLE_CODES:
raise _SignalResume(resume=True, delay=None)
if packet == 4014:
raise MissingPrivilegedIntents(shard=self)
else:
raise ShardCloseException(
self,
packet,
f"Shard closed with unhandleable close code: {packet}"
)
if packet is False:
return False
op = packet["op"]
data = packet["d"]
if op is GatewayOP.HELLO:
if self._resume_on_connect:
await self._send_resume_packet()
self._resume_on_connect = False
else:
await self._send_identify_packet()
interval = data["heartbeat_interval"] // 1000
self._heartbeat_task = asyncio.create_task(
self._heartbeat_handler(interval),
name=f"shard-heartbeat-worker:{self._id}"
)
return True
elif op is GatewayOP.HEARTBEAT_ACK:
self._latency = time.time() - self._last_heartbeat # type: ignore
elif op is GatewayOP.DISPATCH:
self._sequence = packet["s"]
event = packet["t"]
if event == "READY":
self._session_id = data["session_id"]
self._identified.set()
self._log(logging.INFO, "Established a new session with Discord gateway. (Session: %s)", self._session_id)
elif event == "RESUMED":
self._log(logging.INFO, "Resumed the session %s", self._session_id)
await self._handle_dispatch(self, event, data)
elif op is GatewayOP.HEARTBEAT:
self._log(logging.DEBUG, "Gateway is requesting a HEARTBEAT.")
await self._send_heartbeat_packet()
elif op is GatewayOP.INVALID_SESSION:
if self._session_id is None:
# If we're here, we more than likely got identify-ratelimited;
# this generally should never happen.
self._notify_waiters()
self._log(logging.INFO, "Session was prematurely invalidated.")
raise _SignalResume(resume=False, delay=5.0)
self._log(logging.INFO, "Session %s has been invalidated. Attempting to RESUME if possible.", self._session_id)
# NOTE: inner payload (`data`) indicates whether the session is resumeable
raise _SignalResume(resume=data, delay=5.0)
elif op is GatewayOP.RECONNECT:
self._log(logging.INFO, "Gateway has requested to reconnect the shard.")
raise _SignalResume(resume=True)
return True
async def _launch(self, url: str) -> None:
if self._running:
raise RuntimeError("Shard is already running")
self._running = True
while self._running:
session = self._rest._ensure_session()
self._websocket = await session.ws_connect(url)
self._inflator = zlib.decompressobj()
while True:
try:
recv = await self._handle_recv()
except _SignalResume as signal:
if signal.delay:
self._log(logging.INFO, "Delaying %s seconds before reconnecting.", signal.delay)
await asyncio.sleep(signal.delay)
self._resume_on_connect = signal.resume
await self._close(code=4000)
break
else:
if not recv:
self._log(logging.INFO, "Shard is closing.")
self._running = False
return
async def _wrapped_launch(self, url: str, future: asyncio.Future) -> None:
try:
await self._launch(url)
except Exception as exc:
self._running = False
future.set_result(exc)
async def _close(self, code: int = 1000, _clean: bool = False) -> None:
if self._heartbeat_task:
self._heartbeat_task.cancel()
self._heartbeat_task = None
if self._websocket:
await self._websocket.close(code=code)
self._websocket = None
if _clean:
self._clear_gateway_data()
self._identified.clear()
self._worker_task.cancel()
self._running = False
self._worker_task = None
async def _send_data(self, data: typing.Dict[str, typing.Any]) -> None:
await self._websocket.send_str(json.dumps(data)) # type: ignore
async def _send_heartbeat_packet(self):
await self._send_data({
"op": GatewayOP.HEARTBEAT,
"d": self._sequence,
})
self._log(logging.DEBUG, "Sent the HEARTBEAT packet.")
async def _send_identify_packet(self):
await self._send_data({
"op": GatewayOP.IDENTIFY,
"d": {
"properties": {
"$browser": "Qord",
"$device": "Qord",
"$os": sys.platform,
},
"intents": self._client.intents.value,
"token": self._rest.token,
"compress": True,
"shard": [self._id, self._client.shards_count],
},
})
self._log(logging.DEBUG, "Sent the IDENTIFY packet.")
async def _send_resume_packet(self):
await self._send_data({
"op": GatewayOP.RESUME,
"d": {
"session_id": self._session_id,
"token": self._rest.token,
"seq": self._sequence,
},
})
self._log(logging.DEBUG, "Sent the RESUME packet.")
| 1.601563
| 2
|
example/states.py
|
BLeAm/trigger
| 0
|
12777358
|
from trigger_generator import *
@trigger
class MyTrigger:
counter: int = 0
build()
| 1.335938
| 1
|
robot_manager/handler/irc/connection_handler.py
|
ES-TUDelft/interaction-design-tool-ir
| 1
|
12777359
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **
#
# ================== #
# CONNECTION_HANDLER #
# ================== #
# Handler class for controlling the connection to the robot
#
# @author ES
# **
import logging
import threading
from autobahn.twisted.component import Component, run
from twisted.internet.defer import inlineCallbacks
import es_common.utils.config_helper as config_helper
from es_common.model.observable import Observable
class ConnectionHandler(object):
def __init__(self):
self.logger = logging.getLogger("Connection Handler")
self.rie = None
self.session_observers = Observable()
self.session = None
@inlineCallbacks
def on_connect(self, session, details=None):
self.logger.debug("Created session: {}".format(session))
self.session = session
yield self.session_observers.notify_all(session)
def start_rie_session(self, robot_name=None, robot_realm=None):
try:
if robot_realm is None:
# get the realm from config
name_key = "pepper" if robot_name is None else robot_name.lower()
robot_realm = config_helper.get_robot_settings()["realm"][name_key]
self.logger.info("{} REALM: {}".format(robot_name, robot_realm))
self.rie = Component(
transports=[{
'url': u"wss://wamp.robotsindeklas.nl",
'serializers': ['msgpack'],
'max_retries': 0
}],
realm=robot_realm
)
self.logger.info("** {}".format(threading.current_thread().name))
self.rie.on_join(self.on_connect)
self.logger.info("Running the rie component")
run([self.rie])
except Exception as e:
self.logger.error("Unable to run the rie component | {}".format(e))
def stop_session(self):
try:
if self.session:
self.session.leave()
self.session_observers.notify_all(None)
self.logger.info("Closed the robot session.")
else:
self.logger.info("There is no active session.")
except Exception as e:
self.logger.error("Error while closing rie session: {}".format(e))
| 1.726563
| 2
|
setup.py
|
theroggy/geofileops
| 1
|
12777360
|
<filename>setup.py
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
with open('version.txt', mode='r') as file:
version = file.readline()
setuptools.setup(
name='geofileops',
version=version,
author='<NAME>',
author_email='<EMAIL>',
description='Package to do spatial operations on geo files.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/theroggy/geofileops',
include_package_data=True,
packages=setuptools.find_packages(),
install_requires=['geopandas>=0.9', 'pygeos', 'pyproj', 'psutil'],
extras_require = {
'full': ['simplification']
},
classifiers=[
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
],
python_requires='>=3.8',
)
| 1.585938
| 2
|
scripts/price_feed_scripts/read_price_feed.py
|
coozebra/chainlink-mix
| 9
|
12777361
|
#!/usr/bin/python3
from brownie import PriceFeed
def main():
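# Brownie's contract container behaves like a list of deployments, so indexing with
# len(PriceFeed) - 1 below picks the most recently deployed PriceFeed contract.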
price_feed_contract = PriceFeed[len(PriceFeed) - 1]
print("Reading data from {}".format(price_feed_contract.address))
print(price_feed_contract.getLatestPrice())
| 3.0625
| 3
|
src/024_train-probabilistic-svm.py
|
BirdVox/bv_context_adaptation
| 5
|
12777362
|
<reponame>BirdVox/bv_context_adaptation<filename>src/024_train-probabilistic-svm.py
import csv
import datetime
import h5py
from sklearn.externals import joblib
import numpy as np
import os
import pandas as pd
import pickle
import sklearn.metrics
import sklearn.preprocessing
import sklearn.svm
import skm
import sys
import time
sys.path.append("../src")
import localmodule
# Define constants.
data_dir = localmodule.get_data_dir()
dataset_name = localmodule.get_dataset_name()
patch_width = 32
n_patches_per_clip = 1
aug_str = "original"
instanced_aug_str = aug_str
# Parse arguments.
args = sys.argv[1:]
test_unit_str = args[0]
trial_id = int(args[1])
# Print header.
start_time = int(time.time())
print(str(datetime.datetime.now()) + " Start.")
print("Training probabilistic SVM for " + dataset_name + " clips.")
print("Test unit: " + test_unit_str + ".")
print("Trial ID: " + str(trial_id) + ".")
print("")
print("h5py version: {:s}".format(h5py.__version__))
print("numpy version: {:s}".format(np.__version__))
print("pandas version: {:s}".format(pd.__version__))
print("scikit-learn version: {:s}".format(sklearn.__version__))
print("skm version: {:s}".format(skm.__version__))
print("")
# Retrieve fold such that test_unit_str is in the test set.
folds = localmodule.fold_units()
fold = [f for f in folds if test_unit_str in f[0]][0]
test_units = fold[0]
training_units = fold[1]
validation_units = fold[2]
# Define input folder.
logmelspec_name = "_".join([dataset_name, "skm-logmelspec"])
logmelspec_dir = os.path.join(data_dir, logmelspec_name)
aug_dir = os.path.join(logmelspec_dir, aug_str)
# Initialize matrix of training data.
X_train = []
y_train = []
# Loop over training units.
for train_unit_str in training_units:
# Load HDF5 container of logmelspecs.
hdf5_name = "_".join([dataset_name, instanced_aug_str, train_unit_str])
in_path = os.path.join(aug_dir, hdf5_name + ".hdf5")
in_file = h5py.File(in_path)
# List clips.
clip_names = list(in_file["logmelspec"].keys())
# Loop over clips.
for clip_name in clip_names:
# Read label.
y_clip = int(clip_name.split("_")[3])
# Load logmelspec.
logmelspec = in_file["logmelspec"][clip_name].value
# Load time-frequency patches.
logmelspec_width = logmelspec.shape[1]
logmelspec_mid = np.round(logmelspec_width * 0.5).astype('int')
logmelspec_start = logmelspec_mid -\
np.round(patch_width * n_patches_per_clip * 0.5).astype('int')
# Extract patch.
patch_start = logmelspec_start
patch_stop = patch_start + patch_width
patch = logmelspec[:, patch_start:patch_stop]
# Ravel patch.
X_train.append(np.ravel(patch))
# Append label.
y_train.append(y_clip)
# Concatenate raveled patches as rows.
X_train = np.stack(X_train)
# Load SKM model.
models_dir = localmodule.get_models_dir()
model_name = "skm-cv"
model_dir = os.path.join(models_dir, model_name)
unit_dir = os.path.join(model_dir, test_unit_str)
trial_str = "trial-" + str(trial_id)
trial_dir = os.path.join(unit_dir, trial_str)
model_name = "_".join([
dataset_name, model_name, test_unit_str, trial_str, "model.pkl"
])
model_path = os.path.join(trial_dir, model_name)
skm_model = skm.SKM(k=256)
skm_model = skm_model.load(model_path)
# Transform training set with SKM.
X_train = skm_model.transform(X_train.T).T
# Load standardizer.
scaler_name = "_".join([
dataset_name,
"skm-cv",
test_unit_str,
trial_str,
"scaler.pkl"
])
scaler_path = os.path.join(trial_dir, scaler_name)
scaler = joblib.load(scaler_path)
# Standardize training set.
X_train = scaler.transform(X_train)
# Define CSV file for validation metrics.
val_metrics_name = "_".join([
dataset_name,
"skm-cv",
test_unit_str,
trial_str,
"svm-model",
"val-metrics.csv"
])
csv_header = [
"Dataset",
"Test unit",
"Trial ID",
"log2(C)",
"Validation accuracy (%)"
]
val_metrics_path = os.path.join(
trial_dir, val_metrics_name)
# Open CSV file as Pandas DataFrame.
val_metrics_df = pd.read_csv(val_metrics_path, header=None, names=csv_header)
# Find C maximizing validation accuracy.
max_val_acc = np.max(val_metrics_df["Validation accuracy (%)"])
best_log2C = val_metrics_df["log2(C)"][
np.argmax(val_metrics_df["Validation accuracy (%)"])]
# Define SVM model.
svc = sklearn.svm.SVC(
C=2.0**best_log2C,
kernel='rbf',
degree=3,
gamma='auto',
coef0=0.0,
shrinking=True,
probability=True,
tol=0.001,
cache_size=200,
class_weight=None,
verbose=False,
max_iter=-1,
random_state=None)
# Train SVM model.
svc.fit(X_train, y_train)
# Save SVM model.
if np.sign(best_log2C) >= 0:
best_log2C_str = "+" + str(abs(best_log2C)).zfill(2)
else:
best_log2C_str = "-" + str(abs(best_log2C)).zfill(2)
svm_name = "_".join([
dataset_name,
"skm-cv",
test_unit_str,
trial_str,
"svm-proba-model",
"log2C-(" + best_log2C_str + ").pkl"
])
svm_path = os.path.join(trial_dir, svm_name)
joblib.dump(svc, svm_path)
# Initialize matrix of test data.
X_test = []
y_test_true = []
# Load HDF5 container of logmelspecs.
hdf5_name = "_".join([dataset_name, instanced_aug_str, test_unit_str])
in_path = os.path.join(aug_dir, hdf5_name + ".hdf5")
in_file = h5py.File(in_path)
# List clips.
clip_names = list(in_file["logmelspec"].keys())
# Loop over clips.
for clip_name in clip_names:
# Read label.
y_clip = int(clip_name.split("_")[3])
# Load logmelspec.
logmelspec = in_file["logmelspec"][clip_name].value
# Load time-frequency patches.
logmelspec_width = logmelspec.shape[1]
logmelspec_mid = np.round(logmelspec_width * 0.5).astype('int')
logmelspec_start = logmelspec_mid -\
np.round(patch_width * n_patches_per_clip * 0.5).astype('int')
# Extract patch.
patch_start = logmelspec_start
patch_stop = patch_start + patch_width
patch = logmelspec[:, patch_start:patch_stop]
# Ravel patch.
X_test.append(np.ravel(patch))
# Append label.
y_test_true.append(y_clip)
# Concatenate raveled patches as rows.
X_test = np.stack(X_test)
# Transform test set with SKM.
X_test = skm_model.transform(X_test.T).T
# Standardize test set.
X_test = scaler.transform(X_test)
# Predict.
y_test_pred = svc.predict(X_test)
# Create CSV file.
model_name = "skm-proba"
predict_unit_str = test_unit_str
prediction_name = "_".join([dataset_name, model_name,
"test-" + test_unit_str, trial_str, "predict-" + predict_unit_str,
"clip-predictions"])
prediction_path = os.path.join(trial_dir, prediction_name + ".csv")
csv_file = open(prediction_path, 'w')
csv_writer = csv.writer(csv_file, delimiter=',')
# Create CSV header.
csv_header = ["Dataset", "Test unit", "Prediction unit", "Timestamp",
    "Frequency", "Augmentation", "Key", "Ground truth",
    "Predicted probability"]
csv_writer.writerow(csv_header)
# Loop over keys.
for clip_id, key in enumerate(clip_names):
# Store prediction as DataFrame row.
key_split = key.split("_")
timestamp_str = key_split[1]
freq_str = key_split[2]
ground_truth_str = key_split[3]
aug_str = key_split[4]
predicted_probability = y_test_pred[clip_id]
predicted_probability_str = "{:.16f}".format(predicted_probability)
row = [dataset_name, test_unit_str, predict_unit_str, timestamp_str,
freq_str, aug_str, key, ground_truth_str, predicted_probability_str]
csv_writer.writerow(row)
# Close CSV file.
csv_file.close()
# Print score.
print("Accuracy = {:5.2f}".format(
100 * sklearn.metrics.accuracy_score(y_test_pred, y_test_true)))
print("")
# Print elapsed time.
print(str(datetime.datetime.now()) + " Finish.")
elapsed_time = time.time() - int(start_time)
elapsed_hours = int(elapsed_time / (60 * 60))
elapsed_minutes = int((elapsed_time % (60 * 60)) / 60)
elapsed_seconds = elapsed_time % 60.
elapsed_str = "{:>02}:{:>02}:{:>05.2f}".format(elapsed_hours,
elapsed_minutes,
elapsed_seconds)
print("Total elapsed time: " + elapsed_str + ".")
| 2.109375
| 2
|
app/app.py
|
rilder-almeida/projeto_case_enfase
| 0
|
12777363
|
"""
Application module that uses Streamlit to build the front-end structure
"""
# FIXME: For some reason Streamlit does not accept importing the pages
# via __init__.py or a relative import
# pylint: disable=import-error
import streamlit as st
from introducao import intro
from questao_problema import case
from analise_geografica import geografica
from analise_prazos_x_atrasos import prazos_atrasos
from report import report
from solucoes import solucoes
from consideracoes_finais import consideracoes_finais
# pylint: enable=import-error
PAGES = {
"Introdução": intro,
"Questão Problema": case,
"Análise Geográfica das Vendas e Compras": geografica,
"Análise dos Atrasos dos Pedidos": prazos_atrasos,
"Pandas Profiling": report,
"Relatório Final e Soluções Propostas": solucoes,
"Considerações": consideracoes_finais,
}
st.sidebar.title("Índice")
selection = st.sidebar.radio("", list(PAGES.keys()))
page = PAGES[selection]
page()
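# Each PAGES entry is a zero-argument callable that renders one page. A minimal
# page module would look roughly like this (hypothetical sketch, not one of the
# modules imported above):
#
#     import streamlit as st
#
#     def intro():
#         st.title("Introdução")
#         st.write("...")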
| 2.796875
| 3
|
customer/migrations/0002_auto_20210618_2044.py
|
Sukikiroi/Django-Smart-lms-backend
| 1
|
12777364
|
# Generated by Django 3.2.4 on 2021-06-18 19:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customer', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Messages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=240, verbose_name='Name')),
                ('message', models.CharField(max_length=240, verbose_name='Message')),
],
),
migrations.RemoveField(
model_name='customer',
name='created',
),
]
| 1.820313
| 2
|
contrib/experimental/alert-ucs.py
|
MailOnline/alerta
| 0
|
12777365
|
<reponame>MailOnline/alerta<filename>contrib/experimental/alert-ucs.py
import UcsSdk
import time
def EventHandler(mce):
    print('Received a New Event with ClassId: ' + str(mce.mo.classId))
    print("ChangeList: ", mce.changeList)
    print("EventId: ", mce.eventId)
def main():
    ucs = UcsSdk.UcsHandle()
    ucs.Login(username='', password='')
    ucs.AddEventHandler(classId='', callBack=EventHandler)
    try:
        while True:
            print('.', end='', flush=True)
            time.sleep(5)
    finally:
        ucs.Logout()
if __name__ == '__main__':
main()
| 2.25
| 2
|
recipe_db/etl/loader.py
|
scheb/beer-analytics
| 21
|
12777366
|
<reponame>scheb/beer-analytics
import abc
from typing import Tuple, List
from django.core.exceptions import ValidationError
from django.db import transaction
from recipe_db.etl.format.parser import FormatParser, ParserResult
from recipe_db.models import Recipe, RecipeHop, RecipeFermentable, RecipeYeast
class ResultPostProcessor:
@abc.abstractmethod
def process(self, result: ParserResult) -> None:
raise NotImplementedError
class RecipeLoader:
@transaction.atomic
def import_recipe(self, uid: str, result: ParserResult) -> None:
result.recipe.uid = uid
self.set_amount_percent(result.fermentables)
self.set_amount_percent(result.hops)
self.validate_and_fix_recipe(result.recipe)
result.recipe.save()
result.recipe.recipefermentable_set.add(*result.fermentables, bulk=False)
for fermentable in result.fermentables:
self.validate_and_fix_fermentable(fermentable)
fermentable.save()
result.recipe.recipehop_set.add(*result.hops, bulk=False)
for hop in result.hops:
self.validate_and_fix_hop(hop)
hop.save()
result.recipe.recipeyeast_set.add(*result.yeasts, bulk=False)
for yeast in result.yeasts:
self.validate_and_fix_yeast(yeast)
yeast.save()
def set_amount_percent(self, items: list) -> None:
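        # Normalize each item's amount to a fraction of the combined total, so
        # that e.g. amounts of 3 and 1 become 0.75 and 0.25.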
total_amount = 0
for item in items:
if item.amount is not None:
total_amount += item.amount
if total_amount:
for item in items:
if item.amount is not None:
item.amount_percent = item.amount / total_amount
def validate_and_fix_recipe(self, recipe: Recipe):
self.unset_bad_data(recipe)
def validate_and_fix_fermentable(self, fermentable: RecipeFermentable):
self.unset_bad_data(fermentable)
def validate_and_fix_hop(self, hop: RecipeHop):
# Remove odd alpha values
if hop.alpha is not None and hop.alpha > 30:
if hop.kind_raw is None:
hop.alpha = None
elif not ("extract" in hop.kind_raw.lower() or "oil" in hop.kind_raw.lower()):
hop.alpha = None
if hop.time is not None and hop.use is not None:
if hop.use == RecipeHop.DRY_HOP:
if hop.time > 43200: # Limit dry hop time to 30 days max
hop.time = None
else:
if hop.time > 240:
hop.time = None # Limit boil time to 4 hours max
self.unset_bad_data(hop)
def validate_and_fix_yeast(self, yeast: RecipeYeast):
self.unset_bad_data(yeast)
def unset_bad_data(self, item):
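        # Repeatedly run Django field validation, blanking any attribute that
        # fails, until the item validates cleanly. If every attribute has been
        # tried and validation still fails, re-raise the last ValidationError.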
last_err = None
for i in range(0, len(item.__dict__.keys())):
try:
item.clean_fields()
return
except ValidationError as err:
last_err = err
for attribute_name in err.message_dict:
setattr(item, attribute_name, None)
if last_err is not None:
raise last_err
class RecipeFileProcessor:
def __init__(
self,
importer: RecipeLoader,
format_parsers: List[FormatParser],
post_processors: List[ResultPostProcessor] = None,
replace_existing=False
) -> None:
self.importer = importer
self.format_parsers = format_parsers
self.post_processors = post_processors
self.replace_existing = replace_existing
def import_recipe_from_file(self, file_paths: List[str], uid: str) -> Tuple[Recipe, bool]:
# Clear the existing recipe if necessary, otherwise skip
existing_recipes = Recipe.objects.filter(pk=uid)
if existing_recipes.count() > 0:
if self.replace_existing:
existing_recipes.delete()
else:
return Recipe.objects.get(pk=uid), False
result = ParserResult()
parsing_steps = zip(file_paths, self.format_parsers)
for parsing_step in parsing_steps:
(file_path, parser) = parsing_step
if file_path is not None:
parser.parse(result, file_path)
if self.post_processors is not None:
for post_processor in self.post_processors:
post_processor.process(result)
self.importer.import_recipe(uid, result)
return result.recipe, True
| 2.09375
| 2
|
src/utils/setseed.py
|
seung-sss/model-optimization-level3-cv-04
| 1
|
12777367
|
<reponame>seung-sss/model-optimization-level3-cv-04
import numpy as np
import random
import torch
def setSeed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if use multi-GPU
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
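if __name__ == "__main__":
    # Minimal usage sketch: fix every RNG seed once, before datasets and models
    # are built, so repeated runs are reproducible (the value 42 is arbitrary).
    setSeed(42)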
| 2.171875
| 2
|
services/traction/api/endpoints/routes/v1/tenant/governance/schema_templates.py
|
bcgov/traction
| 12
|
12777368
|
import logging
from uuid import UUID
from fastapi import APIRouter, Depends
from sqlalchemy.ext.asyncio import AsyncSession
from starlette import status
from starlette.requests import Request
from api.core.config import settings
from api.endpoints.dependencies.db import get_db
from api.endpoints.dependencies.tenant_security import get_from_context
from api.endpoints.routes.v1.link_utils import build_list_links
from api.services.v1 import governance_service
from api.endpoints.models.v1.governance import (
SchemaTemplateListResponse,
SchemaTemplateListParameters,
CreateSchemaTemplatePayload,
CreateSchemaTemplateResponse,
ImportSchemaTemplatePayload,
ImportSchemaTemplateResponse,
TemplateStatusType,
)
from api.tasks import SendCredDefRequestTask, SendSchemaRequestTask
router = APIRouter()
logger = logging.getLogger(__name__)
@router.get(
"/", status_code=status.HTTP_200_OK, response_model=SchemaTemplateListResponse
)
async def list_schema_templates(
request: Request,
page_num: int | None = 1,
page_size: int | None = settings.DEFAULT_PAGE_SIZE,
name: str | None = None,
schema_id: str | None = None,
schema_template_id: UUID | None = None,
status: TemplateStatusType | None = None,
tags: str | None = None,
deleted: bool | None = False,
db: AsyncSession = Depends(get_db),
) -> SchemaTemplateListResponse:
wallet_id = get_from_context("TENANT_WALLET_ID")
tenant_id = get_from_context("TENANT_ID")
parameters = SchemaTemplateListParameters(
url=str(request.url),
page_num=page_num,
page_size=page_size,
name=name,
deleted=deleted,
schema_id=schema_id,
schema_template_id=schema_template_id,
status=status,
tags=tags,
)
items, total_count = await governance_service.list_schema_templates(
db, tenant_id, wallet_id, parameters
)
links = build_list_links(total_count, parameters)
return SchemaTemplateListResponse(
items=items, count=len(items), total=total_count, links=links
)
@router.post("/", status_code=status.HTTP_200_OK)
async def create_schema_template(
payload: CreateSchemaTemplatePayload,
db: AsyncSession = Depends(get_db),
) -> CreateSchemaTemplateResponse:
"""
    Create a new schema and/or credential definition.
    "schema_definition" defines the new schema.
If "credential_definition" is provided, create a credential definition.
"""
logger.info("> create_schema_template()")
wallet_id = get_from_context("TENANT_WALLET_ID")
tenant_id = get_from_context("TENANT_ID")
logger.debug(f"wallet_id = {wallet_id}")
logger.debug(f"tenant_id = {tenant_id}")
item, c_t_item = await governance_service.create_schema_template(
db, tenant_id, wallet_id, payload=payload
)
links = [] # TODO
# this will kick off the call to the ledger and then event listeners will finish
# populating the schema (and cred def) data.
logger.debug("> > SendSchemaRequestTask.assign()")
await SendSchemaRequestTask.assign(
tenant_id, wallet_id, payload.schema_definition, item.schema_template_id
)
logger.debug("< < SendSchemaRequestTask.assign()")
logger.debug(f"item = {item}")
logger.debug(f"credential_template = {c_t_item}")
logger.info("< create_schema_template()")
return CreateSchemaTemplateResponse(
item=item, credential_template=c_t_item, links=links
)
@router.post("/import", status_code=status.HTTP_200_OK)
async def import_schema_template(
payload: ImportSchemaTemplatePayload,
db: AsyncSession = Depends(get_db),
) -> ImportSchemaTemplateResponse:
"""
Import an existing public schema and optionally create a credential definition.
"schema_id" is the ledger's schema id.
If "credential_definition" is provided, create a credential definition.
"""
logger.info("> import_schema_template()")
wallet_id = get_from_context("TENANT_WALLET_ID")
tenant_id = get_from_context("TENANT_ID")
logger.debug(f"wallet_id = {wallet_id}")
logger.debug(f"tenant_id = {tenant_id}")
item, c_t_item = await governance_service.import_schema_template(
db, tenant_id, wallet_id, payload=payload
)
links = [] # TODO
# this will kick off the call to the ledger and then event listeners will finish
# populating the cred def
if c_t_item:
logger.debug("> > SendCredDefRequestTask.assign()")
await SendCredDefRequestTask.assign(
tenant_id, wallet_id, c_t_item.credential_template_id
)
logger.debug("< < SendCredDefRequestTask.assign()")
logger.debug(f"item = {item}")
logger.debug(f"credential_template = {c_t_item}")
logger.info("< import_schema_template()")
return ImportSchemaTemplateResponse(
item=item, credential_template=c_t_item, links=links
)
| 1.90625
| 2
|
pdf.py
|
isLinXu/AIToodlBox
| 3
|
12777369
|
<gh_stars>1-10
from PIL import Image
import fitz # fitz: pip install PyMuPDF
def pdf2images(doc, zoom=2, color='RGB'):
"""pdf to images
example:
        doc = fitz.open("/path/to/pdf")
images = pdf2images(doc)
example:
        stream = open("/path/to/pdf", 'rb')
doc = fitz.open(stream)
images = pdf2images(doc)
example:
doc = fitz.open(stream=bytes, filetype='bytes')
images = pdf2images(doc)
"""
mat = fitz.Matrix(zoom, zoom)
images = []
# mat = fitz.Matrix(zoom_x, zoom_y).preRotate(rotate)
# for pg in range(doc.pageCount):
# page = doc[pg]
for page in doc:
pix = page.getPixmap(matrix=mat, alpha=False)
images.append(Image.frombytes(color, [pix.width, pix.height], pix.samples))
return images
if __name__ == "__main__":
import sys
doc = fitz.open(sys.argv[1])
images = pdf2images(doc)
print(len(images), images[0].size)
| 2.890625
| 3
|
local/lib/python3.6/site-packages/pgadmin4/pgadmin/tools/restore/tests/test_restore_create_job_unit_test.py
|
sahilsdei/django_ecommerce
| 0
|
12777370
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2018, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import sys
import simplejson as json
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from pgadmin.utils import server_utils as server_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
if sys.version_info < (3, 3):
from mock import patch, MagicMock
else:
from unittest.mock import patch, MagicMock
class RestoreCreateJobTest(BaseTestGenerator):
"""Test the RestoreCreateJob class"""
scenarios = [
('When restore object with default options',
dict(
class_params=dict(
sid=1,
name='test_restore_server',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_restore_file',
format='custom',
custom=False,
verbose=True,
blobs=True,
schemas=[],
tables=[],
database='postgres'
),
url='/restore/job/{0}',
expected_cmd_opts=['--verbose'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None]
)),
('When restore object with format directory',
dict(
class_params=dict(
sid=1,
name='test_restore_server',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_restore_file',
format='directory',
custom=False,
verbose=True,
blobs=False,
schemas=[],
tables=[],
database='postgres'
),
url='/restore/job/{0}',
expected_cmd_opts=['--verbose', '--format=d'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None]
)),
('When restore object with the sections options',
dict(
class_params=dict(
sid=1,
name='test_restore_server',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_restore_file',
format='custom',
no_of_jobs='2',
custom=False,
verbose=True,
schemas=[],
tables=[],
database='postgres',
data=True,
pre_data=True,
post_data=True,
only_data=True,
only_schema=True
),
url='/restore/job/{0}',
expected_cmd_opts=['--verbose', '--jobs', '2',
'--section=pre-data', '--section=data',
'--section=post-data'],
not_expected_cmd_opts=[],
# Below options should be enabled once we fix the issue #3368
# not_expected_cmd_opts=['--data-only', '--schema-only'],
expected_exit_code=[0, None],
)),
('When restore the object with Type of objects',
dict(
class_params=dict(
sid=1,
name='test_restore_server',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_restore_file',
format='custom',
no_of_jobs='2',
custom=False,
verbose=True,
schemas=[],
tables=[],
database='postgres',
only_data=True,
only_schema=True,
dns_owner=True
),
url='/restore/job/{0}',
expected_cmd_opts=['--verbose', '--data-only'],
not_expected_cmd_opts=[],
# Below options should be enabled once we fix the issue #3368
# not_expected_cmd_opts=['--schema-only', '--no-owner'],
expected_exit_code=[0, None],
)),
('When restore object with option - Do not save',
dict(
class_params=dict(
sid=1,
name='test_restore_server',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_restore_file',
format='custom',
verbose=True,
custom=False,
schemas=[],
tables=[],
database='postgres',
dns_owner=True,
dns_privilege=True,
dns_tablespace=True,
only_data=False
),
url='/restore/job/{0}',
expected_cmd_opts=['--no-owner',
'--no-tablespaces',
'--no-privileges'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None]
)),
('When restore object with option - Do not save comments',
dict(
class_params=dict(
sid=1,
name='test_restore_server',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_restore_file',
format='custom',
verbose=True,
custom=False,
schemas=[],
tables=[],
database='postgres',
no_comments=True,
only_data=False
),
url='/restore/job/{0}',
expected_cmd_opts=['--no-comments'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None],
server_min_version=110000,
            message='Restore object with --no-comments is not supported '
                    'by EPAS/PG servers older than 11.0'
)),
('When restore object with option - Queries',
dict(
class_params=dict(
sid=1,
name='test_restore_file',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_backup_file',
format='custom',
verbose=True,
schemas=[],
tables=[],
database='postgres',
clean=True,
include_create_database=True,
single_transaction=True,
),
url='/restore/job/{0}',
expected_cmd_opts=['--create', '--clean',
'--single-transaction'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None]
)),
        ('When restore object with option - Disable',
dict(
class_params=dict(
sid=1,
name='test_restore_file',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_backup_file',
format='custom',
verbose=True,
schemas=[],
tables=[],
database='postgres',
disable_trigger=True,
no_data_fail_table=True,
only_schema=False
),
url='/restore/job/{0}',
expected_cmd_opts=['--disable-triggers',
'--no-data-for-failed-tables'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None]
)),
('When restore object with option - Miscellaneous',
dict(
class_params=dict(
sid=1,
name='test_restore_file',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_backup_file',
format='custom',
verbose=True,
schemas=[],
tables=[],
database='postgres',
use_set_session_auth=True,
exit_on_error=True,
),
url='/restore/job/{0}',
# Add '--use_set_session_auth' into
# expected_cmd_opts once #3363 fixed
expected_cmd_opts=['--exit-on-error'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None]
)),
]
def setUp(self):
if self.server['default_binary_paths'] is None:
self.skipTest(
"default_binary_paths is not set for the server {0}".format(
self.server['name']
)
)
@patch('pgadmin.tools.restore.Server')
@patch('pgadmin.tools.restore.current_user')
@patch('pgadmin.tools.restore.RestoreMessage')
@patch('pgadmin.tools.restore.filename_with_file_manager_path')
@patch('pgadmin.tools.restore.BatchProcess')
@patch('pgadmin.utils.driver.psycopg2.server_manager.ServerManager.'
'export_password_env')
def runTest(self, export_password_env_mock, batch_process_mock,
filename_mock, restore_message_mock,
current_user_mock, server_mock):
class TestMockServer():
def __init__(self, name, host, port, id, username):
self.name = name
self.host = host
self.port = port
self.id = id
self.username = username
self.db_name = ''
self.server_id = parent_node_dict["server"][-1]["server_id"]
mock_obj = TestMockServer(self.class_params['name'],
self.class_params['host'],
self.class_params['port'],
self.server_id,
self.class_params['username']
)
mock_result = server_mock.query.filter_by.return_value
mock_result.first.return_value = mock_obj
filename_mock.return_value = self.params['file']
batch_process_mock.set_env_variables = MagicMock(
return_value=True
)
batch_process_mock.start = MagicMock(
return_value=True
)
export_password_env_mock.return_value = True
server_response = server_utils.connect_server(self, self.server_id)
if server_response["info"] == "Server connected.":
db_owner = server_response['data']['user']['name']
self.data = database_utils.get_db_data(db_owner)
self.db_name = self.data['name']
if hasattr(self, 'server_min_version') and \
server_response["data"]["version"] < \
self.server_min_version:
self.skipTest(self.message)
url = self.url.format(self.server_id)
# Create the restore job
response = self.tester.post(url,
data=json.dumps(self.params),
content_type='html/json')
self.assertEqual(response.status_code, 200)
self.assertTrue(restore_message_mock.called)
self.assertTrue(batch_process_mock.called)
if self.expected_cmd_opts:
for opt in self.expected_cmd_opts:
self.assertIn(
opt,
batch_process_mock.call_args_list[0][1]['args']
)
if self.not_expected_cmd_opts:
for opt in self.not_expected_cmd_opts:
self.assertNotIn(
opt,
batch_process_mock.call_args_list[0][1]['args']
)
| 1.851563
| 2
|
change_ab.py
|
ximury/python
| 0
|
12777371
|
<reponame>ximury/python
a, b = 3, 4
print(a, b)
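# Swap the two names with tuple packing/unpacking; no temporary variable is needed.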
a, b = b, a
print(a, b)
print('---------------------')
a = 1
a += 1
print(a)
| 3.484375
| 3
|
openff/evaluator/protocols/utils.py
|
lilyminium/openff-evaluator
| 0
|
12777372
|
"""
A set of utilities for setting up property estimation workflows.
"""
from dataclasses import astuple, dataclass
from typing import Generic, Optional, Tuple, TypeVar
from openff.evaluator import unit
from openff.evaluator.attributes import PlaceholderValue
from openff.evaluator.datasets import PropertyPhase
from openff.evaluator.protocols import (
analysis,
coordinates,
forcefield,
gradients,
groups,
miscellaneous,
openmm,
reweighting,
storage,
)
from openff.evaluator.protocols.groups import ConditionalGroup
from openff.evaluator.storage.data import StoredSimulationData
from openff.evaluator.thermodynamics import Ensemble
from openff.evaluator.utils.observables import ObservableType
from openff.evaluator.workflow import ProtocolGroup
from openff.evaluator.workflow.schemas import ProtocolReplicator
from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue
S = TypeVar("S", bound=analysis.BaseAverageObservable)
T = TypeVar("T", bound=reweighting.BaseMBARProtocol)
@dataclass
class SimulationProtocols(Generic[S]):
"""The common set of protocols which would be required to estimate an observable
by running a new molecule simulation."""
build_coordinates: coordinates.BuildCoordinatesPackmol
assign_parameters: forcefield.BaseBuildSystem
energy_minimisation: openmm.OpenMMEnergyMinimisation
equilibration_simulation: openmm.OpenMMSimulation
production_simulation: openmm.OpenMMSimulation
analysis_protocol: S
converge_uncertainty: ProtocolGroup
decorrelate_trajectory: analysis.DecorrelateTrajectory
decorrelate_observables: analysis.DecorrelateObservables
def __iter__(self):
yield from astuple(self)
@dataclass
class ReweightingProtocols(Generic[S, T]):
"""The common set of protocols which would be required to re-weight an observable
from cached simulation data."""
unpack_stored_data: storage.UnpackStoredSimulationData
join_trajectories: reweighting.ConcatenateTrajectories
join_observables: reweighting.ConcatenateObservables
build_reference_system: forcefield.BaseBuildSystem
evaluate_reference_potential: reweighting.BaseEvaluateEnergies
build_target_system: forcefield.BaseBuildSystem
evaluate_target_potential: reweighting.BaseEvaluateEnergies
statistical_inefficiency: S
replicate_statistics: miscellaneous.DummyProtocol
decorrelate_reference_potential: analysis.DecorrelateObservables
decorrelate_target_potential: analysis.DecorrelateObservables
decorrelate_observable: analysis.DecorrelateObservables
zero_gradients: Optional[gradients.ZeroGradients]
reweight_observable: T
def __iter__(self):
yield from astuple(self)
def generate_base_reweighting_protocols(
statistical_inefficiency: S,
reweight_observable: T,
replicator_id: str = "data_replicator",
id_suffix: str = "",
) -> Tuple[ReweightingProtocols[S, T], ProtocolReplicator]:
"""Constructs a set of protocols which, when combined in a workflow schema, may be
executed to reweight a set of cached simulation data to estimate the average
value of an observable.
Parameters
----------
statistical_inefficiency
The protocol which will be used to compute the statistical inefficiency and
equilibration time of the observable of interest. This information will be
used to decorrelate the cached data prior to reweighting.
reweight_observable
The MBAR reweighting protocol to use to reweight the observable to the target
state. This method will automatically set the reduced potentials on the
object.
replicator_id: str
The id to use for the cached data replicator.
id_suffix: str
A string suffix to append to each of the protocol ids.
Returns
-------
    The protocols to add to the workflow and the replicator which will clone the
    workflow for each piece of cached simulation data.
"""
# Create the replicator which will apply these protocol once for each piece of
# cached simulation data.
data_replicator = ProtocolReplicator(replicator_id=replicator_id)
data_replicator.template_values = ProtocolPath("full_system_data", "global")
# Validate the inputs.
assert isinstance(statistical_inefficiency, analysis.BaseAverageObservable)
assert data_replicator.placeholder_id in statistical_inefficiency.id
assert data_replicator.placeholder_id not in reweight_observable.id
replicator_suffix = f"_{data_replicator.placeholder_id}{id_suffix}"
    # Unpack all of the stored data.
unpack_stored_data = storage.UnpackStoredSimulationData(
"unpack_data{}".format(replicator_suffix)
)
unpack_stored_data.simulation_data_path = ReplicatorValue(replicator_id)
# Join the individual trajectories together.
join_trajectories = reweighting.ConcatenateTrajectories(
f"join_trajectories{id_suffix}"
)
join_trajectories.input_coordinate_paths = ProtocolPath(
"coordinate_file_path", unpack_stored_data.id
)
join_trajectories.input_trajectory_paths = ProtocolPath(
"trajectory_file_path", unpack_stored_data.id
)
join_observables = reweighting.ConcatenateObservables(
f"join_observables{id_suffix}"
)
join_observables.input_observables = ProtocolPath(
"observables", unpack_stored_data.id
)
# Calculate the reduced potentials for each of the reference states.
build_reference_system = forcefield.BaseBuildSystem(
f"build_system{replicator_suffix}"
)
build_reference_system.force_field_path = ProtocolPath(
"force_field_path", unpack_stored_data.id
)
build_reference_system.coordinate_file_path = ProtocolPath(
"coordinate_file_path", unpack_stored_data.id
)
build_reference_system.substance = ProtocolPath("substance", unpack_stored_data.id)
reduced_reference_potential = openmm.OpenMMEvaluateEnergies(
f"reduced_potential{replicator_suffix}"
)
reduced_reference_potential.parameterized_system = ProtocolPath(
"parameterized_system", build_reference_system.id
)
reduced_reference_potential.thermodynamic_state = ProtocolPath(
"thermodynamic_state", unpack_stored_data.id
)
reduced_reference_potential.coordinate_file_path = ProtocolPath(
"coordinate_file_path", unpack_stored_data.id
)
reduced_reference_potential.trajectory_file_path = ProtocolPath(
"output_trajectory_path", join_trajectories.id
)
# Calculate the reduced potential of the target state.
build_target_system = forcefield.BaseBuildSystem(f"build_system_target{id_suffix}")
build_target_system.force_field_path = ProtocolPath("force_field_path", "global")
build_target_system.substance = ProtocolPath("substance", "global")
build_target_system.coordinate_file_path = ProtocolPath(
"output_coordinate_path", join_trajectories.id
)
reduced_target_potential = openmm.OpenMMEvaluateEnergies(
f"reduced_potential_target{id_suffix}"
)
reduced_target_potential.thermodynamic_state = ProtocolPath(
"thermodynamic_state", "global"
)
reduced_target_potential.parameterized_system = ProtocolPath(
"parameterized_system", build_target_system.id
)
reduced_target_potential.coordinate_file_path = ProtocolPath(
"output_coordinate_path", join_trajectories.id
)
reduced_target_potential.trajectory_file_path = ProtocolPath(
"output_trajectory_path", join_trajectories.id
)
reduced_target_potential.gradient_parameters = ProtocolPath(
"parameter_gradient_keys", "global"
)
# Compute the observable gradients.
zero_gradients = gradients.ZeroGradients(f"zero_gradients{id_suffix}")
zero_gradients.force_field_path = ProtocolPath("force_field_path", "global")
zero_gradients.gradient_parameters = ProtocolPath(
"parameter_gradient_keys", "global"
)
# Decorrelate the target potentials and observables.
if not isinstance(statistical_inefficiency, analysis.BaseAverageObservable):
raise NotImplementedError()
decorrelate_target_potential = analysis.DecorrelateObservables(
f"decorrelate_target_potential{id_suffix}"
)
decorrelate_target_potential.time_series_statistics = ProtocolPath(
"time_series_statistics", statistical_inefficiency.id
)
decorrelate_target_potential.input_observables = ProtocolPath(
"output_observables", reduced_target_potential.id
)
decorrelate_observable = analysis.DecorrelateObservables(
f"decorrelate_observable{id_suffix}"
)
decorrelate_observable.time_series_statistics = ProtocolPath(
"time_series_statistics", statistical_inefficiency.id
)
decorrelate_observable.input_observables = ProtocolPath(
"output_observables", zero_gradients.id
)
# Decorrelate the reference potentials. Due to a quirk of how workflow replicators
# work the time series statistics need to be passed via a dummy protocol first.
#
# Because the `statistical_inefficiency` and `decorrelate_reference_potential`
# protocols are replicated by the same replicator the `time_series_statistics`
# input of `decorrelate_reference_potential_X` will take its value from
# the `time_series_statistics` output of `statistical_inefficiency_X` rather than
    # as a list of [statistical_inefficiency_0.time_series_statistics...
# statistical_inefficiency_N.time_series_statistics]. Passing the statistics via
# an un-replicated intermediate resolves this.
replicate_statistics = miscellaneous.DummyProtocol(
f"replicated_statistics{id_suffix}"
)
replicate_statistics.input_value = ProtocolPath(
"time_series_statistics", statistical_inefficiency.id
)
decorrelate_reference_potential = analysis.DecorrelateObservables(
f"decorrelate_reference_potential{replicator_suffix}"
)
decorrelate_reference_potential.time_series_statistics = ProtocolPath(
"output_value", replicate_statistics.id
)
decorrelate_reference_potential.input_observables = ProtocolPath(
"output_observables", reduced_reference_potential.id
)
# Finally, apply MBAR to get the reweighted value.
reweight_observable.reference_reduced_potentials = ProtocolPath(
"output_observables[ReducedPotential]", decorrelate_reference_potential.id
)
reweight_observable.target_reduced_potentials = ProtocolPath(
"output_observables[ReducedPotential]", decorrelate_target_potential.id
)
reweight_observable.observable = ProtocolPath(
"output_observables", decorrelate_observable.id
)
reweight_observable.frame_counts = ProtocolPath(
"time_series_statistics.n_uncorrelated_points", statistical_inefficiency.id
)
protocols = ReweightingProtocols(
unpack_stored_data,
#
join_trajectories,
join_observables,
#
build_reference_system,
reduced_reference_potential,
#
build_target_system,
reduced_target_potential,
#
statistical_inefficiency,
replicate_statistics,
#
decorrelate_reference_potential,
decorrelate_target_potential,
#
decorrelate_observable,
zero_gradients,
#
reweight_observable,
)
return protocols, data_replicator
def generate_reweighting_protocols(
observable_type: ObservableType,
replicator_id: str = "data_replicator",
id_suffix: str = "",
) -> Tuple[
ReweightingProtocols[analysis.AverageObservable, reweighting.ReweightObservable],
ProtocolReplicator,
]:
assert observable_type not in [
ObservableType.KineticEnergy,
ObservableType.TotalEnergy,
ObservableType.Enthalpy,
]
statistical_inefficiency = analysis.AverageObservable(
f"observable_inefficiency_$({replicator_id}){id_suffix}"
)
statistical_inefficiency.bootstrap_iterations = 1
reweight_observable = reweighting.ReweightObservable(
f"reweight_observable{id_suffix}"
)
protocols, data_replicator = generate_base_reweighting_protocols(
statistical_inefficiency, reweight_observable, replicator_id, id_suffix
)
protocols.statistical_inefficiency.observable = ProtocolPath(
f"observables[{observable_type.value}]", protocols.unpack_stored_data.id
)
if (
observable_type != ObservableType.PotentialEnergy
and observable_type != ObservableType.TotalEnergy
and observable_type != ObservableType.Enthalpy
and observable_type != ObservableType.ReducedPotential
):
protocols.zero_gradients.input_observables = ProtocolPath(
f"output_observables[{observable_type.value}]",
protocols.join_observables.id,
)
else:
protocols.zero_gradients = None
protocols.decorrelate_observable = protocols.decorrelate_target_potential
protocols.reweight_observable.observable = ProtocolPath(
f"output_observables[{observable_type.value}]",
protocols.decorrelate_observable.id,
)
return protocols, data_replicator
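# Rough sketch of how the pieces returned above are typically assembled into a
# workflow schema (the attribute names below are assumptions based on this
# module's imports, not verified against a particular openff-evaluator release):
#
#     protocols, replicator = generate_reweighting_protocols(ObservableType.Density)
#     schema = WorkflowSchema()
#     schema.protocol_schemas = [p.schema for p in protocols if p is not None]
#     schema.protocol_replicators = [replicator]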
def generate_simulation_protocols(
analysis_protocol: S,
use_target_uncertainty: bool,
id_suffix: str = "",
conditional_group: Optional[ConditionalGroup] = None,
n_molecules: int = 1000,
) -> Tuple[SimulationProtocols[S], ProtocolPath, StoredSimulationData]:
"""Constructs a set of protocols which, when combined in a workflow schema, may be
executed to run a single simulation to estimate the average value of an observable.
The protocols returned will:
1) Build a set of liquid coordinates for the
property substance using packmol.
2) Assign a set of smirnoff force field parameters
to the system.
3) Perform an energy minimisation on the system.
4) Run a short NPT equilibration simulation for 100000 steps
using a timestep of 2fs.
5) Within a conditional group (up to a maximum of 100 times):
5a) Run a longer NPT production simulation for 1000000 steps using a
timestep of 2fs
        5b) Extract the average value of an observable and its uncertainty.
5c) If a convergence mode is set by the options, check if the target
uncertainty has been met. If not, repeat steps 5a), 5b) and 5c).
6) Extract uncorrelated configurations from a generated production
simulation.
7) Extract uncorrelated statistics from a generated production
simulation.
Parameters
----------
analysis_protocol
The protocol which will extract the observable of
interest from the generated simulation data.
use_target_uncertainty
Whether to run the simulation until the observable is
estimated to within the target uncertainty.
id_suffix: str
A string suffix to append to each of the protocol ids.
conditional_group: ProtocolGroup, optional
A custom group to wrap the main simulation / extraction
protocols within. It is up to the caller of this method to
manually add the convergence conditions to this group.
If `None`, a default group with uncertainty convergence
conditions is automatically constructed.
n_molecules: int
The number of molecules to use in the workflow.
Returns
-------
The protocols to add to the workflow, a reference to the average value of the
estimated observable (an ``Observable`` object), and an object which describes
the default data from a simulation to store, such as the uncorrelated statistics
and configurations.
"""
build_coordinates = coordinates.BuildCoordinatesPackmol(
f"build_coordinates{id_suffix}"
)
build_coordinates.substance = ProtocolPath("substance", "global")
build_coordinates.max_molecules = n_molecules
assign_parameters = forcefield.BaseBuildSystem(f"assign_parameters{id_suffix}")
assign_parameters.force_field_path = ProtocolPath("force_field_path", "global")
assign_parameters.coordinate_file_path = ProtocolPath(
"coordinate_file_path", build_coordinates.id
)
assign_parameters.substance = ProtocolPath("output_substance", build_coordinates.id)
# Equilibration
energy_minimisation = openmm.OpenMMEnergyMinimisation(
f"energy_minimisation{id_suffix}"
)
energy_minimisation.input_coordinate_file = ProtocolPath(
"coordinate_file_path", build_coordinates.id
)
energy_minimisation.parameterized_system = ProtocolPath(
"parameterized_system", assign_parameters.id
)
equilibration_simulation = openmm.OpenMMSimulation(
f"equilibration_simulation{id_suffix}"
)
equilibration_simulation.ensemble = Ensemble.NPT
equilibration_simulation.steps_per_iteration = 100000
equilibration_simulation.output_frequency = 5000
equilibration_simulation.timestep = 2.0 * unit.femtosecond
equilibration_simulation.thermodynamic_state = ProtocolPath(
"thermodynamic_state", "global"
)
equilibration_simulation.input_coordinate_file = ProtocolPath(
"output_coordinate_file", energy_minimisation.id
)
equilibration_simulation.parameterized_system = ProtocolPath(
"parameterized_system", assign_parameters.id
)
# Production
production_simulation = openmm.OpenMMSimulation(f"production_simulation{id_suffix}")
production_simulation.ensemble = Ensemble.NPT
production_simulation.steps_per_iteration = 1000000
production_simulation.output_frequency = 2000
production_simulation.timestep = 2.0 * unit.femtosecond
production_simulation.thermodynamic_state = ProtocolPath(
"thermodynamic_state", "global"
)
production_simulation.input_coordinate_file = ProtocolPath(
"output_coordinate_file", equilibration_simulation.id
)
production_simulation.parameterized_system = ProtocolPath(
"parameterized_system", assign_parameters.id
)
production_simulation.gradient_parameters = ProtocolPath(
"parameter_gradient_keys", "global"
)
# Set up a conditional group to ensure convergence of uncertainty
if conditional_group is None:
conditional_group = groups.ConditionalGroup(f"conditional_group{id_suffix}")
conditional_group.max_iterations = 100
if use_target_uncertainty:
condition = groups.ConditionalGroup.Condition()
condition.right_hand_value = ProtocolPath("target_uncertainty", "global")
condition.type = groups.ConditionalGroup.Condition.Type.LessThan
condition.left_hand_value = ProtocolPath(
"value.error", conditional_group.id, analysis_protocol.id
)
conditional_group.add_condition(condition)
# Make sure the simulation gets extended after each iteration.
production_simulation.total_number_of_iterations = ProtocolPath(
"current_iteration", conditional_group.id
)
conditional_group.add_protocols(production_simulation, analysis_protocol)
# Point the analyse protocol to the correct data sources
if not isinstance(analysis_protocol, analysis.BaseAverageObservable):
raise ValueError(
"The analysis protocol must inherit from either the "
"AverageTrajectoryObservable or BaseAverageObservable "
"protocols."
)
analysis_protocol.thermodynamic_state = ProtocolPath(
"thermodynamic_state", "global"
)
analysis_protocol.potential_energies = ProtocolPath(
f"observables[{ObservableType.PotentialEnergy.value}]",
production_simulation.id,
)
# Finally, extract uncorrelated data
time_series_statistics = ProtocolPath(
"time_series_statistics", conditional_group.id, analysis_protocol.id
)
coordinate_file = ProtocolPath(
"output_coordinate_file", conditional_group.id, production_simulation.id
)
trajectory_path = ProtocolPath(
"trajectory_file_path", conditional_group.id, production_simulation.id
)
observables = ProtocolPath(
"observables", conditional_group.id, production_simulation.id
)
decorrelate_trajectory = analysis.DecorrelateTrajectory(
f"decorrelate_trajectory{id_suffix}"
)
decorrelate_trajectory.time_series_statistics = time_series_statistics
decorrelate_trajectory.input_coordinate_file = coordinate_file
decorrelate_trajectory.input_trajectory_path = trajectory_path
decorrelate_observables = analysis.DecorrelateObservables(
f"decorrelate_observables{id_suffix}"
)
decorrelate_observables.time_series_statistics = time_series_statistics
decorrelate_observables.input_observables = observables
# Build the object which defines which pieces of simulation data to store.
output_to_store = StoredSimulationData()
output_to_store.thermodynamic_state = ProtocolPath("thermodynamic_state", "global")
output_to_store.property_phase = PropertyPhase.Liquid
output_to_store.force_field_id = PlaceholderValue()
output_to_store.number_of_molecules = ProtocolPath(
"output_number_of_molecules", build_coordinates.id
)
output_to_store.substance = ProtocolPath("output_substance", build_coordinates.id)
output_to_store.statistical_inefficiency = ProtocolPath(
"time_series_statistics.statistical_inefficiency",
conditional_group.id,
analysis_protocol.id,
)
output_to_store.observables = ProtocolPath(
"output_observables", decorrelate_observables.id
)
output_to_store.trajectory_file_name = ProtocolPath(
"output_trajectory_path", decorrelate_trajectory.id
)
output_to_store.coordinate_file_name = coordinate_file
output_to_store.source_calculation_id = PlaceholderValue()
# Define where the final values come from.
final_value_source = ProtocolPath(
"value", conditional_group.id, analysis_protocol.id
)
base_protocols = SimulationProtocols(
build_coordinates,
assign_parameters,
energy_minimisation,
equilibration_simulation,
production_simulation,
analysis_protocol,
conditional_group,
decorrelate_trajectory,
decorrelate_observables,
)
return base_protocols, final_value_source, output_to_store
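# Hypothetical usage sketch (the protocol id and the choice of analysis protocol
# are illustrative assumptions):
#
#     density_analysis = analysis.AverageObservable("average_density")
#     protocols, value_source, data_to_store = generate_simulation_protocols(
#         density_analysis, use_target_uncertainty=True, n_molecules=256
#     )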
| 2.140625
| 2
|
lib/solutions/CHK/checkout_solution.py
|
DPNT-Sourcecode/CHK-zxlf01
| 0
|
12777373
|
# noinspection PyUnusedLocal
# skus = unicode string
def checkout(skus):
items = {
"A": {"price": 50, "deals": [{"quantity": 5, "price": 200}, {"quantity": 3, "price": 130}]},
"B": {"price": 30, "deals": [{"quantity": 2, "price": 45}]},
"C": {"price": 20},
"D": {"price": 15},
"E": {"price": 40, "free_items": {"quantity": 2, "item": "B"}},
"F": {"price": 10, "free_items": {"quantity": 3, "item": "F"}},
"G": {"price": 20},
"H": {"price": 10, "deals": [{"quantity": 10, "price": 80}, {"quantity": 5, "price": 45}]},
"I": {"price": 35},
"J": {"price": 60},
"K": {"price": 70, "deals": [{"quantity": 2, "price": 120}]},
"L": {"price": 90},
"M": {"price": 15},
"N": {"price": 40, "free_items": {"quantity": 3, "item": "M"}},
"O": {"price": 10},
"P": {"price": 50, "deals": [{"quantity": 5, "price": 200}]},
"Q": {"price": 30, "deals": [{"quantity": 3, "price": 80}]},
"R": {"price": 50, "free_items": {"quantity": 3, "item": "Q"}},
"S": {"price": 20},
"T": {"price": 20},
"U": {"price": 40, "free_items": {"quantity": 4, "item": "U"}},
"V": {"price": 50, "deals": [{"quantity": 3, "price": 130}, {"quantity": 2, "price": 90}]},
"W": {"price": 20},
"X": {"price": 17},
"Y": {"price": 20},
"Z": {"price": 21}
}
special_offer = {"collection": ["S", "T", "X", "Y", "Z"], "cost": 45}
total_cost = 0
all_items = dict.fromkeys(items, 0)
for sku in skus:
if sku in items:
all_items[sku] += 1
else:
return -1
# Applies special offer up front - allows for multiple instances and combinations.
# Did not originally account for multiple instances of each item.
offer_items_collection = special_offer["collection"]
all_prices = []
offer_items_in_basket = []
for offer_item in offer_items_collection:
        item_price_pair = (offer_item, items[offer_item]["price"])
        all_prices.append(item_price_pair)
sorted_by_price = sorted(all_prices, key=lambda x: x[1])
for item, _ in sorted_by_price:
for i in range(0, all_items[item]):
offer_items_in_basket.append(item)
checking_for_multi_discount = True
while checking_for_multi_discount:
if len(offer_items_in_basket) >= 3:
for i in range(0, 3):
item = offer_items_in_basket.pop()
all_items[item] -= 1
total_cost += special_offer["cost"]
else:
checking_for_multi_discount = False
# Checks for free items and removes from shopping list
for item, item_details in items.items():
item_count = all_items[item]
if item_details.get("free_items") and item_count:
            free_items = item_details["free_items"]
            quantity_required = free_items["quantity"]
            free_item = free_items["item"]
complete_deals = item_count // quantity_required
all_items[free_item] -= complete_deals
if all_items[free_item] < 0:
all_items[free_item] = 0
# Charges for items left in shopping cart (with deals)
for item, item_details in items.items():
item_price = item_details["price"]
item_deals = item_details.get("deals")
item_count = all_items[item]
        if item_deals:
for deal in item_deals:
item_deal_quantity = deal["quantity"]
item_deal_price = deal["price"]
complete_deals = item_count//item_deal_quantity
total_cost += (complete_deals * item_deal_price)
item_count -= complete_deals * item_deal_quantity
total_cost += (item_count*item_price)
return total_cost
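# Quick sanity check against the price table above (not part of the original
# solution): checkout("AAABB") should price 3 x A at the 3-for-130 offer plus
# 2 x B at the 2-for-45 offer, i.e. return 175, while any unrecognised SKU
# makes checkout return -1.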
| 2.421875
| 2
|
todo-api/app/core/tests/test_middleware.py
|
rkkhub/todo
| 0
|
12777374
|
from django.contrib.auth import get_user_model
from django.test import TestCase, override_settings
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
TASK_URL = reverse('todo:task-list')
def sample_get_request(client):
return client.get(TASK_URL)
def sample_post_request(client):
payload = {'title': 'Middleware POST test'}
return client.post(TASK_URL, payload)
class MiddlewareResponseTests(TestCase):
"""Tests the custom middleware"""
def setUp(self):
self.user = get_user_model().objects.create(
email="<EMAIL>",
password="<PASSWORD>")
self.client = APIClient()
self.client.force_authenticate(self.user)
@override_settings(MAINTENANCE_MODE=True)
def test_maintenance_mode_ON(self):
"""
Tests the response for all allowed methods
when on maintenance mode enabled
"""
# Test GET method
self.assertEqual(sample_get_request(self.client).status_code,
status.HTTP_503_SERVICE_UNAVAILABLE)
# Test POST method
self.assertEqual(sample_post_request(self.client).status_code,
status.HTTP_503_SERVICE_UNAVAILABLE)
@override_settings(MAINTENANCE_MODE=False)
def test_maintenance_mode_OFF(self):
"""
Test the response for all allowed methods
when maintenance mode disabled
"""
# Test Get method
self.assertEqual(sample_get_request(self.client).status_code,
status.HTTP_200_OK)
# Test POST method
self.assertEqual(sample_post_request(self.client).status_code,
status.HTTP_201_CREATED)
| 2.296875
| 2
|
backend/src/applications/session/create_session_request.py
|
Seina88/attendance-system
| 2
|
12777375
|
class CreateSessionRequest:
def __init__(self, info: str, password: str) -> None:
self.info = info
self.password = password
| 2.046875
| 2
|
server/utils/__init__.py
|
lolimay/digit-recognition
| 6
|
12777376
|
<reponame>lolimay/digit-recognition<filename>server/utils/__init__.py
"""
Created by lolimay <<EMAIL>>
Last Updated 2019-07-11
"""
| 0.941406
| 1
|
packages/arb-compiler-evm/arbitrum/evm/types.py
|
pangxieshousi/arbitrum
| 1
|
12777377
|
# Copyright 2019, Offchain Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import std
from .. import value
contract_state = std.Struct(
"contract_state",
[("storage", std.keyvalue_int_int.typ), ("wallet", std.currency_store.typ)],
)
message = std.Struct(
"message",
[
("data", value.ValueType()),
("sender", value.IntType()),
("amount", value.IntType()),
("type", value.IntType()),
],
)
message_blockchain_data = std.Struct(
"message_blockchain_data",
[
("data", value.ValueType()),
("timestamp", value.IntType()),
("block_number", value.IntType()),
("txhash", value.IntType()),
],
)
message_data = std.Struct(
"message_data",
[
("data", value.ValueType()),
("contract_id", value.IntType()),
("sequence_num", value.IntType()),
],
)
contract_store = std.make_keyvalue_type(value.IntType(), contract_state.typ)
local_exec_state = std.Struct(
"local_exec_state",
[
("data", value.ValueType()),
("sender", value.IntType()),
("amount", value.IntType()),
("type", value.IntType()),
],
)
| 1.742188
| 2
|
turma1/favela_radical/favela_radical/favela_radical.py
|
Niyudi/favela-radical
| 0
|
12777378
|
<filename>turma1/favela_radical/favela_radical/favela_radical.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# from -> "de", import -> "importar" (Portuguese keyword glosses)
from PyQt5.QtWidgets import QApplication
# sys -> system
from sys import argv, exit
from interface import JanelaPrincipal
class Aplicativo(QApplication):
def __init__(self, argv):
super().__init__(argv)
self.window = JanelaPrincipal()
def main():
app = Aplicativo(argv)
exit(app.exec_())
if __name__ == "__main__":
main()
| 2.46875
| 2
|
tests/unit_tests/test_session.py
|
slashsec-edu/cryton-core
| 0
|
12777379
|
from django.test import TestCase
from mock import patch
from cryton.lib.util import exceptions, logger
from cryton.lib.models import session
from cryton.cryton_rest_api.models import (
SessionModel,
PlanExecutionModel,
StepModel
)
import os
from model_bakery import baker
TESTS_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
@patch('cryton.lib.util.logger.logger', logger.structlog.getLogger('cryton-debug'))
class TestSession(TestCase):
def setUp(self) -> None:
self.plan_exec_obj = baker.make(PlanExecutionModel)
self.named_session_obj = SessionModel.objects.create(plan_execution=self.plan_exec_obj,
session_id='42',
session_name='test-session',
session_type=SessionModel.MSF_SHELL_TYPE
)
self.step_model = baker.make(StepModel)
pass
def test_create_session(self):
# Wrong plan execution ID
with self.assertRaises(exceptions.PlanExecutionDoesNotExist):
session.create_session(0, '0', 'test')
sess_obj = session.create_session(self.plan_exec_obj.id, '0', 'test', SessionModel.MSF_SHELL_TYPE)
self.assertEqual(sess_obj.session_name, 'test')
self.assertEqual(sess_obj.session_type, SessionModel.MSF_SHELL_TYPE)
def test_get_msf_session_id(self):
session_id = session.get_msf_session_id('test-session', self.plan_exec_obj.id)
self.assertEqual(session_id, '42')
def test_get_msf_session_id_ex(self):
with self.assertRaises(exceptions.SessionObjectDoesNotExist):
session.get_msf_session_id('non-existent-session', self.plan_exec_obj.id)
def test_set_msf_session_id(self):
session.set_msf_session_id('test-session', '666', self.plan_exec_obj.id)
self.assertEqual(session.get_msf_session_id('test-session', self.plan_exec_obj.id), '666')
with self.assertRaises(exceptions.SessionObjectDoesNotExist):
session.set_msf_session_id('test-session', '666', 666)
# @patch('cryton.lib.session.get_session_ids')
# def test_get_session_ids(self, mock_get_sess):
# mock_stub = Mock()
# mock_stub.sessions_list().sess_list = '["1", "2"]'
#
# self.step_model.use_any_session_to_target = '1.2.3.4'
# session_list = session.get_session_ids('1.2.3.4', self.plan_exec_obj.id)
#
# self.assertEqual('2', session_list[-1])
| 2.109375
| 2
|
tests/integration/test_release_event.py
|
majamassarini/packit-service
| 20
|
12777380
|
<gh_stars>10-100
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import json
import shutil
import pytest
from celery.app.task import Context, Task
from celery.canvas import Signature
from flexmock import flexmock
from github import Github
from rebasehelper.exceptions import RebaseHelperError
from packit.api import PackitAPI
from packit.config import JobConfigTriggerType
from packit.config.aliases import get_branches
from packit.distgit import DistGit
from packit.local_project import LocalProject
from packit.pkgtool import PkgTool
from packit_service import sentry_integration
from packit_service.config import ServiceConfig
from packit_service.constants import TASK_ACCEPTED
from packit_service.models import (
JobTriggerModelType,
PipelineModel,
ProjectReleaseModel,
ProposeDownstreamModel,
ProposeDownstreamStatus,
ProposeDownstreamTargetModel,
ProposeDownstreamTargetStatus,
)
from packit_service.service.db_triggers import AddReleaseDbTrigger
from packit_service.service.urls import get_propose_downstream_info_url
from packit_service.worker.allowlist import Allowlist
from packit_service.worker.jobs import SteveJobs
from packit_service.worker.monitoring import Pushgateway
from packit_service.worker.helpers.propose_downstream import ProposeDownstreamJobHelper
from packit_service.worker.reporting import BaseCommitStatus
from packit_service.worker.tasks import run_propose_downstream_handler
from tests.spellbook import first_dict_value, get_parameters_from_results
@pytest.fixture(scope="module")
def fedora_branches():
return sorted(get_branches("fedora-all"))
@pytest.fixture
def mock_propose_downstream_functionality():
trigger = flexmock(
job_trigger_model_type=JobTriggerModelType.release,
id=12,
job_config_trigger_type=JobConfigTriggerType.release,
)
run_model = flexmock(PipelineModel)
flexmock(ProjectReleaseModel).should_receive("get_or_create").with_args(
tag_name="0.3.0",
namespace="packit-service",
repo_name="hello-world",
project_url="https://github.com/packit-service/hello-world",
commit_hash="123456",
).and_return(trigger).once()
propose_downstream_model = flexmock(id=123, propose_downstream_targets=[])
flexmock(ProposeDownstreamModel).should_receive("create_with_new_run").with_args(
status=ProposeDownstreamStatus.running,
trigger_model=trigger,
).and_return(propose_downstream_model, run_model).once()
model = flexmock(status="queued", id=1234)
flexmock(ProposeDownstreamTargetModel).should_receive("create").with_args(
status=ProposeDownstreamTargetStatus.queued
).and_return(model)
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_all"
).with_args(
description=TASK_ACCEPTED,
state=BaseCommitStatus.pending,
url="",
).once()
yield propose_downstream_model, model
def test_dist_git_push_release_handle(
github_release_webhook, mock_propose_downstream_functionality
):
propose_downstream_model, model = mock_propose_downstream_functionality
packit_yaml = (
"{'specfile_path': 'hello-world.spec', 'synced_files': []"
", jobs: [{trigger: release, job: propose_downstream, metadata: {targets:[]}}]}"
)
flexmock(Github, get_repo=lambda full_name_or_id: None)
project = flexmock(
get_file_content=lambda path, ref: packit_yaml,
full_repo_name="packit-service/hello-world",
repo="hello-world",
namespace="packit-service",
get_files=lambda ref, filter_regex: [],
get_sha_from_tag=lambda tag_name: "123456",
get_web_url=lambda: "https://github.com/packit/hello-world",
is_private=lambda: False,
default_branch="main",
)
lp = flexmock(LocalProject, refresh_the_arguments=lambda: None)
lp.working_dir = ""
lp.git_project = project
flexmock(DistGit).should_receive("local_project").and_return(lp)
# reset of the upstream repo
flexmock(LocalProject).should_receive("git_repo").and_return(
flexmock(
head=flexmock()
.should_receive("reset")
.with_args("HEAD", index=True, working_tree=True)
.once()
.mock(),
git=flexmock(clear_cache=lambda: None),
)
)
flexmock(Allowlist, check_and_report=True)
ServiceConfig().get_service_config().get_project = lambda url: project
flexmock(PackitAPI).should_receive("sync_release").with_args(
dist_git_branch="main", tag="0.3.0", create_pr=True
).and_return(flexmock(url="some_url")).once()
flexmock(PackitAPI).should_receive("clean")
flexmock(model).should_receive("set_status").with_args(
status=ProposeDownstreamTargetStatus.running
).once()
flexmock(model).should_receive("set_branch").with_args(branch="main").once()
flexmock(model).should_receive("set_downstream_pr_url").with_args(
downstream_pr_url="some_url"
).once()
flexmock(model).should_receive("set_status").with_args(
status=ProposeDownstreamTargetStatus.submitted
).once()
flexmock(model).should_receive("set_start_time").once()
flexmock(model).should_receive("set_finished_time").once()
flexmock(model).should_receive("set_logs").once()
flexmock(propose_downstream_model).should_receive("set_status").with_args(
status=ProposeDownstreamStatus.finished
).once()
flexmock(AddReleaseDbTrigger).should_receive("db_trigger").and_return(
flexmock(
job_config_trigger_type=JobConfigTriggerType.release,
id=123,
job_trigger_model_type=JobTriggerModelType.release,
)
)
flexmock(Signature).should_receive("apply_async").once()
flexmock(Pushgateway).should_receive("push").times(2).and_return()
flexmock(shutil).should_receive("rmtree").with_args("")
url = get_propose_downstream_info_url(model.id)
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_branch"
).with_args(
branch="main",
description="Starting propose downstream...",
state=BaseCommitStatus.running,
url=url,
).once()
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_branch"
).with_args(
branch="main",
description="Propose downstream finished successfully.",
state=BaseCommitStatus.success,
url=url,
).once()
processing_results = SteveJobs().process_message(github_release_webhook)
event_dict, job, job_config, package_config = get_parameters_from_results(
processing_results
)
assert json.dumps(event_dict)
results = run_propose_downstream_handler(
package_config=package_config,
event=event_dict,
job_config=job_config,
)
assert first_dict_value(results["job"])["success"]
def test_dist_git_push_release_handle_multiple_branches(
github_release_webhook, fedora_branches, mock_propose_downstream_functionality
):
propose_downstream_model, model = mock_propose_downstream_functionality
packit_yaml = (
"{'specfile_path': 'hello-world.spec', 'synced_files': []"
", jobs: [{trigger: release, job: propose_downstream, "
"metadata: {targets:[], dist-git-branch: fedora-all}}]}"
)
flexmock(Github, get_repo=lambda full_name_or_id: None)
project = flexmock(
get_file_content=lambda path, ref: packit_yaml,
full_repo_name="packit-service/hello-world",
repo="hello-world",
namespace="packit-service",
get_files=lambda ref, filter_regex: [],
get_sha_from_tag=lambda tag_name: "123456",
get_web_url=lambda: "https://github.com/packit/hello-world",
is_private=lambda: False,
default_branch="main",
)
flexmock(LocalProject, refresh_the_arguments=lambda: None)
flexmock(LocalProject).should_receive("git_repo").and_return(
flexmock(
head=flexmock()
.should_receive("reset")
.with_args("HEAD", index=True, working_tree=True)
.times(len(fedora_branches))
.mock(),
git=flexmock(clear_cache=lambda: None),
)
)
flexmock(Allowlist, check_and_report=True)
ServiceConfig().get_service_config().get_project = lambda url: project
for branch in fedora_branches:
flexmock(PackitAPI).should_receive("sync_release").with_args(
dist_git_branch=branch, tag="0.3.0", create_pr=True
).and_return(flexmock(url="some_url")).once()
flexmock(model).should_receive("set_status").with_args(
status=ProposeDownstreamTargetStatus.running
).times(len(fedora_branches))
flexmock(model).should_receive("set_branch").times(len(fedora_branches))
flexmock(model).should_receive("set_downstream_pr_url").with_args(
downstream_pr_url="some_url"
).times(len(fedora_branches))
flexmock(model).should_receive("set_status").with_args(
status=ProposeDownstreamTargetStatus.submitted
).times(len(fedora_branches))
flexmock(model).should_receive("set_start_time").times(len(fedora_branches))
flexmock(model).should_receive("set_finished_time").times(len(fedora_branches))
flexmock(model).should_receive("set_logs").times(len(fedora_branches))
flexmock(propose_downstream_model).should_receive("set_status").with_args(
status=ProposeDownstreamStatus.finished
).once()
flexmock(PkgTool).should_receive("clone").and_return(None)
flexmock(AddReleaseDbTrigger).should_receive("db_trigger").and_return(
flexmock(
job_config_trigger_type=JobConfigTriggerType.release,
id=123,
job_trigger_model_type=JobTriggerModelType.release,
)
)
flexmock(Signature).should_receive("apply_async").once()
flexmock(Pushgateway).should_receive("push").times(2).and_return()
url = get_propose_downstream_info_url(model.id)
for branch in fedora_branches:
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_branch"
).with_args(
branch=branch,
description="Starting propose downstream...",
state=BaseCommitStatus.running,
url=url,
).once()
for branch in fedora_branches:
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_branch"
).with_args(
branch=branch,
description="Propose downstream finished successfully.",
state=BaseCommitStatus.success,
url=url,
).once()
processing_results = SteveJobs().process_message(github_release_webhook)
event_dict, job, job_config, package_config = get_parameters_from_results(
processing_results
)
assert json.dumps(event_dict)
results = run_propose_downstream_handler(
package_config=package_config,
event=event_dict,
job_config=job_config,
)
assert first_dict_value(results["job"])["success"]
def test_dist_git_push_release_handle_one_failed(
github_release_webhook, fedora_branches, mock_propose_downstream_functionality
):
propose_downstream_model, model = mock_propose_downstream_functionality
packit_yaml = (
"{'specfile_path': 'hello-world.spec', 'synced_files': []"
", jobs: [{trigger: release, job: propose_downstream, "
"targets:[], dist_git_branches: [fedora-all,]}]}"
)
flexmock(Github, get_repo=lambda full_name_or_id: None)
project = (
flexmock(
get_file_content=lambda path, ref: packit_yaml,
full_repo_name="packit-service/hello-world",
repo="hello-world",
namespace="packit-service",
get_files=lambda ref, filter_regex: [],
get_sha_from_tag=lambda tag_name: "123456",
get_web_url=lambda: "https://github.com/packit/hello-world",
is_private=lambda: False,
default_branch="main",
)
.should_receive("create_issue")
.once()
.and_return(flexmock(id="1", url="an url"))
.mock()
)
project.should_receive("get_issue_list").and_return([])
flexmock(LocalProject, refresh_the_arguments=lambda: None)
flexmock(LocalProject).should_receive("git_repo").and_return(
flexmock(
head=flexmock()
.should_receive("reset")
.with_args("HEAD", index=True, working_tree=True)
.times(len(fedora_branches))
.mock(),
git=flexmock(clear_cache=lambda: None),
)
)
flexmock(Allowlist, check_and_report=True)
ServiceConfig().get_service_config().get_project = lambda url: project
for i, branch in enumerate(fedora_branches):
if i == 1:
flexmock(PackitAPI).should_receive("sync_release").with_args(
dist_git_branch=branch, tag="0.3.0", create_pr=True
).and_raise(Exception, f"Failed {branch}").once()
else:
flexmock(PackitAPI).should_receive("sync_release").with_args(
dist_git_branch=branch, tag="0.3.0", create_pr=True
).and_return(flexmock(url="some_url")).once()
flexmock(model).should_receive("set_status").with_args(
status=ProposeDownstreamTargetStatus.running
).times(len(fedora_branches))
flexmock(model).should_receive("set_branch").times(len(fedora_branches))
flexmock(model).should_receive("set_start_time").times(len(fedora_branches))
flexmock(model).should_receive("set_finished_time").times(len(fedora_branches))
flexmock(model).should_receive("set_logs").times(len(fedora_branches))
flexmock(model).should_receive("set_downstream_pr_url").with_args(
downstream_pr_url="some_url"
).times(
len(fedora_branches) - 1 # one branch failed
)
flexmock(model).should_receive("set_status").with_args(
status=ProposeDownstreamTargetStatus.submitted
).times(
len(fedora_branches) - 1
) # one branch failed
flexmock(model).should_receive("set_status").with_args(
status=ProposeDownstreamTargetStatus.error
).once() # this is the failed branch
flexmock(propose_downstream_model).should_receive("set_status").with_args(
status=ProposeDownstreamStatus.error
).once()
flexmock(PkgTool).should_receive("clone").and_return(None)
flexmock(sentry_integration).should_receive("send_to_sentry").and_return().once()
flexmock(AddReleaseDbTrigger).should_receive("db_trigger").and_return(
flexmock(
job_config_trigger_type=JobConfigTriggerType.release,
id=123,
job_trigger_model_type=JobTriggerModelType.release,
)
)
flexmock(Signature).should_receive("apply_async").once()
flexmock(Pushgateway).should_receive("push").times(2).and_return()
url = get_propose_downstream_info_url(model.id)
for branch in fedora_branches:
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_branch"
).with_args(
branch=branch,
description="Starting propose downstream...",
state=BaseCommitStatus.running,
url=url,
).once()
for i in range(len(fedora_branches)):
if i == 1:
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_branch"
).with_args(
branch=fedora_branches[i],
description=f"Propose downstream failed: Failed {fedora_branches[i]}",
state=BaseCommitStatus.failure,
url=url,
).once()
else:
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_branch"
).with_args(
branch=fedora_branches[i],
description="Propose downstream finished successfully.",
state=BaseCommitStatus.success,
url=url,
).once()
processing_results = SteveJobs().process_message(github_release_webhook)
event_dict, job, job_config, package_config = get_parameters_from_results(
processing_results
)
assert json.dumps(event_dict)
results = run_propose_downstream_handler(
package_config=package_config,
event=event_dict,
job_config=job_config,
)
assert not first_dict_value(results["job"])["success"]
def test_dist_git_push_release_handle_all_failed(
github_release_webhook, fedora_branches, mock_propose_downstream_functionality
):
propose_downstream_model, model = mock_propose_downstream_functionality
packit_yaml = (
"{'specfile_path': 'hello-world.spec', 'synced_files': []"
", jobs: [{trigger: release, job: propose_downstream, "
"metadata: {targets:[], dist-git-branch: fedora-all}}]}"
)
flexmock(Github, get_repo=lambda full_name_or_id: None)
table_content = ""
for branch in fedora_branches:
table_content += f"| `{branch}` | `Failed` |\n"
project = (
flexmock(
get_file_content=lambda path, ref: packit_yaml,
full_repo_name="packit-service/hello-world",
repo="hello-world",
namespace="packit-service",
get_files=lambda ref, filter_regex: [],
get_sha_from_tag=lambda tag_name: "123456",
get_web_url=lambda: "https://github.com/packit/hello-world",
is_private=lambda: False,
default_branch="main",
)
.should_receive("create_issue")
.with_args(
title="[packit] Propose downstream failed for release 0.3.0",
body="Packit failed on creating pull-requests in dist-git:\n\n"
"| dist-git branch | error |\n"
"| --------------- | ----- |\n"
f"{table_content}\n\n"
"You can retrigger the update by adding a comment (`/packit propose-downstream`)"
" into this issue.\n",
)
.once()
.and_return(flexmock(id="1", url="an url"))
.mock()
)
project.should_receive("get_issue_list").and_return([])
lp = flexmock(LocalProject, refresh_the_arguments=lambda: None)
lp.git_project = project
lp.working_dir = ""
flexmock(DistGit).should_receive("local_project").and_return(lp)
# reset of the upstream repo
flexmock(LocalProject).should_receive("git_repo").and_return(
flexmock(
head=flexmock()
.should_receive("reset")
.with_args("HEAD", index=True, working_tree=True)
.times(len(fedora_branches))
.mock(),
git=flexmock(clear_cache=lambda: None),
)
)
flexmock(Allowlist, check_and_report=True)
ServiceConfig().get_service_config().get_project = lambda url: project
flexmock(PackitAPI).should_receive("sync_release").and_raise(
Exception, "Failed"
).times(len(fedora_branches))
flexmock(AddReleaseDbTrigger).should_receive("db_trigger").and_return(
flexmock(
job_config_trigger_type=JobConfigTriggerType.release,
id=123,
job_trigger_model_type=JobTriggerModelType.release,
)
)
flexmock(model).should_receive("set_status").with_args(
status=ProposeDownstreamTargetStatus.running
).times(len(fedora_branches))
flexmock(model).should_receive("set_branch").times(len(fedora_branches))
flexmock(model).should_receive("set_status").with_args(
status=ProposeDownstreamTargetStatus.error
).times(len(fedora_branches))
flexmock(model).should_receive("set_start_time").times(len(fedora_branches))
flexmock(model).should_receive("set_finished_time").times(len(fedora_branches))
flexmock(model).should_receive("set_logs").times(len(fedora_branches))
flexmock(propose_downstream_model).should_receive("set_status").with_args(
status=ProposeDownstreamStatus.error
).once()
flexmock(sentry_integration).should_receive("send_to_sentry").and_return().times(
len(fedora_branches)
)
flexmock(shutil).should_receive("rmtree").with_args("")
flexmock(Signature).should_receive("apply_async").once()
flexmock(Pushgateway).should_receive("push").times(2).and_return()
url = get_propose_downstream_info_url(model.id)
for branch in fedora_branches:
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_branch"
).with_args(
branch=branch,
description="Starting propose downstream...",
state=BaseCommitStatus.running,
url=url,
).once()
for branch in fedora_branches:
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_branch"
).with_args(
branch=branch,
description="Propose downstream failed: Failed",
state=BaseCommitStatus.failure,
url=url,
).once()
processing_results = SteveJobs().process_message(github_release_webhook)
event_dict, job, job_config, package_config = get_parameters_from_results(
processing_results
)
assert json.dumps(event_dict)
results = run_propose_downstream_handler(
package_config=package_config,
event=event_dict,
job_config=job_config,
)
assert not first_dict_value(results["job"])["success"]
def test_retry_propose_downstream_task(
github_release_webhook, mock_propose_downstream_functionality
):
propose_downstream_model, model = mock_propose_downstream_functionality
packit_yaml = (
"{'specfile_path': 'hello-world.spec', 'synced_files': []"
", jobs: [{trigger: release, job: propose_downstream, metadata: {targets:[]}}]}"
)
flexmock(Github, get_repo=lambda full_name_or_id: None)
project = flexmock(
get_file_content=lambda path, ref: packit_yaml,
full_repo_name="packit-service/hello-world",
repo="hello-world",
namespace="packit-service",
get_files=lambda ref, filter_regex: [],
get_sha_from_tag=lambda tag_name: "123456",
get_web_url=lambda: "https://github.com/packit/hello-world",
is_private=lambda: False,
default_branch="main",
)
lp = flexmock(LocalProject, refresh_the_arguments=lambda: None)
lp.git_project = project
lp.working_dir = ""
flexmock(DistGit).should_receive("local_project").and_return(lp)
# reset of the upstream repo
flexmock(LocalProject).should_receive("git_repo").and_return(
flexmock(
head=flexmock()
.should_receive("reset")
.with_args("HEAD", index=True, working_tree=True)
.once()
.mock(),
git=flexmock(clear_cache=lambda: None),
)
)
flexmock(Allowlist, check_and_report=True)
ServiceConfig().get_service_config().get_project = lambda url: project
flexmock(AddReleaseDbTrigger).should_receive("db_trigger").and_return(
flexmock(
job_config_trigger_type=JobConfigTriggerType.release,
id=123,
job_trigger_model_type=JobTriggerModelType.release,
)
)
flexmock(Signature).should_receive("apply_async").once()
flexmock(PackitAPI).should_receive("sync_release").with_args(
dist_git_branch="main", tag="0.3.0", create_pr=True
).and_raise(
RebaseHelperError, "Failed to download file from URL example.com"
).once()
flexmock(model).should_receive("set_status").with_args(
status=ProposeDownstreamTargetStatus.running
).once()
flexmock(model).should_receive("set_branch").with_args(branch="main").once()
flexmock(model).should_receive("set_status").with_args(
status=ProposeDownstreamTargetStatus.retry
).once()
flexmock(model).should_receive("set_start_time").once()
flexmock(model).should_receive("set_finished_time").once()
flexmock(model).should_receive("set_logs").once()
flexmock(shutil).should_receive("rmtree").with_args("")
flexmock(Task).should_receive("retry").once().and_return()
flexmock(Pushgateway).should_receive("push").times(2).and_return()
url = get_propose_downstream_info_url(model.id)
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_branch"
).with_args(
branch="main",
description="Starting propose downstream...",
state=BaseCommitStatus.running,
url=url,
).once()
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_branch"
).with_args(
branch="main",
description="Propose downstream is being retried because "
"we were not able yet to download the archive. ",
state=BaseCommitStatus.pending,
url=url,
).once()
processing_results = SteveJobs().process_message(github_release_webhook)
event_dict, job, job_config, package_config = get_parameters_from_results(
processing_results
)
assert json.dumps(event_dict)
results = run_propose_downstream_handler(event_dict, package_config, job_config)
assert first_dict_value(results["job"])["success"] # yes, success, see #1140
assert "Not able to download" in first_dict_value(results["job"])["details"]["msg"]
def test_dont_retry_propose_downstream_task(
github_release_webhook, mock_propose_downstream_functionality
):
propose_downstream_model, model = mock_propose_downstream_functionality
packit_yaml = (
"{'specfile_path': 'hello-world.spec', 'synced_files': []"
", jobs: [{trigger: release, job: propose_downstream, metadata: {targets:[]}}]}"
)
flexmock(Github, get_repo=lambda full_name_or_id: None)
project = (
flexmock(
get_file_content=lambda path, ref: packit_yaml,
full_repo_name="packit-service/hello-world",
repo="hello-world",
namespace="packit-service",
get_files=lambda ref, filter_regex: [],
get_sha_from_tag=lambda tag_name: "123456",
get_web_url=lambda: "https://github.com/packit/hello-world",
is_private=lambda: False,
default_branch="main",
)
.should_receive("create_issue")
.once()
.and_return(flexmock(id="1", url="an url"))
.mock()
)
project.should_receive("get_issue_list").and_return([]).once()
lp = flexmock(LocalProject, refresh_the_arguments=lambda: None)
lp.git_project = project
lp.working_dir = ""
flexmock(DistGit).should_receive("local_project").and_return(lp)
flexmock(Allowlist, check_and_report=True)
ServiceConfig().get_service_config().get_project = lambda url: project
flexmock(AddReleaseDbTrigger).should_receive("db_trigger").and_return(
flexmock(
job_config_trigger_type=JobConfigTriggerType.release,
id=123,
job_trigger_model_type=JobTriggerModelType.release,
)
)
flexmock(Signature).should_receive("apply_async").once()
flexmock(PackitAPI).should_receive("sync_release").with_args(
dist_git_branch="main", tag="0.3.0", create_pr=True
).and_raise(
RebaseHelperError, "Failed to download file from URL example.com"
).once()
flexmock(model).should_receive("set_status").with_args(
status=ProposeDownstreamTargetStatus.running
).once()
flexmock(model).should_receive("set_branch").with_args(branch="main").once()
flexmock(model).should_receive("set_status").with_args(
status=ProposeDownstreamTargetStatus.error
).once()
flexmock(model).should_receive("set_start_time").once()
flexmock(model).should_receive("set_finished_time").once()
flexmock(model).should_receive("set_logs").once()
flexmock(propose_downstream_model).should_receive("set_status").with_args(
status=ProposeDownstreamStatus.error
).once()
flexmock(LocalProject).should_receive("git_repo").and_return(
flexmock(
head=flexmock()
.should_receive("reset")
.with_args("HEAD", index=True, working_tree=True)
.once()
.mock(),
git=flexmock(clear_cache=lambda: None),
)
)
flexmock(Context, retries=2)
flexmock(shutil).should_receive("rmtree").with_args("")
flexmock(Task).should_receive("retry").never()
flexmock(Pushgateway).should_receive("push").times(2).and_return()
url = get_propose_downstream_info_url(model.id)
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_branch"
).with_args(
branch="main",
description="Starting propose downstream...",
state=BaseCommitStatus.running,
url=url,
).once()
flexmock(ProposeDownstreamJobHelper).should_receive(
"report_status_to_branch"
).with_args(
branch="main",
description="Propose downstream failed: Failed to download file from URL example.com",
state=BaseCommitStatus.failure,
url=url,
).once()
processing_results = SteveJobs().process_message(github_release_webhook)
event_dict, job, job_config, package_config = get_parameters_from_results(
processing_results
)
assert json.dumps(event_dict)
results = run_propose_downstream_handler(event_dict, package_config, job_config)
assert not first_dict_value(results["job"])["success"]
| 1.734375
| 2
|
sms_sender.py
|
sleekmike/Twilio-Rasa-Leads-Chatbot
| 0
|
12777381
|
<filename>sms_sender.py
from twilio.rest import Client
import requests
import os
# ===========================> Getting Environments Variables <================================
twilio_account_sid = os.getenv('TWILIO_ACCOUNT_SID')
twilio_auth_token = os.getenv('TWILIO_AUTH_TOKEN')
twilio_number = os.getenv('TWILIO_SMS_FROM')
messaging_service_sid = os.getenv('MESSAGING_SERVICE_SID')
print("twilio_account_sid: ", twilio_account_sid)
print("twilio_auth_token: ", twilio_auth_token)
print("twilio_number: ", twilio_number)
print("messaging_service_sid: ", messaging_service_sid)
# ===========================> First Engagement <================================ #
def first_engagement(lead):
""" Sends first engagement message to the new lead. """
# lead details
lead_name = lead["lead_name"]
number = lead["number"]
#url = "localhost:5005"
#linker = '192.168.3.11:5005'
    linker = '<YOUR DOMAIN NAME HERE>:5005'
#linker = 'b68a-165-232-137-196.ngrok.io'
url = f"http://{linker}/conversations/{number}/trigger_intent?output_channel=latest"
# lead payload
payload = {
"name": "send_first_SMS",
"entities": {
"lead_number": number,
"lead_name": lead_name
},
}
# sending SMS request
headers = {"Content-Type": "application/json"}
response = requests.request("POST", url, headers=headers, json=payload)
# result1 = response.json
result = response
return result
# New Leads (ADD Lead Details Here)
mike1 = {"number": "+2348035469768", "lead_name": "mike"}
david = {"number": "+19167671669", "lead_name": "<NAME>"}
abram = {"number": "+19163060375", "lead_name": "<NAME>"}
mike2 = {"number": "+19162510635", "lead_name": "<NAME>"}
mike3 = {"number": "+2348133120975", "lead_name": "<NAME>"}
shailendra = {"number": "+19165181950", "lead_name": "<NAME>"}
#result = first_engagement(mike3)
result = first_engagement(david)
print("result:", result)
| 2.484375
| 2
|
lib/innvestigate/src/innvestigate/utils/__init__.py
|
vwesselkamp/deepfake-fingerprint-atacks
| 0
|
12777382
|
<gh_stars>0
# Get Python six functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
###############################################################################
###############################################################################
###############################################################################
import tensorflow.keras.backend as K
import tensorflow.keras.utils as keras_utils
import math
__all__ = [
"model_wo_softmax",
"to_list",
"BatchSequence",
"TargetAugmentedSequence",
"preprocess_images",
"postprocess_images",
]
###############################################################################
###############################################################################
###############################################################################
def model_wo_softmax(*args, **kwargs):
# Break cyclic import
from .keras.graph import model_wo_softmax
return model_wo_softmax(*args, **kwargs)
###############################################################################
###############################################################################
###############################################################################
def to_list(l):
""" If not list, wraps parameter into a list."""
if not isinstance(l, list):
return [l, ]
else:
return l
###############################################################################
###############################################################################
###############################################################################
class BatchSequence(keras_utils.Sequence):
"""Batch sequence generator.
    Takes a (list of) input tensors and a batch size
    and creates a generator that yields a sequence of batches.
:param Xs: One or a list of tensors. First axis needs to have same length.
:param batch_size: Batch size. Default 32.
"""
def __init__(self, Xs, batch_size=32):
self.Xs = to_list(Xs)
self.single_tensor = len(Xs) == 1
self.batch_size = batch_size
if not self.single_tensor:
for X in self.Xs[1:]:
assert X.shape[0] == self.Xs[0].shape[0]
super(BatchSequence, self).__init__()
def __len__(self):
return int(math.ceil(float(len(self.Xs[0])) / self.batch_size))
def __getitem__(self, idx):
ret = [X[idx*self.batch_size:(idx+1)*self.batch_size]
for X in self.Xs]
if self.single_tensor:
return ret[0]
else:
return tuple(ret)
class TargetAugmentedSequence(keras_utils.Sequence):
"""Augments a sequence with a target on the fly.
Takes a sequence/generator and a function that
    creates a target on the fly for each batch.
The generator takes a batch from that sequence,
computes the target and returns both.
:param sequence: A sequence or generator.
:param augment_f: Takes a batch and returns a target.
"""
def __init__(self, sequence, augment_f):
self.sequence = sequence
self.augment_f = augment_f
super(TargetAugmentedSequence, self).__init__()
def __len__(self):
return len(self.sequence)
def __getitem__(self, idx):
inputs = self.sequence[idx]
if isinstance(inputs, tuple):
assert len(inputs) == 1
inputs = inputs[0]
targets = self.augment_f(to_list(inputs))
return inputs, targets
###############################################################################
###############################################################################
###############################################################################
def preprocess_images(images, color_coding=None):
"""Image preprocessing
Takes a batch of images and:
    * Adjusts the color axis to the Keras format.
* Fixes the color coding.
:param images: Batch of images with 4 axes.
:param color_coding: Determines the color coding.
Can be None, 'RGBtoBGR' or 'BGRtoRGB'.
:return: The preprocessed batch.
"""
ret = images
image_data_format = K.image_data_format()
# todo: not very general:
channels_first = images.shape[1] in [1, 3]
if image_data_format == "channels_first" and not channels_first:
ret = ret.transpose(0, 3, 1, 2)
if image_data_format == "channels_last" and channels_first:
ret = ret.transpose(0, 2, 3, 1)
assert color_coding in [None, "RGBtoBGR", "BGRtoRGB"]
if color_coding in ["RGBtoBGR", "BGRtoRGB"]:
if image_data_format == "channels_first":
ret = ret[:, ::-1, :, :]
if image_data_format == "channels_last":
ret = ret[:, :, :, ::-1]
return ret
def postprocess_images(images, color_coding=None, channels_first=None):
"""Image postprocessing
Takes a batch of images and reverts the preprocessing.
:param images: A batch of images with 4 axes.
:param color_coding: The initial color coding,
see :func:`preprocess_images`.
:param channels_first: The output channel format.
:return: The postprocessed images.
"""
ret = images
image_data_format = K.image_data_format()
assert color_coding in [None, "RGBtoBGR", "BGRtoRGB"]
if color_coding in ["RGBtoBGR", "BGRtoRGB"]:
if image_data_format == "channels_first":
ret = ret[:, ::-1, :, :]
if image_data_format == "channels_last":
ret = ret[:, :, :, ::-1]
if image_data_format == "channels_first" and not channels_first:
ret = ret.transpose(0, 2, 3, 1)
if image_data_format == "channels_last" and channels_first:
ret = ret.transpose(0, 3, 1, 2)
return ret
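###############################################################################
###############################################################################
###############################################################################
# Hypothetical usage sketch (not part of the original module): iterate an
# input tensor in mini-batches with BatchSequence. The array shape, batch
# size and values below are illustrative assumptions only.
if __name__ == "__main__":
    import numpy as np
    dummy_inputs = np.arange(20, dtype="float32").reshape(10, 2)  # 10 samples, 2 features
    sequence = BatchSequence([dummy_inputs], batch_size=4)
    print(len(sequence))      # 3 batches: 4 + 4 + 2 samples
    print(sequence[0].shape)  # (4, 2) -- first mini-batch returned as a single tensor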
| 1.96875
| 2
|
modules/2.79/bpy/types/ThemeUserPreferences.py
|
cmbasnett/fake-bpy-module
| 0
|
12777383
|
ThemeUserPreferences.space = None
| 1.132813
| 1
|
napari_browser_adv.py
|
sebi06/czi_demos
| 3
|
12777384
|
# -*- coding: utf-8 -*-
#################################################################
# File : napari_browser_adv.py
# Version : 0.0.1
# Author : czsrh
# Date : 18.11.2020
# Institution : Carl Zeiss Microscopy GmbH
#
# Copyright (c) 2020 <NAME>, Germany. All Rights Reserved.
#################################################################
from PyQt5.QtWidgets import (
# QPushButton,
# QComboBox,
QHBoxLayout,
QFileDialog,
QDialogButtonBox,
QWidget,
QTableWidget,
QTableWidgetItem,
QCheckBox,
# QDockWidget,
# QSlider,
)
from PyQt5.QtCore import Qt
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QFont
import napari
import numpy as np
# from czitools import imgfileutils as imf
import imgfileutils as imf
from aicsimageio import AICSImage
import dask.array as da
import os
from pathlib import Path
def show_image_napari(array, metadata,
blending='additive',
gamma=0.75,
rename_sliders=False):
"""Show the multidimensional array using the Napari viewer
:param array: multidimensional NumPy.Array containing the pixeldata
:type array: NumPy.Array
:param metadata: dictionary with CZI or OME-TIFF metadata
:type metadata: dict
:param blending: NapariViewer option for blending, defaults to 'additive'
:type blending: str, optional
    :param gamma: NapariViewer value for Gamma, defaults to 0.75
:type gamma: float, optional
    :param rename_sliders: rename the sliders with the correct dimension labels, defaults to False
    :type rename_sliders: bool, optional
"""
    # create scalefactor list with all ones
scalefactors = [1.0] * len(array.shape)
dimpos = imf.get_dimpositions(metadata['Axes_aics'])
# get the scalefactors from the metadata
scalef = imf.get_scalefactor(metadata)
# modify the tuple for the scales for napari
scalefactors[dimpos['Z']] = scalef['zx']
# remove C dimension from scalefactor
scalefactors_ch = scalefactors.copy()
del scalefactors_ch[dimpos['C']]
if metadata['SizeC'] > 1:
# add all channels as layers
for ch in range(metadata['SizeC']):
try:
# get the channel name
chname = metadata['Channels'][ch]
except KeyError as e:
print(e)
# or use CH1 etc. as string for the name
chname = 'CH' + str(ch + 1)
# cut out channel
# use dask if array is a dask.array
if isinstance(array, da.Array):
print('Extract Channel using Dask.Array')
channel = array.compute().take(ch, axis=dimpos['C'])
else:
# use normal numpy if not
print('Extract Channel NumPy.Array')
channel = array.take(ch, axis=dimpos['C'])
# actually show the image array
print('Adding Channel : ', chname)
print('Shape Channel : ', ch, channel.shape)
print('Scaling Factors : ', scalefactors_ch)
# get min-max values for initial scaling
clim = imf.calc_scaling(channel,
corr_min=1.0,
offset_min=0,
corr_max=0.85,
offset_max=0)
# add channel to napari viewer
viewer.add_image(channel,
name=chname,
scale=scalefactors_ch,
contrast_limits=clim,
blending=blending,
gamma=gamma)
if metadata['SizeC'] == 1:
# just add one channel as a layer
try:
# get the channel name
chname = metadata['Channels'][0]
except KeyError:
# or use CH1 etc. as string for the name
            chname = 'CH1'  # single-channel image, so no channel index is available here
# actually show the image array
print('Adding Channel: ', chname)
print('Scaling Factors: ', scalefactors)
# use dask if array is a dask.array
if isinstance(array, da.Array):
print('Extract Channel using Dask.Array')
array = array.compute()
# get min-max values for initial scaling
clim = imf.calc_scaling(array)
viewer.add_image(array,
name=chname,
scale=scalefactors,
contrast_limits=clim,
blending=blending,
gamma=gamma)
if rename_sliders:
print('Renaming the Sliders based on the Dimension String ....')
if metadata['SizeC'] == 1:
# get the position of dimension entries after removing C dimension
dimpos_viewer = imf.get_dimpositions(metadata['Axes_aics'])
# get the label of the sliders
sliders = viewer.dims.axis_labels
# update the labels with the correct dimension strings
slidernames = ['B', 'S', 'T', 'Z', 'C']
if metadata['SizeC'] > 1:
new_dimstring = metadata['Axes_aics'].replace('C', '')
# get the position of dimension entries after removing C dimension
dimpos_viewer = imf.get_dimpositions(new_dimstring)
# get the label of the sliders
sliders = viewer.dims.axis_labels
# update the labels with the correct dimension strings
slidernames = ['B', 'S', 'T', 'Z']
for s in slidernames:
if dimpos_viewer[s] >= 0:
sliders[dimpos_viewer[s]] = s
# apply the new labels to the viewer
viewer.dims.axis_labels = sliders
class CheckBoxWidget(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.layout = QHBoxLayout(self)
self.cbox = QCheckBox("Use Dask Delayed ImageReader", self)
self.layout.addWidget(self.cbox)
self.cbox.setChecked(True)
# adjust font
fnt = QFont()
fnt.setPointSize(12)
fnt.setBold(True)
fnt.setFamily("Arial")
self.cbox.setFont(fnt)
class TableWidget(QWidget):
# def __init__(self, md):
def __init__(self):
super(QWidget, self).__init__()
self.layout = QHBoxLayout(self)
self.mdtable = QTableWidget()
self.layout.addWidget(self.mdtable)
self.mdtable.setShowGrid(True)
self.mdtable.setHorizontalHeaderLabels(['Parameter', 'Value'])
header = self.mdtable.horizontalHeader()
header.setDefaultAlignment(Qt.AlignLeft)
def update_metadata(self, md):
row_count = len(md)
col_count = 2
self.mdtable.setColumnCount(col_count)
self.mdtable.setRowCount(row_count)
row = 0
for key, value in md.items():
newkey = QTableWidgetItem(key)
self.mdtable.setItem(row, 0, newkey)
newvalue = QTableWidgetItem(str(value))
self.mdtable.setItem(row, 1, newvalue)
row += 1
# fit columns to content
self.mdtable.resizeColumnsToContents()
def update_style(self):
fnt = QFont()
fnt.setPointSize(11)
fnt.setBold(True)
fnt.setFamily("Arial")
item1 = QtWidgets.QTableWidgetItem('Parameter')
item1.setForeground(QtGui.QColor(25, 25, 25))
item1.setFont(fnt)
self.mdtable.setHorizontalHeaderItem(0, item1)
item2 = QtWidgets.QTableWidgetItem('Value')
item2.setForeground(QtGui.QColor(25, 25, 25))
item2.setFont(fnt)
self.mdtable.setHorizontalHeaderItem(1, item2)
class Open_files(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.layout = QHBoxLayout(self)
self.file_dialog = QFileDialog()
self.file_dialog.setWindowFlags(Qt.Widget)
self.file_dialog.setModal(False)
self.file_dialog.setOption(QFileDialog.DontUseNativeDialog)
# Remove open and cancel button from widget
self.buttonBox = self.file_dialog.findChild(QDialogButtonBox, "buttonBox")
self.buttonBox.clear()
# Only open following file types
self.file_dialog.setNameFilter("Images (*.czi *.ome.tiff *ome.tif *.tiff *.tif)")
self.layout.addWidget(self.file_dialog)
self.file_dialog.currentChanged.connect(self.open_path)
def open_path(self, path):
if os.path.isfile(path):
# remove exitings layers from napari
viewer.layers.select_all()
viewer.layers.remove_selected()
# get the metadata
md, addmd = imf.get_metadata(path)
# add the metadata and adapt the table display
mdbrowser.update_metadata(md)
mdbrowser.update_style()
use_dask = checkbox.cbox.isChecked()
print('Use Dask : ', use_dask)
# get AICSImageIO object
img = AICSImage(path)
if use_dask:
stack = img.dask_data
if not use_dask:
stack = img.get_image_data()
# add the image stack to the napari viewer
show_image_napari(stack, md,
blending='additive',
gamma=0.85,
rename_sliders=True)
# start the main application
with napari.gui_qt():
filebrowser = Open_files()
mdbrowser = TableWidget()
checkbox = CheckBoxWidget()
# create a viewer
viewer = napari.Viewer()
# add widgets
viewer.window.add_dock_widget(filebrowser, name='filebrowser', area='right')
viewer.window.add_dock_widget(checkbox, name='checkbox', area='right')
viewer.window.add_dock_widget(mdbrowser, name='mdbrowser', area='right')
| 2.125
| 2
|
ez_utils/date_utils.py
|
darkripples/none-web-frame
| 2
|
12777385
|
#!/usr/bin/env python
# coding:utf8
"""
@Time : 2018/10/31
@Author : fls
@Contact : <EMAIL>
@Desc : fls usability utils - date-related utils
@Modify Time @Author @Version @Description
------------ ------- -------- -----------
2018/10/31 11:41 fls 1.0 create
2020/08/01 11:43 fls 1.1 added function get_current_week
"""
import datetime
FMT_DATETIME = '%Y%m%d%H%M%S'
FMT_DATETIME_SEPARATE = '%Y-%m-%d %H:%M:%S'
FMT_DATE = '%Y%m%d'
FMT_TIME = '%H%M%S'
def fmt_date(date=None, fmt=FMT_DATETIME_SEPARATE):
"""格式化日期(date = datetime.datetime.now(), fmt = '%Y-%m-%d %H:%M:%S')
\t\t@param: date 日期,为空则取当前日期
\t\t@param: fmt 格式化样式
"""
if not date:
date = datetime.datetime.now()
n = date.strftime(fmt)
return n
def str2date(date=None, fmt=FMT_DATETIME_SEPARATE):
"""
    Convert a date string to a datetime object
:param date:
:param fmt:
:return:
"""
if not date:
return fmt_date(date=None, fmt=fmt)
return datetime.datetime.strptime(date, fmt)
def get_day_n(date=None, day=1, fmt=FMT_DATETIME_SEPARATE):
"""获取n天后或-n天前的日期(date = datetime.datetime.now(), day = 1, fmt = '%Y-%m-%d %H:%M:%S')
\t\t@param: date 日期,为空则取当前日期
\t\t@param: day n天后的日期,默认1天后,为负数则取n天前的日期
\t\t@param: fmt 格式化样式
"""
if not date:
date = datetime.datetime.now()
return fmt_date(date=date + datetime.timedelta(days=day), fmt=fmt)
def get_seconds_n(date=None, seconds=0, fmt=FMT_DATETIME_SEPARATE):
"""获取n秒后或-n秒前的日期(date = datetime.datetime.now(), seconds = 1, fmt = '%Y-%m-%d %H:%M:%S')
\t\t@param: date 日期,为空则取当前日期
\t\t@param: seconds n秒后的时间,默认0秒后,为负数则取n秒前的时间
\t\t@param: fmt 格式化样式
"""
if not date:
date = datetime.datetime.now()
return fmt_date(date=date + datetime.timedelta(seconds=seconds), fmt=fmt)
def get_interval_day(start, end, fmt=FMT_DATE):
"""获取日期间的天数(start, end, fmt = '%Y%m%d')
\t\t@param: start 开始日期
\t\t@param: end 结束日期
\t\t@param: fmt 格式化样式
"""
def gen_dates(b_date, days):
day = datetime.timedelta(days=1)
for i in range(days):
yield b_date + day * i
if start is None:
return []
start = datetime.datetime.strptime(start, fmt)
if end is None:
end = datetime.datetime.now()
else:
end = datetime.datetime.strptime(end, fmt)
data = []
for d in gen_dates(start, (end - start).days + 1):
data.append(d.strftime(fmt))
return data
def reformat_date_str(rq1, fmt1, fmt2):
"""按目标格式,重新格式化日期(rq1, fmt1, fmt2)
\t\t@param: rq1 开始日期
\t\t@param: fmt1 rq1的格式
\t\t@param: fmt2 目标格式
"""
return datetime.datetime.strptime(rq1, fmt1).strftime(fmt2)
def get_current_week(date=None, fmt=FMT_DATE):
"""
    Return a list of date strings for the week containing the given date
:param date:
:param fmt:
:return:
"""
if not date:
date = datetime.datetime.now()
monday = date
one_day = datetime.timedelta(days=1)
while monday.weekday() != 0:
monday -= one_day
    # return the list of date strings for that week
ret = []
for i in range(7):
ret.append((monday + datetime.timedelta(days=i)).strftime(fmt))
return ret
def help(num='①'):
print(num + "关于日期时间")
print("\tfmt_date(date = datetime.datetime.now(), fmt = '%Y-%m-%d %H:%M:%S')")
print("\t" + fmt_date.__doc__)
print("\tafter_date(date = datetime.datetime.now(), day = 1, fmt = '%Y-%m-%d %H:%M:%S)")
print("\t" + get_day_n.__doc__)
print("\tafterSeconds(date = datetime.datetime.now(), seconds = 0, fmt = '%Y-%m-%d %H:%M:%S)")
print("\t" + get_seconds_n.__doc__)
print("\tinterval_day(start, end, fmt = '%Y%m%d')")
print("\t" + get_interval_day.__doc__)
print("\treformat_date_str(rq1, fmt1, fmt2)")
print("\t" + reformat_date_str.__doc__)
| 2.828125
| 3
|
src/encrypted_bigquery_client_test.py
|
datavirtualization/encrypted-bq-client
| 0
|
12777386
|
<reponame>datavirtualization/encrypted-bq-client
#!/usr/bin/env python
# Copyright 2013 Google Inc. All Rights Reserved.
"""Unit tests for Encrypted Bigquery Client module."""
from copy import deepcopy
import random
from google.apputils import app
import gflags as flags
from google.apputils import basetest as googletest
import bigquery_client
import common_util as util
import ebq_crypto as ecrypto
import encrypted_bigquery_client
import test_util
FLAGS = flags.FLAGS
# TODO(user): Need to add unit tests for _DecryptRows.
class EncryptedBigqueryClientTest(googletest.TestCase):
def _EncryptTable(self, cipher, table, column_index):
rewritten_table = deepcopy(table)
for i in range(len(table)):
rewritten_table[i][column_index] = cipher.Encrypt(table[i][column_index])
return rewritten_table
def testComputeRows(self):
# Query is 'SELECT 1 + 1, 1 * 1'
# Testing no queried values.
stack = [[1, 1, util.OperatorToken('+', 2)],
[1, 1, util.OperatorToken('*', 2)]]
query = {}
real_result = [['2', '1']]
result = encrypted_bigquery_client._ComputeRows(stack, query)
self.assertEqual(result, real_result)
# Query is 'SELECT 1 + a, 1 * b, "hello"'
# There are two rows of values for a and b (shown in query).
# Result becomes as below:
# 1 + a | 1 * b | "hello"
# 2 3 "hello"
# 4 5 "hello"
stack = [[1, util.FieldToken('a'), util.OperatorToken('+', 2)],
[1, util.FieldToken('b'), util.OperatorToken('*', 2)],
[util.StringLiteralToken('"hello"')]]
query = {'a': [1, 3], 'b': [3, 5]}
real_result = [['2', '3', 'hello'], ['4', '5', 'hello']]
result = encrypted_bigquery_client._ComputeRows(stack, query)
self.assertEqual(result, real_result)
def testDecryptValues(self):
cars_schema = test_util.GetCarsSchema()
jobs_schema = test_util.GetJobsSchema()
master_key = test_util.GetMasterKey()
field = '%sInvoice_Price' % util.HOMOMORPHIC_INT_PREFIX
table = [[1], [2], [3]]
cipher = ecrypto.HomomorphicIntCipher(master_key)
ciphers = {util.HOMOMORPHIC_INT_PREFIX: cipher}
table = self._EncryptTable(cipher, table, 0)
table.append([None])
column = encrypted_bigquery_client._DecryptValues(
field, table, 0, ciphers, cars_schema,
util.HOMOMORPHIC_INT_PREFIX)
self.assertEqual(column, [1, 2, 3, util.LiteralToken('null', None)])
field = 'citiesLived.job.%sposition' % util.PSEUDONYM_PREFIX
table = [[0, unicode('Hello')], [1, unicode('My')], [-1, unicode('job')]]
cipher = ecrypto.PseudonymCipher(master_key)
ciphers = {util.PSEUDONYM_PREFIX: cipher}
table = self._EncryptTable(cipher, table, 1)
table.insert(1, [100, None])
column = encrypted_bigquery_client._DecryptValues(
field, table, 1, ciphers, jobs_schema,
util.PSEUDONYM_PREFIX)
self.assertEqual(column,
[util.StringLiteralToken('"Hello"'),
util.LiteralToken('null', None),
util.StringLiteralToken('"My"'),
util.StringLiteralToken('"job"')])
field = '%snonexistent_field' % util.HOMOMORPHIC_FLOAT_PREFIX
self.assertRaises(ValueError,
encrypted_bigquery_client._DecryptValues,
field, table, 1, ciphers, cars_schema,
util.HOMOMORPHIC_FLOAT_PREFIX)
def testGetUnencryptedValues(self):
table = [[1], [2], [3], [None]]
column = encrypted_bigquery_client._GetUnencryptedValuesWithType(
table, 0, 'integer')
self.assertEqual(column, [1, 2, 3, util.LiteralToken('null', None)])
table = [[1, 'Hello'], [2, None], [None, 'Bye']]
column = encrypted_bigquery_client._GetUnencryptedValuesWithType(
table, 1, 'string')
self.assertEqual(column,
[util.StringLiteralToken('"Hello"'),
util.LiteralToken('null', None),
util.StringLiteralToken('"Bye"')])
self.assertRaises(ValueError,
encrypted_bigquery_client._GetUnencryptedValuesWithType,
table, 1, None)
def testDecryptGroupConcatValues(self):
cars_schema = test_util.GetCarsSchema()
jobs_schema = test_util.GetJobsSchema()
master_key = test_util.GetMasterKey()
query = 'GROUP_CONCAT(%sModel)' % util.PROBABILISTIC_PREFIX
cipher = ecrypto.ProbabilisticCipher(master_key)
ciphers = {util.PROBABILISTIC_PREFIX: cipher}
unencrypted_values = (
[['A', 'B', 'C', 'D'], ['1', '2', '3', '4'], ['Hello', 'Bye']])
table = []
for values in unencrypted_values:
encrypted_values = []
for token in values:
encrypted_values.append(cipher.Encrypt(unicode(token)))
table.append([','.join(encrypted_values), random.random()])
table.insert(0, [None, None])
column = encrypted_bigquery_client._DecryptGroupConcatValues(
query, table, 0, ciphers, cars_schema, util.PROBABILISTIC_PREFIX)
self.assertEqual(column,
[util.LiteralToken('null', None),
util.StringLiteralToken('"A,B,C,D"'),
util.StringLiteralToken('"1,2,3,4"'),
util.StringLiteralToken('"Hello,Bye"')])
query = ('GROUP_CONCAT(citiesLived.job.%sposition) within citiesLived.job'
% util.PSEUDONYM_PREFIX)
cipher = ecrypto.PseudonymCipher(master_key)
ciphers = {util.PSEUDONYM_PREFIX: cipher}
table = []
for values in unencrypted_values:
encrypted_values = []
for token in values:
encrypted_values.append(cipher.Encrypt(unicode(token)))
table.append([','.join(encrypted_values)])
column = encrypted_bigquery_client._DecryptGroupConcatValues(
query, table, 0, ciphers, jobs_schema, util.PSEUDONYM_PREFIX)
self.assertEqual(column,
[util.StringLiteralToken('"A,B,C,D"'),
util.StringLiteralToken('"1,2,3,4"'),
util.StringLiteralToken('"Hello,Bye"')])
query = '%sModel' % util.PROBABILISTIC_PREFIX
self.assertRaises(ValueError,
encrypted_bigquery_client._DecryptGroupConcatValues,
query, table, 0, ciphers, cars_schema,
util.PROBABILISTIC_PREFIX)
query = ('GROUP_CONCAT(citiesLived.%snumberOfYears) within citiesLived'
% util.HOMOMORPHIC_FLOAT_PREFIX)
self.assertRaises(bigquery_client.BigqueryInvalidQueryError,
encrypted_bigquery_client._DecryptGroupConcatValues,
query, table, 0, ciphers, jobs_schema,
util.HOMOMORPHIC_FLOAT_PREFIX)
def main(_):
googletest.main()
if __name__ == '__main__':
app.run()
| 2.8125
| 3
|
server/sttp.py
|
S1ckret/stm32-esp8266-smart-house
| 2
|
12777387
|
<filename>server/sttp.py
# S1ckret Team Transfer Protocol
import constants
dict_sensor_id_to_file = {
1 : "ledStatus.txt",
2 : "temperature.txt"
}
def handle_R_frame(msg):
sensor_id = msg[1]
payload_size = msg[2]
payload = msg[3:]
data = int.from_bytes(payload, byteorder="big")
print(f"Got frame: sensor ID: {sensor_id}, payload size: {payload_size}, payload: {payload}\nConverted data: {data} ")
file_name = dict_sensor_id_to_file[sensor_id]
try:
with open(constants.fileRoot + file_name, 'wb') as file:
# TODO: add support for different payload size
file.write(str(data).encode(constants.encoding))
except OSError as e:
print("ERROR #{} while writing into {}".format(e.errno, constants.fileRoot + file_name))
return b"ERROR"
return b"OK"
def handle_sttp_msg(msg):
response = b""
    if chr(msg[0]) == 'R':
response = handle_R_frame(msg)
return response
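if __name__ == "__main__":
    # Hypothetical frame sketch (values are illustrative assumptions, not real sensor data).
    # An 'R' frame is laid out as: b'R' + sensor id + payload size + big-endian payload,
    # which is exactly what handle_R_frame() above unpacks.
    example_frame = b"R" + bytes([2, 1]) + (25).to_bytes(1, byteorder="big")
    print("sensor id:", example_frame[1],
          "payload size:", example_frame[2],
          "value:", int.from_bytes(example_frame[3:], byteorder="big"))
    # handle_sttp_msg(example_frame) would write "25" into the temperature file.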
| 2.71875
| 3
|
backend/api/migrations/0007_auto_20210930_1352.py
|
giacomooo/CASFEE_Project2
| 0
|
12777388
|
# Generated by Django 3.2.4 on 2021-09-30 11:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0006_reservation_iscanceled'),
]
operations = [
migrations.AddField(
model_name='reservation',
name='Amount',
field=models.DecimalField(decimal_places=2, default=10, max_digits=7),
preserve_default=False,
),
migrations.AddField(
model_name='reservation',
name='PricePerHour',
field=models.DecimalField(decimal_places=2, default=3, max_digits=5),
preserve_default=False,
),
]
| 1.703125
| 2
|
__init__.py
|
MattEvans16/TempController
| 0
|
12777389
|
<filename>__init__.py
from flask import Flask, render_template, request, redirect, url_for, session, abort, make_response, g, Response, stream_with_context, jsonify, send_from_directory
from temp_config import *
from temp_control import *
from random import randint
app = Flask('temp_control')
@app.teardown_appcontext
def close_db(error):
"""closes db connection"""
app.logger.debug("closing db")
if hasattr(g, 'db'):
app.logger.debug("actually closing db")
g.db.close()
@app.route('/', methods=['GET'])
def Home():
return render_template('index.html')
@app.route('/get/temp', methods=['GET','POST'])
def getTemp():
'''returns all of the current sensor values. We should probably actually have this pulled from DB or something, not sure if we should really wait on I2C Comms during a web request....?
'''
with open('tmp/fTemp') as fo:
tempF = fo.read()
with open('tmp/cTemp') as fo:
tempC = fo.read()
with open('tmp/humidity') as fo:
humidity = fo.read()
data = {'tempF':tempF, 'tempC':tempC, 'humidity':humidity}
return jsonify(**data)
@app.route('/set/power', methods=['POST','GET'])
def setPower():
""" writers the value 1 or 0 to the tmp/servo_setting file
"""
formData = request.form.to_dict()
powerValue = formData.get('powerValue',None)#str(randint(0,1))
if powerValue is None:
data = {'err':1, 'powerValue':powerValue}
else:
app.logger.info("Recv'd setPower = {}".format(powerValue))
with open('tmp/servo_setting', 'w') as f:
f.write(powerValue)
data = {'err':0, 'powerValue':powerValue}
return jsonify(**data)
if __name__ == '__main__':
with app.app_context():
setConfig()
app.logger.debug("starting main flask app")
app.run(threaded=False,host='0.0.0.0')
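# --- Hypothetical client sketch (not part of the original app) ---
# With the server running, the endpoints above can be exercised like this
# (Flask's default port 5000 is an assumption here):
#   import requests
#   requests.get("http://localhost:5000/get/temp").json()
#     -> {"tempF": "...", "tempC": "...", "humidity": "..."}
#   requests.post("http://localhost:5000/set/power", data={"powerValue": "1"}).json()
#     -> {"err": 0, "powerValue": "1"}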
| 2.578125
| 3
|
WikiRecs-notebook.py
|
drsaunders/wikirecs
| 2
|
12777390
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: wikirecs
# language: python
# name: wikirecs
# ---
# # WikiRecs
# A project to recommend the next Wikipedia article you might like to edit
# + init_cell=true
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import logging
import wikipedia
import requests
import os
import wikirecs as wr
import implicit
from scipy.sparse import csr_matrix, csc_matrix, lil_matrix, coo_matrix
from tqdm.auto import tqdm
import umap
import pickle
import collections
import recommenders
import plotly.express as px
from pyarrow import feather
import itertools
from itables import show
import matplotlib
from implicit.nearest_neighbours import (
bm25_weight)
# -
from itables.javascript import load_datatables
load_datatables()
# + init_cell=true
pd.set_option('display.max_rows', 100)
pd.set_option('display.min_rows', 100)
# + init_cell=true
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
# -
# # Assemble the complete histories
import os
all_histories = []
for fname in os.listdir('edit_histories_2021-05-28'):
if 'feather' in fname:
all_histories.append(feather.read_feather('edit_histories_2021-05-28/{}'.format(fname)))
all_histories = pd.concat(all_histories, ignore_index=True)
feather.write_feather(all_histories, "all_histories_2021-05-28.feather")
# %%time
all_histories = feather.read_feather("all_histories_2021-05-28.feather")
all_histories.columns
len(all_histories.pageid.unique())
# # Load all_histories (raw data), transform and split
# +
# %%time
all_histories = feather.read_feather("all_histories_2021-05-28.feather")
print("Length raw edit history data: {}".format(len(all_histories)))
# +
from pull_edit_histories import get_edit_history
## Add one particular user
cols = ['userid', 'user', 'pageid', 'title',
'timestamp', 'sizediff']
with open("../username.txt", "r") as file:
for username in file:
oneuser = get_edit_history(user=username.strip(),
latest_timestamp="2021-05-28T22:02:09Z",
earliest_timestamp="2020-05-28T22:02:09Z")
oneuser = pd.DataFrame(oneuser).loc[:,cols]
all_histories = pd.concat([all_histories, oneuser], ignore_index=True)
print("Length after adding users: {}".format(len(all_histories)))
# -
# ## EDA on raw histories
# Look at the distribution of edit counts
edit_counts = all_histories.groupby('userid').userid.count().values
plt.figure(figsize=(20,8))
plt.subplot(1,2,1)
sns.distplot(edit_counts,kde=False,bins=np.arange(0,20000,200))
plt.xlabel('Number of edits by user')
plt.subplot(1,2,2)
sns.distplot(edit_counts,kde=False,bins=np.arange(0,200,1))
plt.xlim([0,200])
plt.xlabel('Number of edits by user')
num_counts = len(edit_counts)
print("Median edit counts: %d" % np.median(edit_counts))
thres = 5
over_thres = np.sum(edit_counts > thres)
print("Number over threshold %d: %d (%.f%%)" % (thres, over_thres, 100*over_thres/num_counts))
# Most edits by user
all_histories.groupby(['userid','user']).userid.count().sort_values(ascending=False)
# Find the elbow in number of edits
plt.plot(all_histories.groupby(['userid','user']).userid.count().sort_values(ascending=False).values)
# plt.ylim([0,20000])
# +
# What are the most popular pages (edited by the most users)
page_popularity = all_histories.drop_duplicates(subset=['title','user']).groupby('title').count().user.sort_values()
pd.set_option('display.max_rows', 1000)
page_popularity.iloc[-1000:].iloc[::-1]
# -
# ## Clean data
# ### Remove consecutive edits and summarize runs
# +
# %%time
def remove_consecutive_edits(df):
c = dict(zip(df.columns, range(len(df.columns))))
keyfunc = lambda x: (x[c['userid']],x[c['pageid']])
first_and_last = lambda run: [run[0][c['userid']],
run[0][c['user']],
run[0][c['pageid']],
run[0][c['title']],
run[-1][c['timestamp']],
run[0][c['timestamp']],
sum([abs(r[c['sizediff']]) for r in run]),
len(run)]
d = df.values.tolist()
return pd.DataFrame([first_and_last(list(g)) for k,g in itertools.groupby(d, key=keyfunc)],
columns=['userid', 'user', 'pageid', 'title', 'first_timestamp', 'last_timestamp','sum_sizediff','consecutive_edits'])
clean_histories = remove_consecutive_edits(all_histories)
# -
# ### Remove top N most popular pages
# +
# Get the top most popular pages
TOPN = 20
popularpages = all_histories.drop_duplicates(subset=['title','pageid','userid']).groupby(['title','pageid']).count().user.sort_values()[-TOPN:]
before_count = len(all_histories)
# -
popularpages
# Remove those popular pages
popular_pageids = popularpages.index.get_level_values(level='pageid').values
is_popular_page_edit = clean_histories.pageid.isin(popular_pageids)
clean_histories = clean_histories.loc[~is_popular_page_edit].copy()
all_histories = None
after_count = len(clean_histories)
print("%d edits (%.1f%%) were in top %d popular pages. Length after removing: %d" % (np.sum(is_popular_page_edit),
100* np.sum(is_popular_page_edit)/before_count,
TOPN,
after_count)
)
print("Number of unique page ids: {}".format(len(clean_histories.pageid.unique())))
# ### Remove users with too many or too few edits
MIN_EDITS = 5
MAX_EDITS = 10000
# Get user edit counts
all_user_edit_counts = clean_histories.groupby(['userid','user']).userid.count()
# +
# Remove users with too few edits
keep_user = all_user_edit_counts.values >= MIN_EDITS
# Remove users with too many edits
keep_user = keep_user & (all_user_edit_counts.values <= MAX_EDITS)
# Remove users with "bot" in the name
is_bot = ['bot' in username.lower() for username in all_user_edit_counts.index.get_level_values(1).values]
keep_user = keep_user & ~np.array(is_bot)
print("Keep %d users out of %d (%.1f%%)" % (np.sum(keep_user), len(all_user_edit_counts), 100*float(np.sum(keep_user))/len(all_user_edit_counts)))
# +
# Remove those users
userids_to_keep = all_user_edit_counts.index.get_level_values(0).values[keep_user]
clean_histories = clean_histories.loc[clean_histories.userid.isin(userids_to_keep)]
clean_histories = clean_histories.reset_index(drop=True)
# -
print("Length after removing users: {}".format(len(clean_histories)))
# %%time
# Save cleaned histories
feather.write_feather(clean_histories, '../clean_histories_2021-05-28.feather')
# ## Build lookup tables
# %%time
clean_histories = feather.read_feather('../clean_histories_2021-05-28.feather')
# +
# Page id to title and back
lookup = clean_histories.drop_duplicates(subset=['pageid']).loc[:,['pageid','title']]
p2t = dict(zip(lookup.pageid, lookup.title))
t2p = dict(zip(lookup.title, lookup.pageid))
# User id to name and back
lookup = clean_histories.drop_duplicates(subset=['userid']).loc[:,['userid','user']]
u2n = dict(zip(lookup.userid, lookup.user))
n2u = dict(zip(lookup.user, lookup.userid))
# +
# Page id and userid to index in co-occurrence matrix and back
pageids = np.sort(clean_histories.pageid.unique())
userids = np.sort(clean_histories.userid.unique())
p2i = {pageid:i for i, pageid in enumerate(pageids)}
u2i = {userid:i for i, userid in enumerate(userids)}
i2p = {v: k for k, v in p2i.items()}
i2u = {v: k for k, v in u2i.items()}
# +
# User name and page title to index and back
n2i = {k:u2i[v] for k, v in n2u.items() if v in u2i}
t2i = {k:p2i[v] for k, v in t2p.items() if v in p2i}
i2n = {v: k for k, v in n2i.items()}
i2t = {v: k for k, v in t2i.items()}
# -
wr.save_pickle((p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t), '../lookup_tables_2021-05-28.pickle')
wr.save_pickle((userids, pageids), '../users_and_pages_2021-05-28.pickle')
#
# ## Build test and training set
p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t = wr.load_pickle('../lookup_tables_2021-05-28.pickle')
userids, pageids = wr.load_pickle('../users_and_pages_2021-05-28.pickle')
# Make a test set from the most recent edit by each user
histories_test = clean_histories.groupby(['userid','user'],as_index=False).first()
# Subtract it from the rest to make the training set
histories_train = wr.dataframe_set_subtract(clean_histories, histories_test)
histories_train.reset_index(drop=True, inplace=True)
# Make a dev set from the second most recent edit by each user
histories_dev = histories_train.groupby(['userid','user'],as_index=False).first()
# Subtract it from the rest to make the final training set
histories_train = wr.dataframe_set_subtract(histories_train, histories_dev)
histories_train.reset_index(drop=True, inplace=True)
print("Length of test set: {}".format(len(histories_test)))
print("Length of dev set: {}".format(len(histories_dev)))
print("Length of training after removal of test: {}".format(len(histories_train)))
print("Number of pages in training set: {}".format(len(histories_train.pageid.unique())))
print("Number of users in training set: {}".format(len(histories_train.userid.unique())))
print("Number of pages with > 1 user editing: {}".format(np.sum(histories_train.drop_duplicates(subset=['title','user']).groupby('title').count().user > 1)))
feather.write_feather(histories_train, '../histories_train_2021-05-28.feather')
feather.write_feather(histories_dev, '../histories_dev_2021-05-28.feather')
feather.write_feather(histories_test, '../histories_test_2021-05-28.feather')
# +
resurface_userids, discovery_userids = wr.get_resurface_discovery(histories_train, histories_dev)
print("%d out of %d userids are resurfaced (%.1f%%)" % (len(resurface_userids), len(userids), 100*float(len(resurface_userids))/len(userids)))
print("%d out of %d userids are discovered (%.1f%%)" % (len(discovery_userids), len(userids), 100*float(len(discovery_userids))/len(userids)))
# -
wr.save_pickle((resurface_userids, discovery_userids), '../resurface_discovery_users_2021-05-28.pickle')
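# A minimal sketch of the resurface/discovery split, assuming wr.get_resurface_discovery
# labels a user "resurface" when their held-out dev edit is a page already in their
# training history and "discovery" otherwise (illustrative reimplementation only; the
# variable names below are hypothetical and the wr helper remains the source of truth).
# +
train_pages = histories_train.groupby('userid').pageid.apply(set)
dev_first = histories_dev.drop_duplicates(subset=['userid']).set_index('userid').pageid
resurface_sketch = [u for u, p in dev_first.items() if p in train_pages.get(u, set())]
discovery_sketch = [u for u, p in dev_first.items() if p not in train_pages.get(u, set())]
# -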
# # FIG Rama and other examples
print("Number of edits by Rama in a year: {}".format(len(all_histories.loc[all_histories.user == 'Rama'])))
print("Number of pages edited: {}".format(len(all_histories.loc[all_histories.user == 'Rama'].drop_duplicates(subset=['pageid']))))
# +
from pull_edit_histories import get_edit_history
oneuser = get_edit_history(user="Thornstrom",
latest_timestamp="2021-05-28T22:02:09Z",
earliest_timestamp="2020-05-28T22:02:09Z")
oneuser = pd.DataFrame(oneuser).loc[:,cols]
# -
wr.print_user_history(all_histories, user="Rama")
wr.print_user_history(all_histories, user="Meow")
# # Build matrix for implicit collaborative filtering
# +
# %%time
# Get the user/page edit counts
for_implicit = histories_train.groupby(["userid","pageid"]).count().first_timestamp.reset_index().rename(columns={'first_timestamp':'edits'})
for_implicit.loc[:,'edits'] = for_implicit.edits.astype(np.int32)
# +
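# Rows of the matrix index pages and columns index users, so
# implicit_matrix[p2i[page], u2i[user]] is that user's edit count on that page;
# the t2i/u2i lookups used later assume this orientation.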
row = np.array([p2i[p] for p in for_implicit.pageid.values])
col = np.array([u2i[u] for u in for_implicit.userid.values])
implicit_matrix_coo = coo_matrix((for_implicit.edits.values, (row, col)))
implicit_matrix = csc_matrix(implicit_matrix_coo)
# -
# %%time
wr.save_pickle(implicit_matrix,'../implicit_matrix_2021-05-28.pickle')
# ### Test the matrix and indices
implicit_matrix = wr.load_pickle('../implicit_matrix_2021-05-28.pickle')
# +
# Crude item to item recs by looking for items edited by the same editors (count how many editors overlap)
veditors = np.flatnonzero(implicit_matrix[t2i['Hamburger'],:].toarray())
indices = np.flatnonzero(np.sum(implicit_matrix[:,veditors] > 0,axis=1))
totals = np.asarray(np.sum(implicit_matrix[:,veditors] > 0 ,axis=1)[indices])
sorted_order = np.argsort(totals.squeeze())
[i2t.get(i, "") + " " + str(total[0]) for i,total in zip(indices[sorted_order],totals[sorted_order])][::-1]
# -
# Histories of editors who had that item
for ved in veditors:
print("\n\n\n" + i2n[ved])
wr.print_user_history(all_histories, user=i2n[ved])
# # Implicit recommendation
implicit_matrix = wr.load_pickle('../implicit_matrix_2021-05-28.pickle')
p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t = wr.load_pickle('../lookup_tables_2021-05-28.pickle')
bm25_matrix = bm25_weight(implicit_matrix, K1=100, B=0.25)
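# bm25_weight rescales each raw edit count with a BM25-style weighting, roughly
# idf * x * (K1 + 1) / (x + K1 * (1 - B + B * row_length / avg_row_length)):
# K1 controls how quickly repeated edits saturate and B controls length normalization
# (hedged paraphrase of the implicit library's weighting, not an exact transcription).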
num_factors = 200
regularization = 0.01
os.environ["OPENBLAS_NUM_THREADS"] = "1"
model = implicit.als.AlternatingLeastSquares(
factors=num_factors, regularization=regularization
)
model.fit(bm25_matrix)
wr.save_pickle(model,'../als%d_bm25_model.pickle' % num_factors)
model = wr.load_pickle('../als200_bm25_model_2021-05-28.pickle')
results = model.similar_items(t2i['Steven Universe'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
u = n2u["Rama"]
recommendations = model.recommend(u2i[u], bm25_matrix.tocsc(), N=1000, filter_already_liked_items=False)
[ ("*" if implicit_matrix[ind,u2i[u]]>0 else "") +
'%s %.4f' % (i2t[ind], score) + ' %d' % (implicit_matrix[ind,:]>0).sum()
for ind, score in recommendations]
# ## Grid search results
grid_search_results = wr.load_pickle("../implicit_grid_search.pickle")
pd.DataFrame(grid_search_results)
pd.DataFrame([[i['num_factors'], i['regularization']] + list(i['metrics'].values()) for i in grid_search_results],
columns = ['num_factors','regularization'] + list(grid_search_results[0]['metrics'].keys()))
grid_search_results_bm25 = wr.load_pickle("../implicit_grid_search_bm25.pickle")
pd.DataFrame([[i['num_factors'], i['regularization']] + list(i['metrics'].values()) for i in grid_search_results_bm25],
columns = ['num_factors','regularization'] + list(grid_search_results_bm25[0]['metrics'].keys()))
# # BM25 Recommendation
from implicit.nearest_neighbours import BM25Recommender
# +
bm25_matrix = bm25_weight(implicit_matrix, K1=20, B=1)
bm25_matrix = bm25_matrix.tocsc()
sns.distplot(implicit_matrix[implicit_matrix.nonzero()],bins = np.arange(0,100,1),kde=False)
sns.distplot(bm25_matrix[bm25_matrix.nonzero()],bins = np.arange(0,100,1),kde=False)
# -
K1 = 100
B = 0.25
model = BM25Recommender(K1, B)
model.fit(implicit_matrix)
wr.save_pickle(model, '../bm25_model_2021-05-28.pkl')
results = model.similar_items(t2i['<NAME>'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
a = ['Steven Universe 429.4746',
'List of Steven Universe episodes 178.4544',
'Demon Bear 128.7237',
'Legion of Super Heroes (TV series) 128.7237',
'The Amazing World of Gumball 126.3522',
'Steven Universe Future 123.9198']
results = model.similar_items(t2i['Steven Universe'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
results = model.similar_items(t2i['<NAME>'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
results = model.similar_items(t2i['Hamburger'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
u = n2u["Rama"]
recommendations = model.recommend(u2i[u], implicit_matrix.astype(np.float32), N=1000, filter_already_liked_items=True)
[ ("*" if implicit_matrix[ind,u2i[u]]>0 else "") +
'%s %.4f' % (i2t[ind], score)
for ind, score in recommendations]
plt.plot([ score for i,(ind, score) in enumerate(recommendations) if implicit_matrix[ind,u2i[u]]==0])
wr.save_pickle(model, "b25_model.pickle")
model = wr.load_pickle("b25_model.pickle")
# # Evaluate models
# ## Item to item recommendation
results = model.similar_items(t2i['Steven Universe'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
# ## User to item recommendations
# +
# Check out a specific example
u = n2u["HyprMarc"]
wr.print_user_history(clean_histories, userid=u)
# -
u = n2u["HyprMarc"]
recommendations = model.recommend(u2i[u], implicit_matrix, N=100, filter_already_liked_items=False)
[ ("*" if implicit_matrix[ind,u2i[u]]>0 else "") +
'%s %.4f' % (i2t[ind], score)
for ind, score in recommendations]
# # Visualize implicit embeddings
model = wr.load_pickle('../als150_model.pickle')
# +
# Only plot the ones with over 3 entries
nonzero = np.flatnonzero(implicit_matrix.sum(axis=1))
indices = np.squeeze(np.asarray(np.sum(implicit_matrix[nonzero,:],axis=1))) > 3
indices = nonzero[indices]
# -
len(indices)
# Visualize the collaborative filtering item vectors, embedding into 2D space with UMAP
# Alternative subsample: indices = nonzero[::100]
embedding = umap.UMAP().fit_transform(model.item_factors[indices,:])
plt.figure(figsize=(10,10))
plt.plot(embedding[:,0], embedding[:,1],'.')
# _ = plt.axis('square')
# ## Visualize actors in the embeddings space
# +
edit_counts = np.squeeze(np.asarray(np.sum(implicit_matrix[indices,:],axis=1)))
log_edit_counts = np.log10(np.squeeze(np.asarray(np.sum(implicit_matrix[indices,:],axis=1))))
emb_df = pd.DataFrame({'dim1':embedding[:,0].squeeze(),
'dim2':embedding[:,1].squeeze(),
'title':[i2t[i] for i in indices],
'edit_count':edit_counts,
'log_edit_count':log_edit_counts
})
# -
actors = ['<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME> (actor)',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>']
actor_indices = [t2i[a] for a in actors]
edit_counts = np.squeeze(np.asarray(np.sum(implicit_matrix[actor_indices,:],axis=1)))
log_edit_counts = np.log10(np.squeeze(np.asarray(np.sum(implicit_matrix[actor_indices,:],axis=1))))
embedding = umap.UMAP().fit_transform(model.item_factors[actor_indices,:])
emb_df = pd.DataFrame({'dim1':embedding[:,0].squeeze(),
'dim2':embedding[:,1].squeeze(),
'title':[i2t[i] for i in actor_indices],
'edit_count':edit_counts,
'log_edit_count':log_edit_counts
})
key = np.zeros(len(actors))
key[:8] = 1
fig = px.scatter(data_frame=emb_df,
x='dim1',
y='dim2',
hover_name='title',
color=key,
hover_data=['edit_count'])
fig.update_layout(
autosize=False,
width=600,
height=600,)
fig.show()
# +
# Full embedding plotly interactive visualization
emb_df = pd.DataFrame({'dim1':embedding[:,0].squeeze(),
'dim2':embedding[:,1].squeeze(),
'title':[i2t[i] for i in indices],
'edit_count':edit_counts,
'log_edit_count':log_edit_counts
})
fig = px.scatter(data_frame=emb_df,
x='dim1',
y='dim2',
hover_name='title',
color='log_edit_count',
hover_data=['edit_count'])
fig.update_layout(
autosize=False,
width=600,
height=600,)
fig.show()
# -
# # Evaluate on test set
# +
# Load the edit histories in the training set and the test set
histories_train = feather.read_feather('../histories_train_2021-05-28.feather')
histories_test = feather.read_feather('../histories_test_2021-05-28.feather')
histories_dev = feather.read_feather('../histories_dev_2021-05-28.feather')
implicit_matrix = wr.load_pickle('../implicit_matrix_2021-05-28.pickle')
p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t = wr.load_pickle('../lookup_tables_2021-05-28.pickle')
userids, pageids = wr.load_pickle('../users_and_pages_2021-05-28.pickle')
resurface_userids, discovery_userids = wr.load_pickle('../resurface_discovery_users_2021-05-28.pickle')
results = {}
# -
wr.display_recs_with_history(
recs,
userids[:100],
histories_test,
histories_train,
p2t,
u2n,
recs_to_display=5,
hist_to_display=10,
)
# ## Most popular
# +
# %%time
K=20
rec_name = "Popularity"
prec = recommenders.PopularityRecommender(histories_train)
precs = prec.recommend_all(userids, K)
wr.save_pickle(precs, "../" + rec_name +"_recs.pickle")
# +
results[rec_name] = wr.get_recs_metrics(
histories_dev, precs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# -
# ## Most recent
# %%time
# Most recent
K=20
rrec = recommenders.MostRecentRecommender(histories_train)
rrecs = rrec.recommend_all(userids, K, interactions=histories_train)
rec_name = "Recent"
wr.save_pickle(rrecs, "../" + rec_name +"_recs.pickle")
len(resurface_userids)
results = {}
results[rec_name] = wr.get_recs_metrics(
histories_dev, rrecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# ## Most frequent
# %%time
# Sorted by frequency of edits
K=20
frec = recommenders.MostFrequentRecommender(histories_train)
frecs = frec.recommend_all(userids, K, interactions=histories_train)
rec_name = "Frequent"
wr.save_pickle(frecs, "../" + rec_name +"_recs.pickle")
results[rec_name] = wr.get_recs_metrics(
histories_dev, frecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# ## BM25
# %%time
K=20
brec = recommenders.MyBM25Recommender(model, implicit_matrix)
brecs = brec.recommend_all(userids, K, u2i=u2i, n2i=n2i, i2p=i2p, filter_already_liked_items=False)
rec_name = "bm25"
wr.save_pickle(brecs, "../" + rec_name +"_recs.pickle")
# filter_already_liked_items = False
results[rec_name] = wr.get_recs_metrics(
histories_dev, brecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# filter_already_liked_items = True
rec_name = "bm25_filtered"
brecs_filtered = brec.recommend_all(userids, K, u2i=u2i, n2i=n2i, i2p=i2p, filter_already_liked_items=True)
wr.save_pickle(brecs_filtered, "../" + rec_name +"_recs.pickle")
results[rec_name] = wr.get_recs_metrics(
    histories_dev, brecs_filtered, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# ## ALS Implicit collaborative filtering
model_als = wr.load_pickle('../als200_bm25_model_2021-05-28.pickle')
# %%time
rec_name = "als"
K=20
irec = recommenders.ImplicitCollaborativeRecommender(model_als, bm25_matrix.tocsc())
irecs = irec.recommend_all(userids, K, i2p=i2p, filter_already_liked_items=False)
wr.save_pickle(irecs, "../" + rec_name +"_recs.pickle")
results[rec_name] = wr.get_recs_metrics(
histories_dev, irecs, K, discovery_userids, resurface_userids, bm25_matrix.tocsc(), i2p, u2i)
results[rec_name]
rec_name = "als_filtered"
K=20
irec = recommenders.ImplicitCollaborativeRecommender(model_als, bm25_matrix.tocsc())
irecs_filtered = irec.recommend_all(userids, K, i2p=i2p, filter_already_liked_items=True)
results[rec_name] = wr.get_recs_metrics(
histories_dev, irecs_filtered, K, discovery_userids, resurface_userids, bm25_matrix.tocsc(), i2p, u2i)
results[rec_name]
wr.save_pickle(irecs_filtered, "../" + rec_name +"_recs.pickle")
show(pd.DataFrame(results).T)
# ## Jaccard
# %%time
# Sorted by Jaccard
K=20
rrec = recommenders.MostRecentRecommender(histories_train)
recent_pages_dict = rrec.all_recent_only(K, userids, interactions=histories_train)
jrec = recommenders.JaccardRecommender(implicit_matrix, p2i=p2i, t2i=t2i, i2t=i2t, i2p=i2p, n2i=n2i, u2i=u2i, i2u=i2u)
jrecs = jrec.recommend_all(userids,
K,
num_lookpage_pages=1,
recent_pages_dict=recent_pages_dict,
interactions=histories_train)
wr.save_pickle(jrecs,"jaccard-1_recs.pickle")
rec_name = "Jaccard"
results[rec_name] = wr.get_recs_metrics(
histories_dev, jrecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
wr.display_recs_with_history(
jrecs,
userids[:30],
histories_test,
histories_train,
p2t,
u2n,
recs_to_display=5,
hist_to_display=10,
)
# %%time
# Sorted by Jaccard
K=5
jrec = recommenders.JaccardRecommender(implicit_matrix, p2i=p2i, t2i=t2i, i2t=i2t, i2p=i2p, n2i=n2i, u2i=u2i, i2u=i2u)
jrecs = jrec.recommend_all(userids[:1000],
10,
num_lookpage_pages=50,
recent_pages_dict=recent_pages_dict,
interactions=histories_train)
print("Jaccard")
print("Recall @ %d: %.1f%%" % (K, 100*wr.recall(histories_test, jrecs, K)))
print("Prop resurfaced: %.1f%%" % (100*wr.prop_resurface(jrecs, K, implicit_matrix, i2p, u2i)))
print("Recall @ %d (discovery): %.1f%%" % (K, 100*wr.recall(histories_test, jrecs, K, userid_subset=discovery_userids)))
print("Recall @ %d (resurface): %.1f%%" % (K, 100*wr.recall(histories_test, jrecs, K, userid_subset=resurface_userids)))
# ## Interleaved
recs.keys()
# +
# Interleave the Recent and filtered BM25 recommendation lists
K=20
rec_name = "Interleaved"
print(rec_name)
intrec = recommenders.InterleaveRecommender()
intrecs = intrec.recommend_all(K, [recs['Recent'], recs['bm25_filtered']])
wr.save_pickle(intrecs, "../" + rec_name +"_recs.pickle")
# -
results[rec_name] = wr.get_recs_metrics(
histories_dev, intrecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# # Report on evaluations results
# ## Hard coded metrics
# +
results = {}
results["Popularity"] = {'recall': 0.16187274312040842,
'ndcg': 0.0005356797596941751,
'resurfaced': 0.6213422985929523,
'recall_discover': 0.11947959996459864,
'recall_resurface': 0.2624396388830569,
'ndcg_discover': 0.000410354483750028,
'ndcg_resurface': 0.0008329819416998272}
results["Recent"] = {'recall': 22.618602913709378,
'ndcg': 0.14306080818547054,
'resurfaced': 71.13808990163118,
'recall_discover': 0.03982653332153288,
'recall_resurface': 76.18097837497375,
'ndcg_discover': 0.00011494775493754298,
'ndcg_resurface': 0.4821633227780786}
results["Frequent"] = {'recall': 20.834889802017184,
'ndcg': 0.11356953338215306,
'resurfaced': 76.10353629684971,
'recall_discover': 0.035401362952473675,
'recall_resurface': 70.17635943732941,
'ndcg_discover': 9.90570471847343e-05,
'ndcg_resurface': 0.38274923359395385}
results["ALS"] = {'recall': 5.488108579255385,
'ndcg': 0.026193145556306998,
'resurfaced': 16.251556468683848,
'recall_discover': 1.146119125586335,
'recall_resurface': 15.788368675204703,
'ndcg_discover': 0.004817135435898367,
'ndcg_resurface': 0.0769022655123215}
results["ALS_filtered"] = {'recall': 0.9027518366330469,
'ndcg': 0.003856703716094881,
'resurfaced': 0.0,
'recall_discover': 1.2832994070271706,
'recall_resurface': 0.0,
'ndcg_discover': 0.005482465270193466,
'ndcg_resurface': 0.0}
results["BM25"] = {'recall': 18.945336819823186,
'ndcg': 0.1015175508656068,
'resurfaced': 74.0469742248786,
'recall_discover': 1.3939286662536507,
'recall_resurface': 60.581566239764854,
'ndcg_discover': 0.004204510293040833,
'ndcg_resurface': 0.332367864833573}
results["BM25_filtered"] = {'recall': 1.8148424853691942,
'ndcg': 0.008622285155255174,
'resurfaced': 0.14848711243929774,
'recall_discover': 2.522347110363749,
'recall_resurface': 0.1364686122191896,
'ndcg_discover': 0.011740495141426633,
'ndcg_resurface': 0.0012251290280766518}
results["Interleaved"] = {'recall': 21.382766778732414,
'ndcg': 0.12924273396038563,
'resurfaced': 42.478676379031256,
'recall_discover': 1.8364457031595716,
'recall_resurface': 67.75141717404996,
'ndcg_discover': 0.006943981897312752,
'ndcg_resurface': 0.4193652616867473}
results_df = pd.DataFrame(results).T
results_df.reset_index(inplace=True)
# -
# ## Table of results
results_df
# ### FIG Table for post
# +
def scatter_text(x, y, text_column, data, title, xlabel, ylabel):
    """Scatter plot with a text label beside each x, y point.
    Based on this answer: https://stackoverflow.com/a/54789170/2641825"""
# Create the scatter plot
p1 = sns.scatterplot(x, y, data=data, size = 8, legend=False)
# Add text besides each point
for line in range(0,data.shape[0]):
p1.text(data[x][line]+0.01, data[y][line],
data[text_column][line], horizontalalignment='left',
size='medium', color='black', weight='semibold')
# Set title and axis labels
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
return p1
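# Example usage (sketch; column names refer to results_df before the rename below):
# scatter_text('recall_discover', 'recall_resurface', 'index', data=results_df,
#              title='Recall tradeoff', xlabel='Recall@20 discovery (%)',
#              ylabel='Recall@20 resurface (%)')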
def highlight_max(s):
'''
highlight the maximum in a Series yellow.
'''
is_max = s == s.max()
return ['background-color: yellow' if v else '' for v in is_max]
results_df.sort_values("recall", ascending=False).style.apply(highlight_max, subset=["recall",
"ndcg",
"resurfaced",
"recall_discover",
"recall_resurface",
"ndcg_discover",
"ndcg_resurface",]).format({"recall": "{:.1f}%",
"ndcg": "{:.3f}",
"resurfaced": "{:.1f}%",
"recall_discover": "{:.1f}%",
"recall_resurface": "{:.1f}%",
"ndcg_discover": "{:.3f}",
"ndcg_resurface": "{:.3f}",
})
# -
colnames = ["Recommender", "Recall@20", "nDCG@20","Resurfaced","Recall@20 discovery","Recall@20 resurface","nDCG@20 discovery","nDCG@20 resurface"]
#apply(highlight_max, subset=colnames[1:]).
results_df.columns = colnames
results_df.sort_values("Recall@20", ascending=False).style.\
format({"Recall@20": "{:.1f}%",
"nDCG@20": "{:.3f}",
"Resurfaced": "{:.1f}%",
"Recall@20 discovery": "{:.1f}%",
"Recall@20 resurface": "{:.1f}%",
"nDCG@20 discovery": "{:.3f}",
"nDCG@20 resurface": "{:.3f}",
})
# ## Scatter plots (resurface vs discover)
fig = px.scatter(data_frame=results_df,
x='ndcg_discover',
y='ndcg_resurface',
hover_name='index')
# hover_name='title',)
fig.show()
fig = px.scatter(data_frame=results_df,
x='recall_discover',
y='recall_resurface',
hover_name='index')
# hover_name='title',)
fig.show()
# ### FIG Scatterplot for post
x = 2*[results_df.loc[results_df.Recommender == "Interleaved","Recall@20 resurface"].values[0]]
y = [0, results_df.loc[results_df.Recommender == "Interleaved","Recall@20 discovery"].values[0]]
# +
sns.set_theme(style="darkgrid")
matplotlib.rcParams.update({'font.size': 48, 'figure.figsize':(8,5), 'legend.edgecolor':'k'})
plt.figure(figsize=(12,7))
A = results_df.loc[:,'Recall@20 discovery']
B = results_df.loc[:,'Recall@20 resurface']
x = 2*[results_df.loc[results_df.Recommender == "Interleaved","Recall@20 discovery"].values[0]]
y = [-1, results_df.loc[results_df.Recommender == "Interleaved","Recall@20 resurface"].values[0]]
plt.plot(x,y,":k")
x[0] = 0
y[0] = y[1]
# plt.rcParams.update({'font.size': 48})
plt.rc('xtick', labelsize=3)
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 22}
matplotlib.rc('font', **font)
plt.plot(x,y,":k")
plt.plot(A, B, '.', markersize=15)
for xyz in zip(results_df.Recommender, A, B): # <--
plt.gca().annotate('%s' % xyz[0], xy=np.array(xyz[1:])+(0.05,0), textcoords='data', fontsize=18) # <--
for tick in plt.gca().xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(20)
plt.xlabel("Recall@20 discovery (%)",fontsize=20)
plt.ylabel("Recall@20 resurface (%)",fontsize=20)
plt.xlim([0,3])
plt.ylim([-2,85])
axes = plt.gca()
# -
# ## Read recs in from files
recommender_names = ['Popularity', 'Recent', 'Frequent', 'ALS', 'ALS_filtered', 'BM25', 'BM25_filtered', 'Interleaved']
recs = {rname:wr.load_pickle("../" + rname + "_recs.pickle") for rname in recommender_names}
# ## Recall curves
histories_dev = feather.read_feather('../histories_dev_2021-05-28.feather')
plt.figure(figsize=(15,10))
for rname in recommender_names:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20)
# print(recall_curve[-1])
plt.plot(recall_curve,'.-')
plt.legend(recommender_names)
plt.figure(figsize=(15,10))
for rname in recommender_names:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20, discovery_userids)
plt.plot(recall_curve,'.-')
plt.legend(recommender_names)
plt.figure(figsize=(15,10))
for rname in recommender_names:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20, resurface_userids)
plt.plot(recall_curve,'.-')
plt.legend(recommender_names)
# ### FIG Implicit vs BM25 figure
sns.set_theme(style="darkgrid")
matplotlib.rcParams.update({'font.size': 18, 'figure.figsize':(8,5), 'legend.edgecolor':'k'})
plt.figure(figsize=(10,6))
for rname in ["ALS","BM25"]:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20, discovery_userids)
plt.plot(np.array(recall_curve)*100,'.-',markersize=12)
plt.legend( ["ALS","BM25"],title="Algorithm", fontsize=16, title_fontsize=16, facecolor="w")
plt.xlabel("@N",fontsize=20)
plt.ylabel("Discovery recall (%)",fontsize=20)
_ = plt.xticks(np.arange(0,20,2),np.arange(0,20,2)+1)
# plt.gca().legend(prop=dict(size=20))
for tick in plt.gca().xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(20)
# # User recommendation comparison
recs_subset = ["Recent","Frequent","Popularity","Implicit","bm25","interleaved"]
print("Next edit: " + histories_dev.loc[histories_dev.userid == userid].title.values[0])
# ## FIG Rama table
# +
def bold_viewed(val, viewed_pages):
    """
    Return a CSS font-weight rule: bold if the page title was
    already edited by the user in the training set, normal otherwise.
    """
    weight = 'bold' if val in viewed_pages else 'normal'
    return 'font-weight: %s' % weight
def color_target(val, target_page):
    """
    Return a CSS color rule: red if the page title is the user's
    actual next edit (the held-out target page), black otherwise.
    """
    color = 'red' if val == target_page else 'black'
    return 'color: %s' % color
def display_user_recs_comparison(user_name, recs, recs_subset, train_set, test_set, N=20):
userid = n2u[user_name]
recs_table = pd.DataFrame({rec_name: [p2t[r] for r in recs[rec_name][userid][:N]] for rec_name in recs_subset})
recs_table = recs_table.reset_index()
recs_table.loc[:,"index"] = recs_table.loc[:,"index"]+1
recs_table = recs_table.rename(columns={"index":""})
viewed_pages = train_set.loc[train_set.userid == userid,["title"]].drop_duplicates(subset=["title"]).values.squeeze()
target_page = test_set.loc[test_set.userid == userid].title.values[0]
# print("Next edit: " + target_page)
s = recs_table.style.applymap(bold_viewed, viewed_pages=viewed_pages).applymap(color_target, target_page=target_page)
display(s)
# +
recs_subset = ["Recent","Frequent","Popularity","ALS","ALS_filtered","BM25","BM25_filtered"]
display_user_recs_comparison('Rama', recs, recs_subset, histories_train, histories_dev, N=10)
# -
# ## Other individuals tables
display_user_recs_comparison('Meow', recs, recs_subset, histories_train, histories_dev, N=10)
display_user_recs_comparison('KingArti', recs, recs_subset, histories_train, histories_dev, N=10)
display_user_recs_comparison('Tulietto', recs, recs_subset, histories_train, histories_dev, N=10)
display_user_recs_comparison('Thornstrom', recs, recs_subset, histories_train, histories_dev, N=10)
# ## FIG Interleaved
display_user_recs_comparison('Rama', recs,['Interleaved'], histories_train, histories_dev, N=10)
display_user_recs_comparison('KingArti', recs,['Interleaved'], histories_train, histories_dev, N=10)
N = 20
display(pd.DataFrame({rec_name: [p2t[r] for r in recs[rec_name][n2u['HenryXVII']]][:N] for rec_name in recs_subset}))
persons_of_interest = [
"DoctorWho42",
"AxelSjögren",
"<NAME>",
"Tulietto",
"LipaCityPH",
"<NAME>",
"Thornstrom",
"Meow",
"HyprMarc",
"Jampilot",
"Rama"
]
N=10
irec_500 = recommenders.ImplicitCollaborativeRecommender(model, implicit_matrix)
irecs_poi = irec_500.recommend_all([n2u[user_name] for user_name in persons_of_interest], N, u2i=u2i, n2i=n2i, i2p=i2p)
# # Find interesting users
# +
edited_pages = clean_histories.drop_duplicates(subset=['title','user']).groupby('user').userid.count()
edited_pages = edited_pages[edited_pages > 50]
edited_pages = edited_pages[edited_pages < 300]
# -
clean_histories.columns
display_user_recs_comparison("Rama", recs, recs_subset, histories_train, histories_dev, N=20)
# +
index = list(range(len(edited_pages)))
np.random.shuffle(index)
for i in index[:10]:
user_name = edited_pages.index[i]
print(user_name)
display_user_recs_comparison(user_name, recs, recs_subset, histories_train, histories_dev, N=20)
print("\n\n\n")
# +
index = list(range(len(edited_pages)))
np.random.shuffle(index)
for i in index[:10]:
print(edited_pages.index[i])
display_user_recs_comparison
wr.print_user_history(user=edited_pages.index[i],all_histories=clean_histories)
print("\n\n\n")
# -
sns.distplot(edited_pages,kde=False,bins=np.arange(0,2000,20))
# # Repetition analysis
import itertools
clean_histories.head()
clean_histories.iloc[:1000].values.tolist()
df = clean_histories
dict(zip(df.columns, range(len(df.columns))))
def identify_runs(df):
    """Collapse consecutive edits by the same user to the same page into runs,
    returning a list of ([userid, pageid], run_length) pairs."""
    d = df.loc[:,['userid','pageid']].values.tolist()
    return [(k, len(list(g))) for k,g in itertools.groupby(d)]
# %%time
runs = identify_runs(clean_histories)
# +
lens = np.array([r[1] for r in runs])
single_edits = np.sum(lens==1)
total_edits = len(clean_histories)
print("Percent of edits that are part of a run: %.1f%%" % (100*(1-(float(single_edits)/total_edits))))
print("Percent of edits that are repetitions: %.1f%%" % (100*(1-len(runs)/total_edits)))
| 2.546875
| 3
|
SaveTheGalaxy.py
|
ObradovicNikola/SaveTheGalaxy
| 3
|
12777391
|
import random
import os.path
import pygame
import sys
from pygame.locals import *
WIDTH = 800
HEIGHT = 640
FPS = 60
POWERUP_TIME = 4000
RELOAD = 300
NUMSTARS = 30
TYPING_SPEED = 300
PLAYER_MAX_HEALTH = 100
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
YELLOW = (255, 211, 0)
LIGHT_GREEN = (185, 235, 98)
FONT = 'MyFont.ttf'
pygame.mixer.pre_init(44100, -16, 1, 512) # Decreasing the size of the buffer will reduce the latency
pygame.mixer.init() # handles sound
pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption('Save The Galaxy')
clock = pygame.time.Clock()
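# When running from a PyInstaller bundle, assets are unpacked to the temporary folder exposed as sys._MEIPASS.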
if hasattr(sys, '_MEIPASS'):
main_dir = sys._MEIPASS
else: main_dir = os.path.split(os.path.abspath(__file__))[0] + '\\data'
textfile_dir = os.path.split(os.path.abspath(__file__))[0]
FONT = main_dir + '\\' + FONT
def loadImage(file):
file = os.path.join(main_dir, file)
img = pygame.image.load(file)
return img.convert_alpha()
iconImg = pygame.transform.scale(loadImage('icon.png'), (30, 30))
pygame.display.set_icon(iconImg)
loadingScreenImg = pygame.transform.scale(loadImage('loadingscreen.png'), (WIDTH, HEIGHT))
loadingScreenImgRect = loadingScreenImg.get_rect()
screen.blit(loadingScreenImg, loadingScreenImgRect)
pygame.display.update()
def loadSound(file):
file = os.path.join(main_dir, file)
sound = pygame.mixer.Sound(file)
return sound
def printText(surface, text, size, x, y, color, center = 0):
font = pygame.font.Font(FONT, size)
font.set_bold(True)
textSurface = font.render(text, True, color)
text_rect = textSurface.get_rect()
if center == 0:
text_rect.bottomleft = (x, y)
else:
text_rect.center = center
surface.blit(textSurface, text_rect)
def slowType(s, y):
global TYPING_SPEED
typeFPS = 60
k = len(s)
i = 0
x = 30
lastLetter = pygame.time.get_ticks()
while i < k:
clock.tick(typeFPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == pygame.KEYDOWN:
if event.key == K_KP_ENTER or event.key == K_ESCAPE:
typeFPS = 0
if (pygame.time.get_ticks() - lastLetter) > (random.random()*TYPING_SPEED):
printText(screen, s[i], 16, x, y, YELLOW)
keyPress_sound.play()
pygame.display.update()
x += 16
i += 1
lastLetter = pygame.time.get_ticks()
def showStory():
screen.blit(storyImg, storyImgRect)
pygame.display.update()
story_music.play(-1)
slowType('GREETINGS BRAVE WARRIOR,', 20)
slowType('YOUR GALAXY IS IN GREAT DANGER', 40)
slowType('OF RUTHLESS ALIEN INVASION', 60)
slowType('YOU HAVE BEEN CHOSEN', 80)
slowType('TO FACE AGAINST THIS TYRANNY', 100)
slowType('YOU GOT MOST ADVANCED SPACE SHIP', 120)
slowType('YOU HAVE ASSIGNMENT TO DESTROY ENEMY ARMY', 140)
slowType('AND DEFEAT CAPTAIN, GENERAL AND LEADER.', 160)
slowType('IF YOU ACCOMPLISH THIS MISSION SUCCESSFULLY,', 180)
slowType('WHOLE GALAXY WILL BE ETERNALLY GRATEFUL AND', 200)
slowType('MAY THE FORCE ALWAYS BE ON YOUR SIDE', 220)
slowType('PRESS ANY KEY TO CONTINUE...', 260)
while True:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == pygame.KEYDOWN:
story_music.stop()
showGameStartScreen()
def drawHealthBar(surface, x, y, health, healthColor, maxhealth, barLength):
if health < 0:
health = 0
barHeight = 25
fill = (health / maxhealth) * barLength
outlineRect = pygame.Rect(x, y, barLength, barHeight)
fillRect = pygame.Rect(x, y, fill, barHeight)
pygame.draw.rect(surface, healthColor, fillRect)
pygame.draw.rect(surface, WHITE, outlineRect, 2)
def drawLives(surface, x, y, lives, img):
for i in range(lives):
imgRect = img.get_rect()
imgRect.x = x + 35*i
imgRect.y = y
surface.blit(img, imgRect)
class Player(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = playerImg
self.rect = self.image.get_rect()
self.radius = 22
self.rect.bottom = HEIGHT - 30
self.rect.centerx = WIDTH / 2
self.speedx = 5
self.speedy = 3
self.lives = 3
self.health = PLAYER_MAX_HEALTH
self.hidden = False
self.hide_timer = pygame.time.get_ticks()
self.immune = False
self.immune_timer = pygame.time.get_ticks()
self.powerLvl = 1
self.power_timer = pygame.time.get_ticks()
self.shoot_timer = pygame.time.get_ticks()
self.score = 0
def update(self):
if self.immune:
self.image = playerImg_immune
else:
self.image = playerImg
if player.lives < 1:
pygame.mixer.music.stop()
boss_fight_music.stop()
pygame.mixer.music.play(-1)
showGameOverScreen()
if self.powerLvl > 1:
if pygame.time.get_ticks() - self.power_timer > POWERUP_TIME:
self.powerLvl = 1
self.power_timer = pygame.time.get_ticks()
if self.hidden and pygame.time.get_ticks() - self.hide_timer > 1200:
self.hidden = False
self.rect.bottom = HEIGHT - 30
self.rect.centerx = WIDTH / 2
self.immune = True
self.immune_timer = pygame.time.get_ticks()
if self.immune and pygame.time.get_ticks() - self.immune_timer > 1500:
self.immune = False
keystate = pygame.key.get_pressed()
if keystate[K_LEFT]:
self.rect.x -= self.speedx
if keystate[K_RIGHT]:
self.rect.x += self.speedx
if keystate[K_UP]:
self.rect.y -= self.speedy
if keystate[K_DOWN]:
self.rect.y += self.speedy
if self.rect.right > WIDTH + 20:
self.rect.right = WIDTH + 20
if self.rect.left < -20 and self.rect.left > -200:
self.rect.left = -20
if self.rect.top <= 0 and self.rect.top > -200:
self.rect.top = 0
if self.rect.bottom >= HEIGHT - 30:
self.rect.bottom = HEIGHT - 30
def shoot(self):
if not self.hidden:
self.shoot_timer = pygame.time.get_ticks()
if self.powerLvl == 1:
bullet = Bullet(self.rect.centerx, self.rect.top)
allSprites.add(bullet)
bullets.add(bullet)
shoot_sound.play()
elif self.powerLvl == 2:
bullet1 = Bullet(self.rect.left+5, self.rect.centery)
bullet2 = Bullet(self.rect.right-5, self.rect.centery)
allSprites.add(bullet1, bullet2)
bullets.add(bullet1, bullet2)
shoot_sound.play()
else:
bullet = Bullet(self.rect.centerx, self.rect.top)
bullet1 = Bullet(self.rect.left + 5, self.rect.centery)
bullet2 = Bullet(self.rect.right - 5, self.rect.centery)
allSprites.add(bullet, bullet1, bullet2)
bullets.add(bullet, bullet1, bullet2)
shoot_sound.play()
def hide(self):
self.hidden = True
self.hide_timer = pygame.time.get_ticks()
self.rect.center = (-500, -500)
def powerup(self):
self.powerLvl += 1
self.power_timer = pygame.time.get_ticks()
def reset(self):
self.rect.bottom = HEIGHT - 30
self.rect.centerx = WIDTH / 2
self.lives = 3
self.health = PLAYER_MAX_HEALTH
self.hidden = False
self.powerLvl = 1
self.score = 0
class Alien(pygame.sprite.Sprite):
def __init__(self, x, y, img1, img2, smartShoot, fly):
pygame.sprite.Sprite.__init__(self)
self.img1 = img1
self.img2 = img2
self.image = self.img1
self.rect = self.image.get_rect()
self.radius = 20
self.rect.x = x
self.rect.y = y
self.speedy = 0
self.speedx = random.randrange(1, 3)
self.direction = 1
self.lastUpdate = pygame.time.get_ticks()
self.lastBomb = pygame.time.get_ticks()
self.smartShoot = smartShoot
self.canFly = fly
self.fly = False
self.fly_timer = pygame.time.get_ticks()
self.starty = self.rect.y
self.hitbottom = False
self.flyTime = random.randrange(5000, 30000)
def move(self, direction, y = 0):
if self.rect.y < self.starty:
self.rect.y = self.starty
self.fly = False
if y == 0:
self.rect.x += self.speedx * self.direction
else:
self.rect.y += 4 * direction
if self.rect.bottom > player.rect.bottom:
self.rect.bottom = player.rect.bottom
self.hitbottom = True
if self.rect.y == self.starty:
self.fly = False
alliens.remove(self)
hits = pygame.sprite.spritecollide(self, alliens, False)
if hits:
self.direction *= -1
alliens.add(self)
def update(self):
now = pygame.time.get_ticks()
if now - self.lastUpdate > 80:
self.lastUpdate = now
if self.image == self.img1:
self.image = self.img2
else:
self.image = self.img1
x = self.rect.x
y = self.rect.y
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
if self.canFly:
if now - self.fly_timer > self.flyTime:
self.fly_timer = now
self.fly = True
if self.fly == False:
self.hitbottom = False
if self.rect.left <=0:
self.rect.left = 0
self.direction *= -1
if self.rect.right >= WIDTH:
self.rect.right = WIDTH
self.direction *= -1
self.move(self.direction)
if now - self.lastBomb > random.randrange(800, 1000000):
self.lastBomb = now
if self.smartShoot:
if self.rect.x < player.rect.x:
bomba = Bomb(self.rect.centerx, self.rect.bottom, 1)
else:
bomba = Bomb(self.rect.centerx, self.rect.bottom, -1)
else:
bomba = Bomb(self.rect.centerx, self.rect.bottom, random.randrange(4))
allSprites.add(bomba)
bombs.add(bomba)
elif self.fly == True:
if self.hitbottom:
self.move(-1, 5)
else:
self.move(1, 5)
class Boss(pygame.sprite.Sprite):
def __init__(self, bosstype):
pygame.sprite.Sprite.__init__(self)
self.image = bossImg[bosstype-1]
self.rect = self.image.get_rect()
self.rect.centerx = screen.get_rect().centerx
self.rect.y = 5
self.speedy = random.randrange(5*bosstype, 10*bosstype)
self.speedx = random.randrange(5*bosstype, 10*bosstype)
self.directionx = random.choice([-1, 1])
self.directiony = random.choice([-1, 1])
self.lastUpdate = pygame.time.get_ticks()
self.lastDirection = pygame.time.get_ticks()
self.lastBomb = pygame.time.get_ticks()
self.bosstype = bosstype
self.health = 1000 * bosstype
def move(self):
if self.rect.y < 5:
self.rect.y = 5
if self.rect.bottom > HEIGHT - 200:
self.rect.bottom = HEIGHT - 200
if self.rect.x >= 5 and self.rect.y <= HEIGHT - 200:
self.rect.y += self.speedy * self.directiony
if self.rect.x < 5:
self.rect.x = 5
if self.rect.right > WIDTH - 5:
self.rect.right = WIDTH - 5
if self.rect.x >= 5 and self.rect.x <= WIDTH - 5:
self.rect.x += self.speedx * self.directionx
def update(self):
now = pygame.time.get_ticks()
if now - self.lastDirection > random.randrange(1300,10000):
self.lastDirection = now
self.directionx = random.choice([-1, 1])
self.directiony = random.choice([-1, 1])
if now - self.lastUpdate > random.randrange(80, 200):
self.lastUpdate = now
self.move()
if now - self.lastBomb > random.randrange(100, round(100000/self.bosstype)):
self.lastBomb = now
if self.bosstype > 1:
if self.rect.x < player.rect.x:
bomba1 = Bomb(self.rect.centerx, self.rect.bottom, 1)
bomba2 = Bomb(self.rect.centerx - 20, self.rect.bottom, 1)
bomba3 = Bomb(self.rect.centerx + 20, self.rect.bottom, 1)
if self.bosstype == 3:
bomba4 = Bomb(self.rect.centerx - 40, self.rect.bottom, 1)
bomba5 = Bomb(self.rect.centerx + 40, self.rect.bottom, 1)
allSprites.add(bomba4)
bombs.add(bomba4)
allSprites.add(bomba5)
bombs.add(bomba5)
else:
bomba1 = Bomb(self.rect.centerx, self.rect.bottom, -1)
bomba2 = Bomb(self.rect.centerx - 20, self.rect.bottom, -1)
bomba3 = Bomb(self.rect.centerx + 20, self.rect.bottom, -1)
if self.bosstype == 3:
bomba4 = Bomb(self.rect.centerx - 40, self.rect.bottom, -1)
bomba5 = Bomb(self.rect.centerx + 40, self.rect.bottom, -1)
allSprites.add(bomba4)
bombs.add(bomba4)
allSprites.add(bomba5)
bombs.add(bomba5)
else:
bomba1 = Bomb(self.rect.centerx, self.rect.bottom)
bomba2 = Bomb(self.rect.centerx - 20, self.rect.bottom)
bomba3 = Bomb(self.rect.centerx + 20, self.rect.bottom)
allSprites.add(bomba1)
bombs.add(bomba1)
allSprites.add(bomba2)
bombs.add(bomba2)
allSprites.add(bomba3)
bombs.add(bomba3)
class Bomb(pygame.sprite.Sprite):
    def __init__(self, x, y, direction=None):
        pygame.sprite.Sprite.__init__(self)
        # A random default argument would be evaluated only once at definition time,
        # so pick a fresh random direction per bomb when none is given.
        if direction is None:
            direction = random.choice([-1, 1])
        self.image = pygame.transform.scale(bombImg, (10, 20))
        self.rect = self.image.get_rect()
        self.rect.midtop = (x, y)
        self.speedy = random.randrange(2, 6)
        self.speedx = random.randrange(3)
        self.direction = direction
bomb_sound.play()
def update(self):
self.rect.y += self.speedy
self.rect.x += self.speedx * self.direction
if self.rect.top > HEIGHT or self.rect.left > WIDTH or self.rect.right < 0:
self.kill()
class Bullet(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.transform.scale(bulletImg, (10, 25))
self.rect = self.image.get_rect()
self.rect.bottom = y
self.rect.centerx = x
self.speedy = -7
def update(self):
self.rect.y += self.speedy
if self.rect.bottom < 0:
self.kill()
class PowerUp(pygame.sprite.Sprite):
def __init__(self, center):
pygame.sprite.Sprite.__init__(self)
self.type = random.choice(['health', 'fire'])
if random.random() > 0.9:
self.type = 'life'
self.image = powerupImgs[self.type]
self.rect = self.image.get_rect()
self.rect.center = center
self.speedy = random.randrange(3, 6)
def update(self):
self.rect.y += self.speedy
if self.rect.top > HEIGHT:
self.kill()
class Explosion(pygame.sprite.Sprite):
def __init__(self, center, size):
pygame.sprite.Sprite.__init__(self)
self.size = size
self.image = explosion[self.size][0]
self.rect = self.image.get_rect()
self.rect.center = center
self.frame = 0
self.lastUpdate = pygame.time.get_ticks()
self.frameRate = 50
def update(self):
now = pygame.time.get_ticks()
if now - self.lastUpdate > self.frameRate:
self.lastUpdate = now
self.frame += 1
if self.frame == len(explosion[self.size]):
self.kill()
else:
center = self.rect.center
self.image = explosion[self.size][self.frame]
self.rect = self.image.get_rect()
self.rect.center = center
class Meteor(pygame.sprite.Sprite):
def __init__(self, speedCap, timeCap = 0):
pygame.sprite.Sprite.__init__(self)
self.startImage = random.choice(meteorImg)
self.image = self.startImage.copy()
self.rect = self.image.get_rect()
self.radius = int(self.rect.width / 2)
self.rect.x = random.randrange(WIDTH - self.rect.width)
self.rect.y = random.randrange(-150, -100)
self.speedCap = speedCap
self.speedx = random.randrange(3)
self.speedy = random.randrange(self.speedCap)
self.direction = random.choice([-1, 1])
self.timeCap = timeCap
self.timeStart = pygame.time.get_ticks()
self.rotationAngle = 0
self.rotationSpeed = random.randrange(-9, 9)
self.lastRotation = pygame.time.get_ticks()
def update(self):
if self.timeCap > 0:
if pygame.time.get_ticks() - self.timeStart > self.timeCap:
if self.rect.y < 0:
self.kill()
now = pygame.time.get_ticks()
if now - self.lastRotation > 50:
self.lastRotation = now
self.rotationAngle = (self.rotationAngle + self.rotationSpeed) % 360
oldCenter = self.rect.center
self.image = pygame.transform.rotate(self.startImage, self.rotationAngle)
self.rect = self.image.get_rect()
self.rect.center = oldCenter
self.rect.x += self.speedx * self.direction
self.rect.y += self.speedy
        if self.rect.y > HEIGHT or self.rect.right < 0 or self.rect.left > WIDTH:
self.rect.x = random.randrange(WIDTH - self.rect.width)
self.rect.y = random.randrange(-150, -100)
self.speedx = random.randrange(3)
self.speedy = random.randrange(self.speedCap)
class Star(pygame.sprite.Sprite):
def __init__(self, x):
pygame.sprite.Sprite.__init__(self)
self.startImage = pygame.transform.scale(random.choice(starImg), (random.randrange(10,20),random.randrange(10,20)))
self.image = self.startImage.copy()
self.rect = self.image.get_rect()
self.rect.x = x
self.startx = x
self.rect.y = -30
self.speedx = random.randrange(2, 5)
self.speedy = random.randrange(2, 6)
self.direction = random.choice([-1, 1])
self.timeStart = pygame.time.get_ticks()
self.rotationAngle = 0
self.rotationSpeed = random.randrange(-7, 7)
self.lastRotation = pygame.time.get_ticks()
def update(self):
self.rect.x += self.speedx * self.direction
self.rect.y += self.speedy
if self.rect.y > HEIGHT+25 or self.rect.x < 0-15 or self.rect.x > WIDTH+15:
self.rect.y = -25
self.rect.x = self.startx
now = pygame.time.get_ticks()
if now - self.lastRotation > 50:
self.lastRotation = now
self.rotationAngle = (self.rotationAngle + self.rotationSpeed) % 360
oldCenter = self.rect.center
self.image = pygame.transform.rotate(self.startImage, self.rotationAngle)
self.rect = self.image.get_rect()
self.rect.center = oldCenter
def destroy(self):
if self.rect.y > HEIGHT or self.rect.y < 0 or self.rect.x < 0 or self.rect.x > WIDTH:
self.kill()
class Button(pygame.sprite.Sprite):
def __init__(self, x, y, type):
pygame.sprite.Sprite.__init__(self)
self.type = type
self.image = buttonImg
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
self.clicked = pygame.mouse.get_pressed()
def update(self):
mouse = pygame.mouse.get_pos()
self.clicked = pygame.mouse.get_pressed()
if mouse[0] >= self.rect.x and mouse[0] <= self.rect.right and mouse[1] >= self.rect.y and mouse[1] <= self.rect.bottom:
self.image = buttonLitImg
if self.clicked[0] == 1:
self.action()
else:
self.image = buttonImg
printText(screen, self.type, 42, self.rect.x + 22, self.rect.y + 55, LIGHT_GREEN, self.rect.center)
def action(self):
if self.type == 'PLAY':
runGame()
elif self.type == 'EXIT':
pygame.quit()
playerImg = loadImage('avion.png')
playerImg_immune = loadImage('avion_immune.png')
playerLifeImg = pygame.transform.scale(loadImage('life.png'), (25, 20))
bulletImg = loadImage('raketa.png')
bombImg = loadImage('bomba.png')
allienImg = [loadImage('vanzemaljaca0.png'), loadImage('vanzemaljaca1.png'), loadImage('vanzemaljacb0.png'),
loadImage('vanzemaljacb1.png'), loadImage('vanzemaljacc0.png'), loadImage('vanzemaljacc1.png'), ]
bossImg = [pygame.transform.scale(loadImage('boss1.png'), (200, 200)),
pygame.transform.scale(loadImage('boss2.png'), (200, 200)),
pygame.transform.scale(loadImage('boss3.png'), (200, 200))]
meteorImg = [pygame.transform.scale(loadImage('meteor1.png'), (100, 100)),
pygame.transform.scale(loadImage('meteor2.png'), (70, 70)),
pygame.transform.scale(loadImage('meteor3.png'), (50, 50)),
pygame.transform.scale(loadImage('meteor4.png'), (30, 30)),
pygame.transform.scale(loadImage('meteor5.png'), (20, 20))]
starImg = [loadImage('star1.png'), loadImage('star2.png'), loadImage('star3.png'), loadImage('star4.png'), loadImage('star5.png')]
buttonImg = pygame.transform.scale(loadImage('button.png'), (170, 70))
buttonLitImg = pygame.transform.scale(loadImage('buttonLit.png'), (170, 70))
backgroundImg = pygame.transform.scale(loadImage('starfield.png'), (WIDTH, HEIGHT))
backgroundRect = backgroundImg.get_rect()
startImg = pygame.transform.scale(loadImage('startscreen.png'), (WIDTH, HEIGHT))
startImgRect = startImg.get_rect()
storyImg = pygame.transform.scale(loadImage('storyImg.png'), (WIDTH, HEIGHT))
storyImgRect = storyImg.get_rect()
pauseScreen = pygame.Surface((WIDTH, HEIGHT)).convert_alpha()
pauseScreen.fill((0, 0, 0, 190))
explosion = {}
explosion['large'] = []
explosion['small'] = []
powerupImgs = {}
powerupImgs['health'] = pygame.transform.scale(loadImage('health.png'), (30, 30))
powerupImgs['fire'] = pygame.transform.scale(loadImage('fire.png'), (30, 30))
powerupImgs['life'] = pygame.transform.scale(loadImage('life.png'), (30, 30))
for i in range(10):
file = 'explosion{}.png'.format(i)
img = loadImage(file)
imgLarge = pygame.transform.scale(img, (70, 70))
explosion['large'].append(imgLarge)
imgSmall = pygame.transform.scale(img, (30, 30))
explosion['small'].append(imgSmall)
background_music = loadSound('RoundtableRival.ogg')
pygame.mixer.music = background_music
pygame.mixer.music.set_volume(0.2)
boss_fight_music = loadSound('DBZ_BOSS_FIGHT.ogg')
story_music = loadSound('STAR_WARS.ogg')
shoot_sound = loadSound('shoot.wav')
pygame.mixer.Sound.set_volume(shoot_sound, 0.4)
bomb_sound = loadSound('bomb.wav')
pygame.mixer.Sound.set_volume(bomb_sound, 0.3)
powerup_sound = loadSound('powerup.wav')
pygame.mixer.Sound.set_volume(powerup_sound, 0.6)
playerExplosion_sound = loadSound('playerExplosion.wav')
meteorExplosion_sound = loadSound('meteorExplosion.wav')
pygame.mixer.Sound.set_volume(meteorExplosion_sound, 0.6)
allienExplosion_sound = loadSound('allienExplosion.wav')
pygame.mixer.Sound.set_volume(allienExplosion_sound, 0.5)
keyPress_sound = loadSound('keypress.wav')
pygame.mixer.Sound.set_volume(keyPress_sound, 0.5)
# LOADING HIGH SCORE
try:
    with open(os.path.join(textfile_dir, 'highscore.txt'), 'r') as f: # the with block closes the file automatically
try:
highscore = int(f.read())
except:
highscore = 0
except:
    with open(os.path.join(textfile_dir, 'highscore.txt'), 'w') as f: # the with block closes the file automatically
highscore = 0
allSprites = pygame.sprite.Group()
alliens = pygame.sprite.Group()
meteors = pygame.sprite.Group()
bullets = pygame.sprite.Group()
bombs = pygame.sprite.Group()
bosses = pygame.sprite.Group()
stars = pygame.sprite.Group()
powerups = pygame.sprite.Group()
buttons = pygame.sprite.Group()
player = Player()
allSprites.add(player)
paused = False
level = 1
def initializeGame():
global paused
alliens.empty()
meteors.empty()
bullets.empty()
bombs.empty()
powerups.empty()
bosses.empty()
stars.empty()
player.reset()
allSprites.empty()
allSprites.add(player)
paused = False
def showGameStartScreen():
pygame.mixer.music.play(-1)
buttons.empty()
btn = Button(280, 300, 'PLAY')
buttons.add(btn)
btn = Button(600, 550, 'EXIT')
buttons.add(btn)
while True:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
screen.blit(startImg, startImgRect)
buttons.draw(screen)
printText(screen, 'HIGH SCORE:' + str(highscore), 30, WIDTH/2 - 165, HEIGHT-30, LIGHT_GREEN)
buttons.update() # PRINTING TEXT ON BUTTONS
pygame.display.update()
def showTransitionScreen(text):
global paused, level
running = True
timer = pygame.time.get_ticks()
#add stars
for i in range(NUMSTARS):
x = random.randrange(WIDTH)
z = Star(x)
stars.add(z)
stars.update()
while stars:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == pygame.KEYDOWN:
if event.key == K_SPACE and not paused and (pygame.time.get_ticks() - player.shoot_timer > RELOAD):
player.shoot()
if event.key == K_p:
paused = not paused
hits = pygame.sprite.spritecollide(player, powerups, True)
for hit in hits:
powerup_sound.play()
if hit.type == 'health':
player.health += 20
if player.health > PLAYER_MAX_HEALTH:
player.health = PLAYER_MAX_HEALTH
elif hit.type == 'life':
player.lives += 1
if player.lives > 3:
player.lives = 3
else:
player.powerup()
if not paused:
stars.update()
allSprites.update()
# DRAW
screen.fill(BLACK)
screen.blit(backgroundImg, backgroundRect)
stars.draw(screen)
printText(screen, 'Level: ' + str(level), 25, 9, HEIGHT - 29, LIGHT_GREEN)
printText(screen, 'SCORE:' + str(player.score), 25, WIDTH - 185, HEIGHT - 3, LIGHT_GREEN)
allSprites.draw(screen)
now = pygame.time.get_ticks()
if now - timer > 3000 and now - timer < 6000:
if (pygame.time.get_ticks() - timer) % 120 <= 100:
printText(screen, text, 70, 0, 0, LIGHT_GREEN, (WIDTH/2, 100))
drawHealthBar(screen, 10, HEIGHT - 30, player.health, GREEN, PLAYER_MAX_HEALTH, 200)
drawLives(screen, 15, HEIGHT - 29, player.lives, playerLifeImg)
if paused:
printText(screen, text, 70, 0, 0, LIGHT_GREEN, (WIDTH / 2, 100))
screen.blit(pauseScreen, (0, 0))
printText(screen, 'PAUSE', 100, 0, 0, LIGHT_GREEN, screen.get_rect().center)
pygame.display.update()
if now - timer > 5000 and not paused:
for z in stars:
Star.destroy(z)
def startLevel(allienRows, smartShoot, suicide):
for k in range(allienRows):
for i in range(11):
tmp = random.choice([0, 2, 4])
a = Alien(70 * i, k * 70, allienImg[tmp], allienImg[tmp + 1], smartShoot, suicide)
allSprites.add(a)
alliens.add(a)
def startMeteorRain(k, speedCap, time):
for i in range(k):
m = Meteor(speedCap, time)
meteors.add(m)
allSprites.add(m)
def spawnBoss(x):
boss = Boss(x)
bosses.add(boss)
allSprites.add(boss)
runLvl()
boss_fight_music.stop()
pygame.mixer.music.play(-1)
def checkCollision():
hits = pygame.sprite.spritecollide(player, powerups, True)
for hit in hits:
powerup_sound.play()
if hit.type == 'health':
player.health += 20
if player.health > PLAYER_MAX_HEALTH:
player.health = PLAYER_MAX_HEALTH
elif hit.type == 'life':
player.lives += 1
if player.lives > 3:
player.lives = 3
else:
player.powerup()
hits = pygame.sprite.groupcollide(alliens, bullets, True, True)
for hit in hits:
player.score += 7 * hit.speedx
allienExplosion_sound.play()
expl = Explosion(hit.rect.center, 'large')
allSprites.add(expl)
if random.random() > 0.8:
pow = PowerUp(hit.rect.center)
powerups.add(pow)
allSprites.add(pow)
hits = pygame.sprite.groupcollide(bullets, bosses, True, False)
for hit in hits:
allienExplosion_sound.play()
expl = Explosion(hit.rect.midtop, 'large')
allSprites.add(expl)
for boss in bosses:
player.score += 5 * (boss.speedx + 1)
boss.health -= 99
if boss.health <= 0:
                bosses.remove(boss)
hits = pygame.sprite.spritecollide(player, bombs, True)
for hit in hits:
if not player.immune:
player.health -= 13 * hit.speedy
if player.health <= 0:
expl = Explosion(player.rect.center, 'large')
player.lives -= 1
player.hide()
allSprites.add(expl)
playerExplosion_sound.play()
if player.lives > 0:
player.health = PLAYER_MAX_HEALTH
else:
expl = Explosion(hit.rect.center, 'small')
allSprites.add(expl)
playerExplosion_sound.play()
hits = pygame.sprite.groupcollide(meteors, bullets, True, True)
for hit in hits:
player.score += 60 - hit.radius
meteorExplosion_sound.play()
expl = Explosion(hit.rect.center, 'large')
allSprites.add(expl)
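    # collide_circle uses each sprite's .radius attribute (set on Player and Meteor) for a tighter circular hit test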
hits = pygame.sprite.spritecollide(player, meteors, True, pygame.sprite.collide_circle)
for hit in hits:
if not player.immune:
player.health -= 2 * hit.radius
if player.health <= 0:
expl = Explosion(hit.rect.center, 'large')
player.lives -= 1
player.hide()
allSprites.add(expl)
expl = Explosion(player.rect.center, 'large')
allSprites.add(expl)
playerExplosion_sound.play()
meteorExplosion_sound.play()
if player.lives > 0:
player.health = PLAYER_MAX_HEALTH
else:
expl = Explosion(hit.rect.center, 'small')
allSprites.add(expl)
playerExplosion_sound.play()
hits = pygame.sprite.spritecollide(player, alliens, True)
for hit in hits:
if not player.immune:
player.lives -= 1
if player.lives > 0:
player.health = PLAYER_MAX_HEALTH
expl = Explosion(player.rect.center, 'large')
player.hide()
allSprites.add(expl)
playerExplosion_sound.play()
expl = Explosion(hit.rect.center, 'large')
allienExplosion_sound.play()
allSprites.add(expl)
hits = pygame.sprite.spritecollide(player, bosses, False)
for hit in hits:
if not player.immune:
player.lives -= 1
if player.lives > 0:
player.health = PLAYER_MAX_HEALTH
expl = Explosion(player.rect.center, 'large')
player.hide()
allSprites.add(expl)
playerExplosion_sound.play()
def showGameOverScreen():
global highscore
buttons.empty()
btn = Button(280, 550, 'PLAY')
buttons.add(btn)
btn = Button(600, 550, 'EXIT')
buttons.add(btn)
if player.score > highscore:
highscore = player.score
        with open(os.path.join(textfile_dir, 'highscore.txt'), 'w') as f: # the with block closes the file automatically
f.write(str(highscore))
while True:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
screen.fill(BLACK)
screen.blit(backgroundImg, backgroundRect)
if player.lives > 0:
printText(screen, 'VICTORY', 100, 0, 0, LIGHT_GREEN, (WIDTH/2, HEIGHT/2-120))
else:
printText(screen, 'DEFEAT', 100, 0, 0, LIGHT_GREEN, (WIDTH/2, HEIGHT/2-120))
if player.score == highscore:
printText(screen, 'NEW HIGH SCORE!', 70, 0, 0, LIGHT_GREEN, (WIDTH / 2, HEIGHT / 2))
printText(screen, str(highscore), 70, 0, 0, LIGHT_GREEN, (WIDTH / 2, HEIGHT / 2 + 90))
else:
printText(screen, 'SCORE: ' + str(player.score), 65, 0, 0, LIGHT_GREEN, (WIDTH/2, HEIGHT/2))
printText(screen, 'HIGH SCORE: ' + str(highscore), 65, 0, 0, LIGHT_GREEN, (WIDTH/2, HEIGHT/2 + 90))
buttons.draw(screen)
buttons.update() # PRINTING TEXT ON BUTTONS
pygame.display.update()
def runLvl():
global paused, player
while alliens or meteors or bosses:
clock.tick(FPS)
# PROCESS
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == pygame.KEYDOWN:
if event.key == K_SPACE and not paused and (pygame.time.get_ticks() - player.shoot_timer > RELOAD):
player.shoot()
if event.key == K_p:
paused = not paused
checkCollision()
# UPDATE
if not paused:
allSprites.update()
# DRAW
screen.fill(BLACK)
screen.blit(backgroundImg, backgroundRect)
printText(screen, 'Level: ' + str(level), 25, 9, HEIGHT - 29, LIGHT_GREEN)
printText(screen, 'SCORE:' + str(player.score), 25, WIDTH - 185, HEIGHT - 3, LIGHT_GREEN)
allSprites.draw(screen)
for boss in bosses:
drawHealthBar(screen, 240, HEIGHT - 30, boss.health, RED, 1000*boss.bosstype, 350)
if boss.health <= 0:
player.score += 300*boss.bosstype
bosses.remove(boss)
allSprites.remove(boss)
drawHealthBar(screen, 10, HEIGHT - 30, player.health, GREEN, PLAYER_MAX_HEALTH, 200)
drawLives(screen, 15, HEIGHT - 29, player.lives, playerLifeImg)
if paused:
screen.blit(pauseScreen, (0, 0))
printText(screen, 'PAUSE', 100, 0, 0, LIGHT_GREEN, screen.get_rect().center)
pygame.display.update()
def runGame():
initializeGame()
global level
showTransitionScreen('ARMY ATTACKS')
startLevel(3, False, False)
runLvl()
showTransitionScreen('METEOR RAIN')
startMeteorRain(30, 6, 2500)
runLvl()
pygame.mixer.music.stop()
boss_fight_music.play(-1)
showTransitionScreen('CAPTAIN ATTACKS')
spawnBoss(1)
level += 1
showTransitionScreen('ARMY ATTACKS')
startLevel(4, True, False)
runLvl()
showTransitionScreen('METEOR RAIN')
startMeteorRain(45, 8, 5000)
runLvl()
pygame.mixer.music.stop()
boss_fight_music.play(-1)
showTransitionScreen('GENERAL ATTACKS')
spawnBoss(2)
level += 1
showTransitionScreen('ARMY ATTACKS')
startLevel(5, True, True)
runLvl()
showTransitionScreen('METEOR RAIN')
startMeteorRain(50, 8, 5500)
runLvl()
pygame.mixer.music.stop()
boss_fight_music.play(-1)
showTransitionScreen('LEADER ATTACKS')
spawnBoss(3)
if (not alliens) and (not bosses):
showTransitionScreen('ALIENS DEFEATED')
showGameOverScreen()
# MAIN
showStory()
pygame.quit()
| 2.59375
| 3
|
app/urls.py
|
RegilioSpee/k_sec-filecify
| 0
|
12777392
|
<reponame>RegilioSpee/k_sec-filecify
from django.urls import path,include
from . import views
urlpatterns = [
path('registration/', views.registration),
path('login/',views.user_login,name="login")
]
| 1.820313
| 2
|
geetools/ui/ipytools.py
|
guy1ziv2/gee_tools
| 0
|
12777393
|
# coding=utf-8
""" General tools for the Jupyter Notebook and Lab """
from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, \
VBox, Button, Box, ToggleButton, IntSlider, FloatText
from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float
from .. import batch
# imports for async widgets
from multiprocessing import Pool
import time
# import EE
import ee
if not ee.data._initialized: ee.Initialize()
def create_accordion(dictionary):
""" Create an Accordion output from a dict object """
widlist = []
ini = 0
widget = Accordion()
widget.selected_index = None # this will unselect all
for key, val in dictionary.items():
if isinstance(val, dict):
newwidget = create_accordion(val)
widlist.append(newwidget)
elif isinstance(val, list):
            # transform list to a dictionary
dictval = {k: v for k, v in enumerate(val)}
newwidget = create_accordion(dictval)
widlist.append(newwidget)
else:
value = HTML(str(val))
widlist.append(value)
widget.set_title(ini, key)
ini += 1
widget.children = widlist
return widget
def create_object_output(object):
''' Create a output Widget for Images, Geometries and Features '''
ty = object.__class__.__name__
if ty == 'Image':
info = object.getInfo()
image_id = info['id'] if 'id' in info else 'No Image ID'
prop = info['properties']
bands = info['bands']
bands_names = [band['id'] for band in bands]
bands_types = [band['data_type']['precision'] for band in bands]
bands_crs = [band['crs'] for band in bands]
new_band_names = ['<li>{} - {} - {}</li>'.format(name, ty, epsg) for name, ty, epsg in zip(bands_names, bands_types, bands_crs)]
new_properties = ['<li><b>{}</b>: {}</li>'.format(key, val) for key, val in prop.items()]
header = HTML('<b>Image id:</b> {id} </br>'.format(id=image_id))
bands_wid = HTML('<ul>'+''.join(new_band_names)+'</ul>')
prop_wid = HTML('<ul>'+''.join(new_properties)+'</ul>')
acc = Accordion([bands_wid, prop_wid])
acc.set_title(0, 'Bands')
acc.set_title(1, 'Properties')
acc.selected_index = None # this will unselect all
return VBox([header, acc])
elif ty == 'FeatureCollection':
try:
info = object.getInfo()
except:
print('FeatureCollection limited to 4000 features')
            info = object.limit(4000).getInfo()
return create_accordion(info)
else:
info = object.getInfo()
return create_accordion(info)
def create_async_output(object, widget):
child = create_object_output(object)
widget.children = [child]
# def recrusive_delete_asset_to_widget(assetId, widget):
def recrusive_delete_asset_to_widget(args):
''' adapted version to print streaming results in a widget '''
assetId = args[0]
widget = args[1]
try:
content = ee.data.getList({'id':assetId})
except Exception as e:
widget.value = str(e)
return
    if not content:
        # delete empty collection and/or folder
        ee.data.deleteAsset(assetId)
    else:
        for asset in content:
            path = asset['id']
            ty = asset['type']
            if ty == 'Image':
                ee.data.deleteAsset(path)
                widget.value += 'deleting {} ({})</br>'.format(path, ty)
            else:
                # clear output
                widget.value = ''
                recrusive_delete_asset_to_widget((path, widget))
        # delete empty collection and/or folder
        ee.data.deleteAsset(assetId)
class CheckRow(HBox):
checkbox = Instance(Checkbox)
widget = Instance(Widget)
def __init__(self, widget, **kwargs):
self.checkbox = Checkbox(indent=False,
layout=Layout(flex='1 1 20', width='auto'))
self.widget = widget
super(CheckRow, self).__init__(children=(self.checkbox, self.widget),
**kwargs)
self.layout = Layout(display='flex', flex_flow='row',
align_content='flex-start')
@observe('widget')
def _ob_wid(self, change):
new = change['new']
self.children = (self.checkbox, new)
def observe_checkbox(self, handler, extra_params={}, **kwargs):
""" set handler for the checkbox widget. Use the property 'widget' of
change to get the corresponding widget
:param handler: callback function
:type handler: function
:param extra_params: extra parameters that can be passed to the handler
:type extra_params: dict
:param kwargs: parameters from traitlets.observe
:type kwargs: dict
"""
# by default only observe value
name = kwargs.get('names', 'value')
def proxy_handler(handler):
def wrap(change):
change['widget'] = self.widget
for key, val in extra_params.items():
change[key] = val
return handler(change)
return wrap
self.checkbox.observe(proxy_handler(handler), names=name, **kwargs)
def observe_widget(self, handler, extra_params={}, **kwargs):
""" set handler for the widget alongside de checkbox
:param handler: callback function
:type handler: function
:param extra_params: extra parameters that can be passed to the handler
:type extra_params: dict
:param kwargs: parameters from traitlets.observe
:type kwargs: dict
"""
def proxy_handler(handler):
def wrap(change):
change['checkbox'] = self.checkbox
for key, val in extra_params.items():
change[key] = val
return handler(change)
return wrap
self.widget.observe(proxy_handler(handler), **kwargs)
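# A small usage sketch for CheckRow (the widget and handler below are illustrative
# only); the handler receives the normal traitlets change dict plus a 'widget' key
# injected by observe_checkbox:
#
#   row = CheckRow(HTML('some content'))
#   def on_check(change):
#       print(change['widget'], change['new'])
#   row.observe_checkbox(on_check)
#   display(row)  # assumes an IPython/Jupyter display context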
class CheckAccordion(VBox):
widgets = Tuple()
def __init__(self, widgets, **kwargs):
# self.widgets = widgets
super(CheckAccordion, self).__init__(**kwargs)
self.widgets = widgets
@observe('widgets')
def _on_child(self, change):
new = change['new'] # list of any widget
newwidgets = []
for widget in new:
            # construct the widget
acc = Accordion(children=(widget,))
acc.selected_index = None # this will unselect all
# create a CheckRow
checkrow = CheckRow(acc)
newwidgets.append(checkrow)
newchildren = tuple(newwidgets)
self.children = newchildren
def set_title(self, index, title):
''' set the title of the widget at indicated index'''
checkrow = self.children[index]
acc = checkrow.widget
acc.set_title(0, title)
def get_title(self, index):
''' get the title of the widget at indicated index'''
checkrow = self.children[index]
acc = checkrow.widget
return acc.get_title(0)
def get_check(self, index):
''' get the state of checkbox in index '''
checkrow = self.children[index]
return checkrow.checkbox.value
def set_check(self, index, state):
''' set the state of checkbox in index '''
checkrow = self.children[index]
checkrow.checkbox.value = state
def checked_rows(self):
''' return a list of indexes of checked rows '''
checked = []
for i, checkrow in enumerate(self.children):
state = checkrow.checkbox.value
if state: checked.append(i)
return checked
def get_widget(self, index):
''' get the widget in index '''
checkrow = self.children[index]
return checkrow.widget
def set_widget(self, index, widget):
''' set the widget for index '''
checkrow = self.children[index]
checkrow.widget.children = (widget,) # Accordion has 1 child
def set_row(self, index, title, widget):
''' set values for the row '''
self.set_title(index, title)
self.set_widget(index, widget)
def set_accordion_handler(self, index, handler, **kwargs):
''' set the handler for Accordion in index '''
checkrow = self.children[index]
checkrow.observe_widget(handler, names=['selected_index'], **kwargs)
def set_checkbox_handler(self, index, handler, **kwargs):
''' set the handler for CheckBox in index '''
checkrow = self.children[index]
checkrow.observe_checkbox(handler, **kwargs)
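# A small usage sketch for CheckAccordion (titles and widgets below are illustrative
# only):
#
#   acc = CheckAccordion(widgets=(HTML('first'), HTML('second')))
#   acc.set_title(0, 'First row')
#   acc.set_title(1, 'Second row')
#   acc.set_check(0, True)
#   print(acc.checked_rows())  # -> [0]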
class AssetManager(VBox):
""" Asset Manager Widget """
POOL_SIZE = 5
def __init__(self, map=None, **kwargs):
super(AssetManager, self).__init__(**kwargs)
# Thumb height
self.thumb_height = kwargs.get('thumb_height', 300)
self.root_path = ee.data.getAssetRoots()[0]['id']
# Map
self.map = map
# Header
self.reload_button = Button(description='Reload')
self.add2map = Button(description='Add to Map')
self.delete = Button(description='Delete Selected')
header_children = [self.reload_button, self.delete]
# Add2map only if a Map has been passed
if self.map:
header_children.append(self.add2map)
self.header = HBox(header_children)
# Reload handler
def reload_handler(button):
new_accordion = self.core(self.root_path)
# Set VBox children
self.children = [self.header, new_accordion]
# add2map handler
def add2map_handler(themap):
def wrap(button):
selected_rows = self.get_selected()
for asset, ty in selected_rows.items():
if ty == 'Image':
im = ee.Image(asset)
themap.addLayer(im, {}, asset)
elif ty == 'ImageCollection':
col = ee.ImageCollection(asset)
themap.addLayer(col)
return wrap
# Set reload handler
# self.reload_button.on_click(reload_handler)
self.reload_button.on_click(self.reload)
# Set reload handler
self.add2map.on_click(add2map_handler(self.map))
# Set delete selected handler
self.delete.on_click(self.delete_selected)
# First Accordion
self.root_acc = self.core(self.root_path)
# Set VBox children
self.children = [self.header, self.root_acc]
def delete_selected(self, button=None):
''' function to delete selected assets '''
selected = self.get_selected()
# Output widget
output = HTML('')
def handle_yes(button):
self.children = [self.header, output]
pool = Pool(self.POOL_SIZE)
# pool = pp.ProcessPool(self.POOL_SIZE)
if selected:
''' OLD
for asset, ty in selected.items():
recrusive_delete_asset_to_widget(asset, output)
args = []
for asset, ty in selected.items():
args.append((asset, output))
# pool.map(recrusive_delete_asset_to_widget, args)
# pool.map(test2, args)
# pool.close()
# pool.join()
'''
assets = [ass for ass in selected.keys()]
pool.map(batch.recrusive_delete_asset, assets)
# TODO: cant map recrusive_delete_asset_to_widget because the passed widget is not pickable
pool.close()
pool.join()
# when deleting end, reload
self.reload()
def handle_no(button):
self.reload()
def handle_cancel(button):
self.reload()
assets_str = ['{} ({})'.format(ass, ty) for ass, ty in selected.items()]
assets_str = '</br>'.join(assets_str)
confirm = ConfirmationWidget('<h2>Delete {} assets</h2>'.format(len(selected.keys())),
'The following assets are going to be deleted: </br> {} </br> Are you sure?'.format(assets_str),
handle_yes=handle_yes,
handle_no=handle_no,
handle_cancel=handle_cancel)
self.children = [self.header, confirm, output]
def reload(self, button=None):
new_accordion = self.core(self.root_path)
# Set VBox children
self.children = [self.header, new_accordion]
def get_selected(self):
''' get the selected assets
:return: a dictionary with the type as key and asset root as value
:rtype: dict
'''
def wrap(checkacc, assets={}, root=self.root_path):
children = checkacc.children # list of CheckRow
for child in children:
checkbox = child.children[0] # checkbox of the CheckRow
widget = child.children[1] # widget of the CheckRow (Accordion)
state = checkbox.value
if isinstance(widget.children[0], CheckAccordion):
title = widget.get_title(0).split(' ')[0]
new_root = '{}/{}'.format(root, title)
newselection = wrap(widget.children[0], assets, new_root)
assets = newselection
else:
if state:
title = child.children[1].get_title(0)
# remove type that is between ()
ass = title.split(' ')[0]
ty = title.split(' ')[1][1:-1]
# append root
ass = '{}/{}'.format(root, ass)
# append title to selected list
# assets.append(title)
assets[ass] = ty
return assets
# get selection on root
begin = self.children[1] # CheckAccordion of root
return wrap(begin)
def core(self, path):
# Get Assets data
root_list = ee.data.getList({'id': path})
# empty lists to fill with ids, types, widgets and paths
ids = []
types = []
widgets = []
paths = []
# iterate over the list of the root
for content in root_list:
# get data
id = content['id']
ty = content['type']
# append data to lists
paths.append(id)
ids.append(id.replace(path+'/', ''))
types.append(ty)
wid = HTML('Loading..')
widgets.append(wid)
# super(AssetManager, self).__init__(widgets=widgets, **kwargs)
# self.widgets = widgets
asset_acc = CheckAccordion(widgets=widgets)
# TODO: set handler for title's checkbox: select all checkboxes
# set titles
for i, (title, ty) in enumerate(zip(ids, types)):
final_title = '{title} ({type})'.format(title=title, type=ty)
asset_acc.set_title(i, final_title)
def handle_new_accordion(change):
path = change['path']
index = change['index']
ty = change['type']
if ty == 'Folder' or ty == 'ImageCollection':
wid = self.core(path)
else:
image = ee.Image(path)
info = image.getInfo()
width = int(info['bands'][0]['dimensions'][0])
height = int(info['bands'][0]['dimensions'][1])
new_width = int(self.thumb_height)/height*width
thumb = image.getThumbURL({'dimensions':[new_width, self.thumb_height]})
# wid = ImageWid(value=thumb)
wid_i = HTML('<img src={}>'.format(thumb))
wid_info = create_accordion(info)
wid = HBox(children=[wid_i, wid_info])
asset_acc.set_widget(index, wid)
def handle_checkbox(change):
path = change['path']
widget = change['widget'] # Accordion
wid_children = widget.children[0] # can be a HTML or CheckAccordion
new = change['new']
if isinstance(wid_children, CheckAccordion): # set all checkboxes to True
for child in wid_children.children:
check = child.children[0]
check.value = new
# set handlers
for i, (path, ty) in enumerate(zip(paths, types)):
asset_acc.set_accordion_handler(
i, handle_new_accordion,
extra_params={'path':path, 'index':i, 'type': ty}
)
asset_acc.set_checkbox_handler(
i, handle_checkbox,
extra_params={'path':path, 'index':i, 'type': ty}
)
return asset_acc
class TaskManager(VBox):
def __init__(self, **kwargs):
super(TaskManager, self).__init__(**kwargs)
# Header
self.checkbox = Checkbox(indent=False,
layout=Layout(flex='1 1 20', width='auto'))
self.cancel_selected = Button(description='Cancel Selected',
tooltip='Cancel all selected tasks')
        self.cancel_all = Button(description='Cancel All',
tooltip='Cancel all tasks')
self.refresh = Button(description='Refresh',
tooltip='Refresh Tasks List')
self.autorefresh = ToggleButton(description='auto-refresh',
tooltip='click to enable/disable autorefresh')
self.slider = IntSlider(min=1, max=10, step=1, value=5)
self.hbox = HBox([self.checkbox, self.refresh,
self.cancel_selected, self.cancel_all,
self.autorefresh, self.slider])
# Tabs for COMPLETED, FAILED, etc
self.tabs = Tab()
# Tabs index
self.tab_index = {0: 'RUNNING',
1: 'COMPLETED',
2: 'FAILED',
3: 'CANCELED',
4: 'UNKNOWN'}
self.taskVBox = VBox()
self.runningVBox = VBox()
self.completedVBox = VBox()
self.failedVBox = VBox()
self.canceledVBox = VBox()
self.unknownVBox = VBox()
self.tab_widgets_rel = {'RUNNING': self.runningVBox,
'COMPLETED': self.completedVBox,
'FAILED': self.failedVBox,
'CANCELED': self.canceledVBox,
'UNKNOWN': self.unknownVBox}
# Create Tabs
self.tab_widgets = []
for key, val in self.tab_index.items():
widget = self.tab_widgets_rel[val]
self.tab_widgets.append(widget)
self.tabs.children = self.tab_widgets
self.tabs.set_title(key, val)
''' autorefresh
def update_task_list(widget):
# widget is a VBox
tasklist = ee.data.getTaskList()
widlist = []
for task in tasklist:
accordion = create_accordion(task)
if task.has_key('description'):
name = '{} ({})'.format(task['description'], task['state'])
else:
name = '{} ({})'.format(task['output_url'][0].split('/')[-1], task['state'])
mainacc = Accordion(children=(accordion, ))
mainacc.set_title(0, name)
mainacc.selected_index = None
wid = CheckRow(mainacc)
#wid = CheckRow(accordion)
widlist.append(wid)
widget.children = tuple(widlist)
'''
def loop(widget):
while True:
self.update_task_list()(self.refresh)
time.sleep(self.slider.value)
# First widget
self.update_task_list(vbox=self.runningVBox)(self.refresh)
# self.children = (self.hbox, self.taskVBox)
self.children = (self.hbox, self.tabs)
# Set on_click for refresh button
self.refresh.on_click(self.update_task_list(vbox=self.selected_tab()))
''' autorefresh
thread = threading.Thread(target=loop, args=(self.taskVBox,))
thread.start()
'''
# Set on_clicks
self.cancel_all.on_click(self.cancel_all_click)
self.cancel_selected.on_click(self.cancel_selected_click)
# self.autorefresh
def autorefresh_loop(self):
pass
def tab_handler(self, change):
if change['name'] == 'selected_index':
self.update_task_list()(self.refresh)
def selected_tab(self):
''' get the selected tab '''
index = self.tabs.selected_index
tab_name = self.tab_index[index]
return self.tab_widgets_rel[tab_name]
def update_task_list(self, **kwargs):
def wrap(button):
self.selected_tab().children = (HTML('Loading...'),)
try:
tasklist = ee.data.getTaskList()
# empty lists
running_list = []
completed_list = []
failed_list = []
canceled_list = []
unknown_list = []
all_list = {'RUNNING': running_list, 'COMPLETED': completed_list,
'FAILED': failed_list, 'CANCELED': canceled_list,
'UNKNOWN': unknown_list}
for task in tasklist:
state = task['state']
accordion = create_accordion(task)
if task['state'] == 'COMPLETED':
start = int(task['start_timestamp_ms'])
end = int(task['creation_timestamp_ms'])
seconds = float((start-end))/1000
name = '{} ({} sec)'.format(task['output_url'][0].split('/')[-1],
seconds)
else:
name = '{}'.format(task['description'])
# Accordion for CheckRow widget
mainacc = Accordion(children=(accordion, ))
mainacc.set_title(0, name)
mainacc.selected_index = None
# CheckRow
wid = CheckRow(mainacc)
# Append widget to the CORRECT list
all_list[state].append(wid)
# Assign Children
self.runningVBox.children = tuple(running_list)
self.completedVBox.children = tuple(completed_list)
self.failedVBox.children = tuple(failed_list)
self.canceledVBox.children = tuple(canceled_list)
self.unknownVBox.children = tuple(unknown_list)
except Exception as e:
self.selected_tab().children = (HTML(str(e)),)
return wrap
def get_selected(self):
""" Get selected Tasks
:return: a list of the selected indexes
"""
selected = []
children = self.selected_tab().children
for i, child in enumerate(children):
# checkrow = child.children[0] # child is an accordion
state = child.checkbox.value
if state: selected.append(i)
return selected
def get_taskid(self, index):
# Get selected Tab
selected_wid = self.selected_tab() # VBox
# Children of the Tab's VBox
children = selected_wid.children
# Get CheckRow that corresponds to the passed index
checkrow = children[index]
# Get main accordion
mainacc = checkrow.widget
# Get details accordion
selectedacc = mainacc.children[0]
for n, child in enumerate(selectedacc.children):
title = selectedacc.get_title(n)
if title == 'id':
return child.value
def get_selected_taskid(self):
selected = self.get_selected()
selected_wid = self.selected_tab() # VBox
children = selected_wid.children
taskid_list = []
for select in selected:
'''
checkrow = children[select]
mainacc = checkrow.widget
selectedacc = mainacc.children[0]
for n, child in enumerate(selectedacc.children):
title = selectedacc.get_title(n)
if title == 'id':
taskid_list.append(child.value)
'''
taskid = self.get_taskid(select)
taskid_list.append(taskid)
return taskid_list
def cancel_selected_click(self, button):
selected = self.get_selected_taskid()
for taskid in selected:
try:
ee.data.cancelTask(taskid)
except:
continue
self.update_task_list()(self.refresh)
def cancel_all_click(self, button):
selected_wid = self.selected_tab() # VBox
children = selected_wid.children
for n, child in enumerate(children):
taskid = self.get_taskid(n)
try:
ee.data.cancelTask(taskid)
except:
continue
self.update_task_list()(self.refresh)
class ConfirmationWidget(VBox):
def __init__(self, title='Confirmation', legend='Are you sure?',
handle_yes=None, handle_no=None, handle_cancel=None, **kwargs):
super(ConfirmationWidget, self).__init__(**kwargs)
# Title Widget
self.title = title
self.title_widget = HTML(self.title)
# Legend Widget
self.legend = legend
self.legend_widget = HTML(self.legend)
# Buttons
self.yes = Button(description='Yes')
handler_yes = handle_yes if handle_yes else lambda x: x
self.yes.on_click(handler_yes)
self.no = Button(description='No')
handler_no = handle_no if handle_no else lambda x: x
self.no.on_click(handler_no)
self.cancel = Button(description='Cancel')
handler_cancel = handle_cancel if handle_cancel else lambda x: x
self.cancel.on_click(handler_cancel)
self.buttons = HBox([self.yes, self.no, self.cancel])
self.children = [self.title_widget, self.legend_widget, self.buttons]
class RealBox(Box):
""" Real Box Layout
items:
[[widget1, widget2],
[widget3, widget4]]
"""
items = List()
width = Int()
border_inside = Unicode()
border_outside = Unicode()
def __init__(self, **kwargs):
super(RealBox, self).__init__(**kwargs)
self.layout = Layout(display='flex', flex_flow='column',
border=self.border_outside)
def max_row_elements(self):
maxn = 0
for el in self.items:
n = len(el)
if n>maxn:
maxn = n
return maxn
@observe('items')
def _ob_items(self, change):
layout_columns = Layout(display='flex', flex_flow='row')
new = change['new']
children = []
# recompute size
maxn = self.max_row_elements()
width = 100/maxn
for el in new:
for wid in el:
if not wid.layout.width:
if self.width:
wid.layout = Layout(width='{}px'.format(self.width),
border=self.border_inside)
else:
wid.layout = Layout(width='{}%'.format(width),
border=self.border_inside)
hbox = Box(el, layout=layout_columns)
children.append(hbox)
self.children = children
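# A small usage sketch for RealBox, following the 'items' layout described in its
# docstring (the widgets below are illustrative only):
#
#   box = RealBox()
#   box.items = [[HTML('row 1, col 1'), HTML('row 1, col 2')],
#                [HTML('row 2, col 1'), HTML('row 2, col 2')]]
#   display(box)  # assumes an IPython/Jupyter display context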
class FloatBandWidget(HBox):
min = Float(0)
max = Float(1)
def __init__(self, **kwargs):
super(FloatBandWidget, self).__init__(**kwargs)
self.minWid = FloatText(value=self.min, description='min')
self.maxWid = FloatText(value=self.max, description='max')
self.children = [self.minWid, self.maxWid]
self.observe(self._ob_min, names=['min'])
self.observe(self._ob_max, names=['max'])
def _ob_min(self, change):
new = change['new']
self.minWid.value = new
def _ob_max(self, change):
new = change['new']
self.maxWid.value = new
| 2.34375
| 2
|
models.py
|
dahe-cvl/isvc2020_overscan_detection
| 0
|
12777394
|
<reponame>dahe-cvl/isvc2020_overscan_detection<filename>models.py
from torchvision import models
from torchvision.models.segmentation.deeplabv3 import DeepLabHead
from torchvision.models.segmentation.fcn import FCNHead
from metrics import *
from datasets import *
from collections import OrderedDict
import torch
import torch.nn as nn
class CNN(nn.Module):
"""CNN."""
def __init__(self, model_arch="resnet50", n_classes=2, include_top=False, pretrained=False, lower_features=False):
"""CNN Builder."""
super(CNN, self).__init__()
self.include_top = include_top
self.pretrained = pretrained
self.lower_features = lower_features
self.gradients = None
self.classifier = None
if (model_arch == "resnet50"):
self.model = models.resnet50(pretrained=True)
for params in self.model.parameters():
params.requires_grad = self.pretrained
num_ftrs = self.model.fc.in_features
self.model.fc = torch.nn.Linear(num_ftrs, n_classes)
self.features = nn.Sequential(*list(self.model.children())[:-1])
#print(self.features)
self.features_dict = OrderedDict()
elif (model_arch == "resnet101"):
self.model = models.resnet101(pretrained=True)
#print(self.model)
for params in self.model.parameters():
params.requires_grad = self.pretrained
num_ftrs = self.model.fc.in_features
self.model.fc = torch.nn.Linear(num_ftrs, n_classes)
self.features_dict = OrderedDict()
if (lower_features == True):
self.model = nn.Sequential(*list(self.model.children())[:5])
else:
self.model = nn.Sequential(*list(self.model.children())[:-2])
elif (model_arch == "squeezenet"):
self.model = models.squeezenet1_1(pretrained=True)
#print(self.model)
#self.classifier = self.model.classifier
for params in self.model.parameters():
params.requires_grad = self.pretrained
#num_ftrs = self.model.fc.in_features
#self.model.fc = torch.nn.Linear(num_ftrs, n_classes)
#num_ftrs = 512
#self.model.classifier[-1] = torch.nn.Linear(num_ftrs, n_classes)
self.features_dict = OrderedDict()
if (lower_features == True):
self.model = nn.Sequential(self.model.features[:6])
else:
self.model = nn.Sequential(self.model.features)
#print(self.model)
#exit()
elif (model_arch == "densenet121"):
self.model = models.densenet121(pretrained=True)
for params in self.model.parameters():
params.requires_grad = self.pretrained
num_ftrs = self.model.classifier.in_features
self.model.classifier = torch.nn.Linear(num_ftrs, n_classes)
self.features = nn.Sequential(*list(self.model.children())[:-1])
print(self.model)
elif (model_arch == "vgg19"):
self.model = models.vgg19(pretrained=True)
for params in self.model.parameters():
params.requires_grad = self.pretrained
num_ftrs = self.model.classifier[0].in_features
self.model.classifier[-1] = torch.nn.Linear(num_ftrs, n_classes)
self.features = nn.Sequential(*list(self.model.children())[:-1])
#print(self.features)
print(self.model)
elif (model_arch == "vgg16"):
self.model = models.vgg16(pretrained=True);
for params in self.model.parameters():
params.requires_grad = self.pretrained
num_ftrs = self.model.classifier[0].in_features
self.model.classifier[-1] = torch.nn.Linear(num_ftrs, n_classes)
if(lower_features == True):
self.model = nn.Sequential(self.model.features[:5])
else:
self.model = nn.Sequential(*list(self.model.children())[:-2])
#print(self.features)
#print(self.model)
#exit()
print(self.model)
self.features_dict = OrderedDict()
elif (model_arch == "mobilenet"):
self.model = models.mobilenet_v2(pretrained=True);
for params in self.model.parameters():
params.requires_grad = self.pretrained
num_ftrs = self.model.classifier[1].in_features
self.model.classifier[-1] = torch.nn.Linear(num_ftrs, n_classes)
if(lower_features == True):
#self.model = nn.Sequential(self.model.features[:5])
self.model = nn.Sequential(*list(self.model.features)[:5])
else:
#self.model = nn.Sequential(*list(self.model.children())[:-1])
self.model = nn.Sequential(*list(self.model.features))
self.features_dict = OrderedDict()
elif (model_arch == "alexnet"):
self.model = models.alexnet(pretrained=True)
for params in self.model.parameters():
params.requires_grad = self.pretrained
num_ftrs = self.model.classifier[0].in_features
self.model.classifier[-1] = torch.nn.Linear(num_ftrs, n_classes)
self.features = nn.Sequential(*list(self.model.children())[:-1])
#print(self.features)
print(self.model)
else:
self.model_arch = None
print("No valid backbone cnn network selected!")
def activations_hook(self, grad):
self.gradients = grad
def get_activations_gradient(self):
return self.gradients
def forward(self, x):
"""Perform forward."""
if(self.include_top == False):
# extract features
x = self.model(x)
self.features_dict['out'] = x
self.features_dict['aux'] = x
return self.features_dict
elif(self.include_top == True):
#print(x.size())
x = self.model(x)
# flatten
x = x.view(x.size(0), -1)
x = self.classifier(x)
self.features_dict['out'] = x
return self.features_dict
return x
def loadModel(model_arch="", classes=None, pre_trained_path=None, expType=None, trainable_backbone_flag=False, lower_features=False):
print("Load model architecture ... ")
if (model_arch == "deeplabv3_resnet101_orig"):
print("deeplab_resnet architecture selected ...")
model = models.segmentation.deeplabv3_resnet101(pretrained=True, progress=True)
for params in model.parameters():
params.requires_grad = trainable_backbone_flag
model.classifier[-1] = torch.nn.Conv2d(256, len(classes), kernel_size=(1, 1))
model.aux_classifier[-1] = torch.nn.Conv2d(256, len(classes), kernel_size=(1, 1))
features = model.backbone
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path) # + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
return model, features
elif (model_arch == "fcn_resnet101_orig"):
print("deeplab_resnet architecture selected ...")
model = models.segmentation.fcn_resnet101(pretrained=True, progress=True)
for params in model.parameters():
params.requires_grad = trainable_backbone_flag
model.classifier[-1] = torch.nn.Conv2d(512, len(classes), kernel_size=(1, 1))
model.aux_classifier[-1] = torch.nn.Conv2d(256, len(classes), kernel_size=(1, 1))
features = model.backbone
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)# + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
return model, features
elif (model_arch == "deeplabv3_resnet101"):
print("deeplabv3_resnet101 architecture selected ...")
backbone_net = CNN(model_arch="resnet101", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
DeepLabHead(256, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
DeepLabHead(2048, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.DeepLabV3(backbone=backbone_net, classifier=classifier, aux_classifier=None)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)# + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
return model, features
elif (model_arch == "deeplabv3_vgg16"):
print("deeplabv3_vgg architecture selected ...")
# backbone_net = CNN(model_arch="resnet101", n_classes=len(classes), include_top=False)
backbone_net = CNN(model_arch="vgg16", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
DeepLabHead(64, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
DeepLabHead(512, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.DeepLabV3(backbone=backbone_net, classifier=classifier, aux_classifier=None)
#print(model)
#exit()
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path) # + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
# Find total parameters and trainable parameters
total_params = sum(p.numel() for p in model.parameters())
print("total_params:" + str(total_params))
total_trainable_params = sum(
p.numel() for p in model.parameters() if p.requires_grad)
print("total_trainable_params: " + str(total_trainable_params))
#exit()
return model, features
elif (model_arch == "deeplabv3_mobilenet"):
print("deeplabv3_mobilenet architecture selected ...")
backbone_net = CNN(model_arch="mobilenet", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
DeepLabHead(32, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
DeepLabHead(1280, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.DeepLabV3(backbone=backbone_net, classifier=classifier, aux_classifier=None)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)
model.load_state_dict(model_dict_state['net'])
return model, features
elif (model_arch == "deeplabv3_squeezenet"):
print("deeplabv3_mobilenet architecture selected ...")
backbone_net = CNN(model_arch="squeezenet", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
DeepLabHead(128, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
DeepLabHead(512, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.DeepLabV3(backbone=backbone_net, classifier=classifier, aux_classifier=None)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)# + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
return model, features
elif (model_arch == "fcn_vgg16"):
print("fcn_vgg16 architecture selected ...")
backbone_net = CNN(model_arch="vgg16", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if(lower_features == True):
classifier = nn.Sequential(
FCNHead(64, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
FCNHead(512, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.FCN(backbone=backbone_net, classifier=classifier, aux_classifier=None)
# print(model)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)# + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
return model, features
elif (model_arch == "fcn_resnet101"):
print("fcn_resnet101 architecture selected ...")
backbone_net = CNN(model_arch="resnet101", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
FCNHead(256, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
FCNHead(2048, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.FCN(backbone=backbone_net, classifier=classifier, aux_classifier=None)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path) # + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
# Find total parameters and trainable parameters
total_params = sum(p.numel() for p in model.parameters())
print("total_params:" + str(total_params))
total_trainable_params = sum(
p.numel() for p in model.parameters() if p.requires_grad)
print("total_trainable_params: " + str(total_trainable_params))
#exit()
return model, features
elif (model_arch == "fcn_squeezenet"):
print("deeplabv3_squeezenet architecture selected ...")
backbone_net = CNN(model_arch="squeezenet", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
FCNHead(128, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
FCNHead(512, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.FCN(backbone=backbone_net, classifier=classifier, aux_classifier=None)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)# + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
# Find total parameters and trainable parameters
total_params = sum(p.numel() for p in model.parameters())
print("total_params:" + str(total_params))
total_trainable_params = sum(
p.numel() for p in model.parameters() if p.requires_grad)
print("total_trainable_params: " + str(total_trainable_params))
# exit()
return model, features
elif (model_arch == "fcn_mobilenet"):
print("deeplabv3_mobilenet architecture selected ...")
backbone_net = CNN(model_arch="mobilenet", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
FCNHead(32, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
FCNHead(1280, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.FCN(backbone=backbone_net, classifier=classifier, aux_classifier=None)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)# + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
# Find total parameters and trainable parameters
total_params = sum(p.numel() for p in model.parameters())
print("total_params:" + str(total_params))
total_trainable_params = sum(
p.numel() for p in model.parameters() if p.requires_grad)
print("total_trainable_params: " + str(total_trainable_params))
# exit()
return model, features
else:
print("ERROR: select valid model architecture!")
exit()
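# A usage sketch for loadModel (the class list below is a placeholder, not part of
# this repository; passing pre_trained_path=None skips checkpoint loading):
#
#   classes = ["content", "overscan"]
#   model, features = loadModel(model_arch="deeplabv3_vgg16",
#                               classes=classes,
#                               pre_trained_path=None,
#                               trainable_backbone_flag=False,
#                               lower_features=False)
#   out = model(torch.randn(1, 3, 256, 256))["out"]  # logits of shape (1, len(classes), 256, 256)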
| 2.203125
| 2
|
hexFileParser.py
|
rdaforno/hexfilepatcher
| 4
|
12777395
|
<gh_stars>1-10
#!/usr/bin/python3
######################################################################################
#
# Intel HEX file parser
#
# 2020, rdaforno
#
######################################################################################
import binascii
# Intel HEX file parser
class HexFileParser:
lines = []
def __init__(self, filename):
self.lines.clear()
self.load(filename)
def load(self, filename):
self.lines.clear()
with open(filename) as fp:
for line in fp:
if line[0] != ':':
print("line '%s' skipped.." % line)
continue
line = line[1:].strip()
l = int(line[0:2], 16)
addr = int(line[2:6], 16)
t = int(line[6:8], 16)
data = line[8:-2]
if len(data) != (l * 2):
print("invalid data length! line '%s' skipped.." % line)
continue
crc = int(line[-2:], 16)
if self.calc_line_crc(line[:-2]) != crc:
print("invalid hex file, CRC doesn't match!")
break
self.lines.append({ 'len':l, 'addr':addr, 'type':t, 'data':data, 'crc':crc })
print("%u lines loaded from %s" % (len(self.lines), filename))
def print_lines(self):
for line in self.lines:
print("length: %u, address: 0x%02x, type: %u, data: %s, crc: %02x" % (line['len'], line['addr'], line['type'], line['data'], line['crc']))
def save(self, filename):
fp = open(filename, "w")
if fp:
for line in self.lines:
fp.write(":%02X%04X%02X%s%02X\n" % (line['len'], line['addr'], line['type'], line['data'], line['crc']))
fp.close()
print("hex file saved as %s" % filename)
def save_as_c_var(self, filename):
fp = open(filename, "w")
if fp:
fp.write("const char hex_image[] = {\n \"")
fp.write("\"\n \"".join(self.serialize_data()))
fp.write("\"\n};\n")
fp.close()
print("hex file saved as %s" % filename)
def save_as_binary(self, filename):
fp = open(filename, "wb")
if fp:
fp.write(bytes.fromhex("".join(self.serialize_data())))
fp.close()
print("binary file saved as %s" % filename)
def calc_crc32(self):
return "0x%x" % binascii.crc32(bytes.fromhex("".join(self.serialize_data())))
def serialize_data(self, start_addr=0, line_width=64):
serialized_data = []
curr_ofs = 0
for line in self.lines:
if line['type'] == 0:
curr_addr = curr_ofs + line['addr']
if curr_addr > start_addr:
serialized_data.append('00'*(curr_addr - start_addr)) # fill gap with zeros
print("added %d padding bytes at address 0x%x" % (curr_addr - start_addr, curr_addr))
serialized_data.append(line['data'])
start_addr = curr_addr + line['len']
elif line['type'] == 4 or line['type'] == 2:
if line['type'] == 4:
curr_ofs = int(line['data'][0:4], 16) * 65536
else:
curr_ofs = int(line['data'][0:4], 16) << 4
if start_addr == 0:
# if this is the first line and start_addr is not given, then use this offset as the start address
start_addr = curr_ofs
print("start address set to 0x%x" % start_addr)
else:
print("address offset found: 0x%x" % curr_ofs)
if curr_ofs < start_addr:
print("invalid address offset")
return None
elif line['type'] == 1:
pass # marks the EOF
elif line['type'] == 3:
# defines the start address
pass
else:
print("skipping line of type %u" % line['type'])
serialized_str = "".join(serialized_data)
print("binary size is %u bytes" % (len(serialized_str) / 2))
if line_width == 0:
return serialized_str
else:
return [serialized_str[i:i+line_width] for i in range(0, len(serialized_str), line_width)]
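    # Example for serialize_data (illustrative): a type-4 record with data '0800'
    # sets the offset to 0x08000000; a following data record with addr 0x0010 then
    # lands at absolute address 0x08000010, and any gap since the previous record
    # is filled with zero bytes.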
# returns a tuple of line index and line address
def addr_to_lineno(self, addr):
addr_ofs = 0
for i in range(len(self.lines)):
if self.lines[i]['type'] == 4: # extended linear address record
addr_ofs = int(self.lines[i]['data'][0:4], 16) * 65536
elif self.lines[i]['type'] == 2: # extended segment address record (bits 4–19)
addr_ofs = int(self.lines[i]['data'][0:4], 16) << 4
elif self.lines[i]['type'] == 0:
if (addr_ofs + self.lines[i]['addr']) <= addr and (addr_ofs + self.lines[i]['addr'] + self.lines[i]['len']) > addr:
return (i, addr_ofs + self.lines[i]['addr'])
return (-1, -1)
def replace_data(self, addr, size, data):
if size != 1 and size != 2 and size != 4:
print("size %d is not supported" % size)
return False
(i, line_addr) = self.addr_to_lineno(addr)
if i >= 0:
ofs = (addr - line_addr)
if (addr + size) > (line_addr + self.lines[i]['len']): # data stretches over 2 lines
# make sure there is no jump in address to the next line
if (i+1) == len(self.lines) or self.lines[i]['type'] != 6 or ((self.lines[i+1]['addr'] - self.lines[i]['addr']) > self.lines[i]['len']):
print("out of bound error") # trying to overwrite an address that is not present in the hex file
return False
self.lines[i]['data'] = self.insert_data(self.lines[i]['data'], ofs, self.lines[i]['len'] - ofs, data)
self.lines[i+1]['data'] = self.insert_data(self.lines[i+1]['data'], 0, size - (self.lines[i]['len'] - ofs), data)
self.update_line_crc(i+1)
else:
self.lines[i]['data'] = self.insert_data(self.lines[i]['data'], (addr - line_addr), size, data)
self.update_line_crc(i)
return True
return False
def insert_data(self, line, ofs, size, data): # inserts 'data' of length 'size' into 'line' at offset 'ofs'
if size == 1:
return line[:ofs*2] + ("%02X" % (data % 256)) + line[(ofs+size)*2:]
elif size == 2:
return line[:ofs*2] + ("%02X%02X" % (data % 256, (data >> 8) % 256)) + line[(ofs+size)*2:] # little endian!
elif size == 4:
return line[:ofs*2] + ("%02X%02X%02X%02X" % (data % 256, (data >> 8) % 256, (data >> 16) % 256, (data >> 24) % 256)) + line[(ofs+size)*2:] # little endian!
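    # Intel HEX record checksum: the two's complement of the byte-wise sum of all
    # record fields except the checksum itself. For example, in ":0300300002337A1E"
    # the bytes 03+00+30+00+02+33+7A sum to 0xE2, and (~0xE2 + 1) & 0xFF == 0x1E.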
def update_line_crc(self, idx):
if idx < len(self.lines):
self.lines[idx]['crc'] = self.calc_line_crc("%02X%04X%02X%s" % (self.lines[idx]['len'], self.lines[idx]['addr'], self.lines[idx]['type'], self.lines[idx]['data']))
def calc_line_crc(self, line):
crc = 0
l = 0
while l < len(line) - 1:
crc = crc + int(line[l:l+2], 16)
l = l + 2
crc = (~crc + 1) % 256
return crc
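# A usage sketch for HexFileParser (the file name and patch address below are
# placeholders, not part of this repository):
#
#   parser = HexFileParser("firmware.hex")
#   parser.replace_data(0x0200, 4, 0xDEADBEEF)  # overwrite a 32-bit word, stored little endian
#   parser.save("firmware_patched.hex")
#   print(parser.calc_crc32())                  # CRC32 over the serialized binary image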
| 3.125
| 3
|
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_alarmgr_server_oper.py
|
tkamata-test/ydk-py
| 0
|
12777396
|
<reponame>tkamata-test/ydk-py
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'TimingBucketEnum' : _MetaInfoEnum('TimingBucketEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'not-specified':'not_specified',
'fifteen-min':'fifteen_min',
'one-day':'one_day',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmSeverityEnum' : _MetaInfoEnum('AlarmSeverityEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'unknown':'unknown',
'not-reported':'not_reported',
'not-alarmed':'not_alarmed',
'minor':'minor',
'major':'major',
'critical':'critical',
'severity-last':'severity_last',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmDirectionEnum' : _MetaInfoEnum('AlarmDirectionEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'not-specified':'not_specified',
'send':'send',
'receive':'receive',
'send-receive':'send_receive',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmStatusEnum' : _MetaInfoEnum('AlarmStatusEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'unknown':'unknown',
'set':'set',
'clear':'clear',
'suppress':'suppress',
'last':'last',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmServiceAffectingEnum' : _MetaInfoEnum('AlarmServiceAffectingEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'unknown':'unknown',
'not-service-affecting':'not_service_affecting',
'service-affecting':'service_affecting',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmNotificationSrcEnum' : _MetaInfoEnum('AlarmNotificationSrcEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'not-specified':'not_specified',
'near-end':'near_end',
'far-end':'far_end',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmEventEnum' : _MetaInfoEnum('AlarmEventEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'default':'default',
'notification':'notification',
'last':'last',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmClientEnum' : _MetaInfoEnum('AlarmClientEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'unknown':'unknown',
'producer':'producer',
'consumer':'consumer',
'subscriber':'subscriber',
'client-last':'client_last',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmClientStateEnum' : _MetaInfoEnum('AlarmClientStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'start':'start',
'init':'init',
'connecting':'connecting',
'connected':'connected',
'registered':'registered',
'disconnected':'disconnected',
'ready':'ready',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmGroupsEnum' : _MetaInfoEnum('AlarmGroupsEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'unknown':'unknown',
'environ':'environ',
'ethernet':'ethernet',
'fabric':'fabric',
'power':'power',
'software':'software',
'slice':'slice',
'cpu':'cpu',
'controller':'controller',
'sonet':'sonet',
'otn':'otn',
'sdh-controller':'sdh_controller',
'asic':'asic',
'fpd-infra':'fpd_infra',
'shelf':'shelf',
'mpa':'mpa',
'ots':'ots',
'last':'last',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'Alarms.Detail.DetailSystem.Active.AlarmInfo.Otn' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Active.AlarmInfo.Otn',
False,
[
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AlarmDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmDirectionEnum',
[], [],
''' Alarm direction
''',
'direction',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('notification-source', REFERENCE_ENUM_CLASS, 'AlarmNotificationSrcEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmNotificationSrcEnum',
[], [],
''' Source of Alarm
''',
'notification_source',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'otn',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Active.AlarmInfo.Tca' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Active.AlarmInfo.Tca',
False,
[
_MetaInfoClassMember('bucket-type', REFERENCE_ENUM_CLASS, 'TimingBucketEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'TimingBucketEnum',
[], [],
''' Timing Bucket
''',
'bucket_type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('current-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'current_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('threshold-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'threshold_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'tca',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Active.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Active.AlarmInfo',
False,
[
_MetaInfoClassMember('aid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm aid
''',
'aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('alarm-name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm name
''',
'alarm_name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('eid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm eid
''',
'eid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('interface', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm interface name
''',
'interface',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('module', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm module description
''',
'module',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('otn', REFERENCE_CLASS, 'Otn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Active.AlarmInfo.Otn',
[], [],
''' OTN feature specific alarm attributes
''',
'otn',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('pending-sync', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Pending async flag
''',
'pending_sync',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reporting-agent-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reporting agent id
''',
'reporting_agent_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('service-affecting', REFERENCE_ENUM_CLASS, 'AlarmServiceAffectingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmServiceAffectingEnum',
[], [],
''' Alarm service affecting
''',
'service_affecting',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' Alarm status
''',
'status',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm tag description
''',
'tag',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tca', REFERENCE_CLASS, 'Tca' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Active.AlarmInfo.Tca',
[], [],
''' TCA feature specific alarm attributes
''',
'tca',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'AlarmEventEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmEventEnum',
[], [],
''' alarm event type
''',
'type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Active' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Active',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Active.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.History.AlarmInfo.Otn' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.History.AlarmInfo.Otn',
False,
[
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AlarmDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmDirectionEnum',
[], [],
''' Alarm direction
''',
'direction',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('notification-source', REFERENCE_ENUM_CLASS, 'AlarmNotificationSrcEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmNotificationSrcEnum',
[], [],
''' Source of Alarm
''',
'notification_source',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'otn',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.History.AlarmInfo.Tca' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.History.AlarmInfo.Tca',
False,
[
_MetaInfoClassMember('bucket-type', REFERENCE_ENUM_CLASS, 'TimingBucketEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'TimingBucketEnum',
[], [],
''' Timing Bucket
''',
'bucket_type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('current-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'current_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('threshold-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'threshold_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'tca',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.History.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.History.AlarmInfo',
False,
[
_MetaInfoClassMember('aid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm aid
''',
'aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('alarm-name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm name
''',
'alarm_name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('eid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm eid
''',
'eid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('interface', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm interface name
''',
'interface',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('module', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm module description
''',
'module',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('otn', REFERENCE_CLASS, 'Otn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.History.AlarmInfo.Otn',
[], [],
''' OTN feature specific alarm attributes
''',
'otn',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('pending-sync', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Pending async flag
''',
'pending_sync',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reporting-agent-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reporting agent id
''',
'reporting_agent_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('service-affecting', REFERENCE_ENUM_CLASS, 'AlarmServiceAffectingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmServiceAffectingEnum',
[], [],
''' Alarm service affecting
''',
'service_affecting',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' Alarm status
''',
'status',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm tag description
''',
'tag',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tca', REFERENCE_CLASS, 'Tca' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.History.AlarmInfo.Tca',
[], [],
''' TCA feature specific alarm attributes
''',
'tca',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'AlarmEventEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmEventEnum',
[], [],
''' alarm event type
''',
'type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.History' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.History',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.History.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'history',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo.Otn' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo.Otn',
False,
[
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AlarmDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmDirectionEnum',
[], [],
''' Alarm direction
''',
'direction',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('notification-source', REFERENCE_ENUM_CLASS, 'AlarmNotificationSrcEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmNotificationSrcEnum',
[], [],
''' Source of Alarm
''',
'notification_source',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'otn',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo',
False,
[
_MetaInfoClassMember('aid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm aid
''',
'aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('alarm-name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm name
''',
'alarm_name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('eid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm eid
''',
'eid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('interface', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm interface name
''',
'interface',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('module', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm module description
''',
'module',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('otn', REFERENCE_CLASS, 'Otn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo.Otn',
[], [],
''' OTN feature specific alarm attributes
''',
'otn',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('pending-sync', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Pending async flag
''',
'pending_sync',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reporting-agent-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reporting agent id
''',
'reporting_agent_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('service-affecting', REFERENCE_ENUM_CLASS, 'AlarmServiceAffectingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmServiceAffectingEnum',
[], [],
''' Alarm service affecting
''',
'service_affecting',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' Alarm status
''',
'status',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm suppressed time
''',
'suppressed_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm suppressed time(timestamp format)
''',
'suppressed_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm tag description
''',
'tag',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Suppressed' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Suppressed',
False,
[
_MetaInfoClassMember('suppressed-info', REFERENCE_LIST, 'SuppressedInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo',
[], [],
''' Suppressed Alarm List
''',
'suppressed_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Stats' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Stats',
False,
[
_MetaInfoClassMember('active', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are currently in the active state
''',
'active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('cache-hit', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total alarms which had the cache hit
''',
'cache_hit',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('cache-miss', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total alarms which had the cache miss
''',
'cache_miss',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that we couldn't keep track of due to some
error or other
''',
'dropped',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-clear-without-set', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped clear without set
''',
'dropped_clear_without_set',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-db-error', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped due to db error
''',
'dropped_db_error',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-duplicate', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped which were duplicate
''',
'dropped_duplicate',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-insuff-mem', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped due to insufficient memory
''',
'dropped_insuff_mem',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-invalid-aid', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped due to invalid aid
''',
'dropped_invalid_aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('history', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are cleared. This one is counted
over a long period of time
''',
'history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reported', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Total alarms reported to this Alarm
Mgr
''',
'reported',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are in suppressed state
''',
'suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('sysadmin-active', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are currently in the active
state(sysadmin plane)
''',
'sysadmin_active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('sysadmin-history', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are cleared in sysadmin plane. This
one is counted over a long period of time
''',
'sysadmin_history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('sysadmin-suppressed', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are suppressed in sysadmin plane.
''',
'sysadmin_suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'stats',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Clients.ClientInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Clients.ClientInfo',
False,
[
_MetaInfoClassMember('connect-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent connected to the alarm
mgr
''',
'connect_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('connect-timestamp', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Agent connect timestamp
''',
'connect_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-disp', ATTRIBUTE, 'bool' , None, None,
[], [],
''' The current subscription status of the client
''',
'filter_disp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' The filter used for alarm group
''',
'filter_group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' The filter used for alarm severity
''',
'filter_severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-state', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' The filter used for alarm bi-state state
''',
'filter_state',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('get-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent queried for alarms
''',
'get_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('handle', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' The client handle through which interface
''',
'handle',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms agent id of the client
''',
'id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' The location of this client
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm client
''',
'name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('report-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent reported alarms
''',
'report_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'AlarmClientStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmClientStateEnum',
[], [],
''' The current state of the client
''',
'state',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('subscribe-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent subscribed for alarms
''',
'subscribe_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('subscriber-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms agent subscriber id of the client
''',
'subscriber_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'AlarmClientEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmClientEnum',
[], [],
''' The type of the client
''',
'type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'client-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Clients' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Clients',
False,
[
_MetaInfoClassMember('client-info', REFERENCE_LIST, 'ClientInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Clients.ClientInfo',
[], [],
''' Client List
''',
'client_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'clients',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Active',
[], [],
''' Show the active alarms at this scope.
''',
'active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clients', REFERENCE_CLASS, 'Clients' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Clients',
[], [],
''' Show the clients associated with this service.
''',
'clients',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('history', REFERENCE_CLASS, 'History' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.History',
[], [],
''' Show the history alarms at this scope.
''',
'history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('stats', REFERENCE_CLASS, 'Stats' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Stats',
[], [],
''' Show the service statistics.
''',
'stats',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed', REFERENCE_CLASS, 'Suppressed' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Suppressed',
[], [],
''' Show the suppressed alarms at this scope.
''',
'suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'detail-system',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Otn' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Otn',
False,
[
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AlarmDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmDirectionEnum',
[], [],
''' Alarm direction
''',
'direction',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('notification-source', REFERENCE_ENUM_CLASS, 'AlarmNotificationSrcEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmNotificationSrcEnum',
[], [],
''' Source of Alarm
''',
'notification_source',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'otn',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Tca' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Tca',
False,
[
_MetaInfoClassMember('bucket-type', REFERENCE_ENUM_CLASS, 'TimingBucketEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'TimingBucketEnum',
[], [],
''' Timing Bucket
''',
'bucket_type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('current-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm current value
''',
'current_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('threshold-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'threshold_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'tca',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo',
False,
[
_MetaInfoClassMember('aid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm aid
''',
'aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('alarm-name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm name
''',
'alarm_name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('eid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm eid
''',
'eid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('interface', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm interface name
''',
'interface',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('module', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm module description
''',
'module',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('otn', REFERENCE_CLASS, 'Otn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Otn',
[], [],
''' OTN feature specific alarm attributes
''',
'otn',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('pending-sync', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Pending async flag
''',
'pending_sync',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reporting-agent-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reporting agent id
''',
'reporting_agent_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('service-affecting', REFERENCE_ENUM_CLASS, 'AlarmServiceAffectingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmServiceAffectingEnum',
[], [],
''' Alarm service affecting
''',
'service_affecting',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' Alarm status
''',
'status',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm tag description
''',
'tag',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tca', REFERENCE_CLASS, 'Tca' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Tca',
[], [],
''' TCA feature specific alarm attributes
''',
'tca',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'AlarmEventEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmEventEnum',
[], [],
''' Alarm event type
''',
'type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Otn' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Otn',
False,
[
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AlarmDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmDirectionEnum',
[], [],
''' Alarm direction
''',
'direction',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('notification-source', REFERENCE_ENUM_CLASS, 'AlarmNotificationSrcEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmNotificationSrcEnum',
[], [],
''' Source of Alarm
''',
'notification_source',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'otn',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Tca' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Tca',
False,
[
_MetaInfoClassMember('bucket-type', REFERENCE_ENUM_CLASS, 'TimingBucketEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'TimingBucketEnum',
[], [],
''' Timing Bucket
''',
'bucket_type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('current-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm current value
''',
'current_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('threshold-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'threshold_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'tca',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo',
False,
[
_MetaInfoClassMember('aid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm aid
''',
'aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('alarm-name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm name
''',
'alarm_name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('eid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm eid
''',
'eid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('interface', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm interface name
''',
'interface',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('module', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm module description
''',
'module',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('otn', REFERENCE_CLASS, 'Otn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Otn',
[], [],
''' OTN feature specific alarm attributes
''',
'otn',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('pending-sync', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Pending async flag
''',
'pending_sync',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reporting-agent-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reporting agent id
''',
'reporting_agent_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('service-affecting', REFERENCE_ENUM_CLASS, 'AlarmServiceAffectingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmServiceAffectingEnum',
[], [],
''' Alarm service affecting
''',
'service_affecting',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' Alarm status
''',
'status',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm tag description
''',
'tag',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tca', REFERENCE_CLASS, 'Tca' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Tca',
[], [],
''' TCA feature specific alarm attributes
''',
'tca',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'AlarmEventEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmEventEnum',
[], [],
''' Alarm event type
''',
'type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'history',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo.Otn' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo.Otn',
False,
[
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AlarmDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmDirectionEnum',
[], [],
''' Alarm direction
''',
'direction',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('notification-source', REFERENCE_ENUM_CLASS, 'AlarmNotificationSrcEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmNotificationSrcEnum',
[], [],
''' Source of Alarm
''',
'notification_source',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'otn',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo',
False,
[
_MetaInfoClassMember('aid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm aid
''',
'aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('alarm-name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm name
''',
'alarm_name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('eid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm eid
''',
'eid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('interface', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm interface name
''',
'interface',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('module', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm module description
''',
'module',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('otn', REFERENCE_CLASS, 'Otn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo.Otn',
[], [],
''' OTN feature specific alarm attributes
''',
'otn',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('pending-sync', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Pending async flag
''',
'pending_sync',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reporting-agent-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reporting agent id
''',
'reporting_agent_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('service-affecting', REFERENCE_ENUM_CLASS, 'AlarmServiceAffectingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmServiceAffectingEnum',
[], [],
''' Alarm service affecting
''',
'service_affecting',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' Alarm status
''',
'status',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm suppressed time
''',
'suppressed_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm suppressed time(timestamp format)
''',
'suppressed_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm tag description
''',
'tag',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed',
False,
[
_MetaInfoClassMember('suppressed-info', REFERENCE_LIST, 'SuppressedInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo',
[], [],
''' Suppressed Alarm List
''',
'suppressed_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Stats' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Stats',
False,
[
_MetaInfoClassMember('active', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are currently in the active state
''',
'active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('cache-hit', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total alarms which had the cache hit
''',
'cache_hit',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('cache-miss', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total alarms which had the cache miss
''',
'cache_miss',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that we couldn't keep track of due to some
error or other
''',
'dropped',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-clear-without-set', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped clear without set
''',
'dropped_clear_without_set',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-db-error', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped due to db error
''',
'dropped_db_error',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-duplicate', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped which were duplicate
''',
'dropped_duplicate',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-insuff-mem', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped due to insufficient memory
''',
'dropped_insuff_mem',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-invalid-aid', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped due to invalid aid
''',
'dropped_invalid_aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('history', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are cleared. This one is counted
over a long period of time
''',
'history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reported', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Total alarms reported to this Alarm
Mgr
''',
'reported',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are in suppressed state
''',
'suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('sysadmin-active', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are currently in the active
state(sysadmin plane)
''',
'sysadmin_active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('sysadmin-history', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are cleared in sysadmin plane. This
one is counted over a long period of time
''',
'sysadmin_history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('sysadmin-suppressed', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are suppressed in sysadmin plane.
''',
'sysadmin_suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'stats',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients.ClientInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients.ClientInfo',
False,
[
_MetaInfoClassMember('connect-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent connected to the alarm
mgr
''',
'connect_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('connect-timestamp', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Agent connect timestamp
''',
'connect_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-disp', ATTRIBUTE, 'bool' , None, None,
[], [],
''' The current subscription status of the client
''',
'filter_disp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' The filter used for alarm group
''',
'filter_group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' The filter used for alarm severity
''',
'filter_severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-state', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' The filter used for alarm bi-state state
''',
'filter_state',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('get-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent queried for alarms
''',
'get_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('handle', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' The client handle through which interface
''',
'handle',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms agent id of the client
''',
'id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' The location of this client
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm client
''',
'name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('report-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent reported alarms
''',
'report_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'AlarmClientStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmClientStateEnum',
[], [],
''' The current state of the client
''',
'state',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('subscribe-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent subscribed for alarms
''',
'subscribe_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('subscriber-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms agent subscriber id of the client
''',
'subscriber_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'AlarmClientEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmClientEnum',
[], [],
''' The type of the client
''',
'type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'client-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients',
False,
[
_MetaInfoClassMember('client-info', REFERENCE_LIST, 'ClientInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients.ClientInfo',
[], [],
''' Client List
''',
'client_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'clients',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation',
False,
[
_MetaInfoClassMember('node-id', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' NodeID of the Location
''',
'node_id',
'Cisco-IOS-XR-alarmgr-server-oper', True),
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active',
[], [],
''' Show the active alarms at this scope.
''',
'active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clients', REFERENCE_CLASS, 'Clients' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients',
[], [],
''' Show the clients associated with this
service.
''',
'clients',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('history', REFERENCE_CLASS, 'History' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History',
[], [],
''' Show the history alarms at this scope.
''',
'history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('stats', REFERENCE_CLASS, 'Stats' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Stats',
[], [],
''' Show the service statistics.
''',
'stats',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed', REFERENCE_CLASS, 'Suppressed' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed',
[], [],
''' Show the suppressed alarms at this scope.
''',
'suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'detail-location',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
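# In the DetailLocation entry above, 'node-id' is the only member whose final
# positional argument is True and the only one carrying a regex restriction
# ('([a-zA-Z0-9_]*\d+/){1,2}([a-zA-Z0-9_]*\d+)', matching locations such as 0/1/CPU0),
# which suggests it is the key leaf of the detail-location YANG list while the other
# members are plain data nodes -- an inference from the generated entries, not a
# statement taken from the model documentation.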
'Alarms.Detail.DetailCard.DetailLocations' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations',
False,
[
_MetaInfoClassMember('detail-location', REFERENCE_LIST, 'DetailLocation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation',
[], [],
''' Specify a card location for alarms.
''',
'detail_location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'detail-locations',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard',
False,
[
_MetaInfoClassMember('detail-locations', REFERENCE_CLASS, 'DetailLocations' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations',
[], [],
''' Table of DetailLocation
''',
'detail_locations',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'detail-card',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail' : {
'meta_info' : _MetaInfoClass('Alarms.Detail',
False,
[
_MetaInfoClassMember('detail-card', REFERENCE_CLASS, 'DetailCard' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard',
[], [],
''' Show detail card scope alarm related data.
''',
'detail_card',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('detail-system', REFERENCE_CLASS, 'DetailSystem' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem',
[], [],
''' Show detail system scope alarm related data.
''',
'detail_system',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'detail',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active.AlarmInfo',
False,
[
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History.AlarmInfo',
False,
[
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'history',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed.SuppressedInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed.SuppressedInfo',
False,
[
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm suppressed time
''',
'suppressed_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm suppressed time(timestamp format)
''',
'suppressed_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed',
False,
[
_MetaInfoClassMember('suppressed-info', REFERENCE_LIST, 'SuppressedInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed.SuppressedInfo',
[], [],
''' Suppressed Alarm List
''',
'suppressed_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation',
False,
[
_MetaInfoClassMember('node-id', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' NodeID of the Location
''',
'node_id',
'Cisco-IOS-XR-alarmgr-server-oper', True),
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active',
[], [],
''' Show the active alarms at this scope.
''',
'active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('history', REFERENCE_CLASS, 'History' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History',
[], [],
''' Show the history alarms at this scope.
''',
'history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed', REFERENCE_CLASS, 'Suppressed' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed',
[], [],
''' Show the suppressed alarms at this scope.
''',
'suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'brief-location',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations',
False,
[
_MetaInfoClassMember('brief-location', REFERENCE_LIST, 'BriefLocation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation',
[], [],
''' Specify a card location for alarms.
''',
'brief_location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'brief-locations',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard',
False,
[
_MetaInfoClassMember('brief-locations', REFERENCE_CLASS, 'BriefLocations' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations',
[], [],
''' Table of BriefLocation
''',
'brief_locations',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'brief-card',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem.Active.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem.Active.AlarmInfo',
False,
[
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem.Active' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem.Active',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem.Active.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem.History.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem.History.AlarmInfo',
False,
[
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem.History' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem.History',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem.History.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'history',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem.Suppressed.SuppressedInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem.Suppressed.SuppressedInfo',
False,
[
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm suppressed time
''',
'suppressed_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm suppressed time(timestamp format)
''',
'suppressed_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem.Suppressed' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem.Suppressed',
False,
[
_MetaInfoClassMember('suppressed-info', REFERENCE_LIST, 'SuppressedInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem.Suppressed.SuppressedInfo',
[], [],
''' Suppressed Alarm List
''',
'suppressed_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem.Active',
[], [],
''' Show the active alarms at this scope.
''',
'active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('history', REFERENCE_CLASS, 'History' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem.History',
[], [],
''' Show the history alarms at this scope.
''',
'history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed', REFERENCE_CLASS, 'Suppressed' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem.Suppressed',
[], [],
''' Show the suppressed alarms at this scope.
''',
'suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'brief-system',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief' : {
'meta_info' : _MetaInfoClass('Alarms.Brief',
False,
[
_MetaInfoClassMember('brief-card', REFERENCE_CLASS, 'BriefCard' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard',
[], [],
''' Show brief card scope alarm related data.
''',
'brief_card',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('brief-system', REFERENCE_CLASS, 'BriefSystem' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem',
[], [],
''' Show brief system scope alarm related data.
''',
'brief_system',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'brief',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms' : {
'meta_info' : _MetaInfoClass('Alarms',
False,
[
_MetaInfoClassMember('brief', REFERENCE_CLASS, 'Brief' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief',
[], [],
''' A set of brief alarm commands.
''',
'brief',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('detail', REFERENCE_CLASS, 'Detail' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail',
[], [],
''' A set of detail alarm commands.
''',
'detail',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarms',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
}
_meta_table['Alarms.Detail.DetailSystem.Active.AlarmInfo.Otn']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem.Active.AlarmInfo']['meta_info']
_meta_table['Alarms.Detail.DetailSystem.Active.AlarmInfo.Tca']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem.Active.AlarmInfo']['meta_info']
_meta_table['Alarms.Detail.DetailSystem.Active.AlarmInfo']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem.Active']['meta_info']
_meta_table['Alarms.Detail.DetailSystem.History.AlarmInfo.Otn']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem.History.AlarmInfo']['meta_info']
_meta_table['Alarms.Detail.DetailSystem.History.AlarmInfo.Tca']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem.History.AlarmInfo']['meta_info']
_meta_table['Alarms.Detail.DetailSystem.History.AlarmInfo']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem.History']['meta_info']
_meta_table['Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo.Otn']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo']['meta_info']
_meta_table['Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem.Suppressed']['meta_info']
_meta_table['Alarms.Detail.DetailSystem.Clients.ClientInfo']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem.Clients']['meta_info']
_meta_table['Alarms.Detail.DetailSystem.Active']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem']['meta_info']
_meta_table['Alarms.Detail.DetailSystem.History']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem']['meta_info']
_meta_table['Alarms.Detail.DetailSystem.Suppressed']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem']['meta_info']
_meta_table['Alarms.Detail.DetailSystem.Stats']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem']['meta_info']
_meta_table['Alarms.Detail.DetailSystem.Clients']['meta_info'].parent =_meta_table['Alarms.Detail.DetailSystem']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Otn']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Tca']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Otn']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Tca']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo.Otn']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients.ClientInfo']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Stats']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations.DetailLocation']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard.DetailLocations']['meta_info']
_meta_table['Alarms.Detail.DetailCard.DetailLocations']['meta_info'].parent =_meta_table['Alarms.Detail.DetailCard']['meta_info']
_meta_table['Alarms.Detail.DetailSystem']['meta_info'].parent =_meta_table['Alarms.Detail']['meta_info']
_meta_table['Alarms.Detail.DetailCard']['meta_info'].parent =_meta_table['Alarms.Detail']['meta_info']
_meta_table['Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active.AlarmInfo']['meta_info'].parent =_meta_table['Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active']['meta_info']
_meta_table['Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History.AlarmInfo']['meta_info'].parent =_meta_table['Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History']['meta_info']
_meta_table['Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed.SuppressedInfo']['meta_info'].parent =_meta_table['Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed']['meta_info']
_meta_table['Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active']['meta_info'].parent =_meta_table['Alarms.Brief.BriefCard.BriefLocations.BriefLocation']['meta_info']
_meta_table['Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History']['meta_info'].parent =_meta_table['Alarms.Brief.BriefCard.BriefLocations.BriefLocation']['meta_info']
_meta_table['Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed']['meta_info'].parent =_meta_table['Alarms.Brief.BriefCard.BriefLocations.BriefLocation']['meta_info']
_meta_table['Alarms.Brief.BriefCard.BriefLocations.BriefLocation']['meta_info'].parent =_meta_table['Alarms.Brief.BriefCard.BriefLocations']['meta_info']
_meta_table['Alarms.Brief.BriefCard.BriefLocations']['meta_info'].parent =_meta_table['Alarms.Brief.BriefCard']['meta_info']
_meta_table['Alarms.Brief.BriefSystem.Active.AlarmInfo']['meta_info'].parent =_meta_table['Alarms.Brief.BriefSystem.Active']['meta_info']
_meta_table['Alarms.Brief.BriefSystem.History.AlarmInfo']['meta_info'].parent =_meta_table['Alarms.Brief.BriefSystem.History']['meta_info']
_meta_table['Alarms.Brief.BriefSystem.Suppressed.SuppressedInfo']['meta_info'].parent =_meta_table['Alarms.Brief.BriefSystem.Suppressed']['meta_info']
_meta_table['Alarms.Brief.BriefSystem.Active']['meta_info'].parent =_meta_table['Alarms.Brief.BriefSystem']['meta_info']
_meta_table['Alarms.Brief.BriefSystem.History']['meta_info'].parent =_meta_table['Alarms.Brief.BriefSystem']['meta_info']
_meta_table['Alarms.Brief.BriefSystem.Suppressed']['meta_info'].parent =_meta_table['Alarms.Brief.BriefSystem']['meta_info']
_meta_table['Alarms.Brief.BriefCard']['meta_info'].parent =_meta_table['Alarms.Brief']['meta_info']
_meta_table['Alarms.Brief.BriefSystem']['meta_info'].parent =_meta_table['Alarms.Brief']['meta_info']
_meta_table['Alarms.Detail']['meta_info'].parent =_meta_table['Alarms']['meta_info']
_meta_table['Alarms.Brief']['meta_info'].parent =_meta_table['Alarms']['meta_info']
| 1.59375
| 2
|
2020/day5.py
|
Hofei90/Hofei_AdventofCode
| 1
|
12777397
|
<gh_stars>1-10
import os
SKRIPTPFAD = os.path.abspath(os.path.dirname(__file__))
class BinaryBoarding:
def __init__(self, max_row, max_column, seat):
        self.rows = list(range(max_row))
        self.columns = list(range(max_column))
self.seat = seat
self.seat_id = None
def analyze_seat_id(self):
for spell in self.seat:
if spell == "F":
self.change_row(False)
elif spell == "B":
self.change_row(True)
elif spell == "R":
self.change_column(True)
elif spell == "L":
self.change_column(False)
self.seat_id = self.rows[0] * 8 + self.columns[0]
def change_row(self, lower_upper):
if lower_upper:
self.rows = get_upper_half(self.rows)
else:
self.rows = get_lower_half(self.rows)
def change_column(self, lower_upper):
if lower_upper:
self.columns = get_upper_half(self.columns)
else:
self.columns = get_lower_half(self.columns)
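# Worked example (taken from the Advent of Code 2020 day 5 puzzle statement and
# added here purely as an illustration): the boarding pass "FBFBBFFRLR" narrows
# the row range to 44 and the column range to 5, giving seat ID 44 * 8 + 5 = 357.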
def read_input(datei):
with open(datei) as file:
inhalt = file.readlines()
return inhalt
def get_upper_half(plaetze):
half = get_half(plaetze)
return plaetze[half:]
def get_lower_half(plaetze):
half = get_half(plaetze)
return plaetze[:half]
def get_half(plaetze):
return int(len(plaetze) / 2)
def main():
max_row = 128
max_column = 8
inhalt = read_input(os.path.join(SKRIPTPFAD, "input_5_1"))
max_seat_id = 0
seat_ids = []
for seat in inhalt:
boarding = BinaryBoarding(max_row, max_column, seat)
boarding.analyze_seat_id()
max_seat_id = max(boarding.seat_id, max_seat_id)
seat_ids.append(boarding.seat_id)
print(max_seat_id)
# Tag 5 #2
min_seat_id = min(seat_ids)
max_seat_id = max(seat_ids)
    ids = list(range(min_seat_id, max_seat_id + 1))
my_id = set(ids) - set(seat_ids)
print(my_id)
if __name__ == "__main__":
main()
| 3.015625
| 3
|
rubiksnet/shiftlib/rubiks3d/primitive.py
|
javierlorenzod/RubiksNet
| 86
|
12777398
|
<gh_stars>10-100
import torch # this line is necessary for CUDAExtension to load
import rubiksnet_cuda
from rubiksnet.utils import *
__all__ = [
"rubiks_shift_3d_forward",
"rubiks_shift_3d_backward",
"rubiks_shift_3d",
]
def _make_tuple(elem, repeats):
"""
    expand a scalar such as 3 into [3] * repeats for strides/paddings
"""
if isinstance(elem, int):
return [elem] * repeats
else:
assert len(elem) == repeats
return [int(x) for x in elem]
def _get_output_dim(orig, stride, padding):
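    # Standard convolution output-size formula with an implicit kernel size of 1;
    # the callers in compute_output_shape() truncate the result with int().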
return (orig + 2 * padding - 1) / stride + 1
def compute_output_shape(x, stride, padding, shift_dim):
batch, T_in, C_in, H_in, W_in = x.size()
T_out, H_out, W_out = T_in, H_in, W_in
strides = _make_tuple(stride, shift_dim)
paddings = _make_tuple(padding, shift_dim)
if shift_dim == 1:
T_out = _get_output_dim(T_in, strides[0], paddings[0])
elif shift_dim == 2:
H_out = _get_output_dim(H_in, strides[0], paddings[0])
W_out = _get_output_dim(W_in, strides[1], paddings[1])
elif shift_dim == 3:
T_out = _get_output_dim(T_in, strides[0], paddings[0])
H_out = _get_output_dim(H_in, strides[1], paddings[1])
W_out = _get_output_dim(W_in, strides[2], paddings[2])
else:
raise NotImplementedError("only 1D, 2D, 3D shifts supported")
return batch, int(T_out), C_in, int(H_out), int(W_out)
# ========================================================
# ===== CUDA forward primitive =====
# ========================================================
def make_rubiks_forward(forward_float, forward_double, dim):
def _rubiks_forward(x, shift, stride, padding, quantize=False, output=None):
"""
Pure forward pass primitive, no gradient computation
"""
strides = _make_tuple(stride, repeats=dim)
paddings = _make_tuple(padding, repeats=dim)
# x: (N, T, C, H, W), shift: (DIM, C)
assert x.is_cuda, "rubiks shift only works on CUDA tensors"
assert x.size(2) == shift.size(
1
), "x tensor channel dim[2] must match shift channel dim[1]"
assert x.dtype == shift.dtype, "x and shift must have the same dtype"
if x.dtype == torch.float32:
shift_func = forward_float
elif x.dtype == torch.float64:
shift_func = forward_double
else:
raise ValueError(
"rubiks_shift_{}d only supports float32 and float64 (double) dtypes.".format(
dim
)
)
output_shape = compute_output_shape(x, strides, paddings, shift_dim=dim)
output = allocate_output(output, x, output_shape)
ret = shift_func(x, shift, strides, paddings, quantize, output)
assert ret == 0, "CUDA kernel return code {} != 0, error".format(ret)
return output
_rubiks_forward.__name__ = "rubiks_shift_{}d_forward".format(dim)
return _rubiks_forward
# ========================================================
# ===== CUDA backward primitive =====
# ========================================================
def make_rubiks_backward(backward_float, backward_double, dim):
def _rubiks_backward(
upstream_grad,
x,
shift,
stride,
padding,
normalize_grad,
normalize_t_factor=1.0,
quantize=False,
x_grad_output=None,
shift_grad_output=None,
):
"""
Pure backward pass primitive.
Args:
upstream_grad: Receives gradient w.r.t output from upstream.
x: original input tensor
shift: original shift tensor
"""
strides = _make_tuple(stride, repeats=dim)
paddings = _make_tuple(padding, repeats=dim)
assert (
x.is_cuda and upstream_grad.is_cuda
), "rubiks shift only works on CUDA tensors"
if x.dtype == torch.float32:
grad_func = backward_float
elif x.dtype == torch.float64:
grad_func = backward_double
else:
raise ValueError(
"rubiks_shift_{}d only supports float32 and float64 (double) dtypes.".format(
dim
)
)
x_grad = allocate_output(x_grad_output, x, x.size())
shift_grad = allocate_output(shift_grad_output, shift, shift.size())
ret = grad_func(
x,
shift,
upstream_grad,
strides,
paddings,
x_grad,
shift_grad,
normalize_grad,
normalize_t_factor,
quantize,
)
assert ret == 0, "CUDA return code {} != 0, error".format(ret)
return x_grad, shift_grad
_rubiks_backward.__name__ = "rubiks_shift_{}d_backward".format(dim)
return _rubiks_backward
def make_rubiks_functional(forward_method, backward_method, dim):
# make primitive autograd.Function class
class _RubiksShiftFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx, x, shift, stride, padding, normalize_grad, normalize_t_factor, quantize
):
assert isinstance(normalize_grad, bool)
ctx.stride = stride
ctx.padding = padding
ctx.normalize_grad = normalize_grad
ctx.normalize_t_factor = normalize_t_factor
ctx.quantize = quantize
ctx.save_for_backward(x, shift)
return forward_method(x, shift, stride, padding, quantize=quantize)
@staticmethod
def backward(ctx, grad_output):
"""
Refer to https://pytorch.org/docs/stable/notes/extending.html
"""
x, shift = ctx.saved_tensors
x_grad = shift_grad = None
# compute grad only if either X or shift needs gradient
if any(ctx.needs_input_grad):
_x_grad, _shift_grad = backward_method(
grad_output,
x,
shift,
stride=ctx.stride,
padding=ctx.padding,
normalize_grad=ctx.normalize_grad,
normalize_t_factor=ctx.normalize_t_factor,
quantize=ctx.quantize,
)
if ctx.needs_input_grad[0]:
x_grad = _x_grad
if ctx.needs_input_grad[1]:
shift_grad = _shift_grad
# must match the number of input args
return x_grad, shift_grad, None, None, None, None, None
_RubiksShiftFunc.__name__ = "RubiksShift{}DFunc".format(dim)
# user facing functional
def _rubiks_shift(
x,
shift,
stride=1,
padding=0,
normalize_grad=True,
normalize_t_factor=1.0,
quantize=False,
):
"""
Also supports grouped shift
"""
assert len(x.size()) == 5, "x must be [N, T, C, H, W]"
        N, T, C, H, W = x.size()
shift_channel = shift.size(1)
assert C == shift_channel, "group shift is deprecated. Now C dim must match."
if normalize_t_factor == "auto":
normalize_t_factor = T / H
else:
assert isinstance(normalize_t_factor, (int, float))
return _RubiksShiftFunc.apply(
x, shift, stride, padding, normalize_grad, normalize_t_factor, quantize
)
_rubiks_shift.__name__ = "rubiks_shift_{}d".format(dim)
return _rubiks_shift
rubiks_shift_3d_forward = make_rubiks_forward(
rubiksnet_cuda.rubiks_shift_3d_forward_float,
rubiksnet_cuda.rubiks_shift_3d_forward_double,
dim=3,
)
rubiks_shift_3d_backward = make_rubiks_backward(
rubiksnet_cuda.rubiks_shift_3d_backward_float,
rubiksnet_cuda.rubiks_shift_3d_backward_double,
dim=3,
)
rubiks_shift_3d = make_rubiks_functional(
rubiks_shift_3d_forward, rubiks_shift_3d_backward, 3
)
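# Usage sketch (illustrative only; the tensor shapes and the CUDA requirement
# below are assumptions drawn from the docstrings above, not part of the module):
#
#   x = torch.randn(2, 8, 16, 32, 32, device="cuda", requires_grad=True)  # (N, T, C, H, W)
#   shift = torch.zeros(3, 16, device="cuda", requires_grad=True)         # (DIM, C)
#   out = rubiks_shift_3d(x, shift, stride=1, padding=0)
#   out.sum().backward()  # gradients flow to x and shift through _RubiksShiftFunc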
| 2.28125
| 2
|
ietf/meeting/migrations/0026_cancel_107_sessions.py
|
hassanakbar4/ietfdb
| 25
|
12777399
|
# Copyright The IETF Trust 2020, All Rights Reserved
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-03-18 16:18
from __future__ import unicode_literals
from django.db import migrations
def cancel_sessions(apps, schema_editor):
Session = apps.get_model('meeting', 'Session')
SchedulingEvent = apps.get_model('meeting', 'SchedulingEvent')
SessionStatusName = apps.get_model('name', 'SessionStatusName')
Person = apps.get_model('person', 'Person')
excludes = ['txauth','dispatch','add','raw','masque','wpack','drip','gendispatch','privacypass', 'ript', 'secdispatch', 'webtrans']
canceled = SessionStatusName.objects.get(slug='canceled')
person = Person.objects.get(name='<NAME>')
sessions = Session.objects.filter(meeting__number=107,group__type__in=['wg','rg','ag']).exclude(group__acronym__in=excludes)
for session in sessions:
SchedulingEvent.objects.create(
session = session,
status = canceled,
by = person)
def reverse(apps, schema_editor):
SchedulingEvent = apps.get_model('meeting', 'SchedulingEvent')
Person = apps.get_model('person', 'Person')
person = Person.objects.get(name='<NAME>')
SchedulingEvent.objects.filter(session__meeting__number=107, by=person).delete()
class Migration(migrations.Migration):
dependencies = [
('meeting', '0025_rename_type_session_to_regular'),
]
operations = [
migrations.RunPython(cancel_sessions, reverse),
]
| 1.710938
| 2
|
ibalert/asgi.py
|
ItsMilann/channels-alert
| 0
|
12777400
|
<reponame>ItsMilann/channels-alert
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
from django.urls import path
from ibalert.consumers import NotificationConsumer
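# AuthMiddlewareStack populates scope["user"] from the Django session, so the
# consumer can distinguish authenticated users on the websocket connection.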
application = ProtocolTypeRouter(
{
"websocket": AuthMiddlewareStack(
URLRouter(
[
                    path("notifications/", NotificationConsumer.as_asgi()),
]
)
)
}
)
| 1.859375
| 2
|
tests/conftest.py
|
lumapps/changelog-generator
| 0
|
12777401
|
<filename>tests/conftest.py
import os
import tempfile
from pathlib import Path
import pytest
from git import Repo
@pytest.fixture(scope="session", autouse=True)
def core_repo() -> Repo:
tempdir = Path(tempfile.gettempdir()) / "changelog_generator" / "core"
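    # Clone the repository on first use; if the target directory already exists,
    # os.makedirs raises OSError and the existing checkout is reused instead.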
try:
os.makedirs(tempdir)
repo = Repo.clone_from("<EMAIL>:lumapps/core.git", tempdir)
except OSError:
repo = Repo(tempdir)
return repo
@pytest.fixture(scope="session", autouse=True)
def organization_repo() -> Repo:
tempdir = Path(tempfile.gettempdir()) / "changelog_generator" / "organization"
try:
os.makedirs(tempdir)
repo = Repo.clone_from("<EMAIL>:lumapps/organization.git", tempdir)
except OSError:
repo = Repo(tempdir)
return repo
| 1.875
| 2
|
pycatia/knowledge_interfaces/relations.py
|
evereux/catia_python
| 90
|
12777402
|
<reponame>evereux/catia_python<filename>pycatia/knowledge_interfaces/relations.py
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from typing import Iterator
from pathlib import Path
from pycatia.exception_handling.exceptions import CATIAApplicationException
from pycatia.knowledge_interfaces.check import Check
from pycatia.knowledge_interfaces.design_table import DesignTable
from pycatia.knowledge_interfaces.formula import Formula
from pycatia.knowledge_interfaces.law import Law
from pycatia.knowledge_interfaces.optimizations import Optimizations
from pycatia.knowledge_interfaces.relation import Relation
from pycatia.knowledge_interfaces.rule import Rule
from pycatia.knowledge_interfaces.set_of_equation import SetOfEquation
from pycatia.system_interfaces.any_object import AnyObject
from pycatia.system_interfaces.collection import Collection
from pycatia.types.general import cat_variant
class Relations(Collection):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.Collection
| Relations
|
| Represents the collection of relations of the part or the
| product.
|
| A relation computes values. A relation can belong to one of the following
| types:
|
| Formula
| It combines parameters to compute the value of one output parameter only.
| For example, the mass of a cuboid can be the output parameter of a formula,
| while the value is computed using the following
| parameters:
|
|
| FormulaBody = (height*width*depth)*density
|
|
| Program
| It combines conditions and actions on parameters to compute one or several
| output parameter values. For example, the following is a
| program:
|
| ProgramBody = if (mass>2kg) { depth=2mm length=10mm } else { depth=1mm length=5mm }
|
|
| Check
| It only contains conditions on parameter values. For example, the following
| is a check:
|
| CheckBody = mass<10kg
|
|
| The parameters should be defined previously.
|
| The following example shows how to retrieve the collection of relations from a
| newly created part document:
|
| Dim CATDocs As Documents
| Set CATDocs = CATIA.Documents
| Dim part As Document
| Set part = CATDocs.Add("CATPart")
| Dim relations As Relations
| Set relations = part.Relations
|
|
| See also:
| Formula, Rule, Check, DesignTable
"""
def __init__(self, com_object):
super().__init__(com_object, child_object=Relation)
self.relations = com_object
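    # A rough pycatia equivalent of the Visual Basic example in the class
    # docstring above (an illustrative sketch only; the entry point and the
    # property names are assumptions based on the rest of this package):
    #
    #   from pycatia import catia
    #   caa = catia()
    #   caa.documents.add("Part")
    #   part = caa.active_document.part
    #   relations = part.relations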
@property
def optimizations(self) -> Optimizations:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property Optimizations() As Optimizations (Read Only)
|
| Returns the optimization collection.
| It can be empty if no optimization is defined in the
| document.
| This property is available only when the Product Engineering Optimizer
| license is available.
:return: Optimizations
:rtype: Optimizations
"""
return Optimizations(self.relations.Optimizations)
def create_check(self, i_name: str, i_comment: str, i_check_body: str) -> Check:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func CreateCheck(CATBSTR iName,
| CATBSTR iComment,
| CATBSTR iCheckBody) As Check
|
| Creates a check relation and adds it to the part's collection of
| relations.
|
| Parameters:
|
| iName
| The check name
| iComment
| A description of the check
| iCheckBody
| The check definition
|
| Returns:
| The created check
| Example:
| This example creates the maximummass check relation and adds it to the
| newly created part:
|
| Dim CATDocs As Documents
| Set CATDocs = CATIA.Documents
| Dim partdoc As Document
| Set partdoc = CATDocs.Add("CATPart")
| Dim part As Part
| Set part = partdoc.Part
| Dim massCheck As Check
| Set massCheck = part.Relations.CreateCheck
| ("maximummass",
| "Ensures that the mass is less than 10
| kg",
| "mass<10kg")
:param str i_name:
:param str i_comment:
:param str i_check_body:
:return: Check
:rtype: Check
"""
return Check(self.relations.CreateCheck(i_name, i_comment, i_check_body))
def create_design_table(self, i_name: str, i_comment: str, i_copy_mode: bool, i_sheet_path: Path) -> DesignTable:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func CreateDesignTable(CATBSTR iName,
| CATBSTR iComment,
| boolean iCopyMode,
| CATBSTR iSheetPath) As DesignTable
|
                | Creates a design table based on a file organized in a vertical way and
| adds it to the part's collection of relations.
|
| Parameters:
|
| iName
| The design table name
| iComment
| A description of the design table
| iCopyMode
|
| Returns:
| The created design table
| Example:
| This example creates the dt design table and adds it to the newly
| created part:
|
| Dim CATDocs As Documents
| Set CATDocs = CATIA.Documents
| Dim partdoc As Document
| Set partdoc = CATDocs.Add("CATPart")
| Dim part As Part
| Set part = partdoc.Part
| Dim designtable As DesignTable
| Set designtable = part.Relations.CreateDesignTable
| ("dt",
| "Ensures that the mass is less than 10
| kg",
| TRUE,
|
| "/u/users/client/data/sheet.txt")
:param str i_name:
:param str i_comment:
:param bool i_copy_mode:
:param Path i_sheet_path:
:return: DesignTable
:rtype: DesignTable
"""
if not i_sheet_path.exists():
raise CATIAApplicationException(f'Could not find design table "{i_sheet_path}".')
return DesignTable(self.relations.CreateDesignTable(i_name, i_comment, i_copy_mode, i_sheet_path))
def create_formula(self, i_name, i_comment, i_output_parameter, i_formula_body):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func CreateFormula(CATBSTR iName,
| CATBSTR iComment,
| Parameter iOutputParameter,
| CATBSTR iFormulaBody) As Formula
|
| Creates a formula relation and adds it to the part's collection of
| relations.
|
| Parameters:
|
| iName
| The formula name
| iComment
| A description of the formula
| iOutputParameter
| The parameter which stores the result of the formula
|
| iFormulaBody
| The formula definition
|
| Returns:
| The created formula
| Example:
| This example creates the computemass formula relation and adds it to
| the newly created part:
|
| Dim CATDocs As Documents
| Set CATDocs = CATIA.Documents
| Dim partdoc As Document
| Set partdoc = CATDocs.Add("CATPart")
| Dim part As Part
| Set part = partdoc.Part
| Dim massFormula As Formula
| Set massFormula = part.Relations.CreateFormula
| ("computemass",
| "Computes the cuboid mass",
| mass,
| "(height*width*depth)*density")
:param str i_name:
:param str i_comment:
:param Parameter i_output_parameter:
:param str i_formula_body:
:return: Formula
:rtype: Formula
"""
return Formula(self.relations.CreateFormula(i_name, i_comment, i_output_parameter.com_object, i_formula_body))
def create_horizontal_design_table(self,
i_name: str,
i_comment: str,
i_copy_mode: bool,
i_sheet_path: str) -> DesignTable:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func CreateHorizontalDesignTable(CATBSTR iName,
| CATBSTR iComment,
| boolean iCopyMode,
| CATBSTR iSheetPath) As DesignTable
|
                | Creates a design table based on a file organized in a horizontal way and
| adds it to the part's collection of relations.
|
| Parameters:
|
| iName
| The design table name
| iComment
| A description of the design table
| iCopyMode
|
| Returns:
| The created design table
| Example:
| This example creates the dt design table and adds it to the newly
| created part:
|
| Dim CATDocs As Documents
| Set CATDocs = CATIA.Documents
| Dim partdoc As Document
| Set partdoc = CATDocs.Add("CATPart")
| Dim part As Part
| Set part = partdoc.Part
| Dim designtable As DesignTable
| Set designtable = part.Relations.CreateHorizontalDesignTable
| ("dt",
| "Ensures that the mass is less than 10
| kg",
| TRUE,
| "/u/users/client/data/horizontalsheet.txt")
:param str i_name:
:param str i_comment:
:param bool i_copy_mode:
:param str i_sheet_path:
:return: DesignTable
:rtype: DesignTable
"""
return DesignTable(self.relations.CreateHorizontalDesignTable(i_name, i_comment, i_copy_mode, i_sheet_path))
def create_law(self, i_name: str, i_comment: str, i_law_body: str) -> Law:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func CreateLaw(CATBSTR iName,
| CATBSTR iComment,
| CATBSTR iLawBody) As Law
|
| Creates a law relation and adds it to the part's collection of
| relations.
|
| Parameters:
|
| iName
| The law name
| iComment
| A description of the law
| iLawBody
| The law definition
|
| Returns:
| The created law
:param str i_name:
:param str i_comment:
:param str i_law_body:
:return: Law
:rtype: Law
"""
return Law(self.relations.CreateLaw(i_name, i_comment, i_law_body))
def create_program(self, i_name: str, i_comment: str, i_program_body: str) -> Rule:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func CreateProgram(CATBSTR iName,
| CATBSTR iComment,
| CATBSTR iProgramBody) As Rule
|
| Creates a program relation and adds it to the part's collection of
| relations.
|
| Parameters:
|
| iName
| The program name
| iComment
| A description of the program
| iProgramBody
| The program definition
|
| Returns:
| The created program
| Example:
| This example creates the selectdepth program relation and adds it to
| the newly created part:
|
| Dim CATDocs As Documents
| Set CATDocs = CATIA.Documents
| Dim partdoc As Document
| Set partdoc = CATDocs.Add("CATPart")
| Dim part As Part
| Set part = partdoc.Part
| Dim depthProgram As Program
| Set depthProgram = part.Relations.CreateProgram
| ("selectdepth",
| "Select depth with respect to
| mass",
| "if (mass>2kg) { depth=2mm } else { depth=1 mm
| }")
:param str i_name:
:param str i_comment:
:param str i_program_body:
:return: Rule
:rtype: Rule
"""
return Rule(self.relations.CreateProgram(i_name, i_comment, i_program_body))
def create_rule_base(self, i_name: str) -> Relation:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func CreateRuleBase(CATBSTR iName) As Relation
|
| Creates a rulebase.
|
| Parameters:
|
| iName
| The name of the rulebase.
|
| Returns:
| The created rulebase.
| See also:
| ExpertRuleBase
:param str i_name:
:return: Relation
:rtype: Relation
"""
return Relation(self.relations.CreateRuleBase(i_name))
def create_set_of_equations(self, i_name: str, i_comment: str, i_formula_body: str) -> SetOfEquation:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func CreateSetOfEquations(CATBSTR iName,
| CATBSTR iComment,
| CATBSTR iFormulaBody) As SetOfEquation
|
| Creates a set of equations.
|
| Parameters:
|
| iName
| The name of the set of equation.
| iComment
| The comment of the set of equation.
| iFormulaBody
| The body of the set of equation " a==b+4; c ≤ 90".
|
|
| Returns:
| The created set of equations
:param str i_name:
:param str i_comment:
:param str i_formula_body:
:return: SetOfEquation
:rtype: SetOfEquation
"""
return SetOfEquation(self.relations.CreateSetOfEquations(i_name, i_comment, i_formula_body))
def create_set_of_relations(self, i_parent: AnyObject) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Sub CreateSetOfRelations(AnyObject iParent)
|
| Creates a set of relations and appends it to a parent
| object.
|
| Parameters:
|
| iParent
| The object to which the set is appended
:param AnyObject i_parent:
:return: None
:rtype: None
"""
return self.relations.CreateSetOfRelations(i_parent.com_object)
def generate_xml_report_for_checks(self, i_name: str) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Sub GenerateXMLReportForChecks(CATBSTR iName)
|
| Generates an XML Report on all checks in the current
| document.
|
| Parameters:
|
| iName
| The name of the XML file
:param str i_name:
:return: None
:rtype: None
"""
return self.relations.GenerateXMLReportForChecks(i_name)
def item(self, i_index: cat_variant) -> Relation:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func Item(CATVariant iIndex) As Relation
|
| Retrieves a relation using its index or its name from the Relations
| collection.
|
| Parameters:
|
| iIndex
| The index or the name of the relation to retrieve from the
                | collection of relations. As a number, this index is the rank of the relation
| in the collection. The index of the first relation in the collection is 1, and
| the index of the last relation is Count. As a string, it is the name you
| assigned to the relation using the
|
| AnyObject.Name property or when creating the relation.
|
| Returns:
| The retrieved relation
| Example:
| This example retrieves the last relation in the relations
| collection.
|
| Dim lastRelation As Relation
| Set lastRelation = relations.Item(relations.Count)
:param cat_variant i_index:
:return: Relation
:rtype: Relation
"""
return Relation(self.relations.Item(i_index))
def remove(self, i_index: cat_variant) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Sub Remove(CATVariant iIndex)
|
| Removes a relation from the Relations collection.
|
| Parameters:
|
| iIndex
| The index or the name of the relation to remove from the collection
                | of relations. As a number, this index is the rank of the relation in the
| collection. The index of the first relation in the collection is 1, and the
| index of the last relation is Count. As a string, it is the name you assigned
| to the relation using the
|
| AnyObject.Name property or when creating the relation.
|
|
| Example:
| This example removes the relation named density from the relations
| collection.
|
| relations.Remove("density")
:param cat_variant i_index:
:return: None
:rtype: None
"""
return self.relations.Remove(i_index)
def sub_list(self, i_feature: AnyObject, i_recursively: bool) -> 'Relations':
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func SubList(AnyObject iFeature,
| boolean iRecursively) As Relations
|
| Returns a sub-collection of relations aggregated to an
| object.
|
| Parameters:
|
| iFeature
                | The object used to filter the whole relation collection to get
| the resulting sub-collection.
| iRecursively
| A flag to specify if children parameters are to be searched for in
| the returned collection
|
| Returns:
| The resulting sub-collection
| Example:
| This example shows how to get a collection of relations that are under
| a Pad
|
| Dim Relations1 As Relations
                | Set Relations1 = CATIA.ActiveDocument.Part.Relations ' gets the collection of relations in the
| part
| Dim Body0 As AnyObject
| Set Body0 = CATIA.ActiveDocument.Part.Bodies.Item ( "MechanicalTool.1" )
| Dim Pad1 As AnyObject
| Set Pad1 = Body0.Shapes.Item ( "Pad.1" ) ' gets the pad Pad.1
| Dim Relations2 As Relations
| Set Relations2 = Relations1.SubList(Pad1, TRUE) ' gets the collection of relations that are
| under the pad Pad.1
:param AnyObject i_feature:
:param bool i_recursively:
:return: Relations
:rtype: Relations
"""
return Relations(self.relations.SubList(i_feature.com_object, i_recursively))
def __getitem__(self, n: int) -> Relation:
if (n + 1) > self.count:
            raise IndexError
return Relation(self.relations.item(n + 1))
def __iter__(self) -> Iterator[Relation]:
for i in range(self.count):
yield self.child_object(self.com_object.item(i + 1))
def __repr__(self):
return f'Relations(name="{self.name}")'
| 1.929688
| 2
|
src/semantic_parsing_with_constrained_lm/configs/qdmr_break_emnlp_camera_ready.py
|
microsoft/semantic_parsing_with_constrained_lm
| 17
|
12777403
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""This config file is for running experiments needed for the EMNLP camera ready.
It will generate the following experiments (depending on the value of eval_split and model):
- 100 dev examples
- GPT-3 Constrained Canonical, n = 1000
- GPT-3 Constrained Canonical, n = 100
- GPT-3 Constrained Canonical, n = 25
- GPT-3 Constrained Canonical, n = 200
- GPT-3 Constrained Meaning, n = 200
- GPT-3 Unconstrained Canonical, n = 200
- GPT-3 Unconstrained Meaning, n = 200
- All dev examples
- GPT-3 Constrained Meaning, n = 200
- BART Constrained Canonical
- BART Constrained Meaning
- BART Unconstrained Canonical
- BART Unconstrained Meaning
- GPT-2 Constrained Canonical
- GPT-2 Constrained Meaning
- GPT-2 Unconstrained Canonical
- GPT-2 Unconstrained Meaning
"""
from typing import Any, Callable, Dict
import torch
from typing_extensions import Literal
from semantic_parsing_with_constrained_lm.configs.lib.common import PromptOrder, make_semantic_parser
from semantic_parsing_with_constrained_lm.datum import Datum
from semantic_parsing_with_constrained_lm.domains.qdmr_break import (
BreakDataType,
BreakDatum,
BreakMetrics,
BreakPieces,
BreakSamplingType,
)
from semantic_parsing_with_constrained_lm.fit_max_steps import compute_and_print_fit
from semantic_parsing_with_constrained_lm.lm import TRAINED_MODEL_DIR, AutoregressiveModel, ClientType
from semantic_parsing_with_constrained_lm.lm_bart import Seq2SeqBart
from semantic_parsing_with_constrained_lm.lm_openai_gpt3 import IncrementalOpenAIGPT3
from semantic_parsing_with_constrained_lm.run_exp import EvalSplit, Experiment
from semantic_parsing_with_constrained_lm.search import PartialParse, StartsWithSpacePartialParse
def build_config(
log_dir, # pylint: disable=unused-argument
eval_split: EvalSplit,
model: ClientType,
rank: int,
**kwargs: Any, # pylint: disable=unused-argument
) -> Dict[str, Callable[[], Experiment]]:
BEAM_SIZE = 10
DEV_SUBSET_SIZE = 100
MAX_STEPS_FOR_COMPLETION = 145
use_gpt3 = model == ClientType.GPT3
def create_exp(
problem_type: Literal[
"constrained", "unconstrained-beam", "unconstrained-greedy"
],
output_type: BreakDataType,
train_size: int,
exp_name: str,
):
lm: AutoregressiveModel
if model == ClientType.GPT3:
lm = IncrementalOpenAIGPT3()
elif model == ClientType.BART:
lm = Seq2SeqBart(
# Part after / is set to match lm_finetune.py
f"{TRAINED_MODEL_DIR}/20000/break_{output_type}/",
device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
)
else:
raise ValueError(model)
piece = BreakPieces.build(
tokenizer=lm.tokenizer,
data_type=output_type,
train_sampling_type=BreakSamplingType.proportional,
test_sampling_type=BreakSamplingType.random,
train_total=train_size,
test_total=DEV_SUBSET_SIZE,
seed=0,
)
train_data = piece.train_data
test_data = piece.test_data
if eval_split == EvalSplit.TrainSubset:
piece = BreakPieces.build(
tokenizer=lm.tokenizer,
data_type=output_type,
train_sampling_type=BreakSamplingType.proportional,
test_sampling_type=BreakSamplingType.random,
train_total=100000,
test_total=1,
seed=0,
)
test_data = piece.train_data[-100:]
elif eval_split == EvalSplit.DevFull:
piece = BreakPieces.build(
tokenizer=lm.tokenizer,
data_type=output_type,
train_sampling_type=BreakSamplingType.proportional,
test_sampling_type=BreakSamplingType.random,
train_total=train_size,
test_total=1000000,
seed=0,
skip_if_needed=False,
)
test_data = piece.test_data
elif eval_split == EvalSplit.DevSubset:
# train_data and test_data were already set outside of this if block
pass
else:
raise ValueError(f"{eval_split} not supported currently")
partial_parse_builder: Callable[[BreakDatum], PartialParse]
if problem_type == "constrained":
partial_parse_builder = piece.partial_parse_builder # type: ignore
beam_size = BEAM_SIZE
elif problem_type.startswith("unconstrained"):
# TODO: Only impose this if we are using a GPT-2-style tokenizer
partial_parse = StartsWithSpacePartialParse(lm.tokenizer)
partial_parse_builder = lambda _: partial_parse
if problem_type == "unconstrained-beam":
beam_size = BEAM_SIZE
elif problem_type == "unconstrained-greedy":
beam_size = 1
else:
raise ValueError(problem_type)
else:
raise ValueError(f"{problem_type} not allowed")
# Compute max_steps_fn
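        # Fit a linear model (intercept + slope * num_input_tokens) over the
        # training pairs so decoding can stop at the predicted output length,
        # capped at MAX_STEPS_FOR_COMPLETION in max_steps_fn below.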
pairs = []
for d in train_data:
num_input_tokens = len(lm.tokenizer.tokenize(d.natural))
num_output_tokens = len(lm.tokenizer.tokenize(d.canonical)) + 1
pairs.append((num_input_tokens, num_output_tokens))
max_steps_intercept, max_steps_slope = compute_and_print_fit(pairs, 10, 3)
def max_steps_fn(datum: Datum) -> int:
return min(
int(
len(lm.tokenizer.tokenize(datum.natural)) * max_steps_slope
+ max_steps_intercept
),
MAX_STEPS_FOR_COMPLETION,
)
parser = make_semantic_parser(
train_data,
lm,
use_gpt3,
MAX_STEPS_FOR_COMPLETION,
beam_size,
partial_parse_builder,
max_steps_fn,
PromptOrder.BestLast,
)
return Experiment( # type: ignore
model=parser,
metrics={
"break_metrics": BreakMetrics(
log_dir=log_dir / exp_name / str(rank),
data_type=piece.data_type,
num_results=BEAM_SIZE,
),
},
test_data=test_data,
client=lm,
)
def add_exp_to_dict(
exps_dict: Dict[str, Callable[[], Experiment]],
problem_type: Literal[
"constrained", "unconstrained-beam", "unconstrained-greedy"
],
output_type: BreakDataType,
train_size: int,
):
exp_name = (
f"break_{model}_{eval_split}_{problem_type}_{output_type}_train{train_size}"
)
exps_dict[exp_name] = lambda: create_exp(
problem_type, output_type, train_size, exp_name
)
result: Dict[str, Callable[[], Experiment]] = {}
if eval_split == EvalSplit.DevFull:
if use_gpt3:
# - GPT-3 Constrained Meaning, n = 200
add_exp_to_dict(result, "constrained", BreakDataType.nested, train_size=200)
else:
# - BART Constrained Canonical
# - BART Constrained Meaning
# - BART Unconstrained Canonical
# - BART Unconstrained Meaning
# - GPT-2 Constrained Canonical
# - GPT-2 Constrained Meaning
# - GPT-2 Unconstrained Canonical
# - GPT-2 Unconstrained Meaning
add_exp_to_dict(result, "constrained", BreakDataType.nested, train_size=200)
add_exp_to_dict(result, "constrained", BreakDataType.qdmr, train_size=200)
add_exp_to_dict(
result, "unconstrained-greedy", BreakDataType.nested, train_size=200
)
add_exp_to_dict(
result, "unconstrained-greedy", BreakDataType.qdmr, train_size=200
)
elif eval_split == EvalSplit.DevSubset:
if use_gpt3:
# - GPT-3 Constrained Canonical, n = 1000
# - GPT-3 Constrained Canonical, n = 100
# - GPT-3 Constrained Canonical, n = 25
add_exp_to_dict(
result, "constrained", BreakDataType.nested, train_size=1000
)
add_exp_to_dict(result, "constrained", BreakDataType.nested, train_size=100)
add_exp_to_dict(result, "constrained", BreakDataType.nested, train_size=25)
# - GPT-3 Constrained Canonical, n = 200
# - GPT-3 Constrained Meaning, n = 200
# - GPT-3 Unconstrained Canonical, n = 200
# - GPT-3 Unconstrained Meaning, n = 200
add_exp_to_dict(result, "constrained", BreakDataType.nested, train_size=200)
add_exp_to_dict(result, "constrained", BreakDataType.qdmr, train_size=200)
add_exp_to_dict(
result, "unconstrained-greedy", BreakDataType.nested, train_size=200
)
add_exp_to_dict(
result, "unconstrained-greedy", BreakDataType.qdmr, train_size=200
)
else:
# No subset experiments for BART and GPT-2
pass
elif eval_split == EvalSplit.TrainSubset:
add_exp_to_dict(result, "constrained", BreakDataType.nested, train_size=200)
add_exp_to_dict(result, "constrained", BreakDataType.qdmr, train_size=200)
add_exp_to_dict(
result, "unconstrained-greedy", BreakDataType.nested, train_size=200
)
add_exp_to_dict(
result, "unconstrained-greedy", BreakDataType.qdmr, train_size=200
)
return result
| 1.929688
| 2
|
egret/model_library/transmission/bus.py
|
breldridge/Egret
| 0
|
12777404
|
# ___________________________________________________________________________
#
# EGRET: Electrical Grid Research and Engineering Tools
# Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
"""
This module contains the declarations for the modeling components
typically used for buses (including loads and shunts)
"""
import pyomo.environ as pe
import egret.model_library.decl as decl
from pyomo.core.util import quicksum
from pyomo.core.expr.numeric_expr import LinearExpression
from egret.model_library.defn import FlowType, CoordinateType, ApproximationType
from math import tan, radians
def declare_var_vr(model, index_set, **kwargs):
"""
Create variable for the real component of the voltage at a bus
"""
decl.declare_var('vr', model=model, index_set=index_set, **kwargs)
def declare_var_vj(model, index_set, **kwargs):
"""
Create variable for the imaginary component of the voltage at a bus
"""
decl.declare_var('vj', model=model, index_set=index_set, **kwargs)
def declare_var_vm(model, index_set, **kwargs):
"""
Create variable for the voltage magnitude of the voltage at a bus
"""
decl.declare_var('vm', model=model, index_set=index_set, **kwargs)
def declare_var_va(model, index_set, **kwargs):
"""
Create variable for the phase angle of the voltage at a bus
"""
decl.declare_var('va', model=model, index_set=index_set, **kwargs)
def declare_expr_vmsq(model, index_set, coordinate_type=CoordinateType.POLAR):
"""
Create an expression for the voltage magnitude squared at a bus
"""
m = model
expr_set = decl.declare_set('_expr_vmsq', model, index_set)
m.vmsq = pe.Expression(expr_set)
if coordinate_type == CoordinateType.RECTANGULAR:
for bus in expr_set:
m.vmsq[bus] = m.vr[bus] ** 2 + m.vj[bus] ** 2
elif coordinate_type == CoordinateType.POLAR:
for bus in expr_set:
m.vmsq[bus] = m.vm[bus] ** 2
def declare_var_vmsq(model, index_set, **kwargs):
"""
Create auxiliary variable for the voltage magnitude squared at a bus
"""
decl.declare_var('vmsq', model=model, index_set=index_set, **kwargs)
def declare_eq_vmsq(model, index_set, coordinate_type=CoordinateType.POLAR):
"""
Create a constraint relating vmsq to the voltages
"""
m = model
con_set = decl.declare_set('_con_eq_vmsq', model, index_set)
m.eq_vmsq = pe.Constraint(con_set)
if coordinate_type == CoordinateType.POLAR:
for bus in con_set:
m.eq_vmsq[bus] = m.vmsq[bus] == m.vm[bus] ** 2
elif coordinate_type == CoordinateType.RECTANGULAR:
for bus in con_set:
m.eq_vmsq[bus] = m.vmsq[bus] == m.vr[bus]**2 + m.vj[bus]**2
else:
raise ValueError('unexpected coordinate_type: {0}'.format(str(coordinate_type)))
def declare_var_ir_aggregation_at_bus(model, index_set, **kwargs):
"""
Create a variable for the aggregated real current at a bus
"""
decl.declare_var('ir_aggregation_at_bus', model=model, index_set=index_set, **kwargs)
def declare_var_ij_aggregation_at_bus(model, index_set, **kwargs):
"""
Create a variable for the aggregated imaginary current at a bus
"""
decl.declare_var('ij_aggregation_at_bus', model=model, index_set=index_set, **kwargs)
def declare_var_pl(model, index_set, **kwargs):
"""
Create variable for the real power load at a bus
"""
decl.declare_var('pl', model=model, index_set=index_set, **kwargs)
def declare_var_ql(model, index_set, **kwargs):
"""
Create variable for the reactive power load at a bus
"""
decl.declare_var('ql', model=model, index_set=index_set, **kwargs)
def declare_var_p_nw(model, index_set, **kwargs):
"""
Create variable for the net real power withdrawals at a bus
"""
decl.declare_var('p_nw', model=model, index_set=index_set, **kwargs)
def declare_var_q_nw(model, index_set, **kwargs):
"""
Create variable for the net reactive power withdrawals at a bus
"""
decl.declare_var('q_nw', model=model, index_set=index_set, **kwargs)
def declare_expr_shunt_power_at_bus(model, index_set, shunt_attrs,
coordinate_type=CoordinateType.POLAR):
"""
Create the expression for the shunt power at the bus
"""
m = model
expr_set = decl.declare_set('_expr_shunt_at_bus_set', model, index_set)
m.shunt_p = pe.Expression(expr_set, initialize=0.0)
m.shunt_q = pe.Expression(expr_set, initialize=0.0)
if coordinate_type == CoordinateType.POLAR:
for bus_name in expr_set:
if bus_name in shunt_attrs['bus']:
vmsq = m.vm[bus_name]**2
m.shunt_p[bus_name] = shunt_attrs['gs'][bus_name]*vmsq
m.shunt_q[bus_name] = -shunt_attrs['bs'][bus_name]*vmsq
elif coordinate_type == CoordinateType.RECTANGULAR:
for bus_name in expr_set:
if bus_name in shunt_attrs['bus']:
vmsq = m.vr[bus_name]**2 + m.vj[bus_name]**2
m.shunt_p[bus_name] = shunt_attrs['gs'][bus_name]*vmsq
m.shunt_q[bus_name] = -shunt_attrs['bs'][bus_name]*vmsq
def _get_dc_dicts(dc_inlet_branches_by_bus, dc_outlet_branches_by_bus, con_set):
if dc_inlet_branches_by_bus is None:
assert dc_outlet_branches_by_bus is None
dc_inlet_branches_by_bus = {bn:() for bn in con_set}
if dc_outlet_branches_by_bus is None:
dc_outlet_branches_by_bus = dc_inlet_branches_by_bus
return dc_inlet_branches_by_bus, dc_outlet_branches_by_bus
def declare_expr_p_net_withdraw_at_bus(model, index_set, bus_p_loads, gens_by_bus, bus_gs_fixed_shunts,
dc_inlet_branches_by_bus=None, dc_outlet_branches_by_bus=None,
vm_by_bus=None, **kwargs):
"""
Create a named pyomo expression for the net real power withdrawal at each bus
"""
m = model
decl.declare_expr('p_nw', model, index_set)
dc_inlet_branches_by_bus, dc_outlet_branches_by_bus = _get_dc_dicts(dc_inlet_branches_by_bus,
dc_outlet_branches_by_bus,
index_set)
if kwargs and vm_by_bus is not None:
for idx,val in kwargs.items():
if idx=='linearize_shunts' and val==True:
for b in index_set:
m.p_nw[b] = ( bus_gs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2)
+ (m.pl[b] if bus_p_loads[b] != 0.0 else 0.0)
- sum(m.pg[g] for g in gens_by_bus[b])
+ sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
- sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
)
return
if idx=='linearize_shunts' and val==False:
for b in index_set:
m.p_nw[b] = ( bus_gs_fixed_shunts[b] * vm_by_bus[b] ** 2
+ (m.pl[b] if bus_p_loads[b] != 0.0 else 0.0)
- sum(m.pg[g] for g in gens_by_bus[b])
+ sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
- sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
)
return
for b in index_set:
m.p_nw[b] = ( bus_gs_fixed_shunts[b]
+ ( m.pl[b] if bus_p_loads[b] != 0.0 else 0.0 )
- sum( m.pg[g] for g in gens_by_bus[b] )
+ sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
- sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
)
def declare_eq_p_net_withdraw_at_bus(model, index_set, bus_p_loads, gens_by_bus, bus_gs_fixed_shunts,
dc_inlet_branches_by_bus=None, dc_outlet_branches_by_bus=None,
vm_by_bus=None, **kwargs):
"""
Create a named pyomo constraint for the net real power withdrawal at each bus
"""
m = model
con_set = decl.declare_set('_con_eq_p_net_withdraw_at_bus', model, index_set)
dc_inlet_branches_by_bus, dc_outlet_branches_by_bus = _get_dc_dicts(dc_inlet_branches_by_bus,
dc_outlet_branches_by_bus,
index_set)
m.eq_p_net_withdraw_at_bus = pe.Constraint(con_set)
constr = m.eq_p_net_withdraw_at_bus
if kwargs and vm_by_bus is not None:
for idx,val in kwargs.items():
if idx=='linearize_shunts' and val==True:
for b in index_set:
constr[b] = m.p_nw[b] == ( bus_gs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2)
+ (m.pl[b] if bus_p_loads[b] != 0.0 else 0.0)
- sum(m.pg[g] for g in gens_by_bus[b])
+ sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
- sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
)
return
if idx=='linearize_shunts' and val==False:
for b in index_set:
constr[b] = m.p_nw[b] == ( bus_gs_fixed_shunts[b] * vm_by_bus[b] ** 2
+ (m.pl[b] if bus_p_loads[b] != 0.0 else 0.0)
- sum(m.pg[g] for g in gens_by_bus[b])
+ sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
- sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
)
return
else:
for b in index_set:
constr[b] = m.p_nw[b] == ( bus_gs_fixed_shunts[b]
+ ( m.pl[b] if bus_p_loads[b] != 0.0 else 0.0 )
- sum( m.pg[g] for g in gens_by_bus[b] )
+ sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
- sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
)
def declare_expr_q_net_withdraw_at_bus(model, index_set, bus_q_loads, gens_by_bus, bus_bs_fixed_shunts,
vm_by_bus=None, **kwargs):
"""
Create a named pyomo expression for the net reactive power withdrawal at each bus
"""
m = model
decl.declare_expr('q_nw', model, index_set)
if kwargs and vm_by_bus is not None:
for idx,val in kwargs.items():
if idx=='linearize_shunts' and val==True:
for b in index_set:
m.q_nw[b] = (-bus_bs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2)
+ (m.ql[b] if bus_q_loads[b] != 0.0 else 0.0)
- sum(m.qg[g] for g in gens_by_bus[b])
)
return
if idx=='linearize_shunts' and val==False:
for b in index_set:
m.q_nw[b] = (-bus_bs_fixed_shunts[b] * vm_by_bus[b] ** 2
+ (m.ql[b] if bus_q_loads[b] != 0.0 else 0.0)
- sum(m.qg[g] for g in gens_by_bus[b])
)
return
for b in index_set:
m.q_nw[b] = (-bus_bs_fixed_shunts[b]
+ ( m.ql[b] if bus_q_loads[b] != 0.0 else 0.0 )
- sum( m.qg[g] for g in gens_by_bus[b] )
)
def declare_eq_q_net_withdraw_at_bus(model, index_set, bus_q_loads, gens_by_bus, bus_bs_fixed_shunts,
vm_by_bus=None, **kwargs):
"""
Create a named pyomo constraint for the net reactive power withdrawal at each bus
"""
m = model
con_set = decl.declare_set('_con_eq_q_net_withdraw_at_bus', model, index_set)
m.eq_q_net_withdraw_at_bus = pe.Constraint(con_set)
constr = m.eq_q_net_withdraw_at_bus
if kwargs and vm_by_bus is not None:
for idx,val in kwargs.items():
if idx=='linearize_shunts' and val==True:
for b in index_set:
constr[b] = m.q_nw[b] == (-bus_bs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2)
+ (m.ql[b] if bus_q_loads[b] != 0.0 else 0.0)
- sum(m.qg[g] for g in gens_by_bus[b])
)
return
if idx=='linearize_shunts' and val==False:
for b in index_set:
constr[b] = m.q_nw[b] == (-bus_bs_fixed_shunts[b] * vm_by_bus[b] ** 2
+ (m.ql[b] if bus_q_loads[b] != 0.0 else 0.0)
- sum(m.qg[g] for g in gens_by_bus[b])
)
return
for b in index_set:
constr[b] = m.q_nw[b] == (-bus_bs_fixed_shunts[b]
+ ( m.ql[b] if bus_q_loads[b] != 0.0 else 0.0 )
- sum( m.qg[g] for g in gens_by_bus[b] )
)
def declare_eq_ref_bus_nonzero(model, ref_angle, ref_bus):
"""
Create an equality constraint to enforce tan(theta) = vj/vr at the reference bus
"""
m = model
m.eq_ref_bus_nonzero = pe.Constraint(expr = tan(radians(ref_angle)) * m.vr[ref_bus] == m.vj[ref_bus])
def declare_eq_i_aggregation_at_bus(model, index_set,
bus_bs_fixed_shunts, bus_gs_fixed_shunts,
inlet_branches_by_bus, outlet_branches_by_bus):
"""
Create the equality constraints for the aggregated real and imaginary
currents at the bus
"""
m = model
con_set = decl.declare_set('_con_eq_i_aggregation_at_bus_set', model, index_set)
m.eq_ir_aggregation_at_bus = pe.Constraint(con_set)
m.eq_ij_aggregation_at_bus = pe.Constraint(con_set)
for bus_name in con_set:
ir_expr = sum([m.ifr[branch_name] for branch_name in outlet_branches_by_bus[bus_name]])
ir_expr += sum([m.itr[branch_name] for branch_name in inlet_branches_by_bus[bus_name]])
ij_expr = sum([m.ifj[branch_name] for branch_name in outlet_branches_by_bus[bus_name]])
ij_expr += sum([m.itj[branch_name] for branch_name in inlet_branches_by_bus[bus_name]])
if bus_bs_fixed_shunts[bus_name] != 0.0:
ir_expr -= bus_bs_fixed_shunts[bus_name] * m.vj[bus_name]
ij_expr += bus_bs_fixed_shunts[bus_name] * m.vr[bus_name]
if bus_gs_fixed_shunts[bus_name] != 0.0:
ir_expr += bus_gs_fixed_shunts[bus_name] * m.vr[bus_name]
ij_expr += bus_gs_fixed_shunts[bus_name] * m.vj[bus_name]
ir_expr -= m.ir_aggregation_at_bus[bus_name]
ij_expr -= m.ij_aggregation_at_bus[bus_name]
m.eq_ir_aggregation_at_bus[bus_name] = ir_expr == 0
m.eq_ij_aggregation_at_bus[bus_name] = ij_expr == 0
def declare_eq_p_balance_ed(model, index_set, bus_p_loads, gens_by_bus, bus_gs_fixed_shunts, **rhs_kwargs):
"""
Create the equality constraints for the system-wide real power balance.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
p_expr = sum(m.pg[gen_name] for bus_name in index_set for gen_name in gens_by_bus[bus_name])
p_expr -= sum(m.pl[bus_name] for bus_name in index_set if bus_p_loads[bus_name] is not None)
p_expr -= sum(bus_gs_fixed_shunts[bus_name] for bus_name in index_set if bus_gs_fixed_shunts[bus_name] != 0.0)
relaxed_balance = False
if rhs_kwargs:
for idx, val in rhs_kwargs.items():
if idx == 'include_feasibility_load_shed':
p_expr += eval("m." + val)
if idx == 'include_feasibility_over_generation':
p_expr -= eval("m." + val)
if idx == 'include_losses':
p_expr -= sum(m.pfl[branch_name] for branch_name in val)
if idx == 'relax_balance':
relaxed_balance = True
if relaxed_balance:
m.eq_p_balance = pe.Constraint(expr=p_expr >= 0.0)
else:
m.eq_p_balance = pe.Constraint(expr=p_expr == 0.0)
def declare_eq_p_balance_lopf(model, index_set, bus_p_loads, gens_by_bus, bus_gs_fixed_shunts, vm_by_bus, **rhs_kwargs):
"""
Create the equality constraints for the system-wide real power balance.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
p_expr = sum(m.pg[gen_name] for bus_name in index_set for gen_name in gens_by_bus[bus_name])
p_expr -= sum(m.pl[bus_name] for bus_name in index_set if bus_p_loads[bus_name] is not None)
relaxed_balance = False
if rhs_kwargs:
for idx,val in rhs_kwargs.items():
if idx == 'include_feasibility_load_shed':
p_expr += eval("m." + val)
if idx == 'include_feasibility_over_generation':
p_expr -= eval("m." + val)
if idx == 'include_branch_losses':
pass # branch losses are added to the constraint after updating pfl constraints
if idx == 'include_system_losses':
p_expr -= m.ploss
if idx == 'relax_balance':
relaxed_balance = True
if idx == 'linearize_shunts':
if val == True:
p_expr -= sum( bus_gs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2) \
for b in index_set if bus_gs_fixed_shunts[b] != 0.0)
elif val == False:
p_expr -= sum( bus_gs_fixed_shunts[b] * vm_by_bus[b] ** 2 \
for b in index_set if bus_gs_fixed_shunts[b] != 0.0)
else:
raise Exception('linearize_shunts option is invalid.')
if relaxed_balance:
m.eq_p_balance = pe.Constraint(expr = p_expr >= 0.0)
else:
m.eq_p_balance = pe.Constraint(expr = p_expr == 0.0)
def declare_eq_q_balance_lopf(model, index_set, bus_q_loads, gens_by_bus, bus_bs_fixed_shunts, vm_by_bus, **rhs_kwargs):
"""
Create the equality constraints for the system-wide reactive power balance.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
q_expr = sum(m.qg[gen_name] for bus_name in index_set for gen_name in gens_by_bus[bus_name])
q_expr -= sum(m.ql[bus_name] for bus_name in index_set if bus_q_loads[bus_name] is not None)
relaxed_balance = False
if rhs_kwargs:
for idx,val in rhs_kwargs.items():
if idx == 'include_reactive_load_shed':
q_expr += eval("m." + val)
if idx == 'include_reactive_over_generation':
q_expr -= eval("m." + val)
if idx == 'include_branch_losses':
pass # branch losses are added to the constraint after updating qfl constraints
if idx == 'include_system_losses':
q_expr -= m.qloss
if idx == 'relax_balance':
relaxed_balance = True
if idx == 'linearize_shunts':
if val == True:
q_expr -= sum( bus_bs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2) \
for b in index_set if bus_bs_fixed_shunts[b] != 0.0)
elif val == False:
q_expr -= sum( bus_bs_fixed_shunts[b] * vm_by_bus[b] ** 2 \
for b in index_set if bus_bs_fixed_shunts[b] != 0.0)
else:
raise Exception('linearize_shunts option is invalid.')
if relaxed_balance:
m.eq_q_balance = pe.Constraint(expr = q_expr >= 0.0)
else:
m.eq_q_balance = pe.Constraint(expr = q_expr == 0.0)
def declare_eq_ploss_sum_of_pfl(model, index_set):
"""
Create the equality constraint or expression for total real power losses as the sum of branch losses
"""
m=model
ploss_is_var = isinstance(m.ploss, pe.Var)
if ploss_is_var:
m.eq_ploss = pe.Constraint()
else:
if not isinstance(m.ploss, pe.Expression):
raise Exception("Unrecognized type for m.ploss", m.ploss.pprint())
expr = sum(m.pfl[bn] for bn in index_set)
if ploss_is_var:
m.eq_ploss = m.ploss == expr
else:
m.ploss = expr
def declare_eq_qloss_sum_of_qfl(model, index_set):
"""
Create the equality constraint or expression for total reactive power losses as the sum of branch losses
"""
m=model
qloss_is_var = isinstance(m.qloss, pe.Var)
if qloss_is_var:
m.eq_qloss = pe.Constraint()
else:
if not isinstance(m.qloss, pe.Expression):
raise Exception("Unrecognized type for m.qloss", m.qloss.pprint())
expr = sum(m.qfl[bn] for bn in index_set)
if qloss_is_var:
m.eq_qloss = m.qloss == expr
else:
m.qloss = expr
def declare_eq_ploss_ptdf_approx(model, PTDF, rel_ptdf_tol=None, abs_ptdf_tol=None, use_residuals=False):
"""
Create the equality constraint or expression for total real power losses (from PTDF approximation)
"""
m = model
ploss_is_var = isinstance(m.ploss, pe.Var)
if ploss_is_var:
m.eq_ploss = pe.Constraint()
else:
if not isinstance(m.ploss, pe.Expression):
raise Exception("Unrecognized type for m.ploss", m.ploss.pprint())
if rel_ptdf_tol is None:
rel_ptdf_tol = 0.
if abs_ptdf_tol is None:
abs_ptdf_tol = 0.
expr = get_ploss_expr_ptdf_approx(m, PTDF, abs_ptdf_tol=abs_ptdf_tol, rel_ptdf_tol=rel_ptdf_tol, use_residuals=use_residuals)
if ploss_is_var:
m.eq_ploss = m.ploss == expr
else:
m.ploss = expr
def get_ploss_expr_ptdf_approx(m, PTDF, abs_ptdf_tol=None, rel_ptdf_tol=None, use_residuals=False):
if not use_residuals:
const = PTDF.get_lossoffset()
iterator = PTDF.get_lossfactor_iterator()
else:
const = PTDF.get_lossoffset_resid()
iterator = PTDF.get_lossfactor_resid_iterator()
max_coef = PTDF.get_lossfactor_abs_max()
ptdf_tol = max(abs_ptdf_tol, rel_ptdf_tol*max_coef)
m_p_nw = m.p_nw
## if model.p_nw is Var, we can use LinearExpression
## to build these dense constraints much faster
coef_list = []
var_list = []
for bus_name, coef in iterator:
if abs(coef) >= ptdf_tol:
coef_list.append(coef)
var_list.append(m_p_nw[bus_name])
if use_residuals:
for i in m._idx_monitored:
bn = PTDF.branches_keys_masked[i]
coef_list.append(1)
var_list.append(m.pfl[bn])
if isinstance(m_p_nw, pe.Var):
expr = LinearExpression(linear_vars=var_list, linear_coefs=coef_list, constant=const)
else:
expr = quicksum( (coef*var for coef, var in zip(coef_list, var_list)), start=const, linear=True)
return expr
def declare_eq_qloss_ptdf_approx(model, PTDF, rel_ptdf_tol=None, abs_ptdf_tol=None, use_residuals=False):
"""
Create the equality constraint or expression for total reactive power losses (from PTDF approximation)
"""
m = model
qloss_is_var = isinstance(m.qloss, pe.Var)
if qloss_is_var:
m.eq_qloss = pe.Constraint()
else:
if not isinstance(m.qloss, pe.Expression):
raise Exception("Unrecognized type for m.qloss", m.qloss.pprint())
if rel_ptdf_tol is None:
rel_ptdf_tol = 0.
if abs_ptdf_tol is None:
abs_ptdf_tol = 0.
expr = get_qloss_expr_ptdf_approx(m, PTDF, abs_ptdf_tol=abs_ptdf_tol, rel_ptdf_tol=rel_ptdf_tol, use_residuals=use_residuals)
if qloss_is_var:
m.eq_qloss = m.qloss == expr
else:
m.qloss = expr
def get_qloss_expr_ptdf_approx(m, PTDF, abs_ptdf_tol=None, rel_ptdf_tol=None, use_residuals=False):
if not use_residuals:
const = PTDF.get_qlossoffset()
iterator = PTDF.get_qlossfactor_iterator()
else:
const = PTDF.get_qlossoffset_resid()
iterator = PTDF.get_qlossfactor_resid_iterator()
max_coef = PTDF.get_qlossfactor_abs_max()
ptdf_tol = max(abs_ptdf_tol, rel_ptdf_tol*max_coef)
m_q_nw = m.q_nw
## if model.q_nw is Var, we can use LinearExpression
## to build these dense constraints much faster
coef_list = []
var_list = []
for bus_name, coef in iterator:
if abs(coef) >= ptdf_tol:
coef_list.append(coef)
var_list.append(m_q_nw[bus_name])
if use_residuals:
for i in m._idx_monitored:
bn = PTDF.branches_keys[i]
coef_list.append(1)
var_list.append(m.qfl[bn])
if isinstance(m_q_nw, pe.Var):
expr = LinearExpression(linear_vars=var_list, linear_coefs=coef_list, constant=const)
else:
expr = quicksum( (coef*var for coef, var in zip(coef_list, var_list)), start=const, linear=True)
return expr
def declare_eq_bus_vm_approx(model, index_set, PTDF=None, rel_ptdf_tol=None, abs_ptdf_tol=None):
"""
Create the equality constraints or expressions for voltage magnitude (from PTDF
approximation) at the bus
"""
m = model
con_set = decl.declare_set("_con_eq_bus_vm_approx_set", model, index_set)
vm_is_var = isinstance(m.vm, pe.Var)
if vm_is_var:
m.eq_vm_bus = pe.Constraint(con_set)
else:
if not isinstance(m.vm, pe.Expression):
raise Exception("Unrecognized type for m.vm", m.vm.pprint())
if PTDF is None:
return
for bus_name in con_set:
expr = \
get_vm_expr_ptdf_approx(m, bus_name, PTDF, rel_ptdf_tol=rel_ptdf_tol, abs_ptdf_tol=abs_ptdf_tol)
if vm_is_var:
m.eq_vm_bus[bus_name] = \
m.vm[bus_name] == expr
else:
m.vm[bus_name] = expr
def get_vm_expr_ptdf_approx(model, bus_name, PTDF, rel_ptdf_tol=None, abs_ptdf_tol=None):
"""
Create a pyomo voltage magnitude expression from the VDF (voltage sensitivity) factors
"""
if rel_ptdf_tol is None:
rel_ptdf_tol = 0.
if abs_ptdf_tol is None:
abs_ptdf_tol = 0.
const = PTDF.get_bus_vdf_const(bus_name)
max_coef = PTDF.get_bus_vdf_abs_max(bus_name)
ptdf_tol = max(abs_ptdf_tol, rel_ptdf_tol*max_coef)
## NOTE: It would be easy to hold on to the 'ptdf' dictionary here, if we wanted to
m_q_nw = model.q_nw
qnw_is_var = isinstance(m_q_nw, pe.Var)
## if model.q_nw is Var, we can use LinearExpression
## to build these dense constraints much faster
coef_list = []
var_list = []
for bn, coef in PTDF.get_bus_vdf_iterator(bus_name):
if abs(coef) >= ptdf_tol:
coef_list.append(coef)
var_list.append(m_q_nw[bn])
elif qnw_is_var:
const += coef * m_q_nw[bn].value
else:
const += coef * m_q_nw[bn].expr()
if qnw_is_var:
expr = LinearExpression(linear_vars=var_list, linear_coefs=coef_list, constant=const)
else:
expr = quicksum( (coef*var for coef, var in zip(coef_list, var_list)), start=const, linear=True)
return expr
def declare_eq_p_balance_dc_approx(model, index_set,
bus_p_loads,
gens_by_bus,
bus_gs_fixed_shunts,
inlet_branches_by_bus, outlet_branches_by_bus,
approximation_type=ApproximationType.BTHETA,
dc_inlet_branches_by_bus=None,
dc_outlet_branches_by_bus=None,
**rhs_kwargs):
"""
Create the equality constraints for the real power balance
at a bus using the variables for real power flows, respectively.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
con_set = decl.declare_set('_con_eq_p_balance', model, index_set)
m.eq_p_balance = pe.Constraint(con_set)
for bus_name in con_set:
if approximation_type == ApproximationType.BTHETA:
p_expr = -sum(m.pf[branch_name] for branch_name in outlet_branches_by_bus[bus_name])
p_expr += sum(m.pf[branch_name] for branch_name in inlet_branches_by_bus[bus_name])
elif approximation_type == ApproximationType.BTHETA_LOSSES:
p_expr = -0.5*sum(m.pfl[branch_name] for branch_name in inlet_branches_by_bus[bus_name])
p_expr -= 0.5*sum(m.pfl[branch_name] for branch_name in outlet_branches_by_bus[bus_name])
p_expr -= sum(m.pf[branch_name] for branch_name in outlet_branches_by_bus[bus_name])
p_expr += sum(m.pf[branch_name] for branch_name in inlet_branches_by_bus[bus_name])
if dc_inlet_branches_by_bus is not None:
p_expr -= sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[bus_name])
p_expr += sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[bus_name])
if bus_gs_fixed_shunts[bus_name] != 0.0:
p_expr -= bus_gs_fixed_shunts[bus_name]
if bus_p_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
p_expr -= m.pl[bus_name]
if rhs_kwargs:
k = bus_name
for idx, val in rhs_kwargs.items():
if isinstance(val, tuple):
val,key = val
k = (key,bus_name)
if not k in eval("m." + val).index_set():
continue
if idx == 'include_feasibility_load_shed':
p_expr += eval("m." + val)[k]
if idx == 'include_feasibility_over_generation':
p_expr -= eval("m." + val)[k]
for gen_name in gens_by_bus[bus_name]:
p_expr += m.pg[gen_name]
m.eq_p_balance[bus_name] = \
p_expr == 0.0
def declare_eq_p_balance(model, index_set,
bus_p_loads,
gens_by_bus,
bus_gs_fixed_shunts,
inlet_branches_by_bus, outlet_branches_by_bus,
**rhs_kwargs):
"""
Create the equality constraints for the real power balance
at a bus using the variables for real power flows, respectively.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
con_set = decl.declare_set('_con_eq_p_balance', model, index_set)
m.eq_p_balance = pe.Constraint(con_set)
for bus_name in con_set:
p_expr = -sum([m.pf[branch_name] for branch_name in outlet_branches_by_bus[bus_name]])
p_expr -= sum([m.pt[branch_name] for branch_name in inlet_branches_by_bus[bus_name]])
if bus_gs_fixed_shunts[bus_name] != 0.0:
vmsq = m.vmsq[bus_name]
p_expr -= bus_gs_fixed_shunts[bus_name] * vmsq
if bus_p_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
p_expr -= m.pl[bus_name]
if rhs_kwargs:
for idx, val in rhs_kwargs.items():
if idx == 'include_feasibility_load_shed':
p_expr += eval("m." + val)[bus_name]
if idx == 'include_feasibility_over_generation':
p_expr -= eval("m." + val)[bus_name]
for gen_name in gens_by_bus[bus_name]:
p_expr += m.pg[gen_name]
m.eq_p_balance[bus_name] = \
p_expr == 0.0
def declare_eq_p_balance_with_i_aggregation(model, index_set,
bus_p_loads,
gens_by_bus,
**rhs_kwargs):
"""
Create the equality constraints for the real power balance
at a bus using the variables for real power flows, respectively.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
con_set = decl.declare_set('_con_eq_p_balance', model, index_set)
m.eq_p_balance = pe.Constraint(con_set)
for bus_name in con_set:
p_expr = -m.vr[bus_name] * m.ir_aggregation_at_bus[bus_name] + \
-m.vj[bus_name] * m.ij_aggregation_at_bus[bus_name]
if bus_p_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
p_expr -= m.pl[bus_name]
if rhs_kwargs:
for idx, val in rhs_kwargs.items():
if idx == 'include_feasibility_load_shed':
p_expr += eval("m." + val)[bus_name]
if idx == 'include_feasibility_over_generation':
p_expr -= eval("m." + val)[bus_name]
for gen_name in gens_by_bus[bus_name]:
p_expr += m.pg[gen_name]
m.eq_p_balance[bus_name] = \
p_expr == 0.0
def declare_eq_q_balance(model, index_set,
bus_q_loads,
gens_by_bus,
bus_bs_fixed_shunts,
inlet_branches_by_bus, outlet_branches_by_bus,
**rhs_kwargs):
"""
Create the equality constraints for the reactive power balance
at a bus using the variables for reactive power flows, respectively.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
con_set = decl.declare_set('_con_eq_q_balance', model, index_set)
m.eq_q_balance = pe.Constraint(con_set)
for bus_name in con_set:
q_expr = -sum([m.qf[branch_name] for branch_name in outlet_branches_by_bus[bus_name]])
q_expr -= sum([m.qt[branch_name] for branch_name in inlet_branches_by_bus[bus_name]])
if bus_bs_fixed_shunts[bus_name] != 0.0:
vmsq = m.vmsq[bus_name]
q_expr += bus_bs_fixed_shunts[bus_name] * vmsq
if bus_q_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
q_expr -= m.ql[bus_name]
if rhs_kwargs:
for idx, val in rhs_kwargs.items():
if idx == 'include_feasibility_load_shed':
q_expr += eval("m." + val)[bus_name]
if idx == 'include_feasibility_over_generation':
q_expr -= eval("m." + val)[bus_name]
for gen_name in gens_by_bus[bus_name]:
q_expr += m.qg[gen_name]
m.eq_q_balance[bus_name] = \
q_expr == 0.0
def declare_eq_q_balance_with_i_aggregation(model, index_set,
bus_q_loads,
gens_by_bus,
**rhs_kwargs):
"""
Create the equality constraints for the reactive power balance
at a bus using the variables for reactive power flows, respectively.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
con_set = decl.declare_set('_con_eq_q_balance', model, index_set)
m.eq_q_balance = pe.Constraint(con_set)
for bus_name in con_set:
q_expr = m.vr[bus_name] * m.ij_aggregation_at_bus[bus_name] + \
-m.vj[bus_name] * m.ir_aggregation_at_bus[bus_name]
if bus_q_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
q_expr -= m.ql[bus_name]
if rhs_kwargs:
for idx, val in rhs_kwargs.items():
if idx == 'include_feasibility_load_shed':
q_expr += eval("m." + val)[bus_name]
if idx == 'include_feasibility_over_generation':
q_expr -= eval("m." + val)[bus_name]
for gen_name in gens_by_bus[bus_name]:
q_expr += m.qg[gen_name]
m.eq_q_balance[bus_name] = \
q_expr == 0.0
def declare_ineq_vm_bus_lbub(model, index_set, buses, coordinate_type=CoordinateType.POLAR):
"""
Create the inequalities for the voltage magnitudes from the
voltage variables
"""
m = model
con_set = decl.declare_set('_con_ineq_vm_bus_lbub',
model=model, index_set=index_set)
m.ineq_vm_bus_lb = pe.Constraint(con_set)
m.ineq_vm_bus_ub = pe.Constraint(con_set)
if coordinate_type == CoordinateType.POLAR:
for bus_name in con_set:
m.ineq_vm_bus_lb[bus_name] = \
buses[bus_name]['v_min'] <= m.vm[bus_name]
m.ineq_vm_bus_ub[bus_name] = \
m.vm[bus_name] <= buses[bus_name]['v_max']
elif coordinate_type == CoordinateType.RECTANGULAR:
for bus_name in con_set:
m.ineq_vm_bus_lb[bus_name] = \
buses[bus_name]['v_min']**2 <= m.vr[bus_name]**2 + m.vj[bus_name]**2
m.ineq_vm_bus_ub[bus_name] = \
m.vr[bus_name]**2 + m.vj[bus_name]**2 <= buses[bus_name]['v_max']**2
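# Minimal illustrative sketch (not part of egret itself): wiring up the polar
# voltage magnitude variable, its squared auxiliary, and the defining constraint
# on a toy index set. The bus names are made up and the helper is never called.
def _example_vmsq_declarations():
    m = pe.ConcreteModel()
    buses = ['bus1', 'bus2']
    declare_var_vm(m, buses)
    declare_var_vmsq(m, buses)
    declare_eq_vmsq(m, buses, coordinate_type=CoordinateType.POLAR)
    return m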
| 2.515625
| 3
|
tests/integration/test_with_rabbitmq.py
|
thiagopena/python-mcollective
| 1
|
12777405
|
import os
from pymco.test import ctxt
from . import base
class RabbitMQTestCase(base.IntegrationTestCase):
'''RabbitMQ integration test case.'''
CTXT = {
'connector': 'rabbitmq',
'plugin.rabbitmq.vhost': '/mcollective',
'plugin.rabbitmq.pool.size': '1',
'plugin.rabbitmq.pool.1.host': 'localhost',
'plugin.rabbitmq.pool.1.port': '61613',
'plugin.rabbitmq.pool.1.user': 'mcollective',
'plugin.rabbitmq.pool.1.password': '<PASSWORD>',
}
class TestWithRabbitMQMCo22x(base.MCollective22x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQMCo23x(base.MCollective23x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQMCo24x(base.MCollective24x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQSSLMCo23x(base.MCollective23x, RabbitMQTestCase):
"""MCollective integration test case."""
CTXT = {
'connector': 'rabbitmq',
'plugin.rabbitmq.vhost': '/mcollective',
'plugin.rabbitmq.pool.size': '1',
'plugin.rabbitmq.pool.1.host': 'localhost',
'plugin.rabbitmq.pool.1.port': 61612,
'plugin.rabbitmq.pool.1.user': 'mcollective',
'plugin.rabbitmq.pool.1.password': '<PASSWORD>',
'plugin.rabbitmq.pool.1.ssl': 'true',
'plugin.rabbitmq.pool.1.ssl.ca': os.path.join(ctxt.ROOT,
'fixtures/ca.pem'),
'plugin.rabbitmq.pool.1.ssl.key': os.path.join(
ctxt.ROOT,
'fixtures/activemq_private.pem'),
'plugin.rabbitmq.pool.1.ssl.cert': os.path.join(
ctxt.ROOT,
'fixtures/activemq_cert.pem',
),
}
| 2.015625
| 2
|
Modules/Discord/Helix/utils/config/i18n.py
|
SinLess-Games/Helix
| 3
|
12777406
|
import contextlib
import functools
import io
import os
from pathlib import Path
from typing import Callable, Union, Dict, Optional
import babel.localedata
from babel.core import Locale
__all__ = [
"get_locale",
"set_locale",
"reload_locales",
"cog_i18n",
"Translator",
"get_babel_locale",
]
_current_locale = "en-US"
WAITING_FOR_MSGID = 1
IN_MSGID = 2
WAITING_FOR_MSGSTR = 3
IN_MSGSTR = 4
MSGID = 'msgid "'
MSGSTR = 'msgstr "'
_translators = []
def get_locale():
return _current_locale
def set_locale(locale):
global _current_locale
_current_locale = locale
reload_locales()
def reload_locales():
for translator in _translators:
translator.load_translations()
def _parse(translation_file: io.TextIOWrapper) -> Dict[str, str]:
"""
Custom gettext parsing of translation files.
Parameters
----------
translation_file : io.TextIOWrapper
An open text file containing translations.
Returns
-------
Dict[str, str]
A dict mapping the original strings to their translations. Empty
translated strings are omitted.
"""
step = None
untranslated = ""
translated = ""
translations = {}
for line in translation_file:
line = line.strip()
if line.startswith(MSGID):
# New msgid
if step is IN_MSGSTR and translated:
# Store the last translation
translations[_unescape(untranslated)] = _unescape(translated)
step = IN_MSGID
untranslated = line[len(MSGID): -1]
elif line.startswith('"') and line.endswith('"'):
if step is IN_MSGID:
# Line continuing on from msgid
untranslated += line[1:-1]
elif step is IN_MSGSTR:
# Line continuing on from msgstr
translated += line[1:-1]
elif line.startswith(MSGSTR):
# New msgstr
step = IN_MSGSTR
translated = line[len(MSGSTR): -1]
if step is IN_MSGSTR and translated:
# Store the final translation
translations[_unescape(untranslated)] = _unescape(translated)
return translations
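# Illustrative sketch of what _parse produces for a tiny in-memory .po-style
# snippet; the strings below are made up and the helper is never called here.
def _example_parse_snippet():
    sample = io.StringIO('msgid "Hello"\nmsgstr "Bonjour"\n')
    return _parse(sample)  # -> {"Hello": "Bonjour"}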
def _unescape(string):
string = string.replace(r"\\", "\\")
string = string.replace(r"\t", "\t")
string = string.replace(r"\r", "\r")
string = string.replace(r"\n", "\n")
string = string.replace(r"\"", '"')
return string
def get_locale_path(cog_folder: Path, extension: str) -> Path:
"""
Gets the folder path containing localization files.
:param Path cog_folder:
The cog folder that we want localizations for.
:param str extension:
Extension of localization files.
:return:
Path of possible localization file, it may not exist.
"""
return cog_folder / "locales" / "{}.{}".format(get_locale(), extension)
class Translator(Callable[[str], str]):
"""Function to get translated strings at runtime."""
def __init__(self, name: str, file_location: Union[str, Path, os.PathLike]):
"""
Initializes an internationalization object.
Parameters
----------
name : str
Your cog name.
file_location : `str` or `pathlib.Path`
This should always be ``__file__`` otherwise your localizations
will not load.
"""
self.cog_folder = Path(file_location).resolve().parent
self.cog_name = name
self.translations = {}
_translators.append(self)
self.load_translations()
def __call__(self, untranslated: str) -> str:
"""Translate the given string.
This will look for the string in the translator's :code:`.po` file,
with respect to the current locale.
"""
try:
return self.translations[untranslated]
except KeyError:
return untranslated
def load_translations(self):
"""
Loads the current translations.
"""
self.translations = {}
locale_path = get_locale_path(self.cog_folder, "po")
with contextlib.suppress(IOError, FileNotFoundError):
with locale_path.open(encoding="utf-8") as file:
self._parse(file)
def _parse(self, translation_file):
self.translations.update(_parse(translation_file))
def _add_translation(self, untranslated, translated):
untranslated = _unescape(untranslated)
translated = _unescape(translated)
if translated:
self.translations[untranslated] = translated
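# Minimal usage sketch based on the Translator API above: a cog module would
# typically create one translator bound to its own file and call it on each
# user-facing string. "ExampleCog" is a made-up name and this helper is unused.
def _example_translator_usage():
    _ = Translator("ExampleCog", __file__)  # loads locales/<locale>.po next to this file, if present
    return _("Hello, world!")  # falls back to the untranslated string when no .po entry exists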
@functools.lru_cache()
def _get_babel_locale(red_locale: str) -> babel.core.Locale:
supported_locales = babel.localedata.locale_identifiers()
try: # Handles cases where red_locale is already Babel supported
babel_locale = Locale(*babel.parse_locale(red_locale))
except (ValueError, babel.core.UnknownLocaleError):
try:
babel_locale = Locale(*babel.parse_locale(red_locale, sep="-"))
except (ValueError, babel.core.UnknownLocaleError):
# ValueError is raised by `parse_locale` when an invalid locale is given to it.
# Let's handle it silently and default to "en_US"
try:
# Try to find a babel locale that's close to the one used by red
babel_locale = Locale(Locale.negotiate([red_locale], supported_locales, sep="-"))
except (ValueError, TypeError, babel.core.UnknownLocaleError):
# If we fail to get a close match we will then default to "en_US"
babel_locale = Locale("en", "US")
return babel_locale
def get_babel_locale(locale: Optional[str] = None) -> babel.core.Locale:
"""Function to convert a locale to a ``babel.core.Locale``.
Parameters
----------
locale : Optional[str]
The locale to convert, if not specified it defaults to the bot's locale.
Returns
-------
babel.core.Locale
The babel locale object.
"""
if locale is None:
locale = get_locale()
return _get_babel_locale(locale)
# This import needs to be down here to avoid circular import issues.
# This will be cleaned up at a later date
# noinspection PyPep8
from Helix.utils import commands
def cog_i18n(translator: Translator):
"""Get a class decorator to link the translator to this cog."""
def decorator(cog_class: type):
cog_class.__translator__ = translator
for name, attr in cog_class.__dict__.items():
if isinstance(attr, (commands.Group, commands.Command)):
attr.translator = translator
setattr(cog_class, name, attr)
return cog_class
return decorator
| 2.921875
| 3
|
src/dfi/fs.py
|
slyphon/dfinstall
| 0
|
12777407
|
<reponame>slyphon/dfinstall
from typing import List, Optional, Union, cast, Dict, Callable
import sys
import os
import os.path as osp
from stat import *
from pathlib import Path
import json
import logging
import arrow
from .dotfile import LinkData
from .config import Settings, TFileStrategy, TSymlinkStrategy, file_strategy_validator, symlink_strategy_validator
from .exceptions import (BackupFailed, TooManySymbolicLinks, FatalConflict, FilesystemConflictError)
log = logging.getLogger(__name__)
_DATE_FORMAT_STR = 'YYYYMMDDHHmmss'
class _skipConflictingEntry(Exception):
pass
def skip_it() -> None:
raise _skipConflictingEntry()
def timestamp() -> str:
return cast(str, arrow.utcnow().format(_DATE_FORMAT_STR))
def backup(p: Path) -> Optional[Path]:
log.debug(f"handle rename for p: {p}, p.exists: {p.exists()}")
if p.exists():
for n in range(0, 100):
newp = p.with_suffix(f".dfi_{timestamp()}_{n:03}")
if newp.exists():
log.debug(f"backup path {newp!s} existed, retrying")
continue
else:
p.rename(newp)
return newp
else:
raise BackupFailed(p)
else:
return None
def is_link(p: Path) -> Optional[bool]:
try:
s = os.lstat(p)
return S_ISLNK(s.st_mode)
except FileNotFoundError:
return None
def chase_links(link: Path) -> Path:
cur = link
depth = 0
while depth <= 50:
depth += 1
if not is_link(cur):
return cur
cur = Path(osp.normpath(osp.join(cur.parent, os.readlink(cur))))
else:
raise TooManySymbolicLinks(link, depth)
def link_points_to(link: Path, target: Path) -> Optional[bool]:
try:
data = os.readlink(link)
return osp.samefile(chase_links(link), target)
except FileNotFoundError:
return None
def backup_file_strategy(p: Path) -> None:
"""when a link_path exists and is a file, this method moves it to a unique location"""
log.debug(f"backup_file_strategy: {p}")
backup(p)
def delete_strategy(p: Path) -> None:
"""when a link_path exists and is a file, this method removes it"""
log.debug(f"delete_strategy: {p}")
p.unlink()
def warn_strategy(p: Path) -> None:
log.warning(f"File location {str(p)!r} already exists and 'warn' strategy selected, continuing.")
skip_it()
def fail_strategy(p: Path) -> None:
raise FatalConflict(p)
StrategyFn = Callable[[Path], None]
_FILE_STRATEGY_MAP: Dict[TFileStrategy, StrategyFn] = {
'backup': backup_file_strategy,
'delete': delete_strategy,
'warn': warn_strategy,
'fail': fail_strategy,
}
_SYMLINK_STRATEGY_MAP: Dict[TSymlinkStrategy, StrategyFn] = {
'replace': delete_strategy,
'warn': warn_strategy,
'fail': fail_strategy,
}
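# Illustrative sketch (never called): resolving conflict handlers from the
# user-facing strategy names, mirroring how apply_settings wires them up below.
def _example_resolve_strategies(file_strategy: str, symlink_strategy: str):
    file_handler = _FILE_STRATEGY_MAP[file_strategy_validator(file_strategy)]
    link_handler = _SYMLINK_STRATEGY_MAP[symlink_strategy_validator(symlink_strategy)]
    return file_handler, link_handler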
def _apply_link_data(
ld: LinkData, create_missing: bool, file_stgy: StrategyFn, link_stgy: StrategyFn
) -> None:
target, link_data, link_path = ld.vpath, ld.link_data, ld.link_path
# TODO: make this a setting
if not link_path.parent.exists():
link_path.parent.mkdir(mode=0o755, parents=True, exist_ok=True)
def fn() -> None:
# os.path.exists reports false for a broken symlink
if not os.path.exists(link_path) or is_link(link_path):
if not is_link(link_path):
link_path.symlink_to(link_data) # ok, we're clear, do it
return
log.debug(f"{link_path} is symlink")
if link_points_to(link_path, target):
log.debug(f"{link_path} resolves to {target}")
return # ok, we already did this, so skip it
else:
log.debug(f"{link_path} points to {os.readlink(link_path)}")
link_stgy(link_path)
return fn() # recurse
elif link_path.is_file() or link_path.is_dir():
file_stgy(link_path)
return fn() # and recurse
else: # what the what?
raise FilesystemConflictError(link_path, os.stat(link_path))
try:
fn()
except _skipConflictingEntry as e:
return None
def apply_link_data(
link_datas: List[LinkData], create_missing: bool, fs: StrategyFn, ls: StrategyFn
) -> None:
for ld in link_datas:
_apply_link_data(ld, create_missing, fs, ls)
def apply_settings(settings: Settings) -> None:
apply_link_data(
settings.link_data,
settings.create_missing_target_dirs,
# revalidating here is silly, but it appeases mypy, because declaring
# these as literal types on the Settings object messes up serialization
_FILE_STRATEGY_MAP[file_strategy_validator(settings.conflicting_file_strategy)],
_SYMLINK_STRATEGY_MAP[symlink_strategy_validator(settings.conflicting_symlink_strategy)]
)
| 1.953125
| 2
|
tests/mocklogin_duo.py
|
cloudposse/duo_unix
| 1
|
12777408
|
#!/usr/bin/env python
import os
import pexpect
import paths
PROMPT = '.* or option \(1-4\): $'
def _login_duo():
p = pexpect.spawn(paths.login_duo + ' -d -c confs/mockduo.conf ' + \
'-f foobar echo SUCCESS')
p.expect(PROMPT, timeout=2)
print '===> %r' % p.match.group(0)
return p
def main():
p = _login_duo()
# 3 failures in a row
p.sendline('123456')
p.expect(PROMPT)
print '===> %r' % p.match.group(0)
p.sendline('wefawefgoiagj3rj')
p.expect(PROMPT)
print '===> %r' % p.match.group(0)
p.sendline('A' * 500)
p.expect(pexpect.EOF)
print '===> %r' % p.before
# menu options
p = _login_duo()
p.sendline('3')
p.expect(PROMPT)
print '===> %r' % p.match.group(0)
p.sendline('4')
p.expect(PROMPT)
print '===> %r' % p.match.group(0)
p.sendline('1')
p.expect(pexpect.EOF)
print '===> %r' % p.before
p = _login_duo()
p.sendline('2')
p.expect(pexpect.EOF)
print '===> %r' % p.before
if __name__ == '__main__':
main()
| 2.5
| 2
|
src/cambiopy/azure.py
|
CNuge/CamBioPy
| 0
|
12777409
|
#!/usr/bin/env python3
import sys
import os
import argparse
#location = '.'
#suffix = '.py'
def get_filelist(location, suffix = None, recursive = False):
""" Get a list of files in a directory and optionally its subdirs,
with optional suffix matching requirement."""
if recursive == False:
if suffix is None:
filelist = [location+x for x in os.listdir(location)]
else:
filelist = [location+x for x in os.listdir(location) if x[-len(suffix):] == suffix]
elif recursive == True:
filelist = []
for path, subdirs, files in os.walk(location):
for x in files:
if suffix is None or x[-len(suffix):] == suffix:
rpath = os.path.join(path, x)
filelist.append(rpath)
return filelist
def build_to_azure_calls(files, local_location, azure_location,
keep_structure = True,
relative_paths = False,
trim_local_paths = None):
""" Take a list of local relative filepaths and build azure transfer calls
If keep_structure == true, the subfolders will be added to the azure calls.
Note for the retention of structure, only subfolders of the current working
directory will be valid (no higher levels in the file hierarchy are permitted)
"""
outlist = []
for f in files:
if keep_structure == False:
outstr = f'azcopy copy "{f}" "{azure_location}"\n'
else:
if relative_paths == True:
if f[:2] != './':
raise ValueError("The keep_structure argument requires relative imports (leading dotslash ('./')")
parts = f[2:].split("/")
else:
parts = f.split("/")
add_path = "/".join(parts[:-1])
add_path+="/"
#second bit of logic here is to avoid the double end slash when not
#including any subfolders
if trim_local_paths is None and add_path != "/":
outstr = f'azcopy copy "{f}" "{azure_location}{add_path}"\n'
else:
az_path = add_path.replace(local_location, '')
outstr = f'azcopy copy "{f}" "{azure_location}{az_path}"\n'
outlist.append(outstr)
return outlist
def build_from_azure_calls(files, azure_location, local_location = "."):
""" Take a list of files and their location on azure and build transfer calls
to move them to a specified local location."""
outlist = []
for f in files:
outstr = f'azcopy copy "{azure_location}{f}" "{local_location}"\n'
outlist.append(outstr)
return outlist
def read_filelist(file):
""" Read in a list of files for transfer FROM azure to local."""
dat = []
with open(file, "r") as f:
for line in f:
line = line.rstrip()
dat.append(line)
return dat
def write_calls_file(calls, outfile):
""" Take the produced azcopy file-by-file calls and write the output script """
f=open(outfile, 'w')
for line in calls:
f.write(line)
f.close()
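# Minimal usage sketch with made-up paths: build upload and download azcopy call
# lists for a couple of hypothetical files and print them. The Azure container
# URL below is a placeholder, not a real location.
if __name__ == '__main__':
    local_files = ['./data/sample1.csv', './data/sample2.csv']
    azure_url = 'https://example.blob.core.windows.net/container/'
    for call in build_to_azure_calls(local_files, './data/', azure_url, keep_structure=False):
        print(call, end='')
    for call in build_from_azure_calls(['results/output.csv'], azure_url, local_location='./data/'):
        print(call, end='')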
| 3.109375
| 3
|
chapter6.py
|
rpmva/Python1
| 0
|
12777410
|
#Chapter 6 Notes
#List = ['a', 'b', 'c'];
#Character at a certain index: List[0] = 'a';
#len(List) = 3;
#Count how often something appears in a list: List.count('a') = 1; List.count('d') = 0;
#Where an index occurs: List.index('b') = 1
#Checking if a character or string is in a list: 'd' in List = False
#Adding items to a list: x = []; x.append('one'); x.append('two'); x = ['one','two']
#Making a list longer: x1 = [1,2,3]; x2 = [4,5,6]; x1.extend(x2);
#Removing items from a list: numbers = [1,2,3,4,5,6]; numbers.remove(6); numbers = [1,2,3,4,5]
#Inserting items at specific indices: list = ['a','c','d']; list.insert(1,'b')
#Adding a list: a = [1,2,3]; b = [4,5,6]; a + b = [1,2,3,4,5,6]
#reverse() puts list in reverse order
#sort() puts numbers in numerical or alphabetical order
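#For example: letters = ['c','a','b']; letters.sort() makes it ['a','b','c']; letters.reverse() then makes it ['c','b','a']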
#Chapter 6 Exercise
stock = ['pepperoni', 'sausage', 'cheese', 'peppers']
x = raw_input("Please give me a topping: ")
toppings = [];
if x in stock:
toppings.append(x)
print "We have " + x + "!"
else:
print "Sorry, we don't have " + x + "."
y = raw_input("Please give me one more topping: ")
if y in stock:
toppings.append(y)
print "We have " + y + "!"
else:
print " Sorry, we don't have " + y + "."
print "Here are toppings: {}".format(toppings)
| 4.40625
| 4
|
tests/molecular/functional_groups/functional_group/generic_functional_group/test_get_bonder_ids.py
|
stevenbennett96/stk
| 21
|
12777411
|
import itertools as it
def test_get_bonder_ids(generic_case_data):
"""
Test :meth:`.GenericFunctionalGroup.get_bonder_ids`.
Parameters
----------
generic_case_data : :class:`.GenericCaseData`
The test case. Holds the functional group to test and the
atoms holding the correct bonder ids.
Returns
-------
None : :class:`NoneType`
"""
_test_get_bonder_ids(
functional_group=generic_case_data.functional_group,
bonders=generic_case_data.bonders,
)
def _test_get_bonder_ids(functional_group, bonders):
"""
Test :meth:`.GenericFunctionalGroup.get_bonder_ids`.
Parameters
----------
functional_group : :class:`.GenericFunctionalGroup`
The functional group to test.
bonders : :class:`tuple` of :class:`.Atom`
The atoms holding the correct bonder ids.
Returns
-------
None : :class:`NoneType`
"""
for id_, atom in it.zip_longest(
functional_group.get_bonder_ids(),
bonders,
):
assert id_ == atom.get_id()
| 2.421875
| 2
|
SoftLayer/tests/CLI/modules/vs_tests.py
|
briancline/softlayer-python
| 0
|
12777412
|
<gh_stars>0
"""
SoftLayer.tests.CLI.modules.vs_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import mock
from SoftLayer import testing
import json
class VirtTests(testing.TestCase):
def test_list_vs(self):
result = self.run_command(['vs', 'list', '--tag=tag'])
self.assertEqual(result.exit_code, 0)
self.assertEqual(json.loads(result.output),
[{'datacenter': 'TEST00',
'primary_ip': '172.16.240.2',
'hostname': 'vs-test1',
'action': None,
'id': 100,
'backend_ip': '10.45.19.37'},
{'datacenter': 'TEST00',
'primary_ip': '172.16.240.7',
'hostname': 'vs-test2',
'action': None,
'id': 104,
'backend_ip': '10.45.19.35'}])
def test_detail_vs(self):
result = self.run_command(['vs', 'detail', '100',
'--passwords', '--price'])
self.assertEqual(result.exit_code, 0)
self.assertEqual(json.loads(result.output),
{'active_transaction': None,
'cores': 2,
'created': '2013-08-01 15:23:45',
'datacenter': 'TEST00',
'hostname': 'vs-test1',
'domain': 'test.sftlyr.ws',
'fqdn': 'vs-test1.test.sftlyr.ws',
'id': 100,
'guid': '1a2b3c-1701',
'memory': 1024,
'modified': {},
'os': '12.04-64 Minimal for VSI',
'os_version': '12.04-64 Minimal for VSI',
'notes': 'notes',
'price rate': 1.54,
'tags': ['production'],
'private_cpu': {},
'private_ip': '10.45.19.37',
'private_only': {},
'ptr': 'test.softlayer.com.',
'public_ip': '172.16.240.2',
'state': 'RUNNING',
'status': 'ACTIVE',
'users': [{'password': '<PASSWORD>', 'username': 'user'}],
'vlans': [{'type': 'PUBLIC',
'number': 23,
'id': 1}],
'owner': 'chechu'})
def test_detail_vs_empty_tag(self):
mock = self.set_mock('SoftLayer_Virtual_Guest', 'getObject')
mock.return_value = {
'id': 100,
'maxCpu': 2,
'maxMemory': 1024,
'tagReferences': [
{'tag': {'name': 'example-tag'}},
{},
],
}
result = self.run_command(['vs', 'detail', '100'])
self.assertEqual(result.exit_code, 0)
self.assertEqual(
json.loads(result.output)['tags'],
['example-tag'],
)
def test_create_options(self):
result = self.run_command(['vs', 'create-options'])
self.assertEqual(result.exit_code, 0)
self.assertEqual(json.loads(result.output),
{'cpus (private)': [],
'cpus (standard)': ['1', '2', '3', '4'],
'datacenter': ['ams01', 'dal05'],
'local disk(0)': ['25', '100'],
'memory': ['1024', '2048', '3072', '4096'],
'nic': ['10', '100', '1000'],
'os (CENTOS)': 'CENTOS_6_64',
'os (DEBIAN)': 'DEBIAN_7_64',
'os (UBUNTU)': 'UBUNTU_12_64'})
@mock.patch('SoftLayer.CLI.formatting.confirm')
def test_create(self, confirm_mock):
confirm_mock.return_value = True
result = self.run_command(['vs', 'create',
'--cpu=2',
'--domain=example.com',
'--hostname=host',
'--os=UBUNTU_LATEST',
'--memory=1',
'--network=100',
'--billing=hourly',
'--datacenter=dal05',
'--tag=dev',
'--tag=green'])
self.assertEqual(result.exit_code, 0)
self.assertEqual(json.loads(result.output),
{'guid': '1a2b3c-1701',
'id': 100,
'created': '2013-08-01 15:23:45'})
args = ({'datacenter': {'name': 'dal05'},
'domain': 'example.com',
'hourlyBillingFlag': True,
'localDiskFlag': True,
'maxMemory': 1024,
'hostname': 'host',
'startCpus': 2,
'operatingSystemReferenceCode': 'UBUNTU_LATEST',
'networkComponents': [{'maxSpeed': '100'}]},)
self.assert_called_with('SoftLayer_Virtual_Guest', 'createObject',
args=args)
| 1.945313
| 2
|
tests/test_obspack_surface_collection_recipes.py
|
dkauf42/gdess
| 2
|
12777413
|
<reponame>dkauf42/gdess
import os
import pytest
import xarray as xr
from co2_diag import load_stations_dict
from co2_diag.data_source.observations.gvplus_surface import Collection
@pytest.fixture
def newEmptySurfaceStation():
mySurfaceInstance = Collection()
return mySurfaceInstance
def test_station_MLO_is_present(newEmptySurfaceStation):
station_dict = load_stations_dict()
assert 'mlo' in station_dict
def test_simplest_preprocessed_type(rootdir, newEmptySurfaceStation):
test_path = os.path.join(rootdir, 'test_data', 'globalview')
newEmptySurfaceStation.preprocess(datadir=test_path, station_name='mlo')
assert isinstance(newEmptySurfaceStation.stepA_original_datasets['mlo'], xr.Dataset)
def test_recipe_input_year_error(rootdir, newEmptySurfaceStation):
test_path = os.path.join(rootdir, 'test_data', 'globalview')
recipe_options = {
'ref_data': test_path,
'start_yr': "198012",
'end_yr': "201042",
'station_code': 'mlo'}
with pytest.raises(SystemExit):
newEmptySurfaceStation.run_recipe_for_timeseries(verbose='DEBUG', options=recipe_options)
def test_recipe_input_stationcode_error(rootdir, newEmptySurfaceStation):
test_path = os.path.join(rootdir, 'test_data', 'globalview')
recipe_options = {
'ref_data': test_path,
'start_yr': "1980",
'end_yr': "2010",
'station_code': 'asdkjhfasg'}
with pytest.raises(SystemExit):
newEmptySurfaceStation.run_recipe_for_timeseries(verbose='DEBUG', options=recipe_options)
def test_timeseries_recipe_completes_with_no_errors(rootdir, newEmptySurfaceStation):
test_path = os.path.join(rootdir, 'test_data', 'globalview')
recipe_options = {
'ref_data': test_path,
'start_yr': "1980",
'end_yr': "2010",
'station_code': 'mlo'}
try:
newEmptySurfaceStation.run_recipe_for_timeseries(verbose='DEBUG', options=recipe_options)
except Exception as exc:
assert False, f"'run_recipe_for_timeseries' raised an exception {exc}"
| 1.9375
| 2
|
sidekick/management/commands/import_members.py
|
cybera/netbox_sidekick
| 1
|
12777414
|
import csv
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from dcim.models import Site
from tenancy.models import Tenant
from sidekick.utils import MEMBER_TYPES
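# Expected CSV layout (no header row), inferred from the row unpacking in
# handle() below. member_type must be one of MEMBER_TYPES; the sample values
# here are made up:
#   name,description,member_type,comments,latitude,longitude
#   "Example Org","An example member","<member type>","Some notes","53.5461","-113.4938"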
class Command(BaseCommand):
help = "Import existing members"
def add_arguments(self, parser):
parser.add_argument(
'--file', required=True, help='The path to the CSV file')
parser.add_argument(
'--quiet', required=False, action='store_true',
help='Suppress messages')
parser.add_argument(
'--dry-run', required=False, action='store_true',
help='Perform a dry-run and make no changes')
def handle(self, *args, **options):
quiet = options['quiet']
dry_run = options['dry_run']
f = options['file']
rows = []
with open(f) as csvfile:
r = csv.reader(csvfile)
for row in r:
rows.append(row)
for row in rows:
(name, description, member_type, comments, latitude, longitude) = row
name = name.strip()
if member_type not in MEMBER_TYPES:
self.stdout.write(f"ERROR: Incorrect member type for {name}: {member_type}. Skipping.")
continue
# See if there is an existing tenant/member.
# If there is, compare values and update as needed.
# If there isn't, create one.
try:
changed = False
tenant = Tenant.objects.get(name=name)
if tenant.description != description:
changed = True
tenant.description = description
if dry_run or not quiet:
self.stdout.write(f"Changing description of {name} to {description}")
if tenant.comments != comments:
changed = True
tenant.comments = comments
if dry_run or not quiet:
self.stdout.write(f"Changing comments of {name} to {comments}")
if 'member_type' not in tenant.cf or tenant.cf['member_type'] != member_type:
changed = True
tenant.cf['member_type'] = member_type
if dry_run or not quiet:
self.stdout.write(f"Changing member_type of {name} to {member_type}")
if not dry_run and changed:
self.stdout.write(f"Updated Tenant: {name}")
tenant.save()
except Tenant.MultipleObjectsReturned:
self.stdout.write(f"WARNING: Multiple results found for {name}. Skipping.")
continue
except Tenant.DoesNotExist:
if options['dry_run']:
self.stdout.write(f"Would have created Tenant: {name}")
continue
tenant = Tenant.objects.create(
name=name,
slug=slugify(name),
description=description,
comments=comments,
)
tenant.cf['member_type'] = member_type
tenant.save()
self.stdout.write(f"Created Tenant: {name}")
# See if there is an existing site.
# If there is, compare values and update as needed.
# If there isn't, create one.
try:
changed = False
site = Site.objects.get(name=name)
if site.latitude != latitude:
changed = True
site.latitude = latitude
if dry_run or not quiet:
self.stdout.write(f"Changing latitude of Site {name} to {latitude}")
if site.longitude != longitude:
changed = True
site.longitude = longitude
if dry_run or not quiet:
self.stdout.write(f"Changing longitude of Site {name} to {longitude}")
if not dry_run and changed:
self.stdout.write(f"Updated Site: {name}")
site.save()
except Site.MultipleObjectsReturned:
self.stdout.write(f"WARNING: Multiple sites found for {name}. Skipping.")
continue
except Site.DoesNotExist:
if options['dry_run']:
self.stdout.write(f"Would have created Site: {name}")
continue
site = Site.objects.create(
name=name,
tenant=tenant,
slug=slugify(name),
latitude=latitude,
longitude=longitude,
)
site.save()
self.stdout.write(f"Created Site: {name}")
| 2.140625
| 2
|
parsers/app/base.py
|
vintage/moba_quiz
| 5
|
12777415
|
import json
import os
import shutil
import functools
import re
import requests
from unidecode import unidecode as udecode
from PIL import Image
from slugify import slugify
class ImageNotFound(Exception):
pass
def parse_string(string):
if string is None:
return None
return string.strip()
class Champion(object):
def __init__(self, pk, name, image, title=None, is_range=None, nation=None):
self.pk = pk
self.name = name
self.image = image
self.title = title
self.is_range = is_range
self.nation = nation
self.skills = []
self.translations = {}
def add_skill(self, skill):
self.skills.append(skill)
def add_translation(self, field, value):
self.translations[field] = value
def to_dict(self):
data = {
'id': parse_string(self.pk),
'name': parse_string(self.name),
'nation': parse_string(self.nation),
'image': parse_string(self.image),
'title': parse_string(self.title),
'is_range': self.is_range,
'skills': [s.to_dict() for s in self.skills]
}
for i18n_key, i18n_value in self.translations.items():
data['{}_i18n'.format(i18n_key)] = i18n_value
return data
class Skill(object):
def __init__(self, pk, name, image):
self.pk = pk
self.name = name
self.image = image
self.translations = {}
def add_translation(self, field, value):
self.translations[field] = value
def to_dict(self):
data = {
'id': parse_string(self.pk),
'name': parse_string(self.name),
'image': parse_string(self.image),
}
for i18n_key, i18n_value in self.translations.items():
data['{}_i18n'.format(i18n_key)] = i18n_value
return data
class Item(object):
def __init__(self, pk, name, image, into, _from, price):
self.pk = pk
self.name = name
self.image = image
self.into = into
self._from = _from
self.price = int(price) if price else None
self.translations = {}
def add_translation(self, field, value):
self.translations[field] = value
def to_dict(self):
data = {
'id': parse_string(self.pk),
'name': parse_string(self.name),
'image': parse_string(self.image),
'into': self.into,
'from': self._from,
'price': self.price,
}
for i18n_key, i18n_value in self.translations.items():
data['{}_i18n'.format(i18n_key)] = i18n_value
return data
class Importer(object):
export_path = './data/champions.json'
image_path = './data/images/champions/'
def run(self):
os.makedirs(self.image_path, exist_ok=True)
objects = self.get_objects()
try:
is_valid = self.validate(objects)
except Exception:
is_valid = False
if not is_valid:
raise Exception('Something went wrong in the validate method.')
self.export(objects)
return objects
def get_objects(self):
return []
def export(self, objects):
with open(self.export_path, 'w') as outfile:
json.dump([o.to_dict() for o in objects], outfile, ensure_ascii=False)
return outfile
def slugify(self, value):
return slugify(value)
def clean_filename(self, filename):
filename = udecode(''.join(filename.split()).lower())
extension_dot = filename.rindex('.')
left_part = filename[:extension_dot]
right_part = filename[extension_dot:]
# Characters after last . can be [a-z] only
right_part = " ".join(re.findall("[a-zA-Z]+", right_part))
return "{}.{}".format(left_part, right_part)
def download_image(self, url, filename):
response = requests.get(url, stream=True)
if response.status_code != 200:
msg = 'Image at {} not found'.format(url)
print(msg)
raise ImageNotFound(msg)
filename = self.clean_filename(filename)
full_path = os.path.join(self.image_path, filename)
with open(full_path, 'wb') as outfile:
shutil.copyfileobj(response.raw, outfile)
# compress image
image = Image.open(full_path)
image.save(full_path, quality=95, optimize=True)
del response
return filename
def validate(self, objects):
return True
class ChampionImporter(Importer):
export_path = './data/champions.json'
image_path = './data/images/champions/'
def validate(self, objects):
for obj in objects:
# Validate basic fields
if not all([obj.pk, obj.name, obj.image]):
raise Exception('Champion {} missing fields.'.format(obj.pk))
# Validate skills
skills = obj.skills
if not skills:
raise Exception('Champion {} missing skills.'.format(obj.pk))
for skill in skills:
if not all([skill.pk, skill.name, skill.image]):
raise Exception('Champion {} skill {} missing fields'.format(
obj.pk, skill.pk
))
return True
class ItemImporter(Importer):
export_path = './data/items.json'
image_path = './data/images/items/'
def get_objects(self):
return []
def validate(self, objects):
flat_ids = set([i.pk for i in objects])
for obj in objects:
# Validate basic fields
if not all([obj.pk, obj.name, obj.image]):
raise Exception('Item {} missing fields.'.format(obj.pk))
# Validate recipe
components = obj._from
if not components:
continue
if not set(components).issubset(flat_ids):
raise Exception('Item {} contains invalid recipe: {}'.format(
obj.pk, components
))
return True
class SettingsImporter(Importer):
export_path = './data/settings.json'
def export(self, objects):
with open(self.export_path, 'w') as outfile:
json.dump(objects, outfile)
return outfile
def get_objects(self):
return {
'ios': {
'ad_small': 'ca-app-pub-4764697513834958/5120930069',
'ad_big': 'ca-app-pub-4764697513834958/7934795665',
'tracking': 'UA-77793311-8',
'store': 'itms-apps://itunes.apple.com/app/id1121065896',
'store_premium': 'com.puppybox.quizpokemon.premium_version',
},
'android': {
'ad_small': 'ca-app-pub-4764697513834958/5480856869',
'ad_big': 'ca-app-pub-4764697513834958/5062054468',
'tracking': 'UA-77793311-9',
'store': 'market://details?id=com.puppybox.quizpokemon',
'store_premium': 'com.puppybox.quizpokemon.premium_version',
},
'windows': {
'ad_small': 'ca-app-pub-4764697513834958/7883646863',
'ad_big': 'ca-app-pub-4764697513834958/7744046068',
'tracking': '',
'store': '',
'store_premium': '',
},
'legal_disclaimer': 'This application is not created, sponsored or endorsed by Niantic and doesn’t reflect the views or opinions of Niantic or anyone officially involved in producing or managing Pokemon GO. Pokemon GO is a registered trademark of Niantic. All in-game characters, locations, imagery and videos of game content are copyright and are trademarked to their respective owners. Usage for this game falls within fair use guidelines.',
'highscore_url': 'http://mobascore-puppybox.rhcloud.com/api/v1/leaderboards/pokemon/scores/',
'source_name': 'Pokemon GO',
'source_url': 'http://www.pokemongo.com/',
}
class AchievementImporter(Importer):
export_path = './data/achievements.json'
def __init__(self, items, champions):
self.items = items
self.champions = champions
def export(self, objects):
with open(self.export_path, 'w') as outfile:
json.dump(objects, outfile)
return outfile
def get_objects(self):
items = self.items
champions = self.champions
item_count = len(list(filter(lambda x: len(x._from) > 0, items)))
champion_count = len(champions)
skill_count = functools.reduce(
lambda x, y: x + len(y.skills), champions, 0
)
objects = [
{
"id": "seen_all_skills",
"name": "Watching your every move",
"description": "Open all skill levels",
"type": "array",
"goal": skill_count,
},
{
"id": "seen_all_items",
"name": "Recipe observer",
"description": "Open all recipe levels",
"type": "array",
"goal": item_count,
},
{
"id": "seen_all_champions",
"name": "High Five Everybody",
"description": "Open all champion levels",
"type": "array",
"goal": champion_count,
},
{
"id": "solved_all_skills",
"name": "Every move is mine",
"description": "Solve all skill levels",
"type": "array",
"goal": skill_count,
},
{
"id": "solved_all_items",
"name": "<NAME> blacksmith",
"description": "Solve all recipe levels",
"type": "array",
"goal": item_count,
},
{
"id": "solved_all_champions",
"name": "I know all of them",
"description": "Solve all champion levels",
"type": "array",
"goal": champion_count,
},
{
"id": "gameplay_small_strike",
"name": "<NAME>",
"description": "Make a 10x strike",
"type": "number",
"goal": 10
},
{
"id": "gameplay_medium_strike",
"name": "Unstoppable",
"description": "Make a 50x strike",
"type": "number",
"goal": 50
},
{
"id": "gameplay_big_strike",
"name": "Godlike",
"description": "Make a 150x strike",
"type": "number",
"goal": 150
},
{
"id": "gameplay_small_play_count",
"name": "Gamer",
"description": "Play the game 100 times",
"type": "increment",
"goal": 100
},
{
"id": "gameplay_medium_play_count",
"name": "<NAME>",
"description": "Play the game 250 times",
"type": "increment",
"goal": 250
},
{
"id": "gameplay_big_play_count",
"name": "<NAME>",
"description": "Play the game 1000 times",
"type": "increment",
"goal": 1000
},
]
return objects
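# A minimal sketch of chaining the importers defined above; it assumes the
# get_objects() stubs for champions and items are filled in with real data
# sources before running.
if __name__ == '__main__':
    champions = ChampionImporter().run()
    items = ItemImporter().run()
    SettingsImporter().run()
    AchievementImporter(items, champions).run()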
| 2.625
| 3
|
getTopics.py
|
mitliagkas/pyliakmon
| 3
|
12777416
|
import numpy as np
import json
with open('db/cpt.json', 'rb') as infile:
    procHier = json.load(infile)
with open('db/icd.json', 'rb') as infile:
    icdHier = json.load(infile)
icdMap=dict([(icdHier[x]['level2'],{'desc':icdHier[x]['desc'],'code':x}) for x in icdHier.keys()])
procMap=dict([(procHier[x]['level2'],{'desc':procHier[x]['desc'],'code':x}) for x in procHier.keys()])
pcs=np.loadtxt('results/cmsQOrder2.txt')
p,k=pcs.shape
# Get the top-l loadings for each component
l=5
print
print
for c in range(k):
print
print "[Component", c+1, "]"
comp=pcs[:,c]
#comp=pcs[:,c]
#ind=abs(comp).argsort()[-l:]
if c>0:
print "Positive Pole"
ind=comp.argsort()[-l:]
ind=ind.tolist()
ind.reverse()
for id,magnitude in [(x,comp[x]) for x in ind]:
if id < 132:
# ICD
print " ICD9", icdMap[id]['desc'].ljust(70), magnitude
else:
# Procedure
id-=132
print " Proc", procMap[id]['desc'].ljust(70), magnitude
if c>0:
print "Negative Pole"
ind=comp.argsort()[:l]
ind=ind.tolist()
for id,magnitude in [(x,comp[x]) for x in ind]:
if id < 132:
# ICD
print " ICD9", icdMap[id]['desc'].ljust(70), magnitude
else:
# Procedure
id-=132
print " Proc", procMap[id]['desc'].ljust(70), magnitude
pcs=np.loadtxt('results/cmsCompOrder3.txt')
pcs=np.loadtxt('results/cmsQOrder2.txt')
p,k=pcs.shape
l=5
print
print
for c in range(k):
print
print "[Component", c+1, "]"
comp=pcs[:,c]
#comp=pcs[:,c]
#ind=abs(comp).argsort()[-l:]
if c>0:
print "Positive Pole"
ind=comp.argsort()[-l:]
ind=ind.tolist()
ind.reverse()
for id,magnitude in [(x,comp[x]) for x in ind]:
if id < 132:
# ICD
print " ICD9", icdMap[id]['desc'].ljust(70), magnitude
else:
# Procedure
id-=132
print " Proc", procMap[id]['desc'].ljust(70), magnitude
if c>0:
print "Negative Pole"
ind=comp.argsort()[:l]
ind=ind.tolist()
for id,magnitude in [(x,comp[x]) for x in ind]:
if id < 132:
# ICD
print " ICD9", icdMap[id]['desc'].ljust(70), magnitude
else:
# Procedure
id-=132
print " Proc", procMap[id]['desc'].ljust(70), magnitude
| 2.59375
| 3
|
generator_lesson/map_lesson.py
|
farooq-teqniqly/pakt-complete-python-course
| 0
|
12777417
|
<filename>generator_lesson/map_lesson.py
class Vehicle:
def __init__(self, make: str, model: str):
self.make = make
self.model = model
def __repr__(self):
return f"{self.make}, {self.model}"
@classmethod
def create(cls, **properties):
return cls(**properties)
if __name__ == "__main__":
makes = ["BMW", "Ford", "Dodge", "Mercedes-Benz", "Mercury"]
starts_with_mer = (make for make in makes if make.startswith("Mer"))
upper_case = (make.upper() for make in starts_with_mer)
print(list(upper_case))
vehicle = Vehicle.create(model="Mustang", make="Ford")
print(vehicle)
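    # The same pipeline written with map/filter instead of generator
    # expressions, as a sketch of the equivalence between the two styles:
    starts_with_mer_mf = filter(lambda make: make.startswith("Mer"), makes)
    upper_case_mf = map(str.upper, starts_with_mer_mf)
    print(list(upper_case_mf))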
| 4.0625
| 4
|
compiler/custom/write_driver.py
|
lekez2005/OpenRAM
| 0
|
12777418
|
<reponame>lekez2005/OpenRAM
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import debug
import design
from tech import cell_properties as props
class write_driver(design.design):
"""
Tristate write driver to be active during write operations only.
This module implements the write driver cell used in the design. It
is a hand-made cell, so the layout and netlist should be available in
the technology library.
"""
def __init__(self, name):
super().__init__(name, prop=props.write_driver)
debug.info(2, "Create write_driver")
def get_bl_names(self):
return "bl"
def get_br_names(self):
return "br"
@property
def din_name(self):
return "din"
@property
def en_name(self):
return "en"
def get_w_en_cin(self):
"""Get the relative capacitance of a single input"""
# This is approximated from SCMOS. It has roughly 5 3x transistor gates.
return 5 * 3
def build_graph(self, graph, inst_name, port_nets):
"""Adds edges based on inputs/outputs. Overrides base class function."""
self.add_graph_edges(graph, port_nets)
| 2.5625
| 3
|
create_db.py
|
ayushsingh-07/mailer
| 0
|
12777419
|
<filename>create_db.py
# -*- encoding: utf-8 -*-
"""Create all Data-Base"""
import os
# setting the environment
from dotenv import load_dotenv # Python 3.6+
from app.main import (
db, # SQLAlchemy Connector dB Object
create_app
)
from app.main.models import * # noqa: F401, F403
load_dotenv(verbose=True)
app = create_app(os.getenv("PROJECT_ENV_NAME") or "demo")
with app.app_context():
db.init_app(app)
db.create_all()
| 2.3125
| 2
|
deprecated_examples/affect/humor_late_fusion.py
|
TianhaoFu/MultiBench
| 0
|
12777420
|
<filename>deprecated_examples/affect/humor_late_fusion.py
import sys
import os
sys.path.append(os.getcwd())
import torch
from training_structures.Supervised_Learning import train, test
from fusions.common_fusions import Concat
from datasets.affect.get_data import get_dataloader
from unimodals.common_models import GRU, MLP
# Support mosi/mosi_unaligned/mosei/mosei_unaligned
traindata, validdata, testdata = get_dataloader('/home/pliang/multibench/affect/processed/humor_data.pkl')
# humor 371 81 300
encoders = GRU(752, 1128, dropout=True, has_padding=True).cuda()
head = MLP(1128, 512, 1).cuda()
# encoders=[GRU(35,70,dropout=True,has_padding=True).cuda(), \
# GRU(74,150,dropout=True,has_padding=True).cuda(),\
# GRU(300,600,dropout=True,has_padding=True).cuda()]
# head=MLP(820,400,1).cuda()
fusion = Concat().cuda()
# Support simple late_fusion and late_fusion with removing bias
train(encoders, fusion, head, traindata, validdata, 1000, is_packed=True, early_stop=True, \
task="classification", optimtype=torch.optim.AdamW, lr=1e-5, save='humor_lf_best.pt', \
weight_decay=0.01, objective=torch.nn.MSELoss())
print("Testing:")
model=torch.load('humor_lf_best.pt').cuda()
test(model, testdata, True, torch.nn.L1Loss(), "regression")
# test(model,testdata,True,)
| 1.914063
| 2
|
Climate_Shocks/__init__.py
|
Komanawa-Solutions-Ltd/SLMACC-2020-CSRA
| 0
|
12777421
|
"""
Author: <NAME>
Created: 14/10/2020 10:47 AM
"""
| 0.632813
| 1
|
demo/users/urls.py
|
physili/django_test
| 1
|
12777422
|
<reponame>physili/django_test<gh_stars>1-10
from django.conf.urls import re_path
from . import views
app_name = 'user'
urlpatterns = [
re_path(r'^index/$', views.index, name='index'),
re_path(r'^haha/$', views.haha, name='haha'),
re_path(r'^jump/$', views.jump, name='jump'),
]
| 1.59375
| 2
|
plate_yolov4.py
|
conspicio-ai/alpr
| 1
|
12777423
|
import sys, os
sys.path.append('yolov3_detector')
from yolov3_custom_helper import yolo_detector
from darknet import Darknet
sys.path.append('pytorch-YOLOv4')
from tool.darknet2pytorch import Darknet as DarknetYolov4
import argparse
import cv2,time
import numpy as np
from tool.plateprocessing import find_coordinates, plate_to_string, padder, get_color
from tool.utils import alphanumeric_segemntor,plot_boxes_cv2
from tool.torch_utils import *
import time
from utility_codes.tsv_converter import ConverterTSV
use_cuda = True
#################### PLATE ####################
cfg_v4 = 'pytorch-YOLOv4/cfg/yolo-obj.cfg'
weight_v4 = 'weights/plate.weights'
m = DarknetYolov4(cfg_v4)
m.load_weights(weight_v4)
num_classes = m.num_classes
class_names = ['plate']
print('Loading weights from %s... Done!' % (weight_v4))
if use_cuda:
m.cuda()
# m_alpha.cuda()
# yolo_vehicle.cuda()
vehicle_save_filename = 'tsv_files/plate_tester.tsv'
vehicle_writer = ConverterTSV(vehicle_save_filename,file_type='vehicle')
image_dir = 'SIH_hackathon/Detection_Day3/Day3'
image_files = os.listdir(image_dir)
image_files.sort()
OUTPUT_SIZE = (1280, 720)
for img_name in image_files:
frame = cv2.imread(os.path.join(image_dir, img_name))
h, w = frame.shape[0:2]
sized = cv2.resize(frame, (m.width, m.height))
sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
confidence = 0.2
boxes = do_detect(m, sized, confidence , 0.6, use_cuda)
result_img, cls_conf_plate, coordinates_all, labels = plot_boxes_cv2(frame, boxes[0],classes_to_detect=class_names,fontScale=0.5,thick=2, savename=False, class_names=class_names)
cls_conf_plate = float(cls_conf_plate)
for i,co in enumerate(coordinates_all):
print(co)
data = [img_name, co, labels[i]]
vehicle_writer.put_vehicle(img_name, co, 'plate')
# vehicle_writer.put_vehicle(img_loc, c, 'plate')
cv2.imshow('Image', result_img)
if cv2.waitKey(1) & 0xff == ord('q'):
break
# cv2.waitKey(0)
cv2.destroyAllWindows()
import pandas as pd
def merge_and_save(fp1, fp2, outfile_path):
tsv_file1 = pd.read_csv(fp1, sep='\t', header=0)
tsv_file2 = pd.read_csv(fp2, sep='\t', header=0)
merged = pd.concat([tsv_file1, tsv_file2])
outfile = merged.sort_values(by='Image').reset_index(drop=True)
outfile.to_csv(outfile_path, sep='\t', index=False)
merge_and_save('tsv_files/plate_tester.tsv', 'tsv_files/vehicle_tester.tsv', 'tsv_files/IvLabs_Detection_Day3.tsv')
| 2.21875
| 2
|
src/enginemonitor.py
|
BennyCarbajal/enginemonitor
| 0
|
12777424
|
#! /usr/bin/env python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, ningh"
__credits__ = [ "<NAME>" ]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = [ "<NAME>" ]
__email__ = [ "<EMAIL>" ]
__status__ = "Beta"
import subprocess, json, socket, psutil, os, wmi
from pymongo import MongoClient
class Engine( object ) :
"""docstring for Info"""
def __init__( self ):
self.client = MongoClient( 'mongodb://127.0.0.1:27017' )
self.db = self.client[ 'machine' ]
self.conn = wmi.WMI()
def getSize( self, bytes, suffix = 'B' ) :
"""
Return the bytes unit and suffix.
"""
factor = 1024
for unit in [ '', 'K', 'M', 'G', 'T', 'P' ]:
if bytes < factor:
return "{0} {1}{2}".format( bytes, unit, suffix )
bytes /= factor
def getIp( self ) :
"""
Return IP address.
"""
return socket.gethostbyname( socket.gethostname() )
def getUser( self ) :
"""
Return the current username.
"""
return os.environ.get( 'USERNAME' )
def getComputer( self ) :
"""
Return the current computername.
"""
return os.environ.get( 'COMPUTERNAME' )
def getCpu( self ) :
"""
Return the name of Processor.
"""
for pr in self.conn.Win32_Processor():
return pr.Name
def getCores( self ) :
"""
Return the physical cores and total cores.
"""
out = {
'PhysicalCores': psutil.cpu_count( logical=False ),
'TotalCores': psutil.cpu_count( logical=True ),
}
return out
def getRam( self ):
"""
Return the size of Ram Memory.
"""
mem = psutil.virtual_memory()
return self.getSize(mem.total)
def getBoard( self ):
"""
Return the motherboard name.
"""
cs = self.conn.Win32_ComputerSystem()[0]
return cs.Model
def getGpu( self ):
"""
Return a list of GPUs.
"""
out = []
for vc in self.conn.Win32_VideoController():
out.append(vc.Name)
return out
def getDisks( self ):
"""
Return a list of dictionaries.
"""
out = []
for ld in self.conn.Win32_logicaldisk() :
if ld.DriveType == 3 :
kind = 'Local Disk'
            elif ld.DriveType == 4 :
                kind = 'Network Drive'
            else :
                kind = 'Other'
inside = {
'device': ld.DeviceID,
'type': kind,
'provider': ld.ProviderName
}
try:
inside[ 'size' ] = self.getSize( int( ld.Size ) )
inside[ 'free' ] = self.getSize( int( ld.FreeSpace ) )
except Exception as e:
pass
out.append( inside )
return out
################################################
# By SubProcess #
################################################
def getSensorBySpecs( self, hwType, snsrType, filename='bySpecs' ) :
"""
By subprocess returns sensor information from the requested hardware.
"""
subprocess.check_output(
os.path.abspath( os.path.dirname( __file__ ) ) + "\\monitor\\GarboMonitor {0} {1} {2}".format(
hwType,
snsrType,
filename
),
shell = True
)
with open( "C:/bin/garbo/log/{}.json".format( filename ) ) as json_file:
data = json.load(json_file)
return data
def getSensorsByHardware( self, hwType, filename='byHardware' ) :
"""
By subprocess returns the information of all sensors of the requested hardware
"""
subprocess.check_output(
os.path.abspath( os.path.dirname( __file__ ) ) + "\\monitor\\GarboMonitor {0} {1}".format( hwType, filename ),
shell = True
)
with open( "C:/bin/garbo/log/{}.json".format( filename ) ) as json_file:
data = json.load(json_file)
return data
def getSensors( self, filename='sensors' ) :
"""
By subprocess returns the information of all sensors of each important hardware
"""
subprocess.check_output(
os.path.abspath( os.path.dirname( __file__ ) ) + "\\monitor\\GarboMonitor {}".format( filename ),
shell = True
)
with open( "C:/bin/garbo/log/{}.json".format( filename ) ) as json_file:
data = json.load( json_file )
return data
################################################
# By Service #
################################################
def getMonitorServiceBySpecs( self, hwType, snsrType ) :
"""
By service returns sensor information from the requested hardware.
"""
out = { 'name': '', 'type': '', 'sensors': [] }
try :
data = list( self.db.hardware.find( { 'type': hwType }, { '_id': 0 } ) )
for item in data :
out[ 'name' ] = item[ 'name' ]
out[ 'type' ] = item[ 'type' ]
for sensor in item['sensors'] :
if sensor['type'] == snsrType :
out['sensors'].append(sensor)
return out
except Exception as e :
return e
def getMonitorServiceByHardware( self, hwType ) :
"""
By service returns the information of all sensors of the requested hardware
"""
try :
byHw = list( self.db.hardware.find( { 'type': hwType }, { '_id': 0 } ) )
return byHw
except Exception as e :
return e
def getMonitorService( self ) :
"""
By service returns the information of all sensors of each important hardware
"""
try :
byHw = list( self.db.hardware.find( {}, { '_id': 0 } ) )
return byHw
except Exception as e :
return e
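# A minimal usage sketch, assuming a Windows host with WMI available and a
# local MongoDB instance on 127.0.0.1:27017 as expected by Engine.__init__:
if __name__ == '__main__' :
    engine = Engine()
    print( engine.getComputer(), engine.getUser(), engine.getIp() )
    print( engine.getCpu(), engine.getCores(), engine.getRam() )
    print( engine.getDisks() )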
| 2.296875
| 2
|
algo/mappo2/elements/agent.py
|
xlnwel/g2rl
| 1
|
12777425
|
from core.elements.agent import create_agent
| 1.070313
| 1
|
dag_executor/Extensions/AWS/__init__.py
|
GennadiiTurutin/dag_executor
| 0
|
12777426
|
from .SNS import SNS
from .SQS import SQS
from .S3 import S3
| 1.085938
| 1
|
nii/mapcore_api.py
|
tsukaeru/RDM-osf.io
| 11
|
12777427
|
# -*- coding: utf-8 -*-
#
# MAPCore class: mAP Core API handling
#
# @COPYRIGHT@
#
import sys
import time
import json
import logging
import hashlib
import requests
from urllib.parse import urlencode
from django.utils import timezone
from django.db import transaction
from osf.models.user import OSFUser
from website.settings import (MAPCORE_HOSTNAME,
MAPCORE_REFRESH_PATH,
MAPCORE_API_PATH,
MAPCORE_CLIENTID,
MAPCORE_SECRET)
#
# Global settings.
#
VERIFY = True # for requests.{get,post}(verify=VERIFY)
MAPCORE_API_MEMBER_LIST_BUG_WORKAROUND = False # 2019/5/24 fixed
MAPCORE_DEBUG = False
# unicode to utf-8
def utf8(s):
return s.encode('utf-8')
class MAPCoreLogger(object):
def __init__(self, logger):
self.logger = logger
def error(self, msg, *args, **kwargs):
self.logger.error('MAPCORE: ' + msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
self.logger.warning('MAPCORE: ' + msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
        self.logger.info('MAPCORE: ' + msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
self.logger.debug('MAPCORE: ' + msg, *args, **kwargs)
def setLevel(self, level=logging.INFO):
self.logger.setLevel(level=level)
class MAPCoreLoggerDebug(object):
def __init__(self, logger):
self.logger = logger
def error(self, msg, *args, **kwargs):
self.logger.error('MAPCORE_ERROR: ' + msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
self.logger.error('MAPCORE_WARNING: ' + msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
self.logger.error('MAPCORE_INFO:' + msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
self.logger.error('MAPCORE_DEBUG: ' + msg, *args, **kwargs)
def setLevel(self, level=logging.INFO):
self.logger.setLevel(level=level)
def mapcore_logger(logger):
if MAPCORE_DEBUG:
logger = MAPCoreLoggerDebug(logger)
else:
logger = MAPCoreLogger(logger)
return logger
def mapcore_api_disable_log(level=logging.CRITICAL):
logger.setLevel(level=level)
logger = mapcore_logger(logging.getLogger(__name__))
class MAPCoreException(Exception):
def __init__(self, mapcore, ext_message):
self.mapcore = mapcore
if ext_message is not None and mapcore is None:
super(MAPCoreException, self).__init__(
'ext_message={}'.format(ext_message))
else:
super(MAPCoreException, self).__init__(
'http_status_code={}, api_error_code={}, message={}, ext_message={}'.format(
mapcore.http_status_code, mapcore.api_error_code,
mapcore.error_message, ext_message))
def listing_group_member_is_not_permitted(self):
if self.mapcore.api_error_code == 206 and \
self.mapcore.error_message == 'Listing group member is not permitted':
return True
return False
def group_does_not_exist(self):
if self.mapcore.api_error_code == 208 and \
self.mapcore.error_message == 'You do not have access permission':
return True
return False
class MAPCoreTokenExpired(MAPCoreException):
def __init__(self, mapcore, ext_message):
self.caller = mapcore.user
super(MAPCoreTokenExpired, self).__init__(mapcore, ext_message)
def __str__(self):
if self.caller:
username = self.caller.username
else:
username = 'UNKNOWN USER'
return 'mAP Core Access Token (for {}) is expired'.format(username)
if MAPCORE_API_MEMBER_LIST_BUG_WORKAROUND:
OPEN_MEMBER_PRIVATE = 1
OPEN_MEMBER_PUBLIC = 0
OPEN_MEMBER_MEMBER_ONLY = 2
OPEN_MEMBER_DEFAULT = OPEN_MEMBER_MEMBER_ONLY
else:
OPEN_MEMBER_PRIVATE = 0
OPEN_MEMBER_PUBLIC = 1
OPEN_MEMBER_MEMBER_ONLY = 2
OPEN_MEMBER_DEFAULT = OPEN_MEMBER_PUBLIC
def mapcore_group_member_is_private(group_info):
return group_info['open_member'] == OPEN_MEMBER_PRIVATE
def mapcore_group_member_is_public(group_info):
return group_info['open_member'] == OPEN_MEMBER_PUBLIC
def mapcore_group_member_is_member_only(group_info):
return group_info['open_member'] == OPEN_MEMBER_MEMBER_ONLY
class MAPCore(object):
MODE_MEMBER = 0 # Ordinary member
MODE_ADMIN = 2 # Administrator member
user = False
http_status_code = None
api_error_code = None
error_message = None
#
# Constructor.
#
def __init__(self, user):
self.user = user
#
# Refresh access token.
#
def refresh_token0(self):
#logger.debug('MAPCore::refresh_token:')
url = MAPCORE_HOSTNAME + MAPCORE_REFRESH_PATH
basic_auth = (MAPCORE_CLIENTID, MAPCORE_SECRET)
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
params = {
'grant_type': 'refresh_token',
'refresh_token': self.user.map_profile.oauth_refresh_token
}
params = urlencode(params)
logger.debug('MAPCore::refresh_token: params=' + params)
r = requests.post(url, auth=basic_auth, headers=headers, data=params, verify=VERIFY)
if r.status_code != requests.codes.ok:
logger.info('MAPCore::refresh_token: Refreshing token failed: status_code=' + str(r.status_code) + ', user=' + str(self.user) + ', text=' + r.text)
return False
j = json.loads(r.content)
if 'error' in j:
logger.info('MAPCore::refresh_token: Refreshing token failed: ' + j['error'] + ', user=' + str(self.user))
if 'error_description' in j:
logger.info('MAPCore::refresh_token: Refreshing token failed: ' + j['error_description'] + ', user=' + str(self.user))
return False
logger.debug('MAPCore::refresh_token: SUCCESS: user=' + str(self.user))
#logger.debug(' New access_token: ' + j['access_token'])
#logger.debug(' New refresh_token: ' + j['refresh_token'])
self.user.map_profile.oauth_access_token = j['access_token']
self.user.map_profile.oauth_refresh_token = j['refresh_token']
#
# Update database.
#
self.user.map_profile.oauth_refresh_time = timezone.now()
self.user.map_profile.save()
self.user.save()
return True
def refresh_token(self):
try:
self.lock_refresh()
return self.refresh_token0()
finally:
self.unlock_refresh()
#
# Lock refresh process.
#
def lock_refresh(self):
while True:
#print('before transaction.atomic')
with transaction.atomic():
#print('transaction.atomic start')
u = OSFUser.objects.select_for_update().get(username=self.user.username)
if not u.mapcore_refresh_locked:
#print('before lock')
#time.sleep(5) # for debug
u.mapcore_refresh_locked = True
u.save()
logger.debug('OSFUser(' + u.username + ').mapcore_refresh_locked=True')
return
#print('cannot get lock, sleep 1')
time.sleep(1)
#
# Unlock refresh process.
#
def unlock_refresh(self):
with transaction.atomic():
u = OSFUser.objects.select_for_update().get(username=self.user.username)
u.mapcore_refresh_locked = False
u.save()
logger.debug('OSFUser(' + u.username + ').mapcore_refresh_locked=False')
#
# GET|POST|DELETE for methods.
#
def req_api(self, method_name, args, requests_method, path, parameters):
logger.debug('MAPCore(user={}).{}{}'.format(self.user.username, method_name, str(args)))
if self.user.map_profile is None:
# Access token is not issued yet.
raise self.get_token_expired()
url = MAPCORE_HOSTNAME + MAPCORE_API_PATH + path
count = 0
while count < 2: # retry once
time_stamp, signature = self.calc_signature()
if requests_method == requests.get or \
requests_method == requests.delete:
payload = {'time_stamp': time_stamp, 'signature': signature}
if parameters:
for k, v in parameters.items():
payload[k] = v
headers = {'Authorization': 'Bearer '
+ self.user.map_profile.oauth_access_token}
r = requests_method(url, headers=headers,
params=payload, verify=VERIFY)
elif requests_method == requests.post:
params = {}
params['request'] = {
'time_stamp': time_stamp,
'signature': signature
}
params['parameter'] = parameters
params = json.dumps(params).encode('utf-8')
headers = {
'Authorization':
'Bearer ' + self.user.map_profile.oauth_access_token,
'Content-Type': 'application/json; charset=utf-8',
'Content-Length': str(len(params))
}
r = requests_method(url, headers=headers,
data=params, verify=VERIFY)
else:
raise Exception('unknown requests_method')
j = self.check_result(r, method_name, args)
if j is not False:
# Function succeeded.
return j
if self.is_token_expired(r, method_name, args):
if self.refresh_token() is False:
# Automatic refreshing token failed.
raise self.get_token_expired()
else:
# Any other API error.
raise self.get_exception()
count += 1
# Could not refresh token after retries (may not occur).
raise self.get_token_expired()
#
# Get API version.
#
def get_api_version(self):
method_name = sys._getframe().f_code.co_name
return self.req_api(method_name, (), requests.get, '/version', None)
#
# Get group information by group name. (unused by mapcore.py)
#
def get_group_by_name(self, group_name):
method_name = sys._getframe().f_code.co_name
parameters = {'searchWord': group_name.encode('utf-8')}
path = '/mygroup'
j = self.req_api(method_name, (group_name,),
requests.get, path, parameters)
if len(j['result']['groups']) == 0:
self.error_message = 'Group not found'
logger.debug(' {}'.format(self.error_message))
# Group not found.
raise self.get_exception()
return j
#
# Get group information by group key.
#
def get_group_by_key(self, group_key):
method_name = sys._getframe().f_code.co_name
path = '/group/' + group_key
j = self.req_api(method_name, (group_key,), requests.get, path, None)
if len(j['result']['groups']) == 0:
self.error_message = 'Group not found'
logger.debug(' {}'.format(self.error_message))
raise self.get_exception()
return j
#
# delete group by group key.
#
def delete_group(self, group_key):
method_name = sys._getframe().f_code.co_name
path = '/group/' + group_key
j = self.req_api(method_name, (group_key,),
requests.delete, path, None)
return j
#
# Create new group, and make it public, active and open_member.
#
def create_group(self, group_name):
method_name = sys._getframe().f_code.co_name
path = '/group'
parameters = {
'group_name': group_name,
'group_name_en': group_name
}
j = self.req_api(method_name, (group_name,),
requests.post, path, parameters)
group_key = j['result']['groups'][0]['group_key']
logger.debug(' New group has been created (group_key=' + group_key + ')')
# to set description (Empty description is invalid on CG)
j = self.edit_group(group_key, group_name, group_name)
return j
#
# Change group properties.
#
def edit_group(self, group_key, group_name, introduction):
method_name = sys._getframe().f_code.co_name
path = '/group/' + group_key
parameters = {
'group_name': group_name,
'group_name_en': '',
'introduction': introduction,
'introduction_en': '',
'public': 1,
'active': 1,
'open_member': OPEN_MEMBER_DEFAULT
}
j = self.req_api(method_name, (group_key, group_name, introduction),
requests.post, path, parameters)
return j
#
# Get member of group.
#
def get_group_members(self, group_key):
method_name = sys._getframe().f_code.co_name
path = '/member/' + group_key
parameters = None
j = self.req_api(method_name, (group_key,),
requests.get, path, parameters)
return j
#
# Get joined group list.
#
def get_my_groups(self):
method_name = sys._getframe().f_code.co_name
path = '/mygroup'
parameters = None
j = self.req_api(method_name, (), requests.get, path, parameters)
return j
#
# Add to group.
#
def add_to_group(self, group_key, eppn, admin):
method_name = sys._getframe().f_code.co_name
path = '/member/' + group_key + '/' + eppn
parameters = {
'admin': admin
}
j = self.req_api(method_name, (group_key, eppn, admin),
requests.post, path, parameters)
return j
#
# Remove from group.
#
def remove_from_group(self, group_key, eppn):
method_name = sys._getframe().f_code.co_name
path = '/member/' + group_key + '/' + eppn
parameters = None
j = self.req_api(method_name, (group_key, eppn),
requests.delete, path, parameters)
return j
#
# Edit member.
#
def edit_member(self, group_key, eppn, admin):
#logger.debug('MAPCore::edit_member (group_key=' + group_key + ', eppn=' + eppn + ', admin=' + str(admin) + ')')
# NOTE: If error occurs, an exception will be thrown.
j = self.remove_from_group(group_key, eppn)
j = self.add_to_group(group_key, eppn, admin)
return j
#
# Get MAPCoreException.
#
def get_exception(self):
return MAPCoreException(self, None)
#
# Get MAPCoreTokenExpired.
#
def get_token_expired(self):
return MAPCoreTokenExpired(self, None)
#
# Calculate API signature.
#
def calc_signature(self):
time_stamp = str(int(time.time()))
s = MAPCORE_SECRET + self.user.map_profile.oauth_access_token + time_stamp
digest = hashlib.sha256(s.encode('utf-8')).hexdigest()
return time_stamp, digest
WWW_AUTHENTICATE = 'WWW-Authenticate'
MSG_ACCESS_TOKEN_EXPIRED = 'Access token expired'
MSG_INVALID_ACCESS_TOKEN = 'Invalid access token'
#
# Check API result status.
# If any error occurs, a False will be returned.
#
def check_result(self, result, method_name, args):
self.http_status_code = result.status_code
self.api_error_code = None
self.error_message = ''
if result.status_code != requests.codes.ok:
if self.is_token_expired(result, method_name, args):
self.error_message = self.MSG_ACCESS_TOKEN_EXPIRED
else:
self.error_message = result.headers.get(self.WWW_AUTHENTICATE)
if not self.error_message:
self.error_message = result.text
logger.info('MAPCore(user={},eppn={}).{}{}:check_result: status_code={}, error_msg={}'.format(self.user.username, self.user.eppn, method_name, args, result.status_code, self.error_message))
return False
#logger.debug('result.encoding={}'.format(result.encoding))
j = json.loads(result.content)
if j['status']['error_code'] != 0:
self.api_error_code = j['status']['error_code']
self.error_message = j['status']['error_msg']
logger.info('MAPCore(user={},eppn={}).{}{}:check_result: error_code={}, error_msg={}'.format(self.user.username, self.user.eppn, method_name, args, self.api_error_code, self.error_message))
return False
return j
def is_token_expired(self, result, method_name, args):
if result.status_code != requests.codes.ok:
s = result.headers.get(self.WWW_AUTHENTICATE)
if s is None:
return False
#if s.find(self.MSG_ACCESS_TOKEN_EXPIRED) != -1 \
# or s.find(self.MSG_INVALID_ACCESS_TOKEN) != -1:
if result.status_code == 401: # Unauthorized
logger.debug('MAPCore(user={},eppn={}).{}{}:is_token_expired: status_code={}, {}={}'.format(self.user.username, self.user.eppn, method_name, args, result.status_code, self.WWW_AUTHENTICATE, self.error_message))
return True
else:
return False
return False
def encode_recursive(o, encoding='utf-8'):
if isinstance(o, dict):
        return {encode_recursive(key): encode_recursive(val) for key, val in o.items()}
elif isinstance(o, list):
return [encode_recursive(elem) for elem in o]
elif isinstance(o, str):
return o.encode(encoding)
else:
return o
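# A short illustration of the request signing implemented in
# MAPCore.calc_signature() above: the signature is the SHA-256 hex digest of
# (client secret + current access token + unix time stamp). The argument
# defaults are placeholders, not real credentials.
def example_signature(secret='CLIENT_SECRET', access_token='ACCESS_TOKEN'):
    time_stamp = str(int(time.time()))
    digest = hashlib.sha256((secret + access_token + time_stamp).encode('utf-8')).hexdigest()
    return time_stamp, digest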
| 2.015625
| 2
|
fileReader.py
|
EthanCota/ScholarshipRipper
| 1
|
12777428
|
# Used to initialize the list of scholarship codes from a text file.
with open('TheUltimateScholarshipBook2019.txt', 'r') as f, open('ScholarshipCodes.txt', 'w') as g:
    while True:
        x = f.readline()
        if not x:
            break
        if x.startswith("Exclusive:"):
            g.write(x)
            print("Printed " + x)
| 3.75
| 4
|
tests/template_backends/test_django.py
|
spapas/django
| 0
|
12777429
|
<gh_stars>0
from django.template.backends.django import DjangoTemplates
from django.test import RequestFactory
from template_tests.test_response import test_processor_name
from .test_dummy import TemplateStringsTests
class DjangoTemplatesTests(TemplateStringsTests):
engine_class = DjangoTemplates
backend_name = 'django'
def test_context_has_priority_over_template_context_processors(self):
# See ticket #23789.
engine = DjangoTemplates({
'DIRS': [],
'APP_DIRS': False,
'NAME': 'django',
'OPTIONS': {
'context_processors': [test_processor_name],
},
})
template = engine.from_string('{{ processors }}')
request = RequestFactory().get('/')
# Check that context processors run
content = template.render({}, request)
self.assertEqual(content, 'yes')
# Check that context overrides context processors
content = template.render({'processors': 'no'}, request)
self.assertEqual(content, 'no')
| 2.09375
| 2
|
octant/common/base.py
|
Orange-OpenSource/octant
| 4
|
12777430
|
# Copyright 2018 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""General purpose datastructures used by octant"""
class Z3ParseError(Exception):
    """Raised on syntax errors at end of parsing."""
    def __init__(self, *args, **kwargs):
        super(Z3ParseError, self).__init__(*args, **kwargs)
class Z3TypeError(Exception):
    """Raised for a theory that is not well typed"""
    def __init__(self, *args, **kwargs):
        super(Z3TypeError, self).__init__(*args, **kwargs)
class Z3SourceError(Exception):
    """Raised when a source or its description is wrong"""
    def __init__(self, *args, **kwargs):
        super(Z3SourceError, self).__init__(*args, **kwargs)
class Z3NotWellFormed(Exception):
    """Raised for a theory that does not respect well-formedness rules"""
    def __init__(self, *args, **kwargs):
        super(Z3NotWellFormed, self).__init__(*args, **kwargs)
| 2.171875
| 2
|
plugins/dokuwiki.py
|
antoniotrento/wig
| 3
|
12777431
|
<filename>plugins/dokuwiki.py<gh_stars>1-10
from classes.specializedRequesters import CMSReqMD5, CMSReqString, CMSReqRegex
class DokuWikiMD5(CMSReqMD5):
def __init__(self, host, cache, results):
super().__init__(host, cache, results)
self.name = "DokuWiki"
self.prefix = ["/dokuwiki", ""]
self.data_file = "data/cms/md5/dokuwiki.json"
class DokuWikiString(CMSReqString):
def __init__(self, host, cache, results):
super().__init__(host, cache, results)
self.name = "DokuWiki"
self.prefix = ["/dokuwiki", ""]
self.data_file = "data/cms/string/dokuwiki.json"
class DokuWikiRegex(CMSReqRegex):
def __init__(self, host, cache, results):
super().__init__(host, cache, results)
self.name = "DokuWiki"
self.prefix = ["/dokuwiki", ""]
self.data_file = "data/cms/regex/dokuwiki.json"
def get_instances(host, cache, results):
return [
DokuWikiMD5(host, cache, results),
DokuWikiString(host, cache, results),
DokuWikiRegex(host, cache, results),
]
| 2.390625
| 2
|
arekit/contrib/experiment_rusentrel/labels/formatters/rusentiframes.py
|
nicolay-r/AREk
| 18
|
12777432
|
from arekit.contrib.experiment_rusentrel.labels.types import ExperimentPositiveLabel, ExperimentNegativeLabel
from arekit.contrib.source.rusentiframes.labels_fmt import \
RuSentiFramesEffectLabelsFormatter, \
RuSentiFramesLabelsFormatter
class ExperimentRuSentiFramesLabelsFormatter(RuSentiFramesLabelsFormatter):
@classmethod
def _positive_label_type(cls):
return ExperimentPositiveLabel
@classmethod
def _negative_label_type(cls):
return ExperimentNegativeLabel
class ExperimentRuSentiFramesEffectLabelsFormatter(RuSentiFramesEffectLabelsFormatter):
@classmethod
def _positive_label_type(cls):
return ExperimentPositiveLabel
@classmethod
def _negative_label_type(cls):
return ExperimentNegativeLabel
| 1.8125
| 2
|
demo/graph_tutorial/tutorial/views.py
|
dschien/msgraph-django-app
| 0
|
12777433
|
import openpyxl
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from datetime import datetime, timedelta
from dateutil import tz, parser
from tutorial.auth_helper import get_sign_in_flow, get_token_from_code, store_user, remove_user_and_token, get_token
from tutorial.graph_helper import *
import dateutil.parser
# <HomeViewSnippet>
def home(request):
context = initialize_context(request)
return render(request, 'tutorial/home.html', context)
# </HomeViewSnippet>
# <InitializeContextSnippet>
def initialize_context(request):
context = {}
# Check for any errors in the session
error = request.session.pop('flash_error', None)
if error != None:
context['errors'] = []
context['errors'].append(error)
# Check for user in the session
context['user'] = request.session.get('user', {'is_authenticated': False})
return context
# </InitializeContextSnippet>
# <SignInViewSnippet>
def sign_in(request):
# Get the sign-in flow
flow = get_sign_in_flow()
# Save the expected flow so we can use it in the callback
try:
request.session['auth_flow'] = flow
except Exception as e:
print(e)
# Redirect to the Azure sign-in page
return HttpResponseRedirect(flow['auth_uri'])
# </SignInViewSnippet>
# <SignOutViewSnippet>
def sign_out(request):
# Clear out the user and token
remove_user_and_token(request)
return HttpResponseRedirect(reverse('home'))
# </SignOutViewSnippet>
# <CallbackViewSnippet>
def callback(request):
# Make the token request
result = get_token_from_code(request)
# Get the user's profile
# user = get_user(result['code'])
user = get_user(result['access_token'])
# Store user
store_user(request, user)
return HttpResponseRedirect(reverse('home'))
# </CallbackViewSnippet>
# <CalendarViewSnippet>
def calendar(request):
context = initialize_context(request)
user = context['user']
# Load the user's time zone
# Microsoft Graph can return the user's time zone as either
# a Windows time zone name or an IANA time zone identifier
# Python datetime requires IANA, so convert Windows to IANA
time_zone = get_iana_from_windows(user['timeZone'])
tz_info = tz.gettz(time_zone)
# Get midnight today in user's time zone
today = datetime.now(tz_info).replace(
hour=0,
minute=0,
second=0,
microsecond=0)
# Based on today, get the start of the week (Sunday)
if (today.weekday() != 6):
start = today - timedelta(days=today.isoweekday())
else:
start = today
end = start + timedelta(days=7)
token = get_token(request)
events = get_calendar_events(
token,
start.isoformat(timespec='seconds'),
end.isoformat(timespec='seconds'),
user['timeZone'])
if events:
# Convert the ISO 8601 date times to a datetime object
# This allows the Django template to format the value nicely
for event in events['value']:
event['start']['dateTime'] = parser.parse(event['start']['dateTime'])
event['end']['dateTime'] = parser.parse(event['end']['dateTime'])
context['events'] = events['value']
return render(request, 'tutorial/calendar.html', context)
# </CalendarViewSnippet>
# <NewEventViewSnippet>
def newevent(request):
context = initialize_context(request)
user = context['user']
if request.method == 'POST':
# Validate the form values
# Required values
if (not request.POST['ev-subject']) or \
(not request.POST['ev-start']) or \
(not request.POST['ev-end']):
context['errors'] = [
{'message': 'Invalid values', 'debug': 'The subject, start, and end fields are required.'}
]
return render(request, 'tutorial/newevent.html', context)
attendees = None
if request.POST['ev-attendees']:
attendees = request.POST['ev-attendees'].split(';')
body = request.POST['ev-body']
# Create the event
token = get_token(request)
create_event(
token,
request.POST['ev-subject'],
request.POST['ev-start'],
request.POST['ev-end'],
attendees,
request.POST['ev-body'],
user['timeZone'])
# Redirect back to calendar view
return HttpResponseRedirect(reverse('calendar'))
else:
# Render the form
return render(request, 'tutorial/newevent.html', context)
# print('hello')
# </NewEventViewSnippet>
def bulkevent(request):
context = initialize_context(request)
user = context['user']
if request.method == 'POST':
body = request.POST['ev-body']
if not request.POST['ev-subject']:
context['errors'] = [
{'message': 'Invalid values', 'debug': 'The subject, start, and end fields are required.'}
]
return render(request, 'tutorial/bulkevent.html', context)
excel_file = request.FILES["excel_file"]
# you may put validations here to check extension or file size
try:
data = read_excel(excel_file)
except Exception as e:
context['errors'] = [
{'message': 'Excel parsing failed', 'debug': 'Check the format of your file.'}
]
return render(request, 'tutorial/bulkevent.html', context)
results = []
for row in data:
start_date = row[1]
start_time = row[2]
group = row[3]
attendees = row[4:]
# '2021-05-08T11:56'
start_time = datetime.combine(dateutil.parser.parse(start_date).date(),
dateutil.parser.parse(start_time).time()
)
end_time = start_time + timedelta(minutes=int(request.POST['ev-duration']))
# Create the event
token = get_token(request)
res = create_event(
token,
request.POST['ev-subject'] + " " + group,
start_time.isoformat(),
end_time.isoformat(),
attendees,
request.POST['ev-body'],
user['timeZone'])
results.append({'result':res,'group': group})
# Redirect back to calendar view
context['messages'] = [
{'message': f'Group {res["group"]}', 'detail': res["result"].status_code} for res in results
]
return render(request, 'tutorial/bulkevent.html', context)
# return HttpResponseRedirect(reverse('calendar'))
else:
# Render the form
return render(request, 'tutorial/bulkevent.html', context)
# print('hello')
def read_excel(excel_file):
wb = openpyxl.load_workbook(excel_file)
# getting a particular sheet by name out of many sheets
worksheet = wb["schedule"]
# print(worksheet)
excel_data = list()
# iterating over the rows and
# getting value from each cell in row
for row in worksheet.iter_rows():
row_data = list()
for cell in row:
row_data.append(str(cell.value))
excel_data.append(row_data)
return excel_data
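# The "schedule" worksheet that bulkevent() reads is expected to hold one
# meeting per row: column 0 is ignored, column 1 the start date, column 2 the
# start time, column 3 the group name, and the remaining columns the attendee
# addresses. An illustrative row (values are placeholders):
#   1 | 2021-05-08 | 11:56 | Group A | alice@example.com | bob@example.com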
| 2.25
| 2
|
Beans 1.0/clear.py
|
Washiii/beans
| 1
|
12777434
|
<filename>Beans 1.0/clear.py
import platform
import os
sistem = platform.system()
def clear():
if sistem == "Linux" or "Darwin" and sistem != "Windows":
os.system('clear')
elif sistem == "Windows" and sistem != "Linux" or "Darwin":
os.system('cls')
else:
print('The OS detection has failed. Exiting...')
exit()
| 3
| 3
|
libs/plugin_loader.py
|
gradiuscypher/newsbot-py
| 0
|
12777435
|
import os
import json
import logging
import traceback
from importlib import import_module
class PluginLoader:
def __init__(self):
self.logger = logging.getLogger('newsbot.py')
self.actions = []
self.parsers = []
self.action_dir = 'actions'
self.parser_dir = 'parsers'
self.config = None
self.action_config = None
self.parser_config = None
def load_plugins(self, config):
self.config = config
self.action_config = json.loads(self.config.get('newsbot', 'action_plugins'))
self.parser_config = json.loads(self.config.get('newsbot', 'parser_plugins'))
# Load the action plugins
try:
count = 0
for plugin in self.action_config:
plugin_file = plugin + '.py'
location = os.path.join(self.action_dir, plugin_file)
if not os.path.isdir(location):
self.actions.append(import_module(self.action_dir + '.' + plugin))
count += 1
self.logger.info("Loaded {} actions.".format(count))
        except Exception:
self.logger.error(traceback.format_exc())
# Load the parser plugins
try:
count = 0
for plugin in self.parser_config:
plugin_file = plugin + '.py'
location = os.path.join(self.parser_dir, plugin_file)
if not os.path.isdir(location):
self.parsers.append(import_module(self.parser_dir + '.' + plugin))
count += 1
self.logger.info("Loaded {} parsers.".format(count))
        except Exception:
self.logger.error(traceback.format_exc())
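# A sketch of the configuration shape load_plugins() expects: a configparser
# object whose [newsbot] section stores the plugin lists as JSON arrays.
# The module names below are illustrative placeholders.
if __name__ == '__main__':
    import configparser
    example_config = configparser.ConfigParser()
    example_config['newsbot'] = {
        'action_plugins': '["example_action"]',
        'parser_plugins': '["example_parser"]',
    }
    PluginLoader().load_plugins(example_config)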
| 2.5
| 2
|
app/tests/v2/test_admin.py
|
salma-nyagaka/FastFoodFastApi
| 0
|
12777436
|
'''tests for admins endpoints'''
import json
from unittest import TestCase
from manage import Connection
from app import create_app
class TestOrders(TestCase):
'''loads up all confiugration settings'''
def setUp(self):
self.app = create_app("testing")
self.client = self.app.test_client()
with self.app.app_context():
Connection().drop()
Connection().create()
Connection().create_admin()
self.order_data = {
"name": "Burger",
"description": "Beef burger",
"price": 60}
def login(self):
""" test for loggin in """
login_data = {
"username": "Admin",
"password": "<PASSWORD>"
}
response = self.client.post(
"api/v2/auth/login",
data=json.dumps(login_data),
headers={'content-type': 'application/json'})
return response
def user_login(self):
""" test for signing up"""
signup_data = {
"username": "salmaa",
"email": "<EMAIL>",
"password": "<PASSWORD>",
"confirm_password": "<PASSWORD>"
}
self.client.post(
"api/v2/auth/signup",
data=json.dumps(signup_data),
headers={'content-type': 'application/json'}
)
login_data = {
"username": "salmaa",
"password": "<PASSWORD>"
}
response = self.client.post(
"api/v2/auth/login",
data=json.dumps(login_data),
headers={'content-type': 'application/json'}
)
return response
def get_token(self):
""" function to get user token """
response = self.login()
token = json.loads(response.data.decode('utf-8')).get('token', None)
return token
def get_user_token(self):
""" function to get user token """
response = self.user_login()
token = json.loads(response.data.decode('utf-8')).get('token', None)
return token
def test_place_new_menu(self):
''' Test to place an order '''
token = self.get_token()
order_data = {
"name": "Burger",
"description": "Beef burger",
"image": "Burger",
"price": 60
}
response = self.client.post(
"/api/v2/menu",
data=json.dumps(order_data),
headers={"content-type": "application/json",
'Authorization': 'Bearer {}'.format(token)}
)
response_data = json.loads(response.data.decode('utf-8'))
self.assertEqual(response_data['message'], "Food menu created", 201)
# def test_all_menu(self):
# '''Test get all menu'''
# token = self.get_token()
# response = self.client.post(
# "/api/v2/menu",
# data=json.dumps(self.order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)}
# )
# response = self.client.get(
# "/api/v2/menu",
# data=json.dumps(self.order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# print(response.data)
# self.assertEqual(response.status_code, 200)
def test_empty_menu(self):
'''Test get all menu'''
token = self.get_token()
response = self.client.get(
"/api/v2/menu",
data=json.dumps(self.order_data),
headers={"content-type": "application/json",
'Authorization': 'Bearer {}'.format(token)})
self.assertEqual(response.status_code, 404)
# def test_get_specific_menu(self):
# '''Test to get a specific menu'''
# token = self.get_token()
# order_data = {
# "name": "Burger",
# "description": "Beef burger",
# "price": 60
# }
# response = self.client.post(
# "/api/v2/menu",
# data=json.dumps(order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# response = self.client.get(
# "/api/v2/menu/1",
# data=json.dumps(self.order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# self.assertEqual(response.status_code, 200)
# def test_get_specific_order(self):
# '''Test to get a specific menu'''
# user_token = self.get_user_token()
# token = self.get_token()
# self.client.post(
# "/api/v2/menu",
# data=json.dumps(self.order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# data = {
# 'name': 'Chicken'
# }
# response = self.client.post(
# "/api/v2/users/orders/1",
# data=json.dumps(data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(user_token)})
# response = self.client.get(
# "/api/v2/orders/1",
# data=json.dumps(self.order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# self.assertEqual(response.status_code, 200)
def test_get_non_existing_menu(self):
'''Test to get a specific menu'''
token = self.get_token()
response = self.client.post(
"/api/v2/menu",
data=json.dumps(self.order_data),
headers={"content-type": "application/json",
'Authorization': 'Bearer {}'.format(token)})
response = self.client.get(
"/api/v2/menu/2331",
data=json.dumps(self.order_data),
headers={"content-type": "application/json",
'Authorization': 'Bearer {}'.format(token)})
self.assertEqual(response.status_code, 404)
# def test_update_order_status(self):
# '''Test to get a specific menu'''
# user_token = self.get_user_token()
# token = self.get_token()
# self.client.post(
# "/api/v2/menu",
# data=json.dumps(self.order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# data = {
# 'name': 'Burger'
# }
# status = {
# "status": "accept"
# }
# self.client.post(
# "/api/v2/users/orders/1",
# data=json.dumps(data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(user_token)})
# response = self.client.put(
# "/api/v2/update/order/1",
# data=json.dumps(status),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# self.assertEqual(response.status_code, 200)
| 3.03125
| 3
|
tests/testOrtho.py
|
Rombur/ParrLO
| 3
|
12777437
|
<filename>tests/testOrtho.py<gh_stars>1-10
#!/usr/bin/env python
import sys
import os
import subprocess
import string
print("Test Ortho...")
nargs=len(sys.argv)
mpicmd = sys.argv[1]+" "+sys.argv[2]+" "+sys.argv[3]
for i in range(4,nargs-2):
mpicmd = mpicmd + " "+sys.argv[i]
print("MPI run command: {}".format(mpicmd))
exe = sys.argv[nargs-2]
inp = sys.argv[nargs-1]
print("Input file: %s"%inp)
#run main code
command = "{} {} -c {}".format(mpicmd,exe,inp)
output = subprocess.check_output(command,shell=True)
#analyse standard output
lines=output.split(b'\n')
tol = 1.e-8
for line in lines:
#check orthogonality before orthogonalization
if line.count(b'orthogonality') and line.count(b'before'):
words=line.split()
print(words)
        delta = float(words[5])
print("Departure from orthogonality before orthogonalization = {}".format(delta))
if delta<100.*tol:
print("Departure from orthogonality before orthogonalization too small: {}".format(delta))
sys.exit(1)
#check orthogonality after orthogonalization
if line.count(b'orthogonality') and line.count(b'after'):
words=line.split()
        delta = float(words[5])
if delta>tol:
print("TEST FAILED: Orthogonalization not achieved!")
print("Departure from orthogonality: {}".format(delta))
sys.exit(1)
sys.exit(0)
| 2.734375
| 3
|
puller/__init__.py
|
adesso-mobile/puller
| 0
|
12777438
|
import hmac
from typing import Optional
from fastapi import FastAPI, Request
from .config import get_config
import logging
import os
import subprocess
import uvicorn
from shellescape import quote
app = FastAPI()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def run_hooks(repo_config, hook_type):
old_path = os.getcwd()
os.chdir(repo_config["path"])
for hook_file in repo_config.get("hooks", {}).get(hook_type, []):
subprocess.run(
command_preparation([hook_file], repo_config.get("executing_user"))
)
os.chdir(old_path)
def command_preparation(command, user):
if user is None:
return command
return ["su", user, "-s", "/bin/bash", "-c", " ".join([quote(c) for c in command])]
@app.post(path="/pull/{repo}")
async def pull(request: Request, repo: str):
c = get_config()
#X-Event-Key: diagnostics:ping
try:
event_key = request.headers["X-Event-Key"]
if event_key == 'diagnostics:ping':
return "ok"
except KeyError:
pass
try:
repo_config = c[repo]
except KeyError:
logger.error("Repo does not seem to be configured")
return
body = await request.body()
signature_local = hmac.new(
bytes(repo_config["shared_secret"], "UTF-8"), body, digestmod="SHA256"
).hexdigest()
signature_request = request.headers["X-Hub-Signature"].split("=")[1]
if signature_local != signature_request:
logger.error("Repo does not seem to be configured")
return
path = os.getcwd()
os.chdir(repo_config["path"])
if repo_config.get("git_reset"):
logging.info("Resetting the repository before pulling")
subprocess.run(
command_preparation(
["git", "reset", "--hard"], repo_config.get("executing_user")
)
)
pull_process = subprocess.run(
command_preparation(["git", "pull"], repo_config.get("executing_user"))
)
git_url_process = subprocess.run(
command_preparation(
["git", "config", "--get", "remote.origin.url"],
repo_config.get("executing_user"),
),
capture_output=True,
)
git_url = git_url_process.stdout.decode("UTF-8").split("\n")[0]
run_hooks(repo_config, "post_pull")
os.chdir(path)
if pull_process.returncode != 0 and repo_config.get("git_delete_if_pull_failed"):
subprocess.run(
command_preparation(
["rm", "-rf", repo_config["path"]], repo_config.get("executing_user")
)
)
subprocess.run(
command_preparation(
["git", "clone", git_url, repo_config["path"]],
repo_config.get("executing_user"),
)
)
pass
return {}
def start_server():
try:
port = int(os.environ["PULLER_PORT"])
    except (KeyError, ValueError):
port = 8000
uvicorn.run("puller:app", host="0.0.0.0", port=port, log_level="info")
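# A sketch of the per-repository mapping that get_config() is expected to
# return, based on the keys read above; paths, users and secrets below are
# placeholders.
EXAMPLE_CONFIG = {
    "myrepo": {
        "path": "/srv/myrepo",
        "shared_secret": "changeme",
        "executing_user": "deploy",            # optional: run git/hooks as this user
        "git_reset": True,                     # optional: hard-reset before pulling
        "git_delete_if_pull_failed": False,    # optional: re-clone on failed pull
        "hooks": {"post_pull": ["./deploy.sh"]},
    }
}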
| 2.3125
| 2
|
ahvl/generate/password.py
|
gardar/ahvl
| 4
|
12777439
|
#
# import modules
#
from ahvl.options.generate.password import OptionsGeneratePassword
from ahvl.helper import AhvlMsg, AhvlHelper
from passlib import pwd
#
# helper/message
#
msg = AhvlMsg()
hlp = AhvlHelper()
#
# GeneratePassword
#
class GeneratePassword:
def __init__(self, lookup_plugin):
# set lookup plugin
self.lookup_plugin = lookup_plugin
self.variables = lookup_plugin.variables
self.kwargs = lookup_plugin.kwargs
# set options
self.opts = OptionsGeneratePassword(lookup_plugin)
def generate(self):
# password or passphrase
if self.opts.get('pwd_type') == "phrase":
passwd = pwd.genphrase(entropy=self.opts.get('pwd_entropy'),
length=self.opts.get('pwd_length'),
returns=None,
words=self.opts.get('pwd_words'),
wordset=self.opts.get('pwd_wordset'),
sep=self.opts.get('pwd_sep'))
else:
passwd = pwd.genword(entropy=self.opts.get('pwd_entropy'),
length=self.opts.get('pwd_length'),
returns=None,
chars=self.opts.get('pwd_words'),
charset=self.opts.get('pwd_charset'))
# return result
return passwd
| 2.546875
| 3
|
setka/pipes/SaveResult.py
|
SlinkoIgor/setka
| 0
|
12777440
|
<reponame>SlinkoIgor/setka
from .Pipe import Pipe
import os
import torch
class SaveResult(Pipe):
'''
pipe for saving predictions of the model. The results are
stored in a directory ```predictions``` and in directory specified in
```trainer._predictions_dir```. Batches are processed with the
specified function ```f```. The directory is flushed during the
```__init__``` and the result is saved when the batch is
finished (when after_batch is triggered).
Args:
f (callable): function to process the predictions.
dir (string): location where the predictions will be saved
'''
def __init__(self,
f=None,
dir='./'):
self.f = f
self.index = 0
self.dir = dir
self.root_dir = os.path.join(self.dir, './predictions')
if not os.path.exists(self.root_dir):
os.makedirs(self.root_dir)
@staticmethod
def get_one(input, item_index):
if isinstance(input, (list, tuple)):
one = []
for list_index in range(len(input)):
one.append(input[list_index][item_index])
return one
else:
one = input[item_index]
return one
def after_batch(self):
if self.trainer._mode == 'test':
res = {}
for index in range(len(self.trainer._ids)):
one_input = self.get_one(self.trainer._input, index)
one_output = self.get_one(self.trainer._output, index)
res[self.trainer._ids[index]] = one_output
if self.f is not None:
res[self.trainer._ids[index]] = self.f(
one_input,
one_output)
torch.save(res, os.path.join(self.root_dir, str(self.index) + '.pth.tar'))
self.index += 1
| 2.515625
| 3
|
MLP_InSample_UTD.py
|
IdeasLabUT/EDA-Artifact-Detection
| 10
|
12777441
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 23 12:03:59 2017
@author: Kevin
"""
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import LeaveOneGroupOut,GridSearchCV
dataPath = 'UTDallas/'
dataName = 'UTD'
nJobs = 12 # Number of cores to use
# Load feature matrices, labels, and groups (denoting which labeled time
# segment each row of the feature matrix comes from)
featuresAll = np.loadtxt(dataPath+dataName+'_all.csv',delimiter=',')
featuresAcc = np.loadtxt(dataPath+dataName+'_acc.csv',delimiter=',')
featuresEda = np.loadtxt(dataPath+dataName+'_eda.csv',delimiter=',')
labels = np.loadtxt(dataPath+dataName+'_label.csv')
groups = np.loadtxt(dataPath+dataName+'_groups.csv')
# Mask excluding subjects 5, 17, and 18 (the subjects with no MAs) from grid search
includeRowsTrain = np.logical_and(
np.logical_and(np.where(groups!=5,True,False),
np.where(groups!=17,True,False)),np.where(groups!=18,True,False))
# Leave-one-group-out cross-validation
cv = LeaveOneGroupOut()
# Parameter tuning by grid search
solver='lbfgs'
activation='relu'
regParam = 10.0**np.arange(-3,5)
# Comment out one of the choices below (either 1 or 2 hidden layers)
# 1 hidden layer
hiddenLayerSizes = 2**np.arange(0,8)
"""
# 2 hidden layers
hidden1,hidden2 = np.meshgrid(2**np.arange(0,8),2**np.arange(0,8))
hiddenLayerSizes = np.reshape(np.stack([hidden1,hidden2]),
(2,np.size(hidden1))).T.tolist()
"""
parameters = {'alpha': regParam,
'hidden_layer_sizes': hiddenLayerSizes}
gsAll = GridSearchCV(MLPClassifier(solver=solver,activation=activation),
parameters,'roc_auc',n_jobs=nJobs,cv=cv,refit=False,
verbose=1)
gsAll.fit(featuresAll[includeRowsTrain,:],labels[includeRowsTrain],
groups[includeRowsTrain])
bestAlphaAll = gsAll.best_params_['alpha']
bestHiddenSizesAll = gsAll.best_params_['hidden_layer_sizes']
gsAcc = GridSearchCV(MLPClassifier(solver=solver,activation=activation),
parameters,'roc_auc',n_jobs=nJobs,cv=cv,refit=False,
verbose=1)
gsAcc.fit(featuresAcc[includeRowsTrain,:],labels[includeRowsTrain],
groups[includeRowsTrain])
bestAlphaAcc = gsAcc.best_params_['alpha']
bestHiddenSizesAcc = gsAcc.best_params_['hidden_layer_sizes']
gsEda = GridSearchCV(MLPClassifier(solver=solver,activation=activation),
parameters,'roc_auc',n_jobs=nJobs,cv=cv,refit=False,
verbose=1)
gsEda.fit(featuresEda[includeRowsTrain,:],labels[includeRowsTrain],
groups[includeRowsTrain])
bestAlphaEda = gsEda.best_params_['alpha']
bestHiddenSizesEda = gsEda.best_params_['hidden_layer_sizes']
predAll = np.zeros(np.shape(labels))
predAcc = np.zeros(np.shape(labels))
predEda = np.zeros(np.shape(labels))
for train, test in cv.split(featuresAll,labels,groups):
mlpAll = MLPClassifier(hidden_layer_sizes=bestHiddenSizesAll,
solver=solver,alpha=bestAlphaAll)
mlpAll.fit(featuresAll[train,:],labels[train])
predAll[test] = mlpAll.predict_proba(featuresAll[test,:])[:,1]
mlpAcc = MLPClassifier(hidden_layer_sizes=bestHiddenSizesAcc,
solver=solver,alpha=bestAlphaAcc)
mlpAcc.fit(featuresAcc[train,:],labels[train])
predAcc[test] = mlpAcc.predict_proba(featuresAcc[test,:])[:,1]
mlpEda = MLPClassifier(hidden_layer_sizes=bestHiddenSizesEda,
solver=solver,alpha=bestAlphaEda)
mlpEda.fit(featuresEda[train,:],labels[train])
predEda[test] = mlpEda.predict_proba(featuresEda[test,:])[:,1]
# Save the scores for further analysis
#np.save('MLPpredAllScores_UTD',predAll)
#np.save('MLPpredAccScores_UTD',predAcc)
#np.save('MLPpredEdaScores_UTD',predEda)
print('MLP AUC ALL: %f (%s)' % (roc_auc_score(labels,predAll),gsAll.best_params_))
print('MLP AUC ACC: %f (%s)' % (roc_auc_score(labels,predAcc),gsAcc.best_params_))
print('MLP AUC EDA: %f (%s)' % (roc_auc_score(labels,predEda),gsEda.best_params_))
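# Illustrative sketch (toy data, not part of this analysis): LeaveOneGroupOut
# holds each group out exactly once, which is how predAll/predAcc/predEda are
# filled with out-of-fold scores above.
#
#   Xt = np.arange(12).reshape(6, 2)
#   yt = np.array([0, 1, 0, 1, 0, 1])
#   gt = np.array([1, 1, 2, 2, 3, 3])
#   for tr, te in LeaveOneGroupOut().split(Xt, yt, gt):
#       print(gt[te][0], tr, te)   # held-out group, train rows, test rows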
| 2.4375
| 2
|
settings/development.py
|
GhalebKhaled/fb-bot-test
| 0
|
12777442
|
<gh_stars>0
from __future__ import unicode_literals
from .base import *
WSGI_APPLICATION = 'wsgi.heroku.application'
ADMIN_MEDIA_PREFIX = ''.join([STATIC_URL, 'admin/'])
import dj_database_url
DATABASE_URL = os.environ['DATABASE_URL']
DATABASES = {
'default': dj_database_url.parse(DATABASE_URL),
}
DATABASES['default']['CONN_MAX_AGE'] = None
SECRET_KEY = os.environ['SECRET_KEY']
CONFIGURED_ALLOWED_HOSTS = os.environ['ALLOWED_HOSTS'].split(',')
for host in CONFIGURED_ALLOWED_HOSTS:
if host:
ALLOWED_HOSTS.append(host)
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# djangosecure settings
SECURE_FRAME_DENY = True
SECURE_HSTS_SECONDS = 31536000  # must be an integer number of seconds (one year here), not a bool
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_SSL_REDIRECT = True
SECURE_CONTENT_TYPE_NOSNIFF = True
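# Hedged sketch (illustrative URL, not a real credential): dj_database_url.parse
# turns a database URL into a Django DATABASES entry, e.g.
#
#   cfg = dj_database_url.parse("postgres://user:secret@localhost:5432/app")
#   # cfg contains keys such as ENGINE, NAME, USER, PASSWORD, HOST and PORT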
| 1.796875
| 2
|
renderer/render_utils.py
|
archonic/frankmocap
| 1,612
|
12777443
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
# vertices: frames x meshVerNum x 3
# trifaces: facePolygonNum x 3 = 22800 x 3
def ComputeNormal(vertices, trifaces):
if vertices.shape[0] > 5000:
print('ComputeNormal: Warning: too big to compute {0}'.format(vertices.shape) )
return
#compute vertex Normals for all frames
U = vertices[:,trifaces[:,1],:] - vertices[:,trifaces[:,0],:] #frames x faceNum x 3
V = vertices[:,trifaces[:,2],:] - vertices[:,trifaces[:,1],:] #frames x faceNum x 3
originalShape = U.shape #remember: frames x faceNum x 3
U = np.reshape(U, [-1,3])
V = np.reshape(V, [-1,3])
faceNormals = np.cross(U,V) #frames x 13776 x 3
from sklearn.preprocessing import normalize
if np.isnan(np.max(faceNormals)):
        print('ComputeNormal: Warning: nan detected in face normals')
return
faceNormals = normalize(faceNormals)
faceNormals = np.reshape(faceNormals, originalShape)
if False: #Slow version
vertex_normals = np.zeros(vertices.shape) #(frames x 11510) x 3
for fIdx, vIdx in enumerate(trifaces[:,0]):
vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
for fIdx, vIdx in enumerate(trifaces[:,1]):
vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
for fIdx, vIdx in enumerate(trifaces[:,2]):
vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
else: #Faster version
# Computing vertex normals, much faster (and obscure) replacement
index = np.vstack((np.ravel(trifaces), np.repeat(np.arange(len(trifaces)), 3))).T
index_sorted = index[index[:,0].argsort()]
vertex_normals = np.add.reduceat(faceNormals[:,index_sorted[:, 1],:][0],
np.concatenate(([0], np.cumsum(np.unique(index_sorted[:, 0],
return_counts=True)[1])[:-1])))[None, :]
vertex_normals = vertex_normals.astype(np.float64)
originalShape = vertex_normals.shape
vertex_normals = np.reshape(vertex_normals, [-1,3])
vertex_normals = normalize(vertex_normals)
vertex_normals = np.reshape(vertex_normals,originalShape)
return vertex_normals
def ComputeNormal_gpu(vertices, trifaces):
import torch
import torch.nn.functional as F
if vertices.shape[0] > 5000:
print('ComputeNormal: Warning: too big to compute {0}'.format(vertices.shape) )
return
#compute vertex Normals for all frames
#trifaces_cuda = torch.from_numpy(trifaces.astype(np.long)).cuda()
vertices_cuda = torch.from_numpy(vertices.astype(np.float32)).cuda()
U_cuda = vertices_cuda[:,trifaces[:,1],:] - vertices_cuda[:,trifaces[:,0],:] #frames x faceNum x 3
V_cuda = vertices_cuda[:,trifaces[:,2],:] - vertices_cuda[:,trifaces[:,1],:] #frames x faceNum x 3
originalShape = list(U_cuda.size()) #remember: frames x faceNum x 3
U_cuda = torch.reshape(U_cuda, [-1,3])#.astype(np.float32)
V_cuda = torch.reshape(V_cuda, [-1,3])#.astype(np.float32)
faceNormals = U_cuda.cross(V_cuda)
faceNormals = F.normalize(faceNormals,dim=1)
faceNormals = torch.reshape(faceNormals, originalShape)
# trifaces has duplicated vertex index, so cannot be parallazied
# vertex_normals = torch.zeros(vertices.shape,dtype=torch.float32).cuda() #(frames x 11510) x 3
# for fIdx, vIdx in enumerate(trifaces[:,0]):
# vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
# for fIdx, vIdx in enumerate(trifaces[:,1]):
# vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
# for fIdx, vIdx in enumerate(trifaces[:,2]):
# vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
# Computing vertex normals, much faster (and obscure) replacement
index = np.vstack((np.ravel(trifaces), np.repeat(np.arange(len(trifaces)), 3))).T
index_sorted = index[index[:,0].argsort()]
vertex_normals = np.add.reduceat(faceNormals[:,index_sorted[:, 1],:][0],
np.concatenate(([0], np.cumsum(np.unique(index_sorted[:, 0],
return_counts=True)[1])[:-1])))[None, :]
vertex_normals = torch.from_numpy(vertex_normals).float().cuda()
vertex_normals = F.normalize(vertex_normals,dim=2)
vertex_normals = vertex_normals.data.cpu().numpy() #(batch, chunksize, dim)
return vertex_normals
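# Minimal self-check sketch (toy two-triangle mesh, not from frankmocap):
# demonstrates the sort + reduceat trick used above for per-vertex accumulation
# of face normals, with the leading frame dimension dropped for brevity.
if __name__ == "__main__":
    toy_faces = np.array([[0, 1, 2], [0, 2, 3]])
    toy_face_normals = np.array([[0., 0., 1.], [0., 1., 0.]])
    idx = np.vstack((np.ravel(toy_faces), np.repeat(np.arange(len(toy_faces)), 3))).T
    idx = idx[idx[:, 0].argsort()]
    starts = np.concatenate(([0], np.cumsum(np.unique(idx[:, 0], return_counts=True)[1])[:-1]))
    per_vertex = np.add.reduceat(toy_face_normals[idx[:, 1]], starts)
    # vertex 0 belongs to both faces, so its (unnormalized) normal is their sum
    assert np.allclose(per_vertex[0], [0., 1., 1.])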
| 2.234375
| 2
|
utils/remove_pixel_values.py
|
kkahatapitiya/occlusion_removal
| 2
|
12777444
|
import os
import numpy as np
import cv2
import sys
#sys.path.insert(0, '/home/kumarak/Desktop/campus_temp/pred2/')
#import get_dataset_colormap
read="./all_at_100_nocol/"
gtread=open("./thinglabels.txt").readlines()
gt={}
#print(gtread)
for i in gtread:
gt[int(i.split(':')[0])]=i.split(':')[1][1:-1]
#print(gt)
#map=get_dataset_colormap.create_label_colormap()
#list=[(map[i],i) for i in range(0,len(map))]
results = []  # (filename, class names) pairs; avoids shadowing the built-in list
for filename in os.listdir(read):
#print(filename)
if filename.endswith('.png'):
img=cv2.imread(read+filename)
classes=[gt[i] for i in np.unique(img) if i!=255]
        results.append((filename, classes))
for i in sorted(results):
print(i)
| 2.59375
| 3
|
backend/data/__init__.py
|
TiFu/runepicker-helper
| 0
|
12777445
|
<reponame>TiFu/runepicker-helper
from .champion import championById
from .wiki import wikiById
| 0.921875
| 1
|
python-codes/tests/testcircle.py
|
WillianEsp/RM_with_CV
| 1
|
12777446
|
import cv2
import numpy as np
img = cv2.imread('imagem.jpg')
##img = cv2.imread('imagem3.jpg',0)
cv2.imshow('imagem',img)
img = cv2.GaussianBlur(img, (7, 5), 0)
cv2.imshow('imagemblur',img)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR, not RGB
circles = cv2.HoughCircles(gray_img,cv2.HOUGH_GRADIENT,1,30,
param1=50,param2=30,minRadius=0,maxRadius=60)
cimg = img
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
cv2.circle(cimg,(0,0),i[2],(0,0,255),2)
cv2.circle(cimg,(390,390),i[2],(255,0,0),2)
cv2.imshow('detected circles',cimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
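# Hedged robustness note (not in the original script): cv2.HoughCircles returns
# None when no circles are found, so a guard like the following avoids a crash:
#
#   if circles is not None:
#       circles = np.uint16(np.around(circles))
#       ...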
| 3.28125
| 3
|
repeat.py
|
arunkumarang/python
| 0
|
12777447
|
<gh_stars>0
#!/usr/bin/env python
import sys
def repeat(s, exclaim):
"""
Return the string 's' repeated 3 times.
If exclaim is true, add exclamation mark.
"""
result = s + s + s
if exclaim:
result = result + '!!!'
return result
def main():
    print(repeat('Yay', False))
    print(repeat('Woo Hoo', True))
if __name__ == '__main__':
main()
#help(len)
#help(sys)
#dir(list)
help(list)
sys.exit(0)
| 3.671875
| 4
|
bazel/spec-bundling/index.bzl
|
aspect-build/dev-infra
| 33
|
12777448
|
load("@build_bazel_rules_nodejs//:index.bzl", "js_library")
load("//bazel/esbuild:index.bzl", "esbuild", "esbuild_amd", "esbuild_config")
load("//bazel/spec-bundling:spec-entrypoint.bzl", "spec_entrypoint")
load("//bazel/spec-bundling:bundle-config.bzl", "spec_bundle_config_file")
"""
Starlark file exposing a macro for bundling Bazel targets with spec files into
a single spec ESM/AMD file. Bundling is helpful as it avoids unnecessary complexity
with module resolution at runtime with loaders such as SystemJS or RequireJS.
Additionally, given that Angular framework packages no longer ship UMD bundles,
bundling simplifies the integration of those framework packages significantly. It also helps
with incorporating Angular linker-processed output of library ESM files.
"""
def spec_bundle(
name,
deps,
platform,
run_angular_linker = False,
# We cannot use `ES2017` or higher as that would result in `async/await` not being downleveled.
# ZoneJS needs to be able to intercept these as otherwise change detection would not work properly.
target = "es2016",
workspace_name = None,
**kwargs):
"""
Macro that will bundle all test files, with their respective transitive dependencies,
into a single bundle file that can be loaded within Karma or NodeJS directly. Test files
are bundled as Angular framework packages do not ship UMD files and to avoid overall
complexity with maintaining a runtime loader such as RequireJS or SystemJS.
"""
is_browser_test = platform == "browser"
package_name = native.package_name()
spec_entrypoint(
name = "%s_spec_entrypoint" % name,
deps = deps,
testonly = True,
)
spec_bundle_config_file(
name = "%s_config_file" % name,
testonly = True,
output_name = "%s_config.mjs" % name,
run_angular_linker = run_angular_linker,
)
esbuild_config(
name = "%s_config" % name,
config_file = ":%s_config_file" % name,
testonly = True,
deps = ["//shared-scripts/angular-linker:js_lib"],
)
if is_browser_test and not workspace_name:
fail("The spec-bundling target %s is declared as browser test. In order to be able " +
"to construct an AMD module name, the `workspace_name` attribute needs to be set.")
# Browser tests (Karma) need named AMD modules to load.
# TODO(devversion): consider updating `@bazel/concatjs` to support loading JS files directly.
esbuild_rule = esbuild_amd if is_browser_test else esbuild
amd_name = "%s/%s/%s" % (workspace_name, package_name, name + "_spec") if is_browser_test else None
esbuild_rule(
name = "%s_bundle" % name,
testonly = True,
config = ":%s_config" % name,
entry_point = ":%s_spec_entrypoint" % name,
module_name = amd_name,
output = "%s_spec.js" % name,
target = target,
platform = platform,
deps = deps + [":%s_spec_entrypoint" % name],
link_workspace_root = True,
**kwargs
)
js_library(
name = name,
testonly = True,
named_module_srcs = [":%s_bundle" % name],
)
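# Hedged usage sketch (target and workspace names below are hypothetical):
#
#   spec_bundle(
#       name = "test_bundle",
#       deps = [":test_specs_lib"],
#       platform = "browser",
#       workspace_name = "my_workspace",
#   )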
| 1.5
| 2
|
sdk/test/test_file.py
|
DarrahK/yapily-sdk-python
| 0
|
12777449
|
<filename>sdk/test/test_file.py<gh_stars>0
# coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
The version of the OpenAPI document: 1.157.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import yapily
from yapily.models.file import File # noqa: E501
from yapily.rest import ApiException
class TestFile(unittest.TestCase):
"""File unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test File
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = yapily.models.file.File() # noqa: E501
if include_optional :
return File(
absolute = True,
absolute_file = yapily.models.file.File(
absolute = True,
absolute_file = yapily.models.file.File(
absolute = True,
absolute_path = '0',
canonical_file = yapily.models.file.File(
absolute = True,
absolute_path = '0',
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
parent_file = yapily.models.file.File(
absolute = True,
absolute_path = '0',
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
path = '0',
total_space = 56,
usable_space = 56, ),
path = '0',
total_space = 56,
usable_space = 56, ),
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
parent_file = yapily.models.file.File(
absolute = True,
absolute_path = '0',
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
path = '0',
total_space = 56,
usable_space = 56, ),
path = '0',
total_space = 56,
usable_space = 56, ),
absolute_path = '0',
canonical_file = yapily.models.file.File(
absolute = True,
absolute_path = '0',
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
path = '0',
total_space = 56,
usable_space = 56, ),
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
parent_file = yapily.models.file.File(
absolute = True,
absolute_path = '0',
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
path = '0',
total_space = 56,
usable_space = 56, ),
path = '0',
total_space = 56,
usable_space = 56, ),
absolute_path = '0',
canonical_file = yapily.models.file.File(
absolute = True,
absolute_file = yapily.models.file.File(
absolute = True,
absolute_path = '0',
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
parent_file = yapily.models.file.File(
absolute = True,
absolute_path = '0',
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
path = '0',
total_space = 56,
usable_space = 56, ),
path = '0',
total_space = 56,
usable_space = 56, ),
absolute_path = '0',
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
parent_file = yapily.models.file.File(
absolute = True,
absolute_path = '0',
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
path = '0',
total_space = 56,
usable_space = 56, ),
path = '0',
total_space = 56,
usable_space = 56, ),
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
parent_file = yapily.models.file.File(
absolute = True,
absolute_file = yapily.models.file.File(
absolute = True,
absolute_path = '0',
canonical_file = yapily.models.file.File(
absolute = True,
absolute_path = '0',
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
path = '0',
total_space = 56,
usable_space = 56, ),
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
path = '0',
total_space = 56,
usable_space = 56, ),
absolute_path = '0',
canonical_file = yapily.models.file.File(
absolute = True,
absolute_path = '0',
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
path = '0',
total_space = 56,
usable_space = 56, ),
canonical_path = '0',
directory = True,
file = True,
free_space = 56,
hidden = True,
name = '0',
parent = '0',
path = '0',
total_space = 56,
usable_space = 56, ),
path = '0',
total_space = 56,
usable_space = 56
)
else :
return File(
)
def testFile(self):
"""Test File"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 2.1875
| 2
|
hw_2/hw2_1_b.py
|
ssupasanya/Coursework
| 0
|
12777450
|
# File: hw2_1_b.py
expenses = [
'''Amount:Category:Date:Description''',
'''5.25:supply:20170222:box of staples''',
'''79.81:meal:20170222:lunch with ABC Corp. clients Al, Bob, and Cy''',
'''43.00:travel:20170222:cab back to office''',
'''383.75:travel:20170223:flight to Boston, to visit ABC Corp.''',
'''55.00:travel:20170223:cab to ABC Corp. in Cambridge, MA''',
'''23.25:meal:20170223:dinner at Logan Airport''',
'''318.47:supply:20170224:paper, toner, pens, paperclips, tape''',
'''142.12:meal:20170226:host dinner with ABC clients, Al, Bob, Cy, Dave, Ellie''',
'''303.94:util:20170227:Peoples Gas''',
'''121.07:util:20170227:Verizon Wireless''',
'''7.59:supply:20170227:Python book (used)''',
'''79.99:supply:20170227:spare 20" monitor''',
'''49.86:supply:20170228:Stoch Cal for Finance II''',
'''6.53:meal:20170302:Dunkin Donuts, drive to Big Inc. near DC''',
'''127.23:meal:20170302:dinner, Tavern64''',
'''33.07:meal:20170303:dinner, Uncle Julio's''',
'''86.00:travel:20170304:mileage, drive to/from Big Inc., Reston, VA''',
'''22.00:travel:20170304:tolls''',
'''378.81:travel:20170304:Hyatt Hotel, Reston VA, for Big Inc. meeting''',
'''1247.49:supply:20170306:Dell 7000 laptop/workstation''',
'''6.99:supply:20170306:HDMI cable''',
'''212.06:util:20170308:Duquesne Light''',
'''23.86:supply:20170309:Practical Guide to Quant Finance Interviews''',
'''195.89:supply:20170309:black toner, HP 304A, 2-pack''',
'''86.00:travel:20170317:mileage, drive to/from Big Inc., Reston, VA''',
'''32.27:meal:20170317:lunch at Clyde's with Fred and Gina, Big Inc.''',
'''22.00:travel:20170317:tolls''',
'''119.56:util:20170319:Verizon Wireless''',
'''284.23:util:20170323:Peoples Gas''',
'''8.98:supply:20170325:Flair pens'''
]
separated_expenses = []
for expense in expenses:
separated_expenses.append(expense.split(':'))
expenses_list = [float(expense[0]) for expense in separated_expenses[1:]]
print(expenses_list)
def sum_of_vals(vals):
"""
:param vals: an iterable such as list
:return: sum of the values from the iterable
"""
    total = 0
    for val in vals:
        total += val
    return total
def mean_val(vals):
"""
:param vals: an iterable such as list
:return: the mean of the values from the iterable
"""
return sum_of_vals(vals) / len(vals)
def stdev_of_vals(vals):
"""
:param vals: an iterable such as list
:return: the sample standard deviation of the values from the iterable
"""
squared_differences = []
for val in vals:
squared_differences.append(pow(val - mean_val(vals), 2))
return pow(sum_of_vals(squared_differences) / (len(vals) - 1), 0.5)
def median_val(vals):
"""
:param vals: an iterable such as list
:return: the median of the values from the iterable
"""
n = len(vals)
sorted_vals = sorted(vals)
if n % 2 == 0:
return (sorted_vals[n // 2] + sorted_vals[n // 2 - 1]) / 2
else:
return sorted_vals[n // 2]
def min_max_vals(vals):
"""
:param vals: an iterable such as list
:return: a tuple in which the first item is the minimum value from the iterable,
and the second item is the maximum value from the iterable
"""
sorted_vals = sorted(vals)
return sorted_vals[0], sorted_vals[-1]
print(f"{'Num of values:':14s} {len(expenses_list):8d}\n"
f"{'Sum of values:':14s} {sum_of_vals(expenses_list):8.2f}\n"
f"{'Mean value:':14s} {mean_val(expenses_list):8.2f}\n"
f"{'Std Deviation:':14s} {stdev_of_vals(expenses_list):8.2f}\n"
f"{'Median value:':14s} {median_val(expenses_list):8.2f}\n"
f"{'Minimum value:':14s} {min_max_vals(expenses_list)[0]:8.2f}\n"
f"{'Maximum value:':14s} {min_max_vals(expenses_list)[1]:8.2f}")
| 1.773438
| 2
|