content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional free-text ``reasons_notes`` field to ``CaseBrief``."""

    dependencies = [
        ('library', '0032_auto_20170502_0132'),
    ]

    operations = [
        migrations.AddField(
            model_name='casebrief',
            name='reasons_notes',
            # null=True together with blank=True stores NULL for empty values;
            # Django docs discourage null on text fields — presumably intentional
            # here to distinguish "no note" from an empty note. TODO confirm.
            field=models.TextField(verbose_name='Explanation for selected reason', null=True, blank=True),
        ),
    ]
|
# Contact sheet: render one source image eight times with increasing
# autocontrast cutoff values on a 2-column grid, using photobot.
import sys, os

# need a different name
import random as rnd

import pprint
pp = pprint.pprint

import pdb

# Set to 1 to make random choices repeatable while debugging.
kwdbg = 0

# Canvas size and tile geometry: 2 columns, 10px margins and gutter.
W, H = 542, 1050
fullwidth = int(W-20)
tilewidth = int((fullwidth-10) / 2.0)

# check for Nodebox: the _ctx global only exists inside NodeBox.
NB = True
try:
    _ctx
except(NameError):
    NB = False

if NB:
    # Running inside NodeBox: use its canvas and module loader.
    size(W, H)
    pb = ximport("photobot")
else:
    # Standalone: NodeBox normally provides WIDTH/HEIGHT, so define them here.
    WIDTH, HEIGHT = W, H
    import photobot as pb

import imagewells

if kwdbg:
    # make random choices repeatable for debugging
    rnd.seed(8)

# Pull a shuffled pool of landscape-oriented images from the image well.
imagewell = imagewells.loadImageWell(resultfile="imagewell-files")
tiles = imagewell['landscape']
rnd.shuffle(tiles)

# pick 2 images
img1path = tiles.pop()
# NOTE(review): img2path is picked but never used below — presumably left over
# from a two-image variant of this sheet.
img2path = tiles.pop()

# create a white canvas
c = pb.canvas( WIDTH, HEIGHT)
c.fill( (192, 192, 192) )

#
# Image 1: base tile placed at the top-left, cutoff 0.
#
_, filename = os.path.split( img1path )

# create, scale and place the image
x, y = 10, 10
img1, w1, h1 = pb.placeImage(c, img1path, x, y, tilewidth, "Image 1 Base")
c.layers[img1].duplicate()
c.top.autocontrast(cutoff=0)
pb.label(c, "%s autocontrast: 0" % filename, x, y)

#
# Image 2: each following tile duplicates the base layer, applies a larger
# cutoff and moves to its grid cell (x alternates columns, y steps by row).
#
c.layers[img1].duplicate()
c.top.autocontrast(cutoff=5)
x, y = w1+20, 10
c.top.translate( x, y)
pb.label(c, "%s autocontrast: 5" % filename, x, y)

#
# Image 3
#
c.layers[img1].duplicate()
c.top.autocontrast(cutoff=10)
x, y = 10, h1 + 20
c.top.translate( x, y)
pb.label(c, "%s autocontrast: 10" % filename, x, y)

#
# Image 4
#
c.layers[img1].duplicate()
c.top.autocontrast(cutoff=15)
x, y = w1+20, h1 + 20
c.top.translate( x, y)
pb.label(c, "%s autocontrast: 15" % filename, x, y)

#
# Image 5
#
c.layers[img1].duplicate()
c.top.autocontrast(cutoff=30)
x, y = 10, 2*h1 + 30
c.top.translate( x, y)
pb.label(c, "%s autocontrast: 30" % filename, x, y)

#
# Image 6
#
c.layers[img1].duplicate()
c.top.autocontrast(cutoff=35)
x, y = w1+20, 2*h1 + 30
c.top.translate( x, y)
pb.label(c, "%s autocontrast: 35" % filename, x, y)

#
# Image 7 — NOTE(review): cutoff passed positionally here, keyword elsewhere.
#
c.layers[img1].duplicate()
c.top.autocontrast(42)
x, y = 10, 3*h1 + 40
c.top.translate( x, y)
pb.label(c, "%s autocontrast: 42" % filename, x, y)

#
# Image 8
#
c.layers[img1].duplicate()
c.top.autocontrast(cutoff=49)
x, y = w1 + 20, 3*h1 + 40
c.top.translate( x, y)
pb.label(c, "%s autocontrast: 49" % filename, x, y)

# draw the result
c.draw(name="Layer_function_autocontrast")
|
import typing as t

# Argument-scheme node kinds; "" means untyped/unknown.
SchemeType = t.Literal["RA", "CA", "MA", "PA", ""]
NodeType = t.Literal["RA", "CA", "MA", "PA", "", "I", "TA", "YA", "L"]

# NOTE(review): this looks like a moment.js/Arrow-style pattern, not a
# strftime pattern — confirm against the producer of these timestamps.
DATE_FORMAT = "YYYY-MM-DD HH:mm:ss"


class Node(t.TypedDict):
    """One node of the argument graph."""

    nodeID: str
    text: str
    type: NodeType
    timestamp: str


class Edge(t.TypedDict):
    """A directed edge between two nodes (IDs refer to Node.nodeID)."""

    edgeID: str
    fromID: str
    toID: str
    formEdgeID: None


class Locution(t.TypedDict):
    """A locution linking a node to a speaker and optional time span/source."""

    nodeID: str
    personID: str
    timestamp: str
    start: t.Optional[str]
    end: t.Optional[str]
    source: t.Optional[str]


class Graph(t.TypedDict):
    """A complete argument-graph document."""

    nodes: t.List[Node]
    edges: t.List[Edge]
    locutions: t.List[Locution]
|
"""
A pair of production base environments.
"""
def update_envs_production(env_dir_name, env_yml_path,
                           env_path_a="/prj/ids/ids-conda/envs_ping",
                           env_path_b="/prj/ids/ids-conda/envs_pong",
                           prod_env_path_link="/prj/ids/ids-conda/envs_prod"):
    """Update (or create) a conda env in the staged tree, then promote it.

    The two env trees ("ping"/"pong") alternate between production and
    staging; ``prod_env_path_link`` is a symlink naming the current
    production tree. The env is updated in the *staged* tree and the
    symlink is then swapped to point at it.

    `env_dir_name` - directory name of the environment inside a tree.
    `env_yml_path` - path to the conda environment YAML definition.

    Raises PingPongInconsistentException if the symlink points to
    neither tree, and subprocess.CalledProcessError if conda fails.
    """
    # Determine the current production and staging paths.
    production_path = os.path.realpath(prod_env_path_link)
    if production_path == env_path_a:
        staged_path = env_path_b
        prod_path = env_path_a
    elif production_path == env_path_b:
        staged_path = env_path_a
        # BUG FIX: was env_path_a, which misreported production in this branch.
        prod_path = env_path_b
    else:
        raise PingPongInconsistentException()
    # BUG FIX: original used "/n" instead of "\n" and passed the argument
    # comma-style to print, leaving the %s placeholder unformatted.
    print("Updating env %s\n  Current prod: %s" % (env_dir_name, prod_path))
    # Update the conda env in the staged tree; check=True surfaces failures
    # instead of silently promoting a broken environment.
    subprocess.run(["conda", "env", "update",
                    "-p", os.path.join(staged_path, env_dir_name),
                    "-f", env_yml_path],
                   check=True)
    # Swap staged and production paths. BUG FIX: os.symlink() fails with
    # FileExistsError when the link already exists, so build a temporary
    # link and atomically rename it over the production link.
    tmp_link = prod_env_path_link + ".tmp"
    if os.path.lexists(tmp_link):
        os.remove(tmp_link)
    os.symlink(staged_path, tmp_link)
    os.replace(tmp_link, prod_env_path_link)
    print(" Updated env %s and moved production to:\n  %s"
          % (env_dir_name, staged_path))
class PingPongInconsistentException(Exception):
    """Raised when the production symlink resolves to neither the ping
    nor the pong environment tree."""
import RPi.GPIO as GPIO
import time
from enum import Enum
# Five-way stick contacts mapped to their BCM GPIO pin numbers.
StickType = Enum(
    "StickType",
    [("Center", 7), ("Front", 8), ("Right", 9), ("Left", 10), ("Back", 11)],
)
class Joystick:
    """Five-way joystick wired to GPIO inputs; fires ``self.callback``
    with the pressed StickType on each button press.

    NOTE(review): assumes GPIO.setmode(...) was called elsewhere before
    construction — RPi.GPIO requires a numbering mode before setup.
    """

    def __init__(self):
        # User-assignable handler; receives the StickType that was pressed.
        self.callback = None
        for pin in StickType:
            # Input with internal pull-up: the contact pulls the line low.
            GPIO.setup(pin.value, GPIO.IN, GPIO.PUD_UP)
            # Trigger on the falling edge, i.e. on press.
            GPIO.add_event_detect(pin.value, GPIO.FALLING,
                                  callback=self._callback)

    def _callback(self, channel):
        # channel is the BCM pin number; map it back to its StickType member.
        if self.callback:
            self.callback(StickType(channel))
|
# SPDX-FileCopyrightText: 2021 Carnegie Mellon University
#
# SPDX-License-Identifier: Apache-2.0
import os

import psycopg2

# Connect to the livemap database and clear the detection tables.
# BUG FIX: conn must be bound before the try block; otherwise a failure in
# psycopg2.connect() left it undefined and the finally clause raised
# NameError, masking the original connection error.
conn = None
try:
    host_addr = "127.0.0.1"
    # Password is supplied via the environment, never hard-coded.
    pw = os.getenv("LIVEMAP_DB_PASSWORD")
    conn = psycopg2.connect(
        database="livemap_db",
        user="osm",
        password=pw,
        host=host_addr,
        port="5432",
    )
    # create a new cursor
    cur = conn.cursor()
    # Delete data from tables and reset identity columns
    cur.execute(
        "TRUNCATE ONLY detection, trajectory, rec_images \
        RESTART IDENTITY"
    )
    # commit the changes to the database
    conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
    print(error)
finally:
    if conn is not None:
        conn.close()
|
##########################################
# check ancestry of the unknown
# sequence by comparing with
# known sequences
##########################################

def _read_sequence(path):
    """Return the first line of *path* (one sequence per file)."""
    # with-statement closes the handle even on error (originals leaked on
    # exceptions and used Python 2 print statements).
    with open(path, "r") as f:
        return f.readline()

asia = _read_sequence("asia")
africa = _read_sequence("africa")
south = _read_sequence("south-america")
europe = _read_sequence("europe")

# Load unknown sequence
given = _read_sequence("find.txt")

# Compare each position of the unknown sequence with the corresponding
# position of each known sequence and count mismatches; the region with
# the fewest differences is the closest ancestry. zip() stops at the
# shorter sequence, so a short reference no longer raises IndexError as
# the original index loop did.
ca = sum(1 for g, k in zip(given, asia) if g != k)
cf = sum(1 for g, k in zip(given, africa) if g != k)
cs = sum(1 for g, k in zip(given, south) if g != k)
ce = sum(1 for g, k in zip(given, europe) if g != k)

print("asia=", ca)
print("africa=", cf)
print("south america=", cs)
print("europe=", ce)
|
from http.client import HTTPConnection, HTTPSConnection
from http.cookies import SimpleCookie
from json import loads as json_loads
from urllib.parse import urlencode, urljoin, urlsplit
from wheezy.core.collections import attrdict, defaultdict
from wheezy.core.gzip import decompress
class HTTPClient(object):
    """HTTP client sends HTTP requests to server in order to accomplish
    an application specific use cases, e.g. remote web server API, etc.

    Tracks cookies and ETags across requests and transparently
    decompresses gzip-encoded responses.
    """

    def __init__(self, url, headers=None):
        """
        `url` - a base url for interaction with remote server.
        `headers` - a dictionary of headers merged over the defaults.
        """
        scheme, netloc, path, query, fragment = urlsplit(url)
        # Choose the connection class from the URL scheme.
        http_class = HTTPConnection if scheme == "http" else HTTPSConnection
        self.connection = http_class(netloc)
        self.default_headers = {
            "Accept-Encoding": "gzip",
            "Connection": "close",
        }
        if headers:
            self.default_headers.update(headers)
        self.path = path
        # Per-request state; populated by go().
        self.method = None
        self.headers = None
        self.cookies = {}
        self.etags = {}
        self.status_code = 0
        self.body = None
        self.__content = None
        self.__json = None

    @property
    def content(self):
        """Returns a content of the response (body decoded as UTF-8)."""
        if self.__content is None:
            self.__content = self.body.decode("utf-8")
        return self.__content

    @property
    def json(self):
        """Returns a json response parsed into attribute dictionaries."""
        if self.__json is None:
            assert "application/json" in self.headers["content-type"][0]
            self.__json = json_loads(self.content, object_hook=attrdict)
        return self.__json

    def get(self, path, **kwargs):
        """Sends GET HTTP request."""
        return self.go(path, "GET", **kwargs)

    def ajax_get(self, path, **kwargs):
        """Sends GET HTTP AJAX request."""
        return self.ajax_go(path, "GET", **kwargs)

    def head(self, path, **kwargs):
        """Sends HEAD HTTP request."""
        return self.go(path, "HEAD", **kwargs)

    def post(self, path, **kwargs):
        """Sends POST HTTP request."""
        return self.go(path, "POST", **kwargs)

    def ajax_post(self, path, **kwargs):
        """Sends POST HTTP AJAX request."""
        return self.ajax_go(path, "POST", **kwargs)

    def follow(self):
        """Follows HTTP redirect (e.g. status code 302)."""
        sc = self.status_code
        # BUG FIX: 207 (WebDAV Multi-Status) is not a redirect and carries
        # no Location header; only true redirect codes are accepted.
        assert sc in [301, 302, 303, 307]
        location = self.headers["location"][0]
        scheme, netloc, path, query, fragment = urlsplit(location)
        # 307 preserves the original method; other redirects degrade to GET.
        method = self.method if sc == 307 else "GET"
        return self.go(path, method)

    def ajax_go(
        self,
        path=None,
        method="GET",
        params=None,
        headers=None,
        content_type="",
        body="",
    ):
        """Sends HTTP AJAX request to web server."""
        headers = headers or {}
        headers["X-Requested-With"] = "XMLHttpRequest"
        return self.go(path, method, params, headers, content_type, body)

    def go(
        self,
        path=None,
        method="GET",
        params=None,
        headers=None,
        content_type="",
        body="",
    ):
        """Sends HTTP request to web server.

        The ``content_type`` takes priority over ``params`` to use
        ``body``. The ``body`` can be a string or file like object.
        Returns the response status code.
        """
        self.method = method
        # Merge per-request headers over the client defaults.
        headers = (
            dict(self.default_headers, **headers)
            if headers
            else dict(self.default_headers)
        )
        if self.cookies:
            headers["Cookie"] = "; ".join(
                "%s=%s" % cookie for cookie in self.cookies.items()
            )
        path = urljoin(self.path, path)
        # Send a conditional request when we hold an ETag for this path.
        if path in self.etags:
            headers["If-None-Match"] = self.etags[path]
        if content_type:
            headers["Content-Type"] = content_type
        elif params:
            if method == "GET":
                path += "?" + urlencode(params, doseq=True)
            else:
                body = urlencode(params, doseq=True)
                headers["Content-Type"] = "application/x-www-form-urlencoded"
        # Reset per-request response state.
        self.status_code = 0
        self.body = None
        self.__content = None
        self.__json = None
        self.connection.connect()
        self.connection.request(method, path, body, headers)
        r = self.connection.getresponse()
        self.body = r.read()
        self.connection.close()
        self.status_code = r.status
        # Collect repeated headers into lists keyed by header name.
        self.headers = defaultdict(list)
        for name, value in r.getheaders():
            self.headers[name].append(value)
        self.process_content_encoding()
        self.process_etag(path)
        self.process_cookies()
        return self.status_code

    # region: internal details

    def process_content_encoding(self):
        """Decompress the body if the server gzip-encoded it."""
        if (
            "content-encoding" in self.headers
            and "gzip" in self.headers["content-encoding"]
        ):
            self.body = decompress(self.body)

    def process_etag(self, path):
        """Remember the response ETag for conditional follow-up requests."""
        if "etag" in self.headers:
            self.etags[path] = self.headers["etag"][-1]

    def process_cookies(self):
        """Merge Set-Cookie headers into the cookie jar; empty values delete."""
        if "set-cookie" in self.headers:
            for cookie_string in self.headers["set-cookie"]:
                cookies = SimpleCookie(cookie_string)
                for name in cookies:
                    value = cookies[name].value
                    if value:
                        self.cookies[name] = value
                    elif name in self.cookies:
                        del self.cookies[name]
|
import cv2, mediapipe as mp
class PoseTrack:
    """Thin wrapper around MediaPipe Pose for detecting and drawing body
    landmarks on OpenCV BGR images.

    NOTE(review): positional Pose(...) arguments match older mediapipe
    releases that accepted upper_body_only — confirm against the installed
    mediapipe version.
    """

    def __init__(self,
                 static_image_mode=False,
                 upper_body_only=False,
                 smooth_landmarks=True,
                 min_detection_confidence=0.5,
                 min_tracking_confidence=0.5
                 ):
        self.static_image_mode = static_image_mode
        self.upper_body_only = upper_body_only
        self.smooth_landmarks = smooth_landmarks
        self.min_detection_confidence = min_detection_confidence
        self.min_tracking_confidence = min_tracking_confidence
        self.mpDraw = mp.solutions.drawing_utils
        self.mpPose = mp.solutions.pose
        self.pose = self.mpPose.Pose(self.static_image_mode,
                                     self.upper_body_only,
                                     self.smooth_landmarks,
                                     self.min_detection_confidence,
                                     self.min_tracking_confidence)

    def find_pose(self, mat, draw=True, connections=True):
        """Run pose detection on *mat* (BGR); optionally draw landmarks
        (with or without skeleton connections) onto it in place."""
        rgb = cv2.cvtColor(mat, cv2.COLOR_BGR2RGB)
        results = self.pose.process(rgb)
        if draw and results.pose_landmarks:
            if connections:
                self.mpDraw.draw_landmarks(mat, results.pose_landmarks, self.mpPose.POSE_CONNECTIONS)
            else:
                # BUG FIX: was results.pose_landmars (typo -> AttributeError).
                self.mpDraw.draw_landmarks(mat, results.pose_landmarks)
        return mat

    def findPosition(self, img, handNo=0, draw=True):
        """Return [[id, x_px, y_px], ...] for each detected pose landmark.

        `handNo` and `draw` are kept for backward compatibility with the
        original (hand-tracker-derived) signature; they are unused here.
        """
        lmlist = []
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = self.pose.process(rgb)
        # BUG FIX: pose results expose pose_landmarks; multi_hand_landmarks
        # belongs to the Hands solution and always raised AttributeError here.
        if results.pose_landmarks:
            h, w, c = img.shape
            for id, lm in enumerate(results.pose_landmarks.landmark):
                # Landmarks are normalized [0, 1]; scale to pixel coordinates.
                cx, cy = (lm.x * w), (lm.y * h)
                lmlist.append([id, cx, cy])
        return lmlist
|
def solution(A, B):
    """Return [F(A[i]) mod 2**B[i]] where F(0) = F(1) = 1, F(n) = F(n-1) + F(n-2).

    Fixes over the original: an empty A no longer raises ValueError from
    max(), and the modulus uses an exact integer shift instead of
    int(math.pow(2, b)), which raised OverflowError for b >= 1024.
    """
    if not A:
        return []
    max_a = max(A)
    # Build the Fibonacci-like table once up to the largest index needed.
    fib = [1] * (max_a + 1)
    for i in range(2, max_a + 1):
        fib[i] = fib[i - 1] + fib[i - 2]
    # 1 << b == 2**b exactly, for arbitrarily large b.
    return [fib[a] % (1 << b) for a, b in zip(A, B)]
|
import os
import torch
from utils.utils import get_logger, is_logging_process
import wandb
def test_model(cfg, model, test_loader, writer):
    """Run one evaluation pass over *test_loader*, log the mean loss and a
    few example predictions.

    `cfg`    - hydra-style config (reads cfg.device and cfg.dist.gpus).
    `model`  - wrapper exposing net, inference(), loss_f, and step.
    `writer` - optional logger with logging_with_step(value, step, name).

    NOTE(review): assumes model.inference returns logits suitable for
    output.max(1) and that wandb.Image accepts model_input[0] — confirm
    against the model wrapper.
    """
    logger = get_logger(cfg, os.path.basename(__file__))
    model.net.eval()
    total_test_loss = 0
    test_loop_len = 0
    with torch.no_grad():
        example_images = []
        for model_input, model_target in test_loader:
            output = model.inference(model_input)
            loss_v = model.loss_f(output, model_target.to(cfg.device))
            if cfg.dist.gpus > 0:
                # Aggregate loss_v from all GPUs. loss_v is set as the sum of all GPUs' loss_v.
                torch.distributed.all_reduce(loss_v)
                loss_v /= torch.tensor(float(cfg.dist.gpus))
            total_test_loss += loss_v.to("cpu").item()
            test_loop_len += 1
            # Keep at most the first 9 batches' leading samples for wandb.
            if test_loop_len <10:
                example_images.append(wandb.Image(
                    model_input[0], caption="Pred: {} Truth: {}".format(output.max(1, keepdim=True)[1][0].item(), model_target[0])))

        # Mean loss over all evaluated batches.
        total_test_loss /= test_loop_len

        if writer is not None:
            writer.logging_with_step(total_test_loss, model.step, "test_loss")
            writer.logging_with_step(example_images, model.step, "Examples")
        if is_logging_process():
            logger.info("Test Loss %.04f at step %d" % (total_test_loss, model.step))
|
# Package bootstrap: load the auto-generated Dash component classes from
# metadata.json and attach their JS/CSS resource descriptors.
import os as _os
import sys as _sys
import dash as _dash

from .version import __version__

_current_path = _os.path.dirname(_os.path.abspath(__file__))

# Generate component classes from the React component metadata.
_components = _dash.development.component_loader.load_components(
    _os.path.join(_current_path, 'metadata.json'),
    'dash_core_components'
)

_this_module = _sys.modules[__name__]

# JavaScript bundles dash serves for these components (CDN URL plus the
# copy packaged with this distribution).
_js_dist = [
    {
        'external_url': 'https://cdn.plot.ly/plotly-1.32.0.min.js',
        'relative_package_path': 'plotly-1.32.0.min.js',
        'namespace': 'dash_core_components'
    },
    {
        'relative_package_path': 'bundle.js',
        'external_url': (
            'https://unpkg.com/dash-core-components@{}'
            '/dash_core_components/bundle.js'
        ).format(__version__),
        'namespace': 'dash_core_components'
    }
]

# Stylesheets for the bundled third-party widgets (sliders, selects, dates).
_css_dist = [
    {
        'relative_package_path': [
            'rc-slider@6.1.2.css',
            'react-select@1.0.0-rc.3.min.css',
            'react-virtualized@9.9.0.css',
            'react-virtualized-select@3.1.0.css',
            'react-dates@12.3.0.css'
        ],
        'external_url': [
            'https://unpkg.com/react-select@1.0.0-rc.3/dist/react-select.min.css',
            'https://unpkg.com/react-virtualized@9.9.0/styles.css',
            'https://unpkg.com/react-virtualized-select@3.1.0/styles.css',
            'https://unpkg.com/rc-slider@6.1.2/assets/index.css',
            'https://unpkg.com/dash-core-components@{}/dash_core_components/react-dates@12.3.0.css'.format(__version__)
        ],
        'namespace': 'dash_core_components'
    }
]

# Re-export every generated component from this module and attach the
# resource descriptors dash uses to serve its JS/CSS.
for component in _components:
    setattr(_this_module, component.__name__, component)
    setattr(component, '_js_dist', _js_dist)
    setattr(component, '_css_dist', _css_dist)
|
# Beginner demo: read a name from stdin, echo it, then rebind the variable.
name = input("Enter your name : ")
print(name)

# Rebind the same variable to a literal; the input above is discarded.
name = "max"
print(name)

length = len(name)
print(length)

# Variables rules
"""
1. only a-z, A-Z, 0-9, _ are allowed
2. Do not start with number
3. No other special characters
4. No use of reserved words
5. case sensitive
"""
"""Schema for the API endpoints."""
from marshmallow import Schema, fields
class PresentationSchema(Schema):
    """Schema for the presentation."""

    participant_id = fields.Int()
    role = fields.Str()
    # Position of this presentation within its panel.
    order = fields.Int()
class NewPanelSchema(Schema):
    """Schema for the panel."""

    name = fields.Str(required=True)
    start = fields.DateTime(required=True)
    # Duration and gap between presentations — units not stated here;
    # presumably minutes. TODO confirm against the endpoint handler.
    duration = fields.Int(required=True)
    gap = fields.Int(required=True)
    presentations = fields.Nested(PresentationSchema(many=True))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : .ycm_extra_osx_conf.py
# License: MIT/X11
# Author : Dries Pauwels <2mjolk@gmail.com>
# Date : do 13 sep 2018 10:24
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
# Compilation flags used when no compilation database is set (the default).
# Adjust this list to match the project being edited.
flags = [
    '-Wall',
    '-Wextra',
    '-Werror',
    '-Wno-long-long',
    '-Wno-variadic-macros',
    '-fexceptions',
    '-DNDEBUG',
    '-DUSE_CLANG_COMPLETER',
    # An explicit -std=<...> is required: without it clang guesses the
    # language when compiling headers (badly — C++ headers get compiled
    # as C). Use e.g. 'c99' or 'c++11' for other projects.
    '-std=gnu11',
    # -x pins the source language; this project is plain C.
    '-x', 'c'
    # Extra include paths could be appended here, e.g.
    # '-isystem', '/usr/local/include', '-I', '.'
]


def FlagsForFile( filename, **kwargs ):
    """Entry point called by YouCompleteMe; returns the flag configuration
    used to compile *filename* (the same static list for every file)."""
    config = {'do_cache': True, 'flags': flags}
    return config
|
import tkinter as tk
def say():
    # Greet using whatever is currently typed into the Entry widget.
    m.config(text="WELCOME " + e.get())

root = tk.Tk()
root.title('Strawberry Cake')
root.config(bg='#0394fc')

# Greeting label, updated by say().
m = tk.Label(root, text='WELCOME!', bg='#0394fc', fg='orange',
             font=('Comic Sans MS', 20))
m.pack()

# Name input field.
e =tk.Entry(root, bg='#0394fc', fg='orange', font=('times', 20))
e.pack()

b = tk.Button(root, text="Click Here", command=say, bg='#0394fc',
              fg='orange', font=('Comic Sans MS', 20))
b.pack()

# NOTE(review): mainloop is commented out, so running this file directly
# never enters the Tk event loop — presumably driven elsewhere; confirm.
# root.mainloop()
"""Denon HEOS Media Player."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import logging
from pyheos import Heos, HeosError, const as heos_const
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_HOST, EVENT_HOMEASSISTANT_STOP, Platform
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady, HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import Throttle
from . import services
from .config_flow import format_title
from .const import (
COMMAND_RETRY_ATTEMPTS,
COMMAND_RETRY_DELAY,
DATA_CONTROLLER_MANAGER,
DATA_ENTITY_ID_MAP,
DATA_GROUP_MANAGER,
DATA_SOURCE_MANAGER,
DOMAIN,
SIGNAL_HEOS_PLAYER_ADDED,
SIGNAL_HEOS_UPDATED,
)
PLATFORMS = [Platform.MEDIA_PLAYER]

# Accept the legacy YAML config ({heos: {host: ...}}) but mark it deprecated;
# setup imports it into a config entry.
CONFIG_SCHEMA = vol.Schema(
    vol.All(
        cv.deprecated(DOMAIN),
        {DOMAIN: vol.Schema({vol.Required(CONF_HOST): cv.string})},
    ),
    extra=vol.ALLOW_EXTRA,
)

# Throttle interval for refreshing sources (see SourceManager.connect_update).
MIN_UPDATE_SOURCES = timedelta(seconds=1)

_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the HEOS component.

    Imports legacy YAML configuration into a config entry, or syncs the
    host of the existing entry with the YAML value.
    """
    if DOMAIN not in config:
        return True
    host = config[DOMAIN][CONF_HOST]
    entries = hass.config_entries.async_entries(DOMAIN)
    if not entries:
        # Create new entry based on config
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN, context={"source": SOURCE_IMPORT}, data={CONF_HOST: host}
            )
        )
    else:
        # Check if host needs to be updated
        entry = entries[0]
        if entry.data[CONF_HOST] != host:
            hass.config_entries.async_update_entry(
                entry, title=format_title(host), data={**entry.data, CONF_HOST: host}
            )
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Initialize config entry which represents the HEOS controller.

    Connects to the controller, loads players/favorites/inputs, builds the
    controller/source/group managers and stores them in hass.data[DOMAIN].
    Raises ConfigEntryNotReady when the controller cannot be reached.
    """
    # For backwards compat
    if entry.unique_id is None:
        hass.config_entries.async_update_entry(entry, unique_id=DOMAIN)
    host = entry.data[CONF_HOST]
    # Setting all_progress_events=False ensures that we only receive a
    # media position update upon start of playback or when media changes
    controller = Heos(host, all_progress_events=False)
    try:
        await controller.connect(auto_reconnect=True)
    # Auto reconnect only operates if initial connection was successful.
    except HeosError as error:
        await controller.disconnect()
        _LOGGER.debug("Unable to connect to controller %s: %s", host, error)
        raise ConfigEntryNotReady from error

    # Disconnect when shutting down
    async def disconnect_controller(event):
        await controller.disconnect()

    entry.async_on_unload(
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, disconnect_controller)
    )

    # Get players and sources
    try:
        players = await controller.get_players()
        favorites = {}
        if controller.is_signed_in:
            favorites = await controller.get_favorites()
        else:
            _LOGGER.warning(
                "%s is not logged in to a HEOS account and will be unable to retrieve "
                "HEOS favorites: Use the 'heos.sign_in' service to sign-in to a HEOS account",
                host,
            )
        inputs = await controller.get_input_sources()
    except HeosError as error:
        await controller.disconnect()
        _LOGGER.debug("Unable to retrieve players and sources: %s", error)
        raise ConfigEntryNotReady from error

    # Bridge pyheos events to Home Assistant dispatcher signals.
    controller_manager = ControllerManager(hass, controller)
    await controller_manager.connect_listeners()

    source_manager = SourceManager(favorites, inputs)
    source_manager.connect_update(hass, controller)

    group_manager = GroupManager(hass, controller)

    hass.data[DOMAIN] = {
        DATA_CONTROLLER_MANAGER: controller_manager,
        DATA_GROUP_MANAGER: group_manager,
        DATA_SOURCE_MANAGER: source_manager,
        Platform.MEDIA_PLAYER: players,
        # Maps player_id to entity_id. Populated by the individual HeosMediaPlayer entities.
        DATA_ENTITY_ID_MAP: {},
    }

    services.register(hass, controller)

    hass.config_entries.async_setup_platforms(entry, PLATFORMS)

    # Start tracking group membership once player entities begin appearing.
    group_manager.connect_update()
    entry.async_on_unload(group_manager.disconnect_update)
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry.

    Disconnects from the controller, drops all component data and services,
    then unloads the media_player platform.
    """
    controller_manager = hass.data[DOMAIN][DATA_CONTROLLER_MANAGER]
    await controller_manager.disconnect()
    hass.data.pop(DOMAIN)

    services.remove(hass)

    return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
class ControllerManager:
    """Class that manages events of the controller.

    Subscribes to pyheos controller/connection events, relays them to
    Home Assistant via SIGNAL_HEOS_UPDATED, and keeps the device and
    entity registries in sync when player IDs change.
    """

    def __init__(self, hass, controller):
        """Init the controller manager."""
        self._hass = hass
        self._device_registry = None
        self._entity_registry = None
        self.controller = controller
        # Dispatcher disconnect callables, kept so disconnect() can undo them.
        self._signals = []

    async def connect_listeners(self):
        """Subscribe to events of interest."""
        self._device_registry, self._entity_registry = await asyncio.gather(
            self._hass.helpers.device_registry.async_get_registry(),
            self._hass.helpers.entity_registry.async_get_registry(),
        )
        # Handle controller events
        self._signals.append(
            self.controller.dispatcher.connect(
                heos_const.SIGNAL_CONTROLLER_EVENT, self._controller_event
            )
        )
        # Handle connection-related events
        self._signals.append(
            self.controller.dispatcher.connect(
                heos_const.SIGNAL_HEOS_EVENT, self._heos_event
            )
        )

    async def disconnect(self):
        """Disconnect subscriptions."""
        for signal_remove in self._signals:
            signal_remove()
        self._signals.clear()
        self.controller.dispatcher.disconnect_all()
        await self.controller.disconnect()

    async def _controller_event(self, event, data):
        """Handle controller event."""
        if event == heos_const.EVENT_PLAYERS_CHANGED:
            self.update_ids(data[heos_const.DATA_MAPPED_IDS])
        # Update players
        async_dispatcher_send(self._hass, SIGNAL_HEOS_UPDATED)

    async def _heos_event(self, event):
        """Handle connection event."""
        if event == heos_const.EVENT_CONNECTED:
            try:
                # Retrieve latest players and refresh status
                data = await self.controller.load_players()
                self.update_ids(data[heos_const.DATA_MAPPED_IDS])
            except HeosError as ex:
                _LOGGER.error("Unable to refresh players: %s", ex)
        # Update players
        async_dispatcher_send(self._hass, SIGNAL_HEOS_UPDATED)

    def update_ids(self, mapped_ids: dict[int, int]):
        """Update the IDs in the device and entity registry."""
        # mapped_ids contains the mapped IDs (new:old)
        for new_id, old_id in mapped_ids.items():
            # update device registry
            entry = self._device_registry.async_get_device({(DOMAIN, old_id)})
            new_identifiers = {(DOMAIN, new_id)}
            if entry:
                self._device_registry.async_update_device(
                    entry.id, new_identifiers=new_identifiers
                )
                _LOGGER.debug(
                    "Updated device %s identifiers to %s", entry.id, new_identifiers
                )
            # update entity registry
            entity_id = self._entity_registry.async_get_entity_id(
                Platform.MEDIA_PLAYER, DOMAIN, str(old_id)
            )
            if entity_id:
                self._entity_registry.async_update_entity(
                    entity_id, new_unique_id=str(new_id)
                )
                _LOGGER.debug("Updated entity %s unique id to %s", entity_id, new_id)
class GroupManager:
    """Class that manages HEOS groups.

    Tracks group membership (mapping each player entity to the entity_ids
    grouped with it, leader first) and exposes join/unjoin operations.
    """

    def __init__(self, hass, controller):
        """Init group manager."""
        self._hass = hass
        # entity_id -> [leader_entity_id, member_entity_ids...]
        self._group_membership = {}
        self._disconnect_player_added = None
        # Set once the first full group refresh has been triggered.
        self._initialized = False
        self.controller = controller

    def _get_entity_id_to_player_id_map(self) -> dict:
        """Return a dictionary which maps all HeosMediaPlayer entity_ids to player_ids."""
        return {v: k for k, v in self._hass.data[DOMAIN][DATA_ENTITY_ID_MAP].items()}

    async def async_get_group_membership(self):
        """Return a dictionary which contains all group members for each player as entity_ids."""
        group_info_by_entity_id = {
            player_entity_id: []
            for player_entity_id in self._get_entity_id_to_player_id_map()
        }
        try:
            groups = await self.controller.get_groups(refresh=True)
        except HeosError as err:
            _LOGGER.error("Unable to get HEOS group info: %s", err)
            return group_info_by_entity_id
        player_id_to_entity_id_map = self._hass.data[DOMAIN][DATA_ENTITY_ID_MAP]
        for group in groups.values():
            leader_entity_id = player_id_to_entity_id_map.get(group.leader.player_id)
            member_entity_ids = [
                player_id_to_entity_id_map[member.player_id]
                for member in group.members
                if member.player_id in player_id_to_entity_id_map
            ]
            # Make sure the group leader is always the first element
            group_info = [leader_entity_id, *member_entity_ids]
            if leader_entity_id:
                group_info_by_entity_id[leader_entity_id] = group_info
                for member_entity_id in member_entity_ids:
                    group_info_by_entity_id[member_entity_id] = group_info
        return group_info_by_entity_id

    async def async_join_players(
        self, leader_entity_id: str, member_entity_ids: list[str]
    ) -> None:
        """Create a group with `leader_entity_id` as group leader and `member_entity_ids` as member players."""
        entity_id_to_player_id_map = self._get_entity_id_to_player_id_map()
        leader_id = entity_id_to_player_id_map.get(leader_entity_id)
        if not leader_id:
            raise HomeAssistantError(
                f"The group leader {leader_entity_id} could not be resolved to a HEOS player."
            )
        member_ids = [
            entity_id_to_player_id_map[member]
            for member in member_entity_ids
            if member in entity_id_to_player_id_map
        ]
        try:
            await self.controller.create_group(leader_id, member_ids)
        except HeosError as err:
            _LOGGER.error(
                "Failed to group %s with %s: %s",
                leader_entity_id,
                member_entity_ids,
                err,
            )

    async def async_unjoin_player(self, player_entity_id: str):
        """Remove `player_entity_id` from any group."""
        player_id = self._get_entity_id_to_player_id_map().get(player_entity_id)
        if not player_id:
            raise HomeAssistantError(
                f"The player {player_entity_id} could not be resolved to a HEOS player."
            )
        try:
            # A group of one (no members) dissolves the player's grouping.
            await self.controller.create_group(player_id, [])
        except HeosError as err:
            _LOGGER.error(
                "Failed to ungroup %s: %s",
                player_entity_id,
                err,
            )

    async def async_update_groups(self, event, data=None):
        """Update the group membership from the controller."""
        if event in (
            heos_const.EVENT_GROUPS_CHANGED,
            heos_const.EVENT_CONNECTED,
            SIGNAL_HEOS_PLAYER_ADDED,
        ):
            if groups := await self.async_get_group_membership():
                self._group_membership = groups
                _LOGGER.debug("Groups updated due to change event")
                # Let players know to update
                async_dispatcher_send(self._hass, SIGNAL_HEOS_UPDATED)
            else:
                _LOGGER.debug("Groups empty")

    def connect_update(self):
        """Connect listener for when groups change and signal player update."""
        self.controller.dispatcher.connect(
            heos_const.SIGNAL_CONTROLLER_EVENT, self.async_update_groups
        )
        self.controller.dispatcher.connect(
            heos_const.SIGNAL_HEOS_EVENT, self.async_update_groups
        )

        # When adding a new HEOS player we need to update the groups.
        async def _async_handle_player_added():
            # Avoid calling async_update_groups when `DATA_ENTITY_ID_MAP` has not been
            # fully populated yet. This may only happen during early startup.
            if (
                len(self._hass.data[DOMAIN][Platform.MEDIA_PLAYER])
                <= len(self._hass.data[DOMAIN][DATA_ENTITY_ID_MAP])
                and not self._initialized
            ):
                self._initialized = True
                await self.async_update_groups(SIGNAL_HEOS_PLAYER_ADDED)

        self._disconnect_player_added = async_dispatcher_connect(
            self._hass, SIGNAL_HEOS_PLAYER_ADDED, _async_handle_player_added
        )

    @callback
    def disconnect_update(self):
        """Disconnect the listeners."""
        if self._disconnect_player_added:
            self._disconnect_player_added()
            self._disconnect_player_added = None

    @property
    def group_membership(self):
        """Provide access to group members for player entities."""
        return self._group_membership
class SourceManager:
    """Class that manages sources for players.

    Holds the current favorites and input sources, resolves source names
    for playback, and refreshes itself on controller source events.
    """

    def __init__(
        self,
        favorites,
        inputs,
        *,
        retry_delay: int = COMMAND_RETRY_DELAY,
        max_retry_attempts: int = COMMAND_RETRY_ATTEMPTS,
    ):
        """Init input manager."""
        self.retry_delay = retry_delay
        self.max_retry_attempts = max_retry_attempts
        self.favorites = favorites
        self.inputs = inputs
        self.source_list = self._build_source_list()

    def _build_source_list(self):
        """Build a single list of inputs from various types."""
        source_list = []
        # Favorites first, then the physical input sources.
        source_list.extend([favorite.name for favorite in self.favorites.values()])
        source_list.extend([source.name for source in self.inputs])
        return source_list

    async def play_source(self, source: str, player):
        """Determine type of source and play it."""
        # Try favorites first: they are addressed by their index.
        index = next(
            (
                index
                for index, favorite in self.favorites.items()
                if favorite.name == source
            ),
            None,
        )
        if index is not None:
            await player.play_favorite(index)
            return

        # Fall back to a physical input source matched by name.
        input_source = next(
            (
                input_source
                for input_source in self.inputs
                if input_source.name == source
            ),
            None,
        )
        if input_source is not None:
            await player.play_input_source(input_source)
            return

        _LOGGER.error("Unknown source: %s", source)

    def get_current_source(self, now_playing_media):
        """Determine current source from now playing media."""
        # Match input by input_name:media_id
        if now_playing_media.source_id == heos_const.MUSIC_SOURCE_AUX_INPUT:
            return next(
                (
                    input_source.name
                    for input_source in self.inputs
                    if input_source.input_name == now_playing_media.media_id
                ),
                None,
            )
        # Try matching favorite by name:station or media_id:album_id
        return next(
            (
                source.name
                for source in self.favorites.values()
                if source.name == now_playing_media.station
                or source.media_id == now_playing_media.album_id
            ),
            None,
        )

    def connect_update(self, hass, controller):
        """
        Connect listener for when sources change and signal player update.

        EVENT_SOURCES_CHANGED is often raised multiple times in response to a
        physical event therefore throttle it. Retrieving sources immediately
        after the event may fail so retry.
        """

        @Throttle(MIN_UPDATE_SOURCES)
        async def get_sources():
            # Retry the fetch up to max_retry_attempts before giving up.
            retry_attempts = 0
            while True:
                try:
                    favorites = {}
                    if controller.is_signed_in:
                        favorites = await controller.get_favorites()
                    inputs = await controller.get_input_sources()
                    return favorites, inputs
                except HeosError as error:
                    if retry_attempts < self.max_retry_attempts:
                        retry_attempts += 1
                        _LOGGER.debug(
                            "Error retrieving sources and will retry: %s", error
                        )
                        await asyncio.sleep(self.retry_delay)
                    else:
                        _LOGGER.error("Unable to update sources: %s", error)
                        return

        async def update_sources(event, data=None):
            if event in (
                heos_const.EVENT_SOURCES_CHANGED,
                heos_const.EVENT_USER_CHANGED,
                heos_const.EVENT_CONNECTED,
            ):
                # If throttled, it will return None
                if sources := await get_sources():
                    self.favorites, self.inputs = sources
                    self.source_list = self._build_source_list()
                    _LOGGER.debug("Sources updated due to changed event")
                    # Let players know to update
                    async_dispatcher_send(hass, SIGNAL_HEOS_UPDATED)

        controller.dispatcher.connect(
            heos_const.SIGNAL_CONTROLLER_EVENT, update_sources
        )
        controller.dispatcher.connect(heos_const.SIGNAL_HEOS_EVENT, update_sources)
|
import datetime
from footynews.aggregator import exceptions
from footynews.aggregator.base import (Aggregator, Article, InvalidArticle,
make_soup)
from footynews.aggregator.utils import code_to_month
class FourFourTwo(Aggregator):
    """Aggregator that scrapes feature articles from FourFourTwo."""

    base_url = 'http://www.fourfourtwo.com/features'
    source = 'FourFourTwo'
    EXCLUDE_IF_IN_TITLE = ['Video:', 'Quiz:']

    def extract(self):
        """Crawl the features listing page and delegate to the base extractor."""
        soup = make_soup(FourFourTwo.base_url)
        divs = soup.find('div', {'class': 'content-wrapper'})
        divs = divs.find('div', {'class': 'view-content'})
        divs = iter(divs.findChildren(recursive=False))
        # Lazily crawl each listing entry; the parent class drives iteration.
        self.articles = (self.crawl(div) for div in divs)
        return super().extract()

    def crawl(self, tag):
        """Build an Article from one listing entry.

        Returns an InvalidArticle (instead of raising) when any piece of the
        entry cannot be parsed, and None when the title is excluded/empty.
        """
        url = None
        try:
            anchor = tag.find('div', {'class': 'title'}).find('a')
            url = self.get_url(anchor)
            title = self.get_title(anchor)
            if title:
                date_published = self.get_date_published(tag.find('div',
                                                         {'class': 'created'}))
                div = make_soup(url)
                author = self.get_author(div.find('p', {'class': 'authorName'}))
                return Article(FourFourTwo.source, title, url, author,
                               date_published)
        except (exceptions.WebCrawlException, AttributeError) as e:
            # BUGFIX: `e.message` does not exist on Python 3 exceptions and
            # `e.tag` is only present on WebCrawlException; use str(e) and a
            # defensive getattr so AttributeError instances are handled too.
            return InvalidArticle(FourFourTwo.source, e.__class__.__name__,
                                  str(e), url, str(getattr(e, 'tag', None)))

    def get_date_published(self, tag):
        """Parse a '12 Mar 2017'-style tag into a datetime.date."""
        try:
            date_published = tag.text.strip().split()
            # Map the month word (first three letters) to its number.
            date_published[1] = code_to_month[date_published[1][:3].lower()]
            # [day, month, year] -> [year, month, day] for datetime().
            date_published.reverse()
            date_published = datetime.datetime(*map(int, date_published)).date()
            return date_published
        except (IndexError, AttributeError, ValueError) as e:
            # BUGFIX: `e` was referenced without being bound (`as e` missing),
            # which raised NameError instead of the intended domain exception.
            raise exceptions.DatePublishedNotFoundException(e, tag)

    def get_url(self, tag):
        """Build the absolute article URL from an anchor tag's href."""
        try:
            url = tag['href']
            url = url.split('/')[-1]
            url = FourFourTwo.base_url + '/' + url
            return url
        except (KeyError, IndexError, AttributeError, ValueError, TypeError) as e:
            # BUGFIX: `e` was referenced without being bound (`as e` missing).
            raise exceptions.UrlNotFoundException(e, tag)
def setup():
    """Factory hook used by the aggregator framework to create this crawler."""
    return FourFourTwo()
# Manual smoke test: crawl the live site and print the extracted articles.
if __name__ == '__main__':
    fourfourtwo = FourFourTwo()
    print(fourfourtwo.extract())
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_software_update
short_description: Manage the software update settings of a BIG-IP
description:
- Manage the software update settings of a BIG-IP.
version_added: "2.4"
options:
auto_check:
description:
- Specifies whether to automatically check for updates on the F5
Networks downloads server.
required: False
default: None
choices:
- yes
- no
frequency:
description:
- Specifies the schedule for the automatic update check.
required: False
default: None
choices:
- daily
- monthly
- weekly
notes:
- Requires the f5-sdk Python package on the host This is as easy as pip
install f5-sdk
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 2.2.3
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
'''
RETURN = '''
'''
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
    """Maps and normalizes module parameters to/from the BIG-IP API."""

    # API attribute name -> module parameter name.
    api_map = {
        'autoCheck': 'auto_check'
    }
    # Parameters compared for change detection.
    updatables = [
        'auto_check', 'frequency'
    ]
    # Parameters reported back to the user.
    returnables = [
        'auto_check', 'frequency'
    ]

    @property
    def auto_check(self):
        """Normalize the boolean module value to the API's enabled/disabled."""
        if self._values['auto_check'] is None:
            return None
        elif self._values['auto_check'] in [True, 'enabled']:
            return 'enabled'
        else:
            return 'disabled'

    def api_params(self):
        """Build the dict of API attributes to send to the device."""
        result = {}
        for api_attribute in self.api_attributes:
            # BUGFIX: removed a stray `self.network == 'default'` branch that
            # was copy-paste residue from another module; this module has no
            # 'network' option, so the branch was dead at best.
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result
class ModuleManager(object):
    """Drives the module: reads device state, diffs it against the desired
    state, and applies updates."""

    def __init__(self, client):
        self.client = client
        self.have = None
        self.want = Parameters(self.client.module.params)
        self.changes = Parameters()

    def exec_module(self):
        """Entry point: run the update and report the resulting changes."""
        try:
            changed = self.update()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))
        result = dict()
        result.update(**self.changes.to_return())
        result.update(dict(changed=changed))
        return result

    def _update_changed_options(self):
        """Collect every desired option that differs from the device state."""
        diff = {}
        for key in Parameters.updatables:
            desired = getattr(self.want, key)
            if desired is None:
                continue
            if desired != getattr(self.have, key):
                diff[key] = desired
        if not diff:
            return False
        self.changes = Parameters(diff)
        return True

    def should_update(self):
        """True when at least one option needs to change on the device."""
        return self._update_changed_options()

    def update(self):
        """Compare desired vs current settings; push changes when needed."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        # In check mode, report the would-be change without touching the device.
        if self.client.check_mode:
            return True
        self.update_on_device()
        return True

    def update_on_device(self):
        """Modify the software-update settings on the BIG-IP."""
        resource = self.client.api.tm.sys.software.update.load()
        resource.modify(**self.want.api_params())

    def read_current_from_device(self):
        """Load the current software-update settings from the BIG-IP."""
        resource = self.client.api.tm.sys.software.update.load()
        return Parameters(resource.attrs)
class ArgumentSpec(object):
    """Declares the Ansible argument spec for this module."""

    def __init__(self):
        self.supports_check_mode = True
        # Options accepted by the module and their validation constraints.
        spec = dict(
            auto_check=dict(
                type='bool'
            ),
            frequency=dict(
                choices=['daily', 'monthly', 'weekly']
            )
        )
        self.argument_spec = spec
        self.f5_product_name = 'bigip'
def main():
    """Module entry point: validate prerequisites, build the client, run."""
    # Fail fast when the required SDK is missing.
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")
    spec = ArgumentSpec()
    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name
    )
    mm = ModuleManager(client)
    results = mm.exec_module()
    client.module.exit_json(**results)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
class Solution:
    def longestPalindrome(self, s):
        """Return the longest palindromic substring of s.

        Expands around every possible center, trying both odd-length
        (single-character center) and even-length (between-characters
        center) palindromes. The first longest palindrome found wins.

        :type s: str
        :rtype: str
        """
        # Strings of length 0 or 1 are trivially their own longest palindrome.
        if len(s) < 2:
            return s
        self.res = ""
        for center in range(len(s)):
            self.helper(s, center, center)      # odd-length palindromes
            self.helper(s, center, center + 1)  # even-length palindromes
        return self.res

    def helper(self, s, left, right):
        """Expand around the center (left, right) and record the palindrome
        if it beats the best one seen so far."""
        while left >= 0 and right < len(s) and s[left] == s[right]:
            left, right = left - 1, right + 1
        # left/right now sit just outside the palindrome, so its length
        # is right - left - 1.
        if (right - left - 1) > len(self.res):
            self.res = s[left + 1:right]
from os.path import isfile
from Message import Message
from api import send_message
from command.Command import Command
class Remind(Command):
    """Command /remind: replay a previously saved message by its key.

    Counterpart of /remember: loads a serialized Message from save/<key>
    and re-sends its text, attachments, forwards, and reply reference.
    """

    def __init__(self):
        super().__init__()
        self.help = "/remind — напомнить сообщение\n"
        self.full_help = "/remind [ключ] — напомнить сообщение по ключу\n"

    def on_message(self, event, vk):
        """Handle an incoming /remind request."""
        message = Message(event)
        if message.is_empty("/remind"):
            send_message(event, vk,
                         message="И как мне предлагаешь вспомнить по пустому ключу?")
            return
        spl = message.text.split()[1]
        # The key is untrusted input used as a filename component: reject
        # path separators and parent references to block path traversal
        # (previously only "/" was checked, leaving "\\" and ".." open).
        if "/" in spl or "\\" in spl or ".." in spl:
            send_message(event, vk, message="Иньекцию захотел сделать? А вот хрен!")
            return
        if not isfile(f"save/{spl}"):
            send_message(event, vk,
                         message=f"Сообщение по ключу {spl} не найдено")
            return
        with open(f"save/{spl}", "rb") as f:
            message = Message.load(f.read())
        # Strip the original "/remember" command and the key from the text
        # before re-sending.
        send_message(event, vk,
                     message=message.text.replace("/remember", "", 1).replace(spl, "", 1),
                     attachment=message.attachments,
                     forward_messages=message.forward_messages,
                     reply_to=message.reply_to)
|
import sys
import numpy as np
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QLabel, QComboBox, QLineEdit, QListWidget
from PyQt5.QtCore import pyqtSignal
class GuessTransition(QWidget):
    '''Pop-up widget to select an ion transition and emit the implied redshift.'''

    # Emits [z_cal, z_std] once the user picks a transition.
    send_z_cal = pyqtSignal(list)

    def __init__(self, linelist, wavelength, wave_std):
        """
        :param linelist: table with 'name' and 'wave' columns
            (rest-frame transition names and wavelengths).
        :param wavelength: measured line centre.
        :param wave_std: uncertainty on the measured centre (may be NaN).
        """
        super().__init__()
        # internal values
        self.linelist = linelist
        self.wave = wavelength
        self.std = wave_std

        # widget layout: a single scrollable list of transition names
        self.resize(200, 1000)
        self.layout = QVBoxLayout()
        self.transitions = QListWidget()
        self.transitions.addItems(self.linelist['name'].to_numpy())
        self.layout.addWidget(self.transitions)
        self.setLayout(self.layout)

        # link clicking movement to more actions
        self.transitions.itemClicked.connect(self._select_ion)

    def _select_ion(self):
        """Compute the redshift implied by the selected transition and emit it."""
        ion_num = self.transitions.currentRow()
        ion_wrest = self.linelist['wave'][ion_num]
        z_cal = (self.wave - ion_wrest)/ion_wrest
        # BUGFIX: `self.std is np.nan` is an identity check that only matches
        # the exact np.nan singleton; np.isnan detects any NaN value.
        if np.isnan(self.std):
            z_std = np.nan
        else:
            # Propagate the relative wavelength uncertainty onto z.
            z_std = self.std / self.wave * z_cal
        self.send_z_cal.emit([z_cal, z_std])
|
'''
Copyright 2020 CITA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
# Run relative to this file so relative data paths resolve consistently.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
DEEPSIGHT_DIR = os.getenv('DEEPSIGHT_DIR')
if DEEPSIGHT_DIR is None:
    DEEPSIGHT_DIR = ""
try:
    import _deepsight as deepsight
except ImportError:
    # Fall back to a local build of the extension module next to the repo.
    print("Importing local build.")
    import importlib.util, sys
    from os.path import abspath
    spec = importlib.util.spec_from_file_location(
        "_deepsight",
        abspath("../bin/_deepsight.cp{}{}-win_amd64.pyd".format(
            sys.version_info.major, sys.version_info.minor)))
    deepsight = importlib.util.module_from_spec(spec)
    # BUGFIX: module_from_spec only creates an empty module object; it must
    # be executed for its contents (Grid, etc.) to actually be available.
    spec.loader.exec_module(deepsight)
import numpy
def query_grid(grid, grid_name):
    """Return a dense representation of the named grid over its full extent."""
    bb = grid.getBoundingBox(grid_name)
    bmin, bmax = bb[0], bb[1]
    # Debug output: the extent being queried.
    print(bmin)
    print(bmax)
    return grid.getDense(grid_name, bmin, bmax)
def plot_image(grid, grid_name, z):
    '''
    Plot a slice of a grid.

    Collapses the grid's z extent to the single slice at `z` and shows it
    as a 2-D image with matplotlib.
    '''
    bb = grid.getBoundingBox(grid_name)
    bmin = bb[0]
    bmax = bb[1]
    # Collapse the z extent to a single slice.
    bmin[2] = z
    bmax[2] = z
    dense = grid.getDense(grid_name, bmin, bmax)
    # Imported lazily so matplotlib is only required when plotting.
    import matplotlib.pyplot as plt
    plt.imshow(dense, interpolation='none')
    plt.show()
def main():
    """Load a grid file, list the grids it contains, and plot one slice."""
    # Density at which to filter values (used by the scatter-plot variant).
    density_cutoff = 0.0

    # Switch to use either VDB or TIFF
    vdb_path = r"data\p15_.vdb"
    tiff_path = r"data\p15_.tiff"
    use_vdb = True
    path = vdb_path if use_vdb else tiff_path
    print("Loading {}...".format(path))

    # Create grid object
    grid = deepsight.Grid()

    # Load appropriate file, dispatching on the extension.
    if path.endswith('vdb'):
        grid.read(path)
    elif path.endswith('tiff'):
        grid.from_multipage_tiff(path, "computed_tomography", 1.0e-4)

    # Print out all available grid names
    names = grid.grid_names()
    print("Found {} grid(s):".format(len(names)))
    for name in names:
        print(" {}".format(name))

    # Get dense representation of first grid
    q = query_grid(grid, names[0])

    # Plot values
    #plot_scatter3d(q, density_cutoff)
    plot_image(grid, names[0], 200)
# Script entry point.
if __name__ == "__main__":
    main()
from passlib.hash import pbkdf2_sha256
import time
import json
from citybuilder import core
from random import randrange
class Unit:
    """A single army unit owned by a player."""

    def __init__(self, key, level):
        # `key` names the unit kind (an index into the unit configuration).
        self.type = key
        self.level = level
class Job:
    """A timed production job: a building, research, unit, or mission."""

    def __init__(self, player, product):
        self.player = player
        self.product = product
        # Completion timestamp: now plus the product's configured build time.
        self.finish_time = time.time() + product['time']

    def check_finish(self):
        """Apply the job's result if it is due; return True once consumed."""
        # Not due yet (debug mode completes everything immediately).
        if time.time() <= self.finish_time and not core.config['debug']:
            return False
        kind = self.product['type']
        if kind == "building":
            self.player.buildings[self.product['name']] += 1
        elif kind == "research":
            self.player.research[self.product['name']] += 1
        elif kind == "unit":
            self.player.units.append(Unit(self.product['name'], self.product['level']))
        elif kind == "mission":
            mission = self.product['mission']
            if mission['type'] == "gather":
                # Reward each resource with +/-10% random jitter.
                for resource, amount in mission['rewards']['resources'].items():
                    self.player.add_resource(
                        resource,
                        randrange(int(amount * 0.9), int(amount * 1.1)))
            # Units sent on the mission return to the player.
            for unit in self.product['units']:
                self.player.units.append(unit)
        return True
class Player:
    """A game player: account credentials, city state, and production queue.

    Relies throughout on the global `core.config` for building, research,
    unit, mission, and starting-value definitions.
    """

    def __init__(self, username, password):
        self.username = username
        self.set_password(password)
        # Pending production jobs (buildings, research, units, missions).
        self.jobs = list()
        # Building name -> current level (0 = not built).
        self.buildings = { key: 0 for key in core.config['building'] }
        self.units = [ Unit(unit['type'], unit['level']) for unit in core.config['general']['start']['units'] ]
        self.resources = { resource: core.config['general']['start'].get(resource, 0) for resource in core.config['general']['resources'] }
        # Research name -> current level.
        self.research = { key: 0 for key in core.config['research'] }
        self.missions = list()

    def login(self, ws):
        # Attach the player's active websocket connection (used by update()).
        self.ws = ws

    def set_password(self, password):
        # Store only the salted PBKDF2 hash, never the plaintext.
        self.password = pbkdf2_sha256.encrypt(password, rounds=200000, salt_size=16)

    def check_password(self, password):
        return pbkdf2_sha256.verify(password, self.password)

    def add_job(self, product, requirements, cost):
        """Queue a production job.

        Returns a status code: 0 = queued, 1 = insufficient resources,
        2 = requirements not met, 3 = duplicate building/research job.
        """
        if product['type'] in ("building", "research"):
            # Only one job per building/research name at a time.
            for job in self.jobs:
                if job.product['name'] == product['name']:
                    return 3
        if not self.check_requirements(requirements):
            return 2
        # NOTE: resource_check also deducts the cost when it succeeds.
        if not self.resource_check(cost):
            return 1
        self.jobs.append(Job(self, product))
        return 0

    def get_storage_space(self, resource):
        """Total capacity for a resource: base storage plus built storage buildings."""
        space = core.config['general']['storage'].get(resource, 0)
        for building in self.buildings.keys():
            spec = core.config['building'][building]
            if 'storage' in spec and self.buildings[building] > 0:
                space += spec['levels'][self.buildings[building] - 1]['capacity']
        return space

    def add_resource(self, resource, amount):
        # Add, then clamp to the available storage capacity.
        self.resources[resource] += amount
        self.resources[resource] = min(self.get_storage_space(resource), self.resources[resource])

    def check_requirements(self, requirements):
        """True when all building and research level requirements are met."""
        if 'buildings' in requirements:
            for building, level in requirements['buildings'].items():
                if self.buildings[building] < level:
                    return False
        if 'research' in requirements:
            for research, level in requirements['research'].items():
                if self.research[research] < level:
                    return False
        return True

    def resource_check(self, cost):
        """Checks if the resources are available,
        and takes them if that is the case."""
        # "time" is a duration, not a resource; exclude it from the cost.
        cost = { k: cost[k] for k in cost.keys() if k != "time" }
        for resource, required in cost.items():
            if self.resources[resource] < required:
                return False
        for resource, required in cost.items():
            self.resources[resource] -= required
        return True

    def get_production(self, resource):
        """Per-tick production rate of a resource across all built producers."""
        production = 0
        for building, level in self.buildings.items():
            spec = core.config['building'][building]
            if 'production' in spec and spec['production'] == resource and level > 0:
                production += spec['levels'][self.buildings[building] - 1]['rate']
        return production

    def update(self, tick_length):
        """Advance one game tick: refill missions, finish jobs, produce
        resources, and push the full state to the player's websocket."""
        # Keep palace-level + 1 missions on offer.
        # NOTE(review): if no configured mission meets the player's
        # requirements, randrange(0, 0) raises ValueError — confirm the
        # config always provides at least one requirement-free mission.
        while len(self.missions) < self.buildings['palace'] + 1:
            missions_available = [mission for mission in core.config['missions'] if self.check_requirements(mission['requirements'])]
            random_index = randrange(0, len(missions_available))
            self.missions.append(missions_available[random_index])
        # Finished jobs apply their effects and are dropped from the queue.
        self.jobs = [job for job in self.jobs if not job.check_finish()]
        # Resource generation
        for resource in self.resources.keys():
            self.add_resource(resource, self.get_production(resource) * tick_length)
        self.ws.send_json({
            'username': self.username,
            'jobs': [{ 'product': job.product, 'finish_time': job.finish_time } for job in self.jobs],
            'buildings': self.buildings,
            'units': self.units,
            'resources': self.resources,
            'resources_production': {resource: self.get_production(resource) for resource in self.resources.keys()},
            'resources_max': { key: self.get_storage_space(key) for key in self.resources.keys() },
            'research': self.research,
            'missions': self.missions,
        })
|
# -*- coding: utf-8 -*-
from statsmodels.compat.python import lzip
import numpy as np
from . import kernels
#TODO: should this be a function?
class KDE(object):
    """
    Kernel Density Estimator

    Parameters
    ----------
    x : array_like
        N-dimensional array from which the density is to be estimated
    kernel : Kernel Class
        Should be a class from *
    """
    #TODO: amend docs for Nd case?
    def __init__(self, x, kernel=None):
        x = np.asarray(x)
        # Promote 1-D input to a (nobs, 1) column so the Nd path is uniform.
        if x.ndim == 1:
            x = x[:,None]
        nobs, n_series = x.shape
        if kernel is None:
            kernel = kernels.Gaussian() # no meaningful bandwidth yet
        if n_series > 1:
            # Wrap a scalar kernel into an Nd product kernel for multivariate data.
            if isinstance( kernel, kernels.CustomKernel ):
                kernel = kernels.NdKernel(n_series, kernels = kernel)
        self.kernel = kernel
        self.n = n_series #TODO change attribute
        self.x = x
    def density(self, x):
        """Evaluate the kernel density estimate at a single point x."""
        return self.kernel.density(self.x, x)
    def __call__(self, x, h="scott"):
        # NOTE(review): the bandwidth argument `h` is accepted but never
        # used here — the kernel's own bandwidth applies. Confirm intent.
        return np.array([self.density(xx) for xx in x])
    def evaluate(self, x, h="silverman"):
        # NOTE(review): `h` is likewise unused in this method.
        density = self.kernel.density
        return np.array([density(xx) for xx in x])
# Ad-hoc demo / visual check for the KDE class (not a unit test).
if __name__ == "__main__":
    from numpy import random
    import matplotlib.pyplot as plt
    import statsmodels.nonparametric.bandwidths as bw
    from statsmodels.sandbox.nonparametric.testdata import kdetest
    # 1-D case
    random.seed(142)
    x = random.standard_t(4.2, size = 50)
    h = bw.bw_silverman(x)
    #NOTE: try to do it with convolution
    support = np.linspace(-10,10,512)
    kern = kernels.Gaussian(h = h)
    kde = KDE( x, kern)
    # The second print is the reference value to eyeball against the first.
    print(kde.density(1.015469))
    print(0.2034675)
    Xs = np.arange(-10,10,0.1)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(Xs, kde(Xs), "-")
    # NOTE(review): the first set_ylim is immediately overridden below.
    ax.set_ylim(-10, 10)
    ax.set_ylim(0,0.4)
    # 2-D case
    x = lzip(kdetest.faithfulData["eruptions"], kdetest.faithfulData["waiting"])
    x = np.array(x)
    # Standardize each column before estimating the joint density.
    x = (x - x.mean(0))/x.std(0)
    nobs = x.shape[0]
    H = kdetest.Hpi
    kern = kernels.NdKernel( 2 )
    kde = KDE( x, kern )
    print(kde.density( np.matrix( [1,2 ]))) #.T
    plt.figure()
    plt.plot(x[:,0], x[:,1], 'o')
    n_grid = 50
    xsp = np.linspace(x.min(0)[0], x.max(0)[0], n_grid)
    ysp = np.linspace(x.min(0)[1], x.max(0)[1], n_grid)
    # xsorted = np.sort(x)
    # xlow = xsorted[nobs/4]
    # xupp = xsorted[3*nobs/4]
    # xsp = np.linspace(xlow[0], xupp[0], n_grid)
    # ysp = np.linspace(xlow[1], xupp[1], n_grid)
    xr, yr = np.meshgrid(xsp, ysp)
    # Evaluate the density on the grid and draw its contours.
    kde_vals = np.array([kde.density( np.matrix( [xi, yi ]) ) for xi, yi in
                         zip(xr.ravel(), yr.ravel())])
    plt.contour(xsp, ysp, kde_vals.reshape(n_grid, n_grid))
    plt.show()
    # 5 D case
    # random.seed(142)
    # mu = [1.0, 4.0, 3.5, -2.4, 0.0]
    # sigma = np.matrix(
    #    [[ 0.6 - 0.1*abs(i-j) if i != j else 1.0 for j in xrange(5)] for i in xrange(5)])
    # x = random.multivariate_normal(mu, sigma, size = 100)
    # kern = kernel.Gaussian()
    # kde = KernelEstimate( x, kern )
|
"""
Based on rllab's logger.
https://github.com/rll/rllab
"""
import csv
import datetime
import errno
import joblib
import json
import os
import os.path as osp
import pickle
import sys
import torch
from collections import OrderedDict
from contextlib import contextmanager
from enum import Enum
import dateutil.tz
import dateutil.tz
import numpy as np
import uuid
from rlkit.core.tabulate import tabulate
from rlkit import pythonplusplus as ppp
def add_prefix(log_dict: OrderedDict, prefix: str, divider=''):
    """Return a new OrderedDict with every key prefixed by prefix + divider.

    Key order and values are preserved; the input dict is not modified.
    """
    return OrderedDict(
        (prefix + divider + key, val) for key, val in log_dict.items()
    )
def append_log(log_dict, to_add_dict, prefix=None):
    """Merge to_add_dict into log_dict in place, optionally prefixing keys.

    Returns None (the return value of dict.update), as before.
    """
    if prefix is None:
        merged = to_add_dict
    else:
        merged = add_prefix(to_add_dict, prefix=prefix)
    return log_dict.update(merged)
class TerminalTablePrinter(object):
    """Re-renders an accumulating table in place on the terminal."""
    def __init__(self):
        # Column headers, captured from the first tabular payload.
        self.headers = None
        # One row of values per print_tabular() call.
        self.tabulars = []
    def print_tabular(self, new_tabular):
        """Append one row of (key, value) pairs and redraw the table."""
        if self.headers is None:
            self.headers = [x[0] for x in new_tabular]
        else:
            assert len(self.headers) == len(new_tabular)
        self.tabulars.append([x[1] for x in new_tabular])
        self.refresh()
    def refresh(self):
        import os
        # Query the terminal height so only the rows that fit are drawn.
        # NOTE(review): 'stty size' requires a real POSIX TTY — confirm this
        # is never called in non-interactive contexts.
        rows, columns = os.popen('stty size', 'r').read().split()
        tabulars = self.tabulars[-(int(rows) - 3):]
        # ANSI escape: clear screen and move the cursor home before redrawing.
        sys.stdout.write("\x1b[2J\x1b[H")
        sys.stdout.write(tabulate(tabulars, self.headers))
        sys.stdout.write("\n")
class MyEncoder(json.JSONEncoder):
    """JSON encoder that serializes classes, enum members, and callables
    by their qualified name instead of failing."""

    def default(self, o):
        # Order matters: a class is also callable, so check `type` first.
        if isinstance(o, type):
            return {'$class': f"{o.__module__}.{o.__name__}"}
        if isinstance(o, Enum):
            return {'$enum': f"{o.__module__}.{type(o).__name__}.{o.name}"}
        if callable(o):
            return {'$function': f"{o.__module__}.{o.__name__}"}
        return json.JSONEncoder.default(self, o)
def mkdir_p(path):
    """Create `path` and any missing parents; no error if it already exists.

    Equivalent of `mkdir -p`. Still raises if `path` exists but is not a
    directory, or if creation fails for any other reason — matching the
    previous errno.EEXIST/isdir handling with less code.
    """
    os.makedirs(path, exist_ok=True)
class Logger(object):
    """Experiment logger: mirrors text output to files, accumulates tabular
    (key, value) rows into CSVs, and saves parameter snapshots.

    A single module-level instance (`logger`, created at the bottom of this
    module) is normally shared process-wide.
    """
    def __init__(self):
        # Text prefixes (e.g. "[exp-name] ") prepended to every log() line.
        self._prefixes = []
        self._prefix_str = ''
        # Prefixes prepended to tabular keys.
        self._tabular_prefixes = []
        self._tabular_prefix_str = ''
        # (key, value) rows accumulated since the last dump_tabular().
        self._tabular = []
        self._text_outputs = []
        self._tabular_outputs = []
        # CSV column names; frozen on the first dump_tabular().
        self._tabular_keys = None
        self._text_fds = {}
        self._tabular_fds = {}
        # File objects whose CSV header has already been written.
        self._tabular_header_written = set()
        self._snapshot_dir = None
        self._snapshot_mode = 'all'
        self._snapshot_gap = 1
        self._log_tabular_only = False
        self._header_printed = False
        self.table_printer = TerminalTablePrinter()
        self._use_tensorboard = False
        self.epoch = 0
        self._save_param_mode = 'torch'
    def reset(self):
        """Re-initialize all state. NOTE: previously opened files are not closed."""
        self.__init__()
    def _add_output(self, file_name, arr, fds, mode='a'):
        # Register an output file once, creating its directory as needed.
        if file_name not in arr:
            mkdir_p(os.path.dirname(file_name))
            arr.append(file_name)
            fds[file_name] = open(file_name, mode)
    def _remove_output(self, file_name, arr, fds):
        if file_name in arr:
            fds[file_name].close()
            del fds[file_name]
            arr.remove(file_name)
            # Forget the frozen CSV columns so a new output can redefine them.
            self._tabular_keys=None
    def push_prefix(self, prefix):
        self._prefixes.append(prefix)
        self._prefix_str = ''.join(self._prefixes)
    def add_text_output(self, file_name):
        self._add_output(file_name, self._text_outputs, self._text_fds,
                         mode='a')
    def add_tensorboard_output(self, file_name):
        # Imported lazily so tensorboard_logger is only required when used.
        import tensorboard_logger
        self._use_tensorboard = True
        self.tensorboard_logger = tensorboard_logger.Logger(file_name)
    def remove_text_output(self, file_name):
        self._remove_output(file_name, self._text_outputs, self._text_fds)
    def add_tabular_output(self, file_name, relative_to_snapshot_dir=False):
        if relative_to_snapshot_dir:
            file_name = osp.join(self._snapshot_dir, file_name)
        self._add_output(file_name, self._tabular_outputs, self._tabular_fds,
                         mode='w')
    def remove_tabular_output(self, file_name, relative_to_snapshot_dir=False):
        if relative_to_snapshot_dir:
            file_name = osp.join(self._snapshot_dir, file_name)
        if self._tabular_fds[file_name] in self._tabular_header_written:
            self._tabular_header_written.remove(self._tabular_fds[file_name])
        self._remove_output(file_name, self._tabular_outputs, self._tabular_fds)
    def set_snapshot_dir(self, dir_name):
        self._snapshot_dir = dir_name
    def get_snapshot_dir(self, ):
        return self._snapshot_dir
    def get_snapshot_mode(self, ):
        return self._snapshot_mode
    def set_snapshot_mode(self, mode):
        self._snapshot_mode = mode
    def get_snapshot_gap(self, ):
        return self._snapshot_gap
    def set_snapshot_gap(self, gap):
        self._snapshot_gap = gap
    def get_save_param_mode(self, ):
        return self._save_param_mode
    def set_save_param_mode(self, mode):
        # Serialization backend used by save_itr_params/save_extra_data.
        assert mode in ['pickle', 'torch', 'joblib']
        self._save_param_mode = mode
    def set_log_tabular_only(self, log_tabular_only):
        self._log_tabular_only = log_tabular_only
    def get_log_tabular_only(self, ):
        return self._log_tabular_only
    def log(self, s, with_prefix=True, with_timestamp=True):
        """Write one line to stdout (unless tabular-only) and all text outputs."""
        out = s
        if with_prefix:
            out = self._prefix_str + out
        if with_timestamp:
            now = datetime.datetime.now(dateutil.tz.tzlocal())
            timestamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')
            out = "%s | %s" % (timestamp, out)
        if not self._log_tabular_only:
            # Also log to stdout
            print(out)
        for fd in list(self._text_fds.values()):
            fd.write(out + '\n')
            fd.flush()
        sys.stdout.flush()
    def record_tabular(self, key, val):
        """Queue one (key, value) pair for the next dump_tabular()."""
        self._tabular.append((self._tabular_prefix_str + str(key), str(val)))
        if self._use_tensorboard:
            self.tensorboard_logger.log_value(self._tabular_prefix_str + str(key), val, self.epoch)
    def record_dict(self, d, prefix=None):
        """Record every item of `d`, optionally under a tabular prefix."""
        if prefix is not None:
            self.push_tabular_prefix(prefix)
        for k, v in d.items():
            self.record_tabular(k, v)
        if prefix is not None:
            self.pop_tabular_prefix()
    def push_tabular_prefix(self, key):
        self._tabular_prefixes.append(key)
        self._tabular_prefix_str = ''.join(self._tabular_prefixes)
    def pop_tabular_prefix(self, ):
        del self._tabular_prefixes[-1]
        self._tabular_prefix_str = ''.join(self._tabular_prefixes)
    def save_extra_data(self, data, file_name='extra_data', mode='joblib'):
        """
        Data saved here will always override the last entry

        :param data: Something pickle'able.
        :return: the path (without extension) the data was saved under.
        """
        file_name = osp.join(self._snapshot_dir, file_name)
        self._save_params_to_file(data, file_name, mode=mode)
        return file_name
    def get_table_dict(self, ):
        return dict(self._tabular)
    def get_table_key_set(self, ):
        return set(key for key, value in self._tabular)
    @contextmanager
    def prefix(self, key):
        """Context manager: apply a text-log prefix for the enclosed block."""
        self.push_prefix(key)
        try:
            yield
        finally:
            self.pop_prefix()
    @contextmanager
    def tabular_prefix(self, key):
        """Context manager: apply a tabular-key prefix for the enclosed block."""
        self.push_tabular_prefix(key)
        yield
        self.pop_tabular_prefix()
    def log_variant(self, log_file, variant_data):
        """Dump the experiment variant as pretty-printed, sorted JSON."""
        mkdir_p(os.path.dirname(log_file))
        with open(log_file, "w") as f:
            json.dump(variant_data, f, indent=2, sort_keys=True, cls=MyEncoder)
    def record_tabular_misc_stat(self, key, values, placement='back'):
        """Record Average/Std/Median/Min/Max summary stats of `values`."""
        if placement == 'front':
            prefix = ""
            suffix = key
        else:
            prefix = key
            suffix = ""
        if len(values) > 0:
            self.record_tabular(prefix + "Average" + suffix, np.average(values))
            self.record_tabular(prefix + "Std" + suffix, np.std(values))
            self.record_tabular(prefix + "Median" + suffix, np.median(values))
            self.record_tabular(prefix + "Min" + suffix, np.min(values))
            self.record_tabular(prefix + "Max" + suffix, np.max(values))
        else:
            # Keep the CSV columns present even when there is no data.
            self.record_tabular(prefix + "Average" + suffix, np.nan)
            self.record_tabular(prefix + "Std" + suffix, np.nan)
            self.record_tabular(prefix + "Median" + suffix, np.nan)
            self.record_tabular(prefix + "Min" + suffix, np.nan)
            self.record_tabular(prefix + "Max" + suffix, np.nan)
    def dump_tabular(self, *args, **kwargs):
        """Flush queued tabular rows to stdout/terminal and the CSV outputs.

        Increments `epoch` on every call. The optional `write_header` kwarg
        forces (True) or suppresses (False) writing the CSV header.
        """
        self.epoch += 1
        wh = kwargs.pop("write_header", None)
        if len(self._tabular) > 0:
            if self._log_tabular_only:
                self.table_printer.print_tabular(self._tabular)
            else:
                for line in tabulate(self._tabular).split('\n'):
                    self.log(line, *args, **kwargs)
            tabular_dict = dict(self._tabular)
            # Only saves keys in first iteration to CSV!
            # (But every key is printed out in text)
            if self._tabular_keys is None:
                self._tabular_keys = list(sorted(tabular_dict.keys()))
            # Write to the csv files
            for tabular_fd in list(self._tabular_fds.values()):
                writer = csv.DictWriter(tabular_fd,
                                        fieldnames=self._tabular_keys,
                                        extrasaction="ignore",)
                if wh or (
                        wh is None and tabular_fd not in self._tabular_header_written):
                    writer.writeheader()
                    self._tabular_header_written.add(tabular_fd)
                writer.writerow(tabular_dict)
                tabular_fd.flush()
            del self._tabular[:]
    def pop_prefix(self, ):
        del self._prefixes[-1]
        self._prefix_str = ''.join(self._prefixes)
    def _save_params_to_file(self, params, file_name, mode):
        # Serialize with the chosen backend; file extension depends on mode.
        if mode == 'joblib':
            joblib.dump(params, file_name + ".pkl", compress=3)
        elif mode == 'pickle':
            pickle.dump(params, open(file_name + ".pkl", "wb"))
        elif mode == 'torch':
            torch.save(params, file_name + ".pt")
        else:
            raise ValueError("Invalid mode: {}".format(mode))
    def save_itr_params(self, itr, params):
        """Save iteration `itr` parameters per the snapshot mode and gap."""
        if self._snapshot_dir:
            if self._snapshot_mode == 'all':
                file_name = osp.join(self._snapshot_dir, 'itr_%d' % itr)
                self._save_params_to_file(params, file_name, mode=self._save_param_mode)
            elif self._snapshot_mode == 'last':
                # override previous params
                file_name = osp.join(self._snapshot_dir, 'params')
                self._save_params_to_file(params, file_name, mode=self._save_param_mode)
            elif self._snapshot_mode == "gap":
                if itr % self._snapshot_gap == 0:
                    file_name = osp.join(self._snapshot_dir, 'itr_%d' % itr)
                    self._save_params_to_file(params, file_name, mode=self._save_param_mode)
            elif self._snapshot_mode == "gap_and_last":
                if itr % self._snapshot_gap == 0:
                    file_name = osp.join(self._snapshot_dir, 'itr_%d' % itr)
                    self._save_params_to_file(params, file_name, mode=self._save_param_mode)
                file_name = osp.join(self._snapshot_dir, 'params')
                self._save_params_to_file(params, file_name, mode=self._save_param_mode)
            elif self._snapshot_mode == 'none':
                pass
            else:
                raise NotImplementedError
def setup_logger(
        logger,
        exp_name,
        base_log_dir,
        variant=None,
        text_log_file="debug.log",
        variant_log_file="variant.json",
        tabular_log_file="progress.csv",
        snapshot_mode="last",
        snapshot_gap=1,
        log_tabular_only=False,
        log_dir=None,
        tensorboard=False,
        unique_id=None,
        git_infos=None,
        script_name=None,
        run_id=None,
        **create_log_dir_kwargs
):
    """
    Set up logger to have some reasonable default settings.

    Will save log output to

        based_log_dir/exp_name/exp_name.

    exp_name will be auto-generated to be unique.
    If log_dir is specified, then that directory is used as the output dir.

    :param exp_name: The sub-directory for this specific experiment.
    :param variant:
    :param base_log_dir: The directory where all log should be saved.
    :param text_log_file:
    :param variant_log_file:
    :param tabular_log_file:
    :param snapshot_mode:
    :param log_tabular_only:
    :param snapshot_gap:
    :param log_dir:
    :return: the resolved log directory.
    """
    logger.reset()
    variant = variant or {}
    unique_id = unique_id or str(uuid.uuid4())
    # "First time" means no explicit log_dir: create a fresh trial directory.
    first_time = log_dir is None
    if first_time:
        log_dir = create_log_dir(
            exp_name=exp_name,
            base_log_dir=base_log_dir,
            variant=variant,
            run_id=run_id,
            **create_log_dir_kwargs
        )
    if tensorboard:
        tensorboard_log_path = osp.join(log_dir, "tensorboard")
        logger.add_tensorboard_output(tensorboard_log_path)
    # Record the variant (augmented with identifying metadata) as JSON.
    logger.log("Variant:")
    variant_to_save = variant.copy()
    variant_to_save['unique_id'] = unique_id
    variant_to_save['exp_name'] = exp_name
    variant_to_save['trial_name'] = log_dir.split('/')[-1]
    logger.log(
        json.dumps(ppp.dict_to_safe_json(variant_to_save, sort=True), indent=2)
    )
    variant_log_path = osp.join(log_dir, variant_log_file)
    logger.log_variant(variant_log_path, variant_to_save)
    tabular_log_path = osp.join(log_dir, tabular_log_file)
    text_log_path = osp.join(log_dir, text_log_file)
    logger.add_text_output(text_log_path)
    if first_time:
        logger.add_tabular_output(tabular_log_path)
    else:
        # Resuming: append to the existing CSV and skip rewriting headers.
        logger._add_output(tabular_log_path, logger._tabular_outputs,
                           logger._tabular_fds, mode='a')
        for tabular_fd in logger._tabular_fds:
            logger._tabular_header_written.add(tabular_fd)
    logger.set_snapshot_dir(log_dir)
    logger.set_snapshot_mode(snapshot_mode)
    logger.set_snapshot_gap(snapshot_gap)
    logger.set_log_tabular_only(log_tabular_only)
    exp_name = log_dir.split("/")[-1]
    logger.push_prefix("[%s] " % exp_name)
    if git_infos:
        save_git_infos(git_infos, log_dir)
    if script_name:
        with open(osp.join(log_dir, "script_name.txt"), "w") as f:
            f.write(script_name)
    return log_dir
def save_git_infos(git_infos, log_dir):
    """Write per-repository diff patches and a git-state summary into log_dir.

    Each entry of git_infos is a
    (directory, code_diff, code_diff_staged, commit_hash, branch_name) tuple.
    Empty/absent diffs produce no patch file; the summary is appended to
    git_infos.txt.
    """
    for directory, code_diff, code_diff_staged, commit_hash, branch_name in git_infos:
        # Turn "/path/to/repo[/]" into "path-to-repo" for the patch filenames.
        trimmed = directory[1:-1] if directory[-1] == '/' else directory[1:]
        base_name = trimmed.replace("/", "-")
        diff_file_name = base_name + ".patch"
        diff_staged_file_name = base_name + "_staged.patch"
        if code_diff is not None and len(code_diff) > 0:
            with open(osp.join(log_dir, diff_file_name), "w") as f:
                f.write(code_diff + '\n')
        if code_diff_staged is not None and len(code_diff_staged) > 0:
            with open(osp.join(log_dir, diff_staged_file_name), "w") as f:
                f.write(code_diff_staged + '\n')
        with open(osp.join(log_dir, "git_infos.txt"), "a") as f:
            f.write("directory: {}".format(directory))
            f.write('\n')
            f.write("git hash: {}".format(commit_hash))
            f.write('\n')
            f.write("git branch name: {}".format(branch_name))
            f.write('\n\n')
def create_log_dir(
        exp_name,
        base_log_dir,
        exp_id=0,
        seed=0,
        variant=None,
        trial_dir_suffix=None,
        add_time_suffix=True,
        include_exp_name_sub_dir=True,
        run_id=None,
):
    """
    Creates and returns a unique log directory.

    :param exp_name: All experiments with this prefix will have log
        directories be under this directory.
    :param exp_id: Different exp_ids will be in different directories.
    :return: the created directory's path.
    """
    if run_id is not None:
        # NOTE(review): this branch assumes `variant` is a dict containing
        # "exp_id" (and optionally "num_exps_per_instance") — confirm all
        # run_id callers satisfy that.
        exp_id = variant["exp_id"]
        if variant.get("num_exps_per_instance", 0) > 1:
            # Multiple experiments per instance: disambiguate by timestamp.
            now = datetime.datetime.now(dateutil.tz.tzlocal())
            timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
            trial_name = "run%s/id%s/%s--s%d" % (run_id, exp_id, timestamp, seed)
        else:
            trial_name = "run{}/id{}".format(run_id, exp_id)
    else:
        trial_name = create_trial_name(exp_name, exp_id=exp_id, seed=seed, add_time_suffix=add_time_suffix)
    if trial_dir_suffix is not None:
        trial_name = "{}-{}".format(trial_name, trial_dir_suffix)
    if include_exp_name_sub_dir:
        log_dir = osp.join(base_log_dir, exp_name.replace("_", "-"), trial_name)
    else:
        log_dir = osp.join(base_log_dir, trial_name)
    if osp.exists(log_dir):
        print("WARNING: Log directory already exists {}".format(log_dir))
    os.makedirs(log_dir, exist_ok=True)
    return log_dir
def create_trial_name(exp_name, exp_id=0, seed=0, add_time_suffix=True):
    """
    Create a semi-unique experiment name that has a timestamp
    :param exp_name: prefix of the trial name
    :param exp_id: zero-padded into the `idNNN` component
    :param seed: appended as `--sN`
    :param add_time_suffix: when True, embed a local-time timestamp
    :return: the formatted trial name string
    """
    if add_time_suffix:
        # Only pay for the timezone lookup / formatting when the
        # timestamp is actually used (the original computed it always).
        now = datetime.datetime.now(dateutil.tz.tzlocal())
        timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
        return "%s_%s_id%03d--s%d" % (exp_name, timestamp, exp_id, seed)
    else:
        return "%s_id%03d--s%d" % (exp_name, exp_id, seed)
logger = Logger()
|
import numpy as np
from PyQt5 import QtWidgets
def save_fig(fig):
    """Prompt the user for a PNG path and save *fig* there (no-op on cancel)."""
    # getSaveFileName returns a (path, selected_filter) tuple; an empty
    # path means the dialog was cancelled.
    path, _selected_filter = QtWidgets.QFileDialog.getSaveFileName(
        caption='Save current figure', filter='Image (*.png)')
    if path:
        fig.savefig(path, bbox_inches='tight')
|
from . import nn, rl, util, RaggedArray, ContinuousSpace, FiniteSpace, optim, thutil
import policyopt
import numpy as np
import theano.tensor as T
from scipy.spatial.distance import cosine
from contextlib import contextmanager
import theano; from theano import tensor
from scipy.optimize import fmin_l_bfgs_b
import pickle
class MMDReward(object):
    """Imitation-learning reward based on the Maximum Mean Discrepancy (MMD).

    Per-sample cost is the empirical MMD witness between features of the
    learner's samples and stored expert features, evaluated with a
    sum-of-RBF kernel compiled in Theano.

    Naming convention (assumed from suffixes — TODO confirm): B = batch,
    Do = obs-feature dim, Da = action dim, Df = feature dim, Bex = expert batch.
    """
    # This is just copy version of LinearReward
    # TODO-LIST : cost function equals to MMD witness function!!
    # Consider only Gaussian Kernel (RBF Kernel)
    # TODO-1 : Determine bandwidth parameters
    # TODO-2 : Implement Radial Basis Kernel function
    def __init__(self,
            obsfeat_space, action_space,
            enable_inputnorm, favor_zero_expert_reward,
            include_time,
            time_scale,
            exobs_Bex_Do, exa_Bex_Da, ext_Bex,
            kernel_bandwidth_params,
            kernel_batchsize,
            kernel_reg_weight,
            use_median_heuristic,
            use_logscale_reward,
            save_reward,
            epsilon
            ):
        """Store expert data, standardize inputs, and compile the RBF kernel.

        kernel_bandwidth_params may be empty; bandwidths are then chosen
        lazily in compute_reward via the median heuristic / grid search,
        controlled by use_median_heuristic.
        """
        self.obsfeat_space, self.action_space = obsfeat_space, action_space
        self.favor_zero_expert_reward = favor_zero_expert_reward
        self.include_time = include_time
        self.time_scale = time_scale
        self.exobs_Bex_Do, self.exa_Bex_Da, self.ext_Bex = exobs_Bex_Do, exa_Bex_Da, ext_Bex
        self.use_logscale_reward = use_logscale_reward
        self.save_reward = save_reward
        self.epsilon = epsilon
        with nn.variable_scope('inputnorm'):
            # Standardize both observations and actions if actions are continuous
            # otherwise standardize observations only.
            self.inputnorm = (nn.Standardizer if enable_inputnorm else nn.NoOpStandardizer)(
                (obsfeat_space.dim + action_space.dim) if isinstance(action_space, ContinuousSpace)
                else obsfeat_space.dim)
            self.inputnorm_updated = False
        self.update_inputnorm(self.exobs_Bex_Do, self.exa_Bex_Da) # pre-standardize with expert data
        # Expert feature expectations
        #self.expert_feat_Df = self._compute_featexp(self.exobs_Bex_Do, self.exa_Bex_Da, self.ext_Bex)
        self.expert_feat_B_Df = self._featurize(self.exobs_Bex_Do, self.exa_Bex_Da, self.ext_Bex)
        # Arguments for MMD Reward
        self.kernel_bandwidth_params = kernel_bandwidth_params
        self.kernel_batchsize = kernel_batchsize
        self.kernel_reg_weight = kernel_reg_weight
        self.use_median_heuristic = use_median_heuristic
        self.mmd_square = 1.
        self.expert_sigmas = []
        self.iteration = 0
        self.YY = None
        # Grid-search window for the bandwidth (used when use_median_heuristic == 4).
        self.min_param = 100.0
        self.max_param = 300.0
        # MMD reward function
        # - Use Radial Basis Function Kernel
        # : k(x,y) = \sum exp(- sigma(i) * ||x-y||^2 )
        # - sigmas : Bandwidth parameters
        x = T.matrix('x')
        y = T.matrix('y')
        sigmas = T.vector('sigmas')
        feat_dim = self.expert_feat_B_Df.shape[1]
        # - dist[i]: ||x[i]-y[i]||^2
        # We should normalize x, y w.r.t its dimension
        # since in large dimension, a small difference between x, y
        # makes large difference in total kernel function value.
        normalized_x = x / feat_dim
        normalized_y = y / feat_dim
        dist_B = ((normalized_x)**2).sum(1).reshape((normalized_x.shape[0], 1)) \
                 + ((normalized_y)**2).sum(1).reshape((1, normalized_y.shape[0])) \
                 - 2*(normalized_x).dot((normalized_y).T)
        # Sum the RBF kernel over every bandwidth, then average.
        rbf_kernel_sum, _ = theano.scan(fn=lambda sigma, distance: T.exp(-sigma*distance),
                                        outputs_info=None,
                                        sequences=sigmas, non_sequences=dist_B)
        rbf_kernel = rbf_kernel_sum.mean(axis=0)
        if self.kernel_reg_weight > 0.0:
            # Optional cosine-similarity regularization term.
            xynorm = T.outer(normalized_x.norm(2, axis=1), normalized_y.norm(2, axis=1))
            rbf_kernel += self.kernel_reg_weight*((normalized_x).dot(normalized_y.T)) / xynorm
        # NOTE: returns a one-element list [rbf_kernel]; np.sum/np.mean
        # calls below operate on that list.
        self.kernel_function = theano.function([x, y, sigmas],
                                               [rbf_kernel],
                                               allow_input_downcast=True)
        # Evaluate k( expert, expert )
        if not (self.use_median_heuristic > 0):
            self.kernel_exex_total = self.kernel_function(self.expert_feat_B_Df,
                                                          self.expert_feat_B_Df,
                                                          self.kernel_bandwidth_params)
            self.kernel_exex_total = np.mean(self.kernel_exex_total)
    def _featurize(self, obsfeat_B_Do, a_B_Da, t_B):
        """Map (obs-features, actions, times) to the feature matrix feat_B_Df.

        Continuous actions: standardized [obs, action] (+ optional time
        polynomial) + bias. Discrete actions: obs-only features placed into
        one block per action index, + bias.
        """
        assert self.inputnorm_updated
        assert obsfeat_B_Do.shape[0] == a_B_Da.shape[0] == t_B.shape[0]
        B = obsfeat_B_Do.shape[0]
        # Standardize observations and actions
        if isinstance(self.action_space, ContinuousSpace):
            trans_B_Doa = self.inputnorm.standardize(np.concatenate([obsfeat_B_Do, a_B_Da], axis=1))
            obsfeat_B_Do, a_B_Da = trans_B_Doa[:,:obsfeat_B_Do.shape[1]], trans_B_Doa[:,obsfeat_B_Do.shape[1]:]
            assert obsfeat_B_Do.shape[1] == self.obsfeat_space.dim and a_B_Da.shape[1] == self.action_space.dim
        else:
            assert a_B_Da.shape[1] == 1 and np.allclose(a_B_Da, a_B_Da.astype(int)), 'actions must all be ints'
            obsfeat_B_Do = self.inputnorm.standardize(obsfeat_B_Do)
        # Concatenate with other stuff to get final features
        scaledt_B_1 = t_B[:,None]*self.time_scale
        if isinstance(self.action_space, ContinuousSpace):
            feat_cols = [obsfeat_B_Do, a_B_Da]
            if self.include_time:
                feat_cols.extend([scaledt_B_1, scaledt_B_1**2, scaledt_B_1**3])
            feat_cols.append(np.ones((B,1)))
            feat_B_Df = np.concatenate(feat_cols, axis=1)
        else:
            # Observation-only features
            obsonly_feat_cols = [obsfeat_B_Do, (.01*obsfeat_B_Do)**2]
            if self.include_time:
                obsonly_feat_cols.extend([scaledt_B_1, scaledt_B_1**2, scaledt_B_1**3])
            obsonly_feat_B_f = np.concatenate(obsonly_feat_cols, axis=1)
            # To get features that include actions, we'll have blocks of obs-only features,
            # one block for each action.
            assert a_B_Da.shape[1] == 1
            action_inds = [np.flatnonzero(a_B_Da[:,0] == a) for a in xrange(self.action_space.size)]
            assert sum(len(inds) for inds in action_inds) == B
            action_block_size = obsonly_feat_B_f.shape[1]
            # Place obs features into their appropriate blocks
            blocked_feat_B_Dfm1 = np.zeros((obsonly_feat_B_f.shape[0], action_block_size*self.action_space.size))
            for a in range(self.action_space.size):
                blocked_feat_B_Dfm1[action_inds[a],a*action_block_size:(a+1)*action_block_size] = obsonly_feat_B_f[action_inds[a],:]
            assert np.isfinite(blocked_feat_B_Dfm1).all()
            feat_B_Df = np.concatenate([blocked_feat_B_Dfm1, np.ones((B,1))], axis=1)
        assert feat_B_Df.ndim == 2 and feat_B_Df.shape[0] == B
        return feat_B_Df
    def _get_median_bandwidth(self, feat_B_Df):
        """Pick RBF bandwidths as inverse median (and optionally quartile)
        pairwise squared distances; expert-expert terms are cached in
        self.expert_sigmas after the first call."""
        print "Calculating bandwidth parameters..."
        sigmas = []
        N = feat_B_Df.shape[0]
        M = self.expert_feat_B_Df.shape[0]
        # Subsample (or oversample with replacement) the learner's features
        # to match the expert batch size.
        index = np.random.choice(N, M, replace=(N < M))
        initial_points = feat_B_Df[index, :] / feat_B_Df.shape[1]
        expert_points = self.expert_feat_B_Df / feat_B_Df.shape[1]
        # sigma_2 : median of pairwise squared-l2 distance among
        # data points from the expert policy
        if len(self.expert_sigmas) == 0:
            self.YY = np.multiply(expert_points, expert_points).sum(axis=1).reshape((1, -1))
            dist_matrix = self.YY + (self.YY).T - 2 * np.matmul(expert_points, expert_points.T)
            dist_array = np.absolute(np.asarray(dist_matrix).reshape(-1))
            sigma_2 = 1. / np.median(dist_array)
            self.expert_sigmas.append(sigma_2)
            if self.use_median_heuristic == 2:
                self.expert_sigmas.append(1. / np.percentile(dist_array, 25))
                self.expert_sigmas.append(1. / np.percentile(dist_array, 75))
        if self.use_median_heuristic < 4:
            sigmas.extend(self.expert_sigmas)
        # sigma_1 : median of pairwise squared-l2 distance between
        # data points from the expert policy and from initial policy
        XX = np.multiply(initial_points, initial_points).sum(axis=1).reshape((1, -1))
        dist_matrix = XX + (self.YY).T - 2 * np.matmul(initial_points, expert_points.T)
        dist_array = np.absolute(np.asarray(dist_matrix).reshape(-1))
        sigma_1 = 1. / np.median(dist_array)
        sigmas.append(sigma_1)
        # - use_median_heuristic 2 :
        # also use lower quantile (.25 percentile) and upper quantile (.75)
        if self.use_median_heuristic == 2:
            sigmas.append(1. / np.percentile(dist_array, 25))
            sigmas.append(1. / np.percentile(dist_array, 75))
        print "sigmas : ", sigmas
        return sigmas
    # grid search for bandwidth parameter which maximizes MMD^2
    def _get_bandwidth_with_parameter_search(self, feat_B_Df, min_value, max_value, grid_num):
        """Evaluate MMD^2 on a linear grid of bandwidths and return (as a
        one-element list) the bandwidth maximizing it."""
        print("Executing grid search for bandwidth parameter...")
        params = np.linspace(min_value, max_value, grid_num)
        max_param = 0
        max_mmdsquare = 0
        N = feat_B_Df.shape[0]
        M = self.expert_feat_B_Df.shape[0]
        batchsize = self.kernel_batchsize
        # Batch the kernel evaluations over columns to bound memory use.
        total_index = range(len(feat_B_Df))
        start_index = [index for index in total_index[0:len(feat_B_Df):batchsize]]
        end_index = [index for index in total_index[batchsize:len(feat_B_Df):batchsize]]
        end_index.append(len(feat_B_Df))
        indices_list = [range(start, end) for (start, end) in zip(start_index, end_index)]
        print('parameter, mmd_square')
        for param in params:
            kernel_learned_total = 0
            kernel_expert_total = 0
            # kernel_exex_total = 0
            for indices in indices_list:
                kernel_learned = \
                    self.kernel_function(feat_B_Df,
                                         feat_B_Df[indices, :],
                                         [param])
                kernel_learned_total += np.sum(np.sum(kernel_learned, axis=0))
                kernel_expert = \
                    self.kernel_function(self.expert_feat_B_Df,
                                         feat_B_Df[indices, :],
                                         [param])
                kernel_expert_total += np.sum(np.sum(kernel_expert, axis=0))
                kernel_exex = \
                    self.kernel_function(self.expert_feat_B_Df,
                                         self.expert_feat_B_Df,
                                         [param])
                # NOTE(review): kernel_exex_total is overwritten (=, not +=)
                # every batch, and the expert-expert kernel does not depend on
                # `indices` — verify this repeated evaluation is intended.
                kernel_exex_total = np.sum(np.sum(kernel_exex, axis=0))
            mmd_square = kernel_learned_total / (N * N) - 2. * kernel_expert_total / (N * M) + kernel_exex_total / (M * M)
            print(param, mmd_square)
            if mmd_square > max_mmdsquare:
                max_mmdsquare = mmd_square
                max_param = param
        return [max_param]
    def fit(self, obsfeat_B_Do, a_B_Da, t_B, _unused_exobs_Bex_Do, _unused_exa_Bex_Da, _unused_ext_Bex):
        """No-op training step; reports current MMD^2 and bandwidths for logging."""
        # In MMD Reward, we don't need to do anything here
        # Return current mmd square value
        outputs = [('MMD^2', self.mmd_square, float)]
        for i in range(len(self.kernel_bandwidth_params)):
            output = (('sigma')+str(i+1), self.kernel_bandwidth_params[i], float)
            outputs.append(output)
        return outputs
    def compute_reward(self, obsfeat_B_Do, a_B_Da, t_B):
        """Return per-sample rewards: the negated, normalized MMD witness,
        shifted/log-scaled so rewards are all <= 0 (favor_zero_expert_reward)
        or all >= 0 (otherwise). Also refreshes bandwidths periodically."""
        # Features from Learned Policy Trajectory
        feat_B_Df = self._featurize(obsfeat_B_Do, a_B_Da, t_B)
        # Note thant features from expert trajectory : self.expert_feat_B_Df
        cost_B = np.zeros(feat_B_Df.shape[0])
        N = feat_B_Df.shape[0]
        M = self.expert_feat_B_Df.shape[0]
        # Heuristic 4: re-run the grid search every 100 iterations and
        # narrow the search window around the winner.
        if self.use_median_heuristic == 4 and self.iteration % 100 == 0:
            self.kernel_bandwidth_params = \
                self._get_bandwidth_with_parameter_search(
                    feat_B_Df, self.min_param, self.max_param, 100)
            self.min_param = max(self.kernel_bandwidth_params[0] - 100.0, 0.0)
            self.max_param = min(self.kernel_bandwidth_params[0] + 100.0, 1000.0)
            self.kernel_exex_total = self.kernel_function(self.expert_feat_B_Df,
                                                          self.expert_feat_B_Df,
                                                          self.kernel_bandwidth_params)
            self.kernel_exex_total = np.sum(np.sum(self.kernel_exex_total, axis=0))
            self.kernel_exex_total /= (M * M)
        # Median heuristic: on the first call (no params yet), or every 50
        # iterations under heuristic 3.
        if len(self.kernel_bandwidth_params) == 0 or \
                (self.use_median_heuristic == 3 and self.iteration % 50 == 0):
            self.kernel_bandwidth_params = self._get_median_bandwidth(feat_B_Df)
            self.kernel_exex_total = self.kernel_function(self.expert_feat_B_Df,
                                                          self.expert_feat_B_Df,
                                                          self.kernel_bandwidth_params)
            self.kernel_exex_total = np.sum(np.sum(self.kernel_exex_total, axis=0))
            self.kernel_exex_total /= (M * M)
        kernel_learned_total = 0
        kernel_expert_total = 0
        batchsize = self.kernel_batchsize
        # Batch the kernel evaluations over columns to bound memory use.
        total_index = range(len(t_B))
        start_index = [index for index in total_index[0:len(t_B):batchsize]]
        end_index = [index for index in total_index[batchsize:len(t_B):batchsize]]
        end_index.append(len(t_B))
        indices_list = [range(start, end) for (start, end) in zip(start_index, end_index)]
        for indices in indices_list:
            kernel_learned = \
                self.kernel_function(feat_B_Df,
                                     feat_B_Df[indices, :],
                                     self.kernel_bandwidth_params)
            kernel_learned_total += np.sum(np.sum(kernel_learned, axis=0))
            kernel_expert = \
                self.kernel_function(self.expert_feat_B_Df,
                                     feat_B_Df[indices, :],
                                     self.kernel_bandwidth_params)
            kernel_expert_total += np.sum(np.sum(kernel_expert, axis=0))
            # Witness-function value for this batch of samples.
            cost_B[indices] = np.mean(kernel_learned, axis=1) - np.mean(kernel_expert, axis=1)
        # Use unbiased estimator
        # mmd_square = kernel_learned_total / (N * (N - 1)) - 2. * kernel_expert_total / (N * M) + self.kernel_exex_total
        # Use biased estimator
        mmd_square = kernel_learned_total / (N * N) - 2. * kernel_expert_total / (N * M) + self.kernel_exex_total
        if mmd_square > 0:
            self.mmd_square = mmd_square
        else:
            print "(Warning) Estimator for MMD^2 should be positive. Use previous MMD^2 value."
        # Normalize the witness by the MMD so the cost scale is stable.
        cost_B /= np.sqrt(self.mmd_square)
        r_B = -cost_B
        reward_max = r_B.max()
        reward_min = r_B.min()
        margin = (reward_max - reward_min) * self.epsilon #0.0001
        if self.favor_zero_expert_reward:
            # 0 for expert-like states, goes to -inf for non-expert-like states
            # compatible with envs with traj cutoffs for good (expert-like) behavior
            # e.g. mountain car, which gets cut off when the car reaches the destination
            if self.use_logscale_reward:
                reward_B = np.log((r_B - reward_min + margin) / (reward_max - reward_min + margin))
            else:
                reward_B = r_B - reward_max
        else:
            # 0 for non-expert-like states, goes to +inf for expert-like states
            # compatible with envs with traj cutoffs for bad (non-expert-like) behavior
            # e.g. walking simulations that get cut off when the robot falls over
            if self.use_logscale_reward:
                reward_B = -np.log((reward_max - r_B + margin) / (reward_max - reward_min + margin))
            else:
                reward_B = r_B - reward_min
        if self.favor_zero_expert_reward:
            assert (reward_B <= 0).all()
        else:
            assert (reward_B >= 0).all()
        self.current_reward = reward_B
        # Save imaginary rewards into pickle file
        # if self.save_reward and self.iteration % 100 == 0:
        #
        #     with open('reward.pk', 'wb') as reward_f:
        #         print("Save imaginary reward into pickle file...")
        #         pickle.dump(self.current_reward, reward_f)
        self.iteration += 1
        return reward_B
    def update_inputnorm(self, obs_B_Do, a_B_Da):
        """Update running standardization statistics (obs+action if continuous,
        obs only otherwise) and mark the normalizer as usable."""
        if isinstance(self.action_space, ContinuousSpace):
            self.inputnorm.update(np.concatenate([obs_B_Do, a_B_Da], axis=1))
        else:
            self.inputnorm.update(obs_B_Do)
        self.inputnorm_updated = True
|
# Configuration constants — appears to be an m.css Doxygen theme config
# (TODO confirm): the theme reads these module-level names at build time.
DOXYFILE = 'Doxyfile-mcss'
# Link from this sub-site back to the main documentation landing page.
MAIN_PROJECT_URL = '../index.html'
SHOW_UNDOCUMENTED = True
# First navbar row: generated Doxygen index pages.
LINKS_NAVBAR1 = [
    ('Pages', 'pages', []),
    # ('Namespaces', 'namespaces', []),
    ('Classes', 'annotated', []),
    ('Files', 'files', [])
]
# Second navbar row: external links (raw HTML anchors).
LINKS_NAVBAR2 = [
    ('<a href=\"../python/index.html\">Python</a>', []),
    ('<a href=\"https://github.com/artivis/manif\">GitHub</a>', [])
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,import-error,wrong-import-order
import cv2
import numpy as np
from common.camera import Camera
def main():
    """Show the camera feed stacked above its Canny edge map until capture ends."""
    def on_frame(frame, _):
        # Stack the raw frame on top of its edge map in one window.
        edge_map = cv2.Canny(frame, 100, 200)
        cv2.imshow('frame', np.vstack((frame, edge_map)))
    with Camera(0) as cam:
        width = cam.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = cam.get(cv2.CAP_PROP_FRAME_HEIGHT)
        fps = cam.get(cv2.CAP_PROP_FPS)
        print('Camera: %dx%d, %d' % (width, height, fps))
        cam.capture(on_frame, True)
        cv2.destroyAllWindows()
# Run the demo only when executed directly (not on import).
if __name__ == '__main__':
    main()
# Canny Edge Detection:
# http://docs.opencv.org/master/da/d22/tutorial_py_canny.html
|
## Lexer
#
# @filename Lexer.py
# @author Ben Mariano
# @date 5/9/2017
#
# @brief Custom language lexer/tokenizer.
from Token import Token
# Token Types
# Token type constants: each name is bound to its own string value so the
# parser can compare Token.type against these module-level identifiers.
(LPAREN, RPAREN, COMMA, LBRACK, RBRACK, LCURLY, RCURLY, SEMI,
 EQUALS, LESSTHAN, GREATERTHAN, LESSEQUAL, GREATEREQUAL, AND, OR, COLON, ID, INTEGER, CAUSES, DOT, QUOTE,
 RULES, TYPE, ALL, CONT, IF, NOTEQUAL, STATE, PYTHON, DASH, EOF) = (
    'LPAREN', 'RPAREN', 'COMMA', 'LBRACK', 'RBRACK', 'LCURLY',
    'RCURLY', 'SEMI', 'EQUALS', 'LESSTHAN','GREATERTHAN', 'LESSEQUAL', 'GREATEREQUAL', 'AND', 'OR', 'COLON', 'ID',
    'INTEGER', 'CAUSES', 'DOT', 'QUOTE', 'RULES', 'TYPE',
    'ALL', 'CONT', 'IF', 'NOTEQUAL', 'STATE', 'PYTHON', 'DASH', 'EOF'
)
# Automatically tokenizes certain reserved keywords
# (note: all keywords are upper-case except 'if', which is matched lower-case).
RESERVED_KEYWORDS = {
    'RULES': Token('RULES', 'RULES'),
    'TYPE': Token('TYPE', 'TYPE'),
    'ALL': Token('ALL', 'ALL'),
    'CONT': Token('CONT', 'CONT'),
    'STATE': Token('STATE', 'STATE'),
    'PYTHON': Token('PYTHON', 'PYTHON'),
    'if': Token('IF', 'IF'),
}
## Custom Lexer
#
# @brief The Lexer transforms the raw input text from the custom
# language into a list of tokens
class Lexer(object):
    ## Constructor
    #
    # @param text input causal knowledge text
    def __init__(self, text):
        ## @var text
        # Raw input code in custom language
        self.text = text
        ## @var pos
        # current index/position in input text
        self.pos = 0
        ## @var current_char
        # character at the current index/position
        self.current_char = self.text[self.pos]
    ## Lexer Error
    #
    # @brief Notifies user of use of invalid/unrecognized character
    #
    # @retval none
    def error(self):
        raise Exception('Invalid character: {c}'.format(
            c = self.current_char
        ))
    ## Advance
    #
    # @brief Changes current position and adjusts current character
    #        appropriately. Current character is equal to None if the
    #        position is at the end of the input text
    #
    # @retval none
    def advance(self):
        self.pos += 1
        if self.pos > len(self.text) - 1:
            self.current_char = None
        else:
            self.current_char = self.text[self.pos]
    ## Skip Whitespace
    #
    # @brief Ignores whitespace. Input can have arbitrary spacing.
    #
    # @retval none
    def skip_whitespace(self):
        while self.current_char is not None and self.current_char.isspace():
            self.advance()
    ## Integer
    #
    # @brief Identifies digits/strings of digits and returns them as integers
    #
    # @retval Integer integer representation of the character sequence
    def integer(self):
        result = ''
        while self.current_char is not None and self.current_char.isdigit():
            result += self.current_char
            self.advance()
        return int(result)
    ## Peek
    #
    # @brief Returns the next character without actually moving the current
    #        position. This is needed for certain Lexing decisions.
    #
    # @retval char next character after current_char (None at end of input)
    def peek(self):
        peek_pos = self.pos + 1
        if peek_pos > len(self.text) - 1:
            return None
        else:
            return self.text[peek_pos]
    ## ID
    #
    # @brief Look in keywords for the given ID and return the corresponding
    #        token. Identifiers may contain alphanumerics, '-' and '_';
    #        underscores are replaced by spaces before keyword lookup.
    #        A PYTHON keyword switches to raw mode: everything up to the
    #        next '#' is captured verbatim as the PYTHON token's value.
    #
    # @retval Token token representing an id
    def _id(self):
        result = ''
        while self.current_char is not None and (self.current_char.isalnum() or self.current_char == '-' or self.current_char == '_'):
            result += self.current_char
            self.advance()
        result = result.replace('_', ' ')
        token = RESERVED_KEYWORDS.get(result, Token(ID, result))
        result2 = ''
        if token.type == PYTHON:
            # Skip the two delimiter characters after the PYTHON keyword,
            # then capture raw text until the closing '#'.
            self.advance()
            self.advance()
            while self.current_char != '#':
                result2 += self.current_char
                self.advance()
            self.advance()
            self.advance()
            token = Token(PYTHON, result2)
        return token
    ## Get Next Token
    #
    # @brief Tokenizes the entire input. Two-character operators
    #        (':=', '!=', '<=', '>=', '&&', '||') are disambiguated from
    #        their one-character prefixes via peek().
    #
    # @retval Token the next token (EOF token at end of input)
    def get_next_token(self):
        while self.current_char is not None:
            if self.current_char.isspace():
                self.skip_whitespace()
                continue
            if self.current_char.isalpha():
                return self._id()
            if self.current_char.isdigit():
                return Token(INTEGER, self.integer())
            if self.current_char == ':' and self.peek() == '=':
                self.advance()
                self.advance()
                return Token(CAUSES, ':=')
            if self.current_char == ';':
                self.advance()
                return Token(SEMI, ';')
            if self.current_char == '-':
                self.advance()
                return Token(DASH, '-')
            if self.current_char == '(':
                self.advance()
                return Token(LPAREN, '(')
            if self.current_char == ')':
                self.advance()
                return Token(RPAREN, ')')
            if self.current_char == '[':
                self.advance()
                return Token(LBRACK, '[')
            if self.current_char == ']':
                self.advance()
                return Token(RBRACK, ']')
            if self.current_char == ',':
                self.advance()
                return Token(COMMA, ',')
            if self.current_char == '{':
                self.advance()
                return Token(LCURLY, '{')
            if self.current_char == '}':
                self.advance()
                return Token(RCURLY, '}')
            if self.current_char == '=':
                self.advance()
                return Token(EQUALS, '=')
            if self.current_char == '!' and self.peek() == '=':
                self.advance()
                self.advance()
                return Token(NOTEQUAL,'!=')
            if self.current_char == '<' and self.peek() != '=':
                self.advance()
                return Token(LESSTHAN,'<')
            if self.current_char == '>' and self.peek() != '=':
                self.advance()
                return Token(GREATERTHAN, '>')
            if self.current_char == '>' and self.peek() == '=':
                self.advance()
                self.advance()
                return Token(GREATEREQUAL,'>=')
            if self.current_char == '<' and self.peek() == '=':
                self.advance()
                self.advance()
                return Token(LESSEQUAL, '<=')
            # NOTE: a second, unreachable '{' check was removed here — the
            # LCURLY case above already consumes every '{'.
            if self.current_char == '&' and self.peek() == '&':
                self.advance()
                self.advance()
                return Token(AND, '&&')
            if self.current_char == '|' and self.peek() == '|':
                self.advance()
                self.advance()
                return Token(OR, '||')
            if self.current_char == ':':
                self.advance()
                return Token(COLON, ':')
            if self.current_char == '.':
                self.advance()
                return Token(DOT, '.')
            if self.current_char == '\'':
                self.advance()
                return Token(QUOTE, '\'')
            self.error()
        return Token(EOF, None)
|
from __future__ import absolute_import
import jax
from jax import numpy as jnp
def cuda():
    """Return the first GPU device, or None when no GPU backend is available."""
    try:
        return jax.devices("gpu")[0]
    except Exception:
        # jax.devices raises RuntimeError when the "gpu" backend is absent.
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return None
def is_cuda_available():
    """True when the active XLA backend platform mentions 'gpu'."""
    from jax.lib import xla_bridge
    platform_name = str(xla_bridge.get_backend().platform)
    return 'gpu' in platform_name
def array_equal(a, b):
    """Elementwise equality of two arrays; False if either operand is None."""
    if a is None:
        return False
    if b is None:
        return False
    return (a == b).all()
def allclose(a, b, rtol=1e-4, atol=1e-4):
    """Approximate elementwise equality with loose default tolerances."""
    return jnp.allclose(a, b, rtol=rtol, atol=atol)
def randn(shape):
    """Standard-normal float32 sample of the given shape.

    NOTE: the PRNG key is fixed, so every call returns the same values.
    """
    prng_key = jax.random.PRNGKey(2666)
    return jax.random.normal(key=prng_key, shape=shape, dtype=jnp.float32)
def full(shape, fill_value, dtype, ctx):
    """Constant array of `fill_value`; `ctx` (device) is accepted but
    currently ignored (device placement is still TODO upstream)."""
    return jnp.full(shape=shape, fill_value=fill_value, dtype=dtype)
def narrow_row_set(x, start, stop, new):
    """Overwrite rows [start, stop) of `x` with `new`.

    Mutates `x` in place when it supports item assignment (e.g. numpy).
    JAX arrays are immutable — slice assignment raises TypeError — so the
    functional `.at[...].set(...)` update is used instead, and the updated
    array is returned for callers that need it (in-place callers may keep
    ignoring the return value, as before).
    """
    try:
        x[start:stop] = new
    except TypeError:
        # jax.numpy arrays do not support item assignment.
        x = x.at[start:stop].set(new)
    return x
def sparse_to_numpy(x):
    """Densify a sparse array by delegating to its `to_dense()` method."""
    dense = x.to_dense()
    return dense
def clone(x):
    # NOTE(review): jnp.asarray does NOT copy when x is already a jax array;
    # since jax arrays are immutable, sharing is as safe as a deep copy here,
    # but for mutable (e.g. numpy) inputs the result may alias x — confirm
    # callers never mutate the source afterwards.
    return jnp.asarray(x)
def reduce_sum(x):
    """Sum of all elements of `x`."""
    total = jnp.sum(x)
    return total
def softmax(x, dim):
    """Softmax of `x` along axis `dim`."""
    normalized = jax.nn.softmax(x, axis=dim)
    return normalized
def spmm(x, y):
    """Sparse-dense matrix product (plain matmul under the jax backend)."""
    product = x @ y
    return product
def add(a, b):
    """Elementwise addition."""
    result = a + b
    return result
def sub(a, b):
    """Elementwise subtraction."""
    result = a - b
    return result
def mul(a, b):
    """Elementwise multiplication."""
    result = a * b
    return result
def div(a, b):
    """Elementwise (true) division."""
    result = a / b
    return result
def sum(x, dim, keepdims=False):
    """Sum along axis `dim` (shadows the builtin by design of this backend API)."""
    reduced = x.sum(dim, keepdims=keepdims)
    return reduced
def max(x, dim):
    """Maximum along axis `dim` (shadows the builtin by design of this backend API)."""
    largest = x.max(dim)
    return largest
def min(x, dim):
    """Minimum along axis `dim` (shadows the builtin by design of this backend API)."""
    smallest = x.min(dim)
    return smallest
def prod(x, dim):
    """Product along axis `dim`."""
    product = x.prod(dim)
    return product
def matmul(a, b):
    """Matrix product of `a` and `b`."""
    product = a @ b
    return product
def dot(a, b):
    """Inner product along the last axis, composed from this backend's mul/sum."""
    elementwise = mul(a, b)
    return sum(elementwise, dim=-1)
def abs(a):
    """Elementwise absolute value (shadows the builtin by design of this backend API)."""
    # BUG FIX: numpy/jax ndarrays expose no `.abs()` method — that is a
    # torch idiom left over from the port; use jnp.abs instead.
    return jnp.abs(a)
|
import enum
class ActivationType(enum.Enum):
    """
    The activation types are used to link a specific precursor mass with an
    activation type. ModeA through ModeZ (26 values) are reserved mode slots.
    """
    CollisionInducedDissociation = 0
    MultiPhotonDissociation = 1
    ElectronCaptureDissociation = 2
    PQD = 3
    ElectronTransferDissociation = 4
    HigherEnergyCollisionalDissociation = 5
    Any = 6
    SAactivation = 7
    ProtonTransferReaction = 8
    NegativeElectronTransferDissociation = 9
    NegativeProtonTransferReaction = 10
    UltraVioletPhotoDissociation = 11
    # Reserved mode values (A-Z).
    ModeA = 12
    ModeB = 13
    ModeC = 14
    ModeD = 15
    ModeE = 16
    ModeF = 17
    ModeG = 18
    ModeH = 19
    ModeI = 20
    ModeJ = 21
    ModeK = 22
    ModeL = 23
    ModeM = 24
    ModeN = 25
    ModeO = 26
    ModeP = 27
    ModeQ = 28
    ModeR = 29
    ModeS = 30
    ModeT = 31
    ModeU = 32
    ModeV = 33
    ModeW = 34
    ModeX = 35
    ModeY = 36
    ModeZ = 37
    # Sentinel marking the end of the enumeration.
    LastActivation = 38
"""
!/usr/bin/python3.7
-*- coding: UTF-8 -*-
Author: https://github.com/Guanyan1996
┌─┐ ┌─┐
┌──┘ ┴───────┘ ┴──┐
│ │
│ ─── │
│ ─┬┘ └┬─ │
│ │
│ ─┴─ │
│ │
└───┐ ┌───┘
│ │
│ │
│ │
│ └──────────────┐
│ │
│ ├─┐
│ ┌─┘
│ │
└─┐ ┐ ┌───────┬──┐ ┌──┘
│ ─┤ ─┤ │ ─┤ ─┤
└──┴──┘ └──┴──┘
神兽保佑
代码无BUG!
"""
import base64
import os
import urllib
import requests
from loguru import logger
def base64_2_str(base64_encode):
    """Decode a base64 payload into a UTF-8 string.

    Args:
        base64_encode: base64-encoded str.
    Returns: decoded str (UTF-8)
    """
    raw_bytes = base64.b64decode(base64_encode)
    return raw_bytes.decode('utf-8')
def get_gitlab_file(gitlab_ip, project_id, git_commit, private_token,
                    filepath, output=None):
    """Fetch one file's content from a GitLab repository via the v4 REST API.

    Args:
        gitlab_ip: base URL, e.g. https://gitlab.xxx.cn
        project_id: the "Project ID" shown on the project's GitLab home page
        git_commit: commit id or branch name to read the file from
        private_token: personal access token (avatar -> Preferences ->
            Access Tokens -> create)
        filepath: path of the file inside the repository
        output: local directory to download into; if falsy, nothing is written
    Returns: str(git file content)
    """
    # URL-encode the repo path (safe='') so slashes become %2F as the API requires.
    quote_filepath = urllib.parse.quote(filepath, safe='', encoding=None,
                                        errors=None)
    logger.info(quote_filepath)
    interface = f"api/v4/projects/{project_id}/repository/files/{quote_filepath}?ref={git_commit}"
    gitlab_file_url = urllib.parse.urljoin(gitlab_ip, interface)
    logger.info(gitlab_file_url)
    header = {"PRIVATE-TOKEN": private_token}
    res = requests.get(headers=header, url=gitlab_file_url)
    logger.info(f"{res.status_code}, {res.json()['file_name']}")
    # The API returns the file body base64-encoded in the "content" field.
    file_content = base64_2_str(res.json()["content"])
    logger.info(file_content)
    if output:
        with open(os.path.join(output, os.path.basename(filepath)), "w") as f:
            f.write(file_content)
    return file_content
|
from config import *
from keyboard import *
from functions import *
def start_seller(user):
    """
    This is the handler to start seller options
    """
    markup = seller_menu()
    seller = get_user(msg=user)
    prompt = emoji.emojize(
        ":robot: What would you like to do today?",
        use_aliases=True
    )
    bot.send_message(seller.id, prompt, reply_markup=markup)
def start_buyer(user):
    """
    This is the handler to start buyer options
    """
    markup = buyer_menu()
    buyer = get_user(msg=user)
    prompt = emoji.emojize(
        ":robot: What would you like to do today?",
        use_aliases=True
    )
    bot.send_message(buyer.id, prompt, reply_markup=markup)
def select_coin(user):
    """
    Selecting the right coin option for trade
    """
    markup = coin_menu()
    prompt = emoji.emojize(
        ":money_bag: What is your preferred coin for payment ? ",
        use_aliases=True
    )
    bot.send_message(user.id, prompt, reply_markup=markup)
##############TRADE CREATION
def trade_price(user):
    """
    Receive user input on trade price
    """
    prompt = emoji.emojize(
        ":money_bag: How much are you expecting to be paid in your local currency? ",
        use_aliases=True
    )
    # Ask for the price, then route the reply to trade_address.
    question = bot.send_message(user.id, prompt).wait()
    bot.register_next_step_handler(question, trade_address)
def trade_address(msg):
    """
    Recieve user input on trade wallet address
    """
    # msg.text holds the price typed in reply to trade_price's question.
    price = msg.text
    add_price(
        user=msg.from_user,
        price=float(price)
    )
    #REQUEST WALLET ADDRESS
    question = bot.send_message(
        msg.from_user.id,
        emoji.emojize(
            ":money_bag: Paste the wallet address to which you will recieve payment referenced to the coin you selected above (Confirm the wallet address to make sure it is correct) ",
            use_aliases=True
        )
    )
    question = question.wait()
    # The next reply (the wallet address) is handled by process_trade.
    bot.register_next_step_handler(question, process_trade)
def process_trade(msg):
    """
    Assigning of trade wallet
    """
    # msg.text holds the wallet address typed in reply to trade_address.
    wallet = msg.text
    add_wallet(
        user=msg.from_user,
        address=wallet
    )
    # Echo the freshly created trade's details back to the seller.
    trade = get_recent_trade(msg.from_user)
    bot.send_message(
        trade.seller,
        emoji.emojize(
            f"""
            :memo: <b>Trade Details</b> :memo:
            -----------------------
            :beginner: <b>ID --> {trade.id}</b>
            :beginner: <b>Price --> {trade.price} {trade.currency}</b>
            :beginner: <b>Preferred method of payment --> {trade.coin}</b>
            :beginner: <b>Created on --> {trade.created_at}</b>
            Share only the trade ID with your customer to allow his/her join the trade. They would receive all the related information when they join.
            """,
            use_aliases=True
        ),
        parse_mode=telegram.ParseMode.HTML,
    )
#############APPROVING PAYMENTS
def validate_pay(msg):
    "Receives the transaction hash for checking"
    trade = get_recent_trade(msg.from_user)
    # msg.text is the transaction hash supplied by the buyer.
    trade_hash = msg.text
    status = check_payment(trade, trade_hash)
    if status == "Approved":
        ##SEND CONFIRMATION TO SELLER
        bot.send_message(
            trade.seller,
            emoji.emojize(
                f"""
            :memo: <b>TRADE ID - {trade.id}</b> :memo:
            ------------------------------------
            <b>Buyer Payment Confirmed Successfully :white_check_mark: . Please release the goods to the buyer before being paid</b>
            """,
                use_aliases=True
            ),
            parse_mode=telegram.ParseMode.HTML
        )
        ##SEND CONFIRMATION TO BUYER
        bot.send_message(
            trade.buyer,
            emoji.emojize(
                f"""
            :memo: <b>TRADE ID - {trade.id}</b> :memo:
            ------------------------------------
            <b>Payment Confirmed Sucessfully :white_check_mark: . Seller has been instructed to release the goods to you.</b>
            """,
                use_aliases=True
            ),
            parse_mode=telegram.ParseMode.HTML,
            reply_markup=confirm_goods()
        )
    else:
        ##SEND ALERT TO SELLER
        # NOTE(review): this alert is sent to trade.buyer despite the
        # "SEND ALERT TO SELLER" label — confirm which party should get it.
        bot.send_message(
            trade.buyer,
            emoji.emojize(
                f"""
            :memo: <b>TRADE ID - {trade.id}</b> :memo:
            ------------------------------------
            <b>Payment Still Pending! :heavy_exclamation_mark: Please cross check the transaction hash and try again.</b>
            """,
                use_aliases=True
            ),
            parse_mode=telegram.ParseMode.HTML
        )
        # Remove the buyer's message containing the (invalid) hash.
        bot.delete_message(msg.chat.id, msg.message_id)
##REFUND PROCESS FOR BUYER
##REFUND PROCESS FOR BUYER
def refund_to_buyer(msg):
    "Refund Coins Back To Buyer"
    trade = get_recent_trade(msg)
    # Only refundable once the buyer's payment has been recorded.
    if trade.payment_status == True:
        question = bot.send_message(
            trade.buyer,
            f"A refund was requested for your funds on trade {trade.id}. Please paste a wallet address to receive in {trade.coin}"
        )
        question = question.wait()
        # The next reply (the refund wallet address) is handled by refund_coins.
        bot.register_next_step_handler(question, refund_coins)
    else:
        # NOTE(review): msg.id is used as the chat id here (other handlers use
        # msg.from_user.id / trade.* ids) — confirm the intended recipient.
        bot.send_message(
            msg.id,
            emoji.emojize(
                ":warning: Buyer Has Not Made Payments Yet!!",
                use_aliases=True
            ),
            parse_mode=telegram.ParseMode.HTML
        )
def refund_coins(msg):
    "Payout refund"
    # msg.text holds the wallet address pasted by the buyer.
    wallet = msg.text
    trade = get_recent_trade(msg.from_user)
    pay_to_buyer(trade, wallet)
    # Notify the admin that the refund went out.
    bot.send_message(
        ADMIN_ID,
        emoji.emojize(
            """
            <b>Refunds Paid</b> :heavy_check_mark:
            """,
            use_aliases=True
        ),
        parse_mode=telegram.ParseMode.HTML,
    )
##REFUND PROCES SELLER TO RECEIVE FUNDS
##REFUND PROCES SELLER TO RECEIVE FUNDS
def refund_to_seller(msg):
    "Refund Coins Back To Buyer"
    trade = get_recent_trade(msg)
    # Re-check the payment on-chain before paying out to the seller.
    confirm_pay(trade)
    if trade.payment_status == True:
        pay_funds_to_seller(trade)
        # Notify the admin that the payout went out.
        bot.send_message(
            ADMIN_ID,
            emoji.emojize(
                """
            <b>Paid To Seller</b> :heavy_check_mark:
            """,
                use_aliases=True
            ),
            parse_mode=telegram.ParseMode.HTML,
        )
    else:
        # NOTE(review): msg.id is used as the chat id here (other handlers use
        # msg.from_user.id / trade.* ids) — confirm the intended recipient.
        bot.send_message(
            msg.id,
            emoji.emojize(
                ":warning: Buyer Has Not Made Payments Yet!!",
                use_aliases=True
            ),
            parse_mode=telegram.ParseMode.HTML
        )
####CLOSE TRADE WITH NO PAYOUTS
####CLOSE TRADE WITH NO PAYOUTS
def close_dispute_trade(msg):
    "Close Order After Dispute & No Body Has Paid"
    trade = get_recent_trade(msg)
    close_trade(trade)
    # Tell both parties the trade is closed.
    users = [trade.seller, trade.buyer]
    for user in users:
        bot.send_message(
            user,
            emoji.emojize(
                f"<b>Trade {trade.id} Closed</b> :mailbox_closed: ",
                use_aliases=True
            ),
            parse_mode=telegram.ParseMode.HTML,
        )
import os
import slack
import requests
import datetime
import yaml
from GoogleAPI import Create_Service
from googleapiclient.http import MediaFileUpload
from slackeventsapi import SlackEventAdapter
from pathlib import Path
from dotenv import load_dotenv
from flask import Flask, request, Response
"""
Load environment
"""
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
"""
Setup Flask App and Slack Client
"""
app = Flask(__name__)
client = slack.WebClient(token=os.environ["SLACK_TOKEN"])
"""
Create Google API Service
"""
GOOGLE_SECRET_FILENAME = "token.json"
GOOGLE_API_NAME = "drive"
GOOGLE_API_VERSION = "v3"
SCOPES = ["https://www.googleapis.com/auth/drive"]
GOOGLE_SERVICE = Create_Service(
GOOGLE_SECRET_FILENAME, GOOGLE_API_NAME, GOOGLE_API_VERSION, SCOPES
)
"""
Create Slack Event Adapter
- Event endpoint: '/slack/events'
"""
slack_event_adapter = SlackEventAdapter(
os.environ["SIGNING_SECRET"], "/slack/events", app
)
BOT_ID = client.api_call("auth.test")["user_id"]
ACCEPTED_FILE_TYPES = ["heic", "heif", "jpeg", "jpg", "mov", "mp4", "mpg", "png", "raw"]
FOLDER_YAML = "folders.yaml"
def get_folder_id(channel_id):
    """
    Given the channel id, return the associated folder ID (None if unmapped)
    """
    # Create an empty mapping file on first use so the read below succeeds.
    if not os.path.exists(FOLDER_YAML):
        open(FOLDER_YAML, 'w').close()
    # An empty file parses to None, hence the `or {}` fallback.
    with open(FOLDER_YAML, "r") as stream:
        channel_to_folder = yaml.safe_load(stream) or {}
    return channel_to_folder.get(channel_id, None)
def upload_image(media_url, file_name, file_type, folder_id):
    """
    Download a media file from the Slack server and upload it to Google Drive.
    Parameters
    ----------
    media_url : str
        The URL (private) of the media (requires token)
    file_name : str
        The name to save the file
    file_type : str
        File extension, which must be part of ACCEPTED_FILE_TYPES
    folder_id : str
        ID of the Google Drive folder that receives the upload
    Returns
    -------
    tuple
        (True, None) on success, (False, error_message) on failure
    """
    # Make GET request to retrieve media file from Slack server
    media_data = requests.get(
        media_url, headers={"Authorization": f'Bearer {os.environ["SLACK_TOKEN"]}'}
    )
    # If not status OK, return immediately
    if media_data.status_code != 200:
        # Fixed message: this request goes to Slack, not the Google Drive API
        return False, "Could not retrieve file from the Slack server."
    # Write the media to a temp cache file so that we can upload to GDrive
    proper_file_name = f"{file_name}.{file_type}"
    local_file_name = f"cache/{proper_file_name}"
    # Make cache dir if it doesn't exist
    if not os.path.exists("cache"):
        os.makedirs("cache")
    with open(local_file_name, "wb") as file:
        file.write(media_data.content)
    # Set up metadata to upload to GDrive
    file_metadata = {
        "name": proper_file_name,
        "parents": [folder_id],
    }
    # Perform the upload; the cached copy is always cleaned up in `finally`
    media_body = MediaFileUpload(local_file_name, resumable=True)
    try:
        GOOGLE_SERVICE.files().create(
            body=file_metadata, media_body=media_body, fields="id"
        ).execute()
    except Exception:
        return (
            False,
            "Could not upload to Google Drive. Make sure that the Google Drive folder ID is correct "
            "by running the slash command: `/current-folder-id`. If the folder ID is correct, "
            "consider changing viewing permissions on your folder (set to the *entire organization*) "
            "to allow the bot write access.",
        )
    finally:
        # NOTE: drop the MediaFileUpload reference first so its open file
        # handle is released before we delete the cached copy
        media_body = None
        _remove_cached_file(local_file_name)
    return True, None
def _remove_cached_file(path):
    """Best-effort delete of a cached media file; ignores filesystem errors."""
    try:
        os.remove(path)
    except OSError:
        pass
# Timestamps of events already handled, to avoid processing duplicates.
# NOTE(review): this set grows without bound for the process lifetime.
stored_timestamps = set()
@slack_event_adapter.on("message")
def handle_incoming_message(payload):
    """
    Handle all incoming slack messages: upload any attached media files
    to the channel's configured Google Drive folder and report the result.
    Parameters
    ----------
    payload : dict
        See https://api.slack.com/events/message for more information
    Returns
    -------
    None
    """
    # Get relevant slack bot information
    event = payload.get("event", {})
    channel_id = event.get("channel")
    folder_id = get_folder_id(channel_id)
    user_id = event.get("user")
    ts = event.get("ts")
    thread_ts = event.get("thread_ts", ts)  # for media in threads
    # Human-readable timestamp used as a filename prefix
    proper_date = datetime.datetime.fromtimestamp(float(ts)).strftime(
        "%Y-%m-%d--%H-%M-%S"
    )
    # Ignore messages sent by the bot itself
    if user_id != BOT_ID:
        # If the Google Drive service could not be created, report and bail
        if not GOOGLE_SERVICE:
            client.chat_postMessage(
                channel=channel_id,
                text=":x: Failed to upload files.\n\nCould not generate a token.",
                thread_ts=thread_ts,
            )
            return  # Return immediately
        # Skip events we have already seen (dedupe by timestamp)
        if ts not in stored_timestamps:
            # NOTE: For some reason, Slack API might send multiple requests
            stored_timestamps.add(ts)
            encountered_error_messages = set()
            # Check to see if files is part of the payload
            if "files" in event:
                success = 0
                failure = 0
                # Iterate through all file objects
                for single_file in event["files"]:
                    file_type = single_file["filetype"]
                    # If the file type is valid, upload to GDrive
                    if file_type in ACCEPTED_FILE_TYPES:
                        # No folder configured for this channel: report and bail
                        if not folder_id:
                            client.chat_postMessage(
                                channel=channel_id,
                                text=":x: Failed to upload files.\n\nThe Google Drive folder ID is not configured "
                                "for this channel. Use the slash command: `/current-folder-id` to check the "
                                "current Google Drive folder ID.",
                                thread_ts=thread_ts,
                            )
                            return  # Return immediately
                        private_url = single_file["url_private"]
                        file_name = proper_date + " " + single_file["id"]
                        response, message = upload_image(
                            private_url, file_name, file_type, folder_id
                        )
                        # Tally based on the response
                        if response:
                            success += 1
                        else:
                            failure += 1
                            encountered_error_messages.add(" • " + message)
                # Construct text message summarizing successes/failures.
                # NOTE(review): errors are kept in a set, so their order in the
                # reply is unspecified.
                text = ""
                if success > 0:
                    text += f":white_check_mark: Successfully uploaded {success} "
                    text += "files.\n" if success != 1 else "file.\n"
                if failure > 0:
                    text += f":x: Failed to upload {failure} "
                    text += "files.\n\n" if failure != 1 else "file.\n\n"
                    text += "Errors:\n" + "\n".join(encountered_error_messages)
                # Post message to channel (only if anything was attempted)
                if text:
                    client.chat_postMessage(
                        channel=channel_id,
                        text=text,
                        thread_ts=thread_ts,
                    )
@app.route("/config-folder-id", methods=["POST"])
def handle_folder_config():
"""
Handle slack command for configuring GDrive folder ID
"""
# Retrieve channel and folder ID's from POST request
data = request.form
channel_id = data["channel_id"]
folder_id = data["text"]
# Load the YAML file of folder mappings
yaml_file = None
if not os.path.exists(FOLDER_YAML):
open(FOLDER_YAML, 'w').close()
with open(FOLDER_YAML, "r") as stream:
yaml_file = yaml.safe_load(stream) or {}
# Create Slack Bot reply message
message = ""
if len(folder_id.split()) > 1: # Make sure it is one word
message = "The Google Drive folder ID should be one string. Please verify and try again."
else: # Otherwise, we can update the yaml file
yaml_file.update({channel_id: folder_id})
with open(FOLDER_YAML, "w") as yamlfile:
yaml.safe_dump(yaml_file, yamlfile)
message = (
f"The Google Drive folder ID for this channel is set to: "
f"<https://drive.google.com/drive/u/0/folders/{folder_id}|{folder_id}>"
)
# Return a message with the current channel_id
client.chat_postMessage(channel=channel_id, text=message)
return Response(), 200
@app.route("/current-folder-id", methods=["POST"])
def return_current_folder():
"""
Given the channel ID, return the GDrive folder ID from FOLDER_YAML
"""
data = request.form
channel_id = data["channel_id"]
folder_id = get_folder_id(channel_id)
# Print message based on the folder ID
message = ""
if not folder_id:
message = (
"This channel's Google Drive folder ID has *not* been configured. "
"Use the slash command: `/config-folder-id <folder-id>` to configure this channel's Google Drive folder ID."
)
else:
message = f"This channel's Google Drive folder ID is: <https://drive.google.com/drive/u/0/folders/{folder_id}|{folder_id}>"
# Return a message with the current channel_id
client.chat_postMessage(channel=channel_id, text=message)
return Response(), 200
if __name__ == "__main__":
app.run(debug=True)
|
# Django URLconf: map URL routes to dashboard views
from django.urls import path
from . import views
# Namespace for reversing URLs, e.g. 'dashboard:index'
app_name = 'dashboard'
# Routes
urlpatterns = [
    # HTML page views
    path('', views.index, name='index'),
    path('detail', views.detail, name='detail'),
    path('history', views.history, name='history'),
    # Data endpoints for sensor readings.
    # NOTE(review): <str:racks> presumably encodes a list of rack IDs in a
    # single path segment — confirm the expected delimiter against the views.
    path('get_newest_readings/<str:racks>/<str:s_type>/<int:amount>', views.get_newest_readings),
    path('get_readings_by_date/<str:racks>/<str:s_type>/<str:r_date>', views.get_readings_by_date),
    path('get_all_readings_by_date/<int:rack>/<str:r_date>', views.get_all_readings_by_date),
    path('get_all_readings_by_range/<str:racks>/<int:date_from>/<int:date_to>', views.get_all_readings_by_range),
    path('get_all_readings_since_date/<str:racks>/<int:date>', views.get_all_readings_since_date),
    # Alarm endpoints
    path('alarm/<str:a_type>/<int:alarm>', views.alarm),
    path('get_current_alarm', views.get_current_alarm)
]
from django.apps import AppConfig
from tulius.websockets import runserver
class WebsocketsConfig(AppConfig):
    """Django application config for the tulius.websockets app."""
    name = 'tulius.websockets'
    label = 'websockets'
    verbose_name = 'websockets'
    def ready(self):
        """Apply the runserver patch once Django's app registry is ready."""
        runserver.patch()
|
# -*- coding: utf-8 -*-
"""
NG PyTest Salt Plugin
"""
# pragma: no cover
import os
import re
import sys
from saltfactories._version import get_versions
# Store the package version string computed by versioneer
__version__ = get_versions()["version"]
# Remove the helper from the module namespace to keep the public API clean
del get_versions
# Define __version_info__ attribute: parse "YYYY.M.D[.dev0+N...]" into ints
VERSION_INFO_REGEX = re.compile(
    r"(?P<year>[\d]{4})\.(?P<month>[\d]{1,2})\.(?P<day>[\d]{1,2})"
    r"(?:\.dev0\+(?P<commits>[\d]+)\.(?:.*))?"
)
try:
    # Generator instead of an intermediate list; empty groups are dropped
    __version_info__ = tuple(int(p) for p in VERSION_INFO_REGEX.match(__version__).groups() if p)
except AttributeError:
    # .match() returned None — version string did not have the expected shape
    __version_info__ = (-1, -1, -1)
finally:
    # Keep the module namespace clean
    del VERSION_INFO_REGEX
# Define some constants
# Absolute path of the directory containing this package's code
CODE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Platform flags derived from sys.platform
IS_WINDOWS = sys.platform.startswith("win")
IS_DARWIN = IS_OSX = sys.platform.startswith("darwin")
|
""" Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR fretta\-bcm\-dpa\-npu\-stats package operational data.
This module contains definitions
for the following management objects\:
dpa\: Stats Data
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Dpa(Entity):
"""
Stats Data
.. attribute:: stats
Voq or Trap Data
**type**\: :py:class:`Stats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats>`
"""
_prefix = 'fretta-bcm-dpa-npu-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dpa, self).__init__()
self._top_entity = None
self.yang_name = "dpa"
self.yang_parent_name = "Cisco-IOS-XR-fretta-bcm-dpa-npu-stats-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("stats", ("stats", Dpa.Stats))])
self._leafs = OrderedDict()
self.stats = Dpa.Stats()
self.stats.parent = self
self._children_name_map["stats"] = "stats"
self._segment_path = lambda: "Cisco-IOS-XR-fretta-bcm-dpa-npu-stats-oper:dpa"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dpa, [], name, value)
class Stats(Entity):
"""
Voq or Trap Data
.. attribute:: nodes
DPA data for available nodes
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes>`
"""
_prefix = 'fretta-bcm-dpa-npu-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dpa.Stats, self).__init__()
self.yang_name = "stats"
self.yang_parent_name = "dpa"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("nodes", ("nodes", Dpa.Stats.Nodes))])
self._leafs = OrderedDict()
self.nodes = Dpa.Stats.Nodes()
self.nodes.parent = self
self._children_name_map["nodes"] = "nodes"
self._segment_path = lambda: "stats"
self._absolute_path = lambda: "Cisco-IOS-XR-fretta-bcm-dpa-npu-stats-oper:dpa/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dpa.Stats, [], name, value)
class Nodes(Entity):
"""
DPA data for available nodes
.. attribute:: node
DPA operational data for a particular node
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node>`
"""
_prefix = 'fretta-bcm-dpa-npu-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dpa.Stats.Nodes, self).__init__()
self.yang_name = "nodes"
self.yang_parent_name = "stats"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("node", ("node", Dpa.Stats.Nodes.Node))])
self._leafs = OrderedDict()
self.node = YList(self)
self._segment_path = lambda: "nodes"
self._absolute_path = lambda: "Cisco-IOS-XR-fretta-bcm-dpa-npu-stats-oper:dpa/stats/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dpa.Stats.Nodes, [], name, value)
class Node(Entity):
"""
DPA operational data for a particular node
.. attribute:: node_name (key)
Node ID
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: asic_statistics
ASIC statistics table
**type**\: :py:class:`AsicStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.AsicStatistics>`
.. attribute:: npu_numbers
Ingress Stats
**type**\: :py:class:`NpuNumbers <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.NpuNumbers>`
"""
_prefix = 'fretta-bcm-dpa-npu-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dpa.Stats.Nodes.Node, self).__init__()
self.yang_name = "node"
self.yang_parent_name = "nodes"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['node_name']
self._child_classes = OrderedDict([("asic-statistics", ("asic_statistics", Dpa.Stats.Nodes.Node.AsicStatistics)), ("npu-numbers", ("npu_numbers", Dpa.Stats.Nodes.Node.NpuNumbers))])
self._leafs = OrderedDict([
('node_name', (YLeaf(YType.str, 'node-name'), ['str'])),
])
self.node_name = None
self.asic_statistics = Dpa.Stats.Nodes.Node.AsicStatistics()
self.asic_statistics.parent = self
self._children_name_map["asic_statistics"] = "asic-statistics"
self.npu_numbers = Dpa.Stats.Nodes.Node.NpuNumbers()
self.npu_numbers.parent = self
self._children_name_map["npu_numbers"] = "npu-numbers"
self._segment_path = lambda: "node" + "[node-name='" + str(self.node_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-fretta-bcm-dpa-npu-stats-oper:dpa/stats/nodes/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dpa.Stats.Nodes.Node, ['node_name'], name, value)
class AsicStatistics(Entity):
"""
ASIC statistics table
.. attribute:: asic_statistics_detail_for_npu_ids
Detailed ASIC statistics
**type**\: :py:class:`AsicStatisticsDetailForNpuIds <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds>`
.. attribute:: asic_statistics_for_npu_ids
ASIC statistics
**type**\: :py:class:`AsicStatisticsForNpuIds <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds>`
"""
_prefix = 'fretta-bcm-dpa-npu-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dpa.Stats.Nodes.Node.AsicStatistics, self).__init__()
self.yang_name = "asic-statistics"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("asic-statistics-detail-for-npu-ids", ("asic_statistics_detail_for_npu_ids", Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds)), ("asic-statistics-for-npu-ids", ("asic_statistics_for_npu_ids", Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds))])
self._leafs = OrderedDict()
self.asic_statistics_detail_for_npu_ids = Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds()
self.asic_statistics_detail_for_npu_ids.parent = self
self._children_name_map["asic_statistics_detail_for_npu_ids"] = "asic-statistics-detail-for-npu-ids"
self.asic_statistics_for_npu_ids = Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds()
self.asic_statistics_for_npu_ids.parent = self
self._children_name_map["asic_statistics_for_npu_ids"] = "asic-statistics-for-npu-ids"
self._segment_path = lambda: "asic-statistics"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dpa.Stats.Nodes.Node.AsicStatistics, [], name, value)
class AsicStatisticsDetailForNpuIds(Entity):
"""
Detailed ASIC statistics
.. attribute:: asic_statistics_detail_for_npu_id
Detailed ASIC statistics for a particular NPU
**type**\: list of :py:class:`AsicStatisticsDetailForNpuId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId>`
"""
_prefix = 'fretta-bcm-dpa-npu-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds, self).__init__()
self.yang_name = "asic-statistics-detail-for-npu-ids"
self.yang_parent_name = "asic-statistics"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("asic-statistics-detail-for-npu-id", ("asic_statistics_detail_for_npu_id", Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId))])
self._leafs = OrderedDict()
self.asic_statistics_detail_for_npu_id = YList(self)
self._segment_path = lambda: "asic-statistics-detail-for-npu-ids"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds, [], name, value)
class AsicStatisticsDetailForNpuId(Entity):
"""
Detailed ASIC statistics for a particular
NPU
.. attribute:: npu_id (key)
NPU number
**type**\: int
**range:** 0..4294967295
.. attribute:: statistics
Statistics
**type**\: :py:class:`Statistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId.Statistics>`
.. attribute:: valid
Flag to indicate if data is valid
**type**\: bool
.. attribute:: rack_number
Rack number
**type**\: int
**range:** 0..4294967295
.. attribute:: slot_number
Slot number
**type**\: int
**range:** 0..4294967295
.. attribute:: asic_instance
ASIC instance
**type**\: int
**range:** 0..4294967295
.. attribute:: chip_version
Chip version
**type**\: int
**range:** 0..65535
"""
_prefix = 'fretta-bcm-dpa-npu-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId, self).__init__()
self.yang_name = "asic-statistics-detail-for-npu-id"
self.yang_parent_name = "asic-statistics-detail-for-npu-ids"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['npu_id']
self._child_classes = OrderedDict([("statistics", ("statistics", Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId.Statistics))])
self._leafs = OrderedDict([
('npu_id', (YLeaf(YType.uint32, 'npu-id'), ['int'])),
('valid', (YLeaf(YType.boolean, 'valid'), ['bool'])),
('rack_number', (YLeaf(YType.uint32, 'rack-number'), ['int'])),
('slot_number', (YLeaf(YType.uint32, 'slot-number'), ['int'])),
('asic_instance', (YLeaf(YType.uint32, 'asic-instance'), ['int'])),
('chip_version', (YLeaf(YType.uint16, 'chip-version'), ['int'])),
])
self.npu_id = None
self.valid = None
self.rack_number = None
self.slot_number = None
self.asic_instance = None
self.chip_version = None
self.statistics = Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId.Statistics()
self.statistics.parent = self
self._children_name_map["statistics"] = "statistics"
self._segment_path = lambda: "asic-statistics-detail-for-npu-id" + "[npu-id='" + str(self.npu_id) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId, ['npu_id', u'valid', u'rack_number', u'slot_number', u'asic_instance', u'chip_version'], name, value)
class Statistics(Entity):
"""
Statistics
.. attribute:: num_blocks
Number of blocks
**type**\: int
**range:** 0..255
.. attribute:: block_info
Block information
**type**\: list of :py:class:`BlockInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId.Statistics.BlockInfo>`
"""
_prefix = 'fretta-bcm-dpa-npu-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId.Statistics, self).__init__()
self.yang_name = "statistics"
self.yang_parent_name = "asic-statistics-detail-for-npu-id"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("block-info", ("block_info", Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId.Statistics.BlockInfo))])
self._leafs = OrderedDict([
('num_blocks', (YLeaf(YType.uint8, 'num-blocks'), ['int'])),
])
self.num_blocks = None
self.block_info = YList(self)
self._segment_path = lambda: "statistics"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId.Statistics, [u'num_blocks'], name, value)
class BlockInfo(Entity):
"""
Block information
.. attribute:: block_name
Block name
**type**\: str
**length:** 0..10
.. attribute:: num_fields
Number of fields
**type**\: int
**range:** 0..255
.. attribute:: field_info
Field information
**type**\: list of :py:class:`FieldInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId.Statistics.BlockInfo.FieldInfo>`
"""
_prefix = 'fretta-bcm-dpa-npu-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId.Statistics.BlockInfo, self).__init__()
self.yang_name = "block-info"
self.yang_parent_name = "statistics"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("field-info", ("field_info", Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId.Statistics.BlockInfo.FieldInfo))])
self._leafs = OrderedDict([
('block_name', (YLeaf(YType.str, 'block-name'), ['str'])),
('num_fields', (YLeaf(YType.uint8, 'num-fields'), ['int'])),
])
self.block_name = None
self.num_fields = None
self.field_info = YList(self)
self._segment_path = lambda: "block-info"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId.Statistics.BlockInfo, [u'block_name', u'num_fields'], name, value)
class FieldInfo(Entity):
"""
Field information
.. attribute:: field_name
Field name
**type**\: str
**length:** 0..80
.. attribute:: field_value
Field value
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: is_overflow
Flag to indicate overflow
**type**\: bool
"""
_prefix = 'fretta-bcm-dpa-npu-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId.Statistics.BlockInfo.FieldInfo, self).__init__()
self.yang_name = "field-info"
self.yang_parent_name = "block-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('field_name', (YLeaf(YType.str, 'field-name'), ['str'])),
('field_value', (YLeaf(YType.uint64, 'field-value'), ['int'])),
('is_overflow', (YLeaf(YType.boolean, 'is-overflow'), ['bool'])),
])
self.field_name = None
self.field_value = None
self.is_overflow = None
self._segment_path = lambda: "field-info"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsDetailForNpuIds.AsicStatisticsDetailForNpuId.Statistics.BlockInfo.FieldInfo, [u'field_name', u'field_value', u'is_overflow'], name, value)
class AsicStatisticsForNpuIds(Entity):
"""
ASIC statistics
.. attribute:: asic_statistics_for_npu_id
ASIC statistics for a particular NPU
**type**\: list of :py:class:`AsicStatisticsForNpuId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds.AsicStatisticsForNpuId>`
"""
_prefix = 'fretta-bcm-dpa-npu-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds, self).__init__()
self.yang_name = "asic-statistics-for-npu-ids"
self.yang_parent_name = "asic-statistics"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("asic-statistics-for-npu-id", ("asic_statistics_for_npu_id", Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds.AsicStatisticsForNpuId))])
self._leafs = OrderedDict()
self.asic_statistics_for_npu_id = YList(self)
self._segment_path = lambda: "asic-statistics-for-npu-ids"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds, [], name, value)
class AsicStatisticsForNpuId(Entity):
"""
ASIC statistics for a particular NPU
.. attribute:: npu_id (key)
NPU number
**type**\: int
**range:** 0..4294967295
.. attribute:: statistics
Statistics
**type**\: :py:class:`Statistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds.AsicStatisticsForNpuId.Statistics>`
.. attribute:: valid
Flag to indicate if data is valid
**type**\: bool
.. attribute:: rack_number
Rack number
**type**\: int
**range:** 0..4294967295
.. attribute:: slot_number
Slot number
**type**\: int
**range:** 0..4294967295
.. attribute:: asic_instance
ASIC instance
**type**\: int
**range:** 0..4294967295
.. attribute:: chip_version
Chip version
**type**\: int
**range:** 0..65535
"""
_prefix = 'fretta-bcm-dpa-npu-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds.AsicStatisticsForNpuId, self).__init__()
self.yang_name = "asic-statistics-for-npu-id"
self.yang_parent_name = "asic-statistics-for-npu-ids"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['npu_id']
self._child_classes = OrderedDict([("statistics", ("statistics", Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds.AsicStatisticsForNpuId.Statistics))])
self._leafs = OrderedDict([
('npu_id', (YLeaf(YType.uint32, 'npu-id'), ['int'])),
('valid', (YLeaf(YType.boolean, 'valid'), ['bool'])),
('rack_number', (YLeaf(YType.uint32, 'rack-number'), ['int'])),
('slot_number', (YLeaf(YType.uint32, 'slot-number'), ['int'])),
('asic_instance', (YLeaf(YType.uint32, 'asic-instance'), ['int'])),
('chip_version', (YLeaf(YType.uint16, 'chip-version'), ['int'])),
])
self.npu_id = None
self.valid = None
self.rack_number = None
self.slot_number = None
self.asic_instance = None
self.chip_version = None
self.statistics = Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds.AsicStatisticsForNpuId.Statistics()
self.statistics.parent = self
self._children_name_map["statistics"] = "statistics"
self._segment_path = lambda: "asic-statistics-for-npu-id" + "[npu-id='" + str(self.npu_id) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds.AsicStatisticsForNpuId, ['npu_id', u'valid', u'rack_number', u'slot_number', u'asic_instance', u'chip_version'], name, value)
class Statistics(Entity):
"""
Statistics
.. attribute:: nbi_rx_total_byte_cnt
Total bytes sent from NIF to IRE
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: nbi_rx_total_pkt_cnt
Total packets sent from NIF to IRE
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ire_cpu_pkt_cnt
CPU ingress received packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ire_nif_pkt_cnt
NIF received packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ire_oamp_pkt_cnt
OAMP ingress received packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ire_olp_pkt_cnt
OLP ingress received packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ire_rcy_pkt_cnt
Recycling ingress received packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ire_fdt_if_cnt
Performance counter of the FDT interface
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: idr_mmu_if_cnt
Performance counter of the MMU interface
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: idr_ocb_if_cnt
Performance counter of the OCB interface
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: iqm_enqueue_pkt_cnt
Counts enqueued packets
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: iqm_dequeue_pkt_cnt
Counts dequeued packets
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: iqm_deleted_pkt_cnt
Counts matched packets discarded in the DEQ process
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: iqm_enq_discarded_pkt_cnt
Counts all packets discarded at the ENQ pipe
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ipt_egq_pkt_cnt
EGQ packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ipt_enq_pkt_cnt
ENQ packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ipt_fdt_pkt_cnt
FDT packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ipt_cfg_event_cnt
Configurable event counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ipt_cfg_byte_cnt
Configurable bytes counter
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: fdt_ipt_desc_cell_cnt
Descriptor cell counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fdt_ire_desc_cell_cnt
IRE internal descriptor cell counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fdt_transmitted_data_cells_cnt
Counts all transmitted data cells
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fdr_p1_cell_in_cnt
FDR total incoming cell counter at pipe 1
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fdr_p2_cell_in_cnt
FDR total incoming cell counter at pipe 2
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fdr_p3_cell_in_cnt
FDR total incoming cell counter at pipe 3
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fdr_cell_in_cnt_total
FDR total incoming cell counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_cells_in_cnt_p1
FDA input cell counter P1
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_cells_in_cnt_p2
FDA input cell counter P2
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_cells_in_cnt_p3
FDA input cell counter P3
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_cells_in_tdm_cnt
FDA input cell counter TDM
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_cells_in_meshmc_cnt
FDA input cell counter MESHMC
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_cells_in_ipt_cnt
FDA input cell counter IPT
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_cells_out_cnt_p1
FDA output cell counter P1
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_cells_out_cnt_p2
FDA output cell counter P2
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_cells_out_cnt_p3
FDA output cell counter P3
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_cells_out_tdm_cnt
FDA output cell counter TDM
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_cells_out_meshmc_cnt
FDA output cell counter MESHMC
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_cells_out_ipt_cnt
FDA output cell counter IPT
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_egq_drop_cnt
FDA EGQ drop counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fda_egq_meshmc_drop_cnt
FDA EGQ MESHMC drop counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_fqp_pkt_cnt
FQP2EPE packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_pqp_uc_pkt_cnt
PQP2FQP unicast packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_pqp_discard_uc_pkt_cnt
PQP discarded unicast packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_pqp_uc_bytes_cnt
PQP2FQP unicast bytes counter
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: egq_pqp_mc_pkt_cnt
PQP2FQP multicast packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_pqp_discard_mc_pkt_cnt
PQP discarded multicast packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_pqp_mc_bytes_cnt
PQP2FQP multicast bytes counter
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: egq_ehp_uc_pkt_cnt
EHP2PQP unicast packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_ehp_mc_high_pkt_cnt
EHP2PQP multicast high packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_ehp_mc_low_pkt_cnt
EHP2PQP multicast low packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_deleted_pkt_cnt
EHP2PQP discarded packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_ehp_mc_high_discard_cnt
Number of multicast high packets discarded because multicast FIFO is full
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_ehp_mc_low_discard_cnt
Number of multicast low packets discarded because multicast FIFO is full
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_erpp_lag_pruning_discard_cnt
Number of packet descriptors discarded due to LAG multicast pruning
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_erpp_pmf_discard_cnt
Number of packet descriptors discarded due to ERPP PMF
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: egq_erpp_vlan_mbr_discard_cnt
Number of packet descriptors discarded because of egress VLAN membership
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: epni_epe_byte_cnt
EPE2PNI bytes counter
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: epni_epe_pkt_cnt
EPE2PNI packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: epni_epe_discard_cnt
EPE discarded packet counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: nbi_tx_total_byte_cnt
Total bytes sent from EGQ to NIF
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: nbi_tx_total_pkt_cnt
Total packets sent from EGQ to NIF
**type**\: int
**range:** 0..18446744073709551615
"""
_prefix = 'fretta-bcm-dpa-npu-stats-oper'
_revision = '2015-11-09'
def __init__(self):
    """Auto-generated YDK constructor for the Statistics container.

    Registers every ASIC statistics counter leaf (YANG uint64 counters
    mapped to Python int) and initializes each corresponding attribute
    to None; values are populated when operational data is read.
    """
    super(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds.AsicStatisticsForNpuId.Statistics, self).__init__()
    self.yang_name = "statistics"
    self.yang_parent_name = "asic-statistics-for-npu-id"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_classes = OrderedDict([])
    # Mapping: python attribute name -> (YLeaf descriptor, accepted python types).
    # Order matters for YDK serialization; do not reorder.
    self._leafs = OrderedDict([
        ('nbi_rx_total_byte_cnt', (YLeaf(YType.uint64, 'nbi-rx-total-byte-cnt'), ['int'])),
        ('nbi_rx_total_pkt_cnt', (YLeaf(YType.uint64, 'nbi-rx-total-pkt-cnt'), ['int'])),
        ('ire_cpu_pkt_cnt', (YLeaf(YType.uint64, 'ire-cpu-pkt-cnt'), ['int'])),
        ('ire_nif_pkt_cnt', (YLeaf(YType.uint64, 'ire-nif-pkt-cnt'), ['int'])),
        ('ire_oamp_pkt_cnt', (YLeaf(YType.uint64, 'ire-oamp-pkt-cnt'), ['int'])),
        ('ire_olp_pkt_cnt', (YLeaf(YType.uint64, 'ire-olp-pkt-cnt'), ['int'])),
        ('ire_rcy_pkt_cnt', (YLeaf(YType.uint64, 'ire-rcy-pkt-cnt'), ['int'])),
        ('ire_fdt_if_cnt', (YLeaf(YType.uint64, 'ire-fdt-if-cnt'), ['int'])),
        ('idr_mmu_if_cnt', (YLeaf(YType.uint64, 'idr-mmu-if-cnt'), ['int'])),
        ('idr_ocb_if_cnt', (YLeaf(YType.uint64, 'idr-ocb-if-cnt'), ['int'])),
        ('iqm_enqueue_pkt_cnt', (YLeaf(YType.uint64, 'iqm-enqueue-pkt-cnt'), ['int'])),
        ('iqm_dequeue_pkt_cnt', (YLeaf(YType.uint64, 'iqm-dequeue-pkt-cnt'), ['int'])),
        ('iqm_deleted_pkt_cnt', (YLeaf(YType.uint64, 'iqm-deleted-pkt-cnt'), ['int'])),
        ('iqm_enq_discarded_pkt_cnt', (YLeaf(YType.uint64, 'iqm-enq-discarded-pkt-cnt'), ['int'])),
        ('ipt_egq_pkt_cnt', (YLeaf(YType.uint64, 'ipt-egq-pkt-cnt'), ['int'])),
        ('ipt_enq_pkt_cnt', (YLeaf(YType.uint64, 'ipt-enq-pkt-cnt'), ['int'])),
        ('ipt_fdt_pkt_cnt', (YLeaf(YType.uint64, 'ipt-fdt-pkt-cnt'), ['int'])),
        ('ipt_cfg_event_cnt', (YLeaf(YType.uint64, 'ipt-cfg-event-cnt'), ['int'])),
        ('ipt_cfg_byte_cnt', (YLeaf(YType.uint64, 'ipt-cfg-byte-cnt'), ['int'])),
        ('fdt_ipt_desc_cell_cnt', (YLeaf(YType.uint64, 'fdt-ipt-desc-cell-cnt'), ['int'])),
        ('fdt_ire_desc_cell_cnt', (YLeaf(YType.uint64, 'fdt-ire-desc-cell-cnt'), ['int'])),
        ('fdt_transmitted_data_cells_cnt', (YLeaf(YType.uint64, 'fdt-transmitted-data-cells-cnt'), ['int'])),
        ('fdr_p1_cell_in_cnt', (YLeaf(YType.uint64, 'fdr-p1-cell-in-cnt'), ['int'])),
        ('fdr_p2_cell_in_cnt', (YLeaf(YType.uint64, 'fdr-p2-cell-in-cnt'), ['int'])),
        ('fdr_p3_cell_in_cnt', (YLeaf(YType.uint64, 'fdr-p3-cell-in-cnt'), ['int'])),
        ('fdr_cell_in_cnt_total', (YLeaf(YType.uint64, 'fdr-cell-in-cnt-total'), ['int'])),
        ('fda_cells_in_cnt_p1', (YLeaf(YType.uint64, 'fda-cells-in-cnt-p1'), ['int'])),
        ('fda_cells_in_cnt_p2', (YLeaf(YType.uint64, 'fda-cells-in-cnt-p2'), ['int'])),
        ('fda_cells_in_cnt_p3', (YLeaf(YType.uint64, 'fda-cells-in-cnt-p3'), ['int'])),
        ('fda_cells_in_tdm_cnt', (YLeaf(YType.uint64, 'fda-cells-in-tdm-cnt'), ['int'])),
        ('fda_cells_in_meshmc_cnt', (YLeaf(YType.uint64, 'fda-cells-in-meshmc-cnt'), ['int'])),
        ('fda_cells_in_ipt_cnt', (YLeaf(YType.uint64, 'fda-cells-in-ipt-cnt'), ['int'])),
        ('fda_cells_out_cnt_p1', (YLeaf(YType.uint64, 'fda-cells-out-cnt-p1'), ['int'])),
        ('fda_cells_out_cnt_p2', (YLeaf(YType.uint64, 'fda-cells-out-cnt-p2'), ['int'])),
        ('fda_cells_out_cnt_p3', (YLeaf(YType.uint64, 'fda-cells-out-cnt-p3'), ['int'])),
        ('fda_cells_out_tdm_cnt', (YLeaf(YType.uint64, 'fda-cells-out-tdm-cnt'), ['int'])),
        ('fda_cells_out_meshmc_cnt', (YLeaf(YType.uint64, 'fda-cells-out-meshmc-cnt'), ['int'])),
        ('fda_cells_out_ipt_cnt', (YLeaf(YType.uint64, 'fda-cells-out-ipt-cnt'), ['int'])),
        ('fda_egq_drop_cnt', (YLeaf(YType.uint64, 'fda-egq-drop-cnt'), ['int'])),
        ('fda_egq_meshmc_drop_cnt', (YLeaf(YType.uint64, 'fda-egq-meshmc-drop-cnt'), ['int'])),
        ('egq_fqp_pkt_cnt', (YLeaf(YType.uint64, 'egq-fqp-pkt-cnt'), ['int'])),
        ('egq_pqp_uc_pkt_cnt', (YLeaf(YType.uint64, 'egq-pqp-uc-pkt-cnt'), ['int'])),
        ('egq_pqp_discard_uc_pkt_cnt', (YLeaf(YType.uint64, 'egq-pqp-discard-uc-pkt-cnt'), ['int'])),
        ('egq_pqp_uc_bytes_cnt', (YLeaf(YType.uint64, 'egq-pqp-uc-bytes-cnt'), ['int'])),
        ('egq_pqp_mc_pkt_cnt', (YLeaf(YType.uint64, 'egq-pqp-mc-pkt-cnt'), ['int'])),
        ('egq_pqp_discard_mc_pkt_cnt', (YLeaf(YType.uint64, 'egq-pqp-discard-mc-pkt-cnt'), ['int'])),
        ('egq_pqp_mc_bytes_cnt', (YLeaf(YType.uint64, 'egq-pqp-mc-bytes-cnt'), ['int'])),
        ('egq_ehp_uc_pkt_cnt', (YLeaf(YType.uint64, 'egq-ehp-uc-pkt-cnt'), ['int'])),
        ('egq_ehp_mc_high_pkt_cnt', (YLeaf(YType.uint64, 'egq-ehp-mc-high-pkt-cnt'), ['int'])),
        ('egq_ehp_mc_low_pkt_cnt', (YLeaf(YType.uint64, 'egq-ehp-mc-low-pkt-cnt'), ['int'])),
        ('egq_deleted_pkt_cnt', (YLeaf(YType.uint64, 'egq-deleted-pkt-cnt'), ['int'])),
        ('egq_ehp_mc_high_discard_cnt', (YLeaf(YType.uint64, 'egq-ehp-mc-high-discard-cnt'), ['int'])),
        ('egq_ehp_mc_low_discard_cnt', (YLeaf(YType.uint64, 'egq-ehp-mc-low-discard-cnt'), ['int'])),
        ('egq_erpp_lag_pruning_discard_cnt', (YLeaf(YType.uint64, 'egq-erpp-lag-pruning-discard-cnt'), ['int'])),
        ('egq_erpp_pmf_discard_cnt', (YLeaf(YType.uint64, 'egq-erpp-pmf-discard-cnt'), ['int'])),
        ('egq_erpp_vlan_mbr_discard_cnt', (YLeaf(YType.uint64, 'egq-erpp-vlan-mbr-discard-cnt'), ['int'])),
        ('epni_epe_byte_cnt', (YLeaf(YType.uint64, 'epni-epe-byte-cnt'), ['int'])),
        ('epni_epe_pkt_cnt', (YLeaf(YType.uint64, 'epni-epe-pkt-cnt'), ['int'])),
        ('epni_epe_discard_cnt', (YLeaf(YType.uint64, 'epni-epe-discard-cnt'), ['int'])),
        ('nbi_tx_total_byte_cnt', (YLeaf(YType.uint64, 'nbi-tx-total-byte-cnt'), ['int'])),
        ('nbi_tx_total_pkt_cnt', (YLeaf(YType.uint64, 'nbi-tx-total-pkt-cnt'), ['int'])),
        ])
    # All counters start unset; they are filled in when the model is
    # populated from the device.
    self.nbi_rx_total_byte_cnt = None
    self.nbi_rx_total_pkt_cnt = None
    self.ire_cpu_pkt_cnt = None
    self.ire_nif_pkt_cnt = None
    self.ire_oamp_pkt_cnt = None
    self.ire_olp_pkt_cnt = None
    self.ire_rcy_pkt_cnt = None
    self.ire_fdt_if_cnt = None
    self.idr_mmu_if_cnt = None
    self.idr_ocb_if_cnt = None
    self.iqm_enqueue_pkt_cnt = None
    self.iqm_dequeue_pkt_cnt = None
    self.iqm_deleted_pkt_cnt = None
    self.iqm_enq_discarded_pkt_cnt = None
    self.ipt_egq_pkt_cnt = None
    self.ipt_enq_pkt_cnt = None
    self.ipt_fdt_pkt_cnt = None
    self.ipt_cfg_event_cnt = None
    self.ipt_cfg_byte_cnt = None
    self.fdt_ipt_desc_cell_cnt = None
    self.fdt_ire_desc_cell_cnt = None
    self.fdt_transmitted_data_cells_cnt = None
    self.fdr_p1_cell_in_cnt = None
    self.fdr_p2_cell_in_cnt = None
    self.fdr_p3_cell_in_cnt = None
    self.fdr_cell_in_cnt_total = None
    self.fda_cells_in_cnt_p1 = None
    self.fda_cells_in_cnt_p2 = None
    self.fda_cells_in_cnt_p3 = None
    self.fda_cells_in_tdm_cnt = None
    self.fda_cells_in_meshmc_cnt = None
    self.fda_cells_in_ipt_cnt = None
    self.fda_cells_out_cnt_p1 = None
    self.fda_cells_out_cnt_p2 = None
    self.fda_cells_out_cnt_p3 = None
    self.fda_cells_out_tdm_cnt = None
    self.fda_cells_out_meshmc_cnt = None
    self.fda_cells_out_ipt_cnt = None
    self.fda_egq_drop_cnt = None
    self.fda_egq_meshmc_drop_cnt = None
    self.egq_fqp_pkt_cnt = None
    self.egq_pqp_uc_pkt_cnt = None
    self.egq_pqp_discard_uc_pkt_cnt = None
    self.egq_pqp_uc_bytes_cnt = None
    self.egq_pqp_mc_pkt_cnt = None
    self.egq_pqp_discard_mc_pkt_cnt = None
    self.egq_pqp_mc_bytes_cnt = None
    self.egq_ehp_uc_pkt_cnt = None
    self.egq_ehp_mc_high_pkt_cnt = None
    self.egq_ehp_mc_low_pkt_cnt = None
    self.egq_deleted_pkt_cnt = None
    self.egq_ehp_mc_high_discard_cnt = None
    self.egq_ehp_mc_low_discard_cnt = None
    self.egq_erpp_lag_pruning_discard_cnt = None
    self.egq_erpp_pmf_discard_cnt = None
    self.egq_erpp_vlan_mbr_discard_cnt = None
    self.epni_epe_byte_cnt = None
    self.epni_epe_pkt_cnt = None
    self.epni_epe_discard_cnt = None
    self.nbi_tx_total_byte_cnt = None
    self.nbi_tx_total_pkt_cnt = None
    # Fixed segment path: this container is a singleton under its parent.
    self._segment_path = lambda: "statistics"
    self._is_frozen = True
def __setattr__(self, name, value):
    # Route all attribute writes through YDK's validation hook so that only
    # the declared leaf names may be set on this frozen entity.
    self._perform_setattr(Dpa.Stats.Nodes.Node.AsicStatistics.AsicStatisticsForNpuIds.AsicStatisticsForNpuId.Statistics, [u'nbi_rx_total_byte_cnt', u'nbi_rx_total_pkt_cnt', u'ire_cpu_pkt_cnt', u'ire_nif_pkt_cnt', u'ire_oamp_pkt_cnt', u'ire_olp_pkt_cnt', u'ire_rcy_pkt_cnt', u'ire_fdt_if_cnt', u'idr_mmu_if_cnt', u'idr_ocb_if_cnt', u'iqm_enqueue_pkt_cnt', u'iqm_dequeue_pkt_cnt', u'iqm_deleted_pkt_cnt', u'iqm_enq_discarded_pkt_cnt', u'ipt_egq_pkt_cnt', u'ipt_enq_pkt_cnt', u'ipt_fdt_pkt_cnt', u'ipt_cfg_event_cnt', u'ipt_cfg_byte_cnt', u'fdt_ipt_desc_cell_cnt', u'fdt_ire_desc_cell_cnt', u'fdt_transmitted_data_cells_cnt', u'fdr_p1_cell_in_cnt', u'fdr_p2_cell_in_cnt', u'fdr_p3_cell_in_cnt', u'fdr_cell_in_cnt_total', u'fda_cells_in_cnt_p1', u'fda_cells_in_cnt_p2', u'fda_cells_in_cnt_p3', u'fda_cells_in_tdm_cnt', u'fda_cells_in_meshmc_cnt', u'fda_cells_in_ipt_cnt', u'fda_cells_out_cnt_p1', u'fda_cells_out_cnt_p2', u'fda_cells_out_cnt_p3', u'fda_cells_out_tdm_cnt', u'fda_cells_out_meshmc_cnt', u'fda_cells_out_ipt_cnt', u'fda_egq_drop_cnt', u'fda_egq_meshmc_drop_cnt', u'egq_fqp_pkt_cnt', u'egq_pqp_uc_pkt_cnt', u'egq_pqp_discard_uc_pkt_cnt', u'egq_pqp_uc_bytes_cnt', u'egq_pqp_mc_pkt_cnt', u'egq_pqp_discard_mc_pkt_cnt', u'egq_pqp_mc_bytes_cnt', u'egq_ehp_uc_pkt_cnt', u'egq_ehp_mc_high_pkt_cnt', u'egq_ehp_mc_low_pkt_cnt', u'egq_deleted_pkt_cnt', u'egq_ehp_mc_high_discard_cnt', u'egq_ehp_mc_low_discard_cnt', u'egq_erpp_lag_pruning_discard_cnt', u'egq_erpp_pmf_discard_cnt', u'egq_erpp_vlan_mbr_discard_cnt', u'epni_epe_byte_cnt', u'epni_epe_pkt_cnt', u'epni_epe_discard_cnt', u'nbi_tx_total_byte_cnt', u'nbi_tx_total_pkt_cnt'], name, value)
# NOTE: auto-generated YDK binding for the ingress NPU stats subtree; do not
# hand-edit the leaf/child registration tables — they mirror the YANG model.
class NpuNumbers(Entity):
    """
    Ingress Stats
    .. attribute:: npu_number
    Stats for a particular npu
    **type**\: list of :py:class:`NpuNumber <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber>`
    """
    _prefix = 'fretta-bcm-dpa-npu-stats-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dpa.Stats.Nodes.Node.NpuNumbers, self).__init__()
        self.yang_name = "npu-numbers"
        self.yang_parent_name = "node"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("npu-number", ("npu_number", Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber))])
        self._leafs = OrderedDict()
        # Keyed list of per-NPU stats entries.
        self.npu_number = YList(self)
        self._segment_path = lambda: "npu-numbers"
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(Dpa.Stats.Nodes.Node.NpuNumbers, [], name, value)

    class NpuNumber(Entity):
        """
        Stats for a particular npu
        .. attribute:: npu_id (key)
        Npu number
        **type**\: int
        **range:** 0..4294967295
        .. attribute:: display
        show npu specific voq or trap stats
        **type**\: :py:class:`Display <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display>`
        """
        _prefix = 'fretta-bcm-dpa-npu-stats-oper'
        _revision = '2015-11-09'

        def __init__(self):
            super(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber, self).__init__()
            self.yang_name = "npu-number"
            self.yang_parent_name = "npu-numbers"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = ['npu_id']
            self._child_classes = OrderedDict([("display", ("display", Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display))])
            self._leafs = OrderedDict([
                ('npu_id', (YLeaf(YType.uint32, 'npu-id'), ['int'])),
            ])
            self.npu_id = None
            self.display = Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display()
            self.display.parent = self
            self._children_name_map["display"] = "display"
            # List entries are addressed by their npu-id key.
            self._segment_path = lambda: "npu-number" + "[npu-id='" + str(self.npu_id) + "']"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber, ['npu_id'], name, value)

        class Display(Entity):
            """
            show npu specific voq or trap stats
            .. attribute:: trap_ids
            Trap stats for a particular npu
            **type**\: :py:class:`TrapIds <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.TrapIds>`
            .. attribute:: interface_handles
            Voq stats grouped by interface handle
            **type**\: :py:class:`InterfaceHandles <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.InterfaceHandles>`
            .. attribute:: base_numbers
            Voq stats grouped by voq base numbers
            **type**\: :py:class:`BaseNumbers <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.BaseNumbers>`
            """
            _prefix = 'fretta-bcm-dpa-npu-stats-oper'
            _revision = '2015-11-09'

            def __init__(self):
                super(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display, self).__init__()
                self.yang_name = "display"
                self.yang_parent_name = "npu-number"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([("trap-ids", ("trap_ids", Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.TrapIds)), ("interface-handles", ("interface_handles", Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.InterfaceHandles)), ("base-numbers", ("base_numbers", Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.BaseNumbers))])
                self._leafs = OrderedDict()
                self.trap_ids = Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.TrapIds()
                self.trap_ids.parent = self
                self._children_name_map["trap_ids"] = "trap-ids"
                self.interface_handles = Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.InterfaceHandles()
                self.interface_handles.parent = self
                self._children_name_map["interface_handles"] = "interface-handles"
                self.base_numbers = Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.BaseNumbers()
                self.base_numbers.parent = self
                self._children_name_map["base_numbers"] = "base-numbers"
                self._segment_path = lambda: "display"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display, [], name, value)

            class TrapIds(Entity):
                """
                Trap stats for a particular npu
                .. attribute:: trap_id
                Filter by specific trap id
                **type**\: list of :py:class:`TrapId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.TrapIds.TrapId>`
                """
                _prefix = 'fretta-bcm-dpa-npu-stats-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    super(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.TrapIds, self).__init__()
                    self.yang_name = "trap-ids"
                    self.yang_parent_name = "display"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    self._child_classes = OrderedDict([("trap-id", ("trap_id", Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.TrapIds.TrapId))])
                    self._leafs = OrderedDict()
                    self.trap_id = YList(self)
                    self._segment_path = lambda: "trap-ids"
                    self._is_frozen = True

                def __setattr__(self, name, value):
                    self._perform_setattr(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.TrapIds, [], name, value)

                class TrapId(Entity):
                    """
                    Filter by specific trap id
                    .. attribute:: trap_id (key)
                    Trap ID
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: trap_strength
                    Trap Strength of the trap
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: priority
                    Priority of the trap
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: trap_id_xr
                    Id of the trap
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: gport
                    Gport of the trap
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: fec_id
                    Fec id of the trap
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: policer_id
                    Id of the policer on the trap
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: stats_id
                    Stats Id of the trap
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: encap_id
                    Encap Id of the trap
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: mc_group
                    McGroup of the trap
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: trap_string
                    Name String of the trap
                    **type**\: str
                    .. attribute:: id
                    Id for internal use
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: offset
                    Offset for internal use
                    **type**\: int
                    **range:** 0..18446744073709551615
                    .. attribute:: npu_id
                    NpuId on which trap is enabled
                    **type**\: int
                    **range:** 0..18446744073709551615
                    .. attribute:: packet_dropped
                    Number of packets dropped after hitting the trap
                    **type**\: int
                    **range:** 0..18446744073709551615
                    .. attribute:: packet_accepted
                    Number of packets accepted after hitting the trap
                    **type**\: int
                    **range:** 0..18446744073709551615
                    """
                    _prefix = 'fretta-bcm-dpa-npu-stats-oper'
                    _revision = '2015-11-09'

                    def __init__(self):
                        super(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.TrapIds.TrapId, self).__init__()
                        self.yang_name = "trap-id"
                        self.yang_parent_name = "trap-ids"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = ['trap_id']
                        self._child_classes = OrderedDict([])
                        self._leafs = OrderedDict([
                            ('trap_id', (YLeaf(YType.uint32, 'trap-id'), ['int'])),
                            ('trap_strength', (YLeaf(YType.uint32, 'trap-strength'), ['int'])),
                            ('priority', (YLeaf(YType.uint32, 'priority'), ['int'])),
                            ('trap_id_xr', (YLeaf(YType.uint32, 'trap-id-xr'), ['int'])),
                            ('gport', (YLeaf(YType.uint32, 'gport'), ['int'])),
                            ('fec_id', (YLeaf(YType.uint32, 'fec-id'), ['int'])),
                            ('policer_id', (YLeaf(YType.uint32, 'policer-id'), ['int'])),
                            ('stats_id', (YLeaf(YType.uint32, 'stats-id'), ['int'])),
                            ('encap_id', (YLeaf(YType.uint32, 'encap-id'), ['int'])),
                            ('mc_group', (YLeaf(YType.uint32, 'mc-group'), ['int'])),
                            ('trap_string', (YLeaf(YType.str, 'trap-string'), ['str'])),
                            ('id', (YLeaf(YType.uint32, 'id'), ['int'])),
                            ('offset', (YLeaf(YType.uint64, 'offset'), ['int'])),
                            ('npu_id', (YLeaf(YType.uint64, 'npu-id'), ['int'])),
                            ('packet_dropped', (YLeaf(YType.uint64, 'packet-dropped'), ['int'])),
                            ('packet_accepted', (YLeaf(YType.uint64, 'packet-accepted'), ['int'])),
                        ])
                        self.trap_id = None
                        self.trap_strength = None
                        self.priority = None
                        self.trap_id_xr = None
                        self.gport = None
                        self.fec_id = None
                        self.policer_id = None
                        self.stats_id = None
                        self.encap_id = None
                        self.mc_group = None
                        self.trap_string = None
                        self.id = None
                        self.offset = None
                        self.npu_id = None
                        self.packet_dropped = None
                        self.packet_accepted = None
                        self._segment_path = lambda: "trap-id" + "[trap-id='" + str(self.trap_id) + "']"
                        self._is_frozen = True

                    def __setattr__(self, name, value):
                        self._perform_setattr(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.TrapIds.TrapId, ['trap_id', u'trap_strength', u'priority', u'trap_id_xr', u'gport', u'fec_id', u'policer_id', u'stats_id', u'encap_id', u'mc_group', u'trap_string', u'id', u'offset', u'npu_id', u'packet_dropped', u'packet_accepted'], name, value)

            class InterfaceHandles(Entity):
                """
                Voq stats grouped by interface handle
                .. attribute:: interface_handle
                Voq stats for a particular interface handle
                **type**\: list of :py:class:`InterfaceHandle <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.InterfaceHandles.InterfaceHandle>`
                """
                _prefix = 'fretta-bcm-dpa-npu-stats-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    super(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.InterfaceHandles, self).__init__()
                    self.yang_name = "interface-handles"
                    self.yang_parent_name = "display"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    self._child_classes = OrderedDict([("interface-handle", ("interface_handle", Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.InterfaceHandles.InterfaceHandle))])
                    self._leafs = OrderedDict()
                    self.interface_handle = YList(self)
                    self._segment_path = lambda: "interface-handles"
                    self._is_frozen = True

                def __setattr__(self, name, value):
                    self._perform_setattr(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.InterfaceHandles, [], name, value)

                class InterfaceHandle(Entity):
                    """
                    Voq stats for a particular interface
                    handle
                    .. attribute:: interface_handle (key)
                    Interface Handle
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: in_use
                    Flag to indicate if port is in use
                    **type**\: bool
                    .. attribute:: rack_num
                    Rack of port
                    **type**\: int
                    **range:** 0..255
                    .. attribute:: slot_num
                    Slot of port
                    **type**\: int
                    **range:** 0..255
                    .. attribute:: npu_num
                    NPU of port
                    **type**\: int
                    **range:** 0..255
                    .. attribute:: npu_core
                    NPU core of port
                    **type**\: int
                    **range:** 0..255
                    .. attribute:: port_num
                    Port Number of port
                    **type**\: int
                    **range:** 0..255
                    .. attribute:: if_handle
                    IfHandle of port
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: sys_port
                    System port of port
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: pp_port
                    PP Port number of port
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: port_speed
                    Port speed of port
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: voq_base
                    Voq Base number of port
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: connector_id
                    Connector id of port
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: is_local_port
                    Flag to indicate if port is local to the node
                    **type**\: bool
                    .. attribute:: voq_stat
                    Keeps a record of the received and dropped packets and bytes on the port
                    **type**\: list of :py:class:`VoqStat <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.InterfaceHandles.InterfaceHandle.VoqStat>`
                    """
                    _prefix = 'fretta-bcm-dpa-npu-stats-oper'
                    _revision = '2015-11-09'

                    def __init__(self):
                        super(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.InterfaceHandles.InterfaceHandle, self).__init__()
                        self.yang_name = "interface-handle"
                        self.yang_parent_name = "interface-handles"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = ['interface_handle']
                        self._child_classes = OrderedDict([("voq-stat", ("voq_stat", Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.InterfaceHandles.InterfaceHandle.VoqStat))])
                        self._leafs = OrderedDict([
                            ('interface_handle', (YLeaf(YType.uint32, 'interface-handle'), ['int'])),
                            ('in_use', (YLeaf(YType.boolean, 'in-use'), ['bool'])),
                            ('rack_num', (YLeaf(YType.uint8, 'rack-num'), ['int'])),
                            ('slot_num', (YLeaf(YType.uint8, 'slot-num'), ['int'])),
                            ('npu_num', (YLeaf(YType.uint8, 'npu-num'), ['int'])),
                            ('npu_core', (YLeaf(YType.uint8, 'npu-core'), ['int'])),
                            ('port_num', (YLeaf(YType.uint8, 'port-num'), ['int'])),
                            ('if_handle', (YLeaf(YType.uint32, 'if-handle'), ['int'])),
                            ('sys_port', (YLeaf(YType.uint32, 'sys-port'), ['int'])),
                            ('pp_port', (YLeaf(YType.uint32, 'pp-port'), ['int'])),
                            ('port_speed', (YLeaf(YType.uint32, 'port-speed'), ['int'])),
                            ('voq_base', (YLeaf(YType.uint32, 'voq-base'), ['int'])),
                            ('connector_id', (YLeaf(YType.uint32, 'connector-id'), ['int'])),
                            ('is_local_port', (YLeaf(YType.boolean, 'is-local-port'), ['bool'])),
                        ])
                        self.interface_handle = None
                        self.in_use = None
                        self.rack_num = None
                        self.slot_num = None
                        self.npu_num = None
                        self.npu_core = None
                        self.port_num = None
                        self.if_handle = None
                        self.sys_port = None
                        self.pp_port = None
                        self.port_speed = None
                        self.voq_base = None
                        self.connector_id = None
                        self.is_local_port = None
                        self.voq_stat = YList(self)
                        self._segment_path = lambda: "interface-handle" + "[interface-handle='" + str(self.interface_handle) + "']"
                        self._is_frozen = True

                    def __setattr__(self, name, value):
                        self._perform_setattr(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.InterfaceHandles.InterfaceHandle, ['interface_handle', u'in_use', u'rack_num', u'slot_num', u'npu_num', u'npu_core', u'port_num', u'if_handle', u'sys_port', u'pp_port', u'port_speed', u'voq_base', u'connector_id', u'is_local_port'], name, value)

                    class VoqStat(Entity):
                        """
                        Keeps a record of the received and dropped
                        packets and bytes on the port
                        .. attribute:: received_bytes
                        Bytes Received on the port
                        **type**\: int
                        **range:** 0..18446744073709551615
                        **units**\: byte
                        .. attribute:: received_packets
                        Packets Received on the port
                        **type**\: int
                        **range:** 0..18446744073709551615
                        .. attribute:: dropped_bytes
                        Bytes Dropped on the port
                        **type**\: int
                        **range:** 0..18446744073709551615
                        **units**\: byte
                        .. attribute:: dropped_packets
                        Packets Dropped on the port
                        **type**\: int
                        **range:** 0..18446744073709551615
                        """
                        _prefix = 'fretta-bcm-dpa-npu-stats-oper'
                        _revision = '2015-11-09'

                        def __init__(self):
                            super(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.InterfaceHandles.InterfaceHandle.VoqStat, self).__init__()
                            self.yang_name = "voq-stat"
                            self.yang_parent_name = "interface-handle"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_classes = OrderedDict([])
                            self._leafs = OrderedDict([
                                ('received_bytes', (YLeaf(YType.uint64, 'received-bytes'), ['int'])),
                                ('received_packets', (YLeaf(YType.uint64, 'received-packets'), ['int'])),
                                ('dropped_bytes', (YLeaf(YType.uint64, 'dropped-bytes'), ['int'])),
                                ('dropped_packets', (YLeaf(YType.uint64, 'dropped-packets'), ['int'])),
                            ])
                            self.received_bytes = None
                            self.received_packets = None
                            self.dropped_bytes = None
                            self.dropped_packets = None
                            self._segment_path = lambda: "voq-stat"
                            self._is_frozen = True

                        def __setattr__(self, name, value):
                            self._perform_setattr(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.InterfaceHandles.InterfaceHandle.VoqStat, [u'received_bytes', u'received_packets', u'dropped_bytes', u'dropped_packets'], name, value)

            class BaseNumbers(Entity):
                """
                Voq stats grouped by voq base numbers
                .. attribute:: base_number
                Voq Base Number for a particular voq
                **type**\: list of :py:class:`BaseNumber <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.BaseNumbers.BaseNumber>`
                """
                _prefix = 'fretta-bcm-dpa-npu-stats-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    super(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.BaseNumbers, self).__init__()
                    self.yang_name = "base-numbers"
                    self.yang_parent_name = "display"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    self._child_classes = OrderedDict([("base-number", ("base_number", Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.BaseNumbers.BaseNumber))])
                    self._leafs = OrderedDict()
                    self.base_number = YList(self)
                    self._segment_path = lambda: "base-numbers"
                    self._is_frozen = True

                def __setattr__(self, name, value):
                    self._perform_setattr(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.BaseNumbers, [], name, value)

                class BaseNumber(Entity):
                    """
                    Voq Base Number for a particular voq
                    .. attribute:: base_number (key)
                    Interface handle
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: in_use
                    Flag to indicate if port is in use
                    **type**\: bool
                    .. attribute:: rack_num
                    Rack of port
                    **type**\: int
                    **range:** 0..255
                    .. attribute:: slot_num
                    Slot of port
                    **type**\: int
                    **range:** 0..255
                    .. attribute:: npu_num
                    NPU of port
                    **type**\: int
                    **range:** 0..255
                    .. attribute:: npu_core
                    NPU core of port
                    **type**\: int
                    **range:** 0..255
                    .. attribute:: port_num
                    Port Number of port
                    **type**\: int
                    **range:** 0..255
                    .. attribute:: if_handle
                    IfHandle of port
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: sys_port
                    System port of port
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: pp_port
                    PP Port number of port
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: port_speed
                    Port speed of port
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: voq_base
                    Voq Base number of port
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: connector_id
                    Connector id of port
                    **type**\: int
                    **range:** 0..4294967295
                    .. attribute:: is_local_port
                    Flag to indicate if port is local to the node
                    **type**\: bool
                    .. attribute:: voq_stat
                    Keeps a record of the received and dropped packets and bytes on the port
                    **type**\: list of :py:class:`VoqStat <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_npu_stats_oper.Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.BaseNumbers.BaseNumber.VoqStat>`
                    """
                    _prefix = 'fretta-bcm-dpa-npu-stats-oper'
                    _revision = '2015-11-09'

                    def __init__(self):
                        super(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.BaseNumbers.BaseNumber, self).__init__()
                        self.yang_name = "base-number"
                        self.yang_parent_name = "base-numbers"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = ['base_number']
                        self._child_classes = OrderedDict([("voq-stat", ("voq_stat", Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.BaseNumbers.BaseNumber.VoqStat))])
                        self._leafs = OrderedDict([
                            ('base_number', (YLeaf(YType.uint32, 'base-number'), ['int'])),
                            ('in_use', (YLeaf(YType.boolean, 'in-use'), ['bool'])),
                            ('rack_num', (YLeaf(YType.uint8, 'rack-num'), ['int'])),
                            ('slot_num', (YLeaf(YType.uint8, 'slot-num'), ['int'])),
                            ('npu_num', (YLeaf(YType.uint8, 'npu-num'), ['int'])),
                            ('npu_core', (YLeaf(YType.uint8, 'npu-core'), ['int'])),
                            ('port_num', (YLeaf(YType.uint8, 'port-num'), ['int'])),
                            ('if_handle', (YLeaf(YType.uint32, 'if-handle'), ['int'])),
                            ('sys_port', (YLeaf(YType.uint32, 'sys-port'), ['int'])),
                            ('pp_port', (YLeaf(YType.uint32, 'pp-port'), ['int'])),
                            ('port_speed', (YLeaf(YType.uint32, 'port-speed'), ['int'])),
                            ('voq_base', (YLeaf(YType.uint32, 'voq-base'), ['int'])),
                            ('connector_id', (YLeaf(YType.uint32, 'connector-id'), ['int'])),
                            ('is_local_port', (YLeaf(YType.boolean, 'is-local-port'), ['bool'])),
                        ])
                        self.base_number = None
                        self.in_use = None
                        self.rack_num = None
                        self.slot_num = None
                        self.npu_num = None
                        self.npu_core = None
                        self.port_num = None
                        self.if_handle = None
                        self.sys_port = None
                        self.pp_port = None
                        self.port_speed = None
                        self.voq_base = None
                        self.connector_id = None
                        self.is_local_port = None
                        self.voq_stat = YList(self)
                        self._segment_path = lambda: "base-number" + "[base-number='" + str(self.base_number) + "']"
                        self._is_frozen = True

                    def __setattr__(self, name, value):
                        self._perform_setattr(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.BaseNumbers.BaseNumber, ['base_number', u'in_use', u'rack_num', u'slot_num', u'npu_num', u'npu_core', u'port_num', u'if_handle', u'sys_port', u'pp_port', u'port_speed', u'voq_base', u'connector_id', u'is_local_port'], name, value)

                    class VoqStat(Entity):
                        """
                        Keeps a record of the received and dropped
                        packets and bytes on the port
                        .. attribute:: received_bytes
                        Bytes Received on the port
                        **type**\: int
                        **range:** 0..18446744073709551615
                        **units**\: byte
                        .. attribute:: received_packets
                        Packets Received on the port
                        **type**\: int
                        **range:** 0..18446744073709551615
                        .. attribute:: dropped_bytes
                        Bytes Dropped on the port
                        **type**\: int
                        **range:** 0..18446744073709551615
                        **units**\: byte
                        .. attribute:: dropped_packets
                        Packets Dropped on the port
                        **type**\: int
                        **range:** 0..18446744073709551615
                        """
                        _prefix = 'fretta-bcm-dpa-npu-stats-oper'
                        _revision = '2015-11-09'

                        def __init__(self):
                            super(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.BaseNumbers.BaseNumber.VoqStat, self).__init__()
                            self.yang_name = "voq-stat"
                            self.yang_parent_name = "base-number"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_classes = OrderedDict([])
                            self._leafs = OrderedDict([
                                ('received_bytes', (YLeaf(YType.uint64, 'received-bytes'), ['int'])),
                                ('received_packets', (YLeaf(YType.uint64, 'received-packets'), ['int'])),
                                ('dropped_bytes', (YLeaf(YType.uint64, 'dropped-bytes'), ['int'])),
                                ('dropped_packets', (YLeaf(YType.uint64, 'dropped-packets'), ['int'])),
                            ])
                            self.received_bytes = None
                            self.received_packets = None
                            self.dropped_bytes = None
                            self.dropped_packets = None
                            self._segment_path = lambda: "voq-stat"
                            self._is_frozen = True

                        def __setattr__(self, name, value):
                            self._perform_setattr(Dpa.Stats.Nodes.Node.NpuNumbers.NpuNumber.Display.BaseNumbers.BaseNumber.VoqStat, [u'received_bytes', u'received_packets', u'dropped_bytes', u'dropped_packets'], name, value)
def clone_ptr(self):
    """Create a fresh top-level Dpa entity, remember it, and return it."""
    top = Dpa()
    self._top_entity = top
    return top
|
from .lenet import LeNet
from .mnf_feed_forward import MNFFeedForward
from .mnf_lenet import MNFLeNet
|
import os
import inspect
import importlib
import torch
import random
import numpy as np
import pandas as pd
import tensorflow as tf
import easyPheno.model
def get_list_of_implemented_models() -> list:
    """
    Create a list of all implemented models based on files existing in 'model' subdirectory of the repository.

    :return: list of model names (python source file names without the '.py' suffix)
    """
    # Assumption: naming of python source file is the same as the model name specified by the user
    if os.path.exists('../model'):
        model_src_files = os.listdir('../model')
    elif os.path.exists('model'):
        model_src_files = os.listdir('model')
    elif os.path.exists('easyPheno/model'):
        model_src_files = os.listdir('easyPheno/model')
    else:
        model_src_files = [model_file + '.py' for model_file in easyPheno.model.__all__]
    # Keep only real python sources: skip private files (e.g. __init__.py) and
    # non-.py entries such as __pycache__ or stray text files.  Previously any
    # non-underscore entry had its last three characters chopped off regardless
    # of extension, producing garbage model names for e.g. 'notes.txt'.
    model_src_files = [file for file in model_src_files
                      if not file.startswith('_') and file.endswith('.py')]
    return [model[:-3] for model in model_src_files]
def test_likely_categorical(vector_to_test: list, abs_unique_threshold: int = 20) -> bool:
"""
Test whether a vector is most likely categorical.
Simple heuristics: checking if the number of unique values exceeds a specified threshold
:param vector_to_test: vector that is tested if it is most likely categorical
:param abs_unique_threshold: threshold of unique values' ratio to declare vector categorical
:return: True if the vector is most likely categorical, False otherwise
"""
number_unique_values = np.unique(vector_to_test).shape[0]
return number_unique_values <= abs_unique_threshold
def get_mapping_name_to_class() -> dict:
    """
    Get a mapping from model name (naming in package model without .py) to class name.

    :return: dictionary with mapping model name to class name
    """
    for candidate_dir in ('../model', 'model', 'easyPheno/model'):
        if os.path.exists(candidate_dir):
            files = os.listdir(candidate_dir)
            break
    else:
        files = [module_name + '.py' for module_name in easyPheno.model.__all__]
    mapping = {}
    for file in files:
        # skip package internals and anything that is not a python source file
        if file in ('__init__.py', '__pycache__'):
            continue
        if file[-3:] != '.py':
            continue
        stem = file[:-3]
        module_name = 'easyPheno.model.' + stem
        module = importlib.import_module(module_name)
        for _, cls in inspect.getmembers(module, inspect.isclass):
            if cls.__module__ == module_name:
                mapping[stem] = cls
    return mapping
def set_all_seeds(seed: int = 42):
    """
    Set all seeds of libs with a specific function for reproducibility of results

    :param seed: seed to use
    """
    # seed every RNG the pipeline may touch: torch (CPU + CUDA),
    # Python's random, NumPy and TensorFlow
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.cuda.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    # force deterministic cuDNN kernels (reproducible, possibly slower)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    tf.random.set_seed(seed)
def get_subpath_for_datasplit(datasplit: str, datasplit_params: list) -> str:
    """
    Construct the subpath according to the datasplit.

    Datasplit parameters:
    - nested-cv: [n_outerfolds, n_innerfolds]
    - cv-test: [n_innerfolds, test_set_size_percentage]
    - train-val-test: [val_set_size_percentage, train_set_size_percentage]

    :param datasplit: datasplit to retrieve
    :param datasplit_params: parameters to use for the specific datasplit

    :return: string with the subpath

    :raises ValueError: if an unknown datasplit is given
    """
    # construct subpath due to the specified datasplit
    if datasplit == 'train-val-test':
        return f'({100 - datasplit_params[0]}-{datasplit_params[0]})-{datasplit_params[1]}'
    if datasplit in ('cv-test', 'nested-cv'):
        # both datasplits share the same "<param0>-<param1>" layout
        return f'{datasplit_params[0]}-{datasplit_params[1]}'
    # BUG FIX: previously an unknown datasplit fell through to an
    # UnboundLocalError on datasplit_string; fail with a clear error instead
    raise ValueError(f"Unknown datasplit: {datasplit}")
def save_model_overview_dict(model_overview: dict, save_path: str):
    """
    Structure and save results of a whole optimization run for multiple models in one csv file

    :param model_overview: dictionary with results overview
    :param save_path: filepath for saving the results overview file
    """
    overview_df = pd.DataFrame()
    for model_name, fold_dicts in model_overview.items():
        eval_metrics = {}
        eval_metrics_std = {}
        runtime_metrics = {}
        runtime_metrics_std = {}
        for fold_name, fold_info in fold_dicts.items():
            for result_name, result_info in fold_info.items():
                overview_df.at[fold_name, model_name + '___' + result_name] = [result_info]
                # collect per-fold metric values so mean/std can be computed below
                if 'eval_metric' in result_name:
                    for metric_name, metric_result in result_info.items():
                        eval_metrics.setdefault(metric_name, []).append(metric_result)
                if 'runtime' in result_name:
                    for metric_name, metric_result in result_info.items():
                        runtime_metrics.setdefault(metric_name, []).append(metric_result)
        for metric_name, values in eval_metrics.items():
            eval_metrics[metric_name] = np.mean(values)
            eval_metrics_std[metric_name] = np.std(values)
        for metric_name, values in runtime_metrics.items():
            runtime_metrics[metric_name] = np.mean(values)
            runtime_metrics_std[metric_name] = np.std(values)
        # aggregated summary rows are only written for nested-cv result files
        if 'nested' in save_path:
            overview_df.at['mean_over_all_folds', model_name + '___' + 'eval_metrics'] = [eval_metrics]
            overview_df.at['std_over_all_folds', model_name + '___' + 'eval_metrics'] = [eval_metrics_std]
            overview_df.at['mean_over_all_folds', model_name + '___' + 'runtime_metrics'] = [runtime_metrics]
            overview_df.at['std_over_all_folds', model_name + '___' + 'runtime_metrics'] = [runtime_metrics_std]
    overview_df.to_csv(save_path)
def sort_models_by_encoding(models_list: list) -> list:
    """
    Sort models by the encoding that will be used

    :param models_list: unsorted list of models

    :return: list of models sorted by encoding
    """
    # sorted() is stable, so models with equal encodings keep their input order,
    # matching the previous zip-and-sort implementation
    name_to_class = get_mapping_name_to_class()
    return sorted(models_list, key=lambda name: name_to_class[name].standard_encoding)
|
import itertools
def containsAny(seq, aset):
    """Return True if any element of *seq* is contained in *aset*.

    BUG FIX: the previous implementation used itertools.ifilter, which only
    exists in Python 2; any() with a generator is the Python 3 equivalent.
    """
    return any(item in aset for item in seq)
|
from im2mesh.onet.models import decoder
from im2mesh.utils import visualize as vis
from im2mesh.training import BaseTrainer
import torch
from torch import nn
from torch.nn import functional as F
from torch import distributions as dist
from torch.optim import Adam
from im2mesh.common import (
compute_iou, make_3d_grid
)
from im2mesh.utils import visualize as vis
from im2mesh.training import BaseTrainer
import os
def generate_n_points(voxels, n, bounds):
    """
    Generate n random points within bounds, with voxels interpreted as pixels

    Args:
        voxels: The voxels
        n: How many points to generate
        bounds: (x_min, x_max, y_min, y_max, z_min, z_max) interpretation of the bounds for the voxels
    Returns:
        (points, points_occ)
    """
    x_min, x_max, y_min, y_max, z_min, z_max = bounds
    # draw unit samples per axis (same RNG call order as before: x, y, z);
    # the same samples drive both the point coordinates and the voxel lookup
    unit_x = torch.rand(n)
    unit_y = torch.rand(n)
    unit_z = torch.rand(n)
    coords = [
        unit_x * (x_max - x_min) + x_min,
        unit_y * (y_max - y_min) + y_min,
        unit_z * (z_max - z_min) + z_min,
    ]
    points = torch.stack(coords, 1)
    # map unit samples onto voxel indices and read occupancy (> 0.5 means occupied)
    dims = voxels.shape
    voxel_index = tuple(
        torch.floor(u * dim).long()
        for u, dim in zip((unit_x, unit_y, unit_z), dims)
    )
    points_occ = voxels[voxel_index] > 0.5
    return points, points_occ
class DecoderOnlyModule(nn.Module):
    """Wraps a decoder with a single fixed latent code drawn from N(0, 1)."""

    def __init__(self, decoder, device=None):
        super().__init__()
        # keep the original statement order: allocate z, store decoder,
        # then draw from the RNG (preserves RNG call sequence)
        self.init_z = torch.empty(decoder.z_dim).to(device)
        self.decoder = decoder
        nn.init.normal_(self.init_z)  # Initial as N(0, 1)

    def forward(self, input):
        """
        Decode a batch of 3d points with the stored latent code.

        Args:
            input: The input (a batch of 3d points)
        Returns:
            The output
        """
        latent = self.init_z
        return self.decoder(input, latent)
class DecoderOnlyTrainer(BaseTrainer):
    """Trains a decoder-only occupancy model with BCE on sampled 3D points."""

    def __init__(self, model, device=None, vis_dir=None):
        # model: the DecoderOnlyModule to optimize; vis_dir: output dir for voxel dumps
        super().__init__()
        self.model = model.to(device)
        self.device = device
        self.optimizer = Adam(model.parameters())
        self.vis_dir = vis_dir
        self.probs = nn.Sigmoid()
        self.threshold = 0.5  # occupancy decision threshold on sigmoid output

    def compute_loss(self, points, points_occ):
        """Mean BCE-with-logits between model output and target occupancies."""
        logits = self.model(points)
        loss = F.binary_cross_entropy_with_logits(logits, points_occ, reduction='mean')
        return loss

    def train_step(self, voxels, points=None, points_occ=None, n_points=None):
        """Run one optimization step; samples points from voxels when none are given.

        Returns the (still attached) loss tensor.
        """
        if points is None:
            if n_points is None:
                raise ValueError("Either n_points or points should be specified")
            # sample inside the unit cube centered at the origin
            points, points_occ = generate_n_points(voxels, n_points, (-0.5, 0.5, -0.5, 0.5, -0.5, 0.5))
        points = points.to(self.device)
        points_occ = points_occ.float().to(self.device)
        self.model.train()
        self.optimizer.zero_grad()
        loss = self.compute_loss(points, points_occ)
        loss.backward()
        self.optimizer.step()
        return loss

    def eval_step(self, points_input, points_occ):
        """Evaluate cross-entropy and IoU on a validation batch (no gradients)."""
        with torch.no_grad():
            points = points_input.to(self.device)
            points_occ = points_occ.to(self.device)
            self.model.eval()
            eval_dict = {}
            # for cross entropy loss validation
            # NOTE(review): assumes points_occ arrives float-typed here (unlike
            # train_step, no .float() cast) — confirm with callers
            logits = self.model(points)
            eval_dict['cross_entropy'] = F.binary_cross_entropy_with_logits(logits, points_occ, reduction='mean')
            # for iou validation
            m = nn.Sigmoid()
            predicted_occ = m(logits)
            pred_occ_np = (predicted_occ >= self.threshold).cpu().numpy()
            orig_occ_np = (points_occ >= self.threshold).cpu().numpy()
            iou = compute_iou(pred_occ_np, orig_occ_np).mean()
            eval_dict['iou'] = iou
            return eval_dict

    def visualize_decoder(self, it, loss, sub_dir=0, best=False):
        ''' Performs a visualization step for the data.

        Args:
            it (int): current iteration, used in the output file name
            loss (float): current loss, used in the output file name
            sub_dir (int): numeric sub-directory created under vis_dir
            best (bool): if True name the output 'best' instead of the iteration tag
        '''
        device = self.device
        # evaluate the decoder on a dense 128^3 grid over the unit cube
        shape = (128, 128, 128)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
        with torch.no_grad():
            p_r = self.model(p)
        occ_hat = self.probs(p_r).view(1, *shape)
        voxels_out = (occ_hat >= self.threshold).cpu()
        vis_dir = os.path.join(self.vis_dir, '{:06}'.format(sub_dir))
        vis.visualize_voxels_new(
            voxels_out, 'best' if best else 'it{:06d}_{:.3f}'.format(it, loss), vis_dir)
|
"""Decorator to disable runtime type-checking for a function."""
from typing import TYPE_CHECKING
from typing import TypeVar
if TYPE_CHECKING: # pragma: no cover
F = TypeVar("F")
def typeguard_ignore(f: F) -> F:
"""This decorator is a noop during static type-checking."""
return f
else:
from typing import no_type_check as typeguard_ignore
__all__ = ["typeguard_ignore"]
|
import os
import pytest
from falcon import testing
# Change the working directory to the project root (two levels up from this
# file) so the application modules below can be imported.
path = os.path.abspath(__file__)
dir_path = os.path.dirname(os.path.dirname(path))
os.chdir(dir_path)
# imported after chdir on purpose, hence the noqa markers
from api import api  # noqa: E402
from lib.token import create_token  # noqa: E402
@pytest.fixture()
def client():
    """Falcon test client bound to a fresh application instance."""
    app = api()
    return testing.TestClient(app)
def test_exec_env_get(client):
    """GET /exec-env with a valid token must respond with HTTP 200."""
    auth_headers = {'Authorization': create_token()}
    response = client.simulate_get('/exec-env', headers=auth_headers)
    assert response.status_code == 200
|
from rest_framework import routers
from app import views
# DRF router exposing DayStatsViewset under /day-stats/.
# NOTE(review): basename is given explicitly — presumably the viewset has no
# queryset attribute for DRF to derive it from; confirm against views.py.
router = routers.DefaultRouter()
router.register(r'day-stats', views.DayStatsViewset, basename='day-stats')
|
from openpnm.algorithms import TransientReactiveTransport, AdvectionDiffusion
from openpnm.utils import logging, Docorator, GenericSettings
# shared Docorator instance used by the @docstr.dedent decorators below
docstr = Docorator()
logger = logging.getLogger(__name__)
@docstr.dedent
class TransientAdvectionDiffusionSettings(GenericSettings):
    r"""
    Parameters
    ----------
    %(AdvectionDiffusionSettings.parameters)s
    Other Parameters
    ----------------
    %(AdvectionDiffusionSettings.other_parameters)s
    ----
    **The following parameters pertain to the TransientReactiveTransport class**
    %(TransientReactiveTransportSettings.other_parameters)s
    ----
    **The following parameters pertain to the ReactiveTransport class**
    %(ReactiveTransportSettings.other_parameters)s
    ----
    **The following parameters pertain to the GenericTransport class**
    %(GenericTransportSettings.other_parameters)s
    """
    # NOTE: the %(...)s fields in the docstring above are substituted by
    # Docorator at import time from the referenced settings classes — the
    # docstring is functional text, do not edit it casually.
class TransientAdvectionDiffusion(TransientReactiveTransport,
                                  AdvectionDiffusion):
    r"""
    A subclass of GenericTransport to perform steady and transient simulations
    of pure diffusion and advection-diffusion problems.
    """

    def __init__(self, settings=None, phase=None, **kwargs):
        """
        Parameters
        ----------
        settings : dict, optional
            Overrides applied on top of the default settings.
        phase : GenericPhase, optional
            Phase on which the transport is performed; stored by name.
        """
        super().__init__(**kwargs)
        self.settings._update_settings_and_docs(TransientAdvectionDiffusionSettings())
        # BUG FIX: the signature previously used a mutable default (settings={}),
        # a classic shared-state pitfall; accept None and substitute here instead
        self.settings.update(settings or {})
        if phase is not None:
            self.settings['phase'] = phase.name
|
"""Example of Timer and Compare APIs:
$ python -m examples.sparse.compare
"""
import pickle
import sys
import time
import torch
import torch.utils.benchmark as benchmark_utils
class FauxTorch(object):
    """Emulate different versions of pytorch.

    In normal circumstances this would be done with multiple processes
    writing serialized measurements, but this simplifies that model to
    make the example clearer.
    """

    def __init__(self, real_torch, extra_ns_per_element):
        self._real_torch = real_torch
        self._extra_ns_per_element = extra_ns_per_element

    @property
    def sparse(self):
        """A Sparse namespace whose ops carry the configured fake overhead."""
        return self.Sparse(self._real_torch, self._extra_ns_per_element)

    class Sparse:
        def __init__(self, real_torch, extra_ns_per_element):
            self._real_torch = real_torch
            self._extra_ns_per_element = extra_ns_per_element

        def extra_overhead(self, result):
            # time.sleep has a ~65 us overhead, so only fake a
            # per-element overhead if numel is large enough.
            numel = sum(result.size())
            if numel > 5000:
                time.sleep(numel * self._extra_ns_per_element * 1e-9)
            return result

        def mm(self, *args, **kwargs):
            real_result = self._real_torch.sparse.mm(*args, **kwargs)
            return self.extra_overhead(real_result)
def generate_coo_data(size, sparse_dim, nnz, dtype, device):
    """
    Generate random COO indices and values for a sparse tensor.

    Parameters
    ----------
    size : tuple
    sparse_dim : int
    nnz : int
    dtype : torch.dtype
        Defaults to torch.float32 when None.
    device : str

    Returns
    -------
    indices : torch.tensor
    values : torch.tensor
    """
    if dtype is None:
        # BUG FIX: this was the string 'float32', which torch.rand rejects;
        # it must be an actual torch.dtype
        dtype = torch.float32
    # scale uniform samples up to each dimension's extent, then truncate to ints
    indices = torch.rand(sparse_dim, nnz, device=device)
    indices.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(indices))
    indices = indices.to(torch.long)
    values = torch.rand([nnz, ], dtype=dtype, device=device)
    return indices, values
def gen_sparse(size, density, dtype, device='cpu'):
    """Build a random sparse COO tensor with the given size and density."""
    ndim = len(size)
    nnz = int(size[0] * size[1] * density)
    idx, vals = generate_coo_data(size, ndim, nnz, dtype, device)
    return torch.sparse_coo_tensor(idx, vals, size, dtype=dtype, device=device)
def main():
    """Benchmark torch.sparse.mm across emulated branches, sizes and thread counts."""
    tasks = [
        ("matmul", "x @ y", "torch.sparse.mm(x, y)"),
        ("matmul", "x @ y + 0", "torch.sparse.mm(x, y) + zero"),
    ]
    serialized_results = []
    repeats = 2
    # one Timer per (branch, task, density, size, num_threads) combination;
    # non-master branches wrap torch in FauxTorch to fake a per-element slowdown
    timers = [
        benchmark_utils.Timer(
            stmt=stmt,
            globals={
                "torch": torch if branch == "master" else FauxTorch(torch, overhead_ns),
                "x": gen_sparse(size=size, density=density, dtype=torch.float32),
                "y": torch.rand(size, dtype=torch.float32),
                "zero": torch.zeros(()),
            },
            label=label,
            sub_label=sub_label,
            description=f"size: {size}",
            env=branch,
            num_threads=num_threads,
        )
        for branch, overhead_ns in [("master", None), ("my_branch", 1), ("severe_regression", 10)]
        for label, sub_label, stmt in tasks
        for density in [0.05, 0.1]
        for size in [(8, 8), (32, 32), (64, 64), (128, 128)]
        for num_threads in [1, 4]
    ]
    # measurements are pickled to mimic cross-process result collection
    for i, timer in enumerate(timers * repeats):
        serialized_results.append(pickle.dumps(
            timer.blocked_autorange(min_run_time=0.05)
        ))
        print(f"\r{i + 1} / {len(timers) * repeats}", end="")
        sys.stdout.flush()
    print()
    comparison = benchmark_utils.Compare([
        pickle.loads(i) for i in serialized_results
    ])
    print("== Unformatted " + "=" * 80 + "\n" + "/" * 95 + "\n")
    comparison.print()
    print("== Formatted " + "=" * 80 + "\n" + "/" * 93 + "\n")
    comparison.trim_significant_figures()
    comparison.colorize()
    comparison.print()
if __name__ == "__main__":
main()
|
#!/usr/bin/python3
import os.path
import sys
import time
import torch as tr
import numpy as np
import random as rn
import torch.utils.data as dt
from sklearn.metrics import precision_recall_curve, auc
from src.fold_dataset import FoldDataset
from src.model import mirDNN
from src.parameters import ParameterParser
from src.sampler import ImbalancedDatasetSampler
from src.logger import Logger
def main(argv):
    """Train mirDNN with early stopping on a smoothed validation PR-AUC.

    :param argv: command-line arguments, parsed by ParameterParser
    """
    pp = ParameterParser(argv)

    # make runs reproducible when a seed is given
    if pp.random_seed is not None:
        rn.seed(pp.random_seed)
        np.random.seed(pp.random_seed)
        tr.manual_seed(pp.random_seed)
    if pp.device.type == 'cuda':
        if pp.random_seed is not None:
            tr.backends.cudnn.deterministic = True
            tr.backends.cudnn.benchmark = False
        else:
            tr.backends.cudnn.deterministic = False
            tr.backends.cudnn.benchmark = True

    dataset = FoldDataset(pp.input_files, pp.seq_len)
    valid_size = int(pp.valid_prop * len(dataset))
    train, valid = dt.random_split(dataset, (len(dataset) - valid_size, valid_size))

    if pp.upsample:
        train_sampler = ImbalancedDatasetSampler(train,
                                                 max_imbalance=1.0,
                                                 num_samples=8 * pp.batch_size)
        # BUG FIX: shuffle=True together with a custom sampler raises a
        # ValueError in torch; the sampler already controls the ordering
        train_loader = dt.DataLoader(train,
                                     batch_size=pp.batch_size,
                                     sampler=train_sampler,
                                     pin_memory=True)
    else:
        train_loader = dt.DataLoader(train,
                                     batch_size=pp.batch_size,
                                     shuffle=True,
                                     pin_memory=True)
    valid_loader = dt.DataLoader(valid,
                                 batch_size=pp.batch_size,
                                 pin_memory=True)

    model = mirDNN(pp)
    model.train()

    log = Logger(pp.logfile)
    if pp.model_file is not None and os.path.isfile(pp.model_file):
        model.load(pp.model_file)

    log.write('epoch\ttrainLoss\tvalidAUC\tlast_imp\n')
    epoch = 0
    train_loss = 100
    valid_auc = 0
    best_valid_auc = 0
    last_improvement = 0
    while last_improvement < pp.early_stop:
        nbatch = 0
        for x, v, y in train_loader:
            new_loss = model.train_step(x, v, y)
            # exponential moving average of the training loss
            train_loss = 0.99 * train_loss + 0.01 * new_loss
            nbatch += 1
            # BUG FIX: was `continue`, a no-op as the last statement of the
            # loop; `break` caps each epoch at 1000 batches as intended
            if nbatch >= 1000:
                break

        preds, labels = tr.Tensor([]), tr.Tensor([])
        for x, v, y in valid_loader:
            z = model(x, v).cpu().detach()
            preds = tr.cat([preds, z.squeeze()])
            labels = tr.cat([labels, y.squeeze()])
        pr, rc, _ = precision_recall_curve(labels, preds)
        # BUG FIX: was `10 * auc(...) + 0.9 * valid_auc`, which grows without
        # bound and defeats the best-AUC comparison below; use a 0.1/0.9
        # exponential moving average so valid_auc stays on the AUC scale
        valid_auc = 0.1 * auc(rc, pr) + 0.9 * valid_auc

        last_improvement += 1
        if valid_auc > best_valid_auc:
            best_valid_auc = valid_auc
            last_improvement = 0
            model.save(pp.model_file)

        log.write('%d\t%.4f\t%.4f\t%d\n' %
                  (epoch, train_loss, valid_auc, last_improvement))
        epoch += 1
    log.close()
if __name__ == "__main__":
main(sys.argv[1:])
|
import json
# First stdin line: JSON object mapping characters to numeric values.
# Second line: the query string (spaces removed, uppercased).
letter_values = json.loads(input())
query = input().replace(" ", "").upper()

matched = ""
total = 0
for letter in query:
    if letter in letter_values:
        matched += letter + " "
        total += letter_values[letter]

print(total)
print(matched)
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-05-29 18:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Sharing model with a UUID,
    # a public flag, and a generic relation (sharing_type FK to ContentType
    # plus sharing_id) pointing at the shared object. Do not edit by hand.

    initial = True
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
    ]
    operations = [
        migrations.CreateModel(
            name='Sharing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('public', models.BooleanField(default=True)),
                ('sharing_id', models.PositiveIntegerField()),
                ('sharing_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contenttypes.ContentType')),
            ],
        ),
    ]
|
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.websocket
import tornado.gen
from tornado.options import define, options
import multiprocessing
import serialworker
import json
define("port", default=8080, help="run on the given port", type=int)
clients = []
input_queue = multiprocessing.Queue()
output_queue = multiprocessing.Queue()
class WebSocketHandler(tornado.websocket.WebSocketHandler):
    """Bridges browser websocket clients and the serial worker process.

    Note: this file is Python 2 (print statements); `sp` and `clients` are
    module-level globals created in the __main__ block.
    """

    def open(self):
        # register the new client and reset the serial worker
        print 'new connection'
        clients.append(self)
        self.write_message("connected")
        sp.reset()

    def on_message(self, message):
        # forward the client's message to the serial worker via the input queue
        print 'tornado received from client: %s' % json.dumps(message)
        input_queue.put(message)

    def on_close(self):
        print 'connection closed'
        clients.remove(self)
        # NOTE(review): this closes the shared serial process when ANY client
        # disconnects — confirm this is intended when multiple clients connect
        sp.close()
# check the queue for pending messages, and relay them to all connected clients
def checkQueue():
    """Forward one pending serial-worker message to every connected client."""
    if output_queue.empty():
        return
    pending = output_queue.get()
    for connected_client in clients:
        connected_client.write_message(pending)
if __name__ == '__main__':
    # start the serial worker in background (as a daemon)
    sp = serialworker.SerialProcess(input_queue, output_queue)
    sp.daemon = True
    sp.start()
    tornado.options.parse_command_line()
    app = tornado.web.Application(
        handlers=[
            (r"/", WebSocketHandler)
        ]
    )
    httpServer = tornado.httpserver.HTTPServer(app)
    httpServer.listen(options.port)
    print "Listening on port:", options.port
    mainLoop = tornado.ioloop.IOLoop.instance()
    # poll the worker's output queue every scheduler_interval ms;
    # adjust the scheduler_interval according to the frames sent by the serial port
    scheduler_interval = 100
    scheduler = tornado.ioloop.PeriodicCallback(checkQueue, scheduler_interval)
    scheduler.start()
    mainLoop.start()
|
from tuyaha.devices.base import TuyaDevice
# The minimum brightness value set in the API that does not turn off the light
MIN_BRIGHTNESS = 10.3
# the default range used to return brightness values to callers
BRIGHTNESS_STD_RANGE = (1, 255)
# the default range used when sending color temperature to the API
COLTEMP_SET_RANGE = (1000, 10000)
# the default range used to return color temperature (in kelvin)
COLTEMP_KELV_RANGE = (2700, 6500)
class TuyaLight(TuyaDevice):
    """Tuya light device wrapper exposing on/off, brightness, HS color and
    color-temperature controls.

    Values are translated between the API's configured ranges (the
    *_range attributes) and the standard ranges defined at module level.
    """

    def __init__(self, data, api):
        super().__init__(data, api)
        self._support_color = False
        # per-device ranges; defaults can be overridden by callers
        self.brightness_white_range = BRIGHTNESS_STD_RANGE
        self.brightness_color_range = BRIGHTNESS_STD_RANGE
        self.color_temp_range = COLTEMP_SET_RANGE

    # if color support is not reported by API can be forced by this method
    # the attribute _support_color is used by method support_color()
    def force_support_color(self):
        self._support_color = True

    def _color_mode(self):
        # "colour" is the Tuya work mode for color light; anything else is white
        work_mode = self.data.get("color_mode", "white")
        return True if work_mode == "colour" else False

    @staticmethod
    def _scale(val, src, dst):
        """Scale the given value from the scale of src to the scale of dst."""
        # negative values (used as a "missing" marker) clamp to the low end
        if val < 0:
            return dst[0]
        return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]

    def brightness(self):
        """Return the brightness based on the light status scaled to standard range"""
        brightness = -1
        if self._color_mode():
            if "color" in self.data:
                brightness = int(self.data.get("color").get("brightness", "-1"))
        else:
            brightness = int(self.data.get("brightness", "-1"))
        # returned value is scaled using standard range
        ret_val = TuyaLight._scale(
            brightness,
            self._brightness_range(),
            BRIGHTNESS_STD_RANGE,
        )
        return round(ret_val)

    def _set_brightness(self, brightness):
        # update the cached device state; in color mode brightness lives
        # inside the nested "color" dict
        if self._color_mode():
            data = self.data.get("color", {})
            data["brightness"] = brightness
            self._update_data("color", data, force_val=True)
        else:
            self._update_data("brightness", brightness)

    def _brightness_range(self):
        """return the configured brightness range based on the light status"""
        if self._color_mode():
            return self.brightness_color_range
        else:
            return self.brightness_white_range

    def support_color(self):
        """return if the light support color"""
        if not self._support_color:
            if self.data.get("color") or self.data.get("color_mode") == "colour":
                self._support_color = True
        return self._support_color

    def support_color_temp(self):
        """return if the light support color temperature"""
        return self.data.get("color_temp") is not None

    def hs_color(self):
        """return current hs color"""
        if self.support_color():
            color = self.data.get("color")
            if self._color_mode() and color:
                # saturation is stored as 0..1 in the device data; return 0..100
                return color.get("hue", 0.0), float(color.get("saturation", 0.0)) * 100
            else:
                return 0.0, 0.0
        else:
            return None

    def color_temp(self):
        """return current color temperature scaled with standard kelvin range"""
        temp = self.data.get("color_temp")
        # convert color temperature to kelvin scale for returned value
        ret_value = TuyaLight._scale(
            temp,
            self.color_temp_range,
            COLTEMP_KELV_RANGE,
        )
        return round(ret_value)

    def min_color_temp(self):
        # NOTE(review): returns the UPPER bound of the kelvin range (and
        # max_color_temp the lower) — looks intentionally inverted, matching
        # the mired-style convention of some consumers; confirm with callers
        return COLTEMP_KELV_RANGE[1]

    def max_color_temp(self):
        return COLTEMP_KELV_RANGE[0]

    def turn_on(self):
        # issue the command first; only mirror the state in the cache on success
        if self._control_device("turnOnOff", {"value": "1"}):
            self._update_data("state", "true")

    def turn_off(self):
        if self._control_device("turnOnOff", {"value": "0"}):
            self._update_data("state", "false")

    def set_brightness(self, brightness):
        """Set the brightness(0-255) of light."""
        if int(brightness) > 0:
            # convert to scale 0-100 with MIN_BRIGHTNESS to set the value
            set_value = TuyaLight._scale(
                brightness,
                BRIGHTNESS_STD_RANGE,
                (MIN_BRIGHTNESS, 100),
            )
            if self._control_device("brightnessSet", {"value": round(set_value, 1)}):
                self._update_data("state", "true")
                # convert to scale configured for brightness range to update the cache
                value = TuyaLight._scale(
                    brightness,
                    BRIGHTNESS_STD_RANGE,
                    self._brightness_range(),
                )
                self._set_brightness(round(value))
        else:
            # brightness 0 is expressed as turning the light off
            self.turn_off()

    def set_color(self, color):
        """Set the color of light.

        color: (hue, saturation%) or (hue, saturation%, brightness).
        """
        # keep the current brightness when the caller does not provide one
        cur_brightness = self.data.get("color", {}).get(
            "brightness", self.brightness_color_range[0]
        )
        hsv_color = {
            "hue": color[0] if color[1] != 0 else 0,  # color white
            "saturation": color[1] / 100,
        }
        if len(color) < 3:
            hsv_color["brightness"] = cur_brightness
        else:
            hsv_color["brightness"] = color[2]
        # color white
        white_mode = hsv_color["saturation"] == 0
        is_color = self._color_mode()
        if self._control_device("colorSet", {"color": hsv_color}):
            self._update_data("state", "true")
            self._update_data("color", hsv_color, force_val=True)
            # flip the cached work mode when crossing the white/colour boundary
            if not is_color and not white_mode:
                self._update_data("color_mode", "colour")
            elif is_color and white_mode:
                self._update_data("color_mode", "white")

    def set_color_temp(self, color_temp):
        """Set the color temperature of light."""
        # convert to scale configured for color temperature to update the value
        set_value = TuyaLight._scale(
            color_temp,
            COLTEMP_KELV_RANGE,
            COLTEMP_SET_RANGE,
        )
        if self._control_device("colorTemperatureSet", {"value": round(set_value)}):
            self._update_data("state", "true")
            self._update_data("color_mode", "white")
            # convert to scale configured for color temperature to update the cache
            data_value = TuyaLight._scale(
                color_temp,
                COLTEMP_KELV_RANGE,
                self.color_temp_range,
            )
            self._update_data("color_temp", round(data_value))
|
# PurePNG setup.py
# This is the setup.py script used by distutils.
# You can install the png module into your Python distribution with:
# python setup.py install
# You can also do other standard distutil type things, but you can refer
# to the distutil documentation for that.
# This script is also imported as a module by the Sphinx conf.py script
# in the man directory, so that this file forms a single source for
# metadata.
import sys
import os
import logging
from os.path import dirname, join
try:
# http://peak.telecommunity.com/DevCenter/setuptools#basic-use
from setuptools import setup
except ImportError:
# http://docs.python.org/release/2.4.4/dist/setup-script.html
from distutils.core import setup
try:
from Cython.Build import cythonize
except ImportError:
cythonize = False # just to be sure
from distutils.command.build_ext import build_ext as build_ext_orig
from distutils.errors import DistutilsError, CCompilerError, CompileError
import distutils
class build_ext_opt(build_ext_orig):
    """
    This is a version of the build_ext command that allow to fail build.

    As there is no required extension(only acceleration) with failed
    build_ext package still be usable.
    With `force` option this behavior disabled.
    """

    command_name = 'build_ext'

    def build_extension(self, ext):
        """Build one extension, downgrading failures to a warning unless --force."""
        try:
            build_ext_orig.build_extension(self, ext)
        except (CCompilerError, DistutilsError, CompileError,
                Exception):
            # sys.exc_info() instead of `except ... as e` keeps the very old
            # Python versions listed in the classifiers (2.3+) supported
            e = sys.exc_info()[1]
            if self.force:
                raise
            # BUG FIX: logging.warn is a deprecated alias; use logging.warning
            logging.warning('building optional extension "%s" failed: %s' %
                            (ext.name, e))


# replace the stock build_ext so extension build failures are non-fatal by default
distutils.command.build_ext.build_ext = build_ext_opt
# NOTE(review): the try/except wraps only the *definition* of do_unimport, so
# the handler can realistically only fire on interpreter-level failures at
# definition time, not on failures while do_unimport runs — confirm intent.
try:
    def do_unimport(folder=''):
        """Do extraction of filters etc. into target folder"""
        src = open(join(folder, 'png.py'))
        try:
            os.remove(join(folder, 'pngfilters.py'))
        except:
            pass
        new = open(join(folder, 'pngfilters.py'), 'w')
        # Fixed part
        # Cython directives
        new.write('#cython: boundscheck=False\n')
        new.write('#cython: wraparound=False\n')
        go = False
        # copy only the filter class bodies (from BaseFilter onward) out of png.py
        for line in src:
            if line.startswith('class') and\
               (line.startswith('class BaseFilter')):
                go = True
            elif not (line.startswith(' ') or line.strip() == ''):
                go = False
            if go:
                new.write(line)
        new.close()
        return join(folder, 'pngfilters.py')
except BaseException:  # Whatever happens we could work without unimport
    cythonize = False  # at cost of disabled cythonize
def get_version():
    """Read the version string out of png/png.py without importing the package."""
    source = open(join(dirname(__file__), 'png', 'png.py'))
    for source_line in source:
        if '__version__' in source_line:
            found = source_line.split('"')[1]
            break
    return found
# Package metadata; this module is also imported by the Sphinx conf.py, so
# the dict must stay importable without side effects.
conf = dict(
    name='purepng',
    version=get_version(),
    description='Pure Python PNG image encoder/decoder',
    long_description="""
PurePNG allows PNG image files to be read and written using pure Python.
PurePNG can read and write all PNG formats.
PNG supports a generous variety of image formats: RGB or greyscale, with or
without an alpha channel; and a choice of bit depths from 1, 2 or 4
(as long as you want greyscale or a pallete),
8, and 16 (but 16 bits is not allowed for palettes).
A pixel can vary in size from 1 to 64 bits: 1/2/4/8/16/24/32/48/64.
In addition a PNG file can be interlaced or not.
An interlaced file allows an incrementally refined display of images being
downloaded over slow links (yet it`s not implemented in PurePNG for now).
PurePNG is written in pure Python(that`s why it`s called Pure).
""",
    author='Pavel Zlatovratskii',
    author_email='scondo@mail.ru',
    url='https://github.com/scondo/purepng',
    package_dir={'png': 'png'},
    packages=['png'],
    classifiers=[
        'Topic :: Multimedia :: Graphics',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.3',
        'Programming Language :: Python :: 2.4',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: Jython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Development Status :: 4 - Beta',
    ],
    license='MIT License',
    keywords=['png', 'Imaging'],
    zip_safe=True,
    install_requires=['argparse'],
    tests_require=['argparse']
)
if __name__ == '__main__':
    # explicit opt-out flag; must be removed from argv before setup() sees it
    if '--no-cython' in sys.argv:
        cythonize = False
        sys.argv.remove('--no-cython')
    # Crude but simple check to disable cython when it's not needed
    if '--help' in sys.argv[1:]:
        cythonize = False
    commands = [it for it in sys.argv[1:] if not it.startswith('-')]
    no_c_need = ('check', 'upload', 'register', 'upload_docs', 'build_sphinx',
                 'saveopts', 'setopt', 'clean', 'develop', 'install_egg_info',
                 'egg_info', 'alias', )
    if not bool([it for it in commands if it not in no_c_need]):
        cythonize = False
    pre_cythonized = join(conf['package_dir']['png'], 'pngfilters.c')
    if cythonize:
        # extract the filter classes into pngfilters.py and compile with Cython
        cyth_ext = do_unimport(conf['package_dir']['png'])
        conf['ext_modules'] = cythonize(cyth_ext)
        os.remove(cyth_ext)
    elif os.access(pre_cythonized, os.F_OK):
        # fall back to a pre-generated C file when Cython is unavailable
        from distutils.extension import Extension
        conf['ext_modules'] = [Extension('pngfilters',
                                         [pre_cythonized])]
    # cythonized filters clean
    if 'clean' in sys.argv:
        if os.access(pre_cythonized, os.F_OK):
            os.remove(pre_cythonized)
    setup(**conf)
|
# parse log files and generate an excel file
import re
import sys, getopt
import pandas as pd
import xlsxwriter
# Regexes keyed by record type; each log line is matched against these by
# _parse_line() below. Named groups carry the extracted fields.
rx_dict = {
    'File': re.compile(r'File: (?P<file>.*) , Top Module: (?P<top_module>.*)'),
    'Faults': re.compile(r'Found (?P<fault_sites>.*) fault sites in (?P<gates>.*) gates and (?P<ports>.*) ports.'),
    'Time': re.compile(r'Time elapsed: (?P<time>.*)s.'),
    'Coverage': re.compile(r'Simulations concluded: Coverage (?P<coverage>.*)%'),
    'Iteration': re.compile(r'\((?P<current_coverage>.*)%/(?P<min_coverage>.*)%,\) incrementing to (?P<tv_count>.*).'),
}
def main(argv):
    """Parse the fault-simulation log and write results to an Excel workbook.

    One summary sheet ("Benchmarks") plus one sheet per file that needed
    coverage iterations.

    :param argv: command-line arguments (-f <logFile> -o <outputfile>)
    """
    log_file, output_file = parse_args(argv)
    data = pd.DataFrame(columns=["File", "Top Module", "Fault Sites", "Gate Count", "Ports", "Run Time", "TV Count", "Coverage"])
    benchmark = pd.DataFrame(columns=["Current Coverage", "Minimum Coverage", "TV Count"])
    sheets = {}
    row = {}
    iteration = {}

    def flush_row():
        # finalize the current file's record: attach its iteration sheet (if
        # any) and append the row to the summary frame
        nonlocal data, benchmark, row
        tv_count = -1  # -1: coverage met with minimum tv count; no iterations took place
        if not benchmark.empty:  # coverage was not met with the minimum tv count
            sheets[row["File"]] = benchmark
            tv_count = benchmark.iloc[-1]["TV Count"]
            benchmark = pd.DataFrame(columns=["Current Coverage", "Minimum Coverage", "TV Count"])
        row["TV Count"] = tv_count
        # DataFrame.append was removed in pandas 2.x; pd.concat is the replacement
        data = pd.concat([data, pd.DataFrame([row])], ignore_index=True)
        row = {}

    with open(log_file, 'r') as file_object:
        for line in file_object:
            # at each line check for a match with a regex
            key, match = _parse_line(line)
            if key == "File":
                if row:
                    flush_row()
                row["File"] = match.group(1)
                row["Top Module"] = match.group(2)
            if key == "Faults":
                row["Fault Sites"] = match.group(1)
                row["Gate Count"] = match.group(2)
                row["Ports"] = match.group(3)
            if key == "Time":
                row["Run Time"] = match.group(1)
            if key == "Coverage":
                row["Coverage"] = match.group(1)
            if key == "Iteration":
                iteration["Current Coverage"] = match.group(1)
                iteration["Minimum Coverage"] = match.group(2)
                iteration["TV Count"] = match.group(3)
                benchmark = pd.concat([benchmark, pd.DataFrame([iteration])], ignore_index=True)

    # BUG FIX: the final file's row was only flushed when another "File" line
    # appeared, so the last log entry was silently dropped; flush it at EOF
    if row:
        flush_row()

    # write to output excel file
    with pd.ExcelWriter(output_file, engine="openpyxl") as writer:
        data.to_excel(writer, sheet_name="Benchmarks")
        for file_name, sheet in sheets.items():
            sheet.to_excel(writer, sheet_name=file_name)
def _parse_line(line):
    """Match *line* against every known log pattern.

    Returns:
        (key, match) for the first pattern in rx_dict that matches,
        or (None, None) when no pattern matches.
    """
    candidates = ((key, rx.search(line)) for key, rx in rx_dict.items())
    for key, match in candidates:
        if match:
            return key, match
    return None, None
def parse_args(argv):
    """Parse command-line options.

    Supported options:
        -h            print usage and exit
        -o <file>     output Excel file (default: logs.xlsx)
        -f <file>     input log file

    Args:
        argv: argument list without the program name.

    Returns:
        (log_file, output_file) tuple of paths; log_file is "" when -f
        was not supplied.
    """
    output_file = "logs.xlsx"
    log_file = ""
    try:
        # BUG FIX: "-h" is a bare help flag; the previous option string
        # "h:o:f:" forced it to take an argument, so a plain "-h" raised
        # GetoptError and exited with status 2 instead of printing usage.
        opts, _ = getopt.getopt(argv, "ho:f:")
    except getopt.GetoptError:
        print_usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            print_usage()
            sys.exit()
        elif opt == "-o":
            output_file = arg
        elif opt == "-f":
            log_file = arg
    return log_file, output_file
def print_usage():
    """Print the command-line usage string."""
    usage = "parser.py -f <logFile> -o <outputfile>"
    print(usage)
# Script entry point: forward CLI arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
"""Common configure functions for interface"""
# Python
import logging
# Unicon
from unicon.core.errors import SubCommandFailure
# Steps
from pyats.aetest.steps import Steps
log = logging.getLogger(__name__)
def configure_policy_map(device,
                         policy_name,
                         class_map_list
                         ):
    """ Configures policy_map
        Args:
            device ('obj'): device to use
            policy_name ('str'): name of the policy map
            class_map_list ('list'): per-class configuration dicts with keys:
                class_map_name ('str'): name of the class
                policer_val ('int', optional): police rate value
                match_mode ('list', optional): match modes (e.g. dscp, cos)
                matched_value ('list', optional): values paired with match_mode
                table_map_name ('str', optional): table map to apply
                table_map_mode ('str', optional): mode of the table map
            example:
                class_map_list=[{'class_map_name':'test1',
                    'policer_val':2000000000,
                    'match_mode':['dscp','cos'],
                    'matched_value':['cs1','5'],
                    'table_map_name':'table1',
                    'table_map_mode':'dscp'}]
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    log.debug(
        "Configuring policy_map {policy_name} with {class_map_name} ".format(
            policy_name=policy_name,
            class_map_name=class_map_list[0]['class_map_name'],
        )
    )
    config = [f"policy-map {policy_name}"]
    for entry in class_map_list:
        config.append(f"class {entry['class_map_name']}")
        if 'policer_val' in entry:
            config.append(f"police rate {entry['policer_val']}")
        modes = entry.get('match_mode', None)
        values = entry.get('matched_value', None)
        if modes and values:
            # Pair each match mode with its value in order.
            config.extend(
                f"set {mode} {value}" for mode, value in zip(modes, values)
            )
        if 'table_map_name' in entry:
            config.append(
                f"set {entry['table_map_mode']} {entry['table_map_mode']} "
                f"table {entry['table_map_name']}"
            )
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not configure class_map. Error:\n{error}".format(
                error=e
            )
        )
def unconfigure_policy_map(device, policy_name):
    """ Unconfigures policy-map
        Args:
            device ('obj'): device to use
            policy_name ('str'): name of the policy map to remove
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    # BUG FIX: the debug/error text previously said "class_map" and
    # "configure" although this function removes a policy-map.
    log.debug(
        "Unconfiguring policy_map {policy_name}".format(
            policy_name=policy_name,
        )
    )
    cmd = f"no policy-map {policy_name}"
    try:
        device.configure(cmd)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not unconfigure policy_map. Error:\n{error}".format(
                error=e
            )
        )
|
import vcf
import re
import sys
# BUG FIX: the script needs TWO arguments (input and output); the previous
# check (len(sys.argv) < 2) accepted a missing output file and then crashed
# with IndexError on sys.argv[2].
if (len(sys.argv) < 3):
    sys.exit("[USAGE] python correct_ambiguous_bases.py in.vcf.gz out.vcf")
infile = sys.argv[1]
outfile = sys.argv[2]
vcf_reader = vcf.Reader(filename=infile)
vcf_writer = vcf.Writer(open(outfile, 'w'), vcf_reader)
# number of ambiguous bases replaced in the REF/ALT columns
no_ref = 0
no_alt = 0
for record in vcf_reader:
    ref = record.REF
    # Any character outside A/C/G/T (plus '.' and ',') is ambiguous: mask as N.
    if re.search(r"[^ATGC.,]", ref):
        ref = re.sub('[^ACGT.]', 'N', ref)
        no_ref = no_ref + ref.count("N")
    alt = record.ALT
    new_alt = []
    for i in alt:
        i = str(i)
        if re.search(r"[^ATGC.,]", i):
            i = re.sub('[^ACGT.]', 'N', i)
            no_alt = no_alt + i.count("N")
        new_alt.append(i)
    record.REF = ref
    record.ALT = new_alt
    vcf_writer.write_record(record)
print("Number of ambiguous bases in the REF column:{0}".format(no_ref))
print("Number of ambiguous bases in the ALT column:{0}".format(no_alt))
|
import json
import os
import time
from typing import cast, Dict, Optional
import boto3
from sebs.cache import Cache
from sebs.faas.config import Config, Credentials, Resources
from sebs.aws.function import LambdaFunction
from sebs.utils import LoggingHandlers
class AWSCredentials(Credentials):
    """AWS login credentials: an access key paired with a secret key."""
    _access_key: str
    _secret_key: str
    def __init__(self, access_key: str, secret_key: str):
        super().__init__()
        self._access_key = access_key
        self._secret_key = secret_key
    @staticmethod
    def typename() -> str:
        return "AWS.Credentials"
    @property
    def access_key(self) -> str:
        return self._access_key
    @property
    def secret_key(self) -> str:
        return self._secret_key
    @staticmethod
    def initialize(dct: dict) -> Credentials:
        """Build credentials from a dict with access_key/secret_key."""
        return AWSCredentials(dct["access_key"], dct["secret_key"])
    @staticmethod
    def deserialize(config: dict, cache: Cache, handlers: LoggingHandlers) -> Credentials:
        # FIXME: update return types of both functions to avoid cast
        # needs 3.7+ to support annotations
        cached_config = cache.get_config("aws")
        ret: AWSCredentials
        # Prefer values remembered from a previous run.
        if cached_config and "credentials" in cached_config:
            ret = cast(AWSCredentials, AWSCredentials.initialize(cached_config["credentials"]))
            ret.logging_handlers = handlers
            ret.logging.info("Using cached credentials for AWS")
            return ret
        # Fall back to the user-provided config, then to the environment.
        if "credentials" in config:
            ret = cast(AWSCredentials, AWSCredentials.initialize(config["credentials"]))
        elif "AWS_ACCESS_KEY_ID" in os.environ:
            ret = AWSCredentials(
                os.environ["AWS_ACCESS_KEY_ID"], os.environ["AWS_SECRET_ACCESS_KEY"]
            )
        else:
            raise RuntimeError(
                "AWS login credentials are missing! Please set "
                "up environmental variables AWS_ACCESS_KEY_ID and "
                "AWS_SECRET_ACCESS_KEY"
            )
        ret.logging.info("No cached credentials for AWS found, initialize!")
        ret.logging_handlers = handlers
        return ret
    def update_cache(self, cache: Cache):
        """Persist both keys under aws/credentials in the user cache."""
        cache.update_config(val=self.access_key, keys=["aws", "credentials", "access_key"])
        cache.update_config(val=self.secret_key, keys=["aws", "credentials", "secret_key"])
    def serialize(self) -> dict:
        return {"access_key": self.access_key, "secret_key": self.secret_key}
class AWSResources(Resources):
    """AWS resources shared across runs: the Lambda IAM role and the
    HTTP APIs (API Gateway v2) that front deployed functions."""
    class HTTPApi:
        # Value object pairing an API Gateway ARN with its invoke endpoint.
        def __init__(self, arn: str, endpoint: str):
            self._arn = arn
            self._endpoint = endpoint
        @property
        def arn(self) -> str:
            return self._arn
        @property
        def endpoint(self) -> str:
            return self._endpoint
        @staticmethod
        def deserialize(dct: dict) -> "AWSResources.HTTPApi":
            """Inverse of serialize(): rebuild from an {"arn", "endpoint"} dict."""
            return AWSResources.HTTPApi(dct["arn"], dct["endpoint"])
        def serialize(self) -> dict:
            out = {"arn": self.arn, "endpoint": self.endpoint}
            return out
    def __init__(self, lambda_role: str):
        # lambda_role may be "" — the role is then created lazily in lambda_role().
        super().__init__()
        self._lambda_role = lambda_role
        self._http_apis: Dict[str, AWSResources.HTTPApi] = {}
        self._region: Optional[str] = None
    @staticmethod
    def typename() -> str:
        return "AWS.Resources"
    def set_region(self, region: str):
        self._region = region
    def lambda_role(self, boto3_session: boto3.session.Session) -> str:
        """Return the IAM role ARN for Lambda functions, creating the role on
        first use and attaching the S3 + basic-execution policies."""
        if not self._lambda_role:
            iam_client = boto3_session.client(service_name="iam")
            # Trust policy that lets the Lambda service assume this role.
            trust_policy = {
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Principal": {"Service": "lambda.amazonaws.com"},
                        "Action": "sts:AssumeRole",
                    }
                ],
            }
            role_name = "sebs-lambda-role"
            attached_policies = [
                "arn:aws:iam::aws:policy/AmazonS3FullAccess",
                "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
            ]
            try:
                # Reuse the role when it already exists in the account.
                out = iam_client.get_role(RoleName=role_name)
                self._lambda_role = out["Role"]["Arn"]
                self.logging.info(f"AWS: Selected {self._lambda_role} IAM role")
            except iam_client.exceptions.NoSuchEntityException:
                out = iam_client.create_role(
                    RoleName=role_name,
                    AssumeRolePolicyDocument=json.dumps(trust_policy),
                )
                self._lambda_role = out["Role"]["Arn"]
                self.logging.info(
                    f"AWS: Created {self._lambda_role} IAM role. "
                    "Sleep 10 seconds to avoid problems when using role immediately."
                )
                # IAM is eventually consistent; a freshly created role may not
                # be usable right away.
                time.sleep(10)
            # Attach basic AWS Lambda and S3 policies.
            for policy in attached_policies:
                iam_client.attach_role_policy(RoleName=role_name, PolicyArn=policy)
        return self._lambda_role
    def http_api(
        self, api_name: str, func: LambdaFunction, boto3_session: boto3.session.Session
    ) -> "AWSResources.HTTPApi":
        """Return (and memoize) the HTTP API named api_name, creating it in
        API Gateway v2 with *func* as the target when it does not exist."""
        http_api = self._http_apis.get(api_name)
        if not http_api:
            # get apigateway client
            api_client = boto3_session.client(
                service_name="apigatewayv2", region_name=cast(str, self._region)
            )
            # check existing apis
            api_data = None
            for api in api_client.get_apis()["Items"]:
                if api["Name"] == api_name:
                    self.logging.info(f"Using existing HTTP API {api_name}")
                    api_data = api
                    break
            if not api_data:
                self.logging.info(f"Creating HTTP API {api_name}")
                api_data = api_client.create_api(  # type: ignore
                    Name=api_name, ProtocolType="HTTP", Target=func.arn
                )
            api_id = api_data["ApiId"]  # type: ignore
            endpoint = api_data["ApiEndpoint"]  # type: ignore
            # function's arn format is: arn:aws:{region}:{account-id}:{func}
            # easier than querying AWS resources to get account id
            account_id = func.arn.split(":")[4]
            # API arn is:
            arn = f"arn:aws:execute-api:{self._region}:{account_id}:{api_id}"
            http_api = AWSResources.HTTPApi(arn, endpoint)
            self._http_apis[api_name] = http_api
        else:
            self.logging.info(f"Using cached HTTP API {api_name}")
        return http_api
    # FIXME: python3.7+ future annotatons
    @staticmethod
    def initialize(dct: dict) -> Resources:
        """Build resources from a serialized dict; missing keys default to empty."""
        ret = AWSResources(dct["lambda-role"] if "lambda-role" in dct else "")
        if "http-apis" in dct:
            for key, value in dct["http-apis"].items():
                ret._http_apis[key] = AWSResources.HTTPApi.deserialize(value)
        return ret
    def serialize(self) -> dict:
        out = {
            "lambda-role": self._lambda_role,
            "http-apis": {key: value.serialize() for (key, value) in self._http_apis.items()},
        }
        return out
    def update_cache(self, cache: Cache):
        """Persist the role ARN and every HTTP API under aws/resources."""
        cache.update_config(val=self._lambda_role, keys=["aws", "resources", "lambda-role"])
        for name, api in self._http_apis.items():
            cache.update_config(val=api.serialize(), keys=["aws", "resources", "http-apis", name])
    @staticmethod
    def deserialize(config: dict, cache: Cache, handlers: LoggingHandlers) -> Resources:
        """Load resources from the cache when available, else from user config,
        else start with an empty resources object."""
        cached_config = cache.get_config("aws")
        ret: AWSResources
        # Load cached values
        if cached_config and "resources" in cached_config:
            ret = cast(AWSResources, AWSResources.initialize(cached_config["resources"]))
            ret.logging_handlers = handlers
            ret.logging.info("Using cached resources for AWS")
        else:
            # Check for new config
            if "resources" in config:
                ret = cast(AWSResources, AWSResources.initialize(config["resources"]))
                ret.logging_handlers = handlers
                ret.logging.info("No cached resources for AWS found, using user configuration.")
            else:
                ret = AWSResources(lambda_role="")
                ret.logging_handlers = handlers
                ret.logging.info("No resources for AWS found, initialize!")
        return ret
class AWSConfig(Config):
    """Top-level AWS configuration: credentials, resources and region."""
    def __init__(self, credentials: AWSCredentials, resources: AWSResources):
        super().__init__()
        self._credentials = credentials
        self._resources = resources
    @staticmethod
    def typename() -> str:
        return "AWS.Config"
    @property
    def credentials(self) -> AWSCredentials:
        return self._credentials
    @property
    def resources(self) -> AWSResources:
        return self._resources
    # FIXME: use future annotations (see sebs/faas/system)
    @staticmethod
    def initialize(cfg: Config, dct: dict):
        """Copy mutable settings (currently only region) from *dct* onto *cfg*."""
        config = cast(AWSConfig, cfg)
        config._region = dct["region"]
    @staticmethod
    def deserialize(config: dict, cache: Cache, handlers: LoggingHandlers) -> Config:
        cached_config = cache.get_config("aws")
        # FIXME: use future annotations (see sebs/faas/system)
        credentials = cast(AWSCredentials, AWSCredentials.deserialize(config, cache, handlers))
        resources = cast(AWSResources, AWSResources.deserialize(config, cache, handlers))
        config_obj = AWSConfig(credentials, resources)
        config_obj.logging_handlers = handlers
        # Prefer cached settings; fall back to the user-provided config.
        if cached_config:
            config_obj.logging.info("Using cached config for AWS")
            AWSConfig.initialize(config_obj, cached_config)
        else:
            config_obj.logging.info("Using user-provided config for AWS")
            AWSConfig.initialize(config_obj, config)
        # Resources need the region to build regional boto3 clients.
        resources.set_region(config_obj.region)
        return config_obj
    def update_cache(self, cache: Cache):
        """
        Update the contents of the user cache.
        The changes are directly written to the file system.
        Update values: region.
        """
        # BUG FIX (documentation placement): this docstring previously floated
        # between methods as a bare string literal where it documented nothing.
        cache.update_config(val=self.region, keys=["aws", "region"])
        self.credentials.update_cache(cache)
        self.resources.update_cache(cache)
    def serialize(self) -> dict:
        out = {
            "name": "aws",
            "region": self._region,
            "credentials": self._credentials.serialize(),
            "resources": self._resources.serialize(),
        }
        return out
|
class User(object):
    """Plain data holder for one user account."""

    # Class-level defaults; __init__ shadows these with instance attributes.
    _id = None
    _first_name = None
    _last_name = None
    _email = None
    _uuid = None
    _password = None

    def __init__(self, id = "", first_name = "", last_name = "", email = "", uuid = "", password = ""):
        """Store the provided account fields on the instance.

        :param id: primary identifier
        :param first_name: given name
        :param last_name: family name
        :param email: contact email address
        :param uuid: unique external identifier
        :param password: password value, stored exactly as given
        :return:
        """
        self._id = id
        self._uuid = uuid
        self._email = email
        self._password = password
        self._first_name = first_name
        self._last_name = last_name
|
from model.genre import Genre
from schema.genre import GenreSchema
class GenreService:
    """Read-only service facade over the Genre model."""

    @staticmethod
    def get_movie_genres_all() -> dict:
        """Return every genre, serialized through GenreSchema."""
        all_genres = Genre.query.all()
        return GenreSchema(many=True).dump(all_genres)
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 SoloKeys Developers
#
# Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
# http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
# http://opensource.org/licenses/MIT>, at your option. This file may not be
# copied, modified, or distributed except according to those terms.
def to_websafe(data):
    """Convert standard base64 text to the URL-safe alphabet, dropping padding."""
    # '+' -> '-', '/' -> '_', '=' removed entirely — one pass via translate.
    table = str.maketrans({"+": "-", "/": "_", "=": None})
    return data.translate(table)
def from_websafe(data):
    """Convert URL-safe base64 text back to the standard alphabet and re-pad."""
    restored = data.replace("-", "+").replace("_", "/")
    # Re-append '=' padding; valid base64 needs at most two pad characters.
    pad_count = min((3 * len(restored)) % 4, 2)
    return restored + "=" * pad_count
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mimetypes
import os
from compiled_file_system import SingleFile
from directory_zipper import DirectoryZipper
from docs_server_utils import ToUnicode
from future import Gettable, Future
from third_party.handlebar import Handlebar
class ContentAndType(object):
  '''Return value from ContentProvider.GetContentAndType: a file's content
  paired with its MIME content-type.
  '''
  def __init__(self, content, content_type):
    self.content_type = content_type
    self.content = content
class ContentProvider(object):
  '''Returns file contents correctly typed for their content-types (in the HTTP
  sense). Content-type is determined from Python's mimetype library which
  guesses based on the file extension.
  Typically the file contents will be either str (for binary content) or
  unicode (for text content). However, HTML files *may* be returned as
  Handlebar templates (if supports_templates is True on construction), in which
  case the caller will presumably want to Render them.
  '''
  def __init__(self,
               name,
               compiled_fs_factory,
               file_system,
               supports_templates=False,
               supports_zip=False):
    # Public.
    self.name = name
    self.file_system = file_system
    # Private.
    # Compiled-content cache built on top of |file_system|; compilation is
    # delegated to _CompileContent below.
    self._content_cache = compiled_fs_factory.Create(file_system,
                                                     self._CompileContent,
                                                     ContentProvider)
    self._supports_templates = supports_templates
    # Directory zipping is optional; when disabled, .zip requests fall
    # through to a plain file lookup in GetContentAndType.
    if supports_zip:
      self._directory_zipper = DirectoryZipper(compiled_fs_factory, file_system)
    else:
      self._directory_zipper = None
  @SingleFile
  def _CompileContent(self, path, text):
    '''Compiles raw file |text| into a ContentAndType: guesses the mimetype
    from |path|, decodes text types, and optionally wraps HTML in a
    Handlebar template.
    '''
    assert text is not None, path
    mimetype = mimetypes.guess_type(path)[0]
    if mimetype is None:
      # Unknown extension: serve the raw bytes as plain text.
      content = text
      mimetype = 'text/plain'
    elif mimetype == 'text/html':
      content = ToUnicode(text)
      if self._supports_templates:
        content = Handlebar(content, name=path)
    elif (mimetype.startswith('text/') or
          mimetype in ('application/javascript', 'application/json')):
      content = ToUnicode(text)
    else:
      # Binary content passes through untouched.
      content = text
    return ContentAndType(content, mimetype)
  def GetContentAndType(self, path):
    '''Returns the content and type for |path|; directories requested as
    .zip are zipped on the fly when zipping is enabled.
    '''
    path = path.lstrip('/')
    base, ext = os.path.splitext(path)
    # Check for a zip file first, if zip is enabled.
    if self._directory_zipper and ext == '.zip':
      zip_future = self._directory_zipper.Zip(base)
      return Future(delegate=Gettable(
          lambda: ContentAndType(zip_future.Get(), 'application/zip')))
    return self._content_cache.GetFromFile(path)
  def Cron(self):
    # Running Refresh() on the file system is enough to pull GitHub content,
    # which is all we need for now while the full render-every-page cron step
    # is in effect.
    # TODO(kalman): Walk over the whole filesystem and compile the content.
    return self.file_system.Refresh()
  def __repr__(self):
    return 'ContentProvider of <%s>' % repr(self.file_system)
|
import numpy as np
from abc import ABCMeta, abstractmethod
from scipy.spatial import KDTree
class AbstractPopulationGenerator(object):
    """Base class for strategies that create an initial population."""

    __metaclass__ = ABCMeta

    def __init__(self, population_size, **kwargs):
        """Create a new population generator.

        :param population_size: the size of the population to generate
        """
        self._population_size = population_size

    @abstractmethod
    def generate(self, data):
        """Generate a new random population of the given size."""
        pass
class SimplePopulationGenerator(AbstractPopulationGenerator):
    """Generate a population based on randomly shuffling 1D array of the
    indicies of every data point. This makes no attempt to consider any
    heuristic.
    """
    def generate(self, data):
        """Generate a new random population of the given size.

        :param data: array of points, shape (num_points, ...)
        :returns: int array of shape (population_size, num_points), each row
            a random permutation of the point indices
        """
        # BUG FIX: xrange is Python-2-only; range works on both 2 and 3.
        population = np.array([np.random.permutation(data.shape[0])
                               for _ in range(self._population_size)])
        return population
class KNNPopulationGenerator(AbstractPopulationGenerator):
    """Generate a population based using the k nearest neighbours for each
    city.
    """
    def __init__(self, *args, **kwargs):
        AbstractPopulationGenerator.__init__(self, *args, **kwargs)
        # Fraction of the population seeded with purely random permutations.
        self._random_proportion = kwargs.get('random_proportion', 0.5)
        if self._random_proportion < 0 or self._random_proportion > 1.0:
            # BUG FIX: %d truncated the float proportion in the message;
            # %s reports the actual offending value.
            raise ValueError("Probabilities must be in the range 0 <= x <= 1. Value was: %s"
                             % self._random_proportion)
    def generate(self, data):
        """ Generate a new random population of the given size. """
        num_points = data.shape[0]
        knn = KDTree(data, leafsize=10)
        population = []
        proportion_size = (1.0 - self._random_proportion) * self._population_size
        proportion_size = int(np.floor(proportion_size))
        # Seed a proportion of the population with KNN-ordered chromosomes.
        # BUG FIX: xrange is Python-2-only; range works on both 2 and 3.
        for i in range(proportion_size):
            d, chromosome = knn.query(data[i], k=num_points, distance_upper_bound=20)
            population.append(chromosome)
        population = np.array(population)
        # generate random proportion of population
        random_gen = SimplePopulationGenerator(self._population_size - proportion_size)
        population = np.vstack((population, random_gen.generate(data)))
        return population
|
# Import the pandas library
import pandas as pd
# Load the spreadsheet into a DataFrame
data = pd.read_excel("secoes_ibge.xlsx")
# Normalize both text columns to lowercase
data['Secção'] = data['Secção'].str.lower()
data['Divisão'] = data['Divisão'].str.lower()
# Strip accents: decompose (NFKD), drop the non-ASCII bytes, decode back to str
data['Secção'] = data['Secção'].str.normalize('NFKD').str.encode('ascii',errors='ignore').str.decode('utf-8')
data['Divisão'] = data['Divisão'].str.normalize('NFKD').str.encode('ascii',errors='ignore').str.decode('utf-8')
# Write the cleaned data back over the same spreadsheet.
# NOTE(review): the original comment mentioned adding an ID to link items,
# but no ID column is created here — confirm intent. to_excel without
# index=False also writes the DataFrame index as an extra column each run.
data.to_excel("secoes_ibge.xlsx",
              sheet_name='Plan1')
|
from flask import Flask
app = Flask(__name__)
# Shared module-level state, populated elsewhere at startup.
cmdQueueFromUIToAlarm = None  # queue carrying commands from the UI to the alarm logic
currentPlayState = None
playlist = None
# Imported last on purpose: gui.views imports this module back
# (standard Flask circular-import pattern for registering routes).
from gui import views
|
import argparse, firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
def init_firestore(project_name):
    """Initialize the default Firebase app and write a smoke-test document.

    :param project_name: Google Cloud project id to bind the app to.
    """
    # Use Application Default Credentials from the environment.
    cred = credentials.ApplicationDefault()
    firebase_admin.initialize_app(cred, {
        'projectId': project_name,
    })
    client = firestore.client()
    test_doc = client.collection(u'ok').document(u'test-data')
    test_doc.set({
        u'init': u'done!'
    })
    print("Firestore write done")
# Script entry point.
if __name__ == "__main__":
    # Project ID is passed at run-time
    parser = argparse.ArgumentParser()
    # NOTE(review): --project_id is not marked required, so project_id may be
    # None if the flag is omitted — confirm whether it should be required=True.
    parser.add_argument("--project_id", help="Google Cloud project id")
    args = parser.parse_args()
    project_id = args.project_id
    print(f"starting to init smaregi starter kit in Google Cloud project:{project_id}")
    # init Firestore instance
    init_firestore(project_id)
from mpython import *
import socket
mywifi = wifi()  # create the wifi object
mywifi.connectWiFi("ssid", "password")  # join the network
# BUG FIX: bind s before the try block; if socket creation itself failed,
# the cleanup handler used to raise NameError on the undefined name.
s = None
# On any error (including Ctrl-C), fall through to close the socket and wifi.
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # UDP socket
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # allow address reuse
    ip = mywifi.sta.ifconfig()[0]  # local IP address
    s.bind((ip, 6000))  # bind to port 6000
    print('waiting...')
    oled.DispChar("%s:6000" % ip, 0, 0)
    oled.show()
    while True:
        # Receive up to 1024 bytes; returns a (data, addr) tuple.
        data, addr = s.recvfrom(1024)
        print('received:', data, 'from', addr)  # log the received data
        oled.fill(0)  # clear the screen
        oled.DispChar("%s" % data.decode(), 0, 15)  # show the payload
        oled.DispChar("from%s" % addr[0], 0, 31)
        oled.show()
# Bare except is deliberate: on MicroPython it also catches KeyboardInterrupt
# so the socket and the wifi connection are always released.
except:
    if s:
        s.close()
    mywifi.disconnectWiFi()
# Generated by Django 3.2.4 on 2021-06-09 11:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # First migration creating the price-history model for this app.
    initial = True
    dependencies = [
        ('geo', '0005_auto_20210609_1027'),
    ]
    operations = [
        migrations.CreateModel(
            # NOTE(review): model name looks like a typo of "Historico";
            # renaming now would require a follow-up RenameModel migration,
            # so it is left as-is here.
            name='Historioco',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(verbose_name='Fecha')),
                ('price', models.FloatField(verbose_name='Precio')),
                ('monthly_variation', models.FloatField(blank=True, null=True, verbose_name='Variación mensual')),
                ('quarterly_variation', models.FloatField(blank=True, null=True, verbose_name='Variación trimestral')),
                ('annual_variation', models.FloatField(blank=True, null=True, verbose_name='Variación anual')),
                # Deleting the referenced geo.element cascades to its history rows.
                ('type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='geo.element')),
            ],
        ),
    ]
|
import collections
from sigame_tools.filters import (
make_typed_field_filter
)
def make_get_weight(args, types):
    """Build a memoized weight function for records.

    Args:
        args: iterable of (field, pattern, weight) triples, passed to
            generate_field_filters.
        types: mapping of field name -> field type; defines which fields
            contribute to the final weight.

    Returns:
        A callable mapping a record to its weight. With no filters every
        record weighs 1. Otherwise the weight is the mean, over all fields
        in *types*, of the average per-filter weight for that field
        (a non-matching filter contributes 1).
    """
    filters = tuple(generate_field_filters(args=args, types=types))
    if not filters:
        return lambda _: 1
    # Memoization cache: record -> weight (records must be hashable).
    weights = dict()
    def impl(value):
        result = weights.get(value)
        if result is None:
            by_field = collections.defaultdict(list)
            for field, field_filter, weight in filters:
                # A matching filter contributes its weight, otherwise 1.
                by_field[field].append(weight if field_filter(getattr(value, field)) else 1)
            for field in types.keys():
                # Ensure every typed field has at least one entry (defaults to 1).
                v = by_field[field]
                if not v:
                    v.append(1)
            # NOTE(review): after the loop above every field of *types* is
            # present in by_field (defaultdict access inserts it), so the
            # "else 1" branch below is unreachable — kept for safety.
            result = sum(sum(by_field[t]) / len(by_field[t]) if t in by_field else 1 for t in types) / len(types)
            weights[value] = result
        return result
    return impl
def generate_field_filters(args, types):
    """Yield (field, filter, weight) triples, deduplicated by (field, pattern).

    Only the first weight for a repeated (field, pattern) pair is kept.
    Triples whose pattern produces no filter are skipped.
    """
    seen = set()
    for field, pattern, weight in args:
        key = (field, pattern)
        if key in seen:
            continue
        seen.add(key)
        field_filter = make_typed_field_filter(field_type=types[field], pattern=pattern)
        if field_filter:
            yield field, field_filter, weight
|
# -*- encoding: utf-8 -*-
# Check this
# Author: Alex S. Garzão <alexgarzao@gmail.com>
# mandrill_service.py
import logging
import mandrill
class MandrillService:
    '''Class responsible for sending emails using the Mandrill service.
    '''
    def __init__(self, config):
        self.logger = logging.getLogger(__name__)
        self.api_key = config.get('mandrill', 'api_key')
        try:
            self.mandrill_client = mandrill.Mandrill(self.api_key)
        # BUG FIX: "except X, e" is Python-2-only syntax; "as" works on
        # Python 2.6+ and on Python 3.
        except mandrill.Error as e:
            self.logger.error('A mandrill error occurred: %s - %s' % (e.__class__, e))
            raise
        return

    def print_config(self):
        '''Log the service configuration.'''
        self.logger.info('\tMandrill service config')
        self.logger.info('\t\tapi_key: %s' % self.api_key)
        return

    def send(self, email, name, template_name, content, subject = None, attachments = None):
        '''Send the email using the defined template.

        Returns (sent_ok, reject_reason) for a Mandrill response, or a bare
        False when the API call itself fails.
        NOTE(review): the mixed return type (tuple vs False) is kept for
        backward compatibility, but callers that unpack the result will
        break on the error path — consider returning (False, reason).
        '''
        message = {
            'attachments': attachments,
            'global_merge_vars': content,
            'important': False,
            'merge': True,
            'merge_language': 'handlebars',
            'to': [{'email': email, 'name': name, 'type': 'to'}],
            'subject': subject,
        }
        try:
            # BUG FIX: "async" is a reserved word on Python 3.7+, so the
            # literal keyword argument was a syntax error there; passing it
            # via keyword expansion is equivalent and parses on 2 and 3.
            result = self.mandrill_client.messages.send_template(
                template_name=template_name,
                template_content=None,
                message=message,
                ip_pool='Main Pool',
                **{'async': False})
            sent_status = result[0]['status']
            sent_reject_reason = ''
            if 'reject_reason' in result[0]:
                sent_reject_reason = result[0]['reject_reason']
            return (sent_status == 'sent' or sent_status == 'queued'), sent_reject_reason
        except mandrill.Error as e:
            self.logger.info('A mandrill error occurred: %s - %s' % (e.__class__, e))
            return False
|
# https://leetcode.com/problems/unique-number-of-occurrences/
"""
Problem Description
Given an array of integers arr, write a function that returns true if and only if the number of occurrences of each value in the array is unique.
"""
# Time Complexity Space Complexity
# O(n) O(n)
class Solution:
    def uniqueOccurrences(self, arr: list) -> bool:
        """Return True iff each distinct value in arr occurs a unique
        number of times.

        Time: O(n). Space: O(n).
        """
        # BUG FIX: the original annotation used typing.List without
        # importing it, which raises NameError at definition time.
        from collections import Counter
        occurrence_counts = list(Counter(arr).values())
        # Counts are all unique exactly when de-duplicating loses nothing.
        return len(set(occurrence_counts)) == len(occurrence_counts)
|
# -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the instance-groups managed update-instances commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.util.apis import arg_utils
import six
# Keys recognized in the --version-new / --version-original flag maps
# (see ParseVersion and ValidateUpdateInstancesArgs below).
STANDBY_NAME = 'standby'
TARGET_SIZE_NAME = 'target-size'
TEMPLATE_NAME = 'template'
def _ParseFixed(fixed_or_percent_str):
"""Retrieves int value from string."""
if re.match(r'^\d+$', fixed_or_percent_str):
return int(fixed_or_percent_str)
return None
def _ParsePercent(fixed_or_percent_str):
"""Retrieves percent value from string."""
if re.match(r'^\d+%$', fixed_or_percent_str):
percent = int(fixed_or_percent_str[:-1])
return percent
return None
def ParseFixedOrPercent(flag_name, flag_param_name,
                        fixed_or_percent_str, messages):
  """Converts an '<int>' or '<int>%' string into a FixedOrPercent message.

  Args:
    flag_name: name of the flag associated with the parsed string.
    flag_param_name: name of the inner parameter of the flag.
    fixed_or_percent_str: string containing fixed or percent value.
    messages: module containing message classes.

  Returns:
    FixedOrPercent message object, or None when the input is None.

  Raises:
    InvalidArgumentException: when the string is neither form, or the
      percentage exceeds 100.
  """
  if fixed_or_percent_str is None:
    return None
  fixed = _ParseFixed(fixed_or_percent_str)
  if fixed is not None:
    return messages.FixedOrPercent(fixed=fixed)
  percent = _ParsePercent(fixed_or_percent_str)
  if percent is None:
    raise exceptions.InvalidArgumentException(
        flag_name,
        flag_param_name + ' has to be non-negative integer number or percent.')
  if percent > 100:
    raise exceptions.InvalidArgumentException(
        flag_name, 'percentage cannot be higher than 100%.')
  return messages.FixedOrPercent(percent=percent)
def ParseUpdatePolicyType(flag_name, policy_type_str, messages):
  """Maps 'opportunistic'/'proactive' onto the update policy type enum.

  Args:
    flag_name: name of the flag associated with the parsed string.
    policy_type_str: string containing update policy type.
    messages: module containing message classes.

  Returns:
    InstanceGroupManagerUpdatePolicy.TypeValueValuesEnum message enum value.

  Raises:
    InvalidArgumentException: for an unrecognized policy string.
  """
  enum_type = messages.InstanceGroupManagerUpdatePolicy.TypeValueValuesEnum
  if policy_type_str == 'opportunistic':
    return enum_type.OPPORTUNISTIC
  if policy_type_str == 'proactive':
    return enum_type.PROACTIVE
  raise exceptions.InvalidArgumentException(flag_name, 'unknown update policy.')
def ParseReplacementMethod(method_type_str, messages):
  """Converts a replacement-method string ('substitute'/'recreate') to its enum.

  Args:
    method_type_str: string containing the replacement method.
    messages: module containing message classes.

  Returns:
    ReplacementMethodValueValuesEnum enum value.
  """
  enum_type = (messages.InstanceGroupManagerUpdatePolicy
               .ReplacementMethodValueValuesEnum)
  return arg_utils.ChoiceToEnum(method_type_str, enum_type)
def ValidateUpdateInstancesArgs(args):
  """Validates update arguments provided by the user.

  Args:
    args: arguments provided by the user.

  Raises:
    InvalidArgumentException, RequiredArgumentException, ToolException:
      when the combination of --action and version flags is inconsistent.
  """
  if args.action == 'restart':
    # Restart keeps the current version, so version flags are meaningless.
    for flag, value in (('--version-original', args.version_original),
                        ('--version-new', args.version_new)):
      if value:
        raise exceptions.InvalidArgumentException(
            flag, 'can\'t be specified for --action restart.')
  elif args.action == 'replace':
    if not args.version_new:
      raise exceptions.RequiredArgumentException(
          '--version-new',
          'must be specified for --action replace (or default).')
    if not args.version_original and (TARGET_SIZE_NAME in args.version_new):
      if args.version_new[TARGET_SIZE_NAME] != '100%':
        raise exceptions.InvalidArgumentException(
            '--version-new',
            'target-size can\'t be specified if there is only one version.')
      # target-size of 100% on the only version is redundant; drop it.
      del args.version_new[TARGET_SIZE_NAME]
  if (args.version_original and args.version_new and
      (TARGET_SIZE_NAME in args.version_original)
      == (TARGET_SIZE_NAME in args.version_new)):
    raise exceptions.ToolException(
        'Exactly one version must have the target-size specified.')
def ParseVersion(project, flag_name, version_map, resources, messages):
  """Builds an InstanceGroupManagerVersion from a user-supplied map.

  Args:
    project: name of the project.
    flag_name: name of the flag associated with the parsed string.
    version_map: map containing version data provided by the user.
    resources: provides reference for instance template resource.
    messages: module containing message classes.

  Returns:
    InstanceGroupManagerVersion message object.

  Raises:
    InvalidArgumentException: when the template key is missing.
  """
  if TEMPLATE_NAME not in version_map:
    raise exceptions.InvalidArgumentException(flag_name,
                                              'template has to be specified.')
  template_ref = resources.Parse(
      version_map[TEMPLATE_NAME],
      params={'project': project},
      collection='compute.instanceTemplates')
  target_size = None
  if TARGET_SIZE_NAME in version_map:
    target_size = ParseFixedOrPercent(flag_name, TARGET_SIZE_NAME,
                                      version_map[TARGET_SIZE_NAME], messages)
  return messages.InstanceGroupManagerVersion(
      instanceTemplate=template_ref.SelfLink(),
      targetSize=target_size,
      name=version_map.get('name'))
def ParseInstanceActionFlag(flag_name, instance_action_str, messages):
  """Converts an instance-action string into its MinimalAction enum value.

  Args:
    flag_name: name of the flag associated with the parsed string.
    instance_action_str: string containing instance action value.
    messages: module containing message classes.

  Returns:
    MinimalActionValueValuesEnum enum value.

  Raises:
    InvalidArgumentException: for an unrecognized action string.
  """
  enum_type = (messages.InstanceGroupManagerUpdatePolicy
               .MinimalActionValueValuesEnum)
  instance_actions_enum_map = {
      'none': enum_type.NONE,
      'refresh': enum_type.REFRESH,
      'restart': enum_type.RESTART,
      'replace': enum_type.REPLACE,
  }
  try:
    return instance_actions_enum_map[instance_action_str]
  except KeyError:
    raise exceptions.InvalidArgumentException(
        flag_name,
        'unknown instance action: ' + six.text_type(instance_action_str))
def ValidateCanaryVersionFlag(flag_name, version_map):
  """Checks that a canary version map, if given, carries a target size.

  Args:
    flag_name: name of the flag associated with the parsed string.
    version_map: map containing version data provided by the user.

  Raises:
    RequiredArgumentException: when the map lacks the target-size key.
  """
  if not version_map:
    return
  if TARGET_SIZE_NAME in version_map:
    return
  raise exceptions.RequiredArgumentException(
      '{} {}={}'.format(flag_name, TARGET_SIZE_NAME,
                        TARGET_SIZE_NAME.upper()),
      'target size must be specified for canary version')
def ValidateIgmReference(igm_ref):
  """Raises ValueError unless igm_ref points at a (regional) instance group manager."""
  valid_collections = ('compute.instanceGroupManagers',
                       'compute.regionInstanceGroupManagers')
  if igm_ref.Collection() not in valid_collections:
    raise ValueError('Unknown reference type {0}'.format(
        igm_ref.Collection()))
|
from pathlib import Path
from seaborn import color_palette
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import models, transforms, utils
import copy
from utils import *
from PIL import Image
# Load the pretrained VGG19 model (the comment originally said "Alexnet",
# but the code clearly loads VGG19).
model = models.vgg19(pretrained=True).features
model_conv = models.vgg19(pretrained=True)
# for name, child in model_conv.named_children():
#     for name2, params in child.named_parameters():
#         print(name, name2)
# Freeze a deep-copied, eval-mode snapshot of the feature extractor and keep
# only the first 17 layers.
model= copy.deepcopy(model.eval())
model = model[:17]
# First conv layer's weight/bias tensors of the full VGG19.
old_weights=list(model_conv.parameters())[0]
old_bias=list(model_conv.parameters())[1]
# interest_wight=list(old_weights.parameters())[0].sum(dim=1, keepdim=True)
# Collapse RGB input channels to one by summing the first conv's kernels.
interest_wight=list(model_conv.parameters())[0].sum(dim=1, keepdim=True)
# Replace the first conv with a single-channel (grayscale) input layer.
# NOTE(review): VGG19's first conv uses kernel_size=3, padding=1 with 64
# output channels, so the shape here matches, but the weight transplant
# below never happens — see next note.
model_conv.features[0]= nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1)
# list(model_conv.features[0].parameters())[0]=interest_wight
# NOTE(review): these two assignments are no-ops — list(...) builds a fresh
# Python list, so assigning to its elements does not touch the model's
# parameters. The intended copy would need e.g.
# `model_conv.features[0].weight.data.copy_(...)`.
list(model_conv.parameters())[0]=old_weights[:,0:1,:,:]
list(model_conv.parameters())[1]=old_bias
# Create a custom classifier head on top of the pretrained network.
class CustomModelnet(nn.Module):
    """Feature extractor + re-sized classifier with a sigmoid output.

    NOTE(review): at this point the module-level ``model`` is ``model[:17]``,
    a plain ``nn.Sequential`` of feature layers; it has no ``.features`` or
    ``.classifier`` attributes, so instantiating this class raises
    AttributeError as written. ``model_conv`` (the full VGG19) was probably
    intended — confirm before use. The ``256 * 6 * 6`` flatten size is the
    AlexNet feature size, not VGG19's — verify as well.
    """
    def __init__(self, num_classes):
        super(CustomModelnet, self).__init__()
        self.features = nn.Sequential(*list(model.features.children()))
        # Keep classifier layers 1,2,4,5 and append a fresh output layer.
        self.classifier = nn.Sequential(*[list(model.classifier.children())[i] for i in [1, 2, 4, 5]], nn.Linear(4096, num_classes),
                                        nn.Sigmoid()
                                        )
        1  # NOTE(review): stray no-op expression — leftover; safe to delete.

    def forward(self, x):
        x = self.features(x)
        # Flatten feature maps before the fully-connected classifier.
        x = x.view(x.size(0), 256 * 6 * 6)
        x = self.classifier(x)
        return x
# Instantiate the custom model (NOTE(review): see CustomModelnet — this
# raises AttributeError as currently written).
model2 = CustomModelnet(num_classes=10)
#
# # load the original Alexnet model
# model = models.alexnet(pretrained=True)
#
# # create custom Alexnet
# class CustomAlexnet(nn.Module):
# def __init__(self, num_classes):
# super(CustomAlexnet, self).__init__()
# self.features = nn.Sequential(*list(model.features.children()))
# self.classifier = nn.Sequential(*[list(model.classifier.children())[i] for i in [1, 2, 4, 5]], nn.Linear(4096, num_classes),
# nn.Sigmoid()
# )
# 1
#
# def forward(self, x):
# x = self.features(x)
# x = x.view(x.size(0), 256 * 6 * 6)
# x = self.classifier(x)
# return x
#
# # load custom model
# model2 = CustomAlexnet(num_classes=10) |
'''
This is the main script that :
1. Parses the client API request.
2. Queries and Fetches the data from IMDb.
3. Extracts and Parses the response into JSON format which is then relayed back to the client.
'''
import requests
from bs4 import BeautifulSoup
import re
def SearchApi(title,title_type,release_date,user_rating,genres,colors,keywords,plot,adult,count):
    """Query IMDb advanced title search and scrape results into dicts.

    All parameters are strings that are concatenated verbatim into the IMDb
    search query string.
    NOTE(review): values are not URL-encoded — titles containing '&', '=' or
    spaces will corrupt the query; consider urllib.parse.urlencode.

    Returns a list of dicts (one per result) on success, or the string
    "ERROR" on any failure. The bare excepts below are deliberate
    best-effort parsing: each optional field falls back to None when its
    markup is absent.
    """
    # Building the query
    query='title='+title+'&title_type='+title_type+'&release_date='+release_date+'&user_rating='+user_rating+'&genres='+genres+'&colors='+colors+'&keywords='+keywords+'&plot='+plot+'&adult='+adult+'&count='+count
    # Requesting the search result
    try:
        resp = requests.get('https://www.imdb.com/search/title?'+query)
        soup = BeautifulSoup(resp.text,"html.parser")
        response=[]
        # Extracting Data and Parsing the response into JSON Format
        for movie in soup.find_all('div',class_='lister-item'):
            header = movie.find('h3',class_='lister-item-header')
            # Title text: drop the rank number and year around the name.
            name = ' '.join(header.text.split('\n')[2:-1]).strip()
            # IMDb title id, e.g. 'tt0111161' (shadows builtin `id`).
            id = header.find('a')['href'].split('/')[2]
            try:
                details = movie.find_all('p',class_='text-muted')
                try:
                    runtime = details[0].find('span',class_='runtime').text.strip()
                except:
                    runtime=None
                try:
                    genre = details[0].find('span',class_='genre').text.strip()
                except:
                    genre=None
                # 'Add a Plot' is IMDb's placeholder for a missing synopsis.
                desc = details[1].text.strip() if details[1].text.strip()!='Add a Plot' else None
                if desc is not None:
                    desc=desc.split('...')[0]+'...'
            except:
                runtime = None
                genre = None
                desc = None
            try:
                rating = movie.find('div',class_='ratings-imdb-rating').text.strip()
            except:
                rating=None
            cast = movie.find_all('p')
            # Votes / gross block: the label combinations determine which
            # split fragments belong to which field.
            try:
                vg = movie.find('p',class_='sort-num_votes-visible').text
                t=re.split('Votes:|Gross:|Vote:|\|',vg)
                t = [ i.replace('\n','') for i in t if i.strip()!='']
                # print('hello',t,'\n')
                if 'Vote' in vg and 'Gross' not in vg:
                    vote = t[0]
                    gross = None
                elif 'Vote' not in vg and 'Gross' in vg:
                    vote = None
                    gross = t[0]
                elif 'Vote' not in vg and 'Gross' not in vg:
                    vote = None
                    gross = None
                else:
                    vote=t[0]
                    gross=t[1]
                # With a votes block present, the cast paragraph is the
                # second-to-last <p>; otherwise it is the last one.
                cast=cast[-2]
            except:
                cast=cast[-1]
                vote=None
                gross=None
            # Directors / stars block, same label-combination strategy.
            try:
                x= cast.find_all('a')
                if x == []:
                    raise Exception('Not Right Block')
                cast=cast.text
                t=re.split('Directors:|Stars:|Star:|Director:|\|',cast)
                t = [ i.replace('\n','') for i in t if i.strip()!='']
                if 'Director' in cast and 'Star' not in cast:
                    director = t[0]
                    star = None
                elif 'Director' not in cast and 'Star' in cast:
                    director = None
                    star = t[0]
                elif 'Director' not in cast and 'Star' not in cast:
                    director = None
                    star = None
                else:
                    director=t[0]
                    star=t[1]
            except:
                director = None
                star = None
            # Poster URL; rewrite to a fixed-width variant unless it is the
            # "no picture" placeholder.
            img = movie.find('img',class_='loadlate')['loadlate']
            if 'nopicture' not in img:
                img = img.split('._V1_')[0]+'._V1_UX300_CR0,0,,300_AL_.jpg'
            response.append({'idm_id':id,
                             'name':name,
                             'poster':img,
                             'plot':desc,
                             'runtime':runtime,
                             'genre':genre,
                             'rating':rating,
                             'directors':director,
                             'stars':star,
                             'votes':vote,
                             'gross':gross})
        # A list of Dictionaries is returned to the client
        return response
    except Exception as e:
        print('SERVER ERROR :',e)
        return "ERROR"
"""
In-memory session.
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Jan 8, 2013.
"""
from collections import MutableSequence
from collections import MutableSet
from everest.constants import RELATION_OPERATIONS
from everest.constants import RESOURCE_ATTRIBUTE_KINDS
from everest.entities.traversal import AruVisitor
from everest.entities.utils import get_entity_class
from everest.repositories.base import AutocommittingSessionMixin
from everest.repositories.base import Session
from everest.repositories.base import SessionFactory
from everest.repositories.memory.cache import EntityCache
from everest.repositories.memory.querying import MemoryRepositoryQuery
from everest.repositories.state import EntityState
from everest.repositories.uow import UnitOfWork
from everest.traversal import SourceTargetDataTreeTraverser
from pyramid.compat import iteritems_
from threading import local
from transaction.interfaces import IDataManager
from zope.interface import implementer # pylint: disable=E0611,F0401
import transaction
__docformat__ = 'reStructuredText en'
__all__ = ['DataManager',
'MemoryAutocommittingSession',
'MemorySession',
'MemorySessionFactory',
]
class MemorySession(Session):
    """
    Session object.

    The session
     * Holds a Unit Of Work;
     * Serves as identity and slug map;
     * Performs synchronized commit on repository;
     * Sets up data manager to hook into transaction.
    """
    IS_MANAGING_BACKREFERENCES = True

    def __init__(self, repository, query_class=None, clone_on_load=True):
        """
        :param repository: backing memory repository.
        :param query_class: query factory; defaults to MemoryRepositoryQuery.
        :param clone_on_load: when True, entities loaded from the repository
            are deep-cloned into the session (see :meth:`load`).
        """
        self.__repository = repository
        self.__unit_of_work = UnitOfWork()
        # One EntityCache per entity class, built lazily.
        self.__cache_map = {}
        if query_class is None:
            query_class = MemoryRepositoryQuery
        self.__query_class = query_class
        self.__clone_on_load = clone_on_load
        # Set by __traverse; cleared after a successful flush.
        self.__needs_flushing = False
        # Re-entrancy guard for flush().
        self.__is_flushing = False

    def get_by_id(self, entity_class, entity_id):
        """Return the cached entity with the given ID, flushing first."""
        if self.__needs_flushing:
            self.flush()
        cache = self.__get_cache(entity_class)
        return cache.get_by_id(entity_id)

    def get_by_slug(self, entity_class, entity_slug):
        """Return the cached entities with the given slug, flushing first."""
        if self.__needs_flushing:
            self.flush()
        cache = self.__get_cache(entity_class)
        ents = cache.get_by_slug(entity_slug)
        # # When the entity is not found in the cache, it may have been added
        # # with an undefined slug; we therefore attempt to look it up in the
        # # list of pending NEW entities.
        # if ent is None:
        #     for new_ent in self.__unit_of_work.get_new(entity_class):
        #         if new_ent.slug == entity_slug:
        #             ent = new_ent
        #             break
        return ents

    def add(self, entity_class, data):
        """Register *data* for addition via a traversal (see __traverse)."""
        self.__traverse(entity_class, data, None, RELATION_OPERATIONS.ADD)

    def remove(self, entity_class, data):
        """Register *data* for removal via a traversal."""
        self.__traverse(entity_class, None, data, RELATION_OPERATIONS.REMOVE)

    def update(self, entity_class, data, target=None):
        """Register an update of *target* (or looked-up entity) from *data*."""
        return self.__traverse(entity_class, data, target,
                               RELATION_OPERATIONS.UPDATE)

    def query(self, entity_class):
        """Return a new query over *entity_class*, flushing pending changes."""
        if self.__needs_flushing:
            self.flush()
        return self.__query_class(entity_class, self, self.__repository)

    def flush(self):
        """Flush pending changes to the repository and rebuild caches."""
        if self.__needs_flushing and not self.__is_flushing:
            self.__is_flushing = True
            with self.__repository.lock:
                self.__repository.flush(self.__unit_of_work)
            self.__is_flushing = False
            for ent_cls in self.__cache_map.keys():
                # The flush may have auto-generated IDs for NEW entities,
                # so we rebuild the cache.
                cache = self.__get_cache(ent_cls)
                cache.rebuild(self.__unit_of_work.get_new(ent_cls))
            self.__needs_flushing = False

    def begin(self):
        """Start a fresh unit of work."""
        self.__unit_of_work.reset()

    def commit(self):
        """Commit the unit of work to the repository under its lock."""
        with self.__repository.lock:
            self.__repository.commit(self.__unit_of_work)
        self.__unit_of_work.reset()
        self.__cache_map.clear()

    def rollback(self):
        """Discard the unit of work, rolling the repository back."""
        with self.__repository.lock:
            self.__repository.rollback(self.__unit_of_work)
        self.__unit_of_work.reset()
        self.__cache_map.clear()

    def reset(self):
        """Alias for :meth:`rollback`."""
        self.rollback()

    def load(self, entity_class, entity):
        """
        Load the given repository entity into the session and return a
        clone. If it was already loaded before, look up the loaded entity
        and return it.

        All entities referenced by the loaded entity will also be loaded
        (and cloned) recursively.

        :raises ValueError: When an attempt is made to load an entity that
          has no ID
        """
        if self.__needs_flushing:
            self.flush()
        if entity.id is None:
            raise ValueError('Can not load entity without an ID.')
        cache = self.__get_cache(entity_class)
        sess_ent = cache.get_by_id(entity.id)
        if sess_ent is None:
            if self.__clone_on_load:
                sess_ent = self.__clone(entity, cache)
            else: # Only needed by the nosql backend pragma: no cover
                cache.add(entity)
                sess_ent = entity
            self.__unit_of_work.register_clean(entity_class, sess_ent)
        return sess_ent

    @property
    def new(self):
        # Entities registered NEW in the unit of work.
        return self.__unit_of_work.get_new()

    @property
    def deleted(self):
        # Entities registered DELETED in the unit of work.
        return self.__unit_of_work.get_deleted()

    def __contains__(self, entity):
        # Membership test against this session's per-class cache.
        cache = self.__cache_map.get(type(entity))
        if not cache is None:
            found = entity in cache
        else:
            found = False
        return found

    def __traverse(self, entity_class, source_data, target_data, rel_op):
        # Run an Add/Remove/Update traversal over the data tree; the visitor
        # calls back into __add/__remove/__update for each node.
        agg = self.__repository.get_aggregate(entity_class)
        trv = SourceTargetDataTreeTraverser.make_traverser(source_data,
                                                           target_data,
                                                           rel_op,
                                                           accessor=agg)
        vst = AruVisitor(entity_class,
                         self.__add, self.__remove, self.__update)
        trv.run(vst)
        # Indicate that we need to flush the changes.
        self.__needs_flushing = True
        return vst.root

    def __add(self, entity):
        # Visitor callback: register *entity* as NEW (or revive a deleted
        # one) and put it in the cache.
        entity_class = type(entity)
        cache = self.__get_cache(entity_class)
        # We allow adding the same entity multiple times.
        if not (not entity.id is None
                and cache.get_by_id(entity.id) is entity):
            if not self.__unit_of_work.is_marked_deleted(entity):
                self.__unit_of_work.register_new(entity_class, entity)
                # FIXME: This is only necessary if the call above re-uses
                #        an existing state, in which case it needs to be
                #        marked as pending explicitly. Consider rewriting
                #        this whole method.
                self.__unit_of_work.mark_pending(entity)
                if not entity.id is None and cache.has_id(entity.id):
                    raise ValueError('Duplicate entity ID "%s".' % entity.id)
            else:
                if self.__unit_of_work.is_marked_pending(entity):
                    # The changes were not flushed yet; just mark as clean.
                    self.__unit_of_work.mark_clean(entity)
                else:
                    self.__unit_of_work.mark_new(entity)
                    self.__unit_of_work.mark_pending(entity)
            cache.add(entity)

    def __remove(self, entity):
        # Visitor callback: register *entity* as DELETED (or un-add a NEW
        # one) and evict it from the cache.
        entity_class = type(entity)
        if not self.__unit_of_work.is_registered(entity):
            if entity.id is None:
                raise ValueError('Can not remove un-registered entity '
                                 'without an ID')
            self.__unit_of_work.register_deleted(entity_class, entity)
        elif not self.__unit_of_work.is_marked_new(entity):
            self.__unit_of_work.mark_deleted(entity)
        else:
            if self.__unit_of_work.is_marked_pending(entity):
                # The changes were not flushed yet; just mark as clean.
                self.__unit_of_work.mark_clean(entity)
            else:
                self.__unit_of_work.mark_deleted(entity)
                self.__unit_of_work.mark_pending(entity)
        cache = self.__get_cache(entity_class)
        if entity in cache:
            cache.remove(entity)

    def __update(self, source_data, target_entity): # pylint: disable=W0613
        # Visitor callback: overwrite the target's state with the source's.
        EntityState.set_state_data(target_entity, source_data)
        if self.__unit_of_work.is_marked_persisted(target_entity):
            self.__unit_of_work.mark_pending(target_entity)

    def __get_cache(self, entity_class):
        # Lazily create the per-class entity cache.
        cache = self.__cache_map.get(entity_class)
        if cache is None:
            cache = self.__cache_map[entity_class] = EntityCache()
        return cache

    def __clone(self, entity, cache):
        # Deep-clone *entity* into the session, recursively loading (and
        # cloning) every referenced member / collection entity.
        clone = object.__new__(entity.__class__)
        # We add the clone with its ID set to the cache *before* we load it
        # so that circular references will work.
        clone.id = entity.id
        cache.add(clone)
        state = EntityState.get_state_data(entity)
        id_attr = None
        for attr, value in iteritems_(state):
            if attr.entity_attr == 'id':
                id_attr = attr
                continue
            attr_type = attr.attr_type
            if attr.kind != RESOURCE_ATTRIBUTE_KINDS.TERMINAL \
               and not self.__repository.is_registered_resource(attr_type):
                # Prevent loading of entities from other repositories.
                # FIXME: Doing this here is inconsistent, since e.g. the RDB
                #        session does not perform this kind of check.
                continue
            elif attr.kind == RESOURCE_ATTRIBUTE_KINDS.MEMBER \
                 and not value is None:
                ent_cls = get_entity_class(attr_type)
                new_value = self.load(ent_cls, value)
                state[attr] = new_value
            elif attr.kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION \
                 and len(value) > 0:
                value_type = type(value)
                new_value = value_type.__new__(value_type)
                if issubclass(value_type, MutableSequence):
                    add_op = new_value.append
                elif issubclass(value_type, MutableSet):
                    add_op = new_value.add
                else:
                    raise ValueError('Do not know how to clone value of type '
                                     '%s for resource attribute %s.'
                                     % (type(new_value), attr))
                ent_cls = get_entity_class(attr_type)
                for child in value:
                    child_clone = self.load(ent_cls, child)
                    add_op(child_clone)
                state[attr] = new_value
        # We set the ID already above.
        if not id_attr is None:
            del state[id_attr]
        EntityState.set_state_data(clone, state)
        return clone
class MemoryAutocommittingSession(AutocommittingSessionMixin, MemorySession):
    """
    Autocommitting session in memory.

    Behavior comes entirely from the mixin + MemorySession; no overrides.
    """
    pass
class MemorySessionFactory(SessionFactory):
    """
    Factory for :class:`MemorySession` instances.

    The factory creates exactly one session per thread.
    """
    def __init__(self, repository, query_class=None, clone_on_load=True):
        SessionFactory.__init__(self, repository)
        # threading.local: each thread sees its own `session` attribute.
        sess_reg = local()
        self.__session_registry = sess_reg
        self.__query_class = query_class
        self.__clone_on_load = clone_on_load

    def reset(self):
        """Reset and discard the current thread's session, if any."""
        session = getattr(self.__session_registry, 'session', None)
        if not session is None:
            session.reset()
            self.__session_registry.session = None

    def __call__(self):
        """Return the thread-local session, creating (and optionally
        joining the current Zope transaction) on first use."""
        session = getattr(self.__session_registry, 'session', None)
        if session is None:
            # Autocommitting repositories get the autocommitting session.
            if not self._repository.autocommit:
                session = MemorySession(self._repository,
                                        query_class=self.__query_class,
                                        clone_on_load=self.__clone_on_load)
            else:
                session = MemoryAutocommittingSession(
                    self._repository,
                    query_class=self.__query_class,
                    clone_on_load=self.__clone_on_load)
            self.__session_registry.session = session
            if self._repository.join_transaction is True:
                self.__session_registry.data_manager = DataManager(session)
        if self._repository.join_transaction is True:
            trx = transaction.get()
            dm = self.__session_registry.data_manager
            # We have a new transaction that we need to join.
            if not dm.transaction is trx:
                trx.join(dm)
                dm.transaction = trx
        return session
@implementer(IDataManager)
class DataManager(object):
    """
    Data manager to plug a :class:`MemorySession` into a Zope transaction.
    """
    # TODO: implement safepoints.
    def __init__(self, session):
        self.__session = session
        # Transaction this manager has joined; set by MemorySessionFactory.
        self.transaction = None

    def abort(self, trans): # pylint: disable=W0613
        # Transaction aborted before two-phase commit started.
        self.__session.rollback()

    def tpc_begin(self, trans): # pylint: disable=W0613
        pass

    def commit(self, trans): # pylint: disable=W0613
        # First commit phase: push the session's unit of work.
        self.__session.commit()

    def tpc_vote(self, trans): # pylint: disable=W0613
        pass

    def tpc_finish(self, trans):
        pass

    def tpc_abort(self, trans): # pylint: disable=W0613
        self.__session.rollback()

    def sortKey(self):
        # Deterministic ordering of data managers during commit.
        return "everest:%d" % id(self.__session)
|
"""
Logging Utilities
Written by Patrick Coady (pat-coady.github.io)
"""
import numpy as np
import os
import shutil
import glob
import csv
class Logger(object):
    """Simple training logger: saves to a CSV file and optionally prints
    to stdout.

    Usage: repeatedly call ``log`` to accumulate fields, ``write`` once per
    episode to persist (and optionally display) them, and ``close`` when done.
    """

    def __init__(self, logname, now):
        """
        Args:
            logname: name for log (e.g. 'Hopper-v1')
            now: unique sub-directory name (e.g. date/time string)
        """
        path = os.path.join('log_files', logname, now)
        os.makedirs(path)  # 'now' is expected to be unique per run
        path = os.path.join(path, 'log.csv')
        self.write_header = True
        self.log_entry = {}
        # newline='' is required for files handed to the csv module;
        # without it the writer emits blank rows on Windows.
        self.f = open(path, 'w', newline='')
        self.writer = None  # DictWriter created with first call to write() method

    def write(self, display=True):
        """ Write 1 log entry to file, and optionally to stdout

        Log fields preceded by '_' will not be printed to stdout

        Args:
            display: boolean, print to stdout
        """
        if display:
            self.disp(self.log_entry)
        if self.write_header:
            # Field set is frozen by the first entry written.
            fieldnames = list(self.log_entry.keys())
            self.writer = csv.DictWriter(self.f, fieldnames=fieldnames)
            self.writer.writeheader()
            self.write_header = False
        self.writer.writerow(self.log_entry)
        self.log_entry = {}

    @staticmethod
    def disp(log):
        """Print metrics to stdout.

        Requires the '_Episode', '_AvgRewardSum' and
        '_AvgDiscountedRewardSum' keys to be present in *log*.
        """
        log_keys = sorted(log.keys())
        print('***** Episode {}, Mean Return = {:.1f}, Mean Discounted Return = {:.1f} *****'.format(log['_Episode'],
                                                                                                     log['_AvgRewardSum'], log['_AvgDiscountedRewardSum']))
        for key in log_keys:
            if key[0] != '_':  # don't display log items with leading '_'
                print('{:s}: {:.3g}'.format(key, log[key]))
        print('\n')

    def log(self, items):
        """ Update fields in log (does not write to file, used to collect updates.

        Args:
            items: dictionary of items to update
        """
        self.log_entry.update(items)

    def close(self):
        """ Close log file - log cannot be written after this """
        self.f.close()
|
#!/usr/bin/python3
"""
Create a storage object from the selected storage engine
"""
from os import getenv

# Select the storage backend from the environment: "db" picks the
# database engine, anything else (including unset) the file engine.
storage_t = getenv('PERI_TYPE_STORAGE')
if storage_t == "db":
    from models.engine.db_storage import DBStorage
    storage = DBStorage()
else:
    from models.engine.file_storage import FileStorage
    storage = FileStorage()
# Populate the storage object from its persistence layer at import time.
storage.reload()
|
import json
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../../'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SMS_18.settings")
import django
django.setup()
from main.models import Stock
import time
import random
# Replay the day's prices: dictionary maps product name -> list of prices,
# one entry per minute (the outer loop iterates over the length of the
# first product's price list and sleeps 60s between ticks).
with open('day1.json') as jsonfile:
    dictionary=json.load(jsonfile)
print(dictionary)
j = 0
for i in range(len(dictionary[list(dictionary.keys())[0]])):
    for key in dictionary:
        print(key)
        stock = Stock.objects.get(product_name__iexact=key)
        stock.initial_price = dictionary[key][0]
        stock.stock_price = dictionary[key][j]
        stock.save()
        # NOTE(review): `//` is floor division, so price_trend collapses to
        # 0 or -1 for most price moves — `/` (a fractional change) was
        # probably intended; confirm against how the UI uses price_trend.
        stock.price_trend = (stock.stock_price-stock.initial_price)//stock.initial_price
        stock.save()
    j += 1
    time.sleep(60)
|
from sonos.config import local_store
ACTIVE_HOUSEHOLD_FILE = "household.json"
def save_active_household(household):
    # Persist the active household to the local JSON store.
    local_store.save(ACTIVE_HOUSEHOLD_FILE, household)
def get_active_household():
    # Load the previously saved active household from the local JSON store.
    return local_store.load(ACTIVE_HOUSEHOLD_FILE)
|
#Joseph Daly
def dummy(scooby):
    """Print "shaggy" when scooby equals 1, otherwise print "velma"."""
    message = "shaggy" if scooby == 1 else "velma"
    print(message)
|
'''
Created on 23 mars 2014
@author: guillaume
'''
import os,sys
from unittest import TestCase
from tests.utils.utils import utils
s = sys.path.append(utils().buildPath(os.getcwd())+"/vagrant/optraj.istic.univ-rennes1.fr/src/")
from interfacebdd.SiteDAO import SiteDAO
from system.Site import Site
from system.Position import Position
from datetime import date
from interfacebdd.Connexion import Connexion
class test_siteDAO(TestCase):
    """Test the stand-alone module functions.

    NOTE(review): this file is Python 2 (leading-zero literals like ``001``
    are a SyntaxError on Python 3). The four tests share an identical
    connect / build-fixture / teardown skeleton that could be factored into
    a helper.
    """
    def test_insert(self):
        # Insert a site and read it back; DB-generated keys are copied onto
        # the inserted object before comparing.
        d = Connexion().connect()
        try:
            pos = Position(num=-1, long=001, lat=002, address="test")
            siteIns = Site(num=-1, numSite="FR52005", name="champie", siteMaster="test", siteManager="test2", position=pos, dateInit=date(2013, 1, 1), dateEnd=date(2013, 2, 1), color="#6390df", phases=set())
            res = SiteDAO().insert(d, siteIns)
            siteRes = SiteDAO().getById(d, False, [], res)
            siteIns.num=siteRes.num
            siteIns.position.num = siteRes.position.num
            utils().equal(self, siteIns, siteRes)
        except:
            Connexion().exception()
        finally:
            Connexion().disconnect(d)

    def test_get(self):
        # Same round-trip as test_insert but with a numeric-only site code.
        d = Connexion().connect()
        try:
            pos = Position(num=-1, long=001, lat=002, address="test")
            siteIns = Site(num=-1, numSite="52005", name="champie", siteMaster="test", siteManager="test2", position=pos, dateInit=date(2013, 1, 1), dateEnd=date(2013, 2, 1), color="#6390df", phases=set())
            res = SiteDAO().insert(d, siteIns)
            siteRes = SiteDAO().getById(d, False, [], res)
            siteIns.num = siteRes.num
            siteIns.position.num = siteRes.position.num
            utils().equal(self, siteIns, siteRes)
        except:
            Connexion().exception()
        finally:
            Connexion().disconnect(d)

    def test_delete(self):
        # After deletion, getById is expected to raise AttributeError.
        d = Connexion().connect()
        try:
            pos = Position(num=-1, long=001, lat=002, address="test")
            siteIns = Site(num=-1, numSite="52005", name="champie", siteMaster="test", siteManager="test2", position=pos, dateInit=date(2013, 1, 1), dateEnd=date(2013, 2, 1), color="#6390df", phases=set())
            res = SiteDAO().insert(d, siteIns)
            SiteDAO().deleteById(d, res)
            siteRes =self.assertRaises(AttributeError, SiteDAO().getById, d, False, [], res)
            print(siteRes)
        except:
            Connexion().exception()
        finally:
            Connexion().disconnect(d)

    def test_update(self):
        # Rename the inserted site, persist, and compare the re-read copy.
        d = Connexion().connect()
        try:
            pos = Position(num=-1, long=001, lat=002, address="test")
            siteIns = Site(num=-1, numSite="FR52005", name="champie", siteMaster="test", siteManager="test2", position=pos, dateInit=date(2013, 1, 1), dateEnd=date(2013, 2, 1), color="#6390df", phases=set())
            res = SiteDAO().insert(d, siteIns)
            siteRes = SiteDAO().getById(d, False, [], res)
            siteRes.name = "tutu"
            SiteDAO().update(d, siteRes)
            siteRes2 = SiteDAO().getById(d, False, [], res)
            utils().equal(self, siteRes, siteRes2)
        except:
            Connexion().exception()
        finally:
            Connexion().disconnect(d)
|
'''
Narrow phase ray- triangle intersection
'''
import numpy as np
from ..constants import tol
from ..util import diagonal_dot
def rays_triangles_id(triangles,
                      rays,
                      ray_candidates=None,
                      return_any=False):
    '''
    Intersect a set of rays and triangles.

    Arguments
    ---------
    triangles: (n, 3, 3) float array of triangle vertices
    rays: (m, 2, 3) float array of ray start, ray directions
    ray_candidates: (m, *) int array of which triangles are candidates
                    for the ray.
    return_any: bool, exit loop early if any ray hits any triangle
                and change output of function to bool

    Returns
    ---------
    if return_any:
        hit: bool, whether the set of rays hit any triangle
    else:
        intersections: (m) sequence of triangle indexes hit by rays
    '''
    # default set of candidate triangles to be queried
    # is every triangle. this is very slow
    # (np.bool was removed from NumPy; the builtin bool is the dtype alias)
    candidates = np.ones(len(triangles), dtype=bool)
    hits = [None] * len(rays)
    for ray_index, ray in enumerate(rays):
        if ray_candidates is not None:
            # restrict the query to the precomputed candidates for this ray
            candidates = ray_candidates[ray_index]
        # query the triangle candidates
        hit = ray_triangles(triangles[candidates], *ray)
        if return_any:
            if hit.any():
                return True
        else:
            # map the hit mask back onto triangle indexes
            hits[ray_index] = np.array(candidates)[hit]
    if return_any:
        return False
    return np.array(hits)
def ray_triangles(triangles,
                  ray_origin,
                  ray_direction):
    '''
    Intersection of multiple triangles and a single ray.

    Moller-Trumbore intersection algorithm.

    Arguments
    ---------
    triangles: (n, 3, 3) float array of triangle vertices
    ray_origin: (3,) float, ray start point
    ray_direction: (3,) float, ray direction vector

    Returns
    ---------
    candidates: (n,) bool array, True where the ray hits the triangle
    '''
    # np.bool was removed from NumPy; the builtin bool is the dtype alias
    candidates = np.ones(len(triangles), dtype=bool)

    # edge vectors and vertex locations in (n,3) format
    vert0 = triangles[:, 0, :]
    vert1 = triangles[:, 1, :]
    vert2 = triangles[:, 2, :]
    edge0 = vert1 - vert0
    edge1 = vert2 - vert0

    # P is a vector perpendicular to the ray direction and one
    # triangle edge.
    P = np.cross(ray_direction, edge1)

    # if determinant is near zero, ray lies in plane of triangle
    det = diagonal_dot(edge0, P)
    candidates[np.abs(det) < tol.zero] = False
    if not candidates.any():
        return candidates

    # remove previously calculated terms which are no longer candidates
    inv_det = 1.0 / det[candidates]
    T = ray_origin - vert0[candidates]
    # u is the first barycentric coordinate; cull rays outside [0, 1]
    u = diagonal_dot(T, P[candidates]) * inv_det
    new_candidates = np.logical_not(np.logical_or(u < -tol.zero,
                                                  u > (1 + tol.zero)))
    candidates[candidates] = new_candidates
    if not candidates.any():
        return candidates

    inv_det = inv_det[new_candidates]
    T = T[new_candidates]
    u = u[new_candidates]

    # v is the second barycentric coordinate; cull when u + v exceeds 1
    Q = np.cross(T, edge0[candidates])
    v = np.dot(ray_direction, Q.T) * inv_det
    new_candidates = np.logical_not(np.logical_or((v < -tol.zero),
                                                  (u + v > (1 + tol.zero))))
    candidates[candidates] = new_candidates
    if not candidates.any():
        return candidates

    Q = Q[new_candidates]
    inv_det = inv_det[new_candidates]

    # t is the distance along the ray; only forward hits count
    t = diagonal_dot(edge1[candidates], Q) * inv_det
    candidates[candidates] = t > tol.zero
    return candidates
|
#!/usr/bin/env python
# coding=utf8
from uuid import uuid4 as uuid
import pytest
pymongo = pytest.importorskip("pymongo")
from basic_store import BasicStore
from conftest import ExtendedKeyspaceTests
from minimalkv._mixins import ExtendedKeyspaceMixin
from minimalkv.db.mongo import MongoStore
class TestMongoDB(BasicStore):
    """Run the shared BasicStore suite against a throwaway MongoDB database."""

    @pytest.fixture
    def db_name(self):
        # Unique database name per test run to avoid collisions with
        # leftover or parallel runs.
        return "_minimalkv_test_{}".format(uuid())

    # pytest.yield_fixture is deprecated (removed in modern pytest);
    # a plain fixture supports yield-style teardown since pytest 3.0.
    @pytest.fixture
    def store(self, db_name):
        """Yield a MongoStore on a fresh database; drop the database after."""
        try:
            conn = pymongo.MongoClient()
        except pymongo.errors.ConnectionFailure:
            pytest.skip("could not connect to mongodb")
        yield MongoStore(conn[db_name], "minimalkv-tests")
        conn.drop_database(db_name)
class TestExtendedKeyspaceDictStore(TestMongoDB, ExtendedKeyspaceTests):
    """Re-run the MongoDB suite with the extended-keyspace mixin applied."""

    @pytest.fixture
    def store(self, db_name):
        # Override the parent fixture with a mixin-augmented store class.
        class ExtendedKeyspaceStore(ExtendedKeyspaceMixin, MongoStore):
            pass
        try:
            conn = pymongo.MongoClient()
        except pymongo.errors.ConnectionFailure:
            pytest.skip("could not connect to mongodb")
        yield ExtendedKeyspaceStore(conn[db_name], "minimalkv-tests")
        conn.drop_database(db_name)
|
import logging
from collections import deque
from importlib import import_module
from lakesuperior.dictionaries.namespaces import ns_collection as nsc
'''
Constants used in messaging to identify an event type.
'''
RES_CREATED = '_create_'
RES_DELETED = '_delete_'
RES_UPDATED = '_update_'
ROOT_UID = '/'
ROOT_RSRC_URI = nsc['fcres'][ROOT_UID]
class AppGlobals:
    '''
    Application Globals.

    This class sets up all connections and exposes them across the application
    outside of the Flask app context.
    '''
    def __init__(self, conf):
        '''
        :param conf: parsed application configuration; the 'application'
            section selects the RDF/non-RDF layouts and messaging routes.
        '''
        # Imported here rather than at module level (avoids an import cycle
        # with the messaging package at startup — TODO confirm).
        from lakesuperior.messaging.messenger import Messenger
        app_conf = conf['application']

        # Initialize RDF layout.
        rdfly_mod_name = app_conf['store']['ldp_rs']['layout']
        rdfly_mod = import_module('lakesuperior.store.ldp_rs.{}'.format(
            rdfly_mod_name))
        # Layout class name is the CamelCase form of its module name.
        rdfly_cls = getattr(rdfly_mod, self.camelcase(rdfly_mod_name))
        #logger.info('RDF layout: {}'.format(rdfly_mod_name))

        # Initialize file layout.
        nonrdfly_mod_name = app_conf['store']['ldp_nr']['layout']
        nonrdfly_mod = import_module('lakesuperior.store.ldp_nr.{}'.format(
            nonrdfly_mod_name))
        nonrdfly_cls = getattr(nonrdfly_mod, self.camelcase(nonrdfly_mod_name))
        #logger.info('Non-RDF layout: {}'.format(nonrdfly_mod_name))

        # Set up messaging.
        messenger = Messenger(app_conf['messaging'])

        # Exposed globals.
        self._rdfly = rdfly_cls(app_conf['store']['ldp_rs'])
        self._nonrdfly = nonrdfly_cls(app_conf['store']['ldp_nr'])
        self._messenger = messenger
        self._changelog = deque()

    @property
    def rdfly(self):
        # RDF (LDP-RS) storage layout instance.
        return self._rdfly

    @property
    def rdf_store(self):
        # Underlying triple store of the RDF layout.
        return self._rdfly.store

    @property
    def nonrdfly(self):
        # Non-RDF (LDP-NR) file storage layout instance.
        return self._nonrdfly

    @property
    def messenger(self):
        # Messaging hub configured from the 'messaging' section.
        return self._messenger

    @property
    def changelog(self):
        # FIFO of resource change events (see RES_* constants above).
        return self._changelog

    def camelcase(self, word):
        '''
        Convert a string with underscores to a camel-cased one.

        Ripped from https://stackoverflow.com/a/6425628
        '''
        return ''.join(x.capitalize() or '_' for x in word.split('_'))
|
#!/usr/bin/python
import obd
from influxdb import InfluxDBClient
# Connect to the vehicle over OBD-II and do a smoke-test RPM query.
# NOTE: the bare `print r.value` statement means this file is Python 2.
obd.debug.console = True
c = obd.OBD()
r = c.query(obd.commands.RPM)
print r.value
def get_run_time(c):
    # Engine run time since start, queried over the OBD connection `c`.
    r = c.query(obd.commands.RUN_TIME)
    return r.value
def get_engine_load(c):
    # Calculated engine load value.
    r = c.query(obd.commands.ENGINE_LOAD)
    return r.value
def get_throttle_pos(c):
    # Throttle position.
    r = c.query(obd.commands.THROTTLE_POS)
    return r.value
def get_rpm(c):
    # Engine revolutions per minute.
    r = c.query(obd.commands.RPM)
    return r.value
def get_maf(c):
    # Mass air flow rate (used below to derive fuel consumption).
    r = c.query(obd.commands.MAF)
    return r.value
def get_speed(c):
    # Vehicle speed.
    r = c.query(obd.commands.SPEED)
    return r.value
def get_fuel_lph(c):
    # Fuel consumption in litres/hour, derived from the MAF reading.
    maf = get_maf(c)
    return _get_fuel_lph(maf)
def get_fuel_lp100km(c):
    # Fuel consumption in litres per 100 km (None while stationary).
    lph = get_fuel_lph(c)
    speed = get_speed(c)
    return _get_fuel_lp100km(lph, speed)
def _get_fuel_lph(maf):
# LPH = (MAF / 14.7) / 454 * 3600 = MAF * 0.539423
return maf * 0.539423
def _get_fuel_lp100km(lph, speed):
# LP100KM = ABS(LPH / SPEED * 100) [VSS != 0]
if speed != 0:
return abs(lph / speed * 100)
else:
return None
def main(host='localhost', port=8086):
    """Demo round-trip against an InfluxDB server.

    Creates a database and retention policy, writes one point, queries it
    back, then drops the database. All operations are against the server at
    host:port with hard-coded demo credentials.
    """
    user = 'root'
    password = 'root'
    dbname = 'example'
    dbuser = 'smly'
    dbuser_password = 'my_secret_password'
    query = 'select value from cpu_load_short;'
    json_body = [
        {
            "measurement": "cpu_load_short",
            "tags": {
                "host": "server01",
                "region": "us-west"
            },
            "time": "2009-11-10T23:00:00Z",
            "fields": {
                "value": 0.64
            }
        }
    ]

    client = InfluxDBClient(host, port, user, password, dbname)

    print("Create database: " + dbname)
    client.create_database(dbname)

    print("Create a retention policy")
    client.create_retention_policy('awesome_policy', '3d', 3, default=True)

    print("Switch user: " + dbuser)
    client.switch_user(dbuser, dbuser_password)

    print("Write points: {0}".format(json_body))
    client.write_points(json_body)

    # Fixed typo in the progress message ("Queying" -> "Querying").
    print("Querying data: " + query)
    result = client.query(query)
    print("Result: {0}".format(result))

    print("Switch user: " + user)
    client.switch_user(user, password)

    print("Drop database: " + dbname)
    client.drop_database(dbname)
|
# import random
# jogador = int(input('Digite um número entre 0 e 5: '))
# computador = random.randint(0, 5)
# print('O número escolhido pelo computador foi {}'.format(computador))
# if jogador == computador:
#     print('Você venceu!')
# else:
#     print('Você perdeu!')
# The commented-out version above is the author's own working solution.
# The version below is the instructor's solution.
import random
from time import sleep

# Pick the computer's secret number, then ask the player for a guess and
# compare after a short dramatic pause.
computador = random.randint(0, 5)
print('-=-' * 20)
print('Vou pensar em um número entre 0 e 5. Tente adivinhar...')
print('-=-' * 20)
jogador = int(input('Em que número eu pensei? '))
print('PROCESSANDO...')
sleep(3)
print('')
if jogador == computador:
    print('PARABÉNS! Você GANHOU, pois eu também pensei no número {}'.format(jogador))
else:
    print('HAHAHA, você PERDEU, pois eu pensei no número {} e não no número {}, boa sorte na próxima vez'. format(computador, jogador))
|
from subprocess import run
from os.path import join, exists
from shutil import rmtree
from invoke import task
from faasmtools.env import (
USABLE_CPUS,
THIRD_PARTY_DIR,
PROJ_ROOT,
)
from faasmtools.build import (
WASM_CC,
WASM_AR,
WASM_LIB_INSTALL,
WASM_NM,
WASM_SYSROOT,
)
@task(default=True)
def build(ctx, clean=False):
    """
    Builds the wasi libc fork in this directory

    Args:
        ctx: invoke task context (unused directly).
        clean: when True, run `make clean` and delete the build dir first.
    """
    libc_dir = join(THIRD_PARTY_DIR, "wasi-libc")
    libc_build_dir = join(libc_dir, "build")

    if clean:
        run("make clean", shell=True, check=True, cwd=libc_dir)
        if exists(libc_build_dir):
            rmtree(libc_build_dir)

    # Build with the Faasm thread model and the WASM toolchain binaries
    # provided by faasmtools.
    make_cmd = [
        "make",
        "-j {}".format(USABLE_CPUS),
        "THREAD_MODEL=faasm",
        "WASM_CC={}".format(WASM_CC),
        "WASM_AR={}".format(WASM_AR),
        "WASM_NM={}".format(WASM_NM),
        "SYSROOT={}".format(WASM_SYSROOT),
    ]
    make_cmd = " ".join(make_cmd)
    print(make_cmd)

    # Run the build
    run(make_cmd, check=True, shell=True, cwd=libc_dir)

    # Copy the import files into place
    copy_cmd = "cp -r sysroot_extras/* {}".format(WASM_LIB_INSTALL)
    print("\nCopying undefined symbols into place: \n{}".format(copy_cmd))
    run(copy_cmd, check=True, shell=True, cwd=PROJ_ROOT)
|
class Solution(object):
    def convert(self, s, numRows):
        """Zigzag-convert s over numRows rows and read it row by row.

        :type s: str
        :type numRows: int
        :rtype: str
        """
        # With a single row the zigzag is the string itself. The bounce
        # arithmetic below also breaks for numRows == 1 (the reflected row
        # index walks out of range and raises IndexError), so return early.
        if numRows == 1:
            return s
        # range instead of xrange: works on both Python 2 and 3.
        rows = [[] for _ in range(numRows)]
        row, step = 0, 1
        for ch in s:
            rows[row].append(ch)
            row += step
            if row < 0:
                # Bounced past the top row: reflect and head back down.
                row = abs(row)
                step *= -1
            elif row >= numRows:
                # Bounced past the bottom row: reflect and head back up.
                row = 2 * numRows - row - 2
                step *= -1
        return ''.join(''.join(r) for r in rows)
if __name__ == '__main__':
    # Smoke test; expected output: PAHNAPLSIIGYIR
    print(Solution().convert("PAYPALISHIRING", 3))
|
# Made By @Its_eviralBoy Keep Credits If You Are Goanna Kang This Lol
# And Thanks To The Creator Of Autopic This Script Was Made from Snippets From That Script
# Usage .actressdp Im Not Responsible For Any Ban caused By This
import asyncio
import os
import random
import shutil
from datetime import datetime
from PIL import Image, ImageDraw, ImageFont
from pySmartDL import SmartDL
from telethon.tl import functions
from FIREX.utils import admin_cmd
from userbot.cmdhelp import CmdHelp
# Font used to stamp the time/date onto the downloaded profile picture.
# NOTE(review): assumes a Debian/Ubuntu-style font path — confirm on the host.
FONT_FILE_TO_USE = "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"
# Add telegraph media links of profile pics that are to be used.
# NOTE(review): the list intentionally(?) contains repeated entries; selection
# below is uniform over positions, so duplicated URLs are picked more often.
# De-duplicating would change that weighting.
TELEGRAPH_MEDIA_LINKS = [
    "https://telegra.ph/file/2799c5767357cdaa074b9.jpg",
    "https://telegra.ph/file/06d96ec5711a9f77a73cc.jpg",
    "https://telegra.ph/file/c9bc336ce573ddb0be62c.jpg",
    "https://telegra.ph/file/c9bc336ce573ddb0be62c.jpg",
    "https://telegra.ph/file/109cffe40624fa5001d16.jpg",
    "https://telegra.ph/file/df603a49e4425aaf1632a.jpg",
    "https://telegra.ph/file/df603a49e4425aaf1632a.jpg",
    "https://telegra.ph/file/f9d79e6ad847203768de5.jpg",
    "https://telegra.ph/file/e27b7f46f2a7fde8e8fb5.jpg",
    "https://telegra.ph/file/e27b7f46f2a7fde8e8fb5.jpg",
    "https://telegra.ph/file/2799c5767357cdaa074b9.jpg",
    "https://telegra.ph/file/06d96ec5711a9f77a73cc.jpg",
    "https://telegra.ph/file/c9bc336ce573ddb0be62c.jpg",
    "https://telegra.ph/file/c9bc336ce573ddb0be62c.jpg",
    "https://telegra.ph/file/109cffe40624fa5001d16.jpg",
    "https://telegra.ph/file/df603a49e4425aaf1632a.jpg",
    "https://telegra.ph/file/df603a49e4425aaf1632a.jpg",
    "https://telegra.ph/file/f9d79e6ad847203768de5.jpg",
    "https://telegra.ph/file/e27b7f46f2a7fde8e8fb5.jpg",
    "https://telegra.ph/file/e27b7f46f2a7fde8e8fb5.jpg",
    "https://telegra.ph/file/2799c5767357cdaa074b9.jpg",
    "https://telegra.ph/file/06d96ec5711a9f77a73cc.jpg",
    "https://telegra.ph/file/c9bc336ce573ddb0be62c.jpg",
    "https://telegra.ph/file/c9bc336ce573ddb0be62c.jpg",
    "https://telegra.ph/file/109cffe40624fa5001d16.jpg",
    "https://telegra.ph/file/df603a49e4425aaf1632a.jpg",
    "https://telegra.ph/file/df603a49e4425aaf1632a.jpg",
    "https://telegra.ph/file/f9d79e6ad847203768de5.jpg",
    "https://telegra.ph/file/e27b7f46f2a7fde8e8fb5.jpg",
    "https://telegra.ph/file/e27b7f46f2a7fde8e8fb5.jpg",
    "https://telegra.ph/file/2799c5767357cdaa074b9.jpg",
    "https://telegra.ph/file/06d96ec5711a9f77a73cc.jpg",
    "https://telegra.ph/file/c9bc336ce573ddb0be62c.jpg",
    "https://telegra.ph/file/c9bc336ce573ddb0be62c.jpg",
    "https://telegra.ph/file/109cffe40624fa5001d16.jpg",
    "https://telegra.ph/file/df603a49e4425aaf1632a.jpg",
    "https://telegra.ph/file/df603a49e4425aaf1632a.jpg",
    "https://telegra.ph/file/f9d79e6ad847203768de5.jpg",
    "https://telegra.ph/file/e27b7f46f2a7fde8e8fb5.jpg",
    "https://telegra.ph/file/e27b7f46f2a7fde8e8fb5.jpg",
]
@borg.on(admin_cmd(pattern="sexydp ?(.*)"))
async def autopic(event):
    """Continuously rotate the account's profile photo.

    Each cycle downloads a random image from TELEGRAPH_MEDIA_LINKS, stamps
    the current time/date onto it, uploads it as the profile photo, then
    sleeps 60 seconds.  Loops until the profile-photo upload fails.
    """
    while True:
        # Pick a random link (duplicates in the list weight the choice).
        AUTOPP = random.choice(TELEGRAPH_MEDIA_LINKS)
        downloaded_file_name = "./DOWNLOADS/original_pic.png"
        downloader = SmartDL(AUTOPP, downloaded_file_name, progress_bar=True)
        downloader.start(blocking=False)
        photo = "photo_pfp.png"
        # Yield to the event loop while the download runs; the original
        # `while not finished: pass` busy-wait blocked the whole client.
        while not downloader.isFinished():
            await asyncio.sleep(0.5)
        shutil.copy(downloaded_file_name, photo)
        # Leading newlines push the stamp towards the bottom of the image.
        current_time = datetime.now().strftime(
            "\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n Time: %H:%M:%S \n Date: %d/%m/%y "
        )
        img = Image.open(photo)
        drawn_text = ImageDraw.Draw(img)
        fnt = ImageFont.truetype(FONT_FILE_TO_USE, 30)
        drawn_text.text((300, 450), current_time, font=fnt, fill=(255, 255, 255))
        img.save(photo)
        file = await event.client.upload_file(photo)  # pylint:disable=E0602
        try:
            await event.client(
                functions.photos.UploadProfilePhotoRequest(file)  # pylint:disable=E0602
            )
            os.remove(photo)
            await asyncio.sleep(60)
        except Exception:
            # Best effort: stop rotating on any upload failure.
            return
# Register the .sexydp command with the userbot's help index.
CmdHelp("sexydp").add_command(
    "sexydp", None, "Starts autodp of sexy pic & wait for 5 min"
).add()
|
import torch
import torch.nn.functional as F
from .divergences import jensen_shannon_div, symmetric_kl_div
def mse_triplet_loss(anchor, positive, negative, margin=0.5):
    '''
    Computes the MSE triplet loss between three batches.
    Args:
        anchor: Logits Tensor of dimension (batch x classes)
        positive: Logits Tensor of dimension (batch x classes)
        negative: Logits Tensor of dimension (batch x classes)
        margin: Hinge margin added to the positive/negative distance gap.
    Returns:
        Triplet loss value, averaged over batch dimension.
    '''
    # Per-sample MSE distances, averaged over the class dimension.
    dist_pos = F.mse_loss(anchor, positive, reduction='none').mean(dim=1)
    dist_neg = F.mse_loss(anchor, negative, reduction='none').mean(dim=1)
    # Hinge: penalise only when the positive is not closer by at least margin.
    hinge = (dist_pos - dist_neg + margin).clamp(min=0)
    return hinge.mean()
def kl_triplet_loss(anchor, positive, negative, margin=0.5, divergence='jensen_shannon', symmetric=False):
    '''
    Computes the triplet loss between three logits batches.
    Args:
        anchor: Logits Tensor of dimension (batch x classes)
        positive: Logits Tensor of dimension (batch x classes)
        negative: Logits Tensor of dimension (batch x classes)
        margin: Hinge margin added to the divergence gap.
        divergence: Type of divergence to compute. Either "jensen_shannon" or "symmetric_kl".
        symmetric: Set to True to return the average of the triplet loss applied to the anchor and the positive.
    Returns:
        Triplet loss value, averaged over batch dimension.
    '''
    # Resolve the divergence function from its name.
    supported = {
        'jensen_shannon': jensen_shannon_div,
        'symmetric_kl': symmetric_kl_div,
    }
    div = supported.get(divergence)
    if div is None:
        raise ValueError(f'{divergence} is not a supported divergence.')
    # Per-sample divergences, summed over the class dimension.
    div_ap = div(anchor, positive, reduction='none').sum(dim=1)
    div_an = div(anchor, negative, reduction='none').sum(dim=1)
    loss = torch.clamp(div_ap - div_an + margin, min=0).mean()
    if symmetric:
        # Average with the hinge taken from the positive's perspective.
        div_pn = div(positive, negative, reduction='none').sum(dim=1)
        loss = loss + torch.clamp(div_ap - div_pn + margin, min=0).mean()
        loss = loss / 2
    return loss
import os
from io import BytesIO
from os.path import join, getsize
from pathlib import PurePosixPath
import cv2
import numpy as np
from PIL import Image, ExifTags
def reduce_pil_image(img):
    """
    Normalize the orientation of a PIL image using its EXIF data.

    Rotates the image so it displays upright regardless of the camera
    orientation recorded at capture time.  (The size-reduction logic this
    helper was named for is currently commented out below.)
    :param img: PIL image object
    :return: PIL image object (possibly rotated)
    """
    try:
        # Find the numeric EXIF tag id for 'Orientation'.
        orientation = 0
        for orientation in ExifTags.TAGS.keys():
            if ExifTags.TAGS[orientation] == 'Orientation':
                break
        exif = dict(img.getexif().items())
        # EXIF orientation: 3 = rotate 180, 6 = rotate 270 CCW, 8 = rotate 90 CCW.
        if exif[orientation] == 3:
            img = img.rotate(180, expand=True)
        elif exif[orientation] == 6:
            img = img.rotate(270, expand=True)
        elif exif[orientation] == 8:
            img = img.rotate(90, expand=True)
    except KeyError as e:
        # No EXIF orientation entry -- leave the image untouched.
        print('KeyError, the key is {}'.format(e))
    except Exception as e:
        print('unkonwn Exception is {}'.format(e))
    # Disabled resize-to-max_size logic (kept for reference):
    # xsize, ysize = img.size
    # if xsize > max_size:
    #     ysize = int(max_size / xsize * ysize)
    #     xsize = max_size
    # elif ysize > max_size:
    #     xsize = int(max_size / ysize * xsize)
    #     ysize = max_size
    # img = img.resize((xsize, ysize))
    return img
def reduce_by_opencv(impath, newpath):
    '''
    Re-encode an image as JPEG via OpenCV and write it to ``newpath``.

    Uses imdecode/imencode with np.fromfile/tofile instead of
    cv2.imread/imwrite -- presumably to support non-ASCII paths on
    Windows (TODO confirm).
    :param impath: source image path
    :param newpath: destination path (written as .jpg)
    :return: None (prints a marker line if the image cannot be decoded)
    '''
    # img = cv2.imread(impath, flags=cv2.IMREAD_COLOR)
    img = cv2.imdecode(np.fromfile(impath, dtype=np.uint8), cv2.IMREAD_COLOR)
    if img is not None:
        # cv2.imwrite(newpath, img, (80,))
        cv2.imencode(ext='.jpg', img=img)[1].tofile(newpath)
    else:
        print('######################### img read failed {} #########################'.format(impath))
def reduce_by_pil(impath, newpath):
    '''
    Re-save an image via PIL: load, force RGB, fix orientation, write out.

    :param impath: source image path
    :param newpath: destination path
    :return: None
    '''
    # Read the raw bytes first so PIL decodes from memory, not the file handle.
    with open(impath, mode='rb') as fobj:
        raw = fobj.read()
    image = Image.open(BytesIO(raw)).convert("RGB")
    image = reduce_pil_image(image)
    image.save(newpath)
if __name__ == '__main__':
    img_dir = r'D:\xiaomi10_photoes'
    reduce_dir = r'D:\xiaomi10_photoes_reduce'
    # Walk the source tree top-down, mirroring each directory under the
    # output root and re-encoding every supported image as .jpg.
    for root, dirs, files in os.walk(img_dir):
        new_root = root.replace(img_dir, reduce_dir)
        # makedirs(exist_ok=True) avoids the exists()/mkdir TOCTOU race.
        os.makedirs(new_root, exist_ok=True)
        for fname in files:
            suffix = PurePosixPath(fname).suffix
            if suffix.lower() in ('.jpg', '.jpeg', '.png'):
                impath = join(root, fname)
                # Swap only the extension.  The previous
                # new_path.replace(suffix, '.jpg') replaced the FIRST
                # occurrence of the suffix text anywhere in the path,
                # corrupting names like "a.jpga.jpg".
                new_path = join(new_root, PurePosixPath(fname).stem + '.jpg')
                print('impath {}, newpath {}'.format(impath, new_path))
                try:
                    reduce_by_opencv(impath, new_path)
                except Exception as e:
                    print('######################### Exception {} #########################'.format(e))
|
# Package entry point: re-export the MSGPack-RPC client as the public API.
from .msgpack_rpc import MSGPackRPC
__version__ = '1.0.1'
__all__ = ['MSGPackRPC']
|
class Solution:
    def maxProfitAssignment(self, difficulty, profit, worker):
        """
        :type difficulty: List[int]
        :type profit: List[int]
        :type worker: List[int]
        :rtype: int

        Greedy two-pointer sweep: sort jobs by difficulty and workers by
        ability; each worker earns the best profit among jobs they can do.
        """
        jobs = sorted(zip(difficulty, profit))
        total = 0
        best = 0
        idx = 0
        n_jobs = len(jobs)
        for ability in sorted(worker):
            # Fold in every job this worker can handle, tracking the best pay.
            while idx < n_jobs and jobs[idx][0] <= ability:
                if jobs[idx][1] > best:
                    best = jobs[idx][1]
                idx += 1
            total += best
        return total
class Solution:
    def addStrings(self, num1: str, num2: str) -> str:
        """Add two non-negative integers given as decimal strings.

        Schoolbook addition from the least-significant digit with a carry,
        without converting the whole strings to int.  Replaces the original
        two-loop implementation that shadowed the builtin ``sum``.

        :param num1: decimal digit string
        :param num2: decimal digit string
        :return: decimal digit string of num1 + num2
        """
        i, j = len(num1) - 1, len(num2) - 1
        carry = 0
        digits = []  # result digits, least-significant first
        while i >= 0 or j >= 0 or carry:
            total = carry
            if i >= 0:
                total += ord(num1[i]) - ord('0')
                i -= 1
            if j >= 0:
                total += ord(num2[j]) - ord('0')
                j -= 1
            carry, digit = divmod(total, 10)
            digits.append(chr(ord('0') + digit))
        return "".join(reversed(digits))
|
def find_next_empty(puzzle):
    """Return (row, col) of the next unfilled cell (-1), or (None, None)."""
    # Scan row-major; `next` with a default avoids the nested-loop returns.
    return next(
        ((r, c) for r in range(9) for c in range(9) if puzzle[r][c] == -1),
        (None, None),
    )
def is_valid(puzzle, guess, row, col):
    """Return True if `guess` can go at (row, col) without clashing."""
    # Row clash?
    if guess in puzzle[row]:
        return False
    # Column clash?
    if any(puzzle[r][col] == guess for r in range(9)):
        return False
    # 3x3 box clash?
    top, left = 3 * (row // 3), 3 * (col // 3)
    return all(
        puzzle[r][c] != guess
        for r in range(top, top + 3)
        for c in range(left, left + 3)
    )
def solve_sudoku(puzzle):
    """Solve `puzzle` in place by backtracking; -1 marks empty cells.

    Returns True when a full solution has been written into `puzzle`,
    False when the grid is unsolvable.
    """
    row, col = find_next_empty(puzzle)
    if row is None:
        # No empty cell left: the board is complete.
        return True
    for candidate in range(1, 10):
        if not is_valid(puzzle, candidate, row, col):
            continue
        puzzle[row][col] = candidate
        if solve_sudoku(puzzle):
            return True
        # Dead end: reset the cell and try the next candidate.
        puzzle[row][col] = -1
    return False
# Demo: solve a sample board in place and print the result row by row.
if __name__ == '__main__':
    sample = [
        [3,9,-1, -1,5,-1, -1,-1,-1],
        [-1,-1,-1, 2,-1,-1, -1,-1,5],
        [-1,-1,-1, 7,1,9, -1,8,-1],
        [-1,5,-1, -1,6,8, -1,-1,-1],
        [2,-1,6, -1,-1,3, -1,-1,-1],
        [-1,-1,-1, -1,-1,-1, -1,-1,4],
        [5,-1,-1, -1,-1,-1, -1,-1,-1],
        [6,7,-1, 1,-1,5, -1,4,-1],
        [1,-1,9, -1,-1,-1, 2,-1,-1],
    ]
    # True if solvable; `sample` now holds the solved grid.
    print(solve_sudoku(sample))
    for _ in sample:
        print(_)
import argparse
from .sniffer import TLSHandshakeSniffer
def parse_args():
    """Parse the pytlssniff command-line options from sys.argv."""
    parser = argparse.ArgumentParser(prog="pytlssniff")
    # Boolean switches, declared data-driven to avoid repetition.
    switches = [
        ("-d", "--debug", "debug", "enable debug mode"),
        ("-s", "--sni", "sni", "sniff SNI values from TLS handshakes"),
        ("-a", "--san", "san", "sniff domains from certificate SAN section"),
        ("-c", "--cn", "cn", "sniff Common Name from certificate CN section"),
    ]
    for short_opt, long_opt, dest, help_text in switches:
        parser.add_argument(short_opt, long_opt, dest=dest,
                            action='store_true', help=help_text)
    # Value-taking options.
    parser.add_argument("-i", '--interface', dest='interface', default="any",
                        help="name or idx of interface (default: any)", required=False)
    parser.add_argument("-r", '--input-file', dest='input_file', default=None,
                        help="set the filename to read from (- to read from stdin)", required=False)
    parser.add_argument("-p", '--packet-count', dest='packet_count', type=int, default=None,
                        help="stop after n packets (def: infinite)", required=False)
    parser.add_argument("-b", "--bpf-filter", dest="bpf_filter", default="",
                        help="packet filter in Berkeley Packet Filter (BPF) syntax (for live trace only)", required=False)
    parser.add_argument("-Y", "--display-filter", dest="display_filter", default="",
                        help="packet displaY filter in Wireshark display filter", required=False)
    return parser.parse_args()
def run():
    """CLI entry point: sniff TLS handshakes and print one line per message.

    Output columns (tab-separated): handshake type, IP version,
    src addr:port, dst addr:port, and a comma-joined list of DNS names
    gathered from SNI / certificate CN / certificate SAN.
    """
    args = parse_args()
    handshake_sniffer = TLSHandshakeSniffer(
        args.interface, args.input_file, args.bpf_filter, args.display_filter
    )
    for message in handshake_sniffer.listen(args.sni, args.cn, args.san, args.packet_count, args.debug):
        # Build the DNS-name column, avoiding duplicates between sources.
        dns_name = ''
        if message.sni is not None:
            dns_name = message.sni
        if message.cn is not None:
            if dns_name != '' and dns_name != message.cn:
                dns_name += f",{message.cn}"
            else:
                dns_name = message.cn
        if message.san is not None:
            # Drop SAN entries already reported via SNI or CN.
            # NOTE(review): this mutates message.san in place.
            if message.sni in message.san:
                message.san.remove(message.sni)
            if message.cn in message.san:
                message.san.remove(message.cn)
            if len(message.san) > 0:
                if dns_name != '':
                    dns_name += ','
                dns_name += ','.join(message.san)
        # Bracket IPv6 addresses so the host:port column stays unambiguous.
        if message.ip_version == 4:
            ip_version = 'IPv4'
            src_ip = message.src_ip
            dst_ip = message.dst_ip
        else:
            ip_version = 'IPv6'
            src_ip = f'[{message.src_ip}]'
            dst_ip = f'[{message.dst_ip}]'
        print(
            f"{message.handshake_type.name}({message.handshake_type.value})\t{ip_version}\t"
            f"{src_ip}:{message.src_port}\t{dst_ip}:{message.dst_port}\t{dns_name}", flush=True
        )
if __name__ == "__main__":
    run()
|
#!/usr/bin/env python
#coding:utf-8
# Created: 21.03.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
__author__ = "mozman <mozman@gmx.at>"
try:
# Python 2.6 and earlier need the unittest2 package
# try: easy_install unittest2
# or download source from: http://pypi.python.org/pypi/unittest2
import unittest2 as unittest
except ImportError:
import unittest
from dxfwrite.helpers import normalize_dxf_chunk
from dxfwrite.base import dxfstr
from dxfwrite.dimlines import ArcDimension
class TestArcDimAPI(unittest.TestCase):
    """Smoke-test the ArcDimension constructor API."""
    def test_init(self):
        # Constructing with every keyword and rendering to DXF should place
        # the entity on the requested layer.
        dimline = ArcDimension(
            pos=(5, 5),
            center=(0, 0),
            start=(1, 0),
            end=(1, 1),
            arc3points=False,
            dimstyle='default',
            layer="ARCDIMENSION",
            roundval=1)
        self.assertTrue("ARCDIMENSION" in dxfstr(dimline))
class TestArcDimImplementation(unittest.TestCase):
    """Check the DXF output and geometry of ArcDimension."""
    def test_45deg(self):
        # Golden-master DXF tag stream for a 45-degree arc dimension;
        # compared via normalize_dxf_chunk to tolerate formatting noise.
        expected = " 0\nARC\n 62\n7\n 8\nDIMENSIONS\n 10\n0.0\n 20\n0.0\n"\
        " 30\n0.0\n 40\n7.07106781187\n 50\n0.0\n 51\n45.0\n 0\n"\
        "LINE\n 62\n5\n 8\nDIMENSIONS\n 10\n1.3\n 20\n0.0\n 30\n"\
        "0.0\n 11\n7.07106781187\n 21\n0.0\n 31\n0.0\n 0\nLINE\n"\
        " 62\n5\n 8\nDIMENSIONS\n 10\n0.919238815543\n 20\n"\
        "0.919238815543\n 30\n0.0\n 11\n5.0\n 21\n5.0\n 31\n0.0\n"\
        " 0\nTEXT\n 62\n7\n 8\nDIMENSIONS\n 10\n6.94856061401\n"\
        " 20\n2.8781880453\n 30\n0.0\n 40\n0.5\n 1\n79\n 50\n-67.5\n"\
        " 7\nISOCPEUR\n 72\n1\n 73\n2\n 11\n6.94856061401\n 21\n"\
        "2.8781880453\n 31\n0.0\n 0\nINSERT\n 8\nDIMENSIONS\n 2\n"\
        "DIMTICK_ARCH\n 10\n7.07106781187\n 20\n0.0\n 30\n0.0\n 41\n"\
        "1.0\n 42\n1.0\n 50\n90.0\n 0\nINSERT\n 8\nDIMENSIONS\n"\
        " 2\nDIMTICK_ARCH\n 10\n5.0\n 20\n5.0\n 30\n0.0\n 41\n1.0\n"\
        " 42\n1.0\n 50\n135.0\n"
        dimline = ArcDimension(pos=(5,5), center=(0, 0), start=(1, 0),
                               end=(1, 1), )
        self.assertSequenceEqual(normalize_dxf_chunk(dimline.__dxf__()), normalize_dxf_chunk(expected))
    def test_setup(self):
        # With arc3points=True the center is derived from the three points;
        # for these points it should come out at the origin.
        dimline = ArcDimension((5,5), (4,0), (0,4), (-4,0), arc3points=True)
        dimline._setup()
        center = dimline.center
        self.assertAlmostEqual(center[0], 0)
        self.assertAlmostEqual(center[1], 0)
# Allow running this test module directly.
if __name__=='__main__':
    unittest.main()
|
import os
import settings
# Ensure the configured STORAGE directory exists before the app uses it.
storage_path = settings.STORAGE
if not os.path.exists(storage_path):
    print('STORAGE folder not found, Creating!')
    os.makedirs(storage_path)
    print('STORAGE folder Created at ' + storage_path)
|
import tempfile
import numpy as np
import pytest
import time
import zarr
from pathlib import Path
from itertools import product
from swyft.store.store import DirectoryStore, MemoryStore
from swyft.store.simulator import Simulator
from swyft import Prior
def model(params):
    """Toy linear simulator: returns dict(x = a * linspace(-1, 1, 10) + b)."""
    slope, offset = params
    grid = np.linspace(-1, 1, 10)
    return dict(x=grid * slope + offset)
def model_multi_out(params):
    """Toy simulator with two outputs: x1 = a*p + b (10,) and
    x2 = a*p - b reshaped to (2, 5)."""
    slope, offset = params
    grid = np.linspace(-1, 1, 10)
    first = grid * slope + offset
    second = (grid * slope - offset).reshape(2, 5)
    return dict(x1=first, x2=second)
# Module-level fixtures shared by the test classes below.
sim = Simulator(model, sim_shapes=dict(x=(10,)))
sim_multi_out = Simulator(model_multi_out, sim_shapes=dict(x1=(10,), x2=(2, 5)))
# Uniform prior mapped onto [0, 1] x [0, 0.5].
prior = Prior.from_uv(lambda u: u * np.array([1.0, 0.5]), 2)
class TestStoreIO:
    """Construction, save and load behaviour of Memory/Directory stores."""
    def test_init_memory_store(self):
        """A MemoryStore wires up params, a zarr backend and the simulator."""
        store = MemoryStore(2, simulator=sim)
        assert len(store.params) == 2
        assert isinstance(store.zarr_store, zarr.storage.MemoryStore)
        assert isinstance(store._simulator, Simulator)
    def test_init_memory_store_multi_outputs(self):
        """Simulator output shapes are preserved for multi-output models."""
        store = MemoryStore(2, simulator=sim_multi_out)
        assert len(store.params) == 2
        assert {k: v for k, v in store._simulator.sim_shapes.items()} == {
            "x1": (10,),
            "x2": (2, 5),
        }
    def test_init_directory_store_multi_outputs(self):
        """A DirectoryStore creates an on-disk layout at the given path."""
        with tempfile.TemporaryDirectory() as td:
            store = DirectoryStore(2, simulator=sim_multi_out, path=td)
            assert len(store.params) == 2
            assert {k: v for k, v in store._simulator.sim_shapes.items()} == {
                "x1": (10,),
                "x2": (2, 5),
            }
            td_path = Path(td)
            # At least one subdirectory must have been created on disk.
            items = [
                p.relative_to(td).as_posix() for p in td_path.rglob("*/") if p.is_dir()
            ]
            assert len(items) > 0
    def test_memory_store_save(self):
        """save() materialises an in-memory store to a directory tree."""
        store = MemoryStore.from_model(model, prior)
        with tempfile.TemporaryDirectory() as td:
            td_path = Path(td)
            store.save(td)
            items = [
                p.relative_to(td).as_posix() for p in td_path.rglob("*") if p.is_dir()
            ]
            assert len(items) > 0
    def test_memory_store_load(self):
        """A saved MemoryStore round-trips params, data and sim shapes."""
        store = MemoryStore(2, simulator=sim_multi_out)
        with tempfile.TemporaryDirectory() as td:
            store.save(td)
            loaded = MemoryStore.load(td)
            # The simulator is not persisted and must be re-attached.
            loaded.set_simulator(sim_multi_out)
            assert loaded.params == store.params
            assert loaded.zarr_store.root == store.zarr_store.root
            assert loaded._simulator.sim_shapes == sim_multi_out.sim_shapes
    def test_directory_store_load(self):
        """A DirectoryStore can be re-opened from its path."""
        with tempfile.TemporaryDirectory() as td:
            store = DirectoryStore(2, simulator=sim_multi_out, path=td)
            loaded = DirectoryStore.load(td)
            assert loaded.params == store.params
            assert loaded.zarr_store.path == store.zarr_store.path
class TestStoreRun:
    """Sampling, simulation and locking behaviour of the stores."""
    def test_memory_store_sample(self):
        """sample() returns one index per stored sample."""
        store = MemoryStore.from_model(model, prior)
        indices = store.sample(100, prior)
        assert len(indices) == len(store)
    def test_memory_store_simulate(self):
        """Only the requested subset of indices is simulated."""
        store = MemoryStore(2, simulator=sim_multi_out)
        indices = store.sample(100, prior)
        ind_sim = indices[:50]
        store.simulate(ind_sim)
        # Simulated entries are non-zero; the rest stay zero-initialised.
        assert store.sims.x1[49].sum() != 0
        assert store.sims.x1[50].sum() == 0
    def test_directory_store_sample(self):
        """Sampling works identically for the on-disk store."""
        with tempfile.TemporaryDirectory() as td:
            store = DirectoryStore(2, simulator=sim_multi_out, path=td)
            indices = store.sample(100, prior)
            assert len(indices) == len(store)
    def test_directory_store_simulate(self):
        """Simulating every sampled index fills all rows."""
        with tempfile.TemporaryDirectory() as td:
            store = DirectoryStore(2, simulator=sim, path=td)
            ind_sim = store.sample(100, prior)
            store.simulate(ind_sim)
            assert store.sims.x[:].sum(axis=1).all()
    def test_directory_store_simulate_partial(self):
        """A partial simulation leaves the untouched rows at zero."""
        with tempfile.TemporaryDirectory() as td:
            store = DirectoryStore(2, simulator=sim, path=td)
            ind_sim = store.sample(100, prior)
            ind_sim = ind_sim[:40]
            store.simulate(ind_sim)
            assert store.sims.x[39].sum() != 0
            assert store.sims.x[40].sum() == 0
    def test_store_lockfile(self):
        """lock()/unlock() acquire and release the store's lockfile."""
        with tempfile.TemporaryDirectory() as td:
            store_dir = DirectoryStore(2, simulator=sim, path=td, sync_path=td + ".sync")
            assert store_dir._lock is not None
            assert store_dir._lock.lockfile is None
            store_dir.lock()
            assert store_dir._lock.lockfile is not None
            store_dir.unlock()
            assert store_dir._lock.lockfile is None
# No direct-run behaviour; the tests are collected by pytest.
if __name__ == "__main__":
    pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.