class double_buffer:
def __init__(self, cls):
self.__name__ = cls.__name__
self.__doc__ = cls.__doc__
try:
self._transition_hook = getattr(cls, 'on_transition')
except AttributeError:
self._transition_hook = lambda self: None
try:
self._change_hook = getattr(cls, 'on_change')
except AttributeError:
self._change_hook = lambda self, before, after: None
if self._change_hook.__code__.co_argcount != 3:
raise TypeError('The on_change hook of a double_buffer must take 3 arguments (self, before, after): {}'
.format(self.__name__))
underscores = 0
for c in self.__name__:
if c == '_':
underscores += 1
else:
break
name = self.__name__[underscores:]
underscores = self.__name__[:underscores]
self._curr_name = '__current_{}'.format(self.__name__)
self._prev_name = '{}old_{}'.format(underscores, name)
self._transition_hook_name = '{}on_{}_transition'.format(underscores, name)
self._change_hook_name = '{}on_{}_change'.format(underscores, name)
def __get__(self, instance, owner):
try:
return getattr(instance, self._curr_name)
        except AttributeError:
            return getattr(owner, self._curr_name)
def __set__(self, instance, value):
# If not loaded yet, treat as an initial value
if not instance.is_loaded:
setattr(instance, self._prev_name, value)
setattr(instance, self._curr_name, value)
return
# Call change hook if necessary
before = getattr(instance, self._curr_name)
setattr(instance, self._curr_name, value)
if value != before:
getattr(instance, self._change_hook_name)(before, value)
# Put transition hook at front of queue or remove it
queue = getattr(instance, _HookHandler._transition_hook_queue_name)
try:
queue.remove(self)
except ValueError:
pass
if value != getattr(instance, self._prev_name):
queue.append(self)
class _responsive:
def __init__(self, func, init, priority, children_first):
self._hook = func
self._initial_value = init
self._priority = priority
self._children_first = children_first
self.__name__ = func.__name__
self.__doc__ = func.__doc__
underscores = 0
for c in self.__name__:
if c == '_':
underscores += 1
else:
break
name = self.__name__[underscores:]
if not name.startswith('refresh_'):
raise NameError('Responsive method names must start with \'refresh_\': {}'.format(self.__name__))
self._flag_name = '{}_flag'.format(self.__name__)
self._hook_name = self.__name__
if func.__code__.co_argcount != 1:
raise TypeError('Responsive methods must accept 1 positional argument, not {}: {}'
.format(func.__code__.co_argcount, self.__name__))
def responsive(init=False, priority=0, children_first=False):
def responsive_factory(func):
return _responsive(func, init, priority, children_first)
return responsive_factory
class _HookHandler(type):
_initialized_flag_name = '__HookHandler_is_initialized'
_transition_hook_queue_name = '__transition_hook_queue'
_double_buffers_name = '__double_buffers'
_responsive_attrs_name = '__responsive_attrs'
def __init__(cls, name, *bases, **namespace):
double_buffers = {attr for attr in bases[1].values() if isinstance(attr, double_buffer)}
new_double_buffers = tuple(double_buffers)
responsive_attrs = {attr for attr in bases[1].values() if isinstance(attr, _responsive)}
new_responsive_attrs = tuple(responsive_attrs)
for sprcls in bases[0]:
if isinstance(sprcls, _HookHandler):
double_buffers |= frozenset(getattr(sprcls, _HookHandler._double_buffers_name))
responsive_attrs |= frozenset(getattr(sprcls, _HookHandler._responsive_attrs_name))
double_buffers = tuple(double_buffers)
responsive_attrs = tuple(sorted(responsive_attrs, key=lambda a: a._priority))
setattr(cls, _HookHandler._double_buffers_name, double_buffers)
setattr(cls, _HookHandler._responsive_attrs_name, responsive_attrs)
# Create appropriate class attributes
for attr in new_double_buffers:
setattr(cls, attr._curr_name, None)
setattr(cls, attr._prev_name, None)
setattr(cls, attr._transition_hook_name, attr._transition_hook)
setattr(cls, attr._change_hook_name, attr._change_hook)
for attr in new_responsive_attrs:
setattr(cls, attr._flag_name, attr._initial_value)
setattr(cls, attr._hook_name, attr._hook)
# Don't double inject if _HookHandler already injected stuff into a superclass
if any(isinstance(sprcls, _HookHandler) for sprcls in bases[0]):
super().__init__(name, bases, namespace)
return
# Create initialized flag
setattr(cls, _HookHandler._initialized_flag_name, False)
def call_transition_hooks(self):
if getattr(self, _HookHandler._initialized_flag_name):
for attr in getattr(self, _HookHandler._transition_hook_queue_name):
getattr(self, attr._transition_hook_name)()
def flip_transition_hooks(self):
setattr(self, _HookHandler._initialized_flag_name, True)
queue = getattr(self, _HookHandler._transition_hook_queue_name)
for attr in queue:
setattr(self, attr._prev_name, getattr(self, attr._curr_name))
queue.clear()
def refresh_responsive_attrs(self, children_first):
for attr in getattr(self, _HookHandler._responsive_attrs_name):
if attr._children_first == children_first and getattr(self, attr._flag_name):
getattr(self, attr._hook_name)()
setattr(self, attr._flag_name, False)
# TODO: Maintain priority-specified order AND after_children-specified order?
def recursive_refresh_responsive_attrs(self):
self._refresh_responsive_attrs(children_first=False)
for child in self._children:
if child.old_is_active or child.is_active:
child._recursive_refresh_responsive_attrs()
self._refresh_responsive_attrs(children_first=True)
def recursive_call_transition_hooks(self):
self._call_transition_hooks()
for child in self._children:
if child.old_is_active or child.is_active:
child._recursive_call_transition_hooks()
setattr(cls, '_call_transition_hooks', call_transition_hooks)
setattr(cls, '_recursive_call_transition_hooks', recursive_call_transition_hooks)
setattr(cls, '_flip_transition_hooks', flip_transition_hooks)
setattr(cls, '_refresh_responsive_attrs', refresh_responsive_attrs)
setattr(cls, '_recursive_refresh_responsive_attrs', recursive_refresh_responsive_attrs)
# Modify __init__ to initialize hook queues
old_init = cls.__init__
def new_init(self, *args, **kwargs):
setattr(self, _HookHandler._transition_hook_queue_name, [])
old_init(self, *args, **kwargs)
cls.__init__ = new_init
super().__init__(name, bases, namespace)
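
# ---------------------------------------------------------------------------
# A minimal, hypothetical usage sketch (added for illustration; it is not part
# of the original module). It assumes a host class that supplies the
# attributes the injected hooks rely on (is_loaded, _children, is_active /
# old_is_active); all names below are examples.
class _Widget(metaclass=_HookHandler):
    is_loaded = False
    is_active = True
    old_is_active = True
    _children = ()

    @double_buffer
    class visible:
        def on_transition(self):
            print('visible transitioned')
        def on_change(self, before, after):
            print('visible: {} -> {}'.format(before, after))

    @responsive(init=True)
    def refresh_layout(self):
        print('layout refreshed')

# w = _Widget()
# w.visible = False                          # initial value while is_loaded is False
# w.is_loaded = True
# w.visible = True                           # fires on_visible_change and queues the transition
# w._recursive_refresh_responsive_attrs()    # runs refresh_layout once (init flag was True)
# w._flip_transition_hooks()                 # commit: old_visible now matches visible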
|
# -*- coding: utf-8 -*-
"""
SMART_2011 Assignment
Includes Conversion from Scilab to Python3
Brunston Poon
begun 2014.09.22 - completed 2014.09.25
"""
import numpy as np
from matplotlib import pyplot as plt
file = r"C:\ast\SMART_data.txt"
numberOfColumns = 7
data = np.loadtxt(file)
radii = data[:,0] # Distance column, in kiloparsecs (kpc)
neutralGasMass = data[:,1] # Neutral Gas column, in solar masses (m_sun)
luminosityB = data[:,2] # Luminosity in the B band, solar luminosities (l_sun)
luminosityR = data[:,3] # Luminosity in R band, l_sun
velocityModel = data[:,4] # Velocity, modeled to "smooth" the data (kms)
colCounterRotating = data[:,5] # This velocity array includes counter-rotating elements.
velocity = data[:,6] # Velocity column in kilometers/second (kms)
number = 1
plt.figure(number)
plt.clf() #housekeeping, clears plot
plt.cla() #housekeeping, clears plot
number = number + 1
# plot velocity (km/s) versus distance (kpc)
plt.plot(radii,velocity,'b.', markersize = 3)
plt.plot(radii,velocityModel,'g.',markersize = 3)
plt.title("Plot of Velocity Data and Velocity Model versus Radius")
plt.xlabel("Radius (kpc)")
plt.ylabel("Velocity (km/s)")
# plot gas mass (m_sun) versus distance (kpc)
plt.figure(number)
plt.clf() #housekeeping
plt.cla()#housekeeping
number = number + 1
plt.plot(radii,neutralGasMass,'b.', markersize = 3)
plt.title("Plot of Neutral Gas Mass versus Radius")
plt.xlabel("Radius (kpc)")
plt.ylabel("Mass (m_sun)")
# plot luminosity (l_sun) versus distance (kpc)
plt.figure(number)
plt.clf() #housekeeping
plt.cla() #housekeeping
number = number + 1
plt.plot(radii,luminosityR,'b.', markersize = 3)
plt.title("Plot of Luminosity versus Radius")
plt.xlabel("Radius (kpc)")
plt.ylabel("Luminosity (l_sun)")
#luminosity profile to mass profile
mlr1 = 0.477
mlr2 = 0.891
massStarsR1 = luminosityR * mlr1
massStarsR2 = luminosityR * mlr2
massStarsR1b = luminosityB * mlr1
massStarsR2b = luminosityB * mlr2
plt.figure(number)
plt.clf() #housekeeping
plt.cla() #housekeeping
number = number + 1
plt.plot(radii,massStarsR1,'b.', markersize = 3)
plt.plot(radii,massStarsR2,'g.', markersize = 3)
plt.title("Plot of Stellar Mass (R1 B, R2 G) versus Radius")
plt.xlabel("Radius (kpc)")
plt.ylabel("Stellar Mass (m_sun)")
grav = 4.28*10**(-6) # units of kpc*km^2/(M_sun*s^2)
massEnc = velocityModel**2*radii/grav
#this is the total mass within each radius (from zero) based on the rotation curve formula
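# Added note (not in the original script): a quick unit check. With
# grav = 4.28e-6 kpc*(km/s)^2/M_sun, M(<r) = v**2 * r / grav comes out in solar
# masses when v is in km/s and r is in kpc; e.g. v = 200 km/s at r = 10 kpc
# gives roughly 200**2 * 10 / 4.28e-6 ~ 9.3e10 M_sun.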
# Incremental (shell) mass: difference between successive enclosed masses;
# the first shell is just the mass enclosed within the first radius.
incrementalMass = np.empty_like(massEnc)
incrementalMass[0] = velocityModel[0]**2*radii[0]/grav
incrementalMass[1:] = np.diff(massEnc)
#this will plot the incremental mass versus radius
plt.figure(number)
plt.clf() #housekeeping
plt.cla() #housekeeping
number = number + 1
plt.plot(radii,incrementalMass, 'b.', markersize = 3)
plt.title('Plot of "Incremental" Mass versus Radius')
plt.xlabel("Radius (kpc)")
plt.ylabel("Incremental Mass (m_sun)")
darkMatter = incrementalMass - neutralGasMass - massStarsR1
darkMatterb = incrementalMass - neutralGasMass - massStarsR1b
plt.figure(number)
plt.clf() #housekeeping
plt.cla() #housekeeping
number = number + 1
#INCREMENTAL MASS = TOTAL MASS, incMass = darkMatter + neutralGasMass + massStarsR1 or R2
plt.plot(radii,darkMatter, 'b.', markersize = 3)
plt.plot(radii,massStarsR1, 'g.', markersize = 3)
plt.plot(radii, darkMatterb, 'r.', markersize = 5)
plt.plot(radii,incrementalMass, 'r.', markersize = 3)
plt.plot(radii,neutralGasMass, 'y.', markersize = 3)
plt.title("Total Mass (red), Stellar Mass (green),\n Dark Matter B-band (thick red), Dark Matter R-band (blue), \n\
Neutral Gas Mass (yellow), versus Radius")
plt.xlabel("Radius (kpc)")
plt.ylabel("Mass (m_sun)")
plt.show()
|
from django.views.generic import *
from wikis.mixins import *
import json
from commons.markdown import markdown_tools
from commons import file_name_tools
from commons.file import file_utils
from .forms import PageCreateForm, PageUpdateForm, PageCopyForm
from django.urls import reverse
import datetime
from django.http import HttpResponse
from django.template.loader import render_to_string
from logging import getLogger
logger = getLogger(__name__)
class IndexView(RedirectView):
    """
    Accessing the top page redirects to FrontPage.
    """
def get_redirect_url(self, *args, **kwargs):
wiki_id = kwargs.get("wiki_id")
argzz = [wiki_id, "FrontPage"]
return reverse('wikis.pages:show', args=argzz)
class ShowView(PageMixin, CommentFlgMixin, TemplateView):
    """
    Display a page.
    """
template_name = "wikis/pages/show.html"
mode = "show"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
        # Page contents
context["main_contents"] = markdown_tools.get_contents(self.request.wiki_id, self.request.page_dirs)
return context
class CreateView(ModeMixin, EditAuthorityMixin, FormView):
    """
    Create a new page.
    """
form_class = PageCreateForm
template_name = "wikis/pages/create.html"
mode = "create"
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['wiki_id'] = self.request.wiki_id
return kwargs
def form_valid(self, form):
        # Create the new page
form.put(self.request)
return super().form_valid(form)
def get_success_url(self):
form = super().get_form()
argzz = [form.data["wiki_id"]]
argzz.extend(file_name_tools.to_page_dirs(form.data["page_name"]))
return reverse('wikis.pages:show', args=argzz)
class EditView(PageMixin, EditAuthorityMixin, FormView):
    """
    Edit a page.
    If the page is frozen, the freeze screen is shown instead.
    """
form_class = PageUpdateForm
template_name = "wikis/pages/edit.html"
mode = "edit"
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['wiki_id'] = self.request.wiki_id
kwargs['page_dirs'] = self.request.page_dirs
return kwargs
def form_valid(self, form):
        # Update the page
form.put(self.request)
return super().form_valid(form)
def get_success_url(self):
form = super().get_form()
argzz = [form.data["wiki_id"]]
argzz.extend(file_name_tools.to_page_dirs(form.data["page_name"]))
return reverse('wikis.pages:show', args=argzz)
def get_template_names(self):
if self.request.page_conf["confs"].get("freeze_mode") == "1":
return ["wikis/pages/freeze.html"]
else:
return super().get_template_names()
class CopyView(PageMixin, EditAuthorityMixin, FormView):
    """
    Create a new page by copying an existing one.
    Reached by selecting [Copy] while viewing the page to be copied.
    """
form_class = PageCopyForm
template_name = "wikis/pages/copy.html"
mode = "copy"
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['wiki_id'] = self.request.wiki_id
kwargs['page_dirs'] = self.request.page_dirs
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
        # Page contents
context["main_contents"] = markdown_tools.get_contents(self.request.wiki_id, self.request.page_dirs)
return context
def form_valid(self, form):
        # Create the new page
form.put(self.request)
return super().form_valid(form)
def get_success_url(self):
form = super().get_form()
argzz = [form.data["wiki_id"]]
argzz.extend(file_name_tools.to_page_dirs(form.data["page_name"]))
return reverse('wikis.pages:show', args=argzz)
class ListView(PageMixin, TemplateView):
    """
    List of pages.
    Shows all pages in ascending order of file name.
    """
template_name = "wikis/pages/list.html"
mode = "list"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
        # Load the page index file
pages = json.loads(file_utils.get_file(self.request.wiki_id, "pages.json"))
pages = pages["data"]["pages"]
pages = sorted(pages, key=lambda x: x['file_name'])
for page in pages:
file_name_slash = file_name_tools.file_name_to_page_name(page["file_name"])
page["file_name"] = file_name_slash
argzz = [self.request.wiki_id]
argzz.extend(file_name_slash.split("/"))
page["url"] = reverse('wikis.pages:show', args=argzz)
page["update"] = datetime.datetime.strptime(page["update"], "%Y-%m-%dT%H:%M:%S.%f")
context["pages"] = pages
return context
class UpdatesView(PageMixin, TemplateView):
    """
    List of pages.
    Shown in descending order of update time.
    """
template_name = "wikis/pages/updates.html"
mode = "list"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
        # Load the page index file
pages = json.loads(file_utils.get_file(self.request.wiki_id, "pages.json"))
pages = pages["data"]["pages"]
pages = sorted(pages, key=lambda x: x['update'], reverse=True)
for page in pages:
file_name_slash = file_name_tools.file_name_to_page_name(page["file_name"])
page["file_name"] = file_name_slash
argzz = [self.request.wiki_id]
argzz.extend(file_name_slash.split("/"))
page["url"] = reverse('wikis.pages:show', args=argzz)
tdatetime = datetime.datetime.strptime(page["update"], '%Y-%m-%dT%H:%M:%S.%f')
page["update_day"] = datetime.date(tdatetime.year, tdatetime.month, tdatetime.day)
page["update"] = tdatetime
context["pages"] = pages
return context
class HelpView(ModeMixin, TemplateView):
    """
    Help screen.
    """
template_name = "wikis/pages/help.html"
mode = "help"
def sitemap(request, wiki_id):
page_file = json.loads(file_utils.get_file(wiki_id, "pages.json"))
pages = page_file["data"]["pages"]
pages = sorted(pages, key=lambda x: x['update'], reverse=True)
for page in pages:
file_name_slash = file_name_tools.file_name_to_page_name(page["file_name"])
page["file_name"] = file_name_slash
argzz = [wiki_id]
argzz.extend(file_name_slash.split("/"))
loc = "{}://{}{}".format(
request.get_raw_uri().split(":")[0]
, request.get_host()
, reverse('wikis.pages:show', args=argzz))
page["loc"] = loc
tdatetime = datetime.datetime.strptime(page["update"], '%Y-%m-%dT%H:%M:%S.%f')
tdate = datetime.date(tdatetime.year, tdatetime.month, tdatetime.day).strftime('%Y-%m-%dT%H:%M:%SZ')
page["lastmod"] = tdate
context = {"wiki_id": wiki_id, "pages": pages}
html = render_to_string("wikis/pages/sitemap.xml", context)
return HttpResponse(html, content_type="application/xhtml+xml")
|
import os
import subprocess
from ctypes import RTLD_LOCAL, RTLD_GLOBAL
class LibraryMeta(type):
def __call__(cls, name, mode=RTLD_LOCAL, nm="nm"):
if os.name == "nt":
from ctypes import WinDLL
# WinDLL does demangle the __stdcall names, so use that.
return WinDLL(name, mode=mode)
if os.path.exists(name) and mode != RTLD_GLOBAL and nm is not None:
# Use 'nm' on Unixes to load native and cross-compiled libraries
# (this is only possible if mode != RTLD_GLOBAL)
return super(LibraryMeta, cls).__call__(name, nm)
from ctypes import CDLL
from ctypes.util import find_library
path = find_library(name)
if path is None:
# Maybe 'name' is not a library name in the linker style,
# give CDLL a last chance to find the library.
path = name
return CDLL(path, mode=mode)
class Library(metaclass=LibraryMeta):
def __init__(self, filepath, nm):
self._filepath = filepath
self._name = os.path.basename(self._filepath)
self.__symbols = {}
self._get_symbols(nm)
# nm will print lines like this:
# <addr> <kind> <name>
def _get_symbols(self, nm):
cmd = [nm, "--dynamic", "--defined-only", self._filepath]
output = subprocess.check_output(cmd, universal_newlines=True)
for line in output.split('\n'):
fields = line.split(' ', 2)
if len(fields) >= 3 and fields[1] in ("T", "D", "G", "R", "S"):
self.__symbols[fields[2]] = fields[0]
def __getattr__(self, name):
try:
return self.__symbols[name]
except KeyError:
pass
raise AttributeError(name)
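
# ---------------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration; the path and symbol names
# are examples, not part of this module):
#   lib = Library("/usr/lib/libfoo.so")   # existing path on Unix -> `nm` is run
#   addr = lib.foo_init                   # address string parsed from nm output
#   cdll = Library("m")                   # bare name -> find_library()/CDLL path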
|
import torch
import torchtestcase
import unittest
from survae.tests.nn import ModuleTest
from survae.nn.layers.encoding import PositionalEncodingImage
class PositionalEncodingImageTest(ModuleTest):
def test_layer_is_well_behaved(self):
batch_size = 10
shape = (3,8,8)
x = torch.rand(batch_size, *shape, 16)
module = PositionalEncodingImage(shape, 16)
self.assert_layer_is_well_behaved(module, x)
def test_output(self):
batch_size = 10
shape = (3,8,8)
x = torch.zeros(batch_size, *shape, 16)
module = PositionalEncodingImage(shape, 16)
y = module(x)
self.assertEqual(y.shape, torch.Size([10, 3, 8, 8, 16]))
upper_left_channel0 = module.encode_c[0,0,0,0]+module.encode_h[0,0,0,0]+module.encode_w[0,0,0,0]
self.assertEqual(y[0,0,0,0], upper_left_channel0)
self.assertEqual(y[1,0,0,0], upper_left_channel0)
# ...
self.assertEqual(y[9,0,0,0], upper_left_channel0)
if __name__ == '__main__':
unittest.main()
|
import importlib
def get_project_settings():
    module = importlib.import_module("setting")
    return module
if __name__ == '__main__':
get_project_settings()
|
#!/usr/bin/python
"""
--- Day 11: Hex Ed ---
Crossing the bridge, you've barely reached the other side of the stream when a program comes up to you, clearly in distress. "It's my child process," she says, "he's gotten lost in an infinite grid!"
Fortunately for her, you have plenty of experience with infinite grids.
Unfortunately for you, it's a hex grid.
The hexagons ("hexes") in this grid are aligned such that adjacent hexes can be found to the north, northeast, southeast, south, southwest, and northwest:
\ n /
nw +--+ ne
/ \
-+ +-
\ /
sw +--+ se
/ s \
You have the path the child process took. Starting where he started, you need to determine the fewest number of steps required to reach him. (A "step" means to move from the hex you are in to any adjacent hex.)
For example:
ne,ne,ne is 3 steps away.
ne,ne,sw,sw is 0 steps away (back where you started).
ne,ne,s,s is 2 steps away (se,se).
se,sw,se,sw,sw is 3 steps away (s,s,sw).
--- Part Two ---
How many steps away is the furthest he ever got from his starting position?
"""
# use a counter instead of a list
# of steps taken to improve speed
dir_idx = { "n" : 0,
"s" : 1,
"ne": 2,
"nw": 3,
"se": 4,
"sw": 5 }
def opposite(a):
if a == "ne":
return "sw"
elif a == "n":
return "s"
elif a == "nw":
return "se"
elif a == "se":
return "nw"
elif a == "s":
return "n"
elif a == "sw":
return "ne"
else:
return None
def condense(a, b):
if a == "ne" and b == "s":
return "se"
elif b == "ne" and a == "s":
return "se"
elif a == "ne" and b == "nw":
return "n"
elif b == "ne" and a == "nw":
return "n"
elif a == "se" and b == "sw":
return "s"
elif b == "se" and a == "sw":
return "s"
elif a == "nw" and b == "s":
return "sw"
elif b == "nw" and a == "s":
return "sw"
elif a == "se" and b == "n":
return "ne"
elif b == "se" and a == "n":
return "ne"
elif a == "sw" and b == "n":
return "nw"
elif b == "sw" and a == "n":
return "nw"
return None
def build_count(arr, seq):
for step in seq:
arr[dir_idx[step]] += 1
return arr
def distance2(counter):
last_sum = 0
while last_sum != sum(counter):
# Cancel N and S
m = min(counter[0],counter[1])
counter[0] -= m
counter[1] -= m
# Cancel NE and SW
m = min(counter[2],counter[5])
counter[2] -= m
counter[5] -= m
# Cancel NW and SE
m = min(counter[3],counter[4])
counter[3] -= m
counter[4] -= m
# Compress NE and S to SE
m = min(counter[2],counter[1])
counter[2] -= m
counter[1] -= m
counter[4] += m
# Compress NE and NW to N
m = min(counter[2],counter[3])
counter[2] -= m
counter[3] -= m
counter[0] += m
# Compress SE and SW to S
m = min(counter[4],counter[5])
counter[4] -= m
counter[5] -= m
counter[1] += m
# Compress NW and S to SW
m = min(counter[3],counter[1])
counter[3] -= m
counter[1] -= m
counter[5] += m
# Compress SE and N to NE
m = min(counter[4],counter[0])
counter[4] -= m
counter[0] -= m
counter[2] += m
# Compress SW and N to NW
m = min(counter[5],counter[0])
counter[5] -= m
counter[0] -= m
counter[3] += m
last_sum = sum(counter)
return counter
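
# Added illustration (not part of the original solution): the same answer can
# be computed directly with cube coordinates, where every direction moves one
# unit and the distance to the origin is (|x| + |y| + |z|) / 2.
def cube_distance(seq):
    moves = {"n": (0, 1, -1), "s": (0, -1, 1), "ne": (1, 0, -1),
             "sw": (-1, 0, 1), "nw": (-1, 1, 0), "se": (1, -1, 0)}
    x = y = z = 0
    for step in seq:
        dx, dy, dz = moves[step]
        x, y, z = x + dx, y + dy, z + dz
    return (abs(x) + abs(y) + abs(z)) // 2
# e.g. cube_distance(["ne", "ne", "s", "s"]) == 2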
def distance(seq):
steps = []
path = seq
while True:
changed = False
while len(path) > 0:
cur = path[0]
op = opposite(cur)
if op in path[1:]:
path.remove(op)
path.remove(cur)
changed = True
else:
steps.append(cur)
path = path[1:]
while len(steps) > 0:
cur = steps[0]
for val in steps[1:]:
cond = condense(cur, val)
if cond != None:
path.append(cond)
steps.remove(val)
cur = ''
changed = True
break
if cur != '':
path.append(cur)
steps = steps[1:]
if changed == False:
break
return path
if __name__ == "__main__":
# Part 1 Solution
with open("day11_input", "r") as infile:
path = infile.read().strip()
#path = "se,sw,se,sw,sw"
path = path.split(",")
    print(len(distance(path)))
# Part 2 Solution
with open("day11_input", "r") as infile:
path = infile.read().strip()
path = path.split(",")
dirs = [0] * 6
most = 0
for step in path:
dirs = build_count(dirs, [step])
most = max(most,sum(distance2(dirs)))
    print(most)
"""
!!! Too Slow !!!
"""
#most = 0
#t_path = []
#for i in range(len(path)):
# t_path.append(path[i])
# t_path = distance(t_path)
# most = max(most, len(t_path))
#print most
|
import time
from pgdrive.envs.pgdrive_env import PGDriveEnv
from pgdrive.utils import setup_logger
def vis_highway_render_with_panda_render():
setup_logger(True)
env = PGDriveEnv(
{
"environment_num": 1,
"manual_control": True,
"use_render": True,
"use_image": False,
"use_topdown": True,
}
)
o = env.reset()
s = time.time()
for i in range(1, 100000):
o, r, d, info = env.step(env.action_space.sample())
env.render()
if d:
env.reset()
if i % 1000 == 0:
print("Steps: {}, Time: {}".format(i, time.time() - s))
env.close()
if __name__ == '__main__':
vis_highway_render_with_panda_render()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""MatrixSetDiag in Python"""
import numpy as np
def matrix_set_diag(input_np, diagonal):
"""matrix_set_diag operator implemented in numpy.
Returns a numpy array with the diagonal of input array
replaced with the provided diagonal values.
Parameters
----------
    input_np : numpy.ndarray
Input Array.
Shape = [D1, D2, D3, ... , Dn-1 , Dn]
diagonal : numpy.ndarray
Values to be filled in the diagonal.
Shape = [D1, D2, D3, ... , Dn-1]
Returns
-------
result : numpy.ndarray
New Array with given diagonal values.
Shape = [D1, D2, D3, ... , Dn-1 , Dn]
"""
out = np.array(input_np, copy=True)
n = min(input_np.shape[-1], input_np.shape[-2])
for i in range(n):
out[..., i, i] = diagonal[..., i]
return out
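
# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original file):
# replace the main diagonal of a 3x3 matrix.
if __name__ == "__main__":
    a = np.arange(9, dtype="float32").reshape(3, 3)
    d = np.array([10.0, 20.0, 30.0], dtype="float32")
    print(matrix_set_diag(a, d))
    # [[10.  1.  2.]
    #  [ 3. 20.  5.]
    #  [ 6.  7. 30.]]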
|
import mido
import json
import keyboard
import sys
# load macro mappings from config file
#TODO: command line option for filename
macros = json.load((open("config.json", "r")))
# { "channel": "1", "onpress": "", "onchange": "", "onincrease": "", "ondecrease": "", "granularity": 8 },
# configure the behavior for each channel. granularity is
# how many triggers per rotation. 1 is the most sensitive,
# try 2, 4, 8, 16 etc to add dead zones before firing the
# macro again.
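# A hypothetical config.json illustrating the structure the code expects
# (only the top-level "macros" list and the keys read below are required;
# the key names assigned to each action are examples):
# {
#   "macros": [
#     { "channel": "1", "onpress": "space", "onchange": "",
#       "onincrease": "volume up", "ondecrease": "volume down",
#       "granularity": 8 }
#   ]
# }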
# this will store the last value for each
# key to determine if it's increasing or
# decreasing
keymap=[0 for i in range(100)]
# detect device
ports = mido.get_input_names()
# no device detected
if len(ports) == 0:
sys.stderr.write("ERROR: Couldn't find midi device\n")
exit(1)
print(ports)
pnum = input("Please select a midi port number: ")
# connect and monitor the device messages
with mido.open_input(ports[int(pnum)]) as inport:
for msg in inport:
#print(msg)
# control_change channel=0 control=13 time=0
lines = str(msg).split()
for i in range(0, len(lines)):
# key value pair in message. a=b
# split it into key and value
kvpair = lines[i].split('=')
# if there was a = in that piece
if len(kvpair)>1:
# check the index and parse
if(i == 2):
channel = int(kvpair[1])
if(i == 3):
# consume new channel value
old_value = keymap[channel]
channel_value = int(kvpair[1])
keymap[channel] = channel_value
# print("channel: " + str(channel))
# TODO: put these entries in a map and use the channel as index
for entry in macros["macros"]:
if int(entry["channel"]) == channel:
if entry["onpress"] != "" and int(channel_value) == 127 :
keyboard.press_and_release(entry["onpress"])
if int(channel_value) % int(entry["granularity"]) == 0:
# print(channel_value)
# check last value
if entry["onchange"] != "":
keyboard.press_and_release(entry["onchange"])
if entry["ondecrease"] != "" and int(channel_value) < int(old_value):
keyboard.press_and_release(entry["ondecrease"])
if entry["onincrease"] != "" and int(channel_value) > int(old_value):
keyboard.press_and_release(entry["onincrease"])
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Location(models.Model):
name = models.CharField(max_length = 30)
def __str__(self):
return self.name
class Neighbourhood(models.Model):
name = models.CharField(max_length=30)
location = models.ForeignKey(Location, on_delete=models.CASCADE)
occupants = models.IntegerField(null=True, default=0)
def create_neighbourhood(self):
self.save()
def delete_neighbourhood(self):
self.delete()
@classmethod
def find_neighbourhood(cls, neighbourhood_id):
neighbourhood = cls.objects.get(id = neighbourhood_id)
return neighbourhood
    def update_neighbourhood(self, name):
self.name = name
self.save()
def update_occupants(self, occupants):
self.occupants = occupants
self.save()
def __str__(self):
return self.name
class Profile(models.Model):
name = models.CharField(max_length = 30)
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE, null=True)
bio = models.TextField(null=True)
email = models.EmailField(null=True)
user = models.OneToOneField(User, on_delete=models.CASCADE)
def __str__(self):
return self.name
class Business(models.Model):
name = models.CharField(max_length = 50)
email = models.EmailField()
description = models.TextField(null=True)
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE)
user = models.ForeignKey(Profile, on_delete=models.CASCADE)
def create_business(self):
self.save()
def delete_business(self):
self.delete()
@classmethod
def find_business(cls, business_id):
business = cls.objects.get(id = business_id)
return business
def update_business(self, name):
self.name = name
self.save()
@classmethod
def search_by_name(cls,search_term):
business = cls.objects.filter(name__icontains = search_term)
return business
def __str__(self):
return self.name
class Post(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
user = models.ForeignKey(User, on_delete=models.CASCADE)
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE)
pub_date = models.DateTimeField(auto_now_add=True, null=True)
def __str__(self):
return self.title
|
'''
StaticRoute Genie Ops Object Outputs for NXOS.
'''
class StaticRouteOutput(object):
# 'show ipv4 static route' output
showIpv4StaticRoute = {
'vrf': {
'default': {
'address_family': {
'ipv4': {
'routes': {
'10.4.1.1/32': {
'route': '10.4.1.1/32',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'active': True,
'next_hop': '10.1.3.1',
'next_hop_netmask': '32',
'outgoing_interface': 'Ethernet1/2',
},
},
},
},
'10.16.2.2/32': {
'route': '10.16.2.2/32',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'active': True,
'next_hop': '10.2.3.2',
'next_hop_netmask': '32',
'outgoing_interface': 'Ethernet1/4',
},
2: {
'index': 2,
'active': True,
'next_hop': '10.229.3.2',
'next_hop_netmask': '32',
'outgoing_interface': 'Ethernet1/1',
},
},
},
},
},
},
},
},
},
}
showIpv6StaticRoute = {
'vrf': {
'default': {
'address_family': {
'ipv6': {
'routes': {
'2001:1:1:1::1/128': {
'route': '2001:1:1:1::1/128',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop_vrf': 'default',
'rnh_active': False,
'next_hop': '2001:10:1:3::1',
'next_hop_netmask': '128',
'outgoing_interface': 'Ethernet1/2',
'bfd_enabled': False,
'resolved_tid': 0,
'preference': 1,
},
2: {
'index': 2,
'next_hop_vrf': 'default',
'rnh_active': False,
'next_hop': '2001:20:1:3::1',
'next_hop_netmask': '128',
'outgoing_interface': 'Ethernet1/3',
'bfd_enabled': False,
'resolved_tid': 0,
'preference': 1,
},
},
},
},
'2001:2:2:2::2/128': {
'route': '2001:2:2:2::2/128',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop_vrf': 'default',
'rnh_active': False,
'next_hop': '2001:10:2:3::2',
'next_hop_netmask': '128',
'outgoing_interface': 'Ethernet1/4',
'bfd_enabled': False,
'resolved_tid': 0,
'preference': 1,
},
2: {
'index': 2,
'next_hop_vrf': 'default',
'rnh_active': False,
'next_hop': '2001:20:2:3::2',
'next_hop_netmask': '128',
'outgoing_interface': 'Ethernet1/1',
'bfd_enabled': False,
'resolved_tid': 0,
'preference': 1,
},
},
},
},
},
},
},
},
'VRF1': {
'address_family': {
'ipv6': {
'routes': {
'2001:1:1:1::1/128': {
'route': '2001:1:1:1::1/128',
'next_hop': {
'outgoing_interface': {
'Null0': {
'outgoing_interface': 'Null0',
'preference': 1,
'resolved_tid': 80000003,
'bfd_enabled': False,
'rnh_active': False,
'next_hop_vrf': 'VRF1',
},
},
'next_hop_list': {
1: {
'index': 1,
'next_hop_vrf': 'VRF1',
'rnh_active': False,
'next_hop': '2001:10:1:3::1',
'next_hop_netmask': '128',
'bfd_enabled': False,
'resolved_tid': 0,
'preference': 1,
},
2: {
'index': 2,
'next_hop_vrf': 'VRF1',
'rnh_active': False,
'next_hop': '2001:20:1:3::1',
'next_hop_netmask': '128',
'bfd_enabled': False,
'resolved_tid': 0,
'preference': 1,
},
},
},
},
'2001:2:2:2::2/128': {
'route': '2001:2:2:2::2/128',
'next_hop': {
'outgoing_interface': {
'Null0': {
'outgoing_interface': 'Null0',
'preference': 2,
'resolved_tid': 80000003,
'bfd_enabled': False,
'rnh_active': False,
'next_hop_vrf': 'VRF1',
},
},
'next_hop_list': {
1: {
'index': 1,
'next_hop_vrf': 'VRF1',
'rnh_active': False,
'next_hop': '2001:10:2:3::2',
'next_hop_netmask': '128',
'bfd_enabled': False,
'resolved_tid': 0,
'preference': 1,
},
2: {
'index': 2,
'next_hop_vrf': 'VRF1',
'rnh_active': False,
'next_hop': '2001:20:2:3::2',
'next_hop_netmask': '128',
'bfd_enabled': False,
'resolved_tid': 0,
'preference': 1,
},
3: {
'index': 3,
'next_hop_vrf': 'VRF1',
'rnh_active': True,
'next_hop': '2001:20:2:3::2',
'next_hop_netmask': '128',
'bfd_enabled': False,
'resolved_tid': 0,
'preference': 3,
},
4: {
'index': 4,
'next_hop_vrf': 'VRF1',
'rnh_active': True,
'next_hop': '2001:50:2:3::2',
'next_hop_netmask': '128',
'bfd_enabled': False,
'resolved_tid': 0,
'preference': 5,
},
},
},
},
},
},
},
},
},
}
staticRouteOpsOutput = {
'vrf': {
'default': {
'address_family': {
'ipv4': {
'routes': {
'10.4.1.1/32': {
'route': '10.4.1.1/32',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'active': True,
'next_hop': '10.1.3.1',
'outgoing_interface': 'Ethernet1/2',
},
},
},
},
'10.16.2.2/32': {
'route': '10.16.2.2/32',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'active': True,
'next_hop': '10.2.3.2',
'outgoing_interface': 'Ethernet1/4',
},
2: {
'index': 2,
'active': True,
'next_hop': '10.229.3.2',
'outgoing_interface': 'Ethernet1/1',
},
},
},
},
},
},
'ipv6': {
'routes': {
'2001:1:1:1::1/128': {
'route': '2001:1:1:1::1/128',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop_vrf': 'default',
'next_hop': '2001:10:1:3::1',
'outgoing_interface': 'Ethernet1/2',
'preference': 1,
},
2: {
'index': 2,
'next_hop_vrf': 'default',
'next_hop': '2001:20:1:3::1',
'outgoing_interface': 'Ethernet1/3',
'preference': 1,
},
},
},
},
'2001:2:2:2::2/128': {
'route': '2001:2:2:2::2/128',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop_vrf': 'default',
'next_hop': '2001:10:2:3::2',
'outgoing_interface': 'Ethernet1/4',
'preference': 1,
},
2: {
'index': 2,
'next_hop_vrf': 'default',
'next_hop': '2001:20:2:3::2',
'outgoing_interface': 'Ethernet1/1',
'preference': 1,
},
},
},
},
},
},
},
},
'VRF1': {
'address_family': {
'ipv6': {
'routes': {
'2001:1:1:1::1/128': {
'route': '2001:1:1:1::1/128',
'next_hop': {
'outgoing_interface': {
'Null0': {
'outgoing_interface': 'Null0',
'preference': 1,
'next_hop_vrf': 'VRF1',
},
},
'next_hop_list': {
1: {
'index': 1,
'next_hop_vrf': 'VRF1',
'next_hop': '2001:10:1:3::1',
'preference': 1,
},
2: {
'index': 2,
'next_hop_vrf': 'VRF1',
'next_hop': '2001:20:1:3::1',
'preference': 1,
},
},
},
},
'2001:2:2:2::2/128': {
'route': '2001:2:2:2::2/128',
'next_hop': {
'outgoing_interface': {
'Null0': {
'outgoing_interface': 'Null0',
'preference': 2,
'next_hop_vrf': 'VRF1',
},
},
'next_hop_list': {
1: {
'index': 1,
'next_hop_vrf': 'VRF1',
'next_hop': '2001:10:2:3::2',
'preference': 1,
},
2: {
'index': 2,
'next_hop_vrf': 'VRF1',
'next_hop': '2001:20:2:3::2',
'preference': 1,
},
3: {
'index': 3,
'next_hop_vrf': 'VRF1',
'next_hop': '2001:20:2:3::2',
'preference': 3,
},
4: {
'index': 4,
'next_hop_vrf': 'VRF1',
'next_hop': '2001:50:2:3::2',
'preference': 5,
},
},
},
},
},
},
},
},
},
}
|
import os
import sys
if not os.path.isdir(os.path.join(sys.path[0], "data_pipeline", "tests")):
raise Exception("Tests must be run from base dir of repo")
|
from .topology import Topology
from .elements import Atom, Bond, Angle, Dihedral, Proper, Improper
from .molecule import Molecule
from .zmatrix import ZMatrix
from .rotamer import RotamerLibrary
from .conformer import BCEConformations
|
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import formatdate
from os.path import basename
from smtplib import SMTP
class CustomSMTP(SMTP):
def __init__(self, *args, **kwargs):
self.host = kwargs.pop('host', 'localhost')
self.port = kwargs.pop('port', 25)
self.user = kwargs.pop('user', '')
self.password = kwargs.pop('password', '')
SMTP.__init__(self, *args, **kwargs)
def begin(self):
""" connects and optionally authenticates a connection."""
self.connect(self.host, self.port)
if self.user:
self.starttls()
self.login(self.user, self.password)
def setup_smtp_factory(**settings):
""" expects a dictionary with 'mail.' keys to create an appropriate smtplib.SMTP instance"""
return CustomSMTP(
host=settings.get('mail.host', 'localhost'),
port=int(settings.get('mail.port', 25)),
user=settings.get('mail.user'),
password=settings.get('mail.password'),
timeout=float(settings.get('mail.timeout', 60)),
)
def checkRecipient(gpg_context, uid):
valid_key = bool([k for k in gpg_context.list_keys() if uid in ', '.join(k['uids']) and k['trust'] in 'ofqmu-'])
if not valid_key:
print('Invalid recipient %s' % uid)
return valid_key
def sendMultiPart(smtp, gpg_context, sender, recipients, subject, text, attachments):
""" a helper method that composes and sends an email with attachments
requires a pre-configured smtplib.SMTP instance"""
sent = 0
for to in recipients:
if not to.startswith('<'):
uid = '<%s>' % to
else:
uid = to
if not checkRecipient(gpg_context, uid):
continue
msg = MIMEMultipart()
msg['From'] = sender
msg['To'] = to
msg['Subject'] = subject
msg["Date"] = formatdate(localtime=True)
msg.preamble = u'This is an email in encrypted multipart format.'
attach = MIMEText(str(gpg_context.encrypt(text.encode('utf-8'), uid, always_trust=True)))
attach.set_charset('UTF-8')
msg.attach(attach)
for attachment in attachments:
with open(attachment, 'rb') as fp:
attach = MIMEBase('application', 'octet-stream')
attach.set_payload(str(gpg_context.encrypt_file(fp, uid, always_trust=True)))
attach.add_header('Content-Disposition', 'attachment', filename=basename('%s.pgp' % attachment))
msg.attach(attach)
# TODO: need to catch exception?
# yes :-) we need to adjust the status accordingly (>500 so it will be destroyed)
smtp.begin()
smtp.sendmail(sender, to, msg.as_string())
smtp.quit()
sent += 1
return sent
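
# ---------------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration; hosts, addresses and
# credentials are examples). setup_smtp_factory expects 'mail.*' keys, and
# sendMultiPart needs a python-gnupg context holding the recipients' public keys:
#   import gnupg
#   smtp = setup_smtp_factory(**{'mail.host': 'smtp.example.com', 'mail.port': '587',
#                                'mail.user': 'sender', 'mail.password': 'secret'})
#   gpg = gnupg.GPG()
#   sendMultiPart(smtp, gpg, 'sender@example.com', ['alice@example.com'],
#                 'Subject', u'Body text', [])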
|
# OK, let's collect data from a preprogrammed pick
import gym
import roboticsPlayroomPybullet
import numpy as np
import os
import shutil
from tqdm import tqdm
env = gym.make('pandaPlay1Obj-v0')
env.render(mode='human')
env.reset()
open_gripper = np.array([0.04])
closed_gripper = np.array([0.01])
p = env.panda.bullet_client
which_object = 0
top_down_grip_ori = np.array(env.p.getQuaternionFromEuler([ np.pi, 0, 0]))
def viz_pos(desired_position):
goal = np.ones(6)
goal[which_object * 3:(which_object + 1) * 3] = desired_position
env.panda.reset_goal_pos(goal)
#--These two skills are used both in picking and pushing, use the offset to push by going next to
def go_above(env, obj_number, offset = np.zeros(3)):
desired_position = env.panda.calc_environment_state()[obj_number]['pos'] + np.array([0, 0.00, 0.1]) + offset
action = np.concatenate([desired_position , top_down_grip_ori, open_gripper])
return action
def descend_push(env, obj_number, offset = np.zeros(3)):
desired_position = env.panda.calc_environment_state()[obj_number]['pos'] + np.array([0, 0,0.0]) + offset
current_position = env.panda.calc_actor_state()['pos']
#current_orn = env.panda.calc_actor_state()['orn']
action = np.concatenate([desired_position , top_down_grip_ori, closed_gripper])
return action
# Skills only used for picking
def descend(env, obj_number, offset = np.zeros(3)):
desired_position = env.panda.calc_environment_state()[obj_number]['pos'] + offset
current_position = env.panda.calc_actor_state()['pos']
# descend slowly for the sake of the IK
desired_position[2] = max(desired_position[2], current_position[2] - 0.03)
#current_orn = p.getEulerFromQuaternion(env.panda.calc_actor_state()['orn'])
viz_pos(desired_position)
action = np.concatenate([desired_position , top_down_grip_ori, open_gripper])
return action
def close(env, obj_number, offset = np.zeros(3)):
desired_position = env.panda.calc_environment_state()[obj_number]['pos']
current_position = env.panda.calc_actor_state()['pos']
#current_orn = env.panda.calc_actor_state()['orn']
action = np.concatenate([desired_position , top_down_grip_ori, closed_gripper])
return action
def lift(env, obj_number, offset = np.zeros(3)):
desired_position = env.panda.calc_environment_state()[obj_number]['pos']
desired_position[2] += 0.1
current_position = env.panda.calc_actor_state()['pos']
viz_pos(desired_position)
#current_orn = env.panda.calc_actor_state()['orn']
action = np.concatenate([desired_position , top_down_grip_ori, closed_gripper])
return action
def take_to(env, position, offset = np.zeros(3)):
desired_position = position
current_position = env.panda.calc_actor_state()['pos']
delta = (desired_position - current_position)*0.2
viz_pos(desired_position)
#current_orn = env.panda.calc_actor_state()['orn']
action = np.concatenate([current_position+delta , top_down_grip_ori, closed_gripper])
return action
def reorient_obj(env, position, offset = np.zeros(3)):
desired_position = position
action = np.concatenate([desired_position, env.panda.default_arm_orn, closed_gripper])
return action
def go_above_reorient(env, obj_number, offset = np.zeros(3)):
desired_position = env.panda.calc_environment_state()[obj_number]['pos'] + np.array([0, 0.00, 0.1])
action = np.concatenate([desired_position , env.panda.default_arm_orn, open_gripper])
return action
def pick_to(env, t, o, counter, acts,obs,currentPoses,ags,cagb,targetPoses):
global which_object
times = np.array([0.7, 1.2, 1.4, 1.6, 2.5, 2.9]) + t
#times = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5]) + t
states = [go_above, descend, close, lift, take_to, go_above]
take_to_pos = np.random.uniform(env.goal_lower_bound, env.goal_upper_bound)
goal = env.panda.goal
goal[which_object*3:(which_object+1)*3] = take_to_pos
env.panda.reset_goal_pos(goal)
data = peform_action(env, t, o, counter, acts,obs,currentPoses,ags,cagb,targetPoses, times, states, goal=take_to_pos, obj_number=which_object)
    which_object = min(env.num_objects-1, not which_object) # flip which object we are playing with
return data
def pick_reorient(env, t, o, counter, acts,obs,currentPoses,ags,cagb,targetPoses):
global which_object
times = np.array([0.7, 1.2, 1.4, 1.6, 2.5, 2.9]) + t
#times = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5]) + t
states = [go_above, descend, close, lift, reorient_obj, go_above_reorient]
take_to_pos = env.panda.calc_environment_state()[which_object]['pos'] + np.array([0,0,0.05])
goal = env.panda.goal
goal[which_object*3:(which_object+1)*3] = take_to_pos
env.panda.reset_goal_pos(goal)
data = peform_action(env, t, o, counter, acts,obs,currentPoses,ags,cagb,targetPoses, times, states, goal=take_to_pos, obj_number=which_object)
which_object = min(env.num_objects-1, not which_object) # flip which object we are playing with
return data
#################################################### Door script ####################################
door_z = 0.12
def go_up(env, goal):
desired_position = [0,0,0]
desired_position[2] = 0.3
action = np.concatenate([np.array(desired_position), top_down_grip_ori, open_gripper])
return action
def go_in_front(env, goal):
door_x = env.panda.calc_environment_state()[2]['pos'][0]
desired_position = np.array([door_x, 0.30, door_z])
action = np.concatenate([desired_position, env.panda.default_arm_orn, open_gripper])
return action
def close_on_door(env, goal):
door_x = env.panda.calc_environment_state()[2]['pos'][0]
desired_position = np.array([door_x, 0.4, door_z])
action = np.concatenate([desired_position, env.panda.default_arm_orn, closed_gripper])
return action
def pull_door(env, goal):
action = np.concatenate([goal, env.panda.default_arm_orn, closed_gripper])
return action
def toggle_door(env, t, o, counter, acts,obs,currentPoses,ags,cagb,targetPoses):
#times = np.array([0.4, 1.0, 1.4, 1.9, 2.0]) + t
times = np.array([0.7, 1.0, 1.5, 1.6,2.0]) + t
#times = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5]) + t
states = [go_in_front, close_on_door, pull_door, go_in_front, go_up] # go up is optional
door_x = env.panda.calc_environment_state()[2]['pos'][0]
if door_x < 0:
des_x = 0.15
else:
des_x = -0.15
desired_position = np.array([des_x, 0.4, door_z])
data = peform_action(env, t, o, counter, acts,obs,currentPoses,ags,cagb,targetPoses, times, states, goal=desired_position, obj_number=None)
return data
################################################### Toggle Drawer #############################################################
drawer_x = -0.15
drawer_handle = 0.25
def go_above_drawer(env, goal):
drawer_y = -env.panda.calc_environment_state()[3]['pos'][0] -drawer_handle
desired_position = [drawer_x, drawer_y, -0.00]
action = np.concatenate([np.array(desired_position), top_down_grip_ori, open_gripper])
return action
def close_on_drawer(env, goal):
drawer_y = -env.panda.calc_environment_state()[3]['pos'][0] - drawer_handle
desired_position = [drawer_x, drawer_y, -0.1]
action = np.concatenate([np.array(desired_position), top_down_grip_ori, open_gripper])
return action
def pull_drawer(env, goal):
desired_position = goal
action = np.concatenate([np.array(desired_position), top_down_grip_ori, closed_gripper])
return action
def toggle_drawer(env, t, o, counter, acts,obs,currentPoses,ags,cagb,targetPoses):
#times = np.array([0.4, 1.0, 1.4, 1.9, 2.0]) + t
times = np.array([0.7, 1.0, 1.5, 1.6]) + t
#times = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5]) + t
states = [go_above_drawer, close_on_drawer, pull_drawer, go_up] # go up is optional
drawer_y = -env.panda.calc_environment_state()[3]['pos'][0]
if drawer_y < 0:
des_y = 0.15
else:
des_y = -0.15
desired_position = np.array([drawer_x, -0.2+des_y, -0.1])
data = peform_action(env, t, o, counter, acts,obs,currentPoses,ags,cagb,targetPoses, times, states, goal=desired_position, obj_number=None)
return data
def quat_sign_flip(a, idxs):
for pair in idxs:
for i in range(1, len(a)):
quat = a[i, pair[0]:pair[1]]
last_quat = a[i - 1, pair[0]:pair[1]]
if (np.sign(quat) == -np.sign(last_quat)).all(): # i.e, it is an equivalent quaternion
a[i, pair[0]:pair[1]] = - a[i, pair[0]:pair[1]]
return a
def peform_action(env, t, o, counter, acts,obs,currentPoses,ags,cagb,targetPoses, times, states, goal=None, offset=np.zeros(3), obj_number=0):
state_pointer = 0
while (t < times[state_pointer]):
if obj_number is not None:
if state_pointer == 4:
action = states[state_pointer](env, goal, offset = np.zeros(3))
else:
action = states[state_pointer](env, obj_number=obj_number, offset=offset)
else:
action = states[state_pointer](env, goal)
if not debugging:
p.saveBullet(example_path + '/env_states/' + str(counter) + ".bullet")
counter += 1 # little counter for saving the bullet states
        acts.append(action)
        obs.append(o['observation'])
        ags.append(o['achieved_goal'])
        cagb.append(o['controllable_achieved_goal'])
        currentPoses.append(o['joints'])
o2, r, d, info = env.step(action)
targetPoses.append(info['target_poses'])
#print(o2['achieved_goal'][16:])
if d:
print('Env limits exceeded')
return {'success':0, 't':t}
# NOTE! This is different to how it is done in goal conditioned RL, the ag is from
# the same timestep because thats how we'll use it in LFP (and because its harder to do
# the rl style step reset in VR teleop.
o = o2
t += dt
if t >= times[state_pointer]:
state_pointer += 1
if state_pointer > len(times)-1:
break
return {'last_obs': o, 'success': 1, 't':t, 'counter':counter}
debugging = False
dt = 0.04
base_path = 'collected_data/scripted_play_demos/'
obs_act_path = base_path + 'obs_act_etc/'
env_state_path = base_path + 'states_and_ims/'
try:
os.makedirs(obs_act_path)
except:
pass
try:
os.makedirs(env_state_path)
except:
pass
activities = [toggle_drawer, toggle_door, pick_reorient, pick_to] #[pick_to]#, push_directionally]
#activities = [push_directionally]
play_len = 8
for i in tqdm(range(0, 500)): # 60
o = env.reset()
t = 0
acts, obs, currentPoses, ags, cagb, targetPoses = [], [], [], [], [], []
demo_count = len(list(os.listdir(obs_act_path)))
example_path = env_state_path + str(demo_count)
npz_path = obs_act_path + str(demo_count)
if not debugging:
os.makedirs(example_path + '/env_states')
os.makedirs(example_path + '/env_images')
os.makedirs(npz_path)
counter = 0
#pbar = tqdm(total=play_len)
while(t < play_len):
activity_choice = np.random.choice(len(activities))
result = activities[activity_choice](env, t, o, counter, acts,obs,currentPoses,ags,cagb,targetPoses)
if not result['success']:
break
#pbar.update(result['t'] - t)
t = result['t']
counter = result['counter']
o = result['last_obs']
if t>6: #reasonable length with some play interaction
if not debugging:
acts = quat_sign_flip(np.array(acts), [(3, 7)])
obs = quat_sign_flip(np.array(obs), [(3, 7), (10, 14)])
ags = quat_sign_flip(np.array(ags), [(3, 7)])
np.savez(npz_path+ '/data', acts=acts, obs=obs,
achieved_goals =ags,
controllable_achieved_goals =cagb, joint_poses=currentPoses, target_poses=targetPoses)
demo_count += 1
else:
print('Demo failed')
# delete the folder with all the saved states within it
if not debugging:
shutil.rmtree(obs_act_path + str(demo_count))
shutil.rmtree(env_state_path + str(demo_count))
#
# def push_directionally(env, t, o, counter, acts,obs,currentPoses,ags,cagb,targetPoses):
# times = np.array([0.5, 1.0, 1.4]) + t
# states = [go_above, descend_push, go_above]
# # choose a random point in a circle around the block
# alpha = np.random.random(1)*2*np.pi
# r = 0.03
# x,z = r * np.cos(alpha), r * np.sin(alpha)
# offset = np.array([x,0,z])
#
#
# return peform_action(env, t, o, counter, acts, obs, currentPoses, ags, cagb, targetPoses, times, states, offset=offset)
|
import requests
from bs4 import BeautifulSoup
import re
from django.http import JsonResponse
def solr(request,title,author):
r = requests.get("http://katalog.stbib-koeln.de:8983/solr/select?rows=1&q="+title+"+"+author)
soup = BeautifulSoup(r.text)
PublicationDate = soup.find("arr", {"name": "DateOfPublication"}).find("str")
Author = soup.find("arr",{"name":"Author"}).find_all("str")
Authors = []
for a in Author:
Authors.append(a.text)
MediaType = soup.find("arr",{"name":"MaterialType"}).find("str")
Keywords = []
Ky = soup.find("arr",{"name":"text_auto"}).find_all("str")
for k in Ky:
Keywords.append(k.text)
Category = []
Catg = soup.find("arr",{"name":"SubjectHeading"}).find_all("str")
for c in Catg:
Category.append(c.text)
    return JsonResponse({'metadata': {
        'publication_date': PublicationDate.text if PublicationDate else None,
        'authors': Authors,
        'media_type': MediaType.text if MediaType else None,
        'keywords': Keywords,
        'categories': Category,
    }})
|
import json
import os
# formats songs for the app
# note - translate uses python3 syntax, so run this with python3!
# note - needs work on getting rid of [] from genius
things_to_remove = dict.fromkeys(map(ord, '()[]"'), None)
output = ""
if (os.path.isfile("input.txt")):
with open("input.txt") as f:
words = f.read().split()
for word in words:
if not word[0] == '(' and not word[0] == '[' and not word[-1] == ')' and not word[-1] == ']':
output += word.translate(things_to_remove) + " "
with open("output.txt", "w") as f:
    f.write(output)
|
from typing import List, Any, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import adjusted_rand_score
from ML import DBscan_step_positions, DBscan_step_positions_and_velocity, build_ground_truth, \
DBscan_step_intuition_dist_multistep_1, DBscan_step_intuition_dist
from utils import charge_labels_simulation
if __name__ == "__main__":
mode = 2
# 0: analyse number of clusters on list_nb_boids on a particular timestep
# 1: analyse number of clusters for each population in list_nb_boids on different timesteps
# 2: analyse ARI scores for each population in list_nb_boids on different timesteps
if mode == 1:
# another part where we watch evolution of number of clusters during the simulation depending on number of boids
list_n_boids = [200, 500, 1000]
step_to_analyse = [1, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400,
1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800,
2900, 2999]
# get statistics on number of clusters on time-step :step_to_analyse:
for n_boids in list_n_boids:
list_num_boids = [n_boids] * 4
# create a dataframe for a population
name_pandas_file_statistics = "evolution_clusters_statistics_on_" + str(n_boids)
column_names = ["time step", "std number clusters", "mean number clusters"]
column_names_p = ["time step", "std number clusters p", "mean number clusters p"]
column_names_pv = ["time step", "std number clusters pv", "mean number clusters pv"]
results_statistics = pd.DataFrame(columns=column_names)
results_statistics_p = pd.DataFrame(columns=column_names_p)
results_statistics_pv = pd.DataFrame(columns=column_names_pv)
for step in step_to_analyse:
num_clusters = []
num_clusters_pv = []
num_clusters_p = []
for i in range(10): # 10 different simulations
# get the directory
directory_name = "simulation_data_" + str(list_num_boids) + "_Boids_" + str(i) + "/"
labels_pv = DBscan_step_positions_and_velocity(step=step, old_labels=None,
repository=directory_name)
labels_p = DBscan_step_positions(step=step, old_labels=None, repository=directory_name)
labels = build_ground_truth(step=step, old_labels=None, repository=directory_name,
list_nb_boids=list_num_boids)
nb_clusters = np.unique(labels).shape[0] - 1
                    nb_clusters_p = np.unique(labels_p).shape[0] - 1
                    nb_clusters_pv = np.unique(labels_pv).shape[0] - 1
print("num clusters time step {0} is {1}".format(step, nb_clusters))
num_clusters.append(nb_clusters)
num_clusters_pv.append(nb_clusters_pv)
num_clusters_p.append(nb_clusters_p)
mean_clusters = np.mean(num_clusters)
std_clusters = np.std(num_clusters)
mean_clusters_p = np.mean(num_clusters_p)
std_clusters_p = np.std(num_clusters_p)
mean_clusters_pv = np.mean(num_clusters_pv)
std_clusters_pv = np.std(num_clusters_pv)
results_statistics = results_statistics.append({"time step": step, "mean number clusters":
mean_clusters,
"std number clusters": std_clusters}, ignore_index=True)
results_statistics_p = results_statistics_p.append({"time step": step, "mean number clusters p":
mean_clusters_p,
"std number clusters p": std_clusters_p},
ignore_index=True)
results_statistics_pv = results_statistics_pv.append({"time step": step, "mean number clusters pv":
mean_clusters_pv,
"std number clusters pv": std_clusters_pv},
ignore_index=True)
results_statistics.to_csv(name_pandas_file_statistics + ".csv", index=False)
if mode == 2:
# another part where we watch evolution of number of clusters during the simulation depending on number of boids
list_n_boids = [200, 500, 1000]
# todo to adjust
step_to_analyse_pop200 = [500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400,
1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800,
2900, 2999]
step_to_analyse_pop500 = [500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400,
1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800,
2900, 2999]
step_to_analyse_pop1000 = [500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400,
1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800,
2900, 2999]
method = 4 # see methods behind
# get statistics on number of clusters on time-step :step_to_analyse:
for n_boids in [500, 1000]: # list_n_boids:
if n_boids == 200:
step_to_analyse = step_to_analyse_pop200
elif n_boids == 500:
step_to_analyse = step_to_analyse_pop500
            elif n_boids == 1000:
                step_to_analyse = step_to_analyse_pop1000
list_num_boids = [n_boids] * 4
# create a dataframe for a population
name = None
if method == 1:
name = "DBSCAN_position_Euclidean_metric"
eps = [70, 75, 80, 85]
min_sample = [2, 4, 6, 8, 10]
elif method == 2:
name = "DBSCAN_position_velocities_Euclidean_metric"
# param_to_test
alpha = [0.8, 1, 1.2, 1.4]
beta = [5, 10, 20, 30, 40, 50, 60]
elif method == 3:
name = "DBSCAN_position_velocities_custom_metric"
alpha = [0.8, 1, 1.2, 1.4]
phi = [10, 20, 30, 40, 50]
elif method == 4:
name = "DBSCAN_position_velocities_multistep_Euclidean"
alpha = [0.6, 0.8, 1, 1.2, 1.4]
phi = [10, 20, 30, 40, 50]
gamma = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99]
if method == 1:
for epsilon in eps:
for min_sample_ in min_sample:
params = "min_sample=" + str(min_sample_) + "_" + "epsilon=" + str(epsilon)
name_pandas_file_statistics = "evolution_ARI_statistics_on_" + str(
n_boids) + "_" + name + "_" + params
column_names = ["time step", "std ARI", "mean ARI"]
results_statistics = pd.DataFrame(columns=column_names)
for step in step_to_analyse:
ARI_list = []
                            for i in range(10):  # 10 different simulations
# get the directory
directory_name = "simulation_data_" + str(list_num_boids) + "_Boids_" + str(i) + "/"
# get ground truth
labels_truth = build_ground_truth(step=step, old_labels=None,
repository=directory_name,
list_nb_boids=list_num_boids)
labels = DBscan_step_positions(step=step, old_labels=None,
repository=directory_name,
eps=epsilon, min_sample=min_sample_)
ARI = adjusted_rand_score(labels_truth, labels)
print("rand score time step {0} is {1}".format(step, ARI))
ARI_list.append(ARI)
mean_ARI = np.mean(ARI_list)
std_ARI = np.std(ARI_list)
results_statistics = results_statistics.append({"time step": step, "mean ARI":
mean_ARI,
"std ARI": std_ARI},
ignore_index=True)
results_statistics.to_csv(name_pandas_file_statistics + ".csv", index=False)
if method == 2:
for alpha_ in alpha:
for beta_ in beta:
params = "alpha=" + str(alpha_) + "_" + "beta=" + str(beta_)
name_pandas_file_statistics = "evolution_ARI_statistics_on_" + str(
n_boids) + "_" + name + "_" + params
column_names = ["time step", "std ARI", "mean ARI"]
results_statistics = pd.DataFrame(columns=column_names)
for step in step_to_analyse:
ARI_list = []
                            for i in range(10):  # 10 different simulations
# get the directory
directory_name = "simulation_data_" + str(list_num_boids) + "_Boids_" + str(i) + "/"
# get ground truth
labels_truth = build_ground_truth(step=step, old_labels=None,
repository=directory_name,
list_nb_boids=list_num_boids)
labels = DBscan_step_positions_and_velocity(step=step, old_labels=None,
repository=directory_name,
alpha=alpha_,
beta=beta_)
ARI = adjusted_rand_score(labels_truth, labels)
print("rand score time step {0} is {1}".format(step, ARI))
ARI_list.append(ARI)
mean_ARI = np.mean(ARI_list)
std_ARI = np.std(ARI_list)
results_statistics = results_statistics.append({"time step": step, "mean ARI":
mean_ARI,
"std ARI": std_ARI},
ignore_index=True)
results_statistics.to_csv(name_pandas_file_statistics + ".csv", index=False)
if method == 3:
for alpha_ in alpha:
for phi_ in phi:
params = "alpha=" + str(alpha_) + "_" + "phi=" + str(phi_)
name_pandas_file_statistics = "evolution_ARI_statistics_on_" + str(
n_boids) + "_" + name + "_" + params
column_names = ["time step", "std ARI", "mean ARI"]
results_statistics = pd.DataFrame(columns=column_names)
for step in step_to_analyse:
ARI_list = []
                            for i in range(10):  # 10 different simulations
# get the directory
directory_name = "simulation_data_" + str(list_num_boids) + "_Boids_" + str(i) + "/"
# get ground truth
labels_truth = build_ground_truth(step=step, old_labels=None,
repository=directory_name,
list_nb_boids=list_num_boids)
labels = DBscan_step_intuition_dist(step=step, old_labels=None,
repository=directory_name,
alpha_=alpha_,
phi_=phi_)
ARI = adjusted_rand_score(labels_truth, labels)
print("rand score time step {0} is {1}".format(step, ARI))
ARI_list.append(ARI)
mean_ARI = np.mean(ARI_list)
std_ARI = np.std(ARI_list)
results_statistics = results_statistics.append({"time step": step, "mean ARI":
mean_ARI,
"std ARI": std_ARI},
ignore_index=True)
results_statistics.to_csv(name_pandas_file_statistics + ".csv", index=False)
if method == 4:
for gamma_ in gamma:
for phi_ in phi:
for alpha_ in alpha:
params = "alpha=" + str(alpha_) + "_" + "phi=" + str(phi_) + "_" + "gamma=" + str(gamma_)
name_pandas_file_statistics = "evolution_ARI_statistics_on_" + str(n_boids) + "_" + name \
+ "_" + params
column_names = ["time step", "std ARI", "mean ARI"]
results_statistics = pd.DataFrame(columns=column_names)
for step in step_to_analyse:
ARI_list = []
                                for i in range(10):  # 10 different simulations
# get the directory
directory_name = "simulation_data_" + str(list_num_boids) + "_Boids_" + str(i) + "/"
# get ground truth
labels_truth = build_ground_truth(step=step, old_labels=None,
repository=directory_name,
list_nb_boids=list_num_boids)
labels = DBscan_step_intuition_dist_multistep_1(step=step, old_labels=None,
repository=directory_name,
phi_=phi_,
alpha_=alpha_,
gamma_=gamma_)
ARI = adjusted_rand_score(labels_truth, labels)
print("rand score time step {0} is {1}".format(step, ARI))
ARI_list.append(ARI)
mean_ARI = np.mean(ARI_list)
std_ARI = np.std(ARI_list)
results_statistics = results_statistics.append({"time step": step, "mean ARI":
mean_ARI,
"std ARI": std_ARI},
ignore_index=True)
results_statistics.to_csv(name_pandas_file_statistics + ".csv", index=False)
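# --- Illustrative follow-up (editorial sketch, not part of the original script) ---
# A minimal example, assuming the CSVs produced above exist on disk, of how one of
# the "evolution_ARI_statistics_on_*" files could be read back to inspect the
# mean/std ARI per time step. The file name below uses hypothetical parameter values.
#
# import pandas as pd
# stats = pd.read_csv("evolution_ARI_statistics_on_500_DBSCAN_position_Euclidean_metric_min_sample=2_epsilon=70.csv")
# print(stats[["time step", "mean ARI", "std ARI"]].head())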
|
import asyncio
from dataclasses import dataclass
from enum import Enum
import functools
import heapq
from dataclasses_json import dataclass_json
import numpy as np
from runstats import Statistics
from typing import Callable, List, Optional
from knn import utils
from knn.utils import JSONType
from .base import Reducer
class ListReducer(Reducer):
def __init__(self):
self.results = []
def handle_result(self, input, output: JSONType):
self.results.append(output)
@property
def result(self) -> List[JSONType]:
return self.results
class IsFinishedReducer(Reducer):
def __init__(self):
self.finished = asyncio.Event()
def finish(self):
self.finished.set()
def handle_result(self, input, output):
pass
@property
def result(self) -> bool:
return self.finished.is_set()
class TopKReducer(Reducer):
@dataclass_json
@dataclass
class ScoredResult:
score: float
input: JSONType
output: JSONType
def __lt__(self, other):
return self.score < other.score
def __init__(
self, k: int, extract_func: Optional[Callable[[JSONType], float]] = None
) -> None:
super().__init__()
self.k = k
self.extract_func = extract_func or self.extract_value
self._top_k: List[TopKReducer.ScoredResult] = []
def handle_result(self, input: JSONType, output: JSONType) -> None:
result = TopKReducer.ScoredResult(self.extract_func(output), input, output)
if len(self._top_k) < self.k:
heapq.heappush(self._top_k, result)
elif self._top_k[0] < result:
heapq.heapreplace(self._top_k, result)
def extract_value(self, output: JSONType) -> float:
assert isinstance(output, float)
return output
@property
def result(self) -> List[ScoredResult]:
return list(reversed(sorted(self._top_k)))
class VectorReducer(Reducer):
class PoolingType(Enum):
NONE = functools.partial(lambda x: x)
MAX = functools.partial(np.max, axis=0)
AVG = functools.partial(np.mean, axis=0)
def __init__(
self,
pool_func: PoolingType = PoolingType.NONE,
extract_func: Optional[Callable[[JSONType], np.ndarray]] = None,
) -> None:
super().__init__()
self.pool_func = pool_func
self.extract_func = extract_func or self.extract_value
self._results = [] # type: List[np.ndarray]
def handle_result(self, input: JSONType, output: JSONType) -> None:
self._results.append(self.extract_func(output))
def extract_value(self, output: JSONType) -> np.ndarray:
assert isinstance(output, str)
return utils.base64_to_numpy(output)
@property
def result(self) -> np.ndarray:
return self.pool_func.value(np.stack(self._results))
class StatisticsReducer(Reducer):
def __init__(self, extract_func: Optional[Callable[[JSONType], float]] = None):
super().__init__()
self._result = Statistics()
self.extract_func = extract_func or self.extract_value
def handle_result(self, input: JSONType, output: JSONType) -> None:
self._result.push(self.extract_func(output))
def extract_value(self, output: JSONType) -> float:
assert isinstance(output, float) or isinstance(output, int)
return output
@property
def result(self) -> Statistics:
return self._result
|
import json
from nose.tools import assert_equal, assert_not_equal
from tests.fixtures import WebTest
class TestMetricsController(WebTest):
def test_index(self):
response = self.app.get('/metrics/', follow_redirects=True)
assert_equal(
response.status_code,
200,
'/metrics should return OK'
)
assert_equal(
response.data.find('log in with Google'),
-1,
'/metrics should get the list of metrics'
)
assert_not_equal(
response.data.find('BytesAdded'), -1,
'BytesAdded detail should be displayed'
)
assert_not_equal(
response.data.find('NamespaceEdits'), -1,
'NamespaceEdits detail should be displayed'
)
def test_list(self):
response = self.app.get('/metrics/list/', follow_redirects=True)
parsed = json.loads(response.data)
assert_equal(
len(filter(lambda m : m['name'] == 'BytesAdded', parsed['metrics'])),
1,
            'exactly one metric named BytesAdded should be listed. got: {0}'.format(parsed)
)
def test_configure_get(self):
response = self.app.get('/metrics/configure/BytesAdded')
assert_not_equal(
response.data.find('name="positive_only_sum"'),
-1,
'A form to configure a BytesAdded metric was not rendered'
)
def test_configure_post(self):
response = self.app.post('/metrics/configure/BytesAdded', data=dict(
start_date='hi'
))
assert_not_equal(
response.data.find('<li class="text-error">Not a valid datetime value</li>'),
-1,
'Validation on a BytesAdded configuration is not happening'
)
def test_configure_namespaces_post(self):
response = self.app.post('/metrics/configure/NamespaceEdits', data=dict(
namespaces='abcd',
))
assert_not_equal(
response.data.find('<li class="text-error">'),
-1,
'Validation on the NamespaceEdits configuration, '
'namespaces field is not happening.'
)
|
# -*- coding: utf-8 -*-
"""Crop_data_prep.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1jhfMRdq6na9JiUmcqqZsQAWpknZjvRpY
## Notebook for transforming raw cpdata into mergeable data
### Filter cpdata.csv to MergeFileCrop.csv
### Filter Fertilizer.csv to FertilizerData.csv
"""
import pandas as pd
# Reading the data
crop_data_path = '../Data-raw/cpdata.csv'
fertilizer_data_path = '../Data-raw/Fertilizer.csv'
crop = pd.read_csv(crop_data_path)
fert = pd.read_csv(fertilizer_data_path)
crop.head()
fert.head()
# Helper to strip spaces and lower-case crop names
def change_case(i):
i = i.replace(" ", "")
i = i.lower()
return i
fert['Crop'] = fert['Crop'].apply(change_case)
crop['label'] = crop['label'].apply(change_case)
# Harmonize crop names in the fertilizer dataset with the labels used in the crop dataset
fert['Crop'] = fert['Crop'].replace('mungbeans','mungbean')
fert['Crop'] = fert['Crop'].replace('lentils(masoordal)','lentil')
fert['Crop'] = fert['Crop'].replace('pigeonpeas(toordal)','pigeonpeas')
fert['Crop'] = fert['Crop'].replace('mothbean(matki)','mothbeans')
fert['Crop'] = fert['Crop'].replace('chickpeas(channa)','chickpea')
crop.head()
crop.tail()
crop_names = crop['label'].unique()
crop_names
fert.head()
del fert['Unnamed: 0']
crop_names_from_fert = fert['Crop'].unique()
crop_names_from_fert
for i in crop_names_from_fert:
print(crop[crop['label'] == i])
crop['label']
extract_labels = []
for i in crop_names_from_fert:
if i in crop_names:
extract_labels.append(i)
# Use the extracted labels to pull the matching rows from the crop and fertilizer datasets
new_crop = pd.DataFrame(columns = crop.columns)
new_fert = pd.DataFrame(columns = fert.columns)
for label in extract_labels:
new_crop = new_crop.append(crop[crop['label'] == label])
for label in extract_labels:
new_fert = new_fert.append(fert[fert['Crop'] == label].iloc[0])
new_crop
new_fert
new_crop.to_csv('../Data-raw/MergeFileCrop.csv')
new_fert.to_csv('../Data-raw/FertilizerData.csv')
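# --- Illustrative only (editorial sketch, not part of the original notebook) ---
# A hypothetical follow-up, assuming the two files written above are later joined:
# merge the filtered crop rows with their fertilizer entry on the crop name
# ('label' in MergeFileCrop.csv, 'Crop' in FertilizerData.csv). The output file
# name is hypothetical.
#
# merged = new_crop.merge(new_fert, left_on='label', right_on='Crop', how='left')
# merged.to_csv('../Data-raw/MergedCropFert.csv', index=False)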
|
import sys
import os.path as op
from pathlib import Path
from databases import Database
sys.path.append(op.abspath(op.join(op.dirname(__file__), '..')))
import create_db # noqa: import not at top of file
projects = {'icepap-ipassign':
{'README': ('tests/test_data/ipa/README.md',
'Complete ipassign documentation'),
'GUI README': ('tests/test_data/ipa/gui/gui.md',
'Qt Gui development documentation')},
'h5py':
{'Groups': ('/home/cydanil/h5py/docs/high/group.rst',
'hdf5 groups manual'),
'Files': ('/home/cydanil/h5py/docs/high/file.rst',
'hdf5 files manual'),
'Build': ('/home/cydanil/h5py/docs/build.rst',
'hdf5 build how-to')},
'rook':
{'Sample docx file': ('tests/test_data/file-sample_1MB.docx',
'Test microsoft docx file handling'),
'Sample doc file': ('tests/test_data/file-sample_1MB.doc',
'Test legacy doc file handling')},
'bluejay': {},
'pelican': {},
'ostrich': {},
}
insert_project = ('INSERT INTO "Project" ("Name") '
'VALUES (:Name)')
insert_document = ('INSERT INTO "Document" '
'("Name", "Location", "Description") '
'VALUES (:Name, :Location, :Description)')
insert_project_entry = ('INSERT INTO "ProjectEntry" '
'("ProjectId", "DocumentId") '
'VALUES (:ProjectId, :DocumentId)')
async def create_and_populate_test_db(path: Path) -> None:
"""Create and populate a test database at the given path."""
if not str(path).endswith('alfie.db'):
path = path / 'alfie.db'
await create_db.create(path)
async with Database(f'sqlite:///{path}') as db:
for project, items in projects.items():
pid = await db.execute(insert_project, {'Name': project})
for name, (loc, desc) in items.items():
did = await db.execute(insert_document, {'Name': name,
'Location': loc,
'Description': desc})
await db.execute(insert_project_entry, {'ProjectId': pid,
'DocumentId': did})
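# --- Illustrative usage (editorial sketch, not part of the original module) ---
# A minimal example, assuming it is run from a context where an event loop can be
# started (e.g. a pytest fixture); the target directory is hypothetical and must exist.
#
# import asyncio
# asyncio.run(create_and_populate_test_db(Path('/tmp/alfie_test')))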
|
#
# PySNMP MIB module TPT-HIGH-AVAIL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TPT-HIGH-AVAIL-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:26:19 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
iso, Unsigned32, Bits, ObjectIdentity, Counter32, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Gauge32, ModuleIdentity, Integer32, IpAddress, TimeTicks, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Unsigned32", "Bits", "ObjectIdentity", "Counter32", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Gauge32", "ModuleIdentity", "Integer32", "IpAddress", "TimeTicks", "MibIdentifier")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
EnabledOrNot, = mibBuilder.importSymbols("TPT-PORT-CONFIG-MIB", "EnabledOrNot")
tpt_tpa_eventsV2, tpt_tpa_objs, tpt_tpa_unkparams = mibBuilder.importSymbols("TPT-TPAMIBS-MIB", "tpt-tpa-eventsV2", "tpt-tpa-objs", "tpt-tpa-unkparams")
tpt_high_avail_objs = ModuleIdentity((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6)).setLabel("tpt-high-avail-objs")
tpt_high_avail_objs.setRevisions(('2016-09-08 18:54', '2016-05-25 18:54',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: tpt_high_avail_objs.setRevisionsDescriptions(('Added new FaultCause values to support TPS. Updated description of highAvailTransparentPartner.', 'Updated copyright information. Minor MIB syntax fixes.',))
if mibBuilder.loadTexts: tpt_high_avail_objs.setLastUpdated('201609081854Z')
if mibBuilder.loadTexts: tpt_high_avail_objs.setOrganization('Trend Micro, Inc.')
if mibBuilder.loadTexts: tpt_high_avail_objs.setContactInfo('www.trendmicro.com')
if mibBuilder.loadTexts: tpt_high_avail_objs.setDescription("Device information related to high availability. Copyright (C) 2016 Trend Micro Incorporated. All Rights Reserved. Trend Micro makes no warranty of any kind with regard to this material, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose. Trend Micro shall not be liable for errors contained herein or for incidental or consequential damages in connection with the furnishing, performance, or use of this material. This document contains proprietary information, which is protected by copyright. No part of this document may be photocopied, reproduced, or translated into another language without the prior written consent of Trend Micro. The information is provided 'as is' without warranty of any kind and is subject to change without notice. The only warranties for Trend Micro products and services are set forth in the express warranty statements accompanying such products and services. Nothing herein should be construed as constituting an additional warranty. Trend Micro shall not be liable for technical or editorial errors or omissions contained herein. TippingPoint(R), the TippingPoint logo, and Digital Vaccine(R) are registered trademarks of Trend Micro. All other company and product names may be trademarks of their respective holders. All rights reserved. This document contains confidential information, trade secrets or both, which are the property of Trend Micro. No part of this documentation may be reproduced in any form or by any means or used to make any derivative work (such as translation, transformation, or adaptation) without written permission from Trend Micro or one of its subsidiaries. All other company and product names may be trademarks of their respective holders. ")
class FaultState(TextualConvention, Integer32):
description = 'The current fault state of the device.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("normal", 0), ("fallback", 1))
class FaultCause(TextualConvention, Integer32):
description = 'The reason for the current fault state of the device.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20))
namedValues = NamedValues(("none", 0), ("suspended-task", 1), ("out-of-memory", 2), ("hardware-breakpoint", 3), ("tse-failure", 4), ("watchdog-timeout", 5), ("user-reset", 6), ("user-fallback", 7), ("threshold-failure", 8), ("software-watchdog-timeout", 9), ("oam-fault", 10), ("hard-disk-disable", 11), ("initialization-failure", 12), ("internal-link-failure", 13), ("multiple-fan-failures", 14), ("packet-egress-integrity", 15), ("stack-master", 16), ("waiting-on-stack", 17), ("spike-reboot-or-halt", 18), ("process-error", 19), ("low-health-score", 20))
class ConnectionState(TextualConvention, Integer32):
description = 'State of the connection between a device and its transparent HA partner.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2))
namedValues = NamedValues(("not-connected", 0), ("unresponsive", 1), ("connected", 2))
class PerfProtPhase(TextualConvention, Integer32):
description = 'The performance protection phase (entering, continuing, or exiting).'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("entering", 1), ("continuing", 2), ("exiting", 3))
class ZphaState(TextualConvention, Integer32):
description = 'Whether ZPHA bypass is currently in effect.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("normal", 0), ("ips-bypass", 1))
class ZphaAction(TextualConvention, Integer32):
description = 'The ZPHA action (normal or bypass).'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2))
namedValues = NamedValues(("unknown", 0), ("normal", 1), ("bypass", 2))
class ZphaMode(TextualConvention, Integer32):
description = 'The ZPHA fiber mode (single or multi).'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 2, 3))
namedValues = NamedValues(("unknown", 0), ("single", 2), ("multi", 3))
class ZphaPresent(TextualConvention, Integer32):
description = 'Whether segmental ZPHA is supported on the device.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("absent", 0), ("present", 1))
highAvailTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailTimeStamp.setStatus('current')
if mibBuilder.loadTexts: highAvailTimeStamp.setDescription('The time of the last transition of the fault state (in seconds since January 1, 1970). This value is zero if the fault state has not changed since the last reboot.')
highAvailFaultState = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 2), FaultState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailFaultState.setStatus('current')
if mibBuilder.loadTexts: highAvailFaultState.setDescription('The current fault state of the device.')
highAvailFaultCause = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 3), FaultCause()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailFaultCause.setStatus('current')
if mibBuilder.loadTexts: highAvailFaultCause.setDescription('The reason for the current fault state of the device.')
highAvailThresholdEnabled = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 4), EnabledOrNot()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailThresholdEnabled.setStatus('current')
if mibBuilder.loadTexts: highAvailThresholdEnabled.setDescription('The current fallback threshold enabled setting for the device.')
highAvailThresholdPercent = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailThresholdPercent.setStatus('current')
if mibBuilder.loadTexts: highAvailThresholdPercent.setDescription('If the fallback threshold is enabled, the percent (0-100) packet loss at which the device is configured to enter the fallback state.')
highAvailEnabled = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 6), EnabledOrNot()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailEnabled.setStatus('current')
if mibBuilder.loadTexts: highAvailEnabled.setDescription('Whether intrinsic high availability is enabled for this device.')
highAvailTransparentState = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 7), ConnectionState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailTransparentState.setStatus('current')
if mibBuilder.loadTexts: highAvailTransparentState.setDescription("State of the connection to the device's transparent HA partner.")
highAvailTransparentEnabled = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 8), EnabledOrNot()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailTransparentEnabled.setStatus('current')
if mibBuilder.loadTexts: highAvailTransparentEnabled.setDescription('Whether transparent high availability is enabled for this device.')
highAvailTransparentPartner = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailTransparentPartner.setStatus('current')
if mibBuilder.loadTexts: highAvailTransparentPartner.setDescription("Network address OR serial number of the device's transparent HA partner.")
highAvailZeroPowerState = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 10), ZphaState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailZeroPowerState.setStatus('current')
if mibBuilder.loadTexts: highAvailZeroPowerState.setDescription('The current zero-power HA state of the device.')
highAvailZeroPowerQuantity = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailZeroPowerQuantity.setStatus('current')
if mibBuilder.loadTexts: highAvailZeroPowerQuantity.setDescription('The number of segments with zero-power HA modules.')
highAvailZeroPowerTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 12), )
if mibBuilder.loadTexts: highAvailZeroPowerTable.setStatus('current')
if mibBuilder.loadTexts: highAvailZeroPowerTable.setDescription('Table of IP addresses on the device and their attributes.')
highAvailZeroPowerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 12, 1), ).setIndexNames((0, "TPT-HIGH-AVAIL-MIB", "highAvailZeroPowerIndex"))
if mibBuilder.loadTexts: highAvailZeroPowerEntry.setStatus('current')
if mibBuilder.loadTexts: highAvailZeroPowerEntry.setDescription('An entry in the host IP address table. Rows cannot be created or deleted.')
highAvailZeroPowerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 12, 1, 1), Unsigned32())
if mibBuilder.loadTexts: highAvailZeroPowerIndex.setStatus('current')
if mibBuilder.loadTexts: highAvailZeroPowerIndex.setDescription('Index into the ZPHA table, starting with 1.')
highAvailZeroPowerSegment = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 12, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailZeroPowerSegment.setStatus('current')
if mibBuilder.loadTexts: highAvailZeroPowerSegment.setDescription('The name of the segment to which the ZPHA is attached.')
highAvailZeroPowerActive = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 12, 1, 3), ZphaState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailZeroPowerActive.setStatus('current')
if mibBuilder.loadTexts: highAvailZeroPowerActive.setDescription('Whether the ZPHA is currently active on this segment.')
highAvailZeroPowerAction = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 12, 1, 4), ZphaAction()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailZeroPowerAction.setStatus('current')
if mibBuilder.loadTexts: highAvailZeroPowerAction.setDescription('The action (usually bypass) that the segment takes when ZPHA is active.')
highAvailZeroPowerMode = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 12, 1, 5), ZphaMode()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailZeroPowerMode.setStatus('current')
if mibBuilder.loadTexts: highAvailZeroPowerMode.setDescription('The fiber mode (single or multi) of this ZPHA.')
highAvailZeroPowerPresence = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 6, 13), ZphaPresent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: highAvailZeroPowerPresence.setStatus('current')
if mibBuilder.loadTexts: highAvailZeroPowerPresence.setDescription('An indication of whether ZPHA is supported on the device.')
tptIhaNotifyDeviceID = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 81), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptIhaNotifyDeviceID.setStatus('current')
if mibBuilder.loadTexts: tptIhaNotifyDeviceID.setDescription('The unique identifier of the device sending this notification.')
tptIhaNotifyTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 82), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptIhaNotifyTimeStamp.setStatus('current')
if mibBuilder.loadTexts: tptIhaNotifyTimeStamp.setDescription('The time of this notification (in seconds since January 1, 1970).')
tptIhaNotifyFaultState = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 83), FaultState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptIhaNotifyFaultState.setStatus('current')
if mibBuilder.loadTexts: tptIhaNotifyFaultState.setDescription('The current fault state of the device.')
tptIhaNotifyFaultCause = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 84), FaultCause()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptIhaNotifyFaultCause.setStatus('current')
if mibBuilder.loadTexts: tptIhaNotifyFaultCause.setDescription('The reason for the current fault state of the device.')
tptIhaNotify = NotificationType((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 0, 15)).setObjects(("TPT-HIGH-AVAIL-MIB", "tptIhaNotifyDeviceID"), ("TPT-HIGH-AVAIL-MIB", "tptIhaNotifyTimeStamp"), ("TPT-HIGH-AVAIL-MIB", "tptIhaNotifyFaultState"), ("TPT-HIGH-AVAIL-MIB", "tptIhaNotifyFaultCause"))
if mibBuilder.loadTexts: tptIhaNotify.setStatus('current')
if mibBuilder.loadTexts: tptIhaNotify.setDescription('Notification: Used to inform the management station of changes in the intrinsic HA fault state on the device.')
tptTrhaNotifyDeviceID = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 86), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptTrhaNotifyDeviceID.setStatus('current')
if mibBuilder.loadTexts: tptTrhaNotifyDeviceID.setDescription('The unique identifier of the device sending this notification.')
tptTrhaNotifyTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 87), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptTrhaNotifyTimeStamp.setStatus('current')
if mibBuilder.loadTexts: tptTrhaNotifyTimeStamp.setDescription('The time of this notification (in seconds since January 1, 1970).')
tptTrhaNotifyNewState = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 88), ConnectionState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptTrhaNotifyNewState.setStatus('current')
if mibBuilder.loadTexts: tptTrhaNotifyNewState.setDescription('The new transparent HA state of the device.')
tptTrhaNotify = NotificationType((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 0, 18)).setObjects(("TPT-HIGH-AVAIL-MIB", "tptTrhaNotifyDeviceID"), ("TPT-HIGH-AVAIL-MIB", "tptTrhaNotifyTimeStamp"), ("TPT-HIGH-AVAIL-MIB", "tptTrhaNotifyNewState"))
if mibBuilder.loadTexts: tptTrhaNotify.setStatus('current')
if mibBuilder.loadTexts: tptTrhaNotify.setDescription('Notification: Used to inform the management station of changes in the transparent HA state on the device.')
tptZphaNotifyDeviceID = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 161), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptZphaNotifyDeviceID.setStatus('current')
if mibBuilder.loadTexts: tptZphaNotifyDeviceID.setDescription('The unique identifier of the device sending this notification.')
tptZphaNotifyTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 162), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptZphaNotifyTimeStamp.setStatus('current')
if mibBuilder.loadTexts: tptZphaNotifyTimeStamp.setDescription('The time of this notification (in seconds since January 1, 1970).')
tptZphaNotifySegmentName = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 163), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptZphaNotifySegmentName.setStatus('current')
if mibBuilder.loadTexts: tptZphaNotifySegmentName.setDescription('The name of the segment whose ZPHA changed state, or an empty string to indicate the external ZPHA.')
tptZphaNotifyNewState = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 164), ZphaState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptZphaNotifyNewState.setStatus('current')
if mibBuilder.loadTexts: tptZphaNotifyNewState.setDescription('The new state of the ZPHA that has changed.')
tptZphaNotify = NotificationType((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 0, 24)).setObjects(("TPT-HIGH-AVAIL-MIB", "tptZphaNotifyDeviceID"), ("TPT-HIGH-AVAIL-MIB", "tptZphaNotifyTimeStamp"), ("TPT-HIGH-AVAIL-MIB", "tptZphaNotifySegmentName"), ("TPT-HIGH-AVAIL-MIB", "tptZphaNotifyNewState"))
if mibBuilder.loadTexts: tptZphaNotify.setStatus('current')
if mibBuilder.loadTexts: tptZphaNotify.setDescription('Notification: Used to inform the management station of changes in a ZPHA state on the device.')
tptPerfProtNotifyDeviceID = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 141), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPerfProtNotifyDeviceID.setStatus('current')
if mibBuilder.loadTexts: tptPerfProtNotifyDeviceID.setDescription('The unique identifier of the device sending this notification.')
tptPerfProtNotifyTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 142), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPerfProtNotifyTimeStamp.setStatus('current')
if mibBuilder.loadTexts: tptPerfProtNotifyTimeStamp.setDescription('The time of this notification (in seconds since January 1, 1970).')
tptPerfProtNotifyPhase = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 143), PerfProtPhase()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPerfProtNotifyPhase.setStatus('current')
if mibBuilder.loadTexts: tptPerfProtNotifyPhase.setDescription('Whether entering, remaining in, or exiting performance protection mode.')
tptPerfProtNotifyPacketLoss = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 144), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPerfProtNotifyPacketLoss.setStatus('current')
if mibBuilder.loadTexts: tptPerfProtNotifyPacketLoss.setDescription('The current packet loss rate per thousand (percent * 10). When exiting performance protection mode, this value is 0.')
tptPerfProtNotifyLossThreshold = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 145), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPerfProtNotifyLossThreshold.setStatus('current')
if mibBuilder.loadTexts: tptPerfProtNotifyLossThreshold.setDescription('The current packet loss threshold per thousand (percent * 10).')
tptPerfProtNotifyDuration = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 146), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPerfProtNotifyDuration.setStatus('current')
if mibBuilder.loadTexts: tptPerfProtNotifyDuration.setDescription('The number of seconds performance protection will be in force.')
tptPerfProtNotifyMissedAlerts = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 147), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPerfProtNotifyMissedAlerts.setStatus('current')
if mibBuilder.loadTexts: tptPerfProtNotifyMissedAlerts.setDescription('The number of alerts missed during the performance protection period. When entering performance protection mode, this value is 0.')
tptPerfProtNotify = NotificationType((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 0, 21)).setObjects(("TPT-HIGH-AVAIL-MIB", "tptPerfProtNotifyDeviceID"), ("TPT-HIGH-AVAIL-MIB", "tptPerfProtNotifyTimeStamp"), ("TPT-HIGH-AVAIL-MIB", "tptPerfProtNotifyPhase"), ("TPT-HIGH-AVAIL-MIB", "tptPerfProtNotifyPacketLoss"), ("TPT-HIGH-AVAIL-MIB", "tptPerfProtNotifyLossThreshold"), ("TPT-HIGH-AVAIL-MIB", "tptPerfProtNotifyDuration"), ("TPT-HIGH-AVAIL-MIB", "tptPerfProtNotifyMissedAlerts"))
if mibBuilder.loadTexts: tptPerfProtNotify.setStatus('current')
if mibBuilder.loadTexts: tptPerfProtNotify.setDescription('Notification: Used to inform the management station of changes in performance protection on the device.')
mibBuilder.exportSymbols("TPT-HIGH-AVAIL-MIB", FaultState=FaultState, tptTrhaNotify=tptTrhaNotify, highAvailZeroPowerMode=highAvailZeroPowerMode, highAvailZeroPowerEntry=highAvailZeroPowerEntry, tptPerfProtNotify=tptPerfProtNotify, tptTrhaNotifyTimeStamp=tptTrhaNotifyTimeStamp, tpt_high_avail_objs=tpt_high_avail_objs, tptIhaNotifyFaultCause=tptIhaNotifyFaultCause, tptPerfProtNotifyMissedAlerts=tptPerfProtNotifyMissedAlerts, ZphaMode=ZphaMode, tptZphaNotifyDeviceID=tptZphaNotifyDeviceID, tptPerfProtNotifyPacketLoss=tptPerfProtNotifyPacketLoss, FaultCause=FaultCause, ZphaPresent=ZphaPresent, PYSNMP_MODULE_ID=tpt_high_avail_objs, tptZphaNotifySegmentName=tptZphaNotifySegmentName, highAvailTransparentState=highAvailTransparentState, highAvailZeroPowerQuantity=highAvailZeroPowerQuantity, tptIhaNotifyTimeStamp=tptIhaNotifyTimeStamp, ZphaAction=ZphaAction, tptTrhaNotifyNewState=tptTrhaNotifyNewState, tptTrhaNotifyDeviceID=tptTrhaNotifyDeviceID, tptPerfProtNotifyLossThreshold=tptPerfProtNotifyLossThreshold, tptZphaNotify=tptZphaNotify, tptPerfProtNotifyDeviceID=tptPerfProtNotifyDeviceID, ConnectionState=ConnectionState, tptZphaNotifyTimeStamp=tptZphaNotifyTimeStamp, highAvailZeroPowerActive=highAvailZeroPowerActive, tptIhaNotifyFaultState=tptIhaNotifyFaultState, highAvailThresholdEnabled=highAvailThresholdEnabled, highAvailZeroPowerTable=highAvailZeroPowerTable, highAvailTransparentEnabled=highAvailTransparentEnabled, highAvailZeroPowerSegment=highAvailZeroPowerSegment, highAvailFaultCause=highAvailFaultCause, tptPerfProtNotifyDuration=tptPerfProtNotifyDuration, highAvailThresholdPercent=highAvailThresholdPercent, highAvailZeroPowerState=highAvailZeroPowerState, highAvailEnabled=highAvailEnabled, highAvailTimeStamp=highAvailTimeStamp, tptZphaNotifyNewState=tptZphaNotifyNewState, highAvailZeroPowerAction=highAvailZeroPowerAction, PerfProtPhase=PerfProtPhase, highAvailZeroPowerIndex=highAvailZeroPowerIndex, tptIhaNotify=tptIhaNotify, tptPerfProtNotifyTimeStamp=tptPerfProtNotifyTimeStamp, ZphaState=ZphaState, tptPerfProtNotifyPhase=tptPerfProtNotifyPhase, tptIhaNotifyDeviceID=tptIhaNotifyDeviceID, highAvailFaultState=highAvailFaultState, highAvailZeroPowerPresence=highAvailZeroPowerPresence, highAvailTransparentPartner=highAvailTransparentPartner)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
"""
from __future__ import absolute_import, division, print_function
from builtins import (bytes, open, str, super, range,
                      zip, round, input, int, pow, object, map)
import os
import argparse
import multiprocessing
import gunicorn.app.base
from gunicorn.six import iteritems
from cdci_data_analysis.flask_app.app import run_app, app
from cdci_data_analysis.configurer import ConfigEnv
def number_of_workers():
return (multiprocessing.cpu_count() * 2) + 1
class StandaloneApplication(gunicorn.app.base.BaseApplication):
def __init__(self, app, options=None, app_conf=None):
self.options = options or {}
self.application = app
self.app_conf = app_conf
super(StandaloneApplication, self).__init__()
def load_config(self):
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
for key, value in iteritems(config):
print ('conf',key.lower(), value)
self.cfg.set(key.lower(), value)
def load(self):
return self.application
def run(self, conf, debug=False, threaded=False):
run_app(conf, debug=debug, threaded=threaded)
#self.application.config['osaconf'] = conf
#self.application.run(host=conf.dispatcher_url, port=conf.dispatcher_port, debug=debug, threaded=threaded)
def main(argv=None):
    black_listed_envs = ['https_proxy', 'http_proxy']
    for envvar in black_listed_envs:
        print('removing env variable', envvar)
os.unsetenv(envvar)
if envvar in os.environ.keys():
del os.environ[envvar]
parser = argparse.ArgumentParser()
parser.add_argument('-conf_file', type=str, default=None)
parser.add_argument('-use_gunicorn', action='store_true')
parser.add_argument('-debug', action='store_true')
args = parser.parse_args()
conf_file = args.conf_file
conf = ConfigEnv.from_conf_file(conf_file)
use_gunicorn = args.use_gunicorn
debug = args.debug
if use_gunicorn is True:
dispatcher_url = conf.dispatcher_url
port = conf.dispatcher_port
options = {
'bind': '%s:%s' % (dispatcher_url, port),
'workers': 2,
'threads': 4,
#'worker-connections': 10,
#'k': 'gevent',
}
StandaloneApplication(app, options).run(conf, debug=debug,threaded=True)
else:
run_app(conf, debug=debug, threaded=False)
if __name__ == "__main__":
main()
|
from buildings import *
from soldiers import SoldierClass
class Player(object):
def __init__(self, name, money, army, buildings):
self.name = name
self.money = money
self.army = army
self.buildings = buildings
def buyArmy(self, economy, army):
iCost = economy.cost(army)
print "Buying army for %d coins"%(iCost)
if iCost <= self.money:
self.army += [army]
self.money -= iCost
return True;
else:
return False;
def buyBuilding(self, economy, building):
iCost = economy.buildingCost(building)
print "Buying building for %d coins"%(iCost)
if iCost <= self.money:
self.buildings += [building]
self.money -= iCost
return True;
else:
return False;
|
#!/usr/bin/env python3
"""
Export CLA information from bugs.python.org to a JSON file.
"""
from __future__ import annotations
from datetime import datetime
import json
import os
import xmlrpc.client
from dotenv import load_dotenv
from rich.progress import track
try:
import certifi
except ModuleNotFoundError:
raise ImportError("Install certifi first for SSL to work.")
else:
del certifi
load_dotenv()
BPO_AUTH = os.environ["BPO_AUTH"]
DATE_FORMAT = "<Date %Y-%m-%d.%H:%M:%S.000>"
bpo = xmlrpc.client.ServerProxy(
f"https://{BPO_AUTH}@bugs.python.org/xmlrpc", allow_none=True
)
schema = bpo.schema()
assert "user" in schema
user_schema = schema["user"]
assert "contrib_form" in user_schema
assert "contrib_form_date" in user_schema
users = bpo.filter("user", None, {"contrib_form": True})
result = []
for uid in track(users):
u = bpo.display(
f"user{uid}",
"username",
"address",
"alternate_addresses",
"github",
"contrib_form_date",
"contrib_form",
"iscommitter",
)
if not u.get("contrib_form") or not u.get("github"):
# No GitHub account and/or no contrib form signed
continue
addresses = [u["address"]]
for alt in (u.get("alternate_addresses") or "").split():
if "," in alt or ";" in alt:
raise ValueError(f", or ; used in split for user{uid}")
addresses.append(alt)
dt = datetime.now()
if u.get("contrib_form_date"):
dt = datetime.strptime(u["contrib_form_date"], DATE_FORMAT)
for address in addresses:
result.append(
{
"username": u["github"],
"email": address,
"bpo": u["username"],
"cla_date": dt.strftime(DATE_FORMAT),
"committer": u["iscommitter"],
}
)
with open("out.json", "w") as f:
json.dump(result, f, indent=2)
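# Each entry written to out.json has the shape built in the loop above, e.g.
# (illustrative placeholders only, not real data):
#   {"username": "<github login>", "email": "<address>", "bpo": "<bpo username>",
#    "cla_date": "<Date 2015-01-01.12:00:00.000>", "committer": false}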
|
from django.utils.functional import lazy
from drf_spectacular.utils import extend_schema_field
from drf_spectacular.openapi import OpenApiTypes
from rest_framework import serializers
from baserow.api.groups.serializers import GroupSerializer
from baserow.core.registries import application_type_registry
from baserow.core.models import Application
class ApplicationSerializer(serializers.ModelSerializer):
type = serializers.SerializerMethodField()
group = GroupSerializer(help_text='The group that the application belongs to.')
class Meta:
model = Application
fields = ('id', 'name', 'order', 'type', 'group')
extra_kwargs = {
'id': {
'read_only': True
}
}
@extend_schema_field(OpenApiTypes.STR)
def get_type(self, instance):
        # The application type may already be provided in the serializer context;
        # if not, resolve it from the instance's specific class via the registry.
application = self.context.get('application')
if not application:
application = application_type_registry.get_by_model(
instance.specific_class)
return application.type
class ApplicationCreateSerializer(serializers.ModelSerializer):
type = serializers.ChoiceField(
choices=lazy(application_type_registry.get_types, list)())
class Meta:
model = Application
fields = ('name', 'type')
class ApplicationUpdateSerializer(serializers.ModelSerializer):
class Meta:
model = Application
fields = ('name',)
def get_application_serializer(instance, **kwargs):
"""
    Returns an instantiated serializer based on the instance class type. Custom
    serializers can be defined per application type. This function returns the
    custom serializer if one is defined, otherwise the default one.
    :param instance: The instance for which a serializer is needed.
:type instance: Application
:return: An instantiated serializer for the instance.
:rtype: ApplicationSerializer
"""
application = application_type_registry.get_by_model(instance.specific_class)
serializer_class = application.instance_serializer_class
if not serializer_class:
serializer_class = ApplicationSerializer
return serializer_class(instance, context={'application': application}, **kwargs)
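# --- Illustrative usage (editorial sketch, not part of the original module) ---
# A minimal example, assuming `application` is an existing Application model
# instance (the variable name is hypothetical):
#
# serializer = get_application_serializer(application)
# data = serializer.data  # 'type' is resolved via the application type registry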
|
NONE = 0
HALT = 1 << 0
FAULT = 1 << 1
BREAK = 1 << 2
|
"""General utility functions."""
import contextlib
import random
import inspect
from asyncio import ensure_future
from functools import partial
import datetime
from util.commands.errors import PassException
def bdel(s, r): return (s[len(r):] if s.startswith(r) else s)
class DiscordFuncs():
def __init__(self, bot):
self.bot = bot
def __getattr__(self, name):
new_func = None
coro = getattr(self.bot, name)
if not callable(coro):
raise AttributeError('Class can only access client functions')
def async_wrap(coro, *args, **kwargs):
ensure_future(coro(*args, **kwargs))
new_func = partial(async_wrap, coro)
new_func.__name__ = coro.__name__
new_func.__qualname__ = coro.__qualname__
return new_func
def _import(mod_name: str, var_name=None, attr_name=''):
ret = "globals()['{}'] = imp('{}')"
if var_name:
if attr_name:
attr_name = '.' + attr_name
ret = ret.format(var_name, mod_name) + attr_name
else:
ret = ret.format(mod_name, mod_name)
return ret
def _set_var(var_name: str, expr: str):
return "globals()['{}'] = {}".format(var_name, expr)
def _del_var(var_name: str):
return "del globals()['{}']".format(var_name)
snowtime = lambda i: datetime.datetime.fromtimestamp(((float(int(i)) / 4194304.0) + 1420070400000.0 + 18000000.0) / 1000.0).strftime('%a %b %d, %Y %I:%M:%S %p')
class PrintException(Exception):
"""An exception that prints the error."""
def __init__(self, err):
print(str(err))
self.err = err
super().__init__()
@contextlib.contextmanager
def assert_msg(ctx, msg: str):
"""Assert. If error, send msg."""
try:
yield
except AssertionError:
ensure_future(ctx.bot.send_message(ctx.message.channel, msg))
raise PassException()
def check(in_bool: bool):
"""Replacement for assert statement."""
if not in_bool:
raise AssertionError('Assertion failed from check()!')
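# --- Illustrative usage (editorial sketch, not part of the original module) ---
# A minimal example, assuming `ctx` is a command context whose bot exposes
# send_message(); the command and argument names are hypothetical.
#
# async def roll(ctx, sides):
#     with assert_msg(ctx, 'Sides must be a number!'):
#         check(sides.isdigit())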
def encode(content: str) -> str:
"""Goldcode encoder."""
orig_ords = [ord(c) for c in list(content)]
shift = round(random.uniform(1, 145), random.randint(3, 6))
shift_shift = random.randint(1, 14)
shifted_ords = [float(o) + shift for o in orig_ords]
join_chars = list('@!($)_*#%"}?\'=-`\\][')
join_char = random.choice(join_chars)
ords_str = join_char.join([str(s) for s in shifted_ords]) + '~' + join_char.join([str(float(ord(c)) + shift) for c in list('3MainShiftCorrect')])
fn_head_join = random.choice(list('|^&'))
_g = random.uniform(1, 51)
head_keys = {
'd': shift + shift_shift, # encoded (shift_shifted) shift
'g': _g, # shift for join char index
'l': (float(join_chars.index(join_char)) - 4.4689257) + _g
}
head_str = ';'.join([k + str(head_keys[k]) for k in head_keys])
final = head_str + fn_head_join + ords_str
return final
def decode(content: str) -> str:
"""Goldcode decoder."""
try:
expected_key = '3MainShiftCorrect'
join_chars = list('@!($)_*#%"}?\'=-`\\][')
shift_key = content.split('~')[1]
content = content.replace('~' + shift_key, '') # discard shift key
head_keys = {}
for try_decode_head in list('|^&'):
if try_decode_head in content:
dec_head_1 = content.split(try_decode_head)[0]
head_r_keys = dec_head_1.split(';')
for rkey in head_r_keys:
head_keys[rkey[0]] = rkey[1:]
no_head_content = content.replace(dec_head_1 + try_decode_head, '')
head_keys['d'] = float(head_keys['d'])
head_keys['g'] = float(head_keys['g'])
head_keys['l'] = float(head_keys['l'])
j = join_chars[int((head_keys['l'] + 4.4689257) - head_keys['g'])]
for try_shift_shift in range(1, 14):
shift_to_try = head_keys['d'] - float(try_shift_shift)
if ''.join([chr(int(cn - shift_to_try)) for cn in [float(sf) for sf in shift_key.split(j)]]) == expected_key:
shift = shift_to_try
break
content = no_head_content
dec = ''.join([chr(int(cn - shift)) for cn in [float(sf) for sf in content.split(j)]])
return dec
except Exception:
return '⚠ Couldn\'t decode. Maybe your content is corrupted?'
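# --- Illustrative round trip (editorial sketch, not part of the original module) ---
# encode() produces a self-describing string (head keys plus shifted code points)
# and decode() recovers the original text; the sample text below is arbitrary.
#
# token = encode('hello world')
# print(decode(token))  # -> 'hello world' (or a warning string if decoding fails)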
async def async_encode(content: str) -> str:
"""Coroutine version of encode()."""
orig_ords = [ord(c) for c in list(content)]
shift = round(random.uniform(1, 145), random.randint(3, 6))
shift_shift = random.randint(1, 14)
shifted_ords = [float(o) + shift for o in orig_ords]
join_chars = list('@!($)_*#%"}?\'=-`\\][')
join_char = random.choice(join_chars)
ords_str = join_char.join([str(s) for s in shifted_ords]) + '~' + join_char.join([str(float(ord(c)) + shift) for c in list('3MainShiftCorrect')])
fn_head_join = random.choice(list('|^&'))
_g = random.uniform(1, 51)
head_keys = {
'd': shift + shift_shift, # encoded (shift_shifted) shift
'g': _g, # shift for join char index
'l': (float(join_chars.index(join_char)) - 4.4689257) + _g
}
head_str = ';'.join([k + str(head_keys[k]) for k in head_keys])
final = head_str + fn_head_join + ords_str
return final
async def async_decode(content: str) -> str:
"""Coroutine version of decode()."""
try:
expected_key = '3MainShiftCorrect'
join_chars = list('@!($)_*#%"}?\'=-`\\][')
shift_key = content.split('~')[1]
content = content.replace('~' + shift_key, '') # discard shift key
head_keys = {}
for try_decode_head in list('|^&'):
if try_decode_head in content:
dec_head_1 = content.split(try_decode_head)[0]
head_r_keys = dec_head_1.split(';')
for rkey in head_r_keys:
head_keys[rkey[0]] = rkey[1:]
no_head_content = content.replace(dec_head_1 + try_decode_head, '')
head_keys['d'] = float(head_keys['d'])
head_keys['g'] = float(head_keys['g'])
head_keys['l'] = float(head_keys['l'])
j = join_chars[int((head_keys['l'] + 4.4689257) - head_keys['g'])]
for try_shift_shift in range(1, 14):
shift_to_try = head_keys['d'] - float(try_shift_shift)
if ''.join([chr(int(cn - shift_to_try)) for cn in [float(sf) for sf in shift_key.split(j)]]) == expected_key:
shift = shift_to_try
break
content = no_head_content
dec = ''.join([chr(int(cn - shift)) for cn in [float(sf) for sf in content.split(j)]])
return dec
except Exception:
return '⚠ Couldn\'t decode. Maybe your content is corrupted?'
def decoy_print(*ina: str) -> str:
"""Print function!"""
return ' '.join(ina)
def _get_variable(name):
stack = inspect.stack()
try:
for frames in stack:
try:
frame = frames[0]
current_locals = frame.f_locals
if name in current_locals:
return current_locals[name]
finally:
del frame
finally:
del stack
def numberToBase(n, b):
if n == 0:
return [0]
digits = []
while n:
digits.append(int(n % b))
        n //= b  # floor division keeps n an int ('/' would yield floats in Python 3)
return digits[::-1]
def smartjoin(l):
if len(l) > 1:
l[-1] = 'and ' + l[-1]
return ', '.join(l)
|
import os
import pandas as pd
import click
from flask import Flask
from flask.cli import FlaskGroup
from flask.cli import run_command
from .config import update_config
from .routing import register_blueprints
from .routing import register_routes_to_pbo
from .jinja_env import create_jinja_env
from .utils.excel import build_workbook_from_dict
def create_app(excel_file: str = None) -> Flask:
app = Flask(__name__)
excel_file = excel_file or os.environ.get('EXCEL_FILE')
app.config['EXCEL_FILE'] = excel_file
if excel_file:
update_config(app, excel_file)
register_blueprints(app, excel_file)
register_routes_to_pbo(app, excel_file)
create_jinja_env(app, excel_file)
return app
@click.group(cls=FlaskGroup, create_app=create_app)
def cli():
pass
@cli.command('run-excel', context_settings={'ignore_unknown_options': True})
@click.argument('excel_file', nargs=-1, type=click.Path())
@click.option('--env', '-e',
default=lambda: os.getenv('FSE_ENV', 'production'),
help='Your config environment. Different config environments are '
'managed using #config_{env} sheets. `development` and '
'`production` are always valid configs by default.')
@click.pass_context
def run_excel(ctx, excel_file, env):
"""Deploy your Excel file as a website."""
if len(excel_file) == 0:
if 'EXCEL_FILE' in os.environ:
_excel_file = os.environ['EXCEL_FILE']
else:
raise TypeError('Please either define an excel file to load as an '
'argument (recommended), or define an `EXCEL_FILE` '
'environment variable.')
elif len(excel_file) == 1:
_excel_file = excel_file[0]
else:
raise TypeError("You cannot submit more than 1 Excel file. If you'd "
'like to support multiple Excel files, create a '
'`#blueprints` sheet.')
click.echo(f'Deploying {_excel_file}')
os.environ['FLASK_APP'] = f"{__name__}:create_app('{_excel_file}')"
os.environ['EXCEL_FILE'] = _excel_file
os.environ['FLASK_ENV'] = env
ctx.invoke(run_command, reload=True)
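# --- Illustrative invocation (editorial sketch, not part of the original module) ---
# Assuming this CLI group is exposed as a console-script entry point (the command
# name `flask-excel` below is hypothetical), run-excel could be invoked as:
#
#   flask-excel run-excel demo_website.xlsx --env development
#
# With no argument it falls back to the EXCEL_FILE environment variable, as the
# error messages above describe.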
@cli.command('create-demo')
def create_demo():
base = {
'#routes': [
['hello_world', '/'],
['foo', '/foo']
],
'#templates': [
['example_template']
],
'#blueprints': [
['example_blueprint.xlsx']
],
'#config': [
['SECRET_KEY', '%SECRET_KEY%'],
['PREFERRED_URL_SCHEME', 'http']
],
'#config_development': [
['INHERIT_FROM', '#config'],
['TESTING', True],
['DEBUG', True]
],
'example_template': [
['<head>', None, None, None],
[None, '<title>', 'Fullstack with Excel', '</title>'],
['</head>', None, None, None],
['{% block content %}', None, None, None],
['{% endblock %}', None, None, None]
],
'hello_world': [
['{% extends "example_template" %}', None, None],
['{% block content %}', None, None],
['<b>', 'hello, world!', '</b>'],
['<br />', '<br />', None],
['<a href="{{ url_for(\'foo\') }}">', 'See some data?', '</a>'],
['{% endblock %}', None, None]
],
'&actual_table': [
['Name', 'Number of pets'],
['Bob', 4],
['Mary', 2],
['Joe', 0]
],
'foo': [
['{% extends "example_template" %}', None],
['{% block content %}', None],
[None, "<h3>My Friends' Pets</h3>"],
            [None, '<br />'],
[None, '{{ render_sheet("&actual_table") }}'],
            [None, '<br />'],
[None, 'Want to see a blueprint now?'],
[None, '<a href="{{ url_for(\'example_blueprint.bar\') }}">Click here!</a>'],
['{% endblock %}', None]
]
}
bp = {
'#routes': [
['bar', '/bar']
],
'bar': [
['{% extends "example_template" %}', None],
['{% block content %}', None],
[None, 'This page was created using a blueprint.'],
['{% endblock %}', None, None],
]
}
build_workbook_from_dict(data=base, file_name='demo_website.xlsx')
build_workbook_from_dict(data=bp, file_name='example_blueprint.xlsx')
|
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.nn as nn
from paddleseg import utils
from paddleseg.cvlibs import manager
@manager.MODELS.add_component
class PortraitNet(nn.Layer):
"""
The PortraitNet implementation based on PaddlePaddle.
The original article refers to
    Song-Hai Zhang, Xin Dong, Jia Li, Ruilong Li, Yong-Liang Yang
"PortraitNet: Real-time Portrait Segmentation Network for Mobile Device"
(https://www.yongliangyang.net/docs/mobilePotrait_c&g19.pdf).
Args:
num_classes (int, optional): The unique number of target classes. Default: 2.
        backbone (Paddle.nn.Layer): Backbone network, currently supports MobileNetV2.
        add_edge (bool, optional): Whether to also output an edge map. Default: False.
        pretrained (str, optional): The path or url of pretrained model. Default: None.
"""
def __init__(self,
num_classes,
backbone,
min_channel=16,
channel_ratio=1.0,
add_edge=False,
pretrained=None):
super(PortraitNet, self).__init__()
self.backbone = backbone
self.head = PortraitNetHead(num_classes, min_channel, channel_ratio,
add_edge)
self.pretrained = pretrained
self.init_weight()
def forward(self, x):
img = x[:, :3, :, :]
img_ori = x[:, 3:, :, :]
feat_list = self.backbone(img)
logits_list = self.head(feat_list)
feat_list = self.backbone(img_ori)
logits_ori_list = self.head(feat_list)
return [
logits_list[0], logits_ori_list[0], logits_list[1],
logits_ori_list[1]
]
def init_weight(self):
if self.pretrained is not None:
utils.load_entire_model(self, self.pretrained)
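# --- Editorial note, derived from the head defined below (not part of the original file) ---
# With the default channel settings, PortraitNetHead expects `backbone(img)` to return
# four feature maps at 1/4, 1/8, 1/16 and 1/32 of the input resolution with 24, 32, 96
# and 320 channels respectively (the MobileNetV2 stage widths). A hedged instantiation
# sketch, assuming such a backbone object (`mobilenet_v2_backbone`) is available:
#
# model = PortraitNet(num_classes=2, backbone=mobilenet_v2_backbone, add_edge=True)
# outputs = model(x)  # x: [N, 6, H, W]; outputs: [pred, pred_ori, edge, edge_ori]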
class PortraitNetHead(nn.Layer):
def __init__(self,
num_classes,
min_channel=16,
channel_ratio=1.0,
add_edge=False):
super().__init__()
self.min_channel = min_channel
self.channel_ratio = channel_ratio
self.add_edge = add_edge
self.deconv1 = nn.Conv2DTranspose(
self.depth(96),
self.depth(96),
groups=1,
kernel_size=4,
stride=2,
padding=1,
bias_attr=False)
self.deconv2 = nn.Conv2DTranspose(
self.depth(32),
self.depth(32),
groups=1,
kernel_size=4,
stride=2,
padding=1,
bias_attr=False)
self.deconv3 = nn.Conv2DTranspose(
self.depth(24),
self.depth(24),
groups=1,
kernel_size=4,
stride=2,
padding=1,
bias_attr=False)
self.deconv4 = nn.Conv2DTranspose(
self.depth(16),
self.depth(16),
groups=1,
kernel_size=4,
stride=2,
padding=1,
bias_attr=False)
self.deconv5 = nn.Conv2DTranspose(
self.depth(8),
self.depth(8),
groups=1,
kernel_size=4,
stride=2,
padding=1,
bias_attr=False)
self.transit1 = ResidualBlock(self.depth(320), self.depth(96))
self.transit2 = ResidualBlock(self.depth(96), self.depth(32))
self.transit3 = ResidualBlock(self.depth(32), self.depth(24))
self.transit4 = ResidualBlock(self.depth(24), self.depth(16))
self.transit5 = ResidualBlock(self.depth(16), self.depth(8))
self.pred = nn.Conv2D(
self.depth(8), num_classes, 3, 1, 1, bias_attr=False)
if self.add_edge:
self.edge = nn.Conv2D(
self.depth(8), num_classes, 3, 1, 1, bias_attr=False)
def depth(self, channels):
min_channel = min(channels, self.min_channel)
return max(min_channel, int(channels * self.channel_ratio))
def forward(self, feat_list):
feature_1_4, feature_1_8, feature_1_16, feature_1_32 = feat_list
up_1_16 = self.deconv1(self.transit1(feature_1_32))
up_1_8 = self.deconv2(self.transit2(feature_1_16 + up_1_16))
up_1_4 = self.deconv3(self.transit3(feature_1_8 + up_1_8))
up_1_2 = self.deconv4(self.transit4(feature_1_4 + up_1_4))
up_1_1 = self.deconv5(self.transit5(up_1_2))
pred = self.pred(up_1_1)
if self.add_edge:
edge = self.edge(up_1_1)
return pred, edge
else:
return pred
class ConvDw(nn.Layer):
def __init__(self, inp, oup, kernel, stride):
super(ConvDw, self).__init__()
self.conv = nn.Sequential(
nn.Conv2D(
inp,
inp,
kernel,
stride, (kernel - 1) // 2,
groups=inp,
bias_attr=False),
nn.BatchNorm2D(num_features=inp, epsilon=1e-05, momentum=0.1),
nn.ReLU(),
nn.Conv2D(inp, oup, 1, 1, 0, bias_attr=False),
nn.BatchNorm2D(num_features=oup, epsilon=1e-05, momentum=0.1),
nn.ReLU(),
)
def forward(self, x):
return self.conv(x)
class ResidualBlock(nn.Layer):
def __init__(self, inp, oup, stride=1):
super(ResidualBlock, self).__init__()
self.block = nn.Sequential(
ConvDw(inp, oup, 3, stride=stride),
nn.Conv2D(
in_channels=oup,
out_channels=oup,
kernel_size=3,
stride=1,
padding=1,
groups=oup,
bias_attr=False),
nn.BatchNorm2D(num_features=oup, epsilon=1e-05, momentum=0.1),
nn.ReLU(),
nn.Conv2D(
in_channels=oup,
out_channels=oup,
kernel_size=1,
stride=1,
padding=0,
bias_attr=False),
nn.BatchNorm2D(num_features=oup, epsilon=1e-05, momentum=0.1),
)
if inp == oup:
self.residual = None
else:
self.residual = nn.Sequential(
nn.Conv2D(
in_channels=inp,
out_channels=oup,
kernel_size=1,
stride=1,
padding=0,
bias_attr=False),
nn.BatchNorm2D(num_features=oup, epsilon=1e-05, momentum=0.1),
)
self.relu = nn.ReLU()
def forward(self, x):
residual = x
out = self.block(x)
if self.residual is not None:
residual = self.residual(x)
out += residual
out = self.relu(out)
return out
|
# Copyright 2017 Great Software Laboratory Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from st2common.runners.base_action import Action
class PublishConfigs(Action):
def __init__(self, config=None):
super(PublishConfigs, self).__init__(config=config)
def run(self):
configs = {}
configs['vip'] = self.config['vip']
configs['username'] = self.config['username']
configs['password'] = self.config['password']
configs['overlay_gateway_name'] = self.config['overlay_gateway_name']
return configs
|
import nni
import GPUtil
from UCTB.dataset import GridTrafficLoader
from UCTB.model import ST_ResNet
from UCTB.evaluation import metric
args = {
'dataset': 'DiDi',
'city': 'Xian',
'num_residual_unit': 4,
'conv_filters': 64,
'kernel_size': 3,
'lr': 1e-5,
'batch_size': 32,
'MergeIndex': 6
}
code_version = 'ST_ResNet_{}_{}_F{}'.format(args['dataset'], args['city'], int(args['MergeIndex'])*5)
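# With the defaults above, code_version becomes 'ST_ResNet_DiDi_Xian_F30'
# (MergeIndex 6 -> 6 * 5 = 30, presumably the time granularity in minutes);
# the NNI sequence id is appended below when the script runs under NNI.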
nni_params = nni.get_next_parameter()
nni_sid = nni.get_sequence_id()
if nni_params:
args.update(nni_params)
code_version += ('_' + str(nni_sid))
deviceIDs = GPUtil.getAvailable(order='memory', limit=2, maxLoad=1, maxMemory=0.7,
includeNan=False, excludeID=[], excludeUUID=[])
if len(deviceIDs) == 0:
current_device = '-1'
else:
if nni_params:
current_device = str(deviceIDs[int(nni_sid) % len(deviceIDs)])
else:
current_device = str(deviceIDs[0])
# Config data loader
data_loader = GridTrafficLoader(dataset=args['dataset'], city=args['city'], closeness_len=6, period_len=7, trend_len=4, MergeIndex=args['MergeIndex'])
ST_ResNet_Obj = ST_ResNet(closeness_len=data_loader.closeness_len,
period_len=data_loader.period_len,
trend_len=data_loader.trend_len,
external_dim=data_loader.external_dim, lr=args['lr'],
num_residual_unit=args['num_residual_unit'], conv_filters=args['conv_filters'],
kernel_size=args['kernel_size'], width=data_loader.width, height=data_loader.height,
gpu_device=current_device, code_version=code_version)
ST_ResNet_Obj.build()
print(args['dataset'], args['city'], code_version)
print('Number of trainable variables', ST_ResNet_Obj.trainable_vars)
print('Number of training samples', data_loader.train_sequence_len)
print('debug')
# Training
ST_ResNet_Obj.fit(closeness_feature=data_loader.train_closeness,
period_feature=data_loader.train_period,
trend_feature=data_loader.train_trend,
target=data_loader.train_y,
external_feature=data_loader.train_ef,
sequence_length=data_loader.train_sequence_len,
batch_size=args['batch_size'], early_stop_length=200,
validate_ratio=0.1)
# Predict
prediction = ST_ResNet_Obj.predict(closeness_feature=data_loader.test_closeness,
period_feature=data_loader.test_period,
trend_feature=data_loader.test_trend,
target=data_loader.test_y,
external_feature=data_loader.test_ef,
sequence_length=data_loader.test_sequence_len)
# Compute metric
test_rmse = metric.rmse(prediction=data_loader.normalizer.min_max_denormal(prediction['prediction']),
target=data_loader.normalizer.min_max_denormal(data_loader.test_y), threshold=0)
# Evaluate
val_loss = ST_ResNet_Obj.load_event_scalar('val_loss')
best_val_loss = min([e[-1] for e in val_loss])
best_val_loss = data_loader.normalizer.min_max_denormal(best_val_loss)
print('Best val result', best_val_loss)
print('Test result', test_rmse)
print('Converged using %.2f hour' % ((val_loss[-1][0] - val_loss[0][0]) / 3600))
if nni_params:
nni.report_final_result({
'default': best_val_loss,
'test-rmse': test_rmse
})
|
"""Print all tasks.
Usage:
# Print all tasks
python3 print_all_tasks.py
# Print a specific task
python3 print_all_tasks.py --idx 10
"""
import argparse
from common import load_and_register_tasks
def print_task(index, task):
print("=" * 60)
print(f"Index: {index}")
print(f"flop_ct: {task.compute_dag.flop_ct}")
print(f"workload_key: {task.workload_key}")
print("Compute DAG:")
print(task.compute_dag)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--idx", type=int)
args = parser.parse_args()
print("Load tasks...")
tasks = load_and_register_tasks()
if args.idx is None:
for i, t in enumerate(tasks):
print_task(i, t)
else:
print_task(args.idx, tasks[args.idx])
|
import pytest
from jsonschema import ValidationError
from babbage.validation import validate_model
class TestValidation(object):
def test_simple_model(self, simple_model_data):
validate_model(simple_model_data)
def test_invalid_fact_table(self, simple_model_data):
with pytest.raises(ValidationError):
model = simple_model_data
model['fact_table'] = 'b....'
validate_model(model)
def test_no_fact_table(self, simple_model_data):
with pytest.raises(ValidationError):
model = simple_model_data
del model['fact_table']
validate_model(model)
def test_invalid_dimension_name(self, simple_model_data):
with pytest.raises(ValidationError):
model = simple_model_data
model['dimensions']['goo fdj.'] = {'label': 'bar'}
validate_model(model)
def test_invalid_measure_name(self, simple_model_data):
with pytest.raises(ValidationError):
model = simple_model_data
model['measures']['goo fdj.'] = {'label': 'bar'}
validate_model(model)
def test_no_measure(self, simple_model_data):
with pytest.raises(ValidationError):
model = simple_model_data
model['measures'] = {}
validate_model(model)
def test_no_measure_label(self, simple_model_data):
with pytest.raises(ValidationError):
model = simple_model_data
model['measures']['amount'] = {}
validate_model(model)
def test_invalid_aggregate(self, simple_model_data):
with pytest.raises(ValidationError):
model = simple_model_data
model['measures']['amount']['aggregates'] = 'schnasel'
validate_model(model)
def test_invalid_aggregate_string(self, simple_model_data):
with pytest.raises(ValidationError):
model = simple_model_data
model['measures']['amount']['aggregates'] = 'count'
validate_model(model)
    def test_valid_aggregate_list(self, simple_model_data):
model = simple_model_data
model['measures']['amount']['aggregates'] = ['count']
validate_model(model)
def test_dimension_without_attributes(self, simple_model_data):
with pytest.raises(ValidationError):
model = simple_model_data
model['dimensions']['foo']['attributes'] = {}
validate_model(model)
def test_dimension_without_key(self, simple_model_data):
with pytest.raises(ValidationError):
model = simple_model_data
del model['dimensions']['foo']['key_attribute']
validate_model(model)
def test_dimension_invalid_key(self, simple_model_data):
with pytest.raises(ValidationError):
model = simple_model_data
model['dimensions']['foo']['key_attribute'] = 'lala'
validate_model(model)
def test_dimension_invalid_label(self, simple_model_data):
with pytest.raises(ValidationError):
model = simple_model_data
model['dimensions']['foo']['label_attribute'] = 'lala'
validate_model(model)
|
class Solution(object):
def getRow(self, rowIndex):
"""
:type rowIndex: int
:rtype: List[int]
"""
row = [1]
for _ in range(rowIndex):
# pad 0, perform element-wise addition
row = [x + y for x, y in zip([0] + row, row + [0])]
return row
# row = [1]
# for i in range(rowIndex):
# row = [1] + [row[j] + row[j + 1] for j in range(len(row) - 1)] + [1]
# return row
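        # Example trace of the active implementation: getRow(3) goes
        # [1] -> [1, 1] -> [1, 2, 1] -> [1, 3, 3, 1].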
|
from typing import Tuple, Union
from axelrod import Action
C, D = Action.C, Action.D
Score = Union[int, float]
class Game(object):
"""Container for the game matrix and scoring logic.
Attributes
----------
scores: dict
        Mapping of each pair of actions to the numerical scores for the two players.
"""
def __init__(
self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1, rc: Score = 3, sc: Score = 0, tc: Score = 5, pc: Score = 1
) -> None:
"""Create a new game object.
Parameters
----------
r: int or float
            Score obtained by the row player for mutual cooperation.
s: int or float
Score obtained by row player for cooperating against column defector.
t: int or float
Score obtained by row player for defecting against column cooperator.
p: int or float
Score obtained by row player for mutual defection.
rc: int or float
            Score obtained by the column player for mutual cooperation.
sc: int or float
Score obtained by column player for cooperating against row defector.
tc: int or float
Score obtained by column player for defecting against row cooperator.
pc: int or float
Score obtained by column player for mutual defection.
"""
self.scores = {
(C, C): (r, rc),
(D, D): (p, pc),
(C, D): (s, tc),
(D, C): (t, sc),
}
def RPST(self) -> Tuple[Score, Score, Score, Score]:
"""Returns game matrix values in Press and Dyson notation."""
R = self.scores[(C, C)][0]
P = self.scores[(D, D)][0]
S = self.scores[(C, D)][0]
T = self.scores[(D, C)][0]
return R, P, S, T
def score(self, pair: Tuple[Action, Action]) -> Tuple[Score, Score]:
"""Returns the appropriate score for a decision pair.
Parameters
----------
pair: tuple(Action, Action)
            A pair of actions for two players, for example (C, C).
Returns
-------
tuple of int or float
            Scores for the two players resulting from their actions.
"""
return self.scores[pair]
def __repr__(self) -> str:
return "Axelrod game: (R,P,S,T) = {}".format(self.RPST())
def __eq__(self, other):
if not isinstance(other, Game):
return False
return self.RPST() == other.RPST()
DefaultGame = Game()
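# Illustrative usage with the default payoff values defined above:
# >>> DefaultGame.score((C, C))
# (3, 3)
# >>> DefaultGame.score((C, D))
# (0, 5)
# >>> DefaultGame.RPST()
# (3, 1, 0, 5)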
|
import inspect
import sys
from pathlib import Path
import kts.ui.settings
from kts.core.backend.ray_middleware import setup_ray
from kts.core.cache import frame_cache, obj_cache
from kts.core.lists import feature_list, helper_list
from kts.settings import cfg
from kts.util.debug import logger
def find_scope():
frame = inspect.currentframe()
while frame is not None and 'get_ipython' not in frame.f_globals:
frame = frame.f_back
if frame is not None:
return frame.f_globals
else:
return None
def find_config():
p = Path('.').cwd()
while p != p.parent and not (p / 'kts_config.toml').exists():
p = p.parent
config_path = (p / 'kts_config.toml')
if config_path.exists():
return config_path
else:
return None
def init():
cfg.scope = find_scope()
cfg.stdout = sys.stdout
config_path = find_config()
if config_path is not None:
cfg.load(config_path)
if config_path is not None:
frame_cache.path = cfg.storage_path
obj_cache.path = cfg.storage_path
feature_list.sync()
helper_list.sync()
kts.ui.settings.init()
setup_ray()
if not cfg.debug:
logger.level = 50
|
import argparse
import os
import sys
import pickle
import time
import pandas as pd
import re
from src.utils import np
from src.problem import Problem
from src.controller import Parallel_Controller
from src.MMAS_solver import MMAS_Solver
from pathlib import Path
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
'--method',
choices=['DGA', 'RGA', 'MMAS'],
default='MMAS',
type=str,
required=True,
help='algorithm')
parser.add_argument(
'--idx',
type=int,
required=True,
help='Index of slurm')
    parser.add_argument(
        '--instance',
        type=str,
        required=True,
        help='instance to apply the algorithm to')
# parser.add_argument(
# '--batch',
# # choices=[101],
# # default=31,
# type=int,
# required=True,
# help='size of batch of run for RGA')
# if 0 : it takes the seed from input
# if 2 : it generates a random seed
# if 1 : it goes for 31 runs.
parser.add_argument(
'--ls',
choices=['none', '1'],
default='none',
type=str,
required=False,
help='Local search method')
parser.add_argument(
'--n-ants',
default=10,
type=int,
required=False,
help='number of ants for population size')
parser.add_argument(
'--maxiter',
default=1000,
type=int,
required=False,
help='Maximum number of iterations')
try:
args = parser.parse_args()
except:
parser.print_help()
import sys
sys.exit(0)
return args
def parse_instance(instance_name):
find_all = np.asarray(re.findall(r'\b\d+\b', instance_name),dtype=int)
return tuple(find_all[-3:])
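# Illustrative only: for a hypothetical instance name like 'instance_60_5_1.dat',
# re.findall extracts the integer tokens [60, 5, 1] and the last three are returned
# as (n_demand, n_machine, direction) = (60, 5, 1).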
if __name__ == '__main__':
df = pd.read_csv('mmas-irace-configs-found.csv')
kwargs = {}
# Parse command line arguments
args = parse_arguments()
(n_demand, n_machine, direction) = parse_instance(args.instance)
conf_params = df.loc[df['Instance'] == '({}, {}, {})'.format(n_demand, n_machine, direction)]
problem = Problem(number_machine = n_machine, number_demand = n_demand, parcel = True,
local = 'none', method='MMAS', direction= direction)
# str_instance = '({},{},{},{})'.format(args.n_machine, args.n_demand, int(args.parcel), args.dir)
obj_batch = []
# mkdir
Path('./raw_mmas').mkdir(parents=True, exist_ok=True)
Path('./raw_mmas_local').mkdir(parents=True, exist_ok=True)
# run
if args.method == 'MMAS':
Path('./mmas_data').mkdir(parents=True, exist_ok=True)
# str_alg = '({},{})'.format(args.greedy_param, args.sp)
# if args.batch == 101:
# batch_range = np.arange(101)
# else:
# batch_range = np.arange(1)
# seeds for replication
preseed = np.array([226024, 894631, 118599, 802361, 23414, 976405, 798742, 647772, 82428, 566941
, 175144, 435676, 331388, 428582, 873627, 41918, 7806, 562734, 523424, 609150
, 93564, 209194, 220472, 63488, 570335, 153744, 543934, 625362, 84325, 636283
, 464398, 529193, 318544, 205037, 852066, 988015, 15880, 665647, 658019, 690671
, 362619, 803845, 868070, 394902, 161626, 636900, 332690, 442120, 113993, 276401
, 942972, 134143, 137052, 921830, 727872, 61800, 943104, 108918, 233229, 936444
, 689071, 862780, 944836, 552032, 357025, 92066, 869317, 216829, 493700, 51734
, 691270, 146044, 728563, 471856, 132138, 736886, 77208, 443348, 224069, 656098
, 990195, 516716, 854011, 698891, 184790, 161487, 336484, 22868, 246949, 410368
, 194817, 318576, 98816, 312131, 22585, 889346, 900289, 789335, 25676, 591257
, 839707])
# for idx_batch in batch_range:
# solver = Parallel_Controller(problem=problem, greedy_param = args.greedy_param, selection_pressure=args.sp, ant_kw=None)
if args.ls == 'none':
local_search = 0
else:
local_search = 1
# batch_range = np.arange(int(args.batch))
if int(conf_params['bestant'])==0:
tba = 'BSFA'
else:
tba = 'IBA'
# print(conf_params)
solver = MMAS_Solver(problem=problem,
alpha=int(conf_params['alpha']),
beta=int(conf_params['beta']),
rho=float(conf_params['rho']),
tau0=1,
population_size=args.n_ants,
iteration_max=args.maxiter,
selection_pressure=float(conf_params['sp']),
type_best_ant=tba,
local_search=local_search)
print(conf_params)
try:
seed = preseed[args.idx]
# print(seed)
solver.problem.initialise_seed(seed=seed)
except:
# else:
raise Exception('Problem in seed initialisation')
solver.run()
print('Best-solution: {}'.format(solver.total_obj))
obj_batch.append(solver.total_obj)
# str_alg = '({},{})'.format(args.greedy_param, args.sp)
# solver.export_best_solution_simple()
# solver.export()
|
# -*- coding: utf-8 -*-
"""
eng : import library
tr : kütüphaneleri yüklüyoruz
"""
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from keras.datasets import imdb
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers import SimpleRNN, Dense, Activation
"""
eng : load data and examining
tr : veriyi yüklüyoruz ve inceliyoruz.
"""
(X_train, Y_train), (X_test, Y_test) = imdb.load_data(path = "imdb.npz",
num_words = None,
skip_top = 0,
maxlen = None,
seed = 113,
start_char = 1,
index_from = 3)
print("Type: ", type(X_train))
print("X train shape : ", X_train.shape)
print("Y train shape : ", Y_train.shape)
print("Y train values: ", np.unique(Y_train))
print("Y test values: ", np.unique(Y_test))
unique, counts = np.unique(Y_train, return_counts = True)
print("Y train distribution : ", dict(zip(unique, counts)))
unique, counts = np.unique(Y_test, return_counts = True)
print("Y test distribution : ", dict(zip(unique, counts)))
plt.figure()
sns.countplot(Y_train)
plt.xlabel("Classes")
plt.ylabel("Freg")
plt.title("Y train")
plt.figure()
sns.countplot(Y_test)
plt.xlabel("Classes")
plt.ylabel("Freg")
plt.title("Y test")
d = X_train[0]
print(d)
print(len(d))
"""
eng : we look at the length of the sentences.
tr : cümlelerin uzunluğuna bakıyoruz.
"""
review_len_train = []
review_len_test = []
for i, ii in zip(X_train, X_test):
review_len_train.append(len(i))
review_len_test.append(len(ii))
sns.distplot(review_len_train, hist_kws = {"alpha":0.3})
sns.distplot(review_len_test, hist_kws = {"alpha":0.3})
print("Train Mean : ",np.mean(review_len_train))
print("Train Median : ",np.median(review_len_train))
print("Train Mode : ",stats.mode(review_len_train))
"""
eng : number of word
tr : sayıların kelime karşılıkları
"""
word_index = imdb.get_word_index()
print(type(word_index))
print(len(word_index))
"""
eng : word equivalent of the number
tr : sayısal değerin hangi kelimeye karşılık geldiğine bakıyoruz.
"""
for keys, values in word_index.items():
if values == 22:
print(keys)
"""
eng : We are looking at the full review
tr : sayısal değerleri kelime karşılıklarına çevirerek yorumun tamamına bakıyoruz.
"""
def whatIsSay(index = 24):
reverse_index = dict([(value, key) for (key, value) in word_index.items()])
decode_review = " ".join([reverse_index.get(i - 3, "!") for i in X_train[index]])
print(decode_review)
print(Y_train[index])
return decode_review
decoded_review = whatIsSay(36)
"""
eng : preprocess
tr : veriyi model için uygun hale getiriyoruz.
"""
num_words = 15000
(X_train, Y_train), (X_test, Y_test) = imdb.load_data(num_words = num_words)
maxlen = 130
X_train = pad_sequences(X_train, maxlen = maxlen)
X_test = pad_sequences(X_test, maxlen = maxlen)
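# pad_sequences makes every review exactly maxlen=130 integers long: shorter reviews
# are left-padded with zeros and longer ones keep only their last 130 tokens
# (the Keras defaults padding='pre' and truncating='pre').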
print(X_train[5])
for i in X_train[0:10]:
print(len(i))
decoded_review = whatIsSay(5)
"""
eng : Create RNN
tr : RNN modelini oluşturuyoruz.
"""
rnn = Sequential()
rnn.add(Embedding(num_words, 32, input_length = len(X_train[0])))
rnn.add(SimpleRNN(16, input_shape = (num_words, maxlen), return_sequences = False, activation = "relu"))
rnn.add(Dense(1))
rnn.add(Activation("sigmoid"))
print(rnn.summary())
rnn.compile(loss = "binary_crossentropy", optimizer = "rmsprop", metrics = ["accuracy"])
"""
eng : fit model
tr : modelimizi eğitiyoruz.
"""
history = rnn.fit(X_train, Y_train, validation_data = (X_test, Y_test), epochs = 15, batch_size = 128, verbose = 1)
score = rnn.evaluate(X_test, Y_test)
print("Accuracy : %",score[1]*100)
"""
eng : Visualize loss and acc
tr : loss ve acc sonuçlarımızı görselleştiriyoruz.
"""
plt.figure()
plt.plot(history.history["accuracy"], label = "Train")
plt.plot(history.history["val_accuracy"], label = "Test")
plt.title("Accuracy")
plt.ylabel("Acc")
plt.xlabel("epochs")
plt.legend()
plt.show()
plt.figure()
plt.plot(history.history["loss"], label = "Train")
plt.plot(history.history["val_loss"], label = "Test")
plt.title("Loss")
plt.ylabel("Acc")
plt.xlabel("epochs")
plt.legend()
plt.show()
|
from __future__ import with_statement
import inspect, logging
log = logging.getLogger(__name__)
import math, threading
from warnings import warn
import otp.ai.passlib.exc as exc, otp.ai.passlib.ifc as ifc
from otp.ai.passlib.exc import MissingBackendError, PasslibConfigWarning, PasslibHashWarning
from otp.ai.passlib.ifc import PasswordHash
from otp.ai.passlib.registry import get_crypt_handler
from otp.ai.passlib.utils import consteq, getrandstr, getrandbytes, rng, to_native_str, is_crypt_handler, to_unicode, MAX_PASSWORD_SIZE, accepts_keyword, as_bool, update_mixin_classes
from otp.ai.passlib.utils.binary import BASE64_CHARS, HASH64_CHARS, PADDED_BASE64_CHARS, HEX_CHARS, UPPER_HEX_CHARS, LOWER_HEX_CHARS, ALL_BYTE_VALUES
from otp.ai.passlib.utils.compat import join_byte_values, irange, u, native_string_types, uascii_to_str, join_unicode, unicode, str_to_uascii, join_unicode, unicode_or_bytes_types, PY2, int_types
from otp.ai.passlib.utils.decor import classproperty, deprecated_method
__all__ = [
'parse_mc2',
'parse_mc3',
'render_mc2',
'render_mc3',
'GenericHandler',
'StaticHandler',
'HasUserContext',
'HasRawChecksum',
'HasManyIdents',
'HasSalt',
'HasRawSalt',
'HasRounds',
'HasManyBackends',
'PrefixWrapper']
H64_CHARS = HASH64_CHARS
B64_CHARS = BASE64_CHARS
PADDED_B64_CHARS = PADDED_BASE64_CHARS
UC_HEX_CHARS = UPPER_HEX_CHARS
LC_HEX_CHARS = LOWER_HEX_CHARS
def _bitsize(count, chars):
if chars and count:
import math
return int(count * math.log(len(chars), 2))
return 0
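# Rough sketch of _bitsize(): a 22-character checksum drawn from the 64-character
# hash64 alphabet yields int(22 * log2(64)) = 132 bits.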
def guess_app_stacklevel(start=1):
frame = inspect.currentframe()
count = -start
try:
while frame:
name = frame.f_globals.get('__name__', '')
if name.startswith('passlib.tests.') or not name.startswith('passlib.'):
return max(1, count)
count += 1
frame = frame.f_back
return start
finally:
del frame
def warn_hash_settings_deprecation(handler, kwds):
warn("passing settings to %(handler)s.hash() is deprecated, and won't be supported in Passlib 2.0; use '%(handler)s.using(**settings).hash(secret)' instead" % dict(handler=handler.name), DeprecationWarning, stacklevel=guess_app_stacklevel(2))
def extract_settings_kwds(handler, kwds):
    context_keys = set(handler.context_kwds)
    return dict((key, kwds.pop(key)) for key in list(kwds) if key not in context_keys)
_UDOLLAR = u('$')
_UZERO = u('0')
def validate_secret(secret):
if not isinstance(secret, unicode_or_bytes_types):
raise exc.ExpectedStringError(secret, 'secret')
if len(secret) > MAX_PASSWORD_SIZE:
raise exc.PasswordSizeError(MAX_PASSWORD_SIZE)
def to_unicode_for_identify(hash):
if isinstance(hash, unicode):
return hash
if isinstance(hash, bytes):
try:
return hash.decode('utf-8')
except UnicodeDecodeError:
return hash.decode('latin-1')
else:
raise exc.ExpectedStringError(hash, 'hash')
def parse_mc2(hash, prefix, sep=_UDOLLAR, handler=None):
hash = to_unicode(hash, 'ascii', 'hash')
if not hash.startswith(prefix):
raise exc.InvalidHashError(handler)
parts = hash[len(prefix):].split(sep)
if len(parts) == 2:
salt, chk = parts
return (
salt, chk or None)
if len(parts) == 1:
return (parts[0], None)
raise exc.MalformedHashError(handler)
def parse_mc3(hash, prefix, sep=_UDOLLAR, rounds_base=10, default_rounds=None, handler=None):
hash = to_unicode(hash, 'ascii', 'hash')
if not hash.startswith(prefix):
raise exc.InvalidHashError(handler)
parts = hash[len(prefix):].split(sep)
if len(parts) == 3:
rounds, salt, chk = parts
else:
if len(parts) == 2:
rounds, salt = parts
chk = None
else:
raise exc.MalformedHashError(handler)
if rounds.startswith(_UZERO) and rounds != _UZERO:
raise exc.ZeroPaddedRoundsError(handler)
else:
if rounds:
rounds = int(rounds, rounds_base)
else:
if default_rounds is None:
raise exc.MalformedHashError(handler, 'empty rounds field')
else:
rounds = default_rounds
return (rounds, salt, chk or None)
def parse_int(source, base=10, default=None, param='value', handler=None):
if source.startswith(_UZERO) and source != _UZERO:
raise exc.MalformedHashError(handler, 'zero-padded %s field' % param)
else:
if source:
return int(source, base)
if default is None:
raise exc.MalformedHashError(handler, 'empty %s field' % param)
else:
return default
def render_mc2(ident, salt, checksum, sep=u('$')):
if checksum:
parts = [
ident, salt, sep, checksum]
else:
parts = [
ident, salt]
return uascii_to_str(join_unicode(parts))
def render_mc3(ident, rounds, salt, checksum, sep=u('$'), rounds_base=10):
if rounds is None:
rounds = u('')
else:
if rounds_base == 16:
rounds = u('%x') % rounds
else:
rounds = unicode(rounds)
if checksum:
parts = [
ident, rounds, sep, salt, sep, checksum]
else:
parts = [
ident, rounds, sep, salt]
return uascii_to_str(join_unicode(parts))
def validate_default_value(handler, default, norm, param='value'):
return True
def norm_integer(handler, value, min=1, max=None, param='value', relaxed=False):
if not isinstance(value, int_types):
raise exc.ExpectedTypeError(value, 'integer', param)
if value < min:
msg = '%s: %s (%d) is too low, must be at least %d' % (handler.name, param, value, min)
if relaxed:
warn(msg, exc.PasslibHashWarning)
value = min
else:
raise ValueError(msg)
if max and value > max:
msg = '%s: %s (%d) is too large, cannot be more than %d' % (handler.name, param, value, max)
if relaxed:
warn(msg, exc.PasslibHashWarning)
value = max
else:
raise ValueError(msg)
return value
class MinimalHandler(PasswordHash):
_configured = False
@classmethod
def using(cls, relaxed=False):
name = cls.__name__
if not cls._configured:
name = '<customized %s hasher>' % name
return type(name, (cls,), dict(__module__=cls.__module__, _configured=True))
class TruncateMixin(MinimalHandler):
truncate_error = False
truncate_verify_reject = False
@classmethod
def using(cls, truncate_error=None, **kwds):
subcls = super(TruncateMixin, cls).using(**kwds)
if truncate_error is not None:
truncate_error = as_bool(truncate_error, param='truncate_error')
if truncate_error is not None:
subcls.truncate_error = truncate_error
return subcls
@classmethod
def _check_truncate_policy(cls, secret):
if cls.truncate_error and len(secret) > cls.truncate_size:
raise exc.PasswordTruncateError(cls)
class GenericHandler(MinimalHandler):
setting_kwds = None
context_kwds = ()
ident = None
_hash_regex = None
checksum_size = None
checksum_chars = None
_checksum_is_bytes = False
checksum = None
def __init__(self, checksum=None, use_defaults=False, **kwds):
self.use_defaults = use_defaults
super(GenericHandler, self).__init__(**kwds)
if checksum is not None:
self.checksum = self._norm_checksum(checksum)
return
def _norm_checksum(self, checksum, relaxed=False):
raw = self._checksum_is_bytes
if raw:
if not isinstance(checksum, bytes):
raise exc.ExpectedTypeError(checksum, 'bytes', 'checksum')
else:
if not isinstance(checksum, unicode):
if isinstance(checksum, bytes) and relaxed:
warn('checksum should be unicode, not bytes', PasslibHashWarning)
checksum = checksum.decode('ascii')
else:
raise exc.ExpectedTypeError(checksum, 'unicode', 'checksum')
cc = self.checksum_size
if cc and len(checksum) != cc:
raise exc.ChecksumSizeError(self, raw=raw)
if not raw:
cs = self.checksum_chars
if cs and any(c not in cs for c in checksum):
raise ValueError('invalid characters in %s checksum' % (self.name,))
return checksum
@classmethod
def identify(cls, hash):
hash = to_unicode_for_identify(hash)
if not hash:
return False
ident = cls.ident
if ident is not None:
return hash.startswith(ident)
pat = cls._hash_regex
if pat is not None:
return pat.match(hash) is not None
try:
cls.from_string(hash)
return True
except ValueError:
return False
@classmethod
def from_string(cls, hash, **context):
raise NotImplementedError('%s must implement from_string()' % (cls,))
def to_string(self):
raise NotImplementedError('%s must implement from_string()' % (self.__class__,))
@property
def _stub_checksum(self):
if self.checksum_size:
if self._checksum_is_bytes:
return '\x00' * self.checksum_size
if self.checksum_chars:
return self.checksum_chars[0] * self.checksum_size
if isinstance(self, HasRounds):
orig = self.rounds
self.rounds = self.min_rounds or 1
try:
return self._calc_checksum('')
finally:
self.rounds = orig
return self._calc_checksum('')
def _calc_checksum(self, secret):
raise NotImplementedError('%s must implement _calc_checksum()' % (
self.__class__,))
@classmethod
def hash(cls, secret, **kwds):
if kwds:
settings = extract_settings_kwds(cls, kwds)
if settings:
warn_hash_settings_deprecation(cls, settings)
return cls.using(**settings).hash(secret, **kwds)
validate_secret(secret)
self = cls(use_defaults=True, **kwds)
self.checksum = self._calc_checksum(secret)
return self.to_string()
@classmethod
def verify(cls, secret, hash, **context):
validate_secret(secret)
self = cls.from_string(hash, **context)
chk = self.checksum
if chk is None:
raise exc.MissingDigestError(cls)
return consteq(self._calc_checksum(secret), chk)
@deprecated_method(deprecated='1.7', removed='2.0')
@classmethod
def genconfig(cls, **kwds):
settings = extract_settings_kwds(cls, kwds)
if settings:
return cls.using(**settings).genconfig(**kwds)
self = cls(use_defaults=True, **kwds)
self.checksum = self._stub_checksum
return self.to_string()
@deprecated_method(deprecated='1.7', removed='2.0')
@classmethod
def genhash(cls, secret, config, **context):
if config is None:
raise TypeError('config must be string')
validate_secret(secret)
self = cls.from_string(config, **context)
self.checksum = self._calc_checksum(secret)
return self.to_string()
@classmethod
def needs_update(cls, hash, secret=None, **kwds):
self = cls.from_string(hash)
return self._calc_needs_update(secret=secret, **kwds)
def _calc_needs_update(self, secret=None):
return False
_unparsed_settings = ('salt_size', 'relaxed')
_unsafe_settings = ('salt', 'checksum')
@classproperty
def _parsed_settings(cls):
return (key for key in cls.setting_kwds if key not in cls._unparsed_settings)
@staticmethod
def _sanitize(value, char=u('*')):
if value is None:
return
if isinstance(value, bytes):
from otp.ai.passlib.utils.binary import ab64_encode
value = ab64_encode(value).decode('ascii')
else:
if not isinstance(value, unicode):
value = unicode(value)
size = len(value)
clip = min(4, size // 8)
return value[:clip] + char * (size - clip)
@classmethod
def parsehash(cls, hash, checksum=True, sanitize=False):
self = cls.from_string(hash)
UNSET = object()
        kwds = dict((key, getattr(self, key)) for key in self._parsed_settings
                    if getattr(self, key) != getattr(cls, key, UNSET))
if checksum and self.checksum is not None:
kwds['checksum'] = self.checksum
if sanitize:
if sanitize is True:
sanitize = cls._sanitize
for key in cls._unsafe_settings:
if key in kwds:
kwds[key] = sanitize(kwds[key])
return kwds
@classmethod
def bitsize(cls, **kwds):
try:
info = super(GenericHandler, cls).bitsize(**kwds)
except AttributeError:
info = {}
cc = ALL_BYTE_VALUES if cls._checksum_is_bytes else cls.checksum_chars
if cls.checksum_size and cc:
info['checksum'] = _bitsize(cls.checksum_size, cc)
return info
class StaticHandler(GenericHandler):
setting_kwds = ()
_hash_prefix = u('')
@classmethod
def from_string(cls, hash, **context):
hash = to_unicode(hash, 'ascii', 'hash')
hash = cls._norm_hash(hash)
prefix = cls._hash_prefix
if prefix:
if hash.startswith(prefix):
hash = hash[len(prefix):]
else:
raise exc.InvalidHashError(cls)
return cls(checksum=hash, **context)
@classmethod
def _norm_hash(cls, hash):
return hash
def to_string(self):
return uascii_to_str(self._hash_prefix + self.checksum)
__cc_compat_hack = None
def _calc_checksum(self, secret):
cls = self.__class__
wrapper_cls = cls.__cc_compat_hack
if wrapper_cls is None:
def inner(self, secret):
raise NotImplementedError('%s must implement _calc_checksum()' % (
cls,))
wrapper_cls = cls.__cc_compat_hack = type(cls.__name__ + '_wrapper', (
cls,), dict(_calc_checksum=inner, __module__=cls.__module__))
context = dict((k, getattr(self, k)) for k in self.context_kwds)
try:
hash = wrapper_cls.genhash(secret, None, **context)
except TypeError as err:
if str(err) == 'config must be string':
raise NotImplementedError('%s must implement _calc_checksum()' % (
cls,))
else:
raise
warn('%r should be updated to implement StaticHandler._calc_checksum() instead of StaticHandler.genhash(), support for the latter style will be removed in Passlib 1.8' % cls, DeprecationWarning)
return str_to_uascii(hash)
class HasEncodingContext(GenericHandler):
context_kwds = ('encoding', )
default_encoding = 'utf-8'
def __init__(self, encoding=None, **kwds):
super(HasEncodingContext, self).__init__(**kwds)
self.encoding = encoding or self.default_encoding
class HasUserContext(GenericHandler):
context_kwds = ('user', )
def __init__(self, user=None, **kwds):
super(HasUserContext, self).__init__(**kwds)
self.user = user
@classmethod
def hash(cls, secret, user=None, **context):
return super(HasUserContext, cls).hash(secret, user=user, **context)
@classmethod
def verify(cls, secret, hash, user=None, **context):
return super(HasUserContext, cls).verify(secret, hash, user=user, **context)
@deprecated_method(deprecated='1.7', removed='2.0')
@classmethod
def genhash(cls, secret, config, user=None, **context):
return super(HasUserContext, cls).genhash(secret, config, user=user, **context)
class HasRawChecksum(GenericHandler):
_checksum_is_bytes = True
class HasManyIdents(GenericHandler):
default_ident = None
ident_values = None
ident_aliases = None
ident = None
@classmethod
def using(cls, default_ident=None, ident=None, **kwds):
if ident is not None:
if default_ident is not None:
raise TypeError("'default_ident' and 'ident' are mutually exclusive")
default_ident = ident
subcls = super(HasManyIdents, cls).using(**kwds)
if default_ident is not None:
subcls.default_ident = cls(ident=default_ident, use_defaults=True).ident
return subcls
def __init__(self, ident=None, **kwds):
super(HasManyIdents, self).__init__(**kwds)
if ident is not None:
ident = self._norm_ident(ident)
else:
if self.use_defaults:
ident = self.default_ident
else:
raise TypeError('no ident specified')
self.ident = ident
return
@classmethod
def _norm_ident(cls, ident):
if isinstance(ident, bytes):
ident = ident.decode('ascii')
iv = cls.ident_values
if ident in iv:
return ident
ia = cls.ident_aliases
if ia:
try:
value = ia[ident]
except KeyError:
pass
else:
if value in iv:
return value
raise ValueError('invalid ident: %r' % (ident,))
@classmethod
def identify(cls, hash):
hash = to_unicode_for_identify(hash)
return hash.startswith(cls.ident_values)
@classmethod
def _parse_ident(cls, hash):
hash = to_unicode(hash, 'ascii', 'hash')
for ident in cls.ident_values:
if hash.startswith(ident):
return (ident, hash[len(ident):])
raise exc.InvalidHashError(cls)
class HasSalt(GenericHandler):
min_salt_size = 0
max_salt_size = None
salt_chars = None
@classproperty
def default_salt_size(cls):
return cls.max_salt_size
@classproperty
def default_salt_chars(cls):
return cls.salt_chars
_salt_is_bytes = False
_salt_unit = 'chars'
salt = None
@classmethod
def using(cls, default_salt_size=None, salt_size=None, salt=None, **kwds):
if salt_size is not None:
if default_salt_size is not None:
raise TypeError("'salt_size' and 'default_salt_size' aliases are mutually exclusive")
default_salt_size = salt_size
subcls = super(HasSalt, cls).using(**kwds)
relaxed = kwds.get('relaxed')
if default_salt_size is not None:
if isinstance(default_salt_size, native_string_types):
default_salt_size = int(default_salt_size)
subcls.default_salt_size = subcls._clip_to_valid_salt_size(default_salt_size, param='salt_size', relaxed=relaxed)
if salt is not None:
salt = subcls._norm_salt(salt, relaxed=relaxed)
subcls._generate_salt = staticmethod(lambda : salt)
return subcls
@classmethod
def _clip_to_valid_salt_size(cls, salt_size, param='salt_size', relaxed=True):
mn = cls.min_salt_size
mx = cls.max_salt_size
if mn == mx:
if salt_size != mn:
msg = '%s: %s (%d) must be exactly %d' % (cls.name, param, salt_size, mn)
if relaxed:
warn(msg, PasslibHashWarning)
else:
raise ValueError(msg)
return mn
if salt_size < mn:
msg = '%s: %s (%r) below min_salt_size (%d)' % (cls.name, param, salt_size, mn)
if relaxed:
warn(msg, PasslibHashWarning)
salt_size = mn
else:
raise ValueError(msg)
if mx and salt_size > mx:
msg = '%s: %s (%r) above max_salt_size (%d)' % (cls.name, param, salt_size, mx)
if relaxed:
warn(msg, PasslibHashWarning)
salt_size = mx
else:
raise ValueError(msg)
return salt_size
def __init__(self, salt=None, **kwds):
super(HasSalt, self).__init__(**kwds)
if salt is not None:
salt = self._parse_salt(salt)
else:
if self.use_defaults:
salt = self._generate_salt()
else:
raise TypeError('no salt specified')
self.salt = salt
return
def _parse_salt(self, salt):
return self._norm_salt(salt)
@classmethod
def _norm_salt(cls, salt, relaxed=False):
if cls._salt_is_bytes:
if not isinstance(salt, bytes):
raise exc.ExpectedTypeError(salt, 'bytes', 'salt')
else:
if not isinstance(salt, unicode):
if isinstance(salt, bytes) and (PY2 or relaxed):
salt = salt.decode('ascii')
else:
raise exc.ExpectedTypeError(salt, 'unicode', 'salt')
sc = cls.salt_chars
if sc is not None and any(c not in sc for c in salt):
raise ValueError('invalid characters in %s salt' % cls.name)
mn = cls.min_salt_size
if mn and len(salt) < mn:
msg = 'salt too small (%s requires %s %d %s)' % (cls.name,
'exactly' if mn == cls.max_salt_size else '>=', mn, cls._salt_unit)
raise ValueError(msg)
mx = cls.max_salt_size
if mx and len(salt) > mx:
msg = 'salt too large (%s requires %s %d %s)' % (cls.name,
'exactly' if mx == mn else '<=', mx, cls._salt_unit)
if relaxed:
warn(msg, PasslibHashWarning)
salt = cls._truncate_salt(salt, mx)
else:
raise ValueError(msg)
return salt
@staticmethod
def _truncate_salt(salt, mx):
return salt[:mx]
@classmethod
def _generate_salt(cls):
return getrandstr(rng, cls.default_salt_chars, cls.default_salt_size)
@classmethod
def bitsize(cls, salt_size=None, **kwds):
info = super(HasSalt, cls).bitsize(**kwds)
if salt_size is None:
salt_size = cls.default_salt_size
info['salt'] = _bitsize(salt_size, cls.default_salt_chars)
return info
class HasRawSalt(HasSalt):
salt_chars = ALL_BYTE_VALUES
_salt_is_bytes = True
_salt_unit = 'bytes'
@classmethod
def _generate_salt(cls):
return getrandbytes(rng, cls.default_salt_size)
class HasRounds(GenericHandler):
min_rounds = 0
max_rounds = None
rounds_cost = 'linear'
using_rounds_kwds = ('min_desired_rounds', 'max_desired_rounds', 'min_rounds',
'max_rounds', 'default_rounds', 'vary_rounds')
min_desired_rounds = None
max_desired_rounds = None
default_rounds = None
vary_rounds = None
rounds = None
@classmethod
def using(cls, min_desired_rounds=None, max_desired_rounds=None, default_rounds=None, vary_rounds=None, min_rounds=None, max_rounds=None, rounds=None, **kwds):
if min_rounds is not None:
if min_desired_rounds is not None:
raise TypeError("'min_rounds' and 'min_desired_rounds' aliases are mutually exclusive")
min_desired_rounds = min_rounds
if max_rounds is not None:
if max_desired_rounds is not None:
raise TypeError("'max_rounds' and 'max_desired_rounds' aliases are mutually exclusive")
max_desired_rounds = max_rounds
if rounds is not None:
if min_desired_rounds is None:
min_desired_rounds = rounds
if max_desired_rounds is None:
max_desired_rounds = rounds
if default_rounds is None:
default_rounds = rounds
subcls = super(HasRounds, cls).using(**kwds)
relaxed = kwds.get('relaxed')
if min_desired_rounds is None:
explicit_min_rounds = False
min_desired_rounds = cls.min_desired_rounds
else:
explicit_min_rounds = True
if isinstance(min_desired_rounds, native_string_types):
min_desired_rounds = int(min_desired_rounds)
subcls.min_desired_rounds = subcls._norm_rounds(min_desired_rounds, param='min_desired_rounds', relaxed=relaxed)
if max_desired_rounds is None:
max_desired_rounds = cls.max_desired_rounds
else:
if isinstance(max_desired_rounds, native_string_types):
max_desired_rounds = int(max_desired_rounds)
if min_desired_rounds and max_desired_rounds < min_desired_rounds:
msg = '%s: max_desired_rounds (%r) below min_desired_rounds (%r)' % (
subcls.name, max_desired_rounds, min_desired_rounds)
if explicit_min_rounds:
raise ValueError(msg)
else:
warn(msg, PasslibConfigWarning)
max_desired_rounds = min_desired_rounds
subcls.max_desired_rounds = subcls._norm_rounds(max_desired_rounds, param='max_desired_rounds', relaxed=relaxed)
if default_rounds is not None:
if isinstance(default_rounds, native_string_types):
default_rounds = int(default_rounds)
if min_desired_rounds and default_rounds < min_desired_rounds:
raise ValueError('%s: default_rounds (%r) below min_desired_rounds (%r)' % (
subcls.name, default_rounds, min_desired_rounds))
else:
if max_desired_rounds and default_rounds > max_desired_rounds:
raise ValueError('%s: default_rounds (%r) above max_desired_rounds (%r)' % (
subcls.name, default_rounds, max_desired_rounds))
subcls.default_rounds = subcls._norm_rounds(default_rounds, param='default_rounds', relaxed=relaxed)
if subcls.default_rounds is not None:
subcls.default_rounds = subcls._clip_to_desired_rounds(subcls.default_rounds)
if vary_rounds is not None:
if isinstance(vary_rounds, native_string_types):
if vary_rounds.endswith('%'):
vary_rounds = float(vary_rounds[:-1]) * 0.01
elif '.' in vary_rounds:
vary_rounds = float(vary_rounds)
else:
vary_rounds = int(vary_rounds)
if vary_rounds < 0:
raise ValueError('%s: vary_rounds (%r) below 0' % (
subcls.name, vary_rounds))
else:
if isinstance(vary_rounds, float):
if vary_rounds > 1:
raise ValueError('%s: vary_rounds (%r) above 1.0' % (
subcls.name, vary_rounds))
else:
if not isinstance(vary_rounds, int):
raise TypeError('vary_rounds must be int or float')
if vary_rounds:
warn("The 'vary_rounds' option is deprecated as of Passlib 1.7, and will be removed in Passlib 2.0", PasslibConfigWarning)
subcls.vary_rounds = vary_rounds
return subcls
@classmethod
def _clip_to_desired_rounds(cls, rounds):
mnd = cls.min_desired_rounds or 0
if rounds < mnd:
return mnd
mxd = cls.max_desired_rounds
if mxd and rounds > mxd:
return mxd
return rounds
@classmethod
def _calc_vary_rounds_range(cls, default_rounds):
vary_rounds = cls.vary_rounds
def linear_to_native(value, upper):
return value
if isinstance(vary_rounds, float):
if cls.rounds_cost == 'log2':
default_rounds = 1 << default_rounds
def linear_to_native(value, upper):
if value <= 0:
return 0
if upper:
return int(math.log(value, 2))
return int(math.ceil(math.log(value, 2)))
vary_rounds = int(default_rounds * vary_rounds)
lower = linear_to_native(default_rounds - vary_rounds, False)
upper = linear_to_native(default_rounds + vary_rounds, True)
return (
cls._clip_to_desired_rounds(lower), cls._clip_to_desired_rounds(upper))
def __init__(self, rounds=None, **kwds):
super(HasRounds, self).__init__(**kwds)
if rounds is not None:
rounds = self._parse_rounds(rounds)
else:
if self.use_defaults:
rounds = self._generate_rounds()
else:
raise TypeError('no rounds specified')
self.rounds = rounds
return
def _parse_rounds(self, rounds):
return self._norm_rounds(rounds)
@classmethod
def _norm_rounds(cls, rounds, relaxed=False, param='rounds'):
return norm_integer(cls, rounds, cls.min_rounds, cls.max_rounds, param=param, relaxed=relaxed)
@classmethod
def _generate_rounds(cls):
rounds = cls.default_rounds
if rounds is None:
raise TypeError('%s rounds value must be specified explicitly' % (cls.name,))
if cls.vary_rounds:
lower, upper = cls._calc_vary_rounds_range(rounds)
if lower < upper:
rounds = rng.randint(lower, upper)
return rounds
def _calc_needs_update(self, **kwds):
min_desired_rounds = self.min_desired_rounds
if min_desired_rounds and self.rounds < min_desired_rounds:
return True
max_desired_rounds = self.max_desired_rounds
if max_desired_rounds and self.rounds > max_desired_rounds:
return True
return super(HasRounds, self)._calc_needs_update(**kwds)
@classmethod
def bitsize(cls, rounds=None, vary_rounds=0.1, **kwds):
info = super(HasRounds, cls).bitsize(**kwds)
if cls.rounds_cost != 'log2':
import math
if rounds is None:
rounds = cls.default_rounds
info['rounds'] = max(0, int(1 + math.log(rounds * vary_rounds, 2)))
return info
class ParallelismMixin(GenericHandler):
parallelism = 1
@classmethod
def using(cls, parallelism=None, **kwds):
subcls = super(ParallelismMixin, cls).using(**kwds)
if parallelism is not None:
if isinstance(parallelism, native_string_types):
parallelism = int(parallelism)
subcls.parallelism = subcls._norm_parallelism(parallelism, relaxed=kwds.get('relaxed'))
return subcls
def __init__(self, parallelism=None, **kwds):
super(ParallelismMixin, self).__init__(**kwds)
if parallelism is None:
pass
else:
self.parallelism = self._norm_parallelism(parallelism)
return
@classmethod
def _norm_parallelism(cls, parallelism, relaxed=False):
return norm_integer(cls, parallelism, min=1, param='parallelism', relaxed=relaxed)
def _calc_needs_update(self, **kwds):
if self.parallelism != type(self).parallelism:
return True
return super(ParallelismMixin, self)._calc_needs_update(**kwds)
_backend_lock = threading.RLock()
class BackendMixin(PasswordHash):
backends = None
__backend = None
_no_backend_suggestion = None
_pending_backend = None
_pending_dry_run = False
@classmethod
def get_backend(cls):
if not cls.__backend:
cls.set_backend()
return cls.__backend
@classmethod
def has_backend(cls, name='any'):
try:
cls.set_backend(name, dryrun=True)
return True
except (exc.MissingBackendError, exc.PasslibSecurityError):
return False
@classmethod
def set_backend(cls, name='any', dryrun=False):
if name == 'any' and cls.__backend or name and name == cls.__backend:
return cls.__backend
owner = cls._get_backend_owner()
if owner is not cls:
return owner.set_backend(name, dryrun=dryrun)
if name == 'any' or name == 'default':
default_error = None
for name in cls.backends:
try:
return cls.set_backend(name, dryrun=dryrun)
except exc.MissingBackendError:
continue
except exc.PasslibSecurityError as err:
if default_error is None:
default_error = err
continue
if default_error is None:
msg = '%s: no backends available' % cls.name
if cls._no_backend_suggestion:
msg += cls._no_backend_suggestion
default_error = exc.MissingBackendError(msg)
raise default_error
if name not in cls.backends:
raise exc.UnknownBackendError(cls, name)
with _backend_lock:
orig = (
cls._pending_backend, cls._pending_dry_run)
try:
cls._pending_backend = name
cls._pending_dry_run = dryrun
cls._set_backend(name, dryrun)
finally:
cls._pending_backend, cls._pending_dry_run = orig
if not dryrun:
cls.__backend = name
return name
return
@classmethod
def _get_backend_owner(cls):
return cls
@classmethod
def _set_backend(cls, name, dryrun):
loader = cls._get_backend_loader(name)
kwds = {}
if accepts_keyword(loader, 'name'):
kwds['name'] = name
if accepts_keyword(loader, 'dryrun'):
kwds['dryrun'] = dryrun
ok = loader(**kwds)
if ok is False:
raise exc.MissingBackendError('%s: backend not available: %s' % (
cls.name, name))
else:
if ok is not True:
raise AssertionError('backend loaders must return True or False: %r' % (
ok,))
@classmethod
def _get_backend_loader(cls, name):
raise NotImplementedError('implement in subclass')
@classmethod
def _stub_requires_backend(cls):
if cls.__backend:
raise AssertionError('%s: _finalize_backend(%r) failed to replace lazy loader' % (
cls.name, cls.__backend))
cls.set_backend()
if not cls.__backend:
raise AssertionError('%s: set_backend() failed to load a default backend' % cls.name)
class SubclassBackendMixin(BackendMixin):
_backend_mixin_target = False
_backend_mixin_map = None
@classmethod
def _get_backend_owner(cls):
if not cls._backend_mixin_target:
raise AssertionError('_backend_mixin_target not set')
for base in cls.__mro__:
if base.__dict__.get('_backend_mixin_target'):
return base
raise AssertionError("expected to find class w/ '_backend_mixin_target' set")
@classmethod
def _set_backend(cls, name, dryrun):
super(SubclassBackendMixin, cls)._set_backend(name, dryrun)
mixin_map = cls._backend_mixin_map
mixin_cls = mixin_map[name]
update_mixin_classes(cls, add=mixin_cls, remove=mixin_map.values(), append=True, before=SubclassBackendMixin, dryrun=dryrun)
@classmethod
def _get_backend_loader(cls, name):
return cls._backend_mixin_map[name]._load_backend_mixin
class HasManyBackends(BackendMixin, GenericHandler):
def _calc_checksum(self, secret):
return self._calc_checksum_backend(secret)
def _calc_checksum_backend(self, secret):
self._stub_requires_backend()
return self._calc_checksum_backend(secret)
@classmethod
def _get_backend_loader(cls, name):
loader = getattr(cls, '_load_backend_' + name, None)
if loader is None:
def loader():
return cls.__load_legacy_backend(name)
return loader
@classmethod
def __load_legacy_backend(cls, name):
value = getattr(cls, '_has_backend_' + name)
warn('%s: support for ._has_backend_%s is deprecated as of Passlib 1.7, and will be removed in Passlib 1.9/2.0, please implement ._load_backend_%s() instead' % (
cls.name, name, name), DeprecationWarning)
if value:
func = getattr(cls, '_calc_checksum_' + name)
cls._set_calc_checksum_backend(func)
return True
return False
@classmethod
def _set_calc_checksum_backend(cls, func):
backend = cls._pending_backend
if not callable(func):
raise RuntimeError('%s: backend %r returned invalid callable: %r' % (
cls.name, backend, func))
if not cls._pending_dry_run:
cls._calc_checksum_backend = func
class PrefixWrapper(object):
_using_clone_attrs = ()
def __init__(self, name, wrapped, prefix=u(''), orig_prefix=u(''), lazy=False, doc=None, ident=None):
self.name = name
if isinstance(prefix, bytes):
prefix = prefix.decode('ascii')
self.prefix = prefix
if isinstance(orig_prefix, bytes):
orig_prefix = orig_prefix.decode('ascii')
self.orig_prefix = orig_prefix
if doc:
self.__doc__ = doc
if hasattr(wrapped, 'name'):
self._set_wrapped(wrapped)
else:
self._wrapped_name = wrapped
if not lazy:
self._get_wrapped()
if ident is not None:
if ident is True:
if prefix:
ident = prefix
else:
raise ValueError('no prefix specified')
if isinstance(ident, bytes):
ident = ident.decode('ascii')
if ident[:len(prefix)] != prefix[:len(ident)]:
raise ValueError('ident must agree with prefix')
self._ident = ident
return
_wrapped_name = None
_wrapped_handler = None
def _set_wrapped(self, handler):
if 'ident' in handler.setting_kwds and self.orig_prefix:
warn("PrefixWrapper: 'orig_prefix' option may not work correctly for handlers which have multiple identifiers: %r" % (
handler.name,), exc.PasslibRuntimeWarning)
self._wrapped_handler = handler
def _get_wrapped(self):
handler = self._wrapped_handler
if handler is None:
handler = get_crypt_handler(self._wrapped_name)
self._set_wrapped(handler)
return handler
wrapped = property(_get_wrapped)
_ident = False
@property
def ident(self):
value = self._ident
if value is False:
value = None
if not self.orig_prefix:
wrapped = self.wrapped
ident = getattr(wrapped, 'ident', None)
if ident is not None:
value = self._wrap_hash(ident)
self._ident = value
return value
_ident_values = False
@property
def ident_values(self):
value = self._ident_values
if value is False:
value = None
if not self.orig_prefix:
wrapped = self.wrapped
idents = getattr(wrapped, 'ident_values', None)
if idents:
value = tuple(self._wrap_hash(ident) for ident in idents)
self._ident_values = value
return value
_proxy_attrs = ('setting_kwds', 'context_kwds', 'default_rounds', 'min_rounds',
'max_rounds', 'rounds_cost', 'min_desired_rounds', 'max_desired_rounds',
'vary_rounds', 'default_salt_size', 'min_salt_size', 'max_salt_size',
'salt_chars', 'default_salt_chars', 'backends', 'has_backend',
'get_backend', 'set_backend', 'is_disabled', 'truncate_size',
'truncate_error', 'truncate_verify_reject', '_salt_is_bytes')
def __repr__(self):
args = [
repr(self._wrapped_name or self._wrapped_handler)]
if self.prefix:
args.append('prefix=%r' % self.prefix)
if self.orig_prefix:
args.append('orig_prefix=%r' % self.orig_prefix)
args = (', ').join(args)
return 'PrefixWrapper(%r, %s)' % (self.name, args)
def __dir__(self):
attrs = set(dir(self.__class__))
attrs.update(self.__dict__)
wrapped = self.wrapped
attrs.update(attr for attr in self._proxy_attrs if hasattr(wrapped, attr))
return list(attrs)
def __getattr__(self, attr):
if attr in self._proxy_attrs:
return getattr(self.wrapped, attr)
raise AttributeError('missing attribute: %r' % (attr,))
def __setattr__(self, attr, value):
if attr in self._proxy_attrs and self._derived_from:
wrapped = self.wrapped
if hasattr(wrapped, attr):
setattr(wrapped, attr, value)
return
return object.__setattr__(self, attr, value)
def _unwrap_hash(self, hash):
prefix = self.prefix
if not hash.startswith(prefix):
raise exc.InvalidHashError(self)
return self.orig_prefix + hash[len(prefix):]
def _wrap_hash(self, hash):
if isinstance(hash, bytes):
hash = hash.decode('ascii')
orig_prefix = self.orig_prefix
if not hash.startswith(orig_prefix):
raise exc.InvalidHashError(self.wrapped)
wrapped = self.prefix + hash[len(orig_prefix):]
return uascii_to_str(wrapped)
_derived_from = None
def using(self, **kwds):
subcls = self.wrapped.using(**kwds)
wrapper = PrefixWrapper(self.name, subcls, prefix=self.prefix, orig_prefix=self.orig_prefix)
wrapper._derived_from = self
for attr in self._using_clone_attrs:
setattr(wrapper, attr, getattr(self, attr))
return wrapper
def needs_update(self, hash, **kwds):
hash = self._unwrap_hash(hash)
return self.wrapped.needs_update(hash, **kwds)
def identify(self, hash):
hash = to_unicode_for_identify(hash)
if not hash.startswith(self.prefix):
return False
hash = self._unwrap_hash(hash)
return self.wrapped.identify(hash)
@deprecated_method(deprecated='1.7', removed='2.0')
def genconfig(self, **kwds):
config = self.wrapped.genconfig(**kwds)
if config is None:
raise RuntimeError('.genconfig() must return a string, not None')
return self._wrap_hash(config)
@deprecated_method(deprecated='1.7', removed='2.0')
def genhash(self, secret, config, **kwds):
if config is not None:
config = to_unicode(config, 'ascii', 'config/hash')
config = self._unwrap_hash(config)
return self._wrap_hash(self.wrapped.genhash(secret, config, **kwds))
@deprecated_method(deprecated='1.7', removed='2.0', replacement='.hash()')
def encrypt(self, secret, **kwds):
return self.hash(secret, **kwds)
def hash(self, secret, **kwds):
return self._wrap_hash(self.wrapped.hash(secret, **kwds))
def verify(self, secret, hash, **kwds):
hash = to_unicode(hash, 'ascii', 'hash')
hash = self._unwrap_hash(hash)
return self.wrapped.verify(secret, hash, **kwds)
|
'''
░░░░░░░░░▄░░░░░░░░░░░░░░▄░░░░
░░░░░░░░▌▒█░░░░░░░░░░░▄▀▒▌░░░
░░░░░░░░▌▒▒█░░░░░░░░▄▀▒▒▒▐░░░
░░░░░░░▐▄▀▒▒▀▀▀▀▄▄▄▀▒▒▒▒▒▐░░░
░░░░░▄▄▀▒░▒▒▒▒▒▒▒▒▒█▒▒▄█▒▐░░░
░░░▄▀▒▒▒░░░▒▒▒░░░▒▒▒▀██▀▒▌░░░
░░▐▒▒▒▄▄▒▒▒▒░░░▒▒▒▒▒▒▒▀▄▒▒▌░░
░░▌░░▌█▀▒▒▒▒▒▄▀█▄▒▒▒▒▒▒▒█▒▐░░
░▐░░░▒▒▒▒▒▒▒▒▌██▀▒▒░░░▒▒▒▀▄▌░
░▌░▒▄██▄▒▒▒▒▒▒▒▒▒░░░░░░▒▒▒▒▌░
▀▒▀▐▄█▄█▌▄░▀▒▒░░░░░░░░░░▒▒▒▐░
▐▒▒▐▀▐▀▒░▄▄▒▄▒▒▒▒▒▒░▒░▒░▒▒▒▒▌
▐▒▒▒▀▀▄▄▒▒▒▄▒▒▒▒▒▒▒▒░▒░▒░▒▒▐░
░▌▒▒▒▒▒▒▀▀▀▒▒▒▒▒▒░▒░▒░▒░▒▒▒▌░
░▐▒▒▒▒▒▒▒▒▒▒▒▒▒▒░▒░▒░▒▒▄▒▒▐░░
░░▀▄▒▒▒▒▒▒▒▒▒▒▒░▒░▒░▒▄▒▒▒▒▌░░
░░░░▀▄▒▒▒▒▒▒▒▒▒▒▄▄▄▀▒▒▒▒▄▀░░░
░░░░░░▀▄▄▄▄▄▄▀▀▀▒▒▒▒▒▄▄▀░░░░░
░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▀▀░░░░░░░░
MUCHSCRIPT MADE FOR DOGE
INSPIRED BY DOGE AND ELON MUSK
MADE WITH LOVE AND STRESS by sBlip
@ MUCHSCRIPT 0.1.3 BETA 2021
MUCH WOW. MUCH COOL. MUCH CRYPTO.
'''
__version__ = '0.1.3'
__author__ = 'shaurya-blip'
__github__ = 'https://github.com/shaurya-blip/muchscript/'
ANSWER_OF_THE_QUESTION_WE_DONT_KNOW = 42
PEOPLE_KILLED_IN_THE_DEATH_STAR = 1565231
GEORGE_LUCAS_FIRST_FILM = 1138
PIXAR_EASTER_EGG = 'A113'
|
#!/usr/bin/env python
import pytest
TEST_DATA = [
143845,
86139,
53043,
124340,
73213,
108435,
126874,
131397,
85618,
107774,
66872,
94293,
51015,
51903,
147655,
112891,
100993,
143374,
83737,
145868,
144768,
89793,
124127,
135366,
94017,
81678,
102325,
75394,
103852,
81896,
148050,
142780,
50503,
110691,
117851,
137382,
92841,
138222,
128414,
146834,
59968,
136456,
122397,
147157,
83595,
59916,
75690,
125025,
147797,
112494,
76247,
100221,
63389,
59070,
97466,
91905,
126234,
76561,
128170,
102778,
82342,
131097,
51609,
148204,
74812,
64925,
127927,
79056,
73307,
78431,
88770,
97688,
103564,
76001,
105232,
145361,
77845,
87518,
117293,
110054,
135599,
85005,
85983,
118255,
103031,
142440,
140505,
99614,
69593,
69161,
78795,
54808,
115582,
117976,
148858,
84193,
147285,
89038,
92677,
106574,
]
def f(num):
check = num // 3 - 2
if check < 1:
return 0
return check + f(check)
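# f() applies the mass // 3 - 2 rule recursively until the extra fuel no longer
# needs fuel of its own, e.g. f(14) == 2 and f(1969) == 966 (matching the
# parametrized tests below).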
def solve2(inputs):
return sum(map(f, inputs))
def solve(inputs):
sum = 0
for mass in inputs:
sum += mass // 3 - 2
return sum
@pytest.mark.parametrize("mass,fuel", [(12, 2), (14, 2), (1969, 654), (100756, 33583)])
def test_solve(mass, fuel):
assert fuel == solve([mass])
@pytest.mark.parametrize("mass,fuel", [(14, 2), (1969, 966), (100756, 50346)])
def test_part2(mass, fuel):
assert fuel == solve2([mass])
def main():
print(solve(TEST_DATA))
print(solve2(TEST_DATA))
if __name__ == "__main__":
main()
|
"""
Manipulation with the YANG schemas.
File: schemas.py
Author: Radek Krejci <rkrejci@cesnet.cz>
"""
import json
import os
import errno
import time
from subprocess import check_output
from shutil import copy
from liberouterapi import auth
from flask import request
import yang
from .inventory import INVENTORY, inventory_check
from .error import NetopeerException
__SCHEMAS_EMPTY = '{"timestamp":0, "schemas":{}}'
def __schema_parse(path, format = yang.LYS_IN_UNKNOWN):
try:
ctx = yang.Context(os.path.dirname(path))
except Exception as e:
raise NetopeerException(str(e))
try:
module = ctx.parse_module_path(path, yang.LYS_IN_YANG if format == yang.LYS_IN_UNKNOWN else format)
except Exception as e:
        if format != yang.LYS_IN_UNKNOWN:
raise NetopeerException(str(e))
try:
			module = ctx.parse_module_path(path, yang.LYS_IN_YIN)
except Exception as e:
raise NetopeerException(str(e))
return module
def __schemas_init(path):
schemas = json.loads(__SCHEMAS_EMPTY)
try:
ctx = yang.Context()
except Exception as e:
raise NetopeerException(str(e))
# initialize the list with libyang's internal modules
modules = ctx.get_module_iter()
for module in modules:
name_norm = module.name() + '@' + module.rev().date() + '.yang'
schemas['schemas'][name_norm] = {'name':module.name(), 'revision':module.rev().date()}
try:
with open(os.path.join(path, name_norm), 'w') as schema_file:
schema_file.write(module.print_mem(yang.LYS_OUT_YANG, 0))
except:
pass
try:
nc_schemas_dir = check_output("pkg-config --variable=LNC2_SCHEMAS_DIR libnetconf2", shell = True).decode()
nc_schemas_dir = nc_schemas_dir[:len(nc_schemas_dir) - 1]
for file in os.listdir(nc_schemas_dir):
if file[-5:] == '.yang' or file[-4:] == '.yin':
try:
copy(os.path.join(nc_schemas_dir, file), path)
except:
pass
else:
continue
except:
pass
return schemas
def __schemas_inv_load(path):
schemainv_path = os.path.join(path, 'schemas.json')
try:
with open(schemainv_path, 'r') as schemas_file:
schemas = json.load(schemas_file)
except OSError as e:
if e.errno == errno.ENOENT:
schemas = __schemas_init(path)
else:
raise NetopeerException('Unable to use user\'s schemas inventory ' + schemainv_path + ' (' + str(e) + ').')
except ValueError:
schemas = __schemas_init(path)
return schemas
def __schemas_inv_save(path, schemas):
schemainv_path = os.path.join(path, 'schemas.json')
# update the timestamp
schemas['timestamp'] = time.time()
#store the list
try:
with open(schemainv_path, 'w') as schema_file:
json.dump(schemas, schema_file, sort_keys = True)
except Exception:
pass
return schemas
def __schemas_update(path):
# get schemas database
schemas = __schemas_inv_load(path)
# get the previous timestamp
timestamp = schemas['timestamp']
# check the current content of the storage
for file in os.listdir(path):
if file[-5:] == '.yang':
format = yang.LYS_IN_YANG
elif file[-4:] == '.yin':
format = yang.LYS_IN_YIN
else:
continue
		schemapath = os.path.join(path, file)
if os.path.getmtime(schemapath) > timestamp:
# update the list
try:
module = __schema_parse(schemapath, format)
if module.rev_size():
name_norm = module.name() + '@' + module.rev().date() + '.yang'
schemas['schemas'][name_norm] = {'name': module.name(), 'revision': module.rev().date()}
else:
name_norm = module.name() + '.yang'
schemas['schemas'][name_norm] = {'name': module.name()}
if file != name_norm:
try:
with open(os.path.join(path, name_norm), 'w') as schema_file:
schema_file.write(module.print_mem(yang.LYS_OUT_YANG, 0))
except:
pass
try:
os.remove(schemapath)
except:
pass
except:
continue
#store the list
__schemas_inv_save(path, schemas)
# return the up-to-date list
return schemas['schemas']
@auth.required()
def schemas_list():
session = auth.lookup(request.headers.get('Authorization', None))
user = session['user']
path = os.path.join(INVENTORY, user.username)
inventory_check(path)
schemas = __schemas_update(path)
return(json.dumps(schemas, sort_keys = True))
@auth.required()
def schema_get():
session = auth.lookup(request.headers.get('Authorization', None))
user = session['user']
req = request.args.to_dict()
path = os.path.join(INVENTORY, user.username)
if not 'key' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing schema key.'}))
key = req['key']
schemas = __schemas_inv_load(path)
if key in schemas['schemas']:
try:
with open(os.path.join(path, key), 'r') as schema_file:
data = schema_file.read()
return(json.dumps({'success': True, 'data': data}))
except:
			pass
return(json.dumps({'success': False, 'error-msg':'Schema ' + key + ' not found.'}))
@auth.required()
def schemas_add():
if 'schema' not in request.files:
raise NetopeerException('Missing schema file in upload request.')
session = auth.lookup(request.headers.get('Authorization', None))
user = session['user']
file = request.files['schema']
# store the file
path = os.path.join(INVENTORY, user.username, file.filename)
file.save(path)
# parse file
try:
if file.filename[-5:] == '.yang':
format = yang.LYS_IN_YANG
elif file.filename[-4:] == '.yin':
format = yang.LYS_IN_YIN
else:
format = yang.LYS_IN_UNKNOWN
module = __schema_parse(path, format)
# normalize file name to allow removing without remembering schema path
if module.rev_size():
name_norm = module.name() + '@' + module.rev().date() + '.yang'
else:
name_norm = module.name() + '.yang'
if file.filename != name_norm:
with open(os.path.join(INVENTORY, user.username, name_norm), 'w') as schema_file:
schema_file.write(module.print_mem(yang.LYS_OUT_YANG, 0))
try:
os.remove(path)
except:
pass
except Exception:
try:
os.remove(path)
except:
pass
return(json.dumps({'success': False}))
return(json.dumps({'success': True}))
@auth.required()
def schemas_rm():
session = auth.lookup(request.headers.get('Authorization', None))
user = session['user']
path = os.path.join(INVENTORY, user.username)
key = request.get_json()
if not key:
raise NetopeerException('Invalid schema remove request.')
schemas = __schemas_inv_load(path)
try:
schemas['schemas'].pop(key)
except KeyError:
# schema not in inventory
return (json.dumps({'success': False}))
# update the inventory database
__schemas_inv_save(path, schemas)
# remove the schema file
try:
os.remove(os.path.join(path, key))
except Exception as e:
print(e)
# TODO: resolve dependencies ?
return(json.dumps({'success': True}))
|
import os as _os
import sys as _sys
import json
import dash as _dash
from dash_uploader.configure_upload import configure_upload
from dash_uploader.callbacks import callback
from dash_uploader.httprequesthandler import HttpRequestHandler
from dash_uploader.upload import Upload
# noinspection PyUnresolvedReferences
from ._build._imports_ import * # noqa: F403,F401
from ._build._imports_ import __all__ as build_all
# Defines all exposed APIs of this package.
__all__ = ["configure_upload", "callback", "HttpRequestHandler", "Upload"]
if not hasattr(_dash, "development"):
print(
"Dash was not successfully imported. "
"Make sure you don't have a file "
'named \n"dash.py" in your current directory.',
file=_sys.stderr,
)
_sys.exit(1)
_basepath = _os.path.dirname(__file__)
_filepath = _os.path.abspath(_os.path.join(_basepath, "_build", "package-info.json"))
with open(_filepath) as f:
package = json.load(f)
package_name = package["name"].replace(" ", "_").replace("-", "_")
__version__ = package["version"]
_current_path = _os.path.dirname(_os.path.abspath(__file__))
_this_module = _sys.modules[__name__]
_js_dist = [
{"relative_package_path": "_build/dash_uploader.min.js", "namespace": package_name},
{
"relative_package_path": "_build/dash_uploader.min.js.map",
"namespace": package_name,
"dynamic": True,
},
]
_css_dist = []
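# Attach the JS/CSS bundle metadata defined above to every component exported
# from _build so that Dash serves the bundles when the components are used.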
for _component in build_all:
setattr(locals()[_component], "_js_dist", _js_dist)
setattr(locals()[_component], "_css_dist", _css_dist)
|
import unittest
from .. import lmparser
from .. import tokenstream
from .. import lexertokens
from .. import lmast
from .. import lamarksyntaxerror
class TestLmParser(unittest.TestCase):
def setUp(self):
self.parser = lmparser.LmParser({})
def test_empty(self):
"Parse empty token stream"
tok_stream = tokenstream.TokenStream([])
self._compare_ast(
self.parser.parse(tok_stream),
lmast.Document([])
)
def test_markdown(self):
"Just markdown"
ast = self._make_ast([
lexertokens.OTHER("markdown",0)])
correct_ast = lmast.Document([lmast.Markdown("markdown",0)])
self._compare_ast(ast, correct_ast)
def test_latex(self):
"One LaTeX node"
ast = self._make_ast([
lexertokens.BIN_START("{%latex%}",0),
lexertokens.OTHER("a^2", 0),
lexertokens.BIN_END("{%end%}",0)])
correct_ast = lmast.Document(
[lmast.BinTag(
[Str("a^2", 0)],
0,
"{%latex%}")
]
)
self._compare_ast(ast, correct_ast)
    def test_latex_empty(self):
        "Empty LaTeX tag."
ast = self._make_ast([
lexertokens.BIN_START("{%latex%}",0),
lexertokens.BIN_END("{%end%}",0)])
correct_ast = lmast.Document([lmast.BinTag([], 0, "{%latex%}")])
self._compare_ast(ast, correct_ast)
def test_latex_no_match(self):
"One BIN_START without BIN_END. Should throw error."
with self.assertRaises(lamarksyntaxerror.LaMarkSyntaxError):
ast = self._make_ast([
lexertokens.BIN_START("{%latex%}",0),
lexertokens.OTHER("a^2", 0)])
def test_latex_invalid_nesting_bin_start(self):
"Nested BIN_STARTS should throw syntax error."
with self.assertRaises(lamarksyntaxerror.LaMarkSyntaxError):
ast = self._make_ast([
lexertokens.BIN_START("{%latex%}",0),
lexertokens.BIN_START("{%latex%}",0)])
def test_latex_invalid_consecutive_bin_end(self):
"Consecutive BIN_END tags should raise syntax error."
with self.assertRaises(lamarksyntaxerror.LaMarkSyntaxError):
ast = self._make_ast([
lexertokens.BIN_START("{%latex%}",0),
lexertokens.BIN_END("{%end%}",0),
lexertokens.BIN_END("{%end%}",0)])
def test_latex_invalid_consecutive_bin_end_after_other(self):
"""Consecutive BIN_END tags should raise syntax error, even if separated
by OTHER tag.
"""
with self.assertRaises(lamarksyntaxerror.LaMarkSyntaxError):
ast = self._make_ast([
lexertokens.BIN_START("{%latex%}",0),
lexertokens.BIN_END("{%end%}",0),
lexertokens.OTHER("some latex", 0),
lexertokens.BIN_END("{%end%}",0)])
def test_escaped_bin_start(self):
"Escaped BIN_START shouldn't start LaTeX section."
ast = self._make_ast([
lexertokens.ESCAPE("\\",0),
lexertokens.BIN_START("{%latex%}",0)])
correct_ast = lmast.Document([lmast.Markdown("{%latex%}", 0)])
self._compare_ast(ast, correct_ast)
def test_escaped_bin_end(self):
"Escaped BIN_END shouldn't end LaTeX section."
ast = self._make_ast([
lexertokens.ESCAPE("\\",0),
lexertokens.BIN_END("{%end%}",0)])
correct_ast = lmast.Document([lmast.Markdown("{%end%}", 0)])
self._compare_ast(ast, correct_ast)
def test_escaped_bin_start_in_latex_section(self):
"Escaped BIN_START in LaTeX section should be ignored."
ast = self._make_ast([
lexertokens.BIN_START("{%latex%}",0),
lexertokens.ESCAPE("\\",0),
lexertokens.BIN_START("{%latex%}",0),
lexertokens.BIN_END("{%end%}",0)])
correct_ast = lmast.Document(
[
lmast.BinTag(
[lmast.Str("{%latex%}",0)],
0,
"{%latex%}")
]
)
self._compare_ast(ast, correct_ast)
def test_escaped_bin_end_section_in_latex(self):
"Escaped BIN_END in LaTeX section should be ignored."
ast = self._make_ast([
lexertokens.BIN_START("{%latex%}",0),
lexertokens.ESCAPE("\\",0),
lexertokens.BIN_END("{%end%}",0),
lexertokens.BIN_END("{%end%}",0)])
correct_ast = lmast.Document(
[
lmast.BinTag(
[lmast.Str("{%end%}",0)],
0,
"{%latex%}")
]
)
self._compare_ast(ast, correct_ast)
def test_escape_in_other_isnt_escape(self):
"An escape tag before an OTHER isn't an escape."
ast = self._make_ast([
lexertokens.OTHER("Some Markdown",0),
lexertokens.ESCAPE("\\",0),
lexertokens.OTHER("Some more Markdown",0)
])
correct_ast = lmast.Document(
[
lmast.Markdown(
"Some Markdown\\Some more Markdown", 0)
]
)
self._compare_ast(ast, correct_ast)
def test_nested_bin_tags1(self):
"""Test nested BinTags with an OTHER token in the inner BinTag"""
ast = self._make_ast([
lexertokens.BIN_START("{%latex%}", 0),
lexertokens.BIN_START("{%latex%}", 1),
lexertokens.OTHER("Some latex.", 1),
lexertokens.BIN_END("{%end%}", 2),
lexertokens.BIN_END("{%end%}", 3),
])
correct_ast = lmast.Document(
[lmast.BinTag(
[
lmast.BinTag(
[lmast.Str("Some latex.", 1)],
1, "{%latex%}"
)
],
0, "{%latex%}"
)]
)
self._compare_ast(ast, correct_ast)
def test_nested_bin_tags2(self):
"""Test nested BinTags with an OTHER token in the other and
inner BinTags.
"""
ast = self._make_ast([
lexertokens.BIN_START("{%latex%}", 0),
lexertokens.OTHER("Some latex.", 1),
lexertokens.BIN_START("{%latex%}", 2),
lexertokens.OTHER("Some latex.", 3),
lexertokens.BIN_END("{%end%}", 4),
lexertokens.BIN_END("{%end%}", 5),
])
correct_ast = lmast.Document(
[
lmast.BinTag(
[
lmast.Str("Some latex.", 1),
lmast.BinTag(
[lmast.Str("Some latex.", 3)],
2, "{%latex%}"
)
],
0, "{%latex%}"
)
]
)
self._compare_ast(ast, correct_ast)
def test_nested_unary_in_binary(self):
"""Test nesting of unary tag inside of binary tag."""
ast = self._make_ast([
lexertokens.BIN_START("{%latex%}", 0),
lexertokens.UNARY_TAG("{%ref-footer%}", 1),
lexertokens.BIN_END("{%end%}", 2),
])
correct_ast = lmast.Document(
[
lmast.BinTag(
[lmast.UnaryTag(1, "{%ref-footer%}")],
0,
"{%latex%}")
]
)
self._compare_ast(ast, correct_ast)
def test_escaped_nested_unary_in_binary(self):
"""Test nesting of escaped unary tag inside of binary tag."""
ast = self._make_ast([
lexertokens.BIN_START("{%latex%}", 0),
lexertokens.ESCAPE("\\", 1),
lexertokens.UNARY_TAG("{%ref-footer%}", 1),
lexertokens.BIN_END("{%end%}", 2),
])
correct_ast = lmast.Document(
[
lmast.BinTag(
[lmast.Str("{%ref-footer%}",1)],
0,
"{%latex%}"
)
]
)
self._compare_ast(ast, correct_ast)
def test_escaped_last(self):
"""Make the last character the escape char"""
ast = self._make_ast([
lexertokens.ESCAPE("\\",0)
])
correct_ast = lmast.Document([
lmast.Markdown("\\",0)
])
self._compare_ast(ast, correct_ast)
    def test_bin_tag_then_markdown(self):
        """A BinTag followed by escaped markdown."""
ast = self._make_ast([
lexertokens.BIN_START("{%latex%}", 0),
lexertokens.BIN_END("{%end%}", 2),
lexertokens.ESCAPE("\\",3),
lexertokens.OTHER("markdown",3)
])
correct_ast = lmast.Document([
lmast.BinTag([], 0, "{%latex%}"),
lmast.Markdown("\markdown", 3)
])
self._compare_ast(ast, correct_ast)
def _compare_ast(self, left_ast, right_ast):
"""Asserts that two ASTs are equal by dumping their contents with
the repr method, and comparing the resultant strings.
"""
self.assertEqual(lmast.dump(left_ast), lmast.dump(right_ast))
def _make_ast(self, token_list):
"""Parse a list of tokens using lmparser. Return the AST."""
tok_stream = tokenstream.TokenStream(token_list)
return self.parser.parse(tok_stream)
|
from __future__ import division
from scitbx.linalg import eigensystem
from scitbx.array_family import flex
from libtbx.utils import null_out
from math import acos,pi
from scitbx import matrix
from iotbx import pdb
import os
class fab_elbow_angle(object):
def __init__(self,
pdb_hierarchy,
chain_ID_light='L',
chain_ID_heavy='H',
limit_light=107,
limit_heavy=113):
'''
Get elbow angle for Fragment antigen-binding (Fab)
- Default heavy and light chains IDs are: H : heavy, L : light
- Default limit (cutoff) between variable and constant parts
is residue number 107/113 for light/heavy chains
    - Variable domain is from residue 1 to limit.
      Constant domain is from limit+1 to end.
- Method of calculating angle is based on Stanfield, et al., JMB 2006
Argument:
---------
pdb_file_name : 4 characters string, a PDB name
    chain_ID_heavy : The heavy portion of the protein, chain ID
    chain_ID_light : The light portion of the protein, chain ID
    limit_heavy : the number of the cutoff residue, between
                  the variable and constant portions in the heavy chain
    limit_light : the number of the cutoff residue, between
                  the variable and constant portions in the light chain
Main attributes:
----------------
    self.fab_elbow_angle : the elbow angle calculated as the dot product of
                           the VL-VH pseudo-dyad axis and the CL-CH
                           pseudo-dyad axis
Test program at:
    cctbx_project/mmtbx/regression/tst_fab_elbow_angle.py
Example:
--------
>>>fab = fab_elbow_angle(
pdb_file_name='1bbd',
chain_ID_light='L',
chain_ID_heavy='H',
limit_light=114,
limit_heavy=118)
>>> print fab.fab_elbow_angle
133
>>>fab = fab_elbow_angle(pdb_file_name='1bbd')
>>> print fab.fab_elbow_angle
126 (127 in Stanfield, et al., JMB 2006)
@author Youval Dar (LBL 2014)
'''
# create selection strings for the heavy/light var/const part of chains
self.select_str(
chain_ID_H=chain_ID_heavy,
limit_H=limit_heavy,
chain_ID_L=chain_ID_light,
limit_L=limit_light)
    # get the hierarchy and divide it using the selection strings
self.pdb_hierarchy = pdb_hierarchy
self.get_pdb_chains()
# Get heavy to light reference vector before alignment !!!
vh_end = self.pdb_var_H.atoms()[-1].xyz
vl_end = self.pdb_var_L.atoms()[-1].xyz
mid_H_to_L = self.norm_vec(start=vh_end,end=vl_end)
#mid_H_to_L = self.H_to_L_vec()
# Get transformations objects
tranformation_const= self.get_transformation(
fixed_selection=self.pdb_const_H,
moving_selection=self.pdb_const_L)
tranformation_var = self.get_transformation(
fixed_selection=self.pdb_var_H,
moving_selection=self.pdb_var_L)
# Get the angle and eigenvalues
eigen_const = eigensystem.real_symmetric(tranformation_const.r.as_sym_mat3())
eigen_var = eigensystem.real_symmetric(tranformation_var.r.as_sym_mat3())
# c : consttant, v : variable
eigenvectors_c = self.get_eigenvector(eigen_const)
eigenvectors_v = self.get_eigenvector(eigen_var)
# c: res 171 v: res 44
c = 20*eigenvectors_c + flex.double(self.pdb_const_H.atoms()[135].xyz)
v = 20*eigenvectors_v + flex.double(self.pdb_var_H.atoms()[130].xyz)
r = 20*mid_H_to_L + flex.double(self.pdb_var_H.atoms()[-1].xyz)
rs = flex.double(self.pdb_var_H.atoms()[-1].xyz)
re = flex.double(self.pdb_var_L.atoms()[-1].xyz)
print
print('c')
print list(flex.double(self.pdb_const_H.atoms()[135].xyz))
print list(c)
print 'v'
print list(flex.double(self.pdb_var_H.atoms()[130].xyz))
print list(v)
print 'r'
print list(flex.double(self.pdb_var_H.atoms()[-1].xyz))
print list(r)
print 'f'
print self.pdb_var_H.atoms()[-1].id_str()
print list(rs)
print self.pdb_var_L.atoms()[-1].id_str()
print list(re)
#
    # test eigenvectors pointing in opposite directions
if eigenvectors_c.dot(eigenvectors_v) > 0:
print 'reversing direction of variable rotation eigenvector!!!!'
eigenvectors_v = - eigenvectors_v
    # Calc Fab elbow angle
angle = self.get_angle(vec1=eigenvectors_c, vec2=eigenvectors_v)
# Test if elbow angle larger or smaller than 180
zaxis = self.cross(eigenvectors_v, eigenvectors_c)
xaxis = self.cross(eigenvectors_c,zaxis)
x = 20*xaxis + flex.double(self.pdb_const_H.atoms()[135].xyz)
print 'x'
print list(flex.double(self.pdb_const_H.atoms()[135].xyz))
print list(x)
print mid_H_to_L.dot(xaxis)
print mid_H_to_L.dot(zaxis)
#m = matrix.sqr(list(xaxis)+list(eigenvectors_c)+list(zaxis))
#A = m.transpose()
#Ainv = A.inverse(0
# choose ref axis
ref_axis = zaxis
#if abs(mid_H_to_L.dot(xaxis)) > abs(mid_H_to_L.dot(zaxis)):
#ref_axis = xaxis
if mid_H_to_L.dot(ref_axis) < 0:
angle = 360 - angle
self.fab_elbow_angle = angle
def H_to_L_vec(self):
''' get the vector from the center of coordinates of the heavy chain
to the center of coordinates of the light chain'''
H = flex.double([0,0,0])
L = flex.double([0,0,0])
for x in self.pdb_const_H.atoms(): H += flex.double(x.xyz)
for x in self.pdb_var_H.atoms(): H += flex.double(x.xyz)
for x in self.pdb_const_L.atoms(): L += flex.double(x.xyz)
for x in self.pdb_var_L.atoms(): L += flex.double(x.xyz)
H = H/(len(self.pdb_const_H.atoms())+len(self.pdb_var_H.atoms()))
L = L/(len(self.pdb_const_L.atoms())+len(self.pdb_var_L.atoms()))
return self.norm_vec(start=H,end=L)
def norm_vec(self,start,end):
    ''' returns normalized vector that starts at "start" and ends at "end" '''
x = flex.double(end) - flex.double(start)
return x/x.norm()
def get_angle(self,vec1,vec2,larger=True):
    '''return the larger angle between vec1 and vec2'''
    if vec1 and vec2:
angle_cos = vec1.dot(vec2)
angle = 180/pi*acos(angle_cos)
else:
angle = 0
if (angle < 90) and larger: angle = 180 - angle
if (angle > 90) and not larger: angle = 180 - angle
return angle
def cross(self,a,b):
'''(array,array) -> array
returns a normalized cross product vector'''
a1,a2,a3 = a
b1,b2,b3 = b
x = flex.double([a2*b3-a3*b2,a3*b1-a1*b3,a1*b2-a2*b1])
return x/x.norm()
def get_eigenvector(self,eigen):
'''
Get the eigen vector for eigen value 1
and normalize it
'''
v = eigen.vectors()
e = eigen.values()
indx = None
l = 0
    # select eigenvector that corresponds to a real eigenvalue == 1
for i,x in enumerate(e):
if not isinstance(x,complex):
if abs(1-x)<1e-6:
indx = i
break
    # make sure we found an eigenvalue == 1
    assert indx is not None
eigenvector = v[indx:indx+3]
# normalize
    eigenvector = eigenvector / eigenvector.norm()
if e.all_eq(flex.double([1,1,1])):
eigenvector = None
return eigenvector
def get_pdb_chains(self):
    '''Create separate pdb hierarchies for each of the chains we want to align'''
ph = self.pdb_hierarchy
# test selection
test = ph.atom_selection_cache().selection
#
self.pdb_var_H = ph.select(test(self.select_var_str_H))
self.pdb_const_H = ph.select(test(self.select_const_str_H))
self.pdb_var_L = ph.select(test(self.select_var_str_L))
self.pdb_const_L = ph.select(test(self.select_const_str_L))
def get_transformation(self,fixed_selection,moving_selection):
from phenix.command_line import superpose_pdbs
'''
Align the moving pdb hierarchy on to the fixed one.
Provides an object with rotation and translation info
Arguments:
----------
fixed_selection, moving_selection : pdb_hierarchy
Retrun:
-------
lsq_fit_obj : least-squre-fit object that contians the
transformation information
'''
params = superpose_pdbs.master_params.extract()
x = superpose_pdbs.manager(
params,
log=null_out(),
write_output=False,
save_lsq_fit_obj=True,
pdb_hierarchy_fixed=fixed_selection,
pdb_hierarchy_moving=moving_selection)
return x.lsq_fit_obj
def select_str(self,chain_ID_H,limit_H,chain_ID_L,limit_L):
    '''create selection strings for the heavy and light chains
    separating the variable and constant parts of the chains'''
s1 = 'pepnames and (name ca or name n or name c) and altloc " "'
s2 = 'chain {0} and resseq {1}:{2} and {3}'
sel_str = lambda ID,i_s,i_e,s: s2.format(ID,i_s,i_e,s)
self.select_var_str_H = sel_str(chain_ID_H,1,limit_H,s1)
self.select_const_str_H = sel_str(chain_ID_H,limit_H+1,'end',s1)
self.select_var_str_L = sel_str(chain_ID_L,1,limit_L,s1)
self.select_const_str_L = sel_str(chain_ID_L,limit_L+1,'end',s1)
|
import os
import glob
import cc3d
import numpy as np
from skimage import io, transform
from torch.utils.data import Dataset
from copy import copy
from graphics import Voxelgrid
from graphics.transform import compute_tsdf
import h5py
# from graphics.utils import extract_mesh_marching_cubes
# from graphics.visualization import plot_mesh, plot_voxelgrid
from scipy.ndimage.morphology import binary_dilation
from .utils.augmentation import *
from .utils.binvox_utils import read_as_3d_array
class ShapeNet(Dataset):
def __init__(self,
root_dir,
obj=None,
model=None,
scene_list=None,
resolution=(240, 320),
transform=None,
noise_scale=0.05,
outlier_scale=3,
outlier_fraction=0.99,
grid_resolution=64,
repeat=0,
load_smooth=False):
self.noise_scale = noise_scale
self.root_dir = os.path.expanduser(root_dir)
self.resolution = resolution
self.xscale = resolution[0] / 480.
self.yscale = resolution[1] / 640.
self.transform = transform
self.obj = obj
self.model = model
self.scene_list = scene_list
self.noise_scale = noise_scale
self.outlier_scale = outlier_scale
self.outlier_fraction = outlier_fraction
self.grid_resolution = grid_resolution
self.repeat = repeat
self.load_smooth = load_smooth
self._load_frames()
def _load_frames(self):
if self.scene_list is None:
# scene, obj = self.scene.split('/')
path = os.path.join(self.root_dir, self.obj, self.model, 'data', '*.depth.png')
files = glob.glob(path)
self.frames = []
for f in files:
self.frames.append(f.replace('.depth.png', ''))
self._scenes = [os.path.join(self.obj, self.model)]
else:
self.frames = []
self._scenes = []
with open(self.scene_list, 'r') as file:
for line in file:
scene, obj = line.rstrip().split('\t')
path = os.path.join(self.root_dir, scene, obj, 'data', '*.depth.png')
files = glob.glob(path)
for f in files:
self.frames.append(f.replace('.depth.png', ''))
if os.path.join(scene, obj) not in self._scenes:
self._scenes.append(os.path.join(scene, obj))
def __len__(self):
return len(self.frames)
def __getitem__(self, item):
frame = self.frames[item]
pathsplit = frame.split('/')
sc = pathsplit[-4]
obj = pathsplit[-3]
scene_id = '{}/{}'.format(sc, obj)
sample = {}
frame_id = frame.split('/')[-1]
frame_id = int(frame_id)
sample['frame_id'] = frame_id
depth = io.imread('{}.depth.png'.format(frame))
depth = depth.astype(np.float32)
depth = depth / 1000.
# depth[depth == np.max(depth)] = 0.
step_x = depth.shape[0] / self.resolution[0]
step_y = depth.shape[1] / self.resolution[1]
index_y = [int(step_y * i) for i in
range(0, int(depth.shape[1] / step_y))]
index_x = [int(step_x * i) for i in
range(0, int(depth.shape[0] / step_x))]
depth = depth[:, index_y]
depth = depth[index_x, :]
mask = copy(depth)
mask[mask == np.max(depth)] = 0
mask[mask != 0] = 1
sample['original_mask'] = copy(mask)
gradient_mask = binary_dilation(mask, iterations=5)
mask = binary_dilation(mask, iterations=8)
sample['mask'] = mask
sample['gradient_mask'] = gradient_mask
depth[mask == 0] = 0
sample['depth'] = depth
sample['noisy_depth'] = add_kinect_noise(copy(depth), sigma_fraction=self.noise_scale)
sample['noisy_depth_octnetfusion'] = add_depth_noise(copy(depth), noise_sigma=self.noise_scale, seed=42)
sample['outlier_depth'] = add_outliers(copy(sample['noisy_depth_octnetfusion']),
scale=self.outlier_scale,
fraction=self.outlier_fraction)
sample['sparse_depth'] = add_sparse_depth(copy(sample['noisy_depth_octnetfusion']),
percentage=0.001)
sample['outlier_blob_depth'] = add_outlier_blobs(copy(sample['noisy_depth_octnetfusion']),
scale=self.outlier_scale,
fraction=self.outlier_fraction)
intrinsics = np.loadtxt('{}.intrinsics.txt'.format(frame))
# adapt intrinsics to camera resolution
scaling = np.eye(3)
scaling[1, 1] = self.yscale
scaling[0, 0] = self.xscale
sample['intrinsics'] = np.dot(scaling, intrinsics)
extrinsics = np.loadtxt('{}.extrinsics.txt'.format(frame))
extrinsics = np.linalg.inv(extrinsics)
sample['extrinsics'] = extrinsics
sample['scene_id'] = scene_id
for key in sample.keys():
if type(sample[key]) is not np.ndarray and type(sample[key]) is not str:
sample[key] = np.asarray(sample[key])
if self.transform:
sample = self.transform(sample)
return sample
def get_grid(self, scene, resolution=None):
sc, obj = scene.split('/')
if not self.load_smooth:
filepath = os.path.join(self.root_dir, sc, obj, 'voxels', '*.{}.binvox'.format(self.grid_resolution))
filepath = glob.glob(filepath)[0]
# filepath = os.path.join(self.root_dir, 'example', 'voxels', 'chair_0256.binvox')
with open(filepath, 'rb') as file:
volume = read_as_3d_array(file)
scene = volume.data
scene = scene.astype(np.int)
labels_out = cc3d.connected_components(scene) # 26-connected
N = np.max(labels_out)
max_label = 0
max_label_count = 0
valid_labels = []
for segid in range(1, N + 1):
extracted_image = labels_out * (labels_out == segid)
extracted_image[extracted_image != 0] = 1
label_count = np.sum(extracted_image)
if label_count > max_label_count:
max_label = segid
max_label_count = label_count
if label_count > 1000:
valid_labels.append(segid)
for segid in range(1, N + 1):
if segid not in valid_labels:
scene[labels_out == segid] = 0.
            if not resolution:
                resolution = 1. / self.grid_resolution
grid = Voxelgrid(resolution)
bbox = np.zeros((3, 2))
bbox[:, 0] = volume.translate
bbox[:, 1] = bbox[:, 0] + resolution * volume.dims[0]
grid.from_array(scene, bbox)
else:
bbox = np.zeros((3, 2))
bbox[0, 0] = -0.5
bbox[1, 0] = -0.5
bbox[2, 0] = -0.5
bbox[0, 1] = 0.5
bbox[1, 1] = 0.5
bbox[2, 1] = 0.5
filepath = os.path.join(self.root_dir, sc, obj, 'smooth', 'model.hf5')
with h5py.File(filepath, 'r') as file:
volume = file['TSDF'][:]
resolution = 1. / self.grid_resolution
grid = Voxelgrid(resolution)
grid.from_array(volume, bbox)
return grid
def get_tsdf(self, scene, resolution=None):
sc, obj = scene.split('/')
if self.grid_resolution == 256:
filepath = os.path.join(self.root_dir, sc, obj, 'voxels', '*.binvox')
else:
filepath = os.path.join(self.root_dir, sc, obj, 'voxels', '*.{}.binvox'.format(self.grid_resolution))
filepath = glob.glob(filepath)[0]
# filepath = os.path.join(self.root_dir, 'example', 'voxels', 'chair_0256.binvox')
with open(filepath, 'rb') as file:
volume = read_as_3d_array(file)
scene = volume.data
scene = scene.astype(np.int)
labels_out = cc3d.connected_components(scene) # 26-connected
N = np.max(labels_out)
max_label = 0
max_label_count = 0
valid_labels = []
for segid in range(1, N + 1):
extracted_image = labels_out * (labels_out == segid)
extracted_image[extracted_image != 0] = 1
label_count = np.sum(extracted_image)
if label_count > max_label_count:
max_label = segid
max_label_count = label_count
if label_count > 10000:
valid_labels.append(segid)
for segid in range(1, N + 1):
if segid not in valid_labels:
scene[labels_out == segid] = 0.
        # compute a signed TSDF: distance to the surface from outside minus the
        # distance from inside, each shifted by half a voxel
dist1 = compute_tsdf(scene.astype(np.float64))
dist1[dist1 > 0] -= 0.5
dist2 = compute_tsdf(np.ones(scene.shape) - scene)
dist2[dist2 > 0] -= 0.5
# print(np.where(dist == 79.64923100695951))
scene = np.copy(dist1 - dist2)
if not resolution:
resolution = 1. / self.grid_resolution
else:
step_size = int(self.grid_resolution / resolution)
scene = scene[::step_size, ::step_size, ::step_size]
resolution = 1. / resolution
grid = Voxelgrid(resolution)
bbox = np.zeros((3, 2))
bbox[:, 0] = volume.translate
bbox[:, 1] = bbox[:, 0] + resolution * scene.shape[0]
grid.from_array(scene, bbox)
return grid
@property
def scenes(self):
return self._scenes
if __name__ == '__main__':
import matplotlib.pyplot as plt
dataset = ShapeNet('/media/weders/HV620S/data/shape-net/processed',
'03001627',
'1007e20d5e811b308351982a6e40cf41',
grid_resolution=128)
for f in dataset:
plt.imshow(f['sparse_depth'])
plt.show()
|
import setuptools
setuptools.setup(
name="windows-path-adder",
version="1.0.4",
license='MIT',
author="oneofthezombies",
author_email="hunhoekim@gmail.com",
description="add environment path in windows.",
long_description=open('README.md').read(),
long_description_content_type = 'text/markdown',
url="https://github.com/oneofthezombies/windows-path-adder",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
)
|
#!/usr/bin/env python
import json
import os
import urllib.request
from pprint import pprint
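# Expects the following environment variables (all read below):
#   JOB_STATUS         - one of "success", "cancelled", "failure"
#   GITHUB_ENVIRONMENT - JSON dump of the GitHub Actions context
#   CUSTOM_MESSAGE     - extra text appended to the Slack message when non-empty
#   SLACK_WEBHOOK      - incoming webhook URL the payload is posted to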
job_status = os.environ["JOB_STATUS"]
github = json.loads(os.environ["GITHUB_ENVIRONMENT"])
actor = github["actor"]
workflow = github["workflow"]
repository = github["repository"]
run_id = github["run_id"]
html_url = f"https://www.github.com/{repository}"
run_description = {"success": "ran", "cancelled": "cancelled", "failure": "failed"}[
job_status
]
run_emoji = {"success": "🎉", "cancelled": "💥", "failure": "💥"}[job_status]
fallback = f"{repository} - {actor} {run_description} <{html_url}/actions/runs/{run_id}|{workflow}> {run_emoji}"
color = {
"success": "good",
"cancelled": "warning",
"failure": "danger",
}[job_status]
field = {
"title": repository,
"value": f"{actor} {run_description} <{html_url}/actions/runs/{run_id}|{workflow}> {run_emoji}",
"short": False,
}
fields = [field]
if os.environ["CUSTOM_MESSAGE"]:
fields.append({"value": os.environ["CUSTOM_MESSAGE"]})
body = {
"attachments": [{"fallback": fallback, "color": color, "fields": fields}],
}
request = urllib.request.Request(
os.environ["SLACK_WEBHOOK"],
json.dumps(body).encode("utf-8"),
{ "Content-Type": "application/json" }
)
with urllib.request.urlopen(request) as response:
print("Status:")
pprint(response.status)
print("Response body:")
pprint(response.read())
|
from typing import Dict
import pandas as pd
import plotly.graph_objects as go
import mlrun
from mlrun.artifacts import Artifact, PlotlyArtifact
from ..._common import ModelType
from ..plan import MLPlanStages, MLPlotPlan
from ..utils import DatasetType, to_dataframe
class FeatureImportancePlan(MLPlotPlan):
"""
Plan for producing a feature importance.
"""
_ARTIFACT_NAME = "feature-importance"
def __init__(self):
"""
Initialize a feature importance plan.
An example of use can be seen at the Scikit-Learn docs here:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_importances.html
"""
super(FeatureImportancePlan, self).__init__()
def is_ready(self, stage: MLPlanStages, is_probabilities: bool) -> bool:
"""
Check whether or not the plan is fit for production by the given stage and prediction probabilities. The
feature importance is ready post training.
:param stage: The stage to check if the plan is ready.
:param is_probabilities: True if the 'y_pred' that will be sent to 'produce' is a prediction of probabilities
(from 'predict_proba') and False if not.
:return: True if the plan is producible and False otherwise.
"""
return stage == MLPlanStages.POST_FIT
def produce(
self, model: ModelType, x: DatasetType, **kwargs
) -> Dict[str, Artifact]:
"""
Produce the feature importance according to the given model and dataset ('x').
:param model: Model to get its 'feature_importances_' or 'coef_[0]' fields.
:param x: Input dataset the model trained on for the column labels.
:return: The produced feature importance artifact in an artifacts dictionary.
"""
# Validate the 'feature_importances_' or 'coef_' fields are available for the given model:
if not (hasattr(model, "feature_importances_") or hasattr(model, "coef_")):
raise mlrun.errors.MLRunInvalidArgumentError(
"This model cannot be used for Feature Importance plotting."
)
# Get the importance score:
if hasattr(model, "feature_importances_"):
# Tree-based feature importance
importance_score = model.feature_importances_
else:
# Coefficient-based importance
importance_score = model.coef_[0]
# Create a table of features and their importance:
df = pd.DataFrame(
{
"features": to_dataframe(x).columns,
"feature_importance": importance_score,
}
).sort_values(by="feature_importance", ascending=False)
# Create the figure:
fig = go.Figure(
[go.Bar(x=df["feature_importance"], y=df["features"], orientation="h")]
)
# Creating the artifact:
self._artifacts[self._ARTIFACT_NAME] = PlotlyArtifact(
key=self._ARTIFACT_NAME,
figure=fig,
)
return self._artifacts
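# A minimal usage sketch (not part of the original module): the plan is normally
# driven by the surrounding MLPlan machinery, but it can also be produced directly
# once a fitted model exposing `feature_importances_` or `coef_` is available.
# RandomForestClassifier and make_classification below are standard scikit-learn
# APIs used purely for illustration.
# from sklearn.datasets import make_classification
# from sklearn.ensemble import RandomForestClassifier
# x, y = make_classification(n_features=5, random_state=0)
# model = RandomForestClassifier(random_state=0).fit(x, y)
# artifacts = FeatureImportancePlan().produce(model=model, x=x)
# print(artifacts["feature-importance"])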
|
import requests
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
#my_api_key = "" Input your AlphaVenture API key as a str() var here
my_stock = "GME"
my_timeframe = "TIME_SERIES_DAILY"
response = requests.get("https://www.alphavantage.co/query?function="+my_timeframe+"&symbol="+my_stock+"&outputsize=full&apikey="+my_api_key)
my_response = response.json()
my_response.keys() ## keys are 'Meta Data', 'Time Series (Daily)'. It's a dictionary of dictionaries.
time_series_data = my_response['Time Series (Daily)']
## Put the data I want into a list of lists
my_list = []
for key, value in time_series_data.items():
my_list.append([key, float(value['1. open']), float(value['2. high']), float(value['3. low']), float(value['4. close'])])
## Add some column names and turn it into a Pandas dataframe
col_names = ["Date", "Open", "High", "Low", "Close"]
my_data = pd.DataFrame(my_list, columns= col_names)
## Subset only data from mid-November 2020 onwards
my_data = my_data[my_data['Date'] >= '2020-11-15']
## Reformat dataframe columns into variables (just because)
x = [dt.datetime.strptime(i, '%Y-%m-%d').date() for i in my_data['Date']]
y_open = my_data['Open']
y_high = my_data['High']
y_low = my_data['Low']
y_close = my_data['Close']
## First plot
## Let's see $GME's Open, High, Low, and Close values over time
plt.plot(x, y_open, label = "Open")
plt.plot(x, y_high, label = "High")
plt.plot(x, y_low, label = "Low")
plt.plot(x, y_close, label = "Close")
plt.ylabel('$ Value')
plt.title('$GME')
plt.legend()
plt.show()
## I want to see how $GME's daily movement (increase/decrease from Open to Close) looks over time
## Add in some horizontal lines at $20 and $-20 to mark big jumps
## Highlight values above/below $20/-20 in yellow to call them out
y_daily_move = my_data['Open']-my_data['Close']
y_daily_20_subset = [i for i in y_daily_move if i < -20 or i > 20]
x_daily_20_subset = [x[i] for i in range(0, len(y_daily_move)) if y_daily_move[i] < -20 or y_daily_move[i] > 20]
plt.scatter(x, y_daily_move, label = 'Diff. Open to Close')
plt.scatter(x_daily_20_subset, y_daily_20_subset, color = 'yellow')
plt.axhline(y=20, color = 'r', linestyle = 'dashed')
plt.axhline(y=-20, color = 'r', linestyle = 'dashed')
plt.ylabel('$ Difference')
plt.title('GME Difference from Open to Close ($)')
plt.show()
## I want to see the difference between $GME's high's and low's each day
## If you don't like stomaching daily stock volatility, you'd want this chart to be clustered around the same values
## I'll call out days where there's over $20 between a daily high and low, which is pretty insane
y_daily_var = my_data['High']-my_data['Low']
y_daily_20_subset = [i for i in y_daily_var if i > 20]
x_daily_20_subset = [x[i] for i in range(0, len(y_daily_var)) if y_daily_var[i] > 20]
plt.scatter(x, y_daily_var, label = 'Diff. High to Low')
plt.scatter(x_daily_20_subset, y_daily_20_subset, color = 'yellow')
plt.axhline(y=20, color = 'r', linestyle = 'dashed')
plt.ylabel('$ Difference')
plt.title('GME Difference from High to Low ($)')
plt.show()
|
"""Bot starts a telegram bot."""
import os
from github import Github
import requests
from datetime import datetime, timedelta
from collections import defaultdict
import logging
g = Github(os.getenv('GITHUB_USER', ''), os.getenv('GITHUB_PASSWORD', ''))
BASE_URL = 'https://api.github.com'
log = logging.getLogger(__name__)
org = os.getenv('GITHUB_ORG', '')
team_id = int(os.getenv('GITHUB_TEAM_ID', '0'))
def pull_requests(bot, update):
"""Return open PRs."""
log.info("Got message")
repos = g.get_organization(org).get_team(team_id).get_repos()
msg = ''
for repo in repos:
for pr in repo.get_pulls():
msg += '{} \n {}\n\n'.format(pr.title, pr.html_url)
log.info(msg)
update.message.reply_text(msg)
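# A minimal wiring sketch (assumption, not in the original module): the
# (bot, update) handler signature above matches python-telegram-bot < 12, so
# registration would look roughly like the lines below; TELEGRAM_TOKEN and the
# command name 'prs' are placeholders.
# from telegram.ext import Updater, CommandHandler
# updater = Updater(os.getenv('TELEGRAM_TOKEN', ''))
# updater.dispatcher.add_handler(CommandHandler('prs', pull_requests))
# updater.start_polling()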
|
import string, re
from subprocess import Popen, PIPE
enc_flag = 'JcOCLQgPJEjwNAZHgVFzAoMVHOiCRVAVKkvFidUvzmUSSnqJzO'
flag = []
pat = re.compile('Flag: (\w+)\n')
candidates = string.ascii_lowercase+string.ascii_uppercase+string.digits+'_'+'-'
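# Byte-at-a-time brute force: ./revme is used as an encryption oracle. For each
# position k we try every candidate character, write the current guess to
# flag.txt, run the binary, and keep the character whose "Flag: ..." output
# matches the known ciphertext at position k.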
for k in range(len(enc_flag)):
for c in candidates:
with open('flag.txt', 'w') as fd:
fd.write(''.join(flag)+c)
p = Popen(['./revme'], stdout=PIPE)
enc = pat.search(p.stdout.read().decode()).group()[6:].strip()
if enc[k] == enc_flag[k]:
flag.append(c)
break
print(''.join(flag))
|
#! /usr/bin/env python
DESCRIPTION = "Intensive Care Unit (ICU) Simulation"
LONG_DESCRIPTION = """\
ICUSIM simplifies the process of simulating ICU burden scenarios
based on a rich set of input variables.
"""
DISTNAME = 'ICUSIM'
MAINTAINER = 'Mikko Kotila'
MAINTAINER_EMAIL = 'mailme@mikkokotila.com'
URL = 'http://autonom.io'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/autonomio/ICUSIM/'
VERSION = '0.2.0'
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
install_requires = ['simpy==3.0.12',
'numpy',
'pandas',
'tqdm',
'salib']
if __name__ == "__main__":
setup(name=DISTNAME,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
packages=['icusim', 'icusim.commands'],
classifiers=['Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
'Topic :: Scientific/Engineering :: Mathematics',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows :: Windows 10'])
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'GetServiceResult',
'AwaitableGetServiceResult',
'get_service',
]
@pulumi.output_type
class GetServiceResult:
"""
A collection of values returned by getService.
"""
def __init__(__self__, hostname=None, id=None, ip_address=None, location=None, name=None, primary_access_key=None, primary_connection_string=None, public_port=None, resource_group_name=None, secondary_access_key=None, secondary_connection_string=None, server_port=None, tags=None):
if hostname and not isinstance(hostname, str):
raise TypeError("Expected argument 'hostname' to be a str")
pulumi.set(__self__, "hostname", hostname)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ip_address and not isinstance(ip_address, str):
raise TypeError("Expected argument 'ip_address' to be a str")
pulumi.set(__self__, "ip_address", ip_address)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if primary_access_key and not isinstance(primary_access_key, str):
raise TypeError("Expected argument 'primary_access_key' to be a str")
pulumi.set(__self__, "primary_access_key", primary_access_key)
if primary_connection_string and not isinstance(primary_connection_string, str):
raise TypeError("Expected argument 'primary_connection_string' to be a str")
pulumi.set(__self__, "primary_connection_string", primary_connection_string)
if public_port and not isinstance(public_port, int):
raise TypeError("Expected argument 'public_port' to be a int")
pulumi.set(__self__, "public_port", public_port)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if secondary_access_key and not isinstance(secondary_access_key, str):
raise TypeError("Expected argument 'secondary_access_key' to be a str")
pulumi.set(__self__, "secondary_access_key", secondary_access_key)
if secondary_connection_string and not isinstance(secondary_connection_string, str):
raise TypeError("Expected argument 'secondary_connection_string' to be a str")
pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
if server_port and not isinstance(server_port, int):
raise TypeError("Expected argument 'server_port' to be a int")
pulumi.set(__self__, "server_port", server_port)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def hostname(self) -> str:
"""
The FQDN of the SignalR service.
"""
return pulumi.get(self, "hostname")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> str:
"""
The publicly accessible IP of the SignalR service.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter
def location(self) -> str:
"""
Specifies the supported Azure location where the SignalR service exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="primaryAccessKey")
def primary_access_key(self) -> str:
"""
The primary access key of the SignalR service.
"""
return pulumi.get(self, "primary_access_key")
@property
@pulumi.getter(name="primaryConnectionString")
def primary_connection_string(self) -> str:
"""
The primary connection string of the SignalR service.
"""
return pulumi.get(self, "primary_connection_string")
@property
@pulumi.getter(name="publicPort")
def public_port(self) -> int:
"""
The publicly accessible port of the SignalR service which is designed for browser/client use.
"""
return pulumi.get(self, "public_port")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="secondaryAccessKey")
def secondary_access_key(self) -> str:
"""
The secondary access key of the SignalR service.
"""
return pulumi.get(self, "secondary_access_key")
@property
@pulumi.getter(name="secondaryConnectionString")
def secondary_connection_string(self) -> str:
"""
The secondary connection string of the SignalR service.
"""
return pulumi.get(self, "secondary_connection_string")
@property
@pulumi.getter(name="serverPort")
def server_port(self) -> int:
"""
The publicly accessible port of the SignalR service which is designed for customer server side use.
"""
return pulumi.get(self, "server_port")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
return pulumi.get(self, "tags")
class AwaitableGetServiceResult(GetServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServiceResult(
hostname=self.hostname,
id=self.id,
ip_address=self.ip_address,
location=self.location,
name=self.name,
primary_access_key=self.primary_access_key,
primary_connection_string=self.primary_connection_string,
public_port=self.public_port,
resource_group_name=self.resource_group_name,
secondary_access_key=self.secondary_access_key,
secondary_connection_string=self.secondary_connection_string,
server_port=self.server_port,
tags=self.tags)
def get_service(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceResult:
"""
Use this data source to access information about an existing Azure SignalR service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.signalr.get_service(name="test-signalr",
resource_group_name="signalr-resource-group")
```
:param str name: Specifies the name of the SignalR service.
:param str resource_group_name: Specifies the name of the resource group the SignalR service is located in.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:signalr/getService:getService', __args__, opts=opts, typ=GetServiceResult).value
return AwaitableGetServiceResult(
hostname=__ret__.hostname,
id=__ret__.id,
ip_address=__ret__.ip_address,
location=__ret__.location,
name=__ret__.name,
primary_access_key=__ret__.primary_access_key,
primary_connection_string=__ret__.primary_connection_string,
public_port=__ret__.public_port,
resource_group_name=__ret__.resource_group_name,
secondary_access_key=__ret__.secondary_access_key,
secondary_connection_string=__ret__.secondary_connection_string,
server_port=__ret__.server_port,
tags=__ret__.tags)
|
from __future__ import print_function
import Pyro4
import sys
import time
if sys.version_info<(3,0):
input=raw_input
host=input("enter the hostname of the itunescontroller: ")
itunes=Pyro4.Proxy("PYRO:itunescontroller@{0}:39001".format(host))
print("setting Playlist 'Music'...")
itunes.playlist("Music")
itunes.play()
print("Current song:", itunes.currentsong())
time.sleep(6)
print("setting Playlist 'itunes DJ'...")
itunes.playlist("itunes DJ")
itunes.play()
print("Current song:", itunes.currentsong())
time.sleep(6)
print("next song...")
itunes.next()
print("Current song:", itunes.currentsong())
time.sleep(6)
print("stop.")
itunes.stop()
|
import abc
class InstallationStrategy( abc.ABC ):
@classmethod
@abc.abstractmethod
def execute(cls):
""" abstract method definingthe rules for the installation bla bvla """
class DmgInstallationStrategy( InstallationStrategy ):
@classmethod
def execute(cls):
""" abstract method definingthe rules for the installation bla bvla """
print("opening up the dmg")
class PkgInstallationStrategy( InstallationStrategy ):
@classmethod
def execute(cls):
""" abstract method definingthe rules for the installation bla bvla """
print("opening up the pkg")
class TarInstallationStrategy( InstallationStrategy ):
@classmethod
def execute(cls):
""" abstract method definingthe rules for the installation bla bvla """
print("opening up the tar")
class BzipInstallationStrategy( InstallationStrategy ):
@classmethod
def execute(cls):
""" abstract method definingthe rules for the installation bla bvla """
print("opening up the bzip")
NAME_STRATEGY_MAPPING = {
".dmg" : DmgInstallationStrategy,
".pkg" : PkgInstallationStrategy,
".tar" : TarInstallationStrategy,
".bzip" : BzipInstallationStrategy,
}
def install_download_app(soft_path: str, strategy: InstallationStrategy):
strategy.execute()
if __name__ == '__main__':
    # we want to isolate the file extension (hardcoded here for the demo)
extension = ".pkg"
strategy = NAME_STRATEGY_MAPPING[extension]
install_download_app("blabla/blibli", strategy)
|
revision = '4160ccb58402'
down_revision = None
branch_labels = None
depends_on = None
import json
import os
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
sections = {
'update_authorized_keys': 'local',
'authorized_keys_file': 'local',
'githome_executable': 'local',
'githome_id': 'githome',
}
def upgrade():
con = op.get_bind()
old_cfg = table('configsetting',
column('key', sa.String),
column('json_value', sa.String))
# check we know where to put each key
for key, value in con.execute(old_cfg.select()):
if key not in sections:
raise RuntimeError('Cannot migrate configuration, unknown '
'configuration value: {}'.format(key))
new_cfg = op.create_table('config',
sa.Column('key', sa.String(), nullable=False),
sa.Column('section', sa.String(), nullable=False),
sa.Column('data', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('key', 'section')
)
new_recs = [{
'key': key,
'section': sections[key],
'data': value,
} for key, value in con.execute(old_cfg.select())]
op.bulk_insert(new_cfg, new_recs)
import githome
gh_client = os.path.join(os.path.dirname(githome.__file__), 'gh_client')
op.bulk_insert(new_cfg, [
{'section': 'local', 'key': 'authorized_keys_start_marker',
'data': r'"# -- added by githome {}, do not remove these markers --\n"'},
{'section': 'local', 'key': 'authorized_keys_end_marker',
'data': r'"# -- end githome {}. keep trailing newline! --\n"'},
{'section': 'local', 'key': 'use_gh_client',
'data': json.dumps(True)},
{'section': 'local', 'key': 'gh_client_socket',
'data': json.dumps('ghclient.sock')},
{'section': 'local', 'key': 'gh_client_executable',
'data': json.dumps(gh_client)},
])
# rename config key githome_id to id
op.execute(new_cfg.update().where(new_cfg.c['key'] == 'githome_id')
.values(key='id'))
op.rename_table('user', 'users')
op.rename_table('public_key', 'public_keys')
op.drop_table('configsetting')
|
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Vanilla policy gradient agent.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from tensorforce.agents import BatchAgent
from tensorforce.models import VPGModel
class VPGAgent(BatchAgent):
"""
Vanilla Policy Gradient agent as described by [Sutton et al. (1999)](https://papers.nips.cc/paper/1713-policy-gradient-methods-for-reinforcement-learning-with-function-approximation.pdf).
Configuration:
Each agent requires the following ``Configuration`` parameters:
* `states`: dict containing one or more state definitions.
* `actions`: dict containing one or more action definitions.
* `preprocessing`: dict or list containing state preprocessing configuration.
* `exploration`: dict containing action exploration configuration.
The `BatchAgent` class additionally requires the following parameters:
* `batch_size`: integer of the batch size.
A Policy Gradient Model expects the following additional configuration parameters:
* `sample_actions`: boolean of whether to sample actions.
* `baseline`: string indicating the baseline value function (currently 'linear' or 'mlp').
* `baseline_args`: list of arguments for the baseline value function.
* `baseline_kwargs`: dict of keyword arguments for the baseline value function.
* `generalized_advantage_estimation`: boolean indicating whether to use GAE.
* `gae_lambda`: float of the Generalized Advantage Estimation lambda.
* `normalize_advantage`: boolean indicating whether to normalize the advantage or not.
The VPG agent does not require any additional configuration parameters.
"""
name = 'VPGAgent'
model = VPGModel
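# A hedged usage sketch (not part of the original file): the Configuration fields
# follow the parameter list in the docstring above, but the import path and exact
# field names may differ across early tensorforce versions, and the state/action
# shapes are placeholders.
# from tensorforce import Configuration
# config = Configuration(
#     batch_size=1000,
#     states=dict(shape=(4,), type='float'),
#     actions=dict(continuous=False, num_actions=2),
# )
# agent = VPGAgent(config=config)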
|
import pyglet
from pyglet.window import key
import cocos
from cocos import actions, layer, sprite, scene
from cocos.director import director
import cocos.euclid as eu
import cocos.collision_model as cm
import math
import paho.mqtt.client as mqtt
import json
MAP_SIZE = (600, 600)
VELOCITY_MAX = 400
VELOCITY_INERTIA = 3 # smaller means more inertia
VELOCITY_BRAKE_VS_SPEED = 3
VELOCITY_IMPACT_ON_TURNING = 0.0025
TURNING_SPEED = 3
VELOCITY_DECLINE = 0.995 # not touching controls means the velocity will go to zero
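# Driving model (used by Car.step below): speed is integrated from the up/down keys,
# braking is VELOCITY_BRAKE_VS_SPEED times stronger than accelerating, speed decays
# by VELOCITY_DECLINE every step, and the turning rate scales with the current speed
# through VELOCITY_IMPACT_ON_TURNING.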
class CollidableSprite(cocos.sprite.Sprite):
def __init__(self, image, cx, cy, radius):
super(CollidableSprite, self).__init__(image)
self.position = (cx, cy)
self.cshape = cm.CircleShape(eu.Vector2(cx, cy), 25)
def update_in_collision_manager(self):
collision_manager.remove_tricky(self)
self.cshape = cm.CircleShape(eu.Vector2(self.position[0], self.position[1]), 25)
collision_manager.add(self)
def maybe_impact(self):
if collision_manager.any_near(self, 1):
self.velocity = (- self.velocity[0], - self.velocity[1])
#self.velocity = (0, 0)
# check if out of map
self.position = (max(0, min(self.position[0], MAP_SIZE[0])), \
max(0, min(self.position[1], MAP_SIZE[1])))
# How to handle collisions
#mapcollider = mapcolliders.RectMapCollider("bounce")
# Car Actions class
class Car(actions.Move):
def step(self, dt):
super(Car, self).step(dt)
rl = keyboard[key.RIGHT] - keyboard[key.LEFT]
speed_or_brake = keyboard[key.UP] - keyboard[key.DOWN]
radians = self.target.rotation * math.pi / 180
# Update the speed from the perspective of the car
try:
speed_or_brake = keyboard[key.UP] - VELOCITY_BRAKE_VS_SPEED * keyboard[key.DOWN] \
if self.target.speed > 0 else \
VELOCITY_BRAKE_VS_SPEED * keyboard[key.UP] - keyboard[key.DOWN]
self.target.speed = VELOCITY_DECLINE * (min(VELOCITY_INERTIA * speed_or_brake + self.target.speed, VELOCITY_MAX))
except AttributeError:
self.target.speed = math.sqrt(self.target.velocity[0]**2 + self.target.velocity[1]**2)
velocity_x = self.target.speed * math.sin(radians)
velocity_y = self.target.speed * math.cos(radians)
self.target.velocity = (velocity_x, velocity_y)
# turn the car
rl = TURNING_SPEED * rl * VELOCITY_IMPACT_ON_TURNING * abs(self.target.speed)
rl = rl if self.target.speed > 0 else - rl
action = actions.interval_actions.RotateBy(rl, 0)
self.target.do(action)
self.target.update_in_collision_manager()
self.target.maybe_impact()
class Mqtt_layer(layer.Layer):
def __init__(self, collision_mgr):
super(Mqtt_layer, self).__init__()
self.collision_mgr = collision_mgr
# MQTT part
def on_marker(client, userdata, msg):
print("marker: '" + str(msg.payload))
payload = json.loads(msg.payload)
print payload["position"][0]
print payload["position"][1]
# create an obstacle and add to layer
# obstacle3 = CollidableSprite('sprites/obstacle.png', 200, 200, 0)
# player_layer.add(obstacle3)
# obstacle3.velocity = (0, 0)
# collision_manager.add(obstacle3)
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
# client.message_callback_add("ares/video/markers", on_marker)
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("ares/video/markers")
client.subscribe("ares/video/edges")
client.subscribe("ares/video/objects")
client.subscribe("ares/mgt/features/add")
client.subscribe("ares/mgt/features/remove")
# The callback for when a PUBLISH message is received from the server which is not handled in other handlers
def on_message(client, userdata, msg):
print("Received message '" + str(msg.payload) + "' on topic '" \
+ msg.topic + "' with QoS " + str(msg.qos))
payload = json.loads(msg.payload)
x = payload["position"][0]
y = payload["position"][1]
# create an obstacle and add to layer
obstacle3 = CollidableSprite('sprites/obstacle.png', x, y, 0)
self.add(obstacle3)
# obstacle3.velocity = (0, 0)
self.collision_mgr.add(obstacle3)
self.client = mqtt.Client()
self.client.on_connect = on_connect
self.client.on_message = on_message
self.client.connect("localhost", 1883, 60)
    # loop(0) in draw() is a non-blocking call that processes any pending network
    # traffic and dispatches callbacks once per frame. Other loop*() functions
    # (loop_start, loop_forever) give a threaded or blocking interface instead.
def draw(self):
self.client.loop(0)
# Main class
def main():
global keyboard
global collision_manager
collision_manager = cm.CollisionManagerBruteForce()
director.init(width=MAP_SIZE[0], height=MAP_SIZE[1], autoscale=True, resizable=True)
# Create a layer
player_layer = Mqtt_layer(collision_manager)
# create an obstacle and add to layer
obstacle1 = CollidableSprite('sprites/obstacle.png', 200, 200, 0)
player_layer.add(obstacle1)
obstacle1.velocity = (0, 0)
collision_manager.add(obstacle1)
# create an obstacle and add to layer
obstacle2 = CollidableSprite('sprites/obstacle.png', 320, 240, 0)
player_layer.add(obstacle2)
obstacle2.velocity = (0, 0)
collision_manager.add(obstacle2)
# create an obstacle and add to layer
obstacle4 = CollidableSprite('sprites/obstacle.png', 490, 490, 0)
player_layer.add(obstacle4)
obstacle4.velocity = (0, 0)
collision_manager.add(obstacle4)
# create the car and add to layer
car = CollidableSprite('sprites/Black_viper.png', 100, 100, 10)
action = actions.interval_actions.ScaleBy(0.25, 0)
car.do(action)
player_layer.add(car)
car.velocity = (0, 0)
# Set the sprite's movement class.
car.do(Car())
# Create a scene and set its initial layer.
main_scene = scene.Scene(player_layer)
# collisions
collision_manager.add(car)
# Attach a KeyStateHandler to the keyboard object.
keyboard = key.KeyStateHandler()
director.window.push_handlers(keyboard)
# Play the scene in the window.
director.run(main_scene)
if __name__ == '__main__':
main()
|
"""
Problem:
The function even_squares takes in a number. If the number is:
* even - print out the number squared
* odd - print out the number
Tests:
>>> even_squares(20)
400
>>> even_squares(9)
9
>>> even_squares(8)
64
>>> even_squares(73)
73
"""
# Use this to test your solution. Don't edit it!
import doctest
def run_tests():
doctest.testmod(verbose=True)
# Edit this code
def even_squares(n):
if n%2 == 0:
print(n*n)
else:
print(n)
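# A minimal sketch (not part of the original exercise): run the doctests above
# when this file is executed directly.
if __name__ == "__main__":
    run_tests()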
|
from ytree.frontends.treefarm import \
TreeFarmArbor
from ytree.utilities.testing import \
ArborTest, \
TempDirTest
class TreeFarmArborDescendentsTest(TempDirTest, ArborTest):
arbor_type = TreeFarmArbor
test_filename = "tree_farm/tree_farm_descendents/fof_subhalo_tab_000.0.h5"
num_data_files = 51
class TreeFarmArborAncestorsTest(TempDirTest, ArborTest):
arbor_type = TreeFarmArbor
test_filename = "tree_farm/tree_farm_ancestors/fof_subhalo_tab_017.0.h5"
num_data_files = 34
|
# -*- encoding: utf-8 -*-
import argparse
import logging
import multiprocessing
from math import floor
import django
from django.core.management import BaseCommand
import miniblog
from bpp.util import partition_count, disable_multithreading_by_monkeypatching_pool
from import_dbf.models import B_A, Bib
from import_dbf.util import (
integruj_autorow,
integruj_b_a,
integruj_charaktery,
integruj_funkcje_autorow,
integruj_jednostki,
integruj_jezyki,
integruj_kbn,
integruj_publikacje,
integruj_tytuly_autorow,
integruj_uczelnia,
integruj_wydzialy,
integruj_zrodla,
mapuj_elementy_publikacji,
przypisz_jednostki,
sprawdz_zamapowanie_autorow,
usun_podwojne_przypisania_b_a,
wyswietl_prace_bez_dopasowania,
wzbogacaj_charaktery,
zatwierdz_podwojne_przypisania,
dodaj_aktualnosc,
set_sequences,
przypisz_grupy_punktowe,
utworz_szkielety_ksiazek,
integruj_dyscypliny,
)
django.setup()
class Command(BaseCommand):
help = "Integruje zaimportowaną bazę DBF z bazą BPP"
def add_arguments(self, parser):
parser.add_argument("--uczelnia", type=str, default="Domyślna Uczelnia")
parser.add_argument("--skrot", type=str, default="DU")
parser.add_argument("--disable-multithreading", action="store_true")
parser.add_argument("--enable-all", action="store_true")
parser.add_argument("--disable-transaction", action="store_true")
parser.add_argument("--enable-wydzial", action="store_true")
parser.add_argument("--enable-jednostka", action="store_true")
parser.add_argument("--enable-autor", action="store_true")
parser.add_argument("--enable-dyscypliny", action="store_true")
parser.add_argument("--enable-publikacja", action="store_true")
parser.add_argument("--enable-grupy-punktowe", action="store_true")
parser.add_argument("--enable-szkielety-ksiazek", action="store_true")
parser.add_argument("--enable-mapuj-publikacja", action="store_true")
parser.add_argument("--enable-charakter-kbn-jezyk", action="store_true")
parser.add_argument(
"--charaktery-enrichment-xls", type=argparse.FileType("rb"), nargs="+"
)
parser.add_argument("--enable-zrodlo", action="store_true")
parser.add_argument("--enable-b-a", action="store_true")
parser.add_argument(
"--enable-zatwierdz-podwojne-przypisania",
action="store_true",
help="""W przypadku, gdyby podwójne przypisania w bazie danych były OK, podaj ten argument
aby utworzyć dodatkowe rekordy dla prawidłowo zdublowanych autorów""",
)
parser.add_argument("--enable-przypisz-jednostki", action="store_true")
parser.add_argument("--enable-dodaj-aktualnosc", action="store_true")
def handle(
self,
uczelnia,
skrot,
enable_all,
disable_transaction,
disable_multithreading,
*args,
**options
):
verbosity = int(options["verbosity"])
logger = logging.getLogger("django")
if verbosity > 1:
logger.setLevel(logging.DEBUG)
from django import db
db.connections.close_all()
cpu_count = multiprocessing.cpu_count()
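        # Use roughly 7/8 of the available CPU cores, but always at least one worker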
num_proc = int(floor(cpu_count * 0.875)) or 1
pool = multiprocessing.Pool(processes=num_proc)
if disable_multithreading:
disable_multithreading_by_monkeypatching_pool(pool)
pool.apply(integruj_uczelnia, (uczelnia, skrot))
if enable_all or options["enable_wydzial"]:
logger.debug("Wydzialy")
pool.apply(integruj_wydzialy)
if enable_all or options["enable_jednostka"]:
logger.debug("Jednostki")
pool.apply(integruj_jednostki)
if enable_all or options["enable_autor"]:
pool.apply(integruj_tytuly_autorow)
pool.apply(integruj_funkcje_autorow)
# 'Jose Miguel', 'Caldas' <==> 'Jose', 'Miguel Caldas'
# with fuj as (select imiona || ' ' || nazwisko as x, idt_aut as y
# from import_dbf_aut where exp_id = idt_aut) select x, array_agg(y)
# from fuj group by x having count(*) > 1
logger.debug("Autorzy")
pool.apply(integruj_autorow)
logger.debug("Sprawdzam czy wszyscy sa przypisani")
pool.apply(sprawdz_zamapowanie_autorow)
if enable_all or options["enable_dyscypliny"]:
pool.apply(integruj_dyscypliny)
if enable_all or options["enable_charakter_kbn_jezyk"]:
pool.apply(integruj_charaktery)
fp = options.get("charaktery_enrichment_xls")
if fp:
pool.apply(wzbogacaj_charaktery, args=(fp[0].name,))
pool.apply(integruj_kbn)
pool.apply(integruj_jezyki)
if enable_all or options["enable_zrodlo"]:
logger.debug("Zrodla")
pool.apply(integruj_zrodla)
if enable_all or options["enable_mapuj_publikacja"]:
logger.debug("Publikacje - wyciągam dane")
pool.starmap(
mapuj_elementy_publikacji,
partition_count(Bib.objects.exclude(analyzed=True), num_proc),
)
if enable_all or options["enable_publikacja"]:
logger.info("Integruje publikacje")
pool.starmap(
integruj_publikacje,
partition_count(
Bib.objects.filter(object_id=None, analyzed=True), num_proc
),
)
pool.apply(wyswietl_prace_bez_dopasowania, (logger,))
pool.apply(set_sequences)
if enable_all or options["enable_grupy_punktowe"]:
pool.apply(przypisz_grupy_punktowe)
if enable_all or options["enable_szkielety_ksiazek"]:
pool.apply(utworz_szkielety_ksiazek, (logger,))
if enable_all or options["enable_zatwierdz_podwojne_przypisania"]:
            logger.debug("Zatwierdzanie podwójnych przypisan")
pool.apply(zatwierdz_podwojne_przypisania, (logger,))
if enable_all or options["enable_b_a"]:
logger.debug("Usuwanie podwojnych przypisan")
pool.apply(usun_podwojne_przypisania_b_a, (logger,))
logger.debug("Integracja B_A")
pool.starmap(integruj_b_a, partition_count(B_A.objects, num_proc))
logger.debug("Przypisuje jednostki do autorow")
if enable_all or options["enable_przypisz_jednostki"]:
logger.debug("Przypisuje Autor_Jednostka masowo")
pool.apply(przypisz_jednostki)
if enable_all or options["enable_dodaj_aktualnosc"]:
pool.apply(dodaj_aktualnosc)
|
#!/usr/bin/env python
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
# max(float,float) includes masking
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_u_float_u_float.tif test_max_u_float_u_float")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_u_float_v_float.tif test_max_u_float_v_float")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_v_float_u_float.tif test_max_v_float_u_float")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_v_float_v_float.tif test_max_v_float_v_float")
# Derivs includes masking
command += testshade("--vary_udxdy --vary_udxdy -t 1 -g 64 64 -od uint8 -o Cout out_max_v_dfloat_v_dfloat.tif test_max_v_dfloat_v_dfloat")
# max(int, int) (includes masking)
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_u_int_u_int.tif test_max_u_int_u_int")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_u_int_v_int.tif test_max_u_int_v_int")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_v_int_u_int.tif test_max_v_int_u_int")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_v_int_v_int.tif test_max_v_int_v_int")
# max(vec, vec) (including Masking)
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_u_vec_u_vec.tif test_max_u_vec_u_vec")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_u_vec_v_vec.tif test_max_u_vec_v_vec")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_v_vec_v_vec.tif test_max_v_vec_v_vec")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_v_vec_u_vec.tif test_max_v_vec_u_vec")
# Derivs includes masking
command += testshade("--vary_udxdy --vary_udxdy -t 1 -g 64 64 -od uint8 -o Cout out_max_v_dvec_v_dvec.tif test_max_v_dvec_v_dvec")
outputs = [
"out_max_u_float_u_float.tif",
"out_max_u_float_v_float.tif",
"out_max_v_float_u_float.tif",
"out_max_v_float_v_float.tif",
"out_max_v_dfloat_v_dfloat.tif",
"out_max_u_int_u_int.tif",
"out_max_u_int_v_int.tif",
"out_max_v_int_u_int.tif",
"out_max_v_int_v_int.tif",
"out_max_u_vec_u_vec.tif",
"out_max_u_vec_v_vec.tif",
"out_max_v_vec_v_vec.tif",
"out_max_v_vec_u_vec.tif",
"out_max_v_dvec_v_dvec.tif"
]
# expect a few LSB failures
failthresh = 0.008
failpercent = 3
|
from collections import Counter
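# Count the lines that contain some letter exactly twice and some letter exactly
# three times, then print the product of the two counts (a simple checksum).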
def solve(input):
twos = 0
threes = 0
for line in input:
c = Counter(line)
has2 = False
has3 = False
for k, v in c.items():
if not has2 and v == 2:
twos += 1
has2 = True
elif not has3 and v == 3:
threes += 1
has3 = True
    print(twos * threes)
with open('input.txt', 'r') as f:
input = f.read().splitlines()
solve(input)
|
from euler import *
n = 144
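# a = n * (2n - 1) is the n-th hexagonal number; search upward from n = 144 for
# the first one that is also pentagonal and triangular.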
while True:
a = n * ( 2 * n - 1)
    if check_penta(a) and check_triangle(a):
print(a)
break
else:
n += 1
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn import metrics
import random
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Flatten, Dense, Activation
import torch
from torch.backends import cudnn
import keras
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import accuracy_score
import nni
from copy import deepcopy
from pathlib import Path
from utils import get_device
from utlis_graph_zsl import replace_max
seed = 0
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.deterministic = True
random.seed(seed)
class TopKRanker(OneVsRestClassifier):
"""
    One-vs-rest classifier (logistic regression base) that returns the top-k highest-probability labels per sample
"""
def predict_kfir(self, x, top_k_list):
assert x.shape[0] == len(top_k_list)
probs = super(TopKRanker, self).predict_proba(x)
prediction = np.zeros((x.shape[0], self.classes_.shape[0]))
for i, k in enumerate(top_k_list):
probs_ = probs[i, :]
labels = self.classes_[probs_.argsort()[-int(k):]].tolist()
for label in labels:
prediction[i, int(label)] = 1
return prediction, probs
def train_edge_classification(x_train, y_train, solver="lbfgs"):
"""
train the classifier with the train set.
    :param x_train: The edge feature vectors (train set).
    :param y_train: The edge labels - 0 for true, 1 for false (train set).
:return: The classifier
"""
model = LogisticRegression(solver=solver, class_weight="balanced")
penalty = ["l2"] if solver == "lbfgs" else ["l2", "l1"]
parameters = {"penalty": penalty, "C": [0.01, 0.1, 1.0, 10.0]}
model = TopKRanker(
GridSearchCV(model, param_grid=parameters, cv=2, scoring='balanced_accuracy', n_jobs=1, verbose=0,
pre_dispatch='n_jobs'))
model.fit(x_train, y_train)
return model
def predict_edge_classification(classif2, x_test):
"""
    Predict edge labels and class probabilities for the test data.
    :param classif2: the trained TopKRanker classifier
    :param x_test: the test edge features
    :return: (prediction, probs)
"""
top_k_list = list(np.ones(len(x_test)).astype(int))
prediction, probs = classif2.predict_kfir(x_test, top_k_list)
if np.argmax(x_test) != np.argmax(probs.T[0]):
print('stop')
return prediction, probs
def create_keras_model(input_shape, hidden_layer_size):
"""
    :param input_shape: int - number of input features in a single sample
    :param hidden_layer_size: int - width of the hidden Dense layer
"""
model = tf.keras.Sequential()
model.add(Dense(hidden_layer_size, activation="relu", input_shape=(input_shape,)))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# opti = keras.optimizers.Adam(lr=0.01)
opti = "Adam"
model.compile(optimizer=opti, loss="binary_crossentropy", metrics=[tf.keras.metrics.AUC()])
model.summary()
# parameters = {"learning_rate": [0.005, 0.01, 0.1]}
# model = GridSearchCV(model, param_grid=parameters, cv=2, scoring='balanced_accuracy', n_jobs=1, verbose=0,
# pre_dispatch='n_jobs')
return model
def keras_model_fit(model, x_train, y):
y_train = y[:, 0]
tf.config.run_functions_eagerly(True)
model.fit(x_train, y_train, epochs=5)
print("done fitting")
return model
def keras_model_predict(model, x_test):
y_pred = model.predict(x_test)
return y_pred
class EmbeddingLinkPredictionDataset(Dataset):
def __init__(self, embeddings, labels):
self.embeddings = embeddings
# labels = labels[:, 0]
self.labels = labels
def __len__(self):
return len(self.embeddings)
def __getitem__(self, idx):
return self.embeddings[idx], self.labels[idx]
class EmbeddingLinkPredictionNetwork(nn.Module):
def __init__(self, input_dim: int, hidden_layer_dim: int, lr: float = 0.001, weight_decay: float = 0.0,
pos_weight: float = 10.0, dropout_prob: float = 0.5, optimizer="adam",
loss="weighted_binary_cross_entropy", device="cpu"):
super().__init__()
self.fc1 = nn.Linear(input_dim, hidden_layer_dim)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(p=dropout_prob)
self.fc2 = nn.Linear(hidden_layer_dim, 2)
self.sigmoid = nn.Sigmoid()
if optimizer == "adam":
self.optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay, amsgrad=True)
if loss == "weighted_binary_cross_entropy":
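            # Note: BCEWithLogitsLoss applies a sigmoid internally; since forward()
            # also ends with a sigmoid, the loss here is computed on already-squashed
            # outputs rather than on raw logits.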
self.loss = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([1, pos_weight]))
self.device = device
self.to(self.device)
def forward(self, x):
out = self.fc1(x)
out = self.relu1(out)
out = self.dropout1(out)
out = self.fc2(out)
out = self.sigmoid(out)
return out
class TrainLinkPrediction:
def __init__(self, model, epochs, train_loader=None, val_loader=None, test_loader=None, to_nni=False):
self.model = model
self.epochs = epochs
self.train_loader = train_loader
self.val_loader = val_loader
self.test_loader = test_loader
self.device = model.device
self.to_nni = to_nni
def train(self):
running_loss = 0.0
accuracy = []
best_val_auc = -1.0
best_epoch = 0
self.model.train()
for epoch in range(self.epochs):
for i, (embeddings, labels) in enumerate(self.train_loader):
labels = torch.tensor(np.array(labels).astype(int), dtype=torch.float, device=self.device)
embeddings = torch.tensor(np.array(embeddings), dtype=torch.float, device=self.device)
self.model.optimizer.zero_grad()
self.model.train()
predictions = self.model(embeddings)
# one_hot_labels = torch.zeros(predictions.shape)
# one_hot_labels[torch.arange(predictions.shape[0]), torch.tensor(np.array(labels).astype(int), dtype=torch.long)] = 1
loss = self.model.loss(predictions, labels).to(self.device)
loss.backward()
self.model.optimizer.step()
running_loss += loss.item()
final_preds = 1 - torch.argmax(predictions, dim=1)
row_labels = labels[:, 0].cpu()
samples_weight = row_labels * 10 + 1 - row_labels
accuracy.append(accuracy_score(row_labels, final_preds.cpu(), sample_weight=samples_weight))
_, _, val_accuracy, val_auc = self.eval()
best_val_auc, change = replace_max(best_val_auc, val_auc, report_change=True)
if change:
best_epoch = epoch
best_classif = deepcopy(self.model)
if self.to_nni:
nni.report_intermediate_result(val_auc)
else:
print('num_epochs:{} || loss: {} || train accuracy: {} || val accuracy: {} || val auc: {}'
.format(epoch, running_loss / len(self.train_loader), np.mean(accuracy[-9:]), val_accuracy, val_auc))
running_loss = 0.0
if self.to_nni:
nni.report_final_result({'default': best_val_auc, 'best_num_epochs': best_epoch})
return best_classif
def eval(self):
self.model.eval()
concat = False
with torch.no_grad():
for i, (embeddings, labels) in enumerate(self.val_loader):
labels = torch.tensor(np.array(labels).astype(int), dtype=torch.float, device=self.device)
embeddings = torch.tensor(np.array(embeddings), dtype=torch.float, device=self.device)
self.model.eval()
predictions = self.model(embeddings)
if concat:
final_predictions = torch.cat((final_predictions, predictions))
all_labels = torch.cat((all_labels, labels))
else:
final_predictions = predictions
all_labels = labels
concat = True
all_row_labels = all_labels[:, 0].cpu()
fpr, tpr, thresholds = metrics.roc_curve(all_row_labels, final_predictions[:, 0].cpu(), pos_label=1)
auc = metrics.auc(fpr, tpr)
samples_weight = all_row_labels * 10 + 1 - all_row_labels
final_predictions = 1 - torch.argmax(final_predictions, dim=1)
return all_labels.cpu(), final_predictions.cpu(), accuracy_score(all_row_labels, final_predictions.cpu(),
sample_weight=samples_weight), auc
def test(self):
self.model.eval()
concat = False
with torch.no_grad():
for i, (embeddings, labels) in enumerate(self.test_loader):
embeddings = torch.tensor(np.array(embeddings), dtype=torch.float, device=self.device)
self.model.eval()
probs = self.model(embeddings)[:, 0]
if concat:
final_probs = torch.cat((final_probs, probs))
else:
final_probs = probs
concat = True
return final_probs
if __name__ == "__main__":
device = get_device()
params = nni.get_next_parameter()
x_path = Path("save_data_graph/lad/final_graph_embeddings.npy")
y_path = Path("save_data_graph/lad/final_graph_labels.npy")
x_train_all, y_train_all = np.load(x_path, allow_pickle=True), np.load(y_path, allow_pickle=True)
dataset = EmbeddingLinkPredictionDataset(x_train_all, y_train_all)
train_set, val_set = torch.utils.data.random_split(dataset, [int(0.8 * len(dataset)),
len(dataset) - int(0.8 * len(dataset))])
train_loader = DataLoader(train_set, batch_size=32, shuffle=True)
val_loader = DataLoader(val_set, batch_size=32, shuffle=False)
net = EmbeddingLinkPredictionNetwork(len(x_train_all[0]), int(params["hidden_layer_size"]),
lr=params["embedding_nn_lr"],
weight_decay=params["embedding_nn_weight_decay"],
dropout_prob=params["embedding_nn_dropout_prob"],
optimizer="adam",
loss="weighted_binary_cross_entropy", device=device)
train_lp = TrainLinkPrediction(net, epochs=params["embedding_nn_epochs"],
train_loader=train_loader, val_loader=val_loader, to_nni=True)
classif2 = train_lp.train()
|
import functools
import os.path as osp
import numpy as np
import rlf.rl.utils as rutils
import torch
from rlf.algos.base_net_algo import BaseNetAlgo
from rlf.il.transition_dataset import TransitionDataset
from rlf.rl import utils
class ExperienceGenerator(object):
def init(self, policy, args, exp_gen_num_trans):
pass
def get_batch(self):
pass
def reset(self):
pass
class BaseILAlgo(BaseNetAlgo):
def __init__(self, exp_generator=None):
super().__init__()
self.exp_generator = exp_generator
self.data_iter = None
self._holdout_idxs = None
# By default do not transform the dataset at all.
self._transform_dem_dataset_fn = None
def set_transform_dem_dataset_fn(self, transform_dem_dataset_fn):
self._transform_dem_dataset_fn = transform_dem_dataset_fn
def _load_expert_data(self, policy, args):
assert args.traj_load_path is not None, "Must specify expert demonstrations!"
self.args = args
self.orig_dataset = self._get_traj_dataset(
osp.join(args.cwd, args.traj_load_path)
)
self.orig_dataset = self.orig_dataset.to(args.device)
num_trajs = self._create_train_loader(args)
trans_count_str = utils.human_format_int(
len(self.expert_train_loader) * args.traj_batch_size
)
print("Loaded %s transitions for imitation" % trans_count_str)
print("(%i trajectories)" % num_trajs)
@property
@functools.lru_cache()
def expert_stats(self):
return self.orig_dataset.get_expert_stats(self.args.device)
def _create_train_loader(self, args):
# Always keep track of the non-shuffled, non-split version of the
# dataset.
self.expert_dataset = self.orig_dataset
if args.traj_frac != 1.0:
self.expert_dataset = self.expert_dataset.compute_split(
args.traj_frac, args.seed
)
if args.traj_viz:
self.expert_dataset.viz(args)
if args.traj_val_ratio != 0.0:
N = len(self.expert_dataset)
n_val = int(N * args.traj_val_ratio)
train_dataset, val_dataset = torch.utils.data.random_split(
self.expert_dataset,
[N - n_val, n_val],
torch.Generator().manual_seed(self.args.seed),
)
val_traj_batch_size = min(len(val_dataset), self.args.traj_batch_size)
self.val_train_loader = torch.utils.data.DataLoader(
dataset=val_dataset,
batch_size=val_traj_batch_size,
shuffle=True,
drop_last=True,
)
else:
train_dataset = self.expert_dataset
self.val_train_loader = None
self.expert_train_loader = torch.utils.data.DataLoader(
dataset=train_dataset,
batch_size=args.traj_batch_size,
shuffle=True,
drop_last=True,
)
if isinstance(self.expert_dataset, torch.utils.data.Subset):
return int(args.traj_frac * self.expert_dataset.dataset.get_num_trajs())
else:
return self.expert_dataset.get_num_trajs()
def _get_next_data(self):
if self.exp_generator is None:
if self.data_iter is None:
self.data_iter = iter(self.expert_train_loader)
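            # next(..., None) returns None when the loader is exhausted; callers are
            # then presumably expected to reset via _reset_data_fetcher().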
try:
return next(self.data_iter, None)
except IndexError:
return None
else:
return self.exp_generator.get_batch()
def _reset_data_fetcher(self):
if self.exp_generator is None:
self.data_iter = iter(self.expert_train_loader)
else:
self.exp_generator.reset()
def get_env_settings(self, args):
settings = super().get_env_settings(args)
if args.il_out_action_norm:
print("Setting environment action denormalization")
settings.action_fn = self._denorm_action
return settings
def _adjust_action(self, x):
if not self.args.il_in_action_norm:
return x
return (x) / (self.expert_stats["action"][1] + 1e-8)
def _denorm_action(self, x):
return (x) * self.expert_stats["action"][1]
def init(self, policy, args):
# Load the expert data first, so we can calculate the needed number of
# steps for the learning rate scheduling in base_net_algo.py.
if self.exp_generator is None:
self._load_expert_data(policy, args)
else:
self.exp_generator.init(policy, args, args.exp_gen_num_trans)
print(f"Generating {args.exp_gen_num_trans} transitions for imitation")
super().init(policy, args)
def _get_expert_traj_stats(self):
return self.expert_mean, self.expert_std
def _get_traj_dataset(self, traj_load_path):
return TransitionDataset(traj_load_path, self._transform_dem_dataset_fn)
def get_add_args(self, parser):
super().get_add_args(parser)
parser.add_argument("--traj-load-path", type=str, default=None)
parser.add_argument("--traj-batch-size", type=int, default=128)
parser.add_argument(
"--traj-val-ratio",
type=float,
default=0.0,
help="""
Ratio of the dataset which is used for validation. This is only
supported for algorithms where there is some supervised
objective and this makes sense (i.e. for
something like BC where training is performed offline).
""",
)
parser.add_argument(
"--traj-frac",
type=float,
default=1.0,
help="The fraction of trajectories to use",
)
parser.add_argument("--traj-viz", action="store_true", default=False)
parser.add_argument("--exp-gen-num-trans", type=int, default=None)
# Unless you have some weird dataset situation, you probably want to
# specify either both or none of these. Specifying only in-action-norm
# will normalize the actions as input to the policy but will not
# denormalize the output when being passed to the environment.
parser.add_argument(
"--il-in-action-norm",
action="store_true",
default=False,
help="Normalize expert actions input to the policy",
)
parser.add_argument(
"--il-out-action-norm",
action="store_true",
default=False,
help="Denormalize actions in the environment",
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
# Import the ArUco module from OpenCV
aruco = cv2.aruco
# Use the predefined dictionary of 4x4 markers with IDs up to 50
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
def main():
    # Loop 10 times to create 10 markers
    for i in range(10):
        ar_image = aruco.drawMarker(dictionary, i, 150)  # i: marker ID, 150x150 pixels
        fileName = "ar" + str(i).zfill(2) + ".png"  # build the file name like "ar0x.png"
        cv2.imwrite(fileName, ar_image)  # save the marker image
if __name__ == "__main__":
main()
|
"""
Abstract asyncio synchronization primitive
"""
from __future__ import annotations
from abc import ABC
class SyncPrimitive(ABC):
"""
Abstract asyncio synchronization primitive
"""
def __init__(self, key: str):
"""
:param key: key
"""
self.key = key
|
import cv2
import time
import numpy as np
cap=cv2.VideoCapture(0)
time.sleep(3)
background=0
for i in range(60):
capture,background=cap.read()
background = np.flip(background,axis=1)
## Read every frame from the webcam while the camera is open
while(cap.isOpened()):
res,img=cap.read()
if not res:
break
img = np.flip(img, axis = 1)
hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
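    ## Red hue wraps around 0/180 in OpenCV's HSV space, so two ranges are masked and combined below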
lower_red= np.array([0,120,70])
upper_red = np.array([10, 255,255])
mask1=cv2.inRange(hsv,lower_red,upper_red)
lower_red = np.array([170, 120, 70])
upper_red = np.array([180, 255, 255])
mask2 = cv2.inRange(hsv, lower_red, upper_red)
mask1 = mask1 + mask2
## Open and Dilate the mask image
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))
## Create an inverted mask to segment out the red color from the frame
mask2 = cv2.bitwise_not(mask1)
## Segment the red color part out of the frame using bitwise and with the inverted mask
res1 = cv2.bitwise_and(img, img, mask =mask2)
## Create image showing static background frame pixels only for the masked region
res2 = cv2.bitwise_and(background, background, mask=mask1)
## Generating the final output and writing
finalOutput = cv2.addWeighted(res1, 1, res2, 1, 0)
print(finalOutput)
cv2.imshow("magic", finalOutput)
k=cv2.waitKey(1)
if k == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
"""This is a dummy module for the reflection test."""
from .reflection_test import ImportedClass # pylint: disable=unused-import
class DuplicateClass: # pylint: disable=missing-docstring
pass
|
import os
import time
# be careful with this one
def disable_fan_control():
cmd="""ssh root@shm-smrf-sp01 \"clia setfanpolicy 20 4 disable; clia setfanpolicy 20 3 disable\""""
os.system(cmd)
def enable_fan_control():
cmd="""ssh root@shm-smrf-sp01 \"clia setfanpolicy 20 4 enable; clia setfanpolicy 20 3 enable\""""
os.system(cmd)
def set_fan_level(fan_level=100):
cmd="""ssh root@shm-smrf-sp01 \"clia minfanlevel %d; clia setfanlevel all %d\""""%(fan_level,fan_level)
os.system(cmd)
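# NOTE: start_hardware_logging and add_tag_to_hardware_log are assumed to be
# provided elsewhere (e.g. defined in the interactive session this script runs in).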
ctime=time.time()
output_dir='/data/smurf_data/rflab_thermal_testing_swh_July2019'
hardware_logfile=os.path.join(output_dir,'{}_hwlog.dat'.format(int(ctime)))
slots=[2,3,4]
for slot in slots:
start_hardware_logging(slot,hardware_logfile)
print('-> Waiting 1 min before changing fan levels.')
time.sleep(60)
fan_levels=range(70,101,2)[::-1]
for fan_level in fan_levels:
print('-> Setting fan_level to %d'%fan_level)
add_tag_to_hardware_log(hardware_logfile,tag='fan%d'%(fan_level))
set_fan_level(fan_level)
print('-> Waiting 5 min until next fan level.')
time.sleep(5*60)
|
import streamlit as st
# from stqdm import stqdm
import io
from PIL import Image
import requests
import time
import logging
import json
import os
from requests_toolbelt.multipart.encoder import MultipartEncoder
backend = "http://fastapi:8000"
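# The FastAPI backend is assumed to be reachable under the "fastapi" hostname
# (e.g. a docker-compose service name).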
def detect_image(data, server):
m = MultipartEncoder(fields={"file": ("filename", data, "image/jpeg")})
resp = requests.post(
server + "/detection/image",
data=m,
headers={"Content-Type": m.content_type},
timeout=8000,
)
return resp
def detect_video(server):
requests.post(
server + "/detection/video",
timeout=8000,
)
def get_video_status(server):
resp = requests.get(
server + "/detection/video/status",
timeout=8000,
)
return resp
def main():
st.title("Plant Disease Detector Application")
st.write(
"""Test Plant Image
This streamlit example uses a FastAPI service as backend.
Visit this URL at `:8000/docs` for FastAPI documentation."""
) # description and instructions
# Side Bar
st.sidebar.title("Test Models")
app_mode = st.sidebar.selectbox("Choose Model", ["YOLO_V5_202103"])
if app_mode == "YOLO_V5_202103":
run_app()
def run_app():
data_type = st.selectbox("Choose Data Type", ["Image", "Video"])
input_data = st.file_uploader(f"insert {data_type}") # image upload widget
time.sleep(1)
if st.button("Detect Plant Disease"):
col1, col2 = st.beta_columns(2)
if data_type == "Image":
if input_data:
pred = detect_image(input_data, backend)
original_image = Image.open(input_data).convert("RGB")
converted_image = pred.content
converted_image = Image.open(io.BytesIO(converted_image)).convert("RGB")
r, g, b = converted_image.split()
converted_image = Image.merge("RGB", (b, g, r))
col1.header("Original")
col1.image(original_image, use_column_width=True)
col2.header("Detected")
col2.image(converted_image, use_column_width=True)
else:
# handle case with no image
st.write("Insert an image!")
elif data_type == "Video":
temp_path = "/var/lib/assets"
for t in os.listdir(temp_path):
os.remove(temp_path + "/" + t)
origin_video = input_data.read()
video_path = "/var/lib/assets/video1.mp4"
if os.path.isfile(video_path):
os.remove(video_path)
with open(video_path, "wb") as wfile:
wfile.write(origin_video)
logging.info(f"{video_path} added")
time.sleep(1)
wfile.close()
detect_video(backend)
time.sleep(1)
status = None
bar = st.progress(0)
# with stqdm(total=1, st_container=st) as pbar:
while status != "Success":
resp = get_video_status(backend)
resp_dict = json.loads(resp.content.decode("utf-8"))
status = resp_dict["status"]
if status != "Pending":
progress = resp_dict["progress"]
# pbar.update(int(progress))
bar.progress(int(progress))
time.sleep(1)
time.sleep(3)
save_path = "/var/lib/assets/detect1.mp4"
convert_path = "/var/lib/assets/detect2.mp4"
os.system(f"ffmpeg -i {save_path} -vcodec libx264 {convert_path}")
video_file = open(convert_path, "rb")
video_bytes = video_file.read()
col1.header("Original")
col2.header("Detected")
col1.video(origin_video, format="video/mp4")
col2.video(video_bytes, format="video/mp4")
if __name__ == "__main__":
main()
|
class Solution:
def sortedSquares(self, A: 'List[int]') -> 'List[int]':
B = []
for elem in A:
B.append(elem * elem)
B.sort()
return B
|
from kendo_base import KendoComponent
# test urls
get_list = 'http://127.0.0.1:3000/posts/'
class DataSource(KendoComponent):
'''
    http://docs.telerik.com/kendo-ui/api/javascript/data/datasource
'''
_k_cls = kendo.data.DataSource
data = None
transport = None
_functions = ['read']
def __init__(self, opts):
od = dict(opts)
url = od.pop('url', None)
if url:
od['transport'] = {'read': {'url': url, 'dataType': 'jsonp'}}
        KendoComponent.__init__(self, od)
self.read()
|
from kaneda import Metrics
from . import mark_benchmark
@mark_benchmark
class TestBenchmarksBackends(object):
def test_benchmark_elasticsearch(self, elasticsearch_backend, benchmark):
metrics = Metrics(backend=elasticsearch_backend)
benchmark(metrics.gauge, 'benchmark_elasticsearch', 1)
def test_benchmark_mongo(self, mongo_backend, benchmark):
metrics = Metrics(backend=mongo_backend)
benchmark(metrics.gauge, 'benchmark_mongo', 1)
def test_benchmark_rethink(self, rethink_backend, benchmark):
metrics = Metrics(backend=rethink_backend)
        benchmark(metrics.gauge, 'benchmark_rethink', 1)
|
from .client import Method, Request, HTTPClient
from .gateway import GatewayKeepAlive, Gateway, GatewayEvent
from .events import *
from .session import Session
from .bot import Bot
from .exceptions import ClosedSocketException
from .structs.user import Relationship, Presence, Status, BotUser, User
from .structs.channels import ChannelType, Channel, SavedMessages, DirectMessage, Group, TextChannel, VoiceChannel, Message, EmbedType, EmbedImageSize, Embed, Masquerade, Reply
from .structs.server import Category, SystemMessages, Role, Server
from .structs.member import Member
|
# -*- coding: utf-8 -*-
# @Author: JanKinCai
# @Date: 2019-12-24 12:55:30
# @Last Modified by: JanKinCai
# @Last Modified time: 2020-01-06 22:41:34
from interact.when import When
test_items = {
"a": True,
"b": "ssss",
"c": 22,
"d": {
"dd": 23
},
}
def test_true():
"""
Test when boolean
"""
vstr = "a == true"
assert When(wv=vstr, cid=test_items).do_when() is True
def test_false():
"""
Test when boolean
"""
vstr = "a == false"
assert When(wv=vstr, cid=test_items).do_when() is False
def test_str():
"""
Test when str
"""
vstr = "b == 'ssss'"
assert When(wv=vstr, cid=test_items).do_when() is True
def test_str2():
"""
Test when str
"""
vstr = "b == ssss"
assert When(wv=vstr, cid=test_items).do_when() is True
def test_str3():
"""
Test when str
"""
vstr = 'b == "ssss"'
assert When(wv=vstr, cid=test_items).do_when() is True
def test_int():
"""
Test when int
"""
vstr = "c == 22"
assert When(wv=vstr, cid=test_items).do_when() is True
def test_int2():
"""
Test when int
"""
vstr = "d.dd == 23"
assert When(wv=vstr, cid=test_items).do_when() is True
def test_int3():
"""
Test when int
"""
vstr = "d.dd != 23"
assert When(wv=vstr, cid=test_items).do_when() is False
def test_linux():
"""Test linux
"""
assert When(wv="islinux", cid=test_items).do_when() is True
if __name__ == "__main__":
test_true()
test_false()
test_str()
test_str2()
test_str3()
test_int()
test_int2()
test_int3()
test_linux()
|
# Colors
yellow = (255, 255, 0)
red = (255, 0, 0)
blue = (0, 0, 255)
white = (255, 255, 255)
black = (0, 0, 0)
green = (0, 255, 0)
|
import sys
import os.path
def main(files):
nfiles = len(files)
if nfiles < 2:
sys.exit("Usage: %s file1 file2 ... " % sys.argv[0])
# Check files
for filename in files:
if( os.path.isfile(filename) == False ):
sys.exit("File %s not found" % filename)
    # read files
    d = []
    for filename in files:
        with open(filename, 'r') as f:
            d.append(f.read())
# compare and print
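    # assumes every file is at least as long as the first one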
for i in range(len(d[0])):
a = d[0][i]
for x in range(1,nfiles):
if a != d[x][i]:
sys.stdout.write("_")
break
else:
sys.stdout.write(a)
if __name__ == '__main__':
main(sys.argv[1:])
|
### TUPLE
print("\n\n### TUPLE")
x = (1,2,"Three",4)
print(x)
x = 1,2,"Three",4
print(x)
# Index
x = (1,2,"Three",4)
print(x[1])
print(x[1:-1])
# Mutate Tuple?
x = (1,2)
print(x*4)
y = ("Three",4)
print(x+y)
print(1 in x)
for i in x: print(i)
# Following will not work
# del x[1]
# y[1] = 3
|
import optuna
import pandas as pd
from lightgbm import LGBMClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from zenml.logger import get_logger
logging = get_logger(__name__)
class Hyperparameter_Optimization:
"""
Class for doing hyperparameter optimization.
"""
def __init__(
self, x_train: pd.DataFrame, y_train: pd.Series, x_test: pd.DataFrame, y_test: pd.Series
) -> None:
"""
Initialize the Hyperparameter_Optimization class.
Args:
x_train: Training data
y_train: Training labels
x_test: Test data
y_test: Test labels
"""
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
def optimize_randomforest(self, trial: optuna.Trial) -> float:
"""
Method for optimizing Random Forest
"""
n_estimators = trial.suggest_int("n_estimators", 1, 200)
max_depth = trial.suggest_int("max_depth", 1, 20)
min_samples_split = trial.suggest_int("min_samples_split", 2, 20)
reg = RandomForestClassifier(
n_estimators=n_estimators,
max_depth=max_depth,
min_samples_split=min_samples_split,
)
reg.fit(self.x_train, self.y_train)
val_accuracy = reg.score(self.x_test, self.y_test)
return val_accuracy
def optimize_lightgbm(self, trial: optuna.Trial) -> float:
"""
Method for Optimizing LightGBM
"""
n_estimators = trial.suggest_int("n_estimators", 1, 200)
max_depth = trial.suggest_int("max_depth", 1, 20)
learning_rate = trial.suggest_uniform("learning_rate", 0.01, 0.99)
reg = LGBMClassifier(
n_estimators=n_estimators,
learning_rate=learning_rate,
max_depth=max_depth,
)
reg.fit(self.x_train, self.y_train)
val_accuracy = reg.score(self.x_test, self.y_test)
return val_accuracy
def optimize_xgboost_regressor(self, trial: optuna.Trial) -> float:
"""
Method for Optimizing Xgboost
"""
param = {
"max_depth": trial.suggest_int("max_depth", 1, 30),
"learning_rate": trial.suggest_loguniform("learning_rate", 1e-7, 10.0),
"n_estimators": trial.suggest_int("n_estimators", 1, 200),
}
reg = XGBClassifier(**param)
reg.fit(self.x_train, self.y_train)
val_accuracy = reg.score(self.x_test, self.y_test)
return val_accuracy
class TreeBasedModels:
"""
Class for training models.
"""
def __init__(
self, x_train: pd.DataFrame, y_train: pd.Series, x_test: pd.DataFrame, y_test: pd.Series
) -> None:
"""
Initialize the class TreeBasedModels class.
Args:
x_train: Training data
y_train: Training labels
x_test: Test data
y_test: Test labels
"""
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
def random_forest_trainer(self, fine_tuning: bool = True):
"""
It trains the random forest model.
Args:
fine_tuning: If True, hyperparameter optimization is performed. If False, the default
parameters are used, defaults to True (optional)
"""
logging.info("Entered for training Random Forest model")
try:
if fine_tuning:
hyper_opt = Hyperparameter_Optimization(
self.x_train, self.y_train, self.x_test, self.y_test
)
study = optuna.create_study(direction="maximize")
study.optimize(hyper_opt.optimize_randomforest, n_trials=100)
trial = study.best_trial
n_estimators = trial.params["n_estimators"]
max_depth = trial.params["max_depth"]
min_samples_split = trial.params["min_samples_split"]
print("Best parameters : ", trial.params)
reg = RandomForestClassifier(
n_estimators=n_estimators,
max_depth=max_depth,
min_samples_split=min_samples_split,
)
reg.fit(self.x_train, self.y_train)
return reg
else:
model = RandomForestClassifier(
n_estimators=152, max_depth=20, min_samples_split=17
)
model.fit(self.x_train, self.y_train)
return model
except Exception as e:
logging.error("Error in training Random Forest model")
logging.error(e)
return None
def lightgbm_trainer(self, fine_tuning: bool = True):
"""
It trains the LightGBM model.
Args:
fine_tuning: If True, hyperparameter optimization is performed. If False, the default
parameters are used, defaults to True (optional)
"""
logging.info("Entered for training LightGBM model")
try:
if fine_tuning:
hyper_opt = Hyperparameter_Optimization(
self.x_train, self.y_train, self.x_test, self.y_test
)
study = optuna.create_study(direction="maximize")
study.optimize(hyper_opt.optimize_lightgbm, n_trials=100)
trial = study.best_trial
n_estimators = trial.params["n_estimators"]
max_depth = trial.params["max_depth"]
learning_rate = trial.params["learning_rate"]
reg = LGBMClassifier(
n_estimators=n_estimators,
learning_rate=learning_rate,
max_depth=max_depth,
)
reg.fit(self.x_train, self.y_train)
return reg
else:
model = LGBMClassifier(n_estimators=200, learning_rate=0.01, max_depth=20)
model.fit(self.x_train, self.y_train)
return model
except Exception as e:
logging.error("Error in training LightGBM model")
logging.error(e)
return None
def xgboost_trainer(self, fine_tuning: bool = True):
"""
It trains the xgboost model.
Args:
fine_tuning: If True, hyperparameter optimization is performed. If False, the default
parameters are used. Defaults to True (optional)
"""
logging.info("Started training XGBoost model")
try:
if fine_tuning:
hy_opt = Hyperparameter_Optimization(
self.x_train, self.y_train, self.x_test, self.y_test
)
study = optuna.create_study(direction="maximize")
study.optimize(hy_opt.optimize_xgboost_regressor, n_trials=100)
trial = study.best_trial
n_estimators = trial.params["n_estimators"]
learning_rate = trial.params["learning_rate"]
max_depth = trial.params["max_depth"]
reg = XGBClassifier(
n_estimators=n_estimators,
learning_rate=learning_rate,
max_depth=max_depth,
)
reg.fit(self.x_train, self.y_train)
return reg
else:
model = XGBClassifier(n_estimators=200, learning_rate=0.01, max_depth=20)
model.fit(self.x_train, self.y_train)
return model
except Exception as e:
logging.error("Error in training XGBoost model")
logging.error(e)
return None
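# Minimal usage sketch (not part of the original module); assumes x_train, y_train,
# x_test, y_test pandas objects have been prepared elsewhere:
#     trainer = TreeBasedModels(x_train, y_train, x_test, y_test)
#     model = trainer.random_forest_trainer(fine_tuning=False)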
|
from __future__ import division
import cv2
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
# calculate the scale that should be applied to make the image
# fit into the window
def display_image(window_name, image):
screen_res = 720, 480
scale_width = screen_res[0] / image.shape[1]
scale_height = screen_res[1] / image.shape[0]
scale = min(scale_width, scale_height)
window_width = int(image.shape[1] * scale)
window_height = int(image.shape[0] * scale)
# reescale the resolution of the window
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(window_name, window_width, window_height)
# show image
cv2.imshow(window_name, image)
# wait for any key to quit the program
cv2.waitKey(0)
cv2.destroyAllWindows()
# calculate the scale that should be applied to make the image
# fit into the window
def display_frame(window_name, image):
screen_res = 720, 480
scale_width = screen_res[0] / image.shape[1]
scale_height = screen_res[1] / image.shape[0]
scale = min(scale_width, scale_height)
window_width = int(image.shape[1] * scale)
window_height = int(image.shape[0] * scale)
# reescale the resolution of the window
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(window_name, window_width, window_height)
# show image
cv2.imshow(window_name, image)
# fourier transform
def fourier(image):
f = np.fft.fft2(image)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20 * np.log(np.abs(fshift))
plt.subplot(121), plt.imshow(image, cmap='gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(magnitude_spectrum, cmap='gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
# calculate the histogram of the image
def histogram(image):
hist = cv2.calcHist([image], [0], None, [256], [0, 256])
plt.plot(hist)
# plt.hist(img_norm.ravel(),256,[0,256]);
plt.show()
# find edge in a binarized image
def edge(image):
edge_horizont = ndimage.sobel(image, 0)
edge_vertical = ndimage.sobel(image, 1)
magnitude = np.hypot(edge_horizont, edge_vertical)
return magnitude
# binarize the image
def binarize(image):
# binarize the image 0, 128, 255
img_cpy = np.ndarray(shape=image.shape)
    # good thresholds for edge detection:
# 64 0
# 128 128
# 255 255
    # good thresholds for binarization:
# 32 0
# 255 255
# apply filter
for i in range(len(image)):
for j in range(len(image[0])):
if image[i][j] <= 64:
img_cpy[i][j] = 0
elif image[i][j] <= 128:
img_cpy[i][j] = 128
elif image[i][j] <= 255:
img_cpy[i][j] = 255
return img_cpy
def binarize_02(image):
ret, thresh1 = cv2.threshold(image, 64, 255, cv2.THRESH_BINARY)
return thresh1
# convert an norm image to grayscale image
def imgToGray(image):
img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return img_gray
# negative of an image
def negImage(image):
    img_neg = 255 - image
return img_neg
# read an image in normal mode
def readImage(filename):
img = cv2.imread(filename)
return img
# read an image in grayscale; does not check whether the file exists
def readGrayImage(filename):
img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
return img
|
import fv3gfs.wrapper
# May need to run 'ulimit -s unlimited' before running this example
# If you're running in our prepared docker container, you definitely need to do this
# sets the stack size to unlimited
# Run using mpirun -n 6 python3 basic_model.py
# mpirun flags that may be useful if using openmpi rather than mpich:
# for docker: --allow-run-as-root
# for CircleCI: --oversubscribe
# to silence a certain inconsequential MPI error: --mca btl_vader_single_copy_mechanism none
# All together, for openmpi:
# mpirun -n 6 --allow-run-as-root --oversubscribe --mca btl_vader_single_copy_mechanism none python3 basic_model.py
if __name__ == "__main__":
fv3gfs.wrapper.initialize()
for i in range(fv3gfs.wrapper.get_step_count()):
fv3gfs.wrapper.step_dynamics()
fv3gfs.wrapper.step_physics()
fv3gfs.wrapper.save_intermediate_restart_if_enabled()
fv3gfs.wrapper.cleanup()
|
from datetime import datetime
# PLAYER ID ALIASES
FAZY_ID = 67712324
GRUMPY_ID = 100117588
KESKOO_ID = 119653426
SHIFTY_ID = 171566175
WARELIC_ID = 211310297
class DotaPatch:
def __init__(self, name, release_time):
self.name = name
self.release_time = release_time
def __str__(self):
        string_datetime = self.release_time.strftime('%d-%b-%Y %H:%M')
        return f'version {self.name} (released on {string_datetime})'
v728a = DotaPatch('7.28a', datetime(2020, 12, 22, 12, 0))
v728b = DotaPatch('7.28b', datetime(2021, 1, 11, 6, 0))
v728c = DotaPatch('7.28c', datetime(2021, 2, 20, 3, 0))
v729 = DotaPatch('7.29', datetime(2021, 4, 9, 3, 0))
v729b = DotaPatch('7.29b', datetime(2021, 4, 16, 3, 0))
v729c = DotaPatch('7.29c', datetime(2021, 4, 29, 3, 0))
v729d = DotaPatch('7.29d', datetime(2021, 5, 24, 3, 0))
v730 = DotaPatch('7.30', datetime(2021, 8, 18, 3, 0))
v730b = DotaPatch('7.30b', datetime(2021, 8, 23, 7, 0))
VERSIONS = {'7.28a': v728a,
'7.28b': v728b,
'7.28c': v728c,
'7.29a': v729,
'7.29b': v729b,
'7.29c': v729c,
'7.29d': v729d,
'7.30': v730,
'7.30b': v730b}
|
# -*- coding: utf-8 -*-
"""Login middleware."""
from django.contrib.auth import authenticate
from django.middleware.csrf import get_token as get_csrf_token
from account.accounts import Account
class LoginMiddleware(object):
"""Login middleware."""
def process_view(self, request, view, args, kwargs):
"""process_view."""
if getattr(view, 'login_exempt', False):
return None
user = authenticate(request=request)
if user:
request.user = user
get_csrf_token(request)
return None
account = Account()
return account.redirect_login(request)
|
def for_closed_bracket():
for row in range(5):
for col in range(5):
if col==3 or col==2 and(row==0 or row==4):
print("*",end=" ")
else:
print(" ",end=" ")
print()
def while_closed_bracket():
row=0
while row<5:
col=0
while col<5:
if col==3 or col==2 and(row==0 or row==4):
print("*",end=" ")
else:
print(" ",end=" ")
col+=1
row+=1
print()
|
import pytest
from aries_cloudcontroller import (
AcaPyClient,
CredentialsApi,
IssueCredentialV10Api,
IssueCredentialV20Api,
LedgerApi,
PresentProofV10Api,
PresentProofV20Api,
WalletApi,
)
from mockito import mock
from app.tests.util.client_fixtures import (
member_admin_acapy_client,
member_admin_client,
yoma_acapy_client,
yoma_client,
)
from app.tests.util.member_personas import (
alice_member_client,
bob_and_alice_connection,
bob_and_alice_public_did,
bob_member_client,
alice_acapy_client,
bob_acapy_client,
)
@pytest.fixture
def mock_agent_controller():
controller = mock(AcaPyClient)
controller.wallet = mock(WalletApi)
controller.ledger = mock(LedgerApi)
controller.issue_credential_v1_0 = mock(IssueCredentialV10Api)
controller.issue_credential_v2_0 = mock(IssueCredentialV20Api)
controller.present_proof_v1_0 = mock(PresentProofV10Api)
controller.present_proof_v2_0 = mock(PresentProofV20Api)
controller.credentials = mock(CredentialsApi)
return controller
|
from typing import Any
from utils.dp.observer import ConcreteSubject
class AppStatus(dict, ConcreteSubject):
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
def setv(self, name, value):
name_parts = list(name.split("."))
obj = self
for name_part in name_parts[:-1]:
sn = obj.get(name_part, dict())
obj[name_part] = sn
obj = sn
        previous_state = obj.get(name_parts[-1])
        obj[name_parts[-1]] = value
self.notify(name, previous_state, value)
def getv(self, name, default_value=None):
name_parts = list(name.split("."))
obj = self
for name_part in name_parts:
value = obj.get(name_part, default_value)
if not isinstance(value, dict):
break
obj = value
return value
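# Usage sketch (hypothetical keys):
#     status = AppStatus()
#     status.setv("camera.exposure", 10)   # stores {'camera': {'exposure': 10}} and notifies observers
#     status.getv("camera.exposure")       # -> 10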
|
#!/usr/bin/python2.5
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper that makes more useful stack dumps when your script crashes.
Normal use, so that your script works with or without traceplus:
if __name__ == '__main__':
try:
import traceplus
traceplus.RunWithExpandedTrace(main)
except ImportError:
main()
"""
def MakeExpandedTrace(frame_records):
"""Return a list of text lines for the given list of frame records."""
dump = []
for (frame_obj, filename, line_num, fun_name, context_lines,
context_index) in frame_records:
dump.append('File "%s", line %d, in %s\n' % (filename, line_num,
fun_name))
if context_lines:
for (i, line) in enumerate(context_lines):
if i == context_index:
dump.append(' --> %s' % line)
else:
dump.append(' %s' % line)
for local_name, local_val in frame_obj.f_locals.items():
try:
truncated_val = repr(local_val)[0:500]
except Exception, e:
dump.append(' Exception in str(%s): %s\n' % (local_name, e))
else:
if len(truncated_val) >= 500:
truncated_val = '%s...' % truncated_val[0:499]
dump.append(' %s = %s\n' % (local_name, truncated_val))
dump.append('\n')
return dump
def RunWithExpandedTrace(closure):
try:
return closure()
except (SystemExit, KeyboardInterrupt):
raise
except:
import inspect
import sys
import traceback
# Save trace and exception now. These calls look at the most recently
# raised exception. The code that makes the report might trigger other
# exceptions.
original_trace = inspect.trace(3)[1:]
formatted_exception = traceback.format_exception_only(*(sys.exc_info()[:2]))
dashes = '%s\n' % ('-' * 60)
dump = []
dump.append(dashes)
dump.extend(MakeExpandedTrace(original_trace))
dump.append(''.join(formatted_exception))
print ''.join(dump)
print
print dashes
sys.exit(127)
|
# vim: set fenc=utf8 ts=4 sw=4 et :
import xml.sax
import functools
from .autovivification import AutoVivification
from .utils import autoconvert, call_plugin
from .conf import Conf
from .flow import Flow
from .logging import *
class PdmlHandler(xml.sax.ContentHandler):
def __init__(self):
self.__frame = AutoVivification()
self.__flows = {}
# Call when an element starts
def startElement(self, tag, attributes):
if tag == 'packet':
self.__frame = AutoVivification()
else:
if 'name' in attributes:
name = attributes.getValue('name')
if len(name) > 0:
merge = False
# Build object tree
new = AutoVivification()
name_access = functools.reduce(
lambda x,y: x[y], [new] + name.split(Conf.PDML_NESTCHAR)
)
# Extract raw data
if 'show' in attributes:
show = attributes.getValue('show')
if len(show) > Conf.DATA_MAXLEN:
show = Conf.DATA_TOO_LONG
if len(show) > 0:
debug('{}.raw: {}'.format(name, show))
name_access['raw'] = [autoconvert(show)]
merge = True
# Extract showname
if 'showname' in attributes and Conf.EXTRACT_SHOW:
showname = attributes.getValue('showname')
if len(showname) > Conf.DATA_MAXLEN:
showname = Conf.DATA_TOO_LONG
if len(showname) > 0:
debug('{}.show: {}'.format(name, showname))
name_access['show'] = [showname]
merge = True
if merge:
self.__frame.merge(new)
    # Call when an element ends
def endElement(self, tag):
if tag == 'packet':
# advance time
try:
Flow.newest_overall_frame_time = max(
Flow.newest_overall_frame_time,
self.__frame[Conf.FRAME_TIME]
)
except TypeError:
warning(
'Dropping frame because of invalid time ({}) in {}'.format(
self.__frame[Conf.FRAME_TIME],
Conf.FRAME_TIME
)
)
return
# write out expired flows
new_flows = {}
for (flowid, flow) in self.__flows.items():
if flow.not_expired():
new_flows[flowid] = flow
else:
flow.expired()
self.__flows = new_flows
# the flow definition
flowid = Flow.get_flow_id(self.__frame)
# ignore frames without a flowid
if flowid:
try:
flow = self.__flows[flowid]
self.__flows[flowid].add_frame(self.__frame)
debug('old flow: {}'.format(flowid))
except KeyError:
# flow unknown add new flow
flow = self.__flows[flowid] = Flow(self.__frame)
debug('new flow: {}'.format(flowid))
else:
for plugin in Conf.PLUGINS:
call_plugin(
plugin,
'frame_new',
self.__frame.cast_dicts(dict),
None
)
def endDocument(self):
for (flowid, flow) in self.__flows.items():
flow.end()
for plugin in Conf.PLUGINS:
call_plugin(
plugin,
'__deinit__'
)
|
from RivetDetect import RivetDetect
import threading
import numpy as np
import cv2
import time
# Run the three cameras simultaneously, one per thread
def camera1():
RivetDetect.execute(0)
def camera2():
RivetDetect.execute(1)
def camera3():
RivetDetect.execute(2)
# Create the thread objects (t1, t2, t3); no arguments are passed to them here
# target = the function to run, args = the arguments handed to it
t1 = threading.Thread(target=camera3, args=(), daemon=True)
t2 = threading.Thread(target=camera2, args=(), daemon=True)
t3 = threading.Thread(target=camera1, args=(), daemon=False) # camera 0 runs on a non-daemon thread; when it finishes, the daemon threads exit with it
def execute():
    # start() must be called on each thread object for the threads to actually begin running concurrently
t1.start()
t2.start()
t3.start()
if __name__ == "__main__":
execute()
|
import smart_imports
smart_imports.all()
class MatchmakerClient(tt_api_matchmaker.Client):
def protobuf_to_battle_request(self, pb_battle_request):
return objects.BattleRequest(id=pb_battle_request.id,
initiator_id=pb_battle_request.initiator_id,
matchmaker_type=relations.MATCHMAKER_TYPE(pb_battle_request.matchmaker_type),
created_at=datetime.datetime.fromtimestamp(pb_battle_request.created_at),
updated_at=datetime.datetime.fromtimestamp(pb_battle_request.updated_at))
def protobuf_to_battle(self, pb_battle):
return objects.Battle(id=pb_battle.id,
matchmaker_type=relations.MATCHMAKER_TYPE(pb_battle.matchmaker_type),
participants_ids=list(pb_battle.participants_ids),
created_at=datetime.datetime.fromtimestamp(pb_battle.created_at))
matchmaker = MatchmakerClient(entry_point=conf.settings.TT_MATCHMAKER_ENTRY_POINT)
|
"""
main runner for all sims
Args:
- type of sim
- num points in sim
- save name
- other parameters?
"""
#main packages
import argparse
import numpy as np
import os
#special packages
import model_runner as mr
#Read in supplied arguments.
parser = argparse.ArgumentParser(description="Run a single vasculature simulation.")
parser.add_argument("-t", "--sim_type", required=True, default = "auxin", type = str, help="Type of simulation: auxin, branched_OT")
parser.add_argument("-i", "--inf_num_pts", default=100, required=False, type = int, help="Min number of initial points to generate simulation.")
parser.add_argument("-s", "--sup_num_pts", default=200, required=False, type = int, help="Max number of initial points to generate simulation.")
parser.add_argument("-n", "--num_simulations", required=True, default = 100, type = int, help="Number of simulations to run.")
parser.add_argument("-d", "--directory", default=None, required=False, type=str, help="Directory of where to save simulation.")
parser.add_argument("-o","--other", default = None,required=False,type=str,help="Path to file with optional parameters.")
args = parser.parse_args()
t = args.sim_type
num_min = args.inf_num_pts
num_max = args.sup_num_pts
num_runs = args.num_simulations
dir = args.directory
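# NOTE: sim_type and the point-count bounds are parsed above but not (yet)
# forwarded to mr.Vasculature() below.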
#run the requested number of simulations
for ii in range(num_runs):
print()
model = mr.Vasculature()
model.save_simulation(dir)
|
#!/usr/bin/env python
__author__ = "Richard Clubb"
__copyrights__ = "Copyright 2018, the python-uds project"
__credits__ = ["Richard Clubb"]
__license__ = "MIT"
__maintainer__ = "Richard Clubb"
__email__ = "richard.clubb@embeduk.com"
__status__ = "Development"
from uds.uds_config_tool import DecodeFunctions
import sys
from uds.uds_config_tool.FunctionCreation.iServiceMethodFactory import IServiceMethodFactory
# When encode the dataRecord for transmission we have to allow for multiple elements in the data record
# i.e. 'value1' - for a single value, or [('param1','value1'),('param2','value2')] for more complex data records
requestFuncTemplate = str("def {0}(dataRecord):\n"
" encoded = []\n"
" if type(dataRecord) == list and type(dataRecord[0]) == tuple:\n"
" drDict = dict(dataRecord)\n"
" {3}\n"
"{4}\n"
" return {1} + {2} + encoded")
checkFunctionTemplate = str("def {0}(input):\n"
" serviceIdExpected = {1}\n"
" diagnosticIdExpected = {2}\n"
" serviceId = DecodeFunctions.buildIntFromList(input[{3}:{4}])\n"
" diagnosticId = DecodeFunctions.buildIntFromList(input[{5}:{6}])\n"
" if(len(input) != {7}): raise Exception(\"Total length returned not as expected. Expected: {7}; Got {{0}}\".format(len(input)))\n"
" if(serviceId != serviceIdExpected): raise Exception(\"Service Id Received not expected. Expected {{0}}; Got {{1}} \".format(serviceIdExpected, serviceId))\n"
" if(diagnosticId != diagnosticIdExpected): raise Exception(\"Diagnostic Id Received not as expected. Expected: {{0}}; Got {{1}}\".format(diagnosticIdExpected, diagnosticId))")
negativeResponseFuncTemplate = str("def {0}(input):\n"
" result = {{}}\n"
" nrcList = {5}\n"
" if input[{1}:{2}] == [{3}]:\n"
" result['NRC'] = input[{4}]\n"
" result['NRC_Label'] = nrcList.get(result['NRC'])\n"
" return result")
encodePositiveResponseFuncTemplate = str("def {0}(input):\n"
" return")
class WriteDataByIdentifierMethodFactory(IServiceMethodFactory):
##
# @brief method to create the request function for the service element
@staticmethod
def create_requestFunction(diagServiceElement, xmlElements):
serviceId = 0
diagnosticId = 0
shortName = "request_{0}".format(diagServiceElement.find('SHORT-NAME').text)
requestElement = xmlElements[diagServiceElement.find('REQUEST-REF').attrib['ID-REF']]
paramsElement = requestElement.find('PARAMS')
encodeFunctions = []
encodeFunction = "None"
for param in paramsElement:
semantic = None
try:
semantic = param.attrib['SEMANTIC']
            except KeyError:  # a missing SEMANTIC attribute raises KeyError, not AttributeError
pass
if(semantic == 'SERVICE-ID'):
serviceId = [int(param.find('CODED-VALUE').text)]
elif(semantic == 'ID'):
diagnosticId = DecodeFunctions.intArrayToIntArray([int(param.find('CODED-VALUE').text)], 'int16', 'int8')
elif semantic == 'DATA':
dataObjectElement = xmlElements[(param.find('DOP-REF')).attrib['ID-REF']]
longName = param.find('LONG-NAME').text
bytePosition = int(param.find('BYTE-POSITION').text)
# Catching any exceptions where we don't know the type - these will fail elsewhere, but at least we can test what does work.
try:
encodingType = dataObjectElement.find('DIAG-CODED-TYPE').attrib['BASE-DATA-TYPE']
bitLength = dataObjectElement.find('DIAG-CODED-TYPE').find('BIT-LENGTH').text
                except Exception:
                    encodingType = "unknown"  # unsupported type: for now just drop into the "else" catch-all below
                if encodingType == "A_ASCIISTRING":
functionStringList = "DecodeFunctions.stringToIntList(drDict['{0}'], None)".format(longName)
functionStringSingle = "DecodeFunctions.stringToIntList(dataRecord, None)"
elif (encodingType in ("A_INT8", "A_INT16", "A_INT32", "A_UINT8", "A_UINT16", "A_UINT32")):
functionStringList = "DecodeFunctions.intValueToByteArray(drDict['{0}'], {1})".format(longName, bitLength)
functionStringSingle = "DecodeFunctions.intValueToByteArray(dataRecord, {0})".format(bitLength)
else:
functionStringList = "drDict['{0}']".format(longName)
functionStringSingle = "dataRecord"
"""
The following encoding types may be required at some stage, but are not currently supported by any functions in the DecodeFunctions.py module ...
A_VOID: pseudo type for non-existing elements
A_BIT: one bit
A_INT64: signed integer 64-bit, two's complement
A_FLOAT32: IEEE 754 single precision
A_FLOAT64: IEEE 754 double precision
A_ASCIISTRING: string, ISO-8859-1 encoded
A_UTF8STRING: string, UTF-8 encoded
A_UNICODE2STRING: string, UCS-2 encoded
A_BYTEFIELD: Field of bytes
Also, we will most need to handle scaling at some stage within DecodeFunctions.py (for RDBI at the very least)
"""
#
encodeFunctions.append("encoded += {1}".format(longName,
functionStringList))
encodeFunction = " else:\n encoded = {1}".format(longName,functionStringSingle)
        # If the data record requires more than one value, we suppress the single-value sending option.
        # Note: in the single-value case we do not suppress the dictionary method of sending, as this allows extra
        # flexibility, letting a user keep a consistent list format in all situations if desired.
if len(encodeFunctions) > 1:
encodeFunction = ""
funcString = requestFuncTemplate.format(shortName,
serviceId,
diagnosticId,
"\n ".join(encodeFunctions), # ... handles input via list
encodeFunction) # ... handles input via single value
exec(funcString)
return locals()[shortName]
##
# @brief method to create the function to check the positive response for validity
@staticmethod
def create_checkPositiveResponseFunction(diagServiceElement, xmlElements):
responseId = 0
diagnosticId = 0
responseIdStart = 0
responseIdEnd = 0
diagnosticIdStart = 0
diagnosticIdEnd = 0
shortName = diagServiceElement.find('SHORT-NAME').text
checkFunctionName = "check_{0}".format(shortName)
positiveResponseElement = xmlElements[(diagServiceElement.find('POS-RESPONSE-REFS')).find('POS-RESPONSE-REF').attrib['ID-REF']]
paramsElement = positiveResponseElement.find('PARAMS')
totalLength = 0
for param in paramsElement:
try:
semantic = None
try:
semantic = param.attrib['SEMANTIC']
                except KeyError:  # a missing SEMANTIC attribute raises KeyError, not AttributeError
pass
startByte = int(param.find('BYTE-POSITION').text)
if(semantic == 'SERVICE-ID'):
responseId = int(param.find('CODED-VALUE').text)
bitLength = int((param.find('DIAG-CODED-TYPE')).find('BIT-LENGTH').text)
listLength = int(bitLength / 8)
responseIdStart = startByte
responseIdEnd = startByte + listLength
totalLength += listLength
elif(semantic == 'ID'):
diagnosticId = int(param.find('CODED-VALUE').text)
bitLength = int((param.find('DIAG-CODED-TYPE')).find('BIT-LENGTH').text)
listLength = int(bitLength / 8)
diagnosticIdStart = startByte
diagnosticIdEnd = startByte + listLength
totalLength += listLength
else:
pass
except:
#print(sys.exc_info())
pass
checkFunctionString = checkFunctionTemplate.format(checkFunctionName, # 0
responseId, # 1
diagnosticId, # 2
responseIdStart, # 3
responseIdEnd, # 4
diagnosticIdStart, # 5
diagnosticIdEnd, # 6
totalLength) # 7
exec(checkFunctionString)
return locals()[checkFunctionName]
##
# @brief method to encode the positive response from the raw type to it physical representation
@staticmethod
def create_encodePositiveResponseFunction(diagServiceElement, xmlElements):
# There's nothing to extract here! The only value in the response is the DID, checking of which is handled in the check function,
# so must be present and ok. This function is only required to return the default None response.
shortName = diagServiceElement.find('SHORT-NAME').text
encodePositiveResponseFunctionName = "encode_{0}".format(shortName)
encodeFunctionString = encodePositiveResponseFuncTemplate.format(encodePositiveResponseFunctionName) # 0
exec(encodeFunctionString)
return locals()[encodePositiveResponseFunctionName]
##
# @brief method to create the negative response function for the service element
@staticmethod
def create_checkNegativeResponseFunction(diagServiceElement, xmlElements):
shortName = diagServiceElement.find('SHORT-NAME').text
check_negativeResponseFunctionName = "check_negResponse_{0}".format(shortName)
negativeResponsesElement = diagServiceElement.find('NEG-RESPONSE-REFS')
negativeResponseChecks = []
for negativeResponse in negativeResponsesElement:
negativeResponseRef = xmlElements[negativeResponse.attrib['ID-REF']]
negativeResponseParams = negativeResponseRef.find('PARAMS')
for param in negativeResponseParams:
semantic = None
try:
semantic = param.attrib['SEMANTIC']
                except KeyError:
semantic = None
bytePosition = int(param.find('BYTE-POSITION').text)
if semantic == 'SERVICE-ID':
serviceId = param.find('CODED-VALUE').text
start = int(param.find('BYTE-POSITION').text)
diagCodedType = param.find('DIAG-CODED-TYPE')
bitLength = int((param.find('DIAG-CODED-TYPE')).find('BIT-LENGTH').text)
listLength = int(bitLength/8)
end = start + listLength
elif bytePosition == 2:
nrcPos = bytePosition
expectedNrcDict = {}
try:
dataObjectElement = xmlElements[(param.find('DOP-REF')).attrib['ID-REF']]
nrcList = dataObjectElement.find('COMPU-METHOD').find('COMPU-INTERNAL-TO-PHYS').find('COMPU-SCALES')
for nrcElem in nrcList:
expectedNrcDict[int(nrcElem.find('UPPER-LIMIT').text)] = nrcElem.find('COMPU-CONST').find('VT').text
except:
pass
pass
negativeResponseFunctionString = negativeResponseFuncTemplate.format(check_negativeResponseFunctionName, start, end, serviceId, nrcPos, expectedNrcDict)
exec(negativeResponseFunctionString)
return locals()[check_negativeResponseFunctionName]
|
# -*- coding: utf-8 -*-
## @file testsuite/python/thresholdTest.py
## @date jan. 2017
## @author PhRG - opticalp.fr
##
## Test the threshold img proc modules
#
# Copyright (c) 2017 Ph. Renaud-Goud / Opticalp
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def myMain(baseDir, testThres):
"""Main function. Run the tests. """
from os.path import join
print("Test the features of the thresholding modules. ")
from instru import *
fac = Factory("DeviceFactory")
print("Retrieved factory: " + fac.name)
print("Create module from CameraFromFilesFactory")
try:
cam = fac.select("camera").select("fromFiles").create("fakeCam")
except RuntimeError as e:
print("Runtime error: {0}".format(e.message))
print("OpenCV is probably not present. Exiting. ")
exit(0)
imgDir = join(baseDir,"resources")
print("Set image file directory to " + imgDir)
cam.setParameterValue("directory", imgDir)
print("Set file to be read: thresholdMe.png")
cam.setParameterValue("files", "thresholdMe.png")
print("Force grayscale images. ")
cam.setParameterValue("forceGrayscale", "ON")
print("create a new fake cam module")
camMask = fac.select("camera").select("fromFiles").create("camMask")
print("Set image file directory to " + imgDir)
camMask.setParameterValue("directory", imgDir)
print("Set file to be read: rectangles.png")
camMask.setParameterValue("files", "rectangles.png")
print("Force grayscale images. ")
cam.setParameterValue("forceGrayscale", "ON")
testThres("absolute", 128)
testThres("population", 0.5)
testThres("mean", 40)
print("End of script thresholdTest.py")
def testThres(thresholdName, thresValue):
"""build workflow and run the tests for a given threshold"""
import time
print("run tests for threshold method: " + thresholdName)
from instru import *
cam = Module("fakeCam")
print("module " + cam.name + " retrieved (" + cam.internalName + ") ")
print("Create a threshold ("+ thresholdName +", no mask) module")
thres = Factory("ImageProcFactory").select("maskGen").select("threshold").select(thresholdName).create()
print("Bind the image of the pseudo-camera to the input of threshold")
bind(cam.outPort("image"), thres.inPort("image"))
print("Attaching a data logger to show the image...")
loggerClasses = dataLoggerClasses() # DataManager::dataLoggerClasses()
print("Available data logger classes: ")
for loggerClass in loggerClasses:
print(" - " + loggerClass + ": " + loggerClasses[loggerClass])
print('Logger creation using the constructor: DataLogger("ShowImageLogger")')
logger = DataLogger("ShowImageLogger")
print("Logger description: " + logger.description)
# logger.setName("imgShow")
#cam.outPort("image").register(logger)
thres.outPort("binImage").register(logger)
print("Set threshold parameters")
thres.setParameterValue("thresholdValue", thresValue)
thres.setParameterValue("onValue",128)
thres.setParameterValue("lowHigh","high")
print("Run...")
runModule(cam)
waitAll()
print(str(thres.outPort("count").getDataValue()*100/thres.outPort("totalCount").getDataValue()) +
"% pixels where thresholded")
time.sleep(1) # wait 1s in order to show the image
print("Change thresholding to low")
thres.setParameterValue("lowHigh","low")
print("Re-run...")
runModule(cam)
waitAll()
print(str(thres.outPort("count").getDataValue()*100/thres.outPort("totalCount").getDataValue()) +
"% pixels where thresholded")
time.sleep(1) # wait 1s in order to show the image
print("Retrieve the mask fake cam module")
camMask = Module("camMask")
print("Bind the image of the second pseudo-camera to the mask input of thres")
bind(camMask.outPort("image"), thres.inPort("mask"))
print("Reset threshold parameters")
thres.setParameterValue("thresholdValue", thresValue)
thres.setParameterValue("onValue",255)
thres.setParameterValue("lowHigh","high")
print("Run...")
runModule(cam)
runModule(camMask)
waitAll()
print(str(thres.outPort("count").getDataValue()*100/thres.outPort("totalCount").getDataValue()) +
"% pixels where thresholded")
time.sleep(1) # wait 1s in order to show the image
print("change threshold parameters")
thres.setParameterValue("onValue",128)
print("Re-run...")
runModule(cam)
runModule(camMask)
waitAll()
print(str(thres.outPort("count").getDataValue()*100/thres.outPort("totalCount").getDataValue()) +
"% pixels where thresholded")
time.sleep(1) # wait 1s in order to show the image
print("unbind threshold input")
unbind(thres.inPort("image"))
unbind(thres.inPort("mask"))
# main body
import sys
import os
from os.path import dirname
if len(sys.argv) >= 1:
# probably called from InstrumentAll
checker = os.path.basename(sys.argv[0])
if checker == "instrumentall" or checker == "instrumentall.exe":
print("current script: " + os.path.realpath(__file__))
baseDir = dirname(dirname(__file__))
        print(globals())
        print(locals())
myMain(baseDir, testThres)
exit(0)
print("Presumably not called from InstrumentAll >> Exiting...")
exit("This script has to be launched from inside InstrumentAll")
|
"""Created by sgoswami on 9/1/17."""
"""Given a binary tree
struct TreeLinkNode {
TreeLinkNode *left;
TreeLinkNode *right;
TreeLinkNode *next;
}
Populate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL.
Initially, all next pointers are set to NULL.
For example,
Given the following perfect binary tree,
1
/ \
2 3
/ \ / \
4 5 6 7
After calling your function, the tree should look like:
1 -> NULL
/ \
2 -> 3 -> NULL
/ \ / \
4->5->6->7 -> NULL"""
import collections
# Definition for binary tree with next pointer.
class TreeLinkNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
self.next = None
class Solution:
# @param root, a tree link node
# @return nothing
def connect(self, root):
if not root:
return
queue = collections.deque()
queue.appendleft(root)
queue.appendleft('#')
curr_list = []
while len(queue) > 0:
curr = queue.pop()
if curr == '#':
if len(queue) > 0:
queue.appendleft('#')
count = 1
while count < len(curr_list):
curr_list[count-1].next = curr_list[count]
count += 1
                if curr_list:  # guard: curr_list is empty after the last (leaf) level has been processed
                    curr_list[-1].next = None
curr_list = []
continue
else:
if curr.left:
curr_list.append(curr.left)
queue.appendleft(curr.left)
if curr.right:
curr_list.append(curr.right)
queue.appendleft(curr.right)
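# A minimal self-check sketch (added for illustration; not part of the original solution): build the
# perfect binary tree from the example above, run connect(), and print each level's next-pointer chain.
if __name__ == '__main__':
    nodes = {i: TreeLinkNode(i) for i in range(1, 8)}
    nodes[1].left, nodes[1].right = nodes[2], nodes[3]
    nodes[2].left, nodes[2].right = nodes[4], nodes[5]
    nodes[3].left, nodes[3].right = nodes[6], nodes[7]
    Solution().connect(nodes[1])
    # Expected: "1 -> NULL", "2 -> 3 -> NULL", "4 -> 5 -> 6 -> 7 -> NULL"
    for start in (1, 2, 4):
        node = nodes[start]
        chain = []
        while node:
            chain.append(str(node.val))
            node = node.next
        print(' -> '.join(chain) + ' -> NULL')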
|