#!/usr/bin/env python3
from gi.repository import Gtk
import os
import os.path
from sys_handler import language_dictionary
# Folders used by the installer.
tmp = "/tmp/.gbi/"
installer = "/usr/local/lib/gbi/"
query = "sh /usr/local/lib/gbi/backend-query/"
if not os.path.exists(tmp):
os.makedirs(tmp)
logo = "/usr/local/lib/gbi/logo.png"
langfile = '%slanguage' % tmp
lang_dictionary = language_dictionary()
# Text to be replaced by the per-language files.
title = "Welcome To GhostBSD!"
welltext = """Select the language you want to use with GhostBSD."""
class Language:
# On selection, overwrite the default language file.
def Language_Selection(self, tree_selection):
model, treeiter = tree_selection.get_selected()
if treeiter is not None:
value = model[treeiter][0]
self.language = lang_dictionary[value]
return
def Language_Columns(self, treeView):
cell = Gtk.CellRendererText()
column = Gtk.TreeViewColumn(None, cell, text=0)
column_header = Gtk.Label('Language')
column_header.set_use_markup(True)
column_header.show()
column.set_widget(column_header)
column.set_sort_column_id(0)
treeView.append_column(column)
return
def save_selection(self):
lang_file = open(langfile, 'w')
lang_file.writelines(self.language)
lang_file.close()
return
# Initial definition.
def __init__(self):
# Add a Default vertical box
self.vbox1 = Gtk.VBox(False, 0)
self.vbox1.show()
# Add a grid to lay out the page
grid = Gtk.Grid()
self.vbox1.pack_start(grid, True, True, 0)
grid.set_row_spacing(10)
grid.set_column_spacing(3)
grid.set_column_homogeneous(True)
grid.set_row_homogeneous(True)
grid.set_margin_left(10)
grid.set_margin_right(10)
grid.set_margin_top(10)
grid.set_margin_bottom(10)
# Adding a Scrolling Window
sw = Gtk.ScrolledWindow()
sw.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
# Add a tree store and fill it with the languages.
store = Gtk.TreeStore(str)
for line in lang_dictionary:
store.append(None, [line])
treeView = Gtk.TreeView(store)
treeView.set_model(store)
treeView.set_rules_hint(True)
self.Language_Columns(treeView)
tree_selection = treeView.get_selection()
tree_selection.set_mode(Gtk.SelectionMode.SINGLE)
tree_selection.connect("changed", self.Language_Selection)
sw.add(treeView)
sw.show()
grid.attach(sw, 1, 2, 1, 9)
# add text in a label.
vhbox = Gtk.VBox(False, 0)
vhbox.set_border_width(10)
vhbox.show()
self.wellcome = Gtk.Label('<span size="xx-large"><b>' + title + '</b></span>')
self.wellcome.set_use_markup(True)
self.wellcometext = Gtk.Label(welltext)
self.wellcometext.set_use_markup(True)
table = Gtk.Table()
# table.attach(self.wellcome, 0, 1, 1, 2)
# wall = Gtk.Label()
# table.attach(wall, 0, 1, 2, 3)
table.attach(self.wellcometext, 0, 1, 3, 4)
vhbox.pack_start(table, False, False, 5)
image = Gtk.Image()
image.set_from_file(logo)
image.show()
grid.attach(self.wellcome, 1, 1, 3, 1)
vhbox.pack_start(image, True, True, 5)
grid.attach(vhbox, 2, 2, 2, 9)
grid.show()
return
def get_model(self):
return self.vbox1
|
import webbrowser
import os
import socket
from urllib.parse import urlparse
from plugin import plugin, alias, require
FILE_PATH = os.path.abspath(os.path.dirname(__file__))
@require(network=True)
@alias("open website")
@plugin("website")
class OpenWebsite:
"""
This plugin will open a website using some parameters.
The user can open a simple website giving a complete link or
inputting the name of the website like the examples:
> open website www.google.com
> open website github
> open website github username
You can find a csv file with a list of saved websites at:
Jarvis/jarviscli/data/websites.csv
{Alternatively, you can also use only 'website'
instead of 'open website'}
"""
def __call__(self, jarvis, link):
inputs = link.split(' ')
self.main_link = inputs[0]
self.complement = False
if len(inputs) > 1:
self.complement = inputs[1]
if self.has_on_saved_links():
webbrowser.open(self.main_link)
elif self.verify_link():
webbrowser.open(self.main_link)
else:
jarvis.say("Sorry, I can't open this link.")
def has_on_saved_links(self):
websites_csv = \
open(os.path.join(FILE_PATH, "../data/websites.csv"), 'r')
for website in websites_csv:
website = website.rstrip() # remove newline
information = website.split(',')
if self.main_link == information[0]:
if self.complement:
if len(information) > 2:
self.main_link = \
information[1] + information[2] + self.complement
else:
self.main_link = information[1] + self.complement
else:
self.main_link = information[1]
return True
return False
def verify_link(self):
self.fix_link()
domain = urlparse(self.main_link).netloc
try:
socket.getaddrinfo(domain, 80)
except socket.gaierror:
return False
return True
def fix_link(self):
if not self.main_link.startswith('http'):
self.main_link = "https://" + self.main_link
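# Illustrative walk-through (the CSV row layout below is an assumption, not taken from the
# real data file): given a saved row like "github,https://github.com/", the command
# "open website github username" resolves through has_on_saved_links() to
# "https://github.com/" + "username", while an unknown input such as "example.org" falls
# back to fix_link() (prepending "https://") plus a DNS lookup in verify_link().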
|
from random import randint
from PIL import Image,ImageDraw
import sys
PRE = "SHOW.png"
if len(sys.argv) == 2:
PRE = sys.argv[1] + ".png"
WIDTH, HEIGHT = 1920,1080
COLOUR = (25,25,255)
image = Image.new("RGB", (WIDTH, HEIGHT), (0,0,0))
pixels = image.load()
pixels[randint(1,WIDTH-2),1] = COLOUR
def check(x,y):
if x < 1 or y < 1 or x > WIDTH-2 or y > HEIGHT-2:
return False
if pixels[x-1,y] == COLOUR:
return True
if pixels[x+1,y] == COLOUR:
return True
if pixels[x,y-1] == COLOUR:
return True
if pixels[x,y+1] == COLOUR:
return True
return False
notComplete = True
while notComplete:
x,y = randint(0,WIDTH-1),HEIGHT-1
while not check(x,y):
r = randint(0,2)
if r == 0:
x-=1
elif r == 1:
x+=1
elif r == 2:
y-=1
if x < 0 or x >= WIDTH or y < 0 or y >= HEIGHT:
break
if check(x,y):
print("%s-> DOT: %d,%d" % (PRE, x, y))
pixels[x,y] = COLOUR
for i in range(WIDTH):
if pixels[i,HEIGHT-2] == COLOUR:
notComplete = False
break
if len(sys.argv) == 2:
image.save(sys.argv[1] + ".png")
else:
image.show()
|
from .base import Base
from deoplete.util import load_external_module
load_external_module(__file__, 'source')
from data import GREEK_DICT
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'greek'
self.mark = '[greek]'
self.min_pattern_length = 1
def gather_candidates(self, context):
return [{'word': k, 'kind': '{}'.format(v)} for (k, v) in GREEK_DICT.items()]
|
"""Pins views."""
# Django REST Framework
from rest_framework import viewsets
# from rest_framework.generics import get_object_or_404
# Model
from pinterest.pin.models import Pin
# Serializers
from pinterest.pin.api.serializers import (
PinModelSerializer,
CreateUpdatePinSerializer,
)
# Permissions
from rest_framework.permissions import IsAuthenticated
# Actions / Utils
from pinterest.utils import (
CustomCreateModelMixin,
CustomRetrieveModelMixin,
CustomListModelMixin,
CustomUpdateModelMixin,
CustomDestroyModelMixin
)
from pinterest.utils.response import CustomActions
class PinViewSet(
CustomCreateModelMixin,
CustomRetrieveModelMixin,
CustomListModelMixin,
CustomUpdateModelMixin,
CustomDestroyModelMixin,
viewsets.GenericViewSet):
"""Pin view set.
CRUD for pins.
"""
custom_actions = CustomActions()
queryset = Pin.objects.all()
serializer_class = PinModelSerializer
def get_permissions(self):
"""Assign permission based on action."""
if self.action in ['create', 'update']:
permissions = [IsAuthenticated]
else:
permissions = []
return [permission() for permission in permissions]
def get_serializer_class(self):
"""Return serializer based on action."""
if self.action in ['create', 'update']:
return CreateUpdatePinSerializer
return PinModelSerializer
def get_queryset(self):
"""Restrict list to public-only."""
if self.action == 'list':
return self.queryset.filter(status='active')
return self.queryset
|
class WellsAndCoppersmith:
@staticmethod
def mag_length_strike_slip(mag):
""" Returns average rupture length for a given magnitude from Wells and Coppersmith
Args:
mag: magnitude
Returns:
rupture_length: average rupture length for a given magnitude in kilometers
"""
return 10**(-3.22+0.69*mag)
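# Usage sketch (illustrative, not part of the original module): the regression returns the
# average rupture length in kilometres, so for a magnitude 7.0 strike-slip event:
# WellsAndCoppersmith.mag_length_strike_slip(7.0)  # 10 ** (-3.22 + 0.69 * 7.0) ≈ 41 km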
|
from django.http import HttpResponse
from django.template import loader
from django.views.decorators.cache import never_cache
@never_cache
def home(request):
"""
Render the homepage template.
This view is never cached so that translations inside the template are always delivered correctly.
"""
template = loader.get_template('weather/home.html')
return HttpResponse(template.render({}, request))
|
# Project
# -----------------------------------------------------------------------------
# Title : Dynamic Programming for Optimal Speedrun Strategy Exploration
# Author : [Twitter] @samuelladoco [Twitch] SLDCtwitch
# Contents: Dynamic programming algorithm (Dijkstra's algorithm for DAG)
# -----------------------------------------------------------------------------
# Import
# -----------------------------------------------------------------------------
from __future__ import annotations
import dataclasses
import queue
#
from level import Level
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
# ----------------------------------------------------------------------
@dataclasses.dataclass(frozen=True, order=True)
class Vertex:
"""A stage of the dynamic program (a vertex of the graph)."""
level: Level
cumulative_num_gems: int
def __repr__(self) -> str:
return f'({self.level.ep_pg}, {self.cumulative_num_gems})'
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
@dataclasses.dataclass(frozen=True)
class Label:
"""A label attached to a stage (graph vertex) of the dynamic program."""
vertex_this: Vertex
label_prev: Label | None
cumulative_time: float
def __repr_label_prev(self) -> str:
return (
'None' if self.label_prev is None else (
f'({self.label_prev.vertex_this}, ' +
f'{self.label_prev.cumulative_time:.2f})'
)
)
def __repr__(self) -> str:
return (
f'vt={self.vertex_this}, ' +
f'lp={self.__repr_label_prev()}, ' +
f'ct={self.cumulative_time:.2f}'
)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
@dataclasses.dataclass(frozen=True)
class OptimizerByDynamicProgramming:
"""
Route optimization by dynamic programming (in this case, Dijkstra's algorithm on a directed acyclic graph)
Parameters
----------
levels : list[Level]
List of levels, ordered from the earliest level onward
max_labels_per_vertex : int
Number of routes to compute (from the 1st best up to at most the max_labels_per_vertex-th best)
max_required_gems : int
Number of gems required by the level with the largest unlock requirement
"""
levels: list[Level]
max_labels_per_vertex: int
max_required_gems: int = dataclasses.field(compare=False)
__labels: dict[Vertex, list[Label]] = dataclasses.field(
init=False, default_factory=dict, compare=False
)
__q: queue.PriorityQueue[Vertex] = dataclasses.field(
init=False, default_factory=queue.PriorityQueue, compare=False)
def solve(self) -> list[tuple[list[Vertex], float]]:
"""Compute the routes."""
# Vertices that can serve as starting points
for num_gems, time in self.levels[0].times.items():
v_start: Vertex = Vertex(self.levels[0], num_gems)
self.__labels[v_start] = [Label(v_start, None, time)]
self.__q.put(v_start)
del v_start
del num_gems, time
# Explore the (level, gem count) vertices in lexicographic order
while self.__q.empty() is False:
vertex_this: Vertex = self.__q.get()
self.__generate_next(vertex_this)
# Build each path by walking the graph backwards from every label of (final level, minimum required gem count)
sols: list[tuple[list[Vertex], float]] = []
v_end: Vertex = Vertex(self.levels[-1], self.max_required_gems)
for label_goal in self.__labels[v_end]:
vs: list[Vertex] = [v_end]
#
label_this: Label = label_goal
while label_this.label_prev is not None:
label_this = label_this.label_prev
vs.append(label_this.vertex_this)
del label_this
#
vs.reverse()
sols.append((vs, label_goal.cumulative_time))
del vs
del label_goal
return sols
def __generate_next(self, vertex_this: Vertex) -> None:
"""Generate the next stages (graph vertices) from the stage vertex_this."""
# For each label of this vertex
for label_this in self.__labels[vertex_this]:
# For each level reachable next and the time needed to move there
for level_next, time_move in vertex_this.level.get_next_levels_and_times().items():
# Skip if the gem count at this vertex is not enough to unlock the next level
if vertex_this.cumulative_num_gems < level_next.num_required_gems:
continue
# For each gem count obtainable in the next level and its clear time
for num_gems_next, time_next in level_next.times.items():
#
# Gems = gems at this vertex + gems collected in the next level
n_g: int = vertex_this.cumulative_num_gems + num_gems_next
# Skip if more gems than needed were collected
if n_g > self.max_required_gems:
continue
#
# Time = cumulative time up to this vertex + travel time to the next level + clear time of the next level
t: float = label_this.cumulative_time + time_move + time_next
#
# Compare against the labels of the (next level, gem count) vertex
vertex_next: Vertex = Vertex(level_next, n_g)
# If the vertex has no labels yet
if vertex_next not in self.__labels.keys():
self.__labels[vertex_next] = [
Label(vertex_next, label_this, t)
]
self.__q.put(vertex_next)
# If labels exist but fewer than the maximum number
elif len(self.__labels[vertex_next]) < self.max_labels_per_vertex:
# To keep the implementation simple, ignore the extra complexity and rely on the built-in sort
self.__labels[vertex_next].append(
Label(vertex_next, label_this, t)
)
self.__labels[vertex_next].sort(
key=lambda x:x.cumulative_time)
# If the maximum number of labels has already been reached
else:
# If this time is shorter than that of the label with the longest cumulative time
if t < self.__labels[vertex_next][-1].cumulative_time:
# To keep the implementation simple, ignore the extra complexity and rely on the built-in sort
self.__labels[vertex_next][-1] = Label(
vertex_next, label_this, t
)
self.__labels[vertex_next].sort(
key=lambda x:x.cumulative_time
)
# Otherwise, keep the existing labels unchanged
else:
pass
del n_g, t, vertex_next
del num_gems_next, time_next
del level_next, time_move
del label_this
return
def __repr_labels(self, labels: list[Label]) -> str:
return "', '".join([str(l) for l in labels])
def __repr__(self) -> str:
s: str = ''
s += f'levels=\n'
s += f'\n'.join([str(l) for l in self.levels]) + '\n'
s += f'mlpv={self.max_labels_per_vertex}\n'
s += f'labels='
if len(self.__labels) > 0:
s += '\n' + f'\n'.join(
[f"{v}: ['{self.__repr_labels(ls)}']"
for v, ls in self.__labels.items()]
) + '\n'
else:
s += '(Empty)'
return s
# ----------------------------------------------------------------------
# -----------------------------------------------------------------------------
|
from datetime import datetime
def regularMonths(currMonth):
splitted = currMonth.split('-')
month = int(splitted[0])
year = int(splitted[1])
found = False
while not found:
month += 1
if month > 12:
month = 1
year += 1
aux = datetime(year, month, 1)
if aux.weekday() == 0:
found = True
return "{:02d}-{}".format(month, year)
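# Usage example: both input and output use the "MM-YYYY" format. regularMonths("09-2021")
# advances month by month until the 1st falls on a Monday; November 1, 2021 is a Monday,
# so the call returns "11-2021".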
|
'''
Make sure the active directory is the directory of the repo when running the test in an IDE
'''
from skimage.data import chelsea
import matplotlib.pyplot as plt
import MTM
print( MTM.__version__ )
import numpy as np
#%% Get image and templates by cropping
image = chelsea()
template = image[80:150, 130:210]
listTemplates = [template]
#%% Perform matching
#listHit = MTM.findMatches(image, listTemplates)
#listHit = MTM.findMatches(image, listTemplates, nObjects=1) # there should be 1 top hit per template
finalHits = MTM.matchTemplates(image,
listTemplates,
score_threshold=0.6,
maxOverlap=0,
nObjects=10)
print("Found {} detections".format(len(finalHits)))
print (np.array(finalHits)) # better formatting with array
#%% Display matches
MTM.plotDetections(image, finalHits, showLegend = True)
|
# __BEGIN_LICENSE__
# Copyright (C) 2008-2010 United States Government as represented by
# the Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# __END_LICENSE__
import django.views.generic.simple
from django.conf.urls import * # pylint: disable=W0401
from django.views.generic import RedirectView
urlpatterns = patterns('geocamTalk.views',
url(r'^$', RedirectView.as_view(url='messages'),
{'permanent': False},
name='geocamTalk_home'),
url(r'^register$', 'register',
name='geocamTalk_register_c2dm'),
url(r'^unregister$', 'unregister',
name='geocamTalk_unregister_c2dm'),
url(r'^messages/create.json$', 'create_message_json',
name='geocamTalk_create_message_json'),
url(r'^messages/create$', 'create_message',
name='geocamTalk_create_message'),
url(r'^messages/clear$', 'clear_messages',
name='geocamTalk_clear_messages'),
url(r'^messages/details/(?P<message_id>\d+).json$', 'message_details_json',
name="geocamTalk_message_details_json"),
url(r'^messages/details/(?P<message_id>\d+)$', 'message_details',
name="geocamTalk_message_details"),
url(r'^messages/(?P<recipient_username>[^ ]+)/(?P<author_username>[^ ]+).json$', 'feed_messages',
name="geocamTalk_message_list_to_from_json"),
url(r'^messages/(?P<recipient_username>[^ ]+).json$', 'feed_messages',
name="geocamTalk_message_list_author_json"),
url(r'^messages.json$', 'feed_messages',
name="geocamTalk_message_list_all_json"),
url(r'^messages/(?P<recipient_username>[^ ]+)/(?P<author_username>[^ ]+)$', 'message_list',
name="geocamTalk_message_list_to_from"),
url(r'^messages/(?P<recipient_username>[^ ]+)$', 'message_list',
name="geocamTalk_message_list_to"),
url(r'^messages/(?P<recipient_username>[^ ]+)$', 'message_list',
name="geocamTalk_message_list_author"),
url(r'^messages$', 'message_list',
name="geocamTalk_message_list_all"),
url(r'^map$', 'message_map',
name="geocamTalk_message_map"),
url(r'^messages\.kml$', 'feed_messages_kml',
{'readOnly': True},
name='geocamTalk_message_list_all_kml'),
)
|
# Generated by Django 2.2.4 on 2019-09-05 22:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userpart',
name='qty',
field=models.PositiveIntegerField(default=0),
),
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-14 19:33
from __future__ import unicode_literals
from django.db import migrations
import location_field.models.spatial
class Migration(migrations.Migration):
dependencies = [
('place', '0002_place_city'),
]
operations = [
migrations.RemoveField(
model_name='place',
name='lat',
),
migrations.RemoveField(
model_name='place',
name='lng',
),
migrations.AddField(
model_name='place',
name='lat_lng',
field=location_field.models.spatial.LocationField(null=True),
preserve_default=False,
),
]
|
def key(arg):
return arg
|
# -*- coding:utf-8 -*-
"""
@file interrupt.py
@brief Interrupt-based motion detection: when the sensor's motion intensity exceeds the threshold,
@n an interrupt pulse is generated on int1 or int2 (select the pin with set_int1_event() or set_int2_event())
@n When using SPI, the chip-select pin can be changed by modifying the value of RASPBERRY_PIN_CS
@copyright Copyright (c) 2010 DFRobot Co.Ltd (http://www.dfrobot.com)
@licence The MIT License (MIT)
@author [fengli](li.feng@dfrobot.com)
@version V1.0
@date 2021-01-16
@get from https://www.dfrobot.com
@https://github.com/DFRobot/DFRobot_IIS
"""
import sys
sys.path.append("../") # set system path to top
from DFRobot_IIS2ICLX import *
import time
INT1 = 26 #Interrupt pin (BCM numbering: 26)
int_pad_Flag = False #interrupt pad flag
def int_pad_callback(status):
global int_pad_Flag
print("Activity detected")
int_pad_Flag = True
#To drive this module over SPI, uncomment the two lines below and wire the module to the Raspberry Pi via SPI
#RASPBERRY_PIN_CS = 27 #Chip-select pin when SPI is selected (BCM numbering: 27)
#acce = DFRobot_IIS2ICLX_SPI(RASPBERRY_PIN_CS)
#To drive this module over I2C, use the three lines below and wire the module to the Raspberry Pi via I2C
I2C_BUS = 0x01 #default use I2C1
ADDRESS = 0x6B #Sensor I2C address
acce = DFRobot_IIS2ICLX_I2C(I2C_BUS, ADDRESS)
# set int_Pad to input
GPIO.setup(INT1, GPIO.IN)
#set int_Pad interrupt callback
GPIO.add_event_detect(INT1,GPIO.RISING,int_pad_callback)
#Chip initialization
acce.begin()
acce.reset()
#Get chip id
print('chip id :%x'%acce.get_id())
'''
Set the sensor measurement range:
RANGE_500MG #±500mg
RANGE_3G #±3g
RANGE_1G #±1g
RANGE_2G #±2g
'''
acce.set_range(acce.RANGE_2G)
'''
Set the sensor data collection rate:
Available values: RATE_0HZ //0hz
RATE_12HZ5 //12.5hz
RATE_26HZ //26hz
RATE_52HZ //52hz
RATE_104HZ //104hz
RATE_208HZ //208hz
RATE_416HZ //416hz
RATE_833HZ //833hz
'''
acce.set_data_rate(acce.RATE_833HZ)
'''
Filter path selection
Available values: HPF //high-pass digital filter
SLOPE //slope filter
'''
acce.set_filter_Path(acce.SLOPE)
#Enable the sensor interrupt
acce.enable_interrupt(True)
#Set the motion-detection threshold in g; the value must lie within the measurement range
acce.set_wakeup_threshold(0.3)
'''
Select the interrupt event generated on the INT1 pin
Available values: WAKEUP //wake-up event
SINGLE_TAP //single-tap event
DOUBLE_TAP //double-tap event
MOTION //stationary/motion detection
'''
acce.set_int1_event(acce.MOTION)
#acce.set_int2_event(acce.MOTION)
'''
Set the wake-up duration. After this time, if the acceleration in both directions stays
below the wake-up threshold, the module enters sleep mode.
dur: duration, range 0 ~ 15
time = dur * (512/rate) (unit: s)
| Example of the linear relationship between the parameter and the time                  |
|-----------------------------------------------------------------------------------------|
| Data rate | 12.5 Hz                  | 26 Hz                   | 52 Hz                  |
|-----------------------------------------------------------------------------------------|
| time      | dur*(512/12.5) ≈ dur*41s | dur*(512/26) ≈ dur*20s  | dur*(512/52) ≈ dur*10s |
|-----------------------------------------------------------------------------------------|
'''
acce.set_wake_up_dur(dur = 0)
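#For example, with the 833 Hz data rate selected above, dur = 1 would correspond to roughly
#512 / 833 ≈ 0.6 s of wake-up duration; dur = 0 corresponds to 0 s.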
time.sleep(0.1)
while True:
if(int_pad_Flag == True):
#An interrupt was triggered; check whether an activity event was detected
time.sleep(0.01)
free_fall = acce.act_detected()
if free_fall == True:
print("Activity detected")
time.sleep(0.2)
int_pad_Flag = False
|
#This program is an item counter
def main():
#open the file
user_file = open('filename.txt', 'r')
count = 0 #This counts the number of items in the file
for line in user_file:
count += 1
user_file.close()
print('The total number of items in the file =',count)
main()
|
"""Tests for the Meteoclimatic component."""
|
# Generated by Django 3.1 on 2020-12-06 09:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classes', '0006_auto_20201205_2224'),
]
operations = [
migrations.RemoveField(
model_name='classsyllabus',
name='components',
),
migrations.AddField(
model_name='classsyllabus',
name='assignment',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='classsyllabus',
name='practical',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='classsyllabus',
name='project',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='classsyllabus',
name='theory',
field=models.BooleanField(default=False),
),
]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 5 21:36:09 2018
Python2 code for Landscape Modeling class exercise 3 - Part 2
Finite difference method using forward in time and centered in space
@author: nadine
"""
#%% PART 2 - FINITE DIFF SOLUTION
import numpy as np
import matplotlib.pyplot as plt
import os
degree = u"\u00b0" # unicode symbol for degree for labeling plots nicely
#%% Try with explicit equation
## define variables - simply for starters
#Qm = 45 * .001 # heat flux in W/m^2, mantle heat flux = bottom boundary condition
#k = 2.5 # thermal conductivity in W/(m-K)
#kappa = 1 * 1e-6 # thermal diffusivity in m^2/sec
#period = 60*60*24*365.25 # one year of seconds
#
#dz = 0.1 # meter, try making this smaller
#zmax = 1. # meters
#z = np.arange(0.,zmax+dz,dz) # depth array in meters
#
#years = 1. # number of years to run for
#dt = (period/365.)* .01 # one day of seconds
#tmax = period * years # years
#t = np.arange(0.,tmax+dt,dt) # time array in days
#
#T = np.ndarray(shape=(len(t),len(z)), dtype=float)
#
#plt.axvline(0,0,15,color='k',linestyle='--') # plot line at X = 0
##plt.ylim(0,15)
##plt.xlim(-25,5,5)
#plt.gca().invert_yaxis() # invert Y axis
#plt.xlabel('Temperature ('+degree+'C)')
#plt.ylabel('Depth (m)')
#
## BOUNDARY CONDITIONS --> bottom Q and top T
## try smaller time steps
## is the indexing correct for plotting??
#
#for n in range(len(t)-1):
# for i in range(len(z)-1):
# T[(n+1),i] = T[n,i] + (dt*kappa)*((T[n,(i+1)]-(2*T[n,i])+T[n,(i-1)])/(dz**2))
# if n % 10 == 0:
# plt.plot(T[n,:],z,linewidth=0.5)
#%% Try again with other method (dXdy arrays)
# set variables
years = 1 # number of years to run for
plots = years*100 # plot every "plots" timestep geotherm
k = 2.5 # thermal conductivity in W/(m-K)
kappa = 1e-6 # thermal diffusivity in m^2/sec
rhoc = k/kappa # calculate rho*c based on values of k and kappa
period = 60*60*24*365.25 # one year of seconds
Qm = 45 * .001 # heat flux in W/m^2, mantle heat flux = bottom boundary condition
MAT = -10 # mean annual temp at the surface in Celsius
dT = 15 # annual +/- change in temp in Celsius
dt = (period / 365 ) * .01 # timestep: one day times a multiplier to keep dt small
tmax = period * years # max time = 1 year * number of years set above
t = np.arange(0,tmax,dt) # time array
dz = 0.1 # depth step in meters
zmax = 20.0 # max depth
z = np.arange(0,zmax+dz,dz) # depth array
#%% second try run loop with dXdy arrays
os.system("rm tmp*.png") # remove any tmp.png files so don't get erroneous frames in our movie
# initialize arrays
Ts = np.zeros(len(t)) # surface temp array
T = np.ndarray(shape=(len(z),len(t)), dtype=float) # temp/geotherm array
Q = np.ndarray(shape=(len(z),len(t)), dtype=float) # Q / heat flux array
dTdz = np.ndarray(shape=(len(z)-1,len(t)), dtype=float)
dQdz = np.ndarray(shape=(len(z)-1,len(t)), dtype=float)
dTdt = np.ndarray(shape=(len(z)-1,len(t)), dtype=float)
Q[-1,:] = Qm # apply mantle heat flux (Qm) as bottom boundary condition for all time
# set up plot for movie
plt.axvline(0,0,15,color='k',linestyle='--')
plt.ylim(0,20)
plt.xlim(-25,5,5)
plt.gca().invert_yaxis()
plt.xlabel('Temperature ('+degree+'C)')
plt.ylabel('Depth (m)')
# finite diff loop through all time t
for i in range(len(t)-1):
Ts[i] = MAT + dT * np.sin(((2*np.pi*t[i]))/period) # calculate surface temperature (Ts) for time i
T[0,i] = Ts[i] # apply surface temp as top boundary condition (z = 0) for time i
dTdz[:,i] = np.diff(T[:,i]) / dz # calculate T gradient with depth
Q[:-1,i] = -k * dTdz[:,i] # calculate heat content at every depth
dQdz[:,i] = np.diff(Q[:,i]) / dz # heat flow gradient
dTdt[:,i] = (-1/rhoc) * dQdz[:,i] # temp gradient with time
T[1:,i+1] = T[1:,i] + (dTdt[:,i]*dt) # write T
if i % plots == 0: # plot at every plots timestep
plt.plot(T[:,i],z) # plot T for all depth at time i
plt.text(-20, 12.5, 'time (days):'+ str((i/100))) # add label for total time =
plt.savefig('tmp'+str(i/plots)+'.png',bbox_inches="tight",dpi=150) # save plot for movie
# make a movie with ffmpeg!
#fps = 35
#os.system("rm movie.mp4") # remove a previous movie.mp4 file so don't get overwrite problems
#os.system("ffmpeg -r 35 -pattern_type sequence -i tmp'%d'.png -vcodec mpeg4 movie.mp4")
#os.system("rm tmp*.png")
#%% first try run loop with dXdy arrays
# starts ok for first .01 of a year, then goes crazy
#num = 50
#z = np.linspace(0,zmax+dz,num)
z = np.arange(0,zmax+dz,dz)
#Q = np.zeros(num)
Q = np.zeros(len(z))
Q[-1] = Qm # bottom boundary condition, Qm = mantle heat flow
dt = (period / 365 ) * .01 # timestep: one day times a multiplier to keep dt small
tmax = period * years # max time = 1 year * number of years set above
t = np.arange(0,0.01*tmax,dt)
#T = np.ones(num)
T = np.ones(len(z))
#T = np.ndarray(shape=(len(z),len(t)), dtype=float)
#dTdz = np.zeros(num-1)
#dQdz = np.zeros(num-1)
#dTdt = np.zeros(num-1)
dTdz = np.zeros(len(z)-1)
dQdz = np.zeros(len(z)-1)
dTdt = np.zeros(len(z)-1)
for n in range(len(t)):
for i in range(len(z)):
Ts = MAT + dT * np.sin(((2*np.pi*t[n]))/period) # sinusoidal equation for surface temp
T[0] = Ts # top boundary condition
dTdz = np.diff(T) / dz
Q[:-1] = -k * dTdz
dQdz = np.diff(Q) / dz
dTdt = -(1/rhoc) * dQdz
T[1:] = T[1:] + (dTdt * dt)
if n % 1000 == 0:
plt.plot(T,z)
plt.axvline(0,0,15,color='k',linestyle='--')
plt.ylim(0,20)
plt.xlim(-25,5,5)
plt.gca().invert_yaxis()
plt.xlabel('Temperature ('+degree+'C)')
plt.ylabel('Depth (m)')
plt.show()
#%% Myelene's loop code
#for i in range(0,len(time)-1):
# T[0,i] = T_bound[i] # make boundary condition temperature surface temperature for time i
# Q[-1,i] = 30e-3 # geothermal gradient in degrees C per meter
# dTdz[:,i] = np.divide(np.diff(T[:,i]),dz) # calculate T gradient with depth
# Q[:-1,i] = -k*dTdz[:,i] # calculate heat content at every depth
# dQdz[:,i] = np.divide(np.diff(Q[:,i]),dz) # heat flow gradient
# dTdt[:,i] = np.multiply(-1/(roh*c),dQdz[:,i]) # temp gradient with time
# T[1:,i+1] = np.add(T[1:,i],dTdt[:,i]*dt) # write T
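#%% Stability note (added aside, not part of the original exercise)
# The explicit FTCS scheme above is stable only when dt <= dz**2 / (2 * kappa).
# With dz = 0.1 m and kappa = 1e-6 m^2/s that limit is 0.01 / 2e-6 = 5000 s, while
# dt = (period / 365) * 0.01 is roughly 865 s, so the chosen time step satisfies the
# criterion. Note that in the "first try" loop the inner `for i in range(len(z))` pass
# re-applies the whole vectorized update len(z) times per outer step, so the simulation
# advances much faster than the time array t suggests while Ts stays fixed at t[n].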
|
import logging
import sys
from collections import namedtuple
from queue import Empty
from time import sleep
from types import GeneratorType
from bonobo.config import create_container
from bonobo.config.processors import ContextCurrifier
from bonobo.constants import NOT_MODIFIED, BEGIN, END, TICK_PERIOD, Token, Flag, INHERIT
from bonobo.errors import InactiveReadableError, UnrecoverableError, UnrecoverableTypeError
from bonobo.execution.contexts.base import BaseContext
from bonobo.structs.inputs import Input
from bonobo.util import get_name, isconfigurabletype, ensure_tuple
from bonobo.util.bags import BagType
from bonobo.util.statistics import WithStatistics
logger = logging.getLogger(__name__)
UnboundArguments = namedtuple('UnboundArguments', ['args', 'kwargs'])
class NodeExecutionContext(BaseContext, WithStatistics):
def __init__(self, wrapped, *, parent=None, services=None, _input=None, _outputs=None):
"""
The node execution context is responsible for storing the state of a transformation during its execution.
:param wrapped: wrapped transformation
:param parent: parent context, most probably a graph context
:param services: dict-like collection of services
:param _input: input queue (optional)
:param _outputs: output queues (optional)
"""
BaseContext.__init__(self, wrapped, parent=parent)
WithStatistics.__init__(self, 'in', 'out', 'err', 'warn')
# Services: how we'll access external dependencies
if services:
if self.parent:
raise RuntimeError(
'Having services defined both in GraphExecutionContext and child NodeExecutionContext is not supported, for now.'
)
self.services = create_container(services)
else:
self.services = None
# Input / Output: how the wrapped node will communicate
self.input = _input or Input()
self.outputs = _outputs or []
# Types
self._input_type, self._input_length = None, None
self._output_type = None
# Stack: context decorators for the execution
self._stack = None
def __str__(self):
return self.__name__ + self.get_statistics_as_string(prefix=' ')
def __repr__(self):
name, type_name = get_name(self), get_name(type(self))
return '<{}({}{}){}>'.format(type_name, self.status, name, self.get_statistics_as_string(prefix=' '))
def start(self):
"""
Starts this context, a.k.a the phase where you setup everything which will be necessary during the whole
lifetime of a transformation.
The "ContextCurrifier" is in charge of setting up a decorating stack, that includes both services and context
processors, and will call the actual node callable with additional parameters.
"""
super().start()
try:
initial = self._get_initial_context()
self._stack = ContextCurrifier(self.wrapped, *initial.args, **initial.kwargs)
if isconfigurabletype(self.wrapped):
# It is not normal to have a partially configured object here, so let's warn the user instead of
# letting them struggle to figure that out by themselves.
raise TypeError(
'Configurables should be instantiated before execution starts.\nGot {!r}.\n'.format(self.wrapped)
)
self._stack.setup(self)
except Exception:
# Set the logging level to the lowest possible, to avoid double log.
self.fatal(sys.exc_info(), level=0)
# We raise again, so the error is not ignored out of execution loops.
raise
def loop(self):
"""
The actual infinite loop for this transformation.
"""
logger.debug('Node loop starts for {!r}.'.format(self))
while self.should_loop:
try:
self.step()
except InactiveReadableError:
break
except Empty:
sleep(TICK_PERIOD) # XXX: How do we determine this constant?
continue
except (
NotImplementedError,
UnrecoverableError,
):
self.fatal(sys.exc_info()) # exit loop
except Exception: # pylint: disable=broad-except
self.error(sys.exc_info()) # does not exit loop
except BaseException:
self.fatal(sys.exc_info()) # exit loop
logger.debug('Node loop ends for {!r}.'.format(self))
def step(self):
"""
A single step in the loop.
Basically gets an input bag, send it to the node, interpret the results.
"""
# Pull and check data
input_bag = self._get()
# Sent through the stack
results = self._stack(input_bag)
# self._exec_time += timer.duration
# Put data onto output channels
if isinstance(results, GeneratorType):
while True:
try:
# if kill flag was set, stop iterating.
if self._killed:
break
result = next(results)
except StopIteration:
# That's not an error, we're just done.
break
else:
# Push data (in case of an iterator)
self._send(self._cast(input_bag, result))
elif results:
# Push data (returned value)
self._send(self._cast(input_bag, results))
else:
# case with no result, an execution went through anyway, use for stats.
# self._exec_count += 1
pass
def stop(self):
"""
Cleanup the context, after the loop ended.
"""
if self._stack:
try:
self._stack.teardown()
except:
self.fatal(sys.exc_info())
super().stop()
def send(self, *_output, _input=None):
return self._send(self._cast(_input, _output))
### Input type and fields
@property
def input_type(self):
return self._input_type
def set_input_type(self, input_type):
if self._input_type is not None:
raise RuntimeError('Cannot override input type, already have {!r}.'.format(self._input_type))
if type(input_type) is not type:
raise UnrecoverableTypeError('Input types must be regular python types.')
if not issubclass(input_type, tuple):
raise UnrecoverableTypeError('Input types must be subclasses of tuple (and act as tuples).')
self._input_type = input_type
def get_input_fields(self):
return self._input_type._fields if self._input_type and hasattr(self._input_type, '_fields') else None
def set_input_fields(self, fields, typename='Bag'):
self.set_input_type(BagType(typename, fields))
### Output type and fields
@property
def output_type(self):
return self._output_type
def set_output_type(self, output_type):
if self._output_type is not None:
raise RuntimeError('Cannot override output type, already have {!r}.'.format(self._output_type))
if type(output_type) is not type:
raise UnrecoverableTypeError('Output types must be regular python types.')
if not issubclass(output_type, tuple):
raise UnrecoverableTypeError('Output types must be subclasses of tuple (and act as tuples).')
self._output_type = output_type
def get_output_fields(self):
return self._output_type._fields if self._output_type and hasattr(self._output_type, '_fields') else None
def set_output_fields(self, fields, typename='Bag'):
self.set_output_type(BagType(typename, fields))
### Attributes
def setdefault(self, attr, value):
try:
getattr(self, attr)
except AttributeError:
setattr(self, attr, value)
def write(self, *messages):
"""
Push a message list to this context's input queue.
:param messages: the messages to push
"""
for message in messages:
if isinstance(message, Token):
self.input.put(message)
elif self._input_type:
self.input.put(ensure_tuple(message, cls=self._input_type))
else:
self.input.put(ensure_tuple(message))
def write_sync(self, *messages):
self.write(BEGIN, *messages, END)
for _ in messages:
self.step()
def error(self, exc_info, *, level=logging.ERROR):
self.increment('err')
super().error(exc_info, level=level)
def fatal(self, exc_info, *, level=logging.CRITICAL):
self.increment('err')
super().fatal(exc_info, level=level)
self.input.shutdown()
def get_service(self, name):
if self.parent:
return self.parent.services.get(name)
return self.services.get(name)
def _get(self):
"""
Read from the input queue.
If Queue raises (like Timeout or Empty), stat won't be changed.
"""
input_bag = self.input.get()
# Store or check input type
if self._input_type is None:
self._input_type = type(input_bag)
elif type(input_bag) is not self._input_type:
raise UnrecoverableTypeError(
'Input type changed between calls to {!r}.\nGot {!r} which is not of type {!r}.'.format(
self.wrapped, input_bag, self._input_type
)
)
# Store or check input length, which is a soft fallback in case we're just using tuples
if self._input_length is None:
self._input_length = len(input_bag)
elif len(input_bag) != self._input_length:
raise UnrecoverableTypeError(
'Input length changed between calls to {!r}.\nExpected {} but got {}: {!r}.'.format(
self.wrapped, self._input_length, len(input_bag), input_bag
)
)
self.increment('in') # XXX should that go before type check ?
return input_bag
def _cast(self, _input, _output):
"""
Transforms a pair of input/output into the real slim output.
:param _input: Bag
:param _output: mixed
:return: Bag
"""
tokens, _output = split_token(_output)
if NOT_MODIFIED in tokens:
return ensure_tuple(_input, cls=(self.output_type or tuple))
if INHERIT in tokens:
if self._output_type is None:
self._output_type = concat_types(self._input_type, self._input_length, self._output_type, len(_output))
_output = _input + ensure_tuple(_output)
return ensure_tuple(_output, cls=(self._output_type or tuple))
def _send(self, value, _control=False):
"""
Sends a message to all of this context's outputs.
:param mixed value: message
:param _control: if true, won't count in statistics.
"""
if not _control:
self.increment('out')
for output in self.outputs:
output.put(value)
def _get_initial_context(self):
if self.parent:
return UnboundArguments((), self.parent.services.kwargs_for(self.wrapped))
if self.services:
return UnboundArguments((), self.services.kwargs_for(self.wrapped))
return UnboundArguments((), {})
def isflag(param):
return isinstance(param, Flag)
def split_token(output):
"""
Split an output into token tuple, real output tuple.
:param output:
:return: tuple, tuple
"""
output = ensure_tuple(output)
flags, i, len_output, data_allowed = set(), 0, len(output), True
while i < len_output and isflag(output[i]):
if output[i].must_be_first and i:
raise ValueError('{} flag must be first.'.format(output[i]))
if i and output[i - 1].must_be_last:
raise ValueError('{} flag must be last.'.format(output[i - 1]))
if output[i] in flags:
raise ValueError('Duplicate flag {}.'.format(output[i]))
flags.add(output[i])
data_allowed &= output[i].allows_data
i += 1
output = output[i:]
if not data_allowed and len(output):
raise ValueError('Output data provided after a flag that does not allow data.')
return flags, output
def concat_types(t1, l1, t2, l2):
t1, t2 = t1 or tuple, t2 or tuple
if t1 == t2 == tuple:
return tuple
f1 = t1._fields if hasattr(t1, '_fields') else tuple(range(l1))
f2 = t2._fields if hasattr(t2, '_fields') else tuple(range(l2))
return BagType('Inherited', f1 + f2)
|
__author__="KOLANICH"
__license__="Unlicense"
__copyright__=r"""
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org/>
"""
class SilentList(list):
"""A list automatically extending to fit the key. Behaves like a dict but is a list. Called silent because a usual list raises on out-of-bounds access."""
def __getitem__(self, key):
if key < len(self):
return super().__getitem__(key)
else:
return None
def __setitem__(self, key, value):
maxLenNeeded=key+1
count=maxLenNeeded-len(self)
super().extend(count*[None])
return super().__setitem__(key, value)
class CustomBaseList(SilentList):
"""A list whose starting index is not zero."""
base=1
def idxSub(self, index, another):
if isinstance(index, slice):
start=index.start
stop=index.stop
step=index.step
if start is None:
start = self.base
if stop is None:
stop=-1
start=self.idxSub(start, another)
stop=self.idxSub(stop, another)
index=type(index)(start, stop, step)
else:
if index>0:
index-=another
return index
def __getitem__(self, key):
return super().__getitem__(self.idxSub(key, self.base))
def __setitem__(self, key, value):
return super().__setitem__(self.idxSub(key, self.base), value)
def __enumerate__(self):
raise NotImplementedError()
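# Minimal usage sketch (illustrative, not part of the original module):
# s = SilentList([1, 2, 3])
# s[10]        # returns None instead of raising IndexError
# s[5] = "x"   # the list is silently padded with None up to index 5
# c = CustomBaseList([10, 20, 30])
# c[1]         # returns 10, because indexing starts at base == 1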
|
#
# Facharbeit2021WatchCat: this is the bot that controls the alarm system and acts as the interface between user and program
# So basically THE pink elephant among the pink elephants
#
# v3.2
#
import discord # As always, the Discord lib
import asyncio # For asynchronous methods
import time # For timing
import subprocess # For shell commands
import threading # For running methods in parallel
import Facharbeit2021CaptureAndScan as coreprog # The core of the alarm system
from multiprocessing import Process, Queue # Similar to threading; used exclusively for recording, since threading does not work in that combination
core = coreprog.core()
praefix = "/"
cmdGetImg ="ffmpeg -loglevel fatal -rtsp_transport tcp -i \"rtsp://GpSJkxsh:A8CH8Q5ubY8S6FT3@192.168.178.111:554/live/ch1\" -r 1 -vframes 1 stream.png -y"
streamChID = 898391213554663556
statusChID = 898994828271042631
runstream = False
detec = 898190093804781589 # Channel ID
everyoneID=898189948803481660 # ID of the everyone role
pathToPrediction = "predictions.jpg"
token = "TOKEN" # Bot token
def getPassword():
pswd = open("passwd.txt" , "r")
passwrd = pswd.readline().strip()
pswd.close()
return passwrd
def setPassword(pPasswd):
pswd = open("passwd.txt", "w")
pswd.write(pPasswd)
pswd.close()
return
async def cleanChannel(pCount, pChannel):
async for message in pChannel.history(limit=pCount):
if (message.id != 899046028085428224 and message.id != 898391768888909824):
await message.delete()
def handleRecord(pTime): # These are not the Jedi... err, not the method you are looking for!
p1 = Process(target=core.record)# No really, this solution is ugly, but it is the best one I could find
p1.start()
p2 = Process(target=timeToRecord, args=(pTime,))
p2.start()
return
def timeToRecord(pTime):
time.sleep(pTime)
core.stopRecord()
def kill():
k = subprocess.Popen("pgrep vlc", stdout=subprocess.PIPE, shell=True)
output, err = k.communicate()
try:
rip = subprocess.Popen("kill -9 " + str(int(output)), stdout=subprocess.PIPE, shell=True)
rip.wait()
except ValueError:
pass
k2 = subprocess.Popen("pgrep python3", stdout=subprocess.PIPE, shell=True)
output, err = k2.communicate()
processes = str(output).strip()
processes = processes.replace("b","")
processes = processes.replace("'","")
processes = processes.split("\\n")
i = len(processes)-1
while i >=0:
try:
rip2 = subprocess.Popen("kill -9 " + str(int(processes[i-1])), stdout=subprocess.PIPE, shell=True)
rip2.wait()
except ValueError:
pass
i=i-1
exit()
#_____________________$$______#
#__$s_______s$__________$$____#
#_$$$_$$$_$$$$__________$$____#
#_$$$O$$$$O$$$___________$$___#
#_$$$$$=Y=$$$$___________$$___#
#__$$$$$$$$$$____________$$___#
#_____$$$$___$$$$$$$$$__$$____#
#______$$$$$$$$$$$$$$$$$$_____#
#____$$$$$$$$$$$$$$$$$$$______#
#____$$_$$_$$$$$$_$$_$$_______#
#__$$__$$_________$$__$$______#
#(($$ ((($$$$___((($$$$_$$$$__#
class MyBot(discord.Client):
runstream = False
queue = Queue()
main = Process(target=core.main, args=[queue])
async def stream(self):
while runstream == True:
p = subprocess.Popen(cmdGetImg, stdout=subprocess.PIPE, shell=True)
p.wait()
streamCh = client.get_channel(streamChID)
await streamCh.send(file=discord.File("stream.png"))
async for oldmsg in streamCh.history(limit=3):
if (oldmsg.id != 898391768888909824 and oldmsg != oldmsg.channel.last_message):
await oldmsg.delete()
return
#TODO: find a way to run this in parallel
async def on_ready(self):
print("I have logged in. Beep bop bup.")
statusCh = client.get_channel(statusChID)
await cleanChannel(1, statusCh)
await statusCh.send("```diff\nThe System is: \n+ active\n```")
self.main.start() # starts the core program
async def on_message(self, message):
global runstream
global statusChID
global everyoneID
if (message.content.startswith(praefix+"help")):
help1 = f"\
This is a list of all commands:\n\
+ {praefix}help => Shows this help page\n\
+ {praefix}changePassword PASSWORD NEW_PASSWORD => Changes the password\n\
+ {praefix}clean COUNT => Deletes the last COUNT messages in the channel the command was sent in\n\
+ {praefix}start stream => Streams live images into the \#stream channel\n\
+ {praefix}stop stream => Stops the livestream\n\
+ {praefix}record TIME => Records a video for TIME minutes and saves it on the analysis PC\n\
+ {praefix}disarm PASSWORD => Disarms the alarm system / turns the alarm off\n\
+ {praefix}arm PASSWORD => Arms the alarm system\n"
await message.channel.send("```diff\n" + help1 + "```")
help2 = f"\
```fix\n\
O {praefix}restart PASSWORD => Restarts the system HANDLE_WITH_CARE\n\
```"
await message.channel.send(help2)
help3 = f"\
- {praefix}start PASSWORD => Starts the system ADMIN_USAGE_ONLY\n\
- {praefix}kill PASSWORD => Completely deactivates the system ADMIN_USAGE_ONLY\n"
await message.channel.send("```diff\n"+help3+"```")
if (message.content.startswith(praefix+"changePassword "+getPassword())):
input = message.content
splitinput = input.split(" ")
if (splitinput[1]==getPassword()):
setPassword(splitinput[2])
await message.channel.send("New password is:" + getPassword() + "\nThis Chat will be deleted in 3 seconds")
time.sleep(3)
await cleanChannel(2, message.channel)
elif (message.content == praefix+"changePassword".rstrip()):
await message.channel.send("Please enter: old_password new_password\nTry again in 5 seconds")
time.sleep(5)
await cleanChannel(2, message.channel)
elif(message.content.startswith(praefix+"changePassword ")):
await message.channel.send("Something went wrong please try again\nThis Chat will be deleted in 3 seconds")
time.sleep(3)
await cleanChannel(2, message.channel)
if(message.content.startswith(praefix+"clean ")):
input = message.content
splitinput = input.split(" ")
await cleanChannel(int(splitinput[1].rstrip()), message.channel)
if(message.content.startswith(praefix+"start ")):
input = message.content
splitinput = input.split(" ")
if (splitinput[1]=="stream"):
runstream = True
# Run the stream() coroutine concurrently with the rest of the bot
asyncio.create_task(self.stream())
if(message.content.startswith(praefix+"stop ")):
input = message.content
splitinput = input.split(" ")
if (splitinput[1]=="stream"):
runstream = False
if(message.content.startswith(praefix+"record ")):
input = message.content
splitinput = input.split(" ")
handleRecord(int(splitinput[1])*60)
if(message.content.startswith(praefix+"disarm " + getPassword())):
await cleanChannel(1, message.channel)
await message.channel.send("accepted!")
time.sleep(1)
await cleanChannel(1, message.channel)
core.disarmAlarm(self.queue) # tells disarm which instance the core process runs in and passes the queue for communication
statusCh = client.get_channel(statusChID)
everyone = discord.utils.get(client.get_channel(detec).guild.roles, id=everyoneID)
await cleanChannel(1, statusCh)
await statusCh.send("```diff\nThe System is: \n- inactive\n```")
if(message.content.startswith(praefix+"arm " + getPassword())):
await cleanChannel(1, message.channel)
await message.channel.send("accepted!")
await message.channel.send("The System will be active again in 5 seconds")
time.sleep(3)
await cleanChannel(2, message.channel)
core.armAlarm(self.queue) # tells arm which instance the core process runs in and passes the queue for communication
statusCh = client.get_channel(statusChID)
await cleanChannel(1, statusCh)
await statusCh.send("```diff\nThe System is: \n+ active\n```")
if(message.content.startswith(praefix+"kill " + getPassword())):
statusCh = client.get_channel(statusChID)
everyone = discord.utils.get(client.get_channel(detec).guild.roles, id=everyoneID)
await cleanChannel(1, statusCh)
await cleanChannel(1, message.channel)
await statusCh.send("```diff\nThe System is: \n- inactive\n```")
kill()
if __name__ == '__main__':
logger = subprocess.Popen("python3 Logger.py", stdout=subprocess.PIPE, shell=True)
client = MyBot()
client.run(token)
|
from datetime import datetime
from optparse import OptionParser
import pymysql
from dateutil.relativedelta import relativedelta
def main(options):
curr = datetime.now()
print("Current time : ", curr)
partitionQuery = "SELECT PARTITION_NAME FROM information_schema.partitions WHERE TABLE_SCHEMA = '{db}' " \
"AND TABLE_NAME = '{table}' AND PARTITION_NAME IS NOT NULL"
removeQuery = "ALTER TABLE `{table}` DROP PARTITION `{part}`"
addQuery = "ALTER TABLE `{table}` ADD PARTITION (PARTITION `{part}` VALUES LESS THAN ({func}('{dt}')))"
db_conn = pymysql.connect(host=options.host, port=int(options.port), user=options.user,
password=options.password, database=options.db)
db_cur = db_conn.cursor(pymysql.cursors.DictCursor)
sql = partitionQuery.format(db=options.db, table=options.table)
db_cur.execute(sql)
tbl_partitions = db_cur.fetchall()
remove_partitions = []
add_partitions = []
if options.basis == "daily":
if options.removePart:
remove = datetime.now() - relativedelta(days=options.removePart, hour=0, minute=0, second=0)
remove_partition = remove.strftime("p_%Y%m%d")
for partition in tbl_partitions:
if partition['PARTITION_NAME'] <= remove_partition:
remove_partitions.append(partition['PARTITION_NAME'])
for i in range(options.addPart):
part_name = 'p_' + (datetime.now() + relativedelta(days=i + 1, hour=0, minute=0, second=0)).strftime(
"%Y%m%d")
time_lt = (datetime.now() + relativedelta(days=i + 2, hour=0, minute=0, second=0)).strftime("%Y-%m-%d")
add_partitions.append((part_name, time_lt))
elif options.basis == "monthly":
if options.removePart:
remove = datetime.now() - relativedelta(months=options.removePart, day=1, hour=0, minute=0, second=0)
remove_partition = remove.strftime("p_%Y%m%d")
for partition in tbl_partitions:
if partition['PARTITION_NAME'] <= remove_partition:
remove_partitions.append(partition['PARTITION_NAME'])
for i in range(options.addPart):
part_name = 'p_' + (datetime.now() + relativedelta(months=i + 1, hour=0, minute=0, second=0)).strftime(
"%Y%m%d")
time_lt = (datetime.now() + relativedelta(months=i + 2, hour=0, minute=0, second=0)).strftime("%Y-%m-%d")
add_partitions.append((part_name, time_lt))
else:
return
for remove in remove_partitions:
sql = removeQuery.format(table=options.table, part=remove)
db_cur.execute(sql)
print("remove partition %s" % remove)
for part_name, time_lt in add_partitions:
sql = addQuery.format(table=options.table, func=options.func, part=part_name, dt=time_lt)
db_cur.execute(sql)
print("add partition %s" % part_name)
db_cur.close()
db_conn.close()
print("Complete Work")
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-H", "--host", action="store", type="string", dest="host",
help="MySQL Server Address")
parser.add_option("-P", "--port", action="store", type="string", dest="port",
help="MySQL port")
parser.add_option("-d", "--database", action="store", type="string", dest="db",
help="MySQL database name")
parser.add_option("-t", "--table", action="store", type="string", dest="table",
help="MySQL Table")
parser.add_option("-u", "--user", action="store", type="string", dest="user",
help="MySQL User Name")
parser.add_option("-p", "--password", action="store", type="string", dest="password",
help="MySQL User Password")
parser.add_option("-a", "--add", action="store", type="int", dest="addPart",
help="number of future partitions to add", default=0)
parser.add_option("-r", "--remove", action="store", type="int", dest="removePart",
help="remove partitions older than this many days/months", default=0)
parser.add_option("-f", "--func", action="store", type="choice", dest="func", default="TO_DAYS",
choices=['TO_DAYS', 'UNIX_TIMESTAMP'], help="TO_DAYS: DATE, DATETIME, UNIX_TIMESTAMP: TIMESTAMP")
parser.add_option("-b", "--basis", action="store", type="choice", dest="basis", default="daily",
choices=['daily', 'monthly'], help="time basis")
(args_options, _) = parser.parse_args()
try:
main(args_options)
except Exception as e:
print(e)
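# Example invocation (all values are placeholders and the script name is assumed):
#   python partition_rotate.py -H 127.0.0.1 -P 3306 -d mydb -t events -u admin -p secret \
#          -a 7 -r 30 -f TO_DAYS -b daily
# would pre-create the next 7 daily partitions and drop those dated 30 or more days back.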
|
from ._version import get_versions
from .base import Client
from .config import Configuration
from .exceptions import QuetzalAPIException
__version__ = get_versions()['version']
del get_versions
__all__ = (
'__version__',
'Client',
'Configuration',
'QuetzalAPIException',
)
|
# Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sys
import os
import shutil
import tempfile
from tank_test.tank_test_base import *
import tank
from tank.errors import TankError
from tank.platform import application
from tank.platform import constants
from tank.template import Template
from tank.deploy import descriptor
class TestApplication(TankTestBase):
"""
General fixtures class for testing Toolkit apps
"""
def setUp(self):
"""
Fixtures setup
"""
super(TestApplication, self).setUp()
self.setup_fixtures()
# set up a path to the folder above the app folder
# this is so that the breakdown and all its frameworks can be loaded in
# it is assumed that you have all the dependent packages in a structure under this
# root point, like this:
#
# bundle_root
# |
# |- tk-multi-breakdown
# |- tk-framework-qtwidgets
# \- tk-framework-shotgunutils
#
#
os.environ["BUNDLE_ROOT"] = os.path.abspath(os.path.join( os.path.dirname(__file__), "..", ".."))
# set up a standard sequence/shot/step and run folder creation
self.seq = {"type": "Sequence",
"id": 2,
"code": "seq_code",
"project": self.project}
self.shot = {"type": "Shot",
"id": 1,
"code": "shot_code",
"sg_sequence": self.seq,
"project": self.project}
self.step = {"type": "Step",
"id": 3,
"code": "step_code",
"entity_type": "Shot",
"short_name": "step_short_name"}
self.task = {"type": "Task",
"id": 23,
"entity": self.shot,
"step": self.step,
"project": self.project}
# Add these to mocked shotgun
self.add_to_sg_mock_db([self.shot, self.seq, self.step, self.project, self.task])
# run folder creation for the shot
self.tk.create_filesystem_structure(self.shot["type"], self.shot["id"])
# now make a context
context = self.tk.context_from_entity(self.shot["type"], self.shot["id"])
# and start the engine
self.engine = tank.platform.start_engine("test_engine", self.tk, context)
def tearDown(self):
"""
Fixtures teardown
"""
# engine is held as global, so must be destroyed.
cur_engine = tank.platform.current_engine()
if cur_engine:
cur_engine.destroy()
# important to call base class so it can clean up memory
super(TestApplication, self).tearDown()
class TestApi(TestApplication):
"""
Tests for the Breakdown App's API interface
"""
def setUp(self):
"""
Fixtures setup
"""
super(TestApi, self).setUp()
# short hand for the app
self.app = self.engine.apps["tk-multi-breakdown"]
# set up some test data
self.test_path_1 = os.path.join(self.project_root,
"sequences",
self.seq["code"],
self.shot["code"],
self.step["short_name"],
"publish",
"foo.v003.ma")
self.test_path_2 = os.path.join(self.project_root,
"sequences",
self.seq["code"],
self.shot["code"],
self.step["short_name"],
"publish",
"foo.v004.ma")
fh = open(self.test_path_1, "wt")
fh.write("hello")
fh.close()
fh = open(self.test_path_2, "wt")
fh.write("hello")
fh.close()
# this will be read by our hook so push
# it out into env vars...
os.environ["TEST_PATH_1"] = self.test_path_1
os.environ["TEST_PATH_2"] = self.test_path_2
def test_analyze_scene(self):
"""
Tests the analyze_scene method
"""
scene_data = self.app.analyze_scene()
self.assertEqual(len(scene_data), 1)
item = scene_data[0]
self.assertEqual(item["fields"], {'Shot': 'shot_code',
'name': 'foo',
'Sequence': 'seq_code',
'Step': 'step_short_name',
'version': 3,
'maya_extension': 'ma',
'eye': '%V'})
self.assertEqual(item["node_name"], "maya_publish")
self.assertEqual(item["node_type"], "TestNode")
self.assertEqual(item["template"], self.tk.templates["maya_shot_publish"])
self.assertEqual(item["sg_data"], None)
def test_compute_highest_version(self):
"""
Tests the version computation logic
"""
scene_data = self.app.analyze_scene()
item = scene_data[0]
# test logic
self.assertEqual(self.app.compute_highest_version(item["template"], item["fields"]), 4)
# test bad data
self.assertRaises(TankError,
self.app.compute_highest_version,
self.tk.templates["maya_asset_publish"],
item["fields"])
def test_update(self):
"""
Test scene update
"""
scene_data = self.app.analyze_scene()
item = scene_data[0]
# increment version
fields = item["fields"]
fields["version"] = 4
# clear temp location where hook writes to
tank._hook_items = None
# execute hook
self.app.update_item(item["node_type"], item["node_name"], item["template"], fields)
# check result
self.assertEqual(len(tank._hook_items), 1)
self.assertEqual(tank._hook_items[0]["node"], "maya_publish")
self.assertEqual(tank._hook_items[0]["path"], self.test_path_2)
self.assertEqual(tank._hook_items[0]["type"], "TestNode")
|
from maneuvers.kit import *
from maneuvers.strikes.dodge_strike import DodgeStrike
class DodgeShot(DodgeStrike):
max_base_height = 220
def intercept_predicate(self, car: Car, ball: Ball):
max_height = align(car, ball, self.target) * 60 + self.max_base_height
contact_ray = ball.wall_nearby(max_height)
return (
norm(contact_ray.start) > 0
and ball.pos[2] < max_height + 50
and (Arena.inside(ball.pos, 100) or distance(ball, self.target) < 1000)
and abs(car.pos[0]) < Arena.size[0] - 300
)
def configure(self, intercept: Intercept):
super().configure(intercept)
ball = intercept.ball
target_direction = ground_direction(ball, self.target)
hit_dir = ground_direction(ball.vel, target_direction * 4000)
self.arrive.target = intercept.ground_pos - hit_dir * 100
self.arrive.target_direction = hit_dir
|
import _plotly_utils.basevalidators
class HoveronValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name='hoveron', parent_name='parcats', **kwargs):
super(HoveronValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'plot'),
role=kwargs.pop('role', 'info'),
values=kwargs.pop('values', ['category', 'color', 'dimension']),
**kwargs
)
|
#
# PySNMP MIB module IB-DNSONE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IB-DNSONE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:50:37 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ibDNSOne, IbString = mibBuilder.importSymbols("IB-SMI-MIB", "ibDNSOne", "IbString")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
IpAddress, ObjectIdentity, MibIdentifier, Integer32, Unsigned32, enterprises, Gauge32, Counter32, Counter64, TimeTicks, NotificationType, ModuleIdentity, Bits, iso, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "ObjectIdentity", "MibIdentifier", "Integer32", "Unsigned32", "enterprises", "Gauge32", "Counter32", "Counter64", "TimeTicks", "NotificationType", "ModuleIdentity", "Bits", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ibDnsModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1))
ibDnsModule.setRevisions(('2010-03-23 00:00', '2005-06-09 00:00', '2005-01-10 00:00', '2004-05-21 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ibDnsModule.setRevisionsDescriptions(('Fixed smilint errors', 'DNS views', 'Added copyright', 'Creation of the MIB file',))
if mibBuilder.loadTexts: ibDnsModule.setLastUpdated('201003230000Z')
if mibBuilder.loadTexts: ibDnsModule.setOrganization('Infoblox')
if mibBuilder.loadTexts: ibDnsModule.setContactInfo('Please See IB-SMI-MIB.')
if mibBuilder.loadTexts: ibDnsModule.setDescription('This file defines the Infoblox DNS One MIB.')
ibZoneStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 1), )
if mibBuilder.loadTexts: ibZoneStatisticsTable.setStatus('current')
if mibBuilder.loadTexts: ibZoneStatisticsTable.setDescription('A table of named ZONE statistics.')
ibZoneStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 1, 1), ).setIndexNames((0, "IB-DNSONE-MIB", "ibBindZoneName"))
if mibBuilder.loadTexts: ibZoneStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts: ibZoneStatisticsEntry.setDescription('A conceptual row of the ibZoneStatisticsEntry containing info about a particular zone in the default view.')
ibBindZoneName = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 1, 1, 1), IbString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZoneName.setStatus('current')
if mibBuilder.loadTexts: ibBindZoneName.setDescription("DNS Zone name. The first one is global summary statistics. Index name for global statistics is 'summary'. All zones live in the default view.")
ibBindZoneSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 1, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZoneSuccess.setStatus('current')
if mibBuilder.loadTexts: ibBindZoneSuccess.setDescription('Number of Successful responses since DNS process started.')
ibBindZoneReferral = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 1, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZoneReferral.setStatus('current')
if mibBuilder.loadTexts: ibBindZoneReferral.setDescription('Number of DNS referrals since DNS process started.')
ibBindZoneNxRRset = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 1, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZoneNxRRset.setStatus('current')
if mibBuilder.loadTexts: ibBindZoneNxRRset.setDescription('Number of DNS query received for non-existent record.')
ibBindZoneNxDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 1, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZoneNxDomain.setStatus('current')
if mibBuilder.loadTexts: ibBindZoneNxDomain.setDescription('Number of DNS query received for non-existent domain.')
ibBindZoneRecursion = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 1, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZoneRecursion.setStatus('current')
if mibBuilder.loadTexts: ibBindZoneRecursion.setDescription('Number of Queries received using recursion since DNS process started.')
ibBindZoneFailure = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 1, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZoneFailure.setStatus('current')
if mibBuilder.loadTexts: ibBindZoneFailure.setDescription('Number of Failed queries since DNS process started.')
ibZonePlusViewStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 2), )
if mibBuilder.loadTexts: ibZonePlusViewStatisticsTable.setStatus('current')
if mibBuilder.loadTexts: ibZonePlusViewStatisticsTable.setDescription('A table of named ZONE+VIEW statistics.')
ibZonePlusViewStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 2, 1), ).setIndexNames((0, "IB-DNSONE-MIB", "ibBindViewName"), (0, "IB-DNSONE-MIB", "ibBindZonePlusViewName"))
if mibBuilder.loadTexts: ibZonePlusViewStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts: ibZonePlusViewStatisticsEntry.setDescription('A conceptual row of the ibZonePlusViewStatisticsEntry containing info about a particular zone in a particular view.')
ibBindZonePlusViewName = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 2, 1, 1), IbString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZonePlusViewName.setStatus('current')
if mibBuilder.loadTexts: ibBindZonePlusViewName.setDescription("DNS Zone name. The first one in the default view is the global summary statistics. Index name for global statistics is 'summary'.")
ibBindZonePlusViewSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 2, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZonePlusViewSuccess.setStatus('current')
if mibBuilder.loadTexts: ibBindZonePlusViewSuccess.setDescription('Number of Successful responses since DNS process started.')
ibBindZonePlusViewReferral = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 2, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZonePlusViewReferral.setStatus('current')
if mibBuilder.loadTexts: ibBindZonePlusViewReferral.setDescription('Number of DNS referrals since DNS process started.')
ibBindZonePlusViewNxRRset = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 2, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZonePlusViewNxRRset.setStatus('current')
if mibBuilder.loadTexts: ibBindZonePlusViewNxRRset.setDescription('Number of DNS query received for non-existent record.')
ibBindZonePlusViewNxDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 2, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZonePlusViewNxDomain.setStatus('current')
if mibBuilder.loadTexts: ibBindZonePlusViewNxDomain.setDescription('Number of DNS query received for non-existent domain.')
ibBindZonePlusViewRecursion = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 2, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZonePlusViewRecursion.setStatus('current')
if mibBuilder.loadTexts: ibBindZonePlusViewRecursion.setDescription('Number of Queries received using recursion since DNS process started.')
ibBindZonePlusViewFailure = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 2, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZonePlusViewFailure.setStatus('current')
if mibBuilder.loadTexts: ibBindZonePlusViewFailure.setDescription('Number of Failed queries since DNS process started.')
ibBindViewName = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 2, 1, 8), IbString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindViewName.setStatus('current')
if mibBuilder.loadTexts: ibBindViewName.setDescription('DNS view name. Empty for default view and summary.')
ibDDNSUpdateStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 3))
ibDDNSUpdateSuccess = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 3, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDDNSUpdateSuccess.setStatus('current')
if mibBuilder.loadTexts: ibDDNSUpdateSuccess.setDescription('Number of successful dynamic DNS update.')
ibDDNSUpdateFailure = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 3, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDDNSUpdateFailure.setStatus('current')
if mibBuilder.loadTexts: ibDDNSUpdateFailure.setDescription('Number of failure dynamic DNS update.')
ibDDNSUpdateReject = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 3, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDDNSUpdateReject.setStatus('current')
if mibBuilder.loadTexts: ibDDNSUpdateReject.setDescription('Number of dynamic DNS update rejects maybe due to permission failure.')
ibDDNSUpdatePrerequisiteReject = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 3, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDDNSUpdatePrerequisiteReject.setStatus('current')
if mibBuilder.loadTexts: ibDDNSUpdatePrerequisiteReject.setDescription('Number of dynamic DNS update rejects due to prerequisite failure.')
ibBindZoneTransferCount = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 3, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibBindZoneTransferCount.setStatus('current')
if mibBuilder.loadTexts: ibBindZoneTransferCount.setDescription('Number of zone transfer.')
mibBuilder.exportSymbols("IB-DNSONE-MIB", ibDnsModule=ibDnsModule, ibBindZonePlusViewRecursion=ibBindZonePlusViewRecursion, ibBindZoneNxDomain=ibBindZoneNxDomain, ibBindViewName=ibBindViewName, ibBindZonePlusViewNxRRset=ibBindZonePlusViewNxRRset, ibZoneStatisticsEntry=ibZoneStatisticsEntry, ibDDNSUpdateStatistics=ibDDNSUpdateStatistics, PYSNMP_MODULE_ID=ibDnsModule, ibBindZoneRecursion=ibBindZoneRecursion, ibZonePlusViewStatisticsEntry=ibZonePlusViewStatisticsEntry, ibBindZonePlusViewReferral=ibBindZonePlusViewReferral, ibZonePlusViewStatisticsTable=ibZonePlusViewStatisticsTable, ibBindZoneNxRRset=ibBindZoneNxRRset, ibDDNSUpdatePrerequisiteReject=ibDDNSUpdatePrerequisiteReject, ibDDNSUpdateSuccess=ibDDNSUpdateSuccess, ibBindZoneTransferCount=ibBindZoneTransferCount, ibBindZoneReferral=ibBindZoneReferral, ibBindZoneSuccess=ibBindZoneSuccess, ibBindZoneFailure=ibBindZoneFailure, ibBindZonePlusViewName=ibBindZonePlusViewName, ibBindZonePlusViewFailure=ibBindZonePlusViewFailure, ibDDNSUpdateFailure=ibDDNSUpdateFailure, ibBindZonePlusViewSuccess=ibBindZonePlusViewSuccess, ibBindZonePlusViewNxDomain=ibBindZonePlusViewNxDomain, ibDDNSUpdateReject=ibDDNSUpdateReject, ibZoneStatisticsTable=ibZoneStatisticsTable, ibBindZoneName=ibBindZoneName)
|
import torch
from mobilenetv3 import MobileNetV3_forFPN, MobileNetV3, load_pretrained_fpn
def test_loaded_weights():
torch.backends.cudnn.deterministic = True
path = '/home/davidyuk/Projects/backbones/pytorch-mobilenet-v3/mobilenetv3_small_67.4.pth.tar'
mn3_fpn = MobileNetV3_forFPN()
mn3_fpn = load_pretrained_fpn(mn3_fpn, path)
mobNetv3 = MobileNetV3(mode='small')
state_dict = torch.load(path, map_location='cpu')
mobNetv3.load_state_dict(state_dict)
mobNetv3 = mobNetv3.features[:12]
for param, base_param in zip(mn3_fpn.parameters(), mobNetv3.parameters()):
assert ((param == base_param).all()), 'params differ'
#print(len(tuple(mn3_fpn.parameters())),len(tuple(mobNetv3.parameters())))
# mobNetv3.eval()
# mn3_fpn.eval()
image = torch.rand(1, 3, 224, 224)
with torch.no_grad():
output = mn3_fpn.forward(image)
output1 = mobNetv3.forward(image)
if (output == output1).all():
print('test passed')
else:
print('test failed')
torch.backends.cudnn.deterministic = False
def compare_output(model1, model2, tensor=torch.rand(1, 3, 64, 64), mode_train=True):
if mode_train:
model1.train(); model2.train()
else:
model1.eval(); model2.eval()
with torch.no_grad():
output1 = model1.forward(tensor)
output2 = model2.forward(tensor)
return output1, output2
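# A hedged usage sketch for compare_output (illustration only): it reuses the
# same checkpoint path and model constructors as test_loaded_weights above and
# checks that the FPN-ready backbone and the truncated reference backbone
# produce identical features in eval mode. It only runs if the checkpoint
# file actually exists at that path.
def _example_compare(path='/home/davidyuk/Projects/backbones/pytorch-mobilenet-v3/mobilenetv3_small_67.4.pth.tar'):
    mn3_fpn = load_pretrained_fpn(MobileNetV3_forFPN(), path)
    reference = MobileNetV3(mode='small')
    reference.load_state_dict(torch.load(path, map_location='cpu'))
    reference = reference.features[:12]
    out_fpn, out_ref = compare_output(mn3_fpn, reference, mode_train=False)
    print('outputs match:', bool((out_fpn == out_ref).all()))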
|
#!/usr/bin/env python
# this scripts calculates the mean coverage and SD in a bam
# USE: add_meanCov_SD.py
# NOTE: change "my_dir" to the directory of interest
# NOTE: elements of this script are ran in depth_TFPD_ins.py
import os
import re
from subprocess import Popen, PIPE
import statistics
my_dir="/lscr2/andersenlab/kml436/round21_Aug19/round19_Aug13"
means=[]
SDs=[]
###CHANGE TO 1 through 8
run_list=[1,2,3,4,5,6,7,8]
for i in run_list:
print "Processing Run:"
print i
run="run_{i}".format(**locals())
bam_file="{my_dir}/{run}_N2/{run}_N2.sorted.bam".format(**locals())
result, err = Popen(["""samtools depth {bam_file}| datamash mean 3 sstdev 3""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
result=result.split('\t')
mean=float(result[0])
SD=float(result[1])
means.append(mean)
SDs.append(SD)
print mean
print SD
mean_of_means=statistics.mean(means)
mean_of_SDs=statistics.mean(SDs)
two_SDS=2*mean_of_SDs
three_SDS=3*mean_of_SDs
four_SDS=4*mean_of_SDs
print(mean_of_means)
print(mean_of_SDs)
print(two_SDS)
print(three_SDS)
print(four_SDS)
|
import numpy as np
from random import randrange, shuffle
class LogisticRegression:
@staticmethod
def sigmoid(scores):
return 1 / (1 + np.exp(-scores))
def __init__(self, learning_rate=1, regularization_loss_tradeoff=1):
self.learning_rate = learning_rate
self.regularization_loss_tradeoff = regularization_loss_tradeoff
def train(self, train, labels, epochs):
w = np.array([0 for _ in range(len(train[0]))])
for _ in range(epochs):
[_, w] = self.train_one_epoch(train, labels, w)
return w
def train_one_epoch(self, train, labels, w):
lr = self.learning_rate
tradeoff = self.regularization_loss_tradeoff
scores = np.dot(train, w)
predictions = self.__class__.sigmoid(scores)
size = len(labels)
output_error_signal = labels - predictions
gradient = np.dot(np.array(train).T, output_error_signal) / size
w = w * (1 - lr) + gradient * lr * tradeoff
return [0, w]
@staticmethod
def predict(x, w):
x = np.array(x)
if np.dot(x, w) < 0:
return -1
else:
return 1
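# A minimal, hedged usage sketch (illustration only, synthetic data): train a
# weight vector on a tiny two-feature dataset with 0/1 labels and apply the
# sign-based predict() helper defined above. Hyperparameters are arbitrary.
if __name__ == '__main__':
    X = [[1.0, 2.0], [2.0, 1.0], [-1.0, -2.0], [-2.0, -1.0]]
    y = np.array([1, 1, 0, 0])
    clf = LogisticRegression(learning_rate=0.1, regularization_loss_tradeoff=1.0)
    w = clf.train(X, y, epochs=100)
    print("learned weights:", w)
    print("prediction for [1.5, 1.5]:", LogisticRegression.predict([1.5, 1.5], w))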
|
from guillotina import configure
from guillotina.db.interfaces import IDBTransactionStrategy
from guillotina.db.interfaces import ITransaction
from guillotina.db.strategies.simple import SimpleStrategy
import logging
logger = logging.getLogger("guillotina")
@configure.adapter(for_=ITransaction, provides=IDBTransactionStrategy, name="dbresolve")
class DBResolveStrategy(SimpleStrategy):
"""
Get us a transaction, but we don't care about voting
"""
async def tpc_vote(self):
return True
@configure.adapter(for_=ITransaction, provides=IDBTransactionStrategy, name="dbresolve_readcommitted")
class DBResolveReadCommittedStrategy(DBResolveStrategy):
"""
Delay starting transaction to the commit phase so reads will be inconsistent.
"""
async def tpc_begin(self):
pass
async def tpc_commit(self):
if self._transaction._tid in (-1, 1, None):
await self.retrieve_tid()
if self._transaction._db_txn is None:
await self._storage.start_transaction(self._transaction)
|
import hashlib
import execjs
from common import http_util,MyLogger
from concurrent.futures import ThreadPoolExecutor
import socket
log = MyLogger.Logger('all.log',level='info')
socket.setdefaulttimeout(30)
'''
avatar_url: "https://pic2.zhimg.com/50/8658418bc_720w.jpg?source=54b3c3a5"
excerpt: "学科该词有以下两种含义:①相对独立的知识体系。人类所有的知识划分为五大门类:自然科学,农业科学,医药科学,工程与技术科学,人文与社会科学。②我国高等学校本科教育专业设置的学科分类,我国高等教育划分为13个学科门类:哲学、经济学、法学、教育学、文学、历史学、理学、工学、农学、医学、军事学、管理学、艺术学。"
id: "19618774"
introduction: "学科该词有以下两种含义:①相对独立的知识体系。人类所有的知识划分为五大门类:自然科学,农业科学,医药科学,工程与技术科学,人文与社会科学。②我国高等学校本科教育专业设置的学科分类,我国高等教育划分为13个学科门类:哲学、经济学、法学、教育学、文学、历史学、理学、工学、农学、医学、军事学、管理学、艺术学。"
is_black: false
is_super_topic_vote: true
is_vote: false
name: "学科"
type: "topic"
url: "http://www.zhihu.com/api/v3/topics/19618774"
'''
with open('g_encrypt.js', 'r') as f:
ctx1 = execjs.compile(f.read(), cwd='F:\\idea_workspace\\github\\python-course-master\\node_modules')
def get_signature(id,limit,offset,cookie):
f = '101_3_2.0+/api/v3/topics/%s/children?limit=%s&offset=%s+"%s"'%(id,limit,offset,http_util.parse_cookie(cookie).get('d_c0'))
# f = '101_3_2.0+/api/v3/topics/%s/children+"%s"'%(id,http_util.parse_cookie(cookie).get('d_c0'))
fmd5 = hashlib.new('md5', f.encode()).hexdigest()
log.log(fmd5)
encrypt_str = "2.0_%s" % ctx1.call('b', fmd5)
log.log(encrypt_str)
return encrypt_str
from faker import Faker
import requests
import json
import time
from lxml import etree
import threading
from threading import Thread
from pymongo import MongoClient
# from hyper.contrib import HTTP20Adapter
threadPool = ThreadPoolExecutor(max_workers=100)
rlock = threading.RLock()
def get_children(id,limit,offset,cookie):
api_url='https://www.zhihu.com/api/v3/topics/%s/children?limit=%s&offset=%s'
url=api_url%(id,limit,offset)
headers = {
'user-agent': Faker().user_agent(),
# 'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
'cookie':cookie,
'x-zse-93':'101_3_2.0',
'x-zse-96':get_signature(id,limit,offset,cookie),
'referer':'https://www.zhihu.com/topic/19776749/hot',
'sec-ch-ua':'"Google Chrome";v="95", "Chromium";v="95", ";Not A Brand";v="99"',
'sec-ch-ua-mobile':'?0',
'sec-ch-ua-platform':'"Windows"',
'sec-fetch-dest':'empty',
'sec-fetch-mode':'cors',
'sec-fetch-site':'same-origin',
'x-ab-param':'top_test_4_liguangyi=1;pf_adjust=0;zr_expslotpaid=1;se_ffzx_jushen1=0;tp_dingyue_video=0;qap_question_visitor= 0;qap_question_author=0;tp_zrec=0;tp_topic_style=0;tp_contents=2;pf_noti_entry_num=0;zr_slotpaidexp=1',
'x-ab-pb':'CtYBhAJDAKED4wR9AtgCVwQUBYsF4wUZBbULCgQzBOkEEgXMAowEfwUYBlYMMQYBC6IDNwWABZsLCwQ0BI0EBwxQA9oEQwVkBOAEUQUPC3UEKQVqAbIFQAY3DHQB8wPgCxEFOQZHADIDcgNFBJ4FTwNXA6ADMwX0C7QK3AsBBioGnwLXAtEE7ApgCzAGOwK5AlYFiQy0AEAB+AOMBRwGzwsbANYEFQVSBdgFUgv0A7EF1wu3Aw4F5AppAcIFFgamBOcFwQQKBkEG9gJCBD8AVQU/BjQMjAIyBRJrABgAAQAAAAAAAAADAAAAAAAAAAABAAAAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAQAAAAsAAAAAAAAACwAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=',
'x-requested-with':'fetch'
}
# sessions=requests.session()
# sessions.mount(url, HTTP20Adapter())
# response=sessions.get(url,headers=headers)
res= {}
try:
response=requests.get(url=url,headers=headers)
response.encoding = 'utf-8'
res=response.text
log.log(res)
response.close()
# time.sleep(1)
if not res:
return []
res=json.loads(res).get('data')
ls=set_parent_id(id, res)
        # Write to MongoDB asynchronously
threadPool.submit(batch_insert_to_mongo,list=res)
del res
return ls
except Exception as e:
log.log("Exception=%s"%e)
return []
def get_all_children(id,limit,offset,cookie):
# for child in child_list:
# child['parentId']=id
# log.log(child)
log.log("get_all_children-id=%s"%id)
res=[]
child_list=get_children(id,limit,offset,cookie)
while child_list and len(child_list)>0:
res.extend(child_list)
offset+=limit
child_list=get_children(id,limit,offset,cookie)
if res and len(res)>0:
log.log('get_all_children-first:'+str(res[0]))
log.log('get_all_children-last:'+str(res[-1]))
return res
def set_parent_id(id, res):
ls=[]
for c in res:
c['parentId'] = str(id)
ls.append(c['id'])
return ls
def batch_insert_to_mongo(list):
for one in list:
with rlock:
insert_mongo(one)
def load_children(id,limit,offset,cookie):
log.log('load_children:id=%s'%id)
child_list=get_all_children(id,limit,offset,cookie)
# if len(child_list)==0:
# return
while len(child_list)>0:
relist=[]
# batch_insert_to_mongo(child_list)
for child in child_list:
if(str(child)=='19776751'):
continue
# load_children(id=child,limit=limit,offset=offset,cookie=cookie)
            # Fetch the next level of children for this node
node_list=get_all_children(child,limit,offset,cookie)
if len(node_list)>0:
relist.extend(node_list)
del child_list
child_list=relist
# task=threadPool.submit(load_children,id=child['id'],limit=limit,offset=offset,cookie=cookie)
return
def insert_mongo(data):
client = MongoClient('mongodb://mozhu:123456@localhost:27017/admin')
db = client.hyj.zhihu_topic5
if db.find_one({'id':data['id'],'parentId':data['parentId']}):
# if db.find_one({'id':data['id']}):
        log.log('already exists')
else:
db.insert_one(data)
        log.log('inserted successfully')
return
def process(id,limit,offset,cookie):
threadlist = []
# for i in range(number):
# t = Thread(target=all_topic_urls, args=(cut_lists[i],))
# t.start()
# threadlist.append(t)
# for thd in threadlist:
# thd.join()
def start(id,limit, offset, cookie):
child_list = get_all_children(id, limit, offset, cookie)
# if len(child_list)==0:
# pass
# batch_insert_to_mongo(child_list)
threadlist = []
relist = []
for child in child_list:
if (str(child) == '19776751'):
continue
        node_list = get_all_children(child, limit, offset, cookie)
if len(node_list) > 0:
relist.extend(node_list)
log.log('relist:len=%s,data=%s' % (len(relist), relist))
    for id in relist:
        t = Thread(target=load_children, args=(id, limit, offset, cookie))
        t.start()
        threadlist.append(t)
return threadlist
# task=threadPool.submit(load_children,id=child['id'],limit=limit,offset=offset,cookie=cookie)
if __name__ == '__main__':
cookie='_zap=7c0c3042-fc21-40e8-ba21-43e86519000a; d_c0="AAARWzrPxhOPTskGcpEuVAsXuVwn2cynUow=|1632539393"; _9755xjdesxxd_=32; YD00517437729195%3AWM_TID=vzdptJ53pPlEAUBEEUM%2F8UOknqvlS0XD; _xsrf=146c7345-3f23-4693-ae0a-87f19399b85c; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1636642498; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1636642498; captcha_session_v2="2|1:0|10:1636642499|18:captcha_session_v2|88:WnlramU4ZGlLRFd4Z2tHVmU3MXNtaE9ZTGNEK25WdS9ERzhoUkNSNkdkaGlOMExVTUROMEgzV1Z6U1Nxc2cveg==|0abacb7e93feff9bf8593008e1df7c8a92896607691f62031eeb0a123b69927f"; gdxidpyhxdE=c0DnZgtBbQxInClVjABCWPl1%5CG1krUPI%2Fr135s0I%2Bf1hKnNkae1zT8TUyimS6LqVjgcyOtaoNmQo8oOdul1IDmnkuZOu5wg%2BugqANWWpMaiZ4H6n5V9YbX%2FQc9d%2BCZHYrzOZBGhVjc0X0eMyiNTcb9Z81VirbyWhViNbjZ%2BxQ4ADISkW%3A1636643402352; YD00517437729195%3AWM_NI=FicaJGoVxfplYJa1HEiJYQuyIwotvFtc4d8Vhrcx%2FiUnLRNRxAtPcKRnzAMcLYm3HlbWgzVRZ2eFKXsMuPzTtRS3P6YPVXCVI4OPAWw2Vp3VDspKwA7DOShXkjyRKivaVVY%3D; YD00517437729195%3AWM_NIKE=9ca17ae2e6ffcda170e2e6eed1ee4b86edb9a3d941b6a88bb7c14b969f8f84ae3fb59cbbadaa63a7b2a9b8b82af0fea7c3b92a86b7ad82c94dad8ab887f3689b96afd2cb4fab9697b2f8738ba9a195f54eb8bdfc85bb5eacaead99ae6397b284b3cd5abc9fbcb1e24192b2b6a2d0699087a3d4cb68a289a99bd55a9aeeaba7ca54b193bbb1f960b88e9dd4d33e9b909ab9c25e8bec81b1c45db5ee9a91b25e88adc0acf64bf59c81b4eb54a9b6b7b3ca63b6bf9ab7dc37e2a3; KLBRSID=0a401b23e8a71b70de2f4b37f5b4e379|1636642512|1636642496'
limit=10
offset=0
    # Root topic id
# id='19776749'
    # The "Metaphysics" topic
# id='19778298'
# load_children(id,limit,offset,cookie)
relist=[]
# for id in ['19778317','19580349','19550912','19778298']:
for id in ['19778317']:
threadlist=start(id,limit, offset, cookie)
relist.extend(threadlist)
for thd in relist:
thd.join()
log.log('done')
|
######################################################
# --- Day 20: Infinite Elves and Infinite Houses --- #
######################################################
import AOCUtils
maxHouses = 1000000
######################################################
presents = AOCUtils.loadInput(20)
houses = dict()
for elf in range(1, presents):
limit = maxHouses
for house in range(elf, limit, elf):
if house not in houses:
houses[house] = 0
houses[house] += 10 * elf
if houses[elf] >= presents:
print("Part 1: {}".format(elf))
break
houses = dict()
for elf in range(1, presents):
limit = min(50 * elf, maxHouses)
for house in range(elf, limit, elf):
if house not in houses:
houses[house] = 0
houses[house] += 11 * elf
if houses[elf] >= presents:
print("Part 2: {}".format(elf))
break
AOCUtils.printTimeTaken()
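# Note (illustration only): for part 1, house h receives 10 * sigma(h) presents,
# where sigma(h) is the sum of the divisors of h. The sieve above accumulates the
# same quantity incrementally; the helper below is an equivalent spot-check for a
# single house, assuming the same puzzle semantics.
def presents_for_house(h):
    total = 0
    for d in range(1, int(h ** 0.5) + 1):
        if h % d == 0:
            total += 10 * d
            if d != h // d:
                total += 10 * (h // d)
    return total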
|
"""
Oscillators.
"""
import souffle.datatypes as dtt
##### Default constants #####
# Brusselator, unstable regime
BRUSS_A = 1.0
BRUSS_B = 3.0
# Lotka-Volterra
LOTKA_ALPHA = 1.5
LOTKA_BETA = 1.0
LOTKA_GAMMA = 2.0
LOTKA_DELTA = 1.0
# van der Pol oscillator
VANDERPOL_MU = 5.0
VANDERPOL_OMEGA = 1.0
#############################
def brusselator(t, X, **kwargs):
"""
The Brusselator.
@type t: number
@param t: current time
@type X: vector
@param X: current state
@rtype: vector
@return: derivative
"""
x = X[0]
y = X[1]
if len(kwargs) == 0:
x_dot = 1 - (BRUSS_B + 1) * x + BRUSS_A * x**2 * y
y_dot = BRUSS_B * x - BRUSS_A * x**2 * y
elif len(kwargs) != 2:
raise ValueError("Bad kwargs; please provide all of the "\
"following parameters: a, b")
else:
x_dot = 1 - (kwargs["b"] + 1) * x + kwargs["a"] * x**2 * y
y_dot = kwargs["b"] * x - kwargs["a"] * x**2 * y
X_dot = [x_dot, y_dot]
return dtt.Vector(X_dot)
def lotka_volterra(t, X, **kwargs):
"""
The Lotka-Volterra ("predator-prey") equations.
We define the following constants:
alpha = growth rate of prey
beta = rate at which predators consume prey
gamma = death rate of predators
delta = rate at which predators increase by consuming prey
The prey population, x, increases at a rate of dx/dt = Ax, but is consumed
by predators at a rate of dx/dt = -Bxy.
The predator population, y, decreases at a rate of dy/dt = -Cy, but
increases at a rate of dy/dt = Dxy.
@type t: number
@param t: current time
@type X: vector
@param X: current state
@rtype: vector
@return: derivative
"""
x = X[0]
y = X[1]
if len(kwargs) == 0:
x_dot = x * (LOTKA_ALPHA - LOTKA_BETA * y)
y_dot = - y * (LOTKA_GAMMA - LOTKA_DELTA * x)
elif len(kwargs) != 4:
raise ValueError("Bad kwargs; please provide all of the "\
"following parameters: alpha, beta, gamma, delta")
else:
x_dot = x * (kwargs["alpha"] - kwargs["beta"] * y)
y_dot = - y * (kwargs["gamma"] - kwargs["delta"] * x)
X_dot = [x_dot, y_dot]
return dtt.Vector(X_dot)
def vanderpol(t, X, **kwargs):
"""
The van der Pol oscillator. This is a non-conservative oscillator, with
nonlinear damping, that shows up in laser physics and electronic circuits.
The system is described by
        d^2x/dt^2 - mu * (1 - x^2) * dx/dt + omega^2 * x = 0
    where mu and omega are some constants.
    Applying the transformation y = dx/dt, we have the equations of motion
        y = dx/dt
        dy/dt = mu * (1 - x^2) * y - omega^2 * x
@type t: number
@param t: current time
@type X: vector
@param X: current state
@rtype: vector
@return: derivative
"""
x = X[0]
y = X[1]
if len(kwargs) == 0:
x_dot = y
y_dot = VANDERPOL_MU * (1 - x**2) * y - VANDERPOL_OMEGA**2 * x
elif len(kwargs) != 2:
raise ValueError("Bad kwargs; please provide all of the "\
"following parameters: mu, omega")
else:
x_dot = y
y_dot = kwargs["mu"] * (1 - x**2) * y - kwargs["omega"]**2 * x
X_dot = [x_dot, y_dot]
return dtt.Vector(X_dot)
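# A hedged usage sketch (illustration only): a few forward-Euler steps of the
# Brusselator with explicit parameters. It assumes souffle's dtt.Vector supports
# item access (the derivative functions above index their input the same way);
# the step size and initial state below are arbitrary.
if __name__ == "__main__":
    dt = 0.01
    steps = 1000
    state = [1.0, 1.0]
    for step in range(steps):
        deriv = brusselator(step * dt, state, a=BRUSS_A, b=BRUSS_B)
        state = [state[0] + dt * deriv[0], state[1] + dt * deriv[1]]
    print("Brusselator state after {} Euler steps: {}".format(steps, state))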
|
import pytest
from anchore_engine.subsys import object_store
from anchore_engine.subsys.object_store.config import (
DEFAULT_OBJECT_STORE_MANAGER_ID,
ALT_OBJECT_STORE_CONFIG_KEY,
)
from anchore_engine.subsys.object_store import migration
from anchore_engine.subsys import logger
from tests.fixtures import anchore_db
from tests.integration.subsys.object_store.conftest import (
test_swift_container,
test_swift_auth_url,
test_swift_user,
test_swift_key,
test_s3_bucket,
test_s3_region,
test_s3_url,
test_s3_key,
test_s3_secret_key,
)
logger.enable_test_logging()
document_1 = b'{"document": {"user_id": "admin", "final_action_reason": "policy_evaluation", "matched_whitelisted_images_rule": false, "matched_blacklisted_images_rule": false}}'
document_json = {
"user_id": "admin",
"final_action_reason": "policy_evaluation",
"matched_whitelisted_images_rule": False,
"created_at": 1522454550,
"evaluation_problems": [],
"last_modified": 1522454550,
"final_action": "stop",
"matched_mapping_rule": {
"name": "default",
"repository": "*",
"image": {"type": "tag", "value": "*"},
"whitelist_ids": ["37fd763e-1765-11e8-add4-3b16c029ac5c"],
"registry": "*",
"id": "c4f9bf74-dc38-4ddf-b5cf-00e9c0074611",
"policy_id": "48e6f7d6-1765-11e8-b5f9-8b6f228548b6",
},
"matched_blacklisted_images_rule": False,
}
test_user_id = "testuser1"
test_bucket_id = "testbucket1"
def add_data():
logger.info("Adding data")
mgr = object_store.get_manager()
for i in range(0, 100):
archiveId = "doc-{}".format(i)
logger.info("Adding document: {}".format(archiveId))
mgr.put_document(
userId="test1",
bucket="testing",
archiveId=archiveId,
data="TESTINGBUCKETDATASMALL".join([str(x) for x in range(100)]),
)
def flush_data():
logger.info("Flushing data")
mgr = object_store.get_manager()
for i in range(0, 100):
archiveId = "doc-{}".format(i)
logger.info("Deleting document: {}".format(archiveId))
mgr.delete_document(userId="test1", bucket="testing", archiveid=archiveId)
def run_test(src_client_config, dest_client_config):
"""
Common test path for all configs to test against
:return:
"""
logger.info(
(
"Running migration test from {} to {}".format(
src_client_config["name"], dest_client_config["name"]
)
)
)
# config = {'services': {'catalog': {'archive': {'compression': {'enabled': False}, 'storage_driver': src_client_config}}}}
config = {"archive": src_client_config}
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
add_data()
src_config = {
"storage_driver": src_client_config,
"compression": {"enabled": False},
}
dest_config = {
"storage_driver": dest_client_config,
"compression": {"enabled": False},
}
migration.initiate_migration(
src_config, dest_config, remove_on_source=True, do_lock=False
)
flush_data()
def test_db_to_db2(anchore_db):
from_config = {"name": "db", "config": {}}
to_config = {"name": "db2", "config": {}}
run_test(from_config, to_config)
def test_db_to_s3(s3_bucket, anchore_db):
from_config = {"name": "db", "config": {}}
to_config = {
"name": "s3",
"config": {
"access_key": test_s3_key,
"secret_key": test_s3_secret_key,
"url": test_s3_url,
"region": test_s3_region,
"bucket": test_s3_bucket,
},
}
run_test(from_config, to_config)
|
# Array operation
# Type: bytearray, map() call. This method requires allocation of
# the same amount of memory as original array (to hold result
# array). On the other hand, input array stays intact.
import bench
def test(num):
for i in iter(range(num//10000)):
arr = bytearray(b"\0" * 1000)
arr2 = bytearray(map(lambda x: x + 1, arr))
bench.run(test)
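# For comparison (illustration only, not part of the original benchmark): the
# same operation done in place on the bytearray avoids allocating a second
# array, at the cost of mutating the input. It could be passed to bench.run()
# in the same way as test() above.
def test_inplace(num):
    for i in iter(range(num//10000)):
        arr = bytearray(b"\0" * 1000)
        for j in range(len(arr)):
            arr[j] += 1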
|
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
from scipy.spatial.distance import cdist
from scipy.special import comb
import time
import math
import copy
import itertools
import imageio
import heapq
import pickle
sns.set()
__author__ = "Chana Ross, Yoel Ross"
__copyright__ = "Copyright 2018"
__credits__ = ["Yoel Ross", "Tamir Hazan", "Erez Karpas"]
__version__ = "1.0"
__maintainer__ = "Chana Ross"
__email__ = "schanaby@campus.technion.ac.il"
__status__ = "Thesis"
# Class definitions
class SearchState:
def __init__(self, carPos, committedCars, eventPos, committedEventsIndex, committedEvents, eventStartTimes ,eventCloseTimes, eventReward, eventPenalty, eventsCanceled, eventsAnswered, heuristicVal, costVal, parent,hWeight):
self.carPos = carPos
self.committedCars = committedCars
self.committedEventsIndex = committedEventsIndex
self.hWeight = hWeight
self.eventPos = eventPos
self.committedEvents = committedEvents
self.eventStartTimes = eventStartTimes
self.eventCloseTimes = eventCloseTimes
self.eventReward = eventReward
self.eventPenalty = eventPenalty
        self.eventsCanceled = eventsCanceled  # true if the event was closed without being answered; starts as all false
        self.eventsAnswered = eventsAnswered  # true if the event has been answered; starts as all false
self.time = parent.time+1 if parent is not None else 0 # time is one step ahead of parent
self.hval = heuristicVal
self.gval = costVal
self.parent = parent # predecessor in graph
        self.root = parent is None  # true if this state is the root, false otherwise
return
def __lt__(self, other):
# make sure comparison is to SearchState object
try:
assert (isinstance(other, SearchState))
except:
raise TypeError("must compare to SearchState object.")
# return lt check
return self.getFval() < other.getFval()
def __eq__(self, other):
# make sure comparison is to SearchState object
try:
assert(isinstance(other, SearchState))
except:
raise TypeError("must compare to SearchState object.")
# check
carEq = np.array_equal(self.carPos, other.carPos)
carComEq = np.array_equal(self.committedCars,other.committedCars)
eventComEq = np.array_equal(self.committedEvents,other.committedEvents)
eveEq = np.array_equal(self.eventPos,other.eventPos)
        etmEq = np.array_equal(self.eventStartTimes, other.eventStartTimes)
comEveIndex = np.array_equal(self.committedEventsIndex,other.committedEventsIndex)
etcEq = np.array_equal(self.eventCloseTimes,other.eventCloseTimes)
sttEq = np.array_equal(self.eventsCanceled, other.eventsCanceled)
astEq = np.array_equal(self.eventsAnswered,other.eventsAnswered)
timEq = self.time == other.time
return carEq and eveEq and etcEq and etmEq and eventComEq and carComEq and sttEq and comEveIndex and astEq and timEq
def __repr__(self):
return "time: {0}, cost: {1}, heuristic: {2}, root: {3}, goal: {4}\n".format(self.time,
self.gval,
self.hval,
self.root,
self.goalCheck())
def __hash__(self):
carPosVec = np.reshape(self.carPos, self.carPos.size)
evePosVec = np.reshape(self.eventPos, self.eventPos.size)
eveComVec = np.reshape(self.committedEvents, self.committedEvents.size)
carComVec = np.reshape(self.committedCars,self.committedCars.size)
comEveIndex = np.reshape(self.committedEventsIndex,self.committedEventsIndex.size)
eveSttVec = self.eventsCanceled.astype(np.int32)
eveAnStVec = self.eventsAnswered.astype(np.int32)
stateTime = np.reshape(np.array(self.time), 1)
hv = np.hstack([carPosVec , evePosVec , comEveIndex , eveSttVec ,eveAnStVec , stateTime, carComVec, eveComVec])
return hash(tuple(hv))
def goalCheck(self):
# if all events have been answered or canceled, we have reached the goal
return np.sum(np.logical_or(self.eventsAnswered,self.eventsCanceled)) == self.eventsAnswered.size
def path(self):
current = self
p = []
while current is not None:
if not current.root:
p.append(current)
current = current.parent
else:
p.append(current)
p.reverse()
return p
def getFval(self):
return self.hval*self.hWeight + self.gval
class Heap():
def __init__(self):
self.array = list()
def __len__(self):
return len(self.array)
def insert(self, obj):
heapq.heappush(self.array, obj)
return
def extractMin(self):
return heapq.heappop(self.array) # get minimum from heap
def empty(self):
return len(self.array)==0
def heapify(self):
heapq.heapify(self.array)
return
def updateStatusVectors(distanceMatrix,canceledEvents,answeredEvents,eventsCloseTime,eventsStartTime,epsilon = 0.01):
eventsOpened = np.logical_and(eventsStartTime<=0, eventsCloseTime>=0)
# convert distance matrix to boolean of approx zero (picked up events)
step1 = np.sum((distanceMatrix <= epsilon), axis=0) >= 1
# condition on event being open
step2 = np.logical_and(step1, eventsOpened)
updatedEventsAnswered = np.copy(answeredEvents)
# new possible events answered status
updatedEventsAnswered[step2] = 1
updatedEventsAnswered.astype(np.bool_)
numAnsweredEvents = np.sum(step2)
# find canceled events and add to canceled vector
step1Canceled = np.logical_and(np.logical_not(answeredEvents),eventsCloseTime<0)
step2Canceled = np.logical_and(step1Canceled,np.logical_not(canceledEvents))
updatedEventsCanceled = np.copy(canceledEvents)
updatedEventsCanceled[step2Canceled] = 1
updatedEventsCanceled.astype(np.bool_)
numCanceledEvents = np.sum(step2Canceled)
    updatedEventsOpened = np.logical_and(np.logical_not(step1), eventsOpened)
numOpenedEvents = np.sum(updatedEventsOpened)
return updatedEventsAnswered,updatedEventsCanceled,numAnsweredEvents,numCanceledEvents,numOpenedEvents
def updateCommittedStatus(committedEvents,committedCars,committedEventIndex,canceledEvents,answeredEvents):
eventClosed = np.logical_or(canceledEvents,answeredEvents)
updatedCommittedCars = np.copy(committedCars)
updatedCommittedCars[updatedCommittedCars>0] = np.logical_not(eventClosed[committedEventIndex>=0])
updatedCommittedEvents = np.copy(committedEvents)
updatedCommittedEvents[eventClosed] = False
updatedCommittedEventIndex = np.copy(committedEventIndex)
updatedCommittedEventIndex[np.logical_not(updatedCommittedCars)] = -1
return updatedCommittedCars,updatedCommittedEvents,updatedCommittedEventIndex
def createCommitMatrix(numCars,numEvents):
if numEvents == 0:
commitMat = np.zeros(shape=(numCars,1)).astype(np.bool_)
else:
numCases = 0
numOptionalCommits = np.min([numCars+1,numEvents+1])
numCasesPerCommitOptions = []
for i in range(np.min([numCars+1,numEvents+1])):
numCasesPerCommitOptions.append(comb(numCars,i)*comb(numEvents,i)*math.factorial(i))
numCases = np.sum(numCasesPerCommitOptions)
commitMat = np.zeros(shape = (int(numCases),numCars,numEvents))
k = 0
for i in range(numOptionalCommits):
for carChoice in list(itertools.combinations(range(numCars),i)):
for eventChoice in list(itertools.combinations(range(numEvents),i)):
for eventChoicePermutations in list(itertools.permutations(eventChoice,len(eventChoice))):
commitMat[k,carChoice,eventChoicePermutations] = True
k += 1
return commitMat
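# A small, hedged sanity example for createCommitMatrix (illustration only):
# with 2 cars and 2 events the number of commitment layouts is
#   C(2,0)*C(2,0)*0! + C(2,1)*C(2,1)*1! + C(2,2)*C(2,2)*2! = 1 + 4 + 2 = 7,
# so the returned tensor has shape (7, 2, 2).
if __name__ == '__main__':
    commits = createCommitMatrix(2, 2)
    print(commits.shape)             # expected: (7, 2, 2)
    print(commits.sum(axis=(1, 2)))  # committed pairs per layout: 0, 1, 1, 1, 1, 2, 2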
|
import os
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Command create .docker folder structure"""
help = "Create .docker folder structure"
def create(self, path: str):
"""Create path if not exist"""
if not os.path.exists(path):
os.makedirs(path)
def handle(self, *args, **options):
self.create(settings.DOCKER_DB_DIR)
self.create(settings.DOCKER_STATIC_DIR)
self.create(settings.DOCKER_LOG_DIR)
self.create(settings.DOCKER_MEDIA_DIR)
|
from .swagger.models.answer import Answer as SwaggerAnswer
from .swagger.models.question import Question as SwaggerQuestion
from .swagger.models.quiz import Quiz as SwaggerQuiz
class Answer(SwaggerAnswer):
pass
class Question(SwaggerQuestion):
pass
class Quiz(SwaggerQuiz):
pass
|
import amulet
from juju.client.connection import JujuData
import logging
import os
import requests
import subprocess
import yaml
log = logging.getLogger(__name__)
def get_juju_credentials():
jujudata = JujuData()
controller_name = jujudata.current_controller()
controller = jujudata.controllers()[controller_name]
endpoint = controller["api-endpoints"][0]
models = jujudata.models()[controller_name]
model_name = models["current-model"]
model_uuid = models["models"][model_name]["uuid"]
accounts = jujudata.accounts()[controller_name]
username = accounts["user"]
password = accounts.get("password")
return {
"endpoint": endpoint,
"model_uuid": model_uuid,
"username": username,
"password": password
}
def _download_resource(url, target_path):
r = requests.get(url, stream=True)
r.raise_for_status()
log.info("Downloading resource {} to {}".format(url, target_path))
with open(target_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
def _get_resource(app, resource, charm):
env = "{}_RESOURCE_{}".format(app.upper(), resource.upper())
default_url = ("https://api.jujucharms.com/charmstore/v5/~elastisys/"
"{}/resource/{}?channel=edge".format(charm, resource))
resource_path = os.getenv(env, default_url)
if os.path.isfile(resource_path):
return resource_path
try:
target_path = "/tmp/{}-{}.tar".format(app, resource)
_download_resource(resource_path, target_path)
return target_path
except requests.exceptions.RequestException:
message = "resource '{}' not found".format(resource_path)
amulet.raise_status(amulet.FAIL, msg=message)
def attach_resource(app, resource, charm):
if not _has_resource(app, resource):
resource_path = _get_resource(app, resource, charm)
log.info("{} resource: {} = {}".format(app, resource, resource_path))
_attach_resource(app, resource, resource_path)
# Creds to https://github.com/juju-solutions/bundle-canonical-kubernetes/blob/master/tests/amulet_utils.py # noqa
def _attach_resource(app, resource, resource_path):
''' Upload a resource to a deployed model.
:param: app - the application to attach the resource
    :param: resource - The charm's resource key
:param: resource_path - the path on disk to upload the
resource'''
# the primary reason for this method is to replace a shell
# script in the $ROOT dir of the charm
cmd = ['juju', 'attach', app, "{}={}".format(resource, resource_path)]
subprocess.call(cmd)
def _has_resource(app, resource):
''' Poll the controller to determine if we need to upload a resource
'''
cmd = ['juju', 'resources', app, '--format=yaml']
output = subprocess.check_output(cmd)
resource_list = yaml.safe_load(output)
for resource in resource_list['resources']:
        # We can assume this is the correct resource if its name matches the
        # application and it has a non-zero size in the charm's resource stream
if 'name' in resource and (app in resource['name'] and
resource['size'] > 0):
# Display the found resource
print('Uploading {} for {}'.format(resource['name'], app))
return True
return False
|
class DownloadProgressChangedEventArgs(ProgressChangedEventArgs):
""" Provides data for the System.Net.WebClient.DownloadProgressChanged event of a System.Net.WebClient. """
BytesReceived=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the number of bytes received.
Get: BytesReceived(self: DownloadProgressChangedEventArgs) -> Int64
"""
TotalBytesToReceive=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the total number of bytes in a System.Net.WebClient data download operation.
Get: TotalBytesToReceive(self: DownloadProgressChangedEventArgs) -> Int64
"""
|
# encoding: utf-8
from sqlalchemy import *
from migrate import *
def upgrade(migrate_engine):
metadata = MetaData()
metadata.bind = migrate_engine
migrate_engine.execute('''
CREATE TABLE user_following_dataset (
follower_id text NOT NULL,
object_id text NOT NULL,
datetime timestamp without time zone NOT NULL
);
CREATE TABLE user_following_user (
follower_id text NOT NULL,
object_id text NOT NULL,
datetime timestamp without time zone NOT NULL
);
ALTER TABLE user_following_dataset
ADD CONSTRAINT user_following_dataset_pkey PRIMARY KEY (follower_id, object_id);
ALTER TABLE user_following_user
ADD CONSTRAINT user_following_user_pkey PRIMARY KEY (follower_id, object_id);
ALTER TABLE user_following_dataset
ADD CONSTRAINT user_following_dataset_follower_id_fkey FOREIGN KEY (follower_id) REFERENCES "user"(id) ON UPDATE CASCADE ON DELETE CASCADE;
ALTER TABLE user_following_dataset
ADD CONSTRAINT user_following_dataset_object_id_fkey FOREIGN KEY (object_id) REFERENCES package(id) ON UPDATE CASCADE ON DELETE CASCADE;
ALTER TABLE user_following_user
ADD CONSTRAINT user_following_user_follower_id_fkey FOREIGN KEY (follower_id) REFERENCES "user"(id) ON UPDATE CASCADE ON DELETE CASCADE;
ALTER TABLE user_following_user
ADD CONSTRAINT user_following_user_object_id_fkey FOREIGN KEY (object_id) REFERENCES "user"(id) ON UPDATE CASCADE ON DELETE CASCADE;
''')
|
def multi(l_st):
output = 1
for x in l_st:
output *= x
return output
def add(l_st):
return sum(l_st)
def reverse(string):
return string[::-1]
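# Quick usage examples (illustration only):
if __name__ == "__main__":
    print(multi([2, 3, 4]))   # 24
    print(add([2, 3, 4]))     # 9
    print(reverse("hello"))   # olleh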
|
from __future__ import absolute_import
from .links import link_api, link_api_documentation
tool_links = [link_api, link_api_documentation]
|
"""
A hack to make pawt.swing point to the java swing library.
This allows code which imports pawt.swing to work on both JDK1.1 and 1.2
"""
swing = None
try:
import javax.swing.Icon
from javax import swing
except (ImportError, AttributeError):
try:
import java.awt.swing.Icon
from java.awt import swing
except (ImportError, AttributeError):
try:
import com.sun.java.swing.Icon
from com.sun.java import swing
except (ImportError, AttributeError):
raise ImportError, 'swing not defined in javax.swing or java.awt.swing or com.sun.java.swing'
import sys
def test(panel, size=None, name='Swing Tester'):
f = swing.JFrame(name, windowClosing=lambda event: sys.exit(0))
if hasattr(panel, 'init'):
panel.init()
f.contentPane.add(panel)
f.pack()
if size is not None:
from java import awt
f.setSize(apply(awt.Dimension, size))
f.setVisible(1)
return f
if swing is not None:
import pawt, sys
pawt.swing = swing
sys.modules['pawt.swing'] = swing
swing.__dict__['test'] = test
#These two lines help out jythonc to figure out this very strange module
swing.__dict__['__file__'] = __file__
swing.__dict__['__jpythonc_name__'] = 'pawt.swing'
|
from faker import Faker
from nig.endpoints import INPUT_ROOT, OUTPUT_ROOT
from nig.tests import create_test_env, delete_test_env
from restapi.tests import API_URI, BaseTests, FlaskClient
class TestApp(BaseTests):
def test_api_study(self, client: FlaskClient, faker: Faker) -> None:
# setup the test env
(
admin_headers,
uuid_group_A,
user_A1_uuid,
user_A1_headers,
uuid_group_B,
user_B1_uuid,
user_B1_headers,
user_B2_uuid,
user_B2_headers,
study1_uuid,
study2_uuid,
) = create_test_env(client, faker, study=False)
# create a new study for the group B
random_name = faker.pystr()
study1 = {"name": random_name, "description": faker.pystr()}
r = client.post(f"{API_URI}/study", headers=user_B1_headers, data=study1)
assert r.status_code == 200
study1_uuid = self.get_content(r)
assert isinstance(study1_uuid, str)
# create a new study for the group A
random_name2 = faker.pystr()
study2 = {"name": random_name2, "description": faker.pystr()}
r = client.post(f"{API_URI}/study", headers=user_A1_headers, data=study2)
assert r.status_code == 200
study2_uuid = self.get_content(r)
assert isinstance(study2_uuid, str)
# check the directory was created
dir_path = INPUT_ROOT.joinpath(uuid_group_A, study2_uuid)
assert dir_path.is_dir()
# test study access
# test study list response
r = client.get(f"{API_URI}/study", headers=user_B1_headers)
assert r.status_code == 200
response = self.get_content(r)
assert isinstance(response, list)
assert len(response) == 1
# test admin access
r = client.get(f"{API_URI}/study/{study1_uuid}", headers=admin_headers)
assert r.status_code == 200
# study owner
r = client.get(f"{API_URI}/study/{study1_uuid}", headers=user_B1_headers)
assert r.status_code == 200
# other component of the group
r = client.get(f"{API_URI}/study/{study1_uuid}", headers=user_B2_headers)
assert r.status_code == 200
# study own by an other group
r = client.get(f"{API_URI}/study/{study1_uuid}", headers=user_A1_headers)
assert r.status_code == 404
not_authorized_message = self.get_content(r)
assert isinstance(not_authorized_message, str)
# test study modification
# modify a study you do not own
r = client.put(
f"{API_URI}/study/{study1_uuid}",
headers=user_A1_headers,
data={"description": faker.pystr()},
)
assert r.status_code == 404
# modify a study you own
r = client.put(
f"{API_URI}/study/{study1_uuid}",
headers=user_B1_headers,
data={"description": faker.pystr()},
)
assert r.status_code == 204
# delete a study
# delete a study you do not own
r = client.delete(f"{API_URI}/study/{study1_uuid}", headers=user_A1_headers)
assert r.status_code == 404
# delete a study you own
# create a new dataset to test if it's deleted with the study
dataset = {"name": faker.pystr(), "description": faker.pystr()}
r = client.post(
f"{API_URI}/study/{study2_uuid}/datasets",
headers=user_A1_headers,
data=dataset,
)
assert r.status_code == 200
dataset_uuid = self.get_content(r)
assert isinstance(dataset_uuid, str)
dataset_path = dir_path.joinpath(dataset_uuid)
assert dataset_path.is_dir()
# create a new file to test if it's deleted with the study
filename = f"{faker.pystr()}_R1"
file_data = {
"name": f"{filename}.fastq.gz",
"mimeType": "application/gzip",
"size": faker.pyint(),
"lastModified": faker.pyint(),
}
r = client.post(
f"{API_URI}/dataset/{dataset_uuid}/files/upload",
headers=user_A1_headers,
data=file_data,
)
assert r.status_code == 201
# get the file uuid
r = client.get(
f"{API_URI}/dataset/{dataset_uuid}/files",
headers=user_A1_headers,
)
assert r.status_code == 200
file_list = self.get_content(r)
assert isinstance(file_list, list)
file_uuid = file_list[0]["uuid"]
# create a new technical to test if it's deleted with the study
techmeta = {"name": faker.pystr()}
r = client.post(
f"{API_URI}/study/{study2_uuid}/technicals",
headers=user_A1_headers,
data=techmeta,
)
assert r.status_code == 200
techmeta_uuid = self.get_content(r)
assert isinstance(techmeta_uuid, str)
# create a new phenotype to test if it's deleted with the study
phenotype = {"name": faker.pystr(), "sex": "male"}
r = client.post(
f"{API_URI}/study/{study2_uuid}/phenotypes",
headers=user_A1_headers,
data=phenotype,
)
assert r.status_code == 200
phenotype_uuid = self.get_content(r)
assert isinstance(phenotype_uuid, str)
# simulate the study has an output directory
# create the output directory in the same way is created in launch pipeline task
output_path = OUTPUT_ROOT.joinpath(dataset_path.relative_to(INPUT_ROOT))
output_path.mkdir(parents=True)
assert output_path.is_dir()
# delete the study
r = client.delete(f"{API_URI}/study/{study2_uuid}", headers=user_A1_headers)
assert r.status_code == 204
assert not dir_path.is_dir()
assert not dataset_path.is_dir()
# check the dataset was deleted
r = client.get(f"{API_URI}/dataset/{dataset_uuid}", headers=user_A1_headers)
assert r.status_code == 404
# check the file was deleted
r = client.get(f"{API_URI}/file/{file_uuid}", headers=user_A1_headers)
assert r.status_code == 404
# check the technical was deleted
r = client.get(f"{API_URI}/technical/{techmeta_uuid}", headers=user_A1_headers)
assert r.status_code == 404
# check the phenotype was deleted
r = client.get(f"{API_URI}/phenotype/{phenotype_uuid}", headers=user_A1_headers)
assert r.status_code == 404
# check the output dir was deleted
assert not output_path.is_dir()
# delete a study own by your group
r = client.delete(f"{API_URI}/study/{study1_uuid}", headers=user_B2_headers)
assert r.status_code == 204
# check study deletion
r = client.get(f"{API_URI}/study/{study1_uuid}", headers=user_B1_headers)
assert r.status_code == 404
not_existent_message = self.get_content(r)
assert isinstance(not_existent_message, str)
assert not_existent_message == not_authorized_message
# delete all the elements used by the test
delete_test_env(
client,
user_A1_headers,
user_B1_headers,
user_B1_uuid,
user_B2_uuid,
user_A1_uuid,
uuid_group_A,
uuid_group_B,
)
|
import os
def remove_files0():
root_dir = '/home/yche/mnt/wangyue-clu/csproject/biggraph/ywangby/yche/git-repos/SimRank/python_experiments/exp_results/varying_eps_exp'
data_set = 'ca-GrQc'
pair_num = 100000
    eps_lst = [0.001 * (i + 1) for i in range(30)]
file_name_lst = ['reads-d-rand-bench.txt', 'reads-rq-rand-bench.txt']
for eps in eps_lst:
read_d_file = os.sep.join(map(str, [root_dir, data_set, pair_num, eps, file_name_lst[0]]))
read_rq_file = os.sep.join(map(str, [root_dir, data_set, pair_num, eps, file_name_lst[1]]))
os.system('rm ' + read_d_file)
os.system('rm ' + read_rq_file)
if __name__ == '__main__':
tag = 'exp_results'
folder_name = 'varying_eps_for_topk_precision_exp'
sample_num = str(10 ** 4)
k = str(800)
data_set_name_lst = [
'ca-GrQc',
'ca-HepTh',
'p2p-Gnutella06',
'wiki-Vote'
]
eps_lst = list(reversed([0.0001, 0.0004, 0.0016, 0.0064, 0.0256]))
for algorithm in [
# 'reads-rq-rand-bench', 'reads-d-rand-bench',
'reads-d-rand-bench-gt']:
for data_set_name in data_set_name_lst:
for eps in eps_lst:
statistics_dir = os.sep.join(map(str, [
'/home/yche/mnt/wangyue-clu/csproject/biggraph/ywangby/yche/git-repos/SimRank/python_experiments/',
tag, folder_name, sample_num, k, data_set_name, eps]))
statistics_file_path = statistics_dir + os.sep + algorithm + '.txt'
os.system('rm ' + statistics_file_path)
|
import argparse
import asyncio
import json
import aiohttp
import aiohttp_jinja2
from aiohttp import web
import jinja2
from typing import Dict
from dataclasses import dataclass, field
from pycards import game
parser = argparse.ArgumentParser(description='dapai')
parser.add_argument('--port')
def json_dumps(content):
return json.dumps(content, cls=game.ModelEncoder)
async def spread_cards(game, sockets, cards):
for i in range(cards):
if not game.deck_count:
break
waitables = []
for name, ws in sockets.items():
            drawn = game.draw(name, 1)  # avoid shadowing the `cards` count parameter
            waitables.append(ws.send_json({
                'action': 'DRAWED',
                'arg': drawn,
'name': name,
}))
await asyncio.gather(*waitables)
await asyncio.sleep(1)
# things I need to keep in the app context
@dataclass
class State(object):
players: Dict[str, game.Player] = field(default_factory=dict)
ws_by_name: Dict[str, object] = field(default_factory=dict)
room: game.GameRoom = game.GameRoom()
async def index(request):
return web.FileResponse('static/index.html')
async def ws_handler(request):
ws_current = web.WebSocketResponse()
ws_ready = ws_current.can_prepare(request)
if not ws_ready.ok:
return web.Response(status=400)
    # Does this mean the JS client has already connected at this point?
await ws_current.prepare(request)
state = request.app['state']
current_player = None
while True:
msg = await ws_current.receive()
broadcast = {}
reply_result = {}
if msg.type == aiohttp.WSMsgType.text:
try:
parsed = json.loads(msg.data)
new_name, action, arg = parsed.get('name'), parsed['action'], parsed['arg']
print(new_name, action, arg)
if action == 'NEW_PLAYER':
name = new_name
state.ws_by_name[name] = ws_current
if name in state.players:
current_player = state.players[name]
else:
current_player = game.Player(name, 0)
state.players[name] = current_player
if current_player in state.room.players:
if state.room.game:
reply_result = {
'name': '',
'action': 'SET_STATE',
'arg': {
'hand': state.room.game.hand(name)
}
}
else:
if current_player not in state.room.observers:
state.room.observers.append(current_player)
broadcast = {
'name': '',
'action': 'SET_STATE',
'arg': {
'room': state.room,
}
}
elif action == 'SPREAD_CARDS':
count = int(arg)
await spread_cards(state.room.game, state.ws_by_name, count)
elif action == 'MESSAGE':
broadcast['msg'] = arg
else:
assert new_name is None or current_player.name == new_name
reply_result, broadcast = state.room.handle_command(current_player, action, arg)
except ValueError as e:
print(e)
import traceback
traceback.print_exc()
await ws_current.send_json({'error': str(e)})
else:
to_send = []
if broadcast:
for ws in state.ws_by_name.values():
to_send.append(ws.send_json(broadcast, dumps=json_dumps))
if reply_result:
to_send.append(ws_current.send_json(reply_result, dumps=json_dumps))
await asyncio.gather(*to_send)
else:
break
if current_player is not None:
state.room.leave_room(current_player)
del state.ws_by_name[current_player.name]
to_send = []
print('PLAYER_LEFT', current_player.name)
for ws in state.ws_by_name.values():
to_send.append(ws.send_json({
'name': current_player.name,
'action': 'PLAYER_LEFT',
'arg': ''
}))
await asyncio.gather(*to_send)
return ws_current
async def init_app():
app = web.Application()
app['state'] = State()
app.on_shutdown.append(shutdown)
aiohttp_jinja2.setup(
app, loader=jinja2.FileSystemLoader('templates')) # directories relative
app.router.add_get('/', index)
app.router.add_get('/ws', ws_handler)
app.router.add_routes([web.static('/static', 'static')])
return app
async def shutdown(app):
for ws in app['state'].ws_by_name.values():
await ws.close()
app['state'].ws_by_name.clear()
def main():
app = init_app()
args = parser.parse_args()
web.run_app(app, port=args.port)
if __name__ == '__main__':
main()
|
from string import hexdigits
from random import choice
import aiohttp
from type.credentials import GoogleCredentials, FirebaseCredentials
from type.settings import FirebaseSettings
from dataclasses import dataclass
from typing import Optional
@dataclass()
class Installation:
token: str
iid: str
class Firebase:
@staticmethod
async def _getInstallation(settings: FirebaseSettings) -> Optional[Installation]:
url = f"https://firebaseinstallations.googleapis.com/v1/projects/{settings.appName}/installations"
headers = {"Content-Type": "application/json", "x-goog-api-key": settings.publicKey}
data = {"fid": "dIsVQ2QVRT-TW7L6VfeAMh",
"appId": settings.appId,
"authVersion": "FIS_v2",
"sdkVersion": "a:16.3.3"}
async with aiohttp.ClientSession() as session:
r = await session.post(url, headers=headers, json=data)
if r.status != 200:
return
info = await r.json()
return Installation(token=info["authToken"]["token"], iid=info["fid"])
@staticmethod
async def getCredentials(googleCr: GoogleCredentials, settings: FirebaseSettings) -> Optional[FirebaseCredentials]:
        installation = await Firebase._getInstallation(settings)
        if installation is None:
            return None
        url = "https://android.clients.google.com/c2dm/register3"
headers = {"Authorization": f"AidLogin {googleCr.androidId}:{googleCr.securityToken}"}
sender = settings.appId.split(":")[1]
data = {"X-subtype": sender,
"sender": sender,
"X-appid": installation.iid,
"X-Goog-Firebase-Installations-Auth": installation.token,
"app": settings.appGroup,
"device": googleCr.androidId}
async with aiohttp.ClientSession() as session:
r = await session.post(url=url, headers=headers, data=data)
if r.status != 200:
return
pushToken = (await r.text()).replace("token=", "")
deviceId = ''.join([choice(hexdigits) for _ in range(32)]).lower()
return FirebaseCredentials(pushToken=pushToken, deviceId=deviceId)
testSettings = FirebaseSettings(publicKey="AIzaSyAOaoKaLhW98vLuaCuBqFh8qtLnh5c51z0",
appName="mcdonalds-70126",
appId="1:654771087992:android:79237bff987a6465",
appGroup="com.apegroup.mcdonaldrussia")
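# Hedged usage sketch (requires valid GoogleCredentials and an asyncio event loop;
# "googleCr" is a placeholder, not defined in this module):
#   import asyncio
#   creds = asyncio.run(Firebase.getCredentials(googleCr, testSettings))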
|
import json
from setuptools import setup, find_packages
with open('install.json', 'r') as fh:
version = json.load(fh)['programVersion']
if not version:
raise RuntimeError('Cannot find version information')
setup(
author='',
description='Playbook app wrapper for TextBlob (https://github.com/sloria/TextBlob).',
name='text_blob',
packages=find_packages(),
version=version
)
|
# Loading dependencies
import pandas as pd
import numpy as np
import sys
from utils.GeneralSettings import *
from Asset.AssetTypes.Bridge import Bridge
from Asset.AssetTypes.Building import Building
from Asset.Elements.BridgeElement import BridgeElement
from Asset.Elements.ConditionRating.NBIRatingModified import NBI
from Asset.Elements.Deterioration.IBMSSinha2009 import Markovian as MarkovianIBMS
from Asset.Elements.Utility.BridgeElementsUtilityBai2013 import *
from Asset.Elements.AgencyCost.AgencyCostSinha2009 import *
from Asset.Elements.BuildingElement import BuildingElement
from Asset.Elements.ConditionRating.PONTISRating import Pontis_CR
from Asset.Elements.Deterioration.Markovian import Markovian
from Asset.Elements.Utility.DummyBuildingUtility import DummyUtility
from Asset.Elements.AgencyCost.DummyBuildingRetrofitCost import RetrofitCosts
from Asset.Elements.SHMElement import SHMElement
from Asset.HazardModels.HazardModel import HazardModel
from Asset.HazardModels.Generator.PoissonProcess import PoissonProcess
from Asset.HazardModels.Response.HazusBridgeResponse import HazusBridgeResponse
from Asset.HazardModels.Loss.BridgeHazusLoss import BridgeHazusLoss
from Asset.HazardModels.Recovery.SimpleRecovery import SimpleRecovery
from Asset.HazardModels.Response.DummyBuildingResponse import DummyResponse
from Asset.HazardModels.Loss.DummyBuildingLoss import DummyLoss
from Asset.HazardModels.Recovery.DummyBuildingRecovery import DummyRecovery
from Asset.MRRModels.MRRFourActions import MRRFourActions
from Asset.MRRModels.MRRTwoActions import MRRTwoActions
from Asset.MRRModels.SHMActions import SHMActions
from Asset.MRRModels.EffectivenessModels.SimpleEffectiveness import SimpleEffectiveness
from Asset.MRRModels.EffectivenessModels.DummyRetrofitEffectiveness import DummyEffectiveness
from Asset.MRRModels.EffectivenessModels.SHMEffectiveness import SHMEffectiveness
from Asset.UserCostModels.TexasDOTUserCost import TexasDOTUserCost
from Asset.UserCostModels.TexasDOTUserCostWithVolatility import TexasDOTUserCostWithVolatility
from Asset.UserCostModels.DummyBuildingUserCost import DummyUserCost
from utils.AccumulatorThree import AccumulatorThree
from utils.AccumulatorX import AccumulatorX
from utils.PredictiveModels.Linear import Linear
from utils.PredictiveModels.WienerDrift import WienerDrift
from utils.PredictiveModels.GBM import GBM
from utils.Distributions.LogNormal import LogNormal
from utils.Distributions.Normal import Normal
from utils.Distributions.Beta import Beta
from utils.Distributions.Exponential import Exponential
from utils.Distributions.Gamma import Gamma
from utils.Distributions.Binomial import Binomial
class BaseNetwork:
def __init__(self, **params):
super().__init__()
'''Constructor of the base network'''
self.file_name = params.pop('file_name', None)
self.n_assets = params.pop('n_assets', 0)
self.settings = params.pop('settings')
        if self.file_name is not None:
            path = f'./Network/Networks/{self.file_name}.csv'
            self.assets_df = pd.read_csv(path, index_col=0).iloc[:self.n_assets, :]
def load_asset(self, *args, **kwargs):
raise NotImplementedError ("load_asset in Loader is not implemented yet")
def set_network_mrr(self, network_mrr):
'''Setting the mrr of the network to each asset'''
for asset, mrr in zip (self.assets, network_mrr):
asset.mrr_model.set_mrr(mrr)
def load_network(self):
'''Loading the network
If the file_name is None, it is assumed that the network will be randomized
        Else, the network will be loaded from a data file
'''
self.assets = []
if self.file_name is None:
# For generated networks
for i in range (self.n_assets):
self.assets.append(self.load_asset())
else:
for idx in self.assets_df.index:
                # For loading data from file
self.assets.append(self.load_asset(idx))
return self.assets
def set_current_budget_limit(self, val):
'''Setting the current budget limit'''
self.current_budget_limit = val
def set_budget_limit_model(self, model):
'''Setting the budget limit in time'''
self.budget_model = model
def set_npv_budget_limit(self, val):
'''Set the npv of the budget limit'''
self.npv_budget_limit = val
def objective1(self):
return np.random.random()
def objective2(self):
return np.random.random()
|
import random
number = random.randint(1, 10)
user = None
count = 0
while user != number:
    guess = input("What's your guess? (type 'exit' to quit) ")
    if guess == "exit":
        break
    user = int(guess)
    count += 1
    if user < number:
        print("Too low!")
    elif user > number:
        print("Too high!")
    else:
        print("You got it!")
        print("And it only took you", count, "tries!")
|
__title__ = 'glance-times'
__description__ = 'Who will watch the watchman? Glance can.'
__url__ = 'https://github.com/mrice88/glance'
__download_url__ = 'https://github.com/mrice88/glance/archive/master.zip'
__version__ = '0.3.6'
__author__ = 'Mark Rice'
__author_email__ = 'markricejr@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2019 Mark Rice'
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import time
import array
import os
sys.path.append("shell")
import swapforth
class TetheredJ1a(swapforth.TetheredTarget):
cellsize = 2
def open_ser(self, port, speed):
try:
import serial
        except ImportError:
print("This tool needs PySerial, but it was not found")
sys.exit(1)
self.ser = serial.Serial(port, 38400, timeout=None, rtscts=0)
def reset(self, fullreset = True):
ser = self.ser
ser.setDTR(1)
if fullreset:
ser.setRTS(1)
ser.setRTS(0)
ser.setDTR(0)
def waitcr():
            while ser.read(1) != b'\n':
pass
waitcr()
ser.write(b'\r')
waitcr()
for c in ' 1 tth !':
ser.write(c.encode('utf-8'))
ser.flush()
time.sleep(0.001)
ser.flushInput()
#print("In: ", c, "Out: ", repr(ser.read(ser.inWaiting())))
ser.write(b'\r')
while 1:
c = ser.read(1)
# print(repr(c))
if c == b'\x1e':
break
def boot(self, bootfile = None):
sys.stdout.write('Contacting... ')
self.reset()
print('established')
def interrupt(self):
self.reset(False)
def serialize(self):
l = self.command_response('0 here dump')
lines = l.strip().replace('\r', '').split('\n')
s = []
for l in lines:
l = l.split()
s += [int(b, 16) for b in l[1:17]]
        s = array.array('B', s).tobytes().ljust(8192, b'\xff')
return array.array('H', s)
if __name__ == '__main__':
swapforth.main(TetheredJ1a)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationRegistrationUpdateApiModel(Model):
"""Application registration update request.
:param product_uri: Product uri
:type product_uri: str
:param application_name: Default name of the server or client.
:type application_name: str
:param locale: Locale of default name - defaults to "en"
:type locale: str
:param localized_names: Localized names keyed off locale id.
To remove entry, set value for locale id to null.
:type localized_names: dict[str, str]
:param certificate: Application public cert
:type certificate: bytearray
:param capabilities: Capabilities of the application
:type capabilities: list[str]
:param discovery_urls: Discovery urls of the application
:type discovery_urls: list[str]
:param discovery_profile_uri: Discovery profile uri
:type discovery_profile_uri: str
:param gateway_server_uri: Gateway server uri
:type gateway_server_uri: str
"""
_validation = {
'capabilities': {'unique': True},
'discovery_urls': {'unique': True},
}
_attribute_map = {
'product_uri': {'key': 'productUri', 'type': 'str'},
'application_name': {'key': 'applicationName', 'type': 'str'},
'locale': {'key': 'locale', 'type': 'str'},
'localized_names': {'key': 'localizedNames', 'type': '{str}'},
'certificate': {'key': 'certificate', 'type': 'bytearray'},
'capabilities': {'key': 'capabilities', 'type': '[str]'},
'discovery_urls': {'key': 'discoveryUrls', 'type': '[str]'},
'discovery_profile_uri': {'key': 'discoveryProfileUri', 'type': 'str'},
'gateway_server_uri': {'key': 'gatewayServerUri', 'type': 'str'},
}
def __init__(self, product_uri=None, application_name=None, locale=None, localized_names=None, certificate=None, capabilities=None, discovery_urls=None, discovery_profile_uri=None, gateway_server_uri=None):
super(ApplicationRegistrationUpdateApiModel, self).__init__()
self.product_uri = product_uri
self.application_name = application_name
self.locale = locale
self.localized_names = localized_names
self.certificate = certificate
self.capabilities = capabilities
self.discovery_urls = discovery_urls
self.discovery_profile_uri = discovery_profile_uri
self.gateway_server_uri = gateway_server_uri
|
from cffi import FFI
import os
ffibuilder = FFI()
# cdef() expects a single string declaring the C types, functions and
# globals needed to use the shared object. It must be in valid C syntax.
ffibuilder.cdef("""
typedef int Boolean;
typedef int64_t ft_data_t;
typedef struct {
size_t length;
size_t N0;
ft_data_t* bit;
ft_data_t (*op)(ft_data_t, ft_data_t);
ft_data_t ide;
} FenwickTree;
void ft_add(FenwickTree* ft, size_t a, ft_data_t w);
ft_data_t ft_sum(FenwickTree* ft, size_t a);
FenwickTree* ft_init(ft_data_t* array, size_t length,
ft_data_t (*op)(ft_data_t, ft_data_t), ft_data_t ide,
Boolean construct);
typedef struct {
int length;
int* table;
int* n_members;
} Dsu;
Dsu *DsuInit(int length);
int dsu_find(Dsu *dsu, int x);
void dsu_union(Dsu *dsu, int x, int y);
typedef struct {
int length;
int depth;
int n;
int64_t* q;
} PriorityQueue;
PriorityQueue *PriorityQueueInit(int _length);
int pq_get_size(PriorityQueue *pq);
int64_t pq_pop(PriorityQueue *pq);
int64_t pq_top(PriorityQueue *pq);
void pq_push(PriorityQueue *pq, int64_t x);
typedef int64_t data_t;
typedef struct {
int n;
int depth;
data_t *segtree;
data_t (*op)(data_t, data_t);
data_t ide;
} SegmentTree;
SegmentTree* SegmentTreeInit(data_t *array, int _length, data_t (*op)(data_t, data_t), data_t ide);
void st_add(SegmentTree *st, int a, data_t x);
void st_subst(SegmentTree *st, int a, data_t x);
data_t st_get_one(SegmentTree* st, int a);
data_t st_get(SegmentTree* st, int l, int r);
typedef struct set_node{
int key;
int rank;
int size;
struct set_node *left, *right;
} set_node;
set_node* insert(set_node *x, int t);
set_node* lt(set_node*x,int k);
set_node* gt(set_node*x,int k);
set_node* SetInit();
static int Scanner(void);
static int* ScannerMulti(int n);
static char* ScannerString(int n);
static void Printer(int64_t x);
static void PrinterOne(int64_t x);
static void PrinterArray(int64_t *x, int n);
data_t (*get_operator(const char *str)) (data_t, data_t);
""")
# set_source() gives the name of the python extension module to
# produce, and some C source code as a string. This C code needs
# to make the declarated functions, types and globals available,
# so it is often just the "#include".
ffibuilder.set_source("_compprog_cffi",
"""
#include "compprog.h" // the C header of the library
""")
if __name__ == "__main__":
os.chdir("lib/")
ffibuilder.compile(verbose=True)
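# Hedged usage sketch of the built extension (module name matches set_source() above;
# the Dsu calls follow the cdef declarations, actual behaviour depends on compprog.h):
#   from _compprog_cffi import lib
#   dsu = lib.DsuInit(10)
#   lib.dsu_union(dsu, 1, 2)
#   assert lib.dsu_find(dsu, 1) == lib.dsu_find(dsu, 2)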
|
import torch
from torch import nn
def expand_as_one_hot(input_, C, ignore_label=None):
"""
Converts NxSPATIAL label image to NxCxSPATIAL, where each label gets converted to its corresponding one-hot vector.
NOTE: make sure that the input_ contains consecutive numbers starting from 0, otherwise the scatter_ function
won't work.
SPATIAL = DxHxW in case of 3D or SPATIAL = HxW in case of 2D
:param input_: 3D or 4D label image (NxSPATIAL)
:param C: number of channels/labels
:param ignore_label: ignore index to be kept during the expansion
:return: 4D or 5D output image (NxCxSPATIAL)
"""
assert input_.dim() in (3, 4), f"Unsupported input shape {input_.shape}"
# expand the input_ tensor to Nx1xSPATIAL before scattering
input_ = input_.unsqueeze(1)
# create result tensor shape (NxCxSPATIAL)
output_shape = list(input_.size())
output_shape[1] = C
if ignore_label is not None:
# create ignore_label mask for the result
mask = input_.expand(output_shape) == ignore_label
# clone the src tensor and zero out ignore_label in the input_
input_ = input_.clone()
input_[input_ == ignore_label] = 0
# scatter to get the one-hot tensor
result = torch.zeros(output_shape).to(input_.device).scatter_(1, input_, 1)
# bring back the ignore_label in the result
result[mask] = ignore_label
return result
else:
# scatter to get the one-hot tensor
return torch.zeros(output_shape).to(input_.device).scatter_(1, input_, 1)
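# Minimal usage sketch for expand_as_one_hot (shapes are illustrative):
#   labels = torch.tensor([[[0, 1], [2, 1]]])      # NxHxW = 1x2x2
#   one_hot = expand_as_one_hot(labels, C=3)       # -> NxCxHxW = 1x3x2x2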
def check_consecutive(labels):
""" Check that the input labels are consecutive and start at zero.
"""
diff = labels[1:] - labels[:-1]
return (labels[0] == 0) and (diff == 1).all()
class ContrastiveWeights(nn.Module):
"""
Implementation of contrastive loss defined in https://arxiv.org/pdf/1708.02551.pdf
'Semantic Instance Segmentation with a Discriminative Loss Function'
This implementation expands all tensors to match the instance dimensions.
This means that it's fast, but has high memory consumption.
Also, the implementation does not support masking any instance labels in the loss.
"""
def __init__(self, delta_var, delta_dist, alpha=1., beta=1., gamma=0.001):
super().__init__()
self.delta_var = delta_var
self.delta_dist = delta_dist
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.norm = 'fro'
def dist(self, x, y, dim, kd=True):
"""implements cosine distance"""
_x = x / (torch.norm(x, dim=dim, keepdim=True) + 1e-10)
_y = y / (torch.norm(y, dim=dim, keepdim=True) + 1e-10)
return 1.0 - (_x * _y).sum(dim=dim, keepdim=kd)
# def dist(self, x, y, dim, kd=True):
# return (x - y).norm(p=self.norm, dim=dim, keepdim=kd)
def _compute_cluster_means(self, input_, target, ndim):
dim_arg = (3, 4) if ndim == 2 else (3, 4, 5)
embedding_dims = input_.size()[1]
# expand target: NxCxSPATIAL -> # NxCx1xSPATIAL
target = target.unsqueeze(2)
# NOTE we could try to reuse this in '_compute_variance_term',
# but it has another dimensionality, so we would need to drop one axis
# get number of voxels in each cluster output: NxCx1(SPATIAL)
num_voxels_per_instance = torch.sum(target, dim=dim_arg, keepdim=True)
# expand target: NxCx1xSPATIAL -> # NxCxExSPATIAL
shape = list(target.size())
shape[2] = embedding_dims
target = target.expand(shape)
# expand input_: NxExSPATIAL -> Nx1xExSPATIAL
input_ = input_.unsqueeze(1)
# sum embeddings in each instance (multiply first via broadcasting) output: NxCxEx1(SPATIAL)
embeddings_per_instance = input_ * target
num = torch.sum(embeddings_per_instance, dim=dim_arg, keepdim=True)
# compute mean embeddings per instance NxCxEx1(SPATIAL)
mean_embeddings = num / num_voxels_per_instance
# return mean embeddings and additional tensors needed for further computations
return mean_embeddings, embeddings_per_instance
def _compute_variance_term(self, cluster_means, embeddings_per_instance, target, ndim):
dim_arg = (2, 3) if ndim == 2 else (2, 3, 4)
# compute the distance to cluster means, result:(NxCxSPATIAL)
embedding_norms = self.dist(embeddings_per_instance, cluster_means, dim=2, kd=False)
# get per instance distances (apply instance mask)
embedding_norms = embedding_norms * target
# zero out distances less than delta_var and sum to get the variance (NxC)
embedding_variance = torch.clamp(embedding_norms - self.delta_var, min=0) ** 2
embedding_variance = torch.sum(embedding_variance, dim=dim_arg)
# get number of voxels per instance (NxC)
num_voxels_per_instance = torch.sum(target, dim=dim_arg)
# normalize the variance term
C = target.size()[1]
variance_term = torch.sum(embedding_variance / num_voxels_per_instance, dim=1) / C
return variance_term
def _compute_distance_term(self, cluster_means, eid, ew, C, ndim):
if C == 1:
# just one cluster in the batch, so distance term does not contribute to the loss
return 0.
# squeeze space dims
for _ in range(ndim):
cluster_means = cluster_means.squeeze(-1)
# expand cluster_means tensor in order to compute the pair-wise distance between cluster means
cluster_means = cluster_means.unsqueeze(1)
shape = list(cluster_means.size())
shape[1] = C
# NxCxCxExSPATIAL(1)
cm_matrix1 = cluster_means.expand(shape)
# transpose the cluster_means matrix in order to compute pair-wise distances
cm_matrix2 = cm_matrix1.permute(0, 2, 1, 3)
# compute pair-wise distances (NxCxC)
dist_matrix = self.dist(cm_matrix1, cm_matrix2, dim=3, kd=False)
if ew is not None:
dist_matrix[:, eid[0], eid[1]] = dist_matrix[:, eid[0], eid[1]] * ew
dist_matrix[:, eid[1], eid[0]] = dist_matrix[:, eid[1], eid[0]] * ew
        # create matrix for the repulsion distance (i.e. cluster centers further
        # apart than 2 * delta_dist are no longer repulsed)
repulsion_dist = 2 * self.delta_dist * (1 - torch.eye(C))
# 1xCxC
repulsion_dist = repulsion_dist.unsqueeze(0).to(cluster_means.device)
        # zero out distances greater than 2*delta_dist (NxCxC)
hinged_dist = torch.clamp(repulsion_dist - dist_matrix, min=0) ** 2
# sum all of the hinged pair-wise distances
hinged_dist = torch.sum(hinged_dist, dim=(1, 2))
        # normalize by the number of pairs and return
return hinged_dist / (C * (C - 1))
def _compute_regularizer_term(self, cluster_means, C, ndim):
# squeeze space dims
for _ in range(ndim):
cluster_means = cluster_means.squeeze(-1)
norms = torch.norm(cluster_means, p=self.norm, dim=2)
assert norms.size()[1] == C
# return the average norm per batch
return torch.sum(norms, dim=1).div(C)
def forward(self, input_, target, edge_ids=None, weights=None, *args, **kwargs):
"""
Args:
input_ (torch.tensor): embeddings predicted by the network (NxExDxHxW) (E - embedding dims)
expects float32 tensor
target (torch.tensor): ground truth instance segmentation (NxDxHxW)
expects int64 tensor
Returns:
Combined loss defined as: alpha * variance_term + beta * distance_term + gamma * regularization_term
"""
input_ = input_[:, :, None]
n_batches = input_.shape[0]
# compute the loss per each instance in the batch separately
# and sum it up in the per_instance variable
per_instance_loss = 0.
        for idx, (single_input, single_target) in enumerate(zip(input_, target)):
# add singleton batch dimension required for further computation
if weights is None:
ew = None
eid = None
else:
ew = weights[idx]
eid = edge_ids[idx]
single_input = single_input.unsqueeze(0)
single_target = single_target.unsqueeze(0)
# get number of instances in the batch instance
instances = torch.unique(single_target)
assert check_consecutive(instances)
C = instances.size()[0]
# SPATIAL = D X H X W in 3d case, H X W in 2d case
# expand each label as a one-hot vector: N x SPATIAL -> N x C x SPATIAL
single_target = expand_as_one_hot(single_target, C)
# compare spatial dimensions
assert single_input.dim() in (4, 5)
assert single_input.dim() == single_target.dim()
assert single_input.size()[2:] == single_target.size()[2:]
spatial_dims = single_input.dim() - 2
# compute mean embeddings and assign embeddings to instances
cluster_means, embeddings_per_instance = self._compute_cluster_means(single_input,
single_target, spatial_dims)
variance_term = self._compute_variance_term(cluster_means, embeddings_per_instance,
single_target, spatial_dims)
distance_term = self._compute_distance_term(cluster_means, eid, ew, C, spatial_dims)
# regularization_term = self._compute_regularizer_term(cluster_means, C, spatial_dims)
# compute total loss and sum it up
loss = self.alpha * variance_term + self.beta * distance_term # + self.gamma * regularization_term
per_instance_loss += loss
# reduce across the batch dimension
return per_instance_loss.div(n_batches)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 5 11:36:51 2020
@author: cjburke
"""
import numpy as np
import matplotlib.pyplot as plt
import h5py
from astropy.wcs import WCS
from astropy.io import fits
import glob
import os
import argparse
try:
import pyds9 as pd
except ImportError:
print('Warning: No pyds9 installed. No debugging with image display available')
from astropy import units as u
from astropy.coordinates import SkyCoord
from gwcs.wcstools import wcs_from_points
from gwcs.wcs import WCS as WCS_gwcs
from gwcs.coordinate_frames import *
from step1_get_refimg_ctrlpts import gdPRF_calc, idx_filter, ring_background
import photutils.centroids as cent
from tess_stars2px import tess_stars2px_reverse_function_entry
def idx_filter(idx, *array_list):
new_array_list = []
for array in array_list:
new_array_list.append(array[idx])
return new_array_list
def binmedian(xdata, ydata, nBins=30, xmin=None, xmax=None, showDetail=False):
    if xmin is None:
        xmin = xdata.min()
    if xmax is None:
        xmax = xdata.max()
xedges = np.linspace(xmin, xmax, nBins+1)
midx = xedges[:-1] + np.diff(xedges)/2.0
iargs = np.digitize(xdata, xedges)
medata = np.zeros_like(midx)
mndata = np.zeros_like(midx)
stddata = np.zeros_like(midx)
ndata = np.zeros_like(midx)
for i in np.arange(0,nBins):
        # np.digitize returns 1-based bin indices for in-range values
        iuse = np.where(iargs == i + 1)[0]
medata[i] = np.median(ydata[iuse])
mndata[i] = np.mean(ydata[iuse])
stddata[i] = np.std(ydata[iuse])
ndata[i] = len(ydata[iuse])
if showDetail:
for i in np.arange(0,nBins):
errmn = stddata[i]/np.sqrt(ndata[i])
sigmn = mndata[i] / errmn
print('i: {0:d} med: {1:f} mn: {2:f} n: {3:f} errmn: {4:f} sigdif: {5:f} midx: {6:f}'.format(\
i, medata[i], mndata[i], ndata[i], errmn, sigmn, midx[i]))
return medata, midx, mndata, stddata, ndata
if __name__ == '__main__':
refprefix = 'refout/refspocunidense_S29_'
    nTots = np.array([], dtype=int)
    nBrgts = np.array([], dtype=int)
    noises = np.array([], dtype=float)
REFPIXCOL = 1024.0+45.0
REFPIXROW = 1024.0
PIX2DEG = 21.0/3600.0 # Turn pixels to degrees roughly
SECTOR_WANT=29
fitDegree=6
colMin = 45
colMax = 2092
rowMin = 1
rowMax = 2048
for iCam in range(1,5):
for iCcd in range(1,5):
refh5 = '{0}{1:d}{2:d}.h5'.format(refprefix,iCam, iCcd)
CAMERA_WANT=iCam
CCD_WANT=iCcd
# Load the reference position information
fin = h5py.File(refh5, 'r')
tics = np.array(fin['tics'])
ras = np.array(fin['ras'])
decs = np.array(fin['decs'])
tmags = np.array(fin['tmags'])
blkidxs = np.array(fin['blkidxs'])
obscols = np.array(fin['obscols'])
obsrows = np.array(fin['obsrows'])
# From reference pixel coordinates get the estimated ra and dec of this point
raproj, decproj, scinfo = tess_stars2px_reverse_function_entry(\
SECTOR_WANT, CAMERA_WANT, CCD_WANT, REFPIXCOL, REFPIXROW)
proj_point = SkyCoord(raproj, decproj, frame = 'icrs', unit=(u.deg, u.deg))
# Reference subtracted pixel coordinates
sclObsCols = (obscols - REFPIXCOL) * PIX2DEG
sclObsRows = (obsrows - REFPIXROW) * PIX2DEG
xy = (sclObsCols, sclObsRows)
radec = (ras, decs)
gwcs_obj = wcs_from_points(xy, radec, proj_point, degree=fitDegree)
# Look for outliers to trim
gwcs_pred_ras, gwcs_pred_decs = gwcs_obj(sclObsCols, sclObsRows)
deg2Rad = np.pi/180.0
deltaRas = (gwcs_pred_ras - ras) *3600.0 * np.cos(decs*deg2Rad)
deltaDecs = (gwcs_pred_decs - decs) *3600.0
deltaSeps = np.sqrt(deltaRas*deltaRas + deltaDecs*deltaDecs)
c1 = 1.0/np.sqrt(2.0)
idx = np.where(tmags<10.0)[0]
std1 = np.std(deltaRas[idx])
std2 = np.std(deltaDecs[idx])
brightstd = np.sqrt(std1*std1+std2*std2)*c1
idx = np.where(tmags>10.0)[0]
std1 = np.std(deltaRas[idx])
std2 = np.std(deltaDecs[idx])
faintstd = np.sqrt(std1*std1+std2*std2)*c1
std1 = np.std(deltaRas)
std2 = np.std(deltaDecs)
allstd = np.sqrt(std1*std1+std2*std2)*c1
nTots = np.append(nTots, len(tics))
nBrgts = np.append(nBrgts, len(np.where(tmags<10.0)[0]))
noises = np.append(noises, brightstd)
            print('Cam: {0:d} Ccd:{1:d} TotN:{2:d} BrghtN:{3:d} Noise: {4:f}'.format(\
iCam, iCcd, len(tics), len(np.where(tmags<10.0)[0]),\
brightstd))
plt.plot(obscols, obsrows, '.')
plt.axhline(rowMin, ls='-', color='k')
plt.axhline(rowMax, ls='-', color='k')
plt.axvline(colMin, ls='-', color='k')
plt.axvline(colMax, ls='-', color='k')
plt.xlabel('Column [pix]')
plt.ylabel('Row [pix]')
plt.show()
plt.plot(nTots, noises, '.')
plt.xlabel('N Reference Targs')
plt.ylabel('Fit noise [arcsec]')
plt.show()
plt.plot(nBrgts, noises, '.')
plt.xlabel('N Bright (Tm<10) Ref Targs')
    plt.ylabel('WCS Fit Residual [arcsec]')
plt.show()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'control_app.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(958, 795)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.gridLayout_3 = QtGui.QGridLayout(self.centralwidget)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.centralGridLayout = QtGui.QGridLayout()
self.centralGridLayout.setObjectName(_fromUtf8("centralGridLayout"))
self.tabWidget = QtGui.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab_3 = QtGui.QWidget()
self.tab_3.setObjectName(_fromUtf8("tab_3"))
self.tabWidget.addTab(self.tab_3, _fromUtf8(""))
self.visualizerTab = QtGui.QWidget()
self.visualizerTab.setObjectName(_fromUtf8("visualizerTab"))
self.gridLayout_2 = QtGui.QGridLayout(self.visualizerTab)
self.gridLayout_2.setMargin(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.visualizerVLayout = QtGui.QVBoxLayout()
self.visualizerVLayout.setObjectName(_fromUtf8("visualizerVLayout"))
self.soundDeviceSelectBox = QtGui.QComboBox(self.visualizerTab)
self.soundDeviceSelectBox.setObjectName(_fromUtf8("soundDeviceSelectBox"))
self.visualizerVLayout.addWidget(self.soundDeviceSelectBox)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.visualizerStartBtn = QtGui.QPushButton(self.visualizerTab)
self.visualizerStartBtn.setObjectName(_fromUtf8("visualizerStartBtn"))
self.horizontalLayout.addWidget(self.visualizerStartBtn)
self.visualizerStopBtn = QtGui.QPushButton(self.visualizerTab)
self.visualizerStopBtn.setObjectName(_fromUtf8("visualizerStopBtn"))
self.horizontalLayout.addWidget(self.visualizerStopBtn)
self.visualizerVLayout.addLayout(self.horizontalLayout)
self.gridLayout_2.addLayout(self.visualizerVLayout, 0, 0, 1, 1)
self.tabWidget.addTab(self.visualizerTab, _fromUtf8(""))
self.centralGridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)
self.gridLayout_3.addLayout(self.centralGridLayout, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 958, 21))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "LED Control", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("MainWindow", "Start", None))
self.visualizerStartBtn.setText(_translate("MainWindow", "Start", None))
self.visualizerStopBtn.setText(_translate("MainWindow", "Stop", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.visualizerTab), _translate("MainWindow", "Tab 2", None))
|
# -*- coding: utf-8 -*-
"""
zeronimo.helpers
~~~~~~~~~~~~~~~~
Helper functions.
:copyright: (c) 2013-2017 by Heungsub Lee
:license: BSD, see LICENSE for more details.
"""
from errno import EINTR
import zmq
__all__ = ['FALSE_RETURNER', 'class_name', 'make_repr', 'socket_type_name',
'repr_socket', 'eintr_retry', 'eintr_retry_zmq']
#: A function which always returns ``False``. It is used for default of
#: `Worker.reject_if` and `Fanout.drop_if`.
FALSE_RETURNER = lambda *a, **k: False
def class_name(obj):
"""Returns the class name of the object."""
return type(obj).__name__
def _repr_attr(obj, attr, data=None, reprs=None):
val = getattr(obj, attr)
if data is not None:
val = data.get(attr, val)
if reprs is None:
repr_f = repr
else:
repr_f = reprs.get(attr, repr)
return repr_f(val)
def make_repr(obj, params=None, keywords=None, data=None, name=None,
reprs=None):
"""Generates a string of object initialization code style. It is useful
for custom __repr__ methods::
class Example(object):
def __init__(self, param, keyword=None):
self.param = param
self.keyword = keyword
def __repr__(self):
return make_repr(self, ['param'], ['keyword'])
See the representation of example object::
>>> Example('hello', keyword='world')
Example('hello', keyword='world')
"""
opts = []
if params is not None:
opts.append(', '.join(
_repr_attr(obj, attr, data, reprs) for attr in params))
if keywords is not None:
opts.append(', '.join(
'%s=%s' % (attr, _repr_attr(obj, attr, data, reprs))
for attr in keywords))
if name is None:
name = class_name(obj)
return '%s(%s)' % (name, ', '.join(opts))
_socket_type_names = {}
for name in ('PAIR PUB SUB REQ REP DEALER ROUTER PULL PUSH XPUB XSUB '
'STREAM').split():
try:
socket_type = getattr(zmq, name)
except AttributeError:
continue
assert socket_type not in _socket_type_names
_socket_type_names[socket_type] = name
def socket_type_name(socket_type):
"""Gets the ZeroMQ socket type name."""
return _socket_type_names[socket_type]
def repr_socket(socket):
try:
return '%s[%d]' % (socket_type_name(socket.type), socket.fd)
except zmq.ZMQError as exc:
return 'ERR[%d]' % exc.errno
def eintr_retry(exc_type, f, *args, **kwargs):
"""Calls a function. If an error of the given exception type with
interrupted system call (EINTR) occurs calls the function again.
"""
while True:
try:
return f(*args, **kwargs)
except exc_type as exc:
if exc.errno != EINTR:
raise
else:
break
def eintr_retry_zmq(f, *args, **kwargs):
"""The specialization of :func:`eintr_retry` by :exc:`zmq.ZMQError`."""
return eintr_retry(zmq.ZMQError, f, *args, **kwargs)
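# Hedged usage sketch ("sock" is a placeholder zmq socket, not part of this module):
#   msg = eintr_retry_zmq(sock.recv)
#   eintr_retry_zmq(sock.send, b'reply')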
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''A container for timeline-based events and traces that can handle importing
raw event data from different sources. This model closely resembles that in the
trace_viewer project:
https://code.google.com/p/trace-viewer/
'''
# Register importers for data
from telemetry.core.timeline import inspector_importer
from telemetry.core.timeline.tracing import trace_event_importer
_IMPORTERS = [
inspector_importer.InspectorTimelineImporter,
trace_event_importer.TraceEventTimelineImporter
]
class TimelineModel(object):
def __init__(self, event_data=None, shift_world_to_zero=True):
self._root_events = []
self._all_events = []
self._frozen = False
self.import_errors = []
self.metadata = []
self._bounds = None
if event_data is not None:
self.ImportTraces([event_data], shift_world_to_zero=shift_world_to_zero)
@property
def min_timestamp(self):
if self._bounds is None:
self.UpdateBounds()
return self._bounds[0]
@property
def max_timestamp(self):
if self._bounds is None:
self.UpdateBounds()
return self._bounds[1]
def AddEvent(self, event):
if self._frozen:
raise Exception("Cannot add events once recording is done")
self._root_events.append(event)
self._all_events.extend(
event.GetAllChildrenRecursive(include_self=True))
def DidFinishRecording(self):
self._frozen = True
def ImportTraces(self, traces, shift_world_to_zero=True):
if self._frozen:
raise Exception("Cannot add events once recording is done")
importers = []
for event_data in traces:
importers.append(self._CreateImporter(event_data))
    importers.sort(key=lambda x: x.import_priority)
for importer in importers:
# TODO: catch exceptions here and add it to error list
importer.ImportEvents()
for importer in importers:
importer.FinalizeImport()
if shift_world_to_zero:
self.ShiftWorldToZero()
    # Because of FinalizeImport, it would probably be a good idea
    # to prevent the timeline from being modified.
self.DidFinishRecording()
def ShiftWorldToZero(self):
if not len(self._root_events):
return
self.UpdateBounds()
delta = min(self._root_events, key=lambda e: e.start).start
for event in self._root_events:
event.ShiftTimestampsForward(-delta)
def UpdateBounds(self):
if not len(self._root_events):
self._bounds = (0, 0)
return
for e in self._root_events:
e.UpdateBounds()
first_event = min(self._root_events, key=lambda e: e.start)
last_event = max(self._root_events, key=lambda e: e.end)
self._bounds = (first_event.start, last_event.end)
def GetRootEvents(self):
return self._root_events
def GetAllEvents(self):
return self._all_events
def GetAllEventsOfName(self, name):
return [e for e in self._all_events if e.name == name]
def _CreateImporter(self, event_data):
for importer_class in _IMPORTERS:
if importer_class.CanImport(event_data):
return importer_class(self, event_data)
raise ValueError("Could not find an importer for the provided event data")
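# Hedged usage sketch (the accepted event_data format depends on the registered
# importers; "raw_trace" and the event name are placeholders):
#   model = TimelineModel(event_data=raw_trace)
#   print(model.min_timestamp, model.max_timestamp)
#   events = model.GetAllEventsOfName('SomeEventName')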
|
try:
from .KsDataReader import *
except ImportError:
pass
|
import os
import json
from multiplespawner.defaults import default_base_path
from multiplespawner.util import load
def get_spawner_template_path(path=None):
if "MULTIPLE_SPAWNER_TEMPLATE_FILE" in os.environ:
path = os.environ["MULTIPLE_SPAWNER_TEMPLATE_FILE"]
else:
# If no path is set programmatically
if not path:
path = os.path.join(default_base_path, "spawner_templates.json")
return path
def get_spawner_template(provider, resource_type, path=None):
if not path:
path = get_spawner_template_path(path=path)
templates = load(path, handler=json)
if not isinstance(templates, list):
return None
for template in templates:
if (
template["resource_type"] == resource_type
and provider in template["providers"]
):
return template
return None
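# Hedged example of a spawner_templates.json entry (only the two fields used by the
# lookup above are shown; values are illustrative):
#   [{"resource_type": "virtual_machine", "providers": ["kvm", "aws"]}]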
|
from .sensitivity import solve_sensitivity
from .monte_carlo import solve_monte_carlo
from ..model_parser import ModelParser
def solve_identifiability(model):
method = model['options']['method']
if method == 'sensitivity':
return solve_sensitivity(model, ModelParser)
    elif method == 'monte_carlo':
        return solve_monte_carlo(model)
    raise ValueError(f"Unknown identifiability method: {method}")
|
"""
GoPro splits long recordings out to multiple files (chapters).
This library is for locating movies (from a GoPro memory card)
in a directory and determining which movies belong to a sequence
and merging them into a single movie file.
>>> import gp_merge_clips
>>> gp_merge_clips.merge_clips('/path/to/root/dir')
Command line
$ python -m gp_merge_clips /path/to/root/dir
"""
import os
import shutil
import argparse
import tempfile
from itertools import groupby
from operator import itemgetter
from subprocess import Popen
_VERBOSE = True
VALID = ('mp4', 'mov', 'avi')
EXE = 'ffmpeg -f concat -safe 0 -i %(text)s -c:v copy %(output)s'
def merge_clips(path, dryrun=False):
"""
Locate movie clips that are chapters of one recording
and merge them to single movie file(s).
:param path: root path to a directory containing the movie files
:rtype: {}
:returns: {
<clip basename>: {
'clips': [<manifest of clips to be merged>, ...]
'command': <ffmpeg command that is generated>
'output': <merged movie file>
}
}
"""
mapping = _map_chapters(path)
for key in mapping:
clips = mapping[key]['clips']
if len(clips) < 2:
continue
clips.sort()
mapping[key].update(_merge_clips(clips, dryrun))
key_path = os.path.join(path, key)
if not os.path.exists(key_path):
if dryrun:
_print("DRYRUN: creating '%s'" % key_path)
else:
os.makedirs(key_path)
for clip in clips:
_move(clip, key_path, dryrun)
_move(mapping[key]['output'], clips[0], dryrun)
return mapping
def _print(message):
"""
:param str message:
"""
global _VERBOSE
if _VERBOSE:
print(message)
def _move(src, dst, dryrun):
    """
    :param str src:
    :param str dst:
    :param bool dryrun:
    """
    if dryrun:
        _print("Moving %s > %s" % (src, dst))
    else:
        shutil.move(src, dst)
def _merge_clips(clips, dryrun):
"""
    ffmpeg plays nicely when the files that are to be concatenated
    are written to a text file and that text file is passed to ffmpeg.
    The merged output file is written to a temp location.
:param [] clips: list of movie clips to be concatenated
:param bool dryrun:
:returns: {
'output': <concatenated file>
'command': <ffmpeg command executed>
}
"""
text = []
for clip in clips:
text.append("file '%s'" % clip)
text = '\n'.join(text)
tmp_text = tempfile.mktemp()
if dryrun:
_print("Writing:\n%s\n>>%s" % (text, tmp_text))
else:
with open(tmp_text, 'w') as open_file:
open_file.write(text)
ext = os.path.splitext(clips[0])[-1]
output = tempfile.mktemp(suffix=ext)
command = EXE % {
'text': tmp_text,
'output': output
}
_print("Running: %s" % command)
if not dryrun:
proc = Popen(command, shell=True)
proc.communicate()
if proc.returncode != 0:
raise RuntimeError("Failed to process '%s'" % command)
os.remove(tmp_text)
return {'output': output, 'command': command}
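# Hedged example of the temp concat file written above and the resulting command
# (paths are illustrative):
#   file '/media/gopro/GH010013.MP4'
#   file '/media/gopro/GH020013.MP4'
#   ffmpeg -f concat -safe 0 -i /tmp/tmpXXXXXX -c:v copy /tmp/tmpYYYYYY.MP4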
def _sort_by_mtime(path, movies):
"""
Sort movies by the mtime, this gives us a list of movies that is not
sorted by name. This will help distinguish clips that are chapters
of a single shot
:param str path: root path
:param [] movies: movie file names
:rtype: [<str>, <str>, ...]
"""
mtime_mapping = {}
for movie in movies:
full_path = os.path.join(path, movie)
mtime_mapping[os.stat(full_path).st_mtime] = movie
mtime = [*mtime_mapping.keys()]
mtime.sort()
return [mtime_mapping[x] for x in mtime]
def _map_movies(movies):
"""
In order to sequentially sort the movie clips we have to strip
the 'GH' prefix and extension and cast the rest of the filename
to an integer
input:
['GH010013.MP4', 'GH020013.MP4', 'GH030013.MP4']
output:
{10013: 'GH010013.MP4', 20013: 'GH020013.MP4', 30013: 'GH030013.MP4'}
:param [] movies:
:rtype: {}
"""
mapped_movies = {}
for movie in movies:
basename = os.path.splitext(movie)[0]
mapped_movies[int(basename[2:])] = movie
return mapped_movies
def _sort_sequential_movies(movies):
"""
The logic here will figure out the sequentially named files and
return the correct sequences
A list that looks like this:
['GH010013.MP4', 'GH010014.MP4', 'GH010015.MP4', 'GH010016.MP4',
'GH010017.MP4', 'GH010018.MP4', 'GH020013.MP4', 'GH020016.MP4',
'GH030013.MP4', 'GH030016.MP4', 'GH040016.MP4']
Should be sorted like this
[['GH010013.MP4', 'GH010014.MP4', 'GH010015.MP4', 'GH010016.MP4',
'GH010017.MP4', 'GH010018.MP4'],
['GH020013.MP4'],
['GH020016.MP4'],
['GH030013.MP4'],
['GH030016.MP4'],
['GH040016.MP4']]
The nested list that has the smallest number should be the list
containing all movies that are first chapter or solo (un-chaptered).
This is the list that should be returned
['GH010013.MP4', 'GH010014.MP4', 'GH010015.MP4', 'GH010016.MP4',
'GH010017.MP4', 'GH010018.MP4']
    :param [] movies: movie file names
:rtype: []
"""
mapped_movies = _map_movies(movies)
keys = [*mapped_movies]
keys.sort()
sequential_movies = []
for gb in groupby(enumerate(keys), lambda ix : ix[0] - ix[1]):
grouped = list(map(itemgetter(1), gb[1]))
sequential_movies.append([mapped_movies[x] for x in grouped])
    sequential_movies.sort(key=lambda x: x[0])
try:
first_chapters = sequential_movies[0]
except IndexError:
first_chapters = []
return first_chapters
def _map_chapters(path):
"""
Create a mapping table (dict) that associates chapters
:param str path:
:rtype: {}
:returns: {
<clip basename>: {
'clips': [<manifest of clips to be merged>, ...]
'command': <ffmpeg command that is generated>
'output': <merged movie file>
}
}
"""
movies = []
mapping = {}
# locate all valid media files
for movie in os.listdir(path):
ext = os.path.splitext(movie)[-1]
if ext[1:].lower() in VALID:
movies.append(movie)
# isolate sequential files, that should also be the first
# chapter of a single shot
sequential_movies = _sort_sequential_movies(movies)
if not sequential_movies:
return mapping
# get movies sorted by their mtime (ignore sequential naming)
sorted_by_mtime = _sort_by_mtime(path, movies)
    # here we create a diff of indices that will identify the non-first chapters
    # the sorted_by_mtime would look something like this
# ['GH010013.MP4', 'GH020013.MP4', 'GH030013.MP4', 'GH010014.MP4',
# 'GH010015.MP4', 'GH010016.MP4', 'GH020016.MP4', 'GH030016.MP4',
# 'GH040016.MP4', 'GH010017.MP4', 'GH010018.MP4']
# the sequential nodes would look like this
# ['GH010013.MP4', 'GH010014.MP4', 'GH010015.MP4', 'GH010016.MP4',
# 'GH010017.MP4', 'GH010018.MP4']
# the diff would isolate the indices of the movies (first example)
# that are missing from the second example.
# [1, 2, 6, 7, 8]
diff_movies = list(set(sorted_by_mtime) - set(sequential_movies))
diff_indices = [sorted_by_mtime.index(d) for d in diff_movies]
diff_indices.sort()
# now we group the indices to look like this
# [[1, 2], [6, 7, 8]]
# pulling the first and last index of each nested list we know the
# index range of the chapters that belong to one shot
for gb in groupby(enumerate(diff_indices), lambda ix : ix[0] - ix[1]):
grouped_indices = list(map(itemgetter(1), gb[1]))
grouped_movies = sorted_by_mtime[grouped_indices[0]-1:
grouped_indices[-1]+1]
key = os.path.splitext(grouped_movies[0])[0]
mapping.setdefault(key,
{'clips': [os.path.join(path, x)
for x in grouped_movies],
'command': None,
'output': None})
return mapping
def _main():
parser = argparse.ArgumentParser()
parser.add_argument('path', nargs='?', default=os.getcwd())
parser.add_argument('-n', '--dryrun', action='store_true')
args = parser.parse_args()
merge_clips(args.path, dryrun=args.dryrun)
if __name__ == '__main__':
_main()
|
from django.db import models
from slugify import slugify
class Country(models.Model):
""" Country's model """
name = models.CharField(
verbose_name='Country name',
max_length=255,
)
slug = models.SlugField(
unique=True,
max_length=255,
blank=True,
editable=False
)
class Meta:
verbose_name = 'Country'
verbose_name_plural = 'Countries'
ordering = ('name',)
def __str__(self) -> str:
return f'{self.name}'
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(Country, self).save(*args, **kwargs)
class City(models.Model):
""" City's model """
country = models.ForeignKey(
to=Country,
on_delete=models.CASCADE,
verbose_name='Country',
related_name='cities'
)
name = models.CharField(
verbose_name='City name',
max_length=255
)
slug = models.SlugField(
unique=True,
max_length=255,
blank=True,
editable=False,
)
class Meta:
verbose_name = 'City'
verbose_name_plural = 'Cities'
def __str__(self) -> str:
return f'{self.name}'
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(City, self).save(*args, **kwargs)
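# Hedged usage sketch (requires a configured Django project; values illustrative):
#   nz = Country.objects.create(name='New Zealand')        # slug -> 'new-zealand'
#   City.objects.create(country=nz, name='Wellington')     # slug -> 'wellington'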
|
from . import bit
from .utils import Key, BaseUrl
from .bit import capture, verify_images, verify, status, setup, BiTStatus
|
#!/usr/bin/env python
"""%(prog)s - run a command when a file is changed
Usage: %(prog)s [-r] FILE COMMAND...
%(prog)s [-r] FILE [FILE ...] -c COMMAND
FILE can be a directory. Watch recursively with -r.
Use %%f to pass the filename to the command.
Copyright (c) 2011, Johannes H. Jensen.
License: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import sys
import os
import re
import pyinotify
class WhenChanged(pyinotify.ProcessEvent):
# Exclude Vim swap files, its file creation test file 4913 and backup files
exclude = re.compile(r'^\..*\.sw[px]*$|^4913$|.~$')
def __init__(self, files, command, recursive=False):
self.files = files
self.paths = {os.path.realpath(f): f for f in files}
self.command = command
self.recursive = recursive
def run_command(self, file):
os.system(self.command.replace('%f', file))
def is_interested(self, path):
basename = os.path.basename(path)
if self.exclude.match(basename):
return False
if path in self.paths:
return True
path = os.path.dirname(path)
if path in self.paths:
return True
if self.recursive:
while os.path.dirname(path) != path:
path = os.path.dirname(path)
if path in self.paths:
return True
return False
def process_IN_CLOSE_WRITE(self, event):
path = event.pathname
if self.is_interested(path):
self.run_command(path)
def run(self):
wm = pyinotify.WatchManager()
notifier = pyinotify.Notifier(wm, self)
# Add watches (IN_CREATE is required for auto_add)
mask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE
watched = set()
for p in self.paths:
if os.path.isdir(p) and not p in watched:
# Add directory
wdd = wm.add_watch(p, mask, rec=self.recursive,
auto_add=self.recursive)
else:
# Add parent directory
path = os.path.dirname(p)
if not path in watched:
wdd = wm.add_watch(path, mask)
notifier.loop()
def print_usage(prog):
print(__doc__ % {'prog': prog}, end='')
def main():
args = sys.argv
prog = os.path.basename(args.pop(0))
if '-h' in args or '--help' in args:
print_usage(prog)
exit(0)
files = []
command = []
recursive = False
if args and args[0] == '-r':
recursive = True
args.pop(0)
if '-c' in args:
cpos = args.index('-c')
files = args[:cpos]
command = args[cpos+1:]
elif len(args) >= 2:
files = [args[0]]
command = args[1:]
if not files or not command:
print_usage(prog)
exit(1)
command = ' '.join(command)
# Tell the user what we're doing
if len(files) > 1:
l = ["'%s'" % f for f in files]
s = ', '.join(l[:-1]) + ' or ' + l[-1]
print("When %s changes, run '%s'" % (s, command))
else:
print("When '%s' changes, run '%s'" % (files[0], command))
wc = WhenChanged(files, command, recursive)
try:
wc.run()
except KeyboardInterrupt:
print()
exit(0)
|
from ..core import Niche
from .model import Model, simulate
from .env import minigridhard_custom, Env_config
from collections import OrderedDict
DEFAULT_ENV = Env_config(
name='default_env',
lava_prob=[0., 0.1],
obstacle_lvl=[0., 1.],
box_to_ball_prob=[0., 0.3],
door_prob=[0., 0.3],
wall_prob=[0., 0.3])
class MiniGridNiche(Niche):
def __init__(self, env_configs, seed, init='random', stochastic=False):
self.model = Model(minigridhard_custom)
if not isinstance(env_configs, list):
env_configs = [env_configs]
self.env_configs = OrderedDict()
for env in env_configs:
self.env_configs[env.name] = env
self.seed = seed
self.stochastic = stochastic
self.model.make_env(seed=seed, env_config=DEFAULT_ENV)
self.init = init
def __getstate__(self):
return {"env_configs": self.env_configs,
"seed": self.seed,
"stochastic": self.stochastic,
"init": self.init,
}
def __setstate__(self, state):
self.model = Model(minigridhard_custom)
self.env_configs = state["env_configs"]
self.seed = state["seed"]
self.stochastic = state["stochastic"]
self.model.make_env(seed=self.seed, env_config=DEFAULT_ENV)
self.init = state["init"]
def add_env(self, env):
env_name = env.name
assert env_name not in self.env_configs.keys()
self.env_configs[env_name] = env
def delete_env(self, env_name):
assert env_name in self.env_configs.keys()
self.env_configs.pop(env_name)
def initial_theta(self):
if self.init == 'random':
return self.model.get_random_model_params()
elif self.init == 'zeros':
import numpy as np
return np.zeros(self.model.param_count)
else:
raise NotImplementedError(
'Undefined initialization scheme `{}`'.format(self.init))
def rollout(self, theta, random_state, eval=False):
self.model.set_model_params(theta)
total_returns = 0
total_length = 0
if self.stochastic:
seed = random_state.randint(1000000)
else:
seed = self.seed
#print('self.env_configs.values()', self.env_configs.values())
for env_config in self.env_configs.values():
returns, lengths = simulate(
self.model, seed=seed, train_mode=not eval, num_episode=1, env_config_this_sim=env_config)
total_returns += returns[0]
total_length += lengths[0]
return total_returns / len(self.env_configs), total_length
|
import xml.dom.minidom, sys
"""Para parsear el documento con muchas noticias (cada noticia entre <doc></doc>),
necesitamos una metaetiqueta (en este ejemplo <docs>).
"""
def getKey(item):
return item[0]
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def handleReuse(docs):
tuples=[]
cases = docs.getElementsByTagName("reuse_case")
for case in cases:
a=handleCase(case)
tuples.append(a[0])
tuples.append(a[1])
return tuples
def handleCase(case):
    pair = [str(case.attributes['source_code1'].value), str(case.attributes['source_code2'].value)]
    return [pair, pair[::-1]]
if len(sys.argv) != 3:
    print("Number of arguments must be 2: QREL_FILE DETECTION_FILE")
    sys.exit()
#print 'Number of arguments:', len(sys.argv), 'arguments.'
#archivo= 'SOCO14-c.qrel'
qrel_file=str(sys.argv[1])
det_file=str(sys.argv[2])
document = open(det_file, 'r').read()
lines = [line.strip() for line in open(qrel_file)]
qrel= [l.split() for l in lines]
gs = [ [rj[1],rj[0]] for rj in qrel ]
gold_standard = gs+qrel
gold_standard.sort(key=getKey)
# Minidom parses the text
dom = xml.dom.minidom.parseString(document)
########################################
# Start the extraction and compare against the gold standard
result=handleReuse(dom)
relevant_documents= len(gold_standard)
retrieved_documents= len(result)
contados=[]
intersection= 0
for i in result:
if i in gold_standard and i not in contados and [i[1],i[0]] not in contados:
intersection=intersection+1
contados=contados+[i]
intersection=intersection*2
precision = intersection/float(retrieved_documents)
recall = intersection/float(relevant_documents)
f1= 2 * ((precision*recall)/(precision+recall))
#print "Precision = "+ str(intersection/2) + " / "+ str(retrieved_documents/2) + " = %.3f" % precision,
#print "Recall = "+ str(intersection/2) + " / "+ str(relevant_documents/2) + " = %.3f" % recall,
print "\tF1 = %.3f" % f1,
print "\tPrec. = %.3f" % precision,
print "\tRec. = %.3f" % recall
|
import numpy as np
from hyperopt import hp, fmin, tpe, Trials
binary_operators = ["*", "/", "+", "-"]
unary_operators = ["sin", "cos", "exp", "log"]
space = dict(
# model_selection="best",
model_selection=hp.choice("model_selection", ["accuracy"]),
# binary_operators=None,
binary_operators=hp.choice("binary_operators", [binary_operators]),
# unary_operators=None,
unary_operators=hp.choice("unary_operators", [unary_operators]),
# populations=100,
populations=hp.qloguniform("populations", np.log(10), np.log(1000), 1),
# niterations=4,
niterations=hp.choice(
"niterations", [10000]
), # We will quit automatically based on a clock.
# ncyclesperiteration=100,
ncyclesperiteration=hp.qloguniform(
"ncyclesperiteration", np.log(10), np.log(5000), 1
),
# alpha=0.1,
alpha=hp.loguniform("alpha", np.log(0.0001), np.log(1000)),
# annealing=False,
annealing=hp.choice("annealing", [False, True]),
# fraction_replaced=0.01,
fraction_replaced=hp.loguniform("fraction_replaced", np.log(0.0001), np.log(0.5)),
# fraction_replaced_hof=0.005,
fraction_replaced_hof=hp.loguniform(
"fraction_replaced_hof", np.log(0.0001), np.log(0.5)
),
# population_size=100,
population_size=hp.qloguniform("population_size", np.log(20), np.log(1000), 1),
# parsimony=1e-4,
parsimony=hp.loguniform("parsimony", np.log(0.0001), np.log(0.5)),
# topn=10,
topn=hp.qloguniform("topn", np.log(2), np.log(50), 1),
# weight_add_node=1,
weight_add_node=hp.loguniform("weight_add_node", np.log(0.0001), np.log(100)),
# weight_insert_node=3,
weight_insert_node=hp.loguniform("weight_insert_node", np.log(0.0001), np.log(100)),
# weight_delete_node=3,
weight_delete_node=hp.loguniform("weight_delete_node", np.log(0.0001), np.log(100)),
# weight_do_nothing=1,
weight_do_nothing=hp.loguniform("weight_do_nothing", np.log(0.0001), np.log(100)),
# weight_mutate_constant=10,
weight_mutate_constant=hp.loguniform(
"weight_mutate_constant", np.log(0.0001), np.log(100)
),
# weight_mutate_operator=1,
weight_mutate_operator=hp.loguniform(
"weight_mutate_operator", np.log(0.0001), np.log(100)
),
# weight_randomize=1,
weight_randomize=hp.loguniform("weight_randomize", np.log(0.0001), np.log(100)),
# weight_simplify=0.002,
weight_simplify=hp.choice("weight_simplify", [0.002]), # One of these is fixed.
# crossover_probability=0.01,
crossover_probability=hp.loguniform(
"crossover_probability", np.log(0.00001), np.log(0.2)
),
# perturbation_factor=1.0,
perturbation_factor=hp.loguniform(
"perturbation_factor", np.log(0.0001), np.log(100)
),
# maxsize=20,
maxsize=hp.choice("maxsize", [30]),
# warmup_maxsize_by=0.0,
warmup_maxsize_by=hp.uniform("warmup_maxsize_by", 0.0, 0.5),
# use_frequency=True,
use_frequency=hp.choice("use_frequency", [True, False]),
# optimizer_nrestarts=3,
optimizer_nrestarts=hp.quniform("optimizer_nrestarts", 1, 10, 1),
# optimize_probability=1.0,
optimize_probability=hp.uniform("optimize_probability", 0.0, 1.0),
# optimizer_iterations=10,
optimizer_iterations=hp.quniform("optimizer_iterations", 1, 10, 1),
# tournament_selection_p=1.0,
tournament_selection_p=hp.uniform("tournament_selection_p", 0.0, 1.0),
)
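# Hedged sketch of how this search space is typically consumed with hyperopt
# ("objective" is a placeholder scoring function, not part of this file):
#   trials = Trials()
#   best = fmin(fn=objective, space=space, algo=tpe.suggest,
#               max_evals=100, trials=trials)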
|
import math
import random
import numpy as np
class Register(object):
def __init__(self, num_qubits, state):
"""Initialise quantum register
        A register can be in a measured (classical) state or in a superposition
        of states, e.g. if a Hadamard gate is applied to a 1-qubit system then
        the qubit is in a superposition of both 0 and 1
The state of the register must always follow this equation:
        Let a, b, c and d be the complex amplitudes of a
        2-qubit system: |a|^2 + |b|^2 + |c|^2 + |d|^2 = 1
Arguments:
num_qubits -- number of qubits in register e.g. 3
state -- column vector representing initial quantum state. Example:
[
(0.0, 0.0i) # 0 0
(0.0, 0.0i) # 0 1
(1.0, 0.0i) # 1 0
(0.0, 0.0i) # 1 1
]
shows a 'collapsed quantum state' because 1 0 is the only state set to 1
"""
num_bases = len(state)
if num_bases != pow(2, num_qubits):
raise Exception("Invalid number of bases vectors")
eps = 0.0001
total = complex(0, 0)
for val in state:
total = total + pow(np.abs(val), 2)
if(not (abs(total - (complex(1, 0))) < eps)):
raise Exception("Quantum state is invalid")
self.state = state
self.num_qubits = num_qubits
@staticmethod
def generate_bases(num_qubits):
"""Generate bases vectors
Arguments:
num_qubits -- number of qubits in register e.g. 3
Example: [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0,
1], [1, 1, 0], [1, 1, 1]] for a 3-qubit system
"""
bases = []
num_vectors = pow(2, num_qubits)
for idx in range(num_vectors):
base = []
current_base = idx
for _ in range(num_qubits):
base.append(int(current_base % 2))
                current_base = current_base // 2
base.reverse()
bases.append(base)
return bases
@staticmethod
def filter_bases(num_qubits, qubit_nums):
"""Filter bases
Arguments:
num_qubits -- number of qubits in register e.g. 3
qubit_nums -- the qubits to filter e.g. 0 and 2
Example:
- Generate bases: [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1,
0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]
- Filter to qubits 0 and 2: [[0, 0], [0, 1], [0, 0], [0, 1], [1,
0], [1, 1], [1, 0], [1, 1]]
"""
bases = Register.generate_bases(num_qubits)
for idx in range(len(bases)):
bases_before_filter = bases[idx]
bases[idx] = []
for qubit_position in qubit_nums:
bases[idx].append(bases_before_filter[qubit_position])
return bases
@staticmethod
def dirac_to_column_vector(dirac):
"""Convert Dirac vector from to column vector form
Arguments:
dirac -- dirac vector to convert e.g. [0, 0]
Example:
[0, 0] (|00> in Dirac notation) returns [0, 0, 0, 1]
"""
binary_str = ''
for val in dirac:
binary_str = binary_str + str(int(val))
column_vector_len = pow(2, len(dirac))
one_position = column_vector_len - int(binary_str, 2) - 1
vector = [0] * column_vector_len
vector[one_position] = 1
return vector
@staticmethod
def column_vector_to_possible_dirac_vectors(column):
"""Convert column vector to all possible Dirac vectors
Arguments:
column -- column vector to convert e.g. [0, 0, 0, 1]
Examples:
[0, 0, 0, 1] returns [0, 0]
(or |00> in Dirac notation)
[0, 0, 1/sqrt(2), 1/sqrt(2)] returns [0, 0] or [0, 1]
(or |00> or |01> in Dirac notation)
"""
possibilities = []
len_column = len(column)
        num_qubits = int(math.log2(len_column))
bases = Register.generate_bases(num_qubits)
for idx, number in enumerate(column):
if number != 0:
possibilities.append(bases[len_column - idx - 1])
return possibilities
    def __get_collapsed_qubit__(self, probabilities):
        """Get a random basis state to collapse to based on the quantum state
        Arguments:
        probabilities -- mapping of basis-state index to its probability e.g. {0: 0.5, 1: 0.5}
        """
        r = random.uniform(0, 1)
        s = 0
        for state_idx, prob in probabilities.items():
            s += prob
            if s >= r:
                return state_idx
        return len(probabilities) - 1
def measure(self):
"""Perform quantum measurement
Collapses from quantum state to classical state
"""
current_state = np.squeeze(np.asarray(self.state))
probabilities = {}
for idx, basis in enumerate(current_state):
            probabilities[idx] = pow(np.abs(basis), 2)
collapsed_qubit = self.__get_collapsed_qubit__(probabilities)
for idx, basis in enumerate(current_state):
current_state[idx] = 0
current_state[collapsed_qubit] = 1
self.state = np.asmatrix(current_state)
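# Illustrative only (not part of the original module): a minimal sketch showing a
# collapsed 2-qubit register and the basis/conversion helpers. Expected outputs
# follow from the code above (note that dirac_to_column_vector places |00> at the
# last index, i.e. the ordering is reversed relative to the generated basis list).
if __name__ == '__main__':
    reg = Register(2, [complex(0, 0), complex(0, 0), complex(1, 0), complex(0, 0)])
    print(Register.generate_bases(2))               # [[0, 0], [0, 1], [1, 0], [1, 1]]
    print(Register.dirac_to_column_vector([0, 0]))  # [0, 0, 0, 1]
    print(Register.column_vector_to_possible_dirac_vectors([0, 0, 0, 1]))  # [[0, 0]]
    reg.measure()   # collapses the state (here it is already collapsed)
    print(reg.state)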
|
import tkinter as tk
class Plot4Q(tk.Frame):
DEFAULT_BG_COLOR = 'grey'
DEFAULT_LINE_COLOR = '#39FF14'
def __init__(self, master, x_pixels=200, y_pixels=200, xrange=1.0, yrange=1.0, grid=False, x_axis_label_str=None, y_axis_label_str=None):
self.parent = master
super().__init__(self.parent)
self.width_px = x_pixels
self.height_px = y_pixels
self.x_per_pixel = xrange/x_pixels
self.y_per_pixel = yrange/y_pixels
self.grid = grid
self.plot = tk.Canvas(self.parent, width=x_pixels, height=y_pixels, background=self.DEFAULT_BG_COLOR)
self.plot.grid()
self.draw_axes()
self.draw_grid()
self.label_x_axis(x_axis_label_str)
self.label_y_axis(y_axis_label_str)
self.plot_series_number = 0
self.current_points = {}
def remove_points(self):
self.plot.delete('data_point')
def remove_lines(self):
self.plot.delete('data_line')
def draw_axes(self):
# draw the primary axes
x0, y0 = self.to_screen_coords(-self.width_px / 2, 0)
x1, y1 = self.to_screen_coords(self.width_px / 2, 0)
x_axis = self.plot.create_line(x0, y0, x1, y1, tag='x_axis')
x0, y0 = self.to_screen_coords(0, self.height_px / 2)
x1, y1 = self.to_screen_coords(0, -self.height_px / 2)
y_axis = self.plot.create_line(x0, y0, x1, y1, tag='y-axis')
def draw_grid(self):
if self.grid:
# create the grid
x_grid_interval_px = self.width_px / 10
y_grid_interval_px = self.height_px / 10
dash_tuple = (1, 1)
for i in range(4):
# top to bottom lines, right quadrants
grid_x = (i + 1) * x_grid_interval_px
grid_y = self.height_px / 2
x1, y1 = self.to_screen_coords(grid_x, grid_y)
x1, y2 = self.to_screen_coords(grid_x, -grid_y)
self.plot.create_line(x1, y1, x1, y2, dash=dash_tuple, tag='grid')
# top to bottom lines, left quadrants
grid_x = -(i + 1) * x_grid_interval_px
grid_y = self.height_px / 2
x1, y1 = self.to_screen_coords(grid_x, grid_y)
x1, y2 = self.to_screen_coords(grid_x, -grid_y)
self.plot.create_line(x1, y1, x1, y2, dash=dash_tuple, tag='grid')
# left-to-right lines, upper quadrants
grid_x = self.width_px / 2
grid_y = (i + 1) * y_grid_interval_px
x1, y1 = self.to_screen_coords(grid_x, grid_y)
x2, y1 = self.to_screen_coords(-grid_x, grid_y)
self.plot.create_line(x1, y1, x2, y1, dash=dash_tuple, tag='grid')
# left-to-right lines, lower quadrants
grid_x = self.width_px / 2
grid_y = -(i + 1) * y_grid_interval_px
x1, y1 = self.to_screen_coords(grid_x, grid_y)
x2, y1 = self.to_screen_coords(-grid_x, grid_y)
self.plot.create_line(x1, y1, x2, y1, dash=dash_tuple, tag='grid')
def label_x_axis(self, label):
if label:
self.plot.create_text((self.width_px - 5, (self.height_px/2)+5), text=label, anchor=tk.NE, tag='x-axis-label')
else:
pass
def label_y_axis(self, label):
if label:
self.plot.create_text((self.width_px/2 + 5, 5), text=label, anchor=tk.NW, tag='y-axis-label')
else:
pass
def plot_line(self, first_point, second_point, point_format=None, fill=None, tag='data_line'):
if not fill:
fill = self.DEFAULT_LINE_COLOR
x0, y0 = first_point
x1, y1 = second_point
if point_format != 'px':
x0 /= self.x_per_pixel
x1 /= self.x_per_pixel
y0 /= self.y_per_pixel
y1 /= self.y_per_pixel
x0_screen, y0_screen = self.to_screen_coords(x0, y0)
x1_screen, y1_screen = self.to_screen_coords(x1, y1)
self.plot.create_line(x0_screen,
y0_screen,
x1_screen,
y1_screen,
fill=fill,
width=3.0,
tag=tag)
def plot_point(self, point, fill='green', tag='data_point'):
if not fill:
fill = self.DEFAULT_LINE_COLOR
# find the location of the point on the canvas
x, y = point
x /= self.x_per_pixel
y /= self.y_per_pixel
x_screen, y_screen = self.to_screen_coords(x, y)
point_radius = 2
x0 = x_screen - point_radius
y0 = y_screen - point_radius
x1 = x_screen + point_radius
y1 = y_screen + point_radius
# if the tag exists, then move the point, else create the point
point_ids = self.plot.find_withtag(tag)
if point_ids != ():
point_id = point_ids[0]
location = self.plot.coords(point_id)
current_x = (location[0] + location[2])/2
current_y = (location[1] + location[3])/2
move_x = x_screen - current_x
move_y = y_screen - current_y
self.plot.move(point_id, move_x, move_y)
else:
point = self.plot.create_oval(x0,
y0,
x1,
y1,
outline=fill,
fill=fill,
tag=tag)
    def scatter(self, list_of_points=None, color='#0000ff', tag='current'):
        """ create the new points, one indexed tag per point """
        if list_of_points is None:
            list_of_points = []
        for i, point in enumerate(list_of_points):
            new_tag = tag + str(i)
            self.plot_point(point, fill=color, tag=new_tag)
        self.current_points[tag] = list_of_points
def remove_scatter(self, tag='current'):
# if erase is True, then we will delete all tags containing the
# prefix contained in 'tag'. For instance, if tag == 'current',
# then we will delete 'current0', 'current1', ...
del_list = []
for e in self.plot.find_all():
for item_tag in self.plot.gettags(e):
if tag in item_tag and item_tag not in del_list:
del_list.append(item_tag)
for item_tag in del_list:
self.plot.delete(item_tag)
def to_screen_coords(self, x, y):
new_x = x + self.width_px/2
new_y = self.height_px/2 - y
return new_x, new_y
if __name__ == "__main__":
# initialize tk items for display
root = tk.Tk()
root.title("for(embed) - Curve Tracer Viewer")
app = Plot4Q(root)
points = [(0,0), (10,10), (20,20)]
app.scatter(points)
app.scatter()
root.mainloop()
|
#BASIC IMAGE READING AND SAVING/COPYING
# import cv2 as cv
# import sys
# img = cv.imread(cv.samples.findFile("dp2.jpg"))
# if img is None:
# sys.exit("Could not read the image.")
# cv.imshow("A Human", img)
# k = cv.waitKey(0)
# if k == ord("s"):
# cv.imwrite("dp3.png", img)
#SIMPLE/GLOBAL THRESHOLDING
# import cv2
# from matplotlib import pyplot as plt
# img = cv2.imread("dp2.jpg")
# img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
# ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
# ret,thresh2 = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV)
# ret,thresh3 = cv2.threshold(img,127,255,cv2.THRESH_TRUNC)
# ret,thresh4 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO)
# ret,thresh5 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO_INV)
# titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
# images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
# for i in range(6):
# plt.subplot(2,3,i+1)
# plt.imshow(images[i],'gray')
# plt.title(titles[i])
# plt.xticks([])
# plt.yticks([])
# plt.show()
# ADAPTIVE THRESHOLD
# import cv2
# import numpy as np
# from matplotlib import pyplot as plt
# img = cv2.imread('bnw2.jpg',0)
# img = cv2.medianBlur(img,5)
# ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
# th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,11,2)
# th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2)
# titles = ['Original Image', 'Global Thresholding (v = 127)',
# 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
# images = [img, th1, th2, th3]
# for i in range(4):
# plt.subplot(2,2,i+1),plt.imshow(images[i],'gray')
# plt.title(titles[i])
# plt.xticks([]),plt.yticks([])
# plt.show()
#OTSU THRESHOLDING
# import cv2
# import numpy as np
# from matplotlib import pyplot as plt
# img = cv2.imread('ap.png',0)
# # global thresholding
# ret1,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
# # Otsu's thresholding
# ret2,th2 = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# # Otsu's thresholding after Gaussian filtering
# blur = cv2.GaussianBlur(img,(5,5),0)
# ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# # plot all the images and their histograms
# images = [img, 0, th1,
# img, 0, th2,
# blur, 0, th3]
# titles = ['Original Noisy Image','Histogram','Global Thresholding (v=127)',
# 'Original Noisy Image','Histogram',"Otsu's Thresholding",
# 'Gaussian filtered Image','Histogram',"Otsu's Thresholding"]
# for i in range(3):
# plt.subplot(3,3,i*3+1),plt.imshow(images[i*3],'gray')
# plt.title(titles[i*3]), plt.xticks([]), plt.yticks([])
# plt.subplot(3,3,i*3+2),plt.hist(images[i*3].ravel(),256)
# plt.title(titles[i*3+1]), plt.xticks([]), plt.yticks([])
# plt.subplot(3,3,i*3+3),plt.imshow(images[i*3+2],'gray')
# plt.title(titles[i*3+2]), plt.xticks([]), plt.yticks([])
# plt.show()
#SEGMENTATION, RGB, BGR, HSV Patterns
# import numpy as np
# import cv2
# from matplotlib import pyplot as plt
# from matplotlib import colors
# from mpl_toolkits.mplot3d import Axes3D
# from matplotlib import cm
# dark = cv2.cvtColor(cv2.imread('dark.jpg'), cv2.COLOR_RGB2BGR)
# hsv_dark = cv2.cvtColor(dark, cv2.COLOR_BGR2HSV)
# h, s, v = cv2.split(hsv_dark)
# fig = plt.figure()
# axis = fig.add_subplot(1, 1, 1, projection="3d")
# pixel_colors = dark.reshape((np.shape(dark)[0]*np.shape(dark)[1], 3))
# norm = colors.Normalize(vmin=-1.,vmax=1.)
# norm.autoscale(pixel_colors)
# pixel_colors = norm(pixel_colors).tolist()
# axis.scatter(h.flatten(), s.flatten(), v.flatten(), facecolors=pixel_colors, marker=".")
# axis.set_xlabel("Hue")
# axis.set_ylabel("Saturation")
# axis.set_zlabel("Value")
# plt.show()
# cv2.destroyAllWindows()
#GEOMETRIC CHANGES
#BLURRING
# import cv2
# import numpy as np
# from matplotlib import pyplot as plt
# img = cv2.imread('as2.png')
# n=10
# kernel = np.ones((n,n),np.float32)/(n*n)
# dst = cv2.filter2D(img,-1,kernel)
# plt.subplot(1,5,1),plt.imshow(img)
# plt.xticks([]), plt.yticks([])
# plt.subplot(1,5,2),plt.imshow(dst)
# plt.xticks([]), plt.yticks([])
# blur = cv2.GaussianBlur(img,(5,5),0)
# median = cv2.medianBlur(img,5)
# biblur = cv2.bilateralFilter(img,9,75,75)
# izzz = [blur, median, biblur]
# for i in range(3):
# plt.subplot(1,5,i+3),plt.imshow(izzz[i])
# plt.xticks([]), plt.yticks([])
# plt.show()
#EDGE DETECTION
# import cv2
# import numpy as np
# from matplotlib import pyplot as plt
# img = cv2.imread('sudoku.jpg',0)
# laplacian = cv2.Laplacian(img,cv2.CV_64F)
# sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)
# sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=5)
# plt.subplot(2,2,1)
# plt.imshow(img,cmap = 'gray')
# plt.title('Original'), plt.xticks([]), plt.yticks([])
# plt.subplot(2,2,2)
# plt.imshow(laplacian,cmap = 'gray')
# plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
# plt.subplot(2,2,3)
# plt.imshow(sobelx,cmap = 'gray')
# plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
# plt.subplot(2,2,4)
# plt.imshow(sobely,cmap = 'gray')
# plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
# plt.show()
#CONTOURS
# import numpy as np
# import cv2
# from matplotlib import pyplot as plt
# im = cv2.imread('dp2.jpg')
# imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
# ret,thresh = cv2.threshold(imgray,127,255,0)
# contours, h = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# img = cv2.drawContours(imgray, contours, -1, (0,0,0), 3)
# plt.imshow(img, 'gray')
# plt.show()
# cv2.destroyAllWindows()
#HAAR CASCADES FACE AND EYE DETECTION
#PIC DETECTION
# import numpy as np
# import cv2
# face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# img = cv2.imread('dp2.jpg')
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# for (x,y,w,h) in faces:
# img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
# roi_gray = gray[y:y+h, x:x+w]
# roi_color = img[y:y+h, x:x+w]
# eyes = eye_cascade.detectMultiScale(roi_gray)
# for (ex,ey,ew,eh) in eyes:
# cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
# cv2.imshow('img',img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
#LIVE DETECTION
import numpy as np
import cv2
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
capture = cv2.VideoCapture(0)
while(True):
ret, img = capture.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
cv2.imshow('video', img)
if cv2.waitKey(1) == 27:
break
capture.release()
cv2.destroyAllWindows()
|
# -*- coding: utf-8 -*-
import pytest
# TODO: implement all unit tests
def test_fib():
assert True
|
from core.settings import settings
from core.security import generate_confirmation_code
from .sender import BaseMessage
# To developers: KISS is much more important than DRY!
# So use OOP inheritance ONLY if your new classes share the same validation
# fields, the same purpose AND a lot of common logic.
# If you only need some simple, duplicated validation, put it in separate
# classes without inheritance, please.
# A class is much more readable and error-resilient if it has a short
# inheritance chain.
# The same applies to other things, like pydantic models, email messages, etc.
class ConfirmAccountMessage(BaseMessage):
template_name = 'confirm.html'
class validation(BaseMessage.validation):
subject: str = 'Confirm registration'
account_id: int
def get_context(self) -> dict:
code = generate_confirmation_code(
account_id=self.schema.account_id
)
return {'url': f'{settings.SERVER_DOMAIN}/webhooks/confirm/account?code={code}'}
class ChangePasswordMessage(BaseMessage):
template_name = 'forget_password.html'
class validation(BaseMessage.validation):
subject: str = 'Change password'
account_id: int
def get_context(self) -> dict:
code = generate_confirmation_code(
account_id=self.schema.account_id
)
return {'url': f'{settings.SERVER_DOMAIN}/change-password?code={code}'}
class PasswordWasChangedMessage(BaseMessage):
template_name = 'changed_password.html'
class validation(BaseMessage.validation):
subject: str = 'Your password has been changed'
class SuccessfulRegistrationMessage(BaseMessage):
template_name = 'registration.html'
class validation(BaseMessage.validation):
subject: str = 'You have successfully registered'
url: str = f'{settings.SERVER_DOMAIN}/personal_area'
class ChangeBankCardMessage(BaseMessage):
template_name = 'change_bank_card.html'
class validation(BaseMessage.validation):
subject: str = 'Failed to process payment'
account_id: int
def get_context(self) -> dict:
account_id = self.schema.account_id
return {
'url': f'{settings.API_URL}/payments/change_bank_card/{account_id}'
}
|
from .download import start, get_pdf_list
|
import bpy
from ... base_types import AnimationNode, VectorizedSocket
class ReverseTextNode(bpy.types.Node, AnimationNode):
bl_idname = "an_ReverseTextNode"
bl_label = "Reverse Text"
useList: VectorizedSocket.newProperty()
def create(self):
self.newInput(VectorizedSocket("Text", "useList",
("Text", "inText"), ("Texts", "inTexts")))
self.newOutput(VectorizedSocket("Text", "useList",
("Text", "outText"), ("Texts", "outTexts")))
def getExecutionCode(self, required):
if self.useList:
return "outTexts = [text[::-1] for text in inTexts]"
else:
return "outText = inText[::-1]"
|
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, DataType
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class BinarySchema:
"""
A resource that represents the data of a single raw artifact as digital
content accessible in its native format. A Binary resource can contain any
content, whether text, image, pdf, zip archive, etc.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = None,
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
use_date_for: Optional[List[str]] = None,
parent_path: Optional[str] = "",
) -> Union[StructType, DataType]:
"""
A resource that represents the data of a single raw artifact as digital
content accessible in its native format. A Binary resource can contain any
content, whether text, image, pdf, zip archive, etc.
resourceType: This is a Binary resource
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content might not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content. Often,
this is a reference to an implementation guide that defines the special rules
along with other profiles etc.
language: The base language in which the resource is written.
contentType: MimeType of the binary content represented as a standard MimeType (BCP 13).
securityContext: This element identifies another resource that can be used as a proxy of the
security sensitivity to use when deciding and enforcing access control rules
for the Binary resource. Given that the Binary resource contains very few
elements that can be used to determine the sensitivity of the data and
relationships to individuals, the referenced resource stands in as a proxy
equivalent for this purpose. This referenced resource may be related to the
Binary (e.g. Media, DocumentReference), or may be some non-related Resource
purely as a security proxy. E.g. to identify that the binary resource relates
to a patient, and access should only be granted to applications that have
access to the patient.
data: The actual content, base64 encoded.
"""
if extension_fields is None:
extension_fields = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueReference",
"valueCodeableConcept",
"valueAddress",
]
from spark_fhir_schemas.r4.simple_types.id import idSchema
from spark_fhir_schemas.r4.complex_types.meta import MetaSchema
from spark_fhir_schemas.r4.simple_types.uri import uriSchema
from spark_fhir_schemas.r4.simple_types.code import codeSchema
from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.r4.simple_types.base64binary import base64BinarySchema
if (
max_recursion_limit and nesting_list.count("Binary") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["Binary"]
my_parent_path = parent_path + ".binary" if parent_path else "binary"
schema = StructType(
[
# This is a Binary resource
StructField("resourceType", StringType(), True),
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField(
"id",
idSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".id",
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content might not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content. Often,
# this is a reference to an implementation guide that defines the special rules
# along with other profiles etc.
StructField(
"implicitRules",
uriSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".implicitrules",
),
True,
),
# The base language in which the resource is written.
StructField(
"language",
codeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".language",
),
True,
),
# MimeType of the binary content represented as a standard MimeType (BCP 13).
StructField(
"contentType",
codeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".contenttype",
),
True,
),
# This element identifies another resource that can be used as a proxy of the
# security sensitivity to use when deciding and enforcing access control rules
# for the Binary resource. Given that the Binary resource contains very few
# elements that can be used to determine the sensitivity of the data and
# relationships to individuals, the referenced resource stands in as a proxy
# equivalent for this purpose. This referenced resource may be related to the
# Binary (e.g. Media, DocumentReference), or may be some non-related Resource
# purely as a security proxy. E.g. to identify that the binary resource relates
# to a patient, and access should only be granted to applications that have
# access to the patient.
StructField(
"securityContext",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# The actual content, base64 encoded.
StructField(
"data",
base64BinarySchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".data",
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
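# Illustrative only (not part of the generated schema file): a minimal sketch of
# how the schema above can be inspected. It assumes pyspark and the
# spark_fhir_schemas package are installed; with the default arguments
# get_schema() returns a StructType whose top-level field names can be listed.
if __name__ == "__main__":
    schema = BinarySchema.get_schema()
    print([field.name for field in schema.fields])
    # expected: ['resourceType', 'id', 'meta', 'implicitRules', 'language',
    #            'contentType', 'securityContext', 'data']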
|
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
from skimage import io
import numpy as np
import argparse
import datetime
import pickle
import time
import cv2
import sys
import os
sys.path.append(os.path.abspath(".."))
from src.runner import JestureSdkRunner
from src.utils import load_image_with_alpha, overlay_alpha
from src.utils import draw_text, draw_multiline_text, draw_skeleton
from src.thread_camera_draw import ThreadCameraDraw
print('cv2.__version__:', cv2.__version__) # 4.1.2 recommended
# parse args
parser = argparse.ArgumentParser(description='Collect hand keypoints data for gesture recognition model training.')
parser.add_argument('--cam_id', type=int, default=0)
args = parser.parse_args()
# create the application window
name = 'JestureSDK: Annotation Tool'
width, height = (640, 480)
cv2.namedWindow(name)
# cv2.resizeWindow(name, (width, height))
cv2.startWindowThread()
# set the data file
data_dir = './out_data'
os.makedirs(data_dir, exist_ok=True)
now = datetime.datetime.now()
dt = f'{now.day:02d}{now.month:02d}{now.year%100:02d}_{now.hour:02d}_{now.minute:02d}'
data_file_name = f'{data_dir}/hand_keypoints_{dt}.pkl'
# set the logo stuff
logo_path = 'images/jesture_logo.png'
logo_img, logo_alpha = load_image_with_alpha(logo_path, remove_borders=True)
logo_loc = (10, 10)
# set the gestures help stuff
key_to_idx = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5,
'6': 6, '7': 7, '8': 8, '9': 9}
key_ords = [ord(x) for x in key_to_idx]
idx_to_gesture = {0: 'no_gesture', 1: 'one', 2: 'two', 3: 'three', 4: 'four',
5: 'five', 6: 'fist', 7: 'peace', 8: 'love', 9: 'ok'}
idx_to_count = {k: 0 for k in idx_to_gesture}
# help_textlist = [f'{k}: {idx_to_gesture[key_to_idx[k]]} {idx_to_count[key_to_idx[k]]}' for k in key_to_idx]
# help_textlist_str = '\n'.join(help_textlist)
help_box_width = 175
help_box_tl = {'right': (10, height//5+10),
'left': (width-(help_box_width+10), height//5+10)}
help_box_br = {'right': (20+help_box_width, len(key_to_idx)*35),
'left': (width, len(key_to_idx)*35)}
help_text_loc = {'right': (help_box_tl['right'][0]+10, help_box_tl['right'][1]+10),
'left': (help_box_tl['left'][0]+10, help_box_tl['left'][1]+10)}
help_font = ImageFont.truetype("fonts/Comfortaa-Light.ttf", 20)
# set the scaled hands stuff
mid_hand_box_tl = (width//3, height-height//5)
mid_hand_box_br = (2*width//3, height)
hand_box_tl = {'right': (2*width//3, height-height//5),
'left': (0, height-height//5)}
hand_box_br = {'right': (width, height),
'left': (width//3, height)}
# set the hand type stuff
handtype_text = {"right": "Right hand capture (press L to change)",
"left": "Left hand capture (press R to change)"}
handtype_text_loc = (width//3, 25)
# set the counter stuff
count_text_loc = (width//3, 25)
# set common font
font = ImageFont.truetype("fonts/Comfortaa-Light.ttf", 24)
handtype_font = ImageFont.truetype("fonts/Comfortaa-Light.ttf", 20)
# variables used in the main loop
pressed_duration = 0
pressed_text = ''
selfie_mode = True
hand_type = 'right'
data_list = []
prev_k = ''
i = 0
if __name__ == "__main__":
# start Jesture SDK Python runner
jesture_runner = JestureSdkRunner(cam_id=args.cam_id,
use_tracking=True,
use_static_gestures=False,
use_dynamic_gestures=False)
jesture_runner.start_recognition()
# start reading frames to display in the application window
cap = ThreadCameraDraw(jesture_runner,
cam_id=args.cam_id,
width=width, height=height,
hand_box_tl=mid_hand_box_tl, hand_box_br=mid_hand_box_br,
draw_hand_box=False)
cap.start()
# start the main loop
while(True):
if cap.frame is None:
continue
# get current webcam image with drawn hand skeletons
frame = cap.frame[:,::-1,:] if selfie_mode else cap.frame
# draw logo
frame = overlay_alpha(logo_img[:,:,::-1], logo_alpha, frame, loc=logo_loc, alpha=1.0)
# draw ui elements
frame = Image.fromarray(frame if type(np.array([])) == type(frame) else frame.get())
draw = ImageDraw.Draw(frame, "RGBA")
draw.rectangle((help_box_tl[hand_type], help_box_br[hand_type]),
fill=(0, 0, 0, 127), outline=(235, 190, 63, 255))
# draw.rectangle((hand_box_tl, hand_box_br), fill=(0, 0, 0, 127), outline=(235, 190, 63, 255))
# draw text
draw.multiline_text(handtype_text_loc, handtype_text[hand_type],
font=handtype_font, fill=(255, 255, 255, 200))
help_textlist = [f'{idx_to_count[key_to_idx[k]]} | {k}: {idx_to_gesture[key_to_idx[k]]}'
for k in key_to_idx]
help_textlist_str = '\n'.join(help_textlist)
draw.multiline_text(help_text_loc[hand_type], help_textlist_str,
font=help_font, fill=(255, 255, 255))
# retrieve keyboard signal
c = cv2.waitKey(1) % 256
if c == ord('q'):
break
if c == ord('l'):
hand_type = 'left'
if c == ord('r'):
hand_type = 'right'
# retrieve if gesture key is pressed
if chr(c) in key_to_idx:
k, v = chr(c), idx_to_gesture[key_to_idx[chr(c)]]
pressed_text = f'{idx_to_count[key_to_idx[k]]} | {k}: {v}'
idx_to_count[key_to_idx[k]] += 1
pressed_duration = 4
print(f"pressed {pressed_text}, shape: {frame.size}")
data_list.append({
'hand_type': hand_type,
'gesture_id': key_to_idx[k],
'gesture_name': v,
'pred_gesture_name': jesture_runner.get_gesture(
f'{hand_type}_static'),
'keypoints': jesture_runner.get_hand_keypoints(
f'{hand_type}_keypoints'),
'scaled_keypoints': jesture_runner.get_hand_keypoints(
f'scaled_{hand_type}_keypoints'),
})
            # save the current data so it is not lost
            # in case the program exits unexpectedly
if k != prev_k:
with open(data_file_name, 'wb') as file:
pickle.dump(data_list, file)
prev_k = k
        # draw notification text if a key was pressed within the last few frames
if pressed_duration > 0:
notify_textlist_str = "\n".join(
[x if x == pressed_text else "" for x in help_textlist])
draw.multiline_text(help_text_loc[hand_type], notify_textlist_str,
font=help_font, fill=(235, 190, 63))
pressed_duration -= 1
frame = np.array(frame).astype(np.uint8)
cv2.imshow(name, frame)
i += 1
# save all the data collected
with open(data_file_name, 'wb') as file:
print(f'Dumping {len(data_list)} items to {data_file_name}...')
pickle.dump(data_list, file)
print(f'Dumped.')
# finish and close all resources
jesture_runner.stop_recognition()
cap.stop()
cv2.waitKey(1)
cv2.destroyWindow(name)
cv2.destroyAllWindows()
cv2.waitKey(1)
|
"""Translation stats collection and reporting."""
import datetime
import io
import logging
import traceback
import texttable
class TranslationStats(object):
ROW_COUNT = 7
def __init__(self):
self._locale_to_message = {}
self._untranslated = {}
self._stacktraces = []
self._untagged = {}
# Default to info logging.
self.log = logging.info
self.datetime = datetime.datetime
@property
def messages(self):
"""All messages and counts."""
return self._locale_to_message
@property
def missing(self):
"""Messages that are untagged and untranslated during rendering."""
untagged_messages = set([msg for _, msg in self.untagged.items()])
tracking = {}
for locale, messages in self._untranslated.items():
if locale not in tracking:
tracking[locale] = {}
for message in messages:
if message in untagged_messages:
tracking[locale][message] = self._locale_to_message[
locale][message]
# Removing empty locales.
blank_locales = []
for key in tracking:
if not tracking[key]:
blank_locales.append(key)
for key in blank_locales:
del tracking[key]
return tracking
@property
def untagged(self):
"""Untagged messages by location."""
return self._untagged
@property
def untranslated(self):
"""Untranslated messages by locale."""
tracking = {}
for locale, messages in self._untranslated.items():
if locale not in tracking:
tracking[locale] = {}
for message in messages:
tracking[locale][message] = self._locale_to_message[
locale][message]
return tracking
@property
def count_untranslated(self):
"""Max number of untranslated strings for any locale."""
untranslated_ids = set()
for _, messages in self._untranslated.items():
for message in messages:
untranslated_ids.add(message)
return len(untranslated_ids)
@property
def stacktraces(self):
return self._stacktraces
@staticmethod
def _simplify_traceback(tb):
"""Remove extra layers of the traceback to just get best bits."""
start = 0
end = len(tb) - 3
for i, item in enumerate(tb):
if 'jinja' in item:
start = i + 1
break
return tb[start:end]
def _get_message_locations(self, locale, message):
"""Get a list of the locations for a given locale and message."""
locations = set()
for item in self.stacktraces:
if item['locale'] != locale or item['id'] != message:
continue
if item['location']:
locations.add(item['location'])
return [(location, None) for location in locations]
def add_untagged(self, paths_to_untagged):
"""Add untagged paths and strings."""
self._untagged = paths_to_untagged
def export(self):
"""Export messages and untranslated strings."""
return {
'messages': self.messages,
'untranslated': self.untranslated,
'untagged': self.untagged,
}
def export_untranslated_catalogs(self, pod, dir_path=None):
"""Export the untranslated messages into catalogs based on locale."""
locale_to_catalog = {}
for locale, messages in self.untranslated.items():
if locale not in locale_to_catalog:
locale_to_catalog[locale] = pod.catalogs.get(
locale, basename='untranslated.po', dir_path=dir_path)
catalog = locale_to_catalog[locale]
for message in messages:
catalog.add(message, locations=self._get_message_locations(
locale, message))
return locale_to_catalog
def export_untranslated_tracebacks(self):
"""Export the untranslated tracebacks into a log file."""
width = 80
border_width = 3
border_char = '='
def _blank_line():
return '\n'
def _solid_line():
return '{}\n'.format(border_char * width)
def _text_line(text):
text_width = width - (border_width + 1) * 2
return '{} {} {}\n'.format(
border_char * border_width, text.center(text_width),
border_char * border_width)
with io.StringIO() as output:
output.write(_solid_line())
output.write(_text_line('Untranslated Strings'))
output.write(_solid_line())
output.write(_text_line(
'{} occurrences of {} untranslated strings'.format(
len(self.stacktraces), self.count_untranslated)))
output.write(_text_line(str(self.datetime.now())))
output.write(_solid_line())
output.write(_blank_line())
if not self.stacktraces:
output.write(
_text_line('No untranslated strings found.'))
return output.getvalue()
for item in self.stacktraces:
output.write('{} :: {}\n'.format(item['locale'], item['id']))
for line in item['tb']:
output.write('{}'.format(line))
output.write(_blank_line())
return output.getvalue()
def tick(self, message, locale, default_locale, location=None):
"""Count a translation."""
if not message:
return
if locale not in self._locale_to_message:
self._locale_to_message[locale] = {}
messages = self._locale_to_message[locale]
if message.id not in messages:
messages[message.id] = 0
messages[message.id] += 1
# Check for untranslated message.
if not message.string and message.id.strip() and locale is not default_locale:
if locale not in self._untranslated:
self._untranslated[locale] = set()
stack = traceback.format_stack()
self._stacktraces.append({
'locale': locale,
'location': location,
'id': message.id,
'tb': self._simplify_traceback(stack),
})
self._untranslated[locale].add(message.id)
def pretty_print(self, show_all=False):
"""Outputs the translation stats to a table formatted view."""
if not self.untranslated and not self.untagged:
self.log('\nNo untranslated strings found.\n')
return
# Most frequent untranslated and untagged messages.
if self.untagged:
table = texttable.Texttable(max_width=120)
table.set_deco(texttable.Texttable.HEADER)
table.set_cols_dtype(['t', 'i', 't'])
table.set_cols_align(['l', 'r', 'l'])
rows = []
missing = self.missing
for locale in missing:
for message in missing[locale]:
rows.append([str(locale), missing[locale][message], message])
rows = sorted(rows, key=lambda x: -x[1])
if not show_all:
num_rows = len(rows)
rows = rows[:self.ROW_COUNT]
if num_rows > self.ROW_COUNT:
rows.append(['', num_rows - self.ROW_COUNT,
'+ Additional untranslated strings...'])
table.add_rows(
[['Locale', '#', 'Untagged and Untranslated Message']] + rows)
self.log('\n' + table.draw() + '\n')
# Most frequent untranslated messages.
if self.untranslated:
table = texttable.Texttable(max_width=120)
table.set_deco(texttable.Texttable.HEADER)
table.set_cols_dtype(['t', 'i', 't'])
table.set_cols_align(['l', 'r', 'l'])
rows = []
for locale in self.untranslated:
for message in self.untranslated[locale]:
rows.append([str(locale), self.untranslated[
locale][message], message])
rows = sorted(rows, key=lambda x: -x[1])
if not show_all:
num_rows = len(rows)
rows = rows[:self.ROW_COUNT]
if num_rows > self.ROW_COUNT:
rows.append(['', num_rows - self.ROW_COUNT,
'+ Additional untranslated strings...'])
table.add_rows([['Locale', '#', 'Untranslated Message']] + rows)
self.log('\n' + table.draw() + '\n')
# Untranslated messages per locale.
table = texttable.Texttable(max_width=120)
table.set_deco(texttable.Texttable.HEADER)
table.set_cols_dtype(['t', 'i'])
table.set_cols_align(['l', 'r'])
rows = []
for locale in self.untranslated:
rows.append([str(locale), len(self.untranslated[locale])])
rows = sorted(rows, key=lambda x: -x[1])
table.add_rows([['Locale', 'Untranslated']] + rows)
self.log('\n' + table.draw() + '\n')
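# Illustrative only (not part of the original module): a minimal sketch of how the
# stats collector is driven. `FakeMessage` is a hypothetical stand-in exposing the
# two attributes tick() actually reads (`id` and `string`); real callers pass
# catalog message objects, and the location path below is just an example.
if __name__ == '__main__':
    import collections
    FakeMessage = collections.namedtuple('FakeMessage', ['id', 'string'])
    stats = TranslationStats()
    stats.log = print  # route the report to stdout instead of logging.info
    stats.tick(FakeMessage(id='Hello World', string=''), 'de', 'en',
               location='/content/pages/index.html')
    stats.pretty_print(show_all=True)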
|
class GetSeries(object):
def get_series(self, series):
"""
Returns a census series API handler.
"""
if series == "acs1":
return self.census.acs1dp
elif series == "acs5":
return self.census.acs5
elif series == "sf1":
return self.census.sf1
elif series == "sf3":
return self.census.sf3
else:
return None
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def _cr2a(c1, r1, c2=None, r2=None):
"""
r1=1 c1=1 gives A1 etc.
"""
assert r1>0 and c1>0, "negative coordinates not allowed!"
out=_n2x(c1)+str(r1)
if c2 is not None:
out +=':'+ _n2x(c2) + str(r2)
return out
def _a2cr(a,f4=False):
"""
    B1 gives [2,1] (column, row)
    B1:D3 gives [2,1,4,3]
    if f4==True, always return a 4-element list, so [2,1] becomes [2,1,2,1]
"""
if ':' in a:
tl,br=a.split(':')
out= _a2cr(tl)+_a2cr(br)
if out[0]==0:out[0]=1
if out[1]==0:out[1]=1
if out[2]==0:out[2]=2**14
if out[3]==0:out[3]=2**20
return out
else:
c,r=_splitaddr(a)
if f4:
return [_x2n(c), r, _x2n(c), r]
else:
return [_x2n(c),r]
def _n2x(n):
"""
convert decimal into base 26 number-character
:param n:
:return:
"""
numerals='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b=26
if n<=b:
return numerals[n-1]
else:
pre=_n2x((n-1)//b)
return pre+numerals[n%b-1]
def _x2n(x):
"""
converts base 26 number-char into decimal
:param x:
:return:
"""
numerals = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b=26
n=0
for i,l in enumerate(reversed(x)):
n+=(numerals.index(l)+1)*b**(i)
return n
def _splitaddr(addr):
"""
splits address into character and decimal
:param addr:
:return:
"""
col='';rown=0
for i in range(len(addr)):
if addr[i].isdigit():
col = addr[:i]
rown = int(addr[i:])
break
elif i==len(addr)-1:
col=addr
return col,rown
def _df2outline(df, outline_string):
"""
infer boundaries of a dataframe given an outline_string, to be fed into rng.outline(boundaries)
:param df:
:param outline_string:
:return:
"""
from collections import OrderedDict
out=OrderedDict()
idx = list(zip(*df.index.values.tolist()))
z=1
rf = 1 # this is just to avoid an exception if df has no index field named outline
for lvl in range(len(idx)):
for i, v, p in list(zip(list(range(1, len(idx[lvl]))), idx[lvl][1:], idx[lvl][:-1])):
if p == outline_string: rf = i
if (v == outline_string and p != outline_string) or i + 1 == len(idx[lvl]):
rl = (i if i + 1 == len(idx[lvl]) else i - 1)
out[z+rf-1]=[z+rf,z+rl]
return out
def _isrow(addr):
if ':' in addr:
coords=_a2cr(addr)
return coords[2]-coords[0]==16383
else: return False
def _iscol(addr):
if ':' in addr:
coords=_a2cr(addr)
return coords[3]-coords[1]==1048575
else: return False
def _isnumeric(x):
'''
returns true if x can be cast to a numeric datatype
'''
try:
float(x)
return True
except (ValueError, TypeError):
return False
def _df_to_ll(df, header=True, index=True, index_label=None):
"""
transform DataFrame or Series object into a list of lists
:param self:
:param header: True/False
:param index: True/False
:param index_label: currently unused
:return:
"""
if header:
if df.columns.nlevels>1:
if index:
hdr = list(zip(*df.reset_index().columns.tolist()))
else:
hdr = list(zip(*df.columns.tolist()))
else:
if index:
hdr = [df.reset_index().columns.tolist()]
else:
hdr = [df.columns.tolist()]
else: hdr=[]
if index:
vals=df.reset_index().values.tolist()
else:
vals=df.values.tolist()
return hdr + vals
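# Illustrative only (not part of the original module): expected values follow
# from the helpers above, e.g. column 28 maps to 'AB' and addresses are returned
# as [column, row] pairs. The tiny DataFrame is just a demo input for _df_to_ll.
if __name__ == '__main__':
    print(_n2x(28))           # 'AB'
    print(_x2n('AB'))         # 28
    print(_cr2a(2, 1, 4, 3))  # 'B1:D3'
    print(_a2cr('B1:D3'))     # [2, 1, 4, 3]
    print(_splitaddr('AB12')) # ('AB', 12)
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2]}, index=['x', 'y'])
    print(_df_to_ll(df))      # [['index', 'a'], ['x', 1], ['y', 2]]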
|
import logging
from flask import Blueprint, render_template
import MySQLdb
import MySQLdb.cursors
from pprint import pprint
import sys
import json
main = Blueprint('main', __name__)
#@main.route('/')
def index():
return "Main"
@main.route('/')
def display_books():
#import _mysql.cursors
db=MySQLdb.connect(host="localhost", user="root", passwd="amsterdam678",
db="dataloggerDB", cursorclass=MySQLdb.cursors.DictCursor)
c = db.cursor()
c.execute("SELECT id, logtime, sensorID,messwert FROM dataloggerDB.messwerteTBL WHERE 1")
R = c.fetchall()
#pprint(R)
c.close()
return render_template("home.html", the_data=list(R))
@main.route('/api/data/')
def get_sensordata():
dat= []
return dat
|
def exercise_log_op(is_magician = True, is_expert = False):
#check if magician AND expert: "you are a master magician"
#check if magician but not expert: "at least you're getting there"
#if you're not magician: "you need magic powers"
if is_magician and is_expert:
print("you are a master magician")
elif is_magician and not is_expert:
print("at least you're getting there")
else:
print("you need magic powers")
#another way to check if you're not magician
#elif not is_magician:
# print("you need magic powers")
|
# Generated by Django 3.1.2 on 2020-10-05 21:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Status",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"name",
models.CharField(max_length=30, unique=True, verbose_name="Cтатус"),
),
],
options={"verbose_name": "Статус", "verbose_name_plural": "Статусы",},
),
migrations.CreateModel(
name="Task",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=30, verbose_name="Название")),
(
"description",
models.TextField(max_length=250, verbose_name="Описание"),
),
(
"creation_date",
models.DateTimeField(
auto_now_add=True, verbose_name="Дата создания"
),
),
(
"end_date",
models.DateTimeField(
blank=True,
null=True,
verbose_name="Планируемая дата завершения",
),
),
(
"owner",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
(
"status",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="tasks.status",
to_field="name",
verbose_name="Статус",
),
),
],
options={"verbose_name": "Задача", "verbose_name_plural": "Задачи",},
),
migrations.CreateModel(
name="HistoricalTask",
fields=[
(
"id",
models.IntegerField(
auto_created=True, blank=True, db_index=True, verbose_name="ID"
),
),
("name", models.CharField(max_length=30, verbose_name="Название")),
(
"description",
models.TextField(max_length=250, verbose_name="Описание"),
),
(
"creation_date",
models.DateTimeField(
blank=True, editable=False, verbose_name="Дата создания"
),
),
(
"end_date",
models.DateTimeField(
blank=True,
null=True,
verbose_name="Планируемая дата завершения",
),
),
("history_id", models.AutoField(primary_key=True, serialize=False)),
("history_date", models.DateTimeField()),
("history_change_reason", models.CharField(max_length=100, null=True)),
(
"history_type",
models.CharField(
choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
max_length=1,
),
),
(
"history_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
(
"owner",
models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
(
"status",
models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
to="tasks.status",
to_field="name",
verbose_name="Статус",
),
),
],
options={
"verbose_name": "historical Задача",
"ordering": ("-history_date", "-history_id"),
"get_latest_by": "history_date",
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
]
|
import logging
import threading
import os
import time
from webcamlib.ConfigureLogging import logging_setup
class myThread (threading.Thread):
def __init__(self, threadID, name, delay):
threading.Thread.__init__(self)
self.logger = logging.getLogger("test-{}-{}".format(name, threadID))
self.threadID = threadID
self.name = name
self.delay = delay
def run(self):
self.logger.info("Starting " + self.name)
for item in range(100000):
self.logger.info("Test line {}".format(item))
time.sleep(self.delay)
self.logger.info("Exiting " + self.name)
"""
Not really a unit test, but a script to exercise the logging configuration and log rotation.
We ran into some errors when a log was due to be rotated.
"""
def main():
logging_setup("test.log", 'DEBUG', True, True)
logger = logging.getLogger("main")
logger.info("Starting Main Thread")
threads = []
# Create new threads
thread1 = myThread(1, "Thread-1", 0.1)
thread2 = myThread(2, "Thread-2", 0.05)
thread3 = myThread(3, "Thread-3", 0.2)
# Start new Threads
thread1.start()
thread2.start()
thread3.start()
# Add threads to thread list
threads.append(thread1)
threads.append(thread2)
threads.append(thread3)
# Wait for all threads to complete
for t in threads:
t.join()
logger.info("Exiting Main Thread")
if __name__ == '__main__':
main()
|
import os
settings = {}
def refresh():
"""
Refresh environment.settings dict
"""
APP_DATA_DIR = os.environ.get('APP_DATA_DIR', os.getcwd())
settings.clear()
settings.update({
'APP_DATA_DIR': APP_DATA_DIR,
'LOG_DIR': os.path.join(APP_DATA_DIR, 'logs'),
'LOG_FILE': os.path.join(APP_DATA_DIR, 'logs', 'api.log'),
'CONTAINERS_DIR': os.path.join(APP_DATA_DIR, 'containers'),
'CONTAINERS_FILE':
os.path.join(
APP_DATA_DIR, 'containers', '_containers_create.json'),
'CALIBRATIONS_DIR': os.path.join(APP_DATA_DIR, 'calibrations'),
'CALIBRATIONS_FILE':
os.path.join(APP_DATA_DIR, 'calibrations', 'calibrations.json'),
'APP_IS_ALIVE_URL': 'http://localhost:31950',
'APP_JUPYTER_UPLOAD_URL': 'http://localhost:31950/upload-jupyter',
})
return settings
def get_path(key):
"""
    For a given key, returns the full path and
    creates the path if missing. The caller is guaranteed
    that the path exists if no exception is thrown.
For *_DIR it will create a directory, for *_FILE it will
create a directory tree to the file. Throws exception if neither.
"""
if key not in settings:
raise ValueError(
'Key "{}" not found in environment settings'.format(key))
if key.endswith('_DIR'):
path = settings[key]
elif key.endswith('_FILE'):
path, _ = os.path.split(settings[key])
else:
raise ValueError(
'Expected key suffix as _DIR or _FILE. "{}" received'.format(key))
if not os.path.exists(path):
os.makedirs(path)
return settings[key]
refresh()
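# Illustrative only (not part of the original module): get_path() returns the
# configured value and ensures the directory (or, for *_FILE keys, the parent
# directory) exists, so callers can use the result immediately.
if __name__ == '__main__':
    print(get_path('LOG_DIR'))   # e.g. <APP_DATA_DIR>/logs, created if missing
    print(get_path('LOG_FILE'))  # the logs directory is ensured for the file too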
|
"""
API test example that tests various auths.
"""
from screenpy import Actor, given, then, when
from screenpy.actions import AddHeader, See, SendGETRequest
from screenpy.questions import StatusCodeOfTheLastResponse
from screenpy.resolutions import IsEqualTo
from ..urls import BASIC_AUTH_URL, BEARER_AUTH_URL
def test_basic_auth(Perry: Actor) -> None:
"""Basic authentication is accepted by the basic auth endpoint."""
test_username = "USER"
test_password = "PASS"
when(Perry).attempts_to(
SendGETRequest.to(f"{BASIC_AUTH_URL}/{test_username}/{test_password}").with_(
auth=(test_username, test_password)
)
)
then(Perry).should(See.the(StatusCodeOfTheLastResponse(), IsEqualTo(200)))
def test_bearer_auth(Perry: Actor) -> None:
"""Bearer token authentication is accepted by the bearer auth endpoint."""
given(Perry).was_able_to(AddHeader(Authorization="Bearer 1234"))
when(Perry).attempts_to(SendGETRequest.to(BEARER_AUTH_URL))
then(Perry).should(See.the(StatusCodeOfTheLastResponse(), IsEqualTo(200)))
|
import pandas as pd
import numpy as np
import sys
# > > > Load Input Data [DYNAMIC INPUTS] < < <
LoadData = pd.read_csv('C:/UMI/temp/Loads.csv')
Output = LoadData #Reformatted Kuwait output to bypass the reformat section
WeatherData = pd.read_csv('C:/UMI/temp/DryBulbData.csv')
# > > > User-specified Parameters [DYNAMIC INPUTS] < < <
Cost_Electricity = float(sys.argv[1]) #Generation cost per kWh Source: https://www.oxfordenergy.org/wpcms/wp-content/uploads/2014/04/MEP-9.pdf
Price_NaturalGas = float(sys.argv[2]) #Dollars per kWh
Emissions_ElectricGeneration = float(sys.argv[3]) #Metric ton CO2 per kWh produced
Effic_PowerGen = float(sys.argv[6]) #Average thermal efficiency of electrical generation in Kuwait
Losses_Transmission = float(sys.argv[4]) #Electrical transmission losses https://www.eia.gov/tools/faqs/faq.php?id=105&t=3
# ...................................................................
# Internal Parameters
kWhPerTherm = 29.3 #kwh/therm
Emissions_NG_Combustion_therm = 0.005302 #Metric ton CO2 per therm of NG
Emissions_NG_Combustion_kWh = Emissions_NG_Combustion_therm / kWhPerTherm
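# Worked check (illustrative): 0.005302 t CO2/therm divided by 29.3 kWh/therm ≈ 1.81e-4 t CO2 per kWh of natural gas.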
FanPwrCoolRatio = 0 # 34.0/27.0 removed because assuming fan energy ~constant for all cases
# Empirical heat pump model from Purdue for heating mode
Size_HeatPump_Heating = 3.516825 #Nominal rated capacity of the heat pump in heating mode [kW]
Power_Max_Heating = 2.164 #Max work by compressor in kW
MinPLR_HP_Heating = 0.28
MinCOP_Heating = 0.0
THot_Heating = 308.15 #35C in Kelvin
TCold_Heating = 253.15 #-20C in Kelvin
MaxCOP_Heating = THot_Heating / (THot_Heating - TCold_Heating)
# Heating-mode heat pump model coefficients
a1 = 0.0914
a2 = 0.8033
a3 = 2.5976
a4 = -3.2925
a5 = -0.8678
a6 = 0.1902
a7 = 1.4833
# Heat Pump Model, Optimized cooling from Tea Zakula: Air-to-air heat pump with evaporator outlet at 12.5C saturated
# Mitsubishi MUZ-A09NA-1 outdoor unit heat pump and a MSZ-A09NA
Size_HeatPump_Cooling = 9 # Nominal rated capacity of the heat pump in cooling mode [kW]
MinPLR_HP_Cooling = 0.1 # Set this to 0, because a min PLR of 10% results in odd behavior: 10% and 2C DB -> -799 for COP
COP_Nominal_Cooling = 3.81
THot_Cooling = 313.15 #40C in Kelvin (condenser / outdoor side)
TCold_Cooling = 285.65 #12.5C in Kelvin based on the assumption of the constant evaporator temperature at 12.5C from Tea's work
MinCOP_Cooling = 0.0
MaxCOP_Cooling = TCold_Cooling / (THot_Cooling - TCold_Cooling)
c1_hp = 3.02E-3
c2_hp = -3.23E-1
c3_hp = 1.23E-2
c4_hp = 4.76E-1
c5_hp = -2.38E-4
c6_hp = -2.86E-4
c7_hp = -2.02E-1
c8_hp = 6.77E-4
c9_hp = 3.71E-5
c10_hp = 4.25E-6
# ...................................................................
# Update Output dataframe
Output['TotalHeating'] = Output['SDL/Heating'] + Output['SDL/Domestic Hot Water']
Output['Electricity'] = Output['SDL/Equipment'] + Output['SDL/Lighting']
Output = Output.join(WeatherData['DB'], on='Hour')
# ...................................................................
# Identify peak demands for cooling, heating, and electricity
PeakCooling = Output.groupby('Hour')['SDL/Cooling'].sum()
PeakCooling.sort_values(inplace=True, ascending=False)
print('\n Peak Cooling Demand (kW):', PeakCooling[:1].round())
PeakHeating = Output.groupby('Hour')['TotalHeating'].sum()
PeakHeating.sort_values(inplace=True, ascending=False)
print('\n Peak Heating Demand (kW):', PeakHeating[:1].round())
PeakElectricity = Output.groupby('Hour')['Electricity'].sum()
PeakElectricity.sort_values(inplace=True, ascending=False)
print('\n Peak Non-HVAC Electricity Demand(kW):', PeakElectricity[:1].round())
# ...................................................................
# Identify peak demands for each building for cooling, heating, and electricity
CoolingMax = Output.groupby('Building')['SDL/Cooling'].max()
Output = Output.join(CoolingMax, on='Building', rsuffix='Max')
print('Cooling max :', CoolingMax[:1])
HeatingMax = Output.groupby('Building')['TotalHeating'].max()
Output = Output.join(HeatingMax, on='Building', rsuffix='Max')
ElectricMax = Output.groupby('Building')['Electricity'].max()
Output = Output.join(ElectricMax, on='Building', rsuffix='Max')
# ...................................................................
print "Scenario 01: Grid Supplied Electricity Satisfies All Heating, Cooling, and Electric loads"
# Calculate total heating energy needed
Output['NumHeatPumps_Heat'] = Output['TotalHeatingMax']/Size_HeatPump_Heating
Output['NumHeatPumps_Heat'] = np.ceil(Output['NumHeatPumps_Heat'])
Output['PLR_HeatPump_Heating'] = np.where(Output['TotalHeating']<=0, 0, (Output['TotalHeating']/Output['NumHeatPumps_Heat'])/Size_HeatPump_Heating)
Output['PLR_HeatPump_Heating'] = np.where(Output['PLR_HeatPump_Heating']<=MinPLR_HP_Heating, MinPLR_HP_Heating, (Output['TotalHeating']/Output['NumHeatPumps_Heat'])/Size_HeatPump_Heating)
Output['HP_Heat_Modifier'] = a1 + a2 * Output['PLR_HeatPump_Heating'] + a3 * Output['PLR_HeatPump_Heating']**2 + a4 * Output['PLR_HeatPump_Heating'] + a5 * Output['PLR_HeatPump_Heating']**3 + a6 * Output['PLR_HeatPump_Heating'] + a7 * Output['PLR_HeatPump_Heating']
Output['Energy_HeatPump_Heating'] = Output['HP_Heat_Modifier'] * Power_Max_Heating * Output['NumHeatPumps_Heat']
Output['COP_HeatPump_Heating'] = np.where(Output['Energy_HeatPump_Heating'] < MinCOP_Heating,MinCOP_Heating, Output['TotalHeating'] / Output['Energy_HeatPump_Heating'])
Output['COP_HeatPump_Heating'] = np.where(Output['COP_HeatPump_Heating'] >MaxCOP_Heating, MaxCOP_Heating, Output['COP_HeatPump_Heating'])
# ...................................................................
# Calculate total cooling energy needed
Output['NumHeatPumps_Cool'] = Output['SDL/CoolingMax']/Size_HeatPump_Cooling
Output['NumHeatPumps_Cool'] = np.ceil(Output['NumHeatPumps_Cool'])
Output['PLR_HeatPump_Cooling'] = np.where(Output['NumHeatPumps_Cool']<=0.0,0.0, Output['SDL/Cooling'] / (Output['NumHeatPumps_Cool']*Size_HeatPump_Cooling))
Output['PLR_HeatPump_Cooling'] = np.where(Output['PLR_HeatPump_Cooling'] < MinPLR_HP_Cooling, MinPLR_HP_Cooling, Output['PLR_HeatPump_Cooling'])
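# The cooling COP is the reciprocal of a cubic polynomial in part-load ratio (PLR) and outdoor dry-bulb temperature (DB).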
Output['COP_HeatPump_Cooling'] = ((c1_hp + c2_hp*Output['PLR_HeatPump_Cooling'] + c3_hp*Output['DB'] +
c4_hp*Output['PLR_HeatPump_Cooling']**2 + c5_hp*Output['PLR_HeatPump_Cooling']*Output['DB'] +
c6_hp*Output['DB']**2 + c7_hp*Output['PLR_HeatPump_Cooling']**3 + c8_hp*Output['PLR_HeatPump_Cooling']**2*Output['DB'] +
c9_hp*Output['PLR_HeatPump_Cooling']*Output['DB']**2 + c10_hp*Output['DB']**3)**-1)
Output['COP_HeatPump_Cooling'] = np.where(Output['COP_HeatPump_Cooling']>MaxCOP_Cooling, MaxCOP_Cooling,Output['COP_HeatPump_Cooling'])
Output['COP_HeatPump_Cooling'] = np.where(Output['COP_HeatPump_Cooling'] > MinCOP_Cooling, Output['COP_HeatPump_Cooling'], MinCOP_Cooling) # Added in v22 to set minimum COP to prevent negative values
Output['Energy_HeatPump_Cooling'] = np.where(Output['COP_HeatPump_Cooling']<=0,0, Output['SDL/Cooling'] / Output['COP_HeatPump_Cooling'])
Output['FanPowerSC01'] = Output['Energy_HeatPump_Cooling'] * FanPwrCoolRatio
# ...................................................................
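# Scenario 01 totals: sum the city-wide electric load, add transmission losses, and convert to primary input energy at the plant generation efficiency.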
NonHVAC_Electricity = Output['Electricity']
Load_CitySC01 = sum(NonHVAC_Electricity+Output['Energy_HeatPump_Heating']+Output['Energy_HeatPump_Cooling']+Output['FanPowerSC01'] )
Load_Grid_SC01 = Load_CitySC01 * (1 + Losses_Transmission)
Energy_SC01 = Load_Grid_SC01/Effic_PowerGen
SC01_NonHVACEnergy = sum(NonHVAC_Electricity) * (1+Losses_Transmission) / Effic_PowerGen
SC01_HeatingEnergy = sum(Output['Energy_HeatPump_Heating']) * (1+Losses_Transmission) / Effic_PowerGen
SC01_CoolingEnergy = sum(Output['Energy_HeatPump_Cooling']) * (1+Losses_Transmission) / Effic_PowerGen
SC01_FanEnergy = sum(Output['FanPowerSC01']) * (1+Losses_Transmission) / Effic_PowerGen
SC01_AvgCoolingCOP = sum(Output['SDL/Cooling']) / SC01_CoolingEnergy # This version of COP represents the true input energy needed to meet cooling demands
SC01_AvgHeatingCOP = sum(Output['TotalHeating']) / SC01_HeatingEnergy
print "Annual input energy for non-HVAC electricity loads:", SC01_NonHVACEnergy
print "Annual input energy for heating:", SC01_HeatingEnergy
print "Annual input energy for cooling:", SC01_CoolingEnergy
print "Annual input energy required (kWh):", Energy_SC01
print "Average Cooling COP:", SC01_AvgCoolingCOP
print "Average Heating COP:", SC01_AvgHeatingCOP
# Cost to Generate Electricity
Cost_SC01 = Load_Grid_SC01 * Cost_Electricity
print "Annual cost of generating electricity for grid (USD):" , Cost_SC01
# Calculate CO2 Emissions Associated to Using Electricity from the Grid to Satisfy Loads
Emissions_SC01 = Load_Grid_SC01 * Emissions_ElectricGeneration
print "Annual carbon emissions (Metric Tons):", Emissions_SC01
Output['ElectricityConsumption'] = (NonHVAC_Electricity + Output['Energy_HeatPump_Heating'] + Output['Energy_HeatPump_Cooling']+Output['FanPowerSC01']) * (1 + Losses_Transmission)
Output['NaturalGasConsumption'] = 0.0
Results = pd.DataFrame()
Results['ElectricityConsumption'] = Output.groupby('Hour')['ElectricityConsumption'].sum()
Results['NaturalGasConsumption'] = Output.groupby('Hour')['NaturalGasConsumption'].sum()
# Results['Scenario01'] = SC01_NonHVACEnergy, SC01_HeatingEnergy, SC01_CoolingEnergy, Energy_SC01, SC01_AvgCoolingCOP, SC01_AvgHeatingCOP, Cost_SC01, Emissions_SC01
# ...................................................................
# Export Data
Output.to_csv('C:/UMI/temp/DHSimulationResults/SC01_Export_OutputDataFrame.csv')
Results.to_csv('C:/UMI/temp/DHSimulationResults/SC01_Export_ResultsDataFrame.csv')
|
from theteller_api_sdk.checkout.checkout import Checkout
import unittest
from theteller_api_sdk.core.core import Client
from theteller_api_sdk.core.environment import Environment
from os import environ
class TestCheckout(unittest.TestCase):
def setUp(self) -> None:
        env = Environment("test")
client = Client(merchant_id=environ.get("MERCHANT_ID"),
api_key=environ.get("API_KEY"),
apiuser=environ.get("API_USERNAME"),
environment=env)
self.checkout = Checkout(client)
def test_checkout_creation(self):
        checkout = self.checkout.createCheckout("test checkout", 10, "https://localhost:8000", "oliverotchere4@gmail.com")
self.assertIsNotNone(checkout.get("checkout_url"))
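if __name__ == "__main__":
    # Allow the test module to be run directly as a script.
    unittest.main()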
|
#!/usr/bin/env python3
import Constants as Constants
import Solvers as Solvers
import FunctionCallers as FunctionCallers
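# Thin wrappers that run the shared fourth-order Runge-Kutta solvers on each population model using the module-wide constants.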
def malthus():
return Solvers.rungeKutta4V1(
FunctionCallers.callMalthus,
Constants.INITIAL_NUMBER_OF_PREYS,
0,
Constants.DURATION,
Constants.STEP
)
def verhulst():
return Solvers.rungeKutta4V1(
FunctionCallers.callVerhulst,
Constants.INITIAL_NUMBER_OF_PREYS,
0,
Constants.DURATION,
Constants.STEP
)
def lotkaVolterraMalthus():
return Solvers.rungeKutta4V2(
FunctionCallers.callLotkaVolterraPrey,
FunctionCallers.callLotkaVolterraPredator,
Constants.INITIAL_NUMBER_OF_PREYS,
Constants.INITIAL_NUMBER_OF_PREDATORS,
0,
Constants.DURATION,
Constants.STEP
)
def lotkaVolterraVerhulst():
return Solvers.rungeKutta4V2(
FunctionCallers.callLotkaVolterraVerhulstPrey,
FunctionCallers.callLotkaVolterraVerhulstPredator,
Constants.INITIAL_NUMBER_OF_PREYS,
Constants.INITIAL_NUMBER_OF_PREDATORS,
0,
Constants.DURATION,
Constants.STEP
)
|
# An implementation of the FFT (fast Fourier transform) in Python, from scratch.
from cmath import exp, pi, sin
import matplotlib.pyplot as plt
def fft(input):
    # Separate the input vector into even- and odd-indexed elements.
input_odd = input[1::2]
input_even = input[0::2]
#length of the input vector
N = len(input)
#print(N)
    if N == 1:
return input
output = [0 for i in range(N)]
#recursive implementation
output_odd = fft(input_odd)
output_even = fft(input_even)
    w = W_coef(N)  # calculate the twiddle factor (the omega coefficient)
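    # Cooley-Tukey butterfly: combine the half-size even/odd transforms using powers of the twiddle factor w.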
for i in range(N//2):
output[i] = output_even[i] + (w**i)*output_odd[i]
output[i + N//2] = output_even[i] - (w**i)*output_odd[i]
return output
def W_coef(N):
return exp(-1j*(2*pi)/N)
# The number of elements must be a power of two in order for the algorithm to work.
x = [ i*0.01 for i in range(1024)]
y = [sin(i) for i in x]
Y = fft(y)
plt.plot([abs(v) for v in Y])  # plot the magnitude spectrum (the FFT output is complex)
plt.show()
|
# coding: utf-8
""" Execute Step Classes """
import tmt
class Execute(tmt.steps.Step):
""" Run the tests (using the specified framework and its settings) """
def __init__(self, data, plan):
""" Initialize the execute step """
super(Execute, self).__init__(data, plan)
if len(self.data) > 1:
raise tmt.utils.SpecificationError(
"Multiple execute steps defined in '{}'.".format(self.plan))
def show(self):
""" Show execute details """
super(Execute, self).show(keys=['how', 'script', 'isolate'])
|
import socket
import ssl
from machine import Pin, Timer
import network
from utime import sleep
from settings import wifi_name, wifi_pass, pushover_user, pushover_token
PUSHOVER_HOST = 'api.pushover.net'
PUSHOVER_PORT = 443
PUSHOVER_PATH = '/1/messages.json'
SAFE_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.- '
#####################################
# SET THESE IN 'settings.py'! #
#####################################
# WiFi Settings
ssid = wifi_name
netpass = wifi_pass
# Pushover Settings
user = pushover_user
token = pushover_token
## Optional Settings
beep_duration=1 # duration to beep for (secs); can be decimal, eg 0.5
alarm_frequency=10 # how often to beep (secs) when in an alert state
net_frequency=1800 # how often to beep (secs) when network is disconnected
restart_delay=30 # if in a failed state, wait this many seconds before starting back up
connect_count=60 # How frequently (secs) to retry network connection if it's down (too frequent will interrupt in-flight attempts!)
#####################################
# END #
#####################################
## Setup pins
floatswitch = Pin(2, Pin.IN) ## Pulled high when OK. Goes low when water level too high (alert state)
buzzer = Pin(4, Pin.OUT, value=False) ## Off by default
powertail = Pin(18, Pin.OUT, value=True) ## On by default
### PINOUT:
# Float1: GPIO_2
# Float2: 3.3v
#
# Buzzer+: GPIO_4
# Buzzer-: GND
#
# Powertail+: GPIO_18
# Powertail-: GND
## Initial State
last_float_state=True ## True is healthy (water level OK)
startup_message_sent=False
sent_alert_message=False
sent_recovery_message=True
net_timer=Timer(0)
net_timer_init=True
alarm_timer=Timer(1)
alarm_timer_init=False
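# Percent-encode any character outside SAFE_CHARS and replace spaces with '+' to build the form-encoded request body.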
def make_safe(string):
r = []
for c in string:
if c in SAFE_CHARS:
r.append(c)
else:
r.append('%%%x' % ord(c))
return (''.join(r)).replace(' ', '+')
def sendMessage(title, msg, highPriority=False):
data = 'token=' + make_safe(token)
data += '&user=' + make_safe(user)
data += '&title=' + make_safe(title)
data += '&message=' + make_safe(msg)
if highPriority:
data += '&priority=1'
r = None
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.settimeout(10)
addr = [(socket.getaddrinfo(PUSHOVER_HOST, PUSHOVER_PORT)[0][-1], PUSHOVER_HOST)]
s.connect(addr[0][0])
s = ssl.wrap_socket(s)
s.write('POST {} HTTP/1.0\r\n'.format(PUSHOVER_PATH).encode())
s.write('Host: {}\r\n'.format(addr[0][1]).encode())
s.write(b'Content-Type: application/x-www-form-urlencoded\r\n')
s.write('Content-Length: {}\r\n'.format(len(data)).encode())
s.write(b'\r\n')
        s.write('{}\r\n\r\n'.format(data).encode())
while s.readline() != b'\r\n':
continue
r = s.read()
except Exception as e:
print(e)
finally:
s.close()
print("Response: {}".format(r))
### Float switch connected 1. GND, 2. Pin2
def get_float_state():
value = floatswitch.value()
# print("Float value: {}".format(value))
return floatswitch.value()
def beep():
buzzer.value(True)
sleep(beep_duration)
buzzer.value(False)
def pushover_alert(wlan):
if wlan.isconnected():
try:
print("Aquarium Float Trigger: ALERT")
sendMessage("Aquarium Float Trigger: ALERT", "The float switch has triggered. Check overflow!", True)
return True
except Exception as err:
print("Encountered error sending pushover message: {}".format(err))
return False
else:
print("No alert message sent; wifi not up")
return False
def pushover_recovery(wlan):
if wlan.isconnected():
try:
print("Aquarium Float Trigger: OK")
sendMessage("Aquarium Float Trigger: OK", "The float switch state has been restored")
return True
except Exception as err:
print("Encountered error sending pushover message: {}".format(err))
return False
else:
print("No recovery message sent; wifi not up")
return False
def pushover_started(wlan):
if wlan.isconnected():
try:
print("Aquarium Float Trigger: STARTED")
sendMessage("Aquarium Float Trigger: Started", "The float switch service has started up")
return True
except Exception as err:
print("Encountered error sending pushover message: {}".format(err))
return False
else:
print("No startup message sent; wifi not up")
return False
## START
# Connect to network
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
while True:
# Send a startup message
if not startup_message_sent:
startup_message_sent = pushover_started(wlan)
    # Maintain the network connection. Beep every net_frequency seconds while disconnected.
if not wlan.isconnected():
if not net_timer_init:
            net_timer.init(mode=Timer.PERIODIC, period=net_frequency*1000, callback=lambda t: beep())  # Beep every net_frequency seconds while offline
net_timer_init=True
connect_count += 1
if connect_count > 60:
connect_count = 0
print('Connecting to Wifi...')
wlan.connect(ssid, netpass) # We don't block on this.
else:
if net_timer_init:
connect_count = 0
net_timer.deinit()
net_timer_init = False
# monitor float switch (voltage_high when OK)
current_float_state = get_float_state() # See if state has changed from last reading
if current_float_state != last_float_state:
print("State Change!: {} to {}".format(last_float_state, current_float_state))
last_float_state = current_float_state
if current_float_state:
# ensure pin0 (powertail) is high
powertail.value(1)
sent_alert_message = False
sent_recovery_message = pushover_recovery(wlan)
if alarm_timer_init:
alarm_timer.deinit()
else:
# if float_switch == vlow:
powertail.value(0)
sent_recovery_message = False
            alarm_timer.init(mode=Timer.PERIODIC, period=alarm_frequency*1000, callback=lambda t: beep())  # Beep every alarm_frequency seconds while in the alert state
alarm_timer_init = True
sent_alert_message = pushover_alert(wlan)
## Block further change state for 1minute to stop constant pump cycle loops
print("Sleeping monitoring for {}s".format(restart_delay))
sleep(restart_delay)
else:
# Retry sending recovery messages if it failed previously
if not current_float_state and not sent_alert_message:
sent_alert_message = pushover_alert(wlan)
if current_float_state and not sent_recovery_message:
sent_recovery_message = pushover_recovery(wlan)
sleep(1)
|
from pygal_maps_world.maps import World
from pygal.style import NeonStyle
wm = World(style=NeonStyle)
wm.title = 'Populations of Countries in North America'
wm.add('North America', {'ca': 34126000, 'us': 309349000,
'mx': 113423000})
wm.render_to_file('na_populations.svg')
|
# -*- coding: utf-8 -*-
"""
qiniusupport
~~~~~~~~~~~~~~
Qiniu client wrapper.
https://developer.qiniu.com/kodo/sdk/1242/python
:copyright: (c) 2018 by fengweimin.
:date: 2018/3/21
"""
import qiniu
class QiniuSupport(object):
def __init__(self, app=None):
self.app = app
self.auth = None
self.bucket = None
if app is not None:
self.init_app(app)
def init_app(self, app):
self.app = app
self.auth = qiniu.Auth(app.config['QINIU_AK'], app.config['QINIU_SK'])
self.bucket = app.config['QINIU_BUCKET']
def token(self, policy=None):
"""
        Generate an upload token.
        """
        # Upload policy: persistence logic can be configured here, e.g. pre-processing of videos.
        # https://developer.qiniu.com/kodo/manual/1208/upload-token
        # The default token expiry is 3600 seconds.
return self.auth.upload_token(self.bucket, policy=policy)
def upload_data(self, key, data, **kwargs):
"""
        Upload raw data.
        Note: Qiniu has no concept of folders; it positions itself as an object store, so there is only the key field.
"""
ret, info = qiniu.put_data(self.token(), key, data, **kwargs)
if ret:
return self.url(ret['key'])
else:
self.app.logger.error('Failed when uploading data, error info %s' % info)
return None
def upload_stream(self, key, input_stream, file_name, data_size, **kwargs):
"""
        Upload a data stream.
"""
ret, info = qiniu.put_stream(self.token(), key, input_stream, file_name, data_size, **kwargs)
if ret:
return self.url(ret['key'])
else:
self.app.logger.error('Failed when uploading stream, error info %s' % info)
return None
def upload_file(self, key, file, **kwargs):
"""
        Upload a local file.
"""
ret, info = qiniu.put_file(self.token(), key, file, **kwargs)
if ret:
return self.url(ret['key'])
else:
self.app.logger.error('Failed when uploading file, error info %s' % info)
return None
def url(self, key):
"""
        Generate the full URL for the given key.
"""
return '%s/%s' % (self.app.config['QINIU_BASE_URL'], key)
|
from dataclasses import dataclass
import numpy as np
import pandas as pd
from statsmodels.nonparametric import kernels
from statsmodels.sandbox.nonparametric import kernels as sandbox_kernels
from statsmodels.nonparametric import bandwidths
import sys
# MINIMUM_CONTI_BANDWIDTH = sys.float_info.min
MINIMUM_CONTI_BANDWIDTH = 1e-100
def indicator_kernel(h: np.ndarray, Xi: np.ndarray, x: np.ndarray) -> np.ndarray:
"""The indicator kernel returning one if two elements are equal.
Parameters:
h : not used. This argument is left for compatibility.
Xi : 1-D ndarray, shape (nobs, K). The value of the training set.
x : 1-D ndarray, shape (K, 1). The value at which the kernel density is being estimated.
Returns:
ndarray of shape ``(n_obs, K)``: The kernel_value at each training point for each var.
"""
return (Xi - x) == 0
def epanechnikov(h: np.ndarray, Xi: np.ndarray, x: np.ndarray) -> np.ndarray:
"""Epanechnikov kernel.
Parameters:
h : bandwidth.
Xi : 1-D ndarray, shape (nobs, 1). The value of the training set.
x : 1-D ndarray, shape (1, nbatch). The value at which the kernel density is being estimated.
Returns:
ndarray of shape ``(n_obs, nbatch)``: The kernel_value at each training point for each var.
"""
u = (Xi - x) / h
out = 3 / 4 * (1 - u**2) * (np.abs(u) <= 1)
assert out.shape == (Xi.shape[0], x.shape[1])
return out
def triweight(h: np.ndarray, Xi: np.ndarray, x: np.ndarray) -> np.ndarray:
"""Triweight kernel.
Parameters:
h : bandwidth.
Xi : 1-D ndarray, shape (nobs, 1). The value of the training set.
x : 1-D ndarray, shape (1, nbatch). The value at which the kernel density is being estimated.
Returns:
ndarray of shape ``(n_obs, nbatch)``: The kernel_value at each training point for each var.
"""
u = (Xi - x) / h
out = 35 / 32 * (np.maximum(0, 1 - u**2)**3)
assert out.shape == (Xi.shape[0], x.shape[1])
return out
def biweight(h: np.ndarray, Xi: np.ndarray, x: np.ndarray) -> np.ndarray:
"""Biweight kernel.
Parameters:
h : bandwidth.
Xi : 1-D ndarray, shape (nobs, 1). The value of the training set.
x : 1-D ndarray, shape (1, nbatch). The value at which the kernel density is being estimated.
Returns:
ndarray of shape ``(n_obs, nbatch)``: The kernel_value at each training point for each var.
"""
u = (Xi - x) / h
out = 15 / 16 * (np.maximum(0, 1 - u**2)**2)
assert out.shape == (Xi.shape[0], x.shape[1])
return out
def tricube(h: np.ndarray, Xi: np.ndarray, x: np.ndarray) -> np.ndarray:
"""Tricube kernel.
Parameters:
h : bandwidth.
Xi : 1-D ndarray, shape (nobs, 1). The value of the training set.
x : 1-D ndarray, shape (1, nbatch). The value at which the kernel density is being estimated.
Returns:
ndarray of shape ``(n_obs, nbatch)``: The kernel_value at each training point for each var.
"""
u = (Xi - x) / h
out = 70 / 81 * (np.maximum(0, 1 - np.abs(u)**3)**3)
assert out.shape == (Xi.shape[0], x.shape[1])
return out
# https://github.com/statsmodels/statsmodels/blob/2a5a6ec3baf901f52008aee10f166ff6085d3ba5/statsmodels/nonparametric/_kernel_base.py
# statsmodels.nonparametric.kernels: https://github.com/statsmodels/statsmodels/blob/2a5a6ec3baf901f52008aee10f166ff6085d3ba5/statsmodels/nonparametric/kernels.py
kernel_func = dict(
wangryzin=kernels.wang_ryzin,
aitchisonaitken=kernels.aitchison_aitken,
# https://tedboy.github.io/statsmodels_doc/_modules/statsmodels/nonparametric/kernels.html#gaussian
gaussian=kernels.gaussian,
aitchison_aitken_reg=kernels.aitchison_aitken_reg,
wangryzin_reg=kernels.wang_ryzin_reg,
gauss_convolution=kernels.gaussian_convolution,
wangryzin_convolution=kernels.wang_ryzin_convolution,
aitchisonaitken_convolution=kernels.aitchison_aitken_convolution,
gaussian_cdf=kernels.gaussian_cdf,
aitchisonaitken_cdf=kernels.aitchison_aitken_cdf,
wangryzin_cdf=kernels.wang_ryzin_cdf,
d_gaussian=kernels.d_gaussian,
# tricube=kernels.tricube,
tricube=tricube,
# Following are added here:
indicator=indicator_kernel,
epanechnikov=epanechnikov,
triweight=triweight,
biweight=biweight,
)
@dataclass
class VanillaProductKernelConfig:
"""A configuration set used for product kernels.
Parameters:
conti_kertype : Default: 'gaussian'.
        ordered_kertype : statsmodels' original default is 'wangryzin'.
        unordered_kertype : statsmodels' original default is 'aitchisonaitken'.
conti_bw_method :
ordered_bw_method :
unordered_bw_method :
"""
conti_kertype: str = 'gaussian'
conti_bw_method: str = 'normal_reference'
conti_bw_temperature: float = 1.
ordered_kertype: str = 'indicator'
ordered_bw_method: str = 'indicator'
unordered_kertype: str = 'indicator'
unordered_bw_method: str = 'indicator'
def bw_normal_reference(x: np.ndarray, kernel=sandbox_kernels.Gaussian) -> float:
"""
Plug-in bandwidth with kernel specific constant based on normal reference.
This bandwidth minimizes the mean integrated square error if the true
distribution is the normal. This choice is an appropriate bandwidth for
single peaked distributions that are similar to the normal distribution.
Parameters
----------
x : array_like
Array for which to get the bandwidth
kernel : CustomKernel object
Used to calculate the constant for the plug-in bandwidth.
Returns
-------
bw : float
The estimate of the bandwidth
Notes
-----
Returns C * A * n ** (-1/5.) where ::
A = min(std(x, ddof=1), IQR/1.349)
IQR = np.subtract.reduce(np.percentile(x, [75,25]))
C = constant from Hansen (2009)
When using a Gaussian kernel this is equivalent to the 'scott' bandwidth up
to two decimal places. This is the accuracy to which the 'scott' constant is
specified.
References
----------
Silverman, B.W. (1986) `Density Estimation.`
Hansen, B.E. (2009) `Lecture Notes on Nonparametrics.`
"""
C = kernel().normal_reference_constant
A = bandwidths._select_sigma(x)
n = len(x)
return C * A * n**(-0.2)
class BandwidthNormalReference:
"""Class to propose the rule-of-thumb bandwidth."""
def __init__(self, coeff:float=1):
"""Constructor.
Parameters:
coeff : Coefficient to multiply the rule-of-thumb bandwidth.
"""
self.coeff = coeff
def __call__(self, *args, **kwargs) -> float:
"""Compute the bandwidth.
Returns:
Computed bandwidth.
"""
return self.coeff * bw_normal_reference(*args, **kwargs)
class VanillaProductKernel:
"""Product kernel object.
Notes:
Bandwidth methods: ``statsmodels.nonparametric.bandwidths``: https://www.statsmodels.org/devel/_modules/statsmodels/nonparametric/bandwidths.html
"""
BW_METHODS = {
'normal_reference': BandwidthNormalReference(),
'indicator': lambda x: None,
}
def __init__(
self,
data_ref: np.ndarray,
vartypes: str,
config: VanillaProductKernelConfig = VanillaProductKernelConfig()):
"""Constructor.
Parameters:
data_ref : Reference data points for which the kernel values are computed.
vartypes : The variable type ('c': continuous, 'o': ordered, 'u': unordered). Example: ``'ccou'``.
            config : The configuration object.
"""
self.vartypes = vartypes
self.kertypes = dict(c=config.conti_kertype,
o=config.ordered_kertype,
u=config.unordered_kertype)
self.bw_methods = dict(c=config.conti_bw_method,
o=config.ordered_bw_method,
u=config.unordered_bw_method)
self.conti_bw_temperature = config.conti_bw_temperature
self._fit(data_ref)
def _fit(self, data_ref: np.ndarray) -> None:
"""Fit the product kernel.
Parameters:
data_ref : ndarray of shape ``(n_obs, n_dim)`` the kernel centers.
"""
self.data_ref = data_ref
self.bandwidths = []
for k, vtype in enumerate(self.vartypes):
bw_method = self.bw_methods.get(vtype, lambda x: 'not implemented')
if isinstance(bw_method, str):
bw = self.BW_METHODS[bw_method](data_ref[:, k])
else:
bw = bw_method(data_ref[:, k])
if vtype == 'c':
bw = bw * self.conti_bw_temperature
if bw == 0:
# Error handling for the case that there is only one unique value for the variable in the data.
bw = MINIMUM_CONTI_BANDWIDTH
self.bandwidths.append(bw)
def __call__(self, data: np.ndarray) -> np.ndarray:
"""Compute the kernel matrix ``(k(data_i, data_ref_j))_{ij}``.
Parameters:
data : ndarray of shape ``(n_data, n_dim)``.
Returns:
ndarray of shape ``(n_data, n_data_ref)``.
"""
gram_matrices = []
for k, vtype in enumerate(self.vartypes):
func = kernel_func[self.kertypes[vtype]]
gram_matrix = func(self.bandwidths[k], data[:, k][:, None],
self.data_ref[:, k][None, :])
gram_matrices.append(gram_matrix)
return np.array(gram_matrices).prod(axis=0)
|
#!/usr/bin/env python3
import os
import re
import subprocess
media_re = '.(mp4|mov|wma)$'
from_dir = "ready-for-glitch"
overlay_dir = "overlays"
dest_dir = "final"
overlay_file = "tv-interference-overlay.mp4"
# unlike on the command line, complicated cli options don't need
# to be quoted when using subprocess options
ffmpeg_opts = [
"-filter_complex",
# "[1:0]scale=-1:480, crop=ih/3*4:ih, setdar=dar=1, format=rgba[a]; \
# [0:0]scale=-1:480, crop=ih/3*4:ih, setdar=dar=1, format=rgba[b]; \
# [b][a]blend=all_mode='overlay':all_opacity=0.8"
# "[1]split[m][a]; \
# [a]geq='if(gt(lum(X,Y),16),255,0)',hue=s=0[al]; \
# [m][al]alphamerge[ovr]; \
# [0][ovr]overlay",
# "[1]colorkey=0x000000:0.02:0.03[ckout]; \
# [ckout]scale=-1:480, crop=ih/3*4:ih[scaleout]; \
# [0][scaleout]overlay=154:170[out];"
"[0:a]volume=1.0[a0]; \
[1:a]volume=0.2[a1]; \
[a0][a1]amix=inputs=2:duration=shortest; \
[0:v]scale=-1:480, crop=ih/3*4:ih[basevid]; \
[1:v]scale=-1:480, crop=ih/3*4:ih[overlay]; \
[overlay]colorkey=0x000000:0.2:0.2[keyout]; \
[basevid][keyout]overlay=shortest=1[out]",
"-map", "[out]",
# "-acodec", "copy",
# this works but has hard edges around the key, and doesn't scale
# "[1:v]colorkey=0x000000:0.1:0.1[ckout]; \
# [0:v][ckout]overlay[out]",
# "-map", "[out]",
# this works but the blend mode is not quite right
# "[1:0]scale=-1:480, crop=ih/3*4:ih, format=rgba[a]; \
# [0:0]scale=-1:480, crop=ih/3*4:ih, format=rgba[b]; \
# [b][a]blend=all_mode='overlay':all_opacity=0.8",
# glitch mods
# "-x265-params", "b-frames=0",
"-shortest",
"-c:v", "libx264",
"-preset", "medium",
"-crf", "23",
"-c:a", "aac",
"-strict",
"-2"
]
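# The active filter graph mixes both audio tracks, scales/crops both videos to 4:3 at 480px height,
# keys black out of the overlay clip, and composites it over the base video.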
media_dir_raw = input ("Enter media folder: ")
media_dir_clean = re.sub('\\\\', '', media_dir_raw)
# if media dir not found, bail
# isdir() wants the plain path, without shell escapes or surrounding quotes
if not os.path.isdir(media_dir_clean):
    print(f"Folder not found: {media_dir_clean}")
    quit()
else:
    print(f"Valid media folder: {media_dir_clean}")
# if source dir not found, bail
if not os.path.isdir(f"{media_dir_clean}/{from_dir}"):
    print(f"No folder \"{from_dir}\", recheck media folder.")
    quit()
else:
    print(f"Folder \"{from_dir}\" found.")
# if destination doesn't exist, create it
if not os.path.isdir(f"{media_dir_clean}/{dest_dir}"):
    print(f"No destination folder \"{dest_dir}\" found, creating.")
    os.makedirs(f"{media_dir_clean}/{dest_dir}")
else:
    print(f"Destination folder \"{dest_dir}\" found.")
print("Transcoding files")
print("=================")
full_src_dir = f"{media_dir_clean}/{from_dir}"
full_over_dir = f"{media_dir_clean}/{overlay_dir}"
full_dest_dir = f"{media_dir_clean}/{dest_dir}"
filelist = [f for f in os.listdir(full_src_dir)]
opt_str = " ".join(ffmpeg_opts)
try:
for file in filelist:
if re.search(media_re, file):
basename = os.path.splitext(file)[0]
            # Note: the source and dest dirs may contain spaces; passing them as separate list items to subprocess handles this without extra quoting.
source = f"{full_src_dir}/{file}"
overlay = f"{full_over_dir}/{overlay_file}"
dest = f"{full_dest_dir}/{basename}.mp4"
# unlike on the command line, complicated cli options don't need
# to be quoted when using subprocess options
# ffmpeg -i orig-vid.mp4 -i overlay.mp4 opts result.mp3
args = ["ffmpeg", "-i", f'{source}', "-i", f'{overlay}'] + ffmpeg_opts + [f'{dest}']
# print (args)
if not os.path.isfile(dest):
subprocess.call(args)
else:
print(f"File {file} already exists, skipping.")
except (Exception, KeyboardInterrupt) as err:
    # Clean up a partially written output file, if any, before bailing out.
    print(f"Transcoding interrupted: {err}")
    if 'dest' in locals() and os.path.isfile(dest):
        os.remove(dest)
|