# -*- coding: utf-8 -*-
# Copyright © 2017 Kevin Thibedeau
# Distributed under the terms of the MIT license
import os
import math
import cairo
from .shapes import *
try:
import pango
import pangocairo
use_pygobject = False
except ImportError:
import gi
gi.require_version('Pango', '1.0')
gi.require_version('PangoCairo', '1.0')
from gi.repository import Pango as pango
from gi.repository import PangoCairo as pangocairo
use_pygobject = True
#################################
## CAIRO objects
#################################
def rgb_to_cairo(rgb):
if len(rgb) == 4:
r,g,b,a = rgb
return (r / 255.0, g / 255.0, b / 255.0, a / 255.0)
else:
r,g,b = rgb
return (r / 255.0, g / 255.0, b / 255.0, 1.0)
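# Convert a Tk-style font tuple (family, size, weight, ...) into a Pango FontDescription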
def cairo_font(tk_font):
family, size, weight, *extra = tk_font
return pango.FontDescription('{} {} {}'.format(family, weight, size))
def cairo_line_cap(line_cap):
if line_cap == 'round':
return cairo.LINE_CAP_ROUND
elif line_cap == 'square':
return cairo.LINE_CAP_SQUARE
else:
return cairo.LINE_CAP_BUTT
class CairoSurface(BaseSurface):
def __init__(self, fname, def_styles, padding=0, scale=1.0):
BaseSurface.__init__(self, fname, def_styles, padding, scale)
self.ctx = None
def render(self, canvas, transparent=False):
x0,y0,x1,y1 = canvas.bbox('all')
self.markers = canvas.markers
W = int((x1 - x0 + 2*self.padding) * self.scale)
H = int((y1 - y0 + 2*self.padding) * self.scale)
ext = os.path.splitext(self.fname)[1].lower()
if ext == '.svg':
surf = cairo.SVGSurface(self.fname, W, H)
elif ext == '.pdf':
surf = cairo.PDFSurface(self.fname, W, H)
elif ext in ('.ps', '.eps'):
surf = cairo.PSSurface(self.fname, W, H)
if ext == '.eps':
surf.set_eps(True)
else: # Bitmap
surf = cairo.ImageSurface(cairo.FORMAT_ARGB32, W, H)
self.ctx = cairo.Context(surf)
ctx = self.ctx
if not transparent:
# Fill background
ctx.rectangle(0,0, W,H)
ctx.set_source_rgba(1.0,1.0,1.0)
ctx.fill()
ctx.scale(self.scale, self.scale)
ctx.translate(-x0 + self.padding, -y0 + self.padding)
if self.draw_bbox:
last = len(canvas.shapes)
for s in canvas.shapes[:last]:
bbox = s.bbox
r = canvas.create_rectangle(*bbox, line_color=(255,0,0, 127), fill=(0,255,0,90))
for s in canvas.shapes:
self.draw_shape(s)
if ext in ('.svg', '.pdf', '.ps', '.eps'):
surf.show_page()
else:
surf.write_to_png(self.fname)
def text_bbox(self, text, font_params, spacing=0):
return CairoSurface.cairo_text_bbox(text, font_params, spacing, self.scale)
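    # Lay out the Pango markup on a throwaway 8x8 image surface to measure its
    # logical pixel extents and first-line baseline without rendering anything.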
@staticmethod
def cairo_text_bbox(text, font_params, spacing=0, scale=1.0):
surf = cairo.ImageSurface(cairo.FORMAT_ARGB32, 8, 8)
ctx = cairo.Context(surf)
# The scaling must match the final context.
# If not there can be a mismatch between the computed extents here
# and those generated for the final render.
ctx.scale(scale, scale)
font = cairo_font(font_params)
if use_pygobject:
status, attrs, plain_text, _ = pango.parse_markup(text, len(text), '\0')
layout = pangocairo.create_layout(ctx)
pctx = layout.get_context()
fo = cairo.FontOptions()
fo.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
pangocairo.context_set_font_options(pctx, fo)
layout.set_font_description(font)
layout.set_spacing(spacing * pango.SCALE)
layout.set_text(plain_text, len(plain_text))
layout.set_attributes(attrs)
li = layout.get_iter() # Get first line of text
baseline = li.get_baseline() / pango.SCALE
re = layout.get_pixel_extents()[1] # Get logical extents
extents = (re.x, re.y, re.x + re.width, re.y + re.height)
else: # pyGtk
attrs, plain_text, _ = pango.parse_markup(text)
pctx = pangocairo.CairoContext(ctx)
pctx.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
layout = pctx.create_layout()
layout.set_font_description(font)
layout.set_spacing(spacing * pango.SCALE)
layout.set_text(plain_text)
layout.set_attributes(attrs)
li = layout.get_iter() # Get first line of text
baseline = li.get_baseline() / pango.SCALE
#print('@@ EXTENTS:', layout.get_pixel_extents()[1], spacing)
extents = layout.get_pixel_extents()[1] # Get logical extents
return [extents[0], extents[1], extents[2], extents[3], baseline]
@staticmethod
def draw_text(x, y, text, font, text_color, spacing, c):
c.save()
c.set_source_rgba(*rgb_to_cairo(text_color))
font = cairo_font(font)
c.translate(x, y)
if use_pygobject:
status, attrs, plain_text, _ = pango.parse_markup(text, len(text), '\0')
layout = pangocairo.create_layout(c)
pctx = layout.get_context()
fo = cairo.FontOptions()
fo.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
pangocairo.context_set_font_options(pctx, fo)
layout.set_font_description(font)
layout.set_spacing(spacing * pango.SCALE)
layout.set_text(plain_text, len(plain_text))
layout.set_attributes(attrs)
pangocairo.update_layout(c, layout)
pangocairo.show_layout(c, layout)
else: # pyGtk
attrs, plain_text, _ = pango.parse_markup(text)
pctx = pangocairo.CairoContext(c)
pctx.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
layout = pctx.create_layout()
layout.set_font_description(font)
layout.set_spacing(spacing * pango.SCALE)
layout.set_text(plain_text)
layout.set_attributes(attrs)
pctx.update_layout(layout)
pctx.show_layout(layout)
c.restore()
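    # Draw a line marker (e.g. an arrow head) at point mp, oriented toward tp and,
    # when the marker units are 'stroke', scaled by the line weight.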
def draw_marker(self, name, mp, tp, weight, c):
if name in self.markers:
m_shape, ref, orient, units = self.markers[name]
c.save()
c.translate(*mp)
if orient == 'auto':
angle = math.atan2(tp[1]-mp[1], tp[0]-mp[0])
c.rotate(angle)
elif isinstance(orient, int):
angle = math.radians(orient)
c.rotate(angle)
if units == 'stroke':
c.scale(weight, weight)
c.translate(-ref[0], -ref[1])
self.draw_shape(m_shape)
c.restore()
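    # Dispatch on the shape type (group, text, line, rect, oval, arc, path) and
    # render it with the current default styles, honoring any registered custom drawers.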
def draw_shape(self, shape):
c = self.ctx
default_pen = rgb_to_cairo(self.def_styles.line_color)
c.set_source_rgba(*default_pen)
weight = shape.param('weight', self.def_styles)
fill = shape.param('fill', self.def_styles)
line_color = shape.param('line_color', self.def_styles)
line_cap = cairo_line_cap(shape.param('line_cap', self.def_styles))
        stroke = weight > 0
c.set_line_width(weight)
c.set_line_cap(line_cap)
if shape.__class__ in self.shape_drawers:
self.shape_drawers[shape.__class__](shape, self)
elif isinstance(shape, GroupShape):
c.save()
c.translate(*shape._pos)
if 'scale' in shape.options:
c.scale(shape.options['scale'], shape.options['scale'])
if 'angle' in shape.options:
c.rotate(math.radians(shape.options['angle']))
for s in shape.shapes:
self.draw_shape(s)
c.restore()
elif isinstance(shape, TextShape):
x0, y0, x1, y1 = shape.bbox
text = shape.param('text', self.def_styles)
font = shape.param('font', self.def_styles)
text_color = shape.param('text_color', self.def_styles)
anchor = shape.param('anchor', self.def_styles).lower()
spacing = shape.param('spacing', self.def_styles)
CairoSurface.draw_text(x0, y0, text, font, text_color, spacing, c)
elif isinstance(shape, LineShape):
x0, y0, x1, y1 = shape.points
marker = shape.param('marker')
marker_start = shape.param('marker_start')
marker_mid = shape.param('marker_mid')
marker_end = shape.param('marker_end')
if marker is not None:
if marker_start is None:
marker_start = marker
if marker_end is None:
marker_end = marker
if marker_mid is None:
marker_mid = marker
adjust = shape.param('marker_adjust')
if adjust is None:
adjust = 0
if adjust > 0:
angle = math.atan2(y1-y0, x1-x0)
dx = math.cos(angle)
dy = math.sin(angle)
if marker_start in self.markers:
# Get bbox of marker
m_shape, ref, orient, units = self.markers[marker_start]
mx0, my0, mx1, my1 = m_shape.bbox
soff = (ref[0] - mx0) * adjust
# Move start point
x0 += soff * dx
y0 += soff * dy
if marker_end in self.markers:
# Get bbox of marker
m_shape, ref, orient, units = self.markers[marker_end]
mx0, my0, mx1, my1 = m_shape.bbox
eoff = (mx1 - ref[0]) * adjust
# Move end point
x1 -= eoff * dx
y1 -= eoff * dy
c.move_to(x0,y0)
c.line_to(x1,y1)
c.stroke()
# Draw any markers
self.draw_marker(marker_start, (x0,y0), (x1,y1), weight, c)
self.draw_marker(marker_end, (x1,y1), (x1 + 2*(x1-x0),y1 + 2*(y1-y0)), weight, c)
self.draw_marker(marker_mid, ((x0 + x1)/2,(y0+y1)/2), (x1,y1), weight, c)
elif isinstance(shape, RectShape):
x0, y0, x1, y1 = shape.points
c.rectangle(x0,y0, x1-x0,y1-y0)
if fill is not None:
c.set_source_rgba(*rgb_to_cairo(fill))
if stroke:
c.fill_preserve()
else:
c.fill()
if stroke:
c.set_source_rgba(*rgb_to_cairo(line_color))
c.stroke()
elif isinstance(shape, OvalShape):
x0, y0, x1, y1 = shape.points
xc = (x0 + x1) / 2.0
yc = (y0 + y1) / 2.0
w = abs(x1 - x0)
h = abs(y1 - y0)
c.save()
# Set transformation matrix to permit drawing ovals
c.translate(xc,yc)
c.scale(w/2.0, h/2.0)
c.arc(0,0, 1, 0, 2 * math.pi)
#c.arc(xc,yc, rad, 0, 2 * math.pi)
if fill is not None:
c.set_source_rgba(*rgb_to_cairo(fill))
if stroke:
c.fill_preserve()
else:
c.fill()
c.restore() # Stroke with original transform
if stroke:
c.set_source_rgba(*rgb_to_cairo(line_color))
c.stroke()
elif isinstance(shape, ArcShape):
x0, y0, x1, y1 = shape.points
xc = (x0 + x1) / 2.0
yc = (y0 + y1) / 2.0
w = abs(x1 - x0)
h = abs(y1 - y0)
start = shape.options['start']
extent = shape.options['extent']
# Start and end angles
sa = -math.radians(start)
ea = -math.radians(start + extent)
# Tk has opposite angle convention from Cairo
# Positive extent is a negative rotation in Cairo
# Negative extent is a positive rotation in Cairo
c.save()
c.translate(xc, yc)
c.scale(w/2.0, h/2.0)
if fill is not None:
c.move_to(0,0)
if extent >= 0:
c.arc_negative(0,0, 1.0, sa, ea)
else:
c.arc(0,0, 1.0, sa, ea)
c.set_source_rgba(*rgb_to_cairo(fill))
c.fill()
# Stroke arc segment
c.new_sub_path()
if extent >= 0:
c.arc_negative(0,0, 1.0, sa, ea)
else:
c.arc(0,0, 1.0, sa, ea)
c.restore()
c.set_source_rgba(*rgb_to_cairo(line_color))
c.stroke()
elif isinstance(shape, PathShape):
pp = shape.nodes[0]
for n in shape.nodes:
if n == 'z':
c.close_path()
break
elif len(n) == 2:
c.line_to(*n)
pp = n
elif len(n) == 6:
c.curve_to(*n)
pp = n[4:6]
elif len(n) == 5: # Arc (javascript arcto() args)
#print('# arc:', pp)
#pp = self.draw_rounded_corner(pp, n[0:2], n[2:4], n[4], c)
center, start_p, end_p, rad = rounded_corner(pp, n[0:2], n[2:4], n[4])
if rad < 0: # No arc
c.line_to(*end_p)
else:
# Determine angles to arc end points
ostart_p = (start_p[0] - center[0], start_p[1] - center[1])
oend_p = (end_p[0] - center[0], end_p[1] - center[1])
start_a = math.atan2(ostart_p[1], ostart_p[0]) % math.radians(360)
end_a = math.atan2(oend_p[1], oend_p[0]) % math.radians(360)
# Determine direction of arc
# Rotate whole system so that start_a is on x-axis
# Then if delta < 180 cw if delta > 180 ccw
delta = (end_a - start_a) % math.radians(360)
#print('# start_a, end_a', math.degrees(start_a), math.degrees(end_a),
# math.degrees(delta))
if delta < math.radians(180): # CW
c.arc(center[0],center[1], rad, start_a, end_a)
else: # CCW
c.arc_negative(center[0],center[1], rad, start_a, end_a)
pp = end_p
#print('# pp:', pp)
if fill is not None:
c.set_source_rgba(*rgb_to_cairo(fill))
if stroke:
c.fill_preserve()
else:
c.fill()
if stroke:
c.set_source_rgba(*rgb_to_cairo(line_color))
c.stroke()
|
import os
import subprocess
import pickle
import sys
import re      # needed for the re.sub calls below
import math    # needed for math.log below
# Give input as path to folder containing pairs file
path = sys.argv[1]
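# Brute-force longest common substring between two strings (O(len1 * len2))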
def longestSubstringFinder(string1, string2):
answer = ""
len1, len2 = len(string1), len(string2)
for i in range(len1):
match = ""
for j in range(len2):
if (i + j < len1 and string1[i + j] == string2[j]):
match += string2[j]
else:
if (len(match) > len(answer)): answer = match
match = ""
return answer
#f=open('pairs file','r')
def doeslastmatch(string1,string2): #Checks if last word matches
s1=re.sub(r'[^\w]', ' ', string1) #Removing symbols from string
s2=re.sub(r'[^\w]', ' ', string2)
    s1 = s1.split(" ")
    s2 = s2.split(" ")
if s1[-1]==s2[-1]:
return 1
else:
return 0
def doesfirstmatch(string1,string2): #Checks if first word matches
s1=re.sub(r'[^\w]', ' ', string1) #Removing symbols from string
s2=re.sub(r'[^\w]', ' ', string2)
    s1 = s1.split(" ")
    s2 = s2.split(" ")
    if s1[0] == s2[0]:
return 1
else:
return 0
ctypedict={'coref pronoun': 0, 'coref person':1,'coref treatment':2,'coref problem':3,'coref test':4,'null':5}
def num_words_concept(string1):
    s1 = re.sub(r'[^\w]', ' ', string1)
    words = s1.split(" ")
    return len(words)
def three_features(name_l1, name_l2):
    # Defined before the processing loop so it exists when first called
    num_common_names = len(set(name_l1) & set(name_l2))  # Intersection of name results
    len_n1 = len(name_l1)  # length of first name array
    len_n2 = len(name_l2)  # length of second name array
    return num_common_names, len_n1, len_n2
for i in os.listdir(path):
    f = open(os.path.join(path, i), 'r')
fp=open('feature_folder/feature'+i,'w+')
for line in f:
feature_vector=[]
s=line.split("\"")
c1=s[1]
c2=s[5]
first_line=s[2].split(":")
end_line=s[6].split(":")
start_1, end_1=first_line[0],first_line[2]
start_2,end_2=end_line[0],end_line[2]
concept_type=ctypedict.get(s[3])
#c1type=s[-1]
#c2type=s[-2]
os.remove('name_l1.pickle')
os.remove('name_l2.pickle')
os.remove('umls_results.pickle')
subprocess.call(['./umls_concepts.sh',c1,c2])
with open('umls_results.pickle','rb') as p:
umls_vector=pickle.load(p)
common=longestSubstringFinder(c1,c2)
len_common=len(common) #Length of longest common substring of each concept
max_mention_length=max(len(c1),len(c2)) #Which of the concept mention is longer in length
part_common=max_mention_length-len_common
feature_vector.append(concept_type)
feature_vector.append(len_common)
feature_vector.append(part_common)
feature_vector.append(doesfirstmatch(c1,c2))
feature_vector.append(doeslastmatch(c1,c2))
feature_vector.append(len(c1))
feature_vector.append(len(c2))
feature_vector.append(num_words_concept(c1))
feature_vector.append(num_words_concept(c2))
feature_vector.append(start_1)
feature_vector.append(start_2)
with open('exact_name_l1.pickle','rb') as p:
exact_name_l1=pickle.load(p)
with open('exact_name_l2.pickle','rb') as p:
exact_name_l2=pickle.load(p)
with open('approximate_name_l1.pickle','rb') as p:
approximate_name_l1=pickle.load(p)
with open('approximate_name_l2.pickle','rb') as p:
approximate_name_l2=pickle.load(p)
with open('leftTruncation_name_l1.pickle','rb') as p:
leftTruncation_name_l1=pickle.load(p)
with open('leftTruncation_name_l2.pickle','rb') as p:
leftTruncation_name_l2=pickle.load(p)
with open('rightTruncation_name_l1.pickle','rb') as p:
rightTruncation_name_l1=pickle.load(p)
with open('rightTruncation_name_l2.pickle','rb') as p:
rightTruncation_name_l2=pickle.load(p)
with open('word_name_l1.pickle','rb') as p:
word_name_l1=pickle.load(p)
with open('word_name_l2.pickle','rb') as p:
word_name_l2=pickle.load(p)
with open('normalizedString_name_l1.pickle','rb') as p:
normalizedString_name_l1=pickle.load(p)
with open('normalizedString_name_l2.pickle','rb') as p:
normalizedString_name_l2=pickle.load(p)
f1,f2,f3=three_features(exact_name_l1,exact_name_l2)
feature_vector.append(f1)
feature_vector.append(f2)
feature_vector.append(f3)
f1,f2,f3=three_features(approximate_name_l1,approximate_name_l2)
feature_vector.append(f1)
feature_vector.append(f2)
feature_vector.append(f3)
f1,f2,f3=three_features(normalizedString_name_l1,normalizedString_name_l2)
feature_vector.append(f1)
feature_vector.append(f2)
feature_vector.append(f3)
f1,f2,f3=three_features(leftTruncation_name_l1,leftTruncation_name_l2)
feature_vector.append(f1)
feature_vector.append(f2)
feature_vector.append(f3)
f1,f2,f3=three_features(rightTruncation_name_l1,rightTruncation_name_l2)
feature_vector.append(f1)
feature_vector.append(f2)
feature_vector.append(f3)
f1,f2,f3=three_features(word_name_l1,word_name_l2)
feature_vector.append(f1)
feature_vector.append(f2)
feature_vector.append(f3)
        # log term frequency
        words_common = re.sub(r'[^\w]', ' ', common).split()  # words in the longest common substring
        feature_vector.append(len(words_common))
        ltf = math.log((len(words_common) + 1) / (num_words_concept(c1) + num_words_concept(c2)))
        feature_vector.append(ltf)
for feature in feature_vector:
fp.write("%s," % feature)
fp.write(c1+",")
fp.write(c2)
fp.write("\n")
fp.close()
f.close()
|
import xml.etree.ElementTree as Et
class BusLocation:
def __init__(self, bus_arrival_item: dict):
self.route_id = bus_arrival_item['routeId']
self.station_id = bus_arrival_item['stationId']
self.station_seq = bus_arrival_item['stationSeq']
self.end_bus = bus_arrival_item['endBus']
self.plate_no = bus_arrival_item['plateNo']
self.plate_type = bus_arrival_item['plateType']
self.remain_seat_cnt = bus_arrival_item['remainSeatCnt']
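    # Build a BusLocation from an XML item element of the bus-location API response,
    # reading each child tag's text into the expected dict keys.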
@staticmethod
    def from_element_tree(bus_arrival_item: Et.Element):
return BusLocation(
{
'routeId': bus_arrival_item.find('routeId').text,
'stationId': bus_arrival_item.find('stationId').text,
'stationSeq': bus_arrival_item.find('stationSeq').text,
'endBus': bus_arrival_item.find('endBus').text,
'plateNo': bus_arrival_item.find('plateNo').text,
'plateType': bus_arrival_item.find('plateType').text,
'remainSeatCnt': bus_arrival_item.find('remainSeatCnt').text
}
)
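    # The Korean labels below mean: route ID, station ID, station order,
    # last-bus flag, plate number, vehicle type, and number of empty seats.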
def __str__(self):
return '노선 ID : {}\n' \
'정류소 ID : {}\n' \
'정류소 순서 : {}\n' \
'막차 여부 : {}\n' \
'차량번호 : {}\n' \
'차종 : {}\n' \
'빈 자리 수 : {}\n'\
.format(
self.route_id,
self.station_id,
self.station_seq,
self.end_bus,
self.plate_no,
self.plate_type,
self.remain_seat_cnt
)
def print_simple(self, station_map: dict):
return '{:2d}번째 정류소 : {} (ID : {}){} - {}\n'\
.format(
int(self.station_seq),
station_map[self.station_id],
self.station_id,
' <막차>' if (self.end_bus == '1') else '',
self.plate_no
)
|
from django.test import TestCase
from django.urls import resolve, reverse
from ..models import Post
from ..views import PostListView
class PageTests(TestCase):
"""page view tests"""
def setUp(self):
self.post = Post.objects.create(title='Vasyan', post_text='blog',
before_spoiler='not a good blog')
self.page_url = reverse('page', kwargs={'page': 1})
self.page_response = self.client.get(self.page_url)
def test_page_status_code(self):
"""Tests the page status code"""
self.assertEqual(self.page_response.status_code, 200)
def test_page_url_resolves_post_list_view(self):
"""Tests resolving PostListView"""
view = resolve('/page/1/')
self.assertEqual(view.func.view_class, PostListView)
def test_page_contains_links_to_posts(self):
"""Tests the page contains the links to the posts"""
post_url = reverse('post', kwargs={'pk': self.post.pk})
self.assertContains(self.page_response, 'href="{0}"'.format(post_url))
def test_outofrange_page(self):
"""Tests if we get 404 when we go to a not existing page"""
outofrange_page_url = reverse('page', kwargs={'page': 200})
outofrange_response = self.client.get(outofrange_page_url)
self.assertEqual(outofrange_response.status_code, 404)
def test_new_post_appeared_in_center_and_recent_posts(self):
"""Tests if the new post appeared in the navigation bar (recent posts)
and the main content part (center)
"""
self.assertContains(self.page_response, 'href="/post/{}/"'.format(self.post.pk), 2)
class Pagination(TestCase):
def setUp(self):
self.POSTS_ON_PAGE = 2
        # Every post is wrapped in a div container with this class name
self.POST_DIV_CLASS_NAME = 'post'
for i in range(self.POSTS_ON_PAGE*5+1):
t = str(i)
Post.objects.create(title=t, post_text=t, before_spoiler=t)
def test_page_contains_links_to_other_pages(self):
"""Tests if the page page contains the links to the other pages.
It must have references at least to the last, first, previous and next
pages
"""
page_url = reverse('page', kwargs={'page': 3})
page_response = self.client.get(page_url)
# First page
self.assertContains(
page_response,
'href="{}"'.format(reverse('page', kwargs={'page': 1}))
)
# Last page
self.assertContains(
page_response,
'href="{}"'.format(reverse('page', kwargs={'page': 6}))
)
# Previous page
self.assertContains(
page_response,
'href="{}"'.format(reverse('page', kwargs={'page': 2}))
)
# Next page
self.assertContains(
page_response,
'href="{}"'.format(reverse('page', kwargs={'page': 4}))
)
def test_every_page_contains_right_number_of_posts(self):
"""Tests every page contains right number of posts"""
for page in range(1, 6):
page_url = reverse('page', kwargs={'page': page})
page_response = self.client.get(page_url)
self.assertContains(
page_response,
'class="{}"'.format(self.POST_DIV_CLASS_NAME),
self.POSTS_ON_PAGE
)
page_url = reverse('page', kwargs={'page': 6})
page_response = self.client.get(page_url)
self.assertContains(page_response, 'class="{}"'.format(self.POST_DIV_CLASS_NAME), 1)
|
import logging
import asyncio
import collections
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.helpers import aiohttp_client
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'grohe_sense'
CONF_REFRESH_TOKEN = 'refresh_token'
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema({
vol.Required(CONF_REFRESH_TOKEN): cv.string,
}),
},
extra=vol.ALLOW_EXTRA,
)
BASE_URL = 'https://idp2-apigw.cloud.grohe.com/v3/iot/'
GROHE_SENSE_TYPE = 101 # Type identifier for the battery powered water detector
GROHE_SENSE_GUARD_TYPE = 103 # Type identifier for sense guard, the water guard installed on your water pipe
GroheDevice = collections.namedtuple('GroheDevice', ['locationId', 'roomId', 'applianceId', 'type', 'name'])
async def async_setup(hass, config):
_LOGGER.debug("Loading Grohe Sense")
await initialize_shared_objects(hass, config.get(DOMAIN).get(CONF_REFRESH_TOKEN))
await hass.helpers.discovery.async_load_platform('sensor', DOMAIN, {}, config)
await hass.helpers.discovery.async_load_platform('switch', DOMAIN, {}, config)
return True
async def initialize_shared_objects(hass, refresh_token):
session = aiohttp_client.async_get_clientsession(hass)
auth_session = OauthSession(session, refresh_token)
devices = []
hass.data[DOMAIN] = { 'session': auth_session, 'devices': devices }
    locations = await auth_session.get(BASE_URL + 'locations')
for location in locations:
_LOGGER.debug('Found location %s', location)
locationId = location['id']
rooms = await auth_session.get(BASE_URL + f'locations/{locationId}/rooms')
for room in rooms:
_LOGGER.debug('Found room %s', room)
roomId = room['id']
appliances = await auth_session.get(BASE_URL + f'locations/{locationId}/rooms/{roomId}/appliances')
for appliance in appliances:
_LOGGER.debug('Found appliance %s', appliance)
applianceId = appliance['appliance_id']
devices.append(GroheDevice(locationId, roomId, applianceId, appliance['type'], appliance['name']))
class OauthException(Exception):
def __init__(self, error_code, reason):
self.error_code = error_code
self.reason = reason
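# Wraps an aiohttp session with OAuth handling: refreshes the access token on
# demand (coordinated through an asyncio.Event so only one refresh runs at a time)
# and retries failed requests with exponential backoff.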
class OauthSession:
def __init__(self, session, refresh_token):
self._session = session
self._refresh_token = refresh_token
self._access_token = None
self._fetching_new_token = None
@property
def session(self):
return self._session
async def token(self, old_token=None):
""" Returns an authorization header. If one is supplied as old_token, invalidate that one """
if self._access_token not in (None, old_token):
return self._access_token
if self._fetching_new_token is not None:
await self._fetching_new_token.wait()
return self._access_token
self._access_token = None
self._fetching_new_token = asyncio.Event()
data = { 'refresh_token': self._refresh_token }
headers = { 'Content-Type': 'application/json' }
refresh_response = await self._http_request(BASE_URL + 'oidc/refresh', 'post', headers=headers, json=data)
        if 'access_token' not in refresh_response:
_LOGGER.error('OAuth token refresh did not yield access token! Got back %s', refresh_response)
else:
self._access_token = 'Bearer ' + refresh_response['access_token']
self._fetching_new_token.set()
self._fetching_new_token = None
return self._access_token
async def get(self, url, **kwargs):
return await self._http_request(url, auth_token=self, **kwargs)
async def post(self, url, json, **kwargs):
return await self._http_request(url, method='post', auth_token=self, json=json, **kwargs)
async def _http_request(self, url, method='get', auth_token=None, headers={}, **kwargs):
_LOGGER.debug('Making http %s request to %s, headers %s', method, url, headers)
headers = headers.copy()
tries = 0
while True:
if auth_token != None:
# Cache token so we know which token was used for this request,
# so we know if we need to invalidate.
token = await auth_token.token()
headers['Authorization'] = token
try:
async with self._session.request(method, url, headers=headers, **kwargs) as response:
_LOGGER.debug('Http %s request to %s got response %d', method, url, response.status)
if response.status in (200, 201):
return await response.json()
elif response.status == 401:
if auth_token != None:
_LOGGER.debug('Request to %s returned status %d, refreshing auth token', url, response.status)
token = await auth_token.token(token)
else:
_LOGGER.error('Grohe sense refresh token is invalid (or expired), please update your configuration with a new refresh token')
raise OauthException(response.status, await response.text())
else:
_LOGGER.debug('Request to %s returned status %d, %s', url, response.status, await response.text())
except OauthException as oe:
raise
except Exception as e:
_LOGGER.debug('Exception for http %s request to %s: %s', method, url, e)
tries += 1
await asyncio.sleep(min(600, 2**tries))
|
# Generated by Django 2.0.9 on 2019-07-19 18:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('maps', '0003_auto_20190716_2152'),
]
operations = [
migrations.AlterField(
model_name='stop',
name='comments',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='stop',
name='contact',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AlterField(
model_name='stop',
name='email',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AlterField(
model_name='stop',
name='phone',
field=models.CharField(blank=True, max_length=15, null=True),
),
]
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_table
import time
import datetime
import json
import pandas as pd
import numpy as np
import os
from dash.dependencies import Input, Output, State
# from server import app, server
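# Each q* entry is [question text, answer option labels, corresponding scores,
# domain code] -- presumably the KCCQ domains (SF = symptom frequency, QL = quality of life).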
q2=["2. Over the past 2 weeks, how many times did you have swelling in your feet, ankles or legs when you woke up in the morning?",
["Every morning",
"3 or more times per week but not every day",
"1-2 times per week",
"Less than once a week",
"Never over the past 2 weeks"],
[20,40,60,80,100], 'SF']
q3=["3. Over the past 2 weeks, on average, how many times has fatigue limited your ability to do what you wanted?",
["All of the time",
"Several times per day",
"At least once a day",
"3 or more times per week but not every day",
"1-2 times per week",
"Less than once a week",
"Never over the past 2 weeks"],
[15,30,45,60,75,90,100], 'SF']
q4=["4. Over the past 2 weeks, on average, how many times has shortness of breath limited your ability to do what you wanted?",
["All of the time",
"Several times per day",
"At least once a day",
"3 or more times per week but not every day",
"1-2 times per week",
"Less than once a week",
"Never over the past 2 weeks"],
[15,30,45,60,75,90,100], 'SF']
q5=["5. Over the past 2 weeks, on average, how many times have you been forced to sleep sitting up in a chair or with at least 3 pillows to prop you up because of shortness of breath?",
["Every night",
"3 or more times per week but not every day",
"1-2 times per week",
"Less than once a week",
"Never over the past 2 weeks"],
[30,40,60,80,100], "SF"]
q6=["6. Over the past 2 weeks, how much has your heart failure limited your enjoyment of life?",
["It has extremely limited my enjoyment of life",
"It has limited my enjoyment of life quite a bit",
"It has moderately limited my enjoyment of life",
"It has slightly limited my enjoyment of life",
"It has not limited my enjoyment of life at all",],
[20,40,60,80,100], "QL"]
q7=["7. If you had to spend the rest of your life with your heart failure the way it is right now, how would you feel about this?",
["Not at all satisfied",
"Mostly dissatisfied",
"Somewhat satisfied",
"Mostly satisfied",
"Completely satisfied",],
[20,40,60,80,100], "QL"]
def modal_kccq_questionaire_answer(app):
return html.Div(
[
html.H6("Review", style={"font-size":"0.7rem","padding-top":"10px"}),
dbc.Button(children = [html.Img(src=app.get_asset_url("icon-inspection-100.png"), style={"height":"1.5rem", "padding-top":"0px"})], color="light",style={"border-radius":"10rem"}, id = 'kccq-modal-answer-button-open'),
dbc.Modal(
[
dbc.ModalHeader(id = "kccq-modal-answer-header"),
dbc.ModalBody(
modal_kccq_questionaire_body_answer(),
style={"padding":"40px","margin-top":"-20px"}
),
dbc.ModalFooter(
dbc.Button("Close", id="kccq-modal-answer-button-submit", className="mr-2",style={"width":"160px"}),
style={"padding-right":"42%"}
)
],
id = "kccq-modal-answer",
size = 'xl',
backdrop = "static"
)
],
style={"text-align":"center"}
)
def modal_kccq_questionaire_body_answer():
return html.Div(
[
html.Div(
[
html.Div(
"1. Heart failure affects different people in different ways. Some feel shortness of breath while others feel fatigue. Please indicate how much you are limited by heart failure (shortness of breath or fatigue) in your ability to do the following activities over the past 2 weeks.",
style={"padding-top":"10px","padding-bottom":"10px"}
),
html.Div(
[
html.Div(
dbc.Row(
[
dbc.Col(width = 3),
dbc.Col("Extremely Limited"),
dbc.Col("Quite a bit Limited"),
dbc.Col("Moderately Limited"),
dbc.Col("Slightly Limited"),
dbc.Col("Not at all Limited"),
dbc.Col("Limited for other reasons or did not do the activity"),
],
style = {"display" : "flex", "justify-content" : "space-around", "text-align" : "center","font-family":"NotoSans-SemiBold"}
)
),
html.Hr(),
html.Div(
dbc.Row(
[
dbc.Col("a. Showering/bathing", width = 3),
dbc.Col(
dbc.RadioItems(
options = [
{"label": "", "value" : 20, "disabled" : True},
{"label": "", "value" : 40, "disabled" : True},
{"label": "", "value" : 60, "disabled" : True},
{"label": "", "value" : 80, "disabled" : True},
{"label": "", "value" : 100, "disabled" : True},
{"label": "", "value" : 50, "disabled" : True},
],
id = "kccq-modal-answer-radio-q1a",
inline = True,
style = {"display" : "flex", "justify-content" : "space-around"} ),
),
]
)
),
html.Hr(),
html.Div(
dbc.Row(
[
dbc.Col("b. Walking 1 block on level ground",width = 3),
dbc.Col(
dbc.RadioItems(
options = [
{"label": "", "value" : 20, "disabled" : True},
{"label": "", "value" : 40, "disabled" : True},
{"label": "", "value" : 60, "disabled" : True},
{"label": "", "value" : 80, "disabled" : True},
{"label": "", "value" : 100, "disabled" : True},
{"label": "", "value" : 50, "disabled" : True},
],
id = "kccq-modal-answer-radio-q1b",
inline = True,
style = {"display" : "flex", "justify-content" : "space-around"} ),
),
]
)
),
html.Hr(),
html.Div(
dbc.Row(
[
dbc.Col("c. Hurrying or jogging (as if to catch a bus)",width = 3),
dbc.Col(
dbc.RadioItems(
options = [
{"label": "", "value" : 20, "disabled" : True},
{"label": "", "value" : 40, "disabled" : True},
{"label": "", "value" : 60, "disabled" : True},
{"label": "", "value" : 80, "disabled" : True},
{"label": "", "value" : 100, "disabled" : True},
{"label": "", "value" : 50, "disabled" : True},
],
id = "kccq-modal-answer-radio-q1c",
inline = True,
style = {"display" : "flex", "justify-content" : "space-around"} ),
),
]
)
),
],
style={"font-size":"0.8rem","padding":"20px","border-radius":"0.5rem","background":"#f5f5f5"}
)
],
style={"padding":"20px"}
),
question_group_answer(q2[0], q2[1], q2[2], "kccq-modal-answer-radio-q2"),
question_group_answer(q3[0], q3[1], q3[2], "kccq-modal-answer-radio-q3"),
question_group_answer(q4[0], q4[1], q4[2], "kccq-modal-answer-radio-q4"),
question_group_answer(q5[0], q5[1], q5[2], "kccq-modal-answer-radio-q5"),
question_group_answer(q6[0], q6[1], q6[2], "kccq-modal-answer-radio-q6"),
question_group_answer(q7[0], q7[1], q7[2], "kccq-modal-answer-radio-q7"),
html.Div(
[
html.Div(
"8. How much does your heart failure affect your lifestyle? Please indicate how your heart failure may have limited your participation in the following activities over the past 2 weeks.",
style={"padding-top":"10px","padding-bottom":"10px"}
),
html.Div(
[
html.Div(
dbc.Row(
[
dbc.Col(width = 3),
dbc.Col("Severely Limited"),
dbc.Col("Limited quite a bit"),
dbc.Col("Moderately Limited"),
dbc.Col("Slightly Limited"),
dbc.Col("Did not limit at all"),
dbc.Col("Does not apply or did not do for other reasons"),
],
style = {"display" : "flex", "justify-content" : "space-around", "text-align" : "center","font-family":"NotoSans-SemiBold"}
)
),
html.Hr(),
html.Div(
dbc.Row(
[
dbc.Col("a. Hobbies, recreational activities", width = 3),
dbc.Col(
dbc.RadioItems(
options = [
{"label": "", "value" : 20, "disabled" : True},
{"label": "", "value" : 40, "disabled" : True},
{"label": "", "value" : 60, "disabled" : True},
{"label": "", "value" : 80, "disabled" : True},
{"label": "", "value" : 100, "disabled" : True},
{"label": "", "value" : 70, "disabled" : True},
],
id = "kccq-modal-answer-radio-q8a",
inline = True,
style = {"display" : "flex", "justify-content" : "space-around"} ),
),
]
)
),
html.Hr(),
html.Div(
dbc.Row(
[
dbc.Col("b. Working or doing household chores",width = 3),
dbc.Col(
dbc.RadioItems(
options = [
{"label": "", "value" : 20, "disabled" : True},
{"label": "", "value" : 40, "disabled" : True},
{"label": "", "value" : 60, "disabled" : True},
{"label": "", "value" : 80, "disabled" : True},
{"label": "", "value" : 100, "disabled" : True},
{"label": "", "value" : 70, "disabled" : True},
],
id = "kccq-modal-answer-radio-q8b",
inline = True,
style = {"display" : "flex", "justify-content" : "space-around"} ),
),
]
)
),
html.Hr(),
html.Div(
dbc.Row(
[
dbc.Col("c. Visiting family or friends out of your home",width = 3),
dbc.Col(
dbc.RadioItems(
options = [
{"label": "", "value" : 20, "disabled" : True},
{"label": "", "value" : 40, "disabled" : True},
{"label": "", "value" : 60, "disabled" : True},
{"label": "", "value" : 80, "disabled" : True},
{"label": "", "value" : 100, "disabled" : True},
{"label": "", "value" : 70, "disabled" : True},
],
id = "kccq-modal-answer-radio-q8c",
inline = True,
style = {"display" : "flex", "justify-content" : "space-around"} ),
),
]
)
),
],
style={"font-size":"0.8rem","padding":"20px","border-radius":"0.5rem","background":"#f5f5f5"}
)
],
style={"padding":"20px"}
),
],
# style={"margin-top":"-30rem","background-color":"transparent","text-align":"center"}
)
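# Render a single question as a labeled group of disabled radio items, so a
# previously submitted answer can be reviewed but not changed.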
def question_group_answer(label, value_list, value, id):
value_list_len = len(value_list)
options = []
for i in range(value_list_len):
options.append({"label":value_list[i], "value":value[i], "disabled" : True})
return html.Div(
[
dbc.FormGroup(
[
dbc.Label(label,style={"padding-top":"10px","padding-bottom":"10px"}),
dbc.RadioItems(
options=options,
id=id,
style={"font-size":"0.8rem","padding":"20px","border-radius":"0.5rem","background":"#f5f5f5"}
),
],
style={"padding":"20px"}
)
]
)
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.core.files.base import File
from django.core.files.storage import get_storage_class
from . import servers
import logging
logger = logging.getLogger(__name__)
from importlib import import_module
def get_class(import_path=None):
"""
Largely based on django.core.files.storage's get_storage_class
"""
from django.core.exceptions import ImproperlyConfigured
if import_path is None:
raise ImproperlyConfigured('No class path specified.')
try:
dot = import_path.rindex('.')
except ValueError:
raise ImproperlyConfigured("%s isn't a module." % import_path)
module, classname = import_path[:dot], import_path[dot+1:]
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module %s: "%s"' % (module, e))
try:
return getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" class.' % (module, classname))
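# Instantiate the file server and permission checker from the dotted-path class
# names configured in Django settings, with optional keyword-argument options.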
server = get_class(settings.PRIVATE_MEDIA_SERVER)(**getattr(settings, 'PRIVATE_MEDIA_SERVER_OPTIONS', {}))
if hasattr(settings,'PRIVATE_MEDIA_PERMISSIONS'):
permissions = get_class(settings.PRIVATE_MEDIA_PERMISSIONS)(**getattr(settings, 'PRIVATE_MEDIA_PERMISSIONS_OPTIONS', {}))
else:
from .permissions import DefaultPrivatePermissions
permissions = DefaultPrivatePermissions()
def serve_private_file(request, path):
"""
Serve private files to users with read permission.
"""
if not permissions.has_read_permission(request, path):
if settings.DEBUG:
raise PermissionDenied
else:
raise Http404('File not found')
return server.serve(request, relative_path=path)
|
import unittest
import os
from ..testing import TINYMCE_LATEX_INTEGRATION_TESTING
try:
from Products.CMFPlone.utils import get_installer
except ImportError:
get_installer = None
class TestSetup(unittest.TestCase):
"""Test that this is properly installed."""
layer = TINYMCE_LATEX_INTEGRATION_TESTING
def setUp(self):
"""Custom shared utility setup for tests."""
self.portal = self.layer['portal']
self.view = self.portal.restrictedTraverse('@@latex')
def test_render_image(self):
img = self.view.render_image('f=ma', 16)
self.assertIn(b'\x89PNG', img)
base_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(base_path, 'formula.png'), 'rb') as expected:
expected = expected.read()
self.assertEqual(expected, img)
|
model = {
"schema": {
"title": {
"type": "string",
"regex": "^[a-z0-9]+(?:-[a-z0-9]+)*$",
"required": True,
"unique": True,
"minlength": 0,
"maxlength": 400,
"_metadata": {
"order": 1,
"help": "May only contain alpha-numeric characters and dashes",
"label": "Setting Key",
"field": "slug"
}
},
"setting_value": {
"type": "list",
"schema": {
"type": "dict",
"anyof": [
{
"schema": {
"key": {
"type": "string",
"nullable": True,
"_metadata": {
"order": 1,
"help": "",
"label": "Key",
"field": "string"
}
},
"value": {
"type": "string",
"nullable": True,
"_metadata": {
"order": 2,
"help": "",
"label": "Value",
"field": "string"
}
},
"template": {
"type": "string",
"default": "string_value",
}
},
"_metadata": {
"order": 1,
"help": "",
"label": "String",
"field": "dict"
}
},
{
"schema": {
"key": {
"type": "string",
"nullable": True,
"_metadata": {
"order": 1,
"help": "",
"label": "Key",
"field": "string"
}
},
"value": {
"type": "string",
"_metadata": {
"order": 2,
"help": "",
"label": "Value",
"field": "richtext"
}
},
"template": {
"type": "string",
"default": "richtext_value",
}
},
"_metadata": {
"order": 2,
"help": "",
"label": "Rich Text",
"field": "dict"
}
},
{
"schema": {
"key": {
"type": "string",
"nullable": True,
"_metadata": {
"order": 1,
"help": "",
"label": "Key",
"field": "string"
}
},
"value": {
"type": "string",
"_metadata": {
"order": 2,
"help": "",
"label": "Value",
"field": "textfield"
}
},
"template": {
"type": "string",
"default": "text_value",
}
},
"_metadata": {
"order": 3,
"help": "",
"label": "Text",
"field": "dict"
}
},
{
"schema": {
"key": {
"type": "string",
"nullable": True,
"_metadata": {
"order": 1,
"help": "",
"label": "Key",
"field": "string"
}
},
"value": {
"type": ["objectid", "string"],
"data_relation": {
"resource": "sitemedia",
"field": "_id",
"embeddable": True
},
"nullable": True,
"_metadata": {
"order": 2,
"help": "",
"label": "Value",
"field": "imagefield"
}
},
"template": {
"type": "string",
"default": "image_value",
}
},
"_metadata": {
"order": 4,
"help": "",
"label": "Image",
"field": "dict"
}
},
{
"schema": {
"key": {
"type": "string",
"nullable": True,
"_metadata": {
"order": 1,
"help": "",
"label": "Key",
"field": "string"
}
},
"value": {
"type": "list",
"schema": {
"type": "string",
"_metadata": {
"field": "string"
}
},
"_metadata": {
"order": 2,
"help": "",
"label": "Value",
"field": "simplelist"
}
},
"template": {
"type": "string",
"default": "list_value",
}
},
"_metadata": {
"order": 5,
"help": "",
"label": "List",
"field": "dict"
}
},
{
"schema": {
"key": {
"type": "string",
"nullable": True,
"_metadata": {
"order": 1,
"help": "",
"label": "Key",
"field": "string"
}
},
"value": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"key": {
"type": "string",
"_metadata": {
"order": 1,
"help": "",
"label": "Sub Key",
"field": "string"
}
},
"value": {
"type": "string",
"_metadata": {
"order": 2,
"help": "",
"label": "Sub Value",
"field": "richtext"
}
}
},
"_metadata": {
"field": "dict"
}
},
"_metadata": {
"order": 2,
"help": "Help text for list example",
"label": "Key/Value Store",
"field": "list"
}
},
"template": {
"type": "string",
"default": "dict_value",
}
},
"_metadata": {
"order": 6,
"help": "",
"label": "Key/Value Store Rich Text",
"field": "dict"
}
},
{
"schema": {
"key": {
"type": "string",
"nullable": True,
"_metadata": {
"order": 1,
"help": "",
"label": "Key",
"field": "string"
}
},
"value": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"key": {
"type": "string",
"_metadata": {
"order": 1,
"help": "",
"label": "Sub Key",
"field": "string"
}
},
"value": {
"type": "string",
"_metadata": {
"order": 2,
"help": "",
"label": "Sub Value",
"field": "textfield"
}
}
},
"_metadata": {
"field": "dict"
}
},
"_metadata": {
"order": 2,
"help": "",
"label": "Key/Value Store Plaintext",
"field": "list"
}
},
"template": {
"type": "string",
"default": "dict_value_plaintext",
}
},
"_metadata": {
"order": 7,
"help": "",
"label": "Key/Value Store Plaintext",
"field": "dict"
}
}
],
"_metadata": {
"order": 1,
"help": "",
"label": "Options",
"field": "flexible_content"
}
},
"_metadata": {
"order": 2,
"help": "",
"label": "Setting Values",
"field": "list"
}
},
"tags": {
"type": "list",
"schema": {
"type": "string",
"_metadata": {
"field": "string"
}
},
"_metadata": {
"order": 3,
"help": "",
"label": "Tags",
"field": "simplelist"
}
}
},
"cache_control": "",
"cache_expires": 0,
"allowed_roles": [{"role": "superuser"}],
"public_methods": ['GET'],
"public_item_methods": ['GET'],
"resource_methods": ["GET", "POST"],
"item_methods": ["GET", "PUT", "PATCH", "DELETE"],
"versioning": True,
"hateoas": False,
"pagination": False,
}
|
"""Test runway.core.providers.aws.s3._helpers.sync_strategy.base."""
# pylint: disable=no-self-use
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, List, Optional, cast
import pytest
from mock import Mock
from runway.core.providers.aws.s3._helpers.file_generator import FileStats
from runway.core.providers.aws.s3._helpers.sync_strategy.base import (
BaseSync,
MissingFileSync,
NeverSync,
SizeAndLastModifiedSync,
)
if TYPE_CHECKING:
from pytest_mock import MockerFixture
from runway.core.providers.aws.s3._helpers.sync_strategy.base import ValidSyncType
MODULE = "runway.core.providers.aws.s3._helpers.sync_strategy.base"
class TestBaseSync:
"""Test BaseSync."""
@pytest.mark.parametrize(
"src_size, dest_size, expected",
[(10, 10, True), (10, 11, False), (11, 10, False)],
)
def test_compare_size(self, dest_size: int, expected: bool, src_size: int) -> None:
"""Test compare_size."""
src_file = FileStats(src="", size=src_size)
dest_file = FileStats(src="", size=dest_size)
assert BaseSync.compare_size(src_file, dest_file) is expected
@pytest.mark.parametrize(
"src, dest", [(None, None), (Mock(), None), (None, Mock())]
)
def test_compare_size_raise_value_error(
self, dest: Optional[FileStats], src: Optional[FileStats]
) -> None:
"""Test compare_time."""
with pytest.raises(ValueError) as excinfo:
BaseSync().compare_size(src, dest)
assert str(excinfo.value) == "src_file and dest_file must not be None"
def test_compare_time(self) -> None:
"""Test compare_time."""
obj = BaseSync()
now = datetime.datetime.now()
future = now + datetime.timedelta(0, 15)
kwargs = {"src": "", "operation_name": "invalid"}
assert (
obj.compare_time(
FileStats(last_update=now, **kwargs),
FileStats(last_update=now, **kwargs),
)
is False
)
assert (
obj.compare_time(
FileStats(last_update=now, **kwargs),
FileStats(last_update=future, **kwargs),
)
is False
)
assert (
obj.compare_time(
FileStats(last_update=future, **kwargs),
FileStats(last_update=now, **kwargs),
)
is False
)
@pytest.mark.parametrize("operation_name", ["copy", "upload"])
def test_compare_time_copy_or_upload(self, operation_name: str) -> None:
"""Test compare_time."""
obj = BaseSync()
now = datetime.datetime.now()
future = now + datetime.timedelta(0, 15)
kwargs = {"src": "", "operation_name": operation_name}
assert (
obj.compare_time(
FileStats(last_update=now, **kwargs),
FileStats(last_update=now, **kwargs),
)
is True
)
assert (
obj.compare_time(
FileStats(last_update=now, **kwargs),
FileStats(last_update=future, **kwargs),
)
is True
)
assert (
obj.compare_time(
FileStats(last_update=future, **kwargs),
FileStats(last_update=now, **kwargs),
)
is False
)
def test_compare_time_download(self) -> None:
"""Test compare_time."""
obj = BaseSync()
now = datetime.datetime.now()
future = now + datetime.timedelta(0, 15)
kwargs = {"src": "", "operation_name": "download"}
assert (
obj.compare_time(
FileStats(last_update=now, **kwargs),
FileStats(last_update=now, **kwargs),
)
is True
)
assert (
obj.compare_time(
FileStats(last_update=now, **kwargs),
FileStats(last_update=future, **kwargs),
)
is False
)
assert (
obj.compare_time(
FileStats(last_update=future, **kwargs),
FileStats(last_update=now, **kwargs),
)
is True
)
@pytest.mark.parametrize(
"src, dest", [(None, None), (Mock(), None), (None, Mock())]
)
def test_compare_time_raise_value_error(
self, dest: Optional[FileStats], src: Optional[FileStats]
) -> None:
"""Test compare_time."""
with pytest.raises(ValueError) as excinfo:
BaseSync().compare_time(src, dest)
assert str(excinfo.value) == "src_file and dest_file must not be None"
def test_determine_should_sync(self) -> None:
"""Test determine_should_sync."""
with pytest.raises(NotImplementedError):
BaseSync().determine_should_sync(None, None) # type: ignore
def test_init(self) -> None:
"""Test __init__."""
valid_sync_types: List[ValidSyncType] = [
"file_at_src_and_dest",
"file_not_at_dest",
"file_not_at_src",
]
for sync_type in valid_sync_types:
strategy = BaseSync(sync_type)
assert strategy.sync_type == sync_type
with pytest.raises(ValueError):
BaseSync("invalid_sync_type") # type: ignore
def test_name(self) -> None:
"""Test name."""
assert BaseSync().name is None
def test_register_strategy(self) -> None:
"""Test register_strategy."""
session = Mock()
obj = BaseSync()
obj.register_strategy(session)
register_args = cast(Mock, session.register).call_args_list
assert register_args[0][0][0] == "choosing-s3-sync-strategy"
# pylint: disable=comparison-with-callable
assert register_args[0][0][1] == obj.use_sync_strategy
def test_use_sync_strategy(self, mocker: MockerFixture) -> None:
"""Test use_sync_strategy."""
assert (
BaseSync().use_sync_strategy(
{"invalid_sync_strategy": True} # type: ignore
)
is None
)
mocker.patch.object(BaseSync, "name", "something")
obj = BaseSync()
assert obj.use_sync_strategy({"something": True}) == obj # type: ignore
class TestMissingFileSync:
"""Test MissingFileSync."""
@pytest.mark.parametrize(
"is_size, is_time, expected",
[
(True, True, True),
(True, False, True),
(False, True, True),
(False, False, True),
],
)
def test_determine_should_sync(
self, expected: bool, is_size: bool, is_time: bool, mocker: MockerFixture
) -> None:
"""Test determine_should_sync."""
mock_compare_size = mocker.patch.object(
MissingFileSync, "compare_size", return_value=is_size
)
mock_compare_time = mocker.patch.object(
MissingFileSync, "compare_time", return_value=is_time
)
assert (
MissingFileSync().determine_should_sync(
FileStats(src=""), FileStats(src="")
)
is expected
)
mock_compare_size.assert_not_called()
mock_compare_time.assert_not_called()
def test_name(self) -> None:
"""Test name."""
assert MissingFileSync().name is None
def test_sync_type(self) -> None:
"""Test sync_type."""
assert MissingFileSync().sync_type == "file_not_at_dest"
class TestNeverSync:
"""Test NeverSync."""
@pytest.mark.parametrize(
"is_size, is_time, expected",
[
(True, True, False),
(True, False, False),
(False, True, False),
(False, False, False),
],
)
def test_determine_should_sync(
self, expected: bool, is_size: bool, is_time: bool, mocker: MockerFixture
) -> None:
"""Test determine_should_sync."""
mock_compare_size = mocker.patch.object(
NeverSync, "compare_size", return_value=is_size
)
mock_compare_time = mocker.patch.object(
NeverSync, "compare_time", return_value=is_time
)
assert (
NeverSync().determine_should_sync(FileStats(src=""), FileStats(src=""))
is expected
)
mock_compare_size.assert_not_called()
mock_compare_time.assert_not_called()
def test_name(self) -> None:
"""Test name."""
assert NeverSync().name is None
def test_sync_type(self) -> None:
"""Test sync_type."""
assert NeverSync().sync_type == "file_not_at_src"
class TestSizeAndLastModifiedSync:
"""Test SizeAndLastModifiedSync."""
@pytest.mark.parametrize(
"is_size, is_time, expected",
[
(True, True, False),
(True, False, True),
(False, True, True),
(False, False, True),
],
)
def test_determine_should_sync(
self, expected: bool, is_size: bool, is_time: bool, mocker: MockerFixture
) -> None:
"""Test determine_should_sync."""
src_file = FileStats(src="")
dest_file = FileStats(src="")
mock_compare_size = mocker.patch.object(
SizeAndLastModifiedSync, "compare_size", return_value=is_size
)
mock_compare_time = mocker.patch.object(
SizeAndLastModifiedSync, "compare_time", return_value=is_time
)
assert (
SizeAndLastModifiedSync().determine_should_sync(src_file, dest_file)
is expected
)
mock_compare_size.assert_called_once_with(src_file, dest_file)
mock_compare_time.assert_called_once_with(src_file, dest_file)
def test_name(self) -> None:
"""Test name."""
        assert SizeAndLastModifiedSync().name is None
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import Length, Email, EqualTo, DataRequired, ValidationError
from market.models import User
class RegisterForm(FlaskForm):
username = StringField(label="username", validators=[Length(min=6, max=30), DataRequired()])
email = StringField(label="email", validators=[Email(), DataRequired()])
pd = PasswordField(label="password", validators=[Length(6, ), DataRequired()])
confirm_pd = PasswordField(label="confirm password", validators=[EqualTo("pd"), DataRequired()])
submit = SubmitField(label="Create Account")
def validate_username(self, username_to_check):
"""Prevent the error from SQLAlchemy, database can have two users with the same username"""
user = User.query.filter_by(username=username_to_check.data).first()
if user:
raise ValidationError("The user with this username, has already existed. Type another username.")
def validate_email(self, email_to_check):
"""Same thing as validate_username but now it's email"""
user_address = User.query.filter_by(user_email=email_to_check.data).first()
if user_address:
raise ValidationError("The user with this email address, has already existed. Type another email address.")
class LoginForm(FlaskForm):
username = StringField(label="username", validators=[DataRequired()])
password = PasswordField(label="password", validators=[DataRequired()])
submit = SubmitField(label="Sign In")
class BoughtItemForm(FlaskForm):
submit = SubmitField(label="Buy Item")
class SoldItemForm(FlaskForm):
submit = SubmitField(label="Sell Item")
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from flask import Flask, Response, request
from bentoml.utils.usage_stats import track_server
def setup_bento_service_api_route(app, api):
def view_function():
return api.handle_request(request)
app.add_url_rule(
rule="/invocations",
endpoint=api.name,
view_func=view_function,
methods=["POST"],
)
def ping_view_func():
return Response(response="\n", status=200, mimetype="application/json")
def setup_routes(app, bento_service, api_name):
"""
Setup routes required for AWS sagemaker
/ping
/invocations
"""
app.add_url_rule("/ping", "ping", ping_view_func)
api = bento_service.get_service_api(api_name)
setup_bento_service_api_route(app, api)
class BentoSagemakerServer:
"""
BentoSagemakerServer create an AWS Sagemaker compatibility rest API server.
"""
_DEFAULT_PORT = 8080
def __init__(self, bento_service, api_name, app_name=None):
app_name = bento_service.name if app_name is None else app_name
self.bento_service = bento_service
self.app = Flask(app_name)
setup_routes(self.app, self.bento_service, api_name)
def start(self):
track_server('sagemaker')
self.app.run(port=BentoSagemakerServer._DEFAULT_PORT)
|
from django.contrib import admin
from .models import Account, Category
admin.site.register(Account)
admin.site.register(Category)
|
from thefuzz import fuzz, process
import pandas as pd
colors_list = pd.read_csv('colors.csv', names=['Color', 'R', 'G', 'B'])
def getFuzzyColor(color_name):
    # we want an 85% match for thefuzz
fuzzyMatch = process.extractOne(color_name.title(), colors_list['Color'], scorer=fuzz.token_sort_ratio)
fuzz_color, percent_match = fuzzyMatch[0], fuzzyMatch[1]
if percent_match >= 85:
return fuzz_color
else:
return None
|
from winejournal.blueprints.filters.filters import filters
|
#-*-coding:utf-8-*-
import sys
from flask import Flask,request,render_template
import json
from etc.settings import DEBUG,receiver_file,register_server_port,web_log,LOG_DIRECTORY,RESOURCES_DIRECTORY
from threading import Lock
from os import remove, devnull, path  # path is used below to build file locations
from shutil import move
from logger import *
app=Flask(__name__)
web_log_file= open(path.join(LOG_DIRECTORY,web_log),'a')
sys.stderr = web_log_file
sys.stdout = web_log_file
lock = Lock()
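# Serialize writes to the receivers file across concurrent /register requests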
@app.route("/register",methods=["POST"])
def register() :
lock.acquire()
try :
email = request.form['email']
boards = json.loads(request.form['boards'])
email_enabled = True if request.form['email_enabled']=="True" else False
push_enabled= True if request.form['push_enabled'] =="True" else False
device_type = request.form['device_type']
push_token = request.form['push_token']
update_file(email,json.dumps([email,boards,email_enabled,push_enabled,device_type,push_token]))
finally:
lock.release()
return "OK"
@app.route("/",methods=["GET"])
def index() :
    with open(path.join(RESOURCES_DIRECTORY, receiver_file), 'r') as f:
        l = [json.loads(x) for x in f.readlines()]  # materialize so the list can be edited and rendered
    for i in l:
        s = ''
        for j in i[1]:
            s += str(j) + '\n'
        i[1] = s
    return render_template('index.html', receivers=l)
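# Rewrite the receivers file via a temporary file: replace the line for an
# existing email or append a new entry if the email is not found.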
def update_file(email,new_info) :
fo = open(path.join(RESOURCES_DIRECTORY,receiver_file)+'.tmp','w')
fi = open(path.join(RESOURCES_DIRECTORY,receiver_file),'r')
not_found = True
for line in fi.readlines() :
if email in line :
fo.write(new_info+'\n')
not_found = False
log("update User : %s, Info : %s"%(email,new_info))
else :
fo.write(line)
if not_found :
fo.write(new_info+'\n')
log("new User : %s, Info : %s"%(email,new_info))
fo.close()
fi.close()
    remove(path.join(RESOURCES_DIRECTORY, receiver_file))
    move(path.join(RESOURCES_DIRECTORY, receiver_file) + '.tmp', path.join(RESOURCES_DIRECTORY, receiver_file))
def run():
app.debug = DEBUG
log("Flask Started. PORT : %s"%register_server_port)
app.run(host='0.0.0.0',port=register_server_port)
|
from django.conf.urls import url, include
from . import views
from gallery import views as gallery_views
from files import views as files_views
urlpatterns = [
url(r'^$', views.GroupListView.as_view(), name='list'),
url(r'^(?P<group_id>[0-9]+)/?$', views.GroupFeedView.as_view(), name='feed'),
url(r'^(?P<group_id>[0-9]+)/settings/?$', views.GroupSettingsView.as_view(), name='settings'),
# url(r'^(?P<group_id>[0-9]+)/settings/sync_admins?$', views.GroupSettingsSyncAdminsView.as_view(),
# name='settings_sync_admins'),
url(r'^(?P<group_id>[0-9]+)/photos/?$', gallery_views.GroupView.as_view(), name='photo_overview'),
url(r'^(?P<group_id>[0-9]+)/photos/(?P<photo_id>[0-9]+)/?$', gallery_views.PhotoDetailView.as_view(),
name='photo_detail'),
url(r'^(?P<group_id>[0-9]+)/photos/albums/(?P<album_id>[0-9]+)/?$', gallery_views.GroupAlbumDetailView.as_view(),
name='photo_album'),
url(r'^(?P<group_id>[0-9]+)/photos/albums/(?P<album_id>[0-9]+)/(?P<photo_id>[0-9]+)/?$',
gallery_views.AlbumPhotoDetailView.as_view(),
name='photo_album_photo_detail'),
url(r'^(?P<group_id>[0-9]+)/files/?$', files_views.DirectoryView.as_view(), name='file_overview'),
url(r'^(?P<group_id>[0-9]+)/files/(?P<file_id>[0-9]+)/move/?$', files_views.MoveFileView.as_view(),
name='file_move'),
url(r'^(?P<group_id>[0-9]+)/files/(?P<file_id>[0-9]+)/delete/?$', files_views.DeleteFileView.as_view(),
name='file_delete'),
url(r'^(?P<group_id>[0-9]+)/files/directories/create/?$', files_views.CreateDirectoryView.as_view(),
name='file_directory_create'),
url(r'^(?P<group_id>[0-9]+)/files/directories/(?P<directory_id>[0-9]+)/?$', files_views.DirectoryView.as_view(),
name='file_directory'),
url(r'^(?P<group_id>[0-9]+)/files/directories/(?P<directory_id>[0-9]+)/move/?$',
files_views.MoveDirectoryView.as_view(), name='file_directory_move'),
url(r'^(?P<group_id>[0-9]+)/files/directories/(?P<directory_id>[0-9]+)/delete/?$',
files_views.DeleteDirectoryView.as_view(), name='file_directory_delete'),
url(r'^(?P<group_id>[0-9]+)/links/?$', views.GroupLinksView.as_view(), name='links'),
url(r'^(?P<group_id>[0-9]+)/links/(?P<link_id>[0-9]+)/delete/?$', views.DeleteLinkView.as_view(),
name='link_delete'),
url(r'^(?P<group_id>[0-9]+)/audios/?$', views.GroupAudiosView.as_view(), name='audios'),
url(r'^(?P<group_id>[0-9]+)/audios/(?P<audio_id>[0-9]+)/delete/?$', views.DeleteAudioView.as_view(),
name='audio_delete'),
url(r'^(?P<group_id>[0-9]+)/videos(/[0-9]+)?/?$', views.GroupVideosView.as_view(), name='videos'),
url(r'^(?P<group_id>[0-9]+)/videos/(?P<video_id>[0-9]+)/delete/?$', views.DeleteVideoView.as_view(),
name='video_delete'),
]
|
import json
import boto3
import os  # required to fetch environment variables
TASK_NAME_FILTER = os.environ['task_name_filter']
ssmclient = boto3.client('ssm')
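# Minimal example of the event shape this handler expects (illustrative; only the keys
# read below are shown, and the IDs are placeholders):
#   {
#     "detail": {
#       "window-id": "mw-0123456789abcdef0",
#       "window-task-id": "01234567-89ab-cdef-0123-456789abcdef"
#     }
#   }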
def lambda_handler(event, context):
eventdetail = event['detail']
eventmwid = eventdetail['window-id']
eventmwtaskid = eventdetail['window-task-id']
print ("Event Window: " + str(eventmwid))
print ("Event Task: " + str(eventmwtaskid))
# Retrieve Maintenance Window
mwindows = ssmclient.get_maintenance_window(WindowId=eventmwid)
mwname = mwindows['Name']
# Retrieve Maintenance Window Tasks
mwtask = ssmclient.get_maintenance_window_task(WindowId=eventmwid, WindowTaskId=eventmwtaskid)
mwtaskname = mwtask['Name']
mwtaskid = mwtask['WindowTaskId']
print ("Task Name: " + str(mwtaskname))
    # Check if the task name matches the configured task name filter
    if TASK_NAME_FILTER in mwtaskname:
        # Expand task parameters
        try:
            task_parameters = mwtask['TaskInvocationParameters']
            automation_parameters = task_parameters['Automation']['Parameters']
            parametervalue = automation_parameters['TagValue'][0]
            print("Currently Set: " + parametervalue)
        except (KeyError, IndexError, TypeError):
            print("Cannot retrieve parameters")
            parametervalue = ""
        # Check if parameters are already set - without this, the CloudWatch event would
        # trigger every time and cause a continuous loop
if mwname in parametervalue:
# Parameter already set - do nothing
print("No Update required")
else:
# Update Maintenance Task
ssmclient.update_maintenance_window_task(WindowId=eventmwid, WindowTaskId=eventmwtaskid,TaskInvocationParameters={"Automation":{"DocumentVersion":"$LATEST","Parameters":{"TagValue":[mwname]}}} )
#ssmclient.update_maintenance_window_task(WindowId=eventmwid, WindowTaskId=eventmwtaskid,TaskInvocationParameters={"Automation":{"DocumentVersion":"$LATEST"}} )
print ("Task Updated: " + str(mwtaskname))
return 'Completed'
|
import numpy as np
from nanogui import nanovg as nvg
from misc import *
import copy
def draw_coord_system(ctx):
ctx.StrokeColor(nvg.RGB(0, 0, 0))
ctx.StrokeWidth(0.005)
for i in range(-100, 101):
ctx.BeginPath()
ctx.MoveTo(-1000.0, float(i))
ctx.LineTo(+1000.0, float(i))
ctx.Stroke()
for i in range(-100, 101):
ctx.BeginPath()
ctx.MoveTo(float(i), -1000.0)
ctx.LineTo(float(i), +1000.0)
ctx.Stroke()
ctx.StrokeWidth(0.01)
ctx.BeginPath()
ctx.MoveTo(-1000.0, 0.0)
ctx.LineTo(+1000.0, 0.0)
ctx.Stroke()
ctx.BeginPath()
ctx.MoveTo(0.0, -1000.0)
ctx.LineTo(0.0, +1000.0)
ctx.Stroke()
def draw_path_lines(ctx, path, modifier='', scale=1):
if modifier == 'tangent' or modifier == 'seed':
ctx.StrokeColor(nvg.RGB(255, 0, 0))
ctx.StrokeWidth(0.004*scale)
elif modifier == 'manifold':
ctx.StrokeColor(nvg.RGB(114, 104, 130))
ctx.StrokeWidth(0.008*scale)
elif modifier == '':
ctx.StrokeColor(nvg.RGB(80, 80, 80))
ctx.StrokeWidth(0.008*scale)
for i in range(len(path) - 1):
v0 = path[i].p
v1 = path[i+1].p
ctx.BeginPath()
ctx.MoveTo(v0[0], v0[1])
ctx.LineTo(v1[0], v1[1])
ctx.Stroke()
def draw_intermediate_path_lines(ctx, path, color, scale=1):
ctx.StrokeColor(color)
ctx.StrokeWidth(0.004*scale)
for i in range(len(path) - 1):
v0 = path[i].p
v1 = path[i+1].p
ctx.BeginPath()
ctx.MoveTo(v0[0], v0[1])
ctx.LineTo(v1[0], v1[1])
ctx.Stroke()
def draw_dotted_path_lines(ctx, path, scale=1.0, spacing=0.05):
color = nvg.RGB(80, 80, 80)
for i in range(len(path) - 1):
v0 = path[i].p
v1 = path[i+1].p
draw_dotted_line(ctx, v0, v1, color, scale=scale, spacing=spacing)
def draw_path_vertices(ctx, path, modifier='', scale=1):
if modifier == 'seed':
ctx.FillColor(nvg.RGB(255, 0, 0))
elif modifier == 'manifold':
ctx.FillColor(nvg.RGB(114, 104, 130))
else:
ctx.FillColor(nvg.RGB(80, 80, 80))
ctx.StrokeColor(nvg.RGB(255, 255, 255))
ctx.StrokeWidth(0.005*scale)
for i in range(0, len(path)):
vtx = path[i]
ctx.BeginPath()
ctx.Circle(vtx.p[0], vtx.p[1], 0.015*scale)
ctx.Fill()
if modifier != 'seed':
ctx.Stroke()
def draw_points(ctx, positions, color, scale=1):
ctx.Save()
ctx.FillColor(color)
for p in positions:
ctx.BeginPath()
ctx.Circle(p[0], p[1], 0.008*scale)
ctx.Fill()
ctx.Restore()
def draw_vertices(ctx, positions, color, scale=1):
ctx.Save()
ctx.FillColor(color)
ctx.StrokeColor(nvg.RGB(255, 255, 255))
ctx.StrokeWidth(0.005*scale)
for p in positions:
ctx.BeginPath()
ctx.Circle(p[0], p[1], 0.015*scale)
ctx.Fill()
ctx.Stroke()
ctx.Restore()
def draw_line(ctx, a, b, color, scale=1.0, endcap_a=False, endcap_b=False):
ctx.Save()
ctx.StrokeWidth(0.01*scale)
ctx.StrokeColor(color)
ctx.BeginPath()
ctx.MoveTo(a[0], a[1])
ctx.LineTo(b[0], b[1])
ctx.Stroke()
ctx.StrokeWidth(0.005*scale)
v = normalize(b - a)
v_p = np.array([-v[1], v[0]])
if endcap_a:
ctx.BeginPath()
a1 = a + 0.02*v_p
a2 = a - 0.02*v_p
ctx.MoveTo(a1[0], a1[1])
ctx.LineTo(a2[0], a2[1])
ctx.Stroke()
if endcap_b:
ctx.BeginPath()
b1 = b + 0.02*v_p
b2 = b - 0.02*v_p
ctx.MoveTo(b1[0], b1[1])
ctx.LineTo(b2[0], b2[1])
ctx.Stroke()
ctx.Restore()
def draw_dotted_line(ctx, a, b, color, scale=1.0, spacing=0.05):
ctx.Save()
ctx.FillColor(color)
ctx.BeginPath()
v = b - a
dist = norm(v)
v /= dist
k = 0
while True:
offset = (k+0.5)*spacing*v
if norm(offset) > dist:
break
c = a + offset
ctx.Circle(c[0], c[1], 0.005*scale)
k += 1
ctx.Fill()
ctx.Restore()
def draw_angle(ctx, p, r, phi_0, phi_1, color, scale=1.0, flip=False):
o = nvg.NVGwinding.CCW if flip else nvg.NVGwinding.CW
ctx.Save()
ctx.StrokeWidth(0.01*scale)
ctx.StrokeColor(color)
ctx.BeginPath()
ctx.Arc(p[0], p[1], r, phi_0, phi_1, o)
ctx.Stroke()
ctx.Restore()
def draw_arrow_helper(ctx, p0, v, scale=1.0, length=0.12, head_scale=1.0):
theta = np.arctan2(v[1], v[0])
p1 = p0 + v * length * scale
ctx.Save()
ctx.StrokeWidth(0.01*scale)
ctx.BeginPath()
ctx.MoveTo(p0[0], p0[1])
ctx.LineTo(p1[0], p1[1])
ctx.Stroke()
ctx.Restore()
v_p = np.array([-v[1], v[0]])
p_a = p1 - 0.02*head_scale*scale*v_p - 0.01*head_scale*scale*v
p_b = p1 + 0.02*head_scale*scale*v_p - 0.01*head_scale*scale*v
p_tip = p1 + v * 0.03*head_scale * scale
ctx.BeginPath()
ctx.MoveTo(p_tip[0], p_tip[1])
ctx.LineTo(p_a[0], p_a[1])
ctx.ArcTo(p1[0], p1[1], p_b[0], p_b[1], scale*0.028)
ctx.LineTo(p_b[0], p_b[1])
ctx.LineTo(p_tip[0], p_tip[1])
ctx.Fill()
def draw_arrow(ctx, p, v, color, scale=1.0, length=1.0, head_scale=1.0):
ctx.Save()
ctx.FillColor(color)
ctx.StrokeColor(color)
draw_arrow_helper(ctx, p, v, scale, length, head_scale)
ctx.Restore()
def draw_path_normals(ctx, path, scale=1.0):
ctx.Save()
ctx.StrokeWidth(0.01*scale)
ctx.FillColor(nvg.RGB(255, 0, 0))
ctx.StrokeColor(nvg.RGB(255, 0, 0))
for i in range(1, len(path)-1):
vtx = path[i]
draw_arrow_helper(ctx, vtx.p, vtx.n, scale)
ctx.Restore()
def draw_path_tangents(ctx, path, scale=1.0):
ctx.Save()
ctx.FillColor(nvg.RGB(0, 0, 255))
ctx.StrokeColor(nvg.RGB(0, 0, 255))
ctx.StrokeWidth(0.01*scale)
for i in range(1, len(path)-1):
vtx = path[i]
draw_arrow_helper(ctx, vtx.p, vtx.s, scale)
ctx.Restore()
def draw_path_origin(ctx, path, last_picked_vtx_idx):
    if last_picked_vtx_idx is None:
return
ctx.FillColor(nvg.RGB(255, 255, 255))
ctx.StrokeColor(nvg.RGB(255, 255, 255))
ctx.StrokeWidth(0.01)
i0 = -1 if last_picked_vtx_idx == 0 else 0
i1 = -2 if last_picked_vtx_idx == 0 else 1
p0 = copy.copy(path[i0].p)
p1 = copy.copy(path[i1].p)
wi = p1 - p0
theta = np.arctan2(wi[1], wi[0])
wi = np.array([np.cos(theta), np.sin(theta)])
p1 = p0 + wi * 0.08
ctx.BeginPath()
ctx.MoveTo(p0[0], p0[1])
ctx.LineTo(p1[0], p1[1])
ctx.Stroke()
wi2 = np.array([np.cos(theta+0.3), np.sin(theta+0.3)])
wi3 = np.array([np.cos(theta-0.3), np.sin(theta-0.3)])
p2 = p0 + wi2 * 0.08
p3 = p0 + wi3 * 0.08
p4 = p1 + wi * 0.03
ctx.BeginPath()
ctx.MoveTo(p4[0], p4[1])
ctx.LineTo(p2[0], p2[1])
ctx.LineTo(p3[0], p3[1])
ctx.LineTo(p4[0], p4[1])
ctx.Fill()
|
from frequency_plan import Frequency
from enum import Enum
class CN470_510(Frequency):
JOIN_ACCEPT_DELAY = 5
MAX_FCNT_GAP = 16384
ADR_ACK_LIMIT = 64
ADR_ACK_DELAY = 32
ACK_TIMEOUT = 2 # 2 +/-1 s random delay between 1 and 3 seconds
RF_CH = 0
class DataRate(Enum):
SF12BW125 = 0
SF11BW125 = 1
SF10BW125 = 2
SF9BW125 = 3
SF8BW125 = 4
SF7BW125 = 5
RX2Frequency = 505.3
RX2DataRate = 0
RX1DRoffset = 0
RxDelay = 1
class TXPower(Enum):
dBm17 = 0
dBm16 = 1
dBm14 = 2
dBm12 = 3
dBm10 = 4
dBm7 = 5
dBm5 = 6
dBm2 = 7
default = 7
@classmethod
def rx1_datr(cls, dr_up, dr_offset):
"""
:param dr_up: int
:param dr_offset: int
:return: DataRate
"""
assert 0 <= dr_up <= 5
assert 0 <= dr_offset <= 3
dr_dn = dr_up - dr_offset
if dr_dn < 0:
dr_dn = 0
return cls.DataRate(dr_dn)
@classmethod
def rx1_freq(cls, freq_up):
"""
:param freq_up: float (MHz)
:return: float (MHz)
"""
chan_up = cls.get_channel_up_by_freq(freq_up)
chan_dn = chan_up % 48
freq_dn = cls.get_freq_dn_by_channel(chan_dn)
return freq_dn
@staticmethod
def get_freq_dn_by_channel(channel):
"""
:param channel: int
:return: float (MHz)
"""
assert 0 <= channel <= 47
# print(channel)
# print(500.3 + 0.2 * channel)
return 500 + (3 + 2 * channel)/10
# return 500.3 + 0.2 * channel
@staticmethod
def get_channel_up_by_freq(frequency):
"""
:param frequency: float (MHz)
:return:
"""
assert 470.3 <= frequency <= 489.3, 'CN470_510 Frequency Plan got Frequency: %s'%frequency
channel = (frequency - 470.3) / 0.2
decimal = channel % 1
if decimal >= 0.5:
channel = int(channel) + 1
else:
channel = int(channel)
return int(channel)
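    # Worked example (illustrative, using the formulas above): an uplink on 471.7 MHz is
    # uplink channel index (471.7 - 470.3) / 0.2 = 7; the RX1 downlink channel is 7 % 48 = 7,
    # giving a downlink frequency of 500 + (3 + 2*7)/10 = 501.7 MHz.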
class Channel:
"""
Ch1 470.3
Ch2 470.5
Ch3 470.7
Ch4 470.9
Ch5 471.1
Ch6 471.3
Ch7 471.5
Ch8 471.7
"""
CF_LIST = b''
CH_MASK = b'\xff\x00' # Ch1-8 open
CH_MASK_CNTL = 0
NB_TRANS = 1
MAX_LENGTH = {
DataRate.SF12BW125: 59,
DataRate.SF11BW125: 59,
DataRate.SF10BW125: 59,
DataRate.SF9BW125: 123,
DataRate.SF8BW125: 230,
DataRate.SF7BW125: 230,
}
@staticmethod
def adr_schema(rssi, recent_datr):
if -57 < rssi:
return 5
elif -60 < rssi <= -57:
if recent_datr == 4:
return recent_datr
else:
return 5
elif -67 < rssi <= -60:
return 4
elif -70 < rssi <= -67:
if recent_datr == 3:
return recent_datr
else:
return 4
elif -77 < rssi <= -70:
return 3
elif -80 < rssi <= -77:
if recent_datr == 2:
return recent_datr
else:
return 3
elif -87 < rssi <= -80:
return 2
elif -90 < rssi <= -87:
if recent_datr == 1:
return recent_datr
else:
return 2
        elif -97 < rssi <= -90:
            return 1
        elif -100 < rssi <= -97:
            if recent_datr == 0:
                return recent_datr
            else:
                return 1
else:
return 0
# else:
# logger.error(ConstLog.adr + 'rssi %s recent_datr %s' % (rssi, recent_datr))
if __name__ == '__main__':
print(CN470_510.rx1_freq(470.9))
|
#IMPORTS
@IMPORTS
#GLOBAL
@GLOBAL
#PARAMETER
@PARAMETER
#LOCAL
@LOCAL
#PROCEDURE
@PROCEDURE
|
#!/usr/bin/env python
# encoding: utf-8
##################################################################
submit_scripts = {
'Slurm': {
# Gaussian09 on C3DDB
'gaussian': """#!/bin/bash -l
#SBATCH -p defq
#SBATCH -J {name}
#SBATCH -N 1
#SBATCH -n 8
#SBATCH --time={t_max}
#SBATCH --mem-per-cpu 4500
module add c3ddb/gaussian/09.d01
which g09
echo "============================================================"
echo "Job ID : $SLURM_JOB_ID"
echo "Job Name : $SLURM_JOB_NAME"
echo "Starting on : $(date)"
echo "Running on node : $SLURMD_NODENAME"
echo "Current directory : $(pwd)"
echo "============================================================"
WorkDir=/scratch/users/{un}/$SLURM_JOB_NAME-$SLURM_JOB_ID
SubmitDir=`pwd`
GAUSS_SCRDIR=/scratch/users/{un}/g09/$SLURM_JOB_NAME-$SLURM_JOB_ID
export GAUSS_SCRDIR
mkdir -p $GAUSS_SCRDIR
mkdir -p $WorkDir
cd $WorkDir
. $g09root/g09/bsd/g09.profile
cp $SubmitDir/input.gjf .
g09 < input.gjf > input.log
formchk check.chk check.fchk
cp * $SubmitDir/
rm -rf $GAUSS_SCRDIR
rm -rf $WorkDir
""",
# Orca on C3DDB:
'orca': """#!/bin/bash -l
#SBATCH -p defq
#SBATCH -J {name}
#SBATCH -N 1
#SBATCH -n 8
#SBATCH --time={t_max}
#SBATCH --mem-per-cpu 4500
module add c3ddb/orca/4.0.0
module add c3ddb/openmpi/2.0.2
which orca
export ORCA_DIR=/cm/shared/c3ddb/orca/4.0.0/
export PATH=$PATH:$ORCA_DIR
echo "============================================================"
echo "Job ID : $SLURM_JOB_ID"
echo "Job Name : $SLURM_JOB_NAME"
echo "Starting on : $(date)"
echo "Running on node : $SLURMD_NODENAME"
echo "Current directory : $(pwd)"
echo "============================================================"
WorkDir=/scratch/users/{un}/$SLURM_JOB_NAME-$SLURM_JOB_ID
SubmitDir=`pwd`
mkdir -p $WorkDir
cd $WorkDir
cp $SubmitDir/input.inp .
${ORCA_DIR}/orca input.inp > input.log
cp * $SubmitDir/
rm -rf $WorkDir
""",
# Molpro 2015 on RMG
'molpro': """#!/bin/bash -l
#SBATCH -p long
#SBATCH -J {name}
#SBATCH -N 1
#SBATCH -n 8
#SBATCH --time={t_max}
#SBATCH --mem-per-cpu={mem_cpu}
export PATH=/opt/molpro/molprop_2015_1_linux_x86_64_i8/bin:$PATH
echo "============================================================"
echo "Job ID : $SLURM_JOB_ID"
echo "Job Name : $SLURM_JOB_NAME"
echo "Starting on : $(date)"
echo "Running on node : $SLURMD_NODENAME"
echo "Current directory : $(pwd)"
echo "============================================================"
sdir=/scratch/{un}/$SLURM_JOB_NAME-$SLURM_JOB_ID
mkdir -p $sdir
molpro -n 8 -d $sdir input.in
rm -rf $sdir
""",
},
'OGE': {
# Gaussian16 on Pharos
'gaussian': """#!/bin/bash -l
#$ -N {name}
#$ -l long
#$ -l harpertown
#$ -l h_rt={t_max}
#$ -pe singlenode 6
#$ -l h=!node60.cluster
#$ -cwd
#$ -o out.txt
#$ -e err.txt
echo "Running on node:"
hostname
g16root=/opt
GAUSS_SCRDIR=/scratch/{un}/{name}
export g16root GAUSS_SCRDIR
. $g16root/g16/bsd/g16.profile
mkdir -p /scratch/{un}/{name}
g16 input.gjf
rm -r /scratch/{un}/{name}
""",
# Gaussian03 on Pharos
'gaussian03_pharos': """#!/bin/bash -l
#$ -N {name}
#$ -l long
#$ -l harpertown
#$ -l h_rt={t_max}
#$ -pe singlenode 6
#$ -l h=!node60.cluster
#$ -cwd
#$ -o out.txt
#$ -e err.txt
echo "Running on node:"
hostname
g03root=/opt
GAUSS_SCRDIR=/scratch/{un}/{name}
export g03root GAUSS_SCRDIR
. $g03root/g03/bsd/g03.profile
mkdir -p /scratch/{un}/{name}
g03 input.gjf
rm -r /scratch/{un}/{name}
""",
# QChem 4.4 on Pharos:
'qchem': """#!/bin/bash -l
#$ -N {name}
#$ -l long
#$ -l harpertown
#$ -l h_rt={t_max}
#$ -pe singlenode 6
#$ -l h=!node60.cluster
#$ -cwd
#$ -o out.txt
#$ -e err.txt
echo "Running on node:"
hostname
export QC=/opt/qchem
export QCSCRATCH=/scratch/{un}/{name}
export QCLOCALSCR=/scratch/{un}/{name}/qlscratch
. $QC/qcenv.sh
mkdir -p /scratch/{un}/{name}/qlscratch
qchem -nt 6 input.in output.out
rm -r /scratch/{un}/{name}
""",
# Molpro 2012 on Pharos
'molpro': """#! /bin/bash -l
#$ -N {name}
#$ -l long
#$ -l harpertown
#$ -l h_rt={t_max}
#$ -pe singlenode 6
#$ -l h=!node60.cluster
#$ -cwd
#$ -o out.txt
#$ -e err.txt
export PATH=/opt/molpro2012/molprop_2012_1_Linux_x86_64_i8/bin:$PATH
sdir=/scratch/{un}
mkdir -p /scratch/{un}/qlscratch
molpro -d $sdir -n 6 input.in
""",
}
}
|
"""
Augmenter that apply operation (word level) to textual input based on back translation.
"""
import string
import os
import torch
from nlpaug.augmenter.word import WordAugmenter
import nlpaug.model.lang_models as nml
BACK_TRANSLATION_MODELS = {}
def init_back_translation_model(model_src, from_model_name, to_model_name, device, force_reload=False):
global BACK_TRANSLATION_MODELS
model_name = '_'.join([model_src, from_model_name, to_model_name])
if model_name in BACK_TRANSLATION_MODELS and not force_reload:
BACK_TRANSLATION_MODELS[model_name].device = device
return BACK_TRANSLATION_MODELS[model_name]
if model_src == 'huggingface':
model = nml.MtTransformers(src_model_name=from_model_name, tgt_model_name=to_model_name, device=device)
# elif model_src == 'fairseq':
# model = nml.Fairseq(from_model_name=from_model_name, from_model_checkpt=from_model_checkpt,
# to_model_name=to_model_name, to_model_checkpt=to_model_checkpt,
# tokenzier_name=tokenzier_name, bpe_name=bpe_name, is_load_from_github=is_load_from_github,
# device=device)
BACK_TRANSLATION_MODELS[model_name] = model
return model
class BackTranslationAug(WordAugmenter):
# https://arxiv.org/pdf/1511.06709.pdf
"""
Augmenter that leverage two translation models for augmentation. For example, the source is English. This
augmenter translate source to German and translating it back to English. For detail, you may visit
https://towardsdatascience.com/data-augmentation-in-nlp-2801a34dfc28
:param str from_model_name: Any model from https://huggingface.co/models?filter=translation&search=Helsinki-NLP. As
long as from_model_name is pair with to_model_name. For example, from_model_name is English to Japanese,
then to_model_name should be Japanese to English.
:param str to_model_name: Any model from https://huggingface.co/models?filter=translation&search=Helsinki-NLP.
:param str device: Default value is CPU. If value is CPU, it uses CPU for processing. If value is CUDA, it uses GPU
for processing. Possible values include 'cuda' and 'cpu'. (May able to use other options)
:param bool force_reload: Force reload the contextual word embeddings model to memory when initialize the class.
Default value is False and suggesting to keep it as False if performance is the consideration.
:param str name: Name of this augmenter
>>> import nlpaug.augmenter.word as naw
>>> aug = naw.BackTranslationAug()
"""
def __init__(self, from_model_name='Helsinki-NLP/opus-mt-en-de', to_model_name='Helsinki-NLP/opus-mt-de-en',
name='BackTranslationAug', device='cpu', force_reload=False, verbose=0):
super().__init__(
action='substitute', name=name, aug_p=None, aug_min=None, aug_max=None, tokenizer=None,
device=device, verbose=verbose, include_detail=False, parallelable=True)
# migrate from fairseq to huggingface library
self.model_src = 'huggingface'
self.model = self.get_model(model_src=self.model_src,
from_model_name=from_model_name, to_model_name=to_model_name, device=device
)
self.device = self.model.device
def substitute(self, data):
if not data:
return data
augmented_text = self.model.predict(data)
return augmented_text
@classmethod
def get_model(cls, model_src, from_model_name, to_model_name, device='cuda', force_reload=False):
        return init_back_translation_model(model_src, from_model_name, to_model_name, device,
                                            force_reload)
@classmethod
def clear_cache(cls):
global BACK_TRANSLATION_MODELS
BACK_TRANSLATION_MODELS = {}
|
import numpy as np
import yfinance as yf
import datetime as dt
from pandas_datareader import data as pdr
yf.pdr_override()
# Pulls data[Ticker symbol, market price] for selected stock and appends to data array
def getStockData(stock, data):
# Set date range for api pull
now=dt.datetime.now()
if (dt.datetime.today().weekday() == 6): # If sunday, pull data for last 48 hours
lastDay = now - dt.timedelta(hours=48)
else:
lastDay = now - dt.timedelta(hours=24)
# Fetch a dataframe from Yahoo finance api of data from start to now
df = pdr.get_data_yahoo(stock, lastDay, now)
df.insert(0, "Stock_Name", stock)
df["Diff"] = df["Close"].sub(df["Open"], axis = 0)
rows = len(df.index)
# Parse individual information for last row in data frame
stockName = df.iloc[rows-1:rows, 0:1].values.flatten(order='C')
stockClose = df.iloc[rows-1:rows, 4:5].values.flatten(order='C')
# Round to 2 decimals
stockClose = np.around(stockClose, decimals=2)
# Append to data array
newData = np.stack((stockName, stockClose))
data = np.append(data, newData, axis=1)
return data
# Returns an array of stock data for all ticker symbols in SaveData.txt
def getAllStockData():
# Get ticker list from saved data file, load to tickerList
file = 'SaveData.txt'
tickerList = np.loadtxt(file, dtype=str)
tickerList = tickerList.tolist()
# Data: [Ticker symbol, market price]
data = np.array([[],[]])
# Loop through each element in tickerList, pulling market information and filling data array
if isinstance(tickerList, list):
for x in tickerList:
if (x[0] != "portBal" and x[0]!= "bookVal"
and x[0]!= "PL" and x[0]!="funds"):
data = getStockData(x[0], data)
else:
data = getStockData(tickerList, data)
return data.transpose()
myData = getAllStockData()
# Save output to Data.txt
np.savetxt("Data.txt", myData, fmt='%s')
|
'''
MFEM example 3
See c++ version in the MFEM library for more detail
'''
from mfem import path
import mfem.ser as mfem
from mfem.ser import intArray
from os.path import expanduser, join
import numpy as np
from numpy import sin, array
freq = 1.0
kappa = np.pi*freq
static_cond = False
order = 1
meshfile = expanduser(join(path, 'data', 'beam-tet.mesh'))
mesh = mfem.Mesh(meshfile, 1,1)
dim = mesh.Dimension()
sdim= mesh.SpaceDimension()
class E_exact(mfem.VectorPyCoefficient):
def __init__(self):
mfem.VectorPyCoefficient.__init__(self, dim)
def EvalValue(self, x):
return (sin(kappa * x[1]),
sin(kappa * x[2]),
sin(kappa * x[0]))
class f_exact(mfem.VectorPyCoefficient):
def __init__(self):
mfem.VectorPyCoefficient.__init__(self, dim)
def EvalValue(self, x):
return ((1 + kappa**2)*sin(kappa * x[1]),
(1 + kappa**2)*sin(kappa * x[2]),
(1 + kappa**2)*sin(kappa * x[0]))
# 3. Refine the mesh to increase the resolution. In this example we do
# 'ref_levels' of uniform refinement. We choose 'ref_levels' to be the
# largest number that gives a final mesh with no more than 50,000
# elements.
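#    For example (illustrative numbers): if the initial mesh had 1,000 elements and dim = 3,
#    then ref_levels = floor(log(50000/1000)/log(2)/3) = floor(1.88) = 1, and each uniform
#    refinement multiplies the element count by 2^dim = 8.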
ref_levels = int(np.floor(np.log(50000./mesh.GetNE())/np.log(2.)/dim))
for x in range(ref_levels):
mesh.UniformRefinement();
mesh.ReorientTetMesh();
# 4. Define a finite element space on the mesh. Here we use the Nedelec
# finite elements of the specified order.
fec = mfem.ND_FECollection(order, dim)
fespace = mfem.FiniteElementSpace(mesh, fec)
print("Number of finite element unknowns: " + str(fespace.GetTrueVSize()))
# 5. Determine the list of true (i.e. conforming) essential boundary dofs.
# In this example, the boundary conditions are defined by marking all
# the boundary attributes from the mesh as essential (Dirichlet) and
# converting them to a list of true dofs.
ess_tdof_list = intArray();
if mesh.bdr_attributes.Size():
ess_bdr = intArray(mesh.bdr_attributes.Max())
ess_bdr = intArray([1]*mesh.bdr_attributes.Max())
fespace.GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
# 6. Set up the linear form b(.) which corresponds to the right-hand side
# of the FEM linear system, which in this case is (f,phi_i) where f is
# given by the function f_exact and phi_i are the basis functions in the
# finite element fespace.
b = mfem.LinearForm(fespace);
f = f_exact()
dd = mfem.VectorFEDomainLFIntegrator(f);
b.AddDomainIntegrator(dd)
b.Assemble();
# 7. Define the solution vector x as a finite element grid function
# corresponding to fespace. Initialize x by projecting the exact
# solution. Note that only values from the boundary edges will be used
# when eliminating the non-homogeneous boundary condition to modify the
# r.h.s. vector b.
#from mfem.examples.ex3 import E_exact_cb
x = mfem.GridFunction(fespace)
E = E_exact()
x.ProjectCoefficient(E);
# 8. Set up the bilinear form corresponding to the EM diffusion operator
# curl muinv curl + sigma I, by adding the curl-curl and the mass domain
# integrators.
muinv = mfem.ConstantCoefficient(1.0);
sigma = mfem.ConstantCoefficient(1.0);
a = mfem.BilinearForm(fespace);
a.AddDomainIntegrator(mfem.CurlCurlIntegrator(muinv));
a.AddDomainIntegrator(mfem.VectorFEMassIntegrator(sigma));
# 9. Assemble the bilinear form and the corresponding linear system,
# applying any necessary transformations such as: eliminating boundary
# conditions, applying conforming constraints for non-conforming AMR,
# static condensation, etc.
if (static_cond): a.EnableStaticCondensation()
a.Assemble();
A = mfem.SparseMatrix()
B = mfem.Vector()
X = mfem.Vector()
a.FormLinearSystem(ess_tdof_list, x, b, A, X, B);
## Here, the original version calls Height(), which is not
## defined in the header...!?
print("Size of linear system: " + str(A.Size()))
import sys
sys.stdout.flush()
# 10. Solve
M = mfem.GSSmoother(A)
mfem.PCG(A, M, B, X, 1, 500, 1e-12, 0.0);
sys.stdout.flush()
# 11. Recover the solution as a finite element grid function.
a.RecoverFEMSolution(X, b, x);
# 12. Compute and print the L^2 norm of the error.
import sys
sys.stdout.write("|| E_h - E ||_{L^2} = " + str(x.ComputeL2Error(E))+"\n")
mesh.Print('refined.mesh', 8)
x.Save('sol.gf', 8)
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 6 22:39:51 2018
@author: fhp7
"""
import numpy as np
# for each OD pair of nodes:
# while not yet at the end of the path
# find length of edge between cur_node and prev_node
# add the edge length to the total edge length counter
# if this edge has a bike lane
# add the edge length to the bike lane length counter
# record the bike_lane_length/total_path_length in the citibike_monthly_od_ser next to the normalized trip count
def find_lane_frac(path, month, network):
"""Find the percentage of bike lane by length along the shortest path
between two osmids.
Args:
path: list containing the nodes on the shortest path between start and
end nodes
month: integer representation of the month in yyyymm format
Returns:
frac: float fraction of the length of this shortest path covered by
any sort of bike lane.
"""
path_list = path.copy()
cur_node = path_list.pop(0)
if len(path_list) == 0:
return np.nan
else:
total_len = 0
lane_len = 0
while len(path_list) > 0:
prev_node = cur_node
cur_node = path_list.pop(0)
cur_edge_dict = network.edges[prev_node, cur_node, 0]
total_len = total_len + cur_edge_dict['length']
if 'instdate' in cur_edge_dict:
#TODO: replace this with a permanent fix by adding a month column
# to nxprep.py
month_int = cur_edge_dict['instdate'].year*100 + cur_edge_dict['instdate'].month
if month_int <= month:
lane_len = lane_len + cur_edge_dict['length']
frac = lane_len/total_len
return frac
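# Worked example (illustrative numbers): for a 3-node path with edge lengths 100 m and 300 m,
# where only the second edge has a bike lane installed before `month`, find_lane_frac
# returns 300 / (100 + 300) = 0.75.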
def find_adj_val(df, stationidA, stationidB, month, colname, step, default_value, valid_months):
"""
Args:
df: pandas dataframe holding columns for month, startstationosmid,
and endstationosmid
stationidA: CitiBike station id of the A-station for this station pair
stationidB: CitiBike station id of the B-station for this station pair
month: integer representing the month of the year from 1 to 12
colname: string denoting the column in which the previous value is to
be found
step: integer representing the number of months to look forward into
the future or back into the past. Negative values look into
the past.
default_value: the value that should be returned if the query_month is
valid but no data exists for it. This situation will
occur when no trips were taken on a given od pair
during a valid month.
valid_months: set of all the unique months represented in df
Returns:
val: the value of the column given in colname for the station ids and
months specified. If that value does not exist in df, then
apply a default value or a nan value
"""
query_month = month + step
try:
val = df.loc[(stationidA, stationidB, query_month), colname]
except KeyError: # That is, if that month didn't exist in the index
if month in valid_months: # If the month was valid but there were no trips
val = default_value
else: # If the month was invalid
val = np.nan
return val
def find_prepost_val(df, colname, month, step, idt, default_value):
"""
Args:
df: pandas dataframe holding columns for month, startstationosmid,
and endstationosmid
colname: string denoting the column in which the previous value is to
be found
month: integer representing the month of the year from 1 to 12
step: integer representing the number of months to look forward into
the future or back into the past. Negative values look into
the past.
idt: tuple denoting the ids (not OSM) of the end stations for this route
default_value: the value that should be returned if the query_month is
valid but no data exists for it. This situation will
occur when no trips were taken on a given od pair
during a valid month.
Returns:
val: float fraction of the shortest path between the given nodes that
was covered by bike lanes in the previous month.
"""
valid_months = [1,2,3,4,5,6,7,8,9,10,11,12]
query_month = month + step
if query_month in valid_months:
val_ser = df.loc[(df['idtuple'] == idt) & \
(df['month'] == query_month), colname]
if len(val_ser) > 1:
print("Warning! Multiple possible data points found in val_ser!")
print(val_ser)
elif len(val_ser) == 1:
val = val_ser.iloc[0]
else: # That is, if there were no trips on an od pair in a valid month
val = default_value
else:
val = np.nan
return val
|
import yaml
import requests
import os
from slugify import slugify
class _DataObject(object):
def __getattr__(self, k):
return self.data.get(k)
class SiteCollection(object):
def __init__(self, directory):
self.sites = []
for site_file in os.listdir(directory):
with open(os.path.join(directory, site_file), 'rb') as fh:
                site = Site(yaml.safe_load(fh))
self.sites.append(site)
def get(self, slug):
for site in self.sites:
if site.slug == slug:
return site
def __iter__(self):
return self.sites.__iter__()
def to_dict(self):
return {'sites': self.sites}
class Filter(_DataObject):
def __init__(self, site, data):
self.site = site
self.data = data
self.default = unicode(data.get('default'))
self._values = None
@property
def values(self):
if self._values is None:
field = self.data.get('field')
dimension, attribute = field, None
if '.' in dimension:
                dimension, attribute = dimension.split('.', 1)
res = requests.get('https://openspending.org/api/2/aggregate',
params={'dataset': self.site.dataset, 'drilldown': field})
self._values = []
for drilldown in res.json().get('drilldown'):
v = drilldown.get(dimension)
if isinstance(v, dict):
if not attribute:
self.data['field'] = field + '.label'
v = v.get(attribute or 'label')
self._values.append(v)
self._values = list(sorted(self._values))
return self._values
@property
def class_name(self):
_ = self.values
return self.field.replace('.', ' ')
def to_dict(self):
values = self.values
data = self.data.copy()
data['slug'] = self.slug
data['values'] = values
return data
class Site(_DataObject):
def __init__(self, data):
self.data = data
self.slug = slugify(data.get('slug', data.get('name')))
self.filters = [Filter(self, d) for d in data.get('filters', [])]
def to_dict(self):
data = self.data.copy()
data['slug'] = self.slug
data['filters'] = self.filters
return data
def load_sites(app):
return SiteCollection(app.config['SITES_FOLDER'])
|
###########################################################################
## Class hydroshare_resource_editor
###########################################################################
import wx
import wx.xrc
from Utilities.HydroShareUtility import HydroShareAccountDetails, HydroShareUtility
from WxUtilities import *
from wx.lib.pubsub import pub
# from pubsub import pub
from InputValidator import *
from urlparse import urlparse
import re
# noinspection PyPropertyAccess,PyPropertyAccess,PyPropertyAccess,PyPropertyAccess,PyPropertyAccess,PyPropertyAccess,
# PyPropertyAccess
# noinspection PyPropertyAccess,PyPropertyAccess,PyUnusedLocal,PyUnusedLocal,PyUnusedLocal,PyUnusedLocal
class HydroShareResourceTemplateDialog(wx.Dialog):
def __init__(self, parent, templates, selected=0, create_selected=False):
title = u'Create a new HydroShare Resource' if create_selected else u"Manage HydroShare Resource Templates"
self.dialog = wx.Dialog.__init__(self, parent, id=wx.ID_ANY, title=title, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.DEFAULT_DIALOG_STYLE)
self.urlregex = re.compile(
r'^(?:http|ftp)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
self.templates = templates
self.create_new = create_selected
self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
main_sizer = wx.BoxSizer(wx.VERTICAL)
label_size = wx.Size(125, -1)
input_size = wx.Size(300, -1)
###########################################################################
# Template Selection
###########################################################################
template_text = u'Modify Template' if not create_selected else u'Load saved template'
self.label1 = wx.StaticText(self, wx.ID_ANY, template_text, wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_CENTRE)
self.label1.Wrap(-1)
self.label1.SetMinSize(label_size)
default_item = u'Populate fields from template...' if create_selected else u'Create new template...'
template_choices = [default_item] + templates.keys()
self.template_selector = wx.Choice(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, template_choices, 0)
self.template_selector.SetMinSize(input_size)
template_selector_sizer = wx.BoxSizer(wx.HORIZONTAL)
template_selector_sizer.Add(self.label1, 0, flag=wx.ALL | wx.EXPAND, border=5)
template_selector_sizer.Add(self.template_selector, 0, wx.ALL | wx.EXPAND, border=5)
main_sizer.Add(template_selector_sizer, flag=wx.ALL | wx.EXPAND, border=5)
###########################################################################
# Template name input (optional)
###########################################################################
if not create_selected:
template_name_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.m_staticText1 = wx.StaticText(self, wx.ID_ANY, u"Template Name", wx.DefaultPosition, wx.DefaultSize,
wx.ALIGN_CENTRE)
self.m_staticText1.Wrap(-1)
self.m_staticText1.SetMinSize(label_size)
template_name_sizer.Add(self.m_staticText1, 0, flag=wx.ALL | wx.EXPAND, border=5)
self.template_name_input = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
0)
self.template_name_input.SetMinSize(input_size)
template_name_sizer.Add(self.template_name_input, 0, wx.ALL | wx.EXPAND, border=5)
main_sizer.Add(template_name_sizer, flag=wx.ALL | wx.EXPAND, border=5)
###########################################################################
# Resource name input
###########################################################################
self.m_staticText11 = wx.StaticText(self, wx.ID_ANY, u"Resource Name", wx.DefaultPosition, wx.DefaultSize,
wx.ALIGN_CENTRE)
self.m_staticText11.Wrap(-1)
self.m_staticText11.SetMinSize(label_size)
self.resource_name_input = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
self.resource_name_input.SetMinSize(input_size)
name_sizer = wx.BoxSizer(wx.HORIZONTAL)
name_sizer.Add(self.m_staticText11, 0, flag=wx.ALL | wx.EXPAND, border=5)
name_sizer.Add(self.resource_name_input, 0, flag=wx.ALL | wx.EXPAND, border=5)
main_sizer.Add(name_sizer, flag=wx.ALL | wx.EXPAND, border=5)
###########################################################################
# Resource Abstract input
###########################################################################
self.m_staticText12 = wx.StaticText(self, wx.ID_ANY, u"Resource Abstract", wx.DefaultPosition, wx.DefaultSize,
wx.ALIGN_CENTRE)
self.m_staticText12.Wrap(-1)
self.m_staticText12.SetMinSize(label_size)
self.resource_abstract_input = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_MULTILINE)
self.resource_abstract_input.SetMinSize(wx.Size(300, 75))
abstract_sizer = wx.BoxSizer(wx.HORIZONTAL)
abstract_sizer.Add(self.m_staticText12, 0, flag=wx.ALL | wx.EXPAND, border=5)
abstract_sizer.Add(self.resource_abstract_input, 0, flag=wx.ALL | wx.EXPAND, border=5)
main_sizer.Add(abstract_sizer, flag=wx.ALL | wx.EXPAND, border=5)
###########################################################################
# Funding agency input
###########################################################################
self.m_staticText13 = wx.StaticText(self, wx.ID_ANY, u"Funding Agency", wx.DefaultPosition, wx.DefaultSize,
wx.ALIGN_CENTRE)
self.m_staticText13.Wrap(-1)
self.m_staticText13.SetMinSize(label_size)
self.funding_agency_input = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
self.funding_agency_input.SetMinSize(input_size)
funding_agency_sizer = wx.BoxSizer(wx.HORIZONTAL)
funding_agency_sizer.Add(self.m_staticText13, 0, flag=wx.ALL | wx.EXPAND, border=5)
funding_agency_sizer.Add(self.funding_agency_input, 0, flag=wx.ALL | wx.EXPAND, border=5)
main_sizer.Add(funding_agency_sizer, flag=wx.ALL | wx.EXPAND, border=5)
###########################################################################
# Agency URL input
###########################################################################
self.m_staticText14 = wx.StaticText(self, wx.ID_ANY, u"Agency Website", wx.DefaultPosition, wx.DefaultSize,
wx.ALIGN_CENTRE)
self.m_staticText14.Wrap(-1)
self.m_staticText14.SetMinSize(label_size)
self.agency_url_input = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
self.agency_url_input.SetMinSize(input_size)
agency_url_sizer = wx.BoxSizer(wx.HORIZONTAL)
agency_url_sizer.Add(self.m_staticText14, 0, flag=wx.ALL | wx.EXPAND, border=5)
agency_url_sizer.Add(self.agency_url_input, 0, flag=wx.ALL | wx.EXPAND, border=5)
main_sizer.Add(agency_url_sizer, flag=wx.ALL | wx.EXPAND, border=5)
###########################################################################
# Award title input
###########################################################################
self.m_staticText15 = wx.StaticText(self, wx.ID_ANY, u"Award Title", wx.DefaultPosition, wx.DefaultSize,
wx.ALIGN_CENTRE)
self.m_staticText15.Wrap(-1)
self.m_staticText15.SetMinSize(label_size)
self.award_title_input = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
self.award_title_input.SetMinSize(input_size)
award_title_sizer = wx.BoxSizer(wx.HORIZONTAL)
award_title_sizer.Add(self.m_staticText15, 0, flag=wx.ALL | wx.EXPAND, border=5)
award_title_sizer.Add(self.award_title_input, 0, flag=wx.ALL | wx.EXPAND, border=5)
main_sizer.Add(award_title_sizer, flag=wx.ALL | wx.EXPAND, border=5)
###########################################################################
# Award number input
###########################################################################
self.m_staticText16 = wx.StaticText(self, wx.ID_ANY, u"Award Number", wx.DefaultPosition, wx.DefaultSize,
wx.ALIGN_CENTRE)
self.m_staticText16.Wrap(-1)
self.m_staticText16.SetMinSize(label_size)
self.award_number_input = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
self.award_number_input.SetMinSize(input_size)
award_number_sizer = wx.BoxSizer(wx.HORIZONTAL)
award_number_sizer.Add(self.m_staticText16, flag=wx.ALL | wx.EXPAND, border=5)
award_number_sizer.Add(self.award_number_input, flag=wx.ALL | wx.EXPAND, border=5)
main_sizer.Add(award_number_sizer, flag=wx.ALL | wx.EXPAND, border=5)
"""
Keywords input
"""
self.keywords_label = wx.StaticText(self, wx.ID_ANY, 'Keywords', wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_CENTER)
self.keywords_label.Wrap(-1)
self.keywords_label.SetMinSize(label_size)
self.keywords_input = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
self.keywords_input.SetMinSize(input_size)
        self.keywords_input.SetToolTip(wx.ToolTip('Enter keywords as a comma separated\n'
                                                  'list (e.g. "Keyword 1, Keyword 2", etc.)'))
keywords_sizer = wx.BoxSizer(wx.HORIZONTAL)
keywords_sizer.Add(self.keywords_label, flag=wx.ALL | wx.EXPAND, border=5)
keywords_sizer.Add(self.keywords_input, flag=wx.ALL | wx.EXPAND, border=5)
main_sizer.Add(keywords_sizer, flag=wx.ALL | wx.EXPAND, border=5)
bSizer211 = wx.BoxSizer(wx.VERTICAL)
bSizer20 = wx.BoxSizer(wx.HORIZONTAL)
bSizer211.Add(bSizer20, 1, wx.EXPAND, border=5)
bSizer201 = wx.BoxSizer(wx.HORIZONTAL)
bSizer211.Add(bSizer201, 1, wx.EXPAND, border=5)
main_sizer.Add(bSizer211, flag=wx.ALL | wx.EXPAND, border=5)
###########################################################################
# Action buttons
###########################################################################
buttons_sizer = wx.BoxSizer(wx.HORIZONTAL)
if create_selected:
self.cancel_button = wx.Button(self, wx.ID_ANY, u"Cancel", wx.DefaultPosition, wx.DefaultSize, 0)
self.save_button = wx.Button(self, wx.ID_ANY, u"Create Resource", wx.DefaultPosition, wx.DefaultSize, 0)
buttons_sizer.Add(self.cancel_button, 0, flag=wx.ALL | wx.EXPAND, border=5)
buttons_sizer.Add(self.save_button, 0, flag=wx.ALL | wx.EXPAND, border=5)
# Connect Events
self.cancel_button.Bind(wx.EVT_BUTTON, self.on_cancel_clicked)
self.save_button.Bind(wx.EVT_BUTTON, self.on_create_clicked)
else:
self.cancel_button = wx.Button(self, wx.ID_ANY, u"Cancel", wx.DefaultPosition, wx.DefaultSize, 0)
self.delete_button = wx.Button(self, wx.ID_ANY, u"Delete Template", wx.DefaultPosition, wx.DefaultSize, 0)
self.copy_button = wx.Button(self, wx.ID_ANY, u"Copy Template", wx.DefaultPosition, wx.DefaultSize, 0)
self.save_button = wx.Button(self, wx.ID_ANY, u"Save Template", wx.DefaultPosition, wx.DefaultSize, 0)
buttons_sizer.Add(self.cancel_button, 0, flag=wx.ALL | wx.EXPAND, border=5)
buttons_sizer.Add(self.delete_button, 0, flag=wx.ALL | wx.EXPAND, border=5)
buttons_sizer.Add(self.copy_button, 0, flag=wx.ALL | wx.EXPAND, border=5)
buttons_sizer.Add(self.save_button, 0, flag=wx.ALL | wx.EXPAND, border=5)
# Connect Events
self.cancel_button.Bind(wx.EVT_BUTTON, self.on_cancel_clicked)
self.save_button.Bind(wx.EVT_BUTTON, self.on_save_clicked)
self.delete_button.Bind(wx.EVT_BUTTON, self.on_delete_clicked)
self.copy_button.Bind(wx.EVT_BUTTON, self.on_copy_clicked)
self.template_selector.Bind(wx.EVT_CHOICE, self.on_selection_changed)
main_sizer.Add(buttons_sizer, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
###########################################################################
# Finish off the rest
###########################################################################
self.SetSizerAndFit(main_sizer)
self.Layout()
self.Centre(wx.BOTH)
self.template_selector.SetSelection(selected)
self.on_selection_changed()
def __del__(self):
pass
def on_cancel_clicked(self, event):
self.EndModal(False)
def on_delete_clicked(self, event):
print "remove account clicked!"
pub.sendMessage("hs_resource_remove", result=self._get_input_as_dict())
selected = self.template_selector.GetCurrentSelection()
if selected > 0:
pub.sendMessage("hs_auth_remove", result=self._get_input_as_dict())
self.template_selector.SetSelection(selected - 1)
self.template_selector.Delete(selected)
self.on_selection_changed()
def on_copy_clicked(self, event):
self.template_selector.SetSelection(0)
counter = 1
new_name = "{}_({})".format(self.template_name_input.Value, counter)
while new_name in self.templates and counter < 10:
new_name = "{}_({})".format(self.template_name_input.Value, counter)
counter += 1
self.template_name_input.SetValue(new_name)
def on_save_clicked(self, event):
pub.sendMessage("hs_resource_save", result=self._get_input_as_dict())
self.EndModal(True)
event.Skip()
def on_create_clicked(self, event):
result = self._get_input_as_dict()
agwebsite_initial = agwebsite = result.get('agency_url', '')
error_list = []
# Make sure the resource has a name
if not len(result.get('resource_name', '')):
error_list.append("The 'Resource Name' field is required.")
# If the value for `agency_url` is not empty, validate the URL, otherwise, continue on
if len(agwebsite_initial):
# If the user did not include a scheme for the agency website, use 'http://' as the default
if not re.match(r'https?://', agwebsite):
result['agency_url'] = 'http://' + result.get('agency_url', '')
# If `agwebsite` passes the url pattern check, continue on, otherwise
# show some sort of validation error
if not self.urlregex.match(result.get('agency_url')):
error_list.append(
"Agency Website '{}' is an invalid URL.\n\nEnter a valid URL to continue.".format(agwebsite_initial)
)
if not len(error_list):
pub.sendMessage("hs_resource_create", result=result)
self.EndModal(True)
else:
if len(error_list) > 1:
msg = "Please fix the following errors"
for err in error_list:
msg += "\n\n - {}".format(err)
else:
msg = "Error: {}".format(error_list[0])
wx.MessageBox(msg, parent=self.dialog, caption='Error', style=wx.OK)
event.Skip()
def on_selection_changed(self, event=None):
value = self.template_selector.GetStringSelection()
if value in self.templates:
template = self.templates[value]
if not self.create_new:
self.template_name_input.SetValue(template.template_name)
self.resource_name_input.SetValue(template.title)
self.resource_abstract_input.SetValue(template.abstract)
self.award_number_input.SetValue(template.award_number)
self.award_title_input.SetValue(template.award_title)
self.funding_agency_input.SetValue(template.funding_agency)
self.agency_url_input.SetValue(template.agency_url)
else:
if not self.create_new:
self.template_name_input.SetValue("")
self.resource_name_input.SetValue("")
self.resource_abstract_input.SetValue("")
self.award_number_input.SetValue("")
self.award_title_input.SetValue("")
self.funding_agency_input.SetValue("")
self.agency_url_input.SetValue("")
def _get_input_as_dict(self):
return dict(selector=self.template_selector.GetStringSelection(),
name=self.template_name_input.Value if not self.create_new else '',
resource_name=self.resource_name_input.Value, abstract=self.resource_abstract_input.Value,
funding_agency=self.funding_agency_input.Value, agency_url=self.agency_url_input.Value,
award_title=self.award_title_input.Value, award_number=self.award_number_input.Value,
keywords=self.keywords_input.GetValue())
|
import json
import logging
import os
import parmed
import yaml
from simtk import unit
from simtk.openmm import app
from blues import reporters, utils
class Settings(object):
"""
Function that will parse the YAML configuration file for setup and running
BLUES simulations.
Parameters
----------
yaml_config : filepath to YAML file (or JSON)
"""
def __init__(self, config):
# Parse YAML or YAML docstr into dict
config = Settings.load_yaml(config)
# Parse the config into dict
if type(config) is dict:
config = Settings.set_Parameters(config)
self.config = config
@staticmethod
def load_yaml(yaml_config):
"""
Function that reads the YAML configuration file and parameters are
returned as a dict.
"""
# Parse input parameters from YAML
try:
if os.path.isfile(yaml_config):
with open(yaml_config, 'r') as stream:
config = yaml.safe_load(stream)
else:
config = yaml.safe_load(yaml_config)
except IOError as e:
print("Unable to open file:", yaml_config)
raise e
except yaml.YAMLError as e:
yaml_err = 'YAML parsing error in file: {}'.format(yaml_config)
if hasattr(e, 'problem_mark'):
mark = e.problem_mark
print(yaml_err + '\nError on Line:{} Column:{}' \
.format(mark.line + 1, mark.column + 1))
raise e
else:
return config
@staticmethod
def set_Structure(config):
"""
Load the input/reference files (.prmtop, .inpcrd) into a parmed.Structure. If a `restart` (.rst7)
file is given, overwrite the reference positions, velocities, and box vectors on the Structure.
Parameters
-----------
filename: str, filepath to input (.prmtop)
restart: str, file path to Amber restart file (.rst7)
logger: logging.Logger object, records information
Notes
-----
        Reference for parmed.load_file *args and **kwargs
https://parmed.github.io/ParmEd/html/structobj/parmed.formats.registry.load_file.html#parmed.formats.registry.load_file
"""
if 'restart' in config['structure'].keys():
rst7 = config['structure']['restart']
config['Logger'].info('Restarting simulation from {}'.format(rst7))
restart = parmed.amber.Rst7(rst7)
config['structure'].pop('restart')
structure = parmed.load_file(**config['structure'])
structure.positions = restart.positions
structure.velocities = restart.velocities
structure.box = restart.box
else:
structure = parmed.load_file(**config['structure'])
config['Structure'] = structure
return config
@staticmethod
def set_Output(config):
"""
Parses/updates the config (dict) with the given path for storing output files.
"""
        # Set file paths
        if 'output_dir' in config.keys():
            os.makedirs(config['output_dir'], exist_ok=True)
        else:
            config['output_dir'] = '.'
        outfname = os.path.join(config['output_dir'], config['outfname'])
        config['outfname'] = outfname
config['simulation']['outfname'] = outfname
return config
@staticmethod
def set_Logger(config):
"""
Initializes the logging.Logger modules and parses/updates the
config (dict) with the logger_level and the file path to store the .log file
"""
# Initialize root Logger module
#level = config['logger_level'].upper()
level = config['logger']['level'].upper()
stream = config['logger']['stream']
if 'filename' in config['logger'].keys():
outfname = config['logger']['filename']
else:
outfname = config['outfname']
if level == 'DEBUG':
# Add verbosity if logging is set to DEBUG
config['verbose'] = True
config['system']['verbose'] = True
config['simulation']['verbose'] = True
else:
config['verbose'] = False
config['system']['verbose'] = False
config['simulation']['verbose'] = False
logger_level = eval("logging.%s" % level)
logger = reporters.init_logger(logging.getLogger(), logger_level, stream, outfname)
config['Logger'] = logger
return config
@staticmethod
def set_Units(config):
"""
Parses/updates the config (dict) values with parameters that should have
units on them. If no unit is provided, the default units are assumed.
Distances: unit.angstroms
Temperature: unit.kelvins
Masses: unit.daltons
Time: unit.picoseconds
Pressure: unit.atmospheres
Force: unit.kilocalories_per_mole/unit.angstroms**2
"""
# Default parmed units.
default_units = {
'nonbondedCutoff': unit.angstroms,
'switchDistance': unit.angstroms,
'implicitSolventKappa': unit.angstroms,
'freeze_distance': unit.angstroms,
'temperature': unit.kelvins,
'hydrogenMass': unit.daltons,
'dt': unit.picoseconds,
'friction': 1 / unit.picoseconds,
'pressure': unit.atmospheres,
'implicitSolventSaltConc': unit.mole / unit.liters,
'weight': unit.kilocalories_per_mole / unit.angstroms**2,
}
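        # For example (illustrative YAML values, assuming the '<number> * <unit>' string
        # format handled by utils.parse_unit_quantity below):
        #   simulation:
        #     dt: 0.002 * picoseconds    # explicit unit, parsed into a Quantity
        #     temperature: 300           # no '*', so the default unit.kelvins is applied with a warning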
# Loop over parameters which require units
for param, unit_type in default_units.items():
# Check each nested subset of parameters
for setup_keys in ['system', 'simulation', 'freeze', 'restraints']:
                # If the parameter requires units, check whether it is provided by the user
try:
#print(param, config[setup_keys].keys())
if str(param) in config[setup_keys].keys():
user_input = config[setup_keys][param]
if '*' in str(user_input):
config[setup_keys][param] = utils.parse_unit_quantity(user_input)
# If not provided, set default units
else:
config['Logger'].warn("Units for '{} = {}' not specified. Setting units to '{}'".format(
param, user_input, unit_type))
config[setup_keys][param] = user_input * unit_type
except:
pass
return config
@staticmethod
def check_SystemModifications(config):
"""
Given a dict (config), check the parameters related to freezing or
restraining the system. Requires loading parmed.Structure from YAML.
"""
# Check Amber Selections
if 'freeze' in config.keys():
freeze_keys = ['freeze_center', 'freeze_solvent', 'freeze_selection']
for sel in freeze_keys:
if sel in config['freeze']:
utils.check_amber_selection(config['Structure'], config['freeze'][sel])
if 'restraints' in config.keys():
utils.check_amber_selection(config['Structure'], config['restraints']['selection'])
@staticmethod
def set_Apps(config):
"""
Check system parameters which require loading from the simtk.openmm.app namespace
nonbondedMethod : ['NoCutoff', 'CutoffNonPeriodic', 'CutoffPeriodic', 'PME', 'Ewald'],
constraints : [None, 'HBonds', 'HAngles', 'AllBonds'],
implicitSolvent : ['HCT', 'OBC1', 'OBC2', 'GBn', 'GBn2']
"""
        # System related parameters that require import from the simtk.openmm.app namespace
valid_apps = {
'nonbondedMethod': ['NoCutoff', 'CutoffNonPeriodic', 'CutoffPeriodic', 'PME', 'Ewald'],
'constraints': [None, 'HBonds', 'HAngles', 'AllBonds'],
'implicitSolvent': ['HCT', 'OBC1', 'OBC2', 'GBn', 'GBn2']
}
for method, app_type in valid_apps.items():
if method in config['system']:
user_input = config['system'][method]
try:
config['system'][method] = eval("app.%s" % user_input)
except:
config['Logger'].exception("'{}' was not a valid option for '{}'. Valid options: {}".format(
user_input, method, app_type))
return config
@staticmethod
def set_ncmcSteps(config):
"""
Calculates the number of lambda switching steps and integrator steps
for the NCMC simulation.
"""
ncmc_parameters = utils.calculateNCMCSteps(**config['simulation'])
for k, v in ncmc_parameters.items():
config['simulation'][k] = v
return config
@staticmethod
def set_Reporters(config):
"""
Store the openmm.Reporters for the simulations to the configuration
"""
logger = config['Logger']
outfname = config['outfname']
nstepsNC = config['simulation']['nstepsNC']
moveStep = config['simulation']['moveStep']
if 'md_reporters' in config.keys():
# Returns a list of Reporter objects, overwrites the configuration parameters
md_reporter_cfg = reporters.ReporterConfig(outfname, config['md_reporters'], logger)
config['md_reporters'] = md_reporter_cfg.makeReporters()
if md_reporter_cfg.trajectory_interval:
config['simulation']['md_trajectory_interval'] = md_reporter_cfg.trajectory_interval
else:
            logger.warn('Configuration for MD reporters was not set.')
# Configure the NCMC simulation reporters
if 'ncmc_reporters' in config.keys():
#Update the reporter parameters with the proper NCMC steps
for rep in config['ncmc_reporters'].keys():
if 'totalSteps' in config['ncmc_reporters'][rep].keys():
config['ncmc_reporters'][rep]['totalSteps'] = nstepsNC
#If -1 is given in frame_indices, record at the last frame
#If 0.5 is given in frame_indices, record at the midpoint/movestep
if 'frame_indices' in config['ncmc_reporters'][rep].keys():
frame_indices = config['ncmc_reporters'][rep]['frame_indices']
frame_indices = [moveStep if x == 0.5 else x for x in frame_indices]
frame_indices = [nstepsNC if x == -1 else x for x in frame_indices]
config['ncmc_reporters'][rep]['frame_indices'] = frame_indices
ncmc_reporter_cfg = reporters.ReporterConfig(outfname + '-ncmc', config['ncmc_reporters'], logger)
config['ncmc_reporters'] = ncmc_reporter_cfg.makeReporters()
else:
            logger.warn('Configuration for NCMC reporters was not set.')
return config
@staticmethod
def set_Parameters(config):
"""
MAIN execution function for updating/correcting (placing units) in the config
"""
try:
# Set top level configuration parameters
config = Settings.set_Output(config)
config = Settings.set_Logger(config)
if 'structure' in config:
config = Settings.set_Structure(config)
Settings.check_SystemModifications(config)
config = Settings.set_Units(config)
config = Settings.set_Apps(config)
config = Settings.set_ncmcSteps(config)
config = Settings.set_Reporters(config)
except Exception as e:
config['Logger'].exception(e)
raise e
return config
def asDict(self):
return self.config
def asOrderedDict(self):
from collections import OrderedDict
return OrderedDict(sorted(self.config.items(), key=lambda t: t[0]))
def asYAML(self):
return yaml.dump(self.config)
def asJSON(self, pprint=False):
if pprint:
return json.dumps(self.config, sort_keys=True, indent=2, skipkeys=True, default=str)
return json.dumps(self.config, default=str)
|
f = open("input.txt", "r")
input = f.read()
floor = 0
found_basement = False
for i in range(len(input)):
    if input[i] == '(':
        floor += 1
    elif input[i] == ')':
        floor -= 1
    # Only report the first time the basement (floor -1) is reached
    if floor == -1 and not found_basement:
        found_basement = True
        print "part 2: floor -1 at", i + 1
print floor
|
#!/usr/bin/env python
from lobe import ImageModel
model = ImageModel.load('path/to/exported/model')
# Predict from an image file
result = model.predict_from_file('path/to/file.jpg')
# Predict from an image url
result = model.predict_from_url('http://url/to/file.jpg')
# Predict from Pillow image
from PIL import Image
img = Image.open('path/to/file.jpg')
result = model.predict(img)
# Print top prediction
print("Top prediction:", result.prediction)
# Print all classes
for label, confidence in result.labels:
print(f"{label}: {confidence*100:.6f}%")
|
import datetime
from constants.common_constants import DEFAULT_FALSE_FLAG, DEFAULT_TRUE_FLAG
from models import session
from models.book import Book, UserBookMapping, BookType
from utils.log_handler import function_logger
def get_number_of_books_charge(user_id=None):
now = datetime.datetime.now()
book_details = session.query(
UserBookMapping.created_on,
BookType.charge,
BookType.fixed_days,
BookType.fixed_charges
).join(
Book, Book.id == UserBookMapping.book_id
).join(
BookType,
BookType.id == Book.book_type_id
).filter(
UserBookMapping.is_deleted == DEFAULT_FALSE_FLAG,
Book.is_deleted == DEFAULT_FALSE_FLAG,
UserBookMapping.user_id == user_id
).all()
    # Each book is billed a fixed charge for the first fixed_days, plus a per-day
    # charge for any remaining days:
    #   if total_days > fixed_days: charge * (total_days - fixed_days) + fixed_charges
    #   else:                       fixed_charges
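    # Worked example (illustrative numbers): a book held for 10 days with fixed_days=7,
    # fixed_charges=50 and a per-day charge of 5 costs (10 - 7) * 5 + 50 = 65, while a
    # book held for 5 days costs only the fixed 50.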
book_details = [
((((now-time).days+1)-fixed_days)*charge if
(((now-time).days+1)-fixed_days)*charge > 0 else 0)+fixed_charges for
time, charge, fixed_days, fixed_charges in book_details]
return dict(
number_of_books=len(book_details),
book_charges=sum(book_details)
)
@function_logger
def get_user_statement(user_id=None):
return get_number_of_books_charge(user_id=user_id)
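# Worked example (hypothetical numbers): a book held for 10 days with charge=5,
# fixed_days=7 and fixed_charges=20 costs max((10 - 7) * 5, 0) + 20 = 35,
# while the same book returned within the 7 fixed days costs just the fixed 20.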
|
from snovault import (
calculated_property,
collection,
load_schema,
)
from .base import (
Item
)
@collection(
name='individuals',
unique_key='accession',
properties={
'title': 'Individuals',
'description': 'Listing of Individuals',
})
class Individual(Item):
item_type = 'individual'
name_key = 'accession'
schema = load_schema('encoded:schemas/individual.json')
rev = {
'children_f': ('Individual', 'father'),
'children_m': ('Individual', 'mother'),
'families': ('Family', 'members'),
'case': ('Case', 'individual')
}
embedded_list = [
# Individual linkTo
'father.accession',
'father.is_deceased',
'father.sex',
# Individual linkTo
'mother.accession',
'mother.is_deceased',
'mother.sex'
]
@calculated_property(schema={
"title": "Display Title",
"description": "Individual's Identifier",
"type": "string"
})
def display_title(self, request, accession):
""" Use accession """
return accession
@calculated_property(schema={
"title": "Children",
"description": "Children of the individual",
"type": "array",
"items": {
"title": "Child",
"type": "string",
"linkTo": "Individual"
}
})
def children(self, request):
kids = (self.rev_link_atids(request, "children_f") +
self.rev_link_atids(request, "children_m"))
if kids:
return kids
@calculated_property(schema={
"title": "Families",
"description": "Families this individual is a member of",
"type": "array",
"items": {
"title": "Family",
"type": "string",
"linkTo": "Family"
}
})
def families(self, request):
fams = self.rev_link_atids(request, "families")
if fams:
return fams
@calculated_property(schema={
"title": "Cases",
"description": "Cases for this individual",
"type": "array",
"items": {
"title": "Case",
"type": "string",
"linkTo": "Case"
}
})
def case(self, request):
rs = self.rev_link_atids(request, "case")
if rs:
return rs
|
from typing import Dict, List, Tuple
day_num = "03"
day_title = "Spiral Memory"
INPUT = 312051
up = 1
left = 2
down = 3
right = 4
def rotateLeft(dir: int) -> int:
if dir == up:
return left
elif dir == left:
return down
elif dir == down:
return right
elif dir == right:
return up
else:
print("unknown direction:", dir)
return -1
def move(p: Tuple[int, int], dir: int) -> Tuple[int, int]:
if dir == up:
return p[0], p[1]+1
elif dir == left:
return p[0]-1, p[1]
elif dir == down:
return p[0], p[1]-1
elif dir == right:
return p[0]+1, p[1]
else:
print("unknown direction:", dir)
return p
def part1(n: int) -> int:
spiral: Dict[Tuple[int, int], int] = {}
dir = right
w = 1
p = (0, 0)
spiral[p] = w
w = 2
p = (1, 0)
spiral[p] = w
while w < n:
turn = rotateLeft(dir)
q = move(p, turn)
if q in spiral:
q = move(p, dir)
else:
dir = turn
p = q
w += 1
spiral[p] = w
return abs(p[0])+abs(p[1])
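# Puzzle examples: part1(12) == 3, part1(23) == 2, part1(1024) == 31.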
def adj(p: Tuple[int, int]) -> List[Tuple[int, int]]:
l: List[Tuple[int, int]] = []
for x in range(-1, 2):
for y in range(-1, 2):
if x != 0 or y != 0:
l.append((p[0]+x, p[1]+y))
return l
def part2(n: int) -> int:
spiral: Dict[Tuple[int, int], int] = {}
dir = right
w = 1
p = (0, 0)
spiral[p] = w
w = 1
p = (1, 0)
spiral[p] = w
while w < n:
turn = rotateLeft(dir)
q = move(p, turn)
if q in spiral:
q = move(p, dir)
else:
dir = turn
p = q
        total = 0
        for a in adj(p):
            if a in spiral:
                total += spiral[a]
        w = total
spiral[p] = w
return w
def main():
print(f"Day {day_num}: {day_title}")
print("Part 1", part1(INPUT))
print("Part 2", part2(INPUT))
if __name__ == '__main__':
main()
|
import sys
def get_number(seq):
return int("".join(map(str, seq)))
def run_phase(input_seq):
    # Prefix sums let each output digit be computed from a handful of range sums
    # instead of a full dot product with the repeating 0, 1, 0, -1 pattern.
    sums = [0]
    for n in input_seq:
        sums.append(sums[-1] + n)
    out = []
    for digit_idx in range(len(input_seq)):
        # For output digit i the pattern is +1 over blocks of length i+1 starting at
        # index i and -1 over every second such block, with equal-sized gaps of zeros.
        multiplier = 1
        group_start = digit_idx
        group_size = digit_idx + 1
        digit = 0
        while group_start < len(input_seq):
            group_end = min(len(input_seq), group_start + group_size)
            partial_sum = sums[group_end] - sums[group_start]
            digit += partial_sum * multiplier
            multiplier *= -1
            group_start += group_size * 2
        out.append(abs(digit) % 10)
    return out
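# Example (puzzle sample): one phase on 1,2,3,4,5,6,7,8 yields 4,8,2,2,6,1,5,8:
# run_phase([1, 2, 3, 4, 5, 6, 7, 8]) == [4, 8, 2, 2, 6, 1, 5, 8]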
def problem1(input_seq):
for _ in range(100):
input_seq = run_phase(input_seq)
print(get_number(input_seq[:8]))
def problem2(input_seq):
    offset = get_number(input_seq[:7])
    print(offset)
    # This shortcut assumes the offset falls in the second half of the repeated signal,
    # where the pattern is all ones from the offset onwards, so each phase reduces to
    # a running suffix sum taken modulo 10.
    long_input = input_seq * 10000
    for _ in range(100):
        for idx in range(len(long_input) - 2, offset - 1, -1):
            long_input[idx] = (long_input[idx] + long_input[idx + 1]) % 10
    print(get_number(long_input[offset:offset + 8]))
with open(sys.argv[1], "r") as f:
line = f.read().splitlines()[0]
input_seq = list(map(int, line))
problem1(input_seq)
problem2(input_seq)
|
import numpy as np
import nltk
import re
import pandas as pd
import os
import pickle
import csv
from pythainlp import word_tokenize
from pythainlp.corpus import thai_stopwords
from nltk.stem.porter import PorterStemmer
from stop_words import get_stop_words
from string import punctuation
from sklearn.preprocessing import normalize
from sklearn.feature_extraction.text import TfidfVectorizer
from ast import literal_eval
from urllib.parse import urlparse
nltk.download('words')
th_stop = thai_stopwords()
en_stop = get_stop_words('en')
p_stemmer = PorterStemmer()
query_vector = []
dup_check = []
def split_word(text):
tokens = word_tokenize(text,engine='newmm')
    # Remove Thai and English stop words plus special characters
    tokens = [i for i in tokens if (not i in th_stop) & (not i in en_stop) & (not i in punctuation) & (not i in ["'",'"','“','”','‘','’','\n',"None", ' ', ";", ":"])]
    # Remove numbers
    tokens = [i for i in tokens if not i.isnumeric()]
    # Remove tokens containing whitespace
    tokens = [i for i in tokens if not ' ' in i]
return tokens
def remove_emojis(data):
emoj = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002500-\U00002BEF" # chinese char
u"\U00002702-\U000027B0"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
u"\U0001f926-\U0001f937"
u"\U00010000-\U0010ffff"
u"\u2640-\u2642"
u"\u2600-\u2B55"
u"\u200d"
u"\u23cf"
u"\u23e9"
u"\u231a"
u"\ufe0f" # dingbats
u"\u3030"
"]+", re.UNICODE)
return re.sub(emoj, '', data)
def read_cofact_refer():  # Load reference data from the Cofact CSV
global dup_check
# Opening CSV file
root_path = os.getcwd()
path = os.path.join(root_path, 'result\\Cofact\\cofact_refer.csv')
f = open(path, encoding="utf8")
    # csv.reader returns an iterator over the rows as lists
csvreader = csv.reader(f)
# read header from CSV file
header = []
header = next(csvreader)
rows = []
for row in csvreader:
tmp = []
content_parts = []
content = ''
        # Add header + content, skipping rows whose header was already seen
        if row[1] in dup_check:
            continue
        elif row[1] not in dup_check:
            dup_check.append(row[1])
            tmp.append(row[1] + row[2])
        # Add the content
        tmp.append(row[2])
        # Add the link (original source article)
        tmp.append(row[3])
        # Add the datetime
        if (len(row) == 7):
            tmp.append(row[6])
        else:
            tmp.append("")
        # Add the header
        tmp.append(row[1])
rows.append(tmp)
# Closing file
f.close()
return rows
def read_anti_refer():  # Load reference data from the Anti-Fake News Center CSV
global dup_check
# Opening CSV file
root_path = os.getcwd()
path = os.path.join(root_path, 'result\\Anti\\anti_info.csv')
f = open(path, encoding="utf8")
    # csv.reader returns an iterator over the rows as lists
csvreader = csv.reader(f)
# read header from CSV file
header = []
header = next(csvreader)
rows = []
for row in csvreader:
tmp = []
content_parts = []
content = ''
        # Add header + content, skipping rows whose header was already seen
        if row[1] in dup_check:
            continue
        elif row[1] not in dup_check:
            dup_check.append(row[1])
            tmp.append(row[1] + row[2])
        # Add the content
        tmp.append(row[2])
        # Add the link
        tmp.append(row[3])
        # Add the datetime
        tmp.append(row[5])
        # Add the header
        tmp.append(row[1])
rows.append(tmp)
# Closing file
f.close()
return rows
def read_sure_refer():  # Load reference data from the Sure And Share CSV
global dup_check
# Opening CSV file
root_path = os.getcwd()
path = os.path.join(root_path, 'result\\Sure\\sure_info.csv')
f = open(path, encoding="utf8")
    # csv.reader returns an iterator over the rows as lists
csvreader = csv.reader(f)
# read header from CSV file
header = []
header = next(csvreader)
rows = []
for row in csvreader:
tmp = []
content_parts = []
content = ''
        # Add header + content, skipping rows whose header was already seen
        if row[1] in dup_check:
            continue
        elif row[1] not in dup_check:
            dup_check.append(row[1])
            tmp.append(row[1] + row[2])
        # Add the content
        tmp.append(row[2])
        # Add the link
        tmp.append(row[3])
        # Add the datetime
        tmp.append(row[5])
        # Add the header
        tmp.append(row[1])
rows.append(tmp)
# Closing file
f.close()
return rows
def combine_every_headline():
refer_text_list = []
cofact_refer_text_list = read_cofact_refer()
anti_refer_text_list = read_anti_refer()
sure_refer_text_list = read_sure_refer()
refer_text_list = cofact_refer_text_list + anti_refer_text_list + sure_refer_text_list
return refer_text_list
def create_df_for_backtrack(all_refer_text_list):
global all_original_text_and_headline_news_df, all_refer_header_and_content, all_refer_content
all_refer_content = []
all_refer_url = []
all_refer_datetime = []
all_refer_domain = []
all_refer_header = []
for i in range(len(all_refer_text_list)):
        all_refer_header_and_content.append(all_refer_text_list[i][0])  # list of headline + content
        all_refer_content.append(all_refer_text_list[i][1])  # list of content only
        all_refer_url.append(all_refer_text_list[i][2])  # list of URLs only
        all_refer_datetime.append(all_refer_text_list[i][3])  # list of datetimes only
        all_refer_domain.append(urlparse(all_refer_text_list[i][2]).hostname)  # list of domains only
        all_refer_header.append(all_refer_text_list[i][4])  # list of headlines only
    # Convert the lists into a dataframe
all_original_text_and_headline_news_df = pd.DataFrame(list(zip(all_refer_header_and_content, all_refer_content, all_refer_url, all_refer_datetime, all_refer_domain, all_refer_header)), columns=["All_headline_and_content_from_every_reference", "All_content_from_every_reference", "All_URL_from_every_reference", "All_datatime_from_every_reference", "All_domain_from_every_reference", "All_headline_from_every_reference"])
return all_original_text_and_headline_news_df, all_refer_header_and_content
def tokenize_and_create_vocabulary(all_refer_header_and_content):
    all_headline_and_content_tokens_list = [split_word(txt) for txt in all_refer_header_and_content]  # tokenised headline + content for every reference
local_all_tokens_list_j = [','.join(tkn) for tkn in all_headline_and_content_tokens_list]
## Create Vocabulary
tokens_list = []
for words in local_all_tokens_list_j:
# print(words)
temp_list = words.split(",")
# print(temp_list)
for i in temp_list:
tokens_list.append(i)
local_vocabulary = set(tokens_list)
local_vocabulary = list(local_vocabulary)
    ### Save vocabulary
    root_path = os.getcwd()
    path = os.path.join(root_path, 'vocabulary_all.txt')
    with open(path, "w", encoding="utf-8") as file:
        file.write(str(local_vocabulary))
    ### Load vocabulary
    root_path = os.getcwd()
    path = os.path.join(root_path, 'vocabulary_all.txt')
    with open(path, "r", encoding="utf-8") as file:
        data2 = literal_eval(file.readline())  # safer than eval for parsing the saved list
return local_vocabulary, local_all_tokens_list_j, data2
def create_tfidf_matrix(all_tokens_list_j):
tvec = TfidfVectorizer(analyzer=lambda x:x.split(','),)
local_original_c_feat = tvec.fit_transform(all_tokens_list_j)
### Save model
root_path = os.getcwd()
path = os.path.join(root_path, 'all-tfid.pkl')
with open(path,'wb') as handle:
pickle.dump(local_original_c_feat, handle)
return local_original_c_feat, tvec
## Create vector for Query/search keywords
def gen_vector_T(tokens):
    global tvec
    # Transform the query tokens with the fitted TF-IDF vectorizer and return a dense row vector
    x = tvec.transform(tokens)
    return x.A[0]
def cosine_similarity_T(k, query):
global tvec
tokens = split_word(str(query))
q_df = pd.DataFrame(columns=['q_clean'])
q_df.loc[0,'q_clean'] =str(tokens)
q_df=q_df.replace(to_replace ="\[.", value = '', regex = True)
q_df=q_df.replace(to_replace ="'", value = '', regex = True)
q_df=q_df.replace(to_replace =" ", value = '', regex = True)
q_df=q_df.replace(to_replace ='\]', value = '', regex = True)
d_cosines = []
query_vector = gen_vector_T(q_df['q_clean'])
query_vector = query_vector.reshape((1,-1))
d_cosines = np.dot(normalize(query_vector), norm_original_c_feat)
list_d_cosines = d_cosines[0].tolist()
out = np.array(list_d_cosines).argsort()[-k:][::-1]
list_d_cosines.sort()
a = pd.DataFrame()
for i in out:
a.loc[i,'index'] = str(i)
a.loc[i,'headline_and_content'] = all_original_text_and_headline_news_df["All_headline_and_content_from_every_reference"][i]
list_d_cosines.sort(reverse=True)
for j in range(k):
a.loc[out[j],'Score'] = list_d_cosines[j]
all_result = a
all_result_with_url = pd.DataFrame()
for i in range(len(all_result)):
if float(all_result.iloc[i]["Score"]) > 0:
all_result_with_url.loc[i,'index'] = all_result.iloc[i]["index"]
all_result_with_url.loc[i,'headline'] = all_original_text_and_headline_news_df["All_headline_from_every_reference"][int(all_result.iloc[i]["index"])]
all_result_with_url.loc[i,'url'] = all_original_text_and_headline_news_df["All_URL_from_every_reference"][int(all_result.iloc[i]["index"])]
all_result_with_url.loc[i,'content'] = all_original_text_and_headline_news_df["All_content_from_every_reference"][int(all_result.iloc[i]["index"])]
all_result_with_url.loc[i,'datetime'] = all_original_text_and_headline_news_df["All_datatime_from_every_reference"][int(all_result.iloc[i]["index"])]
all_result_with_url.loc[i,'domain'] = all_original_text_and_headline_news_df["All_domain_from_every_reference"][int(all_result.iloc[i]["index"])]
all_result_with_url.loc[i,'score'] = all_result.iloc[i]["Score"]
js = all_result_with_url.to_dict('records')
return js
def preprocess():
global original_c_feat, norm_original_c_feat, tvec, all_refer_text_list, vocabulary, all_original_text_and_headline_news_df, data2
    all_refer_text_list = combine_every_headline()  # collect headlines and URLs into a list
    all_original_text_and_headline_news_df, all_refer_header_and_content = create_df_for_backtrack(all_refer_text_list)  # build the dataframe used to back-reference retrieved news
    vocabulary, all_tokens_list_j, data2 = tokenize_and_create_vocabulary(all_refer_header_and_content)  # tokenise the headlines and build the vocabulary list
    original_c_feat, tvec = create_tfidf_matrix(all_tokens_list_j)  # build the TF-IDF vector for every article
norm_original_c_feat = normalize(original_c_feat)
norm_original_c_feat = norm_original_c_feat.toarray()
norm_original_c_feat = norm_original_c_feat.T
return None
# Main
all_refer_text_list = []
all_refer_header_and_content = []
vocabulary = []
all_tokens_list_j = []
data2 = []
all_original_text_and_headline_news_df = pd.DataFrame()
original_c_feat = ""
norm_original_c_feat = ""
tvec = ""
preprocess()
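# Example usage (the query text is hypothetical): once preprocess() has built the
# TF-IDF matrix, the top-10 most similar reference articles can be retrieved with:
# results = cosine_similarity_T(10, "text of a suspected fake-news claim")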
|
"""JSPEC Testing Module for matching JSPEC documents errors.
"""
from test.matcher import JSPECTestMatcher
from jspec.entity import (
JSPEC,
JSPECObject,
JSPECArray,
JSPECIntPlaceholder,
JSPECRealPlaceholder,
JSPECNumberPlaceholder,
)
class JSPECTestMatcherError(JSPECTestMatcher):
"""Class for testing the behaviour when using the ``match`` method for
getting errors.
"""
def test_matcher_error(self):
"""Test examples of good matches.
The ``match`` method should return a ``ValueError``.
"""
test_cases = [
{
"name": "Integer placeholder invalid inequalities",
"spec": JSPEC(
JSPECIntPlaceholder(
(set(), 0),
),
),
"obj": 1,
"errmsg": "JSPEC does not support inequalities of class <class 'set'>",
},
{
"name": "Real placeholder invalid inequalities",
"spec": JSPEC(
JSPECRealPlaceholder(
(set(), 0.0),
),
),
"obj": 1.1,
"errmsg": "JSPEC does not support inequalities of class <class 'set'>",
},
{
"name": "Number placeholder invalid inequalities",
"spec": JSPEC(
JSPECNumberPlaceholder(
(set(), 0.0),
),
),
"obj": 1.1,
"errmsg": "JSPEC does not support inequalities of class <class 'set'>",
},
{
"name": "Object invalid pair",
"spec": JSPEC(
JSPECObject({
int(),
}),
),
"obj": dict(),
"errmsg": "JSPEC objects do not support object paris of class <class 'int'>",
},
{
"name": "Array invalid pair",
"spec": JSPEC(
JSPECArray([
set(),
]),
),
"obj": list(),
"errmsg": "JSPEC arrays do not support elements of class <class 'set'>",
},
{
"name": "Array invalid pair",
"spec": JSPEC(
set(),
),
"obj": 1,
"errmsg": "JSPEC do not support elements of class <class 'set'>",
},
]
self._error_match(test_cases)
|
from lib import actions
class SetFanAction(actions.BaseAction):
def run(self, state, structure=None, device=None):
        target_state = (state == 'on')
if structure and device:
nest = self._get_device(structure, device)
nest.fan = target_state
else:
for structure in self._nest.structures:
for device in structure.devices:
device.fan = target_state
return state
|
######################################################
#
# BioSignalML Management in Python
#
# Copyright (c) 2010-2013 David Brooks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################
'''
Abstract BioSignalML objects.
'''
__all__ = [ 'makelabel' ]
from .ontology import BSML
from .recording import Recording
from .signal import Signal
from .event import Event
from .segment import Segment
from .annotation import Annotation
def makelabel(label, suffix):
#============================
"""
Helper function to generate a meaningful label for sub-properties of resources.
:param label: The label of some resource.
:param suffix: A suffix to append to the label.
:return: A string consisting of the label, a '_', and the suffix.
"""
return label + '_' + suffix
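# e.g. makelabel('ecg', 'rate') gives 'ecg_rate'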
if __name__ == '__main__':
#=========================
import logging
logging.getLogger().setLevel('DEBUG')
import biosignalml.rdf as rdf
def print_dict(r):
#-----------------
print '{'
for kv in r.__dict__.iteritems(): print ' %s: %s' % kv
print ' }'
def check(instance):
#-------------------
g = rdf.Graph()
instance.save_metadata_to_graph(g)
# print g.serialise(format=rdf.Format.TURTLE)
copy = instance.__class__.create_from_graph(instance.uri, g)
# if isinstance(instance, Event):
# print_dict(instance.about)
# print_dict(copy.about)
# print instance.metadata_as_string(rdf.Format.TURTLE)
# print copy.metadata_as_string(rdf.Format.TURTLE)
if instance.metadata_as_string(rdf.Format.TURTLE) != copy.metadata_as_string(rdf.Format.TURTLE):
print "INPUT:", instance.metadata_as_string(rdf.Format.TURTLE)
print "RESULT:", copy.metadata_as_string(rdf.Format.TURTLE)
raise AssertionError
return copy
r1 = Recording('http://example.org/recording', duration='1806')
# r1 = 'http://example.org/rec1'
# print r1.metadata_as_string(rdf.Format.TURTLE)
# a1 = Annotation.Note('http://example.org/ann1', r1, 'comment', creator='dave')
e1 = Annotation.Note('http://example.org/event', Segment(r1, r1.interval(1, 0.5)),
'event',
creator='dave')
t1 = Annotation.Tag('http://example.org/tag1', r1, 'tag')
# print t1.metadata_as_string(rdf.Format.TURTLE)
# for t in t1.tags: print (str(t))
# r2 = check(r1)
# a2 = check(a1)
# print a2.metadata_as_string(rdf.Format.TURTLE)
e2 = check(e1)
# print e2.metadata_as_string(rdf.Format.TURTLE)
# assert(e2.time == e1.time)
# t2 = check(t1)
# print t2.metadata_as_string(rdf.Format.TURTLE)
# for t in t2.tags: print (str(t))
ev1 = r1.new_event('http://ex.org/evt1', 'etype', 32.0, 10)
# print ev1.metadata_as_string(rdf.Format.TURTLE)
ev2 = check(ev1)
ev1 = r1.new_event('http://ex.org/evt1', 'etype', 32.0)
# print ev1.metadata_as_string(rdf.Format.TURTLE)
ev2 = check(ev1)
|
import traceback
import sys
class ConsistentHashing(object):
def __init__(self,hash_func=hash, num_replicas=1):
self.__hash=hash_func
self.__numReplicas=num_replicas
self.__hashCircle={}
self.__keys=[]
def keys(self):
return self.__keys
def hash_circles(self):
return self.__hashCircle
def node_size(self):
return len(self.__hashCircle)
    def add_node(self, node):
        try:
            if not isinstance(node, str):
                raise TypeError("The type of node should be str.")
        except TypeError:
            traceback.print_exc(file=sys.stdout)
            return
        # Place num_replicas virtual nodes on the hash circle for this node
        for i in range(self.__numReplicas):
            replicasNode = node + str(i)
            keyNode = self.__hash(replicasNode)
            self.__hashCircle[keyNode] = node
            self.__keys.append(keyNode)
        self.__keys.sort()
    def remove_node(self, node):
        try:
            if not isinstance(node, str):
                raise TypeError("The type of node should be str.")
        except TypeError:
            traceback.print_exc(file=sys.stdout)
            return
        for i in range(self.__numReplicas):
            replicasNode = node + str(i)
            keyNode = self.__hash(replicasNode)
            del self.__hashCircle[keyNode]
            self.__keys.remove(keyNode)
    def get(self, val):
        if self.node_size() == 0:
            return None
        # Walk the sorted keys clockwise and return the first node whose key exceeds hash(val)
        keyVal = self.__hash(val)
        nodeLength = self.node_size()
        for i in range(nodeLength):
            if int(keyVal) < int(self.__keys[i]):
                return self.__hashCircle[self.__keys[i]]
        return self.__hashCircle[self.__keys[0]]
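# Example usage (node names are arbitrary):
# ring = ConsistentHashing(num_replicas=3)
# ring.add_node("cache-a")
# ring.add_node("cache-b")
# ring.get("some-key")  # -> "cache-a" or "cache-b", stable for a given key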
|
from django.db.models import Max
from django.shortcuts import render
def index(request):
return render(request, 'core/index.html')
def mapa(request):
return render(request, 'core/mapa.html')
|
"""Syntax highlighter for Markdown markup language."""
from __future__ import annotations
from prettyqt import core, gui, syntaxhighlighters
BASE_FONT = 12.0
class Rule(syntaxhighlighters.HighlightRule):
font_size = BASE_FONT
class Link(Rule):
regex = r'\[(.+)\]\(([^ ]+)( "(.+)")?\)'
color = "#61AFE9"
class Image(Rule):
regex = r'\!\[(.+)\]\(([^ ]+)( "(.+)")?\)'
color = "#2B65D1"
class Heading1(Rule):
regex = r"^#[^\n]*"
color = "#E06C75"
bold = True
font_size = BASE_FONT * 2
class Heading2(Rule):
regex = r"^##[^\n]*"
color = "#E06C75"
bold = True
font_size = BASE_FONT * 1.5
class Heading3(Rule):
regex = r"^###[^\n]*"
color = "#E06C75"
bold = True
font_size = BASE_FONT * 1.17
class Heading4(Rule):
regex = r"^####[^\n]*"
color = "#E06C75"
bold = True
font_size = BASE_FONT
class Heading5(Rule):
regex = r"^#####[^\n]*"
color = "#E06C75"
bold = True
font_size = BASE_FONT * 0.83
class Heading6(Rule):
regex = r"^######[^\n]*"
color = "#E06C75"
bold = True
font_size = BASE_FONT * 0.67
class Emphasis(Rule):
regex = r"(\*)([^\*]+)\1"
color = "#BC78DD"
italic = True
class Strong(Rule):
regex = r"(\*{2})([^\*\*]+)\1"
color = "#D19A66"
bold = True
class Code(Rule):
regex = [r"`[^`]*`", r"^((?:(?:[ ]{4}|\t).*(\R|$))+)"]
color = "grey"
TRI_SINGLE = (core.RegularExpression("```"), Code.get_format())
class MarkdownHighlighter(gui.SyntaxHighlighter):
RULES = Rule.__subclasses__()
def highlightBlock(self, text: str):
super().highlightBlock(text)
self.setCurrentBlockState(0)
self._match_multiline(text, *TRI_SINGLE)
def _match_multiline(
self, text: str, delimiter: core.RegularExpression, style: gui.TextCharFormat
):
        # If the previous block left us inside an open code fence, start at 0
if self.previousBlockState() == 1:
start = 0
add = 0
# Otherwise, look for the delimiter on this line
else:
match = delimiter.match(text)
if not match.hasMatch():
return
start = match.capturedStart()
add = match.capturedLength()
# As long as there's a delimiter match on this line...
while start >= 0:
# Look for the ending delimiter
match = delimiter.match(text, start + add)
end = match.capturedStart()
# Ending delimiter on this line?
if end >= add:
length = end + match.capturedLength()
self.setCurrentBlockState(0)
            # No; the code fence continues onto the next block
else:
self.setCurrentBlockState(1)
length = len(text)
self.setFormat(start, length - start + add, style)
# Look for the next match
start = delimiter.match(text, start + length).capturedStart()
if __name__ == "__main__":
from prettyqt import widgets
app = widgets.app()
editor = widgets.PlainTextEdit()
highlighter = MarkdownHighlighter(editor.document())
editor.show()
app.main_loop()
|
import praw
# fill in your reddit stuff here.
reddit = praw.Reddit(client_id='',
client_secret='',
user_agent='',
username='')
def scrap(sub):
subreddit = reddit.subreddit(sub)
meme = subreddit.random()
return meme.url, meme.author, meme.permalink
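# Example usage (the subreddit name is arbitrary):
# url, author, permalink = scrap('memes')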
|
from datetime import datetime, timedelta, time
from pyiso.base import BaseClient
import copy
import re
import time as systime  # stdlib time module, aliased so it does not shadow datetime.time
from bs4 import BeautifulSoup
# PRC_LMP
# PRC_HASP_LMP
# PRC_RTPD_LMP
# PRC_INTVL_LMP
# PRC_AS - All Ancillary Services for Region and Sub-Regional Partition. Posted hourly in $/MW for the DAM and HASP.
# PRC_INVL_AS - Posts 15-Minute price relevant to the next 15 minute binding interval for RTM
# PRC_CURR_LMP - Posts all LMP data for the most current interval
"""
Returned data is a list of dicts, each of which has a time code as the main term which is used for indexing. e.g. the following is the result of this code:
mycaiso = caiso.CAISOClient()
mydata = mycaiso.get_generation(latest=True)
mydata
[{'timestamp': datetime.datetime(2014, 12, 11, 6, 20, tzinfo=<UTC>), 'gen_MW': 1678.0, 'fuel_name': 'renewable', 'ba_name': 'CAISO', 'freq': '10m', 'market': 'RT5M'},
{'timestamp': datetime.datetime(2014, 12, 11, 6, 20, tzinfo=<UTC>), 'gen_MW': 447.0, 'fuel_name': 'wind', 'ba_name': 'CAISO', 'freq': '10m', 'market': 'RT5M'},
{'gen_MW': 26155.37, 'ba_name': 'CAISO', 'timestamp': datetime.datetime(2014, 12, 11, 6, 20, tzinfo=<UTC>), 'freq': '10m', 'fuel_name': 'other', 'market': 'RT5M'}]
this can then be pulled into a pandas dataframe:
import pandas as pd
df = pd.DataFrame(data)
"""
"""
fruitful methods:
get_generation(self, latest=False, yesterday=False,start_at=False, end_at=False, **kwargs):
get_load(self, latest=False,start_at=False, end_at=False, **kwargs)
get_trade(self, latest=False, start_at=False, end_at=False, **kwargs)
get_lmp(self, latest=False,start_at=False, end_at=False, market='hourly', grp_type='ALL',node='ALL',**kwargs)
construct_oasis_payload(self, queryname, preferred_start_at=None, **kwargs)
fetch_oasis(self, payload={})
parsing methods:
parse_generation
parse_lmp(self,raw_data)
parse_oasis_slrs(self, raw_data)
parse_oasis_renewable(self, raw_data)
parse_oasis_demand_forecast(self, raw_data)
parse_todays_outlook_renewables(self, soup, ts)
"""
class CAISOClient(BaseClient):
"""
Interface to CAISO data sources.
For information about the data sources,
see http://www.caiso.com/Documents/InterfaceSpecifications-OASISv4_1_3.pdf
"""
NAME = 'CAISO'
base_url_oasis = 'http://oasis.caiso.com/oasisapi/SingleZip'
base_url_gen = 'http://content.caiso.com/green/renewrpt/'
base_url_outlook = 'http://content.caiso.com/outlook/SP/'
base_payload = {'version': 1}
oasis_request_time_format = '%Y%m%dT%H:%M-0000'
TZ_NAME = 'America/Los_Angeles'
fuels = {
'GEOTHERMAL': 'geo',
'BIOMASS': 'biomass',
'BIOGAS': 'biogas',
'SMALL HYDRO': 'smhydro',
'WIND TOTAL': 'wind',
'SOLAR': 'solar',
'SOLAR PV': 'solarpv',
'SOLAR THERMAL': 'solarth',
'NUCLEAR': 'nuclear',
'THERMAL': 'thermal',
'HYDRO': 'hydro',
}
oasis_markets = { # {'RT5M': 'RTM', 'DAHR': 'DAM', 'RTHR': 'HASP'}
BaseClient.MARKET_CHOICES.hourly: 'HASP',
BaseClient.MARKET_CHOICES.fivemin: 'RTM', # There are actually three codes used: RTPD (Real-time Pre-dispatch), RTD (real-time dispatch), and RTM (Real-Time Market). I can't figure out what the difference is.
BaseClient.MARKET_CHOICES.dam: 'DAM',
}
def get_generation(self, latest=False, yesterday=False,
start_at=False, end_at=False, **kwargs):
# set args
self.handle_options(data='gen', latest=latest, yesterday=yesterday,
start_at=start_at, end_at=end_at, **kwargs)
# ensure market and freq are set
if 'market' not in self.options:
if self.options['forecast']:
self.options['market'] = self.MARKET_CHOICES.dam
else:
self.options['market'] = self.MARKET_CHOICES.fivemin
if 'freq' not in self.options:
if self.options['forecast']:
self.options['freq'] = self.FREQUENCY_CHOICES.hourly
else:
self.options['freq'] = self.FREQUENCY_CHOICES.fivemin
if latest:
return self._generation_latest()
elif self.options['forecast']:
return self._generation_forecast()
else:
return self._generation_historical()
def get_lmp(self, latest=False,
start_at=False, end_at=False, market='hourly', grp_type='ALL',node='ALL',csv=False, **kwargs):
# Construct_oasis_payload expects market option to be one of 'hourly', 'fivemin', 'tenmin', 'na', 'dam'
# if csv = False, pulls xml files, parses SLOWLY, and returned data is
# a list of dicts, each of which has a main index of the timestamp
# if csv=True, pulls csv files, parses more quickly, and returns Pandas
# Panel data structure
# Expected parameters:
# node: CAISO node ID. Can be set to individual node or "ALL". "ALL" will override grp_type
# grp_type: either "ALL_APNodes" or "ALL" - This will trigger day-by-day iteration
# NOTE: This needs to be turned off for processing individual nodes. This will override node types
# market= "DAM", "HASP", "RTM"
# start_at and end_at can be a variety of parsable input types, with or without time codes
# i.e. '2013-10-12T11:45:30' or '2011-10-12'
# Relevant XML Calls:
# PRC_LMP - for market_run_id='DAM'
# PRC_HASP_LMP for market_run_id='HASP'
# PRC_INTVL_LMP for market_run_id='RTM'
# PRC_RTPD_LMP No longer valid?
# Max call interval:
# In the HASP and RTM markets, requesting more than the max interval length may result in the wrong data being returned.
# Individual nodes: <31 days
# Calling "ALL" or "ALL_APNODES":
# DAM: 1 day, returns 4 files from expanded zip. Each has 20-line header
# HASP: 1 hour, returns one file with all components (LMP, MCC, MCE, MCL)
# RTM: 1 hour, returns one file with all components (LMP, MCC, MCE, MCL)
#PRC_LMP
# if grp_type=="ALL" or "ALL_APNODES", we are processing full node sets:
# remove 'node' from the payload
# can only process one time step at a time,
# Time step for DAM = 1 day; time step otherwise = 1 hr
#
# if node is not "ALL", we are dealing with a specific node:
# remove grp_type from payload
# Check to make sure that the date is less than 31 days or cut into pieces
# set args
self.handle_options(data='load', latest=latest,
start_at=start_at, end_at=end_at, market=market, grp_type=grp_type,node=node, **kwargs)
requestSpan = self.options['end_at'] - self.options['start_at'] # This is the duration spanned by our request
requestStart = self.options['start_at'] #This should be a datetime object
requestEnd = self.options['end_at'] # This should be a datetime object
        print('Request span is:', requestSpan)
# ensure market and freq are set # What is this for?
if 'market' not in self.options:
if self.options['forecast']:
self.options['market'] = self.MARKET_CHOICES.dam
else:
self.options['market'] = self.MARKET_CHOICES.fivemin
"""if 'freq' not in self.options: # What is the frequency used for?
if self.options['forecast']:
self.options['freq'] = self.FREQUENCY_CHOICES.hourly
else:
self.options['freq'] = self.FREQUENCY_CHOICES.fivemin
"""
# Clean up conflicting commands
# Check this: this may currently be buggy when requesting grp_type=ALL_APNODES but excluding 'node' in the call
if self.options['node']=='ALL' and self.options['grp_type']!='ALL':
del self.options['grp_type'] # Node typically overrides the grp_type call
# Decision fork: either we are handing "all" nodes or we are handling an individual node
if self.options['grp_type']=='ALL' or self.options['grp_type']=='ALL_APNodes':
# If we are processing full node sets, need to iterate across the appropriate time blocks
del self.options['node'] # Get rid of node commands to ensure we aren't sending mixed signals. This will override the node option.
if market=='DAHR':
print ('The DAM LMP call is not yet implemented... you should go do that.')
else: # We are not in DAM, but in HASP or RTM
# If we are requesting all nodes in the Hour-ahead market or real-time markets, we can request at most one hour at a time
if market=='RTHR':
# This is a request for the Hour-Ahead Scheduling Process (HASP)
oasis_API_call= 'PRC_HASP_LMP'
else: #if ':market=='RTM
# Assume that this is a request for the real-time market
oasis_API_call= 'PRC_INTVL_LMP'
parsed_data = [] # Placeholder
currentStartAt = requestStart # Initialize loop conditions
currentEndAt = currentStartAt
# The contents of the following if statement can probably be refactored
if requestSpan.total_seconds()>3600:
timeStep = timedelta(hours=1) # Increment by one hour each iteration
currentEndAt = currentEndAt + timeStep # Priming the pump
# The following loop can probably be refactored significantly
while currentEndAt < requestEnd:
# Set up payload, fetch data, and parse data
self.options['start_at']=currentStartAt
self.options['end_at']=currentEndAt
payload = self.construct_oasis_payload(oasis_API_call,csv=csv)
                        print('Requesting data for time starting at', currentStartAt.strftime(self.oasis_request_time_format))
                        startRequest = systime.perf_counter()
                        oasis_data = self.fetch_oasis(payload=payload)
                        endRequest = systime.perf_counter()
                        print('Imported data in', endRequest - startRequest, 's')
                        parsed_data.append(self.parse_lmp(oasis_data, csv=csv))
                        print('Parsed data in', systime.perf_counter() - endRequest, 's')
currentStartAt = currentEndAt
currentEndAt = currentEndAt + timeStep
# Previous 'if' block was to get us within one time step of the finish. This will get us the rest of the way.
#Clean up final iteration to get to the end time
                print('Exited the loop')
self.options['start_at']=currentStartAt
self.options['end_at']=requestEnd
payload = self.construct_oasis_payload(oasis_API_call,csv=csv)
                print('Requesting data for time starting at', currentStartAt.strftime(self.oasis_request_time_format))
oasis_data = self.fetch_oasis(payload=payload)
parsed_data.append(self.parse_lmp(oasis_data,csv))
result = parsed_data
#merge dataframes if you have been pulling csv's
if csv:
for i in range(len(parsed_data)):
if i == 0: result = parsed_data[0]
else:
result = result.append(parsed_data[i])
result = result.unstack()
result.columns = result.columns.droplevel()
else:
# If we aren't handling full node sets, we are handling individual nodes and can request up to 31 days of data at a time
print('The single-node calls are not yet implemented... you should go do that.')
# Return either just the most recent datapoint, or return all the parsed data
# It seems like this could be moved to a separate function
# Commenting out for now because it looks like it needs a specific data structure, i.e. a dict with a 'timestamp' key
"""
if self.options['latest']:
# select latest
latest_dp = None
latest_ts = self.utcify('1900-01-01 12:00')
now = self.utcify(datetime.utcnow(), tz_name='utc')
for dp in parsed_data:
if dp['timestamp'] < now and dp['timestamp'] > latest_ts:
latest_dp = dp
latest_ts = dp['timestamp']
# return latest
if latest_dp:
return [latest_dp]
else:
return []
else:
# return all data
return parsed_data
"""
return result
def parse_lmp(self,raw_data,csv=False):
"""
Incoming raw_data is a list of data points, split using the <REPORT DATA> tag
Parse raw data output of fetch_oasis for location marginal price.
4 LMP components (Marginal Cost of Energy, Marginal Cost of Congestion, Marginal cost of Losses, net LMP
"""
#Sample entry:
#<REPORT_DATA>
#<DATA_ITEM>LMP_PRC</DATA_ITEM>
#<RESOURCE_NAME>3EMIDIO_6_N001</RESOURCE_NAME>
#<OPR_DATE>2013-05-25</OPR_DATE>
#<INTERVAL_NUM>69</INTERVAL_NUM>
#<INTERVAL_START_GMT>2013-05-26T00:00:00-00:00</INTERVAL_START_GMT>
#<INTERVAL_END_GMT>2013-05-26T00:15:00-00:00</INTERVAL_END_GMT>
#<VALUE>27.5385</VALUE>
#</REPORT_DATA>
# if using csvs, parsing just involves using parse_to_df method
if csv:
parsed_data = self.parse_to_df(raw_data,usecols=[
'INTERVALSTARTTIME_GMT','INTERVALENDTIME_GMT','NODE','MW','LMP_TYPE'],
index_col=['INTERVALSTARTTIME_GMT','INTERVALENDTIME_GMT','LMP_TYPE','NODE'],
parse_dates=['INTERVALSTARTTIME_GMT','INTERVALENDTIME_GMT'])
else:
# set up storage
parsed_data = {}
#parsed_data = []
# Structure of returned data: set of nested dictionaries
# {dict of times}
# {dict of nodes}
# {dict of lmp components}
# i.e. parsed
# extract values from xml
for raw_soup_dp in raw_data:
# Parse the time, node name, data item, and value from the xml
try:
ts = self.utcify(raw_soup_dp.find('interval_start_gmt').string)
node_name =raw_soup_dp.find('resource_name').string.lower()
data_type = raw_soup_dp.find('data_item').string.lower()
data_value = float(raw_soup_dp.find('value').string)
except TypeError:
self.logger.error('Error in schema for CAISO OASIS result %s' % raw_soup_dp.prettify())
continue
# Make sure that our dict structure has a spot ready to recieve a new lmp value
if ts not in parsed_data:
parsed_data[ts] = {}
if node_name not in parsed_data[ts]:
parsed_data[ts][node_name]={}
# store generation value
parsed_data[ts][node_name][data_type] = data_value # This will set MCC, MCL, MCE, or LMP to the given value
return parsed_data
def get_load(self, latest=False,
start_at=False, end_at=False, **kwargs):
# set args
self.handle_options(data='load', latest=latest,
start_at=start_at, end_at=end_at, **kwargs)
# ensure market and freq are set
if 'market' not in self.options:
if self.options['forecast']:
self.options['market'] = self.MARKET_CHOICES.dam
else:
self.options['market'] = self.MARKET_CHOICES.fivemin
if 'freq' not in self.options:
if self.options['forecast']:
self.options['freq'] = self.FREQUENCY_CHOICES.hourly
else:
self.options['freq'] = self.FREQUENCY_CHOICES.fivemin
# construct and execute OASIS request
payload = self.construct_oasis_payload('SLD_FCST')
oasis_data = self.fetch_oasis(payload=payload)
# parse data
parsed_data = self.parse_oasis_demand_forecast(oasis_data)
if self.options['latest']:
# select latest
latest_dp = None
latest_ts = self.utcify('1900-01-01 12:00')
now = self.utcify(datetime.utcnow(), tz_name='utc')
for dp in parsed_data:
if dp['timestamp'] < now and dp['timestamp'] > latest_ts:
latest_dp = dp
latest_ts = dp['timestamp']
# return latest
if latest_dp:
return [latest_dp]
else:
return []
else:
# return all data
return parsed_data
def get_trade(self, latest=False,
start_at=False, end_at=False, **kwargs):
# set args
self.handle_options(data='trade', latest=latest,
start_at=start_at, end_at=end_at, **kwargs)
# ensure market and freq are set
if 'market' not in self.options:
if self.options['forecast']:
self.options['market'] = self.MARKET_CHOICES.dam
else:
self.options['market'] = self.MARKET_CHOICES.fivemin
if 'freq' not in self.options:
if self.options['forecast']:
self.options['freq'] = self.FREQUENCY_CHOICES.hourly
else:
self.options['freq'] = self.FREQUENCY_CHOICES.fivemin
# construct and execute OASIS request
payload = self.construct_oasis_payload('ENE_SLRS')
oasis_data = self.fetch_oasis(payload=payload)
# parse data
parsed_data = self.parse_oasis_slrs(oasis_data)
if self.options['latest']:
# select latest
latest_dp = None
latest_ts = self.utcify('1900-01-01 12:00')
now = self.utcify(datetime.utcnow(), tz_name='utc')
for dp in parsed_data:
if dp['timestamp'] < now and dp['timestamp'] > latest_ts:
latest_dp = dp
latest_ts = dp['timestamp']
# return latest
if latest_dp:
return [latest_dp]
else:
return []
else:
# return all data
return parsed_data
def construct_oasis_payload(self, queryname, preferred_start_at=None,
csv=False, **kwargs):
# get start and end times
if self.options['latest']:
now = self.utcify(datetime.utcnow(), tz_name='utc')
startdatetime = now - timedelta(minutes=20)
enddatetime = now + timedelta(minutes=20)
else:
startdatetime = self.options['start_at']
enddatetime = self.options['end_at']
# get market id
market_run_id = self.oasis_markets[self.options['market']]
# construct payload
payload = {'queryname': queryname,
'market_run_id': market_run_id,
'startdatetime': (startdatetime).strftime(self.oasis_request_time_format),
'enddatetime': (enddatetime).strftime(self.oasis_request_time_format),
}
if csv:
payload.update({'resultformat' : 6})
print(payload)
payload.update(self.base_payload)
payload.update(kwargs)
# return
return payload
def set_dt_index(self, df, date, hours, end_of_hour=True):
if end_of_hour:
offset = -1
else:
offset = 0
# create list of combined datetimes
dts = [datetime.combine(date, time(hour=(h+offset))) for h in hours]
# set list as index
df.index = dts
# utcify
df.index = self.utcify_index(df.index)
# return
return df
def _generation_historical(self):
# set up storage
parsed_data = []
# collect data
this_date = self.options['start_at'].date()
while this_date <= self.options['end_at'].date():
# set up request
url_file = this_date.strftime('%Y%m%d_DailyRenewablesWatch.txt')
url = self.base_url_gen + url_file
# carry out request
response = self.request(url)
if not response:
this_date += timedelta(days=1)
continue
# process both halves of page
for header in [1, 29]:
df = self.parse_to_df(response.text,
skiprows=header, nrows=24, header=header,
delimiter='\t+')
# combine date with hours to index
indexed = self.set_dt_index(df, this_date, df['Hour'])
# original header is fuel names
indexed.rename(columns=self.fuels, inplace=True)
# remove non-fuel cols
fuel_cols = list( set(self.fuels.values()) & set(indexed.columns) )
subsetted = indexed[fuel_cols]
# pivot
pivoted = self.unpivot(subsetted)
pivoted.rename(columns={'level_1': 'fuel_name', 0: 'gen_MW'}, inplace=True)
# store
parsed_data += self.serialize(pivoted,
header=['timestamp', 'fuel_name', 'gen_MW'],
extras={'ba_name': self.NAME,
'market': self.MARKET_CHOICES.hourly,
'freq': self.FREQUENCY_CHOICES.hourly})
# finish day
this_date += timedelta(days=1)
# return
return parsed_data
def fetch_oasis(self, payload={}):
"""Returns a list of report data elements, or an empty list if an error was encountered."""
# set up storage
raw_data = []
# try get
response = self.request(self.base_url_oasis, params=payload) # have request
if not response:
return []
# read data from zip
content = self.unzip(response.content)
if not content:
return []
# if downloaded xml
if not 'resultformat' in payload.keys():
# prep xml file for parsing w/ beautiful soup
# load xml into soup
soup = BeautifulSoup(content)
# check xml content
error = soup.find('m:error')
if error:
code = error.find('m:err_code')
desc = error.find('m:err_desc')
msg = 'XML error for CAISO OASIS with payload %s: %s %s' % (payload, code, desc)
self.logger.error(msg)
raw_data = []
else:
raw_data = soup.find_all('report_data')
# if downloaded csv (instead of xml), do nothing to parse
else: raw_data = content
return raw_data
def parse_oasis_renewable(self, raw_data):
"""Parse raw data output of fetch_oasis for renewables."""
# set up storage
preparsed_data = {}
parsed_data = []
# extract values from xml
for raw_soup_dp in raw_data:
# set up storage for timestamp
ts = self.utcify(raw_soup_dp.find('interval_start_gmt').string)
if ts not in preparsed_data:
preparsed_data[ts] = {'wind': 0, 'solar': 0}
# store generation value
try:
fuel_name = raw_soup_dp.find('renewable_type').string.lower()
gen_MW = float(raw_soup_dp.find('value').string)
preparsed_data[ts][fuel_name] += gen_MW
except TypeError:
self.logger.error('Error in schema for CAISO OASIS result %s' % raw_soup_dp.prettify())
continue
# collect values into dps
freq = self.options.get('freq', self.FREQUENCY_CHOICES.hourly)
market = self.options.get('market', self.MARKET_CHOICES.hourly)
        for ts, preparsed_dp in preparsed_data.items():
# set up base
base_parsed_dp = {'timestamp': ts,
'freq': freq,
'market': market,
'gen_MW': 0, 'ba_name': self.NAME}
# collect data
for fuel_name in ['wind', 'solar']:
parsed_dp = copy.deepcopy(base_parsed_dp)
parsed_dp['fuel_name'] = fuel_name
parsed_dp['gen_MW'] += preparsed_dp[fuel_name]
parsed_data.append(parsed_dp)
# return
return parsed_data
def parse_oasis_slrs(self, raw_data):
"""Parse raw data output of fetch_oasis for System Load and Resource Schedules."""
# set strings to search on
if self.options['data'] == 'gen':
data_items = ['ISO_TOT_GEN_MW']
data_label = 'gen_MW'
elif self.options['data'] == 'trade':
data_items = ['ISO_TOT_EXP_MW', 'ISO_TOT_IMP_MW']
data_label = 'net_exp_MW'
else:
data_items = []
data_label = None
freq = self.options.get('freq', self.FREQUENCY_CHOICES.fivemin)
market = self.options.get('market', self.MARKET_CHOICES.fivemin)
# set up storage
extracted_data = {}
parsed_data = []
# extract values from xml
for raw_soup_dp in raw_data:
data_item = raw_soup_dp.find('data_item').string
if data_item in data_items:
# parse timestamp
ts = self.utcify(raw_soup_dp.find('interval_start_gmt').string)
# parse val
if data_item == 'ISO_TOT_IMP_MW':
val = -float(raw_soup_dp.find('value').string)
else:
val = float(raw_soup_dp.find('value').string)
# add to storage
try:
extracted_data[ts] += val
except KeyError:
extracted_data[ts] = val
# assemble data
for ts in sorted(extracted_data.keys()):
parsed_dp = {data_label: extracted_data[ts]}
parsed_dp.update({'timestamp': ts, 'freq': freq, 'market': market, 'ba_name': self.NAME})
if self.options['data'] == 'gen':
parsed_dp.update({'fuel_name': 'other'})
# add to storage
parsed_data.append(parsed_dp)
# return
return parsed_data
def parse_oasis_demand_forecast(self, raw_data):
"""Parse raw data output of fetch_oasis for system-wide 5-min RTM demand forecast."""
# set up storage
parsed_data = []
# set up freq and market
freq = self.options.get('freq', self.FREQUENCY_CHOICES.fivemin)
market = self.options.get('market', self.MARKET_CHOICES.fivemin)
if market == self.MARKET_CHOICES.dam:
data_item_key = 'SYS_FCST_DA_MW'
else:
data_item_key = 'SYS_FCST_5MIN_MW'
# extract values from xml
for raw_soup_dp in raw_data:
if raw_soup_dp.find('data_item').string == data_item_key and \
raw_soup_dp.find('resource_name').string == 'CA ISO-TAC':
# parse timestamp
ts = self.utcify(raw_soup_dp.find('interval_start_gmt').string)
# set up base
parsed_dp = {'timestamp': ts,
'freq': freq,
'market': market,
'ba_name': self.NAME}
# store generation value
parsed_dp['load_MW'] = float(raw_soup_dp.find('value').string)
parsed_data.append(parsed_dp)
# return
return parsed_data
def todays_outlook_time(self):
# get timestamp
response = self.request(self.base_url_outlook+'systemconditions.html')
if not response:
return None
demand_soup = BeautifulSoup(response.content)
for ts_soup in demand_soup.find_all(class_='docdate'):
match = re.search('\d{1,2}-[a-zA-Z]+-\d{4} \d{1,2}:\d{2}', ts_soup.string)
if match:
ts_str = match.group(0)
return self.utcify(ts_str)
def fetch_todays_outlook_renewables(self):
# get renewables data
response = self.request(self.base_url_outlook+'renewables.html')
return BeautifulSoup(response.content)
def parse_todays_outlook_renewables(self, soup, ts):
# set up storage
parsed_data = []
freq = self.options.get('freq', self.FREQUENCY_CHOICES.tenmin)
market = self.options.get('market', self.MARKET_CHOICES.tenmin)
# get all renewables values
for (id_name, fuel_name) in [('totalrenewables', 'renewable'),
('currentsolar', 'solar'),
('currentwind', 'wind')]:
resource_soup = soup.find(id=id_name)
if resource_soup:
match = re.search('(?P<val>\d+.?\d+)\s+MW', resource_soup.string)
if match:
parsed_dp = {'timestamp': ts,
'freq': freq,
'market': market,
'ba_name': self.NAME}
parsed_dp['gen_MW'] = float(match.group('val'))
parsed_dp['fuel_name'] = fuel_name
parsed_data.append(parsed_dp)
# actual 'renewable' value should be only renewables that aren't accounted for in other categories
accounted_for_ren = 0
for dp in parsed_data:
if dp['fuel_name'] != 'renewable':
accounted_for_ren += dp['gen_MW']
for dp in parsed_data:
if dp['fuel_name'] == 'renewable':
dp['gen_MW'] -= accounted_for_ren
return parsed_data
def _generation_latest(self, **kwargs):
# set up
parsed_data = []
# override market and freq to 10 minute
self.options['market'] = self.MARKET_CHOICES.tenmin
self.options['freq'] = self.FREQUENCY_CHOICES.tenmin
# get and parse "Today's Outlook" data
soup = self.fetch_todays_outlook_renewables()
ts = self.todays_outlook_time()
parsed_data += self.parse_todays_outlook_renewables(soup, ts)
if len(parsed_data) == 0:
return parsed_data
total_ren_MW = sum([dp['gen_MW'] for dp in parsed_data])
ts = parsed_data[0]['timestamp']
# get OASIS total gen data
payload = self.construct_oasis_payload(queryname='ENE_SLRS', schedule='ALL')
oasis_data = self.fetch_oasis(payload=payload)
# parse OASIS data
for dp in self.parse_oasis_slrs(oasis_data):
if dp['timestamp'] == ts:
dp['gen_MW'] -= total_ren_MW
dp['freq'] = self.options['freq']
parsed_data.append(dp)
return parsed_data
# no matching OASIS data found, so return null
return []
def _generation_forecast(self, **kwargs):
# set up
parsed_data = []
# get OASIS total gen data
gen_payload = self.construct_oasis_payload(queryname='ENE_SLRS', schedule='ALL')
gen_oasis_data = self.fetch_oasis(payload=gen_payload)
gen_dps = self.parse_oasis_slrs(gen_oasis_data)
# get OASIS renewable gen data
ren_payload = self.construct_oasis_payload(queryname='SLD_REN_FCST')
ren_oasis_data = self.fetch_oasis(payload=ren_payload)
ren_dps = self.parse_oasis_renewable(ren_oasis_data)
# set of times with both gen and renewable data
times = set([dp['timestamp'] for dp in ren_dps]) & set([dp['timestamp'] for dp in gen_dps])
# handle renewables
total_ren_MW = {}
for dp in ren_dps:
if dp['timestamp'] in times:
# assemble renewable totals for each time
try:
total_ren_MW[dp['timestamp']] += dp['gen_MW']
except KeyError:
total_ren_MW[dp['timestamp']] = dp['gen_MW']
# add to storage
parsed_data.append(dp)
# handle generation
for dp in gen_dps:
if dp['timestamp'] in times:
# subtract off renewable totals
dp['gen_MW'] -= total_ren_MW[dp['timestamp']]
# add to storage
parsed_data.append(dp)
# return
return parsed_data
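# Example usage (sketch; the date range and market choices below are placeholders):
# client = CAISOClient()
# gen = client.get_generation(latest=True)
# lmp = client.get_lmp(start_at='2014-12-11', end_at='2014-12-12',
#                      market='fivemin', grp_type='ALL', csv=True)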
|
from re_calc.util import is_number, every
import unittest
class TestInputProcessing(unittest.TestCase):
def test_is_number(self):
self.assertTrue(is_number('4.0'))
self.assertFalse(is_number('*'))
def test_every(self):
numbers_list = [1, 2, 4, 5]
result = every(lambda x: x % 2 == 0, numbers_list)
self.assertFalse(result)
numbers_list_2 = (2, 6, 8)
result_2 = every(lambda x: x % 2 == 0, numbers_list_2)
self.assertTrue(result_2)
|
""" Dovecot dict proxy implementation
"""
import asyncio
import logging
import json
class DictProtocol(asyncio.Protocol):
""" Protocol to answer Dovecot dict requests, as implemented in Dict proxy.
Only a subset of operations is handled properly by this proxy: hello,
lookup and transaction-based set.
There is very little documentation about the protocol, most of it was
reverse-engineered from :
https://github.com/dovecot/core/blob/master/src/dict/dict-connection.c
https://github.com/dovecot/core/blob/master/src/dict/dict-commands.c
https://github.com/dovecot/core/blob/master/src/lib-dict/dict-client.h
"""
DATA_TYPES = {0: str, 1: int}
def __init__(self, table_map):
self.table_map = table_map
# Minor and major versions are not properly checked yet, but stored
# anyway
self.major_version = None
self.minor_version = None
# Every connection starts with specifying which table is used, dovecot
# tables are called dicts
self.dict = None
# Dictionary of active transaction lists per transaction id
self.transactions = {}
super(DictProtocol, self).__init__()
def connection_made(self, transport):
logging.info('Connect {}'.format(transport.get_extra_info('peername')))
self.transport = transport
def data_received(self, data):
logging.debug("Received {}".format(data))
results = []
# Every command is separated by "\n"
for line in data.split(b"\n"):
            # A command must at least have a type and one argument
if len(line) < 2:
continue
# The command function will handle the command itself
command = DictProtocol.COMMANDS.get(line[0])
if command is None:
logging.warning('Unknown command {}'.format(line[0]))
return self.transport.abort()
# Args are separated by "\t"
args = line[1:].strip().split(b"\t")
try:
future = command(self, *args)
if future:
results.append(future)
except Exception:
logging.exception("Error when processing request")
return self.transport.abort()
# For asyncio consistency, wait for all results to fire before
# actually returning control
return asyncio.gather(*results)
def process_hello(self, major, minor, value_type, user, dict_name):
""" Process a dict protocol hello message
"""
self.major, self.minor = int(major), int(minor)
self.value_type = DictProtocol.DATA_TYPES[int(value_type)]
self.user = user.decode("utf8")
self.dict = self.table_map[dict_name.decode("ascii")]
logging.debug("Client {}.{} type {}, user {}, dict {}".format(
self.major, self.minor, self.value_type, self.user, dict_name))
async def process_lookup(self, key):
""" Process a dict lookup message
"""
logging.debug("Looking up {}".format(key))
        # Priv and shared keys are handled slightly differently
key_type, key = key.decode("utf8").split("/", 1)
try:
result = await self.dict.get(
key, ns=(self.user if key_type == "priv" else None)
)
if type(result) is str:
response = result.encode("utf8")
elif type(result) is bytes:
response = result
else:
response = json.dumps(result).encode("ascii")
return self.reply(b"O", response)
except KeyError:
return self.reply(b"N")
def process_begin(self, transaction_id):
""" Process a dict begin message
"""
self.transactions[transaction_id] = {}
def process_set(self, transaction_id, key, value):
""" Process a dict set message
"""
        # Nothing is actually set until everything is committed
self.transactions[transaction_id][key] = value
async def process_commit(self, transaction_id):
""" Process a dict commit message
"""
# Actually handle all set operations from the transaction store
results = []
for key, value in self.transactions[transaction_id].items():
logging.debug("Storing {}={}".format(key, value))
key_type, key = key.decode("utf8").split("/", 1)
result = await self.dict.set(
key, json.loads(value),
ns=(self.user if key_type == "priv" else None)
)
# Remove stored transaction
del self.transactions[transaction_id]
return self.reply(b"O", transaction_id)
def reply(self, command, *args):
logging.debug("Replying {} with {}".format(command, args))
self.transport.write(command)
self.transport.write(b"\t".join(map(tabescape, args)))
self.transport.write(b"\n")
@classmethod
def factory(cls, table_map):
""" Provide a protocol factory for a given map instance.
"""
return lambda: cls(table_map)
COMMANDS = {
ord("H"): process_hello,
ord("L"): process_lookup,
ord("B"): process_begin,
ord("C"): process_commit,
ord("S"): process_set
}
def tabescape(unescaped):
""" Escape a string using the specific Dovecot tabescape
See: https://github.com/dovecot/core/blob/master/src/lib/strescape.c
"""
return unescaped.replace(b"\x01", b"\x011")\
.replace(b"\x00", b"\x010")\
.replace(b"\t", b"\x01t")\
.replace(b"\n", b"\x01n")\
.replace(b"\r", b"\x01r")
def tabunescape(escaped):
""" Unescape a string using the specific Dovecot tabescape
See: https://github.com/dovecot/core/blob/master/src/lib/strescape.c
"""
return escaped.replace(b"\x01r", b"\r")\
.replace(b"\x01n", b"\n")\
.replace(b"\x01t", b"\t")\
.replace(b"\x010", b"\x00")\
.replace(b"\x011", b"\x01")
|
import numpy as np
import matplotlib.pyplot as plt
def ROC_plot(state, scores, threshold=None, color=None, legend_on=True,
label="predict", base_line=True, linewidth=1.0):
"""
    Plot the ROC curve and calculate the area under the curve (AUC) from the
    prediction scores and true labels.
    The optional threshold highlights a single operating point on the curve.
"""
# if color is None or color=="none":
# color = np.random.rand(3,1)
score_gap = np.unique(scores)
if len(score_gap) > 2000:
idx = np.random.permutation(len(score_gap))
score_gap = score_gap[idx[:2000]]
score_gap = np.append(np.min(score_gap)-0.1, score_gap)
score_gap = np.append(score_gap, np.max(score_gap)+0.1)
if threshold is not None:
thresholds = np.sort(np.append(threshold, score_gap))
_idx = np.where(scores >= threshold)[0]
_fpr = np.sum(state[_idx] == 0)/np.sum(state == 0).astype('float')
_tpr = np.sum(state[_idx] == 1)/np.sum(state == 1).astype('float')
# plt.scatter(_fpr, _tpr, marker="o", s=80, facecolors='none', edgecolors=color)
if color is None:
plt.plot(_fpr, _tpr, marker='o', markersize=8, mfc='none')
else:
plt.plot(_fpr, _tpr, marker='o', markersize=8, mec=color, mfc=color)
else:
thresholds = np.sort(score_gap)
#thresholds = np.arange(np.min(threshold), 1+2*threshold, threshold)
fpr, tpr = np.zeros(thresholds.shape[0]), np.zeros(thresholds.shape[0])
for i in range(thresholds.shape[0]):
idx = np.where(scores >= thresholds[i])[0]
fpr[i] = np.sum(state[idx] == 0)/np.sum(state == 0).astype('float')
tpr[i] = np.sum(state[idx] == 1)/np.sum(state == 1).astype('float')
auc = 0
for i in range(thresholds.shape[0]-1):
auc = auc + (fpr[i]-fpr[i+1]) * (tpr[i]+tpr[i+1]) / 2.0
if color is None:
plt.plot(fpr, tpr, "-", linewidth=linewidth,
label="%s: AUC=%.3f" %(label,auc))
else:
plt.plot(fpr, tpr, "-", linewidth=linewidth, color=color,
label="%s: AUC=%.3f" %(label,auc))
if base_line: plt.plot(np.arange(0,2), np.arange(0,2), "k--", linewidth=1.0,
label="random: AUC=0.500")
if legend_on:
plt.legend(loc="best", fancybox=True, ncol=1)
plt.xlabel("False Positive Rate (1-Specificity)")
plt.ylabel("True Positive Rate (Sensitivity)")
return fpr, tpr, thresholds, auc
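# Minimal usage sketch for ROC_plot (labels and scores below are hypothetical,
# purely for illustration):
#
#     state = np.array([1, 1, 1, 0, 0])             # true binary labels
#     scores = np.array([0.9, 0.8, 0.3, 0.6, 0.1])  # prediction scores
#     fpr, tpr, thresholds, auc = ROC_plot(state, scores)
#     plt.show()
#
# The AUC is accumulated with the trapezoidal rule over consecutive
# (fpr, tpr) points, so a denser grid of thresholds gives a smoother estimate.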
def PR_curve(state, scores, threshold=None, color=None, legend_on=True,
label="predict", base_line=False, linewidth=1.5):
"""
Plot the precision-recall curve and calculate the area under the curve (AUC)
from the prediction scores and true labels.
If a threshold is given, the corresponding operating point is highlighted
on the curve.
"""
###Test compare
# from sklearn.metrics import precision_recall_curve,average_precision_score
# precision, recall, thresholds = precision_recall_curve(labels, BF_tmp)
# ap = average_precision_score(labels, BF_tmp)
# plt.plot(recall, precision, label="%.3f" %(ap))
if color is None or color=="none":
color = np.random.rand(3,1)
score_gap = np.unique(scores)
if len(score_gap) > 2000:
idx = np.random.permutation(len(score_gap))
score_gap = score_gap[idx[:2000]]
#score_gap = np.append(np.min(score_gap)-0.1, score_gap)
#score_gap = np.append(score_gap, np.max(score_gap)+0.1)
if threshold is not None:
thresholds = np.sort(np.append(threshold, score_gap))
idx1 = np.where(scores >= threshold)[0]
idx2 = np.where(scores < threshold)[0]
FP = np.sum(state[idx1] == 0)
TP = np.sum(state[idx1] == 1)
FN = np.sum(state[idx2] == 1)
_pre = (TP+0.0)/(TP + FP)
_rec = (TP+0.0)/(TP + FN)
# plt.plot(_rec, _pre, marker='*', markersize=9, mec="k", mfc='none')
plt.plot(_rec, _pre, marker='o', markersize=8, mec=color, mfc=color)
else:
thresholds = np.sort(score_gap)
pre, rec = np.zeros(thresholds.shape[0]), np.zeros(thresholds.shape[0])
for i in range(thresholds.shape[0]):
idx1 = np.where(scores >= thresholds[i])[0]
idx2 = np.where(scores < thresholds[i])[0]
FP = np.sum(state[idx1] == 0)
TP = np.sum(state[idx1] == 1)
FN = np.sum(state[idx2] == 1)
pre[i] = (TP+0.0)/(TP + FP)
rec[i] = (TP+0.0)/(TP + FN)
auc = 0
_rec = np.append(1.0, rec)
_pre = np.append(0.0, pre)
_rec = np.append(_rec, 0.0)
_pre = np.append(_pre, 1.0)
for i in range(_rec.shape[0]-1):
auc = auc + (_rec[i]-_rec[i+1]) * (_pre[i]+_pre[i+1]) / 2.0
plt.plot(_rec, _pre, "-", color=color, linewidth=linewidth,
label="%s: AUC=%.3f" %(label,auc))
if base_line: plt.plot(np.arange(0,2), 1-np.arange(0,2), "k--",
linewidth=1.0, label="random: AUC=0.500")
if legend_on:
plt.legend(loc="best", fancybox=True, ncol=1)
plt.ylabel("Precision: TP/(TP+FP)")
plt.xlabel("Recall: TP/(TP+FN)")
return rec, pre, thresholds, auc
def ecdf_plot(data, x=None, **kwargs):
"""
Empirical plot for cumulative distribution function
Parameters
----------
data: array or list
data for the empirical CDF plot
x: array or list (optional)
the points to show the plot
**kwargs:
**kwargs for matplotlib.plot
Returns
-------
x: array
sorted x
ecdf_val: array
values of the empirical CDF evaluated at x
"""
data = np.sort(np.array(data))
if x is None:
x = data
else:
x = np.sort(np.array(x))
ecdf_val = np.zeros(len(x))
for i in range(len(x)):
ecdf_val[i] = np.mean(data < x[i])
plt.plot(x, ecdf_val, **kwargs)
return x, ecdf_val
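# Example use of ecdf_plot (hypothetical data); extra keyword arguments are
# forwarded to plt.plot:
#
#     samples = np.random.randn(1000)
#     x, ecdf_val = ecdf_plot(samples, color="C0", linestyle="-")
#     plt.show()
#
# Note that a strict inequality is used, so ecdf_val[i] is the fraction of the
# data strictly below x[i].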
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import json
import requests
from typing import List, Dict
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
USERNAME = demisto.params().get('credentials', {}).get('identifier')
PASSWORD = demisto.params().get('credentials', {}).get('password')
API_KEY = demisto.params().get('key')
SYSTEM_NAME = demisto.params().get('system_name')
# Remove trailing slash to prevent wrong URL path to service
SERVER = demisto.params()['url'][:-1] \
if (demisto.params()['url'] and demisto.params()['url'].endswith('/')) else demisto.params()['url']
# Should we use SSL
USE_SSL = not demisto.params().get('insecure', False)
# Service base URL
BASE_URL = SERVER + '/BeyondTrust/api/public/v3'
# Headers to be sent in requests
HEADERS = {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
SESSION = requests.session()
ERR_DICT = {
'4031': 'User does not have permission.',
'4034': 'Request is not yet approved.',
'4091': 'Conflicting request exists. This user or another user has already requested a password for the'
' specified account.'
}
''' HELPER FUNCTIONS '''
def http_request(method: str, suffix_url: str, data=None):
"""
A wrapper around the requests library that sends the request and handles
connection errors and non-success responses.
Parameters
----------
method : str
HTTP method, e.g. 'GET', 'POST' ... etc.
suffix_url : str
API endpoint.
data: str
Data to be sent in a 'POST' request.
Returns
-------
Response from having made the request.
"""
url = BASE_URL + suffix_url
try:
res = SESSION.request(
method,
url,
verify=USE_SSL,
data=data, # type: ignore
headers=HEADERS
)
except requests.exceptions.SSLError:
ssl_error = 'Could not connect to BeyondTrust: Could not verify certificate.'
return return_error(ssl_error)
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout,
requests.exceptions.TooManyRedirects, requests.exceptions.RequestException) as e:
connection_error = f'Could not connect to BeyondTrust: {e}'
return return_error(connection_error)
# Handle error responses gracefully
if res.status_code not in {200, 201, 204}:
txt = res.text
if txt in ERR_DICT:
txt = ERR_DICT[txt]
elif str(res.status_code) in ERR_DICT:
    txt = ERR_DICT[str(res.status_code)]
elif res.status_code == 401:
txt = 'Wrong credentials.'
return_error(f'Error in API call to BeyondTrust Integration [{res.status_code}] - {txt}')
try:
return res.json()
except ValueError:
return None
def signin():
"""
Starts a session in BeyondTrust
"""
suffix_url = '/Auth/SignAppin'
header = {'Authorization': f'PS-Auth key={API_KEY}; runas={USERNAME}; pwd=[{PASSWORD}];'}
SESSION.headers.update(header)
http_request('POST', suffix_url)
def signout():
"""
Ends a session
"""
suffix_url = '/auth/signout'
http_request('POST', suffix_url)
''' COMMANDS + REQUESTS FUNCTIONS '''
def get_managed_accounts_request():
"""
Request for all managed accounts
"""
suffix_url = '/managedaccounts'
response = http_request('GET', suffix_url)
return response
def get_managed_accounts():
"""
Returns a list of Managed Accounts that can be requested by the current user.
"""
data = []
headers = ['AccountName', 'AccountID', 'AssetName', 'AssetID', 'DomainName', 'LastChangeDate', 'NextChangeDate']
managed_accounts = get_managed_accounts_request()
for account in managed_accounts:
data.append({
'LastChangeDate': account.get('LastChangeDate'),
'NextChangeDate': account.get('NextChangeDate'),
'AssetID': account.get('SystemId'),
'AssetName': account.get('SystemName'),
'DomainName': account.get('DomainName'),
'AccountID': account.get('AccountId'),
'AccountName': account.get('AccountName')
})
entry_context = {'BeyondTrust.Account(val.AccountId && val.AccountId === obj.AccountId)': managed_accounts}
return_outputs(tableToMarkdown('BeyondTrust Managed Accounts', data, headers, removeNull=True), entry_context,
managed_accounts)
def get_managed_systems_request() -> List[Dict]:
"""
Request for all managed systems
"""
suffix_url = '/managedsystems'
response = http_request('GET', suffix_url)
return response
def get_managed_systems():
"""
Returns a list of Managed Systems.
"""
data = []
managed_systems = get_managed_systems_request()
for managed_system in managed_systems:
data.append({
'ManagedAssetID': managed_system.get('ManagedSystemID'),
'ChangeFrequencyDays': managed_system.get('ChangeFrequencyDays'),
'AssetID': managed_system.get('AssetID'),
'DatabaseID': managed_system.get('DatabaseID'),
'DirectoryID': managed_system.get('DirectoryID'),
'AssetName': managed_system.get('SystemName'),
'PlatformID': managed_system.get('PlatformID'),
'Port': managed_system.get('Port')
})
entry_context = {'BeyondTrust.System(val.ManagedAssetID === obj.ManagedAssetID)': managed_systems}
return_outputs(tableToMarkdown('BeyondTrust Managed Systems', data, removeNull=True), entry_context,
managed_systems)
def create_release_request(data: str):
"""
Request for credentials release
"""
suffix_url = '/requests'
response = http_request('POST', suffix_url, data=data)
return response
def create_release():
"""
Creates a new release request.
Retrieves the credentials for an approved and active (not expired) credentials release request.
demisto parameter: (string) access_type
The type of access requested (View, RDP, SSH). Default is "View".
demisto parameter: (int) system_id
ID of the Managed System to request.
demisto parameter: (int) account_id
ID of the Managed Account to request.
demisto parameter: (int) duration_minutes
The request duration (in minutes).
demisto parameter: (string) reason
The reason for the request.
demisto parameter: (int) access_policy_schedule_id
The Schedule ID of an Access Policy to use for the request. If omitted, automatically selects the best schedule.
demisto parameter: (string) conflict_option
The conflict resolution option to use if an existing request is found for the same user,
system, and account ("reuse" or "renew").
"""
access_type = demisto.args().get('access_type')
system_id = demisto.args().get('system_id')
account_id = demisto.args().get('account_id')
duration_minutes = demisto.args().get('duration_minutes')
reason = demisto.args().get('reason')
conflict_option = demisto.args().get('conflict_option')
data = {
'SystemId': system_id,
'AccountId': account_id,
'DurationMinutes': duration_minutes
}
if access_type:
data['AccessType'] = access_type
if reason:
data['Reason'] = reason
if conflict_option:
data['ConflictOption'] = conflict_option
request = create_release_request(str(data))
request_id = str(request)
credentials = get_credentials_request(request_id)
response = {
'RequestID': request_id,
'Password': credentials
}
entry_context = {'BeyondTrust.Request(val.AccountID === obj.AccountID)': createContext(response)}
return_outputs(tableToMarkdown('The new release was created successfully.', response), entry_context, response)
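# Illustrative payload assembled by create_release() above (values are
# hypothetical): {'SystemId': '12', 'AccountId': '34', 'DurationMinutes': '60',
# 'AccessType': 'View', 'Reason': 'maintenance'}. The dict is passed to
# create_release_request(), and the returned request ID is then used to fetch
# the released password via get_credentials_request().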
def get_credentials_request(request_id: str):
"""
Request for specific credentials
"""
suffix_url = '/credentials/' + request_id
response = http_request('GET', suffix_url)
return response
def get_credentials():
"""
Retrieves the credentials for an approved and active (not expired) credentials release request.
demisto parameter: (int) request_id
ID of the Request for which to retrieve the credentials
"""
request_id = demisto.args().get('request_id')
request = str(request_id)
response = get_credentials_request(request)
demisto.results('The credentials for BeyondTrust request: ' + response)
def check_in_credentials_request(request_id: str, data: dict):
"""
Request for check-in credentials
"""
suffix_url = f'/Requests/{request_id}/Checkin'
response = http_request('PUT', suffix_url, data=json.dumps(data))
return response
def check_in_credentials():
"""
Checks-in/releases a request before it has expired.
demisto parameter: (int) request_id
ID of the request to release.
demisto parameter: (string) reason
A reason or comment why the request is being released.
"""
request_id = demisto.args().get('request_id')
reason = demisto.args().get('reason')
data = {'Reason': reason if reason else ''}
check_in_credentials_request(request_id, data)
demisto.results('The release was successfully checked-in/released')
def change_credentials_request(account_id: str, data: dict):
"""
Request to change credentials
"""
suffix_url = f'/ManagedAccounts/{account_id}/Credentials'
response = http_request('PUT', suffix_url, data=json.dumps(data))
return response
def change_credentials():
"""
Updates the credentials for a Managed Account, optionally applying the change to the Managed System.
demisto parameter: (int) account_id
ID of the account for which to set the credentials.
demisto parameter: (string) password
The new password to set. If not given, generates a new, random password.
demisto parameter: (string) public_key
The new public key to set on the host. This is required if PrivateKey is given and updateSystem=true.
demisto parameter: (string) private_key
The private key to set (provide Passphrase if encrypted).
demisto parameter: (string) pass_phrase
The passphrase to use for an encrypted private key.
demisto parameter: (bool) update_system
Whether to update the credentials on the referenced system.
"""
account_id = demisto.args().get('account_id')
password = demisto.args().get('password')
public_key = demisto.args().get('public_key')
private_key = demisto.args().get('private_key')
pass_phrase = demisto.args().get('pass_phrase')
update_system = demisto.args().get('update_system')
data = {
'AccountId': account_id
}
if password:
data['Password'] = password
if private_key:
if public_key and update_system is True:
data['PrivateKey'] = private_key
data['PublicKey'] = public_key
else:
return_error('Missing public key')
if pass_phrase:
data['Passphrase'] = pass_phrase
change_credentials_request(account_id, data)
demisto.results('The password has been changed')
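# Illustrative payload for change_credentials() (hypothetical values):
# {'AccountId': '34', 'Password': 'N3wP@ss', 'Passphrase': 'secret'}. When a
# private key is supplied, a matching public key plus update_system must also
# be provided, otherwise the command stops with 'Missing public key'.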
def fetch_credentials():
"""
Returns: Account's credentials
"""
credentials = []
identifier = demisto.args().get('identifier')
duration_minutes = 1
account_info = get_managed_accounts_request()
for account in account_info:
account_name = account.get('AccountName')
system_name = account.get('SystemName')
if SYSTEM_NAME and system_name != SYSTEM_NAME:
continue
item = {
'SystemId': account.get('SystemId'),
'AccountId': account.get('AccountId'),
'DurationMinutes': duration_minutes,
'ConflictOption': 'reuse'
}
release_id = create_release_request(str(item))
password = get_credentials_request(str(release_id))
credentials.append({
'user': account_name,
'password': password,
'name': system_name + '_' + account_name
})
if identifier:
credentials = list(filter(lambda c: c.get('name', '') == identifier, credentials))
demisto.debug("Amount of credentials for identifier: {} is {}".format(identifier, len(credentials)))
demisto.credentials(credentials)
''' COMMANDS MANAGER / SWITCH PANEL '''
LOG('Command being called is %s' % (demisto.command()))
try:
handle_proxy()
signin()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
get_managed_accounts_request()
demisto.results('ok')
elif demisto.command() == 'beyondtrust-get-managed-accounts':
get_managed_accounts()
elif demisto.command() == 'beyondtrust-get-managed-systems':
get_managed_systems()
elif demisto.command() == 'beyondtrust-create-release-request':
create_release()
elif demisto.command() == 'beyondtrust-get-credentials':
get_credentials()
elif demisto.command() == 'beyondtrust-check-in-credentials':
check_in_credentials()
elif demisto.command() == 'beyondtrust-change-credentials':
change_credentials()
elif demisto.command() == 'fetch-credentials':
fetch_credentials()
# Log exceptions
except Exception as e:
LOG(str(e))
LOG.print_log()
raise
finally:
signout()
|
import unittest
from awstin.dynamodb.orm import Attr, DynamoModel, Key, list_append
from awstin.dynamodb.testing import temporary_dynamodb_table
class MyModel(DynamoModel):
_table_name_ = "temp"
pkey = Key()
an_attr = Attr()
another_attr = Attr()
set_attr = Attr()
third_attr = Attr()
def __eq__(self, other):
if isinstance(other, MyModel):
return (
(self.pkey == other.pkey)
& (self.an_attr == other.an_attr)
& (self.another_attr == other.another_attr)
& (self.set_attr == other.set_attr)
& (self.third_attr == other.third_attr)
)
return NotImplemented
class TestUpdateItem(unittest.TestCase):
def setUp(self):
self.temp_table = temporary_dynamodb_table(MyModel, "pkey")
def test_update_set_value(self):
update_expression = MyModel.an_attr.set(100)
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr=100,
)
self.assertEqual(result, expected)
def test_update_set_nested_map(self):
update_expression = MyModel.an_attr.a.set("e")
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr={"a": "b", "c": "d"},
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr={"a": "e", "c": "d"},
)
self.assertEqual(result, expected)
def test_update_set_deep_nested_map(self):
update_expression = MyModel.an_attr.a.l.set("e")
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr={"a": {"l": "m", "n": "o"}, "c": "d"},
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr={"a": {"l": "e", "n": "o"}, "c": "d"},
)
self.assertEqual(result, expected)
def test_update_set_nested_to_nested_map(self):
update_expression = MyModel.an_attr.a.l.set(MyModel.an_attr.c)
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr={"a": {"l": "m", "n": "o"}, "c": "d"},
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr={"a": {"l": "d", "n": "o"}, "c": "d"},
)
self.assertEqual(result, expected)
def test_update_set_nested_list(self):
update_expression = MyModel.an_attr[1].set("e")
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=["a", "b", "c"],
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr=["a", "e", "c"],
)
self.assertEqual(result, expected)
def test_update_set_nested_to_nested_list(self):
update_expression = MyModel.an_attr[1].set(MyModel.an_attr[0])
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=["a", "b", "c"],
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr=["a", "a", "c"],
)
self.assertEqual(result, expected)
def test_update_set_nested_to_nested_complex(self):
# Pepper in some reserved keywords as well to test
update_expression = MyModel.an_attr[1].name.set(
MyModel.another_attr.BINARY[1]
) & MyModel.another_attr.CASCADE[0].set(MyModel.an_attr[0].atomic)
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=[{"atomic": "b"}, {"name": "d"}, {"both": "e"}],
another_attr={
"AGENT": [1, 2, 3],
"BINARY": [4, 5, 6],
"CASCADE": [7, 8, 9],
},
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr=[{"atomic": "b"}, {"name": 5}, {"both": "e"}],
another_attr={
"AGENT": [1, 2, 3],
"BINARY": [4, 5, 6],
"CASCADE": ["b", 8, 9],
},
)
self.assertEqual(result, expected)
def test_update_set_attr(self):
update_expression = MyModel.an_attr.set(MyModel.another_attr)
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
another_attr=22,
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr=22,
another_attr=22,
)
self.assertEqual(result, expected)
def test_update_set_attr_plus_value(self):
update_expression = MyModel.an_attr.set(MyModel.an_attr + 100)
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr=111,
)
self.assertEqual(result, expected)
def test_update_set_attr_minus_value(self):
update_expression = MyModel.an_attr.set(100 - MyModel.another_attr)
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
another_attr=10,
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr=90,
another_attr=10,
)
self.assertEqual(result, expected)
def test_update_set_attr_plus_attr(self):
update_expression = MyModel.an_attr.set(MyModel.an_attr + MyModel.another_attr)
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
another_attr=22,
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr=33,
another_attr=22,
)
self.assertEqual(result, expected)
def test_update_multiple_sets(self):
update_expression = MyModel.an_attr.set(
MyModel.an_attr + MyModel.another_attr
) & MyModel.another_attr.set(50)
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
another_attr=22,
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr=33,
another_attr=50,
)
self.assertEqual(result, expected)
def test_update_remove(self):
update_expression = MyModel.an_attr.remove()
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
another_attr=22,
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
another_attr=22,
)
self.assertEqual(result, expected)
def test_update_multiple_remove(self):
update_expression = MyModel.an_attr.remove() & MyModel.another_attr.remove()
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
another_attr=22,
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
)
self.assertEqual(result, expected)
def test_update_delete_int_set(self):
update_expression = MyModel.an_attr.delete({2, 3, 4, 5})
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr={1, 3, 5, 7},
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr={1, 7},
)
self.assertEqual(result, expected)
def test_update_delete_float_set(self):
update_expression = MyModel.an_attr.delete({2.2, 3.3, 4.4, 5.5})
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr={1.1, 3.3, 5.5, 7.7},
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr={1.1, 7.7},
)
self.assertEqual(result, expected)
def test_update_delete_string_set(self):
update_expression = MyModel.an_attr.delete({"b", "c", "d", "e"})
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr={"a", "c", "e", "g"},
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr={"a", "g"},
)
self.assertEqual(result, expected)
def test_update_delete_all(self):
update_expression = MyModel.an_attr.delete({"a", "c", "e", "g"})
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr={"a", "c", "e", "g"},
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
)
self.assertEqual(result, expected)
def test_update_delete_multiple(self):
update_expression = MyModel.an_attr.delete(
{"b", "c", "d", "e"}
) & MyModel.another_attr.delete({1, 2, 3})
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr={"a", "c", "e", "g"},
another_attr={1, 2, 3},
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr={"a", "g"},
)
self.assertEqual(result, expected)
def test_update_add_numerical(self):
update_expression = MyModel.an_attr.add(50)
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr=61,
)
self.assertEqual(result, expected)
def test_update_add_set(self):
update_expression = MyModel.an_attr.add({1, 2, 3})
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr={2, 3, 4},
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr={1, 2, 3, 4},
)
self.assertEqual(result, expected)
def test_update_add_nonexistent_numerical(self):
update_expression = MyModel.another_attr.add(50)
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr=11,
another_attr=50,
)
self.assertEqual(result, expected)
def test_update_add_nonexistent_set(self):
update_expression = MyModel.another_attr.add({1, 2, 3})
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr=11,
another_attr={1, 2, 3},
)
self.assertEqual(result, expected)
def test_update_add_multiple(self):
update_expression = MyModel.another_attr.add({1, 2, 3}) & MyModel.an_attr.add(
20
)
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
)
table.put_item(item)
result = table.update_item("bbb", update_expression)
expected = MyModel(
pkey="bbb",
an_attr=31,
another_attr={1, 2, 3},
)
self.assertEqual(result, expected)
def test_update_many(self):
update_expression = (
MyModel.an_attr.set(5 - MyModel.another_attr)
& MyModel.third_attr.add(100)
& MyModel.another_attr.remove()
& MyModel.set_attr.delete({2, 3})
)
with self.temp_table as table:
item = MyModel(
pkey="aaa",
an_attr=11,
another_attr=22,
set_attr={1, 3, 4, 5},
third_attr=33,
)
table.put_item(item)
result = table.update_item("aaa", update_expression)
expected = MyModel(
pkey="aaa",
an_attr=5 - item.another_attr,
third_attr=133,
set_attr={1, 4, 5},
)
self.assertEqual(result, expected)
got_item = table["aaa"]
self.assertEqual(result, got_item)
def test_update_with_false_conditions(self):
update_expression = MyModel.an_attr.add(50) & MyModel.another_attr.remove()
condition_expression = MyModel.an_attr > 11
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
another_attr=100,
)
table.put_item(item)
result = table.update_item("bbb", update_expression, condition_expression)
self.assertIsNone(result)
# Item unchanged in the table
self.assertEqual(table["bbb"], item)
def test_update_with_conditions(self):
update_expression = MyModel.an_attr.add(50) & MyModel.another_attr.remove()
condition_expression = MyModel.an_attr == 11
with self.temp_table as table:
item = MyModel(
pkey="bbb",
an_attr=11,
another_attr=100,
)
table.put_item(item)
result = table.update_item("bbb", update_expression, condition_expression)
expected = MyModel(
pkey="bbb",
an_attr=61,
)
self.assertEqual(result, expected)
def test_if_not_exists_applied(self):
update_expression = MyModel.an_attr.set(MyModel.an_attr.if_not_exists(10))
with self.temp_table as table:
item = MyModel(
pkey="aaa",
another_attr=999,
)
table.put_item(item)
result = table.update_item("aaa", update_expression)
expected = MyModel(
pkey="aaa",
an_attr=10,
another_attr=999,
)
self.assertEqual(result, expected)
def test_if_not_exists_applied_attr(self):
update_expression = MyModel.an_attr.set(
MyModel.an_attr.if_not_exists(MyModel.another_attr)
)
with self.temp_table as table:
item = MyModel(
pkey="aaa",
another_attr=50,
)
table.put_item(item)
result = table.update_item("aaa", update_expression)
expected = MyModel(
pkey="aaa",
an_attr=50,
another_attr=50,
)
self.assertEqual(result, expected)
def test_if_not_exists_not_applied(self):
update_expression = MyModel.an_attr.set(MyModel.an_attr.if_not_exists(10))
with self.temp_table as table:
item = MyModel(
pkey="aaa",
an_attr=777,
another_attr=999,
)
table.put_item(item)
result = table.update_item("aaa", update_expression)
expected = MyModel(
pkey="aaa",
an_attr=777,
another_attr=999,
)
self.assertEqual(result, expected)
def test_list_append_literal_literal(self):
update_expression = MyModel.an_attr.set(
list_append([1.1, 2.2, 3.3], [4.4, 5.5, 6.6])
)
with self.temp_table as table:
item = MyModel(
pkey="aaa",
an_attr=777,
another_attr=999,
)
table.put_item(item)
result = table.update_item("aaa", update_expression)
expected = MyModel(
pkey="aaa",
an_attr=[1.1, 2.2, 3.3, 4.4, 5.5, 6.6],
another_attr=999,
)
self.assertEqual(result, expected)
def test_list_append_literal_attr(self):
update_expression = MyModel.an_attr.set(
list_append([1.1, 2.2, 3.3], MyModel.another_attr)
)
with self.temp_table as table:
item = MyModel(
pkey="aaa",
an_attr=777,
another_attr=[999],
)
table.put_item(item)
result = table.update_item("aaa", update_expression)
expected = MyModel(
pkey="aaa",
an_attr=[1.1, 2.2, 3.3, 999],
another_attr=[999],
)
self.assertEqual(result, expected)
def test_list_append_attr_literal(self):
update_expression = MyModel.an_attr.set(
list_append(MyModel.another_attr, [4.4, 5.5, 6.6])
)
with self.temp_table as table:
item = MyModel(
pkey="aaa",
an_attr=777,
another_attr=[999],
)
table.put_item(item)
result = table.update_item("aaa", update_expression)
expected = MyModel(
pkey="aaa",
an_attr=[999, 4.4, 5.5, 6.6],
another_attr=[999],
)
self.assertEqual(result, expected)
def test_list_append_attr_attr(self):
update_expression = MyModel.an_attr.set(
list_append(MyModel.an_attr, MyModel.another_attr)
)
with self.temp_table as table:
item = MyModel(
pkey="aaa",
an_attr=["a", "b"],
another_attr=["c", "d"],
)
table.put_item(item)
result = table.update_item("aaa", update_expression)
expected = MyModel(
pkey="aaa",
an_attr=["a", "b", "c", "d"],
another_attr=["c", "d"],
)
self.assertEqual(result, expected)
|
from dnnv.properties import *
import numpy as np
N = Network("N")
x = Image(__path__.parent / "dave_small_image9.npy")[None] / 255.0
input_layer = 0
output_layer = -2
epsilon = Parameter("epsilon", type=float) / 255.0
gamma = Parameter("gamma", type=float, default=15.0) * np.pi / 180
output = N[input_layer:](x)
gamma_lb = np.tan(max(-np.pi / 2, (output - gamma) / 2))
gamma_ub = np.tan(min(np.pi / 2, (output + gamma) / 2))
Forall(
x_,
Implies(
(0 <= x_ <= 1) & ((x - epsilon) < x_ < (x + epsilon)),
(gamma_lb < N[input_layer:output_layer](x_) < gamma_ub),
),
)
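# Rough reading of the property above: for every perturbed input x_ that stays
# in the valid pixel range [0, 1] and within an epsilon-ball (in scaled pixel
# units) around the original image x, the output of the network up to layer
# `output_layer` must stay between bounds derived from shifting the original
# prediction by +/- gamma (converted to radians). Epsilon and gamma are
# supplied externally as DNNV Parameter values, gamma defaulting to 15 degrees.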
|
# Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""The transformseq module."""
from . import types, numeric, util, transform, element
from .elementseq import References
import abc, itertools, operator, numpy
class Transforms(types.Singleton):
'''Abstract base class for a sequence of :class:`~nutils.transform.TransformItem` tuples.
This class resembles to some extent a plain :class:`tuple`: the
class supports indexing, iterating and has an :meth:`index` method. In
addition the class supports the :meth:`index_with_tail` method which can be
used to find the index of a transform given the transform plus any number of
child transformations.
The transforms in this sequence must satisfy the following condition: any
transform must not start with any other transform in the same sequence.
Parameters
----------
fromdims : :class:`int`
The number of dimensions all transforms in this sequence map from.
Attributes
----------
fromdims : :class:`int`
The number of dimensions all transforms in this sequence map from.
Notes
-----
Subclasses must implement :meth:`__getitem__`, :meth:`__len__` and
:meth:`index_with_tail`.
'''
__slots__ = 'fromdims'
@types.apply_annotations
def __init__(self, fromdims:types.strictint):
self.fromdims = fromdims
super().__init__()
@abc.abstractmethod
def __len__(self):
'''Return ``len(self)``.'''
raise NotImplementedError
def __getitem__(self, index):
'''Return ``self[index]``.'''
if numeric.isint(index):
raise NotImplementedError
elif isinstance(index, slice):
index = range(len(self))[index]
if index == range(len(self)):
return self
if index.step < 0:
raise NotImplementedError('reordering the sequence is not yet implemented')
return MaskedTransforms(self, numpy.arange(index.start, index.stop, index.step))
elif numeric.isintarray(index):
if index.ndim != 1:
raise IndexError('invalid index')
if numpy.any(numpy.less(index, 0)) or numpy.any(numpy.greater_equal(index, len(self))):
raise IndexError('index out of range')
dindex = numpy.diff(index)
if len(index) == len(self) and (len(self) == 0 or (index[0] == 0 and numpy.all(numpy.equal(dindex, 1)))):
return self
if numpy.any(numpy.equal(dindex, 0)):
raise ValueError('repeating an element is not allowed')
if not numpy.all(numpy.greater(dindex, 0)):
s = numpy.argsort(index)
return ReorderedTransforms(self[index[s]], numpy.argsort(s))
if len(index) == 0:
return EmptyTransforms(self.fromdims)
if len(index) == len(self):
return self
return MaskedTransforms(self, index)
elif numeric.isboolarray(index):
if index.shape != (len(self),):
raise IndexError('mask has invalid shape')
if not numpy.any(index):
return EmptyTransforms(self.fromdims)
if numpy.all(index):
return self
index, = numpy.where(index)
return MaskedTransforms(self, index)
else:
raise IndexError('invalid index')
@abc.abstractmethod
def index_with_tail(self, trans):
'''Return the index of ``trans[:n]`` and the tail ``trans[n:]``.
Find the index of a transform in this sequence given the transform plus any
number of child transforms. In other words: find ``index`` such that
``self[index] == trans[:n]`` for some ``n``. Note that there is either
exactly one ``index`` satisfying this condition, or none, due to the
restrictions of the transforms in a :class:`Transforms` object.
Parameters
----------
trans : :class:`tuple` of :class:`nutils.transform.TransformItem` objects
The transform to find up to a possibly empty tail.
Returns
-------
index : :class:`int`
The index of ``trans`` without tail in this sequence.
tail : :class:`tuple` of :class:`nutils.transform.TransformItem` objects
The tail: ``trans[len(self[index]):]``.
Raises
------
:class:`ValueError`
if ``trans`` is not found.
Example
-------
Consider the following plain sequence of two shift transforms:
>>> from nutils.transform import Shift, Scale
>>> transforms = PlainTransforms([(Shift([0.]),), (Shift([1.]),)], fromdims=1)
Calling :meth:`index_with_tail` with the first transform gives index ``0``
and no tail:
>>> transforms.index_with_tail((Shift([0.]),))
(0, ())
Calling with an additional scale gives:
>>> transforms.index_with_tail((Shift([0.]), Scale(0.5, [0.])))
(0, (Scale([0]+0.5*x),))
'''
raise NotImplementedError
def __iter__(self):
'''Implement ``iter(self)``.'''
for i in range(len(self)):
yield self[i]
def index(self, trans):
'''Return the index of ``trans``.
Parameters
----------
trans : :class:`tuple` of :class:`nutils.transform.TransformItem` objects
Returns
-------
index : :class:`int`
The index of ``trans`` in this sequence.
Raises
------
:class:`ValueError`
if ``trans`` is not found.
Example
-------
Consider the following plain sequence of two shift transforms:
>>> from nutils.transform import Shift, Scale
>>> transforms = PlainTransforms([(Shift([0.]),), (Shift([1.]),)], fromdims=1)
Calling :meth:`index` with the first transform gives index ``0``:
>>> transforms.index((Shift([0.]),))
0
Calling with an additional scale raises an exception, because the transform
is not present in ``transforms``.
>>> transforms.index((Shift([0.]), Scale(0.5, [0.])))
Traceback (most recent call last):
...
ValueError: (Shift([0]+x), Scale([0]+0.5*x)) not in sequence of transforms
'''
index, tail = self.index_with_tail(trans)
if tail:
raise ValueError('{!r} not in sequence of transforms'.format(trans))
return index
def contains(self, trans):
'''Return ``trans`` in ``self``.
Parameters
----------
trans : :class:`tuple` of :class:`nutils.transform.TransformItem` objects
Returns
-------
:class:`bool`
``True`` if ``trans`` is contained in this sequence of transforms, i.e.
if :meth:`index` returns without :class:`ValueError`, otherwise
``False``.
'''
try:
self.index(trans)
except ValueError:
return False
else:
return True
__contains__ = contains
def contains_with_tail(self, trans):
'''Return ``trans[:n]`` in ``self`` for some ``n``.
Parameters
----------
trans : :class:`tuple` of :class:`nutils.transform.TransformItem` objects
Returns
-------
:class:`bool`
``True`` if a head of ``trans`` is contained in this sequence
of transforms, i.e. if :meth:`index_with_tail` returns without
:class:`ValueError`, otherwise ``False``.
'''
try:
self.index_with_tail(trans)
except ValueError:
return False
else:
return True
def refined(self, references):
'''Return the sequence of refined transforms given ``references``.
Parameters
----------
references : :class:`~nutils.elementseq.References`
A sequence of references matching this sequence of transforms.
Returns
-------
:class:`Transforms`
The sequence of refined transforms::
(trans+(ctrans,) for trans, ref in zip(self, references) for ctrans in ref.child_transforms)
'''
if references.isuniform:
return UniformDerivedTransforms(self, references[0], 'child_transforms', self.fromdims)
else:
return DerivedTransforms(self, references, 'child_transforms', self.fromdims)
def edges(self, references):
'''Return the sequence of edge transforms given ``references``.
Parameters
----------
references : :class:`~nutils.elementseq.References`
A sequence of references matching this sequence of transforms.
Returns
-------
:class:`Transforms`
The sequence of edge transforms::
(trans+(etrans,) for trans, ref in zip(self, references) for etrans in ref.edge_transforms)
'''
if references.isuniform:
return UniformDerivedTransforms(self, references[0], 'edge_transforms', self.fromdims-1)
else:
return DerivedTransforms(self, references, 'edge_transforms', self.fromdims-1)
def __add__(self, other):
'''Return ``self+other``.'''
if not isinstance(other, Transforms) or self.fromdims != other.fromdims:
return NotImplemented
return chain((self, other), self.fromdims)
def unchain(self):
'''Iterator of unchained :class:`Transforms` items.
Yields
------
:class:`Transforms`
Unchained items.
'''
yield self
stricttransforms = types.strict[Transforms]
class EmptyTransforms(Transforms):
'''An empty sequence.'''
def __getitem__(self, index):
if not numeric.isint(index):
return super().__getitem__(index)
raise IndexError('index out of range')
def __len__(self):
return 0
def index_with_tail(self, trans):
raise ValueError
def index(self, trans):
raise ValueError
def contains_with_tail(self, trans):
return False
def contains(self, trans):
return False
__contains__ = contains
class PlainTransforms(Transforms):
'''A general purpose implementation of :class:`Transforms`.
Use this class only if there exists no specific implementation of
:class:`Transforms` for the transforms at hand.
Parameters
----------
transforms : :class:`tuple` of :class:`~nutils.transform.TransformItem` objects
The sequence of transforms.
fromdims : :class:`int`
The number of dimensions all ``transforms`` map from.
'''
__slots__ = '_transforms', '_sorted', '_indices'
@types.apply_annotations
def __init__(self, transforms:types.tuple[transform.canonical], fromdims:types.strictint):
transforms_fromdims = set(trans[-1].fromdims for trans in transforms)
if not (transforms_fromdims <= {fromdims}):
raise ValueError('expected transforms with fromdims={}, but got {}'.format(fromdims, transforms_fromdims))
self._transforms = transforms
self._sorted = numpy.empty([len(self._transforms)], dtype=object)
for i, trans in enumerate(self._transforms):
self._sorted[i] = tuple(map(id, trans))
self._indices = numpy.argsort(self._sorted)
self._sorted = self._sorted[self._indices]
super().__init__(fromdims)
def __iter__(self):
return iter(self._transforms)
def __getitem__(self, index):
if not numeric.isint(index):
return super().__getitem__(index)
return self._transforms[numeric.normdim(len(self), index)]
def __len__(self):
return len(self._transforms)
def index_with_tail(self, trans):
trans, orig_trans = transform.promote(trans, self.fromdims), trans
transid_array = numpy.empty((), dtype=object)
transid_array[()] = transid = tuple(map(id, trans))
i = numpy.searchsorted(self._sorted, transid_array, side='right') - 1
if i < 0:
raise ValueError('{!r} not in sequence of transforms'.format(orig_trans))
match = self._sorted[i]
if transid[:len(match)] != match:
raise ValueError('{!r} not in sequence of transforms'.format(orig_trans))
return self._indices[i], trans[len(match):]
class IdentifierTransforms(Transforms):
'''A sequence of :class:`nutils.transform.Identifier` singletons.
Every identifier is instantiated with three arguments: the dimension, the
name string, and an integer index matching its position in the sequence.
Parameters
----------
ndims : :class:`int`
Dimension of the transformation.
name : :class:`str`
Identifying name string.
length : :class:`int`
Length of the sequence.
'''
__slots__ = '_name', '_length'
@types.apply_annotations
def __init__(self, ndims:types.strictint, name:str, length:int):
self._name = name
self._length = length
super().__init__(ndims)
def __getitem__(self, index):
if not numeric.isint(index):
return super().__getitem__(index)
index = int(index) # make sure that index is a Python integer rather than numpy.intxx
return transform.Identifier(self.fromdims, (self._name, numeric.normdim(self._length, index))),
def __len__(self):
return self._length
def index_with_tail(self, trans):
root = trans[0]
if root.fromdims == self.fromdims and isinstance(root, transform.Identifier) and isinstance(root.token, tuple) and len(root.token) == 2 and root.token[0] == self._name and 0 <= root.token[1] < self._length:
return root.token[1], trans[1:]
raise ValueError
class Axis(types.Singleton):
'''Base class for axes of :class:`~nutils.topology.StructuredTopology`.'''
__slots__ = 'i', 'j', 'mod'
def __init__(self, i:types.strictint, j:types.strictint, mod:types.strictint):
assert i <= j
self.i = i
self.j = j
self.mod = mod
def __len__(self):
return self.j - self.i
def unmap(self, index):
ielem = index - self.i
if self.mod:
ielem %= self.mod
if not 0 <= ielem < len(self):
raise ValueError
return ielem
def map(self, ielem):
assert 0 <= ielem < len(self)
index = self.i + ielem
if self.mod:
index %= self.mod
return index
class DimAxis(Axis):
__slots__ = 'isperiodic'
isdim = True
@types.apply_annotations
def __init__(self, i:types.strictint, j:types.strictint, mod:types.strictint, isperiodic:bool):
super().__init__(i, j, mod)
self.isperiodic = isperiodic
@property
def refined(self):
return DimAxis(self.i*2, self.j*2, self.mod*2, self.isperiodic)
def opposite(self, ibound):
return self
def getitem(self, s):
if not isinstance(s, slice):
raise NotImplementedError
if s == slice(None):
return self
start, stop, stride = s.indices(self.j - self.i)
assert stride == 1
assert stop > start
return DimAxis(self.i+start, self.i+stop, mod=self.mod, isperiodic=False)
def boundaries(self, ibound):
if not self.isperiodic:
yield IntAxis(self.i, self.i+1, self.mod, ibound, side=False)
yield IntAxis(self.j-1, self.j, self.mod, ibound, side=True)
def intaxis(self, ibound, side):
return IntAxis(self.i-side+1-self.isperiodic, self.j-side, self.mod, ibound, side)
class IntAxis(Axis):
__slots__ = 'ibound', 'side'
isdim = False
@types.apply_annotations
def __init__(self, i:types.strictint, j:types.strictint, mod:types.strictint, ibound:types.strictint, side:bool):
super().__init__(i, j, mod)
self.ibound = ibound
self.side = side
@property
def refined(self):
return IntAxis(self.i*2+self.side, self.j*2+self.side-1, self.mod*2, self.ibound, self.side)
def opposite(self, ibound):
return IntAxis(self.i+2*self.side-1, self.j+2*self.side-1, self.mod, self.ibound, not self.side) if ibound == self.ibound else self
def boundaries(self, ibound):
return ()
class StructuredTransforms(Transforms):
'''Transforms sequence for :class:`~nutils.topology.StructuredTopology`.
Parameters
----------
root : :class:`~nutils.transform.TransformItem`
Root transform of the :class:`~nutils.topology.StructuredTopology`.
axes : :class:`tuple` of :class:`Axis` objects
The axes defining the :class:`~nutils.topology.StructuredTopology`.
nrefine : :class:`int`
Number of structured refinements.
'''
__slots__ = '_root', '_axes', '_nrefine', '_etransforms', '_ctransforms', '_cindices'
@types.apply_annotations
def __init__(self, root:transform.stricttransformitem, axes:types.tuple[types.strict[Axis]], nrefine:types.strictint):
self._root = root
self._axes = axes
self._nrefine = nrefine
ref = element.LineReference()**len(self._axes)
self._ctransforms = numeric.asobjvector(ref.child_transforms).reshape((2,)*len(self._axes))
self._cindices = {t: numpy.array(i, dtype=int) for i, t in numpy.ndenumerate(self._ctransforms)}
etransforms = []
rmdims = numpy.zeros(len(axes), dtype=bool)
for order, side, idim in sorted((axis.ibound, axis.side, idim) for idim, axis in enumerate(axes) if not axis.isdim):
ref = util.product(element.getsimplex(0 if rmdim else 1) for rmdim in rmdims)
iedge = (idim - rmdims[:idim].sum()) * 2 + 1 - side
etransforms.append(ref.edge_transforms[iedge])
rmdims[idim] = True
self._etransforms = tuple(etransforms)
super().__init__(sum(axis.isdim for axis in self._axes))
def __getitem__(self, index):
if not numeric.isint(index):
return super().__getitem__(index)
index = numeric.normdim(len(self), index)
# Decompose index into indices per dimension on the nrefined level.
indices = []
for axis in reversed(self._axes):
index, rem = divmod(index, len(axis))
indices.insert(0, axis.map(rem))
assert index == 0
# Create transform.
ctransforms = []
indices = numpy.asarray(indices, dtype=int)
for i in range(self._nrefine):
indices, r = divmod(indices, self._ctransforms.shape)
ctransforms.insert(0, self._ctransforms[tuple(r)])
trans0 = transform.Shift(types.frozenarray(indices, dtype=float, copy=False))
return (self._root, trans0, *ctransforms, *self._etransforms)
def __len__(self):
return util.product(map(len, self._axes))
def index_with_tail(self, trans):
if len(trans) < 2 + self._nrefine + len(self._etransforms):
raise ValueError
root, shift, tail = trans[0], trans[1], transform.uppermost(trans[2:])
if root != self._root:
raise ValueError
if not isinstance(shift, transform.Shift) or len(shift.offset) != len(self._axes) or not numpy.equal(shift.offset.astype(int), shift.offset).all():
raise ValueError
indices = numpy.array(shift.offset, dtype=int)
# Match child transforms.
for item in tail[:self._nrefine]:
try:
indices = indices*2 + self._cindices[item]
except KeyError:
raise ValueError
# Check index boundaries and flatten.
flatindex = 0
for index, axis in zip(indices, self._axes):
flatindex = flatindex*len(axis) + axis.unmap(index)
# Promote the remainder and match the edge transforms.
tail = transform.promote(tail[self._nrefine:], self.fromdims)
if tail[:len(self._etransforms)] != self._etransforms:
raise ValueError
tail = tail[len(self._etransforms):]
return flatindex, tail
class MaskedTransforms(Transforms):
'''An order preserving subset of another :class:`Transforms` object.
Parameters
----------
parent : :class:`Transforms`
The transforms to subset.
indices : one-dimensional array of :class:`int`\\s
The strict monotonic increasing indices of ``parent`` transforms to keep.
'''
__slots__ = '_parent', '_mask', '_indices'
@types.apply_annotations
def __init__(self, parent:stricttransforms, indices:types.frozenarray[types.strictint]):
self._parent = parent
self._indices = indices
super().__init__(parent.fromdims)
def __iter__(self):
for itrans in self._indices:
yield self._parent[int(itrans)]
def __getitem__(self, index):
if numeric.isintarray(index) and index.ndim == 1 and numpy.any(numpy.less(index, 0)):
raise IndexError('index out of bounds')
return self._parent[self._indices[index]]
def __len__(self):
return len(self._indices)
def index_with_tail(self, trans):
parent_index, tail = self._parent.index_with_tail(trans)
index = numpy.searchsorted(self._indices, parent_index)
if index == len(self._indices) or self._indices[index] != parent_index:
raise ValueError
else:
return int(index), tail
class ReorderedTransforms(Transforms):
'''A reordered :class:`Transforms` object.
Parameters
----------
parent : :class:`Transforms`
The transforms to reorder.
indices : one-dimensional array of :class:`int`\\s
The new order of the transforms.
'''
__slots__ = '_parent', '_mask', '_indices'
__cache__ = '_rindices'
@types.apply_annotations
def __init__(self, parent:stricttransforms, indices:types.frozenarray[types.strictint]):
self._parent = parent
self._indices = indices
super().__init__(parent.fromdims)
@property
def _rindices(self):
return numpy.argsort(self._indices)
def __iter__(self):
for itrans in self._indices:
yield self._parent[int(itrans)]
def __getitem__(self, index):
if numeric.isintarray(index) and index.ndim == 1 and numpy.any(numpy.less(index, 0)):
raise IndexError('index out of bounds')
return self._parent[self._indices[index]]
def __len__(self):
return len(self._parent)
def index_with_tail(self, trans):
parent_index, tail = self._parent.index_with_tail(trans)
return int(self._rindices[parent_index]), tail
class DerivedTransforms(Transforms):
'''A sequence of derived transforms.
The derived transforms are ordered first by parent transforms, then by derived
transforms, as returned by the reference::
(trans+(ctrans,) for trans, ref in zip(parent, parent_references) for ctrans in getattr(ref, derived_attribute))
Parameters
----------
parent : :class:`Transforms`
The transforms to refine.
parent_references: :class:`~nutils.elementseq.References`
The references to use for the refinement.
derived_attribute : :class:`str`
The name of the attribute of a :class:`nutils.element.Reference` that
contains the derived references.
fromdims : :class:`int`
The number of dimensions all transforms in this sequence map from.
'''
__slots__ = '_parent', '_parent_references', '_derived_transforms'
__cache__ = '_offsets'
@types.apply_annotations
def __init__(self, parent:stricttransforms, parent_references:types.strict[References], derived_attribute:types.strictstr, fromdims:types.strictint):
if len(parent) != len(parent_references):
raise ValueError('`parent` and `parent_references` should have the same length')
if parent.fromdims != parent_references.ndims:
raise ValueError('`parent` and `parent_references` have different dimensions')
self._parent = parent
self._parent_references = parent_references
self._derived_transforms = operator.attrgetter(derived_attribute)
super().__init__(fromdims)
@property
def _offsets(self):
return types.frozenarray(numpy.cumsum([0, *(len(self._derived_transforms(ref)) for ref in self._parent_references)]), copy=False)
def __len__(self):
return self._offsets[-1]
def __iter__(self):
for reference, trans in zip(self._parent_references, self._parent):
for dtrans in self._derived_transforms(reference):
yield trans+(dtrans,)
def __getitem__(self, index):
if not numeric.isint(index):
return super().__getitem__(index)
index = numeric.normdim(len(self), index)
iparent = numpy.searchsorted(self._offsets, index, side='right')-1
assert 0 <= iparent < len(self._offsets)-1
iderived = index - self._offsets[iparent]
return self._parent[iparent] + (self._derived_transforms(self._parent_references[iparent])[iderived],)
def index_with_tail(self, trans):
iparent, tail = self._parent.index_with_tail(trans)
if not tail:
raise ValueError
if self.fromdims == self._parent.fromdims:
tail = transform.uppermost(tail)
else:
tail = transform.canonical(tail)
iderived = self._derived_transforms(self._parent_references[iparent]).index(tail[0])
return self._offsets[iparent]+iderived, tail[1:]
class UniformDerivedTransforms(Transforms):
'''A sequence of refined transforms from a uniform sequence of references.
The refined transforms are ordered first by parent transforms, then by
derived transforms, as returned by the reference::
(trans+(ctrans,) for trans in parent for ctrans in getattr(parent_reference, derived_attribute))
Parameters
----------
parent : :class:`Transforms`
The transforms to refine.
parent_reference: :class:`~nutils.element.Reference`
The reference to use for the refinement.
derived_attribute : :class:`str`
The name of the attribute of a :class:`nutils.element.Reference` that
contains the derived references.
fromdims : :class:`int`
The number of dimensions all transforms in this sequence map from.
'''
__slots__ = '_parent', '_derived_transforms'
@types.apply_annotations
def __init__(self, parent:stricttransforms, parent_reference:element.strictreference, derived_attribute:types.strictstr, fromdims:types.strictint):
if parent.fromdims != parent_reference.ndims:
raise ValueError('`parent` and `parent_reference` have different dimensions')
self._parent = parent
self._derived_transforms = getattr(parent_reference, derived_attribute)
super().__init__(fromdims)
def __len__(self):
return len(self._parent)*len(self._derived_transforms)
def __iter__(self):
for trans in self._parent:
for dtrans in self._derived_transforms:
yield trans+(dtrans,)
def __getitem__(self, index):
if not numeric.isint(index):
return super().__getitem__(index)
iparent, iderived = divmod(numeric.normdim(len(self), index), len(self._derived_transforms))
return self._parent[iparent] + (self._derived_transforms[iderived],)
def index_with_tail(self, trans):
iparent, tail = self._parent.index_with_tail(trans)
if not tail:
raise ValueError
if self.fromdims == self._parent.fromdims:
tail = transform.uppermost(tail)
else:
tail = transform.canonical(tail)
iderived = self._derived_transforms.index(tail[0])
return iparent*len(self._derived_transforms) + iderived, tail[1:]
class ProductTransforms(Transforms):
'''The product of two :class:`Transforms` objects.
The order of the resulting transforms is: ``transforms1[0]*transforms2[0],
transforms1[0]*transforms2[1], ..., transforms1[1]*transforms2[0],
transforms1[1]*transforms2[1], ...``.
Parameters
----------
transforms1 : :class:`Transforms`
The first sequence of transforms.
transforms2 : :class:`Transforms`
The second sequence of transforms.
'''
__slots__ = '_transforms1', '_transforms2'
@types.apply_annotations
def __init__(self, transforms1:stricttransforms, transforms2:stricttransforms):
self._transforms1 = transforms1
self._transforms2 = transforms2
super().__init__(transforms1.fromdims+transforms2.fromdims)
def __iter__(self):
for trans1 in self._transforms1:
for trans2 in self._transforms2:
yield transform.Bifurcate(trans1, trans2),
def __getitem__(self, index):
if not numeric.isint(index):
return super().__getitem__(index)
index1, index2 = divmod(numeric.normdim(len(self), index), len(self._transforms2))
return transform.Bifurcate(self._transforms1[index1], self._transforms2[index2]),
def __len__(self):
return len(self._transforms1) * len(self._transforms2)
def index_with_tail(self, trans):
bf = trans[0]
assert isinstance(bf, transform.Bifurcate)
index1, tail1 = self._transforms1.index_with_tail(bf.trans1[:-1])
index2, tail2 = self._transforms2.index_with_tail(bf.trans2[:-1])
return index1*len(self._transforms2)+index2, None # FIXME
class ChainedTransforms(Transforms):
'''A sequence of chained :class:`Transforms` objects.
Parameters
----------
items: :class:`tuple` of :class:`Transforms` objects
The :class:`Transforms` objects to chain.
'''
__slots__ = '_items'
__cache__ = '_offsets'
@types.apply_annotations
def __init__(self, items:types.tuple[stricttransforms]):
if len(items) == 0:
raise ValueError('Empty chain.')
if len(set(item.fromdims for item in items)) != 1:
raise ValueError('Cannot chain Transforms with different fromdims.')
self._items = items
super().__init__(self._items[0].fromdims)
@property
def _offsets(self):
return types.frozenarray(numpy.cumsum([0, *map(len, self._items)]), copy=False)
def __len__(self):
return self._offsets[-1]
def __getitem__(self, index):
if numeric.isint(index):
index = numeric.normdim(len(self), index)
outer = numpy.searchsorted(self._offsets, index, side='right') - 1
assert outer >= 0 and outer < len(self._items)
return self._items[outer][index-self._offsets[outer]]
elif isinstance(index, slice) and index.step in (1, None):
index = range(len(self))[index]
if index == range(len(self)):
return self
elif index.start == index.stop:
return EmptyTransforms(self.fromdims)
ostart = numpy.searchsorted(self._offsets, index.start, side='right') - 1
ostop = numpy.searchsorted(self._offsets, index.stop, side='left')
return chain((item[max(0,index.start-istart):min(istop-istart,index.stop-istart)] for item, (istart, istop) in zip(self._items[ostart:ostop], util.pairwise(self._offsets[ostart:ostop+1]))), self.fromdims)
elif numeric.isintarray(index) and index.ndim == 1 and len(index) and numpy.all(numpy.greater(numpy.diff(index), 0)):
if index[0] < 0 or index[-1] >= len(self):
raise IndexError('index out of bounds')
split = numpy.searchsorted(index, self._offsets, side='left')
return chain((item[index[start:stop]-offset] for item, offset, (start, stop) in zip(self._items, self._offsets, util.pairwise(split)) if stop > start), self.fromdims)
elif numeric.isboolarray(index) and index.shape == (len(self),):
return chain((item[index[start:stop]] for item, (start, stop) in zip(self._items, util.pairwise(self._offsets))), self.fromdims)
else:
return super().__getitem__(index)
def __iter__(self):
return itertools.chain.from_iterable(self._items)
def index_with_tail(self, trans):
offset = 0
for item in self._items:
try:
index, tail = item.index_with_tail(trans)
return index + offset, tail
except ValueError:
pass
offset += len(item)
raise ValueError
def refined(self, references):
return chain((item.refined(references[start:stop]) for item, start, stop in zip(self._items, self._offsets[:-1], self._offsets[1:])), self.fromdims)
def edges(self, references):
return chain((item.edges(references[start:stop]) for item, start, stop in zip(self._items, self._offsets[:-1], self._offsets[1:])), self.fromdims-1)
def unchain(self):
yield from self._items
def chain(items, fromdims):
'''Return the chained transforms sequence of ``items``.
Parameters
----------
items : iterable of :class:`Transforms` objects
The :class:`Transforms` objects to chain.
fromdims : :class:`int`
The number of dimensions all transforms in this sequence map from.
Returns
-------
:class:`Transforms`
The chained transforms.
'''
unchained = tuple(filter(len, itertools.chain.from_iterable(item.unchain() for item in items)))
items_fromdims = set(item.fromdims for item in unchained)
if not (items_fromdims <= {fromdims}):
raise ValueError('expected transforms with fromdims={}, but got {}'.format(fromdims, items_fromdims))
if len(unchained) == 0:
return EmptyTransforms(fromdims)
elif len(unchained) == 1:
return unchained[0]
else:
return ChainedTransforms(unchained)
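# Illustrative behavior of chain(), assuming `a` and `b` are non-empty
# Transforms objects with equal fromdims:
#   chain([a, b], a.fromdims)  -> ChainedTransforms((a, b))
#   chain([a], a.fromdims)     -> a (returned unchanged)
#   chain([], fromdims)        -> EmptyTransforms(fromdims)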
# vim:sw=2:sts=2:et
|
import numpy as np
from openiva.commons.facial import get_transform_mat,warp_img,l2_norm,face_distance,sub_feature
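# MEAN_PTS_5 holds the mean positions (normalized to [0, 1]) of five reference
# facial landmarks (two eye corners, nose tip, two mouth corners), typically
# used as the alignment template for warping. INDS_68_5 maps those points to
# indices in the common 68-point facial landmark convention.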
MEAN_PTS_5=np.array([[0.34191607, 0.46157411],
[0.65653392, 0.45983393],
[0.500225 , 0.64050538],
[0.3709759 , 0.82469198],
[0.63151697, 0.82325091]])
INDS_68_5=[36, 45, 30, 48, 54]
|
def f(x):
"""
:rtype: object
"""
pass
def g(x):
y = x
f(x) # (1)
f(y) # (2)
|
import dash_html_components as html
import dash_core_components as dcc
import pandas as pd
import pathlib
import io
import requests
def get_code():
code = html.Div(
[
html.H1("Example Static Tab"),
dcc.Graph(
id='example-graph',
figure={
'data': [
{'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar', 'name': 'SF'},
{'x': [1, 2, 3], 'y': [2, 4, 5], 'type': 'bar', 'name': u'Montréal'},
],
'layout': {
'title': 'Dash Data Visualization'
}
}
)
]
)
return code
def get_callbacks(app):
return None
### =================================================== ###
if __name__ == "__main__":
    pass
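    # Hypothetical wiring (not part of the original file, which leaves this
    # block empty): a typical Dash entry point would create an app, set
    # `app.layout = get_code()`, register callbacks with `get_callbacks(app)`,
    # and call `app.run_server()`.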
|
"""
cs_modals.py - Creates modal dialogs for the child support slash command.
Copyright (c) 2020 by Thomas J. Daley, J.D.
"""
import json
import math
import re
class CsModals(object):
@staticmethod
def param_modal(trigger_id: str) -> dict:
return CsModals.load_view('cs_params_view', trigger_id)
@staticmethod
def good_response_modal(trigger_id: str) -> dict:
return CsModals.load_view('cs_good_response', trigger_id)
@staticmethod
def load_view(modal_name: str, trigger_id: str) -> dict:
"""
Return dict that specifies a modal dialog to retrieve
input parameters to compute child support.
Args:
modal_name (str): Portion of the file's name
trigger_id (str): Received from interaction.
Returns:
(dict): Slack modal dialog specification
"""
with open(f'./modals/views/{modal_name}.json', 'r') as fp:
view = json.load(fp)
return {
'trigger_id': trigger_id,
'view': view
}
@staticmethod
def calculate_child_support(user_data: dict) -> dict:
"""
Compute Texas Guideline Child Support.
"""
net_resources_cap = 9200.0 * 12.0
clean_data(user_data)
user_data['gross_income_annual'] = user_data['income_amount'] * user_data['income_frequency']
user_data['medical_annual'] = user_data['medical_amount'] * user_data['insurance_frequency']
user_data['dental_annual'] = user_data['dental_amount'] * user_data['insurance_frequency']
user_data['union_dues_annual'] = user_data['union_dues_amount'] * user_data['union_dues_frequency']
user_data['social_sec_annual'] = social_security(user_data)
user_data['medicare_annual'] = medicare(user_data)
user_data['income_tax_annual'] = federal_income_tax(user_data)
user_data['net_resources_annual'] = annual_net_resources(user_data)
user_data['support_factor'] = support_factor(user_data)
user_data['child_support_annual'] = min(user_data['net_resources_annual'], net_resources_cap) * user_data['support_factor']
scale_numbers(user_data)
        print(json.dumps(user_data, indent=4))
        return user_data
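# Illustrative call (hypothetical input; any omitted fields fall back to the
# defaults supplied by supply_defaults() below):
#   CsModals.calculate_child_support({'income_amount': '$5,000',
#                                     'income_frequency': '12',
#                                     'children_inside': '2'})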
def round_up(number, decimals: int = 2) -> float:
factor = int('1' + ('0' * decimals))
return math.ceil(number * factor) / factor
def scale_numbers(user_data: dict):
fields = [
'gross_income_annual', 'medical_annual', 'dental_annual', 'union_dues_annual',
'social_sec_annual', 'medicare_annual', 'income_tax_annual', 'net_resources_annual',
'child_support_annual'
]
for field in fields:
user_data[field] = round_up(user_data[field])
month_key = field.replace('_annual', '_monthly')
user_data[month_key] = round_up(user_data[field] / 12.0)
def support_factor(user_data: dict):
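    # Percentage chart: the row index is the number of children before the
    # court (children_inside) and the column index is the number of other
    # children the obligor has a duty to support (children_outside); the
    # values appear to follow the Texas multiple-family guideline chart.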
factors = [
[],
[.2, .175, .16, .1475, .1360, .1333, .1314, .13],
[.25, .225, .2063, .19, .1833, .1786, .175, .1722],
[.3, .2738, .2520, .24, .2314, .225, .22, .216],
[.35, .322, .3033, .29, .28, .2722, .266, .2609],
[.4, .3733, .3543, .34, .3289, .32, .3127, .3067],
[.4, .3771, .36, .3467, .336, .3273, .32, .3138],
[.4, .38, .3644, .352, .3418, .3333, .3262, .32]
]
children_in = min(user_data['children_inside'], len(factors)-1)
children_out = min(len(factors[children_in])-1, user_data['children_outside'])
return factors[children_in][children_out]
def annual_net_resources(user_data: dict):
return user_data['gross_income_annual'] - \
user_data['medical_annual'] - \
user_data['dental_annual'] - \
user_data['union_dues_annual'] - \
user_data['social_sec_annual'] - \
user_data['medicare_annual'] - \
user_data['income_tax_annual']
def federal_income_tax(user_data):
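    # Uses 2020 single-filer figures: a $12,400 standard deduction, no
    # personal exemption (suspended through 2025), and the 2020 bracket
    # thresholds below.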
personal_exemption = 0.0 # Until 2025
standard_deduction = 12400.0
gross = user_data['gross_income_annual'] - personal_exemption - standard_deduction
if user_data['self_employed']:
gross -= user_data['social_sec_annual'] / 2
gross -= user_data['medicare_annual'] / 2
if gross >= 518400.0:
return 156235.0 + .37 * max(gross-518400.0, 0.0)
if gross >= 207351.0:
return 47367.5 + .35 * max(gross-207350.0, 0.0)
if gross >= 163301.0:
        return 33271.5 + .32 * max(gross-163300.0, 0.0)
if gross >= 85526.0:
return 14605.5 + .24 * max(gross-85525.0, 0.0)
if gross >= 40126.0:
return 4617.5 + .22 * max(gross-40125.0, 0.0)
if gross >= 9876.0:
return 987.5 + .12 * max(gross-9875.0, 0.0)
return .1 * gross
def social_security(user_data):
social_sec_rate_emp = .062
social_sec_rate_self = .124
max_social_sec_wages = 137700.0
self_employment_factor = .9235
if user_data['self_employed']:
taxable_income = min(user_data['gross_income_annual'] * self_employment_factor, max_social_sec_wages)
return taxable_income * social_sec_rate_self
taxable_income = min(user_data['gross_income_annual'], max_social_sec_wages)
return taxable_income * social_sec_rate_emp
def medicare(user_data):
medicare_rate_emp = .0145
medicare_rate_self = .029
self_employment_factor = .9235
if user_data['self_employed']:
taxable_income = user_data['gross_income_annual'] * self_employment_factor
return taxable_income * medicare_rate_self
return user_data['gross_income_annual'] * medicare_rate_emp
def clean_data(user_data: dict):
supply_defaults(user_data)
edit_data(user_data)
convert_types(user_data)
def edit_data(user_data: dict):
user_data['income_amount'] = user_data['income_amount'] \
.replace('$', '') \
.replace(',', '') \
.strip()
user_data['medical_amount'] = user_data['medical_amount'] \
.replace('$', '') \
.replace(',', '') \
.strip()
user_data['dental_amount'] = user_data['dental_amount'] \
.replace('$', '') \
.replace(',', '') \
.strip()
user_data['union_dues_amount'] = user_data['union_dues_amount'] \
.replace('$', '') \
.replace(',', '') \
.strip()
user_data['children_inside'] = user_data['children_inside'].strip()
user_data['children_outside'] = user_data['children_outside'].strip()
def convert_types(user_data):
types = {
'income_frequency': int,
'insurance_frequency': int,
'self_employed': bool,
'union_dues_frequency': int,
'income_amount': float,
'medical_amount': float,
'dental_amount': float,
'children_inside': int,
'children_outside': int,
'union_dues_amount': float
}
for key, value in user_data.items():
if types.get(key) == int:
user_data[key] = int(value)
continue
if types.get(key) == bool:
user_data[key] = value.upper() == 'YES'
continue
if types.get(key) == float:
user_data[key] = float(value)
continue
def supply_defaults(user_data):
defaults = {
'income_frequency': "12",
'insurance_frequency': "12",
'self_employed': "NO",
'union_dues_frequency': "12",
'income_amount': "1300",
'medical_amount': "0",
'dental_amount': "0",
'children_inside': "1",
'children_outside': "0",
'union_dues_amount': "0"
}
# Supply defaults for extant keys with missing values
for key, value in user_data.items():
if not value or value == '':
user_data[key] = defaults.get(key, None)
# Supply defaults for missing keys
for key, value in defaults.items():
if key not in user_data:
user_data[key] = value
|
from pymongo import MongoClient
import os
from influxdb import InfluxDBClient
import time
import datetime
mongopwd = os.environ["MONGOPASS"]
mongourl = "mongodb://pax:{}@xenon1t-daq.lngs.infn.it:27017/run".format(mongopwd)
client = InfluxDBClient(host='influxdb', port=8086,)
dbs = client.get_list_database()
print(dbs)
if "mdb" not in [d['name'] for d in dbs]:
client.create_database("mdb")
last_run: int = 9227
last_date = datetime.datetime(2018, 5, 2, 16, 32, 28)
last = client.query('SELECT LAST("value"),"time" from "tpc_event_rate"', database='mdb').get_points()
last = list(last)
if last:
# last_date = datetime.datetime.fromtimestamp(last[0]['time'])
last_date = datetime.datetime.strptime(last[0]['time'],"%Y-%m-%dT%H:%M:%SZ")
print("last date: ",last_date)
coll = MongoClient(mongourl)['run']['runs_new']
def write_data(name, data):
json_body = [{
"measurement":name,
"time": int((entry['srun']+0.5*(entry['erun']-entry['srun'])).timestamp()),
"fields":{"value": entry['event_rate'],
"run_number":entry['number']},
} for entry in data]
client.write_points(json_body, time_precision='s',database='mdb')
try:
while True:
print('Connecting to DB')
# TPC rates
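        # The pipeline computes event_rate = trigger.events_built / run
        # duration: ($end - $start) is in milliseconds, and multiplying by
        # .001 converts it to seconds, so the rate is in events per second.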
aggregate_cursor = list(coll.aggregate([
{"$match": {"detector": "tpc", "end": {"$exists": True}, "trigger.events_built": {"$gt": 1000}, "start":{"$gt":last_date},"reader.ini.name": "background_stable"}},
{"$sort": {"number": 1}},
{"$project": {"number":"$number","srun": "$start","erun": "$end", "event_rate": {"$divide": ["$trigger.events_built", {"$multiply": [.001, {"$subtract": ["$end", "$start"]}]}]}}}
]))
print('Got back tpc data for {} runs.'.format(len(aggregate_cursor)))
#print(aggregate_cursor[:2])
if aggregate_cursor:
print("saving tpc data data to mdb DB")
write_data("tpc_event_rate", aggregate_cursor)
# last_run = aggregate_cursor[-1]["number"]
last_date = aggregate_cursor[0]["srun"]
# Muon veto rates
aggregate_cursor_mv = list(coll.aggregate([
{"$match": {"detector": "muon_veto", "end": {"$exists": True},"start":{"$gt":last_date}, "trigger.events_built": {"$gt": 1000},"reader.ini.name": "muon_mode_sync_triggersentTPC"}},
{"$sort": {"number": 1}},
{"$project": {"number":"$number","srun": "$start","erun": "$end", "event_rate": {"$divide": ["$trigger.events_built", {"$multiply": [.001, {"$subtract": ["$end", "$start"]}]}]}}}
]))
print('Got back muVeto data for {} runs.'.format(len(aggregate_cursor_mv)))
if aggregate_cursor_mv:
print("saving muVeto data data to mdb DB")
write_data("mv_event_rate",aggregate_cursor_mv)
last_date = aggregate_cursor_mv[0]["srun"]
#print(aggregate_cursor_mv[:2])
time.sleep(60)
except KeyboardInterrupt:
pass
except Exception as e:
print(e)
|
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
\file genCompileScript.py
\brief Python script to generate the compile script for unix systems.
\copyright Copyright (c) 2018 Visual Computing group of Ulm University,
Germany. See the LICENSE file at the top-level directory of
this distribution.
\author pedro hermosilla (pedro-1.hermosilla-casajus@uni-ulm.de)
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import argparse
import tensorflow as tf
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate the compile script for the MCCNN operations.')
parser.add_argument('--cudaFolder', required=True, help='Path to the CUDA folder')
parser.add_argument('--MLPSize', default=8, type=int, help='Size of the MLPs (default 8)')
parser.add_argument('--debugInfo', action='store_true', help='Print debug information during execution (default: False)')
args = parser.parse_args()
debugString = " -DPRINT_CONV_INFO" if args.debugInfo else ""
with open("compile.sh", "w") as myCompileScript:
myCompileScript.write(args.cudaFolder+"/bin/nvcc -DBLOCK_MLP_SIZE="+str(args.MLPSize)+debugString+" -std=c++11 aabb_gpu.cu -o aabb_gpu.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC\n")
myCompileScript.write(args.cudaFolder+"/bin/nvcc -DBLOCK_MLP_SIZE="+str(args.MLPSize)+debugString+" -std=c++11 sort_gpu.cu -o sort_gpu.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC\n")
myCompileScript.write(args.cudaFolder+"/bin/nvcc -DBLOCK_MLP_SIZE="+str(args.MLPSize)+debugString+" -std=c++11 find_neighbors.cu -o find_neighbors.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC\n")
myCompileScript.write(args.cudaFolder+"/bin/nvcc -DBLOCK_MLP_SIZE="+str(args.MLPSize)+debugString+" -std=c++11 compute_pdf.cu -o compute_pdf.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC\n")
myCompileScript.write(args.cudaFolder+"/bin/nvcc -DBLOCK_MLP_SIZE="+str(args.MLPSize)+debugString+" -std=c++11 poisson_sampling.cu -o poisson_sampling.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC\n")
myCompileScript.write(args.cudaFolder+"/bin/nvcc -DBLOCK_MLP_SIZE="+str(args.MLPSize)+debugString+" -std=c++11 spatial_conv.cu -o spatial_conv.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC\n")
tensorflowInclude = tf.sysconfig.get_include()
tensorflowLib = tf.sysconfig.get_lib()
myCompileScript.write("g++ -std=c++11 -DBLOCK_MLP_SIZE="+str(args.MLPSize)+debugString+" spatial_conv.cc poisson_sampling.cc compute_pdf.cc "\
"find_neighbors.cc sort_gpu.cc aabb_gpu.cc spatial_conv.cu.o poisson_sampling.cu.o compute_pdf.cu.o "\
"find_neighbors.cu.o sort_gpu.cu.o aabb_gpu.cu.o -o MCConv.so -shared -fPIC -I"+tensorflowInclude+" -I"+tensorflowInclude+"/external/nsync/public "\
"-I"+args.cudaFolder+"/include -lcudart -L "+args.cudaFolder+"/lib64/ -L"+tensorflowLib+" -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=0\n")
with open("MCConvModuleSrc", "r") as mySrcPyScript:
with open("MCConvModule.py", "w") as myDestPyScript:
for line in mySrcPyScript:
myDestPyScript.write(line)
myDestPyScript.write("\n")
myDestPyScript.write("\n")
myDestPyScript.write("def get_block_size():\n")
myDestPyScript.write(" return "+str(args.MLPSize)+"\n")
myDestPyScript.write("\n")
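# Illustrative invocation (hypothetical CUDA install path):
#   python genCompileScript.py --cudaFolder /usr/local/cuda --MLPSize 8
# and then run the generated compile.sh to build MCConv.so.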
|
# -*- coding: utf-8 -*-
"""
Generate metadata and bag for a resource from Django
"""
import os
import requests
from django.conf import settings
from django.core.management.base import BaseCommand
from hs_core.models import BaseResource
from hs_core.hydroshare import hs_requests
from hs_core.hydroshare.hs_bagit import create_bag_files
from hs_core.tasks import create_bag_by_irods
from django_irods.icommands import SessionException
def check_bag(rid, options):
requests.packages.urllib3.disable_warnings()
try:
resource = BaseResource.objects.get(short_id=rid)
istorage = resource.get_irods_storage()
root_exists = istorage.exists(resource.root_path)
if root_exists:
# print status of metadata/bag system
scimeta_path = os.path.join(resource.root_path, 'data',
'resourcemetadata.xml')
scimeta_exists = istorage.exists(scimeta_path)
if scimeta_exists:
print("resource metadata {} found".format(scimeta_path))
else:
print("resource metadata {} NOT FOUND".format(scimeta_path))
resmap_path = os.path.join(resource.root_path, 'data', 'resourcemap.xml')
resmap_exists = istorage.exists(resmap_path)
if resmap_exists:
print("resource map {} found".format(resmap_path))
else:
print("resource map {} NOT FOUND".format(resmap_path))
bag_exists = istorage.exists(resource.bag_path)
if bag_exists:
print("bag {} found".format(resource.bag_path))
else:
print("bag {} NOT FOUND".format(resource.bag_path))
dirty = resource.getAVU('metadata_dirty')
print("{}.metadata_dirty is {}".format(rid, str(dirty)))
modified = resource.getAVU('bag_modified')
print("{}.bag_modified is {}".format(rid, str(modified)))
if options['reset']: # reset all data to pristine
resource.setAVU('metadata_dirty', 'true')
print("{}.metadata_dirty set to true".format(rid))
try:
istorage.delete(resource.scimeta_path)
print("{} deleted".format(resource.scimeta_path))
except SessionException as ex:
print("{} delete failed: {}"
.format(resource.scimeta_path,
ex.stderr))
try:
istorage.delete(resource.resmap_path)
print("{} deleted".format(resource.resmap_path))
except SessionException as ex:
print("{} delete failed: {}"
.format(resource.resmap_path,
ex.stderr))
resource.setAVU('bag_modified', 'true')
print("{}.bag_modified set to true".format(rid))
try:
istorage.delete(resource.bag_path)
print("{} deleted".format(resource.bag_path))
except SessionException as ex:
print("{} delete failed: {}"
.format(resource.bag_path,
ex.stderr))
if options['reset_metadata']:
resource.setAVU('metadata_dirty', 'true')
print("{}.metadata_dirty set to true".format(rid))
try:
istorage.delete(resource.scimeta_path)
print("{} deleted".format(resource.scimeta_path))
except SessionException as ex:
print("delete of {} failed: {}"
.format(resource.scimeta_path,
ex.stderr))
try:
istorage.delete(resource.resmap_path)
print("{} deleted".format(resource.resmap_path))
except SessionException as ex:
print("{} delete failed: {}"
.format(resource.resmap_path,
ex.stderr))
if options['reset_bag']:
resource.setAVU('bag_modified', 'true')
print("{}.bag_modified set to true".format(rid))
try:
istorage.delete(resource.bag_path)
print("{} deleted".format(resource.bag_path))
except SessionException as ex:
print("{} delete failed: {}"
.format(resource.bag_path,
ex.stderr))
if options['generate']: # generate usable bag
if not options['if_needed'] or dirty or not scimeta_exists or not resmap_exists:
try:
create_bag_files(resource)
except ValueError as e:
print("{}: value error encountered: {}".format(rid, e.message))
return
print("{} metadata generated from Django".format(rid))
resource.setAVU('metadata_dirty', 'false')
resource.setAVU('bag_modified', 'true')
print("{}.metadata_dirty set to false".format(rid))
if not options['if_needed'] or modified or not bag_exists:
create_bag_by_irods(rid)
print("{} bag generated from iRODs".format(rid))
resource.setAVU('bag_modified', 'false')
print("{}.bag_modified set to false".format(rid))
if options['generate_metadata']:
if not options['if_needed'] or dirty or not scimeta_exists or not resmap_exists:
try:
create_bag_files(resource)
except ValueError as e:
print("{}: value error encountered: {}".format(rid, e.message))
return
print("{}: metadata generated from Django".format(rid))
resource.setAVU('metadata_dirty', 'false')
print("{}.metadata_dirty set to false".format(rid))
resource.setAVU('bag_modified', 'true')
print("{}.bag_modified set to false".format(rid))
if options['generate_bag']:
if not options['if_needed'] or modified or not bag_exists:
create_bag_by_irods(rid)
print("{}: bag generated from iRODs".format(rid))
resource.setAVU('bag_modified', 'false')
print("{}.bag_modified set to false".format(rid))
if options['download_bag']:
if options['password']:
server = getattr(settings, 'FQDN_OR_IP', 'www.hydroshare.org')
uri = "https://{}/hsapi/resource/{}/".format(server, rid)
print("download uri is {}".format(uri))
r = hs_requests.get(uri, verify=False, stream=True,
auth=requests.auth.HTTPBasicAuth(options['login'],
options['password']))
print("download return status is {}".format(str(r.status_code)))
print("redirects:")
for thing in r.history:
print("...url: {}".format(thing.url))
filename = 'tmp/check_bag_block'
with open(filename, 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
else:
print("cannot download bag without username and password.")
if options['open_bag']:
if options['password']:
server = getattr(settings, 'FQDN_OR_IP', 'www.hydroshare.org')
uri = "https://{}/hsapi/resource/{}/".format(server, rid)
print("download uri is {}".format(uri))
r = hs_requests.get(uri, verify=False, stream=True,
auth=requests.auth.HTTPBasicAuth(options['login'],
options['password']))
print("download return status is {}".format(str(r.status_code)))
print("redirects:")
for thing in r.history:
print("...url: {}".format(thing.url))
filename = 'tmp/check_bag_block'
with open(filename, 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
break
else:
print("cannot open bag without username and password.")
else:
print("Resource with id {} does not exist in iRODS".format(rid))
except BaseResource.DoesNotExist:
print("Resource with id {} NOT FOUND in Django".format(rid))
class Command(BaseCommand):
help = "Create metadata files and bag for a resource."
def add_arguments(self, parser):
# a list of resource id's, or none to check all resources
parser.add_argument('resource_ids', nargs='*', type=str)
# Named (optional) arguments
parser.add_argument(
'--reset',
action='store_true', # True for presence, False for absence
dest='reset', # value is options['reset']
help='delete metadata and bag and start over'
)
parser.add_argument(
'--reset_metadata',
action='store_true', # True for presence, False for absence
dest='reset_metadata', # value is options['reset_metadata']
help='delete metadata files and start over'
)
parser.add_argument(
'--reset_bag',
action='store_true', # True for presence, False for absence
dest='reset_bag', # value is options['reset_bag']
help='delete bag and start over'
)
parser.add_argument(
'--generate',
action='store_true', # True for presence, False for absence
dest='generate', # value is options['generate']
help='force generation of metadata and bag'
)
parser.add_argument(
'--generate_metadata',
action='store_true', # True for presence, False for absence
dest='generate_metadata', # value is options['generate_metadata']
help='force generation of metadata and bag'
)
parser.add_argument(
'--generate_bag',
action='store_true', # True for presence, False for absence
dest='generate_bag', # value is options['generate_bag']
help='force generation of metadata and bag'
)
parser.add_argument(
'--if_needed',
action='store_true', # True for presence, False for absence
dest='if_needed', # value is options['if_needed']
help='generate only if not present'
)
parser.add_argument(
'--download_bag',
action='store_true', # True for presence, False for absence
dest='download_bag', # value is options['download_bag']
help='try downloading the bag'
)
parser.add_argument(
'--open_bag',
action='store_true', # True for presence, False for absence
dest='open_bag', # value is options['open_bag']
help='try opening the bag in http without downloading'
)
parser.add_argument(
'--login',
default='admin',
dest='login', # value is options['login']
help='HydroShare login name'
)
parser.add_argument(
'--password',
default=None,
dest='password', # value is options['password']
help='HydroShare password'
)
def handle(self, *args, **options):
if len(options['resource_ids']) > 0: # an array of resource short_id to check.
for rid in options['resource_ids']:
check_bag(rid, options)
else:
for r in BaseResource.objects.all():
check_bag(r.short_id, options)
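# Illustrative invocations (hypothetical resource id; assumes this module is
# installed as a Django management command, e.g. named `check_bag`):
#   python manage.py check_bag <resource_id> --generate --if_needed
#   python manage.py check_bag --reset_bag --login admin --password <password>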
|
import rospy
from roboy_middleware_msgs.msg import MagneticSensor
import numpy as np
import matplotlib.pyplot as plt
from magpylib.source.magnet import Box,Cylinder
from magpylib import Collection, displaySystem, Sensor
from scipy.optimize import fsolve, least_squares
import matplotlib.animation as manimation
import random, math
import sensor_msgs.msg, std_msgs
from yaml import load,dump,Loader,Dumper
import sys
from os import path
if len(sys.argv) < 2:
print("\nUSAGE: python3 magnetic_sensor_selector.py balljoint_config, e.g. \n python3 magnetic_sensor_logger.py balljoint_config.yaml \n")
sys.exit()
balljoint_config = load(open(sys.argv[1], 'r'), Loader=Loader)
print('listening to sensor with id %d'%balljoint_config['id'])
def MagneticSensorCallback(data):
    if data.id != int(balljoint_config['id']):
return
    values = []
    print("sensor_id=%d --------------------\n" % data.id)
    for i in range(len(data.x)):
        print('%.3f\t%.3f\t%.3f' % (data.x[i], data.y[i], data.z[i]))
    print("\n---------------------------------\n")
rospy.sleep(0.1)
rospy.init_node('magnetic_sensor_selector')
magneticSensor_sub = rospy.Subscriber('roboy/middleware/MagneticSensor', MagneticSensor, MagneticSensorCallback, queue_size=1)
rospy.spin()
rospy.loginfo('done')
|
import rcbu.client.report as report
from rcbu.utils.bytes import dehumanize_bytes
import rcbu.common.duration as duration
def _args_from_dict(body):
args = {
'_backup_id': body['BackupConfigurationId'],
'_restored': {
'files': int(body['NumFilesRestored']),
'bytes': body['NumBytesRestored']
},
'_destination': {
'id': body['RestoreDestinationMachineId'],
'path': body['RestoreDestination']
}
}
args.update(report._args_from_dict(body))
return args
def from_dict(restore_id, body):
args = _args_from_dict(body)
return RestoreReport(restore_id, **args)
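# Illustrative (hypothetical) fragment of the API body consumed above; the
# keys handled by report._args_from_dict (state, timestamps, errors, ...) are
# omitted here:
#   {
#       'BackupConfigurationId': 42,
#       'NumFilesRestored': '3',
#       'NumBytesRestored': '1.5 GB',
#       'RestoreDestinationMachineId': 7,
#       'RestoreDestination': '/restore/target'
#   }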
class RestoreReport(report.Report):
def __init__(self, report_id, **kwargs):
report.Report.__init__(self, report_id, 'restore', **kwargs)
def __repr__(self):
form = ('<RestoreReport id:{0} state:{1} ok:{2} started:{3} '
'duration:{4} #errors:{5} bytes:{6}>')
hours, minutes, seconds = duration.tuple(self.duration)
return form.format(self.id, self.state, self.ok,
self.started.isoformat(),
'{0}:{1:02}:{2:02}'.format(hours, minutes, seconds),
len(self.errors), self.bytes_restored)
@property
def files_restored(self):
return self._restored['files']
@property
def bytes_restored(self):
return dehumanize_bytes(self._restored['bytes'])
@property
def destination(self):
"""Returns a string in the form: 'path id'"""
return '{0} {1}'.format(self._destination['path'],
self._destination['id'])
|
#####################################################################
# Python script to run ModelSim simulations for all the post-pnr testbenches
# in a project directory
# This script will
# - Collect all the testbenches in a given directory
# For instance:
# ../k4_arch/pre_pnr/verilog_testbenches/and2_post_pnr_include_netlist.v
# - Use run_post_pnr_msim_test.py to run Modelsim simulations and check results
#####################################################################
import os
from os.path import dirname, abspath
import shutil
import re
import argparse
import logging
import subprocess
import glob
import threading
import multiprocessing
import run_post_pnr_msim_test
#####################################################################
# Initialize logger
#####################################################################
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
#####################################################################
# Parse the options
#####################################################################
parser = argparse.ArgumentParser(description='Run a ModelSim verification task for a tape-out FPGA')
parser.add_argument('--testbench_dir_name', required=True,
help='Specify the directory path for the Verilog testbenches')
parser.add_argument('--task_name', required=True,
                    help='Specify the name of the task directory that will hold the ModelSim projects')
parser.add_argument('--testbench_type', default="postpnr",
help='Specify the type of verification: postpnr|prepnr')
args = parser.parse_args()
#####################################################################
# Walk through the parent directory and find all the testbenches
#####################################################################
logging.info("Finding testbenches...");
testbench_dir_abspath = abspath(args.testbench_dir_name) + "/" + args.testbench_type + "/verilog_testbench";
testbench_files = []
for globbed_file in glob.glob(testbench_dir_abspath + "/*_include_netlists.v"):
testbench_files.append(globbed_file)
logging.info("Found " + str(len(testbench_files)) + " testbenches")
#####################################################################
# Try to create the directory of Modelsim projects
#####################################################################
parent_dir_abspath = dirname(dirname(abspath(__file__)))
msim_task_dir_abspath = abspath(parent_dir_abspath + "/" + args.task_name) + "/" + args.testbench_type + "/verilog_testbench"
os.makedirs(msim_task_dir_abspath, exist_ok=True)
#####################################################################
# Run ModelSim simulations for each testbench
#####################################################################
logging.info("Running Modelsim simulations...");
num_sim_finished = 0
msim_testrun_script_abspath = os.path.abspath(__file__)
msim_testrun_script_abspath = re.sub(os.path.basename(msim_testrun_script_abspath), "run_post_pnr_msim_test.py", msim_testrun_script_abspath)
threads = []
for testbench_file in testbench_files:
# Find testbench name
    testbench_name = re.findall(r"(\w+)_include_netlists\.v", os.path.basename(testbench_file))[0]
process = multiprocessing.Process(target=run_post_pnr_msim_test.run_msim, args=(testbench_file, msim_task_dir_abspath + "/" + testbench_name, testbench_name + "_autocheck_top_tb",))
process.start()
threads.append(process)
for process in threads:
process.join()
logging.info("Done")
logging.info("Finish " + str(len(threads)) + " ModelSim simulations")
|
from fvcore.common.registry import Registry
from collections import OrderedDict
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from clip import Bottleneck
from clip import QuickGELU, LayerNorm
ENCODER_MODULES_REGISTRY = Registry("ENCODER_MODULES")
ENCODER_MODULES_REGISTRY.__doc__ = """
Registry for encoder modules.
"""
def build_encoder_module(cfg, **kwargs):
return ENCODER_MODULES_REGISTRY.get(cfg.name)(cfg, **kwargs)
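# Illustrative (hypothetical) usage: `cfg` is any config object whose `name`
# attribute matches one of the classes registered below, e.g.
#   pre_enc = build_encoder_module(vit_cfg)  # vit_cfg.name == "ViTPreEncoder"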
class MetaEncoder(nn.Module):
def __init__(self):
super().__init__()
self.position_resolution = None
self.mask = None
@property
def hp(self):
return []
@hp.setter
def hp(self, hp):
pass
class Miscellanea(MetaEncoder):
""" a parameter container.
"""
def __init__(self, cfg, position_resolution=None, **kwargs):
super().__init__()
if position_resolution is not None:
width = position_resolution[-1]
self.position_resolution = position_resolution[:-1]
positions = np.prod(self.position_resolution) + 1
else:
self.position_resolution = None
width, positions = 0, 0
scale = width ** -0.5 if width > 0 else 0
self.positional_embedding = nn.Parameter(scale * torch.randn(positions, width))
self.class_embedding = nn.Parameter(scale * torch.randn(width)) #None # `<s>` as the class
def initialize_parameters(self):
pass
@ENCODER_MODULES_REGISTRY.register()
class AddonEncoder(nn.Module):
""" enhance an existing encoder.
"""
def __init__(self, cfg, **kwargs):
super().__init__()
def forward(self, x: torch.Tensor, **kwargs):
return x
@ENCODER_MODULES_REGISTRY.register()
class CLIPMisc(Miscellanea):
""" a parameter container.
"""
def __init__(self, cfg, position_resolution=None, **kwargs):
super().__init__(cfg, position_resolution=position_resolution, **kwargs)
pass
def replace_modules(self, reference, keep_hp=False):
self.positional_embedding, self.class_embedding = reference.positional_embedding, reference.class_embedding
if not keep_hp:
self.position_resolution = reference.position_resolution
@property
def hp(self):
return [self.position_resolution]
@hp.setter
def hp(self, hp):
(self.position_resolution,) = hp
@property
def pos_embedding(self):
positional_embedding = interp_clip_vp_embedding(self.positional_embedding, self.position_resolution)
#print(f"{self.positional_embedding.shape} {self.position_resolution} {positional_embedding.shape}")
return positional_embedding
@property
def cls_embedding(self):
return self.class_embedding
@ENCODER_MODULES_REGISTRY.register()
class GPTPreEncoder(MetaEncoder):
def __init__(self, cfg, width=512, ctx_len=77, **kwargs):
super().__init__()
self.position_resolution = (ctx_len, width)
self.token_embedding = nn.Embedding(cfg.vocab_size, width)
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
@property
def dtype(self):
return self.token_embedding.weight.dtype
def forward(
self,
x: torch.Tensor,
positional_embedding: torch.Tensor = None,
class_embedding: torch.Tensor = None, **kwargs
):
# take features from the eot embedding (eot_token is the highest number in each sequence)
# saved for the post encoder
self.mask = x.argmax(dim=-1)
x = self.token_embedding(x).type(self.dtype) # [batch_size, n_ctx, d_model]
positional_embedding = positional_embedding[:x.shape[1]]
x = x + positional_embedding.type(self.dtype)
return x
@ENCODER_MODULES_REGISTRY.register()
class GPTPostEncoder(MetaEncoder):
def __init__(self, cfg, width=512, embed_dim=512, **kwargs):
super().__init__()
scale = width ** -0.5
self.ln = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, embed_dim))
self.initialize_parameters()
def initialize_parameters(self):
pass
def forward(
self,
x: torch.Tensor,
positional_embedding: torch.Tensor = None,
class_embedding: torch.Tensor = None, mask = None, **kwargs
):
dtype = x.dtype
x = self.ln(x).type(dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
x = x[torch.arange(x.shape[0]), mask] @ self.proj
return x
def _vit_position_resolution(input_resolution, patch_size, stride):
stride = stride or patch_size
if isinstance(stride, int):
stride = [stride] * 2
stride = list(stride)
if isinstance(patch_size, int):
patch_size = [patch_size] * 2
patch_size = list(patch_size)
if isinstance(input_resolution, int):
nrow = ncol = input_resolution // patch_size[0]
positions = nrow ** 2 + 1 #
position_resolution = (nrow, ncol)
else:
row_stride, col_stride = stride[:2]
nrow = (input_resolution[0] - patch_size[0]) // row_stride + 1
ncol = (input_resolution[1] - patch_size[1]) // col_stride + 1
positions = nrow * ncol + 1
position_resolution = (nrow, ncol)
return stride, positions, position_resolution
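# Worked example: input_resolution=224 (int), patch_size=16, stride=None
#   -> stride=[16, 16], nrow = ncol = 224 // 16 = 14,
#      positions = 14 * 14 + 1 = 197 (one token per patch plus the class token).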
def interp_conv_weight_channel(conv_weight, input_shape):
if conv_weight.shape[1] != input_shape[1]:
input_shape = (conv_weight.shape[0], input_shape[1])
conv_weight = conv_weight.permute(2, 3, 0, 1)
conv_weight = F.interpolate(
conv_weight,
input_shape,
mode="bilinear",
align_corners=False,
)
conv_weight = conv_weight.permute(2, 3, 0, 1)
return conv_weight
def interp_conv_weight_spatial(conv_weight, patch_shape):
if conv_weight.shape[-2:] != patch_shape:
conv_weight = F.interpolate(
conv_weight,
patch_shape,
mode="bilinear",
align_corners=False,
)
return conv_weight
@ENCODER_MODULES_REGISTRY.register()
class ViTPreEncoder(MetaEncoder):
def __init__(self, cfg, width=768, resolution=224, **kwargs):
super().__init__()
self.stride, _, self.position_resolution = _vit_position_resolution(
resolution, cfg.patch_size, cfg.stride
)
self.position_resolution += (width,)
self.conv1 = nn.Conv2d(
in_channels=cfg.in_channels, out_channels=width, kernel_size=cfg.patch_size, stride=self.stride, bias=False
)
self.patch_size = self.conv1.weight.shape[-2:]
self.ln = LayerNorm(width)
self.initialize_parameters()
def initialize_parameters(self):
pass
def replace_modules(self, reference, keep_hp=False):
self.conv1, self.ln = reference.conv1, reference.ln
if not keep_hp:
self.stride, self.patch_size, self.position_resolution = \
reference.stride, reference.patch_size, reference.position_resolution
@property
def hp(self):
return [self.stride, self.patch_size, self.position_resolution]
@hp.setter
def hp(self, hp):
(self.stride, self.patch_size, self.position_resolution) = hp
@property
def dtype(self):
return self.conv1.weight.dtype
def forward(
self,
x: torch.Tensor,
positional_embedding: torch.Tensor = None,
class_embedding: torch.Tensor = None, **kwargs
):
assert x.dim() == 4, f"expect 4d `x` but get x.dim == {x.dim()}"
x = x.type(self.dtype)
if x.shape[1] != 3: # interpolate weight
use_mean = True
conv1_weight = interp_conv_weight_spatial(self.conv1.weight, self.patch_size)
#print(f"{self.conv1.weight.shape}, {conv1_weight.shape}, {self.patch_size}, {self.conv1.stride}, {self.stride}")
if x.shape[1] != conv1_weight.shape[1]: # channel
conv1_weight = (
conv1_weight.mean(1, keepdim=True) if use_mean else
interp_conv_weight_channel(conv1_weight, x.shape)
)
x = F.conv2d(
x, conv1_weight, bias=self.conv1.bias, stride=self.stride
)
else:
x = self.conv1(x) # shape = [*, width, grid, grid]
#print(f"{self.conv1.weight.shape}, {self.patch_size}, {self.conv1.stride}, {self.stride}")
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([
class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x
], dim=1) # shape = [*, grid ** 2 + 1, width]
#print(f"C {x.shape}, {positional_embedding.shape}, {self.position_resolution}")
x = x + positional_embedding[:x.shape[1]].to(x.dtype)
x = self.ln(x)
return x
@ENCODER_MODULES_REGISTRY.register()
class ViTPostEncoder(MetaEncoder):
def __init__(self, cfg, width=768, embed_dim=512, **kwargs):
super().__init__()
scale = width ** -0.5
self.ln = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, embed_dim))
self.initialize_parameters()
def initialize_parameters(self):
pass
def forward(
self,
x: torch.Tensor,
positional_embedding: torch.Tensor = None,
class_embedding: torch.Tensor = None,
position_resolution: list = None,
require_feature: bool = False, **kwargs
):
if require_feature: # Transformer encoder-decoder model
x = self.ln(x)
feature = x[:, 1:]
N, _, D = feature.shape
nrow, ncol = position_resolution
feature = feature.view(N, nrow, ncol, D)
return x[:, 0, :] @ self.proj, feature
x = self.ln(x[:, 0, :])
x = x @ self.proj
return x
def _resnet_position_resolution(input_resolution, patch_size=32, stride=None):
stride = stride or patch_size
if isinstance(stride, int):
stride = [stride] * 2
stride = list(stride)
if isinstance(input_resolution, int):
nrow = ncol = input_resolution // patch_size
positions = nrow ** 2 + 1 #
position_resolution = (nrow, ncol)
else:
row_stride, col_stride = stride[:2]
nrow = (input_resolution[0] - 0) // row_stride
ncol = (input_resolution[1] - 0) // col_stride
positions = nrow * ncol + 1
position_resolution = (nrow, ncol)
return stride, positions, position_resolution
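# Worked example: input_resolution=224 (int), default patch_size=32
#   -> nrow = ncol = 224 // 32 = 7, positions = 7 * 7 + 1 = 50
#      (49 spatial positions plus one pooled token).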
@ENCODER_MODULES_REGISTRY.register()
class ResNetPreEncoder(MetaEncoder):
def __init__(self, cfg, width=64, **kwargs):
super().__init__()
# the 3-layer stem
self.conv1 = nn.Conv2d(cfg.in_channels, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
self.initialize_parameters()
def initialize_parameters(self):
pass
@property
def dtype(self):
return self.conv1.weight.dtype
def forward(
self,
x: torch.Tensor,
positional_embedding: torch.Tensor = None,
class_embedding: torch.Tensor = None, **kwargs
):
assert x.dim() == 4, f"expect 4d `x` but get x.dim == {x.dim()}"
x = x.type(self.dtype)
if x.shape[1] != 3: # interpolate weight
use_mean = True
conv1_weight = (
self.conv1.weight.mean(1, keepdim=True) if use_mean else
interp_conv_weight_channel(self.conv1.weight, x.shape)
)
x = F.conv2d(
x, conv1_weight, bias=self.conv1.bias, stride=self.conv1.stride, padding=self.conv1.padding
)
else:
x = self.conv1(x) # shape = [*, width, grid, grid]
x = self.relu(self.bn1(x))
for conv, bn in [(self.conv2, self.bn2), (self.conv3, self.bn3)]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
@ENCODER_MODULES_REGISTRY.register()
class ResNetPostEncoder(MetaEncoder):
def __init__(self, cfg, width=64, embed_dim=None, resolution=224, **kwargs):
super().__init__()
width = width * 32 # the ResNet feature dimension
self.num_heads = width // 64
_, _, self.position_resolution = _resnet_position_resolution(resolution)
self.position_resolution += (width,)
self.k_proj = nn.Linear(width, width)
self.q_proj = nn.Linear(width, width)
self.v_proj = nn.Linear(width, width)
self.c_proj = nn.Linear(width, embed_dim or width)
self.initialize_parameters()
def initialize_parameters(self):
std = self.c_proj.in_features ** -0.5
nn.init.normal_(self.q_proj.weight, std=std)
nn.init.normal_(self.k_proj.weight, std=std)
nn.init.normal_(self.v_proj.weight, std=std)
nn.init.normal_(self.c_proj.weight, std=std)
def replace_modules(self, reference, keep_hp=False):
self.k_proj, self.q_proj, self.v_proj, self.c_proj = (
reference.k_proj, reference.q_proj, reference.v_proj, reference.c_proj
)
if not keep_hp:
self.position_resolution = reference.position_resolution
@property
def hp(self):
return [self.position_resolution]
@hp.setter
def hp(self, hp):
(self.position_resolution,) = hp
def forward(
self,
x: torch.Tensor,
positional_embedding: torch.Tensor = None,
class_embedding: torch.Tensor = None, **kwargs
):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
#print(f"C {x.shape}, {positional_embedding.shape}, {self.position_resolution}")
x = x + positional_embedding[:x.shape[0], None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
@ENCODER_MODULES_REGISTRY.register()
class ResNetBackbone(MetaEncoder):
def __init__(self, cfg, width=64, **kwargs):
super().__init__()
self.batch_first = True
layers = cfg.layers
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
self.initialize_parameters()
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def initialize_parameters(self):
for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
@property
def dtype(self):
        # Note: this class defines no conv1 of its own; take the dtype from
        # the first Bottleneck block (assumes `Bottleneck` exposes `conv1`).
        return self.layer1[0].conv1.weight.dtype
def forward(self, x: torch.Tensor, **kwargs):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
@ENCODER_MODULES_REGISTRY.register()
class TransformerBackbone(MetaEncoder):
def __init__(self, cfg, width=512, ctx_len=77, **kwargs):
super().__init__()
self.batch_first = False
self.ctx_len = ctx_len
heads = width // 64
attn_mask = self.build_attention_mask()
self.resblocks = nn.Sequential(*[
ResidualAttentionBlock(
width, heads, attn_mask, cfg.skip_attn_mask
) for _ in range(cfg.layers)
])
def build_attention_mask(self):
if self.ctx_len is None:
return None
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.ctx_len, self.ctx_len)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
def forward(self, x: torch.Tensor, **kwargs):
return self.resblocks(x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, skip_attn_mask: bool = True):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
self.skip_attn_mask = skip_attn_mask
def attention(self, x: torch.Tensor):
if not self.skip_attn_mask and self.attn_mask is not None:
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device)
attn_mask = self.attn_mask[:x.shape[0], :x.shape[0]]
else:
attn_mask = None
return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
def interp_clip_vp_embedding(old_pos_emb, pos_resolution, old_pos_resolution=None, bop=1):
""" vp: stands for `visual positional`
bop: start position of the postional embeddings
old_pos_emb: (H x W + 1, D)
"""
num_pos, pos_dim = old_pos_emb.shape[-2:]
num_pos_required = np.prod(pos_resolution)
# TODO assumed old_pos_emb comes from vision pos, but it can come from audio pos
# if these two kinds do not share, we do not need to interp the input pos.
# FIXME adhoc: the condition of not sharing may be wrong.
if num_pos_required + 1 == num_pos:
return old_pos_emb
if old_pos_resolution is None:
# old_pos_emb must be vision pos if sharing pos between vision and audio
h = w = int(np.sqrt(num_pos - bop))
else: # should have fixed the TODO
h, w = old_pos_resolution
ptensor = old_pos_emb[bop:].reshape(
-1, h, w, pos_dim
).permute(0, 3, 1, 2)
if ptensor.shape[-2:] != pos_resolution:
new_pos_emb = F.interpolate(
ptensor,
pos_resolution,
mode="bilinear",
align_corners=False,
).permute(0, 2, 3, 1).flatten(1, 2)
new_pos_emb = torch.cat((
old_pos_emb[:bop], new_pos_emb.view(-1, pos_dim)
), dim=0)
else: # do nothing
new_pos_emb = old_pos_emb
return new_pos_emb
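# Worked example (hypothetical shapes): resizing a CLIP ViT-B/32 style
# positional embedding from a 7x7 grid to a 14x14 grid:
#   old_emb.shape == (7*7 + 1, 768)
#   interp_clip_vp_embedding(old_emb, (14, 14)).shape == (14*14 + 1, 768)
# The leading `bop` rows (here the single class-token embedding) are kept
# unchanged and only the grid part is bilinearly interpolated.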
|
'''
Created on Aug, 12 2015
@author: mlaptev
'''
from base_serialization import Base_Serialization
import json
import os
class JSON_Serialization(Base_Serialization):
'''
This class will be used to demonstration of both serialization and de-serialization
capabilities with using JSON
'''
def __init__(self):
Base_Serialization.__init__(self)
self.file_extension = "json"
self.file_name_pattern = self.__class__.__name__ + "_" + self.file_name_pattern + self.file_extension
def serialize_object(self, object_to_serialize):
try:
object_file_name = self.file_name_pattern.replace(self.type_template, object_to_serialize.__class__.__name__)
with open(object_file_name, 'w') as f:
json.dump(object_to_serialize, f, indent = 4)
except TypeError, te:
print "Object '{}' is not JSON serializable: {}{}".format(object_to_serialize.__class__.__name__, os.linesep, te)
def deserialize_object(self, type_of_serialized_object):
try:
object_file_name = self.file_name_pattern.replace(self.type_template, type_of_serialized_object)
with open(object_file_name, 'r') as f:
data = json.load(f)
return data
except ValueError, ve:
print "Object '{}' could not be decoded after serialization: {}{}".format(type_of_serialized_object, os.linesep, ve)
if __name__ == "__main__":
json_serialization = JSON_Serialization()
print "{0}Serialization... {0}".format(os.linesep)
json_serialization.serialize_me()
print "{0}Deserialization... {0}".format(os.linesep)
json_serialization.deserialize_me()
print "{0}Done... {0}".format(os.linesep)
|
#!/usr/bin/python
"""
get_spectrum.py
Jordan Fox , 2016
this program extracts the energy spectrum from an input file (arg 1)
and prints the energies to STDOUT
The input file must be a .res file from BIGSTICK 7.7.0 or similar
Works for BIGSTICK options n, d
"""
import sys
import os
"""
get input file
"""
if (len(sys.argv)>1):
yesinfile = True
else:
yesinfile = False
if (not yesinfile):
exit("Need input file.")
filename_in = sys.argv[1]
exists_in = os.path.isfile(filename_in)
if (yesinfile and not exists_in):
exit(filename_in + " does not exist.")
print(filename_in + " : ")
"""
read
"""
with open(filename_in,"rt") as fptr_in:
contents = fptr_in.read()
lines = contents.split("\n")
istate = 1
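# Energy lines in the .res file are assumed to have five whitespace-separated
# columns whose first column is the (1-based) state index; only those lines
# are echoed below.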
for line in lines:
try:
if len(line.split())==5 and int(line.split()[0])==istate:
print(line)
istate = istate + 1
except ValueError:
continue
|
'''
This function gives you the possibility to
concatenate multiple videos.
'''
import moviepy.editor as mp
from scripts import style
import os
def create_concatenation():
videos = []
all_video_file_paths = []
all_filenames = []
color = style.bcolors()
while True:
try:
video_amount = int(input(f'Enter amount of videos {color.OKBLUE}(e.g. 2){color.ENDC}: '))
break
except ValueError:
os.system('cls')
print(f'{color.FAIL}Input is invalid - numbers only!{color.ENDC}')
continue
number = 0
for _ in range(int(video_amount)):
number += 1
while True:
video_file_path = input(f'Enter full path to video file {str(number)} {color.OKBLUE}(e.g. C:/Users/JohnDoe/Videos/example.mp4){color.ENDC}: ')
if os.path.exists(video_file_path):
if video_file_path.lower().endswith(('.mp4', '.mkv', '.mov')):
all_video_file_paths.append(video_file_path)
videos.append(mp.VideoFileClip(video_file_path))
print(f'{color.OKGREEN}Video file has been found!{color.ENDC}\n')
break
else:
os.system('cls')
print(f'{color.FAIL}File isn\'t a video extension! (e.g. mp4 mkv mov){color.ENDC}')
continue
else:
os.system('cls')
print(f'{color.FAIL}File doesn\'t exist in this directory!{color.ENDC}')
continue
    for x in all_video_file_paths:
        old_filename = os.path.basename(x)
        old_extension = os.path.splitext(x)[1]
        new_filename = old_filename.replace(old_extension, '')
all_filenames.append(new_filename)
merged_filename = '-'.join(all_filenames) + '.mp4'
final_clip = mp.concatenate_videoclips(videos, method='compose')
while True:
if os.path.isfile(f'assets/videos/merged-{merged_filename}'):
            overwrite = input(f'File \'assets/videos/merged-{merged_filename}\' already exists. Overwrite ? [y/N] ')
if overwrite.upper() == 'Y':
final_clip.write_videofile(f'assets/videos/merged-{merged_filename}')
print(f'{color.OKGREEN}Overwriting - done{color.ENDC}')
break
elif overwrite.upper() == 'N':
print(f'{color.FAIL}Not overwriting - exiting{color.ENDC}')
break
else:
os.system('cls')
print(f'{color.FAIL}Invalid input!{color.ENDC}')
continue
else:
final_clip.write_videofile(f'assets/videos/merged-{merged_filename}')
break
|
#!/usr/bin/python
# build_native.py
# Build native codes
#
# Please use cocos console instead
import sys
import os, os.path
import shutil
import urllib
import webbrowser
from optparse import OptionParser
os.system('cls')
current_dir = os.path.dirname(os.path.realpath(__file__))
command = 'php artisan serve'
print command
webbrowser.open('http://127.0.0.1:8000')
os.system(command)
|
from .base import Experiment
from .dnn import DNNExperiment
from .train import TrainingExperiment
from .prune import PruningExperiment
from .attack import AttackExperiment
from .quantize import QuantizeExperiment
|
"""
Problem description: Niu Niu's workbook contains a permutation A of length n,
made up of the n numbers from 1 to n. For some reason a few positions (no more
than 10) have become unreadable, but Niu Niu remembers that the number of
ordered pairs in the sequence is k, where an ordered pair is a pair of indices
satisfying i < j and A[i] < A[j]. Help Niu Niu count how many valid
permutations satisfy this requirement.
Input description:
    Each input contains one test case. The first line of a test case holds two
    integers n and k (1 <= n <= 100, 0 <= k <= 1000000000). The next line holds
    n numbers describing the permutation A, where an entry equal to 0 marks an
    unreadable position (at most 10 of them).
Output description:
    Print one line with the number of valid permutations.
Example 1
Input
5 5
4 0 0 2 0
Output
2
"""
import sys
import itertools
class Solution:
def get_rs(self, arr, k):
rs = 0
num1 = self.counter(arr)
k -= num1
lacked = set(range(1, len(arr) + 1)) - set(filter(lambda x: x != 0, arr))
lacked_pos = list()
origin_pos = list()
index = 0
while index < len(arr):
if arr[index] == 0:
lacked_pos.append(index)
else:
origin_pos.append(index)
index += 1
datas = itertools.permutations(lacked)
for data in datas:
num2 = self.counter(data)
num3 = self.count_two_arr(arr, data, origin_pos, lacked_pos)
if num2 + num3 == k:
rs += 1
print(rs)
def counter(self, arr):
count = 0
i = 0
while i < len(arr) - 1:
j = i + 1
while j < len(arr):
if arr[i] != 0 and arr[i] < arr[j]:
count += 1
j += 1
i += 1
return count
def count_two_arr(self, arr1, arr2, origin, lacked):
count = 0
for index, i in enumerate(lacked):
for j in origin:
if (j < i and arr1[j] < arr2[index]) or (i < j and arr2[index] < arr1[j]):
count += 1
return count
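# Brute-force approach: since at most 10 positions are unreadable, Solution
# tries every permutation of the missing values (at most 10! ~ 3.6 million)
# and counts the ordered pairs each one produces.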
if __name__ == '__main__':
a, b = map(int, sys.stdin.readline().split())
args = list(map(int, sys.stdin.readline().split()))
s = Solution()
s.get_rs(args, b)
|
from __future__ import unicode_literals
import logging
import os
import signal
import socket
import sys
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand
from mesos.interface import mesos_pb2
try:
from mesos.native import MesosSchedulerDriver
except ImportError:
# ensure there are not undefined errors when generating docs on a system without mesos bindings
class MesosSchedulerDriver(object):
def __init__(self, *args, **kargs):
pass
from scheduler.manager import scheduler_mgr
from scheduler.scale_scheduler import ScaleScheduler
logger = logging.getLogger(__name__)
#TODO: make these command options
MESOS_CHECKPOINT = False
MESOS_AUTHENTICATE = False
DEFAULT_PRINCIPLE = None
DEFAULT_SECRET = None
GLOBAL_SHUTDOWN = None
class Command(BaseCommand):
"""Command that launches the Scale scheduler
"""
help = 'Launches the Scale scheduler'
def add_arguments(self, parser):
parser.add_argument('-m', '--master', action='store',
default=settings.MESOS_MASTER,
help='The master to connect to')
def handle(self, *args, **options):
"""See :meth:`django.core.management.base.BaseCommand.handle`.
This method starts the scheduler.
"""
# Register a listener to handle clean shutdowns
signal.signal(signal.SIGTERM, self._onsigterm)
# Set up global shutdown
global GLOBAL_SHUTDOWN
GLOBAL_SHUTDOWN = self._shutdown
# TODO: clean this up
mesos_master = options.get('master')
logger.info('Scale Scheduler %s', settings.VERSION)
try:
scheduler_zk = settings.SCHEDULER_ZK
except:
scheduler_zk = None
if scheduler_zk is not None:
from scheduler import cluster_utils
my_id = socket.gethostname()
cluster_utils.wait_for_leader(scheduler_zk, my_id, self.run_scheduler, mesos_master)
else:
# leader election is disabled
self.run_scheduler(mesos_master)
def run_scheduler(self, mesos_master):
logger.info("I am the leader")
self.scheduler = ScaleScheduler()
self.scheduler.initialize()
scheduler_mgr.hostname = socket.getfqdn()
framework = mesos_pb2.FrameworkInfo()
framework.user = '' # Have Mesos fill in the current user.
framework.name = os.getenv('DCOS_PACKAGE_FRAMEWORK_NAME', 'Scale')
webserver_address = os.getenv('SCALE_WEBSERVER_ADDRESS')
if webserver_address:
framework.webui_url = webserver_address
logger.info('Connecting to Mesos master at %s', mesos_master)
# TODO(vinod): Make checkpointing the default when it is default on the slave.
if MESOS_CHECKPOINT:
logger.info('Enabling checkpoint for the framework')
framework.checkpoint = True
if MESOS_AUTHENTICATE:
logger.info('Enabling authentication for the framework')
if not DEFAULT_PRINCIPLE:
logger.error('Expecting authentication principal in the environment')
sys.exit(1)
if not DEFAULT_SECRET:
logger.error('Expecting authentication secret in the environment')
sys.exit(1)
credential = mesos_pb2.Credential()
credential.principal = DEFAULT_PRINCIPLE
credential.secret = DEFAULT_SECRET
self.driver = MesosSchedulerDriver(self.scheduler, framework, mesos_master, credential)
else:
self.driver = MesosSchedulerDriver(self.scheduler, framework, mesos_master)
try:
status = 0 if self.driver.run() == mesos_pb2.DRIVER_STOPPED else 1
except:
status = 1
logger.exception('Mesos Scheduler Driver returned an exception')
#Perform a shut down and return any non-zero status
shutdown_status = self._shutdown()
status = status or shutdown_status
logger.info('Exiting...')
sys.exit(status)
def _onsigterm(self, signum, _frame):
"""See signal callback registration: :py:func:`signal.signal`.
This callback performs a clean shutdown when a TERM signal is received.
"""
logger.info('Scheduler command terminated due to signal: %i', signum)
self._shutdown()
sys.exit(1)
def _shutdown(self):
"""Performs any clean up required by this command.
:returns: The exit status code based on whether the shutdown operation was clean with no exceptions.
:rtype: int
"""
status = 0
try:
if self.scheduler:
self.scheduler.shutdown()
except:
logger.exception('Failed to properly shutdown Scale scheduler.')
status = 1
try:
if self.driver:
self.driver.stop()
except:
logger.exception('Failed to properly stop Mesos driver.')
status = 1
return status
|
try:
import galsim
except ImportError:
class GalSimWCS(object):
def __init__(self, *args, **kwargs):
raise NotImplementedError(
'Unable to import galsim. The GalSimWCS interface is not available.')
else:
from .decaminfo import DECamInfo
from .PixelMapCollection import PixelMapCollection
from .DESMaps import DESMaps
import os
import astropy.coordinates
import numpy as np
from . import files
class GalSimWCS(galsim.wcs.CelestialWCS):
"""A wrapper of the `PixelMapCollection` class that can be used as a galsim `WCS` type.
This can be constructed using either:
* an existing `PixelMapCollection` object (supplied as `pmc`), or
* a generic serialized PixelMapCollection in a YAML file,
(supplied by `yaml_file`), or
* The `DESMaps` class, which accesses files in the format of
the DES Y6A1_ASTROMETRY release, which one selects via
`use_DESMaps=True`. In this case one can supply names for
the `exposure_file,guts_file,resids_file,affine_file` that
the `DESMaps` needs, or any of these will use Y6A1 default
names if they are supplied as `None` (This is default
behavior).
Exactly one of these must be true: `pmc is not None`;
`yaml_file is not None`; `use_DESMaps`.
The WCS within the collection is selected by name. One can
either specify the name directly with `wcs_name`; or a name
can be constructed using DES conventions from an exposure
number `exp` and `ccdnum`.
:param pmc: An existing pixmappy.PixelMapCollection instance [default: None]
:param yaml_file: The yaml file with the PixelMapCollection
description. [default: None]
:param use_DESMaps: If `True`, use `DESMaps` to construct WCS. [default: False]
:param dir: Optional directory to prepend to all filename
arguments. [default: None. Note that if use_DESMaps is
True, then you can place a search path for setup files into
the environment variable CAL_PATH]
:param wcs_name: The name of the WCS within the
PixelMapCollection to use. [default:
None; either `wcs_name` or (`exp` and
`ccdnum`) is required. DESMaps require
the exp+ccdnum.]
:param exp: The exposure number of the desired WCS. [default: None]
:param ccdnum: The CCD number of the desired WCS. [default: None]
        :param exposure_file: FITS file holding binary table of DES
                              per-exposure info for DESMaps. [default:
                              None; if `use_DESMaps` then the file in the
                              Y6A1_ASTROMETRY release will be used in
                              this default case.]
        :param guts_file: YAML file holding static DECam distortions
                          for DESMaps. [default: None; same behavior
                          as above.]
        :param resids_file: FITS file holding 2d residual adjustment
                            maps for DESMaps. [default: None; same
                            behavior as above.]
        :param affine_file: FITS file holding time-dependent DECam CCD
                            affine tweaks for DESMaps. [default: None;
                            same behavior as above.]
:param origin: Optional origin position for the image coordinate system.
If provided, it should be a PositionD or
PositionI. [default: None]
:param cache: Cache this file's PixelMapCollection in the GalSimWCS.cache dict?
[default: True]
:param default_color: The default color to use if this WCS involves color terms and
`wcs.toWorld` or similar methods do not pass in a color value.
[default: None, which means an exception will be raised if no
color term is in the map and no color value is provided]
"""
_opt_params = { "origin" : galsim.PositionD, "ccdnum": int,
"dir" : str, "guts_file" : str,
"exposure_file" : str, "resids_file" : str, "affine_file" : str,
"default_color" : float}
_single_params = [ { "wcs_name" : str, "exp" : int },
{"yaml_file" : str, "use_DESMaps":bool}]
_takes_rng = False
info = DECamInfo()
cache = dict()
def __init__(self, pmc=None, yaml_file=None, use_DESMaps=False, dir=None,
wcs_name=None, exp=None, ccdnum=None,
exposure_file=None, guts_file=None, resids_file=None, affine_file=None,
origin=None, cache=True, default_color=None):
self._color = default_color
# Make sure only one method is in use:
count = int(pmc is not None) + int(yaml_file is not None) + int(use_DESMaps)
if count!=1:
raise TypeError("Must provide exactly one of yaml_file, pmc, or use_DESMaps")
if pmc is not None:
self._pmc = pmc
self._tag = 'pmc=%r'%pmc # Used in __repr__
if yaml_file is not None:
if dir is not None:
yaml_file = os.path.join(dir,yaml_file)
if yaml_file in self.cache:
pmc = self.cache[yaml_file]
else:
pmc = PixelMapCollection(yaml_file)
if cache:
self.cache[yaml_file] = pmc
self._tag = 'yaml_file=%r'%yaml_file
self._pmc = pmc
if use_DESMaps:
if exp is None or ccdnum is None:
raise TypeError("exp and ccdnum must be provided when using DESMaps")
self._tag = 'use_DESMaps=True'
if exposure_file is None:
exposure_file = files.default_exposure_file
else:
self._tag = self._tag + ', exposure_file=%s'%exposure_file
if guts_file is None:
guts_file = files.default_guts_file
else:
self._tag = self._tag + ', guts_file=%s'%guts_file
if resids_file is None:
resids_file = files.default_resids_file
else:
self._tag = self._tag + ', resids_file=%s'%resids_file
if affine_file is None:
affine_file = files.default_affine_file
else:
self._tag = self._tag + ', affine_file=%s'%affine_file
if dir is not None:
exposure_file = os.path.join(dir,exposure_file)
guts_file = os.path.join(dir,guts_file)
resids_file = os.path.join(dir,resids_file)
affine_file = os.path.join(dir,affine_file)
self._tag = self._tag + ', dir=%s'%dir
# We'll cache the DESMaps object by the exposure_file name
if exposure_file in self.cache:
pmc = self.cache[exposure_file]
else:
pmc = DESMaps(guts_file=guts_file,
exposure_file=exposure_file,
resids_file=resids_file,
affine_file=affine_file)
if cache:
self.cache[exposure_file] = pmc
self._pmc = pmc
# Now extract the desired WCS from our PixelMapCollection or DESMaps
if use_DESMaps:
if exp is None or ccdnum is None:
raise TypeError("DESMaps require exp,ccdnum")
ccdname = self.info.ccddict[ccdnum]
self._wcs_name = 'D%s/%s'%(exp, ccdname) #Used by __eq__
self._wcs = pmc.getDESWCS(exp, ccdname)
self._tag = self._tag + ', exp=%r, ccdnum=%r'%(exp,ccdnum)
else:
if wcs_name is None:
if exp is None or ccdnum is None:
raise TypeError("Must provide either wcs_name or (exp,ccdnum)")
ccdname = self.info.ccddict[ccdnum]
self._wcs_name = 'D%s/%s'%(exp, ccdname)
elif exp is not None or ccdnum is not None:
raise TypeError("Cannot provide both wcs_name and (exp,ccdnum)")
else:
self._wcs_name = wcs_name
self._wcs = pmc.getWCS(self._wcs_name)
self._tag = self._tag + ', wcs_name=%r'%self._wcs_name
# Set origin, if any
if origin is None:
self._origin = galsim.PositionD(0,0)
else:
if isinstance(origin, galsim.PositionI):
origin = galsim.PositionD(origin.x, origin.y)
elif not isinstance(origin, galsim.PositionD):
raise TypeError("origin must be a PositionD or PositionI argument")
self._origin = origin
@property
def pmc(self): return self._pmc
@property
def wcs_name(self):
# Note: older versions of pixmappy didn't set _wcs_name if it was None.
# This is fixed above, but to accommodate reading in older serialized pixmappy
# objects, we check for the attribute existing here.
return self._wcs_name if hasattr(self,'_wcs_name') else None
@property
def origin(self): return self._origin
@classmethod
def clear_cache(cls):
"""Clear the cache of PixelMapCollections.
The PixelMapCollection objects that are read in from a file are often rather large,
and a typical use case involves getting many wcs objects from the same file.
        So the GalSimWCS class caches them to avoid needing to read in the file repeatedly for
each WCS you want to extract from it.
However, if you are done with a particular input file, you might not want to keep
it around anymore. So ``pixmappy.GalSimWCS.clear_cache()`` will release the memory
currently being used by the cache.
You can also modify the cache yourself if you want (say to remove a particular element
        rather than all objects). It is a dict indexed by the yaml filename or the exposure
filename for DESMaps.
"""
cls.cache.clear()
def _radec(self, x, y, c=None):
ra, dec = self._wcs.toSky(x, y, c=c )
ra *= galsim.degrees / galsim.radians
dec *= galsim.degrees / galsim.radians
return ra, dec
def _xy(self, ra, dec, c=None):
ra *= galsim.radians / galsim.degrees
dec *= galsim.radians / galsim.degrees
x, y = self._wcs.toPix(ra, dec, c=c)
return x, y
def _newOrigin(self, origin):
ret = self.copy()
ret._origin = origin
return ret
def _writeHeader(self, header, bounds):
raise NotImplementedError("Cannot write PixelMap to a fits header")
@staticmethod
def _readHeader(header):
raise NotImplementedError("Cannot read PixelMap from a fits header")
def copy(self):
# The copy module version of copying the dict works fine here.
import copy
return copy.copy(self)
def __eq__(self, other):
return (isinstance(other, GalSimWCS) and
self._tag == other._tag and
self.wcs_name == other.wcs_name and
self.origin == other.origin )
def __repr__(self):
# Should eval back into itself
s = "pixmappy.GalSimWCS(%s, origin=%r"%(self._tag, self.origin)
if self._color is not None:
s += ', default_color=%r'%self._color
s += ')'
return s
def __hash__(self): return hash(repr(self))
def __getstate__(self):
# The naive pickling works, but it includes _pmc, which is huge, and not actually
# necessary for the functioning of the object. (It's just for information purposes
# really.) So remove it from the dict to be pickled.
d = self.__dict__.copy()
d['_pmc'] = None
return d
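    # A minimal usage sketch (assumptions: a local 'des_maps.yaml' holding a serialized
    # PixelMapCollection that contains the exposure/CCD named below; values are illustrative,
    # and a WCS with color terms would also need default_color). Not executed on import.
    def _example_usage():
        wcs = GalSimWCS(yaml_file='des_maps.yaml', exp=375294, ccdnum=14)
        sky = wcs.toWorld(galsim.PositionD(1024.5, 2048.5))  # pixel -> celestial coordinates
        pix = wcs.toImage(sky)                               # celestial -> pixel coordinates
        GalSimWCS.clear_cache()                              # drop the cached PixelMapCollection
        return sky, pix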
|
#! python3
from time import perf_counter
import re
from curriculummapper import Course, Curriculum
def main():
'''
An example scraper for the case western MS Data Science program webpage
'''
# initiating an empty curriculum object
true_start_time = perf_counter()
school_name = "Case Western"
degree_name = "MS in Data Science"
curriculum = Curriculum(school_name, degree_name, "CSDS",
colored_subjects=["STAT", "MATH"])
url_list = ["https://bulletin.case.edu/schoolofengineering/compdatasci/", # COMPSCI # noqa: E501
"https://bulletin.case.edu/collegeofartsandsciences/mathematics/", # MATH # noqa: E501
"https://bulletin.case.edu/schoolofengineering/eleccompsyseng/", # COMPSCI # noqa: E501
"https://bulletin.case.edu/course-descriptions/dsci/",
"https://bulletin.case.edu/course-descriptions/stat/"
]
for URL in url_list:
print("Politely checking: %s..." % URL)
soup = curriculum.get_soup(URL)
# Getting prereq names from course tables first
print("Scraping tables...")
for table_tag in soup.find_all("table", {"class": "sc_courselist"}):
for row_tag in table_tag.findChildren("tr"):
cells = row_tag.findChildren("td")
try:
course_title = str(cells[1].string)
course_id = curriculum.course_id_list_from_string(
str(row_tag.findChildren("a")[0].string))[0]
'''
subject_code, course_code = course_id_to_list(course_id)
curriculum.add_course(Course(subject_code,
course_code,
course_title))
'''
curriculum.add_courses_from_string(course_id)
curriculum.course_dict[course_id].append_course_title(
course_title)
except Exception:
pass
        # inspecting the source reveals that each course sits neatly in a div block
        # with the courseblock class. Iterating through each courseblock
print("Scraping courseblocks...")
for course_tag in soup.find_all("div", {"class": "courseblock"}):
# then the title contains the "AAAA 000?. COURSE TITLE. n Units."
blocktitle_tag = course_tag.find("p", {"class": "courseblocktitle"}).find("strong") # noqa: E501
# convert the content to UNICODE to minimize memory use
blocktitle_string = str(blocktitle_tag.string)
# print(blocktitle_string)
# search for the first instance in blocktitle_string
# that matches course_search
course_id = curriculum.course_id_list_from_string(
blocktitle_string)[0]
subject_code, course_code = curriculum.course_id_to_list(
course_id)
            # apparently some universities have letters in their course codes,
            # so leave it as a string. Remove the spaces and periods though.
# course title
title_search = re.compile(r"(?<=\s\s)([^!]*)(?=\.\s\s)")
title_match = re.findall(title_search, blocktitle_string)
course_title = title_match[0]
# Now extracting course desc
blockdesc_tag = course_tag.find("p", {"class": "courseblockdesc"})
course_description = blockdesc_tag.contents[0].split("Offered as")[0]\
.split("Prereq:")[0]\
.split("Recommended preparation:")[0] # noqa: E501
# Take everything in blockdesc, stitch into one long string
glued_string = ""
for item in blockdesc_tag.contents:
try:
for z in item.string.split("\n"):
glued_string += z
except Exception:
pass
finally:
pass
            # Looking for the sentence that starts with "Prereq:" or "preparation"
# print(glued_string)
prereq_match = re.findall(r"(?<=Prereq: |ration: )([^!?.]*)",
glued_string)
            # blank list to hold Course objects
prereqs = []
if prereq_match is not None:
try:
# find every instance of a course in the remaining string
prereqs = curriculum.course_list_from_string(
prereq_match[0])
except IndexError:
# print("No prereqs.")
pass
            # Looking for the sentence that starts with "Offered as"
alias_match = re.findall(r"(?<=Offered as )([^!?.]*)",
glued_string)
aliases = []
if alias_match is not None:
try:
# find every instance of a course in the remaining string
aliases = curriculum.course_id_list_from_string(
str(alias_match[0]))
except IndexError:
pass
curriculum.add_course(Course(subject_code, course_code,
course_title, course_description,
prereqs, aliases))
curriculum.print_all()
true_finish_time = perf_counter()
print("\t\tTOTAL TIME: %.6f" % (true_finish_time - true_start_time))
if __name__ == "__main__":
main()
|
import re
import datetime
from commands.core import Command
class Note:
def __init__(self):
self.name = ''
self.dz = ''
self.date = datetime.datetime.today()
def __str__(self):
tt = self.date.timetuple()
        weekday = self.weekday(self.get_calendar()[2] % 7)  # isocalendar(): Mon=1 .. Sun=7, so map 7 -> 0
return '\n{} \n# {}\n@ {}'.format(self.name.upper(),
'{}, {}'.format(weekday, '{} {}'.format(tt.tm_mday, self.month(tt.tm_mon))),
self.dz)
@staticmethod
def month(month):
return {
1: "января",
2: "февраля",
3: "марта",
4: "апреля",
5: "мая",
6: "июня",
7: "июля",
8: "августа",
9: "сентября",
10: "октября",
11: "ноября",
12: "декабря"
}.get(month)
@staticmethod
def weekday(wday):
return {
0: "Воскресенье",
1: "Понедельник",
2: "Вторник",
3: "Среда",
4: "Четверг",
5: "Пятница",
6: "Суббота",
}.get(wday)
def get_calendar(self):
return self.date.isocalendar()
class HomeworkCommand(Command):
def __init__(self, bot_account, group_id):
super().__init__()
self._triggers = ['hw', 'Hw']
self.account = bot_account
self.group_id = group_id
self.recent_notes = []
self.last_id = 0
    def proceed(self, member, message, attachments, group, **kwargs):
        # split the incoming message into tokens; the trigger is expected as the second token
        args = message.split()
        if len(args) > 1 and args[1] in self._triggers:
            return self.week()
        return False
def sort(self, result):
return sorted(result, key=self.sortByDate)
@staticmethod
def sortByDate(note):
return note.date.timetuple().tm_yday
@staticmethod
def parse_lessons(text):
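        """Split a wall post into Note objects, one per lesson.
        Assumed post format (inferred from the regular expressions below; the real
        group's markup may differ), with lessons separated by '_':
            # Алгебра
            📅 14.02
            📚 №125, №130
            _
            # Физика
            📅 15.02
            📚 параграф 12
        """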
calendar = '📅'
books = '📚'
name_sep = '#'
lessons = text.rsplit('_')
notes = []
for lesson in lessons:
note = Note()
try:
note.name = re.search(name_sep + " ?[А-я ]*", lesson).group().strip("# \n")
date = re.search(calendar + " ?[^`]*{}".format(books), lesson).group().strip("{}{} \n".format(calendar, books))
date = '{}.{}'.format(date, datetime.datetime.today().timetuple().tm_year)
date = datetime.datetime.strptime(date, '%d.%m.%Y').date()
note.date = date
note.dz = re.search(books + " ?[^`]*\n?", lesson).group().strip("{} \n".format(books))
notes.append(note)
            except Exception:
continue
return notes
def update_posts(self,account, group_id, count):
if group_id > 0:
group_id *= -1
print(account)
posts = account.method('wall.get', {
'owner_id': group_id,
'count': count,
'fields': 'domain, lists',
'filter': 'all'
})
posts = posts.get('items')
recent_notes = []
for post in posts:
text = post.get('text')
            # only parse posts that start with the homework hashtag (#ДЗ / #дз)
            if re.match(r'^#[Дд][Зз]', text):
lessons = self.parse_lessons(text.lstrip('#дзДЗ'))
for note in lessons:
if note is not None:
recent_notes.append(note)
return recent_notes
def get_this_week(self, count):
today = datetime.datetime.today()
td = today.isocalendar()
td_year, td_week, td_wday = td[0], td[1], td[2]
result = []
for note in self.update_posts(self.account, self.group_id, count):
note_calendar = note.get_calendar()
nt_year, nt_week, nt_wday = note_calendar[0], note_calendar[1], note_calendar[2]
if nt_week == td_week and nt_wday >= td_wday:
if td_wday not in [6, 7]:
td_tt = today.timetuple()
if td_wday == nt_wday and td_tt.tm_hour >= 13:
continue
result.append(note)
elif nt_week == td_week + 1:
result.append(note)
return self.sort(result)
    def week(self):
        result = ''
        for note in self.get_this_week(50):
            result += '{}\n{}'.format(str(note), '-' * 20)
        if not result:
            result = 'Записей нет.'
        return result
|
import logging
from crl.interactivesessions.shells.remotemodules import servers
from crl.interactivesessions.shells.remotemodules.msgs import (
ExecCommandErrorObj)
from crl.interactivesessions.shells.remotemodules.compatibility import to_bytes
from .serverterminal import (
LineServerBase,
LineServerExit)
__copyright__ = 'Copyright (C) 2019, Nokia'
LOGGER = logging.getLogger(__name__)
class PromptPythonServer(LineServerBase, servers.PythonServer):
_prompt = b'>>> '
def __init__(self):
LineServerBase.__init__(self)
servers.PythonServer.__init__(self)
self.processlocalsysmanager = None
def _server_setup(self):
self._setup_standard_streams()
self._write(self._prompt)
def _setup_standard_streams(self):
self._setup_own_out()
self._setup_messaging_inout()
def _setup_own_out(self):
self.stdout = self._inout.write_file
self.stderr = self._inout.write_file
def _setup_messaging_inout(self):
servers.sys.stdin = self._inout.read_file
servers.sys.stdout = self._inout.write_file
servers.sys.stderr = self._inout.write_file
def _handle_line(self, line):
self._send_reply_out(self._exec_and_create_reply_msg(line))
def _exec_and_create_reply_msg(self, cmd):
return self._exec_command(cmd)
def _exec_command(self, cmd):
try:
ret = self.pythoncmdline.exec_command(cmd)
return b'' if ret is None else to_bytes(repr(ret))
except SystemExit:
LOGGER.debug('PromptPythonServer: Exiting')
raise LineServerExit
except Exception as e: # pylint: disable=broad-except
return str(ExecCommandErrorObj(e, cmd))
def _send_reply_out(self, out):
LOGGER.debug('PromptPythonServer: %s', out)
self._write(out + self._prompt)
def _write(self, s):
LOGGER.debug('Writing %s', s)
self._strcomm.comm.write(s)
@property
def _stop_cmd(self):
return 'exit()'
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import typing
from bravado_core.spec import Spec
from swagger_spec_compatibility.rules.common import BaseRule
from swagger_spec_compatibility.rules.common import Level
from swagger_spec_compatibility.rules.common import RuleType
from swagger_spec_compatibility.rules.common import ValidationMessage
from swagger_spec_compatibility.spec_utils import get_endpoints
class DeletedEndpoint(BaseRule):
description = \
'An endpoint has been removed. This change is not backward compatible as holders of stale swagger ' \
'specs (like old mobile Apps) could continue to call the removed endpoint and this will cause an ' \
'HTTP error status code (usually an HTTP/400 or HTTP/404)'
error_code = 'MIS-E001'
error_level = Level.ERROR
rule_type = RuleType.MISCELLANEOUS
short_name = 'Delete Endpoint'
@classmethod
def validate(cls, left_spec, right_spec):
# type: (Spec, Spec) -> typing.Iterable[ValidationMessage]
endpoints_left_spec = get_endpoints(left_spec)
        endpoints_right_spec = get_endpoints(right_spec)
return (
cls.validation_message(str(removed_endpoint))
            for removed_endpoint in endpoints_left_spec - endpoints_right_spec
)
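# Minimal usage sketch (assumptions: `old_dict` and `new_dict` are Swagger 2.0 spec
# dicts already loaded from JSON/YAML; Spec.from_dict comes from bravado-core).
# Illustrative only; not called anywhere in this module.
def _example_deleted_endpoint_check(old_dict, new_dict):
    # type: (typing.Mapping, typing.Mapping) -> typing.List[ValidationMessage]
    old_spec = Spec.from_dict(old_dict)
    new_spec = Spec.from_dict(new_dict)
    return list(DeletedEndpoint.validate(left_spec=old_spec, right_spec=new_spec))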
|
import os
import subprocess
import sys
import shutil
import json
import argparse
import jinja2 as jinja
import platform
import git
PKG_ROOT = 'ansibledriver'
PKG_INFO = 'pkg_info.json'
DIST_DIR = 'dist'
WHL_FORMAT = 'ansible_lifecycle_driver-{version}-py3-none-any.whl'
DOCS_FORMAT = 'ansible-lifecycle-driver-{version}-docs'
DOCS_DIR = 'docs'
DOCKER_WHLS_DIR = 'whls'
DOCKER_PATH = 'docker'
DOCKER_IMG_NAME = 'ansible-lifecycle-driver'
DOCKER_REGISTRY = 'ibmcom'
HELM_CHART_PATH = os.path.join('helm', 'ansiblelifecycledriver')
HELM_CHART_NAME = 'ansiblelifecycledriver'
HELM_CHART_NAME_FORMAT = 'ansiblelifecycledriver-{0}.tgz'
parser=argparse.ArgumentParser()
parser.add_argument('--release', help='Include this flag to publish this build as an official release', default=False, action='store_true')
parser.add_argument('--version', help='version to set for the release')
parser.add_argument('--post-version', help='version to set after the release')
parser.add_argument('--ignition-version', help='Set the ignition version for the release')
parser.add_argument('--skip-tests', default=False, action='store_true')
parser.add_argument('--skip-docker', default=False, action='store_true')
parser.add_argument('--skip-helm', default=False, action='store_true')
parser.add_argument('--ignition-whl', help='Add a custom Ignition whl to the build by path (useful when working with a dev version of Ignition)')
args = parser.parse_args()
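# Example invocations (illustrative; assumes this script is saved as build.py):
#   python3 build.py                                          # dev build: tests, whl, docs, docker, helm
#   python3 build.py --skip-tests --skip-docker --skip-helm   # wheel and docs only
#   python3 build.py --release --version 1.1.0 --post-version 1.2.0.dev0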
class BuildError(Exception):
pass
class Secret:
def __init__(self, value):
self.value = value
class Stage:
def __init__(self, builder, title):
self.builder = builder
self.title = title
self.exit_reason = None
self.exit_code = 0
def __enter__(self):
print('================================================')
print('{0}'.format(self.title))
print('================================================')
return self
def __exit__(self, type, err_value, traceback):
        if err_value is not None:
# Legit python error thrown
print('ERROR: {0}\n'.format(str(err_value)))
try:
self.builder.report()
            except Exception:
pass
return
if self.exit_code != 0:
if self.exit_reason != None:
print(self.exit_reason)
self.builder.report()
exit(self.exit_code)
else:
print('')
def _cmd_exit(self, exit_code):
self.exit_code = exit_code
def exit_with_error(self, exit_code, reason):
self.exit_reason = reason
self.exit_code = exit_code
def run_cmd(self, *cmd):
print('Executing: {0}'.format(' '.join(cmd)))
working_dir = self.builder.project_path if self.builder.project_path != None and self.builder.project_path != '' else None
process = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr, cwd=working_dir)
process.communicate()
if process.returncode != 0:
return self._cmd_exit(process.returncode)
class Builder:
def __init__(self):
self.project_path = os.path.dirname(__file__)
self.project_path_is_current_dir = False
if self.project_path == None or self.project_path == '':
self.project_path_is_current_dir = True
self.stages = []
self.project_version = None
self.py_normalized_version = None
def report(self):
print('================================================')
print('Build Result')
print('================================================')
for s in self.stages:
if s.exit_code == 0:
print(' {0} - OK'.format(s.title))
else:
print(' {0} - FAILED'.format(s.title))
print(' ')
def stage(self, title):
stage = Stage(self, title)
self.stages.append(stage)
return stage
def _announce_build_location(self):
if self.project_path_is_current_dir:
print('Building at: ./')
else:
print('Building at: {0}'.format(self.project_path))
def doIt(self):
self._announce_build_location()
if args.release == True:
self.release()
else:
self.build()
self.report()
def build(self):
self.determine_version()
self.init_artifacts_directory()
        if not args.skip_tests:
            self.run_unit_tests()
self.build_python_wheel()
self.pkg_docs()
if args.skip_docker is not True:
self.build_docker_image()
if args.skip_helm is not True:
self.build_helm_chart()
def release(self):
if args.version is None:
raise ValueError('Must set --version when releasing')
if args.post_version is None:
raise ValueError('Must set --post-version when releasing')
self.set_version()
self.build()
if args.skip_docker is not True:
self.push_docker_image()
self.push_release_git_changes()
self.set_post_version()
self.push_post_release_git_changes()
def init_artifacts_directory(self):
self.artifacts_path = os.path.join(self.project_path, 'release-artifacts')
if os.path.exists(self.artifacts_path):
shutil.rmtree(self.artifacts_path)
os.makedirs(self.artifacts_path)
def set_version(self):
with self.stage('Updating Release Version') as s:
pkg_info_path = os.path.join(self.project_path, PKG_ROOT, PKG_INFO)
print('Updating version in {0} to {1}'.format(pkg_info_path, args.version))
with open(pkg_info_path, 'r') as f:
pkg_info_data = json.load(f)
pkg_info_data['version'] = args.version
if args.ignition_version:
print('Updating Ignition version in {0} to {1}'.format(pkg_info_path, args.ignition_version))
pkg_info_data['ignition-version'] = '=={0}'.format(args.ignition_version)
with open(pkg_info_path, 'w') as f:
json.dump(pkg_info_data, f)
def set_post_version(self):
with self.stage('Updating Post Release Version') as s:
pkg_info_path = os.path.join(self.project_path, PKG_ROOT, PKG_INFO)
print('Updating version in {0} to {1}'.format(pkg_info_path, args.post_version))
with open(pkg_info_path, 'r') as f:
pkg_info_data = json.load(f)
pkg_info_data['version'] = args.post_version
with open(pkg_info_path, 'w') as f:
json.dump(pkg_info_data, f)
def determine_version(self):
with self.stage('Gathering Version') as s:
pkg_info_path = os.path.join(self.project_path, PKG_ROOT, PKG_INFO)
print('Reading version from {0}'.format(pkg_info_path))
with open(pkg_info_path, 'r') as f:
pkg_info_data = json.load(f)
if 'version' not in pkg_info_data:
return s.exit_with_error(1, '\'version\' not found in {0}'.format(pkg_info_path))
else:
self.project_version = pkg_info_data['version']
print('Found version is: {0}'.format(self.project_version))
self.py_normalized_version = pkg_info_data['version']
self.py_normalized_version = self.py_normalized_version.replace('-alpha-', 'a')
self.py_normalized_version = self.py_normalized_version.replace('-beta-', 'b')
def run_unit_tests(self):
with self.stage('Run Unit Tests') as s:
s.run_cmd('python3', '-m', 'unittest')
def build_python_wheel(self):
with self.stage('Build Wheel') as s:
print('Cleaning directory: {0}'.format(DIST_DIR))
dist_path = os.path.join(self.project_path, DIST_DIR)
if os.path.exists(dist_path):
shutil.rmtree(dist_path)
s.run_cmd('python3', 'setup.py', 'bdist_wheel')
def build_docker_image(self):
self._build_docker_image('Build Docker Image', os.path.join(self.project_path, DOCKER_PATH), DOCKER_IMG_NAME)
def _build_docker_image(self, title, docker_context_path, docker_img_name):
with self.stage(title) as s:
docker_whls_path = os.path.join(docker_context_path, DOCKER_WHLS_DIR)
print('Cleaning directory: {0}'.format(docker_whls_path))
if os.path.exists(docker_whls_path):
shutil.rmtree(docker_whls_path)
os.mkdir(docker_whls_path)
src_whl_path = os.path.join(self.project_path, DIST_DIR, WHL_FORMAT.format(version=self.py_normalized_version))
if not os.path.exists(src_whl_path):
return s.exit_with_error(1, 'Could not find whl at: {0}'.format(src_whl_path))
else:
dest_whl = os.path.join(docker_whls_path, WHL_FORMAT.format(version=self.py_normalized_version))
shutil.copyfile(src_whl_path, dest_whl)
if args.ignition_whl is not None:
if not os.path.exists(args.ignition_whl):
return s.exit_with_error(1, 'Could not find Ignition whl at: {0}'.format(args.ignition_whl))
dest_ign_whl = os.path.join(docker_whls_path, os.path.basename(args.ignition_whl))
print('Copying Ignition whl at {0} to {1}'.format(args.ignition_whl, dest_ign_whl))
shutil.copyfile(args.ignition_whl, dest_ign_whl)
img_tag = '{0}:{1}'.format(docker_img_name, self.project_version)
s.run_cmd('docker', 'build', '-t', img_tag, '{0}'.format(docker_context_path))
def build_helm_chart(self):
with self.stage('Build Helm Chart') as s:
tmp_helm_path = os.path.join(self.project_path, 'helm', 'build', HELM_CHART_NAME)
if os.path.exists(tmp_helm_path):
shutil.rmtree(tmp_helm_path)
os.makedirs(tmp_helm_path)
helm_chart_path = os.path.join(self.project_path, HELM_CHART_PATH)
template_loader = jinja.FileSystemLoader(searchpath=helm_chart_path)
template_env = jinja.Environment(variable_start_string='${', variable_end_string='}', loader=template_loader)
resolvable_props = {'version': self.project_version}
for item in os.listdir(helm_chart_path):
full_item_path = os.path.join(helm_chart_path, item)
if os.path.isdir(full_item_path):
self._template_helm_chart_directory(helm_chart_path, template_env, full_item_path, tmp_helm_path, resolvable_props)
else:
self._template_helm_chart_file(helm_chart_path, template_env, full_item_path, tmp_helm_path, resolvable_props)
pkg_path = os.path.join(self.project_path, 'pkg')
s.run_cmd('helm', 'package', '--destination', self.artifacts_path, '{0}'.format(tmp_helm_path))
shutil.rmtree(os.path.join(self.project_path, 'helm', 'build'))
def _template_helm_chart_directory(self, base_path, template_env, orig_dir_path, target_parent_path, resolvable_props):
orig_dir_name = os.path.basename(orig_dir_path)
new_dir_path = os.path.join(target_parent_path, orig_dir_name)
if os.path.exists(new_dir_path):
shutil.rmtree(new_dir_path)
else:
os.mkdir(new_dir_path)
for item in os.listdir(orig_dir_path):
full_item_path = os.path.join(orig_dir_path, item)
if os.path.isdir(full_item_path):
self._template_helm_chart_directory(base_path, template_env, full_item_path, new_dir_path, resolvable_props)
else:
self._template_helm_chart_file(base_path, template_env, full_item_path, new_dir_path, resolvable_props)
def _template_helm_chart_file(self, base_path, template_env, orig_file_path, target_parent_path, resolvable_props):
file_rel_path = os.path.relpath(orig_file_path, base_path)
template = template_env.get_template(file_rel_path)
output = template.render(resolvable_props)
orig_file_name = os.path.basename(orig_file_path)
new_file_path = os.path.join(target_parent_path, orig_file_name)
with open(new_file_path, 'w') as f:
f.write(output)
def push_docker_image(self):
self._push_docker_image('Push Docker Image', '{0}:{1}'.format(DOCKER_IMG_NAME, self.project_version))
def _push_docker_image(self, title, current_docker_img_tag):
with self.stage(title) as s:
new_tag = DOCKER_REGISTRY + '/' + current_docker_img_tag
s.run_cmd('docker', 'tag', current_docker_img_tag, new_tag)
s.run_cmd('docker', 'push', new_tag)
def pkg_docs(self):
with self.stage('Package Docs') as s:
print('Packaging docs at {0}'.format(DOCS_DIR))
docs_output = DOCS_FORMAT.format(version=self.project_version)
docs_output_file = os.path.join(self.artifacts_path, docs_output + '.tgz')
transform_command = 's/{0}/{1}/'.format(DOCS_DIR, docs_output)
# Note that a system running on Mac will return 'Darwin' for platform.system()
if platform.system() == 'Darwin':
transform_command = '/{0}/{1}/'.format(DOCS_DIR, docs_output)
s.run_cmd('tar', '-cvz', '-s', transform_command, '-f', docs_output_file, DOCS_DIR+'/')
else:
s.run_cmd('tar', '-cvzf', docs_output_file, DOCS_DIR+'/', '--transform', transform_command)
def push_release_git_changes(self):
with self.stage('Commit Release Changes') as s:
repo = git.Repo(self.project_path)
repo.index.add([os.path.join(PKG_ROOT, PKG_INFO)])
repo.index.commit('Update version for release')
if args.version in repo.tags:
repo.delete_tag(args.version)
repo.create_tag(args.version)
def push_post_release_git_changes(self):
with self.stage('Commit Post Release Changes') as s:
repo = git.Repo(self.project_path)
repo.index.add([os.path.join(PKG_ROOT, PKG_INFO)])
repo.index.commit('Update version for development')
origin = repo.remote('origin')
origin.push(tags=True)
origin.push()
def main():
builder = Builder()
builder.doIt()
if __name__== "__main__":
main()
|
from milvus import Milvus, Prepare, IndexType, Status
import random
milvus = Milvus()
# Connect to the Milvus server; change host and port to the correct values for your deployment
milvus.connect(host='localhost', port='33001')
# Table name is defined
table_name = 'table_'+str(random.randint(0,100))
# Create table: table name, vector dimension and index type
milvus.create_table(Prepare.table_schema(table_name, dimension=256, index_type=IndexType.IDMAP))
# Add 20 256-dim-vectors into table
vectors = Prepare.records([[random.random()for _ in range(256)] for _ in range(20)])
milvus.add_vectors(table_name=table_name, records=vectors)
# Get table row count
_, result = milvus.get_table_row_count(table_name=table_name)
print('Table {}, row counts: {}'.format(table_name, result))
|
#!/usr/bin/env python3
import aocd
YEAR = 2021
DAY = 8
DIGIT_SEGS = {
0: 'abcefg',
1: 'cf',
2: 'acdeg',
3: 'acdfg',
4: 'bcdf',
5: 'abdfg',
6: 'abdefg',
7: 'acf',
8: 'abcdefg',
9: 'abcdfg',
}
SEG_DIGITS = {v: k for k, v in DIGIT_SEGS.items()}
def part_a(inlist):
count = 0
for _, b in inlist:
for i in b:
n = len(i)
if n in (2, 3, 4, 7):
count += 1
return count
def decode_output(pair):
inhalf, outhalf = pair
fives = [] # 2, 3, 5
sixes = [] # 0, 6, 9
for s in inhalf:
match len(s):
case 2:
one = set(s)
case 3:
seven = set(s)
case 4:
four = set(s)
case 5:
fives.append(set(s))
case 6:
sixes.append(set(s))
case 7:
eight = set(s)
mapping = {}
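    # Deduce each scrambled wire with set arithmetic:
    #   a = 7 - 1
    #   the five-segment digits {2,3,5} share a,d,g; the six-segment digits {0,6,9} share a,b,f,g
    #   g = (a,d,g) - (b,d) - a;  d = (a,d,g) - a - g;  b = (b,d) - d
    #   f = (a,b,f,g) - a - b - g;  c = 1 - f;  e = whatever 8 has left over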
a = seven - one
bd = four - one
adg = fives[0].intersection(*fives[1:])
g = adg - bd - a
d = adg - a - g
b = bd - d
abgf = sixes[0].intersection(*sixes[1:])
f = abgf - a - b - g
c = one - f
e = eight - a - b - c - d - f - g
mapping = {
list(a)[0]: 'a',
list(b)[0]: 'b',
list(c)[0]: 'c',
list(d)[0]: 'd',
list(e)[0]: 'e',
list(f)[0]: 'f',
list(g)[0]: 'g',
}
return sum(
10**i * SEG_DIGITS[''.join(sorted(map(lambda x: mapping[x], s)))]
for i, s in enumerate(reversed(outhalf))
)
def main():
data = """be cfbegad cbdgef fgaecd cgeb fdcge agebfd fecdb fabcd edb | fdgacbe cefdb cefbgd gcbe
edbfga begcd cbg gc gcadebf fbgde acbgfd abcde gfcbed gfec | fcgedb cgb dgebacf gc
fgaebd cg bdaec gdafb agbcfd gdcbef bgcad gfac gcb cdgabef | cg cg fdcagb cbg
fbegcd cbd adcefb dageb afcb bc aefdc ecdab fgdeca fcdbega | efabcd cedba gadfec cb
aecbfdg fbg gf bafeg dbefa fcge gcbea fcaegb dgceab fcbdga | gecf egdcabf bgf bfgea
fgeab ca afcebg bdacfeg cfaedg gcfdb baec bfadeg bafgc acf | gebdcfa ecba ca fadegcb
dbcfg fgd bdegcaf fgec aegbdf ecdfab fbedc dacgb gdcebf gf | cefg dcbef fcge gbcadfe
bdfegc cbegaf gecbf dfcage bdacg ed bedf ced adcbefg gebcd | ed bcgafe cdgba cbgef
egadfb cdbfeg cegd fecab cgb gbdefca cg fgcdab egfdb bfceg | gbdfcae bgc cg cgb
gcafb gcf dcaebfg ecagb gf abcdeg gaef cafbge fdbac fegbdc | fgae cfgab fg bagce"""
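    # The embedded string above appears to be the worked example from the puzzle text,
    # kept for offline testing; the next line replaces it with the real puzzle input.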
data = aocd.get_data(day=DAY, year=YEAR)
inlist = [[a.split() for a in l.split(' | ')] for l in data.split('\n')]
answer = part_a(inlist)
print(answer)
aocd.submit(answer, part='a', day=DAY, year=YEAR)
answer = sum(map(decode_output, inlist))
print(answer)
aocd.submit(answer, part='b', day=DAY, year=YEAR)
if __name__ == '__main__':
main()
|
from fr_api import *
import glob
if __name__ == '__main__':
file_names = glob.glob(r'./dataset/pic/*.jpg')
file_names.sort()
print(file_names)
for file in file_names:
save_path = "".join("./dataset/ha/{}.png".format(file[14:18]))
print(save_path)
face_image = FacesImage(file)
face_image.run()
# face_image.show()
face_image.save(save_path)
|
# -------------------------------------------------------------------------
# Copyright (c) Thomas Waldinger. All rights reserved.
# Licensed under the Apache License, Version 2.0. See
# License.txt in the project root for license
# information.
# ---------------
"""
ArchiveEntry
The archive entry represents one archived file. It is an opaque handle which P5
uses to quickly locate the file on the archive media and its metadata in the
archive index database.
The archive entry is generated for each file added to the archive selection.
Please see the ArchiveSelection resource description for details on how entries are created.
"""
from awp5.base.connection import P5Resource, exec_nsdchat
from awp5.base.helpers import resourcelist, onereturnvalue
from awp5.api.archiveindex import ArchiveIndex
from awp5.api.client import Client
module_name = "ArchiveEntry"
@onereturnvalue
def handle(client, path, database=None, as_object=False, p5_connection=None):
"""
Syntax: ArchiveEntry handle <client> <path> [<database>]
Description: Returns the properly formatted archive entry handle which can
be used for restoring files archived over the P5 web GUI.
The <client> is the name of the P5 client where the <path> resides.
The <path> is the absolute platform-native path to a file. No checking is
performed on the file. If the passed <path> contains blanks, be sure to
enclose it in curly braces: {/some/path with blanks/file}.
Furthermore, if the <path> contains { and/or } chars themselves, you must
escape them with a backslash '\' character.
The optional <database> declares the name of the database where the file
has been indexed. If omitted, the standard Default-Archive database is
used. If no such database could be found in the current P5 configuration,
an error is triggered.
Return Values:
-On Success: the handle of the entry
"""
method_name = "handle"
result = exec_nsdchat([module_name, method_name, client, path, database],
p5_connection)
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, p5_connection)
def btime(archiveentry_handle, p5_connection=None):
"""
Syntax: ArchiveEntry <handle> btime
Description: Returns the list of backup/archive times in seconds (Posix
time) for each instance of the given archive entry.
Return Values:
-On Success: the list of backup times
"""
method_name = "btime"
return exec_nsdchat([module_name, archiveentry_handle, method_name],
p5_connection)
def mtime(archiveentry_handle, p5_connection=None):
"""
Syntax: ArchiveEntry <handle> mtime
Description: Returns the list of modification times in seconds (Posix time)
for each instance of the given archive entry.
Return Values:
-On Success: the list of modification times
"""
method_name = "mtime"
return exec_nsdchat([module_name, archiveentry_handle, method_name],
p5_connection)
def meta(archiveentry_handle, key=None, p5_connection=None):
"""
Syntax: ArchiveEntry <handle> meta [<key>]
Description: Returns defined meta-data keys and their values for the given
archive entry. If the optional <key> argument is given, it is assumed to be
one of the meta columns defined for the particular index database where the
archive entry has been indexed.
Return Values:
-On Success: with <key> argument: the value of the given meta key
without <key> argument:
the list of all the meta keys and their values
"""
method_name = "meta"
return exec_nsdchat([module_name, archiveentry_handle, method_name,
key], p5_connection)
def setmeta(archiveentry_handle, key_value_list, p5_connection=None):
"""
Syntax: ArchiveEntry <handle> setmeta [<key> <value> [<key> <value>].. ]
Description: Sets the defined meta-data key/value pair for the given
archive entry. Key argument is assumed to be one of the meta columns
defined for the particular index database where the archive entry has been
indexed.
Return Values:
-On Success: the newly set key/value pair
"""
method_name = "setmeta"
return exec_nsdchat([module_name, archiveentry_handle, method_name,
key_value_list], p5_connection)
def size(archiveentry_handle, p5_connection=None):
"""
Syntax: ArchiveEntry <handle> size
Description: Returns the list of sizes in bytes for each instance of the
given archive entry.
Return Values:
-On Success: the list of file sizes
"""
method_name = "size"
return exec_nsdchat([module_name, archiveentry_handle, method_name],
p5_connection)
@onereturnvalue
def status(archiveentry_handle, p5_connection=None):
"""
Syntax: ArchiveEntry <handle> status
Description: Returns the status of the archived entry. An archive entry can
    have a number of internal statuses, depending on the stage of the archive
and/or restore process. Currently, the following statuses are supported:
• indexed found in the archive index
• unknown not found in the archive index
The indexed status means that the entry has been processed (archived) and
its meta data may be obtained from the index database.
The unknown status means that the entry has not (yet) been found in the
index, which is normal for files still waiting to be archived.
If the status of an entry returns unknown, then all of the subsequent entry
methods described below will return invalid values.
Return Values:
-On Success: one of the supported statuses
"""
method_name = "status"
return exec_nsdchat([module_name, archiveentry_handle, method_name],
p5_connection)
def volume(archiveentry_handle, p5_connection=None):
"""
Syntax: ArchiveEntry <handle> volume
Description: Returns the media volume ID where the entry <name> has been
archived. An entry can be stored on one or more volumes or even many times
on the same volume (see the Volume resource for more information) during
the archive operation, depending on the plan configuration.
Return Values:
-On Success: the ID of the volume if the entry was stored on only
one volume,
or a list of volume ID's if the entry was stored on
multiple volumes
"""
method_name = "volume"
return exec_nsdchat([module_name, archiveentry_handle, method_name],
p5_connection)
@onereturnvalue
def clippath(archiveentry_handle, newpath=None, p5_connection=None):
"""
Syntax: ArchiveEntry <handle> clippath [newpath]
Description: If newpath is not given, the command will return the path of
an existing clip or the string "unknown" if there is no clip available.
If newpath is given as empty string "", it will clean/delete the previous
clip (if any) and return the string "unknown" as a result.
If newpath is given as a path to an existing file, this file will be set as
the entry's clip. The file itself will be moved (not copied!) into the clip
    storage of the corresponding index and the absolute path of the clip
will be returned.
Return Values:
-On Success: the path to the existing clip
or the string "unknown" if not found
"""
method_name = "clippath"
return exec_nsdchat([module_name, archiveentry_handle, method_name],
p5_connection)
@onereturnvalue
def clipurl(archiveentry_handle, host, port, p5_connection=None):
"""
Syntax: ArchiveEntry <handle> clipurl <host> <port>
Description: Returns a URL of the clip of the file as
    http://host:port/url-to-the-clip
<host> and <port> refer to the host address and port of the P5 server host.
Return Values:
-On Success: the URL as a string
"""
method_name = "clipurl"
    return exec_nsdchat([module_name, archiveentry_handle, method_name, host,
                         port], p5_connection)
class ArchiveEntry(P5Resource):
def __init__(self, archiveentry_name, p5_connection=None):
super().__init__(archiveentry_name, p5_connection)
@onereturnvalue
def handle(client, path, database=None, as_object=True,
p5_connection=None):
"""
Syntax: ArchiveEntry handle <client> <path> [<database>]
Description: Returns the properly formatted archive entry handle which
can be used for restoring files archived over the P5 web GUI.
The <client> is the name of the P5 client where the <path> resides.
The <path> is the absolute platform-native path to a file. No checking
is performed on the file. If the passed <path> contains blanks, be sure
to enclose it in curly braces: {/some/path with blanks/file}.
Furthermore, if the <path> contains { and/or } chars themselves, you
must escape them with a backslash '\' character.
The optional <database> declares the name of the database where the
file has been indexed. If omitted, the standard Default-Archive
database is used. If no such database could be found in the current P5
configuration, an error is triggered.
Return Values:
-On Success: the handle of the entry
"""
method_name = "handle"
result = exec_nsdchat([module_name, method_name, client, path,
database], p5_connection)
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, p5_connection)
def btime(self):
"""
Syntax: ArchiveEntry <handle> btime
Description: Returns the list of backup/archive times in seconds (Posix
time) for each instance of the given archive entry.
Return Values:
-On Success: the list of backup times
"""
method_name = "btime"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name])
def mtime(self):
"""
Syntax: ArchiveEntry <handle> mtime
Description: Returns the list of modification times in seconds (Posix
time) for each instance of the given archive entry.
Return Values:
-On Success: the list of modification times
"""
method_name = "mtime"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name])
def meta(self, key=None):
"""
Syntax: ArchiveEntry <handle> meta [<key>]
Description: Returns defined meta-data keys and their values for the
given archive entry. If the optional <key> argument is given, it is
assumed to be one of the meta columns defined for the particular index
database where the archive entry has been indexed.
Return Values:
-On Success: with <key> argument: the value of the given meta key
without <key> argument:
the list of all the meta keys and their values
"""
method_name = "meta"
key_option = ""
if key:
key_option = key
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name, key_option])
def setmeta(self, key_value_list):
"""
Syntax: ArchiveEntry <handle> setmeta [<key> <value> [<key> <value>]..]
Description: Sets the defined meta-data key/value pair for the given
archive entry. Key argument is assumed to be one of the meta columns
defined for the particular index database where the archive entry has
been indexed.
Return Values:
-On Success: the newly set key/value pair
"""
method_name = "setmeta"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name, key_value_list])
def size(self):
"""
Syntax: ArchiveEntry <handle> size
Description: Returns the list of sizes in bytes for each instance of
the given archive entry.
Return Values:
-On Success: the list of file sizes
"""
method_name = "size"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name])
@onereturnvalue
def status(self):
"""
Syntax: ArchiveEntry <handle> status
Description: Returns the status of the archived entry. An archive entry
        can have a number of internal statuses, depending on the stage of the
archive and/or restore process. Currently, the following statuses are
supported:
• indexed found in the archive index
• unknown not found in the archive index
The indexed status means that the entry has been processed (archived)
and its meta data may be obtained from the index database.
The unknown status means that the entry has not (yet) been found in the
index, which is normal for files still waiting to be archived.
If the status of an entry returns unknown, then all of the subsequent
entry methods described below will return invalid values.
Return Values:
-On Success: one of the supported statuses
"""
method_name = "status"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name])
def volume(self):
"""
Syntax: ArchiveEntry <handle> volume
Description: Returns the media volume ID where the entry <name> has
been archived. An entry can be stored on one or more volumes or even
many times on the same volume (see the Volume resource for more
information) during the archive operation, depending on the plan
configuration.
Return Values:
-On Success: the ID of the volume if the entry was stored on only
one volume,
or a list of volume ID's if the entry was stored on
multiple volumes
"""
method_name = "volume"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name])
@onereturnvalue
def clippath(self, newpath=None):
"""
Syntax: ArchiveEntry <handle> clippath [newpath]
Description: If newpath is not given, the command will return the path
of an existing clip or the string "unknown" if there is no clip
available. If newpath is given as empty string "", it will clean/delete
the previous clip (if any) and return the string "unknown" as a result.
If newpath is given as a path to an existing file, this file will be
set as the entry's clip. The file itself will be moved (not copied!)
into the clip storage of the corresponding index and the absolute path
        of the clip will be returned.
Return Values:
-On Success: the path to the existing clip
or the string "unknown" if not found
"""
method_name = "clippath"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name, newpath])
@onereturnvalue
def clipurl(self, host, port):
"""
Syntax: ArchiveEntry <handle> clipurl <host> <port>
Description: Returns a URL of the clip of the file as
        http://host:port/url-to-the-clip
<host> and <port> refer to the host address and port of the P5 server
host.
Return Values:
-On Success: the URL as a string
"""
method_name = "clipurl"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name, host, port])
def __repr__(self):
return ": ".join([module_name, self.name])
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
This script takes the path to a directory, and looks for any Turtle files
(https://www.w3.org/TeamSubmission/turtle/), then uses RDFLib to check if
they're valid TTL.
It exits with code 0 if all files are valid, 1 if not.
"""
import logging
import os
import sys
import daiquiri
import rdflib
daiquiri.setup(level=logging.INFO)
logger = daiquiri.getLogger(__name__)
# This is a slightly cheaty way of tracking which paths (if any) failed --
# we append to this global list, and inspect it at the end of the script!
failures = []
def parse_turtle(path):
"""Try to parse the Turtle at a given path."""
logger.info("Parsing Turtle at path %s", path)
graph = rdflib.Graph()
try:
graph.parse(path, format='ttl')
except Exception as exc:
# Get the name of the exception class
# e.g. rdflib.plugins.parsers.notation3.BadSyntax
exc_name = f'{exc.__class__.__module__}.{exc.__class__.__name__}'
# Then try to log something useful
logger.error("Error parsing Turtle (%s)", exc_name)
logger.error(exc)
failures.append(path)
else:
logger.info("Successfully parsed Turtle! ")
def find_turtle_files(path):
"""Generates all the Turtle files under a given path."""
for root, _, filenames in os.walk(path):
for f in filenames:
if not f.endswith('.ttl'):
continue
yield os.path.join(root, f)
if __name__ == '__main__':
for path in find_turtle_files('.'):
if 'WIP' in path:
logger.info("Skipping path %s as WIP", path)
continue
parse_turtle(path)
if failures:
logger.error("Failures in the following files: %s", ", ".join(failures))
sys.exit(1)
else:
logger.info("✨ All Turtle files passed linting! ✨")
sys.exit(0)
|
import scipy
import numpy as np
import cv2
import argparse
import json
from keras.applications import inception_v3
from keras import backend as K
from keras.preprocessing import image
from utils import *
#Set learning phase = 0 (test mode). Prevent model learning for safety reasons.
K.set_learning_phase(0)
model = inception_v3.InceptionV3(weights='imagenet', include_top=False)
##### Create parser #####
parser = argparse.ArgumentParser()
parser.add_argument('-i', action = 'store', type = str,
dest = 'image_path',
help = 'Path to the input image',
required = True)
parser.add_argument('-o', action = 'store', type = str,
dest = 'json_path',
default = 'config.json',
help = 'Path to the json configuration file')
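# Illustrative config.json for the -o option (keys inferred from the lookups in
# __main__ below; layer names and values are only an example, not tuned defaults):
# {
#   "layer_contribution": {"mixed2": 0.2, "mixed3": 3.0, "mixed4": 2.0, "mixed5": 1.5},
#   "num_octave": 3,
#   "octave_scale": 1.4,
#   "iterations": 20,
#   "step": 0.01,
#   "max_loss": 10.0
# }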
def predict(img_file, num_octave, octave_scale, iterations, step, max_loss, model, loss, write_result=False):
img = preprocess_img(img_file)
# Create list of shapes correspond with octave scales
original_shape = img.shape[1:3]
octave_shapes = [original_shape]
for i in range(num_octave):
scaled_shape = tuple([int(dim/(octave_scale ** i)) for dim in original_shape])
octave_shapes.append(scaled_shape)
octave_shapes = octave_shapes[::-1]
orginal_img = np.copy(img)
    # Initialize the shrunk image at the smallest octave scale
shrunck_original_img = resize_img(img, octave_shapes[0])
for shape in octave_shapes:
print('Processing image shape: ', shape)
# Image gradient ascenting
img = resize_img(img, shape)
img = gradient_ascent(img, iterations, step, fetch_loss_and_grads(model, loss), max_loss)
# Lost detail computation
upscaled_shrunck_original_img = resize_img(shrunck_original_img, shape)
same_original_size = resize_img(orginal_img, shape)
lost_detail = same_original_size - upscaled_shrunck_original_img
        # Re-inject the detail lost by upscaling
img += lost_detail
save_img(img, './imgs/scale_{}.png'.format(shape))
shrunck_original_img = resize_img(orginal_img, shape)
if write_result:
save_img(img, 'result.png')
print('Process finished, result was saved in the project root folder')
else:
pil_img = deprocess_img(np.copy(img))
return pil_img
if __name__ == "__main__":
args = parser.parse_args()
img_path = args.image_path
with open(args.json_path) as f:
options = json.load(f)
f.close()
layer_contribution = options['layer_contribution']
loss = get_loss(layer_contribution, model)
predict(img_file=img_path,
num_octave=options['num_octave'],
octave_scale=options['octave_scale'],
iterations=options['iterations'],
step=options['step'],
max_loss=options['max_loss'],
model = model,
loss = loss,
write_result=True)
|
"""
Copyright 2020 Huawei Technologies Co., Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Typical usage example:
"""
import os
from PIL import Image
import sys
import numpy as np
def load_image_into_numpy_array(image):
"""
load image into numpy array
"""
im_width, im_height = image.size
print(image.getdata().size)
    return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
def read_image_and_to_numpy(file_path, data_dtype, size=None):
"""
read image and load into numpy
"""
image = Image.open(file_path)
image = image.convert("RGB")
if size is not None:
new_image = image.resize([size[1], size[0]], Image.BILINEAR)
else:
new_image = image
image_np = load_image_into_numpy_array(new_image)
image_np = image_np.astype(data_dtype)
return image_np
def preprocess_image(src_path, save_path):
"""
preprocess image
"""
files = os.listdir(src_path)
if not os.path.exists(save_path):
os.makedirs(save_path)
i = 0
for file in files:
(filename, extension) = os.path.splitext(file)
        if extension.lower() in ('.jpg', '.jpeg'):
i += 1
print(file, "====", i)
img_path = os.path.join(src_path, file)
input_type = np.uint8
            image_np = read_image_and_to_numpy(img_path, input_type, [640, 640])
image_np.tofile(os.path.join(save_path, file.split('.')[0] + ".bin"))
if __name__ == '__main__':
if len(sys.argv) < 3:
raise Exception("usage: python3 xxx.py [src_path] [save_path]")
src_path = sys.argv[1]
save_path = sys.argv[2]
src_path = os.path.realpath(src_path)
save_path = os.path.realpath(save_path)
preprocess_image(src_path, save_path)
|
"""empty message
Revision ID: d28e299dee4e
Revises: 4eb2629c7a29
Create Date: 2021-02-01 10:17:23.264719
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'd28e299dee4e'
down_revision = '4eb2629c7a29'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('coffees', 'created_at',
existing_type=postgresql.TIMESTAMP(),
type_=sa.DateTime(timezone=True),
existing_nullable=False)
op.alter_column('coffees', 'updated_at',
existing_type=postgresql.TIMESTAMP(),
type_=sa.DateTime(timezone=True),
existing_nullable=False)
op.alter_column('reviews', 'created_at',
existing_type=postgresql.TIMESTAMP(),
type_=sa.DateTime(timezone=True),
existing_nullable=False)
op.alter_column('reviews', 'updated_at',
existing_type=postgresql.TIMESTAMP(),
type_=sa.DateTime(timezone=True),
existing_nullable=False)
op.alter_column('users', 'created_at',
existing_type=postgresql.TIMESTAMP(),
type_=sa.DateTime(timezone=True),
existing_nullable=False)
op.alter_column('users', 'updated_at',
existing_type=postgresql.TIMESTAMP(),
type_=sa.DateTime(timezone=True),
existing_nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('users', 'updated_at',
existing_type=sa.DateTime(timezone=True),
type_=postgresql.TIMESTAMP(),
existing_nullable=False)
op.alter_column('users', 'created_at',
existing_type=sa.DateTime(timezone=True),
type_=postgresql.TIMESTAMP(),
existing_nullable=False)
op.alter_column('reviews', 'updated_at',
existing_type=sa.DateTime(timezone=True),
type_=postgresql.TIMESTAMP(),
existing_nullable=False)
op.alter_column('reviews', 'created_at',
existing_type=sa.DateTime(timezone=True),
type_=postgresql.TIMESTAMP(),
existing_nullable=False)
op.alter_column('coffees', 'updated_at',
existing_type=sa.DateTime(timezone=True),
type_=postgresql.TIMESTAMP(),
existing_nullable=False)
op.alter_column('coffees', 'created_at',
existing_type=sa.DateTime(timezone=True),
type_=postgresql.TIMESTAMP(),
existing_nullable=False)
# ### end Alembic commands ###
|
# TODO: This is the entry routine for your monero miner.
# NOTE: If you are creating a new GIT repo and you are going to put the data_location.json and/or settings.json file in your repo directory then be sure
# to add these to your .gitignore file.
# The first thing you need is a way to read the configuration file that loads the miner settings.
import json
from os.path import exists, join
miner_settings = []
# TODO: Look at https://github.com/fireice-uk/xmr-stak/blob/master/xmrstak/cli/cli-miner.cpp -> the routine named main
# TODO: work is on p3demo and yocto. create fopd module and get it running on p3demo. then loop back and work on
# actual monero algorithm.
# Load the settings file
if exists('data_location.json'):
with open('data_location.json') as f:
dl = json.load(f)
if 'settings_file_path' in dl.keys():
if exists(join(dl['settings_file_path'])):
            #TODO Add code to look for the required settings such as pool address and pool logon
            # with open(join(dl['settings_file_path'])) as s:
            #     load_miner_settings(s)
            # create the fopd compatible thread
            # run a fopd compatible monitor
            pass  # placeholder so this branch parses until the TODOs above are implemented
else:
print('You must create a settings file')
print('I will prompt you for answers but feel free to quit and create the file manually.')
else:
        print('The file named data_location.json must have an attribute named settings_file_path')
else:
print('You must create a data_location.json file')
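# For reference, a minimal data_location.json that satisfies the checks above
# could look like this (the path value is purely illustrative):
# {
#     "settings_file_path": "/path/to/settings.json"
# }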
|
from django.db import models
import datetime
from django.db.models.constraints import UniqueConstraint
from random import randint
from django.apps import apps
from django.utils import timezone
# Create your models here.
'''
This is the place where the structure of the database goes.
'''
class Game(models.Model):
game_id = models.IntegerField(primary_key=True, default=00000)
game_mode = models.CharField(max_length=200, default="default")
player_or_bot = models.CharField(max_length=200, default="default")
#under turns tab
max_turns = models.IntegerField(default=0)
max_seconds_per_turn = models.IntegerField(default=0)
    #under bank account tab
initial_bank_balance = models.IntegerField(default=0)
#under demand tab
price_output_equal_player = models.IntegerField(default=0)
demand_elasticity = models.FloatField(default=0.0)
price_growth_multiplier = models.FloatField(default=0.0)
#under R&D tab
max_value = models.IntegerField(default=0)
mode = models.IntegerField(default=0)
#under robots tab
robot_num = models.IntegerField(default=0)
#under information tab
market_report_available = models.BooleanField(default=False)
report_cost = models.IntegerField(default=0)
demand_curve_viewable = models.BooleanField(default=False)
rd_distribution_viewable = models.BooleanField(default=False)
#game turn_num starting with 0
turn_num = models.IntegerField(default=0)
counter_time = models.DateTimeField("Time Submitted", default=timezone.now)
creator = models.ForeignKey('Company', on_delete=models.DO_NOTHING, related_name='+', default=None, null=True, blank=True)
#return number of companies in this game
def company_num(self):
Game = apps.get_model('game','Game')
Company = apps.get_model('game','Company')
game = Game.objects.get(pk=self.game_id)
return len(Company.objects.filter(game=game))
def random_company_id():
    # Use a callable default so each new Company gets its own random id; a bare
    # randint(...) in default= would be evaluated only once at import time.
    return randint(100000000, 999999999)
class Company(models.Model):
    game = models.ForeignKey(Game, on_delete=models.CASCADE)
    company_id = models.IntegerField(default=random_company_id)
class Meta:
constraints = [
UniqueConstraint(fields = ['game','company_id'], name='unique_company')
]
machine_purchased = models.IntegerField(default=0)
to_own = models.IntegerField(default=0)
machine_operated = models.IntegerField(default=0)
r_d_purchased = models.IntegerField(default=0)
bank_balance = models.IntegerField(default=0)
mp_cost = models.IntegerField(default=0)
mo_cost = models.IntegerField(default=0)
r_d_cost = models.IntegerField(default=0)
revenue = models.IntegerField(default=0)
unit_produce = models.FloatField(default=0)
cost_per_turn = models.IntegerField(default=0)
company_name = models.TextField(default='Untitled')
class Record(models.Model):
game = models.ForeignKey(Game, on_delete=models.CASCADE)
turn = models.IntegerField(default=1)
company = models.ForeignKey(Company, on_delete=models.CASCADE)
class Meta:
constraints = [
UniqueConstraint(fields = ['game','turn','company'], name='unique_record')
]
machine_purchased = models.IntegerField(default=0)
to_own = models.IntegerField(default=0)
machine_operated = models.IntegerField(default=0)
r_d_purchased = models.IntegerField(default=0)
bank_balance = models.IntegerField(default=0)
mp_cost = models.IntegerField(default=0)
mo_cost = models.IntegerField(default=0)
r_d_cost = models.IntegerField(default=0)
revenue = models.IntegerField(default=0)
unit_produce = models.FloatField(default=0)
cost_per_turn = models.IntegerField(default=0)
|
#! /usr/bin/ipython
import numpy as np
import ke
from gplot import *
from pause import *
ke._Ecs(0.0001, 1., n=15)
ke.E(0.0001, 1., n=15, typ='atr')
x=np.arange(0,10,0.001); y=1*x; ke._ke_newton.v_cos(x,y, x.size); gplot(x,y-np.cos(x))
x=np.arange(0,10,0.001); y=1*x; ke._ke_newton.v_sin(x,y, x.size); gplot(x,y-np.sin(x))
e = 1.0
E = np.arange(-10,6,0.001)
E = np.arange(0,3,0.001); M = ke.M(E, e)
if 0:
gplot(M, E, ke._E_newton(M, e), ke.E(M, e, n=55, typ='atr'), ' us 1:3, "" us 1:4')
gplot('"" us 1:($3-$2), "" us 1:($4-$2)')
gplot(M, np.log10(np.abs(ke._E(M, e, n=55)-E)))
pause()
M = np.arange(0, np.pi, np.pi/1000);
timeit ke._E(M, e, n=29)
#1000 loops, best of 3: 1.51 ms per loop
timeit ke.E(M, e, n=29)
#1000 loops, best of 3: 1.51 ms per loop
timeit ke._Ecs(M, e, n=29)
#1000 loops, best of 3: 1.54 ms per loop
timeit ke.E(M, e, n=29, typ='pn')
#1000 loops, best of 3: 1.7 ms per loop
timeit ke.E(M, e, n=29, typ='atr')
#100 loops, best of 3: 2.1 ms per loop
timeit ke._E1N(M, e, n=19)
#1000 loops, best of 3: 966 µs per loop
timeit ke._E_newton(M, e)
#100 loops, best of 3: 2.95 ms per loop
timeit ke._E_newton(M, e)
#100 loops, best of 3: 3.22 ms per loop
timeit ke._E_newton(M, e, typ="my")
#100 loops, best of 3: 2.43 ms per loop
timeit ke.E(M, e, n=29)
timeit ke.E(M, e, n=2, typ="N")
# ~ same speed =>
# 29/2
|
""" Defines a dummy socket implementing (part of) the zmq.Socket interface. """
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import abc
import warnings
try:
from queue import Queue # Py 3
except ImportError:
from Queue import Queue # Py 2
import zmq
from traitlets import HasTraits, Instance, Int
from ipython_genutils.py3compat import with_metaclass
#-----------------------------------------------------------------------------
# Generic socket interface
#-----------------------------------------------------------------------------
class SocketABC(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def recv_multipart(self, flags=0, copy=True, track=False):
raise NotImplementedError
@abc.abstractmethod
def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
raise NotImplementedError
@classmethod
def register(cls, other_cls):
if other_cls is not DummySocket:
warnings.warn("SocketABC is deprecated since ipykernel version 4.5.0.",
DeprecationWarning, stacklevel=2)
abc.ABCMeta.register(cls, other_cls)
#-----------------------------------------------------------------------------
# Dummy socket class
#-----------------------------------------------------------------------------
class DummySocket(HasTraits):
""" A dummy socket implementing (part of) the zmq.Socket interface. """
queue = Instance(Queue, ())
message_sent = Int(0) # Should be an Event
context = Instance(zmq.Context)
def _context_default(self):
return zmq.Context()
#-------------------------------------------------------------------------
# Socket interface
#-------------------------------------------------------------------------
def recv_multipart(self, flags=0, copy=True, track=False):
return self.queue.get_nowait()
def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
msg_parts = list(map(zmq.Message, msg_parts))
self.queue.put_nowait(msg_parts)
self.message_sent += 1
def flush(self, timeout=1.0):
"""no-op to comply with stream API"""
pass
SocketABC.register(DummySocket)
|
""" Define various utility functions. """
import numpy as np
from typing import Sequence
def rle_encode(seq: Sequence) -> list:
""" Run-length-encode a sequence.
Converts a sequence to a list of runs of the same element.
Parameters
----------
seq
Sequence to convert.
    Returns a list of tuples of the form `(elem, n_rep)` indicating runs of
    elements where the value `elem` repeats `n_rep` times.
"""
if len(seq) == 0:
return []
starts = np.hstack(([0], np.diff(seq).nonzero()[0] + 1, len(seq)))
rle = [(seq[i1], i2 - i1) for i1, i2 in zip(starts, starts[1:])]
return rle
def rle_decode(seq_rle: Sequence) -> list:
""" Decode a sequence from its run-length encoding.
Takes in a sequence of tuples `(elem, n_rep)`, as returned by `rle_encode`,
and returns a list of states corresponding to that encoding.
Parameters
----------
seq_rle
Run-length-encoded sequence to convert.
Returns a sequence of elements whose run-length encoding is `seq_rle`.
"""
seq = []
    for elem, n_rep in seq_rle:
        # Cast to a plain int so numpy integer counts (as produced by rle_encode)
        # repeat the element rather than broadcasting a multiplication.
        seq.extend(int(n_rep) * [elem])
return seq
def to_hankel(y: Sequence, p: int, step: int = 1) -> np.ndarray:
""" Generate a Hankel matrix from a sequence.
A generalized version of the Hankel matrix where a non-trivial step is used for the
column index can also be generated. See `step`.
Parameters
----------
y
The sequence to convert into a Hankel matrix.
p
Order of lag vectors (number of columns in resulting matrix).
step
Step in the column direction -- see definition below.
Returns a matrix `H` with shape `(len(y), p)` obeying
H[i, j] = y[i - j * step] if i >= j * step else 0 .
Note that this is only strictly a Hankel matrix if `step == 1`.
"""
n = len(y)
if p < 1:
return np.empty((n, 0))
H = np.zeros((n, p))
H[:, 0] = y
for j in range(1, p):
js = j * step
H[js:, j] = y[:-js]
return H
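# Small illustrative demo of the helpers above (not part of the original module);
# run the file directly to sanity-check the RLE round trip and the Hankel layout.
if __name__ == "__main__":
    runs = rle_encode([1, 1, 2, 2, 2, 3])
    assert runs == [(1, 2), (2, 3), (3, 1)]
    assert rle_decode(runs) == [1, 1, 2, 2, 2, 3]
    H = to_hankel([1.0, 2.0, 3.0, 4.0], p=2)
    # With the default step == 1, H[i, j] = y[i - j] for i >= j and 0 otherwise.
    assert H.shape == (4, 2)
    assert H[0, 1] == 0.0 and H[1, 1] == 1.0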
|
from .missing_element_base import MissingElementBase
from . import missing_empty
from . import missing_field
from . import missing_fieldset
from . import missing_list
from . import missing_section_element
class MissingSection(MissingElementBase):
def __repr__(self):
if self._key:
return f"<class MissingSection key={self._key}>"
return '<class MissingSection>'
def element(self, key=None):
return missing_section_element.MissingSectionElement(key, self)
def elements(self, _key=None):
return []
def empty(self, key=None):
return missing_empty.MissingEmpty(key, self)
def field(self, key=None):
return missing_field.MissingField(key, self)
def fields(self, _key=None):
return []
def fieldset(self, key=None):
return missing_fieldset.MissingFieldset(key, self)
def fieldsets(self, _key=None):
return []
def list(self, key=None):
return missing_list.MissingList(key, self)
def lists(self, _key=None):
return []
def optional_element(self, _key=None):
return None
def optional_empty(self, _key=None):
return None
def optional_field(self, _key=None):
return None
def optional_fieldset(self, _key=None):
return None
def optional_list(self, _key=None):
return None
def optional_section(self, _key=None):
return None
def required_element(self, _key=None):
self._parent._missing_error(self)
def required_empty(self, _key=None):
self._parent._missing_error(self)
def required_field(self, _key=None):
self._parent._missing_error(self)
def required_fieldset(self, _key=None):
self._parent._missing_error(self)
def required_list(self, _key=None):
self._parent._missing_error(self)
def required_section(self, _key=None):
self._parent._missing_error(self)
def section(self, key=None):
return MissingSection(key, self)
def sections(self, _key=None):
return []
|
# -*- coding:utf-8 -*-
import coremltools
import logging
import os
if __name__ == '__main__':
logFmt = '%(asctime)s %(lineno)04d %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=logFmt, datefmt='%H:%M',)
modelFilePath = os.getcwd()
modelFilePath += '/MarsHabitatPricePredictor/Resources/MarsHabitatPricer.mlmodel'
logging.debug(modelFilePath)
    model = coremltools.models.MLModel(modelFilePath)  # load the .mlmodel file
    # Print the metadata fields; these summarize the model file
logging.info('author: %s' % (model.author))
logging.info('license: %s' % (model.license))
logging.info('short description: %s' % (model.short_description))
logging.info('input description: %s' % (model.input_description))
logging.info('output description: %s' % (model.output_description))
    # Print the spec, which contains detailed information for every field
logging.info(model.get_spec())
    # Verify the predicted output for the three input fields below
dataList = [{'solarPanels':1.0, 'greenhouses':1.0, 'size':1024},
{'solarPanels':4.0, 'greenhouses':5.0, 'size':10000}]
logging.info('solarPanels greenhouses size price')
logging.info('------------------------------------')
for dataItem in dataList:
predictions = model.predict(dataItem)
logging.info('%11.1f %11d %4d %5d' % (dataItem['solarPanels'], \
dataItem['greenhouses'], dataItem['size'], predictions['price']))
|
from Messages.Message import Message
class Error (Message):
def get_text(self):
return "ERROR: " + self.text
|
import aredis
import asyncio
import uvloop
import time
import sys
from functools import wraps
from argparse import ArgumentParser
if sys.version_info[0] == 3:
long = int
def parse_args():
parser = ArgumentParser()
parser.add_argument('-n',
type=int,
help='Total number of requests (default 100000)',
default=100000)
parser.add_argument('-P',
type=int,
help=('Pipeline <numreq> requests.'
' Default 1 (no pipeline).'),
default=1)
parser.add_argument('-s',
type=int,
help='Data size of SET/GET value in bytes (default 2)',
default=2)
args = parser.parse_args()
print(args)
return args
async def run():
args = parse_args()
r = aredis.StrictRedis()
await r.flushall()
await set_str(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await set_int(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await get_str(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await get_int(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await incr(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await lpush(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await lrange_300(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await lpop(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await hmset(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
def timer(func):
@wraps(func)
async def wrapper(*args, **kwargs):
        start = time.perf_counter()  # time.clock() was deprecated and is removed in Python 3.8+
        ret = await func(*args, **kwargs)
        duration = time.perf_counter() - start
if 'num' in kwargs:
count = kwargs['num']
else:
count = args[1]
print('{0} - {1} Requests'.format(func.__name__, count))
print('Duration = {}'.format(duration))
print('Rate = {}'.format(count/duration))
print('')
return ret
return wrapper
@timer
async def set_str(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
format_str = '{:0<%d}' % data_size
set_data = format_str.format('a')
for i in range(num):
await conn.set('set_str:%d' % i, set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def set_int(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
format_str = '{:0<%d}' % data_size
set_data = int(format_str.format('1'))
for i in range(num):
await conn.set('set_int:%d' % i, set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def get_str(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
for i in range(num):
await conn.get('set_str:%d' % i)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def get_int(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
for i in range(num):
await conn.get('set_int:%d' % i)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def incr(conn, num, pipeline_size, *args, **kwargs):
if pipeline_size > 1:
conn = await conn.pipeline()
for i in range(num):
await conn.incr('incr_key')
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def lpush(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
format_str = '{:0<%d}' % data_size
set_data = int(format_str.format('1'))
for i in range(num):
await conn.lpush('lpush_key', set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def lrange_300(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
for i in range(num):
await conn.lrange('lpush_key', i, i+300)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def lpop(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
for i in range(num):
await conn.lpop('lpush_key')
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def hmset(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
set_data = {'str_value': 'string',
'int_value': 123456,
'long_value': long(123456),
'float_value': 123456.0}
for i in range(num):
await conn.hmset('hmset_key', set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
if __name__ == '__main__':
print('WITH ASYNCIO ONLY:')
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
print('WITH UVLOOP:')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
|
#!/usr/bin/env python
# svmap.py - SIPvicious SIP scanner
__GPL__ = """
SIPvicious SIP scanner searches for SIP devices on a given network
Copyright (C) 2012 Sandro Gauci <sandro@enablesecurity.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import socket
import select
import random
from struct import pack,unpack
from libs.svhelper import __version__  # assumed to be exported by svhelper; main() below references __version__
__prog__ = 'svmap'
class DrinkOrSip:
def __init__(self,scaniter,selecttime=0.005,compact=False, bindingip='0.0.0.0',
fromname='sipvicious',fromaddr='sip:100@1.1.1.1', extension=None,
sessionpath=None,socktimeout=3,externalip=None,localport=5060,
printdebug=False,first=None,fpworks=False):
import logging,anydbm
import os.path
from libs.svhelper import packetcounter
#from svfphelper import sipfingerprint
#self.sipfingerprint = sipfingerprint
self.log = logging.getLogger('DrinkOrSip')
self.bindingip = bindingip
self.sessionpath = sessionpath
self.dbsyncs = False
if self.sessionpath is not None:
self.resultip = anydbm.open(os.path.join(self.sessionpath,'resultip'),'c')
self.resultua = anydbm.open(os.path.join(self.sessionpath,'resultua'),'c')
self.resultfp = anydbm.open(os.path.join(self.sessionpath,'resultfp'),'c')
try:
self.resultip.sync()
self.dbsyncs = True
self.log.info("Db does sync")
except AttributeError:
self.log.info("Db does not sync")
pass
else:
self.resultip = dict()
self.resultua = dict()
self.resultfp = dict()
# we do UDP
self.sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
# socket timeout - this is particularly useful when quitting .. to eat
# up some final packets
self.sock.settimeout(socktimeout)
# enable sending to broadcast addresses
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# read handles
self.rlist = [self.sock]
# write handles
self.wlist = list()
# error handles
self.xlist = list()
self.scaniter = scaniter
self.selecttime = selecttime
self.localport = localport
if externalip is None:
self.log.debug("external ip was not set")
if (self.bindingip != '0.0.0.0') and (len(self.bindingip) > 0):
self.log.debug("but bindingip was set! we'll set it to the binding ip")
self.externalip = self.bindingip
else:
try:
self.log.info("trying to get self ip .. might take a while")
self.externalip = socket.gethostbyname(socket.gethostname())
except socket.error:
self.externalip = '127.0.0.1'
else:
self.log.debug("external ip was set")
self.externalip = externalip
self.log.debug("External ip: %s:%s" % (self.externalip,localport) )
self.compact = compact
self.log.debug("Compact mode: %s" % self.compact)
self.fromname = fromname
self.fromaddr = fromaddr
self.log.debug("From: %s <%s>" % (self.fromname,self.fromaddr))
self.nomoretoscan = False
self.originallocalport = self.localport
self.nextip = None
self.extension = extension
self.fpworks = fpworks
self.printdebug = printdebug
self.first = first
if self.sessionpath is not None:
self.packetcount = packetcounter(50)
self.sentpackets = 0
def getResponse(self,buff,srcaddr):
from libs.svhelper import fingerPrintPacket,getTag
srcip,srcport = srcaddr
uaname = 'unknown'
if buff.startswith('OPTIONS ') \
or buff.startswith('INVITE ') \
or buff.startswith('REGISTER '):
if self.externalip == srcip:
self.log.debug("We received our own packet from %s:%s" % srcaddr)
else:
self.log.info("Looks like we received a SIP request from %s:%s"% srcaddr)
self.log.debug(repr(buff))
return
self.log.debug("running fingerPrintPacket()")
res = fingerPrintPacket(buff)
if res is not None:
if res.has_key('name'):
uaname = res['name'][0]
else:
uaname = 'unknown'
self.log.debug(`buff`)
#if self.fpworks:
# try:
# fp = self.sipfingerprint(buff)
# except:
# self.log.error("fingerprinting gave errors - will be disabled")
# self.fpworks = False
            # The fingerprinting call above is commented out, so set fp
            # unconditionally to avoid a NameError when self.fpworks is True.
            fp = None
if fp is None:
if self.fpworks:
fpname = 'unknown'
else:
fpname = 'disabled'
else:
fpname = ' / '.join(fp)
self.log.debug('Fingerprint: %s' % fpname)
self.log.debug("Uaname: %s" % uaname)
#print buff
originaldst = getTag(buff)
try:
dstip = socket.inet_ntoa(pack('!L',int(originaldst[:8],16)))
dstport = int(originaldst[8:12],16)
except (ValueError,TypeError,socket.error):
self.log.debug("original destination could not be decoded: %s" % (originaldst))
dstip,dstport = 'unknown','unknown'
resultstr = '%s:%s\t->\t%s:%s\t->\t%s\t->\t%s' % (dstip,dstport,srcip,srcport,uaname,fpname)
self.log.info( resultstr )
self.resultip['%s:%s' % (srcip,srcport)] = '%s:%s' % (dstip,dstport)
self.resultua['%s:%s' % (srcip,srcport)] = uaname
self.resultfp['%s:%s' % (srcip,srcport)] = fpname
if self.sessionpath is not None and self.dbsyncs:
self.resultip.sync()
self.resultua.sync()
self.resultfp.sync()
else:
self.log.info('Packet from %s:%s did not contain a SIP msg'%srcaddr)
self.log.debug('Packet: %s' % `buff`)
def start(self):
from libs.svhelper import makeRequest, createTag
from libs.svhelper import mysendto
        import socket
        import os.path
        import pickle  # os.path and pickle are used below when persisting the last scanned ip
# bind to 5060 - the reason is to maximize compatability with
# devices that disregard the source port and send replies back
# to port 5060
self.log.debug("binding to %s:%s" % (self.bindingip,self.localport))
while 1:
if self.localport > 65535:
self.log.critical("Could not bind to any port")
return
try:
self.sock.bind((self.bindingip,self.localport))
break
except socket.error:
self.log.debug("could not bind to %s" % self.localport)
self.localport += 1
if self.originallocalport != self.localport:
self.log.warn("could not bind to %s:%s - some process might already be listening on this port. Listening on port %s instead" % (self.bindingip,self.originallocalport, self.localport))
self.log.info("Make use of the -P option to specify a port to bind to yourself")
while 1:
r, w, e = select.select(
self.rlist,
self.wlist,
self.xlist,
self.selecttime
)
if r:
# we got stuff to read off the socket
try:
buff,srcaddr = self.sock.recvfrom(8192)
self.log.debug('got data from %s:%s' % srcaddr)
self.log.debug('data: %s' % `buff`)
if self.printdebug:
print srcaddr
print buff
except socket.error:
continue
self.getResponse(buff,srcaddr)
else:
# no stuff to read .. its our turn to send back something
if self.nomoretoscan:
try:
# having the final sip
self.log.debug("Making sure that no packets get lost")
self.log.debug("Come to daddy")
while 1:
buff,srcaddr = self.sock.recvfrom(8192)
if self.printdebug:
print srcaddr
print buff
self.getResponse(buff,srcaddr)
except socket.error:
break
try:
nextscan = self.scaniter.next()
except StopIteration:
self.log.debug('no more hosts to scan')
self.nomoretoscan = True
continue
dstip,dstport,method = nextscan
self.nextip = dstip
dsthost = (dstip,dstport)
branchunique = '%s' % random.getrandbits(32)
localtag = createTag('%s%s' % (''.join(map(lambda x: '%02x' % int(x), dsthost[0].split('.'))),'%04x' % dsthost[1]))
cseq = 1
fromaddr = '"%s"<%s>' % (self.fromname,self.fromaddr)
toaddr = fromaddr
callid = '%s' % random.getrandbits(80)
contact = None
if method != 'REGISTER':
contact = 'sip:%s@%s:%s' % (self.extension,self.externalip,self.localport)
data = makeRequest(
method,
fromaddr,
toaddr,
dsthost[0],
dsthost[1],
callid,
self.externalip,
branchunique,
compact=self.compact,
localtag=localtag,
contact=contact,
accept='application/sdp',
localport=self.localport,
extension=self.extension
)
try:
self.log.debug("sending packet to %s:%s" % dsthost)
self.log.debug("packet: %s" % `data`)
mysendto(self.sock,data,dsthost)
self.sentpackets += 1
#self.sock.sendto(data,dsthost)
if self.sessionpath is not None:
if self.packetcount.next():
try:
f=open(os.path.join(self.sessionpath,'lastip.pkl'),'w')
pickle.dump(self.nextip,f)
f.close()
self.log.debug('logged last ip %s' % self.nextip)
except IOError:
self.log.warn('could not log the last ip scanned')
if self.first is not None:
if self.sentpackets >= self.first:
self.log.info('Reached the limit to scan the first %s packets' % self.first)
self.nomoretoscan = True
except socket.error,err:
self.log.error( "socket error while sending to %s:%s -> %s" % (dsthost[0],dsthost[1],err))
pass
def main():
from optparse import OptionParser
from datetime import datetime
import anydbm
import os
from libs.svhelper import standardoptions, standardscanneroptions, calcloglevel
from sys import exit
import logging
import pickle
usage = "usage: %prog [options] host1 host2 hostrange\r\n"
usage += 'Scans for SIP devices on a given network\r\n\r\n'
usage += "examples:\r\n\r\n"
usage += "%prog 10.0.0.1-10.0.0.255 "
usage += "172.16.131.1 sipvicious.org/22 10.0.1.1/24"
usage += "1.1.1.1-20 1.1.2-20.* 4.1.*.*\r\n\r\n"
usage += "%prog -s session1 --randomize 10.0.0.1/8\r\n\r\n"
usage += "%prog --resume session1 -v\r\n\r\n"
usage += "%prog -p5060-5062 10.0.0.3-20 -m INVITE\r\n\r\n"
parser = OptionParser(usage, version="%prog v"+str(__version__)+__GPL__)
parser = standardoptions(parser)
parser = standardscanneroptions(parser)
parser.add_option("--randomscan", dest="randomscan", action="store_true",
default=False,
help="Scan random IP addresses")
parser.add_option("-i", "--input", dest="input",
help="Scan IPs which were found in a previous scan. Pass the session name as the argument", metavar="scan1")
parser.add_option("-I", "--inputtext", dest="inputtext",
help="Scan IPs from a text file - use the same syntax as command line but with new lines instead of commas. Pass the file name as the argument", metavar="scan1")
parser.add_option("-m", "--method", dest="method",
help="Specify the request method - by default this is OPTIONS.",
default='OPTIONS'
)
parser.add_option("-d", "--debug", dest="printdebug",
help="Print SIP messages received",
default=False, action="store_true"
)
parser.add_option("--first", dest="first",
help="Only send the first given number of messages (i.e. usually used to scan only X IPs)",
type="long",
)
parser.add_option("-e", "--extension", dest="extension", default='100',
help="Specify an extension - by default this is not set")
parser.add_option("--randomize", dest="randomize", action="store_true",
default=False,
help="Randomize scanning instead of scanning consecutive ip addresses")
parser.add_option("--srv", dest="srvscan", action="store_true",
default=False,
help="Scan the SRV records for SIP on the destination domain name." \
"The targets have to be domain names - example.org domain1.com")
parser.add_option('--fromname',dest="fromname", default="sipvicious",
help="specify a name for the from header")
(options, args) = parser.parse_args()
from libs.svhelper import getRange, scanfromfile, scanlist, scanrandom, getranges,\
ip4range, resumeFromIP, scanfromdb, dbexists, getTargetFromSRV
exportpath = None
if options.resume is not None:
exportpath = os.path.join(os.path.expanduser('~'),'.sipvicious',__prog__,options.resume)
if os.path.exists(os.path.join(exportpath,'closed')):
logging.error("Cannot resume a session that is complete")
exit(1)
if not os.path.exists(exportpath):
logging.critical('A session with the name %s was not found'% options.resume)
exit(1)
optionssrc = os.path.join(exportpath,'options.pkl')
previousresume = options.resume
previousverbose = options.verbose
options,args = pickle.load(open(optionssrc,'r'))
options.resume = previousresume
options.verbose = previousverbose
elif options.save is not None:
exportpath = os.path.join(os.path.expanduser('~'),'.sipvicious',__prog__,options.save)
logging.basicConfig(level=calcloglevel(options))
logging.debug('started logging')
scanrandomstore = None
if options.input is not None:
db = os.path.join(os.path.expanduser('~'),'.sipvicious',__prog__,options.input,'resultua')
if dbexists(db):
scaniter = scanfromdb(db,options.method.split(','))
else:
logging.error("the session name does not exist. Please use svreport to list existing scans")
exit(1)
elif options.randomscan:
logging.debug('making use of random scan')
logging.debug('parsing range of ports: %s' % options.port)
portrange = getRange(options.port)
internetranges =[[16777216,167772159],
[184549376,234881023],
[251658240,2130706431],
[2147549184L,2851995647L],
[2852061184L,2886729727L],
[2886795264L,3221159935L],
[3221226240L,3227017983L],
[3227018240L,3232235519L],
[3232301056L,3323068415L],
[3323199488L,3758096127L]
]
scanrandomstore = '.sipviciousrandomtmp'
resumescan = False
if options.save is not None:
scanrandomstore = os.path.join(exportpath,'random')
resumescan = True
scaniter = scanrandom(
internetranges,
portrange,
options.method.split(','),
randomstore=scanrandomstore,
resume=resumescan
)
elif options.inputtext:
logging.debug('Using IP addresses from input text file')
try:
f = open(options.inputtext,'r')
args = f.readlines()
f.close()
except IOError:
logging.critical('Could not open %s' % options.inputtext)
exit(1)
args = map(lambda x: x.strip(), args)
args = filter(lambda x: len(x) > 0, args)
logging.debug('ip addresses %s' % args)
try:
iprange = ip4range(*args)
except ValueError,err:
logging.error(err)
exit(1)
portrange = getRange(options.port)
if options.randomize:
scanrandomstore = '.sipviciousrandomtmp'
resumescan = False
if options.save is not None:
scanrandomstore = os.path.join(exportpath,'random')
resumescan = True
scaniter = scanrandom(map(getranges,args),portrange,options.method.split(','),randomstore=scanrandomstore,resume=resumescan)
else:
scaniter = scanlist(iprange,portrange,options.method.split(','))
else:
if len(args) < 1:
parser.error('Provide at least one target')
exit(1)
logging.debug('parsing range of ports: %s' % options.port)
portrange = getRange(options.port)
if options.randomize:
scanrandomstore = '.sipviciousrandomtmp'
resumescan = False
if options.save is not None:
scanrandomstore = os.path.join(exportpath,'random')
resumescan = True
scaniter = scanrandom(map(getranges,args),portrange,options.method.split(','),randomstore=scanrandomstore,resume=resumescan)
elif options.srvscan:
logging.debug("making use of SRV records")
scaniter = getTargetFromSRV(args,options.method.split(','))
else:
if options.resume is not None:
lastipsrc = os.path.join(exportpath,'lastip.pkl')
try:
f=open(lastipsrc,'r')
previousip = pickle.load(f)
f.close()
except IOError:
logging.critical('Could not read from %s' % lastipsrc)
exit(1)
logging.debug('Previous args: %s' % args)
args = resumeFromIP(previousip,args)
logging.debug('New args: %s' % args)
logging.info('Resuming from %s' % previousip)
# normal consecutive scan
try:
iprange = ip4range(*args)
except ValueError,err:
logging.error(err)
exit(1)
scaniter = scanlist(iprange,portrange,options.method.split(','))
if options.save is not None:
if options.resume is None:
exportpath = os.path.join(os.path.expanduser('~'),'.sipvicious',__prog__,options.save)
if os.path.exists(exportpath):
logging.warn('we found a previous scan with the same name. Please choose a new session name')
exit(1)
logging.debug('creating an export location %s' % exportpath)
try:
os.makedirs(exportpath,mode=0700)
except OSError:
logging.critical('could not create the export location %s' % exportpath)
exit(1)
optionsdst = os.path.join(exportpath,'options.pkl')
logging.debug('saving options to %s' % optionsdst)
pickle.dump([options,args],open(optionsdst,'w'))
try:
options.extension
except AttributeError:
options.extension = None
if options.autogetip:
tmpsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tmpsocket.connect(("msn.com",80))
options.externalip=tmpsocket.getsockname()[0]
tmpsocket.close()
sipvicious = DrinkOrSip(
scaniter,
selecttime=options.selecttime,
compact=options.enablecompact,
localport=options.localport,
externalip=options.externalip,
bindingip=options.bindingip,
sessionpath=exportpath,
extension=options.extension,
printdebug=options.printdebug,
first=options.first,
fromname=options.fromname,
)
start_time = datetime.now()
logging.info( "start your engines" )
try:
sipvicious.start()
if exportpath is not None:
open(os.path.join(exportpath,'closed'),'w').close()
except KeyboardInterrupt:
logging.warn( 'caught your control^c - quiting' )
pass
except Exception, err:
import traceback
from libs.svhelper import reportBugToAuthor
if options.reportBack:
logging.critical( "Got unhandled exception : sending report to author" )
reportBugToAuthor(traceback.format_exc())
else:
logging.critical( "Unhandled exception - please run same command with the -R option to send me an automated report")
pass
logging.exception( "Exception" )
if options.save is not None and sipvicious.nextip is not None and options.randomize is False and options.randomscan is False:
lastipdst = os.path.join(exportpath,'lastip.pkl')
logging.debug('saving state to %s' % lastipdst)
try:
f = open(lastipdst,'w')
pickle.dump(sipvicious.nextip,f)
f.close()
except OSError:
logging.warn('Could not save state to %s' % lastipdst)
elif options.save is None:
if scanrandomstore is not None:
#if options.randomize or options.randomscan:
try:
logging.debug('removing %s' % scanrandomstore)
os.unlink(scanrandomstore)
except OSError:
logging.warn('could not remove %s' % scanrandomstore)
pass
# display results
if not options.quiet:
lenres = len(sipvicious.resultua)
if lenres > 0:
logging.info("we have %s devices" % lenres)
if (lenres < 400 and options.save is not None) or options.save is None:
from libs.pptable import indent,wrap_onspace
width = 60
labels = ('SIP Device','User Agent','Fingerprint')
rows = list()
for k in sipvicious.resultua.keys():
rows.append((k,sipvicious.resultua[k],sipvicious.resultfp[k]))
print indent([labels]+rows,hasHeader=True,
prefix='| ', postfix=' |',wrapfunc=lambda x: wrap_onspace(x,width))
else:
logging.warn("too many to print - use svreport for this")
else:
logging.warn("found nothing")
end_time = datetime.now()
total_time = end_time - start_time
logging.info("Total time: %s" % total_time)
if __name__ == '__main__':
main()
|
from django.conf.urls.defaults import patterns, url
from django.views.decorators.csrf import csrf_exempt
from mozdns.mozbind.views import build_debug_soa
urlpatterns = patterns('',
url(r'^build_debug/(?P<soa_pk>[\w-]+)/$',
csrf_exempt(build_debug_soa)),
)
|
# We use three flags, met_dot, met_e and met_digit, to mark whether we have seen '.', 'e' or any digit so far.
# First we strip the string, then go through each char and check:
# If char == '+' or char == '-', then the previous char (if any) must be 'e'
# '.' cannot appear twice or after 'e'
# 'e' cannot appear twice, and there must be at least one digit before and after 'e'
# Any other non-digit char is invalid
class Solution:
def isNumber(self, s: str) -> bool:
s = s.strip()
met_dot = met_e = met_digit = False
for i, char in enumerate(s):
if char in ['+', '-']:
if i > 0 and s[i-1] != 'e':
return False
elif char == '.':
if met_dot or met_e: return False
met_dot = True
elif char == 'e':
if met_e or not met_digit:
return False
met_e, met_digit = True, False
elif char.isdigit():
met_digit = True
else:
return False
return met_digit
# Time: O(N)
# Space:O(1)
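# Quick illustrative checks (not part of the original solution); they mirror the
# classic "Valid Number" cases that the flag logic above is meant to handle.
if __name__ == '__main__':
    sol = Solution()
    assert sol.isNumber("0")
    assert sol.isNumber(" 0.1 ")
    assert sol.isNumber("2e10")
    assert sol.isNumber("-90e3")
    assert sol.isNumber("6e-1")
    assert sol.isNumber("53.5e93")
    assert not sol.isNumber("abc")
    assert not sol.isNumber("1 a")
    assert not sol.isNumber("1e")
    assert not sol.isNumber("e3")
    assert not sol.isNumber("99e2.5")
    assert not sol.isNumber(".")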
|
#coding=utf8
from uliweb import expose, redirect  # assumes redirect is importable from the uliweb package like expose
@expose('/')
def index():
return redirect('/yesno/list')
|
import tensorflow as tf
train, test = tf.keras.datasets.fashion_mnist.load_data()
trainImages, trainLabels = train
testImages, testLabels = test
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(28,28)),
tf.keras.layers.Dense(units=128, activation=tf.nn.relu),
tf.keras.layers.Dense(units=10, activation=tf.nn.softmax)])
model.compile(optimizer=tf.train.AdamOptimizer(), loss="sparse_categorical_crossentropy", metrics=["accuracy"])
history = model.fit(trainImages, trainLabels, epochs=5)
eval_result = model.evaluate(testImages, testLabels)
loss, acc = eval_result
print([loss, acc])
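# Note (illustrative addition, not in the original script): Fashion-MNIST pixels
# are uint8 values in [0, 255]; scaling them to [0, 1] before calling fit()
# usually speeds up convergence, e.g.:
#     trainImages, testImages = trainImages / 255.0, testImages / 255.0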
|
# Copyright 2017 st--, Mark van der Wilk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
import warnings
import numpy as np
import tensorflow as tf
from . import transforms, kernels, settings
from .decors import params_as_tensors, params_as_tensors_for
from .params import Parameter, Parameterized
from .dispatch import dispatch
logger = settings.logger()
class InducingFeature(Parameterized):
"""
Abstract base class for inducing features.
"""
@abstractmethod
def __len__(self) -> int:
"""
Returns the number of features, relevant for example to determine the
size of the variational distribution.
"""
raise NotImplementedError()
class InducingPointsBase(InducingFeature):
"""
Real-space inducing points
"""
def __init__(self, Z):
"""
:param Z: the initial positions of the inducing points, size M x D
"""
super().__init__()
self.Z = Parameter(Z, dtype=settings.float_type)
def __len__(self):
return self.Z.shape[0]
class InducingPoints(InducingPointsBase):
pass
@dispatch(InducingPoints, kernels.Kernel)
def Kuu(feat, kern, *, jitter=0.0):
with params_as_tensors_for(feat):
Kzz = kern.K(feat.Z)
Kzz += jitter * tf.eye(len(feat), dtype=settings.dtypes.float_type)
return Kzz
@dispatch(InducingPoints, kernels.Kernel, object)
def Kuf(feat, kern, Xnew):
with params_as_tensors_for(feat):
Kzx = kern.K(feat.Z, Xnew)
return Kzx
class Multiscale(InducingPointsBase):
r"""
Multi-scale inducing features
Originally proposed in
::
@incollection{NIPS2009_3876,
title = {Inter-domain Gaussian Processes for Sparse Inference using Inducing Features},
author = {Miguel L\'{a}zaro-Gredilla and An\'{\i}bal Figueiras-Vidal},
booktitle = {Advances in Neural Information Processing Systems 22},
year = {2009},
}
"""
def __init__(self, Z, scales):
super().__init__(Z)
self.scales = Parameter(scales,
transform=transforms.positive) # Multi-scale feature widths (std. dev. of Gaussian)
if self.Z.shape != scales.shape:
raise ValueError("Input locations `Z` and `scales` must have the same shape.") # pragma: no cover
@staticmethod
def _cust_square_dist(A, B, sc):
"""
Custom version of _square_dist that allows sc to provide per-datapoint length
scales. sc: N x M x D.
"""
return tf.reduce_sum(tf.square((tf.expand_dims(A, 1) - tf.expand_dims(B, 0)) / sc), 2)
@dispatch(Multiscale, kernels.RBF, object)
def Kuf(feat, kern, Xnew):
with params_as_tensors_for(feat, kern):
Xnew, _ = kern._slice(Xnew, None)
Zmu, Zlen = kern._slice(feat.Z, feat.scales)
idlengthscales = kern.lengthscales + Zlen
d = feat._cust_square_dist(Xnew, Zmu, idlengthscales)
Kuf = tf.transpose(kern.variance * tf.exp(-d / 2) *
tf.reshape(tf.reduce_prod(kern.lengthscales / idlengthscales, 1),
(1, -1)))
return Kuf
@dispatch(Multiscale, kernels.RBF)
def Kuu(feat, kern, *, jitter=0.0):
with params_as_tensors_for(feat, kern):
Zmu, Zlen = kern._slice(feat.Z, feat.scales)
idlengthscales2 = tf.square(kern.lengthscales + Zlen)
sc = tf.sqrt(
tf.expand_dims(idlengthscales2, 0) + tf.expand_dims(idlengthscales2, 1) - tf.square(
kern.lengthscales))
d = feat._cust_square_dist(Zmu, Zmu, sc)
Kzz = kern.variance * tf.exp(-d / 2) * tf.reduce_prod(kern.lengthscales / sc, 2)
Kzz += jitter * tf.eye(len(feat), dtype=settings.float_type)
return Kzz
def inducingpoint_wrapper(feat, Z):
"""
Models which used to take only Z can now pass `feat` and `Z` to this method. This method will
check for consistency and return the correct feature. This allows backwards compatibility in
for the methods.
"""
if feat is not None and Z is not None:
raise ValueError("Cannot pass both an InducingFeature instance and Z values") # pragma: no cover
elif feat is None and Z is None:
raise ValueError("You must pass either an InducingFeature instance or Z values") # pragma: no cover
elif Z is not None:
feat = InducingPoints(Z)
elif isinstance(feat, np.ndarray):
feat = InducingPoints(feat)
else:
assert isinstance(feat, InducingFeature) # pragma: no cover
return feat
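# Illustrative sketch of how the wrapper above resolves the two call styles
# (Z stands for any M x D numpy array of inducing inputs):
#     inducingpoint_wrapper(feat=None, Z=Z)                  # -> wraps Z in InducingPoints
#     inducingpoint_wrapper(feat=InducingPoints(Z), Z=None)  # -> returns the feature unchanged
#     inducingpoint_wrapper(feat=InducingPoints(Z), Z=Z)     # -> raises ValueError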
|
import os
from dotenv import load_dotenv
from os import environ
load_dotenv(".env")
BOT_TOKEN = os.environ["BOT_TOKEN"]
DATABASE = os.environ["DATABASE"]
admins = [
381252111,
]
async def load_admins() -> tuple:
return tuple(map(int, environ["ADMINS"].split(",")))
|