content stringlengths 5 1.05M |
|---|
#
# @lc app=leetcode id=657 lang=python3
#
# [657] Robot Return to Origin
#
# https://leetcode.com/problems/robot-return-to-origin/description/
#
# algorithms
# Easy (73.67%)
# Likes: 1228
# Dislikes: 692
# Total Accepted: 274.1K
# Total Submissions: 371K
# Testcase Example: '"UD"'
#
# There is a robot starting at position (0, 0), the origin, on a 2D plane.
# Given a sequence of its moves, judge if this robot ends up at (0, 0) after it
# completes its moves.
#
# The move sequence is represented by a string, and the character moves[i]
# represents its ith move. Valid moves are R (right), L (left), U (up), and D
# (down). If the robot returns to the origin after it finishes all of its
# moves, return true. Otherwise, return false.
#
# Note: The way that the robot is "facing" is irrelevant. "R" will always make
# the robot move to the right once, "L" will always make it move left, etc.
# Also, assume that the magnitude of the robot's movement is the same for each
# move.
#
#
# Example 1:
#
#
# Input: moves = "UD"
# Output: true
# Explanation: The robot moves up once, and then down once. All moves have the
# same magnitude, so it ended up at the origin where it started. Therefore, we
# return true.
#
#
# Example 2:
#
#
# Input: moves = "LL"
# Output: false
# Explanation: The robot moves left twice. It ends up two "moves" to the left
# of the origin. We return false because it is not at the origin at the end of
# its moves.
#
#
# Example 3:
#
#
# Input: moves = "RRDD"
# Output: false
#
#
# Example 4:
#
#
# Input: moves = "LDRRLRUULR"
# Output: false
#
#
#
# Constraints:
#
#
# 1 <= moves.length <= 2 * 10^4
# moves only contains the characters 'U', 'D', 'L' and 'R'.
#
#
#
# @lc code=start
class Solution:
    def judgeCircle(self, moves: str) -> bool:
        """Return True iff the robot ends back at the origin (0, 0).

        Each 'U' cancels a 'D' and each 'L' cancels an 'R', so the robot is
        at the origin exactly when the opposing move counts are equal.
        Counting characters (C-speed str.count) replaces the original
        per-move coordinate bookkeeping; behavior is identical.
        """
        return (moves.count('U') == moves.count('D')
                and moves.count('L') == moves.count('R'))
# @lc code=end
|
import base64
import codecs
import json
import os
from Crypto.Cipher import AES
class Cracker():
    """Build the encrypted request payload ('params' + 'encSecKey') used by
    the NetEase Cloud Music web API: the JSON body is AES-CBC encrypted
    twice (fixed nonce key, then a random key), and the random key is
    encrypted with textbook RSA using the site's public key."""

    # Hard-coded RSA public modulus (hex) taken from the web client.
    modulus = '00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7'
    # Fixed AES key for the first (inner) encryption pass.
    nonce = '0CoJUm6Qyw8W8jud'
    # RSA public exponent in hex (0x10001 == 65537).
    pubKey = '010001'

    @classmethod
    def get(cls, text):
        """Return {'params': ..., 'encSecKey': ...} for *text* (any
        JSON-serializable object)."""
        text = json.dumps(text)
        secKey = cls._createSecretKey(16)
        # Inner pass with the fixed nonce, outer pass with the random key.
        encText = cls._aesEncrypt(cls._aesEncrypt(text, cls.nonce), secKey)
        encSecKey = cls._rsaEncrypt(secKey, cls.pubKey, cls.modulus)
        post_data = {'params': encText, 'encSecKey': encSecKey}
        return post_data

    @classmethod
    def _aesEncrypt(cls, text, secKey):
        """AES-CBC encrypt *text* with *secKey*; returns base64 bytes."""
        # PKCS#7-style padding up to a 16-byte block boundary.
        pad = 16 - len(text) % 16
        if isinstance(text, bytes):
            text = text.decode('utf-8')
        text = text + str(pad * chr(pad))
        secKey = secKey.encode('utf-8')
        # Mode constant 2 is AES.MODE_CBC in PyCrypto/PyCryptodome;
        # the IV is fixed, matching the web client.
        encryptor = AES.new(secKey, 2, b'0102030405060708')
        text = text.encode('utf-8')
        ciphertext = encryptor.encrypt(text)
        ciphertext = base64.b64encode(ciphertext)
        return ciphertext

    @classmethod
    def _rsaEncrypt(cls, text, pubKey, modulus):
        """Textbook (unpadded) RSA of the reversed key string, as the web
        client does: c = m**e mod n, returned as a 256-char hex string."""
        text = text[::-1]
        rs = int(codecs.encode(text.encode('utf-8'), 'hex_codec'), 16)**int(pubKey, 16) % int(modulus, 16)
        return format(rs, 'x').zfill(256)

    @classmethod
    def _createSecretKey(cls, size):
        """Return a 16-char pseudo-random hex key.

        NOTE(review): str(os.urandom(size)) hex-encodes the characters of
        the bytes *repr* (e.g. "b'\\x..."), not the raw random bytes, so
        the key has less entropy than intended -- confirm whether this
        matches the original client before changing it.
        """
        return (''.join(map(lambda xx: (hex(ord(xx))[2:]), str(os.urandom(size)))))[0:16]
if __name__ == "__main__":
    # Smoke test: print an encrypted payload for a sample string.
    print(Cracker.get("Hello World"))
|
from setuptools import setup
# Runtime requirements installed alongside the package.
dependencies = [
    'aiohttp',
    'pyjwt'
]
version = '1.0.0'
# Package metadata for the aioupbit distribution (async Upbit API wrapper).
setup(name='aioupbit',
      version=version,
      packages=['aioupbit'],
      description='Asynchronous wrapper for the Upbit API',
      url='https://github.com/chaos314/aio-upbit',
      author='Seokhwan Cheon',
      author_email='chaos314@gmail.com',
      license='MIT',
      install_requires=dependencies,
      keywords=['upbit', 'crypto', 'bitcoin'],
      classifiers=[
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Programming Language :: Python :: 3.6',
          'Topic :: Office/Business :: Financial',
      ],
      )
|
# -*- coding: utf-8 -*-
from .autogen import *
from .custom import *
# modules = glob.glob(join(dirname(__file__), "*.py"))
# __all__ = [basename(f)[:-3] for f in modules if isfile(f) and f.endswith('_block.py')]
|
import csv
import math
import random
import time
import brainex.database.genexengine as gxdb
from pyspark import SparkContext, SparkConf
from brainex.parse import generate_query
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import findspark
import os
# spark_location = '/Users/Leo/spark-2.4.3-bin-hadoop2.7' # Set your own
# java8_location = '/Library/Java/JavaVirtualMachines/jdk1.8.0_151.jdk/Contents/Home/jre'
# os.environ['JAVA_HOME'] = java8_location
# findspark.init(spark_home=spark_location)
# create the spark context
def experiment_genex_ke(data_file, num_sample, num_query, best_k, feature_num, add_uuid, dist_type):
    """Measure Genex query accuracy against brute force while growing _ke.

    Builds a Genex database from *data_file* on a local Spark context, runs
    *num_query* max-length queries both brute-force and through Genex, and
    keeps increasing the ke parameter (by 1% of the subsequence count per
    iteration) until the mean l1 difference drops below 1e-4 or ke exceeds
    the number of subsequences.

    Returns a tuple
        (l1_ke_list, gx_timing_list, bf_time_list, timing_dict)
    where l1_ke_list is [[l1 per iteration], [ke per iteration]].
    """
    num_cores = 32
    conf = SparkConf(). \
        setMaster("local[" + str(num_cores) + "]"). \
        setAppName("Genex").set('spark.driver.memory', '64G'). \
        set('spark.driver.maxResultSize', '64G')
    sc = SparkContext(conf=conf)
    # create gxdb from a csv file
    print('Performing clustering ...')
    mydb = gxdb.from_csv(data_file, sc=sc, feature_num=feature_num, add_uuid=add_uuid,
                         _rows_to_consider=num_sample)
    print('Generating query of max seq len ...')
    # generate the query sets: randomly pick sequences of maximum length
    # from the database (the query id must exist in the database).
    query_set = list()
    for i in range(num_query):
        query_set.append(mydb.get_random_seq_of_len(mydb.get_max_seq_len(), seed=i))
    best_l1_so_far = math.inf
    l1_ke_list = [[], []]  # [0] = l1 values, [1] = corresponding ke values
    timing_dict = dict()
    # perform clustering and record how long it took
    cluster_start_time = time.time()
    mydb.build(st=0.1, dist_type=dist_type)
    timing_dict['cluster time'] = time.time() - cluster_start_time
    # Ground truth: brute-force result (and per-query timing) for each query.
    bf_result_dict = dict()
    bf_time_list = list()
    for i, q in enumerate(query_set):
        print('Brute Force Querying #' + str(i) + ' of ' + str(len(query_set)) + '; query = ' + str(q))
        start = time.time()
        query_result_bf = mydb.query_brute_force(query=q, best_k=best_k)
        bf_time_list.append(time.time() - start)
        bf_result_dict[q] = query_result_bf
    timing_dict['bf query time'] = np.mean(bf_time_list)
    print('Running Genex Query ...')
    gx_timing_list = list()
    current_ke = best_k
    while best_l1_so_far > 0.0001 and current_ke < mydb.get_num_subsequences():
        diff_list = []
        # calculate diff for all queries at the current ke
        for i, q in enumerate(query_set):
            print(
                'dist_type: ' + dist_type + '. Best k = ' + str(best_k) + '- Querying #' + str(i) + ' of ' + str(len(query_set)) + '; query = ' + str(
                    q))
            start = time.time()
            query_result_gx = mydb.query(query=q, best_k=best_k, _ke=current_ke)
            gx_timing_list.append(time.time() - start)
            # calculating l1 distance against the brute-force ground truth
            for gx_r, bf_r in zip(query_result_gx, bf_result_dict[q]):  # retrieve bf result from the result dict
                diff_list.append(abs(gx_r[0] - bf_r[0]))
        print('Diff list is ' + str(diff_list))
        cur_l1 = np.mean(diff_list)
        print('Current l1 and ke are: ' + str(cur_l1) + ' ' + str(current_ke))
        l1_ke_list[0].append(cur_l1)
        l1_ke_list[1].append(current_ke)
        current_ke = int(current_ke + mydb.get_num_subsequences() * 0.01)  # increment ke by 1% of subsequences
        # break the loop if current_ke exceeds the number of subsequences
        if current_ke > mydb.get_num_subsequences():
            break
        best_l1_so_far = cur_l1 if cur_l1 < best_l1_so_far else best_l1_so_far  # update best-so-far
    timing_dict['gx query time'] = np.mean(gx_timing_list)
    sc.stop()
    return l1_ke_list, gx_timing_list, bf_time_list, timing_dict,
def harvest_ke_multiple_k(k_to_test, experiment_set):
    """Run experiment_genex_ke() for each dataset and each k in *k_to_test*,
    plotting l1 vs. ke per dataset.

    Returns {dataset_name: [ke_result_dict, l1_ke_k_array]} where
    ke_result_dict maps k to the full experiment_genex_ke() result tuple and
    l1_ke_k_array collects per-k arrays of (l1, ke, k) rows.
    """
    result_dict = dict()
    for dataset_name, config in experiment_set.items():
        ke_result_dict = dict()
        l1_ke_k_array = list()
        fig, ax = plt.subplots()
        for k in k_to_test:
            ke_result_dict[k] = experiment_genex_ke(config['data_original'], num_sample=40, num_query=40, best_k=k,
                                                    add_uuid=config['add_uuid'], feature_num=config['feature_num'],
                                                    dist_type=config['dist_type'])
            # result[0] is l1_ke_list == [[l1...], [ke...]]; transpose to
            # (n, 2) rows and append a column holding this k.
            a = np.transpose(ke_result_dict[k][0])
            b = np.expand_dims([k for i in range(len(a))], axis=1)
            c = np.concatenate([a, b], axis=1)
            l1_ke_k_array.append(c)
            # x axis = ke values, y axis = l1 values for this k
            ax.plot(ke_result_dict[k][0][1], ke_result_dict[k][0][0], label='k=' + str(k), marker='o')
        ax.set_ylabel('RMSE')
        ax.set_xlabel('Ke')
        ax.set_title(dataset_name + ' l1-ke')
        ax.legend()
        plt.show()
        result_dict[dataset_name] = [ke_result_dict, l1_ke_k_array]
    return result_dict
# Experiment driver: sweep several best-k values over four UCR-style
# time-series datasets using the 'ma' distance type.
k_to_test = [50, 15, 9, 1]
experiment_set_ma = {
    'italyPowerDemand': {'data_original': 'data_original/ItalyPower.csv',
                         'feature_num': 2,
                         'add_uuid': False,
                         'dist_type': 'ma'},
    'ecgFiveDays': {'data_original': 'data_original/ECGFiveDays.csv',
                    'feature_num': 2,
                    'add_uuid': False,
                    'dist_type': 'ma'},
    'Gun_Point_TRAIN': {'data_original': 'data_original/Gun_Point_TRAIN.csv',
                        'feature_num': 1,
                        'add_uuid': True,
                        'dist_type': 'ma'},
    'synthetic_control_TRAIN': {'data_original': 'data_original/synthetic_control_TRAIN.csv',
                                'feature_num': 1,
                                'add_uuid': True,
                                'dist_type': 'ma'},
}
result_dict = harvest_ke_multiple_k(k_to_test, experiment_set_ma)
|
from .helper import PillowTestCase, hopper
from .test_imageqt import PillowQtTestCase
from PIL import ImageQt, Image
if ImageQt.qt_is_installed:
from PIL.ImageQt import QImage
try:
from PyQt5 import QtGui
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QLabel, QApplication
QT_VERSION = 5
except (ImportError, RuntimeError):
try:
from PySide2 import QtGui
from PySide2.QtWidgets import QWidget, QHBoxLayout, QLabel, \
QApplication
QT_VERSION = 5
except (ImportError, RuntimeError):
try:
from PyQt4 import QtGui
from PyQt4.QtGui import QWidget, QHBoxLayout, QLabel, \
QApplication
QT_VERSION = 4
except (ImportError, RuntimeError):
from PySide import QtGui
from PySide.QtGui import QWidget, QHBoxLayout, QLabel, \
QApplication
QT_VERSION = 4
class TestToQImage(PillowQtTestCase, PillowTestCase):
    """Round-trip tests for PIL.ImageQt.toqimage/fromqimage across Qt
    bindings (QT_VERSION is set to 4 or 5 by the import cascade above)."""

    def test_sanity(self):
        # Convert hopper in each supported mode to a QImage and back.
        for mode in ('RGB', 'RGBA', 'L', 'P', '1'):
            src = hopper(mode)
            data = ImageQt.toqimage(src)
            self.assertIsInstance(data, QImage)
            self.assertFalse(data.isNull())
            # reload directly from the qimage
            rt = ImageQt.fromqimage(data)
            if mode in ('L', 'P', '1'):
                # Paletted/grayscale modes come back as RGB.
                self.assert_image_equal(rt, src.convert('RGB'))
            else:
                self.assert_image_equal(rt, src)
            if mode == '1':
                # BW appears to not save correctly on QT4 and QT5
                # kicks out errors on console:
                # libpng warning: Invalid color type/bit depth combination
                # in IHDR
                # libpng error: Invalid IHDR data
                continue
            # Test saving the file
            tempfile = self.tempfile('temp_{}.png'.format(mode))
            data.save(tempfile)
            # Check that it actually worked.
            reloaded = Image.open(tempfile)
            # Gray images appear to come back in palette mode.
            # They're roughly equivalent
            if QT_VERSION == 4 and mode == 'L':
                src = src.convert('P')
            self.assert_image_equal(reloaded, src)

    def test_segfault(self):
        # Regression test: constructing Example used to segfault (see below).
        app = QApplication([])
        ex = Example()
        assert app  # Silence warning
        assert ex  # Silence warning
if ImageQt.qt_is_installed:
    class Example(QWidget):
        """Minimal widget reproducing a past segfault: setting a QPixmap
        built from an ImageQt image onto a QLabel."""

        def __init__(self):
            super(Example, self).__init__()
            img = hopper().resize((1000, 1000))
            qimage = ImageQt.ImageQt(img)
            pixmap1 = QtGui.QPixmap.fromImage(qimage)
            QHBoxLayout(self)  # hbox
            lbl = QLabel(self)
            # Segfault in the problem
            lbl.setPixmap(pixmap1.copy())
|
# -*- coding: utf-8 -*-
import unittest
import os
import re
from iktomi.utils import html
from lxml.html import Element
from lxml import etree
import lxml.html as h
class TestSanitizer(unittest.TestCase):
    '''Tests for sanitizer based on lxml (iktomi.utils.html.sanitize).'''

    def setUp(self):
        # Baseline cleaner configuration shared by all tests; individual
        # tests mutate self.attrs before calling self.sanitize().
        self.attrs = {
            'allow_tags': ['a', 'p', 'br', 'li', 'ul', 'ol', 'hr', 'u', 'i', 'b',
                           'blockquote', 'sub', 'sup', 'span', 'img'],
            'safe_attrs': ['href', 'src', 'alt', 'title', 'class', 'rel'],
            'drop_empty_tags': ['p', 'a', 'u', 'i', 'b', 'sub', 'sup'],
            'allow_classes': {},
            'tags_to_wrap': [],
            #'strip_whitespace': True,
        }

    def sanitize(self, text):
        """Run the sanitizer under test with the current self.attrs."""
        return html.sanitize(text, **self.attrs)

    def assertSanitize(self, text, right):
        """Assert that sanitizing *text* yields exactly *right*."""
        res = self.sanitize(text)
        self.assertEqual(res, right)

    def test_safe_attrs(self):
        # Attributes outside safe_attrs are stripped.
        self.assertSanitize('<p notsafeattr="s" abbr="1" alt="Alt">Safe p</p>',
                            '<p alt="Alt">Safe p</p>')

    def test_allowed_protocols(self):
        # Only URLs with allowed protocols survive in href/src/cite.
        self.attrs['allowed_protocols'] = set(['http'])
        self.attrs['allow_external_src'] = True
        self.attrs['safe_attrs'].append('cite')
        self.assertSanitize('<a href="http://iktomi.com">sample text</a>',
                            '<a href="http://iktomi.com">sample text</a>')
        self.assertSanitize('<a href="iktomi://http.com">sample text</a>',
                            'sample text')
        self.assertSanitize('<img src="http://iktomi.com">',
                            '<img src="http://iktomi.com">')
        self.assertSanitize('<img src="iktomi://http.com">', '')
        self.assertSanitize('<blockquote cite="http://iktomi.com">sample text</blockquote>',
                            '<blockquote cite="http://iktomi.com">sample text</blockquote>')
        self.assertSanitize('<blockquote cite="iktomi://http.com">sample text</blockquote>',
                            '<blockquote>sample text</blockquote>')

    def test_safe_tags(self):
        # Disallowed tags are dropped; script content disappears entirely,
        # unknown inline tags keep their text.
        self.assertSanitize('<p alt="Alt">Safe p <script>bad_script()</script></p> <accept>acc</accept>',
                            '<p alt="Alt">Safe p </p> acc')

    def test_empty_tags(self):
        # Tags listed in drop_empty_tags are removed when they contain no
        # meaningful content (a nbsp keeps a tag alive).
        self.assertSanitize('<p alt="Alt">p</p><p alt="Alt"> </p><p style="color:red"></p><p></p>',
                            '<p alt="Alt">p</p><p alt="Alt"> </p>')
        self.assertSanitize('<b>some<span> </span>text</b>',
                            '<b>some<span> </span>text</b>')
        self.assertSanitize('<p>head</p><p><br></p><p>tail</p>',
                            '<p>head</p><p>tail</p>')
        self.assertSanitize('<p>head</p><p><b><i> <br /> </i></b></p><p>tail</p>',
                            '<p>head</p><p>tail</p>')
        self.assertSanitize('<p>head</p><p><b>mid<i></i></b></p><p>tail</p>',
                            '<p>head</p><p><b>mid</b></p><p>tail</p>')
        self.attrs['allow_tags'].append('div')
        self.assertSanitize('<div>text<br>text</div>',
                            '<div>text<br>text</div>')
        self.assertSanitize('<i><br>text</i>',
                            '<i><br>text</i>')

    @unittest.skip('lxml does not provide css filtration')
    def test_safe_css(self):
        '''Ensure that sanitizer does not remove safe css'''
        self.attrs['allowed_attributes'].append('style')
        res = self.sanitize('<p style="color: #000; background-color: red; font-size: 1.2em">p</p>')
        assert 'color: #000; background-color: red; font-size: 1.2em' in res

    def test_allowed_classes(self):
        # allow_classes values may be a list of class names or a predicate.
        self.attrs['allow_classes']['p'] = ['yellow']
        self.attrs['allow_classes']['b'] = lambda x: 'b' in x
        self.assertSanitize('<p class="yellow green">',
                            '<p class="yellow"></p>')
        self.assertSanitize('<sup class="yellow green" title="Alt">a</sup>',
                            '<sup title="Alt">a</sup>')
        self.assertSanitize('<b class="has_b has_c">a</b>',
                            '<b class="has_b">a</b>')

    def test_tags_sticking(self):
        # Dropped tags must not glue adjacent text together without spaces.
        self.attrs['allow_tags'].remove('span')
        res = self.sanitize('<p>a</p> <p>b</p>')
        self.assertEqual(res, '<p>a</p> <p>b</p>')
        res = self.sanitize('<b>a</b> <b>b</b>')
        self.assertEqual(res, '<b>a</b> <b>b</b>')
        res = self.sanitize('<span>a</span> <p>b</p>')
        self.assertEqual(res, 'a <p>b</p>')
        res = self.sanitize('<p><span>a</span> <span>b</span></p>')
        self.assertEqual(res, '<p>a b</p>')
        # lxml parser eats the space on some environments
        #res = self.sanitize('<brbr>a</brbr> <p>b</p>')
        #self.assertEqual(res, 'a <p>b</p>')
        #res = self.sanitize('<p><brbr>a</brbr> <brbr>b</brbr></p>')
        #self.assertEqual(res, '<p>a b</p>')

    @unittest.skip('not supported')
    def test_autoclosing_attrs_xhtml(self):
        self.attrs['method'] = 'xhtml'
        res = self.sanitize('<br><hr>b ')
        self.assertEqual(res, '<br /><hr />b')

    def test_autoclosing_attrs_html(self):
        # In html mode void tags stay unclosed, open <p> gets closed.
        self.attrs['drop_empty_tags'] = []
        res = self.sanitize('<br><hr>b <p>')
        self.assertEqual(res, '<br><hr>b <p></p>')

    def test_remove_empty_a(self):
        # <a> without href is unwrapped, keeping its children.
        self.assertSanitize('<a href="moo">BLABLA</a> <a>txt <span>foo</span></a>',
                            '<a href="moo">BLABLA</a> txt <span>foo</span>')
        self.assertSanitize('<p><a>run</a><b><a>bar</a></b></p>',
                            '<p>run<b>bar</b></p>')

    @unittest.skip('lxml does not provide css filtration')
    def test_unsafe_css(self):
        '''Special test for html5: html5lib has very ultimate css cleanup with gauntlets'''
        self.attrs['allowed_attributes'].append('style')
        res = self.sanitize('<p style="background: url(javascript:void); '
                            'color: #000; width: e/**/xpression(alert());">p</p>')
        self.assertEqual(res, '<p>p</p>')

    def test_plain_text(self):
        res = self.sanitize('Some plain text')
        self.assertEqual(res, 'Some plain text')

    def test_empty_strings(self):
        res = self.sanitize('')
        self.assertEqual(res, '')
        res = self.sanitize('\t \n')
        self.assertEqual(res, '')

    def test_on_real_data(self):
        '''
        Compare with logged genshi output to ensure that there are no
        new errors
        '''
        # Disabled: the early return below makes the rest of this method
        # dead code (it also contains py2-only .decode() calls on str).
        return None
        skips = 10
        if os.path.isdir('clean_html'):
            self.attrs['string_callbacks'] = [html.remove_TinyMCE_trash,
                                              html.strip_empty_tags_nested,
                                              spaceless]
            for dir, dirs, files in os.walk('clean_html'):
                for file in filter(lambda x: x.endswith('.in'), files):
                    path = os.path.join(dir, file)
                    in_ = open(path, 'r').read().decode('utf-8')
                    out = open(path[:-3] + '.out', 'r').read().decode('utf-8')
                    out = html.remove_TinyMCE_trash(out)  # Old sanitizer can't do this
                    #out = self.sanitize(out).strip()
                    res = self.sanitize(in_).strip()
                    if res != out:
                        if skips < 10:
                            print(in_, '\n----------\n', res + '---\n!=\n' + out + '---\n\n\n')
                        skips -= 1
                        if not skips:
                            return
                    #print "asserted"

    def test_no_initial_data(self):
        # Sanitizer defaults must still strip scripts and junk attributes.
        self.attrs = {}
        res = self.sanitize('a<p color: #000" class="2">p</p><script></script>')
        self.assertEqual(res, 'a<p>p</p>')

    @unittest.skip('lxml does not support this option')
    def test_escaping(self):
        self.attrs['escape_invalid_tags'] = True
        res = self.sanitize('a<p>p</p><script>alert()</script>')
        self.assertEqual(res, 'a<p>p</p>&lt;script&gt;alert()&lt;/script&gt;')

    def test_get_wrapper_tag(self):
        # get_wrapper_tag returns an element factory result only when the
        # chosen wrapper tag is in allow_tags; otherwise None.
        c = html.Cleaner(allow_tags=None, wrap_inline_tags='div')
        self.assertEqual(c.get_wrapper_tag(), None)
        c = html.Cleaner(allow_tags=['p', 'div'], wrap_inline_tags=False)
        self.assertEqual(c.get_wrapper_tag(), None)
        c = html.Cleaner(allow_tags=['p', 'div'], wrap_inline_tags=None)
        self.assertEqual(c.get_wrapper_tag().tag, 'p')
        c = html.Cleaner(allow_tags=['div'], wrap_inline_tags=None)
        self.assertEqual(c.get_wrapper_tag().tag, 'div')
        c = html.Cleaner(allow_tags=['b'], wrap_inline_tags=None)
        self.assertEqual(c.get_wrapper_tag(), None)
        c = html.Cleaner(allow_tags=['p', 'div'], wrap_inline_tags='div')
        self.assertEqual(c.get_wrapper_tag().tag, 'div')
        c = html.Cleaner(allow_tags=['p', 'div', 'span'],
                         wrap_inline_tags=(lambda: Element('span')))
        self.assertEqual(c.get_wrapper_tag().tag, 'span')
        c = html.Cleaner(allow_tags=['p', 'div'],
                         wrap_inline_tags=(lambda: Element('span')))
        self.assertEqual(c.get_wrapper_tag(), None)

    def test_is_element_empty(self):
        # Only tags in drop_empty_tags (plus <br>) count as droppable-empty.
        c = html.Cleaner(allow_tags=['p', 'div', 'span', 'br', 'pre'],
                         drop_empty_tags=['p', 'span'])
        doc = h.fragment_fromstring('<p></p><span>asd</span><br><pre></pre>',
                                    create_parent=True)
        p = doc.xpath('.//p')[0]
        self.assertTrue(c.is_element_empty(p))
        span = doc.xpath('.//span')[0]
        self.assertFalse(c.is_element_empty(span))
        br = doc.xpath('.//br')[0]
        self.assertTrue(c.is_element_empty(br))
        pre = doc.xpath('.//pre')[0]
        self.assertFalse(c.is_element_empty(pre))

    def test_tags_to_wrap(self):
        # Inline tags/text outside block elements get wrapped in <p>,
        # and <br> splits paragraphs.
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = True
        self.assertSanitize("head<b>bold</b>tail",
                            "<p>head<b>bold</b>tail</p>")
        self.assertSanitize("head<b>bold</b>boldtail<i>italic</i><p>par</p>tail",
                            "<p>head<b>bold</b>boldtail<i>italic</i></p><p>par</p><p>tail</p>")
        self.assertSanitize("<p>par</p><b>bla</b>text<p>blabla</p>",
                            "<p>par</p><p><b>bla</b>text</p><p>blabla</p>")
        self.assertSanitize("<p>par</p>text<b>bla</b>text<p>blabla</p>",
                            "<p>par</p><p>text<b>bla</b>text</p><p>blabla</p>")
        self.assertSanitize('first<br>second<br>third',
                            '<p>first</p><p>second</p><p>third</p>')
        self.assertSanitize('first<br>second<p>third</p>',
                            '<p>first</p><p>second</p><p>third</p>')
        self.assertSanitize('<p>first</p>tail<br>second<p>third</p>',
                            '<p>first</p><p>tail</p><p>second</p><p>third</p>')

    def test_dom_callback(self):
        def fix_link_domain(dom):
            # sample callback
            for el in dom.xpath('.//a'):
                if el.attrib['href']:
                    el.attrib['href'] = el.attrib['href'].replace('example', 'iktomi')
        self.attrs['dom_callbacks'] = [fix_link_domain]
        self.assertSanitize('<a href="http://example.com">sample text</a>',
                            '<a href="http://iktomi.com">sample text</a>')

    def test_tags_to_wrap_trailing_br(self):
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = True
        self.assertSanitize("<p>head</p><br> ",
                            "<p>head</p>")

    def test_tags_to_wrap_double_br(self):
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = True
        self.assertSanitize("head<br><br>tail",
                            "<p>head</p><p>tail</p>")
        self.assertSanitize("head<br> <br>tail",
                            "<p>head</p><p>tail</p>")
        self.assertSanitize("<br><br><br><br>", "")

    def test_split_paragraphs_by_br(self):
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = True
        self.attrs['drop_empty_tags'] = []
        self.assertSanitize("<p>head<br><br><br></p>",
                            "<p>head</p><p></p><p></p><p></p>")
        self.assertSanitize("<p>head<br>body<br>tail</p>",
                            "<p>head</p><p>body</p><p>tail</p>")
        self.assertSanitize("<p>head<br><b>body<sup>letters</sup></b><br><i>ta</i>il</p>",
                            "<p>head</p><p><b>body<sup>letters</sup></b></p><p><i>ta</i>il</p>")

    def test_wrap_inline_tags(self):
        # Wrapping disabled: <br> separators survive untouched.
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = False
        self.assertSanitize('first<br>second<br>third',
                            'first<br>second<br>third')

    def test_p_not_allowed(self):
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = 'div'
        # replacing p with div in allow_tags
        self.attrs['allow_tags'].remove('p')
        self.attrs['allow_tags'].append('div')
        self.assertSanitize("head<br><br>tail",
                            "<div>head</div><div>tail</div>")

    def test_lambda_wrap_tag(self):
        # wrap_inline_tags may be a factory returning the wrapper element.
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = lambda: Element('span')
        self.assertSanitize("head<br><br>tail",
                            "<span>head</span><span>tail</span>")
        self.attrs['allow_tags'].remove('p')

    def test_no_wrap_tags(self):
        # No usable wrapper tag available -> sanitize raises ValueError.
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = True
        self.attrs['allow_tags'].remove('p')
        self.assertRaises(ValueError, self.sanitize, 'head<br><br>tail')

    # cannot create Cleaner with wrong parameters
    def test_create_cleaner_with_wrong_parameters(self):
        self.attrs['wrap_inline_tags'] = True
        self.attrs['allow_tags'].remove('p')
        self.assertRaises(ValueError, html.Cleaner, **self.attrs)
def spaceless(clean, **kwargs):
    """Collapse runs of whitespace in *clean* to single spaces and strip ends.

    Used as a string callback by the sanitizer; extra keyword arguments
    passed by the callback machinery are accepted and ignored.

    Fix: the pattern is now a raw string — '\\s' in a plain string literal
    is an invalid escape sequence (DeprecationWarning, SyntaxError in
    future Python versions).
    """
    return re.sub(r'\s+', ' ', clean).strip()
|
# Generated by Django 4.0.1 on 2022-02-21 11:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the member.UserProfile model (one-to-one with the auth user)."""

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
        ('member', '0002_remove_token_valid'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                # The one-to-one link to the user model doubles as the
                # primary key, so each user has at most one profile.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='profile', serialize=False, to=settings.AUTH_USER_MODEL)),
                ('nickname', models.CharField(max_length=40)),
                ('updated_time', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
|
import cv2
import os
import argparse
import shutil
import glob
from vidstab import VidStab
def image_to_video(image_path, out_video_path):
    """Encode each folder of PNG frames in *image_path* into an .mp4 file
    named after the folder, written under *out_video_path*.

    Fixes: os.listdir() returns entries in arbitrary order, so frames were
    previously written out of sequence — they are now sorted by filename
    (frames are zero-padded, so lexicographic order is chronological).
    A folder with no PNGs is skipped instead of raising IndexError.
    """
    for image_folder in image_path:
        head, tail = os.path.split(image_folder)
        print(head + " and " + tail)
        images = sorted(img for img in os.listdir(image_folder)
                        if img.endswith(".png"))
        if not images:
            continue  # nothing to encode in this folder
        # Use the first frame to determine the video dimensions.
        frame = cv2.imread(os.path.join(image_folder, images[0]))
        height, width, layers = frame.shape
        video = cv2.VideoWriter(os.path.join(out_video_path, tail + '.mp4'),
                                0, 50, (width, height))
        for image in images:
            video.write(cv2.imread(os.path.join(image_folder, image)))
        cv2.destroyAllWindows()
        video.release()
# stabilizing the video
def video_stabilizer(input_video_filepath, output_video_filepath):
    """Stabilize each video in *input_video_filepath* with VidStab, writing
    a same-named file into the *output_video_filepath* directory.

    Fix: the output path was built with string concatenation
    (output_video_filepath + tail); the caller in main() passes a directory
    without a trailing separator, which produced paths like
    ".../.video_stabvideo.mp4". os.path.join handles both cases correctly.
    """
    for video_path in input_video_filepath:
        head, tail = os.path.split(video_path)
        stabilizer = VidStab()
        # 'black' fills the borders exposed by the stabilizing transform.
        stabilizer.stabilize(input_path=video_path,
                             output_path=os.path.join(output_video_filepath, tail),
                             border_type='black')
# converting the video to image frames and applying auto correction
# for illumination and smoothing
def processing_frame_from_video(input_path, output_path, args):
    """Split each video in *input_path* into grayscale frames, optionally
    applying CLAHE, bilateral filtering and/or Gaussian blur (per *args*
    flags), and write frames as zero-padded PNGs under
    output_path/<video name>/.
    """
    # input_path = glob.glob('/home/afarahani/Projects/stab_output/*')
    # output_path = '/home/afarahani/Projects/stab_images'
    for item in input_path:
        head, tail = os.path.split(item)
        vidcap = cv2.VideoCapture(item)
        success, image = vidcap.read()
        # tail[:-4] strips the extension — assumes a 3-char extension
        # such as ".mp4" (which image_to_video produces).
        image_path = os.path.join(output_path, tail[:-4])
        try:
            if not os.path.exists(image_path):
                os.makedirs(image_path)
        except OSError:
            print('Error: Creating directory of data')
        count = 0
        while success:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            #print(image.shape)
            if args.clahe:
                # auto illumination correction
                clahe = cv2.createCLAHE(clipLimit=args.clipLimit, \
                                        tileGridSize=(args.tileGridX, \
                                                      args.tileGridY))
                image = clahe.apply(image)
            if args.bf:
                # smoothing the image
                image = cv2.bilateralFilter(image, args.bf_d, \
                                            args.bf_sc, args.bf_ss)
            if args.gb:
                image = cv2.GaussianBlur(image, (args.gb_x, args.gb_y), \
                                         args.gb_border_type)
            # frame0000.png, frame0001.png, ... (zero-padded for ordering)
            cv2.imwrite(image_path+'/frame'+str(count).zfill(4)+'.png', image)
            success, image = vidcap.read()
            #print('Read a new frame: ', success)
            count += 1
# Main function
def main(args):
    """Drive the preprocessing pipeline: optionally download the dataset,
    build videos from the image frames, optionally stabilize them, then
    re-extract processed frames into the output directory."""
    image_path = os.path.normpath(args.sourceDir)
    output_path = os.path.normpath(args.outputDir)
    # Did the user ask for download data?
    if args.sourceAddr:
        # make sure the target source dir is empty
        if os.path.exists(image_path):
            shutil.rmtree(image_path)
        if not os.path.exists(image_path):
            os.mkdir(image_path)
        os.system("gsutil cp -r " + args.sourceAddr + " " + image_path)
        # Now untar all tar files there
        # NOTE(review): the mv/rmdir commands below interleave sourceAddr
        # and image_path inside single shell invocations; the resulting
        # command lines look malformed — verify against the intended
        # bucket/directory layout before relying on this branch.
        os.system('mv ' + args.sourceAddr + " " + image_path + "/project2/* " + \
                  args.sourceAddr + " " + image_path)
        os.system('rmdir ' + args.sourceAddr + " " + image_path + "/project2")
        for file in os.listdir(image_path + "/data/"):
            if file.endswith(".tar"):
                output_dir = file[:-4]
                os.system('mkdir ' + image_path + "/data/" + output_dir)
                os.system('tar xvf ' + image_path + "/data/" + file + " -C " + \
                          image_path + "/data")
                os.system('mv ' + image_path + "/data/data/" + output_dir + " " + \
                          image_path + "/data")
                os.system('rmdir ' + image_path + "/data/data/")
                os.system('rm ' + image_path + "/data/" + file)
    else:
        # Check to see if expected directories exist
        if not os.path.exists(image_path) \
                or not os.path.exists(image_path + "/data") \
                or not os.path.exists(image_path + "/masks"):
            raise Exception("The source directory you specified does not " + \
                            "conform with expectations.")
    # make sure expected output directories exist
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    if not os.path.exists(output_path + "/data"):
        os.mkdir(output_path + "/data")
    if not os.path.exists(output_path + "/masks"):
        os.mkdir(output_path + "/masks")
    os.system('cp ' + image_path + "/masks/*.png " + output_path + "/masks")
    # make a temp (hidden) directory for intermediate videos
    if not os.path.exists(output_path + "/.video"):
        os.mkdir(output_path + "/.video")
    if not os.path.exists(output_path + "/.video_stab"):
        os.mkdir(output_path + "/.video_stab")
    # Make videos from images for later use
    image_to_video(glob.glob(image_path + "/data/*"), output_path + \
                   "/.video")
    # apply video stabilizer if asked for
    if args.vs:
        video_stabilizer(glob.glob(output_path + "/.video/*"),
                         output_path + "/.video_stab")
        processing_frame_from_video(glob.glob(output_path + "/.video_stab/*"),
                                    output_path + '/data', args)
    else:
        processing_frame_from_video(glob.glob(output_path + "/.video/*"),
                                    output_path + '/data', args)
    # Remove temp directories
    # shutil.rmtree(output_path + "/.video")
    # shutil.rmtree(output_path + "/.video_stab")
# Parse arguments and call main
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='This ' + \
                    'is part of the UGA CSCI 8360 Project 2 - . Please visit our ' + \
                    'GitHub project at https://github.com/dsp-uga/team-linden-p2 ' + \
                    'for more information regarding data organization ' + \
                    'expectations and examples on how to execute these scripts.')
    # Options for downloading data or specifying source directory
    parser.add_argument('-s', '--sourceDir', required=True,
                        help='Root directory for data that agrees with our ' + \
                             'organization standards.')
    parser.add_argument('-g', '--sourceAddr',
                        help='The address for source data that agrees with ' + \
                             'our organization standards. WARN: This will write ' + \
                             'to the sourceDir, deleting what is already there!!!')
    parser.add_argument('-o', '--outputDir', required=True,
                        help='Directory to store all png cilia predictions')
    # Frame post-processing options (see processing_frame_from_video)
    parser.add_argument('-clahe', '--clahe', action='store_true',
                        help='Signify if you want clahe image processing')
    parser.add_argument('-clipLimit', '--clipLimit', default=2, type=int,
                        help='clahe: clip limit variable')
    parser.add_argument('-tileGridX', '--tileGridX', default=8, type=int,
                        help='clahe: Tile Grid x-dim size')
    parser.add_argument('-tileGridY', '--tileGridY', default=8, type=int,
                        help='clahe: Tile Grid y-dim size')
    parser.add_argument('-bf', '--bf', action='store_true',
                        help='Signify if you want Bilateral Filter image ' + \
                             'processing')
    parser.add_argument('-bf_d', '--bf_d', default=7, type=int,
                        help='Bilateral Filter: The d argument')
    parser.add_argument('-bf_sc', '--bf_sc', default=30, type=float,
                        help='Bilateral Filter: The sc argument')
    parser.add_argument('-bf_ss', '--bf_ss', default=30, type=float,
                        help='Bilateral Filter: The ss argument')
    parser.add_argument('-gb', '--gb', action='store_true',
                        help='Signify if you want Gaussian Blur image ' + \
                             'processing')
    parser.add_argument('-gb_x', '--gb_x', default=5, type=float,
                        help='Gaussian Blur: sigma x argument')
    parser.add_argument('-gb_y', '--gb_y', default=5, type=float,
                        help='Gaussian Blur: sigma y argument')
    parser.add_argument('-gb_border_type', '--gb_border_type',
                        default=0, type=int,
                        help='Gaussian Blur: border type argument')
    parser.add_argument('-vs', '--vs', action='store_true',
                        help='Signify if you would like vidoe stabalizing ' + \
                             'applied to a movie generated by frames')
    args = parser.parse_args()
    main(args)
|
'''
Created on Aug 9, 2017
@author: Hao Wu
'''
from ScopeFoundry import HardwareComponent
from .arduino_sol_dev import ArduinoSolDev
from PyDAQmx import *
import numpy as np
import time
from math import exp
import h5py
class ArduinoSolHW(HardwareComponent):
    '''
    Hardware Component Class for an 8-channel Arduino-driven solenoid
    (odor valve) array controlled over a serial port, with an optional
    per-channel intensity calibration table loaded from an HDF5 file.
    '''
    name = 'arduino_sol'

    def setup(self, port='COM7', baud_rate=500000, fname='D:\\Hao\\VOTA\\VOTA_Control\\VOTAScopeHW\\arduino_sol_8\\calib.h5'):
        '''
        Create the serial-port / calibration settings and one 0-100
        intensity setting per solenoid channel, then load the calibration
        table and snapshot the initial channel values.
        '''
        self.settings.New(name='port', initial=port, dtype=str, ro=False)
        self.settings.New(name='baud_rate', initial=baud_rate, dtype=int, ro=False)
        self.settings.New(name='calibration_fname', initial=fname, dtype=str, ro=False)
        self.settings.New(name='load_calibration', initial=False, dtype=bool)
        self.settings.New(name='calibration_on', dtype=bool, initial=True, ro=False)
        # One setting per channel; index in self.sols == channel number.
        self.sols = []
        self.sols.append(self.settings.New(name='clean_air0', initial=0, dtype=int, ro=False, vmin=0, vmax=100))
        self.sols.append(self.settings.New(name='odor1', initial=0, dtype=int, ro=False, vmin=0, vmax=100))
        self.sols.append(self.settings.New(name='odor2', initial=0, dtype=int, ro=False, vmin=0, vmax=100))
        self.sols.append(self.settings.New(name='odor3', initial=0, dtype=int, ro=False, vmin=0, vmax=100))
        self.sols.append(self.settings.New(name='clean_air4', initial=0, dtype=int, ro=False, vmin=0, vmax=100))
        self.sols.append(self.settings.New(name='odor5', initial=0, dtype=int, ro=False, vmin=0, vmax=100))
        self.sols.append(self.settings.New(name='odor6', initial=0, dtype=int, ro=False, vmin=0, vmax=100))
        self.sols.append(self.settings.New(name='odor7', initial=0, dtype=int, ro=False, vmin=0, vmax=100
                                           ))
        self.load_calib()
        # Last value written per channel; write() only sends changes.
        self.sols_old = []
        for i in range(8):
            self.sols_old.append(self.sols[i].value())

    def connect(self):
        # Open the serial device with the configured port and baud rate.
        self._dev = ArduinoSolDev(self.settings.port.value(),
                                  self.settings.baud_rate.value())

    def write(self):
        """Send every channel whose value changed since the last write;
        with calibration_on, map the value through the calibration table."""
        for i in range(len(self.sols)):
            x = self.sols[i].value()
            if (x != self.sols_old[i]):
                if self.settings.calibration_on.value():
                    self._dev.write(i, int(self.calib[x, i]))
                else:
                    self._dev.write(i, x)
                self.sols_old[i] = x

    def set_low(self):
        # Zero every channel setting (does not write to hardware).
        for counter, sol in enumerate(self.sols):
            sol.update_value(0)

    def write_low(self):
        # Zero all channels and push the change to the hardware.
        self.set_low()
        self.write()

    def write_default(self):
        # Default state: everything off (channels 0 and 4 re-set to 0
        # explicitly), then push to hardware.
        self.set_low()
        self.sols[0].update_value(0)
        self.sols[4].update_value(0)
        self.write()

    def load(self, vals):
        # Apply a list of channel intensities (index == channel).
        for i in range(len(vals)):
            self.sols[i].update_value(vals[i])

    def load_calib(self):
        # Read the calibration lookup table (dataset 'calib') into memory.
        # Indexed as self.calib[value, channel] in write().
        fname = self.settings.calibration_fname.value()
        calib_file = h5py.File(fname, 'r')
        calib_dset = calib_file['calib']
        self.calib = calib_dset[:]

    def start(self):
        self._dev.open()

    def stop(self):
        self._dev.close()

    def disconnect(self):
        try:
            self.stop()
            del self._dev
            # NOTE(review): `del self.write` targets the bound method, not
            # an instance attribute — it raises AttributeError, which the
            # handler below swallows. Probably a leftover; confirm intent.
            del self.write
        except AttributeError:
            pass
if __name__ == '__main__':
    # Smoke test: requires the Arduino to be attached on the configured port.
    # BUG FIX: this block previously instantiated DAQaiHW (a class from a
    # different module, undefined here) and printed a nonexistent _data
    # attribute; it now exercises the class actually defined in this file.
    # NOTE(review): HardwareComponent normally requires a ScopeFoundry app
    # instance — confirm the constructor arguments before relying on this.
    hw = ArduinoSolHW()
    hw.connect()
    time.sleep(1)
    hw.disconnect()
#!/usr/bin/env python3
import json
import os
import sys
import tempfile
import requests
from tidetech_methods import print_json, save_file
# Configuration of server and API Key
SERVER = 'https://api.tidetech.org'
# Change this to your API key
API_KEY = os.environ.get('TIDETECH_API_KEY')
if not API_KEY:
    # BUG FIX: the message previously misspelled the variable as TIDETCH_API_KEY
    print("Please set TIDETECH_API_KEY environment variable, or change it in this script. Terminating.")
    sys.exit()
# Note that the API Key can be passed in as a query parameter with api_key=API_KEY
HEADERS = {'Authentication': 'Token ' + API_KEY}
# The URLs of our Data API, including:
# Meta - get information about a dataset
meta_url = "{}/v1/data/{}/"
# Get data at one or more points (GET or POST)
point_url = "{}/v1/data/{}/point/"
# Get data for an area
area_url = "{}/v1/data/{}/area/"
# Multi-point allows requesting information from multiple datasets at once
multipoint_url = "{}/v1/data/points/"
# Get metadata for a single dataset.
def get_metadata(server, dataset, display=True):
    """Fetch metadata for a single dataset and return the parsed JSON.

    :param display: when True, pretty-print the metadata as well.
    """
    response = requests.get(meta_url.format(server, dataset), headers=HEADERS)
    info = response.json()
    if display:
        print_json(info)
    return info
# Get data for an area (returns NetCDF file)
def get_area(server, dataset, parameters, out_file):
    """Download area data for a dataset (a NetCDF file) and save it to out_file."""
    area_response = requests.get(
        area_url.format(server, dataset), headers=HEADERS, params=parameters
    )
    save_file(area_response, out_file)
# Get data for one or more points, for a single dataset
def get_point(server, dataset, locations, out_file=None):
    """Request data at one or more points from a single dataset.

    :param locations: GeoJSON FeatureCollection of point features.
    :param out_file: if given, write the JSON result there; otherwise print it.
    """
    url = point_url.format(server, dataset)
    parameters = {
        "locations": json.dumps(locations)
    }
    # response = requests.get(url, headers=HEADERS, params=parameters)
    # Or do a POST Request. POST is required for large numbers of points.
    response = requests.post(url, headers=HEADERS, json=parameters)
    if response.status_code != 200:
        # BUG FIX: the message previously said "Area request failed"
        print("Point request failed... Reason follows.")
        print(response.text)
    else:
        # BUG FIX: only parse the body as JSON on success; error bodies may
        # not be JSON and previously crashed before the status check ran
        result = response.json()
        if out_file:
            with open(out_file, 'w') as outfile:
                json.dump(result, outfile)
        else:
            print_json(result)
# Get data for one or more points for one or more datasets
def get_multipoints(server, datasets, locations, out_file=None):
    """Request data at one or more points from several datasets at once.

    :param datasets: comma-separated dataset names.
    :param locations: GeoJSON FeatureCollection of point features.
    :param out_file: if given, write the JSON result there; otherwise print it.
    """
    url = multipoint_url.format(server)
    parameters = {
        "locations": json.dumps(locations),
        "name": datasets
    }
    response = requests.get(url, headers=HEADERS, params=parameters)
    # Or do a POST Request. POST is required for large numbers of points.
    # response = requests.post(url, headers=HEADERS, json=parameters, params={'name': datasets})
    if response.status_code != 200:
        # BUG FIX: treat any non-success status as failure (was only 400/404,
        # inconsistent with get_point) and use the right name in the message
        print("Multi-point request failed... Reason follows.")
        print(response.text)
    else:
        result = response.json()
        if out_file:
            with open(out_file, 'w') as outfile:
                json.dump(result, outfile)
        else:
            print_json(result)
def get_example_metadata():
    """Example: fetch and display metadata for the combined-currents dataset."""
    print("Getting metadata")
    get_metadata(SERVER, 'global_combined_currents', display=True)
# An example area, used in testing
def get_example_area():
    """Example: download a small region of the global waves dataset as NetCDF."""
    print("Getting area dataset")
    dataset = 'global_waves'
    # bounding box for the request polygon (closed ring, GeoJSON order)
    min_x, max_x = 100, 102
    min_y, max_y = 10, 12
    region = {
        "type": "Polygon",
        "coordinates": [[
            [min_x, min_y],
            [min_x, max_y],
            [max_x, max_y],
            [max_x, min_y],
            [min_x, min_y],
        ]],
    }
    parameters = {
        "region": json.dumps(region),
        "start_datetime": '2016-02-01T00:00',
        "end_datetime": '2016-02-23T2:00',
        "filename": "ThisIsATest",
    }
    out_file = os.path.join(tempfile.mkdtemp(), 'test_waves.nc')
    print("Saving file to {}".format(out_file))
    get_area(SERVER, dataset, parameters, out_file)
# An example point, used in testing
def get_example_point():
    """Example: query two points from the currents dataset and save the JSON."""
    print("Getting points from a single dataset")
    dataset = 'global_combined_currents'

    def point_feature(feature_id, lon, lat, when):
        # Build one GeoJSON point feature. An optional "codes" property
        # (e.g. '49,50') may be added to restrict the returned parameters.
        return {
            "type": "Feature",
            "geometry": {"type": "Point", "coordinates": [lon, lat]},
            "properties": {"id": feature_id, "datetime": when},
        }

    locations = {
        "type": "FeatureCollection",
        "features": [
            point_feature('p1', 144, 36, '2016-02-01T00:00'),
            point_feature('p2', 144, 37, '2016-02-01T12:00'),
        ],
    }
    out_file = os.path.join(tempfile.mkdtemp(), 'test_currents.json')
    print("Saving file to {}".format(out_file))
    get_point(SERVER, dataset, locations, out_file=out_file)
# An example multi-dataset point, used in testing
def get_example_multipoints():
    """Example: query two points across three datasets and save the JSON."""
    print("Getting points from multiple datasets")
    datasets = 'global_combined_currents,global_wind,global_waves'

    def point_feature(feature_id, lon, lat, when):
        # Build one GeoJSON point feature. An optional "codes" property
        # (e.g. '49,50,33,34,8') may be added to restrict the parameters.
        return {
            "type": "Feature",
            "geometry": {"type": "Point", "coordinates": [lon, lat]},
            "properties": {"id": feature_id, "datetime": when},
        }

    locations = {
        "type": "FeatureCollection",
        "features": [
            point_feature('p3', 19, -40, '2016-06-20T18:00'),
            point_feature('p4', 20, -41, '2016-06-20T21:00'),
        ],
    }
    out_file = os.path.join(tempfile.mkdtemp(), 'test_all.json')
    print("Saving file to {}".format(out_file))
    get_multipoints(SERVER, datasets, locations, out_file=out_file)
# Run the examples here.
def run_examples():
    """Run every example request in sequence."""
    for example in (get_example_metadata, get_example_area,
                    get_example_point, get_example_multipoints):
        example()


if __name__ == '__main__':
    run_examples()
|
"""
Parsers for the experimental data files
"""
# import datetime
from pathlib import Path
from html.parser import HTMLParser
from typing import Tuple
# 3rd party
# import datefinder
# import pandas as pd
#%%
class ParserError(ValueError):
    """Raised when a file cannot be parsed (unreadable or not a valid .par file)."""
def read_PAR_file(filepath: Path, break_if_line_contains="", metadata_only=False):
    """
    Special parser for VersaStudio ".par" files.

    Opens the file, optionally stops reading at a marker line, cleans the
    line endings, then initializes a VersaStudioParser and feeds it the data.

    :param filepath: path of the .par file.
    :param break_if_line_contains: stop reading after a line containing this text.
    :param metadata_only: shortcut that stops reading at the first data segment.
    :return: the fed VersaStudioParser instance.
    :raises ParserError: when the file cannot be opened or read.
    """
    if metadata_only:
        # all metadata precedes the first data segment
        break_if_line_contains = "<Segment1>"
    try:
        with open(filepath) as fp:
            if break_if_line_contains:
                fp_readlines = []
                for line in fp:
                    fp_readlines.append(line)
                    if break_if_line_contains in line:
                        break
            else:
                fp_readlines = fp.readlines()
        fp_read = find_replace_line_endings(fp_readlines)
    except OSError as exc:
        # BUG FIX: the message was two adjacent literals missing the f-prefix,
        # so "{filepath}" was never interpolated; also chain the original error
        raise ParserError(
            f"Can not open or read this file. File: {filepath} is invalid."
        ) from exc
    VSP = VersaStudioParser()
    VSP.feed(fp_read)
    VSP.close()
    # FIXME TODO monkey patching kwargs
    _kwargs = {
        "break_if_line_contains": break_if_line_contains,
        "metadata_only": metadata_only,
    }
    VSP._kwargs = _kwargs
    return VSP
def find_replace_line_endings(fp_readlines):
    """
    Special find-and-replace to clean line endings: lines ending in '=<'
    would otherwise be read as a stray start-tag character, so the trailing
    '<' is rewritten to 'lt'. Returns the cleaned lines joined into one string.
    """
    return "".join(
        line.replace("<\n", "lt\n") if line.endswith("=<\n") else line
        for line in fp_readlines
    )
class VersaStudioParser(HTMLParser):
    """
    Main VersaStudio .par file parser.

    It separates the read-in data into an actions part and a data part,
    following the structure of the .par file.

    Usage is similar to HTMLParser:
        VSP = VersaStudioParser()
        VSP.feed(data)
        VSP.close()

    Parsed results end up in instance attributes:
        actions, data_body, data_keys, metadata
    """

    _VSP_VERSION = "0.1.0"
    # GUI-layout tags whose content is irrelevant to the measurement
    _skipped_tags = ("dockinglayout", "dockpanel", "graph1")
    _skipped_data = (">", "<", "\n\n")
    _data_name = "segment"
    _action_name = "action"
    _metadata_tags = (
        "application",
        "instrument",
        "mode:floating,",
        "filter:normal",
        "experiment",
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # BUG FIX: these used to be mutable class attributes shared by every
        # parser instance; they are now per-instance state.
        self.tag = ""
        self._tags_found = []
        self.actions = {}
        self.data_body = {}
        self.metadata = {}
        self.data_keys = []
        self.data_version = {}
        self._tags = []
        self._all_raw_data = {}

    def handle_starttag(self, tag, attrs):
        """Remember the current tag unless it is one of the skipped GUI tags."""
        self.tag = ""
        if tag not in self._skipped_tags:
            self.tag = tag
            self._tags_found.append((tag, attrs))

    def handle_endtag(self, tag):
        # nothing to do: data handling keys off the last seen start tag
        if self.tag:
            pass

    def handle_data(self, data, max_len=None):
        """
        Handle data depending on the current tag name: 'segment' tags hold
        the numeric data body, 'action' tags hold key=value action blocks,
        and the known metadata tags hold key=value metadata.

        :param max_len: optional cap on how much of the data is inspected.
        """
        if max_len:
            if len(data) > max_len:
                data = data[0:max_len]
        if data not in self._skipped_data and self.tag not in self._skipped_tags:
            self._all_raw_data.update({self.tag: data})
            if self.tag.startswith(self._data_name):
                # parse the main measurement data
                self.data_version.update(self.parse_text(data, self.tag))
                for segkey, segval in self.data_version.items():
                    data_definition = ""
                    if isinstance(segval, dict):
                        data_definition = segval.get("Definition", "")
                    if data_definition:
                        # the Definition entry names the data columns
                        data_keys = [i for i in data_definition.split(", ") if i != "0"]
                        self.data_keys = data_keys
                        self.data_body.update(
                            self.parse_data_body(data, segkey, data_keys)
                        )
            elif self.tag.startswith(self._action_name):
                # parse actions and other metadata
                self.actions.update(self.parse_text(data, self.tag))
            elif self.tag in self._metadata_tags:
                self.metadata.update(self.parse_text(data, self.tag))

    def parse_data_body(self, text, segment_name: str, data_keys: list):
        """Collect the numeric rows of a segment: every line with exactly
        len(data_keys) comma-separated fields, cast to int/float when possible."""
        data_body = []
        lenkeys = len(data_keys)
        for line in text.splitlines():
            splt = line.split(",")
            if len(splt) == lenkeys:
                try:
                    splt = [int(i) if not "." in i else float(i) for i in splt]
                except Exception:
                    # a field was not numeric: keep the row as strings
                    pass
                data_body.append(splt)
        return {segment_name: data_body}

    def parse_text(self, text, current_tag):
        """Parse 'key=value' lines under current_tag into {current_tag: dict};
        the value stays None when nothing parseable is found."""
        text = text.strip()
        textdict = {current_tag: None}
        try:
            splitted_lines = [
                line.split(sep="=")
                for line in text.splitlines()
                if (line != "" and "=" in line)
            ]
            lines_notlen2 = [i for i in splitted_lines if len(i) != 2]
            if lines_notlen2:
                # BUG FIX: keep only the well-formed key=value pairs; the
                # original inverted the condition and kept only the
                # malformed (len != 2) entries.
                splitted_lines = [i for i in splitted_lines if len(i) == 2]
            # Cast values to numeric types
            splitted_lines_dict = cast_elements_to_numeric(splitted_lines)
            if splitted_lines_dict:
                textdict.update({current_tag: splitted_lines_dict})
            else:
                raise ValueError
        except Exception:
            # malformed block: leave textdict[current_tag] as None
            pass
        return textdict
def cast_elements_to_numeric(
    splitted_lines: list, float_sep=(",", "."), minus_sign=("-")
) -> dict:
    """Helper for casting the str values in a list of [key, value] pairs.

    Values made up purely of digits plus separator/minus characters become
    float (when a separator is present) or int; anything else is kept as-is.
    Malformed entries (not a two-element list) are stored under an
    'error_cast_elem_<index>' key.
    """
    result = {}
    for n, elem in enumerate(splitted_lines):
        if isinstance(elem, list) and len(elem) == 2:
            key, value = elem
            # keep only the characters that could belong to a number
            _isnum = "".join(
                [i for i in value if i.isnumeric() or i in (*float_sep, *minus_sign)]
            )
            if _isnum and len(_isnum) == len(value):
                # the whole value looks numeric: cast to float or int
                if any(sep in _isnum for sep in float_sep):
                    try:
                        value = float(value)
                    except Exception:
                        # float cast failed (e.g. comma separator); keep the str
                        pass
                else:
                    try:
                        value = int(value)
                    except Exception:
                        # int cast failed; keep the str
                        pass
        else:
            # BUG FIX: the key was missing the f-prefix, so every malformed
            # element collided on the literal key "error_cast_elem_{n}"
            key = f"error_cast_elem_{n}"
            value = elem
        result.update({key: value})
    return result
|
from peewee import *
import os
# Peewee MySQL connection for the "metrics" schema. Credentials come from the
# environment so they are never committed to source control; a missing
# variable raises KeyError at import time.
db = MySQLDatabase(
    "metrics",
    host=os.environ['MYSQL_HOST'],
    port=int(os.environ['MYSQL_PORT']),
    user=os.environ['MYSQL_USER'],
    password=os.environ['MYSQL_PASS']
)
class Exercicios(Model):
    """Peewee model for one recorded exercise session of a user."""

    id = PrimaryKeyField()
    banca = CharField()
    data_registro = DateField()
    quant_questoes = IntegerField()
    # NOTE(review): field names mix English/Portuguese ("quant_error" vs
    # "quant_acertos"); renaming would require a schema migration.
    quant_error = IntegerField()
    quant_acertos = IntegerField()
    user_id = IntegerField()

    def close(self):
        # Closes the shared module-level connection, not just this model.
        db.close()

    class Meta:
        # binds the model to the MySQL database configured above
        database = db
# -*- coding: utf-8 -*-
"""Tests: Algorithms for topological sorting"""
import unittest
from assertpy import assert_that
from algolib.graphs import DirectedSimpleGraph
from algolib.graphs.algorithms import DirectedCyclicGraphError, sort_topological_using_dfs, \
sort_topological_using_inputs
class TopologicalSortingTest(unittest.TestCase):
    """Tests for topological sorting algorithms (inputs-based and DFS-based)."""

    @staticmethod
    def _build_graph(cyclic):
        """Build the shared 6-vertex test graph; with cyclic=True the extra
        edge 2 -> 1 closes the cycle 0 -> 2 -> 1 -> 0."""
        graph = DirectedSimpleGraph(range(6))
        edges = [(0, 2), (0, 4), (1, 0), (1, 4), (3, 1), (3, 0), (3, 2),
                 (5, 1), (5, 2), (5, 4)]
        if cyclic:
            # same position in the insertion order as the original test data
            edges.insert(4, (2, 1))
        for source, destination in edges:
            graph.add_edge_between(graph.get_vertex(source),
                                   graph.get_vertex(destination))
        return graph

    @staticmethod
    def test__sort_topological_using_inputs__when_acyclic_graph__then_topological_order():
        # given
        graph = TopologicalSortingTest._build_graph(cyclic=False)
        # when
        result = sort_topological_using_inputs(graph)
        # then
        assert_that(result).is_instance_of(list)
        assert_that(result).is_equal_to(
            [graph.get_vertex(3), graph.get_vertex(5), graph.get_vertex(1), graph.get_vertex(0),
             graph.get_vertex(2), graph.get_vertex(4)])

    @staticmethod
    def test__sort_topological_using_inputs__when_cyclic_graph__then_directed_cyclic_graph_error():
        # given
        graph = TopologicalSortingTest._build_graph(cyclic=True)

        # when
        def function(graph_):
            sort_topological_using_inputs(graph_)

        # then
        assert_that(function).raises(DirectedCyclicGraphError).when_called_with(graph)

    @staticmethod
    def test__sort_topological_using_inputs__when_empty_graph__then_vertices():
        # given
        graph = DirectedSimpleGraph(range(6))
        # when
        result = sort_topological_using_inputs(graph)
        # then
        assert_that(result).is_instance_of(list)
        assert_that(result).is_equal_to(sorted(graph.vertices))

    @staticmethod
    def test__sort_topological_using_dfs__when_acyclic_graph__then_topological_order():
        # given
        graph = TopologicalSortingTest._build_graph(cyclic=False)
        # when
        result = sort_topological_using_dfs(graph)
        # then
        # BUG FIX: removed a leftover debug print of the membership check
        assert_that(result).is_instance_of(list)
        assert_that(result).is_in(
            [graph.get_vertex(5), graph.get_vertex(3), graph.get_vertex(1), graph.get_vertex(0),
             graph.get_vertex(4), graph.get_vertex(2)],
            [graph.get_vertex(3), graph.get_vertex(5), graph.get_vertex(1), graph.get_vertex(0),
             graph.get_vertex(2), graph.get_vertex(4)],
            [graph.get_vertex(5), graph.get_vertex(3), graph.get_vertex(1), graph.get_vertex(0),
             graph.get_vertex(2), graph.get_vertex(4)],
            [graph.get_vertex(3), graph.get_vertex(5), graph.get_vertex(1), graph.get_vertex(0),
             graph.get_vertex(4), graph.get_vertex(2)])

    @staticmethod
    def test__sort_topological_using_dfs__when_cyclic_graph__then_directed_cyclic_graph_error():
        # given
        graph = TopologicalSortingTest._build_graph(cyclic=True)

        # when
        def function(graph_):
            sort_topological_using_dfs(graph_)

        # then
        assert_that(function).raises(DirectedCyclicGraphError).when_called_with(graph)

    @staticmethod
    def test__sort_topological_using_dfs__when_empty_graph__then_vertices():
        # given
        graph = DirectedSimpleGraph(range(6))
        # when
        result = sort_topological_using_dfs(graph)
        # then
        assert_that(result).is_instance_of(list)
        assert_that(result).is_equal_to(sorted(graph.vertices))
|
class Resource:
    """Thin wrapper that holds a reference to a backing service."""

    def __init__(self, service):
        # the service object this resource delegates to
        self.service = service
# coding: utf-8
# # Function dct
#
# ## Synopse
#
# Discrete Cossine Transform.
#
# - **F = dct(f)**
#
# - **F**:output: image dct transform.
# - **f**:input: input image.
# In[6]:
import numpy as np
def dct(f):
    """Discrete Cosine Transform of image f.

    - F = dct(f)
    - F: output image DCT transform.
    - f: input image (1-D signals are treated as a column vector).
    """
    import ea979.src as ia
    f = f.astype(np.float64)
    # promote a 1-D signal to a column vector
    if f.ndim == 1:
        f = f[:, np.newaxis]
    rows, cols = f.shape
    A = ia.dctmatrix(rows)
    if cols == 1:
        return np.dot(A, f)
    B = ia.dctmatrix(cols)
    return np.dot(np.dot(A, f), B.T)
# ## Examples
# In[1]:
# Notebook-converted example code: only runs when executed as a script.
testing = (__name__ == "__main__")
if testing:
    get_ipython().system(' jupyter nbconvert --to python dct.ipynb')
    import numpy as np
    import sys,os
    ea979path = os.path.abspath('../../')
    if ea979path not in sys.path:
        sys.path.append(ea979path)
    import ea979.src as ia
# ### Example 1
# In[8]:
if testing:
    np.set_printoptions(suppress=True, precision=2)
    # small 4x4 example matrix
    f = np.array([[4,3,2,1],[3,3,2,1],[2,2,2,1],[1,1,1,1]])
    print('Matriz original:\n',f)
    F = ia.dct(f)
    print('\nDCT:\n',F)
# ### Example 2
# In[2]:
if testing:
    get_ipython().magic('matplotlib inline')
    import matplotlib.pyplot as plt
    import matplotlib.image as mpimg
    import sys,os
    ea979path = os.path.abspath('../../')
    if ea979path not in sys.path:
        sys.path.append(ea979path)
    import ea979.src as ia
    r,c = np.indices( (256, 256) )
    # synthetic image: filled circle of radius 10 centred at (129, 129)
    f = ( (r-129)**2 + (c-129)**2 < 10**2)*255
    ia.adshow(ia.normalize(f),'Imagem original')
    F = ia.dct(f)
    ia.adshow( ia.normalize(np.log(abs(F)+1)),'DCT')
# ### Example 3
# Compare with dft
# In[3]:
if testing:
    f = mpimg.imread('../data/cameraman.tif')
    nb = ia.nbshow(3)
    nb.nbshow(f,'Imagem original')
    F1 = np.fft.fft2(f)
    F2 = ia.dct(f)
    # center the DFT spectrum before displaying the log magnitude
    nb.nbshow(ia.normalize(np.log(np.abs(ia.ptrans(F1,(f.shape[0]//2,f.shape[1]//2))+1))),'DFT' )
    nb.nbshow(ia.normalize(np.log(abs(F2)+1)),'DCT')
    nb.nbshow()
    print('Tempo de execução DFT:')
    get_ipython().magic('%timeit F1 = np.fft.fft2(f)')
    print('\nTempo de execução DCT:')
    get_ipython().magic('%timeit F2 = ia.dct(f)')
# ### Example 4
# Compare with scipy function
# In[4]:
if testing:
    from scipy.fftpack import dct as spdct
    r,c = np.indices( (256, 256) )
    f = ( (r-129)**2 + (c-129)**2 < 10**2)*255
    # scipy computes the 2-D DCT as two 1-D orthonormal passes
    Fscipy = spdct(spdct(f,norm='ortho',axis=0),norm='ortho',axis=1)
    F = ia.dct(f)
    nb = ia.nbshow(2)
    nb.nbshow( ia.normalize(np.log(abs(Fscipy)+1)),'Função do scipy')
    nb.nbshow( ia.normalize(np.log(abs(F)+1)),'Função do toolbox')
    nb.nbshow()
    print('Diferença entre as duas funções (scipy e implementada):',np.sum(np.abs(Fscipy-F)))
    print('\nTempo de execução função implementada:')
    get_ipython().magic('%timeit F = ia.dct(f)')
    print('Tempo de execução scipy:')
    get_ipython().magic("%timeit Fscipy = spdct(spdct(f,norm='ortho',axis=0),norm='ortho',axis=1)")
# In[ ]:
|
import json
import os
import sys
# Make the bundled ("vendored") dependencies importable before importing them
here = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(here, "./vendored"))
import requests
# Telegram bot credentials: TELEGRAM_TOKEN must be set in the environment
TOKEN = os.environ['TELEGRAM_TOKEN']
BASE_URL = f"https://api.telegram.org/bot{TOKEN}"
def manualLoad(headersfilename='headers.txt'):
    """Load rules from a text file into a 1-indexed dict.

    :param headersfilename: path of the file; one rule per line.
    :return: dict mapping line number (starting at 1) to the rule text.
    """
    # BUG FIX: close the file deterministically (was left to the GC) and
    # build the dict with enumerate instead of indexing by range(len(...))
    with open(headersfilename, "r") as f:
        lines = f.read().splitlines()
    # 1-based keys so user-facing rule numbers match the file's line numbers
    return {number: text for number, text in enumerate(lines, start=1)}
def hello(event, context):
    """AWS Lambda handler for a Telegram webhook.

    Parses the incoming update; when the message is a number, replies with the
    matching rule from the rules file, otherwise prompts for a rule number.
    Always returns HTTP 200 so Telegram does not retry the update.
    """
    try:
        data = json.loads(event["body"])
        message = str(data["message"]["text"]).strip('/')
        chat_id = data["message"]["chat"]["id"]
        # default reply when the input is not a number
        # (BUG FIX: was an f-string with no placeholders)
        response = "Enter rule number:"
        try:
            num = int(message)
            datadict = manualLoad()
            if num in datadict:
                response = f"RULE #{num} // {datadict[num]}"
            else:
                response = "No such rule number."
        except ValueError as ve:
            # non-numeric message: keep the default prompt
            print(ve)
        data = {"text": response.encode("utf8"), "chat_id": chat_id}
        url = f"{BASE_URL}/sendMessage"
        requests.post(url, data)
    except Exception as e:
        # never raise out of the webhook handler; log and fall through to 200
        print(e)
    return {"statusCode": 200}
# -*- test-case-name: Cowrie Proxy Test Cases -*-
# Copyright (c) 2019 Guilherme Borges
# See LICENSE for details.
import os
from twisted.cred import portal
from twisted.internet import reactor
from twisted.trial import unittest
from cowrie.core.checkers import HoneypotPasswordChecker, HoneypotPublicKeyChecker
from cowrie.core.realm import HoneyPotRealm
from cowrie.ssh.factory import CowrieSSHFactory
# from cowrie.test.proxy_compare import ProxyTestCommand
os.environ["COWRIE_HONEYPOT_TTYLOG"] = "false"
os.environ["COWRIE_OUTPUT_JSONLOG_ENABLED"] = "false"
def create_ssh_factory(backend):
    """Build a CowrieSSHFactory for the given backend ('shell' or 'proxy')
    with public-key and password checkers registered on its portal."""
    ssh_factory = CowrieSSHFactory(backend, None)
    ssh_portal = portal.Portal(HoneyPotRealm())
    ssh_portal.registerChecker(HoneypotPublicKeyChecker())
    ssh_portal.registerChecker(HoneypotPasswordChecker())
    # factory.portal.registerChecker(HoneypotNoneChecker())
    ssh_factory.portal = ssh_portal
    return ssh_factory
# def create_telnet_factory(backend):
# factory = HoneyPotTelnetFactory(backend, None)
# factory.portal = portal.Portal(HoneyPotRealm())
# factory.portal.registerChecker(HoneypotPasswordChecker())
#
# return factory
class ProxyTests(unittest.TestCase):
    """
    How to test the proxy:
    - setUp runs a 'shell' backend on 4444; then set up a 'proxy' on port 5555 connected to the 'shell' backend
    - test_ssh_proxy runs an exec command via a client against both proxy and shell; returns a deferred
    - the deferred succeeds if the output from both is the same
    """

    # network endpoints for the backend and the proxy under test
    HOST = "127.0.0.1"
    PORT_BACKEND_SSH = 4444
    PORT_PROXY_SSH = 5555
    PORT_BACKEND_TELNET = 4445
    PORT_PROXY_TELNET = 5556
    # credentials accepted by the honeypot checkers
    USERNAME_BACKEND = "root"
    PASSWORD_BACKEND = "example"
    USERNAME_PROXY = "root"
    PASSWORD_PROXY = "example"

    def setUp(self):
        # ################################################# #
        # #################### Backend #################### #
        # ################################################# #

        # setup SSH backend
        self.factory_shell_ssh = create_ssh_factory("shell")
        self.shell_server_ssh = reactor.listenTCP(
            self.PORT_BACKEND_SSH, self.factory_shell_ssh
        )

        # ################################################# #
        # #################### Proxy ###################### #
        # ################################################# #

        # setup proxy environment: point the proxy at the backend just started
        os.environ["COWRIE_PROXY_BACKEND"] = "simple"
        os.environ["COWRIE_PROXY_BACKEND_SSH_HOST"] = self.HOST
        os.environ["COWRIE_PROXY_BACKEND_SSH_PORT"] = str(self.PORT_BACKEND_SSH)
        os.environ["COWRIE_PROXY_BACKEND_TELNET_HOST"] = self.HOST
        os.environ["COWRIE_PROXY_BACKEND_TELNET_PORT"] = str(self.PORT_BACKEND_TELNET)

        # setup SSH proxy
        self.factory_proxy_ssh = create_ssh_factory("proxy")
        self.proxy_server_ssh = reactor.listenTCP(
            self.PORT_PROXY_SSH, self.factory_proxy_ssh
        )

    # def test_ls(self):
    #     command_tester = ProxyTestCommand('ssh', self.HOST, self.PORT_BACKEND_SSH, self.PORT_PROXY_SSH,
    #                                       self.USERNAME_BACKEND, self.PASSWORD_BACKEND,
    #                                       self.USERNAME_PROXY, self.PASSWORD_PROXY)
    #
    #     return command_tester.execute_both('ls -halt')

    def tearDown(self):
        # drop any live client connections before stopping the listeners
        for client in self.factory_proxy_ssh.running:
            if client.transport:
                client.transport.loseConnection()
        self.proxy_server_ssh.stopListening()
        self.shell_server_ssh.stopListening()
        self.factory_shell_ssh.stopFactory()
        self.factory_proxy_ssh.stopFactory()
|
import numpy as np
from visualization import Visualization
from agent import PPOAgent
# from agent_with_throttle import PPOAgent
def expand_image_dimension(image):
    """Append a trailing channel axis: (H, W) -> (H, W, 1)."""
    return image[:, :, np.newaxis]
def preprocess_image(image):
    """Scale 8-bit pixel values into the [0, 1] range."""
    return image / 255.0
def action_to_motion(action):
    """Convert a raw agent action [steering, throttle] into a motion dict,
    clipping steering to [-1, 1], throttle to [0, 1], rounded to 3 decimals."""
    steering = float(np.clip(action[0], -1.0, 1.0))
    throttle = float(np.clip(action[1], 0, 1.0))
    return {
        'steering': round(steering, 3),
        'acceleration': round(throttle, 3),
    }
class Environment:
    """Glue between the driving simulator and the PPO agent: converts incoming
    telemetry frames into stored transitions and produces motion commands."""

    def __init__(self):
        self.agent = PPOAgent(load_model_from_file=False)
        self.visualization = Visualization()
        self.is_terminal_state = False

    def add_movement(self, move_model):
        """Consume one telemetry frame (dict from the simulator) and store a
        (value, state, action, reward, done, probability) transition."""
        is_on_track = move_model['isOnTrack']
        self.is_terminal_state = move_model['isTerminalState']
        # NOTE(review): is_finish_reached is first assigned here, so calling
        # train_model_on_batch() before any add_movement() raises AttributeError.
        self.is_finish_reached = move_model['isFinishReached']
        # get the observation
        # assumes 'colors' is a flat list of 50*120 grayscale values — TODO confirm
        colors = move_model['colors']
        gray_scale_image = np.reshape(colors, (50, 120))
        gray_scale_image = np.flip(gray_scale_image, 0)
        self.visualization.add_image(gray_scale_image)
        # show the input image
        # self.visualization.show_agent_input_image(gray_scale_image)
        gray_scale_image = expand_image_dimension(gray_scale_image)
        gray_scale_image = preprocess_image(gray_scale_image)
        # this will hopefully give the agent an understanding of speed
        prev_throttle = self.agent.get_previous_throttle()
        previous_throttle_state = np.full_like(gray_scale_image, prev_throttle / 10.0)
        state = np.append(gray_scale_image, previous_throttle_state, axis=2)
        value = self.agent.get_value(state)
        action, probability = self.agent.get_action(state)
        done = self.is_terminal_state
        # reward = 1.0 if action == 1 else 0.9
        # reward = 1.0 if action[1] == 0 else 0.5
        # reward = reward + 0.2 if action[0] == 1 else reward
        reward = 1.0 if is_on_track else -0.1
        self.agent.store_transition(value, state, action, reward, done, probability)

    def train_model_on_batch(self):
        """Run one PPO learning step on the buffered transitions and update
        the reward plot; model/video are saved when the finish was reached."""
        # step_count = self.agent.get_steps_count()
        # self.visualization.add_steps_value(step_count)
        # self.visualization.plot_steps_history()
        # self.visualization.show_random_agent_input_image()
        self.visualization.add_reward_value(self.agent.get_reward_sum())
        self.visualization.plot_reward_history()
        # if we reached the goal -> current model is already really good (save before retraining)
        if self.is_finish_reached:
            print('Saving model to file...')
            self.agent.save_models()
        self.agent.learn(self.is_finish_reached)
        # write video to file if finish is reached
        if self.is_finish_reached:
            print('Saving video to file...')
            self.visualization.frames_to_file()
        self.visualization.reset_image_buffer()

    def get_predicted_motion(self):
        """Return the agent's most recent action converted to a motion dict."""
        return action_to_motion(self.agent.get_current_action())
|
from typing import Sequence, Dict, Optional
import pandas as pd
import pyexlatex as pl
from datacode.models.variables import Variable
from datacode.summarize.coverage.counts import variables_drop_count_df_and_plt_figs
from datacode.summarize.coverage.to_tex import variables_drop_panel, variables_latex_figures
def variables_drop_obs_doc(df: pd.DataFrame, var_groups: Dict[str, Sequence[Variable]],
                           id_var: Optional[str] = None, sources_outfolder: str = '.') -> pl.Document:
    """
    Generates a summary document of coverage of variables

    Produce a summary document of first a table which shows for each variable,
    the count of non-missing observations, and the count of coins which have
    at least one non-missing observation. Then it shows statistics on what
    would happen to the observations if the variable was dropped from the sample.

    These numbers are calculated by looking at every combination of the
    variables and how much the observation count would increase if this
    variable was excluded from the analysis. Summary statistics are shown for that.

    Then the following pages in the document are histograms of the same analysis,
    showing the distribution of observation counts that could be regained if we
    excluded that variable across the combinations of the other variables.

    :param df: data whose variable coverage should be analyzed
    :param var_groups: keys are names of variable groups, values are variables in the group
    :param id_var: name of the entity-identifier column used for per-entity counts
    :param sources_outfolder: folder in which the generated figure files are saved
    :return: pyexlatex document containing one table panel per group followed by the figures
    """
    panels = []
    all_figures = []
    for group_name, selected_vars in var_groups.items():
        var_names = [var.name for var in selected_vars]
        # per-group counts plus matplotlib histograms of the drop analysis
        summ_df, figs = variables_drop_count_df_and_plt_figs(df, var_names, id_var)
        panel = variables_drop_panel(summ_df, group_name, id_var)
        panels.append(panel)
        figures = variables_latex_figures(figs, group_name, sources_outfolder)
        all_figures.extend(figures)
    tab = pl.Table.from_panel_list(panels, caption='Variable Drop Analysis')
    doc = pl.Document([tab] + all_figures)
    return doc
|
# Ameryn Media metadata processor
# Alex Ball, 2015
import discogs_client
from mutagen.flac import FLAC
import csv, os, time, math, logging
import webbrowser
import time
# Initial variables (run-mode switches and paths; Python 2 script)
input_filename_series = False
input_whole_folder = True
use_boxes_csv = True
input_filename = '00228'
input_filename_list = []
input_filename_list_range = range(1, 5)
flac_directory = 'D:/split'
csv_name = 'boxes.csv'
LOG_TO_FILE = True
CLIENT_NAME = 'AmerynDiscogsBot/0.1'
# SECURITY NOTE(review): API credentials are hard-coded and committed to
# source; they should be moved to environment variables and rotated.
CONSUMER_KEY = 'lysCMszUmXHGNcFDVmbH'
CONSUMER_SECRET = 'njuRMMqVtcCkojDvRtGhOFqstZfHBFrf'
LOG_PATH = 'D:/temp/'
DISCOGS_REQUEST_INTERVAL = 2 # seconds between API requests
matches_count = 0
def discogs_auth():
    """Run the Discogs OAuth flow interactively and return an authorized client.

    Opens the authorize URL in a browser and asks the user to paste the token
    back in (uses raw_input, so Python 2 only).
    """
    discogs = discogs_client.Client(CLIENT_NAME)
    discogs.set_consumer_key(CONSUMER_KEY, CONSUMER_SECRET)
    authorize_url = discogs.get_authorize_url()
    # get_authorize_url() appears to return a tuple with the URL at index 2
    # — NOTE(review): confirm against the installed discogs_client version
    webbrowser.open(authorize_url[2])
    authorize_token = raw_input('Enter authorize token: ')
    access_token = discogs.get_access_token(authorize_token)
    logging.debug('access_token: {0}'.format(access_token))
    return discogs
def average(x):
    """Arithmetic mean of a non-empty sequence (asserts non-emptiness)."""
    assert len(x) > 0
    return sum(x) / float(len(x))
def pearson_def(x, y):
    """Pearson correlation coefficient of two equal-length, non-empty sequences."""
    assert len(x) == len(y)
    n = len(x)
    assert n > 0
    # means computed inline (equivalent to the average() helper)
    mean_x = float(sum(x)) / n
    mean_y = float(sum(y)) / n
    covariance = 0
    variance_x = 0
    variance_y = 0
    for xi, yi in zip(x, y):
        dx = xi - mean_x
        dy = yi - mean_y
        covariance += dx * dy
        variance_x += dx * dx
        variance_y += dy * dy
    return covariance / math.sqrt(variance_x * variance_y)
def to_seconds(time_str_list):
    """Convert a list of 'M:SS' duration strings to total seconds.

    An empty minutes field (e.g. ':45') counts as zero minutes.
    """
    parts = [text.split(':') for text in time_str_list]
    return [int(p[0] or 0) * 60 + int(p[1]) for p in parts]
def boxes_pull(input_filename):
    # Pull artist, album info from boxes csv
    # Returns (artist, album, double_type); (None, None, None) when the
    # serial is not found in the spreadsheet.
    double_type = None
    # Normalise the serial: strip leading zeros and any '-suffix' part.
    input_filename = input_filename.lstrip('0')
    input_filename = input_filename.rsplit('-')[0]
    with open(csv_name, 'r') as boxes:
        spamreader = csv.reader(boxes)
        rowdata = []
        for row in spamreader:
            rowdata.append(row)
    for row in rowdata:
        # Python 2 str.translate(None, ' '): delete spaces before comparing.
        row_serial = row[0].translate(None,' ').lower()
        if row_serial == input_filename:
            print 'Match!'
            print 'RealRow:', row
            artist = row[1]
            album = row[2]
            # Self-titled albums are tagged with the artist name instead.
            if album.lower() == 'self titled' or album.lower() == 'self-titled':
                album = artist
            if row[5].lower() == 'x':
                print 'Double trouble!'
                if row[6].startswith('1/2') or row[6] == '':
                    print '1/2!'
                    double_type = '1/2'
                elif row[6].startswith('1/4'):
                    print 'Eeeek! 1/4!!'
                    double_type = '1/4'
            elif row[5].lower() and row[5].lower() != 'x':
                # Any other non-empty marker means a non-standard double.
                double_type = 'other'
            if 'live' in row[4].lower():
                print 'Eeeek! Live album!'
            return artist, album, double_type
    else:
        # for/else: runs only when the scan finds no matching serial.
        return None, None, None
def add_serial_metadata(input_filename):
    # Tag the split FLAC files for one tape serial: look up artist/album,
    # find a matching Discogs release by track-length correlation, and write
    # tracknumber/title tags to the files.
    # Returns (serial, artist, album, correlation, log_comment, match_flag).
    filename_matches = []
    log_comment = ''
    artist = ''
    album = ''
    track_lengths_correlation = 0
    discogs_match = False
    print
    print '----------------------'
    print 'Query:', input_filename
    # Check for filename query matches
    for each_file in os.listdir(flac_directory):
        if each_file.startswith(input_filename):
            # Skip 'a' matches for non-'a' files
            if len(input_filename) == 5 and each_file[5] != '_':
                continue
            filename_matches.append(each_file)
    if not filename_matches:
        print 'No split (_xx) audio file matches! Dork.'
        return input_filename, artist, album, track_lengths_correlation, log_comment, discogs_match
    # Check for artist tags, add from spreadsheet if enabled and necessary
    if use_boxes_csv:
        artist, album, double_type = boxes_pull(input_filename)
        if (artist, album) == (None, None):
            log_comment = 'No spreadsheet match found.'
            return input_filename, artist, album, track_lengths_correlation, log_comment, discogs_match
        elif double_type == 'other':
            log_comment = 'Freaky double! Engage manual tagging mode.'
            return input_filename, artist, album, track_lengths_correlation, log_comment, discogs_match
        # Add tracknumber, album, artist to FLAC files
        for each_file in filename_matches:
            audio = FLAC(os.path.join(flac_directory, each_file))
            audio['artist'] = artist
            audio['album'] = album
            audio.save()
    else:
        try:
            artist = FLAC(os.path.join(flac_directory, filename_matches[0]))['artist'][0]
            album = FLAC(os.path.join(flac_directory, filename_matches[0]))['album'][0]
            print 'Metadata found. Artist:', artist, 'Album:', album
        except:
            print 'No artist/album tags; boxes spreadsheet not searched:', input_filename
            # NOTE(review): this return has only 5 elements while every other
            # exit (and the caller's unpacking) expects 6 — discogs_match is
            # missing. Also, double_type is undefined on this code path but
            # is read further down.
            return input_filename, artist, album, track_lengths_correlation, log_comment
    # Search Discogs for artist and album
    query = artist+' '+album
    for char in '!@#$.?/;:&\'"':
        query = query.replace(char,' ')
    output = discogs.search(query)
    print 'Discogs results for "'+query+'":', len(output)
    # Print track listing, tag FLAC files with titles
    for i, result in enumerate(output):
        time.sleep(DISCOGS_REQUEST_INTERVAL) # Wait to comply with Discogs 60 requests/minute limit
        if hasattr(result,'tracklist') and type(result) != discogs_client.Master:
            # Only tracks that carry a duration can be correlated.
            tracklist = [track for track in result.tracklist if track.duration]
            if not tracklist:
                print 'Result {0}: No track lengths found. Continuing...'.format(str(i + 1))
                continue
            elif len(tracklist) != len(result.tracklist):
                # NOTE(review): results with partially-missing durations are
                # only warned about here and never matched against.
                print 'Some tracks missing durations. Be careful!'
            elif len(tracklist) == len(filename_matches):
                print '----------------------'
                print 'Result', str(i + 1)
                print 'Release ID:', result.data['id']
                print 'Artist:', result.artists[0].name.encode('utf-8')
                print 'Album:', result.title.encode('utf-8')
                # Funky double handling
                if double_type in ['1/4', '1/3'] and filename_matches[0][-7] not in 'abcd':
                    if not result.tracklist[0].position:
                        print 'Result '+str(i + 1)+': Nope! (No position info found in Discogs)'
                        continue
                    if double_type == '1/4':
                        sort_key = ['a', 'd', 'b', 'c']
                    if double_type == '1/3':
                        sort_key = ['a', 'c', 'b', 'd']
                    # Reorder tracklist by alpha position key
                    tracklist_sorted = []
                    for key in sort_key:
                        for track in tracklist:
                            if track.position.lower().startswith(key):
                                tracklist_sorted.append(track)
                    tracklist = tracklist_sorted
                # Check correlation of Discogs track lengths with those of FLAC files
                discogs_lengths = [track.duration for track in tracklist if track.duration]
                flac_lengths = []
                for match in filename_matches:
                    audio = FLAC(os.path.join(flac_directory, match))
                    flac_length = time.strftime('%M:%S', time.gmtime(audio.info.length)).lstrip('0')
                    flac_lengths.append(flac_length)
                track_lengths_correlation = round(pearson_def(to_seconds(discogs_lengths), to_seconds(flac_lengths)),4)
                print 'Track lengths correlation:', track_lengths_correlation
                if track_lengths_correlation < 0.80:
                    log_comment = '----- Low correlation. Best check yoself!'
                    continue
                # Write tags to FLAC files
                discogs_match += 1
                for track, match in zip(tracklist, filename_matches):
                    audio = FLAC(os.path.join(flac_directory, match))
                    audio['tracknumber'] = track.position
                    audio['title'] = track.title
                    flac_length = time.strftime('%M:%S', time.gmtime(audio.info.length)).lstrip('0')
                    print track.position, track.title.encode('utf-8'), track.duration, '-->', match, flac_length
                    audio.save()
                return input_filename, artist, album, track_lengths_correlation, log_comment, discogs_match
            else:
                print 'Result '+str(i + 1)+': Nope! ('+str(len(tracklist))+' != '+str(len(filename_matches))+')'
        elif i > 40:
            # Give up after ~40 results.
            log_comment = 'Okay that\'s enough Spot. Heel!'
            break
    else:
        # for/else: loop exhausted without a break and without returning a
        # match above — presumably "no usable Discogs match"; confirm intent.
        log_comment = 'No Discogs matches! Dork.'
    return input_filename, artist, album, track_lengths_correlation, log_comment, discogs_match
# RUN THE TRAP
if __name__ == '__main__':
    start_time = time.time()
    if LOG_TO_FILE:
        logging.basicConfig(filename=os.path.join(LOG_PATH, 'log.csv'), format='%(levelname)s,%(message)s', level=logging.INFO)
    else:
        logging.basicConfig(format='%(levelname)s,%(message)s', level=logging.DEBUG)
    discogs = discogs_auth()
    # Build the list of serials to process: a numeric series, every audio
    # file in the folder, or the single configured serial.
    if input_filename_series:
        for f in input_filename_list_range:
            input_filename_list.append(str(f).zfill(5))
        print 'Input filenames (series):', input_filename_list
    elif input_whole_folder:
        for f in os.listdir(flac_directory):
            if f.endswith('.flac') or f.endswith('.mp3'):
                # Strip the '_clean...' suffix and a trailing '-x' marker.
                f = f.rsplit('_clean')[0]
                if f[-2] == '-':
                    f = f[:-1]
                if f not in input_filename_list:
                    input_filename_list.append(f)
        print 'Input filenames ('+flac_directory+'):', input_filename_list
    else:
        input_filename_list = [input_filename]
    for each in input_filename_list:
        input_filename, artist, album, track_lengths_correlation, log_comment, discogs_match = add_serial_metadata(each)
        if discogs_match: matches_count += 1
        # Log track data
        print log_comment
        # NOTE(review): artist/album can be None here (no spreadsheet match),
        # and None.translate would raise AttributeError. translate(None, ',')
        # is Python 2 only: it strips commas so the CSV log stays well-formed.
        artist = artist.translate(None, ',')
        album = album.translate(None, ',')
        logging.info('%s,%s,%s,%s,%s', input_filename, artist, album, track_lengths_correlation, log_comment)
    print 'Great success! {0} files ({1} matches) processed in {2}s.'.format(len(input_filename_list), matches_count, round(time.time() - start_time, 1))
print("hello docker world!!!")
|
class Solution:
    def canConstruct(self, ransomNote: str, magazine: str) -> bool:
        """Return True if ransomNote can be built from magazine's letters.

        Each magazine character may be used at most once.
        """
        # Local import: Counter was referenced but never imported in this file,
        # which made the original raise NameError at call time.
        from collections import Counter
        # Counter subtraction drops non-positive counts, so the difference is
        # non-empty exactly when some letter is demanded more than supplied.
        return not (Counter(ransomNote) - Counter(magazine))
'''
grad_descent.py
Use gradient descent to find the minimum value of a
single variable function. This also checks for the existence
of a solution for the equation, f'(x)=0 and plots the intermediate
points traversed.
'''
from sympy import Derivative, Symbol, sympify, solve
import matplotlib.pyplot as plt
def grad_descent(x0, f1x, x):
    """Gradient-descend to a stationary point of the function whose first
    derivative is `f1x`, starting from x0.

    :param x0: initial value of the variable
    :param f1x: sympy expression for the first derivative f'(x)
    :param x: sympy Symbol the derivative is expressed in
    :return: (x_min, X_traversed) on success; (None, []) when f'(x)=0 has no
             solution. Returning a 2-tuple in both cases lets callers unpack
             the result unconditionally — the original returned a bare None
             here, which broke `a, b = grad_descent(...)` with a TypeError.
    """
    # check if f1x=0 has a solution; without one there is nothing to descend to
    if not solve(f1x):
        print('Cannot continue, solution for {0}=0 does not exist'.format(f1x))
        return None, []
    epsilon = 1e-6      # convergence threshold on successive x values
    step_size = 1e-4    # fixed learning rate
    x_old = x0
    x_new = x_old - step_size*f1x.subs({x:x_old}).evalf()
    # list to store the X values traversed (for plotting later)
    X_traversed = []
    while abs(x_old - x_new) > epsilon:
        X_traversed.append(x_new)
        x_old = x_new
        x_new = x_old - step_size*f1x.subs({x:x_old}).evalf()
    return x_new, X_traversed
def frange(start, final, interval):
    """Return a list of values from `start` up to (excluding) `final`,
    stepping by `interval` — a float-friendly range()."""
    values = []
    current = start
    while current < final:
        values.append(current)
        current += interval
    return values
def create_plot(X_traversed, f, var):
    """Plot `f` over [-1, 1) together with the points visited by descent."""
    # First create the graph of the function itself on a fixed grid.
    grid = frange(-1, 1, 0.01)
    function_values = [f.subs({var: point}) for point in grid]
    plt.plot(grid, function_values, 'bo')
    # Overlay the function value at each intermediate point traversed.
    traversed_values = [f.subs({var: point}) for point in X_traversed]
    plt.plot(X_traversed, traversed_values, 'r.')
    plt.legend(['Function', 'Intermediate points'], loc='best')
    plt.show()
if __name__ == '__main__':
    # SympifyError was referenced below but never imported at the top of the
    # file; without this import the except clause itself raised NameError.
    from sympy import SympifyError

    f = input('Enter a function in one variable: ')
    var = input('Enter the variable to differentiate with respect to: ')
    var0 = float(input('Enter the initial value of the variable: '))
    try:
        f = sympify(f)
    except SympifyError:
        print('Invalid function entered')
    else:
        var = Symbol(var)
        d = Derivative(f, var).doit()
        result = grad_descent(var0, d, var)
        # grad_descent may signal "no solution" — guard before unpacking so
        # we do not crash on a bare None return.
        if result is not None:
            var_min, X_traversed = result
            if var_min:
                print('{0}: {1}'.format(var.name, var_min))
                print('Minimum value: {0}'.format(f.subs({var: var_min})))
                create_plot(X_traversed, f, var)
|
#!/usr/bin/env python
from distutils.core import setup
import gitstats
def readme():
    """Return the long description from README.rst, or a placeholder.

    Catches only file-access errors — the original bare `except:` would also
    swallow KeyboardInterrupt and SystemExit.
    """
    try:
        with open('README.rst') as f:
            return f.read()
    except (IOError, OSError):
        return '(Could not read from README.rst)'
# Read runtime dependencies from requirements.txt so setup() and that file
# stay in sync.
with open('requirements.txt') as fin:
    install_reqs = fin.readlines()
setup(
    name='gitstats',
    py_modules=['gitstats', 'gitstats.__main__', 'gitstats.utils'],
    # Version/author metadata is sourced from the package itself.
    version=gitstats.__version__,
    description='Generating overall statistics for multiple git repositories',
    long_description=readme(),
    author=gitstats.__author__,
    author_email=gitstats.__email__,
    url='http://github.com/suminb/gitstats',
    license='BSD',
    packages=[],
    data_files=[('', ['requirements.txt'])],
    install_requires=install_reqs,
    entry_points={
        'console_scripts': [
            'gitstats = gitstats.__main__:cli'
        ]
    },
)
|
import python_path
from utils.decorator import ReferenceUsingTest
from ml_workflow.viz_utils import VizUtils, plot_model, \
plot_model_full_detail, get_default_dirname
import utils
from ml_workflow.workflow_node import WorkflowNode
import ml_workflow
from ml_workflow import Rule, DataSource
import ml_workflow.tracable_data_set as tds
import shutil
import pandas as pd
import numpy as np
import os
import datetime as dt
def get_simple_graph_with_fork():
    """Build a small workflow DAG with two roots that merge, for plot tests.

    DataSource1 ---------------------------\
    DataSource2 -> Processing_on_DS2 -> Merge_sources -> LeafNode
    """
    source_a = WorkflowNode([Rule(name='DataSource1')])
    source_b = WorkflowNode([Rule(name='DataSource2')])
    processed_b = WorkflowNode([Rule(name='Processing_on_DS2')], previous=source_b)
    merged = WorkflowNode([Rule(name='Merge_sources')], previous=[source_a, processed_b])
    return WorkflowNode([Rule(name='LeafNode')], previous=merged)
def test_check_pydot():
    """pydot must be importable and functional for the plotting tests."""
    assert VizUtils.check_pydot()
@ReferenceUsingTest('test_regression.svg')
def test_plot_model_as_svg():
    # Reset the global node counter so node ids in the SVG are deterministic
    # and match the checked-in reference image.
    WorkflowNode._next_id = 0
    _test_plot_model('test_regression.svg')
@ReferenceUsingTest('test_regression.png')
def test_plot_model_as_png():
    # NOTE(review): unlike the SVG test, _next_id is not reset here —
    # presumably it relies on running right after the SVG test; confirm.
    _test_plot_model('test_regression.png')
def _test_plot_model(filename):
    """Render the forked sample graph to `filename` with a fixed timestamp."""
    graph = get_simple_graph_with_fork()
    plot_model(graph, filename, ts = 'TEST_ts')
def test_model_to_dot():
    """The workflow graph can be exported as a raw graphviz dot file."""
    graph = get_simple_graph_with_fork()
    dot_graph = VizUtils().model_to_dot(graph)
    dot_graph.write('test_regression.dot', format='dot')
def test_correct_weird_pydot_bug():
    """correct_weird_pydot_bug must strip the bogus scale() transform prefix.

    Exercises both scale variants pydot is known to emit.
    """
    path = 'temp/correct_weird_pydot_bug'
    for scale_prefix in ('scale(1.33 1.33) ', 'scale(1.3333 1.3333) '):
        with open(path, 'w') as f:
            f.write('transform="' + scale_prefix + 'rotate(0) translate(4 256)"')
        VizUtils.correct_weird_pydot_bug(path)
        with open(path, 'r') as f:
            assert f.read() == 'transform="rotate(0) translate(4 256)"'
@ReferenceUsingTest('test_sub_label.svg')
def test_sub_labels():
    # Regression test: rule metadata (branch/version/tags) should show up as
    # sub-labels in the rendered SVG, compared against the reference file.
    df = tds.TracableDataFrame({'A' : [1, 2]})
    @Rule(name = 'viz_utils.test', branch='test', version='1.0', tags='test_tag')
    def f(df):
        df['A'] += 1
        return df
    df = f(df)
    df.plot_model('test_sub_label.svg')
import csv
import re
def parse_peptide(peptide, peptide_regex, ptm_removal_regex):
    """Strip flanking residues and PTM annotations from a PIN peptide string.

    Returns the bare peptide sequence, or None when the string does not match
    the expected "X.PEPTIDE.Y" layout (or the captured peptide is empty).
    """
    hit = peptide_regex.match(peptide)
    if not (hit and hit.group('peptide')):
        return None
    return ptm_removal_regex.sub('', hit.group('peptide'))
class PINFile:
    """In-place editor for a Percolator PIN (tab-separated) file.

    Tracks the set of bare peptide sequences seen, and supports appending
    rows from another PIN file and inserting a new score column.
    """
    def __init__(self, path):
        self.path = path
        self.peptides = set()
        # "X.PEPTIDE.Y": single flanking residue (or '-') on each side.
        self.peptide_regex = re.compile('^[A-Z\-]\.(?P<peptide>.*)\.[A-Z\-]$')
        # PTM annotations are bracketed, e.g. "[15.99]".
        self.ptm_removal_regex = re.compile('\[[^\]]*\]')
        with open(path, 'r') as f:
            # restkey folds the variable number of protein columns into one key.
            reader = csv.DictReader(f, delimiter='\t', restkey='Proteins')
            self.fieldnames = list(reader.fieldnames)
            # Skip the first data row — presumably the Percolator
            # "DefaultDirection" line, not a PSM; confirm against the format.
            next(reader)
            for row in reader:
                peptide = parse_peptide(row['Peptide'], self.peptide_regex, self.ptm_removal_regex)
                assert(peptide)
                self.peptides.add(peptide)
    def addPin(self, additionalPath,*, decoy=False):
        # Append all rows of another PIN file to this one; with decoy=True
        # every appended row is relabelled as a decoy (Label = -1).
        with open(additionalPath, 'r') as f:
            reader = csv.DictReader(f, delimiter='\t', restkey='Proteins')
            # Skip the direction row of the incoming file as well.
            next(reader)
            with open(self.path, 'a') as g:
                writer = csv.DictWriter(g, self.fieldnames, delimiter='\t')
                for row in reader:
                    if decoy:
                        print('row')
                        print(row)
                        row['Label'] = '-1'
                    writer.writerow(row)
                    peptide = parse_peptide(row['Peptide'], self.peptide_regex, self.ptm_removal_regex)
                    assert(peptide)
                    self.peptides.add(peptide)
    def addScores(self, scoreDict, columnHeader, columnDirection):
        # Rewrite the file with a new score column inserted at position 6,
        # keyed by bare peptide; scoreDict must cover every peptide present.
        rows = []
        with open(self.path, 'r') as f:
            # Explicit fieldnames: the header line becomes the first data row
            # (consumed by next()), the second next() captures the direction row.
            reader = csv.DictReader(f, self.fieldnames, delimiter='\t', restkey='Proteins')
            next(reader)
            direction = dict(next(reader))
            for row in reader:
                rows.append(row)
        self.fieldnames.insert(6, columnHeader)
        direction[columnHeader] = columnDirection
        with open(self.path, 'w') as f:
            writer = csv.DictWriter(f, self.fieldnames, delimiter='\t')
            writer.writeheader()
            writer.writerow(direction)
            for row in rows:
                row_copy = dict(row)
                peptide = parse_peptide(row['Peptide'], self.peptide_regex, self.ptm_removal_regex)
                row_copy[columnHeader] = str(scoreDict[peptide])
                writer.writerow(row_copy)
|
# -*- coding: utf-8-*-
import sys, os, time, random
import re
import json
import argparse
import logging
import psutil
from multiprocessing import Process, Queue, Pipe
from lib.graphic.baiduGraphic import BaiduGraphic
from lib.voice.baiduVoice import BaiduVoice
from lib.voice.baseVoice import AbstractVoiceEngine
from plugin.bootstrap import Bootstrap
import lib.appPath
import lib.util
import plugin.volume.pulseAudio
from plugin.fm.doubanFM import DoubanFM
from lib.mail import SMTPMail
from plugin.monitor.people import PeopleMonitor
from plugin.feeds.jiqizhixin import JiqizhixinFeed
import plugin.feeds.jiqizhixin
def doubanFM(logger,args):
    """Debug driver: play two random liked songs from Douban FM."""
    voice = BaiduVoice.get_instance()
    fm = DoubanFM.get_instance()
    fm.set_speaker(voice)
    for _ in range(2):
        fm.playRandomLikeSong()
def pulseAudio(logger,args):
    # Debug driver for the pulse-audio volume plugin: fork a child process,
    # feed it a scripted sequence of voice commands, then tear down the pipes
    # and wait for the child to exit.
    baidu_voice = BaiduVoice.get_instance()
    out_to_fp, in_to_fp = Pipe(True)
    out_from_fp, in_from_fp = Pipe(True)
    son_p = Process(target=Bootstrap.son_process,
            args=(baidu_voice,
            (out_to_fp, in_to_fp),
            (out_from_fp, in_from_fp),
            plugin.volume.pulseAudio.son_process_handle,False))
    son_p.start()
    # After to_pipe has been forked into the child, close the parent's output
    # end; one end of this Pipe feeds the parent's input, the other the
    # child's output.
    out_to_fp.close()
    # After from_pipe has been forked, close the parent's input end; one end
    # feeds the child's input, the other the parent's output.
    in_from_fp.close()
    # Scripted volume commands (open sound / softer / louder / mute / quiet).
    words = [
        u"打开声音",
        u"声音小一点",
        u"声音小点",
        u"声音再小一点",
        u"声音大点",
        u"声音再大一点",
        u"静音",
        u"打开声音",
        u"安静",
        u"打开声音",
        #u"声音放到最大",
    ]
    for text in words:
        is_valid = plugin.volume.pulseAudio.isValid(text)
        if is_valid is True:
            logger.debug("word %s is valid" % text)
            plugin.volume.pulseAudio.process_handle(text,in_to_fp,out_from_fp,son_p,baidu_voice)
            time.sleep(3)
        else:
            logger.debug("word %s is not valid" % text)
    in_to_fp.close()
    out_from_fp.close()
    son_p.join()
    logger.debug("debug pulseAudio is over")
def mail(logger,args):
    # Debug driver: send the local test image via the SMTP mail helper.
    smtpMail = SMTPMail.get_instance()
    with open('./mind-idea.jpg', 'rb') as f:
        smtpMail.sendImageEmail(f.read())
    logger.debug("debug mail is over")
def peopleMonitor(logger,args):
    # Debug driver: run the people monitor with a fake speech-recognition
    # callback that randomly alternates between the start and stop phrases.
    speaker = BaiduVoice.get_instance()
    people_monitor = PeopleMonitor.get_instance()
    people_monitor.set_speaker(speaker)
    def get_text_callback():
        # Randomly pick "start body monitoring" / "stop body monitoring".
        index = random.choice([0,1])
        test_words = [
            u'打开人体监控',
            u'结束人体监控',
        ]
        logger.debug("index %d, text:%s",index,test_words[index])
        time.sleep(5)
        return test_words[index]
    people_monitor.start(get_text_callback)
    logger.debug("debug peopleMonitor is over")
def baiduGraphic(logger,args):
    # Debug driver: run every supported Baidu image detector over a matching
    # sample image (<type>.jpg in the app directory) and log the raw result.
    def get_file_content(filePath):
        with open(filePath, 'rb') as fp:
            return fp.read()
    baidu_graphic = BaiduGraphic.get_instance()
    for detect_type in ["plant","dish","car","logo","animal","object","face"]:
        file = os.path.join(lib.appPath.APP_PATH, '.'.join([detect_type,'jpg']))
        img = get_file_content(file)
        res = baidu_graphic.detectImage(img,detect_type)
        # NOTE(review): json.dumps(encoding=...) is Python 2 only; this call
        # would raise TypeError on Python 3.
        logger.debug("%s: %s",detect_type,json.dumps(res,encoding="UTF-8",ensure_ascii=False))
    logger.debug("debug baiduGraphic is over")
def jiqizhixinFeed(logger,args):
    # Debug driver for the Jiqizhixin news-feed plugin: fork the reader child
    # process, feed it a scripted sequence of voice commands, then shut the
    # pipes down and wait for the child.
    speaker = BaiduVoice.get_instance()
    out_to_fp, in_to_fp = Pipe(True)
    out_from_fp, in_from_fp = Pipe(True)
    son_p = Process(target=Bootstrap.son_process,
            args=(speaker,
            (out_to_fp, in_to_fp),
            (out_from_fp, in_from_fp),
            plugin.feeds.jiqizhixin.son_process_handle,False))
    son_p.start()
    # After to_pipe has been forked into the child, close the parent's output
    # end; one end of this Pipe feeds the parent's input, the other the
    # child's output.
    out_to_fp.close()
    # After from_pipe has been forked, close the parent's input end; one end
    # feeds the child's input, the other the parent's output.
    in_from_fp.close()
    # Scripted commands: "read Jiqizhixin news", "read next", ..., "stop reading".
    debug_words = [
        u"阅读机器之心新闻",
        u"阅读下一条",
        u"下一条",
        u"下一条",
        u"结束阅读",
    ]
    for text in debug_words:
        is_valid = plugin.feeds.jiqizhixin.isValid(text)
        if is_valid is True:
            # Give long-running commands (start/stop reading) time to finish.
            if any(word in text for word in [u'结束阅读',u'阅读机器之心']):
                time.sleep(60)
            plugin.feeds.jiqizhixin.process_handle(text,in_to_fp,out_from_fp,son_p,speaker)
            if any(word in text for word in [u'结束阅读']): break
            time.sleep(7)
        else:
            print("word %s is not valid" % text)
    in_to_fp.close()
    out_from_fp.close()
    son_p.join()
    '''
    instance = JiqizhixinFeed.get_instance()
    instance.set_speaker(speaker)
    instance.update_feeds()
    ct = instance.get_feeds_count()
    for i in range(0,ct):
        instance.get_next_feed()
    '''
    logger.debug("debug jiqizhixinFeed is over")
if __name__ == '__main__':
    # One --flag per debuggable subsystem; the first matching flag runs its
    # debug driver and the process exits immediately afterwards.
    parser = argparse.ArgumentParser(description='debug')
    parser.add_argument('--debug', action='store_true',
                        help='Show debug messages')
    parser.add_argument('--pulseAudio', action='store_true',
                        help='Show debug pulse audio plugin messages')
    parser.add_argument('--doubanFM', action='store_true',
                        help='Show debug douban fm plugin messages')
    parser.add_argument('--mail', action='store_true',
                        help='Show debug mail lib messages')
    parser.add_argument('--peopleMonitor', action='store_true',
                        help='Show debug people monitor plugin messages')
    parser.add_argument('--baiduGraphic', action='store_true',
                        help='Show debug baidu graphic lib messages')
    parser.add_argument('--jiqizhixinFeed', action='store_true',
                        help='Show debug jiqizhixinFeed plugin messages')
    args = parser.parse_args()
    logging.basicConfig(stream=sys.stdout)
    logger = logging.getLogger("")
    if args.debug:
        logger.setLevel(logging.DEBUG)
    if args.pulseAudio:
        pulseAudio(logger,args)
        exit(0)
    if args.doubanFM:
        doubanFM(logger,args)
        exit(0)
    if args.mail:
        mail(logger,args)
        exit(0)
    if args.peopleMonitor:
        peopleMonitor(logger,args)
        exit(0)
    if args.baiduGraphic:
        baiduGraphic(logger,args)
        exit(0)
    if args.jiqizhixinFeed:
        jiqizhixinFeed(logger,args)
        exit(0)
|
#!/bin/python3
import os
# Complete the minimumSwaps function below.
def minimum_swaps(array) -> int:
    """Count the swaps performed while greedily sorting `array` in place.

    For a sequence of distinct values this equals the minimum number of
    swaps needed to sort it. Note: like the original, `array` is fully
    sorted in place as a side effect.
    """
    target = sorted(array)
    position_of = {value: idx for idx, value in enumerate(array)}
    swaps = 0
    for idx, value in enumerate(array):
        wanted = target[idx]
        if value == wanted:
            continue
        # Put the correct value here and record both elements' new positions.
        src = position_of[wanted]
        array[idx], array[src] = array[src], array[idx]
        position_of[wanted] = idx
        position_of[value] = src
        swaps += 1
    return swaps
if __name__ == '__main__':
    # HackerRank harness: read n and the array from stdin, write the answer
    # to the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # n is read but not validated against len(arr) — the judge guarantees it.
    n = int(input())
    arr = list(map(int, input().rstrip().split()))
    res = minimum_swaps(arr)
    fptr.write(str(res) + '\n')
    fptr.close()
|
from sklearn.naive_bayes import GaussianNB
import numpy as np
from sklearn import datasets
import scipy.stats
def get_data():
    """Load the iris dataset and split it 120/30 into train and test parts."""
    iris = datasets.load_iris()
    X, y = iris.data, iris.target
    return X[:120, :], y[:120], X[120:, :], y[120:]
def calculate_dist_parameters(n_labels, n_features, train_features,
                              train_targets, feature_cats):
    """Per-(label, feature) Gaussian mean/std for continuous features.

    Entries for categorical features (feature_cats[j] != 1) stay at zero.
    Returns (means, stds), each of shape (n_labels, n_features).
    """
    means = np.zeros((n_labels, n_features))
    stds = np.zeros((n_labels, n_features))
    for feat in range(n_features):
        if feature_cats[feat] != 1:
            continue
        for label in range(n_labels):
            values = train_features[np.where(train_targets == label), feat]
            means[label, feat] = np.mean(values)
            stds[label, feat] = np.std(values)
    return means, stds
def calculate_likelihood(n_labels, train_features, train_targets):
    """Return the empirical prior P(class) for each label.

    (Despite the name, these are class priors, not likelihoods.)
    `np.float` was removed in NumPy 1.24 — the builtin float is used instead.
    """
    n_samples = float(train_features.shape[0])
    class_probabilities = np.zeros(n_labels)
    for label in range(n_labels):
        class_probabilities[label] = len(np.where(train_targets == label)[0]) / n_samples
    return class_probabilities
def get_test_predictions(n_labels, test_features, train_mean, train_std,
                         class_probabilities, feature_cats, train_features,
                         train_targets):
    """Naive-Bayes-classify each test record; return argmax class per record.

    Continuous features (feature_cats[j] == 1) use the Gaussian density from
    (train_mean, train_std); categorical ones use empirical value frequencies
    from the training data.

    Fixes vs. the original:
    - n_features was read from an undefined module-level global; it is now
      derived from test_features itself
    - feature_cats was indexed by the label i instead of the feature j
      (IndexError whenever n_labels > n_features, wrong branch otherwise)
    - the categorical frequency divided by len() of a (1, n) slice, which is
      always 1; it now divides by the actual number of training samples
    """
    n_features = test_features.shape[1]
    test_predictions = []
    for record in test_features:
        class_probs = np.ones(n_labels)
        for i in range(n_labels):
            for j in range(n_features):
                if feature_cats[j] == 1:
                    class_probs[i] *= scipy.stats.norm(
                        train_mean[i, j], train_std[i, j]).pdf(record[j])
                else:
                    req_features = train_features[
                        np.where(train_targets == i), j].ravel()
                    class_probs[i] *= (
                        len(np.where(req_features == record[j])[0])
                        / float(len(req_features)))
            # Multiply in the class prior once per class.
            class_probs[i] *= class_probabilities[i]
        test_predictions.append(np.argmax(class_probs))
    return test_predictions
def calculate_accuracy(truth, predicted):
    """Fraction of positions where `truth` and `predicted` agree."""
    n_correct = np.sum(truth == predicted)
    return n_correct / float(len(truth))
if __name__ == "__main__":
    train_features, train_targets, test_features, test_targets = get_data()
    n_features = train_features.shape[1]
    n_labels = len(set(train_targets))
    # 1 - continuous, 0 - categorical
    # Heuristic: a feature with more than 10 distinct values is continuous.
    feature_cats = [1 if len(set(train_features[:, i])) >
                    10 else 0 for i in range(train_features.shape[1])]
    train_mean, train_std = calculate_dist_parameters(n_labels, n_features,
                                                      train_features,
                                                      train_targets,
                                                      feature_cats)
    class_probabilities = calculate_likelihood(n_labels, train_features,
                                               train_targets)
    test_predictions = get_test_predictions(n_labels, test_features, train_mean,
                                            train_std, class_probabilities,
                                            feature_cats, train_features,
                                            train_targets)
    print("The accuracy through my code is {}".format(
        calculate_accuracy(test_predictions, test_targets)))
    # Cross check against sklearn's Gaussian Naive Bayes.
    nb_model = GaussianNB()
    # Train the model using the training dataset
    nb_model.fit(train_features, train_targets)
    # print(nb_model.class_prior_)
    # print(nb_model.theta_)
    # print(np.sqrt(nb_model.sigma_))
    testing_predictions = nb_model.predict(test_features)
    print("The accuracy through GNB model is {}".format(
        calculate_accuracy(testing_predictions, test_targets)))
|
# Generated by Django 3.2.5 on 2021-09-12 23:26
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the inventory app.

    Creates museums, collections, museum objects and their lookup/metadata
    tables; ObjectLocation uses a GeoDjango PointField (SRID 4326).
    Generated by Django 3.2.5 — do not hand-edit the operations.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Collection',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='CollectionDomain',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='ImageColor',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.CharField(max_length=80)),
            ],
        ),
        migrations.CreateModel(
            name='LanguageCode',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(default='', max_length=6)),
            ],
        ),
        migrations.CreateModel(
            name='Museum',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='MuseumObject',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('internal_identifier', models.CharField(default='', max_length=100)),
                ('description', models.CharField(blank=True, default='', max_length=5000)),
                ('collection', models.ManyToManyField(to='inventory.Collection')),
            ],
        ),
        migrations.CreateModel(
            name='ObjectCreator',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='ObjectDateType',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='ObjectLocationType',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='ObjectPublisher',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='ObjectSubject',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='ObjectTitle',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default='', max_length=1000)),
                ('museum_object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.museumobject')),
            ],
        ),
        migrations.CreateModel(
            name='ObjectLocation',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('term', models.CharField(default='', max_length=200)),
                # Geographic point in WGS 84 (SRID 4326).
                ('location', django.contrib.gis.db.models.fields.PointField(srid=4326)),
                ('location_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.objectlocationtype')),
                ('museum_object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.museumobject')),
            ],
        ),
        migrations.CreateModel(
            name='ObjectImage',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='images/')),
                ('color', models.ManyToManyField(to='inventory.ImageColor')),
                ('museum_object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.museumobject')),
            ],
        ),
        migrations.CreateModel(
            name='ObjectDate',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.CharField(default='', max_length=80)),
                ('museum_object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.museumobject')),
                ('type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.objectdatetype')),
            ],
        ),
        # M2M fields added to MuseumObject/Collection after all models exist.
        migrations.AddField(
            model_name='museumobject',
            name='creator',
            field=models.ManyToManyField(blank=True, to='inventory.ObjectCreator'),
        ),
        migrations.AddField(
            model_name='museumobject',
            name='language',
            field=models.ManyToManyField(to='inventory.LanguageCode'),
        ),
        migrations.AddField(
            model_name='museumobject',
            name='publisher',
            field=models.ManyToManyField(to='inventory.ObjectPublisher'),
        ),
        migrations.AddField(
            model_name='museumobject',
            name='subject',
            field=models.ManyToManyField(blank=True, to='inventory.ObjectSubject'),
        ),
        migrations.AddField(
            model_name='collection',
            name='domain',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='inventory.collectiondomain'),
        ),
        migrations.AddField(
            model_name='collection',
            name='museum',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.museum'),
        ),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''wgetdb
Usage:
wgetdb <database_path> <url> <label>
wgetdb -h | --help
wgetdb --version
Options:
-h --help Show this screen.
--version Show version.
'''
from __future__ import unicode_literals, print_function
import datetime
import urllib2
import sqlite3
from time import sleep
from docopt import docopt
__version__ = "0.1.4"
__author__ = "Akira Kozakai"
__license__ = "MIT"
URLOPEN_TIMEOUT = 10
TABLE_NAME = "datas"
CREATE_TABLE_SQL = """
CREATE TABLE %s (
id INTEGER PRIMARY KEY,
url VARCHAR(4095) NOT NULL,
label VARCHAR(255) NOT NULL,
data BLOB NOT NULL,
created_date DATE NOT NULL,
modified_date DATE NOT NULL,
UNIQUE(url, label)
);
""" % TABLE_NAME
class WgetDB(object):
    """Fetch URLs and cache the raw response bodies in a SQLite table.

    Rows are keyed by (url, label); storing an existing pair updates the
    stored blob instead of inserting a duplicate. Python 2 code (urllib2,
    buffer).
    """
    def __init__(self, db_path, wait_before_wget=0):
        # wait_before_wget: optional politeness delay (seconds) per fetch.
        self.db_path = db_path
        self.wait_before_wget = wait_before_wget
        self._con = None
    def __del__(self):
        # NOTE(review): reading self.con here can *open* a connection at
        # interpreter teardown if none was ever created — confirm intended.
        self.con.close()
    @property
    def con(self):
        # Lazily open the connection (autocommit) and ensure the table exists.
        if not self._con:
            self._con = sqlite3.connect(self.db_path, isolation_level=None)
            self.create_table()
        return self._con
    def create_table(self):
        # Create the cache table only when it does not exist yet.
        cur = self.con.execute(
            "SELECT * FROM sqlite_master WHERE type='table' and name=?",
            (TABLE_NAME,))
        if cur.fetchone() is None:
            self.con.execute(CREATE_TABLE_SQL)
    def download_url(self, url):
        # Fetch the URL body; returns None for any non-200 status.
        if self.wait_before_wget:
            sleep(self.wait_before_wget)
        response = urllib2.urlopen(url, timeout=URLOPEN_TIMEOUT)
        if response.code != 200:
            return None
        return response.read()
    def insert_data(self, url, label, data):
        # buffer() is the Python 2 way to bind raw bytes as a BLOB.
        sql = ('INSERT INTO %s ("url", "label", "data", "created_date", "modified_date")'
               'VALUES (?, ?, ?, ?, ?);') % TABLE_NAME
        args = (url, label, buffer(data), datetime.datetime.utcnow(),
                datetime.datetime.utcnow())
        self.con.execute(sql, args)
    def update_data(self, url, label, data):
        sql = ('UPDATE %s SET "data" = ?, "modified_date" = ?'
               'WHERE "url" = ? AND "label" = ?;') % TABLE_NAME
        args = (buffer(data), datetime.datetime.utcnow(), url, label)
        self.con.execute(sql, args)
    def store(self, url, label):
        # Insert, falling back to update when the (url, label) row exists.
        # NOTE(review): download_url may return None (non-200), which would
        # then hit the NOT NULL constraint on `data`.
        data = self.download_url(url)
        try:
            self.insert_data(url, label, data)
        except sqlite3.IntegrityError as e:
            self.update_data(url, label, data)
    def get(self, url, label):
        # Return {'data', 'created_date', 'modified_date'} or None if absent.
        sql = ('SELECT * FROM %s WHERE "url" = ? AND "label" = ?;') % TABLE_NAME
        args = (url, label)
        cur = self.con.execute(sql, args)
        record = list(cur)
        if len(record) > 0:
            record = record[0]
            return {'data': str(record[3]),
                    'created_date': record[4],
                    'modified_date': record[5]}
        return None
    def wget(self, url, label, compulsion_replace=False):
        # Cached fetch: hit the network only when missing or when forced.
        if compulsion_replace:
            self.store(url, label)
        data = self.get(url, label)
        if not data:
            self.store(url, label)
            data = self.get(url, label)
        return data
def main():
    # CLI entry point: parse docopt arguments and store one URL under a label.
    try:
        args = docopt(__doc__, version=__version__)
        db_path = args.get('<database_path>')
        url = args.get('<url>')
        label = args.get('<label>')
        wgetdb = WgetDB(db_path)
        wgetdb.store(url, label)
        print('SUCCESS!')
    except Exception as e:
        # Broad catch so the CLI always prints a structured error report.
        print(u'=== ERROR ===')
        print(u'type:{0}'.format(type(e)))
        print(u'args:{0}'.format(e.args))
        # NOTE(review): e.message is Python 2 only; gone in Python 3.
        print(u'message:{0}'.format(e.message))
if __name__ == '__main__':
    main()
|
class TestTypeConversionError:
pass
class TestCondition:
    """Stub tests for Condition (bodies are synced placeholders)."""
    def test___str__(self): # synced
        assert True
    def test___call__(self): # synced
        assert True
    def test_extract_name_from_condition(self): # synced
        assert True
class TestValidatorMeta:
    """Placeholder test group for ValidatorMeta; no cases yet."""
    pass
class TestValidator:
    """Stub tests for the base Validator (bodies are synced placeholders)."""
    def test___str__(self): # synced
        assert True
    def test___call__(self): # synced
        assert True
    def test_set_nullable(self): # synced
        assert True
    def test_set_choices(self): # synced
        assert True
    def test_add_condition(self): # synced
        assert True
    def test_add_conditions(self): # synced
        assert True
    def test_is_valid(self): # synced
        assert True
    def test_convert(self): # synced
        assert True
    def test__to_subtype(self): # synced
        assert True
    def test__pre_process(self): # synced
        assert True
class TestAnythingValidator:
    """Stub tests for AnythingValidator and its Anything helper type."""
    class TestAnything:
        def test_is_type(self): # synced
            assert True
        def test_convert(self): # synced
            assert True
class TestUnknownTypeValidator:
    """Stub tests for UnknownTypeValidator."""
    def test_convert(self): # synced
        assert True
class TestBooleanValidator:
    """Placeholder test group for BooleanValidator; no cases yet."""
    pass
class TestStringValidator:
    """Stub tests for StringValidator length constraints and conversion."""
    def test_max_len(self): # synced
        assert True
    def test_min_len(self): # synced
        assert True
    def test__to_subtype(self): # synced
        assert True
class TestNumericValidator:
    """Stub tests for NumericValidator value-range constraints."""
    def test_max_value(self): # synced
        assert True
    def test_min_value(self): # synced
        assert True
class TestIntegerValidator:
    """Placeholder test group for IntegerValidator; no cases yet."""
    pass
class TestFloatValidator:
    """Stub tests for FloatValidator and its Float helper type."""
    class TestFloat:
        def test_convert(self): # synced
            assert True
class TestDecimalValidator:
    """Placeholder test group for DecimalValidator; no cases yet."""
    class TestDecimal:
        pass
class TestParametrizableValidator:
    """Stub tests for ParametrizableValidator pre-processing."""
    def test__pre_process(self): # synced
        assert True
class TestListValidator:
    """Stub tests for ListValidator (bodies are synced placeholders)."""
    def test___str__(self): # synced
        assert True
    def test_parametrize(self): # synced
        assert True
    def test_of_type(self): # synced
        assert True
    def test_is_valid(self): # synced
        assert True
    def test_convert(self): # synced
        assert True
    def test__to_subtype(self): # synced
        assert True
class TestSetValidator:
    """Stub tests for SetValidator and its Set helper type."""
    class TestSet:
        def test_convert(self): # synced
            assert True
class TestDictionaryValidator:
    """Stub tests for DictionaryValidator (bodies are synced placeholders)."""
    def test___str__(self): # synced
        assert True
    def test_is_parametrized(self): # synced
        assert True
    def test_parametrize(self): # synced
        assert True
    def test_of_type(self): # synced
        assert True
    def test_is_valid(self): # synced
        assert True
    def test_convert(self): # synced
        assert True
    def test__to_subtype(self): # synced
        assert True
class TestDateTimeValidator:
    """Stub tests for DateTimeValidator before/after constraints."""
    def test_before(self): # synced
        assert True
    def test_after(self): # synced
        assert True
    def test__to_subtype(self): # synced
        assert True
class TestDateValidator:
    """Stub tests for DateValidator and its Date helper type."""
    class TestDate:
        def test_convert(self): # synced
            assert True
    def test__to_subtype(self): # synced
        assert True
class TestPathValidator:
    """Stub tests for PathValidator and its Path helper type."""
    class TestPath:
        def test_is_type(self): # synced
            assert True
        def test_convert(self): # synced
            assert True
class TestFileValidator:
    """Stub tests for FileValidator and its File helper type."""
    class TestFile:
        def test_is_type(self): # synced
            assert True
        def test_convert(self): # synced
            assert True
class TestDirValidator:
    """Stub tests for DirValidator and its Dir helper type."""
    class TestDir:
        def test_is_type(self): # synced
            assert True
        def test_convert(self): # synced
            assert True
class TestValidate:
    """Stub tests for the validate() entry point."""
    def test_infer_type(self): # synced
        assert True
|
def test_Druva_FindDevice_Command(requests_mock):
    """Matched resources are surfaced under the Druva.Resource context key."""
    from Druva import Client, Druva_FindDevice_Command
    resource = {
        'orgID': '-1',
        'resourceID': '12345',
        'resourceName': 'test',
        'resourceParent': 'testUser',
        'resourceStatus': 'enabled',
        'resourceType': 'endpoint',
    }
    requests_mock.get('https://apis.druva.com/realize/ransomwarerecovery/v1/search/resource?hostname=test',
                      json={'resources': [resource]})
    client = Client(
        base_url='https://apis.druva.com/',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'},
    )
    result = Druva_FindDevice_Command(client, 'test')
    assert result[1]["Druva.Resource(val.resourceID == obj.resourceID)"] == [resource]
def test_Druva_ListQuarantineRanges_Command(requests_mock):
    """Active quarantine ranges are returned under the expected context key."""
    from Druva import Client, Druva_ListQuarantineRanges_Command
    qrange = {
        'rangeID': '100',
        'resourceID': '12345',
        'resourceType': 'Endpoint',
        'startDate': '2020-12-01',
        'endDate': '2020-12-10',
    }
    requests_mock.get('https://apis.druva.com/realize/ransomwarerecovery/v1/quarantineranges',
                      json={'quarantineRanges': [qrange]})
    client = Client(
        base_url='https://apis.druva.com/',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'},
    )
    result = Druva_ListQuarantineRanges_Command(client)
    assert result[1]["Druva.activeQuarantineRanges(val.rangeID == obj.rangeID)"] == [qrange]
def test_Druva_QuarantineResource_Command(requests_mock):
    """Quarantining a resource surfaces the new range ID in context."""
    from Druva import Client, Druva_QuarantineResource_Command
    requests_mock.post('https://apis.druva.com/realize/ransomwarerecovery/v1/quarantineranges/resource/12345',
                       json={'rangeID': '100'})
    client = Client(
        base_url='https://apis.druva.com/',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'},
    )
    result = Druva_QuarantineResource_Command(client, '12345', 'endpoint', '2020-12-01', '2020-12-10')
    assert result[1]["Druva.QuarantinedRangeID"] == '100'
def test_Druva_DeleteQuarantineRange_Command(requests_mock):
    """Deleting a range returns the raw API payload containing the range ID."""
    from Druva import Client, Druva_DeleteQuarantineRange_Command
    requests_mock.delete('https://apis.druva.com/realize/ransomwarerecovery/v1/quarantineranges/resource/12345/range/100',
                         json={'rangeID': '100'})
    client = Client(
        base_url='https://apis.druva.com/',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'},
    )
    _, _, raw = Druva_DeleteQuarantineRange_Command(client, '12345', '100')
    assert raw["rangeID"] == '100'
def test_Druva_ViewQurantineRange_Command(requests_mock):
    """Viewing a range places the range payload into context."""
    from Druva import Client, Druva_ViewQurantineRange_Command
    requests_mock.get('https://apis.druva.com/realize/ransomwarerecovery/v1/quarantineranges/resource/12345/range/100',
                      json={'rangeID': '100'})
    client = Client(
        base_url='https://apis.druva.com/',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'},
    )
    result = Druva_ViewQurantineRange_Command(client, '12345', '100')
    assert result[1]["Druva.viewedQuarantineRange(val.rangeID == obj.rangeID)"] == {'rangeID': '100'}
def test_Druva_UpdateQuarantineRange_Command(requests_mock):
    """Updating a range surfaces the updated range ID in context."""
    from Druva import Client, Druva_UpdateQuarantineRange_Command
    requests_mock.put(
        'https://apis.druva.com/realize/ransomwarerecovery/v1/quarantineranges/resource/12345/range/100',
        json={'rangeID': '100'})
    client = Client(
        base_url='https://apis.druva.com/',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'},
    )
    result = Druva_UpdateQuarantineRange_Command(client, '12345', 'endpoint', '100', '2020-12-05', '2020-12-10')
    assert result[1]["Druva.updatedQuarantineRange"] == '100'
def test_Druva_ListQuarantine_Snapshots_Command(requests_mock):
    """Quarantined snapshots for a range are listed into context."""
    from Druva import Client, Druva_ListQuarantine_Snapshots_Command
    snapshot = {
        'name': 'May 4 2020, 15:02',
        'snapshotID': 'TW9uIE1heSAgNCAxNTowMjo0MSAyMDIw',
        'snapshotSize': 'Endpoint',
        'status': 'Quarantined',
    }
    requests_mock.get('https://apis.druva.com/realize/ransomwarerecovery/v1/snapshots/resource/28604/range/233',
                      json={'snapshots': [snapshot]})
    client = Client(
        base_url='https://apis.druva.com/',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'},
    )
    result = Druva_ListQuarantine_Snapshots_Command(client, '28604', '233')
    assert result[1]["Druva.quarantinedSnapshots(val.snapshotID == obj.snapshotID)"] == [snapshot]
def test_Druva_DeleteQuarantined_Snapshots_Command(requests_mock):
    """Deleting a quarantined snapshot returns the raw API payload."""
    from Druva import Client, Druva_DeleteQuarantined_Snapshots_Command
    snapshot_id = 'TW9uIE1heSAgNCAxNTowMjo0MSAyMDIw'
    requests_mock.delete(
        'https://apis.druva.com/realize/ransomwarerecovery/v1/'
        'snapshots/resource/28604/range/233/snapshot/TW9uIE1heSAgNCAxNTowMjo0MSAyMDIw',
        json={'snapshotID': snapshot_id})
    client = Client(
        base_url='https://apis.druva.com/',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'},
    )
    _, _, raw = Druva_DeleteQuarantined_Snapshots_Command(client, '28604', '233', snapshot_id)
    assert raw['snapshotID'] == snapshot_id
def test_Druva_SearchbyFileHash_Command(requests_mock):
    """SHA1 search hits are surfaced under the file-hash context key."""
    from Druva import Client, Druva_SearchbyFileHash_Command
    hit = {
        'resourceID': '12345',
        'resourceParent': 'testUser',
        'resourceType': 'endpoint',
    }
    requests_mock.get(
        'https://apis.druva.com/realize/mds/v1/user/files?sha1Checksum=12345abcdef6789ghijkl101112mnop',
        json={'results': [hit]})
    client = Client(
        base_url='https://apis.druva.com/',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'},
    )
    result = Druva_SearchbyFileHash_Command(client, '12345abcdef6789ghijkl101112mnop')
    assert result[1]["Druva.searchEndpointsFileHashResults(val.objectID == obj.objectID)"] == [hit]
def test_Druva_Restore_Endpoint(requests_mock):
    """Starting a restore surfaces the restore job list in context."""
    from Druva import Client, Druva_Restore_Endpoint
    job = {
        'deviceID': '12345',
        'targetDeviceID': '12345',
        'restoreLocation': 'Desktop',
        'userID': '12345',
        'restoreID': '100',
    }
    requests_mock.post('https://apis.druva.com/insync/endpoints/v1/restores', json={'restores': [job]})
    client = Client(
        base_url='https://apis.druva.com/',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'},
    )
    result = Druva_Restore_Endpoint(client, '12345', '12345', 'Desktop')
    assert result[1]["Druva.restoreJobs(val.restoreID == obj.restoreID)"] == [job]
def test_Druva_Restore_Status(requests_mock):
    """Polling a restore job surfaces its status payload in context."""
    from Druva import Client, Druva_Restore_Status
    job = {
        'deviceID': '12345',
        'targetDeviceID': '12345',
        'restoreLocation': 'Desktop',
        'userID': '12345',
        'status': 'Successful',
    }
    requests_mock.get('https://apis.druva.com/insync/endpoints/v1/restores/100', json=job)
    client = Client(
        base_url='https://apis.druva.com/',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'},
    )
    result = Druva_Restore_Status(client, '100')
    assert result[1]["Druva.restoreJobs(val.restoreID == obj.restoreID)"] == job
|
import pytest
from typing import (
Dict,
Optional,
List
)
@pytest.fixture(scope='module')
def matlab_folder_path() -> str:
    """Folder containing the MATLAB-saved workspace fixture files."""
    # Fix: dropped the unused `p` parameter -- pytest resolves fixture
    # parameters by name, so a stray parameter is dead/misleading code.
    return r"..\sources\matlab_saved_workspaces"
@pytest.fixture(scope='module')
def matlab_ws_keys() -> List:
    """Variable names expected inside the saved MATLAB workspaces."""
    # Fix: dropped the unused `keys` parameter (same rationale as above:
    # fixture parameters are fixture requests, not call arguments).
    return ['data', 'storage', 'ans', 'storage_model', 'storage_obj', 'tester']
@pytest.fixture(scope='module')
def verification_key() -> str:
    """Name of the workspace variable used to verify a successful load."""
    return 'tester'
|
import discord
from discord.ext import commands
import sys
import asyncio
import traceback
from contextlib import suppress
class Periodic:
    """Runs an async callback every `time` seconds until stopped."""

    def __init__(self, time, func):
        # func: zero-argument coroutine function; time: interval in seconds.
        self.func = func
        self.time = time
        self.is_started = False
        self._task = None

    async def start(self):
        """Begin periodic execution (idempotent)."""
        if not self.is_started:
            self.is_started = True
            # Start task to call func periodically:
            self._task = asyncio.ensure_future(self._run())

    async def stop(self):
        """Cancel the background task and wait until it has stopped (idempotent)."""
        if self.is_started:
            self.is_started = False
            self._task.cancel()
            with suppress(asyncio.CancelledError):
                await self._task

    async def _run(self):
        while True:
            try:
                await self.func()
            except asyncio.CancelledError:
                # Bug fix: the original bare `except:` also swallowed
                # CancelledError raised while awaiting func(), which could
                # keep this loop alive after stop(). Let cancellation
                # propagate.
                raise
            except Exception:
                ty, error, trace = sys.exc_info()
                traceback.print_exception(ty, error, trace, file=sys.stderr)
            await asyncio.sleep(self.time)
class TemporaryAutoUnprobation:
    """Cog that gradually grants the 'unprobated' role to members in batches."""

    def __init__(self, bot):
        self.bot = bot
        self.timer = None  # lazily created Periodic, see on_ready()
        print('Addon "{}" loaded'.format(self.__class__.__name__))

    async def on_ready(self):
        """Start the once-per-minute batch job the first time the bot is ready."""
        if self.timer is None:  # idiom fix: `== None` -> `is None`
            # Add group to 5 members every minute.
            self.timer = Periodic(60, self._add_some_members)
            await self.timer.start()

    async def _add_some_members(self):
        """Grant the unprobated role to up to 5 unprocessed members, then report."""
        # We assume that 5 users per minute is a safe pace.
        users_to_fix = 5
        msg = "Fixing the roles for {} more users".format(users_to_fix)
        await self.bot.send_message(self.bot.serverlogs_channel, msg)
        fixed = 0
        to_process = 0
        for member in self.bot.get_all_members():
            # idiom fix: `not x in y` -> `x not in y`
            if self.bot.probation_role not in member.roles and self.bot.unprobated_role not in member.roles:
                if fixed < users_to_fix:
                    await self.bot.add_roles(member, self.bot.unprobated_role)
                    fixed += 1
                else:
                    to_process += 1
        if to_process == 0:
            msg = "✅ All users have been processed, we are ready to move on to phase 2"
            await self.bot.send_message(self.bot.serverlogs_channel, msg)
            await self.timer.stop()
        else:
            msg = "ℹ️ We have {} users left to handle".format(to_process)
            await self.bot.send_message(self.bot.serverlogs_channel, msg)
def setup(bot):
    """discord.py extension hook: register this module's cog on *bot*."""
    cog = TemporaryAutoUnprobation(bot)
    bot.add_cog(cog)
|
import re
from aniparser.constants import audio_terms, video_terms, source_terms
# Public API of this module: the compiled regexes used by the parser.
__all__ = (
    "EPISODE_SEASON_REGEX",
    "EPISODE_REGEX",
    "SEASON_REGEX",
    "RESOLUTION_REGEX",
    "CHECKSUM_REGEX",
    "BRACKET_TERMS_REGEX",
    "YEAR_REGEX",
    "EXTENSION_REGEX",
    "RELEASE_VERSION_REGEX",
    "RELEASE_GROUP_REGEX",
    "EMPTY_BRACKETS_REGEX",
    "AUDIO_TERM_REGEX",
    "VIDEO_TERM_REGEX",
    "SOURCE_TERM_REGEX",
    "ALTERNATE_TITLE_REGEX",
)
# Unfortunately there's too many combinations that can fuck up with titles too
# we need to handle episodes/seasons in steps
# Combined season+episode markers such as "S01E02", "1x02" or "01ep02".
EPISODE_SEASON_REGEX = re.compile(
    r"s?(?P<season>\d+)(e|ep|sp|x)(?P<episode>\d+)", flags=re.IGNORECASE
)
# Stand-alone episode number, optionally prefixed with e/ep/"episode ".
EPISODE_REGEX = re.compile(
    r"(\/| )(e|ep|episode )?(?P<episode>\d+)[ \.\-\/]",
    flags=re.IGNORECASE,
)
# Stand-alone season marker: "Season 2", " s2", optionally parenthesized.
SEASON_REGEX = re.compile(r"\(?(Season| s) ?(?P<season>\d+)\)?", flags=re.IGNORECASE)
# "720p", "1280x720" / "1280×720", or a bare bracketed height like "[720]".
RESOLUTION_REGEX = re.compile(
    r"(?P<pos_height>\d{3,4})([p]|[x\u00D7](?P<height>\d{3,4}))|\[(?P<alone_height>\d{3,4})\]",
    flags=re.IGNORECASE,
)
# 8-hex-digit CRC32 checksum in brackets/parens, e.g. "[ABCD1234]".
CHECKSUM_REGEX = re.compile(
    r"[ -]?[\[(](?P<checksum>[A-Fa-f0-9]{8})[\])][ -]?", flags=re.IGNORECASE
)
# Any word-ish content inside square brackets (release tags and the like).
BRACKET_TERMS_REGEX = re.compile(r"\[(?P<terms>[\w \-_.]*)\]", flags=re.IGNORECASE)
# A 4-digit year delimited by brackets, parens, spaces or dashes.
YEAR_REGEX = re.compile(r"[\[\( \-](?P<year>\d{4})[\]\) \-]", flags=re.IGNORECASE)
# File extension, either ".mkv"-style or a bracketed form like ".[ext]".
EXTENSION_REGEX = re.compile(r"(\.(?:(?:[a-z]+)|\[\w+\]))")
# Release revision markers such as "v2".
RELEASE_VERSION_REGEX = re.compile(r"(?P<release>v\d+)", flags=re.IGNORECASE)
# Leading "[Group]" / "(Group)" at the start of the (slash-prefixed) name.
RELEASE_GROUP_REGEX = re.compile(r"^\/[\[\(](?P<release_group>[\w\s\- ]+)[\]\)]")
# Brackets/parens left empty (or only separators) after other captures.
EMPTY_BRACKETS_REGEX = re.compile(r"[\[\(][_\-. ]*[\]\)]")
# Known audio codec/channel terms followed by a non-word character.
AUDIO_TERM_REGEX = re.compile(
    f"({'|'.join(audio_terms)})" + r"(?=[^\w])", flags=re.IGNORECASE
)
# Known video codec/quality terms followed by a non-word character.
VIDEO_TERM_REGEX = re.compile(
    f"({'|'.join(video_terms)})" + r"(?=[^\w])", flags=re.IGNORECASE
)
# Known media source terms (BD, WEB, ...) followed by a non-word character.
SOURCE_TERM_REGEX = re.compile(
    f"({'|'.join(source_terms)})" + r"(?=[^\w])", flags=re.IGNORECASE
)
# Parenthesized alternate title, e.g. "(Some Other Name)".
ALTERNATE_TITLE_REGEX = re.compile(
    r"\((?P<alternate_title>[\w'\- ]+)\)", flags=re.IGNORECASE
)
|
from libAnt.core import lazyproperty
from libAnt.profiles.profile import ProfileMessage
class HeartRateProfileMessage(ProfileMessage):
    """Profile message decoded from an ANT+ heart-rate monitor."""

    def __init__(self, msg, previous):
        super().__init__(msg, previous)

    def __str__(self):
        return str(self.heartrate)

    @lazyproperty
    def heartrate(self):
        """Instantaneous heart rate, meant for direct display without
        further interpretation; 0x00 when invalid."""
        return self.msg.content[7]
from typing import Any, Dict, Generator, Optional
import pytest
from xpresso import App, Path, Security
from xpresso.security import OAuth2PasswordBearer
from xpresso.testclient import TestClient
from xpresso.typing import Annotated
# Security scheme under test: bearer-token extraction from the Authorization
# header. auto_error=False makes it yield None instead of raising a 401.
oauth2_scheme = OAuth2PasswordBearer(
    tokenUrl="/token",
    description="OAuth2PasswordBearer security scheme",
    auto_error=False,
)
async def read_items(token: Annotated[Optional[str], Security(oauth2_scheme)]):
    """Echo the bearer token, or prompt for account creation when absent."""
    if token is not None:
        return {"token": token}
    return {"msg": "Create an account first"}
# Single-route app exercising the security dependency above.
app = App([Path("/items/", get=read_items)])
@pytest.fixture
def client() -> Generator[TestClient, None, None]:
    """Yield a TestClient bound to the module-level app.

    Entering the client as a context manager runs app startup/shutdown.
    """
    with TestClient(app) as client:
        yield client
# Expected OpenAPI document: the scheme must appear both under
# components.securitySchemes and in the operation's `security` list.
openapi_schema: Dict[str, Any] = {
    "openapi": "3.0.3",
    "info": {"title": "API", "version": "0.1.0"},
    "paths": {
        "/items/": {
            "get": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                    }
                },
                "security": [{"OAuth2PasswordBearer": []}],
            }
        }
    },
    "components": {
        "securitySchemes": {
            "OAuth2PasswordBearer": {
                "type": "oauth2",
                "flows": {"password": {"scopes": {}, "tokenUrl": "/token"}},
                "description": "OAuth2PasswordBearer security scheme",
            }
        }
    },
}
def test_openapi_schema(client: TestClient):
    """The generated schema matches the hand-written expectation."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema
def test_no_token(client: TestClient):
    """Without an Authorization header the handler returns the prompt message."""
    resp = client.get("/items")
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"msg": "Create an account first"}
def test_token(client: TestClient):
    """A Bearer token is extracted and echoed back."""
    resp = client.get("/items", headers={"Authorization": "Bearer testtoken"})
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"token": "testtoken"}
def test_incorrect_token(client: TestClient):
    """A non-Bearer Authorization scheme is treated as no token (auto_error=False)."""
    resp = client.get("/items", headers={"Authorization": "Notexistent testtoken"})
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"msg": "Create an account first"}
|
"""
Date: 08/22/2021
Name: Rio Weil
Title: ui.py
Description: Frontend code for minesweeper game, responsible for the UI through the console.
"""
import model
import errors
class Game:
    """Console front-end for minesweeper.

    Owns a model.GameState and drives the render/input loop. Invalid setup
    input restarts a brand-new Game with an explanatory message.

    Fix: the original placed each method's description in a bare string
    *above* the method, where it was a dead expression statement (the first
    one even became the class docstring by accident). They are now proper
    method docstrings.
    """

    def __init__(self):
        """Prompt for board parameters, build the GameState, run the loop.

        Each recognized setup error prints a hint and restarts the game.
        """
        # NOTE(review): restarting via a recursive Game() call grows the
        # stack slightly on every restart; acceptable for interactive play.
        try:
            width, height, numbombs = self.initialparams()
            self.gamestate = model.GameState(width, height, numbombs)
            self.gameloop()
        except IndexError:
            print("It looks like you didn't format your input correctly! Restarting...")
            Game()
        except ValueError:
            print("It looks like your inputs weren't integers! Restarting...")
            Game()
        except errors.ZeroException:
            print("It looks like your board has size zero! Restarting...")
            Game()
        except errors.TooManyBombsException:
            print("It looks like you have too many bombs for the size of your board! Restarting...")
            Game()

    def initialparams(self):
        """Ask for board dimensions and bomb count; return them as ints."""
        params = input("Welcome to Minesweeper!\nEnter the board width, board height, and number of bombs in the form width height numbombs\n")
        split_params = params.split()
        return int(split_params[0]), int(split_params[1]), int(split_params[2])

    def gameloop(self):
        """Main loop: render, process a command, stop on win or loss."""
        while True:
            self.render()
            self.handle_input()
            if self.gamestate.gameover:
                self.reveal_all()
                self.render()
                print("You lose!")
                self.play_again()
                break
            if self.is_win():
                self.reveal_all()
                self.render()
                print("You win!")
                self.play_again()
                break

    def handle_input(self):
        """Read and execute one command (check/flag/fold); re-prompt on errors."""
        command = input("Enter command\ncheck row col - reveals space at (col, row)\nflag row col - places/removes flag at (col, row)\nfold - to give up\n")
        split_command = command.split()
        try:
            if split_command[0] == "check":
                row = int(split_command[1]) - 1
                col = int(split_command[2]) - 1
                if (row < 0) or (row > self.gamestate.height - 1) or (col < 0) or (col > self.gamestate.width - 1):
                    print("Coordinates outside of board! Try again...")
                    self.handle_input()
                else:
                    self.gamestate.check_space(row, col)
            elif split_command[0] == "flag":
                row = int(split_command[1]) - 1
                col = int(split_command[2]) - 1
                if (row < 0) or (row > self.gamestate.height - 1) or (col < 0) or (col > self.gamestate.width - 1):
                    print("Coordinates outside of board! Try again...")
                    self.handle_input()
                else:
                    self.gamestate.set_flag(row, col)
            elif split_command[0] == "fold":
                self.gamestate.gameover = True
            else:
                print("Invalid command! Try again...")
                self.handle_input()
        except IndexError:
            print("It looks like you didn't format your input correctly! Try again...")
            self.handle_input()
        except ValueError:
            print("It looks like your inputs weren't integers! Try again...")
            self.handle_input()

    def render(self):
        """Print the current game board."""
        print(self.gamestate.render())

    def is_win(self):
        """Return True if the game is in a winning state."""
        return self.gamestate.is_win()

    def is_lose(self):
        """Return True if the game is in a losing state."""
        return self.gamestate.is_lose()

    def reveal_all(self):
        """Reveal the entire game board."""
        self.gamestate.reveal_all()

    def play_again(self):
        """Ask whether to start a new game; start one on 'y'."""
        answer = input("Would you like to play again? (y/n)\n")
        if answer == "y":
            Game()
        elif answer == "n":
            return
        else:
            print("Invalid command! Try again...")
            self.play_again()
|
import re
from itertools import zip_longest
def text2sentences(txt, equal_to_space=["\n"]):
    """Split *txt* into sentences on '. ' boundaries.

    A period is treated as a sentence end only when followed by whitespace
    and a character that is not lowercase (so abbreviations such as
    "i.e. something" or names like ".NET" stay within one sentence).
    Characters in *equal_to_space* (never mutated) and whitespace runs are
    normalized to single spaces inside each sentence.
    """
    # Collect indices one past each sentence-terminating '.'.
    inds = []
    for i in range(len(txt)):
        if txt[i] == '.':
            if i + 1 == len(txt):
                inds.append(i + 1)
            elif txt[i + 1].isspace():
                if i + 2 < len(txt) and (not txt[i + 2].islower() or txt[i + 2] == '.'):
                    inds.append(i + 1)
    if len(inds) > 0:
        answer = [txt[i:j - 1] for i, j in zip_longest([0] + inds[:-1], inds)]
        if inds[-1] != len(txt):
            answer += [txt[inds[-1] + 1:]]
    else:
        answer = [txt]
    # Bug fix: the original built '\s*[...\s*]', which placed the trailing
    # '*' INSIDE the character class, so literal '*' characters were wrongly
    # consumed as separators. Also switched to raw strings for the escapes.
    r_template = r'\s*[' + "".join(equal_to_space + [r"\s"]) + r']\s*'
    answer = [" ".join(v for v in re.split(r_template, t) if len(v) > 0) for t in answer]
    return answer
def split_by_words(sentence, words):
    """Split *sentence* into phrases, using any token found in *words* as a
    separator; the separator tokens themselves are dropped."""
    phrases = []
    current = []
    for token in sentence.split():
        if token in words:
            if current:
                phrases.append(' '.join(current))
                current = []
        else:
            current.append(token)
    if current:
        phrases.append(' '.join(current))
    return phrases
def sentence_split(sentence, separators=",;!?", stop_words=None):
    """Split *sentence* on any of *separators* (plus trailing whitespace),
    then further split each phrase on *stop_words* when provided."""
    phrases = re.split(fr"[{separators}]\s*", sentence)
    if stop_words is not None:
        pieces = []
        for phrase in phrases:
            pieces.extend(split_by_words(phrase, stop_words))
        phrases = pieces
    return phrases
|
import unittest
from request_parsers import keystroke
class KeystrokeTest(unittest.TestCase):
    """Tests for keystroke.parse_keystroke: happy paths, defaulted optional
    fields, and validation of the mandatory `code` field."""
    # Intentionally violating style conventions so that we can parallel the
    # self.assertEqual method.
    # pylint: disable=no-self-use
    # pylint: disable=invalid-name
    def assertKeystrokesEqual(self, expected, actual):
        """Equality assertion with a readable message (mirrors assertEqual)."""
        if expected != actual:
            raise AssertionError('%s != %s' % (expected, actual))

    def test_parses_valid_keystroke_message(self):
        self.assertKeystrokesEqual(
            keystroke.Keystroke(left_meta_modifier=False,
                                right_meta_modifier=False,
                                left_alt_modifier=False,
                                right_alt_modifier=False,
                                left_shift_modifier=False,
                                right_shift_modifier=False,
                                left_ctrl_modifier=False,
                                right_ctrl_modifier=False,
                                key='A',
                                code='KeyA'),
            keystroke.parse_keystroke({
                'metaLeft': False,
                'metaRight': False,
                'shiftLeft': False,
                'shiftRight': False,
                'altLeft': False,
                'altRight': False,
                'ctrlLeft': False,
                'ctrlRight': False,
                'key': 'A',
                'code': 'KeyA',
            }))

    def test_parses_valid_keystroke_message_with_all_modifiers_pushed(self):
        self.assertKeystrokesEqual(
            keystroke.Keystroke(left_meta_modifier=True,
                                right_meta_modifier=True,
                                left_alt_modifier=True,
                                right_alt_modifier=True,
                                left_shift_modifier=True,
                                right_shift_modifier=True,
                                left_ctrl_modifier=True,
                                right_ctrl_modifier=True,
                                key='A',
                                code='KeyA'),
            keystroke.parse_keystroke({
                'metaLeft': True,
                'metaRight': True,
                'shiftLeft': True,
                'shiftRight': True,
                'altLeft': True,
                'altRight': True,
                'ctrlLeft': True,
                'ctrlRight': True,
                'key': 'A',
                'code': 'KeyA',
            }))

    def test_parses_left_ctrl_key(self):
        self.assertKeystrokesEqual(
            keystroke.Keystroke(left_meta_modifier=False,
                                right_meta_modifier=False,
                                left_alt_modifier=False,
                                right_alt_modifier=False,
                                left_shift_modifier=False,
                                right_shift_modifier=False,
                                left_ctrl_modifier=True,
                                right_ctrl_modifier=False,
                                key='Control',
                                code='ControlLeft'),
            keystroke.parse_keystroke({
                'metaLeft': False,
                'metaRight': False,
                'shiftLeft': False,
                'shiftRight': False,
                'altLeft': False,
                'altRight': False,
                'ctrlLeft': True,
                'ctrlRight': False,
                'key': 'Control',
                'code': 'ControlLeft',
            }))

    def test_parses_right_ctrl_key(self):
        self.assertKeystrokesEqual(
            keystroke.Keystroke(left_meta_modifier=False,
                                right_meta_modifier=False,
                                left_alt_modifier=False,
                                right_alt_modifier=False,
                                left_shift_modifier=False,
                                right_shift_modifier=False,
                                left_ctrl_modifier=False,
                                right_ctrl_modifier=True,
                                key='Control',
                                code='ControlRight'),
            keystroke.parse_keystroke({
                'metaLeft': False,
                'metaRight': False,
                'shiftLeft': False,
                'shiftRight': False,
                'altLeft': False,
                'altRight': False,
                'ctrlLeft': False,
                'ctrlRight': True,
                'key': 'Control',
                'code': 'ControlRight',
            }))

    def test_parses_minimal_valid_keystroke_with_defaults(self):
        self.assertKeystrokesEqual(
            keystroke.Keystroke(left_meta_modifier=False,
                                right_meta_modifier=False,
                                left_alt_modifier=False,
                                right_alt_modifier=False,
                                left_shift_modifier=False,
                                right_shift_modifier=False,
                                left_ctrl_modifier=False,
                                right_ctrl_modifier=False,
                                key='',
                                code='KeyA'),
            keystroke.parse_keystroke({
                'code': 'KeyA',
            }))

    def test_parses_and_merges_valid_keystroke_message_with_defaults(self):
        self.assertKeystrokesEqual(
            keystroke.Keystroke(left_meta_modifier=True,
                                right_meta_modifier=False,
                                left_alt_modifier=True,
                                right_alt_modifier=False,
                                left_shift_modifier=False,
                                right_shift_modifier=True,
                                left_ctrl_modifier=False,
                                right_ctrl_modifier=False,
                                key='A',
                                code='KeyA'),
            keystroke.parse_keystroke({
                'metaLeft': True,
                'shiftLeft': False,
                'shiftRight': True,
                'altLeft': True,
                'ctrlRight': False,
                'key': 'A',
                'code': 'KeyA',
            }))

    def test_rejects_missing_mandatory_code_value(self):
        with self.assertRaises(keystroke.MissingFieldErrorError):
            keystroke.parse_keystroke({
                'metaLeft': False,
                'metaRight': False,
                'shiftLeft': False,
                'shiftRight': False,
                'altLeft': False,
                'altRight': False,
                'ctrlLeft': False,
                'ctrlRight': False,
                'key': 'A',
            })

    def test_rejects_float_keycode_value(self):
        with self.assertRaises(keystroke.InvalidKeyCodeError):
            keystroke.parse_keystroke({
                'metaLeft': False,
                'metaRight': False,
                'shiftLeft': False,
                'shiftRight': False,
                'altLeft': False,
                'altRight': False,
                'ctrlLeft': False,
                'ctrlRight': False,
                'key': 'A',
                'code': 1.25,
            })

    def test_rejects_too_long_code_value(self):
        with self.assertRaises(keystroke.InvalidKeyCodeError):
            keystroke.parse_keystroke({
                'metaLeft': False,
                'metaRight': False,
                'shiftLeft': False,
                'shiftRight': False,
                'altLeft': False,
                'altRight': False,
                'ctrlLeft': False,
                'ctrlRight': False,
                'key': 'A',
                'code': 'A' * 31,
            })
class KeystrokeWithInvalidValuesTest(unittest.TestCase):
    """parse_keystroke must reject non-boolean modifier values."""
    def test_rejects_invalid_meta_modifier(self):
        with self.assertRaises(keystroke.InvalidModifierKeyError):
            keystroke.parse_keystroke({
                'metaLeft': 'banana',
                'metaRight': False,
                'shiftLeft': False,
                'shiftRight': False,
                'altLeft': False,
                'altRight': False,
                'ctrlLeft': False,
                'ctrlRight': False,
                'key': 'A',
                'code': 'KeyA',
            })

    def test_rejects_invalid_alt_left_modifier(self):
        with self.assertRaises(keystroke.InvalidModifierKeyError):
            keystroke.parse_keystroke({
                'metaLeft': False,
                'metaRight': False,
                'shiftLeft': False,
                'shiftRight': False,
                'altLeft': 'banana',
                'altRight': False,
                'ctrlLeft': False,
                'ctrlRight': False,
                'key': 'A',
                'code': 'KeyA',
            })

    def test_rejects_invalid_alt_right_modifier(self):
        with self.assertRaises(keystroke.InvalidModifierKeyError):
            keystroke.parse_keystroke({
                'metaLeft': False,
                'metaRight': False,
                'shiftLeft': False,
                'shiftRight': False,
                'altLeft': False,
                'altRight': 'banana',
                'ctrlLeft': False,
                'ctrlRight': False,
                'key': 'A',
                'code': 'KeyA',
            })

    def test_rejects_invalid_shift_modifier(self):
        with self.assertRaises(keystroke.InvalidModifierKeyError):
            keystroke.parse_keystroke({
                'metaLeft': False,
                'metaRight': False,
                'shiftLeft': 'banana',
                'shiftRight': False,
                'altLeft': False,
                'altRight': False,
                'ctrlLeft': False,
                'ctrlRight': False,
                'key': 'A',
                'code': 'KeyA',
            })

    def test_rejects_invalid_ctrl_modifier(self):
        with self.assertRaises(keystroke.InvalidModifierKeyError):
            keystroke.parse_keystroke({
                'metaLeft': False,
                'metaRight': False,
                'shiftLeft': False,
                'shiftRight': False,
                'altLeft': False,
                'altRight': False,
                'ctrlLeft': 'banana',
                'ctrlRight': False,
                'key': 'A',
                'code': 'KeyA',
            })
|
import multiprocessing
import concurrent.futures
import time
from typing import Callable
# Per-task sleep durations (seconds) shared by all the demos below.
S_T = [4, 2, 7, 4, 3, 10, 11, 3, 2, 1]
def task(args: tuple = ('', 1)) -> tuple:
    """Sleep for args[1] seconds and report completion.

    args -- (name, seconds) pair identifying the task and its duration.
    Returns the tuple ('Task', name, seconds, 'finished.').

    Fix: the return annotation claimed `str`, but the comma-separated
    return statement builds a tuple.
    """
    name, sec = args
    print('Task', name, 'sleeps for', sec)
    time.sleep(sec)
    return 'Task', name, sec, 'finished.'
def run_process(n: int) -> None:
    """Spawn n Processes, each running task((str(i), 1)), and wait for all."""
    workers = [multiprocessing.Process(target=task, args=[(str(i), 1)]) for i in range(n)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
def run_pool_1(n: int) -> None:
    """Demonstrate Pool.apply_async: submit n tasks, then busy-poll and print
    each result as soon as it becomes ready (not in submission order)."""
    with multiprocessing.Pool(processes=n) as pool:
        global S_T
        # pool.apply would give sequential execution (like calling task() in
        # a for loop); f would then hold plain results:
        # f = [pool.apply(task, [(i, S_T[i])]) for i in range(n)]
        # print(*f, sep='\n')
        # apply_async instead pushes tasks onto a SimpleQueue from which the
        # worker processes pick them up; f holds AsyncResult handles.
        f = [pool.apply_async(task, [(i, S_T[i])]) for i in range(n)]
        # pool.join() here would still block on the longest task.
        # close() stops accepting new work; workers finish in the background
        # while we move forward with execution.
        pool.close()
        # Emulate doing something as soon as each result arrives.
        # NOTE(review): f is mutated (del) while being iterated; it works
        # because the outer while restarts the scan, but it is fragile.
        while f:
            for p in f:
                if p.ready():
                    print(p.get())
                    del f[f.index(p)]
def run_pool_2(n: int) -> None:
    """Map task over the enumerated sleep table with a Pool of n workers."""
    with multiprocessing.Pool(processes=n) as pool:
        global S_T
        results = pool.map(task, enumerate(S_T))
        print(*results, sep='\n')
def run_executor_1(n: int) -> None:
    """Submit n tasks to a ProcessPoolExecutor and print results as each
    completes (as_completed yields in completion order, not submission order)."""
    with concurrent.futures.ProcessPoolExecutor(max_workers=n) as pool:
        global S_T
        futures = [pool.submit(task, (str(i), S_T[i])) for i in range(n)]
        # Iterating the futures list directly and calling .result() would
        # block on the longest-running task; as_completed avoids that.
        for done in concurrent.futures.as_completed(futures):
            print(done.result())
def run_executor_2(n: int) -> None:
    """Map task across the sleep table with a process pool of n workers.

    Executor.map yields results in submission order; any exception raised
    inside a task is re-raised when its value is retrieved from the iterator.
    """
    # Bug fix: max_workers was hard-coded to 10, silently ignoring `n`.
    with concurrent.futures.ProcessPoolExecutor(max_workers=n) as pool:
        global S_T
        res = pool.map(task, enumerate(S_T))
        print(*res, sep='\n')
def timeme(func: Callable) -> None:
    """Run *func* with no arguments and print its wall-clock duration."""
    t0 = time.perf_counter()
    func()
    elapsed = time.perf_counter() - t0
    print('Run time: ', round(elapsed, 3), 'second(s)')
def main():
    """Entry point: the demos are left commented out -- uncomment one to run."""
    print('Usage: uncomment funct to run example.')
    # Pick one of:
    # run_process(10)
    # run_pool_1(10)
    # run_pool_2(10)
    # run_executor_1(10)
    # run_executor_2(10)
# Time the (currently inert) main entry point when run as a script.
if __name__ == "__main__":
    timeme(main)
#!/usr/bin/env python
#
import cgi
import wsgiref.handlers
import logging
import json
from actingweb import actor
from actingweb import auth
from actingweb import config
from on_aw import on_aw_bot
import webapp2
class MainPage(webapp2.RequestHandler):
    """Webhook endpoint relaying bot POST callbacks to the on_aw layer."""

    def post(self, path):
        """Handles POST callbacks for bots.

        404 when no bot token is configured or the handler rejects the call;
        otherwise the handler's status code (100-998) or 204 is returned.
        """
        Config = config.config()
        if not Config.bot['token'] or len(Config.bot['token']) == 0:
            self.response.set_status(404)
            return
        check = auth.auth(id=None)
        check.oauth.token = Config.bot['token']
        ret = on_aw_bot.on_bot_post(req=self, auth=check, path=path)
        if ret and 100 <= ret < 999:
            self.response.set_status(ret)
        elif ret:
            self.response.set_status(204)
        else:
            self.response.set_status(404)
# WSGI app: routes /bot and /bot/<path> to MainPage.
application = webapp2.WSGIApplication([
    webapp2.Route(r'/bot<:/?><path:(.*)>', MainPage, name='MainPage'),
], debug=True)
|
"""
Copyright (c) 2019, Pyxidr and/or its affiliates. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Pyxidr or the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
===============================================================================
-- utility_tables.py --
Provides subroutines for populating SQL database with utility tables to
facilitate some analyses.
To do:
- None at the moment
"""
import datetime
import logging
import pandas as pd
from modules.utils import get_season
logger = logging.getLogger('client_data')
class UtilTablesError(Exception):
    """Exception type raised for errors while building the utility tables."""
    pass
def create_utility_tables(conn, start_date, end_date, start_peak, end_peak):
    '''
    Populates tables used to provide attributes to days and hours.

    Args:
        conn: open DB connection (must support cursor() and commit())
        start_date, end_date: inclusive date range to populate
        start_peak, end_peak: first and last on-peak hour of the day
    '''
    # Typo fix: log message previously read "talbes".
    logger.info('. Creating utility tables')
    cursor = conn.cursor()
    _create_hourly_periods(conn, cursor, start_date, end_date,
                           start_peak, end_peak)
    _create_daily_periods(conn, cursor, start_date, end_date)
def _create_hourly_periods(conn, cursor, start_date, end_date,
                           start_peak, end_peak):
    '''
    Populates the hourly attributes table (one row per hour per day).
    '''
    logger.info('.. Creating hourly table')
    # Start from a clean table.
    cursor.execute('delete from tbl_util_hourly_periods;')
    conn.commit()
    records = []
    for day in pd.date_range(start_date, end_date):
        # NOTE(review): weekday() < 6 counts Saturday as a potential
        # on-peak day (Mon=0 .. Sun=6) -- confirm this is intended.
        for hour in range(24):
            on_peak = (day.weekday() < 6
                       and start_peak <= hour <= end_peak)
            records.append({
                "date": day,
                "hour": hour + 1,  # 1-based hour-of-day
                "datehour": datetime.datetime(day.year, day.month, day.day, hour),
                "weekhour": day.weekday() * 24 + hour + 1,  # 1-based hour-of-week
                "periodtype": 'On-Peak' if on_peak else 'Off-Peak',
            })
    pd.DataFrame(records).to_sql('tbl_util_hourly_periods', conn,
                                 if_exists='append', index=False)
    conn.commit()
def _create_daily_periods(conn, cursor, start_date, end_date):
    '''
    Populates the daily attributes table (one row per day).
    '''
    logger.info('.. Creating daily table')
    # Start from a clean table.
    cursor.execute('delete from tbl_util_daily_periods;')
    conn.commit()
    records = []
    for day in pd.date_range(start_date, end_date):
        # NOTE(review): WeekID combines the calendar year with the ISO week
        # number; around New Year these can disagree -- verify downstream use.
        records.append({
            "Date": day,
            "Year": day.year,
            "Month": day.month,
            "Week": day.isocalendar()[1],
            "Weekday": day.weekday() + 1,  # 1=Monday .. 7=Sunday
            "QuarterID": "Q{}".format(day.year * 10 + (day.month - 1) // 3 + 1),
            "MonthID": "M{}".format(day.year * 100 + day.month),
            "WeekID": "W{}".format(day.year * 100 + day.isocalendar()[1]),
            "Season": get_season(day)
        })
    pd.DataFrame(records).to_sql('tbl_util_daily_periods', conn,
                                 if_exists='append', index=False)
    conn.commit()
|
"""Add cascades
Revision ID: def46e09c9ef
Revises: 3efdb537f933
Create Date: 2017-12-28 11:44:14.263012
"""
# revision identifiers, used by Alembic.
revision = 'def46e09c9ef'  # this migration
down_revision = '3efdb537f933'  # parent migration
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Replace plain FKs with ON UPDATE/ON DELETE CASCADE versions."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the original (non-cascading) foreign keys first ...
    op.drop_constraint(u'RepositoryApp2languages_ibfk_2', 'RepositoryApp2languages', type_='foreignkey')
    op.drop_constraint(u'RepositoryApp2languages_ibfk_1', 'RepositoryApp2languages', type_='foreignkey')
    # ... then recreate them with cascading update/delete behaviour.
    op.create_foreign_key(None, 'RepositoryApp2languages', 'RepositoryApps', ['repository_app_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
    op.create_foreign_key(None, 'RepositoryApp2languages', 'Languages', ['language_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
    op.drop_constraint(u'RepositoryAppCheckUrls_ibfk_1', 'RepositoryAppCheckUrls', type_='foreignkey')
    op.create_foreign_key(None, 'RepositoryAppCheckUrls', 'RepositoryApps', ['repository_app_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
    op.drop_constraint(u'RepositoryAppFailures_ibfk_1', 'RepositoryAppFailures', type_='foreignkey')
    op.create_foreign_key(None, 'RepositoryAppFailures', 'RepositoryAppCheckUrls', ['repository_app_check_url_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
    # ### end Alembic commands ###
def downgrade():
    """Restore the original non-cascading foreign keys."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): drop_constraint(None, ...) relies on Alembic resolving an
    # unnamed constraint; on MySQL the auto-generated FK names must usually be
    # supplied explicitly or these drops will fail -- verify before shipping.
    op.drop_constraint(None, 'RepositoryAppFailures', type_='foreignkey')
    op.create_foreign_key(u'RepositoryAppFailures_ibfk_1', 'RepositoryAppFailures', 'RepositoryAppCheckUrls', ['repository_app_check_url_id'], ['id'])
    op.drop_constraint(None, 'RepositoryAppCheckUrls', type_='foreignkey')
    op.create_foreign_key(u'RepositoryAppCheckUrls_ibfk_1', 'RepositoryAppCheckUrls', 'RepositoryApps', ['repository_app_id'], ['id'])
    # NOTE(review): two unnamed drops on the same table -- both of the
    # cascading FKs created in upgrade() must be dropped here.
    op.drop_constraint(None, 'RepositoryApp2languages', type_='foreignkey')
    op.drop_constraint(None, 'RepositoryApp2languages', type_='foreignkey')
    op.create_foreign_key(u'RepositoryApp2languages_ibfk_1', 'RepositoryApp2languages', 'Languages', ['language_id'], ['id'])
    op.create_foreign_key(u'RepositoryApp2languages_ibfk_2', 'RepositoryApp2languages', 'RepositoryApps', ['repository_app_id'], ['id'])
    # ### end Alembic commands ###
|
from __future__ import absolute_import
# Select the Qt4 backend before any UI-dependent pychron import.
from pychron.core.ui import set_toolkit
set_toolkit('qt4')
# Point pychron at a dedicated unit-test directory tree.
from pychron.paths import paths
paths.build('_unittest')
#build_directories(paths)
# Configure logging for the test run.
from pychron.core.helpers.logger_setup import logging_setup
logging_setup('arar')
from test.database import isotope_manager_factory
__author__ = 'ross'
import unittest
class ArArAgeCase(unittest.TestCase):
    """Integration tests for Ar/Ar age computation on one Minna Bluff analysis.

    All tests share a single analysis object built once in setUpClass from the
    'pychrondata_minnabluff' test database.
    """
    @classmethod
    def setUpClass(cls):
        # Connect to the test database once for the whole test case.
        cls.isotope_man = isotope_manager_factory(name='pychrondata_minnabluff')
        cls.isotope_man.db.connect()
        # NOTE(review): this fixture is never selected below; it appears to be
        # kept for reference only.
        expected_filtered_56954_01A = dict(Ar40bs_c=2.20831,
                                           Ar39bs_c=4.96090e-3,
                                           Ar38bs_c=0.0014621,
                                           Ar37bs_c=1.34848e-3,
                                           Ar36bs_c=7.40844e-3,
                                           Ar40bs=7.50865e-5,
                                           bs_fil=[44],
                                           Ar40_fil=[1, 9, 43],
                                           Ar39_fil=[11, 16],
                                           Ar40bl=0.0328504,
                                           #Ar39_Ar40=0.002255
                                           Ar39_Ar40=1 / 443.1534341,
                                           Ar39_Ar40e=0.000023
                                           )
        expected_nonfiltered = dict(Ar40bs_c=2.20908914,
                                    Ar39bs_c=4.96081e-3,
                                    Ar38bs_c=0.0014706,
                                    Ar37bs_c=0.001349,
                                    Ar36bs_c=0.0074384,
                                    Ar40bs=7.50865e-5,
                                    bs_fil=[],
                                    Ar40_fil=[])
        expected_filtered = dict(
            Ar40bs_c=0.159394,
            Ar40bs_ce=2.67938e-4,
            Ar39bs_c=1.58288e-2,
            Ar39bs_ce=7.9423e-5,
            Ar40bl_c=0.129651,
            Ar40bl_ce=0.003811,
            Ar40bl=0.031794,
            Ar40ble=0.0038013,
            Ar39_Ar40=0.1226,
            Ar39_Ar40e=0.0037,
            ar37df=3.348
        )
        # NOTE(review): the first assignment is immediately overwritten, so
        # `expected_nonfiltered` is effectively dead code here.
        cls.expected = expected_nonfiltered
        cls.expected = expected_filtered
        # NOTE(review): `expected_filtered` lacks keys referenced by some tests
        # below (e.g. 'bs_fil', 'Ar40_fil', 'Ar39_fil', 'Ar38bs_c'); those
        # tests will raise KeyError unless the fixture is extended -- verify.
        man = cls.isotope_man
        db = man.db
        with db.session_ctx():
            ans, tc = db.get_sample_analyses(['AF-72', ], ['Minna Bluff'])
            # Index 4 selects a specific analysis of sample AF-72.
            an = man.make_analysis(ans[4])
            cls.analysis = an
    def test_Ar40blank(self):
        an = self.analysis
        iso = an.isotopes['Ar40'].blank
        self.assertAlmostEqual(iso.value,
                               self.expected['Ar40bl'], 6)
    def test_Ar40blank_error(self):
        an = self.analysis
        iso = an.isotopes['Ar40'].blank
        self.assertAlmostEqual(iso.error,
                               self.expected['Ar40ble'], 6)
    def test_Ar40bl_e(self):
        an = self.analysis
        iso = an.isotopes['Ar40']
        self.assertAlmostEqual(iso.get_non_detector_corrected_value().std_dev,
                               self.expected['Ar40bl_ce'], 6)
    def test_Ar40_e(self):
        an = self.analysis
        iso = an.isotopes['Ar40']
        self.assertAlmostEqual(iso.get_baseline_corrected_value().std_dev,
                               self.expected['Ar40bs_ce'], 6)
    def test_Ar39_e(self):
        an = self.analysis
        iso = an.isotopes['Ar39']
        self.assertAlmostEqual(iso.get_baseline_corrected_value().std_dev,
                               self.expected['Ar39bs_ce'], 6)
    def test_analysis_step(self):
        an = self.analysis
        self.assertEqual(an.step, 'E')
    def test_ar37decayfactor(self):
        an = self.analysis
        self.assertAlmostEqual(an.ar37decayfactor,
                               self.expected['ar37df'], 3)
    def test_Ar39Ar40_e(self):
        # NOTE(review): exact equality on a float-derived std_dev; consider
        # assertAlmostEqual if this proves flaky.
        an = self.analysis
        a40 = an.isotopes['Ar40'].get_intensity()
        a39 = an.isotopes['Ar39'].get_intensity()#*an.ar39decayfactor
        self.assertEqual((a39 / a40).std_dev, self.expected['Ar39_Ar40e'])
    def test_Ar39Ar40_v(self):
        an = self.analysis
        a40 = an.isotopes['Ar40'].get_intensity()
        a39 = an.isotopes['Ar39'].get_intensity() * an.ar39decayfactor
        self.assertEqual((a39 / a40).nominal_value, self.expected['Ar39_Ar40'])
    # NOTE(review): @unittest.skip is normally called with a reason string;
    # applying it bare relies on version-specific behaviour -- confirm.
    @unittest.skip
    def test_Ar40_blank(self):
        an = self.analysis
        iso = an.isotopes['Ar40']
        self.assertEqual(iso.blank.value, self.expected['Ar40bl'])
    def test_baseline_filtering(self):
        an = self.analysis
        iso = an.isotopes['Ar40'].baseline
        self.assertEqual(iso.regressor.excludes, self.expected['bs_fil'])
    def test_Ar40_filtering(self):
        an = self.analysis
        iso = an.isotopes['Ar40']
        self.assertEqual(iso.regressor.excludes, self.expected['Ar40_fil'])
    def test_Ar39_filtering(self):
        an = self.analysis
        iso = an.isotopes['Ar39']
        self.assertEqual(iso.regressor.excludes, self.expected['Ar39_fil'])
    def test_isFiltered(self):
        an = self.analysis
        iso = an.isotopes['Ar40']
        self.assertEqual(iso.filter_outliers, True)
    def test_R_errors(self):
        # With- and without-irradiation errors should differ.
        an = self.analysis
        self.assertNotEqual(an.F_err, an.F_err_wo_irrad)
    def test_Multiplier_baseline(self):
        an = self.analysis
        self.assertAlmostEqual(an.isotopes['Ar40'].baseline.value,
                               self.expected['Ar40bs'], 4)
    def test_Ar40_baseline_corrected(self):
        an = self.analysis
        self.assertAlmostEqual(an.isotopes['Ar40'].get_baseline_corrected_value().nominal_value,
                               self.expected['Ar40bs_c'], 4)
    def test_Ar39_baseline_corrected(self):
        an = self.analysis
        self.assertAlmostEqual(an.isotopes['Ar39'].get_baseline_corrected_value().nominal_value,
                               self.expected['Ar39bs_c'], 7)
    def test_Ar37_baseline_corrected(self):
        an = self.analysis
        self.assertAlmostEqual(an.isotopes['Ar37'].get_baseline_corrected_value().nominal_value,
                               self.expected['Ar37bs_c'], 7)
    def test_Ar38_baseline_corrected(self):
        an = self.analysis
        self.assertAlmostEqual(an.isotopes['Ar38'].get_baseline_corrected_value().nominal_value,
                               self.expected['Ar38bs_c'], 7)
    def test_Ar36_baseline_corrected(self):
        an = self.analysis
        self.assertAlmostEqual(an.isotopes['Ar36'].get_baseline_corrected_value().nominal_value,
                               self.expected['Ar36bs_c'], 7)
    # NOTE(review): skipped test calls self._get_analysis, which is not
    # defined on this class -- it would fail if un-skipped.
    @unittest.skip
    def test_39_40(self):
        def _func(an):
            a39 = an.isotopes['Ar39'].get_interference_corrected_value()
            a40 = an.isotopes['Ar40'].get_interference_corrected_value()
            a40 = an.isotopes['Ar40'].get_intensity()
            e = a40.std_dev
            #v=(a39/a40).nominal_value
            #v=(a40/a39).nominal_value
            #e=(a40/a39).std_dev
            ##e=(a39/a40).std_dev
            #self.assertEqual(v,
            #                 self.expected_bs_corrected['Ar39_Ar40']
            #                 )
            self.assertEqual(e,
                             self.expected['Ar39_Ar40e']
                             )
        self._get_analysis(_func)
# Run the test case when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
import numpy as np
from lib.sampling import Sampler
from shapely.geometry import Polygon, Point, LineString
from sklearn.neighbors import KDTree
import networkx as nx
def can_connect(n1, n2, polygons):
    """Return True when the straight segment n1-n2 is collision free.

    A polygon blocks the segment only if it crosses it AND its height
    reaches the lower of the two node altitudes.
    """
    segment = LineString([n1, n2])
    lower_altitude = min(n1[2], n2[2])
    return not any(
        poly.crosses(segment) and poly.height >= lower_altitude
        for poly in polygons
    )
def closest_node(graph, current_position):
    '''
    Compute the closest node in the graph to the current position.

    Args:
        graph: object exposing an iterable `nodes` of (x, y, z) tuples
        current_position: indexable with at least x, y, z components
    Returns:
        The node with the smallest Euclidean distance, or None if the
        graph has no nodes.
    '''
    best_node = None
    # +inf sentinel: any real node beats it (the previous hard-coded
    # 100000 bound could silently miss far-away nodes).
    best_dist = float('inf')
    xyz = (current_position[0], current_position[1], current_position[2])
    for node in graph.nodes:
        # Squared Euclidean distance. Bug fix: the z term was missing its
        # square, making the metric wrong (and possibly negative).
        d = ((node[0] - xyz[0]) ** 2
             + (node[1] - xyz[1]) ** 2
             + (node[2] - xyz[2]) ** 2)
        if d < best_dist:
            best_node = node
            best_dist = d
    return best_node
def createGraphFromNodes(nodes, polygons_, k):
    """Build an undirected graph by linking each node to collision-free
    neighbours among its k nearest (by Euclidean distance)."""
    graph = nx.Graph()
    kd_tree = KDTree(nodes)
    for node in nodes:
        # Candidate neighbours: the k nearest nodes (includes the node itself).
        neighbour_idxs = kd_tree.query([node], k, return_distance=False)[0]
        for neighbour_idx in neighbour_idxs:
            candidate = nodes[neighbour_idx]
            if candidate == node:
                continue
            if can_connect(node, candidate, polygons_):
                graph.add_edge(node, candidate, weight=1)
    return graph
def generateProbabilisticGraph(data, n_sampling_points, k):
    '''
    Graph generation for probabilistic roadmap.

    Args:
        data: obstacle data consumed by Sampler
        n_sampling_points: number of random points to sample
        k: number of nearest nodes each node tries to connect to
    Returns:
        networkx.Graph of collision-free connections.
    '''
    print("Generating probabilistic graph...")
    sampler = Sampler(data)
    # NOTE(review): reaches into Sampler's private attribute; consider a
    # public accessor on Sampler.
    polygons = sampler._polygons
    nodes = sampler.sample(n_sampling_points)
    print("Connecting nodes...")
    # The KDTree/connection loop previously duplicated here verbatim is
    # now delegated to createGraphFromNodes.
    return createGraphFromNodes(nodes, polygons, k)
|
# Copyright 2018-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from caffe2.python import workspace, cnn, data_parallel_model, core
from caffe2.proto import caffe2_pb2
from vmz_interface.lib.models import model_builder
from vmz_interface.lib.utils import model_helper
from vmz_interface.lib.utils import model_loader
from vmz_interface.lib.utils import metric
from vmz_interface.lib.utils import reader_utils
import numpy as np
import logging
import argparse
import os.path
import pickle
import h5py
# Module-level logger for the feature extractor.
logging.basicConfig()
log = logging.getLogger("feature_extractor")
log.setLevel(logging.INFO)
def feature_extractor(load_model_path=None, test_data=None, gpu_list=None, num_gpus=0, batch_size=4
                      , clip_per_video=1, decode_type=2
                      , clip_length_rgb=4, sampling_rate_rgb=1, scale_h=128, scale_w=171
                      , crop_size=112, video_res_type=0, num_decode_threads=4, multi_label=0
                      , num_labels=101, input_type=0, clip_length_of=8, sampling_rate_of=2
                      , frame_gap_of=2, do_flow_aggregation=0, flow_data_type=0
                      , get_video_id=1, get_start_frame=0, use_local_file=1, crop_per_clip=1
                      , db_type='pickle', model_name='r2plus1d', model_depth=18
                      , num_channels=3, output_path=None
                      , use_cudnn=1, layers='final_avg', num_iterations=1, channel_multiplier=1.0
                      , bottleneck_multiplier=1.0, use_pool1=0, use_convolutional_pred=0
                      , use_dropout=0, **kwargs):
    """
    :param gpu_list: list of gpu ids to use
    :param batch_size: batch size
    :param clip_per_video: When clips_per_video > 1, sample this many clips uniformly in time
    :param decode_type: 0: random, 1: uniform sampling, 2: use starting frame
    :param clip_length_rgb: Length of input clips
    :param sampling_rate_rgb: Frame sampling rate
    :param scale_h: Scale image height to
    :param scale_w: Scale image width to
    :param crop_size: Input image size (to crop to)
    :param video_res_type: Video frame scaling option, 0: scaled by height x width; 1: scaled by short edge
    :param num_decode_threads: number of decoding threads
    :param multi_label: Multiple label csv file input
    :param num_labels: Number of labels
    :param input_type: 0=rgb, 1=optical flow
    :param clip_length_of: Frames of optical flow data
    :param sampling_rate_of: Sampling rate for optial flows
    :param frame_gap_of: Frame gap of optical flows
    :param do_flow_aggregation: whether to aggregate optical flow across multiple frames
    :param flow_data_type: 0=Flow2C, 1=Flow3C, 2=FlowWithGray, 3=FlowWithRGB
    :param get_video_id: Output video id
    :param get_start_frame: Output clip start frame
    :param use_local_file: use local file
    :param crop_per_clip: number of spatial crops per clip
    :param db_type: Db type of the testing model
    :param model_name: Model name
    :param model_depth: Model depth
    :param num_channels: Number of channels
    :param load_model_path: Load saved model for testing
    :param test_data: Path to the input test data (fixed: previously a
        copy-paste of the output_path description)
    :param output_path: Path to output pickle; defaults to layers.pickle next to <test_data>
    :param use_cudnn: Use CuDNN
    :param layers: Comma-separated list of blob names to fetch
    :param num_iterations: Run only this many iterations
    :param channel_multiplier: Channel multiplier
    :param bottleneck_multiplier: Bottleneck multiplier
    :param use_pool1: use pool1 layer
    :param use_convolutional_pred: using convolutional predictions
    :param use_dropout: Use dropout at the prediction layer

    Keyword-only extras (via **kwargs):
    :param save_h5: truthy to write activations to HDF5 instead of pickle
        (default False)
    """
    if load_model_path is None or test_data is None:
        raise Exception('Model path AND test data need to be specified')
    # Bug fix: `save_h5` was referenced at save time but never defined,
    # raising NameError whenever output_path was given. It is now read from
    # kwargs with a pickle-preserving default.
    save_h5 = kwargs.get('save_h5', False)
    # Initialize Caffe2
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
    if gpu_list is None:
        if num_gpus == 0:
            raise Exception('Must specify GPUs')
        else:
            gpus = [i for i in range(num_gpus)]
    else:
        gpus = gpu_list
        num_gpus = len(gpus)
    my_arg_scope = {
        'order': 'NCHW',
        'use_cudnn': True,
        'cudnn_exhaustive_search': True
    }
    model = cnn.CNNModelHelper(
        name="Extract features",
        **my_arg_scope
    )
    video_input_args = dict(
        batch_size=batch_size,
        clip_per_video=clip_per_video,
        decode_type=decode_type,
        length_rgb=clip_length_rgb,
        sampling_rate_rgb=sampling_rate_rgb,
        scale_h=scale_h,
        scale_w=scale_w,
        crop_size=crop_size,
        video_res_type=video_res_type,
        short_edge=min(scale_h, scale_w),
        num_decode_threads=num_decode_threads,
        do_multi_label=multi_label,
        num_of_class=num_labels,
        random_mirror=False,
        random_crop=False,
        input_type=input_type,
        length_of=clip_length_of,
        sampling_rate_of=sampling_rate_of,
        frame_gap_of=frame_gap_of,
        do_flow_aggregation=do_flow_aggregation,
        flow_data_type=flow_data_type,
        get_rgb=input_type == 0,
        get_optical_flow=input_type == 1,
        get_video_id=get_video_id,
        get_start_frame=get_start_frame,
        use_local_file=use_local_file,
        crop_per_clip=crop_per_clip,
    )
    reader_args = dict(
        name="extract_features" + '_reader',
        input_data=test_data,
    )
    reader, num_examples = reader_utils.create_data_reader(
        model,
        **reader_args
    )

    def input_fn(model):
        # Attaches the video reader to the model's input.
        model_helper.AddVideoInput(
            model,
            reader,
            **video_input_args)

    def create_model_ops(model, loss_scale):
        # Builds the network in inference mode (is_test=1).
        return model_builder.build_model(
            model=model,
            model_name=model_name,
            model_depth=model_depth,
            num_labels=num_labels,
            batch_size=batch_size,
            num_channels=num_channels,
            crop_size=crop_size,
            clip_length=(
                clip_length_of if input_type == 1
                else clip_length_rgb
            ),
            loss_scale=loss_scale,
            is_test=1,
            multi_label=multi_label,
            channel_multiplier=channel_multiplier,
            bottleneck_multiplier=bottleneck_multiplier,
            use_dropout=use_dropout,
            use_convolutional_pred=use_convolutional_pred,
            use_pool1=use_pool1,
        )

    # Build the net: data-parallel across GPUs, or a single CPU device.
    if num_gpus > 0:
        data_parallel_model.Parallelize_GPU(
            model,
            input_builder_fun=input_fn,
            forward_pass_builder_fun=create_model_ops,
            param_update_builder_fun=None,  # 'None' since we aren't training
            devices=gpus,
            optimize_gradient_memory=True,
        )
    else:
        model._device_type = caffe2_pb2.CPU
        model._devices = [0]
        device_opt = core.DeviceOption(model._device_type, 0)
        with core.DeviceScope(device_opt):
            # Because our loaded models are named with "gpu_x", keep the naming for now.
            # TODO: Save model using `data_parallel_model.ExtractPredictorNet`
            # to extract the model for "gpu_0". It also renames
            # the input and output blobs by stripping the "gpu_x/" prefix
            with core.NameScope("{}_{}".format("gpu", 0)):
                input_fn(model)
                create_model_ops(model, 1.0)
    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net)
    # Load the trained weights.
    if db_type == 'pickle':
        model_loader.LoadModelFromPickleFile(model, load_model_path)
    elif db_type == 'minidb':
        if num_gpus > 0:
            model_helper.LoadModel(load_model_path, db_type)
        else:
            with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 0)):
                model_helper.LoadModel(load_model_path, db_type)
    else:
        log.warning("Unsupported db_type: {}".format(db_type))
    data_parallel_model.FinalizeAfterCheckpoint(model)

    def fetchActivations(model, outputs, num_iterations):
        # Runs the net and accumulates the requested blobs per device.
        all_activations = {}
        for counter in range(num_iterations):
            workspace.RunNet(model.net.Proto().name)
            num_devices = 1  # default for cpu
            if num_gpus > 0:
                num_devices = num_gpus
            for g in range(num_devices):
                for output_name in outputs:
                    blob_name = 'gpu_{}/'.format(g) + output_name
                    activations = workspace.FetchBlob(blob_name)
                    if output_name not in all_activations:
                        all_activations[output_name] = []
                    all_activations[output_name].append(activations)
        # each key holds a list of activations obtained from each minibatch.
        # we now concatenate these lists to get the final arrays.
        # concatenating during the loop requires a realloc and can get slow.
        for key in all_activations:
            all_activations[key] = np.concatenate(all_activations[key])
        return all_activations

    if not isinstance(layers, list):
        layers = [layers]
    if 'video_id' not in layers:
        layers.append('video_id')
    assert len(layers) > 0
    # Bug fix: on the CPU path num_gpus is 0, which previously made
    # examples_per_iteration zero and crashed the division below.
    examples_per_iteration = batch_size * max(num_gpus, 1)
    # NOTE(review): this overwrites the `num_iterations` parameter -- the
    # "run only this many iterations" option is currently ignored. Confirm
    # whether min(num_iterations, full passes) was intended.
    num_iterations = int(num_examples / examples_per_iteration)
    activations = fetchActivations(model, layers, num_iterations)
    # saving extracted layers
    for index in range(len(layers)):
        log.info(
            "Read '{}' with shape {}".format(
                layers[index],
                activations[layers[index]].shape
            )
        )
    if output_path:
        log.info('Writing to {}'.format(output_path))
        if save_h5:
            with h5py.File(output_path, 'w') as handle:
                for name, activation in activations.items():
                    handle.create_dataset(name, data=activation)
        else:
            with open(output_path, 'wb') as handle:
                pickle.dump(activations, handle)
    else:
        return activations
|
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np

# Demo: download the IMDB reviews dataset and print two batches.
builder = tfds.builder('imdb_reviews')
builder.download_and_prepare()
# tfds.load reuses the data prepared above.
datasets, info = tfds.load('imdb_reviews', with_info=True, as_supervised=True)
train_dataset = datasets['train']
# NOTE(review): because batch() precedes shuffle(), shuffling happens over
# batches of 5, not over individual examples -- confirm this is intended.
train_dataset = train_dataset.batch(5).shuffle(50).take(2)
for data in train_dataset:
    print(data)
|
from abc import ABC, abstractmethod
import torch
class AbstractReward(ABC):
    # language=rst
    """
    Abstract base class for reward computation.
    """

    @abstractmethod
    def compute(self, **kwargs) -> None:
        # language=rst
        """
        Compute (or modify) the reward for the current step.
        """

    @abstractmethod
    def update(self, **kwargs) -> None:
        # language=rst
        """
        Update the internal variables used to modify the reward. Usually
        called once per episode.
        """
class MovingAvgRPE(AbstractReward):
    # language=rst
    """
    Computes reward prediction error (RPE) based on an exponential moving average (EMA)
    of past rewards.
    """

    def __init__(self, **kwargs) -> None:
        # language=rst
        """
        Constructor for EMA reward prediction error.
        """
        # EMA of the per-step reward.
        self.reward_predict = torch.tensor(0.0)
        # EMA of the per-episode reward.
        self.reward_predict_episode = torch.tensor(0.0)
        # History of per-episode predictions (used for plotting).
        self.rewards_predict_episode = []

    def compute(self, **kwargs) -> torch.Tensor:
        # language=rst
        """
        Computes the reward prediction error using EMA.

        Keyword arguments:

        :param Union[float, torch.Tensor] reward: Current reward.
        :return: Reward prediction error.
        """
        return kwargs["reward"] - self.reward_predict

    def update(self, **kwargs) -> None:
        # language=rst
        """
        Updates the EMAs. Called once per episode.

        Keyword arguments:

        :param Union[float, torch.Tensor] accumulated_reward: Reward accumulated over
            one episode.
        :param int steps: Steps in that episode.
        :param float ema_window: Width of the averaging window.
        """
        accumulated_reward = kwargs["accumulated_reward"]
        steps = torch.tensor(kwargs["steps"]).float()
        ema_window = torch.tensor(kwargs.get("ema_window", 10.0))
        # EMA smoothing coefficient and average reward per step.
        alpha = 1 / ema_window
        per_step_reward = accumulated_reward / steps
        # Blend the new observations into both moving averages.
        self.reward_predict = (1 - alpha) * self.reward_predict + alpha * per_step_reward
        self.reward_predict_episode = (
            1 - alpha
        ) * self.reward_predict_episode + alpha * accumulated_reward
        self.rewards_predict_episode.append(self.reward_predict_episode.item())
|
from pathlib import Path
import torch
from PIL import ImageFont

# Resolve the repository root relative to this file.
FILE = Path(__file__).absolute()
ROOT = FILE.parents[1]  # yolov5/ dir
# Check YOLOv5 Annotator font
font = 'Arial.ttf'
# NOTE(review): the download-if-missing logic below is disabled; the font is
# currently assumed to be present.
# try:
#     ImageFont.truetype(font)
# except Exception as e:  # download if missing
#     url = "https://ultralytics.com/assets/" + font
#     print(f'Downloading {url} to {ROOT / font}...')
#     torch.hub.download_url_to_file(url, str(ROOT / font))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved
# ============================================================================
""" Evaluation Content """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from xai.formatter.contents.base import Content
from xai.formatter.writer.base import Writer
################################################################################
### Multi Class Evaluation Metric Result Content
################################################################################
class MultiClassEvaluationMetricResult(Content):
    """
    Multi Class Evaluation Metric Result
    """

    def __init__(self, metric_tuple, notes=None) -> None:
        """
        Add information about metric results for multi-class evaluation

        Args:
            metric_tuple(tuple): (evaluation_header, evaluation_metric_dict)
                - evaluation_header(str): a header for current evaluation,
                  can be split or round number.
                - evaluation_metric_dict(dict): key-value pair for metric
                  (metric name -> metric dict with either a `class` or an
                  `average` keyword).
            notes(str): text to explain the block
        """
        super().__init__(metric_tuple, notes)
        self._metric_tuple = metric_tuple
        # Fall back to a generic description when no note is supplied.
        self._notes = notes if notes is not None else \
            "This section shows multi-class evaluation metric."

    @property
    def metric_tuple(self):
        """Returns metric."""
        return self._metric_tuple

    @property
    def notes(self):
        """Returns multi-class evaluation metric info."""
        return self._notes

    def draw(self, writer: Writer):
        """
        Draw Multi Class Evaluation Metric Result

        Args:
            writer (Writer): Report Writer
        """
        writer.draw_multi_class_evaluation_metric_results(
            notes=self.notes, metric_tuple=self.metric_tuple)
################################################################################
### Binary Class Evaluation Metric Result Content
################################################################################
class BinaryClassEvaluationMetricResult(Content):
    """
    Binary Class Evaluation Metric Result
    """

    def __init__(self, metric_tuple: tuple, aggregated=True,
                 notes=None) -> None:
        """
        add information about metric results for binary-class evaluation

        Args:
            metric_tuple(tuple): (evaluation_header, evaluation_metric_dict)
                - evaluation_header(str): a header for current evaluation,
                  can be split or round number.
                - evaluation_metric_dict(dict): metric name -> metric value
            aggregated(bool): whether to aggregate multiple result tables
                into one, default True
            notes(str): text to explain the block
        """
        super().__init__(metric_tuple, notes)
        self._metric_tuple = metric_tuple
        self._aggregated = aggregated
        # Fall back to a generic description when no note is supplied.
        self._notes = notes if notes is not None else \
            "This section shows binary-class evaluation metric."

    @property
    def metric_tuple(self):
        """Returns metric."""
        return self._metric_tuple

    @property
    def aggregated(self):
        """Returns aggregate indicator."""
        return self._aggregated

    @property
    def notes(self):
        """Returns binary-class evaluation metric info."""
        return self._notes

    def draw(self, writer: Writer):
        """
        Draw Binary Class Evaluation Metric Result

        Args:
            writer (Writer): Report Writer
        """
        writer.draw_binary_class_evaluation_metric_results(
            notes=self.notes,
            metric_tuple=self.metric_tuple,
            aggregated=self.aggregated)
################################################################################
### Confusion Matrix Content
################################################################################
class ConfusionMatrixResult(Content):
    """
    Confusion Matrix
    """

    def __init__(self, confusion_matrix_tuple: tuple, notes=None) -> None:
        """
        add information about confusion matrix to report

        Args:
            confusion_matrix_tuple(tuple): (confusion_matrix_header,
                confusion_matrix_dict)
                - confusion_matrix_header(str): a header for confusion_matrix,
                  can be split or round number.
                - confusion_matrix_dict(dict):
                    - `labels`(:list of :str): label of classes
                    - `values`(:list of :list): 2D list for confusion matrix
                      value, row for predicted, column for true.
            notes(str): text to explain the block
        """
        super().__init__(confusion_matrix_tuple, notes)
        self._confusion_matrix_tuple = confusion_matrix_tuple
        # Fall back to a generic description when no note is supplied.
        self._notes = notes if notes is not None else \
            "This section shows confusion matrix result."

    @property
    def confusion_matrix_tuple(self):
        """Returns confusion matrix."""
        return self._confusion_matrix_tuple

    @property
    def notes(self):
        """Returns confusion matrix info."""
        return self._notes

    def draw(self, writer: Writer):
        """
        Draw Confusion Matrix Result

        Args:
            writer (Writer): Report Writer
        """
        writer.draw_confusion_matrix_results(
            notes=self.notes,
            confusion_matrix_tuple=self.confusion_matrix_tuple)
################################################################################
### Multi Class Confidence Distribution Content
################################################################################
class MultiClassConfidenceDistribution(Content):
    """
    Multi-Class Confidence Distribution
    """

    def __init__(self, visual_result_tuple: tuple, max_num_classes=9,
                 notes=None) -> None:
        """
        add information about multi class confidence distribution to report

        Args:
            visual_result_tuple(tuple): (visual_result_header, visual_result_dict)
                - visual_result_header(str): a header for the figure,
                  can be split or round number.
                - visual_result_dict(dict): predicted class -> result dict with
                    - `gt` (:list of :str): ground truth class label for all samples
                    - `values` (:list of :float): probability for all samples
            max_num_classes(int, Optional): maximum number of classes
                displayed for each graph, default 9
            notes(str,Optional): text to explain the block
        """
        super().__init__(visual_result_tuple, max_num_classes, notes)
        self._visual_result_tuple = visual_result_tuple
        self._max_num_classes = max_num_classes
        # Fall back to a generic description when no note is supplied.
        self._notes = notes if notes is not None else \
            "This section shows multi-class confidence distribution result."

    @property
    def visual_result_tuple(self):
        """Returns visual result."""
        return self._visual_result_tuple

    @property
    def max_num_classes(self):
        """Returns maximum number of classes."""
        return self._max_num_classes

    @property
    def notes(self):
        """Returns multi class confidence distribution info."""
        return self._notes

    def draw(self, writer: Writer):
        """
        Draw Multi-Class Confidence Distribution Result

        Args:
            writer (Writer): Report Writer
        """
        writer.draw_multi_class_confidence_distribution(
            notes=self.notes,
            visual_result_tuple=self.visual_result_tuple,
            max_num_classes=self.max_num_classes)
################################################################################
### Binary Class Confidence Distribution Content
################################################################################
class BinaryClassConfidenceDistribution(Content):
    """
    Binary-Class Confidence Distribution
    """

    def __init__(self, visual_result_tuple: tuple, notes=None) -> None:
        """
        add information about binary class confidence distribution to report

        Args:
            visual_result_tuple(tuple): (visual_result_header, visual_result_dict)
                - visual_result_header(str): a header for the figure,
                  can be split or round number.
                - visual_result_dict(dict):
                    - `gt` (:list of :str): ground truth class label for all samples
                    - `probability` (:list of :list): 2D list (N sample * 2)
                      to present probability distribution of each sample
            notes(str,Optional): text to explain the block
        """
        super().__init__(visual_result_tuple, notes)
        self._visual_result_tuple = visual_result_tuple
        # Fall back to a generic description when no note is supplied.
        self._notes = notes if notes is not None else \
            "This section shows binary-class confidence distribution result."

    @property
    def visual_result_tuple(self):
        """Returns visual result."""
        return self._visual_result_tuple

    @property
    def notes(self):
        """Returns binary class confidence distribution info."""
        return self._notes

    def draw(self, writer: Writer):
        """
        Draw Binary-Class Confidence Distribution Result

        Args:
            writer (Writer): Report Writer
        """
        writer.draw_binary_class_confidence_distribution(
            notes=self.notes,
            visual_result_tuple=self.visual_result_tuple)
################################################################################
### Binary Class Reliability Diagram Content
################################################################################
class BinaryClassReliabilityDiagram(Content):
    """
    Binary-Class Reliability Diagram report section.

    Wraps a (header, result dict) tuple used to render a calibration
    (reliability) diagram for a binary classifier.
    """

    def __init__(self, visual_result_tuple: tuple, notes=None) -> None:
        """
        Add information about reliability to the report.

        Args:
            visual_result_tuple (tuple): (visual_result_header, visual_result_dict)
                - visual_result_header (str): a header for the section, can be a split or round number.
                - visual_result_dict (dict):
                    - `gt` (list of str): ground truth class label for all samples
                    - `probability` (list of list): 2D list (N sample * 2)
                      giving the probability distribution of each sample
            notes (str, optional): text to explain the block
        """
        super().__init__(visual_result_tuple, notes)
        self._visual_result_tuple = visual_result_tuple
        default_notes = ("This section shows binary-class reliability "
                         "diagram result.")
        self._notes = notes if notes is not None else default_notes

    @property
    def visual_result_tuple(self):
        """Returns the visual result tuple supplied at construction."""
        return self._visual_result_tuple

    @property
    def notes(self):
        """Returns the explanatory notes for this report section."""
        return self._notes

    def draw(self, writer: Writer):
        """
        Draw Binary-Class Reliability Diagram Result

        Args:
            writer (Writer): Report Writer
        """
        writer.draw_binary_class_reliability_diagram(
            notes=self.notes,
            visual_result_tuple=self.visual_result_tuple)
|
# ORM column types and helpers shared by the model definitions.
# BUG FIX: DateTime was imported twice in the original statement.
from sqlalchemy import (Column, Integer, String, DateTime, TIMESTAMP, UniqueConstraint,
                        Time, Text, Boolean, BigInteger, Date, ForeignKey, JSON, Float, func)
from sqlalchemy.orm import relationship
|
from django.shortcuts import render
from anuncios.models import Anuncio
# Create your views here.
def anuncios(request):
    """Render the ads index page with all published ads."""
    published = Anuncio.objects.filter(status='publicado')
    context = {'anuncios': published}
    return render(request, 'anuncios/index.html', context)
def home(request):
    """Render the site landing page."""
    return render(request,"web/index.html")
import os
# Package metadata and the explicit public API of this module.
__author__ = ['Andrew Liew <liew@arch.ethz.ch>']
__copyright__ = 'Copyright 2017, Andrew Liew'
__license__ = 'MIT License'
__email__ = 'liew@arch.ethz.ch'
__all__ = [
    'get_document_name',
    'get_document_filename',
    'get_document_path',
    'get_document_dirname'
]
def get_document_name():
    """Stub for retrieving the current document's name; not implemented here."""
    raise NotImplementedError
def get_document_filename():
    """Stub for retrieving the current document's filename; not implemented here."""
    raise NotImplementedError
def get_document_path():
    """Stub for retrieving the current document's full path; not implemented here."""
    raise NotImplementedError
def get_document_dirname():
    """Stub for retrieving the current document's directory; not implemented here."""
    raise NotImplementedError
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
    # No standalone behavior; this module only provides unimplemented stubs.
    pass
|
from typing import List
from guet.commands.command import Command
from guet.commands.command_factory import CommandFactoryMethod
from guet.settings.settings import Settings
class CommandFactoryDecorator(CommandFactoryMethod):
    """Base decorator for command factories: wraps another factory and delegates to it."""
    def __init__(self, decorated: CommandFactoryMethod):
        super().__init__()
        # The wrapped factory that calls are forwarded to.
        self.decorated = decorated
    def short_help_message(self):
        # Help text comes straight from the wrapped factory.
        return self.decorated.short_help_message()
    def build(self, args: List[str], settings: Settings) -> Command:
        # Subclasses must decide how to build the command around `decorated`.
        raise NotImplementedError
|
"""DjangoChina URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework.documentation import include_docs_urls
from rest_framework.routers import DefaultRouter
from rest_framework_jwt.views import refresh_jwt_token
from rest_auth.registration.views import (
SocialAccountListView, SocialAccountDisconnectView
)
from notifications_extension.views import NotificationViewSet
from posts.views import PostViewSet
from replies.views import ReplyViewSet
from tags.views import TagViewSet
from users.views import (
EmailAddressViewSet,
LoginViewCustom,
RegisterViewCustom,
ConfirmEmailView,
MugshotUploadView,
UserViewSets,
GitHubLogin,
GitHubConnect
)
# Register all API viewsets on a single DRF router.
# NOTE(review): `base_name` is the pre-DRF-3.9 spelling (renamed `basename`
# and later removed in newer DRF) — confirm against the pinned DRF version.
router = DefaultRouter()
router.register(r'posts', PostViewSet)
router.register(r'tags', TagViewSet)
router.register(r'replies', ReplyViewSet)
router.register(r'users', UserViewSets)
router.register(r'users/email', EmailAddressViewSet, base_name='email')
router.register(r'notifications', NotificationViewSet, base_name='notifications')
# URL table: admin, allauth account flows, rest-auth endpoints (login,
# registration, GitHub social login, JWT refresh), the router's API routes,
# the browsable-API docs, and media files in development.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^accounts/', include('allauth.urls')),
    # url(r'^users/mugshot/(?P<filename>[^/]+)$', MugshotUploadView.as_view()),
    url(r'^rest-auth/login/$', LoginViewCustom.as_view(), name='rest_login'),
    url(r'^rest-auth/registration/$', RegisterViewCustom.as_view(), name='rest_register'),
    url(r'^rest-auth/registration/account-confirm-email/(?P<key>[-:\w]+)/$',
        ConfirmEmailView.as_view(),
        name='account_confirm_email'),
    url(r'^rest-auth/github/login/$', GitHubLogin.as_view(), name='github_login'),
    url(r'^rest-auth/github/connect/$', GitHubConnect.as_view(), name='github_connect'),
    url(r'^rest-auth/socialaccounts/$',
        SocialAccountListView.as_view(),
        name='social_account_list'),
    url(r'^rest-auth/socialaccounts/(?P<pk>\d+)/disconnect/$',
        SocialAccountDisconnectView.as_view(),
        name='social_account_disconnect'),
    url(r'^rest-auth/jwt-refresh/', refresh_jwt_token),
    url(r'^rest-auth/', include('rest_auth.urls')),
    url(r'^rest-auth/registration/', include('rest_auth.registration.urls')),
    url(r'^api-auth/', include('rest_framework.urls')), # for testing only
    url(r'^', include(router.urls)),
    url(r'^docs/', include_docs_urls(title='Django中文社区 API'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import os
import csv
import argparse
import numpy as np
from scipy import integrate
from matplotlib import pyplot as plt
from sklearn.metrics import r2_score
from exp_variant_class import exp_variant
from sklearn.decomposition import TruncatedSVD,PCA
from examples.plotting.commons import *
from copy import deepcopy
# Plot setup: sample every viridis colormap entry and fix the figure size.
cmap = plt.cm.viridis
cmaplist = [cmap(i) for i in range(cmap.N)]
cmaplen=len(cmaplist)
plt.rcParams["figure.figsize"] = (10,8)
# CLI: trial suffixes (--tr), energy variants (--ee), divergence filtering
# toggle (--no_div), and the agent identifier (--agentt).
parser = argparse.ArgumentParser()
parser.add_argument('--tr', nargs='+', required=True)
parser.add_argument('--ee',type=str, nargs='+',choices=spt_energy_list)
parser.add_argument('--no_div',action='store_true')
parser.add_argument('--agentt',
                    type=str,choices=spt_agent_list)
args = parser.parse_args()
agentt=args.agentt
# Output / processing toggles for this analysis run.
svg = False
save = True
sortt = False
avg_plot = True
precheck = False
truncated = True
plot_r_sq = True
manual_rsq = True
standscale = True
if args.no_div:
    get_rid_div=True
else:
    get_rid_div=False
named_label = False
energy_penalty = False
plot_norm_single = True
# Analysis hyper-parameters (defaults; some are overridden per agent below).
min_rsq = 0
num_epi = 10
div_rate=0.4
recon_num = 8
LW_action = 4
ori_num_vec = 3
desired_length = 50 # 50#28#7
truncated_start = 200
ori_total_vec_rsq = 9
per_episode_max_length=1000
type_ = 'P'
action_path=None
reward_path=None
state_path=None
# Per-agent configuration pulled from the shared agent_info_dict table.
total_vec=agent_info_dict[agentt]['total_vec']
total_chk=agent_info_dict[agentt]['total_chk']
ori_final=agent_info_dict[agentt]['ori_final']
ori_begin=agent_info_dict[agentt]['ori_begin']
ori_step=agent_info_dict[agentt]['ori_step']
x_speed_index=agent_info_dict[agentt]['x_speed_index']
desired_dist=agent_info_dict[agentt]['desired_dist']
# Optional per-agent overrides: not every agent defines these keys, so fall
# back to the defaults above when they are absent.
# BUG FIX: these were bare `except:` clauses, which also swallowed
# KeyboardInterrupt/SystemExit; only a missing dict key is expected here.
try:
    dll = agent_info_dict[agentt]['dll']
    truncated_start = agent_info_dict[agentt]['truncated_start']
except KeyError:
    pass
try:
    W_y_lim = agent_info_dict[agentt]['W_y_lim']
except KeyError:
    pass
try:
    recon_y_lim = agent_info_dict[agentt]['recon_y_lim']
except KeyError:
    pass
try:
    C_y_lim = agent_info_dict[agentt]['C_y_lim']
except KeyError:
    pass
# Derive the output folder suffix from the first requested energy variant.
if 'E1' in args.ee[0]:
    top_folder=agentt+'_E1'
elif 'Ep5' in args.ee[0]:
    top_folder=agentt+'_Ep5'
elif 'Ep1' in args.ee[0]:
    top_folder=agentt+'_Ep1'
elif 'Ep25' in args.ee[0]:
    top_folder=agentt+'_Ep25'
elif 'Ez' in args.ee[0]:
    top_folder=agentt+'_Ez'
elif 'Ealt' in args.ee[0]:
    top_folder=agentt+'_Ealt'
elif 'sR' in args.ee[0]:
    top_folder=agentt+'_sR'
elif 'bR' in args.ee[0]:
    top_folder=agentt+'_bR'
elif args.ee[0]=='sL':
    top_folder=agentt+'_sL'
elif args.ee[0]=='sD':
    top_folder=agentt+'_sD'
elif "v" in args.ee[0]:
    top_folder=agentt+args.ee[0]
else:
    top_folder=agentt
# makedirs(..., exist_ok=True) already tolerates an existing directory;
# the previous os.path.exists() pre-check was redundant and racy (TOCTOU).
os.makedirs(file_path+'/experiments_results/Synergy/all_csv/raw_csv/'+top_folder, exist_ok=True)
# Agent 'A' with the E0 variant also evaluates the unsuffixed (no-trial) run.
if args.agentt=='A' and 'E0' in args.ee:
    args.tr=['']+args.tr
# Main sweep: for every trial suffix x energy variant, load each checkpoint's
# recorded actions/states, compute spatiotemporal (SPT) and spatial synergy
# surface areas plus performance metrics, and write one CSV row per checkpoint.
for tr in args.tr:
    for ee in args.ee:
        final = ori_final
        begin = ori_begin
        step = ori_step
        trial = tr
        subfolder=ee
        # Experiment base name embeds the energy variant unless the agent
        # identifier already contains it.
        if ee not in agentt:
            base = agentt + '_' + ee
        else:
            base = agentt
        # One raw CSV per (variant, trial); filename marks divergence filtering.
        if not get_rid_div:
            surface_csv = open(
                file_path+'/experiments_results/Synergy/all_csv/raw_csv/' + top_folder + '/' + base +tr+'_all_surface.csv', 'w')
        else:
            surface_csv = open(
                file_path+'/experiments_results/Synergy/all_csv/raw_csv/' + top_folder + '/' + base +tr+'_no_div_all_surface.csv', 'w')
        writer = csv.writer(surface_csv, lineterminator='\n')
        writer.writerow(['Checkpoint', 'Surface Area','P', 'PI', 'E','PP','PPI','Spatial area'])
        all_names=[]
        tmp=[]
        # Build checkpoint experiment names from `final` down to `begin`.
        for cc in range(final, begin - step, -step):
            tmp.append(base + '_C' + str(cc) + trial)
            '''if cc==3000 and 'HC' in agentt and 'dof' not in agentt and ee=='E0':
                tmp.append(base + trial)
                try:
                    dummy = exp_variant(tmp[-1],action_path=action_path,reward_path=reward_path,state_path=state_path)
                except:
                    tmp.pop()
                    tmp.append(base + '_C' + str(cc) + trial)
            else:
                tmp.append(base + '_C' + str(cc) + trial)'''
            # NOTE(review): this appends a reference to the same growing list
            # on every iteration, so all_names holds N aliases of one list —
            # confirm the duplicated processing below is intended.
            all_names.append(tmp)
        all_names.reverse()
        print(all_names)
        # Optional pre-check: keep only experiments whose data files are complete.
        if precheck:
            top_tmp=[]
            for all_name in all_names:
                tmpp = []
                for n in all_name:
                    if action_path != None:
                        complete = exp_variant(n, action_path=action_path, reward_path=reward_path,
                                               state_path=state_path).check_complete_data()
                    else:
                        complete = exp_variant(n).check_complete_data()
                    if complete:
                        tmpp.append(n)
                top_tmp.append(tmpp)
            all_names=top_tmp
        for ind_all_name,all_name in enumerate(all_names):
            agent = all_name[0] + '_spatiotemporal_evolution'
            folder_name=agent+'_Compare_'+type_+'_'+str(desired_length)
            if manual_rsq:
                folder_name=folder_name+'_manual_rsq'
            if standscale:
                folder_name = folder_name + '_SS'
            '''step = cmaplen // 30
            color_list=[]
            c=0
            for l in range(30):
                color_list.append(cmaplist[c])
                c+=step
            color_list=[color_list[0],color_list[-1]]'''
            # Load one exp_variant per checkpoint and collect its metric label.
            all_label=[]
            exp_variant_list=[]
            for ind,n in enumerate(all_name):
                if action_path != None:
                    exp_variant_list.append(
                        exp_variant(n, action_path=action_path, reward_path=reward_path, state_path=state_path))
                else:
                    exp_variant_list.append(exp_variant(n))
                all_label.append(exp_variant_list[ind].eval(type_))
            ############################################################################################################
            # Optionally sort checkpoints by metric value (descending).
            if sortt:
                new_index=sorted(range(len(all_label)), key=lambda k: all_label[k],reverse=True)
                all_label=sorted(all_label,reverse=True)
                tmp=[]
                tmp_2=[]
                for ni in new_index:
                    tmp.append(exp_variant_list[ni])
                    tmp_2.append(all_name[ni])
                exp_variant_list=tmp
                all_name=tmp_2
            # Labels: either "C<checkpoint>: <metric>" or the bare metric value.
            if named_label:
                all_label=[]
                for ind,n in enumerate(all_name):
                    prefix_list=exp_variant_list[ind].name.split('_')
                    for pp in prefix_list:
                        if 'C' in pp and 'H' not in pp:
                            prefix=pp
                            break
                    all_label.append(prefix + ': ' + '{:.2f}'.format(exp_variant_list[ind].eval(type_),2))
            else:
                all_label = []
                for ind, n in enumerate(all_name):
                    all_label.append('{:.2f}'.format(exp_variant_list[ind].eval(type_), 2))
            ############################################################################################################
            r_sq_all_combare, r_sq_all_combare_ax = plt.subplots(1, 1)
            # Per-checkpoint accumulators for the CSV columns.
            P_list=[]
            PI_list=[]
            E_list=[]
            SA_list=[]
            PP_list = []
            PPI_list=[]
            SpatialArea_list=[]
            current_checkpoint_list=[]
            for n_ind,name in enumerate(all_name):
                exp_variant_obj=exp_variant_list[n_ind]
                # Extract the checkpoint token ("C<step>") from the name.
                name_list=name.split('_')
                current_checkpoint=0
                for nn in name_list:
                    if nn[0]=='C':
                        current_checkpoint=nn
                if current_checkpoint==0:
                    current_checkpoint='C'+str(ori_final)
                rsq_label = []
                # Load recorded actions (X) and states for this checkpoint.
                X=np.load(exp_variant_obj.action_npy,allow_pickle=True)
                state_ = np.load(exp_variant_obj.state_npy,allow_pickle=True)
                mini = per_episode_max_length
                # Ragged episodes: truncate all to the shortest episode length
                # (keeping the tail) and stack into a (num_epi, T, dim) array.
                if X.shape == (num_epi,):
                    # print('a')
                    for i in range(num_epi):
                        amin = np.asarray(X[i]).shape[0]
                        if amin < mini:
                            mini = amin
                    print(mini)
                    tmp = np.expand_dims(np.asarray(X[0])[-mini::, :], 0)
                    for i in range(num_epi-1):
                        tmp = np.vstack((tmp, np.expand_dims(np.asarray(X[i + 1])[-mini::, :], 0)))
                    print(tmp.shape)
                    X = tmp
                    tmp2 = np.expand_dims(np.asarray(state_[0])[-mini::, :], 0)
                    for i in range(num_epi-1):
                        tmp2 = np.vstack((tmp2, np.expand_dims(np.asarray(state_[i + 1])[-mini::, :], 0)))
                    state_ = tmp2
                X=X[0:num_epi,:,:]#10,1000,12
                state_ = state_[0:num_epi, :, :]
                # Cumulative travelled distance from the x-speed channel of
                # the first episode (used to size the truncation window).
                distance = []
                if x_speed_index:
                    speed_record = state_[0, :, x_speed_index]
                    for i in range(len(speed_record)):
                        if i == 0:
                            distance.append(speed_record[0])
                        else:
                            distance.append(np.sum(speed_record[0:i]))
                    distance = np.asarray(distance)
                # Truncate each episode to a window of roughly constant
                # travelled distance, then phase-align episodes on the peak
                # of the first action channel.
                if truncated:
                    total_vec_rsq = ori_total_vec_rsq
                    if x_speed_index:
                        if mini == per_episode_max_length or mini>300:
                            current_dist = distance[truncated_start]
                            end_dist_index = truncated_start
                            tmp_dist = 0
                            while tmp_dist < desired_dist and end_dist_index < len(distance) - 1:
                                end_dist_index += 1
                                tmp_dist = distance[end_dist_index] - current_dist
                            remaining_index = end_dist_index - truncated_start
                            desired_length = remaining_index
                            print(desired_length)
                        elif mini - desired_length >= 0:
                            remaining_index=desired_length
                            desired_length = remaining_index
                        else:
                            desired_length = dll
                    if mini == per_episode_max_length:
                        X_truncated = X[:, truncated_start:truncated_start + desired_length, :]
                    else:
                        if mini >= (truncated_start + desired_length):
                            X_truncated = X[:, truncated_start:truncated_start + desired_length, :]
                        elif mini - desired_length >= 0:
                            X_truncated = X[:, mini - desired_length:mini, :]
                        else:
                            X_truncated = X[:, 0:mini, :]
                    if mini>=ori_total_vec_rsq:
                        total_vec_rsq=ori_total_vec_rsq
                    else:
                        total_vec_rsq=mini
                    X = X_truncated#10,167,12
                    # Phase alignment: rotate each episode so it starts at the
                    # argmax of action channel 0.
                    max_list = []
                    max_ind = np.argmax(X[0, :, 0])
                    max_list.append(max_ind)
                    X_temp = np.concatenate(
                        (np.expand_dims(X[0, max_ind::, :], 0), np.expand_dims(X[0, 0:max_ind, :], 0)), axis=1)
                    for l in range(1, X.shape[0], 1):
                        max_ind = np.argmax(X[l, :, 0])
                        max_list.append(max_ind)
                        X_temp = np.concatenate((X_temp, np.concatenate(
                            (np.expand_dims(X[l, max_ind::, :], 0), np.expand_dims(X[l, 0:max_ind, :], 0)),
                            axis=1)), axis=0)
                    X = X_temp#10,167,12
                X_before_norm=deepcopy(X)
                ##########################SPT START###################################
                # Spatiotemporal synergy: center per timestep, reshape, then
                # measure reconstruction R^2 vs number of SVD components.
                if standscale:
                    mx = np.mean(X, axis=1)
                    for k in range(X.shape[1]):
                        X[:, k, :] = X[:, k, :] - mx
                X = reshape_into_spt_shape(X)
                rsq_single_list = []
                for num_vec_to_keep_ in range(1,total_vec_rsq+1):
                    pca = TruncatedSVD(n_components=num_vec_to_keep_)
                    pca.fit(X)
                    eig_vecs = pca.components_
                    eig_vals = pca.singular_values_
                    eig_pairs = [(eig_vals[i], eig_vecs[i, :]) for i in range(len(eig_vals))]
                    num_features = X.shape[1]
                    percentage = sum(pca.explained_variance_ratio_)
                    proj_mat = eig_pairs[0][1].reshape(num_features, 1)
                    for eig_vec_idx in range(1, num_vec_to_keep_):
                        proj_mat = np.hstack((proj_mat, eig_pairs[eig_vec_idx][1].reshape(num_features, 1)))
                    W = proj_mat
                    C = X.dot(W)
                    X_prime = C.dot(W.T)
                    if manual_rsq:
                        # NOTE(review): unlike the spatial branch below, the
                        # norms here are NOT squared — confirm this asymmetry
                        # is intentional.
                        Vm = np.mean(X, axis=0, keepdims=True)
                        resid = X - np.dot(Vm, np.ones((X.shape[1], 1)))
                        resid2 = X - X_prime
                        SST = np.linalg.norm(resid)
                        SSE = np.linalg.norm(resid2)
                        rsq = 1 - SSE / SST
                    else:
                        rsq = r2_score(X, X_prime)
                    rsq_single_list.append(rsq)
                # Surface area under the R^2-vs-components curve.
                surface_area=integrate.simps(rsq_single_list,range(1,total_vec_rsq+1))
                SA_list.append(surface_area)
                ##########################SPT END###################################
                ##########################SPATIAL START###################################
                # Spatial synergy: same R^2-vs-components analysis with PCA on
                # the spatially reshaped (untruncated-normalization) data.
                if standscale:
                    ori_shape = X_before_norm.shape
                    X_spatial = reshape_into_spatial_shape(X_before_norm)
                    mx = np.mean(X_spatial, axis=1)
                X_spatial = X_spatial - np.expand_dims(mx, 1)
                X_spatial = X_spatial.T # 1200,8
                num_features = X_spatial.shape[1]
                num_vec_to_keep_max = X_spatial.shape[1] + 1
                rsq_spatial_list = []
                for num_vec_to_keep_ in range(1, total_vec + 1):
                    pca = PCA(n_components=num_vec_to_keep_)
                    pca.fit(X_spatial)
                    eig_vecs = pca.components_
                    eig_vals = pca.singular_values_
                    eig_pairs = [(eig_vals[i], eig_vecs[i, :]) for i in range(len(eig_vals))]
                    num_features = X_spatial.shape[1]
                    percentage = sum(pca.explained_variance_ratio_)
                    proj_mat = eig_pairs[0][1].reshape(num_features, 1)
                    for eig_vec_idx in range(1, num_vec_to_keep_):
                        proj_mat = np.hstack((proj_mat, eig_pairs[eig_vec_idx][1].reshape(num_features, 1)))
                    W = proj_mat
                    C = X_spatial.dot(W)
                    X_spatial_prime = C.dot(W.T)
                    if manual_rsq:
                        Vm = np.mean(X_spatial, axis=0, keepdims=True)
                        # resid = X - np.dot(Vm, np.ones((X.shape[1],1 )))
                        resid = X_spatial - np.dot(np.ones((X_spatial.shape[0], 1)), Vm)
                        resid2 = X_spatial - X_spatial_prime
                        SST = np.linalg.norm(resid) ** 2
                        SSE = np.linalg.norm(resid2) ** 2
                        rsq = 1 - SSE / SST
                    else:
                        rsq = r2_score(X_spatial, X_spatial_prime)
                    rsq_spatial_list.append(rsq)
                surface_area_spatial = integrate.simps(rsq_spatial_list, range(1, total_vec + 1))
                SpatialArea_list.append(surface_area_spatial)
                ##########################SPATIAL END###################################
                # Collect performance metrics for this checkpoint.
                rsq_label.append('Rsq_' + exp_variant_obj.name)
                P=exp_variant_obj.eval('P')
                PI=exp_variant_obj.eval('PI')
                E = exp_variant_obj.eval('E')
                P_list.append(P)
                PI_list.append(PI)
                E_list.append(E)
                current_checkpoint_list.append(current_checkpoint)
                # PP/PPI are optional metrics; older runs may not provide them.
                try:
                    PP = exp_variant_obj.eval('PP')
                    PP_list.append(PP)
                    PPI = exp_variant_obj.eval('PPI')
                    PPI_list.append(PPI)
                except:
                    print("PP list load failed in AdaptiveW_Extract_SA_P_PI_corr_each_trial_SVD.py line 390.")
                # NaN surface areas fall back to the previous checkpoint's value.
                if np.isnan(surface_area):
                    surface_area=SA_list[n_ind-1]
                if np.isnan(surface_area_spatial):
                    surface_area_spatial=SpatialArea_list[n_ind-1]
            # Divergence filtering: drop checkpoints whose P value jumps by
            # more than div_rate relative to both neighbors.
            if get_rid_div:
                SpatialArea_list = np.flip(np.asarray(SpatialArea_list))
                SA_list = np.flip(np.asarray(SA_list))
                P_list = np.flip(np.asarray(P_list))
                PI_list = np.flip(np.asarray(PI_list))
                E_list = np.flip(np.asarray(E_list))
                current_checkpoint_list = np.flip(np.asarray(current_checkpoint_list))
                try:
                    PP_list = np.flip(np.asarray(PP_list))
                    PPI_list = np.flip(np.asarray(PPI_list))
                except:
                    print("PP list failed in AdaptiveW_Extract_SA_P_PI_corr_each_trial_SVD.py line 406.")
                bad_ind_list = []
                for ind, p in enumerate(P_list):
                    if ind >5 and ind < (len(P_list) - 1):
                        if abs(p - P_list[ind - 1]) / abs(p) > div_rate and abs(p - P_list[ind + 1]) / abs(
                            p) > div_rate:
                            bad_ind_list.append(ind)
                print(len(bad_ind_list))
                print(bad_ind_list)
                if len(bad_ind_list) > 0:
                    P_list = np.delete(P_list, bad_ind_list, 0)
                    PI_list = np.delete(PI_list, bad_ind_list, 0)
                    E_list = np.delete(E_list, bad_ind_list, 0)
                    current_checkpoint_list=np.delete(current_checkpoint_list, bad_ind_list, 0)
                    SA_list = np.delete(SA_list, bad_ind_list, 0)
                    SpatialArea_list = np.delete(SpatialArea_list, bad_ind_list, 0)
                    try:
                        PP_list = np.delete(PP_list, bad_ind_list, 0)
                        PPI_list = np.delete(PPI_list, bad_ind_list, 0)
                    except:
                        print("PP list failed in AdaptiveW_Extract_SA_P_PI_corr_each_trial_SVD.py line 427.")
                SpatialArea_list = np.flip(SpatialArea_list)
                SA_list = np.flip(SA_list)
                P_list = np.flip(P_list)
                PI_list = np.flip(PI_list)
                E_list = np.flip(E_list)
                current_checkpoint_list = np.flip(current_checkpoint_list)
                try:
                    PP_list = np.flip(PP_list)
                    PPI_list = np.flip(PPI_list)
                except:
                    print("PP list failed in AdaptiveW_Extract_SA_P_PI_corr_each_trial_SVD.py line 437.")
            # Pad PP/PPI with zeros when the optional metrics were unavailable.
            if len(PP_list)==0:
                PP_list=[0]*len(P_list)
                PPI_list=[0]*len(P_list)
            # Emit one CSV row per surviving checkpoint.
            for ind in range(len(P_list)):
                writer.writerow([current_checkpoint_list[ind], SA_list[ind], P_list[ind], PI_list[ind], E_list[ind],PP_list[ind],PPI_list[ind],SpatialArea_list[ind]])
        surface_csv.close()
|
# Copyright (c), Google Inc, 2017
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
try:
import google.auth
import google.auth.compute_engine
from google.oauth2 import service_account
from google.auth.transport.requests import AuthorizedSession
HAS_GOOGLE_LIBRARIES = True
except ImportError:
HAS_GOOGLE_LIBRARIES = False
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
import ast
import os
import json
def navigate_hash(source, path, default=None):
    """Walk the nested dict *source* along the key sequence *path*.

    Returns the value found at the end of *path*, or *default* when
    *source* is empty/None or any key along the way is missing.
    """
    if not source:
        # BUG FIX: an empty source used to return None even when a default
        # was supplied; return the default for consistency with the
        # missing-key path below.
        return default
    key = path[0]
    path = path[1:]
    if key not in source:
        return default
    result = source[key]
    if path:
        return navigate_hash(result, path, default)
    else:
        return result
class GcpRequestException(Exception):
    """Raised for errors encountered while talking to the GCP APIs."""
    pass
def remove_nones_from_dict(obj):
    """Return a copy of *obj* without None, empty-dict, and empty-list values."""
    return {
        key: value
        for key, value in obj.items()
        if value is not None and value != {} and value != []
    }
# Handles the replacement of dicts with values -> the needed value for GCP API
def replace_resource_dict(item, value):
    """Recursively replace resource dict(s) by the field named *value*.

    Lists are mapped element-wise; dicts yield ``item.get(value)``; strings
    that parse as Python literals are parsed and re-processed; anything
    falsy or unparseable is returned unchanged.
    """
    if isinstance(item, list):
        return [replace_resource_dict(i, value) for i in item]
    if not item:
        return item
    if isinstance(item, dict):
        return item.get(value)
    # Item could be a string or a string representing a dictionary.
    try:
        new_item = ast.literal_eval(item)
    except (ValueError, SyntaxError):
        # BUG FIX: ast.literal_eval raises SyntaxError (not only ValueError)
        # on strings such as 'a b'; both mean "plain string", so return it.
        return item
    return replace_resource_dict(new_item, value)
# Handles all authentication and HTTP sessions for GCP API calls.
class GcpSession(object):
    """HTTP helper around google-auth's AuthorizedSession.

    Validates the Ansible module's auth parameters once at construction,
    then exposes get/post/put/patch/delete helpers.  Any requests-level
    exception is reported through ``module.fail_json``.
    """

    def __init__(self, module, product):
        self.module = module
        # Product name; used only to build the User-Agent header.
        self.product = product
        self._validate()

    def get(self, url, body=None, **kwargs):
        """GET *url*; *body* (if any) is sent as JSON."""
        kwargs.update({'json': body, 'headers': self._headers()})
        try:
            return self.session().get(url, **kwargs)
        except getattr(requests.exceptions, 'RequestException') as inst:
            # BUG FIX: Python 3 exceptions have no ``.message`` attribute;
            # the original ``inst.message`` raised AttributeError here.
            self.module.fail_json(msg=str(inst))

    def post(self, url, body=None, headers=None, **kwargs):
        """POST *body* as JSON to *url*, merging extra *headers* over the defaults."""
        if headers:
            # BUG FIX: the original called self.merge_dictionaries(), which
            # does not exist — the method is named _merge_dictionaries.
            headers = self._merge_dictionaries(headers, self._headers())
        else:
            headers = self._headers()
        try:
            # NOTE(review): extra **kwargs are accepted but not forwarded —
            # preserved as-is from the original interface.
            return self.session().post(url, json=body, headers=headers)
        except getattr(requests.exceptions, 'RequestException') as inst:
            self.module.fail_json(msg=str(inst))

    def post_contents(self, url, file_contents=None, headers=None, **kwargs):
        """POST raw *file_contents* (not JSON-encoded) to *url*."""
        if headers:
            headers = self._merge_dictionaries(headers, self._headers())
        else:
            headers = self._headers()
        try:
            return self.session().post(url, data=file_contents, headers=headers)
        except getattr(requests.exceptions, 'RequestException') as inst:
            self.module.fail_json(msg=str(inst))

    def delete(self, url, body=None):
        """DELETE *url*; *body* (if any) is sent as JSON."""
        try:
            return self.session().delete(url, json=body, headers=self._headers())
        except getattr(requests.exceptions, 'RequestException') as inst:
            self.module.fail_json(msg=str(inst))

    def put(self, url, body=None):
        """PUT *body* as JSON to *url*."""
        try:
            return self.session().put(url, json=body, headers=self._headers())
        except getattr(requests.exceptions, 'RequestException') as inst:
            self.module.fail_json(msg=str(inst))

    def patch(self, url, body=None, **kwargs):
        """PATCH *url*; *body* is sent as JSON, extra kwargs are forwarded."""
        kwargs.update({'json': body, 'headers': self._headers()})
        try:
            return self.session().patch(url, **kwargs)
        except getattr(requests.exceptions, 'RequestException') as inst:
            self.module.fail_json(msg=str(inst))

    def session(self):
        """Return a new AuthorizedSession bound to the resolved credentials."""
        return AuthorizedSession(
            self._credentials())

    def _validate(self):
        """Fail fast on missing libraries or invalid parameter combinations."""
        if not HAS_REQUESTS:
            self.module.fail_json(msg="Please install the requests library")
        if not HAS_GOOGLE_LIBRARIES:
            self.module.fail_json(msg="Please install the google-auth library")
        if self.module.params.get('service_account_email') is not None and self.module.params['auth_kind'] != 'machineaccount':
            self.module.fail_json(
                msg="Service Account Email only works with Machine Account-based authentication"
            )
        if (self.module.params.get('service_account_file') is not None or
                self.module.params.get('service_account_contents') is not None) and self.module.params['auth_kind'] != 'serviceaccount':
            self.module.fail_json(
                msg="Service Account File only works with Service Account-based authentication"
            )

    def _credentials(self):
        """Build google-auth credentials for the configured auth_kind."""
        cred_type = self.module.params['auth_kind']
        if cred_type == 'application':
            # Application Default Credentials; the project id is unused here.
            credentials, project_id = google.auth.default(scopes=self.module.params['scopes'])
            return credentials
        elif cred_type == 'serviceaccount' and self.module.params.get('service_account_file'):
            path = os.path.realpath(os.path.expanduser(self.module.params['service_account_file']))
            return service_account.Credentials.from_service_account_file(path).with_scopes(self.module.params['scopes'])
        elif cred_type == 'serviceaccount' and self.module.params.get('service_account_contents'):
            cred = json.loads(self.module.params.get('service_account_contents'))
            return service_account.Credentials.from_service_account_info(cred).with_scopes(self.module.params['scopes'])
        elif cred_type == 'machineaccount':
            return google.auth.compute_engine.Credentials(
                self.module.params['service_account_email'])
        else:
            self.module.fail_json(msg="Credential type '%s' not implemented" % cred_type)

    def _headers(self):
        """Default request headers identifying this Ansible module to GCP."""
        return {
            'User-Agent': "Google-Ansible-MM-{0}".format(self.product)
        }

    def _merge_dictionaries(self, a, b):
        """Return a copy of *a* updated with *b* (*b* wins on conflicts)."""
        new = a.copy()
        new.update(b)
        return new
class GcpModule(AnsibleModule):
    """AnsibleModule subclass that injects the shared GCP argument spec.

    Adds project/auth_kind/service-account/scopes parameters (with their
    GCP_* environment fallbacks) and marks the three service-account
    parameters mutually exclusive.
    """

    def __init__(self, *args, **kwargs):
        arg_spec = {}
        if 'argument_spec' in kwargs:
            arg_spec = kwargs['argument_spec']
        kwargs['argument_spec'] = self._merge_dictionaries(
            arg_spec,
            dict(
                project=dict(
                    required=False,
                    type='str',
                    fallback=(env_fallback, ['GCP_PROJECT'])),
                auth_kind=dict(
                    required=False,
                    fallback=(env_fallback, ['GCP_AUTH_KIND']),
                    choices=['machineaccount', 'serviceaccount', 'application'],
                    type='str'),
                service_account_email=dict(
                    required=False,
                    fallback=(env_fallback, ['GCP_SERVICE_ACCOUNT_EMAIL']),
                    type='str'),
                service_account_file=dict(
                    required=False,
                    fallback=(env_fallback, ['GCP_SERVICE_ACCOUNT_FILE']),
                    type='path'),
                service_account_contents=dict(
                    required=False,
                    fallback=(env_fallback, ['GCP_SERVICE_ACCOUNT_CONTENTS']),
                    type='str'),
                scopes=dict(
                    required=False,
                    fallback=(env_fallback, ['GCP_SCOPES']),
                    type='list')
            )
        )
        mutual = []
        if 'mutually_exclusive' in kwargs:
            mutual = kwargs['mutually_exclusive']
        # BUG FIX: list.append() returns None, so the original assigned None
        # to kwargs['mutually_exclusive']; append first, then assign the list.
        mutual.append(
            ['service_account_email', 'service_account_file', 'service_account_contents']
        )
        kwargs['mutually_exclusive'] = mutual
        AnsibleModule.__init__(self, *args, **kwargs)

    def raise_for_status(self, response):
        """Fail the module when *response* carries an HTTP error status."""
        try:
            response.raise_for_status()
        except getattr(requests.exceptions, 'RequestException') as inst:
            self.fail_json(msg="GCP returned error: %s" % response.json())

    def _merge_dictionaries(self, a, b):
        """Return a copy of *a* updated with *b* (*b* wins on conflicts)."""
        new = a.copy()
        new.update(b)
        return new
# This class takes in two dictionaries `a` and `b`.
# These are dictionaries of arbitrary depth, but made up of standard Python
# types only.
# This differ will compare all values in `a` to those in `b`.
# Note: Only keys in `a` will be compared. Extra keys in `b` will be ignored.
# Note: On all lists, order does matter.
class GcpRequest(object):
    """Comparable wrapper around a request dict.

    Two GcpRequests are equal when ``difference`` finds no differing
    values.  Only keys present in ``self.request`` are compared; extra
    keys on the other side are ignored, and list order matters.
    """
    def __init__(self, request):
        self.request = request
    def __eq__(self, other):
        return not self.difference(other)
    def __ne__(self, other):
        return not self.__eq__(other)
    # Returns the difference between `self.request` and `b`
    def difference(self, b):
        return self._compare_dicts(self.request, b.request)
    def _compare_dicts(self, dict1, dict2):
        # Compare every key of dict1 against dict2, then drop empty results.
        difference = {}
        for key in dict1:
            difference[key] = self._compare_value(dict1.get(key), dict2.get(key))
        # Remove all empty values from difference.
        difference2 = {}
        for key in difference:
            if difference[key]:
                difference2[key] = difference[key]
        return difference2
    # Takes in two lists and compares them.
    def _compare_lists(self, list1, list2):
        # Pairwise compare by index; items of list1 beyond len(list2) are skipped.
        difference = []
        for index in range(len(list1)):
            value1 = list1[index]
            if index < len(list2):
                value2 = list2[index]
                difference.append(self._compare_value(value1, value2))
        difference2 = []
        for value in difference:
            if value:
                difference2.append(value)
        return difference2
    def _compare_value(self, value1, value2):
        """Return the differing value (taken from side 1), or None when equal."""
        diff = None
        # If a None is found, a difference does not exist.
        # Only differing values matter.
        # NOTE(review): any falsy value2 (0, '', False, []) is treated as
        # "no difference", not just None — confirm this is intended.
        if not value2:
            return None
        # Can assume non-None types at this point.
        try:
            if isinstance(value1, list):
                diff = self._compare_lists(value1, value2)
            elif isinstance(value2, dict):
                diff = self._compare_dicts(value1, value2)
            elif isinstance(value1, bool):
                diff = self._compare_boolean(value1, value2)
            # Always use to_text values to avoid unicode issues.
            elif to_text(value1) != to_text(value2):
                diff = value1
        # to_text may throw UnicodeErrors.
        # These errors shouldn't crash Ansible and should be hidden.
        except UnicodeError:
            pass
        return diff
    def _compare_boolean(self, value1, value2):
        """Compare a bool against a bool or 'true'/'false' text; None when equal."""
        try:
            # Both True
            if value1 and isinstance(value2, bool) and value2:
                return None
            # Value1 True, value2 'true'
            elif value1 and to_text(value2) == 'true':
                return None
            # Both False
            elif not value1 and isinstance(value2, bool) and not value2:
                return None
            # Value1 False, value2 'false'
            elif not value1 and to_text(value2) == 'false':
                return None
            else:
                return value2
        # to_text may throw UnicodeErrors.
        # These errors shouldn't crash Ansible and should be hidden.
        except UnicodeError:
            return None
|
import os
import shelve
import scrape
import download
import wx.dataview as dv
from wx.lib.delayedresult import startWorker
# Ignore wxWidgets/wxWidgets version mismatch warnings.
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import wx
class AliasConfigWindow(wx.Frame):
    """Frame that lets the user view/edit per-title alias lists.

    Aliases are read from and written back to the parent window's
    ``userConfig["aliases"]`` mapping (title -> list of alias strings).
    """
    def __init__(self, parent):
        wx.Frame.__init__(self, parent, title="Configure Aliases", size=(625, 400))
        # Elements creation
        self.panel = wx.Panel(self, wx.ID_ANY)
        self.panel.SetBackgroundColour("#ffffff")
        # Two columns: read-only title, editable semicolon-separated aliases.
        self.dataView = dv.DataViewListCtrl(self.panel, wx.ID_ANY)
        self.dataView.AppendTextColumn("Title", width=250,
                                       flags=dv.DATAVIEW_COL_RESIZABLE | dv.DATAVIEW_COL_SORTABLE)
        self.dataView.AppendTextColumn("Alias (Separate multiple values with semicolons)", width=350,
                                       mode=dv.DATAVIEW_CELL_EDITABLE,
                                       flags=dv.DATAVIEW_COL_RESIZABLE | dv.DATAVIEW_COL_SORTABLE)
        # Event bindings
        self.Bind(dv.EVT_DATAVIEW_ITEM_VALUE_CHANGED, self.OnAliasChanged, self.dataView)
        self.Bind(wx.EVT_CHAR_HOOK, self.OnKeyUp)
        self.Bind(wx.EVT_SHOW, self.OnShow, self)
        self.Bind(wx.EVT_CLOSE, self.OnClose, self)
        # Elements sizing and positing
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.dataView, 1, wx.EXPAND | wx.ALL, 0)
        self.panel.SetSizer(sizer)
        self.panel.Layout()
        # Prefer the exe's embedded icon when running as a bundled executable.
        self.SetIcon(wx.Icon("anidl.exe" if os.path.exists("anidl.exe") else "anidl.ico", wx.BITMAP_TYPE_ICO))
    def OnAliasChanged(self, evt):
        # Persist an edited alias cell: split on ';' into a stripped list.
        updatedRow = self.dataView.ItemToRow(evt.GetItem())
        updatedRowTitle = self.dataView.GetTextValue(updatedRow, 0)
        updatedRowAlias = self.dataView.GetTextValue(updatedRow, 1)
        aliases = self.GetParent().userConfig["aliases"]
        aliases[updatedRowTitle] = [alias.strip() for alias in updatedRowAlias.split(";")] if updatedRowAlias else []
    def OnShow(self, evt):
        # Populate the table on show; clear it on hide so reopening reloads.
        if evt.GetShow():
            for title, alias in self.GetParent().userConfig["aliases"].items():
                self.dataView.AppendItem([title, "; ".join(alias)])
        else:
            self.dataView.DeleteAllItems()
    def OnClose(self, evt):
        # Hide instead of destroying so the window can be reopened.
        self.Show(False)
    def OnKeyUp(self, evt):
        # Delete key removes the selected row and its stored aliases.
        if evt.GetKeyCode() == wx.WXK_DELETE and self.dataView.HasSelection():
            deletedRow = self.dataView.ItemToRow(self.dataView.GetSelection())
            deletedRowTitle = self.dataView.GetTextValue(deletedRow, 0)
            del self.GetParent().userConfig["aliases"][deletedRowTitle]
            self.dataView.DeleteItem(deletedRow)
        evt.Skip()
class MainWindow(wx.Frame):
    """Main Anidl window: persistent settings, fetched episode list, downloads.

    Settings are stored in a ``shelve`` file named ``config`` (opened with
    ``writeback=True`` so in-place mutation of nested structures such as the
    alias dict is flushed when the shelf is closed in ``OnClose``).
    """
    def __init__(self, parent):
        wx.Frame.__init__(self, parent, title="Anidl", size=(400, 525))
        # Open config file
        self.userConfig = shelve.open("config", writeback=True)
        if "aliases" not in self.userConfig:
            self.userConfig["aliases"] = {}
        # Elements creation
        self.panel = wx.Panel(self, wx.ID_ANY)
        self.panel.SetBackgroundColour("#ffffff")
        dirPickerLabel = wx.StaticText(self.panel, wx.ID_ANY, "Download directory")
        dirPickerDefaultValue = self.userConfig["downloadDir"] if "downloadDir" in self.userConfig else ""
        self.dirPicker = wx.DirPickerCtrl(self.panel, wx.ID_ANY, dirPickerDefaultValue, "Select your download directory")
        listUrlLabel = wx.StaticText(self.panel, wx.ID_ANY, "Anilist username")
        listUrlTextInputDefaultValue = self.userConfig["anilistUsername"] if "anilistUsername" in self.userConfig else ""
        self.listUrlTextInput = wx.TextCtrl(self.panel, wx.ID_ANY, listUrlTextInputDefaultValue)
        listBoxLabel = wx.StaticText(self.panel, wx.ID_ANY, "Target qualities")
        self.listBoxItems = ["480p", "720p", "1080p"]
        self.listBox = wx.ListBox(self.panel, wx.ID_ANY, choices=self.listBoxItems, style=wx.LB_MULTIPLE)
        if "selectedListBoxItems" in self.userConfig:
            # Restore the previously selected quality indices.
            for item in self.userConfig["selectedListBoxItems"]:
                self.listBox.SetSelection(item)
        else:
            # First run: select every quality by default.
            for i in range(len(self.listBoxItems)):
                self.listBox.SetSelection(i)
        comboBoxLabel = wx.StaticText(self.panel, wx.ID_ANY, "Episodes look-ahead")
        self.comboBox = wx.ComboBox(self.panel, wx.ID_ANY, choices=["1", "2", "3"], style=wx.CB_READONLY)
        self.comboBox.SetSelection(self.userConfig["selectedComboBoxItem"] if "selectedComboBoxItem" in self.userConfig else 0)
        self.checkListToggle = wx.CheckBox(self.panel, wx.ID_ANY, "Select/Deselect all")
        self.checkListToggle.SetValue(True)
        # Placeholder rows; real entries are inserted after FetchData.
        self.checkList = wx.CheckListBox(self.panel, wx.ID_ANY, choices=[""]*10)
        downloadButton = wx.Button(self.panel, wx.ID_ANY, "Download my chinese cartoons")
        self.aliasConfigWindow = AliasConfigWindow(self)
        # Menu creation
        fileMenu = wx.Menu()
        refreshMenuItem = fileMenu.Append(-1, "Refresh\tCtrl+R")
        fileMenu.AppendSeparator()
        downloadMenuItem = fileMenu.Append(-1, "Download and Exit\tCtrl+Shift+D",
                                           "Download selected cartoons and terminate the program.")
        exitMenuItem = fileMenu.Append(wx.ID_EXIT, "Exit without downloading\tCtrl+W", " Terminate the program.")
        editMenu = wx.Menu()
        selectAllMenuItem = editMenu.Append(-1, "Select All\tCtrl+A")
        deselectAllMenuItem = editMenu.Append(-1, "Deselect All\tCtrl+D")
        editMenu.AppendSeparator()
        configureAliasesMenuItem = editMenu.Append(-1, "Configure Aliases\tCtrl+Shift+A")
        menuBar = wx.MenuBar()
        menuBar.Append(fileMenu, "&File")
        menuBar.Append(editMenu, "&Edit")
        self.SetMenuBar(menuBar)
        # Event bindings
        self.Bind(wx.EVT_BUTTON, self.OnDownload, downloadButton)
        self.Bind(wx.EVT_CHECKBOX, self.OnToggleSelection, self.checkListToggle)
        self.Bind(wx.EVT_TEXT, self.OnUsernameChanged, self.listUrlTextInput)
        self.Bind(wx.EVT_DIRPICKER_CHANGED, self.OnDownloadPathChanged, self.dirPicker)
        self.Bind(wx.EVT_LISTBOX, self.OnQualityChanged, self.listBox)
        self.Bind(wx.EVT_COMBOBOX, self.OnEpisodeLookAheadChanged, self.comboBox)
        self.Bind(wx.EVT_CLOSE, self.OnClose, self)
        self.Bind(wx.EVT_MENU, self.OnRefresh, refreshMenuItem)
        self.Bind(wx.EVT_MENU, self.OnDownload, downloadMenuItem)
        self.Bind(wx.EVT_MENU, self.OnExit, exitMenuItem)
        self.Bind(wx.EVT_MENU, self.OnSelectAll, selectAllMenuItem)
        self.Bind(wx.EVT_MENU, self.OnDeselectAll, deselectAllMenuItem)
        self.Bind(wx.EVT_MENU, self.OnConfigureAliases, configureAliasesMenuItem)
        # Elements sizing and positing
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(dirPickerLabel, 0, wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_LEFT, 5)
        sizer.Add(self.dirPicker, 0, wx.EXPAND | wx.ALL | wx.ALIGN_LEFT, 5)
        sizer.Add(listUrlLabel, 0, wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_LEFT, 5)
        sizer.Add(self.listUrlTextInput, 0, wx.EXPAND | wx.ALL | wx.ALIGN_LEFT, 5)
        filtersSizer = wx.FlexGridSizer(2, 2)
        filtersSizer.Add(listBoxLabel, 0, wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_LEFT, 5)
        filtersSizer.Add(comboBoxLabel, 0, wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_LEFT, 5)
        filtersSizer.Add(self.listBox, 0, wx.ALL | wx.ALIGN_LEFT, 5)
        filtersSizer.Add(self.comboBox, 0, wx.ALL | wx.ALIGN_LEFT, 5)
        sizer.Add(filtersSizer, 0)
        sizer.AddSpacer(15)
        sizer.Add(self.checkListToggle, 0, wx.ALL, 5)
        sizer.Add(self.checkList, 0, wx.EXPAND | wx.ALL | wx.ALIGN_LEFT)
        sizer.Add(downloadButton, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 15)
        self.panel.SetSizer(sizer)
        self.panel.Layout()
        # Use the embedded exe icon when running frozen, else the .ico file.
        self.SetIcon(wx.Icon("anidl.exe" if os.path.exists("anidl.exe") else "anidl.ico", wx.BITMAP_TYPE_ICO))
        self.Show(True)

    def SelectAll(self):
        """Check every fetched entry and sync the toggle checkbox.

        NOTE(review): relies on self.checkListItems, which only exists after
        FetchData has run — confirm callers never reach this earlier.
        """
        self.checkListToggle.SetValue(True)
        for i in range(len(self.checkListItems)):
            self.checkList.Check(i)

    def DeselectAll(self):
        """Uncheck every fetched entry and sync the toggle checkbox."""
        self.checkListToggle.SetValue(False)
        for i in range(len(self.checkListItems)):
            self.checkList.Check(i, False)

    def FetchData(self):
        """Scrape the user's list on a worker thread with a progress dialog.

        The worker (FetchDataWorker) updates self.progress / appends to
        self.checkListItems; this method busy-waits, pumping the UI with
        wx.Yield, until the worker reports 100% or the user aborts.
        """
        self.checkList.Clear()
        self.checkListItems = []
        # Qualities NOT selected in the list box are passed as a blacklist.
        unselectedQualities = [self.listBoxItems[i] for i in range(
            len(self.listBoxItems)) if i not in self.listBox.GetSelections()]
        # startWorker runs FetchDataWorker off the UI thread and delivers the
        # result to OnDataFetched (presumably wx.lib.delayedresult — verify).
        startWorker(self.OnDataFetched, self.FetchDataWorker, wargs=(
            self.listUrlTextInput.GetLineText(0),
            unselectedQualities,
            int(self.comboBox.GetSelection()) + 1,  # selection index 0-2 -> look-ahead 1-3
            self.userConfig["aliases"]))
        # Progress Dialog
        self.progress = 0
        self.keepGoing = True
        progressDialog = wx.ProgressDialog("Fetching data",
                                           "This may take a while...",
                                           parent=self,
                                           style=wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_AUTO_HIDE)
        while self.keepGoing and self.progress < 100:
            wx.MilliSleep(250)
            wx.Yield()
            # Update returns (continue?, skip?) — abort clears keepGoing.
            (self.keepGoing, skip) = progressDialog.Update(self.progress)
        progressDialog.Destroy()

    def FetchDataWorker(self, anilist_username, blacklisted_qualities, look_ahead, aliases):
        """Worker-thread body: stream scrape results into the UI state.

        Returns False if the user aborted, True on completion.
        """
        for (progress, entry) in scrape.fetch(anilist_username, blacklisted_qualities, look_ahead, aliases):
            self.progress = progress
            self.checkListItems.extend(entry)
            if not self.keepGoing:
                return False
        return True

    def OnDataFetched(self, result):
        """Worker completion callback: populate the check list on success."""
        if result.get():
            if len(self.checkListItems):
                self.checkList.InsertItems([entry["name"] for entry in self.checkListItems], 0)
                self.SelectAll()
            self.checkList.SetFocus()

    def OnRefresh(self, evt):
        self.FetchData()

    def OnConfigureAliases(self, evt):
        self.aliasConfigWindow.Show(True)

    def OnDownload(self, evt):
        """Download every checked entry, then close the application."""
        download.open()
        for i in range(len(self.checkListItems)):
            if self.checkList.IsChecked(i):
                download.torrent(self.checkListItems[i], self.dirPicker.GetPath())
        download.close()
        self.Close(True)

    def OnToggleSelection(self, evt):
        if self.checkListToggle.IsChecked():
            self.SelectAll()
        else:
            self.DeselectAll()

    # The following handlers persist each setting as soon as it changes.
    def OnEpisodeLookAheadChanged(self, evt):
        self.userConfig["selectedComboBoxItem"] = self.comboBox.GetSelection()

    def OnQualityChanged(self, evt):
        self.userConfig["selectedListBoxItems"] = self.listBox.GetSelections()

    def OnUsernameChanged(self, evt):
        self.userConfig["anilistUsername"] = self.listUrlTextInput.GetLineText(0)

    def OnDownloadPathChanged(self, evt):
        self.userConfig["downloadDir"] = self.dirPicker.GetPath()

    def OnSelectAll(self, evt):
        self.SelectAll()

    def OnDeselectAll(self, evt):
        self.DeselectAll()

    def OnExit(self, evt):
        self.Close(True)

    def OnClose(self, evt):
        # Flush and close the shelve config before tearing the window down.
        self.userConfig.close()
        self.Destroy()
class AnidlApp(wx.App):
    """Application object: builds the main window and starts the first fetch."""

    def __init__(self, *args, **kwargs):
        wx.App.__init__(self, *args, **kwargs)
        # Event bindings
        self.Bind(wx.EVT_ACTIVATE_APP, self.OnActivate)

    def OnInit(self):
        """wx startup hook: create the UI and kick off the initial scrape."""
        anidl = MainWindow(None)
        anidl.FetchData()
        return True

    def BringWindowToFront(self):
        """Raise the top-level window, ignoring failures.

        Narrowed from a bare ``except:`` to ``except Exception`` so that
        KeyboardInterrupt/SystemExit are no longer swallowed; the top
        window may legitimately be gone during shutdown.
        """
        try:
            self.GetTopWindow().Raise()
        except Exception:
            pass

    def OnActivate(self, evt):
        """Raise the window whenever the application is (re)activated."""
        if evt.GetActive():
            self.BringWindowToFront()
        evt.Skip()

    def MacReopenApp(self):
        """macOS hook: clicking the dock icon re-raises the window."""
        self.BringWindowToFront()
if __name__ == "__main__":
app = AnidlApp(False)
app.MainLoop()
|
import calendar
import warnings
import ipywidgets as widgets
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler
from tail_extrap import util
class Univariate:
    ''' Tail extrapolation of uni-variable as a time series
    Environmental variables (e.g., wind, wave) with short intervals are not
    dependent, as such Extreme Value Theory cannot be used for
    distribution tail extrapolation.
    This Univariate class applies Extreme Value Theory to the independent
    subset, and then convert the extrapolation result to the original
    dataset using MRP ratio curve.
    Methods include fit, predict, and plot_diagnosis. During the fitting,
    the distribution is estimated separately for the left tail, bulk,
    and the right tail, and then combined together. Left and right tails
    are extrapolated through the _TailExtrapolation class.
    Parameters
    ----------
    data: pandas series with datetime index. If constructing conditional
        distribution, NaN should be retained for total year to be
        correctly inferred
    sample_coor: numpy array. Sample coordinate for outputs reference.
        Inferred from data if not provided
    '''
    def __init__(self, data, sample_coor=None):
        ''' Variables added:
        data: with NaN removed
        time: datetimes corresponding to data
        total_year: float, total years covered by data
        sample_coor: numpy array, coordinates of discretized variable
        notebook_backend: bool, whether this is a notebook environment
        '''
        def days_in_year(year):
            ''' Total days in a year '''
            return 366 if calendar.isleap(year) else 365
        # Sanity check
        if not isinstance(data, pd.Series):
            raise TypeError('data should be a pandas series')
        if not isinstance(data.index, pd.core.indexes.datetimes.DatetimeIndex):
            raise TypeError('data index should be datetimes')
        ratio_threshold = 0.7
        data = data.sort_index()
        # Fraction of the first/last calendar year actually covered by data;
        # used to compute a fractional total_year below.
        first_year_ratio = 1 - \
            (data.index[0].dayofyear - 1) / \
            days_in_year(data.index[0].year)
        last_year_ratio = data.index[-1].dayofyear / \
            days_in_year(data.index[-1].year)
        if min([first_year_ratio, last_year_ratio]) < ratio_threshold:
            # NOTE(review): typo in warning text ('to much' -> 'too much').
            warnings.warn('Missing to much data in the first or last year '
                          'might affect accuracy')
        # Whole years in between plus the fractional first and last years.
        self.total_year = data.index[-1].year - data.index[0].year - 1 +\
            first_year_ratio + last_year_ratio
        # Drop NaN values but keep the matching timestamps.
        idx_valid = ~np.isnan(data.values)
        self.data = data.values[idx_valid]
        self.time = data.index[idx_valid]
        if sample_coor is None:
            # Default output grid: 1000 points spanning half the minimum to
            # 1.5x the maximum of the observed data.
            self.sample_coor = np.linspace(
                self.data.min() * 0.5, self.data.max() * 1.5, 1000)
        else:
            self.sample_coor = sample_coor
        # Inline-backend detection: figures are closed after fit and
        # re-displayed on demand in notebooks.
        self.notebook_backend = matplotlib.get_backend() \
            in ['module://ipykernel.pylab.backend_inline']

    def fit(self, maxima_extract='Annual Maxima', maxima_fit='Gumbel Chart',
            bulk_fit='Empirical', outlier_detect=None, verbose=False):
        '''Fit a univirate distribution for the bulk and tail of the data
        Parameters:
        -----------
        maxima_extract: one of ['Annual Maxima']
            How to extract independent maxima subset.
        maxima_fit: one of ['Gumbel Chart']
            How to fit a distribution to the extracted maxima.
        bulk_fit: one of ['Empirical', 'Parametric']
            How to fit a distribution to the bulk of the data.
            Empirical: Use the empirical CDF
            Parametric: Choose the best fitting distribution
        outlier_detect: one of ['None', 'RANSAC Regression', 'Huber
            Regression']
            Whether to assume outliers when fitting maxima data
            None: Assume no outliers
            RANSAC Regression: Use RANSACRegressor to filter outliers
            Huber Regression: Use HuberRegressor to filter outliers
        verbose: bool. Whether to print progress
        '''
        total_steps = 3
        self.diag_fig = plt.figure(figsize=(15, 7), tight_layout=True)
        # Fit bulk
        if verbose:
            print(f'Step 1/{total_steps}: Fitting bulk of the data')
        if bulk_fit == 'Empirical':
            self._bulk_empirical_fit()
        elif bulk_fit == 'Parametric':
            self._bulk_best_fit(verbose)
        else:
            raise AttributeError(
                'Unsupported bulk fitting method, check method_bulk')
        # Fit right tail
        if verbose:
            print(f'Step 2/{total_steps}: Fitting right tail')
        tail_right = _TailExtrapolation(self, fig_handle=self.diag_fig)
        tail_right.fit(maxima_extract=maxima_extract,
                       maxima_fit=maxima_fit, outlier_detect=outlier_detect)
        # Fit left tail
        if verbose:
            print(f'Step 3/{total_steps}: Fitting left tail')
        # The left tail is fitted on negated data (see _TailExtrapolation).
        tail_left = _TailExtrapolation(
            self, left_tail=True, fig_handle=self.diag_fig)
        with np.errstate(over='ignore'):
            tail_left.fit(maxima_extract=maxima_extract,
                          maxima_fit=maxima_fit, outlier_detect=outlier_detect)
        # Arrange diagnostic plot: move the tail subplots into a 2x4 grid and
        # drop the duplicated middle panels.
        # NOTE(review): Axes.change_geometry was deprecated in matplotlib 3.4
        # and later removed — confirm the pinned matplotlib version.
        self.diag_fig.axes[0].change_geometry(2, 4, 3)
        self.diag_fig.axes[2].change_geometry(2, 4, 4)
        self.diag_fig.axes[3].change_geometry(2, 4, 7)
        self.diag_fig.axes[5].change_geometry(2, 4, 8)
        self.diag_fig.axes[4].remove()
        self.diag_fig.axes[1].remove()
        # Combine tail and bulk
        self._combine_bulk_tail(tail_right, tail_left)
        if self.notebook_backend:
            plt.close(self.diag_fig)

    def predict(self, mrp=None, val=None):
        ''' Predict value given an MRP, or MRP given a value
        Parameters:
        -----------
        MRP: float, mean return period for prediction
        val: float, value of the variable
        Returns:
        --------
        Value corresponding to MRP (if MRP is input), or MRP
        corresponding to val
        '''
        if mrp is not None and val is None:  # MRP provided
            # Invert the value->MRP curve; NaN MRP entries (below tail
            # threshold coverage) are excluded before interpolating.
            idx = np.isnan(self.sample_mrp)
            return interp1d(self.sample_mrp[~idx], self.sample_coor[~idx])(mrp)
        elif mrp is None and val is not None:  # val provided
            return interp1d(self.sample_coor, self.sample_mrp)(val)
        else:
            raise AttributeError('Only one of MRP and val should be provided')

    def plot_diagnosis(self):
        ''' Display diagnostic plot
        '''
        if hasattr(self, 'diag_fig'):
            if self.notebook_backend:
                display(self.diag_fig)
            else:
                plt.show()
        else:
            raise AttributeError(
                'No diagnostic plot found. Call fit method first.')

    def _bulk_empirical_fit(self):
        ''' Fit bulk using empirical CDF
        Variables added:
        ----------------
        self.bulk_F: CDF corresponding to self.sample_coor
        '''
        x = np.sort(self.data)
        F_emp = util.plotting_position(x, method='unbiased')
        if self.sample_coor[0] < x[0]:
            # Anchor the empirical CDF at (0, 0) so interpolation covers
            # sample coordinates below the data minimum.
            x = np.concatenate(([0], x))
            F_emp = np.concatenate(([0], F_emp))
        self.bulk_F = interp1d(
            x, F_emp, bounds_error=False)(self.sample_coor)

    def _bulk_best_fit(self, verbose):
        ''' Fit bulk using optimal distribution
        See best_fit for more information
        Variables added:
        ----------------
        self.bulk_F: CDF corresponding to self.sample_coor
        '''
        # Standardize before fitting so candidate distributions compete on
        # comparable scales, then evaluate the CDF on the standardized grid.
        ss = StandardScaler().fit(self.data.reshape(-1, 1))
        data_std = ss.transform(self.data.reshape(-1, 1)).flatten()
        dist, dist_name, _ = util.best_fit(data_std)
        self.bulk_F = dist.cdf(
            ss.transform(self.sample_coor.reshape(-1, 1))).flatten()
        if verbose:
            print(f'    Best fit distribution: {dist_name}')

    def _combine_bulk_tail(self, tail_right, tail_left):
        ''' Combine bulk, tail_right, and tail_left
        Variables added:
        ----------------
        sample_F: CDF for self.sample_coor
        sample_mrp: MRP for self.sample_coor
        '''
        # Start from the bulk CDF and overwrite both tail regions.
        sample_F = np.copy(self.bulk_F)
        idx_right_tail = self.sample_coor >= tail_right.threshold
        sample_F[idx_right_tail] = tail_right.tail_F[idx_right_tail]
        # Left tail was fitted on negated data, hence the sign flips.
        idx_left_tail = self.sample_coor <= -tail_left.threshold
        sample_F[idx_left_tail] = 1 - tail_left.tail_F[idx_left_tail]
        self.sample_F = sample_F
        for attr in ['c_rate', 'm_rate']:
            setattr(self, attr, getattr(tail_right, attr))
        with np.errstate(divide='ignore'):
            # MRP = 1 / (annual rate * exceedance probability).
            self.sample_mrp = 1 / self.c_rate / (1 - sample_F)
        # Diagnositc plot
        ax = self.diag_fig.add_subplot(1, 4, (1, 2))
        mrp_emp = 1 / self.c_rate / (
            1 - util.plotting_position(self.data, method='unbiased'))
        ax.plot(mrp_emp, np.sort(self.data), '.', color=[0.6, 0.6, 0.6],
                markersize=8, label='Empirical')
        ax.set_xscale('log')
        idx_tail = (self.sample_coor >= tail_right.threshold) | (
            self.sample_coor <= -tail_left.threshold)
        sample_mrp_tail = np.copy(self.sample_mrp)
        sample_mrp_tail[~idx_tail] = np.nan
        ax.plot(sample_mrp_tail, self.sample_coor, 'b-', label='Tail fit')
        xlm = list(ax.get_xlim())
        xlm[1] = mrp_emp[-1] * 10  # Limit MRP to be 10 * data period
        ylm = list(ax.get_ylim())
        with np.errstate(invalid='ignore'):
            ylm[1] = self.sample_coor[sample_mrp_tail < xlm[1]][-1]  # Corresponding y
        ax.plot(1 / self.c_rate / (1 - self.bulk_F), self.sample_coor, '--',
                color=[0, 0.5, 0])
        ax.plot(
            1 / self.c_rate / (1 - self.bulk_F[~idx_tail]),
            self.sample_coor[~idx_tail], '-',
            color=[0, 0.5, 0], label='Bulk fit')
        # Dashed horizontal lines mark the bulk/tail thresholds.
        ax.plot(xlm, tail_right.threshold * np.array([1, 1]), 'k--')
        ax.plot(xlm, -tail_left.threshold * np.array([1, 1]), 'k--')
        ax.set_xlim(xlm)
        ax.set_ylim(ylm)
        ax.set_xlabel('Return period (year)')
        ax.set_ylabel('X')
        ax.set_title('Fitting result')
        ax.grid(True, which='both')
        ax.legend(loc='upper left')
class _TailExtrapolation:
    ''' Extrapolation the tail of a distribution.
    Parameters
    ----------
    univariate_obj: Instance of Univariate class
    left_tail: bool. Whether it's extrapolating the left tail
    fig_handle: Figure handler for diagnosis plot
    '''
    def __init__(self, univariate_obj, left_tail=False, fig_handle=None):
        # Copy the shared state from the parent Univariate object.
        for attr in ['data', 'time', 'total_year', 'sample_coor', 'bulk_F']:
            setattr(self, attr, getattr(univariate_obj, attr))
        if left_tail:
            # Reverse data so that it becomes a maxima extrapolation
            self.data = -self.data
            self.sample_coor = -self.sample_coor
            self.bulk_F = 1 - self.bulk_F
            self.label = 'left'
        else:
            self.label = 'right'
        if fig_handle is None:
            self.diag_fig = plt.figure(figsize=(8, 3), tight_layout=True)
        else:
            self.diag_fig = fig_handle

    def fit(self, maxima_extract='Annual Maxima', maxima_fit='Gumbel Chart',
            outlier_detect=None):
        '''Fit EVT tail, MRP ratio, then convert the EVT tail to raw data
        Parameters
        ----------
        maxima_extract : str, optional
            Method to extract independent maxima subset, default 'Annual Maxima'
        maxima_fit : str, optional
            Method to fit maxima subset, by default 'Gumbel Chart'
        outlier_detect : str, optional
            Whether to assume outliers in the maxima subset, by default 'None'
            Options are 'None', 'RANSAC Regression', and 'Huber Regression'
        '''
        # Extract maxima (sorted, no NaN)
        if maxima_extract == 'Annual Maxima':
            self._extract_annual_maxima()
        else:
            raise AttributeError(
                f'Unsupported maxima extraction method {maxima_extract}')
        if maxima_fit == 'Gumbel Chart':
            self._fit_gumbel_chart(outlier_detect, plot_diagnosis=True)
        else:
            raise AttributeError(
                'Unsupported tail fitting method, check method_tail.')
        # Fitting tail
        self._maxima_to_continuous(plot_diagnosis=True)

    def _extract_annual_maxima(self):
        '''Extract annual maxima
        Variables added
        ---------------
        self.maxima : numpy array in ascending order
        '''
        year = np.array(self.time.year)
        unique_year = np.unique(year)
        # One maximum per calendar year present in the record.
        result = [max(self.data[year == cur_year]) for cur_year in unique_year]
        self.maxima = np.sort(result)

    def _fit_gumbel_chart(self, outlier_detect, plot_diagnosis):
        '''Fit a Gumbel distribution fit via Gumbel chart
        Parameters
        ----------
        outlier_detect : bool
            Whether to assume outliers. Use OLS when False.
        plot_diagnosis: bool
            Whether to generate diagnostic plot.
        Variables added
        ---------------
        self.maxima_inlier_mask: Mask indicating inliers
        self.maxima_dist: Probability distribution for the maxima
        self.threshold: Threshold of X between bulk and tail, minimum
            is constrained to be no lower than 5 percentile of F_maxima
        '''
        def _gumbel_y(F):
            ''' Calculate y coordinates on the Gumbel chart from CDF '''
            return -np.log(-np.log(F))
        # On a Gumbel chart, Gumbel-distributed data falls on a straight
        # line; fit a line and read the distribution off its slope/intercept.
        x = self.maxima
        F = util.plotting_position(x, method='unbiased')
        y = _gumbel_y(F)
        if outlier_detect is None or outlier_detect == 'None':
            mdl = linear_model.LinearRegression().fit(x.reshape(-1, 1), y)
            self.maxima_inlier_mask = np.array(
                [True] * len(self.maxima))  # Create mask manually
        elif outlier_detect == 'RANSAC Regression':
            mdl = linear_model.RANSACRegressor(
                random_state=1).fit(x.reshape(-1, 1), y)
            self.maxima_inlier_mask = mdl.inlier_mask_
            mdl = mdl.estimator_
        elif outlier_detect == 'Huber Regression':
            mdl = linear_model.HuberRegressor(
                epsilon=1.35).fit(x.reshape(-1, 1), y)
            self.maxima_inlier_mask = np.array(
                [True] * len(self.maxima))  # Create mask manually
        else:
            raise ValueError('Unrecognized outlier_detect keyword')
        k, b = mdl.coef_[0], mdl.intercept_
        if plot_diagnosis:
            ax = self.diag_fig.add_subplot(1, 3, 1, label=self.label)
            ax.plot(x[self.maxima_inlier_mask], y[self.maxima_inlier_mask],
                    'b.', markersize=10, label='Maxima(inliers)')
            ax.plot(x[~self.maxima_inlier_mask], y[~self.maxima_inlier_mask],
                    'r.', markersize=10, label='Maxima(outliers)')
            xlm, ylm = ax.get_xlim(), ax.get_ylim()
            ax.plot(
                self.sample_coor, mdl.predict(self.sample_coor.reshape(-1, 1)),
                'r--', label='Linear fitting')
            ax.set_xlim(xlm)
            ax.set_ylim(ylm)
            ax.set_xlabel('Maxima data')
            ax.set_ylabel('$-ln(-ln(F))$')
            ax.set_title(f'Gumbel chart ({self.label} tail)')
            ax.grid(True)
            # ax.legend(loc='best')
        # Gumbel chart line y = k*x + b corresponds to loc=-b/k, scale=1/k.
        self.maxima_dist = stats.gumbel_r(loc=-b/k, scale=1/k)
        # Exclude maxima below the 5th percentile of the fitted distribution
        # from the inlier set before choosing the bulk/tail threshold.
        self.maxima_inlier_mask[self.maxima <
                                self.maxima_dist.ppf(0.05)] = False
        self.threshold = self.maxima[self.maxima_inlier_mask].min()

    def _maxima_to_continuous(self, plot_diagnosis: bool):
        '''Convert the EVT tail to the continuous dataset
        Parameters
        ----------
        plot_diagnosis : bool
            Whether to generate diagnostic plot
        Variables added
        ---------------
        self.m_rate : float
            Annual occurrence rate of the maxima data
        self.c_rate : float
            Annual occurrence rate of the continuous data
        self.tail_F : numpy array
            CDF of the tail of the continuous dataset. The part below
            self.threshold is set to be np.nan
        '''
        # Calculate empirical MRP for continuous and maxima datasets
        c_data = np.sort(self.data)
        m_data = self.maxima
        c_rate = len(c_data) / self.total_year
        m_rate = len(m_data) / self.total_year
        c_mrp_emp = 1 / c_rate / \
            (1 - util.plotting_position(c_data, method='unbiased'))
        m_mrp_emp = 1 / m_rate / \
            (1 - util.plotting_position(m_data, method='unbiased'))
        # Calculate empirical MRP ratio
        mrp_ratio_emp = m_mrp_emp / interp1d(c_data, c_mrp_emp)(m_data)
        # Calculate the corresponding t coordinates for the empirical MRP ratio
        t_emp = -np.log(self.maxima_dist.cdf(m_data))
        # Target MRP ratio at self.threshold
        t_threshold = -np.log(self.maxima_dist.cdf(self.threshold))
        c_mrp_threshold = 1 / c_rate / \
            (1 - interp1d(self.sample_coor, self.bulk_F)(self.threshold))
        m_mrp_threshold = 1 / m_rate / \
            (1 - self.maxima_dist.cdf(self.threshold))
        mrp_ratio_threshold = m_mrp_threshold / c_mrp_threshold
        # Prepare fitting data
        # Maximum data yields incorrect MRP ratio (always 1)
        self.maxima_inlier_mask[-1] = False
        # Exclude data below threshold
        self.maxima_inlier_mask[self.maxima < self.threshold] = False
        t_emp = t_emp[self.maxima_inlier_mask]
        mrp_ratio_emp = mrp_ratio_emp[self.maxima_inlier_mask]
        t_emp = np.concatenate((t_emp, [t_threshold]))  # Append threshold
        mrp_ratio_emp = np.concatenate((mrp_ratio_emp, [mrp_ratio_threshold]))
        sigma = np.ones(t_emp.shape)
        sigma[-1] = 1 / len(sigma)  # Set the threshold point for more weight
        # Fitting MRP ratio ~ t
        def func(t, a, b, c):
            return (a * t + c) ** b
        # Bounds keep a, b non-negative and c >= 1 so the ratio stays >= 1.
        popt, _ = curve_fit(func, t_emp, mrp_ratio_emp, bounds=(
            [0, 0, 1], np.inf), sigma=sigma, max_nfev=1e5)
        # Convert tail MRP
        m_sample_F = self.maxima_dist.cdf(self.sample_coor)
        m_sample_F[self.sample_coor < self.threshold] = np.nan
        with np.errstate(divide='ignore'):
            m_sample_mrp = 1 / m_rate / (1 - m_sample_F)
        # Divide the maxima MRP by the fitted ratio to get the continuous MRP.
        c_sample_mrp = m_sample_mrp / func(-np.log(m_sample_F), *popt)
        c_sample_F = 1 - 1 / c_rate / c_sample_mrp
        # Record results
        self.m_rate = m_rate
        self.c_rate = c_rate
        self.tail_F = c_sample_F
        if plot_diagnosis:
            # MRP ratio fitting
            ax = self.diag_fig.add_subplot(1, 3, 2, label=self.label)
            sample_t = np.linspace(0, 3.5, 100)
            ax.plot(t_emp[:-1], mrp_ratio_emp[:-1], 'k.',
                    markersize=8, label='Empirical')
            ax.plot(t_threshold, mrp_ratio_threshold, 'rx',
                    markersize=10, label='Connecting point')
            ax.plot(sample_t, func(sample_t, *popt), 'r-', label='Fit')
            ax.set_xlim([0, 3.5])
            ax.set_xlabel('t(X)')
            ax.set_ylabel('MRP ratio')
            ax.set_title(f'MRP ratio ({self.label} tail)')
            ax.grid(True)
            # ax.legend(loc='lower right')
            # Maxima to continuous conversion
            ax = self.diag_fig.add_subplot(1, 3, 3, label=self.label)
            ax.plot(m_mrp_emp, m_data, '.', color=[
                1, 0.4, 0.4], markersize=8, label='Maxima')
            ax.plot(c_mrp_emp[c_data >= m_data.min()],
                    c_data[c_data >= m_data.min()], '.', color=[0.4, 0.4, 1],
                    markersize=8, label='Continuous')
            ax.set_xscale('log')
            xlm = ax.get_xlim()
            ylm = ax.get_ylim()
            ax.plot(m_sample_mrp, self.sample_coor, 'r-', label='Maxima fit')
            ax.plot(c_sample_mrp, self.sample_coor,
                    'b-', label='Continuous fit')
            ax.plot(xlm, self.threshold * np.array([1, 1]), 'k--')
            ax.set_xlim(xlm)
            ax.set_ylim([m_data.min(), ylm[1]])
            ax.set_xlabel('Return period (year)')
            ax.set_ylabel('X')
            ax.set_title(f'Tail extrap. ({self.label} tail)')
            ax.grid(True, which='both')
            # ax.legend(loc='upper left')
            # if self.notebook_backend:
            #     plt.close()
if __name__ == '__main__':
    # Ad-hoc manual test: load a pickled DataFrame shipped with the repo
    # and fit its first column with a parametric bulk fit.
    import os
    import pickle
    dirname = os.path.dirname(__file__)
    data_path = os.path.join(dirname, '../datasets/D.pkl')
    with open(data_path, 'rb') as f:
        df = pickle.load(f)
    data = df.iloc[:, 0]
    urv = Univariate(data, sample_coor=np.linspace(0, 2*data.max(), 1000))
    urv.fit(bulk_fit='Parametric')
    urv.plot_diagnosis()
|
import json
import urllib.request, urllib.parse
import subprocess
from bs4 import BeautifulSoup
from inky import InkyPHAT
from PIL import Image, ImageFont, ImageDraw
from font_fredoka_one import FredokaOne
# Fetch the public Instagram profile page. A desktop browser User-Agent is
# sent because the default urllib UA gets blocked.
# NOTE(review): this scrapes the window._sharedData blob embedded in the
# page HTML; it breaks whenever Instagram changes the page structure or
# requires login — confirm it still works before deploying.
req = urllib.request.Request('http://www.instagram.com/sj___156')
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36')
html = urllib.request.urlopen(req).read()
response = BeautifulSoup(html, 'html.parser')
# Strip the JS assignment wrapper to leave bare JSON.
jsonObject = response.select("body > script:nth-of-type(1)")[0].text.replace('window._sharedData =','').replace(';','')
data = json.loads(jsonObject)
# Profile counters pulled from the sharedData JSON. Only `followed` is
# rendered below; the other three are currently unused.
following = data['entry_data']['ProfilePage'][0]['graphql']['user']['edge_follow']['count']
followed = data['entry_data']['ProfilePage'][0]['graphql']['user']['edge_followed_by']['count']
posts = data['entry_data']['ProfilePage'][0]['graphql']['user']['edge_owner_to_timeline_media']['count']
username = data['entry_data']['ProfilePage'][0]['graphql']['user']['edge_owner_to_timeline_media']['edges'][0]['node']['owner']['username']
# check value populated
print(followed)
# set display to be variable of inky_display
inky_display = InkyPHAT("red")
# set the border to be white around the display
inky_display.set_border(inky_display.WHITE)
# Mode "P" (palettized) is what the Inky display driver expects.
img = Image.new("P", (inky_display.WIDTH, inky_display.HEIGHT))
draw = ImageDraw.Draw(img)
# set font try to experiment with different font
# NOTE(review): ImageFont.getsize was removed in Pillow 10; newer Pillow
# needs getbbox/getlength instead — verify the pinned Pillow version.
font = ImageFont.truetype(FredokaOne, 22)
line1 = 'Insta followers:'
line2 = str(followed)
line_padding = 6
# Center line 1 horizontally, placed just above the vertical midline.
w1, h1 = font.getsize(line1)
x = (inky_display.WIDTH / 2) - (w1 / 2)
y = (inky_display.HEIGHT / 2) - h1 - (line_padding / 2)
draw.text((x, y), line1, inky_display.RED, font)
# Center line 2 horizontally, placed just below the vertical midline.
w2, h2 = font.getsize(line2)
x = (inky_display.WIDTH / 2) - (w2 / 2)
y = (inky_display.HEIGHT / 2) + (line_padding / 2)
draw.text((x, y), line2, inky_display.BLACK, font)
# Push the rendered image to the e-ink panel.
inky_display.set_image(img)
inky_display.show()
|
#!/usr/bin/env python3.8
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Use scrutiny to verify a build."""
import argparse
import difflib
import json
import os
import re
import shlex
import subprocess
import sys
import tempfile
# ZBI item types this script knows how to verify (see --type argument).
SUPPORTED_TYPES = ['kernel_cmdline', 'bootfs_filelist', 'static_pkgs']
# Appended to mismatch error messages to walk developers through the
# two-golden-file soft-transition workflow.
SOFT_TRANSITION_MESSAGE = """
If you are making a change in fuchsia.git that causes this, you need to perform a soft transition:
1: Instead of adding lines as written above, add each line prefixed with a question mark to mark it as transitional.
2: Instead of removing lines as written above, prefix the line with a question mark to mark it as transitional.
3: Check in your fuchsia.git change.
4: For each new line you added in 1, remove the question mark.
5: For each existing line you modified in 2, remove the line.
"""
class GoldenFileChecker(object):
    """Compares observed build contents against a golden file.

    Golden file grammar (one entry per line):
      - lines starting with '#' are comments and ignored;
      - lines starting with '?' are optional (transitional) entries;
      - all other lines are required entries.
    """

    def __init__(self, path):
        self._path = path
        self._checked = False
        try:
            with open(path, "r") as f:
                self.orig_lines = f.read().strip().splitlines()
        except IOError as e:
            raise VerificationError(f'Failed to open golden file: {e}')
        # Parse file contents.
        self.required_contents = set()
        self.optional_contents = set()
        for entry in self.orig_lines:
            if entry.startswith("#"):
                # A leading # marks a comment; skip the whole line.
                continue
            if entry.startswith("?"):
                # A leading ? marks the noun as optional: its presence or
                # absence in the build is both acceptable.
                self.optional_contents.add(entry[1:])
            else:
                self.required_contents.add(entry)
        # Prepare records.
        self.observed_and_not_permitted = []
        self.observed = set()

    def check_match(self, observed_contents):
        """Check observed lines against the golden sets; return error strings.

        Single-use: asserts it has not been called before. Verifies that
        every observed line is permitted (required or optional) and that
        every required line was observed.
        """
        assert not self._checked
        self._checked = True
        permitted = self.required_contents | self.optional_contents
        for entry in observed_contents:
            if entry in permitted:
                self.observed.add(entry)
            else:
                self.observed_and_not_permitted.append(entry)
        errors = [
            f"'{extra}' is not listed in {self._path} but was found in the build. If the addition to the build was intended, add a line '{extra}' to {self._path}."
            for extra in sorted(self.observed_and_not_permitted)
        ]
        errors.extend(
            f"'{missing}' was declared as required in {self._path} but was not found in the build. If the removal from the build was intended, update {self._path} to remove the line '{missing}'."
            for missing in sorted(self.required_contents - self.observed)
        )
        return errors
def print_error(msg):
    """Write *msg* followed by a newline to standard error."""
    sys.stderr.write(f"{msg}\n")
def main(input_args):
    """Parse arguments, verify the build, and return a process exit code.

    Returns 0 on success; 1 when verification fails or when the argument
    constraints are violated. Writes the stamp file only on success.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--zbi-file', help='Path to the zbi to verify', required=True)
    parser.add_argument(
        '--blobfs-manifest',
        help='Path to blobfs manifest file, required for "static_pkgs"',
        required=False)
    parser.add_argument(
        '--scrutiny',
        help='Path to the scrutiny tool used for verifying kernel cmdline',
        required=True)
    parser.add_argument(
        '--far',
        help=(
            'Path to the far tool used for extracting package, ' +
            'required for "static_pkgs"'),
        required=False)
    parser.add_argument(
        '--golden-files',
        help=(
            'Path to one of the possible golden files to check against, ' +
            'there should only be one golden file in normal case, and only ' +
            'two golden files, one old file and one new file during a soft ' +
            'transition. After the transition, the old golden file should ' +
            'be removed and only leave the new golden file.'),
        nargs='+',
        required=True)
    parser.add_argument(
        '--stamp', help='Path to the victory file', required=True)
    parser.add_argument(
        '--type',
        help=('The type of the ZBI item to verify'),
        choices=SUPPORTED_TYPES,
        required=True)
    parser.add_argument(
        '--depfile',
        help=(
            'Optional generated depfile listing dynamic deps for the script' +
            ', required for "static_pkgs"'),
        required=False)
    args = parser.parse_args(input_args)
    if len(args.golden_files) > 2:
        print_error(
            'At most two optional golden files are supported, ' +
            'is there a soft transition already in place? Please wait for ' +
            'that to finish before starting a new one.')
        # Fix: previously this error was only printed and verification
        # continued anyway; fail fast so the constraint is actually enforced.
        return 1
    try:
        verify_build(args)
    except VerificationError as e:
        print_error(str(e))
        return 1
    # Success: write the stamp file the build system watches for.
    with open(args.stamp, 'w') as stamp_file:
        stamp_file.write('Golden!\n')
    return 0
def verify_build(args):
    """verify_build verifies a build against specified golden files.

    Extracts the ZBI with scrutiny into a temporary directory, then runs
    the type-specific verifier against each golden file in turn. The build
    passes if it matches ANY golden file (this is what enables the
    two-golden-file soft-transition workflow); the last failure is
    re-raised only when every golden file mismatches.

    Raises:
      VerificationError: If verification fails.
    """
    # Check for some necessary files/dirs exist first.
    for file in [args.scrutiny, args.zbi_file]:
        if not os.path.exists(file):
            raise VerificationError('Missing required file: ' + file)
    with tempfile.TemporaryDirectory() as tmp:
        # Extract the ZBI contents into tmp for the verifiers to inspect.
        run_scrutiny_command(
            args.scrutiny, ' '.join(
                [
                    'tool.zbi.extract', '--input',
                    shlex.quote(args.zbi_file), '--output',
                    shlex.quote(tmp)
                ]))
        last_error = None
        for golden_file in args.golden_files:
            try:
                if args.type == 'kernel_cmdline':
                    verify_kernel_cmdline(golden_file, tmp)
                elif args.type == 'bootfs_filelist':
                    verify_bootfs_filelist(golden_file, tmp)
                elif args.type == 'static_pkgs':
                    verify_static_pkgs(args, golden_file, tmp)
                # Passes the verification, no error thrown.
                return
            except VerificationError as e:
                # Error thrown, we want to record this error and check next
                # golden_file.
                last_error = e
        raise last_error
def verify_kernel_cmdline(kernel_cmdline_golden_file, scrutiny_out):
    """verify_kernel_cmdline verifies the kernel cmdline in ZBI image.

    Reads the extracted cmdline section (if present), normalizes it into a
    sorted key[=value] list, and compares it against the golden file. A
    missing cmdline section is compared as an empty cmdline.

    Raises:
      VerificationError: If verification fails.
    """
    gf_checker = GoldenFileChecker(kernel_cmdline_golden_file)
    actual_cmd = []
    cmdline_path = os.path.join(scrutiny_out, 'sections', 'cmdline.blk')
    if os.path.exists(cmdline_path):
        try:
            with open(cmdline_path, 'r') as f:
                # The cmdline.blk contains a trailing \x00.
                cmdline = f.read().strip().rstrip('\x00')
        except IOError as e:
            raise VerificationError(f'Failed to read cmdline.blk: {e}')
        cmdline_args = cmdline.split(' ')
        try:
            actual_cmd = generate_sorted_cmdline(cmdline_args)
        except CmdlineFormatError as e:
            # The malformed cmdline came from the ZBI image, not the golden
            # file; the previous message ("Invalid golden cmdline format")
            # blamed the wrong input.
            raise VerificationError(f'Invalid kernel cmdline format: {e}')
    errors = gf_checker.check_match(actual_cmd)
    if len(errors) > 0:
        error_msgs = ['Kernel cmdline mismatch!']
        error_msgs.append('')
        error_msgs.extend(errors)
        error_msgs.append('')
        error_msgs.append(f'If you intended to change the kernel command line, please acknowledge it by updating {kernel_cmdline_golden_file} with the added or removed lines.')
        error_msgs.append(SOFT_TRANSITION_MESSAGE)
        raise VerificationError('\n'.join(error_msgs))
def verify_bootfs_filelist(bootfs_filelist_golden_file, scrutiny_out):
    """verify_bootfs_filelist verifies the bootFS filelist in ZBI image.

    Walks the extracted bootfs directory and compares the sorted list of
    relative file paths against the golden file.

    Raises:
      VerificationError: If verification fails.
    """
    gf_checker = GoldenFileChecker(bootfs_filelist_golden_file)
    bootfs_folder = os.path.join(scrutiny_out, 'bootfs')
    bootfs_files = []
    try:
        for root, _, files in os.walk(bootfs_folder):
            for file in files:
                bootfs_files.append(
                    os.path.relpath(os.path.join(root, file), bootfs_folder))
    except IOError as e:
        raise VerificationError(f'Failed to walk bootfs folder: {e}')
    # Sort so the comparison does not depend on os.walk ordering. The
    # previous code computed this sorted list but then mistakenly passed
    # the unsorted `bootfs_files` to check_match.
    got_content = sorted(bootfs_files)
    errors = gf_checker.check_match(got_content)
    if len(errors) > 0:
        error_msgs = ['BootFS file list mismatch!']
        error_msgs.append('')
        error_msgs.extend(errors)
        error_msgs.append('')
        error_msgs.append(f'If you intended to change the bootfs contents, please acknowledge it by updating {bootfs_filelist_golden_file} with the added or removed lines.')
        error_msgs.append(SOFT_TRANSITION_MESSAGE)
        raise VerificationError('\n'.join(error_msgs))
def verify_static_pkgs(
    args,
    golden_file,
    scrutiny_out,
):
    """verify_static_pkgs verifies static packages list.

    Locates the system_image package via the devmgr config and the blobfs
    manifest, extracts it, reads its data/static_packages index, and
    compares the package names against the golden file. The blobs read
    during the check are recorded in a depfile.

    Raises:
      VerificationError: If verification fails.
    """
    # Blobs consumed by this check; written to the depfile at the end.
    deps = []
    # These options are only required for the "static_pkgs" check type.
    if not args.blobfs_manifest:
        raise VerificationError(
            '"blobfs-manifest" must be specified for "static_pkgs" check')
    if not args.far:
        raise VerificationError(
            '"far" must be specified for "static_pkgs" check')
    if not args.depfile:
        raise VerificationError(
            '"depfile" must be specified for "static_pkgs" check')
    # The devmgr config inside the extracted ZBI names the system image
    # package by its merkle root hash.
    try:
        system_image_hash = get_system_image_hash(scrutiny_out)
    except IOError as e:
        raise VerificationError(f'Failed to get devmgr config: {e}')
    except KeyError as e:
        raise VerificationError(f'Invalid devmgr config: {e}')
    # blob_manifest maps merkle root -> blob path relative to the manifest.
    try:
        blob_manifest = parse_key_value_file(args.blobfs_manifest)
    except IOError as e:
        raise VerificationError(f'Failed to open blob manifest: {e}')
    try:
        system_image_blob = os.path.join(
            os.path.dirname(args.blobfs_manifest),
            blob_manifest[system_image_hash])
        # Add system_image_blob as dynamic dependency.
        deps.append(system_image_blob)
    except KeyError as e:
        raise VerificationError(f'System image blob not found: {e}')
    system_image_folder = os.path.join(scrutiny_out, 'system_image')
    try:
        extract_package(args.far, system_image_blob, system_image_folder)
    except subprocess.CalledProcessError as e:
        raise VerificationError(
            f'Failed to extract system_image package: {e.stderr}')
    # meta/contents maps package-internal paths to blob merkle roots; the
    # static packages index itself is stored as one of those blobs.
    try:
        static_packages_hash = parse_key_value_file(
            os.path.join(system_image_folder, 'meta',
            'contents'))['data/static_packages']
    except KeyError:
        raise VerificationError(
            'No "data/static_packages" found in "system_image"')
    except IOError as e:
        raise VerificationError(
            f'Failed to read system_image/meta/contents file: {e}')
    try:
        static_packages_blob = os.path.join(
            os.path.dirname(args.blobfs_manifest),
            blob_manifest[static_packages_hash])
        # Add static_packages_blob as dynamic dependency.
        deps.append(static_packages_blob)
    except KeyError as e:
        raise VerificationError(f'Static pkgs blob not found: {e}')
    try:
        with open(static_packages_blob, 'r') as f:
            static_packages_content = f.read().strip()
    except IOError as e:
        raise VerificationError(f'Failed to read static packages blob: {e}')
    # Write depfile.
    try:
        with open(args.depfile, 'w') as f:
            f.write(args.stamp + ': ' + ' '.join(deps) + '\n')
    except IOError as e:
        raise VerificationError(f'Failed to write depfile: {e}')
    pkgs = []
    for pkg in static_packages_content.splitlines():
        # Entries appear to be "<name>/<version>=<merkle>"; keep only the
        # name part -- TODO confirm the index format.
        pkgs.append(re.split(r'/[0-9]=', pkg)[0])
    gf_checker = GoldenFileChecker(golden_file)
    errors = gf_checker.check_match(sorted(pkgs))
    if len(errors) > 0:
        error_msgs = ['Static packages list mismatch!']
        error_msgs.append('')
        error_msgs.extend(errors)
        error_msgs.append('')
        error_msgs.append(f'If you intended to change the list of static packages, please acknowledge it by updating {golden_file} with the added or removed lines.')
        error_msgs.append(SOFT_TRANSITION_MESSAGE)
        raise VerificationError('\n'.join(error_msgs))
def get_system_image_hash(scrutiny_out):
    """Return the system image merkle root from the devmgr config.

    Args:
      scrutiny_out: the scrutiny output directory.

    Raises:
      IOError: If fails to read devmgr config.
      KeyError: If the config entry for system image hash is not found.
    """
    config_path = os.path.join(scrutiny_out, 'bootfs', 'config', 'devmgr')
    config_entries = parse_key_value_file(config_path)
    # The merkle root is the argument after "bin/pkgsvr+" in the pkgfs
    # launch command.
    pkgfs_cmd = config_entries['zircon.system.pkgfs.cmd']
    return pkgfs_cmd.replace('bin/pkgsvr+', '')
def run_scrutiny_command(scrutiny_path, command):
    """Runs scrutiny command.

    Args:
      scrutiny_path: The path to the scrutiny tool.
      command: The scrutiny command to run.

    Raises:
      VerificationError: If the command fails or the output is not
        '{"status":"ok"}'.
    """
    try:
        output = subprocess.run(
            [scrutiny_path, '-c', command], capture_output=True,
            check=True).stdout
    except subprocess.CalledProcessError as e:
        # Fixed typo: `e.stederr` raised AttributeError here instead of
        # reporting the scrutiny failure; the attribute is `stderr`.
        raise VerificationError(f'Failed to run scrutiny: {e.stderr}')
    try:
        if json.loads(output)['status'] != 'ok':
            raise VerificationError(f'Unexpected scrutiny output: {output}')
    except (KeyError, json.JSONDecodeError) as e:
        raise VerificationError(f'Unexpected scrutiny output: {e}')
def extract_package(far_path, package_path, output_dir):
    """Extract a package from a blob using "fx far extract".

    Args:
      far_path: The path to far tool.
      package_path: The path to the package blob file.
      output_dir: The output directory to put the extracted package.

    Raises:
      subprocess.CalledProcessError: If failed to extract.
    """
    command = [
        far_path,
        'extract',
        '--archive=' + package_path,
        '--output=' + output_dir,
    ]
    # check=True turns a non-zero exit status into CalledProcessError;
    # output is captured so the caller can report stderr.
    subprocess.run(command, capture_output=True, check=True)
class CmdlineFormatError(Exception):
    """Raised when a kernel cmdline is in an invalid format."""

    def __init__(self, msg):
        super().__init__()
        # Human-readable description returned by __str__.
        self.msg = msg

    def __str__(self):
        return self.msg
class VerificationError(Exception):
    """Raised when a golden-file verification fails."""

    def __init__(self, msg):
        super().__init__()
        # Human-readable description returned by __str__.
        self.msg = msg

    def __str__(self):
        return self.msg
def generate_sorted_cmdline(cmdline_args):
    """Return the kernel cmdline entries sorted by entry key.

    Each entry is either "key" or "key=value"; bare keys are emitted
    without the "=".

    Raises:
      CmdlineFormatError: If the kernel cmdline is not formatted correctly.
    """
    entries = {}
    for item in cmdline_args:
        # More than one '=' means the entry is not a simple key=value pair.
        if item.count('=') > 1:
            raise CmdlineFormatError(
                'invalid kernel cmdline, key value pair: ' + item)
        key, _, value = item.partition('=')
        if key in entries:
            raise CmdlineFormatError('duplicate kernel cmdline key: ' + key)
        entries[key] = value
    result = []
    for key in sorted(entries):
        value = entries[key]
        result.append('%s=%s' % (key, value) if value else key)
    return result
def parse_key_value_file(file_path):
    """Parses a file in 'key=value' format.

    Lines that do not contain exactly one '=' are silently skipped.

    Args:
      file_path: The path to the file.

    Returns:
      A {key:value} map

    Raises:
      IOError: if failed to read the file.
    """
    result = {}
    with open(file_path, 'r') as handle:
        text = handle.read()
    for line in text.splitlines():
        parts = line.split('=')
        if len(parts) == 2:
            result[parts[0]] = parts[1]
    return result
if __name__ == '__main__':
    # Propagate main()'s return code (0 on success, 1 on failure) to the shell.
    sys.exit(main(sys.argv[1:]))
|
#To plot los signal for 4 given los for a given offset, polystr and tau
import numpy as np
import matplotlib.pyplot as plt
import argparse
import sys
######################################################
#### Choose (x,y) for plotting los signal
los_array=[[34,54], [12,183], [234,145], [218,87]]
######################################################
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--polystr", type=int, default=-1, help="Exponents in the terms of the fitting polynomial")
parser.add_argument("-o", "--offset", type=float, default=-1, help="Offset added to the original cube")
parser.add_argument("-t", "--tau", type=float, default=-1, help="Integration time (in hrs)")
args = parser.parse_args()
# All three options are effectively mandatory; the -1 defaults mark
# "not provided" and abort the run.
if any(i==-1 for i in [args.polystr,args.offset,args.tau]):
    print("Invalid Input.\nRun `python3 plot_los_signal.py --help` for more info")
    sys.exit()
def load_binary_data(filename, dtype=np.float64):
    """Read a flat binary file into a 1-D numpy array.

    The on-disk byte order is assumed to be little-endian (values are
    byte-swapped on big-endian hosts, matching the original behavior).

    Args:
        filename: path to the binary file.
        dtype: numpy dtype of the stored values (default float64).

    Returns:
        1-D numpy array with the file's contents.
    """
    # np.fromfile replaces the deprecated open()/read()/np.fromstring
    # pattern and also avoids leaving the file handle open.
    values = np.fromfile(filename, dtype=dtype)
    if sys.byteorder == 'big':
        values = values.byteswap()
    return values
##############################################################
####Plotting starts here
##############################################################
freq=np.loadtxt("../ska_data/frequency.dat", unpack=True)
# Convert the frequency axis to MHz for plotting.
freq=[i/1e6 for i in freq]
#Plan:
#recovered/total[0]=eor_p_fg signal
#recovered/total[1]=eor_p_noise signal
#recovered/total[2]=eor_p_noise_p_fg signal
recovered=[None,None,None]
total=[None,None,None]
k=0
for i in ["eor_p_fg", "eor_p_noise", "eor_p_noise_p_fg"]:
    file1="../results/ska_cube/cube/%s_recovered_21cm_signal_XY_300_%.1f_hrs_Npol_%d_offset_%.6f.bin" % (i, args.tau, args.polystr, args.offset)
    file2="../ska_data/%s_pre_total_field_300_%.1f_hr-image.bin" % (i, args.tau)
    recovered[k]=load_binary_data(file1)
    total[k]=load_binary_data(file2)
    k+=1
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
for los in los_array:
    y1=[]; y2=[]; y3=[]; y4=[]; y5=[]; y6=[]
    i=los[0]
    j=los[1]
    # The cubes are indexed as [channel + 21*(y + 300*x)]: 21 frequency
    # channels on a 300x300 sky grid -- TODO confirm the flattening order.
    for k in range(21):
        y1.append(recovered[0][k+21*(j+300*i)])
        y2.append(recovered[1][k+21*(j+300*i)])
        y3.append(recovered[2][k+21*(j+300*i)])
        y4.append(total[0][k+21*(j+300*i)])
        y5.append(total[1][k+21*(j+300*i)])
        y6.append(total[2][k+21*(j+300*i)])
    # Figure 1: the three recovered signals for this line of sight.
    ax.plot(freq,y1,"r--",label="eor+fg")
    ax.plot(freq,y2,"b:",label="eor+noise")
    ax.plot(freq,y3,"g-.",label="eor+noise+fg")
    plt.xlabel("Frequency [MHz]")
    plt.ylabel(r"$T_{21}$ [K]")
    plt.tight_layout()
    plt.legend(loc="best")
    fig.savefig("ska/los_1/los_x_%d_y_%d_recovered_all_tau%.1f_polystr%d_offset%.6f.png"%(i,j,args.tau,args.polystr,args.offset))
    plt.cla()
    # Figure 2: the three total (pre-subtraction) signals.
    ax.plot(freq,y4,"r--",label="eor+fg")
    ax.plot(freq,y5,"b:",label="eor+noise")
    ax.plot(freq,y6,"g-.",label="eor+noise+fg")
    plt.xlabel("Frequency [MHz]")
    plt.ylabel(r"$T_{21}$ [K]")
    plt.tight_layout()
    plt.legend(loc="best")
    fig.savefig("ska/los_1/los_x_%d_y_%d_total_all_tau%.1f_polystr%d_offset%.6f.png"%(i,j,args.tau,args.polystr,args.offset))
    plt.cla()
    #Setting average of all the fields to zero
    ave=sum(y1)/len(y1)
    y1=[i-ave for i in y1]
    ave=sum(y2)/len(y2)
    y2=[i-ave for i in y2]
    ave=sum(y3)/len(y3)
    y3=[i-ave for i in y3]
    ave=sum(y4)/len(y4)
    y4=[i-ave for i in y4]
    ave=sum(y5)/len(y5)
    y5=[i-ave for i in y5]
    ave=sum(y6)/len(y6)
    y6=[i-ave for i in y6]
    # NOTE(review): the list comprehensions above rebind `i`, so the `i`
    # used in the savefig filenames below is no longer the los x index --
    # verify the intended filenames.
    # Figures 3-5: recovered vs total for each field, mean-subtracted.
    ax.plot(freq,y1,"r--",label="eor+fg, recovered")
    ax.plot(freq,y4,"b:",label="eor+fg, total")
    plt.xlabel("Frequency [MHz]")
    plt.ylabel(r"$T_{21}$ [K]")
    plt.tight_layout()
    plt.legend(loc="best")
    fig.savefig("ska/los_1/los_x_%d_y_%d_eor_p_fg_tau%.1f_polystr%d_offset%.6f.png"%(i,j,args.tau,args.polystr,args.offset))
    plt.cla()
    ax.plot(freq,y2,"r--",label="eor+noise, recovered")
    ax.plot(freq,y5,"b:",label="eor+noise, total")
    plt.xlabel("Frequency [MHz]")
    plt.ylabel(r"$T_{21}$ [K]")
    plt.tight_layout()
    plt.legend(loc="best")
    fig.savefig("ska/los_1/los_x_%d_y_%d_eor_p_noise_tau%.1f_polystr%d_offset%.6f.png"%(i,j,args.tau,args.polystr,args.offset))
    plt.cla()
    ax.plot(freq,y3,"r--",label="eor+noise+fg, recovered")
    ax.plot(freq,y6,"b:",label="eor+noise+fg, total")
    plt.xlabel("Frequency [MHz]")
    plt.ylabel(r"$T_{21}$ [K]")
    plt.tight_layout()
    plt.legend(loc="best")
    fig.savefig("ska/los_1/los_x_%d_y_%d_eor_p_noise_fg_tau%.1f_polystr%d_offset%.6f.png"%(i,j,args.tau,args.polystr,args.offset))
    plt.cla()
import RPi.GPIO as GPIO
import time
# Pin Definitons:
# BCM GPIO numbers wired to the driver's ENA/DIR/PUL inputs (per the
# print message below).
enablePin = 2
directionPin = 3
pulsePin = 4
# setup gpio
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)  # Broadcom (BCM) pin numbering, not board numbering
GPIO.setup(enablePin, GPIO.OUT)
GPIO.setup(directionPin, GPIO.OUT)
GPIO.setup(pulsePin, GPIO.OUT)
# disable stepper
GPIO.output(enablePin, GPIO.LOW)
# set direction pin LOW
GPIO.output(directionPin, GPIO.LOW)
# set pulse pin low
GPIO.output(pulsePin, GPIO.LOW)
print ('setting all pins low: ENA-, DIR- and PUL-')
|
# NOTE(review): this print runs on every import. The triple-quoted string
# below it is not a real module docstring because it does not come first
# in the module; consider moving it above the print.
print(f"imported mw_url_shortener.api.redirects as {__name__}")
"""
Manages the redirects portion of the API
"""
from fastapi import APIRouter, Body
from ..database.entities import RedirectEntity
from ..database.interface import add_redirect
from ..database.models import Redirect
# Version-1 router for the redirect endpoints.
router_v1 = APIRouter()

@router_v1.post("/", response_model=Redirect)
async def create(new_redirect: Redirect = Body(...)) -> RedirectEntity:
    """Create a new redirect from the request body and persist it.

    The entity returned by add_redirect is serialized through the
    `Redirect` response model.
    """
    return add_redirect(redirect=new_redirect)
@router_v1.get("/")
async def read() -> None:
    """Read redirects. Not implemented yet."""
    raise NotImplementedError()
@router_v1.patch("/")
async def update() -> None:
    """Update a redirect. Not implemented yet."""
    raise NotImplementedError()
@router_v1.delete("/")
async def delete() -> None:
    """Delete a redirect. Not implemented yet."""
    raise NotImplementedError()
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def findBottomLeftValue(self, root: "TreeNode") -> int:
        """Return the value of the leftmost node in the deepest row.

        DFS visiting left children before right ones: the first node seen
        at each new depth is the leftmost node of that row, so the last
        recorded value belongs to the deepest row. This replaces the
        previous (-row, col, val) sort, whose tie-break on `val` could pick
        the wrong node when two deepest leaves shared a column index.
        """
        # best[0] = deepest row seen so far, best[1] = value of its
        # leftmost (first-visited) node.
        best = [-1, None]

        def dfs(node, row):
            if node is None:
                return
            if row > best[0]:
                best[0] = row
                best[1] = node.val
            dfs(node.left, row + 1)
            dfs(node.right, row + 1)

        dfs(root, 0)
        return best[1]
|
# Generated by Django 3.2.12 on 2022-04-04 15:23
import api.models
import authlib.oauth2.rfc6749.models
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial schema; avoid hand-editing the field
    # definitions below.

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        # Custom user model: email is the unique login field; the
        # HashidsModelMixin base suggests hashid-encoded public IDs.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('username', models.CharField(max_length=32)),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
                ('is_confirmed', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            bases=(api.models.HashidsModelMixin, models.Model),
        ),
        # Per-user console sessions identified by UUID with a rotating token.
        migrations.CreateModel(
            name='Console',
            fields=[
                ('uuid', models.UUIDField(primary_key=True, serialize=False)),
                ('token', models.UUIDField(default=uuid.uuid4)),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('used', models.DateTimeField(null=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # OAuth2 client registration (authlib ClientMixin provides the
        # protocol behavior).
        migrations.CreateModel(
            name='OAuth2Client',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('client_id', models.UUIDField(unique=True)),
                ('client_secret', models.UUIDField()),
                ('client_name', models.CharField(max_length=120)),
                ('website_uri', models.URLField(max_length=256, null=True)),
                ('description', models.TextField(null=True)),
                ('redirect_uris', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=256), size=None)),
                ('default_redirect_uri', models.CharField(max_length=256, null=True)),
                ('scope', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=24), null=True, size=None)),
                ('response_types', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=32), null=True, size=None)),
                ('grant_types', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=32), default=api.models.grant_types_default, size=None)),
                ('token_endpoint_auth_method', models.CharField(choices=[('client_secret_post', 'client_secret_post'), ('client_secret_basic', 'client_secret_basic')], default='client_secret_post', max_length=120)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'OAuth2 Client',
            },
            bases=(api.models.HashidsModelMixin, models.Model, authlib.oauth2.rfc6749.models.ClientMixin),
        ),
        # SSH public keys attached to a console session and its owner.
        migrations.CreateModel(
            name='SSHKey',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.UUIDField(default=uuid.uuid4, unique=True)),
                ('key', models.TextField(max_length=2000)),
                ('type', models.CharField(max_length=20)),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('console', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sshkeys', to='api.console')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sshkeys', to=settings.AUTH_USER_MODEL)),
            ],
            bases=(api.models.HashidsModelMixin, models.Model),
        ),
        # Issued OAuth2 access/refresh tokens (authlib TokenMixin).
        migrations.CreateModel(
            name='OAuth2Token',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('token_type', models.CharField(max_length=40)),
                ('access_token', models.CharField(max_length=255, unique=True)),
                ('refresh_token', models.CharField(db_index=True, max_length=255)),
                ('scope', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=24), null=True, size=None)),
                ('revoked', models.BooleanField(default=False)),
                ('issued_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('expires_in', models.IntegerField(default=0)),
                ('client', models.ForeignKey(db_column='client', on_delete=django.db.models.deletion.CASCADE, to='api.oauth2client', to_field='client_id')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            bases=(api.models.HashidsModelMixin, models.Model, authlib.oauth2.rfc6749.models.TokenMixin),
        ),
        # Authorization-code grants (authlib AuthorizationCodeMixin).
        migrations.CreateModel(
            name='OAuth2Code',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=120, unique=True)),
                ('redirect_uri', models.TextField(null=True)),
                ('response_type', models.TextField(null=True)),
                ('scope', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=24), null=True, size=None)),
                ('auth_time', models.DateTimeField(default=django.utils.timezone.now)),
                ('nonce', models.CharField(max_length=120, null=True)),
                ('client', models.ForeignKey(db_column='client', on_delete=django.db.models.deletion.CASCADE, to='api.oauth2client', to_field='client_id')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            bases=(api.models.HashidsModelMixin, models.Model, authlib.oauth2.rfc6749.models.AuthorizationCodeMixin),
        ),
        # Hostnames (with resolved addresses) attached to a console session.
        migrations.CreateModel(
            name='Hostname',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, unique=True)),
                ('addresses', django.contrib.postgres.fields.ArrayField(base_field=models.GenericIPAddressField(), null=True, size=None)),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('console', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hostnames', to='api.console')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hosts', to=settings.AUTH_USER_MODEL)),
            ],
            bases=(api.models.HashidsModelMixin, models.Model),
        ),
    ]
|
# Command prefix keyed by what appears to be a Discord guild (server) ID --
# presumably consulted by the bot when dispatching messages; TODO confirm
# against the consumer of this map.
CMDPrefix = {
    '502781179368439808':'i!',
}
|
# Copyright (c) 2021 Jonas Thorsell
import sys

# Bucket the comma-separated input values by internal timer (0..8).
counts = [0] * 9
for timer in [int(v) for v in sys.stdin.readline().split(',')]:
    counts[timer] += 1

# Simulate 256 days: each day every timer drops by one; a timer that hits
# zero resets to 6 and also spawns a new entry with timer 8.
for _ in range(256):
    spawning = counts.pop(0)   # shift all timers down by one
    counts.append(spawning)    # newly spawned entries start at timer 8
    counts[6] += spawning      # the spawners themselves reset to timer 6

print(sum(counts))
|
from django.contrib.auth import get_user_model
from django.urls import reverse
import pytest
from pytest_django.asserts import assertContains
@pytest.fixture
def user(admin_client):
    """Create a regular (non-superuser) account for the admin-site tests."""
    user = get_user_model().objects.create_user(
        email='test@fake.com',
        password='password123',
        name='Test user full name'
    )
    yield user
class TestAdminSite:
    """Smoke tests for the custom user model's Django admin pages."""

    @pytest.mark.django_db
    def test_users_listed(self, admin_client, user):
        """Test that users are listed on user page"""
        url = reverse("admin:core_user_changelist")
        res = admin_client.get(url)
        assertContains(res, user.name)
        assertContains(res, user.email)

    def test_user_change_page(self, admin_client, user):
        """Test the user edit page works"""
        url = reverse('admin:core_user_change', args=[user.id])
        res = admin_client.get(url)
        assert res.status_code == 200

    def test_create_user_page(self, admin_client):
        """Test that the create user page works"""
        url = reverse('admin:core_user_add')
        res = admin_client.get(url)
        assert res.status_code == 200
|
from .quickperms import quickperms
|
#####################################################
# generate simulated data of sequence reads
#####################################################
import numpy as np
import pymc
import scipy.special as ss
def modelCNV(pos, filename=False):
    """Simulate a read-count profile made of copy-number segments.

    Positions are filled segment by segment; each segment gets a random
    state and Poisson-distributed counts at that state's rate.

    Args:
        pos: total number of positions to simulate.
        filename: unused; kept for backward compatibility.

    Returns:
        (profile, truestate): float arrays of length `pos` holding the
        simulated counts and the underlying state per position
        (1 = low rate, 2 = normal rate, 3 = high rate).
    """
    profile = np.empty(pos)
    truestate = np.empty(pos)
    current = 0
    rates = {
        1: 0.05,  # low rate
        2: 0.1,   # normal rate
        3: 0.2,   # high rate
    }
    while current < pos:
        # randint's upper bound is exclusive, so use 4 so that all three
        # states are reachable (the previous randint(1, 3) never produced
        # the high-rate state 3).
        state = np.random.randint(1, 4)
        # Draw a scalar (not a 1-element array) so round() below works on
        # Python 3, where round(ndarray) raises TypeError.
        prop = abs(np.random.normal(0, .1))
        # Guard against round(prop * pos) == 0, which would make randint
        # raise ValueError ("low >= high").
        length = np.random.randint(max(1, round(prop * pos)))
        if length < 2:
            length = 2
        data = np.random.poisson(rates[state], length)
        end = np.minimum(current + length, pos)
        added = end - current
        profile[current:end] = data[0:added]
        truestate[current:end] = state
        current += added
    return profile, truestate
#####################################################
# estimate rate of Poisson distrib from observations
#####################################################
def estimateRate(data, iter=1000):
    """Estimate the rate of a Poisson distribution from observations.

    Uses PyMC (2.x API) MCMC with a Uniform(0.1, 0.4) prior on the rate
    and returns the posterior mean of the sampled trace.

    Args:
        data: observed counts, treated as Poisson draws.
        iter: number of MCMC iterations; the first iter/10 are discarded
            as burn-in.
    """
    rate = pymc.Uniform("rate", lower=0.1, upper=0.4)
    # this also works if you estimate well lower and upper from control
    # DiscreteUniform("rate", lower=1, upper=3)
    reads = pymc.Poisson('reads', mu=rate, value=data, observed=True)
    M = pymc.Model([reads, rate])
    mcmc = pymc.MCMC(M)
    mcmc.sample(iter, iter/10, verbose=0)
    r_est = mcmc.trace("rate")[:]
    return np.mean(r_est)
def estimateRate_helper(args):
    # Unpack a single tuple argument into estimateRate -- presumably for
    # map-style APIs (e.g. multiprocessing.Pool.map) that pass one argument.
    return estimateRate(*args)
#####################################################
# compute credible region on a vector of reals
#####################################################
def bayes_CR_mu(D, sigma, frac=0.95):
    """Compute the credible region on the mean.

    Args:
        D: sample of real values (numpy array).
        sigma: known standard deviation of the observations.
        frac: credible-region mass (default 95%).

    Returns:
        (lower, upper) bounds of the credible region on the mean.
    """
    # Number of standard errors covering `frac` of a normal distribution.
    num_sigma = np.sqrt(2) * ss.erfinv(frac)
    sample_mean = D.mean()
    # Standard error of the mean: sigma / sqrt(N).
    mean_error = sigma * D.size ** -0.5
    half_width = num_sigma * mean_error
    return sample_mean - half_width, sample_mean + half_width
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 23 22:18:35 2021
@author: 1636740
"""
import pandas as pd
from flask import Flask, request, render_template
import pickle
# Pass the module's real __name__, not the literal string '__name__', so
# Flask resolves templates and static files relative to this module.
app = Flask(__name__)

q = ""
@app.route("/")
def loadPage():
    """Render the input form with an empty query."""
    return render_template('home_heroku.html', query="")
@app.route("/predict", methods=['POST'])
def predict():
    """Predict admission chance from the 7 form fields and re-render the page.

    Form values arrive as strings and are passed to the model unchanged --
    presumably the pickled model tolerates string inputs; TODO confirm.
    """
    inputQuery1 = request.form['query1']
    inputQuery2 = request.form['query2']
    inputQuery3 = request.form['query3']
    inputQuery4 = request.form['query4']
    inputQuery5 = request.form['query5']
    inputQuery6 = request.form['query6']
    inputQuery7 = request.form['query7']
    data = [[inputQuery1, inputQuery2, inputQuery3, inputQuery4, inputQuery5, inputQuery6, inputQuery7]]
    # Column names must match training time exactly (note the trailing
    # space in 'LOR ').
    features = ['GRE Score', 'TOEFL Score', 'University Rating', 'SOP', 'LOR ', 'CGPA', 'Research']
    df = pd.DataFrame(data, columns = features)
    filename = 'LinearRegression.sav'
    # NOTE(review): the model file is re-loaded from disk on every request;
    # loading it once at import time would avoid the repeated I/O.
    LinearRegression = pickle.load(open(filename, 'rb'))
    a = round(LinearRegression.predict(df)[0]*100,2)
    result = f'Chances of Admit is : {a} %'
    error = 'Accuracy of the prediction: 81.88% (referred from r2_score)' # From r2_score
    return render_template('home_heroku.html', output1=result, output2=error, query1 = request.form['query1'], query2 = request.form['query2'],query3 = request.form['query3'],query4 = request.form['query4'],query5 = request.form['query5'],query6 = request.form['query6'],query7 = request.form['query7'])
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
|
# %%
from gym_minigrid.minigrid import *
from gym_minigrid.window import Window
import numpy as np
class Maze(MiniGridEnv):
    """
    Maze environment.

    This environment is full observation.
    The state is (x, y, dir) where x,y indicate
    the agent's location in the environment and
    dir indicates the direction it is facing.
    """

    # Enumeration of possible actions
    class Actions(IntEnum):
        # Turn left, turn right, move forward
        left = 0
        right = 1
        forward = 2

    def __init__(
        self,
        agent_start_states = [(1,1,0)],
        slip_p=0.0,
    ):
        """
        Inputs
        ------
        agent_start_states : list
            List of tuples representing the possible initial states
            (entry conditions) of the agent in the environment.
        slip_p : float
            Probability with which the agent "slips" on any given action,
            and takes another action instead.
        """
        size = 20
        width = size
        height = size
        self.agent_start_states = agent_start_states
        # Reaching (1, height-2) in any of the 4 facing directions is a goal.
        self.goal_states = [(1, height - 2, 0), (1, height - 2, 1), (1, height - 2, 2), (1, height - 2, 3)]

        super().__init__(
            grid_size=size,
            max_steps=4*size*size,
        )

        # Action enumeration for this environment
        self.actions = Maze.Actions

        # Actions are discrete integer values
        self.action_space = spaces.Discrete(len(self.actions))
        self.observation_space = spaces.Box(
            low=np.array([0,0,0]),
            high=np.array([self.width, self.height, 3]),
            dtype='uint8'
        )
        self.slip_p = slip_p

    def _gen_grid(self, width, height):
        # Create an empty grid
        self.grid = Grid(width, height)

        # Generate the surrounding walls
        self.grid.wall_rect(0, 0, width, height)

        # Generate the rooms
        self.grid.wall_rect(0, 0, 6, 6)
        self.grid.wall_rect(5, 0, 15, 6)
        self.grid.wall_rect(8, 5, 6, 11)
        self.grid.wall_rect(13, 5, 7, 11)
        self.grid.wall_rect(0, 5, 9, 6)
        self.grid.wall_rect(0, 10, 9, 6)

        # Add doors connecting the rooms (all open)
        self.put_obj(Door('grey', is_open=True), 3, 5)
        self.put_obj(Door('grey', is_open=True), 5, 2)
        self.put_obj(Door('grey', is_open=True), 10, 5)
        self.put_obj(Door('grey', is_open=True), 14, 5)
        self.put_obj(Door('grey', is_open=True), 5, 10)
        self.put_obj(Door('grey', is_open=True), 3, 15)
        self.put_obj(Door('grey', is_open=True), 16, 15)

        # Place a goal square
        for goal_state in self.goal_states:
            self.put_obj(Goal(), goal_state[0], goal_state[1])

        # Place dangerous lava
        self.grid.horz_wall(2, 7, 3, obj_type=Lava)
        self.grid.horz_wall(6, 8, 2, obj_type=Lava)
        self.grid.horz_wall(3, 12, 2, obj_type=Lava)
        self.grid.horz_wall(6, 14, 2, obj_type=Lava)

        # Place the agent
        if self.agent_start_states:
            # Uniformly pick from the possible start states
            agent_start_state = self.agent_start_states[np.random.choice(len(self.agent_start_states))]
            self.agent_pos = (agent_start_state[0], agent_start_state[1])
            self.agent_dir = agent_start_state[2]
        else:
            self.place_agent()

        self.mission = "get to the goal square"

    def gen_obs(self):
        """
        Generate the observation of the agent, which in this environment, is its state.
        """
        pos = self.agent_pos
        direction = self.agent_dir
        obs_out = np.array([pos[0], pos[1], direction])
        return obs_out

    def step(self, action):
        """
        Step the environment.

        Only left/right/forward are supported; the original code also
        branched on self.actions.pickup/drop/toggle/done, none of which
        exist in this 3-member Actions enum, so reaching those branches
        (e.g. while standing in lava) raised AttributeError.
        """
        self.step_count += 1

        reward = 0
        done = False
        info = {
            'task_complete' : False,
            'lava' : False
        }

        # Slip probability causes agent to randomly take the wrong action
        if np.random.rand() <= self.slip_p:
            action = np.random.choice(np.array([0, 1, 2]))

        current_pos = self.agent_pos
        current_cell = self.grid.get(*current_pos)
        # An agent standing in lava can no longer do anything; ignore the
        # requested action instead of dereferencing a missing enum member.
        stuck_in_lava = current_cell is not None and current_cell.type == 'lava'

        # Get the position in front of the agent
        fwd_pos = self.front_pos

        # Get the contents of the cell in front of the agent
        fwd_cell = self.grid.get(*fwd_pos)

        if stuck_in_lava:
            pass
        # Rotate left
        elif action == self.actions.left:
            self.agent_dir -= 1
            if self.agent_dir < 0:
                self.agent_dir += 4
        # Rotate right
        elif action == self.actions.right:
            self.agent_dir = (self.agent_dir + 1) % 4
        # Move forward
        elif action == self.actions.forward:
            if fwd_cell is None or fwd_cell.can_overlap():
                self.agent_pos = fwd_pos
            if fwd_cell is not None and fwd_cell.type == 'lava':
                # Stepping into lava ends the episode with no reward.
                done = True
                info['lava'] = True
        else:
            assert False, "unknown action"

        next_state = (self.agent_pos[0], self.agent_pos[1], self.agent_dir)
        if next_state in self.goal_states:
            info['task_complete'] = True
            done = True
            reward = 1.0

        if self.step_count >= self.max_steps:
            done = True

        obs = self.gen_obs()

        return obs, reward, done, info

    def get_num_states(self):
        # Position in the gridworld and also facing direction.
        return self.width * self.height * 4
from __future__ import division # import this module if you do ANY mathematics
import sys # this module does this ...
import csv # saving RAC_samples as csv file not sure how to determine where it saves yet
# the csv module might not be necessary
username = 'lisalocey'
mydir = '/Users/'+ username + '/Desktop/SADModels/'
sys.path.append(mydir)
import tools
sys.path.append(mydir + 'Models/')
import BrokenStick, RandFrac
""" Code in this file will be used to examine SADs of different models. Right
now it's a bit of a shambles, largely because Ken is messing with stuff.
But that's the way the cookie self-organizes into a sentient being. """
# NOTE(review): this script is Python 2 (print statements) and does not run
# as-is: `simple_random_fraction`, `itertools`, `plt`, `N`, `S` and
# `sample_size` are used before being defined/imported, and
# `RandFrac.get_sample(...)` is a literal placeholder.
RAC_samples = simple_random_fraction(N, S, sample_size)
RAC_mean = [sum(x)/len(x) for x in itertools.izip(*RAC_samples)] #find mean for the lists in lists
sample_RAC = [RAC_samples]
print sample_RAC
# Dump the sampled RACs to CSV in the current working directory.
out=open('sample_RAC.csv','wb')
output=csv.writer(out)
for row in sample_RAC:
    output.writerow(row)
out.close()
RAC_hist = plt.hist(RAC_mean, bins=1000) #attempt to plot as histogram (not coming out right)
plt.title("RAC_Mean")
plt.show(RAC_hist)
RAC_plot = plt.plot(RAC_mean) # plots RAC mean
plt.ylabel('RAC')
plt.show()
print RAC_samples
print RAC_mean
# NOTE(review): N, S and size are (re)defined here, after their first use above.
N = 100
S = 4
size = 10
# 1. import some data from CBC files
# 2. get all N, S combinations
# 3. get expected RAC for each
algorithm = 'simple random fraction'
expected_RAC = RandFrac.get_expectedRAC(N, S, size, algorithm )
figure = plt.figure(1,2,1)
RandFrac.get_heat(fig)
print expected_RAC
rand_sample = RandFrac.get_sample(...)
# below are function and such to do something with rand_sample...plot, compare,
# etc.
|
import cv2
import argparse
import time
import os
import Update_Model
import glob
import random
import eel
#import winsound
frequency=2500
duration=1000
eel.init('WD_INNOVATIVE')
emotions=["angry", "happy", "sad", "neutral"]
fishface = cv2.face.FisherFaceRecognizer_create()
font = cv2.FONT_HERSHEY_SIMPLEX
'''try:
fishface.load("model.xml")
except:
print("No trained model found... --update will create one.")'''
parser=argparse.ArgumentParser(description="Options for emotions based music player(Updating the model)")
parser.add_argument("--update", help="Call for taking new images and retraining the model.", action="store_true")
args=parser.parse_args()
facedict={}
video_capture=cv2.VideoCapture(0)
facecascade=cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
def crop(clahe_image, face):
    """Cut every detected face rectangle out of `clahe_image`.

    Each slice is resized to 350x350 and stored in the module-level
    `facedict` under an auto-numbered key; the last slice is returned.
    """
    for rect in face:
        x, y, w, h = rect
        region = clahe_image[y:y + h, x:x + w]
        region = cv2.resize(region, (350, 350))
        facedict["face%s" % (len(facedict) + 1)] = region
        faceslice = region
    # NOTE: raises UnboundLocalError for an empty `face`; callers guard with len(face)>=1.
    return faceslice
def grab_face():
    """Grab one webcam frame and return a contrast-equalized grayscale image."""
    ret, frame=video_capture.read()
    #cv2.imshow("Video", frame)
    # Frame is round-tripped through test.jpg to obtain grayscale instead of
    # using the (commented) cvtColor call -- presumably a workaround; TODO confirm.
    cv2.imwrite('test.jpg', frame)
    # NOTE(review): this uses the module-level `count` (always 0 here), because
    # getEmotion() increments its own local `count` -- verify intended filename.
    cv2.imwrite("images/main%s.jpg" %count, frame)
    gray=cv2.imread('test.jpg',0)
    #gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # CLAHE boosts local contrast, which helps the Haar cascade and recognizer.
    clahe=cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    clahe_image=clahe.apply(gray)
    return clahe_image
def detect_face():
    """Grab a frame and, if at least one face is found, crop it into facedict."""
    clahe_image=grab_face()
    face=facecascade.detectMultiScale(clahe_image, scaleFactor=1.1, minNeighbors=15, minSize=(10, 10), flags=cv2.CASCADE_SCALE_IMAGE)
    if len(face)>=1:
        faceslice=crop(clahe_image, face)
        #return faceslice
    else:
        # NOTE(review): this branch only runs when *zero* faces are detected;
        # multiple faces go through crop() above despite what the message says.
        print("No/Multiple faces detected!!, passing over the frame")
def save_face(emotion):
    """Collect 16 face slices for `emotion` and append them to dataset/<emotion>/."""
    print("\n\nLook "+emotion+" untill the timer expires and keep the same emotion for some time.")
    #winsound.Beep(frequency, duration)
    print('\a')
    # 5-second countdown so the user can prepare the expression.
    for i in range(0, 5):
        print(5-i)
        time.sleep(1)
    # Keep grabbing frames until 16 face slices have accumulated in facedict.
    while len(facedict.keys())<16:
        detect_face()
    for i in facedict.keys():
        # Number each new file after the current count of files in the folder.
        path, dirs, files = next(os.walk("dataset/%s" %emotion))
        file_count = len(files)+1
        cv2.imwrite("dataset/%s/%s.jpg" %(emotion, (file_count)), facedict[i])
    facedict.clear()
def update_model(emotions):
    """Collect fresh training images for every emotion, then retrain the model."""
    print("Update mode for model is ready")
    # Make sure every dataset/<emotion> folder exists before saving into it.
    checkForFolders(emotions)
    for emotion in emotions:
        save_face(emotion)
    print("Collected the images, looking nice! Now updating the model...")
    Update_Model.update(emotions)
    print("Model train successful!!")
def checkForFolders(emotions):
    """Ensure a dataset/<emotion> directory exists for every emotion label.

    Existing directories are left untouched; the call is idempotent.
    """
    for emotion in emotions:
        # Inverted the original empty-`pass`/else structure: create only when missing.
        if not os.path.exists("dataset/%s" %emotion):
            os.makedirs("dataset/%s" %emotion)
def identify_emotions():
    """Predict an emotion for every collected face slice and return the majority label."""
    prediction=[]
    confidence=[]
    for i in facedict.keys():
        pred, conf=fishface.predict(facedict[i])
        cv2.imwrite("images/%s.jpg" %i, facedict[i])
        prediction.append(pred)
        confidence.append(conf)
    # Majority vote: the most frequent predicted label index selects the emotion.
    # NOTE(review): `confidence` is collected but never used -- TODO confirm intent.
    output=emotions[max(set(prediction), key=prediction.count)]
    print("You seem to be %s" %output)
    facedict.clear()
    return output;
    # Disabled follow-up: play a random song matching the detected emotion.
#songlist=[]
#songlist=sorted(glob.glob("songs/%s/*" %output))
#random.shuffle(songlist)
#os.startfile(songlist[0])
# Module-level frame counter; NOTE(review): shadowed by the local `count`
# inside getEmotion(), so grab_face() always sees 0 -- verify intended.
count=0
@eel.expose
def getEmotion():
    """Exposed to the web UI: sample 10 frames, then return the detected emotion."""
    count=0
    while True:
        count=count+1
        detect_face()
        if args.update:
            # --update flag: retrain the model instead of classifying.
            update_model(emotions)
            break
        elif count==10:
            # After 10 captured frames, load the trained model and classify.
            fishface.read("model.xml")
            return identify_emotions()
            # NOTE(review): unreachable -- `return` above already exits the loop.
            break
#eel.start('main.html', options=web_app_options)
#options={'host':'file', 'port': '//'}
# Blocks here serving the UI; getEmotion() is invoked from the page via eel.
eel.start('main.html')#//WD_INNOVATIVE//main.html')
#, options)
|
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from cobra.manipulation.delete import find_gene_knockout_reactions
__all__ = ["ReactionKnockoutProfiler", "GeneKnockoutProfiler"]
class KnockoutProfiler(object):
    """Base class for knockout profiling over a metabolic model.

    Subclasses implement simulate_knockout() for a specific element kind
    (reactions, genes); profile() maps it over a collection of elements.
    """
    def __init__(self, model=None, simulation_method=None):
        self._model = model
        self._simulation_method = simulation_method

    def profile(self, elements, *args, **kwargs):
        """Return {element.id: knockout simulation result} for every element."""
        results = {}
        for element in elements:
            results[element.id] = self.simulate_knockout(element, *args, **kwargs)
        return results

    def simulate_knockout(self, to_knockout, *args, **kwargs):
        """Simulate knocking out a single element. Subclass responsibility."""
        raise NotImplementedError
class ReactionKnockoutProfiler(KnockoutProfiler):
    """Profiles the effect of knocking out individual reactions."""
    def __init__(self, *args, **kwargs):
        super(ReactionKnockoutProfiler, self).__init__(*args, **kwargs)
    def simulate_knockout(self, to_knockout, *args, **kwargs):
        """Knock out one reaction and run the configured simulation method.

        Returns the simulation result indexed by cache['original_objective'];
        NOTE(review): the simulation method is expected to populate that cache
        key -- confirm against the simulation method's contract.
        """
        # Fresh per-call cache handed to the simulation method.
        cache = {
            "variables": {},
            "constraints": {},
            "first_run": True
        }
        # `with self._model:` presumably reverts the knockout on exit
        # (cobra model context manager) -- TODO confirm.
        with self._model:
            to_knockout.knock_out()
            return self._simulation_method(self._model,
                                           volatile=False,
                                           cache=cache,
                                           *args,
                                           **kwargs)[cache['original_objective']]
class GeneKnockoutProfiler(KnockoutProfiler):
    """Profiles the effect of knocking out individual genes.

    A gene knockout is simulated by knocking out every reaction that the
    gene's loss disables (via find_gene_knockout_reactions).
    """
    def __init__(self, *args, **kwargs):
        super(GeneKnockoutProfiler, self).__init__(*args, **kwargs)
    def simulate_knockout(self, to_knockout, *args, **kwargs):
        """Knock out all reactions dependent on one gene, then simulate.

        NOTE(review): like ReactionKnockoutProfiler, assumes the simulation
        method fills cache['original_objective'] -- confirm.
        """
        reactions = find_gene_knockout_reactions(self._model, [to_knockout])
        cache = {
            "variables": {},
            "constraints": {},
            "first_run": True
        }
        # Context manager presumably reverts the knockouts on exit -- TODO confirm.
        with self._model:
            for reaction in reactions:
                reaction.knock_out()
            return self._simulation_method(self._model,
                                           volatile=False,
                                           cache=cache,
                                           *args,
                                           **kwargs)[cache['original_objective']]
|
# -*- coding: utf-8 -*-
from mock import Mock
import socket
import pytest
from huskar_sdk_v2.http.components.config import Config
@pytest.fixture
def config_component(requests_mock, no_cache_client):
    # Config component bound to app 'arch.test' in the 'overall' cluster.
    return Config('arch.test', 'overall')
# Fetching a key from the overall cluster returns the mocked value.
# NOTE(review): "clueter" in the test name looks like a typo for "cluster".
def test_config_should_success_for_app_id_clueter(
        config_component, requests_mock):
    assert config_component.get('test_config') == 'test_value'
# Both watch() and the on_change() decorator must fire exactly once per update.
def test_config_watch_decorator(
        config_component, requests_mock, no_cache_client,
        wait_huskar_api_ioloop_connected):
    wait_huskar_api_ioloop_connected(3.0)
    handler = Mock()
    handler_2 = Mock()
    config_component.watch("test_config", handler)
    config_component.on_change("test_config")(handler_2)
    requests_mock.set_result_file('test_data_changed.txt')
    assert requests_mock.wait_processed()
    assert config_component.get('test_config') == 'new_value'
    handler.assert_called_once_with(u'new_value')
    handler_2.assert_called_once_with(u'new_value')
    assert handler.call_count == 1
    assert handler_2.call_count == 1
# exists() reflects whether the key is present in the watched data.
def test_config_exists_and_not_exists(
        config_component, requests_mock, no_cache_client,
        wait_huskar_api_ioloop_connected):
    wait_huskar_api_ioloop_connected(3.0)
    assert config_component.exists('test_config')
    assert not config_component.exists('something_that_shouldnt_exists')
# An unknown cluster falls back to the 'overall' cluster's value.
def test_should_get_global_config_if_local_not_exists(
        requests_mock, no_cache_client, wait_huskar_api_ioloop_connected):
    config = Config('arch.test', 'some-cluster-not-exists')
    wait_huskar_api_ioloop_connected(3.0)
    assert config.get('test_config') == 'test_value'
# Once the cluster gets its own value, it wins; _force_overall bypasses it.
def test_should_get_new_value_if_cluster_config_is_added(
        requests_mock, no_cache_client, wait_huskar_api_ioloop_connected):
    config = Config('arch.test', 'some-cluster-not-exists')
    wait_huskar_api_ioloop_connected(3.0)
    requests_mock.add_response(
        '{"body": {"config": {"arch.test": {"some-cluster-not-exists": '
        '{"test_config": {"value": "new_value"}}}}}, "message": "update"}'
    )
    assert requests_mock.wait_processed()
    assert config.get('test_config') == 'new_value'
    assert config.get('test_config', _force_overall=True) == 'test_value'
# Deleting the cluster value falls back to overall and notifies listeners.
def test_should_get_overall_value_if_cluster_config_is_deleted(
        requests_mock, no_cache_client, wait_huskar_api_ioloop_connected):
    config = Config('arch.test', 'some-cluster-not-exists')
    wait_huskar_api_ioloop_connected(3.0)
    requests_mock.add_response(
        '{"body": {"config": {"arch.test": {"some-cluster-not-exists":'
        ' {"test_config": {"value": "new_value"}}}}}, "message": "update"}'
    )
    assert requests_mock.wait_processed()
    assert config.get('test_config') == 'new_value'
    mock_handler = Mock()
    config.on_change("test_config")(mock_handler)
    requests_mock.add_response(
        '{"body": {"config": {"arch.test": {"some-cluster-not-exists": '
        '{"test_config": {"value": null}}}}}, "message": "delete"}'
    )
    assert requests_mock.wait_processed()
    assert config.get('test_config') == 'test_value'
    mock_handler.assert_called_once_with(u'test_value')
# Listeners fire for cluster-level updates but NOT for overall updates that
# are shadowed by an existing cluster value.
def test_listener_called_with_config_in_overall_cluster_if_not_exists(
        requests_mock, no_cache_client, wait_huskar_api_ioloop_connected):
    config = Config('arch.test', 'some-cluster-not-exists')
    wait_huskar_api_ioloop_connected(3.0)
    handler = Mock()
    config.watch("test_config", handler)
    requests_mock.set_result_file('test_data_changed.txt')
    assert requests_mock.wait_processed()
    assert config.get('test_config') == 'new_value'
    assert config.exists('test_config')
    handler.assert_called_once_with(u'new_value')
    assert handler.call_count == 1
    requests_mock.add_response(
        '{"body": {"config": {"arch.test": {"some-cluster-not-exists": '
        '{"test_config": {"value": "new_value_2"}}}}}, "message": "update"}'
    )
    assert requests_mock.wait_processed()
    assert config.get('test_config') == 'new_value_2'
    assert config.exists('test_config')
    handler.assert_any_call(u'new_value_2')
    assert handler.call_count == 2
    requests_mock.add_response(
        '{"body": {"config": {"arch.test": {"overall": {"test_config": '
        '{"value": "new_value_2"}}}}}, "message": "update"}'
    )
    assert handler.call_count == 2
# JSON-object config values are deserialized into dicts.
def test_should_get_dict_if_config_is_dict(
        requests_mock, client, config_component,
        wait_huskar_api_ioloop_connected):
    client.run()
    wait_huskar_api_ioloop_connected(10)
    requests_mock.add_response(
        r'{"body": {"config": {"arch.test": {"overall": {"test_config":'
        r' {"value": "{\"config\": 2}"}}}}}, "message": "update"}'
    )
    requests_mock.wait_processed()
    assert config_component.get('test_config')['config'] == 2
# A fresh client that cannot reach the API serves values from the disk cache.
def test_should_get_correct_config_from_cache(requests_mock, started_client,
                                              config_component, cache_dir):
    import requests
    from huskar_sdk_v2.http.ioloops.http import HuskarApiIOLoop
    assert started_client.connected.wait(1)
    requests_mock.add_response(
        r'{"body": {"config": {"arch.test": {"overall": {"test_json":'
        r' {"value": "\"abcdefg\""}}}}}, "message": "update"}'
    )
    assert requests_mock.wait_processed()
    assert started_client.watched_configs.get(
        'arch.test', 'overall',
        'test_json', raises=True) == {'value': 'abcdefg'}
    assert started_client.stop(3)
    # Simulate a dead API: every request now times out.
    requests_mock.stop_exception = requests.Timeout
    client = HuskarApiIOLoop('test_url', 'test_token', cache_dir=cache_dir)
    client.install()
    client.run()
    assert client.watched_configs.get(
        'arch.test', 'overall',
        'test_json', raises=True) == {'value': 'abcdefg'}
# iteritems() yields every (key, deserialized value) pair of the cluster.
def test_iteritems_in_config(requests_mock, started_client):
    config = Config('arch.test', 'a_cluster')
    assert started_client.connected.wait(1)
    requests_mock.add_response(
        r'{"body": {"config": {"arch.test": {"a_cluster": {"test_json":'
        r' {"value": "\"abcdefg\""}}}}}, "message": "update"}'
    )
    requests_mock.add_response(
        r'{"body": {"config": {"arch.test": {"a_cluster": {"test_config":'
        r' {"value": "\"12345\""}}}}}, "message": "update"}'
    )
    assert requests_mock.wait_processed()
    items = list(config.iteritems())
    assert sorted(items) == sorted(list({
        'test_json': "abcdefg",
        'test_config': "12345",
    }.items()))
# A critical component must raise rather than return stale/None data when the
# client never connected.
def test_critical_component_should_raise_if_client_not_connected(requests_mock,
                                                                 client):
    config = Config('arch.test', 'a_cluster')
    config.set_critical()
    requests_mock.wait_time = 11
    client.run()
    with pytest.raises(RuntimeError):
        config.get("test")
# Non-critical components degrade to None instead of raising.
def test_non_critical_component_should_ignore_if_client_not_connected(
        requests_mock, client):
    config = Config('arch.test', 'a_cluster')
    requests_mock.wait_time = 10
    client.run()
    assert config.get("test") is None
# Once data has been seen, a later disconnect must not flip fail_mode.
def test_critical_component_should_still_work_if_cache_is_found(requests_mock,
                                                                client,
                                                                sleep_ops):
    config = Config('arch.test', 'a_cluster')
    config_component = config.client
    config.set_critical()
    client.run()
    config.get("test")
    assert not config_component.fail_mode
    sleep_ops.set_constant_sleep_time(0.1)
    requests_mock.stop_exception = socket.timeout
    assert client.is_disconnected.wait(3)
    assert not client.is_connected()
    config.get("test")
    assert not config_component.fail_mode
# After the connection is restored, the critical component keeps working.
def test_critical_component_should_recover_if_connection_reestablished(
        requests_mock, client, sleep_ops):
    config = Config('arch.test', 'a_cluster')
    config.set_critical()
    client.run()
    config_component = config.client
    config.get("test")
    assert not config_component.fail_mode
    sleep_ops.set_constant_sleep_time(0.1)
    requests_mock.stop_exception = socket.timeout
    assert client.is_disconnected.wait(3)
    assert not client.is_connected()
    config.get("test")
    assert not config_component.fail_mode
    requests_mock.stop_exception = None
    assert client.connected.wait(3)
    config.get("test")
    assert not config_component.fail_mode
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest
from mkldnn_op_test import format_reorder
class TestReQuantizeOp(OpTest):
    """Base test for the oneDNN requantize op (int8/uint8 rescaling).

    Subclasses override set_scale()/set_data_type() to vary the scales and
    the input dtype.
    """

    def setUp(self):
        self.op_type = 'requantize'
        self.scale_in = 2.0
        self.scale_out = 1.5
        self.input_size = [1, 1, 10, 10]
        self.data_type = 'int8'
        self.set_scale()
        self.set_data_type()
        self.prepare_inputs()

    def prepare_inputs(self):
        """Build random input plus the reference requantized output."""
        # Requantization multiplies by scale_out / scale_in.
        scale_shift = self.scale_out / self.scale_in
        if self.data_type == 'int8':
            # Signed input drawn from [-50, 50).
            self.input = (np.random.randint(0, 100, self.input_size) - 50
                          ).astype(self.data_type)
            output_tmp = np.round(self.input.astype('float32') *
                                  scale_shift).astype('int8')
        else:
            # Unsigned input drawn from [0, 100).
            self.input = (np.random.randint(
                0, 100, self.input_size)).astype(self.data_type)
            output_tmp = np.round(self.input.astype('float32') *
                                  scale_shift).astype('uint8')
        self.output = format_reorder(output_tmp, self.input_size)
        self.inputs = {'Input': OpTest.np_dtype_to_fluid_dtype(self.input)}
        self.outputs = {'Output': self.output}
        self.attrs = {'Scale_in': self.scale_in, 'Scale_out': self.scale_out}

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(check_dygraph=False)

    def set_scale(self):
        # Hook for subclasses; base class keeps the default scales.
        pass

    def set_data_type(self):
        # Hook for subclasses; base class keeps int8.
        # BUG FIX: the parameter was previously named ``OpTest`` (shadowing the
        # base class) instead of ``self``.
        pass
#--------------------test requantize with s8 input--------------------
class TestReQuantizeOp1(TestReQuantizeOp):
    # Equal scales: requantization degenerates to an identity copy.
    def set_scale(self):
        self.scale_in = 1.5
        self.scale_out = 1.5
class TestReQuantizeOp2(TestReQuantizeOp):
    # Upscaling case: output scale is twice the input scale.
    def set_scale(self):
        self.scale_in = 0.1
        self.scale_out = 0.2
#--------------------test requantize with u8 input--------------------
class TestReQuantizeOp3(TestReQuantizeOp1):
    # Same scales as Op1 but with unsigned input.
    def set_data_type(self):
        self.data_type = 'uint8'
class TestReQuantizeOp4(TestReQuantizeOp2):
    # Same scales as Op2 but with unsigned input.
    def set_data_type(self):
        self.data_type = 'uint8'
#-------------------test reused requantize op---------------------------
class TestReQuantizeOpReused(TestReQuantizeOp):
    """Runs one requantize op twice through an executor to test handle reuse."""
    def setUp(self):
        # Intentionally skips OpTest.setUp/set_data_type: dtype is fixed to int8
        # and the program is built manually in test_check_output below.
        self.input_size = [1, 1, 10, 10]
        self.data_type = 'int8'
        self.set_scale()
        self.prepare_inputs()
    def set_scale(self):
        self.scale_in = 0.1
        self.scale_out = 0.2
    def test_check_output(self):
        variables = {
            "input": self.input,
            "output": self.output,
        }
        program = fluid.Program()
        with fluid.program_guard(program):
            block = program.global_block()
            for name in variables:
                block.create_var(
                    name=name, dtype="int8", shape=variables[name].shape)
            requant_op = block.append_op(
                type="requantize",
                inputs={'Input': block.var('input'), },
                outputs={"Output": block.var('output')},
                attrs={'Scale_in': self.scale_in,
                       'Scale_out': self.scale_out})
            place = core.CPUPlace()
            exe = fluid.Executor(place)
            # Run twice: the second run must reuse the cached primitive and
            # still produce the reference output.
            for i in range(2):
                out = exe.run(program,
                              feed={'input': variables['input']},
                              fetch_list=['output'])
                self.assertTrue(
                    np.allclose(
                        variables['output'], out[0], atol=1e-4), 'output')
if __name__ == '__main__':
    unittest.main()
|
"""
test_reddit.py: Application unit tests for the Reddit Category.
__author__ = "Fernando P. Lopes"
__email__ = "fpedrosa@gmail.com"
"""
import unittest
import news_source_reddit as nsr
class RedditTests(unittest.TestCase):
    """Unit tests for the Reddit news source."""
    def test_ask(self):
        # Fetching the 're_ask' source must yield exactly 5 items.
        # NOTE(review): presumably hits the live Reddit API via
        # news_source_reddit -- confirm whether this is network-dependent.
        news, title = nsr.get_most_read('re_ask')
        self.assertEqual(len(news), 5)
if __name__ == '__main__':
    unittest.main()
|
from typing import List
from utils import get_characters
from beans.comment_bridge import CommentBridge
from beans.comment import Comment
from collections import Counter
import anim
def render_comment_list(comment_list: List[Comment], output_filename = 'hello.mp4', music_code = 'PWR'):
    """Render a comment thread to a video file.

    Assigns a character to each comment (by how often its effective user
    appears), wraps the comments in CommentBridge objects, and hands the
    thread to the animation backend.
    """
    tally = Counter()
    for comment in comment_list:
        tally[comment.effective_user_id] += 1
    characters = get_characters(tally)
    bridged = []
    for comment in comment_list:
        comment.character = characters[comment.effective_user_id]
        bridged.append(CommentBridge(comment))
    # Guarantee an .mp4 extension on the output path.
    if not output_filename.endswith('.mp4'):
        output_filename += '.mp4'
    return anim.comments_to_scene(bridged, name_music=music_code, output_filename=output_filename)
|
""" Python GTH16 hash - part of pyGT https://github.com/sybip/pyGT """
def gtAlgoH16(data):
    """Proprietary 16-bit hash based on the Park-Miller LCG.

    Args:
        data: bytes-like input (anything accepted by ``bytearray``).
    Returns:
        int: 16-bit hash value.
    """
    # BUG FIX: parameter renamed from ``str`` (shadowed the builtin).
    seed = 0xaa
    mult = 48271
    incr = 1
    modulus = (1 << 31) - 1  # 0x7FFFFFFF, the Park-Miller modulus 2**31 - 1
    h = 0
    x = seed
    for c in bytearray(data):
        # One LCG step mixed with the current byte; masked to 32 bits
        # before the modulo, exactly as the reference implementation does.
        x = (((x + c) * mult + incr) & 0xFFFFFFFF) % modulus
        h = h ^ x
    # Derive 16-bit value from 32-bit hash by XORing its two halves
    return ((h & 0xFFFF0000) >> 16) ^ (h & 0xFFFF)
|
import time
import DFL168A
SuccessFresh=False
def refresh():
    """Request PGN FEE5 (engine hours/revolutions) and cache the raw response.

    Returns True on success; also records the outcome in the module-level
    SuccessFresh flag that the getters below consult.
    """
    global SuccessFresh
    if not SuccessFresh:  # NOTE(review): only re-issues the command when the
        # previous refresh failed -- confirm this is the intended caching.
        if not DFL168A.DigitalCommand('FEE5'):
            SuccessFresh=False
            return False
    Temp=DFL168A.HandleResponse(DFL168A.ReturnStr)
    DFL168A.ReturnStr=Temp
    SuccessFresh=True
    return True
def getTotalEngineHours():
    """Decode total engine hours from the last refreshed FEE5 response.

    Returns (ok, hours); ok is False when no fresh data is available or the
    raw value is in the invalid/unavailable range.
    """
    global SuccessFresh
    if not SuccessFresh:
        return False, 0.0
    raw = DFL168A.ReturnStr
    # Hex bytes 0-3 arrive little-endian; reorder them before parsing.
    big_endian = raw[6:8] + raw[4:6] + raw[2:4] + raw[0:2]
    value = int(big_endian, 16)
    # Values above 0xFAFFFFFF are treated as not-available/error
    # (matches J1939 conventions -- TODO confirm).
    if value > 0xfaffffff:
        return False, 0.0
    # Resolution: 0.05 hours per bit.
    return True, value * 0.05
def getTotalEngineRevolutions():
    """Decode total engine revolutions from the last refreshed FEE5 response.

    Returns (ok, revolutions); ok is False when no fresh data is available or
    the raw value is in the invalid/unavailable range.
    """
    global SuccessFresh
    if not SuccessFresh:
        return False, 0.0
    raw = DFL168A.ReturnStr
    # Hex bytes 4-7 arrive little-endian; reorder them before parsing.
    big_endian = raw[14:16] + raw[12:14] + raw[10:12] + raw[8:10]
    value = int(big_endian, 16)
    # Values above 0xFAFFFFFF are treated as not-available/error
    # (matches J1939 conventions -- TODO confirm).
    if value > 0xfaffffff:
        return False, 0.0
    # Resolution: 1000 revolutions per bit.
    return True, value * 1000.0
import requests
import json
import pandas
def create_project(url, super_token):
    """Create the ham10000 project through the REDCap API.

    Prints the HTTP status and body, and returns the raw response text
    (REDCap returns the new project's API token on success).
    """
    payload = json.dumps([{
        'project_title': 'ham10000',
        'purpose': '0',
        'purpose_other': '',
        'project_notes': ''}])
    response = requests.post(url, data={
        'token': super_token,
        'content': 'project',
        'format': 'json',
        'data': payload,
    })
    print(response.status_code)
    print(response.text)
    return response.text
def add_fields_to_project(url, token):
    """Upload the HAM10000 data dictionary (field metadata) to the project.

    Every field lives on the 'ham' form; record_id is the identifier and the
    only required field. Prints the HTTP status and response body.
    """
    dictionary = [{"field_name":"record_id","form_name":"ham","section_header":"","field_type":"text","field_label":"record_id","select_choices_or_calculations":"","field_note":"","text_validation_type_or_show_slider_number":"","text_validation_min":"","text_validation_max":"","identifier":"y","branching_logic":"","required_field":"y","custom_alignment":"","question_number":"","matrix_group_name":"","matrix_ranking":"","field_annotation":""},
    {"field_name":"image_id","form_name":"ham","section_header":"","field_type":"text","field_label":"image_id","select_choices_or_calculations":"","field_note":"","text_validation_type_or_show_slider_number":"","text_validation_min":"","text_validation_max":"","identifier":"","branching_logic":"","required_field":"","custom_alignment":"","question_number":"","matrix_group_name":"","matrix_ranking":"","field_annotation":""},
    {"field_name":"lesion_id","form_name":"ham","section_header":"","field_type":"text","field_label":"lesion_id","select_choices_or_calculations":"","field_note":"","text_validation_type_or_show_slider_number":"","text_validation_min":"","text_validation_max":"","identifier":"","branching_logic":"","required_field":"","custom_alignment":"","question_number":"","matrix_group_name":"","matrix_ranking":"","field_annotation":""},
    {"field_name":"dx","form_name":"ham","section_header":"","field_type":"text","field_label":"dx","select_choices_or_calculations":"","field_note":"","text_validation_type_or_show_slider_number":"","text_validation_min":"","text_validation_max":"","identifier":"","branching_logic":"","required_field":"","custom_alignment":"","question_number":"","matrix_group_name":"","matrix_ranking":"","field_annotation":""},
    {"field_name":"dx_type","form_name":"ham","section_header":"","field_type":"text","field_label":"dx_type","select_choices_or_calculations":"","field_note":"","text_validation_type_or_show_slider_number":"","text_validation_min":"","text_validation_max":"","identifier":"","branching_logic":"","required_field":"","custom_alignment":"","question_number":"","matrix_group_name":"","matrix_ranking":"","field_annotation":""},
    {"field_name":"age","form_name":"ham","section_header":"","field_type":"text","field_label":"age","select_choices_or_calculations":"","field_note":"","text_validation_type_or_show_slider_number":"","text_validation_min":"","text_validation_max":"","identifier":"","branching_logic":"","required_field":"","custom_alignment":"","question_number":"","matrix_group_name":"","matrix_ranking":"","field_annotation":""},
    {"field_name":"sex","form_name":"ham","section_header":"","field_type":"text","field_label":"sex","select_choices_or_calculations":"","field_note":"","text_validation_type_or_show_slider_number":"","text_validation_min":"","text_validation_max":"","identifier":"","branching_logic":"","required_field":"","custom_alignment":"","question_number":"","matrix_group_name":"","matrix_ranking":"","field_annotation":""},
    {"field_name":"localization","form_name":"ham","section_header":"","field_type":"text","field_label":"localization","select_choices_or_calculations":"","field_note":"","text_validation_type_or_show_slider_number":"","text_validation_min":"","text_validation_max":"","identifier":"","branching_logic":"","required_field":"","custom_alignment":"","question_number":"","matrix_group_name":"","matrix_ranking":"","field_annotation":""}]
    data = json.dumps(dictionary)
    metadata = {
        'token': token,
        'content': 'metadata',
        'format': 'json',
        'data': data,
        'returnFormat': 'json',
    }
    r = requests.post(url, data=metadata)
    print('HTTP Status: ' + str(r.status_code))
    print(r.text)
def add_data_to_project(url, token):
    """Import every row of the HAM10000 metadata CSV as a REDCap record.

    Numeric columns are stringified because the REDCap import API expects
    string values. Prints the HTTP status and response body.
    """
    data = pandas.read_csv("~/ham10000/HAM10000_metadata.csv")
    records = []
    for index, row in data.iterrows():
        record = {
            "record_id": str(row['record_id']),
            "image_id": row['image_id'],
            "lesion_id": row['lesion_id'],
            "dx": row['dx'],
            "dx_type": row['dx_type'],
            "age": str(row['age']),
            "sex": row['sex'],
            "localization": row['localization'],
        }
        records.append(record)
    data = json.dumps(records)
    fields = {
        'token': token,
        'content': 'record',
        'format': 'json',
        'type': 'flat',  # one row per record (no event/instance nesting)
        'data': data,
    }
    r = requests.post(url, data=fields)
    print('HTTP Status: ' + str(r.status_code))
    print(r.text)
if __name__ == "__main__":
url = 'http://localhost/redcap/api/'
super_token = "FEJRQVHQ3993BYQ50KMXZ0XFQH17V3X5P5STELNZ2DE243EUKJDY4T2O12GZ5555"
token = project_token = create_project(url, super_token)
add_fields_to_project(url, token)
add_data_to_project(url, token)
with open("token", "w") as f:
f.write(token)
|
############## Reverse linked List in place ###############
""" Iterate on the linked list keeping track of the latest value """
import dataclasses
from typing import Optional, Any
@dataclasses.dataclass
class Node:
    """A single node of a singly linked list."""
    value: Any  # payload stored at this node
    next: Optional['Node']  # successor node, or None at the tail
def revert(root: "Node") -> "Optional[Node]":
    """Reverse a singly linked list in place and return the new head.

    Walks the list once, re-pointing each node's ``next`` at its
    predecessor. Returns None for an empty list.
    """
    # Annotations are quoted (lazy) so the function does not depend on the
    # Node/Optional names being evaluated at definition time.
    curr = root
    prev = None
    while curr:
        # BUG FIX: the temporary was named ``next``, shadowing the builtin.
        successor = curr.next
        curr.next = prev
        prev = curr
        curr = successor
    return prev
def merge(list_a: "Optional[Node]", list_b: "Optional[Node]") -> "Optional[Node]":
    """Merge two sorted linked lists into one sorted list and return its head.

    Splices the existing nodes together (no new nodes are allocated);
    ties go to ``list_b``, matching the original `<` comparison.
    """
    # FIX: the original stacked two docstrings (the second was a dead string
    # statement) and allocated a throwaway Node sentinel; this version picks
    # the head explicitly and needs no sentinel.
    if list_a is None:
        return list_b
    if list_b is None:
        return list_a
    if list_a.value < list_b.value:
        head, list_a = list_a, list_a.next
    else:
        head, list_b = list_b, list_b.next
    tail = head
    while list_a and list_b:
        if list_a.value < list_b.value:
            tail.next = list_a
            list_a = list_a.next
        else:
            tail.next = list_b
            list_b = list_b.next
        tail = tail.next
    # Attach whichever list still has remaining (already sorted) nodes.
    tail.next = list_a if list_a else list_b
    return head
if __name__ == '__main__':
    # Quick manual check: reversing 1..5 should print the list starting at 5.
    linked_list = Node(1, Node(2, Node(3, Node(4, Node(5, None)))))
    print(revert(linked_list))
    # BUG FIX: removed a leftover `import pdb;pdb.set_trace()` debugger
    # breakpoint that halted the script on every run.
|
# Copyright 2020 Canonical, Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nodl.types
import pytest
@pytest.fixture
def topic_publisher():
    # Canonical publisher-topic instance reused across tests.
    return nodl.types.Topic(name='foo', message_type='bar', role=nodl.types.PubSubRole.PUBLISHER)
# Action exposes name/type/role as constructed.
def test_action():
    action_server = nodl.types.Action(
        name='foo', action_type='bar', role=nodl.types.ServerClientRole.SERVER
    )
    assert action_server.name == 'foo'
    assert action_server.type == 'bar'
    assert action_server.role == nodl.types.ServerClientRole.SERVER
# Parameter exposes name/type.
def test_parameter():
    parameter = nodl.types.Parameter(name='foo', parameter_type='bar')
    assert parameter.name == 'foo'
    assert parameter.type == 'bar'
# Service accepts both client and server roles.
def test_service():
    service = nodl.types.Service(
        name='foo', service_type='bar', role=nodl.types.ServerClientRole.CLIENT
    )
    assert service.name == 'foo'
    assert service.type == 'bar'
    assert service.role == nodl.types.ServerClientRole.CLIENT
    service = nodl.types.Service(
        name='foo', service_type='bar', role=nodl.types.ServerClientRole.SERVER,
    )
    assert service.role == nodl.types.ServerClientRole.SERVER
# Topic accepts both publisher and subscription roles.
def test_topic():
    topic = nodl.types.Topic(name='foo', message_type='bar', role=nodl.types.PubSubRole.PUBLISHER,)
    assert topic.name == 'foo'
    assert topic.type == 'bar'
    assert topic.role == nodl.types.PubSubRole.PUBLISHER
    topic = nodl.types.Topic(
        name='foo', message_type='bar', role=nodl.types.PubSubRole.SUBSCRIPTION,
    )
    assert topic.role == nodl.types.PubSubRole.SUBSCRIPTION
# repr()/str() must surface both the name and the type.
def test_representations(topic_publisher):
    assert 'foo' in repr(topic_publisher) and 'bar' in repr(topic_publisher)
    assert 'foo' in str(topic_publisher) and 'bar' in str(topic_publisher)
# Equality is field-wise: same fields equal, any differing field (including
# role) breaks equality.
def test_equality(topic_publisher):
    also_topic_publisher = nodl.types.Topic(
        name='foo', message_type='bar', role=nodl.types.PubSubRole.PUBLISHER,
    )
    assert also_topic_publisher == topic_publisher
    not_same_topic_publisher = nodl.types.Topic(
        name='bar', message_type='bar', role=nodl.types.PubSubRole.PUBLISHER,
    )
    assert not_same_topic_publisher != topic_publisher
    # Test different roles cause inequality
    assert nodl.types.Action(
        name='foo', action_type='bar', role=nodl.types.ServerClientRole.CLIENT
    ) != nodl.types.Action(name='foo', action_type='bar', role=nodl.types.ServerClientRole.BOTH)
# Interfaces of different kinds never compare equal, even with matching fields.
def test_same_name_different_interface_type():
    topic = nodl.types.Topic(name='foo', message_type='bar', role=nodl.types.PubSubRole.PUBLISHER,)
    service = nodl.types.Service(
        name='foo', service_type='bar', role=nodl.types.ServerClientRole.SERVER
    )
    assert topic != service
# Node indexes its interfaces by name.
def test_node(topic_publisher):
    service = nodl.types.Service(
        name='baz', service_type='woo', role=nodl.types.ServerClientRole.SERVER
    )
    node = nodl.types.Node(
        name='test', executable='toast', topics=[topic_publisher], services=[service]
    )
    assert node.name == 'test'
    assert node.executable == 'toast'
    assert node.topics[topic_publisher.name] == topic_publisher
    assert node.services[service.name] == service
|
# Copyright 2017 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
import full_activity_dump
from util import makePpaAndPalRecordsConsistent
from reference_models.pre_iap_filtering import zone_purge
from reference_models.pre_iap_filtering import pre_iap_util
TEST_DIR = os.path.join(os.path.dirname(__file__), 'testdata')
class TestZonePurge(unittest.TestCase):
    """End-to-end check of the zone-purge reference model (Python 2 syntax).

    Loads canned CBSD/PPA/PAL/GWPZ/FSS/GWBL records from testdata/testset1,
    runs zonePurgeReferenceModel over one SAS UUT FAD and two test-harness
    FADs, and asserts that the purge removed at least one grant.
    """

    def test_zone_PurgeModel_default(self):
        # TEST DATA
        cbsd_0 = json.load(
            open(os.path.join(TEST_DIR, 'testset1', 'cbsd_0.json')))
        cbsd_1 = json.load(
            open(os.path.join(TEST_DIR, 'testset1', 'cbsd_1.json')))
        cbsd_2 = json.load(
            open(os.path.join(TEST_DIR, 'testset1', 'cbsd_2.json')))
        cbsd_3 = json.load(
            open(os.path.join(TEST_DIR, 'testset1', 'cbsd_3.json')))
        cbsd_4 = json.load(
            open(os.path.join(TEST_DIR, 'testset1', 'cbsd_4.json')))
        cbsd_5 = json.load(
            open(os.path.join(TEST_DIR, 'testset1', 'cbsd_5.json')))
        # Load PPA record
        gwpz_record = json.load(
            open(os.path.join(TEST_DIR, 'testset1', 'gwpz_0.json')))
        ppa_record = json.load(
            open(os.path.join(TEST_DIR, 'testset1', 'ppa_0.json')))
        pal_record_0 = json.load(
            open(os.path.join(TEST_DIR, 'testset1', 'pal_0.json')))
        pal_record_1 = json.load(
            open(os.path.join(TEST_DIR, 'testset1', 'pal_1.json')))
        pal_record_2 = json.load(
            open(os.path.join(TEST_DIR, 'testset1', 'pal_2.json')))
        pal_record_list = [pal_record_0, pal_record_1, pal_record_2]
        fss_record = json.load(
            open(os.path.join(TEST_DIR, 'testset1', 'fss_0.json')))
        gwbl_record = json.load(
            open(os.path.join(TEST_DIR, 'testset1', 'gwbl_0.json')))
        # The primary PAL channel frequencies drive record consistency.
        pal_low_frequency = pal_record_0['channelAssignment']['primaryAssignment']['lowFrequency']
        pal_high_frequency = pal_record_0['channelAssignment']['primaryAssignment']['highFrequency']
        ppa_record, pal_records = makePpaAndPalRecordsConsistent(
            ppa_record, pal_record_list,
            pal_low_frequency,
            pal_high_frequency,
            'test_user_1')
        # Mark two CBSDs as members of the PPA cluster list.
        ppa_record['ppaInfo']['cbsdReferenceId'] = ["cbsd_5", "cbsd_3"]
        fad_object_1 = full_activity_dump.FullActivityDump({'cbsd': [cbsd_0, cbsd_1]})
        fad_object_2 = full_activity_dump.FullActivityDump({'cbsd': [cbsd_2, cbsd_3]})
        fad_object_3 = full_activity_dump.FullActivityDump({'cbsd': [cbsd_4, cbsd_5]})
        sas_uut_fad = fad_object_1
        sas_test_harness_fads = [fad_object_2, fad_object_3]
        protected_entities = {'gwpzRecords': [gwpz_record], 'palRecords': pal_records,
                              'ppaRecords': [ppa_record], 'fssRecords': [fss_record],
                              'gwblRecords': [gwbl_record]}
        # Count (and log) the grants going in.  Python 2 print statements.
        print "================CBSD Grants passed as input======================"
        initial_grants = 0
        for records in sas_uut_fad.getCbsdRecords():
            for grants in records['grants']:
                print " ", json.dumps(grants['id'])
                initial_grants = initial_grants + 1
        for fad in sas_test_harness_fads:
            for rec in fad.getCbsdRecords():
                for grants in rec['grants']:
                    print " ", json.dumps(grants['id'])
                    initial_grants = initial_grants + 1
        print "===================================================================="
        # Run the model under test; it mutates the FAD objects in place.
        zone_purge.zonePurgeReferenceModel(sas_uut_fad, sas_test_harness_fads,
                                           protected_entities['ppaRecords'], protected_entities['palRecords'],
                                           protected_entities['gwpzRecords'], protected_entities['fssRecords'])
        # Count (and log) the surviving grants.
        print "================CBSD Grants received as output======================"
        final_grants = 0
        for records in sas_uut_fad.getCbsdRecords():
            for grants in records['grants']:
                print " ", json.dumps(grants['id'])
                final_grants = final_grants + 1
        for fad in sas_test_harness_fads:
            for rec in fad.getCbsdRecords():
                for grants in rec['grants']:
                    print " ", json.dumps(grants['id'])
                    final_grants = final_grants + 1
        print "===================================================================="
        # The purge must have removed at least one grant.
        self.assertLess(final_grants, initial_grants)
        # TODO: convert this in to proper unit test with assert
# Allow running this module directly as a unittest script.
if __name__ == '__main__':
    unittest.main()
|
import frappe
from frappe import _, throw, msgprint
from frappe.utils import cint
from frappe.utils import now_datetime
from frappe.model.naming import getseries
from erpnext.setup.doctype.naming_series.naming_series import NamingSeries
class ENCINamingSeries(NamingSeries):
    """NamingSeries subclass that widens the set of characters allowed in a
    series prefix ('{', '}', '[' and ']' in addition to the defaults) and
    resolves prefixes through the module-level parse_naming_series() helper.
    """

    def validate_series_name(self, n):
        """Raise if *n* contains a character outside the allowed set.

        FIX: the pattern is now a raw string; the previous plain string
        contained invalid escape sequences (\\w, \\-, \\[) which emit
        SyntaxWarning/DeprecationWarning on modern Python.
        """
        import re
        if not re.match(r"^[\w\- /.#{}\[\]]*$", n, re.UNICODE):
            throw(_('Special Characters except "-", "#", ".", "/", "{", "}", "[" and "]" not allowed in naming series'))

    @frappe.whitelist()
    def get_current(self, arg=None):
        """get series current"""
        if self.prefix:
            prefix = self.parse_naming_series()
            # Read the stored counter for this resolved prefix.
            self.current_value = frappe.db.get_value("Series",
                prefix, "current", order_by="name")

    @frappe.whitelist()
    def update_series_start(self):
        """Persist self.current_value as the counter for the resolved prefix."""
        if self.prefix:
            prefix = self.parse_naming_series()
            # Ensure the Series row exists before updating it.
            self.insert_series(prefix)
            frappe.db.sql("update `tabSeries` set current = %s where name = %s",
                (cint(self.current_value), prefix))
            msgprint(_("Series Updated Successfully"))
        else:
            msgprint(_("Please select prefix first"))

    def parse_naming_series(self):
        """Resolve the dotted prefix, dropping a trailing all-'#' counter part."""
        parts = self.prefix.split('.')
        # Remove ### from the end of series
        if parts[-1] == "#" * len(parts[-1]):
            del parts[-1]
        # Name resolution inside a method skips class scope, so this calls
        # the module-level parse_naming_series() helper, not this method.
        prefix = parse_naming_series(parts)
        return prefix
def set_name_by_naming_series(doc):
    """Sets name by the `naming_series` property"""
    # Fall back to the doctype's default series when none was chosen.
    if not doc.naming_series:
        doc.naming_series = get_default_naming_series(doc.doctype)
    if not doc.naming_series:
        frappe.throw(frappe._("Naming Series mandatory"))
    # Append a 5-digit counter part and generate the document name.
    key = doc.naming_series + ".#####"
    doc.name = make_autoname(key, "", doc)
def make_autoname(key="", doctype="", doc=""):
    """
    Creates an autoname from the given key:
    **Autoname rules:**
    * The key is separated by '.'
    * '####' represents a series. The string before this part becomes the prefix:
        Example: ABC.#### creates a series ABC0001, ABC0002 etc
    * 'MM' represents the current month
    * 'YY' and 'YYYY' represent the current year
    *Example:*
    * DE/./.YY./.MM./.##### will create a series like
      DE/09/01/0001 where 09 is the year, 01 is the month and 0001 is the series
    """
    # Special key: a 10-character random hash instead of a series.
    if key == "hash":
        return frappe.generate_hash(doctype, 10)

    if "#" not in key:
        # No counter part given: append a default 5-digit one.
        key += ".#####"
    elif "." not in key:
        if doctype:
            error_message = _("Invalid naming series (. missing) for {0}").format(doctype)
        else:
            error_message = _("Invalid naming series (. missing)")
        frappe.throw(error_message)

    return parse_naming_series(key.split('.'), doctype, doc)
def parse_naming_series(parts, doctype='', doc=''):
    """Expand a dotted naming-series pattern into a concrete name string.

    parts: list of pattern parts (a dotted string is split first).
    Supported parts: '#...' counter (optionally suffixed '[hash]'),
    YY/MM/DD/YYYY date codes, 'timestamp', 'FY' (user's fiscal year),
    '{fieldname}' or a bare fieldname looked up on *doc*; anything else
    is copied literally.
    """
    n = ''
    if isinstance(parts, str):
        parts = parts.split('.')
    series_set = False  # only the first '#' part consumes the series counter
    hash_ = False
    today = now_datetime()
    for e in parts:
        part = ''
        if e.startswith('#'):
            if not series_set:
                if "[hash]" in e.lower():
                    # '#####[hash]' -> random hash instead of a stored counter
                    e = e.split("[")[0]
                    hash_ = True
                digits = len(e)
                # getseries increments and returns the zero-padded counter
                # for the prefix accumulated so far.
                part = getseries(n, digits)
                if hash_:
                    part = frappe.generate_hash(doctype + part, digits).upper()
                series_set = True
        elif e == 'YY':
            part = today.strftime('%y')
        elif e == 'MM':
            part = today.strftime('%m')
        elif e == 'DD':
            part = today.strftime("%d")
        elif e == 'YYYY':
            part = today.strftime('%Y')
        elif e == 'timestamp':
            part = str(today)
        elif e == 'FY':
            part = frappe.defaults.get_user_default("fiscal_year")
        elif e.startswith('{') and doc:
            # '{fieldname}' -> value of that field on the document
            e = e.replace('{', '').replace('}', '')
            part = doc.get(e)
        elif doc and doc.get(e):
            part = doc.get(e)
        else:
            part = e
        # Non-string parts (e.g. a missing doc field returning None) are skipped.
        if isinstance(part, str):
            n += part
    return n
def get_default_naming_series(doctype):
    """get default value for `naming_series` property"""
    options = frappe.get_meta(doctype).get_field("naming_series").options or ""
    if not options:
        return None
    choices = options.split("\n")
    # First line if non-empty, otherwise the second line.
    return choices[0] or choices[1]
from __future__ import absolute_import
from .client import *  # noqa: F401,F403
from .const import *  # noqa: F401,F403
from .conversions import *  # noqa: F401,F403
from .exceptions import *  # noqa: F401,F403
from .planner import *  # noqa: F401,F403
from .utils import *  # noqa: F401,F403
# Re-export every public (non-underscore) name pulled in by the star
# imports above as this package's public API.
__all__ = [name for name in dir() if not name.startswith('_')]
|
from .unshorten import unshorten
|
from django.apps import AppConfig
class PikaConfig(AppConfig):
    """Django application configuration for the 'pika' app."""
    name = 'pika'
|
import serial
import string
import time
import numpy as np
from file_handling import format_filename, export_recorded
from plot import plot_data
windows = 'COM6'
linux = '/dev/ttyACM0'
ser = serial.Serial(windows, 230400, timeout=1)
def read() -> list:
    """Read one line from the serial port and split it into ';'-separated fields.

    readline() returns bytes; str() produces its repr ("b'...\\r\\n'"), and the
    replace() calls strip the repr artifacts ("b'", literal "\\r\\n") plus all
    spaces.  NOTE(review): .decode() would be cleaner — confirm the device
    never emits meaningful spaces before changing this.
    """
    line = str(ser.readline()).replace(' ', '').replace('\\r\\n', '').replace('b\'', '')
    return line.split(';')
def write(command) -> None:
    """Encode *command* as bytes and send it over the serial port."""
    encoded = command.encode()
    ser.write(encoded)
def read_bytes(amount) -> bytes:
    """Read exactly *amount* bytes (or until timeout) from the serial port."""
    data = ser.read(amount)
    return data
def main():
    """Record serial samples until Ctrl+C, then plot the channels."""
    channel_names = ['24', '25', '26', '27', '28', '29']
    samples = []
    input('start recording by pressing any button, stop recording by using keyboard interrupt crtl+c: ')
    try:
        while True:
            samples.append(read())
    except KeyboardInterrupt:
        # Transpose so each row holds one channel's full time series.
        plot_data([channel_names, np.transpose(np.array(samples))])


if __name__ == '__main__':
    main()
|
import re
import packet
import json
import qbert
import cluster_manager as cm
from flask import Flask
from flask import jsonify
from flask import request
from flask import Response
app = Flask(__name__)
with open('secrets.json') as f:
SECRETS = json.load(f)
def packet_auth(auth_token, project_id):
    """Return True iff *auth_token* is valid for *project_id* on Packet.

    Both values must be non-empty and consist solely of ASCII letters,
    digits and hyphens; anything else is rejected before the API is called.

    FIX: the previous pattern "([A-z0-9-])" (a) used [A-z], which also spans
    the punctuation between 'Z' and 'a' ('[', '\\', '^', '_', '`'), and
    (b) with .match() only required a single valid leading character.
    fullmatch with an explicit class validates the whole string.  Debug
    prints removed.
    """
    pattern = re.compile(r"[A-Za-z0-9-]+")
    if not auth_token or not project_id:
        return False
    if not pattern.fullmatch(auth_token) or not pattern.fullmatch(project_id):
        return False
    manager = packet.Manager(auth_token=auth_token)
    try:
        # Valid only when the token can see a project with the given id.
        projects = manager.list_projects()
        for project in projects:
            if project.id == project_id:
                return True
    except packet.baseapi.Error:
        return False
    return False
def get_clusters(project_name, SECRETS):
    """Return (clusters, status_code) listing qbert clusters for a project.

    Failures are deliberately swallowed: on any error an empty list is
    returned with status 200 (best-effort listing).
    FIX: narrowed the bare `except:` — it also trapped SystemExit and
    KeyboardInterrupt.
    """
    endpoint = re.search("(?:http.*://)?(?P<host>[^:/ ]+)", SECRETS['OS_AUTH_URL']).group('host')
    try:
        token, catalog, project_id = qbert.get_token_v3(endpoint, SECRETS['OS_USERNAME'], SECRETS['OS_PASSWORD'],
                                                        project_name)
        qbert_url = "{0}/{1}".format(qbert.get_service_url('qbert', catalog, SECRETS['OS_REGION_NAME']), project_id)
        clusters = qbert.get_request(qbert_url, token, "clusters")
        status_code = 200
    except Exception:
        print("Something went wrong.")
        clusters = []
        status_code = 200
    return clusters, status_code
def get_cluster(project_name, cluster_id, SECRETS):
    """Return (cluster, status_code) for a single qbert cluster.

    Any lookup failure, or a non-dict response, yields a 400 error payload.
    FIX: narrowed the bare `except:` — it also trapped SystemExit and
    KeyboardInterrupt.
    """
    endpoint = re.search("(?:http.*://)?(?P<host>[^:/ ]+)", SECRETS['OS_AUTH_URL']).group('host')
    try:
        token, catalog, project_id = qbert.get_token_v3(endpoint, SECRETS['OS_USERNAME'], SECRETS['OS_PASSWORD'],
                                                        project_name)
        qbert_url = "{0}/{1}".format(qbert.get_service_url('qbert', catalog, SECRETS['OS_REGION_NAME']), project_id)
        cluster = qbert.get_request(qbert_url, token, "clusters/{}".format(cluster_id))
        status_code = 200
    except Exception:
        print("Something went wrong.")
        cluster = {'error': {'message': "Error: table clusters does not have object {}".format(cluster_id),
                             'code': 400}}
        status_code = 400
    if not isinstance(cluster, dict):
        # The backend answered, but not with a cluster object.
        print("Something went wrong.")
        cluster = {'error': {'message': "Error: table clusters does not have object {}".format(cluster_id),
                             'code': 400}}
        status_code = 400
    return cluster, status_code
@app.errorhandler(405)
def method_not_allowed(e):
    """JSON body for HTTP 405 responses."""
    payload = {'error': '405 Method Not Allowed'}
    return jsonify(payload), 405
@app.errorhandler(500)
def internal_server_error(e):
    """JSON body for HTTP 500 responses."""
    payload = {'error': '500 Internal Server Error'}
    return jsonify(payload), 500
@app.route('/', strict_slashes=False)
def api_versions():
    """Report the available API versions."""
    return jsonify({"tikube": "v0"})
@app.route('/v0/clusters', strict_slashes=False)
def cluster_error():
    """Reject cluster listing requests that omit the project id."""
    message = {"ERROR": "No project id detected in URL /v0/<project_id>/clusters/"}
    return jsonify(message), 403
@app.route('/v0/<project_id>/clusters', methods=['GET', 'POST'], strict_slashes=False)
def clusters(project_id):
    """List (GET) or create (POST) qbert clusters for a Packet project.

    NOTE(review): POST mutates the module-global SECRETS dict in place, so
    concurrent requests can interleave writes — confirm single-worker use.
    """
    auth_token = request.headers.get('X-Auth-Token')
    if not packet_auth(auth_token, project_id):
        return jsonify({'error': 'Invalid authentication token or project id.'}), 401
    if request.method == 'GET':
        clusters, status_code = get_clusters(project_id, SECRETS)
        return jsonify(clusters), status_code
    elif request.method == 'POST':
        body = request.get_json()
        # Three masters for HA when requested, otherwise a single master.
        if body['multi_master']:
            SECRETS['MASTER_COUNT'] = 3
        else:
            SECRETS['MASTER_COUNT'] = 1
        SECRETS['AUTH_TOKEN'] = auth_token
        SECRETS['PACKET_PROJECT_ID'] = project_id
        SECRETS['CLUSTER_NAME'] = body['cluster_name']
        SECRETS['FACILITY'] = body['facility']
        SECRETS['MASTER_SIZE'] = body['master_plan']
        SECRETS['WORKER_SIZE'] = body['worker_plan']
        SECRETS['WORKER_COUNT'] = body['worker_count']
        create_cluster = cm.do_create_stack(SECRETS)
        return jsonify(create_cluster), 200
@app.route('/v0/<project_id>/clusters/<cluster_id>', methods=['GET', 'DELETE'], strict_slashes=False)
def cluster(project_id, cluster_id):
    """Fetch (GET) or tear down (DELETE) a single qbert cluster."""
    auth_token = request.headers.get('X-Auth-Token')
    if not packet_auth(auth_token, project_id):
        return jsonify({'error': 'Invalid authentication token or project id.'}), 401
    if request.method == 'GET':
        clusters, status_code = get_cluster(project_id, cluster_id, SECRETS)
        return jsonify(clusters), status_code
    elif request.method == 'DELETE':
        # NOTE(review): mutates the module-global SECRETS in place.
        SECRETS['AUTH_TOKEN'] = auth_token
        SECRETS['PACKET_PROJECT_ID'] = project_id
        SECRETS['CLUSTER_ID'] = cluster_id
        delete_cluster = cm.do_delete_stack(SECRETS)
        return jsonify(delete_cluster), 200
    else:
        # Unreachable with the declared methods; kept as a safety net.
        return jsonify({"error": "Invalid Method: {}".format(request.method)}), 401
@app.route('/v0/<project_id>/clusters/<cluster_id>/kubeconfig', methods=['POST'], strict_slashes=False)
def kubeconfig(project_id, cluster_id):
    """Return the cluster's kubeconfig as a downloadable YAML attachment.

    Responds 409 while the cluster is not yet healthy, 400 when the backend
    returns an error dict instead of the kubeconfig text.
    FIX: removed a discarded `json.dumps(cluster)` call whose result was
    never used.
    """
    auth_token = request.headers.get('X-Auth-Token')
    if not packet_auth(auth_token, project_id):
        return jsonify({'error': 'Invalid authentication token or project id.'}), 401
    if request.method == 'POST':
        body = request.get_json()
        SECRETS['user_id'] = body['user_id']
        SECRETS['AUTH_TOKEN'] = auth_token
        SECRETS['PACKET_PROJECT_ID'] = project_id
        SECRETS['CLUSTER_ID'] = cluster_id
        cluster, status_code = get_cluster(project_id, cluster_id, SECRETS)
        if status_code != 200:
            return jsonify(cluster), status_code
        # Refuse to hand out a kubeconfig until the cluster reports healthy.
        if cluster['status'] != 'ok' or not cluster['lastOk'] or not cluster['lastOp']:
            return jsonify({'Not Ready': 'The cluster is not ready to serve a kubeconfig. Check back later'}), 409
        kubeconfig = cm.do_get_kubeconfig(SECRETS)
        if not isinstance(kubeconfig, dict):
            # Plain text kubeconfig: serve it as a file download.
            resp = Response(kubeconfig)
            resp.headers['Content-Type'] = 'application/octet-stream'
            resp.headers['Content-disposition'] = 'attachment; filename={}.yaml'.format(cluster_id)
            return resp
        else:
            # A dict here is an error payload from the backend.
            return jsonify(kubeconfig), 400
    else:
        return jsonify({"error": "Invalid Method: {}".format(request.method)}), 401
@app.route('/v0/<project_id>/clusters/<cluster_id>/users', methods=['GET', 'POST'], strict_slashes=False)
def users(project_id, cluster_id):
    """List (GET) or create (POST) users on a cluster."""
    auth_token = request.headers.get('X-Auth-Token')
    if not packet_auth(auth_token, project_id):
        return jsonify({'error': 'Invalid authentication token or project id.'}), 401
    if request.method == 'GET':
        SECRETS['AUTH_TOKEN'] = auth_token
        SECRETS['PACKET_PROJECT_ID'] = project_id
        SECRETS['CLUSTER_ID'] = cluster_id
        get_users = cm.do_get_users(SECRETS)
        return jsonify(get_users), 200
    if request.method == 'POST':
        body = request.get_json()
        SECRETS['username'] = body['username']
        SECRETS['AUTH_TOKEN'] = auth_token
        SECRETS['PACKET_PROJECT_ID'] = project_id
        SECRETS['CLUSTER_ID'] = cluster_id
        new_user = cm.do_create_user(SECRETS)
        # do_create_user signals failure via an 'Error' key in its result.
        if 'Error' in new_user:
            return jsonify(new_user), 400
        else:
            return jsonify(new_user), 200
    else:
        # Only reachable for methods other than GET/POST (route-filtered).
        return jsonify({"error": "Invalid Method: {}".format(request.method)}), 401
@app.route('/v0/<project_id>/clusters/<cluster_id>/users/<user_id>', methods=['GET', 'DELETE'], strict_slashes=False)
def user(project_id, cluster_id, user_id):
    """Fetch (GET) or remove (DELETE) a single cluster user."""
    auth_token = request.headers.get('X-Auth-Token')
    if not packet_auth(auth_token, project_id):
        return jsonify({'error': 'Invalid authentication token or project id.'}), 401
    if request.method == 'GET':
        SECRETS.update(AUTH_TOKEN=auth_token, PACKET_PROJECT_ID=project_id,
                       CLUSTER_ID=cluster_id, user_id=user_id)
        user_record = cm.do_get_users(SECRETS, user_id)
        return jsonify(user_record), 200
    elif request.method == 'DELETE':
        SECRETS.update(AUTH_TOKEN=auth_token, PACKET_PROJECT_ID=project_id,
                       CLUSTER_ID=cluster_id, user_id=user_id)
        removed = cm.do_delete_user(SECRETS)
        # An 'Error' key in the result marks a failed deletion.
        status = 400 if 'Error' in removed else 200
        return jsonify(removed), status
    else:
        return jsonify({"error": "Invalid Method: {}".format(request.method)}), 401
|
"""
Django settings for FPIDjango project.
----
Unfortunately, the modified import statement of
..
from FPIDjango.private.settings_private import *
is required for the PyCharm Python Console and the PyCharm manage.py to
work properly. It obviously knows better than I do. It cannot pay
attention to some silly environment variable that I set in Preferences
like we were able to do in the run settings.
This will work -- and not reveal our credentials -- as long as the
**private** directory is included in our **.gitignore** file.
----
(Comments autogenerated by Django )
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from os.path import dirname, join
import psycopg2.extensions
import django_extensions
from FPIDjango.private.settings_private import *
"""
Original import of dummy values:
from .settings_public import *
"""
__author__ = '(Multiple)'
__project__ = "Food-Pantry-Inventory"
__creation_date__ = "04/01/2019"
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SCANS_DIR = join(dirname(BASE_DIR), 'scans')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = MY_SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Site info - see https://docs.djangoproject.com/en/3.0/ref/contrib/sites/
SITE_ID = 1
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', DB_HOST]
# Application definition
INSTALLED_APPS = [
# Application app(s)
'fpiweb.apps.FpiwebConfig',
# Django supplied apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
# Third party apps
'bootstrap4',
'django_extensions'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'FPIDjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 'BACKEND': 'django.template.backends.jinja2.Jinja2',
'DIRS': [
os.path.join(BASE_DIR, 'fpiweb/')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'FPIDjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# All connection parameters come from FPIDjango/private/settings_private.py
# (star-imported above), keeping credentials out of version control.
DATABASES = {
    # 'default': {
    #     'ENGINE': 'django.db.backends.sqlite3',
    #     'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    # },
    'default': {
        'ENGINE': DB_ENGINE,
        'NAME': DB_NAME,
        'USER': DB_USER,
        'PASSWORD': DB_PSWD,
        'HOST': DB_HOST,
        'PORT': DB_PORT,
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# URL to allow a user to authenticate herself. This becomes the default
# used whenever Django detects someone trying to do something requiring
# authentication.
LOGIN_URL = 'fpiweb:login'
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# django-bootstrap4 settings
# Default settings
BOOTSTRAP4 = {
# The complete URL to the Bootstrap CSS file
# Note that a URL can be either a string,
# e.g. "https://stackpath.bootstrapcdn.com/bootstrap/4.1.1/css/bootstrap
# .min.css",
# or a dict like the default value below.
"css_url": {
"href": "https://stackpath.bootstrapcdn.com/bootstrap/4.1.1/css"
"/bootstrap.min.css",
"integrity": "sha384-WskhaSGFgHYWDcbwN70/dfYBj47jz9qbsMId"
"/iRN3ewGhXQFZCSftd1LZCfmhktB",
"crossorigin": "anonymous",
},
# The complete URL to the Bootstrap JavaScript file
"javascript_url": {
"url": "https://stackpath.bootstrapcdn.com/bootstrap/4.1.1/js"
"/bootstrap.min.js",
"integrity": "sha384-smHYKdLADwkXOn1EmN1qk/HfnUcbVRZyYmZ4qpPea6sjB"
"/pTJ0euyQp0Mk8ck+5T",
"crossorigin": "anonymous",
},
# The complete URL to the Bootstrap CSS file (None means no theme)
"theme_url": None,
# The URL to the jQuery JavaScript file (full)
"jquery_url": {"url": "https://code.jquery.com/jquery-3.3.1.min.js",
"integrity": "sha384-tsQFqpEReu7ZLhBV2VZlAu7zcOV+rXbYlF2cqB8txI"
"/8aZajjp4Bqd+V6D5IgvKT",
"crossorigin": "anonymous",
},
# The URL to the jQuery JavaScript file (slim)
"jquery_slim_url": {
"url": "https://code.jquery.com/jquery-3.3.1.slim.min.js",
"integrity":
"sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH"
"+8abtTE1Pi6jizo",
"crossorigin": "anonymous",
},
# The URL to the Popper.js JavaScript file (slim)
"popper_url": {
"url": "https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.3/umd"
"/popper.min.js",
"integrity":
"sha384-ZMP7rVo3mIykV+2+9J3UJ46jBk0WLaUAdn689aCwoqbBJiSnjAK"
"/l8WvCWPIPm49",
"crossorigin": "anonymous",
},
# Put JavaScript in the HEAD section of the HTML document (only relevant
# if you use bootstrap4.html)
'javascript_in_head': False,
# Include jQuery with Bootstrap JavaScript False|falsy|slim|full (
# default=False)
# False - means tag bootstrap_javascript use default value - `falsy` and
# does not include jQuery)
'include_jquery': False,
# Label class to use in horizontal forms
'horizontal_label_class': 'col-md-3',
# Field class to use in horizontal forms
'horizontal_field_class': 'col-md-9',
# Set placeholder attributes to label if no placeholder is provided
'set_placeholder': True,
# Class to indicate required (better to set this in your Django form)
'required_css_class': '',
# Class to indicate error (better to set this in your Django form)
'error_css_class': 'is-invalid',
# Class to indicate success, meaning the field has valid input (better
# to set this in your Django form)
'success_css_class': 'is-valid',
# Renderers (only set these if you have studied the source and
# understand the inner workings)
'formset_renderers': {
'default': 'bootstrap4.renderers.FormsetRenderer',
},
'form_renderers': {
'default': 'bootstrap4.renderers.FormRenderer',
},
'field_renderers': {
'default': 'bootstrap4.renderers.FieldRenderer',
'inline': 'bootstrap4.renderers.InlineFieldRenderer',
},
}
LOG_DIR = join(BASE_DIR, 'log')
# NOTE(review): LOG_DIR is defined but the handler filenames below are
# relative paths, so the logs land in the process CWD — confirm intent.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': "{levelname}:{asctime}:(unknown):{lineno}:{message}",
            'style': '{',
        }
    },
    'handlers': {
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'standard',
        },
        # NOTE(review): mode 'w' truncates each log file on every restart.
        'django': {
            'level': 'INFO',
            'class': 'logging.FileHandler',
            'filename': 'django.log',
            'mode': 'w',
            'formatter': 'standard',
        },
        'fpiweb': {
            'level': 'INFO',
            'class': 'logging.FileHandler',
            'filename': 'fpiweb.log',
            'mode': 'w',
            'formatter': 'standard',
        },
        'file': {
            'class': 'logging.FileHandler',
            'level': 'INFO',
            'formatter': 'standard',
            'filename': 'root.log',
            'mode': 'w',
        },
    },
    'loggers': {
        # Framework messages go to django.log and the console.
        'django': {
            'handlers': ['django', 'console'],
            'level': 'INFO',
            'propagate': True,
        },
        # Application messages go to fpiweb.log and the console.
        'fpiweb': {
            'handlers': ['fpiweb', 'console'],
            'level': 'INFO',
            'propagate': True,
        },
    },
    # Everything else falls through to root.log and the console.
    'root': {
        'level': 'INFO',
        'handlers': ['file', 'console'],
    },
}
|
# Generated by Django 2.2.6 on 2019-11-05 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds an optional `photo` FileField (upload_to='profile') to templateuser."""

    dependencies = [
        ('myuser', '0003_auto_20191021_1610'),
    ]

    operations = [
        migrations.AddField(
            model_name='templateuser',
            name='photo',
            field=models.FileField(blank=True, null=True, upload_to='profile'),
        ),
    ]
|
import requests
resto = requests.get("http://www.pythonhow.com/data/universe.txt")
# FIX: the response body lives on the `resto` response object; the previous
# `re.text` referenced the (never imported) `re` module and raised NameError.
text = resto.text
print(text)
from bokeh.io import show
from bokeh.layouts import column
from bokeh.models.util import generate_structure_plot
from bokeh.plotting import figure
# draw the structure graph of a basic figure model
f = figure(width=400, height=400)
f.line(x=[1, 2, 3], y=[1, 2, 3])
# generate_structure_plot renders the model graph of `f` as a second plot,
# shown stacked below the original figure.
K = generate_structure_plot(f)
show(column(f, K))
|
# -*- coding: utf-8 -*-
try:
    # python2: urlparse is a top-level module
    import urlparse
except ImportError:
    # python3: bind the urllib.parse module under the same name so the
    # urlparse.urlparse(...) / urlparse.parse_qs(...) call sites below work.
    # FIX: the previous `from urllib.parse import urlparse` bound the
    # *function*, so urlparse.urlparse raised AttributeError on python3.
    import urllib.parse as urlparse
import scrapy
from scrapy.shell import inspect_response
class BrandenburgSpider(scrapy.Spider):
    """Scrapes the Brandenburg school directory into per-school dicts."""
    name = "brandenburg"
    # allowed_domains = ["https://www.bildung-brandenburg.de/schulportraets/index.php?id=3"]
    # Two result pages: public (traeger=0) and private (traeger=1) schools.
    start_urls = ['https://www.bildung-brandenburg.de/schulportraets/index.php?id=3&schuljahr=2016&kreis=&plz=&schulform=&jahrgangsstufe=0&traeger=0&submit=Suchen',
                  'https://www.bildung-brandenburg.de/schulportraets/index.php?id=3&schuljahr=2016&kreis=&plz=&schulform=&jahrgangsstufe=0&traeger=1&submit=Suchen']

    def parse(self, response):
        """Follow each school link in the result table, carrying the school
        number and name along in the request meta."""
        for link in response.css("table a"):
            url = link.css("::attr(href)").extract_first()
            response.link = link
            # The school number is the 'schulnr' query parameter of the link.
            parsed_url = urlparse.urlparse(url)
            parsed = urlparse.parse_qs(parsed_url.query)
            meta = {}
            meta['nummer'] = parsed['schulnr'][0]
            meta['name'] = link.css('::text').extract_first()
            response.foo = meta
            #inspect_response(response, self)
            yield scrapy.Request(response.urljoin(url), callback=self.parse_detail, meta=meta)

    def parse_detail(self, response):
        """Turn the detail table into a dict keyed by the header-cell text."""
        trs = response.css("table tr")
        content = {}
        # The first row is an image and a map
        #inspect_response(response, self)
        for tr in trs[1:]:
            # [:-1] drops the trailing ':' from the header label; '*' markers
            # are stripped from both keys and values.
            key = "\n".join(tr.css('th ::text').extract()).strip()[:-1].replace("**", "")
            value = "\n".join(tr.css("td ::text").extract()).replace("*", "")
            content[key] = value
        content['name'] = response.meta['name']
        content['nummer'] = response.meta['nummer']
        response.content = content
        #inspect_response(response, self)
        yield content
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.