import numpy as np
import pandas as pd
from parallelm.components import ConnectableComponent
class RandomDataframe(ConnectableComponent):
"""
    Generates a random dataframe. The number of rows and columns are provided as input parameters to the component
"""
def __init__(self, engine):
super(self.__class__, self).__init__(engine)
def _materialize(self, parent_data_objs, user_data):
num_rows = self._params.get('num_lines', 100)
num_cols = self._params.get('num_cols', 5)
df = pd.DataFrame(np.random.randint(0, 100, size=(num_rows, num_cols)))
        self._logger.info("Generated random dataframe rows: {} cols: {}".format(num_rows, num_cols))
return [df]
|
from functools import lru_cache
from operator import mul
def parse_decks(filename):
with open(filename) as file:
player1_raw, player2_raw = file.read().split("\n\n")
p1 = list(map(int, player1_raw.split("\n")[1:]))
p2 = list(map(int, player2_raw.split("\n")[1:]))
return p1, p2
def play_combat(p1, p2):
while p1 and p2:
c1 = p1.pop(0)
c2 = p2.pop(0)
if c1 > c2:
p1.append(c1)
p1.append(c2)
else:
p2.append(c2)
p2.append(c1)
return p1, p2
def play_recursive_combat(p1, p2, depth=0):
current_cache=set()
p1 = list(p1)
p2 = list(p2)
while p1 and p2:
if (tuple(p1), tuple(p2)) in current_cache:
return (1, p1, p2)
current_cache.add((tuple(p1), tuple(p2)))
c1 = p1.pop(0)
c2 = p2.pop(0)
if len(p1) >= c1 and len(p2) >= c2:
winner, _, _ = play_recursive_combat(tuple(p1[:c1]), tuple(p2[:c2]), depth=depth+1)
if winner == 1:
p1.append(c1)
p1.append(c2)
else:
p2.append(c2)
p2.append(c1)
else:
if c1 > c2:
p1.append(c1)
p1.append(c2)
else:
p2.append(c2)
p2.append(c1)
return (1, p1, p2) if p1 else (2, p1, p2)
def get_score(deck):
n = len(deck)
l = map(mul, deck, reversed(range(1, n+1)))
return sum(l)
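# Worked example (illustrative, not from the puzzle input): for the deck [2, 1] the
# top card is multiplied by 2 and the bottom card by 1, so
# get_score([2, 1]) == 2*2 + 1*1 == 5.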
def main():
p1, p2 = parse_decks("input.txt")
p1cpy, p2cpy = list(p1), list(p2)
play_combat(p1cpy, p2cpy)
winner = p1cpy if p1cpy else p2cpy
score = get_score(winner)
print(score)
p1cpy, p2cpy = tuple(p1), tuple(p2)
winner, p1final, p2final = play_recursive_combat(p1cpy, p2cpy)
score = get_score(p1final) if winner == 1 else get_score(p2final)
print(score)
if __name__ == '__main__':
main()
|
from tkinter import *
import sqlite3
root=Tk()
root.title(" DataBase Record ")
root.geometry("400x600")
root.iconbitmap('edit.ico')
'''
# Create a Database or connect to one
conn=sqlite3.connect('address_book.db')
# Create cursor
c = conn.cursor()
# Create a Table
c.execute("""CREATE TABLE addresses(
first_name text,
last_name text,
address text,
city text,
state text,
zipcode integer) """)
'''
def update():
# Create a Database or connect to one
conn=sqlite3.connect('address_book.db')
# Create cursor
c=conn.cursor()
record_id=delete_box.get()
c.execute("""
        UPDATE addresses SET
first_name = :first,
last_name = :last,
address = :address,
city = :city,
state = :state,
zipcode = :zipcode
WHERE oid = :oid""",
{
'first': f_name_edit.get(),
'last': l_name_edit.get(),
'address': address_edit.get(),
'city': city_edit.get(),
'state': state_edit.get(),
'zipcode': zipcode_edit.get(),
'oid': record_id
})
# commit changes
conn.commit()
# close connection
conn.close()
# create edit() function to update a record
def edit():
editor=Tk()
editor.title("Update a Record")
editor.geometry("400x600")
    editor.iconbitmap('edit.ico')
# Create a Database or connect to one
conn=sqlite3.connect('address_book.db')
# Create cursor
c=conn.cursor()
record_id=delete_box.get()
# Query the Data base
c.execute(" SELECT * FROM addresses WHERE oid ="+record_id)
records=c.fetchall()
show=''
for record in records[0:]:
show+=str(record)+"\t"+"\n"
# Create Global variable for text boxes name
global f_name_edit
global l_name_edit
global address_edit
global city_edit
global state_edit
global zipcode_edit
# Create Text Boxes
f_name_edit=Entry(editor, width=50)
f_name_edit.grid(row=0, column=1, padx=20, pady=5)
l_name_edit=Entry(editor, width=50)
l_name_edit.grid(row=1, column=1, padx=20, pady=5)
address_edit=Entry(editor, width=50)
address_edit.grid(row=2, column=1, padx=20, pady=5)
city_edit=Entry(editor, width=50)
city_edit.grid(row=3, column=1, padx=20, pady=5)
state_edit=Entry(editor, width=50)
state_edit.grid(row=4, column=1, padx=20, pady=5)
zipcode_edit=Entry(editor, width=50)
zipcode_edit.grid(row=5, column=1, padx=20, pady=5)
# Create Text Label
f_name_label_edit=Label(editor, text="First Name")
f_name_label_edit.grid(row=0, column=0)
l_name_label_edit=Label(editor, text="Last Name")
l_name_label_edit.grid(row=1, column=0)
address_label_edit=Label(editor, text="Address")
address_label_edit.grid(row=2, column=0)
city_label_edit=Label(editor, text="City")
city_label_edit.grid(row=3, column=0)
state_label_edit=Label(editor, text="State")
state_label_edit.grid(row=4, column=0)
zipcode_label_edit=Label(editor, text="ZipCode")
zipcode_label_edit.grid(row=5, column=0)
# Create a Save Button to save edited record
    save_btn=Button(editor, text=" save Record ", command=update)
save_btn.grid(row=6, column=0, columnspan=2, padx=10, pady=10, ipadx=100)
# loop through result
for record in records:
f_name_edit.insert(0, record[0])
l_name_edit.insert(0, record[1])
address_edit.insert(0, record[2])
city_edit.insert(0, record[3])
state_edit.insert(0, record[4])
zipcode_edit.insert(0, record[5])
# Create data base submit function
def submit():
# Create a Database or connect to one
conn=sqlite3.connect('address_book.db')
# Create cursor
c=conn.cursor()
# Insert into Table
c.execute(" INSERT INTO addresses VALUES (:f_name, :l_name, :address, :city, :state, :zipcode) ",
{
'f_name': f_name.get(),
'l_name': l_name.get(),
'address': address.get(),
'city': city.get(),
'state': state.get(),
'zipcode': zipcode.get()
})
# commit changes
conn.commit()
# close connection
conn.close()
# clear the boxes
f_name.delete(0, END)
l_name.delete(0, END)
address.delete(0, END)
city.delete(0, END)
state.delete(0, END)
zipcode.delete(0, END)
# Create a function to show record
def query():
# Create a Database or connect to one
conn=sqlite3.connect('address_book.db')
# Create cursor
c=conn.cursor()
# Query the Data base
c.execute(" SELECT *,oid FROM addresses ")
records=c.fetchall()
show=''
for record in records[0:]:
show+=str(record[0:6])+"\t"+str(record[6])+"\n"
query_label=Label(root, text=show)
query_label.grid(row=12, column=0, columnspan=2)
# commit changes
conn.commit()
# close connection
conn.close()
# create a function to delete record
def delete():
# Create a Database or connect to one
conn=sqlite3.connect('address_book.db')
# Create cursor
c=conn.cursor()
# delete a record
c.execute("DELETE from addresses WHERE oid="+delete_box.get())
# commit changes
conn.commit()
# close connection
conn.close()
# Create Text Boxes
f_name=Entry(root, width=50)
f_name.grid(row=0, column=1, padx=20, pady=5)
l_name=Entry(root, width=50)
l_name.grid(row=1, column=1, padx=20, pady=5)
address=Entry(root, width=50)
address.grid(row=2, column=1, padx=20, pady=5)
city=Entry(root,width=50)
city.grid(row=3, column=1, padx=20, pady=5)
state=Entry(root, width=50)
state.grid(row=4, column=1, padx=20, pady=5)
zipcode=Entry(root, width=50)
zipcode.grid(row=5, column=1, padx=20, pady=5)
delete_box=Entry(root, width=50)
delete_box.grid(row=9, column=1, padx=20, pady=5)
# Create Text Label
f_name_label=Label(root, text="First Name")
f_name_label.grid(row=0, column=0)
l_name_label=Label(root, text="Last Name")
l_name_label.grid(row=1, column=0)
address_label=Label(root, text="Address")
address_label.grid(row=2, column=0)
city_label=Label(root, text="City")
city_label.grid(row=3, column=0)
state_label=Label(root, text="State")
state_label.grid(row=4, column=0)
zipcode_label=Label(root, text="ZipCode")
zipcode_label.grid(row=5, column=0)
delete_box_label=Label(root, text="Delete ID")
delete_box_label.grid(row=9, column=0)
# Create Submit Button
submit_btn=Button(root, text='Submit', command=submit)
submit_btn.grid(row=6, column=0, columnspan=2, padx=10, pady=10, ipadx=120)
# Create Query Button
query_btn=Button(root, text=" Show Records ", command=query)
query_btn.grid(row=7, column=0, columnspan=2, padx=10, pady=10, ipadx=100)
# Create Delete Button
delete_btn=Button(root, text=" Delete Record ", command=delete)
delete_btn.grid(row=10, column=0, columnspan=2, padx=10, pady=10, ipadx=100)
# Create Update Button
edit_btn=Button(root, text=" Edit Record ", command=edit)
edit_btn.grid(row=11, column=0, columnspan=2, padx=10, pady=10, ipadx=100)
'''
# commit changes
conn.commit()
# close connection
conn.close()
'''
root.mainloop()
|
def print_matrix(m):
idx = 1
for r in m:
if idx < 10:
print(" ", idx, r)
else:
print(idx, r)
idx += 1
print("-------------------")
def nz_min(line):
if len(line) == 0:
return 0
line = [i for i in line if i != 0]
if len(line) == 0:
return 0
else:
return min(line)
def min_val(x, y):
if y == 0:
return x
if x > y:
return y
return x
def coinChange(coins, amount):
rows = amount
cols = len(coins)
arr = [[0 for i in range(cols)] for j in range(rows)]
for i in range(rows):
if (i+1) % coins[0] == 0:
arr[i][0] = (i+1) // coins[0]
print_matrix(arr)
for i in range(1,cols):
for j in range(rows):
part_amt = (j + 1)
if part_amt < coins[i]:
arr[j][i] = arr[j][i-1]
else:
x = part_amt // coins[i]
y = part_amt % coins[i]
mval = 9999
if y == 0:
mval = min_val(mval, x)
mval = min_val(mval, arr[j][i-1])
idx = part_amt - coins[i] * 1 - 1
if arr[idx][i] != 0:
mval = min_val(mval, 1 + arr[idx][i])
arr[j][i] = mval
print_matrix(arr)
return nz_min(arr[-1])
print(coinChange([2,5,10,1], 27))
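# For comparison, a minimal textbook bottom-up DP for the same problem (an
# illustrative sketch, independent of the tabulation above). dp[a] holds the
# fewest coins summing to a, or -1 if a is unreachable.
def coin_change_reference(coins, amount):
    INF = float('inf')
    dp = [0] + [INF] * amount
    for a in range(1, amount + 1):
        for c in coins:
            if c <= a and dp[a - c] + 1 < dp[a]:
                dp[a] = dp[a - c] + 1
    return dp[amount] if dp[amount] != INF else -1
# e.g. coin_change_reference([2, 5, 10, 1], 27) == 4  (10 + 10 + 5 + 2)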
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Zcash developers
# Copyright (c) 2020 The PIVX Developers
# Copyright (c) 2020 The Deviant Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from test_framework.test_framework import DeviantTestFramework
from test_framework.util import *
from decimal import Decimal
class WalletAnchorForkTest(DeviantTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.setup_clean_chain = True
saplingUpgrade = ['-nuparams=v5_shield:1']
self.extra_args = [saplingUpgrade, saplingUpgrade, saplingUpgrade]
def run_test (self):
self.nodes[0].generate(4) # generate blocks to activate sapling in regtest
# verify sapling activation.
assert(self.nodes[0].getblockchaininfo()['upgrades']['v5 shield']['activationheight'] == 1)
self.sync_all()
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(self.nodes[0].getblockcount(), walletinfo['last_processed_block'])
assert_equal(walletinfo['immature_balance'], 1000)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(102)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 1000)
assert_equal(self.nodes[1].getbalance(), 500)
assert_equal(self.nodes[2].getbalance(), 0)
# At this point in time, commitment tree is the empty root
# Node 0 creates a shield transaction
mytaddr0 = get_coinstake_address(self.nodes[0])
myzaddr0 = self.nodes[0].getnewshieldaddress()
recipients = []
recipients.append({"address":myzaddr0, "amount": Decimal('10.0') - Decimal('0.0001')})
txid = self.nodes[0].shieldsendmany(mytaddr0, recipients)
# Sync up mempools and mine the transaction. All nodes have the same anchor.
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
# Stop nodes.
self.stop_nodes()
# Relaunch nodes and partition network into two:
# A: node 0
# B: node 1, 2
self.start_node(0, self.extra_args[0])
self.start_node(1, self.extra_args[1])
self.start_node(2, self.extra_args[2])
connect_nodes(self.nodes[1], 2)
# Partition B, node 1 mines an empty block
self.nodes[1].generate(1)
self.sync_blocks(self.nodes[1:3])
# Check partition
assert_equal(self.nodes[1].getblockcount(), self.nodes[2].getblockcount())
assert(self.nodes[2].getblockcount() != self.nodes[0].getblockcount())
# Partition A, node 0 creates a shield transaction
recipients = []
recipients.append({"address":myzaddr0, "amount": Decimal('10.0') - Decimal('0.0001')})
txid = self.nodes[0].shieldsendmany(mytaddr0, recipients)
rawhex = self.nodes[0].getrawtransaction(txid)
# Partition A, node 0 mines a block with the transaction
self.nodes[0].generate(1)
self.sync_all(self.nodes[1:3])
# Partition B, node 1 mines the same shield transaction
txid2 = self.nodes[1].sendrawtransaction(rawhex)
assert_equal(txid, txid2)
self.nodes[1].generate(1)
# Check that Partition B is one block ahead and that they have different tips
assert_equal(self.nodes[0].getblockcount() + 1, self.nodes[1].getblockcount())
assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
# Shut down all nodes so any in-memory state is saved to disk
self.stop_nodes()
# Relaunch nodes and reconnect the entire network
self.start_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[2], 1)
connect_nodes(self.nodes[2], 0)
# Mine a new block and let it propagate
self.nodes[1].generate(1)
self.sync_all()
assert_equal( self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
assert_equal( self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())
if __name__ == '__main__':
WalletAnchorForkTest().main()
|
from collections import *
from functools import lru_cache
import heapq
import itertools
import math
import random
import sys
# https://raw.githubusercontent.com/arknave/advent-of-code-2020/main/day18/day18.py
def parse(eqn, is_part_2=False):
eqn = eqn.replace("(", "( ")
eqn = eqn.replace(")", " )")
# accidentally made this right associative, so reverse all the tokens
# and flip parens
tokens = eqn.split()[::-1]
stk = []
ops = []
for token in tokens:
if token == '(':
while ops[-1] != ")":
stk.append(ops.pop())
ops.pop()
elif token == "*":
# handle precedence
while is_part_2 and ops and ops[-1] == "+":
stk.append(ops.pop())
ops.append(token)
elif token in ")+":
ops.append(token)
else:
stk.append(int(token))
while ops:
stk.append(ops.pop())
print()
print(eqn)
print(stk)
cur = []
#import pdb; pdb.set_trace()
for val in stk:
if val == "+":
x = cur[-1] + cur[-2]
cur.pop()
cur.pop()
cur.append(x)
elif val == "*":
x = cur[-1] * cur[-2]
cur.pop()
cur.pop()
cur.append(x)
else:
cur.append(val)
print(stk, cur[0])
return cur[0]
def main():
#res = parse("5 * 7 * (9 * 3 + 6 + 8 + 8 + 5)", False)
#print(res)
lines = [line.strip() for line in sys.stdin if line.strip()]
part1 = 0
part2 = 0
for line in lines:
part1 += parse(line, False)
part2 += parse(line, True)
print("part 1", part1)
print("part 2", part2)
main()
|
import sys
sys.path.append('..')
import time
import pygame
from pygame import Rect, Color
from pathfinder import PathFinder
from gridmap import GridMap
class Visualizer(object):
def __init__(self, screen, field, message_func):
self.screen = screen
self.field = field
self.message_func = message_func
self.grid_size = 15
self.field_color = Color('black')
self.grid_color = Color('gray')
self.start_pos_color = Color('red')
self.goal_pos_color = Color('green')
self.path_color = Color('violet')
self.blocked_color = Color('gray')
self._init_map()
def draw(self):
self._draw_grid(self.field)
self._draw_map(self.field,
self.blocked_list, self.start_pos,
self.goal_pos, self.path)
self.message_func(self.msg1, self.msg2)
def user_event(self, event):
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_F5:
self._recompute_path()
elif event.type == pygame.MOUSEBUTTONDOWN:
self.path_valid = False
self.msg1 = 'Please recompute path (F5)'
self.msg2 = ''
self._handle_mouse_click(event)
########################## PRIVATE ##########################
def _init_map(self):
self.start_pos = 0, 0
self.goal_pos = 3, 8
        nrows = self.field.height // self.grid_size
        ncols = self.field.width // self.grid_size
self.map = GridMap(nrows, ncols)
for b in [ (1, 1), (1, 2), (0, 3), (1, 3), (2, 3),
(2, 4), (2, 5), (2, 6)]:
self.map.set_blocked(b)
self._recompute_path()
def _handle_mouse_click(self, event):
if not self.field.collidepoint(event.pos):
return
        ncol = (event.pos[0] - self.field.left) // self.grid_size
        nrow = (event.pos[1] - self.field.top) // self.grid_size
coord = (nrow, ncol)
if event.button == 1:
self.map.set_blocked(coord, not self.map.blocked[coord])
elif event.button == 2:
self.start_pos = coord
elif event.button == 3:
self.goal_pos = coord
def _recompute_path(self):
self.blocked_list = self.map.blocked
pf = PathFinder(self.map.successors, self.map.move_cost,
self.map.move_cost)
        t = time.perf_counter()
        self.path = list(pf.compute_path(self.start_pos, self.goal_pos))
        dt = time.perf_counter() - t
if self.path == []:
self.msg1 = "No path found"
else:
self.msg1 = "Found path (length %d)" % len(self.path)
self.msg2 = "Elapsed: %s seconds" % dt
self.path_valid = True
def _draw_grid(self, field):
""" Draw a grid on the given surface.
"""
self.screen.fill(self.field_color, field)
        nrows = field.height // self.grid_size
        ncols = field.width // self.grid_size
for y in range(nrows + 1):
pygame.draw.line(
self.screen,
self.grid_color,
(field.left, field.top + y * self.grid_size - 1),
(field.right - 1, field.top + y * self.grid_size - 1))
for x in range(ncols + 1):
pygame.draw.line(
self.screen,
self.grid_color,
(field.left + x * self.grid_size - 1, field.top),
(field.left + x * self.grid_size - 1, field.bottom - 1))
def _draw_map(self, field, blocked, start, goal, path):
        def _fill_square(cell, color):
            # unpack (row, col) explicitly; tuple parameters are Python 2 only
            nrow, ncol = cell
            left = field.left + ncol * self.grid_size
            top = field.top + nrow * self.grid_size
            width = self.grid_size - 1
            self.screen.fill(color, Rect(left, top, width, width))
        def _fill_spot(cell, color):
            nrow, ncol = cell
            pos_x = field.left + ncol * self.grid_size + self.grid_size // 2
            pos_y = field.top + nrow * self.grid_size + self.grid_size // 2
            radius = self.grid_size // 4
            pygame.draw.circle(self.screen,
                color, (pos_x, pos_y), radius)
for bl in blocked:
_fill_square(bl, self.blocked_color)
if self.path_valid:
for path_square in path:
_fill_spot(path_square, self.path_color)
_fill_spot(start, self.start_pos_color)
_fill_spot(goal, self.goal_pos_color)
def draw_messages(screen, rect, message1, message2):
draw_rimmed_box(screen, rect, (50, 20, 0), 4, Color('white'))
my_font = pygame.font.SysFont('arial', 18)
message1_sf = my_font.render(message1, True, Color('white'))
message2_sf = my_font.render(message2, True, Color('white'))
screen.blit(message1_sf, rect.move(10, 0))
screen.blit(message2_sf, rect.move(10, message1_sf.get_height()))
def draw_rimmed_box(screen, box_rect, box_color,
rim_width=0,
rim_color=Color('black')):
""" Draw a rimmed box on the given surface. The rim is drawn
outside the box rect.
"""
if rim_width:
rim_rect = Rect(box_rect.left - rim_width,
box_rect.top - rim_width,
box_rect.width + rim_width * 2,
box_rect.height + rim_width * 2)
pygame.draw.rect(screen, rim_color, rim_rect)
pygame.draw.rect(screen, box_color, box_rect)
def draw_title(screen, rect):
draw_rimmed_box(screen, rect, (40, 10, 60), 4, Color('gray'))
msgs = [
'Left click to toggle wall',
'Middle click to set start (red)',
'Right click to set goal (green)',
'F5 to recompute the path',
]
my_font = pygame.font.SysFont('arial', 16)
for i, msg in enumerate(msgs):
rendered = my_font.render(msg, True, Color('white'))
screen.blit(rendered, rect.move(10, i * rendered.get_height()))
def run_game():
SCREEN_WIDTH, SCREEN_HEIGHT = 350, 550
FIELD_RECT = Rect(25, 130, 300, 300)
MESSAGES_RECT = Rect(25, 450, 300, 50)
TITLE_RECT = Rect(25, 10, 300, 90)
pygame.init()
screen = pygame.display.set_mode(
(SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
clock = pygame.time.Clock()
def message_func(msg1, msg2):
draw_messages(screen, MESSAGES_RECT, msg1, msg2)
visualizer = Visualizer(screen, FIELD_RECT, message_func)
while True:
time_passed = clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit_game()
else:
visualizer.user_event(event)
draw_title(screen, TITLE_RECT)
visualizer.draw()
pygame.display.flip()
def exit_game():
sys.exit()
run_game()
|
from datetime import timedelta
from celery import Celery
app = Celery('twitch',
backend='db+sqlite:///celeryresdb.sqlite',
broker='sqla+sqlite:///celerydb.sqlite',
include=['tasks'])
app.conf.CELERYBEAT_SCHEDULE = {
'clear-db': {
'task': 'twitch.clear_dbs',
'schedule': timedelta(minutes=15)
},
}
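# A minimal sketch (an assumption, not part of the original project) of the periodic
# task named in the schedule above. In the real code base it would live in the
# included `tasks` module; the body here is only a placeholder.
@app.task(name='twitch.clear_dbs')
def clear_dbs():
    """Placeholder body: clear out stale rows so the sqlite broker/result DBs stay small."""
    pass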
if __name__ == '__main__':
app.start()
|
from bokeh.io import export_png, output_file, show
def _get_return(function, x, y, return_var):
return_var.append(function(x, elapsed_time=y))
import tnetwork as tn
from tnetwork.DCD.analytics.dynamic_partition import *
from nf1 import NF1
from sklearn.metrics import adjusted_rand_score,normalized_mutual_info_score
import pandas as pd
import numpy as np
from tnetwork.DCD.externals.dynamo import dynamo
from tnetwork.DCD.externals.dynmoga import dynmoga
from tnetwork.DCD.externals.MuchaOriginal import transversal_network_mucha_original
from matlab import engine
def standard_methods_to_test():
eng = engine.start_matlab()
def smoothed_louvain(x, elapsed_time=True):
return tn.DCD.iterative_match(x, CDalgo="smoothedLouvain", elapsed_time=elapsed_time)
# methods_to_test = {"iterative":DCD.iterative_match,"dynamo":dynamo,"dynmoga":dynmoga,"smoothed_louvain":smoothed_louvain}
def mucha_opti(x, elapsed_time=True):
return transversal_network_mucha_original(x, elapsed_time=elapsed_time, matlab_session=eng)
def mucha_global(x, elapsed_time=True):
return transversal_network_mucha_original(x, elapsed_time=elapsed_time, matlab_session=eng, form="global")
print("pas de mucha")
methods_to_test = {"iterative": tn.DCD.iterative_match,
"dynamo": dynamo,
"smoothed_louvain": smoothed_louvain,
"mucha": mucha_opti, # "mucha_global":mucha_global,
"survival_graph": tn.DCD.label_smoothing} # ,"dynmoga":dynmoga}#
# methods_to_test = {"smoothed_louvain":smoothed_louvain}#,"dynmoga":dynmoga}#
return methods_to_test
def generate_graph(nb_com =6,min_size=4,max_size=15,operations=18,mu=0.1):
print("generating graph with nb_com = ",nb_com)
prog_scenario = tn.ComScenario(verbose=False, external_density_penalty=mu)
all_communities = set(prog_scenario.INITIALIZE(np.random.randint(min_size,max_size,size=nb_com)))
for i in range(operations):
[com1] = np.random.choice(list(all_communities),1,replace=False)
all_communities.remove(com1)
if len(com1.nodes())<max_size and len(all_communities)>0: #merge
[com2] = np.random.choice(list(all_communities),1,replace=False)
largest_com = max([com1,com2],key=lambda x: len(x.nodes()))
merged = prog_scenario.MERGE([com1,com2], largest_com.label(), wait=20)
all_communities.remove(com2)
all_communities.add(merged)
else: #split
smallest_size = int(len(com1.nodes())/3)
(com2,com3) = prog_scenario.SPLIT(com1, [prog_scenario._get_new_ID("CUSTOM"), com1.label()], [smallest_size, len(com1.nodes()) - smallest_size], wait=20)
all_communities|= set([com2,com3])
(dyn_graph,dyn_com) = prog_scenario.run()
return(dyn_graph,dyn_com)
def compute_all_stats(all_infos, detailed=True):
names = []
times = []
LaNMI = []
LNMI = []
LF1 = []
LARI = []
nb_changes = []
# entropies = []
ent_by_nodes = []
S = []
modularities = []
nmis = []
IDs = {}
for id,an_experiment in all_infos.items():
GT_as_sn = an_experiment["GT"]
dyn_graph_sn=an_experiment["graph"]
results = an_experiment["result"]
iteration = an_experiment["ID"]
print(id)
for name, (result, time) in results.items():
for k, v in iteration.items():
IDs.setdefault(k,[])
IDs[k].append(v)
names.append(name)
times.append(time["total"])
if detailed:
LaNMI.append(longitudinal_similarity(GT_as_sn, result))
def nf1go(x, y):
a = NF1(y, x)
score = a.get_f1()[0]
return score
LF1.append(longitudinal_similarity(GT_as_sn,result,score=nf1go,convert_coms_sklearn_format=False))
LNMI.append(longitudinal_similarity(GT_as_sn, result))
LARI.append(longitudinal_similarity(GT_as_sn, result, score=adjusted_rand_score))
nb_changes.append(nb_node_change(result))
consecutive_NMIs = consecutive_sn_similarity(result)
#entropies.append(entropy(result))
ent_by_nodes.append(entropy_by_node(result)) #####Slow
S.append(np.average(consecutive_NMIs[0], weights=consecutive_NMIs[1]))
mods = quality_at_each_step(result, dyn_graph_sn)
modularities.append(np.average(mods[0], weights=mods[1]))
sim = similarity_at_each_step(GT_as_sn,result)
nmis.append(np.average(sim[0],weights=sim[1]))
df = pd.DataFrame()
df["algorithm"] = names
df["running time"] = times
if detailed:
df["LaNMI"] = LaNMI
df["LNMI"] = LNMI
df["LF1"] = LF1
df["LARI"] = LARI
df["M"] = nb_changes
#df["I_old"] = entropies
df["I"] = ent_by_nodes
df["S"] = S
df["Q"] = modularities
df["aNMI"] = nmis
df["# nodes"] = len(dyn_graph_sn.snapshots(dyn_graph_sn.snapshots_timesteps()[0]).nodes)
df["# steps"] = len(dyn_graph_sn.snapshots())
for k,l in IDs.items():
df[k]=l
return df
def run_all_algos(methods_to_test, dyn_graph_sn, plot=False, waiting=120):
"""
:param methods_to_test:
:param dyn_graph_sn:
:param plot:
:param waiting:
:return:
"""
results = {}
if plot:
dyn_graph = dyn_graph_sn.to_DynGraphIG(sn_duration=1)
methods_this_step = {name: m for name, m in methods_to_test.items()}
for name, m in methods_this_step.items():
results[name] = m(dyn_graph_sn, elapsed_time=True)
# manager = multiprocessing.Manager()
# temp = manager.list()
# p = multiprocessing.Process(target=_get_return, args=(m,dyn_graph_sn,True,temp))
# p.start()
# p.join(waiting)
# if p.is_alive():
# print ("running... let's kill it...")
# del methods_to_test[name]
# Terminate
# p.terminate()
# p.join()
# else:
# results[name] = temp[0]
if plot:
output_file(name + ".html")
p = tn.plot_longitudinal(dyn_graph, results[name][0].to_DynCommunitiesIG(1))
show(p)
export_png(p, filename=name + ".png")
return results
def subset(graph, com, length):
subgraph = tn.DynGraphSN(list(graph.snapshots().values())[:length])
subcomsGT = tn.DynCommunitiesSN()
for t in subgraph.snapshots_timesteps():
subcomsGT.set_communities(t, com.snapshot_communities(t))
return (subgraph, subcomsGT)
|
from django.contrib import admin
from submissions.models import Submission
class SubmissionAdmin(admin.ModelAdmin):
list_display = ("submitted_by", "status", "is_valid", "file_upload",
"submitted_file_name", "group_submitted", "trigger",
"entry_point", "datetime_submitted")
list_filter = ['entry_point']
admin.site.register(Submission, SubmissionAdmin)
|
"""
Stats 101
Based on the 10 Days of Statistics Tutorial
see: https://www.hackerrank.com/domains/tutorials/10-days-of-statistics
"""
import random
from collections import Counter
from collections.abc import Sequence
from fractions import Fraction
from functools import reduce
from operator import mul, itemgetter
from math import sqrt, pi, factorial, exp, erf
def mean(a):
if isinstance(a, Sequence):
## this is a lot faster, if we have a sequence with a __len__ method
return sum(a)/float(len(a))
else:
## works for generators or any iterable
s = n = 0
for x in a:
s += x
n += 1
return s/float(n)
def median(a, presorted=False):
n = len(a)
if not presorted:
a = sorted(a)
return (a[n//2]+a[n//2-1])/2.0 if len(a)%2==0 else a[n//2]
def mode(a):
"""
Find the most commonly occurring item in the input list
"""
index, _value = max(Counter(a).items(), key=itemgetter(1))
return index
def weighted_mean(x,w):
"""
Given equal length vectors of values and weights
"""
return sum(xi*wi for xi,wi in zip(x,w)) / sum(w)
def rank(x):
"""
Given a vector x, return an integer vector of the same length ranking the
values of x, where equal values have equal rank.
"""
r = 0
x_prev = float('-inf')
result = [None]*len(x)
## sort by value, i is the index of the value in the
## original unsorted list
for i,xi in sorted(enumerate(x), key=itemgetter(1)):
if xi>x_prev:
r += 1
x_prev = xi
result[i] = r
return result
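# Illustrative example: ties share a rank and the next distinct value gets the next
# integer (dense ranking), e.g. rank([10, 20, 20, 30]) == [1, 2, 2, 3].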
def variance(a):
mu = mean(a)
return sum((x-mu)**2 for x in a)/len(a)
def sd(a):
return sqrt(variance(a))
def covariance(x,y):
mean_x = mean(x)
mean_y = mean(y)
return sum((xi-mean_x)*(yi-mean_y) for xi,yi in zip(x,y))/len(x)
def pearson_correlation(x,y):
mean_x = mean(x)
mean_y = mean(y)
sd_x = sd(x)
sd_y = sd(y)
return sum( (xi-mean_x)*(yi-mean_y) for xi,yi in zip(x,y) )/(len(x)*sd_x*sd_y)
def spearman_unique_values(x,y):
n = len(x)
return 1 - 6*sum((xi-yi)**2 for xi,yi in zip(rank(x),rank(y)))/(n*(n**2-1))
def quartiles(a):
a = sorted(a)
n = len(a)
q = n//4
    if n%4 == 0:
        q1 = mean([a[q-1], a[q]])
        q2 = mean([a[n//2-1], a[n//2]])
        q3 = mean([a[n-q-1], a[n-q]])
    elif n%4 == 1:
        q1 = mean([a[q-1], a[q]])
        q2 = a[n//2]
        q3 = mean([a[n-q-1], a[n-q]])
    elif n%4 == 2:
        q1 = a[q]
        q2 = mean([a[n//2-1], a[n//2]])
        q3 = a[n-q-1]
    elif n%4 == 3:
        q1 = a[q]
        q2 = a[n//2]
        q3 = a[n-q-1]
    return (q1,q2,q3)
def interquartile_range(a):
q1,q2,q3 = quartiles(a)
return q3-q1
def n_choose_k(n,k):
return int( reduce(mul, (Fraction(n-i, i+1) for i in range(k)), 1) )
def binomial_pmf(x,n,p):
return n_choose_k(n, x) * p**x * (1-p)**(n-x)
def geom_pmf(n,p):
return (1-p)**(n-1)*p
def neg_binomial_pmf(x,n,p):
return n_choose_k(n-1, x-1) * p**x * (1-p)**(n-x)
def poisson_pmf(lambda_, k):
"""
poisson probability mass function
"""
return lambda_**k * exp(-lambda_) / factorial(k)
def normal_pdf(mean, sd, x):
"""
normal probability density function
"""
    return 1/(sd*sqrt(2*pi)) * exp(-((x-mean)**2)/(2*sd**2))
def normal_cdf(mean, sd, x, lower_tail=True):
"""
normal cumulative density function
    If the time taken to assemble a car is normally distributed with a mean of
    20 hours and a standard deviation of 2 hours, the probability that a car can
    be assembled in less than 19.5 hours is:
normal_cdf(20, 2, 19.5)
"""
return 1/2*(1+erf((x-mean)/(sd*sqrt(2)))) if lower_tail else 1 - 1/2*(1+erf((x-mean)/(sd*sqrt(2))))
def central_limit(mean, sd, n):
    return mean*n, sqrt(n)*sd, n
def rnorm(n, mean=0.0, sd=1.0):
return tuple(random.gauss(mean, sd) for i in range(n))
def runif(n, a=0.0, b=1.0):
return tuple(random.uniform(a,b) for i in range(n))
def sequence(s,e,i):
n = int((e-s)/i)
return tuple(j*i+s for j in range(0,n+1))
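# A small smoke test (illustrative only, not part of the original module): it
# exercises a handful of the helpers above on inputs whose answers are easy to
# verify by hand.
if __name__ == "__main__":
    assert mean([1, 2, 3, 4]) == 2.5
    assert median([7, 1, 3]) == 3
    assert mode([1, 2, 2, 3]) == 2
    assert rank([10, 20, 20, 30]) == [1, 2, 2, 3]
    assert variance([2, 4, 4, 4, 5, 5, 7, 9]) == 4
    assert sd([2, 4, 4, 4, 5, 5, 7, 9]) == 2.0
    assert n_choose_k(5, 2) == 10
    assert quartiles(list(range(1, 9))) == (2.5, 4.5, 6.5)
    print("all smoke tests passed")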
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from sim_params import *
from mcni.pyre_support.Instrument import Instrument as base
class Instrument(base):
class Inventory( base.Inventory ):
import pyre.inventory
from mcni.pyre_support import facility, componentfactory as component
import mccomponents.pyre_support
source = facility(
'source',
default = component('sources', 'MonochromaticSource')('source') )
sample = facility(
'sample',
default = component( 'samples', 'SampleAssemblyFromXml')('sample') )
detector = facility(
'detector',
default = component( 'detectors', 'DetectorSystemFromXml')('detector') )
pass # end of Inventory
def __init__(self, name = 'test-Nickel'):
base.__init__(self, name)
return
def _defaults(self):
base._defaults(self)
self.inventory.sequence = ['source', 'sample', 'detector']
geometer = self.inventory.geometer
geometer.inventory.source = (0,0,0), (0,0,0)
geometer.inventory.sample = (0,0,mod2sample), (0,0,0)
geometer.inventory.detector = (0,0,mod2sample), (0,0,0)
source = self.inventory.source
source.inventory.position = 0,0,0
source.inventory.velocity = 0,0,vi
source.inventory.probability = 1
source.inventory.time = 0.0
sample = self.inventory.sample
sample.inventory.xml = 'Ni.xml'
detector = self.inventory.detector
detector.inventory.eventsdat = eventsdat
detector.inventory.instrumentxml = instrumentxml
detector.inventory.tofparams = str(tofparams)
return
pass # end of Instrument
def main():
Instrument().run()
return
if __name__ == "__main__":
main()
# version
__id__ = "$Id$"
# End of file
|
from django.urls import reverse
from django.shortcuts import render
from urllib.parse import urljoin
LEGACY_PYTHON_DOMAIN = 'http://legacy.python.org'
PYPI_URL = 'https://pypi.org/'
def legacy_path(path):
"""Build a path to the same path under the legacy.python.org domain."""
return urljoin(LEGACY_PYTHON_DOMAIN, path)
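# Worked example: legacy_path('/download/releases/') == 'http://legacy.python.org/download/releases/'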
def custom_404(request, exception, template_name='404.html'):
"""Custom 404 handler to only cache 404s for 5 minutes."""
context = {
'legacy_path': legacy_path(request.path),
'download_path': reverse('download:download'),
'doc_path': reverse('documentation'),
'pypi_path': PYPI_URL,
}
response = render(request, template_name, context=context, status=404)
response['Cache-Control'] = 'max-age=300'
return response
|
# check_cpu.py - An example Opsview plugin written with plugnpy
from __future__ import print_function
import plugnpy
import psutil
def get_cpu_usage():
"""Returns CPU Usage %"""
return psutil.cpu_percent(interval=2.0)
def get_args():
"""Gets passed arguments using plugnpy.Parser.
This will exit 3 (UNKNOWN) if an input is missing"""
parser = plugnpy.Parser(description="Monitors CPU Usage", copystr="Example Copyright 2017-2019")
parser.add_argument('-w', '--warning', help="Warning percentage")
parser.add_argument('-c', '--critical', help="Critical percentage")
parser.add_argument('-d', '--debug', action="store_true", help="Debug mode")
return parser.parse_args()
if __name__ == "__main__":
args = get_args()
check = plugnpy.Check() # Instantiate Check object
# Add Metric
check.add_metric('cpu_usage', get_cpu_usage(), '%', args.warning, args.critical, display_name="CPU Usage",
msg_fmt="{name} at {value}{unit}")
# Run Check (handles printing and exit codes)
check.final()
|
from django.contrib import admin
from apps.configuration.models import Project
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
"""
The representation of :class:`configuration.models.Project` in the
administration interface.
"""
list_display = ('id', 'name', 'abbreviation', 'active',)
list_filter = ('active',)
search_fields = ('name', 'abbreviation',)
|
"""
Contains the HappyGeneration class
"""
from dataclasses import dataclass
from typing import List
from transformers import AutoModelForCausalLM, TextGenerationPipeline
from happytransformer.happy_transformer import HappyTransformer
from happytransformer.gen.trainer import GENTrainer, GENTrainArgs, GENEvalArgs
from happytransformer.adaptors import get_adaptor
from happytransformer.gen import ARGS_GEN_TRAIN, ARGS_GEN_EVAl, ARGS_GEN_TEST
from happytransformer.happy_trainer import EvalResult
from happytransformer.fine_tuning_util import create_args_dataclass
from happytransformer.cuda_detect import detect_cuda_device_number
"""
The main settings that users will adjust when performing experiments
The values for full_settings are the same as the default values above except for min and max length.
"""
@dataclass
class GENSettings:
min_length: int = 10
max_length: int = 50
do_sample: bool = False
early_stopping: bool = False
num_beams: int = 1
temperature: float = 1
top_k: int = 50
no_repeat_ngram_size: int = 0
top_p: float = 1
bad_words: List[str] = None
@dataclass
class GenerationResult:
text: str
class HappyGeneration(HappyTransformer):
"""
This class is a user facing class that allows users to generate text using
text generation Transformer models.
The purpose of this class is to be lightweight and easy
to understand and to offload complex tasks to
other classes.
"""
def __init__(self, model_type: str = "GPT2", model_name: str = "gpt2", load_path: str = ""):
self.adaptor = get_adaptor(model_type)
if load_path != "":
model = AutoModelForCausalLM.from_pretrained(load_path)
else:
model = AutoModelForCausalLM.from_pretrained(model_name)
super().__init__(model_type, model_name, model)
device_number = detect_cuda_device_number()
self._pipeline = TextGenerationPipeline(model=self.model, tokenizer=self.tokenizer, device=device_number)
self._trainer = GENTrainer(self.model, model_type, self.tokenizer, self._device, self.logger)
def __assert_default_text_is_val(self, text):
"""
Ensures the input's text input is valid.
Raises a Value Error if the text input is not valid.
:param text: The value the user inputs for the "text" parameter
"""
if not isinstance(text, str):
raise ValueError("The text input must be a string")
if not text:
raise ValueError("The text input must have at least one character")
def generate_text(self, text: str, args: GENSettings=GENSettings()) -> GenerationResult:
"""
:param text: starting text that the model uses as a prompt to continue it.
:param args: A GENSettings object
:return: A GenerationResult() object
"""
self.__assert_default_text_is_val(text)
input_ids = self.tokenizer.encode(text, return_tensors="pt")
adjusted_min_length = args.min_length + len(input_ids[0])
adjusted_max_length = args.max_length + len(input_ids[0])
if args.bad_words:
bad_words_ids = [self.tokenizer(" "+phrase.strip()).input_ids for phrase in args.bad_words]
else:
bad_words_ids = None
output = self._pipeline(text, min_length=adjusted_min_length,
return_full_text=False,
max_length=adjusted_max_length,
do_sample=args.do_sample,
early_stopping=args.early_stopping,
num_beams=args.num_beams,
temperature=args.temperature,
top_k=args.top_k,
no_repeat_ngram_size=args.no_repeat_ngram_size,
top_p=args.top_p,
bad_words_ids=bad_words_ids
)
return GenerationResult(text=output[0]['generated_text'])
def __post_process_generated_text(self, result, text):
"""
A method for processing the output of the model. More features will be added later.
:param result: result the output of the model after being decoded
:param text: the original input to generate_text
:return: returns to text after going through post-processing. Removes starting text
"""
return result[len(text):]
def train(self, input_filepath: str, args=GENTrainArgs()):
"""
:param input_filepath:a file path to a text file that contains nothing but training data
:param args: either a GENTrainArgs() object or a dictionary that contains all of the same keys as ARGS_GEN_TRAIN
:return: None
"""
if type(args) == dict:
method_dataclass_args = create_args_dataclass(default_dic_args=ARGS_GEN_TRAIN,
input_dic_args=args,
method_dataclass_args=GENTrainArgs)
elif type(args) == GENTrainArgs:
method_dataclass_args = args
else:
raise ValueError("Invalid args type. Use a GENTrainArgs object or a dictionary")
self._trainer.train(input_filepath=input_filepath, dataclass_args=method_dataclass_args)
def eval(self, input_filepath: str, args=GENEvalArgs()) -> EvalResult:
"""
:param input_filepath:a file path to a text file that contains nothing but evaluating data
:param args: either a GENEvalArgs() object or a dictionary that contains all of the same keys as ARGS_GEN_EVAl
:return: None
"""
if type(args) == dict:
method_dataclass_args = create_args_dataclass(default_dic_args=ARGS_GEN_EVAl,
input_dic_args=args,
method_dataclass_args=GENEvalArgs)
elif type(args) == GENEvalArgs:
method_dataclass_args = args
else:
raise ValueError("Invalid args type. Use a GENEvalArgs object or a dictionary")
return self._trainer.eval(input_filepath=input_filepath, dataclass_args=method_dataclass_args)
def test(self, input_filepath, args=ARGS_GEN_TEST):
raise NotImplementedError("test() is currently not available")
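# Illustrative usage (a sketch, not part of the library; running it downloads the
# pretrained "gpt2" weights the first time):
#
#     happy_gen = HappyGeneration()                      # GPT2 / "gpt2" defaults
#     args = GENSettings(max_length=30, do_sample=True, top_k=40)
#     result = happy_gen.generate_text("Artificial intelligence is", args=args)
#     print(result.text)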
|
#
# This file is part of PCAP BGP Parser (pbgpp)
#
# Copyright 2016-2017 DE-CIX Management GmbH
# Author: Christopher Moeller <christopher.moeller@de-cix.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pbgpp.Application.Flags.Exceptions import FlagError
class Flag:
# @todo representation of the flag.
def __init__(self):
self.default_value = 0
self.value = 0
self.compatible_values = []
def __eq__(self, other):
        if isinstance(other, Flag):
return self.value == other.value
else:
return self.value == other
def set_value(self, value):
if len(self.compatible_values) == 0:
raise FlagError("Cant set value of uninitialized flag")
if value not in self.compatible_values:
raise FlagError("AddPathFlag: incompatible value")
else:
self.value = value
def get_value(self):
return self.value
|
import os
import sqlite3 as sql
from flask import Flask, render_template, request, g, url_for, redirect
from gerrymander_app.db import get_db
from wtforms import form
import pdb
app = Flask(__name__)
def district_info():
db = get_db()
"""query to get random district id"""
random_district_id = db.execute("SELECT district_id FROM gerrymander_images ORDER BY RANDOM() LIMIT 1").fetchone()[0]
"""queries to get relevant score/image based on id"""
gerrymander_score_query = f"SELECT state, district_number, gerrymander_score FROM gerrymander_score WHERE district_id = '{random_district_id}'"
gerrymander_image = f"SELECT district_image FROM gerrymander_images WHERE district_id = '{random_district_id}'"
district_image = db.execute(gerrymander_image).fetchone()[0]
state, district_number, gerrymander_score = tuple(db.execute(gerrymander_score_query).fetchone())
district_info = {'random_district_id': random_district_id, 'district_image': district_image, 'state': state, 'district_number': district_number, 'gerrymander_score': gerrymander_score}
return district_info
@app.route('/question', methods=['GET', 'POST'])
def user_input():
    # pull a random district and decide whether it counts as gerrymandered
    info = district_info()
    if info['gerrymander_score'] > 50:
        gerrymandered = "yes"
    else:
        gerrymandered = "no"
    print(info['state'])
    if request.form:
        user_answer = list(request.form.keys())[0]
        print(user_answer)
        return redirect(url_for('results', gerrymandered=gerrymandered, user_answer=user_answer))
    else:
        return render_template("question.html", district_image=info['district_image'], state=info['state'], district_number=info['district_number'], form=form)
@app.route('/answer', methods=['GET', 'POST'])
def results():
data = request.args
user_answer = data['user_answer']
gerrymandered = data['gerrymandered']
print(user_answer)
print(gerrymandered)
if user_answer == gerrymandered:
results_title = "It's a match!"
return render_template("answer.html", results_title=results_title, form=form)
elif user_answer != gerrymandered:
results_title = "Not a match!"
return render_template("answer.html", results_title=results_title, form=form)
|
import onmt
import torch
import torch.nn as nn
if __name__ == "__main__":
from onmt.models.multilingual_translator.reversible_transformers import reversible_encoder, \
ReversibleTransformerEncoderLayer
from onmt.models.multilingual_translator.reversible_transformers import reversible_decoder, \
ReversibleTransformerDecoderLayer
import argparse
parser = argparse.ArgumentParser(description='reversible transformer')
parser.add_argument('-model_size', type=int, default=32,
help='Size of embedding / transformer hidden')
parser.add_argument('-gpu', default=0, type=int,
help="Seed for deterministic runs.")
opt = parser.parse_args()
torch.cuda.set_device(opt.gpu)
opt.layers = 4
opt.variational_dropout = False
opt.dropout = 0.0
opt.attn_dropout = 0.0
opt.residual_dropout = 0.0
opt.ffn_dropout = 0.0
opt.n_heads = 4
opt.inner_size = 4 * opt.model_size
opt.ffn_glu = False
opt.ffn_activation = 'relu'
opt.head_dim = opt.model_size // opt.n_heads
opt.learnable_position_encoding = False
opt.ignore_source = False
layers = torch.nn.ModuleList()
for l in range(opt.layers):
layer = ReversibleTransformerEncoderLayer(opt)
layers.append(layer)
class TestEncoder(torch.nn.Module):
def __init__(self, layers):
super().__init__()
self.function = reversible_encoder
self.layers = layers
def forward(self, input, pos):
return self.function(self.layers, input, pos, None)
bsz = 4
len_q = 7
len_r = 7
len_k = 12
device = torch.device('cuda:0')
input_states = torch.randn(*(len_q, bsz, opt.model_size), dtype=torch.float64, requires_grad=True, device=device)
pos = torch.randn(*(len_q, 1, opt.model_size), dtype=torch.float64, requires_grad=False, device=device)
net = TestEncoder(layers)
net = net.double().cuda()
print(net)
# print("gradchecking ENCODER start.")
#
# torch.autograd.gradcheck(net, (input_states, pos), eps=1e-6, atol=1e-5, rtol=1e-3)
#
# print("gradchecking ENCODER completed.")
class TestDecoder(torch.nn.Module):
def __init__(self, layers):
super().__init__()
self.function = reversible_decoder
self.layers = layers
def forward(self, input, pos, context):
return self.function(self.layers, input, pos, context, None, None, None, None)
device = torch.device('cuda:0')
input_states = torch.randn(*(len_q, bsz, opt.model_size), dtype=torch.float64, requires_grad=True, device=device)
pos = torch.randn(*(len_q, 1, opt.model_size), dtype=torch.float64, requires_grad=False, device=device)
context = torch.randn(*(len_k, bsz, opt.model_size), dtype=torch.float64, requires_grad=True, device=device)
layers = torch.nn.ModuleList()
for l in range(opt.layers):
layer = ReversibleTransformerDecoderLayer(opt)
layers.append(layer)
net = TestDecoder(layers)
net = net.double().cuda()
print("gradchecking DECODER start.")
torch.autograd.gradcheck(net, (input_states, pos, context), eps=1e-6, atol=1e-5, rtol=1e-3)
print("Completed.")
# net(input_states, pos, context)
|
import os
import sys
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
sys.path.append(os.path.join(Path().resolve(), '../../../python'))
import jellypy
# Video Write-DMA
REG_VIDEO_WDMA_CORE_ID = 0x00
REG_VIDEO_WDMA_VERSION = 0x01
REG_VIDEO_WDMA_CTL_CONTROL = 0x04
REG_VIDEO_WDMA_CTL_STATUS = 0x05
REG_VIDEO_WDMA_CTL_INDEX = 0x07
REG_VIDEO_WDMA_PARAM_ADDR = 0x08
REG_VIDEO_WDMA_PARAM_STRIDE = 0x09
REG_VIDEO_WDMA_PARAM_WIDTH = 0x0a
REG_VIDEO_WDMA_PARAM_HEIGHT = 0x0b
REG_VIDEO_WDMA_PARAM_SIZE = 0x0c
REG_VIDEO_WDMA_PARAM_AWLEN = 0x0f
REG_VIDEO_WDMA_MONITOR_ADDR = 0x10
REG_VIDEO_WDMA_MONITOR_STRIDE = 0x11
REG_VIDEO_WDMA_MONITOR_WIDTH = 0x12
REG_VIDEO_WDMA_MONITOR_HEIGHT = 0x13
REG_VIDEO_WDMA_MONITOR_SIZE = 0x14
REG_VIDEO_WDMA_MONITOR_AWLEN = 0x17
# Video format regularizer
REG_VIDEO_FMTREG_CORE_ID = 0x00
REG_VIDEO_FMTREG_CORE_VERSION = 0x01
REG_VIDEO_FMTREG_CTL_CONTROL = 0x04
REG_VIDEO_FMTREG_CTL_STATUS = 0x05
REG_VIDEO_FMTREG_CTL_INDEX = 0x07
REG_VIDEO_FMTREG_CTL_SKIP = 0x08
REG_VIDEO_FMTREG_CTL_FRM_TIMER_EN = 0x0a
REG_VIDEO_FMTREG_CTL_FRM_TIMEOUT = 0x0b
REG_VIDEO_FMTREG_PARAM_WIDTH = 0x10
REG_VIDEO_FMTREG_PARAM_HEIGHT = 0x11
REG_VIDEO_FMTREG_PARAM_FILL = 0x12
REG_VIDEO_FMTREG_PARAM_TIMEOUT = 0x13
# Demosaic
REG_IMG_DEMOSAIC_CORE_ID = 0x00
REG_IMG_DEMOSAIC_CORE_VERSION = 0x01
REG_IMG_DEMOSAIC_CTL_CONTROL = 0x04
REG_IMG_DEMOSAIC_CTL_STATUS = 0x05
REG_IMG_DEMOSAIC_CTL_INDEX = 0x07
REG_IMG_DEMOSAIC_PARAM_PHASE = 0x08
REG_IMG_DEMOSAIC_CURRENT_PHASE = 0x18
# peri
reg_peri = jellypy.UioAccessor('uio_pl_peri', 0x00100000)
if not reg_peri.is_mapped():
print('open error : uio_pl_peri')
sys.exit(1)
reg_gid = reg_peri.get_accessor(0x00000000)
reg_fmtr = reg_peri.get_accessor(0x00010000)
reg_prmup = reg_peri.get_accessor(0x00011000)
reg_rgb = reg_peri.get_accessor(0x00012000)
reg_wdma = reg_peri.get_accessor(0x00021000)
print('\n<read core id>')
print('gid : 0x%08x' % reg_gid.read_reg(0))
print('fmtr : 0x%08x' % reg_fmtr.read_reg(0))
print('prmup : 0x%08x' % reg_prmup.read_reg(0))
print('rgb : 0x%08x' % reg_rgb.read_reg(0))
print('wdma : 0x%08x' % reg_wdma.read_reg(0))
# udmabuf
udmabuf = jellypy.UdmabufAccessor('udmabuf0')
if not udmabuf.is_mapped():
print('open error : udmabuf0')
sys.exit(1)
dmabuf_mem_addr = udmabuf.get_phys_addr()
dmabuf_mem_size = udmabuf.get_size()
print('\n<udmabuf>')
print('mem_addr : 0x%08x' % dmabuf_mem_addr)
print('mem_size : 0x%08x' % dmabuf_mem_size)
# IMX219 I2C control
imx219 = jellypy.Imx219ControlI2c()
if not imx219.open('/dev/i2c-0', 0x10):
print('I2C open error')
sys.exit(1)
imx219.reset()
print('Model ID : %04x' % imx219.get_model_id())
pixel_clock = 91000000 # 139200000.0
binning = True
width = 640 #640
height = 480 #132
aoi_x = -1
aoi_y = -1
flip_h = False
flip_v = False
frame_rate = 60 # 1000
exposure = 10
a_gain = 20
d_gain = 10
bayer_phase = 1
view_scale = 1
# camera setting
imx219.set_pixel_clock(pixel_clock)
imx219.set_aoi(width, height, aoi_x, aoi_y, binning, binning)
imx219.start()
rec_frame_num = min(100, dmabuf_mem_size // (width * height * 4))
frame_num = 1
print(dmabuf_mem_size)
print(rec_frame_num)
def capture_still_image(reg_wdma, reg_fmtr, bufaddr, width, height, frame_num):
# DMA start (one shot)
reg_wdma.write_reg(REG_VIDEO_WDMA_PARAM_ADDR, bufaddr)
reg_wdma.write_reg(REG_VIDEO_WDMA_PARAM_STRIDE, width*4)
reg_wdma.write_reg(REG_VIDEO_WDMA_PARAM_WIDTH, width)
reg_wdma.write_reg(REG_VIDEO_WDMA_PARAM_HEIGHT, height)
reg_wdma.write_reg(REG_VIDEO_WDMA_PARAM_SIZE, width*height*frame_num)
reg_wdma.write_reg(REG_VIDEO_WDMA_PARAM_AWLEN, 31)
reg_wdma.write_reg(REG_VIDEO_WDMA_CTL_CONTROL, 0x07)
# video format regularizer
reg_fmtr.write_reg(REG_VIDEO_FMTREG_CTL_FRM_TIMER_EN, 1)
reg_fmtr.write_reg(REG_VIDEO_FMTREG_CTL_FRM_TIMEOUT, 10000000)
reg_fmtr.write_reg(REG_VIDEO_FMTREG_PARAM_WIDTH, width)
reg_fmtr.write_reg(REG_VIDEO_FMTREG_PARAM_HEIGHT, height)
reg_fmtr.write_reg(REG_VIDEO_FMTREG_PARAM_FILL, 0x100)
reg_fmtr.write_reg(REG_VIDEO_FMTREG_PARAM_TIMEOUT, 100000)
reg_fmtr.write_reg(REG_VIDEO_FMTREG_CTL_CONTROL, 0x03)
time.sleep(0.1)
# wait
time.sleep(0.1)
while reg_wdma.read_reg(REG_VIDEO_WDMA_CTL_STATUS) != 0:
time.sleep(0.1)
# formatter stop
reg_fmtr.write_reg(REG_VIDEO_FMTREG_CTL_CONTROL, 0x00)
time.sleep(0.1)
    while reg_fmtr.read_reg(REG_VIDEO_FMTREG_CTL_STATUS) != 0:
        time.sleep(0.001)
#
imx219.set_frame_rate(frame_rate)
imx219.set_exposure_time(exposure / 1000.0)
imx219.set_gain(a_gain)
imx219.set_digital_gain(d_gain)
imx219.set_flip(flip_h, flip_v)
#reg_demos.write_reg(REG_IMG_DEMOSAIC_PARAM_PHASE, bayer_phase)
#reg_demos.write_reg(REG_IMG_DEMOSAIC_CTL_CONTROL, 3) #update & enable
#capture_still_image(reg_wdma, reg_fmtr, dmabuf_mem_addr, width, height, 1)
#img = udmabuf.get_array_uint8((height, width, 4), 0)
#plt.imshow(img[:,:,::-1])
#plt.show()
while cv2.waitKey(10) != 0x1b:
    # capture
capture_still_image(reg_wdma, reg_fmtr, dmabuf_mem_addr, width, height, 1)
#cv::Mat img(height*frame_num, width, CV_8UC4)
#udmabuf_acc.MemCopyTo(img.data, 0, width * height * 4 * frame_num)
img = udmabuf.get_array_uint8((height, width, 4), 0)
#img_rgb = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
#plt.figure(figsize=(12, 8))
#plt.imshow(img_rgb[:,:,::-1])
#plt.show()
cv2.imshow('"img', img)
|
from collections import namedtuple
SrcTile = namedtuple('SrcTile', 'bbox f img scale')
class BBOX(namedtuple('BBOX_base', 'x y w h')):
""" Represents a bounding box as something more specific than a shapely polygon
Units are usually northing and easting. """
@classmethod
def with_xyxy(_cls, x1, y1, x2, y2):
""" Construct in terms of top-left and bottom-right points """
return BBOX(x1, y1, x2 - x1, y2 - y1)
def x2(self):
""" the right edge (east) """
return self.x + self.w
def y2(self):
""" the bottom edge (south) """
return self.y + self.h
def xyxy(self):
""" extent as top-left and bottom-right points.
`shapely.geometry.box()` takes this format. """
return (self.x, self.y, self.x2(), self.y2())
def xxyy(self):
""" extent in `matplotlib` format """
return (self.x, self.x2(), self.y, self.y2())
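# Illustrative round trip (not from the original module):
#
#     >>> box = BBOX.with_xyxy(10.0, 20.0, 30.0, 60.0)
#     >>> (box.w, box.h)
#     (20.0, 40.0)
#     >>> box.xyxy()
#     (10.0, 20.0, 30.0, 60.0)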
|
from smbus import SMBus
bus = SMBus(1)
address = 0x48
def get_temp():
data = bus.read_i2c_block_data(0x48, 0x00, 2)
data[0] = data[0] * 256
temperature = data[0] + data[1]
temperature = (temperature >> 4) * 0.0625
return(temperature)
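# Minimal usage sketch (assumes a TMP102-style 12-bit sensor at I2C address 0x48,
# which is what the register math above implies):
if __name__ == "__main__":
    import time
    while True:
        print("Temperature: %.4f C" % get_temp())
        time.sleep(1)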
|
# -*- encoding:utf-8 -*-
"""
company:IT
author:pengjinfu
project:migu
time:2020.5.7
"""
|
import sqlite3
import db_tools as db
db_file = 'db/pyolo.db'
db.create_connection(db_file)
db.initialize_tables(db_file)
|
import requests
import json
import logging
import traceback
import datetime
import time
import pandas as pd
log_fmt = '%(asctime)s- %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=log_fmt, level=logging.DEBUG)
class Db2dataTool:
userid = None
passwd = None
dbname = None
crn = None
url = None
token = None
tablename = None
def __init__(self, db2info):
self.userid = db2info['USERID']
self.passwd = db2info['PASSWD']
self.dbname = db2info['DBNAME']
self.crn = db2info['CRN']
self.url = db2info['URL']
self.tablename = db2info['TABLENAME']
def __handleError(self, msg):
logging.error(msg)
result = {}
result['status'] = 'ERROR'
result['message'] = json.dumps(msg)
return json.dumps(result)
def __verifyDate(self, datestr):
try:
newDate = datetime.datetime.strptime(datestr, "%Y-%m-%d")
return True
except ValueError:
return False
def getData(self, fromdate, todate):
result = self.getToken()
resjson = json.loads(result)
if resjson['status'] == 'ERROR':
return result
result = self.doQuery(fromdate, todate)
resjson = json.loads(result)
if resjson['status'] == 'ERROR':
return result
result = self.getSQLResults(resjson['message'], fromdate, todate)
return result
def getSQLResults(self, id, fromdate, todate):
# time.sleep(1)
        # Build the REST API URL
REST_API_URL = self.url
service_name = '/dbapi/v4/sql_jobs/%s'%(id)
host = REST_API_URL + service_name
        # Build the request headers
headers = {}
headers ['content-type'] = "application/json"
headers ['x-deployment-id']=self.crn
headers ['X-DB-Profile'] = "BLUDB"
        # Set the access token in the header
headers ['authorization'] = 'Bearer ' + self.token
start = time.time()
df = pd.DataFrame()
message = ''
while True:
try:
r = requests.get(host, headers=headers, verify = False)
except Exception as err:
return self.__handleError(traceback.format_exc())
# Check error
if (r.status_code != 200):
message = r.json()['errors']
return self.__handleError(message)
if r.json()["status"] == "failed":
message = r.json()["results"][0]["error"]
return self.__handleError(message)
if "results" in r.json():
if "rows_count" in r.json()["results"][0]:
print(r.json()["results"][0]["rows_count"])
if len(df) < 1:
df = pd.DataFrame(data=r.json()["results"][0]["rows"] , columns =['labels', 'data'])
else:
df = df.append(r.json()["results"][0]["rows"])
else:
message = "Error :%s"%(r.json()["results"][0])
if r.json()["status"] == "completed":
break
if time.time() - start > 180:
message = "sql_jobs timeout 3min"
return self.__handleError(message)
try:
if len(df) < 1 :
if len(message) < 1:
message = "No result data from %s to %s. "%(fromdate, todate)
return self.__handleError(message)
df['data'] = df['data'].astype(int)
resultdata = {}
resultdata['labels'] = df['labels'].values.tolist()
resultdata['data'] = df['data'].values.tolist()
except Exception as err:
return self.__handleError(traceback.format_exc())
result = {}
result['status'] = 'SUCCESS'
result['message'] = resultdata
return json.dumps(result)
def doQuery(self, fromdate, todate):
        if not (self.__verifyDate(fromdate) and self.__verifyDate(todate)):
return self.__handleError("Date format is invalid")
        # Build the REST API URL
REST_API_URL = self.url
service_name = '/dbapi/v4/sql_jobs'
host = REST_API_URL + service_name
        # Build the request headers
headers = {}
headers ['content-type'] = "application/json"
headers ['x-deployment-id']=self.crn
headers ['X-DB-Profile'] = "BLUDB"
        # Set the access token in the header
headers ['authorization'] = 'Bearer ' + self.token
sqlstr = "SELECT 公表_年月日, count(公表_年月日) AS 人数 FROM %s WHERE 公表_年月日 BETWEEN '%s' AND '%s' GROUP BY 公表_年月日;" % (
self.tablename, fromdate, todate)
body ={
"commands": sqlstr,
"limit" : 1000000,
"separator": ";",
"stop_on_error":"no"
}
# Call the RESTful service
try:
r = requests.post(host, headers=headers, json=body, verify = False)
# print( r.status_code)
except Exception as err:
return self.__handleError(traceback.format_exc())
# Check error
if (r.status_code != 201): # There was an error with the authentication
message = r.json()['errors']
return self.__handleError(message)
result ={}
result['status'] = 'SUCCESS'
result['message'] = r.json()["id"]
return json.dumps(result)
def getToken(self):
        # Build the REST API URL
REST_API_URL = self.url
service_name = '/dbapi/v4/auth/tokens'
host = REST_API_URL + service_name
        # Build the request headers
headers = {}
headers['content-type'] = "application/json"
headers['x-deployment-id'] = self.crn
        # Parameters: uid/password for DB access
params = {}
params['userid'] = self.userid
params['password'] = self.passwd
result = {}
# Call the RESTful service
try:
r = requests.post(host, headers=headers, data=json.dumps(params))
except Exception as err:
return self.__handleError(traceback.format_exc())
# Check for Invalid credentials
if (r.status_code == 401): # There was an error with the authentication
message = r.json()['errors']
return self.__handleError(message)
# Check for anything other than 200/401
if (r.status_code != 200): # Some other failure
message = r.json()['errors']
return self.__handleError(message)
# Retrieve the access token
try:
access_token = r.json()['token']
# print(r.json())
except:
return self.__handleError(r.json())
result['status'] = 'SUCCESS'
result['message'] = access_token
self.token = access_token
return json.dumps(result)
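# Illustrative usage (a sketch; every connection value below is a placeholder, not
# a real credential or table name):
#
#     db2info = {'USERID': 'user', 'PASSWD': 'secret', 'DBNAME': 'BLUDB',
#                'CRN': 'crn:v1:placeholder', 'URL': 'https://db2-host.example.com',
#                'TABLENAME': 'COVID19_CASES'}
#     tool = Db2dataTool(db2info)
#     print(tool.getData('2020-05-01', '2020-05-07'))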
|
import unittest
from impbot.connections import twitch_webhook
class TestTwitchWebhookConnection(unittest.TestCase):
def test_topic(self):
link = (
'<https://api.twitch.tv/helix/webhooks/hub>; rel="hub", '
'<https://api.twitch.tv/helix/streams?user_id=1234>; rel="self"')
self.assertEqual(
"https://api.twitch.tv/helix/streams?user_id=1234",
twitch_webhook._topic(link))
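

# Illustrative sketch (not the actual impbot implementation): one way to extract the
# rel="self" URI that the test above expects from a webhook Link header.
def _topic_sketch(link_header: str) -> str:
    for part in link_header.split(", "):
        uri, _, rel = part.partition("; ")
        if rel == 'rel="self"':
            return uri.strip("<>")
    raise ValueError('no rel="self" link found')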
|
import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Post, Tag
from post.serializers import PostSerializer, PostDetailSerializer
POSTS_URL = reverse('post:post-list')
def image_upload_url(post_id):
"""
Helper method to retrieve the API end-point to upload the image.
:param post_id: The unique ID of the post we want to add image to.
:return: An end-point to which we can POST an image
"""
return reverse('post:post-upload-image', args=[post_id])
def detail_url(post_id):
"""
Helper method to retrieve the API end-point to post content
:param post_id: The unique ID of the post
:return: An end-point to retrieve the post content.
"""
return reverse('post:post-detail', args=[post_id])
def sample_tag(user, name="Technology"):
"""
Helper method to create a tag and return the tag object
:param user: user that created the tag.
:param name: name of the tag
:return: Returns a tag object after creating it.
"""
return Tag.objects.create(user=user, name=name)
def sample_post(user, **params):
"""
Helper method to create a sample post for our testing purposes.
:param user:
:param params:
:return: A Post object after it is created.
"""
defaults = {
'title': 'Blog Post #01',
'content': 'Blog post\'s content that is really awesome',
}
defaults.update(params)
return Post.objects.create(user=user, **defaults)
class PublicPostAPITests(TestCase):
"""
Test cases for unauthenticated access to the post API
"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
"""
Test that the authentication is required!
:return: None
"""
res = self.client.get(POSTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivatePostAPITests(TestCase):
"""
Test cases for authenticated access to the post API
"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'user@test.com',
'Test123'
)
self.client.force_authenticate(self.user)
def test_retrieve_posts(self):
"""
Test case for retrieving the list of posts
:return: None
"""
sample_post_title = {'title': 'Sample Post 01'}
sample_post(user=self.user, **sample_post_title)
sample_post_title = {'title': 'Sample Post 02'}
sample_post(user=self.user, **sample_post_title)
res = self.client.get(POSTS_URL)
posts = Post.objects.all().order_by('id')
serializer = PostSerializer(posts, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_view_post_detail(self):
"""
Test viewing of a post details.
:return: None
"""
post = sample_post(user=self.user)
post.tags.add(sample_tag(user=self.user))
url = detail_url(post.id)
res = self.client.get(url)
serializer = PostDetailSerializer(post)
self.assertEqual(res.data, serializer.data)
def test_create_basic_post(self):
"""
Test creating a post
:return: None
"""
payload = {
'title': 'Blog post 01',
'content': 'Content for blog post 01'
}
res = self.client.post(POSTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
post = Post.objects.get(id=res.data['id'])
for key in payload.keys():
self.assertEqual(payload[key], getattr(post, key))
def test_create_post_with_tags(self):
"""
Test the creation of a post with tags
"""
tag1 = sample_tag(user=self.user, name="Technology")
tag2 = sample_tag(user=self.user, name="Python")
payload = {
'title': 'Blog Post 01',
'content': 'Content for blog post 01',
'tags': [tag1.id, tag2.id]
}
res = self.client.post(POSTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
post = Post.objects.get(id=res.data['id'])
tags = post.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_partial_update_post(self):
"""
Test updating post with PATCH
"""
post = sample_post(user=self.user)
post.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name="Django")
payload = {'title': 'Python + Django', 'tags': [new_tag.id]}
url = detail_url(post.id)
self.client.patch(url, payload)
post.refresh_from_db()
self.assertEqual(post.title, payload['title'])
tags = post.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
def test_full_update_post(self):
"""
Test updating a post with PUT
"""
post = sample_post(user=self.user)
post.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Blog Post 007',
'content': 'Content for blog post 007',
}
url = detail_url(post.id)
self.client.put(url, payload)
post.refresh_from_db()
self.assertEqual(post.title, payload['title'])
tags = post.tags.all()
self.assertEqual(len(tags), 0)
class PostImageUploadTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'user@test.com',
'Test123'
)
self.client.force_authenticate(self.user)
self.post = sample_post(user=self.user)
def tearDown(self):
"""
We have to delete the created dummy images to ensure
we are maintaining the system state correctly after
the test execution.
:return: None
"""
self.post.image.delete()
def test_uploading_valid_image(self):
"""
Test Uploading a valid image file
"""
url = image_upload_url(self.post.id)
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB', (10, 10))
img.save(ntf, format='JPEG')
ntf.seek(0)
res = self.client.post(url, {'image': ntf}, format='multipart')
self.post.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('image', res.data)
self.assertTrue(os.path.exists(self.post.image.path))
def test_uploading_invalid_image(self):
"""
Test uploading an invalid image file
"""
url = image_upload_url(self.post.id)
res = self.client.post(url,
{'image': 'notanimage'},
format='multipart'
)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
class TestPostFilteringAPI(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'user@test.com',
'Test123'
)
self.client.force_authenticate(self.user)
def test_filter_posts_by_tags(self):
"""Test returning posts with specific tags"""
post1 = sample_post(user=self.user, title='Blog Post 01')
post2 = sample_post(user=self.user, title='Blog Post 02')
tag1 = sample_tag(user=self.user, name='Technology')
tag2 = sample_tag(user=self.user, name='Frameworks')
post1.tags.add(tag1)
post2.tags.add(tag2)
post3 = sample_post(user=self.user, title='Blog Post 03')
res = self.client.get(
POSTS_URL,
{'tags': f'{tag1.id},{tag2.id}'}
)
serializer1 = PostSerializer(post1)
serializer2 = PostSerializer(post2)
serializer3 = PostSerializer(post3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
|
"""Quantum Inspire library
Copyright 2019 QuTech Delft
qilib is available under the [MIT open-source license](https://opensource.org/licenses/MIT):
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from abc import ABC, abstractmethod
from typing import Optional
from qcodes.instrument.base import Instrument
from qilib.configuration_helper.visitor import Visitor
from qilib.utils.python_json_structure import PythonJsonStructure
class InstrumentAdapter(ABC):
def __init__(self, address: str, instrument_name: Optional[str] = None) -> None:
""" A template for an adapter to the QCoDeS instrument interface.
Args:
address: The address/ID of the QCoDeS instrument.
"""
self._name = f'{self.__class__.__name__}_{address}'
self._address = address
self._instrument: Optional[Instrument] = None
self._instrument_name = instrument_name if instrument_name is not None else self.name
def __repr__(self) -> str:
return f'{self.__class__.__name__}({self._address!r}, {self._instrument_name!r})'
@property
def name(self) -> str:
return self._name
@property
def instrument(self) -> Optional[Instrument]:
return self._instrument
@property
def address(self) -> str:
return self._address
@abstractmethod
def apply(self, config: PythonJsonStructure) -> None:
""" An abstract method that should be implemented in subclasses.
        Writes a full set of configuration to the adapter's instrument.
Args:
config: Containing the instrument configuration.
"""
def read(self, update: bool = False) -> PythonJsonStructure:
""" Obtains a full set of settings from the instrument.
Returns:
Part of the instrument snapshot, i.e., parameter values, without the instrument's
parameters which are explicitly filtered out, and the instrument name.
"""
configuration = PythonJsonStructure()
if self._instrument is not None:
snapshot = self._instrument.snapshot(update)
parameters = self._filter_parameters(snapshot['parameters'])
valued_parameters = self.__notify_and_remove_none_values(parameters)
configuration.update(valued_parameters)
return configuration
@abstractmethod
def _filter_parameters(self, parameters: PythonJsonStructure) -> PythonJsonStructure:
""" Filters out parameters that are not used for instrument configuration storage.
This function should be overwritten in the subclasses for each specific instrument,
if needed when reading the configuration.
Args:
parameters: A complete snapshot from an instrument.
Returns:
PythonJsonStructure: Contains the instrument snapshot without the instrument
parameters which are filtered out by this function.
"""
def close_instrument(self) -> None:
""" Close the wrapped QCoDeS instrument."""
if self._instrument is not None:
self._instrument.close()
def accept(self, visitor: Visitor) -> None:
""" Accept a visitor and run visit method with self as a parameter.
Args:
visitor: An implementation of the Visitor interface.
"""
visitor.visit(self)
def __notify_and_remove_none_values(self, parameters: PythonJsonStructure) -> PythonJsonStructure:
""" Return parameters from the QCoDeS snapshot which are not None.
Takes the parameters of the QCoDeS instrument snapshot. Removes all parameters
which have a value of None. Returns the parameter settings which have a value.
All parameters with None value will be listed in the log as an error.
Args:
parameters: The parameters of a QCoDeS instrument snapshot.
Returns:
PythonJsonStructure: Contains the instrument snapshot parameters without the
            instrument parameters which have a None value.
"""
valued_parameters = PythonJsonStructure()
none_value_parameters = PythonJsonStructure()
for parameter_name, settings in parameters.items():
if 'value' in settings and settings['value'] is None:
none_value_parameters[parameter_name] = settings
else:
valued_parameters[parameter_name] = settings
if none_value_parameters:
parameter_names = list(none_value_parameters.keys())
error_message = f'Parameter values of {self._instrument_name} are None. Please set: {parameter_names}.'
logging.error(error_message)
return valued_parameters
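

# Illustrative sketch (not part of qilib): a minimal concrete adapter showing how the
# abstract methods above are typically filled in. The bare QCoDeS ``Instrument`` used here
# merely stands in for a real driver; a production adapter would instantiate its own driver.
class _ExampleInstrumentAdapter(InstrumentAdapter):
    def __init__(self, address: str, instrument_name: Optional[str] = None) -> None:
        super().__init__(address, instrument_name)
        self._instrument = Instrument(self._instrument_name)

    def apply(self, config: PythonJsonStructure) -> None:
        # Write every stored value back to the matching instrument parameter.
        for parameter, settings in config.items():
            if 'value' in settings and parameter in self._instrument.parameters:
                self._instrument.set(parameter, settings['value'])

    def _filter_parameters(self, parameters: PythonJsonStructure) -> PythonJsonStructure:
        # Keep everything except identification data in the stored configuration.
        parameters.pop('IDN', None)
        return parameters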
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# licensed under CC-Zero: https://creativecommons.org/publicdomain/zero/1.0
import requests
import json
import pywikibot
from pywikibot.data import api
site = pywikibot.Site('wikidata', 'wikidata')
site.login()
repo = site.data_repository()
site.get_tokens('edit')
ERROR_THRES = 50
r = requests.get('https://www.wikidata.org/wiki/User:DeltaBot/badges?action=raw')
tasks = r.json()
# remove badges
for t in tasks:
payload = {
'categories': t['category'],
'language': t['language'],
'project': t['project'],
'sparql': 'SELECT ?item WHERE { ?article schema:about ?item;wikibase:badge wd:'+t['badge']+';schema:isPartOf <https://'+t['language']+'.'+t['project']+'.org/>}',
'source_combination': 'sparql not categories',
'ns[0]': 1,
'ns[100]': 1,
'common_wiki': 'wikidata',
'format': 'json',
'doit': 'Do it!'
}
r = requests.get('https://petscan.wmflabs.org/', params=payload)
data = r.json()
if len(data['*'][0]['a']['*']) > ERROR_THRES:
continue
for m in data['*'][0]['a']['*']:
try:
params = {
'action': 'wbsetsitelink',
'id': m['title'],
'linksite': t['site'],
'badges': '',
'token': site.tokens['edit']
}
print(m)
#req = api.Request(site=site, **params)
#data = req.submit()
except:
pass
# add badges
for t in tasks:
payload = {
'categories': t['category'],
'language': t['language'],
'project': t['project'],
'sparql': 'SELECT ?item WHERE { ?article schema:about ?item;wikibase:badge wd:'+t['badge']+';schema:isPartOf <https://'+t['language']+'.'+t['project']+'.org/>}',
'source_combination': 'categories not sparql',
'ns[0]': 1,
'ns[100]': 1,
'common_wiki': 'wikidata',
'format': 'json',
'doit': 'Do it!'
}
r = requests.get('https://petscan.wmflabs.org/', params=payload)
data = r.json()
if len(data['*'][0]['a']['*']) > ERROR_THRES:
continue
for m in data['*'][0]['a']['*']:
try:
params = {
'action': 'wbsetsitelink',
'id': m['title'],
'linksite': t['site'],
'badges': t['badge'],
'token': site.tokens['edit']
}
req = api.Request(site=site, **params)
data = req.submit()
except:
pass
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Set, Tuple
from . import DictEntries, ParseIssueTuple, PipelineStep, Summary
class WarningCodeFilter(PipelineStep[DictEntries, DictEntries]):
def __init__(self, codes_to_keep: Set[int]) -> None:
self.codes_to_keep: Set[int] = codes_to_keep
def _should_skip_issue(self, issue: ParseIssueTuple) -> bool:
return issue.code not in self.codes_to_keep
def run(self, input: DictEntries, summary: Summary) -> Tuple[DictEntries, Summary]:
filtered_issues = []
for issue in input["issues"]:
if self._should_skip_issue(issue):
continue
filtered_issues.append(issue)
input["issues"] = filtered_issues
return input, summary
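

# Usage sketch (illustrative; the codes and issue objects below are placeholders): keep
# only issues whose ``code`` is in the given set and drop everything else.
#
#   step = WarningCodeFilter(codes_to_keep={6065, 6066})
#   output, summary = step.run({"issues": issues}, summary)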
|
from aacharts.aachartcreator.AAChartModel import AAChartModel
from aacharts.aachartcreator.AASeriesElement import AASeriesElement
from aacharts.aaenum.AAEnum import AAChartType, AAChartAnimationType, AAChartSymbolType, AAChartSymbolStyleType
from aacharts.aatool.AAGradientColor import AAGradientColor, AALinearGradientDirection
class BasicChartComposer:
@staticmethod
def configureBasicOptions():
return (AAChartModel()
.backgroundColorSet("#4b2b7f")
.dataLabelsEnabledSet(False)
.yAxisGridLineWidthSet(0)
.touchEventEnabledSet(True))
@staticmethod
def configureAreaChart():
element1 = (AASeriesElement()
.nameSet("Tokyo")
.dataSet([7.0, 6.9, 9.5, 14.5, 18.2, 21.5, 25.2, 26.5, 23.3, 18.3, 13.9, 9.6]))
element2 = (AASeriesElement()
.nameSet("NewYork")
.dataSet([0.2, 0.8, 5.7, 11.3, 17.0, 22.0, 24.8, 24.1, 20.1, 14.1, 8.6, 2.5]))
element3 = (AASeriesElement()
.nameSet("London")
.dataSet([0.9, 0.6, 3.5, 8.4, 13.5, 17.0, 18.6, 17.9, 14.3, 9.0, 3.9, 1.0]))
element4 = (AASeriesElement()
.nameSet("Berlin")
.dataSet([3.9, 4.2, 5.7, 8.5, 11.9, 15.2, 17.0, 16.6, 14.2, 10.3, 6.6, 4.8]))
aaChartModel = (BasicChartComposer.configureBasicOptions()
.chartTypeSet(AAChartType.area)
.categoriesSet(["Java", "Swift", "Python", "Ruby", "PHP", "Go", "C", "C#", "C++"])
.seriesSet([element1, element2, element3, element4]))
return aaChartModel
@staticmethod
def configureStepAreaChartAndStepLineChart():
element1 = (AASeriesElement()
.nameSet("Tokyo")
.stepSet(True)
.dataSet([149.9, 171.5, 106.4, 129.2, 144.0, 176.0, 135.6, 188.5, 276.4, 214.1, 95.6, 54.4]))
element2 = (AASeriesElement()
.nameSet("NewYork")
.stepSet(True)
.dataSet([83.6, 78.8, 188.5, 93.4, 106.0, 84.5, 105.0, 104.3, 131.2, 153.5, 226.6, 192.3]))
element3 = (AASeriesElement()
.nameSet("London")
.stepSet(True)
.dataSet([48.9, 38.8, 19.3, 41.4, 47.0, 28.3, 59.0, 69.6, 52.4, 65.2, 53.3, 72.2]))
aaChartModel = (BasicChartComposer.configureBasicOptions()
.chartTypeSet(AAChartType.area)
.seriesSet([element1, element2, element3, ]))
return aaChartModel
@staticmethod
def configureColumnChartAndBarChart():
aaChartModel = (BasicChartComposer.configureAreaChart()
.categoriesSet([
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
])
.legendEnabledSet(True)
.colorsThemeSet(["#fe117c", "#ffc069", "#06caf4", "#7dffc0"])
.animationTypeSet(AAChartAnimationType.easeOutCubic)
.animationDurationSet(1200)
)
return aaChartModel
@staticmethod
def configureAreaChartAndAreasplineChartStyle(chartType: AAChartType):
aaChartModel = (BasicChartComposer.configureAreaChart()
.animationTypeSet(AAChartAnimationType.easeOutQuart)
.legendEnabledSet(True)
.markerRadiusSet(5)
.markerSymbolSet(AAChartSymbolType.circle)
.markerSymbolStyleSet(AAChartSymbolStyleType.innerBlank))
if chartType == AAChartType.areaspline:
gradientColorDic = AAGradientColor.linearGradient1(
AALinearGradientDirection.toBottomRight,
"rgba(138,43,226,1)",
"rgba(30,144,255,1)" # 颜色字符串设置支持十六进制类型和 rgba 类型
)
element1 = (AASeriesElement()
.nameSet("Predefined symbol")
.fillColorSet(gradientColorDic)
.dataSet([0.45, 0.43, 0.50, 0.55, 0.58, 0.62, 0.83, 0.39, 0.56, 0.67, 0.50, 0.34, 0.50, 0.67, 0.58, 0.29, 0.46,
0.23, 0.47, 0.46, 0.38, 0.56, 0.48, 0.36]))
element2 = (AASeriesElement()
.nameSet("Image symbol")
.dataSet([0.38, 0.31, 0.32, 0.32, 0.64, 0.66, 0.86, 0.47, 0.52, 0.75, 0.52, 0.56, 0.54, 0.60, 0.46, 0.63, 0.54,
0.51, 0.58, 0.64, 0.60, 0.45, 0.36, 0.67]))
element3 = (AASeriesElement()
.nameSet("Base64 symbol(*)")
.dataSet([0.46, 0.32, 0.53, 0.58, 0.86, 0.68, 0.85, 0.73, 0.69, 0.71, 0.91, 0.74, 0.60, 0.50, 0.39, 0.67, 0.55,
0.49, 0.65, 0.45, 0.64, 0.47, 0.63, 0.64]))
element4 = (AASeriesElement()
.nameSet("Custom symbol")
.dataSet([0.60, 0.51, 0.52, 0.53, 0.64, 0.84, 0.65, 0.68, 0.63, 0.47, 0.72, 0.60, 0.65, 0.74, 0.66, 0.65, 0.71,
0.59, 0.65, 0.77, 0.52, 0.53, 0.58, 0.53]))
(aaChartModel
                .animationTypeSet(AAChartAnimationType.easeFrom)  # set the chart rendering animation type to easeFrom
.seriesSet([element1, element2, element3, element4]))
return aaChartModel
@staticmethod
def configureLineChartAndSplineChartStyle(chartType: AAChartType):
aaChartModel = (BasicChartComposer.configureAreaChart()
.chartTypeSet(chartType)
            .markerSymbolStyleSet(AAChartSymbolStyleType.borderBlank)  # set the line marker style to a white border
.markerRadiusSet(6))
if chartType == AAChartType.spline:
element1 = (AASeriesElement()
.nameSet("Tokyo")
.lineWidthSet(7)
.dataSet([50, 320, 230, 370, 230, 400, ]))
element2 = (AASeriesElement()
.nameSet("Berlin")
.lineWidthSet(7)
.dataSet([80, 390, 210, 340, 240, 350, ]))
element3 = (AASeriesElement()
.nameSet("New York")
.lineWidthSet(7)
.dataSet([100, 370, 180, 280, 260, 300, ]))
element4 = (AASeriesElement()
.nameSet("London")
.lineWidthSet(7)
.dataSet([130, 350, 160, 310, 250, 268, ]))
(aaChartModel
.animationTypeSet(AAChartAnimationType.swingFromTo)
.seriesSet([element1, element2, element3, element4]))
return aaChartModel
|
from adafruit_circuitplayground.express import cpx
while True:
if cpx.button_a:
cpx.play_tone(262, 1)
if cpx.button_b:
cpx.play_tone(294, 1)
|
import os
import re
import matplotlib.pyplot as plt
def get_root_dir():
return input("Enter root_dir: ")
def get_user_input():
return input("Enter keyword: ")
def find_files():
root_dir = get_root_dir()
keyword = re.compile(get_user_input())
files_found = {}
for (dirpath, dirnames, filenames) in os.walk(root_dir):
rel_dirpath = os.path.relpath(dirpath, root_dir)
files_found[rel_dirpath] = 0
for current_file in filenames:
with open(f'{dirpath}/{current_file}') as f:
for line in f:
if re.search(keyword, line):
files_found[rel_dirpath] += 1
print(files_found)
plt.bar(range(len(files_found)), list(files_found.values()), align='center', alpha=0.5)
plt.xticks(range(len(files_found)), list(files_found.keys()))
plt.ylabel('Files')
    plt.title('Files found which contain phrase')
plt.show()
if __name__ == '__main__':
find_files()
|
from socketclient.SocketClient import SocketClient
from socketclient.SocketPool import SocketPool
from socketclient.Connector import Connector, TcpConnector
|
# -*- coding: utf-8 -*-
import re

from scrapy_redis.spiders import RedisCrawlSpider
from crawler.items import ArticleItem
class Game17373Spider(RedisCrawlSpider):
name = 'game_17373'
allowed_domains = ['news.17173.com']
redis_key = '17373_game:start_urls'
custom_settings = {
'ITEM_PIPELINES' : {
'crawler.pipelines.ArticlePipeline':300
},
        # Enable scrapy-redis (dedup filter and scheduler)
'DUPEFILTER_CLASS': "scrapy_redis.dupefilter.RFPDupeFilter",
'SCHEDULER': "scrapy_redis.scheduler.Scheduler",
}
def parse(self, response):
item = ArticleItem()
item['website'] = 'news.17173.com'
item['url'] = response.url
item['title'] = response.xpath('//h1[@class="gb-final-tit-article"]/text()').extract_first()
item['content'] = self.parse_content('-'.join(response.xpath('//div[@id="mod_article"]//text()').extract()))
item['category'] = '游戏'
item['publish_time'] = response.xpath('//div[@class="gb-final-mod-info"]/span[1]/text()').extract_first()
yield item
    def parse_content(self, content):
        # str.replace does not handle regular expressions, so use re.sub for the pattern-based cleanup
        content = re.sub(r'【.*?】', '', content)
        content = content.replace('\u3000', '')
        content = content.replace('\n', '')
        content = content.replace('\r', '')
        content = content.replace('\xa0', '')
        content = content.replace('-', '')
        content = content.replace('17173新闻采访部', '')
        # strip URL-like fragments
        content = re.sub(r'[a-zA-Z]*[:/\\]*[A-Za-z0-9\-_]+\.+[A-Za-z0-9./%&=?\-_]+', '', content, flags=re.IGNORECASE)
        content = content.strip()
        return content
|
"""Sensor from an SQL Query."""
import datetime
import decimal
import logging
import sqlalchemy
from sqlalchemy.orm import scoped_session, sessionmaker
import voluptuous as vol
from homeassistant.components.recorder import CONF_DB_URL, DEFAULT_DB_FILE, DEFAULT_URL
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT, CONF_VALUE_TEMPLATE
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_COLUMN_NAME = "column"
CONF_QUERIES = "queries"
CONF_QUERY = "query"
CONF_QUERY_TEMPLATE = "query_template"
def validate_sql(value):
"""Validate that value is a SQL SELECT query."""
if not value.lstrip().lower().startswith(
"select"
) and not value.lstrip().lower().startswith("exec"):
raise Exception("Only SELECT or EXEC queries allowed")
return value
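# Example: validate_sql("SELECT 1;") returns the query unchanged, while
# validate_sql("DROP TABLE states;") raises an exception (neither SELECT nor EXEC).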
_QUERY_SCHEME = vol.Schema(
{
vol.Required(CONF_COLUMN_NAME): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_QUERY): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_QUERY_TEMPLATE): cv.template,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_QUERIES): [_QUERY_SCHEME], vol.Optional(CONF_DB_URL): cv.string}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the SQL sensor platform."""
db_url = config.get(CONF_DB_URL, None)
if not db_url:
db_url = DEFAULT_URL.format(hass_config_path=hass.config.path(DEFAULT_DB_FILE))
try:
engine = sqlalchemy.create_engine(db_url)
sessmaker = scoped_session(sessionmaker(bind=engine))
# Run a dummy query just to test the db_url
sess = sessmaker()
sess.execute("SELECT 1;")
except sqlalchemy.exc.SQLAlchemyError as err:
_LOGGER.error("Couldn't connect using %s DB_URL: %s", db_url, err)
return
finally:
sess.close()
queries = []
for query in config.get(CONF_QUERIES):
name = query.get(CONF_NAME)
query_str = query.get(CONF_QUERY)
unit = query.get(CONF_UNIT_OF_MEASUREMENT)
value_template = query.get(CONF_VALUE_TEMPLATE)
column_name = query.get(CONF_COLUMN_NAME)
query_template = query.get(CONF_QUERY_TEMPLATE)
if query_str and query_template:
raise Exception("Both query and query_template are defined. Choose one.")
if value_template is not None:
value_template.hass = hass
if query_template is not None:
query_template.hass = hass
sensor = SQLSensor(
hass,
name,
sessmaker,
query_str,
column_name,
unit,
value_template,
query_template,
)
queries.append(sensor)
async_add_entities(queries)
return True
class SQLSensor(Entity):
"""Representation of an SQL sensor."""
def __init__(
self, hass, name, sessmaker, query, column, unit, value_template, query_template
):
"""Initialize the SQL sensor."""
self.hass = hass
self._name = name
self._query_template = query_template
if query is not None:
if "LIMIT" in query:
self._query = query
else:
self._query = query.replace(";", " LIMIT 1;")
else:
self._query = None
self._unit_of_measurement = unit
self._template = value_template
self._column_name = column
self.sessionmaker = sessmaker
self._state = None
self._attributes = {}
@property
def name(self):
"""Return the name of the query."""
return self._name
@property
def state(self):
"""Return the query's current state."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
async def async_update(self):
"""Retrieve sensor data from the query."""
if self._query is not None:
sql_command = self._query
if self._query_template is not None:
try:
sql_command = self._query_template.async_render()
except TemplateError as ex:
if ex.args and ex.args[0].startswith(
"UndefinedError: 'None' has no attribute"
):
# Common during HA startup - so just a warning
_LOGGER.warning(
"Could not render template %s, the state is unknown.",
self._name,
)
else:
self._state = None
_LOGGER.error("Could not render template %s: %s", self._name, ex)
try:
validated_sql_command = validate_sql(sql_command)
sess = self.sessionmaker()
result = sess.execute(validated_sql_command)
self._attributes = {}
if not result.returns_rows or result.rowcount == 0:
_LOGGER.warning("%s returned no results", self._query)
self._state = None
return
for res in result:
_LOGGER.debug("result = %s", res.items())
data = res[self._column_name]
for key, value in res.items():
if isinstance(value, decimal.Decimal):
value = float(value)
if isinstance(value, datetime.date):
value = str(value)
self._attributes[key] = value
except sqlalchemy.exc.SQLAlchemyError as err:
_LOGGER.error("Error executing query %s: %s", self._query, err)
return
finally:
sess.close()
if self._template is not None:
self._state = self._template.async_render_with_possible_json_value(
data, None
)
else:
self._state = data
|
import os
from multiprocessing import Process, Pool, Value, Lock
from ctypes import c_int
import subprocess
from datetime import datetime
import time
from io import StringIO
from vncdotool import api
import argparse
parser = argparse.ArgumentParser(description="Open/Unsafe VNC Scraper")
parser.add_argument("-input", help="Input IP list file")
parser.add_argument("-port", help="VNC connection port", type=int)
parser.add_argument("-proc_count", help="Multithreaded process count", type=int)
parser.add_argument("-connection_timeout", help="VNC connection timeout", type=int)
parser.add_argument("-screenshot_timeout", help="Screenshot attempt timeout", type=int)
parser.add_argument("--no_screenshots", help="Disable server screenshots", action="store_true")
parser.add_argument("--no_passwords", help="Disable basic password checks", action="store_true")
args = parser.parse_args()
#Requires vncdotool (imported above as "api") and zmap to be installed.
#sudo zmap -B 10M -p 5900 -n 500000 -o results.txt
#sudo zmap -B 10M -p 5900 -n 1000000000 -o results.txt 2.5 days/60 hours
#sudo zmap -B 10M -p 5900 -n 16666666 -o results.txt 1 hour
vncport = "5900"
#Timeout in seconds
connection_timeout = 10
#screenshots can take minutes..
screenshot_timeout = 180
process_amount = 50
#replaces the backslashes in os.getcwd with forward slashes.
screenshot_path = os.getcwd().replace('\\', '/') + "/results/screenshots/"
password_file = "./passwords.txt"
ipfile = "./pw_results.txt"
valid_ipfile = "./results/" + time.strftime("%Y%m%d-%H%M%S") + "_validips.txt"
password_ipfile = "./results/" + time.strftime("%Y%m%d-%H%M%S") + "_passwordips.txt"
password_check = not args.no_passwords
skip_screencapture = args.no_screenshots
if args.input:
ipfile = args.input
if args.port:
vncport = str(args.port)
if args.connection_timeout:
connection_timeout = args.connection_timeout
if args.screenshot_timeout:
screenshot_timeout = args.screenshot_timeout
if args.proc_count:
process_amount = args.proc_count
if password_check:
#Passwords to test every password-protected VNC server by, line-separated.
passwords = [line.strip() for line in open(password_file)]
def screencapture(startendpts):
#startendpts in format: [start:end] eg: [0,52] [53, 106]...
start = startendpts[0]
end = startendpts[1]
passed_ips = []
password_failed_ips = []
passed_amt = failed_amt = password_failed_amt = 0
#Will NOT work in an IDLE with multiprocessing!
for i in range(start, end):
screenshot_starttime = datetime.now()
vncserver = vncservers[i]
timestr = time.strftime("%Y%m%d-%H%M%S")
#screenshot_filename = timestr + ".png"
screenshot_filename = str(i+1) + "_" + vncserver + "_" + timestr + ".png"
try:
#Test connection
client = api.connect(vncserver, password=None)
client.timeout = connection_timeout
client.connectionMade()
print("Connection has established successfully to IP " + str(i + 1) + "/" + str(end) + ": " + vncserver)
if not skip_screencapture:
#Now restart. This is required.
client = api.connect(vncserver, password=None)
client.timeout = screenshot_timeout
client.captureScreen(screenshot_path + screenshot_filename)
client.disconnect()
except Exception as e:
if "Timeout" in str(e):
failed_amt += 1
None
#print("Connection to IP " + str(i) + "/" + str(end) + " has timed out.")
elif "password" in str(e):
password_success = False
correctpass = None
if password_check:
for pw in passwords:
try:
client = api.connect(vncserver, password=pw)
client.timeout = connection_timeout
client.connectionMade()
client.disconnect()
except:
pass
else:
correctpass = pw
password_success = True
break
if password_success:
print("IP " + str(i + 1) + "/" + str(end) + " (" + vncserver + ") has passed because it has a password present in your password list: " + correctpass)
try:
client = api.connect(vncserver, password=correctpass)
client.timeout = screenshot_timeout
client.captureScreen(screenshot_path + screenshot_filename)
client.disconnect()
except Exception as e:
print("IP " + str(i + 1) + "/" + str(end) + " (" + vncserver + ") password was found, but screenshot could not be taken. Exception: " + str(e))
else:
screenshot_endtime = datetime.now()
screenshot_duration = screenshot_endtime - screenshot_starttime
print(screenshot_filename + " screenshot taken in " + str(screenshot_duration.total_seconds()) + " seconds.")
passed_amt += 1
passed_ips.append(vncserver + ":" + vncport + ":" + correctpass)
else:
print("IP " + str(i + 1) + "/" + str(end) + " (" + vncserver + ") has failed because it requires a password you do not have.")
password_failed_amt += 1
password_failed_ips.append(vncserver + ":" + vncport)
else:
None
print("Screencapture for IP " + str(i) + "/" + str(end) + " has failed: " + str(e))
failed_amt += 1
else:
screenshot_endtime = datetime.now()
screenshot_duration = screenshot_endtime - screenshot_starttime
print(screenshot_filename + " screenshot taken in " + str(screenshot_duration.total_seconds()) + " seconds.")
passed_amt += 1
passed_ips.append(vncserver + ":" + vncport)
resultsdict = {}
resultsdict['passed_ips'] = passed_ips
resultsdict['password_failed_ips'] = password_failed_ips
resultsdict['password_failed_amt'] = password_failed_amt
resultsdict['passed_amt'] = passed_amt
resultsdict['failed_amt'] = failed_amt
return resultsdict
def readipfile():
servers = []
with open(ipfile) as fp:
for line in fp:
servers.append(line.strip())
return servers
vncservers = readipfile()
if not os.path.exists(screenshot_path):
os.makedirs(screenshot_path)
serveramt = len(vncservers)
split_serveramt = int(serveramt / process_amount)
print("IPs to attempt to screen capture: " + str(serveramt) + ". There will be " + str(process_amount) + " processes handling " + str(split_serveramt) + " IPs each.")
if __name__ == '__main__':
processlist = []
xypairs = []
for g in range(process_amount):
x = split_serveramt * g
y = split_serveramt * (g + 1)
xypair = [x, y]
xypairs.append(xypair)
#print(xypairs)
pool = Pool(processes=len(xypairs))
result_list = pool.map(screencapture, xypairs)
print("results: ")
passed_amt = password_failed_amt = failed_amt = 0
passed_ips = []
password_failed_ips = []
for result in result_list:
passed_amt += result['passed_amt']
password_failed_amt += result['password_failed_amt']
failed_amt += result['failed_amt']
for ip in result['password_failed_ips']:
password_failed_ips.append(ip)
for ip in result['passed_ips']:
passed_ips.append(ip)
    #Will not go further until all of the processes have finished.
print("Screencaptures have finished. Passed: " + str(passed_amt) + ". Password failed: " + str(password_failed_amt) + ". Failed: " + str(failed_amt) + ".")
print("Writing passed IPs to file.")
with open(valid_ipfile, "w") as myfile:
for ip in passed_ips:
myfile.write(ip + "\n")
print("Passed IPs have been written to " + valid_ipfile)
print("Writing password-failed IPs to file.")
with open(password_ipfile, "w") as myfile:
for ip in password_failed_ips:
ip += "\n"
myfile.write(ip)
print("Password-failed IPs have been written to " + password_ipfile)
|
from django.contrib import admin
from .models import Assignments
# Register your models here.
admin.site.register(Assignments)
|
import ldap3, json
def makeOrganogram(staff):
connection = make_connection()
organogram = {}
for uid in staff.nList:
employees = query_cdr(connection, staff.person[uid]["Email"], "eMail")
emList = []
for employee in employees:
ee = employee.decode()
fID = ee.split(",")[0][3:]
eem = query_cdr(connection, fID, "federalID")
if len(eem) == 1: # Do they really exist in CDR?
eMail = eem[0].decode()
if eMail in staff.eList:
emList.append(eMail)
if emList: organogram[staff.person[uid]["Email"]] = emList
organogram["debbie.loader@stfc.ac.uk"] = staff.eList
with open('ppd_organogram.json', 'w') as file:
file.write(json.dumps(organogram))
def make_connection():
server = ldap3.Server("ldaps://fed.cclrc.ac.uk:3269")
connection = ldap3.Connection(server, client_strategy=ldap3.SAFE_SYNC, auto_bind=True)
return connection
def query_cdr(conn, tag, type):
if type == "eMail":
filter = "(userPrincipalName=" + tag + ")"
attribute = "directreports"
else:
filter = "(sAMAccountName=" + tag + ")"
attribute = "userPrincipalName"
status, result, response, _ = conn.search(
"dc=FED,dc=CCLRC,dc=AC,dc=UK", filter, attributes=attribute
)
# For some reason the response is of class "bytes"
if status:
if attribute in response[0]["raw_attributes"]:
feature = response[0]["raw_attributes"][attribute]
return feature
return []
|
def generate_project_params(runlevel):
"""Returns the project-specific params."""
params = {}
return params
def generate_project_users(runlevel):
origin = 'nest'
users = [
{
'username': 'demouser',
'password': 'GARBAGESECRET',
'given_name': 'ARI',
'family_name': 'User',
'origin': origin,
'is_superuser': False
},
{
'username': 'klumppuser',
'password': 'GARBAGESECRET',
'given_name': 'KlumppLab',
'family_name': 'User',
'origin': origin,
'is_superuser': False
},
{
'username': 'mayouser',
'password': 'GARBAGESECRET',
'given_name': 'Mayo',
'family_name': 'Shared',
'origin': origin,
'is_superuser': False
},{
'username': 'adminuser',
'password': 'GARBAGESECRET',
'given_name': 'Ari',
'family_name': 'Admin',
'origin': origin,
'is_superuser': True
},
]
return users
|
import enum
import logging
import numpy as np
import sympy as sp
class TrajectoryType(enum.Enum):
VELOCITY = 2
ACCELERATION = 4
JERK = 6
SNAP = 8
class MininumTrajectory:
def __init__(self, trajtype: TrajectoryType):
numcoeffs = trajtype.value
logging.info(f'Creating minimum {trajtype.name} trajectory generator with {numcoeffs} coefficients')
if numcoeffs % 2 != 0:
raise ValueError('Number of coefficients must be divisible by 2!')
self.numcoeffs = numcoeffs
t = sp.symbols('t')
pos = t**0
for ii in range(1, numcoeffs):
pos += t**ii
vel = pos.diff(t)
acc = vel.diff(t)
jerk = acc.diff(t)
snap = jerk.diff(t)
crac = snap.diff(t)
pop = crac.diff(t)
self.eqs = [pos, vel, acc, jerk, snap, crac, pop]
self.times = None
self.polys = None
self.dims = None
self.numderivatives = None
def coeffs_for_time(self, equations, time):
"""
Returns a matrix where each row contains the coefficients of the input equations evaluated at time
:param equations: list of equations to evaluate against
:param time: the time to evaluate at
:return: matrix with number of rows equal to number of equations by number of coefficients
"""
retval = np.zeros((len(equations), self.numcoeffs))
for ii, eq in enumerate(equations):
# pull out the coefficients - ordered from highest to lowest poly order, e.g.: t^3, t^2, t^1, t^0
coeffs = eq.as_poly().all_coeffs()
# apply time
exp = len(coeffs) - 1
for idx in range(len(coeffs)):
coeffs[idx] *= time ** exp
exp -= 1
# pad out with zeros if some of the coefficients are 0
while len(coeffs) < self.numcoeffs:
coeffs.append(0)
# reverse them to match up with our coeffs vector
retval[ii] = list(reversed(coeffs))
return retval
def generate(self, points, times, numderivatives=2):
"""
Solves the minimum trajectory for the given points and times and caches the trajectory and its requested time
        derivatives for later use
:param points: an ordered list of tuples represent points the trajectory will pass through
:param times: an ordered list of times corresponding to points, of when the trajectory should reach the point
:param numderivatives: the number of time derivatives to generate
"""
if len(points) != len(times) or len(points) < 2:
            raise ValueError('Points and times must be lists of equal length with at least 2 elements')
for idx, starttime in enumerate(times[:-1]):
endtime = times[idx+1]
if endtime <= starttime:
raise ValueError('Times must be ordered from smallest to largest and cannot overlap')
self.dims = len(points[0])
logging.info(f'Generating trajectory using {len(points)} waypoints - waypoints have {self.dims} dimensions.')
logging.info(f'Will generate {numderivatives} time derivatives as well as position.')
self.numderivatives = numderivatives
self.times = np.array(times)
n = self.numcoeffs * (len(points) - 1)
A = np.zeros((n, n))
# use the first point to figure out how many b vectors we need
b = [np.zeros((n, 1)) for _ in range(self.dims)]
# fill in equations for first segment - time derivatives of position are all equal to 0 at start time
nextrow = 0
numeqs = int((self.numcoeffs - 2) / 2)
equations = self.eqs[1:1+numeqs]
A[nextrow:nextrow + numeqs, 0:self.numcoeffs] = self.coeffs_for_time(equations, times[0])
nextrow += numeqs
# fill in equations for last segment - time derivatives of position are all equal to 0 at end time
A[nextrow:nextrow + numeqs, n - self.numcoeffs:n] = self.coeffs_for_time(equations, times[-1])
nextrow += numeqs
# for all segments...
for idx, startp in enumerate(points[0:-1]):
endp = points[idx + 1]
startt = times[idx]
endt = times[idx + 1]
# fill in 2 equations for start and end point passing through the poly
# start point
col = idx * self.numcoeffs
A[nextrow:nextrow + 1, col:col + self.numcoeffs] = self.coeffs_for_time([self.eqs[0]], startt)
for ii in range(len(points[0])):
b[ii][nextrow] = startp[ii]
nextrow += 1
# end point
A[nextrow:nextrow + 1, col:col + self.numcoeffs] = self.coeffs_for_time([self.eqs[0]], endt)
for ii in range(len(points[0])):
b[ii][nextrow] = endp[ii]
nextrow += 1
# for all segments, except last...
for idx in range(len(points) - 2):
endt = times[idx + 1]
# fill in required equations for time derivatives to ensure they are the same through the transition point
numeqs = self.numcoeffs - 2
equations = self.eqs[1:1 + numeqs]
col = idx * self.numcoeffs
A[nextrow:nextrow + numeqs, col:col + self.numcoeffs] = self.coeffs_for_time(equations, endt)
col += self.numcoeffs
# negate endt coefficients since we move everything to the lhs
A[nextrow:nextrow + numeqs, col:col + self.numcoeffs] = -self.coeffs_for_time(equations, endt)
nextrow += numeqs
# solve the system
x = [np.linalg.solve(A, b[ii]) for ii in range(len(b))]
# polys will have rows corresponding to segments, columns corresponding to the point dimensions, and 3rd dim
# will contain the position poly and the number of requested time derivatives
self.polys = []
t = sp.symbols('t')
for ii in range(len(points)-1):
col = []
offset = ii * self.numcoeffs
for jj in range(len(x)):
layer = [sp.Poly(reversed(x[jj][offset:offset+self.numcoeffs].transpose()[0]), t)]
# append on requested number of time derivatives as well
for kk in range(1, 1+numderivatives):
# take the time derivative of the previous poly
layer.append(layer[kk-1].diff(t))
col.append(layer)
self.polys.append(col)
logging.info(f'Finished trajectory generation using {len(points)} waypoints.')
def getvalues(self, time):
"""
Returns an array where rows corresponds to the number of dimensions in points, and columns corresponds to the
time derivatives for the given time
:param time: the time to evaluate the polys at
        :return: numpy array
"""
if self.times is None or self.polys is None:
raise AssertionError('Please generate the trajectory first')
# find the correct poly index
if time < self.times[0]:
time = self.times[0]
if time > self.times[-1]:
time = self.times[-1]
idx = np.argwhere(self.times <= time)[-1][0]
# the last index time actually means we should use the previous poly
if idx >= len(self.polys):
idx = len(self.polys) - 1
retval = np.zeros((self.dims, self.numderivatives+1))
for jj in range(retval.shape[0]):
for kk in range(retval.shape[1]):
retval[jj][kk] = self.polys[idx][jj][kk].eval(time)
return retval
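

if __name__ == '__main__':
    # Illustrative usage sketch: a minimum-snap trajectory through three 2D waypoints.
    # The waypoints and times below are arbitrary example values.
    logging.basicConfig(level=logging.INFO)
    trajectory = MininumTrajectory(TrajectoryType.SNAP)
    trajectory.generate(points=[(0.0, 0.0), (1.0, 2.0), (3.0, 1.0)], times=[0.0, 1.0, 2.0])
    # Rows correspond to the point dimensions (x, y); columns are position, velocity, acceleration.
    print(trajectory.getvalues(0.5))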
|
#!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Upload build results to Phabricator.
As I did not like the Jenkins plugin, we're using this script to upload the
build status, a summary and the test results to Phabricator."""
import argparse
import os
import re
import socket
import time
import urllib.parse
import uuid
from typing import Optional, List, Dict
import pathspec
from lxml import etree
from phabricator import Phabricator
from enum import IntEnum
class PhabTalk:
"""Talk to Phabricator to upload build results.
See https://secure.phabricator.com/conduit/method/harbormaster.sendmessage/
"""
def __init__(self, token: Optional[str], host: Optional[str], dryrun: bool):
self._phab = None # type: Optional[Phabricator]
if not dryrun:
self._phab = Phabricator(token=token, host=host)
_try_call(self._phab.update_interfaces)
@property
def dryrun(self):
return self._phab is None
def get_revision_id(self, diff: str) -> Optional[str]:
"""Get the revision ID for a diff from Phabricator."""
if self.dryrun:
return None
result = self._phab.differential.querydiffs(ids=[diff])
return 'D' + result[diff]['revisionID']
def comment_on_diff(self, diff_id: str, text: str):
"""Add a comment to a differential based on the diff_id"""
print('Sending comment to diff {}:'.format(diff_id))
print(text)
revision_id = self.get_revision_id(diff_id)
if revision_id is not None:
self._comment_on_revision(revision_id, text)
def _comment_on_revision(self, revision: str, text: str):
"""Add comment on a differential based on the revision id."""
transactions = [{
'type': 'comment',
'value': text
}]
if self.dryrun:
print('differential.revision.edit =================')
print('Transactions: {}'.format(transactions))
return
# API details at
# https://secure.phabricator.com/conduit/method/differential.revision.edit/
self._phab.differential.revision.edit(objectIdentifier=revision,
transactions=transactions)
print('Uploaded comment to Revision D{}:{}'.format(revision, text))
def update_build_status(self, diff_id: str, phid: str, working: bool, success: bool, lint: {} = {}, unit: [] = []):
"""Submit collected report to Phabricator.
"""
result_type = 'pass'
if working:
result_type = 'working'
elif not success:
result_type = 'fail'
# Group lint messages by file and line.
lint_messages = []
for v in lint.values():
path = ''
line = 0
descriptions = []
for e in v:
path = e['path']
line = e['line']
descriptions.append('{}: {}'.format(e['name'], e['description']))
lint_message = {
'name': 'Pre-merge checks',
'severity': 'warning',
'code': 'llvm-premerge-checks',
'path': path,
'line': line,
'description': '\n'.join(descriptions),
}
lint_messages.append(lint_message)
if self.dryrun:
print('harbormaster.sendmessage =================')
print('type: {}'.format(result_type))
print('unit: {}'.format(unit))
print('lint: {}'.format(lint_messages))
return
_try_call(lambda: self._phab.harbormaster.sendmessage(
buildTargetPHID=phid,
type=result_type,
unit=unit,
lint=lint_messages))
print('Uploaded build status {}, {} test results and {} lint results'.format(
result_type, len(unit), len(lint_messages)))
# TODO: deprecate
def add_artifact(self, phid: str, file: str, name: str, results_url: str):
artifactKey = str(uuid.uuid4())
artifactType = 'uri'
artifactData = {'uri': '{}/{}'.format(results_url, file),
'ui.external': True,
'name': name}
self.create_artifact(phid, artifactKey, artifactType, artifactData)
print('Created artifact "{}"'.format(name))
def create_artifact(self, phid, artifact_key, artifact_type, artifact_data):
if self.dryrun:
print('harbormaster.createartifact =================')
print('artifactKey: {}'.format(artifact_key))
print('artifactType: {}'.format(artifact_type))
print('artifactData: {}'.format(artifact_data))
return
_try_call(lambda: self._phab.harbormaster.createartifact(
buildTargetPHID=phid,
artifactKey=artifact_key,
artifactType=artifact_type,
artifactData=artifact_data))
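

# Usage sketch (illustrative values): with dryrun=True no Conduit calls are made and the
# payloads are only printed, so the reporting flow can be exercised without credentials.
#
#   phab = PhabTalk(token=None, host=None, dryrun=True)
#   phab.update_build_status(diff_id='12345', phid='PHID-HMBT-xxxx', working=False,
#                            success=True, lint={}, unit=[])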
def _parse_patch(patch) -> List[Dict[str, str]]:
"""Extract the changed lines from `patch` file.
The return value is a list of dictionaries {filename, line, diff}.
Diff must be generated with -U0 (no context lines).
"""
entries = []
lines = []
filename = None
line_number = 0
for line in patch:
match = re.search(r'^(\+\+\+|---) [^/]+/(.*)', line)
if match:
if len(lines) > 0:
entries.append({
'filename': filename,
'diff': ''.join(lines),
'line': line_number,
})
lines = []
filename = match.group(2).rstrip('\r\n')
continue
match = re.search(r'^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))?', line)
if match:
if len(lines) > 0:
entries.append({
'filename': filename,
'diff': ''.join(lines),
'line': line_number,
})
lines = []
line_number = int(match.group(1))
continue
if line.startswith('+') or line.startswith('-'):
lines.append(line)
if len(lines) > 0:
entries.append({
'filename': filename,
'diff': ''.join(lines),
'line': line_number,
})
return entries
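

# Example (illustrative): for a -U0 diff touching one file, _parse_patch yields one entry
# per hunk, e.g.
#
#   patch = ['--- a/foo.cpp\n', '+++ b/foo.cpp\n', '@@ -10,0 +11 @@\n', '+int x = 0;\n']
#   _parse_patch(patch)
#   # -> [{'filename': 'foo.cpp', 'diff': '+int x = 0;\n', 'line': 10}]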
class Step:
def __init__(self):
self.name = ''
self.success = True
self.duration = 0.0
self.messages = []
def set_status_from_exit_code(self, exit_code: int):
if exit_code != 0:
self.success = False
class Report:
def __init__(self):
self.os = ''
self.name = ''
self.comments = []
self.success = True
self.working = False
self.unit = [] # type: List
self.lint = {}
self.test_stats = {
'pass': 0,
'fail': 0,
'skip': 0
} # type: Dict[str, int]
self.steps = [] # type: List[Step]
self.artifacts = [] # type: List
def __str__(self):
return str(self.__dict__)
def add_lint(self, m):
key = '{}:{}'.format(m['path'], m['line'])
if key not in self.lint:
self.lint[key] = []
self.lint[key].append(m)
def add_artifact(self, dir: str, file: str, name: str):
self.artifacts.append({'dir': dir, 'file': file, 'name': name})
class BuildReport:
def __init__(self, args):
# self.args = args
self.ph_id = args.ph_id # type: str
self.diff_id = args.diff_id # type: str
self.test_result_file = args.test_result_file # type: str
self.conduit_token = args.conduit_token # type: str
self.dryrun = args.dryrun # type: bool
self.buildresult = args.buildresult # type: str
self.clang_format_patch = args.clang_format_patch # type: str
self.clang_tidy_result = args.clang_tidy_result # type: str
self.clang_tidy_ignore = args.clang_tidy_ignore # type: str
self.results_dir = args.results_dir # type: str
self.results_url = args.results_url # type: str
self.workspace = args.workspace # type: str
self.failure_messages = args.failures # type: str
self.name = args.name # type: str
self.api = PhabTalk(args.conduit_token, args.host, args.dryrun)
self.revision_id = self.api.get_revision_id(self.diff_id)
self.comments = []
self.success = True
self.working = False
self.unit = [] # type: List
self.lint = {}
self.test_stats = {
'pass': 0,
'fail': 0,
'skip': 0
} # type: Dict[str, int]
def add_lint(self, m):
key = '{}:{}'.format(m['path'], m['line'])
if key not in self.lint:
self.lint[key] = []
self.lint[key].append(m)
def final_report(self):
if self.buildresult is not None:
print('Jenkins result: {}'.format(self.buildresult))
if self.buildresult.lower() == 'success':
pass
elif self.buildresult.lower() == 'null':
self.working = True
else:
self.success = False
else:
self.success = False
try:
self.add_test_results()
except etree.XMLSyntaxError:
# Sometimes we get an incomplete XML file.
# In this case:
# - fail the build (the safe thing to do)
# - continue so the user gets some feedback.
print('Error parsing {}. Invalid XML syntax!'.format(self.test_result_file))
self.success = False
self.add_clang_tidy()
self.add_clang_format()
self.api.update_build_status(self.diff_id, self.ph_id, self.working, self.success, self.lint, self.unit)
self.add_links_to_artifacts()
title = 'Issue with build for {} ({})'.format(self.api.get_revision_id(self.diff_id), self.diff_id)
self.comments.append(
'Pre-merge checks is in beta <a href="https://github.com/google/llvm-premerge-checks/issues/new?assignees'
'=&labels=bug&template=bug_report.md&title={}">report issue</a>.<br/>'
'Please <a href="https://reviews.llvm.org/project/update/78/join/">join beta</a> or '
'<a href="https://github.com/google/llvm-premerge-checks/issues/new?assignees=&labels=enhancement&template'
'=&title=enable%20checks%20for%20{{PATH}}">enable it for your project</a>'.format(
urllib.parse.quote(title)))
with open(os.path.join(self.results_dir, 'summary.html'), 'w') as f:
f.write('<html><head><style>'
'body { font-family: monospace; margin: 16px; }\n'
'.failure {color:red;}\n'
'.success {color:green;}\n'
'</style></head><body>')
f.write('<h1>Build result for diff <a href="https://reviews.llvm.org/{0}">{0}</a> {1} at {2}</h1>'.format(
self.revision_id, self.diff_id, self.name))
if self.failure_messages and len(self.failure_messages) > 0:
for s in self.failure_messages.split('\n'):
f.write('<p class="failure">{}</p>'.format(s))
f.write('<p>' + '</p><p>'.join(self.comments) + '</p>')
f.write('</body></html>')
self.api.add_artifact(self.ph_id, 'summary.html', 'summary ' + self.name, self.results_url)
def add_clang_format(self):
"""Populates results from diff produced by clang format."""
if self.clang_format_patch is None:
return
present = os.path.exists(
os.path.join(self.results_dir, self.clang_format_patch))
if not present:
print('clang-format result {} is not found'.format(self.clang_format_patch))
self.comments.append(section_title('clang-format', False, False))
return
p = os.path.join(self.results_dir, self.clang_format_patch)
if os.stat(p).st_size != 0:
self.api.add_artifact(self.ph_id, self.clang_format_patch, 'clang-format ' + self.name, self.results_url)
diffs = _parse_patch(open(p, 'r'))
success = len(diffs) == 0
for d in diffs:
lines = d['diff'].splitlines(keepends=True)
m = 10 # max number of lines to report.
description = 'please reformat the code\n```\n'
n = len(lines)
cut = n > m + 1
if cut:
lines = lines[:m]
description += ''.join(lines) + '\n```'
if cut:
description += '\n{} diff lines are omitted. See [full diff]({}/{}).'.format(
n - m,
self.results_url,
self.clang_format_patch)
self.add_lint({
'name': 'clang-format',
'severity': 'autofix',
'code': 'clang-format',
'path': d['filename'],
'line': d['line'],
'char': 1,
'description': description,
})
comment = section_title('clang-format', success, present)
if not success:
comment += 'Please format your changes with clang-format by running `git-clang-format HEAD^` or applying ' \
'this <a href="{}">patch</a>.'.format(os.path.basename(self.clang_format_patch))
self.comments.append(comment)
self.success = success and self.success
def add_clang_tidy(self):
if self.clang_tidy_result is None:
return
# Typical message looks like
# [..]/clang/include/clang/AST/DeclCXX.h:3058:20: error: no member named 'LifetimeExtendedTemporary' in 'clang::Decl' [clang-diagnostic-error]
pattern = '^{}/([^:]*):(\\d+):(\\d+): (.*): (.*)'.format(self.workspace)
errors_count = 0
warn_count = 0
inline_comments = 0
present = os.path.exists(
os.path.join(self.results_dir, self.clang_tidy_result))
if not present:
print('clang-tidy result {} is not found'.format(self.clang_tidy_result))
self.comments.append(section_title('clang-tidy', False, False))
return
present = (self.clang_tidy_ignore is not None) and os.path.exists(self.clang_tidy_ignore)
if not present:
print('clang-tidy ignore file {} is not found'.format(self.clang_tidy_ignore))
self.comments.append(section_title('clang-tidy', False, False))
return
p = os.path.join(self.results_dir, self.clang_tidy_result)
add_artifact = False
ignore = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern,
open(self.clang_tidy_ignore, 'r').readlines())
for line in open(p, 'r'):
line = line.strip()
if len(line) == 0 or line == 'No relevant changes found.':
continue
add_artifact = True
match = re.search(pattern, line)
if match:
file_name = match.group(1)
line_pos = match.group(2)
char_pos = match.group(3)
severity = match.group(4)
text = match.group(5)
text += '\n[[{} | not useful]] '.format(
'https://github.com/google/llvm-premerge-checks/blob/master/docs/clang_tidy.md#warning-is-not'
'-useful')
if severity in ['warning', 'error']:
if severity == 'warning':
warn_count += 1
if severity == 'error':
errors_count += 1
if ignore.match_file(file_name):
print('{} is ignored by pattern and no comment will be added'.format(file_name))
else:
inline_comments += 1
self.add_lint({
'name': 'clang-tidy',
'severity': 'warning',
'code': 'clang-tidy',
'path': file_name,
'line': int(line_pos),
'char': int(char_pos),
'description': '{}: {}'.format(severity, text),
})
if add_artifact:
self.api.add_artifact(self.ph_id, self.clang_tidy_result, 'clang-tidy ' + self.name, self.results_url)
success = errors_count + warn_count == 0
comment = section_title('clang-tidy', success, present)
if not success:
comment += 'clang-tidy found <a href="{}">{} errors and {} warnings</a>. ' \
'{} of them are added as review comments <a href="{}">why?</a>.'.format(
self.clang_tidy_result, errors_count, warn_count, inline_comments,
'https://github.com/google/llvm-premerge-checks/blob/master/docs/clang_tidy.md#review-comments')
self.comments.append(comment)
self.success = success and self.success
def add_test_results(self):
"""Populates results from build test results XML.
Only reporting failed tests as the full test suite is too large to upload.
"""
success = True
present = (self.test_result_file is not None) and os.path.exists(
os.path.join(self.results_dir, self.test_result_file))
if not present:
print('Warning: Could not find test results file: {}'.format(self.test_result_file))
self.comments.append(section_title('Unit tests', False, present))
return
root_node = etree.parse(os.path.join(self.results_dir, self.test_result_file))
for test_case in root_node.xpath('//testcase'):
test_result = _test_case_status(test_case)
self.test_stats[test_result] += 1
if test_result == 'fail':
success = False
failure = test_case.find('failure')
test_result = {
'name': test_case.attrib['name'],
'namespace': test_case.attrib['classname'],
'result': test_result,
'duration': float(test_case.attrib['time']),
'details': failure.text
}
self.unit.append(test_result)
comment = section_title('Unit tests', success, True)
comment += '{} tests passed, {} failed and {} were skipped.<br/>'.format(
self.test_stats['pass'],
self.test_stats['fail'],
self.test_stats['skip'],
)
if not success:
comment += 'Failures:<br/>'
for test_case in self.unit:
if test_case['result'] == 'fail':
comment += '{}/{}<br/>'.format(test_case['namespace'], test_case['name'])
self.comments.append(comment)
self.success = success and self.success
def add_links_to_artifacts(self):
"""Comment on a diff, read text from file."""
file_links = []
for f in os.listdir(self.results_dir):
if f == 'summary.html':
continue
if f == 'console-log.txt':
self.api.add_artifact(self.ph_id, f, 'build log ' + self.name, self.results_url)
p = os.path.join(self.results_dir, f)
if not os.path.isfile(p):
continue
if os.stat(p).st_size == 0:
continue
file_links.append('<a href="{0}">{0}</a>'.format(f))
if len(file_links) > 0:
self.comments.append('<a href="./">Build artifacts</a>:<br/>' + '<br/>'.join(file_links))
def _test_case_status(test_case) -> str:
"""Get the status of a test case based on an etree node."""
if test_case.find('failure') is not None:
return 'fail'
if test_case.find('skipped') is not None:
return 'skip'
return 'pass'
def section_title(title: str, ok: bool, present: bool) -> str:
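    # Renders a one-line status for the summary comment; 'unknown' is shown when the corresponding artifact is missing.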
result = 'unknown'
c = ''
if present:
c = 'success' if ok else 'failure'
result = 'pass' if ok else 'fail'
return '{} <span class="{}">{}</span>. '.format(title, c, result)
def _try_call(call):
"""Tries to call function several times retrying on socked.timeout."""
c = 0
while True:
try:
call()
except socket.timeout as e:
c += 1
if c > 5:
                print('Connection to Phabricator failed, giving up: {}'.format(e))
raise
            print('Connection to Phabricator failed, retrying: {}'.format(e))
            time.sleep(c * 10)
            continue
        break
def main():
parser = argparse.ArgumentParser(
description='Write build status back to Phabricator.')
parser.add_argument('ph_id', type=str)
parser.add_argument('diff_id', type=str)
parser.add_argument('--test-result-file', type=str, dest='test_result_file', default='test-results.xml')
parser.add_argument('--conduit-token', type=str, dest='conduit_token', required=True)
parser.add_argument('--host', type=str, dest='host', default="https://reviews.llvm.org/api/",
help="full URL to API with trailing slash, e.g. https://reviews.llvm.org/api/")
parser.add_argument('--dryrun', action='store_true',
help="output results to the console, do not report back to the server")
parser.add_argument('--buildresult', type=str, default=None, choices=['SUCCESS', 'UNSTABLE', 'FAILURE', 'null'])
parser.add_argument('--clang-format-patch', type=str, default=None,
dest='clang_format_patch',
help="path to diff produced by git-clang-format, relative to results-dir")
parser.add_argument('--clang-tidy-result', type=str, default=None,
dest='clang_tidy_result',
help="path to diff produced by git-clang-tidy, relative to results-dir")
parser.add_argument('--clang-tidy-ignore', type=str, default=None,
dest='clang_tidy_ignore',
help="path to file with patters to exclude commenting on for clang-tidy findings")
parser.add_argument('--results-dir', type=str, default=None, required=True,
dest='results_dir',
help="directory of all build artifacts")
parser.add_argument('--results-url', type=str, default=None,
dest='results_url',
help="public URL to access results directory")
parser.add_argument('--workspace', type=str, required=True, help="path to workspace")
parser.add_argument('--failures', type=str, default=None, help="optional failure messages separated by newline")
parser.add_argument('--name', type=str, default='', help="optional name of the build bot")
args = parser.parse_args()
reporter = BuildReport(args)
reporter.final_report()
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from blog.models import Post, Author
class PostAdmin(admin.ModelAdmin):
pass
class AuthorAdmin(admin.ModelAdmin):
pass
admin.site.register(Post, PostAdmin)
admin.site.register(Author, AuthorAdmin)
|
'''
Description:
Note: This is a companion problem to the System Design problem: Design TinyURL.
TinyURL is a URL shortening service where you enter a URL such as https://leetcode.com/problems/design-tinyurl and it returns a short URL such as http://tinyurl.com/4e9iAk.
Design the encode and decode methods for the TinyURL service. There is no restriction on how your encode/decode algorithm should work. You just need to ensure that a URL can be encoded to a tiny URL and the tiny URL can be decoded to the original URL.
'''
class Codec:
def __init__(self):
self.url_table = dict()
def encode(self, longUrl: str) -> str:
"""Encodes a URL to a shortened URL.
"""
        # generate a URL id using the built-in hash() function
url_id = hash(longUrl)
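        # Note: hash() is salted per interpreter run (PYTHONHASHSEED), so the mapping is only valid within a single process.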
header = 'http://tinyurl.com/'
        # generate the short URL
short_url = header + str(url_id)
# update key-value pair in dictionary, url_table
self.url_table[url_id] = longUrl
return short_url
def decode(self, shortUrl: str) -> str:
"""Decodes a shortened URL to its original URL.
"""
        # parse the URL id that follows 'http://tinyurl.com/'
url_id = int(shortUrl[19:])
# lookup original url by url_id in dictionary
return self.url_table[url_id]
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(url))
# n : the length of the input URL
## Time Complexity: O(n)
#
# The time cost is dominated by hashing the input URL, which is O(n).
## Space Complexity: O(1)
#
# The space cost per call is a single dictionary entry, which is O(1).
def test_bench():
test_data = [
'https://www.leetcode.com',
'https://www.google.com'
]
coder = Codec()
for link in test_data:
short_url = coder.encode( link )
print( coder.decode(short_url) )
return
if __name__ == '__main__':
test_bench()
|
# -*- coding: utf-8 -*-
#
# Test Django Rest framework related changes
#
# :copyright: 2020 Sonu Kumar
# :license: BSD-3-Clause
#
import unittest
from django.test import LiveServerTestCase
from util import TestBase
class BasicTestCase(LiveServerTestCase, TestBase):
def test_no_exception(self):
result = self.client.get("/users/")
self.assertEqual(u'[]', result.content.decode('utf-8'))
self.assertEqual(self.get_exceptions(), [])
def test_create_user(self):
        form_data = {'username': 'admin', 'email': 'example@example.com'}
self.post("/users/", data=from_data)
errors = self.get_exceptions()
self.assertEqual(len(errors), 1)
error = errors[0]
self.assertIsNotNone(error.hash)
self.assertIsNotNone(error.host)
self.assertIsNotNone(error.path)
self.assertIsNotNone(error.method)
self.assertIsNotNone(error.request_data)
self.assertIsNotNone(error.traceback)
self.assertIsNotNone(error.count)
self.assertIsNotNone(error.created_on)
self.assertIsNotNone(error.last_seen)
form = eval(error.request_data)['form']
        self.assertEqual(form_data, form)
if __name__ == '__main__':
unittest.main()
|
from django import forms
from .models import Wish
class WishForm(forms.Form):
author = forms.CharField(max_length=40,
widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Your name'
})
)
email = forms.CharField(max_length=40,
widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Your email',
'type': 'email',
'pattern': "[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,}$"
})
)
description = forms.CharField(max_length=50,
widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'A short description of yourself/who AY is to you or how you know her'
})
)
body = forms.CharField(widget=forms.Textarea(attrs={
'class': 'form-control',
'placeholder': 'Leave a birthday wish for AY'
})
)
class ReplyForm(forms.Form):
msg = forms.CharField(widget=forms.Textarea(attrs={
'class': 'form-control',
'placeholder': 'Leave a reply'
})
)
|
import numbers
import ipywidgets
import numpy as np
import pandas as pd
import ubermagutil.units as uu
import matplotlib.pyplot as plt
import ubermagutil.typesystem as ts
import discretisedfield.util as dfu
@ts.typesystem(dim=ts.Scalar(expected_type=int, positive=True, const=True),
n=ts.Scalar(expected_type=int, positive=True, const=True))
class Line:
"""Line class.
This class represents field sampled on the line. It is based on
``pandas.DataFrame``, which is generated from two lists: ``points`` and
``values`` of the same length. ``points`` is a list of length-3 tuples
representing the points on the line on which the field was sampled. On the
other hand, ``values`` is a list of field values, which are
``numbers.Real`` for scalar fields or ``array_like`` for vector fields.
During the initialisation of the object, ``r`` column is added to
``pandas.DataFrame`` and it represents the distance of the point from the
first point in ``points``.
By default the columns where points data is stored are labelled as ``px``,
``py``, and ``pz``, storing the x, y, and z components of the point,
respectively. Similarly, for scalar fields, values are stored in column
``v``, whereas for vector fields, data is stored in ``vx``, ``vy``, and
``vz``. The default names of columns can be changed by passing
``point_columns`` and ``value_columns`` lists. Both lists are composed of
strings and must have appropriate lengths.
The number of points can be retrieved as ``discretisedfield.Line.n`` and
the dimension of the value can be retrieved using
``discretisedfield.Line.dim``.
Data in the form of ``pandas.DataFrame`` can be exposed as ``line.data``.
Parameters
----------
points : list
Points at which the field was sampled. It is a list of length-3 tuples.
values : list
Values sampled at ``points``.
point_columns : list
Point column names. Defaults to None.
value_columns : list
Value column names. Defaults to None.
Raises
------
ValueError
        If the number of points is not the same as the number of values.
Example
-------
1. Defining ``Line`` object, which contains scalar values.
>>> import discretisedfield as df
...
>>> points = [(0, 0, 0), (1, 0, 0), (2, 0, 0)]
>>> values = [1, 2, 3] # scalar values
>>> line = df.Line(points=points, values=values)
>>> line.n # the number of points
3
>>> line.dim
1
2. Defining ``Line`` for vector values.
>>> points = [(0, 0, 0), (1, 1, 1), (2, 2, 2), (3, 3, 3)]
>>> values = [(0, 0, 1), (0, 0, 2), (0, 0, 3), (0, 0, 4)] # vector values
>>> line = df.Line(points=points, values=values)
>>> line.n # the number of points
4
>>> line.dim
3
"""
def __init__(self, points, values, point_columns=None, value_columns=None):
if len(points) != len(values):
msg = (f'The number of points ({len(points)}) must be the same '
f'as the number of values ({len(values)}).')
raise ValueError(msg)
# Set the dimension (const descriptor).
if isinstance(values[0], numbers.Real):
self.dim = 1
else:
self.dim = len(values[0])
# Set the number of values (const descriptor).
self.n = len(points)
points = np.array(points)
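        # Reshape values into an (n, dim) array so that scalar values become a single column.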
values = np.array(values).reshape((points.shape[0], -1))
self.data = pd.DataFrame()
self.data['r'] = np.linalg.norm(points - points[0, :], axis=1)
for i, column in enumerate(self.point_columns):
self.data[column] = points[..., i]
for i, column in zip(range(values.shape[-1]), self.value_columns):
self.data[column] = values[..., i]
if point_columns is not None:
self.point_columns = point_columns
if value_columns is not None:
self.value_columns = value_columns
@property
def point_columns(self):
"""The names of point columns.
This method returns a list of strings denoting the names of columns
storing three coordinates of points. Similarly, by assigning a list of
strings to this property, the columns can be renamed.
Parameters
----------
val : list
Point column names used to rename them.
Returns
-------
list
List of point column names.
Raises
------
ValueError
If a list of inappropriate length is passed.
Examples
--------
1. Getting and setting the column names.
>>> import discretisedfield as df
...
>>> points = [(0, 0, 0), (1, 0, 0), (2, 0, 0)]
>>> values = [1, 2, 3] # scalar values
>>> line = df.Line(points=points, values=values)
>>> line.point_columns
['px', 'py', 'pz']
>>> line.point_columns = ['p0', 'p1', 'p2']
>>> line.data.columns
Index(['r', 'p0', 'p1', 'p2', 'v'], dtype='object')
"""
if not hasattr(self, '_point_columns'):
return [f'p{i}' for i in dfu.axesdict.keys()]
else:
return self._point_columns
@point_columns.setter
def point_columns(self, val):
if len(val) != 3:
msg = (f'Cannot change column names with a '
                   f'list of length {len(val)}.')
raise ValueError(msg)
self.data = self.data.rename(dict(zip(self.point_columns, val)),
axis=1)
self._point_columns = val
@property
def value_columns(self):
"""The names of value columns.
This method returns a list of strings denoting the names of columns
storing values. The length of the list is the same as the dimension of
the value. Similarly, by assigning a list of strings to this property,
the columns can be renamed.
Parameters
----------
val : list
Value column names used to rename them.
Returns
-------
list
List of value column names.
Raises
------
ValueError
If a list of inappropriate length is passed.
Examples
--------
1. Getting and setting the column names.
>>> import discretisedfield as df
...
>>> points = [(0, 0, 0), (1, 0, 0), (2, 0, 0)]
>>> values = [1, 2, 3] # scalar values
>>> line = df.Line(points=points, values=values)
>>> line.value_columns
['v']
>>> line.value_columns = ['my_interesting_value']
>>> line.data.columns
Index(['r', 'px', 'py', 'pz', 'my_interesting_value'], dtype='object')
"""
if not hasattr(self, '_value_columns'):
if self.dim == 1:
return ['v']
else:
return [f'v{i}' for i in list(dfu.axesdict.keys())[:self.dim]]
else:
return self._value_columns
@value_columns.setter
def value_columns(self, val):
if len(val) != self.dim:
msg = (f'Cannot change column names with a '
                   f'list of length {len(val)}.')
raise ValueError(msg)
self.data = self.data.rename(dict(zip(self.value_columns, val)),
axis=1)
self._value_columns = val
@property
def length(self):
"""Line length.
Length of the line is defined as the distance between the first and the
last point in ``points``.
Returns
-------
float
Line length.
Example
-------
1. Getting the length of the line.
>>> import discretisedfield as df
...
>>> points = [(0, 0, 0), (2, 0, 0), (4, 0, 0)]
>>> values = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] # vector values
>>> line = df.Line(points=points, values=values)
>>> line.length
4.0
"""
return self.data['r'].iloc[-1]
def __repr__(self):
"""Representation string.
Returns
-------
str
Representation string.
Example
-------
1. Getting representation string.
>>> import discretisedfield as df
...
>>> points = [(0, 0, 0), (2, 0, 0), (4, 0, 0)]
>>> values = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] # vector values
>>> line = df.Line(points=points, values=values)
>>> repr(line)
'...
"""
return repr(self.data)
def mpl(self, ax=None, figsize=None, yaxis=None, xlim=None,
multiplier=None, filename=None, **kwargs):
"""Line values plot.
This method plots the values (scalar or individual components) as a
function of the distance ``r``. ``mpl`` adds the plot to
``matplotlib.axes.Axes`` passed via ``ax`` argument. If ``ax`` is not
passed, ``matplotlib.axes.Axes`` object is created automatically and
the size of a figure can be specified using ``figsize``. To choose
particular value columns to be plotted ``yaxis`` can be passed as a
        list of column names. The range of ``r`` values on the horizontal axis
        can be defined by passing a length-2 tuple. It is often the case that
the line length is small (e.g. on a nanoscale) or very large (e.g. in
units of kilometers). Accordingly, ``multiplier`` can be passed as
:math:`10^{n}`, where :math:`n` is a multiple of 3 (..., -6, -3, 0, 3,
6,...). According to that value, the horizontal axis will be scaled and
appropriate units shown. For instance, if ``multiplier=1e-9`` is
passed, all mesh points will be divided by :math:`1\\,\\text{nm}` and
:math:`\\text{nm}` units will be used as axis labels. If ``multiplier``
is not passed, the best one is calculated internally. The plot can be
saved as a PDF when ``filename`` is passed.
This method plots the mesh using ``matplotlib.pyplot.plot()`` function,
so any keyword arguments accepted by it can be passed.
Parameters
----------
ax : matplotlib.axes.Axes, optional
Axes to which the field plot is added. Defaults to ``None`` - axes
are created internally.
figsize : tuple, optional
The size of a created figure if ``ax`` is not passed. Defaults to
``None``.
yaxis : list, optional
A list of value columns to be plotted.
xlim : tuple
A length-2 tuple setting the limits of the horizontal axis.
multiplier : numbers.Real, optional
``multiplier`` can be passed as :math:`10^{n}`, where :math:`n` is
a multiple of 3 (..., -6, -3, 0, 3, 6,...). According to that
value, the axes will be scaled and appropriate units shown. For
instance, if ``multiplier=1e-9`` is passed, the mesh points will be
divided by :math:`1\\,\\text{nm}` and :math:`\\text{nm}` units will
be used as axis labels. Defaults to ``None``.
filename : str, optional
If filename is passed, the plot is saved. Defaults to ``None``.
Examples
--------
1. Visualising the values on the line using ``matplotlib``.
>>> import discretisedfield as df
...
>>> points = [(0, 0, 0), (2, 0, 0), (4, 0, 0)]
>>> values = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] # vector values
>>> line = df.Line(points=points, values=values)
>>> line.mpl()
"""
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
if multiplier is None:
multiplier = uu.si_multiplier(self.length)
if yaxis is None:
yaxis = self.value_columns
for i in yaxis:
ax.plot(np.divide(self.data['r'].to_numpy(), multiplier),
self.data[i], label=i, **kwargs)
ax.set_xlabel(f'r ({uu.rsi_prefixes[multiplier]}m)')
ax.set_ylabel('value')
ax.grid(True) # grid is turned off by default for field plots
ax.legend()
if xlim is not None:
plt.xlim(*np.divide(xlim, multiplier))
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
def slider(self, multiplier=None, **kwargs):
"""Slider for interactive plotting.
Based on the values in the ``r`` column,
``ipywidgets.SelectionRangeSlider`` is returned for navigating
interactive plots.
This method is based on ``ipywidgets.SelectionRangeSlider``, so any
keyword argument accepted by it can be passed.
Parameters
----------
multiplier : numbers.Real, optional
``multiplier`` can be passed as :math:`10^{n}`, where :math:`n` is
a multiple of 3 (..., -6, -3, 0, 3, 6,...). According to that
value, the values will be scaled and appropriate units shown. For
instance, if ``multiplier=1e-9`` is passed, the slider points will
be divided by :math:`1\\,\\text{nm}` and :math:`\\text{nm}` units
will be used in the description. If ``multiplier`` is not passed,
the optimum one is computed internally. Defaults to ``None``.
Returns
-------
ipywidgets.SelectionRangeSlider
``r`` range slider.
Example
-------
1. Get the slider for the horizontal axis.
>>> import discretisedfield as df
...
>>> points = [(0, 0, 0), (2, 0, 0), (4, 0, 0)]
>>> values = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] # vector values
>>> line = df.Line(points=points, values=values)
>>> line.slider()
SelectionRangeSlider(...)
"""
if multiplier is None:
multiplier = uu.si_multiplier(self.length)
values = self.data['r'].to_numpy()
labels = np.around(values/multiplier, decimals=2)
options = list(zip(labels, values))
slider_description = f'r ({uu.rsi_prefixes[multiplier]}m):'
return ipywidgets.SelectionRangeSlider(options=options,
value=(values[0], values[-1]),
description=slider_description,
**kwargs)
def selector(self, **kwargs):
"""Selection list for interactive plotting.
Based on the value columns, ``ipywidgets.SelectMultiple`` widget is
returned for selecting the value columns to be plotted.
This method is based on ``ipywidgets.SelectMultiple``, so any
keyword argument accepted by it can be passed.
Returns
-------
ipywidgets.SelectMultiple
Selection list.
Example
-------
1. Get the widget for selecting value columns.
>>> import discretisedfield as df
...
>>> points = [(0, 0, 0), (2, 0, 0), (4, 0, 0)]
>>> values = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] # vector values
>>> line = df.Line(points=points, values=values)
>>> line.selector()
SelectMultiple(...)
"""
return ipywidgets.SelectMultiple(options=self.value_columns,
value=self.value_columns,
rows=3,
description='y-axis:',
disabled=False,
**kwargs)
|
from django.db import models
from django.db.models import DO_NOTHING
from bpp.fields import YearField
from bpp.models import BazaModeluOdpowiedzialnosciAutorow, TupleField
class RozbieznosciViewBase(models.Model):
id = TupleField(models.IntegerField(), size=3, primary_key=True)
rekord = models.ForeignKey("bpp.Rekord", DO_NOTHING, related_name="+")
rok = YearField()
autor = models.ForeignKey("bpp.Autor", DO_NOTHING, related_name="+")
dyscyplina_rekordu = models.ForeignKey(
"bpp.Dyscyplina_Naukowa", DO_NOTHING, related_name="+", null=True, blank=True
)
dyscyplina_autora = models.ForeignKey(
"bpp.Dyscyplina_Naukowa", DO_NOTHING, related_name="+"
)
subdyscyplina_autora = models.ForeignKey(
"bpp.Dyscyplina_Naukowa", DO_NOTHING, related_name="+", null=True, blank=True
)
class Meta:
managed = False
abstract = True
class BrakPrzypisaniaView(RozbieznosciViewBase):
class Meta:
managed = False
class RozbieznePrzypisaniaView(RozbieznosciViewBase):
class Meta:
managed = False
class RozbieznosciView(RozbieznosciViewBase):
    # Note: if a publication has one and the same author in more than one role (e.g. as
    # editor and as author), this model and the get_wydawnictwo_autor_obj function will fail.
class Meta:
managed = False
verbose_name = "rozbieżność rekordu i dyscyplin"
verbose_name_plural = "rozbieżności rekordów i dyscyplin"
def get_wydawnictwo_autor_obj(self) -> BazaModeluOdpowiedzialnosciAutorow:
        # Note: if a publication has one and the same author in more than one role (e.g. as
        # editor and as author), this model and the get_wydawnictwo_autor_obj function will
        # fail (it returns only the first author + record association).
return self.rekord.original.autorzy_set.filter(autor=self.autor).first()
|
from abc import ABC, abstractmethod
from typing import List, Tuple, Dict
from utils.data_structures import UFDS
from utils.search import strict_binary_search
class TrainingInstancesGenerator(ABC):
@abstractmethod
def generate(self, training_ids: List[int], ufds: UFDS) -> List[Tuple[int, int, int]]:
pass
class BudiInstancesGenerator(TrainingInstancesGenerator):
def __init__(self, document_id_by_markable_id: Dict[int, int]) -> None:
self.document_id_by_markable_id = document_id_by_markable_id
def generate(self, training_ids: List[int], ufds: UFDS) -> List[Tuple[int, int, int]]:
instances = []
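        # Pair each markable with every later markable from the same document; assumes training_ids are grouped by document.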
for a in range(len(training_ids)):
for b in range(a + 1, len(training_ids)):
if self.document_id_by_markable_id[training_ids[a]] != self.document_id_by_markable_id[training_ids[b]]:
break
instances.append(
(training_ids[a], training_ids[b], int(ufds.is_same(training_ids[a], training_ids[b])))
)
return instances
class SoonInstancesGenerator(TrainingInstancesGenerator):
def generate(self, training_ids: List[int], ufds: UFDS) -> List[Tuple[int, int, int]]:
instances = []
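        # Soon-style instances: each adjacent coreferent pair is a positive example and every markable
        # between antecedent and anaphor is paired with the anaphor as a negative example.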
chains = ufds.get_chain_list()
for chain in chains:
for i in range(len(chain) - 1):
instances.append((chain[i], chain[i + 1], 1))
antecedent_idx = strict_binary_search(training_ids, chain[i])
anaphora_idx = strict_binary_search(training_ids, chain[i + 1])
for j in range(antecedent_idx + 1, anaphora_idx):
instances.append((training_ids[j], chain[i + 1], 0))
return instances
class GilangInstancesGenerator(TrainingInstancesGenerator):
def generate(self, training_ids: List[int], ufds: UFDS) -> List[Tuple[int, int, int]]:
instances = []
added_pair = set()
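        # Like the Soon scheme, but also adds negative pairs between non-coreferent markables that lie
        # strictly between the antecedent and the anaphor.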
chains = ufds.get_chain_list()
for chain in chains:
for i in range(len(chain) - 1):
instances.append((chain[i], chain[i + 1], 1))
antecedent_idx = strict_binary_search(training_ids, chain[i])
anaphora_idx = strict_binary_search(training_ids, chain[i + 1])
for j in range(antecedent_idx + 1, anaphora_idx):
for k in range(j + 1, anaphora_idx):
                        if not ufds.is_same(training_ids[j], training_ids[k]) and (j, k) not in added_pair:
instances.append((training_ids[j], training_ids[k], 0))
added_pair.add((j, k))
return instances
|
import datetime
import enum
from typing import List, Any, Dict
from flask import current_app
from itsdangerous import (JSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
from sqlalchemy import ForeignKey
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.orm import relationship
from werkzeug.security import generate_password_hash, check_password_hash
from app.extensions import db
from app.utils import str_from_date, str_from_date_time
class BaseModel(db.Model):
__abstract__ = True
class UserModel(BaseModel):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True) # type:int
# required fields after signup to send messages
registration_id = db.Column(db.Integer, index=True) # type:int
device_id = db.Column(db.Integer, index=True) # type:int
identity_public_key = db.Column(db.String()) # type:str
signed_pre_key = db.Column(db.String()) # type:str
one_time_pre_keys = db.Column(ARRAY(db.String())) # type: List[str]
friends = db.Column(ARRAY(db.Integer())) # type: List[int]
# LoginModel reference
login = relationship("LoginModel", uselist=False, back_populates="user") # type:LoginModel
def to_dict(self):
return {'user_id': self.id,
'username': self.login.username,
'registration_id': self.registration_id,
'device_id': self.device_id,
'identity_public_key': self.identity_public_key,
'one_time_pre_key': self.one_time_pre_keys,
'signed_pre_key': self.signed_pre_key}
def to_public_dict(self):
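        # Expose only the first one-time pre-key from the stored list.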
one_time_pre_key = self.one_time_pre_keys[0]
return {'user_id': self.id,
'username': self.login.username,
'registration_id': self.registration_id,
'device_id': self.device_id,
'identity_public_key': self.identity_public_key,
'one_time_pre_key': one_time_pre_key,
'signed_pre_key': self.signed_pre_key}
class LoginModel(BaseModel):
__tablename__ = 'logins'
user_id = db.Column(db.Integer, ForeignKey(UserModel.id), primary_key=True) # type:int
username = db.Column(db.String(), index=True) # type:str
password_hash = db.Column(db.String(128)) # type:str
user = relationship("UserModel", back_populates="login", uselist=False) # type: UserModel
def hash_password(self, password: str):
self.password_hash = generate_password_hash(password)
def verify_password(self, password: str) -> bool:
return check_password_hash(self.password_hash, password)
def generate_auth_token(self):
s = Serializer(current_app.config['SECRET_KEY'])
return s.dumps({'user_id': self.user_id})
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except SignatureExpired:
return None # expired token
except BadSignature:
return None # invalid token
login = LoginModel.query.get(data['user_id']) # type: LoginModel
return login
class MessageModel(BaseModel):
__tablename__ = 'messages'
sender_id = db.Column(db.Integer, ForeignKey(UserModel.id), primary_key=True) # type:int
receiver_id = db.Column(db.Integer, ForeignKey(UserModel.id), primary_key=True) # type:int
message_ciphertext = db.Column(db.String()) # type:str
timestamp = db.Column(db.DateTime(), primary_key=True) # type:datetime.datetime
def to_dict(self):
return {'sender_id': self.sender_id,
'receiver_id': self.receiver_id,
'message_ciphertext': self.message_ciphertext,
'timestamp': str_from_date_time(self.timestamp)}
|
import numpy as np
def center(data: np.ndarray, axis: int = 0) -> np.ndarray:
'''
centers data by subtracting the mean
'''
means = np.mean(data, axis=axis)
return data - means
def minmax_scale(data: np.ndarray, axis: int = 0) -> np.ndarray:
'''
Scales data by dividing by the range
'''
naive_rngs = np.max(data, axis=axis) - np.min(data, axis=axis) # may include 0
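    # replace zero ranges with 1 so constant features are left unscaled instead of dividing by zero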
rngs = np.where(naive_rngs == 0, 1, naive_rngs)
return data/rngs
|
# Copyright (c) 2013 Ian C. Good
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from collections.abc import Mapping, Sequence
class ConfigValidationError(Exception):
def __init__(self, msg, stack=None):
if stack:
msg += ' in config '+self._repr_stack(stack)
super(ConfigValidationError, self).__init__(msg)
def _repr_stack(self, stack):
ret = []
for item in stack:
if isinstance(item, int):
ret[-1] += '[{0}]'.format(item)
else:
ret.append(item)
return '.'.join(ret)
class ConfigValidation(object):
def __init__(self, cfg):
self.cfg = cfg
def _check_ref(self, section, name):
return name in self.cfg[section]
def _check_keys(self, opts, keydict, stack, only_keys=False):
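        # keydict maps each option name to a tuple of (expected type(s), required flag).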
for k, v in opts.items():
if k not in keydict:
if only_keys:
msg = "Unexpected key '{0}'".format(k)
raise ConfigValidationError(msg, stack)
else:
continue
if not isinstance(v, keydict[k][0]):
type_name = keydict[k][0].__name__.lower()
msg = "Expected key '{0}' to be {1}".format(k, type_name)
raise ConfigValidationError(msg, stack)
del keydict[k]
for k, v in keydict.items():
if v[1]:
msg = "Missing required key '{0}'".format(k)
raise ConfigValidationError(msg, stack)
def _check_process(self, opts, stack):
keydict = {'daemon': (bool, False),
'hostname': (str, False),
'fqdn': (str, False),
'pid_file': (str, False),
'user': (str, False),
'group': (str, False),
'stdout': (str, False),
'stderr': (str, False),
'logging': (Mapping, False)}
self._check_keys(opts, keydict, stack, True)
def _check_lookup(self, opts, stack):
keydict = {'type': (str, True)}
self._check_keys(opts, keydict, stack)
def _check_listener(self, opts, stack):
keydict = {'type': (str, False),
'interface': (str, False),
'port': (int, False),
'path': (str, False),
'factory': (str, False)}
self._check_keys(opts, keydict, stack, True)
if opts.get('type') == 'custom' and not opts.get('factory'):
msg = "The 'factory' key must be given when using 'custom' type"
raise ConfigValidationError(msg, stack)
def _check_edge(self, opts, stack):
keydict = {'type': (str, True),
'queue': (str, True),
'factory': (str, False),
'listener': (Mapping, False),
'listeners': (Sequence, False),
'hostname': (str, False),
'max_size': (int, False),
'tls': (Mapping, False),
'tls_immediately': (bool, False),
'proxyprotocol': (bool, False),
'rules': (Mapping, False)}
self._check_keys(opts, keydict, stack)
if not self._check_ref('queue', opts.queue):
msg = "No match for reference key 'queue'"
raise ConfigValidationError(msg, stack)
if opts.type == 'custom' and not opts.get('factory'):
msg = "The 'factory' key must be given when using 'custom' type"
raise ConfigValidationError(msg, stack)
if 'listeners' in opts:
if 'listener' in opts:
msg = "Cannot use both 'listener' and 'listeners' keys"
raise ConfigValidationError(msg, stack)
for i, listener in enumerate(opts.get('listeners')):
self._check_listener(listener, stack+['listeners', i])
elif 'listener' in opts:
self._check_listener(opts.listener, stack+['listener'])
if 'tls' in opts:
tls_keydict = {'certfile': (str, True),
'keyfile': (str, True),
'ca_certs': (str, False)}
self._check_keys(opts.tls, tls_keydict, stack+['tls'])
if 'rules' in opts:
rules_keydict = {'banner': (str, False),
'dnsbl': ((str, Sequence), False),
'reject_spf': (Sequence, False),
'lookup_senders': (Mapping, False),
'lookup_recipients': (Mapping, False),
'only_senders': (Sequence, False),
'only_recipients': (Sequence, False),
'regex_senders': (Sequence, False),
'regex_recipients': (Sequence, False),
'lookup_credentials': (Mapping, False),
'password_hash': (str, False),
'reject_spam': (Mapping, False)}
self._check_keys(opts.rules, rules_keydict, stack+['rules'], True)
        if 'lookup_senders' in opts.rules:
            self._check_lookup(opts.rules.lookup_senders,
                               stack+['lookup_senders'])
if 'lookup_recipients' in opts.rules:
self._check_lookup(opts.rules.lookup_recipients,
stack+['lookup_recipients'])
if 'lookup_credentials' in opts.rules:
self._check_lookup(opts.rules.lookup_credentials,
stack+['lookup_credentials'])
def _check_queue(self, opts, stack):
keydict = {'type': (str, True),
'relay': (str, False),
'factory': (str, False),
'bounce_queue': (str, False),
'retry': (Mapping, False),
'policies': (Sequence, False)}
self._check_keys(opts, keydict, stack)
if 'relay' in opts and not self._check_ref('relay', opts.relay):
msg = "No match for reference key 'relay'"
raise ConfigValidationError(msg, stack)
if 'bounce_queue' in opts and not self._check_ref('queue',
opts.bounce_queue):
msg = "No match for reference key 'bounce_queue'"
raise ConfigValidationError(msg, stack)
if opts.type == 'custom' and not opts.get('factory'):
msg = "The 'factory' key must be given when using 'custom' type"
raise ConfigValidationError(msg, stack)
policies = opts.get('policies', [])
for i, p in enumerate(policies):
mystack = stack + ['policies', i]
if not isinstance(p, Mapping):
msg = 'Expected dictionary'
raise ConfigValidationError(msg, mystack)
self._check_keys(p, {'type': (str, True)}, mystack)
if 'retry' in opts:
retry_keydict = {'maximum': (int, False),
'delay': (str, False)}
self._check_keys(opts.retry, retry_keydict, stack+['retry'], True)
def _check_relay(self, opts, stack):
keydict = {'type': (str, True),
'factory': (str, False),
'ehlo_as': (str, False),
'credentials': (Mapping, False),
'override_mx': (Mapping, False),
'ipv4_only': (bool, False)}
self._check_keys(opts, keydict, stack)
if opts.type == 'custom' and not opts.get('factory'):
msg = "The 'factory' key must be given when using 'custom' type"
raise ConfigValidationError(msg, stack)
if opts.type == 'pipe':
pipe_keydict = {'args': (list, True)}
self._check_keys(opts, pipe_keydict, stack)
for arg in opts.args:
if not isinstance(arg, str):
msg = "All 'args' must be strings"
raise ConfigValidationError(msg, stack+['args'])
if 'credentials' in opts:
creds_keydict = {'username': (str, True),
'password': (str, True)}
self._check_keys(opts.credentials, creds_keydict,
stack+['credentials'], True)
def _check_toplevel(self, stack, program):
if not isinstance(self.cfg, Mapping):
msg = 'Expected mapping'
raise ConfigValidationError(msg, stack)
keydict = {'process': (Mapping, True),
'edge': (Mapping, False),
'relay': (Mapping, False),
'queue': (Mapping, True)}
self._check_keys(self.cfg, keydict, stack)
for process, opts in self.cfg.process.items():
self._check_process(opts, stack+['process', process])
if 'edge' in self.cfg:
for edge, opts in self.cfg.edge.items():
self._check_edge(opts, stack+['edge', edge])
for queue, opts in self.cfg.queue.items():
self._check_queue(opts, stack+['queue', queue])
if 'relay' in self.cfg:
for relay, opts in self.cfg.relay.items():
self._check_relay(opts, stack+['relay', relay])
if program not in self.cfg.process:
msg = "Missing required key '{0}'".format(program)
raise ConfigValidationError(msg, stack+['process'])
@classmethod
def check(cls, cfg, program):
return cls(cfg)._check_toplevel(['root'], program)
# vim:et:fdm=marker:sts=4:sw=4:ts=4
|
#! coding: utf-8
import itertools
import time
from unittest import TestCase
from dogpile.cache import util
from dogpile.cache.api import NO_VALUE
from dogpile.util import compat
from . import eq_
from . import winsleep
from ._fixtures import _GenericBackendFixture
class DecoratorTest(_GenericBackendFixture, TestCase):
backend = "dogpile.cache.memory"
def _fixture(
self, namespace=None, expiration_time=None, key_generator=None
):
reg = self._region(config_args={"expiration_time": 0.25})
counter = itertools.count(1)
@reg.cache_on_arguments(
namespace=namespace,
expiration_time=expiration_time,
function_key_generator=key_generator,
)
def go(a, b):
val = next(counter)
return val, a, b
return go
def _multi_fixture(
self, namespace=None, expiration_time=None, key_generator=None
):
reg = self._region(config_args={"expiration_time": 0.25})
counter = itertools.count(1)
@reg.cache_multi_on_arguments(
namespace=namespace,
expiration_time=expiration_time,
function_multi_key_generator=key_generator,
)
def go(*args):
val = next(counter)
return ["%d %s" % (val, arg) for arg in args]
return go
def test_decorator(self):
go = self._fixture()
eq_(go(1, 2), (1, 1, 2))
eq_(go(3, 4), (2, 3, 4))
eq_(go(1, 2), (1, 1, 2))
time.sleep(0.3)
eq_(go(1, 2), (3, 1, 2))
def test_decorator_namespace(self):
# TODO: test the namespace actually
# working somehow...
go = self._fixture(namespace="x")
eq_(go(1, 2), (1, 1, 2))
eq_(go(3, 4), (2, 3, 4))
eq_(go(1, 2), (1, 1, 2))
time.sleep(0.3)
eq_(go(1, 2), (3, 1, 2))
def test_decorator_custom_expire(self):
go = self._fixture(expiration_time=0.5)
eq_(go(1, 2), (1, 1, 2))
eq_(go(3, 4), (2, 3, 4))
eq_(go(1, 2), (1, 1, 2))
time.sleep(0.3)
eq_(go(1, 2), (1, 1, 2))
time.sleep(0.3)
eq_(go(1, 2), (3, 1, 2))
def test_decorator_expire_callable(self):
go = self._fixture(expiration_time=lambda: 0.5)
eq_(go(1, 2), (1, 1, 2))
eq_(go(3, 4), (2, 3, 4))
eq_(go(1, 2), (1, 1, 2))
time.sleep(0.3)
eq_(go(1, 2), (1, 1, 2))
time.sleep(0.3)
eq_(go(1, 2), (3, 1, 2))
def test_decorator_expire_callable_zero(self):
go = self._fixture(expiration_time=lambda: 0)
eq_(go(1, 2), (1, 1, 2))
winsleep()
eq_(go(1, 2), (2, 1, 2))
winsleep()
eq_(go(1, 2), (3, 1, 2))
def test_explicit_expire(self):
go = self._fixture(expiration_time=1)
eq_(go(1, 2), (1, 1, 2))
eq_(go(3, 4), (2, 3, 4))
eq_(go(1, 2), (1, 1, 2))
go.invalidate(1, 2)
eq_(go(1, 2), (3, 1, 2))
def test_explicit_set(self):
go = self._fixture(expiration_time=1)
eq_(go(1, 2), (1, 1, 2))
go.set(5, 1, 2)
eq_(go(3, 4), (2, 3, 4))
eq_(go(1, 2), 5)
go.invalidate(1, 2)
eq_(go(1, 2), (3, 1, 2))
go.set(0, 1, 3)
eq_(go(1, 3), 0)
def test_explicit_get(self):
go = self._fixture(expiration_time=1)
eq_(go(1, 2), (1, 1, 2))
eq_(go.get(1, 2), (1, 1, 2))
eq_(go.get(2, 1), NO_VALUE)
eq_(go(2, 1), (2, 2, 1))
eq_(go.get(2, 1), (2, 2, 1))
def test_explicit_get_multi(self):
go = self._multi_fixture(expiration_time=1)
eq_(go(1, 2), ["1 1", "1 2"])
eq_(go.get(1, 2), ["1 1", "1 2"])
eq_(go.get(3, 1), [NO_VALUE, "1 1"])
eq_(go(3, 1), ["2 3", "1 1"])
eq_(go.get(3, 1), ["2 3", "1 1"])
def test_explicit_set_multi(self):
go = self._multi_fixture(expiration_time=1)
eq_(go(1, 2), ["1 1", "1 2"])
eq_(go(1, 2), ["1 1", "1 2"])
go.set({1: "1 5", 2: "1 6"})
eq_(go(1, 2), ["1 5", "1 6"])
def test_explicit_refresh(self):
go = self._fixture(expiration_time=1)
eq_(go(1, 2), (1, 1, 2))
eq_(go.refresh(1, 2), (2, 1, 2))
eq_(go(1, 2), (2, 1, 2))
eq_(go(1, 2), (2, 1, 2))
eq_(go.refresh(1, 2), (3, 1, 2))
eq_(go(1, 2), (3, 1, 2))
def test_explicit_refresh_multi(self):
go = self._multi_fixture(expiration_time=1)
eq_(go(1, 2), ["1 1", "1 2"])
eq_(go(1, 2), ["1 1", "1 2"])
eq_(go.refresh(1, 2), ["2 1", "2 2"])
eq_(go(1, 2), ["2 1", "2 2"])
eq_(go(1, 2), ["2 1", "2 2"])
def test_decorator_key_generator(self):
def my_key_generator(namespace, fn, **kw):
fname = fn.__name__
def generate_key_with_first_argument(*args):
return fname + "_" + str(args[0])
return generate_key_with_first_argument
go = self._fixture(key_generator=my_key_generator)
eq_(go(1, 2), (1, 1, 2))
eq_(go(3, 4), (2, 3, 4))
eq_(go(1, 3), (1, 1, 2))
time.sleep(0.3)
eq_(go(1, 3), (3, 1, 3))
def test_decorator_key_generator_multi(self):
def my_key_generator(namespace, fn, **kw):
fname = fn.__name__
def generate_key_with_reversed_order(*args):
return [fname + "_" + str(a) for a in args][::-1]
return generate_key_with_reversed_order
go = self._multi_fixture(key_generator=my_key_generator)
eq_(go(1, 2), ["1 1", "1 2"])
eq_(go.get(1, 2), ["1 1", "1 2"])
eq_(go.get(3, 1), ["1 2", NO_VALUE])
eq_(go(3, 1), ["1 2", "2 1"])
eq_(go.get(3, 1), ["1 2", "2 1"])
class KeyGenerationTest(TestCase):
def _keygen_decorator(self, namespace=None, **kw):
canary = []
def decorate(fn):
canary.append(util.function_key_generator(namespace, fn, **kw))
return fn
return decorate, canary
def _multi_keygen_decorator(self, namespace=None, **kw):
canary = []
def decorate(fn):
canary.append(
util.function_multi_key_generator(namespace, fn, **kw)
)
return fn
return decorate, canary
def _kwarg_keygen_decorator(self, namespace=None, **kw):
canary = []
def decorate(fn):
canary.append(
util.kwarg_function_key_generator(namespace, fn, **kw)
)
return fn
return decorate, canary
def test_default_keygen_kwargs_raises_value_error(self):
decorate, canary = self._keygen_decorator()
@decorate
def one(a, b):
pass
gen = canary[0]
self.assertRaises(ValueError, gen, 1, b=2)
def test_kwarg_kegen_keygen_fn(self):
decorate, canary = self._kwarg_keygen_decorator()
@decorate
def one(a, b):
pass
gen = canary[0]
result_key = "tests.cache.test_decorator:one|1 2"
eq_(gen(1, 2), result_key)
eq_(gen(1, b=2), result_key)
eq_(gen(a=1, b=2), result_key)
eq_(gen(b=2, a=1), result_key)
def test_kwarg_kegen_keygen_fn_with_defaults_and_positional(self):
decorate, canary = self._kwarg_keygen_decorator()
@decorate
def one(a, b=None):
pass
gen = canary[0]
result_key = "tests.cache.test_decorator:one|1 2"
eq_(gen(1, 2), result_key)
eq_(gen(1, b=2), result_key)
eq_(gen(a=1, b=2), result_key)
eq_(gen(b=2, a=1), result_key)
eq_(gen(a=1), "tests.cache.test_decorator:one|1 None")
def test_kwarg_kegen_keygen_fn_all_defaults(self):
decorate, canary = self._kwarg_keygen_decorator()
@decorate
def one(a=True, b=None):
pass
gen = canary[0]
result_key = "tests.cache.test_decorator:one|1 2"
eq_(gen(1, 2), result_key)
eq_(gen(1, b=2), result_key)
eq_(gen(a=1, b=2), result_key)
eq_(gen(b=2, a=1), result_key)
eq_(gen(a=1), "tests.cache.test_decorator:one|1 None")
eq_(gen(1), "tests.cache.test_decorator:one|1 None")
eq_(gen(), "tests.cache.test_decorator:one|True None")
eq_(gen(b=2), "tests.cache.test_decorator:one|True 2")
def test_keygen_fn(self):
decorate, canary = self._keygen_decorator()
@decorate
def one(a, b):
pass
gen = canary[0]
eq_(gen(1, 2), "tests.cache.test_decorator:one|1 2")
eq_(gen(None, 5), "tests.cache.test_decorator:one|None 5")
def test_multi_keygen_fn(self):
decorate, canary = self._multi_keygen_decorator()
@decorate
def one(a, b):
pass
gen = canary[0]
eq_(
gen(1, 2),
[
"tests.cache.test_decorator:one|1",
"tests.cache.test_decorator:one|2",
],
)
def test_keygen_fn_namespace(self):
decorate, canary = self._keygen_decorator("mynamespace")
@decorate
def one(a, b):
pass
gen = canary[0]
eq_(gen(1, 2), "tests.cache.test_decorator:one|mynamespace|1 2")
eq_(gen(None, 5), "tests.cache.test_decorator:one|mynamespace|None 5")
def test_kwarg_keygen_fn_namespace(self):
decorate, canary = self._kwarg_keygen_decorator("mynamespace")
@decorate
def one(a, b):
pass
gen = canary[0]
eq_(gen(1, 2), "tests.cache.test_decorator:one|mynamespace|1 2")
eq_(gen(None, 5), "tests.cache.test_decorator:one|mynamespace|None 5")
def test_key_isnt_unicode_bydefault(self):
decorate, canary = self._keygen_decorator("mynamespace")
@decorate
def one(a, b):
pass
gen = canary[0]
assert isinstance(gen("foo"), str)
def test_kwarg_kwgen_key_isnt_unicode_bydefault(self):
decorate, canary = self._kwarg_keygen_decorator("mynamespace")
@decorate
def one(a, b):
pass
gen = canary[0]
assert isinstance(gen("foo"), str)
def test_unicode_key(self):
decorate, canary = self._keygen_decorator("mynamespace", to_str=str)
@decorate
def one(a, b):
pass
gen = canary[0]
eq_(
gen("méil", "drôle"),
"tests.cache.test_decorator:one|mynamespace|m\xe9il dr\xf4le",
)
def test_unicode_key_kwarg_generator(self):
decorate, canary = self._kwarg_keygen_decorator(
"mynamespace", to_str=str
)
@decorate
def one(a, b):
pass
gen = canary[0]
eq_(
gen("méil", "drôle"),
"tests.cache.test_decorator:one|mynamespace|m\xe9il dr\xf4le",
)
def test_unicode_key_multi(self):
decorate, canary = self._multi_keygen_decorator(
"mynamespace", to_str=str
)
@decorate
def one(a, b):
pass
gen = canary[0]
eq_(
gen("méil", "drôle"),
[
"tests.cache.test_decorator:one|mynamespace|m\xe9il",
"tests.cache.test_decorator:one|mynamespace|dr\xf4le",
],
)
def test_unicode_key_by_default(self):
decorate, canary = self._keygen_decorator("mynamespace", to_str=str)
@decorate
def one(a, b):
pass
gen = canary[0]
assert isinstance(gen("méil"), str)
eq_(
gen("méil", "drôle"),
"tests.cache.test_decorator:" "one|mynamespace|m\xe9il dr\xf4le",
)
def test_unicode_key_by_default_kwarg_generator(self):
decorate, canary = self._kwarg_keygen_decorator(
"mynamespace", to_str=str
)
@decorate
def one(a, b):
pass
gen = canary[0]
assert isinstance(gen("méil"), str)
eq_(
gen("méil", "drôle"),
"tests.cache.test_decorator:" "one|mynamespace|m\xe9il dr\xf4le",
)
def test_sha1_key_mangler(self):
decorate, canary = self._keygen_decorator()
@decorate
def one(a, b):
pass
gen = canary[0]
key = gen(1, 2)
eq_(
util.sha1_mangle_key(key),
"aead490a8ace2d69a00160f1fd8fd8a16552c24f",
)
def test_sha1_key_mangler_unicode_py2k(self):
eq_(
util.sha1_mangle_key("some_key"),
"53def077a4264bd3183d4eb21b1f56f883e1b572",
)
def test_sha1_key_mangler_bytes_py3k(self):
eq_(
util.sha1_mangle_key(b"some_key"),
"53def077a4264bd3183d4eb21b1f56f883e1b572",
)
class CacheDecoratorTest(_GenericBackendFixture, TestCase):
backend = "mock"
def test_cache_arg(self):
reg = self._region()
counter = itertools.count(1)
@reg.cache_on_arguments()
def generate(x, y):
return next(counter) + x + y
eq_(generate(1, 2), 4)
eq_(generate(2, 1), 5)
eq_(generate(1, 2), 4)
generate.invalidate(1, 2)
eq_(generate(1, 2), 6)
def test_original_fn_set(self):
reg = self._region(backend="dogpile.cache.memory")
counter = itertools.count(1)
def generate(x, y):
return next(counter) + x + y
decorated = reg.cache_on_arguments()(generate)
eq_(decorated.original, generate)
def test_reentrant_call(self):
reg = self._region(backend="dogpile.cache.memory")
counter = itertools.count(1)
# if these two classes get the same namespace,
# you get a reentrant deadlock.
class Foo(object):
@classmethod
@reg.cache_on_arguments(namespace="foo")
def generate(cls, x, y):
return next(counter) + x + y
class Bar(object):
@classmethod
@reg.cache_on_arguments(namespace="bar")
def generate(cls, x, y):
return Foo.generate(x, y)
eq_(Bar.generate(1, 2), 4)
def test_multi(self):
reg = self._region()
counter = itertools.count(1)
@reg.cache_multi_on_arguments()
def generate(*args):
return ["%d %d" % (arg, next(counter)) for arg in args]
eq_(generate(2, 8, 10), ["2 2", "8 3", "10 1"])
eq_(generate(2, 9, 10), ["2 2", "9 4", "10 1"])
generate.invalidate(2)
eq_(generate(2, 7, 10), ["2 5", "7 6", "10 1"])
generate.set({7: 18, 10: 15})
eq_(generate(2, 7, 10), ["2 5", 18, 15])
def test_multi_asdict(self):
reg = self._region()
counter = itertools.count(1)
@reg.cache_multi_on_arguments(asdict=True)
def generate(*args):
return dict(
[(arg, "%d %d" % (arg, next(counter))) for arg in args]
)
eq_(generate(2, 8, 10), {2: "2 2", 8: "8 3", 10: "10 1"})
eq_(generate(2, 9, 10), {2: "2 2", 9: "9 4", 10: "10 1"})
generate.invalidate(2)
eq_(generate(2, 7, 10), {2: "2 5", 7: "7 6", 10: "10 1"})
generate.set({7: 18, 10: 15})
eq_(generate(2, 7, 10), {2: "2 5", 7: 18, 10: 15})
eq_(generate.refresh(2, 7), {2: "2 7", 7: "7 8"})
eq_(generate(2, 7, 10), {2: "2 7", 10: 15, 7: "7 8"})
def test_multi_asdict_keys_missing(self):
reg = self._region()
counter = itertools.count(1)
@reg.cache_multi_on_arguments(asdict=True)
def generate(*args):
return dict(
[
(arg, "%d %d" % (arg, next(counter)))
for arg in args
if arg != 10
]
)
eq_(generate(2, 8, 10), {2: "2 1", 8: "8 2"})
eq_(generate(2, 9, 10), {2: "2 1", 9: "9 3"})
assert reg.get(10) is NO_VALUE
generate.invalidate(2)
eq_(generate(2, 7, 10), {2: "2 4", 7: "7 5"})
generate.set({7: 18, 10: 15})
eq_(generate(2, 7, 10), {2: "2 4", 7: 18, 10: 15})
def test_multi_asdict_keys_missing_existing_cache_fn(self):
reg = self._region()
counter = itertools.count(1)
@reg.cache_multi_on_arguments(
asdict=True, should_cache_fn=lambda v: not v.startswith("8 ")
)
def generate(*args):
return dict(
[
(arg, "%d %d" % (arg, next(counter)))
for arg in args
if arg != 10
]
)
eq_(generate(2, 8, 10), {2: "2 1", 8: "8 2"})
eq_(generate(2, 8, 10), {2: "2 1", 8: "8 3"})
eq_(generate(2, 8, 10), {2: "2 1", 8: "8 4"})
eq_(generate(2, 9, 10), {2: "2 1", 9: "9 5"})
assert reg.get(10) is NO_VALUE
generate.invalidate(2)
eq_(generate(2, 7, 10), {2: "2 6", 7: "7 7"})
generate.set({7: 18, 10: 15})
eq_(generate(2, 7, 10), {2: "2 6", 7: 18, 10: 15})
def test_multi_namespace(self):
reg = self._region()
counter = itertools.count(1)
@reg.cache_multi_on_arguments(namespace="foo")
def generate(*args):
return ["%d %d" % (arg, next(counter)) for arg in args]
eq_(generate(2, 8, 10), ["2 2", "8 3", "10 1"])
eq_(generate(2, 9, 10), ["2 2", "9 4", "10 1"])
eq_(
sorted(list(reg.backend._cache)),
[
"tests.cache.test_decorator:generate|foo|10",
"tests.cache.test_decorator:generate|foo|2",
"tests.cache.test_decorator:generate|foo|8",
"tests.cache.test_decorator:generate|foo|9",
],
)
generate.invalidate(2)
eq_(generate(2, 7, 10), ["2 5", "7 6", "10 1"])
generate.set({7: 18, 10: 15})
eq_(generate(2, 7, 10), ["2 5", 18, 15])
def test_cache_preserve_sig(self):
reg = self._region()
def func(a, b, c=True, *args, **kwargs):
return None
signature = compat.inspect_getargspec(func)
cached_func = reg.cache_on_arguments()(func)
cached_signature = compat.inspect_getargspec(cached_func)
self.assertEqual(signature, cached_signature)
def test_cache_multi_preserve_sig(self):
reg = self._region()
def func(a, b, c=True, *args, **kwargs):
return None, None
signature = compat.inspect_getargspec(func)
cached_func = reg.cache_multi_on_arguments()(func)
cached_signature = compat.inspect_getargspec(cached_func)
self.assertEqual(signature, cached_signature)
|
from django.test import TestCase, Client
from django.contrib.auth.models import User
# Setup functions
def add_test_user():
user_to_test = User.objects.create_user(
username="user_test",
password="teste",
is_staff=True
)
return user_to_test
# Tests
class TestAddClient(TestCase):
def setUp(self):
user_to_test = add_test_user()
login = self.client.login(username=user_to_test.username, password="teste")
self.client.get('/config/add-seeds/ajax/estados/')
self.client.get('/config/add-seeds/ajax/municipios/')
self.client.get('/config/add-seeds/ajax/tipos-telefone/')
def test_search_new_client(self):
data_to_post = {
"nome": "Cliente teste - Teste",
"fone": "62900000000",
"email": "teste@gmail.com"
}
response = self.client.post(
'/clientes/', data_to_post)
self.assertEqual(response.status_code, 302)
def test_add_new_client(self):
self.client.post('/clientes/',
{
"nome": "Cliente teste - Teste",
"fone": "62900000000",
"email": "teste@gmail.com"
})
data_to_post = {
"nome": "Cliente teste",
"telefone": "62900000000",
"email": "teste@gmail.com",
"tratamento": "Sr.",
"descricao": "Cliente teste - Teste",
"cnpj": "00000000000000",
"juridica": "0",
"genero": "1",
"empresa": "",
"endereco": "0"
}
response = self.client.post(
'/clientes/dados-cliente/', data_to_post)
self.assertEqual(response.status_code, 302)
def test_add_new_address(self):
self.client.post('/clientes/',
{
"nome": "Cliente teste - Teste",
"fone": "62900000000",
"email": "teste@gmail.com"
})
self.client.post(
'/clientes/dados-cliente/', {
"nome": "Cliente teste",
"telefone": "62900000000",
"email": "teste@gmail.com",
"tratamento": "Sr.",
"descricao": "Cliente teste - Teste",
"cnpj": "00000000000000",
"juridica": "0",
"genero": "1",
"empresa": "",
"endereco": "0"
})
data_to_post = {
"regiao": "Centro Oeste",
"estado": "GO",
"cidade": "94",
"novo_bairro": "Bairro teste",
"novo_logradouro": "Logradouro Teste",
"complemento": "00000000000000"
}
response = self.client.post(
'/clientes/cadastrar-novo-endereco/', data_to_post)
self.assertEqual(response.status_code, 302)
|
from django.test import TestCase
from django.conf import settings
from django.core.management import call_command
from cla_common.constants import DIAGNOSIS_SCOPE, MATTER_TYPE_LEVELS
from legalaid.models import Category, MatterType
from diagnosis.graph import get_graph
from diagnosis.utils import get_node_scope_value
class GraphTestCase(TestCase):
def setUp(self):
self.graph = get_graph(file_name=settings.DIAGNOSIS_FILE_NAME)
self.checker_graph = get_graph(file_name=settings.CHECKER_DIAGNOSIS_FILE_NAME)
call_command("loaddata", "initial_category")
call_command("loaddata", "initial_mattertype")
def assertCategoryInContext(self, context, nodes):
# checking that the category is set and is valid
category_name = context.get("category")
try:
return Category.objects.get(code=category_name)
# GOOD
except Category.DoesNotExist:
self.assertTrue(
False,
"None of the nodes in this path (%s) have category set! Or the category doesn't match any record in the database (category: %s)"
% ("\n".join([node["label"] + " " + node["id"] for node in nodes]), category_name),
)
def assertMatterTypesInContext(self, context, category, nodes):
matter_type1_code = context.get("matter-type-1")
matter_type2_code = context.get("matter-type-2")
if matter_type2_code and not matter_type1_code:
self.assertTrue(
False,
"MatterType2 (%s) set but MatterType1 == None for nodes in this path (%s)"
% (matter_type2_code, "\n".join([node["label"] + " " + node["id"] for node in nodes])),
)
self.assertMatterType(matter_type1_code, MATTER_TYPE_LEVELS.ONE, category, nodes)
self.assertMatterType(matter_type2_code, MATTER_TYPE_LEVELS.TWO, category, nodes)
def assertMatterType(self, matter_type_code, level, category, nodes):
if matter_type_code:
# checking that matter type is valid
try:
return MatterType.objects.get(code=matter_type_code, level=level, category=category)
except MatterType.DoesNotExist:
self.assertTrue(
False,
"MatterType (%s) for nodes in this path (%s) doesn't match any record in the database (level %s, category %s)"
% (
matter_type_code,
"\n".join([node["label"] + " " + node["id"] for node in nodes]),
level,
category.code,
),
)
def test_end_nodes_have_category(self):
def move_down(node_id, context, nodes):
node = self.graph.node[node_id]
node["id"] = node_id
nodes = list(nodes)
nodes.append(node)
context = dict(context)
context.update(node["context"] or {})
scope_value = get_node_scope_value(self.graph, node_id)
if scope_value in [DIAGNOSIS_SCOPE.INSCOPE, DIAGNOSIS_SCOPE.OUTOFSCOPE]:
category = self.assertCategoryInContext(context, nodes)
self.assertMatterTypesInContext(context, category, nodes)
for child_id in self.graph.successors(node_id):
move_down(child_id, context, nodes)
move_down("start", {}, [])
move_down("start", {}, [])
def test_nodes_have_heading(self):
checker_graph = get_graph(file_name=settings.CHECKER_DIAGNOSIS_FILE_NAME)
node = checker_graph.node["n43n2"]
self.assertEqual(node["heading"], u"Choose the option that best describes your debt problem")
def test_nodes_have_subheading(self):
_graph = get_graph(file_name=settings.DIAGNOSIS_FILE_NAME)
node = _graph.node["n97"]
self.assertEqual(
node["context"]["subheading"],
u"If a local authority is involved in taking a child into care and the applicant has received a letter of proceedings or letter of issue sent or client has a court date, a financial assessment is not required",
)
def test_nodes_have_description(self):
_graph = get_graph(file_name=settings.DIAGNOSIS_FILE_NAME)
node = _graph.node["n404"]
self.assertEqual(
unicode(node["description"]),
u"<p><strong>The client has received a letter of proceedings, letter of issue or have a court date.</strong></p>\n<p>No financial assessment is required.</p>",
)
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import fnmatch
import time
import re
import datetime
from collections import OrderedDict
import numpy as np
from .. import units as u
from .. import _erfa as erfa
from ..extern import six
from ..extern.six.moves import zip
from .utils import day_frac, two_sum
__all__ = ['TimeFormat', 'TimeJD', 'TimeMJD', 'TimeFromEpoch', 'TimeUnix',
'TimeCxcSec', 'TimeGPS', 'TimeDecimalYear',
'TimePlotDate', 'TimeUnique', 'TimeDatetime', 'TimeString',
'TimeISO', 'TimeISOT', 'TimeFITS', 'TimeYearDayTime',
'TimeEpochDate', 'TimeBesselianEpoch', 'TimeJulianEpoch',
'TimeDeltaFormat', 'TimeDeltaSec', 'TimeDeltaJD',
'TimeEpochDateString', 'TimeBesselianEpochString',
'TimeJulianEpochString', 'TIME_FORMATS', 'TIME_DELTA_FORMATS',
'TimezoneInfo']
__doctest_skip__ = ['TimePlotDate']
# These both get filled in at end after TimeFormat subclasses defined.
# Use an OrderedDict to fix the order in which formats are tried.
# This ensures, e.g., that 'isot' gets tried before 'fits'.
TIME_FORMATS = OrderedDict()
TIME_DELTA_FORMATS = OrderedDict()
# Translations between deprecated FITS timescales defined by
# Rots et al. 2015, A&A 574:A36, and timescales used here.
FITS_DEPRECATED_SCALES = {'TDT': 'tt', 'ET': 'tt',
'GMT': 'utc', 'UT': 'utc', 'IAT': 'tai'}
def _regexify_subfmts(subfmts):
"""
Iterate through each of the sub-formats and try substituting simple
regular expressions for the strptime codes for year, month, day-of-month,
hour, minute, second. If no % characters remain then turn the final string
into a compiled regex. This assumes time formats do not have a % in them.
This is done both to speed up parsing of strings and to allow mixed formats
where strptime does not quite work well enough.
"""
new_subfmts = []
for subfmt_tuple in subfmts:
subfmt_in = subfmt_tuple[1]
for strptime_code, regex in (('%Y', r'(?P<year>\d\d\d\d)'),
('%m', r'(?P<mon>\d{1,2})'),
('%d', r'(?P<mday>\d{1,2})'),
('%H', r'(?P<hour>\d{1,2})'),
('%M', r'(?P<min>\d{1,2})'),
('%S', r'(?P<sec>\d{1,2})')):
subfmt_in = subfmt_in.replace(strptime_code, regex)
if '%' not in subfmt_in:
subfmt_tuple = (subfmt_tuple[0],
re.compile(subfmt_in + '$'),
subfmt_tuple[2])
new_subfmts.append(subfmt_tuple)
return tuple(new_subfmts)
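# Illustrative sketch (not part of the original module): for a subformat tuple of
# (name, strptime_format, output_format), _regexify_subfmts swaps the strptime codes
# for named regex groups, so an input format like '%Y-%m-%d' becomes the compiled
# pattern r'(?P<year>\d\d\d\d)-(?P<mon>\d{1,2})-(?P<mday>\d{1,2})$', while any
# subformat that still contains '%' after the substitutions is kept as a plain
# strptime string.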
class TimeFormatMeta(type):
"""
Metaclass that adds `TimeFormat` and `TimeDeltaFormat` to the
`TIME_FORMATS` and `TIME_DELTA_FORMATS` registries, respectively.
"""
_registry = TIME_FORMATS
def __new__(mcls, name, bases, members):
cls = super(TimeFormatMeta, mcls).__new__(mcls, name, bases, members)
# Register time formats that have a name, but leave out astropy_time since
# it is not a user-accessible format and is only used for initialization into
# a different format.
if 'name' in members and cls.name != 'astropy_time':
mcls._registry[cls.name] = cls
if 'subfmts' in members:
cls.subfmts = _regexify_subfmts(members['subfmts'])
return cls
@six.add_metaclass(TimeFormatMeta)
class TimeFormat(object):
"""
Base class for time representations.
Parameters
----------
val1 : numpy ndarray, list, str, or number
Data to initialize table.
val2 : numpy ndarray, list, str, or number; optional
Data to initialize table.
scale : str
Time scale of input value(s)
precision : int
Precision for seconds as floating point
in_subfmt : str
Select subformat for inputting string times
out_subfmt : str
Select subformat for outputting string times
from_jd : bool
If true then val1, val2 are jd1, jd2
"""
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale # validation of scale done later with _check_scale
self.precision = precision
self.in_subfmt = in_subfmt
self.out_subfmt = out_subfmt
if from_jd:
self.jd1 = val1
self.jd2 = val2
else:
val1, val2 = self._check_val_type(val1, val2)
self.set_jds(val1, val2)
def __len__(self):
return len(self.jd1)
@property
def scale(self):
"""Time scale"""
self._scale = self._check_scale(self._scale)
return self._scale
@scale.setter
def scale(self, val):
self._scale = val
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
if not (val1.dtype == np.double and np.all(np.isfinite(val1)) and
(val2 is None or
val2.dtype == np.double and np.all(np.isfinite(val2)))):
raise TypeError('Input values for {0} class must be finite doubles'
.format(self.name))
if getattr(val1, 'unit', None) is not None:
            # The (possibly scaled) unit that any quantity-like inputs should be converted to
_unit = u.CompositeUnit(getattr(self, 'unit', 1.), [u.day], [1])
val1 = u.Quantity(val1, copy=False).to_value(_unit)
if val2 is not None:
val2 = u.Quantity(val2, copy=False).to_value(_unit)
elif getattr(val2, 'unit', None) is not None:
raise TypeError('Cannot mix float and Quantity inputs')
if val2 is None:
val2 = np.zeros_like(val1)
def asarray_or_scalar(val):
"""
Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray
or a Python or numpy scalar.
"""
return np.asarray(val) if isinstance(val, np.ndarray) else val
return asarray_or_scalar(val1), asarray_or_scalar(val2)
def _check_scale(self, scale):
"""
Return a validated scale value.
If there is a class attribute 'scale' then that defines the default /
        required time scale for this format. In this case, if a scale value
        was provided it needs to match the class default; otherwise the class
        default is returned.
Otherwise just make sure that scale is in the allowed list of
scales. Provide a different error message if `None` (no value) was
supplied.
"""
if hasattr(self.__class__, 'epoch_scale') and scale is None:
scale = self.__class__.epoch_scale
if scale is None:
scale = 'utc' # Default scale as of astropy 0.4
if scale not in TIME_SCALES:
raise ScaleValueError("Scale value '{0}' not in "
"allowed values {1}"
.format(scale, TIME_SCALES))
return scale
def set_jds(self, val1, val2):
"""
Set internal jd1 and jd2 from val1 and val2. Must be provided
by derived classes.
"""
raise NotImplementedError
def to_value(self, parent=None):
"""
Return time representation from internal jd1 and jd2. This is
the base method that ignores ``parent`` and requires that
subclasses implement the ``value`` property. Subclasses that
require ``parent`` or have other optional args for ``to_value``
should compute and return the value directly.
"""
return self.value
@property
def value(self):
raise NotImplementedError
class TimeJD(TimeFormat):
"""
Julian Date time format.
This represents the number of days since the beginning of
the Julian Period.
For example, 2451544.5 in JD is midnight on January 1, 2000.
"""
name = 'jd'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2)
@property
def value(self):
return self.jd1 + self.jd2
class TimeMJD(TimeFormat):
"""
Modified Julian Date time format.
This represents the number of days since midnight on November 17, 1858.
For example, 51544.0 in MJD is midnight on January 1, 2000.
"""
name = 'mjd'
def set_jds(self, val1, val2):
# TODO - this routine and vals should be Cythonized to follow the ERFA
# convention of preserving precision by adding to the larger of the two
# values in a vectorized operation. But in most practical cases the
# first one is probably biggest.
self._check_scale(self._scale) # Validate scale.
jd1, jd2 = day_frac(val1, val2)
jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
return (self.jd1 - erfa.DJM0) + self.jd2
class TimeDecimalYear(TimeFormat):
"""
Time as a decimal year, with integer values corresponding to midnight
of the first day of each year. For example 2000.5 corresponds to the
ISO time '2000-07-02 00:00:00'.
"""
name = 'decimalyear'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
sum12, err12 = two_sum(val1, val2)
iy_start = np.trunc(sum12).astype(np.int)
extra, y_frac = two_sum(sum12, -iy_start)
y_frac += extra + err12
val = (val1 + val2).astype(np.double)
iy_start = np.trunc(val).astype(np.int)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(y_frac)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')
t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')
t_frac = t_start + (t_end - t_start) * y_frac
self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
@property
def value(self):
scale = self.scale.upper().encode('ascii')
iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0, # precision=0
self.jd1, self.jd2)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(self.jd1)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
decimalyear = iy_start + dt / dt_end
return decimalyear
class TimeFromEpoch(TimeFormat):
"""
Base class for times that represent the interval from a particular
epoch as a floating point multiple of a unit time interval (e.g. seconds
or days).
"""
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale
# Initialize the reference epoch (a single time defined in subclasses)
epoch = Time(self.epoch_val, self.epoch_val2, scale=self.epoch_scale,
format=self.epoch_format)
self.epoch = epoch
# Now create the TimeFormat object as normal
super(TimeFromEpoch, self).__init__(val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd)
def set_jds(self, val1, val2):
"""
Initialize the internal jd1 and jd2 attributes given val1 and val2.
        For a TimeFromEpoch subclass like TimeUnix these will be floats giving
the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
"""
# Form new JDs based on epoch time + time from epoch (converted to JD).
# One subtlety that might not be obvious is that 1.000 Julian days in
# UTC can be 86400 or 86401 seconds. For the TimeUnix format the
# assumption is that every day is exactly 86400 seconds, so this is, in
# principle, doing the math incorrectly, *except* that it matches the
# definition of Unix time which does not include leap seconds.
# note: use divisor=1./self.unit, since this is either 1 or 1/86400,
# and 1/86400 is not exactly representable as a float64, so multiplying
# by that will cause rounding errors. (But inverting it as a float64
# recovers the exact number)
day, frac = day_frac(val1, val2, divisor=1. / self.unit)
jd1 = self.epoch.jd1 + day
jd2 = self.epoch.jd2 + frac
# Create a temporary Time object corresponding to the new (jd1, jd2) in
# the epoch scale (e.g. UTC for TimeUnix) then convert that to the
# desired time scale for this object.
#
# A known limitation is that the transform from self.epoch_scale to
# self.scale cannot involve any metadata like lat or lon.
try:
tm = getattr(Time(jd1, jd2, scale=self.epoch_scale,
format='jd'), self.scale)
except Exception as err:
            raise ScaleValueError("Cannot convert from '{0}' epoch scale '{1}' "
                                  "to specified scale '{2}', got error:\n{3}"
.format(self.name, self.epoch_scale,
self.scale, err))
self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
def to_value(self, parent=None):
# Make sure that scale is the same as epoch scale so we can just
# subtract the epoch and convert
if self.scale != self.epoch_scale:
if parent is None:
raise ValueError('cannot compute value without parent Time object')
tm = getattr(parent, self.epoch_scale)
jd1, jd2 = tm._time.jd1, tm._time.jd2
else:
jd1, jd2 = self.jd1, self.jd2
time_from_epoch = ((jd1 - self.epoch.jd1) +
(jd2 - self.epoch.jd2)) / self.unit
return time_from_epoch
value = property(to_value)
class TimeUnix(TimeFromEpoch):
"""
Unix time: seconds from 1970-01-01 00:00:00 UTC.
For example, 946684800.0 in Unix time is midnight on January 1, 2000.
NOTE: this quantity is not exactly unix time and differs from the strict
POSIX definition by up to 1 second on days with a leap second. POSIX
unix time actually jumps backward by 1 second at midnight on leap second
days while this class value is monotonically increasing at 86400 seconds
per UTC day.
"""
name = 'unix'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1970-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'iso'
class TimeCxcSec(TimeFromEpoch):
"""
Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.
For example, 63072064.184 is midnight on January 1, 2000.
"""
name = 'cxcsec'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1998-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'tt'
epoch_format = 'iso'
class TimeGPS(TimeFromEpoch):
"""GPS time: seconds from 1980-01-06 00:00:00 UTC
For example, 630720013.0 is midnight on January 1, 2000.
Notes
=====
This implementation is strictly a representation of the number of seconds
(including leap seconds) since midnight UTC on 1980-01-06. GPS can also be
considered as a time scale which is ahead of TAI by a fixed offset
(to within about 100 nanoseconds).
For details, see http://tycho.usno.navy.mil/gpstt.html
"""
name = 'gps'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1980-01-06 00:00:19'
# above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai
epoch_val2 = None
epoch_scale = 'tai'
epoch_format = 'iso'
class TimePlotDate(TimeFromEpoch):
"""
Matplotlib `~matplotlib.pyplot.plot_date` input:
1 + number of days from 0001-01-01 00:00:00 UTC
This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`
function::
>>> import matplotlib.pyplot as plt
>>> jyear = np.linspace(2000, 2001, 20)
>>> t = Time(jyear, format='jyear', scale='utc')
>>> plt.plot_date(t.plot_date, jyear)
>>> plt.gcf().autofmt_xdate() # orient date labels at a slant
>>> plt.draw()
For example, 730120.0003703703 is midnight on January 1, 2000.
"""
# This corresponds to the zero reference time for matplotlib plot_date().
# Note that TAI and UTC are equivalent at the reference time.
name = 'plot_date'
unit = 1.0
epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'jd'
class TimeUnique(TimeFormat):
"""
Base class for time formats that can uniquely create a time object
without requiring an explicit format specifier. This class does
nothing but provide inheritance to identify a class as unique.
"""
class TimeAstropyTime(TimeUnique):
"""
Instantiate date from an Astropy Time object (or list thereof).
This is purely for instantiating from a Time object. The output
format is the same as the first time instance.
"""
name = 'astropy_time'
def __new__(cls, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
"""
Use __new__ instead of __init__ to output a class instance that
is the same as the class of the first Time object in the list.
"""
val1_0 = val1.flat[0]
if not (isinstance(val1_0, Time) and all(type(val) is type(val1_0)
for val in val1.flat)):
raise TypeError('Input values for {0} class must all be same '
'astropy Time type.'.format(cls.name))
if scale is None:
scale = val1_0.scale
if val1.shape:
vals = [getattr(val, scale)._time for val in val1]
jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])
jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])
else:
val = getattr(val1_0, scale)._time
jd1, jd2 = val.jd1, val.jd2
OutTimeFormat = val1_0._time.__class__
self = OutTimeFormat(jd1, jd2, scale, precision, in_subfmt, out_subfmt,
from_jd=True)
return self
class TimeDatetime(TimeUnique):
"""
Represent date as Python standard library `~datetime.datetime` object
Example::
>>> from astropy.time import Time
>>> from datetime import datetime
>>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')
>>> t.iso
'2000-01-02 12:00:00.000'
>>> t.tt.datetime
datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)
"""
name = 'datetime'
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for this class
if not all(isinstance(val, datetime.datetime) for val in val1.flat):
raise TypeError('Input values for {0} class must be '
'datetime objects'.format(self.name))
return val1, None
def set_jds(self, val1, val2):
"""Convert datetime object contained in val1 to jd1, jd2"""
# Iterate through the datetime objects, getting year, month, etc.
iterator = np.nditer([val1, None, None, None, None, None, None],
flags=['refs_ok'],
op_dtypes=[np.object] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
dt = val.item()
if dt.tzinfo is not None:
dt = (dt - dt.utcoffset()).replace(tzinfo=None)
iy[...] = dt.year
im[...] = dt.month
id[...] = dt.day
ihr[...] = dt.hour
imin[...] = dt.minute
dsec[...] = dt.second + dt.microsecond / 1e6
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, timezone=None, parent=None):
"""
Convert to (potentially timezone-aware) `~datetime.datetime` object.
If ``timezone`` is not ``None``, return a timezone-aware datetime
object.
Parameters
----------
timezone : {`~datetime.tzinfo`, None} (optional)
If not `None`, return timezone-aware datetime.
Returns
-------
`~datetime.datetime`
If ``timezone`` is not ``None``, output will be timezone-aware.
"""
if timezone is not None:
if self._scale != 'utc':
raise ScaleValueError("scale is {}, must be 'utc' when timezone "
"is supplied.".format(self._scale))
# Rather than define a value property directly, we have a function,
# since we want to be able to pass in timezone information.
scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 6, # 6 for microsec
self.jd1, self.jd2)
ihrs = ihmsfs[..., 0]
imins = ihmsfs[..., 1]
isecs = ihmsfs[..., 2]
ifracs = ihmsfs[..., 3]
iterator = np.nditer([iys, ims, ids, ihrs, imins, isecs, ifracs, None],
flags=['refs_ok'],
op_dtypes=7*[iys.dtype] + [np.object])
for iy, im, id, ihr, imin, isec, ifracsec, out in iterator:
if isec >= 60:
raise ValueError('Time {} is within a leap second but datetime '
'does not support leap seconds'
.format((iy, im, id, ihr, imin, isec, ifracsec)))
if timezone is not None:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec,
tzinfo=TimezoneInfo()).astimezone(timezone)
else:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec)
return iterator.operands[-1]
value = property(to_value)
class TimezoneInfo(datetime.tzinfo):
"""
Subclass of the `~datetime.tzinfo` object, used in the
to_datetime method to specify timezones.
It may be safer in most cases to use a timezone database package like
pytz rather than defining your own timezones - this class is mainly
a workaround for users without pytz.
"""
@u.quantity_input(utc_offset=u.day, dst=u.day)
def __init__(self, utc_offset=0*u.day, dst=0*u.day, tzname=None):
"""
Parameters
----------
utc_offset : `~astropy.units.Quantity` (optional)
Offset from UTC in days. Defaults to zero.
dst : `~astropy.units.Quantity` (optional)
Daylight Savings Time offset in days. Defaults to zero
(no daylight savings).
tzname : string, `None` (optional)
Name of timezone
Examples
--------
>>> from datetime import datetime
>>> from astropy.time import TimezoneInfo # Specifies a timezone
>>> import astropy.units as u
>>> utc = TimezoneInfo() # Defaults to UTC
>>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1
>>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour)
>>> print(dt_aware)
2000-01-01 00:00:00+01:00
>>> print(dt_aware.astimezone(utc))
1999-12-31 23:00:00+00:00
"""
if utc_offset == 0 and dst == 0 and tzname is None:
tzname = 'UTC'
self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day))
self._tzname = tzname
self._dst = datetime.timedelta(dst.to_value(u.day))
def utcoffset(self, dt):
return self._utcoffset
def tzname(self, dt):
return str(self._tzname)
def dst(self, dt):
return self._dst
class TimeString(TimeUnique):
"""
Base class for string-like time representations.
This class assumes that anything following the last decimal point to the
right is a fraction of a second.
    This is a reference implementation that can be made much faster with effort.
"""
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for these classes
if val1.dtype.kind not in ('S', 'U'):
raise TypeError('Input values for {0} class must be strings'
.format(self.name))
return val1, None
def parse_string(self, timestr, subfmts):
"""Read time from a single string, using a set of possible formats."""
# Datetime components required for conversion to JD by ERFA, along
# with the default values.
components = ('year', 'mon', 'mday', 'hour', 'min', 'sec')
defaults = (None, 1, 1, 0, 0, 0)
# Assume that anything following "." on the right side is a
# floating fraction of a second.
try:
idot = timestr.rindex('.')
except Exception:
fracsec = 0.0
else:
timestr, fracsec = timestr[:idot], timestr[idot:]
fracsec = float(fracsec)
for _, strptime_fmt_or_regex, _ in subfmts:
if isinstance(strptime_fmt_or_regex, six.string_types):
try:
tm = time.strptime(timestr, strptime_fmt_or_regex)
except ValueError:
continue
else:
vals = [getattr(tm, 'tm_' + component)
for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [int(tm.get(component, default)) for component, default
in zip(components, defaults)]
# Add fractional seconds
vals[-1] = vals[-1] + fracsec
return vals
else:
raise ValueError('Time {0} does not match {1} format'
.format(timestr, self.name))
def set_jds(self, val1, val2):
"""Parse the time strings contained in val1 and set jd1, jd2"""
# Select subformats based on current self.in_subfmt
subfmts = self._select_subfmts(self.in_subfmt)
iterator = np.nditer([val1, None, None, None, None, None, None],
op_dtypes=[val1.dtype] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
iy[...], im[...], id[...], ihr[...], imin[...], dsec[...] = (
self.parse_string(val.item(), subfmts))
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def str_kwargs(self):
"""
Generator that yields a dict of values corresponding to the
calendar date and time for the internal JD values.
"""
scale = self.scale.upper().encode('ascii'),
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, self.precision,
self.jd1, self.jd2)
# Get the str_fmt element of the first allowed output subformat
_, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]
if '{yday:' in str_fmt:
has_yday = True
else:
has_yday = False
yday = None
ihrs = ihmsfs[..., 0]
imins = ihmsfs[..., 1]
isecs = ihmsfs[..., 2]
ifracs = ihmsfs[..., 3]
for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs]):
if has_yday:
yday = datetime.datetime(iy, im, id).timetuple().tm_yday
yield {'year': int(iy), 'mon': int(im), 'day': int(id),
'hour': int(ihr), 'min': int(imin), 'sec': int(isec),
'fracsec': int(ifracsec), 'yday': yday}
def format_string(self, str_fmt, **kwargs):
"""Write time to a string using a given format.
By default, just interprets str_fmt as a format string,
but subclasses can add to this.
"""
return str_fmt.format(**kwargs)
@property
def value(self):
# Select the first available subformat based on current
# self.out_subfmt
subfmts = self._select_subfmts(self.out_subfmt)
_, _, str_fmt = subfmts[0]
# TODO: fix this ugly hack
if self.precision > 0 and str_fmt.endswith('{sec:02d}'):
str_fmt += '.{fracsec:0' + str(self.precision) + 'd}'
# Try to optimize this later. Can't pre-allocate because length of
# output could change, e.g. year rolls from 999 to 1000.
outs = []
for kwargs in self.str_kwargs():
outs.append(str(self.format_string(str_fmt, **kwargs)))
return np.array(outs).reshape(self.jd1.shape)
def _select_subfmts(self, pattern):
"""
Return a list of subformats where name matches ``pattern`` using
fnmatch.
"""
fnmatchcase = fnmatch.fnmatchcase
subfmts = [x for x in self.subfmts if fnmatchcase(x[0], pattern)]
if len(subfmts) == 0:
raise ValueError('No subformats match {0}'.format(pattern))
return subfmts
class TimeISO(TimeString):
"""
ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...".
For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'iso'
subfmts = (('date_hms',
'%Y-%m-%d %H:%M:%S',
# XXX To Do - use strftime for output ??
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%d %H:%M',
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
def parse_string(self, timestr, subfmts):
# Handle trailing 'Z' for UTC time
if timestr.endswith('Z'):
if self.scale != 'utc':
                raise ValueError("Time input terminating in 'Z' must have "
                                 "scale='utc'")
timestr = timestr[:-1]
return super(TimeISO, self).parse_string(timestr, subfmts)
class TimeISOT(TimeISO):
"""
ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...".
This is the same as TimeISO except for a "T" instead of space between
the date and time.
For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'isot'
subfmts = (('date_hms',
'%Y-%m-%dT%H:%M:%S',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%dT%H:%M',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
class TimeYearDayTime(TimeISO):
"""
Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".
The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'yday'
subfmts = (('date_hms',
'%Y:%j:%H:%M:%S',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y:%j:%H:%M',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}'),
('date',
'%Y:%j',
'{year:d}:{yday:03d}'))
class TimeFITS(TimeString):
"""
FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]][(SCALE[(REALIZATION)])]".
ISOT with two extensions:
- Can give signed five-digit year (mostly for negative years);
- A possible time scale (and realization) appended in parentheses.
Note: FITS supports some deprecated names for timescales; these are
translated to the formal names upon initialization. Furthermore, any
specific realization information is stored only as long as the time scale
is not changed.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date': date
- 'longdate_hms': as 'date_hms', but with signed 5-digit year
- 'longdate': as 'date', but with signed 5-digit year
See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).
"""
name = 'fits'
subfmts = (
('date_hms',
(r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date',
r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:04d}-{mon:02d}-{day:02d}'),
('longdate_hms',
(r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('longdate',
r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:+06d}-{mon:02d}-{day:02d}'))
# Add the regex that parses the scale and possible realization.
subfmts = tuple(
(subfmt[0],
subfmt[1] + r'(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?',
subfmt[2]) for subfmt in subfmts)
_fits_scale = None
_fits_realization = None
def parse_string(self, timestr, subfmts):
"""Read time and set scale according to trailing scale codes."""
# Try parsing with any of the allowed sub-formats.
for _, regex, _ in subfmts:
tm = re.match(regex, timestr)
if tm:
break
else:
raise ValueError('Time {0} does not match {1} format'
.format(timestr, self.name))
tm = tm.groupdict()
if tm['scale'] is not None:
# If a scale was given, translate from a possible deprecated
# timescale identifier to the scale used by Time.
fits_scale = tm['scale'].upper()
scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())
if scale not in TIME_SCALES:
raise ValueError("Scale {0!r} is not in the allowed scales {1}"
.format(scale, sorted(TIME_SCALES)))
# If no scale was given in the initialiser, set the scale to
# that given in the string. Also store a possible realization,
# so we can round-trip (as long as no scale changes are made).
fits_realization = (tm['realization'].upper()
if tm['realization'] else None)
if self._fits_scale is None:
self._fits_scale = fits_scale
self._fits_realization = fits_realization
if self._scale is None:
self._scale = scale
if (scale != self.scale or fits_scale != self._fits_scale or
fits_realization != self._fits_realization):
raise ValueError("Input strings for {0} class must all "
"have consistent time scales."
.format(self.name))
return [int(tm['year']), int(tm['mon']), int(tm['mday']),
int(tm.get('hour', 0)), int(tm.get('min', 0)),
float(tm.get('sec', 0.))]
def format_string(self, str_fmt, **kwargs):
"""Format time-string: append the scale to the normal ISOT format."""
time_str = super(TimeFITS, self).format_string(str_fmt, **kwargs)
if self._fits_scale and self._fits_realization:
return '{0}({1}({2}))'.format(time_str, self._fits_scale,
self._fits_realization)
else:
return '{0}({1})'.format(time_str, self._scale.upper())
@property
def value(self):
"""Convert times to strings, using signed 5 digit if necessary."""
if 'long' not in self.out_subfmt:
# If we have times before year 0 or after year 9999, we can
# output only in a "long" format, using signed 5-digit years.
jd = self.jd1 + self.jd2
if jd.min() < 1721425.5 or jd.max() >= 5373484.5:
self.out_subfmt = 'long' + self.out_subfmt
return super(TimeFITS, self).value
class TimeEpochDate(TimeFormat):
"""
    Base class to support floating point Besselian and Julian epoch dates
"""
def set_jds(self, val1, val2):
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(val1 + val2)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
return jd_to_epoch(self.jd1, self.jd2)
class TimeBesselianEpoch(TimeEpochDate):
"""Besselian Epoch year as floating point value(s) like 1950.0"""
name = 'byear'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
if hasattr(val1, 'to') and hasattr(val1, 'unit'):
raise ValueError("Cannot use Quantities for 'byear' format, "
"as the interpretation would be ambiguous. "
"Use float with Besselian year instead. ")
return super(TimeBesselianEpoch, self)._check_val_type(val1, val2)
class TimeJulianEpoch(TimeEpochDate):
"""Julian Epoch year as floating point value(s) like 2000.0"""
name = 'jyear'
unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
class TimeEpochDateString(TimeString):
"""
Base class to support string Besselian and Julian epoch dates
such as 'B1950.0' or 'J2000.0' respectively.
"""
def set_jds(self, val1, val2):
epoch_prefix = self.epoch_prefix
iterator = np.nditer([val1, None], op_dtypes=[val1.dtype, np.double])
for val, years in iterator:
time_str = val.item()
try:
epoch_type, year_str = time_str[0], time_str[1:]
year = float(year_str)
if epoch_type.upper() != epoch_prefix:
raise ValueError
except (IndexError, ValueError):
raise ValueError('Time {0} does not match {1} format'
.format(time_str, self.name))
else:
years[...] = year
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(iterator.operands[-1])
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
years = jd_to_epoch(self.jd1, self.jd2)
# Use old-style format since it is a factor of 2 faster
str_fmt = self.epoch_prefix + '%.' + str(self.precision) + 'f'
outs = [str_fmt % year for year in years.flat]
return np.array(outs).reshape(self.jd1.shape)
class TimeBesselianEpochString(TimeEpochDateString):
"""Besselian Epoch year as string value(s) like 'B1950.0'"""
name = 'byear_str'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
epoch_prefix = 'B'
class TimeJulianEpochString(TimeEpochDateString):
"""Julian Epoch year as string value(s) like 'J2000.0'"""
name = 'jyear_str'
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
epoch_prefix = 'J'
class TimeDeltaFormatMeta(TimeFormatMeta):
_registry = TIME_DELTA_FORMATS
@six.add_metaclass(TimeDeltaFormatMeta)
class TimeDeltaFormat(TimeFormat):
"""Base class for time delta representations"""
def _check_scale(self, scale):
"""
Check that the scale is in the allowed list of scales, or is `None`
"""
if scale is not None and scale not in TIME_DELTA_SCALES:
raise ScaleValueError("Scale value '{0}' not in "
"allowed values {1}"
.format(scale, TIME_DELTA_SCALES))
return scale
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2, divisor=1./self.unit)
@property
def value(self):
return (self.jd1 + self.jd2) / self.unit
class TimeDeltaSec(TimeDeltaFormat):
"""Time delta in SI seconds"""
name = 'sec'
unit = 1. / erfa.DAYSEC # for quantity input
class TimeDeltaJD(TimeDeltaFormat):
"""Time delta in Julian days (86400 SI seconds)"""
name = 'jd'
unit = 1.
from .core import Time, TIME_SCALES, TIME_DELTA_SCALES, ScaleValueError
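# Illustrative usage sketch (not part of the original module; assumes astropy is
# installed so that astropy.time.Time dispatches to the formats registered above):
#
#     >>> from astropy.time import Time
#     >>> t = Time(2451544.5, format='jd', scale='utc')    # handled by TimeJD
#     >>> t.mjd                                             # TimeMJD: JD - 2400000.5
#     51544.0
#     >>> t.iso                                             # TimeISO string output
#     '2000-01-01 00:00:00.000'
#     >>> t.unix                                            # TimeUnix: seconds since 1970-01-01 UTC
#     946684800.0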
|
# Celery configuration file
# Specify the broker (the message/task queue backend); Redis is used here
broker_url = "redis://192.168.103.168/10"
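# Illustrative sketch (assumption, not part of the original config): a Celery app
# would normally load this module with config_from_object, e.g.
#
#     from celery import Celery
#     app = Celery('demo_app')                        # hypothetical app name
#     app.config_from_object('celery_tasks.config')   # hypothetical dotted path to this file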
|
APPLICATION_JS = 'application/javascript'
JSONP_TEMPLATE = u'{callback}({payload})'
JSONP = 'jsonp'
CALLBACK = 'callback'
def get_callback(request):
return request.GET.get(CALLBACK, request.GET.get(JSONP, None))
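# Illustrative sketch (assumption, not part of the original module): a Django view
# could use these helpers to wrap an already-serialized JSON string when the client
# passed a ``callback`` or ``jsonp`` query parameter, e.g.
#
#     callback = get_callback(request)
#     if callback:
#         body = JSONP_TEMPLATE.format(callback=callback, payload=json_payload)
#         return HttpResponse(body, content_type=APPLICATION_JS)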
|
from flask import Blueprint
blog_blueprint = Blueprint('blog', __name__, static_folder='static', template_folder='templates')
from . import routes
|
from py_sexpr.terms import *
from py_sexpr.stack_vm.emit import module_code
res = block( " | The functions in this module are highly unsafe as they treat records like | stringly-keyed maps and can coerce the row of labels that a record has. | | These function are intended for situations where there is some other way of | proving things about the structure of the record - for example, when using | `RowToList`. **They should never be used for general record manipulation.**"
, assign_star( "$foreign"
, call( var('import_module')
, "purescript_show_python.ffi.Record.Unsafe" ) )
, assign( "exports"
, record( ( "unsafeHas"
, get_item(var("$foreign"), "unsafeHas") )
, ( "unsafeGet"
, get_item(var("$foreign"), "unsafeGet") )
, ( "unsafeSet"
, get_item(var("$foreign"), "unsafeSet") )
, ( "unsafeDelete"
, get_item(var("$foreign"), "unsafeDelete") ) ) ) )
res = module_code(res, filename="C:\\Users\\twshe\\Desktop\\mydb\\com-haskell\\v0.1\\purescript-show-python\\.spago\\prelude\\v4.1.1\\src\\Record\\Unsafe.purs", name="purescript_show_python.Record.Unsafe.pure")
|
import tweepy
from config import create_api
# auth = tweepy.OAuthHandler("CONSUMER_KEY", "CONSUMER_SECRET")
# auth.set_access_token("ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")
def authentication():
auth = tweepy.OAuthHandler("eLz5PW2HhVvfeJ2hmKjlZmP6g",
"BDGnsOitwOcZJWdvvSwTr5iI2OKGglinEHT6glhCJrpLbHl1GT")
auth.set_access_token("1445122551773110273-OhjDrVIbWb4yKcbew3TfkZJLgPJUNt",
"RPAzEmtTwiRRxlJfWJJegXbPQkvwH5Q17s8SeUzXhURXz")
# api = tweepy.API(auth)
# create API object
    api = tweepy.API(auth, wait_on_rate_limit=True)
    return api
|
from rest_framework import serializers
from battleships.models.game import Game
from battleships.models.game_player import GamePlayer
from battleships.models.ship import Ship
from battleships.models.coordinate import Coordinate
class CoordinateSerializer(serializers.Serializer):
x = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
y = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
carrier = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
destroyer = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
frigate = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
submarine = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
class Meta:
model = Coordinate
fields = '__all__'
class ShipSerializer(serializers.Serializer):
game_player = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
id = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
name = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
strength = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
experience = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
experience_points = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
visibility_radius = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
coordinate_set = CoordinateSerializer(many=True)
class Meta:
model = Ship
fields = '__all__'
abstract = True
class GamePlayerSerializer(serializers.ModelSerializer):
game = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
player = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
carrier_set = ShipSerializer(many=True)
frigate_set = ShipSerializer(many=True)
destroyer_set = ShipSerializer(many=True)
submarine_set = ShipSerializer(many=True)
class Meta:
model = GamePlayer
fields = '__all__'
class GameSerializer(serializers.ModelSerializer):
gameplayer_set = GamePlayerSerializer(many=True)
class Meta:
model = Game
fields = '__all__'
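# Illustrative usage sketch (assumption, not part of the original module):
#
#     game = Game.objects.prefetch_related('gameplayer_set').first()
#     data = GameSerializer(game).data   # nested game players, ships and coordinates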
|
#!/usr/bin/env python2
#
# @file
# @author
# @version 0.1
# @date
#
# @short:
#
from __future__ import print_function
import itertools
from threading import Lock
from Queue import Queue
from collections import namedtuple
import rospy # Necessary to create a ros node
from geometry_msgs.msg import PoseWithCovarianceStamped, Point # Msg of the position of the robot
from nav_msgs.msg import OccupancyGrid, GridCells # Msg of the path
if False: # for IDE
from typing import List
Pos = namedtuple('Point', ['x', 'y'])
try:
xrange
except NameError:
xrange = range
WALL_DISTANCE = 100 - 5
MEASUREMENT_RADIUS = 0.5
class Coverage:
def __init__(self):
self.map_sub = rospy.Subscriber('/map', OccupancyGrid, self.map_callback, queue_size=1)
self.pos_sub = rospy.Subscriber('/amcl_pose', PoseWithCovarianceStamped, self.pos_callback, queue_size=1)
self.coverage_pub = rospy.Publisher('/cells', GridCells, queue_size=1)
self.map = None # type: OccupancyGrid
self.costmap = None # type: List[List[int]]
self.dists = None # type: List[List[int]]
self.pos = None # type: Pos
self.map_pos = None # type: Pos
self.measurements = None # type: List[List[int]]
self.grid = None # type: GridCells
self._calc_lock = Lock() # type:Lock
def pos_callback(self, pose):
""":type pose: PoseWithCovarianceStamped"""
if None in [self.map, self.costmap, self.measurements]:
return
if self.pos != Pos(pose.pose.pose.position.x, pose.pose.pose.position.y):
print("pose received")
self.pos = Pos(pose.pose.pose.position.x, pose.pose.pose.position.y)
self.map_pos = Pos(int(round((self.pos.x - self.map.info.origin.position.x) / self.map.info.resolution)),
int(round((self.pos.y - self.map.info.origin.position.y) / self.map.info.resolution)))
m = int(round(MEASUREMENT_RADIUS / self.map.info.resolution))
for x, y in itertools.product(xrange(self.map_pos.x - m, self.map_pos.x + m),
xrange(self.map_pos.y - m, self.map_pos.y + m)):
try:
self.measurements[x][y] = 0
except IndexError:
pass
self.calc_grid()
def map_callback(self, cb_map):
""":type cb_map: OccupancyGrid"""
print("map received")
self.map = cb_map
costmap = [list(l) for l in zip(*[self.map.data[y * self.map.info.width:(y + 1) * self.map.info.width]
for y in xrange(self.map.info.height)])]
for x in xrange(len(costmap)): # type: int
for y in itertools.chain(xrange(len(costmap[x])), reversed(xrange(len(costmap[x])))): # type: int
try:
for i, j in itertools.product(*[[0, 1, -1]] * 2):
costmap[x + i][y + j] = max(costmap[x + i][y + j], costmap[x][y] - 1)
except IndexError:
pass
for y in xrange(len(costmap[0])): # type: int
for x in itertools.chain(xrange(len(costmap)), reversed(xrange(len(costmap)))): # type: int
try:
for i, j in itertools.product(*[[0, 1, -1]] * 2):
costmap[x + i][y + j] = max(costmap[x + i][y + j], costmap[x][y] - 1)
except IndexError:
pass
self.costmap = costmap
self.measurements = [[-1] * self.map.info.height for _ in xrange(self.map.info.width)]
self.calc_grid()
def send(self):
if self.grid is not None:
print("grid sent, len:", len(self.grid.cells))
self.coverage_pub.publish(self.grid)
def calc_grid(self):
if None in [self.pos, self.map_pos, self.map, self.costmap, self.measurements] or len(self.map.data) <= 0:
return
if self.costmap[self.map_pos.x][self.map_pos.y] >= 100:
print("position in wall")
return
if not self._calc_lock.acquire(False):
return
if self.dists is None or self.dists[self.map_pos.x][self.map_pos.y] <= -1:
self.calc_dists()
grid = GridCells()
grid.header.stamp = rospy.Time.now()
grid.header.frame_id = '/map'
grid.cell_width = grid.cell_height = self.map.info.resolution
grid.cells = list(Point(x * self.map.info.resolution + self.map.info.origin.position.x,
y * self.map.info.resolution + self.map.info.origin.position.y, 0)
for x, y in itertools.product(xrange(len(self.costmap)), xrange(len(self.costmap[0])))
if self.dists[x][y] > -1 and self.measurements[x][y] <= -1)
self.grid = grid
self._calc_lock.release()
self.send()
def calc_dists(self):
wave = Queue()
wave.put(self.map_pos)
dist = [[-1] * self.map.info.height for _ in xrange(self.map.info.width)]
dist[self.map_pos.x][self.map_pos.y] = 0
while not wave.empty():
p = wave.get() # pos
for n in (Pos(p.x + x, p.y + y) for x, y in [[1, 0], [0, 1], [-1, 0], [0, -1]]): # neighbors
try:
if dist[n.x][n.y] != -1:
continue # already visited
if self.costmap[n.x][n.y] >= self.costmap[p.x][p.y] and self.costmap[n.x][n.y] >= WALL_DISTANCE:
continue # ignore some distance to wall except if robot is at wall
except IndexError:
continue
dist[n.x][n.y] = dist[p.x][p.y] + 1
wave.put(n)
self.dists = dist
def main_loop():
global t_loop
cov = Coverage()
rate = rospy.Rate(1)
while not rospy.is_shutdown():
rate.sleep()
cov.send()
if __name__ == '__main__':
rospy.init_node('coverage')
try:
main_loop()
except rospy.ROSInterruptException:
pass
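# Illustrative sketch (assumption, not part of the original node): with a ROS master,
# a map server and AMCL running, this script would be started as a node (e.g. via
# rosrun or a launch file). It subscribes to /map and /amcl_pose, marks an area of
# MEASUREMENT_RADIUS around the robot as measured, and publishes the reachable,
# not-yet-measured cells on /cells as nav_msgs/GridCells once per second.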
|
import codecs
import numpy as np
from word_beam_search import WordBeamSearch
def apply_word_beam_search(mat, corpus, chars, word_chars):
"""Decode using word beam search. Result is tuple, first entry is label string, second entry is char string."""
T, B, C = mat.shape
# decode using the "Words" mode of word beam search with beam width set to 25 and add-k smoothing to 0.0
assert len(chars) + 1 == C
wbs = WordBeamSearch(25, 'Words', 0.0, corpus.encode('utf8'), chars.encode('utf8'), word_chars.encode('utf8'))
label_str = wbs.compute(mat)
# result is string of labels terminated by blank
char_str = []
for curr_label_str in label_str:
s = ''
for label in curr_label_str:
s += chars[label] # map label to char
char_str.append(s)
return label_str[0], char_str[0]
def load_mat(fn):
"""Load matrix from csv and apply softmax."""
mat = np.genfromtxt(fn, delimiter=';')[:, :-1] # load matrix from file
T = mat.shape[0] # dim0=t, dim1=c
# apply softmax
res = np.zeros(mat.shape)
for t in range(T):
y = mat[t, :]
e = np.exp(y)
s = np.sum(e)
res[t, :] = e / s
# expand to TxBxC
return np.expand_dims(res, 1)
def test_mini_example():
"""Mini example, just to check that everything is working."""
corpus = 'a ba' # two words "a" and "ba", separated by whitespace
chars = 'ab ' # the first three characters which occur in the matrix (in this ordering)
word_chars = 'ab' # whitespace not included which serves as word-separating character
mat = np.array([[[0.9, 0.1, 0.0, 0.0]], [[0.0, 0.0, 0.0, 1.0]],
[[0.6, 0.4, 0.0, 0.0]]]) # 3 time-steps and 4 characters per time time ("a", "b", " ", blank)
res = apply_word_beam_search(mat, corpus, chars, word_chars)
print('')
print('Mini example:')
print('Label string:', res[0])
print('Char string:', '"' + res[1] + '"')
assert res[1] == 'ba'
def test_real_example():
"""Real example using a sample from a HTR dataset."""
data_path = '../data/bentham/'
corpus = codecs.open(data_path + 'corpus.txt', 'r', 'utf8').read()
chars = codecs.open(data_path + 'chars.txt', 'r', 'utf8').read()
word_chars = codecs.open(data_path + 'wordChars.txt', 'r', 'utf8').read()
mat = load_mat(data_path + 'mat_2.csv')
res = apply_word_beam_search(mat, corpus, chars, word_chars)
print('')
print('Real example:')
print('Label string:', res[0])
print('Char string:', '"' + res[1] + '"')
assert res[1] == 'submitt both mental and corporeal, is far beyond any idea'
|
import random
import string
class User:
'''
    Class that generates new instances of a user's credentials, i.e. username and password
'''
User_List = []
def save_user(self):
User.User_List.append(self)
def __init__(self,username,password):
"""
        The __init__ method defines the properties of each user object
"""
self.username = username
self.password = password
def delete_user(self):
'''
delete_user method deletes a saved user from the user_list
'''
User.User_List.remove(self)
@classmethod
def user_exist(cls,number):
'''
checks whether our user exists
'''
for user in cls.User_List:
if user.password == number:
return True
return False
@classmethod
def display_user(cls):
'''
this method returns users' list
'''
return cls.User_List
class Credentials:
"""
    Class Credentials generates instances of a user's stored account credentials
"""
User_Credentials_list = []
def save_credential(self):
Credentials.User_Credentials_list.append(self)
"""
save_credential method that saves credential objects into the user_credential_list
"""
def __init__(self,sm_account,username,password):
self.sm_account = sm_account
self.username = username
self.password = password
def delete_credential(self):
"""
gets to delete a saved credential
"""
Credentials.User_Credentials_list.remove(self)
@classmethod
def display_credentials(cls):
"""
this returns the credential list
"""
return cls.User_Credentials_list
def generate_password(self):
'''
generates random password
'''
password = string.ascii_uppercase + string.ascii_lowercase + "damzie"
return ''.join(random.choice(password) for i in range(1,9))
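# Illustrative usage sketch (assumption, not part of the original module; the
# account and name values below are placeholders):
if __name__ == '__main__':
    demo_user = User("demo_name", "demo_pass")
    demo_user.save_user()
    demo_cred = Credentials("twitter", "demo_name", "demo_pass")
    demo_cred.password = demo_cred.generate_password()  # swap in a random password
    demo_cred.save_credential()
    print(Credentials.display_credentials())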
|
"""Constants for the autelis integration."""
import logging
_LOGGER = logging.getLogger(__package__)
DOMAIN = "autelis_pool"
AUTELIS_HOST = "host"
AUTELIS_PASSWORD = "password"
AUTELIS_USERNAME = "admin"
AUTELIS_PLATFORMS = ["sensor", "switch", "climate"] # ["binary_sensor", "climate", "sensor", "weather"]
TEMP_SENSORS = {
"pooltemp": ["Temperature", "Pool"],
"spatemp": ["Temperature", "Spa"],
"airtemp": ["Temperature", "Air"],
"solartemp": ["Temperature", "Solar"],
}
HEAT_SET = {
"Pool Heat": ["pooltemp", "poolsp", "poolht"],
"Spa Heat": ["spatemp", "spasp", "spaht"],
}
CIRCUITS = {
"pump": "Pool",
"spa": "Spa",
"aux1": "Spa Extra Jets",
"aux2": "Sheer Descents",
"aux3": "Pool Light",
"aux4": "Spa Light",
"solarht": "Solar Heating",
}
STATE_SERVICE = "service"
STATE_AUTO = "auto"
MAX_TEMP = 104
MIN_TEMP = 34
|
#!/usr/bin/env python
import torch
import argparse
from src.loadopts import *
from src.utils import timemeter
from src.config import SAVED_FILENAME
METHOD = "WhiteBox"
FMT = "{description}={attack}-{epsilon_min:.4f}-{epsilon_max}-{epsilon_times}-{stepsize:.5f}-{steps}"
parser = argparse.ArgumentParser()
parser.add_argument("model", type=str)
parser.add_argument("dataset", type=str)
parser.add_argument("info_path", type=str)
parser.add_argument("--filename", type=str, default=SAVED_FILENAME)
# adversarial settings
parser.add_argument("--attack", type=str, default="pgd-linf")
parser.add_argument("--epsilon_min", type=float, default=8/255)
parser.add_argument("--epsilon_max", type=float, default=1.)
parser.add_argument("--epsilon_times", type=int, default=1)
parser.add_argument("--stepsize", type=float, default=0.25,
help="pgd:rel_stepsize, cwl2:step_size, deepfool:overshoot, bb:lr")
parser.add_argument("--steps", type=int, default=20)
# basic settings
parser.add_argument("-b", "--batch_size", type=int, default=256)
parser.add_argument("--transform", type=str, default='tensor,none')
parser.add_argument("--progress", action="store_false", default=True,
help="show the progress if true")
parser.add_argument("--log2file", action="store_false", default=True,
help="False: remove file handler")
parser.add_argument("--log2console", action="store_false", default=True,
help="False: remove console handler if log2file is True ...")
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--benchmark", action="store_false", default=True,
help="cudnn.benchmark == True ?")
parser.add_argument("-m", "--description", type=str, default=METHOD)
opts = parser.parse_args()
opts.description = FMT.format(**opts.__dict__)
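# Illustrative invocation sketch (assumption, not part of the original script; the
# script, model, dataset and path names below are placeholders):
#
#     python <this_script>.py resnet18 cifar10 ./infos/resnet18 \
#         --attack pgd-linf --epsilon_min 0.0314 --steps 20 -b 128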
@timemeter("Setup")
def load_cfg() -> 'Config':
from src.dict2obj import Config
from src.base import FBAdversary
from src.utils import set_seed, activate_benchmark, load, set_logger
from models.base import ADArch
cfg = Config()
# generate the log path
_, cfg['log_path'] = generate_path(
method=METHOD, dataset_type=opts.dataset,
model=opts.model, description=opts.description
)
# set logger
logger = set_logger(
path=cfg.log_path,
log2file=opts.log2file,
log2console=opts.log2console
)
logger.debug(opts.info_path)
activate_benchmark(opts.benchmark)
set_seed(opts.seed)
# the model and other settings for training
model = load_model(opts.model)(num_classes=get_num_classes(opts.dataset))
mean, std = load_normalizer(opts.dataset)
model = ADArch(model=model, mean=mean, std=std)
load(
model=model,
path=opts.info_path,
filename=opts.filename
)
# load the testset
testset = load_dataset(
dataset_type=opts.dataset,
transforms=opts.transform,
train=False
)
cfg['testloader'] = load_dataloader(
dataset=testset,
batch_size=opts.batch_size,
train=False,
show_progress=opts.progress
)
# set the attacker
attack = load_fb_attack(
attack_type=opts.attack,
stepsize=opts.stepsize,
steps=opts.steps
)
epsilons = torch.linspace(opts.epsilon_min, opts.epsilon_max, opts.epsilon_times).tolist()
cfg['attacker'] = FBAdversary(
model=model, attacker=attack,
epsilon=epsilons
)
return cfg
@timemeter("Main")
def main(attacker, testloader, log_path):
from src.utils import distance_lp, getLogger
logger = getLogger()
running_success = [0.] * opts.epsilon_times
running_distance_linf = [0.] * opts.epsilon_times
running_distance_l2 = [0.] * opts.epsilon_times
for inputs, labels in testloader:
inputs = inputs.to(attacker.device)
labels = labels.to(attacker.device)
_, clipped, is_adv = attacker(inputs, labels)
dim_ = list(range(1, inputs.dim()))
for epsilon in range(opts.epsilon_times):
inputs_ = inputs[is_adv[epsilon]]
clipped_ = clipped[epsilon][is_adv[epsilon]]
running_success[epsilon] += is_adv[epsilon].sum().item()
running_distance_linf[epsilon] += distance_lp(inputs_, clipped_, p=float('inf'), dim=dim_).sum().item()
running_distance_l2[epsilon] += distance_lp(inputs_, clipped_, p=2, dim=dim_).sum().item()
datasize = len(testloader.dataset)
for epsilon in range(opts.epsilon_times):
running_distance_linf[epsilon] /= running_success[epsilon]
running_distance_l2[epsilon] /= running_success[epsilon]
running_success[epsilon] /= datasize
running_accuracy = list(map(lambda x: 1. - x, running_success))
running_accuracy = ', '.join([f"{acc:.3%}" for acc in running_accuracy])
running_distance_linf = ', '.join([f"{dis_linf:.5f}" for dis_linf in running_distance_linf])
running_distance_l2 = ', '.join([f"{dis_l2:.5f}" for dis_l2 in running_distance_l2])
logger.info(f"Accuracy: {running_accuracy}")
logger.info(f"Distance-Linf: {running_distance_linf}")
logger.info(f"Distance-L2: {running_distance_l2}")
if __name__ == "__main__":
from src.utils import readme
cfg = load_cfg()
readme(cfg.log_path, opts, mode="a")
main(**cfg)
|
import os
import copy
import argparse
import time
from qdmr2sparql.datasets import DatasetBreak, DatasetSpider
from qdmr2sparql.structures import GroundingKey, GroundingIndex
from qdmr2sparql.structures import save_grounding_to_file, load_grounding_from_file, load_grounding_list_from_file, assert_check_grounding_save_load
from qdmr2sparql.structures import RdfGraph
from qdmr2sparql.structures import QueryResult
from qdmr2sparql.query_generator import create_sparql_query_from_qdmr
from qdmr2sparql.process_sql import replace_orderByLimit1_to_subquery
from qdmr2sparql.utils_qdmr2sparql import handle_exception_sparql_process, TimeoutException, handle_exception
from qdmr2sparql.utils_qdmr2sparql import SparqlGenerationError, SparqlRunError, SparqlWrongAnswerError
SQL_KEYWORDS = ('select', 'from', 'where', 'group', 'order', 'limit', 'intersect', 'union', 'except', 'distinct')
JOIN_KEYWORDS = ('join', 'on', 'by', 'having') #'as'
WHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')
UNIT_OPS = ('none', '-', '+', "*", '/')
AGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')
COND_OPS = ('and', 'or')
ORDER_OPS = ('desc', 'asc')
def parse_args():
parser = argparse.ArgumentParser(description='Build grounding between QDMR and SQL.')
parser.add_argument('--qdmr_path', type=str, help='path to break dataset')
parser.add_argument('--spider_path', type=str, help='path to spider dataset')
parser.add_argument('--db_path', type=str, default="database", help='path from --spider_path to get databases, default - "database"')
parser.add_argument('--output_path', type=str, default=None,help='path to output file with grounding (found correct SPARQL script)')
parser.add_argument('--output_path_all', type=str, default=None,help='path to output file with grounding')
parser.add_argument('--dev', action='store_true', help='if true, use dev, else use train')
parser.add_argument('--start_spider_idx', type=int, default=None, help='index of first spider example')
parser.add_argument('--end_spider_idx', type=int, default=None, help='index of last spider example')
parser.add_argument('--input_grounding', type=str, default=None, help='grounding to start from')
parser.add_argument('--not_all_sql', action='store_true', default=False, help='allows not grounded some sql')
parser.add_argument('--without_sql', action='store_true', default=False, help='ground only scheme, without sql args')
parser.add_argument('--time_limit', type=int, default=600, help='time limit in seconds to process one example')
parser.add_argument('--virtuoso_server', type=str, default=None, help='Path to Virtuoso HTTP service (looks like "http://localhost:8890/sparql/"')
parser.add_argument('--spider_idx', type=str, help='index of spider example, use only for debugging')
args = parser.parse_args()
return args
def create_list_of_groundings_to_try(input_grounding, essential_groundings=None, unique_essential_groundings=True):
list_to_try = []
indices_to_search = [k for k in input_grounding if isinstance(k, GroundingIndex)]
indices_everywhere = [k for k in input_grounding if not isinstance(k, GroundingIndex)]
grnd_joint = {k: input_grounding[k] for k in indices_everywhere}
index_list = []
def get_all_combination_from_index(i):
if i == len(indices_to_search):
return [copy.deepcopy(grnd_joint)] # list of dicts
endings = get_all_combination_from_index(i + 1)
options = input_grounding[indices_to_search[i]]
list_to_try = []
for e in endings:
for o in options:
grnd = {indices_to_search[i] : o}
grnd.update(e)
list_to_try.append(grnd)
return list_to_try
list_to_try = get_all_combination_from_index(0)
# filter combinations based on essential_groundings if provided
if essential_groundings:
list_to_try_base = copy.deepcopy(list_to_try)
list_to_try = []
for grnd in list_to_try_base:
available_essential_groundings = copy.deepcopy(essential_groundings)
assigned = [False] * len(available_essential_groundings)
have_duplicate_groundings = False
for k, v in grnd.items():
if isinstance(k, GroundingIndex):
def match_val_to_comp(a, b):
if not isinstance(a, GroundingKey) or not isinstance(b, GroundingKey):
return False
return a.isval() and b.iscomp() and b.keys[0] == "=" and b.keys[1] == a.get_val()
essential_index = None
for i_e, essential in enumerate(available_essential_groundings):
if essential == v or match_val_to_comp(v, essential) or match_val_to_comp(essential, v):
essential_index = i_e
break
if essential_index is not None:
if not assigned[essential_index]:
assigned[essential_index] = True
else:
have_duplicate_groundings = True
if unique_essential_groundings and have_duplicate_groundings:
continue
if all(assigned):
list_to_try.append(grnd)
return list_to_try
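# Illustrative sketch (assumption, not part of the original module): for an input
# grounding such as
#     {GroundingIndex(...): [opt_a, opt_b], "some_fixed_key": fixed_value}
# the function returns the cartesian expansion over all GroundingIndex keys,
#     [{GroundingIndex(...): opt_a, "some_fixed_key": fixed_value},
#      {GroundingIndex(...): opt_b, "some_fixed_key": fixed_value}],
# optionally keeping only those combinations that cover every essential grounding
# exactly once.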
def select_grounding(qdmr, qdmr_name, dataset_spider, db_path, grounding=None, verbose=True, time_limit=None, use_extra_tests=False, virtuoso_server=None):
if grounding is None:
grounding = {}
sql_data = dataset_spider.sql_data[qdmr_name]
db_id = sql_data['db_id']
schema = dataset_spider.schemas[db_id]
sql_query = sql_data['query']
if verbose:
print("db_id:", db_id)
print("Question:", sql_data["question"])
print("SQL query:", sql_query)
print(f"QDMR:\n{qdmr}")
print(f"Groundings: {grounding}")
print(f"Database schema {db_id}:")
for tbl_name, cols in schema.column_names.items():
print(f"{tbl_name}: {cols}")
schema.load_table_data(db_path)
rdf_graph = RdfGraph(schema)
schemas_to_test = [schema]
rdf_graphs_to_test = [rdf_graph]
if use_extra_tests:
schema.load_test_table_data(db_path)
for s in schema.test_schemas:
test_rdf_graph = RdfGraph(s)
rdf_graphs_to_test.append(test_rdf_graph)
schemas_to_test.append(s)
essential_groundings = grounding["ESSENTIAL_GROUNDINGS"] if "ESSENTIAL_GROUNDINGS" in grounding else None
groundings_to_try = create_list_of_groundings_to_try(grounding, essential_groundings)
groundings_all_results = []
groundings_positive_results = []
time_start = time.time()
time_limit_exceeded = False
# try query modifications; if not successful run query as is
try:
sql_query_modified = replace_orderByLimit1_to_subquery(sql_query, schema.column_names)
sql_results = [QueryResult.execute_query_sql(sql_query_modified, s) for s in schemas_to_test]
except:
sql_results = [QueryResult.execute_query_sql(sql_query, s) for s in schemas_to_test]
# is_equal = sql_results[0].is_equal_to(sql_results_modified[0], require_column_order=True, require_row_order=True, schema=schemas_to_test[0])
if verbose:
for sql_result in sql_results:
print("SQL result:", sql_result)
for grnd in groundings_to_try:
if verbose:
grnd_for_printing = copy.deepcopy(grnd)
if "MESSAGES" in grnd_for_printing:
del grnd_for_printing["MESSAGES"]
print("Trying", grnd_for_printing)
got_correct_answer = False
try:
try:
sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grnd, strict_mode=True)
except Exception as e:
raise SparqlGenerationError() from e
for cur_schema, cur_rdf, cur_sql_result in zip(schemas_to_test, rdf_graphs_to_test, sql_results):
try:
result = QueryResult.execute_query_to_rdf(sparql_query, cur_rdf, cur_schema, virtuoso_server=virtuoso_server)
if verbose:
print("SPARQL result:", result)
except Exception as e:
raise SparqlRunError() from e
try:
ordered = True if sql_data["sql"]["orderBy"] and not (sql_data["sql"]["limit"] and sql_data["sql"]["limit"] == 1) else False
equal, message = result.is_equal_to(cur_sql_result,
require_column_order=True,
require_row_order=ordered,
weak_mode_argmax=False,
return_message=True)
assert equal, message
except Exception as e:
raise SparqlWrongAnswerError() from e
got_correct_answer = True
message = f"{os.path.basename(__file__)}: OK"
except TimeoutException as e:
# timeout
raise e
except Exception as e:
error_details = handle_exception_sparql_process(e, verbose=False)
message = f"{os.path.basename(__file__)}: SPARQL_error_type: {error_details['sparql_error_type']}, ERROR: {error_details['type']}:{error_details['message']}, file: {error_details['file']}, line {error_details['line_number']}"
cur_result = copy.deepcopy(grnd)
if "MESSAGES" in cur_result:
cur_result["MESSAGES"].append(message)
else:
cur_result["MESSAGES"] = [message]
groundings_all_results.append(cur_result)
if got_correct_answer:
groundings_positive_results.append(cur_result)
else:
cur_result["ERRORS"] = [error_details]
if time_limit is not None and (time.time() - time_start > time_limit):
time_limit_exceeded = True
break
warning_message = []
if not sql_results[0]:
warning_message.append("WARNING: empty SQL result")
if time_limit_exceeded:
warning_message.append(f"WARNING: time limit of {time_limit}s exceeded")
if len(groundings_positive_results) == 0:
warning_message.append("WARNING: no correct result found")
elif len(groundings_positive_results) > 1:
warning_message.append(f"WARNING: multiple ({len(groundings_positive_results)}) correct results found")
if warning_message:
warning_message = "\n" + "\n".join(warning_message)
else:
warning_message = ""
    # add warning messages: positive groundings reference the same objects, so it is enough to append only to groundings_all_results
for res in groundings_all_results:
res["MESSAGES"][-1] = res["MESSAGES"][-1] + warning_message
message = f"{os.path.basename(__file__)}, {qdmr_name}: "
if len(groundings_positive_results) == 1:
message = message + "OK"
elif len(groundings_positive_results) > 1:
message = message + f"OK: multiple groundings {len(groundings_positive_results)}"
else:
message = message + "No correct match"
if not sql_results[0]:
message = message + "; empty SQL result"
if time_limit_exceeded:
message = message + f"; time limit {time_limit}s exceeded"
if verbose:
print(message)
if "OK" in message:
return groundings_positive_results, message
else:
return groundings_all_results, message
def main(args):
print(args)
split_name = 'dev' if args.dev else 'train'
db_path = os.path.join(args.spider_path, args.db_path)
dataset_break = DatasetBreak(args.qdmr_path, split_name)
dataset_spider = DatasetSpider(args.spider_path, split_name)
if args.input_grounding:
input_grounding = load_grounding_from_file(args.input_grounding)
else:
input_grounding = {}
if args.spider_idx is not None:
qdmr_name = None
for name in dataset_break.names:
idx = dataset_break.get_index_from_name(name)
if idx == args.spider_idx:
qdmr_name = name
break
        assert qdmr_name is not None, f"Could not find QDMR with index {args.spider_idx}"
qdmr = dataset_break.qdmrs[qdmr_name]
qdmr_grounding = input_grounding[qdmr_name] if qdmr_name in input_grounding else None
# debugging
print()
print(qdmr_name)
groundings, _ = select_grounding(qdmr, qdmr_name, dataset_spider, db_path, grounding=qdmr_grounding, verbose=True,
time_limit=args.time_limit, virtuoso_server=args.virtuoso_server)
else:
groundings_all = {}
groundings_only_positive = {}
if split_name == "train":
qdmr_list = [(qdmr_name, qdmr) for qdmr_name, qdmr in dataset_break.make_iterator(args.start_spider_idx, args.end_spider_idx)]
else:
qdmr_list = [(qdmr_name, qdmr) for qdmr_name, qdmr in dataset_break]
for qdmr_name, qdmr in qdmr_list:
spider_idx = DatasetBreak.get_index_from_name(qdmr_name)
qdmr_grounding = input_grounding[qdmr_name] if qdmr_name in input_grounding else None
try:
groundings, message = select_grounding(qdmr, qdmr_name, dataset_spider, db_path, grounding=qdmr_grounding, verbose=False,
time_limit=args.time_limit, virtuoso_server=args.virtuoso_server)
groundings_all[qdmr_name] = {"GROUNDINGS": groundings}
groundings_all[qdmr_name]["MESSAGES"] = [message]
if "OK" in message:
groundings_only_positive[qdmr_name] = {"GROUNDINGS": groundings}
groundings_only_positive[qdmr_name]["MESSAGES"] = [message]
print(message)
except Exception as e:
error_details = handle_exception(e, verbose=False)
print(f"ERROR: {error_details['type']}:{error_details['message']}, file: {error_details['file']}, line {error_details['line_number']}")
groundings_all[qdmr_name] = {"ERRORS" : [error_details]}
if args.output_path:
save_grounding_to_file(args.output_path, groundings_only_positive)
check = load_grounding_list_from_file(args.output_path)
assert_check_grounding_save_load(groundings_only_positive, check)
if args.output_path_all:
save_grounding_to_file(args.output_path_all, groundings_all)
check = load_grounding_list_from_file(args.output_path_all)
assert_check_grounding_save_load(groundings_all, check)
if __name__ == "__main__":
args = parse_args()
main(args)
|
# Copyright 2020 Jigsaw Operations LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rebuild any tables derived from the base scan tables.
Run as
python3 -m table.run_queries
"""
import argparse
import glob
from pprint import pprint
from google.cloud import bigquery as cloud_bigquery # type: ignore
import firehook_resources
client = cloud_bigquery.Client(project=firehook_resources.PROJECT_NAME)
BASE_PLACEHOLDER = 'BASE_DATASET'
DERIVED_PLACEHOLDER = 'DERIVED_DATASET'
DEFAULT_BASE_DATASET = 'base'
DEFAULT_DERIVED_DATASET = 'derived'
def _run_query(filepath: str, base_dataset: str,
derived_dataset: str) -> cloud_bigquery.table.RowIterator:
with open(filepath, encoding='utf-8') as sql:
query = sql.read()
query = query.replace(BASE_PLACEHOLDER, base_dataset)
query = query.replace(DERIVED_PLACEHOLDER, derived_dataset)
query_job = client.query(query)
return query_job.result()
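# Illustrative note (assumption about the query files, which are not shown here): each
# .sql file under table/queries/ references datasets via the placeholders above, e.g. a
# line like "SELECT * FROM BASE_DATASET.scan" would be rewritten by _run_query() to
# "SELECT * FROM base.scan" with the default datasets.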
def rebuild_all_tables(base_dataset: str = DEFAULT_BASE_DATASET,
derived_dataset: str = DEFAULT_DERIVED_DATASET) -> None:
for filepath in glob.glob('table/queries/*.sql'):
try:
_run_query(filepath, base_dataset, derived_dataset)
except Exception as ex:
pprint(('Failed SQL query', filepath))
raise ex
if __name__ == '__main__':
  parser = argparse.ArgumentParser(description='Rebuild derived tables by running SQL queries over scans')
parser.add_argument(
'--input',
type=str,
default=DEFAULT_BASE_DATASET,
help='Input dataset to query from')
parser.add_argument(
'--output',
type=str,
required=True,
help='Output dataset to write to. To write to prod use --output=derived')
args = parser.parse_args()
rebuild_all_tables(base_dataset=args.input, derived_dataset=args.output)
|
import sqlite3
conn = sqlite3.connect('degiro_new.db')
c = conn.cursor()
import json
from datetime import datetime
import pandas as pd
import degiroapi
from degiroapi.product import Product
from degiroapi.order import Order
from degiroapi.utils import pretty_json
# c.execute('''UPDATE portfolio SET category = (?)''',(ts,))
c.execute('''
SELECT * FROM portfolio
''')
for row in c.fetchall():
print (row)
|
# -*- coding: utf-8 -*-
"""
Last updated Apr 2019
by @ivanwilliammd
"""
import glob
import os
import subprocess
import SimpleITK as sitk
import numpy as np
import lidcXmlHelper as xmlHelper
# Path to the command lines tools of MITK Phenotyping
path_to_executables=r"C:\Users\Ivan William Harsono\Documents\MITK 2016.11.99_rcadabe\bin"
# Path to the folder that contains the LIDC-IDRI DICOM files
path_to_dicoms = r"D:\LIDC-IDRI\DICOM_Classic_Directory"
# Path to the folder that contains the LIDC-IDRI XML files
path_to_xmls= r"D:\LIDC-IDRI\XML\tcia-lidc-xml"
# path_to_xmls= r"P:\Goetz\Datenkollektive\Lungendaten\Nodules_LIDC_IDRI\XML2"
# Output path where the generated NRRD and NIFTI files will be saved
path_to_nrrds = r"D:\LIDC-IDRI\new_nrrd_2"
# Output path where the generated Planar Figures will be saved
path_to_planars= r"D:\LIDC-IDRI\new_planars_2"
# Output path to the CSV-file that will contain the nodule characteristics. An existing file will be appended.
path_to_characteristics=r"D:\LIDC-IDRI\characteristics_2.csv"
# Output path to an error file where errors will be logged. An existing file will be appended.
path_to_error_file=r"D:\LIDC-IDRI\conversion_error_2.txt"
planar_template=r"template.pf"
list_of_appendix=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
def write_error(msg, errorfile=path_to_error_file):
"""
A simple error logging method. Errors should be reported using this functions.
All errors are then logged in the file specified with the global variable
'path_to_error_file' if no other file is specified.
The error message is also printed in the main text.
"""
a=open(errorfile,'a')
a.write(str(msg) + "\n")
a.close()
print("ERROR:",msg)
def get_dicom_from_study_uid(study_uid, series_uid):
"""
Find the folder containing the dicoms that corresponds to a given study id
or an study id and a series id.
Returns:
The path to the DICOMs matching the given IDs
The number of DICOMs that had been found.
"""
if series_uid is not None:
search_path=os.path.join(path_to_dicoms, "*","*"+study_uid+"*","*"+series_uid+"*","*.dcm")
else:
search_path=os.path.join(path_to_dicoms, "*","*"+study_uid+"*","*","*.dcm")
paths=glob.glob(search_path)
if len(paths) > 0:
return paths[0], len(paths)
else:
return [], 0
def create_nrrd_from_dicoms(image, patient_id):
"""
Reads a folder that contains multiple DICOM files and
converts the input into a single nrrd file using a command line
app from MITK or MITK Phenotyping.
Input:
* path to one dicom (other are automatically found.)
* Patient ID
Output:
Creates a single nrrd file with the path: $target_path / patient_id + '_ct_scan.nrrd'
"""
target_path = os.path.join(path_to_nrrds, patient_id)
target_name = os.path.join(target_path, patient_id+"_ct_scan.nrrd")
os.makedirs(target_path, exist_ok=True)
cmd_string=r"MitkCLDicom2Nrrd "+\
"-i \"" + image + "\"" \
" -o \"" + target_name + "\""
print(cmd_string)
a=subprocess.Popen(cmd_string,shell=True,cwd=path_to_executables)
a.wait()
return target_name
def get_spacing_and_origin(file):
""" Reading nrrd files, extract spacing and origin usign SimpleITK and returning them"""
image=sitk.ReadImage(file)
spacing=image.GetSpacing()
origin=image.GetOrigin()
return spacing, origin
def create_planarfigure_for_session(session, spacing, origin, patient_id, session_id):
"""
Given one session of an expert, and the corresponding patient id, the given
contours are converted into multiple planar figures.
Each Nodule gets an ID that is unique for ALL Nodules from all images / reading sessions.
The planar figures are saved in a path following this structure:
path_to_planars/<Patient ID>/<Patient_ID>_<Session_ID>_<Nodule_ID>_<ROI_ID>.pf
with the following properties:
* path_to_planars : Globally specified folder
* <Patient ID> : Unique Patient ID consisting of patient number and an appendix
* <Session_ID> : Number of the reading session / expert. Unique to the given patient only.
* <Nodule ID> : An globally unique ID of the given Nodule
* <ROI ID> : A nodule-wide unique, consecutive number of the current ROI. (Each planar figure contains the annotation of a single slice)
"""
# Obtaining the code of the radiologist. Replacing underscore (_) as it is later used to
# encode different IDs in the resulting file name.
radiologist=str(session.find("servicingRadiologistID").text).replace("_","-")
# Reading each Nodule in the given session and creating planar figures for them (if large enough)
global nodule_id
for nodule in session.iter('unblindedReadNodule'):
create_planarfigures_for_nodule(nodule, spacing, origin, patient_id, session_id, radiologist)
nodule_id = nodule_id + 1
def create_planarfigures_for_nodule(nodule, spacing, origin, patient_id, session_id, radiologist):
"""
Extracts the properties of an given nodule, saves them to the CSV file specified in the
global variable 'path_to_characteristics' and saves all contours for that
nodule as planar figure.
Each contour is given a consecutive number.
"""
global nodule_id
nodule_str="{:08n}".format(nodule_id)
# Extract the properties of the nodule
subtlety=xmlHelper.read_nodule_property(nodule, 'subtlety')
internalStructure=xmlHelper.read_nodule_property(nodule, 'internalStructure')
calcification=xmlHelper.read_nodule_property(nodule, 'calcification')
sphericity=xmlHelper.read_nodule_property(nodule, 'sphericity')
margin=xmlHelper.read_nodule_property(nodule, 'margin')
lobulation=xmlHelper.read_nodule_property(nodule, 'lobulation')
spiculation=xmlHelper.read_nodule_property(nodule, 'spiculation')
texture=xmlHelper.read_nodule_property(nodule, 'texture')
malignancy=xmlHelper.read_nodule_property(nodule, 'malignancy')
# save characteristic and specifics of the nodule to the global CSV-file
global path_to_characteristics
with open(path_to_characteristics,"a") as file:
file.write(";".join([str(patient_id),str(session_id),str(radiologist),str(nodule_str),subtlety,internalStructure,calcification,sphericity,margin,lobulation,spiculation,texture,malignancy])+"\n")
# Extract all rois necessary specified within the Nodule
roi_id=0
for roi in nodule.iter('roi'):
create_planarfigures_for_roi(roi, spacing, origin, patient_id, session_id, nodule_str, roi_id)
roi_id=roi_id+1
def create_planarfigures_for_roi(roi, spacing, origin, patient_id, session_id, nodule_id, roi_id):
"""
Given the section of XML that specifies a certain ROI, this function creates a
planar figure file out of it.
The planar figure is saved in a path following this structure:
path_to_planars/<Patient ID>/<Patient_ID>_<Session_ID>_<Nodule_ID>_<ROI_ID>.pf
with the following properties:
* path_to_planars : Globally specified folder
* <Patient ID> : Unique Patient ID consisting of patient number and an appendix
* <Session_ID> : Number of the reading session / expert. Unique to the given patient only.
* <Nodule ID> : An globally unique ID of the given Nodule
* <ROI ID> : A nodule-wide unique, consecutive number of the current ROI.
"""
# All Rois are within a single Z-plane, so the z-position needs only to be obtained once
z_position = roi.find("imageZposition").text
    # Create file name and ensure that the corresponding folder exists to prevent write errors
target_path = os.path.join(path_to_planars, patient_id)
target_name = os.path.join(target_path, patient_id+"_"+str(session_id)+"_"+str(nodule_id)+"_"+str(roi_id)+".pf")
os.makedirs(target_path, exist_ok=True)
# Convert the given edge information into an XML part describing the planar figure
vertex_string=""
edge_id=0
for edge in roi.iter('edgeMap'):
x=float(edge[0].text)*spacing[0]
y=float(edge[1].text)*spacing[1]
vertex_string=vertex_string+" <Vertex id=\""+str(edge_id)+"\" x=\""+str(x)+"\" y=\""+str(y)+"\" /> \n"
edge_id=edge_id+1
    # If fewer than two points are defined, it is not a complete mesh. This happens
    # if the lesion was too small, so the experts didn't draw spatial annotations.
if edge_id < 2:
return None
# Read the template, replace the corresponding structures and
# save the result as a new planar figure
with open(planar_template,"r") as file:
template=file.read()
template=template.replace("%%origin_z%%", str(z_position))
template=template.replace("%%origin_x%%", str(origin[0]))
template=template.replace("%%origin_y%%", str(origin[1]))
template=template.replace("%%points%%", vertex_string)
with open(target_name,"w") as file:
file.write(template)
def convert_planar_figures_to_masks(image, patient_id):
""" Finds all planar figure for a single patient and converts them to segmentations that match the CT of the patient"""
for planar_figure in glob.glob(os.path.join(path_to_planars,patient_id,"*.pf")):
create_mask_for_planar_figure(image, patient_id, planar_figure)
def create_mask_for_planar_figure(image, patient_id, planar_figure):
"""
Create a segmentation file from a planar figure, using the corresponding ct file.
All Mask files are saved in a folder with the structure of
path_to_nrrds/<patient ID>/planar_masks/<Name of the Planar Figure File>.nrrd
"""
# Create the new filename
file_name=os.path.basename(planar_figure)
target_path = os.path.join(path_to_nrrds, patient_id,"planar_masks")
target_name = os.path.join(target_path, file_name.replace(".pf",".nrrd"))
os.makedirs(target_path, exist_ok=True)
cmd_string=r"MitkCLPlanarFigureToNrrd "+\
"-i \"" + image + "\"" \
" -p \"" + planar_figure + "\"" \
" -o \"" + target_name + "\""
#print(cmd_string)
a=subprocess.Popen(cmd_string,shell=True,cwd=path_to_executables)
a.wait()
return target_name
def merge_planar_figures_per_nodule(image, patient_id):
"""
    There are two problems with the segmentations generated from the planar figures,
    both caused by the way the original data is presented. First, the segmentation of
    each nodule is split across multiple files, since the corresponding ROIs are given
    as slice-wise contours. Second, corresponding annotations of the same nodule are
    not identified, as lesions share no common id between different experts.
    This method tries to match segmentations that come from the same rater and combine
    them. It also tries to identify multiple segmentations of the same nodule by
    different raters, by looking at the overlap of the segmentations.
    It is assumed that two segmentations cover the same nodule if they
    overlap by more than 10 voxels.
    The newly created segmentation has the format
path_to_nrrds/<Patient ID>/<Patient_ID>_<Session_ID>_<Nodule_ID>_<True Nodule ID>.nii.gz
with the following properties:
* path_to_planars : Globally specified folder
* <Patient ID> : Unique Patient ID consisting of patient number and an appendix
* <Session_ID> : Number of the reading session / expert. Unique to the given patient only.
* <Nodule ID> : An globally unique ID of the given Nodule
* <True Nodule ID> : A globally minimum unique ID of the nodule. All masks of this nodule should share the same True Nodule ID
"""
# Loading all masks to numpy arrays and save them in a dictionary.
# The keys of the dictionary match the (preliminary) mask id
origin_path = os.path.join(path_to_nrrds, patient_id,"planar_masks","*.nrrd")
images={}
arrays={}
for mask in glob.glob(origin_path):
mask_name=os.path.basename(mask)
mask_limits=mask_name.split("_")
# The first three properties of the file name (Patient ID, Session ID, and Nodule ID)
        # identify whether a given ROI belongs to a certain Nodule. (ROI ID is ignored)
mask_id=mask_limits[0]+"_"+mask_limits[1]+"_"+mask_limits[2]
# If no array with the mask_id is available, create one
if mask_id not in images.keys():
image=sitk.ReadImage(mask)
images[mask_id]=image
array=sitk.GetArrayFromImage(image)
arrays[mask_id]=array
        # If a planar figure belonging to the given nodule already exists, add
        # the new one to the old one (i.e. merge both segmentations)
else:
image=sitk.ReadImage(mask)
array=sitk.GetArrayFromImage(image)
arrays[mask_id]=arrays[mask_id]+array
for key,idx in zip(images.keys(),range(len(images.keys()))):
# If values larger than 1 are present in a segmentation, there are
# overlaps between two segmentations for this nodule. This should not happen
        # but occurs due to errors in the original XML files
if len(arrays[key][arrays[key]>1])>1:
write_error("Failed due to wrong segmentations: " + key)
continue
# Identify the smallest global nodule ID for the given nodule.
# It is assumed that two segmentations cover the same nodule if more than
# 10 voxels are covered by both segmentations. The global nodule id is
# the smallest nodule id for each nodule
own_id=int(key.split("_")[2])
minimum_id=own_id
for k2 in arrays.keys():
mask=(arrays[key]*arrays[k2])==1
if len(arrays[key][mask])>10:
new_id=int(k2.split("_")[2])
minimum_id=min(minimum_id, new_id)
#Save the new created segmentation
minimum_id="{:08n}".format(minimum_id)
image=sitk.GetImageFromArray(arrays[key])
image.CopyInformation(images[key])
key_parts=key.split("_")
new_key=key_parts[0]+"_"+key_parts[1]+"_"+key_parts[2]+"_"+str(minimum_id)
sitk.WriteImage(image, os.path.join(path_to_nrrds, patient_id,new_key+".nii.gz"))
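# Hedged illustration (helper name is hypothetical, not part of the original script):
# the ">10 overlapping voxels" rule used above can be expressed directly on two binary
# masks loaded as numpy arrays of identical shape.
def _sketch_masks_cover_same_nodule(mask_a, mask_b, min_overlap_voxels=10):
    """Return True if the two binary masks are assumed to cover the same nodule."""
    overlap_voxels = int(np.sum((mask_a * mask_b) == 1))
    return overlap_voxels > min_overlap_voxels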
def parse_xml_file(file):
# Create an XML Tree, use own method to remove namespaces
root=xmlHelper.create_xml_tree(file)
# Find the Study and Series IDs if possible
study_uid=xmlHelper.get_study_uid(root)
series_uid=xmlHelper.get_series_uid(root)
print(file)
print(study_uid, series_uid)
if study_uid is None:
write_error("Failed to find Study UID: " + file)
return
# Find the DICOMS matching the study and series ID.
# Assuming that all DICOMS to a study/series ID are in one folder.
dicom_path, no_of_dicoms=get_dicom_from_study_uid(study_uid, series_uid)
if no_of_dicoms < 10:
print(dicom_path)
print("No DICOM's found for file:",file)
return
print(dicom_path)
# Files are saved in a folder with the structure $PatientID/$StudyID/$SeriesID/$DicomName
# Removing StudyID, SeriesID and DICOM-Name gives a patient ID
long_patient_id=os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(dicom_path))))
patient_id=long_patient_id.replace("LIDC-IDRI-","")
# For some patients, more than one scan is provided (for example due to multiple
    # time points). To ensure that each time point is only scanned once, an appendix
# is added to the patient_id, ensuring that multiple time points can be selected.
for appendix in list_of_appendix:
target_path = os.path.join(path_to_nrrds, patient_id+appendix)
if not os.path.exists(target_path):
patient_id =patient_id+appendix
print(patient_id)
break
    # Create an NRRD file from the DICOMs and read spacing and origin.
nrrd_file=create_nrrd_from_dicoms(dicom_path, patient_id)
spacing, origin = get_spacing_and_origin(nrrd_file)
# Creating multiple planar figures for each reading session.
    # Each session represents the result of a different expert.
    # Each session gets a session ID that is unique for the given patient ID.
    # Same session ids for different patients do not necessarily correspond to the same expert.
print("Creating Planar Figure")
session_id=0
for session in root.iter('readingSession'):
create_planarfigure_for_session(session, spacing, origin, patient_id, session_id)
session_id=session_id+1
convert_planar_figures_to_masks(nrrd_file, patient_id)
print("Merging Planar Figures")
merge_planar_figures_per_nodule(nrrd_file, patient_id)
nodule_id = 0
os.makedirs(os.path.dirname(path_to_characteristics), exist_ok=True)
# Write the CSV header once before iterating over the XML annotation files
with open(path_to_characteristics,"a") as file:
    file.write(";".join(["Patient_ID","Session_ID","Radiologist","Nodule_Str","subtlety","internalStructure","calcification","sphericity","margin","lobulation","spiculation","texture","malignancy"])+"\n")
for xml_file in glob.glob(os.path.join(path_to_xmls,"*","*.xml")):
print(xml_file)
try:
parse_xml_file(xml_file)
except:
write_error("Unspecific error in file : " + xml_file)
|
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
# Custom base model class:
class BaseModel(models.Model):
created_at = models.DateTimeField(verbose_name=_("Created At"), auto_now_add=True)
updated_at = models.DateTimeField(verbose_name=_("Updated At"), auto_now=True)
class Meta:
abstract = True
# Soft delete:
class SoftDeleteModel(models.Model):
# soft delete
is_deleted = models.BooleanField(verbose_name=_("Api Is Deleted"), default=False)
deleted_at = models.DateTimeField(verbose_name=_("Deleted At"), default=None, blank=True, null=True)
def soft_delete(self):
self.is_deleted = True
self.deleted_at = now()
self.save()
class Meta:
abstract = True
|
import asyncio
from aiohttp import ClientSession
from aiochclient import ChClient
async def some_query(client: ChClient, offset, limit):
await client.execute(
"INSERT INTO t VALUES", *((i, i / 2) for i in range(offset, offset + limit))
)
async def main():
async with ClientSession() as s:
client = ChClient(s, url="http://localhost:8123")
# preparing database
await client.execute("CREATE TABLE t (a UInt8, b Float32) ENGINE = Memory")
# making queries in parallel
await asyncio.gather(
some_query(client, 1000, 1000),
some_query(client, 2000, 1000),
some_query(client, 3000, 1000),
some_query(client, 4000, 1000),
some_query(client, 5000, 1000),
some_query(client, 6000, 1000),
)
if __name__ == "__main__":
# if >=3.7:
asyncio.run(main())
# if <3.7:
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main())
# loop.close()
|
import numpy as np
import pytest
from rolldecayestimators import tests
import rolldecayestimators
import pandas as pd
import os
from rolldecayestimators import ikeda_speed as ikeda
from numpy import pi, sqrt
from numpy.testing import assert_almost_equal
from matplotlib import pyplot as plt
@pytest.fixture
def lewis_coefficients():
file_path = os.path.join(rolldecayestimators.path,'faust_lewis.csv')
df = pd.read_csv(file_path, sep=';')
yield df
def test_eddy():
N = 21
lpp=100.0
OG=5.5
R=2
d=1
wE=0.2
fi_a=np.deg2rad(3)
xs = np.linspace(0,lpp, N)
B = T = S = np.ones(N)
a, a_1, a_3, sigma_s, H = ikeda.calculate_sectional_lewis(B_s=B, T_s=T, S_s=S)
B_E = ikeda.eddy(bwl=B, a_1=a_1, a_3=a_3, sigma=sigma_s, xs=xs, H0=H, Ts=T, OG=OG, R=R, wE=wE, fi_a=fi_a)
def test_eddy_faust(lewis_coefficients):
"""
Reproduction of Carl-Johans Matlab implementation for Faust.
Parameters
----------
lewis_coefficients
Returns
-------
"""
lc=lewis_coefficients
T = 27.6
wE = 2*pi*1/T # circular frequency
d = 9.5 # Draught of hull [m]
vcg = 14.9 # roll axis (vertical centre of gravity) [m]
OG = -1 * (vcg - d) # distance from roll axis to still water level
    fi_a = 10*pi/180            # roll amplitude [rad]
    R = 5                       # Bilge radius
B_E = ikeda.eddy(bwl=lc['bwl'], a_1=lc['a1'], a_3=lc['a3'], sigma=lc['sigma'], xs=lc['x'], H0=lc['H'], Ts=lc['Ts'],
OG=OG, R=R, wE=wE, fi_a=fi_a)
#assert_almost_equal(actual=B_E, desired=1175062.2691943)
ScaleF = 1#/29.565 # Scale Factor [-]
Cb = 0.61 # Block coeff
L = 220*ScaleF # Length
vcg = 14.9*ScaleF # roll axis (vertical centre of gravity) [m]
B = 32.26*ScaleF # Breadth of hull [m]
d = 9.5*ScaleF # Draught of hull [m]
g = 9.81
ra = 1025 # density of water
disp = L*B*d*Cb # Displacement
ND_factorB = sqrt(B / (2 * g)) / (ra * disp * (B**2))
w_hat = np.linspace(0,1,100)
w = sqrt(2) * w_hat / sqrt(B / g)
B_E = ikeda.eddy(bwl=lc['bwl'], a_1=lc['a1'], a_3=lc['a3'], sigma=lc['sigma'], xs=lc['x'], H0=lc['H'], Ts=lc['Ts'],
OG=OG, R=R, wE=w, fi_a=fi_a)
B_E_hat = B_E*ND_factorB
fig,ax=plt.subplots()
ax.plot(w_hat, B_E_hat)
plt.show()
def test_calculate_sectional_lewis():
N=21
B=T=S=np.ones(N)
a, a_1, a_3, sigma_s, H = ikeda.calculate_sectional_lewis(B_s=B, T_s=T, S_s=S)
|
from scipy.special import logsumexp
from scipy.stats import norm
from .data_base import np, gaussian_kde, RegressManager, check_reg_limits
class PointManager(RegressManager):
def get_percentiles(self, mean, var, p):
return np.array([norm.ppf(i, mean, np.sqrt(var)) for i in p]).T
@check_reg_limits
def deregularize_var(self, var):
return var/((2/np.diff(self.reg_limits))**2)
def get_pdf(self, mean, var, x):
return np.array([norm.pdf(j, mean, np.sqrt(var)) for j in x]).T
def get_cdf(self, mean, var, x):
return np.array([norm.cdf(j, mean, np.sqrt(var)) for j in x]).T
def reweight_by_prior(self, mean, var, N, prior_dist):
prior_range = (min(prior_dist), max(prior_dist))
self.bin_limits = prior_range
self.nbins = N
Y = self.get_pdf(mean, var, self.get_bin_centers()).T
if len(Y.shape) == 1:
Y = np.expand_dims(Y, 0)
kde = gaussian_kde(prior_dist, bw_method=0.3)
kde_pdf = kde(self.get_bin_centers())
Y /= kde_pdf[np.newaxis, :]
Y /= np.sum(Y, axis=-1)[:, np.newaxis]
if Y.shape[0] == 1:
Y = Y[0, ...]
return Y
class PointDropoutManager(PointManager):
def get_mean(self, Y):
return self.deregularize(np.mean(Y, axis=-1))
def get_variance(self, Y, tau):
return self.deregularize_var(np.var(Y, axis=-1) + 1./(tau))
def get_ll(self, Y_hat, Y, tau):
ll = logsumexp(-0.5*tau*(Y_hat - Y.reshape(-1, 1))**2, axis=1) - \
np.log(Y_hat.shape[1]) - 0.5*np.log(2*np.pi) + 0.5 * np.log(tau)
return np.mean(ll)
|
#App config
#webx
#central config
CENTRAL_CONFIG = 'c:/mtp/avl_dc/bli_monitor/conf/maboss_config.py'
#windows service
SERVICE_NAME = '_MaboTech_Monitor_HotTest'
SERVICE_DESC = 'HotTest Data Collection'
|
from sklearn.ensemble import RandomForestRegressor
import goal_metrics
from mle import engine
from mle.problem_type import ProblemType
def main():
model_config = {
'problem_type': ProblemType.REGRESSION,
'model_class': RandomForestRegressor,
'train_data_path': 'example_data/train.csv',
'id_column': 'Id',
'target_column': 'SalePrice',
'goal_metric': goal_metrics.ROOT_MEAN_SQUARED_LOG_ERROR
}
engine.execute(model_config)
if __name__ == '__main__':
main()
|
from six import add_metaclass
from abc import ABCMeta
from abc import abstractmethod
@add_metaclass(ABCMeta)
class AbstractAdditionalInput(object):
""" Represents a possible additional independent input for a model
"""
@abstractmethod
def get_n_parameters(self):
""" Get the number of parameters for the additional input
:return: The number of parameters
:rtype: int
"""
@abstractmethod
def get_parameters(self):
""" Get the parameters for the additional input
:return: An array of parameters
:rtype: array of\
:py:class:`spynnaker.pyNN.models.neural_properties.neural_parameter.NeuronParameter`
"""
@abstractmethod
def get_n_cpu_cycles_per_neuron(self):
""" Get the number of CPU cycles executed by\
additional_input_get_input_value_as_current and\
additional_input_has_spiked
"""
def get_sdram_usage_per_neuron_in_bytes(self):
""" Get the SDRAM usage of this additional input in bytes
:return: The SDRAM usage
:rtype: int
"""
return self.get_n_parameters() * 4
def get_dtcm_usage_per_neuron_in_bytes(self):
""" Get the DTCM usage of this additional input in bytes
:return: The DTCM usage
:rtype: int
"""
return self.get_n_parameters() * 4
|
from django.conf import settings
from model_utils import Choices
from assopy.models import Vat, VatFare
from conference.models import FARE_TICKET_TYPES, Conference, Fare
# due to historical reasons this one is basically hardcoded in various places.
SOCIAL_EVENT_FARE_CODE = "VOUPE03"
SIM_CARD_FARE_CODE = "SIM1"
FARE_CODE_TYPES = Choices(
("E", "EARLY_BIRD", "Early Bird"),
("R", "REGULAR", "Regular"),
("D", "ON_DESK", "On Desk"),
)
FARE_CODE_VARIANTS = Choices(
("S", "STANDARD", "Standard"),
("L", "LIGHT", "Standard Light (no trainings)"),
("T", "TRAINING", "Trainings (ep2018+)"),
("C", "COMBINED", "Combined (ep2019+)"),
("D", "DAYPASS", "Day Pass"),
)
FARE_CODE_GROUPS = Choices(
("S", "STUDENT", "Student"),
("P", "PERSONAL", "Personal"),
("C", "COMPANY", "Company"),
)
FARE_CODE_REGEXES = {
"types": {
FARE_CODE_TYPES.EARLY_BIRD: "^TE..$",
FARE_CODE_TYPES.REGULAR: "^TR..$",
FARE_CODE_TYPES.ON_DESK: "^TD..$",
},
"variants": {
FARE_CODE_VARIANTS.STANDARD: "^T.S.$",
FARE_CODE_VARIANTS.LIGHT: "^T.L.$",
FARE_CODE_VARIANTS.TRAINING: "^T.T.$",
FARE_CODE_VARIANTS.COMBINED: "^T.C.$",
FARE_CODE_VARIANTS.DAYPASS: "^T.D.$",
},
"groups": {
FARE_CODE_GROUPS.STUDENT: "^T..S$",
FARE_CODE_GROUPS.PERSONAL: "^T..P$",
FARE_CODE_GROUPS.COMPANY: "^T..C$",
}
}
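# Anatomy of a fare code (illustrative note): every conference fare code is four
# characters, "T" + type + variant + group, matching the Choices above.  For example,
# the code "TESP" reads as Early Bird (E) / Standard (S) / Personal (P), and the
# regex "^TE..$" above matches every Early Bird code regardless of variant and group.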
class FareIsNotAvailable(Exception):
pass
def all_possible_fare_codes():
fare_codes = {
"T" + type_code + variant_code + group_code:
"%s %s %s" % (type_name, variant_name, group_name)
for type_code, type_name in FARE_CODE_TYPES._doubles
for variant_code, variant_name in FARE_CODE_VARIANTS._doubles
for group_code, group_name in FARE_CODE_GROUPS._doubles
}
fare_codes[SOCIAL_EVENT_FARE_CODE] = "Social Event"
fare_codes[SIM_CARD_FARE_CODE] = "Sim Card"
return fare_codes
ALL_POSSIBLE_FARE_CODES = all_possible_fare_codes()
def is_fare_code_valid(fare_code):
return fare_code in ALL_POSSIBLE_FARE_CODES
def get_available_fares(date):
"""
    Returns all fares that were available at a given point in time,
    regardless of whether they were sold out or not.
"""
return Fare.objects.filter(
start_validity__lte=date,
end_validity__gte=date,
)
def get_available_fares_as_dict(date):
return {f.code: f for f in get_available_fares(date)}
def get_prices_of_available_fares(date):
codes_with_prices = get_available_fares(date).values_list('code', 'price')
return {f[0]: f[1] for f in codes_with_prices}
def create_fare_for_conference(code, conference, price,
start_validity, end_validity,
vat_rate):
assert is_fare_code_valid(code)
assert isinstance(conference, str), "conference should be a string"
assert isinstance(vat_rate, Vat)
if start_validity is not None and end_validity is not None:
assert start_validity <= end_validity
if code == SOCIAL_EVENT_FARE_CODE:
ticket_type = FARE_TICKET_TYPES.event
elif code == SIM_CARD_FARE_CODE:
ticket_type = FARE_TICKET_TYPES.other
else:
ticket_type = FARE_TICKET_TYPES.conference
# This is inefficient, we should pass Conference object as argument instead
# of name.
conference, _ = Conference.objects.get_or_create(
code=conference,
)
if not conference.name:
conference.name = settings.CONFERENCE_NAME
conference.save()
recipient_type = code[3].lower() # same as lowercase last letter of code
name = "%s - %s" % (conference.name, ALL_POSSIBLE_FARE_CODES[code])
fare, _ = Fare.objects.get_or_create(
conference=conference.code,
code=code,
name=name,
defaults=dict(
description=name,
price=price,
recipient_type=recipient_type,
ticket_type=ticket_type,
start_validity=start_validity,
end_validity=end_validity,
)
)
VatFare.objects.get_or_create(fare=fare, vat=vat_rate)
return fare
def pre_create_typical_fares_for_conference(conference, vat_rate,
print_output=False):
fares = []
for fare_code in ALL_POSSIBLE_FARE_CODES.keys():
fare = create_fare_for_conference(
code=fare_code,
conference=conference,
price=210, # random price, we'll change it later (div. by 3)
start_validity=None, end_validity=None,
vat_rate=vat_rate,
)
if print_output:
print("Created fare %s" % fare)
fares.append(fare)
return fares
def set_other_fares_dates(conference, start_date, end_date):
assert start_date <= end_date
other_fares = Fare.objects.filter(
conference=conference,
code__in=[SOCIAL_EVENT_FARE_CODE, SIM_CARD_FARE_CODE],
)
other_fares.update(start_validity=start_date, end_validity=end_date)
def set_early_bird_fare_dates(conference, start_date, end_date):
assert start_date <= end_date
early_birds = Fare.objects.filter(
conference=conference,
code__regex=FARE_CODE_REGEXES["types"][FARE_CODE_TYPES.EARLY_BIRD],
)
assert (
early_birds.count()
== len(FARE_CODE_VARIANTS) * len(FARE_CODE_GROUPS)
== 3 * 5
)
early_birds.update(start_validity=start_date, end_validity=end_date)
def set_regular_fare_dates(conference, start_date, end_date):
assert start_date <= end_date
fares = Fare.objects.filter(
conference=conference,
code__regex=FARE_CODE_REGEXES['types'][FARE_CODE_TYPES.REGULAR]
)
assert (
fares.count()
== len(FARE_CODE_VARIANTS) * len(FARE_CODE_GROUPS)
== 3 * 5
)
fares.update(start_validity=start_date, end_validity=end_date)
|
"""The entrypoint into the CLI for tfimgsort"""
from .tfimgsort import main
main()
|
"""
The regex blocking method is from jieba.
https://github.com/fxsjy/jieba/blob/master/jieba/__init__.py
Algorithm-wise, use DP with uni-gram probabilities.
"""
from collections import defaultdict, deque
import math
import time
import re
re_dict = re.compile('^(.+?)( [0-9]+)?( [a-z]+)?$', re.U)
re_han = re.compile("([\u4E00-\u9FD5]+)", re.U)
re_skip = re.compile("[^a-zA-Z0-9+#\n]", re.U)
class Lexicon():
def __init__(self, dict_path):
"""
Init lexicon with dict path.
        Format is 'word freq pos', space separated. Note that pos is not handled so far.
:param dict_path:
"""
self.total = 0
self.dict = {}
with open(dict_path, 'r', encoding='utf-8')as f:
for line in f:
word, freq, tag = re_dict.match(line).groups()
if freq is not None:
freq = freq.strip()
# give a minimal 1 count for rare words without freq as smoothing
freq = max(int(freq), 1)
self.dict[word] = freq
self.total += freq
# prefix but not yet a word will be 0
# mimic of prefix check of trie for acceleration
for i in range(len(word)):
sub_word = word[:i + 1]
if sub_word not in self.dict:
self.dict[sub_word] = 0
def check_prob(self, word):
"""
Return prob in neg log format.
:param word:
:return: 0 for prefix, neg log for word. Otherwise None
"""
if word in self.dict:
freq = self.dict[word]
            if freq != 0:
return -math.log(freq/self.total)
else:
return 0
else:
return None
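    # Worked example (illustrative): a word with freq 5 in a lexicon whose total
    # count is 1,000,000 gets -log(5 / 1e6), i.e. about 12.2, as its cost.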
def has_prefix(self, word):
return word in self.dict
def is_word(self, word):
return word in self.dict and self.dict[word] != 0
class Decoder():
def __init__(self):
# model will provide probability
self.lexicon = Lexicon('user_dict.txt')
def decode(self, input):
"""
decode the input sentence.
        This method first cuts the input sentence into blocks, using non-Chinese symbols as natural boundaries.
        It is vital for speed: in a local experiment it was about 50x faster.
:param input:
:return:
"""
blocks = re_han.split(input)
for block in blocks:
if not block:
continue
if re_han.match(block):
for word in self.decode_(block):
yield word
else:
if block == '':
continue
else:
matched = False
tmp = re_skip.split(block)
for x in tmp:
if re_skip.match(x):
matched = True
yield x
if not matched:
yield block
def decode_(self, input):
"""
        Use DP to find the best path.
        This method decodes with backward lookup. Note that forward lookup is also a choice.
:param input: The raw input sequence
:return: Best path as list of words
"""
        # build frames
        # frames maps an end index to the list of start indices s such that input[s:end] is a valid word (backward lookup)
frames = defaultdict(list)
input_size = len(input)
for s in range(input_size):
e = s + 1
while self.lexicon.has_prefix(input[s:e]) and e <= input_size:
if self.lexicon.is_word(input[s:e]):
frames[e].append(s)
e += 1
# in case of oov symbols, segment to char
if s not in frames:
                frames[s] = [s - 1]
# decode best path with simple dp from start
best_path = {}
best_path[0] = (0, 0)
for i in range(1, input_size + 1):
for s in frames[i]:
word = input[s:i]
prob = self.lexicon.check_prob(word)
neg_log = prob + best_path[s][1]
if i not in best_path or neg_log < best_path[i][1]:
best_path[i] = (s, neg_log)
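        # The loop above is a standard unigram DP (Viterbi-style) recurrence:
        #   best_path[i] = min over s in frames[i] of (best_path[s] cost + -log P(input[s:i]))
        # i.e. the cheapest segmentation of the first i characters, ending with the word input[s:i].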
# parse results
result = deque()
idx = input_size
while idx > 0:
s = best_path[idx][0]
result.appendleft(input[s:idx])
idx = s
for x in result:
yield x
if __name__ == "__main__":
decoder = Decoder()
start_time = time.time()
result = decoder.decode('结婚的和尚未结婚的,都是很nice cool的“靠谱人士”')
end_time = time.time()
print(' '.join(result))
print('{} s'.format(end_time - start_time))
|
import numpy as np
import ini
import diagn
import h5py
from os.path import join as pjoin
params = ini.parse(open('input.ini').read())
####### Parameters #######
# box size, mm
lx = float(params['grid']['lx'])
ly = float(params['grid']['ly'])
# intervals in x-, y- directions, mm
dx = float(params['grid']['dx'])
dy = float(params['grid']['dy'])
# Thermal diffusivity of steel, mm2.s-1
D = float(params['par']['D'])
# Number of timesteps
nsteps = int(params['time']['nsteps'])
dnn_start = int(params['time']['dnn_start'])
nn = int(params['dnn']['nn'])
epochs = int(params['dnn']['epochs'])
patience = int(params['dnn']['patience'])
batch_size=int(params['dnn']['batch_size'])
nlayer = int(params['dnn']['nlayer'])
plot_fig=bool(params['figures']['plot_fig'])
use_latex=bool(params['figures']['use_latex'])
add_labels=bool(params['figures']['add_labels'])
dumpData = bool(params['diagnostics']['dumpData'])
# vtkData = bool(params['diagnostics']['vtkData'])
nx, ny = int(lx/dx), int(ly/dy)
if dumpData:
f = h5py.File(pjoin("data","data.hdf5"),"w")
diagn.attributes(f,lx,ly,nx,ny,nsteps)
nbx = int(nx/2)
slice = int(ny/2)
dx2, dy2 = dx*dx, dy*dy
x = np.linspace(0,lx,nx)
y = np.linspace(0,ly,ny)
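# Note: dt below is the largest stable time step of the explicit (FTCS) scheme for the
# 2-D heat equation, dt <= dx^2*dy^2 / (2*D*(dx^2 + dy^2)); larger steps make the
# forward-Euler update numerically unstable.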
dt = dx2 * dy2 / (2 * D * (dx2 + dy2))
|
from getratings.models.ratings import Ratings
class NA_Jhin_Top_Aatrox(Ratings):
pass
class NA_Jhin_Top_Ahri(Ratings):
pass
class NA_Jhin_Top_Akali(Ratings):
pass
class NA_Jhin_Top_Alistar(Ratings):
pass
class NA_Jhin_Top_Amumu(Ratings):
pass
class NA_Jhin_Top_Anivia(Ratings):
pass
class NA_Jhin_Top_Annie(Ratings):
pass
class NA_Jhin_Top_Ashe(Ratings):
pass
class NA_Jhin_Top_AurelionSol(Ratings):
pass
class NA_Jhin_Top_Azir(Ratings):
pass
class NA_Jhin_Top_Bard(Ratings):
pass
class NA_Jhin_Top_Blitzcrank(Ratings):
pass
class NA_Jhin_Top_Brand(Ratings):
pass
class NA_Jhin_Top_Braum(Ratings):
pass
class NA_Jhin_Top_Caitlyn(Ratings):
pass
class NA_Jhin_Top_Camille(Ratings):
pass
class NA_Jhin_Top_Cassiopeia(Ratings):
pass
class NA_Jhin_Top_Chogath(Ratings):
pass
class NA_Jhin_Top_Corki(Ratings):
pass
class NA_Jhin_Top_Darius(Ratings):
pass
class NA_Jhin_Top_Diana(Ratings):
pass
class NA_Jhin_Top_Draven(Ratings):
pass
class NA_Jhin_Top_DrMundo(Ratings):
pass
class NA_Jhin_Top_Ekko(Ratings):
pass
class NA_Jhin_Top_Elise(Ratings):
pass
class NA_Jhin_Top_Evelynn(Ratings):
pass
class NA_Jhin_Top_Ezreal(Ratings):
pass
class NA_Jhin_Top_Fiddlesticks(Ratings):
pass
class NA_Jhin_Top_Fiora(Ratings):
pass
class NA_Jhin_Top_Fizz(Ratings):
pass
class NA_Jhin_Top_Galio(Ratings):
pass
class NA_Jhin_Top_Gangplank(Ratings):
pass
class NA_Jhin_Top_Garen(Ratings):
pass
class NA_Jhin_Top_Gnar(Ratings):
pass
class NA_Jhin_Top_Gragas(Ratings):
pass
class NA_Jhin_Top_Graves(Ratings):
pass
class NA_Jhin_Top_Hecarim(Ratings):
pass
class NA_Jhin_Top_Heimerdinger(Ratings):
pass
class NA_Jhin_Top_Illaoi(Ratings):
pass
class NA_Jhin_Top_Irelia(Ratings):
pass
class NA_Jhin_Top_Ivern(Ratings):
pass
class NA_Jhin_Top_Janna(Ratings):
pass
class NA_Jhin_Top_JarvanIV(Ratings):
pass
class NA_Jhin_Top_Jax(Ratings):
pass
class NA_Jhin_Top_Jayce(Ratings):
pass
class NA_Jhin_Top_Jhin(Ratings):
pass
class NA_Jhin_Top_Jinx(Ratings):
pass
class NA_Jhin_Top_Kalista(Ratings):
pass
class NA_Jhin_Top_Karma(Ratings):
pass
class NA_Jhin_Top_Karthus(Ratings):
pass
class NA_Jhin_Top_Kassadin(Ratings):
pass
class NA_Jhin_Top_Katarina(Ratings):
pass
class NA_Jhin_Top_Kayle(Ratings):
pass
class NA_Jhin_Top_Kayn(Ratings):
pass
class NA_Jhin_Top_Kennen(Ratings):
pass
class NA_Jhin_Top_Khazix(Ratings):
pass
class NA_Jhin_Top_Kindred(Ratings):
pass
class NA_Jhin_Top_Kled(Ratings):
pass
class NA_Jhin_Top_KogMaw(Ratings):
pass
class NA_Jhin_Top_Leblanc(Ratings):
pass
class NA_Jhin_Top_LeeSin(Ratings):
pass
class NA_Jhin_Top_Leona(Ratings):
pass
class NA_Jhin_Top_Lissandra(Ratings):
pass
class NA_Jhin_Top_Lucian(Ratings):
pass
class NA_Jhin_Top_Lulu(Ratings):
pass
class NA_Jhin_Top_Lux(Ratings):
pass
class NA_Jhin_Top_Malphite(Ratings):
pass
class NA_Jhin_Top_Malzahar(Ratings):
pass
class NA_Jhin_Top_Maokai(Ratings):
pass
class NA_Jhin_Top_MasterYi(Ratings):
pass
class NA_Jhin_Top_MissFortune(Ratings):
pass
class NA_Jhin_Top_MonkeyKing(Ratings):
pass
class NA_Jhin_Top_Mordekaiser(Ratings):
pass
class NA_Jhin_Top_Morgana(Ratings):
pass
class NA_Jhin_Top_Nami(Ratings):
pass
class NA_Jhin_Top_Nasus(Ratings):
pass
class NA_Jhin_Top_Nautilus(Ratings):
pass
class NA_Jhin_Top_Nidalee(Ratings):
pass
class NA_Jhin_Top_Nocturne(Ratings):
pass
class NA_Jhin_Top_Nunu(Ratings):
pass
class NA_Jhin_Top_Olaf(Ratings):
pass
class NA_Jhin_Top_Orianna(Ratings):
pass
class NA_Jhin_Top_Ornn(Ratings):
pass
class NA_Jhin_Top_Pantheon(Ratings):
pass
class NA_Jhin_Top_Poppy(Ratings):
pass
class NA_Jhin_Top_Quinn(Ratings):
pass
class NA_Jhin_Top_Rakan(Ratings):
pass
class NA_Jhin_Top_Rammus(Ratings):
pass
class NA_Jhin_Top_RekSai(Ratings):
pass
class NA_Jhin_Top_Renekton(Ratings):
pass
class NA_Jhin_Top_Rengar(Ratings):
pass
class NA_Jhin_Top_Riven(Ratings):
pass
class NA_Jhin_Top_Rumble(Ratings):
pass
class NA_Jhin_Top_Ryze(Ratings):
pass
class NA_Jhin_Top_Sejuani(Ratings):
pass
class NA_Jhin_Top_Shaco(Ratings):
pass
class NA_Jhin_Top_Shen(Ratings):
pass
class NA_Jhin_Top_Shyvana(Ratings):
pass
class NA_Jhin_Top_Singed(Ratings):
pass
class NA_Jhin_Top_Sion(Ratings):
pass
class NA_Jhin_Top_Sivir(Ratings):
pass
class NA_Jhin_Top_Skarner(Ratings):
pass
class NA_Jhin_Top_Sona(Ratings):
pass
class NA_Jhin_Top_Soraka(Ratings):
pass
class NA_Jhin_Top_Swain(Ratings):
pass
class NA_Jhin_Top_Syndra(Ratings):
pass
class NA_Jhin_Top_TahmKench(Ratings):
pass
class NA_Jhin_Top_Taliyah(Ratings):
pass
class NA_Jhin_Top_Talon(Ratings):
pass
class NA_Jhin_Top_Taric(Ratings):
pass
class NA_Jhin_Top_Teemo(Ratings):
pass
class NA_Jhin_Top_Thresh(Ratings):
pass
class NA_Jhin_Top_Tristana(Ratings):
pass
class NA_Jhin_Top_Trundle(Ratings):
pass
class NA_Jhin_Top_Tryndamere(Ratings):
pass
class NA_Jhin_Top_TwistedFate(Ratings):
pass
class NA_Jhin_Top_Twitch(Ratings):
pass
class NA_Jhin_Top_Udyr(Ratings):
pass
class NA_Jhin_Top_Urgot(Ratings):
pass
class NA_Jhin_Top_Varus(Ratings):
pass
class NA_Jhin_Top_Vayne(Ratings):
pass
class NA_Jhin_Top_Veigar(Ratings):
pass
class NA_Jhin_Top_Velkoz(Ratings):
pass
class NA_Jhin_Top_Vi(Ratings):
pass
class NA_Jhin_Top_Viktor(Ratings):
pass
class NA_Jhin_Top_Vladimir(Ratings):
pass
class NA_Jhin_Top_Volibear(Ratings):
pass
class NA_Jhin_Top_Warwick(Ratings):
pass
class NA_Jhin_Top_Xayah(Ratings):
pass
class NA_Jhin_Top_Xerath(Ratings):
pass
class NA_Jhin_Top_XinZhao(Ratings):
pass
class NA_Jhin_Top_Yasuo(Ratings):
pass
class NA_Jhin_Top_Yorick(Ratings):
pass
class NA_Jhin_Top_Zac(Ratings):
pass
class NA_Jhin_Top_Zed(Ratings):
pass
class NA_Jhin_Top_Ziggs(Ratings):
pass
class NA_Jhin_Top_Zilean(Ratings):
pass
class NA_Jhin_Top_Zyra(Ratings):
pass
|
import os
import sys
import cv2
import numpy as np
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
from skimage import io
from matplotlib import pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
dataset_dir = "/media/hzh/work/workspace/mmdetection/data/coco"
subset = "train"
year = "2017"
coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year))
catIds = coco.getCatIds(catNms=['building'])
imgIds = coco.getImgIds(catIds=catIds)
# imgIds = coco.getImgIds(imgIds=[300784])
img = coco.loadImgs(imgIds[np.random.randint(0, len(imgIds))])[0]
I = io.imread(img['coco_url'])
plt.axis('off')
plt.imshow(I)
plt.show()
io.imsave(os.path.join(dataset_dir, img['file_name']), I)
bg = np.zeros((img['height'], img['width'], 3))
plt.imshow(bg)
plt.axis('off')
annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
anns = coco.loadAnns(annIds)
print(img['id'])
coco.showAnns(anns)
plt.show()
|
import os
# key of locking gpu
GPU_LOCKING_SET = "gpu_locking_set"
# gpu usage threshold for dispatch
GPU_USAGE_THRESHOLD = float(os.environ.get("GPU_USAGE_THRESHOLD", 0.8))
# gpu lock time
GPU_LOCK_MINUTES = int(os.environ.get("GPU_LOCK_MINUTES", 3))
|
from uqcsbot import bot, Command
from uqcsbot.utils.command_utils import loading_status
from typing import Tuple, Dict, List, Optional
import feedparser
ARTICLES_TO_POST = 5
RSS_URL = "http://feeds.feedburner.com/TechCrunch/"
TECHCRUNCH_URL = "https://techcrunch.com"
def get_tech_crunch_data() -> Optional[List[Dict[str, str]]]:
"""
Returns data from TechCrunch RSS feed
"""
data = feedparser.parse(RSS_URL)
if data.status != 200:
return None
return data.entries
def get_data_from_article(news: List[Dict[str, str]], index: int) -> Tuple[str, str]:
"""
Returns the title of the article and the link
Tuple returned: (title, url)
"""
return (news[index]['title'], news[index]['link'])
@bot.on_command("techcrunch")
@loading_status
def handle_news(command: Command) -> None:
"""
Prints the 5 top-most articles in the Latest News Section of TechCrunch
using RSS feed
"""
message = f"*Latest News from <{TECHCRUNCH_URL}|_TechCrunch_> :techcrunch:*\n"
news = get_tech_crunch_data()
if news is None:
bot.post_message(command.channel_id, "There was an error accessing "
"TechCrunch RSS feed")
return
for i in range(ARTICLES_TO_POST):
title, url = get_data_from_article(news, i)
        # Format the message as a clickable headline that links to the article.
        # The articles are bullet pointed.
message += f"• <{url}|{title}>\n"
# Additional parameters ensure that the links don't show as big articles
# underneath the input
bot.post_message(command.channel_id, message, unfurl_links=False, unfurl_media=False)
|
#!/usr/bin/env python3
import sys
import os
from importlib.machinery import SourceFileLoader
from scapy.all import *
from scapy.contrib.geneve import GENEVE
def hexstring(p):
s = bytes(p.__class__(p))
return ",".join("0x{:02x}".format(c) for c in s)
def output_test(filename, tests):
(name, ext) = os.path.basename(filename).split(".")
print('/* DO NOT EDIT: automatically generated by test_genpackets.py */')
print('/* clang-format off */')
print('test_t tests_{}[] = {{'.format(name))
for t in tests:
print(' {')
print(' .name = "{}",'.format(t[0]))
print(' .nsend = {},'.format(len(t[1])))
print(' .send = (char []){{{}}},'.format(hexstring(t[1])))
print(' .nexpect = {},'.format(len(t[2])))
print(' .expect = (char []){{{}}},'.format(hexstring(t[2])))
print(' .expect_next_index = {}'.format(t[3]))
print(' },')
print('};')
print('/* clang-format on */')
# Read tests from file
for filename in sys.argv[1:]:
with open(filename) as f:
content = f.read().replace('\n', '')
tests = eval(content)
output_test(filename, tests)
|
import rtree
import pandas as pd
from scipy.optimize import linear_sum_assignment
import numpy as np
def create_rtree_from_polygon(coords_list):
"""
Args:
coords_list: tuple with coordinates of a polygon
Returns:
index: indexed bounding boxes
"""
index = rtree.index.Index(interleaved=True)
for coords_idx, coords in enumerate(coords_list):
index.insert(coords_idx, coords)
return index
def get_inter_area(box_a, box_b):
"""
Args:
box_a: (xmin, ymin, xmax, ymax)
box_b: (xmin, ymin, xmax, ymax)
Returns:
intersection_area: intersection area between two points
"""
x_a = max(box_a[0], box_b[0])
y_a = max(box_a[1], box_b[1])
x_b = min(box_a[2], box_b[2])
y_b = min(box_a[3], box_b[3])
intersection_area = max(0, x_b - x_a) * max(0, y_b - y_a)
return intersection_area
def _iou_(bbox_a, bbox_b):
"""Intersection over union"""
# compute the area of both the prediction and ground truth rectangles
box_A_area = abs((bbox_a[2] - bbox_a[0]) * (bbox_a[3] - bbox_a[1]))
box_B_area = abs((bbox_b[2] - bbox_b[0]) * (bbox_b[3] - bbox_b[1]))
inter_area = get_inter_area(bbox_a, bbox_b)
union = float(box_A_area + box_B_area - inter_area)
return inter_area / union
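# Worked example (illustrative): box_a = (0, 0, 2, 2) and box_b = (1, 1, 3, 3) intersect
# in a 1x1 square; each box has area 4, so IoU = 1 / (4 + 4 - 1) = 1/7, roughly 0.14.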
def _overlap_(test_poly, true_polys, rtree_index):
"""Calculate overlap between one polygon and all ground truth by area
Args:
test_poly: pd.Series
true_polys: pd.DataFrame
rtree_index: index of bounding boxes
Returns:
results: pd.DataFrame
"""
prediction_id = []
truth_id = []
area = []
matched_list = list(rtree_index.intersection(test_poly['bbox']))
for index, true_row in true_polys.iterrows():
if index in matched_list:
intersection_area = get_inter_area(true_row['bbox'], test_poly['bbox'])
else:
intersection_area = 0
prediction_id.append(test_poly['prediction_id'])
truth_id.append(true_row['truth_id'])
area.append(intersection_area)
results = pd.DataFrame({
'prediction_id': prediction_id,
'truth_id': truth_id,
'area': area
})
return results
def _overlap_all_(pred_polys, true_polys, rtree_index):
"""Find area of overlap among all sets of ground truth and prediction"""
results = []
for i, row in pred_polys.iterrows():
result = _overlap_(test_poly=row,
true_polys=true_polys,
rtree_index=rtree_index)
results.append(result)
results = pd.concat(results, ignore_index=True)
return results
def calculate_iou(predictions, ground_truth):
"""Intersection over union"""
pred_polys = predictions.copy()
true_polys = ground_truth.copy()
true_polys['bbox'] = true_polys.apply(lambda x: (x.xmin, x.ymin, x.xmax, x.ymax), axis=1)
pred_polys['bbox'] = pred_polys.apply(lambda x: (x.xmin, x.ymin, x.xmax, x.ymax), axis=1)
# Create index columns
true_polys['truth_id'] = true_polys.index.values
pred_polys['prediction_id'] = pred_polys.index.values
rtree_index = create_rtree_from_polygon(true_polys['bbox'])
# find overlap among all sets
overlap_df = _overlap_all_(pred_polys, true_polys, rtree_index)
# Create cost matrix for assignment
matrix = overlap_df.pivot(index='truth_id', columns='prediction_id', values='area').values
row_ind, col_ind = linear_sum_assignment(matrix, maximize=True)
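    # The cost matrix has one row per ground-truth box and one column per prediction;
    # with maximize=True the Hungarian solver picks the pairing with the largest total
    # overlap area, so row_ind indexes truth boxes and col_ind their matched predictions.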
    # Build the IoU dataframe: matched prediction/ground-truth pairs get their IoU;
    # all others get IoU = 0 and are filtered out later
iou_df = []
for index, row in true_polys.iterrows():
y_true = true_polys.loc[index]
if index in row_ind:
matched_id = col_ind[np.where(index == row_ind)[0][0]]
y_pred = pred_polys[pred_polys['prediction_id'] == matched_id].loc[matched_id]
iou = _iou_(y_pred['bbox'], y_true['bbox'])
pred_bbox = y_pred['bbox']
else:
iou = 0
matched_id = None
pred_bbox = None
iou_df.append(pd.DataFrame({
'prediction_id': [matched_id],
'truth_id': [index],
'IoU': iou,
'prediction_bbox': [pred_bbox],
'truth_bbox': [y_true['bbox']]
}))
iou_df = pd.concat(iou_df)
iou_df = iou_df.merge(true_polys['truth_id'])
return iou_df
def find_false_positives(predictions, ground_truth, threshold=0.5):
iou_results = calculate_iou(predictions, ground_truth)
iou_results = iou_results.dropna() # drop the rows for labels with no prediction
filt = iou_results['IoU'] <= threshold
fp_iou = iou_results[filt]
fp_predictions_df = predictions.iloc[fp_iou['prediction_id']]
return fp_predictions_df
|
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from djanban.apps.visitors.views.main import view_list, new, edit, delete
urlpatterns = [
url(r'^$', view_list, name="view_list"),
url(r'^new$', new, name="new"),
url(r'^(?P<visitor_id>\d+)/edit$', edit, name="edit"),
url(r'^(?P<visitor_id>\d+)/delete', delete, name="delete"),
]
|
# --------------------------------------------------------
# (c) Copyright 2014, 2020 by Jason DeLaat.
# Licensed under BSD 3-clause licence.
# --------------------------------------------------------
import unittest
import common_tests
import pymonad
from pymonad.state import _State
class EqState(pymonad.monad.MonadAlias, _State):
def __eq__(self, other):
try:
return self.run(0) == other.run(0)
except:
return self.run(0) == other
class StateTests(unittest.TestCase):
def test_insert(self):
self.assertEqual(
EqState.insert(2),
(2, 0)
)
class StateFunctor(common_tests.FunctorTests, unittest.TestCase):
def setUp(self):
self._class = EqState
class StateApplicative(common_tests.ApplicativeTests, unittest.TestCase):
def setUp(self):
self._class = EqState
class StateMonad(common_tests.MonadTests, unittest.TestCase):
def setUp(self):
self._class = EqState
class StateThen(common_tests.ThenTests, unittest.TestCase):
def setUp(self):
self._class = EqState
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 Daniel Kraft
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# RPC test for name_pending call.
from test_framework.names import NameTestFramework, val
from test_framework.util import *
class NamePendingTest (NameTestFramework):
def set_test_params (self):
self.setup_name_test ([[]] * 2)
def run_test (self):
node = self.nodes[0]
# Register a name that can then be update'd in the mempool.
node.name_register ("x/a", val ("old-value-a"))
node.generate (1)
# Start a new name registration as well.
txb = node.name_register ("x/b", val ("value-b"))
# Perform the unconfirmed updates. Include a currency transaction
# to check that it is not shown.
txa = node.name_update ("x/a", val ("value-a"))
addrOther = self.nodes[1].getnewaddress ()
node.sendtoaddress (addrOther, 1)
# Check that name_show still returns the old value.
self.checkName (0, "x/a", val ("old-value-a"))
# Check sizes of mempool against name_pending.
mempool = node.getrawmempool ()
assert_equal (len (mempool), 3)
pending = node.name_pending ()
assert_equal (len (pending), 2)
# Check result of full name_pending (called above).
for op in pending:
assert op['txid'] in mempool
if op['name'] == 'x/a':
assert_equal (op['op'], 'name_update')
assert_equal (op['value'], val ('value-a'))
assert_equal (op['txid'], txa)
elif op['name'] == 'x/b':
assert_equal (op['op'], 'name_register')
assert_equal (op['value'], val ('value-b'))
assert_equal (op['txid'], txb)
else:
assert False
# Check name_pending with name filter that does not match any name.
pending = node.name_pending ('x/does not exist')
assert_equal (pending, [])
# Check name_pending with name filter.
self.checkPendingName (0, 'x/a', 'name_update', val ('value-a'), txa)
# We don't know the golden value for vout, as this is randomised. But we
# can store the output now and then verify it with name_show after the
# update has been mined.
pending = node.name_pending ('x/a')
assert_equal (len (pending), 1)
pending = pending[0]
assert 'vout' in pending
# Mine a block and check that all mempool is cleared.
node.generate (1)
assert_equal (node.getrawmempool (), [])
assert_equal (node.name_pending (), [])
# Verify vout from before against name_show.
confirmed = node.name_show ('x/a')
assert_equal (pending['vout'], confirmed['vout'])
# Send a name and check that ismine is handled correctly.
tx = node.name_update ('x/a', val ('sent-a'), {"destAddress": addrOther})
self.sync_mempools ()
self.checkPendingName (0, 'x/a', 'name_update', val ('sent-a'), tx, False)
self.checkPendingName (1, 'x/a', 'name_update', val ('sent-a'), tx, True)
def checkPendingName (self, ind, name, op, value, txid, mine=None):
"""
Call name_pending on a given name and check that the result
matches the expected values.
"""
res = self.nodes[ind].name_pending (name)
assert_equal (len (res), 1)
obj = res[0]
assert_equal (obj['op'], op)
assert_equal (obj['name'], name)
assert_equal (obj['value'], value)
assert_equal (obj['txid'], txid)
assert isinstance (obj['ismine'], bool)
if mine is not None:
assert_equal (obj['ismine'], mine)
# There is no golden value for vout, but we can decode the transaction
# to make sure it is correct.
rawtx = self.nodes[ind].getrawtransaction (txid, 1)
assert 'nameOp' in rawtx['vout'][obj['vout']]['scriptPubKey']
if __name__ == '__main__':
NamePendingTest ().main ()
|
EUQ_PATTERN = '(.*) = (.*)'
IN_PATTERN = '(.*) in (.*)'
NOT_NULL_PATTERN = "(.*) is not null"
import re
class FilterParser():
    @staticmethod
    def parse(filterExpression):
if re.match(EUQ_PATTERN, filterExpression):
return FilterParser._parseEuq(filterExpression)
elif re.match(IN_PATTERN, filterExpression):
return FilterParser._parseIn(filterExpression)
elif re.match(NOT_NULL_PATTERN, filterExpression):
return FilterParser._parseNotNull(filterExpression)
else:
return None
    @staticmethod
    def _parseNotNull(filterExpression):
        property = filterExpression.split(" ")[0].strip().strip("\"")
return [
"!=",
[
"get",
property
],
None
]
    @staticmethod
    def _parseEuq(filterExpression):
filterObj = {}
        # Filter has the form "{field name} = {value}"
        # For now only the Equal type is supported
filterType = "="
filterExps = filterExpression.split(filterType) # filterExpression.split(" ")
if len(filterExps) != 2:
return None
filterObj["property"] = filterExps.pop(0).replace("\"","").strip()
filterObj["type"] = filterType #filterExps.pop(0)
        # The value may contain spaces in the middle
        # Strip the surrounding quotes
filterObj["value"] = " ".join(filterExps).replace("'","").strip()
return [
"==",
[
"get",
filterObj["property"]
],
filterObj["value"]
]
    @staticmethod
    def _parseIn(filterExpression):
        # Filter has the form "{field name} in ({value 1}, {value 2}, ... {value n})"
        # For now only the In type is supported
filterType = " in "
filterExps = filterExpression.split(filterType) # filterExpression.split(" ")
if len(filterExps) != 2:
return None
property = filterExps[0].replace("\"","").strip()
        # Values may contain spaces in the middle
        # Strip the surrounding quotes
values = filterExps[1].strip().strip('(').strip(')').replace("\'","").split(',')
return [
"in",
[
"get",
property
],
[
"literal",
values
]
]
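
# Hedged usage sketch (not part of the original source): prints the
# Mapbox-GL-style filter expressions FilterParser produces for each supported
# pattern. The field names and values below are made up.
if __name__ == '__main__':
    print(FilterParser.parse('"status" = \'active\''))    # ['==', ['get', 'status'], 'active']
    print(FilterParser.parse('"kind" in (\'a\', \'b\')'))  # ['in', ['get', 'kind'], ['literal', ['a', ' b']]]
    print(FilterParser.parse('"name" is not null'))        # ['!=', ['get', 'name'], None]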
|
import os
import re
import requests
from PIL import Image, ImageEnhance
from pytesseract import pytesseract
from bs4 import BeautifulSoup
headers = {
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0"
}
login_url = "http://202.194.119.110/login.php"
verify_code_url = "http://202.194.119.110/vcode.php"
S = requests.Session()
S.get(login_url, headers=headers)
code = S.get(verify_code_url, headers=headers)
with open("code.gif", 'wb') as f:
f.write(code.content)
image = Image.open('{}/code.gif'.format(os.getcwd()))
imgry = image.convert('L')
sharpness = ImageEnhance.Contrast(imgry)
sharp_img = sharpness.enhance(2.0)
sharp_img.save("{}/code1.gif".format(os.getcwd()))
pytesseract.tesseract_cmd = "C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe"
os.environ["TESSDATA_PREFIX"] = "C:\\Program Files (x86)\\Tesseract-OCR\\tessdata"
verify_code = pytesseract.image_to_string(Image.open('./code1.gif'))
verify_code = "".join(re.findall(r"\d+", verify_code.strip()))  # join the OCR'd digits into a single code string
post_data = {
'password': '1054518207a.',
'submit': 'Submit',
'user_id': '201558501224',
'vcode': verify_code
}
print(verify_code)
req = S.post(login_url, headers=headers, data=post_data)
req.encoding = 'utf-8'
print(req.text)
if '-2' in req.text:
    print("Login succeeded")
else:
    print("Login failed")
# soup = BeautifulSoup(req.text, "html.parser")
|
"""Defines classes for representing note details, queries, and update requests.
The most important classes are :class:`FileInfo` , :class:`FileEditCmd` , and :class:`FileQuery`
"""
from __future__ import annotations
from dataclasses import dataclass, field, replace
from datetime import datetime, timezone
from enum import Enum
import os
import os.path
from typing import Set, Optional, Union, Iterable, List, Callable, Iterator, Tuple
from urllib.parse import urlparse, unquote_plus
@dataclass
class Link:
href: str
name: str = field(default="")
def __post_init__(self):
""" Strips piped names out of file names
"""
if self.href.find("|") >= 0:
[name, _href] = self.href.split("|")
self.href = _href.strip()
self.name = name.strip()
if not self.href.endswith(".md"):
self.href = self.href + ".md"
def __lt__(self, other):
return self.href < other.href
@dataclass
class LinkInfo:
"""Represents a link from a file to some resource.
Not all links target local files, but those are the ones most important to notesdir. The :meth:`referent` method
can be used to determine what local file, if any, the href targets.
"""
referrer: str
"""The file that contains the link. This should be a resolved, absolute path."""
href: str
"""The linked address.
Normally this is some sort of URI - it's the address portion of a Markdown link,
or the ``href`` or ``src`` attribute of an HTML tag, etc.
"""
name: str = field(default="")
def __post_init__(self):
""" Strips piped names out of file names
"""
        if self.href.find("|") >= 0:
            [name, _href] = self.href.split("|")
            self.href = _href.strip()
            self.name = name.strip()
        if not self.href.endswith(".md"):
            self.href = self.href + ".md"
def referent(self) -> Optional[str]:
"""Returns the resolved, absolute local path that this link refers to.
The path will be returned even if no file or folder actually exists at that location.
None will be returned if the href cannot be parsed or appears to be a non-file URI.
"""
try:
url = urlparse(self.href)
if (not url.scheme) or (url.scheme == 'file' and url.netloc in ['', 'localhost']):
if not url.path:
# links like "#foo" are just references to other parts of the same file
return self.referrer
referent = unquote_plus(url.path)
if not os.path.isabs(referent):
referent = os.path.join(self.referrer, '..', referent)
return os.path.realpath(referent)
except ValueError:
# not a valid URL
return None
def as_json(self) -> dict:
"""Returns a dict representing the instance, suitable for serializing as json."""
referent = self.referent()
return {
'referrer': self.referrer,
'href': self.href,
'referent': referent if referent else None
}
@dataclass
class FileInfo:
"""Container for the details Notesdir can parse or calculate about a file or folder.
A FileInfo instance does not imply that its path actually exists - instances may be created for
nonexistent paths that have just the :attr:`path` and :attr:`backlinks` attributes filled in.
When you retrieve instances of this from methods like :meth:`notesdir.repos.base.Repo.info`, which fields
are populated depends on which fields you request via the :class:`FileInfoReq`, as well as what fields
are supported for the file type and what data is populated in the particular file.
"""
path: str
"""The resolved, absolute path for which this information applies."""
links: List[LinkInfo] = field(default_factory=list)
"""Links from this file to other files or resources."""
tags: Set[str] = field(default_factory=set)
"""Tags for the file (e.g. "journal" or "project-idea")."""
title: Optional[str] = None
"""The title of the document, if any."""
created: Optional[datetime] = None
"""The creation date of the document, according to metadata within the document, if any.
This will *not* automatically be populated with timestamps from the filesystem, but
see :meth:`guess_created`.
"""
backlinks: List[LinkInfo] = field(default_factory=list)
"""Links from other files to this file."""
def as_json(self) -> dict:
"""Returns a dict representing the instance, suitable for serializing as json."""
return {
'path': self.path,
'title': self.title,
'created': self.created.isoformat() if self.created else None,
'tags': sorted(self.tags),
'links': [link.as_json() for link in self.links],
'backlinks': [link.as_json() for link in self.backlinks]
}
def guess_created(self) -> Optional[datetime]:
"""Returns the first available of: :attr:`created`, or the file's birthtime, or the file's ctime.
Returns None for paths that don't exist.
"""
if self.created:
return self.created
if not (self.path and os.path.exists(self.path)):
return None
stat = os.stat(self.path)
try:
return datetime.fromtimestamp(stat.st_birthtime, tz=timezone.utc)
except AttributeError:
return datetime.fromtimestamp(stat.st_ctime, tz=timezone.utc)
@dataclass
class FileInfoReq:
"""Allows you to specify which attributes you want when loading or querying for files.
For each attribute of :class:`FileInfo`, there is a corresponding boolean attribute here, which you
should set to True to indicate that you want that attribute.
Some methods that take a FileInfoReq parameter also accept strings or lists of strings as a convenience,
which they will pass to :meth:`parse`.
"""
path: bool = False
links: bool = False
tags: bool = False
title: bool = False
created: bool = False
backlinks: bool = False
@classmethod
def parse(cls, val: FileInfoReqIsh) -> FileInfoReq:
"""Converts the parameter to a FileInfoReq, if it isn't one already.
You can pass a comma-separated string like ``"path,backlinks"`` or a list of strings like
``['path', 'backlinks']``. Each listed field will be set to True in the resulting FileInfoReq.
"""
if isinstance(val, FileInfoReq):
return val
if isinstance(val, str):
return cls.parse(s for s in val.split(',') if s.strip())
return cls(**{k: True for k in val})
@classmethod
def internal(cls) -> FileInfoReq:
"""Returns an instance that requests everything which can be determined by looking at a file in isolation.
Currently this means everything except backlinks."""
return cls(path=True, links=True, tags=True, title=True, created=True)
@classmethod
def full(cls) -> FileInfoReq:
"""Returns an instance that requests everything."""
return replace(cls.internal(), backlinks=True)
FileInfoReqIsh = Union[str, Iterable[str], FileInfoReq]
@dataclass
class FileEditCmd:
"""Base class for requests to make changes to a file."""
path: str
"""Path to the file or folder that should be changed."""
@dataclass
class SetTitleCmd(FileEditCmd):
"""Represents a request to change a document's title."""
value: Optional[str]
"""The new title, or None to delete the title."""
@dataclass
class SetCreatedCmd(FileEditCmd):
"""Represents a request to change the creation date stored in a document's metadata (not filesystem metadata)."""
value: Optional[datetime]
"""The new creation date, or None to delete it from the metadata."""
@dataclass
class ReplaceHrefCmd(FileEditCmd):
"""Represents a request to replace link addresses in a document.
All occurrences will be replaced, but only if they are exact matches.
"""
original: str
"""The value to be replaced, generally copied from a :class:`LinkInfo` :attr:`href`"""
replacement: str
"""The new link address."""
@dataclass
class AddTagCmd(FileEditCmd):
"""Represents a request to add a tag to a document.
If the document already contains the tag, this request should be treated as a no-op.
"""
value: str
"""The tag to add."""
@dataclass
class DelTagCmd(FileEditCmd):
"""Represents a request to remove a tag from a document.
If the document does not contain the tag, this request should be treated as a no-op.
"""
value: str
"""The tag to remove."""
@dataclass
class MoveCmd(FileEditCmd):
"""Represents a request to move a file or folder from one location to another.
This does *not* imply that any links should be rewritten; that is a higher-level operation, which is
provided by :meth:`notesdir.api.Notesdir.move`.
"""
dest: str
"""The new path and filename."""
create_parents: bool = False
"""If True, any nonexistent parent directories should be created."""
delete_empty_parents: bool = False
"""If True, any parent directories that are empty after performing the move should be deleted."""
@dataclass
class CreateCmd(FileEditCmd):
"""Represents a request to create a new file."""
contents: str
class FileQuerySortField(Enum):
BACKLINKS_COUNT = 'backlinks'
CREATED = 'created'
FILENAME = 'filename'
PATH = 'path'
TAGS_COUNT = 'tags'
TITLE = 'title'
@dataclass
class FileQuerySort:
field: FileQuerySortField
reverse: bool = False
"""If True, sort descending."""
ignore_case: bool = True
"""If True, strings are sorted as if they were lower case."""
missing_first: bool = False
"""Affects the behavior for None values and empty strings.
If True, they should come before other values; if False, they should come after.
This definition is based on the assumption that reverse=False; when reverse=True, the ultimate result
will be the opposite.
"""
def key(self, info: FileInfo) -> Union[str, int, datetime]:
"""Returns sort key for the given file info for the :attr:`field` specified in this instance.
This is affected by the values of :attr:`ignore_case` and :attr:`missing_first`, but not the
value of :attr:`reverse`.
"""
if self.field == FileQuerySortField.BACKLINKS_COUNT:
return len(info.backlinks)
elif self.field == FileQuerySortField.CREATED:
created = info.created or (datetime(1,1,1) if self.missing_first else datetime(9999,12,31, 23, 59, 59, 999999))
if not created.tzinfo:
created = created.replace(tzinfo=timezone.utc)
return created
elif self.field == FileQuerySortField.FILENAME:
basename = os.path.basename(info.path)
return basename.lower() if self.ignore_case else basename
elif self.field == FileQuerySortField.PATH:
return info.path.lower() if self.ignore_case else info.path
elif self.field == FileQuerySortField.TAGS_COUNT:
return len(info.tags)
elif self.field == FileQuerySortField.TITLE:
if info.title:
return info.title.lower() if self.ignore_case else info.title
return '' if self.missing_first else chr(0x10ffff)
@dataclass
class FileQuery:
"""Represents criteria for searching for notes.
Some methods that take a FileQuery parameter also accept strings as a convenience, which they
pass to :meth:`parse`
If multiple criteria are specified, the query should only return notes that satisfy *all* the criteria.
"""
include_tags: Set[str] = field(default_factory=set)
"""If non-empty, the query should only return files that have *all* of the specified tags."""
exclude_tags: Set[str] = field(default_factory=set)
"""If non-empty, the query should only return files that have *none* of the specified tags."""
sort_by: List[FileQuerySort] = field(default_factory=list)
"""Indicates how to sort the results.
    For example, ``[FileQuerySort(FileQuerySortField.BACKLINKS_COUNT, reverse=True), FileQuerySort(FileQuerySortField.FILENAME)]``
would sort the results so that the most-linked-to files appear first; files with equal numbers of backlinks
would be sorted lexicographically.
"""
@classmethod
def parse(cls, strquery: FileQueryIsh) -> FileQuery:
"""Converts the parameter to a FileQuery, if it isn't one already.
Query strings are split on spaces. Each part can be one of the following:
* ``tag:TAG1,TAG2`` - notes must include all the specified tags
* ``-tag:TAG1,TAG2`` - notes must not include any of the specified tags
* ``sort:FIELD1,FIELD2`` - sort by the given fields
* fields on the left take higher priority, e.g. ``sort:created,title`` sorts by created date first
* a minus sign in front of a field name indicates to sort descending, e.g. ``sort:-backlinks`` or
``sort:filename,-created``
          * supported fields: ``backlinks`` (count), ``created``, ``filename``, ``tags`` (count), ``title``, ``path``
Examples:
* ``"tag:journal,food -tag:personal"`` - notes that are tagged both "journal" and "food" but not "personal"
"""
if isinstance(strquery, FileQuery):
return strquery
query = cls()
for term in strquery.split():
term = term.strip()
lower = term.lower()
if lower.startswith('tag:'):
query.include_tags.update(unquote_plus(t) for t in lower[4:].split(','))
elif lower.startswith('-tag:'):
query.exclude_tags.update(unquote_plus(t) for t in lower[5:].split(','))
elif lower.startswith('sort:'):
for sortstr in lower[5:].split(','):
sort = FileQuerySort(FileQuerySortField.TITLE) # placeholder value
if sortstr.startswith('-'):
sortstr = sortstr[1:]
sort.reverse = True
# TODO perhaps expose missing_first and ignore_case
sort.field = FileQuerySortField(sortstr)
query.sort_by.append(sort)
return query
def apply_filtering(self, infos: Iterable[FileInfo]) -> Iterator[FileInfo]:
"""Yields the entries from the given iterable which match the criteria of this query."""
for info in infos:
if not info:
# TODO should probably log a warning
continue
if self.include_tags and not self.include_tags.issubset(info.tags):
continue
if self.exclude_tags and not self.exclude_tags.isdisjoint(info.tags):
continue
yield info
def apply_sorting(self, infos: Iterable[FileInfo]) -> List[FileInfo]:
"""Returns a copy of the given file info collection sorted using this query's sort_by."""
result = list(infos)
for sort in reversed(self.sort_by):
result.sort(key=lambda info: sort.key(info), reverse=sort.reverse)
return result
FileQueryIsh = Union[str, FileQuery]
@dataclass
class TemplateDirectives:
"""Passed by :meth:`notesdir.api.Notesdir.new` when it is rendering one of a user's templates.
It is used for passing data in and out of the template.
"""
dest: Optional[str] = None
"""The path at which the new file should be created.
If this is set before rendering the template, it is the path the user suggested. But the template can change it,
and the template's suggestion will take precedence.
If the path already exists, notesdir will adjust it further to get a unique path before creating the file.
"""
@dataclass
class DependentPathFn:
"""Indicates that a path can be calculated based on the FileInfo for another path.
You can return this from a :class:`notesdir.conf.NotesdirConf.path_organizer` when one file's path depends on
another's. :meth:`notesdir.api.Notesdir.organize` will call the given :attr:`fn` with the info for the path
specified by :attr:`determinant`, but the path in the info will reflect any pending move for that file (even
if they have not been executed in the filesystem yet).
"""
determinant: str
fn: Callable[[FileInfo], str]
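
# Hedged usage sketch (illustration only, not part of the module): builds a
# FileQuery from a query string and applies it to an in-memory FileInfo. The
# path, tags, and query below are made up.
if __name__ == '__main__':
    query = FileQuery.parse('tag:journal,food -tag:personal sort:-backlinks,title')
    print(query.include_tags)                              # {'journal', 'food'}
    print(query.exclude_tags)                              # {'personal'}
    print([(s.field, s.reverse) for s in query.sort_by])   # backlinks count descending, then title ascending
    info = FileInfo(path='/tmp/example.md', tags={'journal', 'food'})
    print(query.apply_sorting(query.apply_filtering([info])))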
|
# django imports
from django import forms
from django.db import models
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
# portlets imports
from portlets.models import Portlet
class TextPortlet(Portlet):
"""Portlet to display some text.
"""
text = models.TextField(_(u"Text"), blank=True)
class Meta:
app_label = 'portlet'
def __unicode__(self):
return "%s" % self.id
def render(self, context):
"""Renders the portlet as html.
"""
request = context.get("request")
return render_to_string("lfs/portlets/text_portlet.html", RequestContext(request, {
"title": self.title,
"text": self.text
}))
def form(self, **kwargs):
return TextPortletForm(instance=self, **kwargs)
class TextPortletForm(forms.ModelForm):
"""Form for the TextPortlet.
"""
class Meta:
model = TextPortlet
|
#!/usr/bin/env python
from pprint import pprint
from typing import List
from problema import Problema
class BuscaEmProfundidade(object):
    # TODO: check for a None object being returned
    def busca_profundidade(self, problema: Problema, estado=None, visitados=None):
        if visitados is None:
            visitados = []
        if estado is None:
            estado = problema.estado_inicial
        # print(f'> State being evaluated:')
        # print(f'{estado}')
if problema.funcao_objetivo(estado):
            print('\n >>>> Solution found <<<< \n')
return estado
sucessores = problema.funcao_sucessora(estado)
        # Run the search recursively
for sucessor in sucessores:
            if sucessor not in visitados:
visitados.append(sucessor)
estado_recursivo = self.busca_profundidade(problema, sucessor, visitados)
if estado_recursivo is not None:
if problema.funcao_objetivo(estado_recursivo):
return estado_recursivo
# else:
# return sucessor.pai
return None
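
# Hedged usage sketch (illustration only): a toy object exposing the three
# members busca_profundidade relies on (estado_inicial, funcao_objetivo,
# funcao_sucessora); the real Problema class from problema.py may differ.
if __name__ == '__main__':
    class _ToyProblem:
        estado_inicial = 0
        def funcao_objetivo(self, estado):
            return estado == 3
        def funcao_sucessora(self, estado):
            return [estado + 1] if estado < 3 else []

    print(BuscaEmProfundidade().busca_profundidade(_ToyProblem()))  # expected to print the goal state, 3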
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pyface.qt.QtGui import QSizePolicy
from traits.api import Property, Enum, Range, Str, Trait, Bool
from traitsui.api import View
from traitsui.basic_editor_factory import BasicEditorFactory
from traitsui.qt4.button_editor import CustomEditor
from traitsui.ui_traits import AView, Image
import six
# ============= standard library imports ========================
# ============= local library imports ==========================
class _ButtonEditor(CustomEditor):
def init(self, parent):
super(_ButtonEditor, self).init(parent)
self.control.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Fixed)
self.control.setFixedHeight(32)
self.control.setFlat(self.factory.flat)
class ButtonEditor(BasicEditorFactory):
""" Editor factory for buttons.
"""
klass = _ButtonEditor
# ---------------------------------------------------------------------------
# Trait definitions:
# ---------------------------------------------------------------------------
# Value to set when the button is clicked
value = Property
# Optional label for the button
label = Str
# The name of the external object trait that the button label is synced to
label_value = Str
# The name of the trait on the object that contains the list of possible
# values. If this is set, then the value, label, and label_value traits
# are ignored; instead, they will be set from this list. When this button
# is clicked, the value set will be the one selected from the drop-down.
values_trait = Trait(None, None, Str)
# (Optional) Image to display on the button
image = Image
# Extra padding to add to both the left and the right sides
width_padding = Range(0, 31, 7)
# Extra padding to add to both the top and the bottom sides
height_padding = Range(0, 31, 5)
# Presentation style
style = Enum('button', 'radio', 'toolbar', 'checkbox')
# Orientation of the text relative to the image
orientation = Enum('vertical', 'horizontal')
# The optional view to display when the button is clicked:
view = AView
# ---------------------------------------------------------------------------
# Traits view definition:
# ---------------------------------------------------------------------------
traits_view = View(['label', 'value', '|[]'])
flat = Bool(False)
# ---------------------------------------------------------------------------
# Implementation of the 'value' property:
# ---------------------------------------------------------------------------
def _get_value(self):
return self._value
def _set_value(self, value):
self._value = value
if isinstance(value, six.string_types):
try:
self._value = int(value)
except:
try:
self._value = float(value)
except:
pass
# ============= EOF =============================================
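
# Hedged usage sketch (illustration only, appended after the original EOF
# marker): attaches this ButtonEditor factory to a Button trait in a TraitsUI
# view. Requires a Qt backend to display; the trait name below is made up.
if __name__ == '__main__':
    from traits.api import Button, HasTraits
    from traitsui.api import Item

    class _Demo(HasTraits):
        go = Button()
        traits_view = View(Item('go', editor=ButtonEditor(label='Go', flat=True), show_label=False))

    _Demo().configure_traits()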
|
#!/usr/bin/env python
"""
A polar decoder class. Currently only Successive Cancellation Decoder (SCD) is supported.
"""
import numpy as np
from polarcodes.utils import *
from polarcodes.SCD import SCD
class Decode:
def __init__(self, myPC, decoder_name = 'scd'):
"""
Parameters
----------
myPC: `PolarCode`
a polar code object created using the :class:`PolarCode` class
decoder_name: string
name of decoder to use (default is 'scd')
"""
self.myPC = myPC
self.x_noisy = np.array([])
# select decoding algorithm
if decoder_name == 'scd':
scd = SCD(myPC)
self.x_noisy = scd.decode()
self.myPC.message_received = self.noisy_message(self.x_noisy, False)
elif decoder_name == 'systematic_scd':
scd = SCD(myPC)
self.x_noisy = scd.decode()
self.myPC.message_received = self.noisy_message(self.x_noisy, True)
def noisy_message(self, x_noisy, systematic_flag):
if systematic_flag:
x_noisy = self.systematic_decode(x_noisy)
return x_noisy[self.myPC.frozen_lookup == 1]
def systematic_decode(self, x_noisy):
x = np.array([x_noisy], dtype=int)
return np.transpose(np.mod(np.dot(self.myPC.T, x.T), 2))[0]
|
from importers import CSVImportCommand
import requests
import click
class SchoolsImportCommand(CSVImportCommand):
def process_row(self, row):
# Only import schools with easting and northing information
if row[70] and row[71]:
if 'Primary' in row[11]:
school_type = 'PRIMARY'
elif 'Secondary' in row[11]:
school_type = 'SECONDARY'
else:
school_type = 'UNKNOWN'
data = {
"urn": row[0],
"la_name": row[2],
"school_name": row[4],
"school_type": school_type,
"school_capacity": int(row[20]) if row[20] else 0,
"school_pupils": int(row[23]) if row[23] else 0,
"postcode": row[44].replace(' ', ''),
"point": {
"type": "Point",
"coordinates": [float(row[70]), float(row[71])]
},
"srid": 27700
}
headers = {'Authorization': 'Token {0}'.format(self.token)}
response = requests.post(
self.api_url,
json=data,
headers=headers)
if response.status_code == 201:
print('{0} imported correctly'.format(row[0]))
else:
print(
'ERROR: could not import {0} because of {1}'.format(
row[0], response.text))
@click.command()
@click.argument('filenames', nargs=-1, type=click.Path())
@click.option(
'--apiurl',
default='http://localhost:8000/api/schools/', help='API url')
@click.option('--apitoken', help='API authentication token')
def import_schools(filenames, apiurl, apitoken):
command = SchoolsImportCommand(
filenames, apiurl, apitoken, True, encoding='ISO-8859-1')
command.run()
if __name__ == '__main__':
import_schools()
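# Example invocation (hedged sketch; the CSV path, URL, and token below are placeholders):
#   python import_schools.py schools.csv --apiurl http://localhost:8000/api/schools/ --apitoken <token>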
|
import numpy as np
import tensorflow as tf
class FastZeroTagModel(object):
"""
Create a tensorflow graph that finds the principal direction of the target word embeddings
(with negative sampling), using the loss function from "Fast Zero-Shot Image Tagging".
"""
def __init__(self, input_size,
w2v,
learning_rate=1e-5,
hidden_units=[4096, 2048],
use_batch_norm=True):
self.model_info = dict()
# Placeholders for data
self.model_info['input'] = tf.placeholder(shape=(None, input_size), dtype=tf.float32)
self.model_info['pos_ids'] = tf.placeholder(dtype=tf.int32)
self.model_info['neg_ids'] = tf.placeholder(dtype=tf.int32)
self.model_info['y_truth'] = tf.transpose(tf.nn.embedding_lookup(w2v,self.model_info['pos_ids']), perm=[1,0,2])
self.model_info['y_neg'] = tf.transpose(tf.nn.embedding_lookup(w2v,self.model_info['neg_ids']), perm=[1,0,2])
# Construct fully connected layers
layers = []
for i, hidden_size in enumerate(hidden_units[:-1]):
if i == 0:
layer = tf.contrib.layers.relu(self.model_info['input'], hidden_size)
else:
layer = tf.contrib.layers.relu(layer, hidden_size)
layers.append(layer)
if use_batch_norm:
layer = tf.contrib.layers.batch_norm(layer)
layers.append(layer)
# Output layer should always be linear
layer = tf.contrib.layers.linear(layer, hidden_units[-1])
layers.append(layer)
self.model_info['layers'] = layers
self.model_info['prediction'] = layer
def fztloss( f, pVecs, nVecs ):
"""
Tensorized cost function from Fast Zero-Shot Learning paper
Args:
f: The output from the network, a tensor of shape (# images, word embedding size)
pVecs: The vector embeddings of the ground truth tags, a tensor
of shape (# images, # positive tags, word embedding size)
nVecs: The vector embeddings of negatively sampled tags, a tensor
of shape (# images, # negative samples, word embedding size)
Returns:
Scalar tensor representing the batch cost
"""
            posmul = tf.multiply(pVecs, f)
            negmul = tf.multiply(nVecs, f)
tfpos = tf.reduce_sum(posmul, reduction_indices=2)
tfneg = tf.reduce_sum(negmul, reduction_indices=2)
tfpos = tf.transpose(tfpos, [1,0])
tfneg = tf.transpose(tfneg, [1,0])
negexpan = tf.tile( tf.expand_dims(tfneg, -1), [1, 1, tf.shape(tfpos)[1]] )
posexpan = tf.tile( tf.transpose(tf.expand_dims(tfpos, -1), [0,2,1]), [1, tf.shape(tfneg)[1], 1])
            differences = tf.subtract(negexpan, posexpan)
return tf.reduce_sum(tf.reduce_sum(tf.log(1 + tf.exp(differences)), reduction_indices=[1,2]))
loss = fztloss(self.model_info['prediction'], self.model_info['y_truth'], self.model_info['y_neg'])
self.model_info['loss'] = loss
self.model_info['optimizer'] = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
        self.model_info['init_op'] = tf.global_variables_initializer()
self.model_info['saver'] = tf.train.Saver()
def initialize_model(self, sess):
sess.run(self.model_info['init_op'])
def predict(self, sess, x):
return sess.run(self.model_info['prediction'], feed_dict={self.model_info['input']: x})
def fit(self, sess, x, y, **kwargs):
_, loss = sess.run([self.model_info['optimizer'], self.model_info['loss']],
feed_dict={
self.model_info['input']: x,
self.model_info['pos_ids']: y,
self.model_info['neg_ids']: kwargs['neg_word_ids']
})
return loss
def save(self, sess, model_output_path):
self.model_info['saver'].save(sess, model_output_path)
def load(self, sess, model_input_path):
self.model_info['saver'].restore(sess, model_input_path)
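
# Hedged usage sketch (illustration only; not part of the original class).
# Assumes TensorFlow 1.x with tf.contrib available; the vocabulary size,
# feature size, batch size, and hidden sizes below are made up.
if __name__ == '__main__':
    w2v = np.random.rand(100, 300).astype(np.float32)   # toy 100-word vocabulary of 300-d embeddings
    model = FastZeroTagModel(input_size=2048, w2v=w2v, hidden_units=[1024, 300])
    with tf.Session() as sess:
        model.initialize_model(sess)
        x = np.random.rand(4, 2048).astype(np.float32)   # batch of 4 image feature vectors
        pos = np.random.randint(0, 100, size=(4, 5))     # 5 positive tag ids per image
        neg = np.random.randint(0, 100, size=(4, 10))    # 10 negative samples per image
        print(model.fit(sess, x, pos, neg_word_ids=neg)) # batch loss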
|