text stringlengths 8 6.05M |
|---|
import os
import sys
import pandas as pd
# for GUI Version
from appJar import gui
appg=None
csv_path=None
df=None
df_select=None
def intro():
    """Print the CSVPlay banner and a one-line program description.

    The original called ``os.get_terminal_size()`` and never used the
    result; that call raises OSError when stdout is not attached to a
    terminal (e.g. piped output), so it has been dropped.  The star rows
    are built by string repetition, producing byte-identical output to
    the original loop ("* " twenty times plus a closing "*").
    """
    banner = "* " * 20 + "*"
    print("\n")
    print(banner)
    print("CSVPlay:\nGenerate various views from data in CSV.")
    print(banner)
    print("\n")
def generate_data_frame(path=None):
    """Load a CSV file into the module-level dataframe ``df``.

    Parameters
    ----------
    path : str, optional
        File to read.  Defaults to the module-level ``csv_path`` set by
        :func:`set_csv_path`, preserving the original call signature.

    Returns
    -------
    pandas.DataFrame
        The loaded frame (also stored in the global ``df``).

    Raises whatever ``pandas.read_csv`` raises for an unreadable path.
    """
    global df
    df = pd.read_csv(path if path is not None else csv_path)
    print("Generated dataframe.")
    return df
# get the requested fields to analyze in a data frame
# get the requested fields to analyze in a data frame
def get_requested_fields(fields, frame=None):
    """Select a comma-separated list of column names into ``df_select``.

    Parameters
    ----------
    fields : str
        Comma-separated column names; all whitespace is stripped so
        ``"a, b ,c"`` behaves exactly like ``"a,b,c"``.
    frame : pandas.DataFrame, optional
        Frame to select from.  Defaults to the module-level ``df``,
        preserving the original call signature.

    Returns
    -------
    pandas.DataFrame
        The column selection (also stored in the global ``df_select``).

    Raises KeyError if a requested column does not exist.
    """
    global df_select
    source = df if frame is None else frame
    fields_list = fields.strip().replace(" ", "").split(",")
    print("Requested fields:")
    print(fields_list)
    df_select = source[fields_list]
    print("Generated dataframe for requested fields.")
    return df_select
def set_csv_path(path):
    """Store *path* in the module-level ``csv_path`` and echo it.

    Fixes the typo in the echoed message ("give" -> "given").
    """
    global csv_path
    csv_path = path
    print(".csv filepath given as : " + csv_path + "...")
# handle button events
# handle button events
def press(button):
    """Dispatch an appJar button event by its label.

    Relies on the module-level GUI handle ``appg`` and the module-level
    dataframes ``df`` / ``df_select``.  The original guarded only the
    "Top 5" branch; "Process Selection" and "Selection Top 5" crashed
    (TypeError / AttributeError on None) when pressed before a CSV was
    processed — they now print a hint instead, matching the existing
    error-handling style.
    """
    global df, df_select
    if button == "Process":
        try:
            set_csv_path(appg.getEntry(".csv Path: "))
            generate_data_frame()
        except OSError:
            print("Please enter a valid path.")
    elif button == "Top 5":
        try:
            print(df.head())
        except AttributeError:
            print("Please process csv first.")
    elif button == "Process Selection":
        try:
            get_requested_fields(appg.getEntry("Select Fields: "))
        except (TypeError, KeyError):
            # df is still None, or a requested column does not exist.
            print("Please process csv first and request valid fields.")
    elif button == "Selection Top 5":
        try:
            print(df_select.head())
        except AttributeError:
            # df_select is still None: no selection has been processed.
            print("Please process selection first.")
def gui_version():
    '''
    Opens GUI Version of CSVPlay
    '''
    # Create the appJar window and store it in the module-level handle so
    # the press() callback can reach the widgets.
    global appg
    appg = gui("CSVPlay", "500x350")
    appg.setBg("white")
    appg.setFont(18)
    # add & configure widgets - widgets get a name, to help referencing them later
    appg.addLabel("title", "Generate views of data in CSV")
    appg.setLabelBg("title", "gray")
    appg.setLabelFg("title", "white")
    # Entry widget names double as lookup keys in press() — keep them in sync.
    appg.addLabelEntry(".csv Path: ")
    # link the buttons to the function called press
    appg.addButtons(["Process", "Top 5"], press)
    appg.addLabelEntry("Select Fields: ")
    appg.addButtons(["Process Selection"], press)
    appg.addButtons(["Selection Top 5"], press)
    appg.setFocus(".csv Path: ")
    # start the GUI (blocks until the window is closed)
    appg.go()
def userselect_main():
    """Interactive main menu: 0 exits, 1 prints a test message, 2 opens the GUI.

    The original recursed on every branch and — because the ``val == "2"``
    check was a separate ``if`` rather than part of the chain — fell through
    to the trailing ``else`` after the GUI branch returned, wrongly printing
    "Please select a valid entry." and recursing again.  Rewritten as a
    single loop with one if/elif chain; exiting via sys.exit on "0" is
    preserved.
    """
    while True:
        val = input("Select Task: \n [0] Exit \n [1] Test \n [2] Open GUI version.\n> ")
        if val == "2":
            gui_version()
        elif val == "1":
            print("I am running!")
        elif val == "0":
            print("Exiting.")
            sys.exit()
        else:
            print("Please select a valid entry.")
# Run the interactive menu only when executed as a script, not on import.
if __name__ == "__main__":
    intro()
    userselect_main()
"""
References:
- http://appjar.info/#appjar
"""
|
def _swap_hundreds_and_tens(n):
    """Return *n* with its hundreds and tens digits swapped.

    Uses the same truncating arithmetic as the original inline code, so
    behaviour for negative input is unchanged.  Intended for three-digit
    non-negative integers — TODO confirm with the exercise statement.
    """
    hundreds = int(n / 100)
    tens = int(n / 10 % 10)
    ones = int(n % 10)
    # hundreds digit moves to the tens place and vice versa.
    return hundreds * 10 + tens * 100 + ones

A = int(input("A= "))
print(_swap_hundreds_and_tens(A))
import pygame as py
import dice_chess
import random
from network import Network
"""
Placed this part of code in server file
qw = dice_chess.queen(7, 3, 0)
qb = dice_chess.queen(0, 3, 1)
kw = dice_chess.king(7, 4, 0)
kb = dice_chess.king(0, 4, 1)
rw1 = dice_chess.rook(7, 0, 0)
rw2 = dice_chess.rook(7, 7, 0)
rb1 = dice_chess.rook(0, 0, 1)
rb2 = dice_chess.rook(0, 7, 1)
bw1 = dice_chess.bishop(7, 2, 0)
bb1 = dice_chess.bishop(0, 2, 1)
bw2 = dice_chess.bishop(7, 5, 0)
bb2 = dice_chess.bishop(0, 5, 1)
nb1 = dice_chess.knight(0, 1, 1)
nw1 = dice_chess.knight(7, 1, 0)
nb2 = dice_chess.knight(0, 6, 1)
nw2 = dice_chess.knight(7, 6, 0)
avai_piece = [qw, qb, kw, kb, rw1, rw2, rb1, rb2, bw1, bw2, bb1, bb2, nb1, nb2, nw1, nw2]
def create_pawn_structure():
for rows in range(8):
black_pawn = dice_chess.pawn(1, rows, 1)
white_pawn = dice_chess.pawn(6, rows, 0)
avai_piece.append(black_pawn)
avai_piece.append(white_pawn)
create_pawn_structure()
"""
def createboard(avai_piece):
    """Build an 8x8 grid (list of row lists) from the piece list.

    Each active piece is placed at ``grid[piece.x][piece.y]``; every other
    square holds None.  Pieces with ``x == -1`` are captured and stay off
    the board.
    """
    grid = [[None for _ in range(8)] for _ in range(8)]
    for piece in avai_piece:
        if piece.x != -1:
            grid[piece.x][piece.y] = piece
    return grid
# Initialise pygame and create the square game window.
py.init()
WIDTH = 480
win = py.display.set_mode((WIDTH, WIDTH))
py.display.set_caption("DICE CHESS")
# Pixel side length of one board square (the board is 8x8).
length = WIDTH / 8
def display_board(win, board=None, moves_shower=None, moves_counter=0, dice=None):
    """Render one frame: chequered board, pieces, and marker dots.

    Blue dots mark the squares in *moves_shower* (legal destinations of the
    selected piece); yellow dots mark the current player's pieces whose type
    appears in *dice* (the active roll).  Ends with ``py.display.update()``.

    Fixes: the mutable default arguments (``[]``) were shared across calls;
    the local ``tuple`` shadowed the builtin; the local ``length`` shadowed
    the module-level constant of the same value.

    NOTE(review): the *board* parameter is accepted but never read — pieces
    are drawn from the module-level ``avai_piece`` list (callers pass
    ``avai_piece`` for it anyway).  Confirm before removing the parameter.
    """
    moves_shower = [] if moves_shower is None else moves_shower
    dice = [] if dice is None else dice
    square = WIDTH / 8
    win.fill((0, 255, 255))
    # Chequer pattern: colour every square whose coordinates differ in parity.
    for i in range(8):
        for j in range(8):
            if (i + j) % 2 != 0:
                py.draw.rect(win, (0, 200, 0), (square * i, square * j, square, square))
    for piece in avai_piece:
        if piece.x == -1:
            continue
        text = piece.draw_piece(win)
        # Board (row, col) maps to screen (col * square, row * square).
        dest = (piece.y * square, piece.x * square)
        win.blit(text, dest)
    # Blue destination markers.
    for move in moves_shower:
        pos = (int((move[1] * square) + square / 2), int((move[0] * square) + square / 2))
        py.draw.circle(win, (0, 0, 255), pos, 5)
    if len(dice) > 0:
        # Yellow markers on the pieces the current player may move this turn.
        for piece in avai_piece:
            if piece.x == -1:
                continue
            if (piece.color == moves_counter % 2) and (piece.type in dice):
                pos = ((int(piece.y * square) + square / 2), int((piece.x * square) + square / 2))
                py.draw.circle(win, (255, 255, 0), pos, 5)
    py.display.update()
###dice generator
def dice(no_of_dice):
    """Roll *no_of_dice* piece-dice and return the faces as a list.

    Each face is one of "K", "Q", "B", "N", "R", "P".  One
    ``random.choice`` call per die, in the same order as the original
    while-loop, so results are identical for a given RNG state.
    """
    faces = ["K", "Q", "B", "N", "R", "P"]
    return [random.choice(faces) for _ in range(no_of_dice)]
#####
def skip_to_next_player(board, dcp, move_counter):
    """Return True when the side to move has no legal move available.

    A move is available when some square holds a piece of the current
    colour (``move_counter % 2``) whose type appears in the dice roll
    *dcp* and whose ``possible_moves(board)`` is non-empty.
    """
    side = move_counter % 2
    for row in board:
        for sq in row:
            # Skip empty squares and the opponent's pieces.
            if sq is None or sq.color != side:
                continue
            if sq.type in dcp and len(sq.possible_moves(board)) > 0:
                return False
    return True
def promotion():
promotion_sq = [0, 7]
for piece in avai_piece:
if piece.type != "P" or piece.x == -1 or piece.x not in promotion_sq:
continue
else:
promotion_x = piece.x
promotion_y = piece.y
promoted_piece = random.choice(["Q", "R", "N", "B"])
promoted_color = random.choice([0, 1, piece.color])
if promoted_piece == "Q":
avai_piece.append(dice_chess.queen(promotion_x, promotion_y, promoted_color))
if promoted_piece == "R":
avai_piece.append(dice_chess.rook(promotion_x, promotion_y, promoted_color))
if promoted_piece == "B":
avai_piece.append(dice_chess.bishop(promotion_x, promotion_y, promoted_color))
if promoted_piece == "N":
avai_piece.append(dice_chess.knight(promotion_x, promotion_y, promoted_color))
piece.x = -1
####
####
# Main networked game loop.
# NOTE(review): the source arrived with its indentation stripped; the
# nesting below is a reconstruction from the statement order — confirm
# against the original repository.
run = True
move_shower = []          # legal destination squares of the selected piece
move_counter = 0          # even -> white (0) to move, odd -> black (1)
dice_choosen_pieces = []  # current dice roll; empty means "roll pending"
no_to_colour = {0: "white", 1: "black"}
n = Network()
# Initial piece list comes from the server; echo it back.
avai_piece = n.getpos()
n.send(avai_piece)
while run:
    chess_board = createboard(avai_piece)
    display_board(win, avai_piece, move_shower, move_counter, dice_choosen_pieces)
    promotion()
    # Indices 2 and 3 are presumably the two kings (kw/kb in the server's
    # setup shown in the comment block above) — game over when either is
    # captured.  TODO confirm against the server's piece ordering.
    if avai_piece[2].x == -1 or avai_piece[3].x == -1:
        font = py.font.Font("freesansbold.ttf", 30)
        text = font.render("GAME OVER", True, (255, 0, 0))
        win.blit(text, (300, 300))
        py.display.update()
        # Keep servicing QUIT events while showing the game-over screen.
        for event in py.event.get():
            if event.type == py.QUIT:
                run = False
        continue
    for event in py.event.get():
        ###rolling of dice
        if event.type == py.KEYDOWN:
            if event.key == py.K_SPACE:
                # Ignore SPACE while a roll is still unused.
                if len(dice_choosen_pieces) > 0:
                    continue
                dice_choosen_pieces = dice(3)
                # No piece of the rolled types can move: pass the turn.
                if skip_to_next_player(chess_board, dice_choosen_pieces, move_counter):
                    print("no possible moves")
                    move_counter += 1
                    dice_choosen_pieces = []
        if event.type == py.QUIT:
            run = False
        if py.mouse.get_pressed()[0]:
            # Clicks do nothing until the dice have been rolled.
            if len(dice_choosen_pieces) == 0:
                continue
            # NOTE(review): pygame.mouse.get_pos takes no arguments —
            # get_pos(0) likely raises TypeError; confirm and drop the 0.
            pos = py.mouse.get_pos(0)
            # Pixel coordinates -> (row, col) board square.
            square = (int(pos[1] // length), int(pos[0] // length))
            if square in move_shower:
                # NOTE(review): selected_piece is only bound by a previous
                # selection click; a stale move_shower could leave it unbound.
                # Capture whatever occupies the destination square.
                if chess_board[square[0]][square[1]] != None:
                    chess_board[square[0]][square[1]].x = -1
                if selected_piece.type == "R":
                    selected_piece.castling = False
                if selected_piece.type == "K":
                    # Destination reachable only via castling: move the rook too.
                    if square not in selected_piece.possible_moves_without_casting(chess_board):
                        print("casting")
                        castle_rooks = selected_piece.castle(chess_board)
                        for castle_rook in castle_rooks:
                            if square[1] == 6:
                                # King-side: h-file rook jumps to f-file.
                                if castle_rook.y == 7:
                                    castle_rook.y = 5
                                    selected_piece.castling = False
                            else:
                                # Queen-side: a-file rook jumps to d-file.
                                if castle_rook.y == 0:
                                    castle_rook.y = 3
                                    selected_piece.castling = False
                    # The king moved, so castling rights are gone either way.
                    selected_piece.castling = False
                # Execute the move and hand the turn over.
                selected_piece.x = square[0]
                selected_piece.y = square[1]
                move_shower = []
                move_counter += 1
                dice_choosen_pieces = []
            # Selection click: pick one of our own pieces allowed by the roll.
            if chess_board[square[0]][square[1]] != None:
                if chess_board[square[0]][square[1]].color != move_counter % 2:
                    continue
                if chess_board[square[0]][square[1]].type in dice_choosen_pieces:
                    selected_piece = chess_board[square[0]][square[1]]
                    move_shower = selected_piece.possible_moves(chess_board)
    # Push the (possibly updated) piece list to the server every frame.
    n.send(avai_piece)
|
#!/usr/bin/env python
import unittest
from xml.dom.minidom import parseString
import xml.etree.ElementTree as ET
from decimal import Decimal
from gomatic import GoCdConfigurator, FetchArtifactDir, RakeTask, ExecTask, FetchArtifactTask, \
FetchArtifactFile, Tab, GitMaterial, PipelineMaterial, Pipeline, PackageMaterial
from gomatic.fake import FakeHostRestClient, empty_config_xml, config, empty_config
from gomatic.gocd.pipelines import DEFAULT_LABEL_TEMPLATE
from gomatic.gocd.artifacts import Artifact
from gomatic.xml_operations import prettify
def find_with_matching_name(things, name):
    """Return every element of *things* whose ``name`` attribute equals *name*."""
    matches = []
    for thing in things:
        if thing.name == name:
            matches.append(thing)
    return matches
def standard_pipeline_group():
    """Load the 'config-with-typical-pipeline' fixture and return its 'P.Group' pipeline group."""
    return GoCdConfigurator(config('config-with-typical-pipeline')).ensure_pipeline_group('P.Group')
def typical_pipeline():
    """Return the 'typical' pipeline from the standard fixture pipeline group."""
    return standard_pipeline_group().find_pipeline('typical')
def more_options_pipeline():
    """Return the 'more-options' pipeline from its dedicated fixture config."""
    return GoCdConfigurator(config('config-with-more-options-pipeline')).ensure_pipeline_group('P.Group').find_pipeline('more-options')
def empty_pipeline():
    """Return a minimal pipeline ('pl' in group 'pg') built on an empty config, with a git url set."""
    return GoCdConfigurator(empty_config()).ensure_pipeline_group("pg").ensure_pipeline("pl").set_git_url("gurl")
def empty_stage():
    """Return a fresh, empty 'deploy-to-dev' stage on the minimal pipeline."""
    return empty_pipeline().ensure_stage("deploy-to-dev")
class TestAgents(unittest.TestCase):
    """Agents exposed by GoCdConfigurator: hostnames, resources, mutation.

    Modernised: ``assertEquals`` is a deprecated alias removed in
    Python 3.12 — replaced with ``assertEqual`` throughout.
    """

    def _agents_from_config(self):
        # Shared fixture: two agents, the first with two resources.
        return GoCdConfigurator(config('config-with-just-agents')).agents

    def test_could_have_no_agents(self):
        agents = GoCdConfigurator(empty_config()).agents
        self.assertEqual(0, len(agents))

    def test_agents_have_resources(self):
        agents = self._agents_from_config()
        self.assertEqual(2, len(agents))
        self.assertEqual({'a-resource', 'b-resource'}, agents[0].resources)

    def test_agents_have_names(self):
        agents = self._agents_from_config()
        self.assertEqual('go-agent-1', agents[0].hostname)
        self.assertEqual('go-agent-2', agents[1].hostname)

    def test_agent_could_have_no_resources(self):
        agents = self._agents_from_config()
        self.assertEqual(0, len(agents[1].resources))

    def test_can_add_resource_to_agent_with_no_resources(self):
        agent = self._agents_from_config()[1]
        agent.ensure_resource('a-resource-that-it-does-not-already-have')
        self.assertEqual(1, len(agent.resources))

    def test_can_add_resource_to_agent(self):
        agent = self._agents_from_config()[0]
        self.assertEqual(2, len(agent.resources))
        agent.ensure_resource('a-resource-that-it-does-not-already-have')
        self.assertEqual(3, len(agent.resources))
class TestJobs(unittest.TestCase):
    """Jobs within a stage: resources, timeouts, artifacts, tasks, variables, tabs.

    Modernised: ``assertEquals`` (deprecated alias, removed in Python 3.12)
    -> ``assertEqual``; Python-2-only ``e.message`` -> ``str(exception)``;
    try/fail/except idioms -> ``assertRaises`` context managers.
    """

    def test_jobs_have_resources(self):
        stages = typical_pipeline().stages
        job = stages[0].jobs[0]
        resources = job.resources
        self.assertEqual(1, len(resources))
        self.assertEqual({'a-resource'}, resources)

    def test_job_has_nice_tostring(self):
        job = typical_pipeline().stages[0].jobs[0]
        self.assertEqual("Job('compile', [ExecTask(['make', 'options', 'source code'])])", str(job))

    def test_jobs_can_have_timeout(self):
        job = typical_pipeline().ensure_stage("deploy").ensure_job("upload")
        self.assertEqual(True, job.has_timeout)
        self.assertEqual('20', job.timeout)

    def test_can_set_timeout(self):
        job = empty_stage().ensure_job("j")
        j = job.set_timeout("42")
        self.assertEqual(j, job)
        self.assertEqual(True, job.has_timeout)
        self.assertEqual('42', job.timeout)

    def test_jobs_do_not_have_to_have_timeout(self):
        stages = typical_pipeline().stages
        job = stages[0].jobs[0]
        self.assertEqual(False, job.has_timeout)
        # Accessing .timeout on a job without one raises RuntimeError.
        with self.assertRaises(RuntimeError):
            job.timeout

    def test_jobs_can_run_on_all_agents(self):
        job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
        self.assertEqual(True, job.runs_on_all_agents)

    def test_jobs_do_not_have_to_run_on_all_agents(self):
        job = typical_pipeline().ensure_stage("build").ensure_job("compile")
        self.assertEqual(False, job.runs_on_all_agents)

    def test_jobs_can_be_made_to_run_on_all_agents(self):
        job = typical_pipeline().ensure_stage("build").ensure_job("compile")
        j = job.set_runs_on_all_agents()
        self.assertEqual(j, job)
        self.assertEqual(True, job.runs_on_all_agents)

    def test_jobs_can_be_made_to_not_run_on_all_agents(self):
        job = typical_pipeline().ensure_stage("build").ensure_job("compile")
        j = job.set_runs_on_all_agents(False)
        self.assertEqual(j, job)
        self.assertEqual(False, job.runs_on_all_agents)

    def test_can_ensure_job_has_resource(self):
        stages = typical_pipeline().stages
        job = stages[0].jobs[0]
        j = job.ensure_resource('moo')
        self.assertEqual(j, job)
        self.assertEqual(2, len(job.resources))
        self.assertEqual({'a-resource', 'moo'}, job.resources)

    def test_jobs_have_artifacts(self):
        job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
        artifacts = job.artifacts
        self.assertEqual({
            Artifact.get_build_artifact("target/universal/myapp*.zip", "artifacts"),
            Artifact.get_build_artifact("scripts/*", "files"),
            Artifact.get_test_artifact("from", "to")},
            artifacts)

    def test_job_that_has_no_artifacts_has_no_artifacts_element_to_reduce_thrash(self):
        go_cd_configurator = GoCdConfigurator(empty_config())
        job = go_cd_configurator.ensure_pipeline_group("g").ensure_pipeline("p").ensure_stage("s").ensure_job("j")
        job.ensure_artifacts(set())
        self.assertEqual(set(), job.artifacts)
        xml = parseString(go_cd_configurator.config)
        self.assertEqual(0, len(xml.getElementsByTagName('artifacts')))

    def test_artifacts_might_have_no_dest(self):
        job = more_options_pipeline().ensure_stage("s1").ensure_job("rake-job")
        artifacts = job.artifacts
        self.assertEqual(1, len(artifacts))
        self.assertEqual({Artifact.get_build_artifact("things/*")}, artifacts)

    def test_can_add_build_artifacts_to_job(self):
        job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
        job_with_artifacts = job.ensure_artifacts({
            Artifact.get_build_artifact("a1", "artifacts"),
            Artifact.get_build_artifact("a2", "others")})
        self.assertEqual(job, job_with_artifacts)
        artifacts = job.artifacts
        self.assertEqual(5, len(artifacts))
        self.assertTrue({Artifact.get_build_artifact("a1", "artifacts"), Artifact.get_build_artifact("a2", "others")}.issubset(artifacts))

    def test_can_add_test_artifacts_to_job(self):
        job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
        job_with_artifacts = job.ensure_artifacts({
            Artifact.get_test_artifact("a1"),
            Artifact.get_test_artifact("a2")})
        self.assertEqual(job, job_with_artifacts)
        artifacts = job.artifacts
        self.assertEqual(5, len(artifacts))
        self.assertTrue({Artifact.get_test_artifact("a1"), Artifact.get_test_artifact("a2")}.issubset(artifacts))

    def test_can_ensure_artifacts(self):
        job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
        # Ensuring a mixture of already-present and new artifacts only adds the new ones.
        job.ensure_artifacts({
            Artifact.get_test_artifact("from", "to"),
            Artifact.get_build_artifact("target/universal/myapp*.zip", "somewhereElse"),
            Artifact.get_test_artifact("another", "with dest"),
            Artifact.get_build_artifact("target/universal/myapp*.zip", "artifacts")})
        self.assertEqual({
            Artifact.get_build_artifact("target/universal/myapp*.zip", "artifacts"),
            Artifact.get_build_artifact("scripts/*", "files"),
            Artifact.get_test_artifact("from", "to"),
            Artifact.get_build_artifact("target/universal/myapp*.zip", "somewhereElse"),
            Artifact.get_test_artifact("another", "with dest")
        },
            job.artifacts)

    def test_jobs_have_tasks(self):
        job = more_options_pipeline().ensure_stage("s1").jobs[2]
        tasks = job.tasks
        self.assertEqual(4, len(tasks))
        self.assertEqual('rake', tasks[0].type)
        self.assertEqual('sometarget', tasks[0].target)
        self.assertEqual('passed', tasks[0].runif)
        self.assertEqual('fetchartifact', tasks[1].type)
        self.assertEqual('more-options', tasks[1].pipeline)
        self.assertEqual('earlyStage', tasks[1].stage)
        self.assertEqual('earlyWorm', tasks[1].job)
        self.assertEqual(FetchArtifactDir('sourceDir'), tasks[1].src)
        self.assertEqual('destDir', tasks[1].dest)
        self.assertEqual('passed', tasks[1].runif)

    def test_runif_defaults_to_passed(self):
        pipeline = typical_pipeline()
        tasks = pipeline.ensure_stage("build").ensure_job("compile").tasks
        self.assertEqual("passed", tasks[0].runif)

    def test_jobs_can_have_rake_tasks(self):
        job = more_options_pipeline().ensure_stage("s1").jobs[0]
        tasks = job.tasks
        self.assertEqual(1, len(tasks))
        self.assertEqual('rake', tasks[0].type)
        self.assertEqual("boo", tasks[0].target)

    def test_can_ensure_rake_task(self):
        job = more_options_pipeline().ensure_stage("s1").jobs[0]
        job.ensure_task(RakeTask("boo"))
        self.assertEqual(1, len(job.tasks))

    def test_can_add_rake_task(self):
        job = more_options_pipeline().ensure_stage("s1").jobs[0]
        job.ensure_task(RakeTask("another"))
        self.assertEqual(2, len(job.tasks))
        self.assertEqual("another", job.tasks[1].target)

    def test_can_add_exec_task_with_runif(self):
        stages = typical_pipeline().stages
        job = stages[0].jobs[0]
        added_task = job.add_task(ExecTask(['ls', '-la'], 'some/dir', "failed"))
        self.assertEqual(2, len(job.tasks))
        task = job.tasks[1]
        self.assertEqual(task, added_task)
        self.assertEqual(['ls', '-la'], task.command_and_args)
        self.assertEqual('some/dir', task.working_dir)
        self.assertEqual('failed', task.runif)

    def test_can_add_exec_task(self):
        stages = typical_pipeline().stages
        job = stages[0].jobs[0]
        added_task = job.add_task(ExecTask(['ls', '-la'], 'some/dir'))
        self.assertEqual(2, len(job.tasks))
        task = job.tasks[1]
        self.assertEqual(task, added_task)
        self.assertEqual(['ls', '-la'], task.command_and_args)
        self.assertEqual('some/dir', task.working_dir)

    def test_can_ensure_exec_task(self):
        stages = typical_pipeline().stages
        job = stages[0].jobs[0]
        t1 = job.ensure_task(ExecTask(['ls', '-la'], 'some/dir'))
        t2 = job.ensure_task(ExecTask(['make', 'options', 'source code']))
        job.ensure_task(ExecTask(['ls', '-la'], 'some/otherdir'))
        job.ensure_task(ExecTask(['ls', '-la'], 'some/dir'))
        self.assertEqual(3, len(job.tasks))
        self.assertEqual(t2, job.tasks[0])
        self.assertEqual(['make', 'options', 'source code'], (job.tasks[0]).command_and_args)
        self.assertEqual(t1, job.tasks[1])
        self.assertEqual(['ls', '-la'], (job.tasks[1]).command_and_args)
        self.assertEqual('some/dir', (job.tasks[1]).working_dir)
        self.assertEqual(['ls', '-la'], (job.tasks[2]).command_and_args)
        self.assertEqual('some/otherdir', (job.tasks[2]).working_dir)

    def test_exec_task_args_are_unescaped_as_appropriate(self):
        job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
        task = job.tasks[1]
        self.assertEqual(["bash", "-c",
                          'curl "http://domain.com/service/check?target=one+two+three&key=2714_beta%40domain.com"'],
                         task.command_and_args)

    def test_exec_task_args_are_escaped_as_appropriate(self):
        job = empty_stage().ensure_job("j")
        task = job.add_task(ExecTask(["bash", "-c",
                                      'curl "http://domain.com/service/check?target=one+two+three&key=2714_beta%40domain.com"']))
        self.assertEqual(["bash", "-c",
                          'curl "http://domain.com/service/check?target=one+two+three&key=2714_beta%40domain.com"'],
                         task.command_and_args)

    def test_can_have_no_tasks(self):
        self.assertEqual(0, len(empty_stage().ensure_job("empty_job").tasks))

    def test_can_add_fetch_artifact_task_to_job(self):
        stages = typical_pipeline().stages
        job = stages[0].jobs[0]
        added_task = job.add_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('d'), runif="any"))
        self.assertEqual(2, len(job.tasks))
        task = job.tasks[1]
        self.assertEqual(added_task, task)
        self.assertEqual('p', task.pipeline)
        self.assertEqual('s', task.stage)
        self.assertEqual('j', task.job)
        self.assertEqual(FetchArtifactDir('d'), task.src)
        self.assertEqual('any', task.runif)

    def test_fetch_artifact_task_can_have_src_file_rather_than_src_dir(self):
        job = more_options_pipeline().ensure_stage("s1").ensure_job("variety-of-tasks")
        tasks = job.tasks
        self.assertEqual(4, len(tasks))
        self.assertEqual('more-options', tasks[1].pipeline)
        self.assertEqual('earlyStage', tasks[1].stage)
        self.assertEqual('earlyWorm', tasks[1].job)
        self.assertEqual(FetchArtifactFile('someFile'), tasks[2].src)
        self.assertEqual('passed', tasks[1].runif)
        self.assertEqual(['true'], tasks[3].command_and_args)

    def test_fetch_artifact_task_can_have_dest(self):
        pipeline = more_options_pipeline()
        job = pipeline.ensure_stage("s1").ensure_job("variety-of-tasks")
        tasks = job.tasks
        self.assertEqual(FetchArtifactTask("more-options",
                                           "earlyStage",
                                           "earlyWorm",
                                           FetchArtifactDir("sourceDir"),
                                           dest="destDir"),
                         tasks[1])

    def test_can_ensure_fetch_artifact_tasks(self):
        job = more_options_pipeline().ensure_stage("s1").ensure_job("variety-of-tasks")
        job.ensure_task(FetchArtifactTask("more-options", "middleStage", "middleJob", FetchArtifactFile("someFile")))
        first_added_task = job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir')))
        self.assertEqual(5, len(job.tasks))
        self.assertEqual(first_added_task, job.tasks[4])
        self.assertEqual('p', (job.tasks[4]).pipeline)
        self.assertEqual('s', (job.tasks[4]).stage)
        self.assertEqual('j', (job.tasks[4]).job)
        self.assertEqual(FetchArtifactDir('dir'), (job.tasks[4]).src)
        self.assertEqual('passed', (job.tasks[4]).runif)
        job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactFile('f')))
        self.assertEqual(FetchArtifactFile('f'), (job.tasks[5]).src)
        job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir'), dest="somedest"))
        self.assertEqual("somedest", (job.tasks[6]).dest)
        job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir'), runif="failed"))
        self.assertEqual('failed', (job.tasks[7]).runif)

    def test_tasks_run_if_defaults_to_passed(self):
        job = empty_stage().ensure_job("j")
        job.add_task(ExecTask(['ls', '-la'], 'some/dir'))
        job.add_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir')))
        job.add_task(RakeTask('x'))
        self.assertEqual('passed', (job.tasks[0]).runif)
        self.assertEqual('passed', (job.tasks[1]).runif)
        self.assertEqual('passed', (job.tasks[2]).runif)

    def test_tasks_run_if_variants(self):
        job = more_options_pipeline().ensure_stage("s1").ensure_job("run-if-variants")
        tasks = job.tasks
        self.assertEqual('t-passed', tasks[0].command_and_args[0])
        self.assertEqual('passed', tasks[0].runif)
        self.assertEqual('t-none', tasks[1].command_and_args[0])
        self.assertEqual('passed', tasks[1].runif)
        self.assertEqual('t-failed', tasks[2].command_and_args[0])
        self.assertEqual('failed', tasks[2].runif)
        self.assertEqual('t-any', tasks[3].command_and_args[0])
        self.assertEqual('any', tasks[3].runif)
        self.assertEqual('t-both', tasks[4].command_and_args[0])
        self.assertEqual('any', tasks[4].runif)

    def test_cannot_set_runif_to_random_things(self):
        # The original read e.message, which does not exist on Python 3
        # exceptions; use str() on the captured exception instead.
        with self.assertRaises(RuntimeError) as ctx:
            ExecTask(['x'], runif='whatever')
        self.assertTrue(str(ctx.exception).count("whatever") > 0)

    def test_can_set_runif_to_particular_values(self):
        self.assertEqual('passed', ExecTask(['x'], runif='passed').runif)
        self.assertEqual('failed', ExecTask(['x'], runif='failed').runif)
        self.assertEqual('any', ExecTask(['x'], runif='any').runif)

    def test_tasks_dest_defaults_to_none(self):  # TODO: maybe None could be avoided
        job = empty_stage().ensure_job("j")
        job.add_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir')))
        self.assertEqual(None, (job.tasks[0]).dest)

    def test_can_add_exec_task_to_empty_job(self):
        job = empty_stage().ensure_job("j")
        added_task = job.add_task(ExecTask(['ls', '-la'], 'some/dir', "any"))
        self.assertEqual(1, len(job.tasks))
        task = job.tasks[0]
        self.assertEqual(task, added_task)
        self.assertEqual(['ls', '-la'], task.command_and_args)
        self.assertEqual('some/dir', task.working_dir)
        self.assertEqual('any', task.runif)

    def test_can_remove_all_tasks(self):
        stages = typical_pipeline().stages
        job = stages[0].jobs[0]
        self.assertEqual(1, len(job.tasks))
        j = job.without_any_tasks()
        self.assertEqual(j, job)
        self.assertEqual(0, len(job.tasks))

    def test_can_have_encrypted_environment_variables(self):
        pipeline = GoCdConfigurator(config('config-with-encrypted-variable')).ensure_pipeline_group("defaultGroup").find_pipeline("example")
        job = pipeline.ensure_stage('defaultStage').ensure_job('defaultJob')
        self.assertEqual({"MY_JOB_PASSWORD": "yq5qqPrrD9/j=="}, job.encrypted_environment_variables)

    def test_can_set_encrypted_environment_variables(self):
        job = empty_stage().ensure_job("j")
        job.ensure_encrypted_environment_variables({'one': 'blah=='})
        self.assertEqual({"one": "blah=="}, job.encrypted_environment_variables)

    def test_can_add_environment_variables(self):
        job = typical_pipeline() \
            .ensure_stage("build") \
            .ensure_job("compile")
        j = job.ensure_environment_variables({"new": "one"})
        self.assertEqual(j, job)
        self.assertEqual({"CF_COLOR": "false", "new": "one"}, job.environment_variables)

    def test_environment_variables_get_added_in_sorted_order_to_reduce_config_thrash(self):
        go_cd_configurator = GoCdConfigurator(empty_config())
        job = go_cd_configurator\
            .ensure_pipeline_group('P.Group')\
            .ensure_pipeline('P.Name') \
            .ensure_stage("build") \
            .ensure_job("compile")
        job.ensure_environment_variables({"ant": "a", "badger": "a", "zebra": "a"})
        xml = parseString(go_cd_configurator.config)
        names = [e.getAttribute('name') for e in xml.getElementsByTagName('variable')]
        self.assertEqual([u'ant', u'badger', u'zebra'], names)

    def test_can_remove_all_environment_variables(self):
        job = typical_pipeline() \
            .ensure_stage("build") \
            .ensure_job("compile")
        j = job.without_any_environment_variables()
        self.assertEqual(j, job)
        self.assertEqual({}, job.environment_variables)

    def test_job_can_haveTabs(self):
        job = typical_pipeline() \
            .ensure_stage("build") \
            .ensure_job("compile")
        self.assertEqual([Tab("Time_Taken", "artifacts/test-run-times.html")], job.tabs)

    def test_can_addTab(self):
        job = typical_pipeline() \
            .ensure_stage("build") \
            .ensure_job("compile")
        j = job.ensure_tab(Tab("n", "p"))
        self.assertEqual(j, job)
        self.assertEqual([Tab("Time_Taken", "artifacts/test-run-times.html"), Tab("n", "p")], job.tabs)

    def test_can_ensure_tab(self):
        job = typical_pipeline() \
            .ensure_stage("build") \
            .ensure_job("compile")
        job.ensure_tab(Tab("Time_Taken", "artifacts/test-run-times.html"))
        self.assertEqual([Tab("Time_Taken", "artifacts/test-run-times.html")], job.tabs)
class TestStages(unittest.TestCase):
    """Stages within a pipeline: names, approval, fetch-materials, jobs, variables.

    Modernised: ``assertEquals`` is a deprecated alias removed in
    Python 3.12 — replaced with ``assertEqual`` throughout.
    """

    def test_pipelines_have_stages(self):
        self.assertEqual(2, len(typical_pipeline().stages))

    def test_stages_have_names(self):
        stages = typical_pipeline().stages
        self.assertEqual('build', stages[0].name)
        self.assertEqual('deploy', stages[1].name)

    def test_stages_can_have_manual_approval(self):
        self.assertEqual(False, typical_pipeline().stages[0].has_manual_approval)
        self.assertEqual(True, typical_pipeline().stages[1].has_manual_approval)

    def test_can_set_manual_approval(self):
        stage = typical_pipeline().stages[0]
        s = stage.set_has_manual_approval()
        self.assertEqual(s, stage)
        self.assertEqual(True, stage.has_manual_approval)

    def test_stages_have_fetch_materials_flag(self):
        stage = typical_pipeline().ensure_stage("build")
        self.assertEqual(True, stage.fetch_materials)
        stage = more_options_pipeline().ensure_stage("s1")
        self.assertEqual(False, stage.fetch_materials)

    def test_can_set_fetch_materials_flag(self):
        stage = typical_pipeline().ensure_stage("build")
        s = stage.set_fetch_materials(False)
        self.assertEqual(s, stage)
        self.assertEqual(False, stage.fetch_materials)
        stage = more_options_pipeline().ensure_stage("s1")
        stage.set_fetch_materials(True)
        self.assertEqual(True, stage.fetch_materials)

    def test_stages_have_jobs(self):
        stages = typical_pipeline().stages
        jobs = stages[0].jobs
        self.assertEqual(1, len(jobs))
        self.assertEqual('compile', jobs[0].name)

    def test_can_add_job(self):
        stage = typical_pipeline().ensure_stage("deploy")
        self.assertEqual(1, len(stage.jobs))
        ensured_job = stage.ensure_job("new-job")
        self.assertEqual(2, len(stage.jobs))
        self.assertEqual(ensured_job, stage.jobs[1])
        self.assertEqual("new-job", stage.jobs[1].name)

    def test_can_add_job_to_empty_stage(self):
        stage = empty_stage()
        self.assertEqual(0, len(stage.jobs))
        ensured_job = stage.ensure_job("new-job")
        self.assertEqual(1, len(stage.jobs))
        self.assertEqual(ensured_job, stage.jobs[0])
        self.assertEqual("new-job", stage.jobs[0].name)

    def test_can_ensure_job_exists(self):
        stage = typical_pipeline().ensure_stage("deploy")
        self.assertEqual(1, len(stage.jobs))
        ensured_job = stage.ensure_job("upload")
        self.assertEqual(1, len(stage.jobs))
        self.assertEqual("upload", ensured_job.name)

    def test_can_have_encrypted_environment_variables(self):
        pipeline = GoCdConfigurator(config('config-with-encrypted-variable')).ensure_pipeline_group("defaultGroup").find_pipeline("example")
        stage = pipeline.ensure_stage('defaultStage')
        self.assertEqual({"MY_STAGE_PASSWORD": "yq5qqPrrD9/s=="}, stage.encrypted_environment_variables)

    def test_can_set_encrypted_environment_variables(self):
        stage = typical_pipeline().ensure_stage("deploy")
        stage.ensure_encrypted_environment_variables({'one': 'blah=='})
        self.assertEqual({"one": "blah=="}, stage.encrypted_environment_variables)

    def test_can_set_environment_variables(self):
        stage = typical_pipeline().ensure_stage("deploy")
        s = stage.ensure_environment_variables({"new": "one"})
        self.assertEqual(s, stage)
        self.assertEqual({"BASE_URL": "http://myurl", "new": "one"}, stage.environment_variables)

    def test_can_remove_all_environment_variables(self):
        stage = typical_pipeline().ensure_stage("deploy")
        s = stage.without_any_environment_variables()
        self.assertEqual(s, stage)
        self.assertEqual({}, stage.environment_variables)
class TestPipeline(unittest.TestCase):
def test_pipelines_have_names(self):
pipeline = typical_pipeline()
self.assertEquals('typical', pipeline.name)
def test_can_add_stage(self):
pipeline = empty_pipeline()
self.assertEquals(0, len(pipeline.stages))
new_stage = pipeline.ensure_stage("some_stage")
self.assertEquals(1, len(pipeline.stages))
self.assertEquals(new_stage, pipeline.stages[0])
self.assertEquals("some_stage", new_stage.name)
def test_can_ensure_stage(self):
pipeline = typical_pipeline()
self.assertEquals(2, len(pipeline.stages))
ensured_stage = pipeline.ensure_stage("deploy")
self.assertEquals(2, len(pipeline.stages))
self.assertEquals("deploy", ensured_stage.name)
def test_can_remove_stage(self):
pipeline = typical_pipeline()
self.assertEquals(2, len(pipeline.stages))
p = pipeline.ensure_removal_of_stage("deploy")
self.assertEquals(p, pipeline)
self.assertEquals(1, len(pipeline.stages))
self.assertEquals(0, len([s for s in pipeline.stages if s.name == "deploy"]))
def test_can_ensure_removal_of_stage(self):
pipeline = typical_pipeline()
self.assertEquals(2, len(pipeline.stages))
pipeline.ensure_removal_of_stage("stage-that-has-already-been-deleted")
self.assertEquals(2, len(pipeline.stages))
def test_can_ensure_initial_stage(self):
pipeline = typical_pipeline()
stage = pipeline.ensure_initial_stage("first")
self.assertEquals(stage, pipeline.stages[0])
self.assertEquals(3, len(pipeline.stages))
def test_can_ensure_initial_stage_if_already_exists_as_initial(self):
pipeline = typical_pipeline()
stage = pipeline.ensure_initial_stage("build")
self.assertEquals(stage, pipeline.stages[0])
self.assertEquals(2, len(pipeline.stages))
def test_can_ensure_initial_stage_if_already_exists(self):
pipeline = typical_pipeline()
stage = pipeline.ensure_initial_stage("deploy")
self.assertEquals(stage, pipeline.stages[0])
self.assertEquals("build", pipeline.stages[1].name)
self.assertEquals(2, len(pipeline.stages))
def test_can_set_stage_clean_policy(self):
pipeline = empty_pipeline()
stage1 = pipeline.ensure_stage("some_stage1").set_clean_working_dir()
stage2 = pipeline.ensure_stage("some_stage2")
self.assertEquals(True, pipeline.stages[0].clean_working_dir)
self.assertEquals(True, stage1.clean_working_dir)
self.assertEquals(False, pipeline.stages[1].clean_working_dir)
self.assertEquals(False, stage2.clean_working_dir)
def test_pipelines_can_have_git_urls(self):
    """A pipeline with a single git material exposes its URL via git_url."""
    pipeline = typical_pipeline()
    self.assertEqual("git@bitbucket.org:springersbm/gomatic.git", pipeline.git_url)

def test_git_is_polled_by_default(self):
    """set_git_url creates a material with polling enabled unless told otherwise."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
    pipeline.set_git_url("some git url")
    self.assertEqual(True, pipeline.git_material.polling)

def test_pipelines_can_have_git_material_with_material_name(self):
    """Git materials parsed from config retain their explicit material name."""
    pipeline = more_options_pipeline()
    self.assertEqual("git@bitbucket.org:springersbm/gomatic.git", pipeline.git_url)
    self.assertEqual("some-material-name", pipeline.git_material.material_name)

def test_package_material_can_find_package_id(self):
    """PackageMaterial.of resolves repository/package names to the package id in config."""
    configurator = GoCdConfigurator(config('config-with-typical-pipeline-repositories-and-packages'))
    p = PackageMaterial.of(repository_name="baboon", package_name="bar", configurator=configurator)
    self.assertEqual("cf8d24ea-50ec-478d-a8a2-60a5e0ca958b", p.package_id)

def test_package_can_have_package_material(self):
    """A package material can be added alongside existing (git) materials."""
    configurator = GoCdConfigurator(config('config-with-typical-pipeline-repositories-and-packages'))
    p = configurator.ensure_pipeline_group('P.Group') \
        .ensure_pipeline('typical') \
        .ensure_material(PackageMaterial.of(repository_name="baboon", package_name="bar", configurator=configurator))
    self.assertEqual(2, len(p.materials))
    self.assertEqual(False, p.materials[1].is_git)

def test_git_material_can_ignore_sources(self):
    """ignore_patterns reflects the <ignore> filters of the git material."""
    pipeline = GoCdConfigurator(config('config-with-source-exclusions')).ensure_pipeline_group("P.Group").find_pipeline("with-exclusions")
    self.assertEqual({"excluded-folder", "another-excluded-folder"}, pipeline.git_material.ignore_patterns)
def test_can_set_pipeline_git_url(self):
    """set_git_url replaces the URL, keeps the default 'master' branch, and is fluent."""
    pipeline = typical_pipeline()
    p = pipeline.set_git_url("git@bitbucket.org:springersbm/changed.git")
    self.assertEqual(p, pipeline)
    self.assertEqual("git@bitbucket.org:springersbm/changed.git", pipeline.git_url)
    self.assertEqual('master', pipeline.git_branch)

def test_can_set_pipeline_git_url_with_options(self):
    """set_git_material carries over every optional GitMaterial attribute."""
    pipeline = typical_pipeline()
    p = pipeline.set_git_material(GitMaterial(
        "git@bitbucket.org:springersbm/changed.git",
        branch="branch",
        destination_directory="foo",
        material_name="material-name",
        ignore_patterns={"ignoreMe", "ignoreThisToo"},
        polling=False))
    self.assertEqual(p, pipeline)
    self.assertEqual("branch", pipeline.git_branch)
    self.assertEqual("foo", pipeline.git_material.destination_directory)
    self.assertEqual("material-name", pipeline.git_material.material_name)
    self.assertEqual({"ignoreMe", "ignoreThisToo"}, pipeline.git_material.ignore_patterns)
    self.assertFalse(pipeline.git_material.polling, "git polling")
def test_throws_exception_if_no_git_url(self):
    """Reading git_url on a pipeline with no git material raises RuntimeError."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
    self.assertEqual(False, pipeline.has_single_git_material)
    with self.assertRaises(RuntimeError):
        pipeline.git_url

def test_git_url_throws_exception_if_multiple_git_materials(self):
    """git_url is ambiguous (and raises) when more than one git material exists."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
    pipeline.ensure_material(GitMaterial("git@bitbucket.org:springersbm/one.git"))
    pipeline.ensure_material(GitMaterial("git@bitbucket.org:springersbm/two.git"))
    self.assertEqual(False, pipeline.has_single_git_material)
    with self.assertRaises(RuntimeError):
        pipeline.git_url

def test_set_git_url_throws_exception_if_multiple_git_materials(self):
    """set_git_url refuses to guess which of several git materials to replace."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
    pipeline.ensure_material(GitMaterial("git@bitbucket.org:springersbm/one.git"))
    pipeline.ensure_material(GitMaterial("git@bitbucket.org:springersbm/two.git"))
    with self.assertRaises(RuntimeError):
        pipeline.set_git_url("git@bitbucket.org:springersbm/three.git")
def test_can_add_git_material(self):
    """ensure_material adds a git material and returns the pipeline (fluent)."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
    p = pipeline.ensure_material(GitMaterial("git@bitbucket.org:springersbm/changed.git"))
    self.assertEqual(p, pipeline)
    self.assertEqual("git@bitbucket.org:springersbm/changed.git", pipeline.git_url)

def test_can_ensure_git_material(self):
    """Ensuring an already-present git material does not duplicate it."""
    pipeline = typical_pipeline()
    pipeline.ensure_material(GitMaterial("git@bitbucket.org:springersbm/gomatic.git"))
    self.assertEqual("git@bitbucket.org:springersbm/gomatic.git", pipeline.git_url)
    self.assertEqual([GitMaterial("git@bitbucket.org:springersbm/gomatic.git")], pipeline.materials)

def test_can_have_multiple_git_materials(self):
    """A pipeline may hold several distinct git materials at once."""
    pipeline = typical_pipeline()
    pipeline.ensure_material(GitMaterial("git@bitbucket.org:springersbm/changed.git"))
    self.assertEqual([GitMaterial("git@bitbucket.org:springersbm/gomatic.git"), GitMaterial("git@bitbucket.org:springersbm/changed.git")],
                     pipeline.materials)

def test_pipelines_can_have_pipeline_materials(self):
    """Materials parsed from config preserve branch, name, and polling flags."""
    pipeline = more_options_pipeline()
    self.assertEqual(2, len(pipeline.materials))
    self.assertEqual(GitMaterial('git@bitbucket.org:springersbm/gomatic.git', branch="a-branch", material_name="some-material-name", polling=False),
                     pipeline.materials[0])

def test_pipelines_can_have_more_complicated_pipeline_materials(self):
    """A pipeline can mix git materials with upstream-pipeline materials."""
    pipeline = more_options_pipeline()
    self.assertEqual(2, len(pipeline.materials))
    self.assertEqual(True, pipeline.materials[0].is_git)
    self.assertEqual(PipelineMaterial('pipeline2', 'build'), pipeline.materials[1])

def test_pipelines_can_have_no_materials(self):
    """A freshly created pipeline starts with an empty material list."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
    self.assertEqual(0, len(pipeline.materials))

def test_can_add_pipeline_material(self):
    """ensure_material accepts an upstream-pipeline material."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
    p = pipeline.ensure_material(PipelineMaterial('deploy-qa', 'baseline-user-data'))
    self.assertEqual(p, pipeline)
    self.assertEqual(PipelineMaterial('deploy-qa', 'baseline-user-data'), pipeline.materials[0])

def test_can_add_more_complicated_pipeline_material(self):
    """PipelineMaterial's optional third argument (material name) round-trips."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
    p = pipeline.ensure_material(PipelineMaterial('p', 's', 'm'))
    self.assertEqual(p, pipeline)
    self.assertEqual(PipelineMaterial('p', 's', 'm'), pipeline.materials[0])

def test_can_ensure_pipeline_material(self):
    """Ensuring an already-present pipeline material does not duplicate it."""
    pipeline = more_options_pipeline()
    self.assertEqual(2, len(pipeline.materials))
    pipeline.ensure_material(PipelineMaterial('pipeline2', 'build'))
    self.assertEqual(2, len(pipeline.materials))

def test_can_remove_all_pipeline_materials(self):
    """remove_materials empties the material list entirely."""
    pipeline = more_options_pipeline()
    pipeline.remove_materials()
    self.assertEqual(0, len(pipeline.materials))

def test_materials_are_sorted(self):
    """Serialized XML orders materials git-before-pipeline, each group alphabetized,
    so repeated writes of the same config do not thrash the diff."""
    go_cd_configurator = GoCdConfigurator(empty_config())
    pipeline = go_cd_configurator.ensure_pipeline_group("g").ensure_pipeline("p")
    pipeline.ensure_material(PipelineMaterial('zeta', 'build'))
    pipeline.ensure_material(GitMaterial('git@bitbucket.org:springersbm/zebra.git'))
    pipeline.ensure_material(PipelineMaterial('alpha', 'build'))
    pipeline.ensure_material(GitMaterial('git@bitbucket.org:springersbm/art.git'))
    pipeline.ensure_material(PipelineMaterial('theta', 'build'))
    pipeline.ensure_material(GitMaterial('git@bitbucket.org:springersbm/this.git'))
    xml = parseString(go_cd_configurator.config)
    materials = xml.getElementsByTagName('materials')[0].childNodes
    self.assertEqual('git', materials[0].tagName)
    self.assertEqual('git', materials[1].tagName)
    self.assertEqual('git', materials[2].tagName)
    self.assertEqual('pipeline', materials[3].tagName)
    self.assertEqual('pipeline', materials[4].tagName)
    self.assertEqual('pipeline', materials[5].tagName)
    self.assertEqual('git@bitbucket.org:springersbm/art.git', materials[0].attributes['url'].value)
    self.assertEqual('git@bitbucket.org:springersbm/this.git', materials[1].attributes['url'].value)
    self.assertEqual('git@bitbucket.org:springersbm/zebra.git', materials[2].attributes['url'].value)
    self.assertEqual('alpha', materials[3].attributes['pipelineName'].value)
    self.assertEqual('theta', materials[4].attributes['pipelineName'].value)
    self.assertEqual('zeta', materials[5].attributes['pipelineName'].value)
def test_can_set_pipeline_git_url_for_new_pipeline(self):
    """set_git_url works on a pipeline that has no materials yet."""
    pipeline_group = standard_pipeline_group()
    new_pipeline = pipeline_group.ensure_pipeline("some_name")
    new_pipeline.set_git_url("git@bitbucket.org:springersbm/changed.git")
    self.assertEqual("git@bitbucket.org:springersbm/changed.git", new_pipeline.git_url)

def test_pipelines_do_not_have_to_be_based_on_template(self):
    """A normal pipeline reports is_based_on_template False."""
    pipeline = more_options_pipeline()
    self.assertFalse(pipeline.is_based_on_template)

def test_pipelines_can_be_based_on_template(self):
    """A templated pipeline exposes the template it is based on."""
    pipeline = GoCdConfigurator(config('pipeline-based-on-template')).ensure_pipeline_group('defaultGroup').find_pipeline('siberian')
    assert isinstance(pipeline, Pipeline)
    self.assertTrue(pipeline.is_based_on_template)
    template = GoCdConfigurator(config('pipeline-based-on-template')).templates[0]
    self.assertEqual(template, pipeline.template)

def test_pipelines_can_be_created_based_on_template(self):
    """set_template_name links a new pipeline to an existing template."""
    configurator = GoCdConfigurator(empty_config())
    configurator.ensure_template('temple').ensure_stage('s').ensure_job('j')
    pipeline = configurator.ensure_pipeline_group("g").ensure_pipeline('p').set_template_name('temple')
    self.assertEqual('temple', pipeline.template.name)
def test_pipelines_have_environment_variables(self):
    """Plain environment variables are exposed as a dict."""
    pipeline = typical_pipeline()
    self.assertEqual({"JAVA_HOME": "/opt/java/jdk-1.8"}, pipeline.environment_variables)

def test_pipelines_have_encrypted_environment_variables(self):
    """Encrypted variables are reported separately, with their ciphertext values."""
    pipeline = GoCdConfigurator(config('config-with-encrypted-variable')).ensure_pipeline_group("defaultGroup").find_pipeline("example")
    self.assertEqual({"MY_SECURE_PASSWORD": "yq5qqPrrD9/htfwTWMYqGQ=="}, pipeline.encrypted_environment_variables)

def test_pipelines_have_unencrypted_secure_environment_variables(self):
    """Secure-but-unencrypted variables form a third, distinct category."""
    pipeline = GoCdConfigurator(config('config-with-unencrypted-secure-variable')).ensure_pipeline_group("defaultGroup").find_pipeline("example")
    self.assertEqual({"MY_SECURE_PASSWORD": "hunter2"}, pipeline.unencrypted_secure_environment_variables)

def test_can_add_environment_variables_to_pipeline(self):
    """ensure_environment_variables adds entries and is fluent."""
    pipeline = empty_pipeline()
    p = pipeline.ensure_environment_variables({"new": "one", "again": "two"})
    self.assertEqual(p, pipeline)
    self.assertEqual({"new": "one", "again": "two"}, pipeline.environment_variables)

def test_can_add_encrypted_secure_environment_variables_to_pipeline(self):
    """Encrypted variables can be added via their own ensure method."""
    pipeline = empty_pipeline()
    pipeline.ensure_encrypted_environment_variables({"new": "one", "again": "two"})
    self.assertEqual({"new": "one", "again": "two"}, pipeline.encrypted_environment_variables)

def test_can_add_unencrypted_secure_environment_variables_to_pipeline(self):
    """Unencrypted secure variables can be added via their own ensure method."""
    pipeline = empty_pipeline()
    pipeline.ensure_unencrypted_secure_environment_variables({"new": "one", "again": "two"})
    self.assertEqual({"new": "one", "again": "two"}, pipeline.unencrypted_secure_environment_variables)

def test_can_add_environment_variables_to_new_pipeline(self):
    """Adding variables merges with any already present."""
    pipeline = typical_pipeline()
    pipeline.ensure_environment_variables({"new": "one"})
    self.assertEqual({"JAVA_HOME": "/opt/java/jdk-1.8", "new": "one"}, pipeline.environment_variables)

def test_can_modify_environment_variables_of_pipeline(self):
    """Re-adding an existing key overwrites its value."""
    pipeline = typical_pipeline()
    pipeline.ensure_environment_variables({"new": "one", "JAVA_HOME": "/opt/java/jdk-1.1"})
    self.assertEqual({"JAVA_HOME": "/opt/java/jdk-1.1", "new": "one"}, pipeline.environment_variables)

def test_can_remove_all_environment_variables(self):
    """without_any_environment_variables clears the dict and is fluent."""
    pipeline = typical_pipeline()
    p = pipeline.without_any_environment_variables()
    self.assertEqual(p, pipeline)
    self.assertEqual({}, pipeline.environment_variables)

def test_can_remove_specific_environment_variable(self):
    """remove_environment_variable deletes one key, tolerates unknown keys,
    and leaves encrypted variables untouched."""
    pipeline = empty_pipeline()
    pipeline.ensure_encrypted_environment_variables({'a': 's'})
    pipeline.ensure_environment_variables({'c': 'v', 'd': 'f'})
    pipeline.remove_environment_variable('d')
    p = pipeline.remove_environment_variable('unknown')
    self.assertEqual(p, pipeline)
    self.assertEqual({'a': 's'}, pipeline.encrypted_environment_variables)
    self.assertEqual({'c': 'v'}, pipeline.environment_variables)
def test_environment_variables_get_added_in_sorted_order_to_reduce_config_thrash(self):
    """Serialized <variable> elements are alphabetized for stable config diffs."""
    go_cd_configurator = GoCdConfigurator(empty_config())
    pipeline = go_cd_configurator \
        .ensure_pipeline_group('P.Group') \
        .ensure_pipeline('P.Name')
    pipeline.ensure_environment_variables({"badger": "a", "xray": "a"})
    pipeline.ensure_environment_variables({"ant": "a2", "zebra": "a"})
    xml = parseString(go_cd_configurator.config)
    names = [e.getAttribute('name') for e in xml.getElementsByTagName('variable')]
    self.assertEqual([u'ant', u'badger', u'xray', u'zebra'], names)

def test_encrypted_environment_variables_get_added_in_sorted_order_to_reduce_config_thrash(self):
    """Encrypted variables are serialized in the same alphabetized order."""
    go_cd_configurator = GoCdConfigurator(empty_config())
    pipeline = go_cd_configurator \
        .ensure_pipeline_group('P.Group') \
        .ensure_pipeline('P.Name')
    pipeline.ensure_encrypted_environment_variables({"badger": "a", "xray": "a"})
    pipeline.ensure_encrypted_environment_variables({"ant": "a2", "zebra": "a"})
    xml = parseString(go_cd_configurator.config)
    names = [e.getAttribute('name') for e in xml.getElementsByTagName('variable')]
    self.assertEqual([u'ant', u'badger', u'xray', u'zebra'], names)

def test_unencrypted_environment_variables_do_not_have_secure_attribute_in_order_to_reduce_config_thrash(self):
    """Plain variables omit the 'secure' attribute entirely."""
    go_cd_configurator = GoCdConfigurator(empty_config())
    pipeline = go_cd_configurator \
        .ensure_pipeline_group('P.Group') \
        .ensure_pipeline('P.Name')
    pipeline.ensure_environment_variables({"ant": "a"})
    xml = parseString(go_cd_configurator.config)
    secure_attributes = [e.getAttribute('secure') for e in xml.getElementsByTagName('variable')]
    # attributes that are missing are returned as empty
    self.assertEqual([''], secure_attributes, "should not have any 'secure' attributes")

def test_cannot_have_environment_variable_which_is_both_secure_and_insecure(self):
    """Adding a key as insecure removes any secure variable of the same name."""
    go_cd_configurator = GoCdConfigurator(empty_config())
    pipeline = go_cd_configurator \
        .ensure_pipeline_group('P.Group') \
        .ensure_pipeline('P.Name')
    pipeline.ensure_unencrypted_secure_environment_variables({"ant": "a"})
    pipeline.ensure_environment_variables({"ant": "b"})  # not secure
    self.assertEqual({"ant": "b"}, pipeline.environment_variables)
    self.assertEqual({}, pipeline.unencrypted_secure_environment_variables)

def test_can_change_environment_variable_from_secure_to_insecure(self):
    """Demoting one secure variable leaves the other secure variables intact."""
    go_cd_configurator = GoCdConfigurator(empty_config())
    pipeline = go_cd_configurator \
        .ensure_pipeline_group('P.Group') \
        .ensure_pipeline('P.Name')
    pipeline.ensure_unencrypted_secure_environment_variables({"ant": "a", "badger": "b"})
    pipeline.ensure_environment_variables({"ant": "b"})
    self.assertEqual({"ant": "b"}, pipeline.environment_variables)
    self.assertEqual({"badger": "b"}, pipeline.unencrypted_secure_environment_variables)
def test_pipelines_have_parameters(self):
    """Pipeline <params> are exposed as a dict."""
    pipeline = more_options_pipeline()
    self.assertEqual({"environment": "qa"}, pipeline.parameters)

def test_pipelines_have_no_parameters(self):
    """A pipeline without params yields an empty dict, not None."""
    pipeline = typical_pipeline()
    self.assertEqual({}, pipeline.parameters)

def test_can_add_params_to_pipeline(self):
    """ensure_parameters adds entries and is fluent."""
    pipeline = typical_pipeline()
    p = pipeline.ensure_parameters({"new": "one", "again": "two"})
    self.assertEqual(p, pipeline)
    self.assertEqual({"new": "one", "again": "two"}, pipeline.parameters)

def test_can_modify_parameters_of_pipeline(self):
    """Re-adding an existing parameter key overwrites its value."""
    pipeline = more_options_pipeline()
    pipeline.ensure_parameters({"new": "one", "environment": "qa55"})
    self.assertEqual({"environment": "qa55", "new": "one"}, pipeline.parameters)

def test_can_remove_all_parameters(self):
    """without_any_parameters clears the dict and is fluent."""
    pipeline = more_options_pipeline()
    p = pipeline.without_any_parameters()
    self.assertEqual(p, pipeline)
    self.assertEqual({}, pipeline.parameters)

def test_can_have_timer(self):
    """A timer cron spec is readable; onlyOnChanges defaults to off."""
    pipeline = more_options_pipeline()
    self.assertEqual(True, pipeline.has_timer)
    self.assertEqual("0 15 22 * * ?", pipeline.timer)
    self.assertEqual(False, pipeline.timer_triggers_only_on_changes)

def test_can_have_timer_with_onlyOnChanges_option(self):
    """The onlyOnChanges attribute round-trips from the config."""
    pipeline = GoCdConfigurator(config('config-with-more-options-pipeline')).ensure_pipeline_group('P.Group').find_pipeline('pipeline2')
    self.assertEqual(True, pipeline.has_timer)
    self.assertEqual("0 0 22 ? * MON-FRI", pipeline.timer)
    self.assertEqual(True, pipeline.timer_triggers_only_on_changes)

def test_need_not_have_timer(self):
    """Reading timer on a pipeline without one raises RuntimeError."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
    self.assertEqual(False, pipeline.has_timer)
    with self.assertRaises(RuntimeError):
        pipeline.timer

def test_can_set_timer(self):
    """set_timer stores the cron spec and is fluent."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
    p = pipeline.set_timer("one two three")
    self.assertEqual(p, pipeline)
    self.assertEqual("one two three", pipeline.timer)

def test_can_set_timer_with_only_on_changes_flag_off(self):
    """set_timer(only_on_changes=False) leaves the trigger flag off."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
    p = pipeline.set_timer("one two three", only_on_changes=False)
    self.assertEqual(p, pipeline)
    self.assertEqual("one two three", pipeline.timer)
    self.assertEqual(False, pipeline.timer_triggers_only_on_changes)

def test_can_set_timer_with_only_on_changes_flag(self):
    """set_timer(only_on_changes=True) sets the trigger flag."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
    p = pipeline.set_timer("one two three", only_on_changes=True)
    self.assertEqual(p, pipeline)
    self.assertEqual("one two three", pipeline.timer)
    self.assertEqual(True, pipeline.timer_triggers_only_on_changes)

def test_can_remove_timer(self):
    """remove_timer deletes the timer and is fluent."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
    pipeline.set_timer("one two three")
    p = pipeline.remove_timer()
    self.assertEqual(p, pipeline)
    self.assertFalse(pipeline.has_timer)
def test_can_have_label_template(self):
    """A configured labeltemplate is readable and has_label_template is True."""
    pipeline = typical_pipeline()
    self.assertEqual("something-${COUNT}", pipeline.label_template)
    self.assertEqual(True, pipeline.has_label_template)

def test_might_not_have_label_template(self):
    """Reading label_template when none is set raises RuntimeError."""
    pipeline = more_options_pipeline()  # TODO swap label with typical
    self.assertEqual(False, pipeline.has_label_template)
    with self.assertRaises(RuntimeError):
        pipeline.label_template

def test_can_set_label_template(self):
    """set_label_template stores the template and is fluent."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
    p = pipeline.set_label_template("some label")
    self.assertEqual(p, pipeline)
    self.assertEqual("some label", pipeline.label_template)

def test_can_set_default_label_template(self):
    """set_default_label_template applies DEFAULT_LABEL_TEMPLATE."""
    pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
    p = pipeline.set_default_label_template()
    self.assertEqual(p, pipeline)
    self.assertEqual(DEFAULT_LABEL_TEMPLATE, pipeline.label_template)

def test_can_set_automatic_pipeline_locking(self):
    """set_automatic_pipeline_locking turns the flag on and is fluent."""
    configurator = GoCdConfigurator(empty_config())
    pipeline = configurator.ensure_pipeline_group("new_group").ensure_pipeline("some_name")
    p = pipeline.set_automatic_pipeline_locking()
    self.assertEqual(p, pipeline)
    self.assertEqual(True, pipeline.has_automatic_pipeline_locking)
class TestPipelineGroup(unittest.TestCase):
    """Tests for pipeline-group level operations (find/add/remove/replace pipelines)."""

    def _pipeline_group_from_config(self):
        """Load the two-pipeline fixture and return its 'P.Group' group."""
        return GoCdConfigurator(config('config-with-two-pipelines')).ensure_pipeline_group('P.Group')

    def test_pipeline_groups_have_names(self):
        pipeline_group = standard_pipeline_group()
        self.assertEqual("P.Group", pipeline_group.name)

    def test_pipeline_groups_have_pipelines(self):
        pipeline_group = self._pipeline_group_from_config()
        self.assertEqual(2, len(pipeline_group.pipelines))

    def test_can_add_pipeline(self):
        """A newly ensured pipeline is empty: no materials, label, or locking."""
        configurator = GoCdConfigurator(empty_config())
        pipeline_group = configurator.ensure_pipeline_group("new_group")
        new_pipeline = pipeline_group.ensure_pipeline("some_name")
        self.assertEqual(1, len(pipeline_group.pipelines))
        self.assertEqual(new_pipeline, pipeline_group.pipelines[0])
        self.assertEqual("some_name", new_pipeline.name)
        self.assertEqual(False, new_pipeline.has_single_git_material)
        self.assertEqual(False, new_pipeline.has_label_template)
        self.assertEqual(False, new_pipeline.has_automatic_pipeline_locking)

    def test_can_find_pipeline(self):
        found_pipeline = self._pipeline_group_from_config().find_pipeline("pipeline2")
        self.assertEqual("pipeline2", found_pipeline.name)
        self.assertTrue(self._pipeline_group_from_config().has_pipeline("pipeline2"))

    def test_does_not_find_missing_pipeline(self):
        """find_pipeline raises RuntimeError naming the missing pipeline."""
        self.assertFalse(self._pipeline_group_from_config().has_pipeline("unknown-pipeline"))
        with self.assertRaises(RuntimeError) as ctx:
            self._pipeline_group_from_config().find_pipeline("unknown-pipeline")
        # NOTE: was `e.message` (Python 2 only); `str(e)` works on both.
        self.assertIn("unknown-pipeline", str(ctx.exception))

    def test_can_remove_pipeline(self):
        pipeline_group = self._pipeline_group_from_config()
        pipeline_group.ensure_removal_of_pipeline("pipeline1")
        self.assertEqual(1, len(pipeline_group.pipelines))
        with self.assertRaises(RuntimeError):
            pipeline_group.find_pipeline("pipeline1")

    def test_ensuring_replacement_of_pipeline_leaves_it_empty_but_in_same_place(self):
        """ensure_replacement_of_pipeline resets the pipeline but keeps its position."""
        pipeline_group = self._pipeline_group_from_config()
        self.assertEqual("pipeline1", pipeline_group.pipelines[0].name)
        pipeline = pipeline_group.find_pipeline("pipeline1")
        pipeline.set_label_template("something")
        self.assertEqual(True, pipeline.has_label_template)
        p = pipeline_group.ensure_replacement_of_pipeline("pipeline1")
        self.assertEqual(p, pipeline_group.pipelines[0])
        self.assertEqual("pipeline1", p.name)
        self.assertEqual(False, p.has_label_template)

    def test_can_ensure_pipeline_removal(self):
        """Removing an absent pipeline is a no-op and is fluent."""
        pipeline_group = self._pipeline_group_from_config()
        pg = pipeline_group.ensure_removal_of_pipeline("already-removed-pipeline")
        self.assertEqual(pg, pipeline_group)
        self.assertEqual(2, len(pipeline_group.pipelines))
        with self.assertRaises(RuntimeError):
            pipeline_group.find_pipeline("already-removed-pipeline")
class TestGoCdConfigurator(unittest.TestCase):
def test_can_tell_if_there_is_no_change_to_save(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
p = configurator.ensure_pipeline_group('Second.Group').ensure_replacement_of_pipeline('smoke-tests')
p.set_git_url('git@bitbucket.org:springersbm/gomatic.git')
p.ensure_stage('build').ensure_job('compile').ensure_task(ExecTask(['make', 'source code']))
self.assertFalse(configurator.has_changes)
def test_can_tell_if_there_is_a_change_to_save(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
p = configurator.ensure_pipeline_group('Second.Group').ensure_replacement_of_pipeline('smoke-tests')
p.set_git_url('git@bitbucket.org:springersbm/gomatic.git')
p.ensure_stage('moo').ensure_job('bar')
self.assertTrue(configurator.has_changes)
def test_keeps_schema_version(self):
empty_config = FakeHostRestClient(empty_config_xml.replace('schemaVersion="72"', 'schemaVersion="73"'), "empty_config()")
configurator = GoCdConfigurator(empty_config)
self.assertEquals(1, configurator.config.count('schemaVersion="73"'))
def test_can_find_out_server_settings(self):
configurator = GoCdConfigurator(config('config-with-server-settings'))
self.assertEquals("/some/dir", configurator.artifacts_dir)
self.assertEquals("http://10.20.30.40/", configurator.site_url)
self.assertEquals("my_ci_server", configurator.agent_auto_register_key)
self.assertEquals(Decimal("55.0"), configurator.purge_start)
self.assertEquals(Decimal("75.0"), configurator.purge_upto)
def test_can_find_out_server_settings_when_not_set(self):
configurator = GoCdConfigurator(config('config-with-no-server-settings'))
self.assertEquals(None, configurator.artifacts_dir)
self.assertEquals(None, configurator.site_url)
self.assertEquals(None, configurator.agent_auto_register_key)
self.assertEquals(None, configurator.purge_start)
self.assertEquals(None, configurator.purge_upto)
def test_can_set_server_settings(self):
configurator = GoCdConfigurator(config('config-with-no-server-settings'))
configurator.artifacts_dir = "/a/dir"
configurator.site_url = "http://1.2.3.4/"
configurator.agent_auto_register_key = "a_ci_server"
configurator.purge_start = Decimal("44.0")
configurator.purge_upto = Decimal("88.0")
self.assertEquals("/a/dir", configurator.artifacts_dir)
self.assertEquals("http://1.2.3.4/", configurator.site_url)
self.assertEquals("a_ci_server", configurator.agent_auto_register_key)
self.assertEquals(Decimal("44.0"), configurator.purge_start)
self.assertEquals(Decimal("88.0"), configurator.purge_upto)
def test_can_have_no_pipeline_groups(self):
self.assertEquals(0, len(GoCdConfigurator(empty_config()).pipeline_groups))
def test_gets_all_pipeline_groups(self):
self.assertEquals(2, len(GoCdConfigurator(config('config-with-two-pipeline-groups')).pipeline_groups))
def test_can_get_initial_config_md5(self):
configurator = GoCdConfigurator(empty_config())
self.assertEquals("42", configurator._initial_md5)
def test_config_is_updated_as_result_of_updating_part_of_it(self):
configurator = GoCdConfigurator(config('config-with-just-agents'))
agent = configurator.agents[0]
self.assertEquals(2, len(agent.resources))
agent.ensure_resource('a-resource-that-it-does-not-already-have')
configurator_based_on_new_config = GoCdConfigurator(FakeHostRestClient(configurator.config))
self.assertEquals(3, len(configurator_based_on_new_config.agents[0].resources))
def test_can_remove_agent(self):
configurator = GoCdConfigurator(config('config-with-just-agents'))
self.assertEquals(2, len(configurator.agents))
configurator.ensure_removal_of_agent('go-agent-1')
self.assertEquals(1, len(configurator.agents))
self.assertEquals('go-agent-2', configurator.agents[0].hostname)
def test_can_add_pipeline_group(self):
configurator = GoCdConfigurator(empty_config())
self.assertEquals(0, len(configurator.pipeline_groups))
new_pipeline_group = configurator.ensure_pipeline_group("a_new_group")
self.assertEquals(1, len(configurator.pipeline_groups))
self.assertEquals(new_pipeline_group, configurator.pipeline_groups[-1])
self.assertEquals("a_new_group", new_pipeline_group.name)
def test_can_ensure_pipeline_group_exists(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
self.assertEquals(2, len(configurator.pipeline_groups))
pre_existing_pipeline_group = configurator.ensure_pipeline_group('Second.Group')
self.assertEquals(2, len(configurator.pipeline_groups))
self.assertEquals('Second.Group', pre_existing_pipeline_group.name)
def test_can_remove_all_pipeline_groups(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
s = configurator.remove_all_pipeline_groups()
self.assertEquals(s, configurator)
self.assertEquals(0, len(configurator.pipeline_groups))
def test_can_remove_pipeline_group(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
s = configurator.ensure_removal_of_pipeline_group('P.Group')
self.assertEquals(s, configurator)
self.assertEquals(1, len(configurator.pipeline_groups))
def test_can_ensure_removal_of_pipeline_group(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
configurator.ensure_removal_of_pipeline_group('pipeline-group-that-has-already-been-removed')
self.assertEquals(2, len(configurator.pipeline_groups))
def test_can_have_templates(self):
templates = GoCdConfigurator(config('config-with-just-templates')).templates
self.assertEquals(2, len(templates))
self.assertEquals('api-component', templates[0].name)
self.assertEquals('deploy-stack', templates[1].name)
self.assertEquals('deploy-components', templates[1].stages[0].name)
def test_can_have_no_templates(self):
self.assertEquals(0, len(GoCdConfigurator(empty_config()).templates))
def test_can_add_template(self):
configurator = GoCdConfigurator(empty_config())
template = configurator.ensure_template('foo')
self.assertEquals(1, len(configurator.templates))
self.assertEquals(template, configurator.templates[0])
self.assertTrue(isinstance(configurator.templates[0], Pipeline), "so all methods that use to configure pipeline don't need to be tested for template")
def test_can_ensure_template(self):
configurator = GoCdConfigurator(config('config-with-just-templates'))
template = configurator.ensure_template('deploy-stack')
self.assertEquals('deploy-components', template.stages[0].name)
def test_can_ensure_replacement_of_template(self):
configurator = GoCdConfigurator(config('config-with-just-templates'))
template = configurator.ensure_replacement_of_template('deploy-stack')
self.assertEquals(0, len(template.stages))
def test_can_remove_template(self):
configurator = GoCdConfigurator(config('config-with-just-templates'))
self.assertEquals(2, len(configurator.templates))
configurator.ensure_removal_of_template('deploy-stack')
self.assertEquals(1, len(configurator.templates))
def test_if_remove_all_templates_also_remove_templates_element(self):
configurator = GoCdConfigurator(config('config-with-just-templates'))
self.assertEquals(2, len(configurator.templates))
configurator.ensure_removal_of_template('api-component')
configurator.ensure_removal_of_template('deploy-stack')
self.assertEquals(0, len(configurator.templates))
xml = configurator.config
root = ET.fromstring(xml)
self.assertEqual(['server'], [element.tag for element in root])
def test_top_level_elements_get_reordered_to_please_go(self):
configurator = GoCdConfigurator(config('config-with-agents-and-templates-but-without-pipelines'))
configurator.ensure_pipeline_group("some_group").ensure_pipeline("some_pipeline")
xml = configurator.config
root = ET.fromstring(xml)
self.assertEquals("pipelines", root[0].tag)
self.assertEquals("templates", root[1].tag)
self.assertEquals("agents", root[2].tag)
def test_top_level_elements_with_environment_get_reordered_to_please_go(self):
configurator = GoCdConfigurator(config('config-with-pipelines-environments-and-agents'))
configurator.ensure_pipeline_group("P.Group").ensure_pipeline("some_pipeline")
xml = configurator.config
root = ET.fromstring(xml)
self.assertEqual(['server', 'pipelines', 'environments', 'agents'], [element.tag for element in root])
def test_top_level_elements_that_cannot_be_created_get_reordered_to_please_go(self):
configurator = GoCdConfigurator(config('config-with-many-of-the-top-level-elements-that-cannot-be-added'))
configurator.ensure_pipeline_group("P.Group").ensure_pipeline("some_pipeline")
xml = configurator.config
root = ET.fromstring(xml)
self.assertEqual(['server', 'scms', 'pipelines', 'environments', 'agents'],
[element.tag for element in root])
def test_elements_can_be_created_in_order_to_please_go(self):
configurator = GoCdConfigurator(empty_config())
pipeline = configurator.ensure_pipeline_group("some_group").ensure_pipeline("some_pipeline")
pipeline.ensure_parameters({'p': 'p'})
pipeline.set_timer("some timer")
pipeline.ensure_environment_variables({'pe': 'pe'})
pipeline.set_git_url("gurl")
stage = pipeline.ensure_stage("s")
stage.ensure_environment_variables({'s': 's'})
job = stage.ensure_job("j")
job.ensure_environment_variables({'j': 'j'})
job.ensure_task(ExecTask(['ls']))
job.ensure_tab(Tab("n", "p"))
job.ensure_resource("r")
job.ensure_artifacts({Artifact.get_build_artifact('s', 'd')})
xml = configurator.config
pipeline_root = ET.fromstring(xml).find('pipelines').find('pipeline')
self.assertEquals("params", pipeline_root[0].tag)
self.assertEquals("timer", pipeline_root[1].tag)
self.assertEquals("environmentvariables", pipeline_root[2].tag)
self.assertEquals("materials", pipeline_root[3].tag)
self.assertEquals("stage", pipeline_root[4].tag)
self.__check_stage(pipeline_root)
def test_elements_are_reordered_in_order_to_please_go(self):
configurator = GoCdConfigurator(empty_config())
pipeline = configurator.ensure_pipeline_group("some_group").ensure_pipeline("some_pipeline")
pipeline.set_git_url("gurl")
pipeline.ensure_environment_variables({'pe': 'pe'})
pipeline.set_timer("some timer")
pipeline.ensure_parameters({'p': 'p'})
self.__configure_stage(pipeline)
self.__configure_stage(configurator.ensure_template('templ'))
xml = configurator.config
pipeline_root = ET.fromstring(xml).find('pipelines').find('pipeline')
self.assertEquals("params", pipeline_root[0].tag)
self.assertEquals("timer", pipeline_root[1].tag)
self.assertEquals("environmentvariables", pipeline_root[2].tag)
self.assertEquals("materials", pipeline_root[3].tag)
self.assertEquals("stage", pipeline_root[4].tag)
self.__check_stage(pipeline_root)
template_root = ET.fromstring(xml).find('templates').find('pipeline')
self.assertEquals("stage", template_root[0].tag)
self.__check_stage(template_root)
def __check_stage(self, pipeline_root):
stage_root = pipeline_root.find('stage')
self.assertEquals("environmentvariables", stage_root[0].tag)
self.assertEquals("jobs", stage_root[1].tag)
job_root = stage_root.find('jobs').find('job')
self.assertEquals("environmentvariables", job_root[0].tag)
self.assertEquals("tasks", job_root[1].tag)
self.assertEquals("tabs", job_root[2].tag)
self.assertEquals("resources", job_root[3].tag)
self.assertEquals("artifacts", job_root[4].tag)
    def __configure_stage(self, pipeline):
        # Populates a stage/job with elements added deliberately OUT of GoCD's
        # required order, so the reordering tests above can assert the result.
        # Do not "tidy" the call order — it is the point of the fixture.
        stage = pipeline.ensure_stage("s")
        job = stage.ensure_job("j")
        stage.ensure_environment_variables({'s': 's'})
        job.ensure_tab(Tab("n", "p"))
        job.ensure_artifacts({Artifact.get_build_artifact('s', 'd')})
        job.ensure_task(ExecTask(['ls']))
        job.ensure_resource("r")
        job.ensure_environment_variables({'j': 'j'})
def simplified(s):
    """Strip the ends of *s* and delete every tab, newline, backslash and
    space, so snippets can be compared ignoring layout differences."""
    result = s.strip()
    for junk in ("\t", "\n", "\\", " "):
        result = result.replace(junk, "")
    return result
def sneakily_converted_to_xml(pipeline):
    # Templates hold their own element; ordinary pipelines serialise via parent.
    element = pipeline.element if pipeline.is_template else pipeline.parent.element
    return ET.tostring(element)
class TestReverseEngineering(unittest.TestCase):
    """Round-trip tests: pipeline -> reverse-engineered Python -> exec -> identical XML."""

    def check_round_trip_pipeline(self, configurator, before, show=False):
        """Reverse-engineer `before` into Python, execute it, and assert that the
        rebuilt pipeline (and its template, if any) serialises to identical XML."""
        reverse_engineered_python = configurator.as_python(before, with_save=False)
        if show:
            print('r' * 88)
            print(reverse_engineered_python)
        pipeline = "evaluation failed"
        template = "evaluation failed"
        # call form of exec (identical on Python 2); NOTE: still relies on
        # Python 2 semantics where exec rebinds the locals `pipeline`/`template`.
        exec(reverse_engineered_python)
        # exec reverse_engineered_python.replace("from gomatic import *", "from gomatic.go_cd_configurator import *")
        xml_before = sneakily_converted_to_xml(before)
        # noinspection PyTypeChecker
        xml_after = sneakily_converted_to_xml(pipeline)
        if show:
            print('b' * 88)
            print(prettify(xml_before))
            print('a' * 88)
            print(prettify(xml_after))
        # assertEqual, not the deprecated assertEquals alias
        self.assertEqual(xml_before, xml_after)
        if before.is_based_on_template:
            # noinspection PyTypeChecker
            self.assertEqual(sneakily_converted_to_xml(before.template), sneakily_converted_to_xml(template))

    def test_can_round_trip_simplest_pipeline(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_standard_label(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_default_label_template()
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_non_standard_label(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_label_template("non standard")
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_automatic_pipeline_locking(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_automatic_pipeline_locking()
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_pipeline_material(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_material(PipelineMaterial("p", "s", "m"))
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_multiple_git_materials(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
        before.ensure_material(GitMaterial("giturl1", "b", "m1"))
        before.ensure_material(GitMaterial("giturl2"))
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_git_url(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_url("some git url")
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_git_extras(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(
            GitMaterial("some git url",
                        branch="some branch",
                        material_name="some material name",
                        polling=False,
                        ignore_patterns={"excluded", "things"},
                        destination_directory='foo/bar'))
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_git_branch_only(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("some git url", branch="some branch"))
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_git_material_only(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("some git url", material_name="m name"))
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_git_polling_only(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("some git url", polling=False))
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_git_ignore_patterns_only_ISSUE_4(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("git url", ignore_patterns={"ex", "cluded"}))
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_git_destination_directory_only(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("git url", destination_directory='foo/bar'))
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_pipeline_parameters(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_parameters({"p": "v"})
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_pipeline_environment_variables(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_environment_variables({"p": "v"})
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_pipeline_encrypted_environment_variables(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_encrypted_environment_variables({"p": "v"})
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_pipeline_unencrypted_secure_environment_variables(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_unencrypted_secure_environment_variables({"p": "v"})
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_timer(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_timer("some timer")
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_timer_only_on_changes(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_timer("some timer", only_on_changes=True)
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_stage_bits(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
        before.ensure_stage("stage1").ensure_environment_variables({"k": "v"}).set_clean_working_dir().set_has_manual_approval().set_fetch_materials(False)
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_stages(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
        before.ensure_stage("stage1")
        before.ensure_stage("stage2")
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_job(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
        before.ensure_stage("stage").ensure_job("job")
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_job_bits(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
        before.ensure_stage("stage").ensure_job("job") \
            .ensure_artifacts({Artifact.get_build_artifact("s", "d"), Artifact.get_test_artifact("sauce")}) \
            .ensure_environment_variables({"k": "v"}) \
            .ensure_resource("r") \
            .ensure_tab(Tab("n", "p")) \
            .set_timeout("23") \
            .set_runs_on_all_agents()
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_jobs(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
        stage = before.ensure_stage("stage")
        stage.ensure_job("job1")
        stage.ensure_job("job2")
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_tasks(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
        job = before.ensure_stage("stage").ensure_job("job")
        job.add_task(ExecTask(["one", "two"], working_dir="somewhere", runif="failed"))
        job.add_task(ExecTask(["one", "two"], working_dir="somewhere", runif="failed"))
        job.ensure_task(ExecTask(["one"], working_dir="somewhere else"))
        job.ensure_task(ExecTask(["two"], runif="any"))
        job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactFile('f'), runif="any"))
        job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('d')))
        job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('d'), dest="somewhere-else"))
        job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('d'), dest="somewhere-else", runif="any"))
        job.ensure_task(RakeTask('t1', runif="any"))
        job.ensure_task(RakeTask('t2'))
        self.check_round_trip_pipeline(configurator, before)

    def test_can_round_trip_pipeline_base_on_template(self):
        configurator = GoCdConfigurator(empty_config())
        before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_template_name("temple")
        configurator.ensure_template("temple").ensure_stage("stage").ensure_job("job")
        self.check_round_trip_pipeline(configurator, before)

    def test_can_reverse_engineer_pipeline(self):
        configurator = GoCdConfigurator(config('config-with-more-options-pipeline'))
        actual = configurator.as_python(more_options_pipeline(), with_save=False)
        expected = """#!/usr/bin/env python
from gomatic import *
configurator = GoCdConfigurator(FakeConfig(whatever))
pipeline = configurator\
    .ensure_pipeline_group("P.Group")\
    .ensure_replacement_of_pipeline("more-options")\
    .set_timer("0 15 22 * * ?")\
    .set_git_material(GitMaterial("git@bitbucket.org:springersbm/gomatic.git", branch="a-branch", material_name="some-material-name", polling=False))\
    .ensure_material(PipelineMaterial("pipeline2", "build")).ensure_environment_variables({'JAVA_HOME': '/opt/java/jdk-1.7'})\
    .ensure_parameters({'environment': 'qa'})
stage = pipeline.ensure_stage("earlyStage")
job = stage.ensure_job("earlyWorm").ensure_artifacts(set([BuildArtifact("scripts/*", "files"), BuildArtifact("target/universal/myapp*.zip", "artifacts"), TestArtifact("from", "to")])).set_runs_on_all_agents()
job.add_task(ExecTask(['ls']))
job.add_task(ExecTask(['bash', '-c', 'curl "http://domain.com/service/check?target=one+two+three&key=2714_beta%40domain.com"']))
stage = pipeline.ensure_stage("middleStage")
job = stage.ensure_job("middleJob")
stage = pipeline.ensure_stage("s1").set_fetch_materials(False)
job = stage.ensure_job("rake-job").ensure_artifacts({BuildArtifact("things/*")})
job.add_task(RakeTask("boo", "passed"))
job = stage.ensure_job("run-if-variants")
job.add_task(ExecTask(['t-passed']))
job.add_task(ExecTask(['t-none']))
job.add_task(ExecTask(['t-failed'], runif="failed"))
job.add_task(ExecTask(['t-any'], runif="any"))
job.add_task(ExecTask(['t-both'], runif="any"))
job = stage.ensure_job("variety-of-tasks")
job.add_task(RakeTask("sometarget", "passed"))
job.add_task(FetchArtifactTask("more-options", "earlyStage", "earlyWorm", FetchArtifactDir("sourceDir"), dest="destDir"))
job.add_task(FetchArtifactTask("more-options", "middleStage", "middleJob", FetchArtifactFile("someFile")))
job.add_task(ExecTask(['true']))
"""
        # comparison is whitespace-insensitive via simplified()
        self.assertEqual(simplified(expected), simplified(actual))
class TestXmlFormatting(unittest.TestCase):
    """Tests for the prettify() XML formatting helper."""

    def test_can_format_simple_xml(self):
        expected = '<?xml version="1.0" ?>\n<top>\n\t<middle>stuff</middle>\n</top>'
        non_formatted = "<top><middle>stuff</middle></top>"
        formatted = prettify(non_formatted)
        # assertEqual, not the deprecated assertEquals alias
        self.assertEqual(expected, formatted)

    def test_can_format_more_complicated_xml(self):
        expected = '<?xml version="1.0" ?>\n<top>\n\t<middle>\n\t\t<innermost>stuff</innermost>\n\t</middle>\n</top>'
        non_formatted = "<top><middle><innermost>stuff</innermost></middle></top>"
        formatted = prettify(non_formatted)
        self.assertEqual(expected, formatted)

    def test_can_format_actual_config(self):
        # close the fixture files deterministically instead of leaking handles
        with open("test-data/config-unformatted.xml") as unformatted_file:
            formatted = prettify(unformatted_file.read())
        with open("test-data/config-formatted.xml") as formatted_file:
            expected = formatted_file.read()

        def head(s):
            # first 10 lines, for a readable failure message
            return "\n".join(s.split('\n')[:10])

        self.assertEqual(expected, formatted, "expected=\n%s\n%s\nactual=\n%s" % (head(expected), "=" * 88, head(formatted)))
|
from random import randint
from time import sleep
from mass_whois.config import get_config
from django.core.management.base import BaseCommand
import requests
def main_loop():
coserver_endpoint = get_config('COSERVER_ENDPOINT')
#'http://127.0.0.1:8000/coserver/'
print 'get data from' + coserver_endpoint
try:
resp = requests.get(coserver_endpoint, timeout=120)
except Exception as exc:
print exc
print 'Sleeping for a bit to give the coordinator a break'
sleep(randint(3, 8))
return
if resp.status_code != 200:
# Coordinator might no be up, try later.
print 'Got non-HTTP 200 back from coordinator, will try later'
sleep(5)
return
if resp.text.strip().upper() == 'END':
# No more IPs to work with or the list hasn't finished
# generating yet
print 'No more IPs from coordinator, will check back later'
sleep(30)
return
for name in resp.text.split(','):
print name
sleep(30)
class Command(BaseCommand):
    # Django management command: polls the coordinator forever for names
    # to look up. Terminated externally (Ctrl-C / process manager).
    help = 'Get names to lookup from coserver'

    def handle(self, *args, **options):
        # Intentional infinite loop; every path through main_loop sleeps,
        # so this does not busy-spin.
        while True:
            main_loop()
|
import numpy as np
import cv2
from clize import run
def convert_HSV_to_IJSV(img_arr):
    """Convert an OpenCV HSV image (H in [0, 179]) to a 4-channel IJSV image.

    The circular hue is encoded as two uint8 channels (i, j) = the cosine and
    sine of the hue angle, mapped from [-1, 1] to [0, 254]; S and V pass through.

    :param img_arr: uint8 array of shape (height, width, 3) in OpenCV HSV.
    :return: uint8 array of shape (height, width, 4).
    """
    height, width, depth = img_arr.shape
    # BUGFIX: the original multiplied the uint8 hue by 2 in uint8 arithmetic,
    # which wraps modulo 256 for hue > 127 (e.g. 135 -> 270 -> 14 degrees).
    # Cast to float first; this also vectorises the per-row while loop.
    rad_arr = np.deg2rad(img_arr[:, :, 0].astype(np.float64) * 2.0)
    res = np.zeros((height, width, 4), np.uint8)
    res[:, :, 0] = np.round(np.cos(rad_arr) * 127 + 127).astype(np.uint8)
    res[:, :, 1] = np.round(np.sin(rad_arr) * 127 + 127).astype(np.uint8)
    res[:, :, 2] = img_arr[:, :, 1]  # saturation passes through
    res[:, :, 3] = img_arr[:, :, 2]  # value passes through
    return res
def convert_IJSV_to_HSV(img_arr):
    """Invert convert_HSV_to_IJSV: decode the (i, j) hue channels back to an
    OpenCV HSV image (H in [0, 179]).

    :param img_arr: uint8 array of shape (height, width, 4) as produced by
        convert_HSV_to_IJSV.
    :return: uint8 array of shape (height, width, 3) in OpenCV HSV.
    """
    height, width, depth = img_arr.shape
    # Vectorised replacement of the original per-row while loop; the
    # arithmetic (float64 atan2 -> degrees -> mod 360 -> halve -> round)
    # is unchanged.
    hue_i = img_arr[:, :, 0].astype(np.float64) - 127
    hue_j = img_arr[:, :, 1].astype(np.float64) - 127
    hue = np.rad2deg(np.arctan2(hue_j, hue_i))
    hue = np.mod(hue + 360, 360) / 2  # back to OpenCV's half-degree hue
    res = np.zeros((height, width, 3), np.uint8)
    res[:, :, 0] = np.round(hue).astype(np.uint8)
    res[:, :, 1] = img_arr[:, :, 2]  # saturation passes through
    res[:, :, 2] = img_arr[:, :, 3]  # value passes through
    return res
def test(fpath):
    # Visual round-trip check: load an image, convert BGR -> HSV -> IJSV -> HSV
    # -> BGR and display the intermediate and reconstructed images.
    # Blocks until a key is pressed in the cv2 window.
    img = cv2.imread(fpath, -1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    img2 = convert_HSV_to_IJSV(img)
    cv2.imshow("intermediate", img2)
    img3 = convert_IJSV_to_HSV(img2)
    img4 = cv2.cvtColor(img3, cv2.COLOR_HSV2BGR)
    cv2.imshow("converted", img4)
    cv2.waitKey()

# clize turns test() into a CLI taking the image path as its argument
if __name__ == "__main__":
    run(test)
|
# -*- coding: utf-8 -*-
import command
from django.core.management.base import BaseCommand
class SubCommand(command.SubCommand):
    # Re-exported unchanged so project code can import SubCommand from here
    # instead of from the third-party `command` module.
    pass
class Command(BaseCommand, command.Command):
    # Adapter: lets a `command.Command` run as a Django management command
    # by forwarding Django's argv (minus the program name) to command.run().
    def run_from_argv(self, argv):
        return self.run(argv[1:])
|
from .abstract_repository_analyzer import AbstractRepositoryAnalyzer
from git import Repo
from git import InvalidGitRepositoryError
import subprocess
import os
import logging
class GitRepositoryAnalyzer(AbstractRepositoryAnalyzer):
    """
    Analysis plug-in for Git-Repositories.
    """

    def count_repo_branches(self, repo_path: str, remote: str) -> None:
        """
        Counts the repository's branches.
        :param repo_path: path to the repository root.
        :param remote: remote uri of the branches
        :return: None
        """
        # SECURITY: argv list + cwd instead of an interpolated shell string
        # (repo_path could contain shell metacharacters); count the lines in
        # Python rather than piping through `wc -l`.
        output = subprocess.check_output(["git", "branch", "-a"], cwd=repo_path)
        self.get_details(remote)["branch_count"] = len(output.splitlines())

    def count_repo_contributors(self, repo_path: str, remote: str) -> None:
        """
        Counts the repository's contributors.
        :param repo_path: path to the repository root.
        :param remote: remote uri of the branches
        :return: None
        """
        output = subprocess.check_output(["git", "shortlog", "-s", "HEAD"], cwd=repo_path)
        self.get_details(remote)["contributors"] = len(output.splitlines())

    def extract_last_repo_update(self, repo_path: str, remote: str) -> None:
        """
        Extracts the repository's last update-timestamp.
        :param repo_path: path to the repository root.
        :param remote: remote uri of the branches
        :return: None
        """
        # int() tolerates the trailing newline in git's output
        timestamp = subprocess.check_output(["git", "log", "-1", "--format=%ct"], cwd=repo_path)
        self.get_details(remote)["last_update"] = int(timestamp)

    def _analyze(self, path: str, repo_details: dict) -> iter:
        """Walk the sub-folders of `path`, analyze every valid git repository
        and yield (repo_path, origin_url) pairs."""
        self._repo_details = repo_details
        for folder in os.listdir(path):
            current_path = path + "/" + folder
            logging.info("[GitRepositoryAnalyzer]: Analyzing:" + current_path)
            # Skip folders that are not git repositories.
            try:
                repo = Repo(current_path + "/")
            except InvalidGitRepositoryError:
                continue
            origin_url = repo.remotes.origin.url
            # Git analysis.
            self.count_repo_contributors(current_path, origin_url)
            self.count_repo_branches(current_path, origin_url)
            self.extract_last_repo_update(current_path, origin_url)
            yield (current_path, origin_url)

    def analyzes(self):
        # Identifier used by the framework to select this analyzer.
        return "git"
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Receive a natural number N via command-line arguments and print only the
# last N lines of the input file.  Cross-check the output with `tail`.
import sys

# BUGFIX: the original read sys.argv[1] *before* validating argc, compared
# the length with `is 2` (identity, not equality), and used `assert` for
# input validation (stripped under -O). It also never closed the file.
if len(sys.argv) != 2:
    sys.exit("[usage]: python nock_14.py N")
N = int(sys.argv[1])
with open("hightemp.txt", "r") as f:
    lines = f.readlines()
print(''.join(lines[-N:]), end="")
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 19 14:26:12 2021
@author: Sotiris
"""
import os
import numpy as np
import pandas as pd
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import imageio
from random import seed
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error, r2_score
##############################################################################
# NN network
##############################################################################
class NN(nn.Module):
    """Fully-connected ELU network with fixed (depth=3) or configurable depth.

    :param feature_dim: input width.
    :param hidden_dim: hidden-layer width.
    :param output_dim: output width.
    :param depth: number of layers for the ModuleDict variant; None selects
        the fixed 3-hidden-layer architecture.
    :param Ogden_BWD: bound outputs for the Ogden inverse problem (2 params).
    :param Holz_BWD: bound outputs for the Holzapfel inverse problem (8 params).
    """

    def __init__(self, feature_dim, hidden_dim, output_dim, depth=None, Ogden_BWD=False, Holz_BWD=False):
        super(NN, self).__init__()
        self.depth = depth
        self.Holz_BWD = Holz_BWD
        self.Ogden_BWD = Ogden_BWD
        if self.depth is None:
            # Default architecture: 3 hidden layers when depth is not given
            self.fc1 = nn.Linear(feature_dim, hidden_dim)
            self.fc2 = nn.Linear(hidden_dim, hidden_dim)
            self.fc3 = nn.Linear(hidden_dim, hidden_dim)
            self.fc4 = nn.Linear(hidden_dim, output_dim)
        else:
            self.layers = nn.ModuleDict()  # named collection of the layers
            self.layers['input'] = nn.Linear(feature_dim, hidden_dim)
            for i in range(1, depth):
                self.layers['hidden_' + str(i)] = torch.nn.Linear(hidden_dim, hidden_dim)
            self.layers['output'] = torch.nn.Linear(hidden_dim, output_dim)
        # Sigmoid needed only when outputs are bounded for an inverse problem
        if self.Holz_BWD or self.Ogden_BWD:
            self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        if self.depth is None:
            x = F.elu(self.fc1(x))
            x = F.elu(self.fc2(x))
            x = F.elu(self.fc3(x))
            x = self.fc4(x)
        else:
            for layer in self.layers:
                # idiom fix: direct comparison instead of building a one-item
                # set on every iteration
                if layer != 'output':
                    x = F.elu(self.layers[layer](x))
            x = self.layers['output'](x)
        if self.Holz_BWD:
            # af and afs were sampled one order lower than the others.
            # Caps the outputs: a-parameters at 50 kPa, b-parameters at 30.
            scale_tensor = torch.diag(torch.FloatTensor([500, 30, 50, 30, 50, 30, 500, 30]))
            x = torch.matmul(self.sigmoid(x), scale_tensor)
        if self.Ogden_BWD:
            scale_tensor = torch.diag(torch.FloatTensor([50, 30]))
            x = torch.matmul(self.sigmoid(x), scale_tensor)
        return x
def make_train_step(model, loss_fn, optimizer):
    """Build and return a closure that runs one optimisation step.

    The returned function takes (x, y), performs forward/backward/update on
    the captured model and optimizer, and returns the step's loss as a float.
    """
    def train_step(x, y):
        model.train()                 # enable train-mode behaviour (dropout etc.)
        prediction = model(x)         # forward pass
        step_loss = loss_fn(y, prediction)
        step_loss.backward()          # accumulate gradients
        optimizer.step()              # apply update, then clear gradients
        optimizer.zero_grad()
        return step_loss.item()

    return train_step
def scaled_to_tensor(device, Xscaler, X, Y, XY_BatchSize, Yscaler=None, X_noise=None):
    """Scale X (and optionally Y), move both to `device` as float tensors and
    wrap them in a TensorDataset/DataLoader.

    Returns (X_tensor, dataset, loader).
    """
    features = Xscaler.transform(X)
    if X_noise is not None:
        features = features + X_noise   # optional additive input noise
    targets = Yscaler.transform(Y) if Yscaler is not None else Y
    X_tensor = torch.from_numpy(features).float().to(device)
    Y_tensor = torch.from_numpy(targets).float().to(device)
    data = TensorDataset(X_tensor, Y_tensor)
    loader = DataLoader(dataset=data, batch_size=XY_BatchSize)
    return X_tensor, data, loader
def convert_to_ref(TestData, n_val, fX_val):
    """Reshape stacked per-step forces into one reference row per sample.

    Each sample occupies 50 consecutive rows of fX_val. For shear data the
    row is [Fx(50) | Fz(50)] (columns 0 and 1); otherwise just Fz(50)
    (column 0).

    :param TestData: object with a boolean `isshear` attribute.
    :param n_val: number of samples to convert.
    :param fX_val: array with n_val*50 (or more) rows.
    :return: array of shape (n_val, 100) for shear, else (n_val, 50).
    """
    # Vectorised: the original rebuilt the output with np.vstack inside a
    # loop, which is quadratic in n_val.
    if TestData.isshear:
        # (n_val, 50, 2) -> (n_val, 2, 50) -> per-sample [Fx | Fz]
        return fX_val[:n_val * 50, :2].reshape(n_val, 50, 2).transpose(0, 2, 1).reshape(n_val, 100)
    return fX_val[:n_val * 50, 0].reshape(n_val, 50)
class CustomLoss(torch.nn.Module):
    """L1 loss plus a penalty counting non-monotonic entries in the
    predictions' first 50 columns.

    :param alpha: regularization weight for the monotonicity constraint.
    """

    def __init__(self, alpha):
        super(CustomLoss, self).__init__()
        self.alpha = alpha  # regularization for monotonicity constraint

    def MonotonicityLoss(self, yhat):
        """Count entries where cummax(row) != row, i.e. monotonicity violations.
        Runs outside autograd; only the count feeds back into the loss."""
        # .detach() instead of the legacy .data attribute (which silently
        # bypasses autograd's safety checks)
        yhat_pd = pd.DataFrame(yhat.detach().cpu().numpy()[:, 0:50])
        y_cummax = yhat_pd.cummax(axis=1)
        y_dif = abs(y_cummax - yhat_pd)
        # Find elements that are not satisfying cummax(Yk) == Yk
        col_count = y_dif[y_dif > 1e-15].count()
        total_count = col_count.to_numpy().sum()
        return total_count

    def forward(self, yhat, y):
        abs_tensor = torch.abs(yhat - y)
        L1 = torch.mean(abs_tensor)
        L2 = self.MonotonicityLoss(yhat)
        totLoss = L1 + self.alpha * L2
        return totLoss
class R2Loss(torch.nn.Module):
    """Sum over samples of the MSE normalised by each sample's variance
    (an R^2-style training loss)."""

    def __init__(self):
        super(R2Loss, self).__init__()

    def forward(self, yhat, y):
        per_sample_var = torch.var(y, 1, unbiased=False)
        squared_errors = F.mse_loss(yhat, y, reduction="none")
        per_sample_sse = torch.sum(squared_errors, 1)
        # Normalize error per sample, then sum the normalised errors
        normalised = torch.div(per_sample_sse, per_sample_var)
        return torch.sum(normalised)
class HolzScaler:
    """Fixed linear scaler for the 8 Holzapfel parameters: converts the
    a-parameters to kPa (with af/afs one extra order of magnitude, since they
    were sampled one order lower) and leaves the b-exponents unchanged."""

    def __init__(self):
        to_kpa = 1e3   # a-parameters: convert to kPa
        as_is = 1      # b-exponents: left unchanged
        self.scale_mat = np.diag([to_kpa * 10, as_is, to_kpa, as_is,
                                  to_kpa, as_is, to_kpa * 10, as_is])
        self.inv_mat = np.linalg.inv(self.scale_mat)

    def transform(self, Y_inp):
        return np.matmul(Y_inp, self.scale_mat)

    def inverse_transform(self, Y_scaled):
        return np.matmul(Y_scaled, self.inv_mat)
class OgdenScaler:
    """Fixed linear scaler for the 2 Ogden parameters: converts the modulus
    to kPa and leaves the exponent unchanged."""

    def __init__(self):
        to_kpa = 1e3   # modulus: convert to kPa
        as_is = 1      # exponent: left unchanged
        self.scale_mat = np.diag([to_kpa, as_is])
        self.inv_mat = np.linalg.inv(self.scale_mat)

    def transform(self, Y_inp):
        return np.matmul(Y_inp, self.scale_mat)

    def inverse_transform(self, Y_scaled):
        return np.matmul(Y_scaled, self.inv_mat)
#########################################################
# Active learning GPR
def GP_regression_std(regressor, X, n_max=1):
    """Active-learning query strategy: select the n_max rows of X where the
    GP regressor's predictive standard deviation is highest.

    Returns (query_idx, X[query_idx, :]).
    """
    _, std = regressor.predict(X, return_std=True)
    query_idx = np.argsort(-std)[:n_max]  # descending std, top n_max
    return query_idx, X[query_idx, :]
def NN_regression_R2(regressor, X, Y_true, n_max):
    """Active-learning query strategy: select the n_max samples where the NN's
    predictions score the worst (lowest) per-sample R^2.

    Returns (query_idx, X[query_idx, :]).
    """
    Y_pred = regressor.predict(X)
    # Per-sample R^2 (samples as columns, hence the transposes)
    per_sample_r2 = r2_score(Y_true.T, Y_pred.T, multioutput='raw_values')
    query_idx = per_sample_r2.argsort()[:n_max]  # ascending: worst fits first
    return query_idx, X[query_idx, :]
from django.urls import path
from . import views
# Search app routes: list views plus detail-by-pk views.
urlpatterns = [
    path('', views.search, name="search"),
    path('photo/', views.photo, name="search_photo"),
    path('design/', views.design, name="search_design"),
    # NOTE(review): these two reuse the names "search_photo"/"search_design"
    # from above; reverse()/{% url %} resolves a duplicated name to the last
    # match — confirm whether the pk routes should have distinct names.
    path('photo/<int:pk>/', views.photoByid, name="search_photo"),
    path('design/<int:pk>/', views.designByid, name="search_design"),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""System Information Gathering Script
"""
import subprocess
from sys import stdout
def uname():
"""uname command function"""
print "\n\n"
uname = "uname -a"
printline1 = """
\rGathering system information using `{0}` command\r
""".format(uname).strip()
print printline1
for i in printline1:
stdout.write("="),
stdout.flush()
print("\n")
subprocess.call(uname, shell=True)
print "\n\n"
def disk_space():
"""Function that runs the Unix 'df' command to measure
disk space"""
# Alternatively, you can split your command into command and separate
# arguments
df = "df"
df_arg = "-h"
printline2 = """
\rGathering diskspace info using `{0} {1}` command\r
""".format(df, df_arg).strip()
print printline2
for i in printline2:
stdout.write("="),
stdout.flush()
print("\n")
subprocess.call([df, df_arg])
print "\n\n"
def temp_space():
"""This function will run shell command 'du -h' to measure the temp
space on this machine"""
du = "du"
du_arg = "-h"
temp = "/tmp"
printline2 = """
\rGathering diskspace used by the {2} dir using `{0} {1}` command\r
""".format(du, du_arg, temp).strip()
print printline2
for i in printline2:
stdout.write("="),
stdout.flush()
print("\n")
subprocess.call([du, du_arg, temp])
print "\n\n"
# Run every gathering step in order when executed as a script.
if __name__ == "__main__":
    uname()
    disk_space()
    temp_space()
|
import pandas as pd
import numpy as np
import requests
# BUGFIX: pd.compat.StringIO was removed from pandas; use the stdlib instead.
from io import StringIO

tickers = ['eth', 'ada', 'btc', 'xmr', 'xrp', 'ltc']
coin_info = {}
for ticker in tickers:
    # Fetch each coin's daily-metrics CSV and parse it into a DataFrame.
    tmp = requests.get('https://coinmetrics.io/data/' + ticker + '.csv')
    df = pd.read_csv(StringIO(tmp.text))
    # Shift the header one column left (the CSV's first column is the date
    # index) and drop the now-empty trailing column.
    df = df.reset_index()
    df.columns = np.append(df.columns[1:].values, [''])
    df = df.iloc[:, :-1]
    coin_info[ticker] = df
from responses.conf import settings
from responses.models import Response
from responses.serializers import ResponseSerializer
from responses.tasks import add_survey_response, publish_survey_data
from responses.utils.importers import import_class
from rest_framework.views import APIView
from rest_framework.response import Response as RestResponse
authentication = import_class(settings.API_AUTHENTICATION_CLASS)
permission = import_class(settings.API_PERMISSION_CLASS)
class ResponseViewSet(APIView):
    # DRF view over survey responses: GET lists everything stored,
    # POST queues ingestion and publication via background tasks.
    serializer_class = ResponseSerializer

    def get(self, request):
        # Return all stored survey responses, serialized.
        responses = Response.objects.all()
        data = ResponseSerializer(responses, many=True).data
        return RestResponse(data)

    def post(self, request):
        # Hand the payload to the async pipeline; processing happens
        # out-of-band via .delay() (Celery-style task queue).
        add_survey_response.delay(request.data)
        publish_survey_data.delay()
        # NOTE(review): RestResponse(200) sends 200 as the response *body*
        # with the default status code; RestResponse(status=200) was likely
        # intended — confirm what clients expect before changing.
        return RestResponse(200)
|
import pygame
from pygame.locals import *
import random
import time
## Type "pygame.display.quit()" in the GUI to exit
## The first center is 87x87
## The second center is 187x187
## The third center is 287x287
class Game_Info_1(object):
    """Bingo tile data entered per column (one list per B/I/N/G/O column)."""
    ## Enter tile info here
    bList = ['Jason', 'Hercules', 'Theseus', 'Odesseus', 'e', '90', '80']
    iList = ['753 B.C.', '509 B.C.', '27 B.C.', '476 A.D.', '264 B.C.', '146 B.C.', '60 B.C.', '43 B.C.', '72 B.C.', '44 B.C.']
    nList = ['k', 'l', 'm', 'n', 'o', '90', '80']
    gList = ['p', 'q', 'r', 's', 't', '90', '80']
    oList = ['u', 'v', 'w', 'x', 'y', '90', '80']
    mainList = [bList, iList, nList, gList, oList]

    def CheckRun():
        # BUGFIX: ListCheck/main are not in scope as bare names inside the
        # class body's functions; qualify via the class.
        if Game_Info_1.ListCheck():
            main()
        else:
            print("Lists are not the minimum length, please correct.")

    ## Resets Checklist to a full list containing all items in mainList
    def FillCheckList(checkList):
        # BUGFIX: the original's range(length - 1) silently dropped the last
        # tile, and referenced the class attributes as bare names (NameError).
        # Extending column-by-column preserves the original B..O ordering.
        for column in Game_Info_1.mainList:
            checkList.extend(column)
        return checkList

    ## Checks that every column has more than 5 tiles
    def ListCheck():
        # BUGFIX: the original tested iList twice and never checked oList.
        return all(len(column) > 5 for column in Game_Info_1.mainList)

    ## Returns one unused random tile from each of the five columns
    def GetLetter(mainList, checkList):
        final = []
        for x in range(5):
            currentList = mainList[x]
            # BUGFIX: the first roll was hard-coded to randint(0, 4), so
            # tiles at index >= 5 could never be chosen on the first try.
            randomNum = random.randint(0, len(currentList) - 1)
            while currentList[randomNum] not in checkList:
                randomNum = random.randint(0, len(currentList) - 1)
            final.append(currentList[randomNum])
            checkList.remove(currentList[randomNum])
        return final
################# New Class #####################
class Game_Info_2(object):
    """Bingo tile data entered as a single flat list."""
    ## Enter tile info here
    mainList = ['I', 'V', 'M', 'C', 'II', 'IV', 'VIII',
                'XII', 'g', 'h', 'i', 'j', '90', '80',
                'k', 'l', 'm', 'n', 'o', '90', '80',
                'p', 'q', 'r', 's', 't', '90', '80',
                'u', 'v', 'w', 'x', 'y', '90', '80', '70', '60']

    ## Resets Checklist to a full list containing all items in mainList
    def FillCheckList(checkList):
        # BUGFIX: range(len(mainList) - 1) dropped the last tile, and
        # mainList is a class attribute, not a bare global (NameError).
        checkList.extend(Game_Info_2.mainList)
        return checkList

    ## Returns five unused random tiles from the flat list
    def GetLetter(mainList, checkList):
        final = []
        for x in range(5):
            randomNum = random.randint(0, len(mainList) - 1)
            while mainList[randomNum] not in checkList:
                randomNum = random.randint(0, len(mainList) - 1)
            final.append(mainList[randomNum])
            checkList.remove(mainList[randomNum])
        return final
##########################################################
########## Everything after is universal code ############
##########################################################
def GetGameType():
    """Ask the user which tile-entry mode to use and return the matching
    game-info class (Game_Info_1 for per-column lists, Game_Info_2 for one
    flat list). Re-prompts until the answer is 'y' or 'n'."""
    game_type = raw_input("Would you like to enter words into each separate line: (y/n) ")
    if game_type == 'y':
        return Game_Info_1
    if game_type == 'n':
        return Game_Info_2
    # BUGFIX: the original recursive retry discarded its result, leaving
    # className unbound and raising on any invalid input.
    return GetGameType()

game_type = GetGameType()
## Screenshot function
## All variables are purely for the name of the save file
def TakeScreenshot(screen):
    """Save *screen* to H:/ as a PNG named after the current local time."""
    time_taken = time.asctime(time.localtime(time.time()))
    time_taken = time_taken.replace(" ", "_")
    time_taken = time_taken.replace(":", ".")
    save_file = "H:/" +time_taken+ ".png"
    pygame.image.save(screen, save_file)
    # BUG FIX: the original applied `% (save_file)` to print's return value
    # (None), a TypeError on Python 3; format the string inside the call.
    print("A screen shot has been taken and saved as : %s" % (save_file))
def BuildBingo():
    """Render the letters B I N G O across the top of the board."""
    ## display BINGO
    header_font = pygame.font.SysFont("bold", 40, True, False)
    for col, letter in enumerate(['B', 'I', 'N', 'G', 'O']):
        rendered = header_font.render(letter, 1, (0, 0, 0))
        bounds = pygame.Surface.get_rect(rendered)
        w = bounds[2]
        h = bounds[3]
        # Centre each letter over its 100px-wide column.
        screen.blit(rendered, (37 + 100 * col - w / 2, 37 - h / 2))
def BuildLists():
    """Fill the 5x5 card with tiles drawn from the selected game type."""
    checkList = []
    game_type.FillCheckList(checkList)
    ## setup for the font
    ####(font, size, bold, italic)
    font = pygame.font.SysFont("Arial", 16, False, True)
    ## checkList catches repeat items from occuring
    ## x and y correspond to the x and y axis
    for x in range (5):
        # BUG FIX: the original always called Game_Info_1.GetLetter, so the
        # user's choice of Game_Info_2 never affected tile drawing; dispatch
        # through the selected class instead.
        # NOTE(review): Game_Info_1 and Game_Info_2 expect differently
        # shaped mainList arguments -- confirm the global matches.
        currentList = game_type.GetLetter(mainList, checkList)
        for y in range (5):
            text = font.render( (currentList[y]) , 1, (0, 0, 0))
            rectDims = pygame.Surface.get_rect(text)
            width = rectDims[2]
            height = rectDims[3]
            ## this is where you set the center point of each line
            textpos = ( (37 + 100*y)-width/2, (137 + 100*x)-height/2 )
            screen.blit(text, textpos)
## Main exe.
def main():
    """Set up the pygame window and run the event loop.

    A mouse click redraws a fresh card, F1 saves a screenshot, ESC or
    closing the window quits.
    """
    pygame.init()
    global screen
    ## build the window
    pygame.display.set_caption('Bingo')
    screen = pygame.display.set_mode((475,575))
    pygame.mouse.set_visible(1)
    ## build the background
    background = pygame.image.load('BingoFinal.png').convert()
    screen.blit(background,(0,0))
    ## build text
    BuildBingo()
    BuildLists()
    ## keep updating
    clock = pygame.time.Clock()
    # Event loop capped at 60 fps.
    while 1:
        clock.tick(60)
        for event in pygame.event.get():
            if event.type == QUIT:
                return pygame.display.quit()
            elif event.type == KEYDOWN and event.key == K_ESCAPE:
                return pygame.display.quit()
            elif event.type == MOUSEBUTTONDOWN:
                # Any click: repaint the background and deal a new card.
                screen.blit(background, (0,0))
                BuildBingo()
                BuildLists()
            elif event.type == KEYDOWN and event.key == K_F1:
                TakeScreenshot(screen)
        pygame.display.update()
main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import pytx3.common
class TestCommon(unittest.TestCase):
    """Unit tests for pytx3.common helpers."""

    def test_camel_case_to_underscore(self):
        # Use assertEqual so a failure reports both values instead of a
        # bare AssertionError.
        self.assertEqual(
            pytx3.common.camel_case_to_underscore("AbcXyz"), "abc_xyz")
|
"""
ai-architecture-template - test_notebooks.py
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
"""
import pytest
from azure_utils.dev_ops.testing_utilities import run_notebook
from notebooks import directory
@pytest.mark.parametrize(
    "notebook",
    ['00_AMLConfiguration.ipynb'],
)
def test_notebook(notebook, add_nunit_attachment):
    """Execute *notebook* end-to-end under the project kernel, attaching
    output to the NUnit report; fails if any cell raises."""
    run_notebook(notebook, add_nunit_attachment, kernel_name="ai-architecture-template", root=directory)
|
from utils import read_plaintext_to_stream, convert_stream_to_plaintext
from cipher import cipher
from inv_cipher import inv_cipher
from key_expansion import key_expansion
import logger
def encrypt_decrypt(plaintext, key, decrypt=False):
    """Run AES over *plaintext* with *key*; decrypt when *decrypt* is True.

    Both inputs are strings parsed by read_plaintext_to_stream; the key
    length (in 32-bit words, N_k) selects the AES variant.  Returns the
    cipher output stream.
    """
    logger.plaintext(plaintext)
    logger.key(key)
    state = read_plaintext_to_stream(plaintext)
    key_stream = read_plaintext_to_stream(key)
    # Number of 32-bit words in the key (4/6/8 -> AES-128/192/256).
    nk = len(key_stream) // 4
    round_keys = key_expansion(key_stream)
    if decrypt:
        logger.decrypt()
        return inv_cipher(state, round_keys, nk)
    logger.encrypt()
    return cipher(state, round_keys, nk)
def encrypt(plaintext, key):
    """Convenience wrapper: run encrypt_decrypt in encryption mode."""
    return encrypt_decrypt(plaintext, key)
def decrypt(plaintext, key):
    """Convenience wrapper: run encrypt_decrypt in decryption mode."""
    return encrypt_decrypt(plaintext, key, decrypt=True)
if __name__ == '__main__':
    # Demo: round-trip the standard AES test vectors through 128-, 192-
    # and 256-bit keys (plaintext, key) as hex strings.
    data = [
        ("00112233445566778899aabbccddeeff", "000102030405060708090a0b0c0d0e0f"),
        ("00112233445566778899aabbccddeeff", "000102030405060708090a0b0c0d0e0f1011121314151617"),
        ("00112233445566778899aabbccddeeff", "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f")
    ]
    for plaintext, key in data:
        encrypted = encrypt(plaintext, key)
        print("")
        # Feed the ciphertext back through to demonstrate decryption.
        decrypt(convert_stream_to_plaintext(encrypted), key)
        print("\n")
#!/usr/bin/env python
import os
import sys
# Using below code for tornado
import tornado.httpserver
import tornado.ioloop
import tornado.wsgi
import django.core.handlers.wsgi
from django.core.wsgi import get_wsgi_application
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blog.settings")
    #import pdb;pdb.set_trace()
    from django.core.management import execute_from_command_line
    # `manage.py <cmd> tornado` serves the Django app under Tornado
    # instead of running the normal management command.
    if len(sys.argv) >2 and sys.argv[2] == "tornado":
        # BUG FIX (cleanup): the original built a
        # django.core.handlers.wsgi.WSGIHandler() and immediately
        # overwrote it with get_wsgi_application(); only the latter is used.
        application = get_wsgi_application()
        container = tornado.wsgi.WSGIContainer(application)
        http_server = tornado.httpserver.HTTPServer(container)
        # NOTE(review): the same server is bound to four ports; confirm
        # 8001-8003 are intentional and not leftovers.
        http_server.listen(8001)
        http_server.listen(8002)
        http_server.listen(8003)
        http_server.listen(8000)
        tornado.ioloop.IOLoop.instance().start()
    else:
        execute_from_command_line(sys.argv)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 11:10:11 2020
@author: user
"""
import numpy as np
import pandas as pd
import os
import cv2
import random
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
from tensorflow import keras
# Accumulators declared for the scan (not otherwise used in this script).
file_list = []
class_list = []
# Root folder of the training images: one sub-folder per category label.
DATADIR = "C:/Users/user/Desktop/Shopee_Code_League_Stuff/shopee-product-detection-dataset/train/train"
# All the categories you want your neural network to detect
CATEGORIES = ["00", "01", "02", "03", "04",
              "05", "06", "07", "08", "09",
              "10", "11", "12", "13", "14", "15", "16",
              "17", "18", "19", "20", "21", "22", "23",
              "24", "25", "26", "27", "28", "29", "30",
              "31", "32", "33", "34", "35", "36", "37",
              "38", "39", "40", "41"]
# Side length (px) every image is resized to before training.
# Can try increasing to improve accuracy
# 48 about 1hr+
IMG_SIZE = 64
# Filled by create_training_data() with [image_array, class_index] pairs.
training_data = []
def create_training_data():
    """Load up to 1600 grayscale images per category from DATADIR into
    training_data as [resized_image, class_index] pairs."""
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        count = 0
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([new_array, class_num])
                count += 1
                # Cap each class at 1600 samples to bound training time.
                if count == 1600:
                    break
            except Exception as e:
                # NOTE(review): unreadable/corrupt images are silently
                # skipped; consider at least counting the failures.
                pass
create_training_data()
# Shuffle so batches mix classes instead of arriving in label order.
random.shuffle(training_data)
X = [] #features
y = [] #labels
for features, label in training_data:
    X.append(features)
    y.append(label)
# Shape (num_samples, IMG_SIZE, IMG_SIZE, 1) -- single grayscale channel.
X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
# Scale pixel values from 0-255 down to 0-1.
X = X / 255.0
model = Sequential()
#3 convolutional layers
model.add(Conv2D(32, (3, 3), input_shape = X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
#1 hidden layers
model.add(Flatten())
model.add(Dense(64))
model.add(Activation("relu"))
model.add(Dense(64))
model.add(Activation("relu"))
#output layer
# Softmax over the 42 category classes -- the model therefore already
# outputs probabilities, not logits.
model.add(Dense(42, activation='softmax'))
# sparse_categorical_crossentropy matches the integer (non-one-hot) labels.
model.compile(loss='sparse_categorical_crossentropy',optimizer='Adam',metrics=['accuracy'])
model.fit(X, y, batch_size=32, epochs=20)
# Folder of unlabeled test images.
TESTDIR = "C:/Users/user/Desktop/Shopee_Code_League_Stuff/shopee-product-detection-dataset/test/test"
test_data = []
filenames = []
class_predictions = []
# Only files listed in test.csv are scored.
test_data_in_csv = pd.read_csv("C:/Users/user/Desktop/Shopee_Code_League_Stuff/shopee-product-detection-dataset/test.csv")
test_data_filenames = test_data_in_csv['filename'].values.tolist()
def create_test_data():
    """Load every test image named in test.csv into test_data (grayscale,
    IMG_SIZE x IMG_SIZE), recording its filename in parallel so that
    filenames[i] always corresponds to test_data[i]."""
    for img in os.listdir(TESTDIR):
        try:
            if img in test_data_filenames:
                img_array = cv2.imread(os.path.join(TESTDIR, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                # BUG FIX: append the filename only after the image loads
                # successfully; the original appended before imread, so a
                # corrupt image desynchronised filenames from test_data and
                # shifted every later prediction onto the wrong file.
                filenames.append(img)
                test_data.append(new_array)
        except Exception as e:
            # Unreadable images are skipped (best effort).
            pass
create_test_data()
X_test = [] #features
for features in test_data:
    X_test.append(features)
X_test = np.array(X_test).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
X_test = X_test / 255.0
# BUG FIX (cleanup): the final Dense layer already applies softmax, so the
# original's extra tf.keras.layers.Softmax wrapper double-normalised the
# outputs; predict with the model directly (argmax, and therefore every
# prediction, is unchanged).
predictions = model.predict(X_test)
for i in range(0, len(predictions)):
    class_num = np.argmax(predictions[i])
    class_predictions.append(class_num)
filename_df = pd.DataFrame(filenames, columns=['filename'])
category_df = pd.DataFrame(class_predictions, columns=['category'])
# Zero-pad indices back to the two-digit labels used by the competition.
category_df['category'] = category_df.category.apply(lambda c: "{:02d}".format(c))
full_df = pd.concat([filename_df, category_df], axis=1)
full_df.to_csv('test_predictions1.csv', index = False)
from flexp.browser import MainHandler
from os import path
class FeaturesHandler(MainHandler):
    """flexp browser page rendering a feature table for one annotation."""

    def initialize(self, experiments_folder, html_chain):
        # Tornado-style initialize: stash configuration for later calls.
        self.experiments_folder = experiments_folder
        self.html_chain = html_chain

    def create_title(self, experiment_folder, data):
        """Page heading parameterised by the annotation being shown."""
        annotation_id = data['annotation_id']
        return "<h1>Features for {}</h1>".format(annotation_id)

    def create_navigation(self, navigation_html, experiment_folder, experiment_path, data):
        # Navigation is passed through unchanged.
        return navigation_html

    def get(self):
        """Serve the page for the annotation in the `id` query argument."""
        annotation_id = self.get_argument("id", default="")
        # if not path.isdir(experiment_path):
        #     experiment_folder = ""
        data = {"experiment_path": "",  # experiment_path,
                "experiment_folder": "",  # experiment_folder,
                "html": [],
                "header": dict(),
                "scripts": dict(),
                "annotation_id": annotation_id,
                }
        self.create_page("", "", data)

    def create_content(self, experiment_folder, data):
        """Build the feature table HTML for the annotation.

        NOTE(review): the feature rows are hard-coded placeholders
        ([['f1', 0.3]] * 3) -- presumably to be replaced by real lookups.
        """
        annotation_id = data['annotation_id']
        content_html = ""
        content_html += "<br>{}".format(annotation_id)
        # BUG FIX: each row template was missing its closing </tr>,
        # producing malformed table markup.
        content_html += '<br><table class="w3-table w3-bordered w3-striped w3-border w3-hoverable w3-card-2"> ' \
                        '<colgroup> <col span="1" style="width: 50%;"> <col span="1" style="width: 50%;"> </colgroup> ' \
                        '<thead><tr class="w3-green">' '<th class="header">Feature name</th><th class="header">Value</th></tr>' \
                        '</thead><tbody> {}</tbody></table>'.format(''.join(
                            ["<tr><td>{}</td><td>{}</td></tr>".format(feature_name, feature)
                             for feature_name, feature in [['f1', 0.3], ['f1', 0.3],
                                                           ['f1', 0.3]]]))
        return content_html
|
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from adminapp.views import index, bienvenido
from adminapp.views import conexion_new, conexion_list, conexion_edit, conexion_delete
from adminapp.views import servicio_new, servicio_list, servicio_edit
from adminapp.views import directorio_new, directorio_list, directorio_edit, directorio_delete
from adminapp.views import localizacion_new, localizacion_list, localizacion_edit, localizacion_delete
from adminapp.views import articulo_list, articulo_delete, parametrizacion_list, FotoPDetailView
from adminapp import views
from rest_framework. urlpatterns import format_suffix_patterns
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.views.static import serve
urlpatterns = [
    # Main URLs.
    url(r'^$', login_required(index), name='index'),
    # NOTE(review): this repeats the r'^$' pattern above -- Django matches
    # the first entry, so `bienvenido` is unreachable as written; confirm
    # the intended path.
    url(r'^$', login_required(bienvenido), name='bienvenido'),
    # URLs for managing connections.
    url(r'^nconexion$', login_required(conexion_new), name='conexion_crear'),
    url(r'^lconexion$',login_required(conexion_list), name='conexion_listar'),
    url(r'^edconexion/(?P<id_conexion>\d+)/$', login_required(conexion_edit), name='conexion_editar'),
    url(r'^elconexion/(?P<id_conexion>\d+)/$', login_required(conexion_delete), name='conexion_eliminar'),
    # URLs for managing services.
    url(r'^nservicio$', login_required(servicio_new), name='servicio_crear'),
    url(r'^lservicio$', login_required(servicio_list), name='servicio_listar'),
    url(r'^edservicio/(?P<id_servicio>\d+)/$', login_required(servicio_edit), name='servicio_editar'),
    # URLs for managing the telephone directory.
    url(r'^ndirectorio$', login_required(directorio_new), name='directorio_crear'),
    url(r'^ldirectorio$', login_required(directorio_list), name='directorio_listar'),
    url(r'^eddirectorio/(?P<id_directorio>\d+)/$', login_required(directorio_edit), name='directorio_editar'),
    url(r'^eldirectorio/(?P<id_directorio>\d+)/$', login_required(directorio_delete), name='directorio_eliminar'),
    # URLs for managing geolocation.
    url(r'^nlocalizacion$', login_required(localizacion_new), name='localizacion_crear'),
    url(r'^llocalizacion$', login_required(localizacion_list), name='localizacion_listar'),
    url(r'^edlocalizacion/(?P<id_localizacion>\d+)/$', login_required(localizacion_edit), name='localizacion_editar'),
    url(r'^ellocalizacion/(?P<id_localizacion>\d+)/$', login_required(localizacion_delete), name='localizacion_eliminar'),
    # URLs for managing lost items.
    url(r'^narticulo$', login_required(views.ArticuloCreate.as_view()), name='articulo_crear'),
    url(r'^larticulo$', login_required(articulo_list), name='articulo_listar'),
    url(r'^edarticulo/(?P<pk>\d+)/update/$', login_required(views.ArticuloUpdate.as_view()), name='articulo_editar'),
    url(r'^elarticulo/(?P<id_descripcion>\d+)/$', login_required(articulo_delete), name='articulo_eliminar'),
    # URLs exposing the services as JSON via Django Rest Framework.
    url(r'^articulo/$', views.ArticuloList.as_view(),name='articuloList'),
    url(r'^directorio/$', views.DirectorioList.as_view(),name='directorioList'),
    url(r'^localizacion/$', views.LocalizacionList.as_view(),name='localizacionList'),
    url(r'^servicios/$', views.ServiciosList.as_view(),name='serviciosList'),
    url(r'^usuarios/$', views.UsuariosList.as_view(),name='usuariosList'),
    url(r'^servicio/(?P<id_servicio>\d+)/$',views. ServicioList.as_view(),name='servicio'),
    url(r'^lparametrizacion$', login_required(parametrizacion_list), name='parametrizacion_listar'),
    url(r'^edparametrizacion/(?P<pk>\d+)/update/$', login_required(views.ParametrizacionUpdate.as_view()), name='parametrizacion_editar'),
    url(r'^fotoP/(?P<pk>\d+)/detail/$',views.FotoPDetailView.as_view(), name='foto-detail'),
    url(r'^parametrizacion/$', views.ParametrizacionList.as_view(),name='parametrizacionList'),
]
# URL used to serve the folder holding uploaded images (DEBUG only).
if settings.DEBUG:
    urlpatterns += [
        url(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT,}),]
urlpatterns = format_suffix_patterns(urlpatterns)
"""
Types, identify yourself!
Even if you are a function/mono-type/monad!
It's somewhat about static type and Py-skell layer of type
"""
from Pyskell.Language.HMTypeSystem import *
import types
# Every concrete built-in type exposed by the Python 2 `types` module
# (NOTE: this module targets Python 2 -- most of these attributes were
# removed in Python 3).
__python_builtins__ = {
    types.BooleanType, types.BufferType, types.BuiltinFunctionType,
    types.BuiltinMethodType, types.ClassType, types.CodeType,
    types.ComplexType, types.DictProxyType, types.DictType,
    types.DictionaryType, types.EllipsisType, types.FileType,
    types.FloatType, types.FrameType, types.FunctionType,
    types.GeneratorType, types.GetSetDescriptorType, types.InstanceType,
    types.IntType, types.LambdaType, types.ListType, types.LongType,
    types.MemberDescriptorType, types.MethodType, types.ModuleType,
    types.NoneType, types.NotImplementedType, types.ObjectType,
    types.SliceType, types.StringType, types.StringTypes,
    types.TracebackType, types.TupleType, types.TypeType,
    types.UnboundMethodType, types.UnicodeType, types.XRangeType, set,
    frozenset}
def is_builtin_type(some_type):
    """Return True when *some_type* is one of Python's built-in types."""
    return some_type in __python_builtins__
# The Python 2 callable types: functions, lambdas, (un)bound methods and
# builtins.
__python_function_types__ = {
    types.FunctionType, types.LambdaType, types.MethodType,
    types.UnboundMethodType, types.BuiltinFunctionType,
    types.BuiltinMethodType}
def is_py_func_type(some_type):
    """Return True when *some_type* is a Python function/method type."""
    return some_type in __python_function_types__
class TypeSignatureError(Exception):
    """Raised when a declared type signature cannot be built."""
    pass
class TypeSignature(object):
    """A declared signature: typeclass *constraints* plus the list of
    slot types *args* (interpreted by make_func_type as a curried
    function type)."""
    def __init__(self, constraints, args):
        self.constraints = constraints
        self.args = args
class TypeSignatureHigherKind(object):
    """A higher-kinded type application: *t_constructor* applied to the
    list *t_parameters*."""
    def __init__(self, t_constructor, t_parameters):
        self.constructor = t_constructor
        self.parameters = t_parameters
def make_func_type(type_para_list):
    """Fold a list of slot types into a right-nested chain of Arrow
    (function) types, e.g. [a, b, c] -> Arrow(a, Arrow(b, c))."""
    if len(type_para_list) < 2:
        raise TypeSignatureError("Something's wrong in make func part.")
    first, rest = type_para_list[0], type_para_list[1:]
    if len(rest) == 1:
        return Arrow(first, rest[0])
    return Arrow(first, make_func_type(rest))
def type_sig_arg_build(argument, constraints, type_var_dict):
    """Build the concrete type object for one signature slot *argument*.

    Lowercase strings become (cached, possibly constrained) type
    variables; TypeSignature nests a function type;
    TypeSignatureHigherKind applies a constructor; None is the unit type;
    a 1-element list is a list type; a tuple is a tuple type; a plain
    type is a nullary operator.  Raises TypeSignatureError otherwise.
    *type_var_dict* caches variables so equal names share one object.
    """
    if isinstance(argument, str) and argument.islower():
        # First sight of this variable name: create it (attaching any
        # typeclass constraints); later uses reuse the same variable.
        if argument not in type_var_dict:
            if argument in constraints:
                type_var_dict[argument] = \
                    TypeVariable(constraints=constraints[argument])
            else:
                type_var_dict[argument] = TypeVariable()
        return type_var_dict[argument]
    elif isinstance(argument, TypeSignature):
        """
        Due to the Syntax of Python, Tuple is used
        So I have to let function sig be another type signature
        """
        # Nested signature -> curried function type.
        return make_func_type(type_sig_build(argument, type_var_dict))
    elif isinstance(argument, TypeSignatureHigherKind):
        # A string constructor is itself a type variable; any other
        # constructor object is used directly.
        if type(argument.constructor) is str:
            higher_kind = type_sig_arg_build(argument.constructor,
                                             constraints,
                                             type_var_dict)
        else:
            higher_kind = argument.constructor
        return TypeOperator(higher_kind,
                            list(map(lambda x:
                                     type_sig_arg_build(x,
                                                        constraints,
                                                        type_var_dict),
                                     argument.parameters)))
    elif argument is None:
        # None denotes the unit type.
        return TypeOperator(None, [])
    elif isinstance(argument, list) and len(argument) == 1:
        # [t] denotes "list of t".
        return ListType(type_sig_arg_build(argument[0],
                                           constraints,
                                           type_var_dict))
    elif isinstance(argument, tuple):
        return TupleType(list(map(lambda x:
                                  type_sig_arg_build(x,
                                                     constraints,
                                                     type_var_dict),
                                  argument)))
    elif isinstance(argument, type):
        return TypeOperator(argument, [])
    raise TypeSignatureError(
        "Type Signature Fail to Build Argument: {}".format(argument)
    )
def type_sig_build(type_sig, type_var_dict=None):
    """Build the concrete type object for every slot of *type_sig*,
    sharing one type-variable table across all slots."""
    if type_var_dict is None:
        type_var_dict = {}
    constraints = type_sig.constraints
    return [type_sig_arg_build(slot, constraints, type_var_dict)
            for slot in type_sig.args]
class PythonFunctionType(object):
    """Marker type operator used for plain Python callables."""
    pass
class OriginType(object):
    """
    Everything starts at this type in Pyskell
    """
    def __type__(self):
        # Subclasses must override; the base type is never queried directly.
        raise TypeError("You touch something you should never touch\n"
                        "THIS IS ORIGIN TYPE, EVERYTHING STARTS HERE")
class Undefined(OriginType):
    """Bottom-like value: its type is a fresh, unconstrained variable."""
    def __type__(self):
        return TypeVariable()
def type_of(unknown_type):
    """Return the Pyskell type object describing the runtime value
    *unknown_type*."""
    # Reset the type-variable name counter before building a fresh type.
    TypeVariable.next_var_name = 0
    if isinstance(unknown_type, OriginType):
        # Pyskell values report their own type.
        return unknown_type.__type__()
    elif isinstance(unknown_type, tuple):
        return TupleType(list(map(type_of, unknown_type)))
    elif unknown_type is None:
        return TypeOperator(None, [])
    elif type(unknown_type) in __python_function_types__:
        return TypeOperator(PythonFunctionType, [])
    return TypeOperator(type(unknown_type), [])
|
# Generated by Django 3.2.6 on 2021-08-24 17:11
from django.db import migrations, models
# Auto-generated migration: adds latitud/longitud decimal fields to the
# `visitor` model with defaults (19.42847, -99.12766).
class Migration(migrations.Migration):
    dependencies = [
        ('ip_address', '0003_measurement'),
    ]
    operations = [
        migrations.AddField(
            model_name='visitor',
            name='latitud',
            field=models.DecimalField(decimal_places=7, default=19.42847, max_digits=11),
        ),
        migrations.AddField(
            model_name='visitor',
            name='longitud',
            field=models.DecimalField(decimal_places=7, default=-99.12766, max_digits=11),
        ),
    ]
|
# Method 1 --> 344ms (Using Binary Search and List as Data Structure) || 17700 kb
import bisect
class MyHashSet:
    """Integer set backed by a sorted list; membership via binary search."""

    def __init__(self):
        # Always kept sorted ascending.
        self.hashset = []

    def present(self, key: int):
        """Return the index of *key* in the sorted list, or -1 if absent."""
        # IMPROVEMENT: the original re-implemented binary search by hand
        # even though bisect was already imported; bisect_left is the
        # idiomatic (and C-speed) equivalent.
        i = bisect.bisect_left(self.hashset, key)
        if i < len(self.hashset) and self.hashset[i] == key:
            return i
        return -1

    def add(self, key: int) -> None:
        """Insert *key*, keeping the list sorted; no-op on duplicates."""
        if self.present(key) == -1:
            bisect.insort(self.hashset, key)

    def remove(self, key: int) -> None:
        """Delete *key* if present.  Returns True/False like the original
        implementation (callers may rely on it)."""
        mid = self.present(key)
        if mid != -1:
            self.hashset.pop(mid)
            return True
        else:
            return False

    def contains(self, key: int) -> bool:
        """Return True when *key* is in the set."""
        return self.present(key) != -1
#Method 2 --> 152 ms (Using Dictionary/Hash) || 18500 kb
class MyHashSet:
    """Integer set backed by a dict for O(1) operations."""

    def __init__(self):
        self.hashset = {}

    def add(self, key: int) -> None:
        """Insert *key*; duplicates are ignored."""
        if key in self.hashset:
            return
        self.hashset[key] = 1

    def remove(self, key: int) -> None:
        """Delete *key* if present; no-op otherwise."""
        if key not in self.hashset:
            return
        del self.hashset[key]

    def contains(self, key: int) -> bool:
        """Return True when *key* is in the set.

        BUG FIX: the original fell off the end and returned None for a
        missing key, despite the annotated bool return type.
        """
        if key in self.hashset:
            return True
        return False
from google.cloud import translate
from managers.cache import CacheManager
class TranslateManager(object):
    """Translates items with the Google Translate client, consulting a
    write-through cache first."""

    def __init__(self, langs=None, params=None):
        # BUG FIX: the original used mutable default arguments ([] / {}),
        # which are created once and shared by every instance constructed
        # without explicit values.
        params = {} if params is None else params
        self.params = params
        print(self.params)
        print(params)
        self.langs = [] if langs is None else langs
        self.client = translate.Client()
        self.cache_manager = CacheManager()

    def translate(self, item):
        """Return the translation of *item*; cache hits skip the API, and
        fresh API results are written back to the cache."""
        cached_value = self.cache_manager.get(item)
        if cached_value is None:
            translation = self.client.translate(item.value, target_language=item.lang)
            item.value = translation['translatedText']
            self.cache_manager.save(item)
        else:
            return cached_value
        return item.value

    def translations_for_key(self, item):
        """Return the cached translations of *item* for every configured
        language (cache misses are skipped, not fetched)."""
        temp = []
        for lang in self.langs:
            item.lang = lang
            translation = self.cache_manager.get(item)
            if translation:
                temp.append(translation)
        return temp

    def finalize(self):
        """Flush the cache to its backing store."""
        self.cache_manager.persist()

    def log(self):
        """Return the configured debug flag (KeyError if unset)."""
        return self.params['debug']
|
from easystruct import *
class S(EasyStructBase):
    """10-byte string + unsigned 64-bit int + single char."""
    s: EasyStruct('10s')
    ull: EasyStruct('Q')
    c: EasyStruct('s')
class A(EasyStructBase):
    """Four little-endian u16s + a 12-byte string."""
    a: EasyStruct('<4H')
    b: EasyStruct('<12s')
class C(A):
    """Inherits A but redefines both fields: `a` nests a whole A struct,
    `b` becomes a u16."""
    a: A
    b: EasyStruct('H')
def _create(cls, *args):
    """Instantiate *cls* with *args*, print the instance, its packed
    bytes and the unpack round-trip, and return the instance."""
    print(80 * '=')
    c = cls(*args)
    print(c)
    print(c.pack())
    print(cls.unpack(c.pack()))
    print(80 * '=')
    return c
def _compare(s: EasyStructBase, packed: bytes):
    """Assert that *s* packs to exactly *packed* and that unpacking
    *packed* reproduces an equal instance."""
    s_p = s.pack()
    s_u = s.unpack(packed)
    assert s_p == packed
    assert s == s_u
def test_simple():
    # Round-trip a flat struct holding the max u64 value.
    s = S('a', 2 ** 64 - 1, 'c')
    _compare(s, b'a\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xffc')
def test_compound_fields():
    # Struct with array and padded-string fields.
    a = A(tuple(range(40, 44)), 'abcdefghi')
    _compare(a, b'(\x00)\x00*\x00+\x00abcdefghi\x00\x00\x00')
def test_compound_objects():
    # Struct nesting another struct instance as a field.
    a = A(tuple(range(40, 44)), 'abcdefghi')
    c = C(a, 65535)
    print(c)
    print(c.pack())
    print(C.unpack(c.pack()))
    # NOTE(review): this early return disables the byte-level comparison
    # below -- presumably a known-failing check; confirm before removing.
    return
    _compare(c, b'(\x00)\x00*\x00+\x00abcdefghi\x00\x00\x00\xff\xff')
class Bits(EasyStructBase):
    # Pure bit-level fields: a 2-bit and a 9-bit unsigned value.
    two_bits: EasyStruct(b'u2')
    nine_bits: EasyStruct(b'u9')
class MultiBits(EasyStructBase):
    # Several bit fields packed per annotation.
    three: EasyStruct(b'u4u4u4')
    two: EasyStruct(b'u4u12')
    one: EasyStruct(b'u97')
class BitsBytes(EasyStructBase):
    # Mixes byte-level ('H', '12s') and bit-level (u12, u7) fields.
    s: EasyStruct('H')
    twelve_bits: EasyStruct(b'u12')
    seven_bits: EasyStruct(b'u7')
    s2: EasyStruct('12s')
class IterDC(MultiBits):
    # Extends MultiBits with four little-endian u16s.
    other: EasyStruct('<4H')
class BitsBytesStr(EasyStructBase):
    # Nests other struct classes alongside a plain string field.
    bits: Bits
    s: EasyStruct('12s')
    bits_bytes: BitsBytes
# Module-level fixtures shared by the tests below (printed at import time
# by _create).
bits = _create(Bits, 2, 255)
bits_bytes = _create(BitsBytes, 4, 1000, 100, 'jebtus')
bbs = _create(BitsBytesStr, bits, 'hello', bits_bytes)
multi = _create(MultiBits, [1, 2, 3], [4, 5], 98273498798729873987298374987)
iter_dc = _create(IterDC, *multi, tuple(range(1000, 1004)))
def test_bits():
    # Expected packed bytes for the module-level fixtures above.
    _compare(bits, b'\x9f\xe0')
def test_bits_bytes():
    _compare(bits_bytes, b'\x04\x00>\x8c\x80jebtus\x00\x00\x00\x00\x00\x00')
def test_bbs():
    _compare(bbs, b'\x9f\xe0hello\x00\x00\x00\x00\x00\x00\x00\x04\x00>\x8c\x80jebtus\x00\x00\x00\x00\x00\x00')
def test_multibits():
    _compare(multi, b'\x124\x00Y\xecOv\xa6"A\xc1t\xed}\xaaX')
def test_iterable_dataclass():
    _compare(iter_dc, b'\x124\x00Y\xecOv\xa6"A\xc1t\xed}\xaaX\xe8\x03\xe9\x03\xea\x03\xeb\x03')
|
from threading import Thread
from MathFunctions import *
class WieferichThread(Thread):
    """Worker thread scanning [range_start, range_end] for Wieferich
    primes (odd candidates only).  NOTE: Python 2 syntax (print
    statements)."""
    def __init__(self, range_start, range_end):
        Thread.__init__(self)
        self.range_start = range_start
        self.range_end = range_end
    def run(self):
        self.search_wieferich()
    def search_wieferich(self):
        """
        search for wieferich
        """
        i = self.range_start
        if i % 2 == 0: # ensures that start is an odd number
            i += 1
        # Step by 2 so only odd numbers are tested.
        while i <= self.range_end:
            if MathFunctions.is_wieferich(i):
                print self.name + " might have found a wieferich: " + str(i)
            i += 2
        print "Thread no. " + self.name + " finished"
if __name__ == '__main__':
    # NOTE(review): `start` is a float (6.7*10**15), so candidates fed to
    # is_wieferich are floats -- confirm MathFunctions handles that, or
    # switch to int(6.7*10**15).
    start = 6.7*10**15
    no_of_threads = 4
    # Integer block size (the original `/` also floor-divides under Py2).
    block = 12800000 // no_of_threads
    # BUG FIX: the original looped range(0, no_of_threads + 1), spawning
    # one thread more than requested.
    for i in range(0, no_of_threads):
        WieferichThread(start, start + block).start()
        start += block + 1
# coding: utf-8
import sys
from os.path import dirname, abspath
sys.path.append(dirname(dirname(abspath(__file__))))
import struct
import logging
logging.basicConfig(level=logging.INFO)
import zmq
from modules.constants import *
# Payload ids sent with each retrain request.
ids = ["1", "2"]
# Monotonically increasing id stamped on each retrain message.
msg_id = 0
# Identities of backends that have completed the metadata handshake.
identities = []
# routing identity -> backend name
identity_backend = {}
# backend name -> (version, app name)
backend_dict = {}
# Countdown until the next retrain trigger (decremented per heartbeat).
heartbeats_trigger = 3
def handle_message(msg_type, socket, sender_identity):
    """Dispatch one incoming backend message on the ROUTER socket.

    Handles: heartbeats (answered with a keepalive, or a metadata request
    for unknown senders), metadata registration, and retrain start/end
    notifications.
    """
    global heartbeats_trigger
    logging.info('received message with type {}'.format(msg_type))
    if msg_type == MESSAGE_TYPE_BACKEND_HAERTBEAT:
        if sender_identity in identities:
            # Known backend: answer with a keepalive.  ROUTER sockets must
            # send the destination identity frame first.
            socket.send(sender_identity, flags=zmq.SNDMORE)
            socket.send('', flags=zmq.SNDMORE)
            socket.send(
                struct.pack('<I', MESSAGE_TYPE_BACKEND_HAERTBEAT),
                flags=zmq.SNDMORE)
            socket.send(struct.pack('<I', BACKEND_HEARTBEAT_TYPE_KEEPALIVE))
            # Trigger a retrain periodically.  NOTE(review): the counter
            # is reset to 5 although the original comment said "each 3
            # heartbeats" -- confirm the intended period.
            if heartbeats_trigger == 0:
                retrain_trigger(socket, sender_identity)
                heartbeats_trigger = 5
            else:
                heartbeats_trigger -= 1
        else:
            # first time, require metadata
            identities.append(sender_identity)
            socket.send(sender_identity, flags=zmq.SNDMORE)
            socket.send('', flags=zmq.SNDMORE)
            socket.send(
                struct.pack('<I', MESSAGE_TYPE_BACKEND_HAERTBEAT),
                flags=zmq.SNDMORE)
            socket.send(
                struct.pack('<I', BACKEND_HEARTBEAT_TYPE_REQUEST_METADATA))
    elif msg_type == MESSAGE_TYPE_BACKEND_METADATA:
        # Remaining frames carry the backend's self-description; register it.
        backend_name = socket.recv()
        backend_version = socket.recv()
        app_name = socket.recv()
        policy = socket.recv()
        runtime_profile = socket.recv()
        logging.info('Received backend metadata : {}'.format(backend_name))
        identity_backend[sender_identity] = backend_name
        backend_dict[backend_name] = (backend_version, app_name)
    elif msg_type == MESSAGE_TYPE_RETRAIN_STARTED:
        # Next frame is the little-endian u32 id of the retrain request.
        msg_id_bytes = socket.recv()
        received_msg_id = int(struct.unpack("<I", msg_id_bytes)[0])
        logging.info('Retrain started in backend : {} with msg id : {}'.format(
            identity_backend[sender_identity], received_msg_id))
    elif msg_type == MESSAGE_TYPE_RETRAIN_ENDED:
        msg_id_bytes = socket.recv()
        received_msg_id = int(struct.unpack("<I", msg_id_bytes)[0])
        logging.info('Retrain ended in backend : {} with msg id : {}'.format(
            identity_backend[sender_identity], received_msg_id))
def retrain_trigger(socket, sender_identity):
    """Send a start-retrain request (with the shared `ids` payload) to
    the backend addressed by *sender_identity*."""
    global msg_id
    logging.info('trigger retrain to backend {}'.format(
        identity_backend[sender_identity]))
    # ROUTER socket needs to specify sender identity first
    socket.send(sender_identity, flags=zmq.SNDMORE)
    socket.send('', flags=zmq.SNDMORE)
    socket.send(
        struct.pack('<I', MESSAGE_TYPE_START_RETRAIN), flags=zmq.SNDMORE)
    socket.send(struct.pack('<I', msg_id), flags=zmq.SNDMORE)
    socket.send(
        struct.pack('<I', REQUEST_TYPE_START_RETRAIN), flags=zmq.SNDMORE)
    # NUL-separated, NUL-terminated id list.
    socket.send(bytearray('\0'.join(ids) + '\0'))
    msg_id += 1
if __name__ == '__main__':
    # Bind a ROUTER socket and service backend messages forever.
    context = zmq.Context()
    socket = context.socket(zmq.ROUTER)
    socket.bind("tcp://*:7001")
    while True:
        # ROUTER socket will receive routing identity first
        routing_identity = socket.recv()
        logging.debug('{!r}'.format(routing_identity))
        message_delimiter = socket.recv() # an empty frame
        message = socket.recv() # actual message
        logging.debug('{!r}'.format(message))
        # First message frame is the little-endian u32 message type.
        msg_type = struct.unpack('<I', message)[0]
        handle_message(msg_type, socket, routing_identity)
|
import os
# info, warning 제거
os.environ['TF_CPP_MIN_LOG_LEVEL'] ='2'
import tensorflow as tf
import pandas as pd
import numpy as np
import datetime
from distutils.dir_util import copy_tree
import shutil
import json
import requests
import math
import joblib
import sys
# Saved-model directory and the drop folders polled for files awaiting
# prediction (mouse logs and resource logs).
model_dir = './model/'
m_pred_dir = './pred/mouse'
r_pred_dir = './pred/resource'
# CSVs accumulating the extracted feature rows.
m_feature_file = './m_feature_extract.csv'
r_feature_file = './r_feature_extract.csv'
def set_yesterday_str():
    """Return yesterday's local date formatted as YYYYMMDD."""
    previous_day = datetime.datetime.today() - datetime.timedelta(days=1)
    return previous_day.strftime("%Y%m%d")
# Find a user who has files waiting for prediction in BOTH drop folders.
def find_user():
    """Poll the mouse and resource prediction folders until some user has
    a non-empty sub-folder in both; return that user name.

    NOTE(review): this is a busy-wait with no sleep -- it spins a CPU
    core while idle; consider time.sleep between polls.
    """
    user = None
    while True:
        m_user_list = os.listdir(m_pred_dir) # users awaiting mouse prediction
        r_user_list = os.listdir(r_pred_dir) # users awaiting resource prediction
        for user in m_user_list:
            if user in r_user_list:
                break
        else:
            user = None
        if user is not None:
            if len(os.listdir(m_pred_dir+'\\'+user)) > 0: # only break once files exist under the folder (guards against a just-created empty folder)
                if len(os.listdir(r_pred_dir+'\\'+user)) > 0:
                    break
    return user
# Collect info for every file under ~/target_dir/user/.
def find_files(target_dir_path, user):
    """Return a list of dicts (path, user, upper_dir, filename) for all
    files in target_dir_path\\user.

    NOTE(review): paths are assembled with hard-coded backslashes, so
    this only behaves correctly on Windows.  Nearly identical to
    find_file below (which stops after the first entry) -- candidates
    for consolidation.
    """
    dir_path = target_dir_path+'\\'+user
    file_dict_list = []
    for file in os.listdir(dir_path):
        file_path = os.path.join(dir_path, file) # full path
        user_index = file_path.find('\\')
        filename_index = file_path.rfind('\\')
        extension_index = file_path.rfind('.') # index just before the extension
        upper_dir = file_path[:filename_index] # parent directory
        filename = file_path[filename_index+1:extension_index] # file name without extension
        user = file_path[user_index+1:filename_index] # the file's owner
        file_dict_list.append({"path":file_path, "user":user, "upper_dir":upper_dir, "filename":filename})
    return file_dict_list
# Collect info for a single file under ~/target_dir/user/.
def find_file(target_dir_path, user):
    """Like find_files, but stops after the first directory entry, so the
    returned list holds at most one dict."""
    dir_path = target_dir_path+'\\'+user
    file_dict_list = []
    for file in os.listdir(dir_path):
        file_path = os.path.join(dir_path, file) # full path
        user_index = file_path.find('\\')
        filename_index = file_path.rfind('\\')
        extension_index = file_path.rfind('.') # index just before the extension
        upper_dir = file_path[:filename_index] # parent directory
        filename = file_path[filename_index+1:extension_index] # file name without extension
        user = file_path[user_index+1:filename_index] # the file's owner
        file_dict_list.append({"path":file_path, "user":user, "upper_dir":upper_dir, "filename":filename})
        # Only the first entry is wanted.
        break
    return file_dict_list
# Adds 'action interval' to a mouse log, collapses button/state into one
# 'button state' column covering the 7 possible combinations, derives
# movement direction and distance, then drops the raw columns.
# `file` is a dict as produced by find_files()/find_file().
def mouse_parse_save(file):
    """Parse one raw mouse-log CSV, derive per-event features, write the
    result as <yesterday>_<user>_<name>.csv and delete the original.

    NOTE(review): the per-row `df[col].iloc[i] = ...` writes are chained
    assignment -- they trigger pandas SettingWithCopyWarning and are very
    slow on large logs; a vectorised rewrite would be much faster.
    """
    df = pd.read_csv(file['path'], engine='python')
    # 'action interval' is the time delta between consecutive actions.
    # 'button state' takes one of 7 values:
    # (NoButton-Move, NoButton-Drag, Nobutton-Scrool, Left-Pressed, Left-Released, Right-Pressed, Right-Released)
    df.insert(len(df.columns),'action interval',np.nan)
    df.insert(len(df.columns),'button state',np.nan)
    df.insert(len(df.columns), 'move distance', np.nan)
    df.insert(len(df.columns), 'move way', np.nan)
    for i in range(0, df.shape[0]):
        # action interval
        if i == 0:
            df['action interval'].iloc[i] = 0.0
        else:
            df['action interval'].iloc[i] = df['client timestamp'].iloc[i] - df['client timestamp'].iloc[i-1]
        # button state
        state = df['state'].iloc[i]
        ButtonState = ""
        if(state == 'Move'):
            ButtonState = 'Move'
        elif(state == 'Drag'):
            ButtonState = 'Drag'
        elif(state == 'Scrool'):
            # 'Scrool' is the (misspelled) raw value; normalised to 'Scroll'.
            ButtonState = 'Scroll'
        elif(state == 'Pressed'):
            if(df['button'].iloc[i]=='Left'):
                ButtonState = 'Left_Pressed'
            else:
                ButtonState = 'Right_Pressed'
        elif(state == 'Released'):
            if(df['button'].iloc[i]=='Left'):
                ButtonState = 'Left_Released'
            else:
                ButtonState = 'Right_Released'
        df['button state'].iloc[i] = ButtonState
        # Raw scroll rows carry the X coordinate in Y as well -> replace Y
        # with the previous action's Y.  This must happen before computing
        # move distance / move way.
        if (state == 'Scrool'):
            if i != 0 :
                df['y'].iloc[i] = df['y'].iloc[i-1]
        # move distance (Euclidean, from the previous position)
        if i == 0:
            df['move distance'].iloc[i] = 0.0
        else:
            x1 = df['x'].iloc[i-1]
            y1 = df['y'].iloc[i-1]
            x2 = df['x'].iloc[i]
            y2 = df['y'].iloc[i]
            distance = math.sqrt(math.pow((x2 - x1), 2) + math.pow((y2 - y1),2))
            df['move distance'].iloc[i] = distance
        # move way: one of 8 octants around the previous position.
        move_way = np.nan
        if i != 0:
            xl = df['x'].iloc[i] - df['x'].iloc[i-1]
            yl = df['y'].iloc[i] - df['y'].iloc[i-1]
            if xl >= 0 and yl >= 0:
                if xl > yl: move_way = 1
                else: move_way = 2
            elif xl <= 0 and yl >= 0:
                if -xl > yl: move_way = 4
                else: move_way = 3
            elif xl <= 0 and yl <= 0:
                if -xl > -yl: move_way = 5
                else: move_way = 6
            elif xl >= 0 and yl <= 0:
                if xl > -yl: move_way = 8
                else: move_way = 7
        df['move way'].iloc[i] = move_way
    # Drop raw columns now folded into the derived ones.
    df = df.drop(['client timestamp', 'state', 'button'], axis=1)
    yesterday_str = set_yesterday_str()
    new_filename = yesterday_str+'_'+file['user']+'_'+file['filename']
    write_file_path = file['upper_dir']+'/'+new_filename
    df.to_csv(write_file_path+'.csv', header=True, index=False)
    os.remove(file['path']) # delete the original file
# mouse file feature extract and save
def mouse_feature_extract(path, filename, label):
    """Extract per-action statistical features from a preprocessed mouse CSV.

    Reads the CSV at *path*, computes action-type ratios plus timing/distance
    statistics, appends one feature row to the global ``m_feature_file`` CSV
    (unless the window is idle), and returns that row as a DataFrame.
    """
    df = pd.read_csv(path)
    # Accumulates feature-name -> value for this one file.
    extract = {}
    # Source file name (used later by modify_pattern_label to relabel rows).
    extract['filename'] = filename
    # Pattern-owner label.
    extract['label'] = label
    # Extraction timestamp.
    time = datetime.datetime.today()
    time = time.strftime("%Y-%m-%d %H:%M:%S")
    extract['time'] = time
    # No mouse actions at all (idle window): return the stub row only.
    if df.shape[0] == 0:
        extract_df = pd.DataFrame([extract])
        # extract_df.to_csv(m_feature_file, mode='a', index=False, header=None) # idle rows are intentionally not saved
        return extract_df
    # Total number of recorded actions.
    total_action_count = df['button state'].count()
    # Per-action-type statistics.
    ButtonState_values = ['Move','Drag','Scroll','Left_Pressed','Left_Released','Right_Pressed','Right_Released']
    for i in range(len(ButtonState_values)):
        action = ButtonState_values[i]
        action_desc = df[df['button state']==action]['action interval'].describe()
        # Share of this action type among all actions.
        action_count = action_desc['count']
        action_count_ratio = action_count/total_action_count
        key = action+'_ratio'
        extract[key] = action_count_ratio
        # Interval statistics for this action type (mean, std, quartiles).
        statistics_list = ['mean', 'std', '25%', '50%', '75%']
        for j in range(len(statistics_list)):
            statistics = statistics_list[j]
            key = action+'_'+statistics
            extract[key] = action_desc[statistics]
        # Median interval/distance per movement direction (1-8, the
        # 'move way' codes produced during parsing).
        for j in range(1,9):
            action_way = df[(df['button state']==action)& (df['move way']==j)]
            key = action+'_way_timeMedian_'+str(j)
            extract[key] = action_way['action interval'].describe()['50%']
            key = action+'_way_distanceMedian_'+str(j)
            extract[key] = action_way['move distance'].describe()['50%']
    # Persist the single feature row; write the header only on file creation.
    extract_df = pd.DataFrame([extract])
    if(os.path.isfile(m_feature_file)):
        extract_df.to_csv(m_feature_file, mode='a', index=False, header=None)
    else:
        extract_df.to_csv(m_feature_file, mode='w', index=False)
    return extract_df
# Extract and save features for every file in file_dict_list, then return
# all per-file feature rows combined into one DataFrame.
def mouse_preprocess(file_dict_list):
    """Run mouse_feature_extract over each file dict and concatenate results.

    Each dict needs 'path', 'filename' and 'user' keys; 'user' is used as
    the label. Returns an empty DataFrame for an empty list.
    """
    frames = [
        mouse_feature_extract(fd['path'], fd['filename'], fd['user'])
        for fd in file_dict_list
    ]
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # collect the rows and concatenate once instead.
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames)
# Convert a raw resource-monitor file into a usable form and save it.
def resource_parse_save(file):
    """Normalize a raw resource CSV and save it under a dated name.

    Parses the DateTime column into Month/Day/Yoli/Hour/Minute features,
    fills blank numeric cells with the column mean, writes the result as
    '<yesterday>_<user>_<filename>.csv' in file['upper_dir'], then deletes
    the original file.
    """
    df = pd.read_csv(file['path'])
    # Unify the resource file column names.
    # NOTE(review): '\%', '\A', '\P', '\W' are not valid escape sequences, so
    # Python keeps the backslash literally (with a warning on newer
    # versions); raw strings would make the intent explicit — confirm the
    # intended column names.
    df.columns = [
        "DateTime",
        "Memory\% Committed Bytes In Use",
        "Memory\Available MBytes",
        "Process(_Total)\% Processor Time",
        "Process(_Total)\Private Bytes",
        "Process(_Total)\Working Set",
        "Processor Information(_Total)\% Processor Time",
        "Processor Information(_Total)\% Processor Utility"
    ]
    # DateTime parsing; assumes "M/D/Y H:M[:S]" — TODO confirm the format.
    featureVector=[None] * 5
    for i in range(df.shape[0]):
        date = df.iloc[i,0] # DateTime cell for this row
        arr = date.split(' ')
        ymd = arr[0].split('/')
        month = int(ymd[0])
        day = int(ymd[1])
        year = int(ymd[2])
        time = arr[1].split(':')
        hour = int(time[0])
        minute = int(time[1])
        yoli = datetime.date(year, month, day).weekday() # weekday, 0 = Monday
        features = np.array([month, day, yoli, hour, minute])
        featureVector = np.column_stack((featureVector,features))
    df = df.drop(['DateTime'], axis=1)
    # Fill blanks with the column mean.
    df_columns = df.columns
    for i in range(len(df_columns)):
        col = df_columns[i]
        if(df[col].dtypes == 'object'):
            # Whitespace-only cells become NaN, then get the mean value.
            df[col] = df[col].replace(r'[\s]',np.nan,regex=True)
            mean = df[col].astype('float64').mean()
            df[col] = df[col].fillna(mean)
    featureVector = featureVector[:,1:] # drop the initial None seed column
    date_df = pd.DataFrame(data=featureVector,
            index=["Month", "Day", "Yoli", "Hour", "Minute"])
    date_df = date_df.transpose()
    df = pd.concat([df, date_df],axis=1)
    yesterday_str = set_yesterday_str()
    new_filename = yesterday_str+'_'+file['user']+'_'+file['filename']
    write_file_path = file['upper_dir']+'/'+new_filename
    df.to_csv(write_file_path+'.csv', index=False)
    os.remove(file['path']) # delete the original file
# Extract resource-file features, save them, and return the DataFrame.
def resource_feature_extract(path, filename, label):
    """Flatten a parsed resource CSV into one wide feature row.

    Row i of the input becomes columns '<i+1>_<name>'. The row is appended
    to the global ``r_feature_file`` CSV (header written only on first
    creation) and returned.
    """
    df = pd.read_csv(path)
    df = df.astype('float64')
    # The date-derived columns are not used as model features.
    df = df.drop(['Month', 'Day', 'Yoli', 'Hour', 'Minute'], axis=1)
    # Accumulates feature-name -> value for this one file.
    extract = {}
    # Source file name.
    extract['filename'] = filename
    # Pattern-owner label.
    extract['label'] = label
    # Extraction timestamp.
    time = datetime.datetime.today()
    time = time.strftime("%Y-%m-%d %H:%M:%S")
    extract['time'] = time
    extract_df = pd.DataFrame([extract])
    columns = df.columns
    # Append each input row as a prefixed block of columns.
    for i in range(df.shape[0]):
        new_columns = str(i+1)+'_'+columns
        row_df = pd.DataFrame([df.iloc[i].values], columns=new_columns)
        extract_df = pd.concat([extract_df, row_df], axis=1)
    if(os.path.isfile(r_feature_file)):
        extract_df.to_csv(r_feature_file, mode='a', index=False, header=None)
    else:
        extract_df.to_csv(r_feature_file, mode='w', index=False)
    return extract_df
# Extract and save features for every file in file_dict_list, then return
# all per-file feature rows combined into one DataFrame.
def resource_preprocess(file_dict_list):
    """Run resource_feature_extract over each file dict and concatenate.

    Each dict needs 'path', 'filename' and 'user' keys; 'user' is used as
    the label. Returns an empty DataFrame for an empty list.
    """
    frames = [
        resource_feature_extract(fd['path'], fd['filename'], fd['user'])
        for fd in file_dict_list
    ]
    # DataFrame.append was removed in pandas 2.0; use pd.concat instead.
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames)
# Preprocess a dataframe for model input.
# df: data to scale; filename: name of the persisted scaler inside model_dir.
def scaling(df, filename):
    """Fill NaNs with 0 and apply the saved scaler to *df*."""
    filled = df.fillna(0)
    scaler = joblib.load(model_dir + filename)
    return scaler.transform(filled)
# Return the AI-predicted probability that the pattern belongs to its owner.
def predict_pattern(user, data, kind):
    """Load yesterday's per-user model ('mouse' or resource) and predict."""
    yesterday_str = set_yesterday_str()
    suffix = '_m.h5' if kind == 'mouse' else '_r.h5'
    model_name = model_dir + yesterday_str + '_' + user + '_model' + suffix
    # Load the persisted Keras model and run inference.
    model = tf.keras.models.load_model(model_name)
    return model.predict(data)
# Move predicted-pattern files out of preDir into the pattern directories.
# user: logged-in user; preDir: previous directory path (e.g. './pred')
def preDir_move_patternDir(user, preDir):
    """Copy preDir/{mouse,resource}/<user> into ./pattern/... then remove the sources."""
    # NOTE(review): destinations have no per-user subdirectory — confirm intended.
    moves = [
        (preDir + '/mouse/' + user, './pattern/mouse/'),
        (preDir + '/resource/' + user, './pattern/resource/'),
    ]
    for src, dst in moves:
        copy_tree(src, dst)
        shutil.rmtree(src)
# Relabel pattern rows in the feature file.
def modify_pattern_label(file_dict_list, kind, label):
    """Set 'label' on every feature row whose filename is in file_dict_list.

    kind selects the feature CSV ('resource' -> r_feature_file, anything
    else -> the mouse file); the CSV is rewritten in place.
    """
    path = r_feature_file if kind == 'resource' else m_feature_file
    df = pd.read_csv(path)
    names = [entry['filename'] for entry in file_dict_list]
    df.loc[df['filename'].isin(names), 'label'] = label
    df.to_csv(path, index=False)
# Build the payload dict for the CERT alert request.
def make_sendData(issue, user, label, m_pred, r_pred, m_files, r_files):
    """Assemble the alert payload.

    m_pred/r_pred are 2-D prediction arrays; only element [0][0] is used,
    rounded to 5 decimals and stringified. m_files/r_files are file dicts
    whose 'filename' values are joined into comma-separated strings.
    """
    now = datetime.datetime.today().strftime("%Y-%m-%d %H:%M:%S.%f")
    m_file_string = ','.join(entry['filename'] for entry in m_files)
    r_file_string = ','.join(entry['filename'] for entry in r_files)
    mouse_prob = m_pred[0][0]
    resource_prob = r_pred[0][0]
    return {'user':user,
            'time':now,
            'mouse_prediction': str(round(mouse_prob,5)),
            'resource_prediction': str(round(resource_prob,5)),
            'type':issue,
            'label':label,
            'mouse_file_list': m_file_string,
            'resource_file_list': r_file_string
           }
# Notify the CERT team of an alert.
def alert_to_CERT(data, url):
    """POST *data* as JSON to *url* and return the response.

    verify=False skips SSL certificate validation (internal proxy endpoint).
    """
    payload = json.dumps(data)
    headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    return requests.post(url, data=payload, verify=False, headers=headers)
if __name__ == "__main__":
    # argv: user name and a by-pass flag ("PASS" skips the AI decision step).
    user = sys.argv[1]
    by_pass = sys.argv[2]
    print(user +', '+ by_pass + " in python")
    # 0 ~ < threshold : block the user
    # threshold <= ~ < tolerance : allow, with demerit points
    # tolerance <= ~ 1 : allow
    m_threshold = 0.9 # mouse block threshold
    m_tolerance = 0.95 # mouse demerit range
    r_threshold = 0.9 # resource block threshold
    r_tolerance = 0.95 # resource demerit range
    idle_r_threshold = 0.95 # resource threshold when the mouse is idle
    idle_r_tolerance = 0.98 # resource demerit range when the mouse is idle
    yesterday_str = set_yesterday_str()
    url = 'http://localhost:8222/cert' # proxy address
    model_dir = './model/'
    m_pred_dir = './pred/mouse'
    r_pred_dir = './pred/resource'
    m_feature_file = './m_feature_extract.csv'
    r_feature_file = './r_feature_extract.csv'
    # Resource
    # Rebuild the needed columns from the raw R files and save them.
    r_files = find_file(r_pred_dir, user)
    for file in r_files:
        resource_parse_save(file)
    # Re-list the rewritten R files.
    r_files = find_file(r_pred_dir, user)
    # Extract R features, persist them as CSV, and keep the DataFrame.
    r_data = resource_preprocess(r_files)
    print(r_data.shape)
    # preprocess
    r_scaler_name = yesterday_str+'_'+user+'_scaler_r.gz'
    r_data = r_data.drop(['filename', 'label', 'time'], axis=1)
    r_data = scaling(r_data, r_scaler_name)
    # AI
    # Probability (per the resource model) that this pattern is this user.
    r_pred = predict_pattern(user, r_data, 'resource')
    # Mouse
    # Rebuild the needed columns from the raw M files and save them.
    m_files = find_file(m_pred_dir, user)
    for file in m_files:
        mouse_parse_save(file)
    # Re-list the rewritten M files.
    m_files = find_file(m_pred_dir, user)
    # Extract M features, persist them as CSV, and keep the DataFrame.
    m_data = mouse_preprocess(m_files)
    print(m_data.shape)
    if(m_data.shape != (1,3)): # mouse was not idle
        # preprocess
        m_scaler_name = yesterday_str+'_'+user+'_scaler_m.gz'
        m_data = m_data.drop(['filename', 'label', 'time'], axis=1)
        m_data = scaling(m_data, m_scaler_name)
        # AI
        # Probability (per the mouse model) that this pattern is this user.
        m_pred = predict_pattern(user, m_data, 'mouse')
    else:
        m_pred = np.array([[-1.0]]) # -1 marks "idle: no mouse prediction made"
    print(str(m_pred[0][0])+', '+ str(r_pred[0][0]))
    # Move ./pred/mouse/<user> and ./pred/resource/<user> files into
    # ./pattern/mouse and ./pattern/resource.
    preDir_move_patternDir(user, './pred')
    # With by-pass permission, skip the AI decision handling below.
    if by_pass != "PASS":
        # NOTE(review): m_pred/r_pred are 1x1 arrays, so these comparisons
        # rely on single-element truth testing — confirm intended.
        if m_pred == -1: # idle case
            if r_pred < idle_r_threshold:
                label = 'unknown'
                modify_pattern_label(r_files, 'resource', label)
                sendData = make_sendData(4, user, label, m_pred, r_pred, m_files, r_files)
                res = alert_to_CERT(sendData, url) # notify admin: idle block (type 4)
            elif r_pred < idle_r_tolerance:
                sendData = make_sendData(5, user, user, m_pred, r_pred, m_files, r_files)
                res = alert_to_CERT(sendData, url) # notify admin: idle demerit (type 5)
        else: # not idle
            # block) label files 'unknown' and alert CERT (via the proxy)
            # demerit) alert CERT only
            if m_pred < m_threshold or r_pred < r_threshold :
                label = 'unknown'
                modify_pattern_label(m_files, 'mouse', label)
                modify_pattern_label(r_files, 'resource', label)
                sendData = make_sendData(2, user, label, m_pred, r_pred, m_files, r_files)
                res = alert_to_CERT(sendData, url) # notify admin: block (type 2)
            elif m_pred < m_tolerance or r_pred < r_tolerance:
                sendData = make_sendData(3, user, user, m_pred, r_pred, m_files, r_files)
                res = alert_to_CERT(sendData, url) # notify admin: demerit (type 3)
import numpy as np
import sys
from environments.openai_environment import OpenAIEnvironment
from algorithms.deep_q_learning import DeepQLearning
from absl import flags
def main():
    """Cartpole example: build the environment and DQN agent, then train."""
    # Load environment
    env = OpenAIEnvironment('CartPole-v0')
    # NOTE(review): num_actions() is called but observation_dimensions is
    # referenced without parentheses — confirm it is a property/attribute.
    algorithm = DeepQLearning(gamma = 0.95,
    epsilon = 0.01,
    num_actions = env.num_actions(),
    observation_dim=env.observation_dimensions,
    learning_rate = 0.001,
    buffer_capacity = 2000,
    batch_size = 32)
    # Train
    train(env, algorithm)
# Training loop: act in the environment forever, counting steps per episode
# and printing the average episode length every 50 completed episodes.
# env - OpenAI Gym Environment
# algorithm - agent exposing act(env)
def train(env, algorithm):
    # score counts steps taken in the current episode
    score = 0
    scores = []
    while True:  # no termination condition: runs until interrupted
        algorithm.act(env)
        # Reset if complete
        if env.is_complete():
            env.reset()
            scores.append(score)
            if len(scores) % 50 == 0:
                print("Score Average: " + str(sum(scores)/len(scores)))
                scores = []
            score = 0
        # NOTE(review): this also runs on the completing iteration, so each
        # new episode starts counting from 1 — confirm the off-by-one is
        # intended.
        score += 1
if __name__ == "__main__":
    # Let absl consume command-line flags before training starts.
    flags.FLAGS(sys.argv)
    main()
|
class Solution:
    def maxIceCream(self, costs: 'List[int]', coins: int) -> int:
        """Return the maximum number of ice-cream bars buyable with *coins*.

        Greedy: buy the cheapest bars first. Iterating the sorted list is
        O(n log n); the original popped from the front of the list on every
        purchase, which made the loop O(n^2). Annotations are quoted so the
        class loads even though `List` is never imported from typing.
        """
        costs.sort()
        bought = 0
        for cost in costs:
            # Same stop conditions as before: out of money, or next bar
            # is unaffordable.
            if coins <= 0 or cost > coins:
                break
            coins -= cost
            bought += 1
        return bought
|
# coding:utf-8
__author__ = "golden"
__date__ = '2018/6/29'
# PID file written by the spider daemon.
PID_FILE = "jspider.pid"
# Directory for log output.
LOG_PATH = "log"
# Directory containing the spider modules.
SPIDER_PATH = 'spiders'
# Externally visible domain/IP of the web UI.
WEB_DOMAIN = '193.168.4.101'
# Server options (presumably passed to a Sanic-style run() — confirm).
WEB_SERVER = {
    'host': '0.0.0.0',
    'port': 8081,
    'debug': True,
    'ssl': None,
    'sock': None,
    'protocol': None,
    'backlog': 100,
    'stop_event': None,
    'access_log': True
}
WEB_CONFIG = { # key must upper
    "KEEP_ALIVE": True,
    "AUTH_LOGIN_ENDPOINT": "login"
}
# Node identity; empty name by default.
NODE = {
    'name': ''
}
|
import pymysql
import datetime
import re
class Db:
    """Thin helper around a local MySQL connection for the bmp280 table."""
    # Class-level defaults; instances overwrite them in __init__.
    db = None
    cursor = None

    @staticmethod
    def table_exists(con, table_name):
        """Return 1 when *table_name* exists in the current schema, else 0.

        Reads the SHOW TABLES rows directly instead of the old approach of
        regex-scanning the repr() of the result set, which was fragile.
        """
        con.execute("show tables;")
        # Each fetched row is a 1-tuple holding one table name.
        names = [row[0] for row in con.fetchall()]
        return 1 if table_name in names else 0

    @staticmethod
    def check_conn(cursor):
        """Return True (and warn) when there is no usable cursor."""
        if cursor is None:
            print("you have wrong connection")
        return cursor is None

    def __init__(self):
        # Keyword arguments: positional connect() arguments were removed in
        # PyMySQL 1.0, and keywords also work on older versions.
        self.db = pymysql.connect(host='localhost', user='root',
                                  password='123', database='test_db')
        if self.db is None:
            print("Connect error")
        else:
            self.cursor = self.db.cursor()
            if self.cursor is not None:
                print("Connect ok")

    def create_table(self):
        """Create the bmp280 table unless it already exists."""
        if self.check_conn(self.cursor):
            return
        if self.table_exists(self.cursor, 'bmp280'):
            print("table existed")
            return
        self.cursor.execute("DROP TABLE IF EXISTS bmp280")
        sql = '''
        CREATE TABLE `bmp280` (
        `id` int(11) NOT NULL AUTO_INCREMENT,
        `temp` float DEFAULT NULL,
        `pressure` float DEFAULT NULL,
        `time` datetime DEFAULT NULL,
        `creator` varchar(45) DEFAULT NULL,
        PRIMARY KEY (`id`)
        ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
        '''
        self.cursor.execute(sql)
        self.db.commit()

    def insert(self, data):
        """Insert one (temp, pressure, time, creator) reading."""
        if self.check_conn(self.cursor):
            return
        # Parameterized query: values are bound by the driver, not formatted.
        sql = 'insert into bmp280(temp,pressure,time,creator) ' \
              'values(%s, %s, %s, %s)'
        res = self.cursor.execute(sql, data)
        self.db.commit()
        print("insert res"+str(res))

    def select_all(self):
        """Return every row of bmp280, or None on a bad connection."""
        if self.check_conn(self.cursor):
            return
        sql = 'select * from bmp280'
        count = self.cursor.execute(sql)
        print('count' + str(count))
        res = self.cursor.fetchall()
        self.db.commit()
        return res

    def destroy(self):
        """Close the cursor and the connection."""
        self.cursor.close()
        self.db.close()
def test():
    """Smoke test: create the table, insert one reading, print all rows."""
    db = Db()
    db.create_table()
    create_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # temp, pressure, time, creator — matches the insert() column order.
    data = [23, 3223, create_time, 'tester']
    db.insert(data)
    print(db.select_all())
    db.destroy()
if __name__ == '__main__':
    test()
|
def is_palindrome(word):
    """Return 1 when *word* reads the same backwards, else 0 (empty -> 1)."""
    return 1 if word == word[::-1] else 0
# Read a word from stdin and print 1/0 for palindrome/not.
word = input()
print(is_palindrome(word))
|
# Iterators and generators
# Fix: the Iterator ABC moved to collections.abc (importing it from
# `collections` stopped working in Python 3.10).
from collections.abc import Iterator
isinstance((x for x in range(10)), Iterator)
# iter() turns any iterable into an Iterator object.
it = iter([1, 2, 3, 4, 5])
while True:
    try:
        # Fetch the next value:
        x = next(it)
    except StopIteration:
        # StopIteration signals exhaustion: leave the loop.
        break
# A generator computes its values lazily, one per iteration.
def fib(max):
    """Yield the first *max* Fibonacci numbers (1, 1, 2, ...); return 'done'."""
    count = 0
    prev, curr = 0, 1
    while count < max:
        yield curr
        prev, curr = curr, prev + curr
        count += 1
    return 'done'
fib(6)  # calling a generator function only builds the generator object
g = fib(6)
while True:
    try:
        x = next(g)
        print('g', x)
    except StopIteration as e:
        # The generator's return value travels on StopIteration.value.
        print('Generator return value', e.value)
        break
# Input and output
# File methods
# file
# Open (creating if needed) for writing
f = open('hello.txt','w')
# Write two lines
f.write('hello\n')
f.write('world\n')
# close() must be called: file objects hold OS resources, and the OS limits
# how many files can be open at once.
f.close()
t = open('hello.txt')
# Read the file back
text = t.read()
t.close()
print(text)
st = text.split()
print(st)
detail = dir(f)
print(detail)
he = help(f.seek)  # help() prints its text and returns None
print(he)
# Shorthand for the try/finally close pattern:
# try:
#     f = open('hello.txt','r')
#     print(f)
# finally:
#     if f:
#         f.close()
with open('hello.txt', 'r') as f:
    print(f.read())
# Preferred: iterate over the lines
with open('hello.txt', 'r') as f:
    for line in f.readlines():
        print(line.strip())
# Binary files
f = open('test.jpg', 'rb')
f.read()
# Character encodings: read GBK text, ignoring undecodable bytes
f = open('gbk.txt', 'r', encoding='gbk', errors='ignore')
f.read()
# Writing a file
f = open('text.txt', 'w')
f.write('hhahah,super')
f.close()
# Shorthand with automatic close
with open('text.txt', 'w') as f:
    f.write('hello world')
# OS file/directory methods
# StringIO
# getvalue() returns everything written so far as one str
from io import StringIO
f = StringIO()
f.write('hello')
f.write(' ')
f.write('world')
print(f.getvalue())
# A StringIO can also be pre-loaded and read like a file.
f = StringIO('hello!\nhi!\ngoodbye!')
while True:
    s = f.readline()
    if s == '':
        break
    print(s.strip())
# BytesIO
# Operate on in-memory binary data.
from io import BytesIO
# Fix: BytesIO must be instantiated — the original bound the class itself
# (`f = BytesIO`), so the following write() call failed.
f = BytesIO()
f.write('中文'.encode('utf-8'))
print(f.getvalue())
f = BytesIO(b'\xe4\xb8\xad\xe6\x96\x87')
f.read()
# Working with files and directories
# Some os functions are operating-system specific
import os
print(os.name)
# uname() gives detailed system info (POSIX only, not on Windows)
print(os.uname())
# Environment variables
# All environment variables defined by the OS live in os.environ
print(os.environ)
print(os.environ.get('PATH'))
print(os.environ.get('x', 'default'))
# Files and directories
# Absolute path of the current directory
os.path.abspath('.')
# Creating a directory under some path
os.path.join('/user/michael','testdir') # build the full path portably
os.mkdir('/user/michael/testdir') # create the directory
os.rmdir('/user/michael/testdir') # remove the directory
# List all sub-directories of the current directory
[x for x in os.listdir('.') if os.path.isdir(x)]
# List files with a specific extension
[x for x in os.listdir('.') if os.path.isfile(x) and os.path.splitext(x)[1]=='.py']
# 正则表达式
# []表示匹配這些字符之一,$匹配字符串的結尾 re.sub()函數執行基於正則表達式的字符串的替換
# ^ 除了
import re
# def plural(noun):
# if re.search('[sxz]$', noun):
# return re.sub('$', 'es', noun)
# elif re.search('[^aeioudgkprt]h$',noun):
# return re.sub('$', 'es', noun)
# elif re.search('[aeiou]y$', noun):
# return re.sub('y$', 'ies', noun)
# else:
# return noun + 's'
#
# print(plural('vacancy'))
def match_sxz(noun):
    """Return a match when *noun* ends in s, x, or z, else None."""
    ending = re.compile('[sxz]$')
    return ending.search(noun)
def apply_sxz(noun):
    """Pluralize an s/x/z noun by appending 'es' at the string end."""
    suffix = 'es'
    return re.sub('$', suffix, noun)
def match_h(noun):
    """Match nouns ending in h preceded by a consonant not in 'dgkprt'."""
    pattern = '[^aeioudgkprt]h$'
    return re.search(pattern, noun)
def apply_h(noun):
    """Pluralize a matching -h noun by appending 'es' at the string end."""
    suffix = 'es'
    return re.sub('$', suffix, noun)
def match_y(noun):
    """Match nouns ending in y preceded by a consonant."""
    pattern = '[^aeiou]y$'
    return re.search(pattern, noun)
def apply_y(noun):
    """Pluralize a consonant-y noun: replace the trailing 'y' with 'ies'."""
    replacement = 'ies'
    return re.sub('y$', replacement, noun)
def match_default(noun):
    """Catch-all rule: every noun matches."""
    matched = True
    return matched
def apply_default(noun):
    """Default pluralization: append 's'."""
    suffix = 's'
    return noun + suffix
# Each rule pairs a match predicate with the transform to apply.
# Bug fixes: the y-rule paired match_y with itself (so matching nouns were
# never transformed), and the default rule *called* match_default — storing
# the boolean True instead of a callable, which made plural() crash with a
# TypeError on regular nouns.
rules = (
    (match_sxz, apply_sxz),
    (match_h, apply_h),
    (match_y, apply_y),
    (match_default, apply_default)
)
# Walk the rule table; the first matching predicate decides the transform.
def plural(noun):
    """Return the plural of *noun* using the first applicable rule."""
    for matches, transform in rules:
        if matches(noun):
            return transform(noun)
|
#!usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
__author__ = 'Sapocaly'
from utils import DBconfig
import xmlrpclib
import utils.PathHelper
# Presumably adjusts sys.path so the src.DB imports below resolve — must
# run before them (TODO confirm).
utils.PathHelper.configure_dir()
import src.DB.Entry as Entry
import src.DB.DAL as DAL
#DB saving related
# config = DBconfig.DBConfig("conf/byyy_ba_db.cfg")
# config_args = dict(zip(['host', 'user', 'passwd', 'database'],
# [config.DB_HOST, config.DB_USER, config.DB_PASSWORD, config.DB_NAME]))
# DAL.create_engine(**config_args)
#
# t = Entry.Page(url='test_url', content='糖糖糖')
# Entry.Page.add(t)
# del (t)
#rpc cal related
#ip 10.84.14.55 for remote usage
def fetch(url):
    """Download *url* and return the raw response body (urllib2, Python 2)."""
    return urllib2.urlopen(url).read()
def save_html(url,html):
    # Persist a fetched page: base64-encode the HTML and store it via the DAL.
    import base64
    encoded_html = base64.b64encode(html)
    # Python 2 print statement (this module is Python 2 only).
    print encoded_html
    with DAL.connection():
        t = Entry.Page(url=url, content=encoded_html)
        Entry.Page.add(t)
        del(t)
def parse_html(html):
    """Extract outgoing links from *html*.

    Not implemented yet. Returns an empty list so the caller's
    `for url in new_urls` loop does not crash — the previous stub returned
    None implicitly, which raised a TypeError there.
    """
    # TODO: implement real link extraction.
    return []
# Build the DB engine from the config file.
config = DBconfig.DBConfig("conf/byyy_ba_db.cfg")
config_args = dict(zip(['host', 'user', 'passwd', 'database'],
                       [config.DB_HOST, config.DB_USER, config.DB_PASSWORD, config.DB_NAME]))
DAL.create_engine(**config_args)
#html = fetch("http://www.baidu.com")
#save_html("http://www.baidu.com",html)
# Crawl loop: pull a start URL from the XML-RPC queue, fetch and store the
# page, then push extracted links back onto the queue.
while True:
    proxy = xmlrpclib.ServerProxy("http://127.0.0.1:8000/")
    multicall = xmlrpclib.MultiCall(proxy)
    multicall.get()
    result = multicall()
    start_url = tuple(result)[0]
    html = fetch(start_url)
    # NOTE(review): parse_html must return an iterable; a None return would
    # crash the for-loop below.
    new_urls = parse_html(html)
    save_html(start_url,html)
    for url in new_urls:
        multicall.put(url)
|
"""
Given a digit string, return all possible letter combinations that the number could represent.
A mapping of digit to letters (just like on the telephone buttons) is given below.
Input:Digit string "23"
Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
"""
class Solution(object):
    def letterCombinations(self, digits):
        """
        :type digits: str
        :rtype: List[str]
        """
        keypad = {'2':['a','b','c'],'9':['w','x','y','z'],
                  '3':['d','e','f'],'4':['g','h','i'],
                  '5':['j','k','l'],'6':['m','n','o'],
                  '7':['p','q','r','s'],'8':['t','u','v']}
        if not digits:
            return []
        # Build iteratively instead of recursively: extend every existing
        # prefix with each letter mapped to the next digit.
        combos = ['']
        for digit in digits:
            combos = [prefix + letter for prefix in combos for letter in keypad[digit]]
        return combos
#!/usr/bin/env python
#Bao Dang
#Assignment 2
"""Fot this problem, since the list python starts at 0, the forest is supposed to start at 0 """
class huffman_algorithm:
    """Table-driven Huffman tree construction.

    TREE rows are [leftchild, rightchild, parent] node records; ALPHABET
    rows are [symbol, probability, leaf-index]; FOREST rows are
    [weight, root-node-index] for the trees still to be merged.
    """
    def __init__(self):
        self.FOREST = []    # active trees as [weight, root index]
        self.ALPHABET = []  # [symbol, probability, leaf index] records
        self.TREE = []      # [left, right, parent] node records
        self.least = 0      # index of the lightest forest entry
        self.second = 1     # index of the second-lightest forest entry
        self.lasttree = 0   # number of forest entries
        self.lastnode = 0   # number of tree nodes
    def tree(self, leftchild=0, rightchild=0, parent=0):
        # Append a node record and track the table size.
        self.TREE.append([leftchild, rightchild, parent])
        self.lastnode = len(self.TREE)
    def alphabet(self, symbol, probability, leaf):
        # Register a symbol with its probability and leaf node index.
        self.ALPHABET.append([symbol, probability, leaf])
    def forest(self, weight, root):
        # Add a single-node tree to the forest.
        self.FOREST.append([weight, root])
        self.lasttree = len(self.FOREST)
    #find the trees that have smallest and second smallest weight
    def lightones(self):
        # Seed least/second from the first two entries, then scan the rest.
        if self.FOREST[0][0] <= self.FOREST[1][0]:
            self.least = 0
            self.second = 1
        else:
            self.least = 1
            self.second = 0
        for i in range(2, self.lasttree):
            if self.FOREST[i][0] < self.FOREST[self.least][0]:
                self.second = self.least
                self.least = i
            elif self.FOREST[i][0] < self.FOREST[self.second][0]:
                self.second = i
    #create a new node with left child and right child that have smallest and second smallest weight
    def create(self, lefttree, righttree):
        self.tree()
        # tree() set lastnode to the new length; step back to the new index.
        self.lastnode-=1
        self.TREE[self.lastnode][0] = self.FOREST[lefttree][1]
        self.TREE[self.lastnode][1] = self.FOREST[righttree][1]
        self.TREE[self.lastnode][2] = 0
        # Point both merged roots at their new parent.
        self.TREE[self.FOREST[lefttree][1]][2] = self.lastnode
        self.TREE[self.FOREST[righttree][1]][2] = self.lastnode
        return self.lastnode
    def huffman(self):
        # Repeatedly merge the two lightest trees until one remains.
        while self.lasttree > 1:
            self.lightones()
            self.FOREST[self.least][0] = self.FOREST[self.least][0] + self.FOREST[self.second][0]
            self.FOREST[self.least][1] = self.create(self.least, self.second)
            # Remove the merged-away entry by swapping in the last one.
            self.FOREST[self.second] = self.FOREST[self.lasttree-1]
            self.FOREST.pop()
            self.lasttree -= 1
if __name__ == "__main__":
    # Build a six-symbol example and run Huffman (Python 2 script).
    A = huffman_algorithm()
    # NOTE(review): six alphabet/forest entries reference leaves 0-5, but
    # only five TREE slots are created here, so leaf 5 ends up sharing an
    # index with the first merged internal node — confirm a sixth A.tree()
    # call is not missing.
    A.tree()
    A.tree()
    A.tree()
    A.tree()
    A.tree()
    A.alphabet('a', 0.07, 0)
    A.alphabet('b', 0.09, 1)
    A.alphabet('c', 0.12, 2)
    A.alphabet('d', 0.22, 3)
    A.alphabet('e', 0.23, 4)
    A.alphabet('f', 0.27, 5)
    A.forest(0.07, 0)
    A.forest(0.09, 1)
    A.forest(0.12, 2)
    A.forest(0.22, 3)
    A.forest(0.23, 4)
    A.forest(0.27, 5)
    # Dump the tables before merging.
    print "FOREST TABLE"
    for n in A.FOREST:
        print n,
    print "\nALPHABET TABLE"
    for n in A.ALPHABET:
        print n,
    print "\nTREE TABLE"
    for n in A.TREE:
        print n,
    A.huffman()
    # Dump the tables after merging.
    print "\n-------------\n"
    print "Result after doing Huffman"
    print "FOREST TABLE"
    for n in A.FOREST:
        print n,
    print "\nALPHABET TABLE"
    for n in A.ALPHABET:
        print n,
    print "\nTREE TABLE"
    for n in A.TREE:
        print n,
|
# Way 1: for-in over the values
tuple1 = ("a", "1", 1, "b", "c", "c")
for value in tuple1:
    print("value=", value)
print("--------------------")
# Way 2: the built-in enumerate() yields index and value
tuple2 = ("a", "1", 1, "b", "c", "c")
for index, value in enumerate(tuple2):
    print("index=", index, "value=", value)
print("-------------------- ")
# Way 3: index-based iteration with range()
tuple3 = ("a", "1", 1, "b", "c", "c")
for index in range(len(tuple3)):
    print("index=", index, "value=", tuple3[index])
print("--------------------")
# Way 4: explicit iterator via iter()
tuple4 = ("a", "1", 1, "b", "c", "c")
for value in iter(tuple4):
    print("value=", value)
"""
四种方式遍历元组的语法:
for item in tuple:
    pass
for index in range(len(tuple)):
    pass
for index, value in enumerate(tuple):
    pass
for item in iter(tuple):
    pass
"""
from flask import Flask, render_template, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from datatables import ColumnDT, DataTables
import datetime
import random
app = Flask(__name__)
# SQLite database file used by SQLAlchemy.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sites.db'
db = SQLAlchemy(app)
class User(db.Model) :
    """ORM model for the demo user table."""
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    # Short string columns plus a date-typed birthday.
    name = db.Column(db.String(40))
    phone = db.Column(db.String(10))
    birthday = db.Column(db.Date)
@app.route('/', methods=['GET', 'POST'])
def index() :
    # NOTE(review): the leading slash in the template name is unusual for
    # render_template — confirm 'index.html' resolves as intended.
    return render_template('/index.html')
@app.route('/load_table_sql', methods=['GET', 'POST'])
def load_table_sql() :
    # Map DataTables columns to User fields, in display order.
    columns = [ColumnDT(User.id),ColumnDT(User.name),ColumnDT(User.phone),ColumnDT(User.birthday)]
    query = db.session.query().select_from(User)
    # DataTables handles the paging/sorting/filtering sent in request.args.
    rowTable = DataTables(request.args.to_dict(), query, columns)
    # returns what is needed by DataTable
    return rowTable.output_result()
def random_date() :
    """Return a uniformly random date in [1997-01-01, 2010-01-01)."""
    start_date = datetime.date(1997, 1, 1)
    end_date = datetime.date(2010, 1, 1)
    span_days = (end_date - start_date).days
    offset = random.randrange(span_days)
    return start_date + datetime.timedelta(days=offset)
def create_db_user() :
    """Drop and recreate all tables, then seed 100 users with random birthdays."""
    db.drop_all()
    db.create_all()
    for i in range(100) :
        user = User(name='user'+str(i), phone='phone' +
                    str(i), birthday=random_date())
        db.session.add(user)
    # Single commit after all adds.
    db.session.commit()
    print('create done')
# NOTE(review): this runs at import time, dropping and re-seeding the DB on
# every start — confirm that is intended outside first-time setup.
create_db_user()
if __name__ == "__main__" :
    app.run( host="0.0.0.0",debug=True)
import json
from asyncio import sleep
from collections import defaultdict
from aiotg import TgBot
from secrets import token, botan
from atlantis.translations import en, ru
# Telegram bot instance; the botan token enables analytics.
bot = TgBot(token, botan_token=botan)
# chat.id -> Atlantis story instance; defaultdict yields a falsy {} before /start.
users = defaultdict(dict)
@bot.command(r'/start')
async def start(chat, match):
    # (Re)create the per-chat story state.
    users[chat.id] = Atlantis(chat)
    print('{} joined or started anew, total {}'.format(chat.sender, len(users)))
    await chat.send_text('Choose language / Выберите язык:\n\n/en English\n/ru Русский')
    await users[chat.id].flush()
@bot.command(r'/(en|ru)')
async def set_locale(chat, match):
    # No story yet: treat the command as /start.
    if not users[chat.id]:
        return (await start(chat, match))
    locale = match.group(1)
    print('{} changed locale to {}'.format(chat.sender, locale))
    if locale == 'en':
        users[chat.id].locale = en
    elif locale == 'ru':
        users[chat.id].locale = ru
    # Restart the story in the chosen language.
    users[chat.id].goto('Atl_Start')
    await users[chat.id].flush()
@bot.command(r'/(fast|slow)')
async def set_speed(chat, match):
    """Toggle the typing-delay speed for this chat's story."""
    # Consistency fix: like set_locale and choose, bounce to /start when no
    # story exists yet — users[chat.id] would otherwise be a plain dict and
    # the .fast assignment below would raise AttributeError.
    if not users[chat.id]:
        return (await start(chat, match))
    speed = match.group(1)
    print('{} changed speed to {}'.format(chat.sender, speed))
    users[chat.id].fast = (speed == 'fast')
@bot.command(r'.*')
async def choose(chat, match):
    # Fallback handler: any other text is treated as a story choice.
    if not users[chat.id]:
        return (await start(chat, match))
    choice = match.group(0)
    print('{} chose {}'.format(chat.sender, choice))
    users[chat.id].choose(choice)
    await users[chat.id].flush()
class Atlantis:
'Bot-bound Atlantis story.'
    def __init__(self, bot):
        # NOTE(review): the handlers actually pass the aiotg chat object
        # here — confirm the parameter name.
        self.state = {}       # story flags set by chosen options
        self.options = []     # current option dicts ('next', optional 'set', ...)
        self.choices = []     # display strings shown on the reply keyboard
        self.messages = []    # queued outgoing messages
        self.bot = bot
        self.locale = en      # active translation table (en/ru)
        self.fast = False     # True skips the typing delays
def say(self, text):
'Enqueue a message.'
lines = [x for x in self.locale[text].split('\n\n') if x.strip()]
self.messages.extend(lines)
async def typing(self, message):
'Delay based on message length.'
delay = min(max(1.5, len(message) / 50), 4.0)
if not self.fast:
await self.bot.send_chat_action(action='typing')
await sleep(delay)
    async def flush(self):
        """Send all pending messages, attaching the reply keyboard to the last."""
        # Snapshot the queue, then clear it before any awaits.
        queue = list(self.messages)
        self.messages = []
        # One keyboard button per current choice string.
        keyboard = {
            'keyboard': [[choice] for choice in self.choices],
            'resize_keyboard': True,
        }
        # All but the final message go out without a keyboard...
        while len(queue) > 1:
            message = queue.pop(0)
            await self.typing(message)
            await self.bot.send_text(message)
        # ...the final message carries the reply keyboard.
        if queue:
            message = queue.pop(0)
            await self.typing(message)
            await self.bot.send_text(message, reply_markup=json.dumps(keyboard))
def choose(self, choice):
'Advance the story based on a choice.'
try:
option = self.options[self.choices.index(choice)]
except ValueError:
print('invalid choice "{}" by {}'.format(choice, self.bot.sender))
else:
for s in option.get('set', ()):
self.state[s] = True
self.goto(option['next'])
def goto(self, jump):
'Jump to specific part of the story.'
if jump == 'Atl_Start':
self.say('Ln0016.0.text.FAREWELLATLANTISAStoryOf')
self.state = {}
self.options = [
{'next': 'Atl_Begin', 'text': 'Ln0037.0.option.Begin', 'short': 'Ln0037.0.short.Begin'},
{'next': 'Atl_Credits', 'text': 'TermDlg.Common.Credits', 'short': 'TermDlg.Common.Credits2'},
{'next': 'MessageBoardInterface_On', 'text': 'Ln0039.0.option.IOpenMyEyes', 'short': 'TermDlg.Common.ExitBrackets'},
]
if jump == 'Atl_Credits':
self.say('Ln0044.0.text.WrittenByLilithDedicatedTo')
self.options = [
{'next': 'Atl_Begin', 'text': 'Ln0037.0.option.Begin', 'short': 'Ln0037.0.short.Begin'},
{'next': 'MessageBoardInterface_On', 'text': 'Ln0039.0.option.IOpenMyEyes', 'short': 'Ln0053.0.short.Quit'},
]
if jump == 'Atl_Begin':
self.say('Ln0058.0.text.ChooseYourCharacterClass')
self.options = [
{'set': ['ClassPoet'], 'next': 'Atl_Poet', 'text': 'Ln0063.0.option.Poet'},
{'set': ['ClassPhysician'], 'next': 'Atl_Physician', 'text': 'Ln0064.0.option.Physician'},
{'set': ['ClassFarmer'], 'next': 'Atl_Farmer', 'text': 'Ln0065.0.option.Farmer'},
{'set': ['ClassScientist'], 'next': 'Atl_Scientist', 'text': 'Ln0066.0.option.Scientist'},
{'set': ['ClassMagician'], 'next': 'Atl_Magician', 'text': 'Ln0067.0.option.Magician'},
]
# POET
if jump == 'Atl_Poet':
self.say('Ln0074.0.text.YouAreSittingUponA')
self.options = [
{'next': 'Atl_PoetWork', 'text': 'Ln0079.0.option.WorkOnPoetry'},
{'next': 'Atl_PoetObserve', 'text': 'Ln0080.0.option.ObserveTheChildren'},
{'next': 'Atl_PoetPlay', 'text': 'Ln0081.0.option.PlayWithTheChildren'},
]
if jump == 'Atl_PoetWork':
self.say('Ln0086.0.text.YouCloseYourEyesFocusing')
self.options = [
{'next': 'Atl_Messenger', 'text': 'Ln0093.0.option.TalkToTheMessenger'},
]
if jump == 'Atl_PoetObserve':
self.say('Ln0098.0.text.YouObserveThePlayingChildren')
self.options = [
{'next': 'Atl_Messenger', 'text': 'Ln0093.0.option.TalkToTheMessenger'},
]
if jump == 'Atl_PoetPlay':
self.say('Ln0112.0.text.YouGoDownToThe')
self.options = [
{'next': 'Atl_Messenger', 'text': 'Ln0093.0.option.TalkToTheMessenger'},
]
# PHYSICIAN
if jump == 'Atl_Physician':
self.say('Ln0128.0.text.YoureInTheGreatHall')
self.options = [
{'next': 'Atl_Approach', 'text': 'Ln0135.0.option.Approach'},
]
if jump == 'Atl_Approach':
self.say('Ln0140.0.text.AsYouApproachYouSee')
self.options = [
{'next': 'Atl_Sit', 'text': 'Ln0147.0.option.SitWithTheMan'},
{'next': 'Atl_Offer', 'text': 'Ln0148.0.option.OfferHimSleepSnakePoison'},
]
if jump == 'Atl_Sit':
self.say('Ln0153.0.text.YouSitForTheMan')
self.options = [
{'next': 'Atl_Pray', 'text': 'Ln0162.0.option.SayAPrayer'},
{'next': 'Atl_Pray', 'text': 'Ln0163.0.option.CloseHisEyes'},
]
if jump == 'Atl_Pray':
self.say('Ln0168.0.text.YouGetUpAndApproach')
self.options = [
{'next': 'Atl_Messenger', 'text': 'Ln0093.0.option.TalkToTheMessenger'},
]
if jump == 'Atl_Offer':
self.say('Ln0180.0.text.TheManConsidersThisFor')
self.options = [
{'next': 'Atl_Pray', 'text': 'Ln0162.0.option.SayAPrayer'},
{'next': 'Atl_Pray', 'text': 'Ln0163.0.option.CloseHisEyes'},
]
# FARMER
if jump == 'Atl_Farmer':
self.say('Ln0197.0.text.YoureOutsideOnYourFarm')
self.options = [
{'next': 'Atl_Dig', 'text': 'Ln0202.0.option.DigItUp'},
{'next': 'Atl_LeaveIt', 'text': 'Ln0203.0.option.LeaveItAlone'},
]
if jump == 'Atl_Dig':
self.say('Ln0208.0.text.ItsNotJustAStone')
self.options = [
{'next': 'Atl_KeepDigging', 'text': 'Ln0213.0.option.KeepDigging'},
]
if jump == 'Atl_KeepDigging':
self.say('Ln0218.0.text.NoYouBeginToRealize')
self.options = [
{'next': 'Atl_Messenger', 'text': 'Ln0093.0.option.TalkToTheMessenger'},
]
if jump == 'Atl_LeaveIt':
self.say('Ln0234.0.text.YouLeaveItButIt')
self.options = [
{'next': 'Atl_Messenger', 'text': 'Ln0093.0.option.TalkToTheMessenger'},
]
# SCIENTIST
if jump == 'Atl_Scientist':
self.say('Ln0252.0.text.YoureInTheTempleOf')
self.options = [
{'next': 'Atl_Experiment', 'text': 'Ln0257.0.option.StartTheExperiment'},
]
if jump == 'Atl_Experiment':
self.say('Ln0262.0.text.TheGearsOfTheMachine')
self.options = [
{'next': 'Atl_ApproachMachine', 'text': 'Ln0269.0.option.ApproachTheMachine'},
{'next': 'Atl_ShutDown', 'text': 'Ln0270.0.option.ShutItDown'},
{'next': 'Atl_KeepRunning', 'text': 'Ln0271.0.option.KeepItRunning'},
]
if jump == 'Atl_ApproachMachine':
self.say('Ln0276.0.text.YouStepCloserToThe')
self.options = [
{'next': 'Atl_Investigate', 'text': 'Ln0283.0.option.Investigate'},
]
if jump == 'Atl_Investigate':
self.say('Ln0288.0.text.ItsACityYouCan')
self.options = [
{'next': 'Atl_Messenger', 'text': 'Ln0297.0.option.TalkToTheMessenger'},
]
if jump == 'Atl_ShutDown':
self.say('Ln0302.0.text.NoThatsWrongTheLight')
self.options = [
{'next': 'Atl_ApproachMachine', 'text': 'Ln0307.0.option.ApproachTheLight'},
]
if jump == 'Atl_KeepRunning':
self.say('Ln0312.0.text.TheLightGrowsStrongerAnd')
self.options = [
{'next': 'Atl_ApproachMachine', 'text': 'Ln0307.0.option.ApproachTheLight'},
]
# MAGICIAN
if jump == 'Atl_Magician':
self.say('Ln0323.0.text.YouAreInYourTower')
self.options = [
{'next': 'Atl_SeekAnswers', 'text': 'Ln0330.0.option.SeekAnswers'},
{'next': 'Atl_LetItGo', 'text': 'Ln0331.0.option.LetItGo'},
]
if jump == 'Atl_SeekAnswers':
self.say('Ln0336.0.text.YouGoDownTheStairs')
self.options = [
{'next': 'Atl_TryTheSpell', 'text': 'Ln0341.0.option.TryTheSpell'},
{'next': 'Atl_LetItGo', 'text': 'Ln0331.0.option.LetItGo'},
]
if jump == 'Atl_TryTheSpell':
self.say('Ln0347.0.text.YouReturnToTheTop')
self.options = [
{'next': 'Atl_LookCloser', 'text': 'Ln0352.0.option.LookCloser'},
]
if jump == 'Atl_LookCloser':
self.say('Ln0357.0.text.ItsACityACity')
self.options = [
{'next': 'Atl_Messenger', 'text': 'Ln0297.0.option.TalkToTheMessenger'},
]
if jump == 'Atl_LetItGo':
self.say('Ln0371.0.text.YouDecideToLetIt')
self.options = [
{'next': 'Atl_Messenger', 'text': 'Ln0297.0.option.TalkToTheMessenger'},
]
# MESSENGER
if jump == 'Atl_Messenger':
self.say('Ln0387.0.text.TheMessengerIsIndeedA')
self.options = [
{'set': ['AtlantisDelay1'], 'next': 'Atl_AskAbout', 'text': 'Ln0396.0.option.AskWhatThisIsAbout'},
{'next': 'Atl_City', 'text': 'Ln0397.0.option.FollowTheMessenger'},
]
if jump == 'Atl_AskAbout':
self.say('Ln0402.0.text.IAmSorryTheMessenger')
self.options = [
{'next': 'Atl_City', 'text': 'Ln0397.0.option.FollowTheMessenger'},
]
if jump == 'Atl_City':
self.say('Ln0412.0.text.TheMessengerTakesYouTo')
self.options = [
{'next': 'Atl_Throne', 'text': 'Ln0419.0.option.EnterTheThroneRoom'},
]
# MEETING THE KING
if jump == 'Atl_Throne':
self.say('Ln0426.0.text.YouEnterTheThroneRoom')
self.options = [
{'set': ['AtlantisDelay2'], 'next': 'Atl_Mosaic', 'text': 'Ln0431.0.option.ExamineTheMosaic'},
{'next': 'Atl_LookKing', 'text': 'Ln0432.0.option.LookForTheKing'},
]
if jump == 'Atl_LookKing':
self.say('Ln0437.0.text.InAllThisSplendourThe')
self.options = [
{'next': 'Atl_Bow', 'text': 'Ln0444.0.option.BowBeforeTheKing'},
]
if jump == 'Atl_Mosaic':
self.say('Ln0449.0.text.TheSheerAmountOfWork')
self.options = [
{'next': 'Atl_Bow', 'text': 'Ln0444.0.option.BowBeforeTheKing'},
]
if jump == 'Atl_Bow':
if 'ClassFarmer' in self.state:
self.say('Ln0461.0.text.NoDoNotBowMy')
if 'ClassMagician' in self.state:
self.say('Ln0473.0.text.NoDoNotBowMy')
if 'ClassPhysician' in self.state:
self.say('Ln0485.0.text.NoDoNotBowMy')
if 'ClassPoet' in self.state:
self.say('Ln0497.0.text.NoDoNotBowMy')
if 'ClassScientist' in self.state:
self.say('Ln0509.0.text.NoDoNotBowMy')
self.options = [
{'next': 'Atl_Scroll', 'text': 'Ln0468.0.option.ReadScroll'},
]
if jump == 'Atl_Scroll':
if 'ClassFarmer' in self.state:
self.say('Ln0521.0.text.YouReadTheScrollWith')
if 'ClassMagician' in self.state:
self.say('Ln0533.0.text.YouReadTheScrollWith')
if 'ClassPhysician' in self.state:
self.say('Ln0545.0.text.YouReadTheScrollWith')
if 'ClassPoet' in self.state:
self.say('Ln0557.0.text.YouReadTheScrollWith')
if 'ClassScientist' in self.state:
self.say('Ln0569.0.text.YouReadTheScrollWith')
self.options = [
{'next': 'Atl_Confirm', 'text': 'Ln0528.0.option.ConfirmTheTruth'},
]
if jump == 'Atl_Confirm':
self.say('Ln0581.0.text.TheKingSighsIWas')
self.options = [
{'next': 'Atl_AskDone', 'text': 'Ln0588.0.option.AskWhatCanBeDone'},
{'next': 'Atl_Despair', 'text': 'Ln0589.0.option.SayThereIsNoHope'},
{'next': 'Atl_Curse', 'text': 'Ln0590.0.option.CurseTheGods'},
]
if jump == 'Atl_Despair':
self.say('Ln0595.0.text.MyFriendPoseidonasSaysDo')
self.options = [
{'next': 'Atl_AskDone', 'text': 'Ln0588.0.option.AskWhatCanBeDone'},
]
if jump == 'Atl_Curse':
self.say('Ln0605.0.text.MyFriendPoseidonasSaysYou')
self.options = [
{'next': 'Atl_AskDone', 'text': 'Ln0588.0.option.AskWhatCanBeDone'},
]
if jump == 'Atl_AskDone':
self.say('Ln0615.0.text.TellMeWhatIsThe')
self.options = [
{'set': ['AtlantisArt'], 'next': 'Atl_Art', 'text': 'Ln0620.0.option.Art', 'short': 'Ln0620.0.short.ChooseArt'},
{'set': ['AtlantisPeople'], 'next': 'Atl_People', 'text': 'Ln0621.0.option.ThePeople', 'short': 'Ln0621.0.short.ChooseThePeople'},
{'set': ['AtlantisKnowledge'], 'next': 'Atl_Knowledge', 'text': 'Ln0622.0.option.Knowledge', 'short': 'Ln0622.0.short.ChooseKnowledge'},
]
if jump == 'Atl_Art':
self.say('Ln0627.0.text.TheHeartOfAtlantisIs')
self.options = [
{'next': 'Atl_How', 'text': 'Ln0636.0.option.How'},
]
if jump == 'Atl_People':
self.say('Ln0641.0.text.TheHeartOfAtlantisIs')
self.options = [
{'next': 'Atl_How', 'text': 'Ln0636.0.option.How'},
]
if jump == 'Atl_Knowledge':
self.say('Ln0653.0.text.TheHeartOfAtlantisIs')
self.options = [
{'next': 'Atl_How', 'text': 'Ln0636.0.option.How'},
]
if jump == 'Atl_How':
self.say('Ln0667.0.text.ThereIsNotMuchTime')
self.options = [
{'set': ['AtlantisDelay3'], 'next': 'Atl_WhyMe', 'text': 'Ln0676.0.option.WhyMe'},
{'next': 'Atl_YesMyLord', 'text': 'Ln0677.0.option.YesMyLord'},
]
if jump == 'Atl_WhyMe':
self.say('Ln0682.0.text.WhyNotYouWhyA')
self.options = [
{'next': 'Atl_YesMyLord', 'text': 'Ln0677.0.option.YesMyLord'},
]
if jump == 'Atl_YesMyLord':
self.say('Ln0692.0.text.PoseidonasLaughsIAmNot')
self.options = [
{'next': 'Atl_GoHarbour', 'text': 'Ln0699.0.option.HeadForTheHarbour'},
]
# GOING TO THE HARBOUR
if jump == 'Atl_GoHarbour':
self.say('Ln0706.0.text.EscortedByTheSameMessenger')
self.options = [
{'next': 'Atl_GoChariot', 'text': 'Ln0711.0.option.TakeTheChariotToGet'},
{'set': ['AtlantisDelay4'], 'next': 'Atl_GoWalk', 'text': 'Ln0712.0.option.WalkToSeeTheCity'},
]
if jump == 'Atl_GoWalk':
self.say('Ln0717.0.text.YouDecideToTakeThe')
self.options = [
{'set': ['AtlantisMessengerYes'], 'next': 'Atl_Hurry', 'text': 'Ln0726.0.option.Hurry'},
]
if jump == 'Atl_GoChariot':
self.say('Ln0731.0.text.YouGetOnTheChariot')
self.options = [
{'next': 'Atl_GetUp', 'text': 'Ln0738.0.option.GetUp'},
{'next': 'Atl_Examine', 'text': 'Ln0739.0.option.ExamineTheMessenger'},
]
if jump == 'Atl_GetUp':
self.say('Ln0744.0.text.YouGetUpTheMessenger')
self.options = [
{'next': 'Atl_NoTime', 'text': 'Ln0749.0.option.KeepGoing'},
{'next': 'Atl_Examine', 'text': 'Ln0739.0.option.ExamineTheMessenger'},
]
if jump == 'Atl_Examine' and 'ClassPhysician' in self.state:
self.say('Ln0755.0.text.HeHasTwistedHisLeg')
self.options = [
{'set': ['AtlantisMessengerYes'], 'next': 'Atl_Hurry', 'text': 'Ln0726.0.option.Hurry'},
]
if jump == 'Atl_Examine' and 'ClassPhysician' not in self.state:
self.say('Ln0765.0.text.TheMessengerSeemsBadlyInjured')
self.options = [
{'next': 'Atl_CallHelp', 'text': 'Ln0770.0.option.CallForHelp'},
{'next': 'Atl_NoTime', 'text': 'Ln0771.0.option.LeaveHimBehind'},
]
if jump == 'Atl_CallHelp':
self.say('Ln0776.0.text.YouCallForHelpAnd')
self.options = [
{'set': ['AtlantisMessengerNo', 'AtlantisDelay4'], 'next': 'Atl_Hurry', 'text': 'Ln0726.0.option.Hurry'},
]
if jump == 'Atl_NoTime':
self.say('Ln0789.0.text.YouJustDontHaveThe')
self.options = [
{'set': ['AtlantisMessengerNo'], 'next': 'Atl_Hurry', 'text': 'Ln0726.0.option.Hurry'},
]
# IN THE HARBOUR
if jump == 'Atl_Hurry':
if 'AtlantisMessengerYes' in self.state:
self.say('Ln0803.0.text.HurryingAsMuchAsPossible')
if 'AtlantisMessengerNo' in self.state:
self.say('Ln0814.0.text.HurryingAsMuchAsPossible')
self.options = [
{'next': 'Atl_Speech', 'text': 'Ln0808.0.option.SpeakToTheCaptains'},
{'next': 'Atl_LoadShips', 'text': 'Ln0809.0.option.LoadTheShips'},
]
if jump == 'Atl_Speech':
self.say('Ln0825.0.text.YouSpeakBrieflyButWith')
self.options = [
{'next': 'Atl_LoadShips', 'text': 'Ln0809.0.option.LoadTheShips'},
]
if jump == 'Atl_LoadShips' and 'AtlantisArt' in self.state:
self.say('Ln0835.0.text.YouGiveTheOrderTo')
self.options = [
{'next': 'Atl_SaveSculptures', 'text': 'Ln0842.0.option.MostlySculptures'},
{'next': 'Atl_SaveBooks', 'text': 'Ln0843.0.option.MostlyBooks'},
{'next': 'Atl_SavePaintings', 'text': 'Ln0844.0.option.MostlyPaintings'},
{'next': 'Atl_SaveBalance', 'text': 'Ln0845.0.option.AnEvenBalance'},
]
if jump == 'Atl_SaveSculptures':
self.say('Ln0850.0.text.AhTheSculpturalMasterpiecesOf')
self.options = [
{'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},
]
if jump == 'Atl_SaveBooks':
self.say('Ln0862.0.text.FromTheAncientMythsOf')
self.options = [
{'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},
]
if jump == 'Atl_SavePaintings':
self.say('Ln0874.0.text.AtlanteanPaintingBeganOnCave')
self.options = [
{'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},
]
if jump == 'Atl_SaveBalance':
self.say('Ln0886.0.text.YouTryToSaveA')
self.options = [
{'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},
]
if jump == 'Atl_LoadShips' and 'AtlantisPeople' in self.state:
self.say('Ln0898.0.text.YouWantToSaveThe')
self.options = [
{'next': 'Atl_SaveFamilies', 'text': 'Ln0905.0.option.TheSailorsFamilies'},
{'next': 'Atl_SaveCelebs', 'text': 'Ln0906.0.option.FamousIndividuals'},
{'next': 'Atl_SaveRandom', 'text': 'Ln0907.0.option.WhoeverIsClosest'},
]
if jump == 'Atl_SaveFamilies':
self.say('Ln0912.0.text.YouTellTheSailorsTo')
self.options = [
{'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},
]
if jump == 'Atl_SaveCelebs':
self.say('Ln0924.0.text.YouSendOutSailorsTo')
self.options = [
{'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},
]
if jump == 'Atl_SaveRandom':
self.say('Ln0936.0.text.YouHaveToBePractical')
self.options = [
{'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},
]
if jump == 'Atl_LoadShips' and 'AtlantisKnowledge' in self.state:
self.say('Ln0948.0.text.ThePeopleOfAtlantisAre')
self.options = [
{'next': 'Atl_SaveFamiliesTwo', 'text': 'Ln0957.0.option.OfCourse'},
{'next': 'Atl_InsaneCruelBastard', 'text': 'TermDlg.Common.No2'},
]
if jump == 'Atl_SaveFamiliesTwo':
self.say('Ln0963.0.text.TheSailorsAreOverjoyedAnd')
self.options = [
{'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},
]
if jump == 'Atl_InsaneCruelBastard':
self.say('Ln0977.0.text.YouSpeakOfTheImportance')
self.options = [
{'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},
]
if jump == 'Atl_Sail' and 'AtlantisMessengerYes' in self.state:
self.say('Ln0991.0.text.TheShipsAreReadyThe')
self.options = [
{'next': 'Atl_StayBehind', 'text': 'Ln1000.0.option.StayBehindSoHeCan'},
{'next': 'Atl_GoodbyeMessenger', 'text': 'Ln1001.0.option.SayGoodbye'},
]
if jump == 'Atl_GoodbyeMessenger':
self.say('Ln1006.0.text.HeavyHeartedYouSayGoodbye')
self.options = [
{'next': 'Atl_SetSail', 'text': 'Ln1015.0.option.SetSail'},
]
if jump == 'Atl_StayBehind':
self.say('Ln1020.0.text.ItIsNotEasyTo')
self.options = [
{'next': 'Atl_Watch', 'text': 'Ln1027.0.option.WatchingTheShipsTakeOff'},
{'next': 'Atl_Tavern', 'text': 'Ln1028.0.option.InATavern'},
{'next': 'Atl_Palace', 'text': 'Ln1029.0.option.InThePalace'},
]
if jump == 'Atl_Watch':
self.say('Ln1034.0.text.YouSitInTheHarbour')
self.options = [
{'next': 'MessageBoardInterface_On', 'text': 'Ln0039.0.option.IOpenMyEyes', 'short': 'Ln1047.0.short.End'},
]
if jump == 'Atl_Tavern':
self.say('Ln1052.0.text.YouSitDownInA')
self.options = [
{'next': 'MessageBoardInterface_On', 'text': 'Ln0039.0.option.IOpenMyEyes', 'short': 'Ln1047.0.short.End'},
]
if jump == 'Atl_Palace':
self.say('Ln1072.0.text.PoseidonasGreetsYouLikeAn')
self.options = [
{'next': 'MessageBoardInterface_On', 'text': 'Ln0039.0.option.IOpenMyEyes', 'short': 'Ln1047.0.short.End'},
]
if jump == 'Atl_Sail' and 'AtlantisMessengerNo' in self.state:
self.say('Ln1088.0.text.TheShipsAreReadyOne')
self.options = [
{'next': 'Atl_SetSail', 'text': 'Ln1015.0.option.SetSail'},
]
if jump == 'Atl_SetSail':
self.say('Ln1100.0.text.TheTimeHasComeYou')
self.options = [
{'next': 'Atl_LookLand', 'text': 'Ln1107.0.option.SailOnward'},
]
if jump == 'Atl_LookLand':
self.say('Ln1112.0.text.DaysPassTerribleWavesShake')
self.options = [
{'next': 'Atl_Land', 'text': 'Ln1119.0.option.FindANewHome'},
]
if jump == 'Atl_Land':
if 'ClassFarmer' in self.state:
self.say('Ln1124.0.text.OneDayYouComeUpon')
if 'ClassMagician' in self.state:
self.say('Ln1142.0.text.OneDayYouComeUpon')
if 'ClassPhysician' in self.state:
self.say('Ln1160.0.text.OneDayYouComeUpon')
if 'ClassPoet' in self.state:
self.say('Ln1178.0.text.OneDayYouComeUpon')
if 'ClassScientist' in self.state:
self.say('Ln1196.0.text.OneDayYouComeUpon')
self.options = [
{'next': 'MessageBoardInterface_On', 'text': 'Ln0039.0.option.IOpenMyEyes', 'short': 'Ln1047.0.short.End'},
]
if jump == 'MessageBoardInterface_On':
self.goto('Atl_Start')
return
self.choices = [self.locale[o.get('short', o['text'])] for o in self.options]
# Script entry point: start the bot's long-running event loop.
if __name__ == '__main__':
    bot.run()
|
import numpy as np
def rss(y_true, y_pred, df=None):
    """Residual sum of squares.

    *df* is accepted but unused, so every criterion in this module shares
    the same (y_true, y_pred, df) signature.
    """
    residual = y_true - y_pred
    return (residual ** 2).sum()
def cv(y_true, y_pred, df):
    """Cross-validation criterion: sum of squared leverage-scaled residuals.

    Each residual is divided by (1 - df); df is applied element-wise
    (presumably the leverage values -- TODO confirm with callers).
    """
    scaled = (y_true - y_pred) / (1 - df)
    return (scaled ** 2).sum()
def gcv(y_true, y_pred, df):
    """Generalized cross-validation criterion: RSS / (1 - df/n)**2."""
    resid_ss = ((y_true - y_pred) ** 2).sum()  # inlined rss()
    n = len(y_true)
    return resid_ss / ((1 - (df / n)) ** 2)
def aic(y_true, y_pred, df):
    """Akaike information criterion (up to constants): log(RSS) + 2*df/n."""
    resid_ss = ((y_true - y_pred) ** 2).sum()  # inlined rss()
    n = len(y_true)
    return np.log(resid_ss) + (2 * df) / n
def aicc(y_true, y_pred, df):
    """Small-sample corrected AIC: log(RSS) + 2*(df+1)/(n - df - 2)."""
    resid_ss = ((y_true - y_pred) ** 2).sum()  # inlined rss()
    n = len(y_true)
    return np.log(resid_ss) + (2 * (df + 1)) / (n - df - 2)
|
#!/usr/bin/python
import time
import numpy as np
import sys
#tos stuff
from DecodedMsg import *
from tinyos.message import MoteIF
class MyClass:
    # Python 2 demo driver: sends random coefficient rows to TinyOS motes
    # over a serial-forwarder connection and checks their null-space
    # computation against a local numpy reference.  send() and receive()
    # ping-pong forever (receive ends by calling send again).
    def __init__(self,N):
        # Timestamp of the previous round (used only by the commented-out
        # timing code below).
        self.prevtime = time.time()
        # Problem size: the reference matrix A is N x N.
        self.N = N
        self.A = make_A_matrix(self.N)
        self.current_row = 0;
        # Create a MoteIF
        self.mif = MoteIF.MoteIF()
        # Attach a source to it
        self.source = self.mif.addSource("sf@localhost:9002")
        # SomeMessageClass.py would be generated by MIG
        self.mif.addListener(self, DecodedMsg)
    def send(self):
        # Broadcast (0xFFFF) the current row index plus a fresh random
        # coefficient vector to the motes.
        smsg = DecodedMsg()
        #this line here needs to be changed to just be random coefficients.
        blah = np.random.randn(1,self.N)
        smsg.set_V_coeff(blah[0,:])
        #smsg.set_V_coeff(self.A[self.current_row])
        smsg.set_crow(self.current_row)
        smsg.set_data([1])
        self.mif.sendMsg(self.source, 0xFFFF, smsg.get_amType(), 0, smsg)
    # Called by the MoteIF's receive thread when a new message
    # is received
    def receive(self, src, msg):
        # Throttle the send/receive loop to roughly one round per second.
        time.sleep(1)
        m = DecodedMsg(msg.dataGet())
        timeformat = '%Y/%d/%m %H:%M:%S'
        print 'Received message %s:' % time.strftime(timeformat)
        print ' true current row: ', self.current_row
        ## get received data from mote
        rec_row = m.get_crow()
        print rec_row
        x_mote = np.array(m.get_V_coeff())
        #x_mote = x_mote[0:self.current_row+1]
        print 'mote result: ', x_mote
        ## check functionality in python
        # Reference computation: V = rows seen so far; columns 1,3,5,7 are
        # the rank-deficient ones planted by make_A_matrix, so their
        # transpose has a non-trivial null vector for nullvec() to find.
        V = self.A[:self.current_row+1]
        #print 'A', self.A
        #print 'V', V
        #U, S, W = np.linalg.svd(V.T)
        #print S
        Vnull = V[ :, [1,3,5,7] ]
        z = nullvec(Vnull.T)
        #print z
        # NOTE(review): ':1' below is almost certainly a typo -- np.mat's
        # string parser cannot evaluate it (probably meant '-1' or '1').
        ant_vec = np.mat('[0; 0; :1; 0; 1; 0; 1; 0]')
        Vant = V*ant_vec
        if len(z)>0:
            print 'antidoteresult: ',z.T*Vant
        else:
            print 'antidoteresult: ',[]
        if len(z)>0:
            #x_python = np.dot(z.T, V[:,0])
            #print x_python
            #print np.shape(z), np.shape(Vnull)
            #print np.matrix(Vnull).T*np.matrix(z)
            nulldata= z.T*np.ones((self.current_row+1,1))
            antdata = z.T*Vant
            finalresult = nulldata - antdata
            maindimension = V[:,[0]]
            xdot = z.T*maindimension
            finalresult = (finalresult/xdot)
            print 'final result: ',finalresult
        else:
            print []
        #U,S,V = np.linalg.svd(Vnull.T)
        #print S
        #U, s, W = np.linalg.svd(Vnull.T)
        #print W.T
        #print self.A[m.get_current_row()][:]
        #print m.get_current_row()
        #print S
        #V_null = self.A[0:self.current_row+1,[1,3, 9, 14]]
        #U, S, W = np.linalg.svd(V_null)
        #print S
        #if m.get_perform_svd() == self.N:
        ##print ' svd received:'
        #Svals = m.get_W()
        #print 'Rx svd: ', Svals
        #U,S,V = np.linalg.svd(self.A)
        ##S = [s**2 for s in S]
        ##print ' svd check:'
        #print 'PC svd: ', S
        #self.perform_svd = 0
        #self.A = make_A_matrix(self.N)
        #print 'MSE: ', np.linalg.norm(np.array(S)-np.array(Svals),2)
        #proctime = time.time() - self.prevtime
        #print 'Elapsed time: %f seconds' % proctime
        #else:
        #self.prevtime = time.time()
        #self.perform_svd += 1
        # Advance to the next row (wrapping) and immediately start the
        # next round.
        self.current_row = (self.current_row + 1) % self.N
        #if self.current_row == 0:
        #self.A = make_A_matrix(self.N)
        self.send()
def make_A_matrix(N):
A = np.random.randn(N,N)
B = np.matrix(np.random.randn(4,4))
U, s, W = np.linalg.svd(B)
s[-1] = 0
B = np.array(U*np.diag(s)*W)
A[0:4,1] = B[:,0]
A[0:4,3] = B[:,1]
A[0:4,5] = B[:,2]
A[0:4,7] = B[:,0]
print A
return A
def nullvec(X, tol=1e-5):
    """Return null-space vector(s) of a 2-D array X as an (m, 1) np.matrix.

    Uses the SVD: right singular vectors whose singular value is below
    *tol* span the (numerical) null space.  For a wide matrix the last
    right singular vector is returned unconditionally; for a full-rank
    square/tall matrix the result is an empty (0, 1) matrix.
    """
    n_rows = np.shape(X)[0]
    n_cols = np.shape(X)[1]
    if n_rows == 0:
        V = np.eye(n_cols)
    else:
        _, s, Vt = np.linalg.svd(X)
        V = Vt.T
    if n_cols > n_rows:
        # Underdetermined: the last right singular vector is in the null space.
        z = V[:, -1]
    else:
        small = s < tol
        z = V[:, small] if np.any(small) else []
    # Stack whatever was selected into a single column matrix.
    return np.matrix(np.array(z).reshape(-1, 1))
if __name__ == "__main__":
print "Running"
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
if len(sys.argv) > 1:
N = int(sys.argv[1])
else:
N = 6
m = MyClass(N)
time.sleep(1)
m.send()
|
def get_number_from_string(strng):
    """Concatenate all digit characters of *strng* and return them as an int.

    Raises ValueError when the string contains no digits (int('') fails).
    """
    digits = [ch for ch in strng if ch.isdigit()]
    return int(''.join(digits))
|
import ConfigParser
import os
import sys
# Absolute path to ../config/main.ini relative to this script's location,
# so it works regardless of the current working directory.
configurationFile = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    '../config/main.ini')
# Fail fast with exit code 1 when the configuration file is missing.
if not os.path.exists(configurationFile):
    print 'Could not find configuration file: %s' % configurationFile
    sys.exit(1)
# Parse the INI file; exit code 2 distinguishes a parse failure from a
# missing file.  (ConfigParser is the Python 2 module name.)
try:
    config = ConfigParser.RawConfigParser()
    config.read(configurationFile)
except Exception as e:
    print 'Failed to parse config file: %s, %s' % (configurationFile, e)
    sys.exit(2)
|
# 문제 설명
# 점심시간에 도둑이 들어, 일부 학생이 체육복을 도난당했습니다. 다행히 여벌 체육복이 있는 학생이 이들에게 체육복을 빌려주려 합니다. 학생들의 번호는 체격 순으로 매겨져 있어, 바로 앞번호의 학생이나 바로 뒷번호의 학생에게만 체육복을 빌려줄 수 있습니다. 예를 들어, 4번 학생은 3번 학생이나 5번 학생에게만 체육복을 빌려줄 수 있습니다. 체육복이 없으면 수업을 들을 수 없기 때문에 체육복을 적절히 빌려 최대한 많은 학생이 체육수업을 들어야 합니다.
# 전체 학생의 수 n, 체육복을 도난당한 학생들의 번호가 담긴 배열 lost, 여벌의 체육복을 가져온 학생들의 번호가 담긴 배열 reserve가 매개변수로 주어질 때, 체육수업을 들을 수 있는 학생의 최댓값을 return 하도록 solution 함수를 작성해주세요.
# 제한사항
# 전체 학생의 수는 2명 이상 30명 이하입니다.
# 체육복을 도난당한 학생의 수는 1명 이상 n명 이하이고 중복되는 번호는 없습니다.
# 여벌의 체육복을 가져온 학생의 수는 1명 이상 n명 이하이고 중복되는 번호는 없습니다.
# 여벌 체육복이 있는 학생만 다른 학생에게 체육복을 빌려줄 수 있습니다.
# 여벌 체육복을 가져온 학생이 체육복을 도난당했을 수 있습니다. 이때 이 학생은 체육복을 하나만 도난당했다고 가정하며, 남은 체육복이 하나이기에 다른 학생에게는 체육복을 빌려줄 수 없습니다.
# 입출력 예
# n lost reserve return
# 5 [2, 4] [1, 3, 5] 5
# 5 [2, 4] [3] 4
# 3 [3] [1] 2
def solution(n, lost, reserve):
    """Return the maximum number of students who can attend gym class.

    Students who brought a spare uniform (reserve) may lend to an adjacent
    student number (i-1 or i+1) who lost theirs.  Students appearing in
    both lists cancel out first (they keep their single remaining uniform).

    Fix: the original iterated over a *set* of reserves, whose order is
    unspecified -- the greedy is only optimal when reserves are processed
    in ascending order, lending to i-1 before i+1 (e.g. lost=[2,4],
    reserve=[1,3] processed 3-first loses one lend).
    """
    spare = set(reserve) - set(lost)
    need = set(lost) - set(reserve)
    for i in sorted(spare):
        # Prefer the smaller neighbour so larger reserves stay available
        # for larger lost numbers.
        if i - 1 in need:
            need.remove(i - 1)
        elif i + 1 in need:
            need.remove(i + 1)
    return n - len(need)
#Nombre: Bianca Munteanu
#Asignatura: Programación para el tratamiento de datos
# Ejercicio 1, ejercicio 2 y ejercicio 3 de la PEC2
# Ejercicio 2: Realizar un programa que guarde los elementos en común que tienen dos listas.
#modulos
def comunes_listas(list1, list2):
    """Return the set of elements common to both lists.

    list1, list2: lists of any length.
    Returns a set, so duplicates are reported only once (same contract as
    the original, which returned set(result)).
    """
    # Set intersection is O(len1 + len2) instead of scanning list2 once
    # per element of list1, and deduplicates exactly like the original.
    return set(list1) & set(list2)
def pregunta_longitud():
    """Ask the user how many values the list should hold; return it as an int.

    Raises ValueError when the answer is not a valid integer.
    """
    respuesta = input("¿Cuántos valores quieres que tenga la lista? ")
    return int(respuesta)
def creacion_listas(x, y):
    """Fill list *y* with *x* integers read from the user and return it.

    x: number of values to read.
    y: the list to append into (modified in place and also returned).
    """
    for posicion in range(x):
        valor = int(input("Escribe el valor de la posicion %i: " % posicion))
        y.append(valor)
    return y
# main program
if __name__ == "__main__":
    # Intro banner.  Fixed from the original, which claimed the program
    # would *sum* ("sumar") the lists -- it reports their common elements
    # -- and contained typos ("creadas ti", "tenddrán").
    print("----------------------------------------------------------------------------")
    print("\nVamos a buscar los elementos en común de dos listas creadas por ti. Primero, te pediremos el número de valores que tendrán las 2 listas")
    print("A continuación, nos indicarás cada valor numérico de las listas, sin decimales")
    print("Por último, en la pantalla nos mostrará los elementos en común que tienen esas listas.")
    print("\n----------------------------------------------------------------------------")
    # First list: ask for its length, then read each value.
    lista_prueba = []
    pregunta_1 = pregunta_longitud()
    print("Preparando la lista para introducir valores...")
    creacion_listas(pregunta_1, lista_prueba)
    print("La primera lista es: ", lista_prueba)
    # Second list, same procedure.
    lista_prueba2 = []
    pregunta_2 = pregunta_longitud()
    print("Preparando la lista para introducir valores...")
    creacion_listas(pregunta_2, lista_prueba2)
    print("La segunda lista es: ", lista_prueba2)
    # Report the common elements.
    lista_comun = comunes_listas(lista_prueba, lista_prueba2)
    print("Los elementos en común son: ", lista_comun)
|
from itertools import cycle
def phase(seq, n):
    """One forward FFT phase (Advent of Code 2019 day 16, part 1 style).

    Output digit i (1-based) is the last decimal digit of |sum(seq * p_i)|,
    where p_i is the base pattern [0, 1, 0, -1] with every element repeated
    i times and shifted left by one.

    NOTE(review): rather than rebuilding the full pattern per row, this
    truncates both the data copy and the pattern once row i's leading zeros
    cover the start of the input -- the statement order below is load-bearing.
    """
    base = [0, 1, 0, -1]
    mod_seq = [x for x in seq]  # working copy of the input
    pattern_iter = cycle(base)
    new_seq = []
    v = 0
    next(pattern_iter)  # skip the pattern's leading 0 (the left shift)
    for i in range(1, n+1):
        v = 0
        mul = next(pattern_iter)
        for s in mod_seq:
            v += s * mul
            mul = next(pattern_iter)
        nout = abs(v) % 10  # keep only the ones digit
        new_seq.append(nout)
        # Prepare the pattern for the NEXT row: each base element repeated i+1 times.
        rep = i + 1
        new_pattern = []
        for p in base:
            new_pattern += [p] * rep
        if len(new_pattern[1:]) >= len(seq):
            # The next row starts with i zeros, so drop that prefix from
            # both the data and the pattern instead of multiplying by 0.
            mod_seq = seq[i:]
            pattern_iter = cycle(new_pattern[i+1:n+1])
        else:
            pattern_iter = cycle(new_pattern)
            next(pattern_iter)  # skip the leading 0 again
    return new_seq
def phase2(seq):
    """One FFT phase for the back half of the signal.

    There each output digit is simply the suffix sum of the input mod 10,
    so we accumulate from the right and reverse at the end.
    """
    running = seq[-1]
    out = [running]
    for idx in range(len(seq) - 2, -1, -1):
        running = (running + seq[idx]) % 10
        out.append(running)
    out.reverse()
    return out
f = open("input", 'r')
seq = [int(x) for x in f.read().strip('\n')]
n = len(seq)
nseq = [x for x in seq]
for i in range(0, 1):
nseq = phase(nseq, n)
last8 = "".join([str(c) for c in nseq[:8] ] )
print (last8)
nseq = [x for x in seq] * 10000
offset = int("".join([str(c) for c in nseq[0:7] ] ))
nseq = nseq[offset:]
for i in range(0, 100):
nseq = phase2(nseq)
last8 = "".join([str(c) for c in nseq[:8] ] )
print (last8)
f.close() |
from _typeshed import Incomplete
# Type stub (.pyi style): parameters whose real types are not yet pinned
# down are annotated with typeshed's `Incomplete` placeholder.
def newman_betweenness_centrality(
    G,
    v: Incomplete | None = None,
    cutoff: Incomplete | None = None,
    normalized: bool = True,
    weight: Incomplete | None = None,
): ...
def edge_load_centrality(G, cutoff: bool = False): ...
|
__author__ = "Narwhale"
# import copy
#
# a = 'a'
# b = copy.copy(a)
# print(b)
###################################
# def multipliers():
# return [lambda x:i*x for i in range(4)]
# print([m(2) for m in multipliers()])
# a = [lambda x:i*x for i in range(4)]
# print(a)
# for i in a:
# print(i(2))
###############################
#现有两个元组(('a'),('b')),(('c'),('d')),请使用python中匿名函数生成列表[{'a':'c'},{'b':'d'}]
# a = (('a'),('b'))
# b = (('c'),('d'))
#
# res = zip(a,b)
# # for i in res:
# # print(i)
# res1 = lambda x,y:zip(x,y)
# dic =[{k:v} for k,v in res1(a,b)]
# print(dic)
#######################################
# v = dict.fromkeys(['k1','k2'],[])
# print(v)
# print(v['k1'])
# v['k1'].append(666)
# print(v)
# v['k1'].append(777)
# print(v)
# v['k1']=777
# print(v)
#print('\n'.join([' '.join(['%s*%s=%-2s' % (j, i, i * j) for j in range(1, i + 1)]) for i in range(1, 10)]))
#################################################
# class Parent(object):
# x = 1
# class Child1(Parent):
# pass
# class Child2(Parent):
# pass
# print(Parent.x, Child1.x, Child2.x)
# Child1.x = 2
# print(Parent.x, Child1.x, Child2.x)
# Parent.x = 3
# print(Parent.x, Child1.x, Child2.x)
#########################################
#gen的x在生成器内,生成器有自己的作用域。所以x会报错,但是用lambda就不会了。
#
# class A(object):
# x = 1
# gen = (lambda x:x for _ in range(10))
#
#
# a = A()
# print(a.gen) |
def array_diff(a, b):
    """Return the elements of *a* that do not occur in *b*, preserving order.

    Membership is tested against a set built once, so the whole pass is
    O(len(a) + len(b)).
    """
    excluded = set(b)
    return [item for item in a if item not in excluded]
|
#1
# Notebook cell markers (#1, #2, ...) delimit the original cells; the
# %matplotlib line below is an IPython magic and only runs in a notebook.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
%matplotlib inline
np.random.seed(2)
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
from keras.models import Sequential
# Dense is the fully-connected layer type
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
# RMSprop: adaptive-learning-rate optimizer that speeds up training
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
sns.set(style='white', context='notebook', palette='deep')
#2
# Load the Kaggle digit-recognizer CSVs.
# NOTE(review): hard-coded absolute Windows paths -- adjust before running elsewhere.
f1=open("E:/Tinky/大学课件及作业/6 自学课/6-3.Kaggle竞赛/C3_手写数字识别/train.csv")
f2=open("E:/Tinky/大学课件及作业/6 自学课/6-3.Kaggle竞赛/C3_手写数字识别/test.csv")
train = pd.read_csv(f1)
test = pd.read_csv(f2)
#3
# Split the features (X) from the target digit label (Y).
Y_train = train["label"]
X_train = train.drop(labels = ["label"],axis = 1)
del train # free the original frame to save memory
g = sns.countplot(Y_train)# bar chart: how many samples of each digit
Y_train.value_counts()
#4
# Check for missing values
print(X_train.isnull().any().describe(),'\n',test.isnull().any().describe())
#5
# Normalization
# Grayscale normalization reduces the effect of illumination differences,
# and the CNN converges faster on 0-1 values than on 0-255.
X_train = X_train / 255.0
test = test / 255.0
#6
# Reshape the flat 784-pixel rows into 28x28x1 image tensors.
X_train = X_train.values.reshape(-1,28,28,1)
test = test.values.reshape(-1,28,28,1)
# One-hot encode the labels
# (to_categorical is Keras' one-hot encoding helper)
Y_train = to_categorical(Y_train, num_classes = 10)
#7
# Hold out 10% of the training data as a validation set.
random_seed = 2
X_train,X_val,Y_train,Y_val=train_test_split(X_train,Y_train,test_size=0.1,random_state=random_seed)
#8
# Sanity check: display one training image (imshow renders the 28x28 array).
g = plt.imshow(X_train[0][:,:,0])
#9
# Set the CNN model
# my CNN architechture is In -> [[Conv2D->relu]*2 -> MaxPool2D -> Dropout]*2 -> Flatten -> Dense -> Dropout -> Out
# (The quoted Chinese block below outlines the Keras workflow:
#  Step1 choose a model, Step2 build the layers, Step3 compile,
#  Step4 train, Step5 predict.)
'''
Keras搭建神经网络的步骤:
Step1:选择模型
Step2:构建网络层
Step3:编译
Step4:训练
Step5:预测
'''
# Step1: choose a model (sequential vs functional API) -- sequential here.
'''Step1:选择模型(序贯模型或函数式模型)这里是序贯模型'''
model = Sequential()# Sequential builds the model layer by layer
# Step2: build the network layers (input / hidden / output).
'''Step2:构建网络层(输入层、隐藏层、输出层)'''
# Conv2D adds a 2-D convolution layer
model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same', activation ='relu', input_shape = (28,28,1)))
model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same', activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
# Dropout randomly disables units during training to reduce overfitting
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same', activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same', activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
#10
# Define the RMSprop optimizer
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# Step3: compile (optimizer, loss function, evaluation metrics).
'''Step3:编译(优化器、损失函数、性能评估)'''
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
#11
# Learning-rate annealer: halve the LR when validation accuracy plateaus.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
                                            patience=3,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.00001)
epochs = 1 # Turn epochs to 30 to get 0.9967 accuracy
batch_size = 86
#12
# Without data augmentation i obtained an accuracy of 0.98114
#history = model.fit(X_train, Y_train, batch_size = batch_size, epochs = epochs,
#          validation_data = (X_val, Y_val), verbose = 2)
# With data augmentation to prevent overfitting (accuracy 0.99286)
# Data augmentation configuration:
datagen = ImageDataGenerator(
        featurewise_center=False, # do not set the dataset mean to 0
        samplewise_center=False, # do not set each sample's mean to 0
        featurewise_std_normalization=False, # do not divide inputs by the dataset std
        samplewise_std_normalization=False, # do not divide each input by its own std
        zca_whitening=False, # no ZCA whitening
        rotation_range=10, # randomly rotate images by up to 10 degrees
        zoom_range = 0.1, # randomly zoom by up to 10%
        width_shift_range=0.1, # random horizontal shift (fraction of width)
        height_shift_range=0.1, # random vertical shift (fraction of height)
        horizontal_flip=False, # no horizontal flips (digits are not symmetric)
        vertical_flip=False) # no vertical flips
datagen.fit(X_train)
#13
# Fit the model
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
                              epochs = epochs, validation_data = (X_val,Y_val),
                              verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
                              , callbacks=[learning_rate_reduction])
# (The quoted Chinese block below explains fit_generator's parameters:
#  batch_size = samples per weight update; epochs = training rounds;
#  validation_data = held-out set; steps_per_epoch = batches per epoch;
#  verbose: 0 silent, 1 progress bar, 2 one line per epoch;
#  callbacks = hooks invoked during training.)
'''
参数解释:
batch_size:训练多少个参数更新一下权重
epochs:训练的轮数
validation_data:验证集
steps_per_epoch:将一个epoch分为多少个steps,也就是划分一个batch_size多大
verbose:0为不在标准输出流输出日志信息,1为输出进度条记录,2为每个epoch输出一行记录
callbacks:训练中会调用的回调函数
'''
#14
# Plot the loss and accuracy curves for training and validation
fig, ax = plt.subplots(2,1)
ax[0].plot(history.history['loss'], color='b', label="Training loss")
ax[0].plot(history.history['val_loss'], color='r', label="validation loss",axes =ax[0])
legend = ax[0].legend(loc='best', shadow=True)
ax[1].plot(history.history['acc'], color='b', label="Training accuracy")
ax[1].plot(history.history['val_acc'], color='r',label="Validation accuracy")
legend = ax[1].legend(loc='best', shadow=True)
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Plot a confusion matrix as an annotated heatmap.

    Parameters
    ----------
    cm : (n_classes, n_classes) array of raw counts.
    classes : iterable of axis tick labels.
    normalize : if True, each row is scaled to fractions that sum to 1.
    title, cmap : figure cosmetics.

    Bug fix: normalization is now applied *before* plt.imshow, so the
    rendered heatmap matches the annotated cell values (the original
    normalized only after drawing, leaving the image un-normalized).
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Choose black or white text per cell for contrast against the colormap.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
'''Step5:预测'''
# Step 5: evaluate on the held-out validation split.
# Predict class probabilities for the validation set.
Y_pred = model.predict(X_val)
# Collapse predicted probabilities to class indices (argmax over classes).
Y_pred_classes = np.argmax(Y_pred,axis = 1)
# Collapse the one-hot validation labels back to class indices.
Y_true = np.argmax(Y_val,axis = 1)
# compute the confusion matrix
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
# plot the confusion matrix
plot_confusion_matrix(confusion_mtx, classes = range(10))
#16
# Collect the validation samples that were predicted incorrectly.
errors = (Y_pred_classes - Y_true != 0)
Y_pred_classes_errors = Y_pred_classes[errors]
Y_pred_errors = Y_pred[errors]
Y_true_errors = Y_true[errors]
X_val_errors = X_val[errors]
def display_errors(errors_index, img_errors, pred_errors, obs_errors):
    """Show six misclassified 28x28 images with predicted and true labels."""
    nrows, ncols = 2, 3
    fig, axes = plt.subplots(nrows, ncols, sharex=True, sharey=True)
    # axes.flat walks the grid row-major, matching the original nested loops.
    for n, axis in enumerate(axes.flat):
        idx = errors_index[n]
        axis.imshow(img_errors[idx].reshape((28, 28)))
        axis.set_title("Predicted label :{}\nTrue label :{}".format(pred_errors[idx], obs_errors[idx]))
# Probabilities assigned to the (wrong) predicted labels.
Y_pred_errors_prob = np.max(Y_pred_errors,axis = 1)
# Probabilities the model assigned to the *true* labels of those samples.
true_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1))
# Gap between predicted-label confidence and true-label confidence.
delta_pred_true_errors = Y_pred_errors_prob - true_prob_errors
# Ascending sort: the most confidently-wrong samples end up last.
sorted_dela_errors = np.argsort(delta_pred_true_errors)
# Top 6 errors
most_important_errors = sorted_dela_errors[-6:]
# Show the top 6 errors
display_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors)
#17
# Predict results on the competition test set.
results = model.predict(test)
# Select the index with the maximum probability.
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
#18
# Kaggle submission file: ImageId 1..28000 plus the predicted Label column.
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("E:/Tinky/大学课件及作业/6 自学课/6-3.Kaggle竞赛/C3_手写数字识别/submission_Exp.csv",index=False)
class Solution:
    def lengthOfLastWord(self, s: str) -> int:
        """Return the length of the last space-separated word in *s* (0 if none).

        Fixes over the original: bounds are checked *before* indexing
        (the original relied on Python's negative-index semantics to avoid
        reading out of range), and a dead third loop (`while ...: break`)
        plus a no-op `continue` were removed.
        """
        if not s:
            return 0
        i = len(s) - 1
        # Skip trailing spaces.
        while i >= 0 and s[i] == " ":
            i -= 1
        # Count the characters of the last word.
        length = 0
        while i >= 0 and s[i] != " ":
            length += 1
            i -= 1
        return length
|
# import sys,os
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#
# print(BASE_DIR)
# import os
# import random
#
# files_path = "data/img"
# assert os.path.exists(files_path), "path: '{}' does not exist.".format(files_path)
#
# val_rate = 0.5
#
# files_name = sorted([file.split(".")[0] for file in os.listdir(files_path)])
# files_num = len(files_name)
# a = range(0,files_num)
# print(a)
# val_index = random.sample(a, k=int(files_num*val_rate))
# print(val_index)
class pe():
    """Demo of the legacy sequence protocol: a class that defines only
    __getitem__ is iterable, and iteration stops on IndexError."""

    def __init__(self):
        self.lis = [1, 2, 3]

    def __getitem__(self, index):
        return self.lis[index]
# Iteration works via __getitem__ + IndexError (no __iter__ defined).
p = pe()
for value in p:
    print(value)
from django.contrib import admin
from django.urls import path, include
# URL routing: the Django admin plus the 'level' app's API endpoints.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include('level.urls'))
]
# Re-brand the admin site's header, browser title and index title.
admin.site.site_header = 'Password administration'
admin.site.site_title = 'Password administration'
admin.site.index_title = 'Password administration'
|
import tarfile
import gzip
import numpy as np
import pandas as pd
import os
import dask.dataframe as dd
from sklearn.cluster import KMeans
# Widen pandas console output so wide weather frames print without truncation.
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 40)
class Extractor:
    """Extracts yearly GSOD .tar archives into per-year folders."""

    def __init__(self, source_path):
        self.source_path = source_path

    def tar_extract(self, year, destination_folder="extracted"):
        """Extract <source_path>gsod_<year>.tar into <destination_folder>/<year>."""
        archive = self.source_path + "gsod_" + str(year) + ".tar"
        with tarfile.open(archive, "r") as tar:
            tar.extractall(destination_folder + "/" + str(year))

    def tar_extract_all(self, begin, end, destination_folder="extracted"):
        """Extract every year from *begin* to *end*, inclusive."""
        for year in range(begin, end + 1):
            self.tar_extract(year, destination_folder)
class From_gz_to_csv:
    """Converts gzipped GSOD station files into one CSV file per year."""

    def __init__(self, source_path):
        self.source_path = source_path

    def append_gz_to_csv(self, gz, csv):
        """Append one gzipped station file to *csv*, writing the header row
        only when the CSV does not exist yet."""
        header = ["STN", "WBAN", "YEARMODA", "TEMP", "TEMP_C", "DEWP", "DEWP_C",
                  "SLP", "SLP_C", "STP", "STP_C", "VISIB", "VISIB_C", "WDSP",
                  "WDSP_C", "MXSPD", "GUST", "MAX", "MIN", "PRCP", "SNDP", "FRSHTT"]
        with gzip.open(gz, "r") as handle:
            frame = pd.read_csv(handle, sep=r"\s+", skiprows=1, names=header)
        if os.path.isfile(csv):
            frame.to_csv(csv, mode='a', header=False)
        else:
            frame.to_csv(csv, header=header)

    def convert(self, year, destination_folder="Meteo_CSV"):
        """Fold every station file of *year* into <destination_folder>/<year>.csv."""
        source_dir = self.source_path + str(year)
        target = destination_folder + "/" + str(year) + ".csv"
        for name in os.listdir(source_dir):
            self.append_gz_to_csv(source_dir + "/" + name, target)

    def convert_all(self, begin, end, destination_folder="Meteo_CSV"):
        """Convert all years from *begin* to *end*, inclusive."""
        for year in range(begin, end + 1):
            self.convert(year, destination_folder)
# execute once
# Ext = Extractor("noaa-global-surface-summary-of-the-day/gsod_all_years/")
# Ext.tar_extract_all(1901, 2019)
# csv_creator = From_gz_to_csv("extracted/")
# csv_creator.convert_all(1901, 2019)
class DataFrame_cleaner:
    """Cleans a raw GSOD weather dataframe (dask or pandas).

    NOTE(review): 9999.9 / 999.9 / 99.99 are treated as missing-value
    sentinels per GSOD conventions -- confirm against the NOAA GSOD
    format description.
    """

    def __init__(self, df):
        self.df = df

    def inner_join(self, df2):
        # Join weather rows to station metadata on (station id, WBAN id).
        self.df = dd.merge(left=self.df, right=df2, how="inner", left_on=["STN", "WBAN"], right_on=["USAF", "WBAN"])

    def clean(self):
        """Keep measurement columns, convert sentinels to NaN, split flags."""
        self.df = self.df[["STN","WBAN","YEARMODA","TEMP","DEWP","SLP","STP","VISIB","WDSP",
                           "MXSPD","GUST","MAX","MIN","PRCP","SNDP","FRSHTT"]]
        # Zero-pad station ids to 6 chars so they match the metadata file.
        self.df["STN"] = self.df["STN"].apply(lambda x: x.zfill(6))
        # MAX/MIN may carry a trailing '*' flag -- strip it.
        self.df[["MAX", "MIN"]] = self.df[["MAX", "MIN"]].replace(r"\*", "", regex=True)
        # Replace sentinel "missing" values with NaN.
        self.df[["TEMP", "DEWP", "SLP", "STP", "MAX", "MIN"]] = self.df[["TEMP", "DEWP", "SLP", "STP", "MAX", "MIN"]].replace(9999.9, np.nan)
        self.df[["VISIB", "WDSP", "MXSPD", "GUST", "SNDP"]] = self.df[["VISIB", "WDSP", "MXSPD", "GUST", "SNDP"]].replace(999.9, np.nan)
        # PRCP is "<amount><flag char>": keep only the numeric part.
        self.temp = self.df["PRCP"].str.extract(r"(\d*\.\d*)(.)")
        self.df["PRCP"] = self.temp[0].astype(float)
        self.df["PRCP"] = self.df["PRCP"].replace(99.99, np.nan)
        # FRSHTT packs six indicator digits: fog/rain/snow/hail/thunder/tornado.
        self.df["FRSHTT"] = self.df["FRSHTT"].apply(lambda x: str(x).zfill(6))
        self.temp = self.df["FRSHTT"].str.extract(r"(.)(.)(.)(.)(.)(.)")
        self.df[["FOG", "RAIN", "SNOW", "HAIL", "THUN", "TORN"]] = self.temp
        self.df = self.df.drop("FRSHTT", axis=1)
# ddf_france = dd.read_csv("isd-history.csv", dtype={"STATE": 'object', "USAF": 'object'})
# ddf_france = ddf_france[ddf_france["CTRY"] == "FR"]
# ddf_france = ddf_france[["USAF", "WBAN", "STATION NAME", "LAT", "LON", "ELEV(M)"]]
# ddf_meteo = dd.read_csv("Meteo_CSV/2018.csv", dtype={"STN": 'object'})
# cleaner = DataFrame_cleaner(ddf_meteo)
# cleaner.clean()
# cleaner.inner_join(ddf_france)
# cleaner.df.compute().to_csv("For_clustering_2018.csv")
class Clustering_Preparation:
    """Turns cleaned per-day station data into per-station seasonal averages
    and runs k-means over them.

    NOTE(review): the date literals below ("20-03-2018", ...) rely on
    pd.to_datetime's format inference of day-first strings -- confirm they
    parse as intended; a pandas version change could silently shift the
    season boundaries.
    """

    def __init__(self, df):
        # Keep only the columns relevant to clustering.
        self.df = df[["STATION NAME", "YEARMODA", "LAT", "LON", "ELEV(M)", "TEMP", "MAX", "MIN", "DEWP",
                      "WDSP", "MXSPD", "PRCP", "FOG", "RAIN", "SNOW", "HAIL", "THUN", "TORN"]]

    @staticmethod
    def saison(row, column):
        # Map one day's value into a 4-slot Series (spring, summer, autumn,
        # winter); the other three seasons receive NaN.
        if row["YEARMODA"] >= pd.to_datetime("20-03-2018") and row["YEARMODA"] <= pd.to_datetime("19-06-2018"):
            return pd.Series([row[column], np.nan, np.nan, np.nan])
        elif row["YEARMODA"] >= pd.to_datetime("20-06-2018") and row["YEARMODA"] <= pd.to_datetime("22-09-2018"):
            return pd.Series([np.nan, row[column], np.nan, np.nan])
        elif row["YEARMODA"] >= pd.to_datetime("23-09-2018") and row["YEARMODA"] <= pd.to_datetime("21-12-2018"):
            return pd.Series([np.nan, np.nan, row[column], np.nan])
        else:
            return pd.Series([np.nan, np.nan, np.nan, row[column]])

    def seasonalisation2(self, list_columns):
        """For each column, add <COL>_SPRING/_SUMMER/_AUTUMN/_WINTER via saison()."""
        self.df["YEARMODA"] = pd.to_datetime(self.df["YEARMODA"], format='%Y%m%d')
        for i in list_columns:
            spring = str(i) + "_SPRING"
            summer = str(i) + "_SUMMER"
            autumn = str(i) + "_AUTUMN"
            winter = str(i) + "_WINTER"
            self.df[[spring, summer, autumn, winter]] = self.df[["YEARMODA", i]].apply(lambda row: Clustering_Preparation.saison(row, i), axis=1)

    def seasonalisation(self, list_columns):
        """Older four-pass variant of seasonalisation2 (kept for reference)."""
        self.df["YEARMODA"] = pd.to_datetime(self.df["YEARMODA"], format='%Y%m%d')
        for i in list_columns:
            spring = str(i) + "_SPRING"
            summer = str(i) + "_SUMMER"
            autumn = str(i) + "_AUTUMN"
            winter = str(i) + "_WINTER"
            self.df[spring] = self.df[["YEARMODA", i]].apply(lambda row: row[i] if row["YEARMODA"] >
                pd.to_datetime("19-03-2018") and row["YEARMODA"] < pd.to_datetime("20-06-2018") else np.nan, axis=1)
            self.df[summer] = self.df[["YEARMODA", i]].apply(lambda row: row[i] if row["YEARMODA"] >
                pd.to_datetime("19-06-2018") and row["YEARMODA"] < pd.to_datetime("23-09-2018") else np.nan, axis=1)
            self.df[autumn] = self.df[["YEARMODA", i]].apply(lambda row: row[i] if row["YEARMODA"] >
                pd.to_datetime("22-09-2018") and row["YEARMODA"] < pd.to_datetime("22-12-2018") else np.nan, axis=1)
            self.df[winter] = self.df[["YEARMODA", i]].apply(lambda row: row[i] if (row["YEARMODA"] >
                pd.to_datetime("21-12-2018") and row["YEARMODA"] < pd.to_datetime("31-12-2018")) or
                (row["YEARMODA"] >= pd.to_datetime("01-01-2018") and row["YEARMODA"] <= pd.to_datetime("19-03-2018"))
                else np.nan, axis=1)

    def grouping(self):
        """Average every column per station (drops the date dimension)."""
        #self.df = self.df[["STATION NAME", "LAT", "LON", "ELEV(M)", "TEMP", "MAX", "MIN", "DEWP",
        #"WDSP", "MXSPD", "PRCP", "FOG", "RAIN", "SNOW", "HAIL", "THUN", "TORN"]]
        self.df = self.df.groupby("STATION NAME").mean()

    def normalisation(self):
        """Min-max scale every column to [0, 1]; raw values kept in raw_df."""
        self.raw_df = self.df
        self.df = (self.df - self.df.min()) / (self.df.max() - self.df.min())

    def clustering(self, list_columns, k_clusters_min, k_clusters_max):
        """Run k-means for each k in [min, max]; labels stored in CLUST_<k>.
        LAT/LON are re-attached (un-normalised) for later mapping but are
        not used as clustering features."""
        self.df = self.df[list_columns]
        self.df[["LAT", "LON"]] = self.raw_df[["LAT", "LON"]]
        for i in range(k_clusters_min, k_clusters_max + 1):
            self.K_means = KMeans(i, random_state=0).fit(self.df[list_columns])
            name_colonne = "CLUST_" + str(i)
            self.df[name_colonne] = self.K_means.labels_
# Pipeline: load the cleaned 2018 data, split TEMP/PRCP by season,
# average per station, normalise, and run k-means for k = 2..10.
df = pd.read_csv("For_clustering_2018.csv")
clustering1 = Clustering_Preparation(df)
clustering1.seasonalisation2(["TEMP","PRCP"])
# print(clustering1.df["PRCP_SPRING"].describe())
# print(clustering1.df["PRCP_SUMMER"].describe())
# print(clustering1.df["PRCP_AUTUMN"].describe())
# print(clustering1.df["PRCP_WINTER"].describe())
clustering1.grouping()
clustering1.normalisation()
# Drop stations missing any seasonal average before clustering.
clustering1.df = clustering1.df.dropna(axis=0, how='any')
print(clustering1.df)
list_to_cluster = ["TEMP_SPRING","TEMP_SUMMER","TEMP_AUTUMN","TEMP_WINTER",
                   "PRCP_SPRING","PRCP_SUMMER","PRCP_AUTUMN","PRCP_WINTER"]
clustering1.clustering(list_to_cluster, 2, 10)
clustering1.df.to_csv("Clusters/Clusters_2018_TEMP_PRCP_SAISON.csv")
# print(clustering1.df[pd.isnull(clustering1.df).any(axis=1)])
# print(clustering1.df[pd.isnull(clustering1.df).any(axis=1)])
|
# The digits every row/column/box must contain exactly once.
VALID = set(range(1, 10))

def validSolution(board):
    """valid_solution == PEP8 (forced mixedCase by CodeWars)

    Ported from Python 2 to run on Python 3: `xrange` -> `range`,
    integer `/` -> floor division `//`, and `zip(...)` materialised with
    `list()` so it can be indexed.

    :param board: List of lists, 9 x 9 Sudoku grid
    :return: Boolean indicating if the grid is a valid solution
    """
    boxes = [[] for _ in range(9)]
    columns = list(zip(*board))
    # Gather the nine 3x3 boxes row by row.
    for i, row in enumerate(board):
        for col in range(3):
            boxes[col + i // 3 * 3].extend(row[col * 3:col * 3 + 3])
    for a in range(9):
        if any(b != VALID for b in
               (set(boxes[a]), set(columns[a]), set(board[a]))):
            return False
    return True
|
import os
import numpy as np
import torch
import json
import math
def temp2coco():
    """Convert TT100K-style annotations (train_annotations.json) into COCO
    train.json / val.json next to the script.

    Split rule: images whose file <key>.jpg exists under ./train go to the
    train set; everything else goes to val.
    NOTE(review): the annotation key 'segementation' is misspelled (COCO
    expects 'segmentation') -- downstream readers may depend on the typo;
    confirm before renaming.  Image size is hard-coded to 2048x2048.
    """
    basicPath = os.sys.path[0]
    trainPath = os.path.join(basicPath, "train")
    valPath = os.path.join(basicPath, "val")
    annoPath = os.path.join(basicPath, "train_annotations.json")
    # The 19 traffic-sign categories.
    classList = ['i2', 'i4', 'i5', 'io', 'ip', 'p11', 'p23', 'p26', 'p5', 'pl30',
                 'pl40', 'pl5', 'pl50', 'pl60', 'pl80', 'pn', 'pne', 'po', 'w57']
    class2id = {}  # map category name -> numeric id
    for i in range(len(classList)):
        class2id[classList[i]] = i
    train_dirs = os.listdir(trainPath)
    val_dirs = os.listdir(valPath)  # NOTE(review): computed but never used
    with open(annoPath, 'r') as f:
        anno = json.load(f)
    train_outfile = os.path.join(basicPath, "train.json")
    val_outfile = os.path.join(basicPath, "val.json")
    # COCO skeleton for the training split.
    train_coco = {}
    train_coco['info'] = "spytensor created"
    train_coco['license'] = ["license"]
    train_coco['images'] = []
    train_coco['annotations'] = []
    train_coco['categories'] = []
    for i in range(len(classList)):
        cat = {}
        cat['id'] = i
        cat['name'] = classList[i]
        train_coco['categories'].append(cat)
    # The validation split shares the category table.
    val_coco = {}
    val_coco['info'] = "spytensor created"
    val_coco['license'] = ["license"]
    val_coco['images'] = []
    val_coco['annotations'] = []
    val_coco['categories'] = train_coco['categories']
    sum_train = 0  # running annotation ids, one counter per split
    sum_val = 0
    for key in anno['imgs']:
        img = {}
        img["height"] = 2048
        img["width"] = 2048
        img["id"] = anno['imgs'][key]['id']
        # Strip the leading 6-char path prefix -- presumably "train/"; confirm.
        img["file_name"] = anno['imgs'][key]['path'][6:]
        temp = str(key) + '.jpg'
        if temp in train_dirs:
            train_coco['images'].append(img)
            for j in range(len(anno['imgs'][key]['objects'])):
                target = anno['imgs'][key]['objects'][j]
                obj = {}
                obj['id'] = sum_train
                obj['image_id'] = int(key)
                obj['category_id'] = class2id[target['category']]
                obj['segementation'] = []
                if 'ellipse_org' in target:
                    # Flatten the outline points into [x1, y1, x2, y2, ...].
                    seg_temp = []
                    for v in target['ellipse_org']:
                        seg_temp.append(v[0])
                        seg_temp.append(v[1])
                    obj['segementation'].append(seg_temp)
                # COCO bbox format: [x, y, width, height].
                obj['bbox'] = [math.floor(target['bbox']['xmin'])]
                obj['bbox'].append(math.floor(target['bbox']['ymin']))
                w = math.floor(target['bbox']['xmax'] - target['bbox']['xmin'])
                h = math.floor(target['bbox']['ymax'] - target['bbox']['ymin'])
                obj['bbox'].append(w)
                obj['bbox'].append(h)
                obj['iscrowd'] = 0
                obj['area'] = float((w + 1)*(h + 1))
                train_coco['annotations'].append(obj)
                sum_train = sum_train + 1
        else:
            val_coco['images'].append(img)
            for j in range(len(anno['imgs'][key]['objects'])):
                target = anno['imgs'][key]['objects'][j]
                obj = {}
                obj['id'] = sum_val
                obj['image_id'] = int(key)
                obj['category_id'] = class2id[target['category']]
                obj['segementation'] = []
                if 'ellipse_org' in target:
                    seg_temp = []
                    for v in target['ellipse_org']:
                        seg_temp.append(v[0])
                        seg_temp.append(v[1])
                    obj['segementation'].append(seg_temp)
                obj['bbox'] = [math.floor(target['bbox']['xmin'])]
                obj['bbox'].append(math.floor(target['bbox']['ymin']))
                w = math.floor(target['bbox']['xmax'] - target['bbox']['xmin'])
                h = math.floor(target['bbox']['ymax'] - target['bbox']['ymin'])
                obj['bbox'].append(w)
                obj['bbox'].append(h)
                obj['iscrowd'] = 0
                obj['area'] = float((w + 1) * (h + 1))
                val_coco['annotations'].append(obj)
                sum_val = sum_val + 1
    with open(train_outfile, 'w') as f:
        json.dump(train_coco, f)
    with open(val_outfile, 'w') as f:
        json.dump(val_coco, f)

if __name__ =='__main__':
    temp2coco()
|
# coding=utf8
from opener import Opener
from funcs import coroutine
from funcs import get_cst
import logging, time, datetime, itertools, multiprocessing
logging.basicConfig(format='[%(asctime)s] %(message)s')
class Corp(multiprocessing.Process):
    """Crawler worker: walks a site's company-list pages, optionally fetches
    each company's detail page, and pushes parsed records onto a queue
    consumed by the Commiter process.

    ``corplist_url`` and ``corp_url`` are ``str.format`` templates (filled
    with ``{0}``/``{1}`` placeholders or named fields from a record dict).
    """

    def __init__(self, corplist_url, corp_url, info_from, corplist_post_data=None, corp_post_data=None, corplist_reg=None, corp_regs=None, timeout=5, commit_each_times=30, has_cookie=True, charset='utf8', model=None):
        super().__init__()
        self.charset = charset
        self.info_from = info_from
        self.corplist_url = corplist_url
        self.corp_url = corp_url
        self.opener = Opener(has_cookie=has_cookie, encoding=self.charset)
        self.corplist_post_data = corplist_post_data
        self.corp_post_data = corp_post_data
        self.corplist_reg = corplist_reg
        # Fixed: the original used a mutable default argument (corp_regs=[]).
        self.corp_regs = corp_regs if corp_regs is not None else []
        self.commit_each_times = commit_each_times
        self.timeout = timeout
        if model:
            self.model = model
        else:
            from lib.models import CorpModel
            self.model = CorpModel
        #self._today = get_cst()
        self._today = datetime.date.today()

    def _msg(self, msg=''):
        # NOTE(review): basicConfig at module level sets no level, so INFO
        # messages are dropped by default -- confirm the intended log level.
        #print('%s %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), msg))
        logging.info(msg)

    def set_queue(self, queue):
        """Attach the shared queue consumed by the Commiter process."""
        self.queue = queue

    def process_corp_info(self, corp_info, date_reg=r'(?P<year>\d+)-(?P<month>\d+)-(?P<day>\d+)'):
        """Strip whitespace from all values and normalise insert_date
        (defaults to today when the page supplied none)."""
        for key, value in corp_info.items():
            corp_info[key] = value.strip()
        if 'insert_date' in corp_info:
            corp_info['insert_date'] = self.model._str2date(corp_info['insert_date'], date_reg=date_reg)
        else:
            corp_info['insert_date'] = self._today
        return corp_info

    def get_next_page_url(self):
        """Return an iterable of list-page URLs (subclasses usually override
        this with a generator).

        Fixed: the original returned ``(self.corplist_url)`` -- parentheses
        without a comma are a no-op, so ``run()`` iterated the URL string
        character by character. A one-tuple is returned instead.
        """
        return (self.corplist_url,)

    def get_corp_url(self, corp_info={}):
        """Build the detail-page URL for one company record."""
        return self.corp_url.format(**corp_info)

    def prepare(self):
        """Hook executed once before crawling starts; override as needed."""
        pass

    def fetch_corplist(self, page_url):
        """Fetch one list page; yield a company-info dict per regex match
        ({} for rows the regex failed to capture)."""
        content = self.opener.urlopen(page_url, data=self.corplist_post_data, timeout=self.timeout, times=0)
        return ({} if not search_obj else search_obj.groupdict() for search_obj in self.corplist_reg.finditer(content))

    def fetch_corp(self, corp_info=None):
        """Fetch the detail page and merge every matching regex's groups into
        corp_info; best-effort, so parse errors are swallowed."""
        corp_url = self.get_corp_url(corp_info)
        content = self.opener.urlopen(corp_url, data=self.corp_post_data, timeout=self.timeout, times=5)
        try:
            for reg in self.corp_regs:
                search_obj = reg.search(content)
                search_obj and corp_info.update(search_obj.groupdict())
        except Exception:
            # Fixed: was a bare `except:`; keep best-effort semantics but
            # stop swallowing SystemExit/KeyboardInterrupt.
            pass
        return corp_info

    def before_save(self, corp_info):
        """Final clean-up before the record is queued for saving."""
        corp_info = self.process_corp_info(corp_info)
        corp_info['info_from'] = self.info_from
        return corp_info

    def commit(self):
        self.model.commit()

    @coroutine
    def check_exists(self):
        """Coroutine: send() a corp_info dict, receive its info_from when the
        name is already known (bounded local cache + DB lookup), else None."""
        corp_names_cache = {}
        corp_names_cache_list = []
        cache_length = 0
        result = None
        while 1:
            corp_info = (yield result)
            result = None
            corp_name = corp_info['name'].strip()
            if corp_name not in corp_names_cache:
                corp_names_cache[corp_name] = self.info_from
                corp_names_cache_list.insert(0, corp_name)
                cache_length += 1
                if cache_length > self.commit_each_times:
                    # Evict the oldest cached name to bound memory.
                    del corp_names_cache[corp_names_cache_list.pop()]
                    cache_length -= 1
                exists_corp = self.model.filter_by(name=corp_name).first()
                if exists_corp:
                    result = exists_corp.info_from
                    corp_names_cache[corp_name] = result
            else:
                result = corp_names_cache[corp_name]

    def run(self):
        """Main loop: walk list pages, skip known companies, enrich and queue
        new ones, then push the None sentinel for the Commiter."""
        self.prepare()
        check_exists = self.check_exists()
        cur_page = itertools.count()
        for page_url in self.get_next_page_url():
            print('\n%s 第%s页' % (self.info_from, next(cur_page)+1))
            for corp_info in self.fetch_corplist(page_url):
                self._msg('***************************************************')
                print(corp_info['name'], end=' ')
                info_from = check_exists.send(corp_info)
                if not info_from:
                    if self.corp_regs:
                        corp_info = self.fetch_corp(corp_info)
                    corp_info = self.before_save(corp_info)
                    self.queue.put(corp_info)
                    print('保存成功!')
                else:
                    print('已经存在于: %s' % info_from)
        self._msg('\n%s 抓取完毕!' % self.info_from)
        self.queue.put(None)

    def report(self, fields=None):
        """Export today's records from this source to a GBK-encoded CSV."""
        corps = self.model.filter_by(info_from=self.info_from, insert_date=datetime.date.today())
        #corps = self.model.filter_by(info_from=self.info_from, insert_date=datetime.date(2011,12,8))
        fields = fields or (
            ('名称', 'name'),
            ('地址', 'addr'),
            ('联系人', 'contact_person'),
            ('区号', 'contact_tel_code'),
            ('电话号码', 'contact_tel_no'),
            ('邮箱', 'mail'),
            ('网址', 'website'),
            ('信息来源', 'info_from'),
            ('更新日期', 'insert_date'),
            ('链接', self.corp_url),
        )
        self.model.report('%s最新公司信息_%s.csv' % (self.info_from, time.strftime('%Y-%m-%d')), fields=fields, rows=corps, encoder='gbk')
class Commiter(multiprocessing.Process):
    """Consumer process: de-duplicates incoming company records and batches
    them into the database model. Terminates once every producer process
    has delivered its None sentinel."""

    def __init__(self, queue, db_lock, process_num, model=None, commit_each_times=30):
        super().__init__()
        if model:
            self.model = model
        else:
            from lib.models import CorpModel
            self.model = CorpModel
        self.queue = queue
        self.db_lock = db_lock
        self.process_num = process_num
        self._over_times = 0
        self._cache = set()
        self._add_times = 0
        self.commit_each_times = commit_each_times

    def is_over(self):
        """True once every producer process has sent its None sentinel."""
        return self._over_times >= self.process_num

    def save(self, corp_info):
        """Stage one record for insertion, skipping names already seen;
        commit every `commit_each_times` additions."""
        corp_name = corp_info.get('name').strip()
        if corp_name in self._cache:
            return
        #with self.db_lock:
        self.model.add(corp_info, is_commit=False)
        self._cache.add(corp_name)
        self._add_times += 1
        if not self._add_times % self.commit_each_times:
            self.model.commit()

    def run(self):
        while True:
            corp_info = self.queue.get()
            if corp_info is not None:
                self.save(corp_info)
                continue
            # A None item marks one producer as finished.
            self._over_times += 1
            if self.is_over():
                # All producers done: flush the final partial batch and exit.
                self.model.commit()
                return 0
|
from django.contrib import auth
from django.contrib.auth.models import User
from django.contrib import messages
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from products.models import Product
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
# Create your views here.
@csrf_exempt
def register(request):
    """Create a user account from the registration form; on any failure
    redirect back to the registration page."""
    if request.method != "POST":
        return render(request, "register.html")
    firstname = request.POST["firstname"]
    lastname = request.POST["lastname"]
    email = request.POST["email"]
    username = request.POST["username"]
    password1 = request.POST["psw"]
    password2 = request.POST["psw-repeat"]
    if password1 != password2:
        messages.error(request, 'User registration Failed. Please try again!!')
        return redirect("register")
    # Reject duplicate usernames or e-mail addresses.
    if User.objects.filter(username=username).exists():
        return redirect("register")
    if User.objects.filter(email=email).exists():
        return redirect("register")
    User.objects.create_user(
        username=username,
        password=password1,
        email=email,
        first_name=firstname,
        last_name=lastname,
    )
    messages.success(request, 'User registered successfully.')
    return redirect("login")
def home(request):
    """Render the landing page."""
    return render(request, "home.html")
def addproduct(request):
    """Render the add-product form page."""
    return render(request, "addproduct.html")
@csrf_exempt
def login(request):
    """Authenticate a user; on success redirect to the products page."""
    if request.method != "POST":
        return render(request, "login.html")
    username = request.POST["username"]
    password = request.POST["password"]
    user = auth.authenticate(username=username, password=password)
    if user is None:
        messages.error(request, 'Authentication Unsuccessful. Please try again with correct credentials.!!')
        return redirect("login")
    auth.login(request, user)
    messages.success(request, 'User logged in successfully.')
    return redirect("products")
def add_pagination(request, all_data):
    """Return the page requested via ?page= (10 items per page),
    clamped to the valid range on bad or out-of-range values."""
    paginator = Paginator(all_data, 10)
    requested = request.GET.get('page', 1)
    try:
        return paginator.page(requested)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)
def products(request):
    """POST: create a product for the current user. GET: list the current
    user's products, newest first, paginated.

    Fix: the original copied every queryset row into a new, unsaved Product
    instance before paginating -- pure overhead, since the queryset rows
    already expose id/name/price/username. The queryset is paginated directly.
    """
    if request.method == "POST":
        name = request.POST.get("name")
        price = request.POST.get("price")
        Product.objects.create(name=name, price=price, username=request.user.get_full_name())
        messages.success(request, 'Product added successfully.')
        return redirect("products")
    qs = Product.objects.get_queryset().order_by('-id').filter(username=request.user.get_full_name())
    final_data = add_pagination(request, qs)
    return render(request, "products.html", {"data": final_data})
def delproduct(request, id):
    """Delete the product with the given id, then return to the list."""
    Product.objects.filter(id=id).delete()
    messages.success(request, 'Product deleted successfully.')
    return redirect("products")
@csrf_exempt
def logout(request):
    """Log the current user out and return to the landing page."""
    auth.logout(request)
    messages.success(request, 'Logout Successful.')
    return redirect("/")
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as anim
import ffmpeg
def init_episode():
    """Start a Mountain Car episode: random position in [-0.6, -0.4),
    zero velocity, a uniformly random first action, and done=False."""
    start_position = -0.6 + np.random.random() / 5
    first_state = np.array([start_position, 0])
    first_action = np.random.choice(ACTIONS)
    return first_state, first_action, False
class tiling:
    """One tile grid over (position, velocity) space, with one value table
    per action.

    NOTE(review): update_tileValues reads the module globals `current_state`
    and `current_action` instead of taking them as parameters -- the class is
    tightly coupled to the surrounding script.
    """

    def __init__(self,x_tiles,y_tiles,x_bound,y_bound):
        self.tile_width = (x_bound[1]-x_bound[0])/x_tiles
        self.tile_height = (y_bound[1]-y_bound[0])/y_tiles
        # Tile edge coordinates; the +0.1 pad keeps the upper bound inside.
        self.x_bounds = np.arange(x_bound[0],x_bound[1]+0.1,self.tile_width)
        self.y_bounds = np.arange(y_bound[0], y_bound[1]+0.1, self.tile_height)
        # Shape: (n_actions, x_tiles, y_tiles).
        self.tile_values = np.zeros((len(ACTIONS),x_tiles,y_tiles),dtype=float)

    def determine_tileValues(self,state):
        """Return every action's value for the tile containing `state`."""
        # argmin over booleans = index of the first bound the coordinate
        # does not exceed; -1 converts it to the containing-tile index.
        x_location = np.argmin(state[0]>self.x_bounds)-1
        y_location = np.argmin(state[1]>self.y_bounds)-1
        return self.tile_values[:,x_location,y_location]

    def update_tileValues(self,increment):
        """Add `increment` to the cell for (current_state, current_action)."""
        x_location = np.argmin(current_state[0]>self.x_bounds)-1
        y_location = np.argmin(current_state[1]>self.y_bounds)-1
        self.tile_values[np.where(ACTIONS==current_action)[0][0],x_location,y_location] += increment
def choose_action():
    """Pick an action for the module-global `next_state`.

    Returns (action, max_action_value); each action's value is summed across
    all tilings and ties are broken uniformly at random.

    NOTE(review): `e < 0` is never true for np.random.random() in [0, 1), so
    the exploratory branch is dead code and the policy is purely greedy --
    confirm whether a positive epsilon was intended.
    """
    e = np.random.random()
    action_values = np.array([0, 0, 0], dtype=float)
    if e<0:
        action = np.random.choice(ACTIONS)
    else:
        for I,tiling in enumerate(tiling_list):
            action_values+=tiling.determine_tileValues(next_state)
        action = ACTIONS[np.random.choice(np.flatnonzero(action_values == max(action_values)))]
    return action,np.max(action_values)
def update_actionValues():
    """One TD update, split evenly across the tilings.

    Reads module globals: reward, action_value1/action_value2 (the two Q
    estimates), ALFA and GAMMA.
    """
    value_increment = ALFA*(reward + GAMMA * action_value2 - action_value1)
    for I, tiling in enumerate(tiling_list):
        tiling.update_tileValues(value_increment)
def current_stateActionValue():
    """Q(current_state, current_action), summed over all tilings (globals)."""
    action_values = np.array([0, 0, 0], dtype=float)
    for I, tiling in enumerate(tiling_list):
        action_values += tiling.determine_tileValues(current_state)
    return action_values[np.where(ACTIONS == current_action)[0][0]]
# ----- Sarsa with tile coding on Mountain Car (Sutton & Barto, ch. 10) -----
ACTIONS = np.array([-1,0,1])  # throttle: reverse, idle, forward
TILINGS = 8
TILES = 8
ALFA = 0.1/8  # step size, divided across the 8 tilings
GAMMA = 0.9   # discount factor
EPISODES = 1000
position_bound = np.array([-1.2,0.5])
velocity_bound = np.array([-0.07, 0.07])
# Offset each tiling by a different amount so the grids overlap.
tile_offsetX = (position_bound[1]-position_bound[0])/TILINGS/TILES
tile_offsetY = (velocity_bound[1]-velocity_bound[0])/TILINGS/TILES
tiling_list = []
for i in range(TILINGS):
    tile = tiling(TILES,TILES,position_bound+(i)*1*tile_offsetX,velocity_bound+(i)*3*tile_offsetY)
    tiling_list.append(tile)
episode_steps = []
episode_positions = []
for episode in range(EPISODES):
    if episode in np.arange(0,EPISODES,EPISODES/10):
        print('running episode : {}'.format(episode))
    current_state, current_action, episode_end = init_episode()
    step = 0
    state_memory = []
    while not episode_end and step<8000:
        # Mountain Car dynamics: position += velocity,
        # velocity += 0.001*action - 0.0025*cos(3*position).
        next_state = np.array([current_state[0]+current_state[1],current_state[1]+0.001*current_action-0.0025*np.cos(3*current_state[0])])
        action_value1 = current_stateActionValue()
        reward = -1
        if next_state[0]<=position_bound[0]:
            # Hit the left wall: velocity zeroed, position kept.
            next_state[1] = 0
            next_state[0] = current_state[0]
        elif next_state[0]>=position_bound[1]:
            # Reached the goal: terminal update with Q' = 0.
            episode_end = True
            action_value2 = 0
            update_actionValues()
            episode_steps.append(step)
            break
        # Clamp velocity to its bounds, preserving sign.
        if abs(next_state[1])>velocity_bound[1]:
            next_state[1] = next_state[1]/abs(next_state[1])*velocity_bound[1]
        next_action,action_value2 = choose_action()
        update_actionValues()
        current_state = next_state
        current_action = next_action
        state_memory.append(current_state[0])
        step += 1
    if episode in np.arange(0,EPISODES,EPISODES/50):
        # Record every 20th episode's trajectory for the animation below.
        episode_positions.append(state_memory)
        # plt.figure(EPISODES*2+episode+1)
        # state_plot = plt.subplot()
        # state_plot.plot(state_memory)
    if episode==999:# in np.arange(0,EPISODES,EPISODES/10):
        # Plot the learned cost-to-go surface over the state space.
        cost_to_go = np.zeros((100, 100))
        for i in range(100):
            for j in range(100):
                state = np.array([position_bound[0] + i * 1.7 / 100, velocity_bound[0] + j * 0.14 / 100])
                action_values = np.array([0, 0, 0], dtype=float)
                for I, tiling in enumerate(tiling_list):
                    action_values += tiling.determine_tileValues(state)
                cost_to_go[i, j] = -max(action_values)
        fig1 = plt.figure(episode)
        # NOTE(review): gca(projection='3d') is removed in matplotlib >= 3.6;
        # newer code uses fig1.add_subplot(projection='3d') -- confirm version.
        ax = fig1.gca(projection='3d')
        X = np.arange(position_bound[0], position_bound[1], 1.7 / 100)
        Y = np.arange(velocity_bound[0], velocity_bound[1], 0.14 / 100)
        Y, X = np.meshgrid(Y, X)
        Z = np.array(cost_to_go)
        Z = np.reshape(Z, (100, 100))
        ax.plot_surface(X, Y, Z,shade=False,edgecolor='k',linewidth=0.5)
        ax.set_xlabel('Position')
        ax.set_ylabel('Velocity')
        ax.set_zlabel('maximum cost to go')
if False:
    # Optional diagnostic: steps needed per episode.
    fig2= plt.figure()
    stepEpisode_plot = plt.subplot()
    stepEpisode_plot.plot(episode_steps)
episode = 49
if True:
    # Animate the car on the hill for one recorded episode; save as MP4.
    x_car = np.array(episode_positions[episode])
    y_car = np.sin(3*x_car)
    x_road = np.linspace(position_bound[0], position_bound[1], 100)
    y_road = np.sin(3*x_road)
    fig, ax = plt.subplots()
    ln, = plt.plot([],[], 'ro')
    plt.plot(x_road, y_road, 'k')
    def update(frame):
        # Move the car marker to the frame's recorded position.
        x = x_car[frame]
        y = y_car[frame]
        ln.set_data(x,y)
        return ln,
    Writer = anim.writers['ffmpeg']
    writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
    ani = anim.FuncAnimation(fig, update, frames=len(episode_positions[episode]), interval=10, blit=True)
    ani.save('Episode1000_Animation.mp4', writer = writer)
plt.show()
from re import compile, search
from string import ascii_letters, digits
# Name appears between angle brackets; phone number follows a '+'.
NAME = compile(r'<(.+)>')
PHONE = compile(r'\+(\d\d?-\d{3}-\d{3}-\d{4})')
# Characters permitted in a cleaned address; anything else becomes a space.
VALID = set(ascii_letters + digits + ' .-')

def phone(strng, num):
    """Look up phone number *num* in a free-form phone-book dump.

    Each line holds a name in <>, a phone number after '+', and address
    fragments. Returns a formatted hit, a "Too many people" error when the
    number occurs on several lines, or a "Not found" error.
    """
    book = {}
    for line in strng.rstrip().split('\n'):
        name = search(NAME, line).group(1)
        remainder = line.replace(name, '')
        phone_num = search(PHONE, line).group(1)
        # Strip the number, blank out invalid characters, collapse whitespace.
        stripped = remainder.replace(phone_num, '')
        cleaned = ''.join(ch if ch in VALID else ' ' for ch in stripped)
        address = ' '.join(cleaned.split())
        if phone_num in book:
            book[phone_num]['duplicate'] = True
        else:
            book[phone_num] = {
                'name': name, 'address': address, 'duplicate': False}
    entry = book.get(num)
    if entry is None:
        return 'Error => Not found: {}'.format(num)
    if entry['duplicate']:
        return 'Error => Too many people: {}'.format(num)
    return 'Phone => {}, Name => {}, Address => {}'.format(
        num, entry['name'], entry['address'])
|
# -*- coding: utf-8 -*-
import scrapy
import requests
from lxml import etree
from sina import items
from scrapy.spiders import CrawlSpider,Rule #CrawlSpiders:定义了一些规则跟进link
from scrapy.linkextractors import LinkExtractor #提取链接
class MysinaSpider(CrawlSpider):
    """CrawlSpider that walks Sina's domestic-news list pages and yields one
    SinaItem (title, url, time, article text) per news entry."""
    name = 'mysina'
    allowed_domains = ['sina.com.cn']
    start_urls = ['http://roll.news.sina.com.cn/news/gnxw/gdxw1/index_2.shtml']
    '''
    Rule参数:link_extractor, callback=None, cb_kwargs=None, follow=None, process_links=None, process_request=identity
    LinkExtractor部分参数: allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=()
    allow=(正则)允许的, deny=(正则)不允许的
    callback=回调
    follow= 跟随如果为True就跟随
    '''
    # Follow every paginated list page matching index_<N>.shtml.
    rules = [Rule(LinkExtractor(allow=('index_(\d+).shtml')),callback='getParse',follow=True)]

    def getParse(self, response):
        """Parse one list page: grab title/url/time per entry, then request
        the article page, forwarding the metadata via request.meta."""
        newsList = response.xpath("//ul[@class='list_009']/li")
        for news in newsList:
            # item = items.SinaItem()
            newsTitle = news.xpath('./a/text()')[0].extract()
            newsUrl = news.xpath('./a/@href')[0].extract()
            newsTime = news.xpath('./span/text()')[0].extract()
            # content = self.getContent(newsUrl)
            # Build the follow-up request for the article body.
            request = scrapy.Request(newsUrl,callback=self.getMataContent)
            # (the item itself is assembled later, in getMataContent)
            # item['newsTitle'] = newsTitle
            # item['newsUrl'] = newsUrl
            # item['newsTime'] = newsTime
            # item['content'] = content
            # print(item)
            # Pass the list-page fields along via request.meta.
            request.meta['newsTitle'] = newsTitle
            request.meta['newsUrl'] = newsUrl
            request.meta['newsTime'] = newsTime
            # request.meta['content'] = content
            # yield item
            yield request

    # Earlier, requests/lxml-based variant of the article fetch (unused):
    # def getContent(self,url):
    #     headers = {
    #         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36"
    #     }
    #     response = requests.get(url,headers=headers).content.decode('utf-8','ignore')
    #     mytree = etree.HTML(response)
    #     contentList = mytree.xpath("//div[@class='article']//text()")
    #     print(contentList)
    #     content = ''
    #     for c in contentList:
    #         content += c.strip().replace('\n','')
    #     return content

    def getMataContent(self,response):
        """Assemble the final item from response.meta plus the article text."""
        contentList = response.xpath("//div[@class='article']//text()")
        content = ''
        for c in contentList:
            content += c.extract().strip()
        item = items.SinaItem()
        # Fields carried over from the list page via response.meta.
        item['newsTitle'] = response.meta['newsTitle']
        item['newsUrl'] = response.meta['newsUrl']
        item['newsTime'] = response.meta['newsTime']
        item['content'] = content
        yield item
|
from Code.Python.robot_calc_functions import Magnitude, matmult, TransInv, Adjoint, FKinSpace, JacobianSpace, TransToRp, so3ToVec, VecToso3, MatrixLog3, RotInv
import numpy as np
from math import acos, tan, pi
# There was an error found in the source code for IKinSpace and IKinBody. The and condition in the if statement should be replaced with or
def IKinSpace(Slist, M, T, thetalist0, eomg, ev):
    """Newton-Raphson inverse kinematics in the space frame.

    Iterates (at most 20 times) from the seed thetalist0 toward a joint
    vector whose end-effector frame matches T within the angular (eomg)
    and linear (ev) tolerances.

    :return: (thetalist, success) — the last iterate and whether the
        tolerances were met.
    """
    max_iterations = 20
    guesses = [thetalist0]  # history of iterates; last entry is returned

    def twist_error(theta):
        # Body twist taking the current end-effector frame to T.
        return MatrixLog6(matmult(TransInv(FKinSpace(M, Slist, theta)), T))

    Vs = twist_error(thetalist0)
    wb = Magnitude([Vs[0], Vs[1], Vs[2]])
    vb = Magnitude([Vs[3], Vs[4], Vs[5]])
    for _ in range(max_iterations):
        if wb <= eomg and vb <= ev:
            # Converged within both tolerances.
            return (guesses[-1], True)
        Jb = matmult(Adjoint(TransInv(FKinSpace(M, Slist, thetalist0))),
                     JacobianSpace(Slist, thetalist0))
        thetalist0 = np.add(thetalist0, matmult(np.linalg.pinv(Jb), Vs))
        guesses.append(thetalist0)
        Vs = twist_error(thetalist0)
        wb = Magnitude([Vs[0], Vs[1], Vs[2]])
        vb = Magnitude([Vs[3], Vs[4], Vs[5]])
    # Iteration budget exhausted without convergence.
    return (guesses[-1], False)
def MatrixLog6(T): # Takes a transformation matrix T SE(3)
    """Matrix logarithm of a homogeneous transform.

    Returns the exponential coordinates [w*theta, v*theta] (a 6-list) of
    T in SE(3).  Three branches: identity rotation, rotation by pi
    (trace == -1), and the general case.

    NOTE(review): in the identity-rotation branch theta is set to 1 so the
    final scaling leaves v == p unchanged — confirm this matches the
    convention expected by callers.
    """
    R, p = TransToRp(T)
    # Trace of the rotation part; determines the rotation angle.
    Rtrace = R[0][0] + R[1][1] + R[2][2]
    if (R == np.eye(3)).all():
        # No rotation: the twist is a pure translation.
        omg = [0, 0, 0]
        v = p
        theta = 1
    else:
        if (Rtrace == -1):
            # Rotation by exactly pi radians.
            theta = pi
            omg = MatrixLog3(R)
            # G is the inverse of the "G(theta)" matrix relating v to p.
            G = (1 / theta) * np.eye(3) - 0.5 * np.asarray(VecToso3(omg)) + ((1 / theta) - (
                (1 / (tan(theta / 2.0))) / 2.0)) * (matmult(VecToso3(omg), VecToso3(omg)))
            v = np.dot(G, p)
        else:
            # Modified source code as I was running into issues with math domain errors from acos and a divide by zero error
            # with theta. I believe that these errors occur due to rounding and float calculation errors by the software. Because
            # of this, I am assuming that when theta is equal to zero, or the value in acos is > 1, it is in fact just a number close to
            # 0 or 1.
            if ((Rtrace - 1) / 2.0) > 1:
                Rtrace = 2.99999
            theta = acos((Rtrace - 1) / 2.0)
            if theta == 0:
                theta = .000001
            # Axis of rotation from the skew-symmetric part of R.
            omg = so3ToVec((1 / (2 * np.sin(theta))) * (np.subtract(R, RotInv(R))))
            G = (1 / theta) * np.eye(3) - 0.5 * np.asarray(VecToso3(omg)) + ((1 / theta) - (
                (1 / (tan(theta / 2.0))) / 2.0)) * (matmult(VecToso3(omg), VecToso3(omg)))
            v = np.dot(G, p)
    return ([omg[0] * theta, omg[1] * theta, omg[2] * theta, v[0] * theta, v[1] * theta, v[2] * theta])
# createTraj is a function that will take in beginning and end coordinates to move the end effector of the UR5 6 joint robot from point to point.
# It will create a list of 101 points between these two points and use inverse kinematics to solve what joint angles are needed
# to get to the desired point in trajectory. This function uses the IKinSpace function; therefore the screws list used is with respect
# to the space frame.
#
def createTraj(begin_coords, end_coords):
    """Plan a straight-line Cartesian trajectory for the UR5 and solve IK.

    Interpolates 101 points between begin_coords and end_coords (x, y, z),
    then solves space-frame inverse kinematics (IKinSpace) for each point,
    seeding every solve with the previous solution.  Stops early if a
    point cannot be reached.

    :param begin_coords: (x, y, z) start position of the end effector
    :param end_coords: (x, y, z) goal position of the end effector
    :return: list of 6-element joint-angle vectors (one per solved point)
    """
    list_size = 101
    list_x = np.linspace(begin_coords[0], end_coords[0], list_size)
    list_y = np.linspace(begin_coords[1], end_coords[1], list_size)
    list_z = np.linspace(begin_coords[2], end_coords[2], list_size)
    anglelist = []
    # UR5 link dimensions (meters).
    L1 = .425
    L2 = .392
    W1 = .109
    W2 = .082
    H1 = .089
    H2 = .095
    # Home configuration M and the space-frame screw axes of the 6 joints.
    M = [[-1, 0, 0, L1 + L2], [0, 0, 1, W1 + W2], [0, 1, 0, H1 - H2], [0, 0, 0, 1]]
    Slist = [[0, 0, 1, 0, 0, 0], [0, 1, 0, -H1, 0, 0], [0, 1, 0, -H1, 0, L1],
             [0, 1, 0, -H1, 0, L1 + L2], [0, 0, -1, -W1, L1 + L2, 0],
             [0, 1, 0, H2 - H1, 0, L1 + L2]]
    eomg = 0.1   # angular tolerance for IK convergence
    ev = 0.01    # linear tolerance for IK convergence
    theta_guess = [.1, .1, .1, .1, .1, .1]
    for i in range(0, list_size):
        target_coords = (list_x[i], list_y[i], list_z[i])
        # Goal frame: identity rotation translated to the target point.
        T_goal = [[1, 0, 0, target_coords[0]], [0, 1, 0, target_coords[1]],
                  [0, 0, 1, target_coords[2]], [0, 0, 0, 1]]
        result = IKinSpace(Slist, M, T_goal, theta_guess, eomg, ev)
        # Fix: use the print(...) call form, which behaves identically under
        # Python 2 (parenthesized expression) and Python 3, instead of the
        # Python-2-only print statement.
        if not result[1]:
            print("Error!")
            break
        else:
            print("Success!")
            anglelist.append(result[0])
            # Warm-start the next solve with this solution.
            theta_guess = result[0]
    return anglelist
#writeToFile will go through the list of angle joints returned from createTraj function and write them into a csv file name angle.csv
def writeToFile(jointangles):
    """Write the joint-angle list to angle.csv, one comma-separated row per
    joint vector.

    :param jointangles: iterable of joint-angle sequences (as returned by
        createTraj)
    """
    # Fix: 'with' guarantees the file is closed even if a write raises;
    # the original left the handle open on error.
    with open('angle.csv', 'w') as f:
        for joint in jointangles:
            f.write(",".join(map(str, joint)) + "\n")
def main():
    """Generate a demo straight-line trajectory and save the joint angles
    to angle.csv."""
    start = (.7, 0, .1)
    goal = (0, -.3, .5)
    writeToFile(createTraj(start, goal))


if __name__ == "__main__":
    main()
def balanced_brackets(s):
    """Return True if every bracket in *s* is closed in the correct order.

    Non-bracket characters are ignored.  Uses a stack of open brackets;
    a closer must match the most recent opener, and all openers must be
    consumed by the end of the string.
    """
    pairs = {'(': ')', '[': ']', '{': '}'}
    # Fix: precompute the closer set once instead of rebuilding
    # list(brackets.values()) on every character.
    closers = set(pairs.values())
    stack = []
    for c in s:
        if c in pairs:
            stack.append(c)
        elif c in closers:
            # A closer with no matching opener, or the wrong opener,
            # makes the string unbalanced.
            if not stack or pairs[stack.pop()] != c:
                return False
    # Balanced only if no openers are left dangling.
    return not stack


print(balanced_brackets('{}}'))
|
# -*- coding: utf-8 -*-
from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint
from sqlalchemy.types import Integer, String, Text, DateTime
from ..extensions import db
from ..utils import get_current_time
class Repo(db.Model):
    """A user's code repository (possibly a fork of another repo)."""
    __tablename__ = "repos"
    id = Column(Integer, primary_key=True)
    # Owner of the repository.
    user_id = Column(Integer, ForeignKey("users.id"), nullable=False)
    name = Column(String(255), nullable=False)
    description = Column(Text, nullable=False)
    # Self-referential FK; init() sets this to 0 rather than NULL,
    # presumably meaning "not a fork" — TODO confirm.
    forked_from_id = Column(Integer, ForeignKey("repos.id"), nullable=False)
    master_branch = Column(String(255), nullable=False)
    status = Column(Integer, nullable=False)
    flags = Column(Integer, nullable=False)
    creation = Column(DateTime, nullable=False)
    updated = Column(DateTime, nullable=False)
    # A user cannot own two repositories with the same name.
    __table_args__ = (UniqueConstraint("user_id","name", name="user_repos"),)
    def init(self, name, user_id, description="", status=0, flags=0):
        """Populate a fresh Repo's fields.

        NOTE(review): named ``init`` rather than ``__init__`` — presumably
        called explicitly after construction; verify callers.  Also assigns
        the int 0 to ``master_branch`` although the column is String(255);
        confirm whether '' or 'master' was intended.
        """
        self.name = name
        self.user_id = user_id
        self.description = description
        self.forked_from_id = 0
        self.master_branch = 0
        self.status = status
        self.flags = flags
        self.creation = get_current_time()
        # A new repo counts its creation as its last update.
        self.updated = self.creation
    def __repr__(self):
        return "<Repo %r: %r/%r>" % (self.id, self.user.username, self.name)
|
# Demo: print a list of names, reverse it, then print it again.
fr = ['bhvaya', 'komal', 'khushi', 'akshuni', 'divya', 'komal']
print('before the reverse operation of fr value are:=', fr)
# Rebind to a reversed copy via slicing.
fr = fr[::-1]
print('after the reverse opertion of fr value are:=', fr)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `web-platform-compat` fields module."""
from datetime import datetime
from pytz import UTC
from django.contrib.auth.models import User
from django.test.utils import override_settings
from webplatformcompat.cache import Cache
from webplatformcompat.history import Changeset
from webplatformcompat.models import (
Browser, Feature, Maturity, Reference, Section, Specification, Support,
Version)
from .base import TestCase
class TestCache(TestCase):
    """Tests for webplatformcompat.cache.Cache.

    For each resource type (browser, changeset, feature, maturity,
    reference, section, specification, support, version, user) the cache
    exposes a *_v1_serializer, *_v1_loader and *_v1_invalidator; each
    group of tests below covers those three plus the None / missing-pk
    edge cases.
    """

    def setUp(self):
        self.cache = Cache()
        self.login_user(groups=['change-resource'])

    # --- Browser ---
    def test_browser_v1_serializer(self):
        browser = self.create(Browser)
        out = self.cache.browser_v1_serializer(browser)
        expected = {
            'id': browser.id,
            'slug': u'',
            'name': {},
            'note': {},
            'history:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalbrowser',
                'pks': [browser.history.all()[0].pk],
            },
            'history_current:PK': {
                'app': u'webplatformcompat',
                'model': 'historicalbrowser',
                'pk': browser.history.all()[0].pk,
            },
            'versions:PKList': {
                'app': u'webplatformcompat',
                'model': 'version',
                'pks': [],
            },
        }
        self.assertEqual(out, expected)

    def test_browser_v1_serializer_empty(self):
        self.assertEqual(None, self.cache.browser_v1_serializer(None))

    def test_browser_v1_loader(self):
        browser = self.create(Browser)
        # Loader should prefetch everything the serializer needs.
        with self.assertNumQueries(3):
            obj = self.cache.browser_v1_loader(browser.pk)
        with self.assertNumQueries(0):
            serialized = self.cache.browser_v1_serializer(obj)
        self.assertTrue(serialized)

    def test_browser_v1_loader_not_exist(self):
        self.assertFalse(Browser.objects.filter(pk=666).exists())
        self.assertIsNone(self.cache.browser_v1_loader(666))

    def test_browser_v1_invalidator(self):
        browser = self.create(Browser)
        self.assertEqual([], self.cache.browser_v1_invalidator(browser))

    # --- Changeset ---
    def test_changeset_v1_serializer(self):
        created = datetime(2014, 10, 29, 8, 57, 21, 806744, UTC)
        changeset = self.create(Changeset, user=self.user)
        # Force known timestamps so the serialized values are predictable.
        Changeset.objects.filter(pk=changeset.pk).update(
            created=created, modified=created)
        changeset = Changeset.objects.get(pk=changeset.pk)
        out = self.cache.changeset_v1_serializer(changeset)
        expected = {
            'id': changeset.id,
            'created:DateTime': '1414573041.806744',
            'modified:DateTime': '1414573041.806744',
            'target_resource_type': '',
            'target_resource_id': 0,
            'closed': False,
            'user:PK': {
                'app': u'auth',
                'model': 'user',
                'pk': self.user.pk,
            },
            'historical_browsers:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalbrowser',
                'pks': []
            },
            'historical_features:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalfeature',
                'pks': []
            },
            'historical_maturities:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalmaturity',
                'pks': []
            },
            'historical_references:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalreference',
                'pks': []
            },
            'historical_sections:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalsection',
                'pks': []
            },
            'historical_specifications:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalspecification',
                'pks': []
            },
            'historical_supports:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalsupport',
                'pks': []
            },
            'historical_versions:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalversion',
                'pks': []
            },
        }
        self.assertEqual(out, expected)

    def test_changeset_v1_serializer_empty(self):
        self.assertEqual(None, self.cache.changeset_v1_serializer(None))

    def test_changeset_v1_loader(self):
        changeset = self.create(Changeset, user=self.user)
        with self.assertNumQueries(9):
            obj = self.cache.changeset_v1_loader(changeset.pk)
        with self.assertNumQueries(0):
            serialized = self.cache.changeset_v1_serializer(obj)
        self.assertTrue(serialized)

    def test_changeset_v1_loader_not_exist(self):
        self.assertFalse(Changeset.objects.filter(pk=666).exists())
        self.assertIsNone(self.cache.changeset_v1_loader(666))

    def test_changeset_v1_invalidator(self):
        changeset = self.create(Changeset, user=self.user)
        self.assertEqual([], self.cache.changeset_v1_invalidator(changeset))

    # --- Feature ---
    def test_feature_v1_serializer(self):
        feature = self.create(
            Feature, slug='the-slug', name='{"en": "A Name"}')
        out = self.cache.feature_v1_serializer(feature)
        expected = {
            'id': feature.id,
            'slug': 'the-slug',
            'mdn_uri': {},
            'experimental': False,
            'standardized': True,
            'stable': True,
            'obsolete': False,
            'name': {'en': 'A Name'},
            'descendant_count': 0,
            'references:PKList': {
                'app': 'webplatformcompat',
                'model': 'reference',
                'pks': [],
            },
            'supports:PKList': {
                'app': 'webplatformcompat',
                'model': 'support',
                'pks': [],
            },
            'parent:PK': {
                'app': 'webplatformcompat',
                'model': 'feature',
                'pk': None,
            },
            'children:PKList': {
                'app': 'webplatformcompat',
                'model': 'feature',
                'pks': [],
            },
            'row_children:PKList': {
                'app': 'webplatformcompat',
                'model': 'feature',
                'pks': [],
            },
            'row_children_pks': [],
            'page_children_pks': [],
            'descendant_pks': [],
            'row_descendant_pks': [],
            'history:PKList': {
                'app': 'webplatformcompat',
                'model': 'historicalfeature',
                'pks': [feature.history.all()[0].pk],
            },
            'history_current:PK': {
                'app': 'webplatformcompat',
                'model': 'historicalfeature',
                'pk': feature.history.all()[0].pk,
            },
        }
        self.assertEqual(out, expected)

    def test_feature_v1_serializer_mixed_descendants(self):
        # A tree mixing "row" children and "page" children (those with an
        # mdn_uri) to check the descendant bookkeeping.
        feature = self.create(
            Feature, slug='the-slug', name='{"en": "A Name"}')
        child1 = self.create(Feature, slug='child1', parent=feature)
        child2 = self.create(Feature, slug='child2', parent=feature)
        child21 = self.create(Feature, slug='child2.1', parent=child2)
        page1 = self.create(
            Feature, slug='page1', parent=feature,
            mdn_uri='{"en": "https://example.com/page1"}')
        page2 = self.create(
            Feature, slug='page2', parent=child2,
            mdn_uri='{"en": "https://example.com/page2"}')
        feature = Feature.objects.get(id=feature.id)
        out = self.cache.feature_v1_serializer(feature)
        self.assertEqual(out['descendant_count'], 5)
        self.assertEqual(
            out['descendant_pks'],
            [child1.pk, child2.pk, child21.pk, page2.pk, page1.pk])
        self.assertEqual(
            out['row_descendant_pks'], [child1.pk, child2.pk, child21.pk])
        self.assertEqual(out['page_children_pks'], [page1.pk])
        self.assertEqual(out['row_children_pks'], [child1.pk, child2.pk])

    @override_settings(PAGINATE_VIEW_FEATURE=2)
    def test_feature_v1_serializer_paginated_descendants(self):
        # When descendants exceed the pagination limit, descendant_pks is
        # left empty.
        feature = self.create(
            Feature, slug='the-slug', name='{"en": "A Name"}')
        self.create(Feature, slug='child1', parent=feature)
        self.create(Feature, slug='child2', parent=feature)
        self.create(Feature, slug='child3', parent=feature)
        feature = Feature.objects.get(id=feature.id)
        out = self.cache.feature_v1_serializer(feature)
        self.assertEqual(out['descendant_count'], 3)
        self.assertEqual(out['descendant_pks'], [])

    def test_feature_v1_serializer_empty(self):
        self.assertEqual(None, self.cache.feature_v1_serializer(None))

    def test_feature_v1_loader(self):
        feature = self.create(Feature)
        with self.assertNumQueries(4):
            obj = self.cache.feature_v1_loader(feature.pk)
        with self.assertNumQueries(0):
            serialized = self.cache.feature_v1_serializer(obj)
        self.assertTrue(serialized)

    def test_feature_v1_loader_not_exist(self):
        self.assertFalse(Feature.objects.filter(pk=666).exists())
        self.assertIsNone(self.cache.feature_v1_loader(666))

    def test_feature_v1_invalidator_basic(self):
        feature = self.create(Feature)
        self.assertEqual([], self.cache.feature_v1_invalidator(feature))

    def test_feature_v1_invalidator_with_relation(self):
        # Changing a child must invalidate the parent's cached entry.
        parent = self.create(Feature, slug='parent')
        feature = self.create(Feature, slug='child', parent=parent)
        expected = [('Feature', parent.id, False)]
        self.assertEqual(expected, self.cache.feature_v1_invalidator(feature))

    # --- Maturity ---
    def test_maturity_v1_serializer(self):
        maturity = self.create(
            Maturity, slug='REC', name='{"en-US": "Recommendation"}')
        out = self.cache.maturity_v1_serializer(maturity)
        expected = {
            'id': maturity.id,
            'slug': 'REC',
            'name': {'en-US': 'Recommendation'},
            'specifications:PKList': {
                'app': u'webplatformcompat',
                'model': 'specification',
                'pks': [],
            },
            'history:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalmaturity',
                'pks': [maturity.history.all()[0].pk],
            },
            'history_current:PK': {
                'app': u'webplatformcompat',
                'model': 'historicalmaturity',
                'pk': maturity.history.all()[0].pk,
            },
        }
        self.assertEqual(out, expected)

    def test_maturity_v1_serializer_empty(self):
        self.assertEqual(None, self.cache.maturity_v1_serializer(None))

    def test_maturity_v1_loader(self):
        maturity = self.create(Maturity)
        with self.assertNumQueries(3):
            obj = self.cache.maturity_v1_loader(maturity.pk)
        with self.assertNumQueries(0):
            serialized = self.cache.maturity_v1_serializer(obj)
        self.assertTrue(serialized)

    def test_maturity_v1_loader_not_exist(self):
        self.assertFalse(Maturity.objects.filter(pk=666).exists())
        self.assertIsNone(self.cache.maturity_v1_loader(666))

    def test_maturity_v1_invalidator(self):
        maturity = self.create(Maturity)
        self.assertEqual([], self.cache.maturity_v1_invalidator(maturity))

    # --- Reference ---
    def setup_reference(self):
        """Setup a Reference instance with related models."""
        maturity = self.create(
            Maturity, slug='REC', name={'en': 'Recommendation'})
        spec = self.create(
            Specification, slug='mathml2', mdn_key='MathML2',
            maturity=maturity,
            name='{"en": "MathML 2.0"}',
            uri='{"en": "http://www.w3.org/TR/MathML2/"}')
        section = self.create(
            Section, specification=spec,
            number={'en': '3.2.4'},
            name={'en': 'Number (mn)'},
            subpath={'en': 'chapter3.html#presm.mn'})
        feature = self.create(
            Feature, slug='the_feature')
        reference = self.create(
            Reference, section=section, feature=feature,
            note={'en': 'This note'})
        return reference

    def test_reference_v1_serializer(self):
        """Test serialization of Reference instance."""
        reference = self.setup_reference()
        out = self.cache.reference_v1_serializer(reference)
        expected = {
            'id': reference.id,
            'note': {'en': 'This note'},
            'section:PK': {
                'app': u'webplatformcompat',
                'model': 'section',
                'pk': reference.section.pk,
            },
            'feature:PK': {
                'app': u'webplatformcompat',
                'model': 'feature',
                'pk': reference.feature.pk,
            },
            'history:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalreference',
                'pks': [reference.history.all()[0].pk],
            },
            'history_current:PK': {
                'app': u'webplatformcompat',
                'model': 'historicalreference',
                'pk': reference.history.all()[0].pk,
            },
        }
        self.assertEqual(out, expected)

    def test_reference_v1_serializer_empty(self):
        """Test serialization of missing Reference."""
        self.assertEqual(None, self.cache.reference_v1_serializer(None))

    def test_reference_v1_loader(self):
        """Test efficent loading of Reference from database."""
        reference = self.setup_reference()
        with self.assertNumQueries(2):
            obj = self.cache.reference_v1_loader(reference.pk)
        with self.assertNumQueries(0):
            serialized = self.cache.reference_v1_serializer(obj)
        self.assertTrue(serialized)

    def test_reference_v1_loader_not_exist(self):
        """Test loading a non-existant Reference returns None."""
        self.assertFalse(Reference.objects.filter(pk=666).exists())
        self.assertIsNone(self.cache.reference_v1_loader(666))

    def test_reference_v1_invalidator(self):
        reference = self.setup_reference()
        self.assertEqual(
            self.cache.reference_v1_invalidator(reference),
            [
                ('Section', reference.section.pk, False),
                ('Feature', reference.feature.pk, False),
            ])

    # --- Section ---
    def test_section_v1_serializer(self):
        maturity = self.create(
            Maturity, slug='REC', name={'en': 'Recommendation'})
        spec = self.create(
            Specification, slug='mathml2', mdn_key='MathML2',
            maturity=maturity,
            name='{"en": "MathML 2.0"}',
            uri='{"en": "http://www.w3.org/TR/MathML2/"}')
        section = self.create(
            Section, specification=spec,
            number={'en': '3.2.4'},
            name={'en': 'Number (mn)'},
            subpath={'en': 'chapter3.html#presm.mn'})
        out = self.cache.section_v1_serializer(section)
        expected = {
            'id': section.id,
            'number': {'en': '3.2.4'},
            'name': {'en': 'Number (mn)'},
            'subpath': {'en': 'chapter3.html#presm.mn'},
            'specification:PK': {
                'app': u'webplatformcompat',
                'model': 'specification',
                'pk': spec.pk,
            },
            'references:PKList': {
                'app': u'webplatformcompat',
                'model': 'reference',
                'pks': [],
            },
            'history:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalsection',
                'pks': [section.history.all()[0].pk],
            },
            'history_current:PK': {
                'app': u'webplatformcompat',
                'model': 'historicalsection',
                'pk': section.history.all()[0].pk,
            },
        }
        self.assertEqual(out, expected)

    def test_section_v1_serializer_empty(self):
        self.assertEqual(None, self.cache.section_v1_serializer(None))

    def test_section_v1_loader(self):
        maturity = self.create(
            Maturity, slug='WD', name={'en': 'Working Draft'})
        spec = self.create(
            Specification, slug='push_api', mdn_key='Push API',
            maturity=maturity,
            name={'en': 'Push API'},
            uri={'en': (
                'https://dvcs.w3.org/hg/push/raw-file/default/index.html')}
        )
        section = self.create(
            Section, specification=spec, name={'en': ''})
        with self.assertNumQueries(3):
            obj = self.cache.section_v1_loader(section.pk)
        with self.assertNumQueries(0):
            serialized = self.cache.section_v1_serializer(obj)
        self.assertTrue(serialized)

    def test_section_v1_loader_not_exist(self):
        self.assertFalse(Section.objects.filter(pk=666).exists())
        self.assertIsNone(self.cache.section_v1_loader(666))

    def test_section_v1_invalidator(self):
        maturity = self.create(
            Maturity, slug='WD', name={'en': 'Working Draft'})
        spec = self.create(
            Specification, slug='spec', mdn_key='Spec', maturity=maturity,
            name={'en': 'Spec'},
            uri={'en': 'http://example.com/spec.html'})
        section = self.create(
            Section, specification=spec,
            name={'en': 'A section'}, subpath={'en': '#section'})
        self.assertEqual(
            [('Specification', spec.pk, False)],
            self.cache.section_v1_invalidator(section))

    # --- Specification ---
    def test_specification_v1_serializer(self):
        maturity = self.create(
            Maturity, slug='REC', name={'en': 'Recommendation'})
        spec = self.create(
            Specification, slug='mathml2', mdn_key='MathML2',
            maturity=maturity,
            name='{"en": "MathML 2.0"}',
            uri='{"en": "http://www.w3.org/TR/MathML2/"}')
        history = spec.history.all()[0]
        out = self.cache.specification_v1_serializer(spec)
        expected = {
            'id': spec.id,
            'slug': 'mathml2',
            'mdn_key': 'MathML2',
            'name': {'en': 'MathML 2.0'},
            'uri': {'en': 'http://www.w3.org/TR/MathML2/'},
            'sections:PKList': {
                'app': u'webplatformcompat',
                'model': 'section',
                'pks': [],
            },
            'maturity:PK': {
                'app': u'webplatformcompat',
                'model': 'maturity',
                'pk': maturity.pk,
            },
            'history:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalspecification',
                'pks': [history.pk],
            },
            'history_current:PK': {
                'app': u'webplatformcompat',
                'model': 'historicalspecification',
                'pk': history.pk,
            },
        }
        self.assertEqual(out, expected)

    def test_specification_v1_serializer_empty(self):
        self.assertEqual(None, self.cache.specification_v1_serializer(None))

    def test_specification_v1_loader(self):
        maturity = self.create(
            Maturity, slug='WD', name={'en': 'Working Draft'})
        spec = self.create(
            Specification, slug='push-api', maturity=maturity,
            name={'en': 'Push API'},
            uri={'en': (
                'https://dvcs.w3.org/hg/push/raw-file/default/index.html')}
        )
        with self.assertNumQueries(3):
            obj = self.cache.specification_v1_loader(spec.pk)
        with self.assertNumQueries(0):
            serialized = self.cache.specification_v1_serializer(obj)
        self.assertTrue(serialized)

    def test_specification_v1_loader_not_exist(self):
        self.assertFalse(Specification.objects.filter(pk=666).exists())
        self.assertIsNone(self.cache.specification_v1_loader(666))

    def test_specification_v1_invalidator(self):
        maturity = self.create(
            Maturity, slug='WD', name={'en': 'Working Draft'})
        spec = self.create(
            Specification, slug='spec', maturity=maturity,
            name={'en': 'Spec'},
            uri={'en': 'http://example.com/spec.html'})
        self.assertEqual(
            [('Maturity', maturity.pk, False)],
            self.cache.specification_v1_invalidator(spec))

    # --- Support ---
    def test_support_v1_serializer(self):
        browser = self.create(Browser)
        version = self.create(Version, browser=browser, version='1.0')
        feature = self.create(Feature, slug='feature')
        support = self.create(Support, version=version, feature=feature)
        out = self.cache.support_v1_serializer(support)
        expected = {
            'id': support.id,
            'support': u'yes',
            'prefix': u'',
            'prefix_mandatory': False,
            'alternate_name': u'',
            'alternate_mandatory': False,
            'requires_config': u'',
            'default_config': u'',
            'protected': False,
            'note': {},
            'version:PK': {
                'app': u'webplatformcompat',
                'model': 'version',
                'pk': version.id,
            },
            'feature:PK': {
                'app': u'webplatformcompat',
                'model': 'feature',
                'pk': feature.id,
            },
            'history:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalsupport',
                'pks': [support.history.all()[0].pk],
            },
            'history_current:PK': {
                'app': u'webplatformcompat',
                'model': 'historicalsupport',
                'pk': support.history.all()[0].pk,
            },
        }
        self.assertEqual(out, expected)

    def test_support_v1_serializer_empty(self):
        self.assertEqual(None, self.cache.support_v1_serializer(None))

    def test_support_v1_loader(self):
        browser = self.create(Browser)
        version = self.create(Version, browser=browser, version='1.0')
        feature = self.create(Feature, slug='feature')
        support = self.create(Support, version=version, feature=feature)
        with self.assertNumQueries(2):
            obj = self.cache.support_v1_loader(support.pk)
        with self.assertNumQueries(0):
            serialized = self.cache.support_v1_serializer(obj)
        self.assertTrue(serialized)

    def test_support_v1_loader_not_exist(self):
        self.assertFalse(Support.objects.filter(pk=666).exists())
        self.assertIsNone(self.cache.support_v1_loader(666))

    def test_support_v1_invalidator(self):
        browser = self.create(Browser)
        version = self.create(Version, browser=browser, version='1.0')
        feature = self.create(Feature, slug='feature')
        support = self.create(Support, version=version, feature=feature)
        expected = [
            ('Version', version.id, True),
            ('Feature', feature.id, True),
        ]
        self.assertEqual(expected, self.cache.support_v1_invalidator(support))

    # --- Version ---
    def test_version_v1_serializer(self):
        browser = self.create(Browser)
        version = self.create(Version, browser=browser)
        out = self.cache.version_v1_serializer(version)
        expected = {
            'id': version.id,
            'version': u'',
            'release_day:Date': None,
            'retirement_day:Date': None,
            'status': u'unknown',
            'release_notes_uri': {},
            'note': {},
            '_order': 0,
            'browser:PK': {
                'app': u'webplatformcompat',
                'model': 'browser',
                'pk': browser.id,
            },
            'supports:PKList': {
                'app': u'webplatformcompat',
                'model': 'support',
                'pks': [],
            },
            'history:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalversion',
                'pks': [version.history.all()[0].pk],
            },
            'history_current:PK': {
                'app': u'webplatformcompat',
                'model': 'historicalversion',
                'pk': version.history.all()[0].pk,
            },
        }
        self.assertEqual(out, expected)

    def test_version_v1_serializer_empty(self):
        self.assertEqual(None, self.cache.version_v1_serializer(None))

    def test_version_v1_loader(self):
        browser = self.create(Browser)
        version = self.create(Version, browser=browser)
        with self.assertNumQueries(3):
            obj = self.cache.version_v1_loader(version.pk)
        with self.assertNumQueries(0):
            serialized = self.cache.version_v1_serializer(obj)
        self.assertTrue(serialized)

    def test_version_v1_loader_not_exist(self):
        self.assertFalse(Version.objects.filter(pk=666).exists())
        self.assertIsNone(self.cache.version_v1_loader(666))

    def test_version_v1_invalidator(self):
        browser = self.create(Browser)
        version = self.create(Version, browser=browser)
        expected = [('Browser', browser.id, True)]
        self.assertEqual(expected, self.cache.version_v1_invalidator(version))

    # --- User ---
    def test_user_v1_serializer(self):
        user = self.create(
            User, date_joined=datetime(2014, 9, 22, 8, 14, 34, 7, UTC))
        out = self.cache.user_v1_serializer(user)
        expected = {
            'id': user.id,
            'username': '',
            'date_joined:DateTime': '1411373674.000007',
            'changesets:PKList': {
                'app': 'webplatformcompat',
                'model': 'changeset',
                'pks': []
            },
            'group_names': ['change-resource'],
        }
        self.assertEqual(expected, out)

    def test_user_v1_serializer_empty(self):
        self.assertEqual(None, self.cache.user_v1_serializer(None))

    def test_user_v1_inactive(self):
        # Inactive users serialize to None (treated as not cacheable).
        user = self.create(
            User, date_joined=datetime(2014, 9, 22, 8, 14, 34, 7, UTC),
            is_active=False)
        out = self.cache.user_v1_serializer(user)
        self.assertEqual(out, None)

    def test_user_v1_loader(self):
        user = self.create(User)
        with self.assertNumQueries(3):
            obj = self.cache.user_v1_loader(user.pk)
        with self.assertNumQueries(0):
            serialized = self.cache.user_v1_serializer(obj)
        self.assertTrue(serialized)

    def test_user_v1_loader_not_exist(self):
        self.assertFalse(User.objects.filter(pk=666).exists())
        self.assertIsNone(self.cache.user_v1_loader(666))

    def test_user_v1_invalidator(self):
        user = self.create(User)
        self.assertEqual([], self.cache.user_v1_invalidator(user))
|
import json
import requests
import datetime
class Boosters:
    """Wrapper around the Slothpixel boosters API for one gamemode.

    Fetches the booster list for *gamemode* once at construction time and
    exposes per-booster accessors.  Booster numbers are 1-based.
    """

    def __init__(self, gamemode: str):
        self.gamemode = gamemode
        # Single network fetch; all accessors read the cached JSON list.
        self.get_boosters_link = requests.get(f'https://api.slothpixel.me/api/boosters/{self.gamemode}')
        self.boosters_data = json.loads(self.get_boosters_link.text)
        self.amount = len(self.boosters_data)

    def _entry(self, booster_number: int):
        """Return the raw dict for a 1-based booster number.

        Also records the 0-based index on self.booster_number, matching
        the original accessors' side effect.
        """
        self.booster_number = booster_number - 1
        return self.boosters_data[int(self.booster_number)]

    def get_user(self, booster_number: int):
        """UUID of the player who activated the booster."""
        return self._entry(booster_number)['uuid']

    def get_multiplier(self, booster_number: int):
        """Coin multiplier of the booster."""
        return self._entry(booster_number)['multiplier']

    def get_activated(self, booster_number: int):
        """Activation time as a local datetime (API gives epoch millis)."""
        return datetime.datetime.fromtimestamp(
            round(self._entry(booster_number)['activated'] / 1000))

    def get_length(self, booster_number: int):
        """Return (original_length, remaining length) in seconds.

        Bug fix: this getter previously stored the values on the instance
        but returned None, unlike every sibling getter.  The attributes
        are still set for backward compatibility.
        """
        entry = self._entry(booster_number)
        self.original_length = entry['original_length']
        self.length = entry['length']
        return (self.original_length, self.length)

    def get_active(self, booster_number: int):
        """Whether the booster is currently running."""
        return self._entry(booster_number)['active']
def get_gamemodes():
    """Fetch the boosters index and return the gamemode names as a list.

    Iterating the 'boosters' mapping yields its keys (the gamemodes).
    """
    response = requests.get(f'https://api.slothpixel.me/api/boosters')
    payload = json.loads(response.text)
    return [mode for mode in payload['boosters']]
"""
Copyright 1999 Illinois Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL ILLINOIS INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Illinois Institute
of Technology shall not be used in advertising or otherwise to promote
the sale, use or other dealings in this Software without prior written
authorization from Illinois Institute of Technology.
"""
from os import makedirs
from os.path import exists
import pandas as pd
try:
from ..utils.file_manager import fullPath
except: # for coverage
from utils.file_manager import fullPath
class XV_CSVManager:
    """
    A class taking care of writing results including csv file and failedcases file
    """
    def __init__(self, dir_path):
        """
        init with directory path
        :param dir_path: directory under which the xv_results folder lives
        """
        self.dataframe = None
        self.result_path = fullPath(dir_path, "xv_results")
        self.filename = fullPath(self.result_path, 'summary.csv')
        self.colnames = ['Filename', 'Histogram', 'Comment']
        self.loadSummary()

    def loadSummary(self):
        """
        Load summary.csv file and keep data in self.dataframe
        (an empty frame with the standard columns if the file is absent)
        :return:
        """
        if not exists(self.filename):
            self.dataframe = pd.DataFrame(columns=self.colnames)
        else:
            self.dataframe = pd.read_csv(self.filename)

    def writeNewData(self, xrayViewer):
        """
        Add new data to dataframe, then re-write summary.csv
        :param xrayViewer: viewer object providing img_name and hist
        :return: -
        """
        if not exists(self.result_path):
            makedirs(self.result_path)
        img_name = xrayViewer.img_name
        # Drop any previous row for this image before appending.
        self.removeData(img_name)
        data = {}
        # If there is no result
        if xrayViewer.hist == []:
            for k in self.dataframe.columns:
                data[k] = '-'
            data['Filename'] = img_name
            # Bug fix: the key must match the 'Comment' column name; the
            # lowercase 'comment' key was silently dropped by to_csv below.
            data['Comment'] = "No slice or box selected"
        else:
            # Get all needed infos
            data['Filename'] = img_name
            data['Histogram'] = xrayViewer.hist
        self.dataframe = pd.concat([self.dataframe, pd.DataFrame.from_records([data])])
        # Bug fix: reset_index returns a new frame — the result must be
        # assigned back (the old call was a no-op); drop the stale index.
        self.dataframe = self.dataframe.reset_index(drop=True)
        self.dataframe.to_csv(self.filename, index=False, columns=self.colnames)  # Write to csv file

    def removeData(self, img_name):
        """
        Remove data from dataframe
        :param img_name: (str)
        :return:
        """
        self.dataframe = self.dataframe[self.dataframe["Filename"] != img_name]
# Generated by Django 2.1.3 on 2018-11-06 14:52
from django.db import migrations


class Migration(migrations.Migration):
    """Auto-generated migration: removes the phone field from the employee model."""

    dependencies = [
        ('core', '0035_auto_20181107_0348'),
    ]

    operations = [
        # Note: dropping the column discards any stored phone values.
        migrations.RemoveField(
            model_name='employee',
            name='phone',
        ),
    ]
|
import json
import requests
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from weather.average import average
from weather.parser import parse_weather
from django.views.decorators.cache import cache_page
def index(request):
    """Redirect any request for the site root to the /weather/ endpoint."""
    weather_url = '/weather/'
    return HttpResponseRedirect(weather_url)
@cache_page(60 * 2)  # cache the rendered response for 2 minutes
def main(request):
    """Return the weather forecast as JSON.

    Query parameters:
        unit      -- temperature unit, defaults to 'celsius'
        daysCount -- number of forecast days, defaults to '10'

    The parsed forecast is augmented with a 'forecastsAverage' entry
    computed by average() over the parsed data.
    """
    unit = request.GET.get('unit', 'celsius')
    days_count = request.GET.get('daysCount', '10')
    try:
        days = int(days_count)
    except ValueError:
        # Robustness fix: a non-numeric daysCount used to raise an
        # unhandled ValueError (HTTP 500); fall back to the default.
        days = 10
    parsed_data = parse_weather(unit=unit, days_count=days)
    average_data = average(parsed_data)
    parsed_data['forecastsAverage'] = average_data
    # ensure_ascii=False keeps non-ASCII text readable in the JSON payload.
    return JsonResponse(parsed_data, json_dumps_params={'ensure_ascii': False})
|
class Road:
    """A road segment between two junctions of a traffic network."""

    def __init__(self, id, length, speed_limit, lane_counts, start_id, end_id,
                 is_bothway):
        self.__id = id
        self.__length = length
        self.__speed_limit = speed_limit
        self.__lane_counts = lane_counts
        self.__start_id = start_id
        self.__end_id = end_id
        self.__is_bothway = is_bothway
        self.__direction = 1
        # BUG FIX: initialise the car list; previously get_on_road_cars()
        # raised AttributeError when called before set_on_road_cars().
        self.__on_road_cars = []

    def get_id(self):
        return self.__id

    def get_length(self):
        return self.__length

    def get_speed_limit(self):
        return self.__speed_limit

    def get_lane_counts(self):
        return self.__lane_counts

    def get_start_id(self):
        return self.__start_id

    def get_end_id(self):
        return self.__end_id

    def is_bothway(self):
        return self.__is_bothway

    def set_direction(self, direction):
        self.__direction = direction

    def get_direction(self):
        return self.__direction

    def set_on_road_cars(self, cars):
        self.__on_road_cars = cars

    # BUG FIX: get_on_road_cars was defined twice; the duplicate is removed.
    def get_on_road_cars(self):
        return self.__on_road_cars
|
#this is the example of file using python
wertwteryadfg
def game():
    """Ask the player for a score on stdin and return it as an integer."""
    score_text = input("enter score ")
    return int(score_text)
score=game()
with open("Highscore.txt") as f:ertewt
hiScortyeteStr=f.read()
if hiScoreStr=='':yer
asfh
adf
gh
adrgh
ad
hjryj
ast
gae
g
eagod ge
if
sad
f
as
fias gaer
t
a4ger
yet
with open("Highscore.txt","w") asyaer f:e
choice = tor no in lowercase ")
if choice == "yes":aw argerya eg
print("Appreciate your patiencewtwet")
print(f.read())yeer
|
from django.conf.urls import patterns, include, url
import xadmin
from xadmin.plugins import xversion

# Scan installed apps for admin registrations (xadmin's equivalent of
# django.contrib.admin.autodiscover).
xadmin.autodiscover()
# Register models with xversion for object version control.
xversion.register_models()

# NOTE(review): patterns() was removed in Django 1.10 -- this file targets
# an older Django release; verify before upgrading.
urlpatterns = patterns('',
    url(r'^admin/', include(xadmin.site.urls)),
    url(r'^tea/', include('teaman.tea.urls')),
    url(r'^$', include('teaman.tea.urls')),
)
|
import xmltodict
import cPickle as pickle
import sys,os
import re
class ForumPost(object):
    """A single forum post parsed from an XML export file."""

    def __init__(self, xml_file_name):
        """Parse the XML file and populate post metadata and cleaned text.

        :param xml_file_name: path to a single-post XML document
        """
        with open(xml_file_name, 'r') as data:
            parsed_data = xmltodict.parse(data.read())
        # Py2/Py3 compatibility fix: dict.keys() is a view in Python 3 and
        # does not support indexing.
        self.post_type = list(parsed_data.keys())[0]
        self.message_type = parsed_data[self.post_type][u'message'][u'@type']
        message = parsed_data[self.post_type][u'message']
        self.board_id = message[u'board_id'][u'#text']
        try:
            self.original_text = message[u'body'][u'#text']
            self.message = self.clean_message(self.original_text)
        # Py2/Py3 compatibility fix: 'except Exception, e' is a SyntaxError
        # in Python 3; 'as e' works in both.
        except Exception as e:
            self.original_text = ""
            self.message = ""
        self.author = message[u'author'][u'login'][u'#text']
        # Classification labels, filled in later by external code.
        self.label = ''
        self.label_fg = ''

    def clean_message(self, message):
        """Strip markup tags from *message*, turning emoticon <img> tags
        into '#<name>' hashtags, and normalise punctuation.
        """
        text = ''
        index = 0
        if '<' not in message:
            # BUG FIX: was 'return messsage' (typo), which raised NameError
            # for every message that contains no markup.
            return message
        while index < len(message):
            inicio = index
            opening = message[inicio:].find('<')
            if opening == -1:
                text += message[inicio+1:-1]
                break
            else:
                if inicio == 0:
                    text += message[inicio:opening+inicio]
                else:
                    text += message[inicio+1:opening+inicio]
                index += opening
            closing = message[index:].find('>')
            if 'emoticon-' in message[opening:closing+index]:
                # given <img class="emoticon emoticon-smileyhappy" id...>
                # extract smileyhappy
                emoticon = message[opening:closing+index].split('emoticon-')[1].split('"')[0]
                text += ' #%s' % emoticon
                text += ' '
            index += closing
        # NOTE(review): these replacement literals look mangled in the
        # source dump (the first may originally have been a non-breaking
        # space or an HTML entity) -- verify against the upstream repo.
        text = text.replace(" ", " ")
        text = text.replace("<", "")
        text = text.replace(">", "")
        text = text.replace("(", "")
        text = text.replace(")", "")
        text = text.replace("_", "")
        text = text.replace(".", " . ")
        return text
# Name of the database adapter in use.
POSTGRES_ADAPTER = 'postgres'
# Default limit applied to database queries (rows per query, presumably).
DB_QUERY_LIMIT = 10
|
#!/usr/bin/env python3.5
#coding: utf-8

import licant
from licant.core import core
from licant.modules import module, submodule
from licant.cxx_modules import make as make_module
from licant.make import make as make
import licant.util as gu
from licant.scripter import scriptq

# Pull in the shared gxx build description from the repository root.
scriptq.execute("../../gxx.g.py")

# Declare the "main" application module: one C++ source plus the gxx
# submodules it links against.
module("main",
    target = "target",
    type = "application",
    sources = ["main.cpp"],
    include_paths = ["../.."],
    modules = [
        submodule("gxx", "posix"),
        submodule("gxx.dprint", "stdout"),
    ]
)

# Build "main" as C++ with the gnu++14 standard.
make_module("main",
    cxxstd = "gnu++14",
)

# Kick off the build of the final target.
target = "target"
licant.make.doit(target)
# Generated by Django 3.2.7 on 2021-09-24 07:36
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration: adds a 'mail' email field to the lecturer model."""

    dependencies = [
        ('courses', '0002_lectureclasssession'),
    ]

    operations = [
        migrations.AddField(
            model_name='lecturer',
            name='mail',
            # Existing rows are backfilled with the placeholder ' - '.
            field=models.EmailField(default=' - ', max_length=254),
        ),
    ]
|
"""
Application setup and initialization code
"""
import os
import logging
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
db = SQLAlchemy(app)
from .routes import api, pages
|
#Libraries
import RPi.GPIO as GPIO
import sys
import telepot
import time
#IMPORT LIBRARY OS
import os

# Telegram notification texts (Indonesian: "water empty" / "water available").
text = 'AIR HABIS'
text1 = 'AIR TERSEDIA'
# NOTE(review): CHATID is an undefined placeholder -- substitute the real
# Telegram chat id before running, otherwise this line raises NameError.
chat_id = CHATID

GPIO.setwarnings(False)  # Ignore warning for now
GPIO.setmode(GPIO.BCM)   # BCM (GPIO) numbering -- not physical pin numbers
GPIO.setup(15, GPIO.IN, pull_up_down=GPIO.PUD_UP)

# Create the bot once instead of duplicating it in each branch
# ('TOKEN' is a placeholder for the real bot token).
bot = telepot.Bot('TOKEN')
# Sensor on GPIO15 (pulled up): non-LOW sends the "empty" message,
# LOW sends the "available" message -- confirm wiring before relying on this.
if GPIO.input(15) != GPIO.LOW:
    bot.sendMessage(chat_id, text)
else:
    bot.sendMessage(chat_id, text1)
|
class Party:
    """Tracks the budget and cake bookkeeping for a party."""

    def __init__(self, number_of_persons, budget):
        self.number_of_persons = number_of_persons
        self.budget = budget
        self.left_over_money = budget
        # The per-round counters start zeroed; reset() re-zeroes them later.
        self.reset()

    def reset(self):
        """Zero the per-round counters (money left is kept as-is)."""
        self.left_over_pieces = 0
        self.cake_count = 0
        self.cost = 0

    def cake(self, pie):
        """Record the cake (pie) chosen for the party."""
        self.pie = pie
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 20 17:08:46 2017
@author: dgratz
"""
import PyLongQt as pylqt
from multiprocessing import Pool
# Read the simulation settings once at import time; lastProto is the
# template protocol that each worker clones.
settings = pylqt.Misc.SettingsIO.getInstance()
proto = pylqt.Protocols.GridProtocol()
settings.readSettings(proto, 'D:/synchrony-data/2SAN1RandLogNormal.xml')
lastProto = settings.lastProto.clone()


def runSim(num):
    """Run one simulation in its own data directory and return its index.

    :param num: simulation number, used to build a unique output directory
    """
    proto = lastProto.clone()
    # Recompute the ion-channel parameters for this run (presumably
    # randomized per run -- verify against PyLongQt docs).
    proto.pvars.calcIonChanParams()
    proto.setDataDir('D:/synchrony-data/2SAN1RandLogNormal/' + str(num))
    proto.runSim()
    settings.writeSettings(proto, proto.datadir + '/' + proto.simvarfile)
    return num


# BUG FIX: multiprocessing requires creating the Pool under a __main__
# guard; without it, spawn-based platforms (Windows/macOS) re-execute the
# module in every worker and recurse endlessly.
if __name__ == '__main__':
    p = Pool(4)
    print(p.map(runSim, range(300)))
import datetime
import json
import pdb
from django.http import HttpResponse, HttpResponseBadRequest
from django.http.response import HttpResponseServerError
from django.shortcuts import get_object_or_404
from zeep import Client, Transport
from zeep.cache import SqliteCache
from django.conf import settings
from zeep.helpers import serialize_object
from dateutil import parser
from .util import parsearFecha, MyEncoder
from .decoradores import access_token_requerido
from .models import Reserva, Cliente, Vendedor
# Shared zeep SOAP client with a SQLite-backed cache for the WSDL.
transport = Transport(cache=SqliteCache())
soap = Client(settings.URL_WSDL, transport=transport)
# NOTE(review): second client with a hard-coded WSDL URL and no caching
# transport; some views use `client`, others `soap` -- confirm whether both
# endpoints are intentional.
client = Client("http://romeroruben-001-site1.itempurl.com/WCFReservaVehiculos.svc?singlewsdl")
@access_token_requerido
def getClientes(request):
    """Return every stored client as a JSON array."""
    payload = [cliente.dic() for cliente in Cliente.objects.all()]
    return HttpResponse(json.dumps(payload), content_type='application/json')
@access_token_requerido
def getVendedores(request):
    """Return every stored vendor as a JSON array."""
    payload = [vendedor.dic() for vendedor in Vendedor.objects.all()]
    return HttpResponse(json.dumps(payload), content_type='application/json')
@access_token_requerido
def getPaises(request):
    """Return the list of countries from the SOAP service as JSON."""
    respuesta = serialize_object(client.service.ConsultarPaises())
    paises = respuesta['Paises']['PaisEntity']
    return HttpResponse(json.dumps(paises), content_type='application/json')
@access_token_requerido
def getCiudades(request, idPais):
    """Return the cities of the given country from the SOAP service as JSON."""
    respuesta = serialize_object(client.service.ConsultarCiudades({"IdPais": idPais}))
    ciudades = respuesta['Ciudades']['CiudadEntity']
    return HttpResponse(json.dumps(ciudades), content_type='application/json')
@access_token_requerido
def getVehiculosDisponibles(request, idCiudad):
    """Return the vehicles available in a city between two dates as JSON.

    Expects 'retiro' and 'devolucion' date strings as query parameters.
    """
    retiro = parsearFecha(request.GET.get('retiro'))
    devolucion = parsearFecha(request.GET.get('devolucion'))
    consulta = {
        'IdCiudad': idCiudad,
        'FechaHoraRetiro': retiro,
        'FechaHoraDevolucion': devolucion,
    }
    respuesta = serialize_object(soap.service.ConsultarVehiculosDisponibles(consulta))
    vehiculos = respuesta['VehiculosEncontrados']['VehiculoModel']
    return HttpResponse(json.dumps(vehiculos, cls=MyEncoder), content_type='application/json')
@access_token_requerido
def reservas(request):
    """List all bookings (GET) or register a new booking (POST)."""
    if request.method == 'GET':
        # Devuelve todas las reservas
        lista = [reserva.dic() for reserva in Reserva.objects.all()]
        return HttpResponse(json.dumps(lista, cls=MyEncoder), content_type="application/json")
    elif request.method == 'POST':
        # Registra una reserva
        datos_input = json.loads(request.body.decode("utf-8"))
        # Robustness fix: use .get() so a missing key yields the
        # 'Faltan datos' response below instead of an unhandled KeyError
        # (HTTP 500).
        nombre = datos_input.get('nombre')
        apellido = datos_input.get('apellido')
        dni = datos_input.get('dni')
        fechaRetiro = datos_input.get('fechaRetiro')
        fechaDevolucion = datos_input.get('fechaDevolucion')
        idVehiculoCiudad = datos_input.get('idVehiculoCiudad')
        idVendedor = datos_input.get('idVendedor')
        idPais = datos_input.get('idPais')
        if nombre and apellido and dni and idVehiculoCiudad and fechaDevolucion and fechaRetiro \
                and idVendedor:
            datos = {
                'ApellidoNombreCliente': "%s , %s" % (apellido, nombre),
                'FechaHoraDevolucion': parsearFecha(fechaDevolucion),
                'FechaHoraRetiro': parsearFecha(fechaRetiro),
                'IdVehiculoCiudad': idVehiculoCiudad,
                'NroDocumentoCliente': dni
            }
            # Register the booking on the remote SOAP service first.
            response = soap.service.ReservarVehiculo(datos)
            data = serialize_object(response)
            data = data['Reserva']
            cliente, creado = Cliente.objects.get_or_create(
                nombre=nombre,
                apellido=apellido,
                nro_documento=dni
            )
            vendedor = Vendedor.objects.get(id=idVendedor)
            datos_reserva = Reserva(
                codigo_reserva=data['CodigoReserva'],
                fecha_reserva=data['FechaReserva'],
                id_cliente=cliente,
                id_vendedor=vendedor,
                costo=float(data['VehiculoPorCiudadEntity']['VehiculoEntity']['PrecioPorDia']),
                # Sale price carries a fixed 20% markup over cost.
                precio_venta=float(data['VehiculoPorCiudadEntity']['VehiculoEntity']['PrecioPorDia']) * 1.20,
                id_vehiculo_ciudad=data['VehiculoPorCiudadId'],
                id_ciudad=data['VehiculoPorCiudadEntity']['CiudadId'],
                id_pais=idPais,
            )
            # BUG FIX: save() is an instance method that returns None; the
            # original `reserva = Reserva.save(datos_reserva)` then returned
            # json.dumps(None) ("null"). Save the instance and serialise the
            # stored booking, mirroring the GET branch.
            datos_reserva.save()
            return HttpResponse(json.dumps(datos_reserva.dic(), cls=MyEncoder),
                                content_type="application/json")
        else:
            return HttpResponseBadRequest('Faltan datos')
    return HttpResponseBadRequest('')
@access_token_requerido
def reserva(request, idReserva):
    """Fetch (GET) or delete (DELETE) a single booking by id."""
    if request.method == 'DELETE':
        try:
            Reserva.objects.get(id=idReserva).delete()
        except Exception as ex:
            print(ex)
            return HttpResponseServerError('Ha ocurrido un error')
        return HttpResponse('')
    elif request.method == 'GET':
        encontrada = get_object_or_404(Reserva, id=idReserva)
        return HttpResponse(json.dumps(encontrada.dic(), cls=MyEncoder),
                            content_type="application/json")
|
import os
import uuid

from sqlalchemy import Column, Integer, Text, create_engine
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Schema( Base ):
    """ORM row recording a database schema name and its version/revision."""
    __tablename__ = 'db_schema'

    # Primary key: a UUID stored as text.
    uuid = Column( Text, primary_key = True )
    schema = Column( Text, nullable = False )
    ver = Column( Integer, nullable = False )
    rev = Column( Integer, nullable = False )

    def __init__( self, schema, ver, rev ):
        # BUG FIX: store the UUID as a string -- the column is Text, and a
        # raw UUID object is rejected by DB-API drivers such as sqlite3.
        self.uuid = str( uuid.uuid1() )
        self.schema = schema
        self.ver = ver
        self.rev = rev

    def __repr__( self ):
        return 'Schema( %r, %r, %r, %r )' % ( self.uuid, self.schema, self.ver, self.rev )
def back_up_db_file( dbfile ):
    """Copy *dbfile* to the next free '<dbfile>.bakN' backup name.

    N starts at 0 and increments until an unused name is found.
    """
    n = 0
    while os.path.isfile( dbfile + '.bak' + str( n ) ):
        n += 1
    # BUG FIX / modernisation: the original used the Python-2-only file()
    # builtin and leaked the source handle on error; context managers close
    # both handles on every path.
    with open( dbfile, 'rb' ) as src, open( dbfile + '.bak' + str( n ), 'wb' ) as dst:
        while True:
            buff = src.read( 1024 )
            if not buff:
                break
            dst.write( buff )
def _determine_db_version( dbfile, schema, migrator ):
    """Open *dbfile* with SQLAlchemy to inspect its schema version.

    NOTE(review): looks like an unfinished stub -- the engine is created but
    never used and *schema*/*migrator* are ignored; confirm intent.
    """
    # BUG FIX: the original referenced the undefined name 'database_file';
    # the parameter is called 'dbfile'.
    engine = create_engine( 'sqlite:///' + dbfile )
def migrate( dbfile, migrator ):
    """Step the database at *dbfile* up to (VERSION, REVISION).

    Re-reads the version after every upgrade step and loops until the
    database matches; each structural upgrade is preceded by a file backup.
    Raises RuntimeError for versions/revisions newer than this code.
    NOTE(review): *migrator* is never used, and log/VERSION/REVISION/
    SqlLiteDatabase/pre8/ver8tables/ver8rules are module-level names not
    visible in this chunk -- verify they are imported elsewhere.
    """
    session = SqlLiteDatabase( dbfile )
    dbi = ver8tables.DatabaseInfo( session.get_table( 'dbi' ) )
    while( True ):
        ver = dbi.get_version()
        rev = dbi.get_revision()
        log.debug( 'Database is version v%s', str( ver ) )
        if( ver != VERSION or rev != REVISION ):
            # Back-up the dbfile
            back_up_db_file( dbfile )
            # One upgrade step per pass; 'continue' re-checks the version.
            if( ver == 0 ):
                pre8.upgrade_from_0_to_1( log, session )
                continue
            elif( ver == 1 ):
                pre8.upgrade_from_1_to_2( log, session )
                continue
            elif( ver == 2 ):
                pre8.upgrade_from_2_to_3( log, session )
                continue
            elif( ver == 3 ):
                pre8.upgrade_from_3_to_4( log, session )
                continue
            elif( ver == 4 ):
                pre8.upgrade_from_4_to_5( log, session )
                continue
            elif( ver == 5 ):
                pre8.upgrade_from_5_to_6( log, session )
                continue
            elif( ver == 6 ):
                pre8.upgrade_from_6_to_7( log, session )
                continue
            elif( ver == 7 ):
                pre8.upgrade_from_7_to_8( log, session )
                continue
            elif( ver == 8 and rev == 0 ):
                ver8rules.upgrade_from_8_to_8_1( log, session )
                continue
            else:
                raise RuntimeError( 'Incompatible database version' )
        elif( dbi.get_revision() > REVISION ):
            raise RuntimeError( 'Incompatible database revision' )
        elif( dbi.get_revision() != REVISION ):
            # Same version, older revision: just stamp the new revision.
            dbi.set_revision( REVISION )
            session.commit()
        break
    session.commit()
    session.close()
|
def hash_kilian(string):
    """Fold *string* into a single byte.

    Three byte accumulators (seeded 0, 42, 69) are visited round-robin;
    slot s is XORed with (ord(char) + s) % 256, and the result is the sum
    of the accumulators modulo 256.
    """
    acc = [0, 42, 69]
    for pos, char in enumerate(string):
        slot = pos % 3
        acc[slot] ^= (ord(char) + slot) % 256
    return sum(acc) % 256
def hash_johan(string):
    """Fold *string* into a single byte.

    Four accumulators (seeded 19, 46, 67, 123); slot i%4 is overwritten
    with ord(char) XOR (next slot + absolute position i); the result is
    the sum of the accumulators modulo 256.
    """
    val = [19, 46, 67, 123]
    for i, char in enumerate(string):
        val[i % 4] = ord(char) ^ (val[(i + 1) % 4] + i)
    return sum(val) % 256
# Brute-force every 4-character string over the char range [33, 36) and
# report hash collisions for both hash functions.
inf = 33
sup = 36
strings = [chr(a) + chr(b) + chr(c) + chr(d)
           for a in range(inf, sup)
           for b in range(inf, sup)
           for c in range(inf, sup)
           for d in range(inf, sup)]

hashes_k = dict()
hashes_j = dict()
collisions_k = 0
collisions_j = 0
for candidate in strings:
    hk = hash_kilian(candidate)
    hj = hash_johan(candidate)
    if hk in hashes_k:
        print("K Collision between {} and {}.".format(hashes_k[hk], candidate))
        collisions_k += 1
    else:
        hashes_k[hk] = candidate
    if hj in hashes_j:
        print("J Collision between {} and {}.".format(hashes_j[hj], candidate))
        collisions_j += 1
    else:
        hashes_j[hj] = candidate

# Collision rates as percentages of the candidate pool.
print("{:%} collisions k".format(collisions_k / len(strings)))
print("{:%} collisions j".format(collisions_j / len(strings)))
|
from os import listdir;
import markdown
import re
import json
from datetime import datetime, date
from collections import defaultdict
import pytz
from feedgen.feed import FeedGenerator
from framework import generateFuncTemplate
"""
logic goes:
# Go through each markdown file
# Create post data object
# Note that post.md will have special syntax for determining file
# After creating post data file order posts by:
# Date
# Topic
# Then append this information for each post
# This is all information required to actually generate file for each post type
# If byDate is empty then don't include it etc
# To generate the correct header, base it on the directory structure for posts (?)
# e.g if posts/art then replace post object with desired html and so on
# For light and dark, generate 2 css variables for light and dark and then conditionally create them
# Constants folder(? - Good enough)
# Then use this to generate navigation for each type
# Include special markdown file for topics
topics: [
{
topic: string
descrip: string
}
]
post: {
id: randomValue:
name: string,
date: date,
topic: enum,
path: file.html,
nextPost: {
byDate: string
byTopic: string
},
prevPost: {
byDate: string
byTopic: string
}
}
"""
def getTopics():
    """Load topic definitions from assets/topics.json, attaching an empty
    post list to each topic.

    Returns a list of topic dicts: [{..., "posts": []}, ...].
    """
    # Idiom fix: parse the file directly instead of stripping and
    # re-joining every line by hand (which could also corrupt JSON strings
    # containing significant leading/trailing whitespace).
    with open("assets/topics.json", 'r') as f:
        topics = json.load(f)
    for topic in topics:
        topic["posts"] = []
    return topics
#[
# {topic: name,
# posts: [name]
# }
#]
def initOutput(focused, posts):
    """Build the shared page scaffold (template + header) with the given
    navigation tab marked as focused.
    """
    header_args = {
        "ricky": "focused" if focused == "ricky" else "",
        "posts": "focused" if focused == "posts" else "",
        "archive": "focused" if focused == "archive" else "",
        "mostRecentPost": posts[0]["path"] + ".html",
    }
    header = generateFuncTemplate("./assets/header.html")(**header_args)
    return generateFuncTemplate("./assets/template.html")(header)
# Should be fairly easy to implement now :)
posts = []
topics = getTopics() # Gets correct topics
for i in listdir("./posts/"):
with open("./posts/"+i, 'r') as f:
post = {}
line = f.readline()
while(line.startswith("^")):
# Extract tag - set to lower case
# Extract description and set as value
tag = re.match("\^[a-zA-Z0-9]*", line).group(0)[1:]
description = line.replace("^"+tag, "").rstrip().lstrip()
if (tag.lower() == "date"):
post[tag.lower()] = datetime.strptime(description.strip(), "%Y-%m-%d")
else:
post[tag.lower()] = description.strip()
line = f.readline()
post["path"] = i[:-3]
post["nextPost"] = {"byDate": None, "byTopic": None}
post["prevPost"] = {"byDate": None, "byTopic": None}
if ("unpublished" in post.keys()):
continue
# Throw error here if topic is undefined or not within specified enum
if ("title" not in post):
raise AssertionError("no title provided")
if ("date" not in post):
raise AssertionError("no date provided")
if ("topic" not in post):
raise AssertionError("no topic provided")
topicList = [e["name"] for e in topics]
if (post["topic"] not in topicList):
raise AssertionError("topic not defined")
posts.append(post)
# Now we need to order based on date and topic
posts = sorted(posts, key = lambda x: x["date"])
posts.reverse()
# should be earliest to latest
prev = posts[0]
for post in posts[1:]:
post["nextPost"]["byDate"] = prev["path"]
prev["prevPost"]["byDate"] = post["path"]
prev = post
# Put each post into corresponding sublist based on topic:
topicsPostHolder = defaultdict(list)
for post in posts:
topicsPostHolder[post["topic"]].append(post)
for topic in topicsPostHolder:
prev = topicsPostHolder[topic][0]
for post in topicsPostHolder[topic][1:]:
post["nextPost"]["byTopic"] = prev["path"]
prev["prevPost"]["byTopic"] = post["path"]
prev = post
for topic in topics:
list1 = []
for post in topicsPostHolder[topic["name"]]:
list1.append(post)
topic["posts"] = list1
# Put each post into corresponding sublist based on year:
yearPostHolder = defaultdict(list)
for post in posts:
yearPostHolder[post["date"].year].append(post)
# So now we have topics, and posts which should be everything required to generate every page and thingy
latestPost = posts[0]
# Begin generating pages:
# Render each post page: header scaffold + article body + prev/next footer.
for post in posts:
    with open("./posts/"+post['path']+".md" , 'r') as f:
        output = initOutput("posts", posts)
        # Skip the '^' metadata header lines; the remainder is the body.
        # NOTE(review): the first non-'^' line is consumed by this loop and
        # never written out -- this relies on a blank line following the
        # metadata block; verify.
        line = f.readline()
        while(line.startswith("^")):
            line = f.readline()
        format1 = "<h1>" + post["title"] + "</h1>\n"
        format1 += "<p class = \"date\">" + post["date"].strftime("published on %B %d, %Y") + "</p>\n"
        post["description"] = markdown.markdown(f.read())
        format1 += "<div class = \"bodyStyle\">" +(post['description']) + "</div>"
        output = output(body = format1)
        # Footer links: mark a link 'invisible' when the neighbour is absent.
        output = output(generateFuncTemplate("./assets/footer.html")(
            nextPost = "" if post["nextPost"]["byDate"] else "invisible",
            nextPart = "" if post["nextPost"]["byTopic"] else "invisible",
            prevPost = "" if post["prevPost"]["byDate"] else "invisible",
            prevPart = "" if post["prevPost"]["byTopic"] else "invisible"
        )(
            nextPostLink = post["nextPost"]["byDate"]+ ".html" if post["nextPost"]["byDate"] else "",
            nextPartLink = post["nextPost"]["byTopic"]+ ".html" if post["nextPost"]["byTopic"] else "",
            prevPostLink = post["prevPost"]["byDate"]+ ".html" if post["prevPost"]["byDate"] else "",
            prevPartLink = post["prevPost"]["byTopic"]+ ".html" if post["prevPost"]["byTopic"] else ""
        ))
        with open("./compiled/" + post['path'] + ".html", 'w' ) as f:
            f.write(output)
# Archive page: posts grouped by year.
output = initOutput("archive", posts)
# Generate navigation by date:
body = generateFuncTemplate("./assets/navigationTemplate.html")(
    navigation="By date"
)(
    navigationLink = "<a href=\"topicNavigation.html\">By topic</a>"
)
for year in yearPostHolder.keys():
    body += "<h2 >" + str(year) + "</h2>\n"
    for post in yearPostHolder[year]:
        body += "<div class = \"rowFlexBox archiveNav\" ><div class = \"navDate navPadding\">"+ post["date"].strftime("%B %d") +"</div><a href = "+ post["path"] + ".html class = \"navPadding\">"+ post["title"]+"</a></div>\n"
output = output(body = body)(footer="")
with open("./compiled/" + "dateNavigation" + ".html", 'w' ) as f:
    f.write(output)

# Archive page: posts grouped by topic, with each topic's description.
output = initOutput("archive", posts)
# Generate navigation by topic:
body = generateFuncTemplate("./assets/navigationTemplate.html")(
    navigation="By topic"
)(
    navigationLink = "<a href=\"dateNavigation.html\">By date</a>"
)
for topic in topics:
    body += "<h2 style=\"margin-bottom:0;\">" + topic["name"] + "</h2>\n"
    body += "<p class = \"description\">" + topic["Description"] + "</p>\n"
    for post in topic["posts"]:
        body += "<div class = \"rowFlexBox archiveNav\" ><div class = \"navDate navPadding\">"+ post["date"].strftime("%B %d, %Y") +"</div><a href = "+ post["path"] + ".html class = \"navPadding\">"+ post["title"]+"</a></div>\n"
output = output(body = body)(footer="")
with open("./compiled/" + "topicNavigation" + ".html", 'w' ) as f:
    f.write(output)

# Landing page: just the scaffold with an empty body.
output = initOutput("ricky", posts)
output = output(body = "")(footer="")
with open("./compiled/" + "index" + ".html", 'w' ) as f:
    f.write(output)

# Feed generator
# Build the RSS feed; after the reverse, entries are added oldest-first.
fg = FeedGenerator()
fg.title("RSS Feed for rleek.github.io")
fg.link(href="https://rleek.github.io/")
fg.description("Hi, I'm Ricky and this is my personal blog")
fg.author({'name':'Ricky Liu', 'email':'rky.w.liu@gmail.com'})
fg.language("en-au")
fg.pubDate(pytz.utc.localize(datetime.today()))
posts.reverse();
for post in posts:
    fe = fg.add_entry()
    fe.title(post["title"])
    fe.link(href = "https://rleek.github.io/" + post["path"] + ".html")
    fe.author({'name':'Ricky Liu', 'email':'rky.w.liu@gmail.com'})
    fe.description(post["description"])
    fe.id("https://rleek.github.io/" + post["path"] + ".html")
    fe.pubDate(pytz.utc.localize(post["date"]))
fg.rss_file('compiled/rss.xml')
|
import pymysql

# BUG FIX: PyMySQL 1.0 removed positional connect() arguments -- use
# keyword arguments for host/user/password/database.
db = pymysql.connect(host='localhost', user='root',
                     password='123456', database='python')
cur = db.cursor()
try:
    # List the tables of the 'python' database.
    cur.execute('show tables;')
    print(cur.fetchall())
    db.commit()
finally:
    # Release the cursor and connection even if the query fails.
    cur.close()
    db.close()
|
from tornado.testing import AsyncHTTPTestCase
import web
# Fixture payloads shared by the test cases below.

# Credentials matching an existing regular user (per the variable name).
login_data_user_valid = {
    'email': 's.ivanov@lab15.ru',
    'password': 'UserPassword123',
}
# Same user with a wrong password, for negative login tests.
login_data_user_invalid = {
    'email': 's.ivanov@lab15.ru',
    'password': '000000000000000',
}
# Credentials for an administrator account.
login_data_admin_valid = {
    'email': 'a.anisimov@lab15.ru',
    'password': 'AdminPassword123',
}
# Payload for registering a brand-new user.
registry_new_user = {
    'email': 'test@test.ru',
    'password': 'testPassword',
    'first_name': 'Тест',
    'last_name': 'Тестов',
}
# Payload for adding a birthday entry.
add_new_birthday = {
    'name': 'Тест Test',
    'gender': 'F',
    'birthday': '2000-10-20',
    'comment': 'test_comment',
}
class BasicTestsClass(AsyncHTTPTestCase):
    """Base class for the test cases: supplies the tornado app under test."""

    def get_app(self):
        # Hand tornado's test harness the application instance.
        return web.make_app()
|
from script.base_api.service_science.versionInfo import *
|
from django.contrib import admin
from django.urls import include, path

# Root URL configuration: two app-specific prefixes plus the admin site.
urlpatterns = [
    path('DBStorage/', include('DBStorage.urls')),
    path('DBCalls/', include('DBCalls.urls')),
    path('admin/', admin.site.urls),
]
|
# coding: utf-8

# In[2]:
import pandas as pd

# reading csv file
ec = pd.read_csv("Data/employee_compensation.csv")

# In[3]:
# Average every numeric column per (Organization Group, Department) pair.
result = ec.groupby(['Organization Group', 'Department']).mean().reset_index()
final_result = result[['Organization Group', 'Department', 'Total Compensation']]
final_result

# In[5]:
# BUG FIX: DataFrame.sort() was deprecated in pandas 0.17 and removed in
# 0.20; sort_values() is the supported replacement.
sorted_result = final_result.sort_values(['Total Compensation'], ascending=False)
sorted_result

# In[7]:
reindexed_result = sorted_result.reset_index(drop=True)
reindexed_result.head()

# In[8]:
# Persist the departments ranked by average total compensation.
reindexed_result.to_csv('solution_q2_p1.csv', index=False, sep=',')

# In[ ]:
|
#!/usr/bin/env python3
import os
from ctypes import CDLL
from time import sleep, monotonic, process_time
from operator import itemgetter
from sys import stdout, stderr, argv, exit
from re import search
from sre_constants import error as invalid_re
from signal import signal, SIGKILL, SIGTERM, SIGINT, SIGQUIT, SIGHUP, SIGUSR1
###############################################################################
def mlockall():
    """Lock the process address space into RAM via libc's mlockall().

    Tries MCL_CURRENT|MCL_FUTURE|MCL_ONFAULT first and, when that call
    fails (non-zero result), retries without MCL_ONFAULT. The final
    result is deliberately ignored.
    """
    MCL_CURRENT = 1
    MCL_FUTURE = 2
    MCL_ONFAULT = 4
    libc = CDLL('libc.so.6', use_errno=True)
    if libc.mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT) != 0:
        libc.mlockall(MCL_CURRENT | MCL_FUTURE)
def check_mem_and_swap():
    """find mem_available, swap_total, swap_free (values in KiB).

    Relies on the module-level swap_total_index / swap_free_index line
    positions computed at startup; MemAvailable is read from line 2
    (its position is validated at startup).
    """
    with open('/proc/meminfo') as meminfo:
        for position, row in enumerate(meminfo):
            if position == 2:
                mem_available = int(row.split(':')[1][:-4])
            elif position == swap_total_index:
                swap_total = int(row.split(':')[1][:-4])
            elif position == swap_free_index:
                swap_free = int(row.split(':')[1][:-4])
                # swap_free is the last field of interest; stop reading.
                break
    return mem_available, swap_total, swap_free
def arcstats():
    """Read (c_min, size, arc_meta_used, arc_meta_min) from the ZFS
    arcstats file, converted from bytes to KiB.

    Line positions come from the module-level *_index values computed
    at startup.
    """
    with open(arcstats_path, 'rb') as stats_file:
        rows = stats_file.read().decode().split('\n')
    wanted = {
        c_min_index: 'c_min',
        size_index: 'size',
        arc_meta_used_index: 'arc_meta_used',
        arc_meta_min_index: 'arc_meta_min',
    }
    found = {}
    for position, row in enumerate(rows):
        field = wanted.get(position)
        if field is not None:
            # Last whitespace-separated token is the value, in bytes.
            found[field] = int(row.rpartition(' ')[2]) / 1024
    return (found['c_min'], found['size'],
            found['arc_meta_used'], found['arc_meta_min'])
###############################################################################
# Startup: record the line numbers of the /proc/meminfo fields we poll so
# the main loop can index lines instead of string-matching every time.
with open('/proc/meminfo') as f:
    mem_list = f.readlines()

mem_list_names = []
for s in mem_list:
    mem_list_names.append(s.split(':')[0])

if mem_list_names[2] != 'MemAvailable':
    # BUG FIX: the original called the undefined name errprint() here, which
    # raised NameError instead of printing the message before exiting.
    print('Your Linux kernel is too old, Linux 3.14+ requied\nExit', file=stderr)
    exit(1)

swap_total_index = mem_list_names.index('SwapTotal')
swap_free_index = swap_total_index + 1

# arcstats_path = '/proc/spl/kstat/zfs/arcstats'
# NOTE(review): points at a local test copy; switch back to the real /proc
# path (commented above) for production use.
arcstats_path = './arcstats'

ZFS = os.path.exists(arcstats_path)
if not ZFS:
    print('arcstats not found')
    exit()

# find indexes of the arcstats fields polled by arcstats()
with open(arcstats_path , 'rb') as f:
    a_list = f.read().decode().split('\n')
for n, line in enumerate(a_list):
    if line.startswith('c_min '):
        c_min_index = n
    elif line.startswith('size '):
        size_index = n
    elif line.startswith('arc_meta_used '):
        arc_meta_used_index = n
    elif line.startswith('arc_meta_min '):
        arc_meta_min_index = n
    else:
        continue

# Poll interval in seconds.
s = 1

# MemAvailable: 6569, arcstats_size: 442, arcstats_c_min: 62, arc_meta_min: 16, zfs_available: 364, NewMemAvailable: 6933
mlockall()

print('Values in MiB')

while True:
    ma, _, _ = check_mem_and_swap()
    c_min, size, arc_meta_used, arc_meta_min = arcstats()
    # NOTE(review): presumably an estimate of memory reclaimable from the
    # ARC cache -- confirm the formula against the ZFS docs.
    zfs_available = size + arc_meta_used - c_min - arc_meta_min
    print('MA: {}, size: {}, c_min: {}, meta_used: {}, meta_min: {}, zfs_available: {}, NewMA: {}'.format(
        round(ma / 1024),
        round(size / 1024),
        round(c_min / 1024),
        round(arc_meta_used / 1024),
        round(arc_meta_min / 1024),
        round(zfs_available / 1024),
        round((ma + zfs_available) / 1024)
    ))
    try:
        sleep(s)
    except KeyboardInterrupt:
        exit()
|
# Turn ON this...
# https://myaccount.google.com/lesssecureapps?pli=1&rapt=AEjHL4Nr-cE8QbO3xnA0PuHG2regofVD-TQMQzdCLV-4vlaJkS64k33ZgTWGY7dIhRxBJggs_iNb4gBjz7J9LU9evV4rEuQbDA
# Python code to illustrate Sending mail from
# your Gmail account
def send(sub = 'COVID19 Slot Notification',
         data = '...DATA...',
         mailto = 'hellovickykumar123@gmail.com'):
    """Email a plain-text COVID slot notification via Gmail SMTP.

    :param sub: message subject line
    :param data: slot details interpolated into the message body
    :param mailto: recipient address
    """
    from email.mime.text import MIMEText
    from email.mime.multipart import MIMEMultipart
    import smtplib

    msg = MIMEMultipart()
    msg['From'] = 'imvickykumar999@gmail.com'
    msg['To'] = mailto
    msg['Subject'] = sub

    body = f"Sent using python code by vicks, Slots is {data}"
    msg.attach(MIMEText(body, 'plain'))
    text = msg.as_string()

    # FIX: use the SMTP connection as a context manager so the socket is
    # closed (QUIT sent) even when starttls/login/sendmail raises; the old
    # code leaked the connection on any failure.
    with smtplib.SMTP('smtp.gmail.com', 587) as server:
        server.starttls()
        # SECURITY: credentials are hard-coded in source; move them to
        # environment variables / an app password before sharing this file.
        server.login('imvickykumar999@gmail.com', 'Hellovix999@')
        server.sendmail(msg['From'], msg['To'], text)
# send()
|
from django.db import models
# TODO: remember that JSON output will inflate this data model
# - adding @type (left implicit here)
# - amenityFeature needs several more attributes autopopulated
# note that a lot of fields listed as REQUIRED in the specification
# are given as optional (blank=True) in the models. This is because
# required fields mess with the flow of data submission and potentially
# block it - the front end won't accept partially-defined objects,
# all the classes are interdependent, and thus it becomes impossible
# to actually submit anything
# it would probably be possible to get around this with code in bespoke
# serializer classes, but that's another can of worms.
# note that the django ORM sometimes does not cope well with changes of
# cardinality
class PersonAndOrganization(models.Model):
    """Contact details for a person or organization referenced by route data."""
    name = models.CharField(max_length=100, verbose_name='Name')
    email = models.EmailField(verbose_name='Email', blank=True)
    website = models.URLField(verbose_name='Website', blank=True)


class TransportNote(models.Model):
    """A note about reaching a route point by a given mode of transport."""
    transport_mode = models.CharField(max_length=100, choices=[("Bus", "Bus"), ("Rail", "Rail"), ("Road", "Road"), ("Foot", "Foot"), ("Bicycle", "Bicycle")])
    description = models.CharField(max_length=500)
    # Optional back-reference to the route point the note applies to.
    routepoint = models.ForeignKey('RoutePoint', on_delete=models.CASCADE, blank=True, null=True, related_name='rp_transport_note')


class MapReference(models.Model):
    """A printed-map citation: series, sheet number and grid reference."""
    map_series = models.CharField(max_length=50)
    map_number = models.CharField(max_length=10)
    grid_reference = models.CharField(max_length=10)
    publisher = models.ForeignKey(PersonAndOrganization, on_delete=models.CASCADE, verbose_name='publisher', related_name='publisher', blank=True, null=True)
    routepoint = models.ManyToManyField('RoutePoint', related_name='rp_mapref')


class Provenance(models.Model):
    """Records where a route guide (or one of its segments) came from."""
    publisher = models.ForeignKey(PersonAndOrganization, on_delete=models.CASCADE, null=True, blank=True)
    provenance_url = models.URLField(verbose_name='Provenance')
    version = models.DateField(auto_now=False, auto_now_add=False)
    description = models.CharField(max_length=250)
    # A provenance record belongs to at most one guide or one segment.
    route_guide = models.ForeignKey('RouteGuide', on_delete=models.CASCADE, null=True, blank=True, related_name='rg_provenance')
    route_guide_segment = models.ForeignKey('RouteGuideSegment', on_delete=models.CASCADE, null=True, blank=True, related_name='seg_provenance')


class Category(models.Model):
    """A free-text category label."""
    content = models.CharField(max_length=30)


class Surface(models.Model):
    """A surface-type label (free text)."""
    surface = models.CharField(max_length=30)


class SuggestedEquipment(models.Model):
    """A single suggested equipment item."""
    item = models.CharField(max_length=100)
class Article(models.Model):
headline = models.CharField(max_length=100)
body = models.TextField()
author = models.OneToOneField(PersonAndOrganization, on_delete=models.CASCADE, blank=True, null=True, verbose_name='Author')
image_url = models.URLField(blank=True, null=True, verbose_name='Image URL')
class GeoPath(models.Model):
map_type = models.CharField(max_length=12, choices=[("RouteMap", "RouteMap"), ("ElevationMap", "ElevationMap"), ("CustomMap", "CustomMap")])
url = models.URLField()
encoding_format = models.CharField(max_length=40)
# note that the only easy way to define a one-to-many relationship in django
# is to treat the 'one' side of the equasion as a Foreign Key for the 'many' side.
# in other words, a RouteGuide can have several GeoPaths, but a given GeoPath can
# be associated with only one RouteGuide.
route_guide = models.ForeignKey('RouteGuide', on_delete=models.CASCADE, null=True, blank=True, related_name='rg_geopath')
route_guide_segment = models.ForeignKey('RouteGuideSegment', on_delete=models.CASCADE, null=True, blank=True, related_name='seg_geopath')
class MapImage(models.Model):
    """A rendered map image (by URL) for a route or segment."""
    map_type = models.CharField(max_length=12, choices=[("RouteMap", "RouteMap"), ("ElevationMap", "ElevationMap"), ("CustomMap", "CustomMap")])
    image = models.URLField()
    encoding_format = models.CharField(max_length=40)
    route_guide = models.ForeignKey('RouteGuide', on_delete=models.CASCADE, null=True, blank=True, related_name='rg_mapimage')
    route_guide_segment = models.ForeignKey('RouteGuideSegment', on_delete=models.CASCADE, null=True, blank=True, related_name='seg_mapimage')
class VerificationRecord(models.Model):
    """Who verified a guide/segment and when (one record per guide/segment)."""
    verified_by = models.ManyToManyField(PersonAndOrganization, verbose_name='Verified By')
    date_verified = models.DateField(auto_now=False, auto_now_add=False, verbose_name='Date Verified')
    route_guide = models.OneToOneField('RouteGuide', on_delete=models.CASCADE, blank=True, null=True, related_name='rg_verification_record')
    route_guide_segment = models.OneToOneField('RouteGuideSegment', on_delete=models.CASCADE, blank=True, null=True, related_name='seg_verification_record')
class AccessibilityDescription(models.Model):
    """Free-text accessibility note attached to a guide or segment."""
    description = models.CharField(max_length=250)
    route_guide = models.ForeignKey('RouteGuide', on_delete=models.CASCADE, blank=True, null=True, related_name='rg_access_description')
    route_guide_segment = models.ForeignKey('RouteGuideSegment', on_delete=models.CASCADE, blank=True, null=True, related_name='seg_access_description')
class Activity(models.Model):
    """An activity type (e.g. a label plus an identifying URL)."""
    prefLabel = models.CharField(max_length=100)
    identifier = models.URLField(verbose_name='Identifier')
class IndicativeDuration(models.Model):
    """Expected duration for a given activity on a guide or segment."""
    # duration stored as an ISO-8601 duration string
    duration = models.CharField(max_length=10, verbose_name='Duration (8601)') # TODO: Add regex for validation
    activity = models.ForeignKey('Activity', on_delete=models.CASCADE, verbose_name='Activity', null=True)
    route_guide = models.ForeignKey('RouteGuide', on_delete=models.CASCADE, null=True, blank=True,related_name='rg_duration')
    route_guide_segment = models.ForeignKey('RouteGuideSegment', on_delete=models.CASCADE, null=True, blank=True, related_name='seg_duration')
class AmenityFeature(models.Model):
    """A named amenity available at a RoutePoint."""
    name = models.CharField(max_length=75)
    routepoint = models.ForeignKey('RoutePoint', on_delete=models.CASCADE, blank=True, null=True, related_name='rp_amenity')
class GeoCoordinates(models.Model):
    """Lat/long (and optional post code) for exactly one RoutePoint."""
    latitude = models.FloatField(verbose_name='Latitude')
    longitude = models.FloatField(verbose_name='Longitude')
    postal_code = models.CharField(max_length=10, verbose_name='Post Code', null=True, blank=True)
    routepoint = models.OneToOneField('RoutePoint', on_delete=models.CASCADE, blank=True, null=True, related_name='rp_geo')
class RoutePoint(models.Model):
    """A named point on a route: access point, start/end point, or POI."""
    name = models.CharField(max_length=100, verbose_name='Name')
    is_access_point = models.BooleanField()
    is_preferred_access_point = models.BooleanField(verbose_name='Is Preferred Access Point')
    description = models.TextField(verbose_name='Description')
    headline = models.CharField(blank=True, null=True, max_length=200, verbose_name='Headline (Brief Description)')
    same_as = models.URLField(verbose_name='Same As', blank=True, null=True)
    is_start_point = models.BooleanField(verbose_name='Is Start Point', default=False)
    is_end_point = models.BooleanField(verbose_name='Is End Point', default=False)
class RouteGradient(models.Model):
    """Gradient / elevation statistics for a route guide or a single segment."""
    max_gradient = models.CharField(max_length=10)
    avg_gradient = models.CharField(max_length=10)
    # bug fix: verbose_name previously read 'Total Elevation Loss' on the gain
    # field (copy-paste from the line below)
    total_elevation_gain = models.CharField(max_length=9, verbose_name='Total Elevation Gain')
    total_elevation_loss = models.CharField(max_length=9, verbose_name='Total Elevation Loss')
    gradient_term = models.CharField(max_length=100, verbose_name='Gradient Term')
    gradient_defurl = models.URLField(verbose_name='Gradient Definition URL')
    description = models.CharField(max_length=250)
    # one gradient record per guide OR per segment
    route_guide = models.OneToOneField('RouteGuide', on_delete=models.CASCADE, null=True, blank=True, related_name='rg_gradient')
    route_guide_segment = models.OneToOneField('RouteGuideSegment', on_delete=models.CASCADE, null=True, blank=True, related_name='seg_gradient')
class RouteDifficulty(models.Model):
    """Difficulty rating (per activity) for a guide or segment."""
    difficulty_term = models.CharField(max_length=15)
    description = models.CharField(max_length=250)
    difficulty_defurl = models.URLField(verbose_name='Difficulty Definition URL')
    activity = models.ForeignKey('Activity', on_delete=models.CASCADE, verbose_name='Activity', null=True)
    route_guide = models.ForeignKey('RouteGuide', on_delete=models.CASCADE, null=True, blank=True, related_name='rg_difficulty')
    route_guide_segment = models.ForeignKey('RouteGuideSegment', on_delete=models.CASCADE, null=True, blank=True, related_name='seg_difficulty')
class RouteLegalAdvisory(models.Model):
    """Legal status note (designation, description, definition URL) for a guide/segment."""
    route_designation = models.OneToOneField('RouteDesignation', on_delete=models.CASCADE, verbose_name='Route Designation', null=True, blank=True)
    description = models.CharField(max_length=250)
    legal_defurl = models.URLField(verbose_name='Legal Definition URL')
    route_guide = models.OneToOneField('RouteGuide', on_delete=models.CASCADE, null=True, blank=True, related_name='rg_legal_advisory')
    route_guide_segment = models.OneToOneField('RouteGuideSegment', on_delete=models.CASCADE, null=True, blank=True, related_name='seg_legal_advisory')
class RouteDesignation(models.Model):
    """A formal designation (one or more terms) referenced by a legal advisory."""
    term = models.ManyToManyField('RouteDesignationTerm', verbose_name='Route Designation Term', related_name='terms')
    description = models.CharField(max_length=250)
    url = models.URLField(verbose_name='Formal Definition URL')
    # NOTE(review): related_name 'rg_route_designation' looks copy-pasted from the
    # RouteGuide naming scheme even though this FK targets RouteLegalAdvisory - confirm
    legal_advisory = models.ForeignKey('RouteLegalAdvisory', on_delete=models.CASCADE, null=True, blank=True, related_name='rg_route_designation')
class RouteDesignationTerm(models.Model):
    """Simple lookup value: one designation term."""
    term = models.CharField(max_length=100)
class Image(models.Model):
    """A captioned image (by URL, with format/dimensions) attached to a RouteGuide."""
    caption = models.CharField(max_length=250, verbose_name='Caption')
    url = models.URLField(verbose_name='Image URL')
    encoding_format = models.CharField(max_length=40, verbose_name='Encoding Format')
    size = models.CharField(max_length=20, verbose_name='Size')
    width = models.IntegerField(verbose_name='Width')
    height = models.IntegerField(verbose_name='Height')
    route_guide = models.ForeignKey('RouteGuide', on_delete=models.CASCADE, null=True, blank=True, related_name='image')
class RouteSegmentGroup(models.Model):
    """A named group of segments, optionally marked as an alternative to another group."""
    id_as_url = models.URLField(verbose_name='@id', blank=False)
    name = models.CharField(max_length=100)
    description = models.CharField(max_length=250)
    segments = models.ManyToManyField('RouteGuideSegment', verbose_name='Includes Segments', related_name='rg_route_segment_group')
    # NOTE(review): this self-referencing FK has no null=True/blank=True, so every
    # group MUST point at another group - confirm that is intended
    alternatives = models.ForeignKey('RouteSegmentGroup', on_delete=models.CASCADE, verbose_name='Alternative Group To', related_name='seg_route_segment_group')
class UserGeneratedContent(models.Model):
    """User-submitted content (media, coverage) attached to a RouteGuide."""
    creator = models.ForeignKey(PersonAndOrganization, on_delete=models.CASCADE, null=True, blank=True, related_name='created_by')
    accountable_person = models.ForeignKey(PersonAndOrganization, on_delete=models.CASCADE, null=True, blank=True)
    spatial_coverage = models.CharField(max_length=500)
    associated_media = models.CharField(max_length=500)
    route_guide = models.ForeignKey('RouteGuide', on_delete=models.CASCADE, null=True, blank=True, related_name='user_generated_content')
class RouteGuide(models.Model):
    """Top-level guide for a route; most ancillary models link back to this."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=200, verbose_name='Name')
    url = models.URLField(verbose_name='Trackback URL', blank=True)
    date_published = models.DateField(null=True, auto_now=False, auto_now_add=False, verbose_name='Date Published')
    date_modified = models.DateField(null=True, auto_now=False, auto_now_add=False, verbose_name='Date Modified')
    description = models.TextField(blank=True, verbose_name='Description')
    headline = models.CharField(blank=True, null=True, max_length=200, verbose_name='Headline (Brief Description)')
    distance = models.CharField(max_length=9, verbose_name='Distance')
    is_loop = models.BooleanField(verbose_name='Is Loop', default=True, blank=True, null=True)
    id_as_url = models.URLField(verbose_name='ID (URL)')
    # many-to-many links to shared lookup / content tables
    author = models.ManyToManyField(PersonAndOrganization, blank=True, verbose_name='Author')
    activity = models.ManyToManyField(Activity, blank=True)
    categories = models.ManyToManyField('Category', verbose_name='Category', related_name="categories", blank=True)
    surfaces = models.ManyToManyField('Surface', verbose_name='Surface', related_name="surfaces", blank=True)
    suggested_equipment = models.ManyToManyField('SuggestedEquipment', verbose_name='Equipment', related_name="equipment", blank=True)
    additional_info = models.ManyToManyField('Article', verbose_name='Additional Info', blank=True, related_name="additional_info")
    route_point = models.ManyToManyField('RoutePoint', blank=True)
class RouteGuideSegment(models.Model):
    """One ordered segment (see `sequence`) belonging to a RouteGuide."""
    # TODO: check blank values permitted align with specification
    # TODO: rename this and related to RouteSegmentGuide **not** RouteGuideSegment
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=200, verbose_name='Name')
    author = models.ManyToManyField(PersonAndOrganization, verbose_name='Author')
    url = models.URLField(verbose_name='Trackback URL', blank=True, null=True)
    # NOTE(review): blank=True without null=True on these DateFields lets forms
    # leave them empty while the DB column stays NOT NULL - confirm intended
    date_published = models.DateField(blank=True, auto_now=False, auto_now_add=False, verbose_name='Date Published')
    date_modified = models.DateField(blank=True, auto_now=False, auto_now_add=False, verbose_name='Date Modified')
    description = models.TextField(blank=True, verbose_name='Description')
    headline = models.CharField(blank=True, max_length=200, verbose_name='Headline (Brief Description)')
    is_loop = models.BooleanField(verbose_name='Is Loop', default=True)
    id_as_url = models.URLField(verbose_name='ID (URL)')
    sequence = models.IntegerField(verbose_name='Segment Number')
    activity = models.ManyToManyField(Activity)
    additional_info = models.ManyToManyField('Article', verbose_name='Additional Info', blank=True, related_name="seg_additional_info")
    route_guide = models.ForeignKey('RouteGuide', on_delete=models.CASCADE, blank=True, null=True, related_name='seg_route_guide')
    point_of_interest = models.ManyToManyField('RoutePoint', blank=True)
class RouteRiskAdvisory(models.Model):
    """Risk information (descriptions, maintainer, linked risk records) for a guide/segment."""
    risk_description = models.CharField(max_length=250)
    user_safety_feedback = models.CharField(max_length=500) # TODO: expand into schema:Review object
    is_maintained = models.BooleanField()
    risk_information_url = models.URLField()
    traffic_description = models.CharField(max_length=500)
    maintained_by = models.ForeignKey(PersonAndOrganization, on_delete=models.CASCADE, verbose_name='Is Maintained By', related_name='maintains', blank=True, null=True)
    known_risk = models.ForeignKey('KnownRisk', on_delete=models.CASCADE, blank=True, null=True)
    risk_modifier = models.ForeignKey('RiskModifier', on_delete=models.CASCADE, blank=True, null=True)
    risk_mitigator = models.ForeignKey('RiskMitigator', on_delete=models.CASCADE, blank=True, null=True)
    # one advisory per guide, but a segment may carry several
    route_guide = models.OneToOneField('RouteGuide', on_delete=models.CASCADE, null=True, blank=True, related_name='rg_risk_advisory')
    route_guide_segment = models.ForeignKey('RouteGuideSegment', on_delete=models.CASCADE, null=True, blank=True, related_name='seg_risk_advisory')
class KnownRisk(models.Model):
    """Simple lookup value: a known risk description."""
    description = models.CharField(max_length=100)

class RiskModifier(models.Model):
    """Simple lookup value: a factor that increases risk."""
    description = models.CharField(max_length=100)
class RouteAccessRestriction(models.Model):
    """An access restriction (terms, URL, time span) on a guide or segment."""
    description = models.CharField(max_length=250)
    terms = models.ManyToManyField('RouteAccessRestrictionTerm', blank=True)
    information_url = models.URLField()
    timespan = models.CharField(max_length=50)
    route_guide = models.ForeignKey('RouteGuide', on_delete=models.CASCADE, null=True, blank=True, related_name='rg_access_restriction')
    route_guide_segment = models.ForeignKey('RouteGuideSegment', on_delete=models.CASCADE, null=True, blank=True, related_name='seg_access_restriction')
class RouteAccessRestrictionTerm(models.Model):
    """Simple lookup value: one access-restriction term."""
    description = models.CharField(max_length=250)

class RiskMitigator(models.Model):
    """Simple lookup value: a factor that reduces risk."""
    description = models.CharField(max_length=100)
|
from __future__ import print_function, division
import astropy.io.fits as pyfits
import numpy as np
import sys
import subprocess
from scipy import signal
import weightedstats as ws
from skimage.filters.rank import median as skmed
"""
__author__ = 'Will Hartley'
Code to clean up the VISTA VIDEO single-chip coadds' backgrounds.
Steps:
- run sextractor, with some default set of params
- expand segmentation map by 4 px (choice of 6px for 0.1342" / px is optimal for UDS, VISTA pixels are 0.2636"). Actually using a convolution with a square array
- use expanded segmap as mask in a median filtering of the background
- Filter is a simple square (129x129 px, i.e., 34x34 arcsec)
- to make the algorithm run easier, we should embed the image in a larger array.
"""
# Image Object
# properties: file_name, weightmap, size, weight_name, mask, cleaned_im, cleaned_name, conf (configuration constants - see __main__)
# methods: run sextractor, embed image, make mask (inc. read segmap, expand segmap), clean image, save cleaned
class video_image:
    """A VISTA/VIDEO single-chip coadd plus the machinery to clean its background.

    Typical workflow: construct with a FITS filename and a config object, then
    call run_sex(), read_mask(), clean_im() and save_clean_im() in that order.
    The config object must carry: pad_px, exp_seg, filt_size, border, clip,
    bug_check (see __main__ below for the tuned values).
    """

    def __init__(self, fname, conf):
        self.fname = fname
        self.conf = conf
        # read the image, set up the weightmap / output filenames
        self.read_image()

    def pad_array(self, im_array):
        """Return im_array embedded in a zero border of conf.pad_px on each side.

        NOTE: the data is placed starting at index pad_px-1 (one pixel shy of
        centre); clean_im() indexes with the same offset and depad_array()
        inverts it exactly.
        """
        padded = np.zeros((im_array.shape[0] + self.conf.pad_px * 2,
                           im_array.shape[1] + self.conf.pad_px * 2))
        padded[self.conf.pad_px - 1:padded.shape[0] - self.conf.pad_px - 1,
               self.conf.pad_px - 1:padded.shape[1] - self.conf.pad_px - 1] = im_array
        return padded

    def read_image(self):
        """Read header + data from self.fname, pad the data, and derive the
        weightmap and cleaned-image filenames."""
        with pyfits.open(self.fname) as f:
            self.im_size = f[0].header['NAXIS2'], f[0].header['NAXIS1'] # axes switched in python w.r.t. fits expectation.
            self.im_data = self.pad_array(f[0].data)
            self.im_head = f[0].header
        # bug fix: these previously used the global 'fname' instead of self.fname,
        # so the class only worked when driven from __main__.
        self.weight_name = '.'.join(self.fname.split('.')[:-1]) + '.weight.fits' # assumes weight extension, could allow this to change via conf.
        self.clean_name = '.'.join(self.fname.split('.')[:-1]) + '.cleaned.fits'

    def run_sex(self):
        """Run SExtractor on the image (produces seg.fits via the VIDEO.sex config)."""
        subprocess.call('sex -c VIDEO.sex {0} -WEIGHT_IMAGE {1}'.format(self.fname, self.weight_name), shell=True)

    def expand_seg(self, seg):
        """Shrink the background (grow the sources) of a 0/1 map: after the
        square convolution + floor, a pixel stays 1 only if its whole
        exp_seg x exp_seg neighbourhood was 1."""
        seg = signal.convolve2d(seg, np.ones((self.conf.exp_seg, self.conf.exp_seg)), mode='same')
        seg = np.floor(seg / self.conf.exp_seg ** 2)
        return seg

    def save_mask(self):
        """Write the current mask to mask.fits (debugging aid)."""
        hdu = pyfits.PrimaryHDU(self.mask)
        # NOTE(review): clobber= was renamed overwrite= in newer astropy - confirm version
        hdu.writeto('mask.fits', clobber=True)

    def read_mask(self):
        """Read seg.fits and build the good-pixel mask (1 = usable background,
        0 = source or border), expanded, padded, and border-zeroed."""
        segmap = pyfits.open('seg.fits')[0].data
        # change values such that source pixels are 0 and background are 1
        segmap[segmap == 1] = 2
        segmap[segmap == 0] = 1
        segmap[segmap > 1] = 0
        # expand sources in segmap by a pre-defined amount
        segmap = self.expand_seg(segmap)
        # embed in larger array
        self.mask = self.pad_array(segmap)
        # mask the border region on all four sides
        self.mask[0:self.conf.pad_px + self.conf.border, :] = 0
        self.mask[self.mask.shape[0] - (1 + self.conf.pad_px + self.conf.border):, :] = 0
        self.mask[:, 0:self.conf.pad_px + self.conf.border] = 0
        self.mask[:, self.mask.shape[1] - (1 + self.conf.pad_px + self.conf.border):] = 0
        # for testing purposes, we might want to save the mask
        if self.conf.bug_check:
            self.save_mask()

    def save_bugcheck_im(self, im, name):
        """Write an intermediate image to disk (debugging aid)."""
        hdu = pyfits.PrimaryHDU(im)
        hdu.writeto(name, clobber=True)

    def clean_im_BAD(self):
        # THIS DOESN'T WORK AT ALL - BIT DEPTH IS INSUFFICIENT
        # Kept for reference only; clean_im() below is the working version.
        # Uses the scikit-image masked rank median filter on the clipped image.
        tmp_im = self.im_data
        tmp_im[self.mask == 0] = 0.
        tmp_im[tmp_im > self.conf.clip] = self.conf.clip
        tmp_im[tmp_im < -1. * self.conf.clip] = -1. * self.conf.clip
        # if testing, save the temporary image
        if self.conf.bug_check:
            self.save_bugcheck_im(tmp_im, 'tmp.fits')
        # scale to range (-1, 1) as the scikit rank filter requires
        im_max = np.max(np.abs(tmp_im))
        tmp_im /= im_max
        self.cleaned_im = skmed(tmp_im, selem=np.ones((self.conf.exp_seg * 2 + 1, self.conf.exp_seg * 2 + 1)), mask=self.mask)
        # np.float was removed from numpy; builtin float is the same thing here
        self.cleaned_im = self.cleaned_im.astype(float) * im_max # this is a lost cause
        # if testing, save the intermediate background image
        if self.conf.bug_check:
            self.save_bugcheck_im(self.cleaned_im, 'bkgnd.fits')
        # subtract from original data
        self.cleaned_im = self.im_data - self.cleaned_im
        # de-pad the array (because that is what I assume when saving)
        self.cleaned_im = self.depad_array(self.cleaned_im)

    def fix_px(self, px_val, data, weights):
        """Return px_val minus the weighted mean of data (masked pixels carry
        weight zero). If the mean is undefined (e.g. all weights zero),
        return px_val unchanged."""
        try:
            return px_val - np.average(data, weights=weights)
        except Exception:
            return px_val

    def fix_px_med(self, px_val, data, weights):
        """As fix_px(), but subtracting the weighted median."""
        try:
            return px_val - ws.numpy_weighted_median(data, weights=weights)
        except Exception:
            return px_val

    def clean_im(self):
        """Background-correct the image with a square weighted-mean filter
        (half-size conf.filt_size), ignoring masked pixels. The result in
        self.cleaned_im is already unpadded (im_size shaped)."""
        off = self.conf.pad_px - 1   # offset of pixel (0,0) inside the padded arrays
        half = self.conf.filt_size   # filter window extends `half` px either side (odd-sized window)
        self.cleaned_im = np.zeros((self.im_size[0], self.im_size[1]))
        for i in range(self.im_size[0]):
            for j in range(self.im_size[1]):
                self.cleaned_im[i, j] = self.fix_px(
                    self.im_data[i + off, j + off],
                    self.im_data[i + off - half:i + off + half + 1, j + off - half:j + off + half + 1],
                    self.mask[i + off - half:i + off + half + 1, j + off - half:j + off + half + 1])

    def depad_array(self, im_array):
        """Inverse of pad_array(): strip the conf.pad_px border.

        Bug fix: the end indices previously omitted the -1 offset used by
        pad_array(), returning one extra row and column.
        """
        return im_array[self.conf.pad_px - 1:im_array.shape[0] - self.conf.pad_px - 1,
                        self.conf.pad_px - 1:im_array.shape[1] - self.conf.pad_px - 1]

    def save_clean_im(self):
        """Save the cleaned image under self.clean_name with the original header.
        (No depad needed - clean_im() already produced an unpadded array.)"""
        hdu = pyfits.PrimaryHDU(self.cleaned_im)
        hdu.header = self.im_head
        hdu.writeto(self.clean_name, clobber=True)
# main prog. - expects exactly one argument: the FITS filename to clean.
if __name__ == '__main__':
    # get filename arg.
    if len(sys.argv) != 2:
        print('Error: No filename supplied.')
        print('Call as: python clean_im.py <filename>')
    else:
        fname = sys.argv[1]
        # here are some constants that can be tuned (using a lambda as a cheap empty attribute container)
        conf = lambda: None
        conf.exp_seg = 8 # number of pixels by which to expand the seg map in mask making (conv. w. square array, so need twice the required n-px expansion).
        conf.pad_px = 65 # number of pixels to pad the image with - to simplify coding.
        conf.filt_size = 64 # half-dimension of the square filter
        conf.border = 175 # depth in pixels that forms mask region around the border
        conf.clip = 500. # clipping threshold used only by the (broken) clean_im_BAD path
        conf.bug_check = False # do we want to save bug checking images?
        print('Using config: exp_seg = {0}, pad_px = {1}, (half-)filt_size = {2}, border = {3}, clip_value = {4}, bug_check = {5}'.format(conf.exp_seg, conf.pad_px, conf.filt_size, conf.border, conf.clip, conf.bug_check))
        # create the VISTA image object (reads the data and sets up the weightmap filename)
        im = video_image(fname, conf)
        # run sextractor (writes seg.fits)
        im.run_sex()
        # read in the segmap and prepare it as a mask
        im.read_mask()
        # clean the image
        im.clean_im()
        # save cleaned image
        im.save_clean_im()
|
# -*- coding: utf-8 -*-
'''
Obtiene los usuarios de la base de datos principal y los crea dentro de la base del correo.
Crea los usuarios si es que no existen en la base del dovecot y la base del sogo.
Para el proceso de actualización solo chequea los usuarios con claves que hayan cambiado posteriormente a la ultima actualización.
'''
import connection
import dovecotConnection
import users
import groups
import systems
import logging
import datetime
if __name__ == '__main__':
    # Sync users from the main credentials DB into the dovecot mail DB:
    # insert users missing from dovecot, update those whose record/password
    # changed since the last sync.
    dcon = dovecotConnection.getConnection()
    con = connection.getConnection()
    try:
        dcur = dcon.cursor()
        cur = con.cursor()
        try:
            # last sync time = newest 'modified' already stored in dovecot.users
            dcur.execute('select modified from dovecot.users order by modified desc limit 1')
            lastSinc = None
            if dcur.rowcount <= 0:
                # empty mail DB: fall back to re-syncing the last year
                lastSinc = datetime.datetime.now() - datetime.timedelta(days=365)
            else:
                # NOTE(review): assumes a dict-style cursor - confirm the cursor factory
                lastSinc = dcur.fetchone()['modified']
            logging.info('Fecha de la ultima actualización : {}'.format(lastSinc))
            # users whose mail record or password changed after the last sync
            cur.execute('select du.user_id from dovecot.users du, credentials.user_password up where du.user_id = up.user_id and (du.modified > %s or up.updated > %s)', (lastSinc, lastSinc))
            logging.info('Registros encontrados {}'.format(cur.rowcount))
            usersToSync = cur.fetchall()
            for mu in usersToSync:
                logging.info('Sincronizando {}'.format(mu['user_id']))
                # bug fix: the first join condition was followed by a comma
                # instead of 'and', which is invalid SQL in the WHERE clause
                cur.execute('select * from profile.users pu, dovecot.users du, credentials.user_password up where pu.id = %(user_id)s and du.user_id = %(user_id)s and up.user_id = %(user_id)s', mu)
                du = cur.fetchone()
                dcur.execute('select username from dovecot.users where username = %(username)s', du)
                if dcur.rowcount <= 0:
                    logging.info('Insertando {}'.format(du['username']))
                    dcur.execute('insert into dovecot.users (username, password, domain, home, uid, gid, maildir, active, modified) values (%(username)s, %(password)s, %(domain)s, %(home)s, %(uid)s, %(gid)s, %(maildir)s, %(active)s, %(modified)s)', du)
                else:
                    logging.info('Actualizando {}'.format(du['username']))
                    dcur.execute('update dovecot.users set password = %(password)s, domain = %(domain)s, home = %(home)s, uid = %(uid)s, gid = %(gid)s, maildir = %(maildir)s, active = %(active)s, modified = %(modified)s where username = %(username)s', du)
            dcon.commit()
        finally:
            cur.close()
            dcur.close()
    finally:
        dovecotConnection.closeConnection(dcon)
        connection.closeConnection(con)
|
from QPlayer import QPlayer
from SpindelTable import Table
import random
import json
from Deck import Card
# Run 100 games of single-suit solitaire with the Q-learning player and
# report how many were won.
wonGames = 0
N = 100
qPlayer = QPlayer(None, loadFromFile = False)
for i in range(N):
    print("Game: " + str(i))
    table = Table(1)
    table.piles = []
    # start from an empty layout, then deal one suit of 13 cards
    for stack in table.stacks:
        stack.faceDownCards = []
        stack.faceUpCards = []
    cards = [Card(rank, 0) for rank in range(1, 14)]
    random.shuffle(cards)
    # bug fix: this loop previously reused the outer game counter 'i'
    for deal_idx in range(13):
        table.stacks[deal_idx % 10].faceUpCards.append(cards[deal_idx])
    print(table)
    qPlayer.newTable(table)
    lastPile = False
    # Game loop
    while True:
        # distribute loop
        n = 0
        while True:
            # Prevent going back and forth forever
            n += 1
            if n > 1000 and not lastPile:
                break
            if n > 10000 and lastPile:
                break
            # periodically checkpoint the learned Q table
            if not n % 1000:
                with open("Q.json", "w") as f:
                    f.write(json.dumps(qPlayer.Q))
            possMoves = table.possibleMoves()
            if not possMoves:
                break
            qPlayer.move()
        if table.isWon():
            break
        if table.piles:
            table.distribute()
            print("Distributing")
            if not table.piles:
                lastPile = True
            continue
        break
    if table.isWon():
        wonGames += 1
        print("WON")
    else:
        print("lost")
print(f"Number of won games: {wonGames} out of {N}")
|
# -*- coding: utf-8 -*-
import cv2
from numpy import*
import random
import matplotlib.pyplot as plt
import sys
import copy
def read(filename):
    """Load *filename* from disk as a single-channel (grayscale) image."""
    return cv2.imread(filename, 0)
def get_matlist(img):
    """Split *img* into non-overlapping 8x8 blocks in row-major order.

    Trailing rows/columns that do not fill a complete block are ignored.
    Returns a list of float 8x8 arrays (matching the previous zeros((8,8))
    buffer dtype).
    """
    m, n = img.shape
    result = []
    # '//': Python 2 int '/' floored; floor division keeps that meaning on Python 3
    row_counts8 = m // 8
    column_counts8 = n // 8
    for i in range(row_counts8):
        for j in range(column_counts8):
            # slice + astype replaces the element-by-element copy loop
            result.append(img[i*8:i*8+8, j*8:j*8+8].astype(float))
    return result
def get_next(row, column):
    """Return the (row, column) that follows in the 8x8 zig-zag (diagonal) scan."""
    # bounce along the top or bottom edge
    if row in (0, 7) and column % 2 == 0:
        return row, column + 1
    # bounce along the left or right edge
    if column in (0, 7) and row % 2 == 1:
        return row + 1, column
    # interior: move along the current anti-diagonal
    if (row + column) % 2 == 1:
        return row + 1, column - 1
    return row - 1, column + 1
def LSB(original_map, secret_information):
    """Embed *secret_information* ('0'/'1' characters) into the pixel LSBs of
    *original_map*, in place, and return the modified map.

    NOTE(review): message bit 0 is never embedded (the counter increments
    before first use) and rows wrap via cot % m - preserved as-is so any
    matching extractor stays compatible; confirm intent.
    """
    cot = 0
    m, n = original_map.shape
    while cot < len(secret_information) - 1:
        cot += 1
        row = cot % m
        # '//': floor division keeps Python 2 semantics and works on Python 3
        colum = cot // n
        if secret_information[cot] == '0':
            # force the pixel's LSB to 0
            if original_map[row-1][colum] % 2 == 1:
                original_map[row-1][colum] -= 1
        elif secret_information[cot] == '1':
            # force the pixel's LSB to 1
            if original_map[row-1][colum] % 2 == 0:
                original_map[row-1][colum] += 1
    return original_map
def relation(img):
    """Sum of absolute differences between consecutive pixels along the
    zig-zag scan of *img* - the smoothness measure used by the RS statistics.
    """
    cot = 0
    m, n = img.shape
    start_m, start_n = 0, 0
    next_m, next_n = get_next(start_m, start_n)
    while next_m < m and next_n < n:
        # bug fix: the previous pixel is img[start_m][start_n];
        # it used to read img[start_n][start_n] (wrong row index)
        cot += abs(img[next_m][next_n] - img[start_m][start_n])
        start_m, start_n = next_m, next_n
        next_m, next_n = get_next(next_m, next_n)
    return cot
def non_positive_rotation(img):
    """With probability 1/2, apply the 'non-positive' flip in place along the
    zig-zag scan of an 8x8 block: even pixels (except 0) are decremented,
    odd pixels (except 255) are incremented. Otherwise return img untouched.
    """
    if random.random() > 0.5:
        return img
    r, c = 0, 0
    while r < 8 and c < 8:
        px = img[r][c]
        if px % 2 == 0 and px != 0:
            img[r][c] -= 1
        elif px % 2 == 1 and px != 255:
            img[r][c] += 1
        r, c = get_next(r, c)
    return img
def non_negative_rotation(img):
    """With probability 1/2, apply the 'non-negative' flip in place along the
    zig-zag scan of an 8x8 block: even pixels are incremented, odd pixels are
    decremented (an LSB flip). Otherwise return img untouched.
    """
    if random.random() > 0.5:
        return img
    r, c = 0, 0
    while r < 8 and c < 8:
        if img[r][c] % 2 == 0:
            img[r][c] += 1
        elif img[r][c] % 2 == 1:
            img[r][c] -= 1
        r, c = get_next(r, c)
    return img
def get_param(mat_list):
    # Compute RS-style statistics over a list of 8x8 blocks:
    #   sm / rm   - fraction of blocks whose relation() value decreased /
    #               increased after non_negative_rotation
    #   s_m / r_m - the same fractions after non_positive_rotation
    # (the rotations themselves only fire with probability 1/2 per block)
    relation_list=[]
    non_negro_relation_list=[]
    non_posro_relation_list=[]
    for i in range(len(mat_list)):
        # deep copies so the three measurements all start from the same block
        temp = copy.deepcopy(mat_list[i])
        temp1=copy.deepcopy(temp)
        temp2=copy.deepcopy(temp)
        relation_list.append(relation(temp))
        non_posro_relation_list.append(relation(non_positive_rotation(temp2)))
        non_negro_relation_list.append(relation(non_negative_rotation(temp1)))
    sm = 0.0
    rm = 0.0
    s_m=0.0
    r_m=0.0
    # count blocks by the sign of the change in relation()
    for i in range(len(mat_list)):
        if((relation_list[i]-non_negro_relation_list[i])>0):
            sm+=1
        elif((relation_list[i]-non_negro_relation_list[i])<0):
            rm+=1
        if((relation_list[i]-non_posro_relation_list[i])>0):
            s_m+=1
        elif((relation_list[i]-non_posro_relation_list[i])<0):
            r_m+=1
    # normalise the counts to fractions of all blocks
    sm = sm/len(relation_list)
    rm = rm/len(relation_list)
    s_m = s_m/len(relation_list)
    r_m = r_m/len(relation_list)
    print "sm,s_m,rm,r_m"
    print sm,s_m,rm,r_m
    return sm,s_m,rm,r_m
def getimplant(img, secret_information, percentage):
    """Embed the first *percentage* fraction of the message into a copy of
    *img* and return the RS statistics of the resulting stego image."""
    embed_len = int(len(secret_information) * percentage)
    payload = secret_information[0:embed_len]
    stego_img = LSB(copy.deepcopy(img), payload)
    return get_param(get_matlist(stego_img))
# Driver: run the RS analysis on the image given as argv[1], embedding an
# increasing fraction of a random message and plotting the four statistics.
photofilename = sys.argv[1]
img = read(photofilename)
original_mat_list = get_matlist(img)
m, n = img.shape
secret_information = ""
# build a random bit string with one bit per image pixel
for i in xrange(m * n):
    jg = random.random()
    if(jg>0.5):
        secret_information +='0'
    else:
        secret_information +='1'
print "begin"
result_list = []
ratio = []
# embed 0%, 10%, ..., 100% of the message and collect the statistics each time
for i in range(11):
    cur_ratio = i * 0.1
    ratio.append(cur_ratio)
    xx = copy.deepcopy(img)
    re = getimplant(xx,secret_information,cur_ratio)
    result_list.append(re)
print "end function"
sm = []
s_m = []
rm = []
r_m = []
# split the (sm, s_m, rm, r_m) tuples into separate series for plotting
for i in result_list:
    sm.append(i[0])
    s_m.append(i[1])
    rm.append(i[2])
    r_m.append(i[3])
print result_list
print "end"
plt.plot(ratio, sm, marker='o')  # sm curve
plt.plot(ratio, s_m, marker='*')  # s_m curve
plt.plot(ratio, rm, marker='^')  # rm curve
plt.plot(ratio, r_m, marker="+")  # r_m curve
# optional cosmetics, left disabled:
# plt.legend()
# plt.xticks(x, names, rotation=45)
# plt.margins(0)
# plt.subplots_adjust(bottom=0.15)
# plt.xlabel(...)  # x-axis label
# plt.ylabel("RMSE")  # y-axis label
# plt.title("A simple plot")
plt.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.