text stringlengths 8 6.05M |
|---|
import pytest
from pelops.analysis import analysis
class experimentGen():
    """Build a fixed two-camera experiment from the canned featureData chips."""

    def __init__(self):
        # Two overlapping camera views; chip 'a' appears in both cameras.
        self.fd = featureData()
        first_ids = ['a', 'b', 'c', 'd']
        second_ids = ['a', 'e', 'f', 'g']
        cam_one = [self.fd.getchip(cid) for cid in first_ids]
        cam_two = [self.fd.getchip(cid) for cid in second_ids]
        self.experiment = [cam_one, cam_two]

    def generate(self):
        """Return the prepared [camera1_chips, camera2_chips] experiment."""
        return self.experiment
class chip():
    """Minimal stand-in for a pelops chip: wraps a (car_id, feature) pair."""
    def __init__(self, x):
        # x is a (car_id, feature_vector) tuple
        self.car_id = x[0]
        self.feature = x[1]
class featureData():
    """In-memory chip store with canned feature vectors keyed by car id."""

    def __init__(self):
        # Canned (car_id, feature_vector) fixtures.
        fun = [('a', [1, 2, 3, 4, 5, 6, 7]),
               ('b', [10, 20, 30, 40, 11, 9, 2.7]),
               ('c', [100, 20, 30, 40, 11, 9, 2.7]),
               ('d', [10, 200, 30, 40, 11, 9, 2.7]),
               ('e', [10, 20, 300, 40, 11, 9, 2.7]),
               ('f', [10, 20, 30, 400, 11, 9, 2.7]),
               ('g', [10, 20, 30, 40, 110, 9, 2.7]),
               ('h', [10, 20, 30, 40, 11, 90, 2.7]),
               ('i', [10, 20, 30, 40, 11, 9, 27.0])]
        self.data = [chip(f) for f in fun]

    def get_feats_for_chip(self, chip):
        """Return the feature vector stored for chip.car_id, or None."""
        return next((d.feature for d in self.data if d.car_id == chip.car_id), None)

    def getchip(self, id):
        """Return the stored chip with this car id, or None."""
        return next((d for d in self.data if d.car_id == id), None)
# test the comparisons
def test_cosine():
    # Known vectors with a precomputed cosine distance expected from comp_cosine.
    a = [1, 2, 3, 4, 5, 6, 7]
    b = [10, 20, 30, 40, 11, 9, 2.7]
    out = analysis.comp_cosine(a, b)
    # Compare to the hard-coded reference value within float tolerance.
    assert(abs(out - 0.63837193721375185) < 0.0000001)
def test_euclidean():
    # Same vector pair; checks the Euclidean distance against a reference value.
    a = [1, 2, 3, 4, 5, 6, 7]
    b = [10, 20, 30, 40, 11, 9, 2.7]
    out = analysis.comp_euclid(a, b)
    assert(abs(out - 49.93485756463114) < 0.0000001)
# test the matching works correctly
def test_is_correct_match():
    """The shared car 'a' should be found at rank 0 across the two cameras."""
    fd = featureData()
    cam1 = [fd.getchip(cid) for cid in ('a', 'b', 'c', 'd')]
    cam2 = [fd.getchip(cid) for cid in ('a', 'e', 'f', 'g')]
    assert analysis.is_correct_match(fd, cam1, cam2) == 0
def test_pre_cmc():
    # With deterministic canned features the top-ranked match is always
    # correct, so the CMC curve starts at 1.0.
    eg = experimentGen()
    fd = featureData()
    keys, values = analysis.pre_cmc(fd, eg, EXPPERCMC=10)
    assert values[0] == 1.0
#test the statistics are being generated correctly
def test_make_cmc_stats():
    eg = experimentGen()
    fd = featureData()
    # Build 10 CMC curves of 10 experiments each, then summarize them.
    experimentHolder = analysis.repeat_pre_cmc(fd, eg, NUMCMC=10, EXPPERCMC=10)
    stats, gdata = analysis.make_cmc_stats(experimentHolder, 4)
    # Deterministic features mean every repeat is identical, so the mean
    # curve and both error bands must coincide pointwise.
    for x in range(len(gdata[0])):
        assert ( gdata[1][x] ==gdata[2][x] == gdata[0][x])
|
#!/usr/bin/env python3
import requests

# Download a page and stream it to disk in 100 KB chunks.
res = requests.get('https://notpurple.com')
res.raise_for_status()
# BUG FIX: use a context manager so the file is closed even if a write fails.
with open('my_web_file.html', 'wb') as playFile:
    for chunk in res.iter_content(100000):
        playFile.write(chunk)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygmsh as pg
import numpy as np
def generate():
    '''Two tori, rotated in space.

    Returns the pygmsh Geometry holding both tori; one meshed normally,
    one via the extrude_circle variant.
    '''
    geom = pg.Geometry()
    # Rotation swapping the y and z axes for the first torus.
    R = np.array([
        [1.0, 0.0, 0.0],
        [0.0, 0.0, 1.0],
        [0.0, 1.0, 0.0]
        ])
    geom.add_torus(
        irad=0.05, orad=0.6, lcar=0.03,
        x0=[0.0, 0.0, -1.0],
        R=R
        )
    # Rotation swapping the x and z axes for the second torus.
    R = np.array([
        [0.0, 0.0, 1.0],
        [0.0, 1.0, 0.0],
        [1.0, 0.0, 0.0]
        ])
    geom.add_torus(
        irad=0.05, orad=0.6, lcar=0.03,
        x0=[0.0, 0.0, 1.0],
        # BUG FIX: this rotation matrix was computed but never passed in.
        R=R,
        variant='extrude_circle'
        )
    return geom
if __name__ == '__main__':
    import meshio
    # Mesh the geometry and write it out as VTU for inspection.
    points, cells = pg.generate_mesh(generate())
    meshio.write('torus.vtu', points, cells)
|
import itertools
import sys

from board2 import *
from djikstra import djikstra
class Pipeline:
    """Weighted sum of board-evaluation modules, short-circuiting finished games."""

    def __init__(self, *modules):
        # Each positional argument is a (module, weight) pair.
        self._modules = []
        self._weights = []
        for module, weight in modules:
            self._weights.append(float(weight))
            self._modules.append(module)

    def add_module(self, name, weight, module):
        """Append a module with the given weight; returns self for chaining."""
        self._modules.append(module)
        self._weights.append(float(weight))
        return self

    def __call__(self, board):
        """Score the board: huge fixed score if the game is over, else weighted sum."""
        game_over = board.get_winner()
        if game_over != 0:  # simplified from `< 0 or game_over > 0`
            return game_over * 100000 # returns the game over score.
        # Zero-weight modules are skipped to avoid useless evaluation.
        return sum(m(board) * w for m, w in zip(self._modules, self._weights) if w != 0)

    def __str__(self):
        return "Pipeline(\n\t%s\n)" % (",\n\t".join("(" + str(m) + ", " + str(w) + ")" for m, w in zip(self._modules, self._weights)))
class MFlatCoverage:
    """Score +1 per stack topped by our flat, -1 per stack topped by theirs."""
    name = "Flat Coverage"

    def __call__(self, board):
        scores = {
            PIECE_FLAT: 1.0,
            -PIECE_FLAT: -1.0
        }
        # dict.get replaces the fragile `x and y or z` idiom (which breaks
        # whenever the looked-up value is falsy).
        return sum(scores.get(s[-1], 0) for s in board.stacks if len(s) > 0)

    def __str__(self):
        return "MFlatCoverage()"
class MNoFeedTheBeast:
    """Weight captured pieces in stacks: the side owning a stack's top piece
    counts its buried pieces at the dominant weight."""
    name = "No Feed the Beast"

    def __init__(self, domWeight=1.3, subWeight=1.0):
        # TODO: add a way for these to be changed
        self._domWeight = float(domWeight)
        self._subWeight = float(subWeight)

    def mutate(self):
        # TODO: implement mutation of the individual modules...
        pass

    def __call__(self, board):
        total = 0
        domWeight = self._domWeight
        subWeight = self._subWeight
        for s in board.stacks:
            if len(s) == 0:
                continue
            # Negative top piece -> black controls the stack.
            if s[-1] < 0:
                wB, wW = domWeight, subWeight
            else:
                wB, wW = subWeight, domWeight
            # Conditional expression replaces the fragile `and/or` idiom,
            # which would misbehave for a weight of 0.
            total += sum(wB if p < 0 else wW for p in s)
        return total

    def __str__(self):
        # BUG FIX: %d truncated the float weights (1.3 printed as "1").
        return "MNoFeedTheBeast(domWeight=%s, subWeight=%s)" % (self._domWeight, self._subWeight)
class MDjikstraDistance:
    """Score boards by shortest-path road distance for each player,
    horizontally and vertically (white minus black)."""
    name = "Djikstra Distance"
    def __call__(self, board):
        # Crossing a square costs 1 unless it is already topped by one of the
        # player's road pieces (flat or cap), which costs 0.
        # NOTE(review): `itertools` is used here but not imported in this file
        # directly — presumably it leaks in via `from board2 import *`; confirm.
        whiteCostsHor = tuple(not (len(s) > 0 and (s[-1] == PIECE_FLAT or s[-1] == PIECE_CAP)) and 1 or 0 for s in board.stacks)
        blackCostsHor = tuple(not (len(s) > 0 and (s[-1] == -PIECE_FLAT or s[-1] == -PIECE_CAP)) and 1 or 0 for s in board.stacks)
        # Transpose the cost grids so the same solver handles the vertical direction.
        whiteCostsVrt = tuple(whiteCostsHor[x + y * board.size] for x, y in itertools.product(range(0, board.size), range(0, board.size)))
        blackCostsVrt = tuple(blackCostsHor[x + y * board.size] for x, y in itertools.product(range(0, board.size), range(0, board.size)))
        whiteHorDist = djikstra(whiteCostsHor, board.size)
        blackHorDist = djikstra(blackCostsHor, board.size)
        whiteVrtDist = djikstra(whiteCostsVrt, board.size)
        blackVrtDist = djikstra(blackCostsVrt, board.size)
        return whiteHorDist + whiteVrtDist - blackHorDist - blackVrtDist
    def __str__(self):
        return "MDjikstraDistance()"
# pipeline = Pipeline(
# (MFlatCoverage(), 1.0),
# (MNoFeedTheBeast(domWeight=1.3, subWeight=1.0), 0.1),
# (MDjikstraDistance(), 0.1)
# )
|
import os, png, io
from nshg import index_handler, dxt, convert
from .util import * # in package
import logging as log
def unpack(nodes, file_path, asset_dir, command):
    """Extract image assets described by `nodes` from a packed data file.

    :param nodes: iterable of asset records (dicts: type_id, offset, size, ...)
    :param file_path: path to the packed data file (.resS sidecar preferred)
    :param asset_dir: directory extracted files are written into
    :param command: 'unpack_raw' writes raw bytes; 'unpack'/'unpack-decode'
        convert to PNG (DXT formats are only decoded under 'unpack-decode')
    :return: list of index entries for every asset actually written
    """
    index = []
    # Can't find a proper flag for whether or not an external file is used...
    if os.path.isfile(file_path + '.resS'):
        file_path += '.resS'
    with open(file_path, 'rb') as source_file:
        for node in nodes:
            # Unsure how much of this is shared, so for the moment hard-code for images
            if node['type_id'] == 28: # image
                source_file.seek(node['offset'])
                data_raw = source_file.read(node['size'])
                name = node['image_name']
                if command == 'unpack_raw':
                    with open(os.path.join(asset_dir, name), 'wb') as image_file:
                        image_file.write(data_raw)
                    entry = index_handler.format_entry(node, data_raw)
                    index.append(entry)
                else: # command in ('unpack' or 'unpack-decode')
                    image_format = node['img_format']
                    # image_path stays '' when nothing was decoded for this node.
                    image_path = ''
                    x_res = node['x_res']
                    y_res = node['y_res']
                    if image_format in (3, 5): # RGB24, ARGB32
                        image_path = os.path.join(asset_dir, name + '.png')
                        alpha = True if image_format == 5 else False
                        image_data_list = convert.raw_image_to_list(data_raw, x_res, y_res, alpha, True)
                        with open(image_path, 'wb') as image_file:
                            writer = png.Writer(x_res, y_res, alpha=alpha)
                            writer.write(image_file, image_data_list)
                    elif image_format in (10, 12): # RGB24 DXT1, ARGB32 DXT5
                        if command == 'unpack':
                            # DXT decoding only happens under 'unpack-decode'.
                            print('dxt unpack')
                        else: # command == 'unpack-decode'
                            image_path = os.path.join(asset_dir, name + '.png')
                            if image_format == 10:
                                image_data_list = dxt.decode_bc1(data_raw, x_res, y_res)
                            else: # 12
                                image_data_list = dxt.decode_bc3(data_raw, x_res, y_res, False)
                            with open(image_path, 'wb') as image_file:
                                writer = png.Writer(x_res, y_res, alpha=True)
                                writer.write(image_file, image_data_list)
                    # This is sloppy and lazy, but hey it works
                    # (re-reads the PNG just written to build the index entry)
                    if image_path:
                        with open(image_path, 'rb') as image_file:
                            data = image_file.read()
                        entry = index_handler.format_entry(node, data)
                        index.append(entry)
    return index
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-04-15 13:42
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax election FKs: explanation nulls out on delete, geography cascades."""

    dependencies = [("elections", "0028_auto_20170415_1319")]
    operations = [
        # Elections may outlive their Explanation rows.
        migrations.AlterField(
            model_name="election",
            name="explanation",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="elections.Explanation",
            ),
        ),
        # Deleting a DivisionGeography removes dependent elections.
        migrations.AlterField(
            model_name="election",
            name="geography",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="organisations.DivisionGeography",
            ),
        ),
    ]
|
# coding=utf-8
import os
import glob
import csv
# Collect per-method result text for each image set and dump it to one CSV.
# NOTE(review): Python 2 script (print statement, csv file opened in 'wb').
data = {}
for dirname, dirnames, filenames in os.walk('static_input'):
    # For each subdir (image set)
    for subdirname in dirnames:
        srcpath = os.path.join(dirname, subdirname)
        destpath = os.path.join('static_output', subdirname + '.png')
        # Competing methods' outputs: static2_output/<set>_<method>...txt
        for filename in glob.glob('static2_output/'+subdirname+'*.txt'):
            method = filename[len('static2_output/'+subdirname)+1:-12]
            with open(filename, 'r') as f:
                if subdirname not in data:
                    data[subdirname] = {}
                data[subdirname][method] = f.read()
        # Our own method's output lives in static_output/.
        for filename in glob.glob('static_output/'+subdirname+'*.txt'):
            with open(filename, 'r') as f:
                if subdirname not in data:
                    data[subdirname] = {}
                data[subdirname]["ourmethod"] = f.read()
with open('results.csv', 'wb') as csvfile:
    writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    # NOTE(review): header is taken from the first image's dict but each row
    # uses data[image].values(); if any image is missing a method the columns
    # silently misalign — consider iterating a fixed method list instead.
    writer.writerow(['Image'] + data[data.keys()[0]].keys())
    for image in data.keys():
        print image
        writer.writerow([image] + data[image].values())
#!/usr/bin/env python
#
##
# Copyright (c) 2006-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
#
# Runs the CalDAVTester test suite ensuring that required packages are available.
#
import getopt
import os
import subprocess
import sys
# This script lives one directory below `top`; dependent packages are
# checked out as siblings of the current directory.
cwd = os.getcwd()
top = cwd[:cwd.rfind("/")]
add_paths = []
svn = "/usr/bin/svn"
uri_base = "http://svn.calendarserver.org/repository/calendarserver"
# (name, PYTHONPATH fragment, svn URI, pinned revision) per dependency.
packages = [
    ("pycalendar", "pycalendar/src", uri_base + "/PyCalendar/trunk", "HEAD"),
]
def usage():
    # Python 2 print statement; the triple-quoted help block prints as-is.
    print """Usage: run.py [options]
Options:
-h Print this help and exit
-s Do setup only - do not run any tests
-r Run tests only - do not do setup
-p Print PYTHONPATH
"""
def setup():
    """Check out or update each dependent package working copy via svn,
    recording its source path in the module-level add_paths list."""
    for package in packages:
        ppath = "%s/%s" % (top, package[0],)
        if not os.path.exists(ppath):
            # Fresh checkout at the pinned revision.
            print "%s package is not present." % (package[0],)
            os.system("%s checkout -r %s %s@%s %s" % (svn, package[3], package[2], package[3], ppath,))
        else:
            print "%s package is present." % (package[0],)
            # Scrape the working-copy URL out of `svn info --xml`.
            fd = os.popen("%s info ../%s --xml" % (svn, package[0],))
            line = fd.read()
            wc_url = line[line.find("<url>") + 5:line.find("</url>")]
            if wc_url != package[2]:
                print "Current working copy (%s) is from the wrong URI: %s != %s, switching..." % (ppath, wc_url, package[2],)
                os.system("%s switch -r %s %s %s" % (svn, package[3], package[2], ppath,))
            else:
                # Same URI: update only when the revision differs.
                rev = line[line.find("revision=\"") + 10:]
                rev = rev[:rev.find("\"")]
                if rev != package[3]:
                    print "Updating %s..." % (package[0],)
                    os.system("%s update -r %s %s" % (svn, package[3], ppath,))
        add_paths.append("%s/%s" % (top, package[1],))
def pythonpath():
    """Return the PYTHONPATH (current sys.path plus package paths) as a string.

    Also populates the module-level add_paths list as a side effect, so a
    later runit() sees the package paths.

    BUG FIX: the original did `pypaths = sys.path` and then extended it,
    mutating the live sys.path list in place; copy it first.
    """
    for package in packages:
        add_paths.append("%s/%s" % (top, package[1],))
    pypaths = list(sys.path)
    pypaths.extend(add_paths)
    return ":".join(pypaths)
def runit():
    """Run the CalDAVTester suite with the collected package paths on
    PYTHONPATH; returns the child process exit status."""
    pythonpath = ":".join(add_paths)
    return subprocess.Popen(["./testcaldav.py", "--all"], env={"PYTHONPATH": pythonpath}).wait()
if __name__ == "__main__":
    # NOTE: Python 2 script (print statements, `except X, e` syntax).
    try:
        do_setup = True
        do_run = True
        options, args = getopt.getopt(sys.argv[1:], "hprs")
        for option, value in options:
            if option == "-h":
                usage()
                sys.exit(0)
            elif option == "-p":
                print pythonpath()
                sys.exit(0)
            elif option == "-r":
                do_setup = False
            elif option == "-s":
                do_run = False
            else:
                print "Unrecognized option: %s" % (option,)
                usage()
                raise ValueError
        # Process arguments
        if len(args) != 0:
            print "No arguments allowed."
            usage()
            raise ValueError
        if (do_setup):
            setup()
        else:
            # Still populate add_paths so runit() gets the right PYTHONPATH.
            pythonpath()
        if (do_run):
            sys.exit(runit())
        else:
            sys.exit(0)
    except SystemExit, e:
        pass
    except Exception, e:
        sys.exit(str(e))
|
import pygame as py
# 8x8 board of empty squares (None == no piece on that square).
board = [[None for _ in range(8)] for _ in range(8)]
class queen():
    """Queen: slides any number of squares along ranks, files and diagonals.

    The original four copy-pasted while-loops (one of which contained a
    duplicated dead `!= None` check) are collapsed into a single ray walker.
    """

    def __init__(self, x, y, color):
        self.type = "Q"
        self.x = x
        self.y = y
        self.color = color

    def _slide(self, board, moves, dx, dy):
        # Walk from (x, y) in direction (dx, dy): collect empty squares,
        # include an enemy-occupied square as a capture, stop at any piece.
        tx, ty = self.x + dx, self.y + dy
        while 0 <= tx < 8 and 0 <= ty < 8:
            square = board[tx][ty]
            if square is not None:
                if square.color != self.color:
                    moves.append((tx, ty))  # capture
                break
            moves.append((tx, ty))
            tx += dx
            ty += dy

    def possible_moves(self, board):
        """Return every pseudo-legal (x, y) destination on the 8x8 board."""
        moves = []
        # Rook-like rays plus bishop-like diagonals = all eight directions.
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1),
                       (1, 1), (1, -1), (-1, 1), (-1, -1)):
            self._slide(board, moves, dx, dy)
        return moves

    def draw_piece(self, win):
        """Return the queen sprite for this piece's color (1 == black)."""
        if self.color == 1:
            piece_color = py.image.load(r"gameres/bqueen.png")
        else:
            piece_color = py.image.load(r"gameres/wqueen.png")
        return piece_color
##
class rook():
    """Rook: slides along ranks and files; tracks castling eligibility.

    The four copy-pasted while-loops (one containing a duplicated dead
    `!= None` check) are collapsed into a single ray walker.
    """

    def __init__(self, x, y, color):
        self.type = "R"
        self.x = x
        self.y = y
        self.color = color
        self.castling = True  # cleared elsewhere once the rook has moved

    def _slide(self, board, moves, dx, dy):
        # Collect empty squares in direction (dx, dy); include a capture
        # square; stop at the first occupied square either way.
        tx, ty = self.x + dx, self.y + dy
        while 0 <= tx < 8 and 0 <= ty < 8:
            square = board[tx][ty]
            if square is not None:
                if square.color != self.color:
                    moves.append((tx, ty))
                break
            moves.append((tx, ty))
            tx += dx
            ty += dy

    def possible_moves(self, board):
        """Return every pseudo-legal (x, y) destination along rank and file."""
        moves = []
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            self._slide(board, moves, dx, dy)
        return moves

    def draw_piece(self, win):
        """Return the rook sprite for this piece's color (1 == black)."""
        if self.color == 1:
            piece_color = py.image.load(r"gameres/brook.png")
        else:
            piece_color = py.image.load(r"gameres/wrook.png")
        return piece_color
class bishop():
    """Bishop: slides along the four diagonals."""

    def __init__(self, x, y, color):
        self.type = "B"
        self.x = x
        self.y = y
        self.color = color

    def possible_moves(self, board):
        """Return every pseudo-legal diagonal (x, y) destination."""
        moves = []
        for xc in (-1, 1):
            for yc in (-1, 1):
                temp_x = self.x + xc
                temp_y = self.y + yc
                while (0 <= temp_x < 8) and (0 <= temp_y < 8):
                    if board[temp_x][temp_y] != None:
                        # BUG FIX: removed a leftover debug print of the square.
                        if board[temp_x][temp_y].color == self.color:
                            break
                        else:
                            moves.append((temp_x, temp_y))  # capture
                            break
                    moves.append((temp_x, temp_y))
                    temp_x += xc
                    temp_y += yc
        return moves

    def draw_piece(self, win):
        """Return the bishop sprite for this piece's color (1 == black)."""
        if self.color == 1:
            piece_color = py.image.load(r"gameres/bBis.png")
        else:
            piece_color = py.image.load(r"gameres/wBis.png")
        return piece_color
class king():
    """King: one step in any direction, plus castling bookkeeping."""

    def __init__(self, x, y, color):
        self.x = x
        self.y = y
        self.color = color
        self.type = "K"
        self.castling = True  # cleared elsewhere once the king has moved

    def castle(self, board):
        """Return the friendly rooks that can currently castle with this king.

        A rook qualifies when both pieces still have castling rights and one
        of the rook's moves lands on a square the king can also step to.
        """
        rook_castle = []
        if not self.castling:
            return rook_castle
        king_moves = self.possible_moves_without_casting(board)
        for row in board:
            for sq in row:
                if sq is None or sq.color != self.color or sq.type != "R":
                    continue
                if not sq.castling:
                    continue
                if any(m in king_moves for m in sq.possible_moves(board)):
                    rook_castle.append(sq)
        return rook_castle

    def possible_moves_without_casting(self, board):
        """One-step king moves, ignoring castling (name kept for callers)."""
        moves = []
        for dia_x in (-1, 0, 1):
            for dia_y in (-1, 0, 1):
                nx, ny = self.x + dia_x, self.y + dia_y
                if not (0 <= nx < 8 and 0 <= ny < 8):
                    continue
                target = board[nx][ny]
                if target is not None and target.color == self.color:
                    continue
                moves.append((nx, ny))
        return moves

    def possible_moves(self, board):
        """One-step moves plus the two-square castling destinations.

        Reuses possible_moves_without_casting instead of duplicating it
        wholesale as the original did.
        """
        moves = self.possible_moves_without_casting(board)
        for rook in self.castle(board):
            # A rook on file 0 means the king shifts two squares toward it.
            if rook.y == 0:
                moves.append((self.x, self.y - 2))
            else:
                moves.append((self.x, self.y + 2))
        return moves

    def draw_piece(self, win):
        """Return the king sprite for this piece's color (1 == black)."""
        if self.color == 1:
            piece_color = py.image.load(r"gameres/bking.png")
        else:
            piece_color = py.image.load(r"gameres/wking.png")
        return piece_color
class pawn():
    """Pawn: one (or initially two) squares forward, captures diagonally.

    Color 1 advances toward larger x (start rank 1); color 0 advances
    toward smaller x (start rank 6).
    """

    def __init__(self, x, y, color):
        self.x = x
        self.y = y
        self.color = color
        self.type = "P"

    def possible_moves(self, board):
        """Return the pseudo-legal pawn destinations.

        BUG FIXES vs the original:
        - guards the forward square against running off the board (a pawn on
          the last rank used to raise IndexError);
        - the double step now requires the square directly ahead to be empty
          (pawns could previously jump over a blocker).
        """
        moves = []
        if self.color == 1:
            step, start_row = 1, 1
        else:
            step, start_row = -1, 6
        ahead = self.x + step
        if 0 <= ahead < 8:
            if board[ahead][self.y] is None:
                moves.append((ahead, self.y))
                if self.x == start_row and board[self.x + 2 * step][self.y] is None:
                    moves.append((self.x + 2 * step, self.y))
            # Diagonal captures only onto enemy-occupied squares.
            for dia_y in [-1, 1]:
                ny = self.y + dia_y
                if 0 <= ny < 8 and board[ahead][ny] is not None:
                    if board[ahead][ny].color != self.color:
                        moves.append((ahead, ny))
        return moves

    def draw_piece(self, win):
        """Return the pawn sprite for this piece's color (1 == black)."""
        if self.color == 1:
            piece_color = py.image.load(r"gameres/bpawn.png")
        else:
            piece_color = py.image.load(r"gameres/wpawn.png")
        return piece_color
class knight():
    """Knight: L-shaped jumps onto empty or enemy-occupied squares."""

    def __init__(self, x, y, color):
        self.x = x
        self.y = y
        self.color = color
        self.type = "N"

    def possible_moves(self, board):
        """Return the on-board knight destinations not blocked by a friend."""
        # All (dx, dy) with {|dx|, |dy|} == {1, 2}.
        offsets = [(k, n) for k in (-2, -1, 1, 2) for n in (-2, -1, 1, 2)
                   if abs(k) != abs(n)]
        moves = []
        for dx, dy in offsets:
            nx, ny = self.x + dx, self.y + dy
            if not (0 <= nx < 8 and 0 <= ny < 8):
                continue
            occupant = board[nx][ny]
            if occupant is not None and occupant.color == self.color:
                continue
            moves.append((nx, ny))
        return moves

    def draw_piece(self, win):
        """Return the knight sprite for this piece's color (1 == black)."""
        if self.color == 1:
            piece_color = py.image.load(r"gameres/bknight.png")
        else:
            piece_color = py.image.load(r"gameres/wknight.png")
        return piece_color
|
from vector import Vector
def main():
    # Entry point: read two vectors interactively, then classify their relationship.
    dot_product(get_input())
def get_input():
    """Prompt the user for two vectors and return them as a (v1, v2) tuple."""
    v1 = Vector()
    v2 = Vector()
    v1.get_input("1")
    v2.get_input("2")
    return (v1, v2)
def dot_product(vectors):
    """Print the dot product of two vectors and classify their relationship.

    Classifies the pair as perpendicular (dot product 0), parallel
    (componentwise scalar multiple) or skew, printing the working as it goes.

    :param vectors: (vector1, vector2) tuple of objects with x, y, z attrs
    """
    vector1, vector2 = vectors
    print("(x1 * x2) + (y1 * y2) + (z1 * z2) = ")
    print("(%(x1)d * %(x2)d) + (%(y1)d * %(y2)d) + (%(z1)d * %(z2)d) = " % \
        {"x1": vector1.x, "x2": vector2.x, "y1": vector1.y, "y2": vector2.y, "z1": vector1.z, "z2": vector2.z})
    x = vector1.x * vector2.x
    y = vector1.y * vector2.y
    z = vector1.z * vector2.z
    print("(%(x0)d) + (%(y0)d) + (%(z0)d) = " % \
        {"x0": x, "y0": y, "z0": z})
    product = x + y + z
    print(str(product))
    if product == 0:
        print_vectors(vector1, vector2)
        print("are perpendicular because the dot product is 0.")
    else:
        # BUG FIX: guard against ZeroDivisionError when vector2 has a zero
        # component. (Limitation: pairs that are parallel but share a zero
        # component, e.g. (0,0,1) and (0,0,2), are reported as skew.)
        if vector2.x != 0 and vector2.y != 0 and vector2.z != 0:
            xCoeff = vector1.x / vector2.x
            yCoeff = vector1.y / vector2.y
            zCoeff = vector1.z / vector2.z
            parallel = xCoeff == yCoeff and xCoeff == zCoeff
        else:
            parallel = False
        if parallel:
            print_vectors(vector1, vector2)
            print("are parallel because they have a scalar multiple of %(scalar)d." % \
                {"scalar": xCoeff})
        else:
            print_vectors(vector1, vector2)
            print("are skew because the dot product is not 0,\nand they are no scalar multiples of each other.")
def print_vectors(vector1, vector2):
    # Lead-in line shared by every classification message.
    print("The two vectors %s and %s" % (vector1, vector2))
if __name__=="__main__":
    # Script entry point: run the interactive dot-product classifier.
    main()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect
import random
# Pool of words a surprise list is randomly drawn from.
myList = ['hello', 'world', 'good', 'bye', 'hungry', 'food', 'eating', 'poop']
def index(request):
    """Render the landing page."""
    return render(request, 'suprise/index.html')
def suprise(request):
    """Render the surprise page ('suprise' spelling matches the template dir)."""
    return render(request, 'suprise/suprise.html')
def my_list(request):
    """Store `num` unique random words from myList in the session, then redirect.

    BUG FIX: the original rejection-sampling while-loop spun forever whenever
    num exceeded the number of distinct words; random.sample removes the loop
    and raises ValueError instead of hanging. A leftover debug print was
    also removed.
    """
    request.session['num'] = int(request.POST['num'])
    # Sample without replacement, guaranteeing unique words.
    newList = random.sample(myList, request.session['num'])
    request.session['list'] = newList
    return redirect('/suprise')
|
# Example of comment usage
# Created: 2016.5.30
a = 1 # assign 1 to a
b = 5 # assign 5 to b
print(a+b) # print the result of a+b
|
from rest_framework.routers import DefaultRouter
from legalapp import views
# DefaultRouter auto-generates the CRUD URL patterns for each registered viewset.
router = DefaultRouter()
router.register('User', views.UserViewSet,basename='User')
router.register('Plan', views.PlanViewSet,basename='Plan')
router.register('Previous_Plans', views.Previous_PlansViewSet,basename='Previous_Plans')
# Expose the generated routes as this module's urlpatterns.
urlpatterns = router.urls
from pyspark.sql.functions import udf, trim, lower
# NOTE(review): `spark` (SparkSession) and `year_month` are not defined in
# this file — presumably supplied by the enclosing notebook/job context; confirm.
s3 = "s3a://shwes3udacapstone/"
# Airports joined to city demographics on city + state.
df_immigration_airport=spark.read.parquet(s3+"data/processed/airports/")
df_immigration_airport = df_immigration_airport.withColumn("city",lower(df_immigration_airport.city))
df_demo = spark.read.parquet(s3 + 'data/processed/city/')
df_demo_airport = df_immigration_airport.join(df_demo,["city","state_code","state_name"])
# One month of immigration records; port_of_entry doubles as airport_code.
df_immigration = spark.read.parquet(s3+"data/processed/immigration/").filter("i94dt=='{0}'".format(year_month))
df_immigration = df_immigration.withColumnRenamed("port_of_entry","airport_code")
df_demo_airport = df_demo_airport.drop("state_code","state_name")
df_immigration_demo = df_immigration.join(df_demo_airport,["airport_code"]).\
selectExpr("cicid","arrival_date","departure_date","airport_code","name","city","state_code","state_name","population","median_age","i94dt")
df_immigrant = spark.read.parquet(s3+"data/processed/immigrant/").filter("i94dt=='{0}'".format(year_month)).drop("i94dt")
df_immigrant_demographics = df_immigrant.join(df_immigration_demo,["cicid"]).\
selectExpr("cicid","age","birth_country","residence_country","gender","visatype","visa",\
"i94dt","arrival_date","departure_date","airport_code","name","city","state_code",\
"state_name","population","median_age")
# Partitioned by month so re-running for a new month appends cleanly.
df_immigrant_demographics.write.partitionBy("i94dt").mode("append").parquet(s3 + 'data/processed/immigration_demographics/')
|
#!/usr/bin/python
from __future__ import division
import numpy as np
import pandas as pd
import sys
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
import csv
from sklearn.metrics import matthews_corrcoef
from sklearn.model_selection import train_test_split
# train_file = sys.argv[1]
# Train SVM classifiers on argv[1] and score predictions against argv[2]
# using the Matthews correlation coefficient.
train_frame = pd.read_csv(sys.argv[1])
test_file = sys.argv[2]
test_frame = pd.read_csv(test_file)
# Feature-column subsets explored during development; `cols` is the full set.
cols = ['correlation','conservation','polaritychange','chargechange','hydroindexchange','secondarystruc','asa','sizechange']
cols1 = ['correlation','conservation','polaritychange','hydroindexchange','secondarystruc','asa','sizechange']
cols2 = ['correlation','conservation','polaritychange','chargechange','hydroindexchange','secondarystruc','sizechange']
cols3 = ['correlation','conservation','polaritychange','chargechange','secondarystruc','sizechange']
colsRes = ['class']
# NOTE: this split is computed but not used below (leftover experiment).
X = train_frame.iloc[:,0:7].values
y = train_frame.iloc[:, 8].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.17, random_state = 0)
# BUG FIX: DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
# column selection + .values is the supported equivalent.
trainArr = train_frame[cols].values
trainRes = train_frame[colsRes].values
trainRes = trainRes.ravel()
testArr = test_frame[cols].values
testRes = test_frame[colsRes].values
testRes = testRes.ravel()
test_class = test_frame[['class']]
#correct = 0
classifier = svm.SVC(kernel = 'poly',class_weight='balanced')
classifier.fit(trainArr, trainRes)
results = classifier.predict(testArr)
classifier = svm.SVC(kernel = 'linear',class_weight={1: .45, -1: .55 })
classifier.fit(trainArr, trainRes)
results2 = classifier.predict(testArr)
predicted_class = results
mcc = matthews_corrcoef(test_class, predicted_class)
print(mcc)
#with open('majority_voting4_new.csv', 'w') as f:
# writer = csv.writer(f, delimiter=',')
# writer.writerows(zip(results))
|
import os,sys
import numpy as np
import pandas as pd
# Module-level cache for the lazily built hoot channel-map DataFrame.
__hootdb = None
def buildhootdb():
    """Build the module-level hoot channel-map DataFrame from hoot/crate*.txt."""
    global __hootdb
    hootdict = {'crate':[],'slot':[],'femch':[],
                'FT':[],'connect':[],'plane':[],'mbid':[],
                'asicid':[],'wireid':[]}
    # NOTE: Python 2 (xrange). Crates are numbered 1..9.
    for cratenum in xrange(1,10):
        f = open('hoot/crate%d.txt'%(cratenum),'r')  # NOTE(review): never closed
        lines = f.readlines()
        for l in lines[1:]:  # skip the header row
            data = l.split()
            hootdict['crate'].append( int(data[0].strip()) )
            hootdict['slot'].append( int(data[1].strip()) )
            hootdict['femch'].append( int(data[2].strip()) )
            hootdict['FT'].append( int(data[3].strip()) )
            hootdict['connect'].append( data[4].strip() )
            hootdict['plane'].append( data[6].strip() )
            hootdict['mbid'].append( int(data[7].strip()) )
            hootdict['asicid'].append( int(data[8].strip()) )
            # NOTE(review): 'wireid' re-reads column 0, the same column as
            # 'crate' — looks like a copy-paste slip; confirm intended column.
            hootdict['wireid'].append( int(data[0].strip() ) )
    __hootdb = pd.DataFrame( hootdict )
def gethootdb():
    """Return the hoot DataFrame, building and caching it on first call."""
    global __hootdb
    if __hootdb is None:
        buildhootdb()
    return __hootdb
if __name__=="__main__":
    # Smoke test: build the table and dump it (Python 2 print statement).
    db = gethootdb()
    print db
|
import numpy as np
import pylab as P
import ROOT
from ROOT import gROOT
# Load the PROSPECT event library and dictionaries into ROOT.
gROOT.ProcessLine(".L /home/mage/PROSPECT/PROSPECT-G4-build/lib/libEventLib.so")
gROOT.ProcessLine(".L /home/mage/PROSPECT/PROSPECT-G4-Sec/include/Output/Event.hh")
# histCell=ROOT.TH2D("Cell Ionization Hits","Cell Ionization Hits",10,0,10,14,0,14)
# pmt1Hist=ROOT.TH2D("d","d",100,-1200,1200,100,-1200,1200)
# Detector cell grid dimensions.
Nx=14
Ny=10
# NOTE: Python 2 script (print statements, xrange).
print "start"
for k in xrange(0,32):
    # if k!=1 and k!=9:
    # One ROOT file per cosmic-ray run.
    cryFile=ROOT.TFile("../cry_"+str(k)+".root")
    tree=ROOT.TTree()
    tree=cryFile.Get("PG4")
    sp=ROOT.SecondaryParticleEvent()
    ion=ROOT.IoniClusterEvent()
    tree.SetBranchAddress("SecParticle",sp)
    tree.SetBranchAddress("ScIoni",ion)
    entries=int(tree.GetEntries())
    for i in xrange(0,entries):
        print "event "+str(i)+" for file "+str(k)
        tree.GetEntry(i)
        clust=ion.nIoniClusts
        det=sp.nParticlesDet
        clus=ion.nIoniClusts
        E=ion.EIoni
        e=0
        # Sum cluster energy; map each cluster's volume id onto the cell grid.
        # NOTE(review): x from vert.x[0] is immediately overwritten by vol/Nx.
        for j in xrange(0,clus):
            vert=ion.clusts.At(j)
            x=vert.x[0]
            vol=vert.vol
            x=vol/Nx
            y=vol%(Ny)
            # histCell.Fill(x,y)
            e+=vert.E
        percent=-1
        for j in xrange(0,det):
            # Progress printout every 10%.
            percentUp=int(float(j)/float(det)*100)
            if percentUp != percent:
                percent=percentUp
                if percent%10==0:
                    print str(percentUp)+" %"
            photo=sp.particles.At(j)
            x=photo.x[0]
            y=photo.x[1]
            z=photo.x[2]
            #print "x: "+str(x)+" y: "+str(y)+" z: "+str(z)
            # if y<0:
            # pmt1Hist.Fill(x,z)
# NOTE(review): pmt1Hist's creation is commented out near the top of the file,
# so this draw call will raise NameError if reached — confirm which histogram
# was meant to be drawn.
pmt1Hist.Draw("colz")
|
"""Moving the fmc.deployabledevices to an actual api_object."""
import logging
import time
class DeployableDevices(
    object
):  # Can't import APIClassTemplate due to dependency loop.
    """
    Collect a list of FMC managed devices whose configuration is not up-to-date.
    :return: List of devices needing updates.
    """

    URL_SUFFIX = "/deployment/deployabledevices?expanded=true"
    WAIT_TIME = 15  # seconds to let the FMC refresh its deployable-device list

    def __init__(self, fmc):
        """
        Initialize DeployableDevices object.
        :param fmc (object): FMC object
        :return: None
        """
        logging.debug("In __init__ for DeployableDevices() class.")
        logging.info(
            f"Waiting {self.WAIT_TIME} seconds to allow the FMC to update the list of deployable devices."
        )
        time.sleep(self.WAIT_TIME)
        self.fmc = fmc
        self.URL = f"{self.fmc.configuration_url}{self.URL_SUFFIX}"

    def get(self):
        """
        Use GET API call to query FMC for a list of devices that need configuration updates pushed to them.

        Note: despite the historical name, this returns the full response
        item dicts for deployable devices (not bare uuids), or None when the
        FMC response has no "items" key.
        :return: (list) deployable device entries, or None
        """
        logging.debug("GET method for API for DeployableDevices.")
        logging.info("Getting a list of deployable devices.")
        response = self.fmc.send_to_api(method="get", url=self.URL)
        if "items" not in response:
            return
        # Keep only devices the FMC reports as deployable (replaces the
        # original inverted `if not ...: pass / else:` pattern).
        return [item for item in response["items"] if item["canBeDeployed"]]

    def post(self):
        """POST method for API for DeployableDevices not supported."""
        logging.info("POST method for API for DeployableDevices not supported.")

    def put(self):
        """PUT method for API for DeployableDevices not supported."""
        logging.info("PUT method for API for DeployableDevices not supported.")

    def delete(self):
        """DELETE method for API for DeployableDevices not supported."""
        logging.info("DELETE method for API for DeployableDevices not supported.")
|
#!/usr/bin/env python3
import sys
import struct
import binascii
from .elf import *
from .helpers import *
#typedef struct {
# Elf32_Addr r_offset;
# Elf32_Word r_info;
#} Elf32_Rel;
def tag_elf32_rel(fp, machine:E_MACHINE=None):
    """Tag one 8-byte Elf32_Rel record at the current file position.

    :param fp: file object positioned at the start of the record
    :param machine: optional machine id used to pretty-print ARM reloc types
    """
    tag(fp, 8, 'Elf32_Rel', 1)
    #r_offset = uint32(fp, 1)
    tagUint32(fp, 'r_offset') # location relocation is applied
    # - relocated file: section offset
    # - executable/shared object: virtual address
    r_info = uint32(fp, 1)
    r_sym = ELF32_R_SYM(r_info)
    r_type = ELF32_R_TYPE(r_info)
    r_type_str = ''
    # NOTE(review): compares against E_MACHINE.EM_ARM.value, so callers appear
    # to pass the raw integer despite the E_MACHINE annotation — confirm.
    if machine == E_MACHINE.EM_ARM.value:
        r_type_str = ' %s' % RELOC_TYPE_ARM(r_type).name
    descr = 'r_info=%08X (sym=%06X type=%02X%s)' % \
        (r_info, r_sym, r_type, r_type_str)
    tag(fp, 4, descr) # relocation type and symbol table index
    # - eg: R_SPARC_GOT10, R_386_PLT32, R_AMD64_JUMP_SLOT
#typedef struct {
# Elf32_Addr r_offset;
# Elf32_Word r_info;
# Elf32_Sword r_addend;
#} Elf32_Rela;
def tag_elf32_rela(fp):
    """Tag one 12-byte Elf32_Rela record (fields tagged but not decoded)."""
    tag(fp, 12, 'Elf32_Rela', 1)
    tag(fp, 4, 'r_offset')
    tag(fp, 4, 'r_info')
    tag(fp, 4, 'r_addend')
# get symbol table index from r_info
#define ELF32_R_SYM(info) ((info)>>8)
def ELF32_R_SYM(info):
    """Extract the 24-bit symbol-table index from an Elf32 r_info word."""
    return (info & 0xFFFFFF00) >> 8
# get symbol type from r_info
#define ELF32_R_TYPE(info) ((unsigned char)(info))
def ELF32_R_TYPE(info):
    """Extract the low-byte relocation type from an Elf32 r_info word."""
    return info % 256
def tag_elf32_sym(fp, index, strTab:StringTable):
    """Tag one Elf32_Sym, annotating name, binding and type.

    :param index: symbol table index, echoed into the record-level tag
    :param strTab: string table used to resolve st_name
    """
    base = fp.tell()
    st_name = uint32(fp, 1)
    nameStr = strTab[st_name]
    tag(fp, 4, "st_name=0x%X \"%s\"" % (st_name,nameStr))
    st_value = tagUint32(fp, "st_value")
    st_size = tagUint32(fp, "st_size")
    st_info = uint8(fp, 1)
    # st_info: high nibble = binding, low nibble = type.
    bindingStr = symbol_binding_tostr(st_info >> 4)
    typeStr = symbol_type_tostr(st_info & 0xF)
    tag(fp, 1, "st_info bind:%d(%s) type:%d(%s)" % \
        (st_info>>4, bindingStr, st_info&0xF, typeStr))
    st_other = tagUint8(fp, "st_other")
    st_shndx = tagUint16(fp, "st_shndx")
    # Rewind and emit the record-level tag spanning the whole struct.
    fp.seek(base)
    tag(fp, SIZE_ELF32_SYM, "Elf32_Sym \"%s\" (index:%d)" % (nameStr, index))
def tag_elf32_dyn(fp, e_machine):
    """Tag one Elf32_Dyn entry; returns 'quit' on the DT_NULL terminator."""
    base = fp.tell()
    d_tag = uint32(fp, 1)
    tagStr = dynamic_type_tostr(d_tag, e_machine)
    tag(fp, 4, "d_tag:0x%X (%s)" % (d_tag, tagStr))
    tagUint32(fp, "val_ptr")
    # Rewind and emit the record-level tag spanning the whole entry.
    fp.seek(base)
    tag(fp, SIZE_ELF32_DYN, "Elf32_Dyn (%s)" % tagStr)
    # DT_NULL marks the end of the dynamic section.
    if d_tag == DynamicType.DT_NULL:
        return 'quit'
def tag_elf32_shdr(fp, index, scnStrTab):
    """Tag one Elf32_Shdr (section header, 40 bytes) and return its fields.

    fp: binary stream positioned at the start of the header.
    index: ordinal of this section header (display only).
    scnStrTab: section-name string table for resolving sh_name.
    Returns a dict of all ten header fields keyed by their ELF names.
    """
    base = fp.tell()
    sh_name = tagUint32(fp, "sh_name")
    sh_type = uint32(fp, 1)
    tag(fp, 4, "sh_type=0x%X (%s)" % \
        (sh_type, sh_type_tostr(sh_type)))
    sh_flags = uint32(fp, 1)
    tag(fp, 4, "sh_flags=0x%X (%s)" % \
        (sh_flags, sh_flags_tostr(sh_flags)))
    sh_addr = tagUint32(fp, "sh_addr")
    sh_offset = tagUint32(fp, "sh_offset")
    sh_size = tagUint32(fp, "sh_size")
    sh_link = tagUint32(fp, "sh_link") # usually the section index of the associated string or symbol table
    sh_info = tagUint32(fp, "sh_info") # usually the section index of the section to which this applies
    sh_addralign = tagUint32(fp, "sh_addralign")
    sh_entsize = tagUint32(fp, "sh_entsize")
    # container tag over the whole 40-byte header
    fp.seek(base)
    tag(fp, 40, 'elf32_shdr "%s" %s (index: %d)' % \
        (scnStrTab[sh_name], sh_type_tostr(sh_type), index))
    return {'sh_name':sh_name,
            'sh_type':sh_type,
            'sh_flags':sh_flags,
            'sh_addr':sh_addr,
            'sh_offset':sh_offset,
            'sh_size':sh_size,
            'sh_link':sh_link,
            'sh_info':sh_info,
            'sh_addralign':sh_addralign,
            'sh_entsize':sh_entsize}
def analyze(fp):
    """Walk an ELF32 image at fp and tag every structure this tool knows.

    Tags the ELF header, every section header, the contents of the
    section types understood here (SHT_DYNAMIC, SHT_SYMTAB/SHT_DYNSYM,
    SHT_STRTAB, SHT_REL, SHT_RELA) and finally the program headers.
    Byte order is selected from e_ident[EI_DATA].  Returns early (no
    tags) when the stream is not an ELF32 file.
    """
    if not isElf32(fp):
        return
    # read elf32_hdr
    tag(fp, SIZE_ELF32_HDR, "elf32_hdr", 1)
    tag(fp, 4, "e_ident[0..4)")
    tagUint8(fp, "e_ident[EI_CLASS] (32-bit)")
    ei_data = uint8(fp, 1)
    tagUint8(fp, "e_ident[EI_DATA] %s" % ei_data_tostr(ei_data))
    assert(ei_data in [ELFDATA2LSB,ELFDATA2MSB])
    # all subsequent multi-byte reads honour the declared byte order
    if ei_data == ELFDATA2LSB:
        setLittleEndian()
    elif ei_data == ELFDATA2MSB:
        setBigEndian()
    tagUint8(fp, "e_ident[EI_VERSION]")
    tagUint8(fp, "e_ident[EI_OSABI]")
    tagUint8(fp, "e_ident[EI_ABIVERSION]")
    tag(fp, 7, "e_ident[EI_PAD]")
    e_type = uint16(fp, 1)
    tagUint16(fp, "e_type %s" % e_type_tostr(e_type))
    e_machine = uint16(fp, 1)
    tagUint16(fp, "e_machine %s" % (e_machine_tostr(e_machine)))
    tagUint32(fp, "e_version")
    tagUint32(fp, "e_entry")
    e_phoff = tagUint32(fp, "e_phoff")
    e_shoff = tagUint32(fp, "e_shoff")
    tagUint32(fp, "e_flags")
    e_ehsize = tagUint16(fp, "e_ehsize")
    assert(e_ehsize == SIZE_ELF32_HDR)
    tagUint16(fp, "e_phentsize")
    e_phnum = tagUint16(fp, "e_phnum")
    e_shentsize = tagUint16(fp, "e_shentsize")
    assert(e_shentsize == SIZE_ELF32_SHDR)
    e_shnum = tagUint16(fp, "e_shnum")
    e_shstrndx = tagUint16(fp, "e_shstrndx")
    # read the string table
    tmp = e_shoff + e_shstrndx*SIZE_ELF32_SHDR
    #print('seeking to %X for the string table section header' % tmp)
    fp.seek(tmp)
    # manual unpack of the shstrtab header: only sh_offset/sh_size needed
    fmt = {ELFDATA2LSB:'<IIIIII', ELFDATA2MSB:'>IIIIII'}[ei_data]
    (a,b,c,d,sh_offset,sh_size) = struct.unpack(fmt, fp.read(24))
    #print('sh_offset: %08X, sh_size: %08X' % (sh_offset, sh_size))
    fp.seek(sh_offset)
    scnStrTab = StringTable(fp, sh_size)
    # tag and save all section headers
    fp.seek(e_shoff)
    scn_infos = []
    for i in range(e_shnum):
        info:dict = tag_elf32_shdr(fp, i, scnStrTab)
        scn_infos.append(info)
    # tag section contents
    for (i,info) in enumerate(scn_infos):
        # top level container
        if not info['sh_type'] in [SHT_NULL, SHT_NOBITS] and info['sh_size'] > 0:
            print('[0x%X,0x%X) raw section "%s" contents' % \
                (info['sh_offset'], info['sh_offset']+info['sh_size'], scnStrTab[info['sh_name']]))
        # like .dynamic
        if info['sh_type'] == SHT_DYNAMIC:
            # array of Elf32_Dyn entries
            fp.seek(info['sh_offset'])
            while fp.tell() < (info['sh_offset'] + info['sh_size']):
                if tag_elf32_dyn(fp, e_machine) == 'quit':
                    break
        # like .dynsym
        elif info['sh_type'] in [SHT_SYMTAB, SHT_DYNSYM]:
            # get associated string table
            link = info['sh_link']
            fp.seek(scn_infos[link]['sh_offset'])
            strtab = StringTable(fp, scn_infos[link]['sh_size'])
            # array of Elf32_Sym entries
            idx = 0
            fp.seek(info['sh_offset'])
            while fp.tell() < (info['sh_offset'] + info['sh_size']):
                tag_elf32_sym(fp, idx, strtab)
                idx += 1
        elif info['sh_type'] == SHT_STRTAB:
            fp.seek(info['sh_offset'])
            tag_strtab(fp, info['sh_size'])
        elif info['sh_type'] == SHT_REL:
            fp.seek(info['sh_offset'])
            while fp.tell() < (info['sh_offset'] + info['sh_size']):
                tag_elf32_rel(fp, e_machine)
        elif info['sh_type'] == SHT_RELA:
            fp.seek(info['sh_offset'])
            while fp.tell() < (info['sh_offset'] + info['sh_size']):
                tag_elf32_rela(fp, e_machine)
    # read program headers
    # REMINDER! struct member 'p_flags' changes between 32/64 bits
    fp.seek(e_phoff)
    for i in range(e_phnum):
        oHdr = fp.tell()
        p_type = uint32(fp, True)
        tagUint32(fp, 'p_type', '('+phdr_type_tostr(p_type)+')')
        tagUint32(fp, "p_offset")
        tagUint32(fp, "p_vaddr")
        tagUint32(fp, "p_paddr")
        tagUint32(fp, "p_filesz")
        tagUint32(fp, "p_memsz")
        p_flags = uint32(fp, True)
        tagUint32(fp, 'p_flags', '('+phdr_flags_tostr(p_flags)+')')
        tagUint32(fp, "p_align")
        print('[0x%X,0x%X) raw elf32_phdr index=%d' % \
            (oHdr, fp.tell(), i))
if __name__ == '__main__':
    # CLI entry point: tag the ELF file given as the first argument.
    with open(sys.argv[1], 'rb') as fp:
        analyze(fp)
|
import spacy
from os import listdir
from os.path import isfile, join
from tqdm import tqdm
nlp = spacy.load('en_core_web_sm')
from spacy.lang.en.stop_words import STOP_WORDS
stopWords = set(STOP_WORDS)
stopWords.add("e.g")
stopWords.add("i.e")
def main():
    """
    Document terms extraction.

    Computes term frequencies over every document in the corpus
    directory and writes each term whose frequency reaches the minimum
    threshold, rolling over to the next output file after the 205th and
    400th written terms.
    """
    # inputs
    corpus_dir = r".\Corpus"  # directory for corpus documents
    output_dir = r".\OutputDir"  # result files output directory
    # paths to save the extracted terms, in rollover order
    output_files = [output_dir + r"\ExtractedTerms0-205.txt",
                    output_dir + r"\ExtractedTerms206-400.txt",
                    output_dir + r"\ExtractedTerms401-600.txt"]
    minFreq = 7  # minimum frequency threshold
    # compute tf for each term in the corpus
    tf = computerTf(corpus_dir)
    # if tf of the term is greater than minimum freq save it to the output file
    word_writed = 0
    file_output = 0
    terms_file = open(output_files[file_output], "w", errors='ignore')
    try:
        for term, score in tf.items():
            if score >= minFreq:
                word_writed += 1
                terms_file.write(str(term) + "\n")
                if word_writed == 205 or word_writed == 400:
                    # fix: close the previous file before opening the
                    # next one (handles were leaked before)
                    terms_file.close()
                    file_output += 1
                    terms_file = open(output_files[file_output], "w", errors='ignore')
    finally:
        terms_file.close()
    print("\n > Word writed : {}".format(word_writed))
def removeArticles(text):
    """Strip a leading stop word (article etc.) from a noun phrase.

    Only the first word is checked; when it is a stop word, its first
    occurrence followed by a space is removed from the text.
    """
    # remove stop words from the begining of a NP
    words = text.split()
    # fix: guard against empty/whitespace-only input (previously raised
    # IndexError on words[0])
    if words and words[0] in stopWords:
        return text.replace(words[0] + " ", "")
    return text
def computerTf(dir):
    """Compute raw term frequencies over every document in *dir*.

    Each document is parsed in chunks of 50 lines with spaCy; every
    noun chunk (lower-cased, leading article removed) that is not
    itself a stop word is counted.  Returns a dict term -> frequency.
    """
    alldocs = [join(dir, f) for f in listdir(dir) if isfile(join(dir, f))]
    AllTerms = dict()
    for doc in alldocs:
        # fix: close the file handle (was leaked)
        with open(doc, "r", errors='ignore') as handle:
            lines = handle.readlines()
        scale = 50
        # chunk boundaries: [0, 0, 50, 100, ..., len(lines)]
        scale_lines = [0] + list(range(0, len(lines), scale))
        if scale_lines[-1] != len(lines):
            scale_lines.append(len(lines))
        for i in tqdm(range(1, len(scale_lines))):
            docParsing = nlp("\n".join(lines[scale_lines[i-1]:scale_lines[i]]))
            for chunk in docParsing.noun_chunks:
                np = removeArticles(chunk.text.lower())
                if np in stopWords:
                    continue
                # idiom: single dict lookup instead of `in AllTerms.keys()`
                AllTerms[np] = AllTerms.get(np, 0) + 1
    return AllTerms
if __name__ == '__main__':
    # Script entry point: run the term extraction pipeline.
    main()
|
import re
import bcrypt
from . import db
async def validate_registration(db_engine, form):
    """Validate a registration form and hash the password.

    Returns a dict ready for insertion into the users table, or None
    when a field is missing, the email is malformed, the passwords do
    not match, or the email is already registered.
    """
    try:
        email = form['login']
        first_name = form['first_name']
        last_name = form['last_name']
        password = form['password']
        password_confirmation = form['password_confirmation']
    except KeyError:
        return
    # email validation
    if re.match(r'[\w\.]+@\w+\.\w+', email) is None:
        return
    if password != password_confirmation:
        return
    # reject already-registered addresses
    async with db_engine.acquire() as conn:
        existing = await conn.execute(
            db.users.select().where(db.users.c.email == email))
        if existing.rowcount:
            return
    hashed = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())
    return {'email': email,
            'first_name': first_name,
            'last_name': last_name,
            'password': hashed.decode('utf-8')}
def validate_post(title, content):
    """Return True when both title and content are strings.

    isinstance() already rejects None, so no separate None check is
    needed; the result is an identical boolean for every input.
    """
    return isinstance(title, str) and isinstance(content, str)
|
"""Command to overwrite dovecot LDAP configuration (auth)."""
from django.core.management.base import BaseCommand
from modoboa.parameters import tools as param_tools
from ... import lib
from modoboa.core import models
class Command(BaseCommand):
    """Management command: sync dovecot's LDAP auth configuration.

    Rewrites the dovecot configuration file when the local config flags
    a pending update and LDAP auth with dovecot sync is enabled, then
    clears the pending flag.
    """

    help = "Update dovecot configuration file to enable LDAP auth"

    def handle(self, *args, **options):
        """Command entry point."""
        localconfig = models.LocalConfig.objects.first()
        # nothing to do until something marks the config as stale
        if not localconfig.need_dovecot_update:
            return
        config = dict(param_tools.get_global_parameters("core"))
        condition = (
            config["authentication_type"] == "ldap" and
            config["ldap_dovecot_sync"]
        )
        if condition:
            lib.update_dovecot_config_file(config)
        # clear the flag even when LDAP sync is disabled, so the command
        # does not re-run needlessly
        localconfig.need_dovecot_update = False
        localconfig.save(update_fields=["need_dovecot_update"])
|
# vim: set fileencoding=utf-8 :
from __future__ import absolute_import, division
import unittest
import zeelalchemy.tests as tests
from zeelalchemy import make_class_dictable
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, String, Integer
engine = create_engine('sqlite:///:memory:', echo=False)
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base(engine)
class MakeClassDictable(Base):
    """Minimal mapped model used to exercise make_class_dictable()."""
    __tablename__ = 'makeclassdictable'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    def __init__(self, name):
        self.name = name
class TestAsdict(unittest.TestCase):
    """Check that make_class_dictable() retrofits asdict() onto a class."""
    def setUp(self):
        """ Recreate the database """
        Base.metadata.create_all(engine)
        Session = sessionmaker(bind=engine)
        self.session = Session()
    def tearDown(self):
        Base.metadata.drop_all()
    def test_make_class_dictable(self):
        # before the call, neither class nor instance has asdict()
        assert not hasattr(MakeClassDictable, 'asdict')
        m = MakeClassDictable('dictable')
        self.session.add(m)
        self.session.commit()
        assert not hasattr(m, 'asdict')
        # patching the class makes existing instances dictable too
        make_class_dictable(MakeClassDictable)
        assert m.asdict() == {'id': m.id, 'name': m.name}
class TestMakeDictable(tests.TestCase):
    """Check that a dictable model converts via the dict() protocol."""
    def test_dict(self):
        named = tests.Named('a name')
        self.session.add(named)
        self.session.commit()
        assert dict(named) == {'id': named.id, 'name': 'a name'}
def test_arg_to_dict():
    """arg_to_dict normalises None/list/dict arguments into a dict."""
    from zeelalchemy.utils import arg_to_dict
    # falsy inputs become an empty dict
    assert arg_to_dict(None) == {}
    assert arg_to_dict([]) == {}
    # a list becomes keys with empty option dicts
    assert arg_to_dict(['a', 'b']) == {'a': {}, 'b': {}}
    # a dict passes through unchanged
    assert arg_to_dict({
        'a': {'is_a': True},
        'b': {'is_b': True},
    }) == {'a': {'is_a': True}, 'b': {'is_b': True}}
|
# This file is part of AstroHOG
#
# Copyright (C) 2013-2017 Juan Diego Soler
import sys
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
sys.path.append('/Users/jsoler/Documents/astrohog/')
from astrohog import *
from astropy.wcs import WCS
from reproject import reproject_interp
def astroHOGexampleWHAM(frame, vmin, vmax, ksz=1):
    """Run the HOG correlation between the WHAM HI and H-alpha cubes.

    frame: label used only for output naming.
    vmin, vmax: velocity range in km/s selecting the channels compared.
    ksz: derivative kernel size in pixels.

    Side effects: reads two FITS cubes from a hard-coded local
    directory, shows several matplotlib figures, and prints the
    velocities of the maximum correlation.
    Fix: removed a leftover `import pdb; pdb.set_trace()` breakpoint at
    the end of the function that halted every run.
    """
    fstr="%4.2f" % frame
    dir='/Users/jsoler/DATA/WHAM/'
    hdu1=fits.open(dir+'hi_filament_cube.fits')
    hdu2=fits.open(dir+'ha_filament_cube.fits')
    v1=vmin*1000.; v2=vmax*1000.
    v1str="%4.1f" % vmin
    v2str="%4.1f" % vmax
    limsv=np.array([v1, v2, v1, v2])
    cube1=hdu1[0].data
    sz1=np.shape(hdu1[0].data)
    CTYPE3=hdu1[0].header['CTYPE3']
    CDELT3=hdu1[0].header['CDELT3']
    CRVAL3=hdu1[0].header['CRVAL3']
    CRPIX3=hdu1[0].header['CRPIX3']
    #zmin1=0
    zmin1=int(CRPIX3+(v1-CRVAL3)/CDELT3)
    #zmax1=sz1[0]-1
    zmax1=int(CRPIX3+(v2-CRVAL3)/CDELT3)
    velvec1=hdu1[0].header['CRVAL3']+(np.arange(sz1[0])-hdu1[0].header['CRPIX3'])*hdu1[0].header['CDELT3']
    #np.arange(v1,v2,CDELT3)/1000.
    cube2=hdu2[0].data
    sz2=np.shape(hdu2[0].data)
    CTYPE3=hdu2[0].header['CTYPE3']
    CDELT3=hdu2[0].header['CDELT3']
    CRVAL3=hdu2[0].header['CRVAL3']
    CRPIX3=hdu2[0].header['CRPIX3']
    #zmin2=0
    zmin2=int(CRPIX3+(v1-CRVAL3)/CDELT3)
    #zmax2=sz2[0]-1
    zmax2=int(CRPIX3+(v2-CRVAL3)/CDELT3)
    velvec2=hdu2[0].header['CRVAL3']+(np.arange(sz2[0])-hdu2[0].header['CRPIX3'])*hdu2[0].header['CDELT3']
    # build 2D headers for reprojection by stripping the velocity axis
    refhdr1=hdu1[0].header.copy()
    NAXIS31=refhdr1['NAXIS3']
    del refhdr1['NAXIS3']
    del refhdr1['CTYPE3']
    del refhdr1['CRVAL3']
    del refhdr1['CRPIX3']
    del refhdr1['CDELT3']
    del refhdr1['CUNIT3']
    del refhdr1['CNAME3']
    refhdr1['NAXIS']=2
    refhdr1['WCSAXES']=2
    refhdr2=hdu2[0].header.copy()
    NAXIS3=refhdr2['NAXIS3']
    del refhdr2['NAXIS3']
    del refhdr2['CTYPE3']
    del refhdr2['CRVAL3']
    del refhdr2['CRPIX3']
    del refhdr2['CDELT3']
    del refhdr2['CUNIT3']
    del refhdr2['CNAME3']
    del refhdr2['PV1_3']
    refhdr2['NAXIS']=2
    refhdr2['WCSAXES']=2
    # reproject every HI channel onto the H-alpha spatial grid
    newcube1=np.zeros([NAXIS31, sz2[1], sz2[2]])
    for i in range(0, NAXIS31):
        hduX=fits.PrimaryHDU(cube1[i,:,:])
        hduX.header=refhdr1
        mapX, footprintX=reproject_interp(hduX, refhdr2)
        newcube1[i,:,:]=mapX
    #import pdb; pdb.set_trace()
    # ==========================================================================================================
    sz1=np.shape(newcube1)
    # mask out the faintest 20% of voxels and a ksz-wide border
    x=np.sort(newcube1.ravel())
    minrm=x[int(0.2*np.size(x))]
    #minrm=np.std(newcube1[0,:,:])
    mask1=np.zeros(sz1)
    mask1[(newcube1 > minrm).nonzero()]=1
    mask1[:,0:ksz,:]=0.; mask1[:,sz1[1]-ksz:sz1[1],:]=0.
    mask1[:,:,0:ksz]=0.; mask1[:,:,sz1[2]-ksz:sz1[2]]=0.
    mask1[:,sz1[1]-80:sz1[1],:]=0.;
    sz2=np.shape(cube2)
    minrm=np.std(cube2[0,:,:])
    mask2=np.zeros(sz2)
    mask2[(cube2 > minrm).nonzero()]=1
    corrplane, corrcube=HOGcorr_cube(newcube1, cube2, zmin1, zmax1, zmin2, zmax2, ksz=ksz, mask1=mask1, mask2=mask2)
    strksz="%i" % ksz
    limsv=np.array([velvec1[zmin1], velvec1[zmax1], velvec2[zmin2], velvec2[zmax2]])
    plt.imshow(corrplane, origin='lower', extent=limsv/1e3, interpolation='none')
    plt.xlabel(r'$v_{HI}$ [km/s]')
    plt.ylabel(r'$v_{H\alpha}$ [km/s]')
    plt.yticks(rotation='vertical')
    plt.colorbar()
    plt.show()
    #plt.savefig('HOGcorrelationPlanck353GRSL'+fstr+'_b'+blimstr+'_k'+strksz+'_v'+v1str+'to'+v2str+'.png', bbox_inches='tight')
    #plt.close()
    # channel pair with the maximum correlation
    ix=(corrplane == np.max(corrplane)).nonzero()[0][0]
    jx=(corrplane == np.max(corrplane)).nonzero()[1][0]
    print(velvec1[ix]/1e3)
    print(velvec2[jx]/1e3)
    #limsv=np.array([velvec1[ix-10], velvec1[ix+10], velvec2[jx-10], velvec2[jx+10]])
    #plt.imshow(corrplane[ix-10:ix+10,jx-10:jx+10], origin='lower', extent=limsv/1e3, interpolation='none')
    #plt.xlabel(r'$v_{HI}$ [km/s]')
    #plt.ylabel(r'$v_{H\alpha}$ [km/s]')
    #plt.yticks(rotation='vertical')
    #plt.colorbar()
    #plt.show()
    ax1=plt.subplot(1,1,1, projection=WCS(refhdr2))
    ax1.imshow(newcube1[ix,:,:], origin='lower', cmap='seismic', clim=[np.min(newcube1[ix,:,:]),4.]) #, interpolation='none')
    ax1.imshow(cube2[jx,:,:], origin='lower', alpha=0.55, cmap='binary', clim=[0.,1.0])
    ax1.coords.grid(color='white')
    ax1.coords['glon'].set_axislabel('Galactic Longitude')
    ax1.coords['glat'].set_axislabel('Galactic Latitude')
    ax1.coords['glat'].set_axislabel_position('r')
    ax1.coords['glat'].set_ticklabel_position('r')
    ax1.set_title('DKs cubes')
    plt.show()
    # RGB composite of the best-correlated channel pair
    inmap=newcube1[ix,:,:]
    inmap[inmap > np.mean(inmap)]=np.mean(inmap)
    r=(inmap-np.min(inmap))/(np.max(inmap)-np.min(inmap))
    inmap=cube2[jx,:,:]
    inmap[inmap > np.mean(inmap)]=np.mean(inmap)
    g=(inmap-np.min(inmap))/(np.max(inmap)-np.min(inmap))
    b=0.*g
    sz=np.shape(r)
    rgb=np.zeros([sz[0], sz[1], 3])
    rgb[:,:,0]=r#(1.-r)
    rgb[:,:,1]=g#(1.-g)
    rgb[:,:,2]=b
    ax1=plt.subplot(1,1,1, projection=WCS(refhdr2))
    ax1.imshow(rgb, origin='lower')
    ax1.coords['glon'].set_axislabel('Galactic Longitude')
    ax1.coords['glat'].set_axislabel('Galactic Latitude')
    ax1.coords['glat'].set_axislabel_position('r')
    ax1.coords['glat'].set_ticklabel_position('r')
    ax1.set_title('DKs cubes')
    plt.show()
    ax1=plt.subplot(1,1,1, projection=WCS(refhdr2))
    ax1.imshow(corrcube[ix,:,:], origin='lower', cmap='Reds', clim=[np.min(newcube1[ix,:,:]),4.])
    plt.show()
    corrcube[np.isnan(corrcube).nonzero()]=0.
    ax1=plt.subplot(1,1,1, projection=WCS(refhdr2))
    ax1.imshow(corrcube[ix-1:ix+1,:,:].sum(axis=0), origin='lower', cmap='seismic')
    plt.show()
# Example run: correlate HI against H-alpha over 0..45 km/s using a
# 5-pixel derivative kernel.
ksz=5
astroHOGexampleWHAM(23.75, 0., 45., ksz=ksz)
#astroHOGexampleWHAM(23.75, -45., 45., ksz=ksz)
|
# -*- coding: utf-8 -*-
class Solution:
    def findPoisonedDuration(self, timeSeries, duration):
        """Total time poisoned, given attack times and a poison duration.

        A new attack restarts (does not stack) the poison timer, so each
        consecutive pair contributes at most `duration` seconds, and the
        final attack always contributes the full `duration`.
        """
        if not timeSeries:
            return 0
        total = 0
        for prev, cur in zip(timeSeries, timeSeries[1:]):
            total += min(cur - prev, duration)
        return total + duration
if __name__ == "__main__":
    # Smoke tests for the LeetCode "Teemo Attacking" solution.
    solution = Solution()
    assert 4 == solution.findPoisonedDuration([1, 4], 2)
    assert 3 == solution.findPoisonedDuration([1, 2], 2)
|
#!/home/moritz/.pyenv/shims/python
#SBATCH -D /home/moritz/repos/MiComPy/
#SBATCH -J acIs
#SBATCH -o /home/moritz/acI_clustering.out
#SBATCH -e /home/moritz/acI_clustering.err
#SBATCH -A b2014036
#SBATCH -t 5-00:00:00
#SBATCH -n 16
#SBATCH -p core
#SBATCH --mail-user murumbii@gmail.com
#SBATCH --mail-type=ALL
import os
from pandas import DataFrame
from os.path import join as pjoin
from subprocess import call
import sys
from tqdm import tqdm
import sh
from micompy.common.genome import Genome
from micompy.pipes.analyses import *
from micompy.common.utils.renaming_tree import renaming_tree
from micompy.common.utils.intrasimilarity import NIC_similarity
from micompy.gene_clusterings.orthomcl.orthoMCL import orthoMCL
from micompy.gene_clusterings.orthomcl.clustering import Clustering as MCLClustering
from micompy.gene_clusterings.clustering import Clustering
from micompy.gene_clusterings.pfam_clusters.clustering import PfamClustering
from itertools import groupby
from pylab import *
from micompy.common.utils.iotl_annotations import *
# Pipeline: load genome metadata, annotate all genomes, build an
# orthoMCL-based gene clustering, and write co-occurrence / core-genome
# summary tables.  NOTE(review): Python 2 code (iteritems,
# DataFrame.from_csv) with hard-coded cluster paths.
root = "/home/moritz/people/sarahi/all_enrichmentss/"
data_root = pjoin(root, "all_AGs/")
analyses_root = pjoin(root, "")
google_data = pjoin(root, "ag_metadata.csv")
manual_metadata = DataFrame.from_csv(google_data).transpose().to_dict()
cpus = 16
all_genomes = [ Genome(g, pjoin(data_root,g), pjoin(data_root, m['genomes']), manual_metadata[g]) for g,m in manual_metadata.iteritems()]
# compute missing genome sizes, then process the largest genomes first
for g in tqdm(all_genomes):
    if not g.size:
        g.compute_size()
all_genomes.sort(key=lambda x: x.size, reverse = True)
annotation(all_genomes, cpus)
# concatenate all per-genome GFFs and keep only the CDS features
sh.cat(*[g.proteom.replace(".faa",".gff") for g in all_genomes],_out ="temp.gff")
sh.grep("CDS","temp.gff",_out = pjoin(analyses_root,"all_gff.gff"))
#checkm(all_genomes, pjoin(analyses_root,"checkm"), cpus)
mcl = orthoMCL(pjoin(analyses_root, "orthoMCL/"), all_genomes, "big_clustering")
#mcl.start_server()
#mcl.full_pipe()
#mcl.stop_server()
#mcl.post_blast()
clusters = Clustering(all_genomes, pjoin(analyses_root, "clustering/"),"acIs", checkm = pjoin(analyses_root,"checkm"), gff = pjoin(analyses_root,"all_gff.gff"))
#cooc = clusters.cooccurence_matrix()
#cooc.to_csv(pjoin(clusters.path,"coocurence.txt"))
#bmft = clusters.make_cluster_bmft()
#bmft.to_csv(pjoin(clusters.path,"bmft.txt"))
for c in clusters:
    c.genomes = set(c.genomes)
# pairwise cluster overlap, normalised by the smaller cluster; clusters
# present in a single genome are excluded
#intersects = {c1.name : {c2 : 2.0*float(len(c1.genomes.intersection(c2.genomes)))/(len(c1.genomes)+ len(c2.genomes)) for c2 in clusters} for c1 in clusters}
intersects = {c1.name : {c2 : float(len(c1.genomes.intersection(c2.genomes)))/min(len(c1.genomes),len(c2.genomes)) for c2 in clusters if len(c2.genomes) > 1} for c1 in clusters if len(c1.genomes) > 1}
DataFrame.from_dict(intersects).to_csv(pjoin(clusters.path,"cluster_coocs.txt"))
with open(pjoin(analyses_root,"soft_core.txt")) as handle:
    softcore = [c[:-1] for c in handle.readlines()]
with open(pjoin(analyses_root,"hard_core.txt")) as handle:
    hardcore = [c[:-1] for c in handle.readlines()]
# histogram of cluster sizes (number of genomes per cluster, 1..42)
with open(pjoin(analyses_root, "cluster_denses.txt"),"w") as handle:
    handle.writelines([str(sum([len(c.genomes) == i for c in clusters])) +"\n" for i in range(1,43) ])
# fraction of hypothetical-protein clusters per cluster size
with open(pjoin(analyses_root, "hypo_prob.txt"),"w") as handle:
    handle.writelines([str(float(sum([c.annotation == "hypothetical protein" for c in clusters if len(c.genomes) == i]))/sum([len(c.genomes) == i for c in clusters])) + "\n" for i in range(1,43)])
|
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from skimage.util import random_noise
def uniform_noise(img, interval):
    """
    Corrupt an image with additive uniform noise.

    :param img: input image
    :param interval: [a, b] -- integer noise values drawn from [a, b)
    :return: uint8 image with uniform noise, rescaled to [0, 255]
    """
    low, high = interval
    # one integer noise sample per pixel
    noise = np.random.randint(low=low, high=high, size=img.shape)
    # widen to int32 before adding so the sum cannot wrap around
    noisy = np.int32(img) + noise
    # stretch intensities back to the displayable range
    rescaled = np.zeros(noisy.shape)
    rescaled = cv.normalize(noisy, rescaled, 0, 255, cv.NORM_MINMAX)
    return np.uint8(rescaled)
# Script: add uniform noise at several intensity ranges to one image,
# save the noisy versions, and plot images plus their histograms.
# path of images
img_path = '.\\Images\\Uniform.jpg'
# read image form file
img = cv.imread(filename=img_path, flags=cv.IMREAD_GRAYSCALE)
# noise intervals to test (np.random.randint upper bound is exclusive)
intervals = [[0,21], [-10,11], [0,31], [0,51], [0,81], [-60,61], [0,161], [0,191]]
# histograms
hists = []
# display image
fig = plt.figure(figsize=(10,10))
ax = plt.subplot(3, 3, 1)
ax.set_title("Input Image")
plt.imshow(img, cmap='gray')
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
# histogram
hists.append(cv.calcHist([img], [0], None, [256], [0, 256]))
for idx, interval in enumerate(intervals):
    # add uniform noise
    new_img = uniform_noise(img=img, interval=interval)
    ax = plt.subplot(3, 3, idx+2)
    ax.set_title("Interval {}".format(intervals[idx]))
    plt.imshow(new_img, cmap='gray')
    plt.gca().axes.get_xaxis().set_visible(False)
    plt.gca().axes.get_yaxis().set_visible(False)
    # histogram
    hists.append(cv.calcHist([new_img], [0], None, [256], [0, 256]))
    cv.imwrite('.\\noisy_images\\uniform-{}.jpg'.format(idx), new_img)
# plot histograms (slot 1 is the clean input, then one per interval)
fig = plt.figure(figsize=(10, 10))
for idx, hist in enumerate(hists):
    ax = plt.subplot(3, 3, idx + 1)
    if idx != 0:
        ax.set_title("Interval {}".format(intervals[idx-1]))
    else:
        ax.set_title("Input")
    plt.plot(hist)
    #plt.gca().axes.get_xaxis().set_visible(False)
    #plt.gca().axes.get_yaxis().set_visible(False)
plt.show()
|
from django.conf.urls import patterns, url, include
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib import admin
from django.shortcuts import render
# Route table: flatblocks demo app, the admin site, and a bare index
# page rendered straight from a template.
# NOTE(review): patterns() is the pre-Django-1.10 URLconf API, so this
# file targets an old Django release.
urlpatterns = patterns('',
    url('^flatblocks/', include("flatblocks.urls")),
    url('^admin/', include(admin.site.urls)),
    url('^$', render, {'template_name': 'index.html'}),
)
|
from flask import Blueprint
from flask import render_template,redirect,url_for,flash,Response,request,session
from project import app
from project.models import user
from project.users.forms import registrationform,loginform
from project import db
from flask_login import login_user,logout_user
from project.my_functions import Camera
users=Blueprint('users',__name__)
@users.route('/register',methods=["GET","POST"])
def register_page():
    """Create a new account, log it in, and continue to face enrolment."""
    form = registrationform()
    if form.validate_on_submit():
        user_to_create=user(username=form.username.data,
                            email_address=form.email_address.data,
                            phone_number=form.phone_number.data,
                            password=form.password1.data)
        db.session.add(user_to_create)
        db.session.commit()
        login_user(user_to_create)
        flash(f'successfull created account and logged in as : {form.username.data}', category='success')
        return redirect(url_for('users.face_register'))
    if form.errors != {}: # if there are errors from validation
        for err_msg in form.errors.values():
            flash(f'There was an error creating a user : {err_msg}',category='danger')
    return render_template("register.html",form=form)
@users.route('/login',methods=["GET","POST"])
def login_page():
    """Password login; on success redirect to the events page."""
    form=loginform()
    if form.validate_on_submit():
        attempted_user=user.query.filter_by(username=form.username.data).first()
        if attempted_user and attempted_user.check_password_correction(
                attempted_password=form.password.data):
            login_user(attempted_user)
            flash(f'Success ! you are logged in as : {attempted_user.username}',category='success')
            return redirect(url_for('events_bp.event_page'))
        else:
            flash("Username and password are not match! Please Try Again",category='danger')
    return render_template('login.html', form=form)
@users.route('/logout',methods=["GET","POST"])
def logout_page():
    """Log the user out, clear the session, and return home."""
    logout_user()
    session.clear()
    flash("You have been logged out ! ",category="info")
    return redirect(url_for('main.home_page'))
@users.route("/face_recognition_check",methods=["GET",'POST'])
def face_recognition_check():
    """Toggle the face-recognition preview from the form's start/stop buttons."""
    start_face_recognition = False
    if request.method=='POST':
        if request.form.get('face_recognition_action')=='start_recognition':
            start_face_recognition=True
        elif request.form.get('face_recognition_action')=='stop_recognition':
            start_face_recognition=False
    return render_template('face_recognition_check.html',start_face_recognition=start_face_recognition)
@users.route('/face_register',methods=["GET","POST"])
def face_register():
    """Face-enrolment page; the template flag is the string 'true'/'false'."""
    start_face_register='false'
    if request.method == 'POST':
        if request.form.get('register face') == 'register_face':
            start_face_register='true'
    return render_template("face_register.html",start_face_register=start_face_register)
@users.route('/recognise_faces/<start_face_recognition>',methods=["GET","POST"])
def recognise_faces(start_face_recognition):
    """Stream the recogniser's annotated frames as an MJPEG response."""
    return Response(Camera.face_recogniser(start_face_recognition),mimetype='multipart/x-mixed-replace; boundary=frame')
@users.route('/register_face/<name>',methods=["GET","POST"])
def register_face(name):
    """Stream the enrolment camera feed while capturing faces for `name`."""
    return Response(Camera.register_face(person_name=name),mimetype='multipart/x-mixed-replace; boundary=frame')
@users.route('/face_login2/',methods=["GET","POST"])
def face_login2():
    """Log in by recognising the face currently in front of the camera."""
    name = Camera.face_recogniser_get_name()
    if name =='unknown':
        flash(f"no faces detected or recognised ,please try again later", category='danger')
    else:
        attempted_user = user.query.filter_by(username=name).first()
        # NOTE(review): bare except hides the real failure; presumably it
        # guards login_user(None) when no matching user exists — confirm
        # and narrow to the specific exception.
        try:
            login_user(attempted_user)
            flash(f'Success ! you are logged in as : {attempted_user.username}', category='success')
        except :
            flash(f"no user named {name} found in data base",category='danger')
    return redirect(url_for('events_bp.event_page'))
@users.route('/face_login_check',methods=["GET","POST"])
def face_login3():
    """Interactive face login: the form starts/stops the camera check."""
    logined_user=''
    start_video='start'
    if request.method == 'POST':
        if request.form.get('start') == 'start':
            name = Camera.face_recogniser_get_name()
            if name == 'unknown':
                flash(f"no faces detected or recognised ,please try again later", category='danger')
            else:
                attempted_user = user.query.filter_by(username=name).first()
                # NOTE(review): bare except presumably guards
                # login_user(None) when the name has no account — confirm
                # and narrow the exception type.
                try:
                    login_user(attempted_user)
                    flash(f'Success ! you are logged in as : {attempted_user.username}', category='success')
                    start_video = 'stop'
                    logined_user=attempted_user.username
                    return redirect(url_for('events_bp.event_page'))
                except:
                    flash(f"no user named {name} found in data base", category='danger')
        elif request.form.get('start') == 'stop':
            start_video = 'stop'
    return render_template('face_login3.html',start_video=start_video,user=logined_user)
@users.route('/stream_video/<start_stream_video>',methods=["GET","POST"])
def stream_video(start_stream_video):
    """Stream webcam frames; the URL parameter selects open vs release."""
    camera_status = ('open_camera'
                     if start_stream_video == 'start_stream_video'
                     else 'release_camera')
    return Response(Camera.gen_frames(camera_status),mimetype='multipart/x-mixed-replace; boundary=frame')
from md_importer.importer import (
DEFAULT_LANG,
DEFAULT_TEMPLATE,
logger,
)
from md_importer.importer.tools import remove_leading_and_trailing_slash
from developer_portal.models import RawHtml
from cms.api import create_page, add_plugin
from cms.models import Page
from cms.utils.page_resolver import get_page_from_path
from djangocms_text_ckeditor.html import clean_html
from bs4 import BeautifulSoup
import re
import os
class ParentNotFoundException(Exception):
    """Raised when the parent page for a given URL cannot be located."""

    def __init__(self, parent_url):
        # keep the offending URL around for error reporting
        self.parent_url = parent_url

    def __str__(self):
        return repr(self.parent_url)
class ArticlePage:
    """One CMS article page backed by a single RawHtml text plugin.

    Wraps a django-cms draft page — creating it under the correct
    parent when it does not exist yet — and keeps its text plugin in
    sync with imported HTML.
    """
    def _text_plugin_needs_update(self, html):
        # no write needed when neither the draft nor the published
        # plugin body differs from the incoming html
        if _compare_html(html, self.draft_text_plugin.body):
            return False
        if self.text_plugin and _compare_html(html, self.text_plugin.body):
            return False
        return True
    def update(self, title, full_url, menu_title=None, in_navigation=True,
               html=None, template=None):
        """Apply changed metadata/html to the draft; revert when no html given."""
        if self.draft.get_title() != title:
            self.draft.title = title
        if self.draft.get_menu_title() != menu_title:
            self.draft.menu_title = menu_title
        if self.draft.in_navigation != in_navigation:
            self.draft.in_navigation = in_navigation
        if self.draft.template != template:
            self.draft.template = template
        if html:
            if self.page:
                self.text_plugin = find_text_plugin(self.page)
            if self._text_plugin_needs_update(html):
                self.draft_text_plugin.body = html
                self.draft_text_plugin.save()
        else:
            # Reset draft
            self.draft.revert(DEFAULT_LANG)
    def __init__(self, title, full_url, menu_title=None, in_navigation=True,
                 html=None, template=DEFAULT_TEMPLATE):
        """Find or create the draft page at full_url and sync its content.

        Raises ParentNotFoundException when a new page would have no
        parent to attach to.
        """
        self.page = None
        self.draft = None
        self.draft_text_plugin = None
        self.text_plugin = None
        self.full_url = full_url
        self.title = title
        self.menu_title = menu_title
        self.in_navigation = in_navigation
        self.template = template
        # First check if pages already exist.
        self.draft = find_page(full_url, draft=True)
        if not self.draft:
            parent = _find_parent(full_url)
            if not parent:
                raise ParentNotFoundException(
                    'Parent for {} not found.'.format(full_url))
            slug = os.path.basename(full_url)
            self.draft = create_page(
                title=title, template=template, language=DEFAULT_LANG,
                slug=slug, parent=parent, menu_title=menu_title,
                in_navigation=in_navigation, position='last-child')
        else:
            # normalise legacy pages down to one placeholder/plugin
            remove_superfluous_placeholders(self.draft)
            remove_superfluous_plugins(self.draft)
        add_rawhtml_plugin(self.draft)
        self.draft_text_plugin = find_text_plugin(self.draft)
        self.update(title, full_url, menu_title, in_navigation, html,
                    template)
    def publish(self):
        """Publish the draft if dirty and cache the public page object."""
        if self.draft.is_dirty(DEFAULT_LANG):
            self.draft.publish(DEFAULT_LANG)
        if self.draft.get_public_object():
            self.page = self.draft.get_public_object()
class IndexPage(ArticlePage):
    """An overview page that lists the articles imported beneath it.

    NOTE(review): uses the `unicode` builtin, so this class targets
    Python 2.
    """
    def __init__(self, title, full_url, menu_title='Overview',
                 in_navigation=True, html='', template=DEFAULT_TEMPLATE):
        self.imported_articles = []
        self.origin = ''
        ArticlePage.__init__(self, title, full_url, menu_title,
                             in_navigation, html, template)
        self.publish()
    def add_imported_articles(self, imported_articles, origin):
        """Render a link list of the imported child articles and publish it.

        imported_articles: pages produced by the importer run.
        origin: source URL credited in the generated footer.
        """
        self.imported_articles = imported_articles
        self.origin = origin
        list_pages = u''
        # only articles living under this index page are listed
        for article in [a for a
                        in self.imported_articles
                        if a.full_url.startswith(self.full_url)]:
            list_pages += u'<li><a href=\"{}\">{}</a></li>'.format(
                unicode(os.path.basename(article.full_url)),
                article.title)
        html = (
            u'<div class=\"row\"><div class=\"eight-col\">\n'
            '<p>This section contains documentation for the '
            'Snappy project.</p>'
            '<p><ul class=\"list-ubuntu\">{}</ul></p>\n'
            '<p>Auto-imported from <a '
            'href=\"{}\">{}</a>.</p>\n'
            '</div></div>'.format(list_pages, self.origin, self.origin))
        self.update(self.title, self.full_url, self.menu_title,
                    self.in_navigation, html, self.template)
        self.publish()
def _compare_html(html_a, html_b):
    """True when two HTML fragments are equivalent after normalisation.

    Both sides are parsed, pretty-printed and cleaned so formatting
    differences do not count as changes.
    """
    soup_a = BeautifulSoup(html_a, 'html5lib')
    soup_b = BeautifulSoup(html_b, 'html5lib')
    return (clean_html(soup_a.prettify()) == clean_html(soup_b.prettify()))
def slugify(filename):
    """Turn a file path into a page slug: basename minus .md/.html.

    Note: every occurrence of the extension substring is removed, not
    just a trailing one, matching the original chained-replace behavior.
    """
    base = os.path.basename(filename)
    for extension in ('.md', '.html'):
        base = base.replace(extension, '')
    return base
def clean_url(url):
    """Normalise a page URL for lookup.

    Strips a leading '/None' or the default-language prefix, then
    removes leading/trailing slashes.
    """
    return remove_leading_and_trailing_slash(
        re.sub(
            r'^\/None|{}\/'.format(DEFAULT_LANG),
            '',
            url))
def find_page(url, draft=False):
    """Look up a CMS page by URL, optionally as its draft version."""
    page = get_page_from_path(clean_url(url), draft)
    # the resolver may hand back the public object even when a draft was
    # requested; normalise to the draft in that case
    if page and draft and not page.publisher_is_draft:
        return page.get_draft_object()
    return page
def _find_parent(full_url):
    """Return the draft parent page for full_url, or None when missing.

    A URL with no directory part attaches to the site home page.
    """
    parent_url = os.path.dirname(full_url)
    if not parent_url:
        root = Page.objects.get_home()
        if not root:
            return None
        return root
    parent = get_page_from_path(parent_url, draft=True)
    if not parent:
        logger.error('Parent {} not found.'.format(parent_url))
        return None
    return parent
# More than one placeholder -> old style page
# More than one placeholder -> old style page
def remove_superfluous_placeholders(page):
    """Delete all but the first placeholder; return the surviving one."""
    if page.placeholders.count() > 1:
        for placeholder in page.placeholders.all()[1:]:
            placeholder.delete()
    return page.placeholders.all()[0]
def remove_superfluous_plugins(page):
    """Reduce the first placeholder to at most one RawHtml plugin.

    Extra plugins are deleted; a sole remaining plugin that is not
    RawHtml is deleted too, so add_rawhtml_plugin() can start clean.
    """
    placeholder = page.placeholders.all()[0]
    plugins = placeholder.get_plugins()
    if plugins.count() >= 1:
        for plugin in plugins[1:]:
            plugin.delete()
    if plugins.count() == 1 and \
       type(plugins[0].get_plugin_instance()[0]) != RawHtml:
        plugins[0].delete()
def add_rawhtml_plugin(page):
    """Ensure the page's first placeholder has a (possibly empty) RawHtml plugin."""
    placeholder = page.placeholders.all()[0]
    if not placeholder.get_plugins().count():
        add_plugin(
            placeholder, 'RawHtmlPlugin', DEFAULT_LANG,
            body='')
def find_text_plugin(page):
    """Return the page's single text plugin instance, or None.

    Pages created by this importer have exactly one placeholder with
    one plugin, so only the first of each is consulted.
    """
    if not page:
        return None
    placeholders = page.placeholders.all()
    if not placeholders:
        return None
    # We create the page, so we know there's just one placeholder
    plugins = placeholders[0].get_plugins()
    return plugins[0].get_plugin_instance()[0]
|
import sqlite3
import xml.etree.ElementTree as ET
# Load the children of the third top-level XML element into the
# general_configurations table, one (tag, text) row per element.
connection = sqlite3.connect('Ram.db')
try:
    tree = ET.parse('configurations.xml')
    root = tree.getroot()
    for line in root[2]:
        # fix: parameterized query instead of string concatenation —
        # tag/text containing quotes previously broke the statement and
        # allowed SQL injection
        connection.execute(
            'insert into general_configurations values (?, ?);',
            (line.tag, line.text))
    connection.commit()
finally:
    # fix: close the connection even when parsing/inserting fails
    connection.close()
"""
Extract doc2vec embeddings for metaphor identification
"""
import pandas as pd
import numpy as np
from tqdm import tqdm
from gensim.utils import simple_preprocess
import gensim.models as g
# Dimensionality of the doc2vec vectors produced by the loaded model.
HIDDEN_DIM_SIZE = 300
if __name__ == '__main__':
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    # No options are defined yet; the parser still provides --help.
    parser = ArgumentParser(
        description='Extract doc2vec vectors',
        formatter_class=ArgumentDefaultsHelpFormatter)
    args = parser.parse_args()
    # Pretrained doc2vec model (DBOW on English Wikipedia, per the path name).
    m = g.Doc2Vec.load('models/enwiki_dbow/doc2vec.bin')
    # keep_default_na=False makes empty subject/object cells load as '' (falsy),
    # which the `if row.subject` / `if row.object` guards below rely on.
    vuamc = pd.read_csv('./data/vuamc.csv',
                        keep_default_na=False)
    # Contexts repeat across rows, so infer one embedding per unique context.
    unique_ctx = vuamc.min_context.unique()
    ctx_embs = np.stack([
        m.infer_vector(simple_preprocess(ctx), alpha=0.01, steps=1000)
        for ctx in tqdm(unique_ctx, desc='Context vectors')])
    ctx_to_idx = {ctx: i for i, ctx in enumerate(unique_ctx)}
    # Per-row embeddings for the verb lemma, subject and object.
    v_embs = np.zeros((vuamc.shape[0], HIDDEN_DIM_SIZE), dtype=np.float32)
    s_embs = np.zeros((vuamc.shape[0], HIDDEN_DIM_SIZE), dtype=np.float32)
    o_embs = np.zeros((vuamc.shape[0], HIDDEN_DIM_SIZE), dtype=np.float32)
    # Maps each row to the index of its context embedding in ctx_embs.
    vuamc_rows_to_idxs = np.zeros(vuamc.shape[0], dtype=np.int32)
    for i, row in tqdm(vuamc.iterrows(), total=vuamc.shape[0], desc='Lemmas + Args'):
        ctx_idx = ctx_to_idx[row.min_context]
        vuamc_rows_to_idxs[i] = ctx_idx
        v_emb = m.infer_vector([row.verb_lemma], alpha=0.01, steps=1000)
        # Missing subject/object -> zero vector of the model's dimensionality.
        s_emb = m.infer_vector([row.subject], alpha=0.01, steps=1000) if row.subject else np.zeros(HIDDEN_DIM_SIZE, dtype=np.float32)
        o_emb = m.infer_vector([row.object], alpha=0.01, steps=1000) if row.object else np.zeros(HIDDEN_DIM_SIZE, dtype=np.float32)
        v_embs[i] = v_emb
        s_embs[i] = s_emb
        o_embs[i] = o_emb
    # Persist everything needed downstream: embeddings, the row->context map,
    # labels and partition/genre/id metadata.
    np.savez('./features/doc2vec.npz',
             ctx_embs=ctx_embs, v_embs=v_embs, s_embs=s_embs, o_embs=o_embs,
             ctx_idxs=vuamc_rows_to_idxs,
             y=np.array(vuamc.y.values, dtype=np.uint8),
             partition=vuamc.partition.values, genre=vuamc.genre.values, id=vuamc.id.values)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 18 04:06:28 2019
@author: tboydev
program reprints information about gallons of gasoline
"""
# Prompt for an amount of gasoline and report the equivalent litres, the
# barrels of crude oil needed to refine it, and its retail cost.
# (Fix: the original used bare triple-quoted strings as comments, which are
# no-op expression statements, not comments.)
gallons = float(input("Enter gallons of gasoline: "))
print("Total gallons of gasoline is: {:.2f}".format(gallons))

# Convert gasoline to litres (1 US gallon = 3.78541 litres).
gallons_in_litres = gallons * 3.78541
print("Number of litres of gasoline is: {:.2f}".format(gallons_in_litres))

# Calculate the number of barrels of oil required to make this amount of
# gasoline (one barrel yields roughly 19.5 gallons).
barrels_of_oil = gallons / 19.5
print("Number of barrels of oil required to make {:.2f} gallons of gasoline \
is {:.2f}".format(gallons, barrels_of_oil))

# Calculate the price in dollars.
cost_per_gallons = 3.0  # in dollars
total_cost = cost_per_gallons * gallons
print("price in dollars: {} dollars".format(total_cost))
|
##import sys
##sys.path.append("C:\\Users\\Satyam\\Desktop\\test")
# Smoke-test the local `testing` package: call the module-level function b()
# and the c() function of its `test` submodule.
from testing import b
from testing import test
b()
test.c()
|
from StockData import GetStockInfo
from DataGather import *
from pathlib import Path
# ANSI escape codes used to colour terminal output (END resets the colour).
formatters = {
    'RED': '\033[91m',
    'GREEN': '\033[92m',
    'END': '\033[0m',
}
def get_choice(options):
    """Print a numbered menu and return the option the user selects.

    Returns the literal string "Invalid argument!" when the input is not an
    integer or is out of range (callers compare the result against option
    values, so the sentinel simply never matches).
    """
    numbered = list(enumerate(list(options)))
    for index, value in numbered:
        print(f'{index}: {value}')
    try:
        selection = int(input("Please select an option: "))
    except (TypeError, ValueError) as err:
        print(str(err))
        return "Invalid argument!"
    if 0 <= selection < len(numbered):
        return numbered[selection][1]
    return "Invalid argument!"
def read_to_df(path):
    """Read a ticker CSV and return its 'Symbol' column as a plain list."""
    return pd.read_csv(path)['Symbol'].tolist()
def list_portfolios(path='../Data/Portfolio'):
    """Return the names of the entries under *path* (portfolio folders),
    resolved relative to the current working directory."""
    target = Path(os.path.join(os.getcwd(), path))
    return [entry.name for entry in target.glob('*/')]
class Main:
    """Interactive console menu for building stock portfolios and fetching
    historical prices for them."""

    def __init__(self):
        pass

    def main(self):
        """Run the menu loop until the user selects "Exit".

        A FileNotFoundError raised while reading a portfolio is reported in
        red and the loop continues.
        """
        loop = True
        while loop:
            try:
                options = ["New Portfolio", "Get Stock Prices", "Exit"]
                choice = get_choice(options=options)
                if choice == options[0]:
                    choice = get_choice(options=["Custom", "Category", "All", "Back"])
                    if choice == "Custom":
                        DataGather().get_custom_tickers()
                        continue
                    elif choice == "Category":
                        DataGather().category_tickers()
                        continue
                    elif choice == "All":
                        DataGather().all_market_tickers()
                        continue
                    # BUG FIX: the submenu offers "Back", but the original code
                    # compared against "Cancel", so "Back" silently fell through.
                    elif choice == "Back":
                        continue
                elif choice == options[1]:
                    choice = get_choice(options=list_portfolios())
                    if choice == "all":
                        path = f"../Data/Portfolio/{choice}/all_tickers.csv"
                        tickers = read_to_df(path)
                        GetStockInfo(tickers, portfolio_name="all").get_historical_price()
                        continue
                    elif choice == "categorical":
                        path = f"../Data/Portfolio/{choice}/"
                        choice = get_choice(options=list_portfolios(path))
                        print(f"You chose: # {choice} #")
                        tickers = read_to_df(f"{path}/{choice}")
                        GetStockInfo(tickers=tickers, portfolio_name=choice).get_historical_price()
                        continue
                    elif choice == "custom":
                        path = f"../Data/Portfolio/{choice}/"
                        choice = get_choice(options=list_portfolios(path))
                        tickers = read_to_df(f"{path}/{choice}")
                        print(f"You chose: # {choice} #")
                        print(tickers)
                        # Strip the .csv suffix so the portfolio name is clean.
                        GetStockInfo(tickers=tickers,
                                     portfolio_name=choice.replace('.csv', '')).get_historical_price()
                        continue
                elif choice == "Exit":
                    loop = False
                else:
                    # Invalid selection: show the menu again.
                    continue
            except FileNotFoundError as e:
                print(f"{formatters['RED']}File not found, please build portfolio first!\n" + str(e)
                      + f"{formatters['END']}")
# BUG FIX: start the menu loop only when executed as a script -- previously it
# ran unconditionally, so merely importing this module blocked on input().
if __name__ == "__main__":
    m = Main()
    m.main()
# Generated by Django 3.1.7 on 2021-09-04 13:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the app_name CharField to the `first` model."""

    dependencies = [
        ('first', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='first',
            name='app_name',
            # Existing rows are backfilled with the default value 'first_app'.
            field=models.CharField(default='first_app', max_length=20),
        ),
    ]
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import itertools
import logging
from collections import defaultdict
from dataclasses import dataclass
from pathlib import PurePath
from typing import Iterable
from pants.backend.python.util_rules import pex
from pants.backend.python.util_rules.pex import PexRequest, VenvPex, VenvPexProcess
from pants.core.goals.lint import LintResult, LintTargetsRequest
from pants.core.util_rules.partitions import Partition, Partitions
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import Address
from pants.engine.fs import (
CreateDigest,
Digest,
FileContent,
MergeDigests,
PathGlobs,
Paths,
Snapshot,
)
from pants.engine.process import FallibleProcessResult, ProcessCacheScope
from pants.engine.rules import Get, MultiGet, Rule, collect_rules, rule
from pants.engine.unions import UnionRule
from pants.option.global_options import GlobalOptions
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
from .subsystem import SemgrepFieldSet, SemgrepSubsystem
logger = logging.getLogger(__name__)
class SemgrepLintRequest(LintTargetsRequest):
    """Wires semgrep into Pants' lint goal for semgrep field sets."""

    field_set_type = SemgrepFieldSet
    tool_subsystem = SemgrepSubsystem

    @dataclass(frozen=True)
    class PartitionMetadata:
        # Config files shared by every target in this partition.
        config_files: frozenset[PurePath]
        # Snapshot of the .semgrepignore files found in the repository.
        ignore_files: Snapshot

        @property
        def description(self) -> str:
            # Human-readable partition label: the sorted config paths.
            return ", ".join(sorted(str(path) for path in self.config_files))
# Name of the ignore file semgrep consults for path exclusions.
_IGNORE_FILE_NAME = ".semgrepignore"
# Directory whose rule files apply to its parent directory.
_RULES_DIR_NAME = ".semgrep"
# File patterns (relative to any directory) that identify semgrep rule files.
_RULES_FILES_GLOBS = (
    ".semgrep.yml",
    ".semgrep.yaml",
    f"{_RULES_DIR_NAME}/*.yml",
    f"{_RULES_DIR_NAME}/*.yaml",
)
@dataclass
class SemgrepIgnoreFiles:
    """Snapshot of every .semgrepignore file found in the repository."""

    snapshot: Snapshot
@dataclass
class AllSemgrepConfigs:
    """Index of every semgrep config file, keyed by the directory it governs."""

    configs_by_dir: dict[PurePath, set[PurePath]]

    def ancestor_configs(self, address: Address) -> Iterable[PurePath]:
        """Yield each config registered for the address's own directory or
        any of its ancestor directories."""
        # TODO: introspect the semgrep rules and determine which (if any) apply to the files, e.g. a
        # Python file shouldn't depend on a .semgrep.yml that doesn't have any 'python' or 'generic'
        # rules, and similarly if there's path inclusions/exclusions.
        # TODO: this would be better as actual dependency inference (e.g. allows inspection, manual
        # addition/exclusion), but that can only infer 'full' dependencies and it is wrong (e.g. JVM
        # things break) for real code files to depend on this sort of non-code linter config; requires
        # dependency scopes or similar (https://github.com/pantsbuild/pants/issues/12794)
        own_dir = PurePath(address.spec_path)
        for directory in itertools.chain([own_dir], own_dir.parents):
            yield from self.configs_by_dir.get(directory, [])
def _group_by_semgrep_dir(all_paths: Paths) -> AllSemgrepConfigs:
    """Bucket each discovered config file under the directory it governs."""
    grouped = defaultdict(set)
    for raw_path in all_paths.files:
        config = PurePath(raw_path)
        # A rule like foo/bar/.semgrep/baz.yaml should behave like it's in
        # foo/bar, not foo/bar/.semgrep
        parent = config.parent
        governed = parent.parent if parent.name == _RULES_DIR_NAME else parent
        grouped[governed].add(config)
    return AllSemgrepConfigs(grouped)
@rule
async def find_all_semgrep_configs() -> AllSemgrepConfigs:
    """Scan the whole repository for semgrep rule files and index them by the
    directory they govern."""
    all_paths = await Get(Paths, PathGlobs([f"**/{file_glob}" for file_glob in _RULES_FILES_GLOBS]))
    return _group_by_semgrep_dir(all_paths)
@dataclass(frozen=True)
class RelevantSemgrepConfigsRequest:
    """Request for the semgrep configs that apply to one field set."""

    field_set: SemgrepFieldSet
class RelevantSemgrepConfigs(frozenset[PurePath]):
    """Set of semgrep config file paths applying to a particular target."""
    pass
@rule
async def infer_relevant_semgrep_configs(
    request: RelevantSemgrepConfigsRequest, all_semgrep: AllSemgrepConfigs
) -> RelevantSemgrepConfigs:
    """Select the configs in the target's own directory or any ancestor."""
    return RelevantSemgrepConfigs(all_semgrep.ancestor_configs(request.field_set.address))
@rule
async def all_semgrep_ignore_files() -> SemgrepIgnoreFiles:
    """Capture every .semgrepignore file in the repository as a snapshot."""
    snapshot = await Get(Snapshot, PathGlobs([f"**/{_IGNORE_FILE_NAME}"]))
    return SemgrepIgnoreFiles(snapshot)
@rule
async def partition(
    request: SemgrepLintRequest.PartitionRequest[SemgrepFieldSet],
    semgrep: SemgrepSubsystem,
    ignore_files: SemgrepIgnoreFiles,
) -> Partitions:
    """Split the requested field sets into partitions, one per distinct set of
    applicable semgrep config files."""
    if semgrep.skip:
        return Partitions()
    all_configs = await MultiGet(
        Get(RelevantSemgrepConfigs, RelevantSemgrepConfigsRequest(field_set))
        for field_set in request.field_sets
    )
    # partition by the sets of configs that apply to each input
    by_config = defaultdict(list)
    for field_set, configs in zip(request.field_sets, all_configs):
        # Targets with no applicable configs are dropped entirely.
        if configs:
            by_config[configs].append(field_set)
    return Partitions(
        Partition(tuple(field_sets), PartitionMetadata(configs, ignore_files.snapshot))
        for configs, field_sets in by_config.items()
    )
# We have a hard-coded settings file to side-step
# https://github.com/returntocorp/semgrep/issues/7102, and also provide more cacheability.
_DEFAULT_SETTINGS = FileContent(
    path="__semgrep_settings.yaml",
    # Suppresses semgrep's one-time metrics notice so output stays stable.
    content=b"has_shown_metrics_notification: true",
)
@rule(desc="Lint with Semgrep", level=LogLevel.DEBUG)
async def lint(
    request: SemgrepLintRequest.Batch[SemgrepFieldSet, PartitionMetadata],
    semgrep: SemgrepSubsystem,
    global_options: GlobalOptions,
) -> LintResult:
    """Run semgrep over one partition of source files and report the result."""
    # Fetch everything the process needs in parallel: the partition's config
    # files, the semgrep PEX, the target sources, and the settings digest.
    config_files, semgrep_pex, input_files, settings = await MultiGet(
        Get(Snapshot, PathGlobs(str(s) for s in request.partition_metadata.config_files)),
        Get(VenvPex, PexRequest, semgrep.to_pex_request()),
        Get(SourceFiles, SourceFilesRequest(field_set.source for field_set in request.elements)),
        Get(Digest, CreateDigest([_DEFAULT_SETTINGS])),
    )
    input_digest = await Get(
        Digest,
        MergeDigests(
            (
                input_files.snapshot.digest,
                config_files.digest,
                settings,
                request.partition_metadata.ignore_files.digest,
            )
        ),
    )
    # --force limits caching to this session so semgrep reruns on demand;
    # otherwise successful runs may be served from cache.
    cache_scope = ProcessCacheScope.PER_SESSION if semgrep.force else ProcessCacheScope.SUCCESSFUL
    # TODO: https://github.com/pantsbuild/pants/issues/18430 support running this with --autofix
    # under the fix goal... but not all rules have fixes, so we need to be running with
    # --error/checking exit codes, which FixResult doesn't currently support.
    result = await Get(
        FallibleProcessResult,
        VenvPexProcess(
            semgrep_pex,
            argv=(
                "scan",
                *(f"--config={f}" for f in config_files.files),
                # NOTE(review): deliberately not an f-string -- the literal
                # "{pants_concurrency}" placeholder is presumably substituted
                # by the engine based on concurrency_available below; confirm.
                "--jobs={pants_concurrency}",
                "--error",
                *semgrep.args,
                # we don't pass the target files directly because that overrides .semgrepignore
                # (https://github.com/returntocorp/semgrep/issues/4978), so instead we just tell its
                # traversal to include all the source files in this partition. Unfortunately this
                # include is implicitly unrooted (i.e. as if it was **/path/to/file), and so may
                # pick up other files if the names match. The highest risk of this is within the
                # semgrep PEX.
                *(f"--include={f}" for f in input_files.files),
                f"--exclude={semgrep_pex.pex_filename}",
            ),
            extra_env={
                "SEMGREP_FORCE_COLOR": "true",
                # disable various global state/network requests
                "SEMGREP_SETTINGS_FILE": _DEFAULT_SETTINGS.path,
                "SEMGREP_ENABLE_VERSION_CHECK": "0",
                "SEMGREP_SEND_METRICS": "off",
            },
            input_digest=input_digest,
            concurrency_available=len(input_files.files),
            description=f"Run Semgrep on {pluralize(len(input_files.files), 'file')}.",
            level=LogLevel.DEBUG,
            cache_scope=cache_scope,
        ),
    )
    return LintResult.create(request, result, strip_formatting=not global_options.colors)
def rules() -> Iterable[Rule | UnionRule]:
    """Register this backend's rules (plus pex helpers) with the engine."""
    return [*collect_rules(), *SemgrepLintRequest.rules(), *pex.rules()]
|
# calculadora solo suma
# Simple four-operation calculator: reads two integers and an operator.
primero = int(input('ingresa el primer numero: '))
segundo = int(input('ingresa el segundo numero: '))
signo = input("ingresa operacion: ")

# BUG FIX: the original used four independent `if` statements with the final
# `else` attached only to the "/" branch, so entering "+", "-" or "*" printed
# the result AND "El simbolo ingresado no es valido". An if/elif/else chain
# evaluates exactly one branch.
if signo == "+":
    resultado = (primero + segundo)
    print("El resultado de la suma es : ", resultado)
elif signo == "-":
    resultado = (primero - segundo)
    print("El resultado de la resta es : ", resultado)
elif signo == "*":
    resultado = (primero * segundo)
    print("El resultado de la multiplicacion es : ", resultado)
elif signo == "/":
    resultado = (primero / segundo)
    print("El resultado de la division es : ", resultado)
else:
    print("El simbolo ingresado no es valido")
|
#! /usr/bin/env python
#**********************************************************************
#* Name: game_agent (Isolation) *
#* *
#* Function: This module serves as an implementation of a game agent *
#* for the board game Isolation. This agent is realized with the *
#* implementation of the CustomPlayer defined herein. An instance of *
#* this class represents an Isolation player that, given a Board *
#* representation, is able to perform depth-first-search of a game *
#* tree using adversarial search concepts to determine a best move *
#* within a specified time limit. *
#* The CustomPlayer instance is initialized with a maximum search *
#* depth, a timeout value, a evaluation score function, a flag *
#* indicating whether to make use of Iterative Deepening, and an *
#* indicator of whether to use the Minimax or AlphaBeta algorithms *
#* during search. *
#* This module features three difference evaluation score functions *
#* which may be swapped in to the custom_score() function for actual *
#* use by the game playing agent. *
#* *
#* Usage: Import this module to make use of the CustomPlayer class *
#* *
#* Written: 03/16/2017 James Damgar (Based on Udacity AIND content) *
#* Modified: 03/20/2017 JED Added additional heuristic functions *
#* *
#**********************************************************************
"""This file contains all the classes you must complete for this project.
You can use the test cases in agent_test.py to help during development, and
augment the test suite with your own test cases to further test your code.
You must test your agent's strength against a set of agents with known
relative strength using tournament.py and include the results in your report.
"""
import random
import math
class Timeout(Exception):
    """Raised to abort game-tree search when the move timer nears expiry."""
    pass
def custom_score(game, player):
    """Heuristic value of a game state from *player*'s point of view.

    Delegates to the evaluation function currently favoured for play; swap
    the delegate here to experiment with the other heuristics in this module.

    Note: this function should be called from within a Player instance as
    `self.score()` -- you should not need to call this function directly.

    Parameters
    ----------
    game : `isolation.Board`
        Current board state (player locations and blocked cells).
    player : object
        One of the two player objects in the game.

    Returns
    -------
    float
        Heuristic value of the current game state for `player`.
    """
    # Alternative evaluation functions kept for experimentation:
    #   simple_score(game, player)
    #   central_score(game, player)
    return partition_score(game, player)
def simple_score(game, player):
    """Simple mobility heuristic for an Isolation state.

    At a decided state the board utility (+/-inf) is returned directly.
    Otherwise the score is the player's legal-move count minus twice the
    opponent's legal-move count.

    Parameters
    ----------
    game : `isolation.Board`
        Current board state (player locations and blocked cells).
    player : object
        One of the two player objects in the game.

    Returns
    -------
    float
        Heuristic value of the current game state for `player`.
    """
    endgame = game.utility(player)
    if endgame != 0:
        # +inf is a win and -inf a loss for `player`; nothing more to score.
        return endgame
    own_moves = len(game.get_legal_moves(player))
    opp_moves = len(game.get_legal_moves(game.get_opponent(player)))
    return float(own_moves - 2.0 * opp_moves)
def central_score(game, player):
    """Mobility heuristic penalised by distance from the board centre.

    At a decided state the board utility (+/-inf) is returned directly.
    Otherwise the score is the player's legal-move count minus twice the
    opponent's, minus the player's distance from the centre of the board.

    Parameters
    ----------
    game : `isolation.Board`
        Current board state (player locations and blocked cells).
    player : object
        One of the two player objects in the game.

    Returns
    -------
    float
        Heuristic value of the current game state for `player`.
    """
    endgame = game.utility(player)
    if endgame != 0:
        # +inf is a win and -inf a loss for `player`; nothing more to score.
        return endgame
    own_moves = len(game.get_legal_moves(player))
    opp_moves = len(game.get_legal_moves(game.get_opponent(player)))
    return float(own_moves - 2.0 * opp_moves) - board_distance(game, player)
def partition_score(game, player):
    """Calculate the heuristic value of a game state from the point of view
    of the given player.
    This function represents a more complex evaluation function that takes into account
    whether there is a "partition" present on the board. If there is a partition, this
    means that each player is effectively on an "island" of squares and cannot reach the
    other player. We first check to see if there is a partition. If there is, then if a
    player has a greater number of contiguous squares on their "island" than the opponent,
    then that player should win. If the number of squares is tied, then the player whose
    turn it is will lose. If none of these apply, use the simple_score() heuristic. Mentioned
    earlier. If a player has a partition advantage, return the appropriate value (+/- inf).
    Parameters
    ----------
    game : `isolation.Board`
        An instance of `isolation.Board` encoding the current state of the
        game (e.g., player locations and blocked cells).
    player : object
        A player instance in the current game (i.e., an object corresponding to
        one of the player objects `game.__player_1__` or `game.__player_2__`.)
    Returns
    -------
    float
        The heuristic value of the current game state to the specified player.
    """
    # First, see if we've reached an end-game situation
    # +inf means this game state is a win for the current player
    # -inf means this game state is a loss for the current player
    util = game.utility(player)
    # If we're at an endgame, then that's the heuristic score for this node
    if util != 0:
        return util
    # Next, check for a partition on the board.
    # Partitions are only possible if we have a certain number of moves that have occurred.
    # NOTE(review): this gate is a heuristic -- it assumes no partition can
    # form before 2*height or 2*width total moves; confirm the bound.
    if ( game.move_count >= 2 * game.height ) or ( game.move_count >= 2 * game.width ):
        # Grab the set of blank spaces and each player's position
        blank_spaces = game.get_blank_spaces()
        player_location = game.get_player_location(player)
        opponent_location = game.get_player_location(game.get_opponent(player))
        # Find all partitions on the game board as lines where each is a list of the form: list<(int, int)>
        partition_lines = find_partitions(game.width, game.height, blank_spaces)
        # -1 is a "not yet computed" sentinel so each flood-fill count is
        # performed at most once even if several partition lines match.
        player_contig = -1
        opponent_contig = -1
        for line in partition_lines:
            # Check to see if players are on either side of this partition line
            partitioned = False
            if line[0][0] == line[1][0]:
                # ROW-line : Row indexes match across line
                # See if player row locations differ and are separated by this line
                if player_location[0] != opponent_location[0] and \
                   ( ( player_location[0] > line[0][0] and opponent_location[0] < line[0][0] ) or \
                     ( player_location[0] < line[0][0] and opponent_location[0] > line[0][0] ) ):
                    # Players are on either side of this partition!
                    # Count contiguous squares for each player if it hasn't already been done.
                    partitioned = True
                    if player_contig == -1:
                        player_contig = count_contig(player_location, blank_spaces)
                    if opponent_contig == -1:
                        opponent_contig = count_contig(opponent_location, blank_spaces)
            elif line[0][1] == line[1][1]:
                # COLUMN-line : Column indexes match across line
                # See if player row locations differ and are separated by this line
                if player_location[1] != opponent_location[1] and \
                   ( ( player_location[1] > line[0][1] and opponent_location[1] < line[0][1] ) or \
                     ( player_location[1] < line[0][1] and opponent_location[1] > line[0][1] ) ):
                    # Players are on either side of this partition!
                    # Count contiguous squares for each player if it hasn't already been done.
                    partitioned = True
                    if player_contig == -1:
                        player_contig = count_contig(player_location, blank_spaces)
                    if opponent_contig == -1:
                        opponent_contig = count_contig(opponent_location, blank_spaces)
            # If this line counts as a partition, we should be able to determine a winner
            if partitioned == True:
                # If the contiguous space for the current player is greater than the opponent,
                # then the current player should win
                if player_contig > opponent_contig:
                    return float("inf")
                else:
                    # Else if there's less contiguous space or a tie in space, the current player
                    # should most likely lose
                    return float("-inf")
    # Otherwise, the heuristic is the difference in available moves between
    # the current player and the opposition
    return float(len(game.get_legal_moves(player)) - 2.0 * len(game.get_legal_moves(game.get_opponent(player))))
def find_partitions(width, height, blank_spaces):
    """Find estimated "partitions" of an Isolation board.

    A partition means the two players sit on disconnected "islands" of
    squares. Two patterns are detected, both horizontally and vertically:

    (1) two adjacent full lines of used (non-blank) squares, or
    (2) a single full line of used squares with used "cross" squares
        alternating along it every two positions.

    Parameters
    ----------
    width : integer
        Width of an Isolation game board
    height : integer
        Height of an Isolation game board
    blank_spaces : list<(int, int)>
        Integer pairs giving the blank squares on the board

    Returns
    -------
    list<list<(int, int)>>
        The dividing line of every partition found, where each line is a
        list of (row, col) coordinates.
    """
    blanks = set(blank_spaces)  # O(1) membership tests instead of list scans
    partition_lines = []

    # ---- ROWS ----
    # Collect every interior row that is completely used (no blanks).
    row_lines = []
    for r in range(1, height - 1):
        if all((r, c) not in blanks for c in range(width)):
            row_lines.append([(r, c) for c in range(width)])
    # Two adjacent full rows form a partition; record the first as divider.
    for line_a in row_lines:
        for line_b in row_lines:
            if line_b[0][0] == line_a[0][0] + 1:
                partition_lines.append(line_a)
    # Cross-pattern check: used squares directly above and below the line,
    # spaced two columns apart starting from the first cross.
    for line in row_lines:
        r = line[0][0]
        cross_columns = []
        still_possible = True
        for c in range(width):
            if (r - 1, c) not in blanks and (r + 1, c) not in blanks:
                cross_columns.append(c)
            elif not cross_columns:
                # BUG FIX: the original `break`-ed here with cross_columns
                # still empty and then indexed cross_columns[0], raising
                # IndexError whenever the first column had no cross.
                still_possible = False
                break
        if still_possible:
            for c in range(cross_columns[0], width, 2):
                if c not in cross_columns:
                    still_possible = False
                    break
        if still_possible:
            partition_lines.append(line)

    # ---- COLUMNS ---- (same logic, transposed)
    col_lines = []
    for c in range(1, width - 1):
        if all((r, c) not in blanks for r in range(height)):
            col_lines.append([(r, c) for r in range(height)])
    for line_a in col_lines:
        for line_b in col_lines:
            if line_b[0][1] == line_a[0][1] + 1:
                partition_lines.append(line_a)
    for line in col_lines:
        c = line[0][1]
        cross_rows = []
        still_possible = True
        for r in range(height):
            if (r, c - 1) not in blanks and (r, c + 1) not in blanks:
                cross_rows.append(r)
            elif not cross_rows:
                # Same fix as the row case: skip instead of crashing.
                still_possible = False
                break
        if still_possible:
            for r in range(cross_rows[0], height, 2):
                if r not in cross_rows:
                    still_possible = False
                    break
        if still_possible:
            partition_lines.append(line)

    return partition_lines
def count_contig(player_location, blank_spaces):
    """Count the blank squares reachable from *player_location* by
    up/down/left/right steps (diagonals excluded), via breadth-first search.

    Parameters
    ----------
    player_location : (int, int)
        (row, col) coordinates of the player; not itself counted.
    blank_spaces : list<(int, int)>
        List of all of the blank squares on the game board

    Returns
    -------
    int
        Count of the contiguous blank squares available to the player.
    """
    blanks = set(blank_spaces)  # O(1) membership tests instead of list scans
    visited = {player_location}
    frontier = [player_location]
    while frontier:
        row, col = frontier.pop(0)
        # BUG FIX: the original "check right" tested the up-right diagonal
        # (row - 1, col + 1) instead of (row, col + 1), so squares directly to
        # the right were never explored and a diagonal was wrongly included.
        for neighbor in ((row - 1, col), (row + 1, col),
                         (row, col - 1), (row, col + 1)):
            if neighbor in blanks and neighbor not in visited:
                visited.add(neighbor)
                frontier.append(neighbor)
    # Exclude the starting square itself from the count.
    return len(visited) - 1
def board_distance(game, player):
    """Approximate Euclidean distance between *player*'s square and the
    centre of the game board.

    Parameters
    ----------
    game : `isolation.Board`
        Current board state (player locations and blocked cells).
    player : object
        One of the two player objects in the game.

    Returns
    -------
    float
        Distance from the player's square to the centre of the board.
    """
    # NOTE(review): the rest of this file treats locations as (row, col), but
    # the first coordinate is paired with width here; on non-square boards
    # the axes may be swapped -- confirm intent.
    first, second = game.get_player_location(player)
    delta_a = game.width / 2.0 - first
    delta_b = game.height / 2.0 - second
    return math.sqrt(delta_a ** 2 + delta_b ** 2)
class CustomPlayer:
"""Game-playing agent that chooses a move using your evaluation function
and a depth-limited minimax algorithm with alpha-beta pruning. You must
finish and test this player to make sure it properly uses minimax and
alpha-beta to return a good move before the search time limit expires.
Parameters
----------
search_depth : int (optional)
A strictly positive integer (i.e., 1, 2, 3,...) for the number of
layers in the game tree to explore for fixed-depth search. (i.e., a
depth of one (1) would only explore the immediate sucessors of the
current state.)
score_fn : callable (optional)
A function to use for heuristic evaluation of game states.
iterative : boolean (optional)
Flag indicating whether to perform fixed-depth search (False) or
iterative deepening search (True).
method : {'minimax', 'alphabeta'} (optional)
The name of the search method to use in get_move().
timeout : float (optional)
Time remaining (in milliseconds) when search is aborted. Should be a
positive value large enough to allow the function to return before the
timer expires.
"""
    def __init__(self, search_depth=3, score_fn=custom_score,
                 iterative=True, method='minimax', timeout=10.):
        # Maximum depth for fixed-depth search.
        self.search_depth = search_depth
        # Whether get_move() performs iterative deepening instead of a single
        # fixed-depth search.
        self.iterative = iterative
        # Heuristic evaluation function, called as self.score(game, player).
        self.score = score_fn
        # Search algorithm selector: 'minimax' or 'alphabeta'.
        self.method = method
        # Callable returning milliseconds remaining; assigned by get_move().
        self.time_left = None
        # Abort search when time_left() drops below this many milliseconds.
        self.TIMER_THRESHOLD = timeout
def get_move(self, game, legal_moves, time_left):
"""Search for the best move from the available legal moves and return a
result before the time limit expires.
This function must perform iterative deepening if self.iterative=True,
and it must use the search method (minimax or alphabeta) corresponding
to the self.method value.
**********************************************************************
NOTE: If time_left < 0 when this function returns, the agent will
forfeit the game due to timeout. You must return _before_ the
timer reaches 0.
**********************************************************************
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
legal_moves : list<(int, int)>
A list containing legal moves. Moves are encoded as tuples of pairs
of ints defining the next (row, col) for the agent to occupy.
time_left : callable
A function that returns the number of milliseconds left in the
current turn. Returning with any less than 0 ms remaining forfeits
the game.
Returns
-------
(int, int)
Board coordinates corresponding to a legal move; may return
(-1, -1) if there are no available legal moves.
"""
# Keep a record of how much time we have left to go
self.time_left = time_left
# Perform any required initializations, including selecting an initial
# move from the game board (i.e., an opening book), or returning
# immediately if there are no legal moves
# No move so far
move = (-1, -1)
# Return if there are no legal moves to attempt
if len(legal_moves) == 0:
return move
# Move to the center, if possible (as an optimal place to start as a player)
if (int(game.height/2), int(game.width/2)) in legal_moves:
move = (int(game.height/2), int(game.width/2))
else:
move = legal_moves[0]
try:
# The search method call (alpha beta or minimax) should happen in
# here in order to avoid timeout. The try/except block will
# automatically catch the exception raised by the search method
# when the timer gets close to expiring
# Note that we can employ iterative deepening here to progressively search
# greater depths of the game tree
if self.iterative == True:
# Keep increasing depths
d = 1
while True:
if self.time_left() < self.TIMER_THRESHOLD:
raise Timeout()
if self.method == 'minimax':
score, move = self.minimax(game, d, True)
# Check if we've reached endgame
if score == float("inf") or score == float("-inf"):
break
elif self.method == 'alphabeta':
score, move = self.alphabeta(game, d, float("-inf"), float("inf"), True)
# Check if we've reached endgame
if score == float("inf") or score == float("-inf"):
break
d = d+1
else:
if self.method == 'minimax':
score, move = self.minimax(game, self.search_depth, True)
elif self.method == 'alphabeta':
score, move = self.alphabeta(game, self.search_depth, float("-inf"), float("inf"), True)
except Timeout:
# Return the best move we've found so far or an insurance move
return move
# Return the best move from the last completed search iteration
return move
def minimax(self, game, depth, maximizing_player=True):
"""Implement the minimax search algorithm as described in the lectures.
Parameters
----------
game : isolation.Board
An instance of the Isolation game `Board` class representing the
current game state
depth : int
Depth is an integer representing the maximum number of plies to
search in the game tree before aborting
maximizing_player : bool
Flag indicating whether the current search depth corresponds to a
maximizing layer (True) or a minimizing layer (False)
Returns
-------
float
The score for the current search branch
tuple(int, int)
The best move for the current branch; (-1, -1) for no legal moves
Notes
-----
(1) You MUST use the `self.score()` method for board evaluation
to pass the project unit tests; you cannot call any other
evaluation function directly.
"""
# Raise an exception if we've run out of time without an answer
if self.time_left() < self.TIMER_THRESHOLD:
raise Timeout()
# Evaluate the score of the current board for potential use
# Keep track of our best move choice.
if maximizing_player:
best_score = float("-inf")
else:
best_score = float("inf")
best_move = (-1,-1)
current_score = best_score
current_move = best_move
# Get the set of legal moves for the current player
legal_moves = game.get_legal_moves()
# If, they're aren't any legal moves, then we're at an endgame
if len(legal_moves) == 0:
return best_score, (-1,-1)
# If we've reached our max depth, evaluate the game board as per
# the current player
if depth == 0:
return self.score(game, self), (-1,-1)
# If the depth is 1, evaluate all children
if depth == 1:
for current_move in legal_moves:
game_copy = game.forecast_move(current_move) # Get the counter to increment
current_score = self.score(game_copy, self)
# Update our best choice, if necessary
if maximizing_player and current_score > best_score:
best_score = current_score
best_move = current_move
elif not maximizing_player and current_score < best_score:
best_score = current_score
best_move = current_move
# Shortcut if we've found the best score possible
if best_score == float("inf") or best_score == float("-inf"):
return best_score, best_move
return best_score, best_move
# Iterate over every legal move in depth-search fashion
for current_move in legal_moves:
# Copy the game state as if the move occurred and make a recursive call
game_copy = game.forecast_move(current_move) # Get the counter to increment
next_max_player = True
if maximizing_player == True:
next_max_player = False
current_score, junk_move = self.minimax(game_copy, depth-1, next_max_player)
# Update our best choice, if necessary
if maximizing_player and current_score > best_score:
best_score = current_score
best_move = current_move
elif not maximizing_player and current_score < best_score:
best_score = current_score
best_move = current_move
# Shortcut if we've found the best score possible
if best_score == float("inf") or best_score == float("-inf"):
return best_score, best_move
# Return the best score and move found
return best_score, best_move
def alphabeta(self, game, depth, alpha=float("-inf"), beta=float("inf"), maximizing_player=True):
"""Implement minimax search with alpha-beta pruning as described in the
lectures.
Parameters
----------
game : isolation.Board
An instance of the Isolation game `Board` class representing the
current game state
depth : int
Depth is an integer representing the maximum number of plies to
search in the game tree before aborting
alpha : float
Alpha limits the lower bound of search on minimizing layers
beta : float
Beta limits the upper bound of search on maximizing layers
maximizing_player : bool
Flag indicating whether the current search depth corresponds to a
maximizing layer (True) or a minimizing layer (False)
Returns
-------
float
The score for the current search branch
tuple(int, int)
The best move for the current branch; (-1, -1) for no legal moves
Notes
-----
(1) You MUST use the `self.score()` method for board evaluation
to pass the project unit tests; you cannot call any other
evaluation function directly.
"""
# Raise an exception if we've run out of time without an answer
if self.time_left() < self.TIMER_THRESHOLD:
raise Timeout()
# Evaluate the score of the current board for potential use
# Keep track of our best move choice.
if maximizing_player:
best_score = float("-inf")
else:
best_score = float("inf")
best_move = (-1,-1)
current_score = best_score
current_move = best_move
# Get the set of legal moves for the current player
legal_moves = game.get_legal_moves()
# If, they're aren't any legal moves, then we're at an endgame
if len(legal_moves) == 0:
return best_score, best_move
# If we've reached our max depth, evaluate the game board as per
# the current player
if depth == 0:
return self.score(game, self), (-1,-1)
# If the depth is 1, evaluate all children
if depth == 1:
for current_move in legal_moves:
game_copy = game.forecast_move(current_move) # Get the counter to increment
current_score = self.score(game_copy, self)
# Update our best choice, if necessary
if maximizing_player and current_score > best_score:
best_score = current_score
best_move = current_move
elif not maximizing_player and current_score < best_score:
best_score = current_score
best_move = current_move
# Shortcut if we've found the best score possible
if best_score == float("inf") or best_score == float("-inf"):
break
# Decide if we need to perform a cut-off.
# If we're a maximizing player:
# - If the current move taken from the minimizing child is
# higher than anything we've seen so far on the path back to the root,
# we don't need to proceed further.
# If we're a minimizing player:
# - If the current move taken from the maximizing child is
# lower than anything we've seen so far on the path back to the root,
# we don't need to proceed further.
if maximizing_player:
alpha = max(alpha, current_score)
if alpha >= beta:
# Cutoff
break
else:
beta = min(beta, current_score)
if beta <= alpha:
# Cutoff
break
# We're at our max depth, return a result
return best_score, best_move
# Iterate over every legal move in depth-search fashion if there's still depth left to go
for current_move in legal_moves:
# Copy the game state as if the move occurred and make a recursive call
game_copy = game.forecast_move(current_move) # Get the counter to increment
next_max_player = True
if maximizing_player == True:
next_max_player = False
current_score, junk_move = self.alphabeta(game_copy, depth-1, alpha, beta, next_max_player)
# Update our best choice, if necessary
if maximizing_player and current_score > best_score:
best_score = current_score
best_move = current_move
elif not maximizing_player and current_score < best_score:
best_score = current_score
best_move = current_move
# Shortcut if we've found the best score possible
if best_score == float("inf") or best_score == float("-inf"):
break
# Decide if we need to perform a cut-off.
# If we're a maximizing player:
# - If the current move taken from the minimizing child is
# higher than anything we've seen so far on the path back to the root,
# we don't need to proceed further.
# If we're a minimizing player:
# - If the current move taken from the maximizing child is
# lower than anything we've seen so far on the path back to the root,
# we don't need to proceed further.
if maximizing_player:
alpha = max(alpha, current_score)
if alpha >= beta:
# Cutoff
break
else:
beta = min(beta, current_score)
if beta <= alpha:
# Cutoff
break
# Return the best score and move found
return best_score, best_move
|
import mimpy.mesh.hexmesh as hexmesh
import mimpy.models.twophase as twophase
import numpy as np
def res_k(p, i, j, k):
    """Permeability callback for HexMesh.build_mesh.

    Returns the 3x3 identity tensor for every cell, regardless of the
    point `p` and the (i, j, k) cell indices; the real per-cell values
    are installed afterwards via set_cell_k.
    """
    identity_tensor = np.eye(3)
    return identity_tensor
# Build the SPE-10 (layer 85) reservoir model and run the two-phase solver.
res_mesh = hexmesh.HexMesh()
# 50 x 1 x 60 cells over a 670. x .6 x 365. domain; res_k supplies the
# placeholder permeability that is overwritten per-cell below.
res_mesh.build_mesh(50, 1, 60, 670., .6, 365., res_k)
Kx = []
Ky = []
Kz = []
# Read one (Kx, Ky, Kz) triple per cell and install an isotropic tensor
# built from Kx.  `with` guarantees the data file is closed (the original
# leaked the handle), and list() materializes map()'s iterator so the
# triple can be indexed under Python 3.
with open("spe_perm_layer85.dat") as permfile:
    for cell_index in range(res_mesh.get_number_of_cells()):
        values = list(map(float, permfile.readline().split()))
        Kx.append(values[0])
        Ky.append(values[1])
        Kz.append(values[2])
        current_k = values[0]*np.eye(3)
        # scale factor 1.e-12 -- presumably a unit conversion to m^2; TODO confirm
        current_k *= 1.e-12
        # Floor division keeps the cell index an int under Python 3
        # (plain `/` would produce a float and break set_cell_k there).
        res_mesh.set_cell_k((cell_index%60)*50+cell_index//60, current_k)
res_mesh.output_vtk_mesh("spe_10_mesh",
                         [res_mesh.get_all_k_entry(0, 0),
                          res_mesh.get_all_k_entry(1, 1)],
                         ["Kx", "Ky"])
res_twophase = twophase.TwoPhase()
res_twophase.set_mesh(res_mesh)
# Zero-flux (no-flow) condition on all six boundary faces.
for boundary_marker in range(6):
    res_twophase.apply_flux_boundary_from_function(boundary_marker, lambda p: np.array([0., 0., 0.]))
res_twophase.set_model_name("spe_10_")
# Uniform initial state and rock properties over every cell.
n_cells = res_mesh.get_number_of_cells()
res_twophase.set_initial_p_o(np.array([0.]*n_cells))
res_twophase.set_initial_s_w(np.array([0.]*n_cells))
res_twophase.set_porosities(np.array([.3]*n_cells))
res_twophase.set_viscosity_water(8.90e-4)
res_twophase.set_viscosity_oil(8.90e-4)
res_twophase.set_compressibility_water(1.e-9)
res_twophase.set_compressibility_oil(1.e-9)
res_twophase.set_ref_density_water(1000.)
res_twophase.set_ref_density_oil(1000.)
res_twophase.set_ref_pressure_oil(0.)
res_twophase.set_ref_pressure_water(0.)
res_twophase.set_residual_saturation_water(.0)
res_twophase.set_residual_saturation_oil(.2)
res_twophase.set_corey_relperm(2., 2.)
# Injector at one top corner of the domain, producer at the opposite bottom corner.
well_location1 = res_mesh.find_cell_near_point(np.array([0., 0., 365.]))
well_location2 = res_mesh.find_cell_near_point(np.array([670., 0., 0.]))
res_twophase.add_rate_well(1.e-1, 0., well_location1, "WELL1")
res_twophase.add_pressure_well(3000., 1.e-13, well_location2, "WELL2")
res_twophase.initialize_system()
res_twophase.set_time_step_size(3.e5)
res_twophase.set_output_frequency(100)
res_twophase.set_number_of_time_steps(1000)
res_twophase.set_saturation_substeps(3)
res_twophase.start_solving()
|
import os
from teaman import settings
def get_image_paths(url):
    """Return (upload_path, thumb_path) for an image URL.

    The upload path is MEDIA_ROOT + 'uploads/' + <filename>; the thumbnail
    path is MEDIA_ROOT + 'thumbs/' + <base>_thumb<ext>.
    NOTE(review): the pieces are concatenated directly, so this assumes
    settings.MEDIA_ROOT ends with a path separator -- confirm in settings.
    """
    filename = os.path.basename(url)
    stem, extension = os.path.splitext(filename)
    thumbname = '%s_thumb%s' % (stem, extension)
    upload_path = "%suploads/%s" % (settings.MEDIA_ROOT, filename)
    thumb_path = "%sthumbs/%s" % (settings.MEDIA_ROOT, thumbname)
    return upload_path, thumb_path
|
'''
Created on May 10, 2016
@author: hershbca
'''
"""
Don't touch anything without training, and saving. Look below and use premade commands before making your own.
"""
# import modules (easygui is third-party; `time` is imported but shadowed below)
import pygame, random, easygui, sys, time
# pygame initializations
pygame.init()
pygame.font.init()
# create the 1200 x 750 window
screen = pygame.display.set_mode ([1200, 750])
# paint the whole screen white
screen.fill([255,255,255])
# push the cleared frame to the display
pygame.display.flip()
# current plot column (x position) for new stock points
# WARNING(review): this rebinding shadows the imported `time` module for the
# rest of this file; any later `time.sleep(...)`-style call would fail.
time = 0
# the current turn the game is running
turnnum = 1
# amount of turns the code runs before stopping
passes = 1
# lets you demonstrate the system (no random events while True)
demonstrate = False
# dummy variable, please consult below
fail = False
# this operates the functions, see functions below
command = 0
# how effective a sale is, i.e. 300 = an amount of money from 0 to 300
sale = 300
# the number of an event that affects the economy
chance = 0
market = 0
# dummy variable
sc = 0
# the dollar value change that the whole market undergoes each turn
marketgeneral = 3
# divisor that keeps a company from over-expanding (larger = weaker drag)
inhibitor = 16
# this is a feature that wasn't implemented, kept for possible later use:
# a string telling players whether they get refunded if the market crashes
pardon = "";
#economy = []
#for x in range(0, 100):
#    economy[x] = random.randint(0, (1000000000/x+1))
# the company class, each company is one object that is also stored in companies
class comp:
    """One publicly traded company in the simulation.

    Tracks the current share price, the on-screen plot history,
    advertising income, display color, and sector group.  Several
    attributes (policies, committee, committeecount, products, group)
    are placeholders for features that were never implemented.
    """
    def __init__(self, name, value, color, group):
        # identity / display
        self.name = name
        self.color = color            # RGB used for the price line and labels
        self.group = group            # sector id; reserved for a future expansion
        # price state
        self.value = value            # current share price
        # plot points as [x, y] screen coordinates; seeded at column 300
        self.history = [[300, 500 - self.value]]
        self.percentchange = 0        # scratch: per-turn percent change
        # flat income added to the price each turn from advertising
        self.ad = 0
        # unimplemented placeholders
        self.policies = []
        self.committee = []
        self.committeecount = 0
        self.products = []
"""
class product:
def __init__(self, name, price, value):
self.name = name
self.price = price
self.value = value
"""
p = comp("company 1", 110,[255,0,0], 0)
d = comp("company 2", 150,[0,255,0], 1)
r = comp("company 3", 180,[0,0,255], 2)
f = comp("company 4", 170,[225,255,0], 1)
c = comp("company 5", 150,[0,255,255], 2)
hc = comp("company 6", 130,[255,0,255], 0)
h = comp ("company 7", 60,[255,0,125], 0)
cs = comp("company 8", 10,[125,0,255], 1)
cc = comp("company 9", 250,[0,0,0], 2)
companies = [p,d,r,f,c,hc,h,cs,cc]
#selected is a list of all the companies that are displayed, please see "select" in while before editing
selected = companies
#news peices in a list, have a corrosponding responce in the end of turn function, before adding, please add an effect in turn
bignews = ["people are shopping localy!", "people head to malls"]
bignews.extend(["wallstreet in decline!", "wallstreet rises", "holidays are coming", "advertising businesses at new low"])
bignews.extend(["the run away succes of wall street's best company was a patsi scheme!"])
bignewssub = ["more people shop localy now so people spend less on big corporations", "more people visit large business now."]
bignewssub.extend(["uh-oh", "halleullah!", "dress to impress with a holiday sale!", "advertising is down, 2 for 1 sale!"])
bignewssub.append("its stock has plummeted, some will never recover")
###
#small news in a list, no affect on economy, please add more jokes.
smallnews = ["Procrastinators united event shut down due to lack of planning"]
smallnews.append("New study shows link between video games and violins")
smallnews.append("Infra sonic weapon designers sued becuase 'sound is a weapon' was copywrited")
smallnews.extend(["I am typing on this screen lol", "help I'm stuck in this machine!"])
smallnews.append("Extremely hot super model goes on first date with Charlie Hershberger, apocalapse declared imminent",)
smallnews.append("Nuclear weapons found in syria, syrians say 'for peaceful purposes'")
smallnews.append("Russia bans gays, rainbows declared enemy of the state")
smallnews.append("Putin declares himself gay, no one is suprised.",)
smallnews.append("Local man finds jesus 'I found him behind my couch eating ramen'")
smallnews.append("Chuck norris died today, authorities suggest you stay inside until his blood rage subsides")
smallnews.append("Shocking new study reveals only 1 perecent of all americans know that the earth orbits the sun")
smallnews.append("Gun control debate ended when progun activist shot his debate opponent",)
smallnews.append("New study shows that 95 percent of all computer glitches are cured by turning 'off and on'")
smallnews.append("Bankrupcy goes bankrupt, wall street appalled")
smallnews.append("Shia Labeouf plagerizes script, responds 'to be or not to be, that is the question'")
smallnews.append("Coding international goto social()")
smallnews.append("New study says some people in Mississippi are rascist")
smallnews.append("#yolo #swag #comment out")
smallnews.append("My little pony now more popular than Doctor Who")
smallnews.append("Stick of truth banned in austrailia. population decreased by 20%")
smallnews.append("Stock market game by Charlie Hershberger is now a national sensation, *pats self on back*")
smallnews.append("Mysterious stranger found in my house, arcane voices and knife shapening heard")
smallnews.append("Atheism becomes Italy's offical religeon, the Vatican demands Italy move elsewhere")
smallnews.append("Local gangster runs for president, wins by landslide")
smallnews.append("Cocacola buys out Pepsi so it can sue itself for stealing Cocacola formula")
smallnews.append("Mr lewis starts his own political group, rumors of obtaining matches and trash cans unconfirmed")
smallnews.append("Today a clown fish farted")
smallnews.append("Local scientests found oxygen in the air")
smallnews.append("Raisins infiltrate chocolate chip cookies")
smallnews.append("Oh my god run! oh seriously oh dear, ah! ah! oh snap! gah!!! (faints)")
smallnews.append("Local woman claims that ""Jesus took the wheel, he took it from my hand"" ")
smallnews.append("New perfume based on anthrax")
smallnews.append("Neighborhood hulk eats a puppy")
smallnews.append("Dark souls releases, due to difficulty, many switch to the easier hobby of rocket science")
smallnews.append("3d printer prints a whole pizza, dominos 'not having any of that'")
smallnews.append("Victorian era fashion is back, 'black is the new black'")
smallnews.append("Scientists attempt to create pokemon")
smallnews.append("Large halron collider destroys earth, wall street unaffected")
smallnews.append("New antidote for cancer: licking computer screens")
smallnews.append("America declares war on terrorism, local goverment of Terrorism, Brazil appalled.")
smallnews.append("California becomes a country. national anthem is Snoop Dogg's 'smoke weed every day'.")
smallnews.append("New fasion trend sweeping the nation: bagel face")
smallnews.append("Fetuses found in power aide drinks")
smallnews.append("Unfinished bread products on trial, judge say 'may the defendant please rise'")
smallnews.append("Look at all the chairs, Mr. White.")
smallnews.append("Mexico took Texas back, guys. america presses no charges")
smallnews.append("Scientist experiments with chocolate, 'it turn grey guys, yay.'")
smallnews.append("Love is in the air, gas masks recommended.")
smallnews.append("Charlie Hershberger, entering the dating pool, interested need only inquire")
smallnews.append("Man proclaims to be Jesus while on acid")
smallnews.append("Mysterious graffiti, perpetrator claiming to be 'the Huss', color schemes follow a rainbow")
smallnews.append("Someone raps about the constitution, becomes more well known than original document")
smallnews.append("Man claims to be a Brony. accepts Celestia as his lord and savior.")
smallnews.append("Chocolate fountain gone wrong, spilling out the unknown.")
smallnews.append("Suspected message from aliens "" ay lmao"" ")
smallnews.append("Rising beverage company falls after 'tea party scandal'.")
smallnews.append("France surrenders, no one is surprised")
smallnews.append("The new media found the internet sensation of 'Fanfiction'")
smallnews.append("Photograph emerges, supposed zebra wearing a crown attacking bystanders.")
smallnews.append("Famous Youtuber elected presidant. Like, favorite, and subscribe for a chance to be vise president")
smallnews.append("Cloud captured in a container,'We're making a flying numbus!'")
smallnews.append("Marriage between man and horse is now legal. Neigh neigh.")
smallnews.append("Adoption of horses can legally can be your child now. oh god, the bronys are coming.")
smallnews.append("World record broken for most fabulous chicken wings.")
smallnews.append("Slenderman sees paranormal activity, rates it 5 out of 8 pages.")
smallnews.append("President sasses America. Oooh kill 'em.")
smallnews.append("Pennies suddenly disapear from entire continent, America left penniless.")
smallnews.append("Large lizard found roaming rural home, described as a 'fire-breathing adorable death machine'.")
smallnews.append("Godzilla becomes wrestler, world champion")
smallnews.append("Godzilla vs. Hulk Hogan to be least watched television program of all time")
smallnews.append("new moon landing conspiricy, 'they landed on mars, and used black and white footage!'")
smallnews.append("Terrorist projects porn in front of Fifth Avenue, Al-Queda refuses to neogtiate for the girl's number.")
smallnews.append("Cabbage patch kids go on strike, salad supplys are limited")
smallnews.append("Chameleons run out of colors to become")
smallnews.append("20th anniversay of the Lion King, fans cry over Mufasa all over again")
smallnews.append("Chairs have gone out of fashion, standing is the new trend.")
smallnews.append("Darth vader to star in anti-smoking ad, Luke cast in similar amputee PSA.")
smallnews.append("Jedi coming under fire for constant use of force.")
smallnews.append("Yu-Gi-Oh card trend getting out of hand.")
smallnews.append("Left handed people demand rights.")
smallnews.append("Concerns over censorship on twitter, local writer exclaims, '####!'")
smallnews.append("Jazz rises out of the blues.")
smallnews.append("Old musician sues pirate, 'I had no idea that mozart filed a copyright'")
smallnews.append("Pirates proclaimed to have ten favorite letters: I I, R, and the seven C's.")
smallnews.append("Foundation of a loyal fathers' club collapses, founding fathers displeased.")
smallnews.append("Supposedly immortal man's prison sentence disclosed, 10,000,000,000,000,000,000,000,000,000,000...")
smallnews.append("Surrealist's paintings too lifelike, legitimacy questioned.")
smallnews.append("Burglar attacked by guard dog, reported to have screamed, 'PAYBACK IS A B**CH!!'")
smallnews.append("New time machine discovered to be a defective clock, gears installed backwards.")
smallnews.append("'All information broadcasted may or may not be real', announces Faux Gnus.")
smallnews.append("Architects plan to bridge America and China, blues redone due to 'being an eyesore'.")
smallnews.append("4 out of 5 locals agree that the news is getting stale, perhaps 4 out of 5 locals should be more interesting.")
smallnews.append("Local journal of missing man reads, 'Stupid Pewds, doesn't suspect a thing. Ducks gonna duck. Quack.'")
smallnews.append("NWA member admits to affair with police officer")
smallnews.append("Jeffery becomes the worlds most successful comedian. Apocalapse is already here")
smallnews.append("Gay men found to be the ultimate power source, Charlie Hershberger can now power the western sea board")
smallnews.append("Anit-Gamergate spokesmen have declared gamergate dead, but the damn gamers keep hiting 'E' to revive")
smallnews.append("python code doesn't require semicolons, so my code just looks like an over actvive english major essay")
smallnews.append("'The Order 1886' shown to have an order of n*log(n). gamers appalled")
smallnews.append("Robots take over the news, NEVER_MIND_False_ALARM_FELLOW_HU_MANS")
smallnews.append("The walls are moving. The Walls Are Moving. The Walls Are Moving!! THE WALLS ARE MOVING!!!")
smallnews.append("you fools! you can't kill Cuthulu! Your only hope is to flee his terror in hopes that he ignores you!")
smallnews.append("The universe is so large and we are so small, in the same way bugs are small in comparison to cars on a highway")
smallnews.append("Local man blames the jews, crime unknown")
# this is a sample texgt adder, put text between quotes and add the line in uncommented
#smallnews.append("")
#
#please use this to replace the shit code at new company, I may fix this soon however
def redraw():
    """Clear the screen and redraw the company stat columns and the price grid.

    Reads module globals: screen, companies, and time (the current plot
    column).  Fixes vs. the original: `font` is created locally -- no
    module-level font exists, so the original raised NameError -- and the
    row counter `cs` is initialised before the loop (the original assigned
    it inside the loop, making it local and raising UnboundLocalError on
    the first read).  The dead commented-out first-round block was removed.
    """
    screen.fill([255, 255, 255])
    font = pygame.font.Font(None, 30)
    # row counter for the left/right stat columns
    cs = 0
    for company in companies:
        # skip dead companies (value driven to zero or below)
        if (company.value > 0):
            # price label at the company's current plot position
            surf1 = font.render(str(company.value), 1, company.color)
            screen.blit(surf1, [((time*10)+300), 600 - company.value])
            # right-hand column: price and name
            surf1 = font.render((str(company.value) + " " + company.name), 1, company.color)
            screen.blit(surf1, [800, (30+(cs*70))])
            # left-hand column: advertising level
            surf1 = font.render("advertisement status" + str(company.ad), 1, company.color)
            screen.blit(surf1, [0, (30 + cs*70)])
        cs = cs + 1
    # axes
    pygame.draw.rect(screen, (0,0,0), [300, 500, 500, 2], 0)
    pygame.draw.rect(screen, (0,0,0), [300, 0, 2, 500], 0)
    # vertical gridlines, one per time step
    for x in range(0, 51):
        pygame.draw.rect(screen, [0,0,0], [300 + (x * 10), 0, 1, 600], 0)
    # horizontal gridlines and their dollar labels
    for x in range(0, 14):
        pygame.draw.rect(screen, [0,0,0], [300, 600-(x*50), 500, 2], 0)
    for x in range(0, 14):
        number = x * 50
        number = font.render(str(number), 1, (0,0,0))
        screen.blit(number, [260, (-x * 50) + 590])
    pygame.display.flip()
#main function! save before you edit
def turn ():
global chance, time, turnnum, passes, companies, patsi, marketgeneral, advertise, inhibitor, sale, companies, demonstrate
screen.fill([255,255,255])
#creates font (none, 30)
font = pygame.font.Font(None, 30)
# checks if there is a ponsi scheme (protip when chance = 6 it is a ponsi scheme)
if chance != 6:
# if the program is in demonstrate mode it has no events
if demonstrate == True:
chance = -1
# if it isn't it will have a one in three chance of being one
if demonstrate == False:
chance = random.randint(1,3)
# ensures that there is only an event on the last day that the "passes" function renders
if turnnum + 1 == passes:
# if 1 then it choses an event at random,
#if it is 6 clench your checks and prepare for the ponsi scheme
if chance == 1 or chance == 6:
screen.fill([255,255,255])
if chance != 6:
#choses event
chance = random.randint(0,5)
# sets event
surf19 = font.render(bignews[chance], 1, (0,0,0))
surf20=font.render(bignewssub[chance], 1, (0,0,0))
#creates ponsi scheme
if chance == 6:
# prints who is ponsi
surf19=font.render(patsi + " is revealed to be a patsi scheme", 1, (0,0,0))
# prints if goverment will refund
surf20=font.render(pardon, 1, (0,0,0))
chance = 10
#prints both messages
screen.blit(surf19, (10,10))
screen.blit(surf20, (10,500))
#
#if you want you can use pygame.imageload and screet blit here to put an image,
#this has not been implemented but it shoulc be here
#
#screen.blit(image[chance], (20, 20))
#show screen
pygame.display.flip()
# a button to stop simulation so you can read the message, just hit ok to leave
easygui.msgbox("")
#if it is not an event
else:
#set chance to ten to ensure that it is not mistaken as event
chance = 10
# make new screen so the last text doesn't overlap
screen.fill([255,255,255])
# time moves so stocks can be made at a new position
time = time + 1
# cs is a true false dummy variable
cs = 0
# scrooling function
if time == 41:
#moves apperent time back
time = 40
#moves all companies equally
for company in companies:
if company.value > 0:
#delete first entry
del company.history[0]
#move all the elements back one mark
for x in company.history:
x[0] = x[0]-10
cs = 0
#sets company value
for company in companies:
# no dead companies please
if company.value > 0:
# make percentage change
company.percentchange = float(company.value)
# company is affected by total economy
company.value = int(company.value + marketgeneral + 10)
#company has random fluctuations
company.value = int(company.value + (company.value*float(random.randint(-2, 3))/20))
# companies can't over expand
company.value = company.value - int(float(company.value)/inhibitor)
# ads help company
company.value = company.value + company.ad
# lets it be set to company history
place = [(time * 10) + 300, -company.value + 600]
company.history.append(place)
#draws the history line for this company
pygame.draw.lines(screen, company.color, False, company.history, 2)
#prints the percent change
company.percentchange = float(float(company.value)/company.percentchange)*100
company.percentchange = company.percentchange - 100
cs = 0
for company in companies:
# no dead companies
if (company.value > 0):
#print stats
surf1 = font.render(str(company.value), 1, company.color)
screen.blit(surf1, [((time*10)+300),590 - company.value])
surf1 = font.render(str(company.value) + " " + company.name + " " + str(company.percentchange) +"%", 1, company.color)
screen.blit(surf1, [800, (30+(cs*70))])
surf1 = font.render("advertisement status" + str(company.ad), 1, company.color)
screen.blit(surf1, [0, (30 + cs*70)])
# if it is a dead company
else:
# prints that is it dead
surf1 = font.render(company.name + " is dead", 1, (0,0,0))
screen.blit(surf1, [0,30+(cs*70)])
screen.blit(surf1, [800, 30+(cs*70)])
cs = cs + 1
#prints the grid
pygame.draw.rect(screen, (0,0,0),[300, 500, 500, 2], 0)
pygame.draw.rect(screen, (0,0,0),[300, 0, 2, 500], 0)
for x in range (0, 51):
pygame.draw.rect(screen, [0,0,0], [300 + (x * 10),0, 1, 600], 0)
for x in range (0, 13):
pygame.draw.rect(screen, [0,0,0], [300, 600-(x*50), 500, 2], 0)
for x in range (0, 13):
number = x * 50
number = font.render(str(number), 1, (0,0,0))
screen.blit(number, [260, (-x * 50) + 590])
#prints the small-news
news = random.choice (smallnews)
surf18 = font.render(news, 1, (0,0,0))
screen.blit(surf18, [0, 640])
#displays screen
pygame.display.flip()
#ensures the market can't fluctuate to much
if marketgeneral <= -5:
marketgeneral = -4
if marketgeneral >= 5:
marketgeneral = 4
# if it is normal randomly change the market
if marketgeneral >= -4 and marketgeneral <= 4:
marketgeneral = marketgeneral + random.choice([1,-1])
# this is all the effects of the large news items
# good for large businesses
if chance == 0:
inhibitor = 8
# good for small businesses
if chance == 1:
inhibitor = 24
# sets inhibitor to normal if not an event
if chance != 1 and chance != 0:
inhibitor = 16
# wallstreet market goes down
if chance == 2:
marketgeneral = -30
#wallstreet market goes up
if chance == 3:
marketgeneral = 30
# makes sale a better deal
if chance == 4:
sale = 400
# sets sale to normal if not an event
else:
sale = 300
# doubles advertisement value
if chance == 5:
advertise = 10
#resets advertisement value
else:
advertise = 5
font = pygame.font.Font(None, 30)
#white to prepare to write
screen.fill([255,255,255])
#prints
for company in companies:
if company.value > 0:
company.value = int(company.value + marketgeneral + 10)
company.value = int(company.value + (company.value*float(random.randint(-2, 3))/20))
company.value = company.value - int(float(company.value)/inhibitor)
company.value = company.value + company.ad
place = [(time * 10) + 300, -company.value + 610]
company.history[0] = place
pygame.draw.circle(screen, company.color, place, 2, 0)
cs = 0
for company in companies:
if (company.value > 0):
surf1 = font.render(str(company.value), 1, company.color)
screen.blit(surf1, [((time*10)+300),600 - company.value])
surf1 = font.render((str(company.value) + " " + company.name), 1, company.color)
screen.blit(surf1, [800, (30+(cs*70))])
surf1 = font.render("advertisement status" + str(company.ad), 1, company.color)
screen.blit(surf1, [0, (30 + cs*70)])
cs = cs + 1
pygame.draw.rect(screen, (0,0,0),[300, 500, 500, 2], 0)
pygame.draw.rect(screen, (0,0,0),[300, 0, 2, 500], 0)
for x in range (0, 51):
pygame.draw.rect(screen, [0,0,0], [300 + (x * 10),0, 1, 600], 0)
for x in range (0, 14):
pygame.draw.rect(screen, [0,0,0], [300, 600-(x*50), 500, 2], 0)
for x in range (0, 14):
number = x * 50
number = font.render(str(number), 1, (0,0,0))
screen.blit(number, [260, (-x * 50) + 590])
#\\ //\\ // || || ====||==== || ||=======
# \\ // \\ // || || || || ||
# \\ // \\ // || || || || ||
# \\ // \\ // ||=========|| || || ||=======
# \\ // \\ // || || || || ||
# \\ // \\ // || || || || ||
# \\// \\// || || ====||==== ||====== ||=======
# NOTE(review): this is Python 2 code (bare `print company.color` below) using
# pygame + easygui. Indentation was reconstructed during review from control
# flow — confirm nesting against the original file.
# Admin console: blocks forever, and on any keypress prompts for a command
# that mutates the global market state (companies, chance, selected, ...).
while True:
    pygame.display.flip()
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN:
            command = easygui.enterbox("What is your command, my master")
            """
            ok, from here on all the documentation you need is in the users guide,
            I will update the long print statements so they inherit from redraw, but I am still working on it
            when I finish it should just be some statements that
            find information, do some math, and then either call redraw or finish on thier own
            if you have a problem, just look at the doc,
            to make your own, to set key word that gui will look for say
            if command == "keyword":
            {your code there}
            """
            # if command == "pass":
            # turn()
            # create a new company from user-supplied name, share cost and colour
            if command == "new company":
                newcompany = easygui.enterbox("what is your name?")
                companysharecosthundreds = easygui.integerbox("what is your cost in hundreds?")
                companysharecost = easygui.integerbox("what is your cost?")
                redcolor = easygui.integerbox("how red is your color (0 - 99%)")
                greencolor = easygui.integerbox("how green is your color (0 - 99%)")
                bluecolor = easygui.integerbox("how blue is your color (0 - 99%)")
                # colour percentages are scaled to 0-255 RGB (2.55 per percent)
                a = comp(newcompany, (100*companysharecosthundreds) + companysharecost, [int(redcolor*2.55), int(greencolor*2.55), int(bluecolor*2.55)],0)
                companies.append(a)
                # inline redraw: advance every living company one tick and replot
                screen.fill([255,255,255])
                for company in companies:
                    if company.value > 0:
                        # market trend, random jitter, expansion inhibitor, ads
                        company.value = int(company.value + marketgeneral + 10)
                        company.value = int(company.value + (company.value*float(random.randint(-2, 3))/20))
                        company.value = company.value - int(float(company.value)/inhibitor)
                        company.value = company.value + company.ad
                        # screen position for this tick (x from time, y inverted)
                        place = [(time * 10) + 300, -company.value + 610]
                        company.history[0] = place
                        print company.color
                        pygame.draw.circle(screen, company.color, place, 2, 0)
                # stat lines, one row of text per company
                cs = 0
                for company in companies:
                    if (company.value > 0):
                        surf1 = font.render(str(company.value), 1, company.color)
                        screen.blit(surf1, [((time*10)+300),600 - company.value])
                        surf1 = font.render((str(company.value) + " " + company.name), 1, company.color)
                        screen.blit(surf1, [800, (30+(cs*70))])
                        surf1 = font.render("advertisement status" + str(company.ad), 1, company.color)
                        screen.blit(surf1, [0, (30 + cs*70)])
                    cs = cs + 1
                # grid: axes, a vertical line every 10px, horizontals every 50
                pygame.draw.rect(screen, (0,0,0),[300, 500, 500, 2], 0)
                pygame.draw.rect(screen, (0,0,0),[300, 0, 2, 500], 0)
                for x in range (0, 51):
                    pygame.draw.rect(screen, [0,0,0], [300 + (x * 10),0, 1, 600], 0)
                for x in range (0, 14):
                    pygame.draw.rect(screen, [0,0,0], [300, 600-(x*50), 500, 2], 0)
                for x in range (0, 14):
                    number = x * 50
                    number = font.render(str(number), 1, (0,0,0))
                    screen.blit(number, [260, (-x * 50) + 590])
            # remove a company by (exact) name, then redraw
            if command == "delete":
                scompany = easygui.enterbox("who are you deleting?")
                checkcount = 0
                # NOTE(review): deletes from `companies` while iterating it —
                # after a match, the element that shifts into the deleted slot
                # is skipped; confirm this is acceptable.
                for company in companies:
                    if company.name == scompany:
                        del companies[checkcount]
                    checkcount += 1
                # inline redraw (same as "new company" above, minus the print)
                screen.fill([255,255,255])
                for company in companies:
                    if company.value > 0:
                        company.value = int(company.value + marketgeneral + 10)
                        company.value = int(company.value + (company.value*float(random.randint(-2, 3))/20))
                        company.value = company.value - int(float(company.value)/inhibitor)
                        company.value = company.value + company.ad
                        place = [(time * 10) + 300, -company.value + 610]
                        company.history[0] = place
                        pygame.draw.circle(screen, company.color, place, 2, 0)
                cs = 0
                for company in companies:
                    if (company.value > 0):
                        surf1 = font.render(str(company.value), 1, company.color)
                        screen.blit(surf1, [((time*10)+300),600 - company.value])
                        surf1 = font.render((str(company.value) + " " + company.name), 1, company.color)
                        screen.blit(surf1, [800, (30+(cs*70))])
                        surf1 = font.render("advertisement status" + str(company.ad), 1, company.color)
                        screen.blit(surf1, [0, (30 + cs*70)])
                    cs = cs + 1
                pygame.draw.rect(screen, (0,0,0),[300, 500, 500, 2], 0)
                pygame.draw.rect(screen, (0,0,0),[300, 0, 2, 500], 0)
                for x in range (0, 51):
                    pygame.draw.rect(screen, [0,0,0], [300 + (x * 10),0, 1, 600], 0)
                for x in range (0, 14):
                    pygame.draw.rect(screen, [0,0,0], [300, 600-(x*50), 500, 2], 0)
                for x in range (0, 14):
                    number = x * 50
                    number = font.render(str(number), 1, (0,0,0))
                    screen.blit(number, [260, (-x * 50) + 590])
            # advance the simulation N turns
            if command == "passes":
                passes = easygui.integerbox("how many times, lord")
                for turnnum in range (0, passes):
                    turn()
            # unattended demo mode: `rounds` batches of 99 turns each
            if command == "demonstrate":
                demonstrate = True
                rounds = easygui.integerbox("How long may I entertain your esteemed guests?")
                currentround = 0
                while demonstrate == True:
                    currentround += 1
                    for x in range (0, 99):
                        turn()
                    if currentround == rounds:
                        demonstrate = False
            # force a named company's value down; re-prompt until a name matches
            if command == "plummet":
                cs = 1
                while cs:
                    target = easygui.enterbox("What shall plummet, my master?")
                    for company in companies:
                        if company.name == target:
                            company.value = company.value - 200
                            cs = 0
            # force a named company's value up; re-prompt until a name matches
            if command == "prosper":
                cs = 1
                while cs:
                    target = easygui.enterbox("What shall prosper, my master?")
                    for company in companies:
                        if company.name == target:
                            company.value = company.value + 200
                            cs = 0
            # buy advertising: costs ~50 value, grants 5 ad points
            if command == "advertise":
                cs = 1
                while cs:
                    target = easygui.enterbox("who is advertising, my master?")
                    for company in companies:
                        if company.name == target:
                            company.value = company.value - 50 + random.randint(-10, 10)
                            company.ad = company.ad + 5
                            cs = 0
            # cash in 15 ad points for a random value boost up to `sale`
            if command == "sale":
                cs = 1
                while cs:
                    target = easygui.enterbox("Who is making a sale, my master?")
                    for company in companies:
                        if company.name == target:
                            if company.ad >= 15:
                                company.value = company.value + random.randint(0, sale)
                                company.ad = company.ad - 15
                                cs = 0
                            else:
                                easygui.msgbox("My master, the fools can not advertise.")
                                cs = 0
            # catastrophic event against one company; also sets chance = 6
            if command == "doomsday":
                cs = 1
                while cs:
                    pardon = easygui.enterbox("My master, it is not my place, but surely these fools deserve mercy")
                    if pardon == "yes":
                        cs = 0
                        pardon = "refunds will be given but its stock is trashed"
                    if pardon == "no":
                        cs = 0
                        pardon = "its stock has plummeted, some will never recover"
                cs = 1
                while cs:
                    target = easygui.enterbox("May you have mercy on thier souls")
                    for company in companies:
                        if company.name == target:
                            company.value = company.value - 400
                            company.ad = 0
                            cs = 0
                            # NOTE(review): `chance` / `patsi` are presumably
                            # read by the news/turn logic elsewhere — confirm.
                            chance = 6
                            patsi = target
            # restrict the plotted set to chosen companies (or revert to all)
            if command == "selected":
                target = easygui.enterbox("revert to all, my master? y/n")
                if target == "y":
                    selected = companies
                else:
                    cs = 1
                    selected = []
                    while 1:
                        target = easygui.enterbox("what is your selected company, my master? (end to exit)")
                        if target == "end":
                            break
                        for company in companies:
                            if company.name == target:
                                # NOTE(review): membership test uses the name
                                # string against a list of company objects, so
                                # it is always True — duplicates possible.
                                if target not in selected:
                                    selected.append(company)
                # redraw using only the selected companies' histories
                screen.fill([255,255,255])
                cs = 0
                for company in selected:
                    if (company.value > 0):
                        pygame.draw.lines(screen, company.color, False, company.history, 2)
                        surf1 = font.render(str(company.value), 1, company.color)
                        screen.blit(surf1, [((time*10)+300),590 - company.value])
                        surf1 = font.render(str(company.value) + " " + company.name + " " + str(company.percentchange) +"%", 1, company.color)
                        screen.blit(surf1, [800, (30+(cs*70))])
                        surf1 = font.render("advertisement status" + str(company.ad), 1, company.color)
                        screen.blit(surf1, [0, (30 + cs*70)])
                    else:
                        surf1 = font.render(company.name + " is dead", 1, (0,0,0))
                        screen.blit(surf1, [0,30+(cs*70)])
                        screen.blit(surf1, [800, 30+(cs*70)])
                    cs = cs + 1
                pygame.draw.rect(screen, (0,0,0),[300, 500, 500, 2], 0)
                pygame.draw.rect(screen, (0,0,0),[300, 0, 2, 500], 0)
                for x in range (0, 51):
                    pygame.draw.rect(screen, [0,0,0], [300 + (x * 10),0, 1, 600], 0)
                for x in range (0, 13):
                    pygame.draw.rect(screen, [0,0,0], [300, 600-(x*50), 500, 2], 0)
                for x in range (0, 13):
                    number = x * 50
                    number = font.render(str(number), 1, (0,0,0))
                    screen.blit(number, [260, (-x * 50) + 590])
                # ticker line of random small news at the bottom of the screen
                news = random.choice (smallnews)
                surf18 = font.render(news, 1, (0,0,0))
                screen.blit(surf18, [0, 640])
                pygame.display.flip()
|
# coding: utf-8
# Local imports
from ... import load_record
def empty(self,
          upload: bool = True) -> None:
    """
    Builds new empty PotentialProperties records based on the existing
    potential LAMMPS (and KIM) records. Empty PotentialProperties records
    ensure that the property pages get generated, even if there is no
    calculation results yet. The new records will be added to the props list.

    Parameters
    ----------
    upload : bool, optional
        If True (default), then any new PotentialProperties records will be
        automatically saved to the database immediately after creating them.
    """
    # Class attributes
    database = self.database
    props = self.props

    # Fetch all LAMMPS potentials
    potentials_df = self.potentials_df
    print(len(potentials_df), 'LAMMPS potentials found')

    # Get potential LAMMPS keys from existing property records
    prop_imp_keys = [prop.potential_LAMMPS_key for prop in props]

    # Identify good potentials that do not have property records:
    # not already covered by a property record, and not flagged as bad.
    missing = (~(potentials_df.key.isin(prop_imp_keys))
               & ~(potentials_df.id.isin(database.potdb.bad_lammps_potentials)))
    print(missing.sum(), 'property records to be created')

    # Loop over missing potentials
    newprops = []
    for i in potentials_df[missing].index:
        series = potentials_df.loc[i]

        # Build a new (empty) property record tied to the potential
        newprop = load_record('PotentialProperties',
                              potential_key=series.potkey,
                              potential_id=series.potid,
                              potential_LAMMPS_key=series.key,
                              potential_LAMMPS_id=series.id)
        newprop.build_model()

        # Add it to the database immediately, or just keep it locally
        if upload:
            database.add_record(newprop)
            print(newprop.name, 'added to database')
        else:
            print(newprop.name, 'record created')
        newprops.append(newprop)

    # Register the new records with this object's props list
    if len(newprops) > 0:
        self.add_props(newprops)
from typing import Any
from enum import Enum
from time import sleep
import matplotlib.pyplot as plt
import numpy as np
from graph import GeospatialGraph
import osmnx as ox
from shapely import line_interpolate_point
import geopandas as gpd
import neworder as no
class Status(Enum):
    """Epidemiological state of an agent."""
    SUSCEPTIBLE = 0
    INFECTED = 1
    IMMUNE = 2
    DEAD = 3

    @property
    def rgba(self) -> tuple[float, float, float, float]:
        """RGBA colour used to plot an agent in this state."""
        palette = {
            Status.SUSCEPTIBLE: (1.0, 1.0, 1.0, 1.0),  # white
            Status.INFECTED: (1.0, 0.0, 0.0, 1.0),     # red
            Status.IMMUNE: (0.0, 1.0, 0.0, 1.0),       # green
            Status.DEAD: (0.0, 0.0, 0.0, 1.0),         # black
        }
        return palette[self]
class Infection(no.Model):
    """Agent-based SIR-with-mortality model on a real road network.

    Agents move along shortest paths of an OSM-derived graph; infection
    spreads by proximity, with recovery after a fixed time and a per-step
    chance of death while infected. Visualised live with matplotlib/osmnx.

    NOTE(review): indentation below was reconstructed during review from
    control flow — confirm nesting against the original file.
    """

    def __init__(self,
                 point: tuple[float, float],
                 dist: float,
                 n_agents: int,
                 n_infected: int,
                 speed: float,
                 infection_radius: float,
                 recovery_time: int,
                 mortality: float) -> None:
        # point: (lat, lon)-style centre for the graph query; dist: radius.
        # speed/infection_radius are presumably in the CRS units (metres for
        # epsg:27700) — TODO confirm.
        super().__init__(no.LinearTimeline(0.0, 1.0), no.MonteCarlo.deterministic_independent_stream)
        # expose the model's MC engine to numpy
        self.nprand = no.as_np(self.mc)
        # create the spatial domain
        self.domain = GeospatialGraph.from_point(point, dist, network_type="drive", crs='epsg:27700')
        # set the parameters
        self.infection_radius = infection_radius
        self.recovery_time = recovery_time
        # per-step death probability such that compounding over
        # `recovery_time` steps yields the overall `mortality`
        self.marginal_mortality = 1.0 - (1.0 - mortality) ** (1.0 / recovery_time)
        # create the agent data, which is stored in a geopandas geodataframe
        start_positions = self.domain.all_nodes.sample(n=n_agents, random_state=self.nprand, replace=True).index.values
        # lognormal speeds centred on `speed` (sigma=0.2)
        speeds = self.nprand.lognormal(np.log(speed), 0.2, n_agents)
        agents = gpd.GeoDataFrame(data={"node": start_positions, "speed": speeds, "status": Status.SUSCEPTIBLE, "t_infect": no.time.never()})
        # each agent gets a random destination and the shortest path to it;
        # `offset` is the distance travelled along that path so far
        agents["dest"] = agents["node"].apply(self.__random_next_dest)
        agents["path"] = agents[["node", "dest"]].apply(lambda r: self.domain.shortest_path(r["node"], r["dest"], weight="length"), axis=1)
        agents["dist"] = agents.path.apply(lambda p: p.length)
        agents["offset"] = 0.0
        agents["geometry"] = agents["path"].apply(lambda linestr: line_interpolate_point(linestr, 0))
        # seed the initial infections
        infected = self.nprand.choice(agents.index, n_infected, replace=False)
        agents.loc[infected, "status"] = Status.INFECTED
        agents.loc[infected, "t_infect"] = self.timeline.index
        self.agents = agents
        self.fig, self.g = self.__init_visualisation()

    def step(self) -> None:
        """One timestep: move, infect, recover, die, then redraw."""
        self.__update_position()
        self.__infect_nearby()
        self.__recover()
        self.__succumb()
        num_infected = (self.agents.status == Status.INFECTED).sum()
        num_immune = (self.agents.status == Status.IMMUNE).sum()
        num_dead = (self.agents.status == Status.DEAD).sum()
        self.__update_visualisation(num_infected, num_immune, num_dead)
        # epidemic over: pause so the final frame is visible, then stop
        if num_infected == 0:
            sleep(5)
            self.halt()
            self.finalise()

    def finalise(self) -> None:
        """Log summary statistics at the end of the run."""
        no.log(f"total steps: {self.timeline.index}")
        no.log(f"infections: {len(self.agents.t_infect.dropna())}")
        no.log(f"recoveries: {(self.agents.status == Status.IMMUNE).sum()}")
        no.log(f"deaths: {(self.agents.status == Status.DEAD).sum()}")
        no.log(f"unaffected: {(self.agents.status == Status.SUSCEPTIBLE).sum()}")

    def __random_next_dest(self, node: int) -> int:
        """Sample a random destination node distinct from `node`."""
        # ensure dest is different from origin
        dest = node
        while dest == node:
            dest = self.domain.all_nodes.sample(n=1, random_state=self.nprand).index.values[0]
        return dest

    def __update_position(self) -> None:
        """Advance each agent along its path; re-route those that arrive."""
        self.agents.offset += self.agents.speed
        # move agent along its route
        self.agents["geometry"] = self.agents[["path", "offset"]].apply(lambda r: line_interpolate_point(r["path"], r["offset"]), axis=1)
        # check if arrived at destination and set a new destination if necessary
        overshoots = self.agents.offset >= self.agents.dist
        # NOTE(review): `overshoots` is a boolean mask, so `.empty` is False
        # whenever there are any agents at all — was `.any()` intended? The
        # masked updates below are no-ops when nothing overshoots, so the
        # result is the same, just with wasted work.
        if not overshoots.empty:
            # offset <- offset - dist
            self.agents.loc[overshoots, "offset"] -= self.agents.loc[overshoots, "dist"]
            # node <- dest
            self.agents.loc[overshoots, "node"] = self.agents.loc[overshoots, "dest"]
            # dest <- random
            self.agents.loc[overshoots, "dest"] = self.agents.loc[overshoots, "node"].apply(self.__random_next_dest)
            # path <- (node, dest), dist <- new_dist
            self.agents.loc[overshoots, "path"] = self.agents.loc[overshoots, ["node", "dest"]] \
                .apply(lambda r: self.domain.shortest_path(r["node"], r["dest"], weight="length"), axis=1)
            self.agents.loc[overshoots, "dist"] = self.agents.loc[overshoots, "path"].apply(lambda p: p.length)
            # finally update position
            self.agents.loc[overshoots, "geometry"] = self.agents.loc[overshoots, "path"].apply(lambda linestr: line_interpolate_point(linestr, 0))

    def __infect_nearby(self) -> None:
        """Mark susceptible agents within infection_radius of an infected one."""
        infected = self.agents[self.agents.status == Status.INFECTED].geometry
        susceptible = self.agents[self.agents.status == Status.SUSCEPTIBLE].geometry
        new_infections = []
        # loop over smallest group for efficiency
        if len(infected) < len(susceptible):
            for i in infected:
                new = susceptible.geometry.distance(i) < self.infection_radius
                # new[new].index gives us only the index values corresponding to True
                new_infections.extend(new[new].index)
        else:
            for i, p in susceptible.items():
                new = infected.geometry.distance(p) < self.infection_radius
                if new.any():
                    new_infections.append(i)
        self.agents.loc[new_infections, "status"] = Status.INFECTED
        self.agents.loc[new_infections, "t_infect"] = self.timeline.index

    def __recover(self) -> None:
        """Flip infected agents to immune once recovery_time has elapsed."""
        t = self.timeline.index
        self.agents.loc[(t - self.agents.t_infect >= self.recovery_time) & (self.agents.status == Status.INFECTED), "status"] = Status.IMMUNE

    def __succumb(self) -> None:
        """Kill infected agents with per-step probability marginal_mortality."""
        infected = self.agents[self.agents.status == Status.INFECTED]
        death = self.mc.hazard(self.marginal_mortality, len(infected)).astype(bool)
        self.agents.loc[infected[death].index.values, "status"] = Status.DEAD
        # dead agents stop moving
        self.agents.loc[infected[death].index.values, "speed"] = 0.0

    def __init_visualisation(self) -> tuple[Any, Any]:
        """Set up the interactive map plot; returns (figure, scatter artist)."""
        plt.ion()
        fig, ax = ox.plot_graph(self.domain.graph, bgcolor="w", node_size=5, edge_linewidth=2, edge_color="#777777", figsize=(12,9))
        plt.tight_layout()
        # optionally add a basemap:
        # import contextily as ctx
        # ctx.add_basemap(ax, crs=self.domain.crs, url=ctx.providers.OpenTopoMap)
        g = ax.scatter(self.agents.geometry.x, self.agents.geometry.y, color=self.agents.status.apply(lambda c: c.rgba), edgecolor='k')
        fig.suptitle("[q to quit]")
        fig.canvas.mpl_connect('key_press_event', lambda event: self.halt() if event.key == "q" else None)
        fig.canvas.flush_events()
        return fig, g

    def __update_visualisation(self, num_infected, num_immune, num_dead) -> None:
        """Refresh agent positions/colours and the status line in the title."""
        offsets = np.array(list(zip(self.agents.geometry.x, self.agents.geometry.y)))
        colours = self.agents.status.apply(lambda c: c.rgba)
        self.g.set_offsets(offsets)
        self.g.set_facecolors(colours)
        self.fig.suptitle(f"step {self.timeline.index}: inf={num_infected} imm={num_immune} dead={num_dead} / {len(self.agents)} [q to quit]")
        self.fig.canvas.flush_events()
|
# Body mass index calculator
def compute_bmi(weight_kg: float, height_m: float) -> float:
    """Return the body mass index: weight (kg) divided by height (m) squared.

    Raises
    ------
    ValueError
        If height_m is not positive.
    """
    if height_m <= 0:
        raise ValueError("height must be positive")
    return weight_kg / (height_m ** 2)


if __name__ == "__main__":
    # Prompt for metric weight and height.
    # float() instead of int(): height in metres is normally fractional
    # (e.g. 1.75) — the original int() rejected such input outright.
    # (Prompt typos "weigth"/"heigth"/"meteres" also fixed.)
    weight = float(input("Enter weight in kilograms "))
    height = float(input("Enter height in metres "))
    BMI = compute_bmi(weight, height)
    print("Body mass index is: ", BMI)
|
# coding: utf-8
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import lake_envs as lake_env
def print_policy(policy, action_names):
    """Print the policy in human-readable format.

    Parameters
    ----------
    policy: np.ndarray
        Array of state to action number mappings
    action_names: dict
        Mapping of action numbers to characters representing the action.
    """
    # Work on a string copy so the numeric policy is left untouched
    readable = policy.astype('str')
    for number, label in action_names.items():
        # Replace every occurrence of this action number with its label
        np.place(readable, policy == number, label)
    print(readable)
def value_function_to_policy(env, gamma, value_function):
    """Output action numbers for each state in value_function.

    Parameters
    ----------
    env: gym.core.Environment
        Environment to compute policy for. Must have nS, nA, and P as
        attributes.
    gamma: float
        Discount factor. Number in range [0, 1)
    value_function: np.ndarray
        Value of each state.

    Returns
    -------
    np.ndarray
        An array of integers. Each integer is the optimal action to take
        in that state according to the environment dynamics and the
        given value function.
    """
    # NOTE: You might want to first calculate Q value, followed by argmax
    actions = [lake_env.LEFT, lake_env.RIGHT, lake_env.UP, lake_env.DOWN]
    q_values = np.zeros(shape=(env.nS, len(actions)))  # (s, a)
    num_states = env.nS
    # Traverse through all states and find the best action
    for s in range(num_states):
        for idx, a in enumerate(actions):
            new_val = 0
            # env.P[s][a] is a list of (probability, next_state, reward, done)
            for (prob, next_state, reward, is_terminal) in env.P[s][a]:
                # Terminal state must have a value of 0
                # FIXME: Not sure when to make the value 0 for terminal state
                # NOTE(review): this zeroes entries of the *caller's*
                # value_function array in place — the side effect escapes
                # this function; confirm callers expect that.
                if is_terminal:
                    value_function[next_state] = 0.0
                new_val += prob * (reward + gamma * value_function[next_state])
            q_values[s][idx] = new_val
    # argmax gives an index into `actions`; map back to the action numbers
    best_actions = np.argmax(q_values, axis=1)
    policy = np.array([actions[action] for action in best_actions])
    assert(len(policy) == len(value_function))
    return policy
def evaluate_policy_sync(env,
                         gamma,
                         policy,
                         max_iterations=int(1e3),
                         tol=1e-3):
    """Performs policy evaluation.

    Evaluates the value of a given policy.

    Parameters
    ----------
    env: gym.core.Environment
        The environment to compute value iteration for. Must have nS,
        nA, and P as attributes.
    gamma: float
        Discount factor, must be in range [0, 1)
    policy: np.array
        The policy to evaluate. Maps states to actions.
    max_iterations: int
        The maximum number of iterations to run before stopping.
    tol: float
        Determines when value function has converged.

    Returns
    -------
    np.ndarray, int
        The value for the given policy and the number of iterations till
        the value function converged.
    """
    num_states = env.nS
    value_func = np.zeros(num_states)  # initialize value function
    it_convergence = 0  # number of iterations until convergence
    for it in range(max_iterations):
        # Synchronous sweep: new values are written to a separate array so
        # every state update reads the previous iteration's values
        new_value_func = np.zeros_like(value_func)  # to store new values
        it_convergence += 1
        delta = 0  # largest single-state change this sweep (sup-norm)
        for s in range(num_states):
            old_val = value_func[s]
            # Compute the new value
            a = policy[s]
            new_val = 0
            for (prob, next_state, reward, is_terminal) in env.P[s][a]:
                # Terminal state must have a value of 0
                # FIXME: Not sure when to make the value 0 for terminal state
                # NOTE(review): this zeroes the *old* array mid-sweep, so the
                # update is not purely synchronous for terminal successors —
                # confirm this is the intended handling.
                if is_terminal:
                    value_func[next_state] = 0.0
                new_val += prob * (reward + gamma * value_func[next_state])
            new_value_func[s] = new_val
            delta = max(delta, np.abs(old_val - new_val))
        value_func = new_value_func  # Update value function for next iter
        # Check for convergence criterion
        if delta < tol:
            break
    return value_func, it_convergence
def evaluate_policy_async_ordered(env,
                                  gamma,
                                  policy,
                                  max_iterations=int(1e3),
                                  tol=1e-3):
    """Performs policy evaluation.

    Evaluates the value of a given policy by asynchronous DP. Updates states
    in their 1-N order.

    Parameters
    ----------
    env: gym.core.Environment
        The environment to compute value iteration for. Must have nS,
        nA, and P as attributes.
    gamma: float
        Discount factor, must be in range [0, 1)
    policy: np.array
        The policy to evaluate. Maps states to actions.
    max_iterations: int
        The maximum number of iterations to run before stopping.
    tol: float
        Determines when value function has converged.

    Returns
    -------
    np.ndarray, int
        The value for the given policy and the number of iterations till
        the value function converged.
    """
    num_states = env.nS
    value_func = np.zeros(num_states)  # initialize value function
    it_convergence = 0  # number of iterations until convergence
    for it in range(max_iterations):
        it_convergence += 1
        delta = 0  # largest single-state change this sweep (sup-norm)
        # In-place (Gauss-Seidel-style) sweep: later states in the sweep
        # already see values updated earlier in the same sweep
        for s in range(num_states):
            old_val = value_func[s]
            # Compute the new value
            a = policy[s]
            new_val = 0
            for (prob, next_state, reward, is_terminal) in env.P[s][a]:
                # Terminal state must have a value of 0
                # FIXME: Not sure when to make the value 0 for terminal state
                if is_terminal:
                    value_func[next_state] = 0.0
                new_val += prob * (reward + gamma * value_func[next_state])
            value_func[s] = new_val
            delta = max(delta, np.abs(old_val - new_val))
        # Check for convergence criterion
        if delta < tol:
            break
    return value_func, it_convergence
def evaluate_policy_async_randperm(env,
                                   gamma,
                                   policy,
                                   max_iterations=int(1e3),
                                   tol=1e-3):
    """Performs policy evaluation.

    Evaluates the value of a policy. Updates states by randomly sampling index
    order permutations.

    Parameters
    ----------
    env: gym.core.Environment
        The environment to compute value iteration for. Must have nS,
        nA, and P as attributes.
    gamma: float
        Discount factor, must be in range [0, 1)
    policy: np.array
        The policy to evaluate. Maps states to actions.
    max_iterations: int
        The maximum number of iterations to run before stopping.
    tol: float
        Determines when value function has converged.

    Returns
    -------
    np.ndarray, int
        The value for the given policy and the number of iterations till
        the value function converged.
    """
    num_states = env.nS
    value_func = np.zeros(num_states)  # initialize value function
    it_convergence = 0  # number of iterations until convergence
    for it in range(max_iterations):
        it_convergence += 1
        delta = 0  # largest single-state change this sweep (sup-norm)
        # Shuffle the states
        # NOTE(review): uses the global np.random state, not a seeded
        # generator — results are not reproducible unless the caller seeds.
        states = np.arange(num_states)
        np.random.shuffle(states)
        # In-place sweep over the permuted state order
        for s in states:
            old_val = value_func[s]
            # Compute the new value
            a = policy[s]
            new_val = 0
            for (prob, next_state, reward, is_terminal) in env.P[s][a]:
                # Terminal state must have a value of 0
                # FIXME: Not sure when to make the value 0 for terminal state
                if is_terminal:
                    value_func[next_state] = 0.0
                new_val += prob * (reward + gamma * value_func[next_state])
            value_func[s] = new_val
            delta = max(delta, np.abs(old_val - new_val))
        # Check for convergence criterion
        if delta < tol:
            break
    return value_func, it_convergence
def improve_policy(env, gamma, value_func, policy):
    """Performs policy improvement.

    Given a policy and value function, improves the policy.

    Parameters
    ----------
    env: gym.core.Environment
        The environment to compute value iteration for. Must have nS,
        nA, and P as attributes.
    gamma: float
        Discount factor, must be in range [0, 1)
    value_func: np.ndarray
        Value function for the given policy.
    policy: dict or np.array
        The policy to improve. Maps states to actions.

    Returns
    -------
    bool, np.ndarray
        Returns true if policy changed. Also returns the new policy.
    """
    # Greedy policy with respect to the current value function
    greedy_policy = value_function_to_policy(env, gamma, value_func)
    assert(len(greedy_policy) == len(policy))
    # The policy "changed" iff the greedy policy differs anywhere
    changed = not np.array_equal(greedy_policy, policy)
    return changed, greedy_policy
def policy_iteration_sync(env, gamma, max_iterations=int(1e3), tol=1e-3):
    """Runs policy iteration.

    See page 85 of the Sutton & Barto Second Edition book.
    You should use the improve_policy() and evaluate_policy_sync() methods to
    implement this method.

    Parameters
    ----------
    env: gym.core.Environment
        The environment to compute value iteration for. Must have nS,
        nA, and P as attributes.
    gamma: float
        Discount factor, must be in range [0, 1)
    max_iterations: int
        The maximum number of iterations to run before stopping.
    tol: float
        Determines when value function has converged.

    Returns
    -------
    (np.ndarray, np.ndarray, int, int)
        Returns optimal policy, value function, number of policy
        improvement iterations, and number of value iterations.
    """
    n = env.nS
    policy = np.zeros(n, dtype='int')   # start from the all-zeros policy
    value_func = np.zeros(n)
    n_improve = 0  # policy improvement steps taken
    n_eval = 0     # total policy evaluation sweeps
    for _ in range(max_iterations):
        # Evaluate the current policy, then act greedily on its value function
        value_func, sweeps = evaluate_policy_sync(env, gamma, policy,
                                                  max_iterations, tol)
        n_eval += sweeps
        changed, policy = improve_policy(env, gamma, value_func, policy)
        n_improve += 1
        # A stable policy is optimal: stop
        if not changed:
            break
    return policy, value_func, n_improve, n_eval
def policy_iteration_async_ordered(env, gamma, max_iterations=int(1e3),
                                   tol=1e-3):
    """Runs policy iteration.

    You should use the improve_policy and evaluate_policy_async_ordered methods
    to implement this method.

    Parameters
    ----------
    env: gym.core.Environment
        The environment to compute value iteration for. Must have nS,
        nA, and P as attributes.
    gamma: float
        Discount factor, must be in range [0, 1)
    max_iterations: int
        The maximum number of iterations to run before stopping.
    tol: float
        Determines when value function has converged.

    Returns
    -------
    (np.ndarray, np.ndarray, int, int)
        Returns optimal policy, value function, number of policy
        improvement iterations, and number of value iterations.
    """
    n = env.nS
    policy = np.zeros(n, dtype='int')   # start from the all-zeros policy
    value_func = np.zeros(n)
    n_improve, n_eval = 0, 0
    for _ in range(max_iterations):
        # Asynchronous (ordered) evaluation, then greedy improvement
        value_func, sweeps = evaluate_policy_async_ordered(env, gamma, policy,
                                                           max_iterations, tol)
        n_eval += sweeps
        changed, policy = improve_policy(env, gamma, value_func, policy)
        n_improve += 1
        # A stable policy is optimal: stop
        if not changed:
            break
    return policy, value_func, n_improve, n_eval
def policy_iteration_async_randperm(env, gamma, max_iterations=int(1e3),
                                    tol=1e-3):
    """Runs policy iteration.

    You should use the improve_policy and evaluate_policy_async_randperm
    methods to implement this method.

    Parameters
    ----------
    env: gym.core.Environment
        The environment to compute value iteration for. Must have nS,
        nA, and P as attributes.
    gamma: float
        Discount factor, must be in range [0, 1)
    max_iterations: int
        The maximum number of iterations to run before stopping.
    tol: float
        Determines when value function has converged.

    Returns
    -------
    (np.ndarray, np.ndarray, int, int)
        Returns optimal policy, value function, number of policy
        improvement iterations, and number of value iterations.
    """
    n = env.nS
    policy = np.zeros(n, dtype='int')   # start from the all-zeros policy
    value_func = np.zeros(n)
    n_improve, n_eval = 0, 0
    for _ in range(max_iterations):
        # Asynchronous (random-permutation) evaluation, then improvement
        value_func, sweeps = evaluate_policy_async_randperm(env, gamma, policy,
                                                            max_iterations, tol)
        n_eval += sweeps
        changed, policy = improve_policy(env, gamma, value_func, policy)
        n_improve += 1
        # A stable policy is optimal: stop
        if not changed:
            break
    return policy, value_func, n_improve, n_eval
def value_iteration_sync(env, gamma, max_iterations=int(1e3), tol=1e-3):
    """Runs value iteration for a given gamma and environment.

    Parameters
    ----------
    env: gym.core.Environment
        The environment to compute value iteration for. Must have nS,
        nA, and P as attributes.
    gamma: float
        Discount factor, must be in range [0, 1)
    max_iterations: int
        The maximum number of iterations to run before stopping.
    tol: float
        Determines when value function has converged.

    Returns
    -------
    np.ndarray, iteration
        The value function and the number of iterations it took to converge.
    """
    num_states = env.nS
    actions = [lake_env.LEFT, lake_env.RIGHT, lake_env.UP, lake_env.DOWN]
    value_func = np.zeros(num_states)  # initialize value function
    it_convergence = 0
    for it in range(max_iterations):
        it_convergence += 1
        # Synchronous sweep: new values go to a separate array so every state
        # reads the previous iteration's values
        new_value_func = np.zeros_like(value_func)
        delta = 0  # largest single-state change this sweep (sup-norm)
        # Traverse through all states and find the best action
        for s in range(num_states):
            old_val = value_func[s]
            best_val = float('-inf')  # stores the best action return
            for a in actions:
                new_val = 0
                for (prob, next_state, reward, is_terminal) in env.P[s][a]:
                    # Terminal state must have a value of 0
                    # FIXME: Unsure about making the value of terminal state 0
                    # NOTE(review): zeroes the *old* array mid-sweep — not a
                    # purely synchronous update for terminal successors.
                    if is_terminal:
                        value_func[next_state] = 0.0
                    new_val += prob * (reward + gamma * value_func[next_state])
                best_val = max(best_val, new_val)
            new_value_func[s] = best_val  # assign best return
            delta = max(delta, np.abs(old_val - best_val))
        value_func = new_value_func  # Update value function for next iter
        # Check for convergence criterion
        if delta < tol:
            break
    return value_func, it_convergence
def value_iteration_async_ordered(env,
                                  gamma,
                                  max_iterations=int(1e3),
                                  tol=1e-3):
    """Runs value iteration for a given gamma and environment.

    Updates states in their 1-N order.

    Parameters
    ----------
    env: gym.core.Environment
        The environment to compute value iteration for. Must have nS,
        nA, and P as attributes.
    gamma: float
        Discount factor, must be in range [0, 1)
    max_iterations: int
        The maximum number of iterations to run before stopping.
    tol: float
        Determines when value function has converged.

    Returns
    -------
    np.ndarray, iteration
        The value function and the number of iterations it took to converge.
    """
    num_states = env.nS
    actions = [lake_env.LEFT, lake_env.RIGHT, lake_env.UP, lake_env.DOWN]
    value_func = np.zeros(num_states)  # initialize value function
    it_convergence = 0
    for it in range(max_iterations):
        it_convergence += 1
        delta = 0  # largest single-state change this sweep (sup-norm)
        # Traverse through all states and find the best action.
        # In-place (Gauss-Seidel-style) sweep: later states already see
        # values updated earlier in the same sweep
        for s in range(num_states):
            old_val = value_func[s]
            best_val = float('-inf')  # stores the best action return
            for a in actions:
                new_val = 0
                for (prob, next_state, reward, is_terminal) in env.P[s][a]:
                    # Terminal state must have a value of 0
                    # FIXME: Unsure about making the value of terminal state 0
                    if is_terminal:
                        value_func[next_state] = 0.0
                    new_val += prob * (reward + gamma * value_func[next_state])
                best_val = max(best_val, new_val)
            value_func[s] = best_val  # assign best return
            delta = max(delta, np.abs(old_val - best_val))
        # Check for convergence criterion
        if delta < tol:
            break
    return value_func, it_convergence
def value_iteration_async_randperm(env, gamma, max_iterations=int(1e3),
                                   tol=1e-3):
    """Run asynchronous value iteration, visiting states in a fresh random
    permutation on every sweep.

    Parameters
    ----------
    env: gym.core.Environment
        The environment to compute value iteration for. Must have nS,
        nA, and P as attributes.
    gamma: float
        Discount factor, must be in range [0, 1)
    max_iterations: int
        The maximum number of iterations to run before stopping.
    tol: float
        Determines when value function has converged.

    Returns
    -------
    np.ndarray, int
        The value function and the number of iterations it took to converge.
    """
    actions = [lake_env.LEFT, lake_env.RIGHT, lake_env.UP, lake_env.DOWN]
    value_func = np.zeros(env.nS)
    sweeps = 0
    for _ in range(max_iterations):
        sweeps += 1
        max_change = 0
        # Random visit order for this sweep (permutation = arange + shuffle).
        for state in np.random.permutation(env.nS):
            previous = value_func[state]
            best_return = float('-inf')
            for action in actions:
                expected = 0
                for prob, nxt, reward, done in env.P[state][action]:
                    # Pin terminal states to zero value before bootstrapping.
                    if done:
                        value_func[nxt] = 0.0
                    expected += prob * (reward + gamma * value_func[nxt])
                best_return = max(best_return, expected)
            value_func[state] = best_return
            max_change = max(max_change, np.abs(previous - best_return))
        if max_change < tol:
            break
    return value_func, sweeps
def value_iteration_async_custom(env, gamma, max_iterations=int(1e3),
                                 tol=1e-3):
    """Runs value iteration for a given gamma and environment.

    Updates states in order of increasing Manhattan distance to the goal.

    Parameters
    ----------
    env: gym.core.Environment
        The environment to compute value iteration for. Must have nS,
        nA, and P as attributes.
    gamma: float
        Discount factor, must be in range [0, 1)
    max_iterations: int
        The maximum number of iterations to run before stopping.
    tol: float
        Determines when value function has converged.

    Returns
    -------
    np.ndarray, int
        The value function and the number of iterations it took to converge.
    """
    num_states = env.nS
    actions = [lake_env.LEFT, lake_env.RIGHT, lake_env.UP, lake_env.DOWN]
    value_func = np.zeros(num_states)  # initialize value function
    it_convergence = 0
    # Fix: derive the square grid size from the state count instead of the
    # old interactive input() prompt, which blocked non-interactive runs.
    grid_size = int(round(np.sqrt(num_states)))
    states = get_manhattan_ordering(grid_size)
    for it in range(max_iterations):
        it_convergence += 1
        delta = 0
        # Traverse states closest-to-goal first and update in place.
        for s in states:
            old_val = value_func[s]
            best_val = float('-inf')  # best action return for this state
            for a in actions:
                new_val = 0
                for prob, next_state, reward, is_terminal in env.P[s][a]:
                    # Pin terminal states to zero value before bootstrapping.
                    if is_terminal:
                        value_func[next_state] = 0.0
                    new_val += prob * (reward + gamma * value_func[next_state])
                best_val = max(best_val, new_val)
            value_func[s] = best_val
            delta = max(delta, np.abs(old_val - best_val))
        # Check for convergence criterion
        if delta < tol:
            break
    return value_func, it_convergence
######################
# Optional Helpers #
######################
# Here we provide some helper functions simply for your convinience.
# You DON'T necessarily need them, especially "env_wrapper" if
# you want to deal with it in your different ways.
# Feel FREE to change/delete these helper functions.
def display_policy_letters(env, policy):
    """Displays a policy as letters, as required by problem 2.2 & 2.6

    Parameters
    ----------
    env: gym.core.Environment
    policy: np.ndarray, with shape (env.nS)
    """
    # First letter of each action name, laid out on the environment grid.
    letters = [lake_env.action_names[a][0] for a in policy]
    grid = np.array(letters).reshape(env.nrow, env.ncol)
    for r in range(env.nrow):
        print(''.join(grid[r, :]))
def env_wrapper(env_name):
    """Placeholder for a convenience wrapper around a loaded environment.

    Intended usage (once enabled):
        envd4 = env_load('Deterministic-4x4-FrozenLake-v0')
        envd8 = env_load('Deterministic-8x8-FrozenLake-v0')

    The commented sketch below would attach dense matrices to the env:
    T[s, a, s'] = transition probability, R[s, a, s'] = reward.
    Currently a no-op that returns None.
    """
    # env = gym.make(env_name)
    # env.T = np.zeros((env.nS, env.nA, env.nS))
    # env.R = np.zeros((env.nS, env.nA, env.nS))
    # for state in range(env.nS):
    #     for action in range(env.nA):
    #         for prob, nextstate, reward, is_terminal in env.P[state][action]:
    #             env.T[state, action, nextstate] = prob
    #             env.R[state, action, nextstate] = reward
    # return env
    pass
def value_func_heatmap(env, value_func):
    """Visualize a policy as a heatmap, as required by problem 2.3 & 2.5

    Note that you might need:
        import matplotlib.pyplot as plt
        import seaborn as sns

    Parameters
    ----------
    env: gym.core.Environment
    value_func: np.ndarray, with shape (env.nS)
    """
    fig, ax = plt.subplots(figsize=(7, 6))
    grid = np.reshape(value_func, [env.nrow, env.ncol])
    # Row labels run bottom-up, column labels left-to-right.
    row_labels = np.arange(1, env.nrow + 1)[::-1]
    col_labels = np.arange(1, env.nrow + 1)
    sns.heatmap(grid, annot=True, linewidths=.5, cmap="GnBu_r", ax=ax,
                yticklabels=row_labels, xticklabels=col_labels)
    plt.show()
    return None
def get_manhattan_ordering(grid_size):
    """Generate states sorted by Manhattan distance to the goal.

    Parameters
    ----------
    grid_size: int
        Side length of the square grid; only 4 and 8 are supported
        (goal positions are hard-coded from the corresponding maps).

    Returns
    -------
    list of int
        State indices in increasing distance-to-goal order.

    Raises
    ------
    ValueError
        If grid_size is neither 4 nor 8. (The old code left goal_pos as an
        empty tuple and crashed later inside the sort key instead.)
    """
    if grid_size == 8:
        goal_pos = (7, 1)  # specified in the map
    elif grid_size == 4:
        goal_pos = (1, 1)  # specified in the map
    else:
        raise ValueError('unsupported grid_size: {}'.format(grid_size))
    states = np.arange(grid_size * grid_size)
    states = sorted(states, key=lambda x: get_manhattan_distance(x,
                                                                 goal_pos,
                                                                 grid_size))
    return states
def get_cartesian_coordinates(n, grid_size):
    """Convert a flat (row-major) state number to (row, col) coordinates.

    Parameters
    ----------
    n: int
        State number.
    grid_size: int
        Side length of the square grid.
    """
    # divmod gives quotient (row) and remainder (col) in one step.
    return divmod(n, grid_size)
def get_manhattan_distance(n, coord2, grid_size):
    """Calculate the Manhattan distance between a state and the goal.

    Parameters
    ----------
    n: int
        State number (row-major index into the square grid).
    coord2: tuple
        Goal state cartesian coordinate (row, col).
    grid_size: int
        Side length of the square grid.
    """
    row, col = divmod(n, grid_size)
    return np.abs(row - coord2[0]) + np.abs(col - coord2[1])
|
# Public API of the Q-policy-gradient subpackage.
from flare.qpolgrad.base import BaseQPolicyGradient
from flare.qpolgrad.ddpg import DDPG
from flare.qpolgrad.td3 import TD3
from flare.qpolgrad.sac import SAC

name = "qpolgrad"
__all__ = ["BaseQPolicyGradient", "DDPG", "TD3", "SAC"]
|
"""Escaping backlash characters.
A backlash is used to define metacharacters in regex.
So to cover them as characters, you need to escape them and use '\\'.
"""
import re
pattern = re.compile('\\\\');
another_pattern = re.compile(r'\\');
result = pattern.match("\\author");
print(result.group());
res = another_pattern.match("\\at");
print(res.group());
|
def busca_no_campo(linha, coluna, campo):
    """Flood-fill the pasture component containing (linha, coluna).

    Counts sheep ('k') into the global oPasto and wolves ('v') into the
    global lPasto, marking every visited cell with '#'. The caller must
    reset oPasto/lPasto before each call and keep a '#' border around campo.

    Fix: iterative with an explicit stack — the original recursed once per
    cell and could hit Python's recursion limit on large pastures.
    """
    global oPasto, lPasto
    stack = [(linha, coluna)]
    while stack:
        lin, col = stack.pop()
        cell = campo[lin][col]
        if cell == "#":
            continue  # fence or already visited
        if cell == "k":
            oPasto += 1
        elif cell == "v":
            lPasto += 1
        campo[lin][col] = "#"  # mark visited
        stack.append((lin - 1, col))
        stack.append((lin + 1, col))
        stack.append((lin, col - 1))
        stack.append((lin, col + 1))
# Read the field dimensions, then build the field with a one-cell '#'
# border so the flood fill never walks off the edge.
linha, coluna = [int(x) for x in input().split(" ")]
campo = [["#"] * (coluna + 2) for _ in range(linha + 2)]
rows = [input() for _ in range(linha)]
for i in range(1, linha + 1):
    for j in range(1, coluna + 1):
        campo[i][j] = rows[i - 1][j - 1]

# For each pasture: if sheep outnumber wolves the sheep survive,
# otherwise the wolves eat them and survive themselves.
ovelha = lobo = 0
for i in range(1, linha + 1):
    for j in range(1, coluna + 1):
        oPasto = lPasto = 0
        busca_no_campo(i, j, campo)
        if oPasto > lPasto:
            ovelha += oPasto
        else:
            lobo += lPasto
print(ovelha, lobo)
from .pagination import StandardResultSetPagination
from .filters import OrbitalStatusFilter, LaunchSiteFilter, SourceFilter, OperationalStatusFilter, CatalogEntryFilter
from .catalogentry import CatalogEntryViewSet
from .operationalstatus import OperationalStatusViewSet
from .orbitalstatus import OrbitalStatusViewSet
from .datasource import DataSourceViewSet
from .tle import TLEViewSet
from .source import SourceViewSet
from .launchsite import LaunchSiteViewSet
from .compute import ComputeView |
from tkinter import *
from random import randint as rnd
def change_loc():
    # Jump the button to a random position with a random size, and bump
    # the click counter shown as its label.
    button.place(x = rnd(0,400), y = rnd(0, 400), width = rnd(50, 100), height = rnd(50, 100))
    # NOTE(review): += 1 relies on Tk returning the current text as an int
    # (the label was initialized with text=0) — confirm on the target
    # Tcl/Tk version, some return strings here.
    button['text'] += 1
# Build a 500x500 window with a single click-counting button that
# teleports to a random spot on every click.
window = Tk()
window.geometry('500x500')
# text=0 (an int) so change_loc can increment it on each click.
button = Button(window, text = 0, command = change_loc)
# Initial random placement; change_loc re-places it afterwards.
button.place(x = rnd(0,400), y = rnd(0, 400), width = rnd(50, 100), height = rnd(50, 100))
window.mainloop()
# -*- coding: utf-8 -*-
"""Dump all attendance log rows from a ZkSoftware device as ';'-separated
CSV on stdout."""
import pprint
import logging
import csv
import sys

from zksoftware.zkSoftware import ZkSoftware

logging.getLogger().setLevel(logging.INFO)

# Fetch every attendance record from the device.
device = ZkSoftware("192.168.19.19", 80)
logs = device.getAttLog('ALL')

writer = csv.writer(sys.stdout, delimiter=';')
for entry in logs:
    writer.writerow(entry.values())
import h5py
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from tensorflow.keras.models import load_model,Model
from scipy.io import loadmat
import tensorflow.compat.v1.keras.backend as K
K.set_image_data_format('channels_first')  # models below use NCHW layout
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
# Grow GPU memory on demand instead of grabbing it all up front.
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# Helper for inspecting the weights stored in a Keras model file.
def print_keras_wegiths(weight_file_path):
    """Print the layer groups and their attributes of a Keras HDF5 file.

    (The "wegiths" typo in the name is kept so existing callers don't break.)

    Parameters
    ----------
    weight_file_path: str
        Path to a .h5 weights file.
    """
    # Fix: open read-only via a context manager — the old call relied on
    # h5py's historical default mode ('a'), which opens read-write and can
    # lock or even modify the file.
    with h5py.File(weight_file_path, 'r') as f:
        # Each top-level item is a layer Group.
        for layer, g in f.items():
            print(" {}".format(layer))
            print(" Attributes:")
            # Group attrs hold the weight/bias names of the layer.
            for key, value in g.attrs.items():
                print(" {}: {}".format(key, value))
# Parameters identifying which poisoned-data variant to load.
# NOTE(review): the meaning of the three entries is not visible in this
# file — confirm against the data-generation code.
npp_params=[1,5,0.1]
path='EEG_Data/MI/'
# Load the train/validation/test splits plus their poisoned counterparts.
data = loadmat(path + 'data2-{}-{}-{}.mat'.format(npp_params[0], npp_params[1],npp_params[2]))
x_train = data['x_train']
y_train = data['y_train']
x_validation = data['x_validation']
y_validation = data['y_validation']
x_poison = data['x_poison']
y_poison = data['y_poison']
x_test= data['x_test']
y_test = data['y_test']
x_test_poison = data['x_test_poison']
y_test_poison = data['y_test_poison']
# loadmat returns 2-D label arrays; squeeze them down to 1-D vectors.
y_train=np.squeeze(y_train)
y_validation=np.squeeze(y_validation)
y_poison=np.squeeze(y_poison)
y_test=np.squeeze(y_test)
y_test_poison=np.squeeze(y_test_poison)
# Alternative checkpoints, kept for reference:
#model=load_model(filepath='model_orginal_poison_before{}_{}_{}.h5'.format(npp_params[0], npp_params[1], npp_params[2]))
model0 = load_model(filepath='model_orginal_poison_beforeMI{}_{}_{}.h5'.format(npp_params[0], npp_params[1], npp_params[2]))
#model = load_model(filepath='model_orginal_gauss0.1{}_{}_{}.h5'.format(npp_params[0], npp_params[1], npp_params[2]))
#model = load_model(filepath='model_orginal_poison_MIDRpruning-{}_{}_{}.h5'.format(npp_params[0], npp_params[1], npp_params[2]))
print(model0.summary())
# Use the 'flatten' layer output as the embedding to project with t-SNE.
print("Using loaded model to predict...")
model = Model(inputs=model0.input, outputs=model0.get_layer('flatten').output)
# Run the embedding model over the training data...
predict = model.predict(x_train)
print(predict.shape)
# ...and reduce the embeddings to 2-D with t-SNE.
tsne = TSNE(n_components=2, learning_rate=300, init='pca', random_state=0)
X_tsne_0 = tsne.fit_transform(predict)
# Scatter the t-SNE points colored by class label.
for j in range(predict.shape[0]):
    #if X_tsne_0[j, 0] <= -5 and X_tsne_0[j, 0] >= -7 and X_tsne_0[j, 1] <= 45 and X_tsne_0[j, 1] >= -33:
    if y_train[j] == 0:  # class 0 in blue
        plt.scatter(X_tsne_0[j, 0], X_tsne_0[j, 1], marker='x', c='b')
    elif y_train[j] == 1:  # class 1 in red
        plt.scatter(X_tsne_0[j, 0], X_tsne_0[j, 1], marker='x', c='r')
# plt.xticks(np.linspace(-7, -5.5, 10))
# plt.yticks(np.linspace(-0.5, 1.0, 10))
plt.show()
|
__author__ = 'Elisabetta Ronchieri'
import commands
import os
from tstorm.utils import utils
class Ldd:
    """Wrapper around the `ldd` command for listing the shared-library
    dependencies of a library file.

    NOTE(review): the `commands` module is Python 2 only (removed in 3);
    subprocess.getstatusoutput is the 3.x equivalent.
    """

    def __init__(self, library):
        # library: path of the library to inspect.
        self.library = library
        self.cmd = {
            'name': 'ldd'
        }
        self.otpt = {
            'status': '',
            'otpt': ''}

    def get_command(self):
        """Return the full command line, e.g. 'ldd /usr/lib/libfoo.so'."""
        return '%s %s' % (self.cmd['name'], self.library)

    def run_command(self):
        """Run ldd if available; return (status, output), or () if missing."""
        a = ()
        if utils.cmd_exist(self.cmd['name']):
            a = commands.getstatusoutput(self.get_command())
        return a

    def get_output(self):
        """Execute ldd and record PASS (with output) or FAILURE.

        Fix: the old code indexed a[0] unconditionally, raising IndexError
        whenever `ldd` was not installed (run_command returns an empty
        tuple in that case); that now reports FAILURE instead.
        """
        a = self.run_command()
        if a and a[0] == 0:
            self.otpt['status'] = 'PASS'
            self.otpt['otpt'] = a[1]
        else:
            self.otpt['status'] = 'FAILURE'
        return self.otpt
|
import numpy as np
import pandas as pd
from DimensionReduction import *
from ConvertSubConceptToCore import *
from sklearn.cluster import DBSCAN
from sklearn.metrics import adjusted_rand_score, completeness_score, homogeneity_score, v_measure_score
def _print_banner(matrix_name):
    """Print the sub-section banner for one matrix."""
    print(
        """
        ===================================
        ========= {} =========
        ===================================
        """.format(matrix_name)
    )


def _load_xy(matrix_file, terms_file, ontology_file):
    """Load a co-occurrence matrix and the ontology labels of its terms."""
    X = np.loadtxt(matrix_file, delimiter=",", dtype=float)
    Y = eliminate_non_existing_terms(terms_file, ontology_file)
    return X, Y


def _cluster_and_report(X, Y, matrix_name, tsne_components=2):
    """Filter rare classes, reduce X (PCA / t-SNE), run DBSCAN, and print
    clustering scores against the class labels in Y[:, 1]."""
    _print_banner(matrix_name)
    # Class frequencies let us drop classes too rare to cluster (< 3 samples).
    frequency = repartition(Y, ploting=True)
    X, Y, old_labels = eliminate_non_frequent_class(X, Y, frequency, 3)
    Sparsity(X, matrix_name, matrix_name + " Sparcity")
    X_pca = PCA_reduction(X, keep_n_component=2, n_print=20)
    X_tsne = TSNE_reduction(X, keep_n_component=tsne_components)
    db = DBSCAN(eps=1.2, min_samples=3)
    # NOTE(review): kept from the original — the "PCA" report clusters the
    # raw X, not X_pca; confirm whether that was intended.
    gs_dbscan_pca = db.fit_predict(X)
    gs_dbscan_tsne = db.fit_predict(X_tsne)
    for reduction, labels in (("PCA", gs_dbscan_pca), ("TSNE", gs_dbscan_tsne)):
        print("> With {} reduction:".format(reduction))
        print(" > Random score :", adjusted_rand_score(Y[:, 1], labels))
        print(" > Macro-precision score :", homogeneity_score(Y[:, 1], labels))
        print(" > Micro-precision score :", completeness_score(Y[:, 1], labels))
        print(" > V-measure score :", v_measure_score(Y[:, 1], labels))


def main():
    """Run DBSCAN clustering of ontology terms at sub-concept and
    core-concept granularity, on both co-occurrence matrices.

    Refactored: the four near-identical analysis sections now share the
    _load_xy / _cluster_and_report helpers.
    """
    # Input files; raw strings keep the Windows backslashes literal.
    window_matrix_file = r".\OutputDir\window_matrix.csv"
    window_terms_file = r".\OutputDir\window_matrix_terms.txt"
    sub_verb_matrix_file = r".\OutputDir\sub_verb_matrix.csv"
    subverb_terms_file = r".\OutputDir\sub_verb_matrix_terms.txt"
    abstract_concepts_file = r".\csv_docs\TP_CS_CoreConceptIn2Level.csv"
    ontology_file = r".\OutputDir\goldsorted.csv"

    print("===================================================")
    print("=========== DBSCAN On sub-core concepts ===========")
    print("===================================================")
    X, Y = _load_xy(sub_verb_matrix_file, subverb_terms_file, ontology_file)
    _cluster_and_report(X, Y, "sub_verb_matrix")
    X, Y = _load_xy(window_matrix_file, window_terms_file, ontology_file)
    _cluster_and_report(X, Y, "window_matrix")

    print("===================================================")
    print("============= DBSCAN On core concepts =============")
    print("===================================================")
    X, Y = _load_xy(sub_verb_matrix_file, subverb_terms_file, ontology_file)
    Y, core_concepts = convert_sub_concepts_to_core(Y, abstract_concepts_file)
    # tsne_components=3 preserved from the original (all other runs use 2).
    _cluster_and_report(X, Y, "sub_verb_matrix", tsne_components=3)
    X, Y = _load_xy(window_matrix_file, window_terms_file, ontology_file)
    Y, core_concepts = convert_sub_concepts_to_core(Y, abstract_concepts_file)
    _cluster_and_report(X, Y, "window_matrix")
def display_clusters(dbscan, Y):
    """Group the terms in Y by their DBSCAN cluster label and print each
    cluster.

    Parameters
    ----------
    dbscan: sequence of int
        Cluster label per sample (-1 = noise), as returned by fit_predict.
    Y: np.ndarray of shape (n, 2)
        Column 0 holds the term, column 1 its class label.
    """
    # Fix: key clusters by the labels actually present. The old code keyed
    # by range(len(set(dbscan))) and looked up label+1, which raises
    # KeyError whenever the labels are not exactly {-1, ..., max} — e.g.
    # when there are no noise points.
    clusters = {label + 1: [] for label in sorted(set(dbscan))}
    print("NB", len(clusters))
    # Distribute every term into its cluster bucket.
    for j in range(Y.shape[0]):
        clusters[dbscan[j] + 1].append(Y[j, 0])
    for cluster_id, cluster in clusters.items():
        print("cluster: " + str(cluster_id))
        print(cluster)
# Script entry point.
if __name__ == '__main__':
    main()
__author__ = 'luca'
from searcher import Searcher
from images.image_comparator import ImageComparator
from images.image_converter import ImageConverter
class FullSearch(Searcher):
    """Exhaustive block-matching search.

    For a block of image 1 anchored at (x_start, y_start), every candidate
    position of image 2 within +/- margin_size is scored with the mean
    absolute difference (MAD); the lowest-MAD position wins.
    """

    def __init__(self, block_size, margin_size):
        # block_size: side of the square block; margin_size: search radius.
        self.block_size = block_size
        self.margin_size = margin_size
        super(FullSearch, self).__init__()

    def search(self, image1_pixels, x_start, y_start, image2_pixels):
        """Return (best_x, best_y, best_MAD, MAD_checks_count).

        best_x/best_y are None when no candidate position was valid.
        Stops early on a perfect (MAD == 0) match.
        """
        self.reset_search()
        block_size = self.block_size
        margin_size = self.margin_size
        # Fix: float('inf') instead of the old 1000000 sentinel — a large
        # block can legitimately score above 1e6, which the old code
        # silently rejected, returning (None, None, 1000000, ...).
        best_MAD = float('inf')
        best_x = None
        best_y = None
        subimage_1_pixels = ImageConverter.sub_pixels(image1_pixels, x_start, y_start, x_start+block_size, y_start+block_size)
        # Score the center position first so an exact match exits immediately.
        if ImageComparator.is_valid_coordinate(x_start, y_start, block_size, image2_pixels):
            MAD = self.calculate_MAD(subimage_1_pixels, image2_pixels, x_start, y_start, x_start+block_size, y_start+block_size)
            if MAD < best_MAD:
                best_MAD = MAD
                best_x = x_start
                best_y = y_start
                if best_MAD == 0:
                    return best_x, best_y, best_MAD, self._MAD_checks_count
        for py in range(y_start-margin_size, y_start+margin_size):
            if py < 0:
                continue  # block sticks out above the image; try next row
            if not ImageComparator.is_valid_coordinate(0, py, block_size, image2_pixels):
                break  # block sticks out below the image; stop scanning rows
            for px in range(x_start-margin_size, x_start+margin_size):
                if px < 0:
                    continue  # block sticks out on the left; try next column
                if not ImageComparator.is_valid_coordinate(px, py, block_size, image2_pixels):
                    break  # block sticks out on the right; stop this row
                MAD = self.calculate_MAD(subimage_1_pixels, image2_pixels, px, py, px+block_size, py+block_size)
                if MAD < best_MAD:
                    best_MAD = MAD
                    best_x = px
                    best_y = py
                    if best_MAD == 0:
                        return best_x, best_y, best_MAD, self._MAD_checks_count
        return best_x, best_y, best_MAD, self._MAD_checks_count
|
from __future__ import absolute_import, division, print_function
import albumentations as albu
import cv2
import kvt
import kvt.augmentation
import numpy as np
def get_training_augmentation(resize_to=(320, 640)):
    """Build the training augmentation pipeline (resize + normalize).

    Parameters
    ----------
    resize_to: tuple of (height, width)
        Target size passed to albu.Resize.
    """
    # Fix: the old log line referenced an undefined `crop_size` variable,
    # so every call raised NameError; log only what exists.
    print("[get_training_augmentation] resize_to:", resize_to)
    train_transform = [
        albu.Resize(*resize_to),
        albu.Normalize(),
    ]
    return albu.Compose(train_transform)
def get_test_augmentation(resize_to=(320, 640)):
    """Build the deterministic test-time pipeline: resize to the target
    (height, width), then normalize."""
    return albu.Compose([
        albu.Resize(*resize_to),
        albu.Normalize(),
    ])
def get_transform(cfg):
    """Build an albumentations pipeline from a config list.

    Each entry has .name and optional .params; Compose/OneOf entries also
    carry .member with nested transforms. Names are resolved against albu,
    then kvt.augmentation, then eval'd as a last resort.
    """
    def get_object(trans):
        # Missing params means "use the transform's defaults".
        params = trans.params if trans.params is not None else {}
        if trans.name in {"Compose", "OneOf"}:
            # Container transforms: build their members recursively.
            augs_tmp = [get_object(aug) for aug in trans.member]
            return getattr(albu, trans.name)(augs_tmp, **params)
        if hasattr(albu, trans.name):
            return getattr(albu, trans.name)(**params)
        elif hasattr(kvt.augmentation, trans.name):
            return getattr(kvt.augmentation, trans.name)(**params)
        else:
            # SECURITY: eval() executes arbitrary code from the config —
            # only acceptable if configs are fully trusted.
            return eval(trans.name)(**params)
    augs = [get_object(t) for t in cfg]
    return albu.Compose(augs)
@kvt.TRANSFORMS.register
def base_image_transform(split, aug_cfg=None, height=256, width=256, tta=1, **_):
    """Build an image (+ optional mask) transform for the given split.

    Returns a callable transform(image, mask=None) producing a CHW array
    (or a stacked (tta, C, H, W) array when test-time augmentation is on),
    or an {"image", "mask"} dict when a mask is supplied.
    """
    resize_to = (height, width)
    print("resize_to:", resize_to)
    print("tta:", tta)
    if aug_cfg is not None:
        aug = get_transform(aug_cfg)
    # use default transform
    elif split == "train":
        aug = get_training_augmentation(resize_to)
    else:
        aug = get_test_augmentation(resize_to)
    def transform(image, mask=None):
        def _transform(image):
            # NOTE(review): both branches are identical today; presumably
            # kept separate so the train path can diverge later — confirm.
            if split == "train":
                augmented = aug(image=image)
            else:
                augmented = aug(image=image)
            if (split == "test") and (tta > 1):
                # Test-time augmentation: stack flipped variants as a batch.
                images = []
                images.append(augmented["image"])
                images.append(aug(image=np.fliplr(image))["image"])
                if tta > 2:
                    images.append(aug(image=np.flipud(image))["image"])
                if tta > 3:
                    images.append(aug(image=np.flipud(np.fliplr(image)))["image"])
                image = np.stack(images, axis=0)
                # (tta, H, W, C) -> (tta, C, H, W)
                image = np.transpose(image, (0, 3, 1, 2))
            else:
                image = augmented["image"]
                # HWC -> CHW
                image = np.transpose(image, (2, 0, 1))
            return image
        image = _transform(image)
        if mask is not None:
            mask = _transform(mask)
            return {"image": image, "mask": mask}
        return image
    return transform
|
#!/usr/bin/env python3
# Copyright (c) 2021 Mahdi Biparva, mahdi.biparva@gmail.com
# miTorch: Medical Imaging with PyTorch
# Deep Learning Package for 3D medical imaging in PyTorch
# Implemented by Mahdi Biparva, April 2021
# Brain Imaging Lab, Sunnybrook Research Institute (SRI)
import os
import torch
import numpy as np
import nibabel as nib
import data.transforms_mitorch as tf
from torch.utils.data import DataLoader
import torchvision.transforms as torch_tf
from data.data_container import ds_worker_init_fn
from data.VolSet import collate_fn
from data.TestSetExt import TestSet
import utils.checkpoint as checkops
from data.build import build_dataset
from config.defaults import init_cfg
from netwrapper.net_wrapper import NetWrapperHFB, NetWrapperWMH
from datetime import datetime
import logging
import pandas as pd
import utils.metrics as metrics
from utils.net_pred import post_proc_pred, pack_pred
def setup_logger():
    """Create the module logger: INFO to the console, ERROR and above to a
    timestamped log file under /tmp."""
    log = logging.getLogger(__name__)
    log.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # File handler only records errors.
    file_handler = logging.FileHandler(
        '/tmp/test_error_output_{}.log'.format(datetime.now().strftime('%Y%m%d_%H%M')))
    file_handler.setLevel(logging.ERROR)
    file_handler.setFormatter(fmt)
    # Console handler inherits the logger's INFO level.
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    for handler in (file_handler, stream_handler):
        log.addHandler(handler)
    return log
# Module-wide logger, created once at import time.
logger = setup_logger()
def binarize_pred(p, binarize_threshold):
    """Threshold a probability map into a hard {0, 1} mask (same dtype).

    Values >= binarize_threshold become 1, everything else 0.
    """
    keep = p.ge(binarize_threshold)
    return p.masked_fill(keep, 1).masked_fill(~keep, 0)
def save_pred(pred, output_dir, basename, *in_mod):
    """Save a predicted mask (and optionally the input T1/FLAIR channels)
    to output_dir as NIfTI (or Analyze) files.

    pred: tensor of shape (1, 1, ...) — batch and channel dims are dropped.
    in_mod: optional single input tensor; channels 0 and 1 are written out
    as *_T1.nii.gz and *_FLAIR.nii.gz respectively.
    """
    output_path = ''
    file_type = ('nii', 'img')[0]  # hard-wired to NIfTI for now
    pred = pred.detach().cpu().numpy()[0, 0]  # batch size is 1, channel is 1 too.
    if file_type == 'nii':
        output_path = os.path.join(output_dir, '{}_mask_pred.nii.gz'.format(os.path.basename(basename)))
        # Identity affine: volumes are saved without orientation metadata.
        pred = nib.Nifti1Image(pred, np.eye(4))
        if len(in_mod):
            img = in_mod[0].detach().cpu().numpy()[0, 0]  # channel 0 is usually T1
            img_output_path = os.path.join(output_dir, '{}_T1.nii.gz'.format(os.path.basename(basename)))
            img = nib.Nifti1Image(img, np.eye(4))
            nib.save(img, img_output_path)
            img = in_mod[0].detach().cpu().numpy()[0, 1]  # channel 1 is usually FLAIR
            img_output_path = os.path.join(output_dir, '{}_FLAIR.nii.gz'.format(os.path.basename(basename)))
            img = nib.Nifti1Image(img, np.eye(4))
            nib.save(img, img_output_path)
    elif file_type == 'img':
        output_path = os.path.join(output_dir, '{}_mask_pred.img'.format(os.path.basename(basename)))
        pred = nib.AnalyzeImage(pred, np.eye(4))
    nib.save(pred, output_path)
def eval_pred(p, a, meters, cfg):
    """Fill `meters` with every metric listed in cfg.TEST.EVAL_METRICS,
    comparing prediction p against annotation a."""
    for name in cfg.TEST.EVAL_METRICS:
        # Each metric name maps to a `<name>_metric` function in utils.metrics.
        metric_fn = getattr(metrics, '{}_metric'.format(name))
        meters[name] = metric_fn(p, a, ignore_index=cfg.MODEL.IGNORE_INDEX)
def reset_cfg_init(cfg):
    """Drop the auto-created output dir when it is still empty, then
    re-initialize the config (which creates a fresh output dir)."""
    if not os.listdir(cfg.OUTPUT_DIR):
        # The old dir is useless; a new one is created by init_cfg below.
        os.rmdir(cfg.OUTPUT_DIR)
    return init_cfg(cfg)
def setup_test(cfg):
    """Prepare config and device for testing.

    Re-initializes the output dir, disables train/valid, seeds numpy and
    torch for reproducibility, and selects the CUDA device (or CPU).
    Returns (cfg, device).
    """
    cfg = reset_cfg_init(cfg)
    cfg.TRAIN.ENABLE = cfg.VALID.ENABLE = False
    # Seed both RNGs so inference-time randomness is reproducible.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    cuda_device_id = cfg.GPU_ID
    # NOTE(review): set_device is called even when CUDA is unavailable —
    # confirm this is safe on CPU-only hosts.
    torch.cuda.set_device(cuda_device_id)
    if cfg.USE_GPU and torch.cuda.is_available():
        device = torch.device('cuda:{}'.format(cuda_device_id))
        logger.info('cuda available')
        logger.info(f'device count is {torch.cuda.device_count()}')
        logger.info(f'{device} will be used ...')
    else:
        device = torch.device('cpu')
    # A test checkpoint is mandatory.
    assert cfg.TEST.CHECKPOINT_FILE_PATH and \
        len(cfg.TEST.CHECKPOINT_FILE_PATH) and \
        os.path.exists(cfg.TEST.CHECKPOINT_FILE_PATH), 'TEST.CHECKPOINT_FILE_PATH not set'
    return cfg, device
def build_transformations():
    """Assemble the deterministic test-time preprocessing pipeline."""
    steps = [
        tf.ToTensorImageVolume(),
        tf.RandomOrientationTo('RPI'),
        # tf.RandomResampleTomm(target_spacing=(1, 1, 1)),
        tf.ConcatAnnot2ImgVolume(num_channels=-1),  # concat all except the last to the image
        tf.MaskIntensityVolume(mask_data=None),  # crop a tight 3D box
        tf.ConcatAnnot2ImgVolume(num_channels=-1),  # concat all annot to the image
        tf.CropForegroundVolume(margin=1),  # crop the brain region
        tf.ConcatImg2AnnotVolume(num_channels=2),
        tf.NormalizeMinMaxVolume(max_div=True, inplace=True),
    ]
    return torch_tf.Compose(steps)
def create_test_set(cfg, transformations):
    """Create the test dataset.

    When cfg.TEST.DATA_PATH is set, a generic TestSet (single/batch flavor)
    is built from it; otherwise the named cfg.TEST.DATASET is constructed.
    """
    # Define any test dataset with annotation as known dataset otherwise call TestSet
    if len(cfg.TEST.DATA_PATH):
        # Predicting WMH without ground-truth HFB requires an HFB checkpoint.
        if cfg.WMH.ENABLE and not cfg.WMH.HFB_GT:
            assert cfg.WMH.HFB_CHECKPOINT and \
                len(cfg.WMH.HFB_CHECKPOINT) and \
                os.path.exists(cfg.WMH.HFB_CHECKPOINT), 'WMH.HFB_CHECKPOINT not set'
        logger.info('you chose {} mode'.format(('single', 'batch')[cfg.TEST.BATCH_MODE]))
        # NOTE(review): these two flags are assigned but never read in this
        # function — test() passes its own flags; confirm before removing.
        eval_pred_flag = (False, True)[1]
        save_pred_flag = True
        cfg.TEST.DATASET = 'TestSet{}'.format(('single', 'batch')[cfg.TEST.BATCH_MODE].upper())
        cfg = reset_cfg_init(cfg)
        cfg.TEST.IN_MOD = [  # TODO if needed, we can add this to the input arguments
            # ('t1', 'T1_nu.img'),
            # ('fl', 'T1acq_nu_FL.img'),
            # ('annot', 'T1acq_nu_HfBd.img'),
            # ('t1', 'T1_nu.nii.gz'),  # wmh test cases
            # ('fl', 'T1acq_nu_FL.nii.gz'),
            # ('annot', 'wmh_seg.nii.gz'),
            ('t1', 'T1.nii.gz'),  # wmh challenge test cases
            ('fl', 'FLAIR.nii.gz'),
            ('annot', 'wmh.nii.gz'),
        ]
        test_set = TestSet(cfg, 'test', transformations, prefix_name=False if cfg.WMH.ENABLE else True)
    else:
        test_set = build_dataset(cfg.TEST.DATASET, cfg, 'test', transformations)
    return test_set
def create_net(cfg, device):
    """Build the WMH or HFB network wrapper, load the test checkpoint into
    its core network, and switch it to eval mode."""
    wrapper_cls = NetWrapperWMH if cfg.WMH.ENABLE else NetWrapperHFB
    net_wrapper = wrapper_cls(device, cfg)
    checkops.load_checkpoint(cfg.TEST.CHECKPOINT_FILE_PATH,
                             net_wrapper.net_core,
                             distributed_data_parallel=False)
    net_wrapper.net_core.eval()
    return net_wrapper
def test_loop(cfg, test_loader, device, net_wrapper, save_pred_flag, eval_pred_flag):
    """Run inference over the test loader; optionally save and evaluate
    each prediction.

    Returns a list with one metric dict per sample (empty dicts when
    eval_pred_flag is False).
    """
    meters_test_set = list()
    for cnt, (image, annot, meta) in enumerate(test_loader):
        logger.info(f'testing on: {cnt+1:05d}|{len(test_loader):05d}')
        meters = dict()
        image = image.to(device, non_blocking=True)
        annot = annot.to(device, non_blocking=True)
        # (A) Get prediction
        if cfg.WMH.ENABLE:
            # The WMH path also returns the processed inputs and annotation.
            pred, annot, image = net_wrapper.forward((image, annot), return_input=True)
        else:
            pred = net_wrapper.forward(image)
            pred = pack_pred(pred)
        pred, annot = post_proc_pred(pred, annot, cfg)
        # (B) Threshold prediction
        pred = binarize_pred(pred, binarize_threshold=cfg.TEST.BINARIZE_THRESHOLD)
        # (C) Save prediction
        if save_pred_flag:
            # NOTE(review): meta[0] assumes a batch size of 1 — confirm.
            save_pred(pred, cfg.OUTPUT_DIR, meta[0]['sample_path'], *[image])
        # (D) Evaluate prediction
        if eval_pred_flag:
            eval_pred(pred, annot, meters, cfg)
        meters_test_set.append(meters)
    if save_pred_flag:
        logger.info('*** Done saving segmentation prediction for the test data.'
                    '*** Results are saved at: {}'.format(cfg.OUTPUT_DIR))
    return meters_test_set
def get_output_results(meters_test_set, eval_pred_flag):
    """Aggregate per-sample metric dicts into '{metric}_mean' and
    '{metric}_std' entries. Returns an empty dict when evaluation is off."""
    output_results = dict()
    if not eval_pred_flag:
        return output_results
    logger.info('Evaluation results on the test set is ---')
    frame = pd.DataFrame(meters_test_set)
    means, stds = frame.mean(), frame.std()
    for metric in frame.columns:
        output_results[f'{metric}_mean'] = means[metric]
        output_results[f'{metric}_std'] = stds[metric]
    logger.info(output_results)
    return output_results
@torch.no_grad()
def test(cfg, transformations=None, eval_pred_flag=True, save_pred_flag=True):
    """End-to-end test entry point.

    Sets up config/device, builds the data pipeline and network, runs
    inference over the test set, and returns the aggregated metric dict
    (empty when eval_pred_flag is False). Gradients are disabled globally.
    """
    # (0) initial setup
    cfg, device = setup_test(cfg)
    # (1) define data pipeline (caller may supply custom transformations)
    transformations = build_transformations() if transformations is None else transformations
    # (2) create test set and loader
    test_set = create_test_set(cfg, transformations)
    test_loader = DataLoader(test_set,
                             batch_size=cfg.TEST.BATCH_SIZE,
                             shuffle=False,
                             drop_last=False,
                             num_workers=cfg.DATA_LOADER.NUM_WORKERS,
                             pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
                             worker_init_fn=ds_worker_init_fn,
                             collate_fn=collate_fn,
                             )
    # (3) create network and load snapshots
    net_wrapper = create_net(cfg, device)
    # (4) loop over samples
    meters_test_set = test_loop(cfg, test_loader, device, net_wrapper, save_pred_flag, eval_pred_flag)
    # (5) log formatted outputs
    output_results = get_output_results(meters_test_set, eval_pred_flag)
    return output_results
|
import matplotlib.pyplot as plt
from pyrsa.vis import rdm_plot
from pyrsa.vis.colors import rdm_colormap
import pickle
import numpy as np
import scipy
import scipy.cluster.hierarchy as sch
import os
# For each model and each training epoch: load the stored RSA result,
# reorder the RDM into perceptual stimulus order, and save a heatmap PNG.
with open("stimuli_perception_order.txt", "rb") as p1:  # pickled stimulus ordering
    order = pickle.load(p1)
models = ['GD', 'Hebbian', 'Burstprop']
for model in models:
    DIR_epoch_data = '/home/ajay/Desktop/Learning_Rule_paperspace/'+str(model)+'/epoch_data/'
    DIR_rdm_save = '/home/ajay/Desktop/Learning_Rule_paperspace/'+str(model)+'/rdm_epoch_plots/'
    # one pickle file per epoch in the data directory
    num_files = len([f for f in os.listdir(DIR_epoch_data) if os.path.isfile(os.path.join(DIR_epoch_data, f))])
    for epoch in range(num_files):
        with open(DIR_epoch_data+str(model)+"_brain_area_rsa_epoch"+str(epoch+1)+".pkl","rb") as f:
            rsa_model = pickle.load(f)
        # assumes rsa_model['V4'][0] is a pyrsa RDMs object -- TODO confirm
        rdm_model = rsa_model['V4'][0].get_matrices()
        rdm_model = rdm_model[0]
        # permute columns then rows into the perceptual ordering
        rdm_model = rdm_model[:, order][order]
        cmap = rdm_colormap()
        plt.imshow(rdm_model, cmap)
        ax = plt.gca()
        ax.set_xticks([])
        ax.set_yticks([])
        plt.xlabel('Epoch: '+str(epoch+1))
        if model == 'GD':
            plt.title('Gradient Descent')
        else: plt.title(model)
        # NOTE(review): the figure is never cleared/closed between epochs, so
        # artists accumulate across iterations -- consider plt.close('all').
        plt.savefig(DIR_rdm_save+str(model)+'_rdm_epoch_'+str(epoch+1)+'.png')
import caffe2.python.onnx.backend as backend
import numpy as np
import onnx
# Load the ONNX model
model = onnx.load("alexnet.onnx")
# Check that the IR is well formed
onnx.checker.check_model(model)
# Print a human readable representation of the graph
# NOTE(review): printable_graph RETURNS the string; the value is discarded
# here -- wrap in print(...) to actually display it.
onnx.helper.printable_graph(model.graph)
rep = backend.prepare(model, device="CUDA:0") # or "CPU"
# For the Caffe2 backend:
#     rep.predict_net is the Caffe2 protobuf for the network
#     rep.workspace is the Caffe2 workspace for the network
#     (see the class caffe2.python.onnx.backend.Workspace)
# Run a forward pass on a random batch of 10 RGB 224x224 float32 images.
outputs = rep.run(np.random.randn(10, 3, 224, 224).astype(np.float32))
# To run networks with more than one input, pass a tuple
# rather than a single numpy ndarray.
print(outputs[0])
|
from django_middleware_global_request.middleware import get_request
class DbRouter:
    """Django database router: reads issued while serving an /api request go
    to the "replicas" alias; everything else (and all writes) use "default"."""

    def db_for_read(self, model, **hints):
        # get_request() may return None outside a request cycle; the
        # AttributeError from `.path` then falls back to the default db.
        request = get_request()
        try:
            is_api = "/api" in request.path
        except AttributeError:
            return "default"
        return "replicas" if is_api else "default"

    def db_for_write(self, model, **hints):
        return "default"

    def allow_relation(self, obj1, obj2, **hints):
        # No opinion: let Django apply its default rules.
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        # Only ever migrate the primary database.
        return db == "default"
|
# Exercise 11: Asking Questions
# Prompt the user for three values and echo them back with %r formatting.
age = input("How old are you? ")
height = input("How tall are you? ")
# FIX: "weight?" -> "weigh?" (verb) in the user-facing prompt.
weight = input("How much do you weigh? ")
# FIX: "ad" -> "and" in the printed sentence.
print("So, you're %r old, %r tall and %r heavy."
      % (age, height, weight))
# The output differs slightly from the book because this uses
# Python 3's input()/print() rather than Python 2's raw_input().
|
# Public API of this package: the `env` and `igtdetect` submodules.
# FIX: the original bound the list to `all`, shadowing the builtin;
# `__all__` is the name Python's `from pkg import *` machinery consults.
__all__ = ['env', 'igtdetect']
'''
Created on Jan 24, 2016
@author: Andrei Padnevici
@note: This is an exercise: 7.1
'''
# Echo the mailbox file to stdout, upper-cased.
# FIX: use a context manager so the handle is closed deterministically
# (the original never closed it and also shadowed the builtin `file`).
with open("mbox-short.txt") as fh:
    for line in fh:
        print(line.upper())
|
""" config.py
Microsimulation config for World population microsimulation
Data soruced from the World Bank, https://databank.worldbank.org
"""
import numpy as np
import pandas as pd
import glob
import os
import neworder
# MPI split initial population files over threads
def partition(arr, count):
    """Split `arr` round-robin into `count` interleaved slices.

    With count <= 1 the whole sequence is returned as a single partition.
    Works on anything sliceable (lists, pandas Series, ...).
    """
    if count <= 1:
        return [arr]
    return [arr[start::count] for start in range(count)]
# Country codes from the lookup table (tab-separated, UTF-8).
allcountries = pd.read_csv("./examples/world/data/CountryLookup.csv", encoding='utf-8', sep="\t")["Code"]
# Split the countries round-robin over the MPI processes.
initial_populations = partition(allcountries, neworder.mpi.size())
#initial_populations = [["ALB", "ASM", "ATG"]]
# running/debug options
neworder.log_level = 1
# initialisation: each process microsynthesises its own share of countries.
# NOTE(review): `(x)` without a trailing comma is NOT a tuple -- presumably
# "args" was meant to be a 1-tuple; confirm against the neworder API.
neworder.initialisations = {
    "people": { "module": "microsynth", "class_": "Microsynth", "args": (initial_populations[neworder.mpi.rank()]) }
}
# define the evolution: 2019..2030 in a single 11-year checkpoint step
neworder.timeline = neworder.Timeline(2019, 2030, [11])
# timestep must be defined in neworder
neworder.dataframe.transitions = {
}
# checks to perform after each timestep. Assumed to return a boolean
neworder.do_checks = True # Faith
# assumed to be methods of class_ returning True if checks pass
neworder.checks = {
}
# Generate output at each checkpoint
neworder.checkpoints = {
    "write": "people.write_table()"
}
|
import os
import torch
import torch.utils.data as data
from PIL import Image
def make_dataset(root, data='SBU', sub=''):
    """Collect (shadow image path, shadow mask path) pairs for a dataset.

    data: 'SBU' (jpg images / png masks) or 'CUHK' (png for KITTI/MAP
    sub-sets, jpg otherwise). Returns None for unknown dataset names.
    NOTE: the `data` parameter shadows the module alias `torch.utils.data`
    inside this function; kept for interface compatibility.
    """
    def _pairs(img_dir, img_ext, mask_dir):
        stems = [os.path.splitext(name)[0]
                 for name in os.listdir(os.path.join(root, img_dir))
                 if name.endswith(img_ext)]
        return [(os.path.join(root, img_dir, stem + img_ext),
                 os.path.join(root, mask_dir, stem + '.png'))
                for stem in stems]

    if data == 'SBU':
        return _pairs('ShadowImages', '.jpg', 'ShadowMasks')
    if data == 'CUHK':
        img_ext = '.png' if sub in ('KITTI', 'MAP') else '.jpg'
        return _pairs('shadow_' + sub, img_ext, 'mask_' + sub)
class ImageFolder(data.Dataset):
    """Dataset of (shadow image, shadow mask) pairs discovered on disk via
    make_dataset(). Optional joint/image/target transforms are applied in
    that order."""

    def __init__(self, root, data, sub, joint_transform=None, transform=None, target_transform=None):
        # NOTE: the `data` parameter shadows the torch.utils.data alias here.
        self.root = root
        self.imgs = make_dataset(root, data, sub)
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        img_path, mask_path = self.imgs[index]
        sample = Image.open(img_path).convert('RGB')
        label = Image.open(mask_path)
        if self.joint_transform is not None:
            sample, label = self.joint_transform(sample, label)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            label = self.target_transform(label)
        return sample, label

    def __len__(self):
        return len(self.imgs)
class VideoframeDataset(data.Dataset):
    """Dataset of consecutive frame sequences extracted from a video dataset.

    Each sample is (x, y): x stacks `seq_len` consecutive frames starting at
    some frame, y stacks the same-length window shifted forward by one frame.
    """
    def __init__(self, root, seq_len=1, transform=None):
        """
        root: path to videos (one sub-folder of .jpg frames per video)
        seq_len: length of frame sequence in each sample
        transform: optional, callable, apply image transform
        """
        self.root = root
        self.seq_len = seq_len
        self.transform = transform
        groups = []  # valid starting frames, one per sample
        imgs = []    # every frame path, grouped per folder, in frame order
        for folder in os.listdir(root):
            if os.path.isdir(os.path.join(root, folder)):
                images = [os.path.join(folder, img_name) for img_name in os.listdir(os.path.join(root, folder)) if img_name.endswith('.jpg')]
                # sort numerically by the digits embedded in the filename
                images.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
                imgs.extend(images)
                # exclude the last seq_len frames of a folder so that
                # idx+1+i in __getitem__ never crosses into the next folder
                groups.extend(images[0:(len(images)-seq_len)])
        self.imgs = imgs
        self.groups = groups
    def __len__(self):
        return len(self.groups)
    def __getitem__(self, index):
        # Locate the start frame in the flat list, then load frames
        # [idx, idx+seq_len) as x and [idx+1, idx+1+seq_len) as y.
        image_path = self.groups[index]
        idx = self.imgs.index(image_path)
        cur_imgs = []
        fol_imgs = []
        for i in range(self.seq_len):
            image = Image.open(os.path.join(self.root, self.imgs[idx+i])).convert('RGB')
            if self.transform:
                image = self.transform(image)
            cur_imgs.append(image)
        for i in range(self.seq_len):
            image = Image.open(os.path.join(self.root, self.imgs[idx+1+i])).convert('RGB')
            if self.transform:
                image = self.transform(image)
            fol_imgs.append(image)
        # stack to (seq_len, ...) tensors -- assumes transform yields tensors
        x = torch.stack(cur_imgs)
        y = torch.stack(fol_imgs)
        # (earlier path-only variant kept for reference)
        # for i in range(self.seq_len):
        #     image = self.imgs[idx+i]
        #     cur_imgs.append(image)
        # for i in range(self.seq_len):
        #     image = self.imgs[idx+1+i]
        #     fol_imgs.append(image)
        # x = cur_imgs
        # y = fol_imgs
        return x,y
# -*- coding:utf-8 -*-
import sys
sys.path.append('..')
import lib.Logging as L
def data_marker(cpu, mem, h_cpu, h_mem, path):
    """Render current vs. historical cpu/mem percentage series to a PNG at `path`.

    cpu/mem: current series; h_cpu/h_mem: optional historical series
    (skipped when h_mem is None). Values are assumed to be percentages
    (y-axis fixed to 0..100).
    """
    import matplotlib
    matplotlib.use('Agg')  # headless backend: render straight to file
    import pylab as pl
    # NOTE(review): cpu and mem are plotted twice -- once here and once
    # below with explicit colors/labels; presumably only the labelled
    # plots are intended. Left unchanged to preserve the output image.
    pl.plot(cpu, 'r')
    pl.plot(mem, 'g')
    pl.title('performance')
    pl.xlabel('second')
    pl.ylabel('percent')
    pl.plot(cpu, color="red", linewidth=2.5, linestyle="-",label="this_cpu")
    pl.plot(mem, color="blue", linewidth=2.5, linestyle="-",label="this_mem")
    if h_mem is not None:
        pl.plot(h_cpu, color="magenta", linewidth=2.5,linestyle="-",label="history_cpu")
        pl.plot(h_mem, color="green",linewidth=2.5,linestyle="-",label="history_mem")
    pl.legend(loc='upper left')
    pl.xlim(0.0,len(mem))
    pl.ylim(0.0, 100.0)
    pl.savefig(path)
    L.Logging.debug('Report: %s' % path)
    # pl.show()
    pl.close()
if __name__ == "__main__":
    # Smoke test: render one chart from random data files 0.png, 1.png, ...
    import random
    def get_num():
        # 10 random percentages in [1, 60]
        lst=[]
        for i in range(10):
            lst.append(random.randint(1,60))
        return lst
    for i in range(1):
        data_marker(get_num(),get_num(),get_num(),get_num(),'%s.png' % i)
|
import validators
from animal_case import keys_to_snake_case
from flask import Blueprint, g, current_app
from marshmallow import fields
from validate_email import validate_email
from webargs import validate
from grant.email.send import send_email
from grant.utils.misc import make_url
import grant.utils.auth as auth
from grant.comment.models import Comment, user_comments_schema
from grant.email.models import EmailRecovery
from grant.ccr.models import CCR, ccrs_schema
from grant.extensions import limiter
from grant.parser import query, body
from grant.proposal.models import (
Proposal,
ProposalTeamInvite,
invites_with_proposal_schema,
user_proposal_contributions_schema,
user_proposals_schema,
user_proposal_arbiters_schema
)
from grant.proposal.models import ProposalContribution
from grant.utils.enums import ProposalStatus, ContributionStatus, CCRStatus
from grant.utils.exceptions import ValidationException
from grant.utils.social import verify_social, get_social_login_url, VerifySocialException
from grant.utils.upload import remove_avatar, sign_avatar_upload, AvatarException
from .models import (
User,
SocialMedia,
Avatar,
self_user_schema,
user_schema,
user_settings_schema,
db
)
from grant.utils.validate import is_z_address_valid
blueprint = Blueprint('user', __name__, url_prefix='/api/v1/users')
@blueprint.route("/me", methods=["GET"])
@auth.requires_auth
def get_me():
    """Return the authenticated user's own (self) serialization."""
    return self_user_schema.dump(g.current_user)
@blueprint.route("/<user_id>", methods=["GET"])
@query({
    "withProposals": fields.Bool(required=False, missing=None),
    "withComments": fields.Bool(required=False, missing=None),
    "withFunded": fields.Bool(required=False, missing=None),
    "withPending": fields.Bool(required=False, missing=None),
    "withArbitrated": fields.Bool(required=False, missing=None),
    "withRequests": fields.Bool(required=False, missing=None),
    "withRejectedPermanently": fields.Bool(required=False, missing=None)
})
def get_user(user_id, with_proposals, with_comments, with_funded, with_pending, with_arbitrated, with_requests, with_rejected_permanently):
    """Public user profile endpoint.

    Each optional `with*` query flag attaches the corresponding related
    collection to the response. The pending/arbitrated/rejected sections are
    only included when the requester is the user themselves (is_self).
    Returns 404 when no user matches user_id.
    """
    user = User.get_by_id(user_id)
    if user:
        result = user_schema.dump(user)
        authed_user = auth.get_authed_user()
        is_self = authed_user and authed_user.id == user.id
        if with_requests:
            requests = CCR.get_by_user(user)
            requests_dump = ccrs_schema.dump(requests)
            result["requests"] = requests_dump
        if with_proposals:
            proposals = Proposal.get_by_user(user)
            proposals_dump = user_proposals_schema.dump(proposals)
            result["proposals"] = proposals_dump
        if with_funded:
            contributions = ProposalContribution.get_by_userid(user_id)
            # Strangers only see confirmed, public contributions to live proposals.
            if not authed_user or user.id != authed_user.id:
                contributions = [c for c in contributions if c.status == ContributionStatus.CONFIRMED]
                contributions = [c for c in contributions if not c.private]
                contributions = [c for c in contributions if c.proposal.status == ProposalStatus.LIVE]
            contributions_dump = user_proposal_contributions_schema.dump(contributions)
            result["contributions"] = contributions_dump
        if with_comments:
            comments = Comment.get_by_user(user)
            comments_dump = user_comments_schema.dump(comments)
            result["comments"] = comments_dump
        if with_pending and is_self:
            # In-flight proposals visible only to the owner.
            pending_proposals = Proposal.get_by_user(user, [
                ProposalStatus.STAKING,
                ProposalStatus.PENDING,
                ProposalStatus.APPROVED,
                ProposalStatus.REJECTED,
            ])
            pending_proposals_dump = user_proposals_schema.dump(pending_proposals)
            result["pendingProposals"] = pending_proposals_dump
            pending_ccrs = CCR.get_by_user(user, [
                CCRStatus.PENDING,
                CCRStatus.APPROVED,
                CCRStatus.REJECTED,
            ])
            pending_ccrs_dump = ccrs_schema.dump(pending_ccrs)
            result["pendingRequests"] = pending_ccrs_dump
        if with_arbitrated and is_self:
            result["arbitrated"] = user_proposal_arbiters_schema.dump(user.arbiter_proposals)
        if with_rejected_permanently and is_self:
            rejected_proposals = Proposal.get_by_user(user, [
                ProposalStatus.REJECTED_PERMANENTLY
            ])
            result["rejectedPermanentlyProposals"] = user_proposals_schema.dump(rejected_proposals)
            rejected_ccrs = CCR.get_by_user(user, [
                CCRStatus.REJECTED_PERMANENTLY,
            ])
            result["rejectedPermanentlyRequests"] = ccrs_schema.dump(rejected_ccrs)
        return result
    else:
        message = "User with id matching {} not found".format(user_id)
        return {"message": message}, 404
@blueprint.route("/", methods=["POST"])
@limiter.limit("30/day;5/minute")
@body({
    "emailAddress": fields.Str(required=True, validate=lambda e: validate_email(e)),
    "password": fields.Str(required=True),
    "displayName": fields.Str(required=True, validate=validate.Length(min=2, max=50)),
    "title": fields.Str(required=True, validate=validate.Length(min=2, max=50)),
})
def create_user(email_address, password, display_name, title):
    """Register a new user account and immediately log it in.

    Returns 409 when the email address is already taken, otherwise the
    self serialization with 201.
    """
    if User.get_by_email(email_address):
        return {"message": "User with that email already exists"}, 409
    new_user = User.create(
        email_address=email_address,
        password=password,
        display_name=display_name,
        title=title,
    )
    new_user.login()
    return self_user_schema.dump(new_user), 201
@blueprint.route("/auth", methods=["POST"])
@limiter.limit("30/hour;5/minute")
@body({
    "email": fields.Str(required=True),
    "password": fields.Str(required=True)
})
def auth_user(email, password):
    """Authenticate credentials and return the self serialization."""
    user = auth.auth_user(email, password)
    return self_user_schema.dump(user)
@blueprint.route("/me/password", methods=["PUT"])
@auth.requires_auth
@body({
    "currentPassword": fields.Str(required=True),
    "password": fields.Str(required=True)
})
def update_user_password(current_password, password):
    """Change the password after re-verifying the current one."""
    if g.current_user.check_password(current_password):
        g.current_user.set_password(password)
        return {"message": "ok"}, 200
    return {"message": "Current password incorrect"}, 403
@blueprint.route("/me/email", methods=["PUT"])
@auth.requires_auth
@body({
    "email": fields.Str(required=True, validate=lambda e: validate_email(e)),
    "password": fields.Str(required=True)
})
def update_user_email(email, password):
    """Change the account email (password re-check required); the change is
    logged with the old and new addresses."""
    if not g.current_user.check_password(password):
        return {"message": "Password is incorrect"}, 403
    old_email = g.current_user.email_address
    current_app.logger.info(
        f"Updating userId: {g.current_user.id} with current email: {old_email} to new email: {email}"
    )
    g.current_user.set_email(email)
    return {"message": "ok"}, 200
@blueprint.route("/me/resend-verification", methods=["PUT"])
@auth.requires_auth
def resend_email_verification():
    """Re-send the account verification email to the authenticated user."""
    g.current_user.send_verification_email()
    return {"message": "ok"}, 200
@blueprint.route("/logout", methods=["POST"])
@auth.requires_auth
def logout_user():
    """Terminate the current session."""
    auth.logout_current_user()
    return {"message": "ok"}, 200
@blueprint.route("/social/<service>/authurl", methods=["GET"])
@auth.requires_auth
def get_user_social_auth_url(service):
    """Return the OAuth login URL for the given social service, or 400 when
    the service is unsupported."""
    try:
        url = get_social_login_url(service)
    except VerifySocialException as e:
        return {"message": str(e)}, 400
    return {"url": url}
@blueprint.route("/social/<service>/verify", methods=["POST"])
@auth.requires_auth
@body({
    "code": fields.Str(required=True)
})
def verify_user_social(service, code):
    """Verify a social-media account via its OAuth code and bind it to the
    authenticated user, stealing it from any other user who had it."""
    try:
        # 1. verify with 3rd party
        username = verify_social(service, code)
        # 2. remove existing username/service pair (possibly another user's)
        sm_other_db = SocialMedia.query.filter_by(service=service, username=username).first()
        if sm_other_db:
            db.session.delete(sm_other_db)
        # 3. remove existing entry for authed user/service
        sm_self_db = SocialMedia.query.filter_by(service=service, user_id=g.current_user.id).first()
        if sm_self_db:
            db.session.delete(sm_self_db)
        # 4. set this user's verified social item (single commit for all steps)
        sm = SocialMedia(service=service, username=username, user_id=g.current_user.id)
        db.session.add(sm)
        db.session.commit()
        return {"username": username}, 200
    except VerifySocialException as e:
        return {"message": str(e)}, 400
@blueprint.route("/recover", methods=["POST"])
@limiter.limit("10/day;2/minute")
@body({
    "email": fields.Str(required=True)
})
def recover_user(email):
    """Start password recovery by emailing a reset code (banned users are
    rejected by throw_on_banned)."""
    user = User.get_by_email(email)
    if not user:
        return {"message": "No user exists with that email"}, 400
    auth.throw_on_banned(user)
    user.send_recovery_email()
    return {"message": "ok"}, 200
@blueprint.route("/recover/<code>", methods=["POST"])
@body({
    "password": fields.Str(required=True)
})
def recover_email(code, password):
    """Complete password recovery: exchange a valid, unexpired reset code
    for a new password. The code is single-use (deleted on success)."""
    er = EmailRecovery.query.filter_by(code=code).first()
    if not er:
        return {"message": "Invalid reset code"}, 400
    if er.is_expired():
        return {"message": "Reset code expired"}, 401
    auth.throw_on_banned(er.user)
    er.user.set_password(password)
    db.session.delete(er)
    db.session.commit()
    return {"message": "ok"}, 200
@blueprint.route("/avatar", methods=["POST"])
@limiter.limit("20/day;3/minute")
@auth.requires_auth
@body({
    "mimetype": fields.Str(required=True)
})
def upload_avatar(mimetype):
    """Return a signed upload request for an avatar of the given mimetype."""
    try:
        return sign_avatar_upload(mimetype, g.current_user.id)
    except AvatarException as e:
        return {"message": str(e)}, 400
@blueprint.route("/avatar", methods=["DELETE"])
@auth.requires_auth
@body({
    "url": fields.Str(required=True)
})
def delete_avatar(url):
    """Remove the avatar image at `url` for the authenticated user."""
    user = g.current_user
    remove_avatar(url, user.id)
    # FIX: the original returned None; Flask raises "View function did not
    # return a response" for that. Return an explicit OK like sibling routes.
    return {"message": "ok"}, 200
@blueprint.route("/<user_id>", methods=["PUT"])
@auth.requires_auth
@auth.requires_same_user_auth
@body({
    "displayName": fields.Str(required=True, validate=lambda d: 2 <= len(d) <= 60),
    "title": fields.Str(required=True, validate=lambda t: 2 <= len(t) <= 60),
    "socialMedias": fields.List(fields.Dict(), required=True),
    "avatar": fields.Str(required=True, allow_none=True, validate=lambda d: validators.url(d))
})
def update_user(user_id, display_name, title, social_medias, avatar):
    """Update the authenticated user's profile (name, title, socials, avatar).

    Social media entries can only be DELETED here (additions go through the
    /social verify flow). The avatar row is replaced wholesale; the old
    stored file is removed only when the URL actually changed.
    """
    user = g.current_user
    if display_name is not None:
        user.display_name = display_name
    if title is not None:
        user.title = title
    # only allow deletions here, check for absent items
    db_socials = SocialMedia.query.filter_by(user_id=user.id).all()
    new_socials = list(map(lambda s: s['service'], social_medias))
    for social in db_socials:
        if social.service not in new_socials:
            db.session.delete(social)
    # replace the avatar row: delete the old one first, then add the new one
    db_avatar = Avatar.query.filter_by(user_id=user.id).first()
    if db_avatar:
        db.session.delete(db_avatar)
    if avatar:
        new_avatar = Avatar(image_url=avatar, user_id=user.id)
        db.session.add(new_avatar)
    old_avatar_url = db_avatar and db_avatar.image_url
    if old_avatar_url and old_avatar_url != avatar:
        # physically remove the superseded avatar file
        remove_avatar(old_avatar_url, user.id)
    db.session.commit()
    result = self_user_schema.dump(user)
    return result
@blueprint.route("/<user_id>/invites", methods=["GET"])
@auth.requires_same_user_auth
def get_user_invites(user_id):
    """List the authenticated user's pending proposal team invites."""
    invites = ProposalTeamInvite.get_pending_for_user(g.current_user)
    return invites_with_proposal_schema.dump(invites)
@blueprint.route("/<user_id>/invites/<invite_id>/respond", methods=["PUT"])
@auth.requires_same_user_auth
@body({
    "response": fields.Bool(required=True)
})
def respond_to_invite(user_id, invite_id, response):
    """Accept (response=True) or decline a proposal team invite.

    Accepting also appends the user to the proposal's team. Returns 404 when
    the invite does not exist.
    """
    invite = ProposalTeamInvite.query.filter_by(id=invite_id).first()
    if not invite:
        return {"message": "No invite found with id {}".format(invite_id)}, 404
    invite.accepted = response
    db.session.add(invite)
    if invite.accepted:
        # FIX: the original called db.session.add(invite) a second time here;
        # the object is already in the session, so the duplicate was removed.
        invite.proposal.team.append(g.current_user)
    db.session.commit()
    return {"message": "ok"}, 200
@blueprint.route("/<user_id>/settings", methods=["GET"])
@auth.requires_same_user_auth
def get_user_settings(user_id):
    """Return the authenticated user's settings serialization."""
    return user_settings_schema.dump(g.current_user.settings)
@blueprint.route("/<user_id>/settings", methods=["PUT"])
@auth.requires_same_user_auth
@body({
    "emailSubscriptions": fields.Dict(required=False, missing=None),
    "refundAddress": fields.Str(required=False, missing=None),
    "tipJarAddress": fields.Str(required=False, missing=None),
    "tipJarViewKey": fields.Str(required=False, missing=None)  # TODO: add viewkey validation here
})
def set_user_settings(user_id, email_subscriptions, refund_address, tip_jar_address, tip_jar_view_key):
    """Partially update the authenticated user's settings.

    Validation: refund address must be a valid z-address and, once set, can
    only be changed (never cleared); the tip jar address may be cleared with
    '' but must otherwise be a valid z-address.
    """
    if email_subscriptions:
        try:
            email_subscriptions = keys_to_snake_case(email_subscriptions)
            g.current_user.settings.email_subscriptions = email_subscriptions
        except ValidationException as e:
            return {"message": str(e)}, 400
    if refund_address is not None and refund_address != '' and not is_z_address_valid(refund_address):
        return {"message": "Refund address is not a valid z address"}, 400
    if refund_address == '' and g.current_user.settings.refund_address:
        return {"message": "Refund address cannot be unset, only changed"}, 400
    if refund_address:
        g.current_user.settings.refund_address = refund_address
    # FIX: the original used `tip_jar_address is not ''` -- identity
    # comparison against a str literal is implementation-dependent (and a
    # SyntaxWarning on CPython 3.8+). Use value comparison like the
    # refund_address branch above.
    if tip_jar_address is not None and tip_jar_address != '' and not is_z_address_valid(tip_jar_address):
        return {"message": "Tip address is not a valid z address"}, 400
    if tip_jar_address is not None:
        g.current_user.settings.tip_jar_address = tip_jar_address
    if tip_jar_view_key is not None:
        g.current_user.settings.tip_jar_view_key = tip_jar_view_key
    db.session.commit()
    return user_settings_schema.dump(g.current_user.settings)
@blueprint.route("/<user_id>/arbiter/<proposal_id>", methods=["PUT"])
@auth.requires_same_user_auth
@body({
    "isAccept": fields.Bool(required=False, missing=None)
})
def set_user_arbiter(user_id, proposal_id, is_accept):
    """Accept or reject an arbiter nomination for a proposal.

    Accepting notifies every proposal team member by email. Domain errors
    from the nomination state machine surface as 400s.
    """
    try:
        proposal = Proposal.query.filter_by(id=int(proposal_id)).first()
        if not proposal:
            return {"message": "No such proposal"}, 404
        if is_accept:
            proposal.arbiter.accept_nomination(g.current_user.id)
            # notify the whole proposal team of the new arbiter
            for user in proposal.team:
                send_email(user.email_address, 'proposal_arbiter_assigned', {
                    'user': user,
                    'proposal': proposal,
                    'proposal_url': make_url(f'/proposals/{proposal.id}')
                })
            return {"message": "Accepted nomination"}, 200
        else:
            proposal.arbiter.reject_nomination(g.current_user.id)
            return {"message": "Rejected nomination"}, 200
    except ValidationException as e:
        return {"message": str(e)}, 400
|
from django.forms import ModelForm
from models import Message
class MessageForm(ModelForm):
    """ModelForm over Message exposing only the body field for user input."""
    class Meta(object):
        model = Message
        fields = ("body",)
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# fmt: off
# isort: skip_file
import builtins as _builtins, typing as _typing
import grpc as _grpc
from grpc import aio as _grpc_aio
from .ledger_identity_service_pb2 import GetLedgerIdentityRequest, GetLedgerIdentityResponse
__all__ = [
"LedgerIdentityServiceStub",
]
# noinspection PyPep8Naming,DuplicatedCode
class LedgerIdentityServiceStub:
    """Generated typing facade: __new__ overloads resolve to the blocking or
    async stub depending on the channel type passed in (see file header)."""
    @classmethod
    @_typing.overload
    def __new__(cls, channel: _grpc.Channel) -> _LedgerIdentityServiceBlockingStub: ...  # type: ignore
    @classmethod
    @_typing.overload
    def __new__(cls, channel: _grpc_aio.Channel) -> _LedgerIdentityServiceAsyncStub: ...  # type: ignore
    def GetLedgerIdentity(self, __1: GetLedgerIdentityRequest, *, timeout: _typing.Optional[float] = ..., metadata: _typing.Optional[_typing.Tuple[_typing.Tuple[str, _typing.Union[str, bytes]], ...]] = ..., credentials: _typing.Optional[_grpc.CallCredentials] = ..., wait_for_ready: _typing.Optional[bool] = ..., compression: _typing.Optional[_grpc.Compression] = ...) -> _typing.Union[GetLedgerIdentityResponse, _grpc_aio.UnaryUnaryCall[_typing.Any, GetLedgerIdentityResponse]]: ...
# noinspection PyPep8Naming,DuplicatedCode
class _LedgerIdentityServiceBlockingStub(LedgerIdentityServiceStub):
    """Generated typing stub for the synchronous (blocking) client variant."""
    def GetLedgerIdentity(self, __1: GetLedgerIdentityRequest, timeout: _typing.Optional[float] = ..., metadata: _typing.Optional[_typing.Tuple[_typing.Tuple[str, _typing.Union[str, bytes]], ...]] = ..., credentials: _typing.Optional[_grpc.CallCredentials] = ..., wait_for_ready: _typing.Optional[bool] = ..., compression: _typing.Optional[_grpc.Compression] = ...) -> GetLedgerIdentityResponse: ...
# noinspection PyPep8Naming,DuplicatedCode
class _LedgerIdentityServiceAsyncStub(LedgerIdentityServiceStub):
    """Generated typing stub for the asyncio client variant."""
    def GetLedgerIdentity(self, __1: GetLedgerIdentityRequest, *, timeout: _typing.Optional[float] = ..., metadata: _typing.Optional[_grpc_aio.Metadata] = ..., credentials: _typing.Optional[_grpc.CallCredentials] = ..., wait_for_ready: _typing.Optional[bool] = ..., compression: _typing.Optional[_grpc.Compression] = ...) -> _grpc_aio.UnaryUnaryCall[_typing.Any, GetLedgerIdentityResponse]: ...  # type: ignore
|
import xutils
import urllib2
from BeautifulSoup import BeautifulSoup
def getFileList():
    """Download the master index (000_list.txt) from GitHub and return it as
    a list of text lines. (Python 2: uses urllib2 and BeautifulSoup 3.)"""
    # browser-like headers to avoid naive bot filtering
    headers2 = {
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
        ,'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0'
        ,'Accept-Language': 'en-US,en;q=0.5'
    }
    url = "https://raw.githubusercontent.com/taikhoanonlinevn/all/master/plugin.video.fvideo/plugin.video.fvideo/resources/000_list.txt"
    req = urllib2.Request(urllib2.unquote(url),headers=headers2)
    f = urllib2.urlopen(req)
    # BeautifulSoup decodes HTML entities in the raw text
    soup = BeautifulSoup(f.read(), convertEntities=BeautifulSoup.HTML_ENTITIES)
    lines = soup.text.split('\n')
    return lines
def getFilmList(vfilename):
    """Download a film list file from GitHub and return it as a JSON string:
    {"files": [...], "folders": [...]} where each entry is
    {"name": ..., "icon": "", "href": ...}.

    Lines are "name##href"; fshare.vn file links go to "files", folder links
    to "folders"; malformed lines are skipped.
    """
    import json  # local import: keeps the module's top-level imports untouched
    headers2 = {
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
        ,'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0'
        ,'Accept-Language': 'en-US,en;q=0.5'
    }
    url = "https://raw.githubusercontent.com/taikhoanonlinevn/all/master/plugin.video.fvideo/plugin.video.fvideo/resources/" + vfilename
    req = urllib2.Request(urllib2.unquote(url), headers=headers2)
    f = urllib2.urlopen(req)
    soup = BeautifulSoup(f.read(), convertEntities=BeautifulSoup.HTML_ENTITIES)
    files = []
    folders = []
    for line in soup.text.split('\n'):
        try:
            parts = line.split("##")
            href = parts[1].strip()
            name = parts[0].encode("utf-8")
            entry = {"name": name, "icon": "", "href": href}
            if href.find('fshare.vn/file') > 0:
                files.append(entry)
            elif href.find('fshare.vn/folder') > 0:
                folders.append(entry)
        except Exception:
            # malformed line (no "##" separator) -- skip, as before
            pass
    # FIX: the original concatenated the JSON by hand, producing invalid JSON
    # whenever a title contained a double quote. json.dumps escapes properly;
    # sort_keys keeps "files" before "folders" as in the original layout.
    return json.dumps({"files": files, "folders": folders},
                      sort_keys=True, separators=(',', ':'))
import re
import numpy as np
import itertools as it
from random import random
from sympy import Symbol, poly
class SymmetryGroup(object):
    """
    Class that contains symmetry groups

    :param descr: Name of the symmetry group
    :type descr: str
    :param listofgenerators: Python list containing the generators of the group
    :type listofgenerators: list
    """
    def __init__(self, descr, listofgenerators):
        self.__generators = listofgenerators
        self.__name = descr
        self.numgens = len(listofgenerators)
        self.__buildGroup()
        self.numelem = len(self.__transforms)

    @classmethod
    def isDummy(cls, descr):
        """
        Classmethod for isotropic symmetry group. Creates a dummy SymmetryGroup object.

        :param descr: Name of the symmetry group
        :return: SymmetryGroup containing only the 3x3 identity
        """
        return cls(descr, [np.identity(3)])

    def __buildGroup(self):
        """
        Close the generator set under matrix multiplication (fixed point).

        BUG FIX: the original aliased self.__transforms to the generator list,
        so every element discovered during closure was ALSO appended to
        self.__generators, corrupting getGenerators()/numgens semantics. It
        also appended to the list while iterating over it. Fixed by copying
        the generators and iterating over a per-pass snapshot.
        """
        self.__transforms = list(self.__generators)  # copy -- do not alias
        prev_size = -1
        while prev_size != len(self.__transforms):
            prev_size = len(self.__transforms)
            snapshot = list(self.__transforms)
            for t1 in snapshot:
                for t2 in snapshot:
                    cand = t1 @ t2
                    # add the product only if no numerically-close match exists
                    if not any(np.isclose(known, cand).all() for known in self.__transforms):
                        self.__transforms.append(cand)

    def getGenerators(self):
        """Return the original generator list (not the full group)."""
        return self.__generators

    def getName(self):
        return self.__name

    def getElements(self):
        """Return all group elements found by closure."""
        return self.__transforms
|
#!/usr/bin/env python
# https://www.postgresqltutorial.com/postgresql-python/
# -----------------------------------------------------------------------
# bookbag.py
# Author: Sophie Li, Jayson Wu, Connie Xu
# -----------------------------------------------------------------------
from sys import stderr
# takes database as input, creates a listingphotos table, returns True if
# executed properly, False if error occurred
def create(database):
    """Drop and recreate the ListingPhotos table.

    Returns True when every statement executed; False on the first failure
    (the error is printed to stderr). The cursor is always closed, and the
    transaction is only committed on success.
    """
    cursor = database._connection.cursor()
    commands = (
        'DROP TABLE IF EXISTS ListingPhotos',
        """
        CREATE TABLE ListingPhotos (
        public_id TEXT PRIMARY KEY,
        listing_id BIGINT NOT NULL,
        url TEXT NOT NULL
        )
        """
    )
    ok = True
    try:
        for command in commands:
            try:
                cursor.execute(command)
                print("EXECUTED: " + str(command))
            except Exception as e:
                print(str(e), file=stderr)
                ok = False
                break
    finally:
        # FIX: the original returned on error without closing the cursor
        cursor.close()
    if not ok:
        return False
    database._connection.commit()
    return True
# inserts a row into the listingphotos table
def insert_row(database, info):
    """Insert (public_id, listing_id, url) into listingphotos; returns the
    result of the database's command executor."""
    command = ('INSERT INTO listingphotos (public_id, listing_id, url) '
               'VALUES (%s, %s, %s)')
    return database._execute_command(command, info)
# updates a row in the listingphotos table
def update_row(database, info):
    """Update listing_id and url for the row matching public_id.
    `info` order matches the placeholders: (listing_id, url, public_id)."""
    command = ('UPDATE listingphotos '
               'SET listing_id = %s, url = %s '
               'WHERE public_id = %s')
    return database._execute_command(command, info)
# deletes a row from the listingphotos table
def delete_row(database, info):
    """Delete the listingphotos row whose public_id matches info[0]."""
    return database._execute_command(
        'DELETE FROM listingphotos WHERE public_id = %s', info)
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 20:14:48 2018
@author: user
連加計算
"""
def compute(a, b):
    """Print the sum of the integers from a through b inclusive.

    Prints 0 when a > b (empty range), matching the original loop.
    """
    # FIX: the original accumulated into a variable named `sum`, shadowing
    # the builtin; use the builtin over the range directly.
    print(sum(range(a, b + 1)))
# Read the two endpoints from stdin (one integer per line) and print the sum.
a=int(input())
b=int(input())
compute(a,b)
from main import PKT_DIR_INCOMING, PKT_DIR_OUTGOING
# TODO: Feel free to import any Python standard moduless as necessary.
import struct
import socket
import time
import pickle
from firewall import *
# hard coded constants
# Load a pickled capture: raw packets plus the firewall rules / geo DB that
# were in effect when they were recorded. (Python 2 script.)
data = pickle.load(open('testpacket.p', 'rb'))
packets = data['packets']
rules = data['rules']
geos = data['geos']
# Dump the raw bytes of every TCP packet. Each entry is (raw_packet, direction).
for packet in packets:
    prot = get_protocol(packet[0])
    if prot == TCP_PROTOCOL:# and is_dns(packet[1],packet[0]):
        pkt = packet[0]
        pkt_dir = packet[1]
        print pkt
def make_tcp_resonse(pkt):
    """Build a TCP RST response for `pkt` (Python 2: `pkt` is a raw byte string).

    Returns a new IP header + 20-byte TCP header with src/dst swapped, the
    RST flag set, and ackno = incoming seqno + 1. (Function name typo
    "resonse" kept: callers may reference it.)
    """
    ip_hdrlen = get_ip_header_length(pkt)
    # create IP header from the incoming one, then patch fields in place
    ip_hdr = pkt[:ip_hdrlen]
    ip_flags = struct.unpack('!B', pkt[6])[0]
    ip_flags = ip_flags & 0b00011111 # clear the 3 IP flag bits, keep fragment-offset high bits
    ip_hdr = set_string(ip_hdr, struct.pack('!B', 0), 1, 2) # TOS = 0
    ip_hdr = set_string(ip_hdr, struct.pack('!B', ip_flags), 6, 7) # ip flags = 0
    ip_hdr = set_string(ip_hdr, struct.pack('!H', ip_hdrlen + 20), 2, 4) # total length = IP hdr + bare TCP hdr
    ip_hdr = set_string(ip_hdr, pkt[12:16], 16, 20) # switch src dst
    ip_hdr = set_string(ip_hdr, pkt[16:20], 12, 16) # switch src dst
    ip_hdr = set_string(ip_hdr, struct.pack('!H', ip_checksum(ip_hdr)), 10, 12) # checksum over patched header
    # create TCP header (first 20 bytes of the incoming TCP segment)
    tcp_hdr = pkt[ip_hdrlen: ip_hdrlen + 20]
    seqno = struct.unpack('!L', pkt[ip_hdrlen + 4: ip_hdrlen + 8])[0] # old seqno
    ackno = seqno + 1
    offset = struct.unpack('!B', tcp_hdr[12])[0]
    offset = offset & 0b00001111 # clear data-offset nibble (no TCP options)
    tcp_hdr = set_string(tcp_hdr, struct.pack('!L', 0), 4, 8) # seqnum = 0
    tcp_hdr = set_string(tcp_hdr, struct.pack('!L', ackno), 8, 12) # ackno = oldseqno + 1
    tcp_hdr = set_string(tcp_hdr, struct.pack('!B', offset), 12, 13) # offset = 0
    tcp_hdr = set_string(tcp_hdr, struct.pack('!H', 0), 14, 16) # window = 0
    tcp_hdr = set_string(tcp_hdr, struct.pack('!H', 0), 18, 20) # urgent = 0
    tcp_hdr = set_string(tcp_hdr, struct.pack('!B', 4), 13, 14) # flags byte = 4 -> RST only
    tcp_hdr = set_string(tcp_hdr, pkt[ip_hdrlen:ip_hdrlen+2], 2, 4) # switch src dst ports
    tcp_hdr = set_string(tcp_hdr, pkt[ip_hdrlen+2:ip_hdrlen+4], 0, 2) # switch src dst ports
    # checksum computed over IP pseudo-header + TCP header -- assumes
    # tcp_checksum() takes the concatenated buffer; TODO confirm
    tcp_hdr = set_string(tcp_hdr, struct.pack('!H', tcp_checksum(ip_hdr + tcp_hdr)), 16, 18) # checksum
    return ip_hdr + tcp_hdr
# NOTE(review): `resp` is never defined anywhere in this file, so this line
# raises NameError -- presumably meant len(make_tcp_resonse(pkt)). TODO confirm.
print len(resp)
|
# Generated by Django 3.0 on 2019-12-10 21:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0): drop the legacy `active`/`staff` flags
    from CustomUser and normalise `is_superuser` to Django's stock field.
    Do not hand-edit beyond comments; schema state must match history."""

    dependencies = [
        ('core', '0006_customuser_is_staff'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='customuser',
            name='active',
        ),
        migrations.RemoveField(
            model_name='customuser',
            name='staff',
        ),
        migrations.AlterField(
            model_name='customuser',
            name='is_superuser',
            field=models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status'),
        ),
    ]
|
"""用于测试视图,测试部件"""
import unittest
from django.test import TestCase
from django.contrib.auth.models import User
class MyViewTest(TestCase):
    """View tests for the login page and the /login_action/ endpoint."""

    def setUp(self):
        # User.objects.create_user("admin","admin@qq.com","admin123456")
        pass

    def tearDown(self):
        pass

    # Case 1 - the login page is reachable (HTTP 200)
    def test_login_status_code(self):
        # NOTE(review): "index/" is a relative path — confirm it should not
        # be "/index/" like the other URLs below.
        rsp = self.client.get("index/")
        self.assertEqual(rsp.status_code, 200)

    # Case 2 - the login page renders the expected template
    def test_login_page(self):
        rsp = self.client.get("index/")
        self.assertTemplateUsed(rsp, "index.html")

    # Case 3 - empty username and password: login fails
    def test_login_fail_because_username_password_is_null(self):
        test_data = {
            "username": "",
            "password": ""
        }
        rsp = self.client.post("/login_action/", data=test_data)
        # BUG FIX: assertIn(member, container) — the original swapped the
        # arguments and compared bytes (rsp.content) against str, which
        # raises TypeError; decode the response body first.
        self.assertIn("用户名或者密码不能为空~", rsp.content.decode("utf-8"))

    # Case 4 - wrong username and password: login fails
    def test_login_fail_because_username_password_is_fault(self):
        test_data = {
            "username": "error",
            "password": "error"
        }
        rsp = self.client.post("/login_action/", data=test_data)
        self.assertIn("用户名或者密码错误~", rsp.content.decode("utf-8"))

    # Case 5 - valid credentials: login succeeds with a redirect (302)
    def test_login_success(self):
        test_data = {
            "username": "admin",
            "password": "admin123456"
        }
        rsp = self.client.post("/login_action/", data=test_data)
        self.assertEqual(rsp.status_code, 302)
from flask import Flask, render_template, session, redirect, url_for, flash, send_file, send_from_directory
from flask_bootstrap import Bootstrap
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField, SelectMultipleField
from flask_wtf import FlaskForm
from wtforms.validators import DataRequired
import os
from io import BytesIO
from subprocess import call
from docker import *
import tarfile
import gzip
import shutil
import boto3, botocore
from botocore.client import Config
from config import S3_KEY, S3_SECRET, S3_BUCKET, S3_LOCATION
from werkzeug.utils import secure_filename
# Uses wtfforms, bootstrap, jinja
# python3 -m venv venv
# source venv/bin/activate
# pip install flask
# pip install flask-wtf
# pip install flask-bootstrap
# pip install docker
# Uses code from http://zabana.me/notes/upload-files-amazon-s3-flask.html
# Flask application, S3 client and docker client wiring.
app = Flask(__name__)
app.config.from_object("config")
bootstrap = Bootstrap(app)
client = from_env()
s3 = boto3.client("s3", aws_access_key_id=S3_KEY, aws_secret_access_key=S3_SECRET, config=Config(signature_version='s3v4'))
# NOTE(review): this rebinds `client`, discarding the from_env() client two
# lines above — confirm which docker client is actually intended.
client = APIClient(base_url='unix://var/run/docker.sock')
# Dictionary that maps the name of the app (key) to a list of required images' names (value)
# Customise for each use case
dictionary = { 'app1' : ['alpine:3.9.2'],
    'app2' : ['alpine:3.9'],
    'app3' : ['alpine:3.8.4'],
    'app4' : ['alpine:3.7.3'],
    'app5' : ['alpine:3.6.5'],
    'app6' : ['alpine:3.6'] }
imagesDownloaded=[]  # image tags already pulled during this process's lifetime
filename = 'clientImages.tar'  # scratch tar file reused by every /download request
class SelectForm(FlaskForm):
    """Multi-select form listing the apps whose docker images to bundle."""
    # Choices are in (value1, value2) pairs,
    # the first value in the tuple is the one put into a list and stored in form.filesToDownload.data
    # the second value in the tuple is the one displayed on the web form
    filesToDownload = SelectMultipleField("Choose docker images to download",
        choices=[('app1','app1'), ('app2','app2'),('app3','app3'),('app4','app4'),('app5','app5'),('app6','app6')],
        validators=[DataRequired()])
    submit = SubmitField('Submit')
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom "not found" page with a 404 status code."""
    body = render_template('404.html')
    return body, 404
@app.errorhandler(500)
def internal_server_error(e):
    """Serve the custom error page with a 500 status code."""
    body = render_template('500.html')
    return body, 500
@app.route('/', methods=['GET', 'POST'])
def index():
    """Show the app-selection form; on submit, stash the chosen app names
    in the session and redirect to /download."""
    # Need to change session context into another context, annoying to keep data after webpage/server refresh/restart
    form = SelectForm()
    if form.validate_on_submit():
        session['filesToDownload'] = form.filesToDownload.data
        flash("Download will commence shortly")
        return redirect(url_for('download'))
    return render_template('index.html', form=form, filesToDownload=session.get('filesToDownload'))
@app.route('/download', methods=['GET', 'POST'])
def download():
    """Pull the selected apps' docker images, bundle them into a gzipped
    tar archive, upload it to S3 and return the presigned URL (as text).
    """
    # filesToDownload is a list of app-name strings chosen on the index form
    apps = session.get('filesToDownload')
    # Save the raw image tarballs into one tar file.
    # BUG FIX: the original bound this file object to the name `tarfile`,
    # shadowing the imported tarfile module; renamed, and `with` guarantees
    # the file is closed even on error.
    with open(filename, 'wb') as tar_out:
        for selected_app in apps:
            images = dictionary[selected_app]
            for image in images:
                if image not in imagesDownloaded:
                    imagesDownloaded.append(image)
                    strs = image.split(":")
                    client.pull(strs[0], tag=strs[1])
                    # we can use get_image directly to get the image with a specific version
                    tarball = client.get_image(image)
                    for chunk in tarball:
                        tar_out.write(chunk)
    # GZip-compress the tar into an in-memory file object for uploading to S3.
    compressedFile = BytesIO()
    with open(filename, 'rb') as f_in:
        # BUG FIX: the GzipFile must be fully closed BEFORE the upload,
        # otherwise the gzip trailer is missing and the archive is truncated;
        # the original uploaded from inside the gzip context.
        with gzip.GzipFile(fileobj=compressedFile, mode="wb") as f_out:
            shutil.copyfileobj(f_in, f_out)
    compressedFile.seek(0)
    output = upload_file_to_s3(compressedFile)
    # Ensures that existing tar files do not get lumped into new download request
    # os.remove should be compatible with all OS
    os.remove(filename)
    return str(output)
def upload_file_to_s3(file):
    """Upload *file* (a file-like object holding the gzipped tar) to S3 and
    return a presigned GET url; on failure the exception object is returned
    instead of a url.
    """
    fileName = filename + '.gz'
    try:
        s3.upload_fileobj(
            file,
            S3_BUCKET,
            fileName,
            ExtraArgs={
                "ContentType": 'application/tar',
                'ContentEncoding' : 'gzip'
            }
        )
        # generate the presigned url
        url = s3.generate_presigned_url(
            ClientMethod='get_object',
            Params={
                'Bucket': S3_BUCKET,
                'Key': fileName
            }
        )
    except Exception as e:
        # This is a catch all exception, edit this part to fit your needs.
        # NOTE(review): returning the exception makes the caller render the
        # error text instead of a URL — confirm this is the desired UX.
        print("Something Happened: ", e)
        return e
    return url
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run()
from django import forms
class GroupADD(forms.Form):
    """Form for adding a group by name (max 80 characters)."""
    name = forms.CharField(max_length=80)
class GroupDelete(forms.Form):
    """Form for deleting a group by name (max 80 characters)."""
    name = forms.CharField(max_length=80)
|
from django.contrib import admin
from .models import *
import bulk_admin
@admin.register(Myteacher)
class ImageAdmin(bulk_admin.BulkModelAdmin):
    """Bulk admin for Myteacher, searchable by name.
    NOTE(review): a second class below reuses the name ImageAdmin and
    shadows this one at module level; registration still happens via the
    decorator, but consider renaming for clarity."""
    search_fields = ('name',)
#@admin.register(Achievements)
#class ImageAdmin(bulk_admin.BulkModelAdmin):
# search_fields = ('name')
@admin.register(Employer)
class ImageAdmin(bulk_admin.BulkModelAdmin):
    """Bulk admin for Employer, searchable by name and pin."""
    search_fields = ('name','pin')
# Plain registrations with the default ModelAdmin.
# FIX: the original passed verbose_name_plural="storiy" to
# admin.site.register(); register() forwards extra keywords as attributes of
# an auto-generated ModelAdmin subclass, and ModelAdmin has no such option,
# so the keyword silently did nothing.  The plural label belongs on the
# model:  class Meta: verbose_name_plural = "storiy"  (in About).
admin.site.register(About)
# admin.site.register(Myteacher)
admin.site.register(Mystudent)
admin.site.register(Myfriend)
admin.site.register(Profile)
admin.site.register(Waiter)
admin.site.register(Restaurant)
admin.site.register(Place)
admin.site.register(Engine)
admin.site.register(Engine2)
admin.site.register(Car)
admin.site.register(Car2)
admin.site.register(Example)
#admin.site.register(Employer)
#admin.site.register(Achievements)
admin.site.register(TeachOpc)
admin.site.register(TeachLstp)
admin.site.register(AcadAct)
admin.site.register(Enclosures)
admin.site.register(GenInfo)
admin.site.register(Image)
admin.site.register(TeachTlm)
admin.site.register(Orie)
|
__author__ = "Narwhale"
def binary_search(alist, item):
    """Classic binary search over a sorted list.

    Returns the index of *item* in *alist*, or None when absent.
    """
    lo, hi = 0, len(alist) - 1
    while lo <= hi:
        middle = (lo + hi) // 2
        candidate = alist[middle]
        if candidate == item:
            return middle
        if candidate > item:
            hi = middle - 1
        else:
            lo = middle + 1
    return None
# Demo: search the sorted list for 8 (prints its index, 7).
a = [1,2,3,4,5,6,7,8,9,45,47,56,87,89]
b = binary_search(a,8)
print(b)
|
# I'll run two pieces of code that do the same thing —
# one in pure Python and one with numpy —
# note the time difference when running them separately.
# Pure-Python version: sum 1..100_000_000 with an explicit loop
# (deliberately slow; the point is the timing comparison).
soma = 0
for i in range(1,100000001):
    soma += i
print(soma)
# -*- coding:utf-8 -*-
import json
from flask import Blueprint, abort, request
from model.user import User
from model.gcm_user import GcmUser
from model.apns_user import ApnsUser
# Blueprint for the /api/user endpoints.
app = Blueprint("user", __name__, url_prefix='/api/user')


@app.route('/', methods=['GET'])
def index():
    """GET /api/user/ — list all users as JSON (id + creation timestamp)."""
    users = [{'id': user.id, 'created_at': user.created_at.strftime('%Y-%m-%d %H:%M:%S')} for user in User.query.all()]
    return json.dumps(users)
@app.route('/', methods=['POST'])
def create():
    """POST /api/user/ — create a User with an optional push registration.

    Accepts either `apns_token` (iOS) or `registration_id` (GCM/Android);
    supplying both is rejected with a 500.
    """
    import model
    values = request.values
    apns_token = values.get('apns_token')
    registration_id = values.get('registration_id')
    if apns_token and registration_id:
        return abort(500)
    user = User()
    model.add(user)
    if apns_token:
        apns_user = ApnsUser(user, apns_token)
        model.add(apns_user)
    elif registration_id:
        gcm_user = GcmUser(user, registration_id)
        model.add(gcm_user)
    model.commit()
    # Reuse the show() view to render the freshly created user.
    return show(user.id)
@app.route('/<user_id>', methods=['GET'])
def show(user_id):
    """GET /api/user/<id> — JSON for one user, including any push tokens;
    404 when the user does not exist."""
    user = User.query.filter_by(id=user_id).first()
    if user is None:
        return abort(404)
    data = {'id': user.id, 'created_at': user.created_at.strftime('%Y-%m-%d %H:%M:%S')}
    if user.apns_user is not None:
        data['apns_token'] = user.apns_user.apns_token
    if user.gcm_user is not None:
        data['gcm_user'] = user.gcm_user.registration_id
    return json.dumps(data)
|
MPLib MODULES _______________________________________________________________________
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
MPLib READ FILES _______________________________________________________________________
df = pd.read_csv("Resources/filename.csv")
df.head()
MPLib / Pandas Manipulators _________________________________________________________
df.sum()
df.mean()
df['column name'].value_counts()
df['column name'].nunique()
df['column name'].unique()
df.add(otherdf, fill_value = 0)
df.groupby()
df.keys
df.set_index('column name')
df.loc
df.drop(df.index[])
pd.to_numeric
pd.merge(file1, file2, on='column name') # or if col name diff // left_on = "colname" right_on = "col name"
del df['column']
df.rename(columns={'column original':'Country Code'})
MPLib PLOT TYPES ____________________________________________________________________
plt.plot(kind=' ', )
plt.bar()
plt.pie() # (members, explode=explode, labels=' ', colors=colors, autopct='%1.1f%%', shadow=True, startangle=90)
plt.scatter()
plt.xticks()
plt.show()
plt.legend(handles=handles, loc="best")
MPLib / Pandas PLOT TYPES ___________________________________________________________
MPLib EMPTY ARRAY FOR X LABELS ______________________________________________________
x_axis = np.arange
tick_locations = []
for x in x_axis:
    tick_locations.append(x)
x_axis = np.arange(len(rain_df))
tick_locations = [value for value in x_axis]
MPLib PLOT FORMATTING _______________________________________________________________
plt.title(' ')
plt.axis("equal")
plt.xlabel(' ')
plt.ylabel(' ')
plt.hlines(0, 0, x_lim, alpha = 0.2)
plt.xlim(-0.75, len()-0.25)
plt.ylim(-0.75, len()-0.25)
plt.tight_layout
MPLib EXPORT FILES _______________________________________________________________
plt.savefig("../Images/lineConfig.png")
Pandas & MPLib PLOT _______________________________________________________________
# API week 6 session 1 _______________________________________________________________
# # API : Application Programming Interface
# Basic Idea, you tell the computer to go find info on a server, the API pulls the request and sends it back to you
# Data is stored in entries that resemble Python Dictionary / Key-pairs
#
# # Start of code notation ---------------------------------------------
# url = "api_url" # To import API
# requests.get(url) # API response object
# requests.get(url).json() # Cleans up API response object
# requests.get(url + "specific_api_key") # Calls single entry based on specific api key
# json.dumps(_____, indent=#, sort_keys=True) # prints out API in JSON with indented format
# reponse_json['key index'][# of index]['sub index']
# print(f"some text string {pipe} string")
# # gflAuK75xVQRMEQLpPyueZmtLpAyXIxG 0r2GHTXJ6VZR3NDM
# /articlesearch.json?q={query}&fq={filter}
# https://api.nytimes.com/svc/search/v2/articlesearch.json?q=election&api-key=yourkey
# #
# #
# #
# #
# # |
from pyparsing import Word, alphas,printables,Suppress
# Tiny DSL demo: parse "select <fields> from <dataset>" with pyparsing.
myDSL = "select * from employees"
action = Word(alphas)      # leading verb, e.g. "select"
fields = Word(printables)  # field-list token, e.g. "*"
FROM = Suppress("from")    # matched but excluded from the parse result
dataset = Word(alphas)     # table name
query_pattern = action + fields + FROM + dataset
print(query_pattern.parseString(myDSL))
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from StringIO import StringIO
import os
import urllib
import webapp2
import jinja2
from google.appengine.ext import ndb
from google.appengine.api import memcache, users
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
import models
import view
class BaseHandler(webapp2.RequestHandler):
    """Base for all handlers: resolves the Users-API user once per request."""
    def __init__(self, request, response):
        self.initialize(request, response)
        self.user = users.get_current_user()  # None when not signed in
        self.loggedin = bool(self.user)
# def handle_exception(self, exception, debug):
# # Log the error.
# logging.exception(exception)
# # Set a custom message.
# response.write('An error occurred.')
# # If the exception is a HTTPException, use its error code.
# # Otherwise use a generic 500 error code.
# if isinstance(exception, webapp2.HTTPException):
# response.set_status(exception.code)
# else:
# response.set_status(500)
class MainHandler(BaseHandler):
    """Landing page with static hello-world content."""
    def get(self):
        p = view.ElPage(self.user, self.request.path)
        p.title = ''
        p.content = '<p>Hello, World!</p>'
        p.navactive = 'home'
        self.response.write(p.render())
class HymnListHandler(BaseHandler):
    """List every hymn in the hymn book."""
    def get(self):
        p = view.ElHymnListPage(self.user)
        p.title = 'Hymn Book'
        p.hymn_q = models.Hymn.list()
        self.response.write(p.render())
class HymnHandler(BaseHandler):
    """Show one hymn, looked up by its urlsafe ndb key (?k=...)."""
    def get(self):
        k = self.request.get('k')
        try:
            h = ndb.Key(urlsafe=k).get()
        except:
            # any malformed key becomes a 400 (bare except by design here)
            self.response.set_status(400)
            return
        p = view.ElHymnPage(h, self.user)
        self.response.write(p.render())
class EditHymnHandler(BaseHandler):
    """GET renders the edit form for a hymn; POST saves the changes."""
    def get(self):
        if not self.loggedin:
            self.response.set_status(403)
            return
        k = self.request.get('k')
        try:
            h = ndb.Key(urlsafe=k).get()
        except:
            self.response.set_status(400)
            return
        p = view.ElPage(self.user)
        p.title = 'Edit Hymn'
        p.navactive = 'hymns'
        f = view.ElForm()
        f.action = "/hymns/edit?k=" + k
        f.add(view.ElFormElem('input', 'title', 'Title: ', h.title))
        f.add(view.ElFormElem('textarea', 'text', 'Text: ', h.text))
        f.add(view.ElFormElem('input', 'tags', 'Tags: ', h.tags))
        p.content = f.render()
        self.response.write(p.render())
    def post(self):
        # NOTE(review): unlike get(), post() performs no self.loggedin check —
        # confirm whether unauthenticated edits should be rejected here too.
        k = self.request.get('k')
        try:
            h = ndb.Key(urlsafe=k).get()
        except:
            self.response.set_status(400)
            return
        h.title = self.request.get('title')
        h.text = self.request.get('text')
        h.tags = self.request.get('tags')
        h.put()
class AddHymnHandler(BaseHandler):
    """GET renders the blank add-hymn form; POST creates the hymn.
    Both verbs require a signed-in user."""
    def get(self):
        if not self.loggedin:
            self.response.set_status(403)
            return
        p = view.ElPage(self.user, '/hymns/add')
        p.title = 'Add Hymn'
        p.navactive = 'hymns'
        f = view.ElForm()
        f.action = "/hymns/add"
        f.add(view.ElFormElem('input', 'title', 'Title: ', ''))
        f.add(view.ElFormElem('textarea', 'text', 'Text: ', ''))
        f.add(view.ElFormElem('input', 'tags', 'Tags: ', ''))
        f.add(view.ElFormElem('number', 'index', 'Index: ', ''))
        p.content = f.render()
        self.response.write(p.render())
    def post(self):
        if not self.loggedin:
            self.response.set_status(403)
            return
        h = models.Hymn.create()
        h.title = self.request.get('title')
        h.text = self.request.get('text')
        h.tags = self.request.get('tags')
        # 'index' arrives as a string; ValueError here is unhandled (500)
        h.index = int(self.request.get('index'))
        h.put()
class TeamListHandler(BaseHandler):
    """List up to the first 10 teams."""
    def get(self):
        p = view.ElTeamListPage(self.user)
        t_q = models.Team.list()
        p.team_q = t_q.fetch(10)
        self.response.write(p.render())
class TeamHandler(BaseHandler):
    """Show one team's page (looked up via ?t=<urlsafe key>) with players."""
    def get(self):
        k = self.request.get('t')
        try:
            t = ndb.Key(urlsafe=k).get()
        except:
            self.response.set_status(400)
            return
        p = view.ElTeamPage(t, self.user)
        p.players = models.Player.list(t, 0)
        self.response.write(p.render())
class ProfileHandler(BaseHandler):
    """Show a profile page looked up via ?p=<urlsafe key>."""
    def get(self):
        k = self.request.get('p')
        try:
            h = ndb.Key(urlsafe=k).get()
        except:
            self.response.set_status(400)
            return
        p = view.ElProfilePage(h, self.user)
        self.response.write(p.render())
class TeamEditHandler(BaseHandler):
    """GET renders the team-edit form; POST applies the edits and redirects."""
    def get(self):
        k = self.request.get('t')
        try:
            t = ndb.Key(urlsafe=k).get()
        except:
            self.error(400)
            return
        # NOTE(review): '/team/edit?t'+k is missing the '=' — confirm whether
        # ElPage uses this path for anything that would care.
        p = view.ElPage(self.user, '/team/edit?t'+k)
        p.navactive = 'profiles'
        p.title = "Edit Team"
        f = view.ElForm()
        f.action = '/team/edit'
        f.add(view.ElFormElem('hidden', 't', '', k))
        f.add(view.ElFormElem('input', 'name', 'Name', t.name))
        f.add(view.ElFormElem('textarea', 'summary', 'Summary', t.summary))
        p.content = f.render()
        self.response.write(p.render())
    def post(self):
        k = self.request.get('t')
        try:
            t = ndb.Key(urlsafe=k).get()
        except:
            self.error(400)
            return
        t.summary = self.request.get('summary')
        t.name = self.request.get('name')
        t.put()
        # BUG FIX: the original concatenated the Team entity itself
        # ('/team?t=' + t), raising TypeError; redirect with the urlsafe key.
        self.response.redirect('/team?t=' + k)
class TeamSetImageFormHandler(BaseHandler):
    """Render the blobstore upload form for changing a team's picture."""
    def get(self):
        k = self.request.get('t')
        try:
            t = ndb.Key(urlsafe=k).get()
        except:
            self.error(400)
            return
        p = view.ElPage(self.user, '/team/editpic?t' + k)
        p.navactive = 'profiles'
        p.title = 'Edit Team Image'
        f = view.ElForm()
        # The form posts directly to a blobstore upload URL.
        f.action = blobstore.create_upload_url('/team/editpic')
        f.add(view.ElFormElem('hidden', 't', '', k))
        f.add(view.ElFormElem('image', 'picture', 'Picture', ''))
        p.content = f.render()
        self.response.write(p.render())
class TeamSetImageHandler(blobstore_handlers.BlobstoreUploadHandler):
    """Receive the blobstore upload and attach it to the team as `picture`."""
    def post(self):
        k = self.request.get('t')
        try:
            t = ndb.Key(urlsafe=k).get()
        except:
            # NOTE(review): no `return` after error(400) in the original;
            # kept, since the following try/except still catches the fallout.
            self.error(400)
        try:
            upload = self.get_uploads()[0]
            t.picture = upload.key
            t.put()
            # BUG FIX: the original concatenated the Team entity to a string
            # ('/team?t=' + t), raising TypeError which the bare except then
            # masked as a 500; redirect with the urlsafe key instead.
            self.redirect('/team?t=' + k)
        except:
            self.error(500)
class ImageHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """Serve blobstore-backed images under /res/<key>.png."""
    @staticmethod
    def url(img):
        # NOTE(review): assumes img.key concatenates as a string — confirm
        # whether this should be a urlsafe()/str() conversion of an ndb key.
        return '/res/' + img.key + ".png"
    def get(self, img):
        if not blobstore.get(img):
            self.error(404)
        else:
            self.send_blob(img)
class InitHandler(BaseHandler):
    """One-off bootstrap: set the current year and create the four teams."""
    def get(self):
        models.setting.set("currentyear", "2s3s_17")
        yr = models.year.create("2s3s_17")
        for tn in ["Wanderers", "Nomads", "Squanderers", "Bedouin"]:
            t = models.Team(parent=yr.key)
            t.year = 2017
            t.name = tn
            t.put()
# class HymnRequest(webapp2.RequestHandler):
# def get(self):
# k = self.request.get('k')
# try:
# h = ndb.Key(urlsafe=k).get()
# io = StringIO()
# json.dump(h, io, cls=models.Hymn.HymnEncoder)
# self.response.write(io.getvalue())
# except:
# self.response.set_status(400)
# class HymnListRequest(webapp2.RequestHandler):
# def get(self):
# hymns = models.Hymn.list()
# io = StringIO()
# json.dump(hymns, io, cls=models.Hymn.QueryEncoder)
# self.response.write(io.getvalue())
# URL routing table.
# BUG FIX: the original mapped '/team/edit/submit' to TeamEditSubmitHandler,
# which is defined nowhere in this file, so importing the module raised
# NameError. TeamEditHandler.post already handles the submit, so route there.
app = webapp2.WSGIApplication([
    ('/hymns/add', AddHymnHandler),
    ('/hymns/edit', EditHymnHandler),
    ('/hymns/delete', EditHymnHandler),
    ('/hymns', HymnListHandler),
    ('/hymn', HymnHandler),
    ('/teams', TeamListHandler),
    ('/team/edit', TeamEditHandler),
    ('/team/edit/submit', TeamEditHandler),
    ('/team/editpic', TeamSetImageFormHandler),
    ('/team/editpic/submit', TeamSetImageHandler),
    ('/team', TeamHandler),
    ('/profile', ProfileHandler),
    ('/init', InitHandler),
    ('/res/([^\.]+)?\.png', ImageHandler),
    ('/', MainHandler)
], debug=True)
|
import tree as t
import treePlotter as tp
import os
# Build a decision tree from the tab-separated lenses dataset, plot it, and
# classify each row with its own feature values (Python 2 script).
f = open(os.path.dirname(__file__) +'/lenses.txt')
lenses = [r.strip().split('\t') for r in f.readlines()]
lensesLabel = ['age','prescript','astigmatic','tearRate']
lensesTree = t.createTree(lenses,lensesLabel)
tp.createPlot(lensesTree)
fmt = '%10s'   # right-align each field to 10 characters
print [fmt % x for x in lensesLabel]
for lense in lenses:
    # lense[0:-1] strips the class label; classify predicts it back
    print [fmt % x for x in lense],t.classify(lensesTree,lensesLabel,lense[0:-1])
from bs4 import BeautifulSoup
import csv
from selenium import webdriver
# Scrape Amazon search results into Amazon.csv using Selenium + BeautifulSoup.
csv_file = open('Amazon.csv', 'w', newline='')
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['product_name', 'product_rating', 'num_of_reviews', 'product_price'])
driver = webdriver.Chrome()
productName = input("Enter name of the product")
productName = productName.replace(' ','+')  # '+' encodes spaces in the query string
def url(i):
    """Build the Amazon search-results URL for page *i* of the current
    query (reads the module-level `productName`)."""
    base = "https://www.amazon.in/s?k=" + productName
    page = "&page=" + str(i)
    tail = "&crid=2HPF3IZH5D4TR&qid=1607409586&sprefix=sam%2Caps%2C306&ref=sr_pg_2"
    return base + page + tail
def extraction(section):
    """Extract (name, rating, review-count, price) from one search-result div.

    Each field independently falls back to None when missing or malformed.
    """
    try:
        product_name = section.find('span', class_="a-size-medium a-color-base a-text-normal").text
        # keep only the headline portion before any "with ..." suffix
        product_name = product_name.split('with')[0]
    except Exception as e:
        product_name = None
        product_rating = None
        num_of_reviews = None
        product_price = None
        pass
    try:
        product_rating = section.find('span',class_="a-icon-alt").text
        # text looks like "4.3 out of 5 stars" — take the leading number
        product_rating = float(product_rating[:3])
    except Exception as e:
        product_rating = None
    try:
        num_of_reviews = section.find('span', class_="a-size-base").text
        num_of_reviews = num_of_reviews.replace(',','')
        num_of_reviews = int(num_of_reviews)
    except Exception as e:
        num_of_reviews = None
    try:
        # BUG FIX: the original searched the page-level global `soup`, so every
        # row received the FIRST price on the page; search this section instead.
        product_price = section.find('span', class_="a-price-whole").text
        product_price = product_price.replace(',','')
        product_price = float(product_price)
    except Exception as e:
        product_price = None
    return product_name, product_rating, num_of_reviews, product_price
# Crawl the first 10 result pages, one CSV row per product section.
for i in range(1,11):
    driver.get(url(i))
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    for section in soup.find_all('div', class_="a-section a-spacing-medium"):
        # NOTE(review): extraction() runs twice per row (print + write)
        print(extraction(section))
        csv_writer.writerow(extraction(section))
csv_file.close()
|
import base64
import json
from jwt import (JWT, jwk_from_dict)
from jwt.exceptions import JWTDecodeError
import os
# Shared JWT helper and in-memory state (survives only per warm Lambda container).
instance = JWT()
public_keys = {}    # kid -> key object, filled by get_keys() at import
public_key = None   # NOTE(review): never reassigned at module level; see get_keys()
messages = ["Messages"]
def message(event, context):
    """Lambda handler: append the posted message when the token verifies,
    then return the full message list with CORS headers.

    verify() returns a falsy {} on success, so `not bool(result)` means OK.
    """
    body = get_post_data(event['body'])
    result = verify(body['token'])
    if not bool(result):
        messages.append(body['message'])
    result = {
        'statusCode': 200,
        'headers': {
            'Access-Control-Allow-Headers': 'Content-Type',
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'OPTIONS,POST,GET',
        },
        'body': json.dumps(messages)
    }
    return result
def get_post_data(body):
    """Split a form-encoded POST body ("k1=v1&k2=v2") into a dict.

    Values are kept verbatim (no percent-decoding); for "a=b=c" only the
    text between the first and second '=' is kept, matching the original.
    """
    parsed = {}
    for pair in body.split('&'):
        fields = pair.split('=')
        parsed[fields[0]] = fields[1]
    return parsed
def verify(token):
    """Decode *token*; return {} on success or a 403 payload on failure.

    NOTE(review): decode is called with the third argument False and the
    module-level `public_key`, which stays None (see get_keys) — confirm
    whether signature verification is intentionally disabled here.
    """
    result = {}
    try:
        decoded = instance.decode(token, public_key, False)
    except JWTDecodeError:
        result = { 'statusCode': 403, 'body': 'Forbidden '}
    return result
def get_keys():
    """Decode the OKTA_KEYS env var (base64-encoded JWKS) and index the
    keys by `kid` into the module-level `public_keys` dict.

    NOTE(review): the assignment to `public_key` below is function-local;
    the module-level `public_key` that verify() reads remains None —
    confirm whether verify() should instead look keys up in `public_keys`.
    """
    keys = base64.b64decode(os.environ['OKTA_KEYS'])
    jwks = json.loads(keys)
    for jwk in jwks['keys']:
        kid = jwk['kid']
        public_key = jwk_from_dict(jwk)
        public_keys[kid] = public_key
# Populate the key cache at import time.
get_keys()
from celery import Celery
import time
import redis
import json
import requests
#celery -A task worker --pool=solo --loglevel=info
# BUG FIX: the original passed broken='redis://...' — a typo for `broker=`.
# Celery silently ignored it and fell back to the default broker URL, so the
# worker never connected to redis as intended.
celery = Celery('task', backend='redis://localhost/0', broker='redis://localhost/0')


@celery.task()
def executing_task(url, delay=0):
    """Fetch *url* after an optional delay given in milliseconds and return
    the textual repr of the HTTP response."""
    delay = int(delay)
    time.sleep(delay // 1000)  # delay arrives in ms; sleep takes seconds
    result = requests.get(url)
    return (str(result))
|
#!/usr/bin/env python3
# Copyright (c) 2021 Mahdi Biparva, mahdi.biparva@gmail.com
# miTorch: Medical Imaging with PyTorch
# Deep Learning Package for 3D medical imaging in PyTorch
# Implemented by Mahdi Biparva, April 2021
# Brain Imaging Lab, Sunnybrook Research Institute (SRI)
from .build import DATASET_REGISTRY, build_dataset
from .WMHSegChal import WMHSegmentationChallenge
from .SRIBILSet import SRIBIL, SRIBILhfb, SRIBILhfbTest, LEDUCQTest, PPMITest, SRIBILTest
from .NeuroSegSets import TRAP, CAPTURE, TRACING
from .HPSubfieldSegSets import HPSubfield
from .DatasetTransformations import *
from .TestTransformations import *
|
import botometer
import json
import pymongo
import numpy
import tweepy
print("Search.py Loaded")
from pymongo import MongoClient
# Local MongoDB collections for candidate / bot / not-bot user ids.
client = MongoClient('mongodb://localhost:27017/')
db = client.twit_ids
db_bot = client.bot_ids
db_notbot = client.notbot_ids
# NOTE(review): database handles are never None, so this branch is dead;
# also prefer `is None` over `== None`.
if db == None:
    print ('kaaskoek')
# user = {"_id":164628182, "id_str":str(164628182), "friends_ids": ["164628183", "164628184", "164628185"] }
#
user_database = db.twit_ids
# handle_return = user_database.insert_one(user).inserted_id
# print (handle_return)
for user in user_database.find():
    print(user["friends_ids"])
# SECURITY(review): hard-coded API credential committed to source — rotate
# this key and load it from the environment instead.
mashape_key = "BSFpXdyE3FmshqYT7Muon7z6Sj6Dp1LSM4gjsnXqR6SYFhd93q"
# #SirBredbeddle
# twitter_app_auth = {
# 'consumer_key': 'TvBda1gItC98cdU9iSXY8cM65',
# 'consumer_secret': 'zcVRxr0to7mgu9FBtkD2fRJE7FAGU3uak8xS2MYStlzibeBxqu',
# 'access_token': '971671773933645824-FmDlseKdoHxqOkEuWJD9NvfcEfCLDOj',
# 'access_token_secret': 'ZGadThrGEqi5l5kJT3pqMpMAA0o4DO2YIFhPgwdFuRMzc',
# }
#SirConstantine
# SECURITY(review): live Twitter consumer/access secrets committed to source —
# rotate all of these and load them from the environment or a secrets store.
twitter_app_auth = {
    'consumer_key': 'zqievuaw5A4ByVVtrVvcjGd5z',
    'consumer_secret': 'UZJ4X75Na20t80kxlEdfiVcvCrMVv9iFC7LURuU80TWo4yvBzs',
    'access_token': '971671773933645824-ieJQ3Gifvwdtnl13EAgyQ9QFYrd865m',
    'access_token_secret': 'EQccniXzaQ48zKqvOo4jwOlXi50rj5zAF1yHrzFO55P0a',
}
auth = tweepy.OAuthHandler("npb4vI5OhwkXyxY8ixvZ2qAHx","SR10qi0e2nLcl4a2cXDZ8ZNeM3MyaCdm31fVmD0Nm1MYhs8nB0")
auth.set_access_token("971671773933645824-B4U9gTzabJFqB7SjiMWKtjcisqPIvpL","0iDxzvlar7OIDH0RiJ4JL30muWBdzcc6OVudTuxA9MUC9")
# Botometer client that scores accounts on bot-likelihood.
bom = botometer.Botometer(wait_on_ratelimit=True,
    mashape_key=mashape_key,
    **twitter_app_auth)
print ("blah")
def search_run():
    """Placeholder: the Botometer scoring loop (commented out below) is
    currently disabled."""
    pass
# ids = open("ids.json",'r')
# api = tweepy.API(auth)
# data = ids.read()
# item_dict = json.loads(data)
# count_ids = len(item_dict)
# details_file = open("full_detail.json", 'w')
# data_file = open("data.json", 'w')
# alleg_bot = open("abot_ids.json", 'w')
# alleg_notbot = open("anotbot_ids.json", 'w')
# attributelist = open("attributelist.json", 'w')
# abot_ids = list()
# anotbot_ids = list()
# attributelist_dict = {'ID': [], 'BS': []}
#
# for x in range(0, count_ids):
# print("Busy with:", item_dict[x])
# user = api.get_user(item_dict[x])
# if user.protected == False:
# result = bom.check_account(item_dict[x])
# data_file.write(str(item_dict[x]))
# score_mean = numpy.mean(list(result["categories"].values()))
# temporal = result["categories"]["temporal"]
# data_file.write(" " + str(score_mean) + "\n")
# attributelist_dict["ID"].append(str(item_dict[x]))
# attributelist_dict["BS"].append(str(score_mean))
# if score_mean >= 0.65 or temporal >=0.7:
# abot_ids.append(item_dict[x])
# else:
# anotbot_ids.append(item_dict[x])
# json.dump(result,details_file)
# details_file.write(" " + str(score_mean)+ "\n")
#
# json.dump(attributelist_dict,attributelist)
# json.dump(abot_ids,alleg_bot)
# json.dump(anotbot_ids,alleg_notbot)
# alleg_bot.close()
# alleg_notbot.close()
# data_file.close()
# details_file.close()
|
print("구구단 몇단을 계산 할까요?")
x = 1
# BUG FIX: the original used `while (x is not 0)` — identity comparison on
# ints is a CPython implementation detail (small-int caching) and the wrong
# operator for value equality; use `!=`.
while x != 0:
    x = int(input())
    if x == 0:
        break
    # only tables 1..9 are valid
    if not (1 <= x <= 9):
        print("잘못 입력하셨습니다.")
        continue
    else:
        print("구구단 ", x, "단을 게산 합니다.")
        # NOTE(review): this prints "x * i" without the product — confirm
        # whether "x * i = x*i" was intended.
        for i in range(1, 10):
            print(x, "*", i)
    print("구구단 몇 단을 계산할까요?")
print('구구단 종료')
|
from functools import lru_cache
# 究极背包问题
# Unbounded-knapsack-style problem (LeetCode 638): minimise the cost of
# satisfying `needs`, buying items at unit price or via special-offer bundles.
class Solution:
    # BUG FIX: the original annotated with `List[...]` without importing it
    # from typing, raising NameError when the class is defined; use the
    # built-in generics available since Python 3.9.
    def shoppingOffers(self, price: list[int], special: list[list[int]], needs: list[int]) -> int:
        # lists are unhashable, so the memoised state is passed as a tuple
        @lru_cache(None)
        def dfs(remains):
            # baseline: buy every remaining item at its unit price
            ans = sum(r * price[i] for i, r in enumerate(remains))
            if ans:
                # try every offer that does not over-buy any item
                for ssp in special:
                    check = True
                    for i in range(len(remains)):
                        if ssp[i] > remains[i]:
                            check = False
                            break
                    if check:
                        new = list(remains)
                        # apply the bundle, then add its price to the subproblem
                        for i in range(len(remains)):
                            new[i] -= ssp[i]
                        ans = min(ans, dfs(tuple(new)) + ssp[-1])
            return ans
        return dfs(tuple(needs))
|
import tkinter.font as TkFont
import tkinter
import threading
import math
from time import time, sleep
from sys import exit
from pynput.keyboard import Listener
from pynput import *
def startimer():
    """Main timer loop: lay out the split labels, wait for the first
    keypress (`going`), then update elapsed/split times ~60x per second
    until the run ends, finally persisting bests to dynamicSave.txt.

    Driven entirely by the globals mutated from onPress().
    """
    global timer;global splitTime;global startime;global splitBeg;global currentSplit;global start;global labels;global timeLabel;global tk;global timerready;global going;global saving
    timer = 0
    splitTime = 0
    timerready = True
    innerSplit = 0  # tracks currentSplit to detect split transitions
    splitTimes = [0.0] * len(config["splits"])
    start.destroy()  # remove the "ready" button and show the split labels
    for i in range(len(labels)):
        labels[i].place(relx = 0, rely = 0.15*i, relwidth = 1, relheight = 0.15)
    timeLabel.place(relx = 0, rely = 0.75, relwidth = 1, relheight = 0.25)
    timeLabel.config(text = "0 : 00 : 00.00\nPress space to start")
    tk.update()
    # spin until the keyboard listener flips `going`
    while not going:
        tk.update()
    startime = time()
    splitBeg = startime
    while True:
        timer = time()-startime
        splitTime = time()-splitBeg
        try:
            splitTimes[innerSplit] = splitTime
        except:
            # index past the last split: run is over
            going = False
            currentSplit -= 1
        if currentSplit != innerSplit:
            # the listener advanced the split; restart the per-split clock
            innerSplit = currentSplit
            splitBeg = time()
        sleep(0.015)
        strtime = timeToString(timer)
        if True:
            # scroll so the active split stays visible
            top = -0.15*max(0, currentSplit-4)
            #for i in range(max(0, currentSplit-5), max(currentSplit, min(5, len(config["splits"])))):
            for i in range(0, len(config["splits"])):
                labels[i].place(relx = 0, rely = top)
                if i == currentSplit:
                    labels[i].config(text = config["splits"][i] + ": " + timeToString(splitTimes[i]), bg = "gray")
                else:
                    labels[i].config(text = config["splits"][i] + ": " + timeToString(splitTimes[i]), bg = tk.cget('bg'))
                top += 0.15
            timeLabel.config(text = strtime + "\nPress space to move on\nto the next split")
            tk.update()
        if going == False:
            # run finished: persist personal bests unless saving was disabled
            if saving:
                if timer < dynamic["best"]:
                    dynamic["best"] = timer
                    dynamic["bestSplits"] = splitTimes
                    save = open("dynamicSave.txt", "w")
                    save.write(str(dynamic))
                    save.close()
                for i in range(len(splitTimes)):
                    if splitTimes[i] > dynamic["bestSplitsE"][i]:
                        dynamic["bestSplitsE"][i] = splitTimes[i]
            break
def toDict(i):
    """Parse "key:value" lines into a dict; the final line is ignored.

    Values are eval()'d where possible (numbers, lists, ...), otherwise the
    raw string after the first ':' is kept.  NOTE(review): eval of file
    contents executes arbitrary code if the config file is tampered with.
    """
    result = {}
    for raw in i.split("\n")[:-1]:
        fields = raw.split(":")
        try:
            result[fields[0]] = eval(fields[1])
        except:
            result[fields[0]] = fields[1]
    return result
def timeToString(i):
    """Format a duration in seconds as "H : MM : SS.cc".

    BUG FIX: the original built the fraction from str(round(i % 1, 2)),
    which dropped trailing zeros (1.5s rendered as ".5" instead of ".50")
    and could render ".0" when the fraction rounded up to 1.0.  Working in
    whole centiseconds makes rounding carry into the seconds correctly.
    """
    centis = int(round(i * 100))
    secs, cc = divmod(centis, 100)
    mins, ss = divmod(secs, 60)
    hh, mm = divmod(mins, 60)
    return f"{hh} : {mm:02d} : {ss:02d}.{cc:02d}"
# Global timer state shared between the UI loop and the keyboard listener.
timer = None
splitTime = None
splitBeg = None
startime = None
saving = True        # False when the run should not update saved bests
currentSplit = -1    # advanced by onPress; -1 until the run starts
going = False
timerready = False
# Static configuration (split names, key bindings).
configFile = open("config.txt", "r")
configText = configFile.read()
configFile.close()
config = toDict(configText)
# Saved bests.  NOTE(review): eval() of the save file executes arbitrary
# code if the file is tampered with — consider json or ast.literal_eval.
dynamicFile = open("dynamicSave.txt", "r")
dynamicText = dynamicFile.read()
dynamicFile.close()
dynamic = eval(dynamicText)
# Tk window, always-on-top, with one label per split plus the big clock.
tk = tkinter.Tk()
tk.title("PySplits")
tk.geometry("250x400")
tk.resizable(1, 1)
tk.wm_attributes("-topmost", True)
start = tkinter.Button(tk, text = "Press to ready timer", command = startimer)
start.place(relx = 0, rely = 0, relwidth = 1, relheight = 1)
labels = [None] * len(config["splits"])
for i in range(len(labels)):
    labels[i] = tkinter.Label(tk)
timeLabel = tkinter.Label(tk)
def onPress(key):
    """pynput callback: the configured key advances to the next split;
    Delete advances while marking the run as not-to-be-saved."""
    # BUG FIX: `saving` must be in the global declaration — the original
    # omitted it, so `saving = False` below created a function-local and the
    # Delete key never actually disabled saving.
    global going;global timerready;global currentSplit;global saving
    if(str(key) == config["moveOnKey"] and timerready):
        going = True
        currentSplit += 1;
        if currentSplit > len(config["splits"]):
            going = False
    if(str(key) == "Key.delete" and timerready):
        going = True
        currentSplit += 1;
        saving = False
        if currentSplit > len(config["splits"]):
            going = False
def onRelease(key):
    """pynput callback: key releases are ignored."""
    pass
def liste(e = None):
    """Start the global pynput keyboard listener (it runs its own thread)."""
    Listener(on_press = onPress, on_release = onRelease).start()
e = threading.Thread(target = liste)
# BUG FIX: Thread.run() executes the target synchronously in the calling
# thread; start() is the call that actually spawns the worker thread.
e.start()
|
import json
import requests
import boto3
import uuid
import time
# AWS clients; profile and region are hard-coded for this smoke-test script.
profile_name = 'mine'
region = 'us-west-2'
session = boto3.Session(profile_name=profile_name)
api = session.client('apigateway', region_name=region)
cf = session.client('cloudformation', region_name=region)
def get_key(name_of_key):
    """Return the value of the first API Gateway key whose name contains
    *name_of_key* (None when no key matches)."""
    print('Discovering API Key')
    listing = api.get_api_keys(includeValues=True)
    for item in listing['items']:
        if name_of_key in item['name']:
            return item['value']
def get_url(name_of_stack):
    """Look up the CloudFormation export named "url-<stack>" and return its
    value (None when the export is absent)."""
    print('Discovering Cloudformation Exports')
    wanted = 'url-{}'.format(name_of_stack)
    for export in cf.list_exports()['Exports']:
        if export['Name'] == wanted:
            return export['Value']
def post(url, key, data):
    """POST *data* (JSON-serialized) to *url*, authenticating via the
    x-api-key header. Returns the requests.Response object."""
    payload = json.dumps(data)
    request_headers = {'Content-type': 'application/json', 'x-api-key': key}
    return requests.post(url, data=payload, headers=request_headers)
if __name__ == "__main__":
    # Smoke-test driver: look up the 'advanced' stack's endpoint URL and
    # API key, then POST a random two-element payload once per second,
    # forever (stop with Ctrl-C).
    name = 'advanced'
    full_url = get_url(name)
    api_key = get_key(name)
    while True:
        body = {
            "input": [
                str(uuid.uuid4()),
                str(uuid.uuid4())
            ]
        }
        print(post(full_url, api_key, body))
        time.sleep(1)
|
# Generated by Django 3.1.5 on 2021-02-15 14:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: repoints Coffee.roaster at the
    # Roaster model now living in the 'roasters' app.
    dependencies = [
        ('roasters', '0001_initial'),
        ('coffees', '0003_delete_roaster'),
    ]
    operations = [
        migrations.AlterField(
            model_name='coffee',
            name='roaster',
            # CASCADE: deleting a Roaster also deletes its Coffee rows.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='roasters.roaster'),
        ),
    ]
|
from tree import Tree
from tree import Node
# Demo/manual-test script for the project-local Tree/Node classes:
# builds a small tree and prints it after each insertion.
myTree = Tree()
#print(myTree.get_root())
n = Node('taste')
n.add_value('o')
p = Node('var')
# NOTE(review): the next two add_value('a')/('b') calls target n, not the
# freshly created p/q nodes — this looks like a copy-paste slip (compare r
# below, which adds its value to itself). Confirm intent before changing.
n.add_value('a')
q = Node('var')
n.add_value('b')
r = Node('var')
r.add_value('c')
s = Node('name')
myTree.add_node(n,myTree.get_root())
print("Traversing the tree after adding 1 node")
myTree.print_tree(myTree.get_root(),0)
myTree.add_node(p,n)
#myTree.add_node(p,myTree.search_node(myTree.get_root(),n.feature,n.value))
print("Traversing the tree after adding 2 nodes")
myTree.print_tree(myTree.get_root(),0)
myTree.add_node(q,n)
myTree.add_node(r,n)
print("Traversing the tree after adding 4 nodes")
myTree.print_tree(myTree.get_root(),0)
myTree.add_node(s,r)
"""
n.add_child(p)
n.add_child(q)
n.add_child(r)
r.add_child(s)
"""
print("Traversing the tree after adding 5 nodes")
myTree.print_tree(myTree.root,0)
|
import os
class BaseConfig(object):
    # Shared Flask/SQLAlchemy settings; subclasses override DEBUG/TESTING.
    SQLALCHEMY_TRACK_MODIFICATIONS=False
    # Fails fast with KeyError at import time if DATABASE_URL is unset —
    # presumably deliberate so misconfigured deployments crash early.
    SQLALCHEMY_DATABASE_URI=os.environ['DATABASE_URL']
    DEBUG=False
    TESTING=False
    # Mail settings are optional (None when the env vars are absent).
    MAIL_SERVER = os.environ.get('MAIL_SERVER')
    #MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
    #MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
    #MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    #ADMINS = ['your-email@example.com']
class DevelopmentConfig(BaseConfig):
    # Local development: debug tooling and testing helpers enabled.
    DEBUG=True
    TESTING=True
class TestingConfig(BaseConfig):
    # Test-suite settings: testing mode on, debugger off.
    # BUGFIX: removed a stray trailing '|' after TESTING=True that made
    # this module a SyntaxError.
    DEBUG=False
    TESTING=True
#This program rates stores based on sales
print("Phillip Smith's Store Grading Program")

#input number of stores
store_num = int(input("How many stores are there? "))
sales = []

#get sales for each store
for store_index in range(store_num):
    print("Enter today's sales for store", store_index + 1, end='')
    sales.append(int(input(": ")))
print(sales)
print()

print("Sales Bar Chart")
print("(Each '*' = 100)")
for store_index in range(store_num):
    print("Store", store_index + 1, ": ", end='')
    # BUGFIX: the original inner loop iterated over ALL stores' sales,
    # printing every store's stars on every chart row. Each row must show
    # only this store's bar (one '*' per full 100 of sales).
    print('*' * (sales[store_index] // 100))

# Keep the console window open until Enter is pressed
# (renamed from `exit`, which shadowed the builtin).
pause = input('')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.