from __future__ import division
import sys
import subprocess
import glob, os
## Author: Spencer Caplan, University of Pennsylvania
## Contact: spcaplan@sas.upenn.edu
outputFileNamesWithWordID = False
printDebugStatements = True
testFilesRun = True
trainFilesRun = True
def accessDictEntry(dictToCheck, entryToCheck):
if entryToCheck in dictToCheck:
return dictToCheck[entryToCheck]
else:
return 0
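# Note: accessDictEntry(d, k) is equivalent to d.get(k, 0); kept as a small helper for readability.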
##
## Main method block
##
if __name__=="__main__":
    if (len(sys.argv) < 7):
print('incorrect number of arguments')
exit(0)
# currently at: /home1/s/spcaplan/Documents/PRAAT/praat
praat = sys.argv[1]
scriptSourceDir = sys.argv[2]
inputDirTrain = sys.argv[3]
inputDirTest = sys.argv[4]
outputDirTrain = sys.argv[5]
outputDirTest = sys.argv[6]
#praat = "/home1/s/spcaplan/Documents/PRAAT/praat"
#praat = "/usr/bin/praat"
script = scriptSourceDir+"create_continuum.praat"
# all these in ms
    stepSize = 5
    minVOT = 10
    maxVOT = 100
indexToNormDict = {}
indexToCounterDict = {}
indexToIDDIct = {}
attestedIndexes = {}
vowelSet = ['A','E']
if testFilesRun:
for vowel in vowelSet:
for Tindex in range(1, 6):
Tuid = 'T-' + vowel + '-' + str(Tindex)
currTfile = inputDirTest + Tuid + '.wav'
print currTfile
if os.path.isfile(currTfile):
for Dindex in range(1, 6):
Duid = 'D-' + vowel + '-' + str(Dindex) + '-f0norm'
currDfile = inputDirTest + Duid + '.wav'
print currDfile
if os.path.isfile(currDfile):
for vot in xrange(minVOT, maxVOT+1, stepSize):
stepSizeSeconds = "%1.3f" % (vot/1000)
output = outputDirTest+vowel+str(Tindex)+'_'+str(Dindex)+"_f0norm_VOT_%i.wav" % vot
subprocess.call([praat, script, Tuid, Duid, inputDirTest, stepSizeSeconds, output])
print 'Processed: ' + output
else:
print 'Cannot find Tfile: '+currTfile
print 'Ran multi test files.'
if trainFilesRun:
for file in os.listdir(inputDirTrain):
if file.endswith(".wav"):
fileAttributes = file.split("-")
if len(fileAttributes) > 4:
globalIndex = fileAttributes[0]
localIndex = fileAttributes[1]
phone = fileAttributes[2]
trialType = fileAttributes[3]
word = fileAttributes[4]
word = word.replace('.wav','')
if outputFileNamesWithWordID:
indexToIDDIct[localIndex] = globalIndex + '-' + localIndex + '-' + phone + '-' + trialType + '-' + word
else:
indexToIDDIct[localIndex] = localIndex + '-' + trialType
attestedIndexes[localIndex] = True
if printDebugStatements:
print file
print globalIndex + '-' + localIndex + '-' + phone + '-' + trialType + '-' + word
if phone == 'T':
indexToNormDict[localIndex] = file
elif phone == 'D':
indexToCounterDict[localIndex] = file
if printDebugStatements:
print 'Onto execution loop...'
for currIndex in attestedIndexes:
print currIndex
normFile = accessDictEntry(indexToNormDict, currIndex)
counterFile = accessDictEntry(indexToCounterDict, currIndex)
idName = accessDictEntry(indexToIDDIct, currIndex)
if normFile == 0 or counterFile == 0 or idName == 0:
break
if printDebugStatements:
print 'Running: ' + normFile
normFile = normFile[:-4]
counterFile = counterFile[:-4]
for vot in xrange(minVOT, maxVOT+1, stepSize):
stepSizeSeconds = "%1.3f" % (vot/1000)
output = outputDirTrain+idName+"_f0norm_VOT_%i.wav" % vot
subprocess.call([praat, script, normFile, counterFile, inputDirTrain, stepSizeSeconds, output])
print 'Ran train files.'
print 'Completed VOT manipulation.'
quit()
|
#!/usr/bin/env python
"""
Implementation of Binary Search in Python using both
iterative and recursive approach.
"""
from typing import List, Union
def binSearch(arr: List[int], elem: int) -> bool:
first = 0
last = len(arr) - 1
found = False
while first <= last and not found:
middle = (first + last) // 2
if elem == arr[middle]:
found = True
else:
if arr[middle] < elem:
first = middle + 1
elif arr[middle] > elem:
last = middle - 1
return found
"""Recursive Binary Search"""
def rec_bin_search(arr: List[int], start: int, end: int, elem: int) -> bool:
# Base case.
if start > end:
return False
else:
mid = (start + end) // 2
if arr[mid] == elem:
return True
else:
if elem > arr[mid]:
return rec_bin_search(arr, mid + 1, end, elem)
elif elem < arr[mid]:
return rec_bin_search(arr, start, mid - 1, elem)
def main():
arr = [1, 2, 3, 4, 5, 6, 7, 8]
print("Using Sequential Binary Search: \n")
print("Looking for elem {} in {}: {}".format(7, arr, binSearch(arr, 7)))
print("Looking for elem {} in {}: {}".format(13, arr, binSearch(arr, 13)))
print("\nUsing Recursive Binary Search: \n")
print(
"Looking for elem {} in {}: {}".format(
7, arr, rec_bin_search(arr, 0, len(arr) - 1, 7)
)
)
print(
"Looking for elem {} in {}: {}".format(
13, arr, rec_bin_search(arr, 0, len(arr) - 1, 13)
)
)
if __name__ == "__main__":
main()
|
import os
import sys
import subprocess
import shutil
sys.path.insert(0, 'scripts')
sys.path.insert(0, os.path.join("tools", "families"))
import fam
import experiments as exp
import ete3
def get_tree(ale_file):
for line in open(ale_file):
if (";" in line):
return line
return None
def get_leaves(tree_file):
return ete3.Tree(tree_file,format=1).get_leaf_names()
def create_mapping_from_gene_tree(input_tree, output_mapping):
leaves = ete3.Tree(input_tree, format=1).get_leaf_names()
species_to_gene = {}
for leaf in leaves:
species = "_".join(leaf.split("_")[1:])
if (not species in species_to_gene):
species_to_gene[species] = []
species_to_gene[species].append(leaf)
with open(output_mapping, "w") as writer:
for species in species_to_gene:
genes = species_to_gene[species]
writer.write(species + ":" + ";".join(genes) + "\n")
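# Example of the mapping format written above, with hypothetical leaf names:
# a leaf "gene1_Homo_sapiens" yields species "Homo_sapiens", so one output line looks like
#   Homo_sapiens:gene1_Homo_sapiens;gene2_Homo_sapiens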
def export(input_trees_dir, species_tree, datadir):
# init directories
print("Starts generation")
fam.init_top_directories(datadir)
# species tree
true_species_tree = fam.get_species_tree(datadir)
shutil.copy(species_tree, true_species_tree)
fam.init_top_directories(datadir)
# families
print("Init families")
families = []
    for f in os.listdir(input_trees_dir):
        family = f.split(".")[0]
        families.append(family)
fam.init_families_directories(datadir, families)
# fill families
print("Fill families")
for family in families:
ale_file = os.path.join(input_trees_dir, family + ".ale")
output_tree = fam.get_true_tree(datadir, family)
tree_string = get_tree(ale_file)
with open(output_tree, "w") as writer:
writer.write(tree_string)
mapping_file = fam.get_mappings(datadir, family)
create_mapping_from_gene_tree(tree_string, mapping_file)
print("post process")
fam.postprocess_datadir(datadir)
if (__name__ == "__main__"):
if (len(sys.argv) != 4):
print("syntax: input_trees_dir input_species_tree datadir")
exit(1)
input_trees_dir = sys.argv[1]
species_tree = sys.argv[2]
datadir = sys.argv[3]
export(input_trees_dir, species_tree, datadir)
|
from operator import add, mul, sub, truediv
def arithmetic(a, b, operator):
ops = {'add': add, 'subtract': sub, 'multiply': mul, 'divide': truediv}
return ops[operator](a, b)
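# Usage sketch (operator names as defined in ops above):
#   arithmetic(6, 3, 'divide')    # -> 2.0 (truediv always returns a float)
#   arithmetic(2, 5, 'multiply')  # -> 10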
|
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@
#@ Converts the single solid containing all the silicon detectors [input_file_name]
#@ into the true array of silicon detectors [output_file_name]
#@ Usage:
#@ python convert_silicon.py
#@
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@ Options:
input_file_name = "./stk_si.gdml"
#output_file_name = "./stk_si_advanced.gdml"
output_file_name = "./stk_si_advanced.gdml"
SOLID_BASE_NAME = 'DAM_TRKAss1_Si-stl0x11f2da0' # (see stk_si.gdml)
VOLUME_BASE_NAME = 'ActiveTile' # (see stk_si.gdml)
PLANE_BASE_NAME = 'STKDetector'
#USE_LATEST_COORDINATE_SYSTEM = True
#@ GDML constants
VERTICES_PER_RECTANGLE = 12
#@ STK constants
N_LAYERS = 6
N_X_TILES = 8
N_Y_TILES = 8
N_TILES_PER_PLANE = 4
N_PLANES_PER_LAYER = 4
EVEN_LAYER_DETECTOR_TYPE = "Y" #(counting layers from 0 to 11)
ODD_LAYER_DETECTOR_TYPE = "X"
#@ others...
tmp_file_name = "./tmp.txt"
tmp_file_name1 = "./tmp1.txt"
tmp_file_name2 = "./tmp2.txt"
#@
#@ Auxiliary gdml parsing functions
#@
def parce_vertices_for_gdml_file(filelines):
vertices = {}
for line in filelines:
if "<position" not in line: continue
name = line.split('name=')[1].split('"')[1]
x = line.split('x=')[1].split('"')[1]
x = x.split(".")[0] + "." + x.split(".")[1][0:5]
x = float(x)
y = line.split('y=')[1].split('"')[1]
y = y.split(".")[0] + "." + y.split(".")[1][0:5]
y = float(y)
z = line.split('z=')[1].split('"')[1]
z = z.split(".")[0] + "." + z.split(".")[1][0:5]
z = float(z)
if name in vertices: continue
vertices[name] = [x,y,z]
return vertices
"""
f=open("stk_si.gdml")
l=f.readlines()
a=parce_vertices_for_gdml_file(l)
f.close()
"""
def find_vertices_for_resselated_solid(solidname, filelines):
    #@ Parse the start and stop positions
startline = None
stopline = None
for i in xrange(len(filelines)):
line = filelines[i]
if "<tessellated" not in line: continue
if solidname not in line: continue
startline = i
break
if startline is None:
return None
for i in xrange(startline + 1,len(filelines)):
line = filelines[i]
if "</tessellated>" not in line: continue
stopline = i
break
if stopline is None:
return None
#@ Look for vertices
vertices = []
for i in xrange(startline +1, stopline):
line = filelines[i]
if "triangular" not in line: continue
vertexname1 = line.split('vertex1')[1].split('"')[1] #.split('"')[0]
vertexname2 = line.split('vertex2')[1].split('"')[1] #.split('"')[0]
vertexname3 = line.split('vertex3')[1].split('"')[1] #.split('"')[0]
if vertexname1 not in vertices: vertices.append(vertexname1)
if vertexname2 not in vertices: vertices.append(vertexname2)
if vertexname3 not in vertices: vertices.append(vertexname3)
return vertices
def get_STK_detector_coordinates(all_vertices_info, layers = "even"):
#@
all_z = []
for vertexname in all_vertices_info.keys():
z= all_vertices_info[vertexname][2]
if z in all_z: continue
all_z.append(z)
all_z = sorted(all_z)
#@ get x,y -coordinates for readout-even/odd layers
final_x = []
final_y = []
final_z = []
for vertexname in all_vertices_info.keys():
x= all_vertices_info[vertexname][0]
y= all_vertices_info[vertexname][1]
z= all_vertices_info[vertexname][2]
#@ z_layer 0 -- 11
z_layer = all_z.index(z)
z_layer = z_layer / 2
#@ Use only even layers
if layers == "even":
if z_layer%2: continue
elif layers == "odd":
if z_layer%2 == 0: continue
else:
raise Exception("Illegal value for argument 'layers'")
#@ Append x, y coordinates
if x not in final_x: final_x.append(x)
if y not in final_y: final_y.append(y)
if z not in final_z: final_z.append(z)
final_x = sorted(final_x)
final_y = sorted(final_y)
final_z = sorted(final_z)
return final_x, final_y, final_z
#@ Read input file
input_file = open(input_file_name, "r")
lines = input_file.readlines()
input_file.close()
total_triangular_lines = len(filter(lambda line: "<triangular" in line, lines))
triangular_counter = 0 #@ number of <triangular> tags in input gdml file
tessellated_counter = 1 #@ number of output subsolids (there is at least one subsolid)
#@
#@ Create subsolids
#@
#output_file = open(output_file_name, "w")
tmpfile= open(tmp_file_name, "w")
tmpfile.write(' <tessellated aunit="deg" lunit="mm" name="'+ SOLID_BASE_NAME +'-%d">\n'%tessellated_counter)
for line in lines:
if not "<triangular" in line:
continue
triangular_counter+=1
    if (triangular_counter % VERTICES_PER_RECTANGLE == 0
            and triangular_counter > 0
            and triangular_counter < total_triangular_lines):
tessellated_counter+=1
tmpfile.write(line)
tmpfile.write(' </tessellated>\n')
tmpfile.write(' <tessellated aunit="deg" lunit="mm" name="'+ SOLID_BASE_NAME +'-%d">\n'%tessellated_counter)
continue
tmpfile.write(line)
tmpfile.write(' </tessellated>\n')
tmpfile.close()
#@
#@ Logical volumes
#@
#@ Analyze tracker coordinates
f = open(input_file_name, 'r')
tmpfile = open(tmp_file_name, 'r')
l=f.readlines()
tmpfile_lines = tmpfile.readlines()
f.close()
tmpfile.close()
all_vertices_info=parce_vertices_for_gdml_file(l)
all_x_even, all_y_even, all_z_even = get_STK_detector_coordinates(all_vertices_info, "even")
all_x_odd, all_y_odd, all_z_odd = get_STK_detector_coordinates(all_vertices_info, "odd")
assert(len(all_x_even)==N_X_TILES * 2)
assert(len(all_y_even)==N_Y_TILES * 2)
assert(len(all_z_even)==N_LAYERS * 2)
assert(len(all_x_odd)==N_X_TILES * 2)
assert(len(all_y_odd)==N_Y_TILES * 2)
assert(len(all_z_odd)==N_LAYERS * 2)
"""
print "all_x_even: ",all_x_even
print "all_y_even: ",all_y_even
print "all_z_even: ",all_z_even
print "all_x_odd: " ,all_x_odd
print "all_y_odd: " ,all_y_odd
print "all_z_odd: " ,all_z_odd
"""
#@ Create logical volumes
tmpfile1 = open(tmp_file_name1, "w")
volume_names = []
detecto_even_counter = 0
detecto_odd_counter = 0
for i in xrange(1,tessellated_counter+1):
#@
#@ Assign name to a volume
#@
solid_name = SOLID_BASE_NAME + '-%d'%i
vertex = find_vertices_for_resselated_solid(solid_name, tmpfile_lines)[0]
#print "all_vertices_info[vertex] : ", all_vertices_info[vertex]
x, y, z = all_vertices_info[vertex]
try:
z_layer = all_z_even.index(z)
#if USE_LATEST_COORDINATE_SYSTEM: z_layer = len(all_z_even)-1-z_layer
x_index = all_x_even.index(x)
y_index = all_y_even.index(y)
#if USE_LATEST_COORDINATE_SYSTEM: y_index = len(all_y_even)-1-y_index
detecortype = EVEN_LAYER_DETECTOR_TYPE
detector_number = detecto_even_counter
detecto_even_counter += 1
except ValueError:
z_layer = all_z_odd.index(z)
#if USE_LATEST_COORDINATE_SYSTEM: z_layer = len(all_z_odd)-1-z_layer
x_index = all_x_odd.index(x)
y_index = all_y_odd.index(y)
#if USE_LATEST_COORDINATE_SYSTEM: y_index = len(all_y_odd)-1-y_index
detecortype = ODD_LAYER_DETECTOR_TYPE
detector_number = detecto_odd_counter
detecto_odd_counter += 1
x_index = x_index / 2
y_index = y_index / 2
z_layer = z_layer / 2
x_plane = x_index / N_TILES_PER_PLANE
y_plane = y_index / N_TILES_PER_PLANE
x_index_insideplane = x_index % N_TILES_PER_PLANE
y_index_insideplane = y_index % N_TILES_PER_PLANE
tilenumber_insideplane = x_index_insideplane*N_TILES_PER_PLANE + y_index_insideplane
planenumber = z_layer* N_PLANES_PER_LAYER + x_plane * N_PLANES_PER_LAYER/2 + y_plane
"""
print "\n---------------------"
print "x_plane, y_plane, z_layer = ", x_plane, y_plane, z_layer
print "\n---------------------"
"""
#volume_name = VOLUME_BASE_NAME + detecortype +"-%d"%detector_number + "_plane-%d"%planenumber
#volume_name = VOLUME_BASE_NAME + detecortype + "-%d"%detector_number + "_" + PLANE_BASE_NAME + detecortype + "-%d"%planenumber
#volume_name = VOLUME_BASE_NAME + detecortype + "-%d"%tilenumber_insideplane + "_" + PLANE_BASE_NAME + detecortype + "-%d"%planenumber
volume_name = PLANE_BASE_NAME + detecortype + "-%d"%planenumber+"_"+VOLUME_BASE_NAME + detecortype + "-%d"%tilenumber_insideplane
volume_names.append(volume_name)
#@ Create volume
#tmpfile1.write(' <volume name="' + VOLUME_BASE_NAME + '-%d">\n'%i)
tmpfile1.write(' <volume name="' + volume_name + '">\n')
tmpfile1.write(' <materialref ref="Silicon"/>\n')
tmpfile1.write(' <solidref ref="'+ solid_name + '"/>\n')
tmpfile1.write(' </volume>\n')
"""
if volume_name == (VOLUME_BASE_NAME + "X-0_" + PLANE_BASE_NAME + "X-19"):
print "volume_name:", volume_name
ver = find_vertices_for_resselated_solid(solid_name, tmpfile_lines)
"""
tmpfile1.close()
#@
#@ Physical volumes
#@
"""
tmpfile2 = open(tmp_file_name2, "w")
for i in xrange(1,tessellated_counter+1):
tmpfile2.write(' <physvol>\n')
#tmpfile.write(' <file name="./Geometry/STK/stk_si_advanced.gdml"/>\n')
tmpfile2.write(' <volumeref ref="' + VOLUME_BASE_NAME + '-%d"/>\n'%i)
#tmpfile.write(' <position x="stk_adjust_x_position" y="stk_adjust_y_position" z="stk_adjust_z_position" unit="mm"/>\n')
tmpfile2.write(' </physvol>\n')
tmpfile2.close()
"""
tmpfile2 = open(tmp_file_name2, "w")
for volume_name in volume_names:
tmpfile2.write(' <physvol>\n')
#tmpfile.write(' <file name="./Geometry/STK/stk_si_advanced.gdml"/>\n')
tmpfile2.write(' <volumeref ref="' + volume_name+'"/>\n')
#tmpfile.write(' <position x="stk_adjust_x_position" y="stk_adjust_y_position" z="stk_adjust_z_position" unit="mm"/>\n')
tmpfile2.write(' </physvol>\n')
tmpfile2.close()
#@
"""
#@
#@ Combine things into the output file
#@
#
#output_file.close()
tmpfile = open(tmp_file_name, "r")
tmpfile1 = open(tmp_file_name1, "r")
tmpfile2 = open(tmp_file_name2, "r")
tmplines = tmpfile.readlines()
tmplines1 = tmpfile1.readlines()
tmplines2 = tmpfile2.readlines()
tmpfile.close()
tmpfile1.close()
tmpfile2.close()
output_file = open(output_file_name, "w")
for line in lines:
output_file.write(line)
if "<solids>" in line:
for tmpline in tmplines:
output_file.write(tmpline)
continue
if "<structure>" in line:
for tmpline in tmplines1:
output_file.write(tmpline)
continue
if '<solidref' in line and SOLID_BASE_NAME in line:
for tmpline in tmplines2:
output_file.write(tmpline)
continue
output_file.close()
"""
#@
#@ New vertices
#@
short_to_short = 0.200
long_to_long = 0.200
inner = 0.080
side = 95.000
x_cad_offset = -300.0
#silicon_guard = 1.036
ladder_direction = [- short_to_short/2 - 4 * side - 3*inner,
- short_to_short/2 - 3 * side - 3*inner,
- short_to_short/2 - 3 * side - 2*inner,
- short_to_short/2 - 2 * side - 2*inner,
- short_to_short/2 - 2 * side - 1*inner,
- short_to_short/2 - 1 * side - 1*inner,
- short_to_short/2 - 1 * side,
- short_to_short/2,
short_to_short/2,
short_to_short/2 + 1 * side,
short_to_short/2 + 1 * side + 1*inner,
short_to_short/2 + 2 * side + 1*inner,
short_to_short/2 + 2 * side + 2*inner,
short_to_short/2 + 3 * side + 2*inner,
short_to_short/2 + 3 * side + 3*inner,
short_to_short/2 + 4 * side + 3*inner,]
orthogonal_direction = [- long_to_long/2 - 4 * side - 3*long_to_long,
- long_to_long/2 - 3 * side - 3*long_to_long,
- long_to_long/2 - 3 * side - 2*long_to_long,
- long_to_long/2 - 2 * side - 2*long_to_long,
- long_to_long/2 - 2 * side - 1*long_to_long,
- long_to_long/2 - 1 * side - 1*long_to_long,
- long_to_long/2 - 1 * side,
- long_to_long/2,
long_to_long/2,
long_to_long/2 + 1 * side,
long_to_long/2 + 1 * side + 1*long_to_long,
long_to_long/2 + 2 * side + 1*long_to_long,
long_to_long/2 + 2 * side + 2*long_to_long,
long_to_long/2 + 3 * side + 2*long_to_long,
long_to_long/2 + 3 * side + 3*long_to_long,
long_to_long/2 + 4 * side + 3*long_to_long,]
newvertices = {}
for vertexname in all_vertices_info.keys():
x = all_vertices_info[vertexname][0]
y = all_vertices_info[vertexname][1]
z = all_vertices_info[vertexname][2]
new_x = x
new_y = y
if z in all_z_even:
i = all_x_even.index(x)
new_x = ladder_direction[i] + x_cad_offset
i = all_y_even.index(y)
new_y = orthogonal_direction[i]
elif z in all_z_odd:
i = all_x_odd.index(x)
new_x = orthogonal_direction[i] + x_cad_offset
i = all_y_odd.index(y)
new_y = ladder_direction[i]
else:
raise Exception()
newvertices[vertexname] = [new_x, new_y, z]
#@ Combine things
tmpfile = open(tmp_file_name, "r")
tmpfile1 = open(tmp_file_name1, "r")
tmpfile2 = open(tmp_file_name2, "r")
tmplines = tmpfile.readlines()
tmplines1 = tmpfile1.readlines()
tmplines2 = tmpfile2.readlines()
tmpfile.close()
tmpfile1.close()
tmpfile2.close()
output_file = open(output_file_name, "w")
for line in lines:
if "<position name=" in line: continue
output_file.write(line)
if "<define>" in line:
for vertex in newvertices.keys():
output_file.write(' <position name="%s" unit="mm" x="%f" y="%f" z="%f"/>\n'%(vertex,newvertices[vertex][0],newvertices[vertex][1],newvertices[vertex][2]))
continue
if "<solids>" in line:
for tmpline in tmplines:
output_file.write(tmpline)
continue
if "<structure>" in line:
for tmpline in tmplines1:
output_file.write(tmpline)
continue
if '<solidref' in line and SOLID_BASE_NAME in line:
for tmpline in tmplines2:
output_file.write(tmpline)
continue
output_file.close()
"""
#@ read file
f=open(input_file_name)
l=f.readlines()
f.close()
all_vertices_info=parce_vertices_for_gdml_file(l)
x,y,z = get_STK_detector_coordinates(all_vertices_info, "odd")
"""
"""
#@ Use it for silicons
f=open("stk_si_advanced_v2.gdml")
l=f.readlines()
N_LAYERS = 12
N_X_TILES = 8
N_Y_TILES = 8
#@ get all z for silicons
all_vertices_info=parce_vertices_for_gdml_file(l)
all_z = []
for i in xrange(1,tessellated_counter+1):
solidname = SOLID_BASE_NAME + '-%d'%i
verticesforsolid = find_vertices_for_resselated_solid(solidname,l)
for vertexname in verticesforsolid:
z= all_vertices_info[vertexname][2]
if z in all_z: continue
all_z.append(z)
#lowestvertices[solidname] = find_lowest_vertex_for_solid(all_vertices_info,verticesforsolid)
all_z = sorted(all_z)
assert(len(all_z)==N_LAYERS * 2)
#@ get x-coordinates for readout-Y silicons
all_x = []
all_y = []
for tess_i in xrange(1,tessellated_counter+1):
solidname = SOLID_BASE_NAME + '-%d'%tess_i
verticesforsolid = find_vertices_for_resselated_solid(solidname,l)
for vertexname in verticesforsolid:
x= all_vertices_info[vertexname][0]
y= all_vertices_info[vertexname][1]
z= all_vertices_info[vertexname][2]
#@ z_layer 0 -- 11
z_layer = filter(lambda i: all_z[i]==z, xrange(len(all_z)))
assert(len(z_layer)==1)
z_layer = z_layer [0]
z_layer = z_layer / 2
#@ Use only even layers
if z_layer%2: continue # readout Y
#if z_layer%2 == 1: continue # readout X
#@ Append x, y coordinates
if x not in all_x: all_x.append(x)
if y not in all_y: all_y.append(y)
all_x = sorted(all_x)
all_y = sorted(all_y)
assert(len(all_x) == 2 * N_X_TILES)
assert(len(all_y) == 2 * N_Y_TILES)
f.close()
"""
"""
#@ Use it for ladders
f=open("stk_ladders.gdml")
l=f.readlines()
N_LAYERS = 12
N_X_TILES = 8
N_Y_TILES = 8
#@ get all z for silicons
all_vertices_info=parce_vertices_for_gdml_file(l)
tessellated_counter = len(all_vertices_info)
all_z = []
for i in xrange(1,tessellated_counter+1):
vertexname = all_vertices_info.keys()[i-1]
z= all_vertices_info[vertexname][2]
if z in all_z: continue
all_z.append(z)
#lowestvertices[solidname] = find_lowest_vertex_for_solid(all_vertices_info,verticesforsolid)
all_z = sorted(all_z)
assert(len(all_z)==N_LAYERS * 2)
#@ get x-coordinates for readout-Y (readpout-X?) silicons
all_x = []
all_y = []
for tess_i in xrange(1,tessellated_counter+1):
vertexname = all_vertices_info.keys()[tess_i-1]
x= all_vertices_info[vertexname][0]
y= all_vertices_info[vertexname][1]
z= all_vertices_info[vertexname][2]
#@ z_layer 0 -- 11
z_layer = filter(lambda i: all_z[i]==z, xrange(len(all_z)))
assert(len(z_layer)==1)
z_layer = z_layer [0]
z_layer = z_layer / 2
#@ Use only even layers
if z_layer%2 == 0: continue
#@ Append x, y coordinates
if x not in all_x: all_x.append(x)
if y not in all_y: all_y.append(y)
all_x = sorted(all_x)
all_y = sorted(all_y)
#assert(len(all_x) == 2 * N_X_TILES)
#assert(len(all_y) == 2 * N_Y_TILES)
f.close()
"""
|
# -*- coding:utf-8 -*-
#
# Author: Muyang Chen
# Date: 2020-08-26
import os
import config
from util import PrintLog, FileController
from valid_proxy_inspector import VaildProxyInspector
from proxy_crawler import Crawler
if __name__ == '__main__':
base_path = os.path.dirname(__file__)
proxy_file = os.path.join(base_path, 'proxy.info')
result_list = list()
vaild_proxy_inspector = VaildProxyInspector(config.test_url,
config.timeout)
crawler = Crawler(config.timeout)
file_controller = FileController()
    # Read the existing proxy pool and drop proxies that no longer pass validation.
if os.path.exists(proxy_file):
proxy_list = file_controller.read_file_with_split(proxy_file,
split_size=2)
        for proxy_node in proxy_list:
            ip, port = proxy_node
            if vaild_proxy_inspector.check(ip, port):
                result_list.append(proxy_node)
    # Crawl new proxies.
for i in range(1, config.max_page_size + 1):
        proxy_list = crawler.crawl_proxy_list(start_page_id=i, page_size=1)
        if proxy_list:
            PrintLog("Successfully crawled %s proxies." % len(proxy_list))
            tmp = vaild_proxy_inspector.check_list(proxy_list)
            result_list.extend(tmp)
            PrintLog("There are currently %s usable proxies." % len(result_list))
    # Save the proxy pool to file.
file_controller.write_list_to_file(result_list, proxy_file)
|
import pytest
@pytest.fixture
def mg_tag_2():
return 2
@pytest.fixture
def tree_sentences_text_with_difficult_punctuation():
return """\nNew Toyota Corolla LE 2007, Air Toyota ^Conditioning, *Leather seaters, $.
\nAlso Available in Different Colours.
\nBeware Om Fraudsters,, Please See What You Want To.\n"""
@pytest.fixture
def tree_sentences_list_with_difficult_punctuation():
return [["new", "toyota", "corolla", "le", "2007", "air", "toyota", "conditioning", "leather", "seaters"],
["also", "available", "in", "different", "colours"],
["beware", "om", "fraudsters", "please", "see", "what", "you", "want", "to"]]
@pytest.fixture
def same_tag_words():
return "toyota avensys toyota toyota"
@pytest.fixture
def tags_combinations_indents():
return ["toyota corolla 2007", # w 0 w 0 w
"toyota w1 corolla 2007", # w 1 w 0 w
"toyota w1 w2 corolla 2007", # w 2 w 0 w
"toyota w1 w2 corolla w3 2007", # w 2 w 1 w
"toyota w1 w2 corolla w3 w4 2007", # w 2 w 2 w
"toyota w1 corolla w2 w3 2007", # w 1 w 2 w
"toyota corolla w1 2007", # w 0 w 1 w
"toyota corolla w1 w2 2007", # w 0 w 2 w
"toyota w1 corolla w2 2007"] # w 1 w 1 w
@pytest.fixture
def tags_combination_words():
result_tags = ['toyota corolla 2007',
'toyota corolla',
'corolla 2007',
'toyota 2007',
'2007',
'corolla',
'toyota']
text = 'toyota corolla 2007'
tree_tags = {'corolla': {False: {}, '2007': {False: {}}},
'toyota': {False: {},
'corolla': {False: {},
'2007': {False: {}}},
'2007': {False: {}}},
'2007': {False: {}}}
return text, tree_tags, result_tags
|
from django.shortcuts import render, redirect, reverse
# Create your views here.
import stripe as stripes
stripes.api_key = "pk_test_tkj9QAmsgb3rQkJMZH1iommg00WAdK86DL"
def index(request):
return render(request, 'index.html')
def charge(request, *args, **kwargs):
amount = 10
if request.method == 'POST':
print('Data:', request.POST)
stripes.Customer.create(
email=request.POST["email"],
name=request.POST["name"]
)
        return redirect(reverse('strip:success', args=[amount]))
    # Non-POST requests fall back to the index page so the view always returns a response.
    return render(request, 'index.html')
def success(request, args):
amount = args
return render(request, "success.html", {"amount":amount})
|
import numpy as np
import winnow
import readData as rd
TEN_PERCENT=10.0/100.0
TWENTY_PERCENT=20.0/100.0
PERCENT_OF_DATA=100/100
# This function reads training data from trainfileName,
# splits the data into D1, D2 and D3,
# and returns the number of mistakes made with margin and without margin.
def driver(trainfileName,testFileName,n):
print trainfileName
tempData,tempLabels = rd.readDataFromFile(trainfileName,PERCENT_OF_DATA)
labels = np.array(tempLabels)
data = rd.createNumpyArrayOfData(tempData,n)
    d1 = data[0:int(TEN_PERCENT*len(data))]
    l1 = labels[0:int(TEN_PERCENT*len(data))]
    d2 = data[int(TEN_PERCENT*len(data)):int(TWENTY_PERCENT*len(data))]
    l2 = labels[int(TEN_PERCENT*len(data)):int(TWENTY_PERCENT*len(data))]
    d3 = data[int(TWENTY_PERCENT*len(data)):]
    l3 = labels[int(TWENTY_PERCENT*len(data)):]
model = winnow.Winnow(None, n+1)
######## train the model without margin and with learning rate
gammaNeta=withoutMargin(d1,l1,d2,l2,model)
print gammaNeta
rd.trainModel(d3,l3,model,gammaNeta)
if testFileName !=None:
tempTestData,tempTestLabels = rd.readDataFromFile(testFileName,100)
rd.getAccuracy(tempTestLabels,tempTestData,model,n)
winnowWithoutMargin=model.getInstanceNMistakes()
model.actualTrain = False
######## train the model with margin and with learning rate and identify the best learning rate and margin
gammaNeta = withMargin(d1,l1,d2,l2,model)
print gammaNeta
rd.trainModel(d3,l3,model,gammaNeta)
if testFileName !=None:
tempTestData,tempTestLabels = rd.readDataFromFile(testFileName,100)
rd.getAccuracy(tempTestLabels,tempTestData,model,n)
winnowWithMargin = model.getInstanceNMistakes()
return winnowWithoutMargin,winnowWithMargin
# function to identify the best learning rate for the given data
def withoutMargin(d1,l1,d2,l2,model):
model.initializeWeights(len(d1[0]))
gamma = 0
eta=[1.1,1.01,1.005,1.0005,1.0001]
accuracy = 0.0
for j in eta:
tempAccuracy = callWinnow(d1, l1, d2, l2, j, gamma, model)
if tempAccuracy > accuracy:
gammaNeta = (gamma, j)
accuracy = tempAccuracy
print accuracy
return gammaNeta
# function to identify the best learning parameters for the given data
def withMargin(d1,l1,d2,l2,model):
gamma=[2.0,0.3,.04,.006,.001]
eta = [1.1, 1.01, 1.005, 1.0005, 1.0001]
accuracy=0.0
weights = np.ones(len(d1[0]))
model.weights = weights
for i in gamma:
for j in eta:
tempAccuracy = callWinnow(d1,l1,d2,l2,j,i,model)
if tempAccuracy > accuracy:
gammaNeta=(i,j)
accuracy = tempAccuracy
print accuracy
return gammaNeta
def callWinnow(d1,l1,d2,l2,eta,gamma,model):
model.initializeWeights(len(d1[0]))
for i in range(0,20):
model.train(d1,l1,eta,gamma)
l2_predict =model.predict(d2)
cnt=0
for i in range(0,len(l2_predict)):
if l2_predict[i] == l2[i]:
cnt = cnt +1
return 100.0*cnt/len(l2)
# def readDataFromFile(fileName,percentOfData):
# f = open(fileName)
# tempLabels=[]
# tempData=[]
# for line in f:
# line= line.strip("\n").split(" ")
# if((line[0])== "+1"):
# tempLabels.append(1)
# else:
# tempLabels.append(-1)
# tempData.append(line[1:])
# return tempData[0:percentOfData*(len(tempData))],tempLabels[0:percentOfData*len(tempLabels)]
# def createNumpyArrayOfData(tempData,n):
# data = np.zeros((len(tempData), n + 1))
# for i in range(0, len(tempData)):
# for j in range(0,len(tempData[i])):
# # print tempData[j][j]
# k=str(tempData[i][j]).split(":")
# data[i][int(k[0])]=1
# return data
#driver()
|
#IACMI_parser.py
# Version Notes #
# Version: 4.0
# Development Goal: Smartly import each "chunk" of rheology
# data belonging to an individual rheology experiment into its own DF
# Libraries
import pandas as pd
import numpy as np
# Global Variables
DIRECTORY = '/Users/malcolmdavidson/Documents/Code/ODI/IACMI/Shear_Rheology/25000_MW'
FILE = '5_wt%_25k_SS_20C.txt'
KEYPHRASE = "shear stress shear rate viscosity time normal stress torque"
## import_rheology_to_df ##
# Version 1
# Import contents at filepath into a dataframe.
##
def import_rheology_to_df(file_path, data_start, chunk_size):
return pd.read_csv(file_path, sep='\t', skiprows=data_start, encoding="iso-8859-1",nrows=chunk_size)
## build_filepath function ##
# Used to construct a file path from a file name and directory
##
def build_filepath(directory_path, file_name):
return str(directory_path + '/' + file_name)
## File I/O ##
# 1. Open file as file object
# 2. scan through line by line
# a. remove newline
# b. check for key_phrase
# i. store line number
# c. return object and number
##
def file_IO(file_path, key_phrase):
with open(file_path, 'r', encoding="iso-8859-1") as f:
lines = [line.rstrip('\n') for line in f]
lines = list(filter(str.strip, lines))
data_start = [num for (num, line) in enumerate(lines,0) if key_phrase in line]
return lines, data_start
## calculate_chunk_size ##
# Determines number of rows to import for a single rheology experiment.
# 1. Calculate difference between enumerated_data[1] and enumerated_data[0]
# 2. Subtract the number of filler lines
# 3. Return chunk size for pd as int
##
def calculate_chunk_size(enumerated_data, filler):
return enumerated_data[1]-enumerated_data[0]-filler
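# Example (hypothetical line numbers): if the key phrase appears on lines 10 and 55
# and there are 2 filler lines, the chunk size is 55 - 10 - 2 = 43 data rows.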
# Script #
filePath = build_filepath(DIRECTORY, FILE)
reducedData, startOfData = file_IO(filePath, KEYPHRASE)
chunkSize = calculate_chunk_size(startOfData,2)
df = import_rheology_to_df(filePath, startOfData[0], chunkSize)
print(chunkSize)
print(df)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix, classification_report, r2_score
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
from sklearn import metrics
import time
import joblib
import pickle
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import KFold
from sklearn import model_selection
wine = pd.read_csv('data.csv', sep=',')
# Check for missing values and drop rows that are entirely NaN after replacing infinities.
print(pd.isnull(wine).sum() > 0)
#wine.replace([np.inf, -np.inf], np.nan, inplace=True)
wine = wine.replace([np.inf, -np.inf], np.nan).dropna(how="all")
from sklearn.metrics import mean_squared_error
def calc_train_error(X_train, y_train, model):
    '''returns in-sample error (MSE) for already fit model.'''
    predictions = model.predict(X_train)
    mse = mean_squared_error(y_train, predictions)
    return mse
def calc_validation_error(X_test, y_test, model):
    '''returns out-of-sample error (MSE) for already fit model.'''
    predictions = model.predict(X_test)
    mse = mean_squared_error(y_test, predictions)
    return mse
def calc_metrics(X_train, y_train, X_test, y_test, model):
    '''fits model and returns the MSE for in-sample error and out-of-sample error'''
model.fit(X_train, y_train)
train_error = calc_train_error(X_train, y_train, model)
validation_error = calc_validation_error(X_test, y_test, model)
return train_error, validation_error
X = wine.drop('tau1', axis=1).values
y = wine['tau1'].values
K = 10
#cross validation
alphas = [0.001, 0.01, 0.1, 1, 10]
kf = KFold(n_splits=K, shuffle=True, random_state=42)
for alpha in alphas:
train_errors = []
validation_errors = []
sc = StandardScaler()
X = sc.fit_transform(X)
for train_index, val_index in kf.split(X, y):
# split data
X_train, X_val = X[train_index], X[val_index]
y_train, y_val = y[train_index], y[val_index]
# instantiate model
model = MLPRegressor(activation='relu', solver='lbfgs',alpha=alpha, hidden_layer_sizes=(30,30,30), max_iter=1000)
#calculate errors
train_error, val_error = calc_metrics(X_train, y_train, X_val, y_val, model)
# append to appropriate list
train_errors.append(train_error)
validation_errors.append(val_error)
# generate report
print('alpha: {:6} | mean(train_error): {:7} | mean(val_error): {}'.
format(alpha,
round(np.mean(train_errors),10),
round(np.mean(validation_errors),10)))
|
import cv2
import cv2.aruco
import numpy as np
import sys
# import pygame
import time
import serial
from threading import Thread
ser1 = serial.Serial('/dev/ttyACM0', 9600)
ser2 = serial.Serial('/dev/ttyACM1', 9600)
# alpha-100 beta-150
def direction(img, top_left, top_right, centre, ids):
x1 = top_left[0]
y1 = top_left[1]
x2 = top_right[0]
y2 = top_right[1]
s1 = (x1 + x2) / 2
    s2 = (y1 + y2) / 2
    orient = ""  # default so orient is always defined, even for a degenerate marker
if abs(x2 - x1) <= 15:
if y1 > y2:
orient = "west"
elif y2 > y1:
orient = "east"
elif (x2 - x1) > 0: # upright
if abs(y2 - y1) <= 15:
orient = "north"
elif (y2 > y1):
orient = "ne"
elif (y1 > y2):
orient = "nw"
elif (x2 - x1) < 0:
if abs(y2 - y1) <= 15:
orient = "south"
elif (y1 > y2):
orient = "sw"
elif (y2 > y1):
orient = "se"
return orient
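# Example (hypothetical corner pixels): with top_left=(100, 100) and top_right=(150, 100),
# x2 - x1 = 50 > 15 and |y2 - y1| = 0 <= 15, so direction() returns "north".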
def radio1(data, ids):
if ids == 100:
if data == 'for':
print("forward")
ser1.write('w'.encode())
if data == 'lt':
print("left")
ser1.write('a'.encode())
if data == 'rev':
print("reverse")
ser1.write('s'.encode())
if data == 'rt':
print("right")
ser1.write('d'.encode())
if data == 'z':
sys.exit()
if data == 'c':
print("not pressed")
ser1.write('q'.encode())
time.sleep(0.1)
def radio2(data, ids):
if ids == 150:
if data == 'for':
print("forward")
ser2.write('w'.encode())
if data == 'lt':
print("left")
ser2.write('a'.encode())
if data == 'rev':
print("reverse")
ser2.write('s'.encode())
if data == 'rt':
print("right")
ser2.write('d'.encode())
if data == 'z':
sys.exit()
if data == 'c':
print("not pressed")
ser2.write('q'.encode())
time.sleep(0.1)
def send_radio(data, ids):
t1 = Thread(target=radio1, args=(data, ids))
t2 = Thread(target=radio2, args=(data, ids))
t1.start()
t2.start()
t1.join()
t2.join()
def orientation(img, top_left, top_right, centre, ids):
x1 = top_left[0]
y1 = top_left[1]
x2 = top_right[0]
y2 = top_right[1]
s1 = (x1 + x2) / 2
s2 = (y1 + y2) / 2
data = 'for'
# centre1 = centre[0]
# centre2 = centre[1]
# destination coord
d1 = 250.0
d2 = 250.0
orient = ""
if abs(s1 - d1) <= 15 and abs(s2 - d2) <= 15:
data = 'c'
# north
# orient = direction(img, top_left, top_right, centre, ids)
if direction(img, top_left, top_right, centre, ids) == "north":
send_radio('c', ids)
else:
direc = direction(img, top_left, top_right, centre, ids)
while direc != 'north':
print("Stuck in loop")
send_radio('rt', ids)
send_radio('c', ids)
direc = direction(img, top_left, top_right, centre, ids)
break
send_radio('c', ids)
# send_radio(data, ids)
if __name__=='__main__':
cap = cv2.VideoCapture(2)
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
while True:
ret, frame = cap.read()
# getting all the values in different variables
corners, ids, rejected = cv2.aruco.detectMarkers(frame, dictionary)
cv2.aruco.drawDetectedMarkers(frame, corners, ids)
# createGrid(frame)
cv2.imshow('out', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if len(corners) > 0:
# draw only if aruco detected
i = 0
# nested loop only for centre coordinates
while i < len(corners):
# corners contains 4 coordinates of the aruco (2d array)
j = 0
while j < len(corners[i]):
# first taking sum of all x coord, then divide by 4. Then y
pos = np.sum(corners[i], axis=1) / 4
j = j + 1
# call the function for position
# checkMarker(frame, ids[i], pos)
# orient north and move
orientation(frame, corners[0][0][0], corners[0][0][1], pos, ids[i])
i = i + 1
print("\n")
# cv2.imshow('out', frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
cap.release()
cv2.destroyAllWindows()
|
from core.actors.messages import GAME_TAKE_TURN
class TakeTurnMessageBuilder:
def __init__(self, available_turns, game_state):
self.available_turns = available_turns
self.game_state = game_state
def build(self):
return {
'msg': GAME_TAKE_TURN,
'payload': {
'available_turns': self.available_turns,
'game_field_state': self.game_state
}
}
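# Usage sketch (assuming available_turns is a list of turn descriptors and
# game_state is whatever structure represents the field):
#   message = TakeTurnMessageBuilder(available_turns=[(0, 1)], game_state={}).build()
#   # -> {'msg': GAME_TAKE_TURN, 'payload': {'available_turns': [(0, 1)], 'game_field_state': {}}}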
|
"""@package docstring
Provides the web request handlers.
"""
import os, datetime, re, simplejson
import urllib, base64, uuid
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.api.urlfetch import *
from google.appengine.api import memcache
from google.appengine.api import *
import openradar.api
import openradar.db
from openradar.base import *
from openradar.models import *
class Index(RequestHandler):
def get(self):
self.redirect("/page/1")
class OldIndex(RequestHandler):
def get(self):
biglist = memcache.get("biglist")
if biglist is None:
radars = db.GqlQuery("select * from Radar order by number desc").fetch(100)
path = os.path.join(os.path.dirname(__file__), os.path.join('../templates', 'biglist.html'))
biglist = template.render(path, {'radars':radars})
memcache.add("biglist", biglist, 3600) # one hour, but we also invalidate on edits and adds
self.respondWithTemplate('index.html', {"biglist": biglist})
PAGESIZE = 40
PAGE_PATTERN = re.compile("/page/([0-9]+)")
class RadarListByPage(RequestHandler):
def get(self):
m = PAGE_PATTERN.match(self.request.path)
if m:
number = m.group(1)
if (int(number) > 500):
self.error(404)
self.respondWithText('Not found')
return
if (int(number) > 1):
showprev = int(number)-1
else:
showprev = None
shownext = int(number)+1
pagename = "page" + number
biglist = memcache.get(pagename)
if biglist is None:
radars = db.GqlQuery("select * from Radar order by number desc").fetch(PAGESIZE,(int(number)-1)*PAGESIZE)
if len(radars) > 0:
path = os.path.join(os.path.dirname(__file__), os.path.join('../templates', 'biglist.html'))
biglist = template.render(path, {'radars':radars})
memcache.add(pagename, biglist, 3600) # one hour, but we also invalidate on edits and adds
else:
biglist = "<p>That's all.</p>"
self.respondWithTemplate('page.html', {'pagenumber':number, 'shownext':shownext, 'showprev':showprev, "biglist": biglist})
else:
self.respondWithText('invalid page request')
class FAQ(RequestHandler):
def get(self):
self.respondWithTemplate('faq.html', {})
class RadarAdd(RequestHandler):
def get(self):
user = self.GetCurrentUser()
if (not user):
self.respondWithTemplate('please-sign-in.html', {'action': 'add Radars'})
else:
self.respondWithTemplate('radar-add.html', {})
def post(self):
user = self.GetCurrentUser()
if (not user):
self.respondWithTemplate('please-sign-in.html', {'action': 'add Radars'})
else:
title = self.request.get("title")
number = self.request.get("number")
status = self.request.get("status")
description = self.request.get("description")
resolved = self.request.get("resolved")
product = self.request.get("product")
classification = self.request.get("classification")
reproducible = self.request.get("reproducible")
product_version = self.request.get("product_version")
originated = self.request.get("originated")
radar = Radar(title=title,
number=number,
status=status,
user=user,
description=description,
resolved=resolved,
product=product,
classification=classification,
reproducible=reproducible,
product_version=product_version,
originated=originated,
created=datetime.datetime.now(),
modified=datetime.datetime.now())
radar.put()
memcache.flush_all()
# tweet this.
if 1:
#tweet = ("[rdar://%s] %s: %s" % (number, radar.username(), title))
tweet = ("http://openradar.me/%s %s: %s" % (number, radar.username(), title))
tweet = tweet[0:140]
secrets = db.GqlQuery("select * from Secret where name = :1", "retweet").fetch(1)
if len(secrets) > 0:
secret = secrets[0].value
form_fields = {
"message": tweet,
"secret": secret
}
form_data = urllib.urlencode(form_fields)
try:
result = fetch("http://sulfur.neontology.com/retweet.php", payload=form_data, method=POST)
                    except Exception:
                        pass  # let's not worry about downstream problems
self.redirect("/myradars")
RADAR_PATTERN = re.compile("/((FB)?[0-9]+)")
class RadarViewByPath(RequestHandler):
def get(self, _prefix):
user = users.GetCurrentUser()
if not user:
page = memcache.get(self.request.path)
if page:
self.respondWithText(page)
return
m = RADAR_PATTERN.match(self.request.path)
if m:
bare = self.request.get("bare")
number = m.group(1)
radars = Radar.gql("WHERE number = :1", number).fetch(1)
if len(radars) != 1:
self.respondWithTemplate('radar-missing.html', {"number":number})
return
radar = radars[0]
if (not radar):
self.respondWithTemplate('radar-missing.html', {"number":number})
else:
path = os.path.join(os.path.dirname(__file__), os.path.join('../templates', 'radar-view.html'))
page = template.render(path, {"mine":(user == radar.user), "radar":radar, "radars":radar.children(), "comments": radar.comments(), "bare":bare, "user": user})
if not user:
memcache.add(self.request.path, page, 3600) # one hour, but we also invalidate on edits and adds
self.respondWithText(page)
return
class RadarViewByIdOrNumber(RequestHandler):
def get(self):
user = users.GetCurrentUser()
# we keep request-by-id in case there are problems with the radar number (accidental duplicates, for example)
id = self.request.get("id")
if id:
radar = Radar.get_by_id(int(id))
if (not radar):
self.respondWithText('Invalid Radar id')
else:
self.respondWithTemplate('radar-view.html', {"mine":(user == radar.user), "radar":radar, "radars":radar.children(), "comments": radar.comments(), "user": user})
return
number = self.request.get("number")
if number:
self.redirect("/"+number)
return
else:
self.respondWithText('Please specify a Radar by number or openradar id')
class RadarEdit(RequestHandler):
def get(self):
user = users.GetCurrentUser()
if (not user):
self.respondWithTemplate('please-sign-in.html', {'action': 'edit Radars'})
else:
id = self.request.get("id")
radar = Radar.get_by_id(int(id))
if (not radar):
self.respondWithText('Invalid Radar id')
else:
self.respondWithTemplate('radar-edit.html', {"radar":radar})
def post(self):
user = users.GetCurrentUser()
if (not user):
self.respondWithTemplate('please-sign-in.html', {'action': 'edit Radars'})
else:
id = self.request.get("id")
radar = Radar.get_by_id(int(id))
if not radar:
self.respondWithText('Invalid Radar id')
elif radar.user != user:
self.respondWithText('Only the owner of a Radar can edit it')
else:
radar.title = self.request.get("title")
radar.number = self.request.get("number")
radar.status = self.request.get("status")
radar.description = self.request.get("description")
radar.resolved = self.request.get("resolved")
radar.product = self.request.get("product")
radar.classification = self.request.get("classification")
radar.reproducible = self.request.get("reproducible")
radar.product_version = self.request.get("product_version")
radar.originated = self.request.get("originated")
radar.modified = datetime.datetime.now()
radar.put()
memcache.flush_all()
self.redirect("/myradars")
class RadarFixNumber(RequestHandler):
def post(self):
id = self.request.get("id")
radar = Radar.get_by_id(int(id))
if not radar:
self.respondWithText('Invalid Radar id')
else:
radar.put()
memcache.flush_all()
self.respondWithText('OK')
class RadarDelete(RequestHandler):
def get(self):
user = users.GetCurrentUser()
id = self.request.get("id")
radar = Radar.get_by_id(int(id))
if (not user):
self.respondWithTemplate('please-sign-in.html', {'action': 'delete Radars'})
elif (not radar):
self.respondWithText('Invalid Radar id')
else:
radar.delete()
memcache.flush_all()
self.redirect("/myradars")
class RadarList(RequestHandler):
def get(self):
user = users.GetCurrentUser()
if (not user):
self.respondWithTemplate('please-sign-in.html', {'action': 'view your Radars'})
else:
radars = db.GqlQuery("select * from Radar where user = :1 order by number desc", user).fetch(1000)
self.respondWithTemplate('radar-list.html', {"radars": radars})
class NotFound(RequestHandler):
def get(self):
self.response.out.write("<h1>Resource not found</h1>")
self.response.out.write("<pre>")
self.response.out.write(str(self.request))
self.response.out.write("</pre>")
class Refresh(RequestHandler):
def get(self):
memcache.flush_all()
self.redirect("/")
class Hello(RequestHandler):
def get(self):
user = users.get_current_user()
if not user:
            # The user is not signed in.
            self.respondWithText("Hello")
        else:
            self.respondWithText("Hello, %s!" % user.nickname())
class APIKey(RequestHandler):
def get(self):
user = users.GetCurrentUser()
if (not user):
self.respondWithTemplate('please-sign-in.html', {'action': 'view or regenerate your API key'})
else:
apikey = openradar.db.APIKey().fetchByUser(user)
if not apikey:
                # NOTE: this resolves to the APIKey request handler class defined above,
                # which shadows any APIKey model imported via 'from openradar.models import *';
                # the datastore model should probably be referenced explicitly here.
                apikey = APIKey(user=user,
                                apikey=str(uuid.uuid1()),
                                created=datetime.datetime.now())
apikey.put()
self.respondWithTemplate('api-key.html', {'apikey': apikey})
def post(self):
user = users.GetCurrentUser()
if (not user):
self.respondWithTemplate('please-sign-in.html', {'action': 'regenerate your API key'})
else:
apikey = openradar.db.APIKey().fetchByUser(user)
if apikey:
apikey.delete()
self.redirect("/apikey")
class CommentsAJAXForm(RequestHandler):
def _check(self):
user = users.GetCurrentUser()
if (not user):
self.error(401)
self.respondWithText("You must login to post a comment")
return False, False, False
radarKey = self.request.get("radar")
radar = Radar.get(radarKey)
if(not radar):
self.error(400)
self.respondWithText("Unknown radar key")
return False, False, False
replyKey = self.request.get("is_reply_to")
replyTo = None
if(replyKey):
replyTo = Comment.get(replyKey)
return user, radar, replyTo
def get(self):
# Edit
commentKey = self.request.get("key")
if(commentKey):
comment = Comment.get(commentKey)
if(not comment):
self.error(400)
self.respondWithText("Tried to edit a post that doesn't exist? Couldn't find post to edit.")
return
self.respondWithText(comment.form())
return
# New or reply
user, radar, replyTo = self._check()
if(not user): return
args = {"radar": radar}
if(replyTo):
args["is_reply_to"] = replyTo
self.respondWithText(Comment(**args).form())
def post(self):
user, radar, replyTo = self._check()
if(not user): return
commentKey = self.request.get("key")
comment = None
if(commentKey):
comment = Comment.get(commentKey)
if(not comment):
self.error(400)
self.respondWithText("Tried to edit a post that doesn't exist? Couldn't find post to edit.")
return
else:
comment = Comment(user = user, radar = radar)
if(not self.request.get("cancel")):
comment.is_reply_to = replyTo
comment.subject = self.request.get("subject")
comment.body = self.request.get("body")
comment.put()
self.respondWithText(comment.draw(commentKey != ""))
class CommentsAJAXRemove(RequestHandler):
def post(self):
user = users.GetCurrentUser()
if (not user):
self.error(401)
self.respondWithText("You must login to remove a comment")
return
commentKey = self.request.get("key")
comment = Comment.get(commentKey)
if(not comment):
self.error(400)
self.respondWithText("Tried to remove a post that doesn't exist? Couldn't find post to remove.")
return
if(not comment.editable_by_current_user()):
self.error(401)
self.respondWithText("You must be the comment's owner, or an admin, to remove this comment.")
return
if(comment.deleteOrBlank() == "blanked"):
self.respondWithText(comment.html_body())
else:
self.respondWithText("REMOVED")
class CommentsRecent(RequestHandler):
def get(self):
comments = db.GqlQuery("select * from Comment order by posted_at desc").fetch(20)
self.respondWithTemplate('comments-recent.html', {"comments": comments})
class RadarsByUser(RequestHandler):
def get(self):
username = self.request.get("user")
user = users.User(username)
searchlist = ""
if user:
query = db.GqlQuery("select * from Radar where user = :1 order by number desc", user)
radars = query.fetch(100)
if len(radars) > 0:
path = os.path.join(os.path.dirname(__file__), os.path.join('../templates', 'biglist.html'))
searchlist = template.render(path, {'radars':radars})
self.respondWithTemplate('byuser.html', {"radarlist": searchlist})
else:
self.respondWithText('unknown user')
class Search(RequestHandler):
def get(self):
querystring = self.request.get("query")
keywords = querystring.split(" ")
keyword = keywords[0]
searchlist = ""
try:
query = Radar.all().search(keyword).order("-number")
radars = query.fetch(100)
except Exception:
self.respondWithTemplate('search.html', {"query":keyword, "searchlist":searchlist})
return
if len(radars) > 0:
path = os.path.join(os.path.dirname(__file__), os.path.join('../templates', 'biglist.html'))
searchlist = template.render(path, {'radars':radars})
self.respondWithTemplate('search.html', {"query":keyword, "searchlist": searchlist})
class RePut(RequestHandler):
def get(self):
offset = self.request.get("offset")
if offset:
offset = int(offset)
else:
offset = 0
radars = Radar.all().fetch(50,offset)
for radar in radars:
radar.put()
self.respondWithText("done")
class Login(webapp.RequestHandler):
def get(self):
self.response.out.write(users.create_login_url("/"))
|
# This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from unittest.mock import patch, Mock
from beets import library
from beets.util import bytestring_path, _fsencoding
from beetsplug.ipfs import IPFSPlugin
import unittest
import os
from test import _common
from test.helper import TestHelper
@patch('beets.util.command_output', Mock())
class IPFSPluginTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.load_plugins('ipfs')
self.lib = library.Library(":memory:")
def tearDown(self):
self.unload_plugins()
self.teardown_beets()
def test_stored_hashes(self):
test_album = self.mk_test_album()
ipfs = IPFSPlugin()
added_albums = ipfs.ipfs_added_albums(self.lib, self.lib.path)
added_album = added_albums.get_album(1)
self.assertEqual(added_album.ipfs, test_album.ipfs)
found = False
want_item = test_album.items()[2]
for check_item in added_album.items():
try:
if check_item.get('ipfs', with_album=False):
ipfs_item = os.path.basename(want_item.path).decode(
_fsencoding(),
)
want_path = '/ipfs/{}/{}'.format(test_album.ipfs,
ipfs_item)
want_path = bytestring_path(want_path)
self.assertEqual(check_item.path, want_path)
self.assertEqual(check_item.get('ipfs', with_album=False),
want_item.ipfs)
self.assertEqual(check_item.title, want_item.title)
found = True
except AttributeError:
pass
self.assertTrue(found)
def mk_test_album(self):
items = [_common.item() for _ in range(3)]
items[0].title = 'foo bar'
items[0].artist = '1one'
items[0].album = 'baz'
items[0].year = 2001
items[0].comp = True
items[1].title = 'baz qux'
items[1].artist = '2two'
items[1].album = 'baz'
items[1].year = 2002
items[1].comp = True
items[2].title = 'beets 4 eva'
items[2].artist = '3three'
items[2].album = 'foo'
items[2].year = 2003
items[2].comp = False
items[2].ipfs = 'QmfM9ic5LJj7V6ecozFx1MkSoaaiq3PXfhJoFvyqzpLXSk'
for item in items:
self.lib.add(item)
album = self.lib.add_album(items)
album.ipfs = "QmfM9ic5LJj7V6ecozFx1MkSoaaiq3PXfhJoFvyqzpLXSf"
album.store(inherit=False)
return album
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
dimensions = raw_input("Enter: ").split(',')
dim1 = int(dimensions[0])
dim2 = int(dimensions[1])
grand_row = []
for i in range(0, dim1):
row = []
for j in range(0, dim2):
row.append(j * i)
grand_row.append(row)
for row in grand_row:
print row
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from django.contrib.auth import authenticate
from rest_framework.authtoken.models import Token
from django.utils.translation import ugettext_lazy as _
class UserLoginSerializer(serializers.Serializer):
username = serializers.CharField(required=True)
password = serializers.CharField(required=True)
default_error_messages = {
'inactive_account': _('User account is disabled.'),
'invalid_credentials': _('Unable to login with provided credentials.')
}
def __init__(self, *args, **kwargs):
super(UserLoginSerializer, self).__init__(*args, **kwargs)
self.user = None
def validate(self, attrs):
self.user = authenticate(username=attrs.get("username"), password=attrs.get('password'))
if self.user:
if not self.user.is_active:
raise serializers.ValidationError(self.error_messages['inactive_account'])
return attrs
else:
raise serializers.ValidationError(self.error_messages['invalid_credentials'])
class UserSerializer(serializers.ModelSerializer):
class Meta:
        model = User
        fields = ['id', 'username', 'password', 'email']
class UserDetailsSerializer(serializers.ModelSerializer):
user = UserSerializer(read_only=True)
class Meta:
model = User
fields = "__all__"
class TokenSerializer(serializers.ModelSerializer):
auth_token = serializers.CharField(source='key')
class Meta:
model = Token
fields = ("auth_token", "created")
class UserLogoutSerializer(serializers.Serializer):
token = serializers.CharField(required=True)
|
import json
class Movie:
    def __init__(self, title="", genre="", running_time=0, cast=None):
        self.title = title
        self.genre = genre
        self.running_time = running_time
        # Avoid a shared mutable default argument: each Movie gets its own cast list.
        self.cast = cast if cast is not None else []
def add_cast (self, cast_to_add = {}):
if "name" in cast_to_add and "age" in cast_to_add and "sex" in cast_to_add:
self.cast.append(cast_to_add)
#print (f"Cast: {self.cast} added to {self.title}")
else:
return "Required fields are not supplied"
def describe(self):
print (f"Title: {self.title}\nGenre: {self.genre}\nRunning Time: {self.running_time} minutes\nCast: {self.cast}")
def compare_to (self,other_movie):
actors_in_common = 0
for actor in self.cast:
for other_actor in other_movie.cast:
if actor['name'] == other_actor ['name']:
actors_in_common += 1
break
if actors_in_common > 2:
return -1
return 1
def save_to_file(self,filename):
movie_data = {
"title":self.title,
"genre":self.genre,
"running time":self.running_time,
"cast":self.cast
}
with open(filename,"w") as dump_file:
json.dump(movie_data,dump_file)
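# Usage sketch (hypothetical title and cast member):
#   m = Movie("Example Film", "Drama", 120)
#   m.add_cast({"name": "Jane Doe", "age": 35, "sex": "F"})
#   m.describe()
#   m.save_to_file("example_film.json")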
|
#!/usr/bin/env python
import pickle
import io
from lab_defs import teaching_length
from lab_mc import experiments, tutorials, null_experiment
experiments["LVT"] = tutorials["LVT"]
from print_student import get_styles, swansea_logo
from assign_students import get_students, match_students
from loadstore import load_pairs
from reportlab.platypus import SimpleDocTemplate, Paragraph, Table, TableStyle, Flowable, Spacer
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import mm
from reportlab.lib.pagesizes import A4
from reportlab.lib.enums import TA_LEFT, TA_CENTER
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
pdfmetrics.registerFont(TTFont('Futura', 'Futura-Book.ttf'))
from pdfrw import PdfReader, PdfDict
from pdfrw.buildxobj import pagexobj
from pdfrw.toreportlab import makerl
from weeks import semester1_dates, semester2_dates
from collections import Counter
from itertools import zip_longest
from datetime import date
class MySimpleDocTemplate(SimpleDocTemplate):
def addPageTemplates(self,pageTemplates):
'''fix up the one and only Frame'''
if pageTemplates:
f = pageTemplates[0].frames[0]
f._leftPadding=f._rightPadding=f._topPadding=f._bottomPadding=0
#f._reset()
f._geom()
SimpleDocTemplate.addPageTemplates(self,pageTemplates)
def build_table(contents):
table_style = [('ALIGN', (0,0), (-1,-1), 'CENTER'),
('FONTNAME', (0,0), (-1,-1), 'Futura'),
('FONTSIZE', (0,0), (-1,-1), 11),
('VALIGN', (0,0), (-1,-1), 'MIDDLE'),
('TOPPADDING', (0,0), (-1,-1), 0),
('BOTTOMPADDING', (0,0), (-1,-1), 0),
('RIGHTPADDING', (0,0), (-1,-1), 0),
('LEFTPADDING', (0,0), (-1,-1), 0)]
#table = Table(contents, colWidths=49 * mm, rowHeights=30 * mm)
return contents, table_style
def build_document(contents, filename):
buf = io.BytesIO()
output_doc = MySimpleDocTemplate(
buf,
rightMargin = 5 * mm,
leftMargin = 5 * mm,
topMargin = 13 * mm,
bottomMargin = 13 * mm,
pagesize = A4,
)
Story = []
table_content, table_style = build_table(contents)
table = Table(table_content, colWidths=[49 * mm] * 4, rowHeights=30 * mm)
table.setStyle(table_style)
table.hAlign = 'CENTER'
Story.append(table)
output_doc.build(Story)
with open(filename, 'wb') as f:
f.write(buf.getvalue())
import csv
from collections import defaultdict
from lab_mc import cohort
from reportlab.graphics.barcode import code39
pairs = load_pairs("schedule.dat")
students = get_students("students.csv")
match_students(students, pairs)
def process(filename):
barcodes = {}
with open(filename, 'r') as f:
barcode_reader = csv.reader(f, delimiter='\t')
for record in barcode_reader:
#print(record)
if len(record) == 2:
barcodes[record[0]] = code39.Standard39(record[1], barWidth=0.3 * mm, barHeight=20 * mm)
return barcodes
barcodes1 = process("barcodes1.csv")
barcodes2 = process("barcodes2.csv")
barcodesA = process("barcodesA.csv")
barcodesC = process("barcodesC.csv")
table = []
for student in students.values():
cohort_letter = ['A', 'B', 'C', 'D'][cohort(student.pair_number - 1)]
row = []
row.append('{1} {0}'.format(student.number, cohort_letter))
if cohort_letter == 'A':
row.append(barcodesA[student.number])
row.append(barcodes1[student.number])
if cohort_letter == 'C':
row.append(barcodesC[student.number])
if len(student.tb2_experiments) > 1:
row.append(barcodes2[student.number])
table.append(row)
build_document(sorted(table), "barcodes.pdf")
|
#!/usr/bin/env python3
'''
Script to download Pb-Pb train output from AliEn.
python download_data.py -c LHC18q.yaml
On hiccup:
- ssh to hiccupds
- start a screen session
- enter alidock, then `alienv enter AliRoot/latest`, then get token
- python download_data.py -c LHC18q.yaml
Note that if token expires or otherwise crashes, the script will automatically detect where to start copying again
'''
import argparse
import os
import sys
import yaml
import subprocess
import multiprocessing as mp
#---------------------------------------------------------------------------
def download_data(config_file):
# Initialize config
with open(config_file, 'r') as stream:
config = yaml.safe_load(stream)
period = config['period']
parent_dir = config['parent_dir']
year = config['year']
train_name = config['train_name']
train_PWG = config['train_PWG']
train_number = config['train_number']
runlist = config['runlist']
output_dir = config['output_dir']
if 'pt_hat_bins' in config:
pt_hat_bins = config['pt_hat_bins']
else:
pt_hat_bins = None
# Create output dir and cd into it
output_dir = os.path.join(output_dir, period)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
os.chdir(output_dir)
print('output dir: {}'.format(output_dir))
# Loop through runs, and start a download for each run in parallel
for run in runlist:
p = mp.Process(target=download_run, args=(parent_dir, year, period, run, train_PWG, train_name, train_number, pt_hat_bins))
p.start()
#---------------------------------------------------------------------------
def download_run(parent_dir, year, period, run, train_PWG, train_name, train_number, pt_hat_bins):
if parent_dir == 'data':
train_output_dir = '/alice/{}/{}/{}/{}/{}/{}/{}'.format(parent_dir, year, period, run, train_PWG, train_name, train_number)
download(train_output_dir, run)
elif parent_dir == 'sim':
for pt_hat_bin in pt_hat_bins:
train_output_dir = '/alice/{}/{}/{}/{}/{}/{}/{}/{}'.format(parent_dir, year, period, pt_hat_bin, run, train_PWG, train_name, train_number)
download(train_output_dir, run, pt_hat_bin)
#---------------------------------------------------------------------------
def download(train_output_dir, run, pt_hat_bin=None):
print('train_output_dir: {}'.format(train_output_dir))
if pt_hat_bin:
run_path = '{}/{}'.format(pt_hat_bin, run)
else:
run_path = run
# Construct list of subdirectories (i.e. list of files to download)
temp_filelist_name = 'subdirs_temp_{}.txt'.format(run)
cmd = 'alien_ls {} > {}'.format(train_output_dir, temp_filelist_name)
os.system(cmd)
with open(temp_filelist_name) as f:
subdirs_all = f.read().splitlines()
subdirs = [ x for x in subdirs_all if x.isdigit() ]
os.remove(temp_filelist_name)
# Remove any empty directories
if os.path.exists(run_path):
cmd = 'find {} -empty -type d -delete'.format(run_path)
os.system(cmd)
# Copy the files
for subdir in subdirs:
# Skip any directory that already exists
subdir_path = '{}/{}'.format(run_path, subdir)
if not os.path.exists(subdir_path):
os.makedirs(subdir_path)
print('downloading: {}'.format(subdir_path))
else:
continue
logfile_name = "log_{}.txt".format(run)
with open('log_{}.txt'.format(run), "a") as logfile:
cmd = 'alien_cp alien://{}/{}/AnalysisResults.root {}'.format(train_output_dir, subdir, subdir_path)
print(cmd, file=logfile)
subprocess.run(cmd, check=False, shell=True, stdout=logfile, stderr=logfile)
#----------------------------------------------------------------------
if __name__ == '__main__':
# Define arguments
parser = argparse.ArgumentParser(description='Download train output')
parser.add_argument('-c', '--configFile', action='store',
type=str, metavar='configFile',
default='config.yaml',
help='Path of config file')
# Parse the arguments
args = parser.parse_args()
print('Configuring...')
print('configFile: \'{0}\''.format(args.configFile))
# If invalid configFile is given, exit
if not os.path.exists(args.configFile):
print('File \"{0}\" does not exist! Exiting!'.format(args.configFile))
sys.exit(0)
download_data(config_file = args.configFile)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'Grupo37'
from flask import Flask, render_template
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
import totwitter
import twitter
import io
import json
app = Flask(__name__)
GoogleMaps(app)
searchkey = '#OjalaUnDiezEnSD' # topic to search for
# kept in its own variable since it is also passed to render_template
twitter_api = totwitter.oauth.login() # Twitter API authentication
search_results = twitter_api.search.tweets(q = searchkey , count = 100, geocode = '36.516667,-6.283333,800km')
# search for the topic, retrieving up to 100 tweets geolocated around Cadiz (within an 800 km radius)
totwitter.save_json('tweetlist',search_results) # save the results to a JSON file
statuslist = totwitter.load_json('tweetlist.json') # load the JSON file
coordtable = [] # list of coordinates
for status in statuslist["statuses"]: # walk the list of statuses in the JSON
    if status["geo"]: # if a status carries geo data, store its coordinates in the table
        coordinate = status["coordinates"]
        # The Twitter API stores point coordinates as [longitude, latitude]; map markers need (lat, lng) pairs.
        pos_geo = (coordinate["coordinates"][1], coordinate["coordinates"][0])
        coordtable.append(pos_geo)
@app.route("/") #Si no se especifica, te marca la ESI
def mapview2():
mymap = Map(
identifier = "view-side",
lat = 36.516667,
lng = -6.283333,
markers = [(36.5380368,-6.2021241)],
style = "height:800px;width:500px;margin:0;"
)
return render_template('template.html', mymap=mymap)
@app.route("/<searchkey>") #al especificar searchkey te saltan los marcadores guardados en la tabla
def mapview():
mymap = Map(
identifier = "view-side",
lat = 36.516667,
lng = -6.283333,
markers = coordtable,
style = "height:800px;width:500px;margin:0;"
)
return render_template('template.html', searchkey = searchkey, mymap=mymap)
if __name__ == "__main__":
app.run(debug=True)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 26 21:28:55 2018
@author: zacholivier
Association rule mining
In this notebook, you'll implement the basic pairwise association rule mining
algorithm.
To keep the implementation simple, you will apply your implementation to
a simplified dataset, namely, letters ("items") in words
("receipts" or "baskets"). Having finished that code, you will then apply
that code to some grocery store market basket data. If you write the code well,
it will not be difficult to reuse building blocks from the letter case in
the basket data case.
Problem definition
Let's say you have a fragment of text in some language. You wish to know
whether there are association rules among the letters that appear in a word.
In this problem:
Words are "receipts"
Letters within a word are "items"
You want to know whether there are association rules of the form a⟹b, where a
and b are letters. You will write code to do that by calculating, for each rule,
its confidence, conf(a⟹b). "Confidence" will be another name for an estimate of
the conditional probability of b given a, or Pr[b|a].
"""
#Sample text input
#Let's carry out this analysis on a "dummy" text fragment, which graphic
#designers refer to as the lorem ipsum:
latin_text = """
Sed ut perspiciatis, unde omnis iste natus error sit
voluptatem accusantium doloremque laudantium, totam
rem aperiam eaque ipsa, quae ab illo inventore
veritatis et quasi architecto beatae vitae dicta
sunt, explicabo. Nemo enim ipsam voluptatem, quia
voluptas sit, aspernatur aut odit aut fugit, sed
quia consequuntur magni dolores eos, qui ratione
voluptatem sequi nesciunt, neque porro quisquam est,
qui dolorem ipsum, quia dolor sit amet consectetur
adipisci[ng] velit, sed quia non numquam [do] eius
modi tempora inci[di]dunt, ut labore et dolore
magnam aliquam quaerat voluptatem. Ut enim ad minima
veniam, quis nostrum exercitationem ullam corporis
suscipit laboriosam, nisi ut aliquid ex ea commodi
consequatur? Quis autem vel eum iure reprehenderit,
qui in ea voluptate velit esse, quam nihil molestiae
consequatur, vel illum, qui dolorem eum fugiat, quo
voluptas nulla pariatur?
At vero eos et accusamus et iusto odio dignissimos
ducimus, qui blanditiis praesentium voluptatum
deleniti atque corrupti, quos dolores et quas
molestias excepturi sint, obcaecati cupiditate non
provident, similique sunt in culpa, qui officia
deserunt mollitia animi, id est laborum et dolorum
fuga. Et harum quidem rerum facilis est et expedita
distinctio. Nam libero tempore, cum soluta nobis est
eligendi optio, cumque nihil impedit, quo minus id,
quod maxime placeat, facere possimus, omnis voluptas
assumenda est, omnis dolor repellendus. Temporibus
autem quibusdam et aut officiis debitis aut rerum
necessitatibus saepe eveniet, ut et voluptates
repudiandae sint et molestiae non recusandae. Itaque
earum rerum hic tenetur a sapiente delectus, ut aut
reiciendis voluptatibus maiores alias consequatur
aut perferendis doloribus asperiores repellat.
"""
print("First 100 characters:\n {} ...".format(latin_text[:100]))
#Data cleaning. Like most data in the real world, this dataset is noisy.
#It has both uppercase and lowercase letters, words have repeated letters,
#and there are all sorts of non-alphabetic characters. For our analysis,
#we should keep all the letters and spaces (so we can identify distinct words),
# but we should ignore case and ignore repetition within a word.
#
#For example, the eighth word of this text is "error." As an itemset, it
#consists of the three unique letters, {e,o,r}. That is, treat the word as a set,
#meaning you only keep the unique letters. This itemset has three possible
#itempairs: {e,o}, {e,r}, and {o,r}.
#Start by writing some code to help "clean up" the input.
#Exercise 1 (normalize_string_test: 2 points). Complete the following function,
# normalize_string(s). The input s is a string (str object). The function
# should return a new string with (a) all characters converted to lowercase
# and (b) all non-alphabetic, non-whitespace characters removed.
#Clarification.
#Scanning the sample text, latin_text, you may see things that
#look like special cases. For instance, inci[di]dunt and [do]. For these,
#simply remove the non-alphabetic characters and only separate the words if
#there is explicit whitespace.
#For instance, inci[di]dunt would become incididunt (as a single word) and
#[do] would become do as a standalone word because the original string has
#whitespace on either side. A period or comma without whitespace would,
#similarly, just be treated as a non-alphabetic character inside a word unless
# there is explicit whitespace. So e pluribus.unum basium would become e
# pluribusunum basium even though your common-sense understanding might
# separate pluribus and unum.
#Hint. Regard as a whitespace character anything "whitespace-like."
#That is, consider not just regular spaces, but also tabs, newlines, and
#perhaps others. To detect whitespaces easily, look for a "high-level" function
# that can help you do so rather than checking for literal space characters.
import re
def normalize_string(s):
    assert type (s) is str
    lower_string = s.lower()
    # Keep only alphabetic characters and whitespace; \w would also keep digits and underscores.
    clean_string = re.sub(r'[^a-z\s]', '', lower_string)
    return clean_string
# Demo:
print(latin_text[:100], "...\n=>", normalize_string(latin_text[:100]), "...")
norm_latin_text = normalize_string(latin_text)
len(norm_latin_text)
# `normalize_string_test`: Test cell
norm_latin_text = normalize_string(latin_text)
assert type(norm_latin_text) is str
assert len(norm_latin_text) == 1694
assert all([c.isalpha() or c.isspace() for c in norm_latin_text])
assert norm_latin_text == norm_latin_text.lower()
print("\n(Passed!)")
#Exercise 2 (get_normalized_words_test: 1 point). Implement the following
#function, get_normalized_words(s). It takes as input a string s
#(i.e., a str object). It should return a list of the words in s, after
#normalization per the definition of normalize_string().
#(That is, the input s may not be normalized yet.)
def get_normalized_words (s):
assert type (s) is str
s = normalize_string(s)
split_words = s.split()
return split_words
# Demo:
print ("First five words:\n{}".format (get_normalized_words (latin_text)[:5]))
get_normalized_words(latin_text)
# `get_normalized_words_test`: Test cell
norm_latin_words = get_normalized_words(norm_latin_text)
assert len(norm_latin_words) == 250
for i, w in [(20, 'illo'), (73, 'eius'), (144, 'deleniti'), (248, 'asperiores')]:
assert norm_latin_words[i] == w
print ("\n(Passed.)")
#Exercise 3 (make_itemsets_test: 2 points). Implement a function,
#make_itemsets(words). The input, words, is a list of strings.
#Your function should convert the characters of each string into an itemset
#and then return the list of all itemsets. These output itemsets should appear
# in the same order as their corresponding words in the input.
def make_itemsets(words):
    # The set of characters of each word is its itemset; order does not matter.
    return [set(word) for word in words]
# `make_itemsets_test`: Test cell
make_itemsets(norm_latin_words)
# `make_itemsets_test`: Test cell
norm_latin_itemsets = make_itemsets(norm_latin_words)
# Lists should have the same size
assert len(norm_latin_itemsets) == len(norm_latin_words)
# Test a random sample
from random import sample
for i in sample(range(len(norm_latin_words)), 5):
print('[{}]'.format(i), norm_latin_words[i], "-->", norm_latin_itemsets[i])
assert set(norm_latin_words[i]) == norm_latin_itemsets[i]
print("\n(Passed!)")
'''
Implementing the basic algorithm
Recall the pseudocode for the algorithm that Rachel and Rich derived together:
FindAssocRules (pseudocode)
In the following series of exercises, let's implement this method.
We'll build it "bottom-up," first defining small pieces and working our way
toward the complete algorithm. This method allows us to test each piece before
combining them.
Observe that the bulk of the work in this procedure is just updating these
tables, T and C.
So your biggest implementation decision is how to store those.
A good choice is to use a dictionary
Aside: Default dictionaries
Recall that the overall algorithm requires maintaining a table of item-pair
(tuples) counts. It would be convenient to use a dictionary to store this
table, where keys refer to item-pairs and the values are the counts.
However, with Python's built-in dictionaries, you always to have to check
whether a key exists before updating it. For example, consider this code
fragment:
D = {'existing-key': 5} # Dictionary with one key-value pair
D['existing-key'] += 1 # == 6
D['new-key'] += 1 # Error: 'new-key' does not exist!
The second attempt causes an error because 'new-key' is not yet a member of
the dictionary. So, a more correct approach would be to do the following:
D = {'existing-key': 5} # Dictionary with one key-value pair
if 'existing-key' not in D:
D['existing-key'] = 0
D['existing-key'] += 1
if 'new-key' not in D:
D['new-key'] = 0
D['new-key'] += 1
This pattern is so common that there is a special form of dictionary, called
a default dictionary, which is available from the collections module:
collections.defaultdict.
When you create a default dictionary, you need to provide a "factory" function
that the dictionary can use to create an initial value when the key does not
exist. For instance, in the preceding example, when the key was not present
the code creates a new key with the initial value of an integer zero (0).
Indeed, this default value is the one you get when you call int() with no
arguments:
'''
print (int ())
from collections import defaultdict
D2 = defaultdict (int) # Empty dictionary
D2['existing-key'] = 5 # Create one key-value pair
D2['existing-key'] += 1 # Update
D2['new-key'] += 1
print (D2)
#Exercise 4 (update_pair_counts_test: 2 points). Start by implementing a
#function that enumerates all item-pairs within an itemset and updates,
#in-place, a table that tracks the counts of those item-pairs.
#The signature of this function is:
# def update_pair_counts(pair_counts, itemset):
#where you pair_counts is the table to update and itemset is the itemset
#from which you need to enumerate item-pairs. You may assume pair_counts
#is a default dictionary. Each key is a pair of items (a, b), and each value
# is the count. You may assume all items in itemset are distinct, i.e.,
# that you may treat it as you would any set-like collection. Since the
# function will modify pair_counts, it does not need to return an object.
# `update_pair_counts_test`: Test cell
itemset_1 = set("error")
itemset_2 = set("dolor")
pair_counts = defaultdict(int)
from collections import defaultdict
from itertools import combinations # Hint!
def update_pair_counts (pair_counts, itemset):
    """
    Updates a dictionary of pair counts for
    all pairs of items in a given itemset.
    """
    assert type (pair_counts) is defaultdict
    for a, b in combinations(itemset, 2):
        # Count the pair in both orders, since a => b and b => a are distinct rules.
        pair_counts[(a, b)] += 1
        pair_counts[(b, a)] += 1
update_pair_counts(pair_counts = pair_counts, itemset = itemset_1)
update_pair_counts(pair_counts, itemset_1)
assert len(pair_counts) == 6
update_pair_counts(pair_counts, itemset_2)
assert len(pair_counts) == 16
print('"{}" + "{}"\n==> {}'.format (itemset_1, itemset_2, pair_counts))
for a, b in pair_counts:
assert (b, a) in pair_counts
assert pair_counts[(a, b)] == pair_counts[(b, a)]
print ("\n(Passed!)")
#Exercise 5 (update_item_counts_test: 2 points). Implement a procedure that,
#given an itemset, updates a table to track counts of each item.
#As with the previous exercise, you may assume all items in the given itemset
#(itemset) are distinct, i.e., that you may treat it as you would any set-like
# collection. You may also assume the table (item_counts) is a
# default dictionary.
item_counts = defaultdict(int)
item_counts.keys()
def update_item_counts(item_counts, itemset):
assert type(item_counts) is defaultdict
for a in itemset:
item_counts[a] += 1
len(item_counts)
# `update_item_counts_test`: Test cell
itemset_1 = set("error")
itemset_2 = set("dolor")
update_item_counts(item_counts, itemset_1)
assert len(item_counts) == 3
update_item_counts(item_counts, itemset_2)
assert len(item_counts) == 5
assert item_counts['d'] == 1
assert item_counts['e'] == 1
assert item_counts['l'] == 1
assert item_counts['o'] == 2
assert item_counts['r'] == 2
print("\n(Passed!)")
#Exercise 6 (filter_rules_by_conf_test: 2 points). Given tables of item-pair
#counts and individual item counts, as well as a confidence threshold,
#return the rules that meet the threshold. The returned rules should be in
# the form of a dictionary whose key is the tuple, (a,b) corresponding to the
# rule a⇒b, and whose value is the confidence of the rule, conf(a⇒b).
#You may assume that if (a,b) is in the table of item-pair counts, then both a
#and b are in the table of individual item counts.
# `filter_rules_by_conf_test`: Test cell
pair_counts = {('man', 'woman'): 5,
('bird', 'bee'): 3,
('red fish', 'blue fish'): 7}
item_counts = {'man': 7,
'bird': 9,
'red fish': 11}
def filter_rules_by_conf (pair_counts, item_counts, threshold):
    rules = {} # (item_a, item_b) -> conf (item_a => item_b)
    for pair in pair_counts:
        pairwise = pair_counts[pair]
        total_c = item_counts[pair[0]]
        association = pairwise / total_c
        rules[pair] = association
    # Keep only rules whose confidence is at least the threshold.
    rules = {
        key: value for key, value in rules.items() if value >= threshold
    }
    return rules
rules = filter_rules_by_conf (pair_counts, item_counts, 0.5)
print("Found these rules:", rules)
assert ('man', 'woman') in rules
assert ('bird', 'bee') not in rules
assert ('red fish', 'blue fish') in rules
print("\n(Passed!)")
#Aside: pretty printing the rules. The output of rules above is a little messy;
# here's a little helper function that structures that output a little, which
# will be useful for both debugging and reporting purposes.
def gen_rule_str(a, b, val=None, val_fmt='{:.3f}', sep=" = "):
text = "{} => {}".format(a, b)
if val:
text = "conf(" + text + ")"
text += sep + val_fmt.format(val)
return text
def print_rules(rules):
if type(rules) is dict or type(rules) is defaultdict:
from operator import itemgetter
ordered_rules = sorted(rules.items(), key=itemgetter(1), reverse=True)
else: # Assume rules is iterable
ordered_rules = [((a, b), None) for a, b in rules]
for (a, b), conf_ab in ordered_rules:
print(gen_rule_str(a, b, conf_ab))
# Demo:
print_rules(rules)
#Exercise 7 (find_assoc_rules_test: 3 points). Using the building blocks you
#implemented above, complete a function find_assoc_rules so that it implements
#the basic association rule mining algorithm and returns a dictionary of rules.
#In particular, your implementation may assume the following:
#As indicated in its signature, below, the function takes two inputs: receipts
#and threshold.
#The input, receipts, is a collection of itemsets: for every receipt r in
#receipts, r may be treated as a collection of unique items.
#The input threshold is the minimum desired confidence value. That is, the
#function should only return rules whose confidence is at least threshold.
#The returned dictionary, rules, should be keyed by tuples (a,b) corresponding
#to the rule a⇒b; each value should be the confidence conf(a⇒b) of the rule.
# `find_assoc_rules_test`: Test cell
receipts = [set('abbc'), set('ac'), set('a')]
def find_assoc_rules(receipts, threshold):
rules = {}
pair_counts = defaultdict(int)
item_counts = defaultdict(int)
for r in receipts:
for a,b in combinations(r, 2):
pair_counts[(a,b)] += 1
pair_counts[(b,a)] += 1
for a in r:
item_counts[a] += 1
for pair in pair_counts:
pairwise = pair_counts[pair]
total_c = item_counts[pair[0]]
association = pairwise / total_c
rules[pair] = association
rules = {
key: value for key, value in rules.items() if value > threshold
}
return rules
rules = find_assoc_rules(receipts, 0.6)
print("Original receipts as itemsets:", receipts)
print("Resulting rules:")
print_rules(rules)
assert ('a', 'b') not in rules
assert ('b', 'a') in rules
assert ('a', 'c') in rules
assert ('c', 'a') in rules
assert ('b', 'c') in rules
assert ('c', 'b') not in rules
print("\n(Passed!)")
#Exercise 8 (latin_rules_test: 2 points). For the Latin string, latin_text,
#use your find_assoc_rules() function to compute the rules whose confidence is
#at least 0.75. Store your result in a variable named latin_rules.
def latin_rules(string, threshold):
    words = get_normalized_words(string)  # get_normalized_words() normalizes the string itself
    receipts = make_itemsets(words)
    return find_assoc_rules(receipts, threshold=threshold)
latin_rules = latin_rules(latin_text, .74)
print_rules(latin_rules)
# `latin_rules_test`: Test cell
assert len(latin_rules) == 10
assert all([0.75 <= v <= 1.0 for v in latin_rules.values()])
for ab in ['xe', 'qu', 'hi', 'xi', 'vt', 're', 've', 'fi', 'gi', 'bi']:
assert (ab[0], ab[1]) in latin_rules
print("\n(Passed!)")
english_text = """
But I must explain to you how all this mistaken idea
of denouncing of a pleasure and praising pain was
born and I will give you a complete account of the
system, and expound the actual teachings of the great
explorer of the truth, the master-builder of human
happiness. No one rejects, dislikes, or avoids
pleasure itself, because it is pleasure, but because
those who do not know how to pursue pleasure
rationally encounter consequences that are extremely
painful. Nor again is there anyone who loves or
pursues or desires to obtain pain of itself, because
it is pain, but occasionally circumstances occur in
which toil and pain can procure him some great
pleasure. To take a trivial example, which of us
ever undertakes laborious physical exercise, except
to obtain some advantage from it? But who has any
right to find fault with a man who chooses to enjoy
a pleasure that has no annoying consequences, or
one who avoids a pain that produces no resultant
pleasure?
On the other hand, we denounce with righteous
indignation and dislike men who are so beguiled and
demoralized by the charms of pleasure of the moment,
so blinded by desire, that they cannot foresee the
pain and trouble that are bound to ensue; and equal
blame belongs to those who fail in their duty
through weakness of will, which is the same as
saying through shrinking from toil and pain. These
cases are perfectly simple and easy to distinguish.
In a free hour, when our power of choice is
untrammeled and when nothing prevents our being
able to do what we like best, every pleasure is to
be welcomed and every pain avoided. But in certain
circumstances and owing to the claims of duty or
the obligations of business it will frequently
occur that pleasures have to be repudiated and
annoyances accepted. The wise man therefore always
holds in these matters to this principle of
selection: he rejects pleasures to secure other
greater pleasures, or else he endures pains to
avoid worse pains.
"""
#Exercise 9 (intersect_keys_test: 2 points). Write a function that, given two
#dictionaries, finds the intersection of their keys.
def intersect_keys(d1, d2):
assert type(d1) is dict or type(d1) is defaultdict
assert type(d2) is dict or type(d2) is defaultdict
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
intersect_dict = d1_keys & d2_keys
return intersect_dict
# `intersect_keys_test`: Test cell
from random import sample
key_space = {'ape', 'baboon', 'bonobo', 'chimp', 'gorilla', 'monkey', 'orangutan'}
val_space = range(100)
for trial in range(10): # Try 10 random tests
d1 = {k: v for k, v in zip(sample(key_space, 4), sample(val_space, 4))}
d2 = {k: v for k, v in zip(sample(key_space, 3), sample(val_space, 3))}
k_common = intersect_keys(d1, d2)
for k in key_space:
is_common = (k in k_common) and (k in d1) and (k in d2)
is_not_common = (k not in k_common) and ((k not in d1) or (k not in d2))
assert is_common or is_not_common
print("\n(Passed!)")
#Exercise 10 (common_high_conf_rules_test: 1 points). Let's consider any rules
#with a confidence of at least 0.75 to be a "high-confidence rule."
#Write some code that finds all high-confidence rules appearing in both the
#Latin text and the English text. Store your result in a list named
#common_high_conf_rules whose elements are (a,b) pairs corresponding to the rules a⇒b
def latin_rules(string, threshold):
    words = get_normalized_words(string)
    receipts = make_itemsets(words)
    return find_assoc_rules(receipts, threshold=threshold)
def english_rules(string, threshold):
    words = get_normalized_words(string)
    receipts = make_itemsets(words)
    return find_assoc_rules(receipts, threshold=threshold)
latin_assoc = latin_rules(latin_text, .74)
english_assoc = english_rules(english_text, .74)
common_high_conf_rules = intersect_keys(latin_assoc, english_assoc)
print("High-confidence rules common to _lorem ipsum_ in Latin and English:")
print_rules(common_high_conf_rules)
# `common_high_conf_rules_test`: Test cell
assert len(common_high_conf_rules) == 2
assert ('x', 'e') in common_high_conf_rules
assert ('q', 'u') in common_high_conf_rules
print("\n(Passed!)")
#Putting it all together: Actual baskets!
#Let's take a look at some real data that someone was kind enough to prepare
#for a similar exercise designed for the R programming environment.
#First, here's a code snippet to load the data, which is a text file.
#If you are running in the Vocareum environment, we've already placed a copy of
# the data there; if you are running outside, this code will try to download a
# copy from the CSE 6040 website.
def on_vocareum():
import os
return os.path.exists('.voc')
def download(file, local_dir="", url_base=None, checksum=None):
import os, requests, hashlib, io
local_file = "{}{}".format(local_dir, file)
if not os.path.exists(local_file):
if url_base is None:
url_base = "https://cse6040.gatech.edu/datasets/"
url = "{}{}".format(url_base, file)
print("Downloading: {} ...".format(url))
r = requests.get(url)
with open(local_file, 'wb') as f:
f.write(r.content)
if checksum is not None:
with io.open(local_file, 'rb') as f:
body = f.read()
body_checksum = hashlib.md5(body).hexdigest()
assert body_checksum == checksum, \
"Downloaded file '{}' has incorrect checksum: '{}' instead of '{}'".format(local_file,
body_checksum,
checksum)
print("'{}' is ready!".format(file))
if on_vocareum():
DATA_PATH = "../resource/asnlib/publicdata/"
else:
DATA_PATH = ""
datasets = {'groceries.csv': '0a3d21c692be5c8ce55c93e59543dcbe'}
for filename, checksum in datasets.items():
download(filename, local_dir=DATA_PATH, checksum=checksum)
with open('{}{}'.format(DATA_PATH, 'groceries.csv')) as fp:
groceries_file = fp.read()
print (groceries_file[0:250] + "...\n... (etc.) ...") # Prints the first 250 characters only
print("\n(All data appears to be ready.)")
'''
Each line of this file is some customer's shopping basket. The items that the
customer bought are stored as a comma-separated list of values.
Exercise 11: Your task. (basket_rules_test: 4 points). Your final task in this
notebook is to mine this dataset for pairwise association rules.
In particular, your code should produce (no pun intended!) a final dictionary,
basket_rules, that meets these conditions (read carefully!):
1. The keys are pairs (a,b), where a and b are item names (as strings).
2. The values are the corresponding confidence scores, conf(a⇒b).
3. Only include rules a⇒b where item a occurs at least MIN_COUNT times and
conf(a⇒b) is at least THRESHOLD.
Pay particular attention to Condition 3: not only do you have to filter by a
confidence threshold, but you must also exclude rules a⇒b where the item a does
not appear "often enough."
There is a code cell below that defines values of MIN_COUNT and THRESHOLD,
but your code should work even if we decide to change those values later on.
Aside: Why would an analyst want to enforce Condition 3?
Your solution can use the groceries_file string variable defined above as its
starting point. And since it's in the same notebook, you may, of course,
reuse any of the code you've written above as needed.
Lastly, if you feel you need additional code cells, you can create them after
the code cell marked for your solution but before the code marked,
### TEST CODE ###.
'''
# Confidence threshold
THRESHOLD = 0.5
# Only consider rules for items appearing at least `MIN_COUNT` times.
MIN_COUNT = 10
# Build one itemset per basket: each line of the file is a comma-separated basket.
baskets = [set(line.split(',')) for line in groceries_file.split('\n') if line.strip()]
# Count item-pairs and individual items across all baskets, reusing the helpers above.
pair_count = defaultdict(int)
item_count = defaultdict(int)
for basket in baskets:
    update_pair_counts(pair_count, basket)
    update_item_counts(item_count, basket)
# Keep only rules a => b where item `a` occurs at least MIN_COUNT times and
# conf(a => b) = pair_count[(a, b)] / item_count[a] is at least THRESHOLD.
basket_rules = {}
for (a, b), pair_n in pair_count.items():
    if item_count[a] >= MIN_COUNT:
        conf_ab = pair_n / item_count[a]
        if conf_ab >= THRESHOLD:
            basket_rules[(a, b)] = conf_ab
### `basket_rules_test`: TEST CODE ###
print("Found {} rules whose confidence exceeds {}.".format(len(basket_rules), THRESHOLD))
print("Here they are:\n")
print_rules(basket_rules)
assert len(basket_rules) == 19
assert all([THRESHOLD <= v < 1.0 for v in basket_rules.values()])
ans_keys = [("pudding powder", "whole milk"), ("tidbits", "rolls/buns"), ("cocoa drinks", "whole milk"), ("cream", "sausage"), ("rubbing alcohol", "whole milk"), ("honey", "whole milk"), ("frozen fruits", "other vegetables"), ("cream", "other vegetables"), ("ready soups", "rolls/buns"), ("cooking chocolate", "whole milk"), ("cereals", "whole milk"), ("rice", "whole milk"), ("specialty cheese", "other vegetables"), ("baking powder", "whole milk"), ("rubbing alcohol", "butter"), ("rubbing alcohol", "citrus fruit"), ("jam", "whole milk"), ("frozen fruits", "whipped/sour cream"), ("rice", "other vegetables")]
for k in ans_keys:
assert k in basket_rules
print("\n(Passed!)")
|
1. Login + file download
The user must log in before downloading.
Whether the user is logged in should be tracked on the server.
The user can also choose between uploading and downloading.
2. Implement the above with socketserver.
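A minimal sketch of how part 2 might look with the standard library's socketserver module. Everything here is illustrative rather than specified by the exercise: the line-based LOGIN/DOWNLOAD commands, the hard-coded credentials, the served_files directory, and the port are made-up choices, and the upload side is omitted but would follow the same pattern.

import os
import socketserver

FILES_DIR = "served_files"      # hypothetical directory of downloadable files
USERS = {"alice": "secret"}     # hypothetical credential store

class FileServerHandler(socketserver.StreamRequestHandler):
    def handle(self):
        logged_in = False  # login state is kept on the server, per connection
        for raw_line in self.rfile:
            parts = raw_line.decode("utf-8", "replace").strip().split()
            if not parts:
                continue
            command = parts[0].upper()
            if command == "LOGIN" and len(parts) == 3:
                logged_in = USERS.get(parts[1]) == parts[2]
                self.wfile.write(b"OK\n" if logged_in else b"DENIED\n")
            elif command == "DOWNLOAD" and len(parts) == 2:
                if not logged_in:
                    self.wfile.write(b"LOGIN REQUIRED\n")
                    continue
                path = os.path.join(FILES_DIR, os.path.basename(parts[1]))
                if os.path.isfile(path):
                    with open(path, "rb") as f:
                        data = f.read()
                    self.wfile.write(b"SIZE %d\n" % len(data))
                    self.wfile.write(data)
                else:
                    self.wfile.write(b"NOT FOUND\n")
            else:
                self.wfile.write(b"UNKNOWN COMMAND\n")

if __name__ == "__main__":
    with socketserver.ThreadingTCPServer(("127.0.0.1", 9000), FileServerHandler) as server:
        server.serve_forever()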
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from collections import defaultdict
from textwrap import dedent
import pytest
from pants.backend.go import target_type_rules
from pants.backend.go.dependency_inference import (
GoImportPathsMappingAddressSet,
GoModuleImportPathsMapping,
GoModuleImportPathsMappings,
GoModuleImportPathsMappingsHook,
)
from pants.backend.go.target_types import GoModTarget, GoOwningGoModAddressField, GoPackageTarget
from pants.backend.go.util_rules import (
assembly,
build_pkg,
build_pkg_target,
first_party_pkg,
go_mod,
import_analysis,
link,
sdk,
third_party_pkg,
)
from pants.backend.go.util_rules.build_opts import GoBuildOptions
from pants.backend.go.util_rules.build_pkg import (
BuildGoPackageRequest,
BuiltGoPackage,
FallibleBuildGoPackageRequest,
FallibleBuiltGoPackage,
)
from pants.backend.go.util_rules.build_pkg_target import (
BuildGoPackageRequestForStdlibRequest,
BuildGoPackageTargetRequest,
GoCodegenBuildRequest,
)
from pants.backend.go.util_rules.go_mod import OwningGoMod, OwningGoModRequest
from pants.backend.go.util_rules.import_analysis import GoStdLibPackages, GoStdLibPackagesRequest
from pants.core.target_types import FilesGeneratorTarget, FileSourceField, FileTarget
from pants.engine.addresses import Address, Addresses
from pants.engine.fs import CreateDigest, Digest, FileContent, Snapshot
from pants.engine.internals.selectors import MultiGet
from pants.engine.rules import Get, QueryRule, rule
from pants.engine.target import AllTargets, Dependencies, DependenciesRequest
from pants.engine.unions import UnionRule
from pants.testutil.rule_runner import RuleRunner
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.strutil import path_safe
# Set up a semi-complex codegen plugin. Note that we cyclically call into the
# `BuildGoPackageTargetRequest` rule to set up a dependency on a third-party package, as this
# is common for codegen plugins to need to do.
class GoCodegenBuildFilesRequest(GoCodegenBuildRequest):
generate_from = FileSourceField
class GenerateFromFileImportPathsMappingHook(GoModuleImportPathsMappingsHook):
pass
@rule(desc="Map import paths for all 'generate from file' targets.", level=LogLevel.DEBUG)
async def map_import_paths(
_request: GenerateFromFileImportPathsMappingHook,
all_targets: AllTargets,
) -> GoModuleImportPathsMappings:
file_targets = [tgt for tgt in all_targets if tgt.has_field(FileSourceField)]
owning_go_mod_targets = await MultiGet(
Get(OwningGoMod, OwningGoModRequest(tgt.address)) for tgt in file_targets
)
import_paths_by_module: dict[Address, dict[str, set[Address]]] = defaultdict(
lambda: defaultdict(set)
)
for owning_go_mod, tgt in zip(owning_go_mod_targets, file_targets):
import_paths_by_module[owning_go_mod.address]["codegen.com/gen"].add(tgt.address)
return GoModuleImportPathsMappings(
FrozenDict(
{
go_mod_addr: GoModuleImportPathsMapping(
mapping=FrozenDict(
{
import_path: GoImportPathsMappingAddressSet(
addresses=tuple(sorted(addresses)), infer_all=True
)
for import_path, addresses in import_path_mapping.items()
}
),
address_to_import_path=FrozenDict(
{
address: import_path
for import_path, addresses in import_path_mapping.items()
for address in addresses
}
),
)
for go_mod_addr, import_path_mapping in import_paths_by_module.items()
}
)
)
@rule
async def generate_from_file(request: GoCodegenBuildFilesRequest) -> FallibleBuildGoPackageRequest:
content = dedent(
"""\
package gen
import "fmt"
import "github.com/google/uuid"
func Quote(s string) string {
uuid.SetClockSequence(-1) // A trivial line to use uuid.
return fmt.Sprintf(">> %s <<", s)
}
"""
)
digest = await Get(Digest, CreateDigest([FileContent("codegen/f.go", content.encode())]))
deps = await Get(Addresses, DependenciesRequest(request.target[Dependencies]))
assert len(deps) == 1
assert deps[0].generated_name == "github.com/google/uuid"
thirdparty_dep = await Get(
FallibleBuildGoPackageRequest,
BuildGoPackageTargetRequest(deps[0], build_opts=GoBuildOptions()),
)
assert thirdparty_dep.request is not None
return FallibleBuildGoPackageRequest(
request=BuildGoPackageRequest(
import_path="codegen.com/gen",
pkg_name="gen",
digest=digest,
dir_path="codegen",
build_opts=GoBuildOptions(),
go_files=("f.go",),
s_files=(),
direct_dependencies=(thirdparty_dep.request,),
minimum_go_version=None,
),
import_path="codegen.com/gen",
)
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*sdk.rules(),
*assembly.rules(),
*build_pkg.rules(),
*build_pkg_target.rules(),
*import_analysis.rules(),
*link.rules(),
*go_mod.rules(),
*first_party_pkg.rules(),
*third_party_pkg.rules(),
*target_type_rules.rules(),
generate_from_file,
map_import_paths,
QueryRule(BuiltGoPackage, [BuildGoPackageRequest]),
QueryRule(FallibleBuiltGoPackage, [BuildGoPackageRequest]),
QueryRule(BuildGoPackageRequest, [BuildGoPackageTargetRequest]),
QueryRule(FallibleBuildGoPackageRequest, [BuildGoPackageTargetRequest]),
QueryRule(GoStdLibPackages, (GoStdLibPackagesRequest,)),
QueryRule(BuildGoPackageRequest, (BuildGoPackageRequestForStdlibRequest,)),
UnionRule(GoCodegenBuildRequest, GoCodegenBuildFilesRequest),
UnionRule(GoModuleImportPathsMappingsHook, GenerateFromFileImportPathsMappingHook),
FileTarget.register_plugin_field(GoOwningGoModAddressField),
FilesGeneratorTarget.register_plugin_field(GoOwningGoModAddressField),
],
target_types=[
GoModTarget,
GoPackageTarget,
FilesGeneratorTarget,
],
)
rule_runner.set_options([], env_inherit={"PATH"})
return rule_runner
def assert_built(
rule_runner: RuleRunner, request: BuildGoPackageRequest, *, expected_import_paths: list[str]
) -> None:
built_package = rule_runner.request(BuiltGoPackage, [request])
result_files = rule_runner.request(Snapshot, [built_package.digest]).files
expected = {
import_path: os.path.join("__pkgs__", path_safe(import_path), "__pkg__.a")
for import_path in expected_import_paths
}
actual = dict(built_package.import_paths_to_pkg_a_files)
for import_path, pkg_archive_path in expected.items():
assert import_path in actual, f"expected {import_path} to be in build output"
assert (
actual[import_path] == expected[import_path]
), "expected package archive paths to match"
assert set(expected.values()).issubset(set(result_files))
def assert_pkg_target_built(
rule_runner: RuleRunner,
addr: Address,
*,
expected_import_path: str,
expected_dir_path: str,
expected_direct_dependency_import_paths: list[str],
expected_transitive_dependency_import_paths: list[str],
expected_go_file_names: list[str],
) -> None:
build_request = rule_runner.request(
BuildGoPackageRequest, [BuildGoPackageTargetRequest(addr, build_opts=GoBuildOptions())]
)
assert build_request.import_path == expected_import_path
assert build_request.dir_path == expected_dir_path
assert build_request.go_files == tuple(expected_go_file_names)
assert not build_request.s_files
assert sorted([dep.import_path for dep in build_request.direct_dependencies]) == sorted(
expected_direct_dependency_import_paths
)
assert_built(
rule_runner,
build_request,
expected_import_paths=[
expected_import_path,
*expected_direct_dependency_import_paths,
*expected_transitive_dependency_import_paths,
],
)
def test_build_first_party_pkg_target(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"go.mod": dedent(
"""\
module example.com/greeter
go 1.17
"""
),
"greeter.go": dedent(
"""\
package greeter
import "fmt"
func Hello() {
fmt.Println("Hello world!")
}
"""
),
"BUILD": "go_mod(name='mod')\ngo_package(name='pkg')",
}
)
assert_pkg_target_built(
rule_runner,
Address("", target_name="pkg"),
expected_import_path="example.com/greeter",
expected_dir_path="",
expected_go_file_names=["greeter.go"],
expected_direct_dependency_import_paths=["fmt"],
expected_transitive_dependency_import_paths=[],
)
def test_build_third_party_pkg_target(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"go.mod": dedent(
"""\
module example.com/greeter
go 1.17
require github.com/google/uuid v1.3.0
"""
),
"go.sum": dedent(
"""\
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
"""
),
"BUILD": "go_mod(name='mod')",
}
)
import_path = "github.com/google/uuid"
assert_pkg_target_built(
rule_runner,
Address("", target_name="mod", generated_name=import_path),
expected_import_path=import_path,
expected_dir_path="gopath/pkg/mod/github.com/google/uuid@v1.3.0",
expected_go_file_names=[
"dce.go",
"doc.go",
"hash.go",
"marshal.go",
"node.go",
"node_net.go",
"null.go",
"sql.go",
"time.go",
"util.go",
"uuid.go",
"version1.go",
"version4.go",
],
expected_direct_dependency_import_paths=[
"bytes",
"crypto/md5",
"crypto/rand",
"crypto/sha1",
"database/sql/driver",
"encoding/binary",
"encoding/hex",
"encoding/json",
"errors",
"fmt",
"hash",
"io",
"net",
"os",
"strings",
"sync",
"time",
],
expected_transitive_dependency_import_paths=[],
)
def test_build_target_with_dependencies(rule_runner: RuleRunner) -> None:
"""Check that we properly include (transitive) dependencies."""
rule_runner.write_files(
{
"greeter/quoter/lib.go": dedent(
"""\
package quoter
import "fmt"
func Quote(s string) string {
return fmt.Sprintf(">> %s <<", s)
}
"""
),
"greeter/quoter/BUILD": "go_package()",
"greeter/lib.go": dedent(
"""\
package greeter
import (
"fmt"
"example.com/project/greeter/quoter"
"golang.org/x/xerrors"
)
func QuotedHello() {
xerrors.New("some error")
fmt.Println(quoter.Quote("Hello world!"))
}
"""
),
"greeter/BUILD": "go_package()",
"main.go": dedent(
"""\
package main
import "example.com/project/greeter"
func main() {
greeter.QuotedHello()
}
"""
),
"go.mod": dedent(
"""\
module example.com/project
go 1.17
require golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
"""
),
"go.sum": dedent(
"""\
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
"""
),
"BUILD": "go_mod(name='mod')\ngo_package(name='pkg')",
}
)
xerrors_internal_import_path = "golang.org/x/xerrors/internal"
assert_pkg_target_built(
rule_runner,
Address("", target_name="mod", generated_name=xerrors_internal_import_path),
expected_import_path=xerrors_internal_import_path,
expected_dir_path="gopath/pkg/mod/golang.org/x/xerrors@v0.0.0-20191204190536-9bdfabe68543/internal",
expected_go_file_names=["internal.go"],
expected_direct_dependency_import_paths=[],
expected_transitive_dependency_import_paths=[],
)
xerrors_import_path = "golang.org/x/xerrors"
assert_pkg_target_built(
rule_runner,
Address("", target_name="mod", generated_name=xerrors_import_path),
expected_import_path=xerrors_import_path,
expected_dir_path="gopath/pkg/mod/golang.org/x/xerrors@v0.0.0-20191204190536-9bdfabe68543",
expected_go_file_names=[
"adaptor.go",
"doc.go",
"errors.go",
"fmt.go",
"format.go",
"frame.go",
"wrap.go",
],
expected_direct_dependency_import_paths=[
"bytes",
"fmt",
xerrors_internal_import_path,
"io",
"reflect",
"runtime",
"strconv",
"strings",
"unicode",
"unicode/utf8",
],
expected_transitive_dependency_import_paths=[],
)
quoter_import_path = "example.com/project/greeter/quoter"
assert_pkg_target_built(
rule_runner,
Address("greeter/quoter"),
expected_import_path=quoter_import_path,
expected_dir_path="greeter/quoter",
expected_go_file_names=["lib.go"],
expected_direct_dependency_import_paths=["fmt"],
expected_transitive_dependency_import_paths=[],
)
greeter_import_path = "example.com/project/greeter"
assert_pkg_target_built(
rule_runner,
Address("greeter"),
expected_import_path=greeter_import_path,
expected_dir_path="greeter",
expected_go_file_names=["lib.go"],
expected_direct_dependency_import_paths=["fmt", xerrors_import_path, quoter_import_path],
expected_transitive_dependency_import_paths=[xerrors_internal_import_path],
)
assert_pkg_target_built(
rule_runner,
Address("", target_name="pkg"),
expected_import_path="example.com/project",
expected_dir_path="",
expected_go_file_names=["main.go"],
expected_direct_dependency_import_paths=[greeter_import_path],
expected_transitive_dependency_import_paths=[
quoter_import_path,
xerrors_import_path,
xerrors_internal_import_path,
],
)
def test_build_invalid_target(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"go.mod": dedent(
"""\
module example.com/greeter
go 1.17
"""
),
"BUILD": "go_mod(name='mod')",
"direct/f.go": "invalid!!!",
"direct/BUILD": "go_package()",
"dep/f.go": "invalid!!!",
"dep/BUILD": "go_package()",
"uses_dep/f.go": dedent(
"""\
package uses_dep
import "example.com/greeter/dep"
func Hello() {
dep.Foo("Hello world!")
}
"""
),
"uses_dep/BUILD": "go_package()",
}
)
direct_build_request = rule_runner.request(
FallibleBuildGoPackageRequest,
[BuildGoPackageTargetRequest(Address("direct"), build_opts=GoBuildOptions())],
)
assert direct_build_request.request is None
assert direct_build_request.exit_code == 1
assert "direct/f.go:1:1: expected 'package', found invalid\n" in (
direct_build_request.stderr or ""
)
dep_build_request = rule_runner.request(
FallibleBuildGoPackageRequest,
[BuildGoPackageTargetRequest(Address("uses_dep"), build_opts=GoBuildOptions())],
)
assert dep_build_request.request is None
assert dep_build_request.exit_code == 1
assert "dep/f.go:1:1: expected 'package', found invalid\n" in (dep_build_request.stderr or "")
def test_build_codegen_target(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"go.mod": dedent(
"""\
module example.com/greeter
go 1.17
require github.com/google/uuid v1.3.0
"""
),
"go.sum": dedent(
"""\
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
"""
),
"generate_from_me.txt": "",
"greeter.go": dedent(
"""\
package greeter
import "fmt"
import "codegen.com/gen"
func Hello() {
fmt.Println(gen.Quote("Hello world!"))
}
"""
),
"BUILD": dedent(
"""\
go_mod(name='mod')
go_package(name='pkg', dependencies=[":gen"])
files(
name='gen',
sources=['generate_from_me.txt'],
dependencies=[':mod#github.com/google/uuid'],
)
"""
),
}
)
# Running directly on a codegen target should work.
assert_pkg_target_built(
rule_runner,
Address("", target_name="gen", relative_file_path="generate_from_me.txt"),
expected_import_path="codegen.com/gen",
expected_dir_path="codegen",
expected_go_file_names=["f.go"],
expected_direct_dependency_import_paths=["github.com/google/uuid"],
expected_transitive_dependency_import_paths=[],
)
# Direct dependencies on codegen targets must be propagated.
#
# Note that the `go_package` depends on the `files` generator target. This should work, even
# though `files` itself cannot generate, because it's an alias for all generated `file` targets.
assert_pkg_target_built(
rule_runner,
Address("", target_name="pkg"),
expected_import_path="example.com/greeter",
expected_dir_path="",
expected_go_file_names=["greeter.go"],
expected_direct_dependency_import_paths=["codegen.com/gen", "fmt"],
expected_transitive_dependency_import_paths=["github.com/google/uuid"],
)
def test_xtest_deps(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"go.mod": "module example.pantsbuild.org",
"BUILD": "go_mod(name='mod')\n",
"pkg/BUILD": "go_package()\n",
"pkg/example.go": dedent(
"""\
package pkg
const ExampleValue = 2137
"""
),
"pkg/example_test.go": dedent(
"""\
package pkg_test
import (
"example.pantsbuild.org/pkg"
"example.pantsbuild.org/pkg/testutils"
"testing"
)
func TestValue(t *testing.T) {
t.Run("Test", func(t *testing.T) {
if pkg.ExampleValue != testutils.ExampleValueFromTestutils {
t.Error("Not equal")
}
})
}
"""
),
"pkg/testutils/BUILD": "go_package()\n",
"pkg/testutils/testutils.go": dedent(
"""\
package testutils
import "example.pantsbuild.org/pkg"
const ExampleValueFromTestutils = pkg.ExampleValue
"""
),
}
)
assert_pkg_target_built(
rule_runner,
Address("pkg"),
expected_dir_path="pkg",
expected_import_path="example.pantsbuild.org/pkg",
expected_go_file_names=["example.go"],
expected_direct_dependency_import_paths=[],
expected_transitive_dependency_import_paths=[],
)
def test_stdlib_embed_config(rule_runner: RuleRunner) -> None:
import_path = "crypto/internal/nistec"
stdlib_packages = rule_runner.request(
GoStdLibPackages, [GoStdLibPackagesRequest(with_race_detector=False, cgo_enabled=False)]
)
pkg_info = stdlib_packages.get(import_path)
if not pkg_info:
pytest.skip(
f"Skipping test since `{import_path}` import path not available in Go standard library."
)
assert "embed" in pkg_info.imports
assert pkg_info.embed_patterns
assert pkg_info.embed_files
build_request = rule_runner.request(
BuildGoPackageRequest,
[
BuildGoPackageRequestForStdlibRequest(
import_path=import_path, build_opts=GoBuildOptions(cgo_enabled=False)
)
],
)
embed_config = build_request.embed_config
assert embed_config is not None
assert embed_config.patterns
assert embed_config.files
|
import os
import tempfile
import unittest
from worker.file_util import gzip_file, gzip_string, remove_path, tar_gzip_directory, un_gzip_stream, un_gzip_string, un_tar_directory
class FileUtilTest(unittest.TestCase):
def test_tar_has_files(self):
dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'files')
temp_dir = tempfile.mkdtemp()
self.addCleanup(lambda: remove_path(temp_dir))
output_dir = os.path.join(temp_dir, 'output')
un_tar_directory(
tar_gzip_directory(dir, False, ['f2'], ['f1', 'b.txt']),
output_dir, 'gz')
output_dir_entries = os.listdir(output_dir)
self.assertIn('dir1', output_dir_entries)
self.assertIn('a.txt', output_dir_entries)
self.assertNotIn('b.txt', output_dir_entries)
self.assertTrue(os.path.exists(os.path.join(output_dir, 'dir1', 'f1')))
self.assertFalse(os.path.exists(os.path.join(output_dir, 'dir1', 'f2')))
self.assertTrue(os.path.islink(os.path.join(output_dir, 'a-symlink.txt')))
def test_tar_empty(self):
dir = tempfile.mkdtemp()
self.addCleanup(lambda: remove_path(dir))
temp_dir = tempfile.mkdtemp()
self.addCleanup(lambda: remove_path(temp_dir))
output_dir = os.path.join(temp_dir, 'output')
un_tar_directory(tar_gzip_directory(dir), output_dir, 'gz')
self.assertEqual(os.listdir(output_dir), [])
def test_gzip_stream(self):
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
self.addCleanup(lambda: os.remove(temp_file.name))
temp_file.write('contents')
name = temp_file.name
self.assertEqual(un_gzip_stream(gzip_file(name)).read(), 'contents')
def test_gzip_string(self):
self.assertEqual(un_gzip_string(gzip_string('contents')), 'contents')
|
def reverse(s):
    # Build the reversed string by prepending each character in turn.
    res = ''
    for ch in s:
        res = ch + res
    return res
s = "Jarvis"
print("The original string is : ", s)
print("The reversed string is : ", reverse(s))
|
"""xxx
Revision ID: 519e5b696ae4
Revises: 628fb686363
Create Date: 2015-11-21 23:38:06.202680
"""
# revision identifiers, used by Alembic.
revision = '519e5b696ae4'
down_revision = '628fb686363'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'test')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('test', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True))
### end Alembic commands ###
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-09 15:45:19
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
# Files
'''
File operations:
1. Open the file
2. Operate on the file handle
3. Close the file
Common errors:
UnicodeDecodeError: the encoding used when the file was written does not match the encoding used to open it
SyntaxError: usually a Windows path where '\' was not escaped as '\\'; also, a file name must not be a bare number
'''
# Absolute path
#f1 = open(r'D:\programming_with_python\043从零开始学python\day04\文件的读.txt',encoding='utf-8',mode='r')
# Relative path
# Read modes
'''
r, rb, r+, r+b
rb: non-text (binary) files
r: text files
'''
# Text files
# read() reads the entire file at once **
f = open('文件的读.txt',encoding='utf-8')
content = f.read()
print(content,type(content))
f.close()
# read(n) reads n characters
f = open('文件的读.txt',encoding='utf-8')
content = f.read(5)
print(content)
f.close()
# readline() reads one line at a time
f = open('文件的读.txt',encoding='utf-8')
print(f.readline())
print(f.readline())
print(f.readline())
f.close()
# readlines() returns a list; each element is one line of the original file
f = open('文件的读.txt',encoding='utf-8')
l1 = f.readlines()
print(l1)
f.close()
# Iterating with a for loop; best suited to reading large files ***
f = open('文件的读.txt',encoding='utf-8')
for line in f:
    print(line)
f.close()
# Non-text (binary) files: rb
f = open('美女.jpg',mode='rb')
content = f.read()
print(content)
f.close()
|
"""Treadmill / Active Directory (Windows) integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import click
from treadmill import context
from treadmill import zknamespace as z
from treadmill import zkutils
from treadmill.ad import gmsa
from treadmill.zksync import utils as zksync_utils
_LOGGER = logging.getLogger(__name__)
def init():
"""App main."""
@click.group(name='ad')
def ad_grp():
"""Manage Active Directory integration."""
pass
@ad_grp.command(name='gmsa')
@click.option('--fs-root', help='Path that mirrors Zookeeper data.',
type=click.Path(exists=True), required=True)
@click.option('--partition', help='Windows partition', required=True)
@click.option('--group-ou', help='AD OU where the GMSA accounts are.',
required=True)
@click.option('--group-pattern', help='The group pattern to use.',
required=True)
@click.option('--no-lock', is_flag=True, default=False,
help='Run without lock.')
def gmsa_sync(fs_root, partition, group_ou, group_pattern, no_lock):
"""Sync placements GMSA groups."""
# keep sleeping until zksync ready
zksync_utils.wait_for_ready(fs_root)
watch = gmsa.HostGroupWatch(fs_root, partition, group_ou,
group_pattern)
if not no_lock:
lock = zkutils.make_lock(context.GLOBAL.zk.conn,
z.path.election(__name__))
_LOGGER.info('Waiting for leader lock.')
with lock:
watch.run()
else:
_LOGGER.info('Running without lock.')
watch.run()
del gmsa_sync
return ad_grp
|
#! python3
# _*_ coding: utf-8 _*_
from PyQt5.QtWebEngineWidgets import QWebEngineView
|
from tkinter import *
def button_clicked():
print("I got clicked")
    new_text = input_entry.get()
my_label.config(text=new_text)
window = Tk()
window.title("My First GUI Program")
window.minsize(width=500, height = 300)
window.config(padx=20, pady=20)
#Label
my_label = Label(text="I Am a Label", font=("Arial", 24, "bold"))
my_label.config(text="New Text")
my_label.grid(row=0, column=0)
my_label.config(padx=50, pady=50)
#Button
button = Button(text="Click Me", command=button_clicked)
#button.pack() #if pack method not used button will not appear in window when program is run
button.grid(row=1, column=1)
#Entry
input_entry = Entry(width=10)
print(input_entry.get())
input_entry.grid(row=2, column=3)
#New Button
new_button = Button(text="New Button to Click", command=button_clicked)
new_button.grid(row=0, column=2)
#pack places widgets. places at top and then adds entries below. lacks precision
#place has better precision uses x and y coordinates.
#grid layout is columns and rows. relative to other components.
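# A minimal illustration of place() for comparison (the widget and coordinates below are made up, not part of the original program):
# placed_label = Label(text="Placed at x=100, y=50")
# placed_label.place(x=100, y=50)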
window.mainloop()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/27 7:44
# @Author : Jason
# @Site :
# @File : python_test.py
# @Software: PyCharm
import pandas as pd
import numpy as np
data = [
{'name': 'Joe', 'state': 'NY', 'age': 18},
{'name': 'Jane', 'state': 'KY', 'age': 19, 'hobby': 'Minecraft'},
{'name': 'Jean', 'state': 'OK', 'age': 20, 'finances': 'good'}
]
df = pd.DataFrame(data)
#print(df)
#df = pd.SparseDataFrame({"A": [0, 0, 1, 2]})
#df.dtypes
#s = pd.Series([1,3,5,np.nan],(6,8))
#print(s)
dates = pd.date_range('20190927',periods=6)
print(dates)
df = pd.DataFrame(np.random.randn(6,4),index=dates,columns=list('ABCD'))
print(df)
df2 = pd.DataFrame({'A': 1.,
'B': pd.Timestamp('20130102'),
'C': pd.Series(1, index=list(range(4)), dtype='float32'),
'D': np.array([3] * 4, dtype='int32'),
'E': pd.Categorical(["test", "train", "test", "train"]),
'F': 'foo'})
print(df2)
print(df.head())
s = pd.Series([1,3,5,np.nan,6,8],index=dates).shift(2)
print(s)
print('\n')
print(df.sub(s,axis='index'))
df3 = pd.DataFrame(np.random.randn(10,4))
print(df3)
|
# encoding: utf-8
from tastypie.utils.dict import *
|
from flask import Flask, url_for, render_template, request, redirect, flash
from flask_sqlalchemy import SQLAlchemy
import pymysql
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] ='mysql+pymysql://root:@127.0.0.1:3306/test?charset=utf8'
# Set the Flask config key SQLALCHEMY_COMMIT_TEARDOWN to True so the app automatically commits database changes at the end of each request
app.config['SECRET_KEY'] = 'dev'
app.config['SQLALCHEMY_COMMIT_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
# Get the SQLAlchemy instance object; it is used below to access the database
db = SQLAlchemy(app)
import models
name = 'jaywatson'
movies = [
{'title': 'My Neighbor Totoro', 'year': '1988'},
{'title': 'Dead Poets Soci cccety', 'year': '1989'},
{'title': 'A Perfect World', 'year': '1993'},
{'title': 'Leon', 'year': '1994'},
{'title': 'Mahjong', 'year': '1996'},
{'title': 'Swallowtail Butterfly', 'year': '1996'},
{'title': 'King of Comedy', 'year': '1999'},
{'title': 'Devils on the Doorstep', 'year': '1999'},
{'title': 'WALL-E', 'year': '2008'},
{'title': 'The Pork of Music', 'year': '2012'},
]
@app.route('/')
@app.route('/hello')
def hello_world():
return '<h1>Hello World!</h1>'
@app.route('/user/<name>')
def user_page(name):
return 'User: %s' % name
@app.route('/test')
def test_url_for():
    # A few example calls (check the generated URLs in the console):
    print(url_for('hello_world'))  # output: /
    # Note how the next two calls generate URLs that contain the URL variable
    print(url_for('user_page', name='greyli'))  # output: /user/greyli
    print(url_for('user_page', name='peter'))  # output: /user/peter
    print(url_for('test_url_for'))  # output: /test
    # The call below passes extra keyword arguments; they are appended to the URL as a query string.
    print(url_for('test_url_for', num=2))  # output: /test?num=2
return 'Test page'
@app.route('/film')
def index():
return render_template('test.html', name=name, movies=movies)
@app.route('/movie')
def show_all():
return render_template('movieAdd.html', movies = models.Movie.query.all(), user=name)
@app.route('/movie/add/',methods=['POST','GET'])
def add():
if request.method == 'POST':
p_title = request.form.get('title',None)
p_year = request.form.get('year',None)
if not p_title or not p_year:
flash('input error.')
return redirect(url_for('show_all'))
newobj = models.Movie(title=p_title, year=p_year)
db.session.add(newobj)
db.session.commit()
Movies = models.Movie.query.all()
flash('新增成功.')
return redirect(url_for('show_all'))
Movies = models.Movie.query.all()
return render_template('movieAdd.html',admins=Movies, user=name)
@app.route('/movie/edit/<int:movie_id>', methods=['GET', 'POST'])
def edit(movie_id):
movie = models.Movie.query.get_or_404(movie_id)
    if request.method == 'POST':  # handle the edit-form submission
title = request.form['title']
year = request.form['year']
if not title or not year or len(year) > 4 or len(title) > 60:
flash('Invalid input.')
            return redirect(url_for('edit', movie_id=movie_id))  # redirect back to the corresponding edit page
        movie.title = title  # update the title
        movie.year = year  # update the year
        db.session.commit()  # commit the database session
        flash('Item updated.')
        return redirect(url_for('show_all'))  # redirect back to the index page
    return render_template('edit.html', movie=movie)  # pass in the movie record being edited
@app.route('/movie/delete/<int:movie_id>', methods=['POST'])
def delete(movie_id):
movie = models.Movie.query.get_or_404(movie_id)
    db.session.delete(movie)  # delete the corresponding record
    db.session.commit()  # commit the database session
    flash('Item deleted.')
    return redirect(url_for('show_all'))  # redirect back to the index page
@app.errorhandler(404)  # register the error code to handle
def page_not_found(e):  # the handler receives the exception object as an argument
    return render_template('404.html', user=name), 404  # return the template and the status code
if __name__ == '__main__':
app.run(debug=True)
|
import turtle
def draw_shapes():
window = turtle.Screen()
window.bgcolor("pink")
draw_square("square","purple",2)
draw_circle("circle", "blue", 2)
draw_triangle("triangle", "red", 3)
window.exitonclick()
def create_turtle(shape, color, speed):
newTurtle = turtle.Turtle()
newTurtle.shape(shape)
newTurtle.color(color)
newTurtle.speed(speed)
return newTurtle
def draw_square(shape, color, speed):
squareTurtle = create_turtle(shape, color, speed)
for i in range(0,4):
squareTurtle.forward(100)
squareTurtle.right(90)
def draw_circle(shape, color, speed):
    circleTurtle = create_turtle(shape, color, speed)
    circleTurtle.circle(100)
def draw_triangle(shape, color, speed):
    triTurtle = create_turtle(shape, color, speed)
    for i in range(0,3):
        triTurtle.backward(130)
        triTurtle.left(120)
draw_shapes()
|
from pytest import mark
import allure
data = [('hello', 'world'),
('hello', ''),
('123', 'world')]
ddt = {
'argnames':'name,description',
'argvalues':[('hello', 'world'),
('hello', ''),
('123', 'world')],
'ids': ['general test', 'no description test', 'test with digits in name']
}
@allure.title('test allure')
@mark.parametrize(**ddt)
def test_new_testcases( desktop_app_auth, name, description):
test_name = 'hello'
desktop_app_auth.navigate_to('Create new test')
desktop_app_auth.create_test(name, description)
desktop_app_auth.navigate_to('Test Cases')
assert desktop_app_auth.test_cases.check_test_exists(name)
desktop_app_auth.test_cases.delete_test_by_name(name)
def test_wait_more_30_sec(desktop_app_auth):
desktop_app_auth.navigate_to('Demo pages')
    desktop_app_auth.demo_pages.open_page_after_wait(32)
assert desktop_app_auth.demo_pages.check_wait_page()
|
import functools
import pytest
def decor(func):
@functools.wraps(func)
def inner(arg, x='Farewell!'):
new_arg=x
x=arg*3
return func(new_arg, x)
return inner
def F_F(arg, x='Hello!'):
print(x)
return(arg)
print(F_F(33))
print('----------')
print(decor(F_F)(33))
@pytest.mark.parametrize("x,expected", [
(11,11),
(22,22),
(66,66),
(99,99)])
def test_F_F(x, expected):
assert F_F(x) == expected
@pytest.mark.parametrize("a,b,expected", [
(1,11,11),
(2,22,22),
(6,66,66),
(9,99,99)])
def test_decor(a,b, expected):
assert decor(F_F)(a,b) == expected
|
import sys
from . import fake_pyzz as pyzz
from . import ltl_parser
from . import build_monitor
from pyaig import *
def build_base_aiger( vinst ):
aig = vinst.N.aig
for n, w in vinst.symbols.iteritems():
if aig.name_has_po(n):
continue
aig.create_po( w.get_f(), n )
is_init = vinst.get_init().get_f()
for c in vinst.constraints:
aig.create_po( c.get_f(), po_type=AIG.CONSTRAINT )
for ic in vinst.init_constraints:
aig.create_po( aig.create_implies( is_init, ic.get_f() ), po_type=AIG.CONSTRAINT )
return aig
def build_safety_aiger( vinst ):
    aig = build_base_aiger(vinst)
    for po in vinst.pos:
        aig.create_po( po.get_f(), po_type=AIG.BAD_STATES )
    return aig
def build_liveness_aiger( vinst ):
    aig = build_base_aiger(vinst)
    aig.create_justice([ aig.create_po(fc.get_f(), po_type=AIG.JUSTICE ) for fc in vinst.fairnesses ])
    return aig
|
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda
import matplotlib.pyplot as plt
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} device'.format(device))
training_data = datasets.CIFAR10(
root="cifar10",
train=True,
download=True,
transform=ToTensor(),
#target_transform=Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), src=torch.tensor(1.)))
)
test_data = datasets.CIFAR10(
root="cifar10",
train=False,
download=True,
transform=ToTensor(),
#target_transform=Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), src=torch.tensor(1.)))
)
labels_map = {
0: "airplane",
1: "automobile",
2: "bird",
3: "cat",
4: "deer",
5: "dog",
6: "frog",
7: "horse",
8: "ship",
9: "truck",
}
# figure = plt.figure(figsize=(8, 8))
# cols, rows = 4, 4
# for i in range(1, cols * rows + 1):
# sample_idx = torch.randint(len(training_data), size=(1,)).item()
# img, label = training_data[sample_idx]
# img = img.transpose(0, 1).transpose(1, 2)
# figure.add_subplot(rows, cols, i)
# plt.title(labels_map[label])
# plt.axis("off")
# plt.imshow(img)
# plt.show()
batch_size = 128
train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
class NeuralNetwork(nn.Module):
def __init__(self):
super().__init__()
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(32 * 32 * 3, 1024),
nn.Sigmoid(),
nn.Linear(1024, 512),
nn.Sigmoid(),
nn.Linear(512, 128),
nn.Sigmoid(),
#nn.Linear(512, 10),
nn.Linear(128, 10),
nn.Sigmoid()
)
def forward(self, x):
x = self.flatten(x)
results = self.linear_relu_stack(x)
return results
model = NeuralNetwork().to(device)
# for name, param in model.named_parameters():
# print(f"Layer: {name} | Size: {param.size()} | Values : {param[:2]} \n")
learning_rate = 5e-3
#loss_fn = nn.MSELoss()
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
def train_loop(dataloader, model, loss_fn, optimizer):
size = len(dataloader.dataset)
for batch, (X, y) in enumerate(dataloader):
# Compute prediction and loss
pred = model(X.to(device))
loss = loss_fn(pred, y.to(device))
# Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test_loop(dataloader, model, loss_fn):
size = len(dataloader.dataset)
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in dataloader:
pred = model(X.to(device))
y = y.to(device)
test_loss += loss_fn(pred, y.to(device)).item()
#correct += (pred.argmax(1) == y.argmax(1)).type(torch.float).sum().item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= size
correct /= size
print(f"Test Error: \n Accuracy: {(100 * correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
epochs = 1000
for t in range(epochs):
print(f"Epoch {t + 1}\n-------------------------------")
train_loop(train_dataloader, model, loss_fn, optimizer)
test_loop(test_dataloader, model, loss_fn)
print("Done!")
torch.save(model, 'model.pth')
print("Saved?")
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the plusMinus function below.
def plusMinus(arr):
pos, neg, zero = 0, 0 , 0
for i in range(0, len(arr)):
if arr[i] > 0 :
pos +=1
elif arr[i] < 0 :
neg += 1
else :
zero += 1
    print(pos/len(arr))
    print(neg/len(arr))
    print(zero/len(arr))
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().rstrip().split()))
plusMinus(arr)
|
#!/usr/bin/env python3
import logging
from flask import Flask, render_template
from flask_ask import Ask, statement, question, session
app = Flask(__name__)
ask = Ask(app, '/')
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
@ask.launch
def new_query():
welcome_msg = render_template('welcome')
return question(welcome_msg)
@ask.intent('OvenIntent')
def oven_start():
oven_start_msg = render_template('oven_start')
session.attributes['DEVICE'] = 'oven'
session.attributes['YES_NEXT'] = 'done_positive'
session.attributes['YES_IS_DONE'] = True
session.attributes['NO_NEXT'] = 'composition_question'
session.attributes['NO_IS_DONE'] = False
session.attributes['YES_NO_STEP'] = 0
return question(oven_start_msg)
@ask.intent('MicrowaveIntent')
def microwave_start():
microwave_start_msg = render_template('microwave_start')
session.attributes['DEVICE'] = 'microwave'
session.attributes['YES_NEXT'] = 'done_positive'
session.attributes['YES_IS_DONE'] = True
session.attributes['NO_NEXT'] = 'metal_question'
session.attributes['NO_IS_DONE'] = False
session.attributes['YES_NO_STEP'] = 0
return question(microwave_start_msg)
@ask.intent('DishwasherIntent')
def dishwasher_start():
diswasher_start_msg = render_template('dishwasher_start')
session.attributes['DEVICE'] = 'dishwasher'
session.attributes['YES_NEXT'] = 'done_positive'
session.attributes['YES_IS_DONE'] = True
session.attributes['NO_NEXT'] = 'composition_question'
session.attributes['NO_IS_DONE'] = False
session.attributes['YES_NO_STEP'] = 0
return question(diswasher_start_msg)
@ask.intent('AffirmativeIntent')
def affirmative():
device = session.attributes['DEVICE']
yes_next = session.attributes['YES_NEXT']
yes_is_done = session.attributes['YES_IS_DONE']
yes_no_step = session.attributes['YES_NO_STEP']
if yes_no_step == 1:
if device == 'microwave':
yes_next = 'metal_done'
yes_is_done = True
template = '_'.join((device, yes_next))
response_msg = render_template(template)
session.attributes['YES_NO_STEP'] = yes_no_step + 1
if yes_is_done:
return statement(response_msg)
else:
return question(response_msg)
@ask.intent('NegativeIntent')
def negative():
device = session.attributes['DEVICE']
no_next = session.attributes['NO_NEXT']
no_is_done = session.attributes['NO_IS_DONE']
yes_no_step = session.attributes['YES_NO_STEP']
if yes_no_step == 1:
if device == 'microwave':
no_next = 'plastic_start'
no_is_done = False
logging.info('device: {} no_next: {} yes_no_step: {}'.format(device, no_next, yes_no_step))
template = '_'.join((device, no_next))
response_msg = render_template(template)
session.attributes['YES_NO_STEP'] = yes_no_step + 1
if no_is_done:
return statement(response_msg)
else:
return question(response_msg)
# @ask.intent('OvenIntent')
# def oven():
# oven_start_msg = render_template('oven_start')
# return statement(speech_text).simple_card('HelloWorld', speech_text)
# @ask.intent('AMAZON.HelpIntent')
# def help():
# speech_text = 'You can say hello to me!'
# return question(speech_text).reprompt(speech_text).simple_card('HelloWorld', speech_text)
@ask.session_ended
def session_ended():
return "", 200
if __name__ == '__main__':
app.run(debug=True)
|
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class AnyTests(TranspileTestCase):
def test_any(self):
self.assertCodeExecution("print(any([None, True, False]))")
def test_any_true(self):
self.assertCodeExecution("print(any([1,True,3]))")
def test_any_false(self):
self.assertCodeExecution("print(any([0, '', 0.0]))")
def test_any_empty_list(self):
self.assertCodeExecution("print(any([]))")
def test_any_typeerror(self):
self.assertCodeExecution("""
try:
print(any(None))
except TypeError as e:
print(e)
print('Done.')
""")
class BuiltinAnyFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
function = "any"
|
import math
area = float(input("Informe o tamanho da área a ser pintada em m²: "))
cobertura = (area/6)
coberturaTotal = cobertura * 1.1
latasTinta = math.ceil(coberturaTotal/18)
galoesTinta = math.ceil(coberturaTotal/3.6)
print(f"Valor em latas de 18l: R${latasTinta*80:.2f}")
print(f"Valor em galões de 3,6l: R${latasTinta*25:.2f}")
|
import webapp2
from webapp2_extras import jinja2
from google.appengine.api import users
from google.appengine.ext import ndb
from model.libroLiterario import libroLiterario
class EliminarlibroLiterarioHandler(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
try:
id_libroLiterario = self.request.GET["id_libroLiterario"]
except:
id_libroLiterario = "ERROR"
libroLiterario = ndb.Key(urlsafe=id_libroLiterario).get()
sust = {
"libroLiterario" : libroLiterario
}
jinja = jinja2.get_jinja2(app=self.app)
self.response.write(jinja.render_template("librosLiterarios/eliminarLibroLiterario.html", **sust))
else:
self.redirect("/")
return
def post(self):
user = users.get_current_user()
if user:
id_libroLiterario = self.request.get("edIdLibroLiterario", "ERROR")
libroLiterario = ndb.Key(urlsafe=id_libroLiterario).get()
url = "/listarLibrosLiterarios"
mensaje = "El libro: "+libroLiterario.titulo+" ha sido eliminado con exito"
libroLiterario.key.delete()
sust = {
"mensaje": mensaje,
"url": url
}
jinja = jinja2.get_jinja2(app=self.app)
self.response.write(jinja.render_template("mensajeConfirmacion.html", **sust))
else:
self.redirect("/")
return
app = webapp2.WSGIApplication([
('/eliminarLibroLiterario', EliminarlibroLiterarioHandler),
], debug=True)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Refer to https://leetcode.com/discuss/857/constant-space-solution
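# `once` and `twice` act as a per-bit counter modulo 3: a bit moves from `once`
# to `twice` when the same value is seen a second time, and is cleared from both
# once it has been seen three times, so after the loop `once` holds exactly the
# bits of the element that appears only once.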
class Solution(object):
def singleNumber(self, nums):
once = 0
twice = 0
third = 0
for num in nums:
twice |= once & num
once ^= num
third = once & twice
once &= ~third
twice &= ~third
return once
"""
[1]
[1,1,1,2,2,2,3,4,4,4]
"""
|
"""{{ project_name }} URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
"""
from pathlib import Path
# Django imports
from django.conf.urls import include
from django.urls import path, re_path
from django.contrib import admin
from django.contrib.auth import views as auth_views
from .settings.production import PROJECT_ROOT
urlpatterns = [
# Examples:
# url(r'^$', '{{ project_name }}.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
path('admin/', admin.site.urls),
]
# Add urls path for all the apps
for f in (Path(PROJECT_ROOT)/"apps").iterdir():
if not f.is_dir() or f.name.startswith("."):
continue
if (f/"urls.py").exists():
urlpatterns.append(path(f"{f.name}/", include(f"{f.name}.urls")))
|
test_case = int(input())
for _ in range(test_case):
n = int(input())
print(2 if n == 2 else (n & 1))
|
import copy
import argparse
from multiprocessing import Pool
from functools import partial
nextCells = []
mrow = list()
mcolumn = list()
def main():
    # allows me to use argparse for inputs
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', required=True)
parser.add_argument("-o", "--output", required=True)
parser.add_argument("-t", "--threads", type=int, default=1)
args = parser.parse_args()
# prints my r number onto the console
print("Project :: R11487176\n\n\n")
# opens whatever the args.input was as read
with open(args.input, 'r') as file:
# calls global nextCells to read the input matrix and make them in to lists of lists
global nextCells
nextCells = [list(line.strip()) for line in file]
# using enumerate, assigns indices of elements in the matrix into row and column
for x, ele1 in enumerate(nextCells):
for y, ele2 in enumerate(ele1):
global mrow
global mcolumn
mrow.append(x)
mcolumn.append(y)
# creates number of processes based on args.threads(input)
MAX_PROCESS = args.threads
processPool = Pool(processes=args.threads)
currentCells = []
# opens user input outputfile to write
with open(args.output, 'w') as output_file:
# loops 100 times to iterate through the matrix using pool.map
for x in range(0, 100):
# creates a deep copy of the nextCell which holds the immediate matrix from the input
# then using pool.map and partial, iterate through the matrix with multiprocessing
currentCells = [copy.deepcopy(nextCells)]
neighborx = partial(neighbor, row=mrow, column=mcolumn, ncell=nextCells)
nextCells = processPool.map(neighborx, currentCells)
nextCells = nextCells[0]
# flattens the list of list into a string with new line character wherever it needs
cellflat3 = list(map(''.join, nextCells))
cellflat4 = '\n'.join(str(ele) for ele in cellflat3)
output_file.write(str(cellflat4))
output_file.write("\n\n")
# my version of the solver
# takes in the currentCells which is a deepcopy of the nextCell and row, column and ncell(nextCells)
# iterates through the coordinates in row and column to check it's neighboring characters in currentCells
def neighbor(currentCells, row, column, ncell):
count_neighbor = 0
for x in range(0, len(row)):
if (currentCells[(row[x] - 1) % len(currentCells)][column[x] % len(currentCells)]) == 'O': # above
count_neighbor += 1
if (currentCells[row[x] % len(currentCells)][(column[x] + 1) % len(currentCells)]) == 'O': # right
count_neighbor += 1
if (currentCells[(row[x] + 1) % len(currentCells)][column[x] % len(currentCells)]) == 'O': # bottom
count_neighbor += 1
if (currentCells[row[x] % len(currentCells)][(column[x] - 1) % len(currentCells)]) == 'O': # left
count_neighbor += 1
if (currentCells[(row[x] - 1) % len(currentCells)][(column[x] + 1) % len(currentCells)]) == 'O': # top_right
count_neighbor += 1
if (currentCells[(row[x] + 1) % len(currentCells)][(column[x] + 1) % len(currentCells)]) == 'O': # bot_right
count_neighbor += 1
if (currentCells[(row[x] + 1) % len(currentCells)][(column[x] - 1) % len(currentCells)]) == 'O': # bot_left
count_neighbor += 1
if (currentCells[(row[x] - 1) % len(currentCells)][(column[x] - 1) % len(currentCells)]) == 'O': # top_left
count_neighbor += 1
if (currentCells[row[x]][column[x]]) == '.':
if count_neighbor == 2 or count_neighbor == 4 or count_neighbor == 6 or count_neighbor == 8:
ncell[row[x]][column[x]] = 'O'
else:
ncell[row[x]][column[x]] = '.'
if (currentCells[row[x]][column[x]]) == 'O':
if count_neighbor == 2 or count_neighbor == 3 or count_neighbor == 4:
ncell[row[x]][column[x]] = 'O'
else:
ncell[row[x]][column[x]] = '.'
count_neighbor = 0
return ncell
if __name__ == '__main__':
main()
|
from leetcode import TreeNode, test, new_tree
def min_camara_cover(root: TreeNode) -> int:
result = 0
def lrd(node: TreeNode) -> int:
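        # Post-order state codes: 0 = node still needs to be covered by its parent,
        # 1 = node is covered (a child holds a camera), 2 = node holds a camera.
        # A missing child counts as covered (1), so leaves return 0.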
nonlocal result
if not node:
return 1
left, right = lrd(node.left), lrd(node.right)
if left == 0 or right == 0:
result += 1
return 2
if left == 1 and right == 1:
return 0
return 1
if lrd(root) == 0:
result += 1
return result
test(
min_camara_cover,
[
(new_tree(0, 0, None, 0, 0), 1),
(new_tree(0, 0, None, 0, None, 0, None, None, 0), 2),
],
)
|
from kafka import KafkaConsumer
from src import string_to_list, webhook_post_request, get_kafka_data
kafka_data = get_kafka_data()
consumer = KafkaConsumer(kafka_data["kafka_topic"], bootstrap_servers=[kafka_data["kafka_port"]])
if __name__ == "__main__":
for data in consumer:
decoded_data = data.value.decode('utf-8')
decoded_data = string_to_list(decoded_data)
response = webhook_post_request(decoded_data)
print(f"response code: {response}")
|
from django.conf.urls import url
from blog.views import index as blog_index
from blog.views import post as blog_post
from blog.views import tag as blog_tag
from blog.views import postList as blog_postList
from blog.views import emailPost as blog_emailPost
from blog.views import userReg as blog_userReg
from blog.views import commentPost as blog_commentPost
urlpatterns = [
url(r'^$' , blog_index.load ),
url(r'^post/(\d+)$', blog_post.load),
url(r'^tag/(\d+)$', blog_tag.load),
url(r'^postList/$', blog_postList.load),
url(r'^emailPost/$', blog_emailPost.postEmail),
url(r'^emailPost/thanks/$', blog_emailPost.thanks),
url(r'^user/logout/(\d+)$', blog_post.logout),
url(r'^user/logoutcomment/$', blog_post.logoutcomment),
url(r'^user/reg/$', blog_userReg.userReg),
url(r'^user/regThanks/$', blog_userReg.thanks),
url(r'^comment/$', blog_commentPost.load),
]
|
from dataclasses import dataclass
@dataclass
class Position:
y: int
x: int
def __add__(self, other):
return Position(y=self.y + other.y, x=self.x + other.x)
def neighbours(self):
return [self + o for o in _neighbours]
def __hash__(self):
return hash((self.y, self.x))
def __lt__(self, other):
return (self.y, self.x) < (other.y, other.x)
_neighbours = [
Position(y=-1, x=0), # up
Position(y=0, x=-1), # left
Position(y=0, x=1), # right
Position(y=1, x=0), # down
]
@dataclass
class Unit:
position: Position
team: str
hp: int
atk: int
def do(self, cave, units):
if self.hp <= 0:
return
units = [u for u in units if u.hp > 0 and u != self]
targets = [u for u in units if u.team != self.team]
targets_pos = [u.position for u in targets]
units_pos = [u.position for u in units]
to_visit = []
visited = set()
taken = set()
distance = {}
path = {}
target_pos = None
distance[self.position] = 0
to_visit = [self.position]
taken.add(self.position)
while len(to_visit) > 0:
pos = to_visit.pop(0)
if pos in visited:
continue
visited.add(pos)
if pos in cave:
continue
if pos in targets_pos:
# found closest enemy
target_pos = pos
break
if pos in units_pos:
continue
for npos in pos.neighbours():
if npos in visited:
continue
if npos in taken:
continue
distance[npos] = distance[pos] + 1
path[npos] = pos
to_visit.append(npos)
taken.add(npos)
if not target_pos:
return None
res = ''
target = targets[targets_pos.index(target_pos)]
move_pos = target_pos
if move_pos != self.position:
while True:
if distance[move_pos] == 1:
break
move_pos = path[move_pos]
if move_pos != target_pos:
res += '{} -> {}'.format(self.position, move_pos)
self.position = move_pos
close_targets = [t for pos in self.position.neighbours() for t in targets if t.position == pos]
if close_targets:
low_hp_target = min(close_targets, key=lambda x: x.hp)
low_hp_target.hp -= self.atk
res += ' attacked {}'.format(low_hp_target)
return res
def sort_units(units):
return sorted(units, key=lambda x: x.position)
def print_map(cave, units):
alive_units = [u for u in units if u.hp > 0]
alive_units_pos = [u.position for u in alive_units]
max_p = max(cave)
for y in range(0, max_p.y + 1):
        for x in range(0, max_p.x + 1):
p = Position(y=y, x=x)
if p in alive_units_pos:
print(alive_units[alive_units_pos.index(p)].team, end='')
elif p in cave:
print('#', end='')
else:
print('.', end='')
print('\n', end='')
from itertools import count
from copy import deepcopy
def solution1(cave, units):
o_units = sort_units(units)
done = False
for eatk in count(4):
units = deepcopy(o_units)
for u in units:
if u.team == 'E':
u.atk = eatk
for i in count(1):
res = []
units = sort_units(units)
for unit in units:
res.append(unit.do(cave, sort_units(units)))
if any(u.team == 'E' and u.hp <= 0 for u in units):
break
if all(r is None for r in res):
done = True
break
print_map(cave, units)
if done:
break
return sum(u.hp for u in units if u.hp > 0) * (i-2)
def parse_input1(input):
cave = set()
units = []
for y, l in enumerate(input.split('\n')):
for x, c in enumerate(l):
p = Position(y=y, x=x)
if c in 'GE':
u = Unit(position=p, team=c, hp=200, atk=3)
units.append(u)
elif c == '#':
cave.add(p)
return cave, units
if __name__ == '__main__':
with open('input.txt') as fh:
print(solution1(*parse_input1(fh.read())))
|
"""Here we will test assumptions about the data."""
|
"""
An interface to the NLNOG ring.
"""
# ABOUT
# =====
# ringtools - A generic module for running commands on nodes of the NLNOG
# ring. More information about the ring: U{https://ring.nlnog.net}
#
# source code: U{https://github.com/NLNOG/py-ring}
#
# AUTHOR
# ======
# Teun Vink - teun@teun.tv
__all__ = ['exception', 'node', 'result', 'ring']
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def get_act(name):
if name == 'relu':
return nn.ReLU(inplace=True)
elif name == 'leaky_relu':
return nn.LeakyReLU(negative_slope=0.2, inplace=True)
elif name == 'silu':
return nn.SiLU(inplace=True)
elif name == 'mish':
return MishInline()
else:
return nn.Identity()
class MishInline(nn.Module):
""" https://arxiv.org/abs/1908.08681v1 """
def __init__(self):
super().__init__()
def forward(self, x):
return x * torch.tanh( F.softplus(x) )
def save_model(model, save_path):
data = {
'model': model.state_dict(),
'model_args': model.model_args,
}
torch.save(data, save_path)
def load_model(path, device):
data = torch.load(path, map_location=torch.device(device))
model = SegmentationModel(**data['model_args']).to(device)
model.load_state_dict(data['model'])
return model
@torch.jit.script
def autocrop(encoder_features: torch.Tensor, decoder_features: torch.Tensor):
""" Center crop the encoder down to the size of the decoder """
if encoder_features.shape[2:] != decoder_features.shape[2:]:
ds = encoder_features.shape[2:]
es = decoder_features.shape[2:]
assert ds[0] >= es[0]
assert ds[1] >= es[1]
encoder_features = encoder_features[:, :,
((ds[0] - es[0]) // 2):((ds[0] + es[0]) // 2),
((ds[1] - es[1]) // 2):((ds[1] + es[1]) // 2)
]
return encoder_features, decoder_features
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3,
stride=1, padding=1, activation=None):
super().__init__()
bias = False if activation else True
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias),
nn.BatchNorm2d(out_channels),
get_act(activation),
nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding, bias=bias),
nn.BatchNorm2d(out_channels),
get_act(activation)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3,
stride=1, padding=1, activation=None):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels, kernel_size, stride, padding, activation)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
def __init__(self, in_channels, out_channels, activation=None):
super().__init__()
self.up = nn.ConvTranspose2d(in_channels, in_channels//2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels, activation=activation)
def forward(self, encoder_features, decoder_features):
decoder_features = self.up(decoder_features)
encoder_features, decoder_features = autocrop(encoder_features, decoder_features)
x = torch.cat([encoder_features, decoder_features], dim=1)
return self.conv(x)
class UNetEncoder(nn.Module):
def __init__(self, in_channels, out_channels, filters, activation):
super(UNetEncoder, self).__init__()
self.down00 = DoubleConv(in_channels, filters[0], activation=activation)
self.down10 = Down(filters[0], filters[1], activation=activation)
self.down20 = Down(filters[1], filters[2], activation=activation)
self.down30 = Down(filters[2], filters[3], activation=activation)
self.down40 = Down(filters[3], filters[4], activation=activation)
def forward(self, x):
x00 = self.down00(x)
x10 = self.down10(x00)
x20 = self.down20(x10)
x30 = self.down30(x20)
x40 = self.down40(x30)
return x00, x10, x20, x30, x40
class UNetDecoder(nn.Module):
def __init__(self, out_channels, filters, activation):
super(UNetDecoder, self).__init__()
self.up1 = Up(filters[4], filters[3], activation=activation)
self.up2 = Up(filters[3], filters[2], activation=activation)
self.up3 = Up(filters[2], filters[1], activation=activation)
self.up4 = Up(filters[1], filters[0], activation=activation)
self.out = nn.Conv2d(filters[0], out_channels, kernel_size=1)
def forward(self, features):
x00, x10, x20, x30, x40 = features
x = self.up1(x30, x40)
x = self.up2(x20, x)
x = self.up3(x10, x)
x = self.up4(x00, x)
return self.out(x)
class Classifier(nn.Module):
def __init__(self, in_channels, out_channels, activation=None):
super(Classifier, self).__init__()
self.avg = nn.AdaptiveAvgPool2d(1)
self.layer = nn.Sequential(
# nn.Conv2d(in_channels, in_channels, kernel_size=1, bias=False),
# nn.BatchNorm2d(in_channels),
# get_act(activation),
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=True),
)
def forward(self, x):
x = self.layer(self.avg(x))
return x.flatten(1)
class SegmentationModel(nn.Module):
def __init__(self, in_channels=3, out_channels=1, filters=16,
activation='relu', mean=[0,0,0], std=[1,1,1],
num_to_cat=None, input_size=None):
super(SegmentationModel, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.filters = filters
self.num_to_cat = num_to_cat
self.input_size = input_size
self.model_args = {"in_channels": in_channels, "out_channels": out_channels,
"filters": filters, "activation": activation, "num_to_cat": num_to_cat,
"input_size": input_size}
if type(filters) == int:
filters = [filters, filters*2, filters*4, filters*8, filters*16]
self.normalize = nn.BatchNorm2d(in_channels) # Layer will be frozen without learnable parameters
self.set_normalization(mean, std)
self.encoder = UNetEncoder(in_channels, out_channels, filters, activation)
self.decoder = UNetDecoder(out_channels, filters, activation)
self.classifier = Classifier(filters[-1], out_channels, activation)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if activation == 'relu':
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif activation == 'leaky_relu':
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu', a=0.2)
else:
nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def set_normalization(self, mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]):
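        # Input normalization is implemented as a frozen BatchNorm2d: the dataset
        # mean/std are written into the running statistics (BatchNorm stores the
        # variance, hence std is squared below) and all parameters are frozen, so
        # the layer only standardizes the inputs.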
self.normalize.reset_parameters()
self.normalize.running_mean = torch.tensor(mean, requires_grad=False, dtype=torch.float)
self.normalize.running_var = torch.tensor([x**2 for x in std], requires_grad=False, dtype=torch.float)
self.normalize.weight.requires_grad = False # gamma
self.normalize.bias.requires_grad = False # beta
self.normalize.running_mean.requires_grad = False # mean
self.normalize.running_var.requires_grad = False # variance
self.normalize.eval()
def forward(self, x):
x = self.normalize.eval()(x)
features = self.encoder(x)
logits = self.decoder(features)
return logits, features[-1]
def classify(self, x):
self.eval()
with torch.no_grad():
x = self.normalize(x)
features = self.encoder(x)
class_logits = self.classifier(features[-1])
return torch.softmax(class_logits, dim=1)
def predict(self, x):
self.eval()
with torch.no_grad():
x = self.normalize(x)
logits, encoding = self.forward(x)
class_logits = self.classifier(encoding)
return torch.softmax(logits, dim=1), torch.softmax(class_logits, dim=1)
def train_test(model, input_size, batch_size, device):
model.to(device).train()
data = torch.randn(batch_size, model.in_channels, input_size, input_size).to(device)
true_masks = torch.empty(batch_size, input_size, input_size, dtype=torch.long).random_(model.out_channels).to(device)
true_labels = torch.empty(batch_size, dtype=torch.long).random_(model.out_channels).to(device)
print("data.shape :", data.shape)
print("true_masks.shape :", true_masks.shape)
print("true_labels.shape :", true_labels.shape)
opt = torch.optim.SGD(model.parameters(), lr=1e-1)
for i in range(10):
opt.zero_grad()
logits, encoding = model(data)
class_logits = model.classifier(encoding.detach())
loss = nn.CrossEntropyLoss()(logits, true_masks)
loss.backward()
class_loss = nn.CrossEntropyLoss()(class_logits, true_labels)
class_loss.backward()
print("loss={:.06f}, class_loss={:.06f}".format(loss, class_loss))
opt.step()
if __name__ == "__main__":
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
in_channels = 3
out_channels = 5
filters = 8 # 16
activation = "silu" # relu, leaky_relu, silu, mish
batch_size = 2
input_size = 128
model = SegmentationModel(in_channels, out_channels, filters, activation).to(device)
train_test(model, input_size, batch_size, device)
x = torch.randn(1, in_channels, input_size, input_size).to(device)
model.eval() # Freeze batchnorm
logits, encoding = model(x)
print("logits :", logits.shape, torch.min(logits).item(), torch.max(logits).item())
print("encoding :", encoding.shape, torch.min(encoding).item(), torch.max(encoding).item())
class_logits = model.classifier(encoding.detach())
print("class_logits :", class_logits.shape, torch.min(class_logits).item(), torch.max(class_logits).item())
ymask, ylabel = model.predict(x)
print("ymask :", ymask.shape, torch.min(ymask).item(), torch.max(ymask).item())
print("ylabel :", ylabel.shape, torch.min(ylabel).item(), torch.max(ylabel).item())
class_logits = model.classify(x)
print("class_logits :", class_logits.shape, torch.min(class_logits).item(), torch.max(class_logits).item())
save_model(model, save_path="runs/save_test.pth")
model2 = load_model("runs/save_test.pth", device=device)
model2.eval()
logits, encoding = model2(x)
print("logits :", logits.shape, torch.min(logits).item(), torch.max(logits).item())
print("encoding :", encoding.shape, torch.min(encoding).item(), torch.max(encoding).item())
|
from django.shortcuts import render
from django.utils import timezone
from django.views import generic
from .models import Story
class IndexView(generic.ListView):
template_name = 'iamgrey/story_list.html'
context_object_name = 'latest_story_list'
def get_queryset(self):
return Story.objects.filter(date_published__lte=timezone.now()).order_by('-date_published')
def get_context_data(self, *args, **kwargs):
context = super(IndexView, self).get_context_data(*args, **kwargs)
context['latest_story'] = Story.objects.latest('date_published')
return context
class DetailView(generic.DetailView):
model = Story
template_name = 'iamgrey/detail.html'
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.experimental.kotlin.register import rules as all_kotlin_rules
from pants.backend.kotlin.lint.ktlint import rules as ktlint_rules
from pants.backend.kotlin.lint.ktlint import skip_field
def rules():
return [
*all_kotlin_rules(),
*ktlint_rules.rules(),
*skip_field.rules(),
]
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url, include
from ceiling.views import CeilingView
urlpatterns = patterns('',
url('^$', CeilingView.as_view(), name='ceiling'),
url('^api/$', CeilingView.as_view(), name='ceiling'),
)
|
import pytest
from pcos_runrex.algo.menarche import MENARCHE_AGE, AGE_MENARCHE, MENARCHE_GRADE, GRADE_MENARCHE
@pytest.mark.parametrize(('text', 'value'), [
('menarche at age 13', 13),
('menses irregular since onset at age 13', 13),
])
def test_menarche_numeric_age(text, value):
m = MENARCHE_AGE.matchgroup(text, 'age')
assert int(m) == value
@pytest.mark.parametrize(('text', 'value'), [
('age at first menses : 13 - 14 years', '13 - 14'),
])
def test_menarche_age(text, value):
m = MENARCHE_AGE.matchgroup(text, 'age')
assert m == value
@pytest.mark.parametrize(('text', 'value'), [
('age 13 y.o. at first menses', 13),
('13 yrs at onset of menses', 13),
])
def test_age_numeric_menarche(text, value):
m = AGE_MENARCHE.matchgroup(text, 'age')
assert int(m) == value
@pytest.mark.parametrize(('text', 'grade'), [
('menarche in sixth grade', 'sixth grade'),
('menses irregular since onset in 6th grade', '6th grade'),
])
def test_menarche_grade(text, grade):
m = MENARCHE_GRADE.matchgroup(text, 'grade')
assert m.strip() == grade
@pytest.mark.parametrize(('text', 'grade'), [
('was in grade 6 at onset of menses', 'grade 6'),
])
def test_grade_menarche(text, grade):
m = GRADE_MENARCHE.matchgroup(text, 'grade')
assert m.strip() == grade
|
import collections
def solution(ranks):
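    # Count the soldiers who can report to a superior: a soldier of rank r is
    # counted when at least one soldier of rank r + 1 exists.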
y = dict(collections.Counter(ranks))
counts = 0
for key in y.keys():
if(key+1 in y):
counts += y[key]
print(str(counts))
return counts
# write your code in Python 3.6
solution([3, 4, 3, 0, 2, 2, 3, 0, 0])
solution([4, 2, 0])
solution([4, 2, 0, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2])
solution([3, 4, 3, 0, 2, 2, 3, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
solution([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
N = 10000000
count_list = range(1,N+1)
solution(count_list)
new_list = []
num = 0
while(num < len(count_list)):
# checking condition
if num % 2 == 0:
new_list.append(count_list[num])
# increment num
num += 1
solution(new_list)
|
__all__ = [
'MilestoneEditJob',
]
from async_messages import messages
from limpyd import fields
from limpyd_jobs import STATUSES
from gim.core.models import Milestone
from gim.core.ghpool import ApiError
from .base import DjangoModelJob
class MilestoneJob(DjangoModelJob):
"""
Abstract job model for jobs based on the Milestone model
"""
abstract = True
model = Milestone
permission = 'self'
@property
def milestone(self):
if not hasattr(self, '_milestone'):
self._milestone = self.object
return self._milestone
@property
def repository(self):
if not hasattr(self, '_repository'):
self._repository = self.milestone.repository
return self._repository
class MilestoneEditJob(MilestoneJob):
queue_name = 'edit-milestone'
mode = fields.InstanceHashField()
def run(self, queue):
"""
Get the milestone and create/update/delete it
"""
super(MilestoneEditJob, self).run(queue)
try:
milestone = self.milestone
except milestone.DoesNotExist:
# the milestone doesn't exist anymore, stop here
self.status.hset(STATUSES.CANCELED)
return None
gh = self.gh
if not gh:
return # it's delayed !
mode = self.mode.hget()
try:
if mode == 'delete':
milestone.dist_delete(gh)
else:
milestone.dist_edit(mode=mode, gh=gh)
except ApiError, e:
message = None
if e.code == 422:
message = u'Github refused to %s the milestone <strong>%s</strong> on <strong>%s</strong>' % (
mode, milestone.short_title, milestone.repository.full_name)
elif e.code in (401, 403):
tries = self.tries.hget()
if tries and int(tries) >= 5:
message = u'You seem to not have the right to %s the milestone <strong>%s</strong> on <strong>%s</strong>' % (
mode, milestone.short_title, milestone.repository.full_name)
if message:
messages.error(self.gh_user, message)
if mode == 'create':
milestone.delete()
else:
try:
milestone.fetch(gh, force_fetch=True)
except ApiError:
pass
return None
else:
raise
message = u'The milestone <strong>%s</strong> was correctly %sd on <strong>%s</strong>' % (
milestone.short_title, mode, milestone.repository.full_name)
messages.success(self.gh_user, message)
return None
def success_message_addon(self, queue, result):
"""
Display the action done (created/updated/deleted)
"""
return ' [%sd]' % self.mode.hget()
|
def char_in_text(char,text):
    num_of_char = 0
    length = len(text)
    i = 0
    while i < length:
        if text[i] == char:
            num_of_char = num_of_char + 1
        i = i + 1
    return num_of_char
character = input("Enter Character: ")
text = input("Enter text: ")
print("The character %s appears %d times in the text" %(character, char_in_text(character,text)))
|
from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response,render
from django.http import HttpResponseRedirect
import django.db.utils
from .forms import UploadTorrentForm
import bencode
from models import Torrent, Client
#from models import Utilities as torUtils
import datetime
import torUtils
#
# Human URLS
#
def front(request):
form = UploadTorrentForm()
if request.method == 'POST':
form = UploadTorrentForm(request.POST, request.FILES)
if form.is_valid():
try:
if handle_upload(request.FILES['torrent']): # <-- Use torrent version
return render_to_response('front.html',
                    {'torrents':Torrent.objects.all(),'uploaded':True,'form':form},
context_instance=RequestContext(request))
except Exception as e:
print e
return render_to_response('front.html',
{'torrents':Torrent.objects.all(),'uploaded':False,'form':form},
context_instance=RequestContext(request))
#
# Torrent URLS
#
def announce(request):
try:
params = torUtils.getParams(request.get_full_path()) # <-- Add this to models
ih = params[u'info_hash'][0]
# Match params to grab a specific torrent
t = Torrent.getTorrent(info_hash=ih)
# Check whether this is a new or returning client
c = t.getPeer(ip=request.META["REMOTE_ADDR"])
if c.size == 0:
c = Client.create(n = params['name'], i = request.META["REMOTE_ADDR"], p = params['port'], ih = params[u'info_hash'][0])
else:
# Parse old client
c = c[0]
c.update(params["event"])
except Exception as e:
print "Torrent not found; ", e
# return HttpResponse("Newp!")
# Match client against list of known users
# -- Seeding:
# -- Leeching:
# -- Loading:
# -- Inactive:
# If no announce, double-check peer connections, and send a new list
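    # A minimal, illustrative sketch of what a bencoded announce response could
    # look like (the peer attribute names and the 1800-second interval below are
    # hypothetical; the real tracker logic is not implemented here):
    # peers = [{"ip": p.ip, "port": int(p.port)} for p in t.peers.all()]
    # return HttpResponse(bencode.bencode({"interval": 1800, "peers": peers}))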
return HttpResponse("Fixthis")
def scrape(request):
bdT = models.getTorrent(torUtils.getParams(request.get_full_path())['info_hash'][0])
bd = {
"files":{
bdT["info_hash"]:{
"complete":len(bdT["torrent"].peers.all()), # Number of seeders (integer)
"downloaded":0, # total number of times the tracker has registered a completion
"incomplete":0, # Number of non-seeder peers / leechers (integer)
"name":"Blah", # Torrent's internal name, as specified by the "name" file
}
}
}
return HttpResponse(bencode.bencode(bd))
#
# Utilities
#
def handle_upload(torrent):
t = ""
if torrent.name.split(".")[-1].lower() == "torrent":
t = ""
for chunk in torrent.chunks():
t = str.join(t,chunk)
        try:
            tr = Torrent(**torUtils.getInfo(t,torrent.name.split(".")[0]))
            tr.save()
        except Exception as e:
            tr, created = Torrent.objects.get_or_create(**torUtils.getInfo(t,torrent.name.split(".")[0]))
            tr.delete()
            tr, created = Torrent.objects.get_or_create(**torUtils.getInfo(t,torrent.name.split(".")[0]))
            tr.save()
return True
else:
raise Exception("Not a torrent file.")
|
def bar_triang(*args):
return map(lambda a: round(sum(a) / 3.0, 4), zip(*args))
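# bar_triang computes the barycenter (centroid) of a triangle: zip(*args) regroups
# the three vertices axis by axis, and each axis is averaged and rounded to 4
# decimal places. Under Python 3, wrap the result in list() since map() returns an
# iterator. Illustrative example: list(bar_triang([4, 6], [12, 4], [10, 10]))
# evaluates to [8.6667, 6.6667].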
|
"""
公共方法类:
封装正向逆向断言方法
"""
import allure
from config import BASE_PATH
from page.page_in import PageIn
from page.page_login import PageLogin
from tools.get_driver import GetDriver
from tools.get_log import GetLog
log = GetLog.get_log()
class Until:
def __init__(self):
self.login = PageIn.get_page_login()
self.driver = GetDriver.get_driver()
self.page_login = PageLogin()
def assert_nickname(self, expect):
"""断言昵称"""
try:
assert self.login.page_get_nickname() == expect
except AssertionError as f:
self.driver.get_screenshot_as_file(BASE_PATH + "/img/bug_.png")
with open(BASE_PATH + "/img/bug_.png", "rb") as f:
allure.attach("失败原因", f.read(), allure.attach_type.PNG)
log.error(f)
raise
finally:
self.page_login.page_login_quit()
self.page_login.page_click_my()
self.page_login.page_click_exists_user()
def assert_toast(self, expect):
"""断言toast消息"""
try:
assert self.login.page_get_toast(expect) == expect
except AssertionError as f:
self.driver.get_screenshot_as_file(BASE_PATH + "/img/bug_.png")
with open(BASE_PATH + "/img/bug_.png", "rb") as e:
allure.attach("失败原因", e.read(), allure.attach_type.PNG)
log.error(e)
raise
def screen_shot(self):
"""截图并把图片写入报告"""
self.driver.get_screenshot_as_file(BASE_PATH + "/img/bug_.png")
with open(BASE_PATH + "/img/bug_.png", "rb") as f:
allure.attach("失败原因", f.read(), allure.attach_type.PNG)
log.error(f)
|
from bs4 import BeautifulSoup
import requests
def decodeWebPage():
url = 'https://www.nytimes.com/'
response = requests.get(url)
html = BeautifulSoup(response.text, features="html.parser")
print("The following are a list of articles shown on 'The New York Times' website:" + '\n')
articles = ""
for header in html.find_all('h2'):
if (len(header.get('class')) >= 2):
if (header.get('class')[0] == 'css-9ywo2s') or (header.get('class')[1] == 'esl82me2'):
articles += (header.string + '\n')
return articles
if __name__=="__main__":
print(decodeWebPage())
|
from LeerSensores import Sensores
if __name__ == "__main__":
x=Sensores()
x.distanciaMetodo()
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'signup/',views.signup_view),
url(r'login/',views.LoginView.as_view()),
url(r'verifyuser',views.VerifyUser.as_view()),
url(r'verifypass',views.VerifyPass.as_view()),
url(r'ajax$',views.AjaxView.as_view()),
url(r'ajax_view$',views.Ajax.as_view()),
url(r'test',views.AjaxTestView.as_view()),
url(r'json',views.JsonView.as_view())
]
|
#!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import sys
sys.path.append('../aardvark')
import subprocess as sp
import numpy as np
from glob import glob
import cv2
from tqdm import tqdm
import tensorflow as tf
from tensorflow.python.framework import meta_graph
from mold import Scaling as Mold
class Model:
def __init__ (self, path, name='xxx'):
mg = meta_graph.read_meta_graph_file(path + '.meta')
self.images = tf.placeholder(tf.float32, shape=(None, None, None, 3), name="images")
is_training = tf.constant(False)
self.probs, = tf.import_graph_def(mg.graph_def, name=name,
input_map={'images:0': self.images, 'is_training:0': is_training},
return_elements=['probs:0'])
self.saver = tf.train.Saver(saver_def=mg.saver_def, name=name)
self.loader = lambda sess: self.saver.restore(sess, path)
pass
pass
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model', None, '')
flags.DEFINE_integer('stride', 1, '')
flags.DEFINE_float('th', 0.5, '')
flags.DEFINE_integer('fps', 30, '')
OUTPUT = 'output'
def overlay (image, prob, color):
image[:, :, color][prob] += 75
pass
def main (_):
model = Model(FLAGS.model)
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
model.loader(sess)
mold = Mold(16, 1.0)
video_input = cv2.VideoCapture('test.mov')
video_output = None
C = 0
while video_input.grab():
flag, image = video_input.retrieve()
if not flag:
break
if image.shape[0] > image.shape[1]:
# patch the BDD .mov files
image = np.fliplr(image)
image = np.transpose(image, (1, 0, 2))
pass
probs = sess.run(model.probs, feed_dict={model.images: mold.batch_image(image)})
frame = cv2.resize(image, None, fx=0.5, fy=0.5).astype(np.float32)
frame *= 0.7
probs = mold.unbatch_prob(frame, probs)
overlay(frame, probs[:, :, 1] > 0.5, 0)
overlay(frame, probs[:, :, 2] > 0.5, 1)
overlay(frame, probs[:, :, 3] > 0.5, 2)
frame = np.clip(frame, 0, 255)
if video_output is None:
H, W = frame.shape[:2]
video_output = cv2.VideoWriter('%s.avi' % OUTPUT, cv2.VideoWriter_fourcc(*'MJPG'), FLAGS.fps, (W, H))
video_output.write(frame.astype(np.uint8))
print('%d' % C)
C += 1
pass
video_output.release()
# convert to MP4 so it can be served on web
sp.check_call('ffmpeg -i %s.avi -y -c:v libx264 -preset slow -crf 22 -pix_fmt yuv420p %s.mp4' % (OUTPUT, OUTPUT), shell=True)
pass
if __name__ == '__main__':
tf.app.run()
|
from django.shortcuts import render
from rest_framework.response import Response
from .gateway_caller import ctrl_device, set_group_status
from rest_framework.decorators import api_view
GW_IDS = ['HKG-01-0190838-004', 'HKG-01-0190838-003', 'hg20150601', 'HKG-01-0190838-001', 'HKG-01-0190838-005']
GWAPI_HOSTS = {
GW_IDS[0]: "http://localhost:10000/gwapi",
GW_IDS[1]: "http://localhost:10000/gwapi",
GW_IDS[2]: "http://localhost:10000/gwapi",
GW_IDS[3]: "http://localhost:10000/gwapi",
GW_IDS[4]: "http://localhost:10000/gwapi",
}
def index(request):
return render(request, 'control/index.html', {})
@api_view(['GET'])
def turn_device_on(request, gw_id, device_nid, format=None):
toggle_device(gw_id, device_nid, 1)
return Response({'status': 200})
@api_view(['GET'])
def turn_device_off(request, gw_id, device_nid, format=None):
toggle_device(gw_id, device_nid, 0)
return Response({'status': 200})
@api_view(['GET'])
def turn_group_on(request, gw_id, group_gid, format=None):
toggle_group(gw_id, group_gid, 1)
return Response({'status': 200})
@api_view(['GET'])
def turn_group_off(request, gw_id, group_gid, format=None):
toggle_group(gw_id, group_gid, 0)
return Response({'status': 200})
def toggle_device(gw_id, device_nid, on_off):
print(GWAPI_HOSTS[gw_id])
ctrl_device(GWAPI_HOSTS[gw_id], gw_id, int(device_nid), on_off)
def toggle_group(gw_id, group_gid, on_off):
print(GWAPI_HOSTS[gw_id])
set_group_status(GWAPI_HOSTS[gw_id], gw_id, int(group_gid), on_off)
|
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
from django import forms
class UserSignInForm(AuthenticationForm):
username = forms.CharField(widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Username',
'id': 'lg-user'}))
password = forms.CharField(widget=forms.PasswordInput(
attrs={
'class': 'form-control',
'placeholder': 'Password',
'id': 'lg-password',
}))
class UserSignUpForm(UserCreationForm):
username = forms.CharField(widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Username',
'id': 'lg-user'}))
password1 = forms.CharField(widget=forms.PasswordInput(
attrs={
'class': 'form-control',
'placeholder': 'Password',
'id': 'lg-password',
}))
password2 = forms.CharField(widget=forms.PasswordInput(
attrs={
'class': 'form-control',
'placeholder': 'ConfirmPassword',
'id': 'lg-conf-password',
}))
|
from django.shortcuts import render, redirect, HttpResponse
from .models import User
from django.contrib import messages
# Create your views here.
def landing(request):
return render(request, 'loginReg/landing.html')
def index(request):
return render(request, 'loginReg/index.html')
def process(request):
print "FROM VIEWS: ", request.POST
user = User.objects.register(request.POST)
return redirect('/')
|
#coding=utf-8
from bs4 import BeautifulSoup
import bs4
import urllib2
import urllib
import re
class QSBK(object):
"""Q糗事百科的爬虫类"""
def __init__(self):
super(QSBK, self).__init__()
self.currentPage = 1
self.jokes = []
self.currentIndex = 0
def getOneJoke(self):
        '''
        Get a single joke
        '''
if self.currentIndex < len(self.jokes):
self.currentIndex = self.currentIndex + 1
return self.jokes[self.currentIndex-1]
else:
self.currentIndex = 0
            # load the jokes from the next page
self.loadNextPageJokes()
return self.jokes[self.currentIndex]
def loadNextPageJokes(self):
self.jokes = []
self.currentPage = self.currentPage + 1
url = 'http://www.qiushibaike.com/hot/page/' + str(self.currentPage)
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = {'User-Agent':user_agent}
try:
request = urllib2.Request(url,headers=headers)
response = urllib2.urlopen(request)
html = response.read()
soup = BeautifulSoup(html)
jokes = soup.find_all("div",class_="article block untagged mb15")
for oneJoke in jokes:
                # skip jokes that contain an image
img = oneJoke.find_all('div',class_='thumb')
if img:
continue
content = oneJoke.find_all('div',class_='content')
contentText = content[0].text
# print u'内容:' + contentText.replace('\n','')
author = oneJoke.find('div',class_='author')
authorText = author.text
# print u'作者:' + authorText.replace('\n','')
self.jokes.append({'content':contentText.replace('\n',''),'author':authorText.replace('\n','')})
except urllib2.URLError,e:
if hasattr(e,"code"):
print e.code
if hasattr(e,"reason"):
print e.reason
def start(self):
while 1:
enter = raw_input('enter:')
if enter == 'n':
joke = self.getOneJoke()
print u'作者:' + joke['author']
print u'内容:' + joke['content']
print '\n'
elif enter == 'q' or enter == 'Q':
break
else:
print 'input error.\n'
spider = QSBK()
spider.start()
|
import time
import requests
from datetime import datetime
import smtplib
MY_LAT = "25.211549"
MY_LONG = "85.514542"
my_email = "jalltrades12@gmail.com"
password = "@#Jack098"
response = requests.get(url="http://api.open-notify.org/iss-now.json")
response.raise_for_status()
data = response.json()
iss_latitude = float(data["iss_position"]["latitude"])
iss_longitude = float(data["iss_position"]["longitude"])
# Your position is within +5 or -5 degrees of the ISS position.
def pos_within_margin():
if 5 >= float(MY_LAT) - iss_latitude >= -5 and 5 >= float(MY_LONG) - iss_longitude >= -5:
return True
return False
parameters = {
"lat": MY_LAT,
"lng": MY_LONG,
"formatted": 0,
}
response = requests.get("https://api.sunrise-sunset.org/json", params=parameters)
response.raise_for_status()
data = response.json()
sunrise = int(data["results"]["sunrise"].split("T")[1].split(":")[0])
sunset = int(data["results"]["sunset"].split("T")[1].split(":")[0])
time_now = datetime.now()
# If the ISS is close to my current position
# and it is currently dark
# Then send me an email to tell me to look up.
# BONUS: run the code every 60 seconds.
def is_dark():
    # It is dark when the current UTC hour is after sunset or before sunrise.
    if time_now.hour >= sunset or time_now.hour <= sunrise:
        return True
    return False
def send_mail():
if pos_within_margin() and is_dark():
with smtplib.SMTP("smtp.gmail.com") as connection:
connection.starttls()
connection.login(user=my_email, password=password)
connection.sendmail(
from_addr=my_email,
to_addrs=my_email,
msg=f"Subject:Go watch The ISS\n\nThe ISS can be sighted now"
)
while True:
    send_mail()
time.sleep(60)
|
import numbers
from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from PIL import Image, ImageEnhance, ImageOps
try:
import accimage
except ImportError:
accimage = None
@torch.jit.unused
def _is_pil_image(img: Any) -> bool:
if accimage is not None:
return isinstance(img, (Image.Image, accimage.Image))
else:
return isinstance(img, Image.Image)
@torch.jit.unused
def get_dimensions(img: Any) -> List[int]:
if _is_pil_image(img):
if hasattr(img, "getbands"):
channels = len(img.getbands())
else:
channels = img.channels
width, height = img.size
return [channels, height, width]
raise TypeError(f"Unexpected type {type(img)}")
@torch.jit.unused
def get_image_size(img: Any) -> List[int]:
if _is_pil_image(img):
return list(img.size)
raise TypeError(f"Unexpected type {type(img)}")
@torch.jit.unused
def get_image_num_channels(img: Any) -> int:
if _is_pil_image(img):
if hasattr(img, "getbands"):
return len(img.getbands())
else:
return img.channels
raise TypeError(f"Unexpected type {type(img)}")
@torch.jit.unused
def hflip(img: Image.Image) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return img.transpose(Image.FLIP_LEFT_RIGHT)
@torch.jit.unused
def vflip(img: Image.Image) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return img.transpose(Image.FLIP_TOP_BOTTOM)
@torch.jit.unused
def adjust_brightness(img: Image.Image, brightness_factor: float) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
enhancer = ImageEnhance.Brightness(img)
img = enhancer.enhance(brightness_factor)
return img
@torch.jit.unused
def adjust_contrast(img: Image.Image, contrast_factor: float) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
enhancer = ImageEnhance.Contrast(img)
img = enhancer.enhance(contrast_factor)
return img
@torch.jit.unused
def adjust_saturation(img: Image.Image, saturation_factor: float) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
enhancer = ImageEnhance.Color(img)
img = enhancer.enhance(saturation_factor)
return img
@torch.jit.unused
def adjust_hue(img: Image.Image, hue_factor: float) -> Image.Image:
if not (-0.5 <= hue_factor <= 0.5):
raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].")
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
input_mode = img.mode
if input_mode in {"L", "1", "I", "F"}:
return img
h, s, v = img.convert("HSV").split()
np_h = np.array(h, dtype=np.uint8)
    # uint8 addition takes care of rotation across boundaries
with np.errstate(over="ignore"):
np_h += np.uint8(hue_factor * 255)
h = Image.fromarray(np_h, "L")
img = Image.merge("HSV", (h, s, v)).convert(input_mode)
return img
@torch.jit.unused
def adjust_gamma(
img: Image.Image,
gamma: float,
gain: float = 1.0,
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
if gamma < 0:
raise ValueError("Gamma should be a non-negative real number")
input_mode = img.mode
img = img.convert("RGB")
gamma_map = [int((255 + 1 - 1e-3) * gain * pow(ele / 255.0, gamma)) for ele in range(256)] * 3
img = img.point(gamma_map) # use PIL's point-function to accelerate this part
img = img.convert(input_mode)
return img
@torch.jit.unused
def pad(
img: Image.Image,
padding: Union[int, List[int], Tuple[int, ...]],
fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant",
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate fill arg")
if not isinstance(padding_mode, str):
raise TypeError("Got inappropriate padding_mode arg")
if isinstance(padding, list):
padding = tuple(padding)
if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]:
raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple")
if isinstance(padding, tuple) and len(padding) == 1:
# Compatibility with `functional_tensor.pad`
padding = padding[0]
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
if padding_mode == "constant":
opts = _parse_fill(fill, img, name="fill")
if img.mode == "P":
palette = img.getpalette()
image = ImageOps.expand(img, border=padding, **opts)
image.putpalette(palette)
return image
return ImageOps.expand(img, border=padding, **opts)
else:
if isinstance(padding, int):
pad_left = pad_right = pad_top = pad_bottom = padding
if isinstance(padding, tuple) and len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
if isinstance(padding, tuple) and len(padding) == 4:
pad_left = padding[0]
pad_top = padding[1]
pad_right = padding[2]
pad_bottom = padding[3]
p = [pad_left, pad_top, pad_right, pad_bottom]
cropping = -np.minimum(p, 0)
if cropping.any():
crop_left, crop_top, crop_right, crop_bottom = cropping
img = img.crop((crop_left, crop_top, img.width - crop_right, img.height - crop_bottom))
pad_left, pad_top, pad_right, pad_bottom = np.maximum(p, 0)
if img.mode == "P":
palette = img.getpalette()
img = np.asarray(img)
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), mode=padding_mode)
img = Image.fromarray(img)
img.putpalette(palette)
return img
img = np.asarray(img)
# RGB image
if len(img.shape) == 3:
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
# Grayscale image
if len(img.shape) == 2:
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
return Image.fromarray(img)
@torch.jit.unused
def crop(
img: Image.Image,
top: int,
left: int,
height: int,
width: int,
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return img.crop((left, top, left + width, top + height))
@torch.jit.unused
def resize(
img: Image.Image,
size: Union[List[int], int],
interpolation: int = Image.BILINEAR,
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
if not (isinstance(size, list) and len(size) == 2):
raise TypeError(f"Got inappropriate size arg: {size}")
return img.resize(tuple(size[::-1]), interpolation)
@torch.jit.unused
def _parse_fill(
fill: Optional[Union[float, List[float], Tuple[float, ...]]],
img: Image.Image,
name: str = "fillcolor",
) -> Dict[str, Optional[Union[float, List[float], Tuple[float, ...]]]]:
# Process fill color for affine transforms
num_channels = get_image_num_channels(img)
if fill is None:
fill = 0
if isinstance(fill, (int, float)) and num_channels > 1:
fill = tuple([fill] * num_channels)
if isinstance(fill, (list, tuple)):
if len(fill) == 1:
fill = fill * num_channels
elif len(fill) != num_channels:
msg = "The number of elements in 'fill' does not match the number of channels of the image ({} != {})"
raise ValueError(msg.format(len(fill), num_channels))
fill = tuple(fill) # type: ignore[arg-type]
if img.mode != "F":
if isinstance(fill, (list, tuple)):
fill = tuple(int(x) for x in fill)
else:
fill = int(fill)
return {name: fill}
@torch.jit.unused
def affine(
img: Image.Image,
matrix: List[float],
interpolation: int = Image.NEAREST,
fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
output_size = img.size
opts = _parse_fill(fill, img)
return img.transform(output_size, Image.AFFINE, matrix, interpolation, **opts)
@torch.jit.unused
def rotate(
img: Image.Image,
angle: float,
interpolation: int = Image.NEAREST,
expand: bool = False,
center: Optional[Tuple[int, int]] = None,
fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
opts = _parse_fill(fill, img)
return img.rotate(angle, interpolation, expand, center, **opts)
@torch.jit.unused
def perspective(
img: Image.Image,
perspective_coeffs: List[float],
interpolation: int = Image.BICUBIC,
fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
opts = _parse_fill(fill, img)
return img.transform(img.size, Image.PERSPECTIVE, perspective_coeffs, interpolation, **opts)
@torch.jit.unused
def to_grayscale(img: Image.Image, num_output_channels: int) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
if num_output_channels == 1:
img = img.convert("L")
elif num_output_channels == 3:
img = img.convert("L")
np_img = np.array(img, dtype=np.uint8)
np_img = np.dstack([np_img, np_img, np_img])
img = Image.fromarray(np_img, "RGB")
else:
raise ValueError("num_output_channels should be either 1 or 3")
return img
@torch.jit.unused
def invert(img: Image.Image) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return ImageOps.invert(img)
@torch.jit.unused
def posterize(img: Image.Image, bits: int) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return ImageOps.posterize(img, bits)
@torch.jit.unused
def solarize(img: Image.Image, threshold: int) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return ImageOps.solarize(img, threshold)
@torch.jit.unused
def adjust_sharpness(img: Image.Image, sharpness_factor: float) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
enhancer = ImageEnhance.Sharpness(img)
img = enhancer.enhance(sharpness_factor)
return img
@torch.jit.unused
def autocontrast(img: Image.Image) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return ImageOps.autocontrast(img)
@torch.jit.unused
def equalize(img: Image.Image) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return ImageOps.equalize(img)
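
if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): exercise a few
    # of the helpers on a small synthetic image so no input file is needed.
    demo = Image.new("RGB", (32, 32), color=(128, 64, 0))
    demo = hflip(demo)                    # mirror horizontally
    demo = adjust_brightness(demo, 1.2)   # 20% brighter
    demo = pad(demo, padding=4, fill=0)   # 4-pixel constant border
    print(get_dimensions(demo))           # -> [3, 40, 40]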
|
import csv
import sys
import re
def main():
    # check input length
if len(sys.argv) != 3:
print('Usage: python dna.py data.csv sequence.txt')
return
    # read the DNA sequence into memory
dna = ''
with open(sys.argv[2], "r") as file_dna:
dna = file_dna.read().strip('\n')
    # read the STR database into memory
data = []
field_name = []
with open(sys.argv[1], "r") as file_data:
reader = csv.DictReader(file_data)
field_name = reader.fieldnames[1:]
for line in reader:
for n in field_name:
line[n] = int(line[n])
data.append(line)
# calc STRs
dna_STR = {}
for name in field_name:
dna_STR[name] = calc_STRs(dna, name)[0]
# Find match
for d in data:
        matches = []
        for name in field_name:
            matches.append(d[name] == dna_STR[name])
        if sum(matches) == len(field_name):
print(d["name"])
return
# if no match
print("No match")
def calc_STRs(dna, regEx):
""" calc STRs """
# get Match
match = re.search(regEx, dna)
if match == None:
return [0]
# get Start match position
position = match.start()
# get length STRs
count_STR = 0
while dna[position:(position+len(regEx))] == regEx:
count_STR += 1
position += len(regEx)
# add STR to STRs list
list_STRs = [count_STR]
# looping other dna
list_STRs += calc_STRs(dna[position:], regEx)
    # if list length > 1 => return the biggest number as a one-element list
if len(list_STRs) > 1:
list_STRs.sort(reverse=True)
# print(list_STRs)
return [list_STRs[0]]
# if list length 1 element
return list_STRs
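
# Illustrative example (not part of the required command-line interface):
#   calc_STRs("AGATAGATAGATTTAGAT", "AGAT")
# scans the sequence recursively and returns the longest consecutive run of
# the STR as a one-element list, here [3].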
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
print('train classifier stage 4')
import os
import sys
import csv
import numpy as np
import pickle
from PIL import Image
import tensorflow as tf
import tensorflow_ae_base
from tensorflow_ae_base import *
import tensorflow_util
import myutil
exec(open('extern_params.py').read())
ss = 2048 # sample size
na = 1
## one batch one file
# bronchioid
# magnoid
# squamoid
import random
data_table = list(csv.reader(open('filelist.txt','r'), delimiter='\t'))
ns = len(data_table)
ni = 16
iii = random.sample(range(ns),ni)
tmp = []
tmpy = []
for aa in range(ni):
ii = iii[aa]
path_data = data_table[ii][0]
img_tmp = Image.open(path_data,'r')
tmp.append((np.asarray(img_tmp) / 255.0)[np.newaxis,:,:,:])
tmpy.append(int(data_table[ii][1]))
qqq_trn = np.vstack(tmp)
qqq_trn1 = qqq_trn[:,::-1,:,:]
qqq_trn2 = qqq_trn[:,:,::-1,:]
qqq_trn4 = np.transpose(qqq_trn,[0,2,1,3])
qqq_trn3 = qqq_trn[:,::-1,::-1,:]
qqq_trn5 = qqq_trn4[:,::-1,:,:]
qqq_trn6 = qqq_trn4[:,:,::-1,:]
qqq_trn7 = qqq_trn3[:,::-1,::-1,:]
yyy_trn = tmpy
nn,ny,nx,nl = qqq_trn.shape
print('nn ny nx nl',nn,ny,nx,nl)
exec(open('tensorflow_ae_stage1.py').read())
exec(open('tensorflow_ae_stage2.py').read())
exec(open('tensorflow_ae_stage3.py').read())
exec(open('tensorflow_classify_stage4.py').read())
tf_input = tf.placeholder(tf.float32, [ni,ny,nx,nl])
tf_yyy = tf.placeholder(tf.int64, [ni])
tf_encode1 = get_encode1(tf_input)
tf_encode2 = get_encode2(tf_encode1)
tf_encode3 = get_encode3(tf_encode2)
# sess.run(tf.initialize_all_variables())
# nj = 9
# tmp = []
# tmpy = []
# for aa in range(nj):
# tmpx = []
# iii = random.sample(range(ns),ni)
# for aa in range(ni):
# ii = iii[aa]
# path_data = data_table[ii][0]
# img_tmp = Image.open(path_data,'r')#
# tmpx.append((np.asarray(img_tmp) / 255.0)[np.newaxis,:,:,:])
# tmpy.append(int(data_table[ii][1]))
# qqq_trn = np.vstack(tmpx)
# yyy_trn = tmpy
# hoge = tf_encode3.eval({tf_input:qqq_trn})
# tmp.append(hoge)
tf_encode4 = get_encode4(tf_encode3)
tf_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(tf_encode4[:,0,0,:],tf_yyy)
tf_mean_loss = tf.reduce_mean(tf_loss)
learning_rate = 1e-3
tf_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
tf_train = tf_optimizer.minimize(tf_loss)
sess.run(tf.initialize_all_variables())
for tt in range(tmax):
if((tprint > 0) and (tt % tprint==0)):
print(tt,tf_mean_loss.eval({tf_input: qqq_trn, tf_yyy: yyy_trn}))
if((tstage > 0) and (tt % tstage==0)):
iii = random.sample(range(ns),ni)
tmp = []
tmpy = []
for aa in range(ni):
ii = iii[aa]
path_data = data_table[ii][0]
img_tmp = Image.open(path_data,'r')#
tmp.append((np.asarray(img_tmp) / 255.0)[np.newaxis,:,:,:])
tmpy.append(int(data_table[ii][1]))
qqq_trn = np.vstack(tmp)
yyy_trn = tmpy
qqq_trn1 = qqq_trn[:,::-1,:,:]
qqq_trn2 = qqq_trn[:,:,::-1,:]
qqq_trn4 = np.transpose(qqq_trn,[0,2,1,3])
qqq_trn3 = qqq_trn[:,::-1,::-1,:]
qqq_trn5 = qqq_trn4[:,::-1,:,:]
qqq_trn6 = qqq_trn4[:,:,::-1,:]
qqq_trn7 = qqq_trn3[:,::-1,::-1,:]
#
sess.run(tf_train,{tf_input: qqq_trn, tf_yyy: yyy_trn})
sess.run(tf_train,{tf_input: qqq_trn1, tf_yyy: yyy_trn})
sess.run(tf_train,{tf_input: qqq_trn2, tf_yyy: yyy_trn})
sess.run(tf_train,{tf_input: qqq_trn3, tf_yyy: yyy_trn})
sess.run(tf_train,{tf_input: qqq_trn4, tf_yyy: yyy_trn})
sess.run(tf_train,{tf_input: qqq_trn5, tf_yyy: yyy_trn})
sess.run(tf_train,{tf_input: qqq_trn6, tf_yyy: yyy_trn})
sess.run(tf_train,{tf_input: qqq_trn7, tf_yyy: yyy_trn})
if(tt < tmax):
print(tmax,tf_mean_loss.eval({tf_input: qqq_trn, tf_yyy: yyy_trn}))
hoge = tf.argmax(tf_encode4[:,0,0,:],dimension=1)
fuga = hoge.eval({tf_input:qqq_trn})
print(np.sum(fuga == yyy_trn),"/",len(fuga),"\n")
#
# save parameters
#
if(trainable1):
save_stage1()
print('stamp1 = \'{}\''.format(stamp))
if(trainable2):
save_stage2()
print('stamp2 = \'{}\''.format(stamp))
if(trainable3):
save_stage3()
print('stamp3 = \'{}\''.format(stamp))
if(trainable4):
save_stage4()
print('stamp4 = \'{}\''.format(stamp))
|
import win32con
import win32gui
from common.contants import choice_question_detail_dir, img_dir
from page.base.basepage import BasePage
class Choice_Question_Detail(BasePage):
def choice_question(self,subject_name,option_list):
        '''
        Question title / options
        '''
self._params["subject_name"] = subject_name
self.step(choice_question_detail_dir, "subject_name")
i=0
for option in option_list:
i += 1
# self._params["item"] = option["item"]
eles = self.step(choice_question_detail_dir,"get_item_eles")
eles[-1].clear()
eles[-1].send_keys(option["item"])
if option["is_other"] == True:
                # Tick the checkbox that enables free-text input
self.sleep(1)
other_ele = self.step(choice_question_detail_dir, "get_other_eles")
other_ele[-1].click()
if option["tips"]["switch"] == True:
                # Enter the option description
# self.sleep(1)
other_ele = self.step(choice_question_detail_dir, "get_tips_eles")
other_ele[-1].click()
self._params["option_tips"] = option["tips"]["option_tips"]
self.step(choice_question_detail_dir,"input_options_tips")
if option["is_img"] == True:
print(f"i:{i}")
                # Upload an image
self.sleep(1)
img_eles = self.step(choice_question_detail_dir, "get_img_eles")
img_eles[i-1].click()
self.sleep(2)
                # Locate the native file-open dialog
                # Top-level window: class "#32770", title "打开" ("Open")
                dialog = win32gui.FindWindow("#32770", "打开")
                # Walk down the child-window hierarchy
                ComboBoxEx32 = win32gui.FindWindowEx(dialog, 0, "ComboBoxEx32", None)  # level 2
                comboBox = win32gui.FindWindowEx(ComboBoxEx32, 0, "ComboBox", None)  # level 3
                # Edit control
                edit = win32gui.FindWindowEx(comboBox, 0, 'Edit', None)  # level 4
                # "Open" button
                button = win32gui.FindWindowEx(dialog, 0, 'Button', "打开(&O)")  # level 2
                # Type the file's absolute path, then click the "Open" button
                win32gui.SendMessage(edit, win32con.WM_SETTEXT, None, img_dir)  # send the file path
                win32gui.SendMessage(dialog, win32con.WM_COMMAND, 1, button)  # click the "Open" button
self.sleep(2)
if i < len(option_list):
                # Click the add-option icon
                # Without a short wait the element can be obscured
self.sleep(1)
add_eles = self.step(choice_question_detail_dir,"get_add_eles")
add_eles[-1].click()
        # Without a short wait the element can be obscured
self.sleep(1)
self.step(choice_question_detail_dir, "click_save")
from page.quarter.quarter_management.create_quarter import Create_Quarter
return Create_Quarter(self._driver)
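
# Usage sketch (illustrative; assumes an initialised WebDriver and the step
# definitions referenced by choice_question_detail_dir; the question text and
# option values below are hypothetical):
#   options = [
#       {"item": "Option A", "is_other": False,
#        "tips": {"switch": False}, "is_img": False},
#       {"item": "Other", "is_other": True,
#        "tips": {"switch": True, "option_tips": "please specify"}, "is_img": False},
#   ]
#   Choice_Question_Detail(driver).choice_question("Favourite colour?", options)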
|
# Enter script code
keyboard.send_key("<backspace>")
keyboard.send_key("<home>")
keyboard.send_keys("help(")
keyboard.send_key("<end>")
keyboard.send_key(")")
keyboard.send_key("<enter>")
|
import os
import pdb
def unique_output_filename(folder, format_string, maxval=100000):
"""
writes out data which we don't want to be overwritten
folder : str
The folder to write to
format_string : str
Something which can be formated using format_string.format(i)
maxval : int
The maximum number of iterations to try
"""
if not os.path.isdir(folder):
raise ValueError("Folder {} does not exists".format(folder))
full_format_string = os.path.join(folder, format_string)
for i in range(maxval):
if not os.path.exists(full_format_string.format(i)):
return full_format_string.format(i)
raise ValueError("No file could be created with folder {}, format string {}, and maxval {}".format(
folder, format_string, maxval))
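
if __name__ == "__main__":
    # Minimal demonstration in a throwaway temporary directory (illustrative
    # only; the file pattern "run_{}.txt" is arbitrary).
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        first = unique_output_filename(tmp, "run_{}.txt")
        open(first, "w").close()                  # claim the first slot
        second = unique_output_filename(tmp, "run_{}.txt")
        print(first)                              # .../run_0.txt
        print(second)                             # .../run_1.txt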
|
# Generated by Django 3.1.5 on 2021-03-06 20:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('hackathon', '0002_auto_20210306_1959'),
]
operations = [
migrations.CreateModel(
name='BonusImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='')),
('approved', models.BooleanField()),
('competitor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hackathon.competitor')),
],
),
]
|
'''
Created on July 19, 2010
@author: Jason Huang
'''
#!/usr/bin/env python
import pymongo
import os
import simplejson
import MongoEncoder.MongoEncoder
from Map.ProcessTripHandler import GetTrips
from Map.ProcessTripHandler import ShowNewTrips
from Map.ProcessTripHandler import ShowHotTrips
from Map.ProcessTripHandler import ShowMyTrips
from Map.ProcessTripHandler import ShowEndTrips
from Map.ProcessTripHandler import SaveTrips
from Map.ProcessTripHandler import SubscribeTrip
from Map.ProcessTripHandler import UnsubscribeTrip
from Map.ProcessTripHandler import LikeTripHandler
from Map.ProcessTripHandler import SaveTripHandler
from Map.ProcessTripHandler import AddTripTagHandler
from Map.ProcessTripHandler import AddTripGroupHandler
from Map.ProcessTripHandler import RemoveTripGroupHandler
from Map.ProcessTripHandler import MergeTripGroupHandler
from Map.ProcessTripHandler import GetTripGroupForMergeHandler
from Map.ProcessTripHandler import GetTripGroupForMapHandler
from Map.ProcessTripHandler import GetTripGroupForSiteHandler
from Map.ProcessTripHandler import MyTripsHandler
from Map.ProcessTripHandler import ProcessTripRequestHandler
from Users.UserInfo import UpdateUserProfileHandler
from Users.UserInfo import UpdatePaymentHandler
from Calendar.CalendarHandler import ExportCalendarHandler
from PDF.PDFHandler import DownloadPDFHanlder
from Expense.ExpenseHandler import *
from Map.BrowseTripHandler import BaseHandler
from Map.BrowseTripHandler import BrowseHandler
from Map.BrowseTripHandler import EntryHandler
from Map.BrowseTripHandler import TripPageHandler
from Map.CreateTripHandler import ComposeHandler
from Map.CreateTripHandler import CreateTripModule
#from Users.Message import MessageHandler
from Users.Message import PostMessageHandler
from Users.Notification import NotificationHandler
from Settings.Settings import SettingsHandler
from Users.Friend import FriendEntryModule
from Auth.AuthHandler import CreateAccountHandler
from Auth.AuthHandler import LoginHandler
from Auth.AuthHandler import AuthLogoutHandler
from Auth.AuthHandler import AuthLoginFBHandler
from Auth.AuthHandler import AuthLogoutFBHandler
from Auth.AuthHandler import AuthLoginTWHandler
from Auth.AuthHandler import AuthLogoutTWHandler
from Auth.AuthHandler import GoogleCalendarAuthHandler
from Users.UserInfo import UserHandler
from Users.UserInfo import FollowUserHandler
from Users.UserInfo import UserSettingHandler
from Users.UserInfo import FriendRequestHandler
from Users.UserInfo import FriendRemoveHandler
from Users.UserInfo import FriendConfirmHandler
from Users.UserInfo import GetFriendHandler
from Users.UserInfo import UnFollowUserHandler
from Users.UserInfo import TravelersHandler
from Users.UserInfo import AddUserToTripHandler
from Users.UserInfo import RemoveUserFromTripHandler
from Users.UserInfo import CheckUserinTripHandler
from Users.UserInfo import GetTripMemberHandler
from Guides.GuidesHandler import *
from Comment.CommentHandler import PostCommentHandler
from Comment.CommentHandler import DeleteCommentHandler
from Comment.CommentHandler import PostFeedHandler
from Exception.ExceptionHandler import ExceptionPage
from Search.SearchHandler import RealTimeSearchAllHandler
from Search.SearchHandler import SearchFriendHandler
from Search.SearchHandler import SearchUserHandler
from Search.SearchHandler import RealTimeSearchUserHandler
from Sites.SiteHandler import AddSiteToTrip
from Sites.SiteHandler import PostNoteToSite
from Sites.SiteHandler import RemoveSiteFromTrip
from Sites.SiteHandler import ShowSightsHandler
from Social.SocialHandler import FaceBookGetFriendsHandler
from Social.SocialHandler import FaceBookInviteHandler
from Social.SocialHandler import FaceBookPostHandler
from Social.SocialHandler import TwitterPostHandler
from Social.SocialHandler import TripShareInviteHandler
from Mail.MailHandler import EmailInviteHandler
#import tornado.database
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import bson
from tornado.options import define, options
define("port", default=8001, help="run on the given port", type=int)
define("mysql_host", default="127.0.0.1:3306", help="trip database host")
define("mysql_database", default="TripShare", help="trip database name")
define("mysql_user", default="jason", help="trip database user")
define("mysql_password", default="jason", help="trip database password")
define("google_client_id", help="your Google application API key", default="1072071824058-clak679f8h0lckdrm0ts21h73nah75ot.apps.googleusercontent.com")
define("google_client_secret", help="your Google application secret", default="AY4dvAK_iBx-QlcUQiFVv8Ti")
define("google_developer_key", help="your Google developer key", default="AIzaSyAyy9M1HZ1nDMdBwGMPDLamhFkCB8iQEJ0")
define("amazon_access_key", help="your Amazon application access key", default="AKIAJLDHNWC3WXD6PGVA")
define("amazon_secret", help="your Amazon application secret", default="0lGQzT3a8M6uJMcGajA6RpNf+/X9ImYZYSbysN2c")
define("facebook_api_key", help="your Facebook application API key", default="221334761224948")
define("facebook_secret", help="your Facebook application secret", default="b0e85f25c5bfddb7ebf40b7670cf5db3")
define("twitter_consumer_key", help="your Twitter Consumer key", default="WInWKQDuB2IhJLUEuhZxA")
define("twitter_consumer_secret", help="your Twitter application secret", default="INRfIiXtsKoqm0Lneh1dEt4GjUPvp6Uuakk90v0jY")
define("SANDBOX_API_USER_NAME", help="paypal sandbox user name", default="tom_1333660419_biz_api1.hotmail.com")
define("SANDBOX_API_PASSWORD", help="paypal sandbox password", default="1333660452")
define("SANDBOX_API_SIGNATURE", help="paypal sandbox signature", default="AsmQhjDj6zsu8.Jn3.xALQRY4m1jAwJ4yUA2kag1Thrd0xMU4aVRuAbt")
define("SANDBOX_APPLICATION_ID", help="paypal sandbox application id", default="APP-80W284485P519543T")
define("SANDBOX_ENDPOINT", help="paypal sandbox api endpoint", default="https://svcs.sandbox.paypal.com/AdaptivePayments/")
define("MAILCHIMP_API_KEY", help="API key for mailchimp", default="adc8df36f4e452eb4b620779bb527069-us4")
#from functools import wraps
#from tornado.web import HTTPError
#from tornado.websocket import WebSocketHandler
""" Renders the main template."""
class MainPage(BaseHandler):
def head(self):
image_info=[]
dest_places = []
""" Get RANDOM trips to show in the map"""
trips = self.syncdb.trips.find().limit(10)
if trips.count() > 0:
for trip in trips:
trip_user = self.syncdb.users.find_one({'user_id': bson.ObjectId(trip['owner_id'])})
if (trip_user):
image_info.append(trip['title']+';'+trip_user['picture'] +';'+'/trip/'+trip['slug'])
dest_places.append(unicode(simplejson.dumps(trip['groups'][0]['dest_place'], cls=MongoEncoder.MongoEncoder.MongoEncoder)))
""" Get latest trips to show in the list"""
latest_trip_ids = self.syncdb.trips.find().sort("published", pymongo.DESCENDING).limit(10)
top_shares = self.syncdb.users.find().sort("trip_count", pymongo.DESCENDING).limit(10)
top_guides = self.syncdb.guides.find().sort("rating", pymongo.DESCENDING).limit(5)
_trips = []
if latest_trip_ids.count() > 0:
for latest_trip_id in latest_trip_ids:
latest_trip_id['check_join'] = False
if len(latest_trip_id['groups'])>0:
members = latest_trip_id['groups'][0]['members']
if self.current_user:
for member in members:
if member['user_id'] == self.current_user['user_id']:
latest_trip_id['check_join'] = True
break
#latest_trip_id['html'] = self.render_string("Module/trip.html", trip = latest_trip_id)
_trips.append(latest_trip_id)
self.render("newbeforesignin.html", guides=top_guides, dest_places = dest_places, trips=trips, image_info=image_info, latest_trip_ids=_trips, top_shares = top_shares)
def get(self):
image_info=[]
dest_places = []
""" Get RANDOM trips to show in the map"""
trips = self.syncdb.trips.find().limit(10)
if trips.count() > 0:
for trip in trips:
trip_user = self.syncdb.users.find_one({'user_id': bson.ObjectId(trip['owner_id'])})
if (trip_user):
image_info.append(trip['title']+';'+trip_user['picture'] +';'+'/trip/'+trip['slug'])
dest_places.append(unicode(simplejson.dumps(trip['groups'][0]['dest_place'], cls=MongoEncoder.MongoEncoder.MongoEncoder)))
""" Get latest trips to show in the list"""
latest_trip_ids = self.syncdb.trips.find().sort("published", pymongo.DESCENDING).limit(10)
top_shares = self.syncdb.users.find().sort("trip_count", pymongo.DESCENDING).limit(10)
top_guides = self.syncdb.guides.find().sort("rating", pymongo.DESCENDING).limit(5)
_trips = []
if latest_trip_ids.count() > 0:
for latest_trip_id in latest_trip_ids:
latest_trip_id['check_join'] = False
if len(latest_trip_id['groups'])>0:
members = latest_trip_id['groups'][0]['members']
if self.current_user:
for member in members:
if member['user_id'] == self.current_user['user_id']:
latest_trip_id['check_join'] = True
break
#latest_trip_id['html'] = self.render_string("Module/trip.html", trip = latest_trip_id)
_trips.append(latest_trip_id)
self.render("newbeforesignin.html", guides=top_guides, dest_places = dest_places, trips=trips, image_info=image_info, latest_trip_ids=_trips, top_shares = top_shares)
class Terms(BaseHandler):
def get(self):
self.render("terms.html")
class Blog(BaseHandler):
def get(self):
self.render("blog.html")
class AboutUs(BaseHandler):
def get(self):
if self.current_user:
greeting = "Welcome " + str(self.get_current_username())
#user = None
else:
greeting = "Welcome "
# user = self.db.get("SELECT * FROM users WHERE user_id = %s", self.current_user.id)
self.render("aboutus.html", greeting = greeting)
class ResetPassword(BaseHandler):
def get(self):
if self.current_user:
greeting = "Welcome " + str(self.get_current_username())
#user = None
else:
greeting = "Welcome "
#user = self.db.get("SELECT * FROM users WHERE user_id = %s", self.current_user.id)
self.render("resetpassword.html", greeting = greeting)
class Privacy(BaseHandler):
def get(self):
self.render("privacy.html")
class Application(tornado.web.Application):
def __init__(self):
handlers = [
# main page
(r"/", MainPage),
# signup, login and logout
(r"/login", LoginHandler),
(r"/account/login", LoginHandler),
(r"/account/create", CreateAccountHandler),
# (r"/auth/login", AuthLoginHandler),
(r"/auth/logout", AuthLogoutHandler),
(r"/auth/fblogin", AuthLoginFBHandler),
(r"/auth/fblogout", AuthLogoutFBHandler),
(r"/auth/twlogin", AuthLoginTWHandler),
(r"/auth/twlogout", AuthLogoutTWHandler),
(r"/updateusersetting", UserSettingHandler),
#(r"/oauth2callback/([^/]+)", GoogleoAuthHandler)
(r"/calendar_oauth2callback", GoogleCalendarAuthHandler),
(r"/saveexpense", ExpenseSaveHandler),
(r"/getexpense", GetExpenseHandler),
(r"/processexpense", ExpenseProcessHandler),
(r"/checkpaymentaccount", ExpenseCheckAccountHandler),
(r"/callpaymentapi", ExpensePaymentAPIHandler),
(r"/trips", BrowseHandler), # where you create and browse trips
(r"/trip/([^/]+)", EntryHandler),
(r"/trips/([^/]+)/([^/]+)", TripPageHandler),
(r"/like_trip", LikeTripHandler),
(r"/processtriprequest", ProcessTripRequestHandler),
(r"/save_trip", SaveTripHandler), #save the trip to personal save
(r"/gettrips", GetTrips),
(r"/createtrip", ComposeHandler),
(r"/savetrip", SaveTrips), #save the trip when edit trip
(r"/newtrips", ShowNewTrips),
(r"/showmytrips", ShowMyTrips),
(r"/hottrips", ShowHotTrips),
(r"/endtrips", ShowEndTrips),
(r"/addgrouptotrip", AddTripGroupHandler),
(r"/removegroupfromtrip", RemoveTripGroupHandler),
(r"/mergetripgroups", MergeTripGroupHandler),
(r"/mytrips", MyTripsHandler),
(r"/exportcalendar", ExportCalendarHandler),
(r"/downloadpdf", DownloadPDFHanlder),
(r"/addsitetotrip", AddSiteToTrip),
(r"/removesitefromtrip", RemoveSiteFromTrip),
(r"/postsitenote", PostNoteToSite),
(r"/guides", BrowseGuidesHandler),
(r"/gettripgroupformerge", GetTripGroupForMergeHandler),
(r"/gettripgroupformap/([^/]+)/([^/]+)", GetTripGroupForMapHandler),
(r"/gettripgroupforsite/([^/]+)/([^/]+)", GetTripGroupForSiteHandler),
(r"/add_trip_tag", AddTripTagHandler),
(r"/guides/([^/]+)", CategoryGuidesHandler),
(r"/guide/([^/]+)", EntryGuidesHandler),
(r"/add_guide_tag", AddGuidesTagHandler),
(r"/saveguide", SaveGuidesHandler),
(r"/likeguide", LikeGuidesHandler),
(r"/createguide", CreateGuidesHandler),
(r"/exportguide", ExportGuidesHandler),
(r"/getguidesforimport", GetGuidesForImportHandler),
(r"/deleteguide", DeleteGuidesHandler),
(r"/importguidefile", ImportGuidesHandler),
(r"/importguidetotrip", ImportGuideToTripHandler),
(r"/sights/([^/]+)", ShowSightsHandler),
#(r"/a/changepicture", UserPictureHandler),
(r"/updateuserprofile", UpdateUserProfileHandler),
(r"/updatepaymentmethod", UpdatePaymentHandler),
(r"/settings", SettingsHandler),
(r"/blog", Blog),
(r"/postcomment", PostCommentHandler),
(r"/deletecomment", DeleteCommentHandler),
(r"/postfeed", PostFeedHandler),
(r"/searchpeople/([^/]+)", SearchUserHandler),
(r"/realtime_searchpeople/([^/]+)", RealTimeSearchUserHandler),
(r"/realtime_searchall/([^/]+)", RealTimeSearchAllHandler),
#(r"/checkuserintrip/([^/]+)/([^/]+)", CheckUserinTripHandler),
(r"/checkuserintrip", CheckUserinTripHandler),
(r"/sendexpenserequest", ExpenseRequestHandler),
(r"/getnotificationpaymentmethod", GetPaymentMethodHandler),
(r"/confirmfriend", FriendConfirmHandler),
(r"/requestfriend", FriendRequestHandler),
(r"/removefriend", FriendRemoveHandler),
(r"/searchfriend/([^/]+)", SearchFriendHandler),
(r"/getfriends", GetFriendHandler),
(r"/gettripmembers", GetTripMemberHandler),
(r"/travelers/([^/]*)", TravelersHandler),
(r"/people/([^/]+)", UserHandler),
#(r"/addusertotrip/([^/]+)/([^/]+)", AddUserToTripHandler),
(r"/addusertotrip", AddUserToTripHandler),
(r"/removeuserfromtrip", RemoveUserFromTripHandler),
(r"/followpeople/([^/]+)", FollowUserHandler),
# (r"/managemember/([^/]+)"), ManageMemberHandler),
(r"/unfollowpeople/([^/]+)", UnFollowUserHandler),
(r"/about/terms", Terms),
(r"/about_us", AboutUs),
(r"/about/privacy", Privacy),
(r"/resetpassword", ResetPassword),
(r"/subscribe_trip/([^/]+)", SubscribeTrip),
(r"/unsubscribe_trip/([^/]+)", UnsubscribeTrip),
(r"/postmessage", PostMessageHandler),
(r"/messages", NotificationHandler),
(r"/static/images/(.*)", tornado.web.StaticFileHandler, {"path": "/home/jason/workspace/TripShare/static/images"}),
(r"/post_on_facebook", FaceBookPostHandler),
(r"/getfriends_on_facebook", FaceBookGetFriendsHandler),
(r"/invite_on_facebook", FaceBookInviteHandler),
(r"/sendtripshareinvite", TripShareInviteHandler),
(r"/post_on_twitter", TwitterPostHandler),
(r"/send_email_invite", EmailInviteHandler),
(r"/exception", ExceptionPage),
]
settings = dict(
blog_title=u"Tornado Trip",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
ui_modules={"FriendEntry": FriendEntryModule, "CreateTrip": CreateTripModule},
static_path=os.path.join(os.path.dirname(__file__), "static"),
stylesheets_path=os.path.join(os.path.dirname(__file__), "stylesheets"),
xsrf_cookies=True,
amazon_access_key = options.amazon_access_key,
amazon_secret_key = options.amazon_secret,
facebook_api_key=options.facebook_api_key,
facebook_secret=options.facebook_secret,
google_client_id=options.google_client_id,
google_client_secret=options.google_client_secret,
google_developer_key=options.google_developer_key,
twitter_consumer_key = options.twitter_consumer_key,
twitter_consumer_secret = options.twitter_consumer_secret,
debug = True,
gzip = True,
cookie_secret="11oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
login_url = "/login",
PAYPAL_USERID = options.SANDBOX_API_USER_NAME,
PAYPAL_PASSWORD = options.SANDBOX_API_PASSWORD,
PAYPAL_SIGNATURE = options.SANDBOX_API_SIGNATURE,
PAYPAL_APPLICATION_ID = options.SANDBOX_APPLICATION_ID,
remote_address = '',
MAILCHIMP_API_KEY = options.MAILCHIMP_API_KEY
)
tornado.web.Application.__init__(self, handlers, **settings)
# Have one global connection to the blog DB across all handlers
#===================================================
# self.db = tornado.database.Connection(
# host=options.mysql_host, database=options.mysql_database,
# user=options.mysql_user, password=options.mysql_password)
#===================================================
#self.db = asyncmongo.Client(pool_id='mytestdb', host='127.0.0.1', port=27017, maxcached=10, maxconnections=50, dbname='TripShare')
#self.syncdb = pymongo.Connection("184.169.172.137", 27017).TripShare
self.syncdb = pymongo.Connection("localhost", 27017).TripShare
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from random import randint
from main.page.desktop_v3.product.pe_product import *
from utils.function.general import *
import os, time, sys, json, requests
from selenium.common.exceptions import NoSuchElementException
import urllib.parse
import urllib.request
class ShopPage(BasePage):
payload = {
'action' : 'show_dialog_new_message',
'friend_id' : '295701',
'shop' : '1',
'v' : '',
'bypass_qc' : '1'
}
# instance variable product
#_product_loc = (By.XPATH, "//div[@class='span9']/div[1]")
_product_loc = (By.XPATH, "/html/body/div[1]/section/div[4]/div[2]/div[1]/div")
_list_product_loc = (By.XPATH, "//div[@itemtype='http://schema.org/ItemList']/div")
# instance variable tab
_tab_product_loc = (By.XPATH, "//*[@id='s_shopmenu']/ul/li[1]")
_tab_talk_loc = (By.XPATH, "//*[@id='s_shopmenu']/ul/li[2]")
_tab_review_loc = (By.XPATH, "//*[@id='s_shopmenu']/ul/li[3]")
_tab_info_loc = (By.XPATH, "//*[@id='s_shopmenu']/ul/li[4]")
# instance variable message
_send_msg_loc = (By.CSS_SELECTOR, "div#send-pm button.send-pm.button")
_subj_msg_loc = (By.ID, "message-subject")
_text_msg_loc = (By.ID, "message")
_submit_msg_loc = (By.XPATH, "//*[@id='new-message']/div[5]/button[2]")
_after_submit_loc = (By.XPATH, "//*[@id='rf']/div/button")
# instance variable fav shop
_btn_fav_shop_loc = (By.XPATH, "//*[@id='fave_this_shop']")
_total_shop_fav_loc = (By.XPATH, "//*[@id='gold-info-2']/ul/li[5]/a/div[1]/span/b")
# list domain shop
domain_shop = ['tokoqc14', 'tokoqc15', 'tokoqc16']
target_domain = ""
#Security
_captcha_loc = (By.CSS_SELECTOR, 'div#recaptcha_widget div.mb-10 div input.recaptcha_response_field')
_first_prod_loc = (By.XPATH, "/html/body/div[1]/section/div[4]/div[2]/div[1]/div[1]/a")
def domain(self, site, x=""):
self._open(site, x)
self.target_domain = x
def go_to_product_tab(self):
self.driver.find_element(*self._tab_product_loc).click()
def go_to_talk_product_tab(self):
self.driver.find_element(*self._tab_talk_loc).click()
def go_to_review_tab(self):
self.driver.find_element(*self._tab_review_loc).click()
def go_to_info_shop_tab(self):
self.driver.find_element(*self._tab_info_loc).click()
def do_fav_shop(self):
total_before = self.driver.find_element(*self._total_shop_fav_loc).text
total_before = int(total_before) + 1
try:
element = WebDriverWait(self.driver, 10).until(
EC.visibility_of_element_located((self._btn_fav_shop_loc))
)
time.sleep(10)
print(element.text)
bl = element.click()
time.sleep(1)
total_after = self.driver.find_element(*self._total_shop_fav_loc).text
if (total_before == int(total_after)):
print("Counter OK.")
print(total_before, total_after, bl)
except Exception as inst:
print(inst)
    # click the send-message button (before composing the message)
def click_send_message_button(self):
try:
time.sleep(2)
self.driver.find_element(*self._send_msg_loc).click()
except Exception as inst:
print(inst)
    # click the send-message button (after composing the message)
def click_send_message(self, driver, subject, message):
        print('entered click_send_message method')
try:
self.driver.find_element(*self._send_msg_loc).click()
time.sleep(8)
print('Sent message button clicked')
time.sleep(2)
self.find_element(*self._subj_msg_loc).send_keys(subject)
print('Enter subject succeed')
self.find_element(*self._text_msg_loc).send_keys(message)
print('Write message succeed')
self.find_element(*self._submit_msg_loc).click()
print('Submit message')
time.sleep(5)
self.find_element(*self._after_submit_loc).click()
print('Confirmation done')
except NoSuchElementException:
print('No such element')
def choose_product(self):
self.check_visible_element(*self._product_loc)
condition_product = self.find_element(*self._product_loc)
browser_type = self.driver.capabilities['browserName']
        if condition_product.text != "Tidak ada Produk" and condition_product.text != "No Product":
list_product = self.driver.find_elements(*self._list_product_loc)
i, length = 0, len(list_product)
rand = randint(i, length - 1)
print("Choose product", list_product[rand].text)
if (browser_type == "chrome"):
#list_product[rand].click()
self._click(list_product[rand])
time.sleep(4)
else:
product_name = list_product[rand].find_element(By.TAG_NAME, "b").text
seq = product_name.split(" ")
c = "-".join(seq)
self.driver.get(self.url + self.target_domain + "/" + c)
else:
print("No product in", self.driver.title)
def choose_product_manual (self, product_name):
print("Choose product", product_name)
self.find_element(*self._first_prod_loc).click()
#====================FOR UNIT TESTING PURPOSE, DO NOT DELETE====================#
#bypass captcha
def bypass_send_message(self, people_ID):
self.payload['friend_id'] = people_ID
print(self.payload['friend_id'])
requests.get("https://www.tokopedia.com/ajax/people-4.pl", data=self.payload)
#print (pos.text)
self.payload['friend_id'] = request.get.get('v')
data = urllib.parse.urlencode(self.payload)
data = data.encode('utf-8') # data should be bytes
req = urllib.request.Request("https://www.tokopedia.com/ajax/people-4.pl", self.payload)
response = urllib.request.urlopen("https://www.tokopedia.com/ajax/people-4.pl")
the_page = response.read()
def check_is_captcha_available(self):
self.check_visible_element(self._captcha_loc[0], self._captcha_loc[1])
print('captcha appeared')
#==================FOR UNIT TESTING PURPOSE, DO NOT DELETE=======================#
def __str__(self):
return "Page Toko " + self.driver.title
|
from turtle import Turtle
class Scoreboard(Turtle):
def __init__(self):
super().__init__()
self.count = 0
self.hideturtle()
self.color("white")
self.up()
self.goto(-50,270)
self.down()
self.write(f"Scoreboad : {self.count}",align="left", font=("Arial", 20, "normal"))
def gameover(self):
self.up()
self.goto(0,0)
self.down()
self.write(f"Game over ",align="left", font=("Arial", 20, "normal"))
def increase(self):
self.count +=1
self.clear()
self.write(f"Scoreboad : {self.count}",align="left", font=("Arial", 20, "normal"))
|
#
# Dota info server in Python
# Serves a small HTTP API on port 8000 (see Processor.server_address below).
# The telegram client polls /telegram-poll and gets back the message it should
# push to telegram, or "NONE" if there is no message.
#
# Expected implementation:
# * reads from SQS of all matches and filters them according to a static list
# * generates the messages that need to be sent, and whenever the client
# polls, concatenates them if there are more than one, and sends it back to
# the client.
import http.server
import json
import logging
import re
import threading
import dotainput.local_config
import dotainput.streamer
import dotainput.util
logging.basicConfig(
filename='processor.log',
level=logging.INFO,
format='%(asctime)s %(message)s'
)
# Variables that need to be accessed across threads:
# Accounts of people we care about
default_account_ids_64bit = {
76561197997336439, # dzbug
76561198111698495, # instapicking PL
76561198159705679, # dz's unranked smurf
76561198189446861, # fox (Paul's smurf)
76561198143189634, # Allen's smurf
76561197961774985, # Franklin
76561197979611387, # Sidd
76561197970342819, # Aaron
76561197993621342, # Gilbert (Sloth)
76561198013393830, # RD
76561197999544403, # Hellfire
76561198034473797, # lutz
76561198168192504, # Gilbert's smurf (vvx)
76561197972444552, # Angra
76561198089947113, # Allen's smurf (shadow friend)
76561197971215286, # shadowing
}
class Processor:
"""
Acts as a server for requests from the lua telegram plugin.
Call the process_match method to process a match and potentially add it to
the messages that will be sent via telegram.
"""
def __init__(self):
# Map of 32-bit to 64-bit account IDs
self.account_lookup = \
dict((4294967295 & a, a) for a in default_account_ids_64bit)
# YOU NEED TO ACQUIRE THE LOCK msg_lock TO READ/MODIFY next_msg.
self._msg_lock = threading.Lock()
self._next_msg = None
# Lock for steam_conn
self._conn_lock = threading.Lock()
self._steam_conn = \
dotainput.util.create_steamapi_connection()
self.server_address = ('', 8000)
class BotHandler(http.server.BaseHTTPRequestHandler):
"""
HTTP Handler for requests from the telegram plugin.
"""
def do_GET(b_self):
addplayers_re = re.compile("/telegram-addplayer\?id_64=(\d+)")
removeplayers_re = re.compile(
"/telegram-removeplayer\?id_64=(\d+)"
)
try:
if b_self.path == "/telegram-poll":
# Send reply back to client
next_msg = self.get_next_message()
if next_msg is not None:
b_self._respond(next_msg)
else:
b_self._respond("NONE")
elif b_self.path == "/telegram-latest":
next_msg = self.peek_next_message()
b_self._respond("Queued message: %s" % next_msg)
elif addplayers_re.match(b_self.path):
v = int(addplayers_re.match(b_self.path).group(1))
logging.info("adding player %s" % v)
k = 4294967295 & v
name = self.lookup_name(v)
self.account_lookup[k] = v
b_self._respond("Added player: %s" % name)
elif removeplayers_re.match(b_self.path):
id_64 = \
int(removeplayers_re.match(b_self.path).group(1))
k = 4294967295 & id_64
self.account_lookup.pop(k, None)
b_self._respond("Removed player: %s" %
self.lookup_name(id_64))
elif b_self.path == "/telegram-listplayers":
print("Listing players.")
player_names = [
self.lookup_name(p)
for p in self.account_lookup.values()]
b_self._respond("Tracked players:\n%s" %
"\n".join(player_names))
else:
b_self._respond("Unknown path: %s" % b_self.path)
except Exception as e:
b_self._respond("Internal error processing: %s" % str(e))
def _respond(b_self, text):
logging.debug("Sending response: %s" % text)
b_self.send_response(200)
b_self.send_header('Content-type', 'text/html')
b_self.end_headers()
b_self.wfile.write(bytes(text, encoding="utf-8"))
self._httpd = http.server.HTTPServer(
self.server_address,
BotHandler)
def start(self):
"""
Starts the HTTP server in a different thread. Cannot be stopped ...
yet.
"""
threading.Thread(target=self._httpd.serve_forever).start()
def process_match(self, match):
"""
Process a single match.
:param match: JSON representation of a match (from steam API).
"""
players = [
player["account_id"]
for player in match["players"]
if "account_id" in player # Bots have no account_id
]
interesting_players = [
p for p in players if p in list(self.account_lookup.keys())
]
if len(interesting_players) > 0:
player_names = [
self.lookup_name(self.account_lookup[aid_32])
for aid_32 in interesting_players
]
message = "{players} just finished match {dotabuff_link}"\
.format(
players=",".join(str(p) for p in player_names),
dotabuff_link="http://www.dotabuff.com/matches/"
"{match}".format(
match=match["match_id"]
)
)
logging.info("Found interesting game: %s" % message)
self._msg_lock.acquire()
if self._next_msg is None:
self._next_msg = message
else:
self._next_msg = self._next_msg + "\n\n" + message
self._msg_lock.release()
def lookup_name(self, aid_64):
"""
Look up the display name of a player given their 64 bit ID.
:param aid_64: 64 bit ID of player to look up.
:return: Player name, or "Player <aid_64>" if an error was encountered.
"""
self._conn_lock.acquire()
self._steam_conn.request(
"GET",
"/ISteamUser/GetPlayerSummaries/v0002"
"?key={key}&steamids={aid_64}".format(
key=dotainput.local_config.DOTA2_API_KEY,
aid_64=aid_64
)
)
try:
response = self._steam_conn.getresponse().read()
playerinfo = json.loads(response.decode("utf-8"))
players = playerinfo["response"]["players"]
assert len(players) == 1, "only requested one steam ID"
self._conn_lock.release()
return players[0]["personaname"]
except Exception as err:
logging.error(
"Got an error when looking up name for %s. Error: %s" %
(aid_64, str(err))
)
self._conn_lock.release()
self._steam_conn = \
dotainput.util.create_steamapi_connection()
return "Player number: %s" % aid_64
def get_next_message(self):
"""
:return: The next message to be the response to telegram-poll, or None
if no message is to be sent. Resets the next message to None afterward.
"""
self._msg_lock.acquire()
response = self._next_msg
self._next_msg = None
self._msg_lock.release()
return response
def peek_next_message(self):
"""
:return: the next message to be sent, without resetting it to None.
"""
self._msg_lock.acquire()
response = self._next_msg
self._msg_lock.release()
return response
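
# Usage sketch (illustrative; requires the dotainput package and a valid
# DOTA2_API_KEY in dotainput/local_config.py):
#   processor = Processor()
#   processor.start()                 # serves HTTP on port 8000 in a thread
#   processor.process_match(match)    # feed in match JSON from the Steam API
# The telegram plugin (or curl) can then poll the queued messages:
#   GET http://localhost:8000/telegram-poll          -> next message or "NONE"
#   GET http://localhost:8000/telegram-listplayers   -> tracked player names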
|
#!/usr/bin/env python3
import sys
from util.aoc import file_to_day
from util.input import load_data
def main(test=False):
dotdata = []
instructions = []
data = load_data(file_to_day(__file__), test)
for idx, line in enumerate(data):
if line == "":
dotdata = data[:idx]
instructions = data[idx + 1 :]
break
dots = set(tuple(map(int, n.split(","))) for n in dotdata)
for idx, inst in enumerate(instructions):
dots = fold_grid(dots, inst.split(" ")[-1])
if idx == 0:
p1 = len(dots)
print("2021:13:1 =", p1)
print("2021:13:2 =")
print_grid(dots)
def fold_grid(dots, pos):
axis, coord = pos.split("=")
coord = int(coord)
new = set()
for dot in dots:
index = "xy".index(axis)
if dot[index] < coord:
new.add(dot)
else:
offset = coord * 2
if index == 0:
new.add((offset - dot[0], dot[1]))
else:
new.add((dot[0], offset - dot[1]))
return new
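
# Illustrative example: folding along the vertical line x=5 maps the dot
# (6, 0) onto (4, 0) and leaves (0, 1) where it is, so
#   fold_grid({(6, 0), (0, 1)}, "x=5") == {(4, 0), (0, 1)}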
def print_grid(dots):
maxx = max([d[0] for d in dots])
maxy = max([d[1] for d in dots])
for y in range(maxy + 1):
for x in range(maxx + 1):
if (x, y) in dots:
print("#", end="")
else:
print(" ", end="")
print()
if __name__ == "__main__":
test = len(sys.argv) > 1 and sys.argv[1] == "test"
main(test)
|
import csv
import sys
from defs import hier
def summarize(fn, nfn):
f = csv.reader(file(fn, "rb"))
fo = open(nfn, "wb")
header = f.next()
cams = Node("Root")
for l in f:
for x in zip(header, l):
if x in hier:
#print hier[x][0]
cams.inc_child(*hier[x][0])
cams.p()
class Node:
def __init__(self, name):
self.name = name
self.children = {}
self.val = 1
def inc_child(self, *x):
if not x: return # empty recursion...
if x[0] in self.children:
self.children[x[0]].val += 1
else:
self.add_child(x[0]).inc_child(*x[1:])
def add_child(self, name):
if name in self.children:
return self.children[name]
x = Node(name)
self.children[name] = x
return x
def count(self):
# Yes I know this is recursive.
return sum([v.count() for k,v in self.children.items()]) + self.val
def p(self, level=0):
print "%s %s %d" % (" "*level, self.name, self.count())
print "%s %r" % (" "*level, self.children)
for k,v in self.children.items():
v.p(level+2)
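
# Illustrative example of the Node hierarchy (Python 2 syntax, matching the
# rest of this script):
#   root = Node("Root")
#   root.inc_child("Canon", "EOS")   # creates Root -> Canon -> EOS
#   root.inc_child("Canon", "EOS")   # bumps the existing "Canon" counter
#   root.p()                         # prints the tree with aggregate counts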
if __name__ == "__main__":
summarize(*sys.argv[1:])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-05 13:20
from __future__ import unicode_literals
import django.db.models.deletion
import django.db.models.manager
import django_extensions.db.fields
import elections.managers
import elections.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("elections", "0046_update_status")]
operations = [
migrations.CreateModel(
name="ModerationHistory",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
],
options={
"ordering": ("-modified", "-created"),
"get_latest_by": "modified",
"abstract": False,
},
),
migrations.CreateModel(
name="ModerationStatus",
fields=[
(
"short_label",
models.CharField(
choices=[
(
elections.models.ModerationStatuses(
"Suggested"
),
"Suggested",
),
(
elections.models.ModerationStatuses("Rejected"),
"Rejected",
),
(
elections.models.ModerationStatuses("Approved"),
"Approved",
),
(
elections.models.ModerationStatuses("Deleted"),
"Deleted",
),
],
max_length=32,
primary_key=True,
serialize=False,
),
),
("long_label", models.CharField(max_length=100)),
],
),
migrations.AlterModelManagers(
name="election",
managers=[
("public_objects", django.db.models.manager.Manager()),
(
"private_objects",
elections.managers.PrivateElectionsManager(),
),
],
),
migrations.AddField(
model_name="moderationhistory",
name="election",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="elections.Election",
),
),
migrations.AddField(
model_name="moderationhistory",
name="status",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="elections.ModerationStatus",
),
),
migrations.AddField(
model_name="election",
name="moderation_statuses",
field=models.ManyToManyField(
through="elections.ModerationHistory",
to="elections.ModerationStatus",
),
),
]
|
# Author Yinsen Miao
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("poster")
dat = pd.read_pickle("../data/cleandata2.pkl")
dat_municode = dat[['PARID', 'MUNICODE']].drop_duplicates()['MUNICODE'].value_counts()
municodes = [str(code) for code in dat_municode.index]
dat_municode.index = municodes
# get housing HVI index
hvi_dat = pd.read_csv("../data/muni_index.csv",
parse_dates=["SALEDATE"],
index_col=["SALEDATE"])
# compute annual returns over the past 10 years
hvi_yret = hvi_dat["2005-12-31": "2015-12-31"].resample("Y").last().pct_change().dropna()
n, _ = hvi_yret.shape
avg_rets = round((((hvi_yret + 1).prod()) ** (1.0 / n) - 1.0) * 100., 2)
avg_stds = round((hvi_yret).std() * 100, 2)
dat_summary = pd.concat([dat_municode[municodes], avg_rets[municodes], avg_stds[municodes]], axis=1).reset_index()
dat_summary.columns = ["MUNICODE", "Count", "Return", "Risk"]
dat_summary = dat_summary.sort_values("Return", ascending=False)
# write municode, return risks
topn = 20
top_municodes = dat_summary.head(topn)["MUNICODE"].to_list()
dat_summary.to_csv("../clean_data/counts_rets_rsks.csv")
# hvi_dat.to_csv("../data/top_hvi.csv", index_label="ZIP")
nrow = 5
ncol = int(topn / nrow)
fig, axs = plt.subplots(ncol, nrow, figsize=(25, 12))
for idx, municode in enumerate(top_municodes):
i, j = idx // nrow, idx % nrow
axs[i, j].plot(hvi_dat.index, hvi_dat[municode])
if i != ncol - 1:
axs[i, j].get_xaxis().set_ticks([])
else:
axs[i, j].tick_params(axis='x', labelrotation=45)
if j != 0:
axs[i, j].get_yaxis().set_ticks([])
axs[i, j].set_title("%s" % municode)
axs[i, j].set_ylim(80, 450)
axs[i, j].axvline(x=pd.to_datetime("2015-12-31"), color="red", alpha=0.5, linestyle="--", linewidth=3)
# axs[i, j].axvline(x=pd.to_datetime("2020-01-31"), color="blue", alpha=0.3, linestyle="--", linewidth=3)
plt.savefig("../images/municode.png")
plt.close()
# compute statistics
hvi_dat = pd.read_csv("../data/muni_index.csv",
parse_dates=["SALEDATE"],
index_col=["SALEDATE"])
port_dat = hvi_dat[top_municodes]
port_dat.to_csv("../clean_data/top20assets.csv") # save selected municode for portfolio optimization
|
from setuptools import setup
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="pycolate",
version="0.0.42",
author="Jack Harrington",
author_email="jackjharrington@icloud.com",
description="Generates site percolation data and illustrations.",
packages=["pycolate"],
install_requires=["numpy", "scipy", "pillow", "sympy"],
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Scientific/Engineering :: Physics",
],
)
|
""" File: cover_list_code.py
Author: Abraham Aruguete
Purpose: This is a script designed to test each of the lines in list_insert.py"""
from list_insert import *
def main():
    node1 = ListNode((-8, -7))
    node2 = ListNode((2, 5))
    node3 = ListNode((-9, -7))
sorted_list_insert(None, node2)
sorted_list_insert(node1, node2)
sorted_list_insert(node3, node1)
sorted_list_insert(node1, node3)
print_list(None)
print_list(node1)
main()
|
from grid import *
from particle import Particle
from utils import *
from setting import *
import math
from math import sin, cos, radians
import random
from scipy import stats
import numpy as np
MARKER_ANGLE_SIGMA = 10
MARKER_DIST_SIGMA = 0.5
def motion_update(particles, odom):
""" Particle filter motion update
Arguments:
        particles -- input list of particles representing the belief p(x_{t-1} | u_{t-1})
before motion update
odom -- odometry to move (dx, dy, dh) in *robot local frame*
    Returns: the list of particles representing the belief \tilde{p}(x_{t} | u_{t})
after motion update
"""
motion_particles = []
for particle in particles:
# add noise to each particle's odom then move it accordingly
new_particle = Particle(particle.x, particle.y, particle.h)
new_odom = add_odometry_noise(odom, heading_sigma=ODOM_HEAD_SIGMA, trans_sigma=ODOM_TRANS_SIGMA)
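        # rotate the noisy local-frame translation into the world frame using
        # the particle's current heading before applying it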
dx, dy = rotate_point(new_odom[0], new_odom[1], particle.h)
new_particle.x += dx
new_particle.y += dy
new_particle.h += new_odom[2]
motion_particles.append(new_particle)
return motion_particles
# ------------------------------------------------------------------------
def measurement_update(particles, measured_marker_list, grid):
""" Particle filter measurement update
Arguments:
        particles -- input list of particles representing the belief \tilde{p}(x_{t} | u_{t})
                before measurement update (but after motion update)
measured_marker_list -- robot detected marker list, each marker has format:
measured_marker_list[i] = (rx, ry, rh)
rx -- marker's relative X coordinate in robot's frame
ry -- marker's relative Y coordinate in robot's frame
                rh -- marker's relative heading in robot's frame, in degrees
        * Note that the robot can only see markers which are in its camera field of view,
          which is defined by ROBOT_CAMERA_FOV_DEG in setting.py
        * Note that the robot can see multiple markers at once, and may not see any at all
grid -- grid world map, which contains the marker information,
see grid.py and CozGrid for definition
Can be used to evaluate particles
    Returns: the list of particles representing the belief p(x_{t} | u_{t})
after measurement update
"""
measured_particles = []
particles_list = []
particles_weight = []
marker_list= []
match_list = []
weight_sum = 0.0
num_robot_markers = len(measured_marker_list)
resampling_count = 0
for particle in particles:
# throw away particles that went out of bound
if not grid.is_in(particle.x, particle.y):
continue
# the list of marker seen by a particle
particle_marker_list = particle.read_markers(grid)
num_particle_markers = len(particle_marker_list)
weight = 0.0
#marker noise add_marker_measurement_noise(m, trans_sigma=MARKER_TRANS_SIGMA, rot_sigma=MARKER_ROT_SIGMA)
if num_robot_markers == 0 and num_particle_markers == 0:
# no markers seen
# minimum weight. it is not informative
weight = stats.norm.pdf(2*MARKER_ANGLE_SIGMA, loc=0.0, scale=MARKER_ANGLE_SIGMA)
elif num_robot_markers > 0 and num_particle_markers > 0:
weight = 0.0
# both robot and particle see some number of markers
for robot_marker in measured_marker_list:
for particle_maker in particle_marker_list:
#particle_maker = add_marker_measurement_noise(particle_maker, trans_sigma=MARKER_TRANS_SIGMA, rot_sigma=MARKER_ROT_SIGMA)
# difference between robot's angle to marker and particle's angle to marker
angle_diff = abs(robot_marker[2] - particle_maker[2])
# distance between robot and marker
robot_dist = math.sqrt(robot_marker[0]**2 + robot_marker[1]**2)
# distance between particle and marker
particle_dist = math.sqrt(particle_maker[0]**2 + particle_maker[1]**2)
                    # difference between robot's distance to the marker and particle's distance to the marker
distance_diff = abs(robot_dist - particle_dist)
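                    # weight this (robot marker, particle marker) pair by the product of
                    # Gaussian likelihoods of the angle error and the distance error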
angle_weight = stats.norm.pdf(angle_diff, loc=0.0, scale=MARKER_ANGLE_SIGMA)
distance_weight = stats.norm.pdf(distance_diff, loc=0.0, scale=MARKER_DIST_SIGMA)
weight += (angle_weight * distance_weight)
weight = weight / (num_robot_markers * num_particle_markers)
else:
            # only one of the robot / particle sees any markers -- an unlikely match,
            # so assign the same small baseline weight
weight = stats.norm.pdf(2*MARKER_ANGLE_SIGMA, loc=0.0, scale=MARKER_ANGLE_SIGMA)
particles_list.append(particle)
        # Multiply by a billion because the weights are very small numbers most of the time
        # and would otherwise be rounded to zero too readily
particles_weight.append(weight*1000000000)
marker_list.append(particle_marker_list)
match_list.append((num_particle_markers, num_robot_markers))
weight_sum += weight*1000000000
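    # resample: each particle is duplicated roughly in proportion to its normalized
    # weight, so high-likelihood particles dominate the new particle set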
for i in range(len(particles_list)):
normalized_weight = particles_weight[i] / weight_sum
resampling_count = np.around(PARTICLE_COUNT * normalized_weight)
# resampling
for _ in range(math.floor(resampling_count)):
measured_particles.append(Particle(particles_list[i].x, particles_list[i].y, particles_list[i].h))
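    # also add some uniformly random particles (a common trick so the filter can
    # recover if the current particle set has drifted away from the true pose)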
measured_particles.extend(Particle.create_random(100, grid))
return measured_particles
|
from django.db import models
# Create your models here.
class Inventory(models.Model):
"""docstring for Inventory"""
    name = models.CharField(max_length=100, default='Something')
    image = models.ImageField(null=True, default='No-image-available.png')
    description = models.CharField(max_length=100, default='Describe Something')
    price = models.IntegerField(default=0)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import datetime
from src.analyse import Analyse
from src.data import Vir
parser = argparse.ArgumentParser()
parser.add_argument("--date",
help="date in format YYYY-MM-DD (not needed if it's today)")
parser.add_argument("--infections",
help="number of infections with 2019-nCoV today")
parser.add_argument("--deaths",
help="number of deaths from 2019-nCoV today")
parser.add_argument("--plot",
help="ploting values (results in analysis)",
action="store_true")
args = parser.parse_args()
if __name__ == '__main__':
data = Vir()
analyse = Analyse(data=data)
analyse.calc_factors()
analyse.derive()
if args.infections is not None and args.deaths is not None:
if args.date is None:
date = datetime.date.today()
else:
try:
date = datetime.date.fromisoformat(args.date)
except ValueError as e:
print("A wild error appeared while parsing the date.\n\n")
print(e)
try:
infections = int(args.infections)
deaths = int(args.deaths)
data.add_day(date, infections, deaths)
except ValueError as e:
print("A wild error appeared while parsing the infections or deaths value.\n\n")
print(e)
if args.plot is True:
analyse.plot_defined()
del analyse, data
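# Example invocation (the script file name is hypothetical, values illustrative):
#   python3 corona.py --date 2020-03-01 --infections 120 --deaths 3 --plot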
|
import sys
import os
import fastjet as fj
import pyhepmc_ng
import tqdm
def main():
input_file="$HOME/data/jetscape/test_out.hepmc"
if len(sys.argv) > 1:
input_file = sys.argv[1]
input_file = os.path.expandvars(input_file)
print('[i] reading from:', input_file)
# input = pyhepmc_ng.ReaderAsciiHepMC2(input_file)
input = pyhepmc_ng.ReaderAscii(input_file)
if input.failed():
print ("[error] unable to read from {}".format(input_file))
return
nevents = 1000
# print the banner first
fj.ClusterSequence.print_banner()
print()
jet_R0 = 0.4
jet_def = fj.JetDefinition(fj.antikt_algorithm, jet_R0)
jet_selector = fj.SelectorPtMin(0.0) & fj.SelectorPtMax(200.0) & fj.SelectorAbsEtaMax(3)
all_jets = []
event = pyhepmc_ng.GenEvent()
pbar = tqdm.tqdm(range(nevents))
while not input.failed():
e = input.read_event(event)
if input.failed():
break
fjparts = []
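        # keep only final-state particles: HepMC status == 1 with no decay vertex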
for i,p in enumerate(event.particles):
if p.status == 1 and not p.end_vertex:
psj = fj.PseudoJet(p.momentum.px, p.momentum.py, p.momentum.pz, p.momentum.e)
psj.set_user_index(i)
fjparts.append(psj)
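        # cluster the selected particles with anti-kt (R = 0.4) and apply the
        # pt / |eta| jet selection defined above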
jets = jet_selector(jet_def(fjparts))
all_jets.append([ [j.pt(), j.eta()] for j in jets])
pbar.update()
        # NOTE: hjetpt is not defined anywhere in this script, so the histogram
        # fill below is left commented out; jet pt/eta are already collected in all_jets
        # for j in jets:
        #     hjetpt.Fill(j.perp())
if pbar.n >= nevents:
break
pbar.close()
if __name__ == '__main__':
main()
|
import pandas as pd
import numpy as np
import os
from scipy import stats
from scipy.stats import norm
from sqlalchemy import create_engine
import logging
import pickle
logger = logging.getLogger(__name__)
def prediction(model, city, bedrooms, bathrooms, floors, waterfront, condition, sqft_basement, yr_built, yr_renovated,lot_log):
"""Takes inputs and uses city selection to index models pickle and predict house price.
Arguments:
        model -- collection of fitted per-city models, indexed by city
        city {int} -- selected city for model index
bedrooms {int} -- bedroom count
bathrooms{int} -- bathroom count
floors {int} -- floor count
waterfront {int} -- binary variable for waterfront
condition {int} -- condition of home
sqft_basement {int} -- binary variable for basement
yr_built {int} -- year built
yr_renovated {int} -- binary variable for renovations
lot_log {int} -- sqft of lot
Returns:
price {str}-- Formatted dollar amount of predicted house price.
"""
model = model[int(city)]
lot_log = np.log(int(lot_log))
    # Create a row of data that combines all user inputs
title={"bedrooms":[bedrooms], "bathrooms":[bathrooms], "floors":[floors], "waterfront": [waterfront], "condition":[condition], "sqft_basement":[sqft_basement],"yr_built":[yr_built], "yr_renovated":[yr_renovated], "lot_log":[lot_log]}
test = pd.DataFrame(title)
# Make prediction from the loaded random forest model
prediction = model.predict(test)
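    # the model appears to have been fit on log(price), so exponentiate the
    # prediction to recover a dollar amount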
result = int(np.exp(prediction))
price = '${:0,.0f}'.format(result)
return price
|
import logging
from microstrategy_api.task_proc.memoize_class import MemoizeClass
from microstrategy_api.task_proc.object_type import ObjectType, ObjectTypeIDDict, ObjectSubType, ObjectSubTypeIDDict
class MetadataObjectNonMemo(object):
"""
Object encapsulating a generic metadata object on MicroStrategy
Args:
guid (str): guid for this object
name (str): the name of this object
Attributes:
guid (str): guid for this object
name (str): the name of this object
"""
def __init__(self, guid, name, metadata_object_type=None):
self.log = logging.getLogger("{mod}.{cls}".format(mod=self.__class__.__module__, cls=self.__class__.__name__))
self.log.setLevel(logging.DEBUG)
self.guid = guid
self.name = name
self._type = None
self._sub_type = None
if metadata_object_type:
self._type = metadata_object_type
else:
self._type = self.__class__.__name__
def __repr__(self):
return "<{self._type} name='{self.name}' guid='{self.guid}'".format(self=self)
def __str__(self):
if self.name:
return "[{self._type}: {self.name}]".format(self=self)
else:
return self.__repr__()
@property
def type(self):
return self._type
@type.setter
def type(self, value):
if value is None:
self._type = value
elif isinstance(value, ObjectType):
self._type = value
elif isinstance(value, int):
self._type = ObjectTypeIDDict[value]
elif isinstance(value, str):
self._type = ObjectType[value]
else:
raise ValueError("{v} is not ObjectType, int, or str".format(v=value))
@property
def sub_type(self):
return self._sub_type
@sub_type.setter
def sub_type(self, value):
if value is None:
self._sub_type = value
elif isinstance(value, ObjectSubType):
self._sub_type = value
elif isinstance(value, int):
self._sub_type = ObjectSubTypeIDDict[value]
elif isinstance(value, str):
self._sub_type = ObjectSubType[value]
else:
raise ValueError("{v} is not ObjectSubType, int, or str".format(v=value))
class MetadataObject(MetadataObjectNonMemo, metaclass=MemoizeClass):
def __init__(self, guid, name, metadata_object_type=None):
super().__init__(guid, name, metadata_object_type)
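# Usage sketch (hedged): because MetadataObject uses the MemoizeClass metaclass,
# constructing it twice with the same arguments should return the same cached
# instance (the guid and name below are made up for illustration):
#   a = MetadataObject('0123456789ABCDEF', 'Revenue')
#   b = MetadataObject('0123456789ABCDEF', 'Revenue')
#   assert a is b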
|
import glob
import pandas as pd
from models.song import Song
from models.artist import Artist
from models.songPlay import SongPlay
from models.time import Time
from models.user import User
import numpy as np
from bd import Base,session,engine
from sqlalchemy import exc
# Function that gets the names of all the files
def get_nameFiles(path):
files = [f for f in glob.glob(path + "**/*.json", recursive=True)]
return files
# Function that converts the content of all the files into a DataFrame
def files_To_DataFrame(Filejson):
df_full= pd.DataFrame()
for file in Filejson:
#Read file
df = pd.read_json(file,lines=True,encoding='utf-8')
#Data Frame
df_full = df_full.append(df)
return df_full
def main():
    ## Process the files in 'data/song_data/'
    # Get the names of all the files
files = get_nameFiles('data/song_data/')
    # Build a DataFrame with the information from all the files
df_song_data = files_To_DataFrame(files)
    # Get the data for the SONG table
df_song = df_song_data[['song_id','artist_id', 'title','year', 'duration']].dropna(subset=['song_id']).drop_duplicates(['song_id'])
    # Get the data for the ARTIST table
df_artist = df_song_data[['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']].dropna(subset=['artist_id']).drop_duplicates(['artist_id'])
df_artist = df_artist.rename(columns={'artist_name': 'name', 'artist_location': 'location', 'artist_latitude': 'latitude', 'artist_longitude': 'longitude'})
    # Process the files in 'data/log_data/'
    # Get the names of all the files
files = get_nameFiles('data/log_data/')
    # Build a DataFrame with the information from all the files
df_long_data = files_To_DataFrame(files)
    # Get the data for the TIME_FORMAT table
ts = pd.to_datetime(df_long_data['ts'], unit='ms')
timeFormat = (df_long_data['ts'].values, ts.dt.hour.values, ts.dt.day.values, ts.dt.week.values, ts.dt.month.values, ts.dt.year.values, ts.dt.weekday.values)
column_labels = ['start_time', 'hour', 'day', 'week', 'month', 'year', 'weekday']
df_time_format= pd.DataFrame(data=list(zip(*timeFormat)), columns=column_labels)
df_time_format = df_time_format.drop_duplicates(['start_time'])
    # Get the data for the USER table
convert_dict = {'user_id': int }
df_user = df_long_data[['userId', 'firstName', 'lastName', 'gender', 'level']].rename(columns={'userId': 'user_id'}).replace('', np.nan).dropna(subset=['user_id'])
df_user = df_user.astype(convert_dict)
df_user = df_user.drop_duplicates(['user_id'])
    # Get the data for the SONG_PLAY table
df_song_data_aux = df_song_data[['artist_id','song_id', 'title','artist_name']].rename(columns={'title': 'song', 'artist_name': 'artist'})
df_long_data_aux = df_long_data[['userId', 'sessionId','userAgent','level','location','ts','artist','song']]
df_song_play_join = pd.merge(df_long_data_aux, df_song_data_aux, on=['song','artist'], how='left')
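    # left join: attach song_id / artist_id to each log entry by matching on the
    # (song, artist) name pair; plays with no catalog match keep NaN ids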
df_song_play_join = df_song_play_join.rename(columns={'ts': 'start_time', 'sessionId': 'session_id','userAgent': 'user_agent','userId': 'user_id'}).replace('',0)
    # Create the songplay_id index
df_song_play_join['songplay_id'] = df_song_play_join.index
    # Create the objects to be saved to the database
listObjectSong = [Song(**kwargs) for kwargs in df_song.to_dict(orient='records')]
listObjectArtist = [Artist(**kwargs) for kwargs in df_artist.to_dict(orient='records')]
listObjectTime = [Time(**kwargs) for kwargs in df_time_format.to_dict(orient='records')]
listObjectUser = [User(**kwargs) for kwargs in df_user.to_dict(orient='records')]
    # drop the join-only name columns before building SongPlay objects
    df_song_play_join = df_song_play_join.drop(['song', 'artist'], axis=1)
    listObjectSongPlay = [SongPlay(**kwargs) for kwargs in df_song_play_join.to_dict(orient='records')]
    # Drop the tables if they exist, then create them
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
    # Save the objects
try:
session.bulk_save_objects(listObjectSong)
session.bulk_save_objects(listObjectArtist)
session.bulk_save_objects(listObjectTime)
session.bulk_save_objects(listObjectUser)
session.bulk_save_objects(listObjectSongPlay)
session.commit()
session.close()
except exc.SQLAlchemyError as ex:
session.close()
        print('Exception:', ex)
if __name__ == "__main__":
main()
|
with open("TChiWZAll.dat") as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
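# each line is assumed to be a file path with at least nine '/'-separated
# components; the ninth component is taken as the file name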
for a in content:
    fileName = a.split("/")[8]
    print fileName
|
from arduino_graphing import DataHandler


class Logger():
    def __init__(self, beer_logs, fridge_logs, time_logs):
        self.beer_logs = beer_logs
        self.fridge_logs = fridge_logs
        self.time_logs = time_logs

    def input_data(self):
        # the original file is truncated here: when no beer readings had been
        # logged yet it opened a CSV file, but the path was not given
        if len(self.beer_logs) == 0:
            pass  # csv = open(...)  -- path missing in the original


# module-level demo from the original file: record one beer/fridge reading
# taken from the Arduino data handler
beer_logs = []
fridge_logs = []
time_logs = [0]
beer_logs.append(DataHandler.bt)
fridge_logs.append(DataHandler.ft)
time_logs.append(len(beer_logs))
|