# LEVEL 1
# http://www.pythonchallenge.com/pc/def/map.html
scrambled = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj."
new = ""
first = ord('a')
last = ord('z')
print(scrambled)
for c in scrambled:
if c not in ".' ()":
num = ord(c) + 2
if num > last:
num = first + num - last - 1
new += chr(num)
else:
new += c
print(new)
str1 = "".join([chr(x) for x in range(first, last + 1)])
str2 = "".join([chr(first + x + 2 - last - 1) if x + 2 > last else chr(x + 2) for x in range(first, last + 1)])
print(str1)
print(str2)
table = str.maketrans(str1, str2)
print(scrambled.translate(table))
print("http://www.pythonchallenge.com/pc/def/map.html".translate(table))
|
import redis as rd
import json
class User:
def __init__(self, id):
self.id = id
self.cache = rd.StrictRedis()
def save_thumbs_change(self, track_id, change):
try:
data = self.get_data()
except AttributeError:
# must be no data yet
data = {'thumbs_tracks': {}}
tracks = data['thumbs_tracks']
if track_id not in tracks:
tracks[track_id] = {
'was_upvoted': False,
'was_downvoted': False
}
if change == 'up':
tracks[track_id]['was_upvoted'] = not tracks[track_id]['was_upvoted']
else:
tracks[track_id]['was_downvoted'] = not tracks[track_id]['was_downvoted']
self.save_data(data)
    def get_data(self):
        # cache.get returns None for a missing key; .decode then raises
        # AttributeError, which save_thumbs_change treats as "no data yet".
        data = self.cache.get(self.id)
        data = json.loads(data.decode('utf-8'))
        return data
def save_data(self, data):
data = json.dumps(data)
self.cache.set(self.id, data)
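# A minimal usage sketch (assumes a Redis server on localhost with default
# settings; the user and track ids below are hypothetical):
if __name__ == '__main__':
    u = User('user:42')
    u.save_thumbs_change('track:7', 'up')    # toggles was_upvoted on
    u.save_thumbs_change('track:7', 'down')  # toggles was_downvoted on
    print(u.get_data())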
|
##ok so i'm gonna make 5 functions to draw things;
##circle, rectangle, python logo, and star, and then i make one up
##[probably my name].
##then all i need is a loop
## to make the program ask if they have a picture they wanna draw,
##a loop inside that asking what picture,
##a try/except block to try to open the file
## and print a warning if it doesn't open right.
##then the program should open the file,
##draw the picture,
##clear the screen,
##and ask if you want to draw something else
import turtle
not_integers = ('not_int')
def try_open(file):
    try:
        opening_file = open(file, 'r')
        opening_file.close()
        return True
    except IOError:
        print("I'm sorry, that file cannot be opened, please try again.")
        return False
    except Exception:
        print("Please try again")
        return False
def set_color():
    Line = line.split()
    color = str(Line[1])
    turtle.color(color)
def draw_rectangle():
Fline = line.split()
if Fline[1] == 'not_int':
print(Fline)
print("I'm sorry, I cannot understand that integer")
return
    if len(Fline) < 5:
print(Fline)
print("I'm sorry, I do not understand that value")
return
x = int(Fline[1])
y = int(Fline[2])
width = int(Fline[3])
height = int(Fline[4])
turtle.penup()
turtle.setpos(x, y)
turtle.setheading(0)
turtle.pendown()
turtle.begin_fill()
turtle.forward(width)
turtle.setheading(-90)
turtle.forward(height)
turtle.setheading(180)
turtle.forward(width)
turtle.setheading(90)
turtle.forward(height)
turtle.end_fill()
def draw_circle():
    Line = line.split()
    if Line[0] == 'color':
        return
    x = int(Line[1])
    y = int(Line[2])
    radius = int(Line[3])
turtle.penup()
turtle.setpos((x+radius),(y))
turtle.pendown()
turtle.begin_fill()
turtle.circle(radius)
turtle.end_fill()
def draw_line():
Bits_of_Fline = line.split()
x = int(Bits_of_Fline[1])
y = int(Bits_of_Fline[2])
angle = int(Bits_of_Fline[3])
length = int(Bits_of_Fline[4])
turtle.penup()
turtle.setpos(x, y)
turtle.pendown()
turtle.setheading(angle)
turtle.forward(length)
draw_a_file = "YES"
while draw_a_file in ('YES', 'Y'):
    open_file = input("What file would you like to open? ")
    if open_file.upper() == 'QUIT':
        break
    if not try_open(open_file):
        continue
    F = open(open_file, 'r')
    for line in F:
        line = line.lower()
        if 'color' in line:
            set_color()
        if 'rect' in line:
            draw_rectangle()
        if 'circle' in line:
            draw_circle()
        if 'line' in line:
            draw_line()
        if 'bigstar' in line:
            print("I'm sorry, that's not an understandable command.")
    F.close()
    draw_again = input("Do you have something else you would like to draw? ").upper()
    if draw_again in ('Y', 'YES'):
        turtle.reset()
        print("Excellent! Let's do this again!")
    else:
        print("Thank you for playing")
        break
turtle.done()
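# A hypothetical example of a command file this program can draw, matching the
# formats the parsers above expect (color <name>; rect <x> <y> <width> <height>;
# circle <x> <y> <radius>; line <x> <y> <angle> <length>):
#
#   color red
#   rect -50 50 100 60
#   circle 0 -40 30
#   line -80 -80 45 120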
|
#!/usr/bin/env python
from os.path import join
from functools import partial
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits import basemap
import netCDF4 as nc4
from e3sm_case_output import E3SMCaseOutput, day_str
abs_cmap = plt.get_cmap('BuGn')
cmap = plt.get_cmap('coolwarm')
bmap = basemap.Basemap(lon_0=180.)
# Forward/inverse transforms for a sine-of-latitude y-axis
# (equal-area spacing of the latitude ticks).
def forward(a):
    a = np.deg2rad(a)
    return np.sin(a)
def inverse(a):
    a = np.arcsin(a)
    return np.rad2deg(a)
START_DAY = 3
END_DAY = 15
DAILY_FILE_LOC = "/p/lscratchh/santos36/timestep_daily_avgs_lat_lon"
USE_PRESAER = False
days = list(range(START_DAY, END_DAY+1))
ndays = len(days)
suffix = '_d{}-{}'.format(day_str(START_DAY), day_str(END_DAY))
if USE_PRESAER:
suffix += '_presaer'
log_file = open("plot_2D_log{}.txt".format(suffix), 'w')
if USE_PRESAER:
REF_CASE = E3SMCaseOutput("timestep_presaer_ctrl", "CTRLPA", DAILY_FILE_LOC, START_DAY, END_DAY)
TEST_CASES = [
E3SMCaseOutput("timestep_presaer_all_10s", "ALL10PA", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_presaer_CLUBB_MG2_10s", "CLUBBMICRO10PA", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_presaer_ZM_10s", "ZM10PA", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_presaer_CLUBB_MG2_10s_ZM_10s", "CLUBBMICRO10ZM10PA", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_presaer_cld_10s", "CLD10PA", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_presaer_ZM_10s_lower_tau", "ZM10LTPA", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_presaer_CLUBB_MG2_10s_ZM_10s_lower_tau", "CLUBBMICRO10ZM10LTPA", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_presaer_cld_10s_lower_tau", "CLD10LTPA", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_presaer_all_10s_lower_tau", "ALL10LTPA", DAILY_FILE_LOC, START_DAY, END_DAY),
]
else:
REF_CASE = E3SMCaseOutput("timestep_ctrl", "CTRL", DAILY_FILE_LOC, START_DAY, END_DAY)
TEST_CASES = [
E3SMCaseOutput("timestep_all_10s", "ALL10", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_dyn_10s", "DYN10", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_MG2_10s", "MICRO10", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_CLUBB_10s", "CLUBB10", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_CLUBB_10s_MG2_10s", "CLUBB10MICRO10", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_CLUBB_MG2_Strang", "CLUBBMICROSTR", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_CLUBB_MG2_Strang_60s", "CLUBBMICROSTR60", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_CLUBB_MG2_10s", "CLUBBMICRO10", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_CLUBB_MG2_60s", "CLUBBMICRO60", DAILY_FILE_LOC, START_DAY, END_DAY),
# E3SMCaseOutput("timestep_ZM_10s", "ZM10", DAILY_FILE_LOC, START_DAY, END_DAY),
# E3SMCaseOutput("timestep_ZM_300s", "ZM300", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_all_rad_10s", "ALLRAD10", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_all_300s", "ALL300", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_all_60s", "ALL60", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_precip_grad", "PFMG", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_precip_grad_MG2_10s", "PFMGMICRO10", DAILY_FILE_LOC, START_DAY, END_DAY),
E3SMCaseOutput("timestep_precip_grad_CLUBB_MG2_10s", "PFMGCLUBBMICRO10", DAILY_FILE_LOC, START_DAY, END_DAY),
]
rfile0 = nc4.Dataset(REF_CASE.get_daily_file_name(START_DAY), 'r')
lat = rfile0['lat'][:]
lon = rfile0['lon'][:]
lev = rfile0['lev'][:]
nlat = len(lat)
nlon = len(lon)
nlev = len(lev)
rfile0.close()
def get_overall_averages(ref_case, test_cases, days, varnames, scales):
case_num = len(test_cases)
day_num = len(days)
ref_means = dict()
for name in varnames:
if name in vars_3D:
ref_means[name] = np.zeros((nlev, nlat, nlon))
else:
ref_means[name] = np.zeros((nlat, nlon))
test_means = []
diff_means = []
for case in test_cases:
next_test_means = dict()
next_diff_means = dict()
for name in varnames:
if name in vars_3D:
next_test_means[name] = np.zeros((nlev, nlat, nlon))
next_diff_means[name] = np.zeros((nlev, nlat, nlon))
else:
next_test_means[name] = np.zeros((nlat, nlon))
next_diff_means[name] = np.zeros((nlat, nlon))
test_means.append(next_test_means)
diff_means.append(next_diff_means)
varnames_read = [name for name in varnames if name != "PRECT" and name != "TAU"]
if "PRECT" in varnames:
if "PRECL" not in varnames:
varnames_read.append("PRECL")
if "PRECC" not in varnames:
varnames_read.append("PRECC")
if "TAU" in varnames:
if "TAUX" not in varnames:
varnames_read.append("TAUX")
if "TAUY" not in varnames:
varnames_read.append("TAUY")
for day in days:
ref_daily, test_daily, diff_daily = ref_case.compare_daily_averages(test_cases, day, varnames_read)
if "PRECT" in varnames:
ref_daily["PRECT"] = ref_daily["PRECL"] + ref_daily["PRECC"]
for icase in range(case_num):
test_daily[icase]["PRECT"] = test_daily[icase]["PRECL"] + test_daily[icase]["PRECC"]
diff_daily[icase]["PRECT"] = diff_daily[icase]["PRECL"] + diff_daily[icase]["PRECC"]
if "TAU" in varnames:
ref_daily["TAU"] = np.sqrt(ref_daily["TAUX"]**2 + ref_daily["TAUY"]**2)
for icase in range(case_num):
test_daily[icase]["TAU"] = np.sqrt(test_daily[icase]["TAUX"]**2 + test_daily[icase]["TAUY"]**2)
diff_daily[icase]["TAU"] = test_daily[icase]["TAU"] - ref_daily["TAU"]
for name in varnames:
ref_means[name] += ref_daily[name]
for icase in range(case_num):
test_means[icase][name] += test_daily[icase][name]
diff_means[icase][name] += diff_daily[icase][name]
for name in varnames:
ref_means[name] *= scales[name]/day_num
for icase in range(case_num):
test_means[icase][name] *= scales[name]/day_num
diff_means[icase][name] *= scales[name]/day_num
return (ref_means, test_means, diff_means)
plot_names = {
'LWCF': "long wave cloud forcing",
'SWCF': "short wave cloud forcing",
'PRECC': "convective precipitation",
'PRECL': "large-scale precipitation",
'PRECE': "extreme precipitation",
'PRECT': "total precipitation",
'TGCLDIWP': "ice water path",
'TGCLDLWP': "liquid water path",
'CLDTOT': "cloud area fraction",
'CLDLOW': "low cloud area fraction",
'CLDMED': "mid-level cloud area fraction",
'CLDHGH': "high cloud area fraction",
'LHFLX': "latent heat flux",
'SHFLX': "sensible heat flux",
'TAU': "surface wind stress",
'TS': "surface temperature",
'PSL': "sea level pressure",
'OMEGA500': "vertical velocity at 500 mb",
'U10': "10 meter wind speed",
'RELHUM': "surface relative humidity",
'Q': "specific humidity",
'CLDLIQ': "lowest level cloud liquid",
'T': "lowest level temperature",
'CLOUD': "lowest level cloud fraction",
'TMQ': "precipitable water",
}
units = {
'LWCF': r'$W/m^2$',
'SWCF': r'$W/m^2$',
'PRECC': r'$mm/day$',
'PRECL': r'$mm/day$',
'PRECE': r'$mm/day$',
'PRECT': r'$mm/day$',
'TGCLDIWP': r'$g/m^2$',
'TGCLDLWP': r'$g/m^2$',
'AODABS': r'units?',
'AODUV': r'units?',
'AODVIS': r'units?',
'FLDS': r'$W/m^2$',
'FLNS': r'$W/m^2$',
'FLNSC': r'$W/m^2$',
'FLNT': r'$W/m^2$',
'FLNTC': r'$W/m^2$',
'FLUT': r'$W/m^2$',
'FLUTC': r'$W/m^2$',
'FSDS': r'$W/m^2$',
'FSDSC': r'$W/m^2$',
'FSNS': r'$W/m^2$',
'FSNSC': r'$W/m^2$',
'FSNT': r'$W/m^2$',
'FSNTC': r'$W/m^2$',
'FSNTOA': r'$W/m^2$',
'FSNTOAC': r'$W/m^2$',
'FSUTOA': r'$W/m^2$',
'FSUTOAC': r'$W/m^2$',
'CLDTOT': r'fraction',
'CLDLOW': r'fraction',
'CLDMED': r'fraction',
'CLDHGH': r'fraction',
'OMEGA500': r'Pa/s',
'LHFLX': r'$W/m^2$',
'SHFLX': r'$W/m^2$',
'TAU': r'$N/m^2$',
'TAUX': r'$N/m^2$',
'TAUY': r'$N/m^2$',
'TS': r'$K$',
'PSL': r'$Pa$',
'U10': r'$m/s$',
'RELHUM': r'%',
'Q': r'$g/kg$',
'CLDLIQ': r"$g/kg$",
'T': r'$K$',
'CLOUD': r'$fraction$',
'TMQ': r'$kg/m^2$',
}
varnames = list(units.keys())
scales = dict()
for name in varnames:
scales[name] = 1.
scales['TGCLDIWP'] = 1000.
scales['TGCLDLWP'] = 1000.
scales['PRECC'] = 1000.*86400.
scales['PRECL'] = 1000.*86400.
scales['PRECE'] = 1000.*86400.
scales['PRECT'] = 1000.*86400.
scales['Q'] = 1000.
scales['CLDLIQ'] = 1000.
diff_lims = {
'OMEGA500': 0.05,
'TAU': 0.1,
'PRECC': 10.,
'PRECL': 10.,
'PRECT': 10.,
'PSL': 200.
}
vars_3D = [
'RELHUM',
'Q',
'CLDLIQ',
'T',
'CLOUD',
]
PRECIP_OUTPUT_DIR = "/p/lustre2/santos36/timestep_precip_lat_lon/"
out_file_template = "{}.freq.short.d{}-d{}.nc"
# Threshold for precipitation to be considered "extreme", in mm/day.
PRECE_THRESHOLD = 97.
def get_prece(case_name, start_day, end_day):
file_name = out_file_template.format(case_name, day_str(start_day), day_str(end_day))
precip_file = nc4.Dataset(join(PRECIP_OUTPUT_DIR, file_name), 'r')
nbins = len(precip_file.dimensions['nbins'])
bin_lower_bounds = precip_file['bin_lower_bounds'][:]
ibinthresh = -1
for i in range(nbins):
if bin_lower_bounds[i] > PRECE_THRESHOLD:
ibinthresh = i
break
    if ibinthresh == -1:
        print("Warning: extreme precip threshold greater than largest bin bound.")
    # Sum precipitation amounts over all bins at or above the threshold bin.
    prece = precip_file["PRECT_amount"][:,:,ibinthresh:].sum(axis=2)
    precip_file.close()
    return prece
# Possible ways to extract a 2D section start here:
def identity(x):
return x
def slice_at(level, x):
return x[level,:,:]
print("Reading model output.")
varnames_readhist = [name for name in varnames if name != "PRECE"]
ref_means, test_means, diff_means = get_overall_averages(REF_CASE, TEST_CASES, days, varnames_readhist, scales)
print("Reading extreme precipitation.")
if "PRECE" in varnames:
ref_means["PRECE"] = get_prece(REF_CASE.case_name, START_DAY, END_DAY)
for icase in range(len(TEST_CASES)):
test_means[icase]["PRECE"] = get_prece(TEST_CASES[icase].case_name, START_DAY, END_DAY)
diff_means[icase]["PRECE"] = test_means[icase]["PRECE"] - ref_means["PRECE"]
# Should have this actually read from the plot_daily_means output.
diff_global_means = {
'PRECL': {
'ALL10': 0.2612134688516978,
'MICRO10': 0.11923748066367695,
'CLUBB10MICRO10': 0.11881571958007726,
'CLUBBMICRO10': 0.19409486771529938,
},
}
for name in varnames:
plot_name = name
if name in plot_names:
plot_name = plot_names[name]
get_2D = identity
if name in ["RELHUM", "Q", "CLDLIQ", "T", "CLOUD"]:
get_2D = partial(slice_at, nlev-1)
ref_plot_var = get_2D(ref_means[name])
clim_val = [ref_plot_var.min(), ref_plot_var.max()]
clim_diff = 0.
for icase in range(len(TEST_CASES)):
test_plot_var = get_2D(test_means[icase][name])
diff_plot_var = get_2D(diff_means[icase][name])
clim_val[0] = min(clim_val[0], test_plot_var.min())
clim_val[1] = max(clim_val[1], test_plot_var.max())
clim_diff = max(clim_diff, - diff_plot_var.min())
clim_diff = max(clim_diff, diff_plot_var.max())
if name in diff_lims:
clim_diff = diff_lims[name]
plt.pcolormesh(lon[:], lat[:], ref_plot_var, cmap=abs_cmap)
bmap.drawcoastlines()
ax = plt.gca()
ax.set_xticks([0., 90., 180., 270., 360.])
ax.set_xticklabels(['0', '90E', '180', '90W', '0'])
ax.set_yscale('function', functions=(forward, inverse))
ax.set_yticks([60., 45., 30., 15., 0., -15., -30., -45., -60.])
ax.set_yticklabels(['60N', '45N', '30N', '15N', '0', '15S', '30S', '45S', '60S'])
# ax.set_yticks([60., 30., 0., -30., -60.])
# ax.set_yticklabels(['60N', '30N', '0', '30S', '60S'])
plt.axis('tight')
plt.xlim([0., 360.])
plt.colorbar()
plt.clim(clim_val[0], clim_val[1])
plt.title("{} for case {}\n({}, days {}-{})".format(plot_name, REF_CASE.short_name, units[name], START_DAY, END_DAY))
plt.savefig('{}_{}{}.png'.format(name, REF_CASE.short_name, suffix))
plt.close()
for icase in range(len(TEST_CASES)):
test_plot_var = get_2D(test_means[icase][name])
diff_plot_var = get_2D(diff_means[icase][name])
case_name = TEST_CASES[icase].short_name
plt.pcolormesh(lon[:], lat[:], test_plot_var, cmap=abs_cmap)
bmap.drawcoastlines()
ax = plt.gca()
ax.set_xticks([0., 90., 180., 270., 360.])
ax.set_xticklabels(['0', '90E', '180', '90W', '0'])
ax.set_yscale('function', functions=(forward, inverse))
ax.set_yticks([60., 45., 30., 15., 0., -15., -30., -45., -60.])
ax.set_yticklabels(['60N', '45N', '30N', '15N', '0', '15S', '30S', '45S', '60S'])
# ax.set_yticks([60., 30., 0., -30., -60.])
# ax.set_yticklabels(['60N', '30N', '0', '30S', '60S'])
plt.axis('tight')
plt.xlim([0., 360.])
plt.colorbar()
plt.clim(clim_val[0], clim_val[1])
plt.title("{} for case {}\n({}, days {}-{})".format(plot_name, case_name, units[name], START_DAY, END_DAY))
plt.savefig('{}_{}{}.png'.format(name, case_name, suffix))
plt.close()
plt.pcolormesh(lon[:], lat[:], diff_plot_var, cmap=cmap)
bmap.drawcoastlines()
ax = plt.gca()
ax.set_xticks([0., 90., 180., 270., 360.])
ax.set_xticklabels(['0', '90E', '180', '90W', '0'])
ax.set_yscale('function', functions=(forward, inverse))
ax.set_yticks([60., 45., 30., 15., 0., -15., -30., -45., -60.])
ax.set_yticklabels(['60N', '45N', '30N', '15N', '0', '15S', '30S', '45S', '60S'])
# ax.set_yticks([60., 30., 0., -30., -60.])
# ax.set_yticklabels(['60N', '30N', '0', '30S', '60S'])
plt.axis('tight')
plt.xlim([0., 360.])
plt.colorbar()
plt.clim(-clim_diff, clim_diff)
if name in diff_global_means and case_name in diff_global_means[name]:
unit_string = units[name]
if unit_string == 'fraction':
unit_string = ''
else:
unit_string = " " + unit_string
mean_string = 'mean {:.2g}'.format(diff_global_means[name][case_name]) + unit_string
else:
mean_string = units[name]
plt.title("Mean difference in {}\nfor case {} ({}, days {}-{})".format(plot_name, case_name, mean_string, START_DAY, END_DAY))
plt.savefig('{}_diff_{}{}.png'.format(name, case_name, suffix))
plt.close()
|
"""
Pick ith Ball after Arranging
Given the radius and color of ‘n’ balls and an index ‘i’, write a C++ program to arrange them in ascending order of volume; when two balls have the same volume, arrange them by name. After arranging, print the details of the ith ball. For example, when five balls are given with radius and color as below:
2 red
1 blue
1 red
2 blue
3 red
and index is given as 1, then after arranging in ascending order the balls will be:
1 blue
1 red
2 blue
2 red
3 red
therefore print 1 blue.
Note: Input can be very large
Hint:
This problem can be quickly solved using list in STL.
1. Design a class for a ball; overload the < and > operators to decide the relationship between two balls
2. #include<list> includes list
3. list<int> l is used to create an integer list
4. l.sort() - will sort the list, provided < and > operators are overloaded for user defined data types
5. Random access is not possible in list
6. list<int>::iterator it=l.begin(); - creates an iterator for integer list and initializes to first element
7. Iterator can be moved by it++
8. value of an element by iterator it can be retrieved as *it
Input Format
First line contains the number of balls, n
Next ‘n’ lines contain the details of the balls radius and color separated by a space
Next line contains the value of ‘i’
Output Format:
Print the radius and color of ith ball after arrangement
"""
# Volume is monotonic in radius, so sorting the (radius, color) pairs sorts by
# volume with ties broken by name, exactly as the problem asks.
n = int(input())
res = []
for i in range(n):
    k1, k2 = input().split()
    res.append((int(k1), k2))
res.sort()
q = int(input())
print(*res[q - 1])
|
# These libraries are used to make life easier
from gpiozero import Device, LED, PWMLED, Button
from gpiozero.pins.mock import MockFactory
from unittest.mock import Mock
import pygame.mixer as mixerenhet
import time
import sys
from cowsay import kitty as säger
if sys.platform == 'darwin':
Device.pin_factory = MockFactory()
temperatur_läsare = Mock(return_value=(80, 21))
temperatur_enhet = Mock()
PWMLED = Mock()
else:
import Adafruit_DHT
temperatur_enhet = Adafruit_DHT.DHT11
temperatur_läsare = Adafruit_DHT.read_retry
# This is a so-called "constant" variable, just to make the code below easier to read
EN_SEKUND = 1
# All the parts (objects) of the dog that will be used, with readable names
vänster_öga = LED(23)
höger_öga = LED(24)
nos = PWMLED(18)
mun = Button(22)
hörsel = Button(27)
temperatur_pinne = 25
# In object-oriented programming, this is a "class" that represents a dog
class Hund():
def __init__(self, namn):
        # A dog has properties of its own that need not be the same for other dogs
self.namn = namn
self.hjärtslag_räknare = 0
        self.födsel_temperatur = self.känn_temperatur()  # The temperature at birth, to compare with later
        # When we birth a dog, we want it to know/do the following
        hörsel.when_activated = self.apport
        mun.when_activated = self.ät
        säger(f"Woof!!! now I am born and my name is {self.namn}...\n"
              f"Now that I am born I am {self.födsel_temperatur} degrees")
        mixerenhet.init()
        self.skäll()
def slå_hjärtslag(self, hjärtslag_per_sekund=10):
# Denna "metod" bestämmer hur ofta/regelbundet hjärtat ska slå
time.sleep(EN_SEKUND / hjärtslag_per_sekund)
self.hjärtslag_räknare = self.hjärtslag_räknare + 1
def nollställ_hjärtslag(self):
        # We use this to set the "interval" between, for example, how often the dog checks its fur
self.hjärtslag_räknare = 0
def känn_temperatur(self, pinne=temperatur_pinne, antal_försök=10, normal_temp=23):
        # This method/ability teaches the dog to feel its fur
fuktighet, temperatur = temperatur_läsare(temperatur_enhet, pinne, retries=antal_försök)
return temperatur or normal_temp
def dax_att_känna_efter(self, efter_hur_många_hjärtslag=100):
        # This method only answers whether it is time for the dog to check its
        # fur again (slå_hjärtslag already counts the beats)
        if self.hjärtslag_räknare > efter_hur_många_hjärtslag:
            self.nollställ_hjärtslag()
            return True
        else:
            return False
def blinka_ögonen(self, tid=0.5, antal=6):
        # This method decides how the dog blinks its eyes
vänster_öga.blink(on_time=tid, off_time=tid, n=antal)
höger_öga.blink(on_time=tid, off_time=tid, n=antal)
def pulsera_nosen(self, tid_sekunder=1, antal=4):
        # This method decides how the dog's nose pulses
nos.pulse(fade_in_time=tid_sekunder, fade_out_time=tid_sekunder, n=antal, background=False)
def apport(self):
        # We call this method when we want him to fetch
        print(self.namn, 'runs to fetch 🦴')
        self.blinka_ögonen(0.2, 10)  # Faster than usual
self.skäll()
def ät(self):
        # This method says what happens when the dog eats
        print(self.namn, 'happily eats 🌭')
self.blinka_ögonen()
mixerenhet.music.load('ljud/eat.mp3')
mixerenhet.music.play()
def skäll(self):
        # This method teaches the dog what to sound like when it barks
mixerenhet.music.load('ljud/vov.mp3')
mixerenhet.music.play()
def lev(self, värmeslag_temparatur_ökning=1):
        # This method is called as the final step after "init" (birth), when the puppy starts to live
        while True:
            self.slå_hjärtslag()  # We beat one heartbeat
            if self.dax_att_känna_efter() is True:  # Only after enough heartbeats should the dog check its fur
                temperatur_just_nu = self.känn_temperatur()  # ... then he checks the temperature
                print("Grr.. my fur right now is", temperatur_just_nu, "degrees")
                if temperatur_just_nu is not None and temperatur_just_nu >= self.födsel_temperatur + värmeslag_temparatur_ökning:
                    # If the temperature rises by the heatstroke amount, do the following
                    print('🥵')
self.pulsera_nosen()
# The main program starts here
hugo = Hund(namn='Hugo')  # We birth a new "Hund" named Hugo
hugo.lev()  # We tell hugo to start living
|
#Coastal Engineering Design Package
#Title: Wave Mechanics
#Author: Francisco Chaves
#Version 0.00
#First Created: 21.12.2015
#Latest Edit: 21.12.2015
#Description:
import datetime
# By default, MyTime is set to today's date, but it can be set to any other date.
class MyTime():
def __init__(self):
self.year = datetime.date.today().year
self.month = datetime.date.today().month
self.day = datetime.date.today().day
        # Approximate day count (every month treated as 31 days); adequate for
        # coarse day differences.
        self.days = self.day + self.month*31 + self.year*365
    # Sets MyTime to a given date (and recomputes the day count so that
    # diffDate stays consistent after the change).
    def setDate(self, y, m, d):
        self.year = y
        self.month = m
        self.day = d
        self.days = self.day + self.month*31 + self.year*365
    # Calculates the difference in days between this MyTime and another.
    def diffDate(self, d8):
        difDay = self.days - d8.days
        return difDay
dateNow = MyTime()
d8 = MyTime()
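# A minimal usage sketch: set d8 to a past date (the values below are
# hypothetical) and take the day difference from today's date.
d8.setDate(2015, 12, 21)
print(dateNow.diffDate(d8))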
|
# Count how many of the first 1,000,001 triangular numbers (starting from 0)
# have at least one left-rotation of their digits that is a perfect square.
from math import isqrt

t_ctr = t = inc = total = 0
while t_ctr <= 1_000_000:
    s = str(t)
    t += (inc + 1)  # next triangular number
    inc += 1
    t_ctr += 1
    for _ in range(len(s)):
        n = int(s)
        if isqrt(n) ** 2 == n:  # exact integer perfect-square test
            total += 1
            break
        s = s[1:] + s[0]  # rotate digits left by one
print(total)
|
import redis
class Test:
def __init__(s, collector, test, participant):
s.collector = collector
s.test = test
s.participant = participant
s.counter = 0
def error(s, msg):
s.__report('error', msg)
def warning(s, msg):
s.__report('warning', msg)
def ok(s, msg):
s.__report('ok', msg)
    def __report(s, level, msg):
        r = redis.StrictRedis(host=s.collector)
        # Use the Redis server's clock so every participant shares one
        # timebase; key layout is test:participant:seconds:microseconds.
        t = r.time()
        key = '%s:%s:%s:%s' % (
            s.test,
            s.participant,
            t[0],
            t[1])
        value = '%s:::%s' % (
            level,
            msg)
        r.set(key, value)
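# A minimal usage sketch (assumes a Redis server reachable at 'localhost';
# the test and participant names below are hypothetical):
if __name__ == '__main__':
    t = Test('localhost', 'connectivity', 'node-1')
    t.ok('participant reachable')
    t.warning('latency above expected threshold')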
|
#---------------------- Import packages as per the requirement-----------------------
import json
import datetime
import time
import os
import dateutil.parser
import logging
import boto3
import re
import requests
#import pymssql
#from datetime import datetime
import urllib
import urllib2
from botocore.exceptions import ClientError
from requests.auth import HTTPBasicAuth
from urllib2 import Request, urlopen, URLError, HTTPError
import csv
region = 'us-east-1'
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ec2 = boto3.resource('ec2', region_name=region)
ec2_client = boto3.client('ec2')
lex_client = boto3.client('lex-models')
cloudwatch = boto3.client('cloudwatch')
ambariUser = "admin"
ambariPass = "admin"
server = 'chatbottestdb.xxxxxxxx.xx-xxxx-1.rds.amazonaws.com'
user = 'xxxxxxxxxxxxx'
password = 'xxxxxxxx'
#----------------------------------AWS Pricing List for CSV -----------------------------------------------
with open('AWS_Pricing.csv', 'rb') as price_chart:
reader = csv.reader(price_chart, delimiter=',')
price_chart_list = list(reader)
total_rows = len(price_chart_list)
#----------------------------------- Greetings--------------------------------------------------------------
def greetings(intent_request):
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
'content': 'Hello!! My name is ChatOps, a Chatbot. I can help you with DataLake related queries. \n How can I help you today?'
}
)
#--------------------- Start / Stop and List an instance/s in the User Specified environment-----------------
def action_instances(intent_request):
instance_action = intent_request['currentIntent']['slots']['instance_actions']
instance_identifier = intent_request['currentIntent']['slots']['instance_identifiers']
stack_identifier = intent_request['currentIntent']['slots']['stack_identifiers']
'''if instance_identifier is None:
response_get_slot_type = lex_client.get_slot_type(name='instance_identifiers', version='$LATEST')
print response_get_slot_type
slot_values_present = []
for evals in response_get_slot_type[enumerationValues]:
slot_values_present.append(evals['value'])
print slot_values_present
user_input = intent_request['currentIntent']['inputTranscript'].split()
response_put_slot_type = lex_client.put_slot_type(name='instance_identifiers',enumerationValues=[{'value': 'ekta'}],checksum='0379e74f-1cbe-4a3a-8fd0-efeba73c608f')
instance_identifier = 'none' '''
#print (type(instance_action))
#print (type(instance_identifier))
#response_all_instances = ec2_client.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['*'instance_identifier'*']}])
#print (response_all_instances)
response_describe = ec2_client.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['*'+instance_identifier+'*']}])
response_stack_describe = ec2_client.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['*'+stack_identifier+'*']}])
print (type(stack_identifier))
print stack_identifier
print response_describe
print response_stack_describe
words_show = ['show','list','fetch']
words_start = ['start','bring up','initialise','initialize']
words_stop = ['stop','shut down','Power Off','Power Down','bring down']
stack_list_dev = ['dev','Dev','Development','DEVELOPMENT','development']
stack_list_non_prod = ['Non Prod','NON PROD','Non Production','Non-Prod','Non-prod','non prod']
stack_list_prod_stage = ['Prod-Stage','Production-Stage','production-stage','Prod Stage','prod-stage','Production Stage']
stack_list_prod = ['prod','PROD','Prod','Production','production']
instance_ids = []
instance_names = []
total_instances = 0
for i in range(0, len(response_describe['Reservations'])):
for j in range(0, len(response_describe['Reservations'][i]['Instances'])):
instance_ids.append(response_describe['Reservations'][i]['Instances'][j]['InstanceId'])
if instance_action in words_show:
for i in range(0, len(response_describe['Reservations'])):
for j in range(0, len(response_describe['Reservations'][i]['Instances'])):
for k in range(0, len(response_describe['Reservations'][i]['Instances'][j]['Tags'])):
if(response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Key'] == 'Name'):
instance_names.append(response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Value'])
total_instances +=1
break
str1 = ' , \n \t -> '.join(instance_names)
print str1
output_message_action_instances = 'There are a total of '+str(total_instances)+' '+str(instance_identifier)+' Instances in the '+str(stack_identifier)+ ' environment. \n They are as follows:- '+'\n \t -> '+str1
if instance_action in words_start:
'''for i in range(0, len(response_describe['Reservations'])):
for j in range(0,len(response_describe['Reservasations'][i]['Instances'])):
for k in range(0, len(response_describe['Reservations'][i]['Instances'][j]['Tags'])):
if(response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Key'] == 'Name'):
response_describe = ec2_client.start_instances(InstanceIds=instance_ids)
total_instances +=1
print('StartAction_individual')
break
'''
#str1 = ''
response_action = ec2_client.start_instances(InstanceIds=instance_ids)
print (response_action)
#total_instances +=1
print ('startAction')
#break
#str1 = ' , \n \t -> '.join(instance_names)
#output_message_action_instances = 'There are a total of '+str(total_instances)+' '+str(instance_identifier)+' Instances in the '+str(stack_identifier)+ ' environment. \n They have been started. \n They are as follows:- '+'\n \t -> '+str1
output_message_action_instances = '\n The '+str(instance_identifier)+' instance/s you have requested in the '+str(stack_identifier)+ ' environment has been '+str(instance_action)+'ed.'
if instance_action in words_stop:
response_action = ec2_client.stop_instances(InstanceIds=instance_ids)
print('stopAction')
output_message_action_instances = 'The '+str(instance_identifier)+' instance/s you have requested in the '+str(stack_identifier) + ' environment has been '+str(instance_action)+'ped.'
#"Observed %s instances running at %s" % (num_instances, timestamp)
return close(
'Fulfilled',
{
'contentType': 'PlainText',
'content': output_message_action_instances
}
)
# -------------------------------Instance Utilization wrt Environment Status--------------------------------------
def utilization_statistics(intent_request):
#instance_action = intent_request['currentIntent']['slots']['instance_actions']
instance_identifier = intent_request['currentIntent']['slots']['instance_identifiers']
stack_identifier = intent_request['currentIntent']['slots']['stack_identifiers']
response_describe = ec2_client.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['*'+instance_identifier+'*']}])
response_stack_describe = ec2_client.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['*'+stack_identifier+'*']}])
print (type(stack_identifier))
print stack_identifier
print response_describe
print response_stack_describe
words_show = ['show','list']
words_start = ['start']
words_stop = ['stop']
stack_list_dev = ['dev']
stack_list_prod = ['prod','PROD','Prod','Production','production']
instance_ids = []
instance_names = []
total_instances = 0
for i in range(0, len(response_describe['Reservations'])):
for j in range(0, len(response_describe['Reservations'][i]['Instances'])):
instance_ids.append(response_describe['Reservations'][i]['Instances'][j]['InstanceId'])
insts = ""
instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['running','stopped','terminated','pending','stopping','shutting-down']}])
for instance in instances:
if (instance.id in instance_ids):
launch_time = instance.launch_time
current_time = datetime.datetime.now(launch_time.tzinfo)
lt_delta = current_time - launch_time
running_time = str(lt_delta)
lt_Delta_hr = lt_delta.total_seconds()/3600
period = 60
if lt_Delta_hr > 360 and lt_Delta_hr < 1412 :
period = 300 * int(lt_delta.total_seconds()/1440)
elif lt_delta.total_seconds()/60 > 1412 :
period = 3600 * int(lt_delta.total_seconds()/1440)
results = cloudwatch.get_metric_statistics(Namespace='AWS/EC2', MetricName='CPUUtilization', Dimensions=[{'Name': 'InstanceId', 'Value': instance.id}], StartTime=launch_time, EndTime=current_time, Period=period, Statistics=['Average'])
length = len(results['Datapoints'])
if length == 0 : length = 1
sum_of_avg = 0
for datapoint in results['Datapoints'] :
sum_of_avg = sum_of_avg + datapoint['Average']
average = str(sum_of_avg / length) + '%'
insts = insts +'\n \t --> '+ instance.id + ' , \t ' + str(instance.launch_time) + ' , \t ' + running_time + ' , \t ' + average
print('Instance : ' + instance.id + ', Launch Time : ' + str(instance.launch_time) + ', Running Time : ' + running_time + ', CPU Utilization : ' + average)
message_utilization = 'Here is the List of '+str(instance_identifier) +' instances from the '+str(stack_identifier)+' environment with the Utilization Details :: \n\t\t\t\t INSTANCE ID \t||\t LAUNCH TIME \t||\t RUNNING SINCE \t||\t CPU UTILIZATION :- ' + insts
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
'content': message_utilization
}
)
#---------------------------------------- Price and Cost structure of machines based on User Input-----------------------
def pricing_information(intent_request):
#instance_action = intent_request['currentIntent']['slots']['instance_actions']
instance_identifier = intent_request['currentIntent']['slots']['instance_identifiers']
stack_identifier = intent_request['currentIntent']['slots']['stack_identifiers']
response_describe = ec2_client.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['*'+instance_identifier+'*']}])
response_stack_describe = ec2_client.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['*'+stack_identifier+'*']}])
print (type(stack_identifier))
print stack_identifier
print response_describe
print response_stack_describe
words_show = ['show','list']
words_start = ['start']
words_stop = ['stop']
stack_list_dev = ['dev']
stack_list_prod = ['prod','PROD','Prod','Production','production']
instance_ids = []
instance_names = []
total_instances = 0
for i in range(0, len(response_describe['Reservations'])):
for j in range(0, len(response_describe['Reservations'][i]['Instances'])):
instance_ids.append(response_describe['Reservations'][i]['Instances'][j]['InstanceId'])
insts = ""
instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['running','stopped','terminated','pending','stopping','shutting-down']}])
for instance in instances:
if (instance.id in instance_ids):
print type(instance)
instance_type = instance.instance_type
launch_time = instance.launch_time
current_time = datetime.datetime.now(launch_time.tzinfo)
lt_delta = current_time - launch_time
running_time_hr = str(lt_delta)
price = 0.0
for row_cnt in range(1, total_rows):
if price_chart_list[row_cnt][0] == region and price_chart_list[row_cnt][1] == instance_type :
price = float(price_chart_list[row_cnt][2]) * (lt_delta.total_seconds()/3600)
insts = insts +'\n \t --> '+instance.id+ ' , \t ' +running_time_hr+ ' hours , \t ' +str(instance_type)+ ' , \t $'+str(price)
print('Instance : ' + instance.id + ', Running Time : ' + running_time_hr + ', Instance Type : ' + str(instance_type) + ', Price : ' + str(price))
message_price = 'The following is the list of '+str(instance_identifier)+' instances from the '+str(stack_identifier)+' environment with the Cost Statistics :: \n\t\t\t\t INSTANCE ID \t||\t TOTAL RUNNING TIME \t||\t INSTANCE TYPE \t||\t PRICE in ($) :- ' + insts
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
'content': message_price
}
)
#---------------------------------------- Listing all the services in Ambari--------------------------------------------------
def services_list(intent_request):
instance_identifier = intent_request['currentIntent']['slots']['instance_identifiers']
print instance_identifier
stack_identifier = intent_request['currentIntent']['slots']['stack_identifiers']
response_describe = ec2_client.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['*'+instance_identifier+'*']}])
print response_describe
words_show = ['show','list']
    stack_list_dev = ['dev']
stack_list_int = ['integration','int','nonprod']
stack_list_prod = ['prod','prodstage']
words_start = ['start']
words_stop = ['stop']
instance_ids = []
instance_id = []
instance_names = []
instance_states = []
instance_states_1 = []
    total_instances = 0
    str2 = ''  # guards the print below when no Ambari-tagged instance is found
for i in range(0, len(response_describe['Reservations'])):
for j in range(0, len(response_describe['Reservations'][i]['Instances'])):
instance_ids.append(response_describe['Reservations'][i]['Instances'][j]['InstanceId'])
for i in range(0, len(response_describe['Reservations'])):
for j in range(0, len(response_describe['Reservations'][i]['Instances'])):
for k in range(0, len(response_describe['Reservations'][i]['Instances'][j]['Tags'])):
if('Ambari' in (response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Value']) or ('ambari' in (response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Value'] ))):
instance_id = response_describe['Reservations'][i]['Instances'][j]['InstanceId']
#print instance_id
str2 = instance_id
#print instance_id
#instance_names.append(response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Value'])
#total_instances +=1
#break
#str1 = ' , '+'\n'.join(instance_names)
#print 'wow'
print instance_id
print 'Not Now :('
print 'There are a total of '+str(total_instances)+' Instances and they are as follows:-'+'\n'+str2
service_list_0 = check_service_list(instance_id)
#service_list_1 = check_service_list(instance_id)
str3 = ', \n'.join(service_list_0)
#print service_list_1
#return check_service_list(instance_id)
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
            'content': 'Here is the list of services present in the '+str(stack_identifier)+' environment: ' + str3
}
)
# Checking the list of services in Ambari
def check_service_list(ip):
    service_list = []  # ensure a defined return value even if the API calls fail
    try:
        print("getting service list")
response_instance = ec2_client.describe_instances( InstanceIds = [ip])
#print("DNS: " + response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName'])
#base_url = 'https://'+response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName']+':8080/api/v1/clusters'
#print base_url
print("Public DNS: " + response_instance['Reservations'][0]['Instances'][0]['PublicDnsName'])
base_url2 = 'http://'+response_instance['Reservations'][0]['Instances'][0]['PublicDnsName']+':8080/api/v1/clusters'
print base_url2
r = requests.get(base_url2, auth=HTTPBasicAuth(ambariUser, ambariPass), verify=False)
cluster_name = r.json()['items'][0]['Clusters']['cluster_name']
print ("cluster name")
print (cluster_name)
base_url_services = 'http://'+response_instance['Reservations'][0]['Instances'][0]['PublicDnsName']+':8080/api/v1/clusters/'+cluster_name+'/services'
r_services = requests.get(base_url_services, auth=HTTPBasicAuth(ambariUser, ambariPass), verify=False)
print(r_services.json())
service_list = []
#total_serv_count = 0
for i in range(0,len(r_services.json()['items'])):
service_list.append(r_services.json()['items'][i]['ServiceInfo']['service_name'])
#total_serv_count + = 1
print (service_list)
#print (total_serv_count)
except Exception as e:
print(e)
return (service_list)#,total_serv_count)
#return (service_list,total_serv_count)
# Checking the health of services in Ambari
def status_list_services(intent_request):
#instance_identifier = intent_request['currentIntent']['slots']['instance_identifiers']
#print instance_identifier
stack_identifier = intent_request['currentIntent']['slots']['stack_identifiers']
#response_describe = ec2_client.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['*'+instance_identifier+'*']}])
instance_identifier= "Ambari"
print instance_identifier
response_describe = ec2_client.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['*'+instance_identifier+'*']}])
print response_describe
words_show = ['show','list']
    stack_list_dev = ['dev']
stack_list_int = ['integration','int','nonprod']
stack_list_prod = ['prod','prodstage']
words_start = ['start']
words_stop = ['stop']
instance_ids = []
instance_id = []
instance_names = []
instance_states = []
instance_states_1 = []
    total_instances = 0
    str2 = ''  # guards the print below when no Ambari-tagged instance is found
for i in range(0, len(response_describe['Reservations'])):
for j in range(0, len(response_describe['Reservations'][i]['Instances'])):
instance_ids.append(response_describe['Reservations'][i]['Instances'][j]['InstanceId'])
for i in range(0, len(response_describe['Reservations'])):
for j in range(0, len(response_describe['Reservations'][i]['Instances'])):
for k in range(0, len(response_describe['Reservations'][i]['Instances'][j]['Tags'])):
if('Ambari' in (response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Value']) or ('ambari' in (response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Value'] ))):
instance_id = response_describe['Reservations'][i]['Instances'][j]['InstanceId']
#print instance_id
str2 = instance_id
#print instance_id
#instance_names.append(response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Value'])
#total_instances +=1
#break
#str1 = ' , '+'\n'.join(instance_names)
#print 'wow'
print instance_id
print 'There are a total of '+str(total_instances)+' Instances and they are as follows:-'+'\n'+str2
service_list = check_service_list(instance_id)
    # get the total number of services present in HDP
service_list_count = len(service_list)
print service_list_count
print service_list
print instance_id
status_with_service = check_service_state(service_list,instance_id)
    running_service_list = list()
    stop_service_list = list()
    poweredoff_service_list = list()  # needed below for 'INSTALLED' services
    for key, value in status_with_service.iteritems():
        if value == 'STARTED' or value == 'Started':
            running_service_list.append(key)
        elif value == 'INSTALLED' or value == 'Installed':
            poweredoff_service_list.append(key)
        else:
            stop_service_list.append(key)
    print 'count of list of services running is : ' + str(len(running_service_list))
    print 'count of list of services stopped is : ' + str(len(stop_service_list))
    print 'count of list of services powered off is : ' + str(len(poweredoff_service_list))
str3 = json.dumps(status_with_service)
#print 'There are a total of ' ' Services and
#return check_service_list(instance_id)
message_health_status = str(stack_identifier)+" Environment Current Status : \n Currently we have a total of "+str(service_list_count)+" services in the "+ str(stack_identifier)+" environment. \n From the above, "+str(len(running_service_list))+" services are HEALTHY and "+str(len(stop_service_list))+" services are UNHEALTHY !!! \n The detailed list of services are : \n"+ str3
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
'content': message_health_status #'Here is list of services alongwith their State: ' + str3
}
)
# Checking the list of services along with their state in Ambari
def check_service_state(service_list,instance_id):
try:
response_instance = ec2_client.describe_instances( InstanceIds = [instance_id])
print("DNS: " + response_instance['Reservations'][0]['Instances'][0]['PublicDnsName'])
base_url = 'http://'+response_instance['Reservations'][0]['Instances'][0]['PublicDnsName']+':8080/api/v1/clusters'
r = requests.get(base_url, auth=HTTPBasicAuth(ambariUser, ambariPass), verify=False)
cluster_name = r.json()['items'][0]['Clusters']['cluster_name']
print("checking the State of the services ")
service_dict = {}
#GET api/v1/clusters/c1/services/HDFS?fields=ServiceInfo/state
for service in service_list:
print('Turning Off maintenance mode ' + service)
maintenanceURL = 'http://'+response_instance['Reservations'][0]['Instances'][0]['PublicDnsName']+':8080/api/v1/clusters/'+cluster_name+'/services/'+service
stopdata = {"RequestInfo":{"context":"Turn Off Maintenance Mode"},"Body":{"ServiceInfo":{"maintenance_state":"OFF"}}}
headers = {"X-Requested-By": "ambari"}
response = requests.put(maintenanceURL, auth=HTTPBasicAuth(ambariUser, ambariPass), data=json.dumps(stopdata), headers=headers, verify=False)
print('Maintenance response is')
print(response)
#curl -u admin:$PASSWORD -i -H 'X-Requested-By: ambari' -X PUT -d '{"RequestInfo": {"context" :"Remove Falcon from maintenance mode"}, "Body": {"ServiceInfo": {"maintenance_state": "OFF"}}}' http://$AMBARI_HOST:8080/api/v1/clusters/$CLUSTER/services/FALCON
#print('Service check begins')
base_url_state = 'http://'+response_instance['Reservations'][0]['Instances'][0]['PublicDnsName']+':8080/api/v1/clusters/'+cluster_name+'/services/'+service+'?fields=ServiceInfo/state'
print(base_url_state)
r_state = requests.get(base_url_state, auth=HTTPBasicAuth(ambariUser, ambariPass), verify=False)
print(r_state)
print(r_state.json())
state_of_service = r_state.json()['ServiceInfo']['state']
print(service +' = '+ state_of_service)
service_dict[service] = state_of_service
        return service_dict
    except Exception as e:
        print(e)
        return {}  # keep callers iterating over a dict even on failure
# --- Return handler for the different functions ---
def close(fulfillment_state, message):
response = {
'dialogAction': {
'type': 'Close',
'fulfillmentState': fulfillment_state,
'message': message
}
}
return response
def elicit(fulfillment_state, message):
response = {
'dialogAction': {
'type': 'ElicitIntent',
'message': message
}
}
return response
# --- Intent handler ---
def dispatch(intent_request):
logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))
intent_name = intent_request['currentIntent']['name']
print(intent_request)
# Dispatch to your bot's intent handlers
if intent_name == 'action_instances':
return action_instances(intent_request)
elif intent_name == 'greetings':
return greetings(intent_request)
elif intent_name == 'utilization_statistics':
return utilization_statistics(intent_request)
elif intent_name == 'pricing_information':
return pricing_information(intent_request)
elif intent_name == 'Status_list_machines':
return services_list(intent_request)
elif intent_name == 'status_list_services':
return status_list_services(intent_request)
elif intent_name == 'list_all_instances':
return list_all_instances(intent_request)
elif intent_name == 'list_all_run_instances':
return list_all_run_instances(intent_request)
elif intent_name == 'list_all_stop_instances':
return list_all_stop_instances(intent_request)
elif intent_name == 'list_instance_untagged':
return list_instance_untagged(intent_request)
elif intent_name == 'list_instance_tagged':
return list_instance_tagged(intent_request)
else:
return close(
'Fulfilled',
{
'contentType': 'PlainText',
                'content': 'Apologies!! The request you are looking for is not supported in the current release!! \n\n\n Can I help you with any other request? '
}
)
# --- Main handler ---
def lambda_handler(event, context):
# By default, treat the user request as coming from the America/New_York time zone.
os.environ['TZ'] = 'America/New_York'
time.tzset()
logger.debug('event.bot.name={}'.format(event['bot']['name']))
return dispatch(event)
# ---End of Main handler ----
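# For reference, a minimal sketch of the Lex event shape this handler reads
# (values are hypothetical; only keys accessed in the code above are shown):
#
#   {
#     "userId": "user-123",
#     "bot": {"name": "ChatOps"},
#     "inputTranscript": "list dev instances",
#     "currentIntent": {
#       "name": "action_instances",
#       "slots": {
#         "instance_actions": "list",
#         "instance_identifiers": "datanode",
#         "stack_identifiers": "dev"
#       }
#     }
#   }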
# ---------------------------------------------------- Old lambda Code ------------- Need to refine ----------------------
#---------------------- The following is the old code from the old lambda
#--------------------------- List of all the instances in the Account-------------------------------
def list_all_instances(intent_request):
#instance_action = intent_request['currentIntent']['slots']['instance_actions']
#instance_identifier = intent_request['currentIntent']['slots']['instance_identifiers']
stack_identifier = intent_request['currentIntent']['slots']['stack_identifiers']
instances = ec2.instances.filter(
Filters=[{'Name': 'instance-state-name', 'Values': ['running','stopped','terminated','pending','stopping','shutting-down']}])
insts = ""
counter = 0
for instance in instances:
if instance.tags:
for tag in instance.tags:
counter = counter+1
if tag['Key'] == 'Name':
insts = insts+" , \t "+ "\n -> " +tag['Value']
#if 'Name' in instance.tags:
#insts = insts+" "+instance.tags['Name']
print (instance.id, instance.instance_type, instance.state)
message_list = 'The following is the List of all the Instances in the '+str(stack_identifier)+' environment :- ' + insts
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
'content': message_list
}
)
# List of all the running instances
def list_all_run_instances(intent_request):
#instance_action = intent_request['currentIntent']['slots']['instance_actions']
#instance_identifier = intent_request['currentIntent']['slots']['instance_identifiers']
stack_identifier = intent_request['currentIntent']['slots']['stack_identifiers']
instances = ec2.instances.filter(
Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
insts = ""
counter = 0
for instance in instances:
if instance.tags:
for tag in instance.tags:
counter = counter+1
if tag['Key'] == 'Name':
insts = insts+" \t "+ "\n \t --> " +tag['Value'] + ","
#if 'Name' in instance.tags:
#insts = insts+" "+instance.tags['Name']
print (instance.id, instance.instance_type, instance.state)
message_instances = 'The following is the List of all the Running Instances in the '+str(stack_identifier)+' environment :- ' + insts
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
'content': message_instances
}
)
# List of all the stopped instances
def list_all_stop_instances(intent_request):
#instance_action = intent_request['currentIntent']['slots']['instance_actions']
#instance_identifier = intent_request['currentIntent']['slots']['instance_identifiers']
stack_identifier = intent_request['currentIntent']['slots']['stack_identifiers']
instances = ec2.instances.filter(
Filters=[{'Name': 'instance-state-name', 'Values': ['stopped']}])
insts = ""
counter = 0
for instance in instances:
if instance.tags:
for tag in instance.tags:
counter = counter+1
if tag['Key'] == 'Name':
insts = insts+" \t "+ "\n \t --> " +tag['Value'] + ","
#if 'Name' in instance.tags:
#insts = insts+" "+instance.tags['Name']
print (instance.id, instance.instance_type, instance.state)
message_instances = 'The following is the List of all the Stopped Instances in the '+str(stack_identifier)+' environment :- ' + insts
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
'content': message_instances
}
)
def list_instance_untagged(intent_request):
#instance_action = intent_request['currentIntent']['slots']['instance_actions']
#instance_identifier = intent_request['currentIntent']['slots']['instance_identifiers']
stack_identifier = intent_request['currentIntent']['slots']['stack_identifiers']
instances = ec2.instances.filter(
Filters=[{'Name': 'instance-state-name', 'Values': ['running','stopped','terminated','pending','stopping','shutting-down']}])
insts = ""
for instance in instances:
present_instance = 0
if len(instance.tags) > 0:
for tag in instance.tags:
if tag['Key'].lower() == 'owner':
present_instance = 1
if present_instance == 0:
for tag in instance.tags:
if tag['Key'] == 'Name':
insts = insts + " \t "+ "\n \t --> " +tag['Value'] + ","
message_tagged = 'The following is the List of all the Untagged Instances in the '+str(stack_identifier)+' environment :- ' + insts
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
'content': message_tagged
}
)
# List of all the tagged instances in the account
def list_instance_tagged(intent_request):
#instance_action = intent_request['currentIntent']['slots']['instance_actions']
#instance_identifier = intent_request['currentIntent']['slots']['instance_identifiers']
stack_identifier = intent_request['currentIntent']['slots']['stack_identifiers']
instances = ec2.instances.filter(
Filters=[{'Name': 'instance-state-name', 'Values': ['running','stopped','terminated','pending','stopping','shutting-down']}])
insts = ""
for instance in instances:
if instance.tags:
for tag in instance.tags:
if tag['Key'] == 'Name':
insts = insts+" \t "+ "\n" +tag['Value'] + ","
#if 'Name' in instance.tags:
#insts = insts+" "+instance.tags['Name']
#print (instance.id, instance.instance_type, instance.tags)
message_tagged = 'The following is the List of all the Tagged Instances in the '+str(stack_identifier)+' environment :- ' + insts
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
'content': message_tagged
}
)
|
'''
rule_handler.py
Copyright 2013 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import logging
def parse_rules(enabled_rules):
'''
Read all rules from the rule_path and return a list of all functors
which need to be called every minute.
:enabled_rules: A string list with rule names.
'''
functors = []
for rule in enabled_rules:
'''
Reminder:
>>> __import__('sentinela.rules.apache_debug', fromlist=['apache_debug'])
<module 'sentinela.rules.apache_debug' from 'sentinela/rules/apache_debug.pyc'>
'''
module_name = 'sentinela.rules.%s' % (rule)
try:
module_inst = __import__(module_name, fromlist=[rule])
except Exception, e:
msg = 'Failed to import the "%s" rule. Exception: "%s".'
logging.exception(msg % (module_name, e))
else:
logging.debug('Imported %s' % module_name)
cev = 'call_every_minute'
functor = getattr(module_inst, cev, None)
if functor is None:
msg = 'The %s rule does NOT define the required %s'
logging.error(msg % (module_name, cev))
continue
functors.append(functor)
return functors
def get_enabled_rules(config_file):
'''
Read the config file and return the list of enabled rules.
'''
enabled_rules = []
try:
config_file_handler = file(config_file)
except:
logging.exception('Failed to open sentinela config "%s"' % config_file)
else:
for rule_name in config_file_handler.readlines():
rule_name = rule_name.strip()
if rule_name.startswith('#'):
continue
if not rule_name:
continue
enabled_rules.append(rule_name)
return enabled_rules
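# A minimal sketch of a rule module this loader could import (hypothetical;
# it would live at sentinela/rules/apache_debug.py and only needs to define
# the call_every_minute functor that parse_rules looks up):
#
#   import logging
#
#   def call_every_minute():
#       logging.debug('apache_debug rule tick')
#
# A matching config file lists one rule name per line; '#' lines are skipped:
#
#   apache_debug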
|
# Implement a deque with a dynamic circular buffer.
# To test the deque, commands are fed on input.
# The first line holds the number of commands; each following line holds one command.
# Each command is given as 2 integers: a b.
# a = 1 - push front,
# a = 2 - pop front,
# a = 3 - push back,
# a = 4 - pop back.
# For a pop* command, b is the expected value. If pop is called on an empty
# structure, the expected value is "-1".
# Print YES if all expected values matched. Otherwise, if at least one
# expectation failed, print NO.
# Sample Input:
# 5
# 1 44
# 3 50
# 2 44
# 2 50
# 2 -1
# Sample Output:
# YES
from collections import deque
PUSH_FRONT, POP_FRONT, PUSH_BACK, POP_BACK = range(1, 5)
def process_commands(commands):
d = deque()
for command in commands:
        # "front" maps to the deque's right end and "back" to its left end;
        # the mapping is consistent across push/pop, so results are correct.
        if command[0] == PUSH_FRONT:
            d.append(command[1])
        elif command[0] == POP_FRONT:
            if len(d) == 0:
                if command[1] == -1:
                    continue  # expectation met; keep processing
                return False
            el = d.pop()
            if el != command[1]:
                return False
elif command[0] == PUSH_BACK:
d.appendleft(command[1])
        elif command[0] == POP_BACK:
            if len(d) == 0:
                if command[1] == -1:
                    continue  # expectation met; keep processing
                return False
            el = d.popleft()
            if el != command[1]:
                return False
return True
def main():
command_count = int(input())
# command_count = 5
commands = []
for i in range(command_count):
command_list = input().split()
command_list = list(map(int, command_list))
commands.append(command_list)
#commands = [[1, 44], [3, 50], [2, 44], [2, 50], [2, -1]]
if not process_commands(commands):
print("NO")
else:
print("YES")
if __name__ == "__main__":
main()
|
#! /usr/bin/env python3
# ---------------------------------------------------------------------------- #
# check_solvable.py #
# #
# By - jacksonwb #
# Created: Wednesday December 1969 4:00:00 pm #
# Modified: Saturday Aug 2019 1:04:04 pm #
# Modified By: jacksonwb #
# ---------------------------------------------------------------------------- #
def is_solvable(n, n_map, goal):
start = []
for row in n_map:
start += list(row)
finish = []
for row in goal:
finish += list(row)
inversion = 0
for i in range(n * n):
for j in range(i + 1, n * n):
if finish.index(start[i]) > finish.index(start[j]):
inversion += 1
start_zero_row = start.index(0) // n
start_zero_col = start.index(0) % n
finish_zero_row = finish.index(0) // n
finish_zero_col = finish.index(0) % n
zero_dif = abs(start_zero_row - finish_zero_row) + abs(start_zero_col - finish_zero_col)
if zero_dif % 2 == 0 and inversion % 2 == 0:
return True
if zero_dif % 2 == 1 and inversion % 2 == 1:
return True
return False
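# Illustrative check; the board format (an iterable of rows with 0 marking the
# blank) is an assumption about the caller. This start differs from the goal
# by a single slide of the blank, so it is solvable:
if __name__ == '__main__':
    start_map = [[1, 2, 3], [4, 5, 6], [7, 0, 8]]
    goal_map = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
    print(is_solvable(3, start_map, goal_map))  # True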
|
def solution(s):
answer = []
A = list(map(int, s.split()))
answer.append(str(min(A)))
answer.append(str(max(A)))
# str(min(A) + ' ' + max(A))
    # join requires every element of the list to be a str
return ' '.join(answer)
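# Quick check: solution("-1 -2 -3 -4") returns "-4 -1"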
|
#!/usr/bin/env python
'''
Regular Expressions Exercise 3:
Given the spec-* files in the data/ directory, write a script that uses regular
expressions to list the three numbers in each filename to a new file where the
values are tab delimited, e.g.
4055 55359 0001
Hint: Look up the Python module "glob".
'''
import os
import re
from glob import glob
files = glob('./data/spectra/spec*')
outfile = 'output/sdss_spec_regex.txt'
with open(outfile,'w+') as out:
    for fil in files:
        # str.strip() removes a *set of characters* rather than a prefix, so
        # take the basename and search for the numeric triple directly
        name = os.path.basename(fil)
        m = re.search(r"spec-([0-9]+)-([0-9]+)-([0-9]+)", name)
        assert m is not None, "The pattern was not matched"
        out.write("\t".join(m.groups()) + "\n")
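# For a hypothetical filename such as 'spec-4055-55359-0001.fits', the search
# above captures ('4055', '55359', '0001') and writes one tab-delimited line.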
|
from os import environ
class Config:
# Database
db_uri = environ.get('SQLALCHEMY_DATABASE_URI')
db_epic_table = environ.get('SQLALCHEMY_EPIC_TABLE')
db_jira_table = environ.get('SQLALCHEMY_JIRA_TABLE')
# JIRA
jira_username = environ.get('JIRA_USERNAME')
jira_api_key = environ.get('JIRA_API_KEY')
jira_endpoint = environ.get('JIRA_ENDPOINT')
jira_issues_jql = environ.get('JIRA_ISSUES_JQL')
jira_issues_fields = environ.get('JIRA_ISSUES_FIELDS')
jira_epics_jql = environ.get('JIRA_EPICS_JQL')
jira_epics_fields = environ.get('JIRA_EPICS_FIELDS')
|
import argparse
def swapcase_decorator(gen):
def wrapper(*arg, **kwargs):
for i in gen(*arg, **kwargs):
yield i.swapcase()
return wrapper
@swapcase_decorator
def duplicate_words_gen(file_path):
with open(file_path, 'r') as f:
content = f.read()
    def filter_func(s: str):
        # True for words containing at least one repeated letter
        _s = s.lower()
        return bool(len(_s) - len(set(_s)))
    # NOTE: this function contains a ``yield``, so it is a generator; a bare
    # ``return <iterable>`` would end iteration immediately without yielding
    # anything. The equivalent one-liners are therefore kept only as comments:
    #   return filter(filter_func, content.split())
    #   return (i for i in content.split() if filter_func(i))
    for i in content.split():
        if filter_func(i):
            yield i
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file_path', metavar='FILE_PATH', help='enter file path')
args = parser.parse_args()
    for i in duplicate_words_gen(args.file_path):
print(i)
|
# Juice Front Middleware. This module contains a set of classes that could be
# used as middleware to Django.
import tidy
import django.conf
# The Tidy middleware prettifies HTML markup, can remove broken validation
# and a bunch of other cool stuff. This certainly gives a slight impact on
# performance, but if your caching is right, you can afford it.
#
# Debug mode is also available. Via debug mode you can view Tidy error reports
# on your old HTML by sending a 'notidy' GET variable to any URL. If DEBUG
# is switched on in Django settings, you'll find the errors listed at the end
# of your HTML output as comments.
#
# For a full list of Tidy options and sweeties, visit:
# http://tidy.sourceforge.net/docs/quickref.html
class Tidy(object):
def __init__(self):
self.tidy_options = dict(output_xhtml=True,
add_xml_decl=True,
doctype='strict',
indent='yes',
indent_spaces='4',
tidy_mark=False,
hide_comments=True,
wrap=100,
force_output=True)
# This method is called by the middleware mechanism in Django
def process_response(self, request, response):
if response['Content-Type'].split(';', 1)[0] == 'text/html':
content = response.content
content = tidy.parseString(content, **self.tidy_options)
# Notidy in DEBUG mode will attempt to Tidy the response, but doesn't
# replace the original one so that error lines and numbers in the
# errors list are correct. Use this for convenient templates validation.
if 'notidy' not in request.GET or not django.conf.settings.DEBUG:
response.content = content.__str__()
# List the errors if DEBUG is on at the end of the response text
if content.errors and django.conf.settings.DEBUG:
response.content += "<!-- Validation Errors:\n"
for error in content.errors:
response.content += "%s\n" % error.__str__()
response.content += "-->"
return response
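# To enable this middleware, add its import path to the middleware list in
# the Django settings module (the dotted path below is an assumption; adjust
# it to wherever this module lives in your project):
#
#     MIDDLEWARE_CLASSES = (
#         'juice.front.Tidy',
#         # ... other middleware ...
#     )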
|
"""
Generic tools for distributing computationally intensive tasks across multiple threads.
"""
import os
import numpy as np
import shutil
from tempfile import mkdtemp
import multiprocessing as mp
from tqdm import tqdm
from hylite import HyCloud, HyImage
from hylite import io
def _split(data, nchunks):
"""
Split the specified HyCloud instance into a number of chunks.
*Arguments*:
- data = the complete HyData object to copy and split.
- nchunks = the number of chunks to split into.
*Returns*:
    - a list of split HyData chunks.
"""
if isinstance(data, HyCloud): # special case for hyperclouds - split xyz, rgb and normals too
chunksize = int(np.floor(data.point_count() / nchunks))
chunks = [(i * chunksize, (i + 1) * chunksize) for i in range(nchunks)]
chunks[-1] = (chunks[-1][0], data.point_count()) # expand last chunk to include remainder
# split points
xyz = [data.xyz[c[0]:c[1], :].copy() for c in chunks]
# split data
bands = [None for c in chunks]
if data.has_bands():
X = data.get_raveled().copy()
bands = [X[c[0]:c[1], :] for c in chunks]
# split rgb
rgb = [None for c in chunks]
if data.has_rgb():
rgb = [data.rgb[c[0]:c[1], :].copy() for c in chunks]
# split normals
normals = [None for c in chunks]
if data.has_normals():
normals = [data.normals[c[0]:c[1], :].copy() for c in chunks]
return [HyCloud(xyz[i],
rgb=rgb[i],
normals=normals[i],
bands=bands[i],
header=data.header.copy()) for i in range(len(chunks))]
else: # just split data (for HyImage and other types)
X = data.get_raveled().copy()
chunksize = int(np.floor(X.shape[0] / nchunks))
chunks = [(i * chunksize, (i + 1) * chunksize) for i in range(nchunks)]
chunks[-1] = (chunks[-1][0], X.shape[0]) # expand last chunk to include remainder
out = []
for c in chunks:
_o = data.copy(data=False) # create copy
_o.data = X[c[0]:c[1], :][:,None,:]
out.append(_o)
return out
def _merge(chunks, shape):
"""
Merge a list of HyData objects into a combined one (aka. do the opposite of split(...)).
*Arguments*:
- chunks = a list of HyData chunks to merge.
- shape = the output data shape.
*Returns*: a single merged HyData instance (of the same type as the input).
The header of this instance will be a copy of chunks[0].header.
"""
# merge data
X = np.vstack([c.data for c in chunks])
X = X.reshape((*shape, -1))
if not isinstance(chunks[0], HyCloud): # easy!
# make copy
out = chunks[0].copy(data=False)
out.data = X
out.header = chunks[0].header.copy()
return out
else: # less easy
xyz = np.vstack([c.xyz for c in chunks])
rgb = None
if chunks[0].has_rgb():
rgb = np.vstack([c.rgb for c in chunks])
normals = None
if chunks[0].has_normals():
normals = np.vstack([c.normals for c in chunks])
return HyCloud( xyz, rgb=rgb, normals=normals, bands=X, header=chunks[0].header.copy())
def _call(func, path, arg, kwd, n):
"""
This function will be called by each thread. It loads each data chunk from disk, runs the operation, then saves
the results.
"""
# print("Spawning thread %d." % n)
# func, path, arg, kwd = args
# load data chunk
if '.ply' in path:
data = io.loadCloudPLY(path) # load point cloud
result = func(data, *arg, **kwd) # compute results
assert isinstance(result, HyCloud), "Error - function %s does not return a HyCloud." % func
io.saveCloudPLY(path, result) # save point cloud
else:
data = io.load(path) # load image
result = func(data, *arg, **kwd) # compute results
assert isinstance(result, HyImage), "Error - function %s does not return a HyImage." % func
io.save(path, result) # save result
return True # done
def parallel_chunks(function, data, *args, **kwds):
"""
Run a function that operates per-point or per-pixel on smaller chunks of a point cloud or image dataset
in parallel. Only use for expensive operations as otherwise overheads (writing files to cache, spawning threads,
loading files from cache) are too costly.
*Arguments*:
    - function = the function to run on each chunk of the dataset. Must take a HyCloud or HyImage dataset as its first
argument and also return a HyCloud or HyImage dataset (cf., mwl(...), get_hull_corrected(...)).
- data = the HyCloud or HyImage instance to run the function on.
- args = tuple of arguments to pass to the function.
**Keywords**:
- nthreads = the number of threads to spawn. Default is the number of cores - 2. Negative numbers will be subtracted
from the number of cores.
- any other keywords are passed to the function
"""
assert isinstance(data, HyCloud) or isinstance(data, HyImage)
# get number of threads
if 'nthreads' in kwds:
nthreads = kwds['nthreads']
del kwds['nthreads']
else:
nthreads = -2
    assert isinstance(nthreads, int), "Error - nthreads must be an integer."
    if nthreads < 1:
        ncpu = os.cpu_count()
        assert ncpu is not None, "Error - could not identify CPU count. Please specify nthreads keyword."
        nthreads = ncpu + nthreads  # negative values are subtracted from the core count
    assert nthreads > 0, "Error - cannot spawn %d threads" % nthreads
# split data into chunks
shape = data.data.shape[:-1] # store shape (important for images)
chunks = _split(data, nthreads)
# dump chunks into temp directory
pth = mkdtemp() # make temp directory
print("Writing thread cache to %s:" % pth)
# dump clouds to directory
paths = []
for i, c in enumerate(chunks):
if isinstance(c, HyCloud):
p = os.path.join(pth, '%d.ply' % i)
io.saveCloudPLY(p, c)
else:
p = os.path.join(pth, '%d.hdr' % i)
io.save(p, c)
paths.append(p)
# make sure we don't multithread twice when using advanced scipy/numpy functions...
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_DYNAMIC'] = 'FALSE'
# spawn worker processes
P = [mp.Process(target=_call, args=(function, p, args, kwds, i)) for i, p in enumerate(paths)]
try:
for p in P:
p.start()
for p in P:
p.join()
        # success! load data again...
if isinstance(data, HyCloud):
chunks = [io.loadCloudPLY(p) for p in paths]
else:
chunks = [io.load(p) for p in paths]
# remove temp directory
shutil.rmtree(pth) # delete temp directory
print("Process complete (thread cache cleaned successfully).")
except (KeyboardInterrupt, SystemExit) as e:
print("Job cancelled. Cleaning temp directory... ", end='')
shutil.rmtree(pth) # delete temp directory
print("Done.")
assert False, "Multiprocessing job cancelled by KeyboardInterrupt or SystemExit."
except Exception as e:
print("Error thrown. Cleaning temp directory... ", end='')
shutil.rmtree(pth) # delete temp directory
print("Done.")
raise e
# re-enable scipy/numpy multithreading
del os.environ['MKL_NUM_THREADS']
del os.environ['OMP_NUM_THREADS']
del os.environ['MKL_DYNAMIC']
# merge back into one dataset
out = _merge(chunks, shape=shape)
return out
def _call2(func, in_paths, out_paths, kwd, n):
for i, o in zip(in_paths, out_paths): # loop through paths managed by this thread
func(i, o, **kwd) # call function
def parallel_datasets(function, in_paths, out_paths=None, nthreads=-2, **kwds):
"""
Parallelise a single function across many HyData datasets.
*Arguments*:
- function = the function to run on each dataset. This should take an input path (string) as its first input
and output path (also string) as its second output. Anything returned by the function will be ignored.
- in_paths = a list of input paths, each of which will be passed to function in each thread.
- out_paths = a list of corresponding output paths that each function should write to. Defaults to in_paths.
- nthreads = the number of threads to spawn. Default is the number of cores - 2. Negative numbers are subtracted
from the total number of cores.
*Keywords*:
- any keywords are passed directly to function in each thread.
*Returns*: Nothing.
"""
assert isinstance(in_paths, list), "Error - in_paths must be a list of file paths (string)."
if out_paths is None:
out_paths = in_paths
assert isinstance(out_paths, list), "Error - out_paths must be a list of file paths (string)."
assert len(out_paths) == len(in_paths), "Error - length of input and output paths must match."
# get number of threads
assert isinstance(nthreads, int), "Error - nthreads must be an integer."
    if nthreads < 1:
        ncpu = os.cpu_count()
        assert ncpu is not None, "Error - could not identify CPU count. Please specify nthreads."
        nthreads = ncpu + nthreads  # negative values are subtracted from the core count
    assert nthreads > 0, "Error - cannot spawn %d threads" % nthreads
# distribute input paths across threads
nthreads = min( len(in_paths), nthreads ) # avoid case where we have more threads than paths
stride = int( len(in_paths) / nthreads )
inP = []
outP = []
for i in range(nthreads):
idx0 = i*stride
idx1 = min( (i+1)*stride, len(in_paths) )
inP.append( in_paths[idx0:idx1] )
outP.append( out_paths[idx0:idx1] )
for i in range(len(in_paths) % nthreads): # and add remainder
inP[i].append(in_paths[-i-1])
outP[i].append(out_paths[-i - 1])
# make sure we don't multithread twice when using advanced scipy/numpy functions...
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_DYNAMIC'] = 'FALSE'
# spawn worker processes and wait for jobs to finish
P = [mp.Process(target=_call2, args=(function, inP[i], outP[i], kwds, i)) for i in range(nthreads)]
for p in P:
p.start()
for p in P:
p.join()
# re-enable scipy/numpy multithreading
del os.environ['MKL_NUM_THREADS']
del os.environ['OMP_NUM_THREADS']
del os.environ['MKL_DYNAMIC']
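# A minimal usage sketch for parallel_datasets; smooth() and the file names
# below are assumptions for illustration only:
#
#     def smooth(in_path, out_path, window=5):
#         data = io.load(in_path)
#         # ... per-dataset processing ...
#         io.save(out_path, data)
#
#     parallel_datasets(smooth,
#                       in_paths=['scan_a.hdr', 'scan_b.hdr'],
#                       out_paths=['scan_a_smooth.hdr', 'scan_b_smooth.hdr'],
#                       nthreads=2, window=5)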
|
from __future__ import division
import pandas as pd
import Bio
from Bio.PDB import *
import urllib2
import os
import shutil
import sys
import subprocess
from ete3 import Tree
import copy
from blosum import *
from Bio.Blast import NCBIWWW
from Bio.Blast import NCBIXML
from Bio import SeqIO
from Bio.Align.Applications import ClustalOmegaCommandline
class Etree(Tree):
"""class for creating phylogenetic tree and computing conservation of mutated position"""
_names = []
alignements = dict()
_identificators = []
_IDs = dict()
_idArray = dict()
#get fasta file for entered pdb id and chain
def get_fasta(self):
fasta_file = self.name + ".fasta"
fasta_output = open(fasta_file,"w")
url_f = "https://www.rcsb.org/pdb/download/downloadFile.do?fileFormat=fastachain&compression=NO&structureId=%s&chainId=%s"%(self.name,self.chain)
#url_f = "http://www.rcsb.org/pdb/download/downloadFastaFiles.do?structureIdList=%s&compressionType=uncompressed"%self.name
try:
h = urllib2.urlopen(url_f)
        except urllib2.URLError as error:
print(error.reason)
sys.exit(1)
fasta_output.write(h.read())
fasta_output.close()
i = 0
fasta_cleaned = open(self.name+"FASTA.fasta","w")
handle = open(fasta_file,"r")
lines = iter(handle.readlines())
for line in lines:
if(line.startswith('>')):
i+=1
if(i < 2):
fasta_cleaned.write(line)
fasta_cleaned.close()
def parse_XML(self,name):
i=0
output = open(name+".txt","w")
f = open(name+".xml","r")
blast = NCBIXML.parse(f)
names = []
protein = ''
for record in blast:
for align in record.alignments:
for hsp in align.hsps:
i+= 1
protein = '>'+align.hit_id+align.hit_def
if(protein in names):
break
else:
names.append(protein)
output.write('>'+align.hit_id+align.hit_def+'\n')
output.write(hsp.sbjct+'\n') #find out
f.close()
output.close()
def run_blast(self,name):
subprocess.call(['./blastp','-query','%sFASTA.fasta'%name,'-db','nr','-outfmt','5','-out','%s.xml'%name,'-max_target_seqs','250','-remote'])
def run_clustal(self,name):
in_file = name + ".txt"
out_file = name +"clustal.fasta"
clustalomega_cline = ClustalOmegaCommandline(infile=in_file, outfile=out_file, verbose=True, auto=True)
subprocess.call(['./clustalo','-i','%s'%in_file,'--outfmt=vie','-o','%s'%out_file,'--auto','-v','--force'])
def create_newick_file(self,name):
self.get_fasta()
self.run_blast(name)
self.parse_XML(name)
self.run_clustal(name)
protein = name+'clustal.fasta'
output = name+'output'
subprocess.call(['./FastTree','-out','%s'%output,'%s'%protein])
#get pdb file for entered pdb id
def get_pdb(self,name):
protein_file = self.name + ".pdb"
pdb_output = open(protein_file, "w")
url_pdb = "https://files.rcsb.org/download/%s.pdb" %self.name
try:
handle = urllib2.urlopen(url_pdb)
        except urllib2.URLError as error:
print(error.reason)
sys.exit(1)
pdb_output.write(handle.read())
pdb_output.close()
    # parse the PDB file to find the starting residue position
def PDB_parse(self,name):
p = PDBParser()
structure = p.get_structure(self.name,self.name+".pdb")
model = structure[0]
        # try the common chain identifiers one by one
        chain = None
        for chain_id in ('A', 'B', 'C', 'I', 'X'):
            try:
                chain = model[chain_id]
                break
            except KeyError:
                continue
        if chain is None:
            print("Cannot find this type of chain.")
            sys.exit(1)
        # always returns the position of the first chain, which may not be correct
residue_list = Selection.unfold_entities(chain,'A')
#print(residue_list[0].get_full_id()[3][1])
residue_start = residue_list[0].get_full_id()[3][1]
return residue_start
def compute_conservation(self,file,residue_start,index,weightsArray,acid1):
count_mezera = 0
count_basic_acid = 0
count_mutated_acid = 0
count_else=0
all_count =0
count_pos=0
        start_position = 1  # varies
pos = 0
handle = open(file,"r")
lines = iter(handle.readlines())
for line in lines:
if(line.startswith('>')):
continue
else:
for word in line.split():
#if(word[0] == '-'):
# break
#if(word[0] == 'M'):
# count_pos -=1#-residue_start+1
if(residue_start > len(word)):
print(residue_start)
print(index)
count_pos = residue_start
print(count_pos)
for i in range(0,len(word),1):
if(word[i] != '-'):
count_pos +=1
if(count_pos == residue_start+index):
pos = i
print(word[i])
break
else:
print(residue_start)
print(index)
count_pos = residue_start
if(residue_start < 0):
chain_res = index#+residue_start #+ abs(residue_start) + abs(residue_start) -1
elif (residue_start == 1):
chain_res= index+residue_start
else:
chain_res= index+residue_start+2
for i in range(0,len(word),1):
if(word[i] != '-'):
count_pos +=1
if(count_pos == chain_res):
pos = i
#print(pos)
print(word[i])
break
break
print(pos)
conservation_value = 0
base_acid = 0
weights = 0
for name in self._names:
sequence = self._idArray[name]
acid = sequence[pos]
if(acid == acid1):
base_acid = 1
else:
base_acid= 0
weights += weightsArray[name]
conservation_value += weightsArray[name] * base_acid
accuracy = conservation_value/ weights
return accuracy
def create_ID_table(self):
"""create table where key is node name and value is sequence to speed up lookup"""
for name in self._names:
key1 = self._IDs.get(name)
seq1 = self.alignements[key1]
self._idArray[name] = seq1
def create_alignement_table(self,file):
"""creates lookup table for sequence names and sequences"""
with open(file,'r') as f:
lines = iter(f.readlines())
for line in lines:
if(line.startswith('>')):
name = line.strip('>').strip('\n')
sequence = lines.next().strip('\n')
self.alignements[name] = sequence
def create_names_table(self,file):
"""create lookup table for complete sequence ID according to its abbrevation"""
with open(file,'r') as f:
lines = iter(f.readlines())
for line in lines:
if(line.startswith('>')):
self._identificators.append(line.strip('>').strip('\n'))
for item in self._identificators:
for name in self._names:
if(name in item):
self._IDs[name] = item
def get_table_value(self,value):
"""get value from alignements table"""
return self.alignements[value]
def get_names(self):
"""get all leaf names in the tree and stores them in _names array"""
for leaf in self:
if(leaf.is_leaf()):
self._names.append(leaf.name)
def print_names(self):
"""function for printing leafs names"""
for name in self._names:
print(name)
def create_array(self):
"""creates array of weights and fills it with value according to its node"""
self.weightsArray = dict()
for name in self._names:
self.weightsArray[name] = 0
if self.name != '':
self.weightsArray[self.name] = 1
def add_node_array(self):
"""adds weights array to every node in the tree"""
for node in self.traverse('postorder'):
node.create_array()
def calculate_weights(self):
"""calculates the values in weights array in each node"""
#fudge factor constant to prevent 0 in the weights array
        fudge_factor = 0.1
#traverse the tree and compute values in each node
        for node in self.traverse('postorder'):
#get children nodes of actual node
children = node.get_children()
#if no children found, continue with next node
if not children:
continue
else:
i = 0
#array where value of multiplication for each item in array is stored
vals = [1]*250
#calculate value for each child
for child in children:
for parentItem in node._names:
result = 0
seq2 = node._idArray[parentItem]
for childItem in child._names:
#calculate probability of changing child sequence to parent sequence
seq1 = child._idArray[childItem]
probability = probability_matrix.find_pair(seq1,seq2)
                            # formula: P_i * L_i * t
                            result += probability * child.weightsArray[childItem] * (child.dist + fudge_factor)
#value from each child needs to be multiplicated
vals[i] *= result
#store actual value to weightsArray item in parent node
node.weightsArray[parentItem] = vals[i]
i+=1
i = 0
#print(node.weightsArray.values())
        return self.get_tree_root().weightsArray
|
'''module for item catalog db handlers'''
from flask import session as login_session
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Catalog, Base, CatalogItem, User
db_session = None
def SetupDB():
engine = create_engine('sqlite:///ItemCatalog.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
global db_session
db_session = DBSession()
if db_session:
print "session created"
def getCatalog():
catalog = db_session.query(Catalog).all()
return catalog
#Catalog Item
def getLatestItems():
#need to show all categories, so makes sense to show
#same number of items to make it look good.
count = db_session.query(Catalog).count()
catitems = db_session.query(CatalogItem) \
.order_by(CatalogItem.last_updated.desc()) \
.limit(count)
return catitems
def getCatalogItems():
catalogitems = db_session.query(CatalogItem).all()
return catalogitems
def addItem(item_name, item_desc, cat_id):
catitem = CatalogItem()
catitem.item_name = item_name
catitem.description = item_desc
catitem.cat_id = cat_id
catitem.user_id = login_session['userid']
db_session.add(catitem)
db_session.commit()
def getItemByName(item_name):
catitem = db_session.query(CatalogItem) \
.filter(CatalogItem.item_name == item_name)
return catitem
def getItemsByCat(cat_name):
catitems = db_session.query(CatalogItem). \
join(CatalogItem.catalog). \
filter(Catalog.cat_name == cat_name)
return catitems
def getItemById(item_id):
catitem = db_session.query(CatalogItem). \
filter(CatalogItem.item_id == item_id).one()
return catitem
def createUser(login_session):
newUser = User(name=login_session['username'], \
email=login_session['email'], \
picture=login_session['picture'])
db_session.add(newUser)
db_session.commit()
user = db_session.query(User) \
.filter_by(email=login_session['email']).one()
return user.id
def editItem(item_id,item_name, item_desc, cat_id):
try:
catitem = getItemById(item_id)
catitem.item_name = item_name
catitem.description = item_desc
catitem.cat_id = cat_id
db_session.commit()
except:
return False
return True
def getUserInfo(user_id):
user = db_session.query(User).filter_by(id=user_id).one()
return user
def getUserID(email):
try:
user = db_session.query(User).filter_by(email=email).one()
return user.id
except:
return None
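# Minimal usage sketch -- the module-level session must be created before any
# query helper is called:
#
#     SetupDB()
#     for cat in getCatalog():
#         print cat.cat_name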
|
import getopt
import socket
import sys
import os
import platform
import struct
from nfutil import *
from enumip import *
#-----------------------------------------------------------------------------
def usage():
print
print "Usage: nfcli --list --eth_addr=ADDR --ip_type=TYPE --ip_addr=IP, --netmask=MASK, --gateway=GW"
print "Search and configure Prologix GPIB-ETHERNET Controllers."
print "--help : display this help"
print "--list : search for controllers"
print "--eth_addr=ADDR : configure controller with Ethernet address ADDR"
print "--ip_type=TYPE : set controller ip address type to TYPE (\"static\" or \"dhcp\")"
print "--ip_addr=IP : set controller address to IP"
print "--netmask=MASK : set controller network mask to MASK"
print "--gateway=GW : set controller default gateway to GW"
#-----------------------------------------------------------------------------
def enumIp():
if platform.system() in ('Windows', 'Microsoft'):
        return socket.gethostbyname_ex(socket.gethostname())[2]
return enumIpUnix()
#-----------------------------------------------------------------------------
def ValidateNetParams(ip_str, mask_str, gw_str):
try:
ip = socket.inet_aton(ip_str)
except:
print "IP address is invalid."
return False
try:
mask = socket.inet_aton(mask_str)
except:
print "Network mask is invalid."
return False
try:
gw = socket.inet_aton(gw_str)
except:
print "Gateway address is invalid."
return False
# Validate network mask
# Convert to integer from byte array
mask = struct.unpack("!L", mask)[0]
# Exclude restricted masks
if (mask == 0) or (mask == 0xFFFFFFFF):
print "Network mask is invalid."
return False
# Exclude non-left-contiguous masks
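    # e.g. 255.255.255.0 -> 0xFFFFFF00: the lowest set bit is 0x100 and
    # 0xFFFFFF00 + 0x100 == 2**32 == 0 (mod 2**32), so the mask is contiguous;
    # a gapped mask such as 0xFFFFFD00 leaves a nonzero remainder.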
if (((mask + (mask & -mask)) & 0xFFFFFFFF) != 0):
print "Network mask is not contiguous."
return False
# Validate gateway address
octet1 = ord(gw[0])
# Convert to integer from byte array
gw = struct.unpack("!L", gw)[0]
# Exclude restricted addresses
# 0.0.0.0 is valid
if ((gw != 0) and ((octet1 == 0) or (octet1 == 127) or (octet1 > 223))):
print "Gateway address is invalid."
return False
# Validate IP address
octet1 = ord(ip[0])
# Convert to integer from byte array
ip = struct.unpack("!L", ip)[0]
# Exclude restricted addresses
if ((octet1 == 0) or (octet1 == 127) or (octet1 > 223)):
print "IP address is invalid."
return False
# Exclude subnet network address
if ((ip & ~mask) == 0):
print "IP address is invalid."
return False
# Exclude subnet broadcast address
if ((ip & ~mask) == (0xFFFFFFFF & ~mask)):
print "IP address is invalid."
return False
return True
#-----------------------------------------------------------------------------
#def ValidateAddress(address):
# if address is None:
# return False
# parts = address.split(".")
# if len(parts) != 4:
# return False
# try:
# for item in parts:
# if not 0 <= int(item) <= 255:
# return False
# except:
# return False
# return True
#-----------------------------------------------------------------------------
def main():
invalid_args = False
showhelp = False
search = False
ip_type = None
ip_addr = None
netmask = None
gateway = None
eth_addr = None
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['help', 'list', 'eth_addr=', 'ip_type=', 'ip_addr=', 'netmask=', 'gateway='])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
# Check for unparsed parameters
if len(args) != 0:
usage()
sys.exit(1)
for o, a in opts:
if o == "--help":
showhelp = True
elif o == "--list":
search = True
elif o == "--eth_addr":
eth_addr = a
elif o == "--ip_type":
ip_type = a
elif o == "--ip_addr":
ip_addr = a
elif o == "--netmask":
netmask = a
elif o == "--gateway":
gateway = a
if (len(opts) == 0) or (showhelp):
usage()
sys.exit(1)
if search:
if not eth_addr is None:
print "--list and --eth_addr are not compatible."
invalid_args = True
if not ip_type is None:
print "--list and --ip_type are not compatible."
invalid_args = True
if not ip_addr is None:
print "--list and --ip_addr are not compatible."
invalid_args = True
if not netmask is None:
print "--list and --netmask are not compatible."
invalid_args = True
if not gateway is None:
print "--list and --gateway are not compatible."
invalid_args = True
else:
try:
eth_addr = eth_addr.strip().replace(":", "").replace("-", "")
eth_addr = eth_addr.decode('hex')
except:
print "Invalid Ethernet address."
sys.exit(1)
if len(eth_addr) != 6:
print "Invalid Ethernet address."
sys.exit(1)
if ip_type in ["Static", "static"]:
ip_type = NF_IP_STATIC
elif ip_type in ["Dynamic", "dynamic", "Dhcp", "dhcp"]:
ip_type = NF_IP_DYNAMIC
else:
print "--ip_type must be 'static' or 'dhcp'."
sys.exit(1)
if ip_type == NF_IP_STATIC:
if not ValidateNetParams(ip_addr, netmask, gateway):
invalid_args = True
# if not ValidateIP(ip_addr):
# print "Invalid, or no, IP address specified."
# invalid_args = True
# if not ValidateIP(netmask):
# print "Invalid, or no, netmask specified."
# invalid_args = True
# if not ValidateIP(gateway):
# print "Invalid, or no, gateway address specified."
# invalid_args = True
else:
if ip_addr is None:
ip_addr = "0.0.0.0"
else:
print "--ip_addr not allowed when --ip_type=dhcp."
invalid_args = True
if netmask is None:
netmask = "0.0.0.0"
else:
print "--netmask not allowed when --ip_type=dhcp."
invalid_args = True
if gateway is None:
gateway = "0.0.0.0"
else:
print "--gateway not allowed when --ip_type=dhcp."
invalid_args = True
if invalid_args:
sys.exit(1)
global seq
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
    iplist = enumIp()
if len(iplist) == 0:
print "Host has no IP address."
sys.exit(1)
devices = {}
for ip in iplist:
print "Searching through network interface:", ip
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
port = 0
try:
s.bind((ip, port))
except socket.error, e:
print "Bind error on send socket:", e
sys.exit(1)
port = s.getsockname()[1]
r = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
r.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
r.setblocking(1)
r.settimeout(0.100)
try:
r.bind(('', port))
except socket.error, e:
print "Bind error on receive socket:", e
sys.exit(1)
d = Discover(s, r)
print
for k in d:
d[k]['host_ip'] = ip
devices[k] = d[k]
s.close()
r.close()
if search:
print
print "Found", len(devices), "Prologix GPIB-ETHERNET Controller(s)."
for key in devices:
PrintDetails(devices[key])
print
else:
if eth_addr in devices:
print "Updating network settings of Prologix GPIB-ETHERNET Controller", FormatEthAddr(eth_addr)
device = devices[eth_addr]
if (device['ip_type'] == NF_IP_STATIC) or (ip_type == NF_IP_STATIC):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
port = 0
try:
s.bind((device['host_ip'], port))
except socket.error, e:
print "Bind error on send socket:", e
sys.exit(1)
port = s.getsockname()[1]
r = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
r.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
r.setblocking(1)
r.settimeout(0.100)
try:
r.bind(('', port))
except socket.error, e:
print "Bind error on receive socket:", e
sys.exit(1)
result = Assignment(s, r, eth_addr, ip_type, ip_addr, netmask, gateway)
print
if len(result) == 0:
print "Network settings update failed."
else:
if result['result'] == NF_SUCCESS:
print "Network settings updated successfully."
else:
print "Network settings update failed."
else:
print "Prologix GPIB-ETHERNET Controller", FormatEthAddr(eth_addr), "already configured for DHCP."
else:
print "Prologix GPIB-ETHERNET Controller", FormatEthAddr(eth_addr), "not found."
#-----------------------------------------------------------------------------
if __name__ == "__main__":
main()
|
__version__ = '0.38.0'
|
# Implement function ToLowerCase() that has a string parameter str,
# and returns the same string in lowercase.
class Solution:
def toLowerCase(self, s) -> str:
return s.lower()
if __name__ == '__main__':
    test_input = 'HelLo'
    print(Solution().toLowerCase(test_input))
|
"""
Tools for MD scripts
"""
import pytraj as pt
import MDAnalysis as mda
import os
from typing import Optional, Tuple
default_mask = ""
def load_traj_mda(itraj: str, itop: Optional[str] = None) -> mda.Universe:
"""
Load trajectory (and topology) from file.
Args:
itraj (str): Trajectory file name
itop (str): Topology file name
Returns:
Returns a `mda.Universe` as trajectory
"""
print(f"Loading trajectory {os.path.basename(itraj)}...", end="")
if itop is None:
u = mda.Universe(itraj)
else:
u = mda.Universe(itop, itraj)
print(" done")
return u
def load_traj(
itraj: str, itop: Optional[str] = None, mask: str = default_mask
) -> pt.Trajectory:
"""
Load trajectory (and topology) from file.
Args:
itraj (str): Trajectory file name
itop (str): Topology file name
        mask (str): Selection mask (in `pytraj` format)
Returns:
Returns a `pt.Trajectory` as trajectory
"""
print(f'Loading trajectory {os.path.basename(itraj)} with mask "{mask}"...', end="")
traj = pt.load(itraj, itop, mask=mask)
print(" done")
return traj
def load_ref(
iref: str, itop: Optional[str] = None, mask: str = default_mask
) -> pt.Trajectory:
"""
Load reference structure (and topology) from file.
Args:
iref (str): Reference structure file name
itop (str): Topology file name
        mask (str): Selection mask (in `pytraj` format)
Returns:
Returns a `pt.Trajectory` as reference structure
Raises:
ValueError: An error occurs when the reference structure contains more than one
frame.
"""
print(f'Loading reference {os.path.basename(iref)} with mask "{mask}"...', end="")
ref = pt.load(iref, itop, mask=mask)
if ref.n_frames != 1:
raise ValueError(f"Reference structure contains {ref.n_frames} frames.")
print(" done")
return ref
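# Minimal usage sketch; the file names and the "@CA" Amber-style mask below
# are illustrative only:
#
#     u = load_traj_mda("run.nc", itop="system.parm7")
#     traj = load_traj("run.nc", itop="system.parm7", mask="@CA")
#     ref = load_ref("ref.pdb", mask="@CA")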
|
# Problem Statement :
# 9.4 Write a program to read through the mbox-short.txt and figure out who has
# sent the greatest number of mail messages. The program looks for 'From ' lines
# and takes the second word of those lines as the person who sent the mail. The
# program creates a Python dictionary that maps the sender's mail address to a
# count of the number of times they appear in the file. After the dictionary is
# produced, the program reads through the dictionary using a maximum loop to
# find the most prolific committer.
name = input("Enter file:")
if len(name) < 1:
name = "mbox-short.txt"
handle = open(name)
words = list()
email = dict()
for line in handle:
line = line.rstrip()
if not line.startswith('From '): continue
line = line.split()
#words.append(line[1])
email[line[1]] = email.get(line[1],0) + 1
#for w in words:
#email[w] = email.get(w,0) + 1
largest = -1
theemail = None
for k,v in email.items():
if v > largest:
largest = v
theemail = k
print(theemail,largest)
|
import io
import json
import os
import sys
from . import sources
class TR(object):
def __init__(self, source):
self.source = source
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv[1:]
source = get_source(argv[0])
tr = cls(source)
tr.send({'ready': True})
tr.run()
def send(self, data):
# file descriptor 3 is used for IPC.
        os.write(3, (json.dumps(data) + '\n').encode('utf-8'))
def run(self):
# file descriptor 3 is used for IPC.
# messages are split by newlines
with io.open(3, newline='\n') as f:
for line in f:
                tile = list(map(int, json.loads(line)))
self.run_tile(tile)
def run_tile(self, tile):
x, y, zoom = tile
data = self.source.fetch_tile(x, y, zoom)
value = self.mapper(x, y, zoom, data)
        self.send({'reduce': True, 'value': value, 'tile': tile})
def mapper(self, x, y, zoom, data):
raise NotImplementedError()
def get_source(arg):
specs = json.loads(arg)
for spec in specs:
db = spec.get('mbtiles')
if db:
source = sources.MBTiles(db)
return source
raise ValueError('Could not create tile source from argument "%s"' % arg)
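# TR.mapper is abstract; a concrete runner subclasses TR and fills it in.
# A minimal sketch (the counting logic is illustrative only):
#
#     class CountingTR(TR):
#         def mapper(self, x, y, zoom, data):
#             return len(data) if data else 0
#
#     if __name__ == '__main__':
#         CountingTR.main()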
|
# encoding: utf-8
"""
@ author: wangmingrui
@ time: 2019/1/30 16:32
@ desc: configuration file
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))  # root directory of the client
HOME_DIR = os.path.join(BASE_DIR, 'user_data', 'HOME')
MAX_RECV_SIZE = 1024 * 8
USER_QUATO = 1024 * 1024 * 1024 * 10  # initial user quota is 10 GB
HOST = "localhost"
PORT = 8086
ip_port = (HOST, PORT)
|
from django.db import models
# Create your models here.
class Carro(models.Model):
nombre = models.CharField(max_length=50)
precio = models.FloatField(default=1)
año = models.IntegerField(default=20)
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
"""
Given a Binary Search Tree (BST) with the root node root, return the minimum
difference between the values of any two different nodes in the tree.
1. Naive solution: traverse the tree, get in order traversal. Then find all possible
difference between pairs, which takes O(n^2)
2. For each node find the closest number to it, which takes O(nlogn), because we search
for closer number in logn time for each n nodes.
3. Do in order traversal and Compare the adjacents, keep track of min difference
"""
class Solution(object):
# smallest number possible
prev = -float('inf')
# the biggest possible difference
difference = float('inf')
def minDiffInBST(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.in_order(root)
return self.difference
    def in_order(self, node):
        if node is None:
            return
self.in_order(node.left)
self.difference = min(node.val-self.prev, self.difference)
# update the prev value to current node's val
self.prev = node.val
# move to the right side of node
self.in_order(node.right)
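# Quick illustration (TreeNode as in the commented definition above):
#
#     root = TreeNode(4)
#     root.left = TreeNode(2)
#     root.right = TreeNode(6)
#     root.left.left = TreeNode(1)
#     root.left.right = TreeNode(3)
#     print(Solution().minDiffInBST(root))  # 1, from the in-order run 1,2,3,4,6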
|
class cpairs:
def __init__(self, l, w):
self.l = l
self.w = w
def __eq__(self, other):
if not isinstance(other, cpairs):
return NotImplemented
return (self.l == other.l and self.w == other.w) or (self.l == other.w and self.w == other.l)
def __hash__(self):
return hash((self.l, self.w))
def __ne__(self, other):
# Not strictly necessary, but to avoid having both x==y and x!=y
# True at the same time
return not(self == other)
cache = dict()
def getTotal(l,w):
global cache
if(l==w):
return 1
obj = cpairs(l,w)
if obj in cache :
return cache[obj]
if(l>w):
total = getTotal(l-w, w) + getTotal(w,w)
cache[obj] = total
return total
else:
total = getTotal(l, w-l) + getTotal(l,l)
cache[obj] = total
return total
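# getTotal(l, w) counts the squares produced by repeatedly slicing the largest
# possible square off an l x w rectangle (the subtractive Euclidean algorithm),
# e.g. getTotal(5, 3) == 4: one 3x3, one 2x2 and two 1x1 squares.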
min_l = int(input())
max_l = int(input())
min_w = int(input())
max_w = int(input())
total = 0
for l in range(min_l, max_l+1):
for w in range(min_w, max_w+1):
# print("({}, {}) = {}".format(l, w, getTotal(l,w)))
total += getTotal(l,w)
print(total)
|
from pyfiglet import figlet_format
from halo import Halo
import time
from datetime import datetime, timedelta
import pygame
from functions.get_conjectures import get_conjectures, remove_duplicates
import pickle
valid_invariants = {1:'domination_number',
2:'total_domination_number',
3:'connected_domination_number',
4:'independence_number',
5:'power_domination_number',
6:'zero_forcing_number',
7:'total_zero_forcing_number',
8:'connected_zero_forcing_number',
9:'independent_domination_number',
10:'chromatic_number',
11:'matching_number',
12:'min_maximal_matching_number',
13:'clique_number'}
graph_properties = ['triameter',
'randic_index',
'augmented_randic_index',
'harmonic_index',
'atom_bond_connectivity_index',
'sum_connectivity_index',
'min_degree',
'max_degree',
'number_of_min_degree_nodes',
'number_of_max_degree_nodes',
'diameter',
'radius',
'order',
'size']
graph_families = {1:'small_connected',
2:'cubic'}#,
#3:'triangle_free',
#4:'claw_free',
#5:'triangulation',
#6:'polyhedral',
#7:'tree'}
__version__ = '0.0.2'
def main():
print(figlet_format('TxGraffiti', font='slant'))
print(figlet_format(' - LIGHT', font='slant'))
print('Version ' + __version__)
print('Copyright ' + u'\u00a9' + ' 2018 Randy Davila')
print()
print('The invariants you may conjecture against are: ')
print('----------------------------------------')
print()
i = 1
for x in valid_invariants:
print(str(i)+'.', valid_invariants[x])
i+=1
print()
print('----------------------------------------')
print()
invariant = valid_invariants[int(input('Invariant: '))]
print()
print('The families of graphs you may conjecture against are: ')
print('----------------------------------------')
print()
i = 1
for x in graph_families:
print(str(i)+'.', graph_families[x])
i +=1
print()
print('---------------------------------------------')
family = graph_families[int(input('Graph family: '))]
print()
print('---------------------------------------------')
print()
print()
print(figlet_format('TxGraffiti', font='slant'))
print(figlet_format(' - LIGHT', font='slant'))
print('Version ' + __version__)
print('Copyright ' + u'\u00a9' + ' 2018 Randy Davila')
print()
try:
with open(f'graph_data/{invariant}_{family}_conjectures', 'rb') as file:
read_data = file.read()
except FileNotFoundError as fnf_error:
print(fnf_error, '. Please make desired database.')
return None
conjectures = get_conjectures(invariant, family)
U = remove_duplicates(conjectures['upper'])
L = remove_duplicates(conjectures['lower'])
print('Upper Bounds')
for i in range(1, 10):
print(f'Conjecture {i}. {U[i]}')
print('')
print()
print('Lower Bounds')
for i in range(1, 10):
print(f'Conjecture {i}. {L[i]}')
print('')
print()
work = input('Remove conjectures? (y/n) ')
while work == 'y':
type = input('Upper or lower? (U/L) ')
index = int(input('Conjecture label? '))
if type == 'U':
U.pop(index)
else:
L.pop(index)
print('Upper Bounds')
for i in range(1, 10):
print(f'Conjecture {i}. {U[i]}')
print('')
print()
print('Lower Bounds')
for i in range(1, 10):
print(f'Conjecture {i}. {L[i]}')
print('')
print()
work = input('Remove conjectures? (y/n) ')
f = open(f'graph_data/{invariant}_{family}_conjectures', 'wb')
conj_dict = {'upper': U, 'lower': L}
pickle.dump(conj_dict, f)
f.close()
return 0
if __name__ == '__main__':
main()
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Test that the case where an action is only specified under a conditional is
# evaluated appropriately.
{
'targets': [
{
'target_name': 'extension_does_not_match_sources_and_no_action',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
'file1.in',
'file2.in',
],
'rules': [
{
'rule_name': 'assemble',
'extension': 'asm',
'outputs': [
'<(RULE_INPUT_ROOT).fail',
],
'conditions': [
# Always fails.
[ '"true"=="false"', {
'action': [
'python', '../copy-file.py', '<(RULE_INPUT_PATH)', '<@(_outputs)',
],
'process_outputs_as_sources': 1,
'message': 'test_rule',
}],
],
},
],
},
],
}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
import hashlib
import json
import time
import datetime
import datamodel as dm
from apiconfig import APIConfig
class WetterCom:
def getDateStr(self, delta_days):
#create time string of the following format: "yyyy-mm-dd"
today = datetime.date.today()
target = today + datetime.timedelta(days=delta_days)
return target.strftime("%Y-%m-%d")
def __init__(self, config):
# set up API data
self.project = config.project_name
self.apikey = config.api_key
self.city_code = config.city_code
        # create MD5 checksum to verify your identity at the wetter.com API
self.checksum = self.project+self.apikey+self.city_code
self.md5_sum = hashlib.md5(self.checksum).hexdigest()
self.url = "http://api.wetter.com/forecast/weather/city/{0}/project/{1}\
/cs/{2}/output/json".format(self.city_code, self.project, self.md5_sum)
# get date and hour to read API results
nr_days_forecast = 3
self.api_dates = []
for i in range(0, nr_days_forecast):
self.api_dates.append( self.getDateStr(i) )
self.api_hours = ["06:00", "11:00", "17:00", "23:00"]
self.api_hours_num = [6,11,17,23]
self.sampled_days = []
def get_forecast(self, wind_named=False):
forecast_status = None
# read data as JSON and transform it this way to a dict
try:
print "fetching data from API... WetterCom"
f = urllib2.urlopen(self.url)
data = json.load(f)
forecast_status = True
except:
print "Could not connect to the wetter.com server - sorry, please \
check your internet connection and possible server down times."
forecast_status = False
# read the forecast basis in the variable
if forecast_status:
#print data
del self.sampled_days[:]
for d in self.api_dates:
wds = dm.WeatherDaySample(d, self.api_hours)
for h in self.api_hours:
f = data["city"]["forecast"][d][h]
wsp = dm.WeatherSamplePoint(d, h)
wsp.setValues(f["tx"], f["tn"], f["pc"]/100.0, f["ws"])
wds.setValuesDayTimeData(wsp)
self.sampled_days.append(wds)
else:
print "WARNING: could not get forecast!"
|
from models.dilated_resnet import resnet101
model = resnet101(pretrained=True)
print(model)
|
from __future__ import unicode_literals
from django.db import models
class ChallengeTimestamp(models.Model):
"""
Challenge Timestamp model class.
"""
team = models.ForeignKey('team', on_delete=models.CASCADE, related_name='challenge_timestamps', related_query_name='challenge_timestamp')
challenge = models.ForeignKey('challenge', on_delete=models.CASCADE, related_name='challenge_timestamps', related_query_name='challenge_timestamp')
created = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name_plural = 'ChallengeTimestamps'
def __unicode__(self):
return 'timestamp {}: {}'.format(self.id, self.created)
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
from .default import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm41u&nm8r3e*bkf)5adbvdd##u#ba!r3s)b&_@4g#oj(!7^#3k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@Time : 2019/11/5 22:44
@Author : Jason.Jia
@contact: jiajunp@163.com
@Version : 1.0
@file :handlers.py
@desc :
'''
from tkinter import *
import pymysql
|
from daos.erequest_dao import ErequestDAO
from exceptions.resource_not_found import ResourceNotFound
from utils.connection_util import connection
from typing import List
from abc import ABC
from entities.erequest import Erequest
class ErequestDaoPostgres(ErequestDAO):
    def get_all_requests_by_eid(self, employee_id: int) -> List[Erequest]:  # employee_id references the employee table
sql = """ select * from erequest where employee_id =%s order by rstatus"""
cursor = connection.cursor()
cursor.execute(sql, [employee_id])
records = cursor.fetchall()
erequest = [Erequest(*record) for record in records]
if len(erequest) == 0:
raise ResourceNotFound
return erequest
def create_request(self, erequest: Erequest) -> Erequest:
sql = """insert into erequest (amount, reason, rstatus, message, employee_id)
values(%s, %s, %s,%s, %s) returning erequest_id"""
cursor = connection.cursor()
cursor.execute(sql, [erequest.amount, erequest.reason, erequest.rstatus,
erequest.message, erequest.employee_id])
connection.commit()
record = cursor.fetchone()
if record is None:
raise ResourceNotFound
erequest.erequest_id = record[0]
return erequest
    def get_all_requests(self) -> List[Erequest]:
sql = """select * from erequest order by erequest_id"""
cursor = connection.cursor()
cursor.execute(sql)
records = cursor.fetchall()
if len(records) == 0:
raise ResourceNotFound
erequest = [Erequest(*record) for record in records]
return erequest
def update_request(self, erequest: Erequest, rstatus: str, message: str) -> Erequest:
sql = """update erequest set rstatus =%s, message =%s where erequest_id =%s returning rstatus , message"""
cursor = connection.cursor()
cursor.execute(sql, [rstatus, message, erequest.erequest_id])
connection.commit()
record = cursor.fetchone()
if record is None:
raise ResourceNotFound
erequest.rstatus = record[0] # 0
erequest.message = record[1] # 1
return erequest
def get_request_by_rid(self, erequest_id: int) -> Erequest:
sql = """select * from erequest where erequest_id=%s"""
cursor = connection.cursor()
cursor.execute(sql, [erequest_id])
connection.commit()
record = cursor.fetchone()
if record is None:
raise ResourceNotFound
erequest = Erequest(*record)
return erequest
# for statistics
    def get_report_for_all(self) -> List[dict]:
        sql = """select employee.employee_id, employee.first_name, SUM(erequest.amount) as total_per_emp,
        (select sum(erequest.amount) from erequest) as total_sum,
        (select count(erequest.erequest_id) from erequest where erequest.employee_id = employee.employee_id) as emp_request_count,
        (select count(erequest_id) from erequest) as total_request_count
from employee
inner join erequest
on employee.employee_id = erequest.employee_id
group by employee.employee_id, employee.first_name"""
cursor = connection.cursor()
cursor.execute(sql)
records = cursor.fetchall()
if records is None:
raise ResourceNotFound
reports = []
for record in records:
percentage = round(((record[2] / record[3]) * 100), 2)
report = {"employeeId": record[0], "firstName": record[1], "amount": record[2], "totalSum": record[3],
"percentage": percentage, "trPere": record[4], "tRequest": record[5]}
reports.append(report)
return reports
|
import pickle
import json
import time
def to_json(python_object):
if isinstance(python_object, time.struct_time):
return {'__class__': 'time.asctime',
'__value__': time.asctime(python_object)}
if isinstance(python_object, bytes):
return {'__class__': 'bytes',
'__value__': list(python_object)}
raise TypeError(repr(python_object) + ' is not JSON serializable')
def from_json(json_object):
if '__class__' in json_object:
if json_object['__class__'] == 'time.asctime':
return time.strptime(json_object['__value__'])
if json_object['__class__'] == 'bytes':
return bytes(json_object['__value__'])
return json_object
if __name__ == '__main__':
entry = {}
entry['title'] = 'Dive into history, 2009 edition'
entry['article_link'] = 'http://diveintomark.org/archives/2009/03/27/dive-into-history-2009-edition'
entry['comments_link'] = None
entry['internal_id'] = b'\xDE\xD5\xB4\xF8'
entry['tags'] = ('diveintopython', 'docbook', 'html')
entry['published'] = True
entry['published_date'] = time.strptime('Fri Mar 27 22:20:42 2009')
with open('entry.pickle', 'wb') as f:
pickle.dump(entry, f)
with open('entry.pickle', 'rb') as f:
entry2 = pickle.load(f)
print(entry == entry2)
print(type(entry['tags']))
print(type(entry2['tags']))
with open('entry.json', 'w', encoding='utf-8') as f:
json.dump(entry, f, default=to_json)
with open('entry.json', 'r', encoding='utf-8') as f:
entry2 = json.load(f, object_hook=from_json)
print(entry == entry2)
print(type(entry['tags']))
print(type(entry2['tags']))
|
import openrouteservice as ors
import folium
#import GDest, geocodeing_2
ors_key = '5b3ce3597851110001cf62486905683bd4754a8c8c22017f27414546'
###################### Origin lat&lng
#print(geocodeing_2.ORG_lat,geocodeing_2.ORG_lng)
##################### Destination lat&lng
#print(GDest.DST_lat, GDest.DST_lng)
#######################
#coordinates = [[geocodeing_2.ORG_lng, geocodeing_2.ORG_lat], [GDest.DST_lng, GDest.DST_lat]]
coordinates = [[-86.781247, 36.163532], [-80.191850, 25.771645]]
client = ors.Client(key=ors_key)
route = client.directions(coordinates=coordinates,
profile='driving-car',
format='geojson')
map_directions = folium.Map(location=[33.77, -84.37], zoom_start=5)
folium.GeoJson(route, name='route').add_to(map_directions)
folium.LayerControl().add_to(map_directions)
map_directions  # renders inline only in a Jupyter notebook; no effect in a plain script
print(route['features'][0]['properties']['segments'][0]['distance'], 'meters')
print(route['features'][0]['properties']['segments'][0]['duration'], 'seconds\n')
print('directions')
for index, i in enumerate(route['features'][0]['properties']['segments'][0]['steps']):
print(index+1, i, '\n')
|
class DivergentSolution(Exception):
"""Raised when a calculated solution does not converge"""
def __init__(self, solver_name, *args: object) -> None:
super().__init__(("%s solution is divergent!" % solver_name), *args)
class SolutionValidation(Exception):
"""Raised when a calculated solution does not match expected results"""
|
#!/usr/bin/env python3
"""
Lambdas are anonymous functions, similar to lambdas in Scheme or JavaScript.
It is pretty much just a function without a name. Super simple!
The syntax:
create a lambda function:
lambda <parameter>: <expression>
create and execute a lambda:
(lambda <parameter>: <expression>)()
Closures are also in Python!
"""
# Example 1: Make/execute a lambda with no arguments
print("Example 1:")
print(lambda: True)
print((lambda: True)())
# Example 2: Make/execute a lambda with one argument
print("Example 2:")
print(lambda x: x * 2)
print((lambda x: x * 2)(4))
# Example 3: Assign lambda to a variable
print("Example 3:")
myfunc = lambda x: type(x) is int
print(myfunc)
print(myfunc(2))
# Example 4: Return a closure from a function
print("Example 4:")
def makeAdder(numberToAdd):
return lambda number: number + numberToAdd
add4 = makeAdder(4)
print(add4(4))
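# Example 5: Use a lambda as a sort key (one of the most common practical uses)
print("Example 5:")
words = ["banana", "fig", "apple"]
print(sorted(words, key=lambda w: len(w)))  # ['fig', 'apple', 'banana']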
|
import torch
from torch.utils.data import Dataset
import cv2
from PIL import Image
from torchvision import transforms, utils
import matplotlib.pyplot as plt
from sklearn import preprocessing
import ast
def create_inout_sequence(input_data, i, tw):
train_seq = [seq[0] for seq in input_data[i:i+tw]]
train_label = input_data[i+tw][1]
return (train_seq, train_label)
class OrbDataset(Dataset):
def __init__(self, orb_file, speed_file, sequence_length, size, context):
self.speed = []
self.orbs = []
self.seq_len = sequence_length
with open(speed_file) as f:
lines = f.read()
lines = lines.split("\n")
if context == "train":
lines = lines[:int(len(lines)*size)]
if context == "test":
lines = lines[-int(len(lines)*size):]
        self.speed = [[float(line)] for line in lines if line.strip()]
with open(orb_file) as f:
lines = f.read()
lines = lines.split("\n")
if context == "train":
lines = lines[:int(len(lines)*size)]
if context == "test":
lines = lines[-int(len(lines)*size):]
        self.orbs = [ast.literal_eval(line.strip()) for line in lines if line.strip()]
self.inout_seq = list(zip(self.orbs, self.speed))
def __len__(self):
return len(self.speed) - self.seq_len
def __getitem__(self, index):
seq = create_inout_sequence(self.inout_seq, index, self.seq_len)
return {"orbs":seq[0], "speed":seq[1]}
if __name__ == "__main__":
od = OrbDataset("keypoints.txt", "train.txt",20,0.1,"train")
od[0]
od[1]
pass
|
import random
from typing import Callable
def findAnyInSet(source: set, predicate: Callable[..., bool]):
try:
return random.choice([item for item in source if predicate(item)])
except IndexError:
return None
def findFirstInSet(source: set, predicate: Callable[..., bool]):
    # lazily return the first matching item (note: a set has no defined order)
    return next((item for item in source if predicate(item)), None)
def findAllInSet(source: set, predicate: Callable[..., bool]) -> set:
return {item for item in source if predicate(item)}
class IdentifierGenerator:
def __init__(self, seed: str):
self.SEED = seed
random.seed(seed)
def getIdentifier(self) -> str:
return str(random.randint(0, 20000))
class Rule:
def __init__(self, identifier: str, firstToken: str, followingToken: str, isTerminal: bool = False, isInitial: bool = False):
self.isTerminal = isTerminal
self.isInitial = isInitial
self.identifier = identifier
self.firstToken = firstToken
self.followingToken = followingToken
def generate(self, rules: set):
if (self.isTerminal):
return self.firstToken
        else:
            # non-terminal: expand both referenced rules and concatenate
            first = findAnyInSet(rules, lambda rule: rule.identifier == self.firstToken)
            following = findAnyInSet(rules, lambda rule: rule.identifier == self.followingToken)
            return first.generate(rules) + following.generate(rules)
def formatRule(rule: Rule) -> str:
return f'{rule.identifier} {"i" if rule.isInitial else ""}{"t" if rule.isTerminal else ""}-> ({rule.firstToken}, {rule.followingToken})'
def formatRules(rules) -> list:
    return [formatRule(rule) for rule in rules]
class Grammar:
    def __init__(self, identifierGenerator: IdentifierGenerator, rules: set = None):
        self.__ig = identifierGenerator
        # a set() default argument would be shared across Grammar instances,
        # so create a fresh set when none is given
        self.__rules = rules if rules is not None else set()
def generate(self, count:int) -> list[str]:
generated = []
for i in range(count):
initialRule = findAnyInSet(self.__rules, lambda rule: rule.isInitial == True)
if (initialRule is not None):
generated.append(initialRule.generate(self.__rules))
else:
break
return generated
def teachExact(self, inputStr: str):
inputStrIter = iter(inputStr)
currentChar = next(inputStrIter, None)
nextChar = next(inputStrIter, None)
isInitial = True
currentRuleIdentifier = self.__ig.getIdentifier()
nextRuleIdentifier = self.__ig.getIdentifier()
currentRule = None
while(currentChar is not None):
if (currentChar is not None):
terminalRule = findFirstInSet(self.__rules, lambda rule, currentChar = currentChar: (rule.isTerminal == True) and (rule.firstToken == currentChar))
if (terminalRule is None):
terminalIdentifier = self.__ig.getIdentifier()
terminalRule = Rule(
identifier = terminalIdentifier,
firstToken= currentChar,
followingToken= None,
isTerminal= True,
isInitial= False
)
print('creating Rule', formatRule(terminalRule))
self.__rules.add(terminalRule)
# currentRule = terminalRule
if (nextChar is not None):
currentRule = findFirstInSet(self.__rules,\
lambda rule, terminalRule = terminalRule: (rule.isTerminal == False)\
and (rule.firstToken == terminalRule.identifier)\
and ((rule.identifier == currentRuleIdentifier) \
or ((isInitial == True) and (rule.isInitial == True)) )
)
if (currentRule is None):
currentRule = Rule(
identifier= currentRuleIdentifier,
firstToken= terminalRule.identifier,
followingToken= nextRuleIdentifier,
isTerminal= False,
isInitial= isInitial
)
print('creating Rule', formatRule(currentRule))
self.__rules.add(currentRule)
else:
print('reused Rule', formatRule(currentRule))
nextRuleIdentifier = currentRule.followingToken
else:
if (currentRule is not None):
                    # at least one rule has been created;
                    # point the previous rule's followingToken at the terminal
                    # rule of the last char instead of a generated identifier
                    # that no rule defines
if (findFirstInSet(self.__rules, lambda rule, followingToken = previousRule.followingToken: rule.identifier == followingToken) is None):
print(f'followingToken of previousRule ({currentRule.identifier}) is set to {terminalRule.identifier}')
previousRule.followingToken = terminalRule.identifier
else:
currentRule = Rule(
identifier= previousRule.identifier,
firstToken= previousRule.firstToken,
followingToken= terminalRule.identifier,
isTerminal= False,
isInitial= previousRule.isInitial
)
print('creating ending Rule', formatRule(currentRule))
self.__rules.add(currentRule)
else:
#happens when single char word appears and no initial rules have been created
terminalRule.isInitial = True
isInitial = False
previousRule = currentRule
currentRuleIdentifier = nextRuleIdentifier
nextRuleIdentifier = self.__ig.getIdentifier()
currentChar = nextChar
nextChar = next(inputStrIter, None)
def getDuplicatedRules(self) -> list[Rule]:
iterator = iter(self.__rules)
firstRule = next(iterator, None)
while (firstRule is not None):
            secondRule = findFirstInSet(
                self.__rules,
                lambda secondRule, firstRule=firstRule:
                    (secondRule.identifier != firstRule.identifier)
                    and (secondRule.firstToken == firstRule.firstToken)
                    and (secondRule.followingToken == firstRule.followingToken)
                    and (secondRule.isInitial == firstRule.isInitial))
if secondRule is not None:
print('found duplicated rules', formatRules([firstRule, secondRule]))
return [firstRule, secondRule]
firstRule = next(iterator, None)
return []
def mergeDuplicatedRules(self, rule1, rule2):
print('removing duplicated rule', formatRules([rule1]))
for rule in self.__rules:
if (rule != rule1):
if (rule.identifier == rule1.identifier):
rule.identifier = rule2.identifier
if (rule.firstToken == rule1.identifier):
rule.firstToken = rule2.identifier
if (rule.followingToken == rule1.identifier):
rule.followingToken = rule2.identifier
self.__rules.remove(rule1)
del rule1
def mergeAllDuplicatedRules(self):
print('rules before mergeAllDuplicatedRules', formatRules(self.__rules))
isMerged = True
while (isMerged):
isMerged = False
duplicatedRules = self.getDuplicatedRules()
if (len(duplicatedRules) != 0):
isMerged = True
self.mergeDuplicatedRules(duplicatedRules[0], duplicatedRules[1])
print('rules after mergeAllDuplicatedRules', formatRules(self.__rules))
def teach(self, inputStrs: list[str]):
inputStrs.sort(reverse=True, key=len)
for inputStr in inputStrs:
self.teachExact(inputStr)
self.mergeAllDuplicatedRules()
separator = ' '
with open('translation.txt', 'r') as file:
data = file.read().replace('\n', '')
inputStrings = data.split(separator)
grammar = Grammar(IdentifierGenerator('1'))
grammar.teach(inputStrings)
with open('output.txt', 'w') as file:
file.write(' '.join(grammar.generate(30)))
|
from tkinter import *
from PIL import ImageTk, Image
import shutil
import os
from tkinter import filedialog
from tkinter import messagebox as mb
import easygui
# Major functions of file manager
# open a file box window
# when we want to select a file
def open_window():
read=easygui.fileopenbox()
return read
# open file function
def open_file():
string = open_window()
try:
os.startfile(string)
    except OSError:
        mb.showinfo('confirmation', "File not found!")
# copy file function
def copy_file():
source1 = open_window()
destination1=filedialog.askdirectory()
shutil.copy(source1,destination1)
mb.showinfo('confirmation', "File Copied !")
# delete file function
def delete_file():
del_file = open_window()
if os.path.exists(del_file):
os.remove(del_file)
else:
mb.showinfo('confirmation', "File not found !")
# rename file function
def rename_file():
chosenFile = open_window()
path1 = os.path.dirname(chosenFile)
extension=os.path.splitext(chosenFile)[1]
print("Enter new name for the chosen file")
newName=input()
path = os.path.join(path1, newName+extension)
print(path)
os.rename(chosenFile,path)
mb.showinfo('confirmation', "File Renamed !")
# move file function
def move_file():
source = open_window()
destination =filedialog.askdirectory()
if(source==destination):
mb.showinfo('confirmation', "Source and destination are same")
else:
shutil.move(source, destination)
mb.showinfo('confirmation', "File Moved !")
# function to make a folder
def make_folder():
    parent = filedialog.askdirectory()
    print("Enter name for the new folder")
    new_folder = input()
    os.mkdir(os.path.join(parent, new_folder))
    mb.showinfo('confirmation', "Folder Created !")
# function to remove a folder
def remove_folder():
delFolder = filedialog.askdirectory()
os.rmdir(delFolder)
mb.showinfo('confirmation', "Folder Deleted !")
# function to list all the files in folder
def list_files():
folderList = filedialog.askdirectory()
sortlist=sorted(os.listdir(folderList))
i=0
print("Files in ", folderList, "folder are:")
while(i<len(sortlist)):
print(sortlist[i]+'\n')
i+=1
# UI for file manager
root = Tk()
# creating label and buttons to perform operations
Label(root, text="File Manager", font=("Helvetica", 16), fg="blue").grid(row = 5, column = 2)
Button(root, text = "Open a File", command = open_file).grid(row=20, column =2)
Button(root, text = "Copy a File", command = copy_file).grid(row = 20, column = 4)
Button(root, text = "Delete a File", command = delete_file).grid(row = 40, column = 2)
Button(root, text = "Rename a File", command = rename_file).grid(row = 40, column = 4)
Button(root, text = "Move a File", command = move_file).grid(row = 60, column =2)
Button(root, text = "Make a Folder", command = make_folder).grid(row = 60, column = 4)
Button(root, text = "Remove a Folder", command = remove_folder).grid(row = 80, column =2)
Button(root, text = "List all Files in Directory", command = list_files).grid(row = 80,column = 4)
root.mainloop()
|
from random import randint
'''
This function takes two parameters:
lst: the list to be sorted
reverse: defaults to True if not provided
reverse = True  sorts from largest to smallest
reverse = False sorts from smallest to largest
'''
def bubbleSort(lst, reverse=True):
    # get the length of the list
    length = len(lst)
    for i in range(0, length):
        for j in range(0, length - i - 1):
            # compare adjacent elements and swap when out of order:
            # ascending by default, descending when reverse=True
            if (lst[j] < lst[j + 1]) if reverse else (lst[j] > lst[j + 1]):
                lst[j], lst[j + 1] = lst[j + 1], lst[j]
# build a list of 20 random integers
lst = [randint(1, 100) for i in range(20)]
# print the list before sorting
print('Before sort:\n', lst)
# sort
bubbleSort(lst, True)
# print the list after sorting
print('After sort:\n', lst)
|
# My naive solution
# Python strings are immutable, so an extra result list is unavoidable
class Solution:
def reverseWords(self, s: str) -> str:
temp = s.split(' ')
temp.reverse()
temp = [x.strip() for x in temp if x.strip() != '']
print(temp)
return ' '.join(x for x in temp)
class Solution:
def reverseWords(self, s: str) -> str:
return " ".join(s.split()[::-1])
# only in C++ can this reach O(1) extra space
# the idea: first strip leading/trailing spaces and collapse repeated inner spaces
# then reverse the whole string, and finally reverse each word back
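# A rough sketch of that two-pass idea in Python (illustrative only; it uses a
# mutable character list since Python strings are immutable, whereas C++ can
# perform the same reversals in O(1) extra space):
def reverse_words_inplace(s: str) -> str:
    chars = list(' '.join(s.split()))   # normalize the spaces first
    chars.reverse()                     # reverse the whole string
    start = 0
    for i in range(len(chars) + 1):     # then reverse each word back
        if i == len(chars) or chars[i] == ' ':
            chars[start:i] = reversed(chars[start:i])
            start = i + 1
    return ''.join(chars)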
|
from flask import Blueprint, jsonify, request
from ..services.news_service import NewsService
import time
import datetime
import json
news_bp = Blueprint('news_routes', __name__, url_prefix='/api/v1/news')
news_service = NewsService()
@news_bp.route('', methods=['POST'])
def save_news():
articles = request.get_json()
news_service.save_articles(articles)
return "Articles saved", 200
@news_bp.route('/scrape', methods=["GET"])
def get_news():
if request.args.get('company_name'):
news_service = NewsService()
articles = news_service.scrape(**request.args)
return jsonify(articles), 200
return "No company name provided", 400
|
'''
We create the fibonacci sequence below.
As a refresher, the fibonacci sequence is a recursive sequence in which each term is the sum of the previous two terms.
Here, instead of recomputing those terms, we implement MEMOIZATION using built-in python tools that make memoizing trivial.
'''
from functools import lru_cache # lru cache stands for least-recently used cache
# in order to imbue the powers of the lru_cache you do it as such:
@lru_cache(maxsize=1000) # with maxsize=1000, python caches the 1000 most recently used values (the default is 128)
def fibonacci(n):
# if we put 0 or -1 into the function, we will get an error
# check to make sure that we input a positive integer
if type(n) != int:
raise TypeError("Input must be an integer")
if n < 1:
raise ValueError("Use a positive integer")
# Compute the Nth Term
if n == 1:
return 1
elif n == 2:
return 1
elif n > 2:
return fibonacci(n-1) + fibonacci(n-2)
for n in range(1, 1001):
print(n, ":", fibonacci(n))
# print(fibonacci(3.2))  # raises TypeError: Input must be an integer
# print(fibonacci(-1))
# print(fibonacci(0))
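# To verify the memoization is actually kicking in, lru_cache exposes cache
# statistics; hits should vastly outnumber misses after the loop above:
# print(fibonacci.cache_info())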
|
from openpyxl import worksheet
from openpyxl.utils import get_column_letter
from src.core import config
from src.utils import connect_to_wb
@connect_to_wb
def save_to_excel(
ws: worksheet,
cleared_dict: dict,
offset: int,
exchange: str,
) -> None:
ws.cell(
column=offset + 1,
row=1,
value='Дата',
).style = config.HEADER_STYLE
ws.cell(
column=offset + 2,
row=1,
value=f'Курс {exchange}',
).style = config.HEADER_STYLE
ws.cell(
column=offset + 3,
row=1,
value='Изменение',
).style = config.HEADER_STYLE
for row, date in enumerate(cleared_dict.keys()):
try:
exchange_rate = float(cleared_dict[date][0]['value'])
difference = exchange_rate - float(cleared_dict[date][1]['value'])
color = config.RED_FILL if difference <= 0 else config.GREEN_FILL
except ValueError:
exchange_rate = float(cleared_dict[date][1]['value'])
difference = 0
color = config.BASE_FILL
except IndexError:
exchange_rate = float(cleared_dict[date][0]['value'])
difference = 0
color = config.BASE_FILL
ws.cell(
column=offset + 1,
row=row + 2,
value=date,
).style = config.BASE_STYLE
cell_rate = ws.cell(
column=offset + 2,
row=row + 2,
value=exchange_rate,
)
cell_rate.style = config.BASE_STYLE
cell_rate.number_format = f'₽{config.NUMBER_FORMAT}'
cell_diff = ws.cell(
column=offset + 3,
row=row + 2,
value=difference,
)
cell_diff.number_format = f'₽{config.NUMBER_FORMAT}'
cell_diff.border = config.THIN_BORDER
cell_diff.fill = color
@connect_to_wb
def update_data(ws: worksheet) -> int:
for index, row in enumerate(ws.iter_rows()):
if index == 0:
continue
usd, eur = 0, 0
for cell in row:
if cell.column == 2:
usd = cell.value
elif cell.column == 5:
eur = cell.value
try:
mid_market_rate = eur / usd
except ZeroDivisionError:
mid_market_rate = 0
res_col = ws.cell(
column=7,
row=index + 1,
value=mid_market_rate,
)
res_col.number_format = config.NUMBER_FORMAT
res_col.style = config.BASE_STYLE
ws.cell(column=7, row=1, value='Средний курс').style = config.HEADER_STYLE
for column_cells in ws.columns:
length = max(len(str(cell.value) or '') for cell in column_cells)
ws.column_dimensions[
get_column_letter(column_cells[0].column)
].width = (length * config.RIGHT_INDENT)
return index + 1
|
#Code to find the permutations of a string
def swap( a, b):
temp=a
a=b
b=temp
|
import re
pattern = r"(=|\/)([A-Z][A-Za-z]{2,})\1"
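# \1 is a backreference: the closing delimiter must match the opening one
# (=Name= or /Name/), and group 2 captures the location name itself
# (a capital letter followed by at least two more letters)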
locations_on_map = input()
travel_points = 0
valid_locations = re.findall(pattern, locations_on_map)
destinations = []
for valid_location in valid_locations:
destination = valid_location[1]
travel_points += len(destination)
destinations.append(destination)
print(f"Destinations: {', '.join(destinations)}")
print(f"Travel Points: {travel_points}")
|
import sys
import os
import logging
from datetime import datetime
# Logging Levels
# https://docs.python.org/3/library/logging.html#logging-levels
# CRITICAL 50
# ERROR 40
# WARNING 30
# INFO 20
# DEBUG 10
# NOTSET 0
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(name)s - %(message)s')
def set_up_logging():
file_path = sys.modules[__name__].__file__
project_path = os.path.dirname(
os.path.dirname(os.path.dirname(file_path))
)
print(project_path)
log_location = project_path + '/logs/'
if not os.path.exists(log_location):
os.makedirs(log_location)
current_time = datetime.now()
current_date = current_time.strftime('%Y-%m-%d')
file_name = current_date + '.log'
file_location = log_location + file_name
with open(file_location, 'a+'):
pass
logger = logging.getLogger(__name__)
log_format = '[%(asctime)s] [%(levelname)s] [%(message)s] [--> %(pathname)s [%(process)d]:]'
    # To store in file. force=True (Python 3.8+) is required because the
    # module-level basicConfig() above already configured the root logger,
    # and later basicConfig() calls are ignored otherwise.
    logging.basicConfig(
        format=log_format,
        filemode='a+',
        filename=file_location,
        level=logging.DEBUG,
        force=True)
# To print only
# logging.basicConfig(format=log_format, level=logging.DEBUG)
return logger
|
# -*- coding: utf-8 -*-
import sys,os,time
from test44 import clss
class cls2:
t= None
def __init__(self):
print 'cls2'
def p2(self):
c= clss()
print c.t
|
import sqlalchemy
from sqlalchemy_serializer import SerializerMixin
from database.data import db_session
class Lesson(db_session.SqlAlchemyBase, SerializerMixin):
__tablename__ = "lessons"
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
name = sqlalchemy.Column(sqlalchemy.String, nullable=True)
content_file = sqlalchemy.Column(sqlalchemy.String, nullable=True)
list_id_tasks = sqlalchemy.Column(sqlalchemy.String)
|
"""
The fields module provides a number of data structures and functions to represent continuous, spatially varying data.
All fields are subclasses of `Field` which provides abstract functions for sampling field values at physical locations.
The most important field types are:
* `CenteredGrid` embeds a tensor in the physical space. Uses linear interpolation between grid points.
* `StaggeredGrid` samples the vector components at face centers instead of at cell centers.
* `Noise` is a function that produces a procedurally generated noise field
Use `grid()` to create a `Grid` from data or by sampling another `Field` or `phi.geom.Geometry`.
Alternatively, the `phi.physics.Domain` class provides convenience methods for grid creation.
All fields can be sampled at physical locations or volumes using `sample()` or `reduce_sample()`.
See the `phi.field` module documentation at https://tum-pbs.github.io/PhiFlow/Fields.html
"""
from ._field import Field, SampledField, sample, reduce_sample, resample, as_extrapolation
from ._mask import HardGeometryMask, SoftGeometryMask as GeometryMask, SoftGeometryMask
from ._grid import Grid, CenteredGrid, StaggeredGrid
from ._point_cloud import PointCloud
from ._noise import Noise
from ._angular_velocity import AngularVelocity
from phiml.math import (
abs, sign, round, ceil, floor, sqrt, exp, is_finite as isfinite, is_finite, real, imag, sin, cos, cast, to_float, to_int32, to_int64, convert,
stop_gradient,
jit_compile, jit_compile_linear, gradient as functional_gradient, jacobian, gradient,
solve_linear, solve_nonlinear, minimize,
l2_loss, l1_loss, frequency_loss,
unstack, stack, concat # expand, rename_dims, pack_dims, unpack_dims
)
from ._field_math import (
assert_close,
bake_extrapolation,
laplace, spatial_gradient, divergence, stagger, curl, # spatial operators
fourier_poisson, fourier_laplace,
mean, pad, shift, normalize, center_of_mass,
concat, stack,
where, maximum, minimum,
vec_squared, vec_length as vec_abs, vec_length,
downsample2x, upsample2x,
finite_fill,
native_call,
integrate,
pack_dims,
support, mask,
connect, connect_neighbors,
)
from ._field_io import write, read
from ._scene import Scene
__all__ = [key for key in globals().keys() if not key.startswith('_')]
__pdoc__ = {
'Grid.__init__': False,
'Scene.__init__': False,
}
|
liste = []
n = 0
a = 0
i = 0
k = 0
note_max = 0
note_min = 20
snote = 0
moyenne = 0
# grades are read until a negative number is entered
while a == 0:
    print("enter a grade: ", end="")
    n = int(input())
    liste.append(n)
    print("there are", len(liste), "grade(s)")
    while i < len(liste):
        if liste[i] > note_max:
            note_max = liste[i]
        if liste[i] < note_min:
            note_min = liste[i]
        i = i + 1
    print("the highest grade is", note_max)
    print("the lowest grade is", note_min)
    while k < len(liste):
        snote = snote + liste[k]
        moyenne = snote / len(liste)
        k = k + 1
    print("the average grade is", moyenne)
    if n < a:
        a = a + 1
|
from random import randint
num = int(input('Write a number from 1 to 3, where: 1-rock, 2-scissors, 3-paper: '))
num2 = randint(1, 3)
if num == num2:
print(f'Computer draws {num2}. Draw')
elif num == 1 and num2 == 2:
print(f'Computer draws {num2}. You win!')
elif num == 1 and num2 == 3:
print(f'Computer draws {num2}. Computer wins!')
elif num == 2 and num2 == 1:
print(f'Computer draws {num2}. Computer wins!')
elif num == 2 and num2 == 3:
print(f'Computer draws {num2}. You win!')
elif num == 3 and num2 == 1:
print(f'Computer draws {num2}. You win!')
elif num == 3 and num2 == 2:
print(f'Computer draws {num2}. Computer wins!')
|
def portrayCell(cell):
'''
This function is registered with the visualization
server to be called each tick to indicate how to draw the cell in its current state.
:param cell: the cell in the simulation
:return: the portrayal dictionary.
'''
    assert cell is not None
    return {
        'Shape': 'rect',
'w': 1,
'h': 1,
'Filled': 'true',
'Layer': 0,
'x': cell.x,
'y': cell.y,
'Color': 'black' if cell.isAlive else 'white',
}
|
import unittest
import HtmlTestRunner
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from fixtures.params import CHROME_EXECUTABLE_PATH, DOMAIN, JPG_500_kb_path, Pdf_file_path, JPG_2_Mb_path
from pages.add_photograph_page import AddPhotographPage
from pages.contact_details_page import ContactDetailsPage
from pages.emergency_contacts_page import EmergencyContactsPage
from pages.login_page import LoginPage
from pages.personal_details_page import PersonalDetailsPage
class EmergencyContactsTestCase(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(executable_path=CHROME_EXECUTABLE_PATH)
self.driver.get(DOMAIN)
self.wait = WebDriverWait(self.driver, 2)
self.login_page = LoginPage(self.driver)
self.personal_details_page = PersonalDetailsPage(self.driver)
self.add_photograph_page = AddPhotographPage(self.driver)
self.contact_details_page = ContactDetailsPage(self.driver)
self.emergency_contacts_page = EmergencyContactsPage(self.driver)
def tearDown(self):
self.driver.quit()
def test_16_add_emergency_contacts(self):
name = 'Emer'
relationship = 'wife'
home_phone = '123456789'
mobile_phone = '987654321'
work_phone = '123366654'
self.login_page.login()
self.login_page.get_welcome_massage()
self.personal_details_page.goto_page()
self.emergency_contacts_page.goto_page()
self.emergency_contacts_page.add_contact_button()
self.emergency_contacts_page.set_name(name)
self.emergency_contacts_page.set_relationship(relationship)
self.emergency_contacts_page.set_home_phone(home_phone)
self.emergency_contacts_page.set_mobile_phone(mobile_phone)
self.emergency_contacts_page.set_work_phone(work_phone)
self.emergency_contacts_page.save_button()
self.wait.until(expected_conditions.text_to_be_present_in_element((By.CSS_SELECTOR, ".message.success"),'Successfully Saved'))
table_name = self.driver.find_element_by_xpath('//*[@id="emgcontact_list"]/tbody/tr[1]/td[2]/a')
self.assertTrue(table_name.text == name)
# deleting created emergency contact
self.driver.find_element_by_css_selector("td>input").click()
self.driver.find_element_by_id("delContactsBtn").click()
self.wait.until(expected_conditions.text_to_be_present_in_element((By.CSS_SELECTOR, ".message.success"),'Successfully Deleted'))
    def test_17_emergency_name_required(self):
driver = self.driver
relationship = 'wife'
home_phone = '123456789'
mobile_phone = '987654321'
work_phone = '123366654'
self.login_page.login()
self.login_page.get_welcome_massage()
self.personal_details_page.goto_page()
self.emergency_contacts_page.goto_page()
self.emergency_contacts_page.add_contact_button()
self.emergency_contacts_page.set_relationship(relationship)
self.emergency_contacts_page.set_home_phone(home_phone)
self.emergency_contacts_page.set_mobile_phone(mobile_phone)
self.emergency_contacts_page.set_work_phone(work_phone)
self.emergency_contacts_page.save_button()
self.assertTrue(driver.find_element_by_xpath('//*[@id="frmEmpEmgContact"]/fieldset/ol/li[1]/span').text == 'Required')
    def test_18_emergency_relationship_required(self):
driver = self.driver
name = 'Emer'
home_phone = '123456789'
mobile_phone = '987654321'
work_phone = '123366654'
self.login_page.login()
self.login_page.get_welcome_massage()
self.personal_details_page.goto_page()
self.emergency_contacts_page.goto_page()
self.emergency_contacts_page.add_contact_button()
self.emergency_contacts_page.set_name(name)
self.emergency_contacts_page.set_home_phone(home_phone)
self.emergency_contacts_page.set_mobile_phone(mobile_phone)
self.emergency_contacts_page.set_work_phone(work_phone)
self.emergency_contacts_page.save_button()
self.assertTrue(driver.find_element_by_xpath('//*[@id="frmEmpEmgContact"]/fieldset/ol/li[2]/span').text == 'Required')
def test_19_emergency_one_phone_enough(self):
name = 'Emer'
relationship = 'wife'
home_phone = '123456789'
self.login_page.login()
self.login_page.get_welcome_massage()
self.personal_details_page.goto_page()
self.emergency_contacts_page.goto_page()
self.emergency_contacts_page.add_contact_button()
self.emergency_contacts_page.set_name(name)
self.emergency_contacts_page.set_relationship(relationship)
self.emergency_contacts_page.set_home_phone(home_phone)
self.emergency_contacts_page.save_button()
self.wait.until(expected_conditions.text_to_be_present_in_element((By.CSS_SELECTOR, ".message.success"),
'Successfully Saved'))
    def test_20_emergency_one_phone_required(self):
driver = self.driver
name = 'Emer'
relationship = 'wife'
self.login_page.login()
self.login_page.get_welcome_massage()
self.personal_details_page.goto_page()
self.emergency_contacts_page.goto_page()
self.emergency_contacts_page.add_contact_button()
self.emergency_contacts_page.set_name(name)
self.emergency_contacts_page.set_relationship(relationship)
self.emergency_contacts_page.save_button()
self.assertTrue(driver.find_element_by_xpath('//*[@id="frmEmpEmgContact"]/fieldset/ol/li[3]/span').text == 'At least one phone number is required')
    def test_21_add_multiple_emg_contacts(self):
name = 'Emer'
relationship = 'wife'
home_phone = '123456789'
mobile_phone = '987654321'
work_phone = '123366654'
self.login_page.login()
self.login_page.get_welcome_massage()
self.personal_details_page.goto_page()
self.emergency_contacts_page.goto_page()
self.emergency_contacts_page.add_contact_button()
self.emergency_contacts_page.set_name(name)
self.emergency_contacts_page.set_relationship(relationship)
self.emergency_contacts_page.set_home_phone(home_phone)
self.emergency_contacts_page.set_mobile_phone(mobile_phone)
self.emergency_contacts_page.set_work_phone(work_phone)
self.emergency_contacts_page.save_button()
self.wait.until(expected_conditions.text_to_be_present_in_element((By.CSS_SELECTOR, ".message.success"),'Successfully Saved'))
self.emergency_contacts_page.add_contact_button()
self.emergency_contacts_page.set_name(name)
self.emergency_contacts_page.set_relationship(relationship)
self.emergency_contacts_page.set_home_phone(home_phone)
self.emergency_contacts_page.set_mobile_phone(mobile_phone)
self.emergency_contacts_page.set_work_phone(work_phone)
self.emergency_contacts_page.save_button()
self.wait.until(expected_conditions.text_to_be_present_in_element((By.CSS_SELECTOR, ".message.success"),
'Successfully Saved'))
# deleting all emergency contacts
self.driver.find_element_by_id('checkAll').click()
self.driver.find_element_by_id("delContactsBtn").click()
self.wait.until(expected_conditions.text_to_be_present_in_element((By.CSS_SELECTOR, ".message.success"),
'Successfully Deleted'))
def test_22_delete_emg_contacts(self):
name = 'Emer'
relationship = 'wife'
home_phone = '123456789'
self.login_page.login()
self.login_page.get_welcome_massage()
self.personal_details_page.goto_page()
self.emergency_contacts_page.goto_page()
self.emergency_contacts_page.add_contact_button()
self.emergency_contacts_page.set_name(name)
self.emergency_contacts_page.set_relationship(relationship)
self.emergency_contacts_page.set_home_phone(home_phone)
self.emergency_contacts_page.save_button()
self.wait.until(expected_conditions.text_to_be_present_in_element((By.CSS_SELECTOR, ".message.success"),
'Successfully Saved'))
self.driver.find_element_by_css_selector("td>input").click()
self.driver.find_element_by_id("delContactsBtn").click()
self.wait.until(expected_conditions.text_to_be_present_in_element((By.CSS_SELECTOR, ".message.success"),
'Successfully Deleted'))
def test_23_add_attachment_emg_contacts(self):
file_path = Pdf_file_path
self.login_page.login()
self.login_page.get_welcome_massage()
self.personal_details_page.goto_page()
self.emergency_contacts_page.goto_page()
self.emergency_contacts_page.add_attachment_button()
self.emergency_contacts_page.choose_file(file_path)
self.emergency_contacts_page.upload_button()
self.wait.until(expected_conditions.text_to_be_present_in_element((By.CSS_SELECTOR, ".message.success"),'Successfully Saved'))
# cleanup_deleting attachment
self.driver.find_element_by_css_selector("#tblAttachments > tbody > tr.odd > td.center > input").click()
self.driver.find_element_by_id("btnDeleteAttachment").click()
self.wait.until(expected_conditions.text_to_be_present_in_element((By.CSS_SELECTOR, ".message.success"),
'Successfully Deleted'))
def test_24_delete_attachment_emg_contacts(self):
file_path = Pdf_file_path
self.login_page.login()
self.login_page.get_welcome_massage()
self.personal_details_page.goto_page()
self.emergency_contacts_page.goto_page()
self.emergency_contacts_page.add_attachment_button()
self.emergency_contacts_page.choose_file(file_path)
self.emergency_contacts_page.upload_button()
self.wait.until(expected_conditions.text_to_be_present_in_element((By.CSS_SELECTOR, ".message.success"),'Successfully Saved'))
self.driver.find_element_by_css_selector("#tblAttachments > tbody > tr.odd > td.center > input").click()
self.driver.find_element_by_id("btnDeleteAttachment").click()
self.wait.until(expected_conditions.text_to_be_present_in_element((By.CSS_SELECTOR, ".message.success"),
'Successfully Deleted'))
def test_25_add_attachment_large_size(self):
file_path = JPG_2_Mb_path
self.login_page.login()
self.login_page.get_welcome_massage()
self.personal_details_page.goto_page()
self.emergency_contacts_page.goto_page()
self.emergency_contacts_page.add_attachment_button()
self.emergency_contacts_page.choose_file(file_path)
self.emergency_contacts_page.upload_button()
self.assertEqual('413 Request Entity Too Large', self.driver.find_element_by_xpath('/html/body/center[1]/h1').text)
if __name__ == '__main__':
unittest.main(testRunner= HtmlTestRunner.HTMLTestRunner(output ='/Users/nazarkruk/PycharmProjects/HRM100Full/Reports'))
|
__author__ = 'luca'
from PyQt4.QtCore import *
from PyQt4.QtGui import QPixmap
class QFramesTimelineListModel(QAbstractListModel):
def __init__(self, video):
self.video = video
self.pixmaps = {}
super(QFramesTimelineListModel, self).__init__()
def rowCount(self, parent):
return self.video.frames_count()
def data(self, index, role):
if role == Qt.DisplayRole:
            if index.row() not in self.pixmaps:
frame = self.video.frames[index.row()]
pixmap = QPixmap(frame.path())
self.pixmaps[index.row()] = pixmap #.scaled(300, 250, Qt.KeepAspectRatio)
return self.pixmaps[index.row()]
else:
return QVariant()
|
import sqlite3
import json
import base64
import tornado.ioloop
import tornado.web
def b64ToNormal(inputStr: str) -> str:
    if inputStr == "":
        return ""
    return base64.b64decode(inputStr.encode("ascii")).decode("ascii")
def normalTob64(inputStr: str) -> str:
    if inputStr == "":
        return ""
    return base64.b64encode(inputStr.encode("ascii")).decode("ascii")
class DbTable :
def __init__(self):
self.conn = sqlite3.connect("main.db")
self.c=self.conn.cursor()
try:
self.c.execute("SELECT * FROM bssidloc")
self.c.execute("SELECT * FROM studentbssid")
except sqlite3.OperationalError :
self.c.execute("CREATE TABLE studentbssid(id TEXT NOT NULL PRIMARY KEY,password TEXT NOT NULL,bssid TEXT,whitelist TEXT)")
self.c.execute("CREATE TABLE bssidloc(bssid TEXT NOT NULL PRIMARY KEY,location TEXT)")
self.conn.commit()
def checkCreds(self,studentID:str,password:str)->dict:
b64_password = normalTob64(password)
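        # NOTE: interpolating user input into SQL with f-strings (here and in
        # the methods below) is open to SQL injection; sqlite3's "?" parameter
        # binding would be the safer pattern.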
out = list(self.c.execute(f"SELECT password FROM studentbssid WHERE id LIKE '{studentID}'"))
#print(out)
if len(out)==0 :
return {
"msg":"NO_SUCH_USER"
}
else :
            if out[0][0] == b64_password:
return {
"msg":"AUTH_SUCCESS"
}
else:
return {
"msg":"AUTH_FAIL"
}
def getWhiteList(self,studentID:str):
command = f"SELECT whitelist FROM studentbssid WHERE id LIKE '{studentID}'"
#print(command)
out = list(self.c.execute(command))
#print(out)
if len(out)==0 :
return {
"msg":"GET_WHITELIST_FAIL"
}
else:
dat = b64ToNormal(out[0][0])
return{
"msg":"GET_WHITELIST_SUCCESS",
"data":dat
}
def getBSSIDLocation(self,bssid:str):
b64_bssid = normalTob64(bssid)
out = list(self.c.execute(f"SELECT location FROM bssidloc WHERE bssid LIKE '{b64_bssid}'"))
if len(out)==0 :
return {
"msg":"GET_LOCATION_FAILED"
}
else:
return {
"msg":"GET_LOCATION_SUCCESS",
"data":b64ToNormal(out[0][0])
}
def checkBSSIDPresence(self,bssid:str):
if self.getBSSIDLocation(bssid)["msg"]=="GET_LOCATION_SUCCESS" :
return {
"msg":"BSSID_PRESENCE_CONFIRMED"
}
else:
return {
"msg":"BSSID_ABSENCE_CONFIRMED"
}
def checkStudentIDPresence(self,studentID):
result = self.getWhiteList(studentID)
if result["msg"]=="GET_WHITELIST_SUCCESS":
return{
"msg":"STUDENT_PRESENCE_CONFIRMED"
}
return{
"msg":"STUDENT_ABSENCE_CONFIRMED"
}
def getStudentBSSID(self,studentID):
if self.checkStudentIDPresence(studentID)['msg']=="STUDENT_PRESENCE_CONFIRMED":
#print(studentID)
out = list(self.c.execute(f"SELECT bssid FROM studentbssid WHERE id LIKE '{studentID}'"))
#print(out)
if len(out)==0 :
return{
"msg":"BSSID_NOT_AVAILABLE"
}
else:
#print(b64ToNormal(out[0][0]))
return{
"msg":"GET_BSSID_SUCCESS",
"data":b64ToNormal(out[0][0])
}
else:
return{
"msg":"GET_BSSID_FAIL"
}
    def getFriendLocation(self,requestStudentID,targetStudentID): # get-friend-location module
result = self.getWhiteList(targetStudentID)
if result['msg'] == "GET_WHITELIST_SUCCESS" :
whitelist = result["data"]
if requestStudentID in whitelist :
result = self.getStudentBSSID(targetStudentID)
#print("p",targetStudentID,result)
if result["msg"]=="GET_BSSID_SUCCESS" and result["data"]!="":
bssid = result["data"]
result = self.getBSSIDLocation(bssid)
if result["msg"]=="GET_LOCATION_SUCCESS":
return{
"msg":"GET_LOCATION_SUCCESS",
"data":result["data"]
}
else:
return{
"msg":"UNAUTHORIZED_ERROR"
}
return{
"msg":"GET_LOCATION_FAILED"
}
    def updateStudentBSSID(self,studentID,bssid): # update-location module
result1 = self.checkStudentIDPresence(studentID)["msg"]
result2 = self.checkBSSIDPresence(bssid)["msg"]
#print(result2,bssid,len(bssid))
if result1=="STUDENT_PRESENCE_CONFIRMED" and result2=="BSSID_PRESENCE_CONFIRMED":
b64_bssid = normalTob64(bssid)
self.c.execute(f"UPDATE studentbssid SET bssid='{b64_bssid}' WHERE id LIKE '{studentID}'")
self.conn.commit()
return{
"msg":"BSSID_UPDATION_SUCCESS"
}
else:
return{
"msg":"BSSID_UPDATION_FAIL" #addBSSIDLocation must be called
}
def addBSSIDLocation(self,bssid,location):
result = self.checkBSSIDPresence(bssid)["msg"]
#print(result,bssid,len(bssid))
if result=="BSSID_ABSENCE_CONFIRMED" :
self.c.execute("INSERT INTO bssidloc VALUES(?,?)",(normalTob64(bssid),normalTob64(location)))
self.conn.commit()
return {
"msg":"ADD_BSSID_LOCATION_SUCCESS"
}
else:
return {
"msg":"ADD_BSSID_LOCATION_FAIL"
}
def createUser(self,studentID:str,password:str):
if self.checkStudentIDPresence(studentID)["msg"]=="STUDENT_ABSENCE_CONFIRMED" :
self.c.execute("INSERT INTO studentbssid VALUES(?,?,?,?)",(studentID,normalTob64(password),"",""))
self.conn.commit()
return {
"msg":"ADD_USER_SUCCESS"
}
else :
return {
"msg":"ADD_USER_FAILED"
}
def updateWhitelist(self,studentID:str,whitelist:str):
if self.checkStudentIDPresence(studentID)["msg"]=="STUDENT_PRESENCE_CONFIRMED" :
#print(whitelist,studentID)
b64_whitelist = normalTob64(whitelist)
command = f"UPDATE studentbssid SET whitelist='{b64_whitelist}' WHERE id LIKE '{studentID}'"
#print(command)
self.c.execute(command)
self.conn.commit()
#print(list(self.c.execute(f"SELECT whitelist FROM studentbssid WHERE id LIKE '{studentID}'")))
return{
"msg":"UPDATE_WHITELIST_SUCCESS"
}
else:
return{
"msg":"UPDATE_WHITELIST_FAILED"
}
table = DbTable()
#table.getTableData()
#print(list(table.conn.execute("pragma table_info('bssidloc')")))
#print(list(table.conn.execute("pragma table_info('studentbssid')")))
#print(table.createUser("abc","123456789"))
#print(table.checkCreds("abc","123456789"))
#print(table.checkCreds("abc",'1234567'))
#print(table.checkCreds('abcd','123456789'))
table.getWhiteList('abcd')
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("-1")
def post(self):
print("Incoming Request")
self.set_header("Content-Type","application/json")
body=self.request.body.decode("ascii")
print(body)
if(body==""):
self.write("-1")
return
request = json.loads(self.request.body.decode())
if request["mode"]!="CREATE_USER":
if table.checkCreds(request["info"]["studentID"],request["info"]["password"])["msg"]=="AUTH_SUCCESS":
if request["mode"]=="AUTH_USER" :
self.write(json.dumps({"msg":"SUCCESS"}))
return
elif request["mode"]=="UPDATE_LOCATION" :
result = table.updateStudentBSSID(request["info"]["studentID"],request["info"]["bssid"])
if result['msg']=="BSSID_UPDATION_SUCCESS" :
self.write({"msg":"UPDATION_SUCCESS"})
else:
self.write(json.dumps({
"msg":"BSSID_DOESNT_EXISTS"
}))
elif request["mode"]=="GET_FRIEND_LOCATION":
result = table.getFriendLocation(request["info"]["studentID"],request["info"]["otherStudentID"])
if result["msg"]=="GET_LOCATION_SUCCESS":
self.write(json.dumps(
{
"msg":"SUCCESS",
"data":result["data"]
}))
else:
self.write(json.dumps({
"msg":"NOT_IN_WHITELIST_ERROR"
}))
elif request["mode"]=="ADD_BSSID_LOCATION":
result = table.addBSSIDLocation(request["info"]["bssid"],request["info"]["location"])
if result['msg']=="ADD_BSSID_LOCATION_SUCCESS" :
self.write(json.dumps(
{
"msg":"SUCCESS"
}
))
else:
self.write(json.dumps(
{
"msg":"FAILED"
}
))
elif request["mode"]=="GET_WHITELIST" :
#print("id",request["info"]["studentID"])
result = table.getWhiteList(request["info"]["studentID"])
if result['msg']=="GET_WHITELIST_SUCCESS" :
self.write(json.dumps({
"msg":"SUCCESS",
"data":result["data"]
}))
else:
self.write(json.dumps({
"msg":"FAILED"
}))
elif request["mode"]=="UPDATE_WHITELIST":
result = table.updateWhitelist(request["info"]["studentID"],request["info"]["whitelist"])
if result["msg"]=="UPDATE_WHITELIST_SUCCESS" :
self.write(json.dumps({
"msg":"SUCCESS"
}))
else:
self.write(json.dumps({
"msg":"FAILED"
}))
else:
self.write(json.dumps({
"msg":"AUTH_FAIL"
}))
else:
if table.createUser(request["info"]["studentID"],request["info"]["password"])["msg"]=="ADD_USER_SUCCESS" :
self.write(json.dumps({
"msg":"ADD_USER_SUCCESS"
}))
else:
self.write(
json.dumps({
"msg":"ADD_USER_FAILED"
}
))
def make_app():
return tornado.web.Application([
(r"/", MainHandler),
])
if __name__ == "__main__":
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
|
#!/usr/bin/python3
def max_integer(my_list=[]):
if not my_list:
return None
maxim = my_list[0]
for i in range(len(my_list)):
if my_list[i] > maxim:
maxim = my_list[i]
return maxim
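# Example usage: max_integer([1, 3, 2]) returns 3; max_integer([]) returns None.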
|
from django.shortcuts import render, redirect
from rest_framework import viewsets, serializers
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.contrib.auth.decorators import login_required
from datetime import datetime
from .models import Scan, Peserta
from akun.models import Profil
from .forms import ScanForm
from .serializers import ScanSerializer
@login_required
def list_absen(request):
user = request.user
if request.user.is_superuser:
datas = Scan.objects.all()
else:
datas = Scan.objects.filter(peserta__user__user=user)
return render(request, 'absensi_apps/list.html', {'datas': datas})
@login_required
def scan_absen(request, *args, **kwargs):
cuser = request.user
# user_scan = Peserta.objects.get(user=request.user)
if request.method == 'POST':
form = ScanForm(request.POST or None)
if form.is_valid():
# form.cleaned_data['peserta'] = cuser
post = form.save(commit=False)
# post.peserta = request.POST.get('id_peserta')
# print(post.peserta)
# print(post, user_scan)
post.save()
return redirect('absensi:list')
else:
form = ScanForm()
return render(request, 'absensi_apps/scan.html', {'form': form, 'cuser': cuser})
# class ScanViewSet(viewsets.ModelViewSet):
# serializer_class = ScanSerializer
# def get_queryset(self):
# cuser = self.request.user
# queryset = Scan.objects.filter(peserta__user=cuser)
# # return super().get_queryset()
# return queryset
@api_view(['GET'])
def apiOverview(request):
api_urls = {
'List': '/scan-list/',
'Scan': '/scan/',
}
return Response(api_urls)
@api_view(['GET'])
def scanList(request):
if request.user.is_superuser:
scans = Scan.objects.all()
else:
# scans = Scan.objects.filter(peserta__user__user=request.user)
scans = Scan.objects.all()
serializer = ScanSerializer(scans, many=True)
return Response(serializer.data)
@api_view(['GET'])
def scanDetail(request, pk):
if request.user.is_superuser:
scans = Scan.objects.get(id=pk)
else:
scans = Scan.objects.filter(
peserta__user__user=request.user).get(id=pk)
serializer = ScanSerializer(scans, many=False)
return Response(serializer.data)
@api_view(['POST'])
def scanCreate(request):
serializer = ScanSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    # surface validation errors instead of silently returning nothing
    return Response(serializer.errors, status=400)
|
TABLE_SCHEMA = (
'IDKEY:STRING, '
'FECHA:STRING, '
'ANO:STRING, '
'DIA:STRING, '
'MES:STRING, '
'FECHA_GRABACION:STRING, '
'TIPO_CLIENTE:STRING, '
'GRABADOR_PAGO:STRING, '
'CENTRO_DE_COSTOS:STRING, '
'ESTADO_CARTERA:STRING, '
'PROXIMO_A_REPORTE:STRING, '
'VALOR_PAGADO:STRING, '
'OBLIGACION:STRING, '
'INICIO_DEL_CREDITO:STRING, '
'TIPO_ACTIVIDAD:STRING, '
'DIAS_MORA:STRING, '
'FECHA_DE_PAGO:STRING, '
'ESTADO_DE_COBRO:STRING, '
'CAMPANA:STRING, '
'SIGNO:STRING, '
'NIT:STRING, '
'ASESOR:STRING, '
'VALOR_ABONO_INTERESES:STRING, '
'VALOR_ABONO_CAPITAL:STRING, '
'NOMBRES:STRING, '
'MARCA:STRING, '
'CONSECUTIVO_OBLIGACION:STRING, '
'FECHA_TRASLADO:STRING, '
'GASTOS_DE_COBRANZA:STRING '
)
|
from abc import ABCMeta, abstractmethod
class IRepositoryCloner(metaclass=ABCMeta):
    """
    Interface for classes implementing cloning-functionality for different repository-types.
    """
@abstractmethod
def clone_repositories(self, repository_set: set) -> None:
"""
Clones repositories from URLs provided by repository set
:param repository_set: A set containing repository-URLs.
:return: None
"""
raise NotImplementedError
@abstractmethod
def clones(self) -> str:
"""
Returns which type of repository is cloned by this cloner.
:return: A string designating which type of repository is cloned (e.g. "git", "hg", "svn", ...)
"""
raise NotImplementedError
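# A minimal sketch of a concrete cloner, assuming a `git` executable on PATH;
# the class name and the subprocess call are illustrative, not part of the
# original interface.
import subprocess
class GitRepositoryCloner(IRepositoryCloner):
    def clone_repositories(self, repository_set: set) -> None:
        # clone each URL into the current working directory
        for url in repository_set:
            subprocess.run(["git", "clone", url], check=True)
    def clones(self) -> str:
        return "git"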
|
from rest_framework import serializers
class SampleSerializer(serializers.Serializer):
name = serializers.CharField()
count = serializers.IntegerField()
|
from functools import reduce
import inspect
def _attr_category(x):
if x.startswith('__') and x.endswith('__'):
return '"magic"'
if x.startswith('__'):
return 'mangling'
if x.startswith('_'):
return 'internal'
if x.endswith('_'):
return 'conflict'
return 'public'
def _members_sort(acc, x):
category_exists = any(node['name'] == x['category'] for node in acc)
if category_exists:
category = next(filter(lambda node: node['name'] == x['category'], acc))
category['attributes'].append(x['name'])
else:
acc.append({'name': x['category'], 'attributes': [x['name']]})
return acc
def members_names(x):
members = [{'name': x[0], 'category': _attr_category(x[0])}
for x in inspect.getmembers(x)]
result = reduce(_members_sort, members, list())
return result
def is_iterable(x):
try:
_ = (e for e in x)
return True
except TypeError:
return False
def is_hashable(x):
try:
_ = hash(x)
return True
except TypeError:
return False
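# Illustrative usage: members_names(list) groups the attributes of `list` into
# buckets such as {'name': '"magic"', 'attributes': ['__add__', ...]}.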
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Tan Chao'
'''
logger wrapper.
'''
import logging
import logging.config
import sys
def test_logging():
"""
learn how to use logging
"""
    logging.basicConfig(
        level=logging.DEBUG,
        format='[%(asctime)s] %(filename)s[line:%(lineno)d][fun:%(funcName)s] %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        # 'filename' and 'stream' are mutually exclusive in basicConfig,
        # so log to the file only
        filename='log/myapp.log',
        filemode='w'
    )
logging.debug('This is a message')
logging.info('This is a message')
logging.warning('This is a message')
test_logging()
|
from typing import List
class TreeNode:
    # minimal node definition; normally supplied by the LeetCode judge
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def getAllElements(self, root1: TreeNode, root2: TreeNode) -> List[int]:
res = []
def traverse(root):
if root:
res.append(root.val)
traverse(root.left)
traverse(root.right)
return
traverse(root1)
traverse(root2)
res.sort()
return res
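# Note: traverse() is a preorder walk, hence the final sort. Since the inputs
# are binary search trees, an inorder walk would yield two already-sorted
# lists that could be merged in O(n) instead.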
|
import numpy
import numpy.random
import smat
import smat.util
import argparse
import scipy.optimize
parser = argparse.ArgumentParser(description="Train a 784-1000-1000-10 neural net on MNIST and print out the error rates.")
parser.add_argument("-d","--device",type=int,default=None,help="The device to use, e.g. CUDA device.")
parser.add_argument("-m","--method",type=str,default="L-BFGS-B",help="The optimization algorithm to use. Valid values are COBYLA and L-BFGS-B.")
args = parser.parse_args()
if args.device is not None:
smat.set_backend_options(device=args.device)
print "Using device",smat.get_backend_info().device
print "Using method",args.method,"with float64"
# Load some sample bio data. Specifically this is a subset of the
# RNAcompete protein binding affinities from Ray et al., Nature, 2013.
y = numpy.load('data/rnac/rnac_subset.npz')['y']
n,m = y.shape
def objective_function(x,y,lib):
# The test objective function below happens to be that corresponding to
# "Variance Stabilization" (Huber et al., Bioinformatics, 2002).
# The specific objective is not important.
# The point is that the parameters can be sent to the GPU,
# evaluated, pulled back, and STILL be much faster than CPU.
# Shorthand for some functions that we're getting from lib=smat/numpy
asarray,arcsinh,sqrt,mean,log,sum = lib.asarray,lib.arcsinh,lib.sqrt,lib.mean,lib.log,lib.sum
# Push coefficients to GPU and get separate views to 'a' and 'b'
a,b = asarray(x).reshape((2,-1))
# Calculate h(y) and h'(y); see Huber et al., equation (6)
y = a+y*b
h = arcsinh(y)
hprime = b/sqrt(y**2+1)
# Calculate negative log-likelihood of current variance distribution; see Huber et al., equation (13)
hmean = mean(h,axis=1).reshape((-1,1))
term1 = log(sum((h-hmean)**2))
term2 = sum(log(hprime))
variance_nll = (.5*n*m)*term1 - term2
# Pull final objective value back from GPU
return float(variance_nll)
def run_minimize(y,method,lib):
print "\nOptimizing with %s..." % lib.__name__
# Push y to GPU ahead of time, in the case of smat
y = lib.asarray(y)
# Set up initial parameter vector x=[a;b]
a = numpy.zeros((1,m))
b = numpy.ones((1,m))
x = numpy.vstack([a,b]).ravel()
# Set up bounds for vector a (unbounded) and vector b (positive)
bounds = [(None,None) for i in range(m)] + [(1e-5,None) for i in range(m)]
# Call scipy to do the optimization
if method == "COBYLA": maxiter = 1000
elif method == "L-BFGS-B": maxiter = 5
else: quit("Unsupported \"method\".")
time = 0
print " iter 0: objective = %.1f at start" % (objective_function(x,y,lib))
for t in range(5):
smat.util.tic()
x = scipy.optimize.minimize(objective_function,x,args=(y,lib),bounds=bounds,method=method,options={'maxiter':maxiter},tol=1e-20).x
time += smat.util.toc()
print " iter %3d: objective = %.1f, time elapsed = %.1fs" % ((t+1)*maxiter,objective_function(x,y,lib),time)
run_minimize(y,args.method,smat)
run_minimize(y,args.method,numpy)
|
#Bullet Time!!! 90 degrees GIF
import time
import sys
import requests
import re
import urllib
import os
from wireless import Wireless
# initialize the wireless interface used to connect to the cameras
wifi = Wireless('wlan0')
# list of camera SSIDs
cameras = ['Bullet_5', 'Bullet_6']
# download the last 10 images from the selected camera
def getImages(cam):
    image = urllib.URLopener()
    # URL of the directory holding the photos
    adress = "http://10.5.5.9/videos/DCIM/100GOPRO/"
    # fetch the page listing at that URL
    r = requests.get(adress)
    # regex matching the image file names
    expression = r'"(\w+.JPG)"'
    # compile the pattern
    pattern = re.compile(expression)
    # search the page content for the pattern; the last 10 file names
    # are kept in the variable photos as a list
    photos = re.findall(pattern, r.content)[-10:]
    # counter used to name the downloaded images
    photoCount = 1
    # create a folder for this camera
    if not os.path.exists(cam):
        os.makedirs(cam)
    # iterate over the file names
    for item in photos:
        print photoCount
        # download each image
        image.retrieve(adress+item, cam+"/"+str(photoCount)+".jpg")
photoCount = photoCount + 1
return;
for camera in cameras:
print camera
print "--------------------"
wifi.connect(ssid=camera, password='goprosession')
time.sleep(7)
getImages(camera)
|
one_list = [i for i in [1, 4, 5, 8, 6, 12, 14] if ( i % 3 == 0 and i > 0 and i % 4 !=0 )]
print(one_list)
|
# Problem: factor a positive integer into primes. E.g. input 90, print 90=2*3*3*5.
# Analysis: to factor n, first find the smallest prime k, then proceed as follows:
# (1) If this prime equals n, the factorization is finished; print it.
# (2) If n != k but k divides n, print k, use n divided by k as the new n, and repeat step 1.
# (3) If k does not divide n, use k + 1 as the new k, and repeat step 1.
from sys import stdout
n = int(input("Enter a positive integer: "))
for i in range(2,n+1):
while n != i:
if n % i == 0:
stdout.write(str(i))
stdout.write('*')
            # cast with int(), otherwise the final printed number would carry a decimal point
n = int(n / i)
else:
break
print(n)
|
#! /bin/env python
# coding:utf-8
#
# Read tweets in json files, and create a corpus for each json.
#
#import create_corpus as cc
import mecab_inc as mi
import numpy as np
import random
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
file_name = 'tweets/new_A.txt'
#file_name = 'tweets/test_data.txt'
k = 10 #number of clusters
def create_corpus(file_name):
global W,C
W = {}
C = {}
for line in open(file_name,'r'):
text = mi.clean_text(line)
WL = mi.create_word_list(text)
WL = sorted(set(WL))
for i in xrange(len(WL)):
if WL[i] in W:
W[WL[i]] += 1
else:
W[WL[i]] = 1
for j in xrange(i+1,len(WL)):
if WL[i] in C:
if WL[j] in C[WL[i]]:
C[WL[i]][WL[j]] += 1
else:
C[WL[i]][WL[j]] = 1
else:
C[WL[i]] = {WL[j]:1}
def f(i,j):
    # Jaccard-style co-occurrence score of Words[i] and Words[j]
    try:
        a = C[Words[i]][Words[j]]
        return a*1.0/(W[Words[i]]+W[Words[j]]-a)
    except KeyError:
        return 0
def create_table():
    #generate the co-occurrence table[i][j] = table[j][i] for Words[i] and Words[j]
global table
table = [[None]*len(Words) for i in xrange(len(Words))]
for i in xrange(len(Words)):
table[i][i] = 1
for j in xrange(i+1,len(Words)):
table[i][j] = f(i,j)
table[j][i] = table[i][j]
def centroid(mem_ship):
V = [np.array([0]*len(Words)) for i in xrange(k)]
count = [0]*k
for i in xrange(len(Words)):
count[mem_ship[i]] += 1
V[mem_ship[i]] += np.array(table[i])
V = [V[i]*1.0/np.array(count[i]) for i in xrange(k)]
return V
def argmin(i,V):
min_i = 0
min = sum((V[0]-np.array(table[i]))**2)
for j in xrange(1,k):
tmp = sum((V[j]-np.array(table[i]))**2)
if tmp == min:
print tmp,'warn!'
if tmp < min:
min_i = j
min = tmp
return min_i
def update(V):
new_mem_ship = [None]*len(Words)
for i in xrange(len(Words)):
new_mem_ship[i] = argmin(i,V)
return new_mem_ship
def k_means():
while True:
mem_ship = list(np.random.randint(0,k,len(Words)))
#mem_ship = [random.randrange(k) for i in range(len(Words))]
if len(set(mem_ship)) == k:
break
r = 0
while True:
r+=1
#print mem_ship
V = centroid(mem_ship)
        #########centroid-merging step (not implemented)
#print V
#########
new_mem_ship = update(V)
if mem_ship == new_mem_ship:
break
else:
mem_ship = new_mem_ship
    print 'number of clusters:',k
    print 'number of iterations:',r
return mem_ship
if __name__ == '__main__':
create_corpus(file_name)
Words = sorted(W) #dict W -> list Words
create_table()
mem_ship = k_means()
#for i in xrange(len(Words)):
# print Words[i]+':',mem_ship[i]
for i in xrange(k):
print 'cluster num =',i
for j in xrange(len(Words)):
if mem_ship[j] == i and W[Words[j]] > 2:
print Words[j]+':',W[Words[j]],
print ''
|
import load
import model
import numpy as np
import args
import os
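# Winsorize-style cleaning: values in the lowest/highest ~1% of each column
# are treated as outliers and replaced with the previous row's value (or the
# middle mean for the first row).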
def clean_data(data):
def _find_bound(np_array, l_num, u_num):
sorted_np_array = np.sort(np_array)
return sorted_np_array[l_num],\
sorted_np_array[u_num],\
np.mean(sorted_np_array[l_num+1:u_num])
    def _check_bound(num, l_bound, u_bound):
        return num >= u_bound or num <= l_bound
filtered_l_num = int(data.shape[0] * 0.01)
filtered_u_num = data.shape[0] - filtered_l_num
data_bounds = []
for attr_index in range(data.shape[1]):
l_bound, u_bound, middle_mean = _find_bound(
data[:,attr_index], filtered_l_num, filtered_u_num)
data_bounds.append((l_bound, u_bound, middle_mean))
for i in range(data.shape[0]):
if _check_bound(data[i][attr_index], l_bound, u_bound):
if i != 0:
data[i][attr_index] = data[i-1][attr_index]
else:
data[i][attr_index] = middle_mean
with open('models/data_bounds.npy', 'wb') as f:
np.save(f, data_bounds)
return data
def filter_attributes(data, filename):
with open(filename, 'rb') as f:
booleans = np.load(f)
ret_data = data[:,booleans]
total_index = np.sum(booleans == True)
index = np.sum(booleans[:9] == True)
return ret_data, total_index, index
def make_data(x, y):
# filter out PM2.5 with possible invalid values
# [2, 130]
data = [(x[i:i+9].reshape(-1),y[i+9])
for i in range(x.shape[0])
if (i+9) % 480 >= 9]
return data
def filter_data(data, total, index):
filtered_data = [ele for ele in data
if ((ele[0].reshape(-1,total)[:,index-1:index+1].reshape(-1) < 2) |\
(ele[0].reshape(-1,total)[:,index-1:index+1].reshape(-1) > 130) |\
(ele[1] < 2) | (ele[1] > 130)).any() == False]
return filtered_data
def preprocessing(train_filename, attributes_filename):
data = load.read_csv(train_filename)
data = load.parse_csv(data)
train_x, train_y = load.csv_to_np(data)
# leave useful attributes
train_x, total_attributes, PM25_index = \
filter_attributes(train_x, attributes_filename)
    # replace possible invalid values that fall outside percentile bounds
train_x = clean_data(train_x)
# Augment data to create full training data
data = make_data(train_x, train_y)
# filter out possible invalid data
# with lower and upper bounds
data = filter_data(data, total_attributes, PM25_index)
return np.array(data)
def validate(trainer):
total_loss = 0.0
batches = trainer.get_validation_data()
if len(batches) == 0:
return 0.0
for x, y in batches:
prediction = trainer.forward(x)
total_loss += np.power(prediction - y, 2)
return total_loss/len(batches)
def check_dir(dir_name):
if os.path.isdir(dir_name) == False:
os.mkdir(dir_name)
def train(data,
validation,
prefix,
total_epoches,
learning_rate,
save_intervals,
params_init_model=None,
lambda_value=0.0):
trainer = model.LinearRegression(
data=data,
validation=validation)
if params_init_model is not None:
trainer.load_model(params_init_model)
logs_path = os.path.join('logs', prefix+'.log')
check_dir('logs')
check_dir('models')
model_path = os.path.join('models', prefix)
check_dir(model_path)
f_log = open(logs_path, 'w')
f_log.write('epoch, training loss, rmse loss, validation loss\n')
adagrad_n = 0
for epoch in range(total_epoches):
trainer.new_epoch()
total_loss, total_rmse_loss = 0.0, 0.0
batches = trainer.get_data()
for step, (x, y) in enumerate(batches):
prediction = trainer.forward(x)
rmse_loss = np.power(prediction - y, 2)
loss = rmse_loss + lambda_value*trainer.get_weight_norm()
total_loss += loss
total_rmse_loss += rmse_loss
adagrad_n += loss
grad = learning_rate * (prediction-y) / np.power(adagrad_n+1e-6, 0.5)
trainer.backward(grad,
x,
lambda_value=lambda_value,
grad_clip=False)
total_loss = total_loss / len(batches)
total_rmse_loss = total_rmse_loss / len(batches)
valid = validate(trainer)
print('epoch:%d, total loss:%.3f, total_rmse_loss:%.3f, validation:%.3f'
% (epoch+1, total_loss, total_rmse_loss, valid))
f_log.write('%d,%.3f,%.3f,%.3f\n' % \
(epoch+1, total_loss, total_rmse_loss, valid))
if (epoch+1) % save_intervals == 0:
trainer.save_model(
os.path.join(model_path,'model_e%d.npy') % (epoch+1))
f_log.close()
if __name__ == '__main__':
args = args.get_args()
np.random.seed(7122)
data = preprocessing(
args.train_filename,
args.attributes_filename)
train(data=data,
validation=args.validation,
prefix=args.prefix,
total_epoches=args.epoches,
learning_rate=args.learning_rate,
save_intervals=args.save_intervals,
lambda_value=args.lambda_value)
|
import multiprocessing
import os
def print_task(task1, task2):
print task1, task2, 'done in process %s' % os.getpid()
tasks = ['Alice', 'Bob', 'Cat', 'Dog']
pool = multiprocessing.Pool(processes=4)
for i in range(len(tasks)):
pool.apply_async(print_task, args=(tasks[i],tasks[i], ))
pool.close()
pool.join()
print 'End tasks'
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson and Diego Moreda.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Aid in submitting information to MusicBrainz.
This plugin allows the user to print track information in a format that is
parseable by the MusicBrainz track parser [1]. Programmatic submitting is not
implemented by MusicBrainz yet.
[1] https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings
"""
from beets import ui
from beets.autotag import Recommendation
from beets.plugins import BeetsPlugin
from beets.ui.commands import PromptChoice
from beetsplug.info import print_data
class MBSubmitPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'format': '$track. $title - $artist ($length)',
'threshold': 'medium',
})
# Validate and store threshold.
self.threshold = self.config['threshold'].as_choice({
'none': Recommendation.none,
'low': Recommendation.low,
'medium': Recommendation.medium,
'strong': Recommendation.strong
})
self.register_listener('before_choose_candidate',
self.before_choose_candidate_event)
def before_choose_candidate_event(self, session, task):
if task.rec <= self.threshold:
return [PromptChoice('p', 'Print tracks', self.print_tracks)]
def print_tracks(self, session, task):
for i in sorted(task.items, key=lambda i: i.track):
print_data(None, i, self.config['format'].as_str())
def commands(self):
"""Add beet UI commands for mbsubmit."""
mbsubmit_cmd = ui.Subcommand(
'mbsubmit', help='Submit Tracks to MusicBrainz')
def func(lib, opts, args):
items = lib.items(ui.decargs(args))
self._mbsubmit(items)
mbsubmit_cmd.func = func
return [mbsubmit_cmd]
def _mbsubmit(self, items):
"""Print track information to be submitted to MusicBrainz."""
for i in sorted(items, key=lambda i: i.track):
print_data(None, i, self.config['format'].as_str())
|
#!/usr/bin/env python
from sys import argv
from daemonize import Daemonize
pid = argv[1]
working_dir = argv[2]
file_name = argv[3]
def main():
with open(file_name, "w") as f:
f.write("test")
daemon = Daemonize(app="test_app", pid=pid, action=main, chdir=working_dir)
daemon.start()
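
# Usage sketch (argument order taken from the argv parsing above):
#   python this_script.py /tmp/test_app.pid /tmp output.txt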
|
# Generated by Django 2.0 on 2019-04-29 07:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ChatMessages',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('message', models.TextField()),
('is_read', models.BooleanField(default=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='ChatRoom',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=250, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('buyer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='chatroom_buyer', to=settings.AUTH_USER_MODEL)),
('seller', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='chatroom_seller', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='chatmessages',
name='chat_room',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='message_room', to='chat.ChatRoom'),
),
migrations.AddField(
model_name='chatmessages',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='message_user', to=settings.AUTH_USER_MODEL),
),
]
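
# A sketch of the models.py these operations correspond to (reconstructed from
# the migration above; the app's actual models may differ):
#
#     class ChatRoom(models.Model):
#         name = models.CharField(max_length=250, unique=True)
#         created_at = models.DateTimeField(auto_now_add=True)
#         buyer = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
#                                   on_delete=models.CASCADE, related_name='chatroom_buyer')
#         seller = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
#                                    on_delete=models.CASCADE, related_name='chatroom_seller')
#
#     class ChatMessages(models.Model):
#         message = models.TextField()
#         is_read = models.BooleanField(default=False)
#         created_at = models.DateTimeField(auto_now_add=True)
#         chat_room = models.ForeignKey(ChatRoom, null=True,
#                                       on_delete=models.CASCADE, related_name='message_room')
#         user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
#                                  on_delete=models.CASCADE, related_name='message_user')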
|
#!/usr/bin/python3
import pylab as pl
import scipy as sp
import numpy as np
from scipy import ndimage
img = pl.imread("converse3s.png")
s = img.shape
img2 = ndimage.sobel(img)
img3 = ndimage.median_filter(img2, 5)
wLeft = -3
wRight = 3
wBottom = -3
wTop = 3
thresholdMin = 49*0.2  # 49 = (wTop - wBottom + 1) * (wRight - wLeft + 1), the 7x7 window size
thresholdMax = 49*0.4
img2 = np.copy(img)
print(s)
for i in range(0, s[0]):
for j in range(0, s[1]):
        img[i,j] = np.round(img[i,j]*64.0) / 64.0  # quantize to 64 levels; np.round also handles RGB(A) pixels
for i in range(0, s[0]):
print("\rLinia {}/{}".format(i,s[0]), end=' ')
for j in range(0, s[1]):
c = 0
for k in range(wBottom, wTop+1):
for l in range(wLeft, wRight+1):
y = (i+k)%s[0]
x = (j+l)%s[1]
if img[y,x] == img[i,j]:
c = c+1
if thresholdMin < c and c < thresholdMax:
# print ("{}-{}".format(i,j))
# x1 = max(0, j+wLeft)
# x2 = min(s[1]-1, j+wRight)
# y1 = max(0, i+wBottom)
# y2 = min(s[0]-1, i+wTop)
img2[i,j] = 0
# print ("x1: {}, x2: {}".format(x1,x2))
# for t in range(x1,x2+1):
# img2[y1,t] = 1.0
# img2[y2,t] = 1.0
# for k in range(y1,y2+1):
# img2[k,x1] = 1.0
# img2[k,x2] = 1.0
pl.imshow(img2, cmap='gray')  # pl.gray() returns None; pass the colormap name instead
pl.show()
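
# A vectorized sketch of the per-pixel quantization loop above: NumPy applies
# the same rounding to every element in one call (assumes a float image array).
def quantize(image, levels=64.0):
    return np.round(image * levels) / levels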
|
from datetime import datetime
from .models import Event
from rest_framework import generics
from .serializers import DetailEventSerializer, ListEventSerializer
from .pagination import EventsResultsPagination
# Create your views here.
class ListEventView(generics.ListAPIView):
    serializer_class = ListEventSerializer
    pagination_class = EventsResultsPagination

    def get_queryset(self):
        # Evaluate per request; a class-level queryset freezes datetime.now() at import time.
        current_date = datetime.now().date()
        return Event.objects.filter(is_published=True, event_end__gte=current_date).order_by('-event_end')
class DetailEventView(generics.RetrieveAPIView):
lookup_field = 'id'
queryset = Event.objects.all()
serializer_class = DetailEventSerializer
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import Type
import pytest
from pants.build_graph.build_configuration import BuildConfiguration
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.engine.goal import GoalSubsystem
from pants.engine.target import Target
from pants.engine.unions import UnionRule, union
from pants.option.subsystem import Subsystem
from pants.util.frozendict import FrozenDict
from pants.util.ordered_set import FrozenOrderedSet
@pytest.fixture
def bc_builder() -> BuildConfiguration.Builder:
return BuildConfiguration.Builder()
def _register_aliases(bc_builder, **kwargs) -> None:
bc_builder.register_aliases(BuildFileAliases(**kwargs))
def test_register_bad(bc_builder: BuildConfiguration.Builder) -> None:
with pytest.raises(TypeError):
bc_builder.register_aliases(42)
def test_register_exposed_object(bc_builder: BuildConfiguration.Builder) -> None:
_register_aliases(bc_builder, objects={"jane": 42})
aliases = bc_builder.create().registered_aliases
assert FrozenDict() == aliases.context_aware_object_factories
assert FrozenDict(jane=42) == aliases.objects
def test_register_exposed_context_aware_object_factory(
bc_builder: BuildConfiguration.Builder,
) -> None:
def caof_function(parse_context):
return parse_context.rel_path
class CaofClass:
def __init__(self, parse_context):
self._parse_context = parse_context
def __call__(self):
return self._parse_context.rel_path
_register_aliases(
bc_builder, context_aware_object_factories={"func": caof_function, "cls": CaofClass}
)
aliases = bc_builder.create().registered_aliases
assert FrozenDict() == aliases.objects
assert (
FrozenDict({"func": caof_function, "cls": CaofClass})
== aliases.context_aware_object_factories
)
def test_register_union_rules(bc_builder: BuildConfiguration.Builder) -> None:
@union
class Base:
pass
class A:
pass
class B:
pass
union_a = UnionRule(Base, A)
union_b = UnionRule(Base, B)
bc_builder.register_rules("_dummy_for_test_", [union_a])
bc_builder.register_rules("_dummy_for_test_", [union_b])
assert bc_builder.create().union_rules == FrozenOrderedSet([union_a, union_b])
def test_validation(caplog, bc_builder: BuildConfiguration.Builder) -> None:
def mk_dummy_subsys(_options_scope: str, goal: bool = False) -> Type[Subsystem]:
class DummySubsystem(GoalSubsystem if goal else Subsystem): # type: ignore[misc]
options_scope = _options_scope
return DummySubsystem
def mk_dummy_tgt(_alias: str) -> Type[Target]:
class DummyTarget(Target):
alias = _alias
core_fields = tuple()
return DummyTarget
bc_builder.register_subsystems(
"_dummy_for_test_",
(
mk_dummy_subsys("foo"),
mk_dummy_subsys("Bar-bar"),
mk_dummy_subsys("baz"),
mk_dummy_subsys("qux", goal=True),
mk_dummy_subsys("global"),
),
)
bc_builder.register_target_types(
"_dummy_for_test_", (mk_dummy_tgt("bar_bar"), mk_dummy_tgt("qux"), mk_dummy_tgt("global"))
)
with pytest.raises(TypeError) as e:
bc_builder.create()
assert (
"Naming collision: `Bar-bar`/`bar_bar` is registered as a subsystem and a "
"target type." in caplog.text
)
assert "Naming collision: `qux` is registered as a goal and a target type." in caplog.text
assert (
"Naming collision: `global` is registered as a reserved name, a subsystem "
"and a target type." in caplog.text
)
assert "Found naming collisions" in str(e)
def test_register_subsystems(bc_builder: BuildConfiguration.Builder) -> None:
def mk_dummy_subsys(_options_scope: str) -> Type[Subsystem]:
class DummySubsystem(Subsystem):
options_scope = _options_scope
return DummySubsystem
foo = mk_dummy_subsys("foo")
bar = mk_dummy_subsys("bar")
baz = mk_dummy_subsys("baz")
bc_builder.register_subsystems("backend1", [foo, bar])
bc_builder.register_subsystems("backend2", [bar, baz])
bc_builder.register_subsystems("backend3", [baz])
bc = bc_builder.create()
assert bc.subsystem_to_providers == FrozenDict(
{
foo: ("backend1",),
bar: ("backend1", "backend2"),
baz: (
"backend2",
"backend3",
),
}
)
def test_register_target_types(bc_builder: BuildConfiguration.Builder) -> None:
def mk_dummy_tgt(_alias: str) -> Type[Target]:
class DummyTarget(Target):
alias = _alias
core_fields = tuple()
return DummyTarget
foo = mk_dummy_tgt("foo")
bar = mk_dummy_tgt("bar")
baz = mk_dummy_tgt("baz")
bc_builder.register_target_types("backend1", [foo, bar])
bc_builder.register_target_types("backend2", [bar, baz])
bc_builder.register_target_types("backend3", [baz])
bc = bc_builder.create()
assert bc.target_type_to_providers == FrozenDict(
{
foo: ("backend1",),
bar: ("backend1", "backend2"),
baz: (
"backend2",
"backend3",
),
}
)
|
# encoding: utf-8
"""
@author: l1aoxingyu
@contact: sherlockliao01@gmail.com
"""
import logging
from fastai.vision import *
from .callbacks import *
def do_train(
cfg,
model,
data_bunch,
test_labels,
opt_func,
lr_sched,
loss_func,
num_query,
):
eval_period = cfg.SOLVER.EVAL_PERIOD
output_dir = Path(cfg.OUTPUT_DIR)
epochs = cfg.SOLVER.MAX_EPOCHS
total_iter = len(data_bunch.train_dl)
logger = logging.getLogger("reid_baseline.train")
logger.info("Start Training")
cb_fns = [
partial(LRScheduler, lr_sched=lr_sched),
partial(TestModel, test_labels=test_labels, eval_period=eval_period, num_query=num_query, logger=logger),
# partial(LearnerTensorboardWriter, base_dir=output_dir, name='tb')
]
learn = Learner(
data_bunch,
model,
path=output_dir,
opt_func=opt_func,
loss_func=loss_func,
true_wd=False,
callback_fns=cb_fns,
callbacks=[TrackValue(logger, total_iter)])
# continue training
    if cfg.MODEL.CHECKPOINT != '':  # `is not` on a str literal checks identity, not equality
state = torch.load(cfg.MODEL.CHECKPOINT)
if set(state.keys()) == {'model', 'opt'}:
model_state = state['model']
learn.model.load_state_dict(model_state)
learn.create_opt(0, 0)
learn.opt.load_state_dict(state['opt'])
else:
learn.model.load_state_dict(state['model'])
logger.info(f'continue training from checkpoint {cfg.MODEL.CHECKPOINT}')
learn.fit(epochs, lr=cfg.SOLVER.BASE_LR, wd=cfg.SOLVER.WEIGHT_DECAY)
|
from __future__ import division
from astropy.io import ascii

ifl = ascii.read("bb_and_4s_pars.txt", header_start=None, comment='#')
print(ifl)
tab = []
for i in range(len(ifl['col1'])):
    # Format each triple of columns as value_{lower}^{upper}, rounded to 3 decimals.
    tab.append([
        '{}_{{{}}}^{{{}}}'.format(
            round(ifl['col' + str(3 * j + 1)][i], 3),
            round(ifl['col' + str(3 * j + 2)][i], 3),
            round(ifl['col' + str(3 * j + 3)][i], 3),
        )
        for j in range(1, 8)
    ])
with open("table10.txt", 'w') as ofile:
    for row in tab:
        ofile.write(str(row) + '\n')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Adapted from:
- http://www.djangosnippets.org/snippets/764/
- http://www.satchmoproject.com/trac/browser/satchmo/trunk/satchmo/apps/satchmo_utils/views.py
- http://tinyurl.com/shoppify-credit-cards
"""
from __future__ import unicode_literals
import re
# Well known card regular expressions.
CARDS = {
'Visa': re.compile(r"^4\d{12}(\d{3})?$"),
'Mastercard': re.compile(r"^(5[1-5]\d{4}|677189)\d{10}$"),
'Dinersclub': re.compile(r"^3(0[0-5]|[68]\d)\d{11}$"),
'Amex': re.compile(r"^3[47]\d{13}$"),
'Discover': re.compile(r"^(6011|65\d{2})\d{12}$"),
}
# Well known test numbers
TEST_NUMBERS = [
"378282246310005", "371449635398431", "378734493671000", "30569309025904",
"38520000023237", "6011111111111117", "6011000990139424", "555555555554444",
"5105105105105100", "4111111111111111", "4012888888881881", "4222222222222"
]
def verify_credit_card(number):
"""Returns the card type for given card number or None if invalid."""
return CreditCard(number).verify()
class CreditCard(object):
def __init__(self, number):
self.number = number
def is_number(self):
"""True if there is at least one digit in number."""
self.number = re.sub(r'[^\d]', '', self.number)
return self.number.isdigit()
    def is_mod10(self):
        """Returns True if number is valid according to mod10 (the Luhn checksum)."""
        double = 0
        total = 0
        # Walk the digits right-to-left, doubling every second digit and
        # summing the decimal digits of each product (the Luhn algorithm).
        for i in range(len(self.number) - 1, -1, -1):
            for c in str((double + 1) * int(self.number[i])):
                total = total + int(c)
            double = (double + 1) % 2
        return (total % 10) == 0
def is_test(self):
"""Returns True if number is a test card number."""
# Note: test numbers cannot be used in the PP Pro sandbox.
# Instead, use the credit card number associated with a
# sandbox account (Test Accounts -> View Details).
return self.number in TEST_NUMBERS
def get_type(self):
"""Return the type if it matches one of the cards."""
for card, pattern in CARDS.items():
if pattern.match(self.number):
return card
return None
def verify(self):
"""Returns the card type if valid else None."""
if self.is_number() and not self.is_test() and self.is_mod10():
return self.get_type()
return None
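
# A minimal usage sketch ("4532015112830366" is an illustrative Luhn-valid
# number matching the Visa pattern, not a real card):
if __name__ == "__main__":
    print(verify_credit_card("4532015112830366"))  # -> 'Visa'
    print(verify_credit_card("1234"))              # -> None (fails the mod10 check)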
|
import requests
from copy import deepcopy
class InvokeManager():
def __init__(self, address, fileManager):
self.address = address
self.fileManager = fileManager
    def invoke(self, name, param, default_conf_class, except_conf=None, log=True, filePath="", paramID="", optimise=True):
        # Avoid a shared mutable default argument.
        if except_conf is None:
            except_conf = {}
newparam = deepcopy(param)
if (filePath and paramID):
code, text = self.fileManager.upload(filePath)
if code == 200:
newparam[paramID] = text
data = {
"name": name,
"param": newparam,
"default": {"actionClass": default_conf_class},
"except": except_conf,
"optimise": optimise,
"log": log
}
resp = requests.post(self.address, json=data)
return resp.status_code, resp.text
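
# A minimal usage sketch (the endpoint URL, action name, and file manager are hypothetical):
#   manager = InvokeManager("http://localhost:8080/invoke", fileManager=my_file_manager)
#   status, body = manager.invoke("resize", {"width": 100}, default_conf_class="Resize")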
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import spi, sensor
from esphome.const import CONF_ID, ICON_EMPTY, UNIT_EMPTY
DEPENDENCIES = ['spi']
empty_spi_sensor_ns = cg.esphome_ns.namespace('empty_spi_sensor')
EmptySPISensor = empty_spi_sensor_ns.class_('EmptySPISensor', cg.PollingComponent,
spi.SPIDevice)
CONFIG_SCHEMA = sensor.sensor_schema(UNIT_EMPTY, ICON_EMPTY, 1).extend({
cv.GenerateID(): cv.declare_id(EmptySPISensor),
}).extend(cv.polling_component_schema('60s')).extend(spi.spi_device_schema())
def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
yield cg.register_component(var, config)
yield sensor.register_sensor(var, config)
yield spi.register_spi_device(var, config)
|
import requests
import pymongo
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
def init_browser():
# Set up splinter
executable_path = {'executable_path':ChromeDriverManager().install()}
return Browser('chrome', **executable_path, headless=False)
def init_mongodb():
# Initialize PyMongo to work with MongoDBs
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
return client
def scrape():
print("Now scraping -----------")
# Create dictionary object to return the scraped data
dict_mars = {}
# NASA Mars News ....
print("NASA Mars News")
# Use the splinter browser...
nasa_url = "https://redplanetscience.com/"
browser = init_browser()
browser.visit(nasa_url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
results = soup.find_all('div', class_='list_text')
# client = init_mongodb()
# db = client.nasa_db
# collection = db.items
nasa_news_articles = []
# Loop through results
for result in results:
try:
title = result.find('div', class_='content_title').text
blurb = result.find('div', class_='article_teaser_body').text
if (title and blurb):
post = {
'title': title,
'blurb': blurb}
nasa_news_articles.append(post)
except Exception as e:
print(e)
dict_mars['nasa_news_articles'] = nasa_news_articles
# JPL Mars Space Images - Featured Image
#print("JPL Mars Space Images - Featured Image")
jpl_url = "https://spaceimages-mars.com/"
browser.visit(jpl_url)
html = browser.html
jpl_soup = BeautifulSoup(html, 'html.parser')
jpl_results = jpl_soup.find_all('img', class_='headerimage fade-in')
featured_image_url = f"{jpl_url}{jpl_results[0]['src']}"
dict_mars['featured_image_url'] = featured_image_url
# try:
# #post = {'featured_image_url':featured_image_url}
# #collection.insert_one(post)
# dict_mars['featured_image_url'] = featured_image_url
# except Exception as e:
# print(e)
    # Keep the browser open; it is reused below for the USGS hemisphere scraping.
# Mars Facts
#print("Mars Facts from galaxyfacts-mars.com")
mars_facts_url = 'https://galaxyfacts-mars.com/'
tables = pd.read_html(mars_facts_url)
mars_earth_table_df = tables[0]
mars_profile_table_df = tables[1]
me_list = mars_earth_table_df.loc[0]
mars_earth_table_df.columns = me_list
mp_list =["Mars Attribute", "Measurement"]
mars_profile_table_df.columns = mp_list
dict_mars['mars_earth_table_html'] = mars_earth_table_df.to_html()
dict_mars['mars_profile_table_html'] = mars_profile_table_df.to_html()
# Mars Hemispheres - high resolutions images for each of Mars' hemispheres
USGS_url="https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
base_USGS_url = "https://astrogeology.usgs.gov"
browser.visit(USGS_url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
results = soup.find_all('div', class_='item')
hemisphere_image_urls = []
for result in results:
title = result.find('h3').text
href = result.find('a')['href']
img_url = f'{base_USGS_url}{href}'
print(img_url)
browser.visit(img_url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
original_img = soup.find('div', class_='downloads')
img_url = original_img.find('a')['href']
hemi = {'title':title, 'img_url': img_url}
hemisphere_image_urls.append(hemi)
dict_mars['hemisphere_image_urls'] = hemisphere_image_urls
    browser.quit()
client = init_mongodb()
db = client.nasa_db
collection = db.items
try:
collection.insert_one(dict_mars)
except Exception as e:
print(e)
print(f'in scrape_mars.py, this is the return: {dict_mars}')
return dict_mars
# Let's do the thing
print('scrape_mars.py now running.... ')
scrape()
|
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = staticfiles_urlpatterns()
admin.autodiscover()
|
# Author: Gayatri Deo
import sys

from nltk.corpus import wordnet as wn

def printDef(category):
    for f in wn.synsets(category):
        for hypo in f.hyponyms():
            for hypo1 in hypo.hyponyms():
                # In NLTK 3, name and definition are methods, not attributes.
                print(category, ",", hypo1.name().split('.')[0], ",", hypo1.definition())

if __name__ == "__main__":
    printDef(sys.argv[1])
|
import serial
__author__ = 'wgiersche'

if __name__ == "__main__":
    # Use a name that does not shadow the serial module.
    ser = serial.Serial(port="/dev/tty.RNBT-37D2-RNI-SPP",
                        baudrate=9600, timeout=0)
    num = 0
    while num < 100:
        num += 1
        line = ser.readline()
        if line:
            print(line)
|
#!/usr/bin/env python3
from rpgdieroller.dierolls import *
import cmd
class DieRollerShell(cmd.Cmd):
intro = (
"Welcome to the Python RPG Die Roller shell. Type help or ? to list commands.\n"
)
prompt = "(RPG Die Roller) "
def do_roll(self, arg):
"Roll the dice for an expression like 'd10+2d6 + 5 - 3' or '5d20+2' or 'd20' or '3d6'. Any number of sides and dice are allowed."
print(dierollexpr(arg))
def do_fateroll(self, arg):
"Roll 4 fate dice. Any argument is the modifier and can be '-4', '6', '+6', and even 'd4+3d6+4-10'."
print(fateroll(arg))
def do_countsuccessrolls(self, args):
"(RPG Die Roller) countsuccessrolls <NDICE> <NSIDES> <NFORSUCCESS>\nRoll NDICE each with NSIDES and count how many of the rolled dice are at least NFORSUCCESS."
arglist = args.split(" ")
if len(arglist) != 3:
print("Error: there must be 3 positive integer arguments")
return
argnums = None
try:
argnums = [int(x) for x in arglist]
except ValueError:
print("Error: there must be 3 positive integer arguments")
return
for x in argnums:
if x <= 0:
print("Error: there must be 3 positive integer arguments")
return
print(rollcountsuccess(*argnums))
def do_ironsworn(self, args):
"Do an Ironsworn action roll with the modifiers as arguments '+2 -2' or anything you can put in the roll command"
print(ironswornaction(args))
def do_ironswornprogress(self, args):
"Do an Ironsworn progress roll with the amount of progress as an integer argument '2' or '9'"
arglist = args.split(" ")
if len(arglist) != 1:
print("Error: there must be exactly 1 integer argument")
return
argnums = None
try:
argnums = [int(x) for x in arglist]
except ValueError:
print("Error: there must be exactly 1 integer argument")
return
print(ironswornprogress(argnums[0]))
def do_exit(self, _):
"Exit the shell."
print("Exiting.")
return True
def do_quit(self, _):
"Exit the shell."
print("Exiting.")
return True
def main():
DieRollerShell().cmdloop()
if __name__ == "__main__":
main()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Contains the "base" code for plugin APIs which require partitioning."""
from __future__ import annotations
import itertools
from dataclasses import dataclass
from enum import Enum
from typing import Generic, Iterable, TypeVar, overload
from typing_extensions import Protocol
from pants.core.goals.multi_tool_goal_helper import SkippableSubsystem
from pants.engine.collection import Collection
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.rules import collect_rules, rule
from pants.engine.target import FieldSet, SourcesField, SourcesPaths, SourcesPathsRequest
from pants.util.memo import memoized
from pants.util.meta import runtime_ignore_subscripts
_FieldSetT = TypeVar("_FieldSetT", bound=FieldSet)
class PartitionerType(Enum):
"""What type of partitioner to use to partition the input specs."""
CUSTOM = "custom"
"""The plugin author has a rule to go from `RequestType.PartitionRequest` -> `Partitions`."""
DEFAULT_SINGLE_PARTITION = "default_single_partition"
"""Registers a partitioner which returns the inputs as a single partition
The returned partition will have no metadata.
"""
DEFAULT_ONE_PARTITION_PER_INPUT = "default_one_partition_per_input"
"""Registers a partitioner which returns a single-element partition per input.
Each of the returned partitions will have no metadata.
"""
def default_rules(self, cls, *, by_file: bool) -> Iterable:
"""Return an iterable of rules defining the default partitioning logic for this
`PartitionerType`.
NOTE: Not all `PartitionerType`s have default logic, so this method can return an empty iterable.
        :param by_file: If `True`, rules returned from this method (if any) will compute partitions with
            `str`-type elements, where each `str` value is the path to the resolved source field. If `False`,
            the rules will compute partitions with `FieldSet`-type elements.
"""
if self == PartitionerType.CUSTOM:
# No default rules.
return
elif self == PartitionerType.DEFAULT_SINGLE_PARTITION:
rules_generator = (
_single_partition_file_rules if by_file else _single_partition_field_set_rules
)
yield from rules_generator(cls)
elif self == PartitionerType.DEFAULT_ONE_PARTITION_PER_INPUT:
rules_generator = (
_partition_per_input_file_rules if by_file else _partition_per_input_field_set_rules
)
yield from rules_generator(cls)
else:
raise NotImplementedError(f"Partitioner type {self} is missing default rules!")
class PartitionMetadata(Protocol):
@property
def description(self) -> str | None:
...
class _EmptyMetadata:
@property
def description(self) -> None:
return None
PartitionMetadataT = TypeVar("PartitionMetadataT", bound=PartitionMetadata)
PartitionElementT = TypeVar("PartitionElementT")
@dataclass(frozen=True)
@runtime_ignore_subscripts
class Partition(Generic[PartitionElementT, PartitionMetadataT]):
"""A collection of 'compatible' inputs for a plugin tool, with optional common metadata.
Inputs are 'compatible' if it is safe/possible for them to be processed in the same invocation
of a tool (i.e. two files formatted in the same run of a formatter, or two test targets executed
in a single test runner process).
The metadata in a partition (if any) can be any type able to cross a rule boundary, and will be
provided to the rule which "runs" your tool. If it isn't `None` it must implement the
`PartitionMetadata` protocol.
NOTE: Partitions may be further divided into multiple batches before being passed to the tool-running
rule. When this happens, the same metadata is passed along with each batch.
"""
elements: tuple[PartitionElementT, ...]
metadata: PartitionMetadataT
@runtime_ignore_subscripts
class Partitions(Collection[Partition[PartitionElementT, PartitionMetadataT]]):
"""A collection of (<compatible inputs>, <metadata>) pairs.
When implementing a plugin, one of your rules will return this type, taking in a
`PartitionRequest` specific to your plugin.
The return likely will fit into one of:
- Returning empty partitions: E.g. if your tool is being skipped.
- Returning one partition. The partition may contain all of the inputs
(as will likely be the case for target-based plugins) or a subset (which will likely be the
case for targetless plugins).
- Returning >1 partition. This might be the case if you can't run
the tool on all the inputs at once. E.g. having to run a Python tool on XYZ with Py3,
and files ABC with Py2.
"""
@overload
@classmethod
def single_partition(
cls, elements: Iterable[PartitionElementT]
) -> Partitions[PartitionElementT, _EmptyMetadata]:
...
@overload
@classmethod
def single_partition(
cls, elements: Iterable[PartitionElementT], *, metadata: PartitionMetadataT
) -> Partitions[PartitionElementT, PartitionMetadataT]:
...
@classmethod
def single_partition(
cls, elements: Iterable[PartitionElementT], *, metadata: PartitionMetadataT | None = None
) -> Partitions:
"""Helper constructor for implementations that have only one partition."""
return Partitions([Partition(tuple(elements), metadata or _EmptyMetadata())])
@dataclass(frozen=True)
@runtime_ignore_subscripts
class _BatchBase(Generic[PartitionElementT, PartitionMetadataT], EngineAwareParameter):
"""Base class for a collection of elements that should all be processed together.
For example, a collection of strings pointing to files that should be linted in one process, or
a collection of field-sets pointing at tests that should all execute in the same process.
"""
tool_name: str
elements: tuple[PartitionElementT, ...]
partition_metadata: PartitionMetadataT
@dataclass(frozen=True)
@runtime_ignore_subscripts
class _PartitionFieldSetsRequestBase(Generic[_FieldSetT], EngineAwareParameter):
"""Returns a unique type per calling type.
This serves us 2 purposes:
1. `<Core Defined Plugin Type>.PartitionRequest` is the unique type used as a union base for plugin registration.
2. `<Plugin Defined Subclass>.PartitionRequest` is the unique type used as the union member.
"""
field_sets: tuple[_FieldSetT, ...]
@dataclass(frozen=True)
class _PartitionFilesRequestBase(EngineAwareParameter):
"""Returns a unique type per calling type.
This serves us 2 purposes:
1. `<Core Defined Plugin Type>.PartitionRequest` is the unique type used as a union base for plugin registration.
2. `<Plugin Defined Subclass>.PartitionRequest` is the unique type used as the union member.
"""
files: tuple[str, ...]
@memoized
def _single_partition_field_set_rules(cls) -> Iterable:
"""Returns a rule that implements a "partitioner" for `PartitionFieldSetsRequest`, which returns
one partition."""
@rule(
_param_type_overrides={
"request": cls.PartitionRequest,
"subsystem": cls.tool_subsystem,
}
)
async def partitioner(
request: _PartitionFieldSetsRequestBase, subsystem: SkippableSubsystem
) -> Partitions[FieldSet, _EmptyMetadata]:
return Partitions() if subsystem.skip else Partitions.single_partition(request.field_sets)
return collect_rules(locals())
@memoized
def _single_partition_file_rules(cls) -> Iterable:
"""Returns a rule that implements a "partitioner" for `PartitionFieldSetsRequest`, which returns
one partition."""
# NB: This only works if the FieldSet has a single `SourcesField` field. We check here for
# a better user experience.
sources_field_name = _get_sources_field_name(cls.field_set_type)
@rule(
_param_type_overrides={
"request": cls.PartitionRequest,
"subsystem": cls.tool_subsystem,
}
)
async def partitioner(
request: _PartitionFieldSetsRequestBase, subsystem: SkippableSubsystem
) -> Partitions[str, _EmptyMetadata]:
assert sources_field_name is not None
all_sources_paths = await MultiGet(
Get(SourcesPaths, SourcesPathsRequest(getattr(field_set, sources_field_name)))
for field_set in request.field_sets
)
return (
Partitions()
if subsystem.skip
else Partitions.single_partition(
itertools.chain.from_iterable(
sources_paths.files for sources_paths in all_sources_paths
)
)
)
return collect_rules(locals())
@memoized
def _partition_per_input_field_set_rules(cls) -> Iterable:
"""Returns a rule that implements a "partitioner" for `PartitionFieldSetsRequest`, which returns
a single-element partition per input."""
@rule(
_param_type_overrides={
"request": cls.PartitionRequest,
"subsystem": cls.tool_subsystem,
}
)
async def partitioner(
request: _PartitionFieldSetsRequestBase, subsystem: SkippableSubsystem
) -> Partitions[FieldSet, _EmptyMetadata]:
return (
Partitions()
if subsystem.skip
else Partitions(
Partition((field_set,), _EmptyMetadata()) for field_set in request.field_sets
)
)
return collect_rules(locals())
@memoized
def _partition_per_input_file_rules(cls) -> Iterable:
"""Returns a rule that implements a "partitioner" for `PartitionFieldSetsRequest`, which returns
a single-element partition per input."""
# NB: This only works if the FieldSet has a single `SourcesField` field. We check here for
# a better user experience.
sources_field_name = _get_sources_field_name(cls.field_set_type)
@rule(
_param_type_overrides={
"request": cls.PartitionRequest,
"subsystem": cls.tool_subsystem,
}
)
async def partitioner(
request: _PartitionFieldSetsRequestBase, subsystem: SkippableSubsystem
) -> Partitions[str, _EmptyMetadata]:
assert sources_field_name is not None
all_sources_paths = await MultiGet(
Get(SourcesPaths, SourcesPathsRequest(getattr(field_set, sources_field_name)))
for field_set in request.field_sets
)
return (
Partitions()
if subsystem.skip
else Partitions(
Partition((path,), _EmptyMetadata())
for path in itertools.chain.from_iterable(
sources_paths.files for sources_paths in all_sources_paths
)
)
)
return collect_rules(locals())
def _get_sources_field_name(field_set_type: type[FieldSet]) -> str:
"""Get the name of the one `SourcesField` belonging to the given target type.
NOTE: The input target type's fields must contain exactly one `SourcesField`.
Otherwise this method will raise a `TypeError`.
"""
    sources_field_name = None
    for fieldname, fieldtype in field_set_type.fields.items():
        if issubclass(fieldtype, SourcesField):
            if sources_field_name is not None:
                # More than one SourcesField makes the choice ambiguous.
                raise TypeError(
                    f"Type {field_set_type} has multiple `SourcesField` fields."
                    + " Pants can't provide a default partitioner."
                )
            sources_field_name = fieldname
    if sources_field_name is None:
        raise TypeError(
            f"Type {field_set_type} does not have a `SourcesField` field."
            + " Pants can't provide a default partitioner."
        )
    return sources_field_name
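
# A minimal illustrative sketch (hypothetical helper, not part of this module's
# API) of the two shapes a custom partitioner can return, built only from the
# types defined above:
def _example_partitions(per_input: bool) -> Partitions:
    files = ("src/a.py", "src/b.py")
    if per_input:
        # One single-element partition per input, each with empty metadata.
        return Partitions(Partition((f,), _EmptyMetadata()) for f in files)
    # Everything grouped into one metadata-free partition.
    return Partitions.single_partition(files)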
|
Python 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 20:34:20) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> a="Python"
>>> a[0]
'P'
>>> a[1]
'y'
>>> a[-1]
'n'
>>> a[1:3]
'yt'
>>> a[0:4]
'Pyth'
>>> a[ : ]
'Python'
>>> a[ :4]
'Pyth'
>>> a[2: ]
'thon'
>>> a[-1:-5]
''
>>> a[-5:-1]
'ytho'
>>> a[ : :2]
'Pto'
>>> a[ : :3]
'Ph'
>>> a[ : :-1]
'nohtyP'
>>> a[ : :-2]
'nhy'
>>>
|
from .solve_neon_eq import ymat_pretty as neon_betas
from .fit_pad import TargetHeliumPad, TargetNeonPad
from .solve_helium_eq import ymat_pretty as helium_betas
__all__ = [
"helium_betas",
"neon_betas",
"TargetHeliumPad",
"TargetNeonPad",
]
|
# Parses HTML for juicy hrefs
import requests
import config
from lxml import html
from knowlify import worker
DATA_DIR = config.DATA_DIR
def get_page_from_web(url):
    """
    :param url: URL of the page to fetch
    :type url: str
    :return: parsed HTML document
    :rtype: html.HtmlElement
    """
    try:
        page = html.document_fromstring(requests.get(url).content)
    except ValueError:
        raise ValueError("Unable to parse URL at %s" % url)
    return page
def append_header(page):
assert(isinstance(page.head, html.HtmlElement))
page.head.insert(1, html.Element(
'script',
type="text/javascript",
src='knowl.js',
))
page.head.insert(1, html.Element(
'link',
href='knowlstyle.css',
rel='stylesheet',
type='text/css',
))
page.head.insert(1, html.Element(
'script',
type='text/javascript',
src='jquery-latest.min.js'
))
return page
def swap_href(page):
    #TODO: Create another function that intelligently selects knowlable words/phrases
    for element, attribute, link, pos in page.body.iterlinks():
        if attribute == 'href':
            # Use the public attrib API to move the href into a 'knowl' attribute.
            dumb_url = element.attrib.pop('href')
            element.attrib['knowl'] = dumb_url
    return page
def build_full_page_from_url(url):
"""
:param url:
:type url: basestring
:return: simply knowlified page
"""
page = get_page_from_web(url)
page = swap_href(page)
page = append_header(page)
return page
def build_dummy_page_from_url(url, length=5):
    # `length` is currently unused.
    page = get_page_from_web(url)
    return page.body
if __name__ == "__main__":
pass
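    # A minimal usage sketch (URL is illustrative):
    #   page = build_full_page_from_url("https://example.com")
    #   print(html.tostring(page, pretty_print=True).decode())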
|
GREEN = '\033[92m'
RED = '\033[91m'
BLUE = '\033[94m'
YELLOW = '\033[93m'
ENDC = '\033[0m'
UNDERLINE = '\033[4m'
BOLD = '\033[1m'
WHITE = '\033[97m'
MAGENTA = '\033[95m'
GREY = '\033[90m'
BLACK = '\033[30m'   # SGR 30 is black; 90 is bright black (grey)
DEFAULT = '\033[39m' # SGR 39 resets to the default foreground colour
class Color:
def success(self, text):
self.green(text)
def error(self, text):
self.red(text)
def warn(self, text):
self.yellow(text)
def green(self, text):
color = GREEN
self.__colorize(text, color)
def red(self, text):
color = RED
self.__colorize(text, color)
def blue(self, text):
color = BLUE
self.__colorize(text, color)
def yellow(self, text):
color = YELLOW
self.__colorize(text, color)
def default(self, text):
color = DEFAULT
self.__colorize(text, color)
def white(self, text):
color = WHITE
self.__colorize(text, color)
def magenta(self, text):
color = MAGENTA
self.__colorize(text, color)
def grey(self, text):
color = GREY
self.__colorize(text, color)
def black(self, text):
color = BLACK
self.__colorize(text, color)
def __colorize(self, text, color):
normal_text = self.__normalize_text(text)
print(f'{color} {normal_text} {ENDC}')
def __normalize_text(self, text):
text = self.__replace_underline(text)
text = self.__replace_bold(text)
return text
@staticmethod
def __replace_underline(text):
text = text.replace('<u>', UNDERLINE)
text = text.replace('</u>', ENDC)
return text
@staticmethod
def __replace_bold(text):
text = text.replace('<b>', BOLD)
text = text.replace('</b>', ENDC)
return text
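
if __name__ == '__main__':
    # Minimal usage sketch: <b>/<u> markup is translated to ANSI codes.
    c = Color()
    c.success('All <b>tests</b> passed')
    c.error('Build <u>failed</u>')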
|