text stringlengths 8 6.05M |
|---|
# Generated by Django 2.2 on 2020-12-16 02:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2): creates the CoffeeShop, Newsletter and Book
    # tables for coffee_app.
    # NOTE(review): initial=True combined with a dependency on
    # coffee_app.0001_initial is unusual for an initial migration — confirm
    # the migration graph.

    initial = True

    dependencies = [
        ('coffee_app', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='CoffeeShop',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField(max_length=360)),
                ('image', models.ImageField(null=True, upload_to='')),
                ('facebook', models.CharField(max_length=200, null=True)),
                ('address', models.CharField(max_length=200, null=True)),
                ('latitude', models.FloatField(blank=True, null=True)),
                ('longitude', models.FloatField(blank=True, null=True)),
                ('region', models.CharField(max_length=200, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Newsletter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField(max_length=360)),
                ('image', models.ImageField(upload_to='')),
                ('author', models.CharField(max_length=200)),
                ('genre', models.CharField(max_length=200)),
                # Each book belongs to one shop; deleting the shop deletes its books.
                ('shop', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coffee_app.CoffeeShop')),
            ],
        ),
    ]
|
#!usr/bin/env python
# -*- coding: utf-8 -*-
# *******************************************************
# @File: upload
# @Auth: winver9@gmail.com
# @Create: 2019-2-26 14:47
# @License: © Copyright 2019, LBlog Programs.
# *******************************************************
from django.core.exceptions import PermissionDenied
from django.http import JsonResponse
from django.conf import settings
from PIL import Image, ImageSequence
import os, uuid
def compresImage(image):
    """Downscale large images in place and return them.

    The shrink ratio depends on the original dimensions: >=2000px -> 30%,
    >=1000px -> 50%, >=500px -> 90%, otherwise the image is kept as-is
    (thumbnail never upscales).
    """
    width = image.width
    height = image.height
    rate = 1.0  # shrink ratio
    # Pick the ratio from the larger dimension bracket.
    if width >= 2000 or height >= 2000:
        rate = 0.3
    elif width >= 1000 or height >= 1000:
        rate = 0.5
    elif width >= 500 or height >= 500:
        rate = 0.9
    width = int(width * rate)    # new width
    height = int(height * rate)  # new height
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same filter
    # and has existed since Pillow 2.7, so this works on old and new Pillow.
    image.thumbnail((width, height), Image.LANCZOS)
    return image
def uploadImage(request):
    """Handle a POST image upload: validate size and extension, save the file
    under settings.MEDIA_IMAGE_ROOT with a uuid name, and return a JSON
    payload with the public URL.

    Raises PermissionDenied for any non-POST request.
    """
    if request.method == 'POST':
        file = request.FILES['upload_image']
        # Reject anything over ~500 KB.
        if file.size > 500000:
            return JsonResponse({
                'success': False,
                'file_path': '',
                'msg': '上传的图片大小不能超过500K'
            })
        if file.name.split('.')[-1] in ['jpg', 'jpeg', 'png', 'bmp', 'gif']:
            # file_path here is the on-server path where the image is stored.
            image = Image.open(file)
            # image.tile[0][0] is Pillow's decoder name (e.g. 'gif'), reused as
            # the saved file's extension.
            # NOTE(review): for JPEG/PNG the decoder name may not equal the usual
            # extension — confirm the resulting filenames.
            file_path = settings.MEDIA_IMAGE_ROOT + uuid.uuid4().hex + '.' + image.tile[0][0]
            # GIFs are saved frame-by-frame to preserve animation.
            #print(image.tile, image.tile[0][0], image.size, image.height, image.width)
            if image.tile[0][0] == 'gif':
                frames = [frame.copy() for frame in ImageSequence.Iterator(image)]
                print(frames)
                print(type(image))
                image.save(file_path, save_all=True, append_images=frames)
                return JsonResponse({
                    'success': True,
                    # file_path here is the URL used to access the image.
                    'file_path': settings.MEDIA_URL + 'image/' + os.path.basename(file_path),
                    'msg': 'Success!'
                })
            # Drop the alpha channel (RGBA -> RGB); images without 4 bands raise
            # and are deliberately left unchanged.
            try:
                r, g, b, a = image.split()
                image = Image.merge('RGB', (r, g, b))
            except:
                pass
            image = compresImage(image)
            image.save(file_path)
            return JsonResponse({
                'success': True,
                # file_path here is the URL used to access the image.
                'file_path': settings.MEDIA_URL + 'image/' + os.path.basename(file_path),
                'msg': 'Success!'
            })
        else:
            return JsonResponse({
                'success': False,
                'file_path': '',
                'msg': 'Unexpected File Format!'
            })
    else:
        raise PermissionDenied('Only Accept POST Request!')
# Generated by Django 2.2.6 on 2019-10-23 18:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.6): adds Score.minutes and makes
    # Problem.input / Problem.solution optional.

    dependencies = [
        ('contest', '0009_auto_20191012_2255'),
    ]

    operations = [
        migrations.AddField(
            model_name='score',
            name='minutes',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='problem',
            name='input',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='problem',
            name='solution',
            field=models.TextField(blank=True, null=True),
        ),
    ]
|
#Array reversa in Place O(n)
def array_reversal(a):
    """Reverse list *a* in place and return it.

    O(n) time, O(1) extra space. Fixes the original's Python-2-only code:
    `N/2` is float division under Python 3, so `range(mid)` raised TypeError;
    the separate even/odd branches were also duplicates — for odd lengths the
    middle element simply stays put.
    """
    n = len(a)
    # Swap symmetric pairs from both ends toward the middle.
    for i in range(n // 2):
        a[i], a[n - 1 - i] = a[n - 1 - i], a[i]
    return a
# Demo: reverse a sample list and show the result.
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
reversed_array = array_reversal(a)
# Python-2 `print reversed_array` is a SyntaxError on Python 3; use print().
print(reversed_array)
# Exercício 8.11 - Livro
def valida_string(string, minimo, maximo):
    """Return True when len(string) lies within [minimo, maximo], else False."""
    return minimo <= len(string) <= maximo
# 'Eu' has length 2, inside [1, 5], so this prints True.
res = valida_string('Eu', 1, 5)
print(res)
|
# Build {x: x**2} for x in 1..n, where n is read from the user.
n = int(input("Input integer"))
d = dict()
for x in range(1,n+1):
    d[x] = x*x
print(d)
|
import numpy as np
import math
from matplotlib import pyplot as plt
# Parameters of the signal of interest
fo = 1000. # fundamental frequency
k = 4
f = k*fo # frequency of the sinusoid
N = 32
# The discrete signals
n = np.linspace(0,N-1,N)
signal = np.cos(2.*math.pi*k*n/N)
signal2 = np.exp(1.j*2.*math.pi*k*n/N)
fourier = np.fft.fft(signal)
fourier2 = np.fft.fft(signal2)
# fftshift recenters the spectrum so DC sits in the middle for plotting.
fourier_mejor = np.fft.fftshift(fourier)
fourier_mejor2 = np.fft.fftshift(fourier2)
# Relate the discrete axes to real-world frequencies.
T = 1./fo
Tsamp = T/N
Fsamp = 1./Tsamp
Fmin = -Fsamp/2.
Fresol = Fsamp/N
Fmax = -Fmin-Fresol
# NOTE(review): this rebinds `f` from the scalar frequency above to the
# frequency-axis vector — intentional reuse, but easy to misread.
f = np.linspace(Fmin,Fmax,N)
#plt.stem(n,signal)
plt.stem(f,fourier_mejor) # plt.plot(f,fourier_mejor)
plt.stem(f,fourier_mejor2, linefmt = 'red') # plt.plot(f,fourier_mejor)
plt.show()
print("Tsamp es: ", Tsamp)
print("Fsamp es: ", Fsamp)
print("Fmin es: ", Fmin)
print("Fresol es: ", Fresol)
print("Fmax es: ", Fmax)
#!/usr/bin/env python
# coding: utf-8
# script to test the read speed of two competing methods for reading candidates:
# 1) pre-saved images (resampled)
# 2) resample on-the-fly
# imports
import SimpleITK as sitk
import numpy as np
import csv
import os
from PIL import Image
import matplotlib.pyplot as plt
import scipy.interpolate
import scipy.sparse
get_ipython().run_line_magic('matplotlib', 'inline')
img_path = '/home/se14/Documents/LIDC/LUNA16_nodule_detection/LUNA16_data/Tutorial/TUTORIAL_SimpleITK/TUTORIAL/data/1.3.6.1.4.1.14519.5.2.1.6279.6001.148447286464082095534651426689.mhd'
cand_path = '/home/se14/Documents/LIDC/LUNA16_nodule_detection/LUNA16_data/Tutorial/TUTORIAL_SimpleITK/TUTORIAL/data/candidates.csv'
# %% function definition
def resample_sitk(image,spacing, new_spacing=[1,1,1]):
    """Resample a numpy volume onto an 80x80x80 grid with `new_spacing`.

    NOTE(review): the mutable default `new_spacing=[1,1,1]` is only safe
    because it is never mutated — confirm before extending.
    """
    # reorder sizes as sitk expects them (numpy z,y,x -> sitk x,y,z — assumed;
    # TODO confirm against the caller's axis convention)
    spacing_sitk = [spacing[1],spacing[2],spacing[0]]
    new_spacing_sitk = [new_spacing[1],new_spacing[2],new_spacing[0]]
    # set up the input image as an SITK image
    img = sitk.GetImageFromArray(image)
    img.SetSpacing(spacing_sitk)
    # set up an identity transform to apply
    affine = sitk.AffineTransform(3)
    affine.SetMatrix(np.eye(3,3).ravel())
    affine.SetCenter(img.GetOrigin())
    # make the reference image grid, 80x80x80, with new spacing
    refImg = sitk.GetImageFromArray(np.zeros((80,80,80),dtype=image.dtype))
    refImg.SetSpacing(new_spacing_sitk)
    refImg.SetOrigin(img.GetOrigin())
    # Linear interpolation, default (background) value 0.
    imgNew = sitk.Resample(img, refImg, affine,sitk.sitkLinear,0)
    imOut = sitk.GetArrayFromImage(imgNew).copy()
    #
    # image.tofile('debug1.dat')
    # imOut.tofile('debug2.dat')
    return imOut
#%%
def load_itk_image(filename):
    """Read an ITK image file and return (volume, origin, spacing).

    GetArrayFromImage yields the volume with axes in (z,y,x) order, so origin
    and spacing are reversed from ITK's (x,y,z) to match.
    """
    itkimage = sitk.ReadImage(filename)
    numpyImage = sitk.GetArrayFromImage(itkimage)
    numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
    numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
    return numpyImage, numpyOrigin, numpySpacing
def readCSV(filename):
    """Read a CSV file and return every row as a list of string lists."""
    with open(filename, "rt") as handle:
        return list(csv.reader(handle))
def worldToVoxelCoord(worldCoord, origin, spacing):
    """Convert world (mm) coordinates into integer voxel indices.

    Takes the absolute offset from the origin, floor-divides by the voxel
    spacing, and returns plain Python ints.
    """
    offset = np.absolute(worldCoord - origin)
    indices = offset // spacing
    return [int(component) for component in indices]
def normalizePatches(npzarray):
    """Normalize HU intensities into [0, 1] over the window [-1000, 400].

    Values below -1000 HU clamp to 0, above 400 HU clamp to 1. Returns a new
    array; the input is not modified. (The original had a no-op
    `npzarray = npzarray` line and clamped via two boolean-mask writes;
    np.clip does the same in one call.)
    """
    maxHU = 400.
    minHU = -1000.
    scaled = (npzarray - minHU) / (maxHU - minHU)
    return np.clip(scaled, 0., 1.)
#%% load image
numpyImage, numpyOrigin, numpySpacing = load_itk_image(img_path)
print(numpyImage.shape)
print(numpyOrigin)
print(numpySpacing)
# load candidates
cands = readCSV(cand_path)
print(cands)
#%% process and save one candidate to file as a raw, with isotropic resampling, and a coverage of 80mm in each dim
cand = cands[2]
# CSV columns are reordered (cand[3],cand[2],cand[1]) — presumably x,y,z in the
# file flipped to z,y,x to match the volume; verify against candidates.csv.
worldCoord = np.asarray([float(cand[3]),float(cand[2]),float(cand[1])])
voxelCoord = worldToVoxelCoord(worldCoord, numpyOrigin, numpySpacing)
# Number of voxels spanning 80 mm along each axis.
voxelWidth = np.ceil(80 / numpySpacing).astype('uint16')
patch = numpyImage[voxelCoord[0]-voxelWidth[0]//2:voxelCoord[0]+voxelWidth[0]//2,voxelCoord[1]-voxelWidth[1]//2:voxelCoord[1]+voxelWidth[1]//2,voxelCoord[2]-voxelWidth[2]//2:voxelCoord[2]+voxelWidth[2]//2]
patch_new = resample_sitk(patch,numpySpacing,new_spacing=[1,1,1])
#patch_new = normalizePatches(patch_new)
print(patch_new.shape)
patch_new.tofile('example_patch.dat')
#%% try saving in a compressed form
sparse_matrix = scipy.sparse.csc_matrix(patch_new.reshape(80*80,80))
# NOTE(review): the f-string has no placeholders, and 'conpressed' looks like a
# typo for 'compressed' — the name is a runtime artifact, so left unchanged here.
scipy.sparse.save_npz(f'example_patch_conpressed.npz', sparse_matrix, compressed=True)
import subprocess
import Info_to_web_site
# Instantiate the project's Site helper.
test = Info_to_web_site.Site()
# NOTE(review): Site.__init__(test) re-runs the constructor and returns None,
# so subprocess.run receives None and will raise — confirm what was intended
# (probably running a command the Site object produces).
subprocess.run(Info_to_web_site.Site.__init__(test))
print(123123123)
|
import z
import math
import csv
from collections import defaultdict
import table_print
import statistics
import os
from sortedcontainers import SortedSet
from rows import *
from scipy import stats
import args
import buy
#table_print.accurate = 2
# mc 30.00B to 1.54T
# mc 7.5B to 30.00B
# mc 2.7B to 7.5B
# mc 0 to 2.7B
cols = ["last",
"target",
# ("md", "%"),
# ("md1", "%"),
# ("md2", "%"),
# ("mg", "%"),
# "gddif",
("dl", "%"),
("chg1", "%"),
"chg1p",
("chg5", "%"),
("chg30", "%"),
"chg30p",
("y1u"),
("ivvb"),
("ly", "%"),
("l2y", "%"),
("wc", "%"),
("bc", "%"),
("avg", "%"),
("avg8", "%"),
("dfh1y", "%"),
("gfl1y", "%"),
("m30c", "%"),
("w30", "%"),
("ma1y"),
("ma1yv"),
("ma1c", "%"),
# "ma",
# "mav",
# "revmcp",
"div",
"bta" ]
tory = z.getp("tory")
mine = z.getp("mine")
def single(astock):
    """Fetch one ticker's stored metrics and queue a row for table_print.

    Builds (column, value[, "%"]) tuples from the module-level `cols` spec,
    optionally appends live-price change, owned-portfolio value and
    buy/combined order info, then hands the row to table_print.store().
    """
    mydic = z.getpp(astock)
    values = [("name", astock)]
    for col in cols:
        if type(col) is tuple:
            # Tuple entries are (column_name, "%"): rendered as percentages.
            colname = col[0]
            values.append((colname, mydic[colname], col[1]))
        else:
            values.append((col, mydic[col]))
    if args.args.live:
        # Best effort: skip the live column when live data is unavailable.
        try:
            live_price = z.getLiveData(astock, key = "price")
            liveChange = live_price / mydic["last"]
            values.append(("live", liveChange, "%"))
        except:
            pass
    try:
        loc, portvalue = buy.getFrom("owned", astock)
    except:
        # Not owned (or lookup failed): blank out the owned columns.
        where = ""
        portvalue = ""
        loc = ""
    values.append(("owned", portvalue))
    # owned = buy.portFolioValue(astock)
    # values.append(("owned", owned))
    # if owned:
    # loc += "P" if astock in mine else ""
    # loc += "T" if astock in tory else ""
    try:
        # combined[astock] = (buyprice, buy_value[astock], col_valus[astock])
        buyprice, buy_value, loct = buy.getFrom("combined", astock)
        loc = "{}{}".format(loc,loct)
        # order = buy.getFrom("orders", astock)[0]
        # order,value = order[1], round(order[0])
        ochg = buyprice/mydic["last"]
        # values.insert(3, ("ochg", ochg, "%"))
        # values.insert(4, ("value", buy_value))
    except Exception as e:
        ochg = ""
        value = "NA"
        # values.insert(3, ("ochg", ochg, "%"))
        # values.insert(4, ("value", value))
        # loc = ""
        # if ochg:
        # try:
        # loc = "t" if astock in z.getp("torys") else "p"
        ## except:
        # pass
    #
    #
    values.append(("location", loc))
    table_print.store(values)
if __name__ == '__main__':
    # coindata = z.getp("coins")
    # stocks = stocks + [ data['symbol'].upper() for data in coindata if data['market_cap_rank'] <= 60 ]
    # NOTE(review): `stocks` is not defined in this file — presumably it comes
    # from `from rows import *`; confirm.
    print("stocks: {}".format( stocks))
    if not args.args.mode:
        # Default mode: print a row per configured stock, logging failures.
        for astock in stocks:
            try:
                single(astock)
            except Exception as e:
                z.trace(e)
    else:
        # Market-cap mode: row for each of the first 50 tickers in latestmc.
        mcdic = z.getp("latestmc")
        mcs = list(mcdic.keys())
        idx = 0
        end = idx + 50
        for astock in mcs[idx:end]:
            single(astock)
    table_print.initiate()
|
'''
Процедура - именованный блок кода, который работает, но НЕ возвращает результат
Функция - именованный блок кода, который работает, но возвращает результат
def имя_функции(параметр1, параметр2):
--->сделать_что_то
--->вернуть результат
'''
# определить функцию с именем add
def add(a, b):
    """Return the sum of the two arguments."""
    result = a + b
    return result

# Demonstrate the function: add 4 and 5 and print the result.
my_res = add(4, 5)
print(my_res)
|
from tkinter import *
from tkinter import messagebox
calculator = Tk()
calculator.title("CALCULATOR")
calculator.resizable(0, 1)#remove or change this in order to get different screen sizes
class Application(Frame):
    """Calculator UI: one display Entry above a 4x5 grid of buttons."""
    def __init__(self, master, *args, **kwargs):
        Frame.__init__(self, master, *args, **kwargs)
        self.createWidgets()
    def replaceText(self, text):
        # Replace the whole display contents with `text`.
        self.display.delete(0, END)
        self.display.insert(0, text)
    def appendToDisplay(self, text):
        # Append `text`; a lone "0" (the initial state) is replaced instead.
        self.entryText = self.display.get()
        self.textLength = len(self.entryText)
        if self.entryText == "0":
            self.replaceText(text)
        else:
            self.display.insert(self.textLength, text)
    def calculateExpression(self):
        """Evaluate the displayed expression and show the result.

        NOTE(review): eval() executes arbitrary Python typed into the display —
        tolerable in a local toy calculator, never for untrusted input.
        """
        self.expression = self.display.get()
        # "%" is treated as "divide by 100".
        self.expression = self.expression.replace("%", "/ 100")
        try:
            self.result = eval(self.expression)
            self.replaceText(self.result)
        except:
            messagebox.showinfo("ERROR", "Invalid input", icon="warning", parent=calculator)
    def clearText(self):
        # "C" button: reset the display to "0".
        self.replaceText("0")
    def createWidgets(self):
        """Build the display entry plus digits, operators, C and = buttons."""
        self.display = Entry(self, font=("Helvetica", 16), borderwidth=0, relief=RAISED, justify=RIGHT)
        self.display.insert(0, "0")
        self.display.grid(row=0, column=0, columnspan=5)
        # First row: 7 8 9 * C
        self.sevenButton = Button(self, font=("Helvetica", 11), text="7", borderwidth=0, command=lambda: self.appendToDisplay("7"))
        self.sevenButton.grid(row=1, column=0, sticky="NWNESWSE")
        self.eightButton = Button(self, font=("Helvetica", 11), text="8", borderwidth=0, command=lambda: self.appendToDisplay("8"))
        self.eightButton.grid(row=1, column=1, sticky="NWNESWSE")
        self.nineButton = Button(self, font=("Helvetica", 11), text="9", borderwidth=0, command=lambda: self.appendToDisplay("9"))
        self.nineButton.grid(row=1, column=2, sticky="NWNESWSE")
        self.timesButton = Button(self, font=("Helvetica", 11), text="*", borderwidth=0, command=lambda: self.appendToDisplay("*"))
        self.timesButton.grid(row=1, column=3, sticky="NWNESWSE")
        self.clearButton = Button(self, font=("Helvetica", 11), text="C", borderwidth=0, command=lambda: self.clearText())
        self.clearButton.grid(row=1, column=4, sticky="NWNESWSE")
        # Second row: 4 5 6 / %
        self.fourButton = Button(self, font=("Helvetica", 11), text="4", borderwidth=0, command=lambda: self.appendToDisplay("4"))
        self.fourButton.grid(row=2, column=0, sticky="NWNESWSE")
        self.fiveButton = Button(self, font=("Helvetica", 11), text="5", borderwidth=0, command=lambda: self.appendToDisplay("5"))
        self.fiveButton.grid(row=2, column=1, sticky="NWNESWSE")
        self.sixButton = Button(self, font=("Helvetica", 11), text="6", borderwidth=0, command=lambda: self.appendToDisplay("6"))
        self.sixButton.grid(row=2, column=2, sticky="NWNESWSE")
        self.divideButton = Button(self, font=("Helvetica", 11), text="/", borderwidth=0, command=lambda: self.appendToDisplay("/"))
        self.divideButton.grid(row=2, column=3, sticky="NWNESWSE")
        self.percentageButton = Button(self, font=("Helvetica", 11), text="%", borderwidth=0, command=lambda: self.appendToDisplay("%"))
        self.percentageButton.grid(row=2, column=4, sticky="NWNESWSE")
        # Third row: 1 2 3 - = (equals spans two rows)
        self.oneButton = Button(self, font=("Helvetica", 11), text="1", borderwidth=0, command=lambda: self.appendToDisplay("1"))
        self.oneButton.grid(row=3, column=0, sticky="NWNESWSE")
        self.twoButton = Button(self, font=("Helvetica", 11), text="2", borderwidth=0, command=lambda: self.appendToDisplay("2"))
        self.twoButton.grid(row=3, column=1, sticky="NWNESWSE")
        self.threeButton = Button(self, font=("Helvetica", 11), text="3", borderwidth=0, command=lambda: self.appendToDisplay("3"))
        self.threeButton.grid(row=3, column=2, sticky="NWNESWSE")
        self.minusButton = Button(self, font=("Helvetica", 11), text="-", borderwidth=0, command=lambda: self.appendToDisplay("-"))
        self.minusButton.grid(row=3, column=3, sticky="NWNESWSE")
        self.equalsButton = Button(self, font=("Helvetica", 11), text="=", borderwidth=0, command=lambda: self.calculateExpression())
        self.equalsButton.grid(row=3, column=4, sticky="NWNESWSE", rowspan=2)
        # Fourth row: 0 (double-width) . +
        self.zeroButton = Button(self, font=("Helvetica", 11), text="0", borderwidth=0, command=lambda: self.appendToDisplay("0"))
        self.zeroButton.grid(row=4, column=0, columnspan=2, sticky="NWNESWSE")
        self.dotButton = Button(self, font=("Helvetica", 11), text=".", borderwidth=0, command=lambda: self.appendToDisplay("."))
        self.dotButton.grid(row=4, column=2, sticky="NWNESWSE")
        self.plusButton = Button(self, font=("Helvetica", 11), text="+", borderwidth=0, command=lambda: self.appendToDisplay("+"))
        self.plusButton.grid(row=4, column=3, sticky="NWNESWSE")
app = Application(calculator).grid()
calculator.mainloop()
|
from sklearn.datasets import load_iris
iris = load_iris()
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
# sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# source - http://www.ritchieng.com/machine-learning-k-nearest-neighbors-knn/

# create X (features) and y (response)
# The Bunch attribute holding the feature matrix is `data`;
# `iris.datasets` does not exist and raised AttributeError.
X = iris.data
y = iris.target

# Logistic regression
# instantiate the model (using the default parameters)
logreg = LogisticRegression()
# fit the model with data
logreg.fit(X, y)
# predict the response values for the observations in X
logreg.predict(X)
# store the predicted response values
y_pred = logreg.predict(X)
# check how many predictions were generated
len(y_pred)
# compute classification accuracy for the logistic regression model
# (training accuracy: evaluated on the same data the model was fit on)
print(metrics.accuracy_score(y, y_pred))

# KNN Neigbors
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X, y)
y_pred = knn.predict(X)
print(metrics.accuracy_score(y, y_pred))

# STEP 1: split X and y into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=4)
# print the shapes of the new X objects
print(X_train.shape)
print(X_test.shape)
# print the shapes of the new y objects
print(y_train.shape)
print(y_test.shape)
# STEP 2: train the model on the training set
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
# STEP 3: make predictions on the testing set
y_pred = logreg.predict(X_test)
# compare actual response values (y_test) with predicted response values (y_pred)
print(metrics.accuracy_score(y_test, y_pred))

# Repeat for KNN with K=5
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred))

# Repeat for KNN with K=1
# (the original re-used n_neighbors=5 here, contradicting the comment)
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred))

# Can we locate an even better value for K?
# try K=1 through K=25 and record testing accuracy
k_range = range(1, 26)
scores = []
# Fit one model per K and append its test accuracy.
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    scores.append(metrics.accuracy_score(y_test, y_pred))
print(scores)
# plot the relationship between K and testing accuracy
plt.plot(k_range, scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Testing Accuracy')
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix, precision_recall_fscore_support, accuracy_score
class Classification_svm:
    """SVM classification helper for a CSV dataset with a 'Class' label column."""
    def __init__(self, url):
        self.url = url
        self.data = pd.read_csv(self.url)
        self.X = self.data.drop('Class', axis=1)  # features: every column but Class
        self.y = self.data['Class']  # labels: the Class column
    def classificationSVM(self, bagiUji=5, kernel='rbf'):
        """Train an SVC and return accuracy (percent) on a held-out tail split.

        The last len(X)//bagiUji rows are the test set (bagiUji=5 -> last 20%);
        the remainder is the training set. Also stores the confusion matrix,
        classification report and precision/recall on self.
        """
        self.bagi = len(self.X)//bagiUji
        bagi = self.bagi
        # Tail rows are the test split; everything before is training data.
        self.X_test = self.X[-bagi:]
        self.y_test = self.y[-bagi:]
        self.X_train = self.X[:-bagi]
        self.y_train = self.y[:-bagi]
        svclassifierRbf = SVC(kernel=kernel)
        svclassifierRbf.fit(self.X_train, self.y_train)
        self.y_pred2 = svclassifierRbf.predict(self.X_test)
        self.confusi = confusion_matrix(self.y_test, self.y_pred2)
        self.accuracy = classification_report(self.y_test, self.y_pred2)
        self.recall = precision_recall_fscore_support(self.y_test, self.y_pred2)
        self.accuracyScore = accuracy_score(self.y_test, self.y_pred2)
        # print(self.confusi)
        return self.accuracyScore*100
    def predictUji(self, urlTest, kernel='rbf'):
        """Fit on the full dataset and predict labels for the CSV at urlTest."""
        self.dataTest = pd.read_csv(urlTest)
        svclassifierRbf = SVC(kernel=kernel)
        svclassifierRbf.fit(self.X, self.y)
        print(self.dataTest.values)
        self.y_pred2 = svclassifierRbf.predict(self.dataTest)
        return self.y_pred2
# svm = Classification_svm("dataSvm0.csv")
# svm.classificationSVM()
|
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class Numbertype1Enum(object):
    """Implementation of the 'Numbertype1' enum.

    Number type: either SMS, Voice, or all.

    Attributes:
        ALL: matches every number type.
        VOICE: voice-capable numbers.
        SMS: SMS-capable numbers.
    """
    ALL = 'all'
    VOICE = 'voice'
    SMS = 'sms'
|
from sqlalchemy import nullsfirst, nullslast, desc
class NullOrderMixinView(object):
    """
    A mixin view that will allow setting NULLS FIRST or NULLS LAST
    """
    def _order_by(self, query, joins, sort_joins, sort_field, sort_desc):
        """
        Apply order_by to the query
        :param query:
            Query
        :param joins:
            Current joins
        :param sort_joins:
            Sort joins (properties or tables)
        :param sort_field:
            Sort field
        :param sort_desc:
            Select sort order:
            * True: for descending order
            * False or None: for ascending default order
            * 'LAST': for NULLS LAST
            * 'FIRST': for NULLS FIRST
        """
        if sort_field is not None:
            # Handle joins (helper is provided by the host view class)
            query, joins, alias = self._apply_path_joins(query, joins, sort_joins, inner_join=False)
            # When the join produced an alias, resolve the column on it.
            column = sort_field if alias is None else getattr(alias, sort_field.key)
            if sort_desc is True:
                # Tuple columns are ordered by each member in turn.
                if isinstance(column, tuple):
                    query = query.order_by(*map(desc, column))
                else:
                    query = query.order_by(desc(column))
            elif sort_desc is False:
                if isinstance(column, tuple):
                    query = query.order_by(*column)
                else:
                    query = query.order_by(column)
            elif sort_desc == 'LAST':
                # NOTE(review): both NULLS variants also apply desc() — confirm
                # that descending order is intended for LAST/FIRST.
                query = query.order_by(nullslast(desc(column)))
            elif sort_desc == 'FIRST':
                query = query.order_by(nullsfirst(desc(column)))
        return query, joins
|
"""
REF:[giampaolo/psutil](https://github.com/giampaolo/psutil)
[ArgumentParserの使い方を簡単にまとめた - Qiita](https://qiita.com/kzkadc/items/e4fc7bc9c003de1eb6d0)
[Pythonの文字列フォーマット(formatメソッドの使い方) | ガンマソフト株式会社](https://gammasoft.jp/blog/python-string-format/)
cpu[%] mem[%] (= [GB] / [GB])
------ -----------------------
***5.8 **10.0 (= *4.1 / 11.1)
**10.2
*100.0
cpu[%] mem[%] mem.used[GB] mem.total[GB]
------ -----------------------------------
***5.8 **10.0 ********4.11 ********11.12
hhmmss cpu[%] mem[%] mem.used[GB] mem.total[GB]
------ ------ -----------------------------------
hhmmss ***5.8 **10.0 ********4.11 ********11.12
cpu: ***.*, mem.percent: ***.*, mem.total: **.**, mem.used: **.**, mem.free: **.**
{ "cpu": x, "mem": { "percent": x, "total": x, "used": x, "free": x } }
"""
import psutil
import json
from datetime import datetime
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--json', action='store_true')
    parser.add_argument('--cpu', action='store_true')
    # parser.add_argument('--interactive', action='store_true', default=False)
    parser.add_argument('--interval-seconds', type=float, default=2.0)
    # NOTE(review): the parsed flags (--json/--cpu/--interval-seconds) are never
    # read below — the refresh interval is hard-coded to 2 seconds.
    args = parser.parse_args()
    vmem = psutil.virtual_memory()
    # Snapshot of CPU and memory usage (currently unused after construction).
    info = {
        "cpu": psutil.cpu_percent(),
        "mem": {
            "total": vmem.total,
            "percent": vmem.percent,
            "used": vmem.used,
            "free": vmem.free,
        }
    }
    print(psutil.cpu_percent())
    # NOTE(review): virtual_memory() reports bytes, so the 'Bits'/'Bytes'
    # labels below are off by one unit each.
    print(vmem.used, 'Bits')
    print(vmem.used / 1024, 'Bytes')
    print(vmem.used / 1024 / 1024, 'MB')
    print(vmem.used / 1024 / 1024 / 1024, 'GB')
    print(vmem.total / 1024 / 1024 / 1024, 'GB')
    # NOTE(review): this list literal is a bare expression statement — no effect.
    [
        { "cpu[%]": 10.0, "mem[%]": 20.0, "mem used/total": "" }
    ]
    from time import sleep
    print(f' cpu mem')
    print(f'------------')
    # Endless 2-second refresh; '\r' rewrites the same console line.
    while True:
        vmem = psutil.virtual_memory()
        # print(f'{psutil.cpu_percent(): >5.1f} {vmem.percent: >5.1f} ')
        print(f'\r{psutil.cpu_percent():> 5.1f} {vmem.percent: >5.1f} ', end='')
        sleep(2)
|
import sys
sys.path.append('../doubly_linked_list')
from doubly_linked_list import DoublyLinkedList, ListNode
class Stack:
    """LIFO stack backed by the project's DoublyLinkedList."""
    def __init__(self):
        # Track size ourselves so len() is O(1).
        self.size = 0
        # Why is our DLL a good choice to store our elements?
        # self.storage = ?
        self.storage = DoublyLinkedList()
    def push(self, value):
        # The tail of the list is the top of the stack.
        self.storage.add_to_tail(ListNode(value))
        self.size += 1
    def pop(self):
        """Remove and return the top value; implicitly returns None when empty."""
        if (len(self.storage) > 0):
            last_item = self.storage.remove_from_tail()
            self.size -= 1
            # NOTE(review): when the removed node's .value is None the node
            # itself is returned — presumably remove_from_tail can yield either
            # a node or a raw value; confirm against the DLL implementation.
            if last_item.value is not None:
                return last_item.value
            else:
                return last_item
    def len(self):
        # Non-standard alias for __len__, kept for existing callers.
        return self.size
    def __len__(self):
        return self.size
print(len(Stack())) |
from django.contrib import admin
from .models import ShortUrl
# Register your models here.
class ShortUrlAdmin(admin.ModelAdmin):
    """Admin list view for ShortUrl showing id, source url and short form."""
    list_display = ['id', 'url', 'short_url']
    # NOTE(review): ModelAdmin does not use an inner Meta class — this is a
    # no-op (Meta belongs on forms/models); confirm it can be removed.
    class Meta:
        model = ShortUrl
admin.site.register(ShortUrl,ShortUrlAdmin)
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group,Permission,ContentType
from apps.news.models import News,NewsCategory,Banner,Comment
from apps.course.models import Course,CourseCategory,Teacher,CourseOrder
from apps.payinfo.models import Payinfo,PayinfoOrder
class Command(BaseCommand):
    """Management command that creates the editor/finance/admin permission groups."""
    def handle(self, *args, **options):
        # 1. Editor group (manages news + courses + comments + banners).
        edit_content_types=[
            ContentType.objects.get_for_model(News),
            ContentType.objects.get_for_model(NewsCategory),
            ContentType.objects.get_for_model(Banner),
            ContentType.objects.get_for_model(Comment),
            ContentType.objects.get_for_model(Course),
            ContentType.objects.get_for_model(CourseCategory),
            ContentType.objects.get_for_model(Teacher),
            ContentType.objects.get_for_model(Payinfo),
        ]
        edit_permissions=Permission.objects.filter(content_type__in=edit_content_types)
        editGroup=Group.objects.create(name='编辑')
        editGroup.permissions.set(edit_permissions)
        editGroup.save()
        self.stdout.write(self.style.SUCCESS('编辑分组创建完成!'))
        # 2. Finance group (course orders / paid-info orders).
        finance_content_types=[
            ContentType.objects.get_for_model(CourseOrder),
            ContentType.objects.get_for_model(PayinfoOrder),
        ]
        finance_permissions=Permission.objects.filter(content_type__in=finance_content_types)
        financeGroup=Group.objects.create(name='财务')
        financeGroup.permissions.set(finance_permissions)
        financeGroup.save()
        self.stdout.write(self.style.SUCCESS('财务分组创建完成!'))
        # 3. Admin group = editor permissions + finance permissions.
        admin_permissions=edit_permissions.union(finance_permissions)
        adminGroup=Group.objects.create(name='管理员')
        adminGroup.permissions.set(admin_permissions)
        adminGroup.save()
        self.stdout.write(self.style.SUCCESS('管理员分组创建完成!'))
        # 4. Superusers are flagged via the is_supervisor field instead.
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 20:01:41 2018
@author: Octavio Ordaz
"""
#Libreria para leer desde archivos csv
import pandas as pd
#Libreria para trabajar con documentos JSON
import json
#Libreria que ocupamos para graficar los resultados de las consultas
import matplotlib.pyplot as plt
#En la consola usar: %matplotlib auto para graficar en ventana nueva.
# -- MongoDB connection (must exist before any insert) --
# The original ran db.bicis.insert_many(...) before `db` was defined, which
# raises NameError; the connection setup is moved ahead of the insert.
from pymongo import MongoClient as Connection
connection = Connection('localhost',27017)
# The "ecobicis" database and its "bicis" collection.
db = connection.ecobicis
coll = db.bicis
# January user dataset.
data = pd.read_csv('2018-01.csv')
# Convert the dataframe to a list of JSON documents.
data_json = json.loads(data.to_json(orient='records'))
# Insert the documents into the bicis collection.
db.bicis.insert_many(data_json)
# -- Histogram of user ages --
# Collect every user's age from the collection.
edades = []
for doc in coll.find({},{"_id":0,"Edad_Usuario":1}):
    # Pull the value stored under "Edad_Usuario".
    edades.append(doc["Edad_Usuario"])
# New window for the plot.
plt.figure()
# Histogram of the ages, 10 bins over 18-80.
plt.hist(edades,bins=10,range=(18,80))
plt.title("Histograma de edades",size=15)
plt.xlabel("Edad en años")
plt.ylabel("Cantidad de personas")
# -- Users per day during January --
fechaRetiro = []
cantidad = []
# One iteration per day of January.
for i in range(1,32):
    # Build the dd/01/2018 date string, zero-padded for days < 10.
    if i < 10:
        fecha = "0"+str(i)+"/01/2018"
    else:
        fecha = str(i)+"/01/2018"
    fechaRetiro.append(fecha)
    # NOTE(review): cursor.count() is deprecated/removed in modern PyMongo
    # (use count_documents), and the projection key ":id" looks like a typo
    # for "_id" — confirm against the PyMongo version in use.
    cantidad.append(coll.find({"Fecha_Retiro":fecha},{":id":0,"Bici":1}).count())
plt.figure()
# Scatter + connecting line of user counts per date.
plt.plot(fechaRetiro,cantidad,'bo',fechaRetiro,cantidad,'b')
plt.title("Cantidad de usuarios al día",size=15)
# Rotate the x labels vertically so the dates fit.
plt.xlabel("Fecha")
plt.xticks(rotation=90)
plt.ylabel("Cantidad de usuarios")
plt.grid(True)
# Scan for a two-character pair that occurs more than once in the string.
easy_test = 'aabcdefgaa'
# NOTE(review): easy_test is never used — only hard_test is scanned.
hard_test = 'ieodomkazucvgmuy'
found = False
# Each entry records a pair and its indices, e.g. [['q','j'], [0, 1]].
positions = []
for i, c in enumerate(list(hard_test)):
    if i < len(hard_test)-1:
        if len(positions) > 0:
            for p in positions:
                # Same pair seen before at a different position -> repeat found.
                # (i != p[1][1] presumably guards against overlapping matches —
                # confirm the intended overlap rule.)
                if [c, hard_test[i + 1]] == p[0] and i != p[1][1]:
                    found = True
        positions.append([[c, hard_test[i + 1]], [i, i+1]])
if found:
    print('Passed hard test')
# [['q','j'], [0, 1]]
|
import discord
from discord.ext import commands
class MyClient(discord.Client):
    """Minimal Discord bot that answers two trigger phrases with the author tag."""
    async def on_ready(self):
        # Fired once the gateway connection is established.
        print('Logged on as', self.user)
    async def on_message(self, message):
        # don't respond to ourselves
        if message.author == self.user:
            return
        if message.content == 'คนสร้าง':
            await message.channel.send('Im Just Dog#6918')
        if message.content == 'coding':
            await message.channel.send('Im Just Dog#6918')
client = MyClient()
# Run the bot; paste the bot token between the quotes.
# The original also called client.run(bottoken) afterwards — `bottoken` is
# undefined anywhere in the file (NameError) and run() blocks until shutdown,
# so that line was dead/broken and has been removed.
client.run('')  # Token of bot #
|
import collections, math
class StupidBackoffLanguageModel:
    """Bigram language model with stupid-backoff (factor 0.4) to unigrams."""
    def __init__(self, corpus):
        """Initialize count tables in the constructor and train on the corpus."""
        # Unigram counts default to 1 (add-one smoothing for unseen words).
        self.unigramCounts = collections.defaultdict(lambda: 1)
        self.bigramCounts = collections.defaultdict(lambda: 0)
        self.unigramTotal = 0
        self.bigramTotal = 0
        self.train(corpus)

    def train(self, corpus):
        """Accumulate unigram and bigram counts over every corpus sentence."""
        for sentence in corpus.corpus:
            cleanSentence = sentence.cleanSentence()
            for datum in cleanSentence.data:
                token = datum.word
                self.unigramCounts[token] = self.unigramCounts[token] + 1
                self.unigramTotal += 1
            # Count adjacent-token bigrams of the CLEANED sentence.
            # (The original bounded this loop by len(sentence.data) while
            # indexing cleanSentence — made consistent here.)
            i = 1
            while i < len(cleanSentence.data):
                bigram = str(cleanSentence.get(i - 1)) + " " + str(cleanSentence.get(i))
                self.bigramCounts[bigram] = self.bigramCounts[bigram] + 1
                i += 1

    def score(self, sentence):
        """Return the stupid-backoff log-probability of a token list.

        Seen bigrams contribute log(c(w1 w2)/c(w1)); unseen ones back off to
        log(0.4 * c(w2)/total).
        """
        score = 0
        coeff = math.log(0.4)
        i = 1
        while i < len(sentence):
            bigram = str(sentence[i - 1]) + " " + str(sentence[i])
            unigram = str(sentence[i])
            # dict.has_key() was removed in Python 3; membership uses `in`.
            if bigram in self.bigramCounts:
                score += math.log(self.bigramCounts[bigram])
                score -= math.log(self.unigramCounts[sentence[i - 1]])
            else:
                score += coeff + math.log(self.unigramCounts[unigram])
                score -= math.log(self.unigramTotal)
            i += 1
        return score
|
import numpy as np
import matplotlib.pyplot as plt
def f(x):
    """Square the input; works element-wise on numpy arrays."""
    return x * x
# Sample the parabola on [0, 10) with a 0.01 step and plot it.
xs = np.arange(0, 10, 0.01)
ys = f(xs)
print(ys)
plt.plot(xs, ys)
plt.show()
|
# Generated by Django 2.2.11 on 2020-03-16 03:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for `orgchart`: drops `detail.subordinates_url`
    and adds three nullable text fields (`department_url`, `location_url`,
    `manager_url`). Auto-generated -- do not edit operations by hand."""

    dependencies = [
        ('orgchart', '0015_detail_subordinates_url'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='detail',
            name='subordinates_url',
        ),
        migrations.AddField(
            model_name='detail',
            name='department_url',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='detail',
            name='location_url',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='detail',
            name='manager_url',
            field=models.TextField(null=True),
        ),
    ]
|
# Deduplicate by round-tripping through a set; element order is not preserved.
my_list = [1, 3, 2, 5, 14, 6, 2, 3, 5, 6, 14, 9, 1]
new_list = [*{*my_list}]
print(new_list)
|
import numpy as np
import matplotlib.pyplot as plt
import os
import shutil
# Generate the figures in the same folder
os.chdir(os.path.dirname(__file__))
rng = np.random.RandomState(42)  # fixed seed for reproducible figures
# 2D parameter space: a 200x200 grid covering [-2.5, 2.5]^2
n_steps = 200
w1 = np.linspace(-2.5, 2.5, n_steps)
w2 = np.linspace(-2.5, 2.5, n_steps)
w1, w2 = np.meshgrid(w1, w2)  # both become (n_steps, n_steps) arrays
def relu(x):
    """Element-wise rectified linear unit: max(x, 0)."""
    return np.maximum(x, 0)


def mini_mlp(w1, w2, x):
    """Output of a two-parameter ReLU net: w2 * relu(w1 * x).

    Parameter order fixed: the original signature was (x, w1, w2) while every
    call site in this script passes (w1, w2, x), so the arguments were
    silently swapped and the plotted surface was relu(w1*w2)*x instead of the
    intended w2*relu(w1*x).
    """
    return w2 * relu(w1 * x)
# 30 points dataset
n = 30
x = np.abs(rng.randn(n) + 1)  # strictly positive inputs
y = 2 * x + 0.5 * rng.randn(n)  # f(x) = 2x + noise

# Mean squared error over the whole dataset, evaluated at every (w1, w2)
# grid point at once (w1/w2 are meshgrid arrays).
loss = 0.
for x_i, y_i in zip(x, y):
    loss += (y_i - mini_mlp(w1, w2, x_i)) ** 2
loss /= len(x)

# Plot output surface
fig = plt.figure(figsize=(8, 8))
plt.pcolormesh(w1, w2, loss, cmap=plt.cm.afmhot_r)
plt.contour(w1, w2, loss, 40, colors='k', alpha=0.3)
plt.xlabel('$w_1$')
plt.ylabel('$w_2$')
plt.title('Loss function of a ReLU net with 2 params')
fig.savefig("full_data_mlp_loss_landscape.png", dpi=80)

# SGD loss: one frame per sample, all on a shared color scale so frames
# are visually comparable.
vmin = 0
vmax = loss.max() * 1.5
folder = "tmp_loss_frames"
shutil.rmtree(folder, ignore_errors=True)  # start from a clean frame folder
os.makedirs(folder)
for i in range(len(x)):
    # Per-sample squared-error surface: what a single SGD step "sees".
    loss_i = (y[i] - mini_mlp(w1, w2, x[i])) ** 2
    fig = plt.figure(figsize=(8, 8))
    cmesh = plt.pcolormesh(w1, w2, loss_i, vmin=vmin, vmax=vmax,
                           cmap=plt.cm.afmhot_r)
    contour = plt.contour(w1, w2, loss_i, 40, colors='k', alpha=0.3)
    plt.text(-2, 1, "x = %0.2f ; y = %0.2f" % (x[i], y[i]))
    plt.xlabel('$w_1$')
    plt.ylabel('$w_2$')
    plt.title('Loss function of a ReLU net with 2 params')
    filename = '%s/loss_%03d.png' % (folder, i)
    print('saving %s...' % filename)
    fig.savefig(filename, dpi=80)
# Assemble the frames into an animated gif with ImageMagick, then clean up.
cmd = ("convert -resize 640x640 -delay 100 -loop 0 %s/*.png"
       " sgd_mlp_loss_landscape.gif" % folder)
print(cmd)
os.system(cmd)
shutil.rmtree(folder, ignore_errors=True)
|
import cv2
import tensorflow.keras
import numpy as np
from kakao import beepsound, send_music_link, send_question_text
## Image preprocessing for the classifier input.
def preprocessing(frame):
    """Resize, normalize and reshape a BGR frame into the model's
    (1, 224, 224, 3) float32 input tensor."""
    target_size = (224, 224)
    resized = cv2.resize(frame, target_size, interpolation=cv2.INTER_AREA)
    # Map pixel values from [0, 255] to roughly [-1, 1].
    normalized = (resized.astype(np.float32) / 127.0) - 1
    # Add the leading batch dimension expected by model.predict().
    return normalized.reshape((1, 224, 224, 3))
## Load the trained Keras model.
model_filename = 'keras_model.h5'
model = tensorflow.keras.models.load_model(model_filename)

# Camera capture object; 0 = built-in camera.
capture = cv2.VideoCapture(0)
# Capture frame size.
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

sleep_cnt = 1  # counts consecutive "drowsy" detections (~30 s check below)

while True:
    ret, frame = capture.read()
    if ret == True:
        print("read success!")

    # Mirror the image horizontally.
    frame_fliped = cv2.flip(frame, 1)
    # Show the live frame.
    cv2.imshow("VideoFrame", frame_fliped)
    # Poll every 200 ms; pressing any key in the window exits the loop.
    if cv2.waitKey(200) > 0:
        break

    # Preprocess the frame and classify it.
    preprocessed = preprocessing(frame_fliped)
    prediction = model.predict(preprocessed)
    print(np.round(prediction, 3))

    # NOTE(review): class order assumed [awake, drowsy, question] from the
    # indices used below -- confirm against the trained model's labels.
    if prediction[0, 1] >= np.mean(prediction[0]):
        print('졸림 상태')
        sleep_cnt += 1
        # If "drowsy" persists for ~30 checks: beep and send a KakaoTalk link.
        if sleep_cnt % 30 == 0:
            sleep_cnt = 1
            print('30초간 졸고 있네요!!!')
            beepsound()
            send_music_link()
            break
    elif prediction[0, 0] >= np.mean(prediction[0]):
        print('깨어있는 상태')
        sleep_cnt = 1
    elif prediction[0, 2] >= np.mean(prediction[0]):
        print('질문이 있어요!')
        beepsound()
        send_question_text()
        break

# Release the camera handle...
capture.release()
# ...and close every OpenCV window.
cv2.destroyAllWindows()
"""Treadmill module launcher.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import importlib
if __name__ == '__main__':
    hook_spec = os.environ.get('TREADMILL_INIT_HOOK')
    if hook_spec:
        # Import each colon-separated hook module purely for its side effects.
        for hook_module in hook_spec.split(':'):
            importlib.import_module(hook_module)

    from . import console

    # pylint complains "No value passed for parameter ... in function call".
    # This is ok, as these parameters come from click decorators.
    console.run()  # pylint: disable=no-value-for-parameter
|
import sys
import clean_taffy
import make_pool_v2
import preprocess_data
import train_recommend_v2
import glob
# Per-user working directory; argv[1] = user id, argv[2] = province code.
directory = "/var/www/html/users/"+str(sys.argv[1])
province = sys.argv[2]
root_dir = "/var/www/html/"
# Pipeline: clean raw data -> build candidate pool -> preprocess -> train & recommend.
clean_taffy.clean_taffy_liste(root_dir, directory, province)
make_pool_v2.make_pool_data(root_dir, directory, province)
preprocess_data.preprocess_all_data(root_dir, directory)
train_recommend_v2.train_and_recommend(root_dir, directory, province)
|
# Tuples #
# Tuples are immutable sequences.
tpl = (1, 2, 3)
print(tpl[1])
tpl = ('a', True, 123)
print(tpl)
# tpl[0] = 'New'  # would raise TypeError: tuples cannot be modified

# Sets #
# Sets are unordered collections of unique elements.
x = set()
for element in (1, 2, 4, 0.1, 4):  # the duplicate 4 is silently ignored
    x.add(element)
print(x)
convert = set([1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 5, 5, 5])
print(convert)

# Booleans #
# Booleans are False and True
True
False
import numpy as np
import cv2 as cv
import copy
from cameraParameter import CameraParameter
from algo import get_padding_transform
class FrameInterface:
    """Holds an image plus its feature points and mapping-matrix decomposition.

    The full 3x3 mapping M is kept alongside its decomposition into a
    rotation-like part R (M with the translation column zeroed) and a pure
    translation T.
    """

    def __init__(self):
        self.m_img = None   # image pixels
        self.m_kp = None    # cached keypoints
        self.m_desc = None  # cached descriptors
        self.m_R = None     # rotation-like component of M
        self.m_T = None     # translation component of M
        self.m_M = None     # full 3x3 mapping matrix

    def get_M(self):
        """Return the full mapping matrix."""
        # return np.dot(self.m_R, self.m_T)
        return self.m_M

    def set_M(self, M):
        """Store M and decompose it into R (translation zeroed) and T."""
        self.m_M = M
        self.m_R = copy.copy(M)
        self.m_R[0, 2] = 0
        self.m_R[1, 2] = 0
        self.m_T = np.eye(3)
        self.m_T[0, 2] = M[0, 2]
        self.m_T[1, 2] = M[1, 2]

    def get_R(self):
        """Return the rotation matrix."""
        return self.m_R

    def set_R(self, R):
        """Replace the rotation matrix."""
        self.m_R = R

    def get_T(self):
        """Return the translation matrix."""
        return self.m_T

    def set_T(self, T):
        """Replace the translation matrix."""
        self.m_T = T

    def get_image(self):
        """Return the raw image."""
        return self.m_img

    def get_rotated_image(self):
        """Return the image warped by R; subclasses must override."""
        # Fixed: `raise NotImplemented` is a TypeError in Python 3 because
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError

    def set_image(self, img):
        self.m_img = img

    def get_kp(self):
        return self.m_kp

    def get_kp_description(self):
        return self.m_desc

    def merge(self, frame):
        pass

    def clear_cache(self):
        """Drop cached derived data; no-op in the base class."""
        pass

    def decomposeM(self, M):
        """Split M into (rotation-like, translation) 3x3 matrices."""
        r = copy.copy(M)
        t = np.eye(3, dtype=np.float32)
        r[0, 2] = 0
        r[1, 2] = 0
        t[0, 2] = M[0, 2]
        t[1, 2] = M[1, 2]
        return r, t
class Frame(FrameInterface):
    """CPU frame: lazily computes and caches SURF keypoints/descriptors."""

    # NOTE(review): both defaults are evaluated once at definition time and
    # shared between calls; also np.array((1, 1, 3)) builds a 1-D length-3
    # array, not a 1x1x3 image -- confirm intent.
    def __init__(self, image=np.array((1, 1, 3), dtype=np.uint8), camPara=CameraParameter()):
        FrameInterface.__init__(self)
        self.m_R = camPara.outer[:3, :3]
        self.m_T = camPara.outer[:3, 3]
        self.m_M = camPara.outer[0:3, 0:3]
        self.m_img = image
        self.m_kp = None
        self.m_desc = None

    def set_image(self, img):
        '''
        Set the image and invalidate cached keypoints/descriptors.
        '''
        self.m_desc = None
        self.m_kp = None
        # self.m_img = img
        FrameInterface.set_image(self, img)

    def __get_detector(self):
        # SURF detector with a very low Hessian threshold (5).
        return cv.xfeatures2d.SURF_create(5)

    def clear_cache(self):
        self.m_kp = None
        self.m_desc = None
        super().clear_cache()

    def _get_kp_and_desc(self):
        """Detect SURF keypoints/descriptors on the non-black region, cache and return them."""
        detector = self.__get_detector()
        img = self.m_img
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        # Mask out pure-black pixels (e.g. padding) before detection.
        _, mask = cv.threshold(gray, 1, 255, cv.THRESH_BINARY)
        kp, desc = detector.detectAndCompute(gray, mask)
        # write cache
        self.m_kp = kp
        self.m_desc = desc
        return kp, desc

    def get_kp(self):
        if self.m_desc is None:
            self._get_kp_and_desc()
        return FrameInterface.get_kp(self)

    def get_kp_description(self):
        if self.m_desc is None:
            self._get_kp_and_desc()
        return FrameInterface.get_kp_description(self)
class GpuFrame(Frame):
    """CUDA variant of Frame: uploads the image to the GPU and runs SURF_CUDA."""

    def __init__(self, img, camPara=CameraParameter()):
        # NOTE(review): default CameraParameter() is evaluated once at
        # definition time and shared between calls -- confirm intent.
        FrameInterface.__init__(self)
        super().set_M(camPara.outer[0:3, 0:3])
        self.m_kp = None
        self.m_desc = None
        self.m_img = img
        self.m_gpu_desc = None  # descriptors kept on the GPU
        self.m_gpu_img = None   # lazily uploaded cv.cuda_GpuMat

    def _get_kp_and_desc(self):
        """Detect SURF keypoints on the GPU; caches CPU keypoints and GPU descriptors."""
        # Name mangling makes this resolve to GpuFrame's own __get_detector.
        detector = self.__get_detector()
        img = self.get_gpu_img()
        gray = cv.cuda.cvtColor(img, cv.COLOR_BGR2GRAY)
        _, mask = cv.cuda.threshold(gray, 1, 255, cv.THRESH_BINARY)
        kp, desc = detector.detectWithDescriptors(gray, mask)
        # write cache
        self.m_kp = detector.downloadKeypoints(kp)
        self.m_gpu_desc = desc
        return kp, desc

    def __get_detector(self):
        return cv.cuda.SURF_CUDA_create(5)

    def get_gpu_img(self):
        """Upload the image to the GPU on first use and cache the GpuMat."""
        if self.m_gpu_img is None:
            a = cv.cuda_GpuMat()
            a.upload(self.m_img)
            self.m_gpu_img = a
        return self.m_gpu_img

    def clear_cache(self):
        self.m_gpu_desc = None
        self.m_gpu_img = None
        return super().clear_cache()

    def get_kp_description(self):
        """Return CPU descriptors, downloading from the GPU cache if needed."""
        if self.m_gpu_desc is None or self.m_desc is None:
            self._get_kp_and_desc()
        if self.m_desc is None:
            self.m_desc = self.m_gpu_desc.download()
        return self.m_desc

    def get_gpu_kp_description(self):
        """Return descriptors still resident on the GPU."""
        if self.m_gpu_desc is None:
            self._get_kp_and_desc()
        return self.m_gpu_desc
|
# don't need -u everytime we push changes, only the first time it happens in a repo
# friendlistCleanser -- removes everyone from a League of Legends account's friendlist
import pyautogui, time
pyautogui.FAILSAFE = True  # slam the mouse into a screen corner to abort
# click coordinates (screen-resolution and client-layout specific)
FRIEND = (1517, 269)
UNFRIEND = (1560, 500)
CONFIRM = (900, 600 )
print('Client window in focus, please.')
time.sleep(2)  # grace period to bring the client window into focus
numberOfFriends = 241
# future addition?: how fast is your client (very slow, slow, average, fast)
for friend in range(numberOfFriends):
    # right-click the top friend entry, pick "unfriend", confirm the dialog
    pyautogui.moveTo(FRIEND, duration=0.1)
    pyautogui.rightClick()
    pyautogui.moveTo(UNFRIEND, duration=0.1)
    pyautogui.leftClick()
    pyautogui.moveTo(CONFIRM, duration=0.1)
    pyautogui.leftClick()
    time.sleep(1.5)  # accounts for the speed of the impeccable LoL client
print('Done.')
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import itertools
import os.path
from dataclasses import dataclass
from typing import Iterable
from pants.backend.javascript import package_json
from pants.backend.javascript.package_json import (
FirstPartyNodePackageTargets,
NodePackageDependenciesField,
NodePackageNameField,
OwningNodePackage,
OwningNodePackageRequest,
PackageJsonEntryPoints,
PackageJsonImports,
PackageJsonSourceField,
)
from pants.backend.javascript.subsystems.nodejs_infer import NodeJSInfer
from pants.backend.javascript.target_types import JSDependenciesField, JSSourceField
from pants.build_graph.address import Address
from pants.engine.addresses import Addresses
from pants.engine.internals.graph import Owners, OwnersRequest
from pants.engine.internals.native_dep_inference import NativeParsedJavascriptDependencies
from pants.engine.internals.native_engine import InferenceMetadata, NativeDependenciesRequest
from pants.engine.internals.selectors import Get
from pants.engine.rules import Rule, collect_rules, rule
from pants.engine.target import (
FieldSet,
HydratedSources,
HydrateSourcesRequest,
InferDependenciesRequest,
InferredDependencies,
Targets,
)
from pants.engine.unions import UnionRule
from pants.util.frozendict import FrozenDict
from pants.util.ordered_set import FrozenOrderedSet
@dataclass(frozen=True)
class NodePackageInferenceFieldSet(FieldSet):
    """Field set selecting `package_json` targets for dependency inference."""
    required_fields = (PackageJsonSourceField, NodePackageDependenciesField)
    source: PackageJsonSourceField
    dependencies: NodePackageDependenciesField
class InferNodePackageDependenciesRequest(InferDependenciesRequest):
    """Request to infer dependencies of a `package_json` target."""
    infer_from = NodePackageInferenceFieldSet
@dataclass(frozen=True)
class JSSourceInferenceFieldSet(FieldSet):
    """Field set selecting JS source targets for dependency inference."""
    required_fields = (JSSourceField, JSDependenciesField)
    source: JSSourceField
    dependencies: JSDependenciesField
class InferJSDependenciesRequest(InferDependenciesRequest):
    """Request to infer dependencies of a JS source target."""
    infer_from = JSSourceInferenceFieldSet
@rule
async def infer_node_package_dependencies(
    request: InferNodePackageDependenciesRequest,
    nodejs_infer: NodeJSInfer,
) -> InferredDependencies:
    """Infer a package's dependencies from its package.json entry points.

    Entry-point globs are resolved to their owning JS source targets.
    Disabled via the `[nodejs-infer].package_json_entry_points` option.
    """
    if not nodejs_infer.package_json_entry_points:
        return InferredDependencies(())
    entry_points = await Get(
        PackageJsonEntryPoints, PackageJsonSourceField, request.field_set.source
    )
    candidate_js_files = await Get(Owners, OwnersRequest(tuple(entry_points.globs_from_root())))
    js_targets = await Get(Targets, Addresses(candidate_js_files))
    # Keep only owners that are actually JS sources.
    return InferredDependencies(tgt.address for tgt in js_targets if tgt.has_field(JSSourceField))
class NodePackageCandidateMap(FrozenDict[str, Address]):
    """Maps an importable npm package name to the address providing it."""
    pass
@dataclass(frozen=True)
class RequestNodePackagesCandidateMap:
    """Request for the candidate package map visible from `address`."""
    address: Address
@rule
async def map_candidate_node_packages(
    req: RequestNodePackagesCandidateMap, first_party: FirstPartyNodePackageTargets
) -> NodePackageCandidateMap:
    """Map package names to addresses: all first-party packages plus the
    third-party dependencies of the package owning `req.address` (if any)."""
    owning_pkg = await Get(OwningNodePackage, OwningNodePackageRequest(req.address))
    candidate_tgts = itertools.chain(
        first_party, owning_pkg.third_party if owning_pkg != OwningNodePackage.no_owner() else ()
    )
    return NodePackageCandidateMap(
        (tgt[NodePackageNameField].value, tgt.address) for tgt in candidate_tgts
    )
@rule
async def prepare_inference_metadata(imports: PackageJsonImports) -> InferenceMetadata:
    """Build JS inference metadata from a package's `imports` pattern mappings."""
    return InferenceMetadata.javascript(
        imports.root_dir,
        {pattern: list(replacements) for pattern, replacements in imports.imports.items()},
    )
async def _prepare_inference_metadata(address: Address) -> InferenceMetadata:
    """Metadata for `address`: from its owning package.json, or a bare default
    rooted at the address's spec path when no owning package exists."""
    owning_pkg = await Get(OwningNodePackage, OwningNodePackageRequest(address))
    if not owning_pkg.target:
        return InferenceMetadata.javascript(address.spec_path, {})
    return await Get(
        InferenceMetadata, PackageJsonSourceField, owning_pkg.target[PackageJsonSourceField]
    )
@rule
async def infer_js_source_dependencies(
    request: InferJSDependenciesRequest,
    nodejs_infer: NodeJSInfer,
) -> InferredDependencies:
    """Infer a JS source's dependencies from its parsed import strings.

    File imports are resolved to owning targets; bare package imports are
    resolved through the candidate package map. Disabled via the
    `[nodejs-infer].imports` option.
    """
    source: JSSourceField = request.field_set.source
    if not nodejs_infer.imports:
        return InferredDependencies(())
    sources = await Get(
        HydratedSources, HydrateSourcesRequest(source, for_sources_types=[JSSourceField])
    )
    metadata = await _prepare_inference_metadata(request.field_set.address)
    import_strings = await Get(
        NativeParsedJavascriptDependencies,
        NativeDependenciesRequest(sources.snapshot.digest, metadata),
    )
    owners = await Get(Owners, OwnersRequest(tuple(import_strings.file_imports)))
    owning_targets = await Get(Targets, Addresses(owners))
    # First path segment of each bare import is treated as the package name.
    non_path_string_bases = FrozenOrderedSet(
        non_path_string.partition(os.path.sep)[0]
        for non_path_string in import_strings.package_imports
    )
    candidate_pkgs = await Get(
        NodePackageCandidateMap, RequestNodePackagesCandidateMap(request.field_set.address)
    )
    pkg_addresses = (
        candidate_pkgs[pkg_name] for pkg_name in non_path_string_bases if pkg_name in candidate_pkgs
    )
    return InferredDependencies(
        itertools.chain(
            pkg_addresses,
            (tgt.address for tgt in owning_targets if tgt.has_field(JSSourceField)),
        )
    )
def rules() -> Iterable[Rule | UnionRule]:
    """Rules and union registrations for JS dependency inference."""
    union_rules = (
        UnionRule(InferDependenciesRequest, InferNodePackageDependenciesRequest),
        UnionRule(InferDependenciesRequest, InferJSDependenciesRequest),
    )
    return [*collect_rules(), *package_json.rules(), *union_rules]
|
#JSON(javascript object notation) realtime server_to_browser communication
#Loading json in python
import json

# NOTE(review): mixed path separators ('E:\csv.../snakes.json'); works on
# Windows only by accident -- confirm the intended location.
with open('E:\csvdhf5xlsxurlallfiles/snakes.json', 'r') as json_file:
    json_data = json.load(json_file)
print(type(json_data))
for key, value in json_data.items():
    print(str(key)+':'+str(value))

#connecting to an API in python
import requests

# NOTE(review): the OMDb API key is hard-coded in the URL; secrets should not
# live in source.
url = 'http://www.omdbapi.com/?apikey=ff21610b&t=social+network'
r = requests.get(url)
json_data = r.json()
for key, value in json_data.items():
    print(str(key)+':'+str(value))

url1 = 'http://www.omdbapi.com/?apikey=ff21610b&t=social+network'
r = requests.get(url1)
json_data = r.json()
for k in json_data.keys():
    print(k+':', json_data[k])

# Wikipedia extracts API. NOTE(review): 'enintro' looks like a typo for
# 'exintro' -- confirm against the MediaWiki API.
url2='http://en.wikipedia.org/w/api.php?action=query&prop=extracts&format=json&enintro=&titles=pizza'
r = requests.get(url2)
json_data = r.json()
pizza_exract = json_data['query']
print(pizza_exract)

import tweepy, json
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 28 22:53:35 2015
@author: lenovo
"""
"""
题目内容:
如果列出10以内自然数中3或5的倍数,则包括3,5,6,9。
那么这些数字的和为23。
要求计算得出任意正整数n以内中3或5的倍数的自然数之和。
"""
#a = int(raw_input())
#sum = 0
#for i in range(0,a):
# if i % 3 == 0:
# sum += i
# elif i % 3 == 0 and i % 5 ==0:
# sum += i
# elif i % 5 == 0:
# sum += i
#print sum
"""
题目内容:
10以内的素数2,3,5,7的和为17。
要求计算得出任意正整数n以内的所有素数的和。
"""
#x = int(raw_input())
#sum = 0
#j = 2
#while j < x:
# for i in range(2,j):
# if j % i == 0:
# break
# else:
# sum += j
# j += 1
#print sum
"""
题目内容:
根据下列信息计算在1901年1月1日至2000年12月31日间
共有多少个星期天落在每月的第一天上?
a) 1900.1.1是星期一
b) 1月,3月,5月,7月,8月,10月和12月是31天
c) 4月,6月,9月和11月是30天
d) 2月是28天,在闰年是29天
e) 公元年数能被4整除且又不能被100整除是闰年
f) 能直接被400整除也是闰年
"""
#方法一
#day = 0
#con = 0
#for i in range(1901,2001):
# for j in range(1,13):
# if j == 1 or j == 3 or j == 5 or j == 7 or j == 8 or j == 10 or j == 12:
# day += 31
# elif j == 2:
# if (i % 4 == 0 and i % 100 != 0) or i % 400 == 0:
# day += 29
# else:
# day += 28
# else:
# day += 30
# if day % 7 == 5:
# con += 1
#print con
#
##方法二
#sundays = 0
#pastdays = 0
#
#for i in range(1900,2001):
# if (i % 4 == 0 and i % 100 !=0) or i % 400 == 0:
# for j in (31,29,31,30,31,30,31,31,30,31,30,31):
# pastdays = pastdays + j
# if (pastdays+1) % 7 == 0:
# sundays += 1
#
# else:
# for k in (31,28,31,30,31,30,31,31,30,31,30,31):
# pastdays = pastdays + k
# if (pastdays+1) % 7 == 0:
# sundays += 1
#print sundays - 2
"""
题目内容:
数字197可以被称为循环素数,
因为197的三个数位循环移位后的数字:197,971,719均为素数。
100以内这样的数字包括13个,
2,3,5,7,11,13,17,31,37,71,73,79,97。
要求任意正整数n以内一共有多少个这样的循环素数。
"""
import math
# NOTE: this block is Python 2 (raw_input, print statement, integer `/`).
# Count circular primes below n: primes whose every digit rotation is prime.
n = int(raw_input())
count = 0
# Primality test by trial division.
for i in range(2,n):
    for j in range(2,int(math.sqrt(i)+1)):
        if i%j == 0:
            break
    else:
        # i is prime; now test every digit rotation.
        prime = True
        temp = i
        digit = len(str(i))
        for k in range(1, digit):
            # Rotate: move the last digit to the front (integer division).
            temp = (temp % 10) * 10 ** (digit-1) + temp / 10
            for m in range(2,int(math.sqrt(temp)+1)):
                if temp % m == 0:
                    prime = False
                    break
            if prime == False:
                break
        # The for/else runs only when the rotation loop was not broken out
        # of, i.e. all rotations are prime.
        else:
            count += 1
print count
#Test
import pyaudio
import numpy as np
from numpy import zeros,linspace,short,fromstring,hstack,transpose,log, ndarray
from scipy import fft
from time import sleep
import time
import piplates.DAQC2plate as DAQC2
#import openpyxl
#from openpyxl import Workbook
import struct
import scipy.fftpack
#import matplotlib.pyplot as plt
import piplates.RELAYplate as RELAY
# Module-level state shared with button()/relay(); `global` at module scope
# is a no-op, so plain assignments suffice.
button_pressed = 0
# Fixed typo: the original assigned `whcih1 = []`, leaving `which1` undefined.
which1 = []
row_index = 4
# Set up audio sampler -
NUM_SAMPLES = 6144#12288 #24576 #previously 6144
SAMPLING_RATE = 48000
pa = pyaudio.PyAudio()
# Mono 16-bit input stream; frames_per_buffer matches NUM_SAMPLES so one
# full buffer is analysed per read.
_stream = pa.open(format=pyaudio.paInt16,
                  channels=1, rate=SAMPLING_RATE,
                  input=True,
                  frames_per_buffer=NUM_SAMPLES)
print("Frequency detector working. Press CTRL-C to quit.")
def get_audio():
    """Read four buffers from the microphone and print each buffer's dominant
    frequency (Hz), found as the peak FFT magnitude bin."""
    for _ in range(0, 4):
        # Busy-wait until a full buffer is available.
        while _stream.get_read_available() < NUM_SAMPLES:
            pass  # sleep(0.005)
        # np.frombuffer replaces the deprecated/removed numpy.fromstring for
        # decoding the raw byte buffer into 16-bit samples.
        audio_data = np.frombuffer(
            _stream.read(_stream.get_read_available(), exception_on_overflow=False),
            dtype=short)[-NUM_SAMPLES:]
        data = np.array(audio_data)  # writable copy of the read-only buffer
        w = np.fft.fft(data)
        freqs = np.fft.fftfreq(len(w))
        # Find the peak in the coefficients -> dominant frequency in Hz.
        idx = np.argmax(np.abs(w))
        freq = freqs[idx]
        freq_in_hertz = abs(freq * SAMPLING_RATE)
        print(int(freq_in_hertz))
    return
def button():
    """Poll the DAQC2 button input; on a press, blink digital output 0,
    fire the relay and run one audio capture."""
    if DAQC2.getDINbit(0,0) == 0:
        global button_pressed
        button_pressed = 1
        # Pulse DOUT 0 as a visual acknowledgement of the press.
        DAQC2.setDOUTbit(0,0)
        sleep(0.02)
        DAQC2.clrDOUTbit(0,0)
        sleep(0.015)
        relay()
        get_audio()
        button_pressed = 0
    return
def relay():
    """Pulse relay 1 for 20 ms when the button flag is set."""
    global button_pressed
    if button_pressed == 1:
        # The original had a bare string expression 'Pi-Plate RELAY' after
        # this call (a no-op); getID(1) is kept as a board liveness check.
        RELAY.getID(1)
        RELAY.relayON(1, 1)
        sleep(0.02)
        RELAY.relayOFF(1, 1)
    return ()
run = None
# `run` is never set truthy, so this polls the button forever (CTRL-C quits).
while not run:
    button()
# NOTE(review): unreachable while the loop above never terminates.
matched_freq=[0,0,0,0,0,0,0,0,0,0]
|
"""
Models for User Information (students, staff, etc)
Migration Notes
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py lms schemamigration student --auto description_of_your_change
3. Add the migration file created in edx-platform/common/djangoapps/student/migrations/
"""
from datetime import datetime
from random import randint
import hashlib
import json
import logging
import uuid
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.forms import ModelForm, forms
import comment_client as cc
from pytz import UTC
log = logging.getLogger(__name__)
AUDIT_LOG = logging.getLogger("audit")
class UserProfile(models.Model):
    """This is where we store all the user demographic fields. We have a
    separate table for this rather than extending the built-in Django auth_user.

    Notes:
        * Some fields are legacy ones from the first run of 6.002, from which
          we imported many users.
        * Fields like name and address are intentionally open ended, to account
          for international variations. An unfortunate side-effect is that we
          cannot efficiently sort on last names for instance.

    Replication:
        * Only the Portal servers should ever modify this information.
        * All fields are replicated into relevant Course databases

    Some of the fields are legacy ones that were captured during the initial
    MITx fall prototype.
    """

    class Meta:
        db_table = "auth_userprofile"

    # CRITICAL TODO/SECURITY
    # Sanitize all fields.
    # This is not visible to other users, but could introduce holes later
    user = models.OneToOneField(User, unique=True, db_index=True, related_name='profile')
    name = models.CharField(blank=True, max_length=255, db_index=True)
    meta = models.TextField(blank=True)  # JSON dictionary for future expansion
    courseware = models.CharField(blank=True, max_length=255, default='course.xml')

    # Location is no longer used, but is held here for backwards compatibility
    # for users imported from our first class.
    language = models.CharField(blank=True, max_length=255, db_index=True)
    location = models.CharField(blank=True, max_length=255, db_index=True)

    # Optional demographic data we started capturing from Fall 2012
    # NOTE(review): this_year is evaluated once at import time, so VALID_YEARS
    # goes stale in a long-lived process that crosses a year boundary.
    this_year = datetime.now(UTC).year
    VALID_YEARS = range(this_year, this_year - 120, -1)
    year_of_birth = models.IntegerField(blank=True, null=True, db_index=True)
    GENDER_CHOICES = (('m', 'Male'), ('f', 'Female'), ('o', 'Other'))
    gender = models.CharField(
        blank=True, null=True, max_length=6, db_index=True, choices=GENDER_CHOICES
    )

    # [03/21/2013] removed these, but leaving comment since there'll still be
    # p_se and p_oth in the existing data in db.
    # ('p_se', 'Doctorate in science or engineering'),
    # ('p_oth', 'Doctorate in another field'),
    LEVEL_OF_EDUCATION_CHOICES = (
        ('p', 'Doctorate'),
        ('m', "Master's or professional degree"),
        ('b', "Bachelor's degree"),
        ('a', "Associate's degree"),
        ('hs', "Secondary/high school"),
        ('jhs', "Junior secondary/junior high/middle school"),
        ('el', "Elementary/primary school"),
        ('none', "None"),
        ('other', "Other")
    )
    level_of_education = models.CharField(
        blank=True, null=True, max_length=6, db_index=True,
        choices=LEVEL_OF_EDUCATION_CHOICES
    )
    mailing_address = models.TextField(blank=True, null=True)
    goals = models.TextField(blank=True, null=True)
    # NOTE(review): default=1 relies on int->bool coercion; True would be clearer.
    allow_certificate = models.BooleanField(default=1)

    def get_meta(self):
        """Return the `meta` JSON blob parsed into a dict (empty dict when unset)."""
        js_str = self.meta
        if not js_str:
            js_str = dict()
        else:
            js_str = json.loads(self.meta)
        return js_str

    def set_meta(self, js):
        """Serialize `js` into the `meta` text field (caller must save())."""
        self.meta = json.dumps(js)
# Upload status values reported back by the Pearson test center (VCDC file).
TEST_CENTER_STATUS_ACCEPTED = "Accepted"
TEST_CENTER_STATUS_ERROR = "Error"
class TestCenterUser(models.Model):
    """This is our representation of the User for in-person testing, and
    specifically for Pearson at this point. A few things to note:

    * Pearson only supports Latin-1, so we have to make sure that the data we
      capture here will work with that encoding.
    * While we have a lot of this demographic data in UserProfile, it's much
      more free-structured there. We'll try to pre-pop the form with data from
      UserProfile, but we'll need to have a step where people who are signing
      up re-enter their demographic data into the fields we specify.
    * Users are only created here if they register to take an exam in person.

    The field names and lengths are modeled on the conventions and constraints
    of Pearson's data import system, including oddities such as suffix having
    a limit of 255 while last_name only gets 50.

    Also storing here the confirmation information received from Pearson (if any)
    as to the success or failure of the upload. (VCDC file)
    """
    # Our own record keeping...
    user = models.ForeignKey(User, unique=True, default=None)
    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    updated_at = models.DateTimeField(auto_now=True, db_index=True)
    # user_updated_at happens only when the user makes a change to their data,
    # and is something Pearson needs to know to manage updates. Unlike
    # updated_at, this will not get incremented when we do a batch data import.
    user_updated_at = models.DateTimeField(db_index=True)
    # Unique ID we assign our user for the Test Center.
    client_candidate_id = models.CharField(unique=True, max_length=50, db_index=True)
    # Name
    first_name = models.CharField(max_length=30, db_index=True)
    last_name = models.CharField(max_length=50, db_index=True)
    middle_name = models.CharField(max_length=30, blank=True)
    suffix = models.CharField(max_length=255, blank=True)
    salutation = models.CharField(max_length=50, blank=True)
    # Address
    address_1 = models.CharField(max_length=40)
    address_2 = models.CharField(max_length=40, blank=True)
    address_3 = models.CharField(max_length=40, blank=True)
    city = models.CharField(max_length=32, db_index=True)
    # state example: HI -- they have an acceptable list that we'll just plug in
    # state is required if you're in the US or Canada, but otherwise not.
    state = models.CharField(max_length=20, blank=True, db_index=True)
    # postal_code required if you're in the US or Canada
    postal_code = models.CharField(max_length=16, blank=True, db_index=True)
    # country is a ISO 3166-1 alpha-3 country code (e.g. "USA", "CAN", "MNG")
    country = models.CharField(max_length=3, db_index=True)
    # Phone
    phone = models.CharField(max_length=35)
    extension = models.CharField(max_length=8, blank=True, db_index=True)
    phone_country_code = models.CharField(max_length=3, db_index=True)
    fax = models.CharField(max_length=35, blank=True)
    # fax_country_code required *if* fax is present.
    fax_country_code = models.CharField(max_length=3, blank=True)
    # Company
    company_name = models.CharField(max_length=50, blank=True, db_index=True)
    # time at which edX sent the registration to the test center
    uploaded_at = models.DateTimeField(null=True, blank=True, db_index=True)
    # confirmation back from the test center, as well as timestamps
    # on when they processed the request, and when we received
    # confirmation back.
    processed_at = models.DateTimeField(null=True, db_index=True)
    upload_status = models.CharField(max_length=20, blank=True, db_index=True)  # 'Error' or 'Accepted'
    upload_error_message = models.CharField(max_length=512, blank=True)
    # Unique ID given to us for this User by the Testing Center. It's null when
    # we first create the User entry, and may be assigned by Pearson later.
    # (However, it may never be set if we are always initiating such candidate creation.)
    candidate_id = models.IntegerField(null=True, db_index=True)
    confirmed_at = models.DateTimeField(null=True, db_index=True)

    # NOTE: _generate_edx_id, _generate_candidate_id, create and is_accepted
    # were each defined three times with identical bodies in the original
    # file; the duplicates were dead code (each redefinition silently
    # replaced the previous one) and have been removed.

    @property
    def needs_uploading(self):
        """True when the user's data changed after the last Pearson upload."""
        return self.uploaded_at is None or self.uploaded_at < self.user_updated_at

    @staticmethod
    def user_provided_fields():
        """Names of the fields the user supplies via the registration form."""
        return ['first_name', 'middle_name', 'last_name', 'suffix', 'salutation',
                'address_1', 'address_2', 'address_3', 'city', 'state', 'postal_code', 'country',
                'phone', 'extension', 'phone_country_code', 'fax', 'fax_country_code', 'company_name']

    @property
    def email(self):
        return self.user.email

    def needs_update(self, fields):
        """True if any user-provided field differs from the given `fields` dict."""
        for fieldname in TestCenterUser.user_provided_fields():
            if fieldname in fields and getattr(self, fieldname) != fields[fieldname]:
                return True
        return False

    @staticmethod
    def _generate_edx_id(prefix):
        """Return `prefix` followed by 12 zero-padded random digits."""
        NUM_DIGITS = 12
        return u"{}{:012}".format(prefix, randint(1, 10 ** NUM_DIGITS - 1))

    @staticmethod
    def _generate_candidate_id():
        return TestCenterUser._generate_edx_id("edX")

    @classmethod
    def create(cls, user):
        """Build (but do not save) a TestCenterUser with a fresh unique client id."""
        testcenter_user = cls(user=user)
        # testcenter_user.candidate_id remains unset
        # assign an ID of our own, retrying on (unlikely) collisions:
        cand_id = cls._generate_candidate_id()
        while TestCenterUser.objects.filter(client_candidate_id=cand_id).exists():
            cand_id = cls._generate_candidate_id()
        testcenter_user.client_candidate_id = cand_id
        return testcenter_user

    @property
    def is_accepted(self):
        return self.upload_status == TEST_CENTER_STATUS_ACCEPTED

    @property
    def is_rejected(self):
        return self.upload_status == TEST_CENTER_STATUS_ERROR

    @property
    def is_pending(self):
        return not self.is_accepted and not self.is_rejected
class TestCenterUserForm(ModelForm):
    """ModelForm for the user-editable demographic fields of TestCenterUser,
    enforcing Pearson's Latin-1 and country/state constraints.

    NOTE(review): several lines of this class were truncated in the source
    (extraction artifact); the reconstructed spots are flagged below.
    """

    class Meta:
        model = TestCenterUser
        fields = ('first_name', 'middle_name', 'last_name', 'suffix', 'salutation',
                  'address_1', 'address_2', 'address_3', 'city', 'state', 'postal_code', 'country',
                  'phone', 'extension', 'phone_country_code', 'fax', 'fax_country_code', 'company_name')

    def update_and_save(self):
        """Save the form, stamping user_updated_at and resetting upload_status
        so the record is re-uploaded to Pearson."""
        new_user = self.save(commit=False)
        # create additional values here:
        new_user.user_updated_at = datetime.now(UTC)
        new_user.upload_status = ''
        new_user.save()
        # NOTE(review): this log line was truncated in the source; the message
        # text is reconstructed -- confirm against upstream.
        log.info(u"Updated demographic information for user's test center exam "
                 u"registration: username \"{}\"".format(new_user.user.username))

    # add validation:
    def clean_country(self):
        """Require a 3-letter alphabetic ISO 3166-1 code; normalize to upper case."""
        code = self.cleaned_data['country']
        if code and (len(code) != 3 or not code.isalpha()):
            raise forms.ValidationError(u'Must be three characters (ISO 3166-1): e.g. USA, CAN, MNG')
        return code.upper()

    def clean(self):
        """Cross-field validation: US/CAN address requirements, fax country
        code, and Latin-1 encodability of every cleaned field."""
        def _can_encode_as_latin(fieldvalue):
            try:
                fieldvalue.encode('iso-8859-1')
            except UnicodeEncodeError:
                return False
            return True

        cleaned_data = super(TestCenterUserForm, self).clean()
        # check for interactions between fields:
        if 'country' in cleaned_data:
            country = cleaned_data.get('country')
            if country == 'USA' or country == 'CAN':
                if 'state' in cleaned_data and len(cleaned_data['state']) == 0:
                    self._errors['state'] = self.error_class([u'Required if country is USA or CAN.'])
                    del cleaned_data['state']
                # NOTE(review): truncated line reconstructed.
                if 'postal_code' in cleaned_data and len(cleaned_data['postal_code']) == 0:
                    self._errors['postal_code'] = self.error_class([u'Required if country is USA or CAN.'])
                    del cleaned_data['postal_code']
        # NOTE(review): truncated condition reconstructed -- a fax number
        # requires its country code.
        if ('fax' in cleaned_data and len(cleaned_data['fax']) > 0
                and 'fax_country_code' in cleaned_data
                and len(cleaned_data['fax_country_code']) == 0):
            self._errors['fax_country_code'] = self.error_class([u'Required if fax is specified.'])
            del cleaned_data['fax_country_code']
        # check encoding for all fields:
        cleaned_data_fields = [fieldname for fieldname in cleaned_data]
        for fieldname in cleaned_data_fields:
            if not _can_encode_as_latin(cleaned_data[fieldname]):
                # NOTE(review): truncated message reconstructed.
                self._errors[fieldname] = self.error_class(
                    [u'Must only use characters in Latin-1 (iso-8859-1) encoding.'])
                del cleaned_data[fieldname]
        # Always return the full collection of cleaned data.
        return cleaned_data
# our own code to indicate that a request has been rejected.
ACCOMMODATION_REJECTED_CODE = 'NONE'

# (code, human-readable name) pairs for Pearson accommodation codes.
# BUG FIX: the source had a duplicated fragment of TestCenterUserForm.clean()
# spliced into the middle of this tuple; removed.
ACCOMMODATION_CODES = (
    (ACCOMMODATION_REJECTED_CODE, 'No Accommodation Granted'),
    ('EQPMNT', 'Equipment'),
    ('ET12ET', 'Extra Time - 1/2 Exam Time'),
    ('ET30MN', 'Extra Time - 30 Minutes'),
    ('ETDBTM', 'Extra Time - Double Time'),
    ('SEPRMM', 'Separate Room'),
    ('SRREAD', 'Separate Room and Reader'),
    ('SRRERC', 'Separate Room and Reader/Recorder'),
    ('SRRECR', 'Separate Room and Recorder'),
    ('SRSEAN', 'Separate Room and Service Animal'),
    ('SRSGNR', 'Separate Room and Sign Language Interpreter'),
)

# code -> name lookup for display.
ACCOMMODATION_CODE_DICT = {code: name for (code, name) in ACCOMMODATION_CODES}
class TestCenterRegistration(models.Model):
    """
    This is our representation of a user's registration for in-person testing,
    and specifically for Pearson at this point. A few things to note:

    * Pearson only supports Latin-1, so we have to make sure that the data we
      capture here will work with that encoding. This is less of an issue
      than for the TestCenterUser.

    * Registrations are only created here when a user registers to take an exam in person.

    The field names and lengths are modeled on the conventions and constraints
    of Pearson's data import system.
    """
    # to find an exam registration, we key off of the user and course_id.
    # If multiple exams per course are possible, we would also need to add the
    # exam_series_code.
    testcenter_user = models.ForeignKey(TestCenterUser, default=None)
    course_id = models.CharField(max_length=128, db_index=True)

    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    updated_at = models.DateTimeField(auto_now=True, db_index=True)
    # user_updated_at happens only when the user makes a change to their data,
    # and is something Pearson needs to know to manage updates. Unlike
    # updated_at, this will not get incremented when we do a batch data import.
    # The appointment dates, the exam count, and the accommodation codes can be updated,
    # but hopefully this won't happen often.
    user_updated_at = models.DateTimeField(db_index=True)

    # "client_authorization_id" is our unique identifier for the authorization.
    # This must be present for an update or delete to be sent to Pearson.
    client_authorization_id = models.CharField(max_length=20, unique=True, db_index=True)

    # information about the test, from the course policy:
    exam_series_code = models.CharField(max_length=15, db_index=True)
    eligibility_appointment_date_first = models.DateField(db_index=True)
    eligibility_appointment_date_last = models.DateField(db_index=True)

    # this is really a list of codes, using an '*' as a delimiter.
    # So it's not a choice list. We use the special value of ACCOMMODATION_REJECTED_CODE
    # to indicate the rejection of an accommodation request.
    accommodation_code = models.CharField(max_length=64, blank=True)

    # store the original text of the accommodation request.
    accommodation_request = models.CharField(max_length=1024, blank=True, db_index=False)

    # time at which edX sent the registration to the test center
    uploaded_at = models.DateTimeField(null=True, db_index=True)

    # confirmation back from the test center, as well as timestamps
    # on when they processed the request, and when we received
    # confirmation back.
    processed_at = models.DateTimeField(null=True, db_index=True)
    upload_status = models.CharField(max_length=20, blank=True, db_index=True)  # 'Error' or 'Accepted'
    upload_error_message = models.CharField(max_length=512, blank=True)
    # Unique ID given to us for this registration by the Testing Center. It's null when
    # we first create the registration entry, and may be assigned by Pearson later.
    # (However, it may never be set if we are always initiating such candidate creation.)
    authorization_id = models.IntegerField(null=True, db_index=True)
    confirmed_at = models.DateTimeField(null=True, db_index=True)

    @property
    def candidate_id(self):
        return self.testcenter_user.candidate_id

    @property
    def client_candidate_id(self):
        return self.testcenter_user.client_candidate_id

    @property
    def authorization_transaction_type(self):
        if self.authorization_id is not None:
            return 'Update'
        elif self.uploaded_at is None:
            return 'Add'
        elif self.registration_is_rejected:
            # Assume that if the registration was rejected before,
            # it is more likely this is the (first) correction
            # than a second correction in flight before the first was
            # processed.
            return 'Add'
        else:
            # TODO: decide what to send when we have uploaded an initial version,
            # but have not received confirmation back from that upload. If the
            # registration here has been changed, then we don't know if this changed
            # registration should be submitted as an 'add' or an 'update'.
            #
            # If the first registration were lost or in error (e.g. bad code),
            # the second should be an "Add". If the first were processed successfully,
            # then the second should be an "Update". We just don't know....
            return 'Update'

    @property
    def exam_authorization_count(self):
        # Someday this could go in the database (with a default value). But at present,
        # we do not expect anyone to be authorized to take an exam more than once.
        return 1

    @property
    def needs_uploading(self):
        return self.uploaded_at is None or self.uploaded_at < self.user_updated_at

    @classmethod
    def create(cls, testcenter_user, exam, accommodation_request):
        """Build (without saving) a registration for `testcenter_user` taking `exam`."""
        registration = cls(testcenter_user=testcenter_user)
        registration.course_id = exam.course_id
        registration.accommodation_request = accommodation_request.strip()
        registration.exam_series_code = exam.exam_series_code
        registration.eligibility_appointment_date_first = exam.first_eligible_appointment_date.strftime("%Y-%m-%d")
        registration.eligibility_appointment_date_last = exam.last_eligible_appointment_date.strftime("%Y-%m-%d")
        registration.client_authorization_id = cls._create_client_authorization_id()
        # accommodation_code remains blank for now, along with Pearson confirmation information
        return registration

    @staticmethod
    def _generate_authorization_id():
        return TestCenterUser._generate_edx_id("edXexam")

    @staticmethod
    def _create_client_authorization_id():
        """
        Return a unique id for a registration, suitable for using as an authorization code
        for Pearson. It must fit within 20 characters.
        """
        # generate a random value, and check to see if it already is in use here
        auth_id = TestCenterRegistration._generate_authorization_id()
        while TestCenterRegistration.objects.filter(client_authorization_id=auth_id).exists():
            auth_id = TestCenterRegistration._generate_authorization_id()
        return auth_id

    # methods for providing registration status details on registration page:
    @property
    def demographics_is_accepted(self):
        return self.testcenter_user.is_accepted

    @property
    def demographics_is_rejected(self):
        return self.testcenter_user.is_rejected

    @property
    def demographics_is_pending(self):
        return self.testcenter_user.is_pending

    @property
    def accommodation_is_accepted(self):
        return len(self.accommodation_request) > 0 and len(self.accommodation_code) > 0 and self.accommodation_code != ACCOMMODATION_REJECTED_CODE

    @property
    def accommodation_is_rejected(self):
        return len(self.accommodation_request) > 0 and self.accommodation_code == ACCOMMODATION_REJECTED_CODE

    @property
    def accommodation_is_pending(self):
        return len(self.accommodation_request) > 0 and len(self.accommodation_code) == 0

    @property
    def accommodation_is_skipped(self):
        return len(self.accommodation_request) == 0

    @property
    def registration_is_accepted(self):
        return self.upload_status == TEST_CENTER_STATUS_ACCEPTED

    @property
    def registration_is_rejected(self):
        return self.upload_status == TEST_CENTER_STATUS_ERROR

    @property
    def registration_is_pending(self):
        return not self.registration_is_accepted and not self.registration_is_rejected

    # methods for providing registration status summary on dashboard page:
    @property
    def is_accepted(self):
        return self.registration_is_accepted and self.demographics_is_accepted

    @property
    def is_rejected(self):
        # BUG FIX: this property had no body in the source. Mirroring
        # is_accepted above: rejected overall if either the registration
        # or the demographic upload was rejected.
        return self.registration_is_rejected or self.demographics_is_rejected

    @property
    def is_pending(self):
        return not self.is_accepted and not self.is_rejected

    def get_accommodation_codes(self):
        # accommodation_code stores '*'-delimited codes; split into a list.
        return self.accommodation_code.split('*')

    def get_accommodation_names(self):
        return [ACCOMMODATION_CODE_DICT.get(code, "Unknown code " + code) for code in self.get_accommodation_codes()]

    @property
    def registration_signup_url(self):
        return settings.PEARSONVUE_SIGNINPAGE_URL

    def demographics_status(self):
        if self.demographics_is_accepted:
            return "Accepted"
        elif self.demographics_is_rejected:
            return "Rejected"
        else:
            return "Pending"

    def accommodation_status(self):
        if self.accommodation_is_skipped:
            return "Skipped"
        elif self.accommodation_is_accepted:
            return "Accepted"
        elif self.accommodation_is_rejected:
            return "Rejected"
        else:
            return "Pending"

    def registration_status(self):
        if self.registration_is_accepted:
            return "Accepted"
        elif self.registration_is_rejected:
            return "Rejected"
        else:
            return "Pending"
class TestCenterRegistrationForm(ModelForm):
    """Form for editing the accommodation fields of a test-center registration."""
    class Meta:
        model = TestCenterRegistration
        fields = ('accommodation_request', 'accommodation_code')

    def clean_accommodation_request(self):
        # Strip surrounding whitespace from a non-empty request.
        code = self.cleaned_data['accommodation_request']
        if code and len(code) > 0:
            return code.strip()
        return code

    def update_and_save(self):
        """Persist the form, stamping user_updated_at and clearing upload_status."""
        registration = self.save(commit=False)
        # create additional values here:
        registration.user_updated_at = datetime.now(UTC)
        registration.upload_status = ''
        registration.save()
        # BUG FIX: this log statement was truncated in the source; restored.
        log.info("Updated registration information for user's test center exam registration: username \"{}\" course \"{}\", examcode \"{}\"".format(registration.testcenter_user.user.username, registration.course_id, registration.exam_series_code))

    def clean_accommodation_code(self):
        # Validate every '*'-delimited code against the known set; normalize to upper case.
        code = self.cleaned_data['accommodation_code']
        if code:
            code = code.upper()
            codes = code.split('*')
            for codeval in codes:
                if codeval not in ACCOMMODATION_CODE_DICT:
                    raise forms.ValidationError(u'Invalid accommodation code specified: "{}"'.format(codeval))
        return code
def get_testcenter_registration(user, course_id, exam_series_code):
    """Return the registrations matching (user, course, exam), or [] if the
    user has no TestCenterUser record at all."""
    try:
        center_user = TestCenterUser.objects.get(user=user)
    except TestCenterUser.DoesNotExist:
        return []
    return TestCenterRegistration.objects.filter(
        testcenter_user=center_user, course_id=course_id, exam_series_code=exam_series_code)


# nosetests thinks that anything with _test_ in the name is a test.
# Correct this (https://nose.readthedocs.org/en/latest/finding_tests.html)
get_testcenter_registration.__test__ = False
def unique_id_for_user(user):
    """
    Return a unique id for a user, suitable for inserting into
    e.g. personalized survey links.
    """
    # Salt with the secret key so the ids are unique across LMS installs.
    digest = hashlib.md5()
    for piece in (settings.SECRET_KEY, str(user.id)):
        digest.update(piece)
    return digest.hexdigest()
# TODO: Should be renamed to generic UserGroup, and possibly
# Given an optional field for type of group
class UserTestGroup(models.Model):
    """A named group of users (e.g. for mailing lists or experiments)."""
    # Members of the group.
    users = models.ManyToManyField(User, db_index=True)
    # Short unique-ish group name (not enforced unique here).
    name = models.CharField(blank=False, max_length=32, db_index=True)
    # Free-form human-readable description.
    description = models.TextField(blank=True)
class Registration(models.Model):
    ''' Allows us to wait for e-mail before user is registered. A
    registration profile is created when the user creates an
    account, but that account is inactive. Once the user clicks
    on the activation key, it becomes active. '''

    class Meta:
        db_table = "auth_registration"

    # One registration row per user.
    user = models.ForeignKey(User, unique=True)
    # Token e-mailed to the user; presenting it activates the account.
    activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True)

    def register(self, user):
        """Attach this registration to `user` with a fresh activation key and save."""
        # MINOR TODO: Switch to crypto-secure key
        self.activation_key = uuid.uuid4().hex
        self.user = user
        self.save()

    def activate(self):
        """Mark the linked account active (the Registration row itself is kept)."""
        self.user.is_active = True
        self.user.save()
class PendingNameChange(models.Model):
    """A user's requested profile-name change, awaiting moderation."""
    user = models.OneToOneField(User, unique=True, db_index=True)
    # The requested new display name.
    new_name = models.CharField(blank=True, max_length=255)
    # The user's stated reason for the change.
    rationale = models.CharField(blank=True, max_length=1024)
class PendingEmailChange(models.Model):
    """A user's requested e-mail change, awaiting confirmation via activation key."""
    user = models.OneToOneField(User, unique=True, db_index=True)
    # The address the account will switch to once confirmed.
    new_email = models.CharField(blank=True, max_length=255, db_index=True)
    # Token e-mailed to the new address to prove ownership.
    activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True)
class CourseEnrollment(models.Model):
    """
    Represents a Student's Enrollment record for a single Course. You should
    generally not manipulate CourseEnrollment objects directly, but use the
    classmethods provided to enroll, unenroll, or check on the enrollment status
    of a given student.

    We're starting to consolidate course enrollment logic in this class, but
    more should be brought in (such as checking against CourseEnrollmentAllowed,
    checking course dates, user permissions, etc.) This logic is currently
    scattered across our views.
    """
    user = models.ForeignKey(User)
    course_id = models.CharField(max_length=255, db_index=True)
    created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)

    # If is_active is False, then the student is not considered to be enrolled
    # in the course (is_enrolled() will return False)
    is_active = models.BooleanField(default=True)

    # Represents the modes that are possible. We'll update this later with a
    # list of possible values.
    mode = models.CharField(default="honor", max_length=100)

    class Meta:
        unique_together = (('user', 'course_id'),)
        ordering = ('user', 'course_id')

    def __unicode__(self):
        return (
            "[CourseEnrollment] {}: {} ({}); active: ({})"
        ).format(self.user, self.course_id, self.created, self.is_active)

    @classmethod
    def create_enrollment(cls, user, course_id, mode="honor", is_active=False):
        """
        Create an enrollment for a user in a class. By default *this enrollment
        is not active*. This is useful for when an enrollment needs to go
        through some sort of approval process before being activated. If you
        don't need this functionality, just call `enroll()` instead.

        Returns a CoursewareEnrollment object.

        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.

        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)

        `mode` is a string specifying what kind of enrollment this is. The
               default is "honor", meaning honor certificate. Future options
               may include "audit", "verified_id", etc. Please don't use it
               until we have these mapped out.

        `is_active` is a boolean. If the CourseEnrollment object has
               `is_active=False`, then calling
               `CourseEnrollment.is_enrolled()` for that user/course_id
               will return False.

        It is expected that this method is called from a method which has already
        verified the user authentication and access.
        """
        # If we're passing in a newly constructed (i.e. not yet persisted) User,
        # save it to the database so that it can have an ID that we can throw
        # into our CourseEnrollment object. Otherwise, we'll get an
        # IntegrityError for having a null user_id.
        if user.id is None:
            user.save()

        enrollment, _ = CourseEnrollment.objects.get_or_create(
            user=user,
            course_id=course_id,
        )

        # In case we're reactivating a deactivated enrollment, or changing the
        # enrollment mode.
        if enrollment.mode != mode or enrollment.is_active != is_active:
            enrollment.mode = mode
            enrollment.is_active = is_active
            enrollment.save()

        return enrollment

    @classmethod
    def enroll(cls, user, course_id, mode="honor"):
        """
        Enroll a user in a course. This saves immediately.

        Returns a CoursewareEnrollment object.

        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.

        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)

        `mode` is a string specifying what kind of enrollment this is. The
               default is "honor", meaning honor certificate. Future options
               may include "audit", "verified_id", etc. Please don't use it
               until we have these mapped out.

        It is expected that this method is called from a method which has already
        verified the user authentication and access.
        """
        return cls.create_enrollment(user, course_id, mode, is_active=True)

    @classmethod
    def enroll_by_email(cls, email, course_id, mode="honor", ignore_errors=True):
        """
        Enroll a user in a course given their email. This saves immediately.

        Note that enrolling by email is generally done in big batches and the
        error rate is high. For that reason, we supress User lookup errors by
        default.

        Returns a CoursewareEnrollment object. If the User does not exist and
        `ignore_errors` is set to `True`, it will return None.

        `email` Email address of the User to add to enroll in the course.

        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)

        `mode` is a string specifying what kind of enrollment this is. The
               default is "honor", meaning honor certificate. Future options
               may include "audit", "verified_id", etc. Please don't use it
               until we have these mapped out.

        `ignore_errors` is a boolean indicating whether we should suppress
                        `User.DoesNotExist` errors (returning None) or let it
                        bubble up.

        It is expected that this method is called from a method which has already
        verified the user authentication and access.
        """
        try:
            user = User.objects.get(email=email)
            return cls.enroll(user, course_id, mode)
        except User.DoesNotExist:
            err_msg = u"Tried to enroll email {} into course {}, but user not found"
            # BUG FIX: err_msg was built but never logged in the source.
            log.error(err_msg.format(email, course_id))
            if ignore_errors:
                return None
            raise

    @classmethod
    def unenroll(cls, user, course_id):
        """
        Remove the user from a given course. If the relevant `CourseEnrollment`
        object doesn't exist, we log an error but don't throw an exception.

        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.

        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)
        """
        try:
            record = CourseEnrollment.objects.get(user=user, course_id=course_id)
            record.is_active = False
            record.save()
        except cls.DoesNotExist:
            err_msg = u"Tried to unenroll student {} from {} but they were not enrolled"
            log.error(err_msg.format(user, course_id))

    @classmethod
    def unenroll_by_email(cls, email, course_id):
        """
        Unenroll a user from a course given their email. This saves immediately.
        User lookup errors are logged but will not throw an exception.

        `email` Email address of the User to unenroll from the course.

        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)
        """
        try:
            user = User.objects.get(email=email)
            return cls.unenroll(user, course_id)
        except User.DoesNotExist:
            err_msg = u"Tried to unenroll email {} from course {}, but user not found"
            log.error(err_msg.format(email, course_id))

    @classmethod
    def is_enrolled(cls, user, course_id):
        """
        Returns True if the user is enrolled in the course (the entry must exist
        and it must have `is_active=True`). Otherwise, returns False.

        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.

        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)
        """
        # BUG FIX: removed a stray, unreachable log.error line (referencing
        # undefined names) that followed the return in the source.
        try:
            record = CourseEnrollment.objects.get(user=user, course_id=course_id)
            return record.is_active
        except cls.DoesNotExist:
            return False

    @classmethod
    def enrollment_mode_for_user(cls, user, course_id):
        """
        Returns the enrollment mode for the given user for the given course

        `user` is a Django User object

        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)
        """
        try:
            record = CourseEnrollment.objects.get(user=user, course_id=course_id)
            if record.is_active:
                return record.mode
            else:
                return None
        except cls.DoesNotExist:
            return None

    @classmethod
    def enrollments_for_user(cls, user):
        # Use the boolean rather than the magic integer 1 (same query).
        return CourseEnrollment.objects.filter(user=user, is_active=True)

    def activate(self):
        """Makes this `CourseEnrollment` record active. Saves immediately."""
        if not self.is_active:
            self.is_active = True
            self.save()

    def deactivate(self):
        """Makes this `CourseEnrollment` record inactive. Saves immediately. An
        inactive record means that the student is not enrolled in this course.
        """
        if self.is_active:
            self.is_active = False
            self.save()

    @classmethod
    def students_enrolled(cls, course_id):
        # BUG FIX: this classmethod was missing its `cls` parameter, so any
        # call would have bound course_id to the class and failed.
        # NOTE(review): filtering User on course_id presumably relies on a
        # related lookup — stock django.contrib.auth.User has no such field;
        # verify against the actual User model in use.
        return User.objects.filter(course_id=course_id, is_active=True).order_by('username')
class CourseEnrollmentAllowed(models.Model):
    """
    Table of users (specified by email address strings) who are allowed to enroll in a specified course.
    The user may or may not (yet) exist. Enrollment by users listed in this table is allowed
    even if the enrollment time window is past.
    """
    email = models.CharField(max_length=255, db_index=True)
    course_id = models.CharField(max_length=255, db_index=True)
    # Use the boolean literal instead of the magic integer 0 (same stored value).
    auto_enroll = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)

    class Meta:
        unique_together = (('email', 'course_id'),)

    def __unicode__(self):
        return "[CourseEnrollmentAllowed] %s: %s (%s)" % (self.email, self.course_id, self.created)
# cache_relation(User.profile)
#### Helper methods for use from python manage.py shell and other classes.
def get_user_by_username_or_email(username_or_email):
    """
    Return a User object, looking up by email if username_or_email contains a
    '@', otherwise by username.

    Raises:
        User.DoesNotExist if the lookup fails.
    """
    field = 'email' if '@' in username_or_email else 'username'
    return User.objects.get(**{field: username_or_email})
def get_user(email):
    """Return the (User, UserProfile) pair for the account with this e-mail."""
    account = User.objects.get(email=email)
    profile = UserProfile.objects.get(user=account)
    return account, profile
def user_info(email):
    # Interactive helper (for manage.py shell): dump key account and profile
    # fields to stdout and return the (User, UserProfile) pair.
    # NOTE: Python 2 print statements — this module predates Python 3.
    u, up = get_user(email)
    print "User id", u.id
    print "Username", u.username
    print "E-mail", u.email
    print "Name", up.name
    print "Location", up.location
    print "Language", up.language
    return u, up
def change_email(old_email, new_email):
    """Re-point the account registered under `old_email` to `new_email` and save."""
    account = User.objects.get(email=old_email)
    account.email = new_email
    account.save()
def change_name(email, new_name):
    """Set the profile display name for the user with this e-mail and save."""
    _account, profile = get_user(email)
    profile.name = new_name
    profile.save()
def user_count():
    # Interactive helper: print total/active account counts, return the total.
    # NOTE: Python 2 print statements.
    print "All users", User.objects.all().count()
    print "Active users", User.objects.filter(is_active=True).count()
    return User.objects.all().count()
def students_enrolled(course_id):
    # Module-level duplicate of CourseEnrollment.students_enrolled.
    # NOTE(review): stock django.contrib.auth.User has no course_id field —
    # this filter presumably relies on a related lookup or a custom User
    # model; verify before use.
    return User.objects.filter(course_id=course_id, is_active=True).order_by('username')
def active_user_count():
    # Number of accounts with is_active=True (i.e. activated accounts).
    return User.objects.filter(is_active=True).count()
def create_group(name, description):
    """Create and persist a new UserTestGroup with the given name and description."""
    group = UserTestGroup(name=name, description=description)
    group.save()
def add_user_to_group(user, group):
    """Add the user (looked up by username) to the named UserTestGroup."""
    target = UserTestGroup.objects.get(name=group)
    member = User.objects.get(username=user)
    target.users.add(member)
    target.save()
def remove_user_from_group(user, group):
    """Remove the user (looked up by username) from the named UserTestGroup."""
    target = UserTestGroup.objects.get(name=group)
    member = User.objects.get(username=user)
    target.users.remove(member)
    target.save()
# Well-known opt-in/opt-out group names and their human-readable descriptions;
# add_user_to_default_group() below lazily creates these groups on first use.
default_groups = {'email_future_courses': 'Receive e-mails about future MITx courses',
                  'email_helpers': 'Receive e-mails about how to help with MITx',
                  'mitx_unenroll': 'Fully unenrolled -- no further communications',
                  '6002x_unenroll': 'Took and dropped 6002x'}
def add_user_to_default_group(user, group):
    """Add the user to one of the well-known groups, creating it on first use.

    Raises KeyError if `group` is not listed in `default_groups`.
    """
    try:
        utg = UserTestGroup.objects.get(name=group)
    except UserTestGroup.DoesNotExist:
        # Lazily create the group with its canonical description.
        utg = UserTestGroup(name=group, description=default_groups[group])
        utg.save()
    utg.users.add(User.objects.get(username=user))
    utg.save()
@receiver(post_save, sender=User)
def update_user_information(sender, instance, created, **kwargs):
    """Mirror User saves into the discussion (comments) service, best-effort."""
    if not settings.MITX_FEATURES['ENABLE_DISCUSSION_SERVICE']:
        # Don't try--it won't work, and it will fill the logs with lots of errors
        return
    try:
        cc_user = cc.User.from_django_user(instance)
        cc_user.save()
    except Exception as e:
        # BUG FIX: the source duplicated the getLogger/log.error lines; deduplicated.
        # Deliberately broad catch: a discussion-service outage must not break User saves.
        log = logging.getLogger("mitx.discussion")
        log.error(unicode(e))
        log.error("update user info to discussion failed for user with id: " + str(instance.id))
# Define login and logout handlers here in the models file, instead of the views file,
# so that they are more likely to be loaded when a Studio user brings up the Studio admin
# page to login. These are currently the only signals available, so we need to continue
# identifying and logging failures separately (in views).
@receiver(user_logged_in)
def log_successful_login(sender, request, user, **kwargs):
    """Handler to log when logins have occurred successfully."""
    message = u"Login success - {0} ({1})".format(user.username, user.email)
    AUDIT_LOG.info(message)
@receiver(user_logged_out)
def log_successful_logout(sender, request, user, **kwargs):
    """Handler to log when logouts have occurred successfully."""
    message = u"Logout - {0}".format(request.user)
    AUDIT_LOG.info(message)
|
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
# URL routes for the shop app: storefront pages, signup/activation, auth via
# Django's built-in views (themed with shop templates), and password reset.
urlpatterns = [
    # Storefront and informational pages
    path("", views.index, name="ShopHome"),
    path("about/", views.about, name="AboutUs"),
    path("contact/", views.contact, name="ContactUs"),
    path("profile/", views.profile, name="profile"),
    path("tracker/", views.tracker, name="TrackingStatus"),
    path("search/", views.search, name="Search"),
    path("products/<int:myid>", views.productView, name="ProductView"),
    # Checkout and payment-gateway callback
    path("checkout/", views.checkout, name="Checkout"),
    path("handlerequest/", views.handlerequest, name="HandleRequest"),
    # Account signup and e-mail activation
    path('signup/', views.signup, name="signup"),
    path('activate/<uidb64>/<token>/',views.activate, name='activate'),
    # Login/logout via Django auth views
    path("login/", auth_views.LoginView.as_view(template_name='shop/login.html', redirect_authenticated_user=True),name='login'),
    path("logout/", auth_views.LogoutView.as_view(next_page='/'),name='logout'),
    # Password-reset flow: request -> done -> confirm -> complete
    path("password_reset/", auth_views.PasswordResetView.as_view(template_name='shop/password_reset.html'),name='password_reset'),
    path("password_reset/done/", auth_views.PasswordResetDoneView.as_view(template_name='shop/password_reset_done.html'),name='password_reset_done'),
    path("password_reset_confirm/<uidb64>/<token>/", auth_views.PasswordResetConfirmView.as_view(template_name='shop/password_reset_confirm.html'),name='password_reset_confirm'),
    path("password_reset_complete/", auth_views.PasswordResetCompleteView.as_view(template_name='shop/password_reset_complete.html'),name='password_reset_complete'),
]
|
""" Core Messaging Pages """
import os
from typing import Optional
from datetime import datetime, timedelta
from fastapi import APIRouter, Request, status, Cookie, Depends
from fastapi.responses import HTMLResponse, RedirectResponse
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from ..utils import get_cursor, use_cursor
from .__init__ import templates
# FastAPI context
router = APIRouter()
# Functions
@router.get('/messages', response_class=HTMLResponse)
async def messages(request: Request):
    """Render the core messages page."""
    context = {'request': request}
    return templates.TemplateResponse('messages.html', context)
|
# Takeoff coordinates (degrees) and flight altitudes.
Lat1 = -33.4503685
Lon1 = -70.6897731
Alt = 10
AltMax = 20

# Flight-perimeter corner coordinates: 4 points, matched by index.
# (Stray trailing semicolons removed.)
perimetroLat = [-33.4506, -33.4506, -33.4500, -33.4500]
perimetroLon = [-70.6897, -70.6894, -70.6894, -70.6897]

# Grid dimensions in cells; filled in by ConstruirGrid().
largo = 0
ancho = 0

# Drone state: [grid x, grid y, heading code (1-4), altitude].
dron = [0, 0, 4, Alt]

area = 0
completitud = 0
# Sequence of visited cells, consumed by EscribirWaypoints().
trayectoria = []
import numpy as np
def SetearParametros():
    """Prompt the operator for takeoff point, altitudes and the 4-corner perimeter.

    BUG FIXES vs. source:
    - declared the module globals (the original assigned locals, silently
      discarding every input);
    - "..." + {i} raised TypeError (str + set); use str(i + 1);
    - loop indexed 1..4 into 4-element lists (IndexError at 4); use 0..3;
    - convert inputs to float so later arithmetic (ConstruirGrid etc.) works.
    """
    global Lat1, Lon1, Alt, AltMax
    Lat1 = float(input("Latitud de Despegue: "))
    Lon1 = float(input("Longitud de Despegue: "))
    Alt = float(input("Altitud de Vuelo: "))
    AltMax = float(input("Altitud Max de Vuelo: "))
    print("A continuacion se le pedira el perimetro de vuelo:")
    for i in range(4):
        print("Latitud del punto: " + str(i + 1))
        perimetroLat[i] = float(input())
        print("Longitud del punto: " + str(i + 1))
        perimetroLon[i] = float(input())
def EscribirParametros():
    """Print the configured takeoff point and the four perimeter corners."""
    print(Lat1, Lon1)
    for idx in range(4):
        print(perimetroLat[idx], perimetroLon[idx])
def ConstruirGrid():
    """Derive grid dimensions from the perimeter and return a zeroed occupancy grid.

    Sets the module globals `ancho` (columns of longitude) and `largo`
    (rows of latitude); one cell is ~0.00005435 deg lon by ~0.0000416 deg lat.
    """
    global ancho, largo
    ancho = int((abs(perimetroLon[0]) - abs(perimetroLon[1])) / 0.00005435)
    largo = int((abs(perimetroLat[1]) - abs(perimetroLat[2])) / 0.0000416)
    return np.zeros((ancho, largo), dtype=int)
def GenerarWaypoints():
    """Return an (ancho*largo, 2) array of (lat, lon) cell-centre waypoints.

    Sweeps column-major: i % largo walks along latitude, i // largo along
    longitude; the half-cell offsets centre each waypoint in its cell.
    BUG FIX: removed leftover per-point debug prints ("everything okay?").
    """
    puntos = largo * ancho
    waypoints = np.zeros((puntos, 2), dtype=float)
    lon_i = perimetroLon[0]
    lat_i = perimetroLat[2]
    for i in range(puntos):
        waypoints[i][0] = lat_i - (int(i % largo) * 0.0000416 + 0.0000208)
        waypoints[i][1] = lon_i + (int(i / largo) * 0.00005435 - 0.0000272)
    return waypoints
def EscribirWaypoints(ruta, waypoints):
    """Write the planned trajectory as a QGC WPL 110 mission file at `ruta`.

    The first entry is the takeoff point (Lat1/Lon1/Alt); each trajectory
    cell [x, y, alt] is mapped to its waypoint via index x*largo + y.
    BUG FIX: the source ended with `f.close` (attribute access, never called),
    leaking the file handle; a `with` block now guarantees the close/flush.
    """
    with open(ruta, "w+") as f:
        f.write("QGC WPL 110\n")
        f.write("0 1 0 16 0 0 0 0 " + str(Lat1) + " " + str(Lon1) + " " + str(Alt) + " 1\n")
        i = 1
        for x in trayectoria:
            punto = x[0] * largo + x[1]
            latitudPunto = str(waypoints[punto][0])
            longitudPunto = str(waypoints[punto][1])
            alturaPunto = str(x[2])
            f.write(str(i) + " 1 0 16 0 0 0 0 " + latitudPunto + " " + longitudPunto + " " + alturaPunto + " 1\n")
            i += 1
def IngresarObtaculo():
    """Interactively read obstacles from stdin and mark their cells in the global Grid.

    For each obstacle the operator enters centre coordinates (lat/lon), height
    and size; the covered cells are set to 3 (over-flyable: height < AltMax)
    or 1 (blocked). Returns the vertex array of the last obstacle processed.
    NOTE(review): conversion factors (0.00005435 deg lon, 0.0000416 deg lat
    per cell) presumably match ConstruirGrid's cell size — verify.
    """
    obstaculos= int(input("ingresar el numero de obstaculos:"))
    vertices = np.zeros((4, 2), dtype=int)
    global Grid
    i=0
    while i<obstaculos:
        print("Coordenadas Centro")
        pLat= float(input("ingresar latitud:"))
        pLon= float(input("ingresar longitud:"))
        alturaObs = int(input("ingresar altura Obstaculo (mt): "))
        dim= float(input("Ingresar Dimensión (mt): "))
        DimLon = (dim * (0.00005435/5)) / 5 # how many degrees of longitude this is
        DimLat = (dim * (0.0000416/5)) / 5 # how many degrees of latitude this is
        # Vertex 1 (the source mislabeled all four as "Vertice 1"):
        vertices[0][0] = int(-((pLon+ DimLon/2) -perimetroLon[1]) / 0.00005435)
        vertices[0][1] = int(-((pLat+ DimLat/2) - perimetroLat[2]) / 0.0000416 )
        # Vertex 2:
        vertices[1][0] = int(-((pLon+ DimLon/2) -perimetroLon[1]) / 0.00005435)
        vertices[1][1] = int(-((pLat- DimLat/2) - perimetroLat[2]) / 0.0000416 )
        # Vertex 3:
        vertices[2][0] = int(-((pLon- DimLon/2) -perimetroLon[1]) / 0.00005435)
        vertices[2][1] = int(-((pLat+ DimLat/2) - perimetroLat[2]) / 0.0000416 )
        # Vertex 4:
        vertices[3][0] = int(-((pLon- DimLon/2) -perimetroLon[1]) / 0.00005435)
        vertices[3][1] = int(-((pLat- DimLat/2) - perimetroLat[2]) / 0.0000416 )
        # Seed the bounding-box scan just outside vertex 0, then widen
        # (x1, y1) / (x2, y2) to the min/max corners over all vertices.
        x1= vertices[0][0]-1
        y1= vertices[0][1]-1
        y2= vertices[0][1]-1
        x2= vertices[0][0]-1
        for x in vertices:
            if x[0]!=x1:
                if x[0]<x1:
                    x2= x1
                    x1= x[0]
                else:
                    x2= x[0]
            if x[1]!=y1:
                if x[1]<y1:
                    y2=y1
                    y1=x[1]
                else:
                    y2=x[1]
        # Apply the changes to the grid
        print("X1: ", x1, " x2: ", x2, " y1: ", y1, " y2: ", y2)
        while x1<= x2:
            print("x1 vale: ", x1)
            if x1>=0 and x1<ancho:
                aux= y1
                while aux<=y2:
                    if aux>=0 and aux<largo:
                        if alturaObs < AltMax:
                            # Below the ceiling: mark over-flyable (3).
                            print("x: ", x1, "y: ", aux)
                            Grid[x1][aux] = 3
                        else:
                            # Too tall: mark blocked (1).
                            Grid[x1][aux] = 1
                    aux+=1
            x1 += 1
        i+=1
    # Change the values between vertices to 1 or 2 if it can be overflown!
    return vertices
"""LOGICA DE EVACIÓN DE OBSTACULOS"""
"""Ejecutar Giros"""
def girarDerecha():
    """Rotate the drone's heading (dron[2]) one step to the right.

    Heading transitions: 1 -> 4, 2 -> 3, 3 -> 1, 4 -> 2. An unknown heading
    value is left unchanged, matching the original if-chain.
    """
    global dron
    heading = dron[2]
    turn_right = {1: 4, 2: 3, 3: 1, 4: 2}
    dron[2] = turn_right.get(heading, heading)
def girarIzquierda():
    """Rotate the drone's heading (dron[2]) one step to the left.

    Heading transitions: 1 -> 3, 2 -> 4, 3 -> 2, 4 -> 1. An unknown heading
    value is left unchanged, matching the original if-chain.
    """
    global dron
    heading = dron[2]
    turn_left = {1: 3, 2: 4, 3: 2, 4: 1}
    dron[2] = turn_left.get(heading, heading)
def avanzar():
    """Advance the drone one grid cell along its current heading and update
    its flight altitude from the cell it lands on.

    dron = [row, col, heading, altitude]; headings move: 1 = -row, 2 = +row,
    3 = -col, 4 = +col (inferred from the index arithmetic below).
    """
    global dron
    dir = dron[2]
    if dir == 1:
        dron[0] -= 1
    if dir == 2:
        dron[0] += 1
    if dir == 3:
        dron[1] -= 1
    if dir == 4:
        dron[1] += 1
    # Cells marked 3 must be overflown at AltMax; otherwise cruise at Alt.
    if Grid[dron[0]][dron[1]]==3:
        dron[3]=AltMax
    else:
        dron[3]=Alt
"""¿Es posible Girar/Avanzar?"""
def puedoGirarDerecha():
    """Return True when the cell to the drone's right is inside the grid and
    free (0) or overflyable (3).

    NOTE(review): the trailing `else` binds only to the `dir==3` branch; for
    the other headings a blocked cell falls through and returns None
    (falsy), not False. Callers treating the result as a boolean see the
    same behavior.
    """
    dir= dron[2]
    if dir==1:
        if dron[1] < largo-1:
            if Grid[dron[0]][dron[1]+1]==0 or Grid[dron[0]][dron[1]+1]==3:
                return True
    if dir==2:
        if dron[1] > 0:
            if Grid[dron[0]][dron[1] - 1] == 0 or Grid[dron[0]][dron[1] - 1] == 3:
                return True
    if dir==4:
        if dron[0]< ancho-1:
            if Grid[dron[0] + 1][dron[1]] == 0 or Grid[dron[0] + 1][dron[1]]==3:
                return True
    if dir==3:
        if dron[0] > 0:
            if Grid[dron[0]-1][dron[1]] == 0 or Grid[dron[0]-1][dron[1]]==3:
                return True
    else:
        return False
def puedoGirarIzquierda():
    """Return True when the cell to the drone's left is inside the grid and
    free (0) or overflyable (3).

    NOTE(review): as in puedoGirarDerecha, the trailing `else` binds only to
    the `dir==3` branch; other blocked headings return None (falsy).
    """
    dir= dron[2]
    if dir==1:
        if dron[1]>0:
            if Grid[dron[0]][dron[1]-1]==0 or Grid[dron[0]][dron[1]-1]==3:
                return True
    if dir==2:
        if dron[1] <largo-1:
            if Grid[dron[0]][dron[1] + 1] == 0 or Grid[dron[0]][dron[1] + 1] == 3:
                return True
    if dir==4:
        if dron[0] > 0:
            if Grid[dron[0] - 1][dron[1]] == 0 or Grid[dron[0] - 1][dron[1]] == 3:
                return True
    if dir==3:
        if dron[0] < ancho-1:
            if Grid[dron[0] +1][dron[1]] == 0 or Grid[dron[0] +1][dron[1]] == 3:
                return True
    else:
        return False
def puedoAvanzar():
    """Return True when the next cell along the drone's heading is inside the
    grid and free (0) or overflyable (3).

    NOTE(review): the trailing `else` binds only to the `dir==4` branch;
    other blocked headings return None (falsy), which callers treat as False.
    """
    dir= dron[2]
    if dir==1:
        if dron[0] > 0:
            if Grid[dron[0]-1][dron[1]]==0 or Grid[dron[0]-1][dron[1]]==3:
                return True
    if dir==2:
        if dron[0] < ancho-1:
            if Grid[dron[0]+1][dron[1]] == 0 or Grid[dron[0]+1][dron[1]]==3:
                return True
    if dir==3:
        if dron[1] > 0:
            if Grid[dron[0]][dron[1]-1] == 0 or Grid[dron[0]][dron[1]-1] == 3:
                return True
    if dir==4:
        if dron[1] < largo-1:
            if Grid[dron[0]][dron[1]+1] == 0 or Grid[dron[0]][dron[1]+1] == 3:
                return True
    else:
        return False
"""PARA VOLVER ATRAS"""
def traerDron(x, y, h):
    """Bring the drone back to a previously-visited cell (x, y, h), appending
    the (shortcut-compressed) return path to the global `trayectoria`.

    Walks the reversed trajectory up to the last occurrence of the target
    point, skipping ahead whenever a later point is directly reachable
    (puedoLlegar), and records each hop at AltMax.
    """
    global dron
    global trayectoria
    xi=dron[0] # current position (NOTE: xi/yi are captured but never used)
    yi=dron[1]
    dron[0] = x # reassign the drone's position to the target cell
    dron[1] = y
    tray= trayectoria.copy()
    tray.reverse()# reversed copy, so we walk the path backwards
    print("TRAYECTORIA DE RETORNO")
    meta= tray.index((x, y, h)) # last occurrence of the destination point
    actual=0
    while actual<meta:
        a= tray[actual][0] # where we stand right now
        b= tray[actual][1]
        j= actual + 1
        atajo=False
        while j<=meta:
            t=tray[j][0]
            v=tray[j][1]
            if puedoLlegar(t,v,a,b): # if reachable in one step, jump there
                actual=j
                atajo=True
            j+=1
        if atajo==False:
            actual+=1
        print(tray[actual], " ")
        trayectoria.append((tray[actual][0],tray[actual][1],AltMax ))
def puedoLlegar(x, y, a, b):
    """Return True iff cell (a, b) is one cardinal step away from (x, y).

    Equivalent to the original chain of four adjacency checks; the two
    unreachable statements that followed the return chain (dead code that
    reassigned the global drone position) have been removed.
    """
    return abs(a - x) + abs(b - y) == 1
"""AREA A RECORRER"""
def calcularArea():
    """Count traversable cells (0 = free, 3 = overflyable) into the global
    `area`; recorrerGrid compares `completitud` against it to detect when
    the whole grid has been covered."""
    i=0
    global area
    while i<ancho:
        j=0
        while j< largo:
            if Grid[i][j]==0 or Grid[i][j]==3:
                area+=1
            j+=1
        i+=1
"""Función que recorre la Grid y genera la trayectoria"""
def recorrerGrid():
    """Recursively cover the grid from the drone's current cell, building the
    global `trayectoria`.

    Marks the current cell as visited (2), then tries to advance, turn
    right, or turn left (in that order), recursing after each move. When a
    recursive call returns without full coverage (completitud != area), the
    drone is brought back to this cell via traerDron and the next option is
    tried.
    """
    global Grid
    x= dron[0]
    y= dron[1]
    a= x
    b= y
    h=0
    # Altitude for this cell: AltMax over overflyable obstacles, else Alt.
    if Grid[x][y]==3:
        h = AltMax
    else:
        h= Alt
    trayectoria.append((x,y,h)) # record the cell we are standing on
    Grid[x][y]= 2 # mark the current cell as visited
    global completitud
    completitud+=1
    if puedoAvanzar(): # can we move forward?
        avanzar() # if so, advance
        # Climbing onto an obstacle: re-record the previous cell at AltMax.
        if Grid[dron[0]][dron[1]]==3 and h==Alt:
            trayectoria.append((a, b, AltMax)) # may need to create this point
        recorrerGrid() # recurse from the new cell
        if(completitud!=area):
            traerDron(x, y, h)
    if puedoGirarDerecha(): # can we turn right?
        print("giro derecha")
        girarDerecha() # if so, turn
        avanzar()
        if Grid[dron[0]][dron[1]] == 3 and h == Alt:
            trayectoria.append((a, b, AltMax)) # may need to create this point
        recorrerGrid() # recurse from the new cell
        if (completitud != area):
            traerDron(x, y, h)
    if puedoGirarIzquierda(): # can we turn left?
        print("giro izquierda")
        girarIzquierda() # if so, turn
        avanzar()
        if Grid[dron[0]][dron[1]] == 3 and h == Alt:
            trayectoria.append((a, b, AltMax)) # may need to create this point
        recorrerGrid() # recurse from the new cell
    return
""" MAIN """
Grid= ConstruirGrid()
Waypoints= GenerarWaypoints()
"""EscribirWaypoints("puntosGeneradosPython.waypoints", Waypoints)"""
"""vertices= IngresarObtaculo()"""
IngresarObtaculo()
print(Grid)
calcularArea()
recorrerGrid()
print(Grid)
print(trayectoria)
waypoints = GenerarWaypoints()
EscribirWaypoints("Magia.waypoints", waypoints)
print("end") |
version = "1.7.5-$Format:%h$"
|
# -*- coding: utf-8 -*-
from typing import List
class TreeNode:
    """A binary-tree node holding a value and two (initially empty) children."""

    def __init__(self, x):
        """Store the value; children start as None."""
        self.val, self.left, self.right = x, None, None
class Solution:
    """LeetCode 1305: merge the values of two binary search trees."""

    def getAllElements(self, root1: TreeNode, root2: TreeNode) -> List[int]:
        """Return all values from both trees as one ascending list."""
        result = []
        self.inorderTraversal(root1, result)
        self.inorderTraversal(root2, result)
        # sorted() already returns a new list; the original's extra list()
        # wrapper was redundant.
        return sorted(result)

    def inorderTraversal(self, root: TreeNode, result: List[int]):
        """Append the tree's values to `result` in inorder (left, node, right)."""
        if root:
            self.inorderTraversal(root.left, result)
            result.append(root.val)
            self.inorderTraversal(root.right, result)
if __name__ == "__main__":
solution = Solution()
t0_0 = TreeNode(2)
t0_1 = TreeNode(1)
t0_2 = TreeNode(4)
t0_0.right = t0_2
t0_0.left = t0_1
t1_0 = TreeNode(1)
t1_1 = TreeNode(0)
t1_2 = TreeNode(3)
t1_0.right = t1_2
t1_0.left = t1_1
assert [0, 1, 1, 2, 3, 4] == solution.getAllElements(t0_0, t1_0)
t2_0 = TreeNode(0)
t2_1 = TreeNode(-10)
t2_2 = TreeNode(10)
t2_0.right = t2_2
t2_0.left = t2_1
t3_0 = TreeNode(5)
t3_1 = TreeNode(1)
t3_2 = TreeNode(7)
t3_3 = TreeNode(0)
t3_4 = TreeNode(2)
t3_1.right = t3_4
t3_1.left = t3_3
t3_0.right = t3_2
t3_0.left = t3_1
assert [-10, 0, 0, 1, 2, 5, 7, 10] == solution.getAllElements(t2_0, t3_0)
t4_0 = None
t5_0 = TreeNode(5)
t5_1 = TreeNode(1)
t5_2 = TreeNode(7)
t5_3 = TreeNode(0)
t5_4 = TreeNode(2)
t5_1.right = t5_4
t5_1.left = t5_3
t5_0.right = t5_2
t5_0.left = t5_1
assert [0, 1, 2, 5, 7] == solution.getAllElements(t4_0, t5_0)
t6_0 = TreeNode(0)
t6_1 = TreeNode(-10)
t6_2 = TreeNode(10)
t6_0.right = t6_2
t6_0.left = t6_1
t7_0 = None
assert [-10, 0, 10] == solution.getAllElements(t6_0, t7_0)
t8_0 = TreeNode(1)
t8_1 = TreeNode(8)
t8_0.right = t8_1
t9_0 = TreeNode(8)
t9_1 = TreeNode(1)
t9_0.left = t9_1
assert [1, 1, 8, 8] == solution.getAllElements(t8_0, t9_0)
|
from __future__ import division
import numpy as np
import numpy.random as npr
from scipy.stats import norm
from svae.forward_models import _diagonal_gaussian_loglike
def check_diag_gauss_loglike(x, mu, log_sigmasq):
    """Assert the project's diagonal-Gaussian log-likelihood matches scipy.

    x is indexed (T, p), mu and log_sigmasq (T, K, p) -- inferred from the
    broadcasting x[:,None,:] below; TODO confirm against the svae docs.
    """
    loglike = _diagonal_gaussian_loglike(x, mu, log_sigmasq)
    # Reference value: per-point normal logpdfs averaged over the K axis,
    # then summed over points and dimensions.
    scipy_loglike = np.mean(norm.logpdf(x[:,None,:], mu, np.sqrt(np.exp(log_sigmasq))), axis=1).sum()
    assert np.isclose(loglike, scipy_loglike)
def test_diag_gauss_loglike():
    """Nose-style generator test: yield 50 randomized check cases.

    Fix: `xrange` is Python-2 only; `range` behaves identically here and
    keeps the module importable under Python 3 as well.
    """
    npr.seed(0)
    for _ in range(50):
        # Random problem sizes: T points, K components, p dimensions.
        T, K, p = npr.randint(1, 20), npr.randint(1, 5), npr.randint(1, 10)
        yield check_diag_gauss_loglike, npr.randn(T, p), npr.randn(T, K, p), npr.randn(T, K, p)
|
import os.path
# Set up a level object
class Level:
    """A 2D tile map backed by a simple text file in the Levels/ directory.

    File format: first line "width height", then `height` lines of
    space-separated integer tile values.
    """
    # Defaults; loadLevel overwrites them when the level file exists.
    # NOTE(review): these are *class* attributes -- `level` is a shared
    # mutable list until an instance reassigns it (loadLevel does,
    # zeroLevel does not). Confirm callers before moving into __init__.
    width = 25
    height = 15
    filename = 'DEFAULT'
    level = []

    def __init__(self, filename):
        """Remember the filename and load the level from disk if present."""
        self.filename = filename
        self.loadLevel()

    def zeroLevel(self):
        """Append `height` rows of `width` zeroes (empty tiles) to the level."""
        for i in range(0, self.height):
            self.level.append([])
            for j in range(0, self.width):
                # Fill up the level with a whole lotta nothin
                self.level[i].append(0)

    def getSlice(self, camera_x, camera_y, screenInfo):
        """Return the screenInfo[2] x screenInfo[1] sub-grid starting at the
        camera offset, as a 2D list of tile values."""
        returner = []
        for h in range(0, screenInfo[2]):
            returner.append([])
            for w in range(0, screenInfo[1]):
                returner[h].append(self.level[h + camera_y][w + camera_x])
        return returner

    def stamp(self, selected, x, y, cx, cy):
        """Place tile `selected` at screen position (x, y) offset by the
        camera position (cx, cy)."""
        self.level[y + cy][x + cx] = selected

    def validateRight(self, camera_x, screenInfo):
        """Grow every row by one zero column when the camera scrolls past the
        current right edge of the level."""
        if self.width < camera_x + screenInfo[1]:
            for row in range(0, self.height):
                self.level[row].append(0)

    def convert_strints(self, theline):
        """Convert a list of numeric strings to ints, skipping newline tokens."""
        return [int(strnum) for strnum in theline if strnum != '\n']

    def loadLevel(self):
        """Read width/height and the tile rows from Levels/<filename>, if it
        exists; otherwise leave the defaults in place."""
        levelPath = os.path.join('Levels', self.filename)
        if os.path.exists(levelPath):
            # BUG fix: the file handle was previously never closed.
            with open(levelPath, 'r') as f:
                firstLine = f.readline().split(" ")
                self.level = []
                self.width = int(firstLine[0])
                self.height = int(firstLine[1])
                for line in range(0, self.height):
                    myLine = self.convert_strints(f.readline().split(" "))
                    self.level.append(myLine)

    def save(self):
        """Write "width height" then the tile rows to Levels/<filename>."""
        levelPath = os.path.join('Levels', self.filename)
        # 'w' mode truncates on open, so the original's separate pre-erase
        # open/close was redundant; `with` also guarantees the handle closes.
        with open(levelPath, 'w') as f:
            f.write(str(self.width) + ' ')
            f.write(str(self.height) + '\n')
            for line in self.level:
                for val in line:
                    f.write(str(val) + ' ')
                f.write('\n')
from setuptools import setup
setup(
    name='delivery',
    version='1.0',
    # BUG fix: py_modules previously listed an empty string. The console
    # script below imports `delivery:cli`, so the shipped module is
    # `delivery`.
    py_modules=['delivery'],
    install_requires=['Click', 'numpy', 'pandas', 'colorama'],
    entry_points='''
[console_scripts]
delivery=delivery:cli
''',
)
# -*- coding: utf-8 -*-
import os.path
# Absolute path of the directory containing this settings module.
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
# flask core settings
# NOTE(review): DEBUG on and real-looking SECRET_KEY/OAuth secrets are
# committed here -- rotate them and load from the environment for production.
DEBUG = True
TESTING = False
SECRET_KEY = 'qh\x98\xc4o\xc4]\x8f\x8d\x93\xa4\xec\xc5\xfd]\xf8\xb1c\x84\x86\xa7A\xcb\xc0'
# Session lifetime in seconds (30 days).
PERMANENT_SESSION_LIFETIME = 60 * 60 * 24 * 30
# flask wtf settings
WTF_CSRF_ENABLED = True
# flask sqlalchemy: SQLite file next to this module
SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(os.path.join(PROJECT_PATH, 'pythonistas.db'))
# flask cache settings
CACHE_TYPE = 'simple'
# flask mail settings
MAIL_DEFAULT_SENDER = 'naoresponda@pythonistas.com.br'
# flask babel
BABEL_DEFAULT_LOCALE = 'pt_BR'
BABEL_DEFAULT_TIMEZONE = 'America/Recife'
# github keys
GITHUB_CLIENT_ID = 'bc0209ad1d2a580f1716'
GITHUB_CLIENT_SECRET = 'dace4724f6089037ac5fce19da4db1bcd44d6053'
GITHUB_CALLBACK = 'http://pythonistas.com.br/auth/github/callback/'
# bitbucket keys
BITBUCKET_CLIENT_ID = 'HFnK6XAfLUcCu8y6Df'
BITBUCKET_CLIENT_SECRET = 'W2MnNmAFAFJZDa8LmcC5vBc8gWQgFVGG'
BITBUCKET_CALLBACK = 'http://pythonistas.com.br/auth/bitbucket/callback/'
# linkedin keys
LINKEDIN_CLIENT_ID = 'ypdrywxik45z'
LINKEDIN_CLIENT_SECRET = 'B3bQvqkWOlTZhWyw'
LINKEDIN_CALLBACK = 'http://pythonistas.com.br/auth/linkedin/callback/'
|
#!/usr/bin/python
from __future__ import print_function
from vizdoom import *
from agent import Runner
import time
# Configure and start a ViZDoom multiplayer client for agent "F1".
game = DoomGame()
game.load_config("config/my_custom_config.cfg")
# Name your agent and select color
# colors: 0 - green, 1 - gray, 2 - brown, 3 - red, 4 - light gray, 5 - light brown, 6 - light red, 7 - light blue
game.add_game_args("+name F1 +colorset 2")
game.init()
print("F1 joined the party!")
# Runner wraps the per-tick agent logic (see agent module).
runner = Runner(game)
# Play until the game (episode) is over.
while not game.is_episode_finished():
    if game.is_player_dead():
        # Use this to respawn immediately after death, new state will be available.
        game.respawn_player()
        # Or observe the game until automatic respawn.
        #game.advance_action();
        #continue;
    runner.step()
game.close()
|
# Module-level float rebound by change() via the `global` keyword.
val = 0.0
def change():
    """Rebind the module-level `val` to 1.0."""
    global val
    val = 1.0
# Runs at import time, before change() is ever called: prints "val is 0.0".
print("val is " + str(val))
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-11-12 00:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the `Products` model (table `products`) for the index app."""
    dependencies = [
        ('index', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Products',
            fields=[
                # Explicit integer PK instead of Django's auto-added id.
                ('snack_id', models.IntegerField(primary_key=True, serialize=False, unique=True, verbose_name='编号')),
                ('title', models.CharField(max_length=20, unique=True, verbose_name='名称')),
                ('price', models.DecimalField(decimal_places=2, max_digits=5, verbose_name='定价')),
                ('market_price', models.DecimalField(decimal_places=2, max_digits=5, verbose_name='售价')),
                ('repertory', models.IntegerField(verbose_name='库存')),
                ('supplier', models.CharField(max_length=50, verbose_name='供货商')),
                ('invest_money', models.DecimalField(decimal_places=2, default=0, max_digits=8, verbose_name='投资金额')),
                ('sell_number', models.IntegerField(default=0, verbose_name='售出数量')),
                ('img', models.ImageField(upload_to='static/imgs/snacks', verbose_name='图片')),
                # Creation/modification timestamps maintained by Django.
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('mod_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
                ('products_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='index.ProductsType')),
            ],
            options={
                'verbose_name': '商品',
                'verbose_name_plural': '商品',
                'db_table': 'products',
            },
        ),
    ]
|
import os
import shutil
import unittest
# Directory holding throwaway test fixture data; wiped around every test.
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
_STORAGE_DIR = os.path.join(DATA_DIR, 'storage')
# Point graphite at the throwaway storage dir unless the env already set one.
os.environ.setdefault('GRAPHITE_STORAGE_DIR', _STORAGE_DIR)
class TestCase(unittest.TestCase):
    """Base test case that wipes the on-disk data directory before and
    after every test, so tests never see each other's files."""
    def _cleanup(self):
        # ignore_errors: the tree may legitimately not exist yet.
        shutil.rmtree(DATA_DIR, ignore_errors=True)
    def setUp(self):
        self._cleanup()
    def tearDown(self):
        self._cleanup()
|
#!/usr/bin/env python
import http.server
import os
import pathlib
import socketserver
# Serve files from <repo>/../build/local regardless of the script's CWD.
build_root = pathlib.Path(
    os.path.abspath(os.path.dirname(__file__)), "..", "build", "local"
).resolve()
os.chdir(str(build_root))
# TCP port for the local preview server.
port = 8020
class MagicHTMLHandler(http.server.SimpleHTTPRequestHandler):
    """Static-file handler that maps clean URLs onto generated .html files."""

    def do_GET(self):
        """Serve a GET request after first rewriting the path if needed."""
        self.rewrite_endpoint()
        return super().do_GET()

    def rewrite_endpoint(self):
        """If `<path>.html` or `<path>/index.html` exists on disk, repoint
        self.path at it; otherwise leave the request untouched."""
        fs_path = self.translate_path(self.path)
        for suffix in (".html", "/index.html"):
            if os.path.exists(fs_path + suffix):
                rewritten = self.path + suffix
                print("REWROTE {} => {}".format(self.path, rewritten))
                self.path = rewritten
                return
handler = MagicHTMLHandler
# Allow quick restarts without waiting for TIME_WAIT sockets to clear.
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer(("", port), handler)
print("Serving CONSOLE on {}".format(port))
httpd.serve_forever()
|
import datetime
import logging
import sqlalchemy
from flask import json
from sqlalchemy import (create_engine, Column, String,
DateTime, func, Integer)
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from collectors.exceptions import DuplicateFound
from storage import Storage
logger = logging.getLogger(__name__)
Base = declarative_base()
class Item(Base):
    """A collected content item persisted in SQLite."""
    __tablename__ = 'items'
    id = Column(String, primary_key=True)
    type = Column(String, nullable=False)  # collector/source type
    url = Column(String)
    timestamp = Column(DateTime, nullable=False)
    title = Column(String, nullable=False)
    extra = Column(String)  # JSON blob of the non-column fields
class User(Base):
    """An application user.

    NOTE(review): `password` is stored as a plain String column -- confirm
    the caller hashes it before persisting.
    """
    __tablename__ = 'users'
    id = Column(String, primary_key=True)
    name = Column(String, nullable=False)
    password = Column(String, nullable=False)
class Subscription(Base):
    """A push-notification subscription and its notification bookkeeping."""
    __tablename__ = 'subscriptions'
    id = Column(Integer, primary_key=True)
    subscription_date = Column(DateTime, default=datetime.datetime.utcnow)
    user_agent = Column(String, nullable=True)
    subscription = Column(String, nullable=False)  # serialized endpoint/keys
    last_notification = Column(DateTime, nullable=True)
    invalidation_date = Column(DateTime, nullable=True)  # set when the push endpoint dies
    @property
    def min_date(self):
        """Earliest item timestamp this subscriber still needs to hear about:
        last notification, else subscription time, else the past 24 hours."""
        return (self.last_notification
                or self.subscription_date
                or (datetime.datetime.now() - datetime.timedelta(days=1)))
class StorageSqliteDB(Storage):
    """Storage backend over SQLite via SQLAlchemy.

    Item fields listed in SQL_FIELDS map to real columns; everything else is
    round-tripped through the JSON `extra` column.
    """
    SQL_FIELDS = {'id', 'type', 'url', 'timestamp', 'title'}
    @staticmethod
    def item_from_db(dbitem):
        """ Return an item dictionary out of the DB one """
        item = {k: getattr(dbitem, k) for k in StorageSqliteDB.SQL_FIELDS}
        if dbitem.extra:
            extra_fields = json.loads(dbitem.extra)
            item.update(extra_fields)
        return item
    def all(self):
        """Return every stored item as a dict, newest first."""
        items = []
        for dbitem in self.db.query(Item) \
                .order_by(sqlalchemy.desc(Item.timestamp)):
            item = self.item_from_db(dbitem)
            items.append(item)
        return items
    def __init__(self, db_filename) -> None:
        """Open (and create, if needed) the SQLite file and start a session."""
        super().__init__()
        engine = create_engine(f"sqlite:///{db_filename}",
                               # echo=True # show the queries
                               )
        Base.metadata.create_all(engine)
        # noinspection PyPep8Naming
        Session = sessionmaker(bind=engine)
        self.db = Session()
    def search(self, id, type):
        # Not implemented for this backend (interface stub).
        pass
    def getitem(self, item_id):
        """Fetch one item by primary key.

        NOTE(review): Query.get returns None for a missing id, which would
        make item_from_db raise AttributeError -- confirm callers only pass
        known ids.
        """
        dbitem = self.db.query(Item).get(item_id)
        return self.item_from_db(dbitem)
    def upsert(self, item, update=False):
        """ Write to SQL Item - move all extra fields in an extra json"""
        fields = {k: v for k, v in item.items()
                  if k in self.SQL_FIELDS}
        extra = {k: v for k, v in item.items()
                 if k not in self.SQL_FIELDS}
        new = Item(
            **fields,
            extra=json.dumps(extra)
        )
        if update:
            self.db.merge(new)
        else:
            self.db.add(new)
        try:
            self.db.commit()
        except IntegrityError as e:
            self.db.rollback()
            # Translate unique-constraint violations into a domain error.
            if "unique" in str(e.orig).lower():
                raise DuplicateFound(
                    f"We already have the id {item['id']} of type {item['type']} in the DB"
                )
            else:
                raise
    def max_timestamp(self, **kwargs):
        """Return the newest Item.timestamp matching the given column filters."""
        max_ts = self.db.query(func.max(Item.timestamp)).filter_by(**kwargs)
        return max_ts.one()[0]
    def close(self):
        """Close the underlying session."""
        self.db.close()
    def active_subscriptions(self):
        """Return non-invalidated subscriptions, oldest first."""
        return list(
            self.db.query(Subscription)
            .filter(Subscription.invalidation_date.is_(None))
            .order_by(sqlalchemy.asc(Subscription.subscription_date))
        )
    def set_last_notification(self, subscription):
        """ Mark the time when the subscription got last notified """
        q = self.db.query(Subscription).filter(
            Subscription.subscription == subscription
        )
        subscription = q.first()
        subscription.last_notification = datetime.datetime.utcnow()
        self.db.commit()
    def invalidate_subscription(self, subscription):
        """Record that the push endpoint is dead so it is skipped in future."""
        q = self.db.query(Subscription).filter(
            Subscription.subscription == subscription
        )
        subscription = q.first()
        subscription.invalidation_date = datetime.datetime.utcnow()
        self.db.commit()
|
import qutip as qt
import random
from typing import Tuple, Callable
import numpy as np
from simulator import Simulator
Strategy = Tuple[Callable[[int], int], Callable[[int], int]]
def random_bit() -> int:
    """Return a uniformly random bit (0 or 1)."""
    bit = random.randint(0, 1)
    return bit
def referee(strategy: Callable[[], Strategy]) -> bool:
    """Play one round of the CHSH game and return whether the players won.

    The players win when the parity of their answers (0 if equal, 1 if
    different) equals the AND of the referee's two random input bits.
    """
    you, eve = strategy()
    your_input = random_bit()
    eve_input = random_bit()
    answers_agree = you(your_input) == eve(eve_input)
    parity = 0 if answers_agree else 1
    return parity == (your_input and eve_input)
def est_win_probability(strategy: Callable[[], Strategy],
                        n_games: int = 1000) -> float:
    """Estimate the strategy's win probability over n_games refereed rounds."""
    wins = sum(referee(strategy) for _ in range(n_games))
    return wins / n_games
def constant_strategy() -> Strategy:
    """Classical baseline strategy: both players always answer 0,
    regardless of their input bit."""
    def answer_zero(_input: int) -> int:
        return 0
    return answer_zero, answer_zero
def quantum_strategy(initial_state: qt.Qobj) -> Strategy:
    """Build a CHSH strategy where the players share an entangled pair and
    measure after input-dependent Y rotations.

    NOTE(review): the register is set to `initial_state` and then immediately
    overwritten with a Bell state two lines later -- confirm whether the
    parameter is still meaningful.
    """
    shared_system = Simulator(capacity=2)
    shared_system.register_state = initial_state
    your_qubit = shared_system.allocate_qubit()
    eve_qubit = shared_system.allocate_qubit()
    shared_system.register_state = qt.bell_state()
    # Rotation angles (radians) chosen per input bit.
    your_angles = [90 * np.pi / 180, 0]
    eve_angles = [45 * np.pi / 180, 135 * np.pi / 180]
    def you(your_input: int) -> int:
        """Rotate by the angle for this input, then measure."""
        your_qubit.ry(your_angles[your_input])
        return your_qubit.measure()
    def eve(eve_input: int) -> int:
        """Rotate by the angle for this input, then measure."""
        eve_qubit.ry(eve_angles[eve_input])
        return eve_qubit.measure()
    return you, eve
|
"""
Faça um Programa que peça dois números e imprima o maior deles.
"""
def pede_numero_ao_usuario(msg):
    """Prompt the user with `msg` and return the typed value as a float."""
    resposta = input(msg)
    return float(resposta)
def obter_o_maior_numero(num_1, num_2):
    """Return the larger of the two numbers (num_1 wins ties)."""
    if num_1 >= num_2:
        return num_1
    return num_2
def imprime_maior_numero():
    """Read two numbers from the user and print the larger one."""
    numero_1 = pede_numero_ao_usuario('Informe um número: ')
    numero_2 = pede_numero_ao_usuario('Informe mais um número: ')
    maior_numero = obter_o_maior_numero(numero_1, numero_2)
    print('O número {} é o maior número'.format(maior_numero))
if __name__ == '__main__':
    # Banner, then run the interactive exercise.
    print('+---------------------------------------+')
    print('| Programa: Exibe maior de dois números |')
    print('+---------------------------------------+')
    imprime_maior_numero()
|
from __future__ import print_function
import urllib2
from bs4 import BeautifulSoup
# File to dump the article link list
f = open('lists/listAddPortalArticles.txt','w')
#URLs for the Vital 100
#urls = ['http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Level/2'] # Single page where the 100 vital articles are linked
#URLs for the Vital 10,000
#urls = ['http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/People','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/History','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Geography','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Arts','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Philosophy_and_religion','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Everyday_life','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Society_and_social_sciences','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Biology_and_health_sciences','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Physical_sciences','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Technology','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Mathematics'] #pulls the article links to the vital 9742, plus some misc. links
#URLs related to Rwanda
#urls = ['https://en.wikipedia.org/wiki/Portal:Rwanda','https://en.wikipedia.org/wiki/Portal:Rwanda/Featured_article','https://en.wikipedia.org/wiki/Portal:Rwanda/Featured_picture','https://en.wikipedia.org/wiki/Portal:Rwanda/Featured_biography']
#URLs from https://en.wikipedia.org/wiki/Portal:Contents
#urls = ['https://en.wikipedia.org/wiki/Portal:Contents/Reference','https://en.wikipedia.org/wiki/Portal:Contents/Culture_and_the_arts','https://en.wikipedia.org/wiki/Portal:Contents/Geography_and_places','https://en.wikipedia.org/wiki/Portal:Contents/Health_and_fitness','https://en.wikipedia.org/wiki/Portal:Contents/History_and_events','https://en.wikipedia.org/wiki/Portal:Contents/Mathematics_and_logic','https://en.wikipedia.org/wiki/Portal:Contents/Natural_and_physical_sciences','https://en.wikipedia.org/wiki/Portal:Contents/People_and_self','https://en.wikipedia.org/wiki/Portal:Contents/Philosophy_and_thinking','https://en.wikipedia.org/wiki/Portal:Contents/Religion_and_belief_systems','https://en.wikipedia.org/wiki/Portal:Contents/Society_and_social_sciences','https://en.wikipedia.org/wiki/Portal:Contents/Technology_and_applied_sciences']
#URLs from Africa portal
urls = []
#URLs from the 5000 most viewed pages of the week
#urls = ['https://en.wikipedia.org/wiki/User:West.andrew.g/Popular_pages']
# Fetch each listing page and dump qualifying article hrefs to the list file.
for url in urls:
    s = urllib2.urlopen(url).read()
    soup = BeautifulSoup(s)
    soup.prettify()
    for anchor in soup.findAll('a', href=True):
        # Keep links that look like plain article URLs: no namespace colon,
        # no protocol-relative //, and not the Main Page.
        if '/wiki/' in anchor['href'] and not ':' in anchor['href'] and not '//' in anchor['href'] and not 'Main_Page' in anchor['href']: # keeps the links mostly limited to Wikipedia articles
            print(anchor['href'],file=f)
# BUG fix: `f.close` referenced the method without calling it, so the output
# file was never explicitly flushed/closed.
f.close()
|
#!/usr/bin/python
from copy import copy
import sys
def find_words(letter_string, preferred_letters=''):
    """Find dictionary words buildable from the given letterpress letters.

    Prints the longest words that can be fully assembled from the available
    letters, the subset of those that also cover every preferred letter, and
    words that contain *all* of the available letters (supersets).

    Args:
        letter_string: the letters on the board (case-insensitive).
        preferred_letters: letters the player especially wants to use.
    """
    # Convert letters to lowercase and split into per-letter lists.
    letters = list(letter_string.lower())
    preferred = list(preferred_letters.lower())
    found_words = []
    preferred_words = []
    partial_words = []
    # Load words from the dictionary file (one word per line).
    with open("enable.txt") as f:
        words = [line.strip() for line in f.readlines()]
    for word in words:
        word_as_list = list(word)
        pref = copy(preferred)
        any_missing = False
        for letter in letters:
            if letter in word_as_list:
                word_as_list.remove(letter)
                if letter in pref:
                    pref.remove(letter)
            else:
                # At least one board letter does not appear in this word.
                any_missing = True
        if len(word_as_list) == 0:
            # Every letter of the word was consumed by a board letter.
            found_words.append(word)
            if preferred and len(pref) == 0:
                preferred_words.append(word)
        if not any_missing:
            partial_words.append(word)
    found_words.sort(key=len)
    preferred_words.sort(key=len)
    partial_words.sort(key=len)
    # Fix: parenthesized print calls behave identically on Python 2 and 3,
    # whereas the original bare `print word` statements were Python-2 only.
    print("Top 5 Words by length: ")
    for word in found_words[-5:]:
        print(word)
    if preferred_letters:
        print("Top 5 preferred Words by length: ")
        for word in preferred_words[-5:]:
            print(word)
    print("Top 5 Words by that are supersets: ")
    # NOTE(review): the header says "Top 5" but this prints the 10 *shortest*
    # supersets -- confirm which was intended before changing.
    for word in partial_words[:10]:
        print(word)
if __name__ == '__main__':
    #The list of letters that appear on your letterpress screen
    letter_string = 'WCVOUBISHROZORELDANCLZYVG'
    preferred_letters = ''
    # Optional CLI overrides: argv[1] = board letters, argv[2] = preferred.
    if len(sys.argv) > 1:
        letter_string = sys.argv[1]
    if len(sys.argv) > 2:
        preferred_letters = sys.argv[2]
    find_words(letter_string, preferred_letters)
import numpy as np
from tqdm import tqdm
from notebooks.notebook_utils import vod_coefficient, dice_coefficient, accuracy_score, get_contour_mask
### Functions that determine slices order of fixing
# Can return the full order (like baseline_func, baseline_seq_func) or return the next slice to fix (like best_func2)
# Arguments:
# prediction - uint8 prediction 3d matrix
# truth - uint8 ground-truth 3d matrix
# metric_fcn - function by which to rank slices
# bool_arr - boolean array the length of #slices, 1 if slice was already fixed, else 0
# prediction_var - used for functions that need some side data (variance, other predictions...)
# use_contour_roi - if true, rank slices by metrics applied to area around the contour only
# decide on slices fix order randomly
def baseline_func(prediction, truth, metric_fcn, bool_arr=None, prediction_var=None, use_contour_roi=False):
    """Random baseline policy: return every slice index in a random order.

    Only `prediction` (for the slice count) is used; the remaining arguments
    exist for signature compatibility with the other ordering policies.
    """
    n_slices = prediction.shape[-1]
    return np.random.permutation(n_slices)
# fix slices in sequential order, starting with first non-blank slice
def baseline_seq_func(prediction, truth, metric_fcn, bool_arr=None, prediction_var=None, use_contour_roi=False):
    """Sequential policy: fix slices in order, starting at the first
    non-blank slice and wrapping around.

    Only `prediction` is used; the other arguments exist for signature
    compatibility with the other ordering policies.
    """
    # Assumes binary prediction: a slice is blank when its sum is 0.
    A_sums = np.sum(np.sum(prediction, axis=0), axis=0)
    non_blank_inds = [i for i in range(prediction.shape[-1]) if A_sums[i] > 0]
    # Robustness fix: an all-blank prediction previously raised IndexError
    # on non_blank_inds[0]; fall back to starting at slice 0.
    starting_ind = non_blank_inds[0] if non_blank_inds else 0
    return np.roll(np.arange(prediction.shape[-1]), -1 * starting_ind)
# fix slices in actual optimal solution - determine next slice by greatest VO improvement with GT
# NOTE: DO NOT USE, very high run-time
# fix slices in actual optimal solution - determine next slice by greatest VO improvement with GT
# NOTE: DO NOT USE, very high run-time
def best_func(prediction, truth, metric_fcn, bool_arr=None, prediction_var=None):
    """Greedy-optimal policy: try fixing each remaining slice in turn and
    return the index giving the best whole-volume metric improvement.

    O(slices * volume) per call -- kept for reference only.
    NOTE(review): unlike the sibling policies this signature has no
    `use_contour_roi` parameter -- confirm before calling generically.
    """
    cur_metric = metric_fcn(truth, prediction)
    best_metric = cur_metric
    best_slice = -1  # -1 is returned when no candidate improves the metric
    if bool_arr is None:
        remaining_inds = [i for i in range(prediction.shape[-1])]
    else:
        remaining_inds = [i for i in range(prediction.shape[-1]) if not bool_arr[i]]
    for i in remaining_inds:
        # Simulate replacing slice i with ground truth and re-score.
        pred_tmp = prediction.copy()
        pred_tmp[:, :, i] = truth[:, :, i]
        new_val = metric_fcn(truth, pred_tmp)
        if is_better(best_metric, new_val, metric_fcn):
            best_slice = i
            best_metric = new_val
    return best_slice
# fix slices in some 2d קירוב of optimal solution - determine next slice by its' VO with GT
def best_func2(prediction, truth, metric_fcn, bool_arr=None, prediction_var=None, use_contour_roi=False):
    """2D approximation of the optimal policy: return the remaining slice
    whose per-slice metric against ground truth is currently worst.

    Args:
        prediction/truth: uint8 3D volumes, slices along the last axis.
        metric_fcn: per-slice metric; lower is assumed worse (VO, DICE).
        bool_arr: per-slice "already fixed" flags.
        use_contour_roi: score only the area around the predicted contour.
    """
    if bool_arr is None:
        remaining_inds = [i for i in range(prediction.shape[-1])]
    else:
        remaining_inds = [i for i in range(prediction.shape[-1]) if not bool_arr[i]]
    # Fix: the deprecated alias np.bool was removed in NumPy 1.24+ and now
    # raises AttributeError; the builtin bool dtype is the replacement.
    bool_mask = np.array(np.ones_like(prediction), dtype=bool, copy=True)
    if use_contour_roi:
        for i in range(bool_mask.shape[-1]):
            bool_mask[:, :, i] = get_contour_mask(prediction[:, :, i])
    vals = [metric_fcn(truth[:, :, j][bool_mask[:, :, j]], prediction[:, :, j][bool_mask[:, :, j]]) for j in
            remaining_inds]
    best_slice = remaining_inds[np.asarray(vals).argmin()]  # Note - assumes lowest is worst (VO, DICE)
    return best_slice
# fix by some estimation of the slice's VO - depends on specific folder structure my experiments produced
# Note - actually *needs* the prediction_var
def by_estimated_vo(prediction, truth, metric_fcn, bool_arr=None, prediction_var=None, use_contour_roi=False):
    """Order the remaining slices by an *estimated* per-slice metric, computed
    against a list of alternative predictions instead of ground truth.

    Requires `prediction_var`: a list of prediction volumes; each slice's
    estimate is the mean metric between it and the corresponding slice of
    every alternative. Returns the remaining indices sorted worst-first
    (ascending estimate). `bool_arr` exists for compatibility; the function
    currently runs one-time.
    """
    if bool_arr is None:
        remaining_inds = [i for i in range(prediction.shape[-1])]
    else:
        remaining_inds = [i for i in range(prediction.shape[-1]) if not bool_arr[i]]
    # Fix: the deprecated alias np.bool was removed in NumPy 1.24+ and now
    # raises AttributeError; the builtin bool dtype is the replacement.
    bool_mask = np.array(np.ones_like(prediction), dtype=bool, copy=True)
    if use_contour_roi:
        for i in range(bool_mask.shape[-1]):
            bool_mask[:, :, i] = get_contour_mask(prediction[:, :, i])
    slices_mets_est = [0] * len(remaining_inds)
    for i in range(len(remaining_inds)):
        cur_slice = remaining_inds[i]
        curr_mets_est = [metric_fcn(prediction_var[j][:, :, cur_slice][bool_mask[:, :, cur_slice]],
                                    prediction[:, :, cur_slice][bool_mask[:, :, cur_slice]]) for j in
                         range(len(prediction_var))]
        slices_mets_est[i] = np.mean(curr_mets_est)
    order_of_slices = np.array(remaining_inds)[np.argsort(slices_mets_est)]
    return order_of_slices
# So I can use the same comparison with all metrics
def is_better(val1, val2, metric_func):
    """Uniform "did val2 improve on val1" comparison across metric types.

    DICE, VO and accuracy are higher-is-better (ties count as better, as in
    the original); any other metric is treated as lower-is-better.
    """
    higher_is_better = metric_func in (dice_coefficient, vod_coefficient, accuracy_score)
    if higher_is_better:
        return val2 >= val1
    return val2 <= val1
# run the process of fixing a specific scan according to a specific policy
# run the process of fixing a specific scan according to a specific policy
def get_scan_progress(prediction, truth, next_slice_fcn, metric_fcn=dice_coefficient,
                      prediction_var=None, use_contour_roi=False):
    """Simulate fixing every slice of a scan under the given ordering policy.

    Returns a list of n_slices+1 metric values: the whole-volume metric
    before any fix and after each successive slice replacement.
    """
    n_slices = prediction.shape[-1]
    y_values = [0] * (n_slices + 1)  # y_values - depending on the metrics
    prediction_copy = prediction.copy()
    was_slice_done = [False] * n_slices
    # currently the only function that really works iteratively
    if next_slice_fcn == best_func2:
        for i in tqdm(range(n_slices)):
            y_values[i] = metric_fcn(truth, prediction_copy)
            # Re-rank the remaining slices every iteration.
            next_slice_ind = next_slice_fcn(prediction, truth, metric_fcn, bool_arr=was_slice_done,
                                            prediction_var=prediction_var, use_contour_roi=use_contour_roi)
            prediction_copy[:, :, next_slice_ind] = truth[:, :, next_slice_ind]
            was_slice_done[next_slice_ind] = True
    else:
        # One-shot policies: compute the full order once, then apply it.
        slices_order = next_slice_fcn(prediction, truth, metric_fcn, bool_arr=was_slice_done,
                                      prediction_var=prediction_var, use_contour_roi=use_contour_roi)
        for i in tqdm(range(n_slices)):
            y_values[i] = metric_fcn(truth, prediction_copy)
            next_slice_ind = slices_order[i]
            prediction_copy[:, :, next_slice_ind] = truth[:, :, next_slice_ind]
    y_values[-1] = metric_fcn(truth, prediction_copy)
    return y_values
# Go from different amount of slices per scan to 34 datapoints (so can compare different scans on same graph)
def reduce_progress_for_graph(y_values):
    """Downsample a per-slice progress curve to 34 fixed percentage points so
    scans with different slice counts can share one graph.

    The sample percentages are 0,3,...,54 then 58,61,...,100 (55 is skipped),
    34 points in total. Returns (x_percentages, sampled_values).
    """
    percent_points = list(range(0, 55, 3)) + list(range(58, 101, 3))
    n_init_pts = len(y_values)
    # First datapoint is always the untouched starting value.
    final_vals = [y_values[0]]
    for pct in percent_points[1:]:
        # Map the percentage onto the nearest index of the original curve,
        # e.g. on 90 slices: 0, 2, ..., 48, 51, ..., 89.
        src_ind = int(round(n_init_pts * pct / 100) - 1)
        final_vals.append(y_values[src_ind])
    assert len(percent_points) == len(final_vals), "lengths do not match"
    return percent_points, final_vals
|
from collections import OrderedDict
import torch
import torch.nn as nn
from torchvision.models import densenet121, densenet169, densenet201, densenet161, squeezenet1_1
from torchvision.models.video import r2plus1d_18
def densenet_121(num_classes, expansion=False):
    """Build a DenseNet-121 (no pretrained weights) with a replaced classifier.

    Args:
        num_classes (int): size of the output layer.
        expansion (bool): when True, use a small MLP head
            (features -> 200 -> num_classes) instead of a single linear layer.

    Returns:
        torch.nn.modules.module.Module
    """
    model = densenet121(pretrained=False, progress=True)
    num_features = model.classifier.in_features
    if expansion:
        model.classifier = nn.Sequential(OrderedDict([
            ('dense1', nn.Linear(in_features=num_features, out_features=200)),
            ('norm1', nn.BatchNorm1d(num_features=200)),
            ('relu1', nn.ReLU()),
            ('dropout1', nn.Dropout(p=0.25)),
            ('last', nn.Linear(in_features=200, out_features=num_classes))
        ]))
    else:
        model.classifier = nn.Linear(num_features, num_classes, bias=True)
    return model
def densenet_169(num_classes, expansion=False):
    """Build a DenseNet-169 (random init) with a custom classifier head.

    Args:
        num_classes (int): size of the output layer.
        expansion (bool): if True, use an MLP head
            (in_features -> 1000 -> 200 -> num_classes with BN/ReLU/dropout);
            otherwise a single linear layer.
    Returns:
        torch.nn.modules.module.Module
    """
    model = densenet169(pretrained=False, progress=True)
    in_dim = model.classifier.in_features
    if not expansion:
        model.classifier = nn.Linear(in_dim, num_classes, bias=True)
        return model
    head = OrderedDict()
    head['dense1'] = nn.Linear(in_features=in_dim, out_features=1000)
    head['norm1'] = nn.BatchNorm1d(num_features=1000)
    head['relu1'] = nn.ReLU()
    head['dropout1'] = nn.Dropout(p=0.25)
    head['dense2'] = nn.Linear(in_features=1000, out_features=200)
    head['norm2'] = nn.BatchNorm1d(num_features=200)
    head['relu2'] = nn.ReLU()
    head['dropout2'] = nn.Dropout(p=0.25)
    head['last'] = nn.Linear(in_features=200, out_features=num_classes)
    model.classifier = nn.Sequential(head)
    return model
def densenet_201(num_classes, expansion=False):
    """Build a DenseNet-201 (random init) with a custom classifier head.

    Args:
        num_classes (int): size of the output layer.
        expansion (bool): if True, use an MLP head
            (in_features -> 1000 -> 200 -> num_classes with BN/ReLU/dropout);
            otherwise a single linear layer.
    Returns:
        torch.nn.modules.module.Module
    """
    model = densenet201(pretrained=False, progress=True)
    in_dim = model.classifier.in_features
    if not expansion:
        model.classifier = nn.Linear(in_dim, num_classes, bias=True)
        return model
    head = OrderedDict()
    head['dense1'] = nn.Linear(in_features=in_dim, out_features=1000)
    head['norm1'] = nn.BatchNorm1d(num_features=1000)
    head['relu1'] = nn.ReLU()
    head['dropout1'] = nn.Dropout(p=0.25)
    head['dense2'] = nn.Linear(in_features=1000, out_features=200)
    head['norm2'] = nn.BatchNorm1d(num_features=200)
    head['relu2'] = nn.ReLU()
    head['dropout2'] = nn.Dropout(p=0.25)
    head['last'] = nn.Linear(in_features=200, out_features=num_classes)
    model.classifier = nn.Sequential(head)
    return model
def densenet_161(num_classes, expansion=False):
    """Build a DenseNet-161 (random init) with a custom classifier head.

    Args:
        num_classes (int): size of the output layer.
        expansion (bool): if True, use an MLP head
            (in_features -> 1000 -> 200 -> num_classes with BN/ReLU/dropout);
            otherwise a single linear layer.
    Returns:
        torch.nn.modules.module.Module
    """
    model = densenet161(pretrained=False, progress=True)
    in_dim = model.classifier.in_features
    if not expansion:
        model.classifier = nn.Linear(in_dim, num_classes, bias=True)
        return model
    head = OrderedDict()
    head['dense1'] = nn.Linear(in_features=in_dim, out_features=1000)
    head['norm1'] = nn.BatchNorm1d(num_features=1000)
    head['relu1'] = nn.ReLU()
    head['dropout1'] = nn.Dropout(p=0.25)
    head['dense2'] = nn.Linear(in_features=1000, out_features=200)
    head['norm2'] = nn.BatchNorm1d(num_features=200)
    head['relu2'] = nn.ReLU()
    head['dropout2'] = nn.Dropout(p=0.25)
    head['last'] = nn.Linear(in_features=200, out_features=num_classes)
    model.classifier = nn.Sequential(head)
    return model
def squeezenet(num_classes):
    """Build SqueezeNet 1.1 (random init) followed by a small MLP head.

    The backbone's 1000-way output feeds a 200-unit dense layer with
    BN/ReLU/dropout and a final linear classifier.

    Args:
        num_classes (int): size of the output layer.
    Returns:
        torch.nn.modules.module.Module
    """
    stages = [
        ('squeezenet', squeezenet1_1(pretrained=False, progress=True)),
        ('dense', nn.Linear(in_features=1000, out_features=200)),
        ('norm', nn.BatchNorm1d(num_features=200)),
        ('relu', nn.ReLU()),
        ('dropout', nn.Dropout(p=0.25)),
        ('last', nn.Linear(in_features=200, out_features=num_classes)),
    ]
    return nn.Sequential(OrderedDict(stages))
def resnet3d(num_classes, expansion=False, maxpool=False):
    """Build an R(2+1)D-18 video classifier (random init).

    Args:
        num_classes (int): size of the output layer.
        expansion (bool): if True, use a 200-unit MLP head instead of a
            single linear layer.
        maxpool (bool): if True, replace the global average pool with
            adaptive max pooling.
    Returns:
        torch.nn.modules.module.Module
    """
    model = r2plus1d_18(pretrained=False, progress=True)
    in_dim = model.fc.in_features
    if expansion:
        head = OrderedDict()
        head['dense'] = nn.Linear(in_features=in_dim, out_features=200)
        head['norm'] = nn.BatchNorm1d(num_features=200)
        head['relu'] = nn.ReLU()
        head['dropout'] = nn.Dropout(p=0.25)
        head['last'] = nn.Linear(in_features=200, out_features=num_classes)
        model.fc = nn.Sequential(head)
    else:
        model.fc = nn.Linear(in_dim, num_classes, bias=True)
    if maxpool:
        model.avgpool = nn.AdaptiveMaxPool3d(output_size=(1, 1, 1))
    return model
def mycnn(num_classes):
    """Build a VGG-style CNN from scratch.

    Five blocks of (two 3x3 convs + BatchNorm + ReLU) followed by 2x2 max
    pooling, with channel widths 32-64-128-256-512, then a 1000-unit dense
    layer with dropout and a final linear classifier.

    Args:
        num_classes (int): size of the output layer.
    Returns:
        torch.nn.modules.module.Module
    """
    class Flatten(nn.Module):
        # Collapses all non-batch dimensions into one (predates nn.Flatten).
        def __init__(self):
            super(Flatten, self).__init__()
        def forward(self, x):
            x = x.view(x.size(0), -1)
            return x
    # NOTE(review): the dense layer's in_features=8192 fixes the expected
    # input resolution (8192 = 512 channels x remaining spatial cells after
    # the five pools) -- confirm the intended input image size.
    model = nn.Sequential(OrderedDict([
        # Block 0: 3 -> 32 channels.
        ('conv00', nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3))),
        ('norm00', nn.BatchNorm2d(num_features=32)),
        ('relu00', nn.ReLU(inplace=False)),
        ('conv01', nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3))),
        ('norm01', nn.BatchNorm2d(num_features=32)),
        ('relu01', nn.ReLU(inplace=False)),
        ('pool0', nn.MaxPool2d(kernel_size=(2, 2))),
        # Block 1: 32 -> 64 channels.
        ('conv10', nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3))),
        ('norm10', nn.BatchNorm2d(num_features=64)),
        ('relu10', nn.ReLU(inplace=False)),
        ('conv11', nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3))),
        ('norm11', nn.BatchNorm2d(num_features=64)),
        ('relu11', nn.ReLU(inplace=False)),
        ('pool1', nn.MaxPool2d(kernel_size=(2, 2))),
        # Block 2: 64 -> 128 channels.
        ('conv20', nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3))),
        ('norm20', nn.BatchNorm2d(num_features=128)),
        ('relu20', nn.ReLU(inplace=False)),
        ('conv21', nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3))),
        ('norm21', nn.BatchNorm2d(num_features=128)),
        ('relu21', nn.ReLU(inplace=False)),
        ('pool2', nn.MaxPool2d(kernel_size=(2, 2))),
        # Block 3: 128 -> 256 channels.
        ('conv30', nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3))),
        ('norm30', nn.BatchNorm2d(num_features=256)),
        ('relu30', nn.ReLU(inplace=False)),
        ('conv31', nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3))),
        ('norm31', nn.BatchNorm2d(num_features=256)),
        ('relu31', nn.ReLU(inplace=False)),
        ('pool3', nn.MaxPool2d(kernel_size=(2, 2))),
        # Block 4: 256 -> 512 channels.
        ('conv40', nn.Conv2d(in_channels=256, out_channels=512, kernel_size=(3, 3))),
        ('norm40', nn.BatchNorm2d(num_features=512)),
        ('relu40', nn.ReLU(inplace=False)),
        ('conv41', nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3))),
        ('norm41', nn.BatchNorm2d(num_features=512)),
        ('relu41', nn.ReLU(inplace=False)),
        ('pool4', nn.MaxPool2d(kernel_size=(2, 2))),
        # Classifier head.
        ('flatten', Flatten()),
        ('dense', nn.Linear(in_features=8192, out_features=1000, bias=True)),
        ('norm', nn.BatchNorm1d(num_features=1000)),
        ('relu', nn.ReLU(inplace=False)),
        ('dropout', nn.Dropout(p=0.2, inplace=False)),
        ('last', nn.Linear(in_features=1000, out_features=num_classes, bias=True))
    ]))
    return model
def lrcn(num_classes, lrcn_time_steps, lstm_hidden_size=200, lstm_num_layers=2):
    """LRCN (per-frame CNN + BiLSTM) video classifier -- work in progress.

    NOTE(review): most of the intended pipeline (per-frame CNN, bidirectional
    LSTM, dense head) is commented out below. As currently built, the model is
    a ModuleList of fresh nn.Linear(10, 10) layers applied sequentially to the
    whole input tensor, so this function returns scaffolding, not a working
    LRCN -- confirm before use.

    Args:
        num_classes (int): number of output classes (unused by the active path).
        lrcn_time_steps (int): number of frames / time steps.
        lstm_hidden_size (int): hidden size for the (commented-out) LSTM.
        lstm_num_layers (int): layer count for the (commented-out) LSTM.
    Returns:
        torch.nn.modules.module.Module
    """
    class TimeDistributed(nn.Module):
        # NOTE(review): the `layer` argument is currently ignored -- a fresh
        # Linear(10, 10) per time step is created instead (see commented line).
        def __init__(self, layer, time_steps):
            super(TimeDistributed, self).__init__()
            # self.layers = nn.ModuleList([layer for _ in range(time_steps)])
            self.layers = nn.ModuleList([nn.Linear(10, 10) for _ in range(time_steps)])
        def forward(self, x):
            batch_size, time_steps, *_ = x.size()
            # outputs = list()
            # NOTE(review): applies every layer to the whole tensor in turn,
            # not one layer per time slice as the commented code intended.
            for i, layer in enumerate(self.layers):
                x = layer(x)
                # output_t = layer(x[:, i])
                # if i == 0:
                #     output = output_t.unsqueeze(1)
                # else:
                #     output = torch.cat((output, output_t.unsqueeze(1)), 1)
                # outputs.append(output_t)
            # output = torch.stack(outputs, dim=1)
            # return output
            return x
    class BiLSTMHidden2Dense(nn.Module):
        # Adapter: takes (lstm_output, (hn, cn)) and returns the concatenated
        # last forward/backward hidden states, shaped (batch, 2*hidden).
        def __init__(self):
            super(BiLSTMHidden2Dense, self).__init__()
        def forward(self, x):
            lstm_output, (hn, cn) = x
            lstm_last_hidden_state = hn[-2:].transpose(0, 1).contiguous().view(hn.size(1), -1)
            return lstm_last_hidden_state
    # NOTE(review): built but unused -- the CNN backbone is commented out below.
    cnn_model = squeezenet1_1(pretrained=False, progress=True)
    model = nn.Sequential(OrderedDict([
        ('timedistributed_cnn', TimeDistributed(nn.Conv2d(3, 60, (1, 1)), time_steps=lrcn_time_steps)),
        # ('timedistributed_cnn', TimeDistributed(cnn_model, time_steps=lrcn_time_steps)),
        # ('bidirectional_stacked_lstm', nn.LSTM(input_size=1000, hidden_size=lstm_hidden_size, num_layers=lstm_num_layers,
        #                                        batch_first=True, dropout=0.2, bidirectional=True)),
        # ('hidden2dense', BiLSTMHidden2Dense()),
        # ('dense', nn.Linear(in_features=2*lstm_hidden_size, out_features=lstm_hidden_size)),
        # ('norm', nn.BatchNorm1d(num_features=lstm_hidden_size)),
        # ('relu', nn.ReLU()),
        # ('dropout', nn.Dropout(p=0.25)),
        # ('last', nn.Linear(in_features=lstm_hidden_size, out_features=num_classes))
    ]))
    return model
|
from random import randint
from typing import List
from MainSettings import MainSettings
from shardcalc.models.Awaken import Awaken
from common.utils.DateUtils import DateUtils
from shardcalc.utils.ShardEventUtils import CommonEventUtils, ShardEventUtils
class ShardUtils:
    """Estimates daily shard income from arena leagues, quests, guild wars and raids.

    All tunables are read from MainSettings.config in __init__; one instance
    models one player's current progression state.
    """
    __QUEST_COOLDOWN: int = 21
    # Reward per arena-quest tier; index 0 is unused so tier N indexes directly.
    __QUEST_REWARDS = [0, 150, 220, 320]
    # league code -> [daily reward, season reward]
    __ARENA_SHARDS = {
        "L3": [240, 1200],
        "L2": [230, 1100],
        "L1": [220, 1000],
        # NOTE(review): 9000 breaks the monotonic 1200..700 season progression;
        # likely a typo for 900 -- confirm before changing.
        "D3": [210, 9000],
        "D2": [190, 800],
        "D1": [170, 700]
    }
    __GW_CHEST_REWARD = 500
    # Instance attributes (all assigned in __init__).
    eventUtils: CommonEventUtils
    shards: int
    shardsDaily: int
    daily: str
    season: str
    awakens: List[Awaken]
    questCooldowns: dict
    raidBoss: int
    gwScore: int
    gwKeys: int
    gwWinRate: int
    def __init__(self):
        config = MainSettings.config
        self.eventUtils = ShardEventUtils(config)
        self.shards = config.getint("settings", "shards")
        self.__processPacifier(config.get("settings", "pacifier"))
        self.shardsDaily = config.getint("settings", "artifact")
        # leagues
        self.daily = config.get("arena", "daily")
        self.season = config.get("arena", "season")
        # gw
        self.gwScore = config.getint("gw", "score")
        self.gwKeys = config.getint("gw", "keys")
        self.gwWinRate = config.getint("gw", "win")
        # quests
        # BUG FIX: questCooldowns and awakens were class-level mutable
        # attributes, so every instance mutated the same shared dict/list;
        # they are now created per instance.
        self.questCooldowns = {}
        self.questCooldowns[1] = config.getint("quest", "arena1")
        self.questCooldowns[2] = config.getint("quest", "arena2")
        self.questCooldowns[3] = config.getint("quest", "arena3")
        # raids
        self.raidBoss = config.getint("settings", "raidBoss")
        # awakens
        self.awakens = []
        awakens = config["awakens"]
        for key in awakens:
            values: str = awakens[key]
            for value in values.split(" "):
                self.awakens.append(Awaken(key, int(value)))
    def __processPacifier(self, pacifier: str):
        """Credit back the cost difference for each 'from to' awaken pair in *pacifier* (comma-separated)."""
        if len(pacifier) == 0:
            return
        for pacify in pacifier.split(","):
            pacifyFromTo = pacify.strip().split(" ")
            fromCost = Awaken.AWAKEN_COST[int(pacifyFromTo[0])]
            toCost = Awaken.AWAKEN_COST[int(pacifyFromTo[1])]
            self.shards += fromCost - toCost
    def getDailyShards(self, league: str):
        """Daily arena reward for *league* (e.g. 'L3', 'D1')."""
        return self.__ARENA_SHARDS[league][0]
    def getSeasonShards(self, league: str):
        """Season arena reward for *league*."""
        return self.__ARENA_SHARDS[league][1]
    def getArenaRewards(self) -> int:
        """Arena shards for today: nothing Monday, tier-capped Tue/Wed, season bonus Sunday."""
        dailyLeague = self.daily[0]
        if DateUtils.isMonday():
            return 0
        if DateUtils.isTuesday():
            # Tuesday pays out at tier 1 of the current league group.
            league = dailyLeague + "1"
            return self.getDailyShards(league)
        if DateUtils.isWednesday():
            # Wednesday is capped at tier 2 unless the player is below it.
            league = dailyLeague + "2"
            if self.daily >= league:
                return self.getDailyShards(league)
        dailyShards = self.getDailyShards(self.daily)
        if DateUtils.isSunday():
            return self.getSeasonShards(self.season) + dailyShards
        return dailyShards
    def getQuestRewards(self) -> int:
        """Collect any arena quests that are off cooldown; tick the rest down by one day."""
        shards = 0
        for arenaQuest in range(1, 4):
            remaining = self.questCooldowns[arenaQuest]
            if remaining == 0:
                shards += self.__QUEST_REWARDS[arenaQuest]
                self.questCooldowns[arenaQuest] = self.__QUEST_COOLDOWN
            else:
                self.questCooldowns[arenaQuest] = remaining - 1
        return shards
    def getGwRewards(self) -> int:
        """Simulate one guild-war reward day; win/lose is random at self.gwWinRate percent."""
        if not DateUtils.isGwRewardDay():
            return 0
        isWin = randint(0, 100) <= self.gwWinRate
        self.gwKeys += 3 if isWin else 1
        winLoseMult = 4 if isWin else 8
        # open chest
        chestShards = 0
        # NOTE(review): keys can jump past 6 (e.g. 5 -> 8), so this equality
        # may never trigger; '>= 6' might be the intended check -- confirm.
        if self.gwKeys == 6:
            self.gwKeys -= 6
            chestShards = self.__GW_CHEST_REWARD
        return int(self.gwScore / winLoseMult) + chestShards
    def getRaidRewards(self) -> int:
        """Raid shards: boss level x4 on non-raid days, nothing on raid days."""
        isOffDay = not self.eventUtils.isRaidDay(DateUtils.currentDate)
        if isOffDay:
            return self.raidBoss * 4
        return 0
    def getArtifactShards(self) -> int:
        """Flat daily artifact shard income from configuration."""
        return self.shardsDaily
|
# -*- coding: utf-8 -*-
from openerp import http
# class Demo(http.Controller):
# @http.route('/demo/demo/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/demo/demo/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('demo.listing', {
# 'root': '/demo/demo',
# 'objects': http.request.env['demo.demo'].search([]),
# })
# @http.route('/demo/demo/objects/<model("demo.demo"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('demo.object', {
# 'object': obj
# }) |
from __future__ import print_function
import functools
import tensorflow as tf
def lazy_property(function):
    """Memoize a zero-argument method as a read-only property.

    The wrapped method runs once on first access; the result is stored on the
    instance under '_<name>' and returned directly on later accesses.
    """
    cache_name = '_' + function.__name__

    @property
    @functools.wraps(function)
    def getter(self):
        if not hasattr(self, cache_name):
            setattr(self, cache_name, function(self))
        return getattr(self, cache_name)
    return getter
def regularization(function):
    """Decorator: add the model's regularization penalty to the wrapped loss.

    If self.regularization is None the wrapped function's value is returned
    unchanged; otherwise the penalty on self.weights is added via tf.add.
    """
    @functools.wraps(function)
    def wrapper(self, *args):
        if self.regularization is None:
            return function(self, *args)
        base = function(self, *args)
        penalty = self.regularization.regularization(self.weights)
        return tf.add(base, penalty)
    return wrapper
def distance(function):
    """Decorator: add a distance-based penalty to the wrapped loss.

    Mirrors the `regularization` decorator above: if self.distance is None
    the wrapped function's value is returned unchanged.
    """
    @functools.wraps(function)
    def wrapper(self, *args):
        if self.distance is None:
            return function(self, *args)
        # BUG FIX: the original added self.regularization's penalty here
        # (copy-paste from the decorator above) even though the guard tests
        # self.distance. Use the distance object's own penalty instead.
        # TODO(review): confirm the distance API exposes .distance(weights).
        return tf.add(function(self, *args), self.distance.distance(self.weights))
    return wrapper
|
#!/usr/bin/env python
# coding: utf-8
# <b> Load data from the File-input.csv file into a pandas dataframe and print it to the Jupyter console. <b>
# In[12]:
import os
# In[4]:
import pandas as pd
# In[5]:
dataframe = pd.read_csv("File-input.csv")
# Bare expression: displays the frame in Jupyter; a no-op when run as a script.
dataframe
# <b> Convert the dataframe into a list of Dictionaries using the .to_dict method and then save all the records from the pandas dataframe as different dictionaries. Orient them as 'records' and print them on different lines. <b>
# In[6]:
result = dataframe.to_dict(orient='records')
for row in result:
    record = row.copy()  # renamed from `dict`: do not shadow the builtin
    print(record)
# <b> Access and print the 5th dictionary as 5th record of the pandas dataframe. <b>
# In[7]:
n = 4
print(result[n])
# <b> List all the keys of this dictionary, one per line, and then create a list with the them called "headers". <b>
# In[8]:
record = result[n]
for key in record.keys():
    print(key)
print()
headers = list(record.keys())
print(headers)
# <b>Select and print the 3rd element in the list. <b>
# In[9]:
n = 2
print(headers[n])
# <b> Modify the values for "Temp" and "Weight" keys for this specific record. <b>
# In[10]:
record["Temp"] = 100.0
record["Weight"] = 200
print(list(record.values()))
# <b> Find the "Last name" that has the max value of "Weight" from the dataframe. <b>
# In[11]:
df = dataframe[['Last name', 'Weight']]
result = df.to_dict(orient='records')
max_weight = -1  # renamed from `max`: do not shadow the builtin
solution = ((),)
for row in result:
    weight = row['Weight']
    last_name = row['Last name']
    if weight > max_weight:
        max_weight = weight
        solution = last_name, weight
print(solution)
# In[ ]:
|
import sys
from PyQt5.QtWidgets import (QApplication, QCheckBox, QColorDialog, QDialog,
QErrorMessage, QFileDialog, QFontDialog, QFrame, QGridLayout,
QInputDialog, QLabel, QLineEdit, QMessageBox, QPushButton)
def defin_right(d_x):
    """Return the number of characters after the decimal point in str(d_x)."""
    text = str(d_x)
    dot_pos = text.find('.')
    return len(text[dot_pos + 1:])
def defin_left(d_x):
    """Return the number of characters before the decimal point in str(d_x)."""
    text = str(d_x)
    return len(text[:text.find('.')])
def defin_delta(delta, d_x):
    """Return the rounding half-step (5 * 10**-k) appropriate for the error d_x.

    For integer-valued *delta* the half-step is 0.5. Otherwise the candidate
    starts at 5/10**digits(d_x) and grows by 10x until it exceeds d_x.
    """
    if delta == int(delta):
        return 0.5
    leng = defin_right(d_x)
    delta = 5.0 / (10 ** leng)
    for _ in range(leng):
        if d_x < delta:
            return delta
        delta *= 10
    # BUG FIX: the original returned an unbound local (UnboundLocalError)
    # when d_x never dropped below the candidate; fall back to the last
    # (largest) scale tried instead.
    return delta
def len_for_cifr(x):
    """Return len(str(x)) - 1 (character count of x ignoring one position, e.g. the decimal point)."""
    return len(str(x)) - 1
class Dialog(QDialog):
    """Dialog that reads "value error" from an input box and shows the value
    rounded to the precision implied by its error."""
    # Class-level defaults. NOTE(review): setText below assigns a *local*
    # GLOB, so this class attribute is never actually updated.
    GLOB = 0.0
    MESSAGE = "dimagrishko"
    def __init__(self, parent=None):
        # Build the minimal UI: a 'Press' button and a framed result label.
        super(Dialog, self).__init__(parent)
        self.openFilesPath = ''
        self.errorMessageDialog = QErrorMessage(self)
        frameStyle = QFrame.Sunken | QFrame.Panel
        self.textLabel = QLabel()
        self.textLabel.setFrameStyle(frameStyle)
        self.textButton = QPushButton("Press")
        self.textButton.clicked.connect(self.setText)
        # Native-file-dialog checkbox is only shown on Windows/macOS.
        self.native = QCheckBox()
        self.native.setText("Use native file dialog.")
        self.native.setChecked(True)
        if sys.platform not in ("win32", "darwin"):
            self.native.hide()
        layout = QGridLayout()
        layout.setColumnStretch(1, 1)
        layout.setColumnMinimumWidth(1, 250)
        layout.addWidget(self.textButton, 3, 0)
        layout.addWidget(self.textLabel, 3, 1)
        self.setLayout(layout)
        self.setWindowTitle("2a")
    def setText(self):
        # Prompt for two space-separated numbers: the value X and its error dX,
        # then round X according to the half-step derived from dX.
        text, ok = QInputDialog.getText(self, "Finding...",
                "Enter your number", QLineEdit.Normal, "NULL")
        if ok and text != '':
            ls = text.split()
            num1 = float(ls[0])
            num2 = float(ls[1])
            print(num1)
            print(num2)
            X = num1  # 72.353
            d_xm = num2  # 0.026
            X_a = X
            delta = defin_delta(d_xm, d_xm)
            # Iteratively re-round X with fewer digits until the combined
            # absolute error stays under the allowed half-step.
            if len_for_cifr(X) != len_for_cifr(delta):
                for i in range(len_for_cifr(delta)):
                    X = round(X_a, len_for_cifr(delta) - defin_left(X))
                    dx_abs = d_xm + abs(X_a - X)
                    if dx_abs < delta:
                        break
                    X = round(X_a, len_for_cifr(delta) - defin_left(X) - 1)
                    delta = defin_delta(X, X)
            # NOTE(review): binds a local name; Dialog.GLOB stays 0.0.
            GLOB = str(X)
            self.textLabel.setText(GLOB)
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the dialog,
    # and exit with the event loop's return code.
    application = QApplication(sys.argv)
    main_dialog = Dialog()
    main_dialog.show()
    sys.exit(application.exec_())
#!/usr/bin/python3
"""
PYTHON OBJECT RELATIONAL MAPPING MODULE
Model_State_Fetch_All module provides function to get all states from states
table in the DB.
"""
import sys
from model_state import Base, State
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
def model_state_fetch_all():
    """
    Lists all State objects from the database and show results sorted in
    ascending order by states.id.
    Takes 3 arguments: mysql username, mysql password and database name.
    """
    user, password, db_name = sys.argv[1], sys.argv[2], sys.argv[3]
    engine = create_engine(
        'mysql+mysqldb://{}:{}@localhost/{}'.format(user, password, db_name),
        pool_pre_ping=True)
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    for state in session.query(State).order_by(State.id.asc()):
        print('{}: {}'.format(state.id, state.name))
    session.close()
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    model_state_fetch_all()
|
from django.db import models
from django.contrib.auth import get_user_model
from django.core.validators import MinLengthValidator
class Category(models.Model):
    """Category a read book can be filed under. (original docstring: カテゴリー)"""
    # Display name; also used as the string representation below.
    title = models.CharField(max_length=20)
    def __str__(self):
        return self.title
class ReadBook(models.Model):
    """A book the user has read, with up to three quote/reflection pairs.
    (original docstring: 読んだ本)"""
    title = models.CharField(blank=False, null=False, max_length=40)
    # Up to three optional quote / consideration (reflection) pairs.
    quote1 = models.TextField(blank=True)
    consideration1 = models.TextField(blank=True)
    quote2 = models.TextField(blank=True)
    consideration2 = models.TextField(blank=True)
    quote3 = models.TextField(blank=True)
    consideration3 = models.TextField(blank=True)
    # Required overall reflection, at least 50 characters.
    consideration4 = models.TextField(blank=False, null=False, validators=[MinLengthValidator(50)])
    # PROTECT: a category with books cannot be deleted.
    category = models.ForeignKey(Category, on_delete=models.PROTECT)
    # CASCADE: deleting the user removes their book entries.
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
    # NOTE(review): auto_now=True updates this timestamp on *every* save; for a
    # creation time the intended option is auto_now_add=True -- confirm before
    # changing (requires a migration).
    created_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.title
|
import pytest
from unittest import mock
import builtins
import re
# Roman-numeral validation pattern (anchored at the end with $). Do not delete 'r'.
regex_pattern = r"M{0,3}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$"  # Do not delete 'r'.
def validating():
    """Read one line from stdin and return 'True'/'False' for whether it is a valid Roman numeral."""
    matched = re.match(regex_pattern, input(''))
    return str(bool(matched))
def test_substraction():
    # NOTE(review): `subtraction_of_numbers` is not defined in this module, so
    # this test fails with NameError as written -- confirm the intended import.
    # NOTE(review): mock.patch.object's signature is (target, attribute, new,
    # spec, ...); the trailing `7` is passed as `spec`, which is almost
    # certainly unintended -- the lambda alone should suffice.
    with mock.patch.object(builtins, 'input', lambda _: 7, 7):
        assert subtraction_of_numbers() == 0
def test_product():
    # NOTE(review): `product_of_numbers` is not defined in this module, so this
    # test fails with NameError as written -- confirm the intended import.
    # NOTE(review): as above, the trailing `2` lands in patch.object's `spec`
    # parameter; the lambda replacement alone should suffice.
    with mock.patch.object(builtins, 'input', lambda _: 2, 2):
        assert product_of_numbers() == 4
"""
Author: John Beckingham
Student ID: <redacted>
Author: Hai Yang Xu
Student ID: <redacted>
Maze Generator maze solver
"""
import server.libbfs as libbfs
def solve(graph, start=(0, 0), end=(34, 24)):
    """
    Given a graph representing a maze with unique solution, find that solution
    The default player position is (0, 0) and the default goal is (34, 24)
    """
    # BFS from the start produces a predecessor tree; the path to `end`
    # is then reconstructed from that tree.
    search_tree = libbfs.breadth_first_search(graph, start)
    path = libbfs.get_path(search_tree, start, end)
    return path
|
#!/usr/bin/python2.7 -E
#
# Code generator main program
#
# Copyright (C) Sierra Wireless, Inc. 2013. All rights reserved. Use of this work is subject to license.
#
import os
import sys
import argparse
import collections
import codeTypes
import interfaceParser
import codeGen
def GetArguments():
    """Parse and post-process the code generator's command-line arguments.

    Returns the argparse namespace after: expanding --gen-all into the
    individual gen* flags, defaulting name/file/service prefixes from the
    interface file name, stripping legacy trailing underscores, and
    absolutizing import/output directories.
    """
    # Define the command line arguments/options
    parser = argparse.ArgumentParser(description='Interface Code Generator')
    parser.add_argument('interfaceFile',
                        metavar='FILE',
                        help='name of interface file')
    parser.add_argument('--gen-all',
                        dest="genAll",
                        action='store_true',
                        default=False,
                        help='generate all files; overrides individual file options')
    parser.add_argument('--gen-interface',
                        dest="genInterface",
                        action='store_true',
                        default=False,
                        help='generate interface header file')
    parser.add_argument('--gen-local',
                        dest="genLocal",
                        action='store_true',
                        default=False,
                        help='generate local header file')
    parser.add_argument('--gen-client',
                        dest="genClient",
                        action='store_true',
                        default=False,
                        help='generate client IPC implementation file')
    parser.add_argument('--gen-server-interface',
                        dest="genServerInterface",
                        action='store_true',
                        default=False,
                        help='generate server interface header file')
    parser.add_argument('--gen-server',
                        dest="genServer",
                        action='store_true',
                        default=False,
                        help='generate server IPC implementation file')
    parser.add_argument('--async-server',
                        dest="async",
                        action='store_true',
                        default=False,
                        help='generate asynchronous-style server functions')
    parser.add_argument('--name-prefix',
                        dest="namePrefix",
                        default='',
                        help='''optional prefix for generated functions/types; defaults to input
                        filename''')
    parser.add_argument('--file-prefix',
                        dest="filePrefix",
                        default='',
                        help='optional prefix for generated files; defaults to input file name')
    parser.add_argument('--service-name',
                        dest="serviceName",
                        default='',
                        help='optional service instance name; defaults to input file name')
    parser.add_argument('--output-dir',
                        dest="outputDir",
                        default='',
                        help='optional output directory for generated files')
    parser.add_argument('--get-import-list',
                        dest="getImportList",
                        action='store_true',
                        default=False,
                        help='print out the list of imported files')
    parser.add_argument('--import-dir',
                        dest="importDirs",
                        action="append",
                        default=[],
                        help='optional directory for imported files; may be given multiple times')
    parser.add_argument('--no-default-prefix',
                        dest="noDefaultPrefix",
                        action='store_true',
                        default=False,
                        help='do not use default file or name prefix if none is specified')
    parser.add_argument('--hash',
                        dest="hash",
                        action='store_true',
                        default=False,
                        help='print SHA256 hash for interface; NO files are generated')
    parser.add_argument('--dump',
                        dest="dump",
                        action='store_true',
                        default=False,
                        help='print info on parsed functions; NO files are generated')
    # Parse the command lines arguments
    args = parser.parse_args()
    # If --gen-all is specified, it forces all files to be generated
    if args.genAll:
        args.genInterface=True
        args.genLocal=True
        args.genClient=True
        args.genServerInterface=True
        args.genServer=True
    # Get the base file name, without any extensions.
    apiFileName = os.path.splitext( os.path.basename(args.interfaceFile) )[0]
    # If appropriate, use the default name or file prefixes
    if not args.noDefaultPrefix:
        if not args.namePrefix:
            args.namePrefix = apiFileName
        if not args.filePrefix:
            args.filePrefix = apiFileName
    # Use the default service instance name, if none is given.
    if not args.serviceName:
        args.serviceName = apiFileName
    # todo: Remove this once all callers are updated.
    # The usage has changed slightly, so the trailing '_' will be added when necessary.
    # Until all callers have been updated, strip off the trailing underscore, if given.
    if args.namePrefix and args.namePrefix[-1] == '_':
        args.namePrefix = args.namePrefix[:-1]
    if args.filePrefix and args.filePrefix[-1] == '_':
        args.filePrefix = args.filePrefix[:-1]
    # fix relative paths for all paths that aren't ''
    args.importDirs = [ os.path.abspath(path) if path else path for path in args.importDirs ]
    if args.outputDir:
        args.outputDir = os.path.abspath(args.outputDir)
    return args
def MakeImportList(data, importDirs):
    """Return the ordered, de-duplicated list of imported .api file paths.

    Recurses into each imported file so that nested imports appear earlier in
    the list (processing order). Exits the program if an import cannot be
    located in any of *importDirs*.
    NOTE(review): this file targets Python 2 -- under Python 3, d.keys()
    at the bottom would return a view rather than a list.
    """
    importNameList = interfaceParser.GetImportList(data)
    importPathList = []
    # The name does not contain any leading path or any suffix
    for name in importNameList:
        fullname = name+".api"
        # Try to find the imported file in the given directory list
        found = False
        for d in importDirs:
            path = os.path.join(d, fullname)
            if os.path.isfile(path):
                found = True
                break
        if not found:
            sys.stderr.write("ERROR: '%s' not found in %s\n" % (fullname, importDirs))
            sys.exit(1)
        # Found the imported file, so add it to the path list
        importPathList.append(path)
        # Process each imported file for nested imports. The nested imported files
        # are added to the beginning of the list, so that they get processed first.
        data = open(path, 'r').read()
        importPathList = MakeImportList(data, importDirs) + importPathList
    # Ensure there are no duplicates in the list. Any duplicates that appear later
    # in the list should be removed. We must maintain the order of the elements in
    # the list, since this determines the processing order. For an OrderedDict:
    #     "If a new entry overwrites an existing entry, the original insertion
    #      position is left unchanged"
    d = collections.OrderedDict()
    for p in importPathList:
        d[p] = 1
    importPathList = d.keys()
    return importPathList
def ProcessImportList(importList):
    """Parse every imported .api file and return the combined code list.

    Parsing each file also populates the type-translation tables as a side
    effect (via codeTypes.SetImportName / SetNamePrefix).
    """
    #print importList
    importedCodeList = []
    for path in importList:
        fullname = os.path.basename(path)
        name = os.path.splitext(fullname)[0]
        # NOTE: Don't remove this. The mk tools depend on it.
        print "importing", fullname
        data = open(path, 'r').read()
        # In the current .api file, the imported types will be referenced using "name.",
        # whereas in the generated C code, the prefix will be "name_".
        codeTypes.SetImportName(name)
        codeTypes.SetNamePrefix(name)
        # Parse the imported file, which implicitly populates the type translation data
        parsedCode = interfaceParser.ParseCode(data, path)
        codeList = parsedCode['codeList']
        importedCodeList += codeList
    return importedCodeList
#
# Main
#
def Main():
    """Entry point: parse arguments, process imports, then either dump/hash
    the parsed interface or generate the requested output files."""
    args = GetArguments()
    #print args
    data = open(args.interfaceFile, 'r').read()
    # Create a list of all the imported files.
    importDirs = [ os.path.split(args.interfaceFile)[0] ] + args.importDirs
    importList = MakeImportList(data, importDirs)
    # If we just want the import list, then print it out and exit
    if args.getImportList:
        print '\n'.join(importList)
        sys.exit(0)
    # Process all the imported files first.
    importedCodeList = ProcessImportList(importList)
    # Set the name prefix first. This has to be done before the interface is actually
    # parsed, since the resulting parsedCode will use the prefix. Also, reset the
    # import name, since this is the main file.
    codeTypes.SetNamePrefix(args.namePrefix)
    codeTypes.SetImportName("")
    parsedCode = interfaceParser.ParseCode(data, args.interfaceFile)
    allCodeList = importedCodeList + parsedCode['codeList']
    # Dump info on the parsed file. No need to generate any code.
    if args.dump:
        if args.hash:
            # Print out the text used for generating the hash
            print codeTypes.GetHashString(allCodeList)
        else:
            # Print out info on all the types, functions and handlers
            codeTypes.PrintCode(allCodeList)
            # Print out the type dictionary.
            print '='*40 + '\n'
            codeTypes.PrintDefinedTypes()
        sys.exit(0)
    # Calculate the hashValue, as it is always needed
    hashValue = codeTypes.GetHash(allCodeList)
    # Handle the --hash argument here. No need to generate any code
    if args.hash:
        print hashValue
        sys.exit(0)
    # Pass 'args' so that the function can determine what needs to be output
    codeGen.WriteAllCode(args, parsedCode, hashValue)
#
# Init
#
# Runs the generator immediately at import/execution time; note there is
# no `if __name__ == '__main__'` guard in this script.
Main()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 14 10:32:09 2017
@author: Xithrius
"""
import random

# Pick one of the four phrases uniformly at random.
# FIX: randint already returns an int -- the original bound it to a variable
# named `rString` and then redundantly converted it with int().
r = random.randint(0, 3)
d = {0: 'screw you',
     1: 'frick you',
     2: 'flip you',
     3: 'razzel dazzel you'
     }
x = d[r]
print(x)
|
from lib.Song import Song
import requests as http
import redis as rd
from base64 import b64encode
import statistics
# Local Redis instance used as the token/session cache.
cache = rd.StrictRedis(host='localhost', port=6379, db=0)
# SECURITY NOTE(review): OAuth client credentials are hard-coded in source;
# move them to environment variables/configuration and rotate the exposed secret.
client_id = 'f3b0c51df1124cc985fd4012b6d55d95'
client_secret = 'e54ca2e0bf394944a1247830443dba3c'
# Spotify Web API endpoints.
token_uri = 'https://accounts.spotify.com/api/token'
track_uri = 'https://api.spotify.com/v1/tracks/'
recommendations_uri = 'https://api.spotify.com/v1/recommendations?seed_tracks='
def get_request(url, call_type='GET', body=None):
    """Send an authenticated request to the Spotify API.

    On any HTTP status >= 400 the access token is refreshed once and the
    request retried.
    ## TODO:
    # distinguish response kinds: invalid client, malformed request, etc.

    Args:
        url (str): full request URL.
        call_type (str): 'GET', 'POST' or 'PUT'.
        body: form payload for POST/PUT requests.
    Returns:
        requests.Response
    Raises:
        ValueError: for an unsupported call_type (the original silently hit
            an UnboundLocalError instead).
    """
    def _send():
        # Re-read the token each attempt so a refresh is picked up.
        access_token = cache.get('access_token').decode('utf-8')
        auth = {'Authorization': 'Bearer ' + access_token}
        # BUG FIX: the original compared strings with `is`, which relies on
        # CPython interning and is not guaranteed; use `==`.
        if call_type == 'GET':
            return http.get(url, headers=auth)
        if call_type == 'POST':
            return http.post(url, data=body, headers=auth)
        if call_type == 'PUT':
            return http.put(url, data=body, headers=auth)
        raise ValueError('unsupported call_type: %r' % call_type)
    response = _send()
    if int(response.status_code) >= 400:
        refresh_access_token()
        response = _send()
    return response
def refresh_access_token():
    """Exchange the cached refresh_token for a new access_token and store it in the cache."""
    refresh_token = cache.get('refresh_token').decode('utf-8')
    payload = {
        'grant_type': 'refresh_token',
        'refresh_token': refresh_token
    }
    # HTTP Basic auth: base64("client_id:client_secret").
    credentials = (client_id + ':' + client_secret).encode('utf-8')
    basic_auth = b64encode(credentials).decode('utf-8')
    token_response = http.post(token_uri, data=payload, headers={
        'Authorization': 'Basic ' + basic_auth,
        'Content-Type': 'application/x-www-form-urlencoded'
    })
    cache.set('access_token', token_response.json()['access_token'])
# takes either a track id or a track object as returned by the spotify api
def create_song(track, added_by=None, explicit=True, return_is_explicit=False):
    """Build a Song from a Spotify track URI string or a track JSON object.

    Args:
        track: a track object (dict) from the Spotify API, or a URI string.
            A string has its first 14 characters stripped -- assumes the
            'spotify:track:<id>' format; TODO(review) confirm with callers.
        added_by: optional identifier of the user who queued the song.
        explicit (bool): whether the song was explicitly added (vs implied/recommended).
        return_is_explicit (bool): when True, also return Spotify's
            explicit-lyrics flag for the track.
    Returns:
        Song, or (Song, bool) when return_is_explicit is True.
    """
    # FIX: use isinstance instead of `type(track) is str` (also accepts str subclasses).
    if isinstance(track, str):
        min_track_id = track[14:]
        response = get_request(track_uri + min_track_id)
        data = response.json()
    else:
        data = track
    track_id = data['id']
    track_name = data['name']
    track_artists = ','.join(artist['name'] for artist in data['artists'])
    album_uri = data['album']['images'][0]['url']
    album_name = data['album']['name']
    duration = data['duration_ms']
    is_explicit = data['explicit']
    song_obj = Song(track_name, track_id, track_artists, album_uri, album_name,
                    duration, explicit=explicit, added_by=added_by)
    if return_is_explicit:
        return song_obj, is_explicit
    return song_obj
def get_implicit_songs(seeds, num):
    """Returns implicit songs by calling the Spotify Recommendations API"""
    seed_ids = ','.join(song['track_id'] for song in seeds)
    url = recommendations_uri + seed_ids + '&limit=' + str(num)
    data = get_request(url).json()
    implicit = []
    for track_obj in data['tracks']:
        implicit.append(create_song(track_obj, explicit=False).to_dict())
    return implicit
def get_medians(seeds):
    """An (unused) method Kai wrote because he thought we needed it.

    Returns a Spotify query-string fragment targeting the median valence and
    energy of *seeds* (iterable of dicts with 'valence' and 'energy' keys).
    """
    median_valence = statistics.median(song['valence'] for song in seeds)
    median_energy = statistics.median(song['energy'] for song in seeds)
    # BUG FIX: the original concatenated floats onto strings (TypeError) and
    # omitted the '=' after 'target_energy'.
    return '&target_valence=' + str(median_valence) + '&target_energy=' + str(median_energy)
|
Python 3.8.1 (tags/v3.8.1:1b293b6, Dec 18 2019, 22:39:24) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> range(20)
range(0, 20)
>>> range(10,20)
range(10, 20)
>>> list(range(10,20))
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
>>> list(range(10,21))
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
>>>
|
#Author Gayatri Deo
import nltk;
import sys;
from nltk.corpus import wordnet as wn;
def printExamples(category):
    # Walk two levels of WordNet hyponyms under every synset of `category` and
    # print each usage example with the hyponym's head word blanked out as '__'.
    # NOTE(review): Python 2 code. With modern nltk, Synset.examples and
    # Synset.name are methods (examples(), name()); this relies on an old nltk
    # release where they were attributes -- confirm the pinned nltk version.
    for f in wn.synsets(category):
        for hypo in f.hyponyms():
            for hypo1 in hypo.hyponyms():
                for e in hypo1.examples:
                    # Mask every occurrence of the hyponym's head word.
                    e = '__'.join(e.split(hypo1.name.split('.')[0]));
                    print category, ",", hypo1.name.split('.')[0], ",", e;
def __main__():
    # NOTE(review): the string below is a docstring, not a call; this function
    # simply forwards the category given on the command line.
    "printing examples for words from a particular category";
    printExamples(sys.argv[1]);
# Invoked unconditionally at import time; there is no `if __name__` guard.
__main__();
|
from utils import s_box, r_con, combine_byte, mix_matrix, get_rounds, sub_byte
def subWord(a):
    """Apply the AES S-box substitution to each byte of the 32-bit word *a*.

    >>> hex(subWord(0x00102030))
    '0x63cab704'
    >>> hex(subWord(0x40506070))
    '0x953d051'
    >>> hex(subWord(0x8090a0b0))
    '0xcd60e0e7'
    >>> hex(subWord(0xc0d0e0f0))
    '0xba70e18c'
    """
    out = 0
    # Process bytes most-significant first so the result keeps the same order.
    for shift in (24, 16, 8, 0):
        out = (out << 8) | sub_byte((a >> shift) & 0xff)
    return out
def rotWord(a, i):
    """Rotate the 32-bit word *a* left by *i* bytes.

    >>> hex(rotWord(0x09cf4f3c, 1))
    '0xcf4f3c09'
    >>> hex(rotWord(0x2a6c7605, 1))
    '0x6c76052a'
    >>> hex(rotWord(0x2a6c7605, 2))
    '0x76052a6c'
    """
    carried = a >> (32 - 8 * i)
    shifted = (a << 8 * i) & 0xffffff00
    return shifted | carried
def key_expansion(key):
    """Expand an AES cipher key (list of bytes) into the round-key word schedule.

    key: byte list of length 16/24/32 (N_k = 4/6/8 words).
    Returns a list of 32-bit words.
    NOTE(review): FIPS-197 requires 4*(Nr+1) schedule words; this loop
    produces 4*rounds — confirm that get_rounds() already returns Nr+1.
    """
    N_k = len(key) // 4
    rounds = get_rounds(N_k)
    expanded_key = []
    # Seed the schedule with the original key, one 32-bit word per 4 bytes.
    for i in range(N_k):
        expanded_key.append(combine_byte(key[i*4: (i+1)*4]))
    for i in range(N_k, 4*(rounds)):
        W_prev = expanded_key[-1]
        W_i_N = expanded_key[i - N_k]
        if i % N_k == 0:
            # Start of a key-length block: rotate, substitute, add round constant.
            sub = subWord(rotWord(W_prev, 1))
            r_word = r_con[i // N_k]
            # NOTE: rebinds `key`, shadowing the parameter (no longer needed
            # after seeding above).
            key = W_i_N ^ sub ^ r_word
        elif N_k > 6 and i % N_k == 4:
            # AES-256 only: extra SubWord step mid-block.
            sub = subWord(W_prev)
            key = W_i_N ^ sub
        else:
            key = W_i_N ^ W_prev
        expanded_key.append(key)
    return expanded_key
if __name__ == '__main__':
assert hex(subWord(0x00102030)) == '0x63cab704' |
from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize, sent_tokenize
import fileinput
def ner_tagger(filename):
    """Tag every whitespace token of *filename* with the Stanford 7-class NER model.

    Returns a list of (token, tag) tuples; tag is 'O' for non-entity tokens.
    NOTE(review): the classifier/jar paths and the ISO-8859-1 read encoding
    are machine-specific — confirm before reuse.
    """
    st = StanfordNERTagger('/Users/avnish/stanford-ner-2017-06-09/classifiers/english.muc.7class.distsim.crf.ser.gz','/Users/avnish/stanford-ner-2017-06-09/stanford-ner.jar',encoding = 'utf-8')
    with open(filename, 'r',encoding = "ISO-8859-1") as f:
        text = f.read()
    # Plain whitespace split (not word_tokenize) — punctuation stays attached.
    tokenize = text.split()
    tagger = st.tag(tokenize)
    return tagger
    # return tagger
def get_continuous_chunks(tagged_sent):
    """Group consecutive non-'O' (token, tag) pairs into entity chunks.

    tagged_sent: iterable of (token, tag) tuples as produced by the tagger.
    Returns a list of chunks, each a list of (token, tag) tuples.
    """
    chunks = []
    run = []
    for token, tag in tagged_sent:
        if tag == "O":
            # An 'O' tag ends the current run of entity tokens, if any.
            if run:
                chunks.append(run)
                run = []
        else:
            run.append((token, tag))
    # Flush a trailing run that reaches the end of the input.
    if run:
        chunks.append(run)
    return chunks
# Driver: tag a sample file, extract named-entity chunks, then anonymise the
# file in place by replacing each entity with 'XXXXXX'.
filename = '/Users/avnish/LearningNewstuff/Data_Analysis/Annonymizer/sample1.txt'
tagger = ner_tagger(filename)
print(tagger)
named_entities = get_continuous_chunks(tagger)
# Join each chunk's tokens back into a surface string, e.g. "John Smith".
data_str = [" ".join([token for token, tag in ne]) for ne in named_entities]
# The same strings, paired with the tag of the chunk's first token.
data_str_tag = [(" ".join([token for token, tag in ne]), ne[0][1]) for ne in named_entities]
print(data_str)
print()  # FIX: bare `print` is a no-op expression in Python 3; print() emits the intended blank line
print(data_str_tag)
print()  # FIX: same as above
for line in fileinput.input(filename, inplace=True):
    for entity in data_str:
        line = line.replace(entity, 'XXXXXX')
    # fileinput temporarily redirects sys.stdout to the replacement file, so
    # print() writes the (redacted) line back; end='' avoids doubled newlines
    # since the line already ends with one.
    print(line, end='')
print(data_str)
|
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence,pad_packed_sequence
# Maximum padded question length used when unpacking the LSTM output.
MAX_QUESTION_LEN = 20
# Vocabulary special tokens; PAD_TOKEN presumably maps to embedding index 0
# (matching padding_idx=0 below) — confirm against the vocabulary builder.
UNKNOWN_TOKEN = "<unk>"
PAD_TOKEN = "<pad>"
SPECIAL_TOKENS = [PAD_TOKEN, UNKNOWN_TOKEN]
class LSTM_question(nn.Module):
    """Bi-directional LSTM encoder for padded question token sequences."""

    def __init__(self, word_vocab_size, word_embedding_dim, hidden_dim, out_dim: int,
                 batch_size, num_layers=2, p_dropout = 0.3):
        super(LSTM_question, self).__init__()
        self.batch_size = batch_size
        # padding_idx=0 keeps the <pad> embedding fixed at the zero vector.
        self.word_embedding = nn.Embedding(word_vocab_size, word_embedding_dim, padding_idx=0)
        # NOTE(review): out_dim is accepted but unused in this class — confirm callers.
        self.encoder = nn.LSTM(
            input_size=word_embedding_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
            dropout=p_dropout,
        )

    def forward(self, question, question_len):
        # [batch, seq_len] token ids -> [batch, seq_len, emb_dim]
        embedded = self.word_embedding(question)
        # Pack so the LSTM skips the padding positions.
        packed = pack_padded_sequence(embedded, question_len, batch_first=True, enforce_sorted=False)
        encoded, _ = self.encoder(packed)
        # Unpack back to a fixed-length padded tensor [batch, MAX_QUESTION_LEN, 2*hidden].
        padded, _ = pad_packed_sequence(encoded, total_length=MAX_QUESTION_LEN, batch_first=True)
        return padded
#!/usr/bin/python3
import pandas as pd
import quandl
quandl.ApiConfig.api_key = 'UqFxQsZQUXnBRfLxAnsp'
df = quandl.get_table('WIKI/PRICES')
df = df[["adj_open","adj_high","adj_low","adj_close","adj_volume"]]
df['HL-PCT']=(df['adj_high']-df['adj_close'])/df["adj_close"] *100.0
df['HL-change']=(df['adj_close']-df['adj_open'])/df["adj_close"] *100.0
df = df[['adj_close',"adj_volume",'HL-PCT','HL-change']]
print(df)
|
import openpyxl
from openpyxl.chart import RadarChart, Reference
wb = openpyxl.load_workbook(r"..\data\radar_chart.xlsx")
sh = wb.active
data = Reference(sh, min_col=2, max_col=4, min_row=1, max_row=sh.max_row)
labels = Reference(sh, min_col=1, min_row=2, max_row=sh.max_row)
chart = RadarChart()
#預設為standard
#filled為填色
#chart.type = "filled"
chart.title = "各部門業績"
chart.add_data(data, titles_from_data=True)
chart.set_categories(labels)
sh.add_chart(chart, "F2")
wb.save(r"..\data\radar_chart.xlsx") |
from collections import deque
from typing import Optional, List
class TreeNode:
    """A binary-tree node holding an int value and optional left/right children."""

    def __init__(self, val: int):
        self.val: int = val
        self.left: Optional["TreeNode"] = None
        self.right: Optional["TreeNode"] = None

    def __eq__(self, other: object) -> bool:
        # Structural equality: same value and recursively equal subtrees.
        if not isinstance(other, TreeNode):
            return False
        return (self.val, self.left, self.right) == (other.val, other.left, other.right)

    def __str__(self) -> str:
        return f"({self.val} {self.left} {self.right})"

    def __repr__(self) -> str:
        return f"TreeNode(val={self.val}, left={self.left}, right={self.right})"
def new_tree(*nums: Optional[int]) -> Optional["TreeNode"]:
    """Build a binary tree from level-order values; None marks a missing child."""
    if not nums:
        return None
    root = TreeNode(nums[0])
    pending = deque([root])
    idx = 1
    total = len(nums)
    while idx < total:
        parent = pending.popleft()
        # Consume up to two values: left child first, then right child.
        for side in ("left", "right"):
            if idx >= total:
                break
            value = nums[idx]
            idx += 1
            if value is not None:
                child = TreeNode(value)
                setattr(parent, side, child)
                pending.append(child)
    return root
def is_valid_bst(root: "TreeNode") -> bool:
    """Return True iff the tree rooted at *root* satisfies the BST property.

    Fixes the original, which only compared each node with its immediate
    children: a violation deeper in a subtree (e.g. a node in the right
    subtree smaller than the root) went undetected. Every node is now
    checked against (min, max) bounds inherited from all its ancestors.
    Equal values are tolerated, matching the original's non-strict
    child comparisons.
    """
    def _within(node, low, high) -> bool:
        if not node:
            return True
        # `None` bound means unbounded on that side.
        if low is not None and node.val < low:
            return False
        if high is not None and node.val > high:
            return False
        return _within(node.left, low, node.val) and _within(node.right, node.val, high)

    return _within(root, None, None)
def height(root: "TreeNode") -> int:
    """Return the height of the tree (0 for an empty tree).

    Side effect: caches the computed height on every node as ``node.height``.
    """
    if not root:
        return 0
    root.height = 1 + max(height(root.left), height(root.right))
    return root.height
def is_valid_avl(root: "TreeNode") -> bool:
    """Return True iff the tree is a valid AVL tree.

    Fixes the original, which verified the balance factor only at the root:
    an unbalanced subtree deeper in the tree went undetected. Now every
    node's subtree heights may differ by at most 1, and the tree must also
    satisfy the BST property. (Unlike the original, this no longer writes a
    ``height`` attribute onto nodes as a side effect.)
    """
    if not root:
        return True
    if not is_valid_bst(root):
        return False

    def _height_or_fail(node):
        # Returns the subtree height, or -1 to signal "unbalanced below here".
        if not node:
            return 0
        lh = _height_or_fail(node.left)
        if lh < 0:
            return -1
        rh = _height_or_fail(node.right)
        if rh < 0:
            return -1
        if abs(lh - rh) > 1:
            return -1
        return max(lh, rh) + 1

    return _height_or_fail(root) >= 0
def inorder_traverse(root: "TreeNode") -> List[int]:
    """Return node values in left-root-right order (iterative, explicit stack)."""
    values: List[int] = []
    stack = []
    node = root
    while stack or node:
        # Descend as far left as possible, remembering the path.
        while node:
            stack.append(node)
            node = node.left
        node = stack.pop()
        values.append(node.val)
        node = node.right
    return values
def preorder_traverse(root: "TreeNode") -> List[int]:
    """Return node values in root-left-right order (iterative, explicit stack)."""
    if not root:
        return []
    values = []
    stack = [root]
    while stack:
        node = stack.pop()
        values.append(node.val)
        # Push right first so the left subtree is processed first.
        if node.right:
            stack.append(node.right)
        if node.left:
            stack.append(node.left)
    return values
def postorder_traverse(root: "TreeNode") -> List[int]:
    """Return node values in left-right-root order."""
    def _walk(node, out):
        # Children first, then the node itself.
        if node is not None:
            _walk(node.left, out)
            _walk(node.right, out)
            out.append(node.val)

    values: List[int] = []
    _walk(root, values)
    return values
def level_order_traverse(root: "TreeNode") -> List[int]:
    """Return node values in breadth-first (level) order."""
    if not root:
        return []
    values = []
    frontier = deque([root])
    while frontier:
        node = frontier.popleft()
        values.append(node.val)
        # Enqueue existing children for the next levels.
        frontier.extend(child for child in (node.left, node.right) if child)
    return values
|
import io, os, sys, csv, random, logging
from jacks.infer import LOG, inferJACKS
from jacks.jacks_io import createPseudoNonessGenes, readControlGeneset, createGeneSpec, createSampleSpec, getJacksParser, collateTestControlSamples, writeJacksWResults
from jacks.preprocess import loadDataAndPreprocess
py_cmd = 'python'
def combineSingleResults(single_jacks_results):
    """Merge per-sample single-screen JACKS results into one result dict.

    y, tau, x1, x2 are taken from the first sample's result; the w1 and w2
    entries are collected across all samples (one element per sample).
    """
    first = single_jacks_results[0]
    combined = {}
    for gene in first:
        y, tau, x1, x2, _w1, _w2 = first[gene]
        w1_all = [res[gene][4] for res in single_jacks_results]
        w2_all = [res[gene][5] for res in single_jacks_results]
        combined[gene] = (y, tau, x1, x2, w1_all, w2_all)
    return combined
def filterSampleSpec(sample_spec, cell_line, ctrl_spec):
    """Restrict *sample_spec* to *cell_line* and its control sample.

    Files with no matching samples are dropped from the returned dict.
    """
    wanted = {cell_line, ctrl_spec[cell_line]}
    filtered = {}
    for filename, entries in sample_spec.items():
        kept = [(sample_id, colname) for sample_id, colname in entries
                if sample_id in wanted]
        if kept:
            filtered[filename] = kept
    return filtered
def filterCtrlSpec(ctrl_spec, cell_line):
    """Return a ctrl_spec containing only *cell_line* (sample) and its control."""
    control = ctrl_spec[cell_line]
    # The control maps to itself so it is recognised as a control sample.
    return {cell_line: control, control: control}
if __name__ == '__main__':
    # Run JACKS once per test sample against its control, optionally limited
    # to one cell line or fanned out as one subprocess per cell line.
    LOG.setLevel(logging.WARNING)
    parser = getJacksParser()
    parser.add_argument("--cell_line",
                        type=str,
                        default=None,
                        help="cell line to run")
    parser.add_argument("--separate",
                        action='store_true',
                        default=False,
                        help="Run cell lines separately")
    args = parser.parse_args()
    outprefix = args.outprefix
    if '/' in outprefix and not os.path.exists(os.path.dirname(outprefix)): os.makedirs(os.path.dirname(outprefix))
    # Load the specification of samples to include
    LOG.info('Loading sample specification')
    sample_spec, ctrl_spec, sample_num_reps = createSampleSpec(args.countfile, args.replicatefile, args.rep_hdr,
                                                               args.sample_hdr, args.common_ctrl_sample, args.ctrl_sample_hdr)
    if args.cell_line is not None:
        # Restrict this run to one cell line (plus its control sample).
        sample_spec = filterSampleSpec(sample_spec, args.cell_line, ctrl_spec)
        ctrl_spec = filterCtrlSpec(ctrl_spec, args.cell_line)
        outprefix += ('_' + args.cell_line)
    elif args.separate:
        # Re-invoke this script once per non-control cell line, then exit.
        for cell_line in ctrl_spec:
            if ctrl_spec[cell_line] == cell_line: continue
            cmd = '%s --cell_line=%s' % (' '.join(sys.argv), cell_line)
            # FIX: a comma was missing between py_cmd and cmd (SyntaxError).
            os.system('%s %s' % (py_cmd, cmd))
        exit()
    # Load the mappings from guides to genes
    LOG.info('Loading gene mappings')
    gene_spec = createGeneSpec(args.guidemappingfile, args.sgrna_hdr, args.gene_hdr, ignore_blank_genes=args.ignore_blank_genes)
    # Load negative control guides (if any)
    ctrl_geneset = readControlGeneset(args.ctrl_genes) if args.ctrl_genes is not None else set()
    # Load the data and preprocess
    LOG.info('Loading data and pre-processing')
    data, meta, sample_ids, genes, gene_index = loadDataAndPreprocess(sample_spec, gene_spec,ctrl_spec=ctrl_spec, normtype=args.norm_type, ctrl_geneset=ctrl_geneset)
    gene_grnas = {gene: [x for x in meta[gene_index[gene], 0]] for gene in gene_index}
    testdata, ctrldata, test_sample_idxs = collateTestControlSamples(data, sample_ids, ctrl_spec)
    sample_ids_without_ctrl = [sample_ids[idx] for idx in test_sample_idxs]
    # Run all samples against their controls, one inference per test sample.
    LOG.info('Running Single JACKS inference')
    single_jacks_results = []
    for ts in range(testdata.shape[1]):
        single_jacks_results.append(inferJACKS(gene_index, testdata[:,[ts],:], ctrldata[:,[ts],:], w_only=True))
    jacks_results = combineSingleResults(single_jacks_results)
    # Add a set of pseudo genes, created by randomly sampling from guides targeting genes in the control set
    if args.n_pseudo > 0 and len(ctrl_geneset) > 0:
        LOG.info('Running Single JACKS inference on %d pseudogenes' % args.n_pseudo)
        pseudo_gene_index = createPseudoNonessGenes(gene_index, ctrl_geneset, args.n_pseudo)
        pseudo_single_results = []
        for ts in range(testdata.shape[1]):
            pseudo_single_results.append(inferJACKS(pseudo_gene_index, testdata[:,[ts],:], ctrldata[:,[ts],:], w_only=True))
        jacks_pseudo_results = combineSingleResults(pseudo_single_results)
        writeJacksWResults(outprefix + '_pseudo_noness', jacks_pseudo_results, sample_ids_without_ctrl, write_types=['', '_std'] )
    # Write out the results (p-values/FDR need more than one control gene).
    LOG.info('Writing Single JACKS results')
    if len(ctrl_geneset) > 1:
        writeJacksWResults(outprefix, jacks_results, sample_ids_without_ctrl, ctrl_geneset=ctrl_geneset, write_types=['', '_std', '_pval', '_fdr'], fdr=args.fdr, fdr_thresh_type=args.fdr_thresh_type)
    else:
        writeJacksWResults(outprefix, jacks_results, sample_ids_without_ctrl, ctrl_geneset=ctrl_geneset, write_types=['', '_std'])
    # Write pseudo-normalized pvalue results
    if args.n_pseudo > 0 and len(ctrl_geneset) > 0:
        LOG.info('Writing pseudo-normalized Single JACKS results')
        for gene in jacks_pseudo_results: jacks_results[gene] = jacks_pseudo_results[gene]
        pseudo_genes = set([gene for gene in jacks_pseudo_results])
        writeJacksWResults(outprefix + '_pseudo_combined', jacks_results, sample_ids_without_ctrl, ctrl_geneset=pseudo_genes, write_types=['', '_std', '_pval', '_fdr'], fdr=args.fdr, fdr_thresh_type=args.fdr_thresh_type)
|
# import sys
# import os
# sys.path.append(os.path.normpath(os.path.join(
# os.path.dirname(os.path.abspath(__file__)), '..')))
|
import datetime
from django import forms
from django.conf import settings
from django.core.validators import ValidationError
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
from crispy_forms.bootstrap import StrictButton
from crispy_forms.layout import Layout, ButtonHolder, Submit
from crispy_forms.helper import FormHelper
from .models import Task, Comment, File
class CSVForm(forms.Form):
    """Date-range form used to export data as CSV (crispy inline rendering)."""

    date_from = forms.DateField()
    date_to = forms.DateField()

    def __init__(self, *args, **kwargs):
        super(CSVForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.html5_required = True
        self.helper.form_tag = False
        self.helper.field_template = 'bootstrap3/layout/inline_field.html'
        self.helper.layout = Layout(
            'date_from',
            'date_to',
            StrictButton('Get CSV', type='submit', css_class='btn-default'),
        )

    def clean(self):
        """Cross-field check: the range must not be inverted."""
        cleaned_data = super(CSVForm, self).clean()
        date_from = cleaned_data.get('date_from')
        date_to = cleaned_data.get('date_to')
        # FIX: if either field failed its own validation, .get() returns None
        # and the original comparison raised TypeError instead of reporting a
        # normal form error. Only compare when both dates are present.
        if date_from and date_to and date_from > date_to:
            raise ValidationError(_('Date to must be in future'),
                                  code=_('invalid'))
        return cleaned_data
class TaskForm(forms.ModelForm):
    """Create/edit form for a Task's title and description."""

    class Meta:
        model = Task
        fields = ['title', 'description']

    def __init__(self, *args, **kwargs):
        super(TaskForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.html5_required = True
        helper.form_show_errors = True
        helper.layout = Layout(
            'title',
            'description',
            ButtonHolder(Submit('submit', 'Submit', css_class='button')),
        )
        self.helper = helper
class ExpectDateForm(forms.ModelForm):
    """Form for setting a task's expected completion date."""

    class Meta:
        model = Task
        fields = ['expect_date', ]

    def clean_expect_date(self):
        """Require a date that is today or later."""
        expect_date = self.cleaned_data.get('expect_date')
        # Guard clauses instead of nested if/else: reject missing or past dates.
        if not expect_date:
            raise forms.ValidationError(_('Expect Date empty'),
                                        code=_('invalid'))
        if expect_date < datetime.date.today():
            raise forms.ValidationError(_('Expect Date must be in future'),
                                        code=_('invalid'))
        return expect_date
class FileForm(forms.ModelForm):
    """Upload form for a task's file attachment."""
    class Meta:
        model = File
        fields = ['file', ]
    def clean_file(self):
        """Validate the uploaded file's extension, content type and size."""
        file = self.cleaned_data.get('file')
        if file:
            # A name with no '.' has no extension -> reject.
            if len(file.name.split('.')) == 1:
                raise forms.ValidationError(_('File type is not supported'),
                                            code=_('invalid'))
            if file.content_type in settings.TASK_UPLOAD_FILE_TYPES:
                # NOTE(review): `file._size` is a private attribute of Django's
                # UploadedFile; the public, documented API is `file.size` —
                # confirm the Django version in use before changing.
                if file._size > settings.TASK_UPLOAD_FILE_MAX_SIZE:
                    raise forms.ValidationError(
                        _('Please keep filesize under %s. Current filesize %s')
                        % (filesizeformat(
                            settings.TASK_UPLOAD_FILE_MAX_SIZE),
                            filesizeformat(file._size)), code=_('invalid'))
            else:
                # Content type not in the configured whitelist.
                raise forms.ValidationError(_('File type is not supported'),
                                            code=_('invalid'))
        return file
class CommentForm(forms.ModelForm):
    """Inline comment form rendered without labels or its own <form> tag."""

    class Meta:
        model = Comment
        fields = ['text', ]
        widgets = {
            'text': forms.Textarea(attrs={
                'rows': 4, 'placeholder': _('Type the comment...')}),
        }

    def __init__(self, *args, **kwargs):
        super(CommentForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.html5_required = True
        helper.form_show_errors = True
        helper.form_show_labels = False
        helper.form_tag = False
        helper.layout = Layout('text')
        self.helper = helper
|
"""Entry point script; implements CLI."""
import argparse
import msvcrt
import sys
from src import classify, train
def training_prompt():
    """
    Warn that training overwrites the saved model; Y continues, N aborts.

    Blocks on single-key console input (Windows-only: msvcrt).
    """
    print('WARNING: Training will overwrite the saved model (if it exists). EXECUTE Y/N?')
    while True:
        key = msvcrt.getch().decode().lower()
        if key == 'n':
            sys.exit('Training aborted.')
        if key == 'y':
            print('Loading...')
            return
        print('Press either the Y or N key.')
def parse_arguments():
    """
    Parses the CLI for specific arguments via the `argparse` library.
    Returns:
        - A dictionary mapping each argument name to the given value
          (unspecified arguments fall back to the defaults set below).
    """
    help_texts = {
        'experiment': '(str) Either \'mnist\' or \'cifar10\'.',
        'train': '(flag) Tells program to train the model.',
        'resuming': ('(flag) Tells program to resume training off of a saved model '
                     'whose path is given by the --savepath arg.'),
        'steps': '(int) Indicates how many images to train on (one gradient update per image).',
        'classify': ('(flag) Tells program to classify something using the saved '
                     'model from the --savepath arg.'),
        'source': '(str) A path to either an image or directory of images to classify.',
    }
    parser = argparse.ArgumentParser()
    parser.add_argument('--experiment', type=str, help=help_texts['experiment'])
    parser.add_argument('--train', action='store_true', help=help_texts['train'])
    parser.add_argument('--resuming', action='store_true', help=help_texts['resuming'])
    parser.add_argument('--steps', type=int, help=help_texts['steps'])
    parser.add_argument('--classify', action='store_true', help=help_texts['classify'])
    parser.add_argument('--source', type=str, help=help_texts['source'])
    parser.set_defaults(experiment=None, train=False, resuming=False,
                        steps=None, classify=False, source=None)
    return vars(parser.parse_args())
def main(args):
    """
    Executes the program.
    Parameters:
    - args (dict, str -> ?)
        - Maps CLI arguments to their values.
    """
    if args['train']:
        training_prompt()
        train.main(args['experiment'], args['steps'], args['resuming'])
    elif args['classify']:
        # NOTE(review): parse_arguments() never defines 'savepath' or
        # 'label_dict', so the original subscripting raised KeyError as soon
        # as --classify was used; .get() avoids the crash until the matching
        # CLI args are added. TODO: add --savepath / --label_dict options.
        prediction = classify.main(args['source'], args.get('savepath'), args.get('label_dict'))
        # FIX: isinstance() is the idiomatic (subclass-aware) type check.
        if isinstance(prediction, str):
            # Single image -> a single label string.
            print(prediction)
        elif isinstance(prediction, dict):
            # Directory -> mapping of image path to label.
            for key, value in prediction.items():
                print(key, value)
if __name__ == '__main__':
    # Script entry point: parse CLI args, then train or classify accordingly.
    args = parse_arguments()
    main(args)
|
import time
from selenium import webdriver
# Pick the WebDriver implementation based on the configured browser name.
browser = 'ie'
if browser == 'chrome':
    driver = webdriver.Chrome(executable_path="C:/Users/Dell/PycharmProjects/5_Class/drivers/chromedriver.exe")
elif browser == 'firefox':
    driver = webdriver.Firefox(executable_path="C:/Users/Dell/PycharmProjects/5_Class/drivers/geckodriver.exe")
elif browser == 'ie':  # FIX: the colon was missing here (SyntaxError)
    # FIX: IEDriver must be driven by webdriver.Ie, not webdriver.Chrome.
    driver = webdriver.Ie(executable_path="C:/Users/Dell/PycharmProjects/5_Class/drivers/IEDriver.exe")
else:
    print("Provide appropriate browser name")
    # Without a driver, the next line would raise NameError; exit cleanly.
    raise SystemExit(1)
driver.get("http://makemytrip.com")
time.sleep(5)
from urllib.request import urlopen #used to open remote object and read it
from urllib.error import HTTPError #it is used to through an exception if any library error is there
from urllib.error import URLError #to check any url exception is there
from bs4 import BeautifulSoup
try:
    # Attempt to fetch the page (deliberately nonexistent host in this demo).
    html = urlopen('https://pythonscrapingthisurldoesnotexist.com')
except HTTPError as err:
    # The server responded with an HTTP error status (404, 500, ...).
    print(err)
except URLError:
    # The host could not be resolved or reached at all.
    print('The server could not be found!')
else:
    # No exception: the page opened successfully.
    print('It Worked!')
|
def color(code):
    """Return a colorizer for the ANSI foreground *code* (a string like '31').

    The returned function takes (text, bold=False) and wraps *text* in the
    escape sequence for *code*, resetting the foreground to 39 afterwards.
    ``bold=True`` prepends the ANSI bold attribute ('1;').
    """
    def paint(text, bold=False):
        attr = '1;{}'.format(code) if bold else code
        return '\033[{new}m{text}\033[{old}m'.format(new=attr, text=text, old=39)
    return paint
# Pre-built colorizers for the standard ANSI foreground codes (30-37, 39).
grey = color('0')  # NOTE(review): '0' is the ANSI reset attribute, not a grey foreground — confirm intent
black = color('30')
red = color('31')
green = color('32')
yellow = color('33')
blue = color('34')
magenta = purple = color('35')
cyan = color('36')
white = color('37')
default = color('39')  # 39 = terminal default foreground
|
import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
import yaml
default_colors=[(255,96,208),(1,0,255),(255,0,0),(255,255,0),(0,255,0),(160,128,96),(255,128,0),(153,0,153),(153,153,0),(102,0,0)] #default colors for plotting phases
default_colors_2=[(255,128,0),(153,0,153),(153,153,0),(102,0,0),(253,76,0),(0,25,51),(255,255,102)]
def create_new_data_frame(scorer,config_file):
    ''' creating a new data frame for data from 2 cameras

    scorer: top level of the resulting column MultiIndex (DLC-style).
    config_file: YAML file with 'upper_body_parts' and 'bottom_body_parts' lists.
    Returns an empty DataFrame with (scorer, body part, coordinate) columns.
    '''
    with open(config_file) as f:
        cfg=yaml.full_load(f)
    # Union of upper and bottom body parts, preserving upper-part order.
    # NOTE(review): .append mutates the list object loaded from the config.
    body_part=cfg['upper_body_parts' ]
    for x in cfg['bottom_body_parts']:
        if x not in body_part:
            body_part.append(x)
    coordinate=['x','y','likelihood']
    # Three-level column index: (scorer, body part, coordinate).
    col_levels=[scorer,body_part,coordinate]
    cols=pd.MultiIndex.from_product(col_levels)
    # Pre-allocate a generous 100k empty rows (presumably one per video frame).
    new_df=pd.DataFrame(index=range(100000),columns=cols)
    return new_df
def find_coords(frame, body_parts):
    """Locate each body part in *frame* as the mean pixel position of its colour.

    frame: HxWx3 image where each body part is painted in a known RGB colour
    (parts are matched to colours positionally).
    Returns a DataFrame indexed by ['x','y'] with one column per body part;
    'x' holds the (rounded) mean column and 'y' the mean row coordinate.
    """
    palette = [(255,96,208),(1,0,255),(255,0,0),(255,255,0),(0,255,0),
               (160,128,96),(255,128,0),(153,0,153),(153,153,0),(102,0,0)]
    coords = pd.DataFrame(index=['x','y'], columns=body_parts)
    coords.columns.name = 'bodyParts'
    for part, colour in zip(body_parts, palette):
        # Rows/cols of every pixel that exactly matches this part's colour.
        rows, cols = np.where(np.all(frame == colour, axis=-1))
        # Centre of the coloured blob, rounded to whole pixels.
        coords[part] = np.around([np.mean(cols), np.mean(rows)])
    return coords
def update_data_frame(frame_number, data_frame, update):
    """Write the x/y coordinates from *update* into row *frame_number*.

    update: DataFrame indexed by ['x','y'] with one column per body part.
    data_frame: MultiIndex-column frame with (scorer, body part, coordinate)
    columns; only the 'x'/'y' entries are touched.
    """
    slicer = pd.IndexSlice
    row_x, row_y = update.index[0], update.index[1]
    for part in update.columns:
        data_frame.loc[frame_number, slicer[:, part, ['x', 'y']]] = (
            update[part][row_x], update[part][row_y])
def fix_label(target_body_part,ref_body_part,target_frame_number,ref_frame_number,target_df,ref_df,new_data_frame,index):
    ''' fixing wrong labeling based on a corresponding frame from another camera, and updating the relevant data frame.
    inputs:
        target_body_part: the incorrectly labeled body part (string)
        ref_body_part: a reference body part for fixing (usually head/tail, from another camera) (string)
        target_frame_number: the frame number of the mislabeled bodypart (integer)
        ref_frame_number: a corresponding frame from another camera (integer)
        target_df: data frame of target body_part (pandas data frame)
        ref_df: data frame with data from another camera
        new_data_frame / index: frame and row receiving the updated likelihood
    '''
    #if likelihood is insufficient , fix the label coordinates
    idx=pd.IndexSlice
    # Only repair labels the network was unsure about (likelihood < 0.2).
    if float(target_df.loc(axis=1)[:,target_body_part,'likelihood'].values[target_frame_number])<0.2:
        # Offset between reference and target body parts in the other camera.
        ref_dist=ref_df.loc[ref_frame_number,idx[:,ref_body_part,['x','y']]].values-ref_df.loc[ref_frame_number,idx[:,target_body_part,['x','y']]].values
        # Apply that offset to the reference body part in the target camera.
        target_label=target_df.loc[target_frame_number,idx[:,ref_body_part,['x','y']]].values-ref_dist
        target_df.loc[target_frame_number,idx[:,target_body_part,['x','y']]]=target_label #updating the data frame with the fixed label coords
        # Marker string records that these coordinates were synthesised here.
        target_df.loc[target_frame_number,idx[:,target_body_part,['likelihood']]]='label was editted using fix_label'
        update_likelihood(new_data_frame,index,target_body_part,0.5)
def merge_labels(target_frame,source_frame,target_frame_num,source_frame_num,target_df,source_df,config_file,new_data_frame,index,plot=True):
    '''
    merging frames from 2 different cameras, if plot=True the function plots the merged frame and the source frame
    body_parts: the body parts of the source frame (can be imported from the config file)
    Returns (image, target_loc): the target frame with the estimated points
    drawn on it, and a DataFrame of the estimated x/y per body part.
    '''
    with open(config_file) as f:
        cfg=yaml.full_load(f)
    # ref_point is a body part visible in both cameras; distances from it in
    # the source camera are transferred onto the target camera.
    ref_point=cfg['ref_point']
    body_parts=cfg['bottom_body_parts']
    colors=[(255,128,0),(153,0,153),(153,153,0),(102,0,0),(253,76,0),(0,25,51),(255,255,102)]
    Idx=pd.IndexSlice
    source_ref_point=source_df.loc[source_frame_num,Idx[:,ref_point,['x','y']]].values
    target_ref_point=target_df.loc[target_frame_num,Idx[:,ref_point,['x','y']]].values
    source_dist={}
    target_loc=pd.DataFrame(index=['x','y'],columns=body_parts)
    target_loc.columns.name='bodyPart'
    # For each source-camera body part, carry its offset from the reference
    # point over to the target camera's coordinate system.
    for val in body_parts:
        source_dist[val]=source_df.loc[source_frame_num,Idx[:,val,['x','y']]].values-source_ref_point
        result=target_ref_point+source_dist[val]
        target_loc[val]=result[0],result[1]
    # plotting the estimated legs coordinates
    for body_part,color in zip(body_parts,colors):
        image=cv2.circle(target_frame,(int(target_loc[body_part]['x']),int(target_loc[body_part]['y'])),6,color,-1)
    if plot==True:
        f=plt.figure(figsize=(30,15))
        f.add_subplot(1,2,1)
        plt.title('merged frames')
        plt.imshow(target_frame)
        f.add_subplot(1,2,2)
        plt.imshow(source_frame)
        plt.title('source frame')
    # Fill in likelihoods for rows that do not have one yet: the reference
    # point gets 1, transferred parts 0.75, and all remaining parts 1.
    for body_part in body_parts:
        if np.isnan(float(new_data_frame.loc[source_frame_num,Idx[:,body_part,'likelihood']])): ###index=source_frame_num vs target
            if body_part==cfg['ref_point']:
                update_likelihood(new_data_frame,index,body_part,1)
            else:
                update_likelihood(new_data_frame,index,body_part,0.75)
    all_body_parts=set(cfg['bottom_body_parts']+cfg['upper_body_parts'])########
    parts=[i for i in all_body_parts if i not in body_parts] ########
    update_likelihood(new_data_frame,index,parts,1)
    return image,target_loc
def update_likelihood(data_frame, index, body_part, likelihood):
    """Set the 'likelihood' entry for *body_part* (str or list) at row *index*."""
    data_frame.loc[index, pd.IndexSlice[:, body_part, 'likelihood']] = likelihood
|
import time
from Pages.base_page import BasePage
from Utils.locators import *
class Alerts(BasePage):
    """Page object for the alerts demo page (auto-closable and normal alerts)."""

    def __init__(self, driver):
        self.locator = AlertsLocators
        super().__init__(driver)

    def _click_sequence(self, locators):
        # Click each button in turn, pausing so the alert has time to render.
        # (Extracted to remove the four-fold copy/paste in the click methods.)
        for locator in locators:
            self.driver.find_element(*locator).click()
            time.sleep(2)

    def click_autoclosable_buttons(self):
        """Trigger all four auto-closable alerts in order."""
        self._click_sequence([
            self.locator.autoclosable_btn_success,
            self.locator.autoclosable_btn_warning,
            self.locator.autoclosable_btn_danger,
            self.locator.autoclosable_btn_info,
        ])

    def is_success_message_displayed(self):
        return self.driver.find_element(*self.locator.autoclosable_btn_success).is_displayed()

    def is_warning_message_displayed(self):
        return self.driver.find_element(*self.locator.autoclosable_btn_warning).is_displayed()

    def is_danger_message_displayed(self):
        return self.driver.find_element(*self.locator.autoclosable_btn_danger).is_displayed()

    def is_info_message_displayed(self):
        return self.driver.find_element(*self.locator.autoclosable_btn_info).is_displayed()

    def click_normal_buttons(self):
        """Trigger all four normal alerts in order."""
        self._click_sequence([
            self.locator.normal_btn_success,
            self.locator.normal_btn_warning,
            self.locator.normal_btn_danger,
            self.locator.normal_btn_info,
        ])

    def click_close_buttons(self):
        """Dismiss every open alert via its close button."""
        # find_elements already returns a list; the extra list() copy was redundant.
        for button in self.driver.find_elements(*self.locator.close_buttons_group):
            button.click()
            time.sleep(2)
|
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from musee.frontend.app import create_app
from musee.frontend.model import db, KeyWords
from musee.collect_text_data.textFromUrl import TextFromUrl
from musee.keyword_extract.extractKeywords import ExtractKeywords
# Application wiring: create the Flask app, bind Alembic migrations to it, and
# expose the `db` command group through Flask-Script's Manager.
app = create_app()
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
def gen_input_text_keywords(url):
    '''Generate example inputs: fetch *url*, extract its text, and return
    the top-10 extracted keywords.'''
    page_text = TextFromUrl(url).extract_text_from_html()
    extractor = ExtractKeywords(page_text, 10)
    extractor.extract_keywords()
    return extractor.keywords
@manager.command
def seed():
    """Populate the KeyWords table with keywords from two example articles."""
    url1 = "https://www.animalwised.com/blood-in-cat-urine-home-remedies-3068.html"
    keywords1 = gen_input_text_keywords(url1)
    str_keywords1 = ", ".join(str(x) for x in keywords1)
    # NOTE(review): the keyword string is stored in a field named
    # `release_date` — looks like a misnamed column; confirm the model.
    KeyWords(url=url1, release_date=str_keywords1).insert()
    url2 = "https://www.akc.org/expert-advice/health/why-wont-my-dog-eat/"
    keywords2 = gen_input_text_keywords(url2)
    str_keywords2 = ", ".join(str(x) for x in keywords2)
    KeyWords(url=url2, release_date=str_keywords2).insert()
if __name__ == '__main__':
    # Run the Flask-Script manager (e.g. `python manage.py seed` / `db ...`).
    manager.run()
|
"""empty message
Revision ID: 83e3c2ddae30
Revises: 16949f631586
Create Date: 2017-05-25 00:36:45.441822
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '83e3c2ddae30'
down_revision = '16949f631586'
branch_labels = None
depends_on = None
def upgrade():
    """Create the forum_reply table (reply rows keyed by 'rip')."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('forum_reply',
    sa.Column('rip', sa.Integer(), nullable=False),
    sa.Column('text', sa.String(), nullable=False),
    sa.Column('time', sa.DateTime(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('thread_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['thread_id'], ['forum_thread.tid'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.uid'], ),
    sa.PrimaryKeyConstraint('rip')
    )
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the forum_reply table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('forum_reply')
    # ### end Alembic commands ###
|
# coding=utf-8
import unittest
from katas.kyu_4.strip_comments import solution
class StripCommentsTestCase(unittest.TestCase):
    """Tests for the strip_comments kata: everything from a marker character
    to end-of-line (plus trailing whitespace before it) is removed."""
    def test_equals(self):
        # Multiple lines, two different comment markers.
        self.assertEqual(solution(
            'apples, pears # and bananas\ngrapes\nbananas !apples',
            ['#', '!']), 'apples, pears\ngrapes\nbananas')
    def test_equals_2(self):
        # A line that is only a marker collapses to the empty string.
        self.assertEqual(solution('#', ['#', '!']), '')
    def test_equals_3(self):
        # Non-ASCII marker (UTF-8 bytes of the section sign).
        self.assertEqual(solution('\n§', ['#', '\xc2\xa7']), '\n')
|
# Create a function called calc_dollars. In the function body, define a dictionary and store it in a variable named piggyBank. The dictionary should have the following keys defined.
# quarters
# nickels
# dimes
# pennies
# For each coin type, give yourself as many as you like.
# piggyBank = {
# "pennies": 342,
# "nickels": 9,
# "dimes": 32
# }
# Once you have given yourself a large stash of coins in your piggybank, look at each key and perform the appropriate math on the integer value to determine how much money you have in dollars. Store that value in a variable named dollarAmount and print it.
# Given the coins shown above, the output would be 7.07 when you call calc_dollars()
def calc_dollars():
piggy_bank = {
"quarters":143,
"dimes":22,
"nickels":10,
"pennies": 48
}
total_quarters = piggy_bank["quarters"] * .25
total_dimes = piggy_bank["dimes"] * .10
total_nickels = piggy_bank["nickels"] * .05
total_pennies = piggy_bank["pennies"] * .01
dollar_amount = total_pennies + total_nickels + total_dimes + total_quarters
print('$',dollar_amount)
calc_dollars()
#I wasn't sure how to do it this way below...
# for key, value in piggyBank.items():
# if key == "quarters":
# dollars += (value * .25)
# if key == "dimes":
# dollars += (value * .10)
# if key == "nickels":
# dollars +=(value * .05)
# if key == "pennies":
# dollars += (value * .01)
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.testutil.pants_integration_test import run_pants, setup_tmpdir
def test_build_ignore_list() -> None:
    """`--build-ignore` must hide a BUILD file from `list` — and only then."""
    with setup_tmpdir({"dir/BUILD": "target()"}) as tmpdir:
        target_addr = f"{tmpdir}/dir:dir"
        with_ignore = run_pants([f"--build-ignore={tmpdir}/dir", "list", target_addr])
        without_ignore = run_pants(["list", target_addr])
    # Ignored: the goal fails and the error mentions the ignored directory.
    with_ignore.assert_failure()
    assert f"{tmpdir}/dir" in with_ignore.stderr
    # Not ignored: the target resolves and is listed.
    without_ignore.assert_success()
    assert f"{tmpdir}/dir" in without_ignore.stdout
def test_build_ignore_dependency() -> None:
    """`--build-ignore` must also hide a target that another target depends on."""
    sources = {
        "dir1/f.txt": "",
        "dir1/BUILD": "files(sources=['*.txt'])",
        "dir2/f.txt": "",
        "dir2/BUILD": "files(sources=['*.txt'], dependencies=['{tmpdir}/dir1'])",
    }
    with setup_tmpdir(sources) as tmpdir:
        dep_goal = ["dependencies", f"{tmpdir}/dir2/f.txt"]
        with_ignore = run_pants([f"--build-ignore={tmpdir}/dir1"] + dep_goal)
        without_ignore = run_pants(dep_goal)
    # Ignored: resolving the dependency fails, naming the hidden directory.
    with_ignore.assert_failure()
    assert f"{tmpdir}/dir1" in with_ignore.stderr
    # Not ignored: the dependency is reported normally.
    without_ignore.assert_success()
    assert f"{tmpdir}/dir1" in without_ignore.stdout
|
{'city':'北京', 'num':18297}
{'city':'南京', 'num':18223497}
{'city':'上海', 'num':197}
{'city':'扬州', 'num':1823397}
{'city':'泰州', 'num':182297}
{'city':'徐州', 'num':18291247} |
#!/usr/bin/python2.6
###################
# Michael Molho #
# 2014 #
###################
import sys
import struct
if len(sys.argv) != 4:
    sys.stderr.write('Usage ' + sys.argv[0] + ' <template file> <ip> <port> \n')
    sys.exit(1)
# Python 2 script (see shebang): `str` IS bytes here, so the binary template
# can be searched and patched with ordinary string ops. Do not run on
# Python 3 without porting the str/bytes handling.
template = sys.argv[1]
ip = sys.argv[2]
port = sys.argv[3]
raw = open(template, 'rb').read()
# Placeholder pattern baked into the template: a 15-char IP field and a
# 5-char port field, each NUL-terminated.
ref = "AAA.AAA.AAA.AAA" + '\0' + "AAAAA" + '\0'
ip_offset = raw.find(ref)
if ip_offset > 0:
    raw = list(raw)
    # Overwrite the IP placeholder (plus terminating NUL) in place.
    raw[ip_offset:ip_offset+len(ip)+1] = ip + '\0'
    # Port field sits 16 bytes after the IP field (15 chars + NUL).
    port_offset = ip_offset + 16
    raw[port_offset:port_offset+len(port)+1] = port + '\0'
    xor_offset = port_offset + 8
    # 32-bit little-endian flag following the port field (read but unused here).
    use_xor = struct.unpack('<I', ''.join(raw[xor_offset:xor_offset+4]))[0]
    raw = ''.join(raw)
    # Patched binary goes to stdout; status messages go to stderr.
    sys.stdout.write(raw)
    sys.stderr.write("IP => " + ip + "\nPORT => " + port + "\nSucceed !\n")
else:
    sys.stderr.write("Error : IP/Port pattern not found ... \n")
|
import pygame
import os


def _load_frames(folder):
    """Load every image file inside *folder* as a pygame Surface.

    Frames come back in os.listdir order, matching the original per-directory
    list comprehensions this replaces.
    """
    return [pygame.image.load(os.path.join(folder, name)) for name in os.listdir(folder)]


def _load_image(*parts):
    """Load a single image given path components."""
    return pygame.image.load(os.path.join(*parts))


# NOTE(review): the original listed one directory casing but loaded from
# another (e.g. listdir "Robot_Walk" vs load "Robot_walk", "Stevewalk" vs
# "stevewalk"), which only works on case-insensitive filesystems. Each
# directory below now uses the single casing the original passed to
# os.listdir — confirm against the actual asset tree.

##############################
# Robot
enemywalk = _load_frames("sprites/Robot/Robot_Walk")
enemyattackleft = _load_frames("sprites/Robot/Robotattackleft")
enemyattackright = _load_frames("sprites/Robot/Robotattackright")
############################################################################################################################################
# Shooter
shooterwalk = _load_frames("sprites/Shooter/shooterwalk")
# Two independent loads, preserving the original's two distinct Surfaces.
shootershootleft = [
    _load_image("sprites/Shooter", "shootershootleft.png"),
    _load_image("sprites/Shooter", "shootershootleft.png"),
]
shootershootright = [
    _load_image("sprites/Shooter", "shootershootright.png"),
    _load_image("sprites/Shooter", "shootershootright.png"),
]
############################################################################################################################################
# Red guy
redwalking = _load_frames("sprites/Redguy/Redwalk")
redattackingright = _load_frames("sprites/Redguy/Redattackingright")
redattackingleft = _load_frames("sprites/Redguy/Redattackingleft")
# Flyer
flyerwalking = _load_frames("sprites/Flyer/flyerwalk")
flyerattackingleft = _load_frames("sprites/Flyer/flyerattackleft")
flyerattackingright = _load_frames("sprites/Flyer/flyerattackright")
# User
character = _load_image("sprites/User", "char.png")
healthlogo = _load_image("sprites/User", "healthlogo.png")
charshootleft = _load_frames("sprites/User/charshootleft")
charshootright = _load_frames("sprites/User/charshootright")
walkright = _load_frames("sprites/User/walkright")
walkleft = _load_frames("sprites/User/walkleft")
############################################################################################################################################
# Extras / pickups
damage = _load_image("sprites/Items", "damage.png")
ammo_pic = _load_image("sprites/Items", "bullet.png")
health_pic = _load_image("sprites/Items", "healthitem.png")
#######################################
# Boss: Trevor
bosswalk = _load_frames("sprites/Trevor/Trevorwalk")
bossfireleft = _load_frames("sprites/Trevor/Trevorattackleft")
bossfireright = _load_frames("sprites/Trevor/Trevorattackright")
bossdeathleft = _load_image("sprites/Trevor", "bossdeathleft.png")
#####
# Boss: Steve
stevewalk = _load_frames("sprites/Steve/Stevewalk")
steveattackleft = [
    _load_image("sprites/Steve", "steveattackleft.png"),
    _load_image("sprites/Steve", "steveattackleft.png"),
]
steveattackright = [
    _load_image("sprites/Steve", "steveattackright.png"),
    _load_image("sprites/Steve", "steveattackright.png"),
]
stevedeathleft = _load_image("sprites/Steve", "stevedeathleft.png")
############
# Boss: Signus
signuswalk = _load_frames("sprites/Signus/Signuswalk")
signusattackleft = _load_frames("sprites/Signus/signusattackleft")
signusattackright = _load_frames("sprites/Signus/signusattackright")
signusdeathleft = _load_image("sprites/Signus", "signusdeath.png")
# Level backgrounds
area1 = _load_frames("sprites/Backgrounds/Level 1")
area2 = _load_frames("sprites/Backgrounds/Level 2")
area3 = _load_frames("sprites/Backgrounds/Level 3")
|
from urllib.parse import urlparse
from threading import Thread
import http.client, sys
from queue import Queue
import requests
from bs4 import BeautifulSoup
concurrent = 10
def doWork():
    """Worker loop: pull URLs from the shared queue `q`, fetch each page, and
    append every page with a real title to movie-links.txt.

    Pages whose title is the "Page not found" placeholder are skipped.
    Runs forever; intended to be started as a daemon thread.
    """
    while True:
        url = q.get()
        print(url)
        # Timeout keeps a dead server from hanging this worker forever.
        html_content = requests.get(url, timeout=30).text
        soup = BeautifulSoup(html_content, "lxml")
        print((soup.title.text))
        if soup.title.text == "Page not found – KMHD Links":
            print("skipped--" + url + "\n")
        else:
            print("Saved--" + url + "\n")
            # `with` guarantees the handle is closed even if a write fails
            # (the original opened/closed manually and could leak it).
            with open('movie-links.txt', 'a') as file:
                file.write("\n----------------------------------------------------\n")
                file.write(url + "----" + soup.title.text)
        q.task_done()
def getStatus(ourl):
    """Issue a HEAD request for *ourl* and return (status_code, ourl).

    Returns ("error", ourl) on any failure instead of raising.
    """
    conn = None
    try:
        url = urlparse(ourl)
        conn = http.client.HTTPConnection(url.netloc)
        conn.request("HEAD", url.path)
        res = conn.getresponse()
        return res.status, ourl
    # Narrowed from a bare `except`, which also swallowed SystemExit and
    # KeyboardInterrupt.
    except Exception:
        return "error", ourl
    finally:
        # The original leaked the connection; close it on every path.
        if conn is not None:
            conn.close()
def doSomethingWithResult(status, url):
    """Print the (status, url) pair; placeholder hook for richer result handling."""
    result_pair = (status, url)
    print(result_pair)
q = Queue(concurrent * 2)
# Start the daemon worker pool before any URLs are enqueued.
for _ in range(concurrent):
    worker = Thread(target=doWork)
    worker.daemon = True
    worker.start()
try:
    # Alternative source: read URLs from a text file instead.
    # for url in open('urllist.txt'):
    #     q.put(url.strip())
    for item_id in range(50000, 60000):
        q.put("https://example.com/id/file/" + str(item_id))
    q.join()
except KeyboardInterrupt:
    sys.exit(1)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.lint.pyupgrade.rules import PyUpgradeFieldSet, PyUpgradeRequest
from pants.backend.python.lint.pyupgrade.rules import rules as pyupgrade_rules
from pants.backend.python.lint.pyupgrade.subsystem import PyUpgrade
from pants.backend.python.lint.pyupgrade.subsystem import rules as pyupgrade_subsystem_rules
from pants.backend.python.target_types import PythonSourcesGeneratorTarget
from pants.core.goals.fix import FixResult
from pants.core.util_rules import config_files, source_files
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import Address
from pants.engine.target import Target
from pants.testutil.python_interpreter_selection import all_major_minor_python_versions
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Build a RuleRunner wired with the pyupgrade fix rules and their util rules."""
    return RuleRunner(
        rules=[
            *pyupgrade_rules(),
            *pyupgrade_subsystem_rules(),
            *source_files.rules(),
            *config_files.rules(),
            *target_types_rules.rules(),
            # QueryRules let tests request these products directly from the engine.
            QueryRule(FixResult, (PyUpgradeRequest.Batch,)),
            QueryRule(SourceFiles, (SourceFilesRequest,)),
        ],
        target_types=[PythonSourcesGeneratorTarget],
    )
# Source-file fixtures fed to pyupgrade in the tests below.
# see: https://github.com/asottile/pyupgrade#redundant-open-modes
PY_36_GOOD_FILE = "open('hello.txt')"  # already idiomatic; pyupgrade leaves it alone
PY_36_BAD_FILE = "open('jerry.txt', 'r')"  # redundant 'r' mode triggers a rewrite
PY_36_FIXED_BAD_FILE = "open('jerry.txt')"  # expected output for PY_36_BAD_FILE
# see: https://github.com/asottile/pyupgrade#is--is-not-comparison-to-constant-literals
PY_38_BAD_FILE = "x is 920"  # identity comparison to a literal triggers a rewrite
PY_38_FIXED_BAD_FILE = "x == 920"  # expected output for PY_38_BAD_FILE
def run_pyupgrade(
    rule_runner: RuleRunner,
    targets: list[Target],
    *,
    extra_args: list[str] | None = None,
    pyupgrade_arg: str = "--py36-plus",
) -> FixResult:
    """Run the pyupgrade fix rule over *targets* and return the FixResult."""
    options = [
        "--backend-packages=pants.backend.python.lint.pyupgrade",
        f'--pyupgrade-args="{pyupgrade_arg}"',
    ]
    if extra_args:
        options.extend(extra_args)
    rule_runner.set_options(options, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    field_sets = [PyUpgradeFieldSet.create(tgt) for tgt in targets]
    input_sources = rule_runner.request(
        SourceFiles,
        [SourceFilesRequest(fs.source for fs in field_sets)],
    )
    batch = PyUpgradeRequest.Batch(
        "",
        input_sources.snapshot.files,
        partition_metadata=None,
        snapshot=input_sources.snapshot,
    )
    return rule_runner.request(FixResult, [batch])
@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
    "major_minor_interpreter",
    all_major_minor_python_versions(PyUpgrade.default_interpreter_constraints),
)
def test_passing(rule_runner: RuleRunner, major_minor_interpreter: str) -> None:
    """A file that is already modern comes back unchanged on every interpreter."""
    rule_runner.write_files({"f.py": PY_36_GOOD_FILE, "BUILD": "python_sources(name='t')"})
    target = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
    ic_arg = f"--pyupgrade-interpreter-constraints=['=={major_minor_interpreter}.*']"
    result = run_pyupgrade(rule_runner, [target], extra_args=[ic_arg])
    assert result.did_change is False
    assert result.output == rule_runner.make_snapshot({"f.py": PY_36_GOOD_FILE})
def test_convergance(rule_runner: RuleRunner) -> None:
    """pyupgrade is re-run until output converges: %-formatting needs two passes
    (percent-format -> .format() -> f-string)."""
    source = '"%s %s" % (foo, bar)\n'
    rule_runner.write_files({"f.py": source, "BUILD": "python_sources(name='t')"})
    target = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
    result = run_pyupgrade(rule_runner, [target], extra_args=["--pyupgrade-args=--py36-plus"])
    assert result.did_change is True
    assert result.output == rule_runner.make_snapshot({"f.py": 'f"{foo} {bar}"\n'})
def test_failing(rule_runner: RuleRunner) -> None:
    """A file with an outdated idiom gets rewritten to the modern form."""
    rule_runner.write_files({"f.py": PY_36_BAD_FILE, "BUILD": "python_sources(name='t')"})
    target = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
    result = run_pyupgrade(rule_runner, [target])
    assert result.did_change is True
    assert result.output == rule_runner.make_snapshot({"f.py": PY_36_FIXED_BAD_FILE})
def test_multiple_targets(rule_runner: RuleRunner) -> None:
    """When fixing several targets at once, only the outdated file changes."""
    rule_runner.write_files(
        {"good.py": PY_36_GOOD_FILE, "bad.py": PY_36_BAD_FILE, "BUILD": "python_sources(name='t')"}
    )
    targets = [
        rule_runner.get_target(Address("", target_name="t", relative_file_path=path))
        for path in ("good.py", "bad.py")
    ]
    result = run_pyupgrade(rule_runner, targets)
    assert result.did_change is True
    assert result.output == rule_runner.make_snapshot(
        {"good.py": PY_36_GOOD_FILE, "bad.py": PY_36_FIXED_BAD_FILE}
    )
def test_passthrough_args(rule_runner: RuleRunner) -> None:
    """A custom pyupgrade flag (--py38-plus) is passed through and takes effect."""
    rule_runner.write_files(
        {"some_file_name.py": PY_38_BAD_FILE, "BUILD": "python_sources(name='t')"}
    )
    target = rule_runner.get_target(
        Address("", target_name="t", relative_file_path="some_file_name.py")
    )
    result = run_pyupgrade(rule_runner, [target], pyupgrade_arg="--py38-plus")
    assert result.did_change is True
    assert result.output == rule_runner.make_snapshot(
        {"some_file_name.py": PY_38_FIXED_BAD_FILE}
    )
|
import requests  # for making standard html requests
from bs4 import BeautifulSoup  # magical tool for parsing html data
import json  # for parsing data
from pandas import DataFrame as df  # premier library for data organization

# --- request the state-level index page -------------------------------------
page = requests.get("https://locations.familydollar.com/id/")
soup = BeautifulSoup(page.text, 'html.parser')
# page.text for text (most common); page.content for bytes; page.json() for JSON

# --- collect the per-city links ---------------------------------------------
# Each city entry carries class "itemlist"; its first child is an <a href=...>.
dollar_tree_list = soup.find_all(class_='itemlist')
city_hrefs = [item.contents[0]['href'] for item in dollar_tree_list]

# sanity check: show the first couple of links
for href in city_hrefs[:2]:
    print(href)

# --- scrape each city page's structured address -----------------------------
locs_dict = []  # accumulates one address dict per city page
for link in city_hrefs:
    locpage = requests.get(link)  # request page info
    locsoup = BeautifulSoup(locpage.text, 'html.parser')
    # The page embeds structured data as application/ld+json; index 1 holds
    # the store record (index 0 is other metadata).
    locinfo = locsoup.find_all(type="application/ld+json")
    locjson = json.loads(locinfo[1].contents[0])  # convert to json
    locs_dict.append(locjson['address'])  # keep only the address block

# --- organize into a DataFrame and export -----------------------------------
# BUG FIX: DataFrame.from_record does not exist; from_records is the real API.
locs_df = df.from_records(locs_dict)
# drop the unneeded columns "@type" and "addressCountry"
locs_df.drop(['@type', 'addressCountry'], axis=1, inplace=True)
locs_df.head(n=5)
locs_df.to_csv("family_dollar_ID_locations.csv", sep=",", index=False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.