blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-616) | content_id (string, len 40) | detected_licenses (list, len 0-69) | license_type (string, 2 classes) | repo_name (string, len 5-118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, len 2-10.3M) | authors (list, len 1) | author_id (string, len 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
18ea9c1d548e6e3119b4684d8c530279484c3dbf
|
c29993dd92b361f316d43b9288ed60a9c685503b
|
/hw3/hw3_test.py
|
9abc8e0891e5b2329ffa1f5246b110fa812024cf
|
[] |
no_license
|
Che-Cheng-Chang/ML2018SPRING
|
ec6185e687ebd6ae00f4c15fec4a93b3911f5674
|
fbaddbdea0fd9203fb5a8bd5de648d8106a3f195
|
refs/heads/master
| 2021-04-06T10:20:02.819713
| 2018-06-04T08:55:08
| 2018-06-04T08:55:08
| 125,029,492
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
import os
import math
import csv
import sys
import numpy as np
import pandas as pd
import random
from keras.models import load_model
from keras.layers import *
from keras.utils import np_utils, to_categorical
from keras import applications

def load_data():
    # Para.npy stores the normalization statistics: first half mu, second half sigma.
    Para = np.load(os.getcwd() + '/Para/Para.npy')
    path_test = sys.argv[1]
    test = pd.read_csv(path_test)
    x_test = test['feature'].str.split(' ', expand=True).values.astype('float')
    mu = Para[:len(Para) // 2]
    sigma = Para[len(Para) // 2:]
    x_test = (x_test - mu) / (sigma + 1e-21)
    return x_test.reshape(len(x_test), 48, 48, 1)

if sys.argv[3] == 'private':
    filepath = "./model/weights.best_68375.hdf5"
elif sys.argv[3] == 'public':
    filepath = "./model/weights.best_67957.hdf5"
else:
    # Without this guard, an unexpected third argument leaves filepath undefined.
    sys.exit("third argument must be 'private' or 'public'")

x_test = load_data()
model = load_model(filepath)
predict = model.predict(x_test)
result = predict.argmax(axis=1)

# output result
f = open(sys.argv[2], 'w')
f.write('id' + ',' + 'label\n')
for i in range(len(result)):
    f.write(str(i) + ',' + str(int(result[i])) + '\n')
f.close()
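# A minimal sketch of writing the same output with the csv module that this
# script imports but never uses; the helper name is ours, and it is defined
# here for illustration only, not called above.
def write_predictions(path, labels):
    with open(path, 'w', newline='') as out:
        writer = csv.writer(out)
        writer.writerow(['id', 'label'])
        writer.writerows((i, int(label)) for i, label in enumerate(labels))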
|
[
"r06921002@ntu.edu.tw"
] |
r06921002@ntu.edu.tw
|
d222e48fddce5d6ad25228562c1db53f5cd1aa8a
|
13df69a9f0d9cfa0344d6b05f9ed408211c26bf6
|
/game_num.py
|
c9d74de1ca8cda16beccaca975f8af7b77de9115
|
[] |
no_license
|
A-Pustovalov/my_python
|
1e0b850d407a8c123b0f5ca6570b87cca94696cd
|
45bf8da039b3e65563b7dce3e59957f780b1502b
|
refs/heads/master
| 2020-07-28T07:51:55.055500
| 2019-10-31T09:12:54
| 2019-10-31T09:12:54
| 209,355,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
# Sum the squares of the odd digits of the entered number.
l = input('vvod ')
f = list(l)
d = 0
for i in range(0, len(f)):
    if int(f[i]) % 2 != 0:
        d = d + int(f[i]) * int(f[i])
print(d)
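# Example: input '123' keeps the odd digits 1 and 3, so d = 1*1 + 3*3 = 10.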
|
[
"noreply@github.com"
] |
A-Pustovalov.noreply@github.com
|
e5748c3f5af2e72efb037bb2e3604c249722cf57
|
f96ec6d710d82b7774f49669646e36730470a18f
|
/DAMA/model/VGG16.py
|
3b4c180b0da8a61d61f061bf4bd6a6a6d2dd834e
|
[] |
no_license
|
saqib22/Distributed_Deep_Learning_Inference
|
7134448d26bb133bc06ac7379fbeefe65c901cd3
|
93ee911a8dd541eac248fbb4a6550ba8e815b869
|
refs/heads/master
| 2023-08-10T21:02:21.475313
| 2021-09-15T14:24:54
| 2021-09-15T14:24:54
| 390,674,712
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,857
|
py
|
import torch
import torch.nn as tnn
import torchvision.datasets as dsets
import torchvision.transforms as transforms

BATCH_SIZE = 10
LEARNING_RATE = 0.01
EPOCH = 50
N_CLASSES = 25

transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

trainData = dsets.ImageFolder('../data/imagenet/train', transform)
testData = dsets.ImageFolder('../data/imagenet/test', transform)

trainLoader = torch.utils.data.DataLoader(dataset=trainData, batch_size=BATCH_SIZE, shuffle=True)
testLoader = torch.utils.data.DataLoader(dataset=testData, batch_size=BATCH_SIZE, shuffle=False)

def conv_layer(chann_in, chann_out, k_size, p_size):
    layer = tnn.Sequential(
        tnn.Conv2d(chann_in, chann_out, kernel_size=k_size, padding=p_size),
        tnn.BatchNorm2d(chann_out),
        tnn.ReLU()
    )
    return layer

def vgg_conv_block(in_list, out_list, k_list, p_list, pooling_k, pooling_s):
    layers = [conv_layer(in_list[i], out_list[i], k_list[i], p_list[i]) for i in range(len(in_list))]
    layers += [tnn.MaxPool2d(kernel_size=pooling_k, stride=pooling_s)]
    return tnn.Sequential(*layers)

def vgg_fc_layer(size_in, size_out):
    layer = tnn.Sequential(
        tnn.Linear(size_in, size_out),
        tnn.BatchNorm1d(size_out),
        tnn.ReLU()
    )
    return layer

class VGG16(tnn.Module):
    def __init__(self, n_classes=1000):
        super(VGG16, self).__init__()
        # Conv blocks (BatchNorm + ReLU activation added in each block)
        self.layer1 = vgg_conv_block([3, 64], [64, 64], [3, 3], [1, 1], 2, 2)
        self.layer2 = vgg_conv_block([64, 128], [128, 128], [3, 3], [1, 1], 2, 2)
        self.layer3 = vgg_conv_block([128, 256, 256], [256, 256, 256], [3, 3, 3], [1, 1, 1], 2, 2)
        self.layer4 = vgg_conv_block([256, 512, 512], [512, 512, 512], [3, 3, 3], [1, 1, 1], 2, 2)
        self.layer5 = vgg_conv_block([512, 512, 512], [512, 512, 512], [3, 3, 3], [1, 1, 1], 2, 2)
        # FC layers: five 2x2 max-pools reduce the 224x224 input to 7x7, hence 7*7*512.
        self.layer6 = vgg_fc_layer(7 * 7 * 512, 4096)
        self.layer7 = vgg_fc_layer(4096, 4096)
        # Final layer
        self.layer8 = tnn.Linear(4096, n_classes)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        vgg16_features = self.layer5(out)
        out = vgg16_features.view(vgg16_features.size(0), -1)  # flatten per example
        out = self.layer6(out)
        out = self.layer7(out)
        out = self.layer8(out)
        return vgg16_features, out

vgg16 = VGG16(n_classes=N_CLASSES)
vgg16.cuda()

# Loss, Optimizer & Scheduler
cost = tnn.CrossEntropyLoss()
optimizer = torch.optim.Adam(vgg16.parameters(), lr=LEARNING_RATE)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)

# Train the model
for epoch in range(EPOCH):
    avg_loss = 0
    cnt = 0
    for images, labels in trainLoader:
        images = images.cuda()
        labels = labels.cuda()
        # Forward + Backward + Optimize
        optimizer.zero_grad()
        _, outputs = vgg16(images)
        loss = cost(outputs, labels)
        avg_loss += loss.item()
        cnt += 1
        print("[E: %d] loss: %f, avg_loss: %f" % (epoch, loss.item(), avg_loss / cnt))
        loss.backward()
        optimizer.step()
    scheduler.step(avg_loss)

# Test the model
vgg16.eval()
correct = 0
total = 0
with torch.no_grad():  # no gradients needed at evaluation time
    for images, labels in testLoader:
        images = images.cuda()
        _, outputs = vgg16(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted.cpu() == labels).sum()
        print(predicted, labels, correct, total)
print("avg acc: %f" % (100 * correct / total))

# Save the Trained Model
torch.save(vgg16.state_dict(), 'cnn.pkl')
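# A sketch (not part of the original script) for restoring the weights saved above:
#     model = VGG16(n_classes=N_CLASSES)
#     model.load_state_dict(torch.load('cnn.pkl'))
#     model.cuda().eval()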
|
[
"saqib.khan@visionx.io"
] |
saqib.khan@visionx.io
|
799885b4f64b8eebfe298aca2f016e286bad19f0
|
a996556fe89a849a0481d158ecb7acd46ac6f51d
|
/saludar.py
|
8defac7172cdcec5b075bee564b6f69c45449ba8
|
[] |
no_license
|
ricpelo/pro2021
|
2b0f1811709ed66f4bd6f3fe65f5ff15443f2cba
|
e4977bd378be89bd4de7c4645d41a5c1a73981ae
|
refs/heads/master
| 2023-05-07T13:50:54.628567
| 2021-05-17T19:22:38
| 2021-05-17T19:22:38
| 308,722,187
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41
|
py
|
import sys
print('¡Hola!', sys.argv[1])
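# Example: `python saludar.py Mundo` prints "¡Hola! Mundo".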
|
[
"ricpelo@gmail.com"
] |
ricpelo@gmail.com
|
7e03000f563761c8e586bd6e9962786d75d8c028
|
4964e9e91f126a9677697ad7fa84c7f82919e85b
|
/frontend222.py
|
7e4c842672560f15e998eec5108657bd92066a0b
|
[] |
no_license
|
ManuMaddali/Books-Handling-
|
9783ba4733cfca87d3931e0cb3957776c5e2c12d
|
cfb71c66070ea72a10660cd7ae6fa4ccc4dbfd93
|
refs/heads/master
| 2021-02-19T07:54:29.077003
| 2020-03-06T00:11:05
| 2020-03-06T00:11:05
| 245,293,057
| 0
| 0
| null | 2020-03-06T00:13:52
| 2020-03-06T00:02:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,657
|
py
|
from tkinter import *
from backend import Database

"""
This program stores this book information:
Title, Author
Year, ISBN

User can:
View all records
Search an entry
Add entry
Update entry
Delete
Close

Created by: Manu Maddali
"""

database = Database("books.db")

class Window(object):

    def __init__(self, window):
        self.window = window
        self.window.wm_title("BookStore")

        l1 = Label(window, text="Title")
        l1.grid(row=0, column=0)

        l2 = Label(window, text="Author")
        l2.grid(row=0, column=2)

        l3 = Label(window, text="Year")
        l3.grid(row=1, column=0)

        l4 = Label(window, text="ISBN")
        l4.grid(row=1, column=2)

        self.title_text = StringVar()
        self.e1 = Entry(window, textvariable=self.title_text)
        self.e1.grid(row=0, column=1)

        self.author_text = StringVar()
        self.e2 = Entry(window, textvariable=self.author_text)
        self.e2.grid(row=0, column=3)

        self.year_text = StringVar()
        self.e3 = Entry(window, textvariable=self.year_text)
        self.e3.grid(row=1, column=1)

        self.isbn_text = StringVar()
        self.e4 = Entry(window, textvariable=self.isbn_text)
        self.e4.grid(row=1, column=3)

        self.list1 = Listbox(window, height=6, width=35)
        self.list1.grid(row=2, column=0, rowspan=6, columnspan=2)

        sb1 = Scrollbar(window)
        sb1.grid(row=2, column=2, rowspan=6)

        self.list1.configure(yscrollcommand=sb1.set)
        sb1.configure(command=self.list1.yview)

        self.list1.bind('<<ListboxSelect>>', self.get_selected_row)

        b1 = Button(window, text="View all", width=12, command=self.view_command)
        b1.grid(row=2, column=3)

        b2 = Button(window, text="Search entry", width=12, command=self.search_command)
        b2.grid(row=3, column=3)

        b3 = Button(window, text="Add entry", width=12, command=self.add_command)
        b3.grid(row=4, column=3)

        b4 = Button(window, text="Update selected", width=12, command=self.update_command)
        b4.grid(row=5, column=3)

        b5 = Button(window, text="Delete selected", width=12, command=self.delete_command)
        b5.grid(row=6, column=3)

        b6 = Button(window, text="Close", width=12, command=window.destroy)
        b6.grid(row=7, column=3)

    def get_selected_row(self, event):
        # Guard against selection events fired while the list is empty.
        if not self.list1.curselection():
            return
        index = self.list1.curselection()[0]
        self.selected_tuple = self.list1.get(index)
        self.e1.delete(0, END)
        self.e1.insert(END, self.selected_tuple[1])
        self.e2.delete(0, END)
        self.e2.insert(END, self.selected_tuple[2])
        self.e3.delete(0, END)
        self.e3.insert(END, self.selected_tuple[3])
        self.e4.delete(0, END)
        self.e4.insert(END, self.selected_tuple[4])

    def view_command(self):
        self.list1.delete(0, END)
        for row in database.view():
            self.list1.insert(END, row)

    def search_command(self):
        self.list1.delete(0, END)
        for row in database.search(self.title_text.get(), self.author_text.get(), self.year_text.get(), self.isbn_text.get()):
            self.list1.insert(END, row)

    def add_command(self):
        database.insert(self.title_text.get(), self.author_text.get(), self.year_text.get(), self.isbn_text.get())
        self.list1.delete(0, END)
        self.list1.insert(END, (self.title_text.get(), self.author_text.get(), self.year_text.get(), self.isbn_text.get()))

    def delete_command(self):
        database.delete(self.selected_tuple[0])

    def update_command(self):
        database.update(self.selected_tuple[0], self.title_text.get(), self.author_text.get(), self.year_text.get(), self.isbn_text.get())

window = Tk()
Window(window)
window.mainloop()
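# The backend.Database class imported above is not shown in this record; from
# the calls made here it is assumed to expose view(), search(title, author,
# year, isbn), insert(title, author, year, isbn), update(id, title, author,
# year, isbn) and delete(id), with rows shaped (id, title, author, year, isbn).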
|
[
"noreply@github.com"
] |
ManuMaddali.noreply@github.com
|
12e36a9742ff7dac2154b2aa130e31eacc716b26
|
a4c7a4361bbda9f1d21d6d370aca1823c3e54646
|
/ludmag/core/views.py
|
563fff079dfc629f054502b0120e5d4f3bd04baa
|
[] |
no_license
|
ArheliCortes/Machtili-Web
|
5a44b1bf1681d0482a3c158c85025958595ea674
|
8296aafe791f5be99586765163cd27bf0a945a4c
|
refs/heads/master
| 2023-04-20T10:32:10.800072
| 2021-03-30T01:42:14
| 2021-03-30T01:42:14
| 286,372,193
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,467
|
py
|
from django.shortcuts import render
from django.shortcuts import HttpResponse
from django.http import HttpResponseRedirect, Http404
from django.conf import settings
from django.contrib import messages
from django.core.mail import EmailMultiAlternatives
from django.core.mail import BadHeaderError
from .form import *
from .models import ProfesorResume, Paragraph
from django.urls import resolve

# Create your views here.

def home(request):
    if request.method == "POST":
        response = sendEmailInfo(request)
    else:
        context = {}
        context['form'] = ClientePotencialForm()
        context['form_doubt'] = FormDoubts()
        context['profesor'] = ProfesorResume.objects.all()
        template_name = "core/home.html"
        response = render(request, template_name, context)
    return response

def resume(request, profesor_id=1):
    try:
        current_url = resolve(request.path_info).kwargs
        profesor_id = int(current_url['profesor_id'])
        profesor_selected = ProfesorResume.objects.get(pk=profesor_id)
        paragraph = Paragraph.objects.filter(profesor__name__contains=profesor_selected)
        template_name = "core/base_resume.html"
        return render(request, template_name, {'profesor': profesor_selected, 'paragraph': paragraph})
    except KeyError:
        # Http404 is imported from django.http above.
        raise Http404

def doubt(request):
    if request.method == "POST":
        response = sendEmailDoubt(request)
    else:
        context = {}
        context['form'] = ClientePotencialForm()
        context['form_doubt'] = FormDoubts()
        template_name = "core/home.html"
        response = render(request, template_name, context)
    return response

def sendEmailInfo(request):
    form = ClientePotencialForm(request.POST)
    form_info = ClientePotencialForm()
    form_name_info = 'form'
    form_doubt = FormDoubts()
    form_name_doubt = 'form_doubt'
    context = {form_name_info: form_info, form_name_doubt: form_doubt}
    if form.is_valid():
        name = form.cleaned_data['name']
        lastname = form.cleaned_data['lastname']
        phone = form.cleaned_data['phone']
        email = form.cleaned_data['email']
        grado = str(form.cleaned_data['grado'])
        curso = str(form.cleaned_data['curso'])
        plan = str(form.cleaned_data['plan'])
        subject = "Un nuevo cliente solicita información"
        from_email = "Machtili<" + settings.EMAIL_HOST_USER + ">"
        #to = 'humad.ludimagistri@gmail.com'
        to = "arhelicortes2303@gmail.com"
        text_content = "Contactate con :" + name + " " + lastname + " tel: " + str(phone) + " Le interesa el curso:" + curso + " del grado:" + grado + " Plan:" + plan
        html_content = "<p>Contactate con : <strong>" + name + " " + lastname + "</strong> <br> tel: <strong>" + str(phone) + "</strong><br> correo: <strong>" + email + "</strong><br> Le interesa el curso:<strong>" + curso + "</strong><br>Grado escolar/Días:<strong>" + grado + "</strong><br>Plan:<strong>" + plan + "</strong></p>"
        msg_response = "Su información fue enviada!. Un asesor académico se comunicará con usted en breve."
        response = _sendEmail(request, subject, text_content, html_content, from_email, to, msg_response, form_name_info, form_info, form_name_doubt, form_doubt)
    else:
        response = render(request, 'core/home.html', context)
    return response

def sendEmailDoubt(request):
    form = FormDoubts(request.POST)
    form_info = ClientePotencialForm()
    form_name_info = 'form'
    form_doubt = FormDoubts()
    form_name_doubt = 'form_doubt'
    # Build the context before validating so the invalid-form branch can use it.
    context = {form_name_info: form_info, form_name_doubt: form_doubt}
    if form.is_valid():
        name = form.cleaned_data['name']
        phone = form.cleaned_data['phone']
        email = form.cleaned_data['email']
        subject = "Un cliente tiene una duda"
        msg = form.cleaned_data['message']
        from_email = "Machtili<" + settings.EMAIL_HOST_USER + ">"
        #to = 'humad.ludimagistri@gmail.com'
        to = "arhelicortes2303@gmail.com"
        text_content = msg + " Contactate con :" + name + " email: " + email + " tel: " + str(phone)
        html_content = "<p>Contactate con : <strong>" + name + "</strong><br>email:<strong>" + email + "</strong><br>tel:<strong>" + str(phone) + "</strong><br><br>" + msg + "</p>"
        msg_response = "Su mensaje fue enviado. Un asesor académico tratará de aclarar todas sus dudas lo antes posible."
        response = _sendEmail(request, subject, text_content, html_content, from_email, to, msg_response, form_name_info, form_info, form_name_doubt, form_doubt)
    else:
        response = render(request, 'core/home.html', context)
    return response

def _sendEmail(request, subject, text_content, html_content, from_email, to, msg_response, form_name_info, form_info, form_name_doubt, form_doubt):
    msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
    msg.attach_alternative(html_content, "text/html")
    context = {form_name_info: form_info, form_name_doubt: form_doubt}
    if subject and msg and from_email:
        try:
            msg.send()
            # This is a confirmation, so flag it as a success rather than an error.
            messages.success(request, msg_response)
        except BadHeaderError:
            return HttpResponse('Se encontró un encabezado no válido.')
    else:
        messages.warning(request, 'Asegúrese de que todos los campos estén ingresados y sean válidos.')
        return render(request, 'core/home.html', context)
    return render(request, 'core/home.html', context)

def stop(request):
    return render(request, "core/under_construction.html")
|
[
"arhelicortes2303@gmail.com"
] |
arhelicortes2303@gmail.com
|
1688dd2b8c92420cee33c33f22f960edc5375b31
|
e2c65159e811bab0679200c5ec4bed5fd452f146
|
/ietf/name/migrations/0025_add-important-dates.py
|
543e6d87db849c226f4bb05ce3d2453852207ce7
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ekr/ietfdb
|
f002078e60ddf11c96f91ad039afa4cfeb5fd792
|
8d936836b0b9ff31cda415b0a423e3f5b33ab695
|
refs/heads/master
| 2021-07-10T10:59:52.026021
| 2017-10-10T15:02:00
| 2017-10-10T15:02:00
| 106,584,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-19 07:07
from __future__ import unicode_literals

from django.db import migrations, models

class Migration(migrations.Migration):

    dependencies = [
        ('name', '0024_merge_20170606_1320'),
    ]

    operations = [
        migrations.CreateModel(
            name='ImportantDateName',
            fields=[
                ('slug', models.CharField(max_length=32, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=255)),
                ('desc', models.TextField(blank=True)),
                ('used', models.BooleanField(default=True)),
                ('order', models.IntegerField(default=0)),
                ('default_offset_days', models.SmallIntegerField()),
            ],
            options={
                'ordering': ['order', 'name'],
                'abstract': False,
            },
        ),
    ]
|
[
"henrik@levkowetz.com@7b24d068-2d4e-4fce-9bd7-cbd2762980b0"
] |
henrik@levkowetz.com@7b24d068-2d4e-4fce-9bd7-cbd2762980b0
|
fc4052ab38601e36aa49cbd726871d9c3e2817d4
|
ca5d2aeb93a5f197e722cb9a60d5c70b17a278cd
|
/scripts/mnist_prediction_test.py
|
8e377761e25449864cf541d37dbfde2fc277345e
|
[] |
no_license
|
deerme/OCR-1
|
19b72d181e82c831e5a13a0beb4d3ace5dd475ef
|
73d7da2596c0f13c3fa6b5ef1c028c7620b35b22
|
refs/heads/master
| 2020-05-21T10:09:54.054518
| 2016-03-30T05:17:47
| 2016-03-30T05:17:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,536
|
py
|
import sys
import os
import time

import numpy as np
import theano
import theano.tensor as T
import lasagne

import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.image as img

def load_image(filename):
    image = img.imread(filename)
    return image

def load_dataset():
    # We first define a download function, supporting both Python 2 and 3.
    if sys.version_info[0] == 2:
        from urllib import urlretrieve
    else:
        from urllib.request import urlretrieve

    def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
        print("Downloading %s" % filename)
        urlretrieve(source + filename, filename)

    # We then define functions for loading MNIST images and labels.
    # For convenience, they also download the requested files if needed.
    import gzip

    def load_mnist_images(filename):
        if not os.path.exists(filename):
            download(filename)
        # Read the inputs in Yann LeCun's binary format.
        with gzip.open(filename, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=16)
        # The inputs are vectors now, we reshape them to monochrome 2D images,
        # following the shape convention: (examples, channels, rows, columns)
        data = data.reshape(-1, 1, 28, 28)
        # The inputs come as bytes, we convert them to float32 in range [0,1].
        # (Actually to range [0, 255/256], for compatibility to the version
        # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
        return data / np.float32(256)

    def load_mnist_labels(filename):
        if not os.path.exists(filename):
            download(filename)
        # Read the labels in Yann LeCun's binary format.
        with gzip.open(filename, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=8)
        # The labels are vectors of integers now, that's exactly what we want.
        return data

    # We can now download and read the training and test set images and labels.
    X_train = load_mnist_images('train-images-idx3-ubyte.gz')
    y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
    X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')
    y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')

    # We reserve the last 10000 training examples for validation.
    X_train, X_val = X_train[:-10000], X_train[-10000:]
    y_train, y_val = y_train[:-10000], y_train[-10000:]

    # We just return all the arrays in order, as expected in main().
    # (It doesn't matter how we do this as long as we can read them again.)
    return X_train, y_train, X_val, y_val, X_test, y_test

# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
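# For instance, a hypothetical memory-mapped variant of the loading step
# (the .npy file name is invented for illustration):
#     X_train = np.load('X_train.npy', mmap_mode='r')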
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets[excerpt]

def build_mlp(input_var=None):
    # This creates an MLP of two hidden layers of 800 units each, followed by
    # a softmax output layer of 10 units. It applies 20% dropout to the input
    # data and 50% dropout to the hidden layers.

    # Input layer, specifying the expected input shape of the network
    # (unspecified batchsize, 1 channel, 28 rows and 28 columns) and
    # linking it to the given Theano variable `input_var`, if any:
    l_in = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
                                     input_var=input_var)

    # Apply 20% dropout to the input data:
    l_in_drop = lasagne.layers.DropoutLayer(l_in, p=0.2)

    # Add a fully-connected layer of 800 units, using the linear rectifier, and
    # initializing weights with Glorot's scheme (which is the default anyway):
    l_hid1 = lasagne.layers.DenseLayer(
        l_in_drop, num_units=800,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())

    # We'll now add dropout of 50%:
    l_hid1_drop = lasagne.layers.DropoutLayer(l_hid1, p=0.5)

    # Another 800-unit layer:
    l_hid2 = lasagne.layers.DenseLayer(
        l_hid1_drop, num_units=800,
        nonlinearity=lasagne.nonlinearities.rectify)

    # 50% dropout again:
    l_hid2_drop = lasagne.layers.DropoutLayer(l_hid2, p=0.5)

    # Finally, we'll add the fully-connected output layer, of 10 softmax units:
    l_out = lasagne.layers.DenseLayer(
        l_hid2_drop, num_units=10,
        nonlinearity=lasagne.nonlinearities.softmax)

    # Each layer is linked to its incoming layer(s), so we only need to pass
    # the output layer to give access to a network in Lasagne:
    return l_out

print("Loading data...")
# Load the dataset
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()

# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')

# Create neural network model (depending on first command line parameter)
network = build_mlp(input_var)

# load trained weights
with np.load('../trainedResult/model.npz') as f:
    param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(network, param_values)

prediction = lasagne.layers.get_output(network, deterministic=True)
f_output = theano.function([input_var], prediction)

plt.subplot(211)
test = load_image('../pics/cropped/0.png')
plt.imshow(test, cmap=cm.binary)

# test image: take a single example as a (1, 1, 28, 28) batch
instance = test.reshape(-1, 1, 28, 28)
instance = instance[0][None, :, :]
#instance = X_test[1][None, :,:]
#plt.imshow(X_test[1][0], cmap=cm.binary)
pred = f_output(instance)
N = pred.shape[1]
plt.subplot(212)
# result with probability
plt.bar(range(N), pred.ravel())
plt.show()
|
[
"ouyangchenxing@hotmail.com"
] |
ouyangchenxing@hotmail.com
|
ca1be87f60983fcd605bba51b1755c0b5cff3f9b
|
730e0b802daa7504d3dfb71802cb0a5990410f21
|
/recotakd/plugins/showmount/showmount.py
|
7763a52af3109055dd5f90854aa21c116d5e9930
|
[] |
no_license
|
yashraj0077/recotak-framework
|
261dfe631255c5cbc7c0de56aca2484b04e586a9
|
2ccb5b378c0ca7bd2a550ac7f7190e1b6db1db0b
|
refs/heads/master
| 2021-05-28T06:11:02.452802
| 2015-01-11T17:57:53
| 2015-01-11T17:57:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,562
|
py
|
#!/usr/bin/env python2
# Copyright (c) 2014, curesec GmbH
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import sys
from datetime import datetime
from ctools import cUtil
import ccdClasses as ccd
import argparse
from ctools import cTuples
from ctools import cRpc
import __builtin__
import logging
import logging.handlers

__plgname__ = "showmount"

# logging
LOG = "/tmp/showmount.log"
FORMAT = '%(asctime)s - %(name)s - ' + \
    '%(levelname)s - %(threadName)s - %(message)s'
logging.basicConfig(filename=LOG,
                    filemode="w",
                    format=FORMAT,
                    level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

CRLF = '\r\n'

def showmount(target):
    ip = ''
    port = None
    try:
        ip, port = target
    except:
        ip = target
    dirs = []
    rpc = None  # so the finally block is safe if the connection fails
    try:
        rpc = cRpc.Rpc(ip)
        # more comfy like this, because rpc does all the work of
        # getting port info n co
        if not port:
            dirs = rpc.call2('MOUNTD', 'EXPORT')
        else:
            # but this works as well
            dirs = rpc.call('MOUNTD', showmount.version, 'EXPORT', port, cRpc.PROTO['TCP'])
        yield (ip, port, dirs)
    except Exception, e:
        logger.debug(e)
    finally:
        if rpc:
            rpc.close()
    raise StopIteration

class Showmount():

    def __init__(self,
                 args,
                 dbh=None):
        logger.debug('Init %s' % __name__)
        # database handle
        self.dbh = dbh
        opt = self.parse(args)

        if opt.fn_rpc:
            cRpc.RPC_PATH = opt.fn_rpc
        # check if rpc file is readable
        try:
            fd = __builtin__.openany(cRpc.RPC_PATH, "r")
            fd.close()
        except:
            print 'Error: Could not open RPC program number data base %s' % cRpc.RPC_PATH
            sys.exit(1)

        showmount.version = 1
        try:
            ig_targets = cTuples.cInputGenerator(filename=opt.target_fn,
                                                 data=opt.targets,
                                                 maxthreads=opt.nthreads,
                                                 circle=False,
                                                 expand_cb=lambda t: cUtil.mkIP2(t,
                                                                                 ports=[opt.port],
                                                                                 noResolve=True
                                                                                 ),
                                                 )
        except cTuples.ENoInput:
            print 'Error: No Targets specified, please use the -t/T parameter to set target(s)'
            sys.exit(1)

        ig_targets.start()
        self.tuples = cTuples.cTuples(inputs=[ig_targets],
                                      prep_callback=showmount,
                                      maxthreads=opt.nthreads,
                                      delay=opt.delay)
        self.tuples.start()

    def parse(self, args):
        parser = argparse.ArgumentParser(
            prog=__plgname__,
            description='showmount queries the mount daemon on a remote host ' +
                        'for information about the state of the NFS server on that machine.',
            epilog="""Examples:
            showmount.plg -t 192.168.1.0/24
            """,
            formatter_class=argparse.RawTextHelpFormatter
        )

        parser.add_argument("-T",
                            dest='target_fn',
                            default='',
                            help="path to file with a newline separated list of ips (or ranges or masks)," +
                                 " e.g. -T targets.txt")

        parser.add_argument("-t",
                            dest='targets',
                            default=[],
                            nargs='+',
                            help="list of IPs or IP ranges to resolve, e.g. -t 8.8.8.0/24 82.165.197.1")

        parser.add_argument('-delay',
                            help='delay in between requests',
                            dest='delay',
                            default=0.0,
                            type=float
                            )

        parser.add_argument("-nt",
                            dest='nthreads',
                            help="number of threads to scan with (default 50)",
                            type=int,
                            default=50)

        parser.add_argument('-P',
                            help='target port (default as req. by rpcinfo)',
                            dest='port',
                            default=None,
                            type=int
                            )

        parser.add_argument('-rpc',
                            help='File containing RPC numbers, '
                                 'if the path does start with \'/\', '
                                 'it is interpreted as absolute and not redirected into the user directory',
                            default='rpc',
                            dest='fn_rpc',
                            )

        if not args:
            parser.print_help()
            sys.exit(1)

        args = parser.parse_args(args)
        return args

    def start(self):
        start = datetime.now()
        row_fmt = '{:30}{:}'
        for ip, port, dirs in self.tuples.tuple_q:
            if dirs:
                print '\nExport list for %s:' % ip
                for d in dirs:
                    groups = d[1]
                    for client in groups:
                        print row_fmt.format(d[0], client)
                        if self.dbh:
                            br = ccd.ShowmountResult(
                                ip=ip,
                                port=port,
                                export=d[0],
                                client=client,
                            )
                            self.dbh.add(br)
        print 'Finished in %f seconds' % cUtil.secs(start)
|
[
"dash@recotak.org"
] |
dash@recotak.org
|
8ca3a7e2ab27f5ce79805ab06ce8f49c81e9194c
|
797e52def2f0acd59dd14b1585892106a592e849
|
/setup.py
|
0274138e540a3a7ce0dd14ac6ff7103e6a32f1dd
|
[
"Apache-2.0"
] |
permissive
|
bponsler/graph_goggles
|
b7fa4ab6efa4b29b1761553ad97ba19d228ae093
|
8d523bc1ec726d7147f3314dd783058f9830f24e
|
refs/heads/master
| 2021-01-16T19:08:14.087307
| 2017-08-13T00:13:08
| 2017-08-13T00:13:08
| 100,143,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
#!/usr/bin/env python
from distutils.core import setup

# Make sure python_qt_binding is installed
try:
    import python_qt_binding
except:
    print "ERROR: missing requirement: 'python_qt_binding'"
    print
    print "Please install python_qt_binding and try again:"
    print
    print "    sudo pip install python_qt_binding"
    exit(1)

# Make sure pygraphviz is installed
try:
    import pygraphviz
except:
    print "ERROR: missing requirement: 'pygraphviz'"
    print
    print "Please install pygraphviz and try again:"
    print
    print "    sudo pip install pygraphviz"
    exit(1)

setup(
    name="graph_goggles",
    version="0.0.1",
    description="View a dot graph through controllable goggles",
    author="Brett Ponsler",
    author_email="ponsler@gmail.com",
    packages=["graph_goggles"],
    scripts=["scripts/graph-goggles"]
)
|
[
"ponsler@gmail.com"
] |
ponsler@gmail.com
|
9a19b433dc0b84e14cd5e95095d3365ad3966b78
|
29b029f9c9bc1f5775c145d69471c9936688c172
|
/linkedin_jobs_scraper/utils/chrome_driver.py
|
1c2e5941f1479ba49f53eb4c158724816054820e
|
[
"MIT"
] |
permissive
|
hypefi/linked_scrape
|
03e0f0d41b421668cbe4c1afee91a1c375c7ca44
|
7f7acdb9e31ac056882533567fdda9bb8d9cde90
|
refs/heads/main
| 2023-03-08T01:54:23.532350
| 2021-02-25T08:36:50
| 2021-02-25T08:36:50
| 341,243,647
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,858
|
py
|
import urllib3
import json

from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType
from selenium.webdriver.chrome.options import Options

from linkedin_jobs_scraper.utils.logger import debug

def get_default_driver_options(width=1472, height=828, headless=True) -> Options:
    """
    Generate default Chrome driver options
    :param width: int
    :param height: int
    :param headless: bool
    :return: Options
    """
    chrome_options = Options()
    chrome_options.headless = headless
    chrome_options.page_load_strategy = 'normal'
    chrome_options.add_argument("--enable-automation")
    chrome_options.add_argument("--start-maximized")
    chrome_options.add_argument(f"--window-size={width},{height}")
    chrome_options.add_argument("--lang=en-GB")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-setuid-sandbox")
    chrome_options.add_argument("--disable-dev-shm-usage")
    chrome_options.add_argument("--disable-gpu")
    chrome_options.add_argument("--disable-accelerated-2d-canvas")
    # chrome_options.add_argument("--proxy-server='direct://")
    # chrome_options.add_argument("--proxy-bypass-list=*")
    chrome_options.add_argument("--allow-running-insecure-content")
    chrome_options.add_argument("--disable-web-security")
    chrome_options.add_argument("--disable-client-side-phishing-detection")
    chrome_options.add_argument("--disable-notifications")
    chrome_options.add_argument("--mute-audio")
    chrome_options.add_argument("--ignore-certificate-errors")

    # Disable downloads
    chrome_options.add_experimental_option(
        'prefs', {
            'safebrowsing.enabled': 'false',
            'download.prompt_for_download': False,
            'download.default_directory': '/dev/null',
            'download_restrictions': 3,
            'profile.default_content_setting_values.notifications': 2,
        }
    )

    return chrome_options

def get_driver_proxy_capabilities(proxy_url: str):
    """
    Use a single proxy directly from the browser
    :param proxy_url:
    :return:
    """
    # Keep the Proxy object distinct from the incoming proxy url string,
    # so the argument is not shadowed before it is used.
    proxy = Proxy()
    proxy.proxy_type = ProxyType.MANUAL
    proxy.http_proxy = proxy_url
    proxy.ssl_proxy = proxy_url
    proxy.ftp_proxy = proxy_url
    proxy.auto_detect = False

    capabilities = webdriver.DesiredCapabilities.CHROME.copy()
    proxy.add_to_capabilities(capabilities)
    return capabilities

def build_driver(executable_path: str = None, options: Options = None, headless=True, timeout=20) -> webdriver:
    """
    Build Chrome driver instance
    :param executable_path: str
    :param options: Options
    :param headless: bool
    :param timeout: int
    :return: webdriver
    """
    kwargs = {}

    if executable_path is not None:
        kwargs['executable_path'] = executable_path

    kwargs['options'] = options if options is not None else get_default_driver_options(headless=headless)
    # kwargs['desired_capabilities'] = get_driver_proxy_capabilities('http://localhost:8888')

    driver = webdriver.Chrome(**kwargs)
    driver.set_page_load_timeout(timeout)
    return driver
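# A minimal usage sketch (assumes a compatible chromedriver binary on PATH):
#     driver = build_driver(headless=True)
#     driver.get('https://www.linkedin.com/jobs')
#     driver.quit()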
def get_debugger_url(driver: webdriver) -> str:
    """
    Get Chrome debugger url
    :param driver: webdriver
    :return: str
    """
    chrome_debugger_url = f"http://{driver.capabilities['goog:chromeOptions']['debuggerAddress']}"
    debug('Chrome Debugger Url', chrome_debugger_url)
    return chrome_debugger_url

def get_websocket_debugger_url(driver: webdriver) -> str:
    """
    Get Chrome websocket debugger url
    :param driver: webdriver
    :return: str
    """
    chrome_debugger_url = get_debugger_url(driver)
    http = urllib3.PoolManager()
    response = json.loads(http.request('GET', chrome_debugger_url + '/json').data.decode())
    return response[0]['webSocketDebuggerUrl']
|
[
"ludovico.fabbri@gmail.com"
] |
ludovico.fabbri@gmail.com
|
c505b33ada2cdd7e0daf2cb5f9fe5a33ff8a9060
|
48fdb938f020c2297af7eb30736987aa90272e50
|
/baekjoon_online_judge/boj_11720_숫자의합.py
|
af75cc5a38b7d58fa4e2e9bc18d37cd85707ab43
|
[] |
no_license
|
seventeeen/algorithm
|
6207016ef9449ee4bb36e6de31d68832b652c23d
|
b481a47880dcf52d96ec33f3118765325b5b5743
|
refs/heads/master
| 2021-01-19T12:04:32.293658
| 2017-05-10T10:04:03
| 2017-05-10T10:04:03
| 82,780,154
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
"""
 * About
 *
 * Author: seventeeen@GitHub <powdragon1@gmail.com>
 * Date : 2017-02-22
 * URL : https://www.acmicpc.net/problem/11720
 *
"""
n = input()
x = raw_input()
result = 0
for i in range(n):
    result += int(x[i])
print result
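# Example: n = 3 and x = '123' gives result = 1 + 2 + 3 = 6.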
|
[
"powdragon1@gmail.com"
] |
powdragon1@gmail.com
|
e0e93d23204768c1ecf59fba66d9bd713d7a9644
|
e24d3952dd465b7172301963c0b7ffbacd39239b
|
/hellodjango/apps/brading/migrations/0002_auto__add_bookmark.py
|
a9b31c24a3db5b67219ad4236a029b7e8256f779
|
[] |
no_license
|
shunt94/bradingwebsite
|
93ce7cdf85b27e70f438be6f397f953ba4cb6ee0
|
255893df09a559a061698805b90069b8fc282e15
|
refs/heads/master
| 2020-05-17T19:30:25.826267
| 2015-04-22T09:55:57
| 2015-04-22T09:55:57
| 38,458,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,146
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models

class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'Bookmark'
        db.create_table(u'brading_bookmark', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal(u'brading', ['Bookmark'])

    def backwards(self, orm):
        # Deleting model 'Bookmark'
        db.delete_table(u'brading_bookmark')

    models = {
        u'brading.bookmark': {
            'Meta': {'object_name': 'Bookmark'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }

    complete_apps = ['brading']
|
[
"conninie@gmail.com"
] |
conninie@gmail.com
|
7495a5856afe6c41aa4534f15d4665d0edee3fb5
|
6044a811cb2861b4585f050a66c558ee7251ca81
|
/app/email.py
|
c8018ae9a0481007edab2edff35927301b4307ca
|
[] |
no_license
|
th11011/simple_and_naive
|
120c0a04b8ea3f2105a743a14c443b3d272e069a
|
28e6d9fd4e967a569be6739dd3c35bace1f9ecf7
|
refs/heads/master
| 2021-01-11T16:00:01.667605
| 2017-01-23T18:32:41
| 2017-01-23T18:32:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
from flask.ext.mail import Message
from . import mail
from flask import render_template, current_app

def send_email(to, subject, template, **kwargs):
    msg = Message(current_app.config['MAIL_SUBJECT_PREFIX'] + subject, sender=current_app.config['MAIL_SENDER'], recipients=[to])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    mail.send(msg)
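# Hypothetical usage (template name and recipient invented for illustration):
# with templates 'mail/welcome.txt' and 'mail/welcome.html' present, call
#     send_email('user@example.com', 'Welcome', 'mail/welcome', name='Ann')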
|
[
"downwargers@163.com"
] |
downwargers@163.com
|
4fe2c6f23229a13aa71de40bceb585c9181b2565
|
c2e02be39f3f8a656c3306c50173900b3b1cf8e1
|
/apps/upload/admin.py
|
06017a232f62a98fc6c2c26b3cc59055f386bdde
|
[] |
no_license
|
big-jump/dumponus
|
45d1effa5dd03ff3fe64de706ba237180fe14c28
|
5fc465947d208e4c33a3354279c1a43716d74a6d
|
refs/heads/master
| 2020-07-26T22:26:55.605032
| 2011-01-21T03:23:09
| 2011-01-21T03:23:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
from django.contrib import admin
from upload.models import Upload

class UploadAdmin(admin.ModelAdmin):
    list_display = ('name', 'type', 'ip')

admin.site.register(Upload, UploadAdmin)
|
[
"tsopor@gmail.com"
] |
tsopor@gmail.com
|
548fc6b49b5de48b04bcc9df9be399daa005a4fa
|
772ab1b286bf740cd2b8432dffbcb67cfc57890d
|
/venv/Lib/site-packages/plotly/version.py
|
4a37f9ba002680fc41c2cb27bd794b8e57ceba11
|
[
"Apache-2.0"
] |
permissive
|
kteegarden/PollGraph
|
71eb18d6009317a3c69c1a7d3735a970c657f9a2
|
c16fbdbd7a1cd46da7d1cd7beb789d681c46ffaa
|
refs/heads/master
| 2020-04-22T02:03:33.228011
| 2019-06-23T01:08:21
| 2019-06-23T01:08:21
| 170,035,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
__version__ = '3.6.0'
__frontend_version__ = '^0.7.0'


def stable_semver():
    """
    Get the stable portion of the semantic version string (the first three
    numbers), without any of the trailing labels

    '3.0.0rc11' -> '3.0.0'
    """
    from distutils.version import LooseVersion
    version_components = LooseVersion(__version__).version
    stable_ver_str = '.'.join(str(s) for s in version_components[0:3])
    return stable_ver_str
|
[
"kyle.tee@gmail.com"
] |
kyle.tee@gmail.com
|
0a245ea678453d0cbb4d5623f0ce53e3474fbd80
|
979caf4e5c94d2b34db11b9099a1d3a33ee4b460
|
/graphplot.py
|
8d85d63db410ab05dcace35240ca48944403687c
|
[] |
no_license
|
NorthernWilly/python_work
|
d7997baec68f2fd512e0b444c96485dd10bc93e4
|
bc0f4db91bf348e3e793e8e7cfcaed02d7e62383
|
refs/heads/master
| 2020-05-26T21:49:35.152010
| 2019-05-31T10:25:12
| 2019-05-31T10:25:12
| 188,386,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
import matplotlib.pyplot as plt
Year=[2010,2011,2012,2013,2014,2015]
Rice=[1100,1300,1200,1600,1300,1400]
plt.xlabel("Years")
plt.ylabel("Rice Production")
plt.title("Rice Production by Year")
plt.plot(Year,Rice)
plt.show()
|
[
"ianwilson2791@gmail.com"
] |
ianwilson2791@gmail.com
|
4472217979bf09ca6d27d2ed299e2ede7b822ca2
|
4e38062fc1e34fc1e7b69d7a4aa4aca03f790a06
|
/tensorflow_probability/python/distributions/platform_compatibility_test.py
|
a63d72a45eeea66b51323da56d2af2bd4b476de6
|
[
"Apache-2.0"
] |
permissive
|
Sayam753/probability
|
c39fd5d37bc8d64fca26eda77f99f0f390d29cce
|
3975ea01f182fd80d65cd5a32e8dba29eb1caf00
|
refs/heads/master
| 2021-08-07T07:46:27.635928
| 2021-01-15T18:08:32
| 2021-01-15T18:09:53
| 239,078,542
| 0
| 0
|
Apache-2.0
| 2020-02-08T06:05:28
| 2020-02-08T06:05:28
| null |
UTF-8
|
Python
| false
| false
| 19,176
|
py
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Property-based testing for TFP platform compatibility.

- GradientTape
- XLA compilation
- tf.vectorized_map

Compatibility with JAX transformations is in jax_transformation_test.py.
General distribution properties are in distribution_properties_test.py.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections

from absl.testing import parameterized
import hypothesis as hp
from hypothesis import strategies as hps
import numpy as np
import six
import tensorflow.compat.v2 as tf

from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python import experimental
from tensorflow_probability.python.distributions import hypothesis_testlib as dhps
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import test_util


XLA_UNFRIENDLY_DISTS = frozenset([
    # TODO(b/159995894): SegmentMean not registered for XLA.
    'Bates',
    # TODO(b/159996966)
    'Gamma',
    # TODO(b/173546024)
    'GeneralizedExtremeValue',
    # TODO(b/163118820)
    'LogLogistic',
    'LogNormal',
    # TODO(b/162935914): Needs to use XLA friendly Poisson sampler.
    'NegativeBinomial',
    # TODO(b/137956955): Add support for hypothesis testing
    'PoissonLogNormalQuadratureCompound',
    # TODO(b/159999573): XLA / non-XLA computation seems to have
    # completely arbitrary differences!
    'Poisson',
    # TODO(b/137956955): Add support for hypothesis testing
    'SinhArcsinh',
    # TruncatedCauchy has log_probs that are very far off.
    'TruncatedCauchy',
    # TODO(b/159997353): StatelessTruncatedNormal missing in XLA.
    'TruncatedNormal',
    'Weibull',
    'WishartTriL',  # log_probs are very far off.
    # TODO(b/159997700) No XLA Zeta
    'Zipf',
])

NO_SAMPLE_PARAM_GRADS = {
    'Deterministic': ('atol', 'rtol'),
}
NO_LOG_PROB_PARAM_GRADS = ('Deterministic', 'Empirical')
NO_KL_PARAM_GRADS = ('Deterministic',)

EXTRA_TENSOR_CONVERSION_DISTS = {
    'RelaxedBernoulli': 1,
    'WishartTriL': 3,  # not concretizing linear operator scale
    'Chi': 2,  # subclasses `Chi2`, runs redundant checks on `df` parameter
}

# TODO(b/130815467) All distributions should be auto-vectorizeable.
# The lists below contain distributions from INSTANTIABLE_BASE_DISTS that are
# blocked for the autovectorization tests. Since not all distributions are
# in INSTANTIABLE_BASE_DISTS, these should not be taken as exhaustive.
SAMPLE_AUTOVECTORIZATION_IS_BROKEN = [
    'Bates',  # tf.repeat and tf.range do not vectorize. (b/157665707)
    'DirichletMultinomial',  # Times out. (b/164143676)
    'Multinomial',  # TensorListConcatV2 fallback broken: b/166658748
    'PlackettLuce',  # No converter for TopKV2
    'Skellam',
    # 'TruncatedNormal',  # No converter for ParameterizedTruncatedNormal
]

LOGPROB_AUTOVECTORIZATION_IS_BROKEN = [
    'Bates',  # tf.repeat and tf.range do not vectorize. (b/157665707)
    'ExponentiallyModifiedGaussian',  # b/174778704
    'HalfStudentT',  # Numerical problem: b/149785284
    'Skellam',
    'StudentT',  # Numerical problem: b/149785284
    'TruncatedNormal',  # Numerical problem: b/150811273
    'VonMisesFisher',  # No converter for CheckNumerics
    'Wishart',  # Actually works, but disabled because log_prob of sample is
                # ill-conditioned for reasons unrelated to pfor.
    'WishartTriL',  # Same as Wishart.
]

# Vectorization can rewrite computations in ways that (apparently) lead to
# minor floating-point inconsistency.
# TODO(b/142827327): Bring tolerance down to 0 for all distributions.
VECTORIZED_LOGPROB_ATOL = collections.defaultdict(lambda: 1e-6)
VECTORIZED_LOGPROB_ATOL.update({
    'Beta': 1e-5,
    'BetaBinomial': 1e-5,
    'CholeskyLKJ': 1e-4,
    'GammaGamma': 2e-5,
    'LKJ': 1e-3,
    'PowerSpherical': 2e-5,
})

VECTORIZED_LOGPROB_RTOL = collections.defaultdict(lambda: 1e-6)
VECTORIZED_LOGPROB_RTOL.update({
    'Beta': 1e-5,
    'GammaGamma': 1e-4,
    'NegativeBinomial': 1e-5,
    'PERT': 1e-5,
    'PowerSpherical': 5e-5,
})

# TODO(b/142827327): Bring tolerance down to 0 for all distributions.
XLA_LOGPROB_ATOL = collections.defaultdict(lambda: 1e-6)
XLA_LOGPROB_ATOL.update({
    'Beta': 1e-4,
    'BetaBinomial': 5e-6,
    'Binomial': 5e-6,
    'DeterminantalPointProcess': 1e-5,
    'DirichletMultinomial': 1e-4,
    'ExpGamma': 2e-3,  # TODO(b/166257329)
    'ExpInverseGamma': 1.5e-3,  # TODO(b/166257329)
    'ExpRelaxedOneHotCategorical': 3e-5,
    'HalfCauchy': 2e-6,
    'InverseGamma': 1e-4,
    'Kumaraswamy': 3e-6,
    'Logistic': 3e-6,
    'Multinomial': 2e-4,
    'PowerSpherical': 2e-5,
    'Skellam': 1e-4
})

XLA_LOGPROB_RTOL = collections.defaultdict(lambda: 1e-6)
XLA_LOGPROB_RTOL.update({
    'Beta': 5e-4,
    'BetaBinomial': 5e-4,
    'Binomial': 4e-6,
    'Categorical': 6e-6,
    'Chi': 2e-4,
    'Chi2': 5e-5,
    'CholeskyLKJ': 1e-4,
    'ContinuousBernoulli': 2e-6,
    'Dirichlet': 1e-3,
    'DirichletMultinomial': 2e-4,
    'ExpRelaxedOneHotCategorical': 1e-3,  # TODO(b/163118820)
    'ExpGamma': 5e-2,  # TODO(b/166257329)
    'ExpInverseGamma': 5e-2,  # TODO(b/166257329)
    'FiniteDiscrete': 6e-6,
    'GammaGamma': 5e-4,
    'Geometric': 5e-5,
    'InverseGamma': 5e-3,
    'JohnsonSU': 1e-2,
    'LKJ': .07,
    'Multinomial': 3e-4,
    'OneHotCategorical': 1e-3,  # TODO(b/163118820)
    'Pareto': 2e-2,  # TODO(b/159997708)
    'PERT': 5e-4,
    'Poisson': 3e-2,  # TODO(b/159999573)
    'PowerSpherical': .003,
    'RelaxedBernoulli': 3e-3,
    'VonMises': 2e-2,  # TODO(b/160000258):
    'VonMisesFisher': 5e-3,
    'WishartTriL': 1e-5,
})

SKIP_KL_CHECK_DIST_VAR_GRADS = [
    'Kumaraswamy',  # TD's KL gradients do not rely on bijector variables.
    'JohnsonSU',  # TD's KL gradients do not rely on bijector variables.
    'GeneralizedExtremeValue',  # TD's KL gradients do not rely on bijector
                                # variables.
]


def extra_tensor_conversions_allowed(dist):
  """Returns number of extra tensor conversions allowed for the input dist."""
  extra_conversions = EXTRA_TENSOR_CONVERSION_DISTS.get(type(dist).__name__)
  if extra_conversions:
    return extra_conversions
  if isinstance(dist, tfd.TransformedDistribution):
    return 1
  if isinstance(dist, tfd.BatchReshape):
    # One for the batch_shape_tensor needed by _call_reshape_input_output.
    # One to cover inability to turn off validate_args for the base
    # distribution (b/143297494).
    return 2
  return 0


@test_util.test_graph_and_eager_modes
class DistributionGradientTapeAndConcretizationTest(test_util.TestCase):

  @parameterized.named_parameters(
      {'testcase_name': dname, 'dist_name': dname}
      for dname in dhps.TF2_FRIENDLY_DISTS)
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testDistribution(self, dist_name, data):
    seed = test_util.test_seed()
    # Explicitly draw event_dim here to avoid relying on _params_event_ndims
    # later, so this test can support distributions that do not implement the
    # slicing protocol.
    event_dim = data.draw(hps.integers(min_value=2, max_value=6))
    dist = data.draw(dhps.distributions(
        dist_name=dist_name, event_dim=event_dim, enable_vars=True))
    batch_shape = dist.batch_shape
    batch_shape2 = data.draw(tfp_hps.broadcast_compatible_shape(batch_shape))
    dist2 = data.draw(
        dhps.distributions(
            dist_name=dist_name,
            batch_shape=batch_shape2,
            event_dim=event_dim,
            enable_vars=True))
    self.evaluate([var.initializer for var in dist.variables])

    # Check that the distribution passes Variables through to the accessor
    # properties (without converting them to Tensor or anything like that).
    for k, v in six.iteritems(dist.parameters):
      if not tensor_util.is_ref(v):
        continue
      self.assertIs(getattr(dist, k), v)

    # Check that standard statistics do not read distribution parameters more
    # than twice (once in the stat itself and up to once in any validation
    # assertions).
    max_permissible = 2 + extra_tensor_conversions_allowed(dist)
    for stat in sorted(data.draw(
        hps.sets(
            hps.one_of(
                map(hps.just, [
                    'covariance', 'entropy', 'mean', 'mode', 'stddev',
                    'variance'
                ])),
            min_size=3,
            max_size=3))):
      hp.note('Testing excessive var usage in {}.{}'.format(dist_name, stat))
      try:
        with tfp_hps.assert_no_excessive_var_usage(
            'statistic `{}` of `{}`'.format(stat, dist),
            max_permissible=max_permissible):
          getattr(dist, stat)()
      except NotImplementedError:
        pass

    # Check that `sample` doesn't read distribution parameters more than twice,
    # and that it produces non-None gradients (if the distribution is fully
    # reparameterized).
    with tf.GradientTape() as tape:
      # TDs do bijector assertions twice (once by distribution.sample, and once
      # by bijector.forward).
      max_permissible = 2 + extra_tensor_conversions_allowed(dist)
      with tfp_hps.assert_no_excessive_var_usage(
          'method `sample` of `{}`'.format(dist),
          max_permissible=max_permissible):
        sample = dist.sample(seed=seed)
    if dist.reparameterization_type == tfd.FULLY_REPARAMETERIZED:
      grads = tape.gradient(sample, dist.variables)
      for grad, var in zip(grads, dist.variables):
        var_name = var.name.rstrip('_0123456789:')
        if var_name in NO_SAMPLE_PARAM_GRADS.get(dist_name, ()):
          continue
        if grad is None:
          raise AssertionError(
              'Missing sample -> {} grad for distribution {}'.format(
                  var_name, dist_name))

    # Turn off validations, since TODO(b/129271256) log_prob can choke on dist's
    # own samples. Also, to relax conversion counts for KL (might do >2 w/
    # validate_args).
    dist = dist.copy(validate_args=False)
    dist2 = dist2.copy(validate_args=False)

    # Test that KL divergence reads distribution parameters at most once, and
    # that it produces non-None gradients.
    try:
      for d1, d2 in (dist, dist2), (dist2, dist):
        if dist_name in SKIP_KL_CHECK_DIST_VAR_GRADS:
          continue
        with tf.GradientTape() as tape:
          with tfp_hps.assert_no_excessive_var_usage(
              '`kl_divergence` of (`{}` (vars {}), `{}` (vars {}))'.format(
                  d1, d1.variables, d2, d2.variables),
              max_permissible=1):  # No validation => 1 convert per var.
            kl = d1.kl_divergence(d2)
        wrt_vars = list(d1.variables) + list(d2.variables)
        grads = tape.gradient(kl, wrt_vars)
        for grad, var in zip(grads, wrt_vars):
          if grad is None and dist_name not in NO_KL_PARAM_GRADS:
            raise AssertionError('Missing KL({} || {}) -> {} grad:\n'  # pylint: disable=duplicate-string-formatting-argument
                                 '{} vars: {}\n{} vars: {}'.format(
                                     d1, d2, var, d1, d1.variables, d2,
                                     d2.variables))
    except NotImplementedError:
      # Raised by kl_divergence if no registered KL is found.
      pass

    # Test that log_prob produces non-None gradients, except for distributions
    # on the NO_LOG_PROB_PARAM_GRADS blocklist.
    if dist_name not in NO_LOG_PROB_PARAM_GRADS:
      with tf.GradientTape() as tape:
        lp = dist.log_prob(tf.stop_gradient(sample))
      grads = tape.gradient(lp, dist.variables)
      for grad, var in zip(grads, dist.variables):
        if grad is None:
          raise AssertionError(
              'Missing log_prob -> {} grad for distribution {}'.format(
                  var, dist_name))

    # Test that all forms of probability evaluation avoid reading distribution
    # parameters more than once.
    for evaluative in sorted(data.draw(
        hps.sets(
            hps.one_of(
                map(hps.just, [
                    'log_prob', 'prob', 'log_cdf', 'cdf',
                    'log_survival_function', 'survival_function'
                ])),
            min_size=3,
            max_size=3))):
      hp.note('Testing excessive var usage in {}.{}'.format(
          dist_name, evaluative))
      try:
        # No validation => 1 convert. But for TD we allow 2:
        # dist.log_prob(bijector.inverse(samp)) + bijector.ildj(samp)
        max_permissible = 2 + extra_tensor_conversions_allowed(dist)
        with tfp_hps.assert_no_excessive_var_usage(
            'evaluative `{}` of `{}`'.format(evaluative, dist),
            max_permissible=max_permissible):
          getattr(dist, evaluative)(sample)
      except NotImplementedError:
        pass


class DistributionCompositeTensorTest(test_util.TestCase):

  def _test_sample_and_log_prob(self, dist_name, dist):
    seed = test_util.test_seed(sampler_type='stateless')
    num_samples = 3

    # Sample from the distribution before composite tensoring
    sample1 = self.evaluate(dist.sample(num_samples, seed=seed))
    hp.note('Drew samples {}'.format(sample1))

    # Sample from the distribution after composite tensoring
    composite_dist = experimental.as_composite(dist)
    flat = tf.nest.flatten(composite_dist, expand_composites=True)
    unflat = tf.nest.pack_sequence_as(
        composite_dist, flat, expand_composites=True)
    sample2 = self.evaluate(unflat.sample(num_samples, seed=seed))
    hp.note('Drew samples {}'.format(sample2))

    # Check that the samples are the same
    self.assertAllClose(sample1, sample2)

    # Check that all the log_probs agree for the samples from before ...
    ct_lp1 = unflat.log_prob(sample1)
    orig_lp1 = dist.log_prob(sample1)
    ct_lp1_, orig_lp1_ = self.evaluate((ct_lp1, orig_lp1))
    self.assertAllClose(ct_lp1_, orig_lp1_)

    # ... and after. (Even though they're supposed to be the same anyway.)
    ct_lp2 = unflat.log_prob(sample2)
    orig_lp2 = dist.log_prob(sample2)
    ct_lp2_, orig_lp2_ = self.evaluate((ct_lp2, orig_lp2))
    self.assertAllClose(ct_lp2_, orig_lp2_)

  # TODO(alexeev): Add coverage for meta distributions, in addition to base
  # distributions.
  @parameterized.named_parameters(
      {'testcase_name': dname, 'dist_name': dname}
      for dname in dhps.TF2_FRIENDLY_DISTS)
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testCompositeTensor(self, dist_name, data):
    dist = data.draw(
        dhps.distributions(
            dist_name=dist_name, enable_vars=False, validate_args=False))
    with tfp_hps.no_tf_rank_errors():
      self._test_sample_and_log_prob(dist_name, dist)


@test_util.test_graph_mode_only
class DistributionXLATest(test_util.TestCase):

  def _test_sample_and_log_prob(self, dist_name, dist):
    seed = test_util.test_seed(sampler_type='stateless')
    num_samples = 3
    sample = self.evaluate(
        tf.function(dist.sample, jit_compile=True)(
            num_samples, seed=seed))
    hp.note('Trying distribution {}'.format(
        self.evaluate_dict(dist.parameters)))
    hp.note('Drew samples {}'.format(sample))

    xla_lp = self.evaluate(
        tf.function(dist.log_prob, jit_compile=True)(
            tf.convert_to_tensor(sample)))
    with tfp_hps.no_tf_rank_errors():
      graph_lp = self.evaluate(dist.log_prob(sample))
    self.assertAllClose(xla_lp, graph_lp,
                        atol=XLA_LOGPROB_ATOL[dist_name],
                        rtol=XLA_LOGPROB_RTOL[dist_name])

  @parameterized.named_parameters(
      {'testcase_name': dname, 'dist_name': dname}
      for dname in dhps.TF2_FRIENDLY_DISTS if dname not in XLA_UNFRIENDLY_DISTS)
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testXLACompile(self, dist_name, data):
    dist = data.draw(dhps.distributions(
        dist_name=dist_name, enable_vars=False,
        validate_args=False))  # TODO(b/142826246): Enable validate_args.
    self._test_sample_and_log_prob(dist_name, dist)


# Autovectorization tests run in graph mode only, because vectorized_map
# works in graph mode (applies tf.function wrapping) internally. The purpose of
# these tests is to verify that converters exist for all distribution ops and
# give results consistent with manual batching; searching for discrepancies
# between graph and eager behavior is out of scope.
@test_util.test_graph_mode_only
class DistributionsWorkWithAutoVectorizationTest(test_util.TestCase):

  def _test_vectorization(self, dist_name, dist):
    seed = test_util.test_seed()
    # TODO(b/171752261): New stateless samplers don't work with pfor.
    enable_auto_vectorized_sampling = False

    num_samples = 3
    if (not enable_auto_vectorized_sampling or
        dist_name in SAMPLE_AUTOVECTORIZATION_IS_BROKEN):
      sample = self.evaluate(dist.sample(num_samples, seed=seed))
    else:
      sample = self.evaluate(tf.vectorized_map(
          lambda i: dist.sample(seed=seed),
          tf.range(num_samples),
          fallback_to_while_loop=False))
    hp.note('Drew samples {}'.format(sample))

    if dist_name not in LOGPROB_AUTOVECTORIZATION_IS_BROKEN:
      pfor_lp = tf.vectorized_map(
          dist.log_prob,
          tf.convert_to_tensor(sample),
          fallback_to_while_loop=False)
      batch_lp = dist.log_prob(sample)
      pfor_lp_, batch_lp_ = self.evaluate((pfor_lp, batch_lp))
      self.assertAllClose(pfor_lp_, batch_lp_,
                          atol=VECTORIZED_LOGPROB_ATOL[dist_name],
                          rtol=VECTORIZED_LOGPROB_RTOL[dist_name])

  @parameterized.named_parameters(
      {'testcase_name': dname, 'dist_name': dname}
      for dname in sorted(list(dhps.INSTANTIABLE_BASE_DISTS.keys())))
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testVmap(self, dist_name, data):
    dist = data.draw(dhps.distributions(
        dist_name=dist_name, enable_vars=False,
        validate_args=False))  # TODO(b/142826246): Enable validate_args.
    with tfp_hps.no_tf_rank_errors():
      self._test_vectorization(dist_name, dist)


if __name__ == '__main__':
  # Hypothesis often finds numerical near misses. Debugging them is much aided
  # by seeing all the digits of every floating point number, instead of the
  # usual default of truncating the printed representation to 8 digits.
  np.set_printoptions(floatmode='unique', precision=None)
  tf.test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
65f42ab02ee5ed689c58d40b096a17724d9d856d
|
5a84837ef0b45f575dc4eddb36fe327d7e38f47a
|
/app_events/models.py
|
1fb6eff714a888aa5916c62b7fc738cbcf808480
|
[
"MIT"
] |
permissive
|
adamdubey/py-microservice-webapp-demo
|
2281fa1e65834907dd93583f212d6fcb2823273b
|
d96154248fe5068cab208d83b9736dab3403b566
|
refs/heads/main
| 2023-07-19T11:00:58.085757
| 2021-09-08T21:16:56
| 2021-09-08T21:16:56
| 402,856,480
| 0
| 0
|
MIT
| 2021-09-08T21:16:57
| 2021-09-03T17:56:16
| null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
import uuid
from django.db import models
class Event(models.Model):
slug = models.SlugField(unique=True, blank=False, null=False, editable=False, max_length=50)
title = models.CharField(max_length=255, null=False, blank=False)
description = models.TextField(null=False, blank=False)
date = models.DateField(null=False, blank=False)
number_of_participants = models.PositiveIntegerField(null=False, blank=False)
def save(self, **kwargs):
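        # Generate a random UUID slug on the first save only; later saves keep
        # the slug (and therefore the public URL) stable.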
if not self.slug:
self.slug = uuid.uuid4()
super().save(**kwargs)
|
[
"adubey0311@gmail.com"
] |
adubey0311@gmail.com
|
ad09a2bc7ba7734593be31f383df4a8513a517fa
|
544cfadc742536618168fc80a5bd81a35a5f2c99
|
/tools/test/connectivity/acts_tests/tests/google/power/tel/PowerTelIdle_UMTS_Test.py
|
7af7fea242b64d457bf21044ade7a85f9228de38
|
[] |
no_license
|
ZYHGOD-1/Aosp11
|
0400619993b559bf4380db2da0addfa9cccd698d
|
78a61ca023cbf1a0cecfef8b97df2b274ac3a988
|
refs/heads/main
| 2023-04-21T20:13:54.629813
| 2021-05-22T05:28:21
| 2021-05-22T05:28:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 997
|
py
|
#!/usr/bin/env python3
#
# Copyright 2018 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import acts_contrib.test_utils.power.cellular.cellular_idle_power_test as cipt
class PowerTelIdle_UMTS_Test(cipt.PowerTelIdleTest):
def test_umts_idle_r_8_band_1_pul_low_rrcstatuschangetimer_10_1(self):
self.power_tel_idle_test()
def test_umts_idle_r_7_band_4_pul_low_rrcstatuschangetimer_20_2(self):
self.power_tel_idle_test()
|
[
"rick_tan@qq.com"
] |
rick_tan@qq.com
|
2244af3de04361fe846534ff8517f5b89c373d8d
|
4b5ce2383b8b7f47a85397fb6f1bf185df047606
|
/4-20mA_G2.py
|
395e763f5db3404ff9c7c827d1b4ba580ee769a6
|
[] |
no_license
|
bhaskar-anil429/Onion-Omega-2-4-20mA-Current-Receiver
|
40a79cbae226f504bfb9aa8a18928ce1e1780774
|
753c4e25c057201a666f53f8e926fdb3e5d4bee5
|
refs/heads/master
| 2021-06-21T01:55:31.990771
| 2017-08-18T15:54:20
| 2017-08-18T15:54:20
| 100,283,491
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,821
|
py
|
# Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works
# MCP3428
# This code is designed to work with the MCP3428 4-20mA current receiver board available from Contr
# https://shop.controleverything.com/collections/4-20-ma-current-loop-input/products/4-20ma-current
# https://shop.controleverything.com/collections/4-20-ma-current-loop-input/products/4-channel-4-20
from OmegaExpansion import onionI2C
import time
DL = 0.01
DL1 = 0.01
# Get I2C bus
i2c = onionI2C.OnionI2C()
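# Note: the per-channel scale factors used below (0.01109, 0.01109, 0.01101,
# 0.01094) are assumed board-calibration constants that convert the signed
# 12-bit ADC count into milliamps for each 4-20mA loop input.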
# MCP3428 address, 0x68(104)
# Send configuration command
while True:
#0x10(16)Continuous conversion mode, Channel-1, 12-bit Resolution
data = [0x11]
i2c.write(0x68, data)
time.sleep(DL1)
# MCP3428 address, 0x68(104)
# Read data back from 0x00(0), 2 bytes
# raw_adc MSB, raw_adc LSB
data = i2c.readBytes(0x68, 0x00, 2)
# Convert the data to 12-bits
raw_adc = (data[0] & 0x0F) * 256 + data[1]
	if raw_adc > 2047 :
		raw_adc -= 4096  # two's complement: subtract 2**12, not 4095
	current = (raw_adc * 0.01109)
	# Output data to screen
	print("Current Input at Channel One is : %.3f" % current)
	time.sleep(DL)
#0x30 Continuous conversion mode, Channel-2, 12-bit Resolution
data = [0x31]
i2c.write(0x68, data)
time.sleep(DL1)
# MCP3428 address, 0x68(104)
# Read data back from 0x00(0), 2 bytes
# raw_adc MSB, raw_adc LSB
data = i2c.readBytes(0x68, 0x00, 2)
# Convert the data to 12-bits
raw_adc = (data[0] & 0x0F) * 256 + data[1]
	if raw_adc > 2047 :
		raw_adc -= 4096
	current = (raw_adc * 0.01109)
	# Output data to screen
	print("Current Input at Channel Two is : %.3f" % current)
	time.sleep(DL)
#0x50 Continuous conversion mode, Channel-3, 12-bit Resolution
data = [0x51]
i2c.write(0x68, data)
time.sleep(DL1)
# MCP3428 address, 0x68(104)
# Read data back from 0x00(0), 2 bytes
# raw_adc MSB, raw_adc LSB
data = i2c.readBytes(0x68, 0x00, 2)
# Convert the data to 12-bits
raw_adc = (data[0] & 0x0F) * 256 + data[1]
	if raw_adc > 2047 :
		raw_adc -= 4096
	current = (raw_adc * 0.01101)
	# Output data to screen
	print("Current Input at Channel Three is : %.3f" % current)
	time.sleep(DL)
#0x70 Continuous conversion mode, Channel-4, 12-bit Resolution
data = [0x71]
i2c.write(0x68, data)
time.sleep(DL1)
# MCP3428 address, 0x68(104)
# Read data back from 0x00(0), 2 bytes
# raw_adc MSB, raw_adc LSB
data = i2c.readBytes(0x68, 0x00, 2)
# Convert the data to 12-bits
raw_adc = (data[0] & 0x0F) * 256 + data[1]
	if raw_adc > 2047 :
		raw_adc -= 4096
	current = (raw_adc * 0.01094)
	# Output data to screen
	print("Current Input at Channel Four is : %.3f" % current)
	time.sleep(DL)
|
[
"noreply@github.com"
] |
bhaskar-anil429.noreply@github.com
|
1d8ca42dd844396a7c688f012a918bd42f06991e
|
33f8856fa758db4a9f554277431b438188b91f3a
|
/squdata/urls.py
|
edc7b09736d7b7960f656ed4f99473c05d95c3d2
|
[] |
no_license
|
chensiyuan0214/Squirrel-Project
|
1fac14fc7d86d83d4e33eb20209bc82ca5cda094
|
51ba8078363302ca75475e133154f18015caa0b0
|
refs/heads/master
| 2022-03-26T20:19:00.613052
| 2019-12-05T23:40:06
| 2019-12-05T23:40:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
"""squirreltracker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from . import views
app_name = 'squdata'
urlpatterns = [
path('', views.sightings, name = 'sightings'),
path('add/', views.add, name = 'add'),
]
|
[
"yongyizhao@Yongyis-MBP.cable.rcn.com"
] |
yongyizhao@Yongyis-MBP.cable.rcn.com
|
40433806d7b2af76197c94b0a993f5a6137ad47e
|
2ee222d6988cdea7da1a71bdcc7755e544c95d1a
|
/tempest/services/compute/json/security_group_default_rules_client.py
|
6d298373192c3aa6233b3a4919c23615a19a85e0
|
[
"Apache-2.0"
] |
permissive
|
Mirantis/tempest
|
b0747cdb943505c9f48dde6caf5fad7b87df662e
|
ae7e033fef80f2a4728a13bba18123f6fe32839a
|
refs/heads/master
| 2021-01-16T19:06:25.564538
| 2014-08-18T13:43:17
| 2014-08-18T13:43:17
| 3,309,678
| 3
| 0
|
Apache-2.0
| 2020-02-26T11:41:07
| 2012-01-30T22:39:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,960
|
py
|
# Copyright 2014 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.common import rest_client
from tempest import config
CONF = config.CONF
class SecurityGroupDefaultRulesClientJSON(rest_client.RestClient):
def __init__(self, auth_provider):
super(SecurityGroupDefaultRulesClientJSON,
self).__init__(auth_provider)
self.service = CONF.compute.catalog_type
def create_security_default_group_rule(self, ip_protocol, from_port,
to_port, **kwargs):
"""
Creating security group default rules.
ip_protocol : ip_protocol (icmp, tcp, udp).
from_port: Port at start of range.
to_port : Port at end of range.
cidr : CIDR for address range.
"""
post_body = {
'ip_protocol': ip_protocol,
'from_port': from_port,
'to_port': to_port,
'cidr': kwargs.get('cidr'),
}
post_body = json.dumps({'security_group_default_rule': post_body})
url = 'os-security-group-default-rules'
resp, body = self.post(url, post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['security_group_default_rule']
def delete_security_group_default_rule(self,
security_group_default_rule_id):
"""Deletes the provided Security Group default rule."""
resp, body = self.delete('os-security-group-default-rules/%s' % str(
security_group_default_rule_id))
self.expected_success(204, resp.status)
return resp, body
def list_security_group_default_rules(self):
"""List all Security Group default rules."""
resp, body = self.get('os-security-group-default-rules')
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['security_group_default_rules']
def get_security_group_default_rule(self, security_group_default_rule_id):
"""Return the details of provided Security Group default rule."""
resp, body = self.get('os-security-group-default-rules/%s' % str(
security_group_default_rule_id))
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['security_group_default_rule']
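# Minimal usage sketch (hypothetical auth_provider and values, not part of the
# original module):
#   client = SecurityGroupDefaultRulesClientJSON(auth_provider)
#   resp, rule = client.create_security_default_group_rule(
#       'tcp', 80, 80, cidr='10.10.0.0/24')
#   client.delete_security_group_default_rule(rule['id'])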
|
[
"ghanshyam.mann@nectechnologies.in"
] |
ghanshyam.mann@nectechnologies.in
|
cd4840afd5e0a89f9035ab55bc86e721ef4ae5ed
|
798abf905b3adf4a12d4f03589f71dfe2fdad6d8
|
/blogproject/blog/models.py
|
cf7a0e8c3ca6c581cf9375d7a06887fd3eb6e83d
|
[] |
no_license
|
javedattar99/blog-project
|
eda4c604bd2350e510c5d64c355c9308a9963d89
|
c3cd595719b8b1c12c764baf8d8980ebe714dd99
|
refs/heads/master
| 2021-05-18T00:09:43.044909
| 2020-03-29T11:24:20
| 2020-03-29T11:24:20
| 251,017,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,769
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from django.urls import *
from taggit.managers import TaggableManager
# Create your models here.
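# Default manager for Post: every queryset is filtered to published records,
# so Post.objects never returns drafts.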
class CustomManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(status='published')
class Post(models.Model):
STATUS_CHOICES = (('draft','Draft'),('published','Published'))
title = models.CharField(max_length=260)
slug = models.SlugField(max_length=260,unique_for_date='publish')
author = models.ForeignKey(User,related_name='blog_post', on_delete=models.CASCADE)
body = models.TextField()
publish = models.DateTimeField(default=timezone.now)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
status = models.CharField(max_length=10,choices=STATUS_CHOICES,default='draft')
objects = CustomManager()
tags = TaggableManager()
class Meta:
ordering = ('-publish',)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post_detail',args=[self.publish.year,self.publish.strftime('%m'),self.publish.strftime('%d'),self.slug])
#Model related To Comment
class Comment(models.Model):
post = models.ForeignKey(Post,related_name='comments',on_delete=models.CASCADE)
name = models.CharField(max_length=32)
email = models.EmailField()
body = models.TextField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
active = models.BooleanField(default=True)
class Meta:
ordering = ('-created',)
def __str__(self):
return 'Commented By {} on {}'.format(self.name,self.post)
|
[
"javedattar99@gmail.com"
] |
javedattar99@gmail.com
|
4a728295c3a472fc80f7a3207565e655f337be70
|
5d906e7ef93c289ab1864cc9607f7d5af2e41684
|
/Goat Latin.py
|
0d4208327ccb28a1892bf6158fe86c3bca5acefd
|
[] |
no_license
|
hsinhuibiga/August
|
09fff55fa80dc4fc72421477dc3d4c3c23ab6ae8
|
aeda268e6583b60275a361793bda3343f6994a59
|
refs/heads/master
| 2022-12-12T16:17:05.138127
| 2020-09-02T16:13:27
| 2020-09-02T16:13:27
| 284,263,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
#Goat Latin
class Solution(object):
def toGoatLatin(self, S):
"""
:type S: str
:rtype: str
"""
vowels = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
words = S.split(' ')
new_words = []
for i, word in enumerate(words):
if word[0] in vowels:
word += 'ma'
else:
word = word[1:] + word[0] + 'ma'
word += 'a' * (i + 1)
new_words.append(word)
return ' '.join(new_words)
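# Sanity check (hypothetical usage, not part of the original file):
#   Solution().toGoatLatin("I speak Goat Latin")
#   -> 'Imaa peaksmaaa oatGmaaaa atinLmaaaaa'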
|
[
"noreply@github.com"
] |
hsinhuibiga.noreply@github.com
|
9a86b4989b2c27732d9bee9417a7f53863026ae3
|
75c5bc4c44774ab1844c749fef8a07834b55d345
|
/wiki/migrations/0001_initial.py
|
8f8df634ab9b0c6b783cbbd37a5ebf1be213741f
|
[] |
no_license
|
3kwa/django-wiki
|
f928673d7671882a21fcc22757cc9391caf0a34a
|
6333ec5ec4e5cc9f063769d37ab16a933ec70260
|
refs/heads/master
| 2020-04-06T03:35:10.972937
| 2012-05-08T07:19:04
| 2012-05-08T07:19:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,083
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Page'
db.create_table('wiki_page', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('body', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('wiki', ['Page'])
# Adding model 'Edit'
db.create_table('wiki_edit', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Page'])),
('comment', self.gf('django.db.models.fields.TextField')()),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('wiki', ['Edit'])
def backwards(self, orm):
# Deleting model 'Page'
db.delete_table('wiki_page')
# Deleting model 'Edit'
db.delete_table('wiki_edit')
models = {
'wiki.edit': {
'Meta': {'object_name': 'Edit'},
'comment': ('django.db.models.fields.TextField', [], {}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Page']"})
},
'wiki.page': {
'Meta': {'object_name': 'Page'},
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'})
}
}
complete_apps = ['wiki']
|
[
"eugene.vandenbulke@gmail.com"
] |
eugene.vandenbulke@gmail.com
|
6e5fc815cf54f78ce4ea6c2cf2747addd77f84d4
|
0b85dfc5dc873b138653312c93fcd0c3edab9e07
|
/tools/indicators/__init__.py
|
16360f23f0f5c6818648c3a3118912c5ec65b7cf
|
[
"Apache-2.0"
] |
permissive
|
openself/TradzQAI
|
7891247ea6ff4b299d4fc01ff8c209e8eb8481ab
|
e63202df74f8820d32df5a8d1111fb25025a191f
|
refs/heads/master
| 2020-03-16T16:30:25.446362
| 2018-05-07T17:09:42
| 2018-05-07T17:09:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41
|
py
|
from .build_indicators import indicators
|
[
"awakeproduction@hotmail.fr"
] |
awakeproduction@hotmail.fr
|
d62080b3a759cf30a52a107aa1608fd1699e6488
|
0c6f7cd441dfe45b734dfc9711666ce0a41c8ead
|
/16a.py
|
36ffb7cabf3122d23121994cd01434e046311c46
|
[] |
no_license
|
parapente/AoC2017
|
5672f34bec5639c79c003b30da031c1e9fce4e3a
|
11a6e694eff170ae55374c24c959ffae37007f95
|
refs/heads/master
| 2020-04-23T17:45:32.687450
| 2019-02-18T19:31:08
| 2019-02-18T19:31:08
| 171,343,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 905
|
py
|
#!/usr/bin/python3
prog = []
for i in range(16):
prog.append(chr(ord('a') + i))
with open('16.dat') as f:
data = f.read()
data = data.rstrip()
moves = data.split(',')
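# Each move is one of three forms (AoC 2017 day 16):
#   sN   - spin: move the last N programs to the front, keeping their order
#   xA/B - exchange: swap the programs at positions A and B
#   pA/B - partner: swap the programs named A and B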
print("Starting pos: ", prog)
for move in moves:
cmd = list(move)
if cmd[0] == 's':
# Spin
num = int(''.join(cmd[1:]))
# print(prog[0:len(prog)-num])
# print(prog[len(prog)-num:])
prog = prog[len(prog) - num:] + prog[0:len(prog) - num]
if cmd[0] == 'x':
# eXchange
pos = ''.join(cmd[1:]).split('/')
pos = [int(pos[0]), int(pos[1])]
prog[pos[0]], prog[pos[1]] = prog[pos[1]], prog[pos[0]]
if cmd[0] == 'p':
# Partner
pos = [prog.index(cmd[1]), prog.index(cmd[3])]
prog[pos[0]], prog[pos[1]] = prog[pos[1]], prog[pos[0]]
# print(move, " - ", prog)
# input()
print("Program order after dance: ", ''.join(prog))
|
[
"int.teo@gmail.com"
] |
int.teo@gmail.com
|
ee539696d58ae256e4f69f6a983ed29e654579ae
|
966a6cad2bb800583aaaaae38fdddd2696abbe96
|
/users/migrations/0002_auto_20190824_1213.py
|
1c30d64ed9596a121fb2b5a5b2d877f00c35273d
|
[
"MIT"
] |
permissive
|
ispmor/space_reservation_system
|
a2e791161eedddfdc02769ac9cd116795bb560cd
|
459843c94bad82110a532db6e16d1075bc88f39b
|
refs/heads/master
| 2022-12-11T12:38:10.491789
| 2019-10-08T19:04:45
| 2019-10-08T19:04:45
| 186,705,958
| 0
| 1
|
NOASSERTION
| 2022-12-08T06:36:33
| 2019-05-14T21:53:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,433
|
py
|
# Generated by Django 2.2 on 2019-08-24 12:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='customuser',
name='archived',
field=models.BooleanField(blank=True, default=False, null=True),
),
migrations.AddField(
model_name='customuser',
name='group',
field=models.CharField(blank=True, choices=[('s', 'Student'), ('l', 'Lecturer'), ('e', 'External')], help_text='To which group does User qualify', max_length=1),
),
migrations.AddField(
model_name='customuser',
name='indeks',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='customuser',
name='permission',
field=models.CharField(blank=True, choices=[('b', 'Banned'), ('a', 'Allowed')], help_text='Is User allowed to create a reservation', max_length=1),
),
migrations.AlterField(
model_name='customuser',
name='first_name',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='customuser',
name='last_name',
field=models.CharField(max_length=50),
),
]
|
[
"jbryl7@gmail.com"
] |
jbryl7@gmail.com
|
7236e834838fb0c1a6b2fd9f74564bf7db4c7838
|
7e96ffa77f62b6b085c5da1bc583399d42955096
|
/Interface/bottleneck_manually.py
|
e0a73bb866ad0c2e7271a63c90d355f42d726f9d
|
[] |
no_license
|
SumantSakhalkar/hotNAS-CODES20
|
0c18686f3a00ff7f188476e96cde00d7e0f70db3
|
44ad9e37b0ee477bbe9b78dda486d49272cb40e9
|
refs/heads/master
| 2022-12-11T17:57:46.069796
| 2020-08-31T20:04:53
| 2020-08-31T20:04:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,040
|
py
|
from torchvision import models
from torchvision.models import *
from torch import nn
import torch
import sys
import math
sys.path.append("../Performance_Model")
sys.path.append("../")
import cifar10_models
import PM_Config
import PM_Layer
import PM_FPGA_Template
from search_space import *
from CONV_PM_IF import *
import argparse
from ztNAS_model_change import *
import copy_conv2d
from utility import *
def get_max_k(model):
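    # Scan all Conv2d layers and return the largest kernel size seen; is_same
    # is assumed to collapse a square (k, k) tuple to the scalar k.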
max_k = 0
for layer_name, layer in model.named_modules():
if isinstance(layer, nn.Conv2d):
cur_k = is_same(layer.kernel_size)
if cur_k > max_k:
max_k = cur_k
return max_k
def get_performance(model, dataset_name, HW1, HW2,device=None):
if dataset_name == "imagenet":
input = torch.Tensor(torch.Size([1, 3, 224, 224])).to(torch.float32)
elif dataset_name == "cifar10":
input = torch.Tensor(torch.Size([1, 3, 32, 32])).to(torch.float32)
cTT = 0
dTT = 0
count = [0,0,0,0]
cconv_quan_ss = []
cconv_quan_sn = []
quan_idx = 0
cconv_pattern = {}
for layer_name, layer in model.named_modules():
if isinstance(layer, nn.Conv2d) or isinstance(layer,copy_conv2d.Conv2d_Custom):
input_shape = list(input.shape)
input_shape[1] = layer.in_channels
input = torch.Tensor(torch.Size(input_shape)).to(torch.float32)
if device is not None:
input = input.to(device)
input = layer(input)
[B, M, N, R, C, K, S, T, P] = (
1, layer.out_channels, layer.in_channels, input.shape[2], input.shape[3], is_same(layer.kernel_size),
is_same(layer.stride), tell_conv_type(layer.in_channels, layer.groups), is_same(layer.padding))
if T == "cconv":
# w = model.state_dict()[layer_name + ".weight"]
# x = max(abs(float(w.min())), abs(float(w.max())))
# int_num, frac_num = re_quantize(x, 16, True)
# print('''quan_paras["{}"] = [{}, {}, True]'''.format(layer_name, int_num, frac_num))
[Tm, Tn, Tr, Tc, Tk, W_p, I_p, O_p] = HW2
[r_Ports, r_DSP, r_BRAM, r_BRAM_Size, BITWIDTH] = (
HW_constraints["r_Ports_BW"], HW_constraints["r_DSP"],
HW_constraints["r_BRAM"], HW_constraints["r_BRAM_Size"],
HW_constraints["BITWIDTH"])
# print("\t",layer_name,M, N, R, C, K, S, T)
Layer = PM_Layer.Layer_Class(B, M, N, R, C, K, S, "cconv", P)
acc_1 = PM_FPGA_Template.FPGA_Templates(Tm, Tn, Tr, Tc,
Tk, W_p, I_p, O_p, "cconv", r_Ports, r_DSP, r_BRAM, r_BRAM_Size,
BITWIDTH)
if acc_1.Success == False:
print(Tm, Tn, Tr, Tc,Tk, W_p, I_p, O_p, "cconv", r_Ports, r_DSP, r_BRAM, r_BRAM_Size, BITWIDTH)
return -1
else:
if isinstance(layer, copy_conv2d.Conv2d_Custom):
perf = acc_1.get_layer_latency(Layer, layer.pattern_ones, layer.quan_paras)
else:
perf = acc_1.get_layer_latency(Layer)
# print(perf[0])
cTT += perf[0]
if perf[1] == "loading Weight":
w = model.state_dict()[layer_name + ".weight"]
# # For conv_std only
# if True:
# v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
# w = (w - m) / torch.sqrt(v + 1e-10)
x = max(abs(float(w.min())), abs(float(w.max())))
int_num, frac_num = re_quantize(x, 16, True)
print('''quan_paras["{}"] = [{}, q_list[{}], True]'''.format(layer_name, int_num, quan_idx))
quan_idx+=1
# print("cconv", layer_name, "Kernel:", K, perf[0] / 10 ** 5, perf[1],
# [x / 10 ** 5 for x in perf[2]])
sorted_per = torch.tensor(perf[2]).sort()[0]
max_lat = sorted_per[-1].item()
sec_lat = sorted_per[-2].item()
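                        # Assumed intent: pick the quantization search floor so
                        # that trimming fractional bits would bring the
                        # weight-loading latency (max_lat) down toward the
                        # second-largest latency, clamped to [1, quan_ceil - 1].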
quan_ceil = 17 - int_num
quan_floor = min(max(math.floor(16/(float(max_lat)/sec_lat))-int_num,1),quan_ceil-1)
quan_count = 6
step = max(math.ceil((quan_ceil - quan_floor)/quan_count),1)
# print(range(quan_floor,quan_ceil,step))
cconv_quan_ss.append(list(range(quan_floor,quan_ceil,step)))
cconv_quan_sn.append("Qu")
if perf[1] == "computing":
# cconv_pattern.append(layer_name)
if K not in cconv_pattern.keys():
cconv_pattern[K] = [layer_name]
else:
cconv_pattern[K].append(layer_name)
# print(layer_name)
# print("cconv",layer_name, "Kernel:", K, perf[0] / 10 ** 5, perf[1], [x / 10 ** 5 for x in perf[2]])
if perf[1] == "loading Weight":
count[1]+=1
elif perf[1] == "loading IFM":
count[0]+=1
elif perf[1] == "storing OFM":
count[2] += 1
elif perf[1] == "computing":
count[3] += 1
else:
print(perf[1],"not recognized")
sys.exit(0)
elif T == "dconv":
# w = model.state_dict()[layer_name + ".weight"]
# x = max(abs(float(w.min())), abs(float(w.max())))
# int_num, frac_num = re_quantize(x, 16, True)
# print('''quan_paras["{}"] = [{}, {}, True]'''.format(layer_name, int_num, frac_num))
# print("\t",layer_name,M, N, R, C, K, S, T)
[Tm, Tn, Tr, Tc, Tk, W_p, I_p, O_p] = HW1
[r_Ports, r_DSP, r_BRAM, r_BRAM_Size, BITWIDTH] = (
HW_constraints["r_Ports_BW"], HW_constraints["r_DSP"],
HW_constraints["r_BRAM"], HW_constraints["r_BRAM_Size"],
HW_constraints["BITWIDTH"])
Layer = PM_Layer.Layer_Class(B, M, N, R, C, K, S, "dconv", P)
acc_2 = PM_FPGA_Template.FPGA_Templates(Tm, Tn, Tr, Tc,
Tk, W_p, I_p, O_p, "dconv", r_Ports, r_DSP, r_BRAM, r_BRAM_Size,
BITWIDTH)
if acc_2.Success == False:
return -1
else:
if isinstance(layer, copy_conv2d.Conv2d_Custom):
perf = acc_2.get_layer_latency(Layer, layer.pattern_ones, layer.quan_paras)
else:
perf = acc_2.get_layer_latency(Layer)
# print(perf[0])
dTT+=perf[0]
# if perf[1] == "loading Weight":
# w = model.state_dict()[layer_name + ".weight"]
# x = max(abs(float(w.min())), abs(float(w.max())))
# int_num, frac_num = re_quantize(x, 16, True)
# print('''quan_paras["{}"] = [{}, {}, True]'''.format(layer_name, int_num, frac_num))
#
#
if perf[1] == "computing":
# print(layer_name)
# print("dconv",layer_name, "Kernel:", K, perf[0] / 10 ** 5, perf[1], [x / 10 ** 5 for x in perf[2]])
if K not in cconv_pattern.keys():
cconv_pattern[K] = [layer_name]
else:
cconv_pattern[K].append(layer_name)
if perf[1] == "loading Weight":
count[1]+=1
elif perf[1] == "loading IFM":
count[0]+=1
elif perf[1] == "storing OFM":
count[2] += 1
elif perf[1] == "computing":
count[3] += 1
else:
print(perf[1],"not recognized")
sys.exit(0)
elif isinstance(layer, nn.MaxPool2d) or isinstance(layer, nn.AdaptiveAvgPool2d) or isinstance(layer,
nn.AvgPool2d):
input = layer(input)
print(len(cconv_quan_ss),cconv_quan_ss)
print(cconv_quan_sn)
for k,v in cconv_pattern.items():
print(k,len(v),v)
# print(len(cconv_pattern),cconv_pattern)
# 2 is 200 MHz
print(cTT,dTT)
return (cTT+dTT) / 10 ** 5 / 2, count
if __name__== "__main__":
parser = argparse.ArgumentParser('Parser User Input Arguments')
parser.add_argument(
'-m', '--model',
default='mobilenet_v2'
)
parser.add_argument(
'-c', '--cconv',
default="160, 12, 32, 32, 3, 10, 10, 10",
help="hardware desgin of cconv",
)
parser.add_argument(
'-dc', '--dconv',
default="576, 1, 32, 32, 3, 10, 10, 10",
help="hardware desgin of cconv",
)
parser.add_argument(
'-d', '--dataset',
default='imagenet'
)
parser.add_argument("--pretrained", dest="pretrained", help="Use pre-trained models from the modelzoo",
action="store_true", )
args = parser.parse_args()
model_name = args.model
dataset_name = args.dataset
if dataset_name == "imagenet":
if "proxyless" in model_name:
model = torch.hub.load('mit-han-lab/ProxylessNAS', model_name, pretrained=args.pretrained)
elif "FBNET" in model_name:
model = torch.hub.load('rwightman/gen-efficientnet-pytorch', 'fbnetc_100')
else:
model = globals()[model_name](pretrained=args.pretrained)
elif dataset_name == "cifar10":
model = getattr(cifar10_models, model_name)(pretrained=args.pretrained)
#
# print(model)
#
# for name, para in model.named_parameters():
# print(name)
HW1 = [int(x.strip()) for x in args.dconv.split(",")]
HW2 = [int(x.strip()) for x in args.cconv.split(",")]
# print("="*10,model_name,"performance analysis:")
# print(HW1)
# print(HW2)
# print(model)
total_lat,count = get_performance(model, dataset_name, HW1, HW2)
# print("=" * 10, model_name, "performance analysis end")
print(model_name, count, total_lat)
|
[
"jiang.wwen@gmail.com"
] |
jiang.wwen@gmail.com
|
12ee8ccb3fb5c7f285ab4ea4e1f23ef93bccf793
|
7970b23786e3266fcef19898506666345aa61895
|
/myself/northernlights/northernlights/settings.py
|
1fdd305250d381f86eac577bd994aba4c7d23980
|
[] |
no_license
|
hikaruendo/django
|
16703924d0cbb811ebcce048016a1a2dca94ebbc
|
bceb77784bb4aace149e623cf79d7ee49caad6d2
|
refs/heads/master
| 2020-04-07T23:12:25.439499
| 2018-12-03T10:25:30
| 2018-12-03T10:25:30
| 158,788,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,246
|
py
|
"""
Django settings for northernlights project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i^1g49_nla391$zab5hq!g*i!fvj@ec*696%=Yby!@v(m08'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'penguin',
'rest',
'upload',
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'northernlights.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'northernlights.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'ja-JP'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
|
[
"hika.blue91@gmail.com"
] |
hika.blue91@gmail.com
|
c0ea5bd89da5ab7c7d8075fedbc8eae7fedd937b
|
66f2dbe83d8ddd8a3dc323e1414ffb7534074c55
|
/zajecia 11/Function.py
|
e5acfc0e0fbc5056da0e0a6ea2d5c08e96ca328b
|
[] |
no_license
|
Kebsiula2007/Klasa-2
|
13fc5f9c161248fd9792f6d76bbbd18df818bec9
|
ab85b4181f6af45b7d609fd7d90def777e4b83e9
|
refs/heads/master
| 2023-08-10T17:28:33.337111
| 2021-06-07T20:47:36
| 2021-06-07T20:47:36
| 307,505,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
from abc import ABC
class Function(ABC):
@classmethod
def create_function(cls, **kwargs):
raise Exception("Something went wrong.")
|
[
"noreply@github.com"
] |
Kebsiula2007.noreply@github.com
|
d8fab346d521cb6463e34992254d59013c61b259
|
6883889c573014cb0b51d56073fa50aff9a37e89
|
/sr_pdc_management/models/pdc_payment.py
|
0d0d5b5a6a6ded9e12c1c0b7c60bb99940d1eba5
|
[] |
no_license
|
dionisiotorres/ATPM
|
4bff421dd26f92a63d49549e5385d2b87a79c3c5
|
7b38a39dba277854171ac98c9530b9e09c1b08b0
|
refs/heads/master
| 2022-11-30T01:44:54.019996
| 2020-05-14T15:39:51
| 2020-05-14T15:39:51
| 285,966,205
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 34,966
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This module uses OpenERP, Open Source Management Solution Framework.
# Copyright (C) 2017-Today Sitaram
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from odoo import models, fields, api, _
from odoo.exceptions import UserError
from odoo.tools import float_is_zero
from dateutil.relativedelta import *
import datetime
from datetime import timedelta
MAP_INVOICE_TYPE_PAYMENT_SIGN = {
'out_invoice': 1,
'in_refund': -1,
'in_invoice': -1,
'out_refund': 1,
}
# class ResConfigSettings(models.TransientModel):
# _inherit = 'res.config.settings'
#
# customer_pdc_payment_account = fields.Many2one('account.account', 'PDC Payment Account for Customer')
# vendor_pdc_payment_account = fields.Many2one('account.account', 'PDC Payment Account for Vendors/Suppliers')
#
# @api.model
# def get_values(self):
# res = super(ResConfigSettings, self).get_values()
#
# res['customer_pdc_payment_account'] = int(
# self.env['ir.config_parameter'].sudo().get_param('customer_pdc_payment_account', default=0))
# res['vendor_pdc_payment_account'] = int(
# self.env['ir.config_parameter'].sudo().get_param('vendor_pdc_payment_account', default=0))
#
# return res
#
# @api.model
# def set_values(self):
# self.env['ir.config_parameter'].sudo().set_param('customer_pdc_payment_account',
# self.customer_pdc_payment_account.id)
# self.env['ir.config_parameter'].sudo().set_param('vendor_pdc_payment_account',
# self.vendor_pdc_payment_account.id)
#
# super(ResConfigSettings, self).set_values()
class AccountMoveLine(models.Model):
_inherit = "account.move.line"
pdc_id = fields.Many2one('sr.pdc.payment', 'Post Dated Cheques')
class AccountInvoice(models.Model):
_inherit = "account.move"
is_pdc_invoice = fields.Boolean('PDC')
pdc_id = fields.Many2one('sr.pdc.payment', 'Post Dated Cheques')
@api.onchange('partner_id')
def _onchange_partner_id(self):
if self._context.get('default_type', False) == 'out_invoice':
return {'domain': {'partner_id': [('customer_rank', '>', 0)]}}
elif self._context.get('default_type', False) == 'in_invoice':
return {'domain': {'partner_id': [('supplier_rank', '>', 0)]}}
def name_get(self):
name_array = []
for record in self:
if record.name != '/':
name_array.append(
(record.id, record.name + ' ' + '[' + '{:.2f}'.format(record.amount_residual) + ']'))
else:
name_array.append((record.id, 'Draft Invoice (*{})'.format(record.id)))
return name_array
@api.model
def _get_default_invoice_date(self):
return fields.Date.today() if self._context.get('default_type', 'entry') in (
'in_invoice', 'in_refund', 'in_receipt') else False
invoice_date = fields.Date(string='Invoice/Bill Date', readonly=True, index=True, copy=False,
states={'draft': [('readonly', True)]},
default=_get_default_invoice_date)
@api.model
def default_get(self, fields_list):
res = super(AccountInvoice, self).default_get(fields_list)
if res.get('invoice_date') == False:
res.update({'invoice_date': datetime.date.today()})
return res
@api.depends(
'state', 'currency_id', 'invoice_line_ids.price_subtotal',
'move_id.line_ids.amount_residual',
'move_id.line_ids.currency_id')
def _compute_residual(self):
residual = 0.0
residual_company_signed = 0.0
sign = self.type in ['in_refund', 'out_refund'] and -1 or 1
for line in self.sudo().move_id.line_ids:
if line.account_id == self.account_id:
residual_company_signed += line.amount_residual
if line.currency_id == self.currency_id:
residual += line.amount_residual_currency if line.currency_id else line.amount_residual
else:
from_currency = line.currency_id or line.company_id.currency_id
residual += from_currency._convert(line.amount_residual, self.currency_id, line.company_id,
line.date or fields.Date.today())
if self._context.get('pdc'):
residual = 0
self.residual_company_signed = abs(residual_company_signed) * sign
self.residual_signed = abs(residual) * sign
self.residual = abs(residual)
digits_rounding_precision = self.currency_id.rounding
if float_is_zero(self.residual, precision_rounding=digits_rounding_precision):
self.reconciled = True
else:
self.reconciled = False
# When you click on add in invoice then this method called
def assign_outstanding_credit(self, credit_aml_id):
self.ensure_one()
credit_aml = self.env['account.move.line'].browse(credit_aml_id)
if not credit_aml.currency_id and self.currency_id != self.company_id.currency_id:
amount_currency = self.company_id.currency_id._convert(credit_aml.balance, self.currency_id,
self.company_id,
credit_aml.date or fields.Date.today())
credit_aml.with_context(allow_amount_currency=True, check_move_validity=False).write({
'amount_currency': amount_currency,
'currency_id': self.currency_id.id})
if credit_aml.payment_id:
credit_aml.payment_id.write({'invoice_ids': [(4, self.id, None)]})
if credit_aml.pdc_id:
credit_aml.pdc_id.write({'invoice_ids': [(4, self.id, None)]})
return self.register_payment(credit_aml)
class PdcPayment(models.Model):
_name = "sr.pdc.payment"
_inherit = ['mail.thread', 'mail.activity.mixin']
invoice_ids = fields.Many2many('account.move', 'account_invoice_pdc_rel', 'pdc_id', 'invoice_id',
string="Invoices", copy=False, readonly=True)
partner_id = fields.Many2one('res.partner', string='Partner', required=True, copy=False)
state = fields.Selection(
[('draft', 'Draft'), ('register', 'Registered'), ('return', 'Returned'), ('deposit', 'Deposited'),
('bounce', 'Bounced'), ('done', 'Done'), ('cancel', 'Cancelled')], readonly=True, default='draft', copy=False,
string="Status")
journal_id = fields.Many2one('account.journal', string='Payment Journal', required=True,
domain=[('type', 'in', ['bank'])])
# company_id = fields.Many2one('res.company', related='journal_id.company_id', string='Company', readonly=True)
# 6/4/2020 JIQ
company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env.company, required=True)
amount = fields.Monetary(string='Payment Amount', required=True)
currency_id = fields.Many2one('res.currency', string='Currency', required=True,
default=lambda self: self.env.user.company_id.currency_id)
payment_date = fields.Date(string='Payment Date', default=fields.Date.context_today, required=True, copy=False)
due_date = fields.Date(string='Due Date', default=fields.Date.context_today, required=True, copy=False)
# Muhammad Jawaid Iqbal
# communication = fields.Char(string='Memo')
communication = fields.Many2many('account.move', string='Memo')
cheque_ref = fields.Char('Cheque No.')
agent = fields.Char('Agent / Person')
bank = fields.Many2one('res.bank', string="Cheque Bank")
name = fields.Char('Name')
employee_id = fields.Many2one('hr.employee', 'Receiver')
receipt_no = fields.Integer('Receipt No.')
payment_type = fields.Selection([('outbound', 'Send Money'), ('inbound', 'Receive Money')], string='Payment Type',
required=True)
customer_pdc_payment_account = fields.Many2one('account.account', 'PDC Payment Account for Customer')
vendor_pdc_payment_account = fields.Many2one('account.account', 'PDC Payment Account for Vendors/Suppliers')
supplier_rank = fields.Integer(related='partner_id.supplier_rank')
customer_rank = fields.Integer(related='partner_id.customer_rank')
attchment_ids = fields.One2many('ir.attachment', 'payment_id', string='Create Attachment')
attachment_count = fields.Integer(compute='_compute_attachment_count', string='Attachment')
journal_items_count = fields.Integer(compute='_compute_journal_items_count', string='Journal Items')
journal_entry_count = fields.Integer(compute='_compute_journal_entry_count', string='Journal Entries')
def _compute_attachment_count(self):
self.env.cr.execute(
"""select count(id) from ir_attachment where payment_id = {}""".format(self.id))
rs = self.env.cr.dictfetchone()
self.attachment_count = rs['count']
def _compute_journal_items_count(self):
self.env.cr.execute(
"""select count(id) from account_move_line where partner_id = {} and pdc_id = {}""".format(
self.partner_id.id, self.id))
rs = self.env.cr.dictfetchone()
self.journal_items_count = rs['count']
def _compute_journal_entry_count(self):
self.env.cr.execute(
"""select count(id) from account_move where pdc_id = {} and type = 'entry'""".format(self.id))
rs = self.env.cr.dictfetchone()
self.journal_entry_count = rs['count']
def attachment_on_account_cheque(self):
return {
'type': 'ir.actions.act_window',
'name': 'Attachment.Details',
'res_model': 'ir.attachment',
'view_mode': 'tree,form',
'domain': [('payment_id', '=', self.id)]
}
def action_view_jornal_items(self):
return {
'type': 'ir.actions.act_window',
'name': 'Journal Items',
'res_model': 'account.move.line',
'view_mode': 'tree,form',
'domain': [('partner_id', '=', self.partner_id.id), ('pdc_id', '=', self.id)]
}
def action_view_jornal_entry(self):
return {
'type': 'ir.actions.act_window',
'name': 'Journal Entries',
'res_model': 'account.move',
'view_mode': 'tree,form',
'domain': [('pdc_id', '=', self.id), ('type', '=', 'entry')]
}
@api.onchange('partner_id')
def _onchange_partner_id(self):
if self.partner_id:
self.customer_pdc_payment_account = self.partner_id.customer_pdc_payment_account.id
self.vendor_pdc_payment_account = self.partner_id.vendor_pdc_payment_account.id
invoices = self.partner_id.unpaid_invoices
if invoices:
return {'domain': {'communication': [('id', 'in', invoices.ids)]}}
def get_amount_in_words(self, amount_total):
amount_in_words = self.company_id.currency_id.amount_to_text(amount_total)
return amount_in_words.upper() + ' ONLY'
def get_default_date_format(self, payment_date):
lang_id = self.env['res.lang'].search([('code', '=', self.env.context.get('lang', 'en_US'))])
if payment_date:
return datetime.datetime.strptime(str(payment_date), '%Y-%m-%d').strftime(lang_id.date_format)
def _cron_send_payment_history(self):
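        # Cron entry point (assumes a daily scheduled action): e-mail a
        # reminder for cheques falling due tomorrow.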
payments = self.search([('due_date', '=', datetime.date.today() + timedelta(days=1))])
if payments:
self.env.ref('sr_pdc_management.email_template_reminder_payment').send_mail(payments.ids[0])
def get_payment_history(self):
payments = self.search([('due_date', '=', datetime.date.today() + timedelta(days=1))])
if payments:
return payments
def get_user(self):
return self.env.user.name
def get_currency_symbol(self):
if self.env['res.company']._company_default_get():
return self.env['res.company']._company_default_get().currency_id.symbol
# @api.onchange('payment_type')
# def _onchange_payment_type(self):
# self.ensure_one()
# # Set partner_id domain
# if self.payment_type == 'inbound':
# return {'domain': {'partner_id': [('customer_rank', '=', 1)]}}
# else:
# return {'domain': {'partner_id': [('supplier_rank', '=', 1)]}}
@api.onchange('journal_id')
def _default_currency(self):
if self.journal_id:
journal = self.journal_id
currency_id = journal.currency_id or journal.company_id.currency_id or self.env.user.company_id.currency_id
self.currency_id = currency_id.id
else:
self.currency_id = False
@api.model
def default_get(self, fields):
rec = super(PdcPayment, self).default_get(fields)
context = dict(self._context or {})
# Checks on received invoice records
invoices = self.env['account.move'].browse(context.get('active_ids'))
if len(invoices.mapped('partner_id')) > 1:
raise UserError(_("You must have one partner for both the invoices"))
# if any(invoice.state != 'open' for invoice in invoices):
# raise UserError(_("You can only register check for open invoices"))
total_amount = sum(inv.amount_residual * MAP_INVOICE_TYPE_PAYMENT_SIGN[inv.type] for inv in invoices)
# Muhammad Jawaid Iqbal
# communication = ' '.join([ref for ref in invoices.mapped('invoice_payment_ref') if ref])
if invoices:
if invoices.mapped('type')[0] == 'in_invoice':
payment_type = 'outbound'
else:
payment_type = 'inbound'
else:
payment_type = 'inbound'
rec.update({
'payment_type': payment_type,
'name': invoices.mapped('name'),
'amount': abs(total_amount),
'currency_id': invoices[0].currency_id.id if invoices else False,
'partner_id': invoices[0].commercial_partner_id.id if invoices else False,
'customer_pdc_payment_account': invoices[
0].commercial_partner_id.customer_pdc_payment_account.id if invoices else False,
'vendor_pdc_payment_account': invoices[
0].commercial_partner_id.vendor_pdc_payment_account.id if invoices else False,
'communication': [(6, 0, self._context.get('active_ids'))],
})
return rec
def get_credit_entry(self, partner_id, move, credit, debit, amount_currency, journal_id, name,
account_id, currency_id, payment_date):
return {
'partner_id': partner_id.id,
# 'invoice_id': invoice_ids.id if len(invoice_ids) == 1 else False,
'move_id': move.id,
'debit': debit,
'credit': credit,
'amount_currency': amount_currency or False,
'payment_id': False,
'journal_id': journal_id.id,
'name': name,
'account_id': account_id,
'currency_id': currency_id if currency_id != self.company_id.currency_id.id else False,
'date_maturity': payment_date,
'exclude_from_invoice_tab': True,
'pdc_id': self.id
}
def get_debit_entry(self, partner_id, move, credit, debit, amount_currency, journal_id, name,
account_id, currency_id):
return {
'partner_id': partner_id.id,
# 'invoice_id': invoice_ids.id if len(invoice_ids) == 1 else False,
'move_id': move.id,
'debit': debit,
'credit': credit,
'amount_currency': amount_currency or False,
'payment_id': False,
'journal_id': journal_id.id,
'name': name,
'account_id': account_id,
'currency_id': currency_id if currency_id != self.company_id.currency_id.id else False,
'exclude_from_invoice_tab': True,
'pdc_id': self.id
}
def cancel(self):
self.state = 'cancel'
def register(self):
if self.customer_pdc_payment_account or self.vendor_pdc_payment_account:
all_move_vals = []
inv = self.env['account.move'].browse(self._context.get('active_ids')) or self.invoice_ids
if inv:
inv.write({'is_pdc_invoice': True, 'pdc_id': self.id})
self.state = 'register'
if self.customer_rank > 0:
self.name = self.env['ir.sequence'].next_by_code('pdc.payment')
account_id = self.partner_id.property_account_receivable_id
balance = -self.amount
else:
self.name = self.env['ir.sequence'].next_by_code('pdc.payment.vendor')
account_id = self.partner_id.property_account_payable_id
balance = self.amount
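            # Build a two-line journal entry: the partner's receivable/payable
            # line plus a counterpart on the configured PDC holding account.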
move_vals = {
'date': self.payment_date,
'ref': ','.join(self.communication.mapped('name')),
'journal_id': self.journal_id.id,
'currency_id': self.journal_id.currency_id.id or self.company_id.currency_id.id,
'partner_id': self.partner_id.id,
'line_ids': [
# Receivable / Payable / Transfer line.
(0, 0, {
'name': '{} Payment: '.format('Customer' if self.customer_rank > 0 else 'Vendor') + ','.join(
self.communication.mapped('name')),
'amount_currency': 0.0 + 0.0,
'currency_id': False,
'debit': balance + 0.0 > 0.0 and balance + 0.0 or 0.0,
'credit': balance + 0.0 < 0.0 and -balance - 0.0 or 0.0,
'date_maturity': self.payment_date,
'partner_id': self.partner_id.id,
'account_id': account_id.id,
'pdc_id': self.id,
}),
# Liquidity line .
(0, 0, {
'name': self.name,
'amount_currency': -0.0,
'currency_id': False,
'debit': balance < 0.0 and -balance or 0.0,
'credit': balance > 0.0 and balance or 0.0,
'date_maturity': self.payment_date,
'partner_id': self.partner_id.id,
'account_id': self.partner_id.customer_pdc_payment_account.id if self.customer_rank > 0 else self.partner_id.vendor_pdc_payment_account.id,
'pdc_id': self.id,
}),
],
}
all_move_vals.append(move_vals)
moves = self.env['account.move'].with_context(default_type='entry').create(all_move_vals)
moves.filtered(lambda move: move.journal_id.post_at != 'bank_rec').post()
if self.payment_type in ('inbound', 'outbound'):
if inv:
(moves[0] + inv).line_ids.filtered(
lambda line: not line.reconciled and line.account_id == account_id).reconcile()
else:
raise UserError(_("Configuration Error: Please define account for the PDC payment."))
return
def return_cheque(self):
self.state = 'return'
return
def deposit(self):
# if self.customer_pdc_payment_account or self.vendor_pdc_payment_account:
# # JIQ 20/4/2020
# inv = self.env['account.move'].browse(self._context.get('active_ids'))
# # JIQ 20/4/2020
# aml_obj = self.env['account.move.line'].with_context(check_move_validity=False)
# if inv:
# inv.invoice_payment_state = 'paid'
# custom_currency_id = inv.currency_id
# company_currency_id = inv.company_id.currency_id
# account_id = inv.account_id.id
# else:
# custom_currency_id = self.currency_id
# company_currency_id = self.env.user.company_id.currency_id
# if self.payment_type == 'inbound':
# account_id = self.partner_id.property_account_receivable_id.id
# else:
# account_id = self.partner_id.property_account_payable_id.id
# amount_currency, debit, credit = list(
# aml_obj._get_fields_onchange_subtotal_model(self.amount, 'entry', self.currency_id,
# self.env.user.company_id, self.payment_date).values())
# move = self.env['account.move'].create(self._get_move_vals())
# ################# Credit Entry ######################
# name = ''
# if inv:
# name += 'PDC Payment: '
# for record in inv:
# if record.move_id:
# name += record.number + ', '
# name = name[:len(name) - 2]
# if self.payment_type == 'inbound':
# credit_entry = self.get_credit_entry(self.partner_id, move, debit, credit, amount_currency,
# self.journal_id, name, account_id, custom_currency_id.id,
# self.payment_date)
# else:
# credit_entry = self.get_credit_entry(self.partner_id, move, debit, credit, amount_currency,
# self.journal_id, name, self.vendor_pdc_payment_account.id,
# custom_currency_id.id,
# self.payment_date)
# aml_obj.create(credit_entry)
# ################ Debit Entry #############################
# if self.payment_type == 'inbound':
# debit_entry = self.get_debit_entry(self.partner_id, move, credit, debit, amount_currency,
# self.journal_id, name, self.customer_pdc_payment_account.id,
# custom_currency_id.id)
# else:
# debit_entry = self.get_debit_entry(self.partner_id, move, credit, debit, amount_currency,
# self.journal_id, name, account_id, custom_currency_id.id)
# aml_obj.create(debit_entry)
# move.post()
# else:
# raise UserError(_("Configuration Error: Please define account for the PDC payment."))
self.state = 'deposit'
return True
def bounce(self):
if self.customer_pdc_payment_account or self.vendor_pdc_payment_account:
if self.payment_type == 'inbound':
account_id = self.partner_id.property_account_receivable_id.id
else:
account_id = self.partner_id.property_account_payable_id.id
aml_obj = self.env['account.move.line'].with_context(check_move_validity=False)
amount_currency, debit, credit = list(
aml_obj._get_fields_onchange_subtotal_model(self.amount, 'entry', self.currency_id,
self.env.user.company_id, self.payment_date).values())
move = self.env['account.move'].create(self._get_move_vals())
################# Credit Entry ######################
# name = ''
# if self.invoice_ids:
# name += 'PDC Payment: '
# for record in self.invoice_ids:
# if record.move_id:
# name += record.number + ', '
# name = name[:len(name) - 2]
name = 'PDC return'
if self.payment_type == 'inbound':
credit_entry = self.get_credit_entry(self.partner_id, move, debit, credit,
amount_currency,
self.journal_id, name, self.customer_pdc_payment_account.id, False,
self.payment_date)
else:
credit_entry = self.get_credit_entry(self.partner_id, move, debit, credit,
amount_currency,
self.journal_id, name, account_id, False,
self.payment_date)
aml_obj.create(credit_entry)
################ Debit Entry #############################
if self.payment_type == 'inbound':
debit_entry = self.get_debit_entry(self.partner_id, move, credit, debit,
amount_currency,
self.journal_id, name, account_id, False)
else:
debit_entry = self.get_debit_entry(self.partner_id, move, credit, debit,
amount_currency,
self.journal_id, name, self.vendor_pdc_payment_account.id, False)
aml_obj.create(debit_entry)
move.post()
self.state = 'bounce'
for record in self.invoice_ids:
record.state = 'posted'
else:
raise UserError(_("Configuration Error: Please define account for the PDC payment."))
return True
def done(self):
if self.customer_pdc_payment_account or self.vendor_pdc_payment_account:
aml_obj = self.env['account.move.line'].with_context(check_move_validity=False)
amount_currency, debit, credit = list(
aml_obj._get_fields_onchange_subtotal_model(self.amount, 'entry', self.currency_id,
self.env.user.company_id, self.payment_date).values())
move = self.env['account.move'].create(self._get_move_vals())
if self.payment_type == 'inbound':
account_id = self.journal_id.default_debit_account_id.id
else:
account_id = self.journal_id.default_credit_account_id.id
################# Credit Entry ######################
name = ''
if self.invoice_ids:
name += 'PDC Payment: '
for record in self.invoice_ids:
if record:
name += record.name + ', '
name = name[:len(name) - 2]
if self.payment_type == 'inbound':
credit_entry = self.get_credit_entry(self.partner_id, move, debit, credit,
amount_currency,
self.journal_id, name, self.customer_pdc_payment_account.id, False,
self.payment_date)
else:
credit_entry = self.get_credit_entry(self.partner_id, move, debit, credit,
amount_currency,
self.journal_id, name, account_id, False,
self.payment_date)
aml_obj.create(credit_entry)
################ Debit Entry #############################
if self.payment_type == 'inbound':
debit_entry = self.get_debit_entry(self.partner_id, move, credit, debit,
amount_currency,
self.journal_id, name, account_id, False)
else:
debit_entry = self.get_debit_entry(self.partner_id, move, credit, debit,
amount_currency,
self.journal_id, name, self.vendor_pdc_payment_account.id, False)
aml_obj.create(debit_entry)
move.post()
self.state = 'done'
else:
raise UserError(_("Configuration Error: Please define account for the PDC payment."))
return True
def _get_move_vals(self, journal=None):
""" Return dict to create the payment move
"""
journal = journal or self.journal_id
return {
'date': self.payment_date,
'ref': ','.join(self.communication.mapped('name')) or '',
'company_id': self.company_id.id,
'journal_id': journal.id,
'pdc_id': self.id
}
class PdcReportWizard(models.TransientModel):
_name = 'pdc.report.wizard'
from_date = fields.Date(string="Start Date")
to_date = fields.Date(string="End Date")
partner_ids = fields.Many2many('res.partner', string='Partner')
partner_type = fields.Selection([('customer', 'Customer'), ('supplier', 'Vendor')])
payment_type = fields.Selection([('outbound', 'Send Money'), ('inbound', 'Receive Money')], string='Payment Type')
state = fields.Selection(
[('register', 'Registered'), ('return', 'Returned'), ('deposit', 'Deposited'),
('bounce', 'Bounced'), ('done', 'Done'), ('cancel', 'Cancelled')], string="Status")
def get_state_label(self, state):
return dict(self.env['sr.pdc.payment'].fields_get(['state'])['state']['selection'])[state]
def get_default_date_format(self, payment_date):
lang_id = self.env['res.lang'].search([('code', '=', self.env.context.get('lang', 'en_US'))])
if payment_date:
return datetime.datetime.strptime(str(payment_date), '%Y-%m-%d').strftime(lang_id.date_format)
@api.onchange('payment_type')
def _onchange_payment_type(self):
if self.payment_type:
if self.payment_type == 'outbound':
partners = self.env['res.partner'].search([('supplier_rank', '>', 0)]).ids
return {'domain': {'partner_ids': [('id', 'in', partners)]}}
else:
return {'domain': {
'partner_ids': [('id', 'in', self.env['res.partner'].search([('customer_rank', '>', 0)]).ids)]}}
@api.onchange('partner_type')
def _onchange_partner_type(self):
if self.partner_type:
if self.partner_type == 'customer':
partners = self.env['res.partner'].search([('customer_rank', '>', 0)]).ids
return {'domain': {'partner_ids': [('id', 'in', partners)]}}
else:
return {'domain': {
'partner_ids': [('id', 'in', self.env['res.partner'].search([('supplier_rank', '>', 0)]).ids)]}}
def generate_report(self):
data = {}
data['form'] = self.read(['from_date', 'to_date', 'state', 'partner_ids', 'partner_type', 'payment_type'])[0]
return self.env.ref('sr_pdc_management.sr_pdc_report_action').report_action(self, data=data)
class ReportPdc(models.AbstractModel):
_name = 'report.sr_pdc_management.pdc_report_template'
@api.model
def _get_report_values(self, docids, data=None):
result_arr = []
partner_payments = self.env['sr.pdc.payment'].search(
[('payment_date', '>=', data['form']['from_date']), ('payment_date', '<=', data['form']['to_date'])])
if data['form']['partner_type']:
partners = partner_payments.mapped('partner_id').filtered(
lambda p: p.customer_rank if data['form']['partner_type'] == 'customer' else p.supplier_rank)
if data['form']['partner_ids']:
partners = self.env['res.partner'].browse(data['form']['partner_ids'])
if not data['form']['partner_type'] and not data['form']['partner_ids']:
partners = partner_payments.mapped('partner_id')
for partner in partners:
payments = partner_payments.filtered(lambda payment: payment.partner_id == partner)
if data['form']['state']:
payments = payments.filtered(lambda payment: payment.state == data['form']['state'])
if data['form']['payment_type']:
payments = payments.filtered(lambda payment: payment.payment_type == data['form']['payment_type'])
result_arr.append({partner.name: payments})
return {
'get_payments_vals': result_arr,
# 'partner_type': data['form']['partner_type'],
'partners': partners,
'doc': self.env['pdc.report.wizard'].browse(data['form']['id'])
}
class MailMail(models.Model):
_inherit = 'mail.mail'
@api.model
def create(self, vals):
mail = super(MailMail, self).create(vals)
mail.send()
return mail
class IrAttachment(models.Model):
_inherit = 'ir.attachment'
payment_id = fields.Many2one('sr.pdc.payment')
class BulkChequeDeposit(models.TransientModel):
_name = 'bulk.cheque.deposit'
def cheque_button_deposit(self):
pdc_ids = self.env['sr.pdc.payment'].browse(self._context.get('active_ids'))
for pdc_id in pdc_ids:
if pdc_id.state != 'register':
raise UserError(_('%s should be in Registered state !!!' % (pdc_id.name)))
pdc_id.deposit()
class AccountAccount(models.Model):
_inherit = 'account.account'
is_pdc_account = fields.Boolean('PDC')
class ResPartner(models.Model):
_inherit = 'res.partner'
customer_pdc_payment_account = fields.Many2one('account.account', 'PDC Payment Account for Customer')
vendor_pdc_payment_account = fields.Many2one('account.account', 'PDC Payment Account for Vendors/Suppliers')
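# Usage sketch (illustrative only; `env` is an Odoo environment and the record
# id is hypothetical):
#   pdc = env['sr.pdc.payment'].browse(1)
#   pdc.deposit()   # post the cheque amount into the configured PDC account
#   pdc.done()      # clear the PDC account against the journal's default account
# Each workflow method raises a UserError unless customer_pdc_payment_account /
# vendor_pdc_payment_account is configured for the payment.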
|
[
"jawaid94@outlook.com"
] |
jawaid94@outlook.com
|
84cb99ebac76ac20f4051bfe5540a4f52fcc2d3d
|
6e6ba2da921bd63d60c8f8ff1a0b181d2584dd3f
|
/rock_paper_scissor.py
|
80cbaca5d39cd371358515225aa6ab901fc7f604
|
[] |
no_license
|
SohailHaqyar/Python-Games
|
85409dc06547c72e5cdf3155446d7e647178dce8
|
d5dca446be33a98418d4bacb534dfc1b71f4cdab
|
refs/heads/main
| 2023-06-19T23:35:42.794656
| 2021-07-21T15:09:20
| 2021-07-21T15:09:20
| 388,156,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
import random
user_wins = 0
computer_wins = 0
options = ["rock", "paper", "scissors"]
while True:
user_input = input('Type Rock/Paper/Scissors or Q to quit: ').lower()
if user_input == 'q':
break
if user_input not in options:
continue
random_number = random.randint(0, 2)
    # rock: 0, paper: 1, scissors: 2
computer_pick = options[random_number]
print('Computer picked', computer_pick)
if user_input == "rock" and computer_pick == 'scissors':
print('You won!')
user_wins += 1
elif user_input == "paper" and computer_pick == 'rock':
print('You won!')
user_wins += 1
elif user_input == "scissors" and computer_pick == 'paper':
print('You won!')
user_wins += 1
elif user_input == computer_pick:
print('It is a draw')
else:
print('You lost!')
computer_wins += 1
print('You won ', user_wins, 'time(s)')
print('Computer won ', computer_wins, 'time(s)')
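# Note: computer_pick = random.choice(options) would be an equivalent, slightly
# more idiomatic way to pick the computer's move than indexing with randint.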
|
[
"haqyarsohail@gmail.com"
] |
haqyarsohail@gmail.com
|
3998d75cb6c814054e8b7112f74ffd8040e33b79
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/network/azure-mgmt-network/generated_samples/local_network_gateway_create.py
|
142a0b77bdf2d6d39ea9b095884f701e7e844cfa
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,867
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-network
# USAGE
python local_network_gateway_create.py
    Before running the sample, set the values of the client ID, tenant ID and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = NetworkManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.local_network_gateways.begin_create_or_update(
resource_group_name="rg1",
local_network_gateway_name="localgw",
parameters={
"location": "Central US",
"properties": {
"fqdn": "site1.contoso.com",
"gatewayIpAddress": "11.12.13.14",
"localNetworkAddressSpace": {"addressPrefixes": ["10.1.0.0/16"]},
},
},
).result()
print(response)
# x-ms-original-file: specification/network/resource-manager/Microsoft.Network/stable/2022-11-01/examples/LocalNetworkGatewayCreate.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
8101896bd5492cdcabf2ae1004c54432dc1ed030
|
982d2ab45497796ecfddc644c5d77ea65de96a69
|
/ms/tpl.py
|
04c9221979946ea012e79dc40032a2a6c96e7d3c
|
[] |
no_license
|
richardhaslam/dfn
|
0701893dc859548d9465227c2e5ef849aa188a93
|
60ef114ea1dc14ec46253706f3d5bab38d62ae15
|
refs/heads/master
| 2022-02-15T21:55:29.080808
| 2019-07-29T07:29:34
| 2019-07-29T07:29:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
import numpy as np
class TPL:
def __init__(self, k, l_boundary, h_boundary, r_0):
self.k = k
self.l_boundary = l_boundary
self.h_boundary = h_boundary
self.f_0 = -(r_0 ** k)
self.r_0 = r_0
    def rnd_number(self, size=1):
        """
        Draw samples from the power law truncated to [l_boundary, h_boundary]
        via inverse-transform sampling of uniform random numbers.
        :param size: number of samples to draw
        :return: numpy array of sampled values
        """
        x = np.random.random(size=size)
        l_boundary_g = np.float_power(self.l_boundary, -self.k)
        h_boundary_g = np.float_power(self.h_boundary, -self.k)
        # Map the uniform draws onto the transformed (x**-k) boundary interval.
        y = x / self.f_0
        y_max = 0
        y_min = 1 / self.f_0
        y_range = y_max - y_min
        boundary_range = h_boundary_g - l_boundary_g
        new_x = (((y - y_min) * boundary_range) / y_range) + l_boundary_g
        return new_x ** (-1 / self.k)
# def pdf(self, x):
# l_boundary_g = np.float_power(self.l_boundary, -self.k)
# h_boundary_g = np.float_power(self.h_boundary, -self.k)
#
# return self.k *(self.r_0 ** self.k) *x ** (-self.k - 1)#/ (h_boundary_g - l_boundary_g)
# #return (self.k * (self.r_0 ** self.k)) * (x ** (-self.k - 1)) #/ (h_boundary_g - l_boundary_g)
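# Minimal usage sketch (parameter values are illustrative only): draw five
# samples from a power law with exponent k=2.5 truncated to [0.1, 10].
if __name__ == '__main__':
    tpl = TPL(k=2.5, l_boundary=0.1, h_boundary=10.0, r_0=0.05)
    print(tpl.rnd_number(size=5))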
|
[
"martin.spetlik@tul.cz"
] |
martin.spetlik@tul.cz
|
9292505eebb12c126697dbd91edc35acde689b27
|
e5a86ca7675a0979109439bd5290401ff14e1de3
|
/src/capra_lidar/scripts/max_range.py
|
875075f5fcefc8496af4e9e585adcebe0670ed0d
|
[] |
no_license
|
clubcapra/urial-DEPRECATED
|
eaf7bd26b7e00f11217cf09d852f2f309eb7805b
|
e367cc1e9d161bf67690815989b51936eedad232
|
refs/heads/master
| 2021-06-17T20:39:23.996432
| 2017-05-31T16:20:57
| 2017-05-31T16:20:57
| 62,470,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
from math import isnan
def scan_cb(msg):
    # Replace invalid readings (near-zero or NaN) with a value just below the
    # configured maximum so downstream nodes treat them as max-range returns.
    new_range = []
    new_intens = []
    for i in range(len(msg.ranges)):
        if msg.ranges[i] < 0.001 or isnan(msg.ranges[i]):
            new_range.append(max_range - 1.0)  # was hard-coded 19.0; uses the ~max_range param (default 20)
            new_intens.append(500.0)
else:
new_range.append(msg.ranges[i])
new_intens.append(msg.intensities[i])
msg.ranges = new_range
msg.intensities = new_intens
pub_scan.publish(msg)
if __name__ == "__main__":
rospy.init_node('max_range')
max_range = rospy.get_param("~max_range", 20)
pub_scan = rospy.Publisher("/scan_out", LaserScan, queue_size=10)
rospy.Subscriber("/scan_in", LaserScan, scan_cb)
rospy.spin()
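# Typical launch (topic names remapped with standard ROS syntax; the names
# below are placeholders):
#   rosrun capra_lidar max_range.py /scan_in:=/scan /scan_out:=/scan_filtered _max_range:=20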
|
[
"log@clubcapra.com"
] |
log@clubcapra.com
|
1e4e1bbe0b1043fada3a979412a97bb3f8a9daa1
|
f639401226c6bfc8261585ce51707412d7458ac3
|
/ml_challenge/task/submission.py
|
85360eeb9b7260328c0d308badea73fe1ad3ddeb
|
[
"MIT"
] |
permissive
|
fernandocamargoai/ml-challenge-2021
|
7d4ce1ba0db49b14cf6cf69979ffc37c6795a6b1
|
7ae082aeb2fe7674956e309afedbd25464d71525
|
refs/heads/main
| 2023-08-13T20:27:24.754402
| 2021-09-20T21:46:53
| 2021-09-20T21:46:53
| 392,701,152
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,332
|
py
|
import functools
import gzip
import json
import os
from functools import cached_property
from multiprocessing import Pool
from typing import List, Tuple, Union
import luigi
import numpy as np
import torch
import pytorch_lightning as pl
from gluonts.model.forecast import SampleForecast
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
from ml_challenge.dataset import (
SameSizeTransformedDataset,
UseMinutesActiveForecastingTransformation,
UseMeanOfLastMinutesActiveTransformation,
)
from ml_challenge.lookahead_generator import LookaheadGenerator
from ml_challenge.submission import (
pool_forecast_transform_fn,
calculate_out_of_stock_days_from_samples,
apply_tweedie,
apply_normal,
apply_ecdf,
apply_beta,
apply_fitted_negative_binomial,
apply_poisson,
apply_negative_binomial,
apply_fitted_gamma, calculate_out_out_stock_days_from_quantiles,
)
from ml_challenge.task.training import (
DeepARTraining,
DeepARForMinutesActiveTraining,
CausalDeepARTraining, TemporalFusionTransformerTraining,
)
def get_suffix(
task: Union["GenerateOutOfStockDaySamplePredictions", "GenerateSubmission"]
) -> str:
if task.minutes_active_task_path:
return "_%s_%s" % (
os.path.split(task.minutes_active_task_path)[-1],
task.minutes_active_forecast_method,
)
elif task.use_mean_of_last_minutes_active:
return "_using_mean_of_last_minutes_active"
else:
return ""
class GenerateOutOfStockDaySamplePredictions(luigi.Task):
task_path: str = luigi.Parameter()
minutes_active_task_path: str = luigi.Parameter(default=None)
minutes_active_forecast_method: str = luigi.ChoiceParameter(
choices=["mean", "max"], default="mean"
)
use_mean_of_last_minutes_active: bool = luigi.BoolParameter(default=False)
num_samples: int = luigi.IntParameter(default=100)
seed: int = luigi.IntParameter(default=42)
@cached_property
def training(self) -> Union[DeepARTraining, CausalDeepARTraining, TemporalFusionTransformerTraining]:
with open(os.path.join(self.task_path, "params.json"), "r") as params_file:
params = json.load(params_file)
training_class = {
DeepARTraining.__name__: DeepARTraining,
CausalDeepARTraining.__name__: CausalDeepARTraining,
TemporalFusionTransformerTraining.__name__: TemporalFusionTransformerTraining,
}[os.path.split(os.path.split(self.task_path)[0])[1]]
return training_class(**params)
@cached_property
def minutes_active_training(self) -> DeepARForMinutesActiveTraining:
with open(
os.path.join(self.minutes_active_task_path, "params.json"), "r"
) as params_file:
params = json.load(params_file)
return DeepARForMinutesActiveTraining(**params)
def output(self):
suffix = get_suffix(self)
return luigi.LocalTarget(
os.path.join(
self.task_path,
f"out_of_stock_day_sample_predictions_num-samples={self.num_samples}_seed={self.seed}{suffix}.npy",
)
)
def process_minutes_active_forecast(self, forecast: SampleForecast) -> np.ndarray:
if self.minutes_active_forecast_method == "mean":
return np.clip(forecast.mean, 0.0, 1.0)
elif self.minutes_active_forecast_method == "max":
return np.clip(forecast.samples.max(axis=0), 0.0, 1.0)
else:
raise ValueError()
def run(self):
pl.seed_everything(self.seed, workers=True)
if self.minutes_active_task_path:
minutes_active_predictor = self.minutes_active_training.get_trained_predictor(
torch.device("cuda")
)
minutes_active_predictor.batch_size = 512
minutes_active_forecasts = [
self.process_minutes_active_forecast(forecast)
for forecast in tqdm(
minutes_active_predictor.predict(
self.minutes_active_training.test_dataset,
num_samples=self.num_samples,
),
total=len(self.minutes_active_training.test_dataset),
)
]
minutes_active_forecasts_dict = {
sku: forecast
for sku, forecast in zip(
self.minutes_active_training.test_df["sku"],
minutes_active_forecasts,
)
}
del minutes_active_predictor
test_dataset = SameSizeTransformedDataset(
self.training.test_dataset,
transformation=UseMinutesActiveForecastingTransformation(
self.training.real_variables.index("minutes_active"),
minutes_active_forecasts_dict,
),
)
elif self.use_mean_of_last_minutes_active:
test_dataset = SameSizeTransformedDataset(
self.training.test_dataset,
transformation=UseMeanOfLastMinutesActiveTransformation(
self.training.real_variables.index("minutes_active"),
self.training.test_steps,
),
)
else:
test_dataset = self.training.test_dataset
predictor = self.training.get_trained_predictor(torch.device("cuda"))
predictor.batch_size = 512
if isinstance(self.training, DeepARTraining):
forecast_transform_fn = calculate_out_of_stock_days_from_samples
else:
forecast_transform_fn = calculate_out_out_stock_days_from_quantiles
with Pool(max(os.cpu_count(), 8)) as pool:
with LookaheadGenerator(
predictor.predict(test_dataset, num_samples=self.num_samples)
) as forecasts:
all_days_samples = list(
tqdm(
pool.imap(
functools.partial(
pool_forecast_transform_fn,
forecast_transform_fn=forecast_transform_fn,
),
zip(forecasts, self.training.test_df["target_stock"]),
),
total=len(self.training.test_df),
)
)
np.save(self.output().path, np.array(all_days_samples))
class GenerateSubmission(luigi.Task):
task_path: str = luigi.Parameter()
    tft_quantile_indices: List[int] = luigi.ListParameter(default=[0])
minutes_active_task_path: str = luigi.Parameter(default=None)
minutes_active_forecast_method: str = luigi.ChoiceParameter(
choices=["mean", "max"], default="mean"
)
use_mean_of_last_minutes_active: bool = luigi.BoolParameter(default=False)
distribution: str = luigi.ChoiceParameter(
choices=[
"tweedie",
"normal",
"ecdf",
"beta",
"fitted_negative_binomial",
"negative_binomial",
"fitted_gamma",
"poisson",
]
)
fixed_std: float = luigi.FloatParameter(default=None)
min_max_std: Tuple[float, float] = luigi.TupleParameter(default=None)
tweedie_phi: float = luigi.FloatParameter(default=2.0)
tweedie_power: float = luigi.FloatParameter(default=1.3)
num_samples: int = luigi.IntParameter(default=100)
seed: int = luigi.IntParameter(default=42)
def requires(self):
return GenerateOutOfStockDaySamplePredictions(
task_path=self.task_path,
minutes_active_task_path=self.minutes_active_task_path,
minutes_active_forecast_method=self.minutes_active_forecast_method,
use_mean_of_last_minutes_active=self.use_mean_of_last_minutes_active,
num_samples=self.num_samples,
seed=self.seed,
)
def output(self):
suffix = get_suffix(self)
if "TemporalFusionTransformerTraining" in self.task_path:
suffix += f"_tft_quantile_indices={self.tft_quantile_indices}"
distribution = self.distribution
if distribution == "tweedie":
distribution += f"_phi={self.tweedie_phi}_power={self.tweedie_power}"
if self.fixed_std:
distribution += f"_std={self.fixed_std}"
if self.min_max_std:
distribution += f"_std={self.min_max_std}"
return luigi.LocalTarget(
os.path.join(
self.task_path,
f"submission_{distribution}_num-samples={self.num_samples}_seed={self.seed}{suffix}.csv.gz",
)
)
def run(self):
out_of_stock_day_preds = np.load(self.input().path)
if "TemporalFusionTransformerTraining" in self.task_path:
if self.tft_quantile_indices:
out_of_stock_day_preds = out_of_stock_day_preds[:, self.tft_quantile_indices]
std_scaler = (
MinMaxScaler(self.min_max_std).fit(
np.std(out_of_stock_day_preds, axis=1).reshape(-1, 1)
)
if self.min_max_std
else None
)
if self.distribution == "tweedie":
apply_dist_fn = functools.partial(
apply_tweedie,
phi=self.tweedie_phi,
power=self.tweedie_power,
fixed_std=self.fixed_std,
std_scaler=std_scaler,
)
elif self.distribution == "normal":
apply_dist_fn = functools.partial(
apply_normal, fixed_std=self.fixed_std, std_scaler=std_scaler,
)
elif self.distribution == "ecdf":
apply_dist_fn = apply_ecdf
elif self.distribution == "beta":
apply_dist_fn = functools.partial(
apply_beta, fixed_std=self.fixed_std, std_scaler=std_scaler,
)
elif self.distribution == "fitted_negative_binomial":
apply_dist_fn = apply_fitted_negative_binomial
elif self.distribution == "negative_binomial":
apply_dist_fn = functools.partial(
apply_negative_binomial,
fixed_std=self.fixed_std,
std_scaler=std_scaler,
)
elif self.distribution == "fitted_gamma":
apply_dist_fn = apply_fitted_gamma
else: # if self.distribution == "poisson":
apply_dist_fn = apply_poisson
with Pool(max(os.cpu_count(), 8)) as pool:
all_probas = list(
tqdm(
pool.imap(apply_dist_fn, out_of_stock_day_preds),
total=out_of_stock_day_preds.shape[0],
)
)
# default_probas = ([0.0] * (len(all_probas[0]) - 1) + [1.0])
default_probas = [1.0 / len(all_probas[0])] * len(all_probas[0])
all_probas = [
probas if sum(probas) > 0 else default_probas for probas in all_probas
]
with gzip.open(self.output().path, "wb") as f:
for probas in all_probas:
f.write(
(
",".join([str(round(proba, 4)) for proba in probas]) + "\n"
).encode("utf-8")
)
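# Usage sketch (paths are placeholders; assumes the tasks are driven from
# Python via luigi.build rather than the luigi CLI):
#   import luigi
#   luigi.build(
#       [GenerateSubmission(task_path="/path/to/DeepARTraining/output",
#                           distribution="tweedie")],
#       local_scheduler=True,
#   )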
|
[
"fernando.camargo.ai@gmail.com"
] |
fernando.camargo.ai@gmail.com
|
81f695120dd47b9cf4268ab582afdda4d242ed6c
|
2dd560dc468af0af4ca44cb4cd37a0b807357063
|
/Leetcode/350. Intersection of Two Arrays II/solution2.py
|
f182622aef3131704a5a6758d3105da6619b19e0
|
[
"MIT"
] |
permissive
|
hi0t/Outtalent
|
460fe4a73788437ba6ce9ef1501291035c8ff1e8
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
refs/heads/master
| 2023-02-26T21:16:56.741589
| 2021-02-05T13:36:50
| 2021-02-05T13:36:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
from collections import Counter
from typing import List
class Solution:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
counter1 = Counter(nums1)
counter2 = Counter(nums2)
result = []
for n in counter1:
if n in counter2:
result.extend([n] * min(counter1[n], counter2[n]))
return result
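# Equivalent one-liner using Counter's multiset intersection (& keeps the
# minimum count of each element):
#   list((Counter(nums1) & Counter(nums2)).elements())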
|
[
"info@crazysquirrel.ru"
] |
info@crazysquirrel.ru
|
0e7a5e166ace9aa0ff742c52fb7cd5755f39db37
|
9ec99e190c21510d81b9322f1ce7b8f20a7778e3
|
/knots_package/image_loaded.py
|
091df1dc5dcac7037114267c376f2d540a60a36f
|
[] |
no_license
|
SanSunB/image_processing
|
823955b1e60a631bbabe9f8b2d2ce081d79b598e
|
e77ce29db8cfa49975ba6a157448565f8cf66148
|
refs/heads/master
| 2023-01-13T04:58:11.596637
| 2019-09-14T08:12:58
| 2019-09-14T08:12:58
| 176,768,020
| 2
| 0
| null | 2023-01-07T09:16:27
| 2019-03-20T15:52:44
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 2,072
|
py
|
from Image import Image
import cv2 # Opencv
class ImageLoaded(Image):
""" Image class for images that are loaded from a preexisting file.
Read the image, get its size and create a 2 dimention list with the pixels color values. """
def __init__(self, image_name):
""" While Creating the class read the image and set the image's size values """
self.image_name = image_name
# Read the picture we received in the image_name argument
self.img = self.set_image()
# Set the image size parameters
self.width = self.set_width()
self.height = self.set_height()
self.color_grid = [] # The grid will be set outside of the init for readability
def set_image(self):
""" Read the image with opencv and check it was loaded successfuly"""
# Read the image from the name the class received
img = cv2.imread(self.image_name)
# Check if the picture was not loaded successfully, stop the program
if img is None:
print("Problem reading the image")
quit() # Stop all processing
return img
def set_height(self):
""" Get the height of the image - y axis"""
height = self.img.shape[0]
return height
def set_width(self):
""" Get the width of the picture - x axis"""
width = self.img.shape[1]
return width
    def set_color_grid(self):
        """ The image is represented as a 2-dimensional list of colors.
        Build a 2-dimensional list with the color values of all the pixels in the image.
        The color in the matrix will be in the same x-y location as the pixel in the image."""
        # The image is represented as a 2-dimensional list of colors; iterate over
        # every column (y) and row (x) of the image
        for y in range(0, self.width):
            self.color_grid.append([])  # create a new list in the array to represent a new column
            for x in range(0, self.height):
                # Store the pixel color value (note: cv2.imread loads images in BGR order)
                self.color_grid[y].append(self.img[x, y])
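# Usage sketch ('photo.jpg' is a placeholder path):
#   loaded = ImageLoaded('photo.jpg')
#   loaded.set_color_grid()
#   print(loaded.width, loaded.height, loaded.color_grid[0][0])  # BGR of pixel (0, 0)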
|
[
"sanbitan3@gmail.com"
] |
sanbitan3@gmail.com
|
ae8adedd44e0bd5952e608bbe65b84e69da3afd0
|
14108cdee11a087d62b81c4bdf2c5f837b53d908
|
/loophole/polar/pb/errors_pb2.py
|
e9c3740e6b995d1e5ce878d1ab57f2937e4446ed
|
[
"MIT"
] |
permissive
|
edeckers/loophole
|
66657d20ca5cf5c10c1ba59cd9f584ca691a6cb9
|
38fd9226148543e32b6b9582eb52fc4582e1fb2f
|
refs/heads/master
| 2022-07-05T13:24:35.201208
| 2018-10-29T06:47:58
| 2018-10-29T06:47:58
| 262,670,158
| 0
| 0
|
MIT
| 2020-05-09T22:28:30
| 2020-05-09T22:28:29
| null |
UTF-8
|
Python
| false
| true
| 4,622
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: errors.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='errors.proto',
package='data',
serialized_pb=_b('\n\x0c\x65rrors.proto\x12\x04\x64\x61ta\"C\n\x15PbConstraintViolation\x12\x11\n\tvalueName\x18\x01 \x02(\t\x12\x17\n\x0fviolationReason\x18\x02 \x02(\t\"p\n\x08PbErrors\x12\x0f\n\x07message\x18\x01 \x02(\t\x12/\n\nviolations\x18\x02 \x03(\x0b\x32\x1b.data.PbConstraintViolation\x12\x0e\n\x06\x65rrors\x18\x03 \x03(\t\x12\x12\n\nstackTrace\x18\x04 \x03(\t')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PBCONSTRAINTVIOLATION = _descriptor.Descriptor(
name='PbConstraintViolation',
full_name='data.PbConstraintViolation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='valueName', full_name='data.PbConstraintViolation.valueName', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='violationReason', full_name='data.PbConstraintViolation.violationReason', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=22,
serialized_end=89,
)
_PBERRORS = _descriptor.Descriptor(
name='PbErrors',
full_name='data.PbErrors',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='data.PbErrors.message', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='violations', full_name='data.PbErrors.violations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='errors', full_name='data.PbErrors.errors', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stackTrace', full_name='data.PbErrors.stackTrace', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=91,
serialized_end=203,
)
_PBERRORS.fields_by_name['violations'].message_type = _PBCONSTRAINTVIOLATION
DESCRIPTOR.message_types_by_name['PbConstraintViolation'] = _PBCONSTRAINTVIOLATION
DESCRIPTOR.message_types_by_name['PbErrors'] = _PBERRORS
PbConstraintViolation = _reflection.GeneratedProtocolMessageType('PbConstraintViolation', (_message.Message,), dict(
DESCRIPTOR = _PBCONSTRAINTVIOLATION,
__module__ = 'errors_pb2'
# @@protoc_insertion_point(class_scope:data.PbConstraintViolation)
))
_sym_db.RegisterMessage(PbConstraintViolation)
PbErrors = _reflection.GeneratedProtocolMessageType('PbErrors', (_message.Message,), dict(
DESCRIPTOR = _PBERRORS,
__module__ = 'errors_pb2'
# @@protoc_insertion_point(class_scope:data.PbErrors)
))
_sym_db.RegisterMessage(PbErrors)
# @@protoc_insertion_point(module_scope)
|
[
"roscoe@o2.pl"
] |
roscoe@o2.pl
|
e297f2e3273abeeab6846620b94d0f20bd6b724b
|
f06ddca5258290a1e7448a18e1d24a9d20226fbd
|
/pytext/metric_reporters/metric_reporter.py
|
f187b76d041b1d1f730283c2cf7fde5d9be476af
|
[
"BSD-3-Clause"
] |
permissive
|
mruberry/pytext
|
6d64bc37429e3dd5581e5b3b6bf60bd216b6f445
|
3bba58a048c87d7c93a41830fa7853896c4b3e66
|
refs/heads/master
| 2022-07-16T07:41:47.781126
| 2020-05-14T04:52:35
| 2020-05-14T04:54:33
| 263,892,770
| 2
| 0
|
NOASSERTION
| 2020-05-14T11:11:33
| 2020-05-14T11:11:32
| null |
UTF-8
|
Python
| false
| false
| 10,861
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict, List
import numpy as np
import torch
from pytext.common.constants import (
BatchContext,
DatasetFieldName,
RawExampleFieldName,
Stage,
)
from pytext.config.component import Component, ComponentType
from pytext.config.pytext_config import ConfigBase
from pytext.metrics import RealtimeMetrics
from pytext.utils import cuda
from pytext.utils.meter import TimeMeter
from .channel import ConsoleChannel
class MetricReporter(Component):
"""
    MetricReporter is responsible for three things:
    #. Aggregate output from the trainer, which includes model inputs, predictions,
    targets, scores, and loss.
    #. Calculate metrics using the aggregated output, and define how the metric
    is used to find the best model
#. Optionally report the metrics and aggregated output to various channels
Attributes:
lower_is_better (bool): Whether a lower metric indicates better performance.
Set to True for e.g. perplexity, and False for e.g. accuracy. Default
is False
channels (List[Channel]): A list of Channel that will receive metrics and
the aggregated trainer output then format and report them in any customized
way.
    MetricReporter is tightly coupled with metric aggregation and computation, which
    makes it hard for subclasses to reuse the parent's functionality and attributes.
    The next step is to decouple metric aggregation/computation from metric reporting.
"""
__COMPONENT_TYPE__ = ComponentType.METRIC_REPORTER
lower_is_better: bool = False
class Config(ConfigBase):
output_path: str = "/tmp/test_out.txt"
pep_format: bool = False
#: Useful for KD training, column names that used by student but not teacher.
student_column_names: List[str] = []
def __init__(self, channels, pep_format=False) -> None:
self._reset()
self.channels = channels
self.pep_format = pep_format
self._reset_realtime()
def _reset(self):
self.all_preds: List = []
self.all_targets: List = []
self.all_context: Dict = {}
self.all_loss: List = []
self.all_scores: List = []
self.n_batches = 0
self.batch_size: List = []
def _reset_realtime(self):
self.realtime_meters: Dict = {}
self.realtime_meters["tps"] = TimeMeter() # tokens per second
self.realtime_meters["ups"] = TimeMeter() # updates per second
def add_batch_stats(
self, n_batches, preds, targets, scores, loss, m_input, **context
):
"""
Aggregates a batch of output data (predictions, scores, targets/true labels
and loss).
Args:
n_batches (int): number of current batch
preds (torch.Tensor): predictions of current batch
targets (torch.Tensor): targets of current batch
scores (torch.Tensor): scores of current batch
loss (double): average loss of current batch
m_input (Tuple[torch.Tensor, ...]): model inputs of current batch
context (Dict[str, Any]): any additional context data, it could be
either a list of data which maps to each example, or a single value
for the batch
"""
self.n_batches = n_batches
self.aggregate_preds(preds, context)
self.aggregate_targets(targets, context)
self.aggregate_scores(scores)
for key, val in context.items():
if not (isinstance(val, torch.Tensor) or isinstance(val, List)):
continue
if key not in self.all_context:
self.all_context[key] = []
self.aggregate_data(self.all_context[key], val)
# some loss functions (eg: in NewBertRegressionTask) return a tensor
# convert tensor to float
if loss is not None:
self.all_loss.append(float(loss))
self.batch_size.append(len(m_input[0]))
# realtime stats
if DatasetFieldName.NUM_TOKENS in context:
self.realtime_meters["tps"].update(context[DatasetFieldName.NUM_TOKENS])
self.realtime_meters["ups"].update(1)
def aggregate_preds(self, batch_preds, batch_context=None):
self.aggregate_data(self.all_preds, batch_preds)
def aggregate_targets(self, batch_targets, batch_context=None):
self.aggregate_data(self.all_targets, batch_targets)
def aggregate_scores(self, batch_scores):
self.aggregate_data(self.all_scores, batch_scores)
@classmethod
def aggregate_data(cls, all_data, new_batch):
"""
        Aggregate a batch of data; basically just convert tensors to lists of
        native Python data
"""
if new_batch is None:
return
simple_list = cls._make_simple_list(new_batch)
all_data.extend(simple_list)
@classmethod
def _make_simple_list(cls, data):
if isinstance(data, torch.Tensor):
return data.tolist()
elif isinstance(data, List) and all(
isinstance(elem, torch.Tensor) for elem in data
):
return [elem.tolist() for elem in data]
elif (
isinstance(data, List)
and all(isinstance(elem, List) for elem in data)
and all(
isinstance(elem, torch.Tensor) for elemlist in data for elem in elemlist
)
):
return [[elem.tolist() for elem in elemlist] for elemlist in data]
elif isinstance(data, List):
return data
elif isinstance(data, tuple):
return data[0].tolist()
else:
raise NotImplementedError()
def add_channel(self, channel):
self.channels.append(channel)
def batch_context(self, raw_batch, batch):
context = {
BatchContext.INDEX: [
row[RawExampleFieldName.ROW_INDEX] for row in raw_batch
]
}
if DatasetFieldName.NUM_TOKENS in batch:
context.update(
{DatasetFieldName.NUM_TOKENS: batch[DatasetFieldName.NUM_TOKENS]}
)
return context
def calculate_loss(self):
"""
Calculate the average loss for all aggregated batch
"""
return np.average(self.all_loss, weights=self.batch_size)
def calculate_metric(self):
"""
Calculate metrics, each sub class should implement it
"""
raise NotImplementedError()
def predictions_to_report(self):
"""
Generate human readable predictions
"""
return self.all_preds
def targets_to_report(self):
"""
Generate human readable targets
"""
return self.all_targets
# TODO this function can be merged with batch_context once data migration is done
def gen_extra_context(self):
"""
Generate any extra intermediate context data for metric calculation
"""
pass
# TODO this method can be removed by moving Channel construction to Task
def get_meta(self):
"""
Get global meta data that is not specific to any batch, the data will be
pass along to channels
"""
return {}
def report_metric(
self, model, stage, epoch, reset=True, print_to_channels=True, optimizer=None
):
"""
Calculate metrics and average loss, report all statistic data to channels
Args:
model (nn.Module): the PyTorch neural network model.
stage (Stage): training, evaluation or test
epoch (int): current epoch
            reset (bool): whether all data should be reset after reporting, default is True
            print_to_channels (bool): whether to report data to channels, default is True
"""
self.gen_extra_context()
self.total_loss = self.calculate_loss()
metrics = self.calculate_metric()
model_select_metric = self.get_model_select_metric(metrics)
# print_to_channels is true only on gpu 0, but we need all gpus to sync
# metric
self.report_realtime_metric(stage)
if print_to_channels:
for channel in self.channels:
if stage in channel.stages:
channel.report(
stage,
epoch,
metrics,
model_select_metric,
self.total_loss,
self.predictions_to_report(),
self.targets_to_report(),
self.all_scores,
self.all_context,
self.get_meta(),
model,
optimizer,
)
if reset:
self._reset()
self._reset_realtime()
return metrics
def report_realtime_metric(self, stage):
if stage != Stage.TRAIN:
return
samples_total = self.n_batches + 1
tps_total = self.realtime_meters["tps"].n
ups_total = self.realtime_meters["ups"].n
elapsed_time = self.realtime_meters["tps"].elapsed_time
if cuda.DISTRIBUTED_WORLD_SIZE > 1:
tensor = torch.cuda.IntTensor([samples_total, tps_total, ups_total])
torch.distributed.all_reduce(tensor)
[samples_total, tps_total, ups_total] = tensor.data.tolist()[:]
tps = tps_total / elapsed_time
ups = ups_total / elapsed_time
if not tps or not ups:
return
metrics = RealtimeMetrics(samples=samples_total, tps=tps, ups=ups)
print(metrics, flush=True)
def get_model_select_metric(self, metrics):
"""
Return a single numeric metric value that is used for model selection, returns
the metric itself by default, but usually metrics will be more complicated
data structures
"""
return metrics
def compare_metric(self, new_metric, old_metric):
"""
Check if new metric indicates better model performance
Returns:
bool, true if model with new_metric performs better
"""
if not old_metric:
return True
new = self.get_model_select_metric(new_metric)
old = self.get_model_select_metric(old_metric)
if new == old:
return False
return (new < old) == self.lower_is_better
class PureLossMetricReporter(MetricReporter):
lower_is_better = True
@classmethod
def from_config(cls, config, *args, **kwargs):
return cls([ConsoleChannel()], config.pep_format)
def calculate_metric(self):
return self.calculate_loss()
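# Sketch of a custom reporter (illustrative only, not part of PyText): subclasses
# implement calculate_metric() over the aggregated buffers and may flip
# lower_is_better to steer model selection.
#
#   class AccuracyMetricReporter(MetricReporter):
#       lower_is_better = False
#
#       def calculate_metric(self):
#           correct = sum(p == t for p, t in zip(self.all_preds, self.all_targets))
#           return correct / max(len(self.all_targets), 1)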
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
58db9e29c075f0fe4ea8121514f0d827b5087a30
|
7193f9861329a35841a7689a1976f4a27b42cc1a
|
/app/coffee/crud.py
|
eca42bf98e216078c64504b7ecfd5e1846362452
|
[] |
no_license
|
amosctlee/coffee-fastapi
|
d83c3ce797d6a6ff863b634cfae7314564534bb2
|
1cd928993a22c74aca439fe92f1ba13939eb8fb3
|
refs/heads/main
| 2023-06-26T20:57:44.439876
| 2021-08-01T14:33:22
| 2021-08-01T14:33:22
| 391,388,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,907
|
py
|
from sqlalchemy.orm import Session
from . import models, schemas
from . import dependencies
def get_user(db: Session, user_id: int):
return db.query(models.User).filter(models.User.id == user_id).first()
def get_user_by_email(db: Session, email: str):
return db.query(models.User).filter(models.User.email == email).first()
def get_user_by_username(db: Session, username: str):
return db.query(models.User).filter(models.User.username == username).first()
def get_users(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.User).offset(skip).limit(limit).all()
def create_user(db: Session, user: schemas.UserCreate):
hashed_password = dependencies.get_password_hash(user.password)
db_user = models.User(
username=user.username,
email=user.email,
hashed_password=hashed_password
)
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
def get_coffee_beans_by_variety(db: Session, variety: str, skip: int = 0, limit: int = 100):
return (
db.query(models.CoffeeBean)
.filter(models.CoffeeBean.variety == variety)
.offset(skip)
.limit(limit)
.all()
)
def get_coffee_beans_by_origin(db: Session, origin: str, skip: int = 0, limit: int = 100):
return (
db.query(models.CoffeeBean)
.filter(models.CoffeeBean.origin == origin)
.offset(skip)
.limit(limit)
.all()
)
def create_coffee_bean(db: Session, coffee_bean: schemas.CoffeeBeanCreate):
db_coffee_bean = models.CoffeeBean(**coffee_bean.dict())
db.add(db_coffee_bean)
db.commit()
db.refresh(db_coffee_bean)
return db_coffee_bean
def create_brewing(db: Session, brewing: schemas.BrewingCreate):
db_brewing = models.Brewing(**brewing.dict())
db.add(db_brewing)
db.commit()
db.refresh(db_brewing)
return db_brewing
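# Usage sketch (assumes a session factory such as `SessionLocal` from a
# `database` module, as in the standard FastAPI + SQLAlchemy layout):
#   db = SessionLocal()
#   user = create_user(db, schemas.UserCreate(
#       username="ada", email="ada@example.com", password="s3cret"))
#   beans = get_coffee_beans_by_origin(db, origin="Ethiopia")
#   db.close()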
|
[
"amosctlee@gmail.com"
] |
amosctlee@gmail.com
|
6ebe8a7700fc7db88c936ba1a15805eaeefbb421
|
ab7a491ebc0a6694f65c52203b51e2f449072815
|
/contents/2020/03/basic-2/solution.py
|
e1783c572456ddb5cfd0fe28391caf2593b711a2
|
[] |
no_license
|
EEIC-Algorithms/marine
|
fede3954ac3fb4576cbc7a082a9609d13ebb738e
|
3c9f11e9d24eabf3c1eff00e21bcc39942b0c7fd
|
refs/heads/master
| 2023-04-02T03:25:49.678602
| 2021-04-05T03:58:29
| 2021-04-05T03:58:29
| 242,078,935
| 0
| 0
| null | 2021-04-05T03:58:30
| 2020-02-21T07:23:58
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 349
|
py
|
N = int(input())
A = list(map(int, input().split()))
Q = int(input())
for _ in range(Q):
    x = int(input())
    # Binary search for the lower bound: the first index whose value is >= x.
    # Invariant: A[lv] < x <= A[rv], with lv/rv starting just outside the array.
    lv = -1
    rv = N
    while rv - lv > 1:
        mid = (rv + lv) // 2
        if A[mid] >= x:
            rv = mid
        else:
            lv = mid
    pos = rv
    if pos == N:
        print("No")
    elif A[pos] == x:
        print("Yes")
    else:
        print("No")
|
[
"yuji9511yuji@gmail.com"
] |
yuji9511yuji@gmail.com
|
2517301e417f5d9dfd6aefda32ef2714ad4697d1
|
d60a084c3ff66b4ecb36e3ade75a64d70aed38ed
|
/tests/test_entities.py
|
b735116616861c331110e4c5e15c8bbbfa2a579c
|
[
"MIT"
] |
permissive
|
pjf02536-2/hubitat
|
f8e9f92560c052f0857ce6543d7ddc4b93f55e13
|
f55d263b3cbeb3cbc27983bfe11fc8e3e7d9a8af
|
refs/heads/master
| 2023-07-01T10:35:14.331551
| 2021-08-06T18:39:41
| 2021-08-06T18:39:41
| 393,467,889
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,390
|
py
|
from asyncio import Future
from typing import Awaitable, Dict, Optional
from unittest.mock import NonCallableMock, call
from custom_components.hubitat.device import Hub
from hubitatmaker import Device
from pytest_homeassistant_custom_component.common import Mock, patch
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_registry import EntityRegistry
def mock_get_reg(_: HomeAssistant) -> Awaitable[EntityRegistry]:
MockReg = Mock(spec=EntityRegistry)
mock_reg = MockReg()
mock_reg.configure_mock(entities={})
future = Future()
future.set_result(mock_reg)
return future
@patch("custom_components.hubitat.entities.get_hub")
@patch(
"custom_components.hubitat.entities.entity_registry.async_get_registry",
new=mock_get_reg,
)
async def test_entity_migration(get_hub: Mock) -> None:
mock_device_1 = NonCallableMock(type="switch", attributes=["state"])
mock_device_2 = NonCallableMock(type="fan", attributes=["state"])
MockHub = Mock(spec=Hub)
mock_hub = MockHub()
mock_hub.configure_mock(
devices={"id1": mock_device_1, "id2": mock_device_2}, token="12345"
)
get_hub.return_value = mock_hub
from custom_components.hubitat.switch import HubitatSwitch
from custom_components.hubitat.entities import create_and_add_entities
mock_hass = Mock(spec=["async_create_task"])
MockConfigEntry = Mock(spec=ConfigEntry)
mock_entry = MockConfigEntry()
def _is_switch(device: Device, overrides: Optional[Dict[str, str]] = None) -> bool:
return device.type == "switch"
is_switch = Mock(side_effect=_is_switch)
mock_async_add_entities = Mock()
await create_and_add_entities(
mock_hass,
mock_entry,
mock_async_add_entities,
"switch",
HubitatSwitch,
is_switch,
)
@patch("custom_components.hubitat.entities.get_hub")
@patch("custom_components.hubitat.entities.HubitatEventEmitter")
async def test_add_event_emitters(HubitatEventEmitter: Mock, get_hub: Mock) -> None:
mock_device_1 = NonCallableMock(type="switch", attributes=["state"])
mock_device_2 = NonCallableMock(type="button", attributes=["state"])
MockHub = Mock(spec=Hub)
mock_hub = MockHub()
mock_hub.devices = {"id1": mock_device_1, "id2": mock_device_2}
get_hub.return_value = mock_hub
HubitatEventEmitter.return_value.update_device_registry = Mock(
return_value="update_registry"
)
from custom_components.hubitat.entities import create_and_add_event_emitters
mock_hass = Mock(spec=["async_create_task"])
MockConfigEntry = Mock(spec=ConfigEntry)
mock_entry = MockConfigEntry()
def mock_is_emitter(device: Device) -> bool:
return device.type == "button"
is_emitter = Mock(side_effect=mock_is_emitter)
await create_and_add_event_emitters(mock_hass, mock_entry, is_emitter)
assert HubitatEventEmitter.call_count == 1, "expected 1 emitter to be created"
    assert mock_hass.async_create_task.call_count == 1, "expected 1 async creation"
    # Mock.has_calls is not an assertion helper (an `assert mock.has_calls(...)`
    # is always truthy); assert_has_calls actually verifies the recorded call.
    mock_hass.async_create_task.assert_has_calls(
        [call("update_registry")]
    )  # 1 update_device_registry task should have been created
assert (
mock_hub.add_event_emitters.call_count == 1
), "event emitters should have been added to hub"
|
[
"jason@jasoncheatham.com"
] |
jason@jasoncheatham.com
|
b92ea734fdc2ebe4df06501f70b001526d9ba975
|
3554907b188ffa4d7f750c7035fa1842b13ce3ac
|
/Archive/LPBB-ECS-17-2-20/BSMolf.py
|
01ac84ffc0f871a2de4eb7f5d21b31cc5dd5595d
|
[] |
no_license
|
MAly79/PhD-Code
|
4720741f0fc82e783b5cac90bcde30f60b835f96
|
203f221277bffcb5e0fdabf9e22b5f1a1131dd23
|
refs/heads/master
| 2020-09-06T17:49:08.656115
| 2020-05-12T04:12:12
| 2020-05-12T04:12:12
| 220,500,531
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,104
|
py
|
# This is a Python script that generates a LAMMPS molecule file for use in
# polymer brush simulations.
import numpy as np
def BSMolf(n_m):
# Number of atoms to create along the main chain
#n_m = 10
# Number of atoms on the side chains
n_s = 0
# Number of atom types
Ntype = 1
    # Write the LAMMPS molecule file
with open('bsmol.txt','w') as fdata:
# First line is a description
fdata.write('Bead-Spring Polymer molecule\n\n')
#--- Header ---#
#Specify number of atoms and atom types
fdata.write('{} atoms\n' .format(n_m + n_s))
#Specify the number of bonds
fdata.write('{} bonds\n' .format(n_m - 1))
        #--- Body ---#
        # Coords section (LAMMPS expects a blank line before each section keyword)
        fdata.write('\nCoords\n\n')
# Write the line format for the Coords:
# atom-ID x y z
for i in range(n_m):
fdata.write('{} {} {} {}\n' .format(i+1,0,0,i))
        # Types section
        fdata.write('\nTypes\n\n')
fdata.write('{} {}\n' .format(1,1))
for i in range(n_m-2):
fdata.write('{} {}\n' .format(i+2,2))
fdata.write('{} {}\n' .format(n_m,3))
        # Bonds section
        fdata.write('\nBonds\n\n')
# Write the line format for the bonds:
# bond-ID type atom1 atom2
for i in range(n_m-1):
fdata.write('{} 1 {} {}\n' .format(i+1,i+1,i+2))
        # Special Bond Counts section
        fdata.write('\nSpecial Bond Counts\n\n')
        # Write the line format for the special bond counts:
        # ID N1 N2 N3
fdata.write('{} {} {} {}\n' .format(1,1,0,0))
for i in range(n_m-2):
fdata.write('{} {} {} {}\n' .format(i+2,2,0,0))
fdata.write('{} {} {} {}\n' .format(n_m,1,0,0))
        # Special Bonds section
        fdata.write('\nSpecial Bonds\n\n')
        # Write the line format for the special bonds:
        # ID a b c d
fdata.write('{} {}\n' .format(1,2))
for i in range(n_m-2):
fdata.write('{} {} {}\n' .format(i+2,i+1,i+3))
fdata.write('{} {}\n' .format(n_m,n_m-1))
return None
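# Usage sketch: write a 10-bead chain to bsmol.txt; in LAMMPS the file can then
# be loaded with e.g. `molecule chain bsmol.txt`.
if __name__ == '__main__':
    BSMolf(10)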
|
[
"maa4617@ic.ac.uk"
] |
maa4617@ic.ac.uk
|
1127e95ffd19edac9b2fb33082a31628b3e8c91e
|
22209fb76ba21a2c381e6bacbeb21d9fa0d92edb
|
/Mundo01/Exercicios/venv/Scripts/pip-script.py
|
300ff616a4d0b49a200ca98c67ac0576388d4ef4
|
[
"MIT"
] |
permissive
|
molonti/CursoemVideo---Python
|
cfba0bd1824f547b24cf216811b1447160a47bd5
|
4f6a7af648f7f619d11e95fa3dc7a33b28fcfa11
|
refs/heads/master
| 2021-05-21T08:09:52.031493
| 2020-04-03T02:22:05
| 2020-04-03T02:22:05
| 252,613,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
#!C:\Users\Molon\Documents\CursoemVideo\Exercicios\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
|
[
"molonti@gmail.com"
] |
molonti@gmail.com
|
573b280a471fd51baab60b29258cfde7cfc3bf85
|
475e2fe71fecddfdc9e4610603b2d94005038e94
|
/Amazon/RotateMatrix.py
|
6f1e68efc82f49d8cba50273d17bf54689f8df40
|
[] |
no_license
|
sidhumeher/PyPractice
|
770473c699aab9e25ad1f8b7b7cd8ad05991d254
|
2938c14c2e285af8f02e2cfc7b400ee4f8d4bfe0
|
refs/heads/master
| 2021-06-28T20:44:50.328453
| 2020-12-15T00:51:39
| 2020-12-15T00:51:39
| 204,987,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
'''
Created on Oct 22, 2020
@author: sidteegela
'''
# Technique: transpose in place, then reverse each row (90-degree clockwise rotation)
# Time Complexity: O(n ^ 2)
def rotateMatrix(ipList):
if len(ipList) == 0:
return
n = len(ipList[0])
# Transpose- rows to columns, columns to rows
for i in range(n):
for j in range(i, n):
ipList[j][i], ipList[i][j] = ipList[i][j], ipList[j][i]
# Reverse
for item in range(len(ipList)):
ipList[item].reverse()
print(ipList)
if __name__ == '__main__':
ipList = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
rotateMatrix(ipList)
ipList = [[5, 1, 9, 11], [2, 4, 8, 10], [13, 3, 6, 7], [15, 14, 12, 16]]
rotateMatrix(ipList)
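# Expected output for the first matrix (90-degree clockwise rotation):
#   [[7, 4, 1], [8, 5, 2], [9, 6, 3]]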
|
[
"sidhumeher@yahoo.co.in"
] |
sidhumeher@yahoo.co.in
|
0ae9e3303c4ba207a4417d48d6b17265799b8907
|
268bb9bbd6d3ebc031393e24322efd53b49a97c7
|
/tests/v0x01/test_controller2switch/test_stats_reply.py
|
b4650b51c15cd2a969c18f11b1d5ce15741bdbce
|
[
"MIT"
] |
permissive
|
smythtech/python-openflow-legacy
|
ba84303f18de9b10b7372e01808f11f0b3c006a1
|
f4ddb06ac8c98f074c04f027df4b52542e41c123
|
refs/heads/master
| 2020-03-17T15:45:12.455272
| 2018-05-16T21:40:14
| 2018-05-16T21:40:14
| 133,722,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 905
|
py
|
"""Test for StatsReply message."""
import unittest
from pyof.v0x01.controller2switch import common, stats_reply
class TestStatsReply(unittest.TestCase):
"""Test for StatsReply message."""
def setUp(self):
"""Baisc Test Setup."""
self.message = stats_reply.StatsReply()
self.message.header.xid = 1
self.message.type = common.StatsTypes.OFPST_FLOW
self.message.flags = 0x0001
self.message.body = []
def test_get_size(self):
"""[Controller2Switch/StatsReply] - size 12."""
self.assertEqual(self.message.get_size(), 12)
@unittest.skip('Not yet implemented')
def test_pack(self):
"""[Controller2Switch/StatsReply] - packing."""
# TODO
pass
@unittest.skip('Not yet implemented')
def test_unpack(self):
"""[Controller2Switch/StatsReply] - unpacking."""
# TODO
pass
|
[
"dylan@admin-dell.admin-dell"
] |
dylan@admin-dell.admin-dell
|
7ae6d710e82aefe54d71e6d7ec88ee6ecf317f9a
|
c6eecca574102fc8f9363f541fad6db174ed7120
|
/utilities/get_hostname.py
|
693c1d6027053882275c360a7e9e5dddde3d60dd
|
[] |
no_license
|
kapeed2091/ib-binderhub
|
cb554af29eec645e13f1777e774a6d386bb68ce7
|
79f4442c93a68b099a88c389879d92fa843a6c0e
|
refs/heads/master
| 2020-11-28T16:37:09.967492
| 2019-12-24T08:25:20
| 2019-12-24T08:25:20
| 229,870,147
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
def get_hostname():
import socket
hostname = socket.gethostname()
    # The original `hostname.strip('Jupyter-')` stripped any of the characters
    # J, u, p, y, t, e, r, '-' from both ends (mangling names such as
    # 'Jupyter-runner'); remove the literal prefix instead.
    if hostname.startswith('Jupyter-'):
        return hostname[len('Jupyter-'):]
    return hostname
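# Example: on a machine whose hostname is 'Jupyter-node1', get_hostname()
# returns 'node1'.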
|
[
"kapeed2091@gmail.com"
] |
kapeed2091@gmail.com
|
877410fe0c3d1fd8e247aab224fa96df43f6e0de
|
134f1789e55b3765ac61149fe34f46e070d941a8
|
/fileshow/apps.py
|
338224e91bcaf9a3ec69560de311cdff4013f3a0
|
[
"MIT"
] |
permissive
|
sty16/django-educational-website
|
ef7d77b2b1fe0c21cda82094b04e95724ca69a92
|
741925af23485d7c0ea9a8553646fc5b5af75258
|
refs/heads/master
| 2022-12-12T13:30:22.908952
| 2019-11-21T02:52:51
| 2019-11-21T02:52:51
| 232,685,249
| 0
| 0
|
MIT
| 2022-12-08T06:17:42
| 2020-01-09T00:10:00
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 91
|
py
|
from django.apps import AppConfig
class FileshowConfig(AppConfig):
name = 'fileshow'
|
[
"2016011130@secoder.net"
] |
2016011130@secoder.net
|
108a4675176acb256c9c588c2575f3f3b78ae495
|
12b0b03af690db63fef63a390afa07f09c5719a3
|
/flask/app.py
|
2c5be457514d3853022c8d1827eb9321a1979d75
|
[
"MIT"
] |
permissive
|
anshulsingh8101/book-recommendation-system
|
deb27327fc6e1e6bdf9e58bbcf00377191f2d901
|
8c40af1c49d84699f1aa6672179d903220614ab4
|
refs/heads/master
| 2022-03-13T12:30:57.410222
| 2019-12-03T23:25:40
| 2019-12-03T23:25:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,361
|
py
|
# https://stackoverflow.com/questions/41487473/nameerror-name-request-is-not-defined
# https://stackoverflow.com/questions/34704997/jquery-autocomplete-in-flask
# https://kite.com/python/docs/flask.jsonify
import json
import flask
from api import BookSimilarity
app = flask.Flask(__name__)
booksim = BookSimilarity()
@app.route('/')
def index():
return flask.render_template('index.html')
@app.route('/autocomplete', methods=['GET'])
def autocomplete():
search = flask.request.args.get('q')
filtered_books = list(booksim.search(search)['title'].values)
return flask.jsonify(matching_results=filtered_books)
@app.route('/recommend')
def recommend():
# Dynamic page help:
# https://stackoverflow.com/questions/40963401/flask-dynamic-data-update-without-reload-page/40964086
searchText = flask.request.args.get('jsdata')
output = ''
if searchText:
print(f'Search text: {searchText}')
results = booksim.recommend(searchText)
if results is not None:
            output = json.loads(results[['title', 'url']].to_json(orient='records'))  # results.title.values
# TODO: Convert a fuller version to JSON rather than just an array (title, url, etc.) and render as a table instead.
# https://stackoverflow.com/questions/48050769/pandas-dataframe-to-flask-template-as-a-json
print(output)
return flask.render_template('results.html', recommendations=output)
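# Typical local run (standard Flask CLI):
#   FLASK_APP=app.py flask run
# /autocomplete?q=<text> returns matching titles as JSON, and
# /recommend?jsdata=<title> renders results.html with (title, url) records.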
|
[
"KasumiL5x@gmail.com"
] |
KasumiL5x@gmail.com
|
9a405e58c20f5998b280c0d03f6836e4a065abf4
|
2a2d1fb9de3896655ed715c423dfdf5b4ad7b403
|
/bar.py
|
22a3cfe76085c1f7ee569041d7e8ec586f296008
|
[] |
no_license
|
Varian72/prak
|
78e6ede698c6dc4f12d1e1277ef430d81837a930
|
3bd72069c278b6beccd671a387612d2066695416
|
refs/heads/master
| 2021-07-20T06:51:03.202819
| 2017-10-29T18:35:24
| 2017-10-29T18:35:24
| 104,173,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,391
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'bar.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Proggres(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(617, 377)
self.progressBar = QtGui.QProgressBar(Form)
self.progressBar.setGeometry(QtCore.QRect(120, 130, 401, 61))
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName(_fromUtf8("progressBar"))
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = Proggres()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
|
[
"rostyslav.tanchak@keenethics.com"
] |
rostyslav.tanchak@keenethics.com
|
459fbbadc603786221ff5b6bf6a638eb4312b998
|
13036fae0234b2401b000ea9b46d2aca6be897f0
|
/providers/google/tests/test_common.py
|
abec7650f67811036a55beae55c32cbc754ab1c4
|
[] |
no_license
|
smlgit/cloud-backup
|
7a62fd51259125c229a39ea0a9ff7768f69f0855
|
f4f1d6fd1d53027d7cb0571b6b7724613f30ef15
|
refs/heads/master
| 2023-05-11T01:39:26.818467
| 2023-05-01T07:48:28
| 2023-05-01T07:48:28
| 336,930,457
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
import json
import common.http_server_utils as http_server_utils
class GoogleTestBaseHandler(http_server_utils.MyHttpServerBaseHandler):
def testing_handle_google_token_refresh(self):
"""
If the incoming request is for google token refresh, this function
will return some dummy credentials so that testing http handlers don't
have to do it themselves.
:return: True if the request was a token refresh and was handled.
"""
if self.path == '/token':
self.send_success_response(response_content_string=json.dumps(
{
'access_token': 'dummy_access_token',
'expires_in': 10000,
'scope': 'whatever_scope',
'refresh_token': 'dummy_refresh_token'
}
))
return True
return False
|
[
"smlgit@protonmail.com"
] |
smlgit@protonmail.com
|
fb7ca8793d5eec690578e6feba5b1b93205d814a
|
438a49e176fb2340b09d6dd5f55af279f1aa991c
|
/parking/test.py
|
7babe4378fabd592789b3842a843d9994b0b6b57
|
[] |
no_license
|
saiprasannasastry/parking_lot
|
89663fee1b465e130b5951947fcfb7f97e6a78d2
|
b5d9f52a39d3fd0204bba56ce2f44eeafd4a6748
|
refs/heads/master
| 2020-09-20T06:41:52.968823
| 2019-09-27T07:05:23
| 2019-09-27T07:05:23
| 224,402,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
from parking_lot import Parking
x=Parking()
x.create_parking_lot(6)
#print(x.slots)
print(' -----------------' )
x.park('ka-41-y-8957','black')
x.park('ka-41-y-8957','black')
print('****************')
x.leave(1)
x.park('ka-41-y-8957','black')
x.park('ka-41-y-8957','black')
print('****************')
print('****************')
x.status()
x.park('ka-41-y-8957','black')
x.registration_numbers_for_cars_wirh_color("black")
x.registration_numbers_for_cars_wirh_color("Black")
x.slot_number_for_cars_wirh_color("black")
x.slot_number_for_cars_wirh_color("Black")
x.slot_number_for_registration_number("ka-41-y-8957")
x.create_parking_lot(6)
|
[
"ssastry@infoblox.com"
] |
ssastry@infoblox.com
|
a3e9e4f714c3572c21cae146d4d1f71f56326a50
|
7bd37f5ddadf07f3cd43318ecdfa583e033571a9
|
/mailadmin/migrations/0014_auto_20170108_1535.py
|
cb0cf26694e7b4c22d0ff8e7d8d28bde7cb1b14a
|
[
"BSD-3-Clause"
] |
permissive
|
thommyhh/django-mailadmin
|
328dcf29d8ec017ebabf6f87455887756d7f0b96
|
7ee6bf0bb7e8dc49cb226849888f6cb37bf924f0
|
refs/heads/master
| 2021-01-11T17:10:09.817864
| 2018-08-23T17:57:48
| 2018-08-23T17:57:48
| 79,728,686
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-08 14:35
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mailadmin', '0013_useradditions'),
]
operations = [
migrations.AddField(
model_name='useradditions',
name='user',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='useradditions',
name='roles',
field=models.CharField(max_length=10, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z', 32), code='invalid', message='Enter only digits separated by commas.')]),
),
]
|
[
"thorben.nissen@kapp-hamburg.de"
] |
thorben.nissen@kapp-hamburg.de
|
0d353d6b2a498b5c79aa1daddca3649f9fcb95b2
|
e9e2a2ba998f54c367029ef9b031d115d7f6de3d
|
/xlwings/_xlmac.py
|
5ebfe5f35a26c35554940776190e4668279eacb9
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
sebix/xlwings
|
e89f9323319652f8ccae9da9f866a1892576597d
|
904d6dde518a117eb2e7a4b8fe7c25b60484cdd5
|
refs/heads/master
| 2021-01-17T20:02:05.039418
| 2014-11-02T15:33:20
| 2014-11-02T15:33:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,608
|
py
|
# TODO: create classes
# TODO: align clean_xl_data and prepare_xl_data (should work on same dimensions of data)
import os
import datetime as dt
from appscript import app
from appscript import k as kw
import psutil
import atexit
try:
import pandas as pd
except ImportError:
pd = None
# Time types
time_types = (dt.date, dt.datetime)
@atexit.register
def clean_up():
"""
Since AppleScript cannot access Excel while a Macro is running, we have to run the Python call in a
background process which makes the call return immediately: we rely on the StatusBar to give the user
feedback.
This function is triggered when the interpreter exits and runs the CleanUp Macro in VBA to show any
errors and to reset the StatusBar.
"""
if is_excel_running():
app('Microsoft Excel').run_VB_macro('CleanUp')
def is_file_open(fullname):
"""
Checks if the file is already open
"""
for proc in psutil.process_iter():
if proc.name() == 'Microsoft Excel':
for i in proc.open_files():
if i.path.lower() == fullname.lower():
return True
return False
def is_excel_running():
for proc in psutil.process_iter():
if proc.name() == 'Microsoft Excel':
return True
return False
def get_workbook(fullname):
"""
Get the appscript Workbook object.
On Mac, it seems that we don't have to deal with >1 instances of Excel,
as each spreadsheet opens in a separate window anyway.
"""
filename = os.path.basename(fullname)
xl_workbook = app('Microsoft Excel').workbooks[filename]
xl_app = app('Microsoft Excel')
return xl_app, xl_workbook
def get_workbook_name(xl_workbook):
return xl_workbook.name.get()
def get_worksheet_name(xl_sheet):
return xl_sheet.name.get()
def get_xl_sheet(xl_workbook, sheet_name_or_index):
return xl_workbook.sheets[sheet_name_or_index]
def set_worksheet_name(xl_sheet, value):
return xl_sheet.name.set(value)
def get_worksheet_index(xl_sheet):
return xl_sheet.entry_index.get()
def get_app(xl_workbook):
# Since we can't have multiple instances of Excel on Mac (?), we ignore xl_workbook
return app('Microsoft Excel')
def open_workbook(fullname):
filename = os.path.basename(fullname)
xl_app = app('Microsoft Excel')
xl_app.activate()
xl_app.open(fullname)
xl_workbook = xl_app.workbooks[filename]
return xl_app, xl_workbook
def close_workbook(xl_workbook):
xl_workbook.close(saving=kw.no)
def new_workbook():
is_running = is_excel_running()
xl_app = app('Microsoft Excel')
xl_app.activate()
if is_running:
        # If Excel is just being fired up, a "Workbook1" is automatically added.
        # If it's already running, we create a new one that Excel unfortunately calls "Sheet1".
        # It's a feature though: see p.14 of the Excel 2004 AppleScript Reference.
xl_workbook = xl_app.make(new=kw.workbook)
else:
xl_workbook = xl_app.workbooks[1]
return xl_app, xl_workbook
def get_active_sheet(xl_workbook):
return xl_workbook.active_sheet
def activate_sheet(xl_workbook, sheet_name_or_index):
return xl_workbook.sheets[sheet_name_or_index].activate_object()
def get_worksheet(xl_workbook, sheet_name_or_index):
return xl_workbook.sheets[sheet_name_or_index]
def get_first_row(xl_sheet, range_address):
return xl_sheet.cells[range_address].first_row_index.get()
def get_first_column(xl_sheet, range_address):
return xl_sheet.cells[range_address].first_column_index.get()
def count_rows(xl_sheet, range_address):
return xl_sheet.cells[range_address].count(each=kw.row)
def count_columns(xl_sheet, range_address):
return xl_sheet.cells[range_address].count(each=kw.column)
def get_range_from_indices(xl_sheet, first_row, first_column, last_row, last_column):
first_address = xl_sheet.columns[first_column].rows[first_row].get_address()
last_address = xl_sheet.columns[last_column].rows[last_row].get_address()
return xl_sheet.cells['{0}:{1}'.format(first_address, last_address)]
def get_value_from_range(xl_range):
return xl_range.value.get()
def get_value_from_index(xl_sheet, row_index, column_index):
return xl_sheet.columns[column_index].rows[row_index].value.get()
def clean_xl_data(data):
"""
appscript returns empty cells as ''. So we replace those with None to be in line with pywin32
"""
return [[None if c == '' else c for c in row] for row in data]
def prepare_xl_data(data):
# This transformation seems to be only needed on Python 2.6 (?)
if hasattr(pd, 'tslib') and isinstance(data, pd.tslib.Timestamp):
data = data.to_datetime()
return data
def set_value(xl_range, data):
xl_range.value.set(data)
def get_selection_address(xl_app):
return str(xl_app.selection.get_address())
def clear_contents_worksheet(xl_workbook, sheets_name_or_index):
xl_workbook.sheets[sheets_name_or_index].used_range.clear_contents()
def clear_worksheet(xl_workbook, sheet_name_or_index):
xl_workbook.sheets[sheet_name_or_index].used_range.clear_range()
def clear_contents_range(xl_range):
xl_range.clear_contents()
def clear_range(xl_range):
xl_range.clear_range()
def get_formula(xl_range):
return xl_range.formula.get()
def set_formula(xl_range, value):
xl_range.formula.set(value)
def get_row_index_end_down(xl_sheet, row_index, column_index):
ix = xl_sheet.columns[column_index].rows[row_index].get_end(direction=kw.toward_the_bottom).first_row_index.get()
return ix
def get_column_index_end_right(xl_sheet, row_index, column_index):
ix = xl_sheet.columns[column_index].rows[row_index].get_end(direction=kw.toward_the_right).first_column_index.get()
return ix
def get_current_region_address(xl_sheet, row_index, column_index):
return str(xl_sheet.columns[column_index].rows[row_index].current_region.get_address())
def get_chart_object(xl_workbook, sheet_name_or_index, chart_name_or_index):
return xl_workbook.sheets[sheet_name_or_index].chart_objects[chart_name_or_index]
def get_chart_index(xl_chart):
return xl_chart.entry_index.get()
def get_chart_name(xl_chart):
return xl_chart.name.get()
def add_chart(xl_workbook, sheet_name_or_index, left, top, width, height):
# With the sheet name it won't find the chart later, so we go with the index (no idea why)
sheet_index = xl_workbook.sheets[sheet_name_or_index].entry_index.get()
return xl_workbook.make(at=xl_workbook.sheets[sheet_index],
new=kw.chart_object,
with_properties={kw.width: width,
kw.top: top,
kw.left_position: left,
kw.height: height})
def set_chart_name(xl_chart, name):
xl_chart.name.set(name)
def set_source_data_chart(xl_chart, xl_range):
xl_chart.chart.set_source_data(source=xl_range)
def get_chart_type(xl_chart):
return xl_chart.chart.chart_type.get()
def set_chart_type(xl_chart, chart_type):
xl_chart.chart.chart_type.set(chart_type)
def activate_chart(xl_chart):
"""
activate() doesn't seem to do anything so resolving to select() for now
"""
xl_chart.select()
def autofit(range_, axis):
address = range_.xl_range.get_address()
app('Microsoft Excel').screen_updating.set(False)
if axis == 0 or axis == 'rows' or axis == 'r':
range_.xl_sheet.rows[address].autofit()
elif axis == 1 or axis == 'columns' or axis == 'c':
range_.xl_sheet.columns[address].autofit()
elif axis is None:
range_.xl_sheet.rows[address].autofit()
range_.xl_sheet.columns[address].autofit()
app('Microsoft Excel').screen_updating.set(True)
def autofit_sheet(sheet, axis):
#TODO: combine with autofit that works on Range objects
num_columns = sheet.xl_sheet.count(each=kw.column)
num_rows = sheet.xl_sheet.count(each=kw.row)
xl_range = get_range_from_indices(sheet.xl_sheet, 1, 1, num_rows, num_columns)
address = xl_range.get_address()
app('Microsoft Excel').screen_updating.set(False)
if axis == 0 or axis == 'rows' or axis == 'r':
sheet.xl_sheet.rows[address].autofit()
elif axis == 1 or axis == 'columns' or axis == 'c':
sheet.xl_sheet.columns[address].autofit()
elif axis is None:
sheet.xl_sheet.rows[address].autofit()
sheet.xl_sheet.columns[address].autofit()
app('Microsoft Excel').screen_updating.set(True)
def set_xl_workbook_current(xl_workbook):
global xl_workbook_current
xl_workbook_current = xl_workbook
def get_xl_workbook_current():
try:
return xl_workbook_current
except NameError:
return None
def get_number_format(range_):
return range_.xl_range.number_format.get()
def set_number_format(range_, value):
app('Microsoft Excel').screen_updating.set(False)
range_.xl_range.number_format.set(value)
app('Microsoft Excel').screen_updating.set(True)
def get_address(xl_range, row_absolute, col_absolute, external):
return xl_range.get_address(row_absolute=row_absolute, column_absolute=col_absolute, external=external)
def add_sheet(xl_workbook, before, after):
if before:
position = before.xl_sheet.before
else:
position = after.xl_sheet.after
return xl_workbook.make(new=kw.worksheet, at=position)
def count_worksheets(xl_workbook):
return xl_workbook.count(each=kw.worksheet)
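# A minimal usage sketch (assumes Excel for Mac is installed and that
# '/tmp/Book1.xlsx' exists; the 'A1:A1' cell-range address is an assumption
# about the format appscript expects), exercising a few wrappers from above.
if __name__ == '__main__':
    xl_app, xl_workbook = open_workbook('/tmp/Book1.xlsx')
    xl_sheet = get_active_sheet(xl_workbook)
    set_value(xl_sheet.cells['A1:A1'], 42)
    print(get_value_from_index(xl_sheet, 1, 1))
    close_workbook(xl_workbook)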
|
[
"fzumstein@gmail.com"
] |
fzumstein@gmail.com
|
ad91051572c2211638df639230b7336659d1fbe2
|
8dd00c349220fb0175e87f0e8a962bb9d0310c75
|
/algorithm/tools/factor_generator.py
|
3027ec132efe56d9bbec432a59163e0ba20ab978
|
[] |
no_license
|
MForrest00/Algusic-algorithm
|
ebc5660731043deb1e93a9e3b1d3d60659f4f9a8
|
d20749acc984676a629f10714bc437aaeee8dae2
|
refs/heads/master
| 2023-06-04T18:12:49.326941
| 2021-06-29T18:08:01
| 2021-06-29T18:08:01
| 258,203,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,902
|
py
|
from random import gauss
from typing import Optional, Union
class FactorGenerator:
BASE_PROBABILITY = 0.5
BASE_STANDARD_DEVIATION = 0.15
def __init__(
self,
base_probability: Optional[Union[int, float]] = None,
base_standard_deviation: Optional[Union[int, float]] = None,
):
self.base_probability = self.set_base_probability(base_probability)
self.base_standard_deviation = self.set_base_standard_deviation(base_standard_deviation)
def set_base_probability(self, base_probability: Optional[Union[int, float]] = None) -> float:
if base_probability is None:
return FactorGenerator.BASE_PROBABILITY
if not isinstance(base_probability, (int, float)):
raise TypeError("Base probability must be an integer or float")
if not 0 <= base_probability <= 1:
raise ValueError("Base probability must be between 0.0 and 1.0")
return float(base_probability)
def set_base_standard_deviation(self, base_standard_deviation: Optional[Union[int, float]] = None) -> float:
if base_standard_deviation is None:
return FactorGenerator.BASE_STANDARD_DEVIATION
if not isinstance(base_standard_deviation, (int, float)):
raise TypeError("Base standard deviation must be an integer or float")
if not 0 <= base_standard_deviation <= 1:
raise ValueError("Base standard deviation must be between 0.0 and 1.0")
return float(base_standard_deviation)
def generate_factor(
self,
factor: Optional[Union[int, float]] = None,
probability: Optional[Union[int, float]] = None,
standard_deviation: Optional[Union[int, float]] = None,
) -> float:
if factor is not None:
if not isinstance(factor, (int, float)):
raise TypeError("Factor must be an integer or float")
if not 0 <= factor <= 1:
raise ValueError("Factor must be between 0.0 and 1.0")
return float(factor)
base_probability = probability if probability is not None else self.base_probability
if not isinstance(base_probability, (int, float)):
raise TypeError("Probability must be an integer or float")
if not 0 <= base_probability <= 1:
raise ValueError("Probability must be between 0.0 and 1.0")
base_standard_deviation = standard_deviation if standard_deviation is not None else self.base_standard_deviation
if not isinstance(base_standard_deviation, (int, float)):
raise TypeError("Standard deviation must be an integer or float")
if not 0 <= base_standard_deviation <= 1:
raise ValueError("Standard deviation must be between 0.0 and 1.0")
possible_factor = gauss(base_probability, base_standard_deviation)
return max(0.0, min(1.0, possible_factor))
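# A short usage sketch: factors are drawn from a gaussian around the base
# probability and clamped to [0.0, 1.0] by the final max/min above.
if __name__ == "__main__":
    generator = FactorGenerator(base_probability=0.7)
    print(generator.generate_factor())                      # random draw around 0.7
    print(generator.generate_factor(factor=0.25))           # explicit factor passes straight through
    print(generator.generate_factor(standard_deviation=0))  # deterministic: exactly 0.7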
|
[
"mforrest00@gmail.com"
] |
mforrest00@gmail.com
|
e6acc50cfab3b520a3844b6743f17457b54592a8
|
b240f9a8a2b22a2a4a7e0b44b3db8b46dac7dea0
|
/trips/admin.py
|
7daae3467294415ca86315da0a977a972c3d5384
|
[] |
no_license
|
SUPERPUPU/blog
|
0cd90c793269107772d2355a6cbe7d3f7c790269
|
4489b2c644d1153ccf7a41a4800fda27e71aba1a
|
refs/heads/master
| 2021-01-18T01:19:19.190608
| 2015-11-05T12:06:26
| 2015-11-05T12:06:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
from django.contrib import admin
# Register your models here.
from trips.models import Post
admin.site.register(Post)
|
[
"Eric@EricdeMacBook-Air.local"
] |
Eric@EricdeMacBook-Air.local
|
b2c9113e887e6f9330a69aaf0ca6e8e23cce899f
|
61c07d6314f06556898aad5d790071cbf639d7e4
|
/backend/model/__init__.py
|
736ef3d48a58956478d0adc287635806d41e17c0
|
[] |
no_license
|
Nicolas44Hernandez/backend
|
57851a9895fb52e76915b0a9bbf9af4bc9f75e54
|
75791efb55b192af3492d5d335965881c1813c53
|
refs/heads/main
| 2023-03-12T06:04:53.802022
| 2021-02-14T19:28:22
| 2021-02-14T19:28:22
| 327,448,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25
|
py
|
"""Data model package"""
|
[
"nicolas.hernandezpaez@orange.com"
] |
nicolas.hernandezpaez@orange.com
|
b7ad894f64081b12213952ebd6ff79d89994d631
|
79aaa3d38c0296d0c0572fd77bfbe24bb2ee4adb
|
/plotting_5clusters_evidence.py
|
7baff735ce9b001551d9b91658ac92efd4e75a53
|
[] |
no_license
|
ixicrom/droplets
|
809cb3e939211b6a50acd71a47468707e01661f3
|
c0e52a0a1be2aed8ad321430feee6c4c3b510a85
|
refs/heads/master
| 2021-07-20T06:01:24.962827
| 2021-02-11T16:39:04
| 2021-02-11T16:39:04
| 241,429,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,012
|
py
|
from full_analysis_tools import read_files, format_rectangles, read_calc_format_wedges, clust, optimalK, gini_score, PCA_transform
from data_tools import norm_data
import matplotlib.pyplot as pl
import os
import numpy as np
import glob
import pandas as pd
import matplotlib
matplotlib.style.core.reload_library()
pl.style.use('thesis')
graph_folder = '/Users/s1101153/OneDrive - University of Edinburgh/Files/OCP/Graphs/'
# %% rectangular/theta-averaged data
filePath_um = '/Users/s1101153/OneDrive - University of Edinburgh/Files/OCP_working/droplet_stacks/63x/rect_pickles_final'
imagePath = '/Users/s1101153/OneDrive - University of Edinburgh/Files/OCP_working/droplet_stacks/63x/final_images/ims_to_read/'
# dat = read_files(filePath)
dat_list = []
for file in glob.glob(os.path.join(filePath_um,'*.pkl')):
dat_list.append(pd.read_pickle(file))
dat = pd.concat(dat_list, axis=1)
print(dat.head())
file_suffix = ''
if input('Scale each slice to between 0 and 1? y/n: ') == 'y':
dat_scaled = norm_data(dat)
dat = dat_scaled
file_suffix += '_slices-scaled'
dat = dat.dropna()
print(dat.shape)
print(dat.head())
r_dat_theta = format_rectangles(dat, scale='standard', theta_av=True, rCol=dat.columns[0])
r_dat_rect = format_rectangles(dat, scale='standard', theta_av=False)
# %% calculate wedge data
wedge_path = '/Users/s1101153/OneDrive - University of Edinburgh/Files/OCP_working/droplet_stacks/63x/'
info_file = os.path.join(wedge_path, 'stack_info.csv')
save_file = os.path.join(wedge_path, 'wedges')+'.pkl'
info_file_A = os.path.join(wedge_path, 'stack_info_2020-08-28_A.csv')
save_file_A = os.path.join(wedge_path, 'wedges_A')+'.pkl'
info_file_B = os.path.join(wedge_path, 'stack_info_2020-08-28_B.csv')
save_file_B = os.path.join(wedge_path, 'wedges_B')+'.pkl'
wedges = read_calc_format_wedges(scale='standard',
fileName=save_file,
reslice=True,
imPath=imagePath,
infoFile=info_file,
hp=False)
wedges_A = read_calc_format_wedges(scale='standard',
fileName=save_file_A,
reslice=True,
imPath=imagePath,
infoFile=info_file_A,
hp=False)
wedges_B = read_calc_format_wedges(scale='standard',
fileName=save_file_B,
reslice=True,
imPath=imagePath,
infoFile=info_file_B,
hp=False)
r_dat = r_dat_rect[r_dat_rect.index.isin(wedges.index)]
r_dat_oneA = r_dat_rect[r_dat_rect.index.isin(wedges_A.index)]
r_dat_oneB = r_dat_rect[r_dat_rect.index.isin(wedges_B.index)]
r_dat_oneA = PCA_transform(r_dat_oneA, 0.99)[1]
r_dat_oneB = PCA_transform(r_dat_oneB, 0.99)[1]
r_dat_pca = PCA_transform(r_dat, 0.99)[1]
# %% gini score plots
if input('Make gini score plot? y/n: ') == 'y':
score_rect = []
score_theta = []
score_pca = []
score_oneA = []
score_oneB = []
for i in range(3, 23):
count_rect = clust('h', r_dat_rect, i, col_name='Hier_cluster')[1]
count_theta = clust('h', r_dat_theta, i, col_name='Hier_cluster')[1]
count_pca = clust('h', r_dat_pca, i, col_name='Hier_cluster')[1]
count_oneA = clust('h', r_dat_oneA, i, col_name='Hier_cluster')[1]
count_oneB = clust('h', r_dat_oneB, i, col_name='Hier_cluster')[1]
score_rect.append(np.mean(gini_score(count_rect)))
score_theta.append(np.mean(gini_score(count_theta)))
score_pca.append(np.mean(gini_score(count_pca)))
score_oneA.append(np.mean(gini_score(count_oneA)))
score_oneB.append(np.mean(gini_score(count_oneB)))
x = [* range(3, 23)]
pl.plot(x, score_rect, '-o', label='Rectangular')
pl.plot(x, score_theta, '-o', label='Theta-averaged')
pl.plot(x, score_pca, '-o', label='PCA 99\%')
pl.plot(x, score_oneA, '-o', label='oneA PCA 99\%')
pl.plot(x, score_oneB, '-o', label='oneB PCA 99\%')
pl.legend(loc='lower right')
pl.xlabel('Number of clusters')
pl.ylabel('Gini score')
    pl.title('Gini score for hierarchical clustering', loc='center', wrap=True)
pl.vlines(5, 0, 0.35, linestyles='dashed', colors='k')
pl.savefig(graph_folder+'gini_method_overview_um'+file_suffix+'.png')
pl.show()
# %% gap statistic plots
if input('Make gap statistic plot? y/n: ') == 'y':
gap_rect = optimalK(r_dat_rect, maxClusters=23)
gap_theta = optimalK(r_dat_theta, maxClusters=23)
gap_pca = optimalK(r_dat_pca, maxClusters=23)
gap_oneA = optimalK(r_dat_oneA, maxClusters=23)
gap_oneB = optimalK(r_dat_oneB, maxClusters=23)
gap_rect[1]['gap'][0]
pl.plot(gap_rect[1]['clusterCount'],
gap_rect[1]['gap']-gap_rect[1]['gap'][0],
'-o',
label='Rectangular')
pl.plot(gap_theta[1]['clusterCount'],
gap_theta[1]['gap']-gap_theta[1]['gap'][0],
'-o',
label='Theta-averaged')
pl.plot(gap_pca[1]['clusterCount'],
gap_pca[1]['gap']-gap_pca[1]['gap'][0],
'-o',
label='PCA 99\%')
pl.plot(gap_oneA[1]['clusterCount'],
gap_oneA[1]['gap']-gap_oneA[1]['gap'][0],
'-o',
label='oneA PCA 99\%')
pl.plot(gap_oneB[1]['clusterCount'],
gap_oneB[1]['gap']-gap_oneB[1]['gap'][0],
'-o',
label='oneB PCA 99\%')
pl.xlabel('Number of clusters')
pl.ylabel('Gap statistic (offset to start at 0)')
pl.title('Gap statistic for k-means clustering', wrap=True)
pl.legend(loc='upper left')
pl.vlines(5, 0, 1.75, linestyles='dashed', colors='k')
pl.savefig(graph_folder+'gap_method_overview_um'+file_suffix+'.png', bbox_inches='tight')
pl.tight_layout()
pl.show()
|
[
"e.m.gould@sms.ed.ac.uk"
] |
e.m.gould@sms.ed.ac.uk
|
cdbe66e7e54a536843154d004f8bdc4e3493c7b0
|
7fc37d5ff9d8928ddd89a21e4b559763531674b4
|
/TP2-JSON-LD/src/merge_data.py
|
e7f01e06c4e9f062fd3a205b100c1a3a86b14960
|
[] |
no_license
|
Keykor/Web-Social-Semantica-TPs
|
812e527c9e5c9a03a0eb59849f0c62515c7077f7
|
9192fcd43517f0e1096108e32dbed51a8a12828c
|
refs/heads/main
| 2023-05-27T01:22:29.101039
| 2021-06-07T21:17:26
| 2021-06-07T21:17:26
| 352,993,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,977
|
py
|
import json
import os
def create_data_directory_path(file_name):
return os.path.abspath(os.path.join(*[os.path.dirname(__file__), os.pardir, "data", file_name]))
def dict_prop_comparison(item1, item2):
if item1['@type'] == 'Person' and item2['@type'] == 'Person':
return item1['name'] == item2['name']
elif item1['@type'] == 'Organization' and item2['@type'] == 'Organization':
return item1['url'] == item2['url']
return False
def string_prop_comparison(item1, item2):
return item1 == item2
def review_prop_comparison(item1, item2):
return item1['author']['name'] == item2['author']['name']
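# A quick illustrative check of the helpers above (hypothetical values): two
# Person items match on 'name', while plain strings are compared by equality.
assert dict_prop_comparison({'@type': 'Person', 'name': 'Ana'},
                            {'@type': 'Person', 'name': 'Ana'})
assert not string_prop_comparison('Drama', 'Comedy')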
def main():
pages_to_scrap = None
with open(create_data_directory_path('pages_to_scrap.json'), encoding="utf-8") as file:
pages_to_scrap = json.load(file)
    # Properties chosen to keep in the merged record
properties_list_simple = ['@context', '@type', 'name', 'image', 'description', 'duration', 'contentRating', 'trailer', 'productionCompany', 'countryOfOrigin', 'releasedEvent', 'hasPart']
properties_list_list = ['aggregateRating', 'genre', 'actor', 'director', 'creator', 'review', 'author', 'character']
movie = {}
for prop in properties_list_simple:
movie[prop] = None
for prop in properties_list_list:
movie[prop] = []
for page in pages_to_scrap:
with open(create_data_directory_path(page["name"] + '.json'), encoding="utf-8") as file:
page_movie = json.load(file)
for prop in properties_list_simple:
if prop in page_movie and not movie[prop]:
movie[prop] = page_movie[prop]
for prop in properties_list_list:
if prop in page_movie:
                    # Depending on the key, use a different comparison method
                    if prop == 'genre' or prop == 'character':
                        comparison_method = string_prop_comparison
                    elif prop == 'review':
                        comparison_method = review_prop_comparison
                    else:
                        comparison_method = dict_prop_comparison
                    # Sometimes a value that should be a list does not come out of the extraction as a list
                    if not isinstance(page_movie[prop], list):
                        page_movie[prop] = [page_movie[prop]]
                    for item_page_movie in page_movie[prop]:
                        found = False
                        for item_movie in movie[prop]:
                            if comparison_method(item_movie, item_page_movie):
                                found = True
                                break
                        if not found:
                            movie[prop].append(item_page_movie)
with open(create_data_directory_path("merge.json"),"w",encoding="utf-8") as file:
json.dump(movie, file, ensure_ascii=False, indent=4)
if __name__ == "__main__":
main()
|
[
"lozabonora@gmail.com"
] |
lozabonora@gmail.com
|
fff2f0f774b7230cff0ccf88b15b0f3145a5f9d6
|
3c3b41bb9cdfc23cc95727636f0995560728098a
|
/FullDestroyAnalysis2016/Wheel0/Working/20160425/CMSSW_8_0_2/tmp/slc6_amd64_gcc530/src/HLTrigger/Egamma/src/HLTriggerEgamma/edm_write_config/hltEgammaDoubleEtFilter_cfi.py
|
26be1c890b9bae84fd0f2313bac17a1944ab19ba
|
[] |
no_license
|
FlorianScheuch/MassiveProductionMuonGun
|
eb5a2916345c21edf5fd0c5d6694333a0306c363
|
a9a336364309cb7c9e952c0cd85060032d1dccd1
|
refs/heads/master
| 2016-08-12T23:12:01.159605
| 2016-05-04T11:26:35
| 2016-05-04T11:26:35
| 53,405,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
import FWCore.ParameterSet.Config as cms
hltEgammaDoubleEtFilter = cms.EDFilter('HLTEgammaDoubleEtFilter',
saveTags = cms.bool(False),
candTag = cms.InputTag('hltTrackIsolFilter'),
L1IsoCand = cms.InputTag('hltL1IsoRecoEcalCandidate'),
L1NonIsoCand = cms.InputTag('hltL1NonIsoRecoEcalCandidate'),
relaxed = cms.untracked.bool(True),
etcut1 = cms.double(30),
etcut2 = cms.double(20),
npaircut = cms.int32(1)
)
|
[
"scheuch@physik.rwth-aachen.de"
] |
scheuch@physik.rwth-aachen.de
|
931763a8c78cde0bb0b977095ee6a21a5d1a133c
|
a75f243bc639398f8c19a0bf9ebdf02eb45d6d80
|
/07_PCA.py
|
887ee30d333880034192463d2dfad59e60f24712
|
[] |
no_license
|
v123582/scikit-learn-primer-guide
|
9b2d0e5087cd52a835a50723fc8d9c42ed9909b8
|
e5731511cb331f3f379854af9dad5452e4d13e8e
|
refs/heads/master
| 2020-04-08T19:00:21.872122
| 2018-02-02T12:24:10
| 2018-02-02T12:24:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 907
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Load the Wine dataset: the first 13 columns are features, the last is the class label
dataset = pd.read_csv('Wine.csv')
X = dataset.iloc[:, 0:13].values
y = dataset.iloc[:, 13].values

# Hold out 20% of the samples for testing
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)

# Standardize the features (fit the scaler on the training set only)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Project onto the first two principal components
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
explained_variance = pca.explained_variance_ratio_

# Fit a logistic regression classifier in the reduced 2-D space
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)

# Evaluate with a confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
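# Quick sanity check (illustrative addition): the share of total variance
# captured by the two retained principal components.
print(explained_variance.sum())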
|
[
"noreply@github.com"
] |
v123582.noreply@github.com
|
6f11e3d8b325f3143e511f0a5725c0b9bee4ae34
|
374fabd00f56ac040d8cd02519e8c8dc55baa2ff
|
/main.py
|
1e384f19806e33d3729e74773e3cf728c1691857
|
[] |
no_license
|
rshoger/alg-p2
|
d5c0d3fac839d83f59ddea4272c6067c9f077d0f
|
ee94dc6ba129eddad8a3ce013276a2815dfb6155
|
refs/heads/master
| 2021-01-22T02:13:38.425742
| 2017-02-06T02:11:47
| 2017-02-06T02:11:47
| 81,032,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,854
|
py
|
import argparse
from os.path import splitext
from timeit import timeit
from itertools import izip_longest as zip2
## Parse arguments ##
parser = argparse.ArgumentParser(description='Test some algorithms.')
parser.add_argument('filename', type=str, nargs='?', default='Amount.txt',
help='The name of the file to process')
args = parser.parse_args()
## Algorithm functions ##
# Slow change algorithm
def changeslow(amounts):
return 0
# Greedy algorithm
def changegreedy(amounts):
return 0
# Dynamic programming algorithm
def changedp(amounts):
return 0
## Test class ##
class some_test:
def __init__(self, input_filename):
self.sets = []
self.process_input(input_filename)
# File-names
output_filename = splitext(input_filename)[0] + 'change.txt'
time_results_filename = 'timing_results.txt'
# Open files for writing and store handles
self.output_file = open(output_filename, 'w')
self.time_results_file = open(time_results_filename, 'w')
def __del__(self):
print('\nClosing file handles...')
self.output_file.close()
self.time_results_file.close()
def process_input(self, filename):
with open(filename, 'r') as f:
# Unzip the file into two sets
z_1, z_2 = zip2(*zip(f,f))
# Create the test set
for i, l in enumerate(z_1):
self.sets.append([l.replace('\n',''), z_2[i].replace('\n','')])
def test_sets(self, f):
if not self.sets:
self.sets.append([1,2,3,4])
self.time_results_file.write(f.__name__ + '\n')
for i in self.sets:
# Call the function on the set
print('\nCalling ' + f.__name__ + '...\n--------\n Change: '
+ i[0] + '\n Amount: ' + i[1])
# Run and store results for algorithm
r = self.test_algorithm(f, i[0])
# Write to stdout and file handle
print(' Returned: ' + str(r))
self.output_file.write(i[0] + '\n' + str(r) + '\n')
# Perform timing
t = self.time_algorithm(f, i[0])
# Print results to stdout and file handle
print('\n Time: ' + str(t) + ' seconds')
self.time_results_file.write(str(t)+'\n')
def test_algorithm(self, f, s=[1,2,3,4]):
# Call the function and test the set
return f(s)
def time_algorithm(self, f, s=[1,2,3,4], number=1):
# Print timing event information
return timeit(lambda:f(s), number=number)
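## Reference sketch ##
# The three change-making functions above are still stubs. A minimal bottom-up
# dynamic-programming version of coin change (a sketch, assuming `coins` is the
# list of denominations and `value` the target amount) could look like this:
def changedp_sketch(coins, value):
    # min_coins[v] holds the fewest coins needed to make value v
    min_coins = [0] + [float('inf')] * value
    for v in range(1, value + 1):
        for c in coins:
            if c <= v and min_coins[v - c] + 1 < min_coins[v]:
                min_coins[v] = min_coins[v - c] + 1
    return min_coins[value]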
## Program entry point ##
if __name__ == '__main__':
# Create a tester 't'
t = some_test(args.filename)
# Call each algorithm individually for testing and timing
t.test_sets(changeslow)
t.test_sets(changegreedy)
t.test_sets(changedp)
|
[
"rossshoger6152@mail.linnbenton.edu"
] |
rossshoger6152@mail.linnbenton.edu
|
4e71af6baba16b9d9050a18c3468ec4a34d450c1
|
9399ebe214bd8d126428073c9a230f4f28eb1b58
|
/Shell/mysql_保留代码.py
|
562ae5d2fec3b981da319c3999d6ed4b66ed35c6
|
[] |
no_license
|
aarongo/Linux_script_python_old
|
c468c19360b129d3cacafa8d2c24b026ab9ccfe4
|
b80aca87cfee5f75878d1b50999d8c5ea4a056a9
|
refs/heads/master
| 2020-04-13T12:52:07.949210
| 2016-01-05T07:50:32
| 2016-01-05T07:50:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,077
|
py
|
# -*- coding: utf-8 -*-
import MySQLdb
import time
class MySQL:
    u'''A wrapper class around commonly used MySQLdb functions'''
    error_code = ''  # last MySQL error code
    _instance = None  # instance of this class
    _conn = None  # database connection
    _cur = None  # cursor
    _TIMEOUT = 30  # default timeout of 30 seconds
    _timecount = 0
    def __init__(self, dbconfig):
        u'Constructor: create the MySQL connection from the given connection parameters'
        try:
            self._conn = MySQLdb.connect(host=dbconfig['host'],
                                         port=dbconfig['port'],
                                         user=dbconfig['user'],
                                         passwd=dbconfig['passwd'],
                                         db=dbconfig['db'],
                                         charset=dbconfig['charset'])
        except MySQLdb.Error, e:
            self.error_code = e.args[0]
            error_msg = 'MySQL error! ', e.args[0], e.args[1]
            print error_msg
            # if the preset timeout has not been exceeded yet, try to connect again
            if self._timecount < self._TIMEOUT:
                interval = 5
                self._timecount += interval
                time.sleep(interval)
                return self.__init__(dbconfig)
            else:
                raise Exception(error_msg)
        self._cur = self._conn.cursor()
        self._instance = MySQLdb
    def query(self, sql):
        u'Execute a SELECT statement'
        try:
            self._cur.execute("SET NAMES utf8")
            result = self._cur.execute(sql)
        except MySQLdb.Error, e:
            self.error_code = e.args[0]
            print "Database error code:", e.args[0], e.args[1]
            result = False
        return result

    def update(self, sql):
        u'Execute an UPDATE or DELETE statement'
        try:
            self._cur.execute("SET NAMES utf8")
            result = self._cur.execute(sql)
            self._conn.commit()
        except MySQLdb.Error, e:
            self.error_code = e.args[0]
            print "Database error code:", e.args[0], e.args[1]
            result = False
        return result

    def insert(self, sql):
        u'Execute an INSERT statement. If the primary key is an auto-increment int, return the newly generated ID'
        try:
            self._cur.execute("SET NAMES utf8")
            self._cur.execute(sql)
            self._conn.commit()
            return self._conn.insert_id()
        except MySQLdb.Error, e:
            self.error_code = e.args[0]
            return False

    def fetchAllRows(self):
        u'Return all result rows as a list'
        return self._cur.fetchall()

    def fetchOneRow(self):
        u'Return one result row and advance the cursor; returns None after the last row'
        return self._cur.fetchone()

    def getRowCount(self):
        u'Get the number of rows in the result'
        return self._cur.rowcount
    def commit(self):
        u'Commit the current database transaction'
        self._conn.commit()

    def rollback(self):
        u'Roll back the current database transaction'
        self._conn.rollback()

    def __del__(self):
        u'Release resources (called automatically by the garbage collector)'
        try:
            self._cur.close()
            self._conn.close()
        except:
            pass

    def close(self):
        u'Close the database connection'
        self.__del__()
if __name__ == '__main__':
    '''Usage example'''
    # database connection parameters
    dbconfig = {'host': 'localhost',
                'port': 3306,
                'user': 'root',
                'passwd': 'aarongo',
                'db': 'python_linux',
                'charset': 'utf8'}
    # connect to the database by creating an instance of this class
    db = MySQL(dbconfig)
    # run a statement against the database
    sql = ""
    db.query(sql)
    # fetch the full result set
    result = db.fetchAllRows()
    # roughly equivalent to var_dump in PHP
    print result
    # loop over the rows
    for row in result:
        # values can also be fetched by index:
        # print row[0]
        # loop over the columns
        for colum in row:
            print colum
    # close the database connection
    db.close()
|
[
"lonnyliu@126.com"
] |
lonnyliu@126.com
|
fb6fcca2fc2530f217075a46e463172671d62b9c
|
322cc47eb3b7e596a7b136ea44a82f19b140fff9
|
/src/videos/migrations/0010_auto_20150923_0441.py
|
c117e3c9045ac7393dbb072f906c1c77d620218b
|
[] |
no_license
|
summerbt/srvup_dance
|
e3b753e809858d132a2ed5279098692f08660375
|
9fff98ea1eb09d0141256197d3a7ad207e98aea4
|
refs/heads/master
| 2021-01-10T03:30:26.232837
| 2015-11-08T22:09:54
| 2015-11-08T22:09:54
| 45,801,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('videos', '0009_auto_20150923_0357'),
]
operations = [
migrations.AlterField(
model_name='video',
name='share_message',
field=models.TextField(default=b'Check out this awesome video!'),
preserve_default=True,
),
]
|
[
"summerbartontaylor@gmail.com"
] |
summerbartontaylor@gmail.com
|
5f93909a0420fb933fa2121fee16a1d6a17db140
|
4a6015fb2f27add398776a12a9de045cc36c1e7a
|
/tests/test_problem5.py
|
dfdea26473e0ce319a55900322258878ffd2843b
|
[
"MIT"
] |
permissive
|
virtualmadden/Project.Euler
|
95955b044f20a8e3c8f02e81cc9d535e6c4a8751
|
e68df9ad4c37759faee97f9a3ed1dbe46686f3b3
|
refs/heads/master
| 2021-04-30T06:26:30.413738
| 2018-02-17T00:36:11
| 2018-02-17T00:36:11
| 121,442,202
| 0
| 0
|
MIT
| 2018-02-17T00:36:12
| 2018-02-13T22:06:46
|
Python
|
UTF-8
|
Python
| false
| false
| 213
|
py
|
import unittest
from euler.problem5 import Problem5
class TestEngine(unittest.TestCase):
def test_shouldReturnSolution(self):
self.assertEqual(232792560, Problem5(range(1, 21)).smallest_multiples())
|
[
"virtualmadden@users.noreply.github.com"
] |
virtualmadden@users.noreply.github.com
|
0ecdbd0762217635f28f8ebf03f38ed1864fe41c
|
2d1d3909314e86e9edc9e8395ec1e91a49ff4710
|
/onlinestore/urls.py
|
578de085a5c0d9abcb15711cbd3ff62007a2716f
|
[] |
no_license
|
eldolinskaya/online-bookstore-Django
|
eeb6c507dac8ce157e793a70f0190e34f51fe8cf
|
ba65f03ad265a20cc50234ccde935374fb822b20
|
refs/heads/master
| 2020-12-04T03:30:26.539491
| 2020-01-03T13:16:48
| 2020-01-03T13:16:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,552
|
py
|
"""onlinestore URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static

urlpatterns = [  # URLs should go from the most specific to the most general
path('admin/', admin.site.urls),
path('', include('refers.urls')),
path('', include('books.urls')),
path('', include('home.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('cart/', include('cart.urls'), name = 'cart'), #, namespace='cart'
path('order/', include('order.urls')), #, namespace='order'
] + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)
#urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
[
"helen.dolinskaya@gmail.com"
] |
helen.dolinskaya@gmail.com
|
14589c3f62a3f4be1320992b9bd0e5a12d0c9f96
|
a222e2999251ba7f0d62c428ba8cc170b6d0b3b7
|
/AtC_Beg_Con_101-110/ABC110/D-Factorization.py
|
e78d3f74138761284d46dbadaba66090a0ab7426
|
[
"MIT"
] |
permissive
|
yosho-18/AtCoder
|
3e1f3070c5eb44f154c8104fbd5449f47446ce14
|
50f6d5c92a01792552c31ac912ce1cd557b06fb0
|
refs/heads/master
| 2020-06-02T10:21:29.458365
| 2020-05-29T12:40:48
| 2020-05-29T12:40:48
| 188,795,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
import math
from collections import defaultdict
n, m = map(int, input().split())
"""def primes(x):
if x < 2: return []
primes = [i for i in range(x)]
    primes[1] = 0  # 1 is not a prime
    # sieve of Eratosthenes
for prime in primes:
if prime > math.sqrt(x): break
if prime == 0: continue
for non_prime in range(2 * prime, x, prime): primes[non_prime] = 0
return [prime for prime in primes if prime != 0]
pr = primes(m + 1)
di = {}
di = defaultdict(int)
for i in pr:
while m % i == 0:
m = m // i
di[i] += 1"""
def prime_factors(n):
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
pr = prime_factors(m)
di = defaultdict(int)
for i in pr:
while m % i == 0:
m = m // i
di[i] += 1
"""def comb(n, r):
return math.factorial(n) // (math.factorial(n - r) * math.factorial(r))"""
mod = 10 ** 9 + 7
MAX_N = 10 ** 5 + 100
factorial = [1] * MAX_N
# precompute the factorial table up front
def calc_factorial():
for i in range(1, MAX_N):
factorial[i] = i * factorial[i - 1] % mod
def comb(n, k):
    a = factorial[n] % mod
    b = (factorial[k] * factorial[n - k]) % mod
    b_ = pow(b, mod - 2, mod)  # modular inverse via Fermat's little theorem (mod is prime)
    return (a * b_) % mod
# build the factorial table
calc_factorial()
ans = 1
for i in di:
ans = ans * comb(di[i] + n - 1, di[i]) % mod
print(ans)
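# Worked check of the counting argument (hypothetical input): for n = 3 and
# m = 12 = 2^2 * 3^1, each prime's exponent is distributed over the n factors
# with repetition, giving C(2+3-1, 2) * C(1+3-1, 1) = 6 * 3 = 18 sequences.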
|
[
"44283410+wato18@users.noreply.github.com"
] |
44283410+wato18@users.noreply.github.com
|
d5c1d28f0dee17d8703e23afb9b76efb144e5319
|
053ffbc38880613ea1a5f2666f4b1fd965975916
|
/euler.py
|
719b408329e491caf8e7b3602ac1ca151e98fc80
|
[] |
no_license
|
davidchen/project-euler
|
43f0c790e1a0478a6cc4a7d7c6d74bbaf497555a
|
da1cfeabc30fb41ae7f2333d896bf49e5bf0bb83
|
refs/heads/master
| 2021-04-28T20:47:02.897609
| 2018-02-18T08:56:28
| 2018-02-18T08:56:28
| 121,934,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,019
|
py
|
import time
import timeit
from num2words import num2words
def main(problem_number):
n = problem_number
if n in range(1, 1000):
n = 'euler' + str(n)
answer = globals()[n]()
print ('Answer: {}'.format(answer))
else:
print ('Problem has not been solved yet.')
return
def euler1():
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
# Find the sum of all the multiples of 3 or 5 below 1000.
s = 0 # keeps track of the sum so far
for x in range(1, 1000):
if x % 3 == 0 or x % 5 == 0: # if the number is divisible by either 3 or 5, add that to the sum
s += x
return s
def euler2():
# Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:
# 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
# By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
s = 0 # tracks the sum
fib1 = 1 # the first fib number
fib2 = 2 # the second fib number
while fib2 < 4000000: # make sure the process does not exceed 4 million
if fib1 % 2 == 0: # checks both current fib numbers to see if they are even
s += fib1 # if even, adds to the sum
if fib2 % 2 == 0:
s += fib2
fib1 += fib2 # updates the 1st fib num by adding the next one to itself
fib2 += fib1 # updates the 2nd fib num by adding itself to the newly updated 1st fib num
return s
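    # Cross-check: the even Fibonacci terms are every third term and satisfy
    # E(n) = 4*E(n-1) + E(n-2) with E(1) = 2, E(2) = 8, so the even terms
    # below four million can also be summed directly with that recurrence.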
def euler3():
# The prime factors of 13195 are 5, 7, 13 and 29.
# What is the largest prime factor of the number 600851475143?
num = 600851475143 # set the number to num
for i in range(2, 100000): # assuming the max prime won't be greater than 100000
while num % i == 0: # first test to see if the number is actually a prime, which filters out non-prime factors
num //= i # if it is prime, divide num by this prime and continue
if num == 1 or num == i: # we reach 1 or when num is only self-divisible; i is the largest prime factor
return i
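    # Worked check on the statement's example: 13195 is divided down by 5, 7
    # and 13 in turn, leaving 29; dividing by 29 brings num to 1, so 29 is returned.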
def euler4():
# A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 * 99.
# Find the largest palindrome made from the product of two 3-digit numbers.
largest_palindrome = 0
for i in range(100, 1000): # first three digit number
for j in range(100, 1000): # second three digit number
            test_num = str(i * j)  # makes a string from the product and compares with the reverse
            if test_num == test_num[::-1] and i * j > largest_palindrome:  # if greater than last palindrome found, update
                largest_palindrome = i * j
return largest_palindrome
def euler5():
# 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
# What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
    i = 9699690  # 2*3*5*7*11*13*17*19: the answer must be a multiple of every prime below 20
while True:
if all(i % j == 0 for j in range(1, 21)): # tests to see if this number is evenly divisible by 1-20
return i
else:
i += 9699690
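    # Equivalent closed form: the answer is lcm(1, ..., 20). A cross-check in
    # Python 2: reduce(lambda a, b: a * b // gcd(a, b), range(1, 21)) with gcd
    # imported from the fractions module gives 232792560.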
def euler6():
# The sum of the squares of the first ten natural numbers is,
# 1^2 + 2^2 + ... + 10^2 = 385
# The square of the sum of the first ten natural numbers is,
# (1 + 2 + ... + 10)^2 = 55^2 = 3025
# Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 - 385 = 2640.
# Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
sum_squares = squared_sum = 0
for i in range(1, 101): # keeps track of the first one hundred natural numbers
s_1 = i * i # squares i
sum_squares += s_1 # adds the square to the first sum
squared_sum += i # adds i to the second sum in preparation for final squaring
squared_sum *= squared_sum # squares the second sum
return squared_sum - sum_squares # returns the difference
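    # Closed-form cross-check: sum(1..n) = n(n+1)/2 and the sum of squares is
    # n(n+1)(2n+1)/6, so for n = 100 the difference is
    # (100*101//2)**2 - 100*101*201//6 = 25502500 - 338350 = 25164150.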
def euler7():
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
# What is the 10,001st prime number?
primes = [2] # begin list of primes, starting with 2
count = 1
    for x in range(3, 2000000, 2):
for p in primes:
if x % p == 0:
break
if x < p * p:
# print x,p
primes.append(x)
count += 1
if count == 10001:
return x
break
return
def euler8():
# The four adjacent digits in the 1000-digit number that have the greatest product are 9*9*8*9 = 5832.
#
# 73167176531330624919225119674426574742355349194934
# 96983520312774506326239578318016984801869478851843
# 85861560789112949495459501737958331952853208805511
# 12540698747158523863050715693290963295227443043557
# 66896648950445244523161731856403098711121722383113
# 62229893423380308135336276614282806444486645238749
# 30358907296290491560440772390713810515859307960866
# 70172427121883998797908792274921901699720888093776
# 65727333001053367881220235421809751254540594752243
# 52584907711670556013604839586446706324415722155397
# 53697817977846174064955149290862569321978468622482
# 83972241375657056057490261407972968652414535100474
# 82166370484403199890008895243450658541227588666881
# 16427171479924442928230863465674813919123162824586
# 17866458359124566529476545682848912883142607690042
# 24219022671055626321111109370544217506941658960408
# 07198403850962455444362981230987879927244284909188
# 84580156166097919133875499200524063689912560717606
# 05886116467109405077541002256983155200055935729725
# 71636269561882670428252483600823257530420752963450
#
# Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product?
num_str = \
'73167176531330624919225119674426574742355349194934969835203127745063262395783180169848018694788518438586' \
'15607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648' \
'95044524452316173185640309871112172238311362229893423380308135336276614282806444486645238749303589072962' \
'90491560440772390713810515859307960866701724271218839987979087922749219016997208880937766572733300105336' \
'78812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064' \
'95514929086256932197846862248283972241375657056057490261407972968652414535100474821663704844031998900088' \
'95243450658541227588666881164271714799244429282308634656748139191231628245861786645835912456652947654568' \
'28489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987' \
'87992724428490918884580156166097919133875499200524063689912560717606058861164671094050775410022569831552' \
'0005593572972571636269561882670428252483600823257530420752963450'
greatest_prod = 0
for index in range(0, len(num_str) - 12): # iterate from the first number to the 13th from last number
tmp_prod = int(num_str[index]) # tmp_prod begins with the starting digit of the 13th adjacent digits
for i in range(1, 13):
tmp_prod *= int(num_str[index + i]) # tmp_prod updates with the product of all 13 digits
greatest_prod = max(greatest_prod, tmp_prod) # max product updates
return greatest_prod
def euler9():
# A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
#
# a^2 + b^2 = c^2
# For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
#
# There exists exactly one Pythagorean triplet for which a + b + c = 1000.
# Find the product abc.
for c in range(335, 1000): # c has to be at least 335 because c > b > a, so if c < 335: 334+333+332 = 999 < 1000
for b in range(1, c):
for a in range(1, b):
                if a + b + c == 1000 and (a * a) + (b * b) == c * c:  # simple, but we make sure the addition check is first
                    return a * b * c
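    # The search finds the triplet (200, 375, 425): 200^2 + 375^2 = 425^2 and
    # 200 + 375 + 425 = 1000, so the product returned is 31875000.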
def euler10():
# The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
# Find the sum of all the primes below two million.
primes = [2] # keep track of all primes found up to two million
for test_n in range(3, 2000001, 2): # check up to the limit using odd numbers beginning from 3
for p in primes: # for each new number, check against current list of primes
if test_n % p == 0: # if divisible by any number in primes, number is known to be composite and loop breaks
break
if test_n < p * p: # we only need to loop up to the first prime that is greater than the square root of n
primes.append(test_n) # if non-composite and passes above condition, this must be a prime number
break
return sum(primes)
def euler11():
# In the 20*20 grid below, four numbers along a diagonal line have been marked in red.
# 08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
# 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
# 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
# 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
# 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
# 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
# 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
# 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
# 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
# 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
# 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
# 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
# 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
# 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
# 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
# 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
# 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
# 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
# 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
# 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
# The product of these numbers is 26*63*78*14 = 1788696.
# What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20*20 grid?
max_num = 0
numbers = [
[8, 2, 22, 97, 38, 15, 0, 40, 0, 75, 4, 5, 7, 78, 52, 12, 50, 77, 91, 8],
[49, 49, 99, 40, 17, 81, 18, 57, 60, 87, 17, 40, 98, 43, 69, 48, 4, 56, 62, 0],
[81, 49, 31, 73, 55, 79, 14, 29, 93, 71, 40, 67, 53, 88, 30, 3, 49, 13, 36, 65],
[52, 70, 95, 23, 4, 60, 11, 42, 69, 24, 68, 56, 1, 32, 56, 71, 37, 2, 36, 91],
[22, 31, 16, 71, 51, 67, 63, 89, 41, 92, 36, 54, 22, 40, 40, 28, 66, 33, 13, 80],
[24, 47, 32, 60, 99, 3, 45, 2, 44, 75, 33, 53, 78, 36, 84, 20, 35, 17, 12, 50],
[32, 98, 81, 28, 64, 23, 67, 10, 26, 38, 40, 67, 59, 54, 70, 66, 18, 38, 64, 70],
[67, 26, 20, 68, 2, 62, 12, 20, 95, 63, 94, 39, 63, 8, 40, 91, 66, 49, 94, 21],
[24, 55, 58, 5, 66, 73, 99, 26, 97, 17, 78, 78, 96, 83, 14, 88, 34, 89, 63, 72],
[21, 36, 23, 9, 75, 0, 76, 44, 20, 45, 35, 14, 0, 61, 33, 97, 34, 31, 33, 95],
[78, 17, 53, 28, 22, 75, 31, 67, 15, 94, 3, 80, 4, 62, 16, 14, 9, 53, 56, 92],
[16, 39, 5, 42, 96, 35, 31, 47, 55, 58, 88, 24, 0, 17, 54, 24, 36, 29, 85, 57],
[86, 56, 0, 48, 35, 71, 89, 7, 5, 44, 44, 37, 44, 60, 21, 58, 51, 54, 17, 58],
[19, 80, 81, 68, 5, 94, 47, 69, 28, 73, 92, 13, 86, 52, 17, 77, 4, 89, 55, 40],
[4, 52, 8, 83, 97, 35, 99, 16, 7, 97, 57, 32, 16, 26, 26, 79, 33, 27, 98, 66],
[88, 36, 68, 87, 57, 62, 20, 72, 3, 46, 33, 67, 46, 55, 12, 32, 63, 93, 53, 69],
[4, 42, 16, 73, 38, 25, 39, 11, 24, 94, 72, 18, 8, 46, 29, 32, 40, 62, 76, 36],
[20, 69, 36, 41, 72, 30, 23, 88, 34, 62, 99, 69, 82, 67, 59, 85, 74, 4, 36, 16],
[20, 73, 35, 29, 78, 31, 90, 1, 74, 31, 49, 71, 48, 86, 81, 16, 23, 57, 5, 54],
[1, 70, 54, 71, 83, 51, 54, 69, 16, 92, 33, 48, 61, 43, 52, 1, 89, 19, 67, 48]
]
for i_x in range(0, 20):
for i_y in range(0, 17):
h_product = numbers[i_x][i_y] * numbers[i_x][i_y + 1] * numbers[i_x][i_y + 2] * numbers[i_x][i_y + 3]
v_product = numbers[i_y][i_x] * numbers[i_y + 1][i_x] * numbers[i_y + 2][i_x] * numbers[i_y + 3][i_x]
d_product_1 = d_product_2 = 0
if i_x < 17:
d_product_1 = numbers[i_x][i_y] * numbers[i_x + 1][i_y + 1] * numbers[i_x + 2][i_y + 2] * \
numbers[i_x + 3][i_y + 3]
d_product_2 = numbers[i_y + 3][i_x] * numbers[i_y + 2][i_x + 1] * numbers[i_y + 1][i_x + 2] * \
numbers[i_y][i_x + 3]
max_num = max(max_num, h_product, v_product, d_product_1, d_product_2)
return max_num
def euler12():
# The sequence of triangle numbers is generated by adding the natural numbers. So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten terms would be:
#
# 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
#
# Let us list the factors of the first seven triangle numbers:
#
# 1: 1
# 3: 1,3
# 6: 1,2,3,6
# 10: 1,2,5,10
# 15: 1,3,5,15
# 21: 1,3,7,21
# 28: 1,2,4,7,14,28
# We can see that 28 is the first triangle number to have over five divisors.
#
# What is the value of the first triangle number to have over five hundred divisors?
primes_up_to_lim = [2] # keep track of all primes found up to lim; increase lim as needed
lim = 20000
for test_n in range(3, lim + 1, 2): # check up to the limit using odd numbers beginning from 3
for p in primes_up_to_lim: # for each new number, check against current list of primes
if test_n % p == 0: # if divisible by any number in primes, number is known to be composite and loop breaks
break
            if test_n < p * p:  # we only need to loop up to the first prime that is greater than the square root of n
                primes_up_to_lim.append(test_n)  # non-composite and past the check above, so this must be a prime number
                break
tri_num = tri_num_count = 0
for n in range(1, 100000):
prime_factors = []
tri_num_count += 1
tri_num += n
tmp_tri_num = tri_num
while tmp_tri_num != 1:
for prime in primes_up_to_lim:
if tmp_tri_num % prime == 0:
tmp_tri_num /= prime
prime_factors.append(prime)
break
# print (tri_num,prime_factors)
product = 0
if len(prime_factors) > 0:
dup_count = 1
product = 1
old_f = prime_factors[0]
for f in prime_factors:
new_f = f
if new_f == old_f:
dup_count += 1
else:
product *= dup_count
dup_count = 2
old_f = new_f
product *= dup_count
if product > 500:
return tri_num
return
def euler13():
# Work out the first ten digits of the sum of the following one-hundred 50-digit numbers.
#
# 37107287533902102798797998220837590246510135740250
# 46376937677490009712648124896970078050417018260538
# 74324986199524741059474233309513058123726617309629
# 91942213363574161572522430563301811072406154908250
# 23067588207539346171171980310421047513778063246676
# 89261670696623633820136378418383684178734361726757
# 28112879812849979408065481931592621691275889832738
# 44274228917432520321923589422876796487670272189318
# 47451445736001306439091167216856844588711603153276
# 70386486105843025439939619828917593665686757934951
# 62176457141856560629502157223196586755079324193331
# 64906352462741904929101432445813822663347944758178
# 92575867718337217661963751590579239728245598838407
# 58203565325359399008402633568948830189458628227828
# 80181199384826282014278194139940567587151170094390
# 35398664372827112653829987240784473053190104293586
# 86515506006295864861532075273371959191420517255829
# 71693888707715466499115593487603532921714970056938
# 54370070576826684624621495650076471787294438377604
# 53282654108756828443191190634694037855217779295145
# 36123272525000296071075082563815656710885258350721
# 45876576172410976447339110607218265236877223636045
# 17423706905851860660448207621209813287860733969412
# 81142660418086830619328460811191061556940512689692
# 51934325451728388641918047049293215058642563049483
# 62467221648435076201727918039944693004732956340691
# 15732444386908125794514089057706229429197107928209
# 55037687525678773091862540744969844508330393682126
# 18336384825330154686196124348767681297534375946515
# 80386287592878490201521685554828717201219257766954
# 78182833757993103614740356856449095527097864797581
# 16726320100436897842553539920931837441497806860984
# 48403098129077791799088218795327364475675590848030
# 87086987551392711854517078544161852424320693150332
# 59959406895756536782107074926966537676326235447210
# 69793950679652694742597709739166693763042633987085
# 41052684708299085211399427365734116182760315001271
# 65378607361501080857009149939512557028198746004375
# 35829035317434717326932123578154982629742552737307
# 94953759765105305946966067683156574377167401875275
# 88902802571733229619176668713819931811048770190271
# 25267680276078003013678680992525463401061632866526
# 36270218540497705585629946580636237993140746255962
# 24074486908231174977792365466257246923322810917141
# 91430288197103288597806669760892938638285025333403
# 34413065578016127815921815005561868836468420090470
# 23053081172816430487623791969842487255036638784583
# 11487696932154902810424020138335124462181441773470
# 63783299490636259666498587618221225225512486764533
# 67720186971698544312419572409913959008952310058822
# 95548255300263520781532296796249481641953868218774
# 76085327132285723110424803456124867697064507995236
# 37774242535411291684276865538926205024910326572967
# 23701913275725675285653248258265463092207058596522
# 29798860272258331913126375147341994889534765745501
# 18495701454879288984856827726077713721403798879715
# 38298203783031473527721580348144513491373226651381
# 34829543829199918180278916522431027392251122869539
# 40957953066405232632538044100059654939159879593635
# 29746152185502371307642255121183693803580388584903
# 41698116222072977186158236678424689157993532961922
# 62467957194401269043877107275048102390895523597457
# 23189706772547915061505504953922979530901129967519
# 86188088225875314529584099251203829009407770775672
# 11306739708304724483816533873502340845647058077308
# 82959174767140363198008187129011875491310547126581
# 97623331044818386269515456334926366572897563400500
# 42846280183517070527831839425882145521227251250327
# 55121603546981200581762165212827652751691296897789
# 32238195734329339946437501907836945765883352399886
# 75506164965184775180738168837861091527357929701337
# 62177842752192623401942399639168044983993173312731
# 32924185707147349566916674687634660915035914677504
# 99518671430235219628894890102423325116913619626622
# 73267460800591547471830798392868535206946944540724
# 76841822524674417161514036427982273348055556214818
# 97142617910342598647204516893989422179826088076852
# 87783646182799346313767754307809363333018982642090
# 10848802521674670883215120185883543223812876952786
# 71329612474782464538636993009049310363619763878039
# 62184073572399794223406235393808339651327408011116
# 66627891981488087797941876876144230030984490851411
# 60661826293682836764744779239180335110989069790714
# 85786944089552990653640447425576083659976645795096
# 66024396409905389607120198219976047599490197230297
# 64913982680032973156037120041377903785566085089252
# 16730939319872750275468906903707539413042652315011
# 94809377245048795150954100921645863754710598436791
# 78639167021187492431995700641917969777599028300699
# 15368713711936614952811305876380278410754449733078
# 40789923115535562561142322423255033685442488917353
# 44889911501440648020369068063960672322193204149535
# 41503128880339536053299340368006977710650566631954
# 81234880673210146739058568557934581403627822703280
# 82616570773948327592232845941706525094512325230608
# 22918802058777319719839450180888072429661980811197
# 77158542502016545090413245809786882778948721859617
# 72107838435069186155435662884062257473692284509516
# 20849603980134001723930671666823555245252804609722
# 53503534226472524250874054075591789781264330331690
numbers = [
37107287533902102798797998220837590246510135740250,
46376937677490009712648124896970078050417018260538,
74324986199524741059474233309513058123726617309629,
91942213363574161572522430563301811072406154908250,
23067588207539346171171980310421047513778063246676,
89261670696623633820136378418383684178734361726757,
28112879812849979408065481931592621691275889832738,
44274228917432520321923589422876796487670272189318,
47451445736001306439091167216856844588711603153276,
70386486105843025439939619828917593665686757934951,
62176457141856560629502157223196586755079324193331,
64906352462741904929101432445813822663347944758178,
92575867718337217661963751590579239728245598838407,
58203565325359399008402633568948830189458628227828,
80181199384826282014278194139940567587151170094390,
35398664372827112653829987240784473053190104293586,
86515506006295864861532075273371959191420517255829,
71693888707715466499115593487603532921714970056938,
54370070576826684624621495650076471787294438377604,
53282654108756828443191190634694037855217779295145,
36123272525000296071075082563815656710885258350721,
45876576172410976447339110607218265236877223636045,
17423706905851860660448207621209813287860733969412,
81142660418086830619328460811191061556940512689692,
51934325451728388641918047049293215058642563049483,
62467221648435076201727918039944693004732956340691,
15732444386908125794514089057706229429197107928209,
55037687525678773091862540744969844508330393682126,
18336384825330154686196124348767681297534375946515,
80386287592878490201521685554828717201219257766954,
78182833757993103614740356856449095527097864797581,
16726320100436897842553539920931837441497806860984,
48403098129077791799088218795327364475675590848030,
87086987551392711854517078544161852424320693150332,
59959406895756536782107074926966537676326235447210,
69793950679652694742597709739166693763042633987085,
41052684708299085211399427365734116182760315001271,
65378607361501080857009149939512557028198746004375,
35829035317434717326932123578154982629742552737307,
94953759765105305946966067683156574377167401875275,
88902802571733229619176668713819931811048770190271,
25267680276078003013678680992525463401061632866526,
36270218540497705585629946580636237993140746255962,
24074486908231174977792365466257246923322810917141,
91430288197103288597806669760892938638285025333403,
34413065578016127815921815005561868836468420090470,
23053081172816430487623791969842487255036638784583,
11487696932154902810424020138335124462181441773470,
63783299490636259666498587618221225225512486764533,
67720186971698544312419572409913959008952310058822,
95548255300263520781532296796249481641953868218774,
76085327132285723110424803456124867697064507995236,
37774242535411291684276865538926205024910326572967,
23701913275725675285653248258265463092207058596522,
29798860272258331913126375147341994889534765745501,
18495701454879288984856827726077713721403798879715,
38298203783031473527721580348144513491373226651381,
34829543829199918180278916522431027392251122869539,
40957953066405232632538044100059654939159879593635,
29746152185502371307642255121183693803580388584903,
41698116222072977186158236678424689157993532961922,
62467957194401269043877107275048102390895523597457,
23189706772547915061505504953922979530901129967519,
86188088225875314529584099251203829009407770775672,
11306739708304724483816533873502340845647058077308,
82959174767140363198008187129011875491310547126581,
97623331044818386269515456334926366572897563400500,
42846280183517070527831839425882145521227251250327,
55121603546981200581762165212827652751691296897789,
32238195734329339946437501907836945765883352399886,
75506164965184775180738168837861091527357929701337,
62177842752192623401942399639168044983993173312731,
32924185707147349566916674687634660915035914677504,
99518671430235219628894890102423325116913619626622,
73267460800591547471830798392868535206946944540724,
76841822524674417161514036427982273348055556214818,
97142617910342598647204516893989422179826088076852,
87783646182799346313767754307809363333018982642090,
10848802521674670883215120185883543223812876952786,
71329612474782464538636993009049310363619763878039,
62184073572399794223406235393808339651327408011116,
66627891981488087797941876876144230030984490851411,
60661826293682836764744779239180335110989069790714,
85786944089552990653640447425576083659976645795096,
66024396409905389607120198219976047599490197230297,
64913982680032973156037120041377903785566085089252,
16730939319872750275468906903707539413042652315011,
94809377245048795150954100921645863754710598436791,
78639167021187492431995700641917969777599028300699,
15368713711936614952811305876380278410754449733078,
40789923115535562561142322423255033685442488917353,
44889911501440648020369068063960672322193204149535,
41503128880339536053299340368006977710650566631954,
81234880673210146739058568557934581403627822703280,
82616570773948327592232845941706525094512325230608,
22918802058777319719839450180888072429661980811197,
77158542502016545090413245809786882778948721859617,
72107838435069186155435662884062257473692284509516,
20849603980134001723930671666823555245252804609722,
53503534226472524250874054075591789781264330331690
]
return str(sum(numbers))[0:10]
def euler14():
chain_length_dict = {}
for num in range(1, 1000001):
chain_count = 1
tmp_num = num
while tmp_num != 1:
if tmp_num < num:
chain_count += (chain_length_dict[tmp_num] - 1)
chain_length_dict[num] = chain_count
break
elif tmp_num % 2 == 0: # num is even
chain_count += 1
                tmp_num //= 2  # floor division keeps dict keys integral
# print(' -> {}'.format(tmp_num), end='')
else: # num is odd
chain_count += 1
tmp_num = 3 * tmp_num + 1
if num not in chain_length_dict:
chain_length_dict[num] = chain_count
return max(chain_length_dict.keys(), key=(lambda k: chain_length_dict[k]))
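# A minimal alternative sketch (an assumption, not part of the original
# solutions): the same Collatz chain lengths can be memoized recursively
# with functools.lru_cache, trading euler14's explicit dict for recursion
# depth.
import functools
@functools.lru_cache(maxsize=None)
def _collatz_length(n):
    if n == 1:
        return 1
    return 1 + _collatz_length(n // 2 if n % 2 == 0 else 3 * n + 1)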
def euler15():
print (
'\033[91mNOTE:\033[0m Euler #15 was not solved using a program. Use C(40,20) as the logic to the 20x20 grid.')
return 137846528820
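# A one-line check of the note above (a sketch; assumes Python 3.8+ for
# math.comb): the number of lattice paths through a 20x20 grid is C(40, 20).
def _euler15_check():
    import math
    return math.comb(40, 20)  # 137846528820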
def euler16():
num = str(2 ** 1000)
sum = 0
for n in num:
sum += int(n)
return sum
def euler17():
sum = 0
for x in range(1, 1001):
word = num2words(x).replace(' ', '').replace('-', '')
sum += len(word)
return sum
def euler18(triangle=[]):
if not triangle:
triangle = [
[75],
[95, 64],
[17, 47, 82],
[18, 35, 87, 10],
[20, 4, 82, 47, 65],
[19, 1, 23, 75, 3, 34],
[88, 2, 77, 73, 7, 63, 67],
[99, 65, 4, 28, 6, 16, 70, 92],
[41, 41, 26, 56, 83, 40, 80, 70, 33],
[41, 48, 72, 33, 47, 32, 37, 16, 94, 29],
[53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14],
[70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57],
[91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48],
[63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31],
[4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23]
]
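    # Bottom-up dynamic programming: starting from the second-to-last row,
    # overwrite each cell with itself plus the larger of its two children,
    # so the apex finally holds the maximum path total.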
rows = len(triangle)
start_row_index = rows-2
while start_row_index > -1:
for x in range(0, len(triangle[start_row_index])):
num_tmp = triangle[start_row_index][x]
child_one = triangle[start_row_index+1][x]
child_two = triangle[start_row_index+1][x+1]
triangle[start_row_index][x] = num_tmp + max(child_one, child_two)
start_row_index-=1
return triangle[0][0]
def euler20(n=100):
product = 1
sum = 0
for x in range(1, n+1):
product *= x
for digit in str(product):
sum += int(digit)
return sum
def euler67():
    triangle = []
    with open("p067_triangle.txt") as f:
        for l in f:
            numbers = l.strip()
            x = [int(i) for i in numbers.split()]
            triangle.append(x)
    return euler18(triangle)
if __name__ == "__main__":
problem_num = int(input("problem number? "))
main(problem_num)
number_of_times_to_run = 1
t = timeit.Timer("euler" + str(problem_num) + "()", "from __main__ import euler" + str(problem_num))
time = t.timeit(number_of_times_to_run)
avg = time / (number_of_times_to_run)
print ("%.7f seconds" % avg)
|
[
"chendavid96@gmail.com"
] |
chendavid96@gmail.com
|
7c8681b0015f816578c843094fbb7d812fadd71e
|
98c78530c0f73125b64582e3bed755d8779f773d
|
/Bai 7.4.py
|
796e814118e5158005f7d3cc914a3cfd643d2978
|
[] |
no_license
|
tranquoctrung1/python_week01
|
8695faba51bcfb006428e4d8ccea534c970438d3
|
6685fadb9dbfd1e395bfba08ab737189052440db
|
refs/heads/master
| 2020-08-01T10:57:35.706507
| 2019-09-28T12:35:41
| 2019-09-28T12:35:41
| 210,975,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
def isEmptyDictionary(d):
    if len(d) == 0:
        print("Dictionary is empty!!")
    else:
        print('Dictionary is not empty!!')
def main():
    d = {
        'ban': "Đạo",
        'tui': "Trung"
    }
    isEmptyDictionary(d)
return 0
main()
|
[
"tranquoctrung.12k9@gmail.com"
] |
tranquoctrung.12k9@gmail.com
|
f83961108c952f7ba19d54ce7f46a2cf8d7b550e
|
3f15edc4afd3d8f813aaf2cd71bcde26a9ff9540
|
/users/tests.py
|
b2aa524ceaa3a9e35e1916d002624ac3e46a9aa3
|
[] |
no_license
|
kanikamital0606/Custom-user-model-Admin-Testing
|
7650d8522edf11daea469af24fce016454b7598b
|
610e6bcb0309827f6ab9e57169d78585364353c6
|
refs/heads/master
| 2023-07-21T23:42:54.708033
| 2021-08-30T23:01:30
| 2021-08-30T23:01:30
| 401,090,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,968
|
py
|
from django.test import TestCase
from django.contrib.auth import get_user_model
# Create your tests here.
class UserAccountTests(TestCase):
def test_new_superuser(self):
db = get_user_model()
super_user = db.objects.create_superuser(
'testuser@super.com', 'username', 'firstname', 'password')
self.assertEqual(super_user.email, 'testuser@super.com')
self.assertEqual(super_user.user_name, 'username')
self.assertEqual(super_user.first_name, 'firstname')
self.assertTrue(super_user.is_superuser)
self.assertTrue(super_user.is_staff)
self.assertTrue(super_user.is_active)
self.assertEqual(str(super_user), "username")
with self.assertRaises(ValueError):
db.objects.create_superuser(
email='testuser@super.com', user_name='username1', first_name='first_name', password='password', is_superuser=False)
with self.assertRaises(ValueError):
db.objects.create_superuser(
email='testuser@super.com', user_name='username1', first_name='first_name', password='password', is_staff=False)
with self.assertRaises(ValueError):
db.objects.create_superuser(
email='', user_name='username1', first_name='first_name', password='password', is_superuser=True)
def test_new_user(self):
db = get_user_model()
user = db.objects.create_user(
'testuser@user.com', 'username', 'firstname', 'password')
self.assertEqual(user.email, 'testuser@user.com')
self.assertEqual(user.user_name, 'username')
self.assertEqual(user.first_name, 'firstname')
self.assertFalse(user.is_superuser)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_active)
with self.assertRaises(ValueError):
db.objects.create_user(
email='', user_name='a', first_name='first_name', password='password')
|
[
"kanikamittal0661996@gmail.com"
] |
kanikamittal0661996@gmail.com
|
2ae60a4728a75afca6e25d8e73f3da3331c76ea1
|
0535bff60a9a3d167178f93b859c706e23ca3196
|
/old_stuff_2018/sflowtest/myutils/gsheetsutils.py
|
fd6d7181067f09668009c87f8704d683e4a945f4
|
[] |
no_license
|
giditre/unibo-agh_monitoring
|
114b22c8d965f20b1d3859b5ae2d2dfb59a4b280
|
b3a17a5b7ae1de2ac79da3696b32e669e6ff31e2
|
refs/heads/master
| 2023-01-01T22:39:33.668147
| 2020-10-29T21:55:02
| 2020-10-29T21:55:02
| 296,078,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,224
|
py
|
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from json import dumps
def get_service(scopes='https://www.googleapis.com/auth/spreadsheets', clientsecrets_f_name='credentials.json', store_f_name='token.json'):
# check if method called as part of main program or library function
if __name__ != '__main__':
# if method called as library function, need to emulate parsing parameters from command line
import argparse
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.argparser])
args = parser.parse_args(['--noauth_local_webserver'])
store = file.Storage(store_f_name)
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets(clientsecrets_f_name, scopes)
creds = tools.run_flow(flow, store)
service = build('sheets', 'v4', http=creds.authorize(Http()))
return service
def get_sheet_ID(service, spreadsheet_ID, sheet_name):
data_sheet_ID = None
response = service.spreadsheets().get(spreadsheetId=spreadsheet_ID).execute()
for s in response['sheets']:
if s['properties']['title'] == sheet_name:
data_sheet_ID = s['properties']['sheetId']
break
# print_err('Sheet ID: {}'.format(data_sheet_ID))
return data_sheet_ID
def get_sheet_ID_removing_other_sheets(service, spreadsheet_ID, sheet_name):
# discover sheet ID
keep_sheet_ID = None
response = service.spreadsheets().get(spreadsheetId=spreadsheet_ID).execute()
for s in response['sheets']:
if s['properties']['title'] == sheet_name:
keep_sheet_ID = s['properties']['sheetId']
break
# print_err('Sheet ID: {}'.format(keep_sheet_ID))
# remove other sheets
requests = []
for s in response['sheets']:
if s['properties']['sheetId'] != keep_sheet_ID:
requests.append({
"deleteSheet": {
"sheetId": s['properties']['sheetId']
}
})
# proceed only if there are sheets to be removed
if requests:
# prepare body of batchUpdate
body = {
'requests': requests
}
# send batchUpdate
response = service.spreadsheets().batchUpdate(
spreadsheetId=spreadsheet_ID,
body=body).execute()
return keep_sheet_ID
def clear_values(service, spreadsheet_ID, range_to_clear):
# clear sheet in range
request = service.spreadsheets().values().clear(spreadsheetId=spreadsheet_ID, range=range_to_clear, body={})
response = request.execute()
def add_data(service, spreadsheet_ID, start_position, rows_list):
# add data to sheet
body = {
'values': rows_list
}
response = service.spreadsheets().values().update(
spreadsheetId=spreadsheet_ID, range=start_position,
valueInputOption='USER_ENTERED', body=body).execute()
# return number of updated cells
return response.get('updatedCells')
def add_data_and_chart(service, spreadsheet_ID, start_position, rows_list, data_sheet_ID, chart_type='LINE', chart_title='Test Chart', x_title='x', y_title='y'):
# first, add data
add_data(service, spreadsheet_ID, start_position, rows_list)
# add chart
endRow = len(rows_list)
series = []
for start_column_index in range(len(rows_list[0])):
series.append({
"series": {
"sourceRange": {
"sources": [
{
"sheetId": data_sheet_ID,
"startRowIndex": 0,
"endRowIndex": endRow,
"startColumnIndex": start_column_index,
"endColumnIndex": start_column_index+1
}
]
}
},
"targetAxis": "LEFT_AXIS"
})
# build request body
body = {
"requests": [
{
"addChart": {
"chart": {
"spec": {
"title": chart_title,
"basicChart": {
"chartType": chart_type,
"legendPosition": "BOTTOM_LEGEND",
"axis": [
{
"position": "BOTTOM_AXIS",
"title": x_title
},
{
"position": "LEFT_AXIS",
"title": y_title
}
],
"domains": [],
"series": series,
"headerCount": 1
}
},
"position": {
"newSheet": True
}
}
}
}
]
}
request = service.spreadsheets().batchUpdate(
spreadsheetId=spreadsheet_ID, body=body)
response = request.execute()
# TODO
# def update_sheet(...)
# function that takes data as input and clears datasheet, removes old chart and adds new one
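# A minimal sketch of the TODO above (an assumption, not a finished
# implementation): like rewrite_sheet below, but reuses an already
# authorized service object instead of rebuilding credentials.
def update_sheet(service, spreadsheet_ID, sheet_name, rows_list,
                 chart_type='LINE', chart_title='Test Chart', x_title='x', y_title='y'):
    data_sheet_ID = get_sheet_ID_removing_other_sheets(service, spreadsheet_ID, sheet_name)
    clear_values(service, spreadsheet_ID, sheet_name+'!$A$1:$YY')
    add_data_and_chart(service, spreadsheet_ID, sheet_name+'!A1', rows_list, data_sheet_ID, chart_type, chart_title, x_title, y_title)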
def rewrite_sheet(scopes, clientsecrets_f_name, store_f_name, spreadsheet_ID, sheet_name, rows_list,
chart_type='LINE', chart_title='Test Chart', x_title='x', y_title='y'):
service = get_service(scopes, clientsecrets_f_name, store_f_name)
data_sheet_ID = get_sheet_ID_removing_other_sheets(service, spreadsheet_ID, sheet_name)
clear_values(service, spreadsheet_ID, sheet_name+'!$A$1:$YY')
add_data_and_chart(service, spreadsheet_ID, sheet_name+'!A1', rows_list, data_sheet_ID, chart_type, chart_title, x_title, y_title)
# MAIN that will be executed when library called as script
if __name__ == '__main__':
from sys import stderr, argv
def print_err(s):
print(s, file=stderr)
# parse parameters from command line
import argparse
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.argparser])
#parser = argparse.ArgumentParser()
# SAMPLE_SPREADSHEET_ID = '1fk2AuFigFR_g66etEJEeGm_Hb8ERF8IlEIuaf974nSk'
# SAMPLE_RANGE_NAME = 'Foglio1!$A$1:$YY'
parser.add_argument('spreadsheet_ID', help="Google Sheet ID (as in the URL)")
parser.add_argument('data_sheet_name', help="Name of the sheet with the data (e.g. Sheet1)", nargs='?', default='Sheet1')
parser.add_argument('scopes', help="Google API authorization scope(s)", nargs='?', default='https://www.googleapis.com/auth/spreadsheets')
args = parser.parse_args(['--noauth_local_webserver'] + argv[1:])
spreadsheet_ID = args.spreadsheet_ID
print_err('Spreadsheet ID: {}'.format(spreadsheet_ID))
data_sheet_name = args.data_sheet_name
print_err('Data sheet name: {}'.format(data_sheet_name))
scopes = args.scopes
print_err('Scopes: {}'.format(scopes))
service = get_service(scopes)
data_sheet_ID = get_sheet_ID_removing_other_sheets(service, spreadsheet_ID, data_sheet_name)
print_err('Sheet {} has Sheet ID {}'.format(data_sheet_name, data_sheet_ID))
clear_values(service, spreadsheet_ID, data_sheet_name+'!$A$1:$YY')
# generate example data
from random import randint
rows_list = [['', 'Data1', 'Data2', 'Data3']]
for row_index in range(randint(33, 66)):
rows_list.append(['Point {}'.format(row_index), row_index*randint(0,3), row_index*randint(4,6), row_index*randint(7,9)])
add_data_and_chart(service, spreadsheet_ID, data_sheet_name+'!A1', rows_list, data_sheet_ID, chart_type='COLUMN', chart_title='Test Chart', x_title='Test X', y_title='Test Y')
|
[
"giditre@gmail.com"
] |
giditre@gmail.com
|
b79830f6ad363db65149bf531663534a3e3a697d
|
bd0804494ada914068b500470933f908df59a105
|
/img-processor.py
|
110fcdadbff75e90ceae2bca1812dc451854e03f
|
[] |
no_license
|
TDoGoodT/face-cropper
|
dc44b93a0fe5ee3dadafad3030293ad3ab2a2fee
|
6230b5e3206f9dcc34da0edbfd79407158e48974
|
refs/heads/master
| 2022-09-05T17:21:15.520147
| 2020-05-21T21:00:01
| 2020-05-21T21:00:01
| 265,949,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
import cv2
import imutils
import os
def crop_detected_face(img,face):
(x, y, w, h) = face
crop_img = img[y:y+h, x:x+w]
if crop_img is not None and crop_img.size != 0:
#crop_img = imutils.resize(crop_img, width=400)
crop_img = cv2.resize(crop_img, (400,400), interpolation=cv2.INTER_AREA)
return crop_img
def crop_face(path,err_path):
img = cv2.imread(path, cv2.IMREAD_COLOR)
faces_detected = face_cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=5)
if len(faces_detected) != 1:
        for face in faces_detected:
            img = crop_detected_face(img, face)
            if img is None:  # guard: crop_detected_face returns None on an empty crop
                return None
        if img is not None and img.size != 0:
            (name, num) = path.split('/')[-2:]
            err_path = err_path + 'err-' + name + name + '/' + num
            cv2.imwrite(err_path, img)
        return None
else:
return crop_detected_face(img,faces_detected[0])
path = '/home/snir/Projects/Python/hefers/'
raw_data_path = path + 'photos/raw-data/'
proccesed_path = path + 'photos/filtered/'
err_path = path + 'photos/errors/'
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eyes_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
names = os.listdir(raw_data_path)
for name in names:
dst = proccesed_path + name + '/'
imgs = os.listdir(raw_data_path + name)
for img in imgs:
img_path = raw_data_path + name + '/' + img
face = crop_face(img_path,err_path)
        if face is not None:
cv2.imwrite(dst+img,face)
|
[
"bachar.snir@gmail.com"
] |
bachar.snir@gmail.com
|
422d82c00e12bb709db5d298c7268cb65d7ebf66
|
14438f8c8bb4250a7fa8da0ecd40c5a4902bdfcd
|
/Player/set-21/201.py
|
42c37dd464797aa4d63a9160dc55d013bd684a0c
|
[] |
no_license
|
nikhilvarshney2/GUVI
|
c51b1fa3bd1026eb74fc536e938a14c2e92089b2
|
79717ae5b26540101169e512204fb7236f7c839f
|
refs/heads/master
| 2020-04-01T00:40:27.699963
| 2019-04-30T13:46:46
| 2019-04-30T13:46:46
| 152,707,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
def catalan(n):
if n <=1 :
return 1
res = 0
for u in range(n):
res += catalan(u) * catalan(n-u-1)
return res
for u in range(int(input())+1):
print(catalan(u),end=" ")
|
[
"nikhilvarshney9292@gmail.com"
] |
nikhilvarshney9292@gmail.com
|
7cb434aff4849f5e476c1004e56c4c8f0cfa8119
|
f31f97a79d74bd8ff69f4b8e9665026daec4a8c5
|
/python_crash_course/ch04/pizzas.py
|
2ab0e7293f23439e1fa5f1e43edc6ca45ded994f
|
[] |
no_license
|
sillyfish/learn-python
|
9c02c0109347fea479c4b1d5d8e2ac84af91fb75
|
72e084dd6e09325530447c2886f43fafd725dbb3
|
refs/heads/master
| 2023-08-06T03:18:40.667342
| 2023-07-28T09:19:18
| 2023-07-28T09:19:18
| 245,551,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
"""
Title:
Date: 2023-03-31 19:28:12
LastEditTime: 2023-04-01 13:42:06
Category:
Tags:
Slug:
Authors: 惊人不易
Summary:
"""
"""
Title:
Date: 2023-03-31 19:28:12
LastEditTime: 2023-03-31 19:28:13
Category:
Tags:
Slug:
Authors: 惊人不易
Summary:
"""
pizzas = ["pepperoni", "cheese", "mushroom"]
for pizza in pizzas:
print(f"i like {pizza} pizza.")
print("i really love pizza!")
friend_pizzas = pizzas[:]
pizzas.append("sausage")
friend_pizzas.append("pineapple")
print("My favorite pizzas are:")
for pizza in pizzas:
print(pizza)
print("My friend's favorite pizzas are:")
for pizza in friend_pizzas:
print(pizza)
|
[
"sillyfish87@gmail.com"
] |
sillyfish87@gmail.com
|
fd97170f2e5324193c4a982b92e11263d63bd22f
|
2432996ac1615cd36d61f0feeff8a359d2b438d8
|
/env/lib/python3.8/site-packages/PyInstaller/hooks/hook-gi.repository.Champlain.py
|
c663aac500bf9745d6a506e872818e2f3257c1ac
|
[
"Apache-2.0"
] |
permissive
|
Parveshdhull/AutoTyper
|
dd65d53ece7c13fbc1ead7ce372947483e05e2e3
|
7fabb30e15b770d790b69c2e4eaf9bbf5a4d180c
|
refs/heads/main
| 2023-05-08T14:10:35.404160
| 2023-05-07T20:43:15
| 2023-05-07T20:43:15
| 315,415,751
| 26
| 18
|
Apache-2.0
| 2023-05-07T20:43:16
| 2020-11-23T19:13:05
|
Python
|
UTF-8
|
Python
| false
| false
| 702
|
py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Import hook for PyGObject's "gi.repository.Champlain" package.
"""
from PyInstaller.utils.hooks import get_gi_typelibs
binaries, datas, hiddenimports = get_gi_typelibs('Champlain', '0.12')
|
[
"parvesh.dhullmonu@gmail.com"
] |
parvesh.dhullmonu@gmail.com
|
ca7ae6028d7aa044976c8d93dd16cfb9698e9d0a
|
a31de016611f3b4efc7a576e7113cad1a738419b
|
/hackercup_quali/Basketball/basketball_copy.py
|
ab306bac5726355cc1cdd727379d814829a65f9d
|
[] |
no_license
|
Ing-Josef-Klotzner/python
|
9d4044d632672fff966b28ab80e1ef77763c78f5
|
3913729d7d6e1b7ac72b46db7b06ca0c58c8a608
|
refs/heads/master
| 2022-12-09T01:40:52.275592
| 2022-12-01T22:46:43
| 2022-12-01T22:46:43
| 189,040,355
| 0
| 0
| null | 2022-12-01T19:52:37
| 2019-05-28T14:05:16
|
Python
|
UTF-8
|
Python
| false
| false
| 3,291
|
py
|
#! /usr/bin/env python2
# -*- coding: iso-8859-15 -*-
from operator import itemgetter
import re
PT = 0 # playtime
i_file_line_ct=0
#fobj = open("basketball_game_example_input.txt", "r")
fobj = open("testdata3-filtered.txt", "r")
fobj2 = open("basketball_output.txt", "w")
for line in fobj: # line is a string
line_list = re.findall(r'\w+',line) # converts string line to list line_list
line_list.append(PT)
i_file_line_ct += 1
    is_nmbr = line_list[0].isdigit()  # true if first token is a number
if i_file_line_ct == 1:
        T=line # line 1 of input file holds the count of test cases
T_c=1
if i_file_line_ct>1 and is_nmbr:
# N number of players M minutes P playercount playing
# 1 <= T <= 50 2 * P <= N <= 30 1 <= M <= 120 1 <= P <= 5
team=[]
px = []
PL = []
PLS = ""
N = int(line_list[0])
NC = N
M = int(line_list[1])
P = int(line_list[2])
elif i_file_line_ct <> 1 and (N < 1 or N > 30) and is_nmbr:
print ("invalid count of N teamplayers "+N+" in line "+str(i_file_line_ct)+" of inputfile")
size_out_of_range=1
if not is_nmbr:
# next N lines are in format "<name> <shot_percentage> <height>".
line_list[1] = int(line_list[1])
line_list[2] = int(line_list[2])
team.append(line_list)
NC -= 1
if NC == 0:
# print "calculation for team rotation takes place"
BL = " "
# team.sort(key=itemgetter(2),reverse=True) #pre-sort height -- only if needed
team.sort(key=itemgetter(1),reverse=True) #sort shot percentage
# create lists for team_bench and team_pl
team_pl = team[0:2*P]
team_bench = team[2*P:len(team)]
MC = 0
if N > P*2:
while MC < M:
# rotate team list and add 1 minute to current players
TC = 0
while TC < P*2:
team_pl[TC][3] += 1 #add 1 to playtime of playing players
TC += 1
# team rotate by moving 2 players from/to team_pl and team_bench lists
team_bench.append(team_pl.pop())
team_bench.append(team_pl.pop())
team_bench.sort(key=itemgetter(1),reverse=False)
team_bench.sort(key=itemgetter(3),reverse=True)
team_pl.append(team_bench.pop())
team_pl.append(team_bench.pop())
team_pl.sort(key=itemgetter(1),reverse=True)
team_pl.sort(key=itemgetter(3),reverse=False)
MC += 1
# now testcase is complete - write to output
TC=0 # create teamlist
while TC < P*2:
PL.append(team_pl[TC][0])
TC += 1
PL.sort()
BLC = 0
for px in PL:
if BLC == len(PL)-1:
BL=""
PLS += str(px) + BL
BLC += 1
print >> fobj2,"Case #"+str(T_c)+": "+str(PLS)
# print "Case #"+str(T_c)+": "+str(PLS)
T_c += 1
fobj.close()
fobj2.close()
print
print ("count of testcases: "+T)
|
[
"noreply@github.com"
] |
Ing-Josef-Klotzner.noreply@github.com
|
026ba1047765713944238928b56b1d5fb695eade
|
6f43c06e9492ba786855e53649c92339d451c20b
|
/books_app/views.py
|
7bc75758ac13be8f816b11becc87e461cc94f035
|
[] |
no_license
|
ninjaguydan/keeper
|
fe9db6650e8fb9263af223f0368855d2b5f304fb
|
1a62283e41d7f3fdc5b31bb825e0b6ab208d9474
|
refs/heads/master
| 2023-04-22T04:09:35.516779
| 2021-05-14T14:55:12
| 2021-05-14T14:55:12
| 367,394,254
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,103
|
py
|
from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
from django.db.models import Q
# Create your views here.
def logout(request):
request.session.clear()
return redirect('/')
def books(request):
if not "userid" in request.session:
return redirect('/')
if request.session['userid'] == 0:
context = {
"reviews" : Review.objects.all().order_by("-created_at")[:5],
"books" : Book.objects.all(),
}
return render(request, "books.html", context)
context = {
"user" : User.objects.get(id = request.session['userid']),
"reviews" : Review.objects.all().order_by("-created_at")[:5],
"books" : Book.objects.all(),
}
return render(request,'books.html', context)
def addbook(request):
context = {
"user" : User.objects.get(id = request.session['userid']),
"authors" : Author.objects.all(),
}
return render(request, "addbook.html", context)
def add(request):
if request.method == "GET":
return redirect('/books/addbook')
errors = Book.objects.validator(request.POST)
if errors:
for key, value in errors.items():
messages.error(request, value)
return redirect('/books/addbook')
if request.POST['new_author']:
author = Author.objects.create(name = request.POST['new_author'])
else:
author = Author.objects.create(name = request.POST['author'])
user = User.objects.get(id = request.session['userid'])
new_book = Book.objects.create(
title= request.POST['title'],
desc = request.POST['desc'],
author = author,
added_by = user
)
Review.objects.create(
content = request.POST['review'],
rating = request.POST['rating'],
book = new_book,
user = user
)
return redirect(f'/books/{new_book.id}')
def delete_review(request, review_id):
review_to_delete = Review.objects.get(id = review_id)
book_id = review_to_delete.book.id
review_to_delete.delete()
return redirect(f'/books/{book_id}')
def post_review(request, book_id):
if request.method == "GET":
return redirect(f"/books/{book_id}")
book = Book.objects.get(id = book_id)
user = User.objects.get(id = request.session['userid'])
Review.objects.create(
content=request.POST['review'],
rating=request.POST['rating'],
book=book,
user=user
)
context = {
"book" : book,
"user" : user,
}
return render(request, "review-partial.html", context)
def display_book(request, book_id):
if request.session['userid'] == 0:
context = {
"book" : Book.objects.get(id = book_id)
}
return render(request, "reviews.html", context)
book = Book.objects.get(id = book_id)
user = User.objects.get(id = request.session['userid'])
context = {
"user" : user,
"book" : book,
}
return render(request, "reviews.html", context)
def favorite_book(request, book_id):
user = User.objects.get(id = request.session['userid'])
book = Book.objects.get(id = book_id)
user.favorites.add(book)
return redirect(f'/books/{book_id}')
def unfavorite_book(request, book_id):
user = User.objects.get(id = request.session['userid'])
book = Book.objects.get(id = book_id)
user.favorites.remove(book)
return redirect(f'/books/{book_id}')
def delete_book(request, book_id):
book = Book.objects.get(id = book_id)
if request.session['userid'] == book.added_by.id:
book.delete()
return redirect('/books/')
return redirect(f'/books/{book_id}')
def display_profile(request, profile_id):
profile = User.objects.get(id=profile_id)
if request.session['userid'] == 0:
context = {
"profile" : profile,
}
return render(request, "user.html", context)
context = {
"profile" : profile,
"user" : User.objects.get(id=request.session['userid']),
}
return render(request, "user.html", context)
def delete_profile(request, profile_id):
user_to_delete = User.objects.get(id = profile_id)
user_to_delete.delete()
return redirect('/books/admin')
def admin(request):
context = {
"user" : User.objects.get(id = request.session['userid']),
"profiles" : User.objects.all(),
}
return render(request, "admin.html", context)
# def likes(request, review_id):
# user = User.objects.get(id = request.session['userid'])
# review = Review.objects.get(id = review_id)
# if review.likes.filter(user = user):
# like = Like.objects.filter(Q(review = review), Q(user = user))
# like.delete()
# return redirect(f'/books/{review.book.id}')
# else:
# Like.objects.create(review = review, user = user)
# context = {
# "book" : Book.objects.get(id = review.book.id),
# "user" : user,
# }
# return render(request, "review-partial.html", context)
|
[
"dthompson0190@gmail.com"
] |
dthompson0190@gmail.com
|
c17985b6645a23be5b20639a957af0eaf74debdf
|
c99078fcc67c45f41f49d869685f78a85cf0f244
|
/analyzer.py
|
be620ff5342f69a603403890bd0984a302dd6f4f
|
[
"MIT"
] |
permissive
|
chengziyi/comment_analyse
|
a79e2289f189a8f1faa2f99602d930172aeffbb5
|
e947b93651c6a5b27bb4c0b29bb3619185d5e44a
|
refs/heads/master
| 2022-12-22T02:54:40.289969
| 2020-10-01T12:48:32
| 2020-10-01T12:48:32
| 291,438,127
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,572
|
py
|
import pandas as pd
import pymongo
import time
# import fasttext
import re
import jieba
import jieba.posseg as pseg
import gensim
from collections import Counter
# load stop words
print('loading stop_words...')
stop_words = []
with open(r'./train_model/stopwords.txt', 'r', encoding='utf-8') as f:
stop_words=f.readlines()
stop_words=list(map(lambda x:x.replace('\n',''),stop_words))
## look up comments in the database by shop name
def get_comments(name):
comments=[]
time=[]
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["comment_db"]
mycol = mydb["shop_comments"]
myquery = { "name": name }
mydoc = mycol.find(myquery)
    ## this loop is relatively slow
for x in mydoc:
comments.append(x['comment'])
time.append(x['timestamp'])
return comments,time
## fastText classification
def token(string):
return "".join(re.findall(r'[\u4e00-\u9fa5]+', string))
def cut(string):
return ' '.join(jieba.lcut(string))
## this variant loads the model itself
def fasttext_classifier(comments):
sentences=list(map(lambda x:cut(token(x)),comments))
print('loading model...')
model_path='./train_model/fasttext_ftz_model.ftz'
model=fasttext.load_model(model_path)
label,p=model.predict(sentences)
# print(sentences)
labels=list(map(lambda x:x[0].replace('__label__',''),label))
# print(labels)
# print(labels.count('1'))
return labels
## this variant takes the loaded model as a parameter
def fasttext_classifier_with_model(comments,model):
sentences=list(map(lambda x:cut(token(x)),comments))
label,p=model.predict(sentences)
# print(sentences)
labels=list(map(lambda x:x[0].replace('__label__',''),label))
# print(labels)
# print(labels.count('1'))
return labels
'''
Line chart showing how the number of comments changes over time.
Each comment fetched from the database has a corresponding timestamp.
Split the timestamps into fixed-width intervals,
e.g. delta = (max_time - min_time) / 11.
Timestamps and comments correspond one-to-one, so counting the
timestamps in each interval gives the comment count per interval.
input: a list of timestamp strings fetched from MongoDB
return: the x axis and y axis values for the line chart
'''
def time_process(timestamps):
timestamps=list(map(lambda x:int(x[:-3]),timestamps))
min_time=min(timestamps)
max_time=max(timestamps)
time_x=[]
times=[]
    # split into 11 intervals and convert each to 'year/month' form
delta=int((max_time-min_time)/11)
for i in range(11):
item=max_time-i*delta
times.append(item)
time_array=time.localtime(item)
strtime=str(time_array.tm_year)+'/'+str(time_array.tm_mon)
time_x.append(strtime)
time_x=time_x[::-1]
times=times[::-1]
    ## count the number of timestamps in each interval
timestamps.sort()
time_y=[]
for t in times:
timestamps_copy=timestamps.copy()
pre=len(timestamps)
for i in timestamps_copy:
if i<=t:timestamps.remove(i)
else:break
after=len(timestamps)
time_y.append(pre-after)
# time_y[-1]+=after
line_x=['<'+time_x[0]]
for i in range(len(time_x)-1):
line_x.append(time_x[i]+'-'+time_x[i+1])
# print(line_x)
# print(time_y)
return line_x,time_y
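## A minimal usage sketch of time_process (hypothetical data, not from the
## real database): MongoDB stores millisecond timestamps as strings, which
## time_process bins into 11 'year/month' intervals.
def _demo_time_process():
    fake_timestamps = [str(1500000000000 + i * 2000000000) for i in range(50)]
    line_x, line_y = time_process(fake_timestamps)
    print(line_x)
    print(line_y)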
## LDA topic analysis
def split_text(text):
return ' '.join(jieba.lcut(token(text)))
## tokenize and remove stop words
def get_seg_content(text):
line=split_text(text)
words=line.split()
return [ w for w in words if w not in stop_words ]
# lda_model is a pre-trained LDA model
# content is one piece of text
def get_topic(lda_model,content,dictionary):
    corpus = dictionary.doc2bow(content)  # convert the document to bag-of-words
    topics = lda_model.get_document_topics(corpus)  # topic distribution of the new document
    ## sort the extracted topics by relevance, high to low
    topics=sorted(topics,key=lambda x:x[1],reverse=True)
    ## use jieba.pseg to pick the most relevant adjective as this comment's topic
# topic=topic[-1][0]
topic=[dictionary[i] for i,_ in topics]
topic=' '.join(topic)
# print(topic)
words = pseg.cut(topic)
for word, flag in words:
if flag=='a':
return word
return dictionary[topics[0][0]]
## group the comments into 6 topic classes, the sixth being 'other'
## input: a list of comments
## output: topics and their counts, used for the radar chart
def lda_get_topic(lda_model,comments):
##use lda to get topics
comments_cut = [get_seg_content(i) for i in comments]
id2word = gensim.corpora.Dictionary(comments_cut)
topics=[]
for i in range(len(comments_cut)):
topic=get_topic(lda_model,comments_cut[i],id2word)
# topics+=topic
topics.append(topic)
# print(len(topics))
# print(topics)
    ## count topics and their frequencies for the radar chart
word_count=dict(Counter(topics))
word_count=sorted(word_count.items(),key=lambda x:x[1],reverse=True)
# print(word_count)
radar_x=[]
radar_y=[]
for k,v in word_count:
radar_x.append(k)
radar_y.append(v)
if len(radar_x)==5:
break
radar_x.append('其它')
radar_y.append(len(topics)-sum(radar_y))
# print(radar_x)
# print(radar_y)
return radar_x,radar_y
## pos_num, neg_num, total_num, and comment counts for each interval
if __name__=='__main__':
##get comments
comments,timestamps=get_comments('KFC')
print(len(comments))
    ## LDA topic analysis, radar chart
print('loading lda model...')
model_path='./train_model/lda_model/LDA_model'
lda_model = gensim.models.ldamodel.LdaModel.load(model_path)
x,y=lda_get_topic(lda_model,comments)
print(x,y)
    ## classify and count
## classify by fasttext
labels=fasttext_classifier(comments)
# print(labels)
    ## pie chart
pos_num=labels.count('1')
neg_num=labels.count('0')
total=pos_num+neg_num
pos_per=float('{:.3f}'.format(pos_num/total))
# neg_per=neg_num/total
print('pos_per:',pos_per)
print('neg_per:',1-pos_per)
    ## bar chart
print('pos_num:',pos_num)
print('neg_num:',neg_num)
    ## line chart
line_x,line_y=time_process(timestamps)
print(line_x)
print(line_y)
|
[
"1145440472@qq.com"
] |
1145440472@qq.com
|
3aecc7bc8f4e8a29ed1eb9cad44e458a19a67b87
|
6f31a15cb73175084f2c4485d3dea0b8975b2ec9
|
/egs/rimes/v1/local/process_data.py
|
b87d9fbc5e2d7eebd18e446df0ec72925ad4497e
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
Idlak/idlak
|
c7cd5e6c0b02918cda85dbb2fb5c7333a789c304
|
4be6f7d951ba0d707a84a2cf8cbfc36689b85a3c
|
refs/heads/master
| 2021-11-23T13:28:43.709163
| 2021-11-01T15:51:46
| 2021-11-01T15:51:46
| 127,285,931
| 65
| 26
|
NOASSERTION
| 2021-11-01T15:51:47
| 2018-03-29T12:06:52
|
Shell
|
UTF-8
|
Python
| false
| false
| 5,186
|
py
|
#!/usr/bin/env python3
""" This script reads xml file and creates the following files :text, utt2spk, images.scp.
It also creates line images from page image and stores it into
data/local/rimes_data/train/lines.
Eg. local/process_data.py data/local/rimes_data/train train
Eg. text file: writer000000_train2011-0_000001 Je vous adresse ce courrier afin
utt2spk file: writer000000_train2011-0_000001 writer000000
images.scp file: writer000000_train2011-0_000001 \
data/local/rimes_data/train/lines/train2011-0_000001.png
"""
import argparse
import xml.dom.minidom as minidom
from PIL import Image
import os
import random
parser = argparse.ArgumentParser(description="""Creates line images from page image.""")
parser.add_argument('database_path', type=str,
                    help='Path to the downloaded (and extracted) rimes data')
parser.add_argument('dataset', type=str,
help='Subset of data to process.')
parser.add_argument("--augment", type=lambda x: (str(x).lower()=='true'), default=False,
help="performs image augmentation")
parser.add_argument('--pixel-scaling', type=int, default=20,
                    help='padding across horizontal/vertical direction')
args = parser.parse_args()
def expand_aabb(left, right, top, bottom, delta_pixel):
""" Increases size of axis aligned bounding box (aabb).
"""
left = left - delta_pixel
right = right + delta_pixel
top = top - delta_pixel
bottom = bottom + delta_pixel
return left, right, top, bottom
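# e.g. expand_aabb(10, 90, 20, 80, 5) returns (5, 95, 15, 85)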
def get_line_images_from_page_image(file_name, left, right, top, bottom, line_id):
""" Given a page image, extracts the line images from it.
Input
-----
file_name (string): name of the page image.
left, right, top, bottom (int): coordinates corresponding to the line image.
line_id (int): line number on the page image.
"""
page_image_path = os.path.join(page_image_folder, file_name)
im = Image.open(page_image_path)
box = (left, top, right, bottom)
region = im.crop(box)
base_name = os.path.splitext(os.path.basename(file_name))[0]
line_image_file_name = base_name + '_' + str(line_id).zfill(6) + '.png'
imgray = region.convert('L')
line_image_path = os.path.join(args.database_path, 'line_image', args.dataset, line_image_file_name)
imgray.save(line_image_path)
return base_name, line_image_path
def write_kaldi_process_data_files(base_name, line_id, text):
"""creates files requires for dictionary and feats.scp.
Input
-----
image_path (string): name of the page image.
line_id (str): line number on the page image.
text: transcription of the line image.
base_name (string):
"""
writer_id = str(base_name.split('-')[1])
writer_id = str(writer_id).zfill(6)
writer_id = 'writer' + writer_id
utt_id = writer_id + '_' + base_name + '_' + str(line_id).zfill(6)
line_image_file_name = base_name + '_' + str(line_id).zfill(6) + '.png'
image_path = os.path.join(args.database_path, 'line_image', args.dataset, line_image_file_name)
text_fh.write(utt_id + ' ' + text + '\n')
utt2spk_fh.write(utt_id + ' ' + writer_id + '\n')
image_fh.write(utt_id + ' ' + image_path + '\n')
### main ###
text_file = os.path.join('data', args.dataset, 'text')
text_fh = open(text_file, 'w', encoding='utf-8')
utt2spk_file = os.path.join('data', args.dataset, 'utt2spk')
utt2spk_fh = open(utt2spk_file, 'w', encoding='utf-8')
image_file = os.path.join('data', args.dataset, 'images.scp')
image_fh = open(image_file, 'w', encoding='utf-8')
xml_path = os.path.join(args.database_path, 'xml', args.dataset) + '/rimes_2011.xml'
page_image_folder = os.path.join(args.database_path, 'page_image', args.dataset)
doc = minidom.parse(xml_path)
single_page = doc.getElementsByTagName('SinglePage')
for page in single_page:
file_name = page.getAttribute('FileName')
line = page.getElementsByTagName('Line')
id = 0
for node in line:
id += 1
bottom = int(node.getAttribute('Bottom'))
left = int(node.getAttribute('Left'))
right = int(node.getAttribute('Right'))
top = int(node.getAttribute('Top'))
text = node.getAttribute('Value')
text_vect = text.split() # this is to avoid non-utf-8 spaces
text = " ".join(text_vect)
if args.augment:
base_name, image_path = get_line_images_from_page_image(file_name, left, right, top, bottom, str(id))
write_kaldi_process_data_files(base_name, str(id), text)
additional_pixel = random.randint(1, args.pixel_scaling)
left, right, top, bottom = expand_aabb(left, right, top, bottom, args.pixel_scaling + additional_pixel + 1)
line_id = str(id) + '_scale' + str(2)
base_name, image_path = get_line_images_from_page_image(file_name, left, right, top, bottom, line_id)
write_kaldi_process_data_files(base_name, line_id, text)
else:
base_name, image_path = get_line_images_from_page_image(file_name, left, right, top, bottom, str(id))
write_kaldi_process_data_files(base_name, str(id), text)
|
[
"dpovey@gmail.com"
] |
dpovey@gmail.com
|
d1363189cdb8d713022e1fd153a0c0eb425c26fc
|
61bf66ad3947343a016a8b36a31477b7dc5d436a
|
/com/algos/gfg/mustdo/linked_lists/ReverseBySize.py
|
fdada97c910be80742ce8f18e2e3fca224ce4106
|
[
"MIT"
] |
permissive
|
sasikrishna/python-programs
|
fe53223869ca2ac0366ccb83682be05551666f76
|
937002f37c86efc5c876b37c7b42634ca629fffc
|
refs/heads/master
| 2020-04-22T23:22:57.613655
| 2019-06-11T18:17:07
| 2019-06-11T18:17:07
| 170,739,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,273
|
py
|
'''
Problem statement: Given a linked list of size N. The task is to reverse every k nodes (where k is an input to the
function) in the linked list.
'''
from com.algos.gfg.mustdo.linked_lists.SingleLinkedList import LinkedList
def reverse_by_size(head, size):
grp_start_node = None
curr, prev, next = head, None, None
while curr is not None:
count = 0
prev = None
while count < size and curr is not None:
next = curr.next
curr.next = prev
prev = curr
curr = next
count += 1
if grp_start_node is None:
grp_start_node = head
head = prev
else:
# print("grp_start_node", curr.data)
grp_start_node.next = prev
return head
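# e.g. with k=2, the list 1->2->3->4->5 becomes 2->1->4->3->5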
if __name__ == '__main__':
test_cases = int(input())
num_count, num_list, size = [], [], []
for i in range(test_cases):
num_count.append(int(input()))
num_list.append(list(map(int, input().split())))
size.append(int(input()))
for i in range(test_cases):
        linked_list = LinkedList()
        for j in range(num_count[i]):
            linked_list.add(num_list[i][j])
        linked_list.head = reverse_by_size(linked_list.head, size[i])
        linked_list.print_list()
|
[
"sasi.jolla@gmail.com"
] |
sasi.jolla@gmail.com
|
0db5ca1756a431d02423eb8fb002333b24501d9f
|
bc36aca18e0aee0716288b919a72714c935bf9db
|
/setup.py
|
c17120b2a2ebe0de0ab113a3be018bebd75c640a
|
[
"MIT"
] |
permissive
|
cuulee/asarapi
|
a667530ca4466145f362a5d8befd2a787685ec80
|
1a416c5fbd735790d356b0706ecdbf01d81b66e5
|
refs/heads/master
| 2020-03-27T02:24:59.091565
| 2018-08-22T18:39:50
| 2018-08-22T18:39:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
import codecs
from os import path
from setuptools import find_packages, setup
HERE = path.abspath(path.dirname(__file__))
with codecs.open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
setup(
name='asarapi',
version='0.1',
description='Search and download ERS-1, ERS-2, and Envisat products.',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/yannforget/asarapi',
author='Yann Forget',
author_email='yannforget@mailbox.org',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: GIS',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
keywords=['earth observation', 'gis', 'remote sensing'],
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=[
'requests',
'click',
'pandas',
'tqdm',
'shapely',
'appdirs',
'bs4'
],
include_package_data=True,
zip_safe=False,
entry_points="""
[console_scripts]
asarapi=asarapi.cli:cli
""",
)
|
[
"yannforget@mailbox.org"
] |
yannforget@mailbox.org
|
21772d0f2daf6514af5908fdcba122d0bea91045
|
7bed49464abc88e8ccc9c7bbcecc49aef54d55e7
|
/Christmas2017/question/admin.py
|
bd5cbc2460223fb47ef4a7fa2cb978b7d1034c5e
|
[] |
no_license
|
JMorris1575/christmas17
|
4839711a74d9082383190360301061c2f4f769c5
|
11738ae83e2cbf207b5f75e1e753524763a4c5fc
|
refs/heads/master
| 2021-09-29T13:30:55.559623
| 2018-11-24T18:13:27
| 2018-11-24T18:13:27
| 107,828,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
from django.contrib import admin
from .models import Question, Response
# Register your models here.
admin.site.register(Question)
admin.site.register(Response)
|
[
"FrJamesMorris@gmail.com"
] |
FrJamesMorris@gmail.com
|
5cd9de28217ecf079afb11199f3b292faea767c4
|
23e5f2ebafaab1e67aea4d7bcac7988bcdf31aed
|
/h1/venv/bin/flask
|
0a61a038dd44e82782fd31541292d96b48d216e4
|
[] |
no_license
|
chengfeiZhou/FlaskStudy
|
ae904a7603e5a253e7fae8d40d4a8ade334f311d
|
fafc4a933029fac2e8d9ede757513c3ccbe66e5f
|
refs/heads/master
| 2021-09-14T18:19:25.145140
| 2018-05-17T03:23:59
| 2018-05-17T03:23:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
#!/home/zhou/Desktop/study/h1/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"zzz1171284619@gmail.com"
] |
zzz1171284619@gmail.com
|
|
99b2cfb5be731f934b67d38a73fed53f2f680cff
|
60c9cd9adf0575d6d046a2e6ad813986d44803c3
|
/backend/appengine/routes/users/home.py
|
2c8468da95d4e1a831a55a1eefcb250cf907cad4
|
[
"MIT"
] |
permissive
|
MarcosVn/turinginformatica
|
3c3f95675dfa3d4897cec6050562d6127d2744be
|
21eba5f105ad052aa7505b6e77ca5871167a9dc2
|
refs/heads/master
| 2021-01-17T11:15:16.316267
| 2015-06-16T01:37:44
| 2015-06-16T01:37:44
| 31,993,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,057
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from google.appengine.ext import ndb
from config.template_middleware import TemplateResponse
from user.user_model import User
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
from routes.users import edit
from routes.users.new import salvar
from tekton.gae.middleware.redirect import RedirectResponse
from tekton.router import to_path
__author__ = 'marcos'
@no_csrf
@login_not_required
def index():
query = User.query_ordenada_por_criacao()
edit_path_base = to_path(edit)
user = query.fetch()
if len(user) >= 1:
user = user[len(user)-1]
key = user.key
key_id = key.id()
user.edit_path = to_path(edit_path_base, key_id)
ctx = {'user': user,
'salvar_path': to_path(salvar)}
return TemplateResponse(ctx, 'users/user_home.html')
@login_not_required
def deletar(user_id):
key = ndb.Key(User, int(user_id))
key.delete()
return RedirectResponse(index)
|
[
"marcos.vnaraujo@gmail.com"
] |
marcos.vnaraujo@gmail.com
|
c5fceac2032e499204fca66d635a092558c0e9b0
|
fb7c63cc5cf02b4c2ebda0c14b45fd4ac6669227
|
/CECS 451 - AI/9/speech.py
|
d2249effe272426084ca9309397fcf3a886e8d2d
|
[] |
no_license
|
cdelprato0/Long-Beach
|
0d7289b6c53e972f2d07e3f1531a16b79a1365e7
|
8c3628f26097f1dec196d02c65214070317d430f
|
refs/heads/master
| 2022-12-11T12:46:44.159590
| 2020-09-22T21:13:51
| 2020-09-22T21:13:51
| 297,773,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
class Speech:
def __init__(self):
self.original = []
self.recognized = []
self.similarity = []
    def read_original(self, inFile):
        pass  # not implemented
    def conv_audio(self, inDir):
        pass  # not implemented
    def comp_string(self):
        pass  # not implemented
if __name__ == '__main__':
    pass  # not implemented
|
[
"chaz.delprato@student.csulb.edu"
] |
chaz.delprato@student.csulb.edu
|
227a006374c22f855ccabf3c95bc78fb70cd43bf
|
1c8c09a475242f1fc13d1ea483c5819a9859b811
|
/src/game_stats.py
|
f82b8dd9fe900344283d52139e4b8b2265712a95
|
[] |
no_license
|
PiggerZZM/Snake
|
9dda09d6fe649664880bb57f6e90c80cc0db94e1
|
6ab50657e4196a8d93496500d2479202abf00ced
|
refs/heads/master
| 2022-11-05T15:37:30.795826
| 2020-06-25T13:15:58
| 2020-06-25T13:15:58
| 274,874,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
class GameStats:
"""跟踪游戏的统计信息"""
def __init__(self):
        # start in the inactive state
self.game_active = False
|
[
"442823793@qq.com"
] |
442823793@qq.com
|
cbcd5cdd212ee35f15dde41110d8afff2e882335
|
357a65e4cceba7148badce3a9386976fc90528e3
|
/cadmin/views.py
|
2d9ac303a847267266247f240bd98c91a5146b2e
|
[] |
no_license
|
FroggyMix/compteur-billard-DB-web
|
d6a199f5e307c75b0d14ee74e900d5b5965e983d
|
507a27f4bfab12b15082006f9e4d15106e51a073
|
refs/heads/master
| 2022-10-20T00:14:38.957326
| 2020-06-14T16:47:27
| 2020-06-14T16:47:27
| 260,715,620
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,557
|
py
|
from django.shortcuts import render, redirect, get_object_or_404, reverse
from gestionnaire import models
from gestionnaire.models import Match, Joueur, JeuVariantes, Frame
from gestionnaire.forms import JoueurForm, MatchForm, JeuVariantesForm
from django.contrib import messages
# Create your views here.
def joueur_add(request):
# If request is POST, create a bound form (form with data)
print ("definition de joueur_add")
if request.method == "POST":
f = JoueurForm(request.POST)
# If form is invalid show form with errors again
if f.is_valid():
# save data to db
f.save()
messages.add_message(request, messages.INFO, 'Joueur ajouté.')
return redirect('joueur_add')
    # if request is GET then show the unbound form to the user
else:
print ("methode = GET")
f = JoueurForm()
return render(request, 'cadmin/joueur_add.html', {'form': f})
def joueur_update(request, pk):
joueur = get_object_or_404(Joueur, pk=pk)
# If request is POST, create a bound form(form with data)
if request.method == "POST":
f = JoueurForm(request.POST, instance=joueur)
# If form is invalid show form with errors again
if f.is_valid():
f.save()
messages.add_message(request, messages.INFO, 'Joueur updated.')
return redirect(reverse('joueur_update', args=[joueur.id]))
    # if request is GET then show the unbound form to the user, along with data
else:
f = JoueurForm(instance=joueur)
return render(request, 'cadmin/joueur_update.html', {'form': f, 'joueur': joueur})
def match_add(request):
# If request is POST, create a bound form (form with data)
if request.method == "POST":
f = MatchForm(request.POST)
if f.is_valid():
# save data to db
f.save()
messages.add_message(request, messages.INFO, 'Match ajouté.')
return redirect('match_add')
    # if request is GET then show the unbound form to the user
else:
f = MatchForm()
return render(request, 'cadmin/match_add.html', {'form': f})
def match_update(request, pk):
match = get_object_or_404(Match, pk=pk)
# If request is POST, create a bound form(form with data)
if request.method == "POST":
f = MatchForm(request.POST, instance=match)
if f.is_valid():
f.save()
messages.add_message(request, messages.INFO, 'Match updated.')
return redirect(reverse('match_update', args=[match.id]))
    # if request is GET then show the unbound form to the user, along with data
else:
f = MatchForm(instance=match)
return render(request, 'cadmin/match_update.html', {'form': f, 'match': match})
def jv_add(request):
# If request is POST, create a bound form (form with data)
if request.method == "POST":
f = JeuVariantesForm(request.POST)
if f.is_valid():
# save data to db
f.save()
messages.add_message(request, messages.INFO, 'Variante de jeu ajoutée.')
return redirect('jv_add')
    # if request is GET then show the unbound form to the user
else:
f = JeuVariantesForm()
return render(request, 'cadmin/jv_add.html', {'form': f})
def jv_update(request, pk):
jv = get_object_or_404(JeuVariantes, pk=pk)
# If request is POST, create a bound form(form with data)
if request.method == "POST":
f = JeuVariantesForm(request.POST, instance=jv)
# If form is invalid show form with errors again
if f.is_valid():
f.save()
messages.add_message(request, messages.INFO, 'JeuVariantes updated.')
return redirect(reverse('jv_update', args=[jv.id]))
    # if request is GET then show the unbound form to the user, along with data
else:
f = JeuVariantesForm(instance=jv)
return render(request, 'cadmin/jv_update.html', {'form': f, 'jv': jv})
|
[
"franck.rapold@gmail.com"
] |
franck.rapold@gmail.com
|
d3877b4195972786be1400b560b533a80f0b3a3c
|
58f34cbb4b04925fc9b5054b750355c245ef698f
|
/main.py
|
df21366a54242c0525ffc9c89e97a0a1cdd70e6d
|
[] |
no_license
|
mmartinb75/nd131-openvino-fundamentals-Project-1
|
02ba8017e0b03d8400f67e567eb59a505881d407
|
73336ce7bd3ebfe63396f22edf5934dfa2e15da8
|
refs/heads/master
| 2021-05-18T15:54:58.933353
| 2020-04-02T10:01:27
| 2020-04-02T10:01:27
| 251,305,464
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,974
|
py
|
"""People Counter."""
"""
Copyright (c) 2018 Intel Corporation.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import sys
import time
import socket
import json
import cv2
import queue
import time
import numpy as np
import logging as log
import paho.mqtt.client as mqtt
from argparse import ArgumentParser
from inference import Network
# MQTT server environment variables
HOSTNAME = socket.gethostname()
IPADDRESS = socket.gethostbyname(HOSTNAME)
MQTT_HOST = IPADDRESS
MQTT_PORT = 3001
MQTT_KEEPALIVE_INTERVAL = 60
SECUENCIAL_SAFE_FRAMES = 10
def build_argparser():
"""
Parse command line arguments.
:return: command line arguments
"""
parser = ArgumentParser()
parser.add_argument("-m", "--model", required=True, type=str,
help="Path to an xml file with a trained model.")
parser.add_argument("-i", "--input", required=True, type=str,
help="Path to image or video file")
parser.add_argument("-l", "--cpu_extension", required=False, type=str,
default=None,
help="MKLDNN (CPU)-targeted custom layers."
"Absolute path to a shared library with the"
"kernels impl.")
parser.add_argument("-d", "--device", type=str, default="CPU",
help="Specify the target device to infer on: "
"CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
"will look for a suitable plugin for device "
"specified (CPU by default)")
parser.add_argument("-pt", "--prob_threshold", type=float, default=0.5,
help="Probability threshold for detections filtering"
"(0.5 by default)")
return parser
def connect_mqtt():
# Connect to the MQTT client
client = mqtt.Client()
client.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)
return client
def infer_on_stream(args, client):
"""
Initialize the inference network, stream video to network,
and output stats and video.
:param args: Command line arguments parsed by `build_argparser()`
:param client: MQTT client
:return: None
"""
# Initialise some counters:
    # total_count -> stores the total count of people seen so far
    # current_people -> number of people in the current frames
    # last_n_counters -> guards against recognition errors: the same count
    # has to be seen in SECUENCIAL_SAFE_FRAMES consecutive frames.
    # frame_counter counts frames so durations can be computed.
single_image_mode = False
total_count = 0
current_people = 0
last_n_counters = [0 for _ in range(SECUENCIAL_SAFE_FRAMES)]
frame_counter = 0
    # queues used to calculate the duration for each person. It is
    # assumed that when several people are in the video they follow
    # first-in-first-out behaviour. This does not have to be true, but
    # it does not affect the sum of all people's durations.
init_frames = queue.Queue()
durations = queue.Queue()
# Initialise the class
infer_network = Network()
# Set Probability threshold for detections
prob_threshold = args.prob_threshold
# Load the model through `infer_network`
infer_network.load_model(args.model, args.device, args.cpu_extension)
# Handle the input stream ###
net_input_shape = infer_network.get_input_shape()
# Check if input is a web-CAM, an image, or a video.
if args.input == 'CAM':
cap = cv2.VideoCapture(0)
cap.open(0)
else:
resources_file_name = os.path.splitext(args.input)[0]
resources_file_ext = os.path.splitext(args.input)[1]
if resources_file_ext in ['.png', '.jpg', '.jpeg']:
# Is a image. rename it in order to read as a sequence
single_image_mode = True
new_name = resources_file_name + '01' + resources_file_ext
inp = os.rename(args.input, new_name)
cap = cv2.VideoCapture(new_name, cv2.CAP_IMAGES)
cap.open(new_name, cv2.CAP_IMAGES)
os.rename(new_name, args.input)
else:
cap = cv2.VideoCapture(args.input)
cap.open(args.input)
    # initialize video capture
# Check frames per second of video or cam
# in order to calculate time of person in frame.
fps = cap.get(cv2.CAP_PROP_FPS)
    # Get width and height of the video to calculate box positions.
width = int(cap.get(3))
height = int(cap.get(4))
# Loop until stream is over
while cap.isOpened():
### Read from the video capture ###
flag, frame = cap.read()
if not flag:
break
key_pressed = cv2.waitKey(60)
### Pre-process the image as needed ###
#p_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))
#p_frame = p_frame - 127.5
#p_frame = p_frame * 0.007843
#p_frame = p_frame.transpose((2,0,1))
#p_frame = p_frame.reshape(1, *p_frame.shape)
p_frame = infer_network.preproces_input(frame)
# Increase frame_counter in each frame.
frame_counter += 1
### Start asynchronous inference for specified request ###
infer_network.exec_net(p_frame)
### Wait for the result ###
if infer_network.wait() == 0:
### Get the results of the inference request ###
result, infer_time = infer_network.get_output()
infer_text = "inference time: " + str(round(infer_time,3)) + " ms"
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.4
color = (255, 0, 0)
org = (15, 15)
frame = cv2.putText(frame, infer_text, org, font, fontScale, color, 1)
current_count = 0
safe_counter = 0
for boxes in result[0][0]:
if (boxes[1] == infer_network.get_person_classId() and boxes[2] > prob_threshold):
x_1 = int(width* boxes[3])
y_1 = int(height* boxes[4])
x_2 = int(width* boxes[5])
y_2 = int(height* boxes[6])
frame = cv2.rectangle(frame, (x_1, y_1), (x_2, y_2), (255,0,0), 2)
current_count += 1
            # Safety control to minimize recognition errors:
            # a count is considered valid only when it is the same in
            # SECUENCIAL_SAFE_FRAMES consecutive frames.
if all([l == current_count for l in last_n_counters]):
safe_counter = current_count
else:
safe_counter = current_people
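            # Shift the history window: last_n_counters acts as a fixed-size
            # FIFO of the most recent per-frame counts.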
for i in range(SECUENCIAL_SAFE_FRAMES - 1, 0, -1):
last_n_counters[i] = last_n_counters[i-1]
last_n_counters[0] = current_count
delta_people = safe_counter - current_people
if delta_people > 0:
for e in range(delta_people):
init_frames.put(frame_counter)
total_count += delta_people
current_people = safe_counter
elif delta_people < 0:
frames_duration = frame_counter - init_frames.get()
durations.put(frames_duration/fps)
current_people = safe_counter
# Extract any desired stats from the results
# Calculate and send relevant information on
# current_count, total_count and duration to the MQTT server
client.publish("person", json.dumps({"count": safe_counter, "total":total_count}))
# Topic "person": keys of "count" and "total"
# Topic "person/duration": key of "duration"
while not durations.empty():
client.publish("person/duration", json.dumps({"duration": durations.get()}))
# Send the frame to the FFMPEG server
if not single_image_mode:
sys.stdout.buffer.write(frame)
sys.stdout.flush()
# Write an output image if `single_image_mode`
if single_image_mode:
resources_file_name = os.path.splitext(args.input)[0]
resources_file_ext = os.path.splitext(args.input)[1]
        cv2.imwrite(resources_file_name + "_processed" + resources_file_ext, frame)
def main():
"""
Load the network and parse the output.
:return: None
"""
# Grab command line args
args = build_argparser().parse_args()
# Connect to the MQTT server
client = connect_mqtt()
# Perform inference on the input stream
infer_on_stream(args, client)
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
mmartinb75.noreply@github.com
|
5a0c02f15dd0a0ad0084548a23dcc49471786b19
|
99b062cb9f5f3ff10c9f1fa00e43f6e8151a43a6
|
/algorithm/day23/최소이동거리.py
|
0ef629a482a61f1e038063ef0f74ddbcd26fb047
|
[] |
no_license
|
HSx3/TIL
|
92acc90758015c2e31660617bd927f7f100f5f64
|
981c9aaaf09c930d980205f68a28f2fc8006efcb
|
refs/heads/master
| 2020-04-11T21:13:36.239246
| 2019-05-08T08:18:03
| 2019-05-08T08:18:03
| 162,099,042
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 982
|
py
|
import sys
sys.stdin = open("최소이동거리_input.txt")
def Dijkstra(G, r):
    # Dijkstra's shortest-path algorithm on the adjacency matrix G
    # (0 means "no edge"), starting from vertex r; returns the shortest
    # distance from r to vertex N (N is read from the enclosing scope).
    INF = 9876543210
    D = [INF] * (N + 1)        # tentative distances
    P = [-1] * (N + 1)         # predecessors, kept for path reconstruction
    visited = [0] * (N + 1)
    D[r] = 0
    for _ in range(N + 1):
        # Pick the unvisited vertex with the smallest tentative distance.
        minindex = -1
        min_dist = INF
        for i in range(N + 1):
            if not visited[i] and D[i] < min_dist:
                min_dist = D[i]
                minindex = i
        visited[minindex] = 1
        # Relax all edges leaving the chosen vertex.
        for i in range(N + 1):
            if not visited[i] and G[minindex][i] != 0 and D[minindex] + G[minindex][i] < D[i]:
                D[i] = D[minindex] + G[minindex][i]
                P[i] = minindex
    return D[N]
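
# Worked example (toy numbers, not from the input file): with N = 2 and
# edges 0->1 of weight 3 and 1->2 of weight 4, Dijkstra(graph, 0) relaxes
# D to [0, 3, 7] and returns 7.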
T = int(input())
for test_case in range(T):
N, E = map(int, input().split())
data = [list(map(int, input().split())) for _ in range(E)]
graph = [[0] * (N + 1) for _ in range(N + 1)]
for i in range(E):
graph[data[i][0]][data[i][1]] = data[i][2]
print("#{} {}".format(test_case+1, Dijkstra(N, 0)))
|
[
"hs.ssafy@gmail.com"
] |
hs.ssafy@gmail.com
|
efb5b11f00dc276a2338aeb432679bcec3c7df80
|
a1a42b2165f8cb899bff02f10d8b081b372ce9d3
|
/mshub-gc/tools/mshub-gc/proc/preproc/internorm.py
|
8fdc14fad138386fc98bdc0af5c30bacabe391c5
|
[
"Apache-2.0"
] |
permissive
|
CCMS-UCSD/GNPS_Workflows
|
47ca90ba177b3ddc1a71877a3ee92473e0fe392b
|
8c6772e244c6f3e5ea7cd5eae873b6faa48b7313
|
refs/heads/master
| 2023-08-31T13:38:51.911232
| 2023-08-24T06:06:37
| 2023-08-24T06:06:37
| 57,243,138
| 41
| 45
|
NOASSERTION
| 2023-06-13T16:54:49
| 2016-04-27T19:52:07
|
HTML
|
UTF-8
|
Python
| false
| false
| 12,846
|
py
|
# -*- coding: utf-8 -*-
"""
*********************************
Inter-sample Normalization Module
*********************************
The module is designed to account for overall intensity differences between
MSI datasets of multiple tissue samples. This unwanted variation can be caused
by a variety of reasons, including differences in sample preparation steps or
tissue section thickness.
See also intra-sample normalization module to account for overall intensity
(pixel to pixel) variation between spectra within individual datasets.
run python.exe internorm.py --help to get info about parameters of the script
References:
[1] Veselkov KA, et al. (2011) Optimized preprocessing of ultra-
performance liquid chromatography/mass spectrometry urinary metabolic
profiles for improved information recovery. Anal Chem 83(15):5864-5872.
[2] Veselkov KA, et al. (2014), Chemo-informatic strategy for imaging mass
spectrometry-based hyperspectral profiling of lipid signatures in
colorectal cancer.
PNAS, 111: 1216-122.
"""
#===========================Import section=================================
#Importing standard and external modules
import h5py
import numpy as np
import os
import time
import traceback
import warnings
import sys;
#If run independently - check system endianness and add path to local modules
if __name__ == "__main__":
if sys.byteorder!='little':
print('Only little endian machines currently supported! bye bye ....');
quit();
module_path = os.path.abspath('%s/../..'%os.path.dirname(os.path.realpath(__file__)));
sys.path.insert(0, module_path)
#Import local/internal modules
import basis.io.manageh5db as mh5
from basis.preproc import intranorm
from basis.procconfig import InterNorm_options;
from basis.utils.cmdline import OptionsHolder, AttributeCollection;
from basis.utils.typechecker import is_string, is_number
from proc.utils.timing import tic, toc;
from proc.utils.printlog import printlog, start_log, stop_log;
#==========================================================================
#From here the main code starts
def do_normalize(dbprocpath='', method='', params = '', mzrange=''):
"""
    ** Performs inter-sample normalization to account for overall intensity variation between tissue samples.**
Args:
dbprocpath: The path to a hdf5-based msi database for storage and organization of pre-processed MSI data.
The intra/inter-sample normalization procedures are applied after peak alignment and lock
mass correction steps.
method: The choice of an inter-sample normalization method {"MFC" for median fold change (default), ``mean`` or ``median``}
Additional methods can be added in a modular fashion.
params: The set of parameters for inter-sample normalization method. The median fold change normalization requires
                the reference dataset with respect to which the fold intensity changes of other datasets are
                calculated, ``mean`` by default. The ``offset`` parameter disregards peak intensities smaller than its value.
``{'reference': 'mean', 'offset': 0}`` by default for median fold change
mzrange: [mzmin mzmax] - the mass-to-charge ratio (m/z) range desired to be processed. If not specified, the full mz range is used.
"""
if is_string(dbprocpath) and os.path.exists(dbprocpath):
datasets = mh5.get_dataset_names(dbprocpath,dataset_names=[])
if not datasets:
            printlog(dbprocpath + " database file doesn't contain any MSI datasets")
return
else:
printlog(str(dbprocpath) + ' database file is not found')
return
    # Extract all inter-sample normalization parameters if available
isdbparam = False
#!--------------------------------------
#Needs reworking!
objattrs = mh5.load_preproc_obj(dbprocpath,'Inter-sample normalization')
if isinstance(objattrs,dict):
method = objattrs['method']
params = objattrs['params']
gscale = objattrs['gscale']
isdbparam = True
else:
isdbparam = False
#printlog(InterNormObj.params)
printlog('\n\n\n' '...Initializing inter-sample normalization procedure... ')
h5proc = h5py.File(dbprocpath,'a')
# derive normalization scaling factors per dataset in an iterative fashion
refX = []
passdatasets = []
for datasetid in datasets:
try:
printlog('\n'+os.path.basename(dbprocpath) +": Preparing for inter-sample normalization " + datasetid)
# prepare data
mz = mh5.load_dataset(h5proc,datasetid+'/mz')
X = mh5.load_dataset(h5proc,datasetid+'/Sp')
# get dataset specific summary values for estimation of scaling factors
refx = get_scaling_factors(X, method, params, mzrange, mz)
refX.append(refx)
printlog(os.path.basename(dbprocpath) +": Done... " + datasetid)
passdatasets.append(datasetid)
except Exception as inst:
printlog(os.path.basename(datasetid) + ": Failed...")
printlog(inst)
traceback.print_exc()
h5proc[datasetid].attrs['is_OK'] = False;
# get scaling factors
refX=np.transpose(np.squeeze(np.array(refX)))
if method=='mfc':
mfcparams = params
if isdbparam==True:
normRef = mh5.load_dataset(h5proc,'normRef')
# use pre-existing reference to get the scaling factors
mfcparams['reference'] = normRef
scX,normRef = intranorm.mfc(refX,mfcparams)
if isdbparam == False:
mh5.save_dataset(h5proc,'normRef',normRef)
else:
scX = refX
# pickle and save intra-sample normalization parameters
if isdbparam==False:
gscale = np.nanmedian(scX)
# To be reworked properly
#!------------------------------------
InterNormObj=AttributeCollection();
InterNormObj.method=method;
InterNormObj.params=params;
InterNormObj.do='yes';
        InterNormObj.description='Internormalization Settings'
InterNormObj.gscale = gscale
mh5.save_preproc_obj(h5proc, InterNormObj)
internormattr = vars(InterNormObj)
mh5.save_preproc2matlab(h5proc,internormattr['do'],3,
internormattr['description'], internormattr['method'], internormattr['params'])
# maintain original data scale
scX = scX/gscale
scX = scX.squeeze()
scX[np.isnan(scX)] = 1
scX[scX==0] = 1
# now apply normalization procedure in an iterative fashion, one sample at a time
i = 0
for datasetid in passdatasets:
try:
printlog('\n'+os.path.basename(dbprocpath) +": Performing inter-sample normalization " + datasetid)
X = mh5.load_dataset(h5proc,datasetid+'/Sp')
X = X/scX[i]
# re-write data-set in the hdf5 database file
mh5.save_dataset(h5proc,datasetid+'/Sp', X)
printlog("The inter-sample normalization of dataset: " +datasetid+" has successfully completed!")
except Exception as inst:
printlog("The inter-sample normalization of dataset: " +datasetid+": has failed!")
printlog(inst)
traceback.print_exc()
h5proc[datasetid].attrs['is_OK'] = False;
i = i+1
h5proc.close()
return
def get_scaling_factors(X, method, params, mzrange='', mz=''):
"""
    **Calculates scaling factors for the inter-sample normalization procedure.**
Args:
X: MSI dataset (number of m/z features, number of rows, number of columns).
method: The choice of an inter-sample normalization method {``median fold change`` (default), ``mean`` or ``median``}
Additional methods can be added in a modular fashion.
        params: The set of parameters for inter-sample normalization method. The median fold change normalization requires the reference profile with respect to which the fold intensity changes are
                calculated, ``mean`` by default. The ``offset`` parameter disregards peak intensities smaller than its value.
``{'reference': 'mean', 'offset': 0}`` by default for median fold change
mzrange: [mzmin mzmax] - The mass-to-charge ratio (m/z) range desired to be processed. If not specified, the full data range will be used.
mz: The mass to charge feature vector.
Returns:
refX: reference profile or scaling factor for inter-sample normalization.
"""
warnings.filterwarnings("ignore")
methodselector = {'mean': get_refmean,
'median': get_refmedian,
'mfc': get_refmfc};
normfunc = methodselector.get(method);
## prepare data for normalization
nmz,nobs = X.shape
#X = X.reshape(nmz,nrows*ncols)
Xn = X
try:
if (len(mzrange)>0) and (len(mz)>0):
Xn = X[(mz>mzrange[0]) & (mz<mzrange[1]),:]
except Exception as inst:
printlog(inst)
traceback.print_exc()
offset = params['offset'];
if is_number(offset):
Xn[Xn<=offset] = 0
sumx = np.nansum(Xn,axis=0)
## remove spectra with all zeros
Xn = Xn[:,sumx!=0]
Xn[Xn==0] = np.nan
refX = normfunc(Xn,params)
return refX
def get_refmean(X,params):
"""
    **Calculates the global mean value across all spectra in a dataset (``mean`` inter-sample normalization).**
Args:
X: MSI dataset (number of spectra x number of m/z features)
"""
refx = np.nanmean(X)
return refx
def get_refmedian(X,params):
"""
    **Calculates the global median value across all spectra in a dataset (``median`` inter-sample normalization).**
Args:
X: MSI dataset (number of spectra x number of m/z features)
"""
refx = np.nanmedian(X)
return refx
def get_refmfc(X,params):
"""
    **Calculates the reference profile across all spectra in a dataset for median fold change inter-sample normalization.**
Args:
X: MSI dataset (number of spectra x number of m/z features)
        params: {'reference': 'mean'}, the choice of representative profile for median fold change normalization, 'mean' by default.
"""
ref = params['reference']
if ref=='mean':
refx = np.nanmean(X,axis=1)
elif ref =='median':
refx = np.nanmedian(X,axis=1)
refx[refx==0]=np.nan
refx = refx.reshape(len(refx),1)
return refx
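
# Worked example (toy numbers, not from the pipeline): for X = [[1, 2],
# [2, 4], [3, 6]] (m/z features x spectra) and reference 'mean', the
# reference profile is [1.5, 3.0, 4.5]. Each entry of the first spectrum is
# 2/3 of the reference and each entry of the second is 4/3 of it, so the
# median fold change scaling factors are 2/3 and 4/3; dividing each spectrum
# by its factor equalizes their overall intensity.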
if __name__ == "__main__":
tic();
settings=OptionsHolder(__doc__, InterNorm_options);
    settings.description='Internormalization Settings';
settings.do='yes';
settings.gscale=[];
printlog(settings.program_description);
#Parse command line parameters
try:
settings.parse_command_line_args()
except Exception as inst:
printlog('!!! Error in command line parameters: !!!');
printlog(inst);
printlog('\nRun python ' + sys.argv[0] + ' --help for command line options information!');
sys.exit(-1)
parameters = settings.parameters;
if parameters['logfile'] != '':
start_log(parameters['logfile'], overwrite_existing = (parameters['overwrite_logfile'] == 'yes'), verbosity_level = parameters['verbose']);
printlog(settings.program_description, print_enabled = False);
printlog('Started on %s ...'%(time.strftime("%a, %d %b %Y at %H:%M:%S")));
printlog(settings.format_parameters());
#settings.parameters['h5dbname'] = '/Users/kv/desktop/test/pyproc_data__1928_22_03_2017.h5'
do_normalize(dbprocpath=settings.parameters['h5dbname'],\
method=settings.parameters['method'],\
params = settings.parameters['params'],\
mzrange=[settings.parameters['min_mz'], settings.parameters['max_mz']]);
printlog('\nFinished on %s in'%(time.strftime("%a, %d %b %Y at %H:%M:%S")));
toc();
printlog(settings.description_epilog);
stop_log();
|
[
"mwang87@gmail.com"
] |
mwang87@gmail.com
|
5e387aeb8d2b5755240df3b43ecfa66b3a093634
|
8e0e4e1d47dc2e90704edfd650f3b1c7c719a845
|
/deeplearning/make_label.py
|
164b3c98e405e620475b37e1000808a4b989aa37
|
[] |
no_license
|
longhushi/work
|
5eb482cef37dd12d72a888a12c1731f0ae5d6304
|
4e35980a17c1b0de7d01fd406eecb34f1a128dbb
|
refs/heads/master
| 2021-06-29T21:01:11.244809
| 2021-03-02T07:28:58
| 2021-03-02T07:28:58
| 67,460,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,072
|
py
|
"""
make label
@author: ZhangJian
"""
from skimage.io import imread, imsave
import skimage
import numpy as np
import os
def label(img_path, new_path, file_name="label2.tif"):
img = imread(img_path)
#print(img)
print(img.shape)
width = img.shape[0]
height = img.shape[1]
print(type(img[0][0]))
    new_image = np.zeros((width, height), dtype=np.uint8)  # every pixel is assigned below
print(np.array([0, 255, 0]))
for i in range(img.shape[0]):
for j in range(img.shape[1]):
            if all(np.array(img[i][j]) == np.array([255, 0, 0])):  # red: built-up area
                #new_image[i][j] = 100
                new_image[i][j] = 1
            elif all(np.array(img[i][j]) == np.array([0, 255, 0])):  # green: agricultural land
                #new_image[i][j] = 50
                new_image[i][j] = 2
            elif all(np.array(img[i][j]) == np.array([0, 255, 255])):  # sky blue: forest
                #new_image[i][j] = 150
                new_image[i][j] = 3
            elif all(np.array(img[i][j]) == np.array([255, 255, 0])):  # yellow: grassland
                #new_image[i][j] = 200
                new_image[i][j] = 4
            elif all(np.array(img[i][j]) == np.array([0, 0, 255])):  # blue: water
                #new_image[i][j] = 250
                new_image[i][j] = 5
            else:
                new_image[i][j] = 0
imsave(new_path+file_name, new_image)
#skimage.io.imshow(new_image)
#skimage.io.show()
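
# A faster alternative sketch (assumed-equivalent behaviour, untested on the
# original data): map RGB colours to class ids without a per-pixel Python loop.
def label_vectorized(img_path, new_path, file_name="label2.tif"):
    img = imread(img_path)
    color_to_class = {
        (255, 0, 0): 1,    # red: built-up area
        (0, 255, 0): 2,    # green: agricultural land
        (0, 255, 255): 3,  # sky blue: forest
        (255, 255, 0): 4,  # yellow: grassland
        (0, 0, 255): 5,    # blue: water
    }
    new_image = np.zeros(img.shape[:2], dtype=np.uint8)  # background stays 0
    for color, cls in color_to_class.items():
        mask = np.all(img[:, :, :3] == color, axis=-1)  # pixels matching this colour
        new_image[mask] = cls
    imsave(new_path + file_name, new_image)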
if __name__ == "__main__":
    # Read every image in the folder, process it, and save the result to the corresponding folder
files = os.listdir("H:\深度学习二期\武大公开数据\label_5classes")
for file in files:
print("--------------%s" % file)
label("H:\\深度学习二期\\武大公开数据\\label_5classes\\"+file, "H:\\深度学习二期\武大公开数据\\mylabel\\", file)
#label("H:\深度学习二期\武大公开数据\label_5classes\GF2_PMS1__L1A0000564539-MSS1_label.tif")
|
[
"noreply@github.com"
] |
longhushi.noreply@github.com
|
ecdac1f4d7f176ebe320de8db6d545735aa185b2
|
89a90707983bdd1ae253f7c59cd4b7543c9eda7e
|
/data_structures_and_algorithms_in_python/ch04/reverse_iterative.py
|
5545d9efd7456da1ec391721b21d2a5bf49fedb4
|
[] |
no_license
|
timothyshull/python_reference_code
|
692a7c29608cadfd46a6cc409a000023e95b9458
|
f3e2205dd070fd3210316f5f470d371950945028
|
refs/heads/master
| 2021-01-22T20:44:07.018811
| 2017-03-17T19:17:22
| 2017-03-17T19:17:22
| 85,346,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
def reverse_iterative(S):
start, stop = 0, len(S)
while start < stop - 1:
S[start], S[stop - 1] = S[stop - 1], S[start] # swap first and last
start, stop = start + 1, stop - 1 # narrow the range
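
# A minimal usage sketch (hypothetical data, not part of the original file):
if __name__ == "__main__":
    data = [1, 2, 3, 4, 5]
    reverse_iterative(data)
    print(data)  # [5, 4, 3, 2, 1]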
|
[
"timothyshull@gmail.com"
] |
timothyshull@gmail.com
|
3353ea5a6424f103ed94bd6f9d48f9e122eca5e2
|
c5c968d6d94755fa5f36bbee6ee8daa6203ec2c0
|
/apps/game/models/tests/__init__.py
|
8306cc362e5e0562079485d5f3aae495a46e7671
|
[] |
no_license
|
hien-cao/Game-online-shop
|
b1ce76a5a8e2c9592a8d8163cf76633824cade38
|
e2c58c351e62296635051c720093c037755008c5
|
refs/heads/master
| 2020-04-25T02:51:23.391462
| 2019-02-20T12:46:26
| 2019-02-20T12:46:26
| 172,455,390
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
from .test_game import *
from .test_highscore import *
from .test_purchase import *
from .test_save import *
from .test_tag import *
|
[
"juho.jokela@atalent.fi"
] |
juho.jokela@atalent.fi
|
6f8ffe769e58be39383edbd86029e3f3f3437018
|
57308b9364066ece78a1095654bae2395c41a61b
|
/polls/tests.py
|
93795385795694def21727400c0f5480ac8ae7b5
|
[] |
no_license
|
SimeonAleksov/qpick_test
|
f4927093c51e06bc5c0986040c11bc8a7d59ecc6
|
762b26763eec3f8e4e02c66e2becc030ed698166
|
refs/heads/main
| 2023-05-01T21:02:31.863783
| 2021-05-24T13:17:09
| 2021-05-24T13:17:09
| 370,352,458
| 0
| 0
| null | 2021-05-24T13:08:03
| 2021-05-24T12:57:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,776
|
py
|
import datetime
from django.test import TestCase
from django.utils import timezone
from django.urls import reverse
from .models import Question
def create_question(question_text, days):
"""
Create a question with the given `question_text` and published the
given number of `days` offset to now (negative for questions published
in the past, positive for questions that have yet to be published).
"""
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
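
# For example, create_question("Past question.", days=-30) yields a question
# whose pub_date is 30 days in the past, as used throughout the tests below.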
class QuestionIndexViewTests(TestCase):
def test_no_questions(self):
"""
If no questions exist, an appropriate message is displayed.
"""
response = self.client.get(reverse("polls:index"))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context["latest_question_list"], [])
def test_past_question(self):
"""
Questions with a pub_date in the past are displayed on the
index page.
"""
question = create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse("polls:index"))
self.assertQuerysetEqual(
response.context["latest_question_list"],
[question],
)
def test_future_question(self):
"""
Questions with a pub_date in the future aren't displayed on
the index page.
"""
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse("polls:index"))
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context["latest_question_list"], [])
def test_future_question_and_past_question(self):
"""
Even if both past and future questions exist, only past questions
are displayed.
"""
question = create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse("polls:index"))
self.assertQuerysetEqual(
response.context["latest_question_list"],
[question],
)
def test_two_past_questions(self):
"""
The questions index page may display multiple questions.
"""
question1 = create_question(question_text="Past question 1.", days=-30)
question2 = create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse("polls:index"))
self.assertQuerysetEqual(
response.context["latest_question_list"],
[question2, question1],
)
class QuestionModelTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() returns False for questions whose pub_date
is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() returns False for questions whose pub_date
is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=1, seconds=1)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() returns True for questions whose pub_date
is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
class QuestionDetailViewTests(TestCase):
def test_future_question(self):
"""
The detail view of a question with a pub_date in the future
returns a 404 not found.
"""
future_question = create_question(question_text="Future question.", days=5)
url = reverse("polls:detail", args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_past_question(self):
"""
The detail view of a question with a pub_date in the past
displays the question's text.
"""
past_question = create_question(question_text="Past Question.", days=-5)
url = reverse("polls:detail", args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text)
|
[
"aleksov_s@outlook.com"
] |
aleksov_s@outlook.com
|
889d46a1f5c7c60e2d4dcb5e65b166d1e279b52d
|
bbc556d7cfb0e503f9499f956e454fd9ba385ac2
|
/black_widow/apps.py
|
b404f6e278a9617fd14942cd8906873f0a9b3347
|
[] |
no_license
|
Dmitri86/avengers
|
f1aa4c071969a41ec26a55b852ed3c5ab30fe3ff
|
17775e6237642929b6fef2c4710ee947d3067d46
|
refs/heads/master
| 2020-04-27T01:55:47.112867
| 2019-03-06T13:56:21
| 2019-03-06T13:56:21
| 173,978,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
from django.apps import AppConfig
class BlackWidowConfig(AppConfig):
name = 'black_widow'
|
[
"zhirkod@gmail.com"
] |
zhirkod@gmail.com
|
1a80da1f9f45c9392f2b83b62267583ba81a050b
|
4a4ee033cc97d395e3515ba6f5cacc1638a48429
|
/scripts/print_subtokenunits.py
|
992ac1e3ef715d8b21cc1f717879c5bc1d517f7d
|
[] |
no_license
|
Mahjiong/bpe_analysis
|
cb6b900833ad992718c511f411b69c5f7365e7bd
|
ef116cab3d0bbf74c39d85de97dce2205d36af23
|
refs/heads/master
| 2020-07-04T23:44:09.649138
| 2019-05-29T22:35:14
| 2019-05-29T22:35:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
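# Print only the subword units that do not carry the SentencePiece
# word-boundary marker '▁'. Example (assumed input): for the line
# "▁Hello wor ld", "▁Hello" is skipped while "wor" and "ld" are printed.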
for line in sys.stdin:
for token in line.strip().split():
if '▁' not in token:
print(token)
|
[
"notani@cs.cmu.edu"
] |
notani@cs.cmu.edu
|
9795a4c191f376aefac130da137447b76827e061
|
d82b879f41e906589a0a6ad5a6a09e0a0032aa3f
|
/ObservationScripts/test_snap_if2.py
|
56caefba8dc7370d7430c7cfb8f23665226a4433
|
[] |
no_license
|
SETIatHCRO/ATA-Utils
|
66718eed669882792148fe0b7a2f977cd0f6ac2e
|
59f4d21b086effaf41d5e11e338ce602c803cfd0
|
refs/heads/master
| 2023-08-16T20:41:44.233507
| 2023-08-10T20:39:13
| 2023-08-10T20:39:13
| 137,617,987
| 5
| 5
| null | 2023-08-10T20:39:14
| 2018-06-17T00:07:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 322
|
py
|
from SNAPobs import snap_if
ant_list = ["1c", "1e", "1g", "1h", "1k", "2a", "2b", "2c",
"2e", "2h", "2j", "2k", "2l", "2m", "3c", "3d",
"4j", "5b", "4g"]
lo = "B"
antlo_list = [ant+lo for ant in ant_list]
ifs = {antlo+pol:20 for antlo in antlo_list for pol in ["x","y"]}
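# ifs maps antenna+LO+polarization keys to attenuation values, e.g.
# {"1cBx": 20, "1cBy": 20, ...}; 20 is assumed here to be the attenuator
# setting in dB expected by snap_if.setatten.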
snap_if.setatten(ifs)
|
[
"wael.a.farah@gmail.com"
] |
wael.a.farah@gmail.com
|