blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
18b9abf7d45659415762dd267fd8276b625830d3 | c7656039903f8a418b8736fcf8af302e5bc8ad52 | /apis/company_info_api.py | ed6311e3d08f69ec983eaea479f93e4b82902cb2 | [] | no_license | traderbestcoin/Stock_Trading_Platform | fd3d68e02c5a7c1459d7e431a63d16d8f627e936 | 5814fbfd4a6f920a081a39ac0e20d4aec50842a1 | refs/heads/master | 2023-07-18T08:56:14.122403 | 2018-01-23T02:13:28 | 2018-01-23T02:13:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | from django.http import HttpResponse, Http404
import urllib.request
# Self-written api to pull company write-up from Reuters
def get_company_info(request, symbol=''):
    """Return the first ~300 characters of a company's Reuters profile.

    Scrapes https://www.reuters.com/finance/stocks/company-profile/<symbol>.O
    and extracts the text that follows the "Full Description" anchor.

    :param request: Django request object (unused beyond the view contract).
    :param symbol: stock ticker symbol; an empty symbol is a 404.
    :returns: HttpResponse with the profile snippet.
    :raises Http404: on empty symbol, network failure, or missing markers.
    """
    if symbol == '':
        raise Http404('Invalid request.')
    reuter_link = 'https://www.reuters.com/finance/stocks/company-profile/' + symbol + '.O'
    try:
        contents = urllib.request.urlopen(reuter_link).read().decode('utf_8')
    except Exception as e:
        # BUG FIX: Python 3 exceptions have no `.message` attribute, so the
        # original handler itself raised AttributeError; use str(e) instead.
        raise Http404('Err:' + str(e))
    description_link = 'Full Description</a>'
    i0 = contents.find(description_link)
    p_tag = '<p>'
    i1 = contents[i0:].find(p_tag) if i0 != -1 else -1
    # find() returns -1 when a marker is absent; without this guard the
    # slice below would return unrelated page content.
    if i0 == -1 or i1 == -1:
        raise Http404('Could not locate the company description for: ' + symbol)
    start_index = i0 + i1 + len(p_tag)
    profile = contents[start_index:start_index + 300]
    return HttpResponse(profile)
| [
"hua940201@gmail.com"
] | hua940201@gmail.com |
acc5472bf08c41d46cacbcdf8691564b002f4e5b | b30d4b451de9cb3ae5a4ac8418da0326f63262a8 | /PURotationProbability-exp/pythonexports/Pre-trained-RotNetPUProbs.py | 0ec7da75e11d93c39524b02761cb66bee3751915 | [] | no_license | tusharsangam/ImprovingFeatureDecouplingSelfSupervised | 16f7e97f88d909eedd014aa1109849a60016b550 | 9f15e12815059a86ff20d6983150321f6d5edf79 | refs/heads/main | 2023-01-25T04:52:39.016757 | 2020-12-09T13:07:05 | 2020-12-09T13:07:05 | 319,954,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,871 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from ImageNet_RotNet_AlexNet.AlexNet import AlexNet as AlexNet
import torch
from DataLoader import get_dataloaders, get_short_dataloaders
import numpy as np
from torch.utils.tensorboard import SummaryWriter
import os
from torch import nn
from torch.nn import functional as F
# In[2]:
# Training hyper-parameters for the SGD optimizer and data loaders.
use_cuda = torch.cuda.is_available()
device = "cuda" if use_cuda else "cpu"
learning_rate = 0.1  # initial LR; the LUT schedule below overrides it per epoch
momentum = 0.9
weight_decay = 5e-4
nesterov = True
batch_size = 192
num_epochs = 35
# Piecewise-constant LR schedule as (epoch upper bound, learning rate) pairs.
LUT_lr = [(5, 0.01),(15, 0.002),(25, 0.0004),(35, 0.00008)]
# In[3]:
class BottleNeck(nn.Module):
    """Frozen, pre-trained RotNet/AlexNet trunk used as a fixed feature extractor."""
    def __init__(self):
        super(BottleNeck, self).__init__()
        self.bottleneck = AlexNet(num_classes=4)
        # Load the RotNet checkpoint trained for 50 epochs on ImageNet.
        pretrained_weights = "./ImageNet_RotNet_AlexNet/model_net_epoch50"
        pretrained_weights = torch.load(pretrained_weights)
        self.bottleneck.load_state_dict(pretrained_weights['network'])
        # Freeze the backbone: only the downstream linear head is trained.
        for param in self.bottleneck.parameters():
            param.requires_grad = False
    def forward(self, x):
        # no_grad is redundant with the frozen parameters above but also
        # avoids building an autograd graph for the backbone activations.
        with torch.no_grad():
            # Request the "fc_block" intermediate features from the AlexNet
            # wrapper (project-specific API — see ImageNet_RotNet_AlexNet).
            x = self.bottleneck(x, ["fc_block"])
        #print(x.size())
        return x
# In[4]:
class RotNet(nn.Module):
    """Single linear head mapping 4096-d backbone features to one rotation logit."""

    def __init__(self):
        super(RotNet, self).__init__()
        self.classifier = nn.Linear(4096, 1)

    def forward(self, x):
        return self.classifier(x)
# In[5]:
# Frozen feature extractor and the trainable one-logit rotation head.
bottleneck = BottleNeck().to(device)
rotnet = RotNet().to(device)
# In[6]:
# Only rotnet's parameters are optimized (the backbone is frozen above).
optimizer = torch.optim.SGD(rotnet.parameters(), lr=learning_rate, momentum=momentum, nesterov=nesterov, weight_decay=weight_decay)
# In[7]:
# Binary PU objective: upright vs rotated, on raw logits.
criterion = torch.nn.BCEWithLogitsLoss()
# In[8]:
loaders = get_short_dataloaders('imagenet', batch_size=batch_size, num_workers=2)
# In[9]:
def adjust_lr(current_epoch):
    """Set the optimizer learning rate from the LUT_lr schedule for this epoch."""
    # Default to the final scheduled rate when no bound exceeds current_epoch.
    target_lr = LUT_lr[-1][1]
    for max_epoch, lr in LUT_lr:
        if max_epoch > current_epoch:
            target_lr = lr
            break
    for group in optimizer.param_groups:
        group['lr'] = target_lr
# In[10]:
import time
def train(data_loader, model, epoch):
    """Train the PU rotation head for one epoch.

    For every image the four rotated copies (0/90/180/270 degrees) are built,
    the upright copy is labeled positive (1) and the rotations negative (0),
    and the head is trained with BCEWithLogitsLoss on frozen backbone features.

    :param data_loader: loader yielding (images, labels); labels are ignored.
    :param model: the trainable rotation head (RotNet).
    :param epoch: current epoch index (only used by the disabled LR schedule).
    :returns: (train_loss, train_acc) averaged over the epoch.
    """
    model.train()
    losses = []
    correct = 0.0
    train_loss = np.inf
    train_acc = 0.0
    #adjust_lr(epoch)
    start_time = time.time()
    for batch_idx, sample in enumerate(data_loader):
        optimizer.zero_grad()
        data, _ = sample
        batch_size = data.size(0)
        data = data.to(device)
        # 90/180/270-degree rotations expressed as flips/transposes of H and W.
        data_90 = torch.flip(torch.transpose(data, 2, 3), [2])
        data_180 = torch.flip(torch.flip(data, [2]), [3])
        data_270 = torch.transpose(torch.flip(data, [2]), 2, 3)
        # BUG FIX: the original stacked on dim=1 and flattened with view(),
        # which orders samples image-by-image (img0 rot0, img0 rot90, ...)
        # while the target below labels the FIRST batch_size samples as
        # upright.  Concatenating along the batch dim yields the order the
        # labels assume: [all rot0, all rot90, all rot180, all rot270].
        data = torch.cat([data, data_90, data_180, data_270], dim=0)
        # PU labels: upright images positive (1), every rotation negative (0).
        target = torch.FloatTensor([1]*batch_size+[0]*batch_size+[0]*batch_size+[0]*batch_size)
        target = target.to(device)
        # Shuffle samples and labels together so batches are not grouped by rotation.
        randomize = np.arange(len(data))
        np.random.shuffle(randomize)
        data = data[randomize]
        target = target[randomize]
        with torch.no_grad():
            feature = bottleneck(data)  # frozen backbone features
        output = model(feature).squeeze_(dim=1)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        # Threshold sigmoid probabilities at 0.5 for accuracy bookkeeping.
        pred = torch.sigmoid(output)
        zeros = torch.zeros_like(pred)
        ones = torch.ones_like(pred)
        pred = torch.where(pred > 0.5, ones, zeros)
        correct += pred.eq(target.view_as(pred)).sum().item()
    end_time = time.time()
    print("Time for epoch pass {}".format(end_time-start_time))
    train_loss = float(np.mean(losses))
    # Four rotated copies per image, hence the *4 in the denominator.
    train_acc = correct / float(len(data_loader.dataset)*4)
    print('Train set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(train_loss, correct, len(data_loader.dataset)*4, 100*train_acc))
    return train_loss, train_acc
# In[11]:
def valid(data_loader, model):
    """Evaluate the PU rotation head on a validation loader.

    Mirrors train(): builds the four rotations per image, labels the upright
    copy positive, and measures BCE loss and 0.5-threshold accuracy with no
    gradient tracking.

    :param data_loader: loader yielding (images, labels); labels are ignored.
    :param model: the rotation head to evaluate.
    :returns: (valid_loss, valid_acc) averaged over the loader.
    """
    model.eval()
    losses = []
    correct = 0.0
    valid_loss = np.inf
    valid_acc = 0.0
    start_time = time.time()
    for batch_idx, sample in enumerate(data_loader):
        with torch.no_grad():
            data, _ = sample
            batch_size = data.size(0)
            data = data.to(device)
            data_90 = torch.flip(torch.transpose(data, 2, 3), [2])
            data_180 = torch.flip(torch.flip(data, [2]), [3])
            data_270 = torch.transpose(torch.flip(data, [2]), 2, 3)
            # BUG FIX: concatenate along the batch dim (instead of stack+view)
            # so the sample order matches the [1]*B + [0]*3B labels below;
            # the original interleaved rotations per image and misaligned
            # most labels.
            data = torch.cat([data, data_90, data_180, data_270], dim=0)
            target = torch.FloatTensor([1]*batch_size+[0]*batch_size+[0]*batch_size+[0]*batch_size)
            target = target.to(device)
            feature = bottleneck(data)
            output = model(feature).squeeze_(dim=1)
            loss = criterion(output, target)
            losses.append(loss.item())
            pred = torch.sigmoid(output)
            zeros = torch.zeros_like(pred)
            ones = torch.ones_like(pred)
            pred = torch.where(pred > 0.5, ones, zeros)
            correct += pred.eq(target.view_as(pred)).sum().item()
    end_time = time.time()
    print("Time for valid epoch pass {}".format(end_time-start_time))
    valid_loss = float(np.mean(losses))
    # Four rotated copies per image, hence the *4 in the denominator.
    valid_acc = correct / float(len(data_loader.dataset)*4)
    print('Valid set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(valid_loss, correct, len(data_loader.dataset)*4,100*valid_acc))
    return valid_loss, valid_acc
# In[12]:
def run_main_loop(model, loaders, num_epochs):
    """Train/validate for num_epochs, log to TensorBoard, checkpoint the best model.

    :param model: the rotation head passed to train()/valid().
    :param loaders: dict with 'train_loader' and 'valid_loader'.
    :param num_epochs: number of epochs to run.
    """
    writer = SummaryWriter('./logs/AlexNet_PULearning')
    #os.unlink('./logs/AlexNet_PULearning')
    save_path = "weights/AlexNet_RotNet_PU.pth"
    best_acc = 0.0
    for epoch in range(num_epochs):
        print("Performing {}th epoch".format(epoch))
        train_loss, train_acc = train(loaders['train_loader'], model, epoch)
        val_loss, val_acc = valid(loaders['valid_loader'], model)
        writer.add_scalar('Loss/train', train_loss, epoch)
        writer.add_scalar('Loss/Valid', val_loss, epoch)
        writer.add_scalar('Accuracy/train', train_acc, epoch)
        writer.add_scalar('Accuracy/Valid', val_acc, epoch)
        writer.add_scalar('LR', optimizer.param_groups[0]['lr'], epoch)
        # Checkpoint only when validation accuracy improves.
        if val_acc > best_acc :
            best_acc = val_acc
            #save model
            states = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'best_accuracy': best_acc
            }
            torch.save(states, save_path)
            print('Model Saved')
# In[13]:
run_main_loop(rotnet, loaders, num_epochs)
# In[ ]:
'''
import time
data_loader = loaders['valid_loader']
criterion = nn.BCEWithLogitsLoss()
correct = 0
train_loss = np.Inf
train_acc = 0.0
with torch.no_grad():
start_time = time.time()
losses = []
for sample in data_loader:
data, _ = sample
batch_size = data.size(0)
data = data.to(device)
data_90 = torch.flip(torch.transpose(data,2,3),[2])
data_180 = torch.flip(torch.flip(data,[2]),[3])
data_270 = torch.transpose(torch.flip(data,[2]),2,3)
data = torch.stack([data, data_90, data_180, data_270], dim=1)
batch_size, rotations, channels, height, width = data.size()
data = data.view(batch_size*rotations, channels, height, width)
target = torch.FloatTensor([0]*batch_size+[1]*batch_size+[1]*batch_size+[1]*batch_size)
target = target.to(device)
with torch.no_grad():
feature = bottleneck(data)
output = rotnet(feature)
output = output.squeeze_(dim=1)
loss = criterion(output, target)
losses.append(loss.item())
#output = F.softmax(output, dim=1)
pred = torch.sigmoid(output) #.argmax(dim=1, keepdim=True)
ones = torch.ones_like(pred)
zeros = torch.zeros_like(pred)
pred = torch.where(pred>0.5, ones, zeros)
correct += pred.eq(target.view_as(pred)).sum().item()
#print(correct)
end_time = time.time()
print("Time for epoch pass {}".format(end_time-start_time))
train_loss = float(np.mean(losses))
train_acc = correct / float(len(data_loader.dataset)*4)
print('Train set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(train_loss, correct, len(data_loader.dataset)*4, 100*train_acc))
# In[ ]:
| [
"tusharsangam5@gmail.com"
] | tusharsangam5@gmail.com |
730cbe7694c1b542beab8a12f14a78bee08249f2 | 4995dca279e341bd7f0b44e39cb6935ab4b39393 | /app/core/tests/test_admin.py | d7ea3f54baba661c33266bdf9201bcbe229269f7 | [
"MIT"
] | permissive | pola255/recipe-app-api | d27e62da02793a57fda632f829bf53d82905446d | cb0738ad3aeca0a0012cfc12cd0326e1e73ddb54 | refs/heads/main | 2023-09-04T11:22:28.879335 | 2021-10-26T17:29:28 | 2021-10-26T17:29:28 | 365,495,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,623 | py | from django.test import TestCase
from django.contrib.auth import get_user_model
# generate url from Django admin page
from django.urls import reverse
# Give test request to aor application
from django.test import Client
class AdminSiteTests(TestCase):
    """Tests for the custom user model's pages in the Django admin."""

    # Runs before every test: create a logged-in superuser and a normal user.
    def setUp(self):
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='admin@londonappdev.com',
            password='password123'
        )
        # Log the admin in so every request below is authenticated.
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email='test@londonappdev.com',
            password='password123',
            name='Test User Full Name',
        )
    def test_users_listed(self):
        """Test that users are listed on the user page"""
        # Generate the admin changelist URL for the custom user model.
        url = reverse('admin:core_user_changelist')
        res = self.client.get(url)
        # assertContains also checks that the response status code is 200.
        self.assertContains(res, self.user.name)
        self.assertContains(res, self.user.email)
    def test_user_page_change(self):
        """Test that the user edit page works"""
        url = reverse('admin:core_user_change', args=[self.user.id])
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
    def test_create_user_page(self):
        """Test that the create user page works"""
        url = reverse('admin:core_user_add')
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
| [
"paulette255@gmail.com"
] | paulette255@gmail.com |
4762e0ea1a67769779afd94942b63864c6dd0c7b | 6b968220facf0ae6e747dfcd83cab8edce4af3dd | /Baitap4.py | b92fe7e2f848161382c3f9e238f53f06ca0f1326 | [] | no_license | thachhoang1909/homework_1 | 367270a63c44679f1250893dc1e549ebd1baa1c0 | c7ae7791ec048483126609dee631d3aa2063b41c | refs/heads/master | 2021-07-15T17:34:13.189000 | 2017-10-20T16:49:20 | 2017-10-20T16:49:20 | 105,155,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,212 | py |
# coding: utf-8
# <a id='baitap4'></a>
# # 4. Bài tập 4: Human Dataset
# - Mục tiêu: Gom nhóm hình có sự xuất hiện của con người và không có con người.
# - Dataset: INRIA Person Dataset.
#
# In[2]:
# import library
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
#plt.gray()
get_ipython().magic('matplotlib inline')
# In[3]:
# import library
from scipy import misc
import glob
images = glob.glob('dataset/*.png')
# create a fig to show image
fig = plt.figure(figsize=(10,5))
plt.title('Some samples in dataset')
plt.axis('off')
# for all 0-9 labels
k= 0
for i in range(10):
# initialize subplots in a grid 2x5 at i+1th position
if(i==4):
k+= 600
image = misc.imread(images[i+k], mode='L')
ax = fig.add_subplot(2, 5, 1+i)
# display image
ax.imshow(image, cmap=plt.cm.binary)
#don't show the axes
plt.axis('off')
plt.show()
# - Dataset gồm 1044 64x47. Gồm 2 class: 558 hình person và 486 hình non-person
# - Tiến hành rút trích HOG feature từ mỗi hình và lưu lại.
# <a id='baitap4_feature'></a>
# ## 4.1 Extract HoG Feature
# - Sử dụng hàm HoG của thư viện Skimage để thực hiện rút trích đặc trưng HoG từ mỗi ảnh.
# In[6]:
from skimage.feature import hog
from skimage import data, color, exposure
import numpy as np
# Extract a HOG descriptor (and visualization image) per dataset image.
target = [] # To get true lable
fds, hog_images = [],[]
for imagePath in images:
    # Label from the path's 9th character — presumably 'c' marks the
    # person class under dataset/ (TODO confirm against the file naming).
    if(imagePath[8]=='c'):
        target.append(1)
    else:
        target.append(0)
    image = misc.imread(imagePath, mode='L')  # read as grayscale
    fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),cells_per_block=(1, 1), visualise=True)
    fds.append(fd)
    hog_images.append(hog_image)
# Convert fds, hog_image from list to numpy array
feature_HOG = np.array(fds)
hog_images = np.array(hog_images)
# In[7]:
fig = plt.figure(figsize=(15,5))
plt.title('Two sample of HOG feature')
plt.axis('off')
image = misc.imread(images[0], mode='L')
ax = fig.add_subplot(1,4,1)
ax.imshow(image,cmap=plt.cm.binary)
ax.set_title('Orignal Image')
ax = fig.add_subplot(1,4,2)
ax.imshow(hog_images[0],cmap=plt.cm.binary)
ax.set_title('HOG Image')
image = misc.imread(images[800], mode='L')
ax = fig.add_subplot(1,4,3)
ax.imshow(image,cmap=plt.cm.binary)
ax.set_title('Orignal Image')
ax = fig.add_subplot(1,4,4)
ax.imshow(hog_images[0],cmap=plt.cm.binary)
ax.set_title('HOG Image')
plt.show()
# In[13]:
#import DBSCAN
from sklearn.cluster import DBSCAN
# import library
from sklearn.cluster import spectral_clustering
from sklearn.metrics.pairwise import cosine_similarity
# import library
from sklearn.cluster import AgglomerativeClustering
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
TSNE_model = TSNE(learning_rate=100)
# In[8]:
from sklearn import metrics
def compareAlgorithm(algorithms, targetLabel, data):
    """Print a table of clustering-evaluation metrics for several labelings.

    :param algorithms: dict mapping an algorithm name to its predicted
        cluster labels (one label per sample).
    :param targetLabel: ground-truth class labels.
    :param data: feature matrix of shape (n_samples, n_features), used for
        the (sampled) silhouette score.
    """
    # BUG FIX: the original read the notebook global `label_kmean` here,
    # coupling the function to outside state; derive the class count from
    # the ground-truth labels that are actually passed in.
    print('#Sample: %d\t#Class: %d\t#feature: %d'%(data.shape[0], len(set(targetLabel)), data.shape[1]))
    print(82*'_')
    print('init\t\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette')
    for algorithm in algorithms:
        print('%-9s\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
              % (algorithm,
                 metrics.homogeneity_score(targetLabel, algorithms[algorithm]),
                 metrics.completeness_score(targetLabel, algorithms[algorithm]),
                 metrics.v_measure_score(targetLabel, algorithms[algorithm]),
                 metrics.adjusted_rand_score(targetLabel, algorithms[algorithm]),
                 metrics.adjusted_mutual_info_score(targetLabel, algorithms[algorithm]),
                 # Silhouette is O(n^2); score a 300-sample subset instead.
                 metrics.silhouette_score(data, algorithms[algorithm],
                                          metric='euclidean',
                                          sample_size=300)))
    print(82*'_')
# <a id='baitap4_clustering'></a>
# ## 4.2 Áp dụng thuật toán KMean, Spectral, Agglomerative Clustering
# ### KMean
# In[10]:
import pandas as pd
model_kmean = KMeans(n_clusters=2)
label_kmean = model_kmean.fit_predict(feature_HOG)
print("Cross Table")
print(50*'_')
df = pd.DataFrame({'label':label_kmean, 'True Label':target})
ct = pd.crosstab(df['label'], df['True Label'])
print(ct)
# ### Spectral Clustering
# In[14]:
graph = cosine_similarity(feature_HOG)
label_spectral = spectral_clustering(graph, n_clusters=2)
print("Cross Table")
print(50*'_')
df = pd.DataFrame({'label':label_spectral, 'True Label':target})
ct = pd.crosstab(df['label'], df['True Label'])
print(ct)
# ### Agglomerative Clustering
# In[15]:
aggModel = AgglomerativeClustering(n_clusters=2)
label_agglomerative = aggModel.fit_predict(feature_HOG)
print("Cross Table")
print(50*'_')
df = pd.DataFrame({'label':label_agglomerative, 'True Label':target})
ct = pd.crosstab(df['label'], df['True Label'])
print(ct)
# - Nhận thấy cả 3 thuật toán đề không cho ra kết quả tốt. Khó xác định giữa 2 class.
# ### Visualize kết quả
# #### PCA
# pca = PCA(n_components=2).fit_transform(feature_HOG)
# In[17]:
# create a fig to show image
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(2, 2, 1)
ax.scatter(pca[:,0],pca[:,1], c=label_kmean)
ax.set_title('KMean')
ax = fig.add_subplot(2, 2, 2)
ax.scatter(pca[:,0],pca[:,1], c=label_spectral)
ax.set_title('Spectral Clustering')
ax = fig.add_subplot(2, 2, 3)
ax.scatter(pca[:,0],pca[:,1], c=label_agglomerative)
ax.set_title('Agglomerative')
ax = fig.add_subplot(2, 2, 4)
ax.scatter(pca[:,0],pca[:,1], c=target)
ax.set_title('True Label')
plt.show()
# ### T-SNE
# In[18]:
tnse = TSNE_model.fit_transform(feature_HOG)
# ### Evaluate
# In[19]:
compareAlgorithm({'KMean':label_kmean, 'Spectral':label_spectral,'Agglomerative':label_agglomerative}, target, feature_HOG)
# ### Nhận xét:
# - Kết quả không tốt. Với dữ liệu khó có thể cluster thành 2 nhóm phân biệt person và non-person.
# - Nguyên nhân: có thể do input đầu vào là hình grayscale, một số hình bị quá sáng, mất mát thông tin trong quá trình chuyển ảnh màu sang ảnh grayscale.
#
| [
"thachhoang2410@gmail.com"
] | thachhoang2410@gmail.com |
7a55740348b8940be1cbf402885d1f4a3994e426 | 05a70c12df808455100598d8a6fdb5635c641ab8 | /Ene-Jun-2020/bucio-montes-hector-daniel/Ordinario/marvelHeroes-master/app/querys.py | dc6ac3a156dcf5571e2303318c47af3ac37ad977 | [
"MIT"
] | permissive | Jonathan-aguilar/DAS_Sistemas | 991edcc929c33ba9bb8bc84e741b55c10a8420a3 | 4d02efc64161871084df1bff258112351e5d1241 | refs/heads/development | 2023-07-24T12:26:54.698452 | 2021-09-02T20:52:26 | 2021-09-02T20:52:26 | 289,764,892 | 1 | 0 | MIT | 2021-09-02T20:52:27 | 2020-08-23T20:54:55 | Python | UTF-8 | Python | false | false | 3,642 | py | from modelsMarvel import *
class querys():
    """Peewee query helpers for the Marvel heroes Postgres database.

    `schema` is a template dict describing the nested hero document shape
    produced by `heroToSchema` from a flat joined row.
    """
    def __init__(self):
        # Template of the nested hero document; the empty strings only mark
        # which keys belong to each section.
        self.schema={
            'id':"",
            'name':"",
            'image':"",
            'powerstats':{'combat':"", 'durability':"", 'intelligence':"",'power':"",'speed':"",'strength':""},
            'appearance':{'eyeColor':"",'gender':"", 'hairColor':"",'height':"",'race':"",'weight':"" },
            'biography':{'aliases':"",'alignment':"",'alterEgos':"",'firstAppearance':"",'fullName':"",'placeOfBirth':"",'publisher':""},
            'connections':{'groupAffiliation':"",'relatives':""},
            'work':{'base':"",'occupation':""}
        }
    def existId(self,id):# CACHEAR QUERY
        """Return `id` when a hero row with that id exists, else 0.

        NOTE(review): returns 1 on any database error, which a caller can
        confuse with a valid id — confirm the intended error contract.
        """
        print("from existId Postgres")
        try:
            hero=heroes.select(heroes.id).where(heroes.id==id)
            herod=hero.dicts()
            if (len(herod)>0):
                return id
            return 0
        except:
            return 1
    def getIdsSet(self):
        """Return the list of all hero ids, or 0 on a database error."""
        print("from getIdsSet Postgres")
        ids=[]
        try:
            heroesIds=heroes.select(heroes.id)
            heroesd=heroesIds.dicts()
            if (len(heroesd)>0):
                for row in heroesd:
                    # Each row dict has a single key ('id'); collect its value.
                    for x, y in row.items():
                        ids.append(y)
            return ids
        except:
            print("error")
            return 0
    def getHeroes1(self): #CACHEAR QUERY
        """Return a dict of {hero id: hero name}, or 0 when empty or on error."""
        print("from getHeroes1 Postgres")
        try:
            hero=heroes.select(heroes.id, heroes.name)
            herod=hero.dicts()
            heroesL={}
            if (len(herod)>0):
                for heroi in range(len(herod)):
                    # for x,y in row.items():
                    myHero={}
                    heroesL[herod[heroi]['id']]=herod[heroi]['name']
                return heroesL
            return 0
        except:
            print("error")
            return 0
    def getHero(self,id): #from database get all in one table
        """Return one hero with all joined sections as a flat dict, or 0.

        Accepts either a raw id or a cache-style key of the form 'hero:<id>'.
        """
        print("from getHero Postgres")
        # Strip the 'hero:' cache-key prefix when present.
        if(str(id).startswith('hero:')):
            id=str(id)[5:]
        try:
            hero=heroes.select()
            # NOTE(review): these aliases are never used below.
            pwr=powerstats.alias()
            bio=biography.alias()
            her=heroes.alias()
            # Join every section table onto heroes and select all columns.
            hero=(heroes
                .select( heroes, powerstats, biography, appearance, connections, work)
                .join(powerstats)
                .switch(heroes)
                .join(biography)
                .switch(heroes)
                .join(appearance)
                .switch(heroes)
                .join(connections)
                .switch(heroes)
                .join(work)
                .where(heroes.id==id))
            herod=hero.dicts()
            if(len(herod)>0):
                len(herod)
                myHero={}
                # Copy the first (only) joined row into a plain dict.
                for key,value in herod[0].items():
                    myHero[key]=value
                return myHero
            return 0
        except:
            print("error")
            return 0
    def heroToSchema(self,hero):
        """Regroup a flat joined-row dict into the nested `schema` shape."""
        schemaR={
            'id':hero['id'],
            'name':hero['name'],
            'image':hero['image'],
            'powerstats': {k:v for (k,v) in hero.items() if k in self.schema['powerstats'] },
            'appearance': {k:v for (k,v) in hero.items() if k in self.schema['appearance'] },
            'biography': {k:v for (k,v) in hero.items() if k in self.schema['biography'] },
            'connections':{k:v for (k,v) in hero.items() if k in self.schema['connections'] },
            'work': {k:v for (k,v) in hero.items() if k in self.schema['work'] }
        }
        return schemaR
| [
"daniel.mont@hotmail.com"
] | daniel.mont@hotmail.com |
2166da79b5ff85a9257bf136e8eaff195762ecbf | 26535bf852a8504d1508a0138523556afd4a53c5 | /snippets/urls.py | 3db324cb3431ab3d2bfc11a8a1e6a6c6760933d3 | [] | no_license | nehus/my | c1ed691916e43d0e9cda777be473a92fffbc8f08 | f523ead939e6764f0bc45174b4d4d814832b5ce6 | refs/heads/master | 2021-01-19T21:13:05.410384 | 2017-04-18T13:12:48 | 2017-04-18T13:12:48 | 88,626,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | from django.conf.urls import url
from snippets.views import *
from snippets import models
from . import views
# URL namespace for reversing, e.g. reverse('snippets:Login').
app_name = 'snippets'
urlpatterns = [
    # url(r'^snippets/$', UserSignUp.as_view(),name='UserSignUp'),
    # Sign-up form at the app root.
    url(r'^$', views.UserSignUp, name='UserSignUp'),
    #url(r'^otp$', views.OTP,name='OTP'),
    # url(r'^otp/$', views.OTP, name='OTP'),
    # OTP verification for a pending registration, keyed by temp_id.
    url(r'^(?P<temp_id>[0-9]+)/$', views.OTP, name='OTP'),
    url(r'^login/$', views.Login, name='Login'),
    # url(r'^resend_otp/$', views.resend_otp, name='resend_otp'),
    # Re-send the OTP for the same pending registration.
    url(r'^(?P<temp_id>[0-9]+)/resend_otp/$', views.resend_otp, name='resend_otp'),
]
| [
"nehasagarb1@gmail.com"
] | nehasagarb1@gmail.com |
b04b42e59b78aa2efe3eb9622d8fa877244cbc67 | fd65e1c644141818d81743a9b2a6239b182fb157 | /flask_mysql/full_demo/flask_app/models/ice_cream.py | f776ce83430d49fa8efb7011f17b4ad4c2de35e2 | [] | no_license | smartinez9535/python | 67ea03892acf6deb45a6add20fffa74ff32171d6 | 2cea59bf9e9a90d909b1a89abd598321a613a0e3 | refs/heads/master | 2023-07-03T17:40:21.758644 | 2021-07-30T17:02:06 | 2021-07-30T17:02:06 | 383,599,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,417 | py | from flask_app.config.mysqlconnection import connectToMySQL
from flask_app.models import user
from flask import flash
class IceCream:
    """Model for rows of the `ice_creams` table in the full_demo schema."""
    schema = "full_demo"

    def __init__(self, data):
        self.id = data['id']
        # BUG FIX: the key was misspelled as 'user_id]' in the original,
        # raising KeyError for every row that has a creator.
        if data['user_id']:
            # The user object of the creator of the ice cream.
            self.user = user.User.get_by_id({"id": data['user_id']})
        self.flavor = data['flavor']
        self.cone = data['cone']
        self.topping = data['topping']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']

    @classmethod
    def create(cls, data):
        """Insert a new ice cream; returns the id of the new row."""
        query = """
            INSERT INTO ice_creams (user_id, flavor, cone, topping, created_at, updated_at)
            VALUES (%(user_id)s, %(flavor)s, %(cone)s, %(topping)s, NOW(), NOW());
        """
        return connectToMySQL(cls.schema).query_db(query, data)

    @classmethod
    def get_all(cls):
        """Return every ice cream as a list of IceCream instances."""
        query = "SELECT * FROM ice_creams;"
        results = connectToMySQL(cls.schema).query_db(query)
        ice_creams = []
        for row in results:
            ice_creams.append(cls(row))
        return ice_creams

    @classmethod
    def get_one(cls, data):
        """Return one ice cream by data['id'], or None when no row matches."""
        query = "SELECT * FROM ice_creams WHERE id = %(id)s;"
        results = connectToMySQL(cls.schema).query_db(query, data)
        # Guard the empty result set instead of raising IndexError.
        if not results:
            return None
        return cls(results[0])

    @classmethod
    def update(cls, data):
        """Update flavor/cone/topping for the row matching data['id']."""
        # BUG FIX: the original statement had a misspelled table name
        # ('ice_Creams'), no WHERE clause (would update every row), and
        # was never executed.
        query = """
            UPDATE ice_creams
            SET flavor = %(flavor)s, cone = %(cone)s, topping = %(topping)s,
                updated_at = NOW()
            WHERE id = %(id)s;
        """
        return connectToMySQL(cls.schema).query_db(query, data)

    @classmethod
    def delete(cls, data):
        """Delete the row matching data['id']."""
        query = "DELETE FROM ice_creams WHERE id = %(id)s;"
        return connectToMySQL(cls.schema).query_db(query, data)

    @staticmethod
    def validate(post_data):
        """Flash a message for each invalid field; return overall validity.

        BUG FIX: the original def line was missing its colon (SyntaxError),
        validated 'flavor' three times instead of flavor/cone/topping, and
        never returned is_valid.
        """
        is_valid = True
        if len(post_data['flavor']) < 2:
            flash("Flavor must be at least 2 characters")
            is_valid = False
        if len(post_data['cone']) < 2:
            flash("Cone must be at least 2 characters")
            is_valid = False
        if len(post_data['topping']) < 2:
            flash("Topping must be at least 2 characters")
            is_valid = False
        return is_valid
| [
"smartinez9535@gmail.com"
] | smartinez9535@gmail.com |
7427962433afa25dffcb72fa5a094b4873b034a2 | 561ba9d3c50e616a2dd35742622cf10a8cb4bbb8 | /birthday/birthday.py | 9a20cf22fabb76738370678d2a265a4ab5736fb3 | [
"MIT"
] | permissive | QingyuChai/RiceCogs | 62e98747e9d4f6e063594baa03b1a12944c0c0c2 | ae6d7640514ded57416d28842fb28d2459c93ce2 | refs/heads/master | 2021-01-13T12:17:28.967461 | 2020-08-09T19:00:52 | 2020-08-09T19:00:52 | 78,309,687 | 6 | 13 | MIT | 2020-08-09T19:00:33 | 2017-01-07T23:24:10 | Python | UTF-8 | Python | false | false | 5,127 | py | """Birthday cog"""
import discord
import os
import datetime
import time
import asyncio
from __main__ import send_cmd_help
from .utils.chat_formatting import *
from .utils.dataIO import fileIO, dataIO
from .utils import checks
from discord.ext import commands
class Birthday:
    """Red-DiscordBot cog that stores user birthdays and DMs a daily greeting."""

    def __init__(self, bot):
        self.bot = bot
        self.day = "data/account/birthday.json"  # on-disk store: {user_id: {day, month, year} | False}
        self.riceCog = dataIO.load_json(self.day)

    async def _check_date(self):
        """DM every user whose birthday is today, then announce a summary."""
        now = datetime.datetime.now()
        user_count = 0
        users_today = ""
        user_day = ""
        for users in self.riceCog:
            entry = self.riceCog[users]
            # BUG FIX: removed birthdays are stored as False; indexing them
            # with entry["day"] raised TypeError — skip them instead.
            if not entry:
                continue
            user = discord.utils.get(self.bot.get_all_members(), id=users)
            # Skip users the bot can no longer see (left all shared servers).
            if user is None:
                continue
            if now.day == entry["day"] and now.month == entry["month"]:
                await self.bot.send_message(user, "Happy Birthday!")
                user_count += 1
                user_day = user.name
                users_today += "{} has their birthday today!\n".format(user.name)
        # BUG FIX: the original tested a local `msg` that was only assigned
        # inside the loop, raising NameError whenever nobody had a birthday;
        # branch on the match count instead.
        if user_count == 1:
            await self.bot.say("{} is the only one who has their birthday today!".format(user_day))
        elif user_count > 1:
            await self.bot.say("{} users have birthday today!".format(user_count))
            await self.bot.say(users_today)
        else:
            await self.bot.say("Nobody has birthday today!")

    @commands.group(pass_context=True)
    async def birthday(self, ctx):
        """
        Birthday options"""
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx)
            return

    @birthday.command(pass_context=True, name="set")
    async def _set(self, ctx, day, month, year):
        """
        Set your birthday"""
        try:
            day = int(day)
            month = int(month)
            year = int(year)
        except ValueError:
            await self.bot.say("You did not enter a number. Try again.")
            return
        if month > 12 or month < 1:
            await self.bot.say("You did not enter a valid date. Try again.")
            return
        if day > 31 or day < 1:
            await self.bot.say("You did not enter a valid date. Try again.")
            return
        # Upper bound follows the current year instead of the hard-coded 2017.
        if year < 1940 or year > datetime.datetime.now().year:
            await self.bot.say("You did not enter a valid date. Try again.")
            return
        author = ctx.message.author
        # Re-create the entry when it is absent or was removed (stored False).
        if author.id not in self.riceCog or self.riceCog[author.id] == False:
            self.riceCog[author.id] = {}
            dataIO.save_json(self.day, self.riceCog)
        self.riceCog[author.id].update({"day" : day})
        self.riceCog[author.id].update({"month" : month})
        self.riceCog[author.id].update({"year" : year})
        dataIO.save_json(self.day, self.riceCog)
        day = self.riceCog[author.id]["day"]
        month = self.riceCog[author.id]["month"]
        year = self.riceCog[author.id]["year"]
        await self.bot.say("Your birthday is: {}/{}/{} (DD/MM/YY).".format(day, month, year))

    @birthday.command(pass_context=True, name="remove", aliases=["del", "rem"])
    async def _remove(self, ctx):
        """Remove your birthday from the list"""
        author = ctx.message.author
        if author.id not in self.riceCog or self.riceCog[author.id] == False:
            await self.bot.say("You did not set your birthday yet!")
            return
        else:
            # Mark as removed rather than deleting the key.
            self.riceCog[author.id] = False
            dataIO.save_json(self.day, self.riceCog)
            await self.bot.say("Birthday succesfully removed!")

    @birthday.command(pass_context=True)
    async def show(self, ctx, *, user: discord.Member=None):
        """
        Show the birthday of a user"""
        author = ctx.message.author
        prefix = ctx.prefix
        if user == None:
            user = author
        if user.id in self.riceCog and self.riceCog[user.id] != False:
            day = self.riceCog[user.id]["day"]
            month = self.riceCog[user.id]["month"]
            year = self.riceCog[user.id]["year"]
            await self.bot.say(str(user.name) + "'s birthday is: {}/{}/{} (DD/MM/YY).".format(day, month, year))
        else:
            msg = "You have not set your birthday yet! Do it now with {}birthday set!".format(prefix)
            await self.bot.say(msg)

    async def dmloop(self):
        """Check birthdays once a day until the cog is unloaded."""
        global loopedieloop
        loopedieloop = True
        while loopedieloop:
            await self._check_date()
            await asyncio.sleep(86400)

    async def on_ready(self):
        await self.dmloop()

    def __unload(self):
        # Stop the daily loop on cog unload (module-global flag shared
        # with dmloop — kept for compatibility with the original design).
        global loopedieloop
        loopedieloop = False
        print("Stopped checking for birthdays...")
def check_folder():
    """Create the data/account folder on first load."""
    folder = "data/account"
    if os.path.exists(folder):
        return
    print("Creating data/account folder")
    os.makedirs(folder)
def check_file():
    """Create an empty birthday.json store when it is missing or invalid."""
    path = "data/account/birthday.json"
    if not dataIO.is_valid_json(path):
        print("Creating data/account/birthday.json")
        dataIO.save_json(path, {})
def setup(bot):
    """Entry point called by Red when the cog is loaded."""
    check_folder()
    check_file()
    bot.add_cog(Birthday(bot))
| [
"chai.qingyu00@gmail.com"
] | chai.qingyu00@gmail.com |
e6704f2afe2a86ba86badac312d25537de66c7d2 | 8274d7dfc1ff4383ec41b302d760f598d8fc8b37 | /weekly-contest-131/remove-outermost-parentheses.py | 30a568cf534831fbb40da25446b965f2be5046d2 | [] | no_license | Carolusian/leetcode | 82c1743420bc931df959c4443f2ffa88e8acb682 | ebaa4d7731cb0eb4498eae8bc9535ea4ef5fb447 | refs/heads/master | 2022-06-12T04:45:41.158597 | 2022-05-25T23:17:04 | 2022-05-25T23:17:04 | 176,007,340 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,964 | py | """
5016. Remove Outermost Parentheses
A valid parentheses string is either empty (""), "(" + A + ")", or A + B, where A and B are valid parentheses strings, and + represents string concatenation. For example, "", "()", "(())()", and "(()(()))" are all valid parentheses strings.
A valid parentheses string S is primitive if it is nonempty, and there does not exist a way to split it into S = A+B, with A and B nonempty valid parentheses strings.
Given a valid parentheses string S, consider its primitive decomposition: S = P_1 + P_2 + ... + P_k, where P_i are primitive valid parentheses strings.
Return S after removing the outermost parentheses of every primitive string in the primitive decomposition of S.
Example 1:
Input: "(()())(())"
Output: "()()()"
Explanation:
The input string is "(()())(())", with primitive decomposition "(()())" + "(())".
After removing outer parentheses of each part, this is "()()" + "()" = "()()()".
Example 2:
Input: "(()())(())(()(()))"
Output: "()()()()(())"
Explanation:
The input string is "(()())(())(()(()))", with primitive decomposition "(()())" + "(())" + "(()(()))".
After removing outer parentheses of each part, this is "()()" + "()" + "()(())" = "()()()()(())".
Example 3:
Input: "()()"
Output: ""
Explanation:
The input string is "()()", with primitive decomposition "()" + "()".
After removing outer parentheses of each part, this is "" + "" = "".
Note:
S.length <= 10000
S[i] is "(" or ")"
S is a valid parentheses string
"""
def removeOuterParentheses(S: str) -> str:
ret, nest_level = '', 0
for c in S:
if c == '(':
ret += c if nest_level else ''
nest_level += 1
if c == ')':
nest_level -= 1
ret += c if nest_level else ''
return ret
print(removeOuterParentheses("(()())(())"))
print(removeOuterParentheses("(()())(())(()(()))"))
print(removeOuterParentheses("()()"))
print(removeOuterParentheses("(()())(())"))
| [
"pkufashuo400@gmail.com"
] | pkufashuo400@gmail.com |
4dceb156a659418f6339cbfba392841890d57d8a | 51c33304feee6ef68f31cfd72ff15073c0b7eca6 | /eppe/ontologies.py | aadd38c8fbe693e0ec94eae2153271c1043e3c49 | [
"MIT"
] | permissive | paleocore/paleocore110 | 44a5238b0ef7c8740497a80f47f9a7b7f51adbf1 | 808022a11e6b70020b0b6f448567b96d21341974 | refs/heads/master | 2022-07-22T06:06:36.326993 | 2021-12-13T20:12:30 | 2021-12-13T20:12:30 | 92,423,385 | 0 | 0 | MIT | 2022-07-06T19:46:56 | 2017-05-25T16:36:37 | Python | UTF-8 | Python | false | false | 2,097 | py | # EPPE Areas Vocabulary
laetoli = 'Laetoli'
kakesio = 'Kakesio'
esere = 'Esere'
LAETOLI_AREAS = (
(laetoli, 'Laetoli'),
(kakesio, 'Kakesio'),
(esere, 'Esere-Noiti'),
)
# Laetoli Stratigraphic Units
ngaloba = 'Ngaloba Beds'
upper_ngaloba = "Upper Ngaloba Beds"
lower_ngaloba = "Lower Ngaloba Beds"
qngaloba = '?Ngaloba Beds'
olpiro = 'Olpiro Beds'
naibadad = 'Naibadad Beds'
olgol = 'Olgol Lavas'
ndolanya = 'Ndolanya Beds'
upper_ndolanya = 'Upper Ndolanya Beds'
lower_ndolayna = 'Lower Ndolanya Beds'
laetolil = 'Laetolil Beds'
upper_laetolil = 'Upper Laetolil Beds'
lower_laetolil = 'Lower Laetolil Beds'
LAETOLI_UNITS = (
(ngaloba, 'Ngaloba Beds'),
(lower_ngaloba, "Lower Ngaloba Beds"),
(upper_ngaloba, "Upper Ngaloba Beds"),
(qngaloba, '?Ngaloba Beds'),
(olpiro, 'Olpiro Beds'),
(naibadad, 'Naibadad Beds'),
(olgol, 'Olgol Lavas'),
(ndolanya, 'Ndolanya Beds'),
(upper_ndolanya, 'Upper Ndolanya Beds'),
(lower_ndolayna, 'Lower Ndolanya Beds'),
(laetolil, 'Laetolil Beds'),
(upper_laetolil, 'Upper Laetolil Beds'),
(lower_laetolil, 'Lower Laetolil Beds'),
)
LIFE_STAGE_CHOICES = (
('infant', 'infant'),
('juvenile', 'juvenile')
)
SIZE_CLASS_CHOICES = (
('indeterminate', 'indeterminate'),
('1', '1'),
('2', '2'),
('3', '3'),
('4', '4'),
('5', '5')
)
argon_argon = '40Ar/39Ar dating'
argon_bayes = argon_argon + ', ' + 'bayesian interpolation'
amino_acid = 'Amino acid racimization'
biochrnology = 'Biochronology'
DATING_PROTOCOLS = (
(amino_acid, 'Amino acid racimization'),
(argon_argon, '40Ar/39Ar dating'),
(argon_bayes, '40Ar/39Ar dating, bayesian interpolation'),
(biochrnology, 'Biochronology')
)
deino_2011 = 'Deino AL. 2011. 40Ar/39Ar Dating of Laetoli, Tanzania. In: Harrison T, editor. ' \
'Paleontology and Geology of Laetoli: Human Evolution in Context: Volume 1: Geology, ' \
'Geochronology, Paleoecology and Paleoenvironment. Dordrecht: Springer Netherlands. p 77–97.'
DATING_REFERENCES = (
(deino_2011, 'Deino et al., 2011'),
)
| [
"reedd@austin.utexas.edu"
] | reedd@austin.utexas.edu |
1fe3437d1a0aefab11070ff5921432a80678cb00 | f9d3aa1ded1ffa16b9a8ac34549938ebfae5987a | /.venv/bin/chardetect | 8cd23278de4d6dd26338d2b5e85e7bec0f61f40f | [] | no_license | jonimng/classCloud | fadca4268007dbe6188e65047c3cbe9da35f7010 | 7557e5adc9cceb8cde308f78b750494db2410705 | refs/heads/master | 2021-04-03T04:35:23.106308 | 2018-06-17T14:16:53 | 2018-06-17T14:16:53 | 124,774,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | #!/home/ec2-user/environment/classCloud/.venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"yonaten08@gmail.com"
] | yonaten08@gmail.com | |
d87e9a269411fcc3c56246dadee5b16f4b383d98 | 5b586901b7e5c3f0a3e4b1167186c2c0b9e7b72d | /data_preparation.py | 7a30bac97b5b1aa7f1e604e8d6af9457be938625 | [] | no_license | justinaung/poi-recommendation | a26fde7ef37bb7f8d416dcfe36997b826d135b4c | b690660b7afecdb6c66033caf03d8383a0681600 | refs/heads/master | 2022-10-25T08:45:42.349097 | 2020-06-14T08:02:23 | 2020-06-14T08:02:23 | 272,155,931 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | import pandas as pd
train_df = pd.read_csv('data/Foursquare_train.txt', sep='\t')
train_users = train_df.loc[:, ['userID']].rename({'userID': 'userId:ID(User-ID)'}, axis=1)
train_users = train_users.drop_duplicates()
train_users[':LABEL'] = 'User'
train_places = train_df.loc[:, ['placeID']].rename({'placeID': 'placeId:ID(Place-ID)'}, axis=1)
train_places = train_places.drop_duplicates('placeId:ID(Place-ID)')
train_places[':LABEL'] = 'Place'
train_checkins = (
train_df.loc[:, ['userID', 'placeID']]
.rename({
'userID': ':START_ID(User-ID)',
'placeID': ':END_ID(Place-ID)',
}, axis=1)
)
train_checkins[':TYPE'] = 'CHECKED_IN'
train_users.to_csv('data/neo4j/train_users.csv', index=False)
train_places.to_csv('data/neo4j/train_places.csv', index=False)
train_checkins.to_csv('data/neo4j/train_checkins.csv', index=False)
| [
"justin.aung19@gmail.com"
] | justin.aung19@gmail.com |
8e6bcec82b09657a3cc7770bd5d022d998610ab4 | 819cadad71b7e4eabed08dc27f1d38f429ed27a0 | /NPM Project/webdriverio-test/node_modules/fibers/build/config.gypi | d7b187a09001ba74cd48acc3ffeef03be80a429c | [
"MIT",
"ISC"
] | permissive | HienNguyenT/group_feature_of_portal | e99f4ef60d9eb68d3eeb074fc8fbc7beae489770 | 58a020ae28324fe9d2a2a8f9aa11f7d0183ab46d | refs/heads/master | 2022-10-22T16:54:16.737116 | 2020-06-18T09:59:41 | 2020-06-18T09:59:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,519 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_gyp_path": "tools/icu/icu-system.gyp",
"icu_small": "false",
"icu_ver_major": "66",
"is_debug": 0,
"llvm_version": "0.0",
"napi_build_version": "6",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "false",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local/Cellar/node/14.3.0",
"node_release_urlbase": "",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/thiethuynh/Library/Caches/node-gyp/14.3.0",
"standalone_static_library": 1,
"save_dev": "true",
"dry_run": "",
"legacy_bundling": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/thiethuynh/.npm-init.js",
"userconfig": "/Users/thiethuynh/.npmrc",
"cidr": "",
"node_version": "14.3.0",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "1497946264",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/thiethuynh/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.5 node/v14.3.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/21/cw63scws6mg9wt7p5s1d76z485lhrg/T",
"unsafe_perm": "true",
"prefix": "/usr/local",
"format_package_lock": "true",
"link": ""
}
}
| [
"thiethuynh@c02vv2wahv2h.kms.com.vn"
] | thiethuynh@c02vv2wahv2h.kms.com.vn |
0438c4c2933a41d6024eb24bacef608107e54ced | 0abbfa3b72b024ab4097fca3972a12373ca65c86 | /blog.py | 4924efedf188d68afa28026f49ed4d52faccf432 | [] | no_license | abergmanson/flask-blog | 94d6e5ee96ffb3c9120c173dace9a6405c457285 | 212d7c30662177bf1d9f405d3c502ae5bb3cfc88 | refs/heads/master | 2020-04-02T19:48:31.215163 | 2018-10-27T23:39:04 | 2018-10-27T23:39:04 | 154,747,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,321 | py | # blog.py - controller
from flask import Flask, render_template, request, session, \
flash, redirect, url_for, g
from functools import wraps
import sqlite3
DATABASE = 'blog.db'
USERNAME = 'admin'
PASSWORD = 'admin'
SECRET_KEY = 'a\xb2\x06\x8e\xc4\x95\xd5\xcf\x8e\xe7\xdaI\xfe\xe9{\x9ab\x87yQH\x11\xd3v'
app = Flask(__name__)
# pulls in app configuration by looking for UPPERCASE variables
app.config.from_object(__name__)
# function used for connecting to the database
def connect_db():
return sqlite3.connect(app.config['DATABASE'])
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to log in first.')
return redirect(url_for('login'))
return wrap
@app.route('/', methods=['GET', 'POST'])
def login():
error = None
status_code = 200
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME'] or \
request.form['password'] != app.config['PASSWORD']:
error = "Invalid Credentials. Please try again."
status_code = 401
else:
session['logged_in'] = True
return redirect(url_for('main'))
return render_template('login.html', error=error), status_code
@app.route('/add', methods=['POST'])
@login_required
def add():
title = request.form['title']
post = request.form['post']
if not title or not post:
flash('All fields are required. Please try again.')
return redirect(url_for('main'))
else:
g.db = connect_db()
g.db.execute('insert into posts (title, post) values (?, ?)',[title, post])
g.db.commit()
g.db.close()
flash('New entry was successfully posted!')
return redirect(url_for('main'))
@app.route('/main')
@login_required
def main():
g.db = connect_db()
cur = g.db.execute('select * from posts')
posts = [dict(title=row[0], post=row[1]) for row in cur.fetchall()]
g.db.close()
return render_template('main.html', posts=posts)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash("You were logged out.")
return redirect(url_for('login'))
if __name__ == '__main__':
app.run(debug=True)
| [
"abergmanson@gmail.com"
] | abergmanson@gmail.com |
6f140bcd1b8f4ceb583b9198043bbe563cd41c57 | f63c7ed620b92d7a658c6914bb615d06451966be | /CourseWork/DB_utils/db_manager.py | 6fa8040615db263ed407e5373eaafca7749efff8 | [] | no_license | Bodichelly/DataBaseTermSix | f5b7c31b04d8d54d608cfd737180c6b57cd0e864 | 43547a910bd43f40161fc7fe111c22e3e6d69b62 | refs/heads/master | 2023-05-19T07:21:33.006979 | 2021-05-31T19:08:11 | 2021-05-31T19:08:11 | 340,970,018 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | class DBManager:
def __init__(self, collection):
self.__collection = collection
def insert(self, item) -> str:
return self.__collection.insert_one(item).inserted_id
def find_all(self, query={}):
return self.find(query, True)
def find(self, query={}, multiple=False):
if multiple:
results = self.__collection.find(query)
return [r for r in results]
else:
return self.__collection.find_one(query)
def update_one(self, query_elements, new_values):
return self.__collection.update_one(query_elements, {'$set': new_values})
def delete_one(self, query):
self.__collection.delete_one(query)
def delete_all(self):
self.__collection.delete_many({})
| [
"mrychko.b@gmail.com"
] | mrychko.b@gmail.com |
77f634fccb8884bcd7d3af19f31237f47d667fa6 | 9741a626ea58ef334d4280af1a756be06f69c871 | /apps/funcionarios/migrations/0003_auto_20200814_1320.py | af6160d851fff32f620e4becbd055f0764628477 | [] | no_license | diogo20lemos/gestao_rh | a8a86c7afcad0649dbdbb64e4855e08907917068 | d78139812b13a679699160e0c2be382548864f0f | refs/heads/master | 2023-01-12T11:51:44.918602 | 2020-11-17T16:44:54 | 2020-11-17T16:44:54 | 287,319,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # Generated by Django 2.1.2 on 2020-08-14 16:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('funcionarios', '0002_auto_20200814_1315'),
]
operations = [
migrations.AlterField(
model_name='funcionario',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
]
| [
"diogolemos@EDiagnose.local"
] | diogolemos@EDiagnose.local |
8403af3bcd079db0b7c6de141beec687298b30e4 | 885a4843dc25d881cd3200862c61134806f3c0db | /geekshop/authapp/forms.py | 44154f1041a4f71352948dbe6589a58c7fd3e5cb | [] | no_license | Iuliia-Fedkovych/DjangoGeekshop | 5ed9a9a3f8c54124bdae953a425b388835ce184b | 755d26216864257e5aae5e19a0b1648dd906db1e | refs/heads/master | 2022-12-22T19:49:54.811691 | 2019-10-08T15:48:23 | 2019-10-08T15:48:23 | 204,206,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,664 | py | from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import UserChangeForm
from django.forms import ValidationError, HiddenInput
from django import forms
import random, hashlib
from .models import ShopUser
from .models import ShopUserProfile
class ShopUserLoginForm(AuthenticationForm):
class Meta:
model = ShopUser
fields = ('username', 'password')
def __init__(self, *args, **kwargs):
super(ShopUserLoginForm, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control'
class ShopUserRegisterForm(UserCreationForm):
class Meta:
model = ShopUser
fields = ('username', 'first_name', 'password1', 'password2', 'email', 'age', 'avatar')
def __init__(self, *args, **kwargs):
super(ShopUserRegisterForm, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control'
field.help_text = ''
def clean_age(self):
data = self.cleaned_data['age']
if data < 18:
raise ValidationError("Your are too young")
return data
def save(self, commit=True):
user = super(ShopUserRegisterForm, self).save()
user.is_active = False
salt = hashlib.sha1(str(random.random()).encode('utf8')).hexdigest()
user.activation_key = hashlib.sha1((user.email+salt).encode('utf8')).hexdigest()
user.save()
return user
class ShopUserEditForm(UserChangeForm):
class Meta:
model = ShopUser
fields = ('username', 'first_name', 'email', 'age', 'avatar')
def __init__(self, *args, **kwargs):
super(ShopUserEditForm, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control'
field.help_text = ''
if field_name == 'password':
field.widget = HiddenInput()
def clean_age(self):
data = self.cleaned_data['age']
if data < 18:
raise ValidationError("Your are too young")
return data
class ShopUserProfileEditForm(forms.ModelForm):
class Meta:
model = ShopUserProfile
fields = ('tagline', 'aboutMe', 'gender', 'language', 'url_social')
def __init__(self, *args, **kwargs):
super(ShopUserProfileEditForm, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control'
| [
"Julia_Fedkovich@mail.ru"
] | Julia_Fedkovich@mail.ru |
ec70fb006e54fd288c4aceedd402f1be199e39aa | ba57832efe7845f0806fe0bb724fa92e6e10379d | /inst/ete2/webplugin/webapp.py | e717293fbeb52b5b80c973401184ec5bf5e6ba7d | [] | no_license | didacs/ggsunburst | fad6b5b82bb610b31c59ec7414c2212c903269fa | aec6ad9e3ea036e1a3d1a766dcd78f9dde3e4213 | refs/heads/master | 2021-07-04T03:02:06.459449 | 2020-09-11T21:34:24 | 2020-09-11T21:34:24 | 174,634,181 | 11 | 3 | null | null | null | null | UTF-8 | Python | false | false | 15,184 | py | # -*- coding: utf-8 -*-
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://ete.cgenomics.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2011).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit are available in the documentation.
#
# More info at http://ete.cgenomics.org
#
#
# #END_LICENSE#############################################################
__VERSION__="ete2-2.2rev1056"
import sys
import os
import time
import cgi
from hashlib import md5
import cPickle
ALL = ["WebTreeApplication"]
class WebTreeApplication(object):
""" Provides a basic WSGI application object which can handle ETE
tree visualization and interactions. Please, see the
webplugin example provided with the ETE installation package
(http://pypi.python.org/pypi/ete2)."""
def __init__(self):
# Redirects normal output msgs to stderr, since stdout in web
# application is for the browser
sys.stdout = sys.stderr
self.TreeConstructor = None
self.NODE_TARGET_ACTIONS = ["node", "face"]
self.TREE_TARGET_ACTIONS = ["layout", "search"]
self.actions = []
self._layout = None
self._tree_style = None
self._width = None
self._height = None
self._size_units = "px"
self._custom_tree_renderer = None
self._treeid2layout = {}
self._external_app_handler = None
self._treeid2tree = {}
self._treeid2cache = {}
self._treeid2index = {}
self.queries = {}
self.CONFIG = {
"temp_dir":"/var/www/webplugin/",
"temp_url":"http://localhost/webplugin/tmp",
"DISPLAY" :":0" # Used by ete to render images
}
def set_tree_size(self, w, h, units="px"):
""" Fix the size of tree image """
self._width = w
self._height = h
self._size_units = units
def set_external_app_handler(self, handler):
""" Sets a custom function that will extend current WSGI
application."""
self._external_app_handler = handler
def set_external_tree_renderer(self, handler):
""" If the tree needs to be processed every time is going to
be drawn, the task can be delegated. """
self._custom_tree_renderer = handler
def register_action(self, name, target, handler, checker, html_generator):
""" Adds a new web interactive function associated to tree
nodes. """
self.actions.append([name, target, handler, checker, html_generator])
def set_tree_loader(self, TreeConstructor):
""" Delegate tree constructor. It allows to customize the Tree
class used to create new tree instances. """
self._tree = TreeConstructor
def set_default_layout_fn(self, layout_fn):
""" Fix the layout function used to render the tree. """
self._layout = layout_fn
def set_tree_style(self, handler):
""" Fix a :class:`TreeStyle` instance to render tree images. """
self._tree_style = handler
def _get_html_map(self, img_map, treeid, mapid, tree):
# Scans for node-enabled actions.
nid2actions = {}
nid2face_actions = {}
for n in tree.traverse():
for aindex, (action, target, handler, checker, html_generator) in enumerate(self.actions):
if target == "node" and (not checker or checker(n)):
nid2actions.setdefault(int(n._nid), []).append(aindex)
elif target == "face" and (not checker or checker(n)):
nid2face_actions.setdefault(int(n._nid), []).append(aindex)
html_map = '<MAP NAME="%s" class="ete_tree_img">' %(mapid)
if img_map["nodes"]:
for x1, y1, x2, y2, nodeid, text in img_map["nodes"]:
text = "" if not text else text
area = img_map["node_areas"].get(int(nodeid), [0,0,0,0])
html_map += """ <AREA SHAPE="rect" COORDS="%s,%s,%s,%s" onMouseOut='unhighlight_node();' onMouseOver='highlight_node("#%s", "%s", %s, %s, %s, %s);' onClick='show_context_menu("%s", "%s", "%s");' href="javascript:void('%s');">""" %\
(int(x1), int(y1), int(x2), int(y2),
treeid, text, area[0], area[1], area[2]-area[0], area[3]-area[1],
treeid, nodeid, ','.join(map(str, nid2actions.get(nodeid,[]))), str(nodeid) )
if img_map["faces"]:
for x1, y1, x2, y2, nodeid, text in img_map["faces"]:
text = "" if not text else text
area = img_map["node_areas"].get(int(nodeid), [0,0,0,0])
html_map += """ <AREA SHAPE="rect" COORDS="%s,%s,%s,%s" onMouseOut='unhighlight_node(); hide_face_popup();' onMouseOver='highlight_node("#%s", "%s", %s, %s, %s, %s); show_face_popup("%s", "%s", "%s", "%s");' onClick='show_context_menu("%s", "%s", "%s", "%s");' href="javascript:void('%s');">""" %\
(int(x1),int(y1),int(x2),int(y2),
treeid, text, area[0], area[1], area[2]-area[0], area[3]-area[1],
treeid, nodeid, ','.join(map(str, nid2actions.get(nodeid,[])+nid2face_actions.get(nodeid,[]) )), text,
treeid, nodeid, ','.join(map(str, nid2actions.get(nodeid,[])+nid2face_actions.get(nodeid,[]) )), text,
text,
)
html_map += '</MAP>'
return html_map
def _load_tree(self, treeid, tree=None, cache_file=None):
# if a tree is given, it overwrites previous versions
if tree and isinstance(tree, str):
tree = self._tree(tree)
self._treeid2tree[treeid] = tree
self._load_tree_index(treeid)
elif tree:
self._treeid2tree[treeid] = tree
self._load_tree_index(treeid)
self._treeid2cache[treeid] = cache_file if cache_file else "%s.pkl" %treeid
# if no tree is given, and not in memmory, it tries to loaded
# from previous sessions
if treeid not in self._treeid2tree:
self._load_tree_from_path(self._treeid2cache[treeid])
# Returns True if tree and indexes are loaded
return (treeid in self._treeid2tree) and (treeid in self._treeid2index)
def _load_tree_from_path(self, pkl_path):
tree_path = os.path.join(self.CONFIG["temp_dir"], pkl_path)
if os.path.exists(tree_path):
print cPickle.load(open(tree_path))
t = self._treeid2tree[treeid] = cPickle.load(open(tree_path))
self._load_tree_index(treeid)
return True
else:
return False
def _load_tree_index(self, treeid):
if not self._treeid2index.get(treeid, {}):
tree_index = self._treeid2index[treeid] = {}
t = self._treeid2tree[treeid]
for n in t.traverse():
if hasattr(n, "_nid"):
tree_index[str(n._nid)] = n
return True
else:
return False
def _dump_tree_to_file(self, t, treeid):
tree_path = os.path.join(self.CONFIG["temp_dir"], treeid+".pkl")
cPickle.dump(t, open(tree_path, "w"))
#open(tree_path, "w").write(t.write(features=[]))
def _get_tree_img(self, treeid, pre_drawing_action=None):
img_url = os.path.join(self.CONFIG["temp_url"], treeid+".png?"+str(time.time()))
img_path = os.path.join(self.CONFIG["temp_dir"], treeid+".png")
t = self._treeid2tree[treeid]
tree_index = self._treeid2index[treeid]
if pre_drawing_action:
atype, handler, arguments = pre_drawing_action
if atype in set(["node", "face"]) and len(arguments)==1 and handler:
nid = arguments[0]
node = tree_index.get(str(nid), None)
handler(node)
elif atype == "tree":
handler(t, arguments[0])
elif atype == "search":
handler(t, arguments[0])
elif atype == "layout":
self._treeid2layout[treeid] = handler
layout_fn = self._treeid2layout.get(treeid, self._layout)
mapid = "img_map_"+str(time.time())
img_map = _render_tree(t, img_path, self.CONFIG["DISPLAY"], layout = layout_fn,
tree_style = self._tree_style,
w=self._width,
h=self._height,
units=self._size_units)
html_map = self._get_html_map(img_map, treeid, mapid, t)
for n in t.traverse():
self._treeid2index[treeid][str(n._nid)]=n
if hasattr(n, "_QtItem_"):
n._QtItem_ = None
delattr(n, "_QtItem_")
tree_actions = []
for aindex, (action, target, handler, checker, html_generator) in enumerate(self.actions):
if target in self.TREE_TARGET_ACTIONS and (not checker or checker(t)):
tree_actions.append(aindex)
try:
version_tag = __VERSION__
except NameError:
version_tag = "ete2"
self._dump_tree_to_file(t, treeid)
ete_publi = '<div style="margin:0px;padding:0px;text-align:left;"><a href="http://ete.cgenomics.org" style="font-size:7pt;" target="_blank" >%s</a></div>' %\
(version_tag)
img_html = """<img id="%s" class="ete_tree_img" src="%s" USEMAP="#%s" onLoad='javascript:bind_popup();' onclick='javascript:show_context_menu("%s", "", "%s");' >""" %\
(treeid, img_url, mapid, treeid, ','.join(map(str, tree_actions)))
tree_div_id = "ETE_tree_"+str(treeid)
return html_map+ '<div id="%s" >'%tree_div_id + img_html + ete_publi + "</div>"
# WSGI web application
def __call__(self, environ, start_response):
""" This function is executed when the application is called
by the WSGI apache module. It is, therefore, in charge of
answering web requests."""
path = environ['PATH_INFO'].split("/")
start_response('202 OK', [('content-type', 'text/plain')])
if environ['REQUEST_METHOD'].upper() == 'GET' and environ['QUERY_STRING']:
self.queries = cgi.parse_qs(environ['QUERY_STRING'])
elif environ['REQUEST_METHOD'].upper() == 'POST' and environ['wsgi.input']:
self.queries = cgi.parse_qs(environ['wsgi.input'].read())
else:
self.queries = {}
method = path[1]
treeid = self.queries.get("treeid", [None])[0]
nodeid = self.queries.get("nid", [None])[0]
textface = self.queries.get("textface", [None])[0]
actions = self.queries.get("show_actions", [None])[0]
tree = self.queries.get("tree", [None])[0]
search_term = self.queries.get("search_term", [None])[0]
aindex = self.queries.get("aindex", [None])[0]
if method == "draw":
# if not treeid is given, generate one
if not treeid:
treeid = md5(str(time.time())).hexdigest()
if not self._load_tree(treeid, tree):
return "draw: Cannot load the tree: %s" %treeid
if self._custom_tree_renderer:
t = self._treeid2tree[treeid]
return self._custom_tree_renderer(t, treeid, self)
elif t and treeid:
return self._get_tree_img(treeid=treeid)
else:
return "No tree to draw"
elif method == "get_menu":
if not self._load_tree(treeid):
return "get_menu: Cannot load the tree: %s" %treeid
if nodeid:
tree_index = self._treeid2index[treeid]
node = tree_index[nodeid]
else:
node = None
if textface:
header = str(textface).strip()
else:
header = "Menu"
html = """<div id="ete_popup_header"><span id="ete_popup_header_text">%s</span><div id="ete_close_popup" onClick='hide_popup();'></div></div><ul>""" %\
(header)
for i in map(int, actions.split(",")):
aname, target, handler, checker, html_generator = self.actions[i]
if html_generator:
html += html_generator(i, treeid, nodeid, textface, node)
else:
html += """<li><a href='javascript:void(0);' onClick='hide_popup(); run_action("%s", "%s", "%s");'> %s </a></li> """ %\
(treeid, nodeid, i, aname)
html += '</ul>'
return html
elif method == "action":
if not self._load_tree(treeid):
return "action: Cannot load the tree: %s" %treeid
if aindex is None:
# just refresh tree
return self._get_tree_img(treeid=treeid)
else:
aname, target, handler, checker, html_generator = self.actions[int(aindex)]
if target in set(["node", "face", "layout"]):
return self._get_tree_img(treeid=treeid, pre_drawing_action=[target, handler, [nodeid]])
elif target in set(["search"]):
return self._get_tree_img(treeid=treeid, pre_drawing_action=[target, handler, [search_term]])
elif target in set(["refresh"]):
return self._get_tree_img(treeid=treeid)
return "Bad guy"
elif self._external_app_handler:
return self._external_app_handler(environ, start_response, self.queries)
else:
return '\n'.join(map(str, environ.items())) + str(self.queries) + '\t\n'.join(environ['wsgi.input'])
def _render_tree(t, img_path, display, layout=None, tree_style=None,
w=None, h=None, units="px"):
os.environ["DISPLAY"]=display
return t.render(img_path, layout = layout, tree_style=tree_style,
w=w, h=h, units=units)
| [
"didac.santesmasses@crg.eu"
] | didac.santesmasses@crg.eu |
f5f5a7f0c044be26dad716d7d8acebcbc7cd9915 | e42ce4d913a05b200c5eebe33c2ad1e28295a7f2 | /ProgramaPrincipal/BackEnd/KarplusStrongSynthesis/testing3.py | 65664997cfb9e2288e9751a6665f83060fd7e5f4 | [] | no_license | grupo-1-ASSD-E2/ASSD-TP2 | f8ef179c87bfcc41b33bb058b3061b606a55b3fd | c0f09bcf52dfdc01b3d904cade82256351261dbd | refs/heads/master | 2022-07-02T17:46:24.457589 | 2020-05-16T12:54:14 | 2020-05-16T12:54:14 | 258,070,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py |
import numpy as np
import matplotlib.pyplot as plt
import control
import scipy
from sympy.core.numbers import pi, I, oo
from sympy import re, im
from scipy import signal
l = 199
z = np.linspace(0, 2*np.pi, 10000)
#r=0.999
num = 0.5 * z**(l+1) + 0.5 * z**l
den = z**(l+1) - 0.5 * 0.1 * (z + 1)
num_c = np.array([0.5, 0.5])
num_ceros = np.zeros(l)
num_c = np.concatenate((num_c, num_ceros)) #coefs del num
den_c = np.array([1])
den_cc = np.array([- 0.5 * 0.999, - 0.5 * 0.999])
den_ceros = np.zeros(l-1)
den_c = np.concatenate((den_c, den_ceros, den_cc))
tf = control.TransferFunction(num_c,den_c)
control.pzmap(tf, Plot = True, title = 'Diagrama de polos y ceros')
#control.bode_plot(tf)
plt.grid(which = 'both')
plt.show()
import scipy
from scipy import signal
| [
"sarribere@itba.edu.ar"
] | sarribere@itba.edu.ar |
36859f62160f94e4c4d427461f1f1f7aaa00bab4 | 5efc7ab8a298a026bad44596e18de104985a4b71 | /fn_wiki/tests/test_funct_fn_wiki_create_update.py | 9aa1fce69272eadbcde79a40b9ddf69ec5e91908 | [
"MIT"
] | permissive | RomanDerkach/resilient-community-apps | 4cf0abe443411582e9f57491364ecc2d844ba30d | 1f60fb100e6a697df7b901d7a4aad707fea3dfee | refs/heads/master | 2023-03-11T21:56:18.307942 | 2021-03-02T16:09:33 | 2021-03-02T16:09:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,815 | py | # -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
# Package and function under test for the pytest-resilient-circuits harness.
PACKAGE_NAME = "fn_wiki"
FUNCTION_NAME = "fn_wiki_create_update"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_fn_wiki_create_update_function(circuits, function_params, timeout=5):
    """Fire fn_wiki_create_update through the circuits test harness.

    Re-raises any exception the FunctionComponent raised; otherwise waits
    up to *timeout* seconds for the result event and returns its value.
    """
    # Create the submitTestFunction event
    evt = SubmitTestFunction("fn_wiki_create_update", function_params)
    # Fire a message to the function
    circuits.manager.fire(evt)
    # circuits will fire an "exception" event if an exception is raised in the FunctionComponent
    # return this exception if it is raised
    exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)
    if exception_event is not False:
        exception = exception_event.args[1]
        raise exception
    # else return the FunctionComponent's results
    else:
        event = circuits.watcher.wait("fn_wiki_create_update_result", parent=evt, timeout=timeout)
        assert event
        assert isinstance(event.kwargs["result"], FunctionResult)
        # Block until the FunctionResult is marked complete before reading it.
        pytest.wait_for(event, "complete", True)
        return event.kwargs["result"].value
class TestFnWikiCreateUpdate:
    """Tests for the fn_wiki_create_update function."""
    def test_function_definition(self):
        """ Test that the package provides customization_data that defines the function """
        func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
        assert func is not None
    # Failure fixtures: missing path, unknown page, unknown parent page.
    mock_fail_path = {
        "wiki_path": None,
        "wiki_body": "sample text",
        "wiki_create_if_missing": False
    }
    mock_fail_page_not_found = {
        "wiki_path": "not found",
        "wiki_body": "sample text",
        "wiki_create_if_missing": False,
    }
    mock_fail_parent_not_found = {
        "wiki_path": "parent not found/new page",
        "wiki_body": "sample text",
        "wiki_create_if_missing": False
    }
    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (mock_fail_path, None),
        (mock_fail_page_not_found, None),
        (mock_fail_parent_not_found, None),
    ])
    def test_fail_update(self, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        with pytest.raises(ValueError):
            results = call_fn_wiki_create_update_function(circuits_app, mock_inputs)
            # NOTE(review): the two asserts below never execute — the call
            # above is expected to raise before reaching them.
            assert(results['success'] == False)
            assert(results['reason'])
    # Success fixtures (paths include non-ASCII titles on purpose).
    mock_success_title = {
        "wiki_path": "ΣΤ",
        "wiki_body": "ΣΤ",
        "wiki_create_if_missing": True
    }
    mock_success_w_parent_title = {
        "wiki_path": "ΣΤ3/new3",
        "wiki_body": "new3",
        "wiki_create_if_missing": True
    }
    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (mock_success_title, None),
        (mock_success_w_parent_title, None),
    ])
    def test_create_success(self, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        results = call_fn_wiki_create_update_function(circuits_app, mock_inputs)
        assert(results['success'])
    mock_success_update_title = {
        "wiki_path": "parent1/json2",
        "wiki_body": "new3 ΣΤ3",
        "wiki_create_if_missing": False
    }
    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (mock_success_update_title, None)
    ])
    def test_update_success(self, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        results = call_fn_wiki_create_update_function(circuits_app, mock_inputs)
        assert(results['success'])
    mock_success_update_parent_title = {
        "wiki_path": "ΣΤ3/ΣΤ4",
        "wiki_body": "ΣΤ4",
        "wiki_create_if_missing": True
    }
    mock_success_update_parent_subparent = {
        "wiki_path": "parent1/json2/ΣΤ5",
        "wiki_body": "ΣΤ5",
        "wiki_create_if_missing": True
    }
    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (mock_success_update_parent_title, None),
        (mock_success_update_parent_subparent, None)
    ])
    def test_update_parent_success(self, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        results = call_fn_wiki_create_update_function(circuits_app, mock_inputs)
        assert(results['success'])
| [
"ihor.husar@ibm.com"
] | ihor.husar@ibm.com |
4716c0945281c11c5115ee97263bdd2283f2cc7f | 623cfa641cef0b804472e7de22bed294fc79222d | /726. Number of Atoms.py | 07e3792c6f0957b2d2f9ecb3ba079598e7ca9846 | [] | no_license | arunnair018/Leetcode_problems | edfaf502f5849d890591d05c79aad73e6b91e60f | 224cb9c2f628221eb0ed4b76a6c166077a82427a | refs/heads/master | 2021-04-22T17:32:50.601458 | 2020-05-27T15:57:11 | 2020-05-27T15:57:11 | 249,865,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | # input
formula = "Mg(OH)2"
formula = "K4(ON(SO3)2)2"
# solution start here
# solution one - Takes so much of memory
'''
import re
from collections import Counter
parse = re.findall('([A-Z][a-z]*)(\d*)|(\()|(\))(\d*)',formula)
stack=[[]]
for name,m1,o,c,m2 in parse:
if o:
stack.append([])
if name:
stack[-1].append(name * int(m1 or 1))
if c:
stack[-1] = stack[-1]* int(m2 or 1)
stack[-2].extend(stack[-1])
stack.pop()
s=''
for i in stack:
s+=''.join(i)
s = re.findall('[A-Z][a-z]*',s)
d = dict(Counter(s))
out = ''
for i in sorted(d):
out+=str(i)
if d[i] != 1:
out+=str(d[i])
print(out)
'''
# solution two - takes less memory
import re
from collections import Counter
parse = re.findall('([A-Z][a-z]*)(\d*)|(\()|(\))(\d*)',formula)
for i in parse:
print(i)
stack = [Counter()]
for name, m1, o, c, m2 in parse:
if name:
stack[-1][name] += int(m1 or 1)
if o:
stack.append(Counter())
if c:
top = stack.pop()
for k in top:
stack[-1][k] += top[k] * int(m2 or 1)
out = ''
stack = dict(stack[-1])
for i in sorted(stack):
out+=str(i)
if stack[i] != 1:
out+=str(stack[i])
print(out) | [
"arunnair018@gmail.com"
] | arunnair018@gmail.com |
9fbf418d526b5c6f843174ac7e1e448dd6c7381a | 4604906df0b2ca72d4c70aeaa5d59809403d4eee | /app/controllers/Books.py | 2364d4c31a44c970e864bcb0e24021213626cc90 | [] | no_license | SarahPak/red_belt_prep | 2d8a5b17c9b5b9d5f2b2e3aa47287a8bbb896627 | 872bffef9ed8766734a543bd8fd1eaff5d524af5 | refs/heads/master | 2021-01-22T16:37:13.252225 | 2016-01-19T00:22:20 | 2016-01-19T00:22:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | from system.core.controller import *
class Books(Controller):
def __init__(self, action):
super(Books, self).__init__(action)
self.load_model('Book')
self.load_model('Review')
def index(self):
recent_books = self.models['Book'].index_recent()
remaining_books = self.models['Book'].index_remaining()
return self.load_view('books/index.html', recent=recent_books, remaining=remaining_books)
def show(self, id):
book = self.models['Book'].show(id)
return self.load_view('books/show.html', book=book)
def create_review(self):
print request.form
self.models['Review'].create(request.form)
return redirect('/books/'+request.form['book_id'])
def delete_review(self, id):
self.models['Review'].delete(id)
return redirect ('/books/'+request.form['book_id'])
| [
"garik@Gariks-MBP.hsd1.wa.comcast.net"
] | garik@Gariks-MBP.hsd1.wa.comcast.net |
a53f3d0502b7a1b2876957108f43474496804be5 | 83164751a398ac3440ec04eea56cd04649e454cc | /1036.py | 7d4fdc0f2d14c0dc5fa50358c52900c24e20266e | [] | no_license | rezoanahmed/uri_solutions_python | 68df0d2df7640c20d9b94461aa04975ec7bb967f | 18d23a51ed0f15e1540a34c62d1d8220cec9461a | refs/heads/master | 2023-01-24T03:31:40.318351 | 2020-11-20T04:58:10 | 2020-11-20T04:58:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import math
A,B,C = input().split()
x = (float(B)*float(B)) - (4*float(A)*float(C))
if(float(A)==0 or x < 0):
print('Impossivel calcular')
else:
first = (-float(B) + math.sqrt(x))/ (2*float(A))
second = (-float(B) - math.sqrt(x))/ (2*float(A))
print('R1 =',"%.5f"%first)
print('R2 =',"%.5f"%second) | [
"rezoanahmed@users.noreply.github.com"
] | rezoanahmed@users.noreply.github.com |
bd6139114bbba9ceb2caf32b1111ce53801c7afd | 16dee776459d28150b21e959a357ce772e66db5c | /batch_op_py3.py | 6dad21b8ec71f52a0713cd2e1f86ae27acefcfa9 | [] | no_license | ixbear/bear_ops | 8fb4a39ea2333bf55684d0875e5a2380cab6ed60 | b4daee3bcee96197e98ea5505f886d6e64cd6d10 | refs/heads/master | 2022-12-15T00:00:45.296840 | 2020-09-15T13:50:35 | 2020-09-15T13:50:35 | 76,638,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,053 | py | #!/usr/bin/python
__author__ = "https://zhukun.net"
import getpass
import sys
from io import StringIO
from optparse import OptionParser
from multiprocessing import Process
from multiprocessing import Queue
import traceback
try:
import paramiko
except ImportError:
sys.stderr.write('SYS: Import lib paramiko first.\n')
sys.exit()
class RemoteTask(object):
    """One remote command-execution request.

    Attributes:
        ip: target host address.
        user: SSH login name.
        pwd_lst: candidate passwords, copied defensively so later mutation
            of the caller's list does not affect this task.
        cmd: shell command to run on the remote host.
        timeout: connection timeout in seconds.
    """

    def __init__(self, ip, user, pwd_lst, cmd, timeout=5):
        self.ip = ip
        self.user = user
        # Defensive copy: the same password list is shared across tasks.
        self.pwd_lst = list(pwd_lst)
        self.cmd = cmd
        self.timeout = timeout
class TaskResult(object):
    """Outcome of one RemoteTask: exit code plus captured output streams."""

    def __init__(self, task):
        self.task = task
        # Filled in by the worker once the remote command finishes.
        self.sout = None  # captured remote stdout
        self.serr = None  # captured remote stderr
        self.rc = None    # remote exit status (-1 on connection/run failure)
# Shared multiprocessing queues: pending RemoteTasks in, TaskResults out.
task_q = Queue()
result_q = Queue()
def work(task_q, result_q):
    """Worker-process loop: pull RemoteTasks, run them, push TaskResults.

    Runs forever; the parent terminates the daemonized workers once all
    results are collected. Any exception escaping run_cmd is converted
    into a rc=-1 result with the traceback text in serr.
    """
    while True:
        task = task_q.get()
        try:
            rc, sout, serr = run_cmd(task)
        except Exception:
            buf = StringIO()
            traceback.print_exc(file=buf)
            rc = -1
            sout = ''
            serr = buf.getvalue()
            buf.close()
        # The original constructed a throwaway TaskResult before the try
        # block and a second one in `finally`; a single one suffices.
        result = TaskResult(task)
        result.rc = rc
        result.sout = sout
        result.serr = serr
        result_q.put(result)
def run_cmd(task):
    """Run task.cmd on task.ip over SSH, trying each candidate password.

    Returns (rc, sout, serr): the remote exit status and captured remote
    stdout/stderr. rc is -1 when no password authenticates or the
    connection/command fails; serr then carries a short reason.
    """
    rc = -1
    sout = ''
    serr = ''
    for password in task.pwd_lst:
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            client.connect(hostname=task.ip, port=22, username=task.user,
                           password=password)
            # Bug fix: run the task's own command. The original read the
            # module-level global `cmd`, which only worked by accident
            # because fork() copied __main__'s namespace into the workers.
            stdin, stdout, stderr = client.exec_command(task.cmd)
            sout = stdout.read()
            serr = stderr.read()
            # recv_exit_status blocks until the remote command completes.
            rc = stdout.channel.recv_exit_status()
            return rc, sout, serr
        except paramiko.AuthenticationException:
            rc = -1
            serr = 'SSH authentication failed\n'
        except Exception as e:
            rc = -1
            serr = 'Run cmd exception %s\n' % e
        finally:
            # Bug fix: always close the client. The original leaked one
            # socket per attempt and never closed the successful session.
            client.close()
    return rc, sout, serr
def process_result(result):
    """Handle one finished TaskResult (currently: print to stdout/stderr)."""
    #log_result(result)
    print_result(result)
def log_result(result):
    """Persist one TaskResult under ./log/ as <ip>.rc, <ip>.out and <ip>.err.

    Writes the exit code, captured stdout and captured stderr respectively.
    The ./log directory must already exist.
    """
    ip = result.task.ip
    # Bug fix: the original opened every file without 'w' mode (read-only
    # default), so it could never write; `with` also guarantees the files
    # are closed, which the original's bare try/finally did not.
    with open('./log/%s.rc' % ip, 'w') as rc_file:
        rc_file.write('%s' % result.rc)
    with open('./log/%s.out' % ip, 'w') as out_file:
        out_file.write(result.sout)
    with open('./log/%s.err' % ip, 'w') as err_file:
        # Bug fix: the original wrote result.out, an attribute that does
        # not exist on TaskResult; stderr belongs in the .err file.
        err_file.write(result.serr)
def print_result(result):
    """Print one TaskResult as pipe-delimited lines.

    Format: "<ip>|stdout|<line>" per stdout line on stdout, and
    "<ip>|exitcode|<rc>" / "<ip>|stderr|<line>" on stderr.
    """
    ip = result.task.ip
    rc = result.rc
    # Bug fix: paramiko's stdout.read()/stderr.read() return bytes on
    # Python 3; the original formatted them as b'...' reprs.
    sout = _as_text(result.sout)
    serr = _as_text(result.serr)
    if rc != 0:
        sys.stderr.write('{ip}|exitcode|{rc}\n'.format(ip=ip, rc=rc))
    for line in sout.splitlines():
        print('{ip}|stdout|{line}'.format(ip=ip, line=line))
    for line in serr.splitlines():
        sys.stderr.write('{ip}|stderr|{line}\n'.format(ip=ip, line=line))


def _as_text(data):
    """Decode bytes to str (UTF-8, replacing bad bytes); None becomes ''."""
    if isinstance(data, bytes):
        return data.decode('utf-8', errors='replace')
    return data if data is not None else ''
if __name__ == '__main__':
    # CLI: batch_op.py -f IPLIST [-u USER] [-b] [-n N] remote command...
    parser = OptionParser()
    parser.add_option("-f", "--file", dest="ipfile",
                      help="IP list file", metavar="IPLIST")
    parser.add_option('-u', '--user', dest='user', default='admin',
                      help='Username to logon server', metavar='USER')
    parser.add_option('-b', '--trybest', dest='trybest', action='store_true', default=False,
                      help='Try best to finish task', metavar='TRYBEST')
    # Bug fix: declare the option as an int. Without type='int', a value
    # supplied on the command line arrives as a string and range(num_worker)
    # below raises TypeError.
    parser.add_option('-n', '--num_worker', dest='num_worker', type='int', default=16,
                      help='Number of worker', metavar='NUM_WORKER')
    options, args = parser.parse_args()
    if not options.ipfile:
        parser.error('IPFILE must be present.')
    ipfile = options.ipfile
    username = options.user
    try_best = options.trybest
    num_worker = options.num_worker
    # Everything after the options is the remote command to execute.
    cmd = ' '.join(args)
    # Collect candidate passwords interactively until an empty line.
    password_lst = []
    index = 1
    while True:
        password = getpass.getpass(prompt='Password %s:' % index)
        index += 1
        if password != '':
            password_lst.append(password)
            continue
        else:
            break
    ip_list = []
    ip_list.extend([x.strip() for x in open(ipfile).read().splitlines()])
    # Spawn the worker pool (daemonized so terminate() at the end is safe).
    pool = []
    for x in range(num_worker):
        worker = Process(target=work, args=(task_q, result_q))
        worker.daemon = True
        pool.append(worker)
    # Queue one task per target IP. (Removed the original's unused `tasks` list.)
    for ip in ip_list:
        task = RemoteTask(ip, username, password_lst, cmd)
        task_q.put(task)
    for worker in pool:
        worker.start()
    # Drain exactly one result per queued IP, then shut the workers down.
    res_num = 0
    while res_num < len(ip_list):
        result = result_q.get()
        process_result(result)
        res_num += 1
    for worker in pool:
        worker.terminate()
    for worker in pool:
        worker.join()
"noreply@github.com"
] | noreply@github.com |
edffebf7c4e3acfe778acdda02406168fda32c52 | 5c9e381622db7a49b76b28767a882f31b71485e2 | /Junior_Training_Sheet_template_V6.3/CF_A/Bit++.py | 3aa18f40050e27ce6b73f7f20b3c129bdb66f674 | [] | no_license | mhrous/algorithm | c93e8aaab446f7670c115b849d9bf13ec84dcd75 | 80cad374ae30be39b76354e4fab7e969b92be37a | refs/heads/master | 2022-03-20T00:32:20.553603 | 2019-11-29T05:53:42 | 2019-11-29T05:53:42 | 197,979,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | n = int(input())
count = 0
for i in range(n):
_str = input()
if "+" in _str:
count += 1
else:
count -= 1
print(count) | [
"your_email_address@example.com"
] | your_email_address@example.com |
b2a7944978607aeb4c5ab3204ff8c59a1050edd8 | e1f1699c79cd709818aae5f9d2d73a8d104365e0 | /Computer-vidion/Archive/opencv_deneme_Kirmizi_DireklandAtma_1.py | 79e56c8ad09590908abea3fd8daaf446539778d2 | [] | no_license | sdeveloper35/DEHA_Algorithm | 966db3cbf98c8f43ef1feb23bac6674fd25510c6 | 15a55981e65ae8e462b7306dfd42b85faeb378ab | refs/heads/master | 2023-07-15T19:57:23.048192 | 2021-08-26T11:05:17 | 2021-08-26T11:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,696 | py | # import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import numpy as np
import time
from dronekit import connect, VehicleMode, LocationGlobalRelative, Command, LocationGlobal
from pymavlink import mavutil
import math
# --- Mission / vision configuration --------------------------------------
target = False
land_sensivity = 50 #pixel tolerance for centering over the pad
#Dronekit
connection_address = '/dev/ttyACM0' # serial port to the flight controller — check this
baud_rate = 115200
take_off_altitude = 5 #in meter
ground_speed = 5 # m/s
air_speed = 5 # m/s
land_speed= 60 # cm/s
rtl_altitude = 5 #in meter
Velocity_x = 1 # X-axis velocity (m/s, body frame)
Velocity_y = 0 # Y-axis velocity
Velocity_z = 0 # Z-axis velocity (NED: positive is down)
# Previous axis velocities, kept to detect when a new setpoint must be sent.
Velx_d = Velocity_x
Vely_d = Velocity_y
Velz_d = Velocity_z
alt_sensivity = 0.2
#Connect to the vehicle on given address
print ("\nConnecting to vehicle on: " + connection_address + " with baud rate: " + str(baud_rate))
vehicle = connect(connection_address, wait_ready=True, baud=baud_rate)
# -- Define arm and takeoff
def arm_and_takeoff(altitude):
    """Arm the copter in GUIDED mode and climb to *altitude* meters.

    Blocks until armable, armed, and within 1 m of the target altitude.
    Uses the module-level `vehicle` connection.
    """
    while not vehicle.is_armable:
        print("waiting to be armable")
        time.sleep(1)
    print("Arming motors")
    vehicle.mode = VehicleMode("GUIDED")
    vehicle.armed = True
    while not vehicle.armed: time.sleep(1)
    print("Taking Off")
    vehicle.simple_takeoff(altitude)
    while True:
        v_alt = vehicle.location.global_relative_frame.alt
        print(">> Altitude = %.1f m" % v_alt)
        if v_alt >= altitude - 1.0:
            print("Target altitude reached")
            break
        time.sleep(1)
# -- Define the function for sending mavlink velocity command in body frame
def set_velocity_body(vehicle, vx, vy, vz):
    """Send one SET_POSITION_TARGET_LOCAL_NED velocity setpoint (body frame).

    Remember: vz is positive downward!!!
    http://ardupilot.org/dev/docs/copter-commands-in-guided-mode.html

    Bitmask to indicate which dimensions should be ignored by the vehicle
    (a value of 0b0000000000000000 or 0b0000001000000000 indicates that
    none of the setpoint dimensions should be ignored). Mapping:
    bit 1: x, bit 2: y, bit 3: z,
    bit 4: vx, bit 5: vy, bit 6: vz,
    bit 7: ax, bit 8: ay, bit 9:
    """
    msg = vehicle.message_factory.set_position_target_local_ned_encode(
        0,
        0, 0,
        mavutil.mavlink.MAV_FRAME_BODY_NED,
        0b0000111111000111, # -- BITMASK -> Consider only the velocities
        0, 0, 0, # -- POSITION
        vx, vy, vz, # -- VELOCITY
        0, 0, 0, # -- ACCELERATIONS
        0, 0)
    vehicle.send_mavlink(msg)
    vehicle.flush()
def yukseklik_ayarla(vehicle, yukseklik, hiz, Vx, Vy):
    """Climb or descend to *yukseklik* meters at *hiz* m/s ("adjust altitude").

    Keeps the horizontal body-frame velocities Vx/Vy while moving and blocks
    until the relative altitude is within alt_sensivity meters of the target.
    NOTE(review): appears unused in the visible script — confirm before removal.
    """
    v_altitude = vehicle.location.global_relative_frame.alt
    if yukseklik > v_altitude:
        print("Yukseliniyor...")
        # NED convention: a negative vz climbs.
        set_velocity_body(vehicle, Vx, Vy, -hiz)
        while True:
            v_alt = vehicle.location.global_relative_frame.alt
            print(">> Altitude = %.1f m" % v_alt)
            if v_alt >= yukseklik - alt_sensivity:
                print("Target altitude reached")
                break
    else:
        print("Alcaliniyor...")
        set_velocity_body(vehicle, Vx, Vy, hiz)
        while True:
            v_alt = vehicle.location.global_relative_frame.alt
            print(">> Altitude = %.1f m" % v_alt)
            if v_alt <= yukseklik + alt_sensivity:
                print("Target altitude reached")
                break
# Shared vision state updated by findRedContours() each frame.
contour_area = 0
cX=0
cY=0
def findRedContours():
    """Locate the largest red contour in the current frame and annotate it.

    Reads module globals (contours, image, img_Center_X/Y) and updates
    target, contour_area, cX, cY and ptin_contour in place. A contour only
    counts as a target when its area lies in [2000, 180000] px^2.
    """
    global target
    global contour_area # area kept as a module-level global
    global cX
    global cY
    global ptin_contour
    # Pick the largest contour
    if len(contours) != 0:
        c = max(contours, key=cv2.contourArea) # contour with the maximum area
        contour_area = int(cv2.contourArea(c))
        #if showCircleArea:
        #    print(contour_area)
        if contour_area >= 2000 and contour_area <= 180000:
            target = True
            # >= 0 means the image center lies inside (or on) the contour.
            Contour_Check = cv2.pointPolygonTest(c, (img_Center_X , img_Center_Y),False)
            #print(Contour_Check)
            if Contour_Check >= 0:
                ptin_contour = True
                contour_color = (0 , 255 , 0)
                #pool_font_color = (255 , 255 , 0) # removed so the pool label keeps a single color
            else:
                ptin_contour = False
                contour_color = (255 , 0 , 0)
                #pool_font_color = (0 , 0 , 255) # removed so the pool label keeps a single color
            # Calculate Moments for each contour
            M = cv2.moments(c)
            if M["m00"] != 0:
                # Calculate x,y coordinate of center
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
                cv2.circle(image, (cX, cY), 5, (0, 0, 255), -1)
                cv2.drawContours(image, [c], 0, contour_color, 3)
                cv2.arrowedLine(image, (320, 240), (cX, cY), (255, 255, 0), 2)
                # 'Havuz' = "pool" (the red landing pad label).
                cv2.putText(image,'Havuz', (cX+10, cY+10),cv2.FONT_HERSHEY_SIMPLEX, 1, pool_font_color, 2, cv2.LINE_AA)
        else:
            target = False
            ptin_contour = False
# --- Vision flags, camera setup and mission start -------------------------
upt = False
ptin_contour = False
circle_color = (0 , 255 , 0)
contour_color = (0 , 255 , 0)
pool_font_color = (255 , 255 , 0)
Use_Circle_Check = False
Aim_Length = 40
total_Mean_Check = False
hsv_Mean_limit = 60
showCircleArea = True
ShowMessage = True # latch: print the inside-the-circle message only once (logging)
ShowMessageTarget = True # latch: print the circle-detected message only once (logging)
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
# allow the camera to warmup
time.sleep(0.1)
#Dronekit
#Set ground speed
#vehicle.groundspeed = ground_speed
#print (" Ground speed is set to " + str(ground_speed) + " m/s")
#Set air speed
#vehicle.airspeed = air_speed
#print ("Air speed is set to " + str(air_speed) + " m/s")
#Set rtl altitude
#vehicle.parameters['RTL_ALT'] = rtl_altitude
#Set landing speed
#vehicle.parameters['LAND_SPEED'] = land_speed
user_approval = input("Please press type 'arm' to start mission or type 'cancel' to cancel mission: ")
while not (user_approval == "arm" or user_approval == "cancel"):
    print("Invalid input, please type again...")
    user_approval = input("Please press type 'arm' to start mission or type 'cancel' to cancel mission: ")
if user_approval == "arm":
    # From Copter 3.3 you will be able to take off using a mission item. Plane must take off using a mission item (currently).
    arm_and_takeoff(take_off_altitude)
# NOTE(review): typing 'cancel' skips takeoff but still falls through to the
# velocity command and the capture loop below — confirm this is intended.
set_velocity_body(vehicle, Velocity_x, Velocity_y, Velocity_z)
firstMessage = True # latch so the "first frame received" message prints only once
frame_counter = 0
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image, then initialize the timestamp
# and occupied/unoccupied text
image = frame.array
firstMessage
if firstMessage:
print("ilk frame alindi...")
firstMessage = False
frame_counter
frame_counter = frame_counter+1
if frame_counter >= 300:
print("Frame alındı (300 frame)")
frame_counter = 0
img_Center_X = int(320)
img_Center_Y = int(240)
#HSV'ye dönüştür
hsv = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
lower_red = np.array([150, 50, 50])
upper_red = np.array([200, 255, 255])
#Thresholding
red_hue_range = cv2.inRange(hsv, lower_red, upper_red)
#Threshold ile ayrılan resmi tekrar maskele
res = cv2.bitwise_and(image, image, mask=red_hue_range)
#3x3 blurla (Kullanılmıyor)
gray_blurred1 = cv2.blur(red_hue_range,(3,3))
#kırmızı ayıkladığın yuvarlağı gray yap
gray_blurred2 = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY)
#3x3 blue uygula (üstteki gereksiz)
gray_blurred = cv2.blur(gray_blurred2,(10,10))
#kenarları bul - KULLANMADIN
#canny_edge = cv2.Canny(red_hue_range,50,240)
#--------------------------------------------------
#detected_circles = cv2.HoughCircles(image=gray_blurred,method=cv2.HOUGH_GRADIENT,dp=1,minDist=600,param1=50,param2=30,minRadius=50,maxRadius=150)
contours, hierarchy = cv2.findContours(red_hue_range, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# resmin hsv değerlerinin ortalamasını alma
hsv2 = cv2.cvtColor(res, cv2.COLOR_BGR2HSV)
# Disassemble for each channel
h, s, v = cv2.split(hsv2)
mean1 = h.mean()
mean2 = s.mean()
mean3 = v.mean()
#---print('H:% .1f S:% .1f V:% .1f' % (mean1, mean2, mean3))
# ek (toplam ortalama)
total_mean = (mean1 + mean2 + mean3) / 3
if total_mean <= hsv_Mean_limit:
total_Mean_Check = True
else:
total_Mean_Check = False
if Use_Circle_Check:
if total_mean <= hsv_Mean_limit:
total_Mean_Check = True
#draw circles that are detected--------------------------------------
if detected_circles is not None:
target = True
upt=True
#convert the circle parameters a, b and r to integers
detected_circles = np.uint16(np.around(detected_circles))
for pt in detected_circles[0, :]:
a, b, r = pt[0], pt[1], pt[2]
#Draw the circumference of the circle
cv2.circle(image, (a,b), r, circle_color,3)
#Draw a small circle to show th ecenter
cv2.circle(image, (a,b), 1, (0,0,255),3)
#Belirlenen daireleri çizme buraya kadar---------------------------- (Altta contour muhabbeti var)
cv2.putText(image,'Havuz', (a+10, b+10),cv2.FONT_HERSHEY_SIMPLEX, 1, pool_font_color, 2, cv2.LINE_AA)
else:
target = False
upt = False
findRedContours()
else:
findRedContours()
total_Mean_Check = False
else:
findRedContours()
v_alt = vehicle.location.global_relative_frame.alt
if target:
if ShowMessageTarget:
print("Daire tespit EDILDI")
ShowMessageTarget = False
if ptin_contour == True:
time.sleep(1)
if ShowMessage:
print("Daire'nin ICINDE")
vehicle.mode = VehicleMode("LAND")
ShowMessage = False
break
#v_alt = vehicle.location.global_relative_frame.alt
#Alçalmaya başla
if v_alt >= 2:
Velocity_z = 0.2
else:
Velocity_z = 0
if abs(cX-320) > land_sensivity:
if (cX-320) > 0:
Velocity_y = -0.2
else:
Velocity_y = 0.2
else:
Velocity_y = 0
if abs(cY-240) > land_sensivity:
if (cY-240) > 0:
Velocity_x = 0.2
else:
Velocity_x = -0.2
else:
Velocity_x = 0
#Daire'nin dışında ise
else:
if not ShowMessage:
print("Daire'nin DISINDA")
ShowMessage = True
Velocity_z = 0
if abs(cX-320) > land_sensivity:
if (cX-320) > 0:
Velocity_y = -1
else:
Velocity_y = 1
else:
Velocity_y = 0
if abs(cY-240) > land_sensivity:
if (cY-240) > 0:
Velocity_x = 1
else:
Velocity_x = -1
else:
Velocity_x = 0
#v_alt = vehicle.location.global_relative_frame.alt
#if v_alt <= 2:
# Velocity_z = 0
if v_alt <= 2:
Velocity_z = 0
#print("Arac 2m'ye indi zorunlu LAND atiyor...")
#vehicle.mode = VehicleMode("LAND")
#Araç 2m'ye inmiş ve daireyi ortalamış ise
if (v_alt - 0.2) <= 2 and abs(cY-240) <= land_sensivity and abs(cX-320) <= land_sensivity:
print('Vehicle landing...')
vehicle.mode = VehicleMode("LAND")
#Eğer daireyi görmüyorsa
else:
if not ShowMessageTarget:
print("Daire tespit EDILEMEDI")
ShowMessageTarget = True
#araç 2m'ye inmiş ve daireyi görmüyorsa
if v_alt <= 2 and not ptin_contour:
Velocity_z = 0
print("Arac 2m'ye indi zorunlu LAND atiyor...")
vehicle.mode = VehicleMode("LAND")
#Herhangi bir hız değeri değişti ise
if Velocity_x != Velx_d or Velocity_y != Vely_d or Velocity_z != Velz_d:
print("X hizi : %f - Y hizi : %f - Z hizi %f" % (Velocity_x,Velocity_y,Velocity_z))
print("cX : %d - cY : %d - abs x = %d - abs y = %d" % (cX,cY,abs(cX-320),abs(cY-240)))
Velx_d = Velocity_x
Vely_d = Velocity_y
Velz_d = Velocity_z
#Eğer herhangi bir hız değişti ise gönder
set_velocity_body(vehicle, Velocity_x, Velocity_y, Velocity_z)
#Dairenin içinde mi?
if upt:
distance = (((a-img_Center_X)**2)+((b-img_Center_Y)**2))**0.5
if abs(distance) < r:
circle_color = (0,255,0)
else:
circle_color = (255,0,0)
cv2.arrowedLine(image,(img_Center_X , img_Center_Y) , (a,b) , (255,255,0) , 2)
#hsv ortalamalarını yazdır------------------------------
image = cv2.putText(image,'H:% .1f S:% .1f V:% .1f Tot: % .1f' % (mean1, mean2, mean3,total_mean),(50,50),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,0,0),2,cv2.LINE_AA )
if showCircleArea:#Tamamlanmadı
cv2.putText(image, 'Kirmizi Alan : %d' % contour_area, (50, 100),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
#Kameranın pist tesğit edip etmediği
#if total_Mean_Check:
# if target:
# image = cv2.putText(image, 'Pist Tespit Edildi' , (50, 75),
# cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
# else:
# image = cv2.putText(image, 'Pist Tespit Edilemedi', (50, 75),
# cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
# Merkez noktası ekle (Drone Aim)
#cv2.rectangle(image, (img_Center_X - int(Aim_Length / 2), img_Center_Y - int(Aim_Length / 2)),
# (img_Center_X + int(Aim_Length / 2), img_Center_Y + int(Aim_Length / 2)), (0, 255, 255), 1)
# cv2.circle(image , ( img_Center_X , img_Center_Y ) , int(Aim_Length/2) , (50,255,255) , 2)
cv2.line(image , ( img_Center_X , img_Center_Y - int(Aim_Length/2) ) , ( img_Center_X , img_Center_Y + int(Aim_Length/2) ) , (50,255,255) , 2)
cv2.line(image , ( img_Center_X - int(Aim_Length/2) , img_Center_Y ) , ( img_Center_X + int(Aim_Length/2) , img_Center_Y ) , (50,255,255) , 2)
#cv2.namedWindow('DETECTED CIRCLE',cv2.WINDOW_NORMAL) #Window oluştur (Bu satır olmadan sadece imshow() ile de yapılabilir)
#cv2.resizeWindow('DETECTED CIRCLE',1280,920) #Windowun size'ını değiştir
#video.write(image) #Şuan video kaydetmek istemediğim için kullanmıyorum
#cv2.imshow('DETECTED CIRCLE',image)
#cv2.imshow('Gray',gray_blurred)
#cv2.imshow('MASK', res)
#cv2.imshow('CANNY',canny_edge)
#cv2.imshow('RedHue',red_hue_range)
#cv2.setMouseCallback('DETECTED CIRCLE', on_EVENT_LBUTTONDOWN)
#cv2.waitKey(0)
"""
key=cv2.waitKey(10)
if key & 0xFF == ord('q'):
break
if key & 0xFF == ord('p'):
while(True):
if cv2.waitKey(10) & 0xFF == ord('s'):
break
"""
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
#cv2.destroyAllWindows()
| [
"furkanhtk@gmail.com"
] | furkanhtk@gmail.com |
5f5a0e831a68c5ef684d354ca570acf953792cea | f75ec2c20c3208350d310038a2cd0a67253b44df | /src/swagger_codegen/api/response_deserializer.py | 5abc7163627b96210e3b445db6500c9493171408 | [] | no_license | vichooz/swagger_codegen | e53f59f3cd2c080157863698f932a606705db4e4 | 8238356075eea4218b2e6a645c7ea2b8826b1044 | refs/heads/master | 2022-08-03T04:32:49.291426 | 2020-05-27T06:09:28 | 2020-05-27T06:09:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | import abc
from typing import Any
from typing import Optional
import pydantic
from swagger_codegen.api.types import ResponseType
class ResponseDeserializer(abc.ABC):
    """Strategy interface: convert a decoded response body to a target type."""

    @abc.abstractmethod
    def deserialize(self, deserialize_to: ResponseType, model_body):
        """Return *model_body* coerced to *deserialize_to*."""
        pass
class DefaultResponseDeserializer(ResponseDeserializer):
    """Coerce/validate response bodies via a dynamically built pydantic model."""

    def deserialize(self, deserialize_to: ResponseType, model_body) -> Optional[Any]:
        # No target type requested, or nothing to deserialize: return None.
        if deserialize_to is None:
            return None
        if model_body is None:
            return None

        class Config(pydantic.BaseConfig):
            # Allow non-pydantic (project-declared) classes inside deserialize_to.
            arbitrary_types_allowed = True

        # __root__ makes the model validate the bare body directly against
        # deserialize_to instead of requiring a field-named wrapper.
        pydantic_validator_model = pydantic.create_model(
            "PydanticValidatorModel", __root__=(deserialize_to, ...), __config__=Config
        )
        return pydantic_validator_model(__root__=model_body).__root__
"n10101010@gmail.com"
] | n10101010@gmail.com |
23abf7000a44f260fb7ca4f7eda590f62d2f544b | ab7a85f449049ccefe5456b60848802a58613e97 | /ex19.py | bb3b8c138ce890a033a4cca57a28641e77e666cd | [] | no_license | nomanoma/lpthw | ecdd665cb25a71b876de51edbf5235a0c9b24080 | 88f8fea7556263e2246fe77a8a77da63ecaa22ca | refs/heads/master | 2021-01-13T02:16:02.843745 | 2012-08-22T14:58:28 | 2012-08-22T14:58:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | #-- coding: UTF-8 --
# сделаем функцию, принимающую на вход количство сырков и количество крэкеров
# и распечатывающую их и еще пару строк
def cheese_and_crackers(cheese_count, boxes_of_crackers):
print "You have %d cheeses!" % cheese_count
print "You have %d boxes of crackers!" % boxes_of_crackers
print "Man that's enogh for the party!"
print "Get a blanket.\n"
# вызовем функцию передав числа прямо
print "We can just give the function numbers directly:"
cheese_and_crackers(20, 30)
# вызовем функцию, передав параметры через переменные
print "OR, we can use variables from out script:"
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
# вызовем функцию, используя математические операции при передаче параметров
print "We can even do the math inside too:"
cheese_and_crackers(10 + 20, 5 + 6)
# вызовем функцию, передав параметры с которыми при передаче совершаются математические операции
print "And we can combine the two, variables and math:"
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
| [
"noma"
] | noma |
89790569a98610f739df75c0eddd5032f7f32c20 | b0330aa21a18820c47d9a9d1742c563b36a932e6 | /mmrotate/datasets/dota.py | f893bdb9062ee093b4d44285e2ddd75704fca1cf | [
"Apache-2.0"
] | permissive | ZJW700/MUS-CDB | b34cd18b4a55ade1067e04834ac7365eb7c2a74d | 6066d2064cc654bbd8690ab1fbf5fd4bdee249c1 | refs/heads/master | 2023-05-23T21:41:07.679963 | 2023-05-17T07:13:55 | 2023-05-17T07:13:55 | 573,043,634 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 14,865 | py | # Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import re
import tempfile
import time
import zipfile
from collections import defaultdict
from functools import partial
import mmcv
import numpy as np
import torch
from mmcv.ops import nms_rotated
from mmdet.datasets.custom import CustomDataset
from mmrotate.core import eval_rbbox_map, obb2poly_np, poly2obb_np
from .builder import ROTATED_DATASETS
@ROTATED_DATASETS.register_module()
class DOTADataset(CustomDataset):
    """DOTA dataset for detection, with active-learning pool selection.

    Args:
        X_L_file (str): File listing the image ids of the labeled pool.
        label_type: Labeling mode for active learning — TODO confirm semantics.
        load_type: Annotation loading mode ('select' restricts the test
            phase to X_L_file ids) — see load_annotations.
        ann_file (str): Annotation file path.
        pipeline (list[dict]): Processing pipeline.
        version (str, optional): Angle representations. Defaults to 'oc'.
        difficulty (bool, optional): The difficulty threshold of GT.
    """

    # 15 DOTA-v1.0 categories; label indices are 0-based (mmdet v2.0).
    CLASSES = ('plane', 'baseball-diamond', 'bridge', 'ground-track-field',
               'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
               'basketball-court', 'storage-tank', 'soccer-ball-field',
               'roundabout', 'harbor', 'swimming-pool', 'helicopter')

    # One BGR visualization color per class, aligned with CLASSES.
    PALETTE = [(165, 42, 42), (189, 183, 107), (0, 255, 0), (255, 0, 0),
               (138, 43, 226), (255, 128, 0), (255, 0, 255), (0, 255, 255),
               (255, 193, 193), (0, 51, 153), (255, 250, 205), (0, 139, 139),
               (255, 255, 0), (147, 116, 116), (0, 0, 255)]
    def __init__(self,
                 X_L_file,
                 label_type,
                 load_type,
                 ann_file,
                 pipeline,
                 version='oc',
                 difficulty=100,
                 **kwargs):
        # Angle-coding convention for rotated boxes (e.g. 'oc').
        self.version = version
        # GT instances harder than this threshold are dropped at load time.
        self.difficulty = difficulty
        # Active-learning bookkeeping: labeled-pool id list and modes.
        self.X_L_file = X_L_file
        self.label_type = label_type
        self.load_type = load_type
        super(DOTADataset, self).__init__(X_L_file, label_type, load_type, ann_file, pipeline, **kwargs)
    def __len__(self):
        """Total number of samples (annotation records loaded)."""
        return len(self.data_infos)
def load_annotations(self, ann_folder, X_L_file, load_type):
"""
Args:
ann_folder: folder that contains DOTA v1 annotations txt files
"""
cls_map = {c: i
for i, c in enumerate(self.CLASSES)
} # in mmdet v2.0 label is 0-based
ann_files_ori = glob.glob(ann_folder + '*.txt')
if not ann_files_ori:
ann_files = []
else:
# train phase:normal
img_ids = mmcv.list_from_file(self.X_L_file)
ann_files = []
for i in img_ids:
ann_files.append(ann_folder + i + ".txt")
data_infos = []
if not ann_files: # test phase
if load_type == 'select':
img_ids = mmcv.list_from_file(self.X_L_file)
ann_files = []
for i in img_ids:
ann_files.append(ann_folder + i + ".txt")
else:
ann_files = glob.glob(ann_folder + '/*.png')
# ann_files = sorted(glob.glob(ann_folder + '/*.png'))
for ann_file in ann_files:
data_info = {}
img_id = osp.split(ann_file)[1][:-4]
img_name = img_id + '.png'
data_info['filename'] = img_name
data_info['ann'] = {}
data_info['ann']['bboxes'] = []
data_info['ann']['labels'] = []
data_infos.append(data_info)
else:
for ann_file in ann_files:
data_info = {}
img_id = osp.split(ann_file)[1][:-4]
img_name = img_id + '.png'
data_info['filename'] = img_name
data_info['ann'] = {}
gt_bboxes = []
gt_labels = []
gt_polygons = []
gt_bboxes_ignore = []
gt_labels_ignore = []
gt_polygons_ignore = []
if os.path.getsize(ann_file) == 0:
continue
with open(ann_file) as f:
s = f.readlines()
for si in s:
bbox_info = si.split()
poly = np.array(bbox_info[:8], dtype=np.float32)
try:
x, y, w, h, a = poly2obb_np(poly, self.version)
except: # noqa: E722
continue
cls_name = bbox_info[8]
if cls_name == "background":
continue
difficulty = int(bbox_info[9])
label = cls_map[cls_name]
if difficulty > self.difficulty:
pass
else:
gt_bboxes.append([x, y, w, h, a])
gt_labels.append(label)
gt_polygons.append(poly)
if gt_bboxes:
data_info['ann']['bboxes'] = np.array(
gt_bboxes, dtype=np.float32)
data_info['ann']['labels'] = np.array(
gt_labels, dtype=np.int64)
data_info['ann']['polygons'] = np.array(
gt_polygons, dtype=np.float32)
else:
data_info['ann']['bboxes'] = np.zeros((0, 5),
dtype=np.float32)
data_info['ann']['labels'] = np.array([], dtype=np.int64)
data_info['ann']['polygons'] = np.zeros((0, 8),
dtype=np.float32)
if gt_polygons_ignore:
data_info['ann']['bboxes_ignore'] = np.array(
gt_bboxes_ignore, dtype=np.float32)
data_info['ann']['labels_ignore'] = np.array(
gt_labels_ignore, dtype=np.int64)
data_info['ann']['polygons_ignore'] = np.array(
gt_polygons_ignore, dtype=np.float32)
else:
data_info['ann']['bboxes_ignore'] = np.zeros(
(0, 5), dtype=np.float32)
data_info['ann']['labels_ignore'] = np.array(
[], dtype=np.int64)
data_info['ann']['polygons_ignore'] = np.zeros(
(0, 8), dtype=np.float32)
data_infos.append(data_info)
self.img_ids = [*map(lambda x: x['filename'][:-4], data_infos)]
return data_infos
def _filter_imgs(self):
"""Filter images without ground truths."""
valid_inds = []
for i, data_info in enumerate(self.data_infos):
if data_info['ann']['labels'].size > 0:
valid_inds.append(i)
return valid_inds
def _set_group_flag(self):
"""Set flag according to image aspect ratio.
All set to 0.
"""
self.flag = np.zeros(len(self), dtype=np.uint8)
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None,
nproc=4):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. It must be a float
when evaluating mAP, and can be a list when evaluating recall.
Default: 0.5.
scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
Default: None.
nproc (int): Processes used for computing TP and FP.
Default: 4.
"""
nproc = min(nproc, os.cpu_count())
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = {}
if metric == 'mAP':
assert isinstance(iou_thr, float)
mean_ap, _ = eval_rbbox_map(
results,
annotations,
scale_ranges=scale_ranges,
iou_thr=iou_thr,
dataset=self.CLASSES,
logger=logger,
nproc=nproc)
eval_results['mAP'] = mean_ap
else:
raise NotImplementedError
return eval_results
def merge_det(self, results, nproc=4):
"""Merging patch bboxes into full image.
Args:
results (list): Testing results of the dataset.
nproc (int): number of process. Default: 4.
"""
collector = defaultdict(list)
for idx in range(len(self)):
result = results[idx]
img_id = self.img_ids[idx]
splitname = img_id.split('__')
oriname = splitname[0]
pattern1 = re.compile(r'__\d+___\d+')
x_y = re.findall(pattern1, img_id)
x_y_2 = re.findall(r'\d+', x_y[0])
x, y = int(x_y_2[0]), int(x_y_2[1])
new_result = []
for i, dets in enumerate(result):
bboxes, scores = dets[:, :-1], dets[:, [-1]]
ori_bboxes = bboxes.copy()
ori_bboxes[..., :2] = ori_bboxes[..., :2] + np.array(
[x, y], dtype=np.float32)
labels = np.zeros((bboxes.shape[0], 1)) + i
new_result.append(
np.concatenate([labels, ori_bboxes, scores], axis=1))
new_result = np.concatenate(new_result, axis=0)
collector[oriname].append(new_result)
merge_func = partial(_merge_func, CLASSES=self.CLASSES, iou_thr=0.1)
if nproc <= 1:
print('Single processing')
merged_results = mmcv.track_iter_progress(
(map(merge_func, collector.items()), len(collector)))
else:
print('Multiple processing')
merged_results = mmcv.track_parallel_progress(
merge_func, list(collector.items()), nproc)
return zip(*merged_results)
def _results2submission(self, id_list, dets_list, out_folder=None):
"""Generate the submission of full images.
Args:
id_list (list): Id of images.
dets_list (list): Detection results of per class.
out_folder (str, optional): Folder of submission.
"""
if osp.exists(out_folder):
raise ValueError(f'The out_folder should be a non-exist path, '
f'but {out_folder} is existing')
os.makedirs(out_folder)
files = [
osp.join(out_folder, 'Task1_' + cls + '.txt')
for cls in self.CLASSES
]
file_objs = [open(f, 'w') for f in files]
for img_id, dets_per_cls in zip(id_list, dets_list):
for f, dets in zip(file_objs, dets_per_cls):
if dets.size == 0:
continue
bboxes = obb2poly_np(dets, self.version)
for bbox in bboxes:
txt_element = [img_id, str(bbox[-1])
] + [f'{p:.2f}' for p in bbox[:-1]]
f.writelines(' '.join(txt_element) + '\n')
for f in file_objs:
f.close()
target_name = osp.split(out_folder)[-1]
with zipfile.ZipFile(
osp.join(out_folder, target_name + '.zip'), 'w',
zipfile.ZIP_DEFLATED) as t:
for f in files:
t.write(f, osp.split(f)[-1])
return files
def format_results(self, results, submission_dir=None, nproc=4, **kwargs):
"""Format the results to submission text (standard format for DOTA
evaluation).
Args:
results (list): Testing results of the dataset.
submission_dir (str, optional): The folder that contains submission
files. If not specified, a temp folder will be created.
Default: None.
nproc (int, optional): number of process.
Returns:
tuple:
- result_files (dict): a dict containing the json filepaths
- tmp_dir (str): the temporal directory created for saving \
json files when submission_dir is not specified.
"""
nproc = min(nproc, os.cpu_count())
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
f'The length of results is not equal to '
f'the dataset len: {len(results)} != {len(self)}')
if submission_dir is None:
submission_dir = tempfile.TemporaryDirectory()
else:
tmp_dir = None
print('\nMerging patch bboxes into full image!!!')
start_time = time.time()
id_list, dets_list = self.merge_det(results, nproc)
stop_time = time.time()
print(f'Used time: {(stop_time - start_time):.1f} s')
result_files = self._results2submission(id_list, dets_list,
submission_dir)
return result_files, tmp_dir
def _merge_func(info, CLASSES, iou_thr):
    """Merge one full image's patch detections via per-class rotated NMS.

    Args:
        info (tuple): (image id, list of per-patch [label, bbox, score]
            arrays for that image).
        CLASSES (list): Label categories.
        iou_thr (float): IoU threshold used by the NMS.

    Returns:
        tuple: (image id, list of per-class detection arrays).
    """
    img_id, label_dets = info
    stacked = np.concatenate(label_dets, axis=0)
    labels, dets = stacked[:, 0], stacked[:, 1:]
    big_img_results = []
    for cls_idx in range(len(CLASSES)):
        cls_dets = dets[labels == cls_idx]
        if len(cls_dets) == 0:
            # Nothing detected for this class: keep the empty array.
            big_img_results.append(cls_dets)
            continue
        try:
            cls_tensor = torch.from_numpy(cls_dets).cuda()
        except:  # noqa: E722  fall back to CPU when CUDA is unavailable
            cls_tensor = torch.from_numpy(cls_dets)
        nms_dets, _ = nms_rotated(cls_tensor[:, :5], cls_tensor[:, -1],
                                  iou_thr)
        big_img_results.append(nms_dets.cpu().numpy())
    return img_id, big_img_results
| [
"754136925@qq.com"
] | 754136925@qq.com |
663fd7bfc9ed17d43ea490e922bb177e6409a02a | 048354b7a25ea2bd5ad519101f945fb4a1cd0618 | /Classtagram/backend/classtagram/view/user.py | a4df11a405cdd1815d3ebb7a7974023109f4ea71 | [] | no_license | sjho/classtagram | bcb3f904edd8e834a4a22ead818fb807479b6a18 | 0a22a99017d9778d9fac3d6581c04ec520a9ce42 | refs/heads/master | 2023-05-11T07:23:44.068422 | 2019-06-20T00:17:34 | 2019-06-20T00:17:34 | 180,547,136 | 4 | 3 | null | 2023-04-30T21:01:20 | 2019-04-10T09:21:52 | JavaScript | UTF-8 | Python | false | false | 3,633 | py | from django.shortcuts import render
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from classtagram.models import User, Course, Photo, Tag
from classtagram.serializers import UserSerializer, RegisterSerializer
from django.views.decorators.csrf import csrf_exempt
from rest_framework.views import APIView
from django.http import JsonResponse, HttpResponse
from django.contrib.auth import login
from django.http import Http404
import json
#from django.contrib.auth.models import User
#from rest_auth.registration.views import RegisterView
# 회원가입 뷰
class Register(APIView):
    """Registration endpoint: GET lists every user, POST creates one."""

    queryset = User.objects.all()
    serializer_class = RegisterSerializer

    def get(self, request, format=None):
        # Serialize the full user table.
        serializer = RegisterSerializer(User.objects.all(), many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        serializer = RegisterSerializer(data=request.data)
        if not serializer.is_valid():
            return JsonResponse({'success':False, 'message':'error'})
        serializer.save()
        return JsonResponse({'success':True, 'message':'register successed!'})
# 강의 수정/삭제 뷰
class UserDetail(APIView):
    """Retrieve, update or delete a single user by primary key."""

    queryset = User.objects.all()
    serializer_class = UserSerializer

    def get_object(self, pk):
        """Return the user with ``pk`` or raise ``Http404``."""
        try:
            obj = User.objects.get(pk=pk)
            self.check_object_permissions(self.request, obj)
            return obj
        except User.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        # Fix: the locals below were named ``User``, shadowing the
        # imported model inside every handler; renamed to ``user``.
        user = self.get_object(pk)
        serializer = UserSerializer(user)
        return Response(serializer.data)

    def put(self, request, pk, format=None):
        user = self.get_object(pk)
        serializer = UserSerializer(user, data=request.data)
        if serializer.is_valid():
            serializer.save(user=self.request.user)
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        user = self.get_object(pk)
        user.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
# 수업별 유저 get
class UserCourseList(APIView):
    """List the users enrolled in a course, with per-user tag counts."""

    queryset = User.objects.all()
    serializer_class = UserSerializer

    def get_object(self, pk):
        """Return the users of course ``pk`` or raise ``Http404``."""
        try:
            course = Course.objects.get(pk=pk)
            return course.users.all()
        except Course.DoesNotExist:
            # Bug fix: the original caught ``User.DoesNotExist``, which
            # ``Course.objects.get`` never raises, so a missing course
            # produced a 500 instead of a 404.
            raise Http404

    def get(self, request, pk, format=None):
        users = self.get_object(pk)
        serializer = UserSerializer(users, many=True)
        # The course-wide photo count does not depend on the user, so
        # query it once instead of once per user.
        wholecount = Photo.objects.filter(course=pk).count()
        for uobj in serializer.data:
            count = Tag.objects.filter(course=pk, user=uobj['id']).count()
            uobj.update({'wholecount': wholecount})
            uobj.update({'count': count})
        return Response(serializer.data)
# 로그인 뷰
@csrf_exempt
def Login(request):
    """Token login endpoint.

    Expects a JSON body with 'username' and 'password'; on success logs
    the user in and returns their DRF auth token.

    NOTE(review): CSRF protection is disabled and the password travels in
    the request body — confirm the deployment forces HTTPS.
    """
    body = json.loads(request.body)
    username = body['username']
    pwd = body['password']
    try:
        user = User.objects.get(username=username)
        if not user.check_password(pwd):
            return JsonResponse({'success':False, 'message':'error'})
        login(request, user)
        token, _ = Token.objects.get_or_create(user=user)
        return JsonResponse({'success':True,'token': token.key, 'user': user.id, 'message':'login successed!'})
    except Exception:
        # Deliberate catch-all: unknown user or any other failure yields
        # the same opaque error payload (avoids username enumeration).
        # The unused ``as e`` binding was dropped.
        return JsonResponse({'success':False, 'message':'error'})
"sjh2177@gmail.com"
] | sjh2177@gmail.com |
b69c7064129a5c117f22cd1587f8aee3325f0e34 | df4fe22c9b1c56d70125694cb377f44855d0d764 | /wordlistmaker.py | 8e2692b1ddbe717baccf2a894dc666c80395869f | [] | no_license | MrSw7G3R/wordlistmaker | 004c67fdf8f1f591fae574c42594e560707d4d63 | 891c6eeebe29ea2256fd489e47578d206ae7dd81 | refs/heads/master | 2020-09-28T05:55:35.495143 | 2016-08-28T14:42:57 | 2016-08-28T14:42:57 | 66,772,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,317 | py | #-*- coding: cp857 -*-
import time,os,sys,random,re,traceback
# Set the console window title (Windows 'title' command; no-op elsewhere).
os.system("title MrSw7g3r Wordlist Maker")
# Python 2 trick: re-expose setdefaultencoding so Turkish (cp857) text can
# be printed without an explicit encode at every call site.
reload(sys)
sys.setdefaultencoding('cp857')
# Sentinel flag used as the condition of the interactive input loop in main().
MrSw7g3r = True
from itertools import permutations
wordlist = []  # generated candidate passwords
values = []  # unused placeholder
bilgiler = {}  # collected profile facts, keyed by attribute name
renkler = ['\033[95m','\033[94m','\033[93m','\033[91m','\033[1m']  # ANSI color codes
def retsekil(metin="Empty"):
    # Build "<color>[HH:MM:SS]<text><reset>" using a randomly chosen
    # ANSI color; the text is transcoded for the console (Python 2 str).
    renk = random.choice(renkler)
    zaman = time.strftime("%H:%M:%S")
    govde = metin.decode('utf-8').encode(sys.stdout.encoding)
    return renk + "[" + zaman + "]" + govde + '\033[0m'
def sekilyaz(metin):
print random.choice(renkler) + "["+time.strftime("%H:%M:%S")+ "]" + metin.decode('utf-8').encode(sys.stdout.encoding) + '\033[0m'
def ekle(ad,bilgi):
    # Store one collected fact in the global ``bilgiler`` dict. Multi-word
    # answers are split and stored under numbered keys (ad0, ad1, ...).
    if bilgi == "":
        sekilyaz("Empty.")
        return
    parcalar = bilgi.split(" ")
    if len(parcalar) > 1:
        for sira, parca in enumerate(parcalar):
            bilgiler[ad + str(sira)] = parca
    else:
        bilgiler[ad.strip()] = bilgi
info = """
-------------------------------------------------------------------
MrSw7g3r Wordlist Maker
Github: https://github.com/MrSw7G3R
Inspired from Elliot,
This tool takes all the information from user
and uses permutations to spread all the words so
you don't miss any passwords.
If you want to add another word you can use the last part.
The syntax for last part is
Nameofinformation:information
Lastly, I'm not native speaker. So forgive me
for my bad english :p
-------------------------------------------------------------------
"""
def premadewordlist():
    """Seed ``wordlist`` with common passwords and name-based patterns.

    Uses the collected ``bilgiler`` profile (first name, birth year,
    phone number). Every suffix is combined both as name+suffix and, via
    reverseadd(), as suffix+name. The original spelled all of this out as
    ~50 copy-pasted append calls; the loops below produce the exact same
    words in the exact same order.
    """
    sekilyaz("Writing common passwords")
    donald = bilgiler["name"]
    # Frequently used passwords (duplicates kept to mirror the original).
    wordlist.extend([
        "abcd1234", "password", "password123", "123password", "1234abcd",
        "abcd1234", "q1w2e3r4", "696969", "123456", "password", "sifre123",
        "1234567890", "letmein", "shadow", "trustno1", "fuckyou123",
    ])
    # Suffixes combined with the target's first name.
    ekler = ["123", "1234", "12345", "123456", "1234567", "12345678",
             bilgiler["year"], "31", "fb1907", "fener1907",
             "fenerbahce1907", "gs1905", "galatasaray1905", "bjk1903",
             "besiktas1903", "741852963", "000"]
    for ek in ekler:
        wordlist.append(donald + ek)
    wordlist.append("xxx" + donald + "xxx")
    wordlist.append(donald + "741852963")
    # Reversed (suffix-first) variants, with one extra numeric pad at the end.
    for ek in ekler + ["741852963"]:
        reverseadd(donald, ek)
    # Fragments of the phone number on both sides of the name.
    telno = bilgiler["telno"]
    parcalar = [telno[7:11], telno[5:11], telno[:4], telno[:]]
    for parca in parcalar:
        wordlist.append(donald + parca)
    for parca in parcalar:
        reverseadd(donald, parca)
def reverseadd(item1,item2):
    # Append the reversed combination (suffix first, name second).
    birlesik = item2 + item1
    wordlist.append(birlesik)
def mix():
    # Concatenate every 1-, 2- and 3-item permutation of the collected
    # profile values into candidate passwords.
    sekilyaz("Writing the permutations...")
    degerler = list(bilgiler.values())
    ust_sinir = min(3, len(degerler))
    for uzunluk in range(1, ust_sinir + 1):
        sekilyaz("Now making %s words permutation." % uzunluk)
        for kombinasyon in permutations(degerler, uzunluk):
            wordlist.append(''.join(kombinasyon))
def main():
    """Interactively collect the target's profile and build the wordlist.

    Flow: prompt for every known attribute, allow free-form extras in a
    ``Name:value`` loop, archive the collected profile to
    ``information.txt``, then generate and dump the candidate passwords.
    The original asked each question with its own duplicated
    raw_input/ekle pair; the questions are now data-driven, asked in the
    same order with the same prompt strings.
    """
    sekilyaz(info)
    # (key, prompt) pairs asked in order; every answer goes through ekle().
    sorular = [
        ("name", "Name: "),
        ("secname", "Second Name: "),
        ("lastname", "Soyadı: "),
    ]
    for anahtar, soru in sorular:
        ekle(anahtar, raw_input(retsekil(soru)))
    # Birthday is split into its three components.
    dogum = raw_input(retsekil("Birthday(xx.x.xxxx): ")).split('.')
    ekle("day", dogum[0])
    ekle("month", dogum[1])
    ekle("year", dogum[2])
    sorular = [
        ("momname", "Mom's name: "),
        ("fathername", "Father's name: "),
        ("animalname", "Animal's name: "),
        ("soccerteam", "Soccer Team: "),
        ("basketteam", "Basketball Team: "),
        ("singer", "Favorite Singer: "),
        ("famous", "Favorite Holywood Famous: "),
        ("basketplayer", "Favorite Basketball Player: "),
        ("food", "Favorite Food: "),
        ("animaltype", "Favorite animal type: "),
        ("wifename", "Wife Name: "),
        ("childname", "Child's name: "),
        ("telno", "Tel no: "),
    ]
    for anahtar, soru in sorular:
        ekle(anahtar, raw_input(retsekil(soru)))
    # Free-form extras: "Key:value" entries until the user types "x".
    while MrSw7g3r:
        sekilyaz("If you want to add something different, use me! Else, type x.")
        baska = raw_input("["+time.strftime("%H:%M:%S")+ "]")
        if baska.lower() == "x":
            break
        try:
            ekle(baska.split(":")[0],baska.split(":")[1])
            sekilyaz("Added, %s to %s" % (baska.split()[0],baska.split()[1]))
        except:  # noqa: E722  malformed input is simply skipped
            continue
    # Archive the raw profile for later runs.
    with open("information.txt","a") as f:
        sekilyaz("Saving information for future uses...")
        f.writelines("-"*50)
        f.writelines("\n")
        for key,value in bilgiler.items():
            f.writelines("%s:%s" % (key,value))
            f.writelines("\n")
        f.writelines("-"*50)
        f.writelines("\n")
    sekilyaz("Wordlist maker started...")
    premadewordlist()
    mix()
    # Dump every candidate password, one per line.
    with open("%swordlist.txt" % bilgiler["name"],"a") as f:
        f.writelines("-"*50)
        f.writelines("\n")
        for i in wordlist:
            f.writelines(i)
            f.writelines("\n")
        f.writelines("-"*50)
        f.writelines("\n")
# Entry point: run the interactive collector; on any failure print the
# traceback plus whatever profile data was gathered so far (Python 2
# ``except Exception, e`` syntax; ``e`` itself is unused).
if __name__ == '__main__':
    try:
        main()
    except Exception,e:
        print "\n"
        print traceback.print_exc()
        print bilgiler
| [
"noreply@github.com"
] | noreply@github.com |
def had_a_farm():
    """Return the refrain line that opens and closes every verse."""
    refrain = 'Old MacDonald had a farm. E-I-E-I-O!'
    return refrain
def and_on_that_farm(animal):
    """Return the second line of the verse for *animal*."""
    template = 'And on that farm he had {0} {1}. E-I-E-I-O!'
    return template.format(choose_article(animal), animal)
def with_a(sound):
    """Return the noisy middle portion of a verse for *sound*."""
    article = choose_article(sound)
    near_and_far = 'With {0} {1}, {1} here, and {0} {1}, {1} there. '.format(article, sound)
    everywhere = 'Here {0} {1}, there {0} {1}, everywhere {0} {1}, {1}.'.format(article, sound)
    return near_and_far + everywhere
def choose_article(sound):
    """Return 'an' when *sound* starts with a vowel, otherwise 'a'."""
    first = sound[0].lower()
    if first in 'aeiou':
        return 'an'
    return 'a'
def build_verse(animal, sound):
    """Assemble one complete verse (refrain, animal line, sounds, refrain)."""
    parts = [had_a_farm(), and_on_that_farm(animal), with_a(sound), had_a_farm()]
    return "\n".join(parts) + "\n"
# Animal -> sound lookup used to generate one verse per animal.
animals = {'pig': 'oink',
           'cow': 'moo',
           'duck': 'quack',
           'sheep': 'baa',
           'alpaca':'humng',
           'dog': 'bark'}
# Print every verse (Python 2 print statement; dict iteration order is
# arbitrary on this interpreter).
for name in animals:
    print build_verse(name, animals[name])
| [
"wilson.eric.n@gmail.com"
] | wilson.eric.n@gmail.com |
4a67d8771aca07434a51aa7be4bb84f2c069a433 | 34745a8d54fa7e3d9e4237415eb52e507508ad79 | /Python Fundamentals/Final exam/02_emoji_detector.py | 2561bef627873327e10a68ef7de9312ae81415d8 | [] | no_license | DilyanTsenkov/SoftUni-Software-Engineering | 50476af0dc88b267d72c56fa87eeb88d841164b2 | fe446e3a50a00bb2e48d71ab8f783e0a4a406094 | refs/heads/main | 2023-08-12T18:18:42.144210 | 2021-09-25T11:10:38 | 2021-09-25T11:10:38 | 317,235,419 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | import re
def threshold(input_string):
    """Return the "cool threshold": the product of every digit found in
    the text, or 1 when the text contains no digits."""
    result = 1
    for digit in re.findall(r"[0-9]", input_string):
        result *= int(digit)
    return result
def emoji_checker(input_string, cool):
    """Collect every emoji in the text and the "cool" subset.

    An emoji is a capitalised word of three or more letters wrapped in a
    matching pair of '::' or '**'. An emoji is cool when the sum of the
    character codes of its word strictly exceeds *cool*.

    Returns:
        tuple: (all emojis, cool emojis), both in order of appearance.
    """
    found = []
    cool_ones = []
    pattern = r"(?P<symbols>\:\:|\*\*)(?P<emoji>[A-Z][a-z][a-z]+)(?P=symbols)"
    for match in re.finditer(pattern, input_string):
        word = match.group("emoji")
        wrapper = match.group("symbols")
        emoji = wrapper + word + wrapper
        found.append(emoji)
        if sum(ord(ch) for ch in word) > cool:
            cool_ones.append(emoji)
    return found, cool_ones
# Read one line of text, compute the cool threshold and report the emojis.
string = input()
cool_threshold = threshold(string)
all_emojis, cool_emojis = emoji_checker(string, cool_threshold)
print(f"Cool threshold: {cool_threshold}")
print(f"{len(all_emojis)} emojis found in the text. The cool ones are:")
# Fix: the original used a list comprehension purely for its print side
# effects and rebound ``cool_emojis`` to a list of None; use a plain loop.
for emoji in cool_emojis:
    print(emoji)
| [
"noreply@github.com"
] | noreply@github.com |
53eb76cc3091e5e09ec6e1351a58da31dda2f052 | a2108e1d60534c9d99e7826084a42df19395bdfc | /python/18_aleatorios.py | c416097887cf7a58f9da16461cf4f58910e12bde | [] | no_license | Fran8/CursoLeonEoiPythonDjango | 3e1a738d417667ca97b697da0e65e47a9154accd | 0770a6d9168f62fb15aa4376c9b70988c991f9ae | refs/heads/master | 2021-08-31T04:43:33.922807 | 2017-12-20T11:54:28 | 2017-12-20T11:54:28 | 114,881,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | """
generando numeros aleatorios
"""
import random
# Random integers in a closed range.
for n in range(10):
    print('entero aleatorio:', random.randint(10,20))
# Random floats in [0.0, 1.0).
for n in range(4):
    print(random.random())
L = ['oscar', 'lucia', 'jaime', 'pepe', 'cris', 'yolanda', 'taimi']
# One random element from a list.
for n in range(8):
    print(random.choice(L))
# k random elements (with replacement) from a list.
r = random.choices(L, k=2)  # k is the number of elements we want
print(r)  # fix: the original printed the stale loop variable ``n``
# Shuffle the list in place, randomly reordering its elements.
random.shuffle(L)
print(L)  # fix: the original printed ``r`` instead of the shuffled list
random.shuffle(L)
print(L)  # fix: same mistake as above
# Build a new list of k distinct (non-repeated) elements from a list.
print(random.sample(L, k=2))
print(random.sample(L, k=2))
print(random.sample(L, k=2))
print(random.sample(L, k=2))
| [
"francisco.yaguez@gmail.com"
] | francisco.yaguez@gmail.com |
9f26e018ae25328256dfa983aa642dd60728fd96 | e80efd2e6cad1886843ac3803c23695f598d840e | /Framework/pages/adminPortalPage.py | e94e915666f2490ca130ac42f1f1f3bd6986d77b | [] | no_license | MYGCREATIONS/mhGuideProjectLocal | a64d028c3fd7f5fcbd47ed2e90ec42c76859d65c | 3a875a57a36aac7b16fd311185602146cb1477f9 | refs/heads/master | 2020-04-06T09:55:22.231407 | 2018-11-13T10:26:18 | 2018-11-13T10:26:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 08:30:21 2018
@author: mdcayoglu
"""
from Framework.common.locators import AdminportalLocators
from features.terrain import *
class Adminportal:
    """Page object for the admin portal navigation.

    ``verify*`` methods wait until an element is visible (raising on
    timeout); ``goTo*`` methods wait until a tab is clickable and open it.
    """

    def __init__(self):
        # Locator bundle for every element this page object touches.
        self.aL = AdminportalLocators()

    def _click(self, locator):
        # Shared helper: wait for clickability, then click. The original
        # duplicated this two-line body in every goTo* method.
        explicitWaitClickable(locator).click()

    def verifyAddLabMenu(self):
        explicitWaitVisibility(self.aL.verifierAddLabMenu)

    def verifyLabEditorTab(self):
        explicitWaitVisibility(self.aL.verifierLabEditorTab)

    def verifyLabtestEditorTab(self):
        explicitWaitVisibility(self.aL.verifierLabtestEditorTab)

    def verifyOrgUnitEditorTab(self):
        explicitWaitVisibility(self.aL.verifierOrgUnitEditorTab)

    def verifyUserEditorTab(self):
        explicitWaitVisibility(self.aL.verifierUserEditorTab)

    def goToLabEditor(self):
        self._click(self.aL.labEditor)

    def goToLabtestEditor(self):
        self._click(self.aL.labtestEditor)

    def goToOrgUnitEditor(self):
        self._click(self.aL.orgunitEditor)

    def goToUserEditor(self):
        self._click(self.aL.userEditor)
| [
"yousuf.gohar@molecularhealth.com"
] | yousuf.gohar@molecularhealth.com |
fe659f430372740598e895d452a82be9ad1bb7a4 | 505b43646dda8edb5e5e3be4b9ab05c53d8c5c9d | /scrapper/spiders/tuoitre.py | a02a0807321e6422b16690253dd1afc4e1b459f4 | [] | no_license | trungtrinh44/scrapper | e415a10bf493778339dc5a7c77faedfe1b26faf6 | 2a4ee968c425da98477a4d1f485f354c13c45c62 | refs/heads/master | 2022-01-08T01:06:20.630201 | 2019-05-09T09:21:46 | 2019-05-09T09:21:46 | 104,972,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,573 | py | import unicodedata
import scrapy
class TuoiTreSpider(scrapy.Spider):
    """Scrapy spider that walks tuoitre.vn category timelines and scrapes
    every linked article into a dict (id, date, title, summary, content,
    category)."""
    name = "tuoitre"
    # Category label -> tuoitre timeline id used to build listing URLs.
    path = {'Thoi su/Thoi su': 3, 'Thoi su/Xa hoi': 200003, 'Thoi su/Phong su': 89, 'Thoi su/Nghi': 87,
            'The gioi/The gioi': 2, 'The gioi/Binh luan': 94, 'The gioi/Kieu bao': 312, 'The gioi/Muon mau': 442, 'The gioi/Ho so': 20,
            'Phap luat/Phap luat': 6, 'Phap luat/Chuyen phap dinh': 266, 'Phap luat/Tu van': 79, 'Phap luat/Phap ly': 200005,
            'Kinh doanh/Kinh doanh': 11, 'Kinh doanh/Tai chinh': 86, 'Kinh doanh/Doanh nghiep': 775, 'Kinh doanh/Mua sam': 200006, 'Kinh doanh/Dau tu': 200007,
            'Xe/Xe': 659,
            'Nhip song tre/Nhip song tre': 7, 'Nhip song tre/Xu huong': 200012, 'Nhip song tre/Kham pha': 200013, 'Nhip song tre/Yeu': 194, 'Nhip song tre/Nhan vat': 200014, 'Nhip song tre/Viec lam': 269,
            'Van hoa/Van hoa': 200017, 'Van hoa/Doi song': 200018, 'Van hoa/Van hoa doc sach': 61,
            'Giai tri/Giai tri': 10, 'Giai tri/Am nhac': 58, 'Giai tri/Dien anh': 57, 'Giai tri/TV show': 385, 'Giai tri/Thoi trang': 919, 'Giai tri/Hau truong': 922,
            'Giao duc/Giao duc': 13, 'Giao duc/Hoc duong': 1507, 'Giao duc/Du hoc': 85, 'Giao duc/Cau chuyen giao duc': 913, 'Giao duc/Goc hoc tap': 200020,
            'Khoa hoc/Khoa hoc': 661, 'Khoa hoc/Thuong thuc': 200010, 'Khoa hoc/Phat minh': 200011,
            'Ban doc lam bao/Ban doc lam bao': 118, 'Ban doc lam bao/Duong day nong': 937, 'Ban doc lam bao/Tieu diem': 1360, 'Ban doc lam bao/Chia se': 940}
    root_path = 'http://tuoitre.vn'
    # Listing URL template: (timeline id, page number).
    url = root_path + "/timeline/%d/trang-%d.htm"
    def __init__(self):
        # Current listing page number per category, starting at 1.
        self.count = {x: 1 for x in TuoiTreSpider.path}
    def start_requests(self):
        # Kick off page 1 of every category with a category-bound callback.
        for x in TuoiTreSpider.path:
            yield scrapy.http.Request(url=TuoiTreSpider.url % (TuoiTreSpider.path[x], self.count[x]), callback=self.parse_with_type(x))
    def parse_with_type(self, _type):
        # Returns a closure that scrapes one listing page of category
        # ``_type``: follow every anchor as an article, then request the
        # next listing page.
        # NOTE(review): the page counter is advanced and the next page is
        # requested unconditionally, so pagination only stops when the
        # site stops answering with a parseable page -- confirm intended.
        def parse(response):
            paths = response.selector.xpath('//a/@href').extract()
            if paths:
                for path in paths:
                    yield scrapy.http.Request(url=TuoiTreSpider.root_path + path, callback=self.parse_article(_type))
            self.count[_type] += 1
            yield scrapy.http.Request(url=TuoiTreSpider.url % (TuoiTreSpider.path[_type], self.count[_type]), callback=parse)
        return parse
    def parse_article(self, _type):
        # Returns a closure that extracts one article page into a dict;
        # the category label is captured so it can be stored alongside.
        def parse(response):
            left_side = response.css("section#content div.content div#main-detail")
            title = left_side.css("div.w980 h1.article-title").xpath('.//text()').extract_first()
            title = title if title is None else title.strip()
            date = left_side.css("div.w980 div.date-time").xpath('.//text()').extract_first().strip()
            left_side = left_side.css("section.detail-w div#mainContentDetail div.column-first-second div.main-content-body")
            summary = left_side.css("h2.sapo").xpath('.//text()').extract_first()
            # Drop the site's "TTO -" lead-in marker from the summary.
            if summary.startswith("TTO -"):
                summary = summary[5:]
            summary = summary.strip()
            # Join the NFKC-normalised text of every body paragraph.
            content = '\n'.join(' '.join(unicodedata.normalize('NFKC', y).strip() for y in x.xpath('.//text()').extract()) for x in left_side.css("div#main-detail-body > p"))
            # tags = left_side.xpath('//ul[@class="block-key"]/li/a/text()').extract()
            return {'_id': response.url, 'date': date, 'title': title, 'summary': summary, 'content': content, 'type': _type} # , 'tags': tags}
        return parse
| [
"trinhtrung96@gmail.com"
] | trinhtrung96@gmail.com |
f8cb4f1d453c8ae3d2bee953aa3ade01ac2a9141 | 42f28a3e77c8e252bbc041ce3ecad25e67b85ba8 | /python/practice_python/exer12.py | 52ee0c1519d3f1332e268be2644da5480176e186 | [] | no_license | linhnvfpt/homework | f7eae10670df0adc4038b2856be8215d89695010 | 31556dad588b77cafed577671cb56b46cc063406 | refs/heads/master | 2023-01-12T01:20:22.738126 | 2020-06-28T16:28:15 | 2020-06-28T16:28:15 | 60,790,466 | 0 | 0 | null | 2023-01-06T06:21:59 | 2016-06-09T16:41:20 | Python | UTF-8 | Python | false | false | 529 | py | # Write a program that takes a list of numbers ( for example, a = [5, 10, 15, 20, 25])
# and makes a new list of only the first and last elements of the given list.
# For practice, write this code inside a function.
# Concepts to practice
# Lists and properties of lists
# List comprehensions (maybe)
# Functions
def getInput(aList):
    """Return a new list holding the first and last elements of *aList*.

    A single-element list yields that element twice; an empty list yields
    an empty list (the original raised IndexError on empty input).
    """
    if not aList:
        return []
    return [aList[0], aList[-1]]
# Quick demonstration of the function on the example from the exercise.
aList = [5, 10, 15, 20, 25]
bList = getInput(aList)
print(bList)
| [
"linh_nv@ehicas.com.vn"
] | linh_nv@ehicas.com.vn |
fc74cec53971ab0d9ae559bb2a7635cfa18c76d3 | 34e4a711e3edeafc398f6aa7700ddd169e571d44 | /mysite/settings.py | 7032beb6aa40b961a079d6bc2961f2bf99e32a77 | [] | no_license | anaisperez/my-first-blog | da4094db3cdd2475a808bacd29cdad2c95c53f55 | d7bf3d53cc0a952eeba30a03259143217727a9ff | refs/heads/master | 2022-04-27T00:10:09.936859 | 2020-04-27T15:34:53 | 2020-04-27T15:34:53 | 259,375,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,208 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The literal below is already public in version control, so production
# deployments should supply DJANGO_SECRET_KEY in the environment; the literal
# is kept only as a development fallback (behaviour unchanged when unset).
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    'ygyp2hy!*ured0y!(7+a82%#u3^4rz!=x1&a#6*3)b$2&oy&x#',
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Local blog application, registered via its AppConfig (blog/apps.py).
    'blog.apps.BlogConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # No project-level template directories: with APP_DIRS=True templates
        # are discovered inside each installed app's templates/ folder.
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        # Local SQLite file next to manage.py; fine for development,
        # swap for a server-backed database in production.
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"anaisperez@gmail.com"
] | anaisperez@gmail.com |
8bae3bf83adbd836e241582cdafb339dc0026803 | 95741702b1ea65aaf87027b650001c5a8b55de55 | /blog/migrations/0003_delete_comment.py | 20fd5c623649c1efc741cf9f6d589e2b886734e9 | [] | no_license | ahmuh1306/django_project | 94cecb65041b2f374704f896251eb1c006af6b77 | 7961a4bce505ae6bd21e0cb1a8ed03cafbc40984 | refs/heads/main | 2023-02-02T13:25:08.622695 | 2020-12-20T10:03:14 | 2020-12-20T10:03:14 | 318,462,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | # Generated by Django 3.1.2 on 2020-10-29 09:56
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: reverses 0002_comment by dropping the
    # blog.Comment model (and its backing database table).
    dependencies = [
        ('blog', '0002_comment'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Comment',
        ),
    ]
| [
"info@ahmadmuhammad.co.za"
] | info@ahmadmuhammad.co.za |
b051ab10452e97184e6c5c25d45280afd3d9d969 | b28ad9503649e3d8cf5b533c91ef7fe8b229c3b6 | /zabbix/zabbix_auth.py | b742da70618f0afefe4165fa3aaf39fb1b609c49 | [] | no_license | tianyongkang/es | ceff36f7220d039598767648a8d1dbd44df571b3 | 86a9bd96b85ceadba3e73ef10ff2caad246ae17d | refs/heads/master | 2021-01-21T17:36:59.287035 | 2017-05-21T14:56:07 | 2017-05-21T14:56:07 | 91,964,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | #!/usr/bin/python
#--*-- coding=utf8 --*--
import json
import urllib2
def zabbix_auth():
    """Log in to the Zabbix JSON-RPC API and print the returned auth token.

    Python 2 code (print statements, urllib2). Side effects only: prints the
    token on success or an error message on failure; returns None either way.
    """
    #url = 'http://10.10.68.11/zabbix/api_jsonrpc.php'
    url = 'http://172.31.0.5/api_jsonrpc.php'
    header = {"Content-Type": "application/json-rpc"}
    # SECURITY: the Zabbix default Admin/zabbix credentials are hard-coded here.
    value = {"jsonrpc":"2.0","method":"user.login","params": {"user": "Admin","password": "zabbix"},"id": 1}
    try:
        data = json.dumps(value)
        request = urllib2.Request(url, data)
        for key in header:
            request.add_header(key,header[key])
        # SECURITY: eval() on an HTTP response body executes arbitrary code if
        # the server is compromised; this should be json.loads() instead.
        # KeyError is caught below for responses lacking a "result" field.
        print eval(urllib2.urlopen(request).read())["result"]
        #return '17ffc16bbd803845d79da0d13ebb94df'
    except (urllib2.URLError,urllib2.HTTPError, KeyError), e:
        print 'zabbix auth have an error:', e
zabbix_auth()
| [
"tianyongkang@qtonecloud.cn"
] | tianyongkang@qtonecloud.cn |
7bc661d91c56c7e28245b5dcb909e1c98575d890 | d08aedc2c8c437c2354c2ff27fc5c428b2ea2e77 | /quiz_main.py | c038f11a77eafefc0fa67115ec9ffeb0b39ce40f | [] | no_license | gauravjaiswal97/Quiz_Application | 319a2ff4c5c6aeed4d1d692dd06bd78cc8a47b16 | 0be4df72c2eaec958bee58451df6b0254b6f319c | refs/heads/master | 2022-12-17T07:41:13.515219 | 2020-09-20T05:42:57 | 2020-09-20T05:42:57 | 297,010,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,608 | py | from tkinter import *
from tkinter import messagebox,ttk
import time
from dbhelper import DB
from quiz_app import m
class Quiz_gui:
    """Tkinter front-end for the quiz application.

    Builds a single Tk root window and swaps its contents between the login,
    registration, home, topic-selection, quiz and leaderboard screens.
    Instantiating the class blocks in Tk's mainloop until the window closes.
    """
    def __init__(self):
        """Create the DB helper and the root window, then show the login screen."""
        self.db=DB()
        self.m=Tk( )
        self.m.title("quiz")
        self.m.minsize(500, 500)
        self.m.configure(bg='#0a3d62')
        # C=Canvas(self.m, bg="blue", height=250, width=300)
        # filename=PhotoImage(file="C:\\Users\\Designit\\PycharmProjects\\guirough\\trial.png")
        # background_label=Label(self.m, image=filename)
        # background_label.place(x=0, y=0, relwidth=1, relheight=1)
        #
        # C.pack( )
        self.login( )
        # self.cl=Button(self.m,text='close',lambda :self.m.close())
        self.m.mainloop( )
    def colors(self):
        """Define the color palette attributes used by every screen."""
        self.color1='#0a3d62'
        self.button_color='#079992'
        self.button_color1='#ffffff'
        self.font1='white'
        self.font2='black'
        # 0a3d62
        # 182C61
    def login(self):
        """Render the login screen: username/password fields plus Login and Sign in."""
        self.clear()
        self.colors()
        self.label_1=Label(self.m, text='QUIZ GAME', bg=self.color1, fg='white', font=('Times New Roman', 20))
        self.label_1.pack(pady=(42, 92))
        self.user_Frame=Frame(self.m,bg=self.color1)
        self.user_Frame.pack( pady=(15,15))
        self.username=Label(self.user_Frame, text='Username', bg=self.color1, fg='white', font=('Arial', 10))
        self.username.grid(sticky='NW')
        self.user_input=Entry(self.user_Frame, width=40, font=('Arial', 8, 'bold'))
        self.user_input.grid(sticky='s')
        self.pswd_Frame=Frame(self.m, bg=self.color1)
        self.pswd_Frame.pack( pady=(0,25))
        self.password=Label(self.pswd_Frame, text='Password', bg=self.color1, fg='white', font=('Arial', 10)).grid(sticky='NW')
        self.password_input=Entry(self.pswd_Frame, show='*', width=40, font=('Times', 9, 'bold'))
        self.password_input.grid(sticky='s')
        self.frame3=Frame(self.m,bg=self.color1)
        self.frame3.pack()
        self.loginbutton=Button(self.frame3, text='Login',bg=self.button_color,padx=21,pady=1,fg='#ffffff',font=('Times New Roman',12),command=lambda :self.dologin())
        self.loginbutton.pack(padx=(10,20),side=LEFT)
        self.sign_in_button=Button(self.frame3, text='Sign in',bg=self.button_color,padx=21,pady=1,fg='#ffffff',font=('Times New Roman',12),command=lambda : self.registration())
        self.sign_in_button.pack(side=RIGHT,padx=15)
    def dologin(self):
        """Validate the entered credentials against the database.

        On success stores the user's id (first column of the first row) and
        shows the home screen; otherwise pops an error dialog.
        """
        self.user_name=self.user_input.get()
        print(self.user_name)
        password=self.password_input.get()
        # Ask the database layer whether this username/password pair exists;
        # checklogin presumably returns the matching rows — TODO confirm in DB.
        self.data=self.db.checklogin(self.user_name,password)
        if len(self.data)>0:
            # Credentials matched: remember the user id and open the home GUI.
            self.user_id=self.data[0][0]
            self.homegui()
        else:
            messagebox.showerror("Error","Incorrect credentials")
    def clear(self):
        """Destroy every pack-managed widget so the next screen starts clean."""
        for i in self.m.pack_slaves():
            i.destroy()
    def registration(self):
        """Render the sign-up form (email, names, gender, age, password)."""
        self.clear()
        self.label_1=Label(self.m, text='QUIZ GAME', bg=self.color1, fg='white', font=('Arial', 20, 'bold')).pack(
            pady=(42, 72))
        self.frame2=Frame(self.m,bg=self.color1,relief='raised')
        self.frame2.pack(pady=(5,30))
        # email id
        self.email_Frame=Frame(self.frame2, bg=self.color1)
        self.email_Frame.pack(padx=(30,30),pady=(20,20))
        self.email=Label(self.email_Frame, text='Email ID', bg=self.color1, fg='white',
                         font=('Arial', 10)).grid(
            sticky='NW')
        self.email_input=Entry(self.email_Frame, width=60, font=('Times', 9, 'bold'))
        self.email_input.grid()
        # username
        self.username_Frame=Frame(self.frame2, bg=self.color1)
        self.username_Frame.pack(padx=(30,30),pady=(20,20))
        self.username=Label(self.username_Frame, text='Username', bg=self.color1, fg='white', font=('Arial', 10))
        self.username.grid(sticky='NW')
        self.username_input=Entry(self.username_Frame, width=60, font=('Arial', 8, 'bold'))
        self.username_input.grid()
        # first name
        self.First_Name_Frame=Frame(self.frame2,bg=self.color1)
        self.First_Name_Frame.pack(padx=(30,30),pady=(20,20))
        self.first_name=Label(self.First_Name_Frame, text='First Name', bg=self.color1, fg='white', font=('Arial', 10))
        self.first_name.grid(sticky='NW')
        self.first_name_input=Entry(self.First_Name_Frame, width=60, font=('Arial', 8, 'bold'))
        self.first_name_input.grid(sticky='s')
        #last name
        self.Last_Name_Frame=Frame(self.frame2, bg=self.color1)
        self.Last_Name_Frame.pack(padx=(30,30),pady=(20,20))
        self.Last_Name=Label(self.Last_Name_Frame, text='Last Name', bg=self.color1, fg='white', font=('Arial', 10)).grid(
            sticky='NW')
        self.Last_Name_input=Entry(self.Last_Name_Frame, width=60, font=('Times', 9, 'bold'))
        self.Last_Name_input.grid(sticky='s')
        #Age and Gender Frame
        # self.age_gender_frame=Frame(self.m,bg=self.color1)
        # self.age_gender_frame.pack(pady=(10,30))
        #Gender
        self.gender_frame=Frame(self.frame2, bg=self.color1)
        self.gender_frame.pack(padx=(30,30),pady=(20,20))
        self.gender=Label(self.gender_frame, text='Gender', bg=self.color1, fg='white', font=('Arial', 10))
        self.gender.grid(sticky='NW')
        self.gender_input=ttk.Combobox(self.gender_frame,width=57,values=('Male','Female','Transgender'))
        self.gender_input.grid(sticky='s')
        # Age
        self.age_frame=Frame(self.frame2, bg=self.color1)
        self.age_frame.pack(padx=(30,30),pady=(20,20))
        self.age=Label(self.age_frame, text='Age', bg=self.color1, fg='white', font=('Arial', 10)).grid(sticky='NW')
        self.age_input=Spinbox(self.age_frame,width=57,from_=8,to=101)
        self.age_input.grid(sticky='s')
        #setting password
        set_pwd=Frame(self.frame2, bg=self.color1)
        set_pwd.pack(pady=(20,20))
        self.new_password=Label(set_pwd, text='Password', bg=self.color1, fg='white', font=('Arial', 10)).grid(sticky='NW')
        self.new_password_input=Entry(set_pwd, show='*', width=60)
        self.new_password_input.grid(sticky='s')
        re_set_pwd=Frame(self.frame2, bg=self.color1)
        re_set_pwd.pack(pady=(20,20))
        self.re_password=Label(re_set_pwd, text='Re-enter Password', bg=self.color1, fg='white', font=('Arial', 10)).grid(sticky='NW')
        self.re_password_input=Entry(re_set_pwd, show='*', width=60)
        self.re_password_input.grid(sticky='s')
        register=Button(self.frame2,text='Register', bg=self.button_color, fg='white', font=('Times New Roman', 12),command=lambda :self.do_registration())
        register.pack(pady=(25,20))
        go_back=Button(self.frame2,text='Go back', bg=self.button_color, fg='white', font=('Times New Roman', 12),command=lambda :self.login())
        go_back.pack(pady=(12,40))
    def do_registration(self):
        """Collect the sign-up form values and create the account.

        Requires the two password fields to match; performRegistration's 0
        return value is treated as "user already exists".
        NOTE(review): the password is passed to the DB layer as plain text —
        confirm whether hashing happens inside performRegistration.
        """
        email=self.email_input.get()
        username=self.username_input.get( )
        fname=self.first_name_input.get( )
        lname=self.Last_Name_input.get( )
        gender=self.gender_input.get( )
        age=self.age_input.get( )
        if self.new_password_input.get()==self.re_password_input.get():
            password=self.new_password_input.get( )
            response=self.db.performRegistration(email, username, fname, lname, gender, age, password)
            if (response == 0):
                messagebox.showerror("Error", "User already exists")
            else:
                messagebox.showinfo("Registration Successful", "Successfully Registered")
                time.sleep(1)
                self.login( )
        else:
            messagebox.showerror(' ', 'Password does not match !')
    def display(self):
        """Debug helper: print the email field's current value (unused in the UI)."""
        print(self.email_input.get())
    def homegui(self):
        """Render the home screen: Take Quiz / Leaderboard plus Log out and Edit Profile."""
        self.clear( )
        self.colors( )
        logout=Button(self.m, text='Log out', bg=self.color1,relief='flat',padx=10,activebackground=self.color1,activeforeground=self.font1, fg=self.font1, font=('Times New Roman', 12),
                      command=lambda: self.login( ))
        logout.pack(anchor='ne',side=RIGHT)
        editprofile = Button(self.m, text='Edit Profile', bg=self.color1, relief='flat', padx=5,pady=2,
                             activebackground=self.color1,
                             activeforeground=self.font1, fg=self.font1, font=('Times New Roman', 12),
                             command=lambda: self.editprofileGUI())
        editprofile.pack(anchor='nw',side=LEFT)
        self.label_1=Label(self.m, text='QUIZ GAME', bg=self.color1, fg='white', font=('Times New Roman', 20))
        self.label_1.pack(pady=(42, 92),padx=(0,12))
        self.frame4=Frame(self.m, bg=self.color1)
        self.frame4.pack( )
        self.quiz_button=Button(self.frame4, text='Take Quiz',bd=5, bg=self.button_color1, padx=49, pady=1, fg=self.font2,relief='raised',font=('Times New Roman', 14),command=lambda :self.topics())
        self.quiz_button.pack(pady=(10,25))
        self.leaderboard_button=Button(self.frame4, text='Leaderboard', bg=self.button_color1,bd=5, padx=41, pady=1, fg=self.font2,relief='raised',font=('Times New Roman', 14),command=lambda :self.show_leaderboard())
        self.leaderboard_button.pack(pady=(25,35))
    def show_leaderboard(self):
        """Render the leaderboard: one text row per entry from the database."""
        self.clear()
        logout=Button(self.m, text='Log out', bg=self.color1, relief='flat', padx=10, activebackground=self.color1,
                      activeforeground=self.font1, fg=self.font1, font=('Times New Roman', 12),
                      command=lambda: self.login( ))
        logout.pack(anchor='ne')
        title=Label(self.m,text='Leaderboard', bg=self.color1, fg='white', font=('Times New Roman', 30))
        title.pack(pady=(20,25))
        # time.sleep(2)
        # reading name from leaderboard
        lframe=Frame(self.m,bd=5,relief='raised',padx=5)
        lframe.pack(expand=1)
        leadernames=self.db.retrive_leaderboard_names()
        # Pad usernames with spaces so the score column lines up visually.
        string="ID" + "  " + "Username" + ((23 - len("Username")) * " ") + "score "
        Label(lframe, text=string, font=('Times New Roman', 13)).pack( pady=(0,5))
        for i in leadernames:
            string=str(i[0])+". "+i[1]+((30-len(i[1]))*" ")+str(i[2])+" \n"
            Label(lframe,text=string,font=('Times New Roman', 13)).pack()
        # displaying names on window
        #home button
        home_button=Button(self.m,text='Home',bg=self.color1,fg=self.font1,relief='flat', font=('Times New Roman', 20),command=lambda :self.homegui())
        home_button.pack(pady=(25,15))
    def topics(self):
        """Render the topic chooser; each button starts a quiz with a topic flag."""
        self.clear( )
        self.colors( )
        logout=Button(self.m, text='Log out', bg=self.color1, relief='flat', padx=10, activebackground=self.color1,
                      activeforeground=self.font1, fg=self.font1, font=('Times New Roman', 12),
                      command=lambda: self.login( ))
        logout.pack(anchor='ne')
        #title
        self.label_1=Label(self.m, text='QUIZ GAME', bg=self.color1, fg='white', font=('Times New Roman', 20))
        self.label_1.pack(pady=(16, 92))
        #action button
        self.frame4=Frame(self.m, bg=self.color1)
        self.frame4.pack( )
        # Flag 1 = History questions, flag 2 = GK questions (see start_quiz).
        self.history=Button(self.frame4, text='History', bd=5, bg=self.button_color1, padx=49, pady=1,
                            fg=self.font2, relief='raised',
                            font=('Times New Roman', 14), command=lambda: self.start_quiz(1) )
        self.history.pack(pady=(10, 25))
        self.GK=Button(self.frame4, text='GK', bg=self.button_color1, bd=5, padx=64, pady=1,
                       fg=self.font2, relief='raised',
                       font=('Times New Roman', 14), command=lambda: self.start_quiz(2))
        self.GK.pack(pady=(25, 35))
        #home button
        home_button=Button(self.m, text='Home', bg=self.color1, fg=self.font1, relief='flat', font=('Times New Roman', 20),
                           command=lambda: self.homegui( ))
        home_button.pack(pady=(25, 15))
    def start_quiz(self,flag):
        """Load the question set for the chosen topic and show the first question.

        NOTE(review): m(flag) comes from quiz_app; it presumably returns the
        list of question objects for the topic — confirm in quiz_app.
        """
        self.clear()
        self.response=m(flag)
        # index = current question position, r = running count of right answers.
        self.index,self.r=0,0
        self.start()
    #displaying question
    def display_question(self,r):
        """Render one question object `r` with four radio-button answers."""
        # self.frame1=Frame(self.m,bg='#8B008B',width=1500,height=200)
        # self.frame1.pack(expand=False,fill=None)
        # 8B008B
        #question label
        Label(self.m, bg='#0a3d62', fg='#ffffff', text=r.questions, font=('Times New Roman', 19)).pack(pady=50)
        self.var=StringVar( )
        #options for the question
        b1=Radiobutton(self.m, bg='#0a3d62', fg='#ffffff', text=r.answers[0], variable=self.var,
                       value=r.answers[0], command=lambda :self.check(r))
        b2=Radiobutton(self.m, text=r.answers[1], bg='#0a3d62', fg='#ffffff', variable=self.var,
                       value=r.answers[1], command=lambda :self.check(r))
        b3=Radiobutton(self.m, text=r.answers[2], variable=self.var, bg='#0a3d62', fg='#ffffff',
                       value=r.answers[2], command=lambda :self.check(r))
        b4=Radiobutton(self.m, text=r.answers[3], variable=self.var, value=r.answers[3], bg='#0a3d62',
                       fg='#ffffff', command=lambda :self.check(r))
        b1.pack(pady=10)
        b2.pack(pady=10)
        b3.pack(pady=10)
        b4.pack(pady=(10,20))
        # Quit asks for confirmation; answering "no" only prints a debug line.
        Button(self.m,text='Quit',padx=20,relief='raised',bd=3,font=('Times New Roman',12),command=lambda :self.topics() if messagebox.askquestion('.','Do you want to quit the Quiz ?') == 'yes' else print('hey works')).pack(pady=10)
    #checking whether the answer is correct
    def check(self,r):
        """Score the selected answer against r.correct_answer and advance.

        NOTE(review): after(800, self.change()) CALLS change() immediately and
        passes its None result as the callback; for a delayed jump it should
        be after(800, self.change).
        """
        l1=Label(self.m)
        l1.pack()
        if self.var.get( ) == r.correct_answer:
            self.r+=1
            l1.configure(text='Right')
            l1.after(800, self.change())
        else:
            l1.configure(text='Wrong')
            l1.after(800, self.change())
    #jumps to next question
    def change(self):
        """Clear the screen and render the next question (or the result)."""
        self.clear()
        self.start()
    #starts the quiz
    def start(self):
        """Show the next question, or the final score once all are answered."""
        fr1=Frame(self.m,bg=self.color1)
        #if all questions are attempted then display a message
        if self.index == len(self.response):
            result='Your score is ' + str(self.r) + ' out of ' + str(
                len(self.response)) + '\n\n' + 'Thank you for attending the quiz.'
            Label(self.m, text=result,bd=5,pady=15,font=('Times New Roman',12),relief='groove').pack(pady=(200,90),ipadx=50 )
            fr1.pack(pady=25)
            Button(fr1,text='Home',padx=38,bg=self.button_color1,font=('Times New Roman',12),command=lambda :self.homegui()).pack(side=LEFT,padx=15)
            Button(fr1,text='Leaderboard',padx=19,bg=self.button_color1,font=('Times New Roman',12),command=lambda :self.show_leaderboard()).pack(side=RIGHT,padx=15)
            # Persist the final score for the leaderboard.
            res=self.db.add_score(self.user_name, self.r)
        #show next question if question are remaining
        else:
            self.display_question(self.response[self.index])
            self.index+=1
    def editprofileGUI(self):
        """Render the profile-editing form (age, username, new password).

        NOTE(review): self.passwordInput is assigned twice — the confirm field
        overwrites the password field, so editProfile reads the CONFIRM entry
        and the two passwords are never compared; the second Entry should
        probably be self.re_passwordInput.
        """
        self.clear()
        self.label_1 = Label(self.m, text='QUIZ GAME', bg=self.color1, fg='white', font=('Times New Roman', 20))
        self.label_1.pack(pady=(42, 72))
        self.ageLabel = Label(self.m, text="Set New Age:", bg=self.color1, fg='white')
        self.ageLabel.pack(padx=(10,210))
        self.ageInput = Entry(self.m, width=45, font=('Arial', 8, 'bold'))
        self.ageInput.pack(pady=(0,23))
        self.usernameLabel = Label(self.m, text="Enter New Username:", bg=self.color1, fg='white')
        self.usernameLabel.pack(padx=(10,166))
        self.usernameInput = Entry(self.m, width=45, font=('Arial', 8, 'bold'))
        self.usernameInput.pack(pady=(0,23))
        self.passwordLabel = Label(self.m, text="Enter New Password:", bg=self.color1, fg='white')
        self.passwordLabel.pack(padx=(10,167))
        self.passwordInput = Entry(self.m,show='*', width=45, font=('Arial', 8, 'bold'))
        self.passwordInput.pack(pady=(0,23))
        self.re_passwordLabel = Label(self.m, text="Confirm Your New Password: ", bg=self.color1, fg='white')
        self.re_passwordLabel.pack(padx=(10,121))
        self.passwordInput = Entry(self.m,show='*', width=45, font=('Arial', 8, 'bold'))
        self.passwordInput.pack(pady=(0,40))
        self.editBtn = Button(self.m, text="Submit",font=('Times New Roman', 12), command=lambda: self.editProfile())
        self.editBtn.pack(pady=15,ipadx=14)
        # home button
        back_button = Button(self.m, text='Home', bg=self.color1, fg=self.font1, relief='flat',
                             font=('Times New Roman', 20),
                             command=lambda: self.homegui())
        back_button.pack(pady=(25, 15))
    def editProfile(self):
        """Submit the profile form values for the logged-in user (self.user_id)."""
        age=self.ageInput.get()
        username=self.usernameInput.get()
        password=self.passwordInput.get()
        response=self.db.editProfile(age,username,password,self.user_id,)
        if response==1:
            #destination="C:\\Codes\\Python Codes\\Pycharm\\tinder\\images\\"+self.pathname.split("/")[-1]
            #shutil.copyfile(self.pathname, destination)
            messagebox.showinfo("Success","Profile updated successfully")
        else:
            messagebox.showerror("Error","Some error occured")
run=Quiz_gui( )
| [
"gauravjaiswal4499@gmail.com"
] | gauravjaiswal4499@gmail.com |
3fca2e5fb0daf10fb1dfa277a7ae5ebde42e8964 | 38510f6cf958883e8bef02e5869f6c3fa9f75619 | /leetcode/394_decodeString.py | 933849db3575dcc4e16ea5905b03c433e8c7b902 | [] | no_license | kapitsa2811/leetcode-algos-python | c6f6521789eaab521db7e687bd3418f35bc5f6e1 | 19b040360b03c883c2e3ff6dd5c836ff40124137 | refs/heads/master | 2022-11-20T22:23:48.972259 | 2020-07-19T03:26:54 | 2020-07-19T03:26:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,172 | py | """
Given an encoded string, return its decoded string.
The encoding rule is: k[encoded_string], where the encoded_string inside the square brackets is being repeated exactly k times. Note that k is guaranteed to be a positive integer.
You may assume that the input string is always valid; No extra white spaces, square brackets are well-formed, etc.
Furthermore, you may assume that the original data does not contain any digits and that digits are only for those repeat numbers, k. For example, there won't be input like 3a or 2[4].
Input: s = "2[abc]3[cd]ef"
Output: "abcabccdcdcdef"
Algorithm:
1) Iterate through string
2) if char is num - append to num string (could be more than one digit)
3) if char is letter - append to res string
4) if char is [ - append num and letter strings to num stack and letter stack ++ reset num and letter string to ""
5) if char is ] - pop num and letter strings (this will be the previous string) + multiply existing string by num and append popped letter string to front
Result:
Runtime: 32 ms, faster than 59.45% of Python3 online submissions for Decode String.
Memory Usage: 14 MB, less than 9.86% of Python3 online submissions for Decode String.
"""
class Solution:
    def decodeString(self, s: str) -> str:
        """Expand an encoded string of the form k[segment].

        Digits accumulate into a repeat count; '[' pushes the count and the
        text built so far onto stacks; ']' pops them and splices the repeated
        segment back onto the saved prefix. Nesting works because each '['
        gets its own stack frame.
        """
        if not s:
            return ""
        count_stack = []
        prefix_stack = []
        current = ""
        repeat = ""
        for ch in s:
            if ch.isdigit():
                # Multi-digit counts are built up one character at a time.
                repeat = repeat + ch
            elif ch == "[":
                prefix_stack.append(current)
                count_stack.append(int(repeat))
                current = ""
                repeat = ""
            elif ch == "]":
                current = prefix_stack.pop() + current * count_stack.pop()
            else:
                current = current + ch
        return current
| [
"dpark068@gmail.com"
] | dpark068@gmail.com |
817ae7aaf5f6ab259c4e3c83fd9685f8337e1205 | 05f9045cdb134b1ddb265f55543d096937f6e34e | /Nutrin/Consulta/Services/Ocupado/createOcupado.py | d7a50aa3fa1bb0a3a102f43638996adf3e60352f | [] | no_license | EmersonCarbono/API-Nutrin | 941241ca6b48371d89717a5e33381793776937fb | b4e1ef1875cfc70e29441cf8696fb2c9a9ca60f5 | refs/heads/master | 2022-12-10T21:49:30.568277 | 2018-11-26T18:04:33 | 2018-11-26T18:04:33 | 136,726,462 | 1 | 2 | null | 2022-09-16T17:48:26 | 2018-06-09T13:40:10 | Python | UTF-8 | Python | false | false | 1,122 | py | from Nutrin import db
from Nutrin.Consulta.Services.Ocupado.readOcupado import readOcupadoNoPeriodo
from Nutrin.Consulta.Model.Ocupado import Ocupado
def createOcupado(periodo_id, hI, hF):
    """Reserve the time slot [hI, hF] inside a period.

    :param periodo_id: id of the period being booked
    :param hI: start-time string; the hour is taken from the first two chars
    :param hF: end-time string
    :return: (True, msg) when the slot was stored, (False, msg) when an
             existing slot already starts at the same hour
    """
    status, dado = readOcupadoNoPeriodo(periodo_id)
    if status:
        # Reject as soon as any existing slot starts at the same hour.
        for ocup in dado:
            if int(hI[:2]) == int(ocup['horaI'][:2]):
                return False, "Horário esta ocupado"
    # No clash (or nothing booked yet in this period): persist the slot.
    # This also fixes the original's UnboundLocalError when `dado` was an
    # empty list (the `permissao` flag was only assigned inside the loop).
    o = Ocupado(periodo_id, hI, hF)
    db.session.add(o)
    db.session.commit()
    return True, "Horário foi preenchido com sucesso"
| [
"gabiconde@users.noreply.github.com"
] | gabiconde@users.noreply.github.com |
0ca5a72de3b3dc06796c0fa1adff4f3bd021f9dc | 0b3bfe36a69a09cc30a8e7ff4880cdc48d2474ca | /lpthw/ex15.py | 282335c9111fc7d2af9599360c81409611e629d2 | [
"MIT"
] | permissive | GoudaOnBeamPro/learning | 765af1e80d7a275021bb7838cbd0d58a628955bd | 1d1767ea78a8f9f72275b18147d47bfc44a1696e | refs/heads/master | 2021-05-31T22:47:05.774325 | 2016-06-26T20:04:29 | 2016-06-26T20:04:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | # This get s the argv function thing from the sys module
from sys import argv
# This unpacks argv into two variables
script, filename = argv
# This creates a variable called txt and the file name from argv is inserted
# The vriable is now a file object.
txt = open(filename)
# this orints the filename unpacked from argv
print "Here's your file %r:" % filename
# This is hwere shit gets crazy. This tells the txt file object to read itself
# and the result is printed (from the rint func, not by itself)
print txt.tell()
# print "Type the filename again:"
# this is where we do it again but take input from the raw_input func
# file_again = raw_input("> ")
# This opens the file in the variable, making it a file object
# txt_again = open(file_again)
# Prints it with the read func on itself. rememebr, the .read() doens't make
# it orint, the print func does
# print txt_again.read()
| [
"jaredm@openmailbox.org"
] | jaredm@openmailbox.org |
f9c2d367eacb2c5165444c10a3e65cdace2510d4 | bb0f07404380f0d9bb9d6c8d4537c9c46ec62ad5 | /cross_validation/TrialsRecorder.py | 8a900714ea285c9ee87541a98ab631ac9b33dcc8 | [] | no_license | abell25/EnsembleMachine | 846c4b0e958df097807b2a23783a7f3197387bda | 669ed534133106d625bd0e700315713efe314dcd | refs/heads/master | 2021-01-10T14:44:54.199560 | 2016-01-26T08:06:06 | 2016-01-26T08:06:06 | 44,482,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | from pymongo import MongoClient
from bson.binary import Binary
import pickle
class TrialsRecorder:
    """Persists hyper-parameter search trials in the "Trials" MongoDB database.

    One collection per dataset; the array-valued fields ``y_oof`` and
    ``y_val`` are pickled into BSON ``Binary`` blobs when stored and
    unpickled again when read back.
    """
    def __init__(self, dataset_name, host="localhost", port=27017):
        self.client = MongoClient(host, port)
        self.db_name = "Trials"
        self.dataset_name = dataset_name
        if not dataset_name:
            raise Exception("dataset_name is required to save trials to mongodb!")
        self.trials = self.client[self.db_name][dataset_name]
    def saveTrial(self, trial):
        """Insert one trial document, pickling its prediction arrays in place."""
        for field in ("y_oof", "y_val"):
            if field in trial:
                trial[field] = Binary(pickle.dumps(trial[field], protocol=2), subtype=128)
        self.trials.insert_one(trial)
    def getAllTrials(self, query=None):
        """Yield every stored trial matching *query*, arrays unpickled."""
        for document in self.trials.find(query):
            for field in ("y_oof", "y_val"):
                if field in document:
                    document[field] = pickle.loads(document[field])
            yield document
    def getAllTrialsWithScoreAtLeast(self, score):
        """Yield trials whose score is >= *score*."""
        return self.getAllTrials({"score": {"$gte": score}})
    def getAllTrialsWithScoreAtMost(self, score):
        """Yield trials whose score is <= *score*."""
        return self.getAllTrials({"score": {"$lte": score}})
    def clear_all_trials(self):
        """Drop this dataset's whole trial collection."""
        self.client[self.db_name].drop_collection(self.dataset_name)
    @staticmethod
    def showTables(client=None):
        """Print every database/collection with its document count and size."""
        if not client:
            client = MongoClient()
        for db_name in client.database_names():
            print("DB: {0}".format(db_name))
            db = client[db_name]
            for tbl_name in db.collection_names():
                tbl = db[tbl_name]
                print("    collection: {0} (count: {1}, size: {2})".format(tbl_name, tbl.count(), db.command("collstats", tbl_name)['size']))
| [
"anthbell@ebay.com"
] | anthbell@ebay.com |
7914d464a0555795ee71c006de4f327cf52c8442 | 1e648983311c93a14b20516bd7acaf183ed12b10 | /Bioinformatics Stronghold/Complementing_a_Strand_of_DNA.py | b6bff42215665245ce3c28e6d6b54e81776b51a2 | [] | no_license | biomathcode/Rosalind_solutions | 5279b852b15e634dea57822ce12d63b5c3cd6715 | c84bbd570b22d8de4d29d417c99d2a4b96170453 | refs/heads/master | 2023-02-26T22:07:12.119786 | 2021-01-25T11:54:45 | 2021-01-25T11:54:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | """FINDIND THE REVERSE COMPLEMENT"""
## AAAACCCGGT
## ACCGGGTTTT
# Read the DNA sequence from stdin (expects uppercase A/C/G/T characters).
s = input("Type the DNA sequence here")
# Map each base to its Watson-Crick complement (A<->T, C<->G).
complement = s.translate(str.maketrans({'A': 'T', 'T':'A', 'C': 'G', 'G' : 'C'}))
def reverse(string):
    """Return *string* with its characters in reverse order."""
    return string[::-1]
reverse_complement = reverse(complement)
print(reverse_complement)
# Use a context manager so the output file is flushed and closed even if the
# write fails (the original leaked the handle and shadowed the name `file`).
with open('reverse_complement.txt', 'w') as out_file:
    out_file.write(reverse_complement)
| [
"sharma.pratik2016@gmail.com"
] | sharma.pratik2016@gmail.com |
ea8b745d92f8a3f2fdd060e2fe99c294e95f0a11 | 79082d4a89215de4b900595dbcc48811604e8e83 | /RNN_Stopping_Grid/test2.py | 1373dd29e0bd758734f30140e4a6af24bec2c17e | [] | no_license | Mostafizor/Bioprocess-Simulation-using-Machine-Learning | df0078803e27c76b6345e433f2f8f33e408bb08c | fcc8f5656c125737f6d1293f5ae42557ab53060c | refs/heads/master | 2022-12-20T14:09:07.565532 | 2020-09-06T18:55:57 | 2020-09-06T18:55:57 | 238,298,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py | import torch
import numpy as np
import pandas as pd
from torch.autograd import Variable
def test(test_inputs, test_labels, net):
    """Evaluate `net` on a held-out set and return the average MSE.

    Runs the recurrent network once over the whole test set without gradients,
    accumulates squared errors for the three output channels (columns 0/1/2,
    referred to as X, N, L), and averages six per-channel MSE values.

    NOTE(review): the [0:12] / [12:24] slices assume the flattened error lists
    cover exactly 2 experiments of 12 timesteps each — confirm against the
    data pipeline before reusing with other batch shapes.
    """
    net.eval()
    # Variable is a no-op wrapper in modern PyTorch (deprecated since 0.4).
    test_X = Variable(torch.Tensor(test_inputs))
    test_y = Variable(torch.Tensor(test_labels))
    hidden = net.init_hidden(test_X)
    with torch.no_grad():
        net_out, _ = net(test_X, hidden) # Hidden state not required for manual feeding
    squared_error_X = []
    squared_error_N = []
    squared_error_L = []
    # Walk every (sequence, timestep) pair and collect squared errors per channel.
    for index1, element in enumerate(test_y):
        for index2, row in enumerate(element):
            X_error = row[0] - net_out[index1][index2][0]
            N_error = row[1] - net_out[index1][index2][1]
            L_error = row[2] - net_out[index1][index2][2]
            squared_error_X.append(X_error**2)
            squared_error_N.append(N_error**2)
            squared_error_L.append(L_error**2)
    # Per-channel MSE for experiment 1 (first 12 steps) and experiment 2.
    MSE_X1 = sum(squared_error_X[0:12])/12
    MSE_N1 = sum(squared_error_N[0:12])/12
    MSE_L1 = sum(squared_error_L[0:12])/12
    MSE_X2 = sum(squared_error_X[12:24])/12
    MSE_N2 = sum(squared_error_N[12:24])/12
    MSE_L2 = sum(squared_error_L[12:24])/12
    MSE_list = [MSE_X1, MSE_N1, MSE_L1, MSE_X2, MSE_N2, MSE_L2]
    AVG_MSE = sum(MSE_list)/6
    return AVG_MSE
| [
"mostafizor1997@gmail.com"
] | mostafizor1997@gmail.com |
29403ba36aaf82ac2df41c8cc8ef663839da750f | a8066523f886ac3aad396fa2b2c8636c606d7b0d | /cozmo_taste_game/plate/colorful_plate.py | 2b90c45d22ce5b6f8ca6758db4464315761be247 | [] | no_license | scalisesamuel/cozmo_group_project_software_engineering | 7c77976e662c229c2a724d61d40a7a5390ebd4b3 | 95b1e69fe9aa7f33d05403737da9bf44a6e4013f | refs/heads/master | 2020-08-02T14:42:18.220880 | 2019-09-27T20:10:56 | 2019-09-27T20:10:56 | 211,393,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | from .plate import Plate
from cozmo_taste_game import FoodProp
from typing import List
class ColorfulPlate(Plate):
    """Colorful plate class: the goal is to fill the plate with colorful foods."""

    def __init__(self):
        super().__init__()
        # Number of distinct colors required before the plate counts as full.
        self.goal_num_colors = 4

    def add_food(self, food: FoodProp) -> None:
        """Adds a food to the plate

        :param food: A food prop object to add to the plate
        :raises ValueError: if a food of the same color is already on the plate
        :return: None
        """
        if not self.can_place_food(food):
            # ValueError (a subclass of Exception, so existing handlers still
            # match) replaces the original bare Exception.
            raise ValueError('Cannot place a food with that color on the colorful plate')
        self.foods.append(food)

    def can_place_food(self, food_to_be_placed: FoodProp) -> bool:
        """Determines whether or not food can be placed on the plate.

        A food is placeable only when its color is not yet represented.

        :param food_to_be_placed: The food prop object to be placed on the plate
        :return: bool
        """
        return food_to_be_placed.color not in self.colors

    @property
    def colors(self) -> List[str]:
        """The colors currently on the plate.

        :return: The colors currently on the plate.
        """
        return [food.color for food in self.foods]

    @property
    def is_full(self) -> bool:
        """Whether goal_num_colors distinct colors are present.

        :return: bool
        """
        return self.goal_num_colors == len(self.colors)
| [
"noreply@github.com"
] | noreply@github.com |
496484889ad0aab3a5bb6ae3eb25a685ddb7a73a | e256c8adec64e0423e9f1e77f09e2b32623af433 | /socien_api_server/route/__m_equip/parklist.py | 850e2042cb7f52be1719a7e0e9ea5be58d7eb51e | [] | no_license | Socien/GreenGym_Admin | edce3a9e80631b00257b0ffb9b96d7635d547bdd | 8b478439bdb12c973b9d9085cdd17be92d475fd3 | refs/heads/master | 2023-05-14T22:06:53.641295 | 2021-06-05T21:05:09 | 2021-06-05T21:05:09 | 371,674,992 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | #공원list
from flask_restx import Resource
from ._m_equip import Mequip
import app
from sqlalchemy import text
@Mequip.route('/parklist')
class ParkList(Resource):
    """Read-only endpoint that returns every row of the park table."""

    @Mequip.response(200, 'Success')
    @Mequip.response(500, 'Internal Server Error')
    def get(self):
        query = 'SELECT * FROM park'
        rows = app.app.database.execute(text(query),).fetchall()
        # An empty park table is reported as a server error.
        if not rows:
            return {
                'code': 'error',
                'message': '(park table is empty) error'
            }, 500
        parks = [
            {
                'p_id': row['p_id'],
                'p_name': row['p_name'],
                'x': row['x'],
                'y': row['y'],
                'website': row['website']
            }
            for row in rows
        ]
        # NOTE: 'successs' (sic) is preserved; clients may already match on it.
        return {
            'code': 'successs',
            'message': '',
            'response': {
                'List': parks
            }
        }, 200
"kyh970211@ajou.ac.kr"
] | kyh970211@ajou.ac.kr |
5174a44af8c926802861953b0b1363023e845e9b | 122957cf3213ffc8d8d04a9e75d3d634c0990d06 | /conduit/apps/authentication/models.py | 69440feb0e84ce5b7be71da0b7b9d5edae18de20 | [] | no_license | DavidMaximaWang/djangoAPIProject | 8fd4917e54e4850ef5a51d7f38b33bab51273515 | 7a03a473db27d6ccab204679e1d84791da750277 | refs/heads/master | 2023-01-09T03:53:39.614231 | 2019-10-24T21:00:59 | 2019-10-24T21:00:59 | 216,703,907 | 0 | 0 | null | 2023-01-07T10:56:35 | 2019-10-22T02:11:41 | Python | UTF-8 | Python | false | false | 4,852 | py | import jwt
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager, PermissionsMixin
)
from django.db import models
from conduit.apps.core.models import TimestampedModel
class UserManager(BaseUserManager):
    """
    Custom manager for the `User` model.

    Django requires custom user models to define their own manager.
    Inheriting from `BaseUserManager` supplies the stock manager machinery;
    we only provide the `create_user` / `create_superuser` entry points.
    """

    def create_user(self, username, email, password=None):
        """Create, save and return a `User` from the given credentials."""
        if username is None:
            raise TypeError('Users must have a username.')
        if email is None:
            raise TypeError('Users must have an email address.')

        new_user = self.model(
            username=username,
            email=self.normalize_email(email),
        )
        new_user.set_password(password)
        new_user.save()
        return new_user

    def create_superuser(self, username, email, password):
        """Create and return a `User` flagged as staff and superuser."""
        if password is None:
            raise TypeError('Superusers must have a password.')

        superuser = self.create_user(username, email, password)
        superuser.is_superuser = True
        superuser.is_staff = True
        superuser.save()
        return superuser
class User(AbstractBaseUser, PermissionsMixin, TimestampedModel):
    """Custom user model: logs in with a username, contacted via email."""

    # Human-readable unique identifier; indexed to speed up lookups.
    username = models.CharField(db_index=True, max_length=255, unique=True)

    # Contact address; unique and indexed as well.
    email = models.EmailField(db_index=True, unique=True)

    # Accounts are deactivated instead of deleted so their data is retained
    # for analysis; inactive users simply stop showing up on the site.
    is_active = models.BooleanField(default=True)

    # Expected by Django to decide who may log into the admin site;
    # false for most users.
    is_staff = models.BooleanField(default=False)

    # The `USERNAME_FIELD` property tells Django which field to log in with.
    USERNAME_FIELD = 'username'
    # REQUIRED_FIELDS = ['email']

    # Objects of this type are managed by the custom UserManager above.
    objects = UserManager()

    def __str__(self):
        """
        Returns a string representation of this `User`.

        This string is used when a `User` is printed in the console.
        """
        return self.email

    @property
    def token(self):
        """
        Allows us to get a user's token by calling `user.token` instead of
        `user._generate_jwt_token()`.

        The `@property` decorator above makes this possible. `token` is
        called a "dynamic property".
        """
        return self._generate_jwt_token()

    def get_full_name(self):
        """
        This method is required by Django for things like handling emails.
        Since we do not store the user's real name, we return their
        username instead.
        """
        return self.username

    def get_short_name(self):
        """
        This method is required by Django for things like handling emails.
        Since we do not store the user's real name, we return their
        username instead.
        """
        return self.username

    def _generate_jwt_token(self):
        """
        Generates a JSON Web Token that stores this user's ID and has an
        expiry date set to 60 days into the future.
        """
        dt = datetime.now() + timedelta(days=60)

        # int(dt.timestamp()) is portable; the original strftime('%s') is a
        # glibc extension that is unavailable on e.g. Windows and would make
        # int() fail there.
        token = jwt.encode({
            'id': self.pk,
            'exp': int(dt.timestamp())
        }, settings.SECRET_KEY, algorithm='HS256')

        return token.decode('utf-8')  # PyJWT < 2 returns bytes here
"qunlingwang@gmail.com"
] | qunlingwang@gmail.com |
c8a8f30335dc23ab837f1ee123fbde87fd5009b9 | 49900ba50d4f6c979d6d433577828c8007973125 | /data_utils/ner.py | 597d37cdb973d60f1d05f4fd35b70b226eb1faac | [] | no_license | weizhenzhao/cs224d_nlp_problem_set2 | 9661414965a58b97113f828a47932c5b9d8411df | 302f0e53cdd88147a5c1727d06f0be18270d8a2a | refs/heads/master | 2021-10-22T18:22:31.063591 | 2019-03-12T14:03:36 | 2019-03-12T14:03:36 | 104,356,708 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | ##
# Utility functions for NER assignment
# Assigment 2, part 1 for CS224D
##
from data_utils.utils import invert_dict
from numpy import *
def load_wv(vocabfile, wvfile):
wv = loadtxt(wvfile, dtype=float)
with open(vocabfile) as fd:
words = [line.strip() for line in fd]
num_to_word = dict(enumerate(words))
word_to_num = invert_dict(num_to_word)
return wv, word_to_num, num_to_word
def save_predictions(y, filename):
"""Save predictions, one per line."""
with open(filename, 'w') as fd:
fd.write("\n".join(map(str, y)))
fd.write("\n") | [
"958904120@qq.com"
] | 958904120@qq.com |
004c698ad82166a4cfbe72fc87221ff0316da9c8 | e9a527123db5290a15ecb092b254ff447390379e | /modules/simclr.py | eb9e9cbcb0e677814083ba22154abb34c0a9f47a | [
"MIT"
] | permissive | yhliu918/simclr-noisy-label | b52d945fa6fb2c24515c5179dcdcb892d702afaf | c80d3d061f1d3de3b1692fb40288d36dbae26629 | refs/heads/master | 2022-11-06T01:10:08.605528 | 2020-06-15T15:33:31 | 2020-06-15T15:33:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,856 | py | import torch.nn as nn
import torchvision
import torch
import torch.nn.init as init
import torch.nn.functional as F
import torch.optim as optim
class Identity(nn.Module):
    """No-op module: forwards its input unchanged (used to strip a head)."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        # Pass-through: output is the very same object as the input.
        return x
import math
def call_bn(bn, x):
    """Apply batch-norm layer ``bn`` to tensor ``x`` (thin wrapper)."""
    normalized = bn(x)
    return normalized
class CNN(nn.Module):
def __init__(self, input_channel=3, n_outputs=10, dropout_rate=0.25, top_bn=False):
self.dropout_rate = dropout_rate
self.top_bn = top_bn
super(CNN, self).__init__()
self.c1=nn.Conv2d(input_channel,128,kernel_size=3,stride=1, padding=1)
self.c2=nn.Conv2d(128,128,kernel_size=3,stride=1, padding=1)
self.c3=nn.Conv2d(128,128,kernel_size=3,stride=1, padding=1)
self.c4=nn.Conv2d(128,256,kernel_size=3,stride=1, padding=1)
self.c5=nn.Conv2d(256,256,kernel_size=3,stride=1, padding=1)
self.c6=nn.Conv2d(256,256,kernel_size=3,stride=1, padding=1)
self.c7=nn.Conv2d(256,512,kernel_size=3,stride=1, padding=0)
self.c8=nn.Conv2d(512,256,kernel_size=3,stride=1, padding=0)
self.c9=nn.Conv2d(256,128,kernel_size=3,stride=1, padding=0)
self.fc=nn.Linear(128,n_outputs)
self.bn1=nn.BatchNorm2d(128)
self.bn2=nn.BatchNorm2d(128)
self.bn3=nn.BatchNorm2d(128)
self.bn4=nn.BatchNorm2d(256)
self.bn5=nn.BatchNorm2d(256)
self.bn6=nn.BatchNorm2d(256)
self.bn7=nn.BatchNorm2d(512)
self.bn8=nn.BatchNorm2d(256)
self.bn9=nn.BatchNorm2d(128)
def forward(self, x,):
h=x
h=self.c1(h)
h=F.leaky_relu(call_bn(self.bn1, h), negative_slope=0.01)
h=self.c2(h)
h=F.leaky_relu(call_bn(self.bn2, h), negative_slope=0.01)
h=self.c3(h)
h=F.leaky_relu(call_bn(self.bn3, h), negative_slope=0.01)
h=F.max_pool2d(h, kernel_size=2, stride=2)
h=F.dropout2d(h, p=self.dropout_rate)
h=self.c4(h)
h=F.leaky_relu(call_bn(self.bn4, h), negative_slope=0.01)
h=self.c5(h)
h=F.leaky_relu(call_bn(self.bn5, h), negative_slope=0.01)
h=self.c6(h)
h=F.leaky_relu(call_bn(self.bn6, h), negative_slope=0.01)
h=F.max_pool2d(h, kernel_size=2, stride=2)
h=F.dropout2d(h, p=self.dropout_rate)
h=self.c7(h)
h=F.leaky_relu(call_bn(self.bn7, h), negative_slope=0.01)
h=self.c8(h)
h=F.leaky_relu(call_bn(self.bn8, h), negative_slope=0.01)
h=self.c9(h)
h=F.leaky_relu(call_bn(self.bn9, h), negative_slope=0.01)
h=F.avg_pool2d(h, kernel_size=h.data.shape[2])
h = h.view(h.size(0), h.size(1))
logit=self.fc(h)
return logit
class SimCLR(nn.Module):
    """
    We opt for simplicity and adopt the commonly used ResNet (He et al., 2016)
    to obtain h_i = f(x_i) = ResNet(x_i), where h_i is the output after the
    average pooling layer; a small MLP projects it to z_i for the loss.
    """

    def __init__(self, args):
        super(SimCLR, self).__init__()

        self.args = args
        self.encoder = self.get_net(args.net)

        self.n_features = self.encoder.fc.in_features  # get dimensions of fc layer
        self.encoder.fc = Identity()  # remove fully-connected layer after pooling layer

        # We use a MLP with one hidden layer to obtain
        # z_i = g(h_i) = W(2) relu(W(1) h_i).
        self.projector = nn.Sequential(
            nn.Linear(self.n_features, self.n_features, bias=False),
            nn.ReLU(),
            nn.Linear(self.n_features, args.projection_dim, bias=False),
        )

    def get_net(self, name):
        """Instantiate and return the backbone named ``name``.

        :raises KeyError: if ``name`` is not a known backbone.
        """
        # Map names to *constructors*, not instances: the original dict
        # eagerly built all three networks on every call just to pick one.
        nets = {
            "resnet18": torchvision.models.resnet18,
            "resnet50": torchvision.models.resnet50,
            "9cnn": CNN,
        }
        if name not in nets.keys():
            raise KeyError(f"{name} is not a valid Net version")
        return nets[name]()

    def forward(self, x):
        """Return (features h, projection z); z is L2-normalized when
        ``args.normalize`` is set."""
        h = self.encoder(x)
        z = self.projector(h)

        if self.args.normalize:
            z = nn.functional.normalize(z, dim=1)
        return h, z
class simclrnet(nn.Module):
    """Fine-tuning wrapper: an encoder followed by a 3-layer MLP head."""

    def __init__(self, args, model, n_classes, in_feature):
        super(simclrnet, self).__init__()

        self.args = args
        self.encoder = model
        self.n_features = in_feature  # width of the encoder's output features
        hidden = self.n_features
        self.classifier = nn.Sequential(
            nn.Linear(hidden, hidden, bias=True),
            nn.ReLU(),
            nn.Linear(hidden, hidden, bias=True),
            nn.ReLU(),
            nn.Linear(hidden, n_classes, bias=True),
        )

    def forward(self, x):
        """Encode ``x`` and return the classifier logits."""
        features = self.encoder(x)
        return self.classifier(features)
"1700017801@pku.edu.cn"
] | 1700017801@pku.edu.cn |
5f147125d008245508dab79793ef5a2506d65e95 | 7a43e8b026c4545dab1a6e2992e8e195d4431ed7 | /demos/D009_tuple.py | a28420c8d6d5f43b4a1c71356680d43149623ee8 | [] | no_license | Alan6584/PythonLearn | 579b1536ff2f3fb880ad3251b902f9f4176e7978 | c889d60104355d114ec86972c887b3e9e9548367 | refs/heads/master | 2021-05-07T04:17:57.093611 | 2018-11-29T13:45:25 | 2018-11-29T13:45:25 | 111,259,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | #!/usr/bin/python
# -*- coding:UTF-8 -*-
# Demo of tuple basics (Python 2 print syntax).
tup = ('a', 'b', 'c')
print tup
print 'tup[0] =', tup[0]
# tup[1] = 'bb' # illegal: tuple items cannot be reassigned ("object does not support item assignment")
print 'tup[1:3] =',tup[1:3]
print 'tup[1:] =',tup[1:]
print 'len(tup) =',len(tup)
print 'max(tup) =',max(tup)
print 'min(tup) =',min(tup)
# tuple() converts any sequence, e.g. a list, into a tuple.
list = [1, 2, 3, 4, 5]
print 'tuple(list) =',tuple(list)
# Plain parentheses are just grouping: tup1 is the string 'Hello', not a tuple.
tup1 = ('Hello')
print 'tup1 =',tup1
tup2 = ('Alan',) # parentheses are ambiguous (grouping vs. tuple), so a one-element tuple needs a trailing comma
print 'tup2 =',tup2
| [
"alanwang6584@gmail.com"
] | alanwang6584@gmail.com |
d9d8d108b0e4724e37827247112eb08e0ee40bc7 | 071ab5fa2060a7da6f0068b04ff7aae8bb4c3e3e | /multiagent/logger.py | f84f217a93a68dd4ea39bc075b60a3b314e0d31c | [] | no_license | YZNYEE/TrafficLights_control_pytorch_rl | e4047951b7aaa4b36326db37bb08b18ebd063214 | 000029139e04c5d974bc665248b6bff91877cae5 | refs/heads/master | 2022-07-26T03:08:15.464356 | 2018-05-17T09:06:41 | 2018-05-17T09:06:41 | 133,244,981 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,054 | py | import os
import logging
class MyLogger(logging.Logger):
    """Logger that writes ERROR+ to the console and DEBUG+ to a rotating
    log file (10 MB per file, at most 5 backups)."""

    def __init__(self, filename='log/test.log'):
        import logging.handlers  # submodule is not pulled in by `import logging`
        # super(MyLogger, self).__init__(filename)
        logging.Logger.__init__(self, filename)

        # Shared log record format.
        fmtHandler = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)s][%(levelname)s] %(message)s')

        # Console stream: only ERROR and above.
        try:
            consoleHd = logging.StreamHandler()
            consoleHd.setLevel(logging.ERROR)
            consoleHd.setFormatter(fmtHandler)
            self.addHandler(consoleHd)
        except Exception as reason:
            self.error("%s" % reason)

        # Make sure the log directory exists (ignore "already exists").
        try:
            os.makedirs(os.path.dirname(filename))
        except Exception as reason:
            pass

        # Rotating file handler: DEBUG and above, 10 MB per file, 5 backups.
        # BUGFIX: the original code (a) returned early, making this section
        # unreachable, and (b) used logging.BaseRotatingHandler, which lives
        # in logging.handlers and accepts no maxBytes argument, so it raised
        # unconditionally. RotatingFileHandler is the intended class.
        try:
            fileHd = logging.handlers.RotatingFileHandler(
                filename, maxBytes=10 * 1024 * 1024, backupCount=5)
            fileHd.setLevel(logging.DEBUG)
            fileHd.setFormatter(fmtHandler)
            self.addHandler(fileHd)
        except Exception as reason:
            self.error("%s" % reason)
def main():
    """Demo: create two loggers writing to separate files and emit one
    message at every severity level through each."""
    for logfile in ('log/test1.log', 'log/test2.log'):
        logger1 = MyLogger(logfile)
        logger1.debug("debug msg")
        logger1.info("info msg")
        logger1.warning("warning msg")
        logger1.error("error msg")
        logger1.critical("critical msg")


if __name__ == '__main__':
    main()
"1447367196@qq.com"
] | 1447367196@qq.com |
a1f0028d1d83290eb414b6e2ffe6780027deb006 | beeb3163027cf0655c5a171e7232d26429e55d0d | /dataloader/DataloaderTemplate.py | bac27b33f0b3b7fe2cc5050dfd0bfb1679b0432d | [
"MIT"
] | permissive | DanielMontecino/Embeddings | 05642f99534add1ee4109cd1eea7c0d2a7ad0d36 | d0c187cd5f7b3cabb747baaefae50f0450e87940 | refs/heads/master | 2020-04-04T18:39:53.090540 | 2019-07-26T08:05:37 | 2019-07-26T08:05:37 | 156,172,955 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,023 | py | from random import shuffle
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
class DataloaderTemplate(object):
    """Abstract base for triplet-loss data loaders.

    Subclasses implement `set_dicts` (build {label: [image refs]} dicts for
    train/test) and `get_image` (load one image as a batch-shaped array).
    The template then yields batches of `ids_per_batch` identities with
    `ims_per_id` images each, optionally augmented via Keras'
    ImageDataGenerator.
    """

    def __init__(self, ims_per_id=4, ids_per_batch=32,
                 target_image_size=(224, 224), data_gen_args={}, n_out=1, **kwards):
        # NOTE(review): `data_gen_args={}` is a mutable default argument; it
        # is only read here, but the dict is shared across instances.
        self.ims_per_id = ims_per_id
        self.ids_per_batch = ids_per_batch
        self.input_shape = target_image_size
        self.data_gen_args = data_gen_args
        self.n_out = n_out  # number of model outputs that receive the labels
        self.train_dict, self.test_dict = self.set_dicts()
        self.train_labels_list = self.set_labels_list(self.train_dict)
        self.test_labels_list = self.set_labels_list(self.test_dict)
        # Cap ids-per-batch at the number of available identities.
        self.train_ids_per_batch = np.min([len(self.train_labels_list), self.ids_per_batch])
        self.test_ids_per_batch = np.min([len(self.test_labels_list), self.ids_per_batch])
        self.show_info()

    def show_info(self):
        """Print a summary of dataset sizes and batching parameters."""
        print("\n ---- Data loader info --- ")
        print("Number of train images:", self.get_size(self.train_dict))
        print("Number of test images:", self.get_size(self.test_dict))
        print("Number of train ids:", len(self.train_labels_list))
        print("Number of test ids:", len(self.test_labels_list))
        print("Number of train ids per batch:", self.train_ids_per_batch)
        print("Number of test ids per batch:", self.test_ids_per_batch)
        print("Number of images por id:", self.ims_per_id)
        print("Number of train steps:", self.get_train_steps())
        print("Number of test steps:", self.get_test_steps())

    @staticmethod
    def get_size(_dict):
        """Return the total number of images across all ids in `_dict`."""
        n = 0
        for key, item in _dict.items():
            n += len(item)
        return n

    def set_dicts(self):
        """Return (train_dict, test_dict); must be provided by subclasses."""
        # return set_dict(test_param), set_dict(test_param)
        raise NotImplementedError("Method not implemented")

    def set_dict(self, *args):
        """Build a single {label: [image refs]} dict; subclass hook."""
        raise NotImplementedError("Method not implemented")

    @staticmethod
    def copy_dict(original_dict):
        ''' Copy a dict to another, because the only assignment =,
        implies that changes in one dict affect the other.
        Input:
            original_dict: The Dictionary to copy.
        Output:
            new_dict: The new dictionary, identical to the
            original
        '''
        new_dict = {}
        for key, items in original_dict.items():
            new_dict[key] = items.copy()
        return new_dict

    @staticmethod
    def set_labels_list(_dict):
        '''
        Set the list with the labels as the keys of a dictionary
        '''
        labels_list = []
        for key, val in _dict.items():
            labels_list += [key]
        return labels_list

    @staticmethod
    def get_labels_list(_labels_list):
        """Shuffle `_labels_list` in place and return a copy of it."""
        shuffle(_labels_list)
        return _labels_list.copy()

    def triplet_generator(self, _dict, _labels_list, ids_per_batch, is_train=True):
        """Yield (x_batch, y_batch) batches forever.

        Each batch holds `ids_per_batch` identities with `ims_per_id` images
        per identity; images of an id are consumed without replacement and
        the per-id pools are refilled (reshuffled) when exhausted.  Keras
        augmentation is applied only when `is_train` and `data_gen_args`
        is non-empty.
        """
        ids_to_train = self.get_labels_list(_labels_list)
        dict_to_train = self.copy_dict(_dict)
        batch_size = ids_per_batch * self.ims_per_id
        while True:
            x_batch = []
            y_batch = []
            # Refill the id pool when too few ids remain for a full batch.
            if len(ids_to_train) <= ids_per_batch:
                ids_to_train = self.get_labels_list(_labels_list)
            for _ in range(ids_per_batch):
                id_ = ids_to_train.pop()
                # Refill this id's image pool when it runs low.
                if len(dict_to_train[id_]) < self.ims_per_id:
                    dict_to_train[id_] = _dict[id_].copy()
                    shuffle(dict_to_train[id_])
                for __ in range(self.ims_per_id):
                    im_link = dict_to_train[id_].pop()
                    im = self.get_image(im_link, is_train)
                    x_batch.append(im)
                    y_batch.append(id_)
            x_batch = np.concatenate(x_batch)
            if len(self.data_gen_args.keys()) > 0 and is_train:
                # NOTE(review): a new ImageDataGenerator is fitted per batch;
                # presumably cheap, but could be hoisted -- confirm.
                datagen = ImageDataGenerator(**self.data_gen_args)
                datagen.fit(x_batch)
                x_batch = next(datagen.flow(x_batch, shuffle=False,
                                            batch_size=batch_size))
            y_batch = np.array(y_batch).astype(np.int32)
            if self.n_out == 1:
                yield x_batch, y_batch
            else:
                # Duplicate the labels for each model output head.
                yield x_batch, [y_batch for k_ in range(self.n_out)]

    def get_image(self, im_link, is_train):
        """Load one image (batch-shaped array); subclass hook."""
        raise NotImplementedError("This method is not implemented")

    def triplet_train_generator(self, train=True):
        """Generator over the training split (augmentation enabled)."""
        return self.triplet_generator(self.train_dict, self.train_labels_list, self.train_ids_per_batch, train)

    def triplet_test_generator(self, train=False):
        """Generator over the test split (augmentation disabled)."""
        return self.triplet_generator(self.test_dict, self.test_labels_list, self.test_ids_per_batch, train)

    def get_train_steps(self):
        """Number of batches per epoch over the training split."""
        return self.get_size(self.train_dict) / float(self.train_ids_per_batch*self.ims_per_id)

    def get_test_steps(self):
        """Number of batches per epoch over the test split."""
        return self.get_size(self.test_dict) / float(self.test_ids_per_batch*self.ims_per_id)
"daniel.montecino@ug.uchile.cl"
] | daniel.montecino@ug.uchile.cl |
10968627eda9b64f7fffc189cfbce24d977f0b4b | 957ca83aaef14b129a7890c58377e1cc5374c4bb | /rpi/ECG_server.py | b0a00a864b749faf30aaae4b0def55b79cebc0a4 | [] | no_license | FrancoRosa/mj-medic | f9f5c32b93fd5953a443f1fc93415ea7330fed2a | c2d090f68ddfe85f6d6115abafadaf1da5278385 | refs/heads/main | 2023-03-11T06:14:22.167800 | 2021-02-25T14:44:00 | 2021-02-25T14:44:00 | 341,766,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | from flask import request, jsonify, Flask
from flask_cors import CORS, cross_origin
from random import random
import json
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the browser front-end
app.config["DEBUG"] = True  # NOTE(review): debug mode should be disabled in production
@app.route('/', methods=['GET'])
def home():
    """Health-check endpoint: confirms the server is reachable."""
    banner = "<h1>Server & Redis Works!</h1>\n"
    return banner
@app.route('/api/v1', methods=['GET'])
def api_save_data():
    """Return 10 random samples as JSON (stand-in for live ECG data).

    Response shape: {"message": false, "values": [10 floats in [0, 1)]}.
    """
    # Comprehension replaces the manual append loop (the loop index was unused).
    values = [random() for _ in range(10)]
    return jsonify({'message': False, 'values': values})
# Start the development server (blocking call).
app.run(port=9999)
"km115.franco@gmail.com"
] | km115.franco@gmail.com |
e7b9297c30fdaccf248a09c5f6eabef8e640017d | 8c705b82e35f992c3d54d939f8ab4207473c56a4 | /03_BFS_DFS/02_virus/virus.py | 453335cced0514fc223d52665fe4567b314cc468 | [
"MIT"
] | permissive | ki-yungkim/AlgorithmStudy | 5a2bb7ec534906a2d7daf5fd8d8966a1e279f75e | 14798d37166c8803d3bf3021e79df920a15eae10 | refs/heads/main | 2023-08-25T03:09:52.449344 | 2021-10-20T11:52:02 | 2021-10-20T11:52:02 | 375,719,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | graph = {}
computer = int(input())
edge = int(input())
for i in range(edge):
edge_number = input().split(' ')
c1, c2 = map(int, edge_number)
if c1 not in graph:
graph[c1] = [c2]
elif c2 not in graph[c1]:
graph[c1].append(c2)
if c2 not in graph:
graph[c2] = [c1]
elif c1 not in graph[c2]:
graph[c2].append(c1)
def BFS(graph):
    """Count the vertices reachable from vertex 1 (vertex 1 included).

    Breadth-first search over the adjacency-list ``graph``; neighbours are
    enqueued in ascending order, matching the original traversal.

    :param graph: dict mapping vertex -> list of neighbour vertices
    :return: number of distinct vertices visited
    """
    visited = set()          # set gives O(1) membership vs the original list's O(n)
    queue = [1]
    head = 0                 # dequeue by index: O(1) vs list.pop(0)'s O(n)
    while head < len(queue):
        n = queue[head]
        head += 1
        if n in visited:
            continue
        visited.add(n)
        if n in graph:
            queue.extend(sorted(set(graph[n]) - visited))
    return len(visited)
print(BFS(graph)-1)  # exclude computer 1 itself from the infected count
"kiyungdev@gmail.com"
] | kiyungdev@gmail.com |
eafff43e3b8925cdecb6e773eecaba4b7b8d6460 | ce7ccb30af3f3f97d4532c12e67b3d034591502a | /Python-peng/window.py | c50e959a73059fa267a6f346c3e7e1bb8cfdffdd | [] | no_license | imppppp7/time | 0292605e6d71796368d207b4e1da78e022677ced | a8d4f5e31320dd137614be73ff31bf7a80a44f50 | refs/heads/master | 2020-05-09T12:50:35.844250 | 2019-06-25T07:05:38 | 2019-06-25T07:05:38 | 181,125,266 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,509 | py | import sys
from PyQt5.QtWidgets import QApplication, QDialog
from PyQt5 import QtGui
from laserspeckle_process.camera import Ui_Form
import cv2
import numpy as np
from laserspeckle_process import mvsdk
def main_loop():
    """Grab frames from the first MindVision camera and display them until
    'q' is pressed; frames are also pushed into the Qt label via the
    module-level ``ui`` object (created by ``main``)."""
    # Enumerate attached cameras.
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    DevInfo = DevList[0]
    print(DevInfo)

    # Open the camera.
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Query the camera's capability description.
    cap = mvsdk.CameraGetCapability(hCamera)

    # Determine whether this is a monochrome or a color camera.
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

    # For mono cameras let the ISP output MONO data directly instead of
    # expanding it to 24-bit R=G=B grayscale.
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Switch the camera to continuous acquisition mode.
    mvsdk.CameraSetTriggerMode(hCamera, 0)

    # Manual exposure, 30 ms exposure time.
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)

    # Start the SDK's internal frame-grabbing thread.
    mvsdk.CameraPlay(hCamera)

    # Size of the RGB buffer, allocated for the camera's maximum resolution.
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (1 if monoCamera else 3)

    # Allocate the RGB buffer that receives the ISP output image.
    # Note: the camera sends RAW data to the PC, which the software ISP
    # converts to RGB (mono cameras skip the conversion, but the ISP still
    # does other processing, so the buffer is needed either way).
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        # Grab one frame from the camera.
        try:
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # The image now lives in pFrameBuffer (RGB for color cameras,
            # 8-bit grayscale for mono cameras).
            # Convert pFrameBuffer into an OpenCV image for further processing.
            frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                   1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))

            showImage = QtGui.QImage(frame.data, frame.shape[1], frame.shape[0], QtGui.QImage.Format_RGB888)
            ui.label_VideoDisplay.setPixmap(QtGui.QPixmap.fromImage(showImage))
            cv2.imshow("capture", frame)

        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(e.error_code, e.message))

    # Close the camera.
    mvsdk.CameraUnInit(hCamera)

    # Free the frame buffer.
    mvsdk.CameraAlignFree(pFrameBuffer)
def main():
    """Create the Qt application and show the camera dialog.

    Stores the generated UI object in the module-level ``ui`` global so the
    capture loop can push frames into its video label.
    """
    global ui
    qt_app = QApplication(sys.argv)
    dialog = QDialog()
    ui = Ui_Form()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(qt_app.exec_())
# NOTE(review): main_loop() runs first but references the global `ui`,
# which is only created inside main(); if a camera is attached this raises
# NameError before the Qt window ever exists -- confirm the intended order.
try:
    main_loop()
finally:
    cv2.destroyAllWindows()
main()
| [
"49544578+imppppp7@users.noreply.github.com"
] | 49544578+imppppp7@users.noreply.github.com |
0f2d6c77048b25d311a0bc2823df8cb79c5985fb | 4135df2cd68ea5a17ff0e4f57537156942e4139e | /main.py | c231d25d82f4c649fe490cb321e348bac07eb9bd | [] | no_license | DaniloMarques1/Avl-tree | 5dc031363cd80dd53ddce4031af5edf0c405f030 | 5f643fbecc491d0b258dc1804ba2cca5bf931b1a | refs/heads/master | 2020-06-05T01:12:46.721952 | 2019-06-17T18:08:23 | 2019-06-17T18:08:23 | 192,262,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,200 | py | from Musica import Musica
from Node import Node
from musica_mock import create_mock
def menu():
    """Print the option menu and return the user's numeric choice."""
    separator = "=" * 30
    options = (
        "1- Inserir uma música",
        "2- Procurar uma música pelo nome",
        "3- Procurar todas as músicas de um determinado ano",
        "4- Remover uma música pelo nome",
        "5- Listar todas as músicas",
        "6- Saber a altura da arvore",
        "7- Esta balanceada? ",
        "8- Sair",
    )
    print(separator)
    for option in options:
        print(option)
    print(separator)
    return int(input("Option: "))
def create_musica_and_insert(root):
    """Prompt the user for song data, insert the song into the tree rooted
    at ``root`` and return the (possibly new) root node."""
    nome = input("Nome da musica: ")
    autor = input("Nome do autor: ")
    album = input("Nome do album: ")
    ano = int(input("Ano de lançamento: "))
    nova_musica = Musica(nome=nome, autor=autor, album=album, ano=ano)
    return root.insert(nova_musica)
def search(root, nome):
    """Look up a song by name in the tree and print its details, or a
    not-found message when it is absent."""
    musica = root.search_by_name(nome)
    if musica is None:
        print("Musica nao encontrada")
        return
    print(musica.get_nome())
    print(musica.get_ano())
    print(musica.get_autor())
    print(musica.get_album())
def main():
    """Interactive REPL over the AVL tree of songs.

    Optionally loads a mocked tree, then loops on the menu until the user
    picks option 8 (exit)."""
    load = input("Carregar o mock? (s/n)")
    if load == "s":
        # mocked tree
        root = create_mock()
    else:
        # empty tree
        root = Node()
    choice = menu()
    while choice != 8:
        if choice == 1:
            root = create_musica_and_insert(root)
        elif choice == 2:
            nome = input("Nome da musica: ")
            search(root, nome)
        elif choice == 3:
            ano = int(input("Ano: "))
            root.search_by_year(ano)
        elif choice == 4:
            nome = input("Nome da musica que deseja remover: ")
            root = root.remove(nome)
        elif choice == 5:
            print("Lista de todas as musicas: ")
            root.list_items()
        elif choice == 6:
            # height() counts nodes on the longest path; subtract 1 for edges.
            print("Altura da arvore: ", root.height(root) - 1)
        elif choice == 7:
            result = root.is_balanced(root)
            if result == True:
                print("Arvore balanceada")
            else:
                print("Arvore nao esta balanceada")
        choice = menu()


if __name__ == "__main__":
    main()
"danilomarques20@hotmail.com"
] | danilomarques20@hotmail.com |
e7e9fa6434a16225e474221aa384d53273c495e4 | 76c28accf0b89aa9f9632e46c4382ed83848fd61 | /index_serve/IndexSearcherTest.py | ec38525bf93efe92ad08d17e6738893ec4ad1856 | [] | no_license | xiaotianwu/py_search | 9d4d86f63ca687e5e0e9a2b0a1980aada248e9d8 | 2c20a216117c033898940179ff626e20e32ffcb1 | refs/heads/master | 2020-05-19T16:32:57.190049 | 2014-03-23T07:58:37 | 2014-03-23T07:58:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,682 | py | #!/usr/bin/python
import os
import random
import time
import unittest
from common.Common import GenRandomIndex
from common.simple_index.SimpleIndex import *
from IndexConfig import IndexHandlerFactory
from IndexManager import IndexManager
from IndexSearcher import IndexSearcher
class IndexSearcherTest(unittest.TestCase):
    """Randomized end-to-end test (Python 2): IndexSearcher results must
    match a brute-force intersection over the per-range SimpleIndex files."""

    def CreateTestIndex(self):
        """Write 8 index files covering term-id ranges [0,300), [300,600),
        [600,1100), [1100,2000), [2000,3000), [3000,3500), [3500,4500),
        [4500,5500), alternating mem/disk flavors per the file names."""
        self.s1 = SimpleIndex()
        for i in range(0, 300):
            self.s1.Add(i, GenRandomIndex())
        testFile1 = 'test1.mem.index'
        writer = SimpleIndexWriter()
        writer.Write(self.s1, testFile1)
        print testFile1, 'created'

        self.s2 = SimpleIndex()
        for i in range(300, 600):
            self.s2.Add(i, GenRandomIndex())
        testFile2 = 'test2.disk.index'
        writer = SimpleIndexWriter()
        writer.Write(self.s2, testFile2)
        print testFile2, 'created'

        self.s3 = SimpleIndex()
        for i in range(600, 1100):
            self.s3.Add(i, GenRandomIndex())
        testFile3 = 'test3.mem.index'
        writer = SimpleIndexWriter()
        writer.Write(self.s3, testFile3)
        print testFile3, 'created'

        self.s4 = SimpleIndex()
        for i in range(1100, 2000):
            self.s4.Add(i, GenRandomIndex())
        testFile4 = 'test4.disk.index'
        writer = SimpleIndexWriter()
        writer.Write(self.s4, testFile4)
        print testFile4, 'created'

        self.s5 = SimpleIndex()
        for i in range(2000, 3000):
            self.s5.Add(i, GenRandomIndex())
        testFile5 = 'test5.disk.index'
        writer = SimpleIndexWriter()
        writer.Write(self.s5, testFile5)
        print testFile5, 'created'

        self.s6 = SimpleIndex()
        for i in range(3000, 3500):
            self.s6.Add(i, GenRandomIndex())
        testFile6 = 'test6.mem.index'
        writer = SimpleIndexWriter()
        writer.Write(self.s6, testFile6)
        print testFile6, 'created'

        self.s7 = SimpleIndex()
        for i in range(3500, 4500):
            self.s7.Add(i, GenRandomIndex())
        testFile7 = 'test7.disk.index'
        writer = SimpleIndexWriter()
        writer.Write(self.s7, testFile7)
        print testFile7, 'created'

        self.s8 = SimpleIndex()
        for i in range(4500, 5500):
            self.s8.Add(i, GenRandomIndex())
        testFile8 = 'test8.mem.index'
        writer = SimpleIndexWriter()
        writer.Write(self.s8, testFile8)
        print testFile8, 'created'

        # Remembered so tearDown can delete them.
        self._indexFiles = [testFile1, testFile2, testFile3, testFile4,
                            testFile5, testFile6, testFile7, testFile8]

    def setUp(self):
        """Build the index files and wire up manager/searcher plus a local
        (range-start, index) map used for brute-force verification."""
        self.CreateTestIndex()
        # Mapping string format: "start,end:file:storage;..." -- must agree
        # with the ranges written in CreateTestIndex.
        mappingStr =\
        '0,299:test1.mem.index:mem;300,599:test2.disk.index:disk;' +\
        '600,1099:test3.mem.index:mem;1100,1999:test4.disk.index:disk;' +\
        '2000,2999:test5.disk.index:disk;3000,3499:test6.mem.index:mem;' +\
        '3500,4499:test7.disk.index:disk;4500,5499:test8.mem.index:mem'
        self._indexManager = IndexManager(4, 3, '.', mappingStr)
        self._indexSearcher = IndexSearcher(20, self._indexManager)
        # Sentinel (9999999999, None) terminates the range scan below.
        self._indexMap = [(0, self.s1), (300, self.s2), (600, self.s3),
                          (1100, self.s4), (2000, self.s5), (3000, self.s6),
                          (3500, self.s7), (4500, self.s8), (9999999999,None)]

    def FindIndexFromMap(self, termId):
        """Return the in-memory SimpleIndex whose range contains termId."""
        for i in range(0, len(self._indexMap)):
            if self._indexMap[i][0] <= termId and\
               self._indexMap[i + 1][0] > termId:
                return self._indexMap[i][1]

    def tearDown(self):
        """Stop the manager and remove the on-disk index files."""
        print 'test done'
        self._indexManager.Stop()
        for f in self._indexFiles:
            os.remove(f)

    def GenRandomTermIdList(self):
        """Return 1..10 distinct random term ids in [0, 5500)."""
        termIdSetSize = random.randint(1, 10)
        s = set()
        for i in range(0, termIdSetSize):
            cand = random.randint(0, 100000) % 5500
            if cand not in s:
                s.add(cand)
        return list(s)

    def testSearcher(self):
        """20000 random queries: searcher result must equal the brute-force
        intersection of the per-term posting lists."""
        termIdList = []
        for i in range(0, 20000):
            termIdList = self.GenRandomTermIdList()
            result1 = self._indexSearcher.Search(termIdList)
            indexHandler = IndexHandlerFactory.Get()
            for termId in termIdList:
                postingList = self.FindIndexFromMap(termId).Fetch(termId)
                self.assertTrue(postingList != None)
                indexHandler.Add(postingList)
            result2 = indexHandler.Intersect()
            self.assertSetEqual(set(result1), set(result2))
            print 'case %d passed' % i


if __name__ == '__main__':
    unittest.main()
| [
"wxt1984@gmail.com"
] | wxt1984@gmail.com |
1291e6dd7c3df9eebf0732a33965f2c78ddd59bc | dce7f7b5f7e680d3c02c714a9546bc5c2bd01750 | /1color_tracking.py | c12501ea96d82d4800b264eaa26c22852ce56dbb | [] | no_license | Vazma/Arm-color | 206a0f166014713e42a0e4a0266d3ea367cb9ff0 | 5c9b8561da343d6aeb34b4293d07ffd6223752f4 | refs/heads/master | 2023-06-19T23:44:27.175634 | 2019-05-06T19:50:33 | 2019-05-06T19:50:33 | 388,264,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,717 | py | import cv2
import numpy as np
# HSV bounds for the colors to detect.
greenLowerBound = np.array([33, 80, 40])
greenUpperBound = np.array([102, 255, 255])
orangeLowerBound = np.array([18, 40, 90])
orangeUpperBound = np.array([27, 255, 255])
yellowLowerBound = np.array([20, 100, 100])
yellowUpperBound = np.array([30, 255, 255])
redLowerBound = np.array([0, 100, 100])
redUpperBound = np.array([10, 255, 255])

# Start video capture from the default camera (DirectShow backend).
cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# Structuring elements for the morphological open/close passes.
kernelOpen = np.ones((5, 5))
kernelClose = np.ones((20, 20))

font = cv2.FONT_HERSHEY_SIMPLEX

while True:
    ret, img = cam.read()
    img = cv2.resize(img, (340, 220))

    # Convert BGR to HSV for color thresholding.
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Create the per-color masks.
    # NOTE(review): `mask` uses the red bounds despite its generic name,
    # and only the yellow mask is actually tracked below -- confirm intent.
    mask = cv2.inRange(imgHSV, redLowerBound, redUpperBound)
    greenmask = cv2.inRange(imgHSV, greenLowerBound, greenUpperBound)
    yellowmask = cv2.inRange(imgHSV, yellowLowerBound, yellowUpperBound)
    redmask = cv2.inRange(imgHSV, redLowerBound, redUpperBound)
    # Morphology: open to drop speckles, close to fill holes.
    maskOpen = cv2.morphologyEx(yellowmask, cv2.MORPH_OPEN, kernelOpen)
    maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)

    maskFinal = maskClose
    conts, h = cv2.findContours(maskFinal.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    cv2.drawContours(img, conts, -1, (255, 0, 0), 3)
    # Box and number each detected blob.
    for i in range(len(conts)):
        x, y, w, h = cv2.boundingRect(conts[i])
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        cv2.putText(img, str(i + 1), (x, y + h), font, 4, (0, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow("maskClose", maskClose)
    cv2.imshow("maskOpen", maskOpen)
    cv2.imshow("mask", mask)
    cv2.imshow("cam", img)
    cv2.waitKey(10)
"a01208050@itesm.mx"
] | a01208050@itesm.mx |
79a015db8806cac557a7226e13680d63327c5194 | 2208b518d08d923a98dc17da96080d824939507a | /quiz1/wsgi.py | 118eeeceb12b0401064d9a32c4d77395fa55692c | [] | no_license | gargpriyanka791/quiz-application | 56d27aa96f91b326731b5277b2c1de050e122863 | 1eb1b8b11fec39a4a00bb7514f016999c2dfe6f3 | refs/heads/main | 2023-03-04T04:53:01.825759 | 2021-02-14T18:04:57 | 2021-02-14T18:04:57 | 334,464,120 | 0 | 0 | null | 2021-02-14T18:04:58 | 2021-01-30T16:54:01 | Python | UTF-8 | Python | false | false | 387 | py | """
WSGI config for quiz1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at this project's settings before constructing the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'quiz1.settings')

# Module-level WSGI callable that application servers import.
application = get_wsgi_application()
"garg.ankit59@gmail.com"
] | garg.ankit59@gmail.com |
e6342b3777cd782c8409cf8f33beab754f0f2f7a | 743e5872200168ee7f641337bc123a9a80638592 | /python_hi.py | cb90a120484a56f07652d0b297b9f0be205abd91 | [] | no_license | el-musso/Bioinfo_Helloworlds | dac5159fbc2a6403c27209a65acbc7d4fe13f6d8 | 0b280b2e220682a6478e02e5f7b8a9e9b52edded | refs/heads/master | 2021-01-16T03:42:00.752394 | 2020-02-25T10:06:40 | 2020-02-25T10:06:40 | 242,965,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | #!/usr/bin/python3
import random

# Greet the world; roughly half of all runs also shout out "Califano!".
coin = random.random()

print("Hello world!")
print("Hello from the master branch!")

# random.random() is uniform on [0, 1), so this branch fires ~50% of the time.
if coin >= 0.5:
    print("Califano!")
"bio1926@binp17.mbfys.lu.se"
] | bio1926@binp17.mbfys.lu.se |
b64762dcc66d6ece3f265efcab7e3272e0b81ce3 | 233e3fdaf1ffe523c84c3e258c50f472cac9219e | /replay/prob1/new_binary.py | 24cb20ef00df3c38265325b23df2a8bcc8023b8d | [] | no_license | loneworld/defcon-2017-tools | 7f821f235fc10615d859eb5ac51d2afc930bfbdf | 7adb9377d298d142a1e132d064ce4aed1973f934 | refs/heads/master | 2020-04-15T06:54:50.349634 | 2017-08-03T12:40:13 | 2017-08-03T12:40:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | import sys,os
from shutil import move, copyfile, copytree
from hashlib import md5
from utils import check

# Register a new binary under bin/ as "<version>_<md5>" and create the
# json/<version>/ result folders.  When a previous version exists, its
# "ok" results are carried over and its exception/touch_flag testcases
# are re-checked against the new binary.
bin_name = sys.argv[1]
files = os.listdir('bin/')
version = len(files) + 1
# Hash in binary mode inside a context manager: text mode would corrupt
# the digest on platforms with newline translation, and the old
# md5(open(bin_name).read()) form leaked the file handle.
with open(bin_name, 'rb') as f:
    filemd5 = md5(f.read()).hexdigest()
name = str(version) + '_' + filemd5
copyfile(bin_name, 'bin/' + name)
dirname = 'json/' + str(version)
os.mkdir(dirname)
if version > 1:
    dirname2 = 'json/' + str(version - 1)
    # Portable replacement for os.system('cp <prev>/ok <cur>/ok -r'):
    # no shell involved, and failures raise instead of passing silently.
    copytree(dirname2 + '/ok', dirname + '/ok')
    os.mkdir(dirname + '/exception')
    os.mkdir(dirname + '/touch_flag')
    # Re-run previously failing testcases against the new binary.
    check(name, dirname2 + '/exception', dirname)
    check(name, dirname2 + '/touch_flag', dirname)
else:
    # First version: start with empty result folders.
    os.mkdir(dirname + '/ok')
    os.mkdir(dirname + '/exception')
    os.mkdir(dirname + '/touch_flag')
"b00507010@ntu.edu.tw"
] | b00507010@ntu.edu.tw |
f85c74259d441009082badd38b4c9f8c39a142de | 6e366c25bfab97691589d85512e5b7b515f5bb32 | /codingDOJO_python_stack/django/django_intro/test_graph/test_graph/urls.py | 7649af78415b22718604cb733784e566258c1735 | [] | no_license | Sota1218/codingDojo-in-Seattle | 09bcae69ffec934b6498ed378c7c6922728a11b5 | c1f5d9b8e0d8a12607784786d6351fed9e98b377 | refs/heads/master | 2022-12-04T17:13:39.298423 | 2020-08-06T09:41:02 | 2020-08-06T09:41:02 | 285,524,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | """test_graph URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include

# Delegate all routing at the site root to the test_app URLconf.
urlpatterns = [
    path('', include('test_app.urls')),
]
| [
"sotayoshihiro@USERnoMacBook-Air.local"
] | sotayoshihiro@USERnoMacBook-Air.local |
c6b0fff397968dd94e5107fba24304c98ee0d322 | e6cbf405993b752e8a1c862e3e792fa889ebb2b4 | /menuwatch/apps/menus/migrations/0009_auto__del_field_food_myhash.py | 88f0836ac2bd63aff870d819147ba27a9e31fc54 | [] | no_license | bjacobel/menuwatch | 9b4ed009412663bb506f66f28869e2f3575e76c4 | 6cc66de190b69c24d70b27fecfbce4fa173d2359 | refs/heads/master | 2020-04-04T15:27:46.740250 | 2014-07-17T20:58:15 | 2014-07-17T20:58:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,883 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: removes the obsolete 'myhash' column from Food."""

    def forwards(self, orm):
        # Deleting field 'Food.myhash'
        db.delete_column(u'menus_food', 'myhash')

    def backwards(self, orm):
        # Adding field 'Food.myhash' back; rows get the default (None).
        db.add_column(u'menus_food', 'myhash',
                      self.gf('django.db.models.fields.CharField')(default=None, max_length=32),
                      keep_default=False)

    # Frozen ORM snapshot South uses to rebuild model classes at migration time.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'menus.food': {
            'Meta': {'object_name': 'Food'},
            'attrs': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
            'foodgroup': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_date': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_date_food_was_offered'", 'null': 'True', 'to': u"orm['menus.FoodDate']"}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
            'meal': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'next_dates': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'upcoming_dates_for_food'", 'null': 'True', 'to': u"orm['menus.FoodDate']"})
        },
        u'menus.fooddate': {
            'Meta': {'object_name': 'FoodDate'},
            'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'menus.profile': {
            'Meta': {'object_name': 'Profile'},
            'frequency': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'locations': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'pro': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'menus.watch': {
            'Meta': {'object_name': 'Watch'},
            'food': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['menus.Food']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['menus.Profile']"})
        }
    }

    complete_apps = ['menus']
"bjacobel@gmail.com"
] | bjacobel@gmail.com |
64e54360a08ce6dbbb24c1460fac84b3c9895da4 | 90da0f9a0694da690da51185e39f23ea7e884156 | /config/CONST.py | 59b079aa7e0eb0d4f3626e7f7d6463d6f0141b54 | [] | no_license | vchlum/shnopp | ea64c2f04a8a88d3a41028f94c13a2ecfc4880ae | 50b227679f2c1a713fee8b262a5fa2f3d0bf8b6b | refs/heads/master | 2021-07-02T12:27:19.415965 | 2019-05-31T11:57:24 | 2019-05-31T11:57:24 | 59,756,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-

import os

# Application identity constants for the shnopp smart-home service.
HOSTNAME = os.uname()[1]

APP_NAME = "SmartHomeNotOnlyPiPowered"
APP_SHORT_NAME = "shnopp"
DESCRIPTION = ""
AUTHORS = ["Václav Chlumský <chlumskyvaclav@gmail.com>"]
VERSION = "0.55~alfa"
COPYRIGHT = "(C) 2016 Václav Chlumský"

# Separator used in composite message/identifier strings.
DELIMITER = "::"

# Not final yet — revise/fix these return codes later.  (translated from Czech)
RET_OK = 0
RET_ERROR = 1
RET_IO_ERROR = 100
"chlumskyvaclav@gmail.com"
] | chlumskyvaclav@gmail.com |
ca29d0db6649c0311170cc6c4b70e63bb1d627b5 | e7c70a02e61f6d4a97c5933f3550bca22afa6acb | /ros_ws/devel/lib/python2.7/dist-packages/final_lab/srv/_path.py | 5f36d5349329d6f7860095b20230cfa7a98e99cc | [] | no_license | amitf82/Final_Proj_Mobile_Robotics | 14cfe7b182df1294a873283c91688c8ca9526fee | 435a6c1562df030fc462fe1b0a84f968a27a2b85 | refs/heads/master | 2021-01-20T03:22:51.387095 | 2017-04-30T08:25:33 | 2017-04-30T08:25:33 | 89,532,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,227 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from final_lab/pathRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import nav_msgs.msg
import std_msgs.msg
# Autogenerated genpy serializer for the request half of the final_lab/path
# service: a single nav_msgs/Path field.  Do not hand-edit the wire logic.
class pathRequest(genpy.Message):
    _md5sum = "58d6f138c7de7ef47c75d4b7e5df5472"
    _type = "final_lab/pathRequest"
    _has_header = False #flag to mark the presence of a Header object
    _full_text = """
nav_msgs/Path path

================================================================================
MSG: nav_msgs/Path
#An array of poses that represents a Path for a robot to follow
Header header
geometry_msgs/PoseStamped[] poses

================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id

================================================================================
MSG: geometry_msgs/PoseStamped
# A Pose with reference coordinate frame and timestamp
Header header
Pose pose

================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of postion and orientation. 
Point position
Quaternion orientation

================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z

================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.

float64 x
float64 y
float64 z
float64 w
"""
    __slots__ = ['path']
    _slot_types = ['nav_msgs/Path']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes.  You cannot mix in-order arguments and keyword arguments.

        The available fields are:
           path

        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(pathRequest, self).__init__(*args, **kwds)
            #message fields cannot be None, assign default values for those that are
            if self.path is None:
                self.path = nav_msgs.msg.Path()
        else:
            self.path = nav_msgs.msg.Path()

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            _x = self
            buff.write(_struct_3I.pack(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs))
            _x = self.path.header.frame_id
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack('<I%sB'%length, length, *_x))
            else:
                buff.write(struct.pack('<I%ss'%length, length, _x))
            length = len(self.path.poses)
            buff.write(_struct_I.pack(length))
            # Each pose: header (seq, stamp, frame_id) then position + orientation.
            for val1 in self.path.poses:
                _v1 = val1.header
                buff.write(_struct_I.pack(_v1.seq))
                _v2 = _v1.stamp
                _x = _v2
                buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
                _x = _v1.frame_id
                length = len(_x)
                if python3 or type(_x) == unicode:
                    _x = _x.encode('utf-8')
                    length = len(_x)
                if python3:
                    buff.write(struct.pack('<I%sB'%length, length, *_x))
                else:
                    buff.write(struct.pack('<I%ss'%length, length, _x))
                _v3 = val1.pose
                _v4 = _v3.position
                _x = _v4
                buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
                _v5 = _v3.orientation
                _x = _v5
                buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        try:
            if self.path is None:
                self.path = nav_msgs.msg.Path()
            end = 0
            _x = self
            start = end
            end += 12
            (_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.path.header.frame_id = str[start:end].decode('utf-8')
            else:
                self.path.header.frame_id = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            self.path.poses = []
            # Rebuild each PoseStamped in the same field order serialize() wrote.
            for i in range(0, length):
                val1 = geometry_msgs.msg.PoseStamped()
                _v6 = val1.header
                start = end
                end += 4
                (_v6.seq,) = _struct_I.unpack(str[start:end])
                _v7 = _v6.stamp
                _x = _v7
                start = end
                end += 8
                (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                start = end
                end += length
                if python3:
                    _v6.frame_id = str[start:end].decode('utf-8')
                else:
                    _v6.frame_id = str[start:end]
                _v8 = val1.pose
                _v9 = _v8.position
                _x = _v9
                start = end
                end += 24
                (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
                _v10 = _v8.orientation
                _x = _v10
                start = end
                end += 32
                (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
                self.path.poses.append(val1)
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        try:
            _x = self
            buff.write(_struct_3I.pack(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs))
            _x = self.path.header.frame_id
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack('<I%sB'%length, length, *_x))
            else:
                buff.write(struct.pack('<I%ss'%length, length, _x))
            length = len(self.path.poses)
            buff.write(_struct_I.pack(length))
            for val1 in self.path.poses:
                _v11 = val1.header
                buff.write(_struct_I.pack(_v11.seq))
                _v12 = _v11.stamp
                _x = _v12
                buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
                _x = _v11.frame_id
                length = len(_x)
                if python3 or type(_x) == unicode:
                    _x = _x.encode('utf-8')
                    length = len(_x)
                if python3:
                    buff.write(struct.pack('<I%sB'%length, length, *_x))
                else:
                    buff.write(struct.pack('<I%ss'%length, length, _x))
                _v13 = val1.pose
                _v14 = _v13.position
                _x = _v14
                buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
                _v15 = _v13.orientation
                _x = _v15
                buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        try:
            if self.path is None:
                self.path = nav_msgs.msg.Path()
            end = 0
            _x = self
            start = end
            end += 12
            (_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.path.header.frame_id = str[start:end].decode('utf-8')
            else:
                self.path.header.frame_id = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            self.path.poses = []
            for i in range(0, length):
                val1 = geometry_msgs.msg.PoseStamped()
                _v16 = val1.header
                start = end
                end += 4
                (_v16.seq,) = _struct_I.unpack(str[start:end])
                _v17 = _v16.stamp
                _x = _v17
                start = end
                end += 8
                (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                start = end
                end += length
                if python3:
                    _v16.frame_id = str[start:end].decode('utf-8')
                else:
                    _v16.frame_id = str[start:end]
                _v18 = val1.pose
                _v19 = _v18.position
                _x = _v19
                start = end
                end += 24
                (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
                _v20 = _v18.orientation
                _x = _v20
                start = end
                end += 32
                (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
                self.path.poses.append(val1)
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled little-endian struct codecs shared by the pathRequest serializers.
_struct_I = genpy.struct_I
_struct_4d = struct.Struct("<4d")
_struct_3I = struct.Struct("<3I")
_struct_2I = struct.Struct("<2I")
_struct_3d = struct.Struct("<3d")
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from final_lab/pathResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
# Autogenerated genpy serializer for the response half of the final_lab/path
# service: a single boolean status flag.  Do not hand-edit the wire logic.
class pathResponse(genpy.Message):
    _md5sum = "3a1255d4d998bd4d6585c64639b5ee9a"
    _type = "final_lab/pathResponse"
    _has_header = False #flag to mark the presence of a Header object
    _full_text = """
bool status

"""
    __slots__ = ['status']
    _slot_types = ['bool']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes.  You cannot mix in-order arguments and keyword arguments.

        The available fields are:
           status

        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(pathResponse, self).__init__(*args, **kwds)
            #message fields cannot be None, assign default values for those that are
            if self.status is None:
                self.status = False
        else:
            self.status = False

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            buff.write(_struct_B.pack(self.status))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        try:
            end = 0
            start = end
            end += 1
            (self.status,) = _struct_B.unpack(str[start:end])
            # Wire format stores a byte; normalise back to a Python bool.
            self.status = bool(self.status)
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        try:
            buff.write(_struct_B.pack(self.status))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        try:
            end = 0
            start = end
            end += 1
            (self.status,) = _struct_B.unpack(str[start:end])
            self.status = bool(self.status)
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled struct codecs shared by the pathResponse serializers.
_struct_I = genpy.struct_I
_struct_B = struct.Struct("<B")
# Service wrapper binding the request/response classes for rospy.Service /
# rospy.ServiceProxy lookups of the final_lab/path service.
class path(object):
    _type = 'final_lab/path'
    _md5sum = '87fbad184f990f6671a31d6fd2678f60'
    _request_class = pathRequest
    _response_class = pathResponse
| [
"adf38@case.edu"
] | adf38@case.edu |
b0e23344c3cfc477b0aa8a229c58a16ef4401be6 | 2030f8501a1c93c00b1e509d40bfa967f7aa6a6b | /nn/gctc.py | 24b71a8a43f1de5c22d7e574ab765ed113f66414 | [
"BSD-3-Clause"
] | permissive | tangkk/tangkk-mirex-ace | 7acfbd425fd9fae479d07132b920bdebb7b186aa | c93259eac0309235dc3c6461dbcc07a78067e351 | refs/heads/master | 2021-07-04T22:18:32.791585 | 2021-06-02T18:12:51 | 2021-06-02T18:12:51 | 34,000,979 | 19 | 3 | null | null | null | null | UTF-8 | Python | false | false | 28,994 | py | '''
A chord classifier based on unidirectional CTC - with dynamic programming
'''
from collections import OrderedDict
import cPickle as pkl
import sys
import time
import numpy
import theano
from theano import config
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from sklearn import preprocessing
from acesongdb import load_data_song
# Command-line configuration for the chord-classification LSTM.
dataset = sys.argv[1] #'../data/ch/Jsong-ch-noinv.pkl'
dumppath = sys.argv[2] #'lstm_model.npz'
xdim = int(sys.argv[3])#24   input feature dimension (e.g. chromagram bins)
ydim = int(sys.argv[4])#61 or 252   number of chord classes
dim_proj = int(sys.argv[5])#500   LSTM hidden-state size

use_dropout=True
max_epochs = 16000 # give it long enough time to train
batch_size = 500 # length of a sample training piece within a song in terms of number of frames

# Set the random number generators' seeds for consistency
SEED = 123
numpy.random.seed(SEED)
rng = numpy.random.RandomState(SEED)
def numpy_floatX(data):
    """Return *data* as a numpy array cast to Theano's configured float dtype."""
    target_dtype = config.floatX
    return numpy.asarray(data, dtype=target_dtype)
def numpy_intX(data):
    """Return *data* as a numpy array of 32-bit integers."""
    int_dtype = 'int32'
    return numpy.asarray(data, dtype=int_dtype)
def get_minibatches_idx(n, minibatch_size, shuffle=False):
    """
    Partition the sample indices 0..n-1 into consecutive minibatches.

    Returns an iterable of (minibatch_number, index_array) pairs; the final
    minibatch holds the remainder when n is not divisible by minibatch_size.
    With shuffle=True the indices are randomised first (used to re-shuffle
    the dataset at each iteration).
    """
    idx_list = numpy.arange(n, dtype="int32")
    if shuffle:
        numpy.random.shuffle(idx_list)

    minibatches = [idx_list[start:start + minibatch_size]
                   for start in range(0, n, minibatch_size)]

    return zip(range(len(minibatches)), minibatches)
def get_dataset(name):
    """Return the first two entries registered for *name* in the module-level
    `datasets` mapping.

    NOTE(review): `datasets` is not defined in this file chunk — verify it is
    provided elsewhere before calling.
    """
    entry = datasets[name]
    return entry[0], entry[1]
def zipp(params, tparams):
    """
    Push the raw numpy values in *params* into the matching Theano shared
    variables in *tparams*.  Needed for the GPU stuff when we reload a model.
    """
    for name, value in params.iteritems():
        tparams[name].set_value(value)
def unzip(zipped):
    """
    Pull the current values out of a dict of Theano shared variables,
    returning a plain OrderedDict of arrays.  Needed when we pickle a model.
    """
    return OrderedDict((name, shared.get_value())
                       for name, shared in zipped.iteritems())
def dropout_layer(state_before, use_noise, trng):
    """Symbolic dropout with keep probability 0.5.

    `use_noise` is a switch: when 1 (training) each unit of the input is kept
    with probability p via a Bernoulli mask; when 0 (test/validation) every
    unit is kept and activations are scaled by p instead.  `trng` is the
    Theano MRG random stream used to sample the mask.
    """
    # at training time, use_noise is set to 1,
    # dropout is applied to proj, each unit of proj is presented at a chance of p
    # at test/validation time, use_noise is set to 0,
    # each unit of proj is always presented, and their activations are multiplied by p
    # by default p=0.5 (can be changed)
    # and different p can be applied to different layers, even the input layer
    proj = T.switch(use_noise,
                    (state_before *
                     trng.binomial(state_before.shape,
                                   p=0.5, n=1,
                                   dtype=state_before.dtype)),
                    state_before * 0.5)
    return proj
def _p(pp, name):
return '%s_%s' % (pp, name)
def init_params(options):
    """
    Global (not LSTM) parameter. For the embedding and the classifier.
    """
    params = OrderedDict()
    # Let the encoder layer (here: 'lstm') register its own parameters first.
    params = get_layer(options['encoder'])[0](options,
                                              params,
                                              prefix=options['encoder'])
    # classifier: softmax weights mapping dim_proj -> ydim, plus bias
    params['U'] = 0.01 * numpy.random.randn(options['dim_proj'],
                                            options['ydim']).astype(config.floatX)
    params['b'] = numpy.zeros((options['ydim'],)).astype(config.floatX)

    return params
def load_params(path, params):
    """
    Overwrite each entry of *params* with the同-named array from the .npz
    archive at *path*; raises Warning when a parameter is missing there.
    """
    archive = numpy.load(path)
    for name, _ in params.iteritems():
        if name not in archive:
            raise Warning('%s is not in the archive' % name)
        params[name] = archive[name]

    return params
def init_tparams(params):
    """Wrap every numpy parameter in a Theano shared variable of the same name."""
    return OrderedDict((name, theano.shared(value, name=name))
                       for name, value in params.iteritems())
def get_layer(name):
    """Return the (param_init, layer_build) pair registered under *name*
    in the module-level `layers` table."""
    return layers[name]
def random_weight(n_in, n_out=0):
    """
    Sample an (n_in, n_out) weight matrix uniformly from
    [-sqrt(6/(n_in+n_out)), +sqrt(6/(n_in+n_out))] (Glorot-style bound).
    n_out == 0 (the default) yields a square (n_in, n_in) matrix.
    """
    if n_out == 0:
        n_out = n_in
    bound = numpy.sqrt(6. / (n_in + n_out))
    sample = rng.uniform(low=-bound, high=bound, size=(n_in, n_out))
    return numpy.asarray(sample, dtype=theano.config.floatX)
def ortho_weight(ndim1, ndim2):
    """
    Build an (ndim1, ndim2) matrix with orthonormal rows or columns by taking
    the appropriate SVD factor of a random Gaussian matrix.
    """
    W = numpy.random.randn(ndim1, ndim2)
    if ndim1 == ndim2:
        # square: the full left factor is orthogonal
        factor = numpy.linalg.svd(W)[0]
    elif ndim1 < ndim2:
        # wide: use V (shape ndim1 x ndim2 under thin SVD)
        factor = numpy.linalg.svd(W, full_matrices=0)[2]
    else:
        # tall: use U (shape ndim1 x ndim2 under thin SVD)
        factor = numpy.linalg.svd(W, full_matrices=0)[0]
    return factor.astype(config.floatX)
def param_init_lstm(options, params, prefix='lstm'):
    """
    Init the LSTM parameters under *prefix*:
      W -- input-to-gate weights, shape (xdim, 4*dim_proj)
      U -- recurrent weights, shape (dim_proj, 4*dim_proj)
      b -- gate biases, shape (4*dim_proj,)
    The four concatenated blocks feed the input/forget/output gates and the
    candidate cell (in the order sliced by the step function).

    :see: init_params
    """
    n_in = options['xdim']
    n_hid = options['dim_proj']

    W = numpy.concatenate([ortho_weight(n_in, n_hid) for _ in range(4)], axis=1)
    params[_p(prefix, 'W')] = W  # "lstm_W"

    U = numpy.concatenate([ortho_weight(n_hid, n_hid) for _ in range(4)], axis=1)
    params[_p(prefix, 'U')] = U  # "lstm_U"

    b = numpy.zeros((4 * n_hid,))
    params[_p(prefix, 'b')] = b.astype(config.floatX)  # "lstm_b"

    return params  # [lstm_W, lstm_U, lstm_b]
def lstm_layer(tparams, x, y, use_noise, options, prefix='lstm'):
    """Build the unidirectional LSTM graph over the frame sequence *x* and
    return (f_pred_prob, f_pred, cost): compiled posterior / prediction
    functions and the mean negative-log-likelihood of the targets *y*."""
    n_timesteps = x.shape[0]

    def _slice(_x, n, dim):
        # Select the n-th gate block out of the 4*dim_proj preactivation.
        return _x[n * dim:(n + 1) * dim]

    # construct a transition matrix
    ydim = options['ydim']

    def _step(x_, h_, c_):
        # One LSTM time step: gates from recurrent + input projections.
        preact = T.dot(h_, tparams[_p(prefix, 'U')])
        preact += T.dot(x_, tparams[_p(prefix, 'W')])
        preact += tparams[_p(prefix, 'b')]

        i = T.nnet.sigmoid(_slice(preact, 0, options['dim_proj']))
        f = T.nnet.sigmoid(_slice(preact, 1, options['dim_proj']))
        o = T.nnet.sigmoid(_slice(preact, 2, options['dim_proj']))
        c = T.tanh(_slice(preact, 3, options['dim_proj']))

        c = f * c_ + i * c
        h = o * T.tanh(c)

        # Per-frame class scores (pre-softmax) from the classifier weights.
        op = T.dot(h, tparams['U']) + tparams['b']
        return h, c, op

    dim_proj = options['dim_proj']
    # the scan function takse one dim_proj vector of x and one target of y at a time
    # the output is:
    # rval[0] -- n_timesteps of h -- n_timesteps * dim_proj
    # rval[1] -- n_timesteps of c -- n_timesteps * dim_proj
    # rval[2] -- n_timesteps of opp -- n_timesteps * ydim
    # rval[3] -- n_timesteps of op -- n_timesteps * ydim (softmax)
    rval, updates = theano.scan(_step,
                                sequences=[x],
                                outputs_info=[T.alloc(0.,dim_proj),
                                              T.alloc(0.,dim_proj),
                                              None],
                                name=_p(prefix, '_layers'),
                                n_steps=n_timesteps)
    pred = rval[-1]
    pred = T.nnet.softmax(pred)

    trng = RandomStreams(SEED)
    if options['use_dropout']:
        pred = dropout_layer(pred, use_noise, trng)

    # NOTE(review): the triple-quoted block below is dead code (a no-op string
    # literal) kept by the author — an unfinished Viterbi decoding stage.
    '''
########################################################################################
# use dynamic programming to find out the optimal path
# viterbi algorithm is adapted from Pattern Recognition, Chapter 9
# transition matrix - impose high self-transition
st = 1e6 # self-transition factor
A = T.eye(ydim) * st + 1
def _dp(opred_,odmax_):
odmax = T.zeros_like(odmax_)
odargmax = T.zeros_like(odmax_)
for i in range(ydim):
dtemp = odmax_ + T.log(A[:,i]*opred_[i])
T.set_subtensor(odmax[i],T.max(dtemp))
T.set_subtensor(odargmax[i],T.argmax(dtemp))
return odmax, odargmax
# this scan loops over the index k to perform iterative dynamic programming
rval, updates = theano.scan(_dp,
sequences=[pred],
outputs_info=[T.zeros_like(pred[0]),
None],
name=_p(prefix, '_dp'),
n_steps=n_timesteps)
dargmax = rval[1].astype(config.floatX)
dmax = rval[0]
dlast = T.argmax(dmax[-1]).astype(config.floatX)
darglast = T.fill(T.zeros_like(dargmax[0]),dlast)
darglast = darglast.reshape([1,ydim])
dargmax_ = dargmax[::-1]
dargmax_ = T.concatenate([darglast,dargmax_[:-1]])
def _bt(odargmax, point_):
point = odargmax[point_.astype('int32')]
return point
# this scan backtracks and findout the path
rval, updates = theano.scan(_bt,
sequences=[dargmax_],
outputs_info=[T.zeros_like(dargmax_[0][0])],
name=_p(prefix, '_bt'),
n_steps=n_timesteps)
path_ = rval
path = path_[::-1]
# this is viterbi based algorithm adapted from a course (use additive weights)
# for k in range(x.shape[0]):
# for i in range(ydim):
# dtemp = numpy.zeros(ydim)
# dtemp = dmax[k-1,:] + numpy.log(A[:,i]*pred[k,i])
# dmax[k,i] = numpy.max(dtemp)
# dargmax[k,i] = numpy.argmax(dtemp)
# set starting point
# dargmax[0,0] = numpy.argmax(dmax[-1])
# reverse
# dargmax_ = dargmax[::-1]
# path_ = numpy.zeros_like(path)
# for k in range(x.shape[0]):
# path_[k] = dargmax_[k-1,path_[k-1]]
# path = path_[::-1]
# the path yield from the above process should be the most probable path of pred
# set it as the prediction instead
########################################################################################
f_pred_prob = theano.function([x], pred, name='f_pred_prob')
f_pred = theano.function([x], path, name='f_pred')
def _cost(path_, y_, cost_):
if T.neq(path_, y_):
cost_ = cost_ + 1
return cost_
# this scan backtracks and findout the path
rval, updates = theano.scan(_cost,
sequences=[path,y],
outputs_info=[T.zeros_like(path[0])],
name=_p(prefix, '_cost'),
n_steps=n_timesteps)
cost = -T.log(rval[-1]).mean()
    '''

    # pred will be -- n_timesteps * ydim posterior probs
    f_pred_prob = theano.function([x], pred, name='f_pred_prob')
    # pred.argmax(axis=1) will be -- n_timesteps * 1 one hot prediction
    # f_pred = theano.function([x], pred.argmax(axis=1), name='f_pred')
    f_pred = theano.function([x], pred, name='f_pred')

    # cost will be a scaler value
    # compile a function for the cost for one frame given op and one_y (or y_)
    # cost is a scaler, where y is a n_timesteps * 1 target vector
    # Each prediction is conditionally independent give x
    # thus the posterior of p(pi|x) should be the product of each posterior
    # `off` guards the log against exactly-zero probabilities.
    off = 1e-8
    cost = -T.log(pred[T.arange(y.shape[0]), y] + off).mean()

    return f_pred_prob, f_pred, cost
# ff: Feed Forward (normal neural net), only useful to put after lstm
# before the classifier.
layers = {'lstm': (param_init_lstm, lstm_layer)}
def sgd(lr, tparams, grads, x, y, cost):
""" Stochastic Gradient Descent
:note: A more complicated version of sgd then needed. This is
done like that for adadelta and rmsprop.
"""
# New set of shared variable that will contain the gradient
# for a mini-batch.
gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
# Function that computes gradients for a mini-batch, but do not
# updates the weights.
f_grad_shared = theano.function([x, y], cost, updates=gsup,
name='sgd_f_grad_shared')
pup = [(p, p - lr * g) for p, g in zip(tparams.values(), gshared)]
# Function that updates the weights from the previously computed
# gradient.
f_update = theano.function([lr], [], updates=pup,
name='sgd_f_update')
return f_grad_shared, f_update
def adadelta(lr, tparams, grads, x, y, cost):
"""
An adaptive learning rate optimizer
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tpramas: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of cost w.r.t to parameres
x: Theano variable
Model inputs
y: Theano variable
Targets
cost: Theano variable
Objective fucntion to minimize
Notes
-----
For more information, see [ADADELTA]_.
.. [ADADELTA] Matthew D. Zeiler, *ADADELTA: An Adaptive Learning
Rate Method*, arXiv:1212.5701.
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, y], cost, updates=zgup + rg2up,
name='adadelta_f_grad_shared')
updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
f_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_f_update')
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, x, y, cost):
"""
A variant of SGD that scales the step size by running average of the
recent step norms.
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tpramas: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of cost w.r.t to parameres
x: Theano variable
Model inputs
y: Theano variable
Targets
cost: Theano variable
Objective fucntion to minimize
Notes
-----
For more information, see [Hint2014]_.
.. [Hint2014] Geoff Hinton, *Neural Networks for Machine Learning*,
lecture 6a,
http://cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.iteritems()]
running_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad' % k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, y], cost,
updates=zgup + rgup + rg2up,
name='rmsprop_f_grad_shared')
updir = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_updir' % k)
for k, p in tparams.iteritems()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / T.sqrt(rg2 - rg ** 2 + 1e-4))
for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
running_grads2)]
param_up = [(p, p + udn[1])
for p, udn in zip(tparams.values(), updir_new)]
f_update = theano.function([lr], [], updates=updir_new + param_up,
on_unused_input='ignore',
name='rmsprop_f_update')
return f_grad_shared, f_update
def build_model(tparams, options):
trng = RandomStreams(SEED)
# Used for dropout.
use_noise = theano.shared(numpy_floatX(0.))
x = T.matrix('x', dtype=config.floatX)
y = T.vector('y', dtype='int32')
f_pred_prob, f_pred, cost = get_layer(options['encoder'])[1](tparams, x, y, use_noise, options,
prefix=options['encoder'])
return use_noise, x, y, f_pred_prob, f_pred, cost
'''
def pred_error(f_pred, data, verbose=False):
"""
Just compute the error
f_pred: Theano fct computing the prediction
prepare_data: usual prepare_data for that dataset.
"""
# idx0 = numpy.random.randint(0,len(data[0]))
lendata = len(data[0])
sum_valid_err = 0
# loop over the valid/test set and predict the error for every song
for idx0 in range(lendata):
# on one whole random song
x = data[0][idx0]
y = data[1][idx0]
# emission probs
preds = f_pred(x)
targets = y
valid_err = (preds == targets).sum()
valid_err = 1. - numpy_floatX(valid_err) / len(y)
sum_valid_err += valid_err
sum_valid_err = sum_valid_err / lendata
return sum_valid_err
'''
# this pred_error function use viterbi algorithm to smooth outputs of LSTM
def pred_error(f_pred, data, verbose=False):
"""
Just compute the error
f_pred: Theano fct computing the prediction
prepare_data: usual prepare_data for that dataset.
"""
# idx0 = numpy.random.randint(0,len(data[0]))
lendata = len(data[0])
sum_valid_err = 0
# loop over the valid/test set and predict the error for every song
for idx0 in range(lendata):
# on one whole random song
x = data[0][idx0]
y = data[1][idx0]
# emission probs
e_probs = f_pred(x)
########################################################################################
# use dynamic programming to find out the optimal path
# viterbi algorithm is adapted from Pattern Recognition, Chapter 9
# transition matrix - impose high self-transition
st = 1e6 # self-transition factor
A = numpy.eye(ydim) * st + 1
# normalize with l1-norm
A = preprocessing.normalize(A, 'l1')
path = numpy.zeros_like(e_probs.argmax(axis=1))
dmax = numpy.zeros_like(e_probs)
dargmax = numpy.zeros_like(e_probs)
# this is viterbi based algorithm adapted from a course (use additive weights)
for k in range(x.shape[0]):
for i in range(ydim):
dtemp = numpy.zeros(ydim)
dtemp = dmax[k-1,:] + numpy.log(A[:,i]*e_probs[k,i])
dmax[k,i] = numpy.max(dtemp)
dargmax[k,i] = numpy.argmax(dtemp)
# set starting point
dargmax[0,0] = numpy.argmax(dmax[-1])
# reverse
dargmax_ = dargmax[::-1]
path_ = numpy.zeros_like(path)
for k in range(x.shape[0]):
path_[k] = dargmax_[k-1,path_[k-1]]
path = path_[::-1]
# the path yield from the above process should be the most probable path of pred
# set it as the prediction instead
########################################################################################
targets = y
valid_err = (path == targets).sum()
valid_err = 1. - numpy_floatX(valid_err) / len(y)
sum_valid_err += valid_err
sum_valid_err = sum_valid_err / lendata
return sum_valid_err
def train_lstm(
patience=10, # Number of epoch to wait before early stop if no progress
max_epochs=500, # The maximum number of epoch to run
dispFreq=10, # Display to stdout the training progress every N updates
decay_c=0., # Weight decay for the classifier applied to the U weights.
lrate=0.001, # Learning rate for sgd (not used for adadelta and rmsprop)
# n_words=10000, # Vocabulary size
optimizer=adadelta, # sgd, adadelta and rmsprop available, sgd very hard to use, not recommanded (probably need momentum and decaying learning rate).
encoder='lstm', # TODO: can be removed must be lstm.
dumppath='ctc_model.npz', # The best model will be saved there
validFreq=200, # Compute the validation error after this number of update.
saveFreq=1000, # Save the parameters after every saveFreq updates
maxlen=None, # Sequence longer then this get ignored
batch_size=100, # The batch size during training.
valid_batch_size=100, # The batch size used for validation/test set.
dataset=None,
# Parameter for extra option
noise_std=0.,
use_dropout=True, # if False slightly faster, but worst test error
# This frequently need a bigger model.
reload_model=None, # Path to a saved model we want to start from.
test_size=-1, # If >0, we keep only this number of test example.
scaling=1,
):
# Model options
model_options = locals().copy()
print "model options", model_options
print 'Loading data'
# the dateset is organized as:
# X - n_songs * n_timesteps * dim_proj (dim_proj = 24 for chromagram based dataset)
# y - n_songs * n_timesteps * 1
train, valid, test = load_data_song(dataset=dataset, valid_portion=0.05, test_portion=0.05)
print 'data loaded'
model_options['xdim'] = xdim
model_options['dim_proj'] = dim_proj
model_options['ydim'] = ydim
print 'Building model'
# This create the initial parameters as numpy ndarrays.
# Dict name (string) -> numpy ndarray
params = init_params(model_options)
if reload_model:
load_params('lstm_model.npz', params)
# This create Theano Shared Variable from the parameters.
# Dict name (string) -> Theano Tensor Shared Variable
# params and tparams have different copy of the weights.
tparams = init_tparams(params)
# use_noise is for dropout
# the model takes input of:
# x -- n_timesteps * dim_proj * n_samples (in a simpler case, n_samples = 1 in ctc)
# y -- n_timesteps * 1 * n_samples (in a simpler case, n_samples = 1 in ctc)
(use_noise, x,
y, f_pred_prob, f_pred, cost) = build_model(tparams, model_options)
if decay_c > 0.:
decay_c = theano.shared(numpy_floatX(decay_c), name='decay_c')
weight_decay = 0.
weight_decay += (tparams['U'] ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
f_cost = theano.function([x, y], cost, name='f_cost')
grads = T.grad(cost, wrt=tparams.values())
f_grad = theano.function([x, y], grads, name='f_grad')
lr = T.scalar(name='lr')
f_grad_shared, f_update = optimizer(lr, tparams, grads,
x, y, cost)
print 'Optimization'
print "%d train examples" % len(train[0])
print "%d valid examples" % len(valid[0])
print "%d test examples" % len(test[0])
history_errs = []
best_p = None
bad_count = 0
uidx = 0 # the number of update done
estop = False # early stop
start_time = time.time()
try:
for eidx in xrange(max_epochs):
n_samples = 0
# Get random sample a piece of length batch_size from a song
idx0 = numpy.random.randint(0,len(train[0]))
idx1 = numpy.random.randint(0,len(train[0][idx0])-batch_size) # 500 in our case
uidx += 1
use_noise.set_value(1.)
# Select the random examples for this minibatch
x = train[0][idx0][idx1:idx1+batch_size]
y = train[1][idx0][idx1:idx1+batch_size]
# Get the data in numpy.ndarray format
# This swap the axis!
# Return something of shape (minibatch maxlen, n samples)
n_samples += 1
cost = f_grad_shared(x, y)
f_update(lrate)
# if numpy.isnan(cost) or numpy.isinf(cost):
# print 'NaN detected'
# return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost
if dumppath and numpy.mod(uidx, saveFreq) == 0:
print 'Saving...',
# save the best param set to date (best_p)
if best_p is not None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(dumppath, history_errs=history_errs, **params)
pkl.dump(model_options, open('%s.pkl' % dumppath, 'wb'), -1)
print 'Done'
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
# train_err = pred_error(f_pred, train)
valid_err = pred_error(f_pred, valid)
# test_err = pred_error(f_pred, test)
# history_errs.append([valid_err, test_err])
history_errs.append([valid_err, 1])
# save param only if the validation error is less than the history minimum
if (uidx == 0 or
valid_err <= numpy.array(history_errs)[:,
0].min()):
best_p = unzip(tparams)
bad_counter = 0
# print ('Train ', train_err, 'Valid ', valid_err,
# 'Test ', test_err)
print ('Valid ', valid_err)
# early stopping
if (len(history_errs) > patience and
valid_err >= numpy.array(history_errs)[:-patience,
0].min()):
bad_counter += 1
if bad_counter > patience:
print 'Early Stop!'
estop = True
break
# print 'Seen %d samples' % n_samples
if estop:
break
except KeyboardInterrupt:
print "Training interupted"
end_time = time.time()
if best_p is not None:
zipp(best_p, tparams)
else:
best_p = unzip(tparams)
use_noise.set_value(0.)
train_err = pred_error(f_pred, train)
valid_err = pred_error(f_pred, valid)
test_err = pred_error(f_pred, test)
print 'Train ', train_err, 'Valid ', valid_err, 'Test ', test_err
if dumppath:
numpy.savez(dumppath, train_err=train_err,
valid_err=valid_err, test_err=test_err,
history_errs=history_errs, **best_p)
print 'The code run for %d epochs, with %f sec/epochs' % (
(eidx + 1), (end_time - start_time) / (1. * (eidx + 1)))
print >> sys.stderr, ('Training took %.1fs' %
(end_time - start_time))
return train_err, valid_err, test_err
if __name__ == '__main__':
# See function train for all possible parameter and there definition.
train_lstm(
dataset=dataset,
dim_proj=dim_proj,
xdim=xdim,
ydim=ydim,
dumppath=dumppath
max_epochs=max_epochs,
use_dropout=use_dropout,
batch_size=batch_size
)
| [
"dengjunqi0632311@gmail.com"
] | dengjunqi0632311@gmail.com |
da4a51fe5418cf701794690ff06118bece17cfc2 | 488c5cd536987d516e167d4050145d7f8da10d12 | /python/CousinsinBinaryTree.py | 6e98a722f226291d0568f8e9ba45b069a09c11b7 | [] | no_license | jhadheeraj1986/leetCodeMay2020 | 0cc90b96df8cbf5ee4e0275278d0f1cafaa7f677 | ba1adb1eae345174e43b08dc08a1db83005ef514 | refs/heads/master | 2022-06-20T14:39:59.699755 | 2020-05-12T18:11:08 | 2020-05-12T18:11:08 | 260,703,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | '''
In a binary tree, the root node is at depth 0, and children of each depth k node are at depth k+1.
Two nodes of a binary tree are cousins if they have the same depth, but have different parents.
We are given the root of a binary tree with unique values, and the values x and y of two different nodes in the tree.
Return true if and only if the nodes corresponding to the values x and y are cousins.
Example 1:
Input: root = [1,2,3,4], x = 4, y = 3
Output: false
Example 2:
Input: root = [1,2,3,null,4,null,5], x = 5, y = 4
Output: true
Example 3:
Input: root = [1,2,3,null,4], x = 2, y = 3
Output: false
Note:
The number of nodes in the tree will be between 2 and 100.
Each node has a unique integer value from 1 to 100.
'''
mapTemp = dict()
class Solution:
def findElement(self, root: TreeNode, z: int, level:int):
if root is None:
return
else:
if (z == root.val) or(root.left and z == root.left.val) or (root.right and z == root.right.val):
mapTemp[z] = [level, root.val]
return
self.findElement(root.left, z, level+1)
self.findElement(root.right, z,level+1)
def isCousins(self, root: TreeNode, x: int, y: int) -> bool:
self.findElement(root,x, 1)
self.findElement(root, y, 1)
print(mapTemp[x]," ", mapTemp[y])
if (mapTemp[x][0] == mapTemp[y][0]) and (mapTemp[x][1] != mapTemp[y][1]):
return True
else:
return False
| [
"noreply@github.com"
] | noreply@github.com |
09a6ae1c85c670fc71a8fd2625c77c98c7f81c37 | d79e377a21000aa94959e5f40054e1e935c46a8c | /pong_player.py | f6352a4db3a5ce91fc523044f172f2297cba871d | [] | no_license | daMichaelB/kivy-demo | a20a74795379495a0c65eef14bb9f391f184bb6e | 2eb2f1eb07192de7628022101add5ccc5e451222 | refs/heads/master | 2020-08-02T09:21:35.968316 | 2019-09-27T11:49:46 | 2019-09-27T11:49:46 | 211,301,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | from kivy.uix.widget import Widget
from kivy.properties import NumericProperty
from kivy.vector import Vector
class PongPlayer(Widget):
score = NumericProperty(0)
def bounce_ball(self, ball):
if self.collide_widget(ball):
vx, vy = ball.velocity
offset = (ball.center_y - self.center_y) / (self.height / 2)
bounced = Vector(-1 * vx, vy)
vel = bounced * 1.1
ball.velocity = vel.x, vel.y + offset | [
"michael.boesl@t-online.de"
] | michael.boesl@t-online.de |
2ee4b361f7e0f5c97c4211a6a2645488e1d6f0b5 | 9fc3caf6774f4e8266ff78e11edb4589953cd056 | /eshopdev/eshop/eshopadmin/apps.py | 6727a3b495705d68a03e6dd1f8e196114105475b | [] | no_license | bopopescu/ecom-django | 6995cb7fca1609a12faa87bc3c7717506f4e0b7f | be98b9848e68ceed685f2a65bae34316a059c5e0 | refs/heads/master | 2022-11-19T08:20:24.874817 | 2020-02-28T19:00:12 | 2020-02-28T19:00:12 | 281,838,999 | 0 | 0 | null | 2020-07-23T03:17:49 | 2020-07-23T03:17:48 | null | UTF-8 | Python | false | false | 95 | py | from django.apps import AppConfig
class EshopadminConfig(AppConfig):
name = 'eshopadmin'
| [
"tasnim.hosen.ewu@gmail.com"
] | tasnim.hosen.ewu@gmail.com |
e998797bde8ca831a7f45d8d2ef31f968a7790ae | 05c1c11bbeb5be7ecaa837eda9e907c9128622d2 | /webempresarial/core/urls.py | 222e856ba0c54dc0e01aca08a5e4f5f844963d4d | [] | no_license | javierescolar/web-empresa | 2f9dcc0c98eb89bf550c7bc5d3bf7e4319f4ca26 | bdc06ef038218a75da948022d8629dc619ed218f | refs/heads/master | 2020-07-04T03:36:52.866103 | 2019-08-13T12:47:04 | 2019-08-13T12:47:04 | 202,141,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | from django.urls import path
from . import views
urlpatterns = [
path('',views.home,name='home'),
path('about/',views.about,name='about'),
path('store/',views.store,name='store'),
] | [
"javierescolar10@gmail.com"
] | javierescolar10@gmail.com |
7722fb46c2c820d5c4afe95d9aea8e35da673d71 | dc399f13d79b4fe80734b4ff9a47e6daac046efb | /ControlFlow/IterItems().py | 98e735d55cc6fdd2a1e4737b4d7cea9a8af47272 | [] | no_license | brrbaral/pythonbasic | 22531a75c8fc5e36dd7e9c1dfae31c5809365206 | dc8fbddc7d3671ac14ee5f53db32f2d210f71de0 | refs/heads/master | 2020-07-11T03:12:54.691238 | 2019-08-26T08:41:51 | 2019-08-26T08:41:51 | 204,433,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | d1={"Bishow":"Pokhara","shree":"hetauda"}
print("The key-value pait is :")
for i,j in d1.items():
print(i,j) | [
"brr.baral@gmail.com"
] | brr.baral@gmail.com |
d5f376ce543ef920ad3197131b8bd756a745399c | 9130bdbd90b7a70ac4ae491ddd0d6564c1c733e0 | /venv/lib/python3.8/site-packages/future/moves/xmlrpc/server.py | ec5409cb2504d3f8fd228a1760fa053fa5767669 | [] | no_license | baruwaa12/Projects | 6ca92561fb440c63eb48c9d1114b3fc8fa43f593 | 0d9a7b833f24729095308332b28c1cde63e9414d | refs/heads/main | 2022-10-21T14:13:47.551218 | 2022-10-09T11:03:49 | 2022-10-09T11:03:49 | 160,078,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/11/20/d7/7695204cac9e1660834a7266069f3338d8c9925b1124ebf2e0eb5a00b7 | [
"45532744+baruwaa12@users.noreply.github.com"
] | 45532744+baruwaa12@users.noreply.github.com |
f225babc8403680d28288ebf49150e7c4d9c2893 | f8da830331428a8e1bbeadf23345f79f1750bd98 | /msgraph-cli-extensions/v1_0/usersactions_v1_0/azext_usersactions_v1_0/vendored_sdks/usersactions/operations/_user_onenote_notebook_section_group_section_page_parent_notebook_operations.py | 9cbe41f6e394adb19707b01f0c335867f7b93f53 | [
"MIT"
] | permissive | ezkemboi/msgraph-cli | e023e1b7589461a738e42cbad691d9a0216b0779 | 2ceeb27acabf7cfa219c8a20238d8c7411b9e782 | refs/heads/main | 2023-02-12T13:45:03.402672 | 2021-01-07T11:33:54 | 2021-01-07T11:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,768 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UserOnenoteNotebookSectionGroupSectionPageParentNotebookOperations(object):
"""UserOnenoteNotebookSectionGroupSectionPageParentNotebookOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~users_actions.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def copy_notebook(
self,
user_id, # type: str
notebook_id, # type: str
section_group_id, # type: str
onenote_section_id, # type: str
onenote_page_id, # type: str
group_id=None, # type: Optional[str]
rename_as=None, # type: Optional[str]
notebook_folder=None, # type: Optional[str]
site_collection_id=None, # type: Optional[str]
site_id=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphOnenoteOperation"
"""Invoke action copyNotebook.
Invoke action copyNotebook.
:param user_id: key: id of user.
:type user_id: str
:param notebook_id: key: id of notebook.
:type notebook_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_page_id: key: id of onenotePage.
:type onenote_page_id: str
:param group_id:
:type group_id: str
:param rename_as:
:type rename_as: str
:param notebook_folder:
:type notebook_folder: str
:param site_collection_id:
:type site_collection_id: str
:param site_id:
:type site_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphOnenoteOperation, or the result of cls(response)
:rtype: ~users_actions.models.MicrosoftGraphOnenoteOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenoteOperation"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.PathsFm3Zd0UsersUserIdOnenoteNotebooksNotebookIdSectiongroupsSectiongroupIdSectionsOnenotesectionIdPagesOnenotepageIdParentnotebookMicrosoftGraphCopynotebookPostRequestbodyContentApplicationJsonSchema(group_id=group_id, rename_as=rename_as, notebook_folder=notebook_folder, site_collection_id=site_collection_id, site_id=site_id)
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.copy_notebook.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'notebook-id': self._serialize.url("notebook_id", notebook_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'PathsFm3Zd0UsersUserIdOnenoteNotebooksNotebookIdSectiongroupsSectiongroupIdSectionsOnenotesectionIdPagesOnenotepageIdParentnotebookMicrosoftGraphCopynotebookPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphOnenoteOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
copy_notebook.metadata = {'url': '/users/{user-id}/onenote/notebooks/{notebook-id}/sectionGroups/{sectionGroup-id}/sections/{onenoteSection-id}/pages/{onenotePage-id}/parentNotebook/microsoft.graph.copyNotebook'} # type: ignore
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
ddbcf1cb316324fb3fa764443ecdc62a136db604 | fa690c6abf35b956255632f08b204cf4f673c9d0 | /OCRDemo/01-tesseract_ocr.py | 53ca5871e21e54c3df8fa5962b8e11775d39ae78 | [] | no_license | whatkind/web-crawler-study | e529c72cfbfccef1dbe97c11c737956f75cf7b65 | 132517558188a32e92f0eaa8873a57221eb1c457 | refs/heads/master | 2023-03-05T10:14:54.016167 | 2021-02-18T15:09:25 | 2021-02-18T15:09:25 | 337,470,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | import pytesseract
from PIL import Image
img = Image.open('img.png')
code = pytesseract.image_to_string(img)
print(code)
| [
"15681215882@163.com"
] | 15681215882@163.com |
7b616218a1ea0c0674bd50683aa208dfa0ec2191 | 91a276499d81615ad703bd48407681694918cb5d | /students/AndrewMiotke/session06/test_mailroom.py | 981ac5f901a30e2b2fca8ac1d990f7103eb77713 | [] | no_license | colephalen/SP_2019_210A_classroom | 2ed68fea0ffe322d2f55d4ebc5bdde6bf17ee842 | a17145bdcd235f9853f5f0c2feca20cf49df7f30 | refs/heads/master | 2020-05-07T12:08:25.854445 | 2019-06-15T03:38:45 | 2019-06-15T03:38:45 | 180,490,097 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | """Test for mailroom"""
import pytest
import mailroom
def test_donor_list():
database = mailroom.donors_list
assert "Rufio" in database
assert "Gus" in database
assert "Spike" not in database
def test_get_donor_not():
donor = mailroom.get_donor("Lemon")
assert donor is None
def test_send_thank_you_letter():
donor = ("Rufio", [897, 200 , 200])
letter = mailroom.thank_you_letter(donor)
assert "Rufio" in letter
assert "$200" in letter
def test_add_donor():
donor_name = "Lemon "
donor_to_add = mailroom.add_donor(donor_name)
# database = mailroom.donors_list
assert donor_to_add == ("Lemon", []) | [
"andrewmiotke@gmail.com"
] | andrewmiotke@gmail.com |
0b2a72f11c0480ad7dcc642060b3be4e2cd32c42 | ae62c303e86a482928f2d4a5fce825489fca509d | /BeesEtAl/Base_Sorter.py | a7b983d10234eb9dc8a9b41c99bc1892aa2b5f01 | [
"MIT"
] | permissive | FJFranklin/BeesEtAl | 8d025205316041920d3e129f1ff7f7e5625ffe3b | 260d4ac4a3f638a423e8004ef3ef5d60385a75bd | refs/heads/master | 2022-12-15T16:06:42.840333 | 2022-12-05T09:59:36 | 2022-12-05T09:59:36 | 197,208,735 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,515 | py | import csv
import numpy as np
class Base_Sorter(object):
def __init__(self, Ndim, bPareto=False):
self.Ndim = Ndim
self.ibest = None # index of best record
self.record = None
self.Nrecord = 0
self.Ncost = 0
self.bMESO = False
self.bPareto = bPareto
def best(self):
if self.ibest is not None:
cost = self.record[self.ibest,1:(1+self.Ncost)]
X = self.record[self.ibest,(1+self.Ncost):(1+self.Ncost+self.Ndim)]
else:
cost = None
X = None
return cost, X
def lookup(self, X):
if X.ndim == 1:
index, rank = self.__lookup(X)
if rank is not None:
cost = self.record[index,1:(1+self.Ncost)]
else:
cost = None
else:
rank = []
cost = []
for ix in range(0, len(X)):
index, rthis = self.__lookup(X[ix])
if rthis is not None:
cthis = self.record[index,1:(1+self.Ncost)]
else:
cthis = None
rank.append(rthis)
cost.append(cthis)
return rank, cost
def compare(self, X, Y): # returns True if X < Y
lt = False
xi, xr = self.__lookup(X)
if xr is not None:
yi, yr = self.__lookup(Y)
if yr is not None:
if xr < yr:
lt = True
return lt
def __lookup(self, X):
rank = None
index = None
imin = 0
imax = self.Nrecord
if imax > imin:
for ix in range(0, self.Ndim):
ir = ix + self.Ncost + 1
iL = np.searchsorted(self.record[imin:imax,ir], X[ix], side='left')
iR = np.searchsorted(self.record[imin:imax,ir], X[ix], side='right')
imax = imin + iR
imin = imin + iL
if imin == imax:
index = imin
break
if index is None:
index = imin
rank = int(self.record[index,0])
return index, rank
def pop(self, index=None):
cost = None
X = None
M = None
if self.bPareto: # return a random (or by index, if specified) Pareto solution without removing it
if self.Nrecord > 0:
if index is None:
i = np.random.randint(self.Nrecord)
else:
i = index
cost = self.record[i,1:(1+self.Ncost)]
X = self.record[i,(1+self.Ncost):(1+self.Ncost+self.Ndim)]
if self.bMESO:
M = self.record[i,(1+self.Ncost+self.Ndim):]
else:
if self.ibest is not None:
cost = self.record[self.ibest,1:(1+self.Ncost)]
X = self.record[self.ibest,(1+self.Ncost):(1+self.Ncost+self.Ndim)]
if self.bMESO:
M = self.record[self.ibest,(1+self.Ncost+self.Ndim):]
if self.Nrecord == 1:
self.ibest = None
self.record = None
self.Nrecord = 0
elif self.Nrecord > 1:
self.record = np.delete(self.record, self.ibest, axis=0)
self.Nrecord = self.Nrecord - 1
self.__rank()
#print('Pop: cost={c}, X={x}, M={m}'.format(c=cost, x=X, m=M))
return cost, X, M
    def push(self, cost, X, M=None):
        """Insert a candidate solution into the record.

        ``cost`` is the objective vector, ``X`` the design vector and ``M``
        an optional extra vector (its presence switches ``bMESO`` on).
        Each record row is laid out as ``[rank, *cost, *X, (*M)]``.

        In Pareto mode (``bPareto``) the candidate is kept only when no
        stored solution dominates it, and every stored solution the
        candidate dominates is removed.  Outside Pareto mode the candidate
        is inserted in sorted position unless an identical ``X`` is
        already stored.
        """
        if self.record is None:
            # First entry ever: create the record and remember the number
            # of cost components.
            if M is None:
                self.record = np.asarray([[0, *cost, *X],], dtype=np.float64)
            else:
                self.record = np.asarray([[0, *cost, *X, *M],], dtype=np.float64)
                self.bMESO = True
            self.Nrecord = 1
            self.Ncost = len(cost)
            self.ibest = 0
        elif self.bPareto:
            dominated = []       # indices of stored rows the candidate dominates
            bDominant = True     # does the candidate dominate every stored row?
            bDominated = False   # is the candidate dominated by some stored row?
            for ir in range(0, self.Nrecord):
                rcost = self.record[ir,1:(1+self.Ncost)]
                if self.dominates(rcost, cost):
                    # An existing solution beats the candidate: discard it.
                    bDominated = True
                    bDominant = False
                    break
                if self.dominates(cost, rcost):
                    dominated.append(ir)
                else:
                    bDominant = False
            if bDominant:
                # Candidate beats everything stored: restart the record with it.
                if M is None:
                    self.record = np.asarray([[0, *cost, *X],], dtype=np.float64)
                else:
                    self.record = np.asarray([[0, *cost, *X, *M],], dtype=np.float64)
                self.Nrecord = 1
                self.ibest = 0
            elif not bDominated:
                # Candidate joins the front: drop the rows it dominates first.
                if len(dominated) > 0:
                    self.record = np.delete(self.record, dominated, axis=0)
                    self.Nrecord = self.Nrecord - len(dominated)
                    self.__rank()
                # __lookup presumably returns the sorted insertion index and
                # the rank of an existing identical X (None when X is new) --
                # inferred from usage here; confirm against __lookup.
                index, rank = self.__lookup(X)
                if rank is None:
                    if M is None:
                        self.record = np.insert(self.record, index, [[0, *cost, *X],], axis=0)
                    else:
                        self.record = np.insert(self.record, index, [[0, *cost, *X, *M],], axis=0)
                    self.Nrecord = self.Nrecord + 1
                    self.__rank()
                #print('~~~~( Optimal: {r} removed; new total = {t} )~~~~'.format(r=len(dominated), t=self.Nrecord))
        else:
            # Non-Pareto mode: keep every distinct X, sorted by __lookup's index.
            index, rank = self.__lookup(X)
            if rank is None:
                if M is None:
                    self.record = np.insert(self.record, index, [[0, *cost, *X],], axis=0)
                else:
                    self.record = np.insert(self.record, index, [[0, *cost, *X, *M],], axis=0)
                self.Nrecord = self.Nrecord + 1
                self.__rank()
            #print('Push: X={x}, record={r}'.format(x=X, r=self.record))
def __rank(self):
multi = np.zeros(self.Nrecord)
for ic in range(0, self.Ncost):
order = self.record[:,(1+ic)].argsort()
for r in range(0, self.Nrecord):
multi[order[r]] = multi[order[r]] + r
order = multi.argsort()
for r in range(0, self.Nrecord):
self.record[order[r],0] = r
self.ibest = order[0]
def get_by_index(self, r):
rank = self.record[r,0]
cost = self.record[r,1:(1+self.Ncost)]
X = self.record[r,(1+self.Ncost):(1+self.Ncost+self.Ndim)]
return rank, cost, X
def dominates(self, X_cost, Y_cost): # returns true if X dominates Y
bDominates = True
for ic in range(0, self.Ncost):
if X_cost[ic] < Y_cost[ic]:
continue
bDominates = False
break
return bDominates
def pareto(self, file_name=None):
if self.Nrecord == 0:
return None, None
the_dominant = []
the_front = []
for ip in range(0, self.Nrecord):
pcost = self.record[ip,1:(1+self.Ncost)]
bDominant = True
bDominated = False
for ir in range(0, self.Nrecord):
if ip == ir:
continue
rcost = self.record[ir,1:(1+self.Ncost)]
if self.dominates(rcost, pcost):
bDominated = True
bDominant = False
break
if self.dominates(pcost, rcost) == False:
bDominant = False
if bDominant:
the_dominant.append(ip)
if file_name is not None:
self.__save('Dominant', the_dominant, file_name)
elif bDominated == False:
the_front.append(ip)
if file_name is not None:
self.__save('Optimal', the_front, file_name)
return the_dominant, the_front
def __save(self, title, indices, file_name):
rank, cost, X = self.get_by_index(indices)
with open(file_name, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow([title] + [''] * (len(cost[0]) + len(X[0]) - 1))
for i in range(0, len(indices)):
writer.writerow([*(cost[i,:]), *(X[i,:])])
| [
"fjf@alinameridon.com"
] | fjf@alinameridon.com |
97766fdf195a5859bff784f39f2a91ed519b88aa | 22761810521e125a5f3aa1d020d9261ce79551f0 | /Binary_search.py | 56d84e1b9b0d6b6db6b7013be0890bbb44516b7e | [] | no_license | BANSAL-NISHU/JOCP | bfe45fdc74cee62397b3c2eceb65e2153cbc74a4 | da095c9bf3c003fbccceea7571469036aaa8781a | refs/heads/main | 2023-08-14T17:07:22.436317 | 2021-09-26T10:38:14 | 2021-09-26T10:38:14 | 337,938,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | def binary_search(arr, x):
    first_position = 0
    last_position = len(arr) - 1
    flag = 0  # flag 0 means that the element has not been found yet
    count = 0  # number of loop iterations, reported when the target is found
    while first_position <= last_position and flag == 0:
        count += 1
        mid = (first_position + last_position) // 2
        if x == arr[mid]:
            flag = 1
            print("Number is found at position: "+str(mid))
            print("Number of iterations: "+str(count))
            return
        else:
            # Discard the half of the (sorted) array that cannot contain x.
            if x < arr[mid]:
                last_position = mid - 1
            else:
                first_position = mid + 1
    print("The number is not found.")
# Build the sorted test array [0, 1, ..., 500] and search it for 96.
arr = list(range(501))
binary_search(arr, 96)
| [
"bansalnishu418@gmail.com"
] | bansalnishu418@gmail.com |
bd01566b4e6d8ca42a82ef1dfdb095994b042611 | 9ee4381b23fc76a6c3819eacd67cfcce873f7119 | /migrations/versions/0c6379be7d05_.py | 67791e78005673f73c2fbd00a19e8e80e6309d78 | [] | no_license | jasonsie1024/WLPCS | 51c6ae34110bd86974075f82028b9ae0a03a1123 | d0d5331344cfe977bbf5524e0bfbaa99f9b229de | refs/heads/master | 2022-07-30T09:53:18.303528 | 2019-02-03T06:47:21 | 2019-02-03T06:47:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,672 | py | """empty message
Revision ID: 0c6379be7d05
Revises:
Create Date: 2019-02-03 10:28:59.904314
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0c6379be7d05'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply the initial schema: create all tables and their indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('messages',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('content', sa.Text(length=200), nullable=True),
    sa.Column('content_html', sa.Text(), nullable=True),
    sa.Column('t', sa.String(length=20), nullable=True),
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('problems',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(), nullable=True),
    sa.Column('abbr', sa.String(), nullable=True),
    sa.Column('content', sa.Text(), nullable=True),
    sa.Column('content_html', sa.Text(), nullable=True),
    sa.Column('total_score', sa.Integer(), nullable=True),
    sa.Column('scoring_script', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('settings',
    sa.Column('setting', sa.String(), nullable=False),
    sa.Column('value', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('setting')
    )
    op.create_index(op.f('ix_settings_setting'), 'settings', ['setting'], unique=False)
    op.create_table('submissions',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('pid', sa.Integer(), nullable=True),
    sa.Column('uid', sa.Integer(), nullable=True),
    sa.Column('code', sa.Text(), nullable=True),
    sa.Column('code_hash', sa.String(), nullable=True),
    sa.Column('time', sa.Integer(), nullable=True),
    sa.Column('memory', sa.Integer(), nullable=True),
    sa.Column('verdict', sa.String(length=10), nullable=True),
    sa.Column('score', sa.Integer(), nullable=True),
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('email', sa.String(length=64), nullable=True),
    sa.Column('username', sa.String(length=64), nullable=True),
    sa.Column('password_hash', sa.String(length=128), nullable=True),
    sa.Column('status', sa.Text(), nullable=True),
    sa.Column('score', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
    # testdatas is created last because its 'pid' column references problems.id.
    op.create_table('testdatas',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('pid', sa.Integer(), nullable=True),
    sa.Column('time_limit', sa.Integer(), nullable=True),
    sa.Column('memory_limit', sa.Integer(), nullable=True),
    sa.Column('input', sa.Text(), nullable=True),
    sa.Column('answer', sa.Text(), nullable=True),
    sa.Column('score', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['pid'], ['problems.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the initial schema: drop indexes and tables in reverse
    dependency order (testdatas first, since it references problems)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('testdatas')
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    op.drop_table('submissions')
    op.drop_index(op.f('ix_settings_setting'), table_name='settings')
    op.drop_table('settings')
    op.drop_table('problems')
    op.drop_table('messages')
    # ### end Alembic commands ###
| [
"jason.plainlog@gmail.com"
] | jason.plainlog@gmail.com |
f455aa80b1be82e67994e7569290d6fb86b815af | bee46467988577b6f1af14add54a7fc4a2678d6a | /TestScript.py | 6c0b4a7c18571e6c48be7251efd8c59ffcc6a284 | [
"MIT"
] | permissive | jsharf/pseudo-ocr | 641bc4ed89725f0b91fca6859f5d1a3e46611368 | 5df2dee38d7b5c6557b6faa983c9f888ac020378 | refs/heads/master | 2021-01-22T10:17:52.763074 | 2014-12-01T20:10:06 | 2014-12-01T20:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | import os
import sys
import subprocess
tests = os.listdir("tests")
testnum = 1
while (True):
try:
if (str(testnum)+".txt" in tests):
testfile = "./tests/"+str(testnum)+".txt"
ansfile = "./tests/"+str(testnum)+".ans"
test = open(testfile)
ans = open(ansfile)
testproc = subprocess.Popen("./readpass", stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
testproc.stdin.write(test.read())
result = testproc.stdout.read()
answer = ans.read()
if (result == answer):
print("Test " + str(testnum) + " Success")
print("Answer: " + str(result))
else:
print("Test " + str(testnum) + " Failure")
print("Program Output: " + str(result))
print("Expected Output: " + str(answer))
testnum += 1
else:
break
except OSError as ose:
print "OS Error: " + ose.strerror
raise
except:
print "Unexpected error:", sys.exc_info()[0]
raise
| [
"jacobsharf@gmail.com"
] | jacobsharf@gmail.com |
2e7285463e78bdf7208f4375c725fbf3a18645cb | f743e7fd9ad7d9af200dcb077fe6f8d95a5025f9 | /tests/scripts/thread-cert/test_inform_previous_parent_on_reattach.py | 6809bc08bcd81da01b7c592357d7327526bb6796 | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Abhayakara/openthread | 6ea1cf8053290db20eb3a61732f30bea0083971c | fb83c90ffec5dee2194ee9bb5280fa48ce9a24fc | refs/heads/main | 2023-08-28T00:32:37.427937 | 2021-10-07T20:16:38 | 2021-10-07T20:16:38 | 295,905,892 | 5 | 0 | BSD-3-Clause | 2021-10-05T18:42:28 | 2020-09-16T02:48:57 | C++ | UTF-8 | Python | false | false | 4,994 | py | #!/usr/bin/env python3
#
# Copyright (c) 2021, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
import unittest
import config
import thread_cert
from pktverify.packet_filter import PacketFilter
# Test description:
# The purpose of this test is to verify a MED will inform its previous parent when re-attaches to another parent.
#
# Initial Topology:
#
# LEADER ----- ROUTER
# |
# MED
#
# Node ids used as keys into TOPOLOGY.
LEADER = 1
ROUTER = 2
MED = 3
# Initial MED child timeout (seconds); deliberately long so the MED does not
# detach on its own before the allowlist change forces the re-attach.
LONG_CHILD_TIMEOUT = 120
class TestReset(thread_cert.TestCase):
    """Verify a MED informs its previous parent when it re-attaches.

    Topology: LEADER -- ROUTER, with the MED initially attached to the
    LEADER.  Allowlists are then swapped so the MED can only reach the
    ROUTER, forcing a re-attach.
    """
    SUPPORT_NCP = False
    USE_MESSAGE_FACTORY = False

    TOPOLOGY = {
        LEADER: {
            'name': 'LEADER',
            'mode': 'rdn',
            'allowlist': [ROUTER, MED]
        },
        ROUTER: {
            'name': 'ROUTER',
            'mode': 'rdn',
            'allowlist': [LEADER]
        },
        MED: {
            'name': 'MED',
            'is_mtd': True,
            'mode': 'rn',
            'allowlist': [LEADER],
            'timeout': LONG_CHILD_TIMEOUT,
        },
    }

    def test(self):
        """Form the network, then force the MED to migrate LEADER -> ROUTER."""
        if 'posix' in os.getenv('OT_CLI_PATH', ''):
            self.skipTest("skip for posix tests")

        # Bring the network up: leader, then router, then the MED child.
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        self.nodes[ROUTER].start()
        self.simulator.go(7)
        self.assertEqual(self.nodes[ROUTER].get_state(), 'router')

        self.nodes[MED].start()
        self.simulator.go(7)
        self.assertEqual(self.nodes[MED].get_state(), 'child')
        self.assertIsChildOf(MED, LEADER)

        # Swap allowlists so the MED loses the LEADER and gains the ROUTER,
        # shorten its timeout, and wait long enough for the re-attach.
        self.nodes[LEADER].remove_allowlist(self.nodes[MED].get_addr64())
        self.nodes[MED].remove_allowlist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER].add_allowlist(self.nodes[MED].get_addr64())
        self.nodes[MED].add_allowlist(self.nodes[ROUTER].get_addr64())
        self.nodes[MED].set_timeout(config.DEFAULT_CHILD_TIMEOUT)
        self.simulator.go(config.DEFAULT_CHILD_TIMEOUT * 2)
        self.assertIsChildOf(MED, ROUTER)

        # Verify MED is not in the LEADER's Child Table.
        med_extaddr = self.nodes[MED].get_addr64()
        self.assertFalse(any(info['extaddr'] == med_extaddr for info in self.nodes[LEADER].get_child_table().values()))

        self.collect_ipaddrs()
        self.collect_rlocs()

    def verify(self, pv):
        """Check the captured packets for both attaches and the notify message."""
        pkts: PacketFilter = pv.pkts
        pv.summary.show()

        MED = pv.vars['MED']
        LEADER_RLOC = pv.vars['LEADER_RLOC']

        # MED should attach to LEADER first.
        pv.verify_attached('MED', 'LEADER', child_type='MTD')
        # MED should re-attach to ROUTER.
        pv.verify_attached('MED', 'ROUTER', child_type='MTD')
        # MED should send empty IPv6 message to inform previous parent (LEADER).
        pkts.filter_wpan_src64(MED).filter('lowpan.dst == {LEADER_RLOC} and lowpan.next == 0x3b',
                                           LEADER_RLOC=LEADER_RLOC).must_next()

    def assertIsChildOf(self, childid, parentid):
        """Assert the child's RLOC16 lies in the parent's RLOC range and the
        child's extended address appears in the parent's child table."""
        childRloc16 = self.nodes[childid].get_addr16()
        parentRloc16 = self.nodes[parentid].get_addr16()
        # A parent (router) RLOC16 has the low 10 child bits clear.
        self.assertEqual(parentRloc16 & 0xfc00, parentRloc16)
        self.assertEqual(childRloc16 & 0xfc00, parentRloc16)

        child_extaddr = self.nodes[childid].get_addr64()
        self.assertTrue(
            any(info['extaddr'] == child_extaddr for info in self.nodes[parentid].get_child_table().values()))
if __name__ == '__main__':
unittest.main()
| [
"elemon@apple.com"
] | elemon@apple.com |
8cbeef5ea8b3d7e0aa7655c31e01ef7d0da11446 | 8541f4118c6093c84e78d768285e7007ee5f6a6c | /apps/tax/migrations/0005_auto_20160306_1353.py | 8077c34962cafafda6e7398270db44898639bd2a | [] | no_license | iraycd/awecounting | c81a8ca6b7a4a942e63cf6b7d723f9883e57a107 | 388df4de63146e0a9a211afa522ec50e0f3df443 | refs/heads/master | 2021-01-15T23:30:27.439759 | 2016-03-16T10:34:40 | 2016-03-16T10:34:40 | 57,046,467 | 1 | 0 | null | 2016-04-25T14:03:40 | 2016-04-25T14:03:40 | null | UTF-8 | Python | false | false | 574 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-06 08:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tax', '0004_partytaxpreference'),
]
operations = [
migrations.AlterField(
model_name='partytaxpreference',
name='party',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='tax_preference', to='ledger.Party'),
),
]
| [
"roshanshrestha01@gmail.com"
] | roshanshrestha01@gmail.com |
f678eca13bb410b8ca617257bcb4b17d8de6f813 | 803c5084fb83c1b50c3bd0734f48f30792f783e1 | /Codeforces/educational3/a.py | 429eca18b8bb549c478a86f9141c4ec8fe697f5d | [] | no_license | guilhermeleobas/maratona | 9627570276d4fd843918013c1fd5a702bbada0e4 | 5d46263b335d2e70b0156001116f7030a068cf5a | refs/heads/master | 2022-09-20T11:28:26.367178 | 2022-09-05T05:20:43 | 2022-09-05T05:20:43 | 21,397,024 | 5 | 5 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | n = map(int, raw_input().split())[0]
# Python 2 solution: n (the count of values) is read on the line above.
# Greedily consume the largest values first until the running total
# reaches the target m, counting how many values were needed.
m = map(int, raw_input().split())[0]
s = []
for i in range(n):
    s.append (map(int, raw_input().split())[0])
s.sort(reverse=True)
ans = 0
for i in s:
    m -= i
    ans += 1
    if m <= 0:
        break
print ans
| [
"guilhermeleobas@gmail.com"
] | guilhermeleobas@gmail.com |
640eb0af4af465436d6daf67648ac33bddd48fca | 648086fe6ca10b207b6910d676cbbcdfa1a41295 | /src/read_xy.py | c28d71383e6ceafb1d4a1372619a7a9b34605cde | [
"MIT",
"CC-BY-3.0"
] | permissive | SwindaKJ/Introduction-to-Python-programming-for-MPECDT | f9f4daa03936b56428c180688622889561f2cbd2 | ec709ffebe733dab6c79e3d69496bd35ace51b2f | refs/heads/master | 2020-03-28T12:34:06.513164 | 2018-09-17T13:52:01 | 2018-09-17T13:52:01 | 148,311,845 | 0 | 0 | null | 2018-09-11T12:13:21 | 2018-09-11T12:13:21 | null | UTF-8 | Python | false | false | 478 | py | import numpy as np
import pylab as pl
#infile = open("../data/xy.dat", "r")
#This closes the file automatically after use
with open("../data/xy.dat", "r") as infile:
x = []
y = []
for line in infile:
# x_, y_ = [ float(w) for w in line.split() ]
x_, y_ = np.array( line.split(), dtype = float) # Alternative formulation
x.append(x_)
y.append(y_)
# infile.close()
x = np.array(x)
y = np.array(y)
pl.plot(x,y)
pl.grid()
pl.show()
| [
"skf1018@imperial.ac.uk"
] | skf1018@imperial.ac.uk |
b923127047254c84445608e989311a4fb0eb0b40 | 4f4776eb69cbea9ee1c87a22732c5d778855c83a | /leetcode/Number_Complement.py | 6f4cc7d18ff977b72e60674c3283b67bee1f0ecb | [] | no_license | k4u5h4L/algorithms | 4a0e694109b8aadd0e3b7a66d4c20692ecdef343 | b66f43354792b1a6facff90990a7685f5ed36a68 | refs/heads/main | 2023-08-19T13:13:14.931456 | 2021-10-05T13:01:58 | 2021-10-05T13:01:58 | 383,174,341 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | '''
Number Complement
Easy
The complement of an integer is the integer you get when you flip all the 0's to 1's and all the 1's to 0's in its binary representation.
For example, The integer 5 is "101" in binary and its complement is "010" which is the integer 2.
Given an integer num, return its complement.
Example 1:
Input: num = 5
Output: 2
Explanation: The binary representation of 5 is 101 (no leading zero bits), and its complement is 010. So you need to output 2.
Example 2:
Input: num = 1
Output: 0
Explanation: The binary representation of 1 is 1 (no leading zero bits), and its complement is 0. So you need to output 0.
'''
class Solution:
def findComplement(self, num: int) -> int:
binary = bin(num)[2:]
b = ""
for bit in binary:
if bit == '1':
b += '0'
else:
b += '1'
dec = 0
for i, char in enumerate(reversed(b)):
if char == '1':
dec += (2 ** i)
return dec
| [
"noreply@github.com"
] | noreply@github.com |
1c77e24b4bc144d88c51f323d1c8162383c278ae | 67216a79fe61b0ad144ab7b65c69713cbbb584f5 | /migrations/versions/3e3ffcf756f4_.py | b7afea4765f6dd3df4e1534d5f74bb1d91e0b16d | [] | no_license | BenMini/Fyyur | 430072a92cf951bb1a871e2a15143c7b15c80153 | ab354c40ed172e74373bbc4c0889de922e6fe1b8 | refs/heads/master | 2022-12-26T08:53:05.140030 | 2020-03-07T04:26:00 | 2020-03-07T04:26:00 | 242,560,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | """empty message
Revision ID: 3e3ffcf756f4
Revises: 6c476e731f0c
Create Date: 2020-02-10 22:36:19.986512
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3e3ffcf756f4'
down_revision = '6c476e731f0c'
branch_labels = None
depends_on = None
def upgrade():
    """Add the name indexes on the Artist and Venue tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_index(op.f('ix_Artist_name'), 'Artist', ['name'], unique=False)
    op.create_index(op.f('ix_Venue_name'), 'Venue', ['name'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the name indexes added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_Venue_name'), table_name='Venue')
    op.drop_index(op.f('ix_Artist_name'), table_name='Artist')
    # ### end Alembic commands ###
| [
"benminifie@Bens-MacBook-Pro.local"
] | benminifie@Bens-MacBook-Pro.local |
31fafff04d22006623a7bc672c81339d13885407 | 22ccc673a522b52f2678b6ac96e3ff2a104864ff | /jobs/migrations/0005_auto_20150902_0600.py | e6e2f4f6043f8e5e142666aeaf0c677401c0f62a | [] | no_license | ivlevdenis/pythondigest | 07e448da149d92f37b8ce3bd01b645ace1fa0888 | f8ccc44808a26960fb69a4c4c3491df3e6d3d24e | refs/heads/master | 2021-01-18T02:09:42.121559 | 2016-05-15T22:44:34 | 2016-05-15T22:44:34 | 58,350,368 | 0 | 0 | null | 2016-05-09T05:21:39 | 2016-05-09T05:21:39 | null | UTF-8 | Python | false | false | 1,435 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop the salary/url fields from JobItem, add a description field,
    and relax employer_name/place to be optional (auto-generated)."""

    dependencies = [
        ('jobs', '0004_jobfeed_is_activated'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='jobitem',
            name='salary_currency',
        ),
        migrations.RemoveField(
            model_name='jobitem',
            name='salary_from',
        ),
        migrations.RemoveField(
            model_name='jobitem',
            name='salary_till',
        ),
        migrations.RemoveField(
            model_name='jobitem',
            name='url_api',
        ),
        migrations.RemoveField(
            model_name='jobitem',
            name='url_logo',
        ),
        migrations.AddField(
            model_name='jobitem',
            name='description',
            field=models.TextField(null=True, blank=True, verbose_name='Описание вакансии'),
        ),
        migrations.AlterField(
            model_name='jobitem',
            name='employer_name',
            field=models.CharField(null=True, max_length=255, blank=True, verbose_name='Работодатель'),
        ),
        migrations.AlterField(
            model_name='jobitem',
            name='place',
            field=models.CharField(null=True, max_length=255, blank=True, verbose_name='Место'),
        ),
    ]
| [
"sapronov.alexander92@gmail.com"
] | sapronov.alexander92@gmail.com |
234fb35529a945d7a473a628234aa6389cbc3044 | f35cc10fc2cc064bc887a52e05d46a8297bec88a | /python/django/MyWeb/books/migrations/0003_auto_20170720_1035.py | 442786e898d953256a96d61c2bd21c21438c4af1 | [] | no_license | privatesky911/theBusyCoderDemo | 0ba0047d3e7b719f54584854f10acf9e48bafcda | 4225d039a7a7c223b383dfaa818535fce15219ba | refs/heads/master | 2020-04-12T06:31:14.405151 | 2017-07-20T10:01:56 | 2017-07-20T10:01:56 | 65,343,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-20 02:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make Book.authors a CASCADE-deleting foreign key to books.Author
    (auto-generated)."""

    dependencies = [
        ('books', '0002_auto_20170719_1447'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='authors',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='books.Author'),
        ),
    ]
| [
"huxj-win7"
] | huxj-win7 |
47010cc8029ce26f087bc5af210729e2ad8964d0 | 6062dc6c23a4013a879617cd9dd8d60fba582964 | /day23/machine.py | 16384f67fcfce888fea5c9cb2095b5fec6a57bfc | [] | no_license | grey-area/advent-of-code-2017 | 8134a1213e69460e24a821ff96e38cbc7f83b480 | 87c213277e4535fff0a1dcf7ad26e182e20b8165 | refs/heads/master | 2020-04-13T05:38:36.852721 | 2018-12-30T23:31:00 | 2018-12-30T23:31:00 | 162,997,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,514 | py | from collections import defaultdict
from collections import namedtuple
from collections import deque
import operator
from functools import partial
Instruction = namedtuple('Instruction', ['op', 'args'])
class Machine():
    """A tiny register-machine interpreter (set/sub/mul/jnz instructions).

    Registers default to 0 via a defaultdict; ``mul_called`` counts how
    many times the ``mul`` instruction executed.
    """

    def __init__(self, filename):
        """Load the program from *filename* and reset the machine state."""
        self.registers = defaultdict(int)
        self.load_program(filename)
        self.ip = 0                 # instruction pointer
        self.terminated = False     # set once ip leaves the program range
        self.mul_called = 0         # number of executed mul instructions

    def cast(self, X):
        """Interpret operand X as an integer literal, or else as a register name."""
        try:
            return int(X)
        except ValueError:
            return self.registers[X]

    def sub(self, X, Y):
        """Register X -= value of Y."""
        self.registers[X] = self.registers[X] - self.cast(Y)

    def mul(self, X, Y):
        """Register X *= value of Y, counting the invocation."""
        self.registers[X] = self.registers[X] * self.cast(Y)
        self.mul_called += 1

    def jnz(self, X, Y):
        """Jump by Y if X is non-zero.

        The -1 compensates for the unconditional ip increment in step().
        """
        if self.cast(X) != 0:
            self.ip += self.cast(Y) - 1

    def set(self, X, Y):
        """Register X = value of Y."""
        self.registers[X] = self.cast(Y)

    def load_program(self, filename):
        """Parse *filename* into a list of (op, args) Instruction tuples."""
        ops = {}
        self.program = []
        ops['jnz'] = self.jnz
        ops['set'] = self.set
        ops['sub'] = self.sub
        ops['mul'] = self.mul
        with open(filename) as f:
            text = f.read().splitlines()
        for line in text:
            op_str, *args = line.split(' ')
            self.program.append(Instruction(ops[op_str], args))

    def step(self):
        """Execute one instruction and advance; terminate if ip leaves the program."""
        op, args = self.program[self.ip]
        op(*args)
        self.ip += 1
        if self.ip < 0 or self.ip >= len(self.program):
            self.terminated = True
| [
"andrew@awebb.info"
] | andrew@awebb.info |
a64b0a2dd100c7fbe907d347b9754069df72d64e | ac9eb074ba07262e233f78b4f6922a29f164d914 | /week4_paths2/2_negative_cycle/negative_cycle.py | 67718c63742900ba788513c84152733d6ae46e59 | [] | no_license | own3dh2so4/coursera_algorithms-on-graphs | eb07f99b9626bf52b2f50ed143b97b3fcbe2f1dc | 063bd68e887298c2f6396aef5c1a1c0e07b78253 | refs/heads/master | 2022-06-09T05:18:29.981022 | 2020-05-03T18:30:55 | 2020-05-03T18:30:55 | 260,906,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | #Uses python3
import sys
def relax(dist, u, v, cost, prev):
    """Try to relax edge (u, v) with weight *cost*.

    Updates dist[v] and prev[v] when the path through u improves on the
    current distance (None means "not yet reached").  Returns True when a
    relaxation happened, False otherwise.
    """
    candidate = dist[u] + cost
    if dist[v] is not None and dist[v] <= candidate:
        return False
    dist[v] = candidate
    prev[v] = u
    return True
def negative_cycle(adj, cost):
    """Detect a negative-weight cycle in a directed graph.

    Runs a Bellman-Ford sweep from every not-yet-visited vertex so all
    weakly connected parts of the graph are examined.  If any edge can
    still be relaxed after len(adj) rounds, a negative cycle exists.

    :param adj: adjacency list; adj[u] lists the heads of edges out of u
    :param cost: cost[u][j] is the weight of edge adj[u][j]
    :return: 1 if a negative cycle exists, 0 otherwise
    """
    visited = [False for _ in range(len(adj))]
    for k in range(len(adj)):
        if not visited[k]:
            distances = [None for _ in range(len(adj))]
            prev = [-1 for _ in range(len(adj))]
            distances[k] = 0
            visited[k] = True
            changes = True
            i = 0
            max_i = len(adj)
            while i < max_i and changes:
                changes = False
                for u in range(len(adj)):
                    if distances[u] is not None:
                        for j, v in enumerate(adj[u]):
                            visited[v] = True
                            # Bug fix: the original did `changes = relax(...)`,
                            # keeping only the result for the *last* edge, so a
                            # sweep could stop early (or the final-round check
                            # could fail) even though earlier edges had relaxed.
                            if relax(distances, u, v, cost[u][j], prev):
                                changes = True
                i += 1
            # Still relaxing after len(adj) full rounds => negative cycle.
            if changes:
                return 1
    return 0
if __name__ == '__main__':
    # Input format: first two ints are n (vertices) and m (edges), followed
    # by m triples "a b w" describing a 1-based directed edge a->b of weight w.
    # NOTE(review): `input` shadows the builtin of the same name here.
    input = sys.stdin.read()
    data = list(map(int, input.split()))
    n, m = data[0:2]
    data = data[2:]
    # Strided slices pick out every a, b and w from the flat token list.
    edges = list(zip(zip(data[0:(3 * m):3], data[1:(3 * m):3]), data[2:(3 * m):3]))
    data = data[3 * m:]
    adj = [[] for _ in range(n)]
    cost = [[] for _ in range(n)]
    for ((a, b), w) in edges:
        # Convert to 0-based vertex indices.
        adj[a - 1].append(b - 1)
        cost[a - 1].append(w)
    print(negative_cycle(adj, cost))
"davidga1993@gmail.com"
] | davidga1993@gmail.com |
29a4a3873def8d66a7550cce0d72f34abc3dfd80 | 6d2abfef2cb571b2dccd345ad752ba6bdfec92ee | /generators/html.py | 8a3366dbfbbd6aa62b89e429d08e5655ed3845d0 | [] | no_license | jariberi/apprework | 26e42b1c4c03443df3e2ddc39c70dd85c775981d | c4e033fd30854c650d1cc37f02ab8e01d989cb29 | refs/heads/master | 2016-09-06T05:29:43.931358 | 2015-03-17T01:46:11 | 2015-03-17T01:46:11 | 31,931,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # TODO
from generators.base_gen import ReportGenerator
class HTMLGenerator(ReportGenerator):
"""This is a generator to output a XHTML that uses CSS and best practices
on standards."""
filename = None
def __init__(self, report, filename):
super(HTMLGenerator, self).__init__(report, *args, **kwargs)
self.filename = filename
def execute(self):
pass
| [
"jariberi@gmail.com"
] | jariberi@gmail.com |
a5be6c3635c89f6e75ff82ea96ced8ca2ec222b1 | b3b104acc91e26c14d8d1d3211f5e7a6094041a5 | /src/utils/str2bool.py | 5247a3770550993a24fda8d670b863cc7e40ec33 | [
"MIT"
] | permissive | HappyGradu/Machine-Learning-Cookbooks | c88c877001fe586e789be5f38d48136eda476246 | 7afded6544a47039723fdf64f74c209db0a3f83a | refs/heads/master | 2020-06-05T20:17:46.040209 | 2019-05-31T03:23:22 | 2019-05-31T03:23:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | """
This script defines a function on that how to convert a string from ('yes', 'true') to boolean variable 'True'.
Author:
Hailiang Zhao
"""
def str2bool(judge):
    """
    Convert the input judgement (yes or no) into boolean variable (True or False).
    :param judge: the input judgement
    :return: True or False
    """
    normalized = judge.lower()
    return normalized == 'yes' or normalized == 'true'
| [
"hliangzhao@163.com"
] | hliangzhao@163.com |
bdad5676c7a24243a779d22cf5efebad95f11153 | 0c48cdf7075c478c489bcf305c27f8c3b5292856 | /python/brunnerdx.py | a28b62645e8c1c006af679c8218bb874c40992fb | [] | no_license | billydragon/brunnerdx | 5f7ecceea3e58390720eda6fec889edb2bc5f48c | 507fae91f3f432c47fa30771f48d3894e2c5b4c8 | refs/heads/master | 2023-04-15T02:06:14.356242 | 2021-04-30T20:13:47 | 2021-04-30T20:13:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,048 | py | from libs.robust_serial.robust_serial import read_order, write_order, Order, read_i32, write_i16, read_i8
from libs.robust_serial.utils import open_serial_port
import socket
import struct
import time
# CLS2Sim network configuration
HOST = '127.0.0.1'
PORT = 15090
TIMEOUT = 8
# change these numbers for stronger or weaker forces
FORCE_MULTIPLIER = 0.4
import collections
class BrunnerDx():
    """Bridge between a Brunner force-feedback base (UDP, CLS2Sim protocol)
    and an Arduino device on a serial port.

    The Arduino reports forces and receives joystick positions; the Brunner
    base receives forces and reports positions.  ``semaphore`` throttles
    serial writes until the Arduino acknowledges with Order.RECEIVED.
    """

    def __init__(self, host, port):
        """Store the Brunner base address and open the serial link."""
        self.host = host
        self.port = port
        self.pos = [0,0,0,0]          # last joystick position (4 axes)
        self.force = (0, 0, 0, 0)     # last forces read from the Arduino
        self.semaphore = 10           # outstanding-write budget for the serial link
        self.pos_history = collections.deque(maxlen=100)
        try:
            self.serial_file = open_serial_port(baudrate=115200, timeout=None)
        except Exception as e:
            raise e

    def write_order(self, order):
        """Send an order over serial, consuming one semaphore slot (dropped
        silently when the budget is exhausted)."""
        if self.semaphore > 0:
            write_order(self.serial_file, order)
            self.semaphore -= 1

    def start(self):
        """Handshake with the Arduino, then open the UDP socket to the base."""
        serial_file = self.serial_file
        is_connected = False
        # Initialize communication with Arduino
        while not is_connected:
            print("Waiting for Arduino...")
            self.write_order(Order.HELLO)
            bytes_array = bytearray(serial_file.read(1))
            if not bytes_array:
                time.sleep(2)
                continue
            byte = bytes_array[0]
            if byte in [Order.HELLO.value, Order.ALREADY_CONNECTED.value]:
                is_connected = True
                self.semaphore = 10
        print("Connected to Arduino")

        # Create a UDP socket
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Set TTL to 5 to allow jumping over routers
        self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 5)
        # Set socket timeout
        self.sock.settimeout(TIMEOUT)

    @staticmethod
    # accept a number in the range 0.0-1.0 and
    # return a number in the range -32767,32767
    # for axis position
    def translate_brunner_pos(pos):
        """Map a Brunner axis position in [0.0, 1.0] to a signed 16-bit value."""
        return -int((pos-0.5) * 2.0 * 32767)

    @staticmethod
    # accept a force in the range -255,255 and
    # return a force that Brunner understands
    def translate_arduino_force(force):
        """Scale an Arduino force reading by FORCE_MULTIPLIER for the base."""
        return int(force * FORCE_MULTIPLIER)

    # send a request to the Arduino to send us the forces soon
    def request_forces(self):
        self.write_order(Order.FORCES)

    # read the forces the Arduino is sending us right now
    def read_forces(self):
        """Read two 32-bit force values (x, y) from serial and cache them."""
        force_x = self.translate_arduino_force(read_i32(self.serial_file))
        force_y = self.translate_arduino_force(read_i32(self.serial_file))
        self.force = (force_x, force_y, 0, 0)
        return self.force

    # read the log the Arduino is sending us right now
    def read_log(self):
        return self.serial_file.readline().decode('ascii')

    # this will send the current forces to the Brunner base
    # and read back the current position of the joystick
    def sendforces_readposition(self):
        """One CLS2Sim exchange: send the cached forces, receive the four
        axis positions, and append a copy to pos_history."""
        force = self.force
        pos = self.pos
        # notice the opposite order of y,x
        request = struct.pack('<Iiiii', 0xAF, force[1], force[0], force[2], force[3])
        self.sock.sendto(request, (self.host, self.port))
        response, address = self.sock.recvfrom(8192)
        result, pos[1], pos[0], pos[2], pos[3] = struct.unpack('<Iffff', response)
        self.pos_history.append(pos.copy())

    # this will send the current position to the Arduino
    # so it will make the joystick appear in the new position
    def send_position(self):
        """Forward the latest x/y position over serial, but only when it
        changed since the previous sample (avoids redundant writes)."""
        if len(self.pos_history) < 2 or self.pos_history[-1][0] != self.pos_history[-2][0] or self.pos_history[-1][1] != self.pos_history[-2][1] :
            x,y = self.pos_history[-1][0], self.pos_history[-1][1]
            self.write_order(Order.POSITION)
            write_i16(self.serial_file, self.translate_brunner_pos(x))
            write_i16(self.serial_file, self.translate_brunner_pos(y))

    # do we have messages from the Arduino
    @property
    def in_waiting(self):
        return self.serial_file.in_waiting

    def loop(self):
        """Main event loop: drain serial messages as they arrive and run one
        base/Arduino update cycle every 0.02 s."""
        next_update = 0
        while True:
            now = time.time()
            if self.in_waiting:
                try:
                    order = read_order(self.serial_file)
                except ValueError:
                    order = None
                if order == Order.RECEIVED:
                    # Arduino acknowledged a write: refund a semaphore slot.
                    self.semaphore += 1
                elif order == Order.FORCES:
                    self.read_forces()
                elif order == Order.LOG:
                    log_line = self.read_log()
            if now >= next_update:
                next_update = now + 0.02 # update loop time
                # this communicates with the Brunner base
                self.sendforces_readposition()
                # this communicates with the Arduino device
                self.send_position()
                self.request_forces()
if __name__ == '__main__':
brunnerdx = BrunnerDx(HOST, PORT)
brunnerdx.start()
brunnerdx.loop()
| [
"jmriego@telefonica.net"
] | jmriego@telefonica.net |
4bb3e3cad9993c564bdbd34a95aacde329ea98b1 | 742af726182ec3f2e5fc82c3745e3d28c2c79e3c | /setup.py | 089d24f1ff2f86dc7733d5ae988358ba84397bd0 | [
"MIT"
] | permissive | sommersoft/RosiePi_Node_Server | 0a06680390e669ac366cf8db36ad5cae2ae0c75f | 2cb46ee8e7e9f1a42f76adc83ea9d17dc4f61335 | refs/heads/master | 2020-12-26T20:31:31.677853 | 2020-06-06T15:50:13 | 2020-06-06T15:50:13 | 237,633,814 | 0 | 0 | MIT | 2020-06-06T15:50:14 | 2020-02-01T15:21:16 | Python | UTF-8 | Python | false | false | 295 | py | from setuptools import find_packages, setup
setup(
    name='rosiepi_node_server',
    version='0.0.1',
    # Discover packages under the node_server directory.
    packages=find_packages('node_server'),
    include_package_data=True,
    zip_safe=False,
    # Runtime dependencies: Flask web app plus an RQ/Redis job queue.
    install_requires=[
        'flask',
        'jinja2',
        'rq',
        'redis'
    ]
)
| [
"sommersoft@gmail.com"
] | sommersoft@gmail.com |
7bccdd943219008b9ab87f2c0d3a9f60a25927c6 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /03_Linear_Algebra_for_Machine_Learning/04/05_vector_division.py | 7a5cbfcb42a4a5db9dfe2635df84d2ce76f0ddf3 | [] | no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | # vector division
from numpy import array

# Demonstrate element-wise division of two equal-length vectors.
a = array((1, 2, 3))
print(a)
b = array((1, 2, 3))
print(b)
# NumPy's `/` divides component by component, yielding floats.
c = a / b
print(c)
| [
"jgrimes@jgrimes.tech"
] | jgrimes@jgrimes.tech |
fcd73cacbfdd0a804119b055112f108cc641cd1f | fd29e3d63e1741760f189e80b5c108ec966aa051 | /client.tac | 651526af44ae23c17338420eb63e9d17c4208fef | [] | no_license | hiyeshin/yoga_socket | f76ce8992a3def5baa1378e09209706ed32a2f66 | 18f855f6a73f16f11236a09e34bf3b6c74e653f7 | refs/heads/master | 2020-06-05T07:56:54.874350 | 2013-04-27T09:06:01 | 2013-04-27T09:06:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | tac | from twisted.application import service, internet
from rpi_ws.client_protocol import RPIClientProtocol, ReconnectingWebSocketClientFactory
from rpi_ws import settings
def getService():
    """Build the TCP client service that maintains the websocket link
    to the RPI server endpoint."""
    # NOTE: the server address is hard-coded for the local network.
    server_url = "ws://192.168.1.3:9000/rpi/"
    ws_factory = ReconnectingWebSocketClientFactory(
        server_url,
        useragent=settings.RPI_USER_AGENT,
        debug=False,
    )
    ws_factory.protocol = RPIClientProtocol
    return internet.TCPClient(ws_factory.host, ws_factory.port, ws_factory)
# twistd looks for a module-level object named `application`; it is the
# required root object of any .tac file.
application = service.Application("PiIOClient")
# Attach the websocket client service to its parent application.
# NOTE(review): the next assignment rebinds the name `service`, shadowing the
# imported `twisted.application.service` module. Harmless here because the
# module is not referenced again afterwards, but worth renaming.
service = getService()
service.setServiceParent(application) | [
"hiyeshin@gmail.com"
] | hiyeshin@gmail.com |
341667ce1934f3f04a05b6ae9b9ca48c9360002e | 6676867d1b645bd6d8fc7c79d85d7e943a3a3550 | /ROS2/skinny/launch/launch_pdp.py | 62d287557d9e65624a939a9f10fa606265ab64a3 | [] | no_license | Razorbotz/RMC-Code-20-21 | 88604410293c5edb7a877365aa8bbf2a9cb4141b | f1a5a9c99a3af840188f8dc3a680c0453600ee02 | refs/heads/master | 2023-07-18T03:44:26.445419 | 2021-05-11T18:08:24 | 2021-05-11T18:08:24 | 336,593,274 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
    """Launch the power-distribution-panel node from its package."""
    # Describe the single node this launch file starts.
    pdp_node = Node(
        package='power_distribution_panel',
        namespace='',
        name='power_distribution_panel',
        executable='power_distribution_panel_node'
    )
    return LaunchDescription([pdp_node])
| [
"andrewburroughs17@gmail.com"
] | andrewburroughs17@gmail.com |
fd30cbf33b7a94e9cba9d39a95ec3ac9243b5d48 | e89509b453632747077bc57dbec265a7703d5c7c | /function/globalfunc/baiscadd.py | 8c7c28e3fdb91a3022af78a3ba7e14b70d447686 | [] | no_license | Madhav2108/udemy-python-as | a9dcfdbfdc1bb85471aa66de77957e962a7c5486 | 0bc6a501516618fb3c7ab10be6bc16c047aeec3f | refs/heads/master | 2023-03-30T11:25:16.064592 | 2021-03-30T18:10:46 | 2021-03-30T18:10:46 | 286,001,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | a = 15
b = 10

def add(x=15, y=10):
    """Print and return the sum of *x* and *y*.

    Generalized from the original, which read the module globals and
    returned nothing: ``add()`` still prints 25, but the operands are now
    parameters (defaulting to the original values) and the sum is returned
    so callers can use the result.
    """
    total = x + y
    print(total)
    return total

add()
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.