| Column | Type |
|---|---|
| blob_id | string (40 chars) |
| directory_id | string (40 chars) |
| path | string (2–616 chars) |
| content_id | string (40 chars) |
| detected_licenses | list (0–69 items) |
| license_type | string (2 classes) |
| repo_name | string (5–118 chars) |
| snapshot_id | string (40 chars) |
| revision_id | string (40 chars) |
| branch_name | string (4–63 chars) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (2.91k–686M, nullable ⌀) |
| star_events_count | int64 (0–209k) |
| fork_events_count | int64 (0–110k) |
| gha_license_id | string (23 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (213 classes) |
| src_encoding | string (30 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (2–10.3M) |
| extension | string (246 classes) |
| content | string (2–10.3M chars) |
| authors | list (1 item) |
| author_id | string (0–212 chars) |
954a6ccc30e4d525232ba97980373c8886d90bd8
|
bc23b3d14d06521bc200df1e827dc9b92dbe3741
|
/wake_up.spec
|
f28eaedb3b078efdaaab1dbb075f2aebb29cbc43
|
[] |
no_license
|
leowf/wake_up
|
72d014a648e3e6d1ab32a6722641971a0f6e6c90
|
b90ac4e2675ef42506ee23aa35ff83e5f34a46f6
|
refs/heads/master
| 2021-10-25T18:34:05.477721
| 2019-04-06T02:56:47
| 2019-04-06T02:56:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 708
|
spec
|
# -*- mode: python -*-
block_cipher = None
a = Analysis(['wake_up.py'],
pathex=['F:\\record'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
name='wake_up',
debug=False,
strip=False,
upx=True,
console=False )
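A note on usage: a spec file like this is what PyInstaller consumes to produce the single windowed wake_up executable described above. A minimal sketch of driving the build from Python, assuming PyInstaller is installed (equivalent to running `pyinstaller wake_up.spec` on the command line):
# Build from the spec programmatically; run() takes the usual CLI arguments
import PyInstaller.__main__
PyInstaller.__main__.run(['wake_up.spec'])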
|
[
"noreply@github.com"
] |
leowf.noreply@github.com
|
aa922c10b77f10d3df45a5ae0ba90283e6185d9b
|
ab89371666c085c667a058c40426633dbd5bdf70
|
/autourl/urls.py
|
a49bc297cb3e8f4e3f51416d166f05496ed5dfd3
|
[] |
no_license
|
kns003/autourl
|
5e70bbb2a1bdcf4e878f85df8bbc66716dbe9d9f
|
c11a58505a1e3c68f030036e734c02b9e6fc47c2
|
refs/heads/master
| 2021-01-23T18:50:20.715694
| 2015-08-22T10:50:57
| 2015-08-22T10:50:57
| 41,196,824
| 0
| 0
| null | 2015-08-22T14:29:02
| 2015-08-22T07:49:17
|
HTML
|
UTF-8
|
Python
| false
| false
| 805
|
py
|
"""autourl URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^ubuild/', include('ubuild.urls')),
]
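For illustration, the ubuild URLconf pulled in by include() above might look as follows; the view name is hypothetical, following the same Django 1.8 url() pattern the docstring describes:
# ubuild/urls.py (hypothetical contents)
from django.conf.urls import url
from ubuild import views

urlpatterns = [
    url(r'^$', views.home, name='home'),
]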
|
[
"kns003@gmail.com"
] |
kns003@gmail.com
|
59a429c78f14208eef1fe82f32f2f42e0ff1864b
|
81a503a94e3345192e0b7f044266e5fc5287cd2f
|
/simple-3layer-perceptron-predict.py
|
3172f11df857db87b6fafa34be626954b5b4aea4
|
[] |
no_license
|
aux-di/chainer-samples
|
95cc442f042747e8d1445ecdd9b78dcb08e8283a
|
5850a0dc5ed98a9916e9f618d73d052b6248e3a2
|
refs/heads/master
| 2020-04-03T09:52:34.226078
| 2016-08-12T13:47:02
| 2016-08-12T13:47:02
| 64,039,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,290
|
py
|
# -*- coding: utf-8 -*-
import argparse
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, report, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.training import extensions
import cv2
#from matplotlib import pylab as plt
# Network definition
class MLP(chainer.Chain):
def __init__(self, n_in, n_units, n_out):
super(MLP, self).__init__(
l1=L.Linear(n_in, n_units), # first layer
l2=L.Linear(n_units, n_units), # second layer
l3=L.Linear(n_units, n_out), # output layer
)
def __call__(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
def main():
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=1,
help='Number of images in each mini batch')
parser.add_argument('--epoch', '-e', type=int, default=100,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', type=int, default=10,
help='Number of units')
args = parser.parse_args()
# load a color image
img1 = cv2.imread('images/zero.jpg', cv2.IMREAD_COLOR)
img2 = cv2.imread('images/black.jpg', cv2.IMREAD_COLOR)
img3 = cv2.imread('images/white.jpg', cv2.IMREAD_COLOR)
# color -> grayscale
imggray1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
imggray2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
imggray3 = cv2.cvtColor(img3, cv2.COLOR_RGB2GRAY)
# image -> array
gray = []
for y in range(len(imggray1)):
for x in range(len(imggray1[y])):
gray.append(imggray1[y][x])
imgdata1 = np.array(gray, dtype='f')
imgdata1 = imgdata1.reshape(1, 1, 32, 32)
imgdata1 = imgdata1 / 255.0
gray = []
for y in range(len(imggray2)):
for x in range(len(imggray2[y])):
gray.append(imggray2[y][x])
imgdata2 = np.array(gray, dtype='f')
imgdata2 = imgdata2.reshape(1, 1, 32, 32)
imgdata2 = imgdata2 / 255.0
gray = []
for y in range(len(imggray3)):
for x in range(len(imggray3[y])):
gray.append(imggray3[y][x])
imgdata3 = np.array(gray, dtype='f')
imgdata3 = imgdata3.reshape(1, 1, 32, 32)
imgdata3 = imgdata3 / 255.0
n_in = 32*32
print('GPU: {}'.format(args.gpu))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
model = L.Classifier(MLP(n_in, args.unit, 3))
# Setup an optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
# Load dataset
x1 = imgdata1
x2 = imgdata2
x3 = imgdata3
y1 = np.array(0, dtype=np.int32)
y2 = np.array(1, dtype=np.int32)
y3 = np.array(2, dtype=np.int32)
dd = [(x1, y1), (x2, y2), (x3, y3)]
train, test = dd, dd
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
# Set up a trainer
updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
trainer.extend(extensions.dump_graph('main/loss'))
# Take a snapshot at each epoch
trainer.extend(extensions.snapshot())
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Print selected entries of the log to stdout
# Here "main" refers to the target link of the "main" optimizer again, and
# "validation" refers to the default name of the Evaluator extension.
# Entries other than 'epoch' are reported by the Classifier link, called by
# either the updater or the evaluator.
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy']))
# Print a progress bar to stdout
trainer.extend(extensions.ProgressBar())
# Resume from a snapshot
#chainer.serializers.load_npz(resume, trainer)
# Run the training
trainer.run()
# Predictor
xx = Variable(np.array([x1, ]), volatile=True)
y = model.predictor(xx)
print(y.data)
print(np.argmax(y.data))
if __name__ == '__main__':
main()
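Worth noting: `Variable(..., volatile=True)` is Chainer v1 API; Chainer v2 removed the flag in favor of configuration contexts. A hedged sketch of the equivalent inference on newer Chainer versions:
# Chainer v2+ equivalent of the volatile=True prediction above
with chainer.using_config('train', False), chainer.no_backprop_mode():
    y = model.predictor(np.array([x1, ]))
print(np.argmax(y.data))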
|
[
"aux_di@yahoo.co.jp"
] |
aux_di@yahoo.co.jp
|
10bb46b132d695bceec1c40fbcb8bbcad04ea9a7
|
8c488fd4b680919e777e34f8d61bcc03fb5948eb
|
/news.py
|
026eae8a862958769903c585437e85b2c2c8f237
|
[
"MIT"
] |
permissive
|
atultherajput/iPyNews
|
0f6e95f441edf5ecf105ce93545fece8385f2b26
|
1c93d218a8148a7c62c23f9a6ccf9a5924cac871
|
refs/heads/master
| 2020-03-08T08:29:09.199397
| 2018-04-04T08:47:52
| 2018-04-04T08:47:52
| 128,023,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,583
|
py
|
import requests
def PyNews(option, c, q):
apiKey = "9f6bb23dade24e2a9215192*********" #get api from https://newsapi.org
top_headlines = "https://newsapi.org/v2/top-headlines?country="+c+"&apiKey="+apiKey
everything = "https://newsapi.org/v2/everything?language=en&sortBy=popularity&q="+q+"&apiKey="+apiKey
sources = "https://newsapi.org/v2/sources?apiKey="+apiKey
results = []
if option == 1:
news = requests.get(top_headlines).json()
articles = news["articles"]
for article in articles:
results.append(article["title"])
elif option == 2:
news = requests.get(everything).json()
articles = news["articles"]
for article in articles:
results.append(article["title"]+":\n"+ article["description"])
elif option == 3:
news = requests.get(sources).json()
articles = news["sources"]
for article in articles:
results.append(article["name"])
for i in range(len(results)):
print(str(i + 1)+'.', results[i])
def driver(option=1, c="in", q="india"):
option = int(input("1. Top Headlines by country \n2. Everything by keyword \n3. News Sources \nSelect option: "))
if option == 1:
c = input("Enter two digit country code: ")
elif option == 2:
q = input("Search Keyword: ")
elif option == 3:
pass
else:
print("Wrong Option!")
driver()
return
PyNews(option, c, q)
if __name__ == '__main__':
driver()
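As an aside, the hand-assembled query strings above can be replaced with requests' params argument, which handles URL encoding automatically; a minimal sketch against the same newsapi.org endpoint (the function name is illustrative):
# requests builds and encodes the query string from the params dict
def top_headlines(api_key, country):
    resp = requests.get("https://newsapi.org/v2/top-headlines",
                        params={"country": country, "apiKey": api_key})
    return [a["title"] for a in resp.json()["articles"]]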
|
[
"atultherajput@gmail.com"
] |
atultherajput@gmail.com
|
020766db10b5c980437a181a36ed6fd1ce13eb34
|
dde59eec69a064e8f595b348022cb8449185c3e0
|
/Ex9.5.py
|
9f96624db90a781fd2d15cac44ccb92d802ac271
|
[] |
no_license
|
inwk6312fall2019/wordplay-Tarang97
|
32de9f579f813c38bf2862a9c8f4b01062a5a5a6
|
f66917b7a539c24e0d6268496fe5eb7b6507c01c
|
refs/heads/master
| 2020-07-30T01:04:07.531630
| 2019-09-21T21:57:47
| 2019-09-22T01:04:21
| 210,028,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
#Write a function named uses_all that takes a word and a string of required letters,
#and that returns True if the word uses all the required letters at least once.
#How many words are there that use all the vowels aeiou? How about aeiouy?
def uses_all(word, letters):
for letter in letters:
if letter not in word:
return False
return True
fin = open('words.txt')
count = 0
for line in fin:
word = line.strip()
if uses_all(word, 'aeiouy'):
print(word)
count += 1
print(count)
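To answer the exercise's actual question (how many words use all of aeiou, and how many use all of aeiouy), the same scan can be repeated per vowel set; a short sketch reusing uses_all:
# Count words containing every letter of each vowel set (reuses uses_all above)
for letters in ('aeiou', 'aeiouy'):
    with open('words.txt') as fin:
        total = sum(1 for line in fin if uses_all(line.strip(), letters))
    print(letters, total)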
|
[
"tarangdube8@outlook.com"
] |
tarangdube8@outlook.com
|
7a3fc601cdbc6083904f9ef361444dc21ecd088f
|
4abda16354fd81596ff9550aea2cc882e0d0718b
|
/json-quiz/1.py
|
083a52ef84ea75a8ea54fc163ca15e445f43d7b3
|
[] |
no_license
|
hsayres/compjour-hw
|
6d89e8580fe1a43a74284aad6fde9d406fd1c11f
|
30f1c3f7c287b0ea90e8fdeff8ed61a95d15304a
|
refs/heads/master
| 2021-01-18T15:12:33.080458
| 2015-06-10T01:20:31
| 2015-06-10T01:20:31
| 33,575,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
import requests
import json
data_url = "http://www.compjour.org/files/code/json-examples/analyticsgov-realtime.json"
response = requests.get(data_url)
text = response.text
data = json.loads(text)
print('A. ', data['name'])
print('B. ', data['taken_at'])
print('C. ', data['meta']['name'])
print('D. ', data['data'][0]['active_visitors'])
print('E. ', ', '.join(data.keys()))
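One defensive tweak worth knowing: checking the HTTP status before parsing avoids feeding an error page to json.loads; requests provides raise_for_status() and a json() shortcut for exactly this:
# Fail fast on HTTP errors instead of parsing a non-JSON error body
response = requests.get(data_url)
response.raise_for_status()
data = response.json()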
|
[
"hsayres@stanford.edu"
] |
hsayres@stanford.edu
|
ebba7615e5e9b41c5297f37d0a48fe88c1c66f93
|
c5a120d7e7bd85af81151214499dc1284fadec67
|
/A_C_TD_CNN_3_LSTM_GAMA_Navigation.py
|
ee44fb774b0578c5f1a33666b7482bcd195c295f
|
[] |
no_license
|
ZHONGJunjie86/Mixed_Input_PPO_CNN_LSTM_Car_Navigation
|
725fd9cc4d90c6ade1bccfa7c243353ec356f2b3
|
32f3788a25cc1bcb0090f52a6473ab02edfcf177
|
refs/heads/master
| 2023-05-03T05:09:40.605705
| 2021-05-22T09:47:17
| 2021-05-22T09:47:17
| 281,865,039
| 12
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,582
|
py
|
from utils import cross_loss_curve, GAMA_connect,reset,send_to_GAMA
from CV_input import generate_img
import os
from itertools import count
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import MultivariateNormal
import numpy as np
import pandas as pd
import warnings
import math
warnings.filterwarnings("ignore")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
save_curve_pic = os.path.abspath(os.curdir)+'/PPO_Mixedinput_Navigation_Model/result/Actor_Critic_3loss_curve.png'
save_critic_loss = os.path.abspath(os.curdir)+'/PPO_Mixedinput_Navigation_Model/training_data/AC_critic_3loss.csv'
save_reward = os.path.abspath(os.curdir)+'/PPO_Mixedinput_Navigation_Model/training_data/AC_3reward.csv'
save_speed = os.path.abspath(os.curdir)+'/PPO_Mixedinput_Navigation_Model/training_data/AC_average_speed.csv'
save_NPC_speed = os.path.abspath(os.curdir)+'/PPO_Mixedinput_Navigation_Model/training_data/NPC_speed.csv'
state_size = 9
action_size = 1
Memory_size = 4
torch.set_default_tensor_type(torch.DoubleTensor)
class Memory:
def __init__(self):
self.actions = []
self.states = []
self.states_next = []
self.states_img = []
self.states_img_next = []
self.logprobs = []
self.rewards = []
self.is_terminals = []
def clear_memory(self):
del self.actions[:]
del self.states[:]
del self.states_next[:]
del self.states_img[:]
del self.states_img_next[:]
del self.logprobs[:]
del self.rewards[:]
del self.is_terminals[:]
class Actor(nn.Module):
def __init__(self, state_size, action_size):
super(Actor, self).__init__()
self.conv1 = nn.Conv2d(3,16, kernel_size=3, stride=2, padding=0) # 237*237*N ->57*57*16
self.maxp1 = nn.MaxPool2d(3, stride = 2, padding=0) #79*79*16-> 39*39*32
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=0) # 39*39*16-> 19*19*32
self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=0) # 19*19*32 -> 9*9*64
#self.maxp2 = nn.MaxPool2d(3, stride = 2, padding=0) # 19*19*64 -> 9*9*64
self.linear_CNN_1 = nn.Linear(5184, 256)
self.linear_CNN_2 = nn.Linear(256*Memory_size,16*Memory_size) #(768,256)
#
self.state_size = state_size
self.action_size = action_size
self.linear0 = nn.Linear(self.state_size, 64)
self.linear1 = nn.Linear(64, 128)
self.linear2 = nn.Linear(128, 16)#128, 85)
self.linear3 = nn.Linear(32*Memory_size,12)#(511,128)
self.linear4 = nn.Linear(12,8) #(128,32)
self.mu = nn.Linear(8,self.action_size) #32
self.sigma = nn.Linear(8,self.action_size)
self.distribution = torch.distributions.Normal
def forward(self, state,tensor_cv):
# CV
x = F.relu(self.maxp1(self.conv1(tensor_cv)))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
#x = F.relu(self.conv3(x))
x = x.view(x.size(0), -1) # flatten
x = F.relu(self.linear_CNN_1(x)).reshape(1,256*Memory_size)
x = F.relu(self.linear_CNN_2(x)).reshape(1,16*Memory_size) #.reshape(1,256)
# num
output_0 = F.relu(self.linear0(state))
output_1 = F.relu(self.linear1(output_0))
output_2 = F.relu(self.linear2(output_1)).reshape(1,16*Memory_size) #(1,255)
# merge
output_2 = torch.cat((x,output_2),1)
output_3 = F.relu(self.linear3(output_2) )
#
output_4 =F.relu(self.linear4(output_3)) #F.relu(self.linear4(output_3.view(-1,c))) #
mu = torch.tanh(self.mu(output_4)) # tanh allows positive and negative values; sigmoid would give 0-1
sigma = F.relu(self.sigma(output_4)) + 0.001
mu = torch.diag_embed(mu).to(device)
sigma = torch.diag_embed(sigma).to(device) # change to 2D
#dist = MultivariateNormal(mu,sigma)
dist = self.distribution(mu, sigma)#MultivariateNormal(mu,sigma) #N(μ,σ^2)
action = dist.sample()
action_logprob = dist.log_prob(action)
action = torch.clamp(action.detach(), -0.8, 0.6)
#entropy = torch.sum(dist.entropy())
#entropy = dist.entropy().mean() #torch.sum(m_probs.entropy())
#entropy = 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(dist.scale)
entropy = -torch.exp(action_logprob) * action_logprob
return action,action_logprob,entropy
class Critic(nn.Module):
def __init__(self, state_size, action_size):
super(Critic, self).__init__()
self.conv1 = nn.Conv2d(3,16, kernel_size=3, stride=2, padding=0) # 237*237*N ->57*57*16
self.maxp1 = nn.MaxPool2d(3, stride = 2, padding=0) #79*79*16-> 39*39*32
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=0) # 39*39*16-> 19*19*32
self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=0) # 19*19*32 -> 9*9*64
#self.maxp2 = nn.MaxPool2d(3, stride = 2, padding=0) # 19*19*64 -> 9*9*64
self.linear_CNN = nn.Linear(5184, 256)
self.lstm_CNN = nn.LSTM(256,16, 1,batch_first=True)
#
self.state_size = state_size
self.action_size = action_size
self.linear0 = nn.Linear(self.state_size, 64)
self.linear1 = nn.Linear(64, 128)
self.linear2 = nn.Linear(128, 128)
self.lstm3 = nn.LSTM(128,16,1, batch_first=True)#85
#
self.LSTM_layer_3 = nn.LSTM(32*Memory_size,16,1, batch_first=True) #510,128
self.linear4 = nn.Linear(16,4) #128,32
self.linear5 = nn.Linear(4, action_size) #32
def forward(self, state, tensor_cv,h_state_cv_c=(torch.zeros(1,Memory_size,16).to(device),
torch.zeros(1,Memory_size,16).to(device)),h_state_n_c=(torch.zeros(1,Memory_size,16).to(device), #1,3,85)
torch.zeros(1,Memory_size,16).to(device)),h_state_3_c=(torch.zeros(1,1,16).to(device),
torch.zeros(1,1,16).to(device))): #(1,1,128)
# CV
x = F.relu(self.maxp1(self.conv1(tensor_cv)))
x = F.relu( self.conv2(x))
x = F.relu(self.conv3(x)).reshape(Memory_size,1,5184)
#x = x.view(x.size(0), -1) # flatten
x = F.relu(self.linear_CNN(x))#.reshape(1,768)
x,h_state_cv_c = self.lstm_CNN(x ,h_state_cv_c) # #.unsqueeze(0)
x = F.relu( x).reshape(1,16*Memory_size) #.reshape(1,255) torch.tanh
# num
output_0 = F.relu(self.linear0(state))
output_1 = F.relu(self.linear1(output_0))
output_2 = F.relu(self.linear2(output_1))
output_2,h_state_n_c = self.lstm3(output_2,h_state_n_c) #
output_2 = F.relu(output_2) #
output_2 = output_2.squeeze().reshape(1,16*Memory_size)
# LSTM
output_2 = torch.cat((x,output_2),1)
output_2 = output_2.unsqueeze(0)
output_3 , h_state_3_c= self.LSTM_layer_3(output_2,h_state_3_c ) # #,self.hidden_cell
a,b,c = output_3.shape
#
output_4 = F.relu(self.linear4(output_3.view(-1,c)))
value = torch.tanh(self.linear5(output_4))
return value ,(h_state_cv_c[0].data,h_state_cv_c[1].data),(h_state_n_c[0].data,h_state_n_c[1].data),(h_state_3_c[0].data,h_state_3_c[1].data)
def main():
################ load ###################
actor_path = os.path.abspath(os.curdir)+'/PPO_Mixedinput_Navigation_Model/weight/AC_TD3_actor.pkl'
critic_path = os.path.abspath(os.curdir)+'/PPO_Mixedinput_Navigation_Model/weight/AC_TD3_critic.pkl'
if os.path.exists(actor_path):
actor = Actor(state_size, action_size).to(device)
actor.load_state_dict(torch.load(actor_path))
print('Actor Model loaded')
else:
actor = Actor(state_size, action_size).to(device)
if os.path.exists(critic_path):
critic = Critic(state_size, action_size).to(device)
critic.load_state_dict(torch.load(critic_path))
print('Critic Model loaded')
else:
critic = Critic(state_size, action_size).to(device)
critic_next = Critic(state_size, action_size).to(device)
critic_next.load_state_dict(critic.state_dict())
print("Waiting for GAMA...")
################### initialization ########################
reset()
episode = 4000
training_stage = 70 #100#80
Decay = training_stage*18
lr = 0.0001
sample_lr = [
0.0001, 0.00009, 0.00008, 0.00007, 0.00006, 0.00005, 0.00004, 0.00003,
0.00002, 0.00001, 0.000009, 0.000008, 0.000007, 0.000006, 0.000005,
0.000004, 0.000003, 0.000002, 0.000001
] #900 960 1020 1080 1140
if episode >= training_stage : #50 100
try:
lr = sample_lr[int(episode // training_stage)]
except(IndexError):
lr = 0.000001* (0.9 ** ((episode - Decay) // training_stage)) #100-1800#80-1440#65-1170#570 -- 30
optimizerA = optim.Adam(actor.parameters(), lr, betas=(0.95, 0.999))
optimizerC = optim.Adam(critic.parameters(), lr, betas=(0.95, 0.999))
test = "GAMA"
state,reward,done,time_pass,over,_ = GAMA_connect(test) #connect
print("done:",done,"timepass:",time_pass)
log_probs = [] #log probability
values = []
rewards = []
masks = []
total_loss = []
total_rewards = []
entropy = 0
loss = []
value = 0
log_prob = 0
gama = 0.9
average_speed = []
memory = Memory ()
################## start #########################
while over!= 1:
# normal case
if(done == 0 and time_pass != 0):
# reward from the previous step
average_speed.append(state[0])
reward = torch.tensor([reward], dtype=torch.float, device=device)
rewards.append(reward)
state = torch.DoubleTensor(state).reshape(1,state_size).to(device)
state_img = generate_img()
tensor_cv = torch.from_numpy(np.transpose(state_img, (2, 0, 1))).double().to(device)/255
if len(memory.states_next) ==0: #IndexError
#for _ in range(3):
memory.states_next = memory.states
memory.states_next[Memory_size-1] = state
memory.states_img_next = memory.states_img
memory.states_img_next [Memory_size-1]= tensor_cv
else:
del memory.states_next[:1]
del memory.states_img_next[:1]
memory.states_next.append(state)
memory.states_img_next.append(tensor_cv)
state_next = torch.stack(memory.states_next).to(device).detach()
tensor_cv_next = torch.stack(memory.states_img_next).to(device).detach()
value_next,_,_,_ = critic_next(state_next,tensor_cv_next,h_state_cv_c,h_state_n_c,h_state_3_c ) #_next
with torch.autograd.set_detect_anomaly(True):
# TD:r(s) + gama*v(s+1) - v(s)
advantage = reward.detach() + gama*value_next.detach() - value
actor_loss = -(log_prob * advantage.detach() + 10*lr *entropy)#
#critic_loss = (reward.detach() + gama*value_next.detach() - value).pow(2)
critic_loss =torch.nn.SmoothL1Loss()(reward.detach() + gama*value_next.detach() , value)
optimizerA.zero_grad()
optimizerC.zero_grad()
critic_loss.backward()
actor_loss.backward()
loss.append(critic_loss)
optimizerA.step()
optimizerC.step()
critic_next.load_state_dict(critic.state_dict())
del memory.states[:1]
del memory.states_img[:1]
memory.states.append(state)
memory.states_img.append(tensor_cv)
state = torch.stack(memory.states).to(device).detach() ###
tensor_cv = torch.stack(memory.states_img).to(device).detach()
value,h_state_cv_c,h_state_n_c,h_state_3_c = critic(state,tensor_cv,h_state_cv_c,h_state_n_c,h_state_3_c)
action,log_prob,entropy= actor(state,tensor_cv) # ,h_state_cv_a,h_state_n_a,h_state_3_a #,h_state_cv_a,h_state_n_a,h_state_3_a
log_prob = log_prob.unsqueeze(0)
#entropy += entropy
send_to_GAMA([[1,float(action.cpu().numpy()*10)]]) # one row
masks.append(torch.tensor([1-done], dtype=torch.float, device=device))
values.append(value)
log_probs.append(log_prob)
# episode finished
elif done == 1:
average_speed.append(state[0])
send_to_GAMA([[1,0]])
# send first, then compute
print(state)
reward = torch.tensor([reward], dtype=torch.float, device=device)
rewards.append(reward) #contains the last
total_reward = sum(rewards).cpu().detach().numpy()
total_rewards.append(total_reward)
#state = torch.FloatTensor(state).reshape(1,4).to(device)
#last_value= critic(state)
with torch.autograd.set_detect_anomaly(True):
advantage = reward.detach() - value #+ last_value; V(s+1) = 0 on the final step
actor_loss = -( log_prob * advantage.detach() +10*lr * entropy) #+ lr * entropy
#critic_loss = (reward.detach() - value).pow(2) #+ last_value
critic_loss = torch.nn.SmoothL1Loss()(reward.detach(), value) # terminal step: V(s+1) = 0
optimizerA.zero_grad()
optimizerC.zero_grad()
critic_loss.backward()
actor_loss.backward()
loss.append(critic_loss)
optimizerA.step()
optimizerC.step()
critic_next.load_state_dict(critic.state_dict())
log_probs = []
values = []
rewards = []
masks = []
loss_sum = sum(loss).cpu().detach().numpy()
total_loss.append(loss_sum)
cross_loss_curve(loss_sum.squeeze(0),total_reward,save_curve_pic,save_critic_loss,save_reward,np.mean(average_speed)*10,save_speed,average_speed_NPC,save_NPC_speed) #total_loss,total_rewards
loss = []
average_speed = []
memory.clear_memory()
if episode > training_stage : #50 100
try:
lr = sample_lr[int(episode // training_stage)]
except(IndexError):
lr = 0.000001* (0.9 ** ((episode-Decay) //training_stage))
optimizerA = optim.Adam(actor.parameters(), lr, betas=(0.95, 0.999))
optimizerC = optim.Adam(critic.parameters(), lr, betas=(0.95, 0.999))
torch.save(actor.state_dict(),actor_path)
torch.save(critic.state_dict(),critic_path)
print("----------------------------------Net_Trained---------------------------------------")
print('--------------------------Iteration:',episode,'over--------------------------------')
episode += 1
# first iteration
else:
print('Iteration:',episode,"lr:",lr)
state = np.reshape(state,(1,len(state))) #xxx
state_img = generate_img()
tensor_cv = torch.from_numpy(np.transpose(state_img, (2, 0, 1))).double().to(device)/255
state = torch.DoubleTensor(state).reshape(1,state_size).to(device)
for _ in range(Memory_size):
memory.states.append(state)
memory.states_img.append(tensor_cv)
state = torch.stack(memory.states).to(device).detach() ###
tensor_cv = torch.stack(memory.states_img).to(device).detach()
value,h_state_cv_c,h_state_n_c,h_state_3_c = critic(state,tensor_cv) #dist, # now is a tensoraction = dist.sample()
action,log_prob,entropy = actor(state,tensor_cv) #, h_state_cv_a,h_state_n_a,h_state_3_a
print("acceleration: ",action.cpu().numpy())
send_to_GAMA([[1,float(action.cpu().numpy()*10)]])
log_prob = log_prob.unsqueeze(0)
#entropy += entropy
state,reward,done,time_pass,over,average_speed_NPC = GAMA_connect(test)
return None
if __name__ == '__main__':
main()
"""
class Actor(nn.Module):
def __init__(self, state_size, action_size):
super(Actor, self).__init__()
self.conv1 = nn.Conv2d(3,8, kernel_size=8, stride=4, padding=0) # 500*500*3 -> 124*124*8
self.maxp1 = nn.MaxPool2d(4, stride = 2, padding=0) # 124*124*8 -> 61*61*8
self.conv2 = nn.Conv2d(8, 16, kernel_size=4, stride=1, padding=0) # 61*61*8 -> 58*58*16
self.maxp2 = nn.MaxPool2d(2, stride=2, padding=0) # 58*58*16 -> 29*29*16 = 13456
self.linear_CNN_1 = nn.Linear(13456, 256)
self.linear_CNN_2 = nn.Linear(768,48) #(768,256)
#
self.state_size = state_size
self.action_size = action_size
self.linear1 = nn.Linear(self.state_size, 128)
self.linear2 = nn.Linear(128, 16)#128, 85)
self.linear3 = nn.Linear(96,12)#(511,128)
self.linear4 = nn.Linear(12,8) #(128,32)
self.mu = nn.Linear(8,self.action_size) #32
self.sigma = nn.Linear(8,self.action_size)
self.distribution = torch.distributions.Normal
def forward(self, state,tensor_cv):
# CV
x = F.relu(self.maxp1( self.conv1(tensor_cv)))
x = F.relu(self.maxp2( self.conv2(x)))
x = x.view(x.size(0), -1) # flatten
x = F.relu(self.linear_CNN_1(x)).reshape(1,768)
x = F.relu(self.linear_CNN_2(x)).reshape(1,48) #.reshape(1,256)
# num
output_1 = F.relu(self.linear1(state))
output_2 = F.relu(self.linear2(output_1)).reshape(1,48) #(1,255)
# merge
output_2 = torch.cat((x,output_2),1)
output_3 = F.relu(self.linear3(output_2) )
#
output_4 =F.relu(self.linear4(output_3)) #F.relu(self.linear4(output_3.view(-1,c))) #
mu = torch.tanh(self.mu(output_4)) # tanh allows positive and negative values; sigmoid would give 0-1
sigma = F.relu(self.sigma(output_4)) + 0.001
mu = torch.diag_embed(mu).to(device)
sigma = torch.diag_embed(sigma).to(device) # change to 2D
#dist = MultivariateNormal(mu,sigma)
dist = self.distribution(mu, sigma)#MultivariateNormal(mu,sigma) #N(μ,σ^2)
action = dist.sample()
action_logprob = dist.log_prob(action)
action = torch.clamp(action.detach(), -0.8, 0.6)
#entropy = torch.sum(dist.entropy())
#entropy = dist.entropy().mean() #torch.sum(m_probs.entropy())
#entropy = 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(dist.scale)
entropy = -torch.exp(action_logprob) * action_logprob
return action,action_logprob,entropy
class Critic(nn.Module):
def __init__(self, state_size, action_size):
super(Critic, self).__init__()
self.conv1 = nn.Conv2d(3,8, kernel_size=8, stride=4, padding=0) # 500*500*3 -> 124*124*8
self.maxp1 = nn.MaxPool2d(4, stride = 2, padding=0) # 124*124*8 -> 61*61*8
self.conv2 = nn.Conv2d(8, 16, kernel_size=4, stride=1, padding=0) # 61*61*8 -> 58*58*16
self.maxp2 = nn.MaxPool2d(2, stride=2, padding=0) # 58*58*16 -> 29*29*16 = 13456
self.linear_CNN = nn.Linear(13456, 256)
self.lstm_CNN = nn.LSTM(256,16, 1,batch_first=True) #768,255
#
self.state_size = state_size
self.action_size = action_size
self.linear1 = nn.Linear(self.state_size, 128)
self.linear2 = nn.Linear(128, 128)
self.lstm3 = nn.LSTM(128,16,1, batch_first=True)#85
#
self.LSTM_layer_3 = nn.LSTM(96,16,1, batch_first=True) #510,128
self.linear4 = nn.Linear(16,4) #128,32
self.linear5 = nn.Linear(4, action_size) #32
def forward(self, state, tensor_cv,h_state_cv_c=(torch.zeros(1,3,16).to(device),
torch.zeros(1,3,16).to(device)),h_state_n_c=(torch.zeros(1,3,16).to(device), #1,3,85)
torch.zeros(1,3,16).to(device)),h_state_3_c=(torch.zeros(1,1,16).to(device),
torch.zeros(1,1,16).to(device))): #(1,1,128)
# CV
x = F.relu(self.maxp1( self.conv1(tensor_cv)))
x = F.relu(self.maxp2( self.conv2(x))).reshape(3,1,13456)
#x = x.view(x.size(0), -1) #展開
x = F.relu(self.linear_CNN(x))#.reshape(1,768)
x,h_state_cv_c = self.lstm_CNN(x ,h_state_cv_c) # #.unsqueeze(0)
x = F.relu( x).reshape(1,48) #.reshape(1,255) torch.tanh
# num
output_1 = F.relu(self.linear1(state))
output_2 = F.relu(self.linear2(output_1))
output_2,h_state_n_c = self.lstm3(output_2,h_state_n_c) #
output_2 = F.relu(output_2) #
output_2 = output_2.squeeze().reshape(1,48)
# LSTM
output_2 = torch.cat((x,output_2),1)
output_2 = output_2.unsqueeze(0)
output_3 , h_state_3_c= self.LSTM_layer_3(output_2,h_state_3_c ) # #,self.hidden_cell
a,b,c = output_3.shape
#
output_4 = F.relu(self.linear4(output_3.view(-1,c)))
value = torch.tanh(self.linear5(output_4))
return value ,(h_state_cv_c[0].data,h_state_cv_c[1].data),(h_state_n_c[0].data,h_state_n_c[1].data),(h_state_3_c[0].data,h_state_3_c[1].data)
"""
"""
class Actor(nn.Module):
def __init__(self, state_size, action_size):
super(Actor, self).__init__()
self.conv1 = nn.Conv2d(3,8, kernel_size=8, stride=4, padding=0) # 500*500*3 -> 124*124*8
self.maxp1 = nn.MaxPool2d(4, stride = 2, padding=0) # 124*124*8 -> 61*61*8
self.conv2 = nn.Conv2d(8, 16, kernel_size=4, stride=1, padding=0) # 61*61*8 -> 58*58*16
self.maxp2 = nn.MaxPool2d(2, stride=2, padding=0) # 58*58*16 -> 29*29*16 = 13456
self.linear_CNN = nn.Linear(13456, 256)
self.lstm_CNN = nn.LSTM(256, 85,1,batch_first=True)
#
self.state_size = state_size
self.action_size = action_size
self.linear1 = nn.Linear(self.state_size, 128)
self.linear2 = nn.Linear(128, 128)
self.lstm3 = nn.LSTM(128,85, 1,batch_first=True)
self.LSTM_layer_3 = nn.LSTM(510,128,1, batch_first=True) #864
self.linear4 = nn.Linear(128,32)
self.mu = nn.Linear(32,self.action_size) #256 linear2
self.sigma = nn.Linear(32,self.action_size)
def forward(self, state,tensor_cv,h_state_cv_a=(torch.zeros(1,3,85).to(device),
torch.zeros(1,3,85).to(device)),h_state_n_a=(torch.zeros(1,3,85).to(device),
torch.zeros(1,3,85).to(device)),h_state_3_a=(torch.zeros(1,1,128).to(device),
torch.zeros(1,1,128).to(device))):
# CV
x = F.relu(self.maxp1(self.conv1(tensor_cv)))
x = F.relu(self.maxp2(self.conv2(x))).reshape(3,1,13456)
#x = x.view(x.size(0), -1) # flatten
x = F.relu(self.linear_CNN(x))#.reshape(1,768)
x,h_state_cv_a = self.lstm_CNN(x) #,h_state_cv_a #.unsqueeze(0)
x = F.relu( x).reshape(1,255) #torch.tanh
# num
output_1 = F.relu(self.linear1(state))
output_2 = F.relu(self.linear2(output_1))
output_2,h_state_n_a = self.lstm3(output_2) #,h_state_n_a
output_2 = F.relu(output_2) #
output_2 = output_2.squeeze().reshape(1,255)
# LSTM
output_2 = torch.cat((x,output_2),1)
output_2 = output_2.unsqueeze(0)
output_3 , h_state_3_a= self.LSTM_layer_3(output_2,h_state_3_a) #,self.hidden_cell
a,b,c = output_3.shape
#
output_4 = F.relu(self.linear4(output_3.view(-1,c))) #
mu = torch.tanh(self.mu(output_4)) # tanh allows positive and negative values; sigmoid would give 0-1
sigma = F.relu(self.sigma(output_4)) + 0.001
mu = torch.diag_embed(mu).to(device)
sigma = torch.diag_embed(sigma).to(device) # change to 2D
dist = MultivariateNormal(mu,sigma) #N(μ,σ^2)
entropy = dist.entropy().mean()
action = dist.sample()
action_logprob = dist.log_prob(action)
action = torch.clamp(action.detach(), -0.8, 0.6)
return action,action_logprob,entropy,(h_state_cv_a[0].data,h_state_cv_a[1].data),(h_state_n_a[0].data,h_state_n_a[1].data),(h_state_3_a[0].data,h_state_3_a[1].data)
"""
|
[
"is0541hx@ed.ritsumei.ac.jp"
] |
is0541hx@ed.ritsumei.ac.jp
|
baf9440ec4050ee7b1f88215ed7d9fe4e3b4c53c
|
41ed3a71252f418f9331ee7a63023c6d9899b254
|
/classify_car_model.py
|
237a0a590493c774269b9c5d0f7211323082c24c
|
[] |
no_license
|
may1114/triplet-loss
|
f2c83dcc095e92fc841022ab832f90fc3c15790f
|
0f2a8c339d3c74fa74928e55f879bc8ca11989ed
|
refs/heads/master
| 2022-04-21T23:51:49.767383
| 2020-04-16T13:11:57
| 2020-04-16T13:11:57
| 256,210,796
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,397
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import os
import numpy as np
from math import ceil
from keras.applications.inception_v3 import InceptionV3
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
from keras.optimizers import SGD
from sklearn.utils import class_weight
from utils import generator_batch, generator_batch_multitask
FINE_TUNE = True
USE_PROCESSING =False
NEW_OPTIMIZER = True
LEARNING_RATE = 0.001
NBR_EPOCHS = 100
BATCH_SIZE = 64
IMG_WIDTH = 299
IMG_HEIGHT = 299
monitor_index = 'acc'
NBR_MODELS = 250
USE_CLASS_WEIGHTS = False
RANDOM_SCALE = True
MULTI_TASK = False
NBR_COLORS = 7
train_path = './train_vehicleModel_list.txt'
val_path = './val_vehicleModel_list.txt'
# In[ ]:
from keras.models import load_model
if FINE_TUNE:
#can load already fine_tune trained model
if MULTI_TASK:
model = load_model('./models/inception3_fine_tune_multi_task.h5')
else:
model = load_model('./models/inception3_fine_tune.h5')
else:
inception_model = InceptionV3(include_top = False, weights = None, input_tensor = None, input_shape = (IMG_WIDTH, IMG_HEIGHT, 3), pooling='avg')
x = inception_model.get_layer(index = -1).output
x = Dense(1024, activation='relu')(x)
model_out = Dense(NBR_MODELS, activation='softmax')(x)
if MULTI_TASK:
color_out = Dense(NBR_COLORS, activation='softmax')(x)
model = Model(inputs=inception_model.input, outputs = [model_out, color_out])
#after create model, create optimizer
optimizer = SGD(lr = LEARNING_RATE, momentum = 0.9, nesterov = True)
#compile model
save_file = './models/inceptionv3_category_model.h5'
if MULTI_TASK:
save_file = './models/inceptionv3_model_color.h5'
model.compile(loss=["categorical_crossentropy", "categorical_crossentropy"],
#loss_weights = [0.6, 0.4],
optimizer=optimizer, metrics=["accuracy"])
else:
model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
#create check point, early stop, change learning rate
best_model = ModelCheckpoint('./models/inceptionv3_category_model.h5', monitor='val_'+monitor_index,
verbose = 1, save_best_only = True)
reduce_lr = ReduceLROnPlateau(monitor='val_'+monitor_index, factor = 0.5, patience = 3, verbose = 1, min_lr = 0.00001)
early_stop = EarlyStopping(monitor='val_'+monitor_index, patience=15, verbose = 1)
train_data_lines, train_labels, val_data_lines = get_train_val_data()
steps_per_epoch = int(ceil(len(train_data_lines) * 1. / BATCH_SIZE))
validation_steps = int(ceil(len(val_data_lines) * 1. / BATCH_SIZE))
if USE_CLASS_WEIGHTS:
# balance the class weights
class_weights = class_weight.compute_class_weight('balanced', classes=np.unique(train_labels), y=train_labels)
else:
class_weights = None
#train model
if MULTI_TASK:
model.fit_generator(generator_batch_multitask(train_data_lines,
nbr_class_one = NBR_MODELS, nbr_class_two = NBR_COLORS,
batch_size = BATCH_SIZE, img_width = IMG_WIDTH,
img_height = IMG_HEIGHT, random_scale = RANDOM_SCALE,
shuffle = True, augment = True),
steps_per_epoch = steps_per_epoch, epochs = NBR_EPOCHS, verbose = 1,
validation_data = generator_batch_multitask(val_data_lines,
nbr_class_one = NBR_MODELS, nbr_class_two = NBR_COLORS, batch_size = BATCH_SIZE,
img_width = IMG_WIDTH, img_height = IMG_HEIGHT,
shuffle = False, augment = False),
validation_steps = validation_steps,
class_weight = class_weights, callbacks = [best_model, reduce_lr],
max_queue_size = 80, workers = 8, use_multiprocessing=True)
else:
model.fit_generator(generator_batch(train_data_lines, NBR_MODELS = NBR_MODELS,
batch_size = BATCH_SIZE, img_width = IMG_WIDTH,
img_height = IMG_HEIGHT, random_scale = RANDOM_SCALE,
shuffle = True, augment = True),
steps_per_epoch = steps_per_epoch, epochs = NBR_EPOCHS, verbose = 1,
validation_data = generator_batch(val_data_lines,
NBR_MODELS = NBR_MODELS, batch_size = BATCH_SIZE,
img_width = IMG_WIDTH, img_height = IMG_HEIGHT,
shuffle = False, augment = False),
validation_steps = validation_steps,
class_weight = class_weights, callbacks = [best_model, reduce_lr, early_stop],
max_queue_size = 80, workers = 8, use_multiprocessing=True)
# In[ ]:
def get_train_val_data():
train_data_lines = open(train_path).readlines()
#only need those existed image files
train_data_lines = [w for w in train_data_lines if os.path.exists(w.strip().split(' ')[0])]
train_labels = [int(w.strip().split(' ')[-1]) for w in train_data_lines]
val_data_lines = open(val_path).readlines()
val_data_lines = [w for w in val_data_lines if os.path.exists(w.strip().split(' ')[0])]
return train_data_lines, train_labels, val_data_lines
# In[ ]:
def get_fine_tune_model(train_data_lines, val_data_lines, steps_per_epoch, validation_steps, best_model_file):
base_model = InceptionV3(include_top = False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
pred = Dense(NBR_MODELS, activation='softmax')(x)
model = Model(inputs = base_model.input, outputs = pred)
#first, lock the base model layers weight
#only train the top layers
for layer in model.layers:
layer.trainable = False
best_model = ModelCheckpoint(best_model_file, monitor='val_acc',
verbose = 1, save_best_only = True)
reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.5,
patience=3, verbose=1, min_lr=0.00001)
#compile model
model.compile(optimizer='rmsprop', loss = 'categorical_crossentropy')
model.fit_generator(generator_batch(train_data_lines, NBR_MODELS = NBR_MODELS,
batch_size = BATCH_SIZE, img_width = IMG_WIDTH,
img_height = IMG_HEIGHT, random_scale = RANDOM_SCALE,
shuffle = True, augment = True),
steps_per_epoch = steps_per_epoch, epochs = NBR_EPOCHS, verbose = 1,
validation_data = generator_batch(val_data_lines,
NBR_MODELS = NBR_MODELS, batch_size = BATCH_SIZE,
img_width = IMG_WIDTH, img_height = IMG_HEIGHT,
shuffle = False, augment = False),
validation_steps = validation_steps,
callbacks = [best_model, reduce_lr],
max_queue_size = 80, workers = 8, use_multiprocessing=True)
# after train the top weights, then make from the 250th layer of the base model can train
for layer in base_model.layers[:249]:
layer.trainable = False
for layer in base_model.layers[249:]:
layer.trainable = True
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy')
model.fit_generator(generator_batch(train_data_lines, NBR_MODELS = NBR_MODELS,
batch_size = BATCH_SIZE, img_width = IMG_WIDTH,
img_height = IMG_HEIGHT, random_scale = RANDOM_SCALE,
shuffle = True, augment = True),
steps_per_epoch = steps_per_epoch, epochs = NBR_EPOCHS, verbose = 1,
validation_data = generator_batch(val_data_lines,
NBR_MODELS = NBR_MODELS, batch_size = BATCH_SIZE,
img_width = IMG_WIDTH, img_height = IMG_HEIGHT,
shuffle = False, augment = False),
validation_steps = validation_steps,
callbacks = [best_model, reduce_lr],
max_queue_size = 80, workers = 8, use_multiprocessing=True)
|
[
"noreply@github.com"
] |
may1114.noreply@github.com
|
8cdee2cf494f61ac77b1ecbfbc512b1a7d9019cc
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/DGpxmRkADuZaWHJxZ_12.py
|
41dd07642af65e0da32ac3763fd2a8b55fa61e9c
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,561
|
py
|
"""
Steve and Maurice have racing snails. They each have three, a slow `s`, medium
`m` and fast `f` one. Although Steve's snails are all a bit stronger than
Maurice's, Maurice has a trick up his sleeve. His plan is:
1. Round 1: `[s, f]` Sacrifice his slowest snail against Steve's fastest.
2. Round 2: `[m, s]` Use his middle snail against Steve's slowest.
3. Round 3: `[f, m]` Use his fastest snail against Steve's middle.
Create a function that determines whether Maurice's plan will work by
outputting `True` if Maurice wins 2/3 games.
The function inputs:
1. List 1: `[s, m, f]` for Maurice.
2. List 2: `[s, m, f]` for Steve.
### Examples
maurice_wins([3, 5, 10], [4, 7, 11]) ➞ True
# Since the matches are (3, 11), (5, 4) and (10, 7), Maurice wins 2 out of 3.
maurice_wins([6, 8, 9], [7, 12, 14]) ➞ False
# Since the matches are (6, 14), (8, 7) and (9, 12), Steve wins 2 out of 3.
maurice_wins([1, 8, 20], [2, 9, 100]) ➞ True
### Notes
* Maurice wins if his competing snail's speed **strictly** exceeds Steve's snail's speed.
* Steve will always play in this order: `[f, s, m]`.
* The order you'll get the snails is always in ascending order.
"""
def maurice_wins(m_snails, s_snails):
m_score = 0
if m_snails[0] > s_snails[2]:
m_score += 1
if m_snails[1] > s_snails[0]:
m_score += 1
if m_snails[2] > s_snails[1]:
m_score += 1
return m_score >= 2
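For reference, the same pairing logic compresses to a single comparison over the zipped matchups; an equivalent sketch:
# Maurice plays [s, m, f] against Steve's [f, s, m]; win on 2+ strict victories
def maurice_wins_compact(m_snails, s_snails):
    matchups = zip(m_snails, [s_snails[2], s_snails[0], s_snails[1]])
    return sum(m > s for m, s in matchups) >= 2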
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
4061d719724d305c83bf4d0fbac69f6b220e16a7
|
34374d6db2a177c12ac3325a49b9ff791541ec9e
|
/qiyou/migrations/0010_hot_route.py
|
d2bd3ae2b72ab098cafbab3730a0e1f58f29d6a4
|
[] |
no_license
|
ge-xiao/qiyou
|
2a0742cb4de6ee346c51704dc546403a2a603d71
|
1221cc8557b9f43e6d9ef9b4a90f4d9f5f125451
|
refs/heads/master
| 2022-11-25T13:03:46.580715
| 2020-07-24T05:04:51
| 2020-07-24T05:04:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
# Generated by Django 3.0.8 on 2020-07-19 12:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('qiyou', '0009_city_route'),
]
operations = [
migrations.CreateModel(
name='hot_route',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('route_name', models.CharField(max_length=100)),
('day_consume', models.CharField(max_length=20)),
('schedule', models.CharField(max_length=500)),
('scenery', models.CharField(max_length=500)),
],
),
]
|
[
"gx213213@126.com"
] |
gx213213@126.com
|
db1b90dc116d1fcbb3ac1b117fdd082c92cd9c08
|
276db4291a895c8c310fd20e4b1e108d4a8b4112
|
/shop/api/products/schemas.py
|
945cf352444e8693893c50b5645f2e55d8700f15
|
[] |
no_license
|
DMS/shop.flask.zc
|
3f84248f86f9fcfd93243c299cff4928ed39d4be
|
b0c8cf616c4ebb9aaf5b6818fda3d527fec69199
|
refs/heads/master
| 2021-02-19T17:21:12.044994
| 2020-02-21T20:41:32
| 2020-02-21T20:41:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 849
|
py
|
from marshmallow import Schema, fields
class TagSchema(Schema):
tag_name = fields.Str()
class CategorySchema(Schema):
category_name = fields.Str()
class ProductReviewSchema(Schema):
id = fields.Integer()
product_id = fields.Integer()
rating = fields.Integer()
review = fields.Str()
class ProductSchema(Schema):
id = fields.Integer()
name = fields.Str()
price = fields.Decimal()
capacity = fields.Int()
category = fields.Nested(CategorySchema)
produced_at = fields.DateTime()
reviews = fields.List(fields.Nested(ProductReviewSchema), required=False)
tag_schema = TagSchema()
category_schema = CategorySchema()
product_schema = ProductSchema()
products_schema = ProductSchema(many=True)
product_review_schema = ProductReviewSchema()
product_reviews_schema = ProductReviewSchema(many=True)
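For completeness, these module-level schema instances are used by calling dump() on an object or dict; a minimal sketch assuming marshmallow 3 (the sample values are made up):
# Serialize a plain dict through the nested schemas; dump() returns a dict
example = {"id": 1, "name": "tea", "price": "4.50", "capacity": 10,
           "category": {"category_name": "drinks"}}
print(product_schema.dump(example))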
|
[
"huiyueguan111@gmail.com"
] |
huiyueguan111@gmail.com
|
946ad2a8b370e24b92ae5d25b1d8fa26ac317a2b
|
505506f12bf43f8693b95d4b19bc4e0aded8cab0
|
/agents/a2c.py
|
84382e8eb62cf1398354feae941e7c3aeb759546
|
[
"BSD-3-Clause"
] |
permissive
|
Aubret/gym-minigrid
|
8be3fe596a0a071af4c504d215655114d4c7bc76
|
fc622913333da4564a7e3343920ce4415e47c5ab
|
refs/heads/master
| 2021-06-06T20:23:48.382545
| 2021-06-04T12:31:37
| 2021-06-04T12:31:37
| 169,751,153
| 0
| 0
| null | 2019-02-08T14:58:54
| 2019-02-08T14:58:53
| null |
UTF-8
|
Python
| false
| false
| 8,055
|
py
|
import time
import functools
import tensorflow as tf
import numpy as np
from baselines import logger
from baselines.common import set_global_seeds, explained_variance
from baselines.common import tf_util
from baselines.common.policies import build_policy
from baselines.a2c.utils import Scheduler, find_trainable_variables
from tensorflow import losses
#from baselines.a2c.runner import Runner
from runner import Runner
class Model(object):
"""
We use this class to :
__init__:
- Creates the step_model
- Creates the train_model
train():
- Make the training part (feedforward and retropropagation of gradients)
save/load():
- Save load the model
"""
def __init__(self, policy, env, nsteps,
ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4,
alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear'):
sess = tf_util.get_session()
nenvs = env.num_envs
nbatch = nenvs*nsteps
with tf.variable_scope('a2c_model', reuse=tf.AUTO_REUSE):
# step_model is used for sampling
step_model = policy(nenvs, 1, sess) #just do one action step
# train_model is used to train our network
train_model = policy(nbatch, nsteps, sess)
A = tf.placeholder(train_model.action.dtype, train_model.action.shape)
ADV = tf.placeholder(tf.float32, [nbatch])
R = tf.placeholder(tf.float32, [nbatch])
LR = tf.placeholder(tf.float32, [])
# Calculate the loss
# Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss
# Policy loss
neglogpac = train_model.pd.neglogp(A)
# L = A(s,a) * -logpi(a|s)
pg_loss = tf.reduce_mean(ADV * neglogpac)
# Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.
entropy = tf.reduce_mean(train_model.pd.entropy())
# Value loss
vf_loss = losses.mean_squared_error(tf.squeeze(train_model.vf), R)
loss = pg_loss - entropy*ent_coef + vf_loss * vf_coef
# Update parameters using loss
# 1. Get the model parameters
params = find_trainable_variables("a2c_model")
# 2. Calculate the gradients
grads = tf.gradients(loss, params)
if max_grad_norm is not None:
# Clip the gradients (normalize)
grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads = list(zip(grads, params))
# zip aggregate each gradient with parameters associated
# For instance zip(ABCD, xyza) => Ax, By, Cz, Da
# 3. Make op for one policy and value update step of A2C
trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon)
_train = trainer.apply_gradients(grads)
lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
def train(obs, states, rewards, masks, actions, values):
# Here we calculate advantage A(s,a) = R + yV(s') - V(s)
# rewards = R + yV(s')
advs = rewards - values
for step in range(len(obs)):
cur_lr = lr.value()
td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, LR:cur_lr}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
policy_loss, value_loss, policy_entropy, _ = sess.run(
[pg_loss, vf_loss, entropy, _train],
td_map
)
return policy_loss, value_loss, policy_entropy
self.train = train
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.value = step_model.value
self.initial_state = step_model.initial_state
self.save = functools.partial(tf_util.save_variables, sess=sess)
self.load = functools.partial(tf_util.load_variables, sess=sess)
tf.global_variables_initializer().run(session=sess)
class a2c:
def __init__(self,
network,
env,
seed=None,
nsteps=4,
total_timesteps=int(80e6),
vf_coef=0.5,
ent_coef=0.01,
max_grad_norm=0.5,
lr=7e-4,
lrschedule='linear',
epsilon=1e-5,
alpha=0.99,
gamma=0.95,
print_freq=100,
load_path=None,
**network_kwargs):
set_global_seeds(seed)
self.env= env
self.nsteps=nsteps
self.gamma = gamma
self.print_freq = print_freq
# Get the nb of env
#self.env.num_envs=1 #probably parallelism
self.policy = build_policy(env, network, **network_kwargs) #generate policy function, with step and evaluate method
# Instantiate the model object (that creates step_model and train_model)
self.model = Model(policy=self.policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule) #contain all necessery function
if load_path is not None:
self.model.load(load_path)
# Instantiate the runner object
self.runner = Runner(env, self.model, nsteps=self.nsteps, gamma=self.gamma) #Use to run n step in environment and give observations
# Calculate the batch_size
self.nbatch = self.env.num_envs*nsteps
self.total_timesteps = total_timesteps
# Start total timer
self.tstart = time.time()
self.freq_timer = self.tstart
def step(self,obs):
#self.episode_rewards = [0.0]
#action = self.act(obs, update_eps=self.exploration.value(self.learning_steps))[0] #obs[None]
actions, values, states, neglogp = self.model.step(obs)#,S=None,M=[False])#states is the initial state, run n steps in environment
obs, rewards, dones, _ = self.env.step(actions)#look like S is the next state for evaluation
#self.episode_rewards[-1] += rew # Last element
#self.episode_rewards.append(0)
#if done and len(self.episode_rewards) % self.print_freq == 0:
# logger.record_tabular("steps", self.learning_steps)
# logger.record_tabular("episodes", len(self.episode_rewards))
# logger.record_tabular("mean episode reward", round(np.mean(self.episode_rewards[-101:-1]), 1))
# logger.dump_tabular()
return obs,actions,rewards,dones
def learn(self,num_timesteps,timesteps=1):
for update in range(timesteps):
# Get mini batch of experiences
obs, states, rewards, masks, actions, values =self.runner.run() # run n steps in environment
policy_loss, value_loss, policy_entropy = self.model.train(obs, states, rewards, masks, actions, values)
if num_timesteps % self.print_freq == 0:
# Calculate the fps (frame per second)
fps = int((num_timesteps * self.nbatch) / (time.time()-self.freq_timer))
# Calculates if value function is a good predictor of the returns (ev > 1)
# or if it's just worse than predicting nothing (ev =< 0)
print(values,rewards)
ev = explained_variance(values, rewards)
logger.record_tabular("nupdates", num_timesteps)
logger.record_tabular("total_timesteps", num_timesteps*self.nbatch)
logger.record_tabular("fps", fps)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("value_loss", float(value_loss))
logger.record_tabular("explained_variance", float(ev))
logger.record_tabular("total_time", float(time.time()-self.tstart))
logger.dump_tabular()
self.freq_timer = time.time()
return self.model
|
[
"lalumiere3@hotmail.fr"
] |
lalumiere3@hotmail.fr
|
949138441ac7adbac527b9d2f42adb4ad8de2df9
|
72ad38d33d7739b28b52c228c45f703e076e25fa
|
/t1.py
|
0e7c27c76d811b83a45cd49b2db9443daf87f1c3
|
[
"MIT"
] |
permissive
|
Manthanc007/APS-2o2o
|
c39dbd0606325ab29363a86afc1da7d0dd2aef01
|
a84337c4e658a93b6c67515fa3ef59b09f2e5e94
|
refs/heads/master
| 2020-12-20T22:05:01.871426
| 2020-04-23T20:35:10
| 2020-04-23T20:35:10
| 236,221,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
n=int(input())
l=list(map(int,input().split()))
l.sort()
alice=l[:len(l)//2]
bob=l[len(l)//2:]
bob.sort(reverse=True)
if(n>2):
alice=[i*10 for i in alice]
final=sum(alice+bob)
print(final,alice,bob)
else:
print(sum(l))
|
[
"noreply@github.com"
] |
Manthanc007.noreply@github.com
|
46cf1136357e541718505dc7f2d65b45ec650ee0
|
3c912c24fe47bbe58add168ec4aa7bc488f542f7
|
/cms/plugins/text/migrations/0004_table_rename.py
|
874d07231d4bbc1709dba02663e9cd701b04c6c8
|
[
"BSD-3-Clause"
] |
permissive
|
eduncan911/django_cms
|
eb96617f108de19c41f8502c055647a28f2d6a06
|
66c27f059ca0779157a7c3cc2e007d8090f10351
|
refs/heads/master
| 2021-01-20T03:17:50.894858
| 2017-04-26T20:24:08
| 2017-04-26T20:24:08
| 89,523,211
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,086
|
py
|
from south.db import db
from django.db import models
from cms.plugins.text.models import *
class Migration:
depends_on = (
("cms", "0018_site_permissions"),
)
def forwards(self, orm):
db.rename_table("text_text", "cmsplugin_text")
db.rename_table("text_publictext", "cmsplugin_textpublic")
db.alter_column('cmsplugin_text', 'public_id', orm['text.text:public'])
try:
db.delete_foreign_key('cmsplugin_text' ,'public_id')
except:
pass
db.drop_primary_key("cmsplugin_textpublic")
db.rename_column("cmsplugin_textpublic", "publiccmsplugin_ptr_id", "cmspluginpublic_ptr_id")
db.create_primary_key("cmsplugin_textpublic", ("cmspluginpublic_ptr_id",))
db.foreign_key_sql('cmsplugin_text' ,'public_id', 'cmsplugin_textpublic', 'cmspluginpublic_ptr_id')
def backwards(self, orm):
db.delete_foreign_key('cmsplugin_text' ,'public_id')
db.drop_primary_key("cmsplugin_textpublic")
db.rename_column("cmsplugin_textpublic", "cmspluginpublic_ptr_id", "publiccmsplugin_ptr_id")
db.create_primary_key("cmsplugin_textpublic", ("publiccmsplugin_ptr_id",))
db.foreign_key_sql('cmsplugin_text' ,'public_id', 'cmsplugin_textpublic', "publiccmsplugin_ptr_id")
db.rename_table("cmsplugin_text", "text_text")
db.rename_table("cmsplugin_textpublic", "text_publictext")
db.alter_column('cmsplugin_text', 'public_id', orm['text.text:public'])
models = {
'cms.publiccmsplugin': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'cms.cmsplugin': {
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 2, 6, 35, 26, 823213)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inherited_public': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'inherited_origin'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPluginPublic']"}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'text.textpublic': {
'Meta': {'db_table': "'cmsplugin_textpublic'"},
'body': ('django.db.models.fields.TextField', [], {}),
'cmspluginpublic_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPluginPublic']", 'unique': 'True', 'primary_key': 'True'}),
'mark_delete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'cms.page': {
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 2, 6, 35, 27, 318807)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'blank': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'public': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'origin'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.PagePublic']"}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.cmspluginpublic': {
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 2, 6, 35, 28, 96687)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'mark_delete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.PagePublic']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPluginPublic']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'text.text': {
'Meta': {'db_table': "'cmsplugin_text'"},
'body': ('django.db.models.fields.TextField', [], {}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'public': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'origin'", 'unique': 'True', 'null': 'True', 'to': "orm['text.TextPublic']"})
},
'cms.pagepublic': {
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 2, 6, 35, 27, 704298)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'mark_delete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'blank': 'True', 'null': 'True', 'to': "orm['cms.PagePublic']"}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['text']
|
[
"eduncan911+github@gmail.com"
] |
eduncan911+github@gmail.com
|
f012bf3372a3e6fe81130ba3ca388f4bc6b1b321
|
87247fc6010499ee572dead1ef5474ae952034c6
|
/venv/bin/python-config
|
d1c3fe7df82310ef6aab97caad6c21eb0f152af5
|
[] |
no_license
|
fernandasj/vue-django
|
d4c6b0f198e1ce0e491657828f035c8307af761c
|
be0a92cfa1c6acd993b165f834bc9b9e28e2b558
|
refs/heads/master
| 2020-09-20T09:07:40.588250
| 2019-11-27T13:00:18
| 2019-11-27T13:00:18
| 224,431,748
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,361
|
#!/home/fernanda/IdeaProjects/vue-django/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
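# Usage sketch (illustrative; the exact paths and flags printed depend entirely
# on the local Python build):
#   $ ./python-config --includes
#   -I/usr/include/python3.6m -I/usr/include/python3.6m
#   $ ./python-config --ldflags
#   -L/usr/lib/python3.6/config-3.6m -lpython3.6m -lpthread -ldl -lutil -lm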
|
[
"fernanda.silva_sj@hotmail.com"
] |
fernanda.silva_sj@hotmail.com
|
|
82f1a9eac4f13240266abc89375c0500b47a5771
|
e2ec890cc48d10087bac70223c307a5b788583e2
|
/test_precision.py
|
f95d30cf097e2a52495a8e42707cc7b149265f13
|
[] |
no_license
|
abhi1kumar/CS6190_pml_project
|
489bc18bb2537be38f58fc61396111dede07cfb4
|
11b0bcbb6395216bc98f141cb0f3f5f2081f73f5
|
refs/heads/master
| 2022-03-26T12:46:01.576929
| 2019-12-14T05:27:41
| 2019-12-14T05:27:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,876
|
py
|
import argparse
import random
import numpy as np
import torch
from hyperparameters import *
from util import *
from test.test_read_data_split import *
from EM_Algorithm import *
parser = argparse.ArgumentParser(description='Read XML data from files')
parser.add_argument('--dir_path' , type=str, default='data/Bibtex' , help='path of the directory containing data-file')
parser.add_argument('--data_filename' , type=str, default='Bibtex_data.txt' , help='rel path of data_filename')
parser.add_argument('--train_split_filename', type=str, default='bibtex_trSplit.txt' , help='rel path of train_split_filename')
parser.add_argument('--test_split_filename' , type=str, default='bibtex_tstSplit.txt', help='rel path of test_split_filename')
args = parser.parse_args()
X, Y = get_data_labels_from_datafile(args.dir_path, args.data_filename)
X, Y = torch.tensor(X).float().to(device), torch.tensor(Y).float().to(device)
#normalise X
X = normaliseX(X)
#shuffle Y
num_all_labels = Y.shape[1]
shuffled_indices = np.array(random.sample(range(num_all_labels), num_all_labels))
print("shuffled_indices : ", shuffled_indices)
Y = Y[:, shuffled_indices]
train_indices_for_each_split = get_indices_for_different_splits(args.dir_path, args.train_split_filename)
test_indices_for_each_split = get_indices_for_different_splits(args.dir_path, args.test_split_filename)
num_splits = train_indices_for_each_split.shape[0]
# Get some stats
print("")
print("Number of splits = {}".format(num_splits))
print("Train indices for each split =", train_indices_for_each_split.shape)
print("Test indices for each split =", test_indices_for_each_split.shape)
iteration = 25
for split_idx in range(num_splits):
train_indices = train_indices_for_each_split[split_idx]
test_indices = test_indices_for_each_split [split_idx]
# Test data with seen and all labels
test_X = X[test_indices]
test_Y_seen = Y[test_indices, :num_seen_labels]
test_Y = Y[test_indices]
V = torch.load(save_folder+"/v_"+str(iteration)+".pt")
W = torch.load(save_folder+"/w_"+str(iteration)+".pt")
beta = torch.load(save_folder+"/beta_"+str(iteration)+".pt")
psi = get_psi(beta, V, lambda_psi)
print("Test Data X shape =", test_X.shape)
print("Test Labels Y seen shape =", test_Y_seen.shape)
prec_seen = precision_at_k(test_X, test_Y, W, beta, psi, V, topk, num_seen_labels, isSeen = True)
prec_seen_unseen = precision_at_k(test_X, test_Y, W, beta, psi, V, topk, num_seen_labels, isSeenUnseen = True)
print("Test_Precision_Seen : @1 : {:.4f} \t @3 : {:.4f} \t @5 : {:.4f} \n Test_Precision_Seen+Unseen : @1 : {:.4f} \t @3 : {:.4f} \t @5 : {:.4f}".format
(prec_seen[0], prec_seen[1], prec_seen[2], prec_seen_unseen[0], prec_seen_unseen[1], prec_seen_unseen[2]))
print("\n========================================================================\n")
break
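# Minimal sketch of what precision@k computes for a single example (illustrative;
# the imported precision_at_k is assumed to batch an equivalent computation over
# the whole test set):
def _precision_at_k_single(scores, labels, k):
    topk_idx = np.argsort(scores)[::-1][:k]   # indices of the k highest-scoring labels
    return labels[topk_idx].sum() / k         # fraction of those that are true labels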
|
[
"aish322@gmail.com"
] |
aish322@gmail.com
|
1ee934da947a05b39ed31df40719477ff5ab88a7
|
d8b798048bb56efc949b31771bf7eb4e98cb2a96
|
/1.5/Eligibility/Eligibility.py
|
b8e8e6a8046169fffc3d30580a6c1f81c4abd67e
|
[] |
no_license
|
Varfor/Kattis
|
c144b535bc5b6c03c65c419d242e99f61731768a
|
c2c63e51a0e55e2e45d67ae37b1d178e707ad1cd
|
refs/heads/master
| 2020-03-24T06:42:30.023405
| 2020-01-29T08:59:38
| 2020-01-29T08:59:38
| 142,538,602
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
for i in range(int(input())):
    # fields per contestant: name, date first enrolled, date of birth, semester hours
    name, enrolled, birth, courses = input().split()
    print(name, end=" ")
    if int(enrolled[:4]) >= 2010 or int(birth[:4]) >= 1991:
        print('eligible')
    elif int(courses) >= 41:
        print('ineligible')
    else:
        print('coach petitions')
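# Example (illustrative): the input line "john 2010/06/01 1995/03/21 8"
# prints "john eligible", since the enrolment year is at least 2010.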
|
[
"38240861+Varfor@users.noreply.github.com"
] |
38240861+Varfor@users.noreply.github.com
|
48b0be489716eeef9c8e8c096a4b1143b2c1b8ff
|
6a946e748aed8922feffecaca77b79b5d447c48f
|
/main.py
|
29d2f03fa800da7508cda85aeeac7e94a99484d8
|
[] |
no_license
|
Zeroqube/Yandex_lyceum_project_api_bor
|
5171b594cd17c6c4e3c182804cc5a6dfe5925f69
|
25cd22c2a4b9c438469cb88dac54aceb5adf11e6
|
refs/heads/master
| 2023-04-12T01:03:21.812134
| 2021-04-26T19:00:48
| 2021-04-26T19:00:48
| 360,931,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,437
|
py
|
import sqlite3
from discord.ext import commands
import datetime as dt
# data holds only the bot token, to keep it convenient to use and out of the repo
from data import TOKEN
SQL_FILE = 'bot_db'
def normalize(a):
    # render DB rows as text: one line per row, fields joined by tabs
    res = []
    for line in a:
        res.append('\t'.join(str(item) for item in line))
    return res
bot = commands.Bot(command_prefix="!")
con = sqlite3.connect(SQL_FILE)
cur = con.cursor()
@bot.event
async def on_ready():
print(f'{bot.user} has connected to Discord!')
for guild in bot.guilds:
print(
f'{bot.user} подключились к чату:\n'
f'{guild.name}(id: {guild.id})')
@bot.command(name='set_task')
async def set_task(ctx, name):
if len(name) > 100:
await ctx.send('Название задачи не должно превышать 100 символов!')
return
channel_id = ctx.author.id
if not cur.execute('SELECT id FROM channels WHERE id = ?', (channel_id, )).fetchone():
cur.execute('INSERT INTO channels VALUES (?)', (channel_id, ))
if cur.execute('SELECT task_name FROM todo WHERE channel_id = ? AND task_name = ?', (channel_id, name)).fetchone():
        await ctx.send(f'Задача с именем {name} уже есть!')
return
cur.execute('INSERT INTO todo(channel_id, task_name) VALUES (?, ?)', (channel_id, name, ))
con.commit()
await ctx.message.add_reaction('👍')
print(channel_id, name)
@bot.command(name='set_tasks')
async def set_tasks(ctx, *names):
channel_id = ctx.author.id
if not cur.execute('SELECT id FROM channels WHERE id = ?', (channel_id,)).fetchone():
cur.execute('INSERT INTO channels VALUES (?)', (channel_id,))
con.commit()
for name in names:
if len(name) > 100:
await ctx.send('Название задачи не должно превышать 100 символов!')
return
if cur.execute('SELECT task_name FROM todo WHERE channel_id = ? AND task_name = ?', (channel_id, name)).fetchone():
            await ctx.send(f'Задача с именем {name} уже есть!')
return
cur.execute('INSERT INTO todo(channel_id, task_name) VALUES (?, ?)', (channel_id, name, ))
await ctx.message.add_reaction('👍')
print(channel_id, name)
con.commit()
@bot.command(name='set_date')
async def set_date(ctx, name, date):
channel_id = ctx.author.id
if not cur.execute('SELECT id FROM channels WHERE id = ?', (channel_id,)).fetchone():
await ctx.send('У вас ещё не было задач')
return
if not cur.execute('SELECT task_name FROM todo WHERE channel_id = ? AND task_name = ?',
(channel_id, name)).fetchone():
        await ctx.send(f'Задачи с именем {name} ещё нет!')
return
day, month, year = map(int, date.split('.'))
date = dt.date(year, month, day)
cur.execute('''UPDATE todo
SET date = ?
WHERE channel_id = ? AND task_name = ?''', (date, channel_id, name))
con.commit()
await ctx.message.add_reaction('👍')
@bot.command(name='set_time')
async def set_time(ctx, name, time):
channel_id = ctx.author.id
if not cur.execute('SELECT id FROM channels WHERE id = ?', (channel_id,)).fetchone():
await ctx.send('У вас ещё не было задач')
return
if not cur.execute('SELECT task_name FROM todo WHERE channel_id = ? AND task_name = ?',
(channel_id, name)).fetchone():
        await ctx.send(f'Задачи с именем {name} ещё нет!')
return
hours, minutes = map(int, time.split(':'))
print(hours, minutes)
time = dt.time(hour=hours, minute=minutes)
print(time)
cur.execute('''UPDATE todo
SET time = ?
WHERE channel_id = ? AND task_name = ?''', (time, channel_id, name))
con.commit()
await ctx.message.add_reaction('👍')
@bot.command(name='set_description')
async def set_description(ctx, name, *description):
channel_id = ctx.author.id
if not cur.execute('SELECT id FROM channels WHERE id = ?', (channel_id,)).fetchone():
await ctx.send('У вас ещё не было задач')
return
if not cur.execute('SELECT task_name FROM todo WHERE channel_id = ? AND task_name = ?',
(channel_id, name)).fetchone():
        await ctx.send(f'Задачи с именем {name} ещё нет!')
return
description = ' '.join(description)
cur.execute('''UPDATE todo
SET description = ?
WHERE channel_id = ? AND task_name = ?''', (description, channel_id, name))
con.commit()
await ctx.message.add_reaction('👍')
@bot.command(name='get_description')
async def get_description(ctx, name):
channel_id = ctx.author.id
if not cur.execute('SELECT id FROM channels WHERE id = ?', (channel_id,)).fetchone():
await ctx.send('У вас ещё не было задач')
return
if not cur.execute('SELECT task_name FROM todo WHERE channel_id = ? AND task_name = ?',
(channel_id, name)).fetchone():
        await ctx.send(f'Задачи с именем {name} ещё нет!')
return
res = cur.execute('SELECT date, time, description FROM todo WHERE channel_id = ?', (channel_id, )).fetchone()
s = f'Вы описали задачу {name} {res[0]} {res[1]} так:\n' + str(res[2])
await ctx.send(s)
@bot.command(name='get_tasks')
async def get_tasks(ctx):
channel_id = ctx.author.id
if not cur.execute('SELECT id FROM channels WHERE id = ?', (channel_id, )).fetchone():
await ctx.send('У вас ещё не было задач')
return
if not cur.execute('SELECT task_name, date, time FROM todo WHERE channel_id = ?', (channel_id, )).fetchone():
await ctx.send('У вас нет сейчас задач')
return
res = cur.execute('SELECT task_name, date, time FROM todo WHERE channel_id = ?', (channel_id, )).fetchall()
s = 'Ваши задачи:\n' + '\n'.join(normalize(res))
await ctx.send(s)
@bot.command(name='get_with_date')
async def get_with_date(ctx, date):
channel_id = ctx.author.id
if not cur.execute('SELECT id FROM channels WHERE id = ?', (channel_id, )).fetchone():
await ctx.send('У вас ещё не было задач')
return
if not cur.execute('SELECT task_name, date, time FROM todo WHERE channel_id = ?', (channel_id, )).fetchone():
await ctx.send('У вас нет сейчас задач')
return
    if date == 'None':
        # tasks without a date are stored as SQL NULL, which '=' never matches
        res = cur.execute('SELECT task_name, date, time FROM todo WHERE channel_id = ? AND date IS NULL',
                          (channel_id, )).fetchall()
    else:
        day, month, year = map(int, date.split('.'))
        date = dt.date(year, month, day)
        res = cur.execute('SELECT task_name, date, time FROM todo WHERE channel_id = ? AND date = ?',
                          (channel_id, date, )).fetchall()
s = 'Ваши задачи:\n' + '\n'.join(normalize(res))
await ctx.send(s)
@bot.command(name='delete')
async def delete(ctx, name):
channel_id = ctx.author.id
if not cur.execute('SELECT id FROM channels WHERE id = ?', (channel_id,)).fetchone():
await ctx.send('У вас ещё не было задач')
return
if not cur.execute('SELECT task_name FROM todo WHERE channel_id = ? AND task_name = ?',
(channel_id, name)).fetchone():
        await ctx.send(f'Задачи с именем {name} ещё нет!')
return
    cur.execute('''DELETE FROM todo
            WHERE channel_id = ? AND task_name = ?''', (channel_id, name, ))
    con.commit()
    await ctx.send(f'Задача с именем {name} успешно удалена')
@bot.command(name='delete_all')
async def delete_all(ctx):
channel_id = ctx.author.id
if not cur.execute('SELECT id FROM channels WHERE id = ?', (channel_id,)).fetchone():
await ctx.send('У вас ещё не было задач')
return
if not cur.execute('SELECT task_name, date, time FROM todo WHERE channel_id = ?', (channel_id, )).fetchone():
await ctx.send('У вас нет сейчас задач')
return
    cur.execute('''DELETE FROM todo
            WHERE channel_id = ?''', (channel_id, ))
    con.commit()
    await ctx.send('Все задачи успешно удалены')
bot.run(TOKEN)
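# Schema assumed by the queries above (illustrative reconstruction; the actual
# bot_db file is created outside this script):
#   CREATE TABLE channels (id INTEGER);
#   CREATE TABLE todo (channel_id INTEGER, task_name TEXT,
#                      date DATE, time TIME, description TEXT);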
|
[
"maxbystrov2006@yandex.ru"
] |
maxbystrov2006@yandex.ru
|
eb2e1b0baf857817dbf21814013801747d376aa2
|
b73e16120ea4e76991de735ee84d62c9a375f6af
|
/text.py
|
8dea38ef992717c27c69d1404b7fdeadfc36b690
|
[] |
no_license
|
coke-killer/matploblibDemo
|
00e25aab9f300c04dd9a9b4854f0319bbb97f37d
|
45850510ffbe87911f141a670c63c5ef2550e3c6
|
refs/heads/master
| 2023-04-07T11:50:24.302751
| 2021-04-14T08:40:41
| 2021-04-14T08:40:41
| 350,229,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
# __author__: "yudongyue"
# date: 2021/3/24
import matplotlib.pyplot as plt
fig = plt.figure()
plt.axis([0, 10, 0, 10])
t = "This is a really long string that I'd rather have wrapped so that it" \
" doesn't go outside of the figure, but if it's long enough it will go" \
" off the top or bottom!"
# plt.text(4, 1, t, ha='left', rotation=15, wrap=True)
# plt.text(6, 5, t, ha='left', rotation=15, wrap=True)
# plt.text(5, 5, t, ha='right', rotation=-15, wrap=True)
plt.text(5, 10, t, fontsize=18, style='oblique', ha='center', va='top', wrap=True)
# plt.text(3, 4, t, family='serif', style='italic', ha='right', wrap=True)
# plt.text(-1, 0, t, ha='left', rotation=-15, wrap=True)
plt.show()
|
[
"yudongyue@daqo.com"
] |
yudongyue@daqo.com
|
7a3143c4c3bb3832522335c9af725558d0b63e22
|
ad761920baf5fc381c0dd5b8b17e924b84813a9d
|
/Python/GeneratingData/test2.py
|
38734e419f20aee97484068b959a133c9f7fe308
|
[] |
no_license
|
lmasciangioli/python
|
1aea5a2de6985f344417ac2d0339ed1a956a5fca
|
4a3cd5cb00947b1d90d6d0fc91498a8c84b7c95e
|
refs/heads/master
| 2023-05-20T02:48:18.211846
| 2020-07-16T12:09:39
| 2020-07-16T12:09:39
| 280,143,866
| 0
| 0
| null | 2021-06-10T23:10:21
| 2020-07-16T12:07:24
|
Python
|
UTF-8
|
Python
| false
| false
| 566
|
py
|
# square every digit of num and concatenate the squares in order
# (e.g. 9119 -> 811181, from 81, 1, 1, 81)
num = 9119
numeros = ''.join(str(int(digit) ** 2) for digit in str(num))
print(int(numeros))
|
[
"l.masciangioli@gmail.com"
] |
l.masciangioli@gmail.com
|
c78cd1d4353ca1a0bf3d26428106e8899a0a7514
|
304b56a2f914d348864dcea27a2d2418a8628833
|
/练习11.py
|
ae7b0fd50d750ee5e67184037147793b1a3dd92a
|
[] |
no_license
|
hjw520wsr/BigdDataHomework
|
10e39101c197306c1ec5478e3d79de37951aab47
|
6766ac39fa18d5e7dd9ff6c43e7ec33f63ab31d7
|
refs/heads/master
| 2020-03-23T21:41:39.940712
| 2018-07-24T08:21:08
| 2018-07-24T08:21:08
| 142,125,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 10:09:10 2018
@author: Nothing
Splits '学校和编号.txt' (one "school name, enrolment count" pair per line)
into two files: one with the school names, one with the counts.
"""
ls = open('学校和编号.txt', encoding='gbk').readlines()
schoolls = []   # school names
numls = []      # enrolment counts
f = open('学校名称2.txt', 'a', encoding='utf-8')
f1 = open('学校招生人数1.txt', 'a', encoding='utf-8')
for line in ls:
    schoolls.append(line.split(',')[0][1:])       # strip the first character of the name field
    numls.append(int(line.split(',')[1][0:-2]))   # strip the trailing two characters
f.write(str(schoolls))
f1.write(str(numls))
f.close()
f1.close()
|
[
"noreply@github.com"
] |
hjw520wsr.noreply@github.com
|
72f3aa2e088dad39db8146de3b2363c8aaab344a
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/XYvyirQMkmPHGLaZi_3.py
|
63738c3b1b003d83bf113ea0603818eacc7891de
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
def boom_intensity(n):
    if n < 2:
        return 'boom'
    boom = 'B' + 'o' * n + 'm'
    if n % 2 == 0:
        boom += '!'
    if n % 5 == 0:
        boom = boom.upper()
    return boom
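# Illustrative checks, following the rules above:
#   boom_intensity(1)  -> 'boom'
#   boom_intensity(2)  -> 'Boom!'           (even n appends '!')
#   boom_intensity(5)  -> 'BOOOOOM'         (multiples of 5 are uppercased)
#   boom_intensity(10) -> 'BOOOOOOOOOOM!'   (both rules apply)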
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
0edcb93dc8387adeb8f9bb707020cdbed6a3aced
|
11574e9050699592ebf68419e3acb0efd17afa43
|
/advertisements/migrations/0009_auto_20210311_1001.py
|
5a54fe6b4add220ff8f9049256d7e5d59e472b20
|
[] |
no_license
|
MonikaLu/Sellpoint
|
ec9ff96efb929c9e878cda6ce9a686946cedac24
|
3aa19b64bc9832a8f01f9356eaf3e231a969a10f
|
refs/heads/main
| 2023-08-12T00:29:47.969980
| 2021-10-01T10:11:33
| 2021-10-01T10:11:33
| 412,400,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
# Generated by Django 3.1.6 on 2021-03-11 09:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('advertisements', '0008_advertisement_category'),
]
operations = [
migrations.AlterField(
model_name='advertisement',
name='category',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.DO_NOTHING, related_name='advertisement', to='advertisements.category'),
),
]
|
[
"monika-luu@hotmail.com"
] |
monika-luu@hotmail.com
|
0413e65cabbf039e6af3017c3f65c9cfa72d4387
|
f40ff4e12f0ee8ca15d8262a6570f025d726d165
|
/ml3/envs/bullet_sim.py
|
11bf844481f3af0885e6a59611069154bde53c32
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
rally12/LearningToLearn
|
3b884bb3ff126f6c750c4a351395909bdbd355c4
|
fa32b98b40402fa15982b450ed09d9d3735ec924
|
refs/heads/main
| 2023-06-29T21:42:07.423866
| 2021-07-23T04:01:36
| 2021-07-23T04:01:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,846
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import numpy as np
import pybullet_utils.bullet_client as bc
import pybullet_data
import pybullet
class BulletSimulation(object):
def __init__(self, gui, controlled_joints, ee_idx, torque_limits,target_pos):
if gui:
self.sim = bc.BulletClient(connection_mode=pybullet.GUI)
else:
self.sim = bc.BulletClient(connection_mode=pybullet.DIRECT)
self.sim.setAdditionalSearchPath(pybullet_data.getDataPath())
self.ee_idx = ee_idx
self.cur_joint_pos = None
self.cur_joint_vel = None
self.curr_ee = None
self.babbling_torque_limits = None
self.logger = None
self.controlled_joints = controlled_joints
self.torque_limits = torque_limits
self.n_dofs = len(controlled_joints)
#pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())
# TODO: the following should be extracted into some world model that is loaded independently of the robot
#self.planeId = pybullet.loadURDF("plane.urdf",[0,0,0])
if target_pos is not None:
self.cubeId = pybullet.loadURDF("sphere_small.urdf",target_pos)
def disconnect(self):
self.sim.disconnect()
#pybullet.disconnect()
    def get_random_torques(self, time_horizon, bounds_low, bounds_high):
        # note: bounds_low is currently unused; torques are sampled
        # symmetrically from [-bounds_high, bounds_high]
        trajectory = []
        for t in range(time_horizon):
            torques = np.random.uniform(-bounds_high, bounds_high)
            trajectory.append(np.array(torques))
        return np.array(trajectory)
def get_random_torques_uinit(self, time_horizon):
trajectory= []
for t in range(time_horizon):
torques = np.random.uniform(-0.3*self.torque_limits, 0.3*self.torque_limits)
trajectory.append(np.array(torques))
return np.array(trajectory)
def get_target_joint_configuration(self, target_position):
self.reset()
des_joint_state = self.sim.calculateInverseKinematics(self.robot_id,
self.ee_idx,
np.array(target_position), jointDamping = [0.1 for i in range(self.n_dofs)])
return np.asarray(des_joint_state)
def reset(self, joint_pos=None, joint_vel=None):
if joint_vel is None:
joint_vel = list(np.zeros(self.n_dofs))
if joint_pos is None:
joint_pos = list(np.zeros(self.n_dofs))
for i in range(self.n_dofs):
self.sim.resetJointState(bodyUniqueId=self.robot_id,
jointIndex=self.controlled_joints[i],
targetValue=joint_pos[i],
targetVelocity=joint_vel[i])
self.sim.stepSimulation()
self.cur_joint_pos = self.get_current_joint_pos()
self.cur_joint_vel = self.get_current_joint_vel()
self.curr_ee = self.get_current_ee_state()
return np.hstack([self.get_current_joint_pos(),self.get_current_joint_vel()])
def move_to_joint_positions(self, joint_pos, joint_vel=None):
if joint_vel is None:
joint_vel = [0]*len(joint_pos)
for i in range(self.n_dofs):
self.sim.resetJointState(bodyUniqueId=self.robot_id,
jointIndex=self.controlled_joints[i],
targetValue=joint_pos[i],
targetVelocity=joint_vel[i])
self.sim.stepSimulation()
self.cur_joint_pos = self.get_current_joint_pos()
self.cur_joint_vel = self.get_current_joint_vel()
self.curr_ee = self.get_current_ee_state()
return np.hstack([self.cur_joint_pos,self.cur_joint_vel])
def get_MassM(self,angles):
for link_idx in self.controlled_joints:
self.sim.changeDynamics(self.robot_id, link_idx, linearDamping=0.0, angularDamping=0.0, jointDamping=0.0)
cur_joint_angles = list(angles)
mass_m = self.sim.calculateMassMatrix(bodyUniqueId=self.robot_id,
objPositions = cur_joint_angles)
return np.array(mass_m)
def get_F(self,angles,vel):
for link_idx in self.controlled_joints:
self.sim.changeDynamics(self.robot_id, link_idx, linearDamping=0.0, angularDamping=0.0, jointDamping=0.0)
cur_joint_angles = list(angles)
cur_joint_vel = list(vel)
torques = self.sim.calculateInverseDynamics(self.robot_id,
cur_joint_angles,
cur_joint_vel,
                                                    [0] * self.n_dofs)  # zero desired acceleration per controlled joint
return np.asarray(torques)
def joint_angles(self):
return self.cur_joint_pos
def joint_velocities(self):
return self.cur_joint_vel
def forwad_kin(self,state):
return self.endeffector_pos()
def endeffector_pos(self):
return self.curr_ee
def get_target_ee(self, state):
for i in range(self.n_dofs):
self.sim.resetJointState(bodyUniqueId=self.robot_id,
jointIndex=self.controlled_joints[i],
targetValue=state[i],
targetVelocity=0.0)
self.sim.stepSimulation()
ls = self.sim.getLinkState(self.robot_id, self.ee_idx)[0]
return ls
def reset_then_step(self, des_joint_state, torque):
# for link_idx in self.controlled_joints:
# self.sim.changeDynamics(self.robot_id, link_idx, linearDamping=0.0, angularDamping=0.0, jointDamping=0.0)
for i in range(self.n_dofs):
self.sim.resetJointState(bodyUniqueId=self.robot_id,
jointIndex=self.controlled_joints[i],
targetValue=des_joint_state[i],
targetVelocity=des_joint_state[(i+self.n_dofs)])
return self.apply_joint_torque(torque)[0]
def step_model(self,state,torque):
return self.sim_step(state,torque)
def sim_step(self,state,torque):
for link_idx in self.controlled_joints:
self.sim.changeDynamics(self.robot_id, link_idx, linearDamping=0.0, angularDamping=0.0, jointDamping=0.0)
if str(state.dtype).startswith('torch'):
state = state.clone().detach().numpy()
if str(torque.dtype).startswith('torch'):
torque = torque.clone().detach().numpy()
return self.reset_then_step(state,torque)
def step(self,state,torque):
for link_idx in self.controlled_joints:
self.sim.changeDynamics(self.robot_id, link_idx, linearDamping=0.0, angularDamping=0.0, jointDamping=0.0)
if str(state.dtype).startswith('torch'):
state = state.clone().detach().numpy()
if str(torque.dtype).startswith('torch'):
torque = torque.clone().detach().numpy()
return self.reset_then_step(state,torque)
def apply_joint_torque(self, torque):
        self.grav_comp = self.inverse_dynamics([0] * self.n_dofs)  # gravity compensation at zero desired acceleration
torque = torque + self.grav_comp
full_torque = torque.copy()
#torque = torque.clip(-self.torque_limits, self.torque_limits)
self.sim.setJointMotorControlArray(bodyIndex=self.robot_id,
jointIndices=self.controlled_joints,
controlMode=pybullet.TORQUE_CONTROL,
forces=torque)
self.sim.stepSimulation()
cur_joint_states = self.sim.getJointStates(self.robot_id, self.controlled_joints)
cur_joint_angles = [cur_joint_states[i][0] for i in range(self.n_dofs)]
cur_joint_vel = [cur_joint_states[i][1] for i in range(self.n_dofs)]
next_state = cur_joint_angles + cur_joint_vel
ls = list(self.sim.getLinkState(self.robot_id, self.ee_idx)[0])
self.cur_joint_pos = self.get_current_joint_pos()
self.cur_joint_vel = self.get_current_joint_vel()
self.curr_ee = self.get_current_ee_state()
return np.hstack([self.cur_joint_pos,self.cur_joint_vel]),self.curr_ee
def get_current_ee_state(self):
ee_state = self.sim.getLinkState(self.robot_id, self.ee_idx)
return np.array(ee_state[0])
def get_current_joint_pos(self):
cur_joint_states = self.sim.getJointStates(self.robot_id, self.controlled_joints)
cur_joint_angles = [cur_joint_states[i][0] for i in range(self.n_dofs)]
return np.array(cur_joint_angles)
def get_current_joint_vel(self):
cur_joint_states = self.sim.getJointStates(self.robot_id, self.controlled_joints)
cur_joint_vel = [cur_joint_states[i][1] for i in range(self.n_dofs)]
return np.array(cur_joint_vel)
def get_current_joint_state(self):
cur_joint_states = self.sim.getJointStates(self.robot_id, self.controlled_joints)
cur_joint_angles = [cur_joint_states[i][0] for i in range(self.n_dofs)]
cur_joint_vel = [cur_joint_states[i][1] for i in range(self.n_dofs)]
return np.hstack([cur_joint_angles, cur_joint_vel])
def get_ee_jacobian(self):
cur_joint_states = self.sim.getJointStates(self.robot_id, self.controlled_joints)
cur_joint_angles = [cur_joint_states[i][0] for i in range(self.n_dofs)]
cur_joint_vel = [cur_joint_states[i][1] for i in range(self.n_dofs)]
bullet_jac_lin, bullet_jac_ang = self.sim.calculateJacobian(
bodyUniqueId=self.robot_id,
linkIndex=self.ee_idx,
localPosition=[0, 0, 0],
objPositions=cur_joint_angles,
objVelocities=cur_joint_vel,
objAccelerations=[0] * self.n_dofs,
)
return np.asarray(bullet_jac_lin), np.asarray(bullet_jac_ang)
def inverse_dynamics(self, des_acc):
for link_idx in self.controlled_joints:
self.sim.changeDynamics(self.robot_id, link_idx, linearDamping=0.0, angularDamping=0.0, jointDamping=0.0)
cur_joint_states = self.sim.getJointStates(self.robot_id, self.controlled_joints)
cur_joint_angles = [cur_joint_states[i][0] for i in range(self.n_dofs)]
cur_joint_vel = [cur_joint_states[i][1] for i in range(self.n_dofs)]
torques = self.sim.calculateInverseDynamics(self.robot_id,
cur_joint_angles,
cur_joint_vel,
des_acc)
return np.asarray(torques)
def detect_collision(self):
return False
def return_grav_comp_torques(self):
return 0.0
def get_pred_error(self,x,u):
return np.zeros(len(u))
def sim_step_un(self,x,u):
return np.zeros(len(u)),np.zeros(len(u))
def get_gravity_comp(self):
return 0.0
class BulletSimulationFromURDF(BulletSimulation):
def __init__(self, rel_urdf_path, gui, controlled_joints, ee_idx, torque_limits, target_pos):
super(BulletSimulationFromURDF, self).__init__(gui, controlled_joints, ee_idx, torque_limits, target_pos)
urdf_path = os.getcwd()+'/envs/'+rel_urdf_path
print("loading urdf file: {}".format(urdf_path))
self.robot_id = self.sim.loadURDF(urdf_path, basePosition=[-0.5, 0, 0.0], useFixedBase=True)
self.n_dofs = len(controlled_joints)
pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())
#self.planeId = pybullet.loadURDF("plane.urdf")
self.sim.resetBasePositionAndOrientation(self.robot_id,[-0.5,0,0.0],[0,0,0,1])
self.sim.setGravity(0, 0, -9.81)
dt = 1.0/240.0
self.dt = dt
self.sim.setTimeStep(dt)
self.sim.setRealTimeSimulation(0)
self.sim.setJointMotorControlArray(self.robot_id,
self.controlled_joints,
pybullet.VELOCITY_CONTROL,
forces=np.zeros(self.n_dofs))
class BulletSimulationFromMJCF(BulletSimulation):
def __init__(self, rel_mjcf_path, gui, controlled_joints, ee_idx, torque_limits):
super(BulletSimulationFromMJCF, self).__init__(gui, controlled_joints, ee_idx, torque_limits, None)
        # resolve the MJCF path relative to this file unless an absolute path is given
        if rel_mjcf_path[0] != os.sep:
            xml_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_mjcf_path)
        else:
            xml_path = rel_mjcf_path
        print("loading this mjcf file: {}".format(xml_path))
self.world_id, self.robot_id = self.sim.loadMJCF(xml_path)
pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())
print(pybullet_data.getDataPath())
self.planeId = pybullet.loadURDF("plane.urdf")
#self.cubeId = pybullet.loadURDF("sphere_small.urdf", [0.02534078, -0.19863741, 0.01]) #0.02534078, -0.19863741 0.10534078, 0.1663741
self.n_dofs = len(controlled_joints)
self.sim.setGravity(0, 0, -9.81)
dt = 1.0/100.0
self.dt = dt
self.sim.setTimeStep(dt)
self.sim.setRealTimeSimulation(0)
self.sim.setJointMotorControlArray(self.robot_id,
self.controlled_joints,
pybullet.VELOCITY_CONTROL,
forces=np.zeros(self.n_dofs))
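# Minimal usage sketch (illustrative only: the URDF path, joint indices and
# torque limits below are placeholders, not assets shipped with this file):
#   sim = BulletSimulationFromURDF('mujoco_robots/arm.urdf', gui=False,
#                                  controlled_joints=[0, 1, 2], ee_idx=3,
#                                  torque_limits=np.array([5.0, 5.0, 5.0]),
#                                  target_pos=[0.3, 0.0, 0.2])
#   state = sim.reset()
#   next_state, ee_pos = sim.apply_joint_torque(np.zeros(3))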
|
[
"sarahbechtle@googlemail.com"
] |
sarahbechtle@googlemail.com
|
91766d1f77c32ec4c16e9725f4ffebca650eff16
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/GData/GDWhenClass.py
|
fa46cb618eef4de0a00aea3add814235951fb549
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554
| 2020-06-06T04:15:00
| 2020-06-06T04:15:00
| 269,693,287
| 8
| 2
| null | 2020-06-05T15:57:54
| 2020-06-05T15:57:54
| null |
UTF-8
|
Python
| false
| false
| 4,838
|
py
|
# encoding: utf-8
# module gi.repository.GData
# from /usr/lib64/girepository-1.0/GData-0.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gio as __gi_repository_Gio
import gobject as __gobject
class GDWhenClass(__gi.Struct):
"""
:Constructors:
::
GDWhenClass()
"""
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
parent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved0 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(GDWhenClass), '__module__': 'gi.repository.GData', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'GDWhenClass' objects>, '__weakref__': <attribute '__weakref__' of 'GDWhenClass' objects>, '__doc__': None, 'parent': <property object at 0x7fd5e17433b0>, '_g_reserved0': <property object at 0x7fd5e17434a0>, '_g_reserved1': <property object at 0x7fd5e1743590>})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(GDWhenClass)
|
[
"ttys3@outlook.com"
] |
ttys3@outlook.com
|
429886a3d1302e9f6f99ed2f817c67cba9e96e1e
|
0602c50eb27021751502022ad98d8a21820de9ff
|
/src/solution/p122-efficient exponentiation/p122.py
|
4b8d92eb111f4b3b6d0700f504b194dea967d1ec
|
[] |
no_license
|
glimmercn/Euler-Project
|
c34b4ac787c1926e23d289bd699282588b04ee87
|
d56249f274d0df74cb4465e25bd24ad1e36bb956
|
refs/heads/master
| 2021-01-25T07:39:57.640527
| 2014-03-31T22:13:34
| 2014-03-31T22:13:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
'''
Created on 2013-3-27
@author: kan
'''
import numpy as np
l = [1]
s = [l]
idx = 0
expons = np.zeros(201)
expons[0:2] = 1
dic = set()
while not np.all(expons):
for i in range(len(s[idx])):
for j in range(i, len(s[idx])):
if 201 > s[idx][i] + s[idx][j] > s[idx][-1] :
new = s[idx][i] + s[idx][j]
p1 = s[idx] + [new]
if str(p1) not in dic:
s.append(p1)
dic.add(str(p1))
if expons[new] == 0:
expons[new] = len(p1) - 1
idx += 1
print(np.sum(expons))
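# The loop above grows addition chains breadth-first: each chain in s is extended
# by every pairwise sum of its own elements, so expons[k] records the length of
# the first (hence shortest) chain reaching k, i.e. m(k) in Project Euler 122.
# Note that expons[0] and expons[1] are initialised to 1 purely as termination
# sentinels, so the printed total is 2 larger than the sum of m(k) for k = 1..200.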
|
[
"huangkandiy@gmail.com"
] |
huangkandiy@gmail.com
|
893778dab6a8e3cb9a2788d183809792c484e0ce
|
764a543c3c36b22928beceb0bdcc75426679d9b7
|
/articles/migrations/0001_initial.py
|
dff6b4f114f46f6e25a73f9b9f4da755382fd280
|
[] |
no_license
|
kashtanov1111/qwqw
|
18d122b8f23b2e60f9f79302b8e08d947922c561
|
bfb0038f3266c081b957abfbca623be5a43acf0f
|
refs/heads/master
| 2023-08-05T03:29:13.853978
| 2021-09-25T15:00:36
| 2021-09-25T15:00:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
# Generated by Django 3.1.13 on 2021-09-25 14:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('body', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"1kashtanov1111@gmail.com"
] |
1kashtanov1111@gmail.com
|
db802e7f87ce8b5dfce20126b66862776b36bd35
|
86e145ec81f82e54bc6e6ee46e7028232effb996
|
/globals.py
|
8548ae636adef5b51015e6a2a434506a19ed9916
|
[
"MIT"
] |
permissive
|
SamirOmarov/galactic-collision
|
7acca71aab01caa7302a4e31af22eb4aee7ac7ca
|
74694f7efcd8aa7d9aee9e1b30559f4b3275b5b5
|
refs/heads/master
| 2022-07-12T06:46:10.574344
| 2020-01-23T19:36:47
| 2020-01-23T19:36:47
| 234,775,368
| 37
| 12
|
MIT
| 2022-06-22T00:54:06
| 2020-01-18T18:05:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,612
|
py
|
from vpython import vector, color, sqrt, sphere, rate, scene
from math import fsum
from random import gauss
import numpy as np
# CONSTANTS
# Universal gravitational constant
G = 6.673e-11
scene.width = 1300
scene.height = 650
# Solar mass in kg (assume average stellar mass)
SOLAR_MASS = 2.000e30
# Precalculated bounds to solar mass
MIN_SOLAR_MASS = SOLAR_MASS * 0.5
MAX_SOLAR_MASS = SOLAR_MASS * 250
AVG_SOLAR_MASS = SOLAR_MASS * 3.0
# Scale distances for galactic scales
DIST_SCALE = 1e20 # 1e20
# Galactic parameters
MAX_ORBITAL_RADIUS = DIST_SCALE * 10
MIN_ORBITAL_RADIUS = DIST_SCALE * 0.15
MILKY_WAY_GALAXY_THICKNESS = DIST_SCALE * 0.9
ANDROMEDA_GALAXY_THICKNESS = DIST_SCALE * 0.2
# Milky Way contains about 300 billion stars
NUM_STARS_MILKY_WAY = 700
# Andromeda Galaxy contains about 1 trillion (10^12) stars
NUM_STARS_ANDROMEDA = 1400
# Graphical constants
STAR_RADIUS = 0.025
dt = 1e17
# FUNCTIONS
# Limit x between lower and upper
def clamp(x, lower, upper):
return max(min(x, upper), lower)
# Return the force due to gravity on an object
def gravity(mass1, mass2, radius):
return G * mass1 * mass2 / radius**2
# Return the acceleration due to gravity on an object.
def g_accel(mass, radius):
# Limit minimum radius to avoid flinging out too many particles
radius = max(radius, MIN_ORBITAL_RADIUS)
return G * mass / radius / radius
# Calculate acceleration on an object caused by galaxy
def accel(obj, galaxy):
r_galaxy = galaxy.pos - obj.pos
# We have a = F / m = G * m_center / r ^2
return r_galaxy.norm() * g_accel(galaxy.mass, r_galaxy.mag)
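# Quick sanity check (illustrative): gravitational acceleration on a star one
# DIST_SCALE away from a core holding every Milky Way star in this model.
if __name__ == '__main__':
    core_mass = NUM_STARS_MILKY_WAY * AVG_SOLAR_MASS
    print(g_accel(core_mass, DIST_SCALE))   # ~2.8e-17 m/s^2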
|
[
"samir.omarov@gmail.com"
] |
samir.omarov@gmail.com
|
a41a2330895be55c82a376cf0b7add8f53bbbd9a
|
7bdc3d746cc9d6bff78b8a77d06253b7c489c00a
|
/env/lib/python3.6/site-packages/stripe/api_resources/transfer.py
|
0d99a47148b44a60097f2973a84939872e558f5b
|
[
"MIT"
] |
permissive
|
Ao99/e-commerce-django
|
fadc0de705f67b16e8dd82f6cc962fd9efd548d9
|
6fe69a03047619b199c236532f53f7c2cb22a079
|
refs/heads/master
| 2022-12-14T01:56:19.266754
| 2021-09-09T18:18:47
| 2021-09-09T18:18:47
| 229,646,356
| 2
| 2
|
MIT
| 2022-12-08T03:22:13
| 2019-12-22T23:52:46
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 921
|
py
|
from __future__ import absolute_import, division, print_function
from stripe import util
from stripe.api_resources.abstract import CreateableAPIResource
from stripe.api_resources.abstract import ListableAPIResource
from stripe.api_resources.abstract import UpdateableAPIResource
from stripe.api_resources.abstract import custom_method
from stripe.api_resources.abstract import nested_resource_class_methods
@custom_method("cancel", http_verb="post")
@nested_resource_class_methods(
"reversal", operations=["create", "retrieve", "update", "list"]
)
class Transfer(
CreateableAPIResource, ListableAPIResource, UpdateableAPIResource
):
OBJECT_NAME = "transfer"
def cancel(self, idempotency_key=None, **params):
url = self.instance_url() + "/cancel"
headers = util.populate_headers(idempotency_key)
self.refresh_from(self.request("post", url, params, headers))
return self
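# Usage sketch (illustrative; requires a configured API key, and cancel() only
# succeeds on transfers that are still cancelable):
#   import stripe
#   stripe.api_key = "sk_test_..."
#   tr = stripe.Transfer.create(amount=1000, currency="usd", destination="acct_...")
#   tr.cancel()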
|
[
"aodong99@gmail.com"
] |
aodong99@gmail.com
|
13af078c635c2f1334c54bf57a9af89cd72bd397
|
934e6bc1fb3e5c900c9c89c8d667703f23bd82c0
|
/app.py
|
ad20643131f81029dfe78ef5951ea57f16d0eeaf
|
[] |
no_license
|
suneelisk/Word-Cloud
|
e59375eef8fd679fa6685716ac724f97e32b5acf
|
9ca4b52975a30197f89df92341d6615c4156cdcb
|
refs/heads/main
| 2023-06-11T08:28:26.089204
| 2021-06-29T02:55:16
| 2021-06-29T02:55:16
| 375,201,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,736
|
py
|
import streamlit as st
import pandas as pd
import requests
from bs4 import BeautifulSoup
import regex as re
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
st.title('Simple Streamlit App')
st.markdown("""
This app performs Word Cloud
* **Python libraries:** streamlit, pandas, BeautifulSoup, wordcloud ......
* **Need to cantact: ** [SeaportAI.com](https://seaportai.com/contact-us/).
""")
st.set_option('deprecation.showPyplotGlobalUse', False)
#file_bytes = st.file_uploader("Upload a file", type="csv")
#if file_bytes is not None:
st.sidebar.header("Select Link")
links = ["https://seaportai.com/blog-predictive-maintenance/",
"https://seaportai.com/2019/04/22/healthcare-analytics/",
"https://seaportai.com/blog-rpameetsai/",
"https://seaportai.com/covid-19/",
"https://seaportai.com/industry4-0/"]
URL = st.sidebar.selectbox('Link', links)
st.sidebar.header("Select No.of words you want to display")
words = st.sidebar.selectbox("No.of Words", range(10,1000,10))
if URL is not None:
r = requests.get(URL)
soup = BeautifulSoup(r.content, 'html.parser')
table = soup.find('div', attrs = {'id':'main-content'})
text = table.text
cleaned_text = re.sub('\t', "", text)
cleaned_texts = re.split('\n', cleaned_text)
cleaned_textss = "".join(cleaned_texts)
#st.write(cleaned_textss)
st.write("Word Cloud Plot")
stopwords = set(STOPWORDS)
wordcloud = WordCloud(background_color="white", max_words=words,
stopwords=stopwords).generate(cleaned_textss)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()
st.pyplot()
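# Launch locally with: streamlit run app.py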
|
[
"noreply@github.com"
] |
suneelisk.noreply@github.com
|
71f5d3ffbdcb46269583870c43a0b79100a19b81
|
2c9c3dacea395e14e7749a96acd4859bb7f1ddf2
|
/Function_temp.py
|
9c5758247252acba205b4dc6c6729178d9b21a57
|
[] |
no_license
|
Yangzhichao95/qwertyuiop
|
b1370607a75df29206dc278f81c0d441dd1f277d
|
702041535ecdb3cb32374f204b7dad10d8178f37
|
refs/heads/master
| 2020-03-21T22:10:08.188261
| 2018-07-10T02:52:24
| 2018-07-10T02:52:24
| 139,108,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,951
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 1 10:02:52 2018
@author: 25008
Temp file for the functions that match project names, contract names and
contract amounts in listed-company announcements.
"""
import re
def refine_output_contract(contract):
    # normalize the final output format of contract names
contract_return = []
for i in contract:
contract_temp = re.sub('<', '<', i)
contract_temp = re.sub('<', '<', contract_temp)
contract_temp = re.sub('>', '>', contract_temp)
contract_temp = re.sub('(', '(', contract_temp)
contract_temp = re.sub(')', ')', contract_temp)
contract_temp = re.sub('《|》|/', '', contract_temp)
contract_return.append(contract_temp.capitalize())
return (contract_return)
def refine_output_project(project):
    # normalize the final output format of project names
project_return = []
for i in project:
project_temp = re.sub('“|”|/', '', i)
project_temp = re.sub('(', '(', project_temp)
project_temp = re.sub(')', ')', project_temp)
if re.search('编号', project_temp):
project_temp = re.sub('\(\w*编号.+\)', '', project_temp)
if len(project_temp) > 1 and re.search('\d', project_temp[len(project_temp) - 1]):
project_temp = project_temp[:(len(project_temp) - 1)]
project_return.append(project_temp.capitalize())
return(project_return)
# Chinese numeral lookup table
chs_arabic_map = {u'〇' : 0, u'一' : 1, u'二' : 2, u'三' : 3, u'四' : 4,
u'五' : 5, u'六' : 6, u'七' : 7, u'八' : 8, u'九' : 9,
u'十' : 10, u'百' : 100, u'千' : 1000, u'万' : 10000,
u'零' : 0, u'壹' : 1, u'贰' : 2, u'叁' : 3, u'肆' : 4,
u'伍' : 5, u'陆' : 6, u'柒' : 7, u'捌' : 8, u'玖' : 9,
u'拾' : 10, u'佰' : 100, u'仟' : 1000, u'萬' : 10000,
u'亿' : 10**8, u'億' : 10**8, u'幺' : 1}
def convert_chinese_digits_to_arabic(chinese_digits, chs_arabic_map):
"""
将中文表述的数字转换成阿拉伯数字
:param chinese_digits: 待转换的中文表达数字
:return:转换结果
"""
# 如果chinese_digits中有角分,先将元与角分分离
yuan_index = chinese_digits.index('元')
jiao_fen = chinese_digits[(yuan_index+1):]
jiao = 0
fen = 0
if len(jiao_fen):
jiao_fen_reg = re.search('([零一二三四五六七八九〇壹贰叁肆伍陆柒捌玖]角)?([零一二三四五六七八九〇壹贰叁肆伍陆柒捌玖]分)?', jiao_fen)
if jiao_fen_reg.group(1):
jiao = chs_arabic_map[jiao_fen_reg.group(1)[0]]/10
if jiao_fen_reg.group(2):
fen = chs_arabic_map[jiao_fen_reg.group(2)[0]]/100
jiaofen = jiao + fen
result = 0
tmp = 0
hnd_mln = 0
for count in range(len(chinese_digits[:(yuan_index+1)])):
curr_char = chinese_digits[count]
curr_digit = chs_arabic_map.get(curr_char, None)
if curr_digit is not None:
if curr_digit == 10**8:
result = result + tmp
result = result * curr_digit
hnd_mln = hnd_mln*10**8 + result
result = 0
tmp = 0
elif curr_digit == 10**4:
result = result + tmp
result = result * curr_digit
tmp = 0
elif curr_digit >=10:
tmp = 1 if tmp ==0 else tmp
result = result + curr_digit*tmp
tmp = 0
            else:
                tmp = tmp*10 + curr_digit
        else:
            # a non-numeral character (e.g. '元') ends the integer part; fold the
            # pending digits in tmp and the hundred-million part into the result
            return (str(result + tmp + hnd_mln + jiaofen) + '元')
result = result + tmp
result = result + hnd_mln
return (str(result + jiaofen) + '元')
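# Illustrative conversions:
#   convert_chinese_digits_to_arabic('三万五千元', chs_arabic_map)   -> '35000元'
#   convert_chinese_digits_to_arabic('一亿二千万元', chs_arabic_map) -> '120000000元'
#   convert_chinese_digits_to_arabic('五元五角', chs_arabic_map)     -> '5.5元'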
def refine_money(str_money):
    # normalize a money string into a numeric amount in yuan
if re.search('[零一二三四五六七八九〇壹贰叁肆伍陆柒捌玖拾佰仟萬億幺]', str_money):
str_money = convert_chinese_digits_to_arabic(str_money, chs_arabic_map)
money_raw = re.search('((\d*)(,|,)?)*(\d+)(\.?)(\d*)', str_money).group()
money_raw = float(re.sub(',|,', '', money_raw))
unit = re.search('亿|惩|千万|百万|十万|万|千|百|十|元', str_money).group()
if re.search('\.(\d*)', str(money_raw)):
num_decimal = len(re.search('\.(\d*)', str(money_raw)).group(1))
else:
num_decimal = 0
if unit == '亿' or unit == '惩':
money = round(money_raw*100000000, max(num_decimal-8, 0))
elif unit == '千万':
money = round(money_raw*10000000, max(num_decimal-7, 0))
elif unit == '百万':
money = round(money_raw*1000000, max(num_decimal-6, 0))
elif unit == '十万':
money = round(money_raw*100000, max(num_decimal-5, 0))
elif unit == '万':
money = round(money_raw*10000, max(num_decimal-4, 0))
elif unit == '千':
money = round(money_raw*1000, max(num_decimal-3, 0))
elif unit == '百':
money = round(money_raw*100, max(num_decimal-2, 0))
elif unit == '十':
money = round(money_raw*10, max(num_decimal-1, 0))
else:
money = money_raw
if money < 5000:
return ''
if float(money) == int(money):
return (int(money))
else:
return (float(money))
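# Illustrative behaviour:
#   refine_money('1,234.5万元') -> 12345000
#   refine_money('2.3亿元')     -> 230000000
#   refine_money('3,000元')     -> ''   (amounts below 5,000 yuan are discarded)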
def refine_up_low(str_money):
    # for an amount given as a range, extract the upper and lower bounds separately
up_low = [x.group() for x in re.finditer('((\d*)(,|,)?)*(\d+)(\.?)(\d*)', str_money)]
up_money = up_low[0]
low_money = up_low[1]
unit = re.search('亿|千万|百万|十万|万|千|百|十|元', str_money).group()
return (refine_money(up_money + unit), refine_money(low_money + unit))
def refine_partya(partya):
    # clean the intermediate output for Party A
partya = [re.sub('\-', '', x) for x in partya]
#partya = [re.sub('(', '(', x) for x in partya]
#partya = [re.sub(')', ')', x) for x in partya]
return (partya)
def refine_output_partya(partya):
    # clean the final output for Party A
partya = [re.sub('(', '(', x) for x in partya]
partya = [re.sub(')', ')', x) for x in partya]
return (partya)
def refine_output_partyb(partyb):
    # clean the final output for Party B
partyb = [re.sub('(', '(', x) for x in partyb]
partyb = [re.sub(')', ')', x) for x in partyb]
partyb = [re.sub('\(.*\)', '', x) for x in partyb]
partyb = [re.sub('\d|公司全资|全资', '', x) for x in partyb]
    # strip orphan closing parentheses seen in some announcements
partyb = [re.sub('\)|)', '', x) for x in partyb]
return (partyb)
def tiejian_key_value(soup):
    # matcher specific to China Railway Construction (中国铁建) announcements
soupcontent = re.sub('<.+>|\n|\s', '', str(soup))
soupcontent = re.sub('<.+?>', '', soupcontent)
content_split = re.split('一、本|二、本|三、本|四、本|五、本|六、本|七、本|八、本|九、本|十、本|\d、本|\d.本公司', soupcontent)
if type(content_split) is list:
content_split.pop(0)
pat_partya = '公司收到(.+?)(发出的)?中标通知书'
pat_partyb = '(所属子公司|下属子公司|所属|下属|子公司|、|及|和)([\w|(|)|\(|\)]+?)(公司)'
pat_project = '中标(.+?)(,|。|;)'
pat_money = '((\d*)(,|,)?)*(\d+)(\.?)(\d*) *(亿|惩|千万|百万|十万|万|千|百|十)?元'
ob_partya = []
ob_partyb = []
ob_project = []
ob_contract = []
ob_money = []
ob_combo = []
for content in content_split:
if re.search(pat_partya, content):
ob_partya.append(re.search(pat_partya, content).group(1))
else:
ob_partya.append('')
ob_b = re.findall(pat_partyb, content)
ob_b = [x[1] + x[2] for x in ob_b]
ob_b = [x for x in ob_b if len(x) > 5]
if len(ob_b) == 0:
ob_partyb.append('中国铁建股份有限公司')
else:
ob_partyb.append(ob_b[0])
ob_combo.append('、'.join(ob_b[1:]))
content = re.sub('中标通知书', '', content)
if re.search(pat_project, content):
ob_project.append(re.search(pat_project, content).group(1))
else:
ob_project.append('')
if re.search(pat_money, content):
ob_money.append(refine_money(re.search(pat_money, content).group()))
else:
ob_money.append('')
ob_contract.append('')
ob_partya = refine_partya(ob_partya)
return(zip(ob_partya, ob_partyb, ob_project, ob_contract, ob_money, ob_money, ob_combo))
def beiche_key_value(soup, fullname):
    # matcher specific to CNR/CRRC/CSR (中国北车、中国中车、中国南车) announcements
soupcontent = re.sub('<.+>|\n|\s', '', str(soup))
soupcontent = re.sub('<.+?>', '', soupcontent)
content_split = re.split('\d、|\d.本公司', soupcontent)
if type(content_split) is list:
content_split.pop(0)
pat_partya = '与(.+?)签订(了)?'
pat_partyb = '(子公司|、|及|和)([\w|(|)|\(|\)]+?)(公司)'
pat_contract = '签订(了)?([\w|\-|\.|,|,]+?)(合同)'
pat_money = '((\d*)(,|,)?)*(\d+)(\.?)(\d*) *(亿|惩|千万|百万|十万|万|千|百|十)?元'
ob_partya = []
ob_partyb = []
ob_project = []
ob_contract = []
ob_money = []
ob_combo = []
for content in content_split:
if re.search(pat_partya, content):
ob_partya.append(re.search(pat_partya, content).group(1))
else:
ob_partya.append('')
if re.search(pat_contract, content):
contract_raw = re.search(pat_contract, content).group(2) + re.search(pat_contract, content).group(3)
if re.search(pat_money, contract_raw):
                # if the extracted fragment itself contains an amount
ob_money.append(refine_money(re.search(pat_money, contract_raw).group()))
ob_contract.append(re.split('元(的?)', contract_raw)[2])
else:
ob_contract.append(contract_raw)
if re.search(pat_money, content):
ob_money.append(refine_money(re.search(pat_money, content).group()))
ob_b = re.findall(pat_partyb, content)
ob_b = [x[1] + x[2] for x in ob_b]
ob_b = [x for x in ob_b if len(x) > 5]
if len(ob_b) == 0:
ob_partyb.append(fullname)
else:
ob_partyb.append(ob_b[0])
ob_combo.append('、'.join(ob_b[1:]))
        # One Party B signing with several Party A counterparts
if re.search('分别', ob_partya[len(ob_partya)-1]):
partya = ob_partya.pop()
partya = re.sub('分别', '', partya)
            partya = re.split('、|和|以及|及', partya)  # no trailing '|': an empty alternation would split between every character
_ = [ob_partya.append(x) for x in partya]
ob_partyb.pop()
_ = [ob_partyb.append(ob_b[0]) for x in partya]
ob_combo.pop()
_ = [ob_combo.append('') for x in partya]
ob_contract.pop()
contract_raw = re.search(pat_contract, content)
contract = contract_raw.group(2) + contract_raw.group(3)
if re.search(pat_money, contract):
contract = re.split('元(的?)', contract)[2]
contract = re.split('和|及|以及', contract)
if len(contract) == len(partya):
_ = [ob_contract.append(x) for x in contract]
else:
_ = [ob_contract.append(contract[0]) for x in partya]
ob_money.pop()
money = [x.group() for x in re.finditer(pat_money, content)]
if len(money) == len(partya):
_ = [ob_money.append(refine_money(x)) for x in money]
else:
_ = [ob_money.append(refine_money(money[0])) for x in partya]
ob_partya = refine_partya(ob_partya)
_ = [ob_project.append('') for x in ob_partya]
return(zip(ob_partya, ob_partyb, ob_project, ob_contract, ob_money, ob_money, ob_combo))
def find_contract(soup):
    # Locate the contract name
contract = ''
pat_contract = '《.+?》'
soupcontent = re.sub('<.+>|\n | ', '', str(soup))
soupcontent = re.sub('<.+?>', '', soupcontent)
section1 = str(soup.find(id = 'SectionCode_1'))
section1 = re.sub('<.+>|\n | ', '', section1)
    section1 = re.sub('<.+?>', '', section1)
# div = soup.findAll('div')
# re.search('合同名称:([\n]*)([\w|(|)|\(|\)|\-|—|×|\+]+)', soupcontent)
    # 2. Then check section1 for fields wrapped in 《》 book-title marks
if re.findall(pat_contract, section1):
contract = re.findall(pat_contract, section1)
if len(contract) > 0:
contractcopy = contract.copy()
for sub_contract in contractcopy:
if re.search('议案|公告|公示|合同法|备忘录',sub_contract):
contract.remove(sub_contract)
continue
if re.search('合同|协议|[a-zA-Z]', sub_contract) is None:
contract.remove(sub_contract)
if len(contract) > 0:
count = dict()
for i in contract:
if i in count:
count[i] = count[i] + 1
else:
count[i] = 1
return(max(count, key = count.get))
    # 3. Finally, check the full text for fields wrapped in 《》
if re.findall(pat_contract, soupcontent[100:]):
contract = re.findall(pat_contract, soupcontent[100:])
if len(contract) > 0:
contractcopy = contract.copy()
for sub_contract in contractcopy:
if re.search('议案|公告|公示|合同法|备忘录',sub_contract):
contract.remove(sub_contract)
continue
if re.search('合同|协议|[a-zA-Z]', sub_contract) is None:
contract.remove(sub_contract)
if len(contract) > 0:
count = dict()
for i in contract:
if i in count:
count[i] = count[i] + 1
else:
count[i] = 1
return(max(count, key = count.get))
    # 4. Fall back to a keyword search over the full text
if re.search('(签订了|签署了)([\w|(|)|\(|\)|\-|—|×|\+|/|《]+?)(合同|协议|合同书|协议书)', soupcontent):
contract = re.search('(签订了|签署了)([\w|(|)|\(|\)|\-|—|×|\+|/|《]+?)(合同|协议|合同书|协议书)', soupcontent).group(2) + re.search('(签订了|签署了)([\w|(|)|\(|\)|\-|—|×|\+|/|《]+?)(合同|协议|合同书|协议书)', soupcontent).group(3)
if len(contract) > 4:
return(contract)
else:
return('')
return('')
def find_project(soup, contract):
    # Locate the project name
project = ''
soupcontent = re.sub('\n | ', '', soup.get_text())
div = soup.findAll('div')
strline = ''
for line in div[1:]:
        # 1. Check the div's title attribute first
if line.get('title') and ('、项目名称' in line.get('title') or '、工程名称' in line.get('title') or '、中标项目' in line.get('title') or '、采购项目名称' in line.get('title')):
if re.search(':([、\w—\-~#·\(\)()\[\]/\+:]+)', line.get('title')):
project = re.search(':([、\w—\-~#·\(\)()\[\]/\+:]+)', line.get('title')).group(1)
return(project)
else:
strline = line.get_text()
strline = re.sub('\n | ', '', strline)
strline_split = re.split('\n', strline)
strline_split = [x for x in strline_split if len(x) > 0]
if len(strline_split) == 1 and re.search(',', strline_split[0]) is None:
return (re.sub('。', '',strline_split[0]))
for line in div[1:]:
        # 2. Match '项目名称: ...'-style fields in the div text
if line.get('title') and '项目' in line.get('title'):
strline = re.sub('<.+>|\n | ', '', str(line))
            strline = re.sub('<.+?>', '', strline)
if re.search('(项目名称|工程名称|中标名称|中标内容|中标项目)\w*:(\n)*([、\w—\-~#·\(\)()\[\]/\+:]+)(,|。|;|\n)', strline):
project = re.search('(项目名称|工程名称|中标名称|中标内容|中标项目)\w*:(\n)*([、\w—\-~#·\(\)()\[\]/\+:]+)(,|。|;|\n)', strline).group(3)
if re.search('中标', project) is None:
return (project)
if re.search('(项目名称|工程名称|中标名称|中标内容|中标项目)\w*:(\n)*([、\w—\-~#·\(\)()\[\]/\+:]+)(,|。|;|\n)', soupcontent):
project = re.search('(项目名称|工程名称|中标名称|中标内容|中标项目)\w*:(\n)*([、\w—\-~#·\(\)()\[\]/\+:]+)(,|。|;|\n)', soupcontent).group(3)
if len(project) > 7 and re.search('中标', project) is None:
return (project)
if re.search('为([、|\w|—|\-|~|#|·|\(|\)|(|)|\[|\]]+?)的?预?(中标单位|中标人|中标候选人)', soupcontent):
project = re.search('为([、|\w|—|\-|~|#|·|\(|\)|(|)|\[|\]]+?)的?预?(中标单位|中标人|中标候选人)', soupcontent).group(1)
if len(project) > 7 and re.search('中标|推荐', project) is None:
return (project)
if re.search('中标项目为([、|\w|—|\-|~|#|·|\(|\)|(|)|\[|\]]+?)(项目|工程|标段|采购.标|采购|活动|,|。)', soupcontent):
content = re.search('中标项目为([、|\w|—|\-|~|#|·|\(|\)|(|)|\[|\]]+?)(项目|工程|标段|采购.标|采购|活动|,|。)', soupcontent)
if re.search(',|。', content.group(2)) is None:
project = content.group(1) + content.group(2)
else:
project = content.group(1)
if len(project) > 7 and re.search('中标', project) is None:
return (project)
if re.search('“([、\w—\-~#·\(\)()\[\]/\+:]+)”(.{2})', soupcontent):
loc = [[i.start(), i.end()] for i in re.finditer('“([、\w—\-~#·\(\)()\[\]/\+:]+)”(\w{2})', soupcontent)]
content = []
for j in range(len(loc)):
if j == 0:
content.append(soupcontent[max(0, loc[j][0]-5) : loc[j][1]])
else:
content.append(soupcontent[max(0, loc[j][0]-5, loc[j-1][1]) : loc[j][1]])
content = [re.search('“([、\w—\-~#·\(\)()\[\]/\+:]+)(项目|标段|采购.标|采购|工程|”项目|”)', x).group() for x in content if re.search('以下简称|公示|公告|名单', x) is None and re.search('“([、\w—\-~#·\(\)()\[\]/\+:]+)(项目|标段|采购.标|采购|工程|”项目|”)', x)]
content = [x for x in content if re.search('项目|标段|采购.标|采购|工程', x)]
if len(content) > 0 and len(content[0]) > 7:
            # Only keep the first match
return(content[0])
if re.search('中标([、\w—\-~#·\(\)()\[\]/\+:]+)(项目|工程|标段|采购.标|采购|活动|))', soupcontent):
project_raw = re.findall('中标([、\w—\-~#·\(\)()\[\]/\+:]+)(项目|工程|标段|采购.标|采购|活动|))', soupcontent)
project_raw = [x[0] + x[1] for x in project_raw]
project_raw = [x for x in project_raw if len(x) > 7 and re.search('通知书', x) is None]
if len(project_raw) > 0:
len_project_raw = [len(x) for x in project_raw]
return(project_raw[len_project_raw.index(max(len_project_raw))])
if re.search('在([、\w—\-~#·\(\)()\[\]/\+:]+?)(项目|工程|标段|采购.标|采购)中', soupcontent):
content = re.search('在([、\w—\-~#·\(\)()\[\]/\+:]+?)(项目|工程|标段|采购.标|采购)中', soupcontent)
project = content.group(1) + content.group(2)
if len(project) > 7:
return (project)
return('')
def find_money(soup):
    # Locate the monetary amount
#pat_up_low_foreign = '((\d*)(,|,)?)*(\d+)(\.?)(\d*) *(—|\-|~)((\d*)(,|,)?)*(\d+)\.?(\d*)(亿|千万|百万|十万|万|千|百|十)?\w?元'
pat_foreign = '((?![百千万佰仟萬亿億])[零一二三四五六七八九十百千万〇壹贰叁肆伍陆柒捌玖拾佰仟萬亿億幺]+(元|角|分))|((\d*)(,|,)?)*(\d+)(\.?)(\d*)(亿|千万|百万|十万|万|千|百|十)?\w?元'
pat_foreign = '((?![百千万佰仟萬亿億])(零|一|二|三|四|五|六|七|八|九|十|百|千|万|〇|壹|贰|叁|肆|伍|陆|柒|捌|玖|拾|佰|仟|萬|亿|億|幺)+(元)(零|一|二|三|四|五|六|七|八|九|十|百|千|万|〇|壹|贰|叁|肆|伍|陆|柒|捌|玖|拾|佰|仟|萬|亿|億|幺)?(角)?(零|一|二|三|四|五|六|七|八|九|十|百|千|万|〇|壹|贰|叁|肆|伍|陆|柒|捌|玖|拾|佰|仟|萬|亿|億|幺)?(分)?)|((\d*)(,|,)?)*(\d+)(\.?)(\d*)(亿|千万|百万|十万|万|千|百|十)?\w?元'
#pat = '([零一二三四五六七八九十百千万〇壹贰叁肆伍陆柒捌玖拾佰仟萬亿億幺]+?|((\d*)(,|,)?)*(\d+)(\.?)(\d*)(亿|千万|百万|十万|万|千|百|十)?)\w?元'
#pat_up_low = '((\d*)(,|,)?)*(\d+)(\.?)(\d*)(—|\-|~)((\d*)(,|,)?)*(\d+)\.?(\d*) *(亿|千万|百万|十万|万|千|百|十)?元'
#pat_money = '((?![百千万佰仟萬亿億])[零一二三四五六七八九十百千万〇壹贰叁肆伍陆柒捌玖拾佰仟萬亿億幺]+(元|角|分))|((\d*)(,|,)?)*(\d+)(\.?)(\d*)(亿|千万|百万|十万|万|千|百|十)?元'
pat_money = '((?![百千万佰仟萬亿億])(零|一|二|三|四|五|六|七|八|九|十|百|千|万|〇|壹|贰|叁|肆|伍|陆|柒|捌|玖|拾|佰|仟|萬|亿|億|幺)+(元)(零|一|二|三|四|五|六|七|八|九|十|百|千|万|〇|壹|贰|叁|肆|伍|陆|柒|捌|玖|拾|佰|仟|萬|亿|億|幺)?(角)?(零|一|二|三|四|五|六|七|八|九|十|百|千|万|〇|壹|贰|叁|肆|伍|陆|柒|捌|玖|拾|佰|仟|萬|亿|億|幺)?(分)?)|((\d*)(,|,)?)*(\d+)(\.?)(\d*)(亿|千万|百万|十万|万|千|百|十)?元'
soupcontent = re.sub('<.+>|\n | ', '', soup.get_text())
soupcontent = re.sub('=\d+', '', soupcontent) # For 250247.html
soupcontent = re.sub('编码:\d+', '', soupcontent)
section1 = str(soup.find(id = 'SectionCode_1'))
section1 = re.sub('<.+>|\n | ', '', section1)
section1 = re.sub('<.+?>', '', section1)
section1 = re.sub('=\d+', '', section1) # For 250247.html
section1 = re.sub('编码:\d+', '', section1)
    # 1. First, match contracts whose upper and lower amount bounds differ (this branch is currently disabled)
#if re.search(pat_up_low_foreign, soupcontent):
# if re.search(pat_up_low, soupcontent):
# up_low_raw = re.search(pat_up_low, soupcontent).group()
# up_money, low_money = refine_up_low(up_low_raw)
#else:
# up_low_raw = re.search(pat_up_low_foreign, soupcontent).group()
# up_money, low_money = refine_up_low(up_low_raw)
#return (up_money, low_money)
    # 2. Match amounts whose upper and lower bounds are the same
div = soup.findAll('div')
strline = ''
for line in div[1:]:
        # 1. Check the div's title attribute first
if line.get('title') and re.search('((中标|合同)总?(金额|价))', line.get('title')):
if re.search(':' + pat_money, line.get('title')):
money = re.search(':' + pat_money, line.get('title')).group()
return(refine_money(money), refine_money(money))
else:
strline = line.get_text()
strline = re.sub('\n | ', '', strline)
strline_split = re.split('\n', strline)
strline_split = [x for x in strline_split if len(x) > 0]
if len(strline_split) == 1 and re.search(pat_foreign, strline):
money = refine_money(re.search(pat_foreign, strline).group())
return(money, money)
if [x.start() for x in re.finditer(pat_foreign, section1)]:
loc = [[x.start(), x.end()] for x in re.finditer(pat_money, section1)]
if len(loc) == 0:
loc = [[x.start(), x.end()] for x in re.finditer(pat_foreign, section1)]
if len(loc) > 1:
money = []
for j in range(len(loc)):
if j == 0:
money.append(section1[max(0,loc[j][0]-20):loc[j][1]])
else:
money.append(section1[max(0, loc[j][0]-20, loc[j-1][1]) : loc[j][1]])
moneycopy = money.copy()
for i in range(len(moneycopy)):
if re.search('资本|资产|收入|利润|合计|总共|收款|服务费', moneycopy[i]) and re.search('中标', moneycopy[i]) is None:
money.remove(moneycopy[i])
continue
if re.search('((中标|合同)总?(金额|价))', moneycopy[i]):
if i == 0 or loc[i][0]-20 > loc[i-1][1]:
raw_return = refine_money(re.search(pat_foreign, moneycopy[i][20:]).group())
else:
raw_return = refine_money(re.search(pat_foreign, moneycopy[i][(loc[i][0]-loc[i-1][1]):]).group())
return (raw_return, raw_return)
money = [re.search(pat_foreign, x).group() for x in money if re.search(pat_foreign, x)]
if len(money) == 0:
return('', '')
else:
money = [refine_money(x) for x in money if type(refine_money(x)) is not str]
if len(money) == 0:
return('', '')
else:
return (max(money), max(money))
else:
money = section1[max(0,loc[0][0]-10):min(len(section1), loc[0][1])]
if re.search('资本|资产|收入|利润|合计|总共|收款|服务费', money) and re.search('中标', money) is None:
return('', '')
else:
money = refine_money(re.search(pat_foreign, money[10:]).group())
return (money, money)
elif [x.start() for x in re.finditer(pat_foreign, soupcontent)]:
loc = [[x.start(), x.end()] for x in re.finditer(pat_money, soupcontent)]
if len(loc) == 0:
loc = [[x.start(), x.end()] for x in re.finditer(pat_foreign, soupcontent)]
if len(loc) > 1:
money = []
for j in range(len(loc)):
if j == 0:
money.append(soupcontent[max(0,loc[j][0]-20):loc[j][1]])
else:
money.append(soupcontent[max(0, loc[j][0]-20, loc[j-1][1]) : loc[j][1]])
moneycopy = money.copy()
for i in range(len(moneycopy)):
if re.search('资本|资产|收入|利润|合计|总共', moneycopy[i]) and re.search('中标', moneycopy[i]) is None:
money.remove(moneycopy[i])
continue
if re.search('((中标|合同)总?(金额|价))', moneycopy[i]):
if i == 0 or loc[i][0]-20 > loc[i-1][1]:
raw_return = refine_money(re.search(pat_foreign, moneycopy[i][20:]).group())
else:
raw_return = refine_money(re.search(pat_foreign, moneycopy[i][(loc[i][0]-loc[i-1][1]):]).group())
return (raw_return, raw_return)
money = [re.search(pat_foreign, x).group() for x in money if re.search(pat_foreign, x)]
if len(money) == 0:
return('', '')
else:
money = [refine_money(x) for x in money if type(refine_money(x)) is not str]
if len(money) == 0:
return('', '')
else:
return (max(money), max(money))
else:
money = soupcontent[max(0,loc[0][0]-10):min(len(soupcontent), loc[0][1])]
if re.search('资本|资产|收入|利润|合计|总共', money) and re.search('中标', money) is None:
return('', '')
else:
money = refine_money(re.search(pat_foreign, money[10:]).group())
return (money, money)
else:
return('', '')
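
# --- Hedged usage sketch (editor's addition, not part of the original module). The file
# name below is hypothetical; the helpers only assume a BeautifulSoup tree of a bid-award
# announcement, plus the `re`/`bs4` imports the functions above already rely on.
if __name__ == '__main__':
    from bs4 import BeautifulSoup
    with open('announcement.html', encoding='utf-8') as fh:
        soup = BeautifulSoup(fh.read(), 'html.parser')
    contract = find_contract(soup)
    print(contract)
    print(find_project(soup, contract))
    print(find_money(soup))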
|
[
"y1995z02c07@gmail.com"
] |
y1995z02c07@gmail.com
|
72ace06c05b70250bf6b0869b66f53f909901aa2
|
fc68fcf50fa6b62c346b6ee79718f59a9fdb7530
|
/Semantic-Segmentation/preprocess_semantic.py
|
c786576db67010b06c0e317251aeb0bea2165347
|
[] |
no_license
|
Jeffrey-Ede/NN
|
2d5be7f2fc9c3fb7bc1a8fec5dc20184b0d9b57c
|
30370ea408f8f5e8f6c35448a847e1b171b372a4
|
refs/heads/master
| 2021-09-13T03:03:06.032555
| 2018-04-24T08:06:34
| 2018-04-24T08:06:34
| 125,896,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,889
|
py
|
import cv2
import numpy as np
import os
def sigmoidalise(name, size):
def fancy_sigmoidaliser(img, scale=3):
'''
        Rescale intensities by projecting them with a sigmoid based on their variance.
        `scale` is the number of standard deviations to divide the image's deviations from the mean by.
'''
mean = np.mean(img)
sigma = np.sqrt(np.var(img))
def sigmoidalise(x):
res = (x-mean) / (scale*sigma)
return np.exp(res) / (np.exp(res)+1.0)
        sigmoidaliser = np.vectorize(sigmoidalise, otypes=[float])  # np.float is deprecated; plain float is equivalent
return sigmoidaliser(img.copy())
img = cv2.imread(name, cv2.IMREAD_UNCHANGED)
#Crop image down to size, from the top left corner
img = img[0:size, 0:size]
return fancy_sigmoidaliser(img)
#cv2.imshow("sdfsd", img)
#cv2.waitKey(0)
if __name__ == "__main__":
dir = "Documents/semantic_stacks/lacy_carbon/raw1/"
new_dir = "Documents/semantic_stacks/lacy_carbon/train/"
    def multiple_mags(raw_path, proc_path, sizes, save_size):
        '''Sigmoidalise an image at multiple magnification scales'''
        for size in sizes:
            #Apply variance-based sigmoidalisation to reduce the effect of outliers on the network
            sigmoidalisation = sigmoidalise(raw_path, size)
            #Resize the sigmoidalised crop to the specified save size
            if size != save_size:
                sigmoidalisation = cv2.resize(sigmoidalisation, (save_size, save_size),
                                              interpolation=cv2.INTER_AREA if size < save_size else cv2.INTER_CUBIC)
            #Rescale to 8 bit and save one file per magnification so the crops do not overwrite each other
            root, ext = os.path.splitext(proc_path)
            cv2.imwrite(root + "-" + str(size) + ext, (255.0 * sigmoidalisation).astype(np.uint8))
#Sigmoidalise every file in the directory and save the sigmoidalisations
sizes = [667, 512, 384, 256, 192, 128]
save_size = 256
for f in os.listdir(dir):
multiple_mags(dir+f, new_dir+f, sizes, save_size)
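
    # --- Editor's hedged aside (not in the original script): np.vectorize above makes a
    # Python-level call per pixel; the same variance-based sigmoid can be written as one
    # vectorised NumPy expression, which is far faster on large images.
    def fast_sigmoidalise(img, scale=3):
        z = (img - np.mean(img)) / (scale * np.std(img))
        return np.exp(z) / (np.exp(z) + 1.0)  # elementwise logistic, same maths as above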
|
[
"JeffreyEde1@outlook.com"
] |
JeffreyEde1@outlook.com
|
d4d5b2d5ccb2e68d72ec5e9eb7dfc674ae446598
|
96a34a048c783a75736bf0ec775df22142f9ee53
|
/packages/models-library/src/models_library/resource_tracker.py
|
412e587f451e696f7879886a351f752c4ab5b432
|
[
"MIT"
] |
permissive
|
ITISFoundation/osparc-simcore
|
77e5b9f7eb549c907f6ba2abb14862154cc7bb66
|
f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63
|
refs/heads/master
| 2023-08-31T17:39:48.466163
| 2023-08-31T15:03:56
| 2023-08-31T15:03:56
| 118,596,920
| 39
| 29
|
MIT
| 2023-09-14T20:23:09
| 2018-01-23T10:48:05
|
Python
|
UTF-8
|
Python
| false
| false
| 447
|
py
|
from enum import auto
from typing import TypeAlias
from pydantic import PositiveInt
from .utils.enums import StrAutoEnum
ServiceRunId: TypeAlias = str
PricingPlanId: TypeAlias = PositiveInt
PricingDetailId: TypeAlias = PositiveInt
class ResourceTrackerServiceType(StrAutoEnum):
COMPUTATIONAL_SERVICE = auto()
DYNAMIC_SERVICE = auto()
class ServiceRunStatus(StrAutoEnum):
RUNNING = auto()
SUCCESS = auto()
ERROR = auto()
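
# --- Hedged usage sketch (editor's addition, not part of the upstream module). It assumes
# StrAutoEnum (from .utils.enums) behaves like a str-backed Enum whose auto() values are
# the member names; the identifiers below are placeholders.
#
#     run_id: ServiceRunId = "comp_12345"   # the alias is a plain str
#     status = ServiceRunStatus.RUNNING
#     print(run_id, status, list(ResourceTrackerServiceType))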
|
[
"noreply@github.com"
] |
ITISFoundation.noreply@github.com
|
f54cb6e71d41318e8e5d77aede25cf52f6813fd1
|
670033ae1c1792b84041f587180d52e12210e0db
|
/ch9/tooltip_test.py
|
c3bf33002d63af4879e52eb21751660d116d320f
|
[] |
no_license
|
CheoR/STTWP
|
6adb0d79a6c81cdd810c5e943ef950a12f4f8b7f
|
c84ed364602840ed501831ef368e69850f07a57f
|
refs/heads/master
| 2021-01-18T00:13:58.570223
| 2015-06-24T17:42:07
| 2015-06-24T17:42:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,510
|
py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
import unittest
class ToolTiptest(unittest.TestCase):
"""ToolTiptest
Call mouse move event moves mouse cursor from current location
to supplied element.
"""
URL = "http://jqueryui.com/tooltip/"
def setUp(self):
        self.driver = webdriver.Firefox()  # the hover/tooltip flow is a no-op under Chrome
self.driver.get(self.URL)
self.driver.implicitly_wait(30)
self.driver.maximize_window()
def tearDown(self):
self.driver.quit()
def test_tool_tip(self):
driver = self.driver
frame_element = driver.find_element_by_class_name("demo-frame")
driver.switch_to.frame(frame_element)
age_field = driver.find_element_by_id("age")
ActionChains(self.driver).move_to_element(
age_field
).perform()
tool_tip_element = WebDriverWait(self.driver, 10).until(
expected_conditions.visibility_of_element_located(
(By.CLASS_NAME, "ui-tooltip-content")
)
)
# verify tooltip message
self.assertEqual(
"We ask for your age only for statistical purposes.",
tool_tip_element.text
)
if __name__ == '__main__':
unittest.main(verbosity=2)
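
# --- Editor's hedged aside (not in the original test): on a CI machine the same test can
# run headless; the options API below assumes Selenium >= 3.8 with geckodriver.
#
#     options = webdriver.FirefoxOptions()
#     options.add_argument('-headless')
#     driver = webdriver.Firefox(options=options)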
|
[
"cheo.roman+github@gmail.com"
] |
cheo.roman+github@gmail.com
|
eaf0583389c889dcce3ea401465d2eb4fa6033e9
|
88c67aed0f059523f545053286c92ed78f82227c
|
/padmin.py
|
6739836756cb6a44addd980affe0de6484180993
|
[] |
no_license
|
dravix/pyventa
|
bcc173342d3880fff4a77eb22f115447e6a2f744
|
2080925db7198ce9e799863c261671cef37b05d0
|
refs/heads/master
| 2021-01-01T18:38:11.089306
| 2018-07-17T22:59:59
| 2018-07-17T22:59:59
| 7,473,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,305
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, datetime, urllib2, tarfile, base64, sqlite3
from time import time
#aqui=os.getcwd()
aqui="/usr/share/pyventa/"
#sys.path.append(os.path.join("/usr/share/pyventa/admin"))
home=os.path.join(os.path.expanduser('~'),"pyventa")
if sys.platform == 'linux2':
home=os.path.join(os.path.expanduser('~'),".pyventa")
# Set process name. Only works on Linux >= 2.1.57.
try:
import ctypes
libc = ctypes.CDLL('libc.so.6')
libc.prctl(15, 'padmin\0')
except:
pass
else:
sys.stdout = open(os.path.join(os.path.expanduser('~'),"pad_salida.log"),"w")
sys.stderr = open(os.path.join(os.path.expanduser('~'),"pad_error.log"),"w")
#sys.path.append(os.path.join(aqui,'plugins'))
#sys.path.append(os.path.join(aqui,'import'))
#sys.path.append(os.path.join(aqui,"admin"))
import datetime, tempfile
#from PyQt4.QtGui import *
#from PyQt4.QtCore import *
#from PyQt4.QtCore import *
from ui.ui_admin import Ui_admin
import MySQLdb, ConfigParser
#from db_conf import configurador
from PyQt4.QtGui import QMessageBox, QDialog, QMainWindow, QInputDialog, QApplication, QTableWidgetItem, QLineEdit
from PyQt4.QtCore import SIGNAL, Qt,QTimer, QLocale
from modulos.productos import Productos
from modulos.impuestos import Impuestos
from modulos.departamentos import Departamentos
from modulos.familias import Familias
from modulos.unidades import Unidades
from modulos.control1 import Admin1
from modulos.compras import Compras
from modulos.cajas import Cajas
from modulos.clientes import Clientes
from modulos.proveedores import Proveedores
from modulos.usuarios import Usuarios
from modulos.inventario import Inventario
from modulos.faltantes import Faltantes
from modulos.ofertas import Oferta
from modulos.conexiones import Conexiones
from ui.dlg_buscador import buscador
from lib.utileria import MyListModel, MyTableModel
from lib import libutil
from lib.librerias.configurador import Configurador
from lib.librerias.conexion import Conexion
from lib.librerias.seguridad import Seguridad
class Administrador(QMainWindow, Ui_admin):
def __init__(self):
QDialog.__init__(self)
self.setupUi(self)
self.curser=None
self.cursor=None
self.home=home
self.cfg=Configurador(self)
if self.cfg.stat:
self.conexion=Conexion(self,self.cfg)
if self.conexion.stat:
self.cursor=self.conexion.cursor
self.curser=self.conexion.curser
self.conexion.db.text_factory = str
self.iniciarEstilo()
self.iniciarSesion()
else:
print "Error al iniciar configuraciones"
sys.exit()
        # Start up the functional modules
self.showMaximized()
self.stack.setCurrentIndex(1)
self.impuesto=Impuestos(self)
self.departamento=Departamentos(self)
self.familia=Familias(self)
self.unidad=Unidades(self)
self.productos=Productos(self)
self.clientes=Clientes(self,True)
self.proveedores=Proveedores(self,True)
self.usuarios=Usuarios(self)
self.ofertas=Oferta(self)
self.cajas=Cajas(self,True)
self.inv=Inventario(self)
self.faltantes=Faltantes(self)
self.compras=Compras(self)
try:
self.conexiones=Conexiones(self)
except:
            self.tConexiones.setVisible(False)
        # Initialise the combo boxes
self.iniciarCombos()
#self.familias=self.familia.getLista()
self.menuHerramientas.addAction("Recargar estilo",self.iniciarEstilo)
self.iniciarEstilo()
    def conexion(self):
        # Re-establish the database connection and restart the session (currently unused)
self.conexion=Conexion(self,self.cfg)
if self.conexion.stat:
self.cursor=self.conexion.cursor
self.curser=self.conexion.curser
self.iniciarEstilo()
self.iniciarSesion()
def iniciarEstilo(self):
try:
kcss = open(os.path.join(os.path.dirname(os.path.realpath(__file__)),"perfil","estilos",self.cfg.get("pyventa","estilo")),"r")
estilo=kcss.read()
self.setStyleSheet(estilo)
kcss.close()
except:
try:
kcss = open(os.path.join(self.home,"estilos",self.cfg.get("pyventa","estilo")),"r")
estilo=kcss.read()
self.setStyleSheet(estilo)
kcss.close()
except:
print "No se cargo el estilo "+self.cfg.get("pyventa","estilo")
def iniciarSesion(self):
dlg=Seguridad(self,nivel=5,logo=":/actions/images/padmin.simple.logo.png", nombre="Administrador de Pyventa")
acceso=dlg.exec_()
if acceso>-1:
self.usuario=dlg.usuario
self.cursor=dlg.cursor
self.curser=dlg.curser
else:
sys.exit()
def iniciarCombos(self):
#self.cbpDepartamentos.clear()
#self.cbpFamilias.clear()
self.cbeFamilias.clear()
self.cbeImpuestos.clear()
self.cbeUnidades.clear()
#combo=self.departamento.getModelo()
#self.cbpDepartamentos.setModel(combo)
#self.cbpDepartamentos.setModelColumn(1)
#self.cbpFamilias.setModel(self.familia.getModelo())
self.cbeFamilias.setModel(self.familia.getModelo())
#self.cbpFamilias.setModelColumn(1)
self.cbeFamilias.setModelColumn(1)
self.cbeImpuestos.setModel(self.impuesto.getModelo())
self.cbeImpuestos.setModelColumn(1)
self.cbeUnidades.setModel(self.unidad.getModelo())
self.cbeUnidades.setModelColumn(1)
def goHome(self):
self.stack.setCurrentIndex(0)
#self.tbrProductos.hide()
def tabular(self,tabla,sql,head):
        # Executes the given SQL query and tabulates the result
lista=[]
try:
self.cursor.execute(sql)
result = self.cursor.fetchall()
except MySQLdb.Error, e:
print e
else:
tabla.setColumnCount(len(head))
tabla.setRowCount(len(result))
for i,data in enumerate(head):
item = QTableWidgetItem(1)
item.setText(str(data))
tabla.setHorizontalHeaderItem(i,item)
for i,elem in enumerate(result):
if len(elem)==len(head):
lista.append(list(elem))
for j,data in enumerate(elem):
item = QTableWidgetItem(1)
item.setText(str(data))
tabla.setItem(i,j,item)
result=None
tabla.resizeColumnsToContents()
return lista
def entablar(self,tabla,lista,head):
        # Same as tabular(), except that it receives a list of tuples
tabla.clear()
tabla.setColumnCount(len(head))
tabla.setRowCount(len(lista))
for i,data in enumerate(head):
item = QTableWidgetItem(1)
item.setText(str(data))
tabla.setHorizontalHeaderItem(i,item)
for i,elem in enumerate(lista):
for j,data in enumerate(elem):
item = QTableWidgetItem(1)
item.setText(str(data))
tabla.setItem(i,j,item)
tabla.resizeColumnsToContents()
def tablar(self,tabla,lista,head):
        # Same as entablar(), except that it receives a list of dictionaries
tabla.clear()
tabla.setColumnCount(len(head))
tabla.setRowCount(len(lista))
for i,data in enumerate(head):
item = QTableWidgetItem(1)
item.setText(str(data))
tabla.setHorizontalHeaderItem(i,item)
for i,elem in enumerate(lista):
for j,data in enumerate(elem):
item = QTableWidgetItem(1)
item.setText(str(elem[data]))
tabla.setItem(i,j,item)
tabla.resizeColumnsToContents()
def copy(self,a):
b=[]
for i,fila in enumerate(a):
tmp=[]
for j,col in enumerate(fila):
tmp.append(col)
b.append(tmp)
return b
def setStatus(self,stat,msg='Estado de operaciones'):
        # A QTimer local to this method is garbage-collected on return, so the
        # delayed reset below uses the static QTimer.singleShot instead.
if stat==0: #Normal
#self.pbStat.setStyleSheet("border:1.3px solid rgb(42, 129, 159); border-radius:12px; color:#fff;\nbackground-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0 rgba(0, 102, 128, 255), stop:0.563636 rgba(0, 34, 43, 255), stop:0.568182 rgba(0, 65, 82, 255), stop:1 rgba(0, 67, 85, 255));")
self.pbStat.setText("Sin operaciones")
self.lblStatus.setText(msg)
elif stat==1:
#self.pbStat.setStyleSheet("border:1.3px solid rgb(84, 126, 28);border-radius:12px; color:#fff;background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0 rgba(111, 159, 42, 255), stop:0.495455 rgba(25, 114, 28, 255), stop:0.507692 rgba(87, 149, 42, 255), stop:1 rgba(110, 202, 58, 255));")
self.pbStat.setText("Hecho")
self.lblStatus.setText(msg)
            QTimer.singleShot(3000, lambda: self.setStatus(0))
elif stat==2: #ERROR
self.pbStat.setStyleSheet("border:1.3px solid #700;border-radius:12px; color:#fff;\nbackground-color: qlineargradient(spread:pad, x1:1, y1:1, x2:0.995, y2:0.011, stop:0 rgba(86, 19, 19, 255), stop:1 rgba(176, 50, 50, 255));")
self.pbStat.setText("Error!")
self.lblStatus.setText(msg)
            QTimer.singleShot(4000, lambda: self.setStatus(0))
#def listarInventario(self):
#sql="SELECT * FROM "
#self.entabla(self.tblInventario,)
def entabla(self,tabla,header,query,modelo=None):
#tabla, header, query, modelo
sql=str(query)
try:
self.cursor.execute(sql)
lista=self.cursor.fetchall()
except sqlite3.Error, e:
print "Error al entablar"
raise(e)
except:
print "Problema al ejecutar el query"
else:
if lista!=None:
#print lista
if modelo==None:
modelo = MyTableModel(lista, header, self)
tabla.setModel(modelo)
else:
modelo.setVector(lista)
return modelo
def listaTabla(self,tabla,header,lista,modelo=None):
#tabla, header, lista, modelo
if lista!=None:
#print lista
if modelo==None:
modelo = MyTableModel(lista, header, self)
tabla.setModel(modelo)
else:
modelo.setVector(lista)
return modelo
def enlista(self,lista,query,modelo=None):
#lista, header, query, modelo
sql=str(query)
try:
self.cursor.execute(sql)
res=self.cursor.fetchall()
except:
print "Problema al ejecutar el query"
else:
if res!=None:
res=list(res[0][1])
print res
if modelo==None:
modelo = MyListModel(res, self)
lista.setModel(modelo)
else:
modelo.setVector(res)
return modelo
def aut(self,nivel):
        # Receives a password (key) and a required level to check whether the user has the necessary clearance
#if (self.stack.currentIndex()==modulo):
Acceso =QInputDialog.getText(self, self.tr("Filtro de Acceso"),self.tr("Ingrese su clave de autorizacion."),QLineEdit.Password)
key=str(Acceso[0])
self.cursor.execute('select id_usuario,nivel from usuarios where clave=MD5(\''+key+'\');')
val=self.cursor.fetchone()
if val!=None:
if int(val[1])>=int(nivel):
return val[0]
else :
return False
def setUsuario(self,usuario):
self.usuario=usuario
def notify(self,tipo,event,detail='',time=5):
dia=QMessageBox(self)
timer=QTimer.singleShot(time*1000,dia.accept)
# dia=QtGui.QDialog(self)
dia.setText(event)
dia.setInformativeText(detail)
# dia.setDetailedText(detail)
dia.setWindowModality(0)
dia.setWindowOpacity(.8)
dia.setStandardButtons(QMessageBox.NoButton)
if tipo=='error':
dia.setStyleSheet(".QMessageBox{background:rgba(250,30,10,255);color:#fff}QLabel{background:transparent;color:#fff}")
dia.setIcon(QMessageBox.Critical)
elif tipo=='info':
dia.setStyleSheet(".QMessageBox{background:rgba(30,30,10,255);color:#fff}QLabel{background:transparent;color:#fff}")
dia.setIcon(QMessageBox.Information)
elif tipo=='advertencia':
dia.setStyleSheet(".QMessageBox{background:rgba(255,200,0,255);color:#fff}QLabel{background:transparent;color:#fff}")
dia.setIcon(QMessageBox.Warning)
elif tipo=='exito':
dia.setStyleSheet(".QMessageBox{background:rgba(0,128,0,255);color:#fff}QLabel{background:transparent;color:#fff}")
dia.setIcon(QMessageBox.Information)
dia.move(self.width()-dia.width(),104)
# dia.addWidget(QLabel(event))
dia.setWindowFlags(dia.windowFlags()|Qt.FramelessWindowHint)
dia.show()
def closeEvent(self, event):
self.conexion.commit()
event.accept()
print "Cerrando el administrador"
#self.destory()
from lib.dlg_ingreso import acceso
if __name__=="__main__":
app = QApplication(sys.argv)
app.processEvents()
QLocale.setDefault(QLocale(111,139))
#dlg=acceso()
#key=-1
#key=dlg.exec_()
#if key>0:
aw = Administrador()
#aw.setUsuario()
aw.show()
app.exec_()
|
[
"dravix@gmail.com"
] |
dravix@gmail.com
|
6e3e3109f325e9b910cf211332c84e5546b3042c
|
4140a1eecd862356d7d41b171d8956a7ab96be7b
|
/nitro-python-1.0/nssrc/com/citrix/netscaler/nitro/resource/config/network/bridgegroup_nsip6_binding.py
|
2fb82fdd0791085d6769ccc6ee1dbb863a4cb702
|
[
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Citrix-TechSpecialist/NS-Init
|
1c4311fef80d47d80fb5bfe107df058f5ff93e20
|
bd1b695584a6acadec0140457782c7f4e97c266b
|
refs/heads/master
| 2020-12-02T10:00:13.245312
| 2017-07-09T09:30:51
| 2017-07-09T09:30:51
| 96,673,273
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,836
|
py
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class bridgegroup_nsip6_binding(base_resource) :
""" Binding class showing the nsip6 that can be bound to bridgegroup.
"""
def __init__(self) :
self._ipaddress = ""
self._td = 0
self._rnat = False
self._id = 0
self._netmask = ""
self.___count = 0
@property
def id(self) :
ur"""The integer that uniquely identifies the bridge group.<br/>Minimum value = 1<br/>Maximum value = 1000.
"""
try :
return self._id
except Exception as e:
raise e
@id.setter
def id(self, id) :
ur"""The integer that uniquely identifies the bridge group.<br/>Minimum value = 1<br/>Maximum value = 1000
"""
try :
self._id = id
except Exception as e:
raise e
@property
def td(self) :
ur"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Minimum value = 0<br/>Maximum value = 4094.
"""
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
ur"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Minimum value = 0<br/>Maximum value = 4094
"""
try :
self._td = td
except Exception as e:
raise e
@property
def netmask(self) :
ur"""A subnet mask associated with the network address.
"""
try :
return self._netmask
except Exception as e:
raise e
@netmask.setter
def netmask(self, netmask) :
ur"""A subnet mask associated with the network address.
"""
try :
self._netmask = netmask
except Exception as e:
raise e
@property
def ipaddress(self) :
ur"""The IP address assigned to the bridge group.
"""
try :
return self._ipaddress
except Exception as e:
raise e
@ipaddress.setter
def ipaddress(self, ipaddress) :
ur"""The IP address assigned to the bridge group.
"""
try :
self._ipaddress = ipaddress
except Exception as e:
raise e
@property
def rnat(self) :
ur"""Temporary flag used for internal purpose.
"""
try :
return self._rnat
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(bridgegroup_nsip6_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.bridgegroup_nsip6_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.id is not None :
return str(self.id)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = bridgegroup_nsip6_binding()
updateresource.id = resource.id
updateresource.ipaddress = resource.ipaddress
updateresource.netmask = resource.netmask
updateresource.td = resource.td
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [bridgegroup_nsip6_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].id = resource[i].id
updateresources[i].ipaddress = resource[i].ipaddress
updateresources[i].netmask = resource[i].netmask
updateresources[i].td = resource[i].td
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = bridgegroup_nsip6_binding()
deleteresource.id = resource.id
deleteresource.ipaddress = resource.ipaddress
deleteresource.netmask = resource.netmask
deleteresource.td = resource.td
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [bridgegroup_nsip6_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].id = resource[i].id
deleteresources[i].ipaddress = resource[i].ipaddress
deleteresources[i].netmask = resource[i].netmask
deleteresources[i].td = resource[i].td
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, id) :
ur""" Use this API to fetch bridgegroup_nsip6_binding resources.
"""
try :
obj = bridgegroup_nsip6_binding()
obj.id = id
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, id, filter_) :
ur""" Use this API to fetch filtered set of bridgegroup_nsip6_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = bridgegroup_nsip6_binding()
obj.id = id
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, id) :
ur""" Use this API to count bridgegroup_nsip6_binding resources configued on NetScaler.
"""
try :
obj = bridgegroup_nsip6_binding()
obj.id = id
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, id, filter_) :
ur""" Use this API to count the filtered set of bridgegroup_nsip6_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = bridgegroup_nsip6_binding()
obj.id = id
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class bridgegroup_nsip6_binding_response(base_response) :
def __init__(self, length=1) :
self.bridgegroup_nsip6_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.bridgegroup_nsip6_binding = [bridgegroup_nsip6_binding() for _ in range(length)]
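
# --- Editor's hedged usage sketch (not part of the generated SDK file). nitro_service and
# its login()/logout() calls belong to the same NITRO SDK; the address and credentials are
# placeholders.
def fetch_bindings(address, user, password, bridgegroup_id) :
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service(address, "http")
    client.login(user, password)
    try :
        return bridgegroup_nsip6_binding.get(client, bridgegroup_id)
    finally :
        client.logout()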
|
[
"Mayank@Mandelbrot.local"
] |
Mayank@Mandelbrot.local
|
1ec730ee58eb570f7330d115e12bc4c37560f281
|
7722ef476351fa40675fc97b5a330ca7f94cb9db
|
/FuzzyDrink/wsgi.py
|
4b1bfadc9873174713ff39177378e0bb17d8b775
|
[] |
no_license
|
drmarkthrasher/fuzzydrink
|
a73c94f01fc395e9e4fe9ce39ff19df75585ea38
|
09a297fdd679a5fc6b737ba11a9c30d767bb6cac
|
refs/heads/master
| 2020-04-09T01:10:15.756392
| 2018-12-06T04:21:14
| 2018-12-06T04:21:14
| 159,894,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for FuzzyDrink project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FuzzyDrink.settings')
application = get_wsgi_application()
|
[
"drmarkthrasher@gmail.com"
] |
drmarkthrasher@gmail.com
|
f6aa0262cb69c272dd529ab640244a9ad4403ac9
|
a9d54de9e0b47f6e518e0ca8c66fd264531dfd9a
|
/back/app/views/__init__.py
|
05b33e32a9d976d39316d65c0ef9477a5b552804
|
[] |
no_license
|
astex/fable
|
80d7bfecfff22fc2bc0ba090162716738762c8ed
|
96a4ae2aac35ec004b3ed8b481f357324c3c4088
|
refs/heads/master
| 2020-03-30T03:34:31.041247
| 2015-08-05T22:00:56
| 2015-08-05T22:00:56
| 40,268,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
from app.views import user, deployment, collection, run
def register(app):
user.UserView.register(app)
user.SessionView.register(app)
deployment.DeploymentView.register(app)
collection.CollectionView.register(app)
collection.StoryView.register(app)
collection.TargetView.register(app)
run.RunView.register(app)
run.StepView.register(app)
run.ResultView.register(app)
run.ActionView.register(app)
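
# --- Editor's hedged wiring sketch (not in the original package): the *View.register(app)
# calls above match the Flask-Classful pattern, so `app` is assumed to be a Flask instance.
def create_app():
    from flask import Flask
    app = Flask(__name__)
    register(app)
    return app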
|
[
"0astex@gmail.com"
] |
0astex@gmail.com
|
27af94b12828d72d9f3e81820266d4f6f92c7ba8
|
1cf1698142740120ac344b79dc72683bd62c09bf
|
/projeto 1.py
|
e8c2134265fa9310a1e664ceb591505373d63162
|
[] |
no_license
|
PedroMagaieski/projetos_python
|
bd66f4949dd0f51970102fd0f3bd85793b3ccf88
|
dbf37d5fe2f52d821956b045806d910a22be8d62
|
refs/heads/main
| 2023-02-17T03:25:53.678503
| 2021-01-13T20:13:13
| 2021-01-13T20:13:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,684
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[13]:
names = []
notes = []
idn = 'S'
def get_average(notes):
    return sum(notes) / len(notes)

def get_approved(notes):
    # enumerate keeps duplicate grades distinct; notes.index() would always return the first occurrence
    return [i for i, note in enumerate(notes) if note >= 6]

def get_failed(notes):
    return [i for i, note in enumerate(notes) if note < 6]
while idn == 'S' or idn == 's':
names.append(input('Digite o nome do aluno: '))
notes.append(int(input('Digite a nota do aluno: ')))
    idn = input('Deseja digitar a nota de outro aluno? (\'S\' para sim): ')
print('\n\n')
for index in range(len(names)):
print('* Aluno %s: %d' % (names[index], notes[index]))
print('\nA média da turma é: ', get_average(notes))
print('\nAprovados')
for index in get_approved(notes):
print('- ', names[index])
print('\nReprovados')
for index in get_failed(notes):
print('- ', names[index])
# In[8]:
alunos = []
notas = []
aprovados = []
reprovados = []
continuar = 'S'
cont = 0
while continuar.upper()=='S':
cont+=1
aluno = input("Digite o nome do aluno: ")
alunos.append(aluno)
nota = int(input("Digite a nota do aluno"))
notas.append(nota)
if(nota<6):
reprovados.append(aluno)
else:
aprovados.append(aluno)
continuar = input("Digite S se deseja digitar a nota de outro aluno: ")
print("Todos os alunos cadastrados: ")
for i in range(len(alunos)):
print("%s: %s"%(alunos[i],notas[i]))
print("\nA media da sala é: ",sum(notas)/cont)
print("\naprovados: ")
for i in aprovados:
print(i)
print("\nreprovados")
for i in reprovados:
print(i)
# In[ ]:
|
[
"noreply@github.com"
] |
PedroMagaieski.noreply@github.com
|
dbf346401a7da23e6c669222c0e8921531980339
|
3f286a92caf2ab828c64a4edfd773d3a5982378d
|
/checkers/pieces.py
|
936a29365ada7b37882c004e69b2a05470c5b591
|
[] |
no_license
|
amitbiderman/Checkers
|
f0e3a0c6c0b068d0cc7b28ad570bfa7b8d1476ad
|
34ed866bef5f3b4626e066dd59c4a5a9f503d5f9
|
refs/heads/master
| 2023-02-01T00:24:12.019680
| 2020-12-07T12:43:27
| 2020-12-07T12:43:27
| 297,669,488
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
from .constants import *
import pygame
class Piece:
PADDING = 20
OUTLINE = 2
def __init__(self, row, col, color):
self.row = row
self.col = col
self.color = color
self.king = False
self.x = 0
self.y = 0
self.calc_position()
def calc_position(self):
self.x = SQUARE_SIZE * self.col + SQUARE_SIZE // 2
self.y = SQUARE_SIZE * self.row + SQUARE_SIZE // 2
def make_king(self):
self.king = True
def move(self, row, col):
self.row = row
self.col = col
self.calc_position()
def draw(self, win):
radius = SQUARE_SIZE // 2 - self.PADDING
pygame.draw.circle(win, GREY, (self.x, self.y), radius + self.OUTLINE)
pygame.draw.circle(win, self.color, (self.x, self.y), radius)
if self.king:
win.blit(CROWN, (self.x - CROWN.get_width() // 2, self.y - CROWN.get_height() // 2))
def __repr__(self):
return str(self.color)
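
# --- Editor's hedged sketch (not in the original file): drawing one piece in a window.
# SQUARE_SIZE, GREY and CROWN are expected to come from .constants via the star import above.
def demo_draw():
    pygame.init()
    win = pygame.display.set_mode((8 * SQUARE_SIZE, 8 * SQUARE_SIZE))
    piece = Piece(row=2, col=3, color=(178, 34, 34))
    piece.draw(win)
    pygame.display.update()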
|
[
"noreply@github.com"
] |
amitbiderman.noreply@github.com
|
78b3ac2a33c639e1092ccd15d876df8032b1b0ec
|
8e11807883da1e02a71d9629e1c835e16af1663b
|
/part2/part2.py
|
e3c86ff43f814601fd1ab548dc2f4b0e2996eaf0
|
[] |
no_license
|
haqueo/M3C
|
a773b342edd6cfda5db73d75a57f102c2f804f37
|
ff8b086174dfea722263d11f51ed543891b7e2c7
|
refs/heads/master
| 2021-07-07T04:43:35.838273
| 2017-10-05T11:59:28
| 2017-10-05T11:59:28
| 105,883,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,728
|
py
|
"""Project, part 2"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from n1 import network as net
from p1 import flunet as fn
import time
def initialize(N0,L,Nt,pflag):
"""Generate network, and initial conditions
If pflag is true, construct and return transport matrix
"""
#Generate network
qmax,qnet,enet = net.generate(N0,L,Nt)
#THERE ARE N = N0+NT NODES IN TOTAL
N = N0+Nt
#locate the first node with the highest degree
for i in range(N0+Nt):
if (qnet[i] == qmax):
InfectedNode = i #The actual value of InfectedNode is i+1, this is just its index
break
#Setup the initial conditions
InitialConditions = np.zeros((3*N,1)) #It is 3Nx1
InitialConditions[:N,0] = float(1) #first N are all 1, rest are 0
#apart from the infected node:
InitialConditions[InfectedNode,0] = float(0.1)
InitialConditions[InfectedNode+N,0] = 0.05
InitialConditions[InfectedNode+2*N,0] = 0.05
if pflag == True:
#find the adjacency matrix, A
A = net.adjacency_matrix(N0+Nt,enet)
P = np.zeros((N,N))
scalar = np.dot(qnet,A).astype(float)
#for i in range(N):
# for j in range(N):
# P[i,j] = float(qnet[i]*A[i,j])/float(np.dot(qnet,A[:,j]))
for i in range(N):
P[i,:] = (float(qnet[i])*A[i,:].astype(float))/scalar
return InitialConditions[:,0], InfectedNode, P
return InitialConditions[:,0], InfectedNode
def solveFluNet(T,Ntime,a,b0,b1,g,k,w,y0,N0,L,Nt,P,verFlag):
"""Simulate network flu model at Ntime equispaced times
between 0 and T (inclusive). Add additional input variables
as needed clearly commenting what they are and how they are
used
additional input:
    P - the transport matrix associated with the generated network,
used to calculate RHS
N0 - initial number of nodes
L - number of links added at each timestep
Nt - total number of timesteps
verFlag - tells you which version of RHS to use
- "python", "fortran", "omp"
"""
#add input variables to RHS functions if needed
def RHSnet(y,t,a,b0,b1,g,k,w,N,P):
"""RHS used by odeint to solve Flu model
requires i, the index of the node being solved
and N, the total number of nodes
"""
#y is really the initial condition, this needs to change
#Split initial conditions into S,E,C
#beta (b) definition
b = b0 + (b1*(float(1)+np.cos(2.0*(np.pi)*float(t))))
#Here I have expanded the sum within dS/dt, dE/dt and dC/dt given,
# and used the fact that the sum of Pij over i = 1
#
#dS = k*(1-S[i])- S[i]*((b*C[i])+w)+(w*(np.dot(S[:,0],P[:,i])))
#dE = (b*C[i]*S[i]) - E[i]*(k+a+w)+(w*(np.dot(E[:,0],P[:,i])))
#dC = (a*E[i])-(C[i]*(g+k+w))+(w*(np.dot(C[:,0],P[:,i])))
#
#
dyprime = np.zeros((3*N,1))
S = np.zeros(N)
E = np.zeros(N)
C = np.zeros(N)
S[:] = y[0:N]
E[:] = y[N:2*N]
C[:] = y[2*N:3*N]
for i in range(N):
#dyprime[i,0] = k*(1-y[i])- y[i]*((b*y[(2*N)+i])+w)+(w*(np.dot(y[0:N],P[i,:])))
dyprime[i,0] = k*(1-S[i]) - b*C[i]*S[i] + w*np.dot(P[i,:],S) - w*S[i]
dyprime[i+N,0] = (b*y[(2*N)+i]*y[i]) - y[N+i]*(k+a+w)+(w*(np.dot(y[N:2*N],P[i,:])))
dyprime[i+(2*N),0] = (a*y[N+i])-(y[(2*N)+i]*(g+k+w))+(w*(np.dot(y[2*N:3*N],P[i,:])))
return dyprime[:,0]
#def RHSnet(y,t,a,b0,b1,g,k,w,N,initialConditions,P):
#
#for i in range(N):
# myInit = InitialConditions[i][0],InitialConditions[N+i][0],InitialConditions[2*N+i][0]
# #for each node, extract its initial condition (could use if statement dependent on if infectedNode or not, but still O(1)
#
# Y = odeint(RHSnet,myInit,t,args=(a,b0,b1,g,k,w,N,i,InitialConditions)) #i is the node under consideration
#
# S_sol[:,i] = Y[:,0]
# E_sol[:,i] = Y[:,1]
# C_sol[:,i] = Y[:,2]
#solveFluNet(T,Ntime,a,b0,b1,g,k,w,y0,N0,L,Nt):
def RHSnetF(y,t,a,b0,b1,g,k,w,N,P):
"""RHS used by odeint to solve Flu model"
Calculations carried out by fn.rhs
"""
#!rhs(n,y,t,a,b0,b1,g,k,w,dy,qnet,Anet)
dy = fn.rhs(y,t,a,b0,b1,g,k,w,P,N)
#for some reason, this takes N as the last value?!
return dy
def RHSnetFomp(y,t,a,b0,b1,g,k,w,N,P):
"""RHS used by odeint to solve Flu model
Calculations carried out by fn.rhs_omp
"""
dy = fn.rhs_omp(y,t,a,b0,b1,g,k,w,P,2,N)
return dy
#there are N0+Nt nodes in total
N = N0+Nt
#Get the initial conditions
#Initialise the time
tprime = np.linspace(float(0),T,Ntime)
#Add code here and to RHS functions above to simulate network flu model
if (verFlag == 'python'):
Y = odeint(RHSnet,y0,tprime,args=(a,b0,b1,g,k,w,N,P))
return tprime, Y[:,0:N],Y[:,N:2*N],Y[:,(2*N):3*N]
elif (verFlag == 'fortran'):
Y = odeint(RHSnetF,y0,tprime,args=(a,b0,b1,g,k,w,N,P))
return tprime, Y[:,0:N],Y[:,N:2*N],Y[:,(2*N):3*N]
elif (verFlag == 'omp'):
Y = odeint(RHSnetFomp,y0,tprime,args=(a,b0,b1,g,k,w,N,P))
return tprime, Y[:,0:N],Y[:,N:2*N],Y[:,(2*N):3*N]
def analyze(N0,L,Nt,T,Ntime,a,b0,b1,g,k,threshold,warray,display=False):
"""analyze influence of omega on:
1. maximum contagious fraction across all nodes, Cmax
2. time to maximum, Tmax
3. maximum fraction of nodes with C > threshold, Nmax
Input: N0,L,Nt: recursive network parameters
T,Ntime,a,b0,b1,g,k: input for solveFluNet
threshold: use to compute Nmax
warray: contains values of omega
"""
Wlen = len(warray)
Cmaxarray = np.zeros((Wlen,1))
Tmaxarray = np.zeros((Wlen,1))
Nmaxarray = np.zeros((Wlen,1))
InitialConditions, InfectedNode, P = initialize(N0,L,Nt,True)
N = N0+Nt
for i in range(Wlen):
#FOR EVERY W, SOLVE FLU NET
t,S,E,C = solveFluNet(T,Ntime,a,b0,b1,g,k,warray[i],InitialConditions,N0,L,Nt,P,'omp')
        # time at which the global maximum of C occurs (row index of the overall argmax), per the docstring
        Tmaxarray[i,0] = t[np.unravel_index(np.argmax(C), C.shape)[0]]
arrayNs = np.zeros((Ntime,1))
for j in range(Ntime):
#TAKE VALUES OF C OVER THRESHOLD
arrayNs[j,0] = (C[j,:] > threshold).sum()
#FIND MAX X
Cmaxarray[i] = np.amax(C)
#NUMBER OF Cs over THRESHOLD
Nmaxarray[i] = max(arrayNs)/float(N)
if display == True:
plt.figure()
plt.title('Omar Haque, analyze. Graph of Nmax against w')
plt.xlabel('w')
plt.ylabel('Nmax')
plt.plot(warray,Nmaxarray)
plt.show()
plt.figure()
plt.title('Omar Haque, analyze. Graph of Cmax against w')
plt.xlabel('w')
plt.ylabel('Cmax')
plt.plot(warray,Cmaxarray)
plt.show()
plt.figure()
plt.title('Omar Haque, analyze. Graph of Tmax against w')
plt.xlabel('w')
plt.ylabel('Tmax')
plt.plot(warray,Tmaxarray)
plt.show()
return Cmaxarray,Tmaxarray,Nmaxarray
def visualize(enet,C,threshold):
"""Optional, not for assessment: Create figure showing nodes with C > threshold.
Use crude network visualization algorithm from homework 3
to display nodes. Contagious nodes should be red circles, all
others should be black"""
return None
def performance(N0,L,Nt,T,Ntime,a,b0,b1,g,k,w):
"""function to analyze performance of python, fortran, and fortran+omp approaches
Add input variables as needed, add comment clearly describing the functionality
of this function including figures that are generated and trends shown in figures
that you have submitted
    This function plots the time taken by solveFluNet against N (the total number of nodes) for the three RHS versions.
    The first figure (p24.png) shows that both Fortran versions are far faster than the Python version, and the cost grows roughly linearly with problem size.
    The second figure (p25.png) is a log-log plot of the Fortran RHS against the Fortran+OpenMP RHS; there is little difference between them, although the OpenMP version pulls ahead for larger N.
"""
N = N0+Nt
    Nvalues = np.arange(1, N)  # start at Nt = 1; Nt = 0 gives a trivial network and log(0) in the plots below
    pythonTimes = np.zeros(len(Nvalues))
    fortranTimes = np.zeros(len(Nvalues))
    openMPTimes = np.zeros(len(Nvalues))
    for i in range(len(Nvalues)):
        #Generate initial values
        InitialConditions, InfectedNode, P = initialize(N0,L,Nvalues[i],True)
pythonTime1 = time.time()
#solveFluNet(T,Ntime,a,b0,b1,g,k,w,y0,N0,L,Nt,P,verFlag):
solveFluNet(T,Ntime,a,b0,b1,g,k,w,InitialConditions,N0,L,Nvalues[i],P,'python')
pythonTime2 = time.time()
fortranTime1 = time.time()
solveFluNet(T,Ntime,a,b0,b1,g,k,w,InitialConditions,N0,L,Nvalues[i],P,'fortran')
fortranTime2 = time.time()
openMPtime1 = time.time()
solveFluNet(T,Ntime,a,b0,b1,g,k,w,InitialConditions,N0,L,Nvalues[i],P,'omp')
openMPtime2 = time.time()
#solveFluNet(T,Ntime,a,b0,b1,g,k,w,y0,N0,L,Nt,verFlag):
pythonTimes[i] = pythonTime2-pythonTime1
fortranTimes[i] = fortranTime2-fortranTime1
openMPTimes[i] = openMPtime2-openMPtime1
plt.figure()
plt.title('Omar Haque, performance. Comparison of different RHS methods')
plt.plot(Nvalues,pythonTimes,label='RHS from python')
plt.plot(Nvalues,fortranTimes,label='RHS from fortran')
plt.plot(Nvalues, openMPTimes,label='RHS using openMP')
plt.xlabel('N')
plt.ylabel('time taken for solveFluNet()')
plt.legend(loc = 'best')
plt.show()
plt.figure()
plt.title('Omar Haque, performance. Comparison of Fortran vs Fortran+OpenMP (logplot)')
plt.plot(np.log(Nvalues),np.log(fortranTimes),label='RHS from fortran')
plt.plot(np.log(Nvalues), np.log(openMPTimes),label='RHS using openMP')
plt.xlabel('log(N)')
plt.ylabel('log(time taken for solveFluNet)')
plt.legend(loc = 'best')
plt.show()
if __name__ == '__main__':
a,b0,b1,g,k,w = 45.6,750.0,0.5,73.0,1.0,0.1
#CODE FOR P21,P22,P23
N0,L,Nt = 5,2,500
T = 2
threshold = 0.1
warray = [0.0,0.01,0.1,0.2,0.5,1.0]
display = True
#def analyze(N0,L,Nt,T,Ntime,a,b0,b1,g,k,threshold,warray,display=False):
analyze(N0,L,Nt,T,1000,a,b0,b1,1000,k,threshold,warray,True)
#CODE FOR PERFORMANCE
#I have chosen Nt = 200 as it covers the section where Fortran+OpenMP is faster than Fortran.
# InitialConditions, InfectedNode, P = initialize(5,3,3,True)
# N0,L,Nt = 5,2,200
# T = 2
# threshold = 0.1
# Ntime = 1000
# warray = [0,np.power(10,-2),0.1,0.2,0.5,1.0]
# performance(N0,L,Nt,T,Ntime,a,b0,b1,g,k,w)
|
[
"omarhaque@hotmail.co.uk"
] |
omarhaque@hotmail.co.uk
|
113207597caf8b45781ab2940297f9b274b27958
|
4ef5e2e04249b718d386ff1d9210475098a16beb
|
/ContainsDuplicate.py
|
32c3c6ea2e0cb8387800a98561413b61368d5d3d
|
[] |
no_license
|
9shivansh/CodingProblems
|
af1ca77ee815af9527fd8c0675f6d96d792ae881
|
573453dddff2174c84812f16a1e8d6763af5df0b
|
refs/heads/master
| 2022-12-13T08:15:55.092539
| 2020-08-10T17:00:49
| 2020-08-10T17:00:49
| 260,389,411
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
class Solution(object):
    def containsNearbyDuplicate(self, nums, k):
        """Return True if nums contains two equal values at most k indices apart."""
        last_seen = {}  # value -> most recent index at which it appeared
        for i in range(len(nums)):
            if nums[i] in last_seen and i - last_seen[nums[i]] <= k:
                return True
            last_seen[nums[i]] = i
        return False
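
# --- Editor's hedged example (not in the original file); the expected values follow
# directly from the method's contract (LeetCode 219).
if __name__ == "__main__":
    s = Solution()
    print(s.containsNearbyDuplicate([1, 2, 3, 1], 3))  # True: indices 0 and 3 are 3 apart
    print(s.containsNearbyDuplicate([1, 2, 3, 1], 2))  # False: the duplicates are 3 apart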
|
[
"noreply@github.com"
] |
9shivansh.noreply@github.com
|
aaeee56e7233b0b752d5fe7265af6a6fa95ac16c
|
ba8e1b26b87338e4a0e1f7ae81ab41967f5526fc
|
/unit6_lesson_04_understanding_fileio.py
|
90edc81ca9054450121adb3980ab83591be70b88
|
[] |
no_license
|
blackjackal982/Mission-RND
|
e60b38627f5062494a737d00f0700b5826f39510
|
d318c1e01cd2144ce8d7b74d3c211f8ed2a238eb
|
refs/heads/master
| 2020-03-27T10:58:06.272002
| 2018-10-13T07:48:09
| 2018-10-13T07:48:09
| 146,456,334
| 0
| 0
| null | 2018-10-13T07:48:10
| 2018-08-28T14:02:21
|
Python
|
UTF-8
|
Python
| false
| false
| 7,414
|
py
|
__author__ = 'Kalyan'
notes = '''
Python has a good API for dealing with text and binary files. We explore it
in this module. You may notice how much easier this is than the corresponding C API :-).
Use help(file.XXX) to find help on file method XXX (tell, seek, etc.)
'''
from placeholders import *
import inspect
import os
def get_module_dir():
mod_file = inspect.getfile(inspect.currentframe())
mod_dir = os.path.dirname(mod_file)
return mod_dir
def get_temp_dir():
module_dir = get_module_dir()
temp_dir = os.path.join(module_dir, "tempfiles")
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
return temp_dir
# open files generated as a part of the tests. Allow them to be in a different dir via DATA_DIR
def open_temp_file(file, mode):
data_dir = os.getenv("DATA_DIR", default=get_temp_dir())
out_file = os.path.join(data_dir, file)
return open(out_file, mode)
#opens a file from the module directory for reading. These are input files that are part of course material
#and must be submitted along with this lesson.
#Read: https://docs.python.org/3/library/functions.html#open
def open_input_file(file, mode="rt"):
mod_dir = get_module_dir()
test_file = os.path.join(mod_dir, file)
return open(test_file, mode)
def test_file_readlines():
f = open_input_file("unit6_input_data.txt")
lines = f.readlines()
assert ['one\n', 'two\n', 'three\n', 'four\n', 'five\n']== lines
def test_file_read():
f = open_input_file("unit6_input_data.txt")
data = f.read()
assert "one\ntwo\nthree\nfour\nfive\n" == data
def test_file_end():
f = open_input_file("unit6_input_data.txt")
s = f.read() # read till end.
assert '' == f.read()
assert '' == f.read()
def test_file_read_binary():
f = open_input_file("unit6_input_data.txt", "rb")
lines = f.readlines()
assert [b"one\r\n",b"two\r\n",b"three\r\n",b"four\r\n",b"five\r\n"] == lines
f.seek(0, 0)
data = f.read()
assert b"one\r\ntwo\r\nthree\r\nfour\r\nfive\r\n" == data
def test_file_windows_newlines():
f = open_temp_file("newlines_tmp.txt", "wb")
    # What does encode() do, and why is it needed here? Run this code without it and explore.
f.write("one\r\ntwo\rthree\n".encode())
f.close()
f = open_temp_file("newlines_tmp.txt", "rt")
assert "one\ntwo\nthree\n" == f.read()
f.close()
f = open_temp_file("newlines_tmp.txt", "rb")
data = f.read()
assert not '' == f.read()
#windows behavior : https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files
def test_file_universal_newlines():
f = open_temp_file("newlines_tmp.txt", "wb")
f.write(b"one\r\ntwo\rthree\n")
f.close()
f = open_temp_file("newlines_tmp.txt", "rU")
assert "one\ntwo\nthree\n" == f.read()
assert ('\r','\n','\r\n') == f.newlines
def test_file_readline():
f = open_input_file("unit6_input_data.txt")
lines = []
while True:
line = f.readline()
if not line:
break
lines.append(line)
assert ['one\n', 'two\n', 'three\n', 'four\n', 'five\n'] == lines
#files can be used as iterable objects!
def test_file_iteration():
f = open_input_file("unit6_input_data.txt")
lines = []
# this will not read the whole file into memory like readlines would.
for x in f:
lines.append(x)
assert ['one\n', 'two\n', 'three\n', 'four\n', 'five\n'] == lines
def test_file_tell():
tells = []
f = open_input_file("unit6_input_data.txt")
while True:
line = f.readline()
tells.append(f.tell())
if not line:
break
assert [5, 10, 17, 23, 29, 29] == tells
def test_file_readlines_tell():
tells = []
f = open_input_file("unit6_input_data.txt", "rb")
for line in f.readlines():
tells.append(f.tell())
assert [29, 29, 29, 29, 29] == tells
def test_file_iteration_tell():
tells = []
f = open_input_file("unit6_input_data.txt", "rb")
for line in f:
tells.append(f.tell())
assert [5,10,17,23,29] == tells # is there really no difference between readlines and iteration?
# as an exercise do both this test and previous test in rt mode. discuss your
# observations and findings on forums.
def test_file_seek():
f = open_input_file("unit6_input_data.txt", "rb")
assert 0 == f.tell()
f.read()
assert 29 == f.tell()
assert b'' == f.read()
f.seek(0, 0)
assert b'one\r\ntwo\r\nthree\r\nfour\r\nfive\r\n' == f.read()
f.seek(-3, 2)
assert b'e\r\n' == f.read()
f.seek(-2, 1)
assert b'\r\n'== f.read()
#windows has a few newlines quirks.
def test_file_write_text():
f = open_temp_file("test_write.txt", "w") # same as "wt"
f.write("one\ntwo\nthree\n")
f.close()
f = open_temp_file("test_write.txt", "rb")
assert b"one\r\ntwo\r\nthree\r\n" == f.read()
f = open_temp_file("test_write.txt", "rt")
assert "one\ntwo\nthree\n" == f.read()
def test_file_write_binary():
f = open_temp_file("test_write.txt", "wb")
f.write(b"one\ntwo\nthree\n")
f.close()
f = open_temp_file("test_write.txt", "rb")
assert b"one\ntwo\nthree\n" == f.read()
f = open_temp_file("test_write.txt", "rt")
assert "one\ntwo\nthree\n" == f.read()
# It is generally a good practice to close files after their use is over
def test_file_close():
f = open_input_file("unit6_input_data.txt")
assert False == f.closed
try:
lines = [line.strip() for line in f.readlines()]
finally:
# putting it in finally so that it is closed even if an exception is raised
f.close()
assert True == f.closed
# http://effbot.org/zone/python-with-statement.htm
# you can close files with a 'with' statement irrespective of exceptions.
# This is similar to using statement in c#
def test_with_statement():
try:
with open_input_file("unit6_input_data.txt") as f:
assert False == f.closed
raise Exception("some random exception")
except Exception as ex:
print(ex)
pass
assert True == f.closed
def test_file_iteration1():
with open_input_file("unit6_input_data.txt") as f:
lines = []
# we are walking through f as an iterator, so even if file is huge this code acts like a buffered reader!!
for line in f:
lines.append(line.strip())
assert ['one', 'two', 'three', 'four', 'five'] == lines
lines = []
for line in f:
lines.append(line.strip())
# what happened here? read the link i gave above.
assert [] == lines
# a common pattern to load files into an in memory list
# after doing something on each line (for e.g. strip and upper in this case)
# notice how little code gets so much work done!
def test_file_iteration2():
def process(input):
return input.strip().upper()
with open_input_file("unit6_input_data.txt") as f:
data = [process(line) for line in f]
assert ["ONE","TWO","THREE","FOUR","FIVE"] == data
three_things_i_learnt = """
-Usage of with statements
-Difference between rb and rt and their uses
-
"""
|
[
"noreply@github.com"
] |
blackjackal982.noreply@github.com
|
215d0469b5731e032dab5f8191e509e6048cbb2c
|
28a86d4ba5435f799739b3310151b96162d3ea57
|
/pyzoo/zoo/pipeline/api/keras/metrics.py
|
c44cc6081a40bcc419b03e1c39e74a8aaad980d9
|
[
"Apache-2.0"
] |
permissive
|
lightbend/analytics-zoo
|
8f46625cc6064602e1f36e996de7efceb2537af2
|
6b75874f774a53bfaa4489b4edf4161285d988b5
|
refs/heads/master
| 2023-06-03T05:43:55.099261
| 2018-11-28T00:10:27
| 2018-11-28T00:10:27
| 133,023,978
| 2
| 1
|
Apache-2.0
| 2019-05-30T00:21:52
| 2018-05-11T10:08:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,449
|
py
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.util.common import *
from zoo.pipeline.api.keras.base import ZooKerasCreator
if sys.version >= '3':
long = int
unicode = str
class AUC(JavaValue):
"""
Metric for binary(0/1) classification, support single label and multiple labels.
# Arguments
threshold_num: The number of thresholds. The quality of approximation
may vary depending on threshold_num.
>>> meter = AUC(20)
creating: createAUC
"""
def __init__(self, threshold_num=200, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, threshold_num)
class Accuracy(ZooKerasCreator, JavaValue):
"""
Measures top1 accuracy for multi-class classification
or accuracy for binary classification.
# Arguments
zero_based_label: Boolean. Whether target labels start from 0. Default is True.
If False, labels start from 1.
Note that this only takes effect for multi-class classification.
For binary classification, labels ought to be 0 or 1.
>>> acc = Accuracy()
creating: createZooKerasAccuracy
"""
def __init__(self, zero_based_label=True, bigdl_type="float"):
super(Accuracy, self).__init__(None, bigdl_type,
zero_based_label)
class Top5Accuracy(ZooKerasCreator, JavaValue):
"""
Measures top5 accuracy for multi-class classification.
# Arguments
zero_based_label: Boolean. Whether target labels start from 0. Default is True.
If False, labels start from 1.
>>> acc = Top5Accuracy()
creating: createZooKerasTop5Accuracy
"""
def __init__(self, zero_based_label=True, bigdl_type="float"):
super(Top5Accuracy, self).__init__(None, bigdl_type,
zero_based_label)
|
[
"boris.lublinsky@lightbend.com"
] |
boris.lublinsky@lightbend.com
|
9ff8cc9aba0bad549bc96c4951caa2e327054a7f
|
738e54ead99e912a43a6b0cf0521bcf7324825cc
|
/ProjectX/bin/python-config
|
b74928de377945a534b2306dce0f927bef8d3d85
|
[] |
no_license
|
gugajung/HotelReservation
|
413abba1a64c3cafde909fbb48cd6c0a219c3bef
|
d3e150aa732fe94e95f43aa708ec21c654b4be4c
|
refs/heads/master
| 2020-04-10T19:23:11.068556
| 2018-12-04T02:25:55
| 2018-12-04T02:25:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,353
|
#!/Users/kevinnguyen/Desktop/ProjectX/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"austin_yam@yahoo.com"
] |
austin_yam@yahoo.com
|
|
0d7f0740e4eedfeabdffdd9112ffa2b5ef7cef51
|
41914073db8bf974d72b7f68fd26f593a5c8ea20
|
/gramlogin/views.py
|
6a395e955d350dbc036844ec55b2a95edb1d7893
|
[
"MIT"
] |
permissive
|
k4ml/gramlogin
|
6951a92c0b26e65106cc78feb7f45f24aa44bde5
|
19f4c96f6deef86666b17e8d5f8290a65e9ed93d
|
refs/heads/master
| 2020-09-17T06:51:51.694032
| 2017-06-18T02:13:47
| 2017-06-18T02:13:47
| 94,492,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,496
|
py
|
import os
import json
import pprint
from django.http import HttpResponse, HttpResponseForbidden
from django.contrib.auth import authenticate
from django.views.decorators.csrf import csrf_exempt
from django.utils.crypto import get_random_string
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login as auth_login
from django.core.signing import TimestampSigner
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout as auth_logout
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
import telegram
BOT_TOKEN = os.environ['BOT_TOKEN']
BOT_WHTOKEN = os.environ['BOT_WHTOKEN']
bot = telegram.Bot(token=BOT_TOKEN)
signer = TimestampSigner()
def build_menu(buttons,
n_cols,
header_buttons=None,
footer_buttons=None):
menu = [buttons[i:i + n_cols] for i in range(0, len(buttons), n_cols)]
if header_buttons:
menu.insert(0, header_buttons)
if footer_buttons:
menu.append(footer_buttons)
return menu
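# e.g. build_menu([1, 2, 3, 4, 5], n_cols=2) -> [[1, 2], [3, 4], [5]]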
@login_required(login_url='/login/')
def index(request):
auths = signer.sign('logout')
context = {
'logout_url': reverse('logout') + '?auths=%s' % auths,
}
return render(request, 'index.html', context)
def login(request):
auths = request.GET.get('auths', None) or request.POST.get('auths', None)
if auths is None:
return render(request, 'login.html')
if request.method == 'POST':
user = authenticate(request, auths=auths)
if user is None:
return render(request, 'login.html', context={"message": "Login Fail"})
else:
auth_login(request, user)
return redirect(index)
username = auths.split(':')[0]
return render(request, 'login.html', context={'auths': auths,'username': username})
@login_required(login_url='/login/')
def logout(request):
try:
auths = signer.unsign(request.GET.get('auths', ''))
except Exception as e:
print(e)
return redirect(index)
auth_logout(request)
return redirect(login)
@csrf_exempt
def handle_bot(request, token):
if token != BOT_WHTOKEN:
return HttpResponseForbidden()
if request.method == 'POST':
data = json.loads(request.body.decode("utf-8"))
print(json.dumps(data, indent=4))
update = telegram.Update.de_json(data, bot)
print(update.message.chat.id, update.message.chat.username, update.message.text)
chat_id = update.message.chat.id
username = update.message.chat.username
if not username:
tg_faq = 'https://telegram.org/faq#usernames-and-t-me'
bot.sendMessage(chat_id=chat_id, text='To login with Telegram, you must create a username. Refer %s for details.' % tg_faq)
return HttpResponse()
auths = signer.sign(username)
user, created = User.objects.get_or_create(username=username)
login_url = request.build_absolute_uri('/login/') + '?auths=%s' % auths
button_list = [
telegram.InlineKeyboardButton("Login", url=login_url),
]
reply_markup = telegram.InlineKeyboardMarkup(build_menu(button_list, n_cols=1))
bot.sendMessage(chat_id=chat_id, text="Click the button below to login. It only valid for 2 minutes. After that, you have to type 'Login' again.", reply_markup=reply_markup)
return HttpResponse()
|
[
"kamal.mustafa@gmail.com"
] |
kamal.mustafa@gmail.com
|
0c475a602aed8cc06b2cee1b466023136bc6caac
|
5c567d4ff35239d31cb4cf44b4f1dc1dbfcc2e50
|
/testEmbedding.py
|
bd48b9e0cb51d3e7bdd41135320886b4fd802a77
|
[] |
no_license
|
lgiancaUTH/vascEmbeddings
|
ba172f373bd481cfd1e7e8a03201b14f2ad1bb3a
|
c6d32b344f86ced8cb4c2662ffe68e7d5c511e59
|
refs/heads/master
| 2022-02-20T04:07:44.437637
| 2018-03-26T17:18:01
| 2018-03-26T17:18:01
| 122,401,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
from __future__ import print_function
import sys
# include subdirectories
sys.path.insert(0, './lib/')
sys.path.insert(0, './src/')
import time
import numpy as np
import matplotlib.pyplot as plt
import keras as k
import tensorflow as tf
assert k.backend.image_dim_ordering() == 'tf'
import skimage.io as skio
import skimage.transform as sktr
import segmentVasculature as sv
if __name__ == '__main__':
VISUAL_OUTPUT = False
# load example image
imgStr = 'data/' + '20051020_55701_0100_PP.tif'
img = skio.imread(imgStr)
t0 = time.time()
# create embedding in one step
embVec, resEncCoordKept = sv.createOMIAembedding( img )
t1 = time.time()
total = t1-t0
print ("-"*20,"Embeddings in ", total, " sec")
#or run a visual test to see segmentation driving the embeddings
#===========================================================
# load model segmentation
model = sv.getTrainedModel()
# create model encoding output
modelEnc = sv.genEncModel()
if modelEnc is None:
print ('encoding layer not available')
# segment
resImg = sv.segmentImage( img, model )
#encoding (if available)
if modelEnc is not None:
resEnc, resEncCoord = sv.getImageEncoding( img, modelEnc )
# visual output
imgOut = np.hstack((sktr.resize(img[:, :, 1], resImg.shape), resImg))
if VISUAL_OUTPUT:
skio.imshow( imgOut )
sv.checkFiltering(resImg, resEnc, resEncCoord)
plt.show()
else:
skio.imsave( 'data/testEmbOutput.png', imgOut )
# ===========================================================
|
[
"you@example.com"
] |
you@example.com
|
d8befb9fa09f1770ae63891b3bf66a15f5585aac
|
82fe32eed0992a0c78bc5d09a984b23d0ba44941
|
/day_wise_work_done/3_july/django_cache/store/migrations/0001_initial.py
|
2a3112d76feeffbb1368091349b46c89a87c95fc
|
[] |
no_license
|
harsha444/toppr_training
|
64d0c9838c14b7b3332c980d6a7b05ce20fc07a5
|
1e4ef786f2f06f242ecf483ff3dcd4481ac2d99f
|
refs/heads/master
| 2020-03-21T07:02:48.313119
| 2018-07-03T14:11:44
| 2018-07-03T14:11:44
| 138,257,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
# Generated by Django 2.0.2 on 2018-07-03 05:00
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('price', models.IntegerField(blank=True, null=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"sriharshashm@gmail.com"
] |
sriharshashm@gmail.com
|
0dbacaded161597e4ded3ab63e28e65e8b5de709
|
80935021d3d1af541fe52fc3c25a658a79d0dbc1
|
/bank_account.py
|
a0e476f79ab3496091a54dde67e883f22dd28d61
|
[] |
no_license
|
boitshepo97/bank-account
|
ac575cbedd4968f17099c894ca3f425246cd9490
|
f8f188ffdfa66ccdb1ee10dab4eedcdfce631a8c
|
refs/heads/master
| 2021-01-02T05:31:17.958891
| 2020-03-23T10:55:51
| 2020-03-23T10:55:51
| 239,510,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 715
|
py
|
class BankAccount:
def __init__(self,balance,interest_rate,monthly_fee,customer):
self.balance =balance
self.interest_rate = interest_rate
self.monthly_fee = monthly_fee
self.customer = customer
def deposit(self,amount):
self.balance += amount
return self.balance
    def withdraw(self,amount):
        # guard against overdrawing rather than only checking for a zero balance
        if amount > self.balance:
            return 'insufficient funds'
        self.balance -= amount
        return self.balance
def transfer(self,amount):
self.balance -= amount
return self.balance
def finish_month(self):
self.balance = self.balance + self.balance*self.interest_rate/12 - self.monthly_fee
return self.balance
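# Minimal usage sketch (hypothetical figures); monthly interest is rate/12:
# acct = BankAccount(balance=1200, interest_rate=0.12, monthly_fee=5, customer="Ada")
# acct.finish_month()  # 1200 + 1200*0.12/12 - 5 = 1207.0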
|
[
"boitshepo.masemola@gmail.com"
] |
boitshepo.masemola@gmail.com
|
0b933e671812494c2b266f50f5b7331e36259a82
|
bb77a939d49a1d6e789d655aa30dfc5a4bf4f50f
|
/week-1/single-number.py
|
de36f76bc10cfee90040f686f39e957fa6f00c4b
|
[] |
no_license
|
johnsanc/wb-cohort3
|
0778eacb908a79c9aca124c7800c5370649b2457
|
8faeb2c3416b19c4e1950abfb11a2c3a5651babf
|
refs/heads/master
| 2020-06-10T04:14:56.489619
| 2019-08-19T05:56:58
| 2019-08-19T05:56:58
| 193,579,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
class Solution:
def singleNumber(self, nums: List[int]) -> int:
num = 0
for n in nums:
num ^= n
return num
|
[
"johnny.ss@icloud.com"
] |
johnny.ss@icloud.com
|
14edd5c9176df33293595eaa83f3ed2de69f9a7e
|
f9ec5287004d14eaab61b1fbfb1feef3f1a823a8
|
/str116/old_str116_reference
|
311413f4ca8891696cb651725dc9efcd63d3cf3a
|
[
"MIT"
] |
permissive
|
llamicron/str116
|
3171b516df9b15ab8d2dd858453175f52b683e85
|
647726096c33fff6444e74ac8edb66b2c4090a20
|
refs/heads/master
| 2021-05-15T04:27:59.989049
| 2018-01-25T06:18:51
| 2018-01-25T06:18:51
| 118,678,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,122
|
#!/usr/bin/env python
import serial
from . import settings
import time
import binascii
import os
import logging
import sys
logging.basicConfig(filename=settings.logfile, level=getattr(
logging, settings.log_level.upper()))
class Device():
def __init__(self):
self.ser = Device.connect()
self.ser.timeout = settings.timeout
@staticmethod
def connect():
''' returns a serial device, or raises an IOError '''
try:
return serial.Serial(settings.port, settings.baudRate)
except IOError as er:
logging.error("Failed to connect to STR116 device")
raise
def write(self, data):
data = data.encode(settings.encoding_type)
try:
self.ser.write(data)
return True
except IOError as err:
logging.error("Can't write to the STR116 device (%s)" % (err))
raise
def write_with_response(self, data):
self.write(data)
if self.ser.open:
# This needs a small delay
time.sleep(0.02)
size = self.ser.inWaiting()
if size:
data = self.ser.read(size)
                # self.ser.read returns bytes already, so hexlify it directly
                return binascii.hexlify(data)
return None
# ------------------------------------------------
def _write_message_with_response(data):
    usart = serial.Serial(settings.port, settings.baudRate)
    usart.timeout = settings.timeout
    message_bytes = data.encode("utf8")
    try:
        usart.write(message_bytes)
        # print(usart.open)  # True when the port is open
        if usart.open:
            time.sleep(0.02)
            size = usart.inWaiting()
            if size:
                data = usart.read(size)
                # usart.read returns bytes; decode the hex so callers can slice it as str
                return binascii.hexlify(data).decode('ascii')
            else:
                print('no data')
        else:
            print('usart not open')
    except IOError as e:
        print(("Failed to write to the port. ({})".format(e)))
    return None
# Minimal write-only helper (assumption: set_relay and set_baudrate call
# _write_message, which was missing from this reference file).
def _write_message(data):
    usart = serial.Serial(settings.port, settings.baudRate)
    usart.timeout = settings.timeout
    try:
        usart.write(data.encode("utf8"))
    except IOError as e:
        print(("Failed to write to the port. ({})".format(e)))
def _get_checksum(data):
checksum = sum(bytearray.fromhex(data))
checksumstripped = hex(checksum).replace('0x', '')
return checksumstripped.zfill(2)
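# e.g. _get_checksum('0817020001') sums 0x08+0x17+0x02+0x00+0x01 = 0x22 and returns '22'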
def set_relay(relaynumber, onoff):
#command to turn on relay is 0x08 0x17
#format is
#MA0, MA1, 0x08, 0x17, CN, start number output (relaynumber), \
#number of outputs (usually 0x01), 00/01 (off/on), CS (calculated), MAE
#need to do a checksum on 0x08, 0x17, CN, relaynumber, 0x01, 0x01
relaynumberhex = hex(relaynumber).replace('0x', '').zfill(2)
str_to_checksum = '0817' + settings.CN + str(relaynumberhex) \
+ '01' + str(onoff).zfill(2)
CS = _get_checksum(str_to_checksum)
bytestring = settings.MA0 + settings.MA1 + str_to_checksum \
+ str(CS) + settings.MAE
_write_message(bytestring)
def get_relay(relaynumber):
'''Get the status of the requested relay (true/false)'''
str_to_checksum = '0714020010'
CS = _get_checksum(str_to_checksum) # 2d
bytestring = settings.MA0 + settings.MA1 + str_to_checksum \
+ str(CS) + settings.MAE
relaystatus = _write_message_with_response(bytestring)[6:-4]
test = relaystatus[relaynumber*2:relaynumber*2+2]
if int(test) > 0:
return True
else:
return False
def get_relays_status():
#command to get the status of all of the relays in an array.
#format is
#MA0, MA1, 0x07, 0x14, CN, start number output, number of outputs, CS, MAE
#returns
#SL0, SL1, BC, output first, output next,..., CS, SLE
str_to_checksum = '0714' + settings.CN + '0010'
CS = _get_checksum(str_to_checksum)
bytestring = settings.MA0 + settings.MA1 + str_to_checksum \
+ str(CS) + settings.MAE
print(('relay status bytestring: ' + bytestring))
relaystatus = _write_message_with_response(bytestring)[6:-4]
print(('relay status: ' + relaystatus))
totalrelays = len(relaystatus)
n = 0
    while(n < totalrelays):
        if str(relaystatus[n:n+2]) == '00':
            print(('relay ' + str(n//2).zfill(2) + ': off'))
        else:
            print(('relay ' + str(n//2).zfill(2) + ': on'))
        n += 2
def set_baudrate():
"""
This function changes the baudrate on the str116 controller card.
IMPORTANT! if you change the baudrate using this function, you MUST
update the baudrate listed in the settings file, or your communications
will not work. This is mainly a setup function that can be used to
initialize the controller.
"""
#usart = serial.Serial (settings.port,settings.baudRate) #current settings
d = {'300':0,'600':1,'1200':2,'2400':3,'4800':4,'9600':5,'19200':6, \
'38400':7,'57600':8,'115200':9}
print(d)
bytestring = "55AA083302AA55064277" #set baudrate to 19200
print(bytestring)
#message_bytes = bytestring.decode("hex")
#usart.write(message_bytes)
rimsAddress = 1
switchAddress = 2
baudRate = 19200
timeout = 0.05
#master start byte
MA0 = '55'
#master1 byte
MA1 = 'AA'
#master end byte
MAE = '77'
#hex byte for controller number
CN = '02'
|
[
"sweeney@tamu.edu"
] |
sweeney@tamu.edu
|
|
c55a3038bc53084146757c47f6efabad8bc5d680
|
74506c5bf1141c848d8c5000a73140a35c386ff2
|
/mc/tools/Colors.py
|
e3cce85b0842aea09eedaa1b55d9eafc7db70097
|
[
"MIT"
] |
permissive
|
zy-sunshine/falkon-pyqt5
|
398bc520c21c1cbb4eecbfc786cc7bfc61889177
|
bc2b60aa21c9b136439bd57a11f391d68c736f99
|
refs/heads/master
| 2020-07-22T02:54:09.635104
| 2019-11-03T15:36:44
| 2019-11-03T15:36:44
| 207,053,449
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,629
|
py
|
from PyQt5.Qt import QPalette
from PyQt5.Qt import QColor
class Colors:
@classmethod
def CLAMP(cls, x, l, u):
if x < l:
return l
elif x > u:
return u
else:
return x
@classmethod
def bg(cls, pal, widget):
'''
@param: pal QPalette
@param: widget QWidget
@return: QColor
'''
role = QPalette.NoRole
if not widget:
role = QPalette.Window
elif widget.parentWidget():
role = widget.parentWidget().backgroundRole()
else:
role = widget.backgroundRole()
return pal.color(role)
@classmethod
def contrast(cls, acolor, bcolor):
'''
@brief: return contrast value of acolor and bcolor
@param: acolor QColor
@param: bcolor QColor
@return: int
'''
ar, ag, ab = acolor.getRgb()
br, bg, bb = bcolor.getRgb()
diff = 299 * (ar - br) + 587 * (ag - bg) + 114 * (ab - bb)
if diff < 0:
diff = -diff
else:
diff = 90 * diff // 100
perc = diff // 2550
diff = max(ar, br) + max(ag, bg) + max(ab, bb) - \
(min(ar, br) + min(ag, bg) + min(ab, bb))
perc += diff // 765
perc //= 2
return perc
@classmethod
def counterRole(cls, role):
'''
@brief: return inverted role(color) of specified role
@param: role QPalette.ColorRole
@return: QPalette.ColorRole
'''
if role == QPalette.ButtonText: # 8
return QPalette.Button
        elif role == QPalette.WindowText: # 0
return QPalette.Window
elif role == QPalette.HighlightedText: # 13
return QPalette.Highlight
elif role == QPalette.Window: # 10
return QPalette.WindowText
elif role == QPalette.Base: # 9
return QPalette.Text
elif role == QPalette.Text: # 6
return QPalette.Base
elif role == QPalette.Highlight: # 12
return QPalette.HighlightedText
elif role == QPalette.Button: # 1
return QPalette.ButtonText
else:
return QPalette.Window
@classmethod
def counterRoleWithDef(cls, from_, defFrom=QPalette.WindowText,
defTo=QPalette.Window):
'''
@param: from_ QPalette.ColorRole
@param: to_ QPalette.ColorRole
@param: defFrom QPalette.ColorRole
@param: defTo QPalette.ColorRole
@return: bool, (from_, to_)
'''
if from_ == QPalette.WindowText: # 0
to_ = QPalette.Window
elif from_ == QPalette.Window: # 10
to_ = QPalette.WindowText
elif from_ == QPalette.Base: # 9
to_ = QPalette.Text
elif from_ == QPalette.Text: # 6
to_ = QPalette.Base
elif from_ == QPalette.Button: # 1
to_ = QPalette.ButtonText
elif from_ == QPalette.ButtonText: # 8
to_ = QPalette.Button
elif from_ == QPalette.Highlight: # 12
to_ = QPalette.HighlightedText
elif from_ == QPalette.HighlightedText: # 13
to_ = QPalette.Highlight
else:
from_ = defFrom
to_ = defTo
return False, (from_, to_)
return True, (from_, to_)
@classmethod
def emphasize(cls, color, value=10):
'''
@param: color QColor
@param: value int
@return: QColor
'''
ret = QColor()
h, s, v, a = color.getHsv()
if v < 75 + value:
ret.setHsv(h, s, cls.CLAMP(85 + value, 85, 255), a)
return ret
if v > 200:
if s > 30:
h -= 5
if h < 0:
h = 360 + h
s = (s << 3) // 9
v += value
ret.setHsv(h, cls.CLAMP(s, 30, 255), cls.CLAMP(v, 0, 255), a)
return ret
if v > 230:
ret.setHsv(h, s, cls.CLAMP(v - value, 0, 255), a)
return ret
if v > 128:
ret.setHsv(h, s, cls.CLAMP(v + value, 0, 255), a)
else:
ret.setHsv(h, s, cls.CLAMP(v - value, 0, 255), a)
return ret
@classmethod
def haveContrast(cls, acolor, bcolor):
'''
@brief: check whether two color are contrasting
@param: acolor QColor
@param: bcolor QColor
@return: bool
'''
ar, ag, ab = acolor.getRgb()
br, bg, bb = bcolor.getRgb()
diff = (299 * (ar - br) + 587 * (ag - bg) + 114 * (ab - bb))
if abs(diff) < 91001:
return False
        diff = max(ar, br) + max(ag, bg) + max(ab, bb) - \
            (min(ar, br) + min(ag, bg) + min(ab, bb))
        return diff > 300
@classmethod
def light(cls, color, value):
'''
@param: color QColor
@param: value int
@return: QColor
'''
h, s, v, a = color.getHsv()
ret = QColor()
if v < 255 - value:
ret.setHsv(h, s, cls.CLAMP(v + value, 0, 255), a) # value could be negative
return ret
# psychovisual uplightning, i.e. shift hue and lower saturation
        if s > 30:
            h -= (value * 5) // 20
            if h < 0:
                h = 360 + h  # hue wraps at 360, as in emphasize()
            s = cls.CLAMP((s << 3) // 9, 30, 255)
            ret.setHsv(h, s, 255, a)
            return ret
else: # hue shifting has no sense, half saturation (btw, white won't get brighter)
ret.setHsv(h, s >> 1, 255, a)
return ret
@classmethod
def mid(cls, color1, color2, w1=1, w2=1):
'''
@param: color1 QColor
@param: color2 QColor
@param: w1 int
@param: w2 int
@return: QColor
'''
        sum0 = w1 + w2
        if not sum0:
            return QColor(0, 0, 0)  # black; avoids importing Qt just for Qt.black
        r = (w1 * color1.red() + w2 * color2.red()) // sum0
        r = cls.CLAMP(r, 0, 255)
        g = (w1 * color1.green() + w2 * color2.green()) // sum0
        g = cls.CLAMP(g, 0, 255)
        b = (w1 * color1.blue() + w2 * color2.blue()) // sum0
        b = cls.CLAMP(b, 0, 255)
        a = (w1 * color1.alpha() + w2 * color2.alpha()) // sum0
        a = cls.CLAMP(a, 0, 255)
        return QColor(r, g, b, a)
@classmethod
def value(cls, color):
'''
@param: color QColor
@return: int
'''
v = color.red()
        if color.green() > v:
            v = color.green()
        if color.blue() > v:
            v = color.blue()
return v
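# Minimal usage sketch: black and white are maximally contrasting.
# Colors.haveContrast(QColor(255, 255, 255), QColor(0, 0, 0))  # -> True
# Colors.mid(QColor(0, 0, 0), QColor(255, 255, 255))  # -> QColor(127, 127, 127, 255)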
|
[
"260328360@qq.com"
] |
260328360@qq.com
|
0083b4b2c1f7ee7f9bcaf28976ca06a5d973ae1f
|
2675506079d3ef42c94176cd1ac1ac86a1b98a15
|
/test/test_add_contact_to_group.py
|
5435cf7696145a6aeccddfda318852bd9f199208
|
[
"Apache-2.0"
] |
permissive
|
TatyanaNesmeyanna/python_training
|
e13875993597933e7a7ccb6a264953381e976754
|
75d66dd922b9e41acb05c8aa4f765c6fc4783652
|
refs/heads/master
| 2020-07-07T15:22:40.863626
| 2019-10-15T06:07:04
| 2019-10-15T06:07:04
| 203,387,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,365
|
py
|
# -*- coding: utf-8 -*-
from fixture.orm import ORMFixture
from model.contact import Contact
from model.group import Group
import random
import pytest
import allure
def test_add_contact_to_group(app):
db = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
contact_for_test = None
group_for_test = None
if len(db.get_group_list()) == 0:
group_for_test = Group(name="test_group")
app.group.create(group_for_test)
if len(db.get_contact_list()) == 0:
contact_for_test = Contact(firstname = "test_contact")
app.contact.create(contact_for_test)
else:
contact_for_test = random.choice(db.get_contact_list())
else:
if len(db.get_contact_list()) == 0:
contact_for_test = Contact(firstname = "test_contact")
app.contact.create(contact_for_test)
group_for_test = random.choice(db.get_group_list())
else:
groups = db.get_group_list()
contacts = db.get_contact_list()
for c in contacts:
if len(db.get_contacts_groups(c)) == len(groups):
continue
else:
groups_without_contact= [g for g in groups if g not in db.get_contacts_groups(c)]
group_for_test = random.choice(groups_without_contact)
contact_for_test = c
break
if contact_for_test is None:
contact_for_test = Contact(firstname="test_contact")
group_for_test = random.choice(db.get_group_list())
with allure.step('Given contacts in groups list'):
old_contact_list_in_group = db.get_contacts_in_group(group_for_test)
    with allure.step('When I add the contact %s to the group %s' % (contact_for_test, group_for_test)):
app.contact.add_contact_to_group(contact_for_test, group_for_test)
    with allure.step('Then the contact %s is in the group %s' % (contact_for_test, group_for_test)):
new_contact_list_in_group = db.get_contacts_in_group(group_for_test)
assert len(new_contact_list_in_group) == len(old_contact_list_in_group)+1
old_contact_list_in_group.append(contact_for_test)
assert sorted(old_contact_list_in_group, key = Contact.id_or_max) == sorted(new_contact_list_in_group, key = Contact.id_or_max)
|
[
"tatyana.kalimullina@abbyy.com"
] |
tatyana.kalimullina@abbyy.com
|
17dfe5439db44e75385301fb61ec3226664fda5a
|
c30d4f174a28aac495463f44b496811ee0c21265
|
/python/helpers/python-skeletons/django/forms/formsets.py
|
2f229065ce19aaf26dded3a08847eb71c13e3295
|
[
"Apache-2.0"
] |
permissive
|
sarvex/intellij-community
|
cbbf08642231783c5b46ef2d55a29441341a03b3
|
8b8c21f445550bd72662e159ae715e9d944ba140
|
refs/heads/master
| 2023-05-14T14:32:51.014859
| 2023-05-01T06:59:21
| 2023-05-01T06:59:21
| 32,571,446
| 0
| 0
|
Apache-2.0
| 2023-05-01T06:59:22
| 2015-03-20T08:16:17
|
Java
|
UTF-8
|
Python
| false
| false
| 534
|
py
|
from django.forms import Form
from django.forms.utils import ErrorList
class BaseFormSet(object):
"""
A collection of instances of the same Form class.
"""
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, form_kwargs=None):
"""
:rtype: BaseFormSet[T <= Form]
"""
def __iter__(self):
"""
:rtype: collections.Iterator[T]
"""
pass
def __getitem__(self, index):
"""
:rtype: T
"""
pass
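# Minimal usage sketch (standard Django API; ArticleForm is a hypothetical Form subclass):
# from django.forms import formset_factory
# ArticleFormSet = formset_factory(ArticleForm, extra=2)
# for form in ArticleFormSet():
#     print(form.as_p())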
|
[
"Ilya.Kazakevich@jetbrains.com"
] |
Ilya.Kazakevich@jetbrains.com
|
1bd8ec8127ce59cbb88e9906b20e8ae52d37e54d
|
194082c8bc92699ca2fb7d3965123a0727cd3c95
|
/2D_2 Likelihood Function.py
|
e3121f8465b6a38640e0d03b06c14363cdc8f2a7
|
[] |
no_license
|
ivanlstc/computational-physics-project
|
d42dfc8a44aa855eb4d8a334f9e598519050d598
|
17a91764360ed331b833517e8cf4cad60ffe6010
|
refs/heads/master
| 2020-06-16T04:07:39.173532
| 2019-07-05T23:16:32
| 2019-07-05T23:16:32
| 195,475,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,726
|
py
|
#Importing necessary modules
import numpy as np
import scipy.integrate
import scipy.special
import scipy.interpolate
import matplotlib.pyplot as plt
from Project_1 import fitfunc2, NLL2
#Reading the datafile and extracting the decay times and errors
with open('lifetime-2018.txt', 'r') as f:
lines = [np.float(element) for element in f.read().split()]
dtimes = lines[::2]
derrors = lines[1::2]
#Range of tau and a values to plot 2D NLL function
tau_arr1 = np.linspace(0.1, 0.9, 10)
a_arr1 = np.linspace(0.001, 1, 10)
#Plotting the 2D NLL function for tau ranging from 0.1 to 0.9, and a ranging
#from 0.001 to 1. It can be seen from the graph that the minimum lies in the
#range where tau is between about 0.40 and 0.42, and a is between 0.97 and 0.99
print('Generating 2D NLL function...')
#Note: 2D NLL plot is interpolated for a smoother plot, not 100% indicative of minimum
#but it gives a good enough estimate for the naked eye
data = NLL2(tau_arr1, a_arr1)
X, Y = np.meshgrid(tau_arr1, a_arr1)
f = scipy.interpolate.interp2d(X, Y, data, kind='cubic')
xnew = np.linspace(0.1, 0.9, 100)
ynew = np.linspace(0.001, 1, 100)
data1 = f(xnew,ynew)
Xn, Yn = np.meshgrid(xnew, ynew)
plt.figure(figsize=(15,8))
plt.pcolormesh(Xn, Yn, data1, cmap='RdBu', rasterized=True)
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=12)
plt.xlabel('tau (ps)', fontsize=20)
plt.ylabel('a', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=20)
plt.title('Negative Log Likelihood (NLL)', fontsize=20)
#plt.savefig(r"C:\Users\Ivan\Desktop\Year 3\Computational Physics\Project\Graphs_2D\NLL_2D.pdf", fmt="pdf", transparent=True, rasterized=True, bbox_tight=True)
print('Done')
|
[
"ivanlstc@gmail.com"
] |
ivanlstc@gmail.com
|
dea66b35cb13ae793a269b41b7bdece235e10962
|
e6ff0823bf0bd21214582e74c6a277153372e41e
|
/website1/website1/__init__.py
|
9668ebb7542b4af9cfd14f2fb4db6c7e8dbe2e62
|
[] |
no_license
|
DarkerPWQ/Amazon
|
573295b60f024ee345a898fb8fb7ec6116055c09
|
c5e8e46d7664854a26b6ef84b1d33cd02abb440c
|
refs/heads/master
| 2021-01-11T00:20:09.142432
| 2016-10-11T07:31:03
| 2016-10-11T07:31:04
| 70,566,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
# -*- coding: utf-8 -*-
#!/bin/python
from __future__ import absolute_import
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
# ensures the Celery app is loaded whenever Django starts
|
[
"584371093@qq.com"
] |
584371093@qq.com
|
40948ef1504eef221b62201a06cce21a36eeac30
|
4bfa230b77e398e2d7a116cd7eeae0e7df505c30
|
/meshrcnn/utils/__init__.py
|
f086fcb97804d3b4cb6b5e59211a716187f12f16
|
[
"BSD-3-Clause"
] |
permissive
|
ShashwatNigam99/meshrcnn
|
ca08d41545724fa66074ab0ca9644bb69e6c7d42
|
80bbc2c2d2024f9788f8d937bcc4cdee7c77f040
|
refs/heads/master
| 2022-11-25T04:50:21.511418
| 2020-07-27T13:15:42
| 2020-07-27T13:15:42
| 279,386,054
| 1
| 1
|
NOASSERTION
| 2020-07-14T09:48:55
| 2020-07-13T18:50:03
|
Python
|
UTF-8
|
Python
| false
| false
| 121
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from . import model_zoo # registers pathhandlers
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
659a537b477cbd532ae9cee185bc1aab1fa753fa
|
eefcdd462355d844c2ae215cbc6de0f115679244
|
/make_hr.py
|
6bf22b3d00eba38860e3e925557a10e6bf35868a
|
[] |
no_license
|
StarSTRUQ/UQ-mesa
|
d9b6be059ab44bb5a4a1c2a1ce20eaf52ffc70f5
|
b0163ae07f0546d6a6d6437b50ed50551d388b06
|
refs/heads/master
| 2021-01-22T14:28:38.917228
| 2020-02-04T15:58:28
| 2020-02-04T15:58:28
| 82,343,176
| 1
| 1
| null | 2017-05-24T19:06:02
| 2017-02-17T22:20:45
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,650
|
py
|
#!/bin/usr/python
# Make H-R diagram for the chosen folder
# M. Hoffman March 19 2016
# Try to make it so you can plot multiple folders or just the one
import mesa as ms
import numpy as np
import matplotlib.pyplot as plt
import os, re, shutil
import datetime
#from pylab import *
Q = input('Do you want a single H-R plot [S] per folder or all H-R paths on one diagram [A]? [S/A]: ')
# if not os.path.exists('HRs'):
# os.mkdir('HRs')
youarehere = os.getcwd()
now = datetime.datetime.now()
mo = str(now.month)
da = str(now.day)
hr = str(now.hour)
mn = str(now.minute)
#cols = ['voilet','r','orange','y','g','b','indigo','grey','k']
#nums = [0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.1]
#keys = dict(zip(nums,cols))
def ReadInls(inlist,value):
inl = open(inlist,'r')
for line in inl:
if str(value)+'_scaling_' in line:
a = line
lastV = re.sub(' '+value+'\_scaling\_factor \= ','',a)
Val = re.sub('d','e',lastV)
V = float(Val)
return V
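# e.g. for the inlist line ' Blocker_scaling_factor = 5.0d-2',
# ReadInls(inlist, 'Blocker') strips the prefix, swaps 'd' for 'e', and returns 0.05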
def MakeHRPlot(L,T,kind,M,savename):
fig, ax = plt.subplots()
if kind == 'l':
ax.plot(T,L,color=M)
if kind == 's':
        sc = ax.scatter(T,L,s=40,c=M,cmap=plt.cm.gnuplot_r,vmin=min(M),vmax=max(M),edgecolor='none')
        cbar = fig.colorbar(sc)
        cbar.set_label('Mass')
ax.set_xlabel('Temperature')
ax.set_xlim([max(T)+0.1,min(T)-0.1])
ax.xaxis.label.set_fontsize(18)
ax.set_ylabel('Luminosity')
ax.set_ylim([min(L)-0.1,max(L)+0.1])
ax.yaxis.label.set_fontsize(18)
plt.savefig(savename)
#FV = raw_input('Folder path name: ')
FV = '/data/mhoffman/NUQ/fine_grid'
X = []
Y = []
os.chdir(FV)
print(os.getcwd())
for file in os.listdir(FV):
os.chdir(file)
file_cab = os.getcwd()
for file in os.listdir(file_cab):
matchc = re.match('\Ac([0-9]*)\Z',file)
if matchc:
os.chdir(file)
print('Currently in...'+os.getcwd())
x = ReadInls('inlist_1.0','Blocker')
y = ReadInls('inlist_1.0','Reimers')
if x not in X:
X.append(x)
if y not in Y:
Y.append(y)
os.chdir(file_cab)
os.chdir(FV)
print(X)
print(Y)
print(len(X))
cmap = plt.get_cmap('jet')
Colors = [cmap(i) for i in Y]
print(Colors)
keys = dict(zip(Y,Colors))
print(keys)
if Q == 'S':
os.chdir(FV)
if not os.path.exists('HRs'):
os.mkdir('HRs')
today = mo+da+'_'+hr+mn
os.chdir(FV+'/HRs')
if not os.path.exists(today):
os.mkdir(today)
os.chdir(FV+'/HRs/'+today)
if not os.path.exists('failed'):
os.mkdir('failed')
    os.chdir(FV)
    # run-folder pattern; assumption: taken from the commented example in the 'A' branch below
    reg = '\Abloc\-rand\_reim\-[0-9]\.[0-9]\Z'
    for file in os.listdir(FV):
        matchf = re.match(reg,file)
if matchf:
os.chdir(file)
file_cab = os.getcwd()
for file in os.listdir(file_cab):
matchc = re.match('\Ac([0-9]*)\Z',file)
if matchc:
os.chdir(file)
print('Currently in...'+os.getcwd())
# Get Stellar Mass
s = ms.history_data()
Mass = s.get('star_mass')
Temp = s.get('log_Teff')
Lumi = s.get('log_L')
lastn = len(Lumi) - 1
final_L = Lumi[lastn]
                    B = ReadInls('inlist_1.0','Blocker')
                    print(B)
                    R = ReadInls('inlist_1.0','Reimers')
                    print(R)
# Make an HR plot
os.chdir(FV+'/HRs/'+today)
name = 'R'+str(R)+'_B'+str(B)+'.png'
Sh = keys[R]
#MakeHRPlot(Lumi,Temp,'l',Sh,name)
if final_L > -0.999:
os.chdir(FV+'/HRs/'+today+'/failed')
MakeHRPlot(Lumi,Temp,'l',Sh,name)
plt.close()
os.chdir(file_cab)
else:
MakeHRPlot(Lumi,Temp,'l',Sh,name)
plt.close()
os.chdir(file_cab)
os.chdir(FV)
os.chdir(youarehere)
if Q == 'A':
Masses = []
Temperatures = []
Luminosities = []
Reimers = []
Blocker = []
Textrema = []
    FV = input('Folder path name: ')
os.chdir(FV)
if not os.path.exists('HRs'):
os.mkdir('HRs')
# for file in os.listdir(FV):
# matchf = re.match('\Abloc\-rand\_reim\-[0-9]\.[0-9]\Z',file)
# if matchf:
# os.chdir(file)
file_cab = os.getcwd()
for file in os.listdir(file_cab):
matchc = re.match('\Ac([0-9]*)\Z',file)
if matchc:
os.chdir(file)
s = ms.history_data()
Mass = s.get('star_mass')
Temp = s.get('log_Teff')
TM = max(Temp)
Tm = min(Temp)
Lumi = s.get('log_L')
lastn = len(Lumi) - 1
final_L = Lumi[lastn]
if final_L < 0:
B = ReadInls('inlist_1.0','Blocker')
R = ReadInls('inlist_1.0','Reimers')
Ones = np.ones(lastn+1)
Bl = B * Ones
Rf = R * Ones
Blocker.append(Bl)
Reimers.append(Rf)
Masses.append(Mass)
Luminosities.append(Lumi)
Temperatures.append(Temp)
Textrema.append(TM)
Textrema.append(Tm)
else:
print('Luminosity too low, excluding point!')
os.chdir(file_cab)
os.chdir(file_cab)
os.chdir(FV)
os.chdir(FV+'/HRs/')
name = 'HR_'+mo+da+'_'+hr+mn+'.png'
Color = []
for i in Reimers:
row = []
for j in i:
q = keys[j]
row.append(q)
Color.append(row)
for i in range(len(Masses)):
A = Luminosities[i]
B = Temperatures[i]
C = Color[i][0]
plt.plot(B,A,color=C)
plt.xlim(max(Textrema)+0.1,min(Textrema)-0.1)
plt.show()
# ptl.gcf()
# plt.savefig('HR_'+mo+da+'_'+hr+mn+'.png')
# plt.show()
|
[
"melissa.m.hoffman@stonybrook.edu"
] |
melissa.m.hoffman@stonybrook.edu
|
cab2ac38055ee00121b77316f8a0fe38891f2b90
|
fcf65fabe7a82e1cd590221ec6d6d66aa1638f30
|
/python/XlsxWriter-0.6.4/examples/hide_sheet.py
|
822fdadd7ed2f8ff9f703f745237a13e57dcfc18
|
[
"BSD-2-Clause-Views"
] |
permissive
|
phanirajkiran/leaning-work
|
845c3c766fec7d2935396d84b97405fc04f444c1
|
24336659e35b89c6d308a154390b02a084d0722d
|
refs/heads/master
| 2020-07-29T08:45:34.599753
| 2019-04-02T10:07:06
| 2019-04-02T10:07:06
| 209,734,507
| 2
| 0
| null | 2019-09-20T07:44:29
| 2019-09-20T07:44:27
| null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
#######################################################################
#
# Example of how to hide a worksheet with XlsxWriter.
#
# Copyright 2013-2014, John McNamara, jmcnamara@cpan.org
#
import xlsxwriter
workbook = xlsxwriter.Workbook('hide_sheet.xlsx')
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet3 = workbook.add_worksheet()
worksheet1.set_column('A:A', 30)
worksheet2.set_column('A:A', 30)
worksheet3.set_column('A:A', 30)
# Hide Sheet2. It won't be visible until it is unhidden in Excel.
worksheet2.hide()
worksheet1.write('A1', 'Sheet2 is hidden')
worksheet2.write('A1', "Now it's my turn to find you!")
worksheet3.write('A1', 'Sheet2 is hidden')
workbook.close()
|
[
"yanglei.fage@gmail.com"
] |
yanglei.fage@gmail.com
|
4b38b664c1409617c648990cf6218ac67782c60d
|
226cbc68b5c2a00955656bbf28e5a7d5b62c5146
|
/yatube/posts/migrations/0001_initial.py
|
3a5275345e8c36f2776fc29fb05a4f23e51496d9
|
[
"MIT"
] |
permissive
|
Pavel-qy/yatube
|
10378aa48428721ef1e1f97bf24b09c17a88ab26
|
172643b2e5e064bc36e028edc349d548c01144fe
|
refs/heads/main
| 2023-05-10T02:09:51.790727
| 2021-05-31T06:18:34
| 2021-05-31T06:18:34
| 339,048,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 848
|
py
|
# Generated by Django 3.1.6 on 2021-02-16 10:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='date published')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"pavel.qy@gmail.com"
] |
pavel.qy@gmail.com
|
e4eb39d3df19108aa18ea11fa146fe5ed083136d
|
0717e8a4336b88cd61a11f6bc4a7976baadbdf65
|
/mentor001/urls.py
|
25232a8c10e366231db94c634ef836f4bd4c8f70
|
[] |
no_license
|
JoanNabusoba/mentor
|
b46e639f6e6a5bc0b39e29e8bff8327f04320b87
|
b5e9285f1a0ea6d883576c82bf0deeb366cc6360
|
refs/heads/master
| 2021-01-18T16:00:39.589958
| 2017-03-30T12:56:22
| 2017-03-30T12:56:22
| 86,701,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
from django.conf.urls import url
from django.contrib import admin
from mentor001 import views
urlpatterns = [
url(r'^$', views.index, name='home'),
url(r'^admin/', admin.site.urls),
]
|
[
"joannabusoba@gmail.com"
] |
joannabusoba@gmail.com
|
11c7e3fd7ce286e2101ca94a5177311b7d5e2fa4
|
06b16b78bca548ff41bafceff66e216ff1d6be8d
|
/Day31-33/Logging1.py
|
ee03f7565752434ccd3a190c935059e439afd34a
|
[
"MIT"
] |
permissive
|
craig3050/100DaysOfCode
|
4a37eab4b6484094f2363950cae3e5746160ceb3
|
16abc8b54217d210537590acd014a820a56641b3
|
refs/heads/master
| 2020-03-25T09:03:31.225720
| 2019-01-22T19:20:04
| 2019-01-22T19:20:04
| 143,645,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 919
|
py
|
import sys
import logbook
app_log = logbook.Logger('App')
def main():
print("This is a test of how to put together a log")
_var = input("Give me a number")
try:
_var = int(_var) * 1000
print(_var)
except Exception as x:
print(f"Oh that didn't work!: {x}")
app_log.exception(x)
finally:
print("Thank's for stopping by")
def init_logging(filename: str = None):
level = logbook.TRACE
if filename:
logbook.TimedRotatingFileHandler(filename, level=level).push_application()
else:
logbook.StreamHandler(sys.stdout, level=level).push_application()
msg = 'Logging initialized, level: {}, mode: {}'.format(
level,
"stdout mode" if not filename else 'file mode: ' + filename
)
logger = logbook.Logger('Startup')
logger.notice(msg)
if __name__ == '__main__':
init_logging('test.log')
main()
|
[
"craig3050@gmail.com"
] |
craig3050@gmail.com
|
e11d6451d075b7c422415308bb9c757a784ab567
|
9a4a607a57e3a5edc4d7ce11918efedbf3f0fd02
|
/Working with list/Counting_toTwenty.py
|
df7f5feda0b8aef37c4067b207d1c12e0f40fc12
|
[] |
no_license
|
Lossme8/Python_Fundamentals
|
c559194a630948a99da4f778dbf867c0e85b2f12
|
b6e5ca8b6e941bd9ac024c62fbd005059d74ee5d
|
refs/heads/master
| 2021-01-07T21:07:43.158676
| 2020-02-20T07:52:10
| 2020-02-20T07:52:10
| 241,820,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 50
|
py
|
for number in range(1,1000000):
print(number)
|
[
"noreply@github.com"
] |
Lossme8.noreply@github.com
|
01e8fbfaa54d0c96a8bdbbc8e80906db0ec8fc59
|
64de952ae89570de9be36d0b04aaf02981a3aa28
|
/env/energy-linux-no-gpu/bin/jupyter-bundlerextension
|
2e2751d6e7c3960562156c19ba6bc8515dfe86fc
|
[
"MIT"
] |
permissive
|
vipulroxx/tensorflow-environments
|
821a0017c0fb90938959643f302c668d0a4b60e8
|
de5da11f9930ddb57752037568c2247350d30ae7
|
refs/heads/master
| 2022-12-25T01:28:43.221405
| 2020-01-15T16:49:38
| 2020-01-15T16:49:38
| 125,478,902
| 1
| 0
|
MIT
| 2022-12-09T19:03:32
| 2018-03-16T07:21:00
|
Python
|
UTF-8
|
Python
| false
| false
| 298
|
#!/home/sharm493/energy-project-tensor-flow/env/energy-linux-no-gpu/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from notebook.bundler.bundlerextensions import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"vipulsharma936@gmail.com"
] |
vipulsharma936@gmail.com
|
|
01d7a877171b8d150218d1f20171a9a083b4ffca
|
157bc0a640074a4de0cbce7bf4988bddaaad3d86
|
/strstr.py
|
80918a1f19110cafa1dfd2e93a3ed272812f21fb
|
[] |
no_license
|
pururaj1908/leetcode
|
ffd36b480950c707d7d49ad7dc24da354a4431d7
|
9ce6d7df12b778fc5c31c88e89fd0d1f7893fea4
|
refs/heads/master
| 2022-04-03T20:14:30.851237
| 2020-02-14T09:22:55
| 2020-02-14T09:22:55
| 204,348,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 837
|
py
|
# -*- coding: utf-8 -*-
class Solution:
    def strStr(self, haystack, needle):
        # an empty needle matches at index 0 by convention
        if needle == "":
            return 0
        n = len(needle)
        h = len(haystack)
        # slide a window of length n across the haystack and compare slices;
        # the original partial-match scan missed overlapping prefixes
        for i in range(h - n + 1):
            if haystack[i:i + n] == needle:
                return i
        return -1
s = Solution()
print(s.strStr("aabaaabaaac","aabaaac"))
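# expected output: 4 ("aabaaac" first occurs at index 4 of "aabaaabaaac")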
|
[
"pdave2@ncsu.edu"
] |
pdave2@ncsu.edu
|
5a3d088220426e75b7381094a25274d8a4310820
|
2f28f376ee313c370d9a4807073de8858e36bae2
|
/QueryConstructor.py
|
6eb3fa4c9283a20c112bafde5429e313a8e643dc
|
[] |
no_license
|
XacoboPineiro/GestorConcesionario
|
5d309984b2c47a993c67478a336eac60818fb569
|
28224530d0228ad7e8d1800ffacb098dbcdc7e33
|
refs/heads/master
| 2023-04-18T02:04:14.681571
| 2021-05-14T15:47:17
| 2021-05-14T15:47:17
| 367,412,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
class QueryConstructor:
def __init__(self):
self.table = None
self.fields = None
        self.parameters = None
def noParametersQuery(self, table, *fields):
query = "SELECT "
for i in fields:
query = query + i + ", "
query = query[:-2]
query = query + " FROM " + table
return query
def singleWhereQuery(self, table, parameter, operand, *fields):
query = self.noParametersQuery(table, *fields)
query = query + " WHERE " + parameter + " " + operand + " ?"
return query
|
[
"xpineirocacabelos@danielcastelao.org"
] |
xpineirocacabelos@danielcastelao.org
|
4dcea38e68a220b9c577c2b5743103048b7f45a7
|
34c9b0516eae51944a32450c0cfe9e9598f5983e
|
/Search by Image/feature_extractor.py
|
de7138850c1c08863d33f39a6bb67a3317af18ca
|
[] |
no_license
|
MuzammilPIAIC/Deep-Learning
|
ed7711ccc23d55cd1d6dd1c206ec9ee4f8fdb0a6
|
7293593defac2f369927b4e7c0a85f1c5e3df400
|
refs/heads/master
| 2022-03-03T15:34:51.203282
| 2022-03-03T08:15:44
| 2022-03-03T08:15:44
| 230,601,732
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,305
|
py
|
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.models import Model
import numpy as np
# See https://keras.io/api/applications/ for details
class FeatureExtractor:
def __init__(self):
base_model = VGG16(weights='imagenet')
self.model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)
def extract(self, img):
"""
Extract a deep feature from an input image
Args:
img: from PIL.Image.open(path) or tensorflow.keras.preprocessing.image.load_img(path)
Returns:
feature (np.ndarray): deep feature with the shape=(4096, )
"""
img = img.resize((224, 224)) # VGG must take a 224x224 img as an input
img = img.convert('RGB') # Make sure img is color
x = image.img_to_array(img) # To np.array. Height x Width x Channel. dtype=float32
x = np.expand_dims(x, axis=0) # (H, W, C)->(1, H, W, C), where the first elem is the number of img
x = preprocess_input(x) # Subtracting avg values for each pixel
feature = self.model.predict(x)[0] # (1, 4096) -> (4096, )
return feature / np.linalg.norm(feature) # Normalize
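# Minimal usage sketch (hypothetical image path):
# from PIL import Image
# fe = FeatureExtractor()
# vec = fe.extract(Image.open('example.jpg'))
# vec.shape  # -> (4096,), L2-normalized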
|
[
"noreply@github.com"
] |
MuzammilPIAIC.noreply@github.com
|
79f06b25bf42f600f1fe28bc6334a84c6abe68f9
|
ed4c68ea69f937e5d93ac75a253e2c25b9b21935
|
/thehackernews/news.py
|
f6f8748c1d50ad01b61e955bdafe50b4f4cea6e1
|
[] |
no_license
|
ozcanyarimdunya/mini-scrapy
|
dd738be24e9ca2e9c809619baf6b32bd824c5050
|
f3c36c176900778028445dfd0bfd0ec32cb69db7
|
refs/heads/master
| 2022-12-10T03:15:11.669537
| 2020-08-09T13:08:14
| 2020-08-09T13:08:14
| 198,700,124
| 1
| 0
| null | 2021-06-02T00:47:05
| 2019-07-24T19:47:15
|
Python
|
UTF-8
|
Python
| false
| false
| 826
|
py
|
from bs4 import BeautifulSoup
from base import BaseScrapper
class TheHackerNewsScrapper(BaseScrapper):
url = 'https://thehackernews.com/'
next_selector = '.blog-pager-older-link-mobile'
filename = 'news.json'
max_result = 5
def scrap(self, soup: BeautifulSoup):
print('Current page: {}/{}'.format(self.get_current_page(), '~'), end='\r')
for post in soup.select('.body-post'):
yield {
'url': post.select_one('.story-link').attrs.get('href'),
'img': post.select_one('img.home-img-src').attrs.get('data-src'),
'title': post.select_one('h2.home-title').get_text(strip=True),
'description': post.select_one('.home-desc').get_text(strip=True),
}
if __name__ == '__main__':
TheHackerNewsScrapper()
|
[
"ozcanyd@gmail.com"
] |
ozcanyd@gmail.com
|
cb08da143960e7b8b76a69177a1486cf0584a81d
|
cf99f0dfd2ae3a50ac4dfe95dddd74d2308e7fd4
|
/src/scalbo/scalbo/benchmark/dhb_loglin2.py
|
3b25a26597cd84bc45c912aaacebd965395745a6
|
[
"BSD-2-Clause"
] |
permissive
|
deephyper/scalable-bo
|
33923598181799410b790addcaf4ea799b276444
|
44f0afc28a19213252b59868f76a8f6918f8aabc
|
refs/heads/main
| 2023-07-28T10:33:52.291460
| 2023-07-20T09:44:50
| 2023-07-20T09:44:50
| 464,852,027
| 2
| 2
|
BSD-2-Clause
| 2022-10-18T12:15:19
| 2022-03-01T10:43:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 152
|
py
|
import deephyper_benchmark as dhb
dhb.load("LCu/loglin2")
from deephyper_benchmark.lib.lcu.loglin2 import hpo
hp_problem = hpo.problem
run = hpo.run
|
[
"romainegele@gmail.com"
] |
romainegele@gmail.com
|
be118f5655fb92998cccbfd8f080c291bbfc9541
|
a8b37bd399dd0bad27d3abd386ace85a6b70ef28
|
/airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/streams.py
|
2f447af8630ed8e98a5169cf031e206369e91193
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] |
permissive
|
thomas-vl/airbyte
|
5da2ba9d189ba0b202feb952cadfb550c5050871
|
258a8eb683634a9f9b7821c9a92d1b70c5389a10
|
refs/heads/master
| 2023-09-01T17:49:23.761569
| 2023-08-25T13:13:11
| 2023-08-25T13:13:11
| 327,604,451
| 1
| 0
|
MIT
| 2021-01-07T12:24:20
| 2021-01-07T12:24:19
| null |
UTF-8
|
Python
| false
| false
| 11,039
|
py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import base64
import logging
from typing import Any, Iterable, List, Mapping, Optional, Set
import pendulum
import requests
from airbyte_cdk.models import SyncMode
from cached_property import cached_property
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.adaccount import AdAccount as FBAdAccount
from facebook_business.adobjects.adimage import AdImage
from facebook_business.adobjects.user import User
from .base_insight_streams import AdsInsights
from .base_streams import FBMarketingIncrementalStream, FBMarketingReversedIncrementalStream, FBMarketingStream
logger = logging.getLogger("airbyte")
def fetch_thumbnail_data_url(url: str) -> Optional[str]:
"""Request thumbnail image and return it embedded into the data-link"""
try:
response = requests.get(url)
if response.status_code == requests.status_codes.codes.OK:
_type = response.headers["content-type"]
data = base64.b64encode(response.content)
return f"data:{_type};base64,{data.decode('ascii')}"
else:
logger.warning(f"Got {repr(response)} while requesting thumbnail image.")
except requests.exceptions.RequestException as exc:
logger.warning(f"Got {str(exc)} while requesting thumbnail image.")
return None
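# e.g. a fetched PNG thumbnail comes back as "data:image/png;base64,<payload>",
# which can be embedded directly in an <img src=...> tag.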
class AdCreatives(FBMarketingStream):
"""AdCreative is append only stream
doc: https://developers.facebook.com/docs/marketing-api/reference/ad-creative
"""
entity_prefix = "adcreative"
enable_deleted = False
def __init__(self, fetch_thumbnail_images: bool = False, **kwargs):
super().__init__(**kwargs)
self._fetch_thumbnail_images = fetch_thumbnail_images
@cached_property
def fields(self) -> List[str]:
"""Remove "thumbnail_data_url" field because it is computed field and it's not a field that we can request from Facebook"""
return [f for f in super().fields if f != "thumbnail_data_url"]
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""Read with super method and append thumbnail_data_url if enabled"""
for record in super().read_records(sync_mode, cursor_field, stream_slice, stream_state):
if self._fetch_thumbnail_images:
thumbnail_url = record.get("thumbnail_url")
if thumbnail_url:
record["thumbnail_data_url"] = fetch_thumbnail_data_url(thumbnail_url)
yield record
def list_objects(self, params: Mapping[str, Any]) -> Iterable:
return self._api.account.get_ad_creatives(params=params)
class CustomConversions(FBMarketingStream):
"""doc: https://developers.facebook.com/docs/marketing-api/reference/custom-conversion"""
entity_prefix = "customconversion"
enable_deleted = False
def list_objects(self, params: Mapping[str, Any]) -> Iterable:
return self._api.account.get_custom_conversions(params=params)
class CustomAudiences(FBMarketingStream):
"""doc: https://developers.facebook.com/docs/marketing-api/reference/custom-conversion"""
entity_prefix = "customaudience"
enable_deleted = False
# The `rule` field is excluded from the list because it caused the error message "Please reduce the amount of data" for certain connections.
# https://github.com/airbytehq/oncall/issues/2765
fields_exceptions = ["rule"]
def list_objects(self, params: Mapping[str, Any]) -> Iterable:
return self._api.account.get_custom_audiences(params=params)
class Ads(FBMarketingIncrementalStream):
"""doc: https://developers.facebook.com/docs/marketing-api/reference/adgroup"""
entity_prefix = "ad"
def list_objects(self, params: Mapping[str, Any]) -> Iterable:
return self._api.account.get_ads(params=params)
class AdSets(FBMarketingIncrementalStream):
"""doc: https://developers.facebook.com/docs/marketing-api/reference/ad-campaign"""
entity_prefix = "adset"
def list_objects(self, params: Mapping[str, Any]) -> Iterable:
return self._api.account.get_ad_sets(params=params)
class Campaigns(FBMarketingIncrementalStream):
"""doc: https://developers.facebook.com/docs/marketing-api/reference/ad-campaign-group"""
entity_prefix = "campaign"
def list_objects(self, params: Mapping[str, Any]) -> Iterable:
return self._api.account.get_campaigns(params=params)
class Activities(FBMarketingIncrementalStream):
"""doc: https://developers.facebook.com/docs/marketing-api/reference/ad-activity"""
entity_prefix = "activity"
cursor_field = "event_time"
primary_key = None
def list_objects(self, fields: List[str], params: Mapping[str, Any]) -> Iterable:
return self._api.account.get_activities(fields=fields, params=params)
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""Main read method used by CDK"""
loaded_records_iter = self.list_objects(fields=self.fields, params=self.request_params(stream_state=stream_state))
for record in loaded_records_iter:
if isinstance(record, AbstractObject):
yield record.export_all_data() # convert FB object to dict
else:
                yield record  # execute_in_batch will emit dicts
def _state_filter(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
"""Additional filters associated with state if any set"""
state_value = stream_state.get(self.cursor_field)
since = self._start_date if not state_value else pendulum.parse(state_value)
potentially_new_records_in_the_past = self._include_deleted and not stream_state.get("include_deleted", False)
if potentially_new_records_in_the_past:
self.logger.info(f"Ignoring bookmark for {self.name} because of enabled `include_deleted` option")
since = self._start_date
return {"since": since.int_timestamp}
class Videos(FBMarketingReversedIncrementalStream):
"""See: https://developers.facebook.com/docs/marketing-api/reference/video"""
entity_prefix = "video"
def list_objects(self, params: Mapping[str, Any]) -> Iterable:
# Remove filtering as it is not working for this stream since 2023-01-13
return self._api.account.get_ad_videos(params=params, fields=self.fields)
class AdAccount(FBMarketingStream):
"""See: https://developers.facebook.com/docs/marketing-api/reference/ad-account"""
use_batch = False
enable_deleted = False
def get_task_permissions(self) -> Set[str]:
"""https://developers.facebook.com/docs/marketing-api/reference/ad-account/assigned_users/"""
res = set()
me = User(fbid="me", api=self._api.api)
for business_user in me.get_business_users():
assigned_users = self._api.account.get_assigned_users(params={"business": business_user["business"].get_id()})
for assigned_user in assigned_users:
if business_user.get_id() == assigned_user.get_id():
res.update(set(assigned_user["tasks"]))
return res
@cached_property
def fields(self) -> List[str]:
properties = super().fields
# https://developers.facebook.com/docs/marketing-apis/guides/javascript-ads-dialog-for-payments/
# To access "funding_source_details", the user making the API call must have a MANAGE task permission for
# that specific ad account.
if "funding_source_details" in properties and "MANAGE" not in self.get_task_permissions():
properties.remove("funding_source_details")
if "is_prepay_account" in properties and "MANAGE" not in self.get_task_permissions():
properties.remove("is_prepay_account")
return properties
def list_objects(self, params: Mapping[str, Any]) -> Iterable:
"""noop in case of AdAccount"""
return [FBAdAccount(self._api.account.get_id())]
class Images(FBMarketingReversedIncrementalStream):
"""See: https://developers.facebook.com/docs/marketing-api/reference/ad-image"""
def list_objects(self, params: Mapping[str, Any]) -> Iterable:
return self._api.account.get_ad_images(params=params, fields=self.fields)
def get_record_deleted_status(self, record) -> bool:
return record[AdImage.Field.status] == AdImage.Status.deleted
class AdsInsightsAgeAndGender(AdsInsights):
breakdowns = ["age", "gender"]
class AdsInsightsCountry(AdsInsights):
breakdowns = ["country"]
class AdsInsightsRegion(AdsInsights):
breakdowns = ["region"]
class AdsInsightsDma(AdsInsights):
breakdowns = ["dma"]
class AdsInsightsPlatformAndDevice(AdsInsights):
breakdowns = ["publisher_platform", "platform_position", "impression_device"]
# FB Async Job fails for unknown reason if we set other breakdowns
# my guess: it fails because of very large cardinality of result set (Eugene K)
action_breakdowns = ["action_type"]
class AdsInsightsActionType(AdsInsights):
breakdowns = []
action_breakdowns = ["action_type"]
class AdsInsightsActionCarouselCard(AdsInsights):
action_breakdowns = ["action_carousel_card_id", "action_carousel_card_name"]
class AdsInsightsActionConversionDevice(AdsInsights):
breakdowns = ["device_platform"]
action_breakdowns = ["action_type"]
class AdsInsightsActionProductID(AdsInsights):
breakdowns = ["product_id"]
action_breakdowns = []
class AdsInsightsActionReaction(AdsInsights):
action_breakdowns = ["action_reaction"]
class AdsInsightsActionVideoSound(AdsInsights):
action_breakdowns = ["action_video_sound"]
class AdsInsightsActionVideoType(AdsInsights):
action_breakdowns = ["action_video_type"]
class AdsInsightsDeliveryDevice(AdsInsights):
breakdowns = ["device_platform"]
action_breakdowns = ["action_type"]
class AdsInsightsDeliveryPlatform(AdsInsights):
breakdowns = ["publisher_platform"]
action_breakdowns = ["action_type"]
class AdsInsightsDeliveryPlatformAndDevicePlatform(AdsInsights):
breakdowns = ["publisher_platform", "device_platform"]
action_breakdowns = ["action_type"]
class AdsInsightsDemographicsAge(AdsInsights):
breakdowns = ["age"]
action_breakdowns = ["action_type"]
class AdsInsightsDemographicsCountry(AdsInsights):
breakdowns = ["country"]
action_breakdowns = ["action_type"]
class AdsInsightsDemographicsDMARegion(AdsInsights):
breakdowns = ["dma"]
action_breakdowns = ["action_type"]
class AdsInsightsDemographicsGender(AdsInsights):
breakdowns = ["gender"]
action_breakdowns = ["action_type"]
|
[
"noreply@github.com"
] |
thomas-vl.noreply@github.com
|
37ea73835115b9136162d3f4b7b8f19c2f974fe4
|
181154ddaaa87683ab72f9c42b86a4967c6d3db7
|
/dijkstra.py
|
d26cee17a0ed2fbd8d10f81448034d2a7e914f10
|
[] |
no_license
|
DiegoAlosilla/A
|
2b3db64af707eb07744bb69c0a47fd9455a457af
|
25a64964b48a68ca6cfe0daa97def4428f1ec764
|
refs/heads/master
| 2020-06-14T05:32:19.721385
| 2019-07-02T18:58:59
| 2019-07-02T18:58:59
| 194,918,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
import math
import heapq
#pack rodrigo
def dijkstra(G, s):
n = len(G)
visited = [False]*n
weights = [math.inf]*n
path = [None]*n
queue = []
weights[s] = 0
heapq.heappush(queue, (0, s))
while len(queue) > 0:
g, u = heapq.heappop(queue)
visited[u] = True
for v, w in G[u]:
if not visited[v]:
f = g + w
if f < weights[v]:
weights[v] = f
path[v] = u
heapq.heappush(queue, (f, v))
return path, weights
G = [[(1, 6), (3, 7)],
[(2, 5), (3, 8), (4, -4)],
[(1, -2), (4, 7)],
[(2, -3), (4, 9)],
[(0, 2)]]
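# Usage sketch (not part of the original file): run the search from vertex 0.
# Caveat: Dijkstra's algorithm assumes non-negative edge weights, but the
# sample graph above contains negative edges (e.g. (4, -4)), so on this
# particular input the returned distances are not guaranteed to be correct.
if __name__ == '__main__':
    path, weights = dijkstra(G, 0)
    print(path)
    print(weights)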
|
[
"noreply@github.com"
] |
DiegoAlosilla.noreply@github.com
|
00f77802c4c9209974a0f9ed5f236882c3816423
|
56656fb0293eca2776b570c84b79a800bd81832f
|
/logikart/shop/views.py
|
fc7527bf6426322ddc4f9c64bd989f840217dbe1
|
[] |
no_license
|
sumitkumar22299/logi-kart-
|
2e7c85104f4a62cc15297372fee82cc8247cd225
|
1e9a39f0eb4fa3dd033698739cd7def7d34c0d3c
|
refs/heads/main
| 2023-06-14T00:30:42.946808
| 2021-07-07T04:04:39
| 2021-07-07T04:04:39
| 383,063,487
| 0
| 1
| null | 2021-07-05T08:47:13
| 2021-07-05T08:09:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,783
|
py
|
from django.shortcuts import render
from .models import Product, Contact
from math import ceil
# Create your views here.
from django.http import HttpResponse
def index(request):
# products = Product.objects.all()
# print(products)
# n = len(products)
# nSlides = n//4 + ceil((n/4)-(n//4))
allProds = []
catprods = Product.objects.values('category', 'id')
cats = {item['category'] for item in catprods}
for cat in cats:
prod = Product.objects.filter(category=cat)
n = len(prod)
nSlides = n // 4 + ceil((n / 4) - (n // 4))
allProds.append([prod, range(1, nSlides), nSlides])
# params = {'no_of_slides':nSlides, 'range': range(1,nSlides),'product': products}
# allProds = [[products, range(1, nSlides), nSlides],
# [products, range(1, nSlides), nSlides]]
params = {'allProds':allProds}
return render(request, 'shop/index.html', params)
def about(request):
return render(request, 'shop/about.html')
def contact(request):
if request.method=="POST":
name = request.POST.get('name', '')
email = request.POST.get('email', '')
phone = request.POST.get('phone', '')
desc = request.POST.get('desc', '')
contact = Contact(name=name, email=email, phone=phone, desc=desc)
contact.save()
return render(request, 'shop/contact.html')
def tracker(request):
return render(request, 'shop/tracker.html')
def search(request):
return render(request, 'shop/search.html')
def productView(request, myid):
# Fetch the product using the id
product = Product.objects.filter(id=myid)
return render(request, 'shop/prodView.html', {'product':product[0]})
def checkout(request):
return render(request, 'shop/checkout.html')
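# Hypothetical shop/urls.py sketch (not part of this file) showing one way
# these views could be routed; the URL names below are assumptions:
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('about/', views.about, name='about'),
#     path('contact/', views.contact, name='contact'),
#     path('products/<int:myid>/', views.productView, name='product_view'),
#     path('checkout/', views.checkout, name='checkout'),
# ]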
|
[
"sumitkumar22299@Gmail.com"
] |
sumitkumar22299@Gmail.com
|
08ef0ce795a6ca63ba5977fa4b11d41869236e0a
|
2ac7b97e702a40bd256d8788309f312951130514
|
/chapter01/input1.py
|
de38f56cd30004669b88e901ff2d81068fcadbdb
|
[] |
no_license
|
ZeroMin-K/Doit_Algorithm
|
62991700d70649e8fe72f0fccc50699092526826
|
51d9738fcc1a9452625fc25daec0f0ca5274cde2
|
refs/heads/main
| 2023-03-28T13:50:46.582142
| 2021-03-29T12:45:13
| 2021-03-29T12:45:13
| 328,176,294
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
# input name and greet
print("input name:", end=' ')
name = input()
print(f"Greeting {name}")
|
[
"noreply@github.com"
] |
ZeroMin-K.noreply@github.com
|
efe534ee7957dfd1a6d79dee5373abd183277a49
|
cc0e9f312355cf296dd4c1a6d82597848e11e9cd
|
/python_dailies/d10/employee.py
|
c867be32e7caf4130cfd42f4e881d7b5e02f82db
|
[] |
no_license
|
atowneND/paradigms
|
3dc77016290e4e223fab1c6532a645467c346fbe
|
1bfca0dbd07ce9588b8774d11d1658eb93325aa7
|
refs/heads/master
| 2021-01-10T14:01:08.113898
| 2016-04-28T04:08:59
| 2016-04-28T04:08:59
| 51,034,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
class Employee:
next_employee_number = 0
    def __init__(self, name, title, dept, salary):
        # Store the auto-incremented id on the instance; the original assigned
        # it to a local variable that was immediately discarded
        self.employee_number = Employee.next_employee_number
        Employee.next_employee_number += 1
self.name = name
self.title = title
self.dept = dept
self.salary = salary
def __lt__(self, other):
return self.salary < other.salary
def __gt__(self, other):
return self.salary > other.salary
def __eq__(self, other):
return self.salary == other.salary
def getName(self):
return self.name
def getTitle(self):
return self.title
def getDept(self):
return self.dept
def getSalary(self):
return self.salary
def setName(self,newval):
self.name = newval
def setTitle(self,newval):
self.title = newval
def setDept(self,newval):
self.dept = newval
def setSalary(self,newval):
self.salary = newval
def giveRaise(self,percentage):
self.salary = self.salary*(1.0+(percentage*1.0)/100.)
    def __str__(self):
        # __str__ must return a string rather than print one
        return "[Employee name: %s, %s, %s, $%0.2f]" % (self.name.upper(), self.title.upper(), self.dept.upper(), self.salary)
#######################################
#if __name__=="__main__":
# print "hi"
# e = Employee("a",'woo',"wut",10000)
# print e.next_employee_number
# print e.getSalary()
# e.giveRaise(10)
# print e.getSalary()
# e.__str__()
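#######################################
# Sorting sketch (hypothetical, not in the original file): the rich
# comparison methods above make employees orderable by salary, e.g.
#
# staff = [Employee("ann", "dev", "eng", 50000.0),
#          Employee("bob", "qa", "eng", 40000.0)]
# staff.sort()  # ascending salary via __lt__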
|
[
"atowne@nd.edu"
] |
atowne@nd.edu
|
972808daf49ef962ecfe264fbbc399342c5d90f5
|
3be88b3343d3ae4be17eb39600e6ffe3ed8c8389
|
/devito/ir/stree/algorithms.py
|
cb3003e2b9e1333ad0670d77eae1d8d653395bfc
|
[
"MIT"
] |
permissive
|
pnmoralesh/Devito
|
3938c79250784c2bf8f4c1f1151430d5100b66f9
|
1e619c0208ecf5f3817d614cd49b8df89d0beb22
|
refs/heads/master
| 2023-04-03T05:00:47.644490
| 2021-03-16T11:40:46
| 2021-03-16T11:40:46
| 348,607,884
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,428
|
py
|
from anytree import findall
from devito.ir.stree.tree import (ScheduleTree, NodeIteration, NodeConditional,
NodeSync, NodeExprs, NodeSection, NodeHalo, insert)
from devito.ir.support import SEQUENTIAL, IterationSpace
from devito.mpi import HaloScheme, HaloSchemeException
from devito.parameters import configuration
from devito.tools import Bunch, DefaultOrderedDict, flatten
__all__ = ['stree_build']
def stree_build(clusters):
"""
Create a ScheduleTree from a ClusterGroup.
"""
# ClusterGroup -> ScheduleTree
stree = stree_schedule(clusters)
# Add in section nodes
stree = stree_section(stree)
# Add in halo update nodes
stree = stree_make_halo(stree)
return stree
def stree_schedule(clusters):
"""
Arrange an iterable of Clusters into a ScheduleTree.
"""
stree = ScheduleTree()
prev = None
mapper = DefaultOrderedDict(lambda: Bunch(top=None, bottom=None))
def attach_metadata(cluster, d, tip):
if d in cluster.guards:
tip = NodeConditional(cluster.guards[d], tip)
if d in cluster.syncs:
tip = NodeSync(cluster.syncs[d], tip)
return tip
for c in clusters:
pointers = list(mapper)
index = 0
tip = stree
for it0, it1 in zip(c.itintervals, pointers):
if it0 != it1:
break
index += 1
d = it0.dim
# The reused sub-trees might acquire some new sub-iterators
mapper[it0].top.ispace = IterationSpace.union(mapper[it0].top.ispace,
c.ispace.project([d]))
# Different guards or syncops cannot be further nested
if c.guards.get(d) != prev.guards.get(d) or \
c.syncs.get(d) != prev.syncs.get(d):
tip = mapper[it0].top
tip = attach_metadata(c, d, tip)
mapper[it0].bottom = tip
break
else:
tip = mapper[it0].bottom
# Nested sub-trees, instead, will not be used anymore
for it in pointers[index:]:
mapper.pop(it)
# Add in Iterations, Conditionals, and Syncs
for it in c.itintervals[index:]:
d = it.dim
tip = NodeIteration(c.ispace.project([d]), tip, c.properties.get(d))
mapper[it].top = tip
tip = attach_metadata(c, d, tip)
mapper[it].bottom = tip
# Add in Expressions
NodeExprs(c.exprs, c.ispace, c.dspace, c.ops, c.traffic, tip)
# Prepare for next iteration
prev = c
return stree
def stree_make_halo(stree):
"""
Add NodeHalos to a ScheduleTree. A NodeHalo captures the halo exchanges
that should take place before executing the sub-tree; these are described
by means of a HaloScheme.
"""
# Build a HaloScheme for each expression bundle
halo_schemes = {}
for n in findall(stree, lambda i: i.is_Exprs):
try:
halo_schemes[n] = HaloScheme(n.exprs, n.ispace)
except HaloSchemeException as e:
if configuration['mpi']:
raise RuntimeError(str(e))
# Split a HaloScheme based on where it should be inserted
# For example, it's possible that, for a given HaloScheme, a Function's
# halo needs to be exchanged at a certain `stree` depth, while another
# Function's halo needs to be exchanged before some other nodes
mapper = {}
for k, hs in halo_schemes.items():
for f, v in hs.fmapper.items():
spot = k
ancestors = [n for n in k.ancestors if n.is_Iteration]
for n in ancestors:
# Place the halo exchange right before the first
# distributed Dimension which requires it
if any(i.dim in n.dim._defines for i in v.halos):
spot = n
break
mapper.setdefault(spot, []).append(hs.project(f))
# Now fuse the HaloSchemes at the same `stree` depth and perform the insertion
for spot, halo_schemes in mapper.items():
insert(NodeHalo(HaloScheme.union(halo_schemes)), spot.parent, [spot])
return stree
def stree_section(stree):
"""
Add NodeSections to a ScheduleTree. A NodeSection, or simply "section",
defines a sub-tree with the following properties:
* The root is a node of type NodeSection;
* The immediate children of the root are nodes of type NodeIteration;
* The Dimensions of the immediate children are either:
* identical, OR
* different, but all of type SubDimension;
* The Dimension of the immediate children cannot be a TimeDimension.
"""
class Section(object):
def __init__(self, node):
self.parent = node.parent
try:
self.dim = node.dim
except AttributeError:
self.dim = None
self.nodes = [node]
def is_compatible(self, node):
return self.parent == node.parent and self.dim.root == node.dim.root
# Search candidate sections
sections = []
for i in range(stree.height):
# Find all sections at depth `i`
section = None
for n in findall(stree, filter_=lambda n: n.depth == i):
if any(p in flatten(s.nodes for s in sections) for p in n.ancestors):
# Already within a section
continue
elif n.is_Sync:
# SyncNodes are self-contained
sections.append(Section(n))
section = None
elif n.is_Iteration:
if n.dim.is_Time and SEQUENTIAL in n.properties:
# If n.dim.is_Time, we end up here in 99.9% of the cases.
# Sometimes, however, time is a PARALLEL Dimension (e.g.,
# think of `norm` Operators)
section = None
elif section is None or not section.is_compatible(n):
section = Section(n)
sections.append(section)
else:
section.nodes.append(n)
else:
section = None
# Transform the schedule tree by adding in sections
for i in sections:
insert(NodeSection(), i.parent, i.nodes)
return stree
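# Usage sketch: `stree_build` is this module's only public entry point (see
# `__all__`). Assuming `clusters` is a ClusterGroup produced by Devito's
# clustering pass, which lives outside this module:
#
# stree = stree_build(clusters)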
|
[
"f.luporini12@imperial.ac.uk"
] |
f.luporini12@imperial.ac.uk
|
6bd632262a2e99b1d09b8b51534e09364716dd60
|
56b1569a62c6a155ce9cf4b8059bd085848dd859
|
/Python/search/print_path.py
|
1f3ff3089c3f877da624f629274af167bd846224
|
[] |
no_license
|
Marius-Juston/Advanced-Autonomous-Vehicule
|
1485ccc1a3dafbb875f845b2ba00cb05c6d6ca40
|
7f188428aafe0c0dfff75dd8567199c7067be17d
|
refs/heads/master
| 2022-11-15T04:52:19.713449
| 2020-07-10T22:09:04
| 2020-07-10T22:09:04
| 266,609,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,713
|
py
|
# -----------
# User Instructions:
#
# Modify the the search function so that it returns
# a shortest path as follows:
#
# [['>', 'v', ' ', ' ', ' ', ' '],
# [' ', '>', '>', '>', '>', 'v'],
# [' ', ' ', ' ', ' ', ' ', 'v'],
# [' ', ' ', ' ', ' ', ' ', 'v'],
# [' ', ' ', ' ', ' ', ' ', '*']]
#
# Where '>', '<', '^', and 'v' refer to right, left,
# up, and down motions. Note that the 'v' should be
# lowercase. '*' should mark the goal cell.
#
# You may assume that all test cases for this function
# will have a path from init to goal.
# ----------
from pprint import pprint
grid = [[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 0]]
init = [0, 0]
goal = [len(grid) - 1, len(grid[0]) - 1]
cost = 1
delta = [[-1, 0], # go up
[0, -1], # go left
[1, 0], # go down
[0, 1]] # go right
delta_name = ['^', '<', 'v', '>']  # aligned with delta: up, left, down, right
def search(grid, init, goal, cost):
# ----------------------------------------
# modify code below
# ----------------------------------------
expand = [[' ' for _ in range(len(grid[0]))] for _ in range(len(grid))]
index = 0
searched = {tuple(init): None}
open = {(0, init[0], init[1]), }
while len(open) > 0:
cell = min(open, key=lambda x: x[0])
open.remove(cell)
g, x, y = cell
index += 1
for (d_x, d_y) in delta:
x_n = x + d_x
y_n = y + d_y
if (x_n, y_n) not in searched and 0 <= x_n < len(grid) and 0 <= y_n < len(grid[0]) and grid[x_n][
y_n] == 0:
path = (g + cost, x_n, y_n)
open.add(path)
searched[(x_n, y_n)] = (x, y)
if x_n == goal[0] and y_n == goal[1]:
expand[x_n][y_n] = '*'
previous = (x_n, y_n)
while not previous == tuple(init):
next_point = searched[previous]
index_x = (previous[0] - next_point[0])
index_y = (previous[1] - next_point[1])
if index_x > 0:
expand[next_point[0]][next_point[1]] = 'v'
elif index_x < 0:
expand[next_point[0]][next_point[1]] = '^'
elif index_y < 0:
expand[next_point[0]][next_point[1]] = '<'
else:
expand[next_point[0]][next_point[1]] = '>'
previous = next_point
return expand
return expand
path = search(grid, init, goal, cost)
pprint(path)
|
[
"Marius.juston@hotmail.fr"
] |
Marius.juston@hotmail.fr
|
169a03b6a286b98dc2a18e031348a396fa9db442
|
ffb3027c11db1b51569414815bcae0066c00c37f
|
/scripts/hzz2l2q/DelPanj/forDaniel.py
|
3c3e1d8eeda4b95e8c4f21cbeb9ca889fa6c0768
|
[] |
no_license
|
syuvivida/usercode
|
af8e4eba513a4791aa27d6e8075c111cf521eb8a
|
54c57e39fa4765a7ead478bbf34e302c7df6d9d5
|
refs/heads/master
| 2020-06-05T19:41:20.506171
| 2016-12-14T17:05:38
| 2016-12-14T17:05:38
| 11,925,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,948
|
py
|
import FWCore.ParameterSet.Config as cms
isMergedJet = True
## Below are default values for the names of higgs collections
## and whether the analysis is a merged-jet analysis or not
higgsCollectionEE = "hzzeejj:h"
higgsCollectionMM = "hzzmmjj:h"
MERGED = False
if isMergedJet:
higgsCollectionEE = "hzzee1j:h"
higgsCollectionMM = "hzzmm1j:h"
MERGED = True
process = cms.Process("COMBINE")
#runOnMC = False # to run on DATA
runOnMC = True
## MessageLogger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:/afs/cern.ch/work/s/syu/h2l2qSkimData.root'
# 'file:/data4/syu/patsample/53X_MC/h2l2qSkimData_10_1_CL3.root'
# 'file:/scratch/syu/update_PU/CMSSW_5_3_3_patch3/src/runJob/testData.root'
)
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1))
process.load("RecoBTag.PerformanceDB.PoolBTagPerformanceDB062012")
process.load("RecoBTag.PerformanceDB.BTagPerformanceDB062012")
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True))
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
baseJetSel = cms.PSet(
Jets=cms.InputTag("cleanPatJetsNoPUIsoLept")
)
from DelPanj.TreeMaker.eSel2012HZZ_cff import *
from DelPanj.TreeMaker.muSel2012HZZ_cff import *
process.tree = cms.EDAnalyzer(
'TreeMaker',
fillPUweightInfo_ = cms.bool(True),
fillEventInfo_ = cms.bool(True),
fillGenInfo_ = cms.bool(True),
fillMuonInfo_ = cms.bool(True),
fillElecInfo_ = cms.bool(True),
fillElecIsoInfo_ = cms.bool(False),
fillJetInfo_ = cms.bool(True),
fillMetInfo_ = cms.bool(False),
fillTrigInfo_ = cms.bool(False),
fillPhotInfo_ = cms.bool(False),
fillZZInfo_ = cms.bool(True),
# hzzeejjTag = cms.InputTag("hzzeejj:h"),
# hzzmmjjTag = cms.InputTag("hzzmmjj:h"),
hzzeejjTag = cms.InputTag(higgsCollectionEE),
hzzmmjjTag = cms.InputTag(higgsCollectionMM),
merged_ = cms.bool(MERGED),
genPartLabel = cms.InputTag("genParticles"),
patMuons = cms.InputTag("userDataSelectedMuons"),
patElectrons = cms.InputTag("userDataSelectedElectrons"),
e2012IDSet = eSel2012HZZ,
mu2012IDSet = muSel2012HZZ,
eleRhoIso = cms.InputTag("kt6PFJets","rho"),
muoRhoIso = cms.InputTag("kt6PFJetsCentralNeutral", "rho"),
patMet = cms.InputTag("patMETs"),
beamSpotLabel = cms.InputTag("offlineBeamSpot"),
patJetPfAk05 = baseJetSel,
outFileName = cms.string('outputFileName.root')
)
process.counter_original = cms.EDAnalyzer('MyEventCounter',
instance = cms.int32(1)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string("hzz2l2q.root"),
closeFileFast = cms.untracked.bool(True)
)
process.p = cms.Path(
process.counter_original*
process.tree##Trigger Applied.
)
|
[
""
] | |
b0484d6fe57864f6d725447f7223632ceda1c563
|
22dd89c57fcff91bd577df8fa4981c04497be9d6
|
/restapis/restapis/urls.py
|
9269905f7e48f192235108400550c3ab60a60794
|
[] |
no_license
|
kinjal1993/DjangoRestDemo
|
bf060e6ee7ae74a2738afd9f7a576b8d007867f6
|
5c7e23504557464fec771435adcc336b70663d6b
|
refs/heads/main
| 2023-05-02T20:41:06.468483
| 2021-05-28T09:12:09
| 2021-05-28T09:12:09
| 370,721,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('apis.urls')),
]
|
[
"kinjal.pathak20061993@gmail.com"
] |
kinjal.pathak20061993@gmail.com
|
ce9f5d635ff6a4f79ab49f9bf5dbf9ef502906f8
|
a84901c27e7ed258785905d88d08c7ebe68afb8e
|
/testing/test_base_processing.py
|
38b4f4e0439d15cd1ba431fbb3b2ddd51a170df0
|
[] |
no_license
|
Ichinaru/slideSelector
|
7cc27fddc68f9baed7a9a671e65c8ca7260ab070
|
7e196ddd8ef50fe00c1d8c82091a55dfb935c6ee
|
refs/heads/master
| 2021-01-10T20:43:45.687687
| 2011-01-14T14:02:03
| 2011-01-14T14:02:03
| 841,114
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
__author__ = 'Administrator'
import unittest
from numpy import zeros_like
from numpy.testing import assert_array_equal
from src.processing.base_processing import BaseExporter
from src.annotations import get_annotation_list
annotation_folder_test = 'C:/Data'
annotation_file_test = 'C:/Data/2.ndpi.ndpa'
class TestBaseExporter(unittest.TestCase):
def test_init(self):
be = BaseExporter(annotation_file_test)
self.assertEqual(be.image_file, 'C:/Data/2.ndpi')
self.assertEqual(be.export_folder, 'C:/Data/2')
fp = be.annotations
fp_pl = [ann.point_list for ann in fp]
fa = get_annotation_list(annotation_file_test)
fa_pl = [ann.point_list for ann in fa]
for a,b in zip(fp_pl, fa_pl):
assert_array_equal(a,b)
if __name__ == '__main__':
unittest.main()
|
[
"xmoleslo@gmail.com"
] |
xmoleslo@gmail.com
|
ee5333aa77f9785064d17219e0f4de854bba55a4
|
37ecbb099d5d4452af91847a88c9fa7efd673a61
|
/05_multiplication.py
|
dea80dedb0636d90e8f01678489f0bb73e7756d7
|
[] |
no_license
|
jaymitchell/miniflow
|
570ce29182385ea8c6e664b2afc87881c472cc06
|
bb716d5f90ebfef22a8519bbd0c7f1525f32b2bb
|
refs/heads/master
| 2021-01-19T11:32:25.311675
| 2017-02-19T19:30:45
| 2017-02-19T19:30:45
| 82,252,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
"""
No need to change anything here!
If all goes well, this should work after you
modify the Mul class in miniflow.py.
"""
from miniflow import *
x, y, z = Input(), Input(), Input()
f = Mul(x, y, z)
feed_dict = {x: 4, y: 5, z: 10}
graph = topological_sort(feed_dict)
output = forward_pass(f, graph)
# should output 200
print("{} * {} * {} = {} (according to miniflow)".format(feed_dict[x], feed_dict[y], feed_dict[z], output))
|
[
"jaybmitchell@gmail.com"
] |
jaybmitchell@gmail.com
|
e799436d7ed93eb4507f35eac9b3e23398b0c220
|
a17278755b413858e61159593d733fac95f6d00c
|
/tests/cmake/test_unquoted_args.py
|
26d7c5aeb16960ea3bebb653a8b2cb73b4a4ff7e
|
[
"MIT"
] |
permissive
|
arjo129/ros_build_assistant
|
80ace56875ed7c8da61290745e697693b8497d4e
|
9c8420230713a43f73390d75d19bd7a7a751474d
|
refs/heads/master
| 2023-01-22T15:36:11.585367
| 2020-12-06T09:09:19
| 2020-12-06T09:09:19
| 318,992,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,212
|
py
|
from ros_build_assistant.parser.cmake.grammar import UnQuotedArgument, CombinatorState
import unittest
def parse_item(inp):
ba = UnQuotedArgument()
for x in inp:
res, data = ba.next_char(x)
return res,data
class TestUnquotedArguments(unittest.TestCase):
def test_unquoted_argument_default(self):
res, data = parse_item("helloworld ")
self.assertEqual(res, CombinatorState.FINISHED)
self.assertDictEqual(data, {'body': 'helloworld', 'type': 'unquoted_argument'})
def test_unquoted_argument_with_embedded_quote(self):
res, data = parse_item("hello\" \"world;")
self.assertEqual(res, CombinatorState.FINISHED)
self.assertDictEqual(data, {'body': 'hello\" \"world', 'type': 'unquoted_argument'})
def test_unquoted_argument_escaped(self):
res, data = parse_item("hello\\ world ")
self.assertEqual(res, CombinatorState.FINISHED)
self.assertDictEqual(data, {'body': 'hello\\ world', 'type': 'unquoted_argument'})
def test_idempotency(self):
inp = "hello\\ world "
res, data = parse_item(inp)
out = UnQuotedArgument().code_gen(data)
self.assertEqual(inp[:-1], out)
|
[
"arjo129@gmail.com"
] |
arjo129@gmail.com
|
6d1a80fda589561046176a5ae996e52b2423e532
|
1495f47b20b963ffbe12ebb6aac1f9850718d03d
|
/apps/user/serializers.py
|
d2c34316e93d0ac1180700c216610f4fe4cba31c
|
[] |
no_license
|
clq0528/callQ-backend-master
|
6223050ff1174fb7f0448e9a8fff06a94c4b2f45
|
de0b83b22464e735f209269d7cd009788d1785d4
|
refs/heads/master
| 2022-12-23T20:20:17.928538
| 2019-11-09T14:06:42
| 2019-11-09T14:06:42
| 220,650,843
| 0
| 0
| null | 2022-11-22T04:18:18
| 2019-11-09T14:04:32
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,443
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/10/9 15:18
# @Author : qinmin
# @File : serializers.py
import re
from datetime import datetime,timedelta
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from django.contrib.auth import get_user_model
from backend.settings import REGEX_MOBILE
from .models import VerifyCode
User = get_user_model()
class SmsSerializer(serializers.Serializer):
mobile = serializers.CharField(max_length=11)
def validate_mobile(self,mobile):
"""
验证手机号码
:param mobile:
:return:
"""
#手机是否注册
if User.objects.filter(mobile=mobile).count():
raise serializers.ValidationError("用户已经注册")
#验证手机号码是否合法
if not re.match(REGEX_MOBILE,mobile):
raise serializers.ValidationError("手机号码非法")
#限制验证码发送频率
one_mintes_ago = datetime.now() - timedelta(hours=0,minutes=1,seconds=0)
if VerifyCode.objects.filter(add_time__gt=one_mintes_ago,mobile=mobile).count():
raise serializers.ValidationError("请求过于频繁,请稍后再试")
return mobile
class UserDetailSerializer(serializers.ModelSerializer):
"""
用户详情序列化类
"""
class Meta:
model = User
fields = ("name","gender","birthday","email","mobile")
class UserRegSerializer(serializers.ModelSerializer):
    code = serializers.CharField(required=True, write_only=True, max_length=6, min_length=6, label="Verification code",
                                 error_messages={
                                     "blank": "Please enter the verification code",
                                     "required": "Please enter the verification code",
                                     "max_length": "Invalid verification code format",
                                     "min_length": "Invalid verification code format"
                                 }, help_text="Verification code")
    username = serializers.CharField(label="Username", help_text="Username", required=True, allow_blank=False,
                                     validators=[UniqueValidator(queryset=User.objects.all(), message="User already exists")])
    password = serializers.CharField(
        style={'input_type': 'password'}, help_text="Password", label="Password", write_only=True
    )
def create(self, validated_data):
user = super(UserRegSerializer,self).create(validated_data=validated_data)
user.set_password(validated_data["password"])
user.save()
return user
    def validate_code(self, code):
        # DRF only runs field validators named validate_<field>; the original
        # `validated_code` spelling meant this check was never invoked
        verify_records = VerifyCode.objects.filter(mobile=self.initial_data["username"]).order_by("-add_time")
        if verify_records:
            last_record = verify_records[0]
            five_minutes_ago = datetime.now() - timedelta(hours=0, minutes=5, seconds=0)
            if five_minutes_ago > last_record.add_time:
                raise serializers.ValidationError("Verification code has expired")
            if last_record.code != code:
                raise serializers.ValidationError("Incorrect verification code")
        else:
            raise serializers.ValidationError("Incorrect verification code")
        return code
def validate(self, attrs):
attrs["mobile"] = attrs["username"]
del attrs["code"]
return attrs
class Meta:
model = User
fields = ("username","code","mobile","password")
|
[
"w-feng.jun@rootcloud.com"
] |
w-feng.jun@rootcloud.com
|
a88f91e9bb9d42632c237da6e3a3a3b442862d09
|
da3a1da66289ee08c68072543be6022393edabde
|
/assets/tuned/daemon/tuned/profiles/loader.py
|
050c1d8d410109202da7b24197afe98058cd4ff2
|
[
"GPL-2.0-or-later",
"CC-BY-SA-3.0",
"GPL-2.0-only",
"Apache-2.0"
] |
permissive
|
osherdp/cluster-node-tuning-operator
|
83e8c6ae5301f1a4fafe421c372e81653f6be78f
|
8134c31d54a10f489e1e1955e03e83e848394496
|
refs/heads/master
| 2023-04-01T03:30:17.739249
| 2020-12-03T20:48:19
| 2020-12-03T20:48:19
| 319,049,275
| 0
| 0
|
Apache-2.0
| 2020-12-06T14:13:23
| 2020-12-06T14:13:22
| null |
UTF-8
|
Python
| false
| false
| 4,570
|
py
|
import tuned.profiles.profile
import tuned.profiles.variables
from configobj import ConfigObj, ConfigObjError
import tuned.consts as consts
import os.path
import collections
import tuned.logs
import re
from tuned.profiles.exceptions import InvalidProfileException
log = tuned.logs.get()
class Loader(object):
"""
Profiles loader.
"""
__slots__ = ["_profile_locator", "_profile_merger", "_profile_factory", "_global_config", "_variables"]
def __init__(self, profile_locator, profile_factory, profile_merger, global_config, variables):
self._profile_locator = profile_locator
self._profile_factory = profile_factory
self._profile_merger = profile_merger
self._global_config = global_config
self._variables = variables
def _create_profile(self, profile_name, config):
return tuned.profiles.profile.Profile(profile_name, config)
@classmethod
def safe_name(cls, profile_name):
return re.match(r'^[a-zA-Z0-9_.-]+$', profile_name)
@property
def profile_locator(self):
return self._profile_locator
def load(self, profile_names):
if type(profile_names) is not list:
profile_names = profile_names.split()
profile_names = list(filter(self.safe_name, profile_names))
if len(profile_names) == 0:
raise InvalidProfileException("No profile or invalid profiles were specified.")
if len(profile_names) > 1:
log.info("loading profiles: %s" % ", ".join(profile_names))
else:
log.info("loading profile: %s" % profile_names[0])
profiles = []
processed_files = []
self._load_profile(profile_names, profiles, processed_files)
if len(profiles) > 1:
final_profile = self._profile_merger.merge(profiles)
else:
final_profile = profiles[0]
final_profile.name = " ".join(profile_names)
if "variables" in final_profile.units:
self._variables.add_from_cfg(final_profile.units["variables"].options)
del(final_profile.units["variables"])
# FIXME hack, do all variable expansions in one place
self._expand_vars_in_devices(final_profile)
self._expand_vars_in_regexes(final_profile)
return final_profile
def _expand_vars_in_devices(self, profile):
for unit in profile.units:
profile.units[unit].devices = self._variables.expand(profile.units[unit].devices)
def _expand_vars_in_regexes(self, profile):
for unit in profile.units:
profile.units[unit].cpuinfo_regex = self._variables.expand(profile.units[unit].cpuinfo_regex)
profile.units[unit].uname_regex = self._variables.expand(profile.units[unit].uname_regex)
def _load_profile(self, profile_names, profiles, processed_files):
for name in profile_names:
filename = self._profile_locator.get_config(name, processed_files)
if filename is None:
raise InvalidProfileException("Cannot find profile '%s' in '%s'." % (name, list(reversed(self._profile_locator._load_directories))))
processed_files.append(filename)
config = self._load_config_data(filename)
profile = self._profile_factory.create(name, config)
if "include" in profile.options:
include_names = re.split(r"\b\s*[,;]\s*", self._variables.expand(profile.options.pop("include")))
self._load_profile(include_names, profiles, processed_files)
profiles.append(profile)
def _expand_profile_dir(self, profile_dir, string):
return re.sub(r'(?<!\\)\$\{i:PROFILE_DIR\}', profile_dir, string)
def _load_config_data(self, file_name):
try:
config_obj = ConfigObj(file_name, raise_errors = True, list_values = False, interpolation = False)
except ConfigObjError as e:
raise InvalidProfileException("Cannot parse '%s'." % file_name, e)
config = collections.OrderedDict()
for section in list(config_obj.keys()):
config[section] = collections.OrderedDict()
try:
keys = list(config_obj[section].keys())
except AttributeError:
raise InvalidProfileException("Error parsing section '%s' in file '%s'." % (section, file_name))
for option in keys:
config[section][option] = config_obj[section][option]
dir_name = os.path.dirname(file_name)
# TODO: Could we do this in the same place as the expansion of other functions?
for section in config:
for option in config[section]:
config[section][option] = self._expand_profile_dir(dir_name, config[section][option])
# TODO: HACK, this needs to be solved in a better way (better config parser)
for unit_name in config:
if "script" in config[unit_name] and config[unit_name].get("script", None) is not None:
script_path = os.path.join(dir_name, config[unit_name]["script"])
config[unit_name]["script"] = [os.path.normpath(script_path)]
return config
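# Usage sketch (illustrative only; in TuneD the collaborators below are wired
# up by the daemon's dependency injection, and the variable names here are
# assumptions):
#
# loader = Loader(profile_locator, profile_factory, profile_merger,
#                 global_config, variables)
# profile = loader.load("throughput-performance")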
|
[
"jmencak@users.noreply.github.com"
] |
jmencak@users.noreply.github.com
|
c6c02cdd6224c65352f710d16f9e022c41e3b450
|
a8c1c4306ff55219dd03f9370b844832a88d3ac8
|
/other/test/test_write_tfrecord.py
|
90615161d3bb3966f4b82ca27c6cd8222ae72598
|
[] |
no_license
|
Peilun-Li/distributed-Keras
|
0e84abd4acfb28c620c38330b4be31abd8793b62
|
95ab392e103e07ec9d600c9c463d0737b2b0224b
|
refs/heads/master
| 2021-06-16T06:25:01.682433
| 2017-05-09T14:40:04
| 2017-05-09T14:40:04
| 72,376,001
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,395
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MNIST data to TFRecords file format with Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import mnist
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' # MNIST filenames
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
FLAGS = None
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def convert_to(data_set, name):
"""Converts a dataset to tfrecords."""
images = data_set.images
labels = data_set.labels
num_examples = data_set.num_examples
if images.shape[0] != num_examples:
raise ValueError('Images size %d does not match label size %d.' %
(images.shape[0], num_examples))
rows = images.shape[1]
cols = images.shape[2]
depth = images.shape[3]
filename = os.path.join(FLAGS.directory, name + '.tfrecords')
print('Writing', filename)
writer = tf.python_io.TFRecordWriter(filename)
for index in range(num_examples):
image_raw = images[index].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'height': _int64_feature(rows),
'width': _int64_feature(cols),
'depth': _int64_feature(depth),
'label': _int64_feature(int(labels[index])),
'image_raw': _bytes_feature(image_raw)}))
writer.write(example.SerializeToString())
writer.close()
def main(unused_argv):
# Get the data.
data_sets = mnist.read_data_sets(FLAGS.directory,
dtype=tf.uint8,
reshape=False,
validation_size=FLAGS.validation_size)
# Convert to Examples and write the result to TFRecords.
convert_to(data_sets.train, 'train')
convert_to(data_sets.validation, 'validation')
convert_to(data_sets.test, 'test')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--directory',
type=str,
default='/tmp/data',
help='Directory to download data files and write the converted result'
)
parser.add_argument(
'--validation_size',
type=int,
default=5000,
help="""\
Number of examples to separate from the training data for the validation
set.\
"""
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run()
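# Read-back sketch using the matching TF1-era API (illustrative only; the
# output path assumes the default --directory):
#
# for serialized in tf.python_io.tf_record_iterator('/tmp/data/train.tfrecords'):
#     example = tf.train.Example.FromString(serialized)
#     label = example.features.feature['label'].int64_list.value[0]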
|
[
"peilunli1994@gmail.com"
] |
peilunli1994@gmail.com
|
03a733e629c37003b623c532ad69ab5714b4ebed
|
742f15ee3880306a946df7efee0020e42684b109
|
/out/oneoff/python/test/test_config_option.py
|
36c9d8ccaf041fe70916d5b260b8b0e7c020abf6
|
[] |
no_license
|
potiuk/airflow-api-clients
|
d0196f80caf6e6f4ecfa6b7c9657f241218168ad
|
325ba127f1e9aa808091916d348102844e0aa6c5
|
refs/heads/master
| 2022-09-14T00:40:28.592508
| 2020-05-31T10:05:42
| 2020-05-31T10:15:55
| 268,128,082
| 0
| 0
| null | 2020-05-30T17:28:04
| 2020-05-30T17:28:03
| null |
UTF-8
|
Python
| false
| false
| 1,384
|
py
|
# coding: utf-8
"""
Airflow API (Stable)
Apache Airflow management API. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: dev@airflow.apache.org
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import airflow_client
from airflow_client.models.config_option import ConfigOption # noqa: E501
from airflow_client.rest import ApiException
class TestConfigOption(unittest.TestCase):
"""ConfigOption unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ConfigOption
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = airflow_client.models.config_option.ConfigOption() # noqa: E501
if include_optional :
return ConfigOption(
key = '0',
value = '0'
)
else :
return ConfigOption(
)
def testConfigOption(self):
"""Test ConfigOption"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"kamil.bregula@polidea.com"
] |
kamil.bregula@polidea.com
|
f9fb3d39e27b883fc6a3d2df45aeb86fa1e59f87
|
0247b4427dd37569202594dd7bea3e9a7f2abf92
|
/projects/django/fupload/fup_app/serializers.py
|
ef304e88f918671977a238b019e3cd50a7dffa1e
|
[] |
no_license
|
DenisStepanoff/LP
|
0062caa26b1ded875752ebd897ae3881cdb56633
|
5a15df361c7754a09c12fc26a25b46b373bdcf11
|
refs/heads/master
| 2020-04-08T03:12:00.425492
| 2018-12-28T12:32:54
| 2018-12-28T12:32:54
| 158,965,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
from rest_framework import serializers
from .models import File
class FileSerializer(serializers.ModelSerializer):
    class Meta:
model = File
#fields = ('file', 'remark', 'timestamp')
fields = ('file', 'timestamp')
|
[
"dstepanov1630@gmail.com"
] |
dstepanov1630@gmail.com
|
5ba84dbef3b4021c39c9648302274af505faf852
|
e96a1ff8bbe1af069a850dba378118129f5aa612
|
/Stat/Help_Pos.py
|
0c0816bba31bfcbf13b842c4524fc1f53dc3ac8b
|
[] |
no_license
|
AaronWhy/Turiss
|
e51dac82a6d3511e5c2f1988929b7c330f67e3ee
|
6b1be44cd0993d0fd9580f18c56693c5c732bebf
|
refs/heads/master
| 2022-03-29T22:53:24.393133
| 2020-01-17T03:16:00
| 2020-01-17T03:16:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,952
|
py
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
class Comment(object):
def __init__(self, seq, score, help):
super(Comment, self).__init__()
self.seq = seq
self.score = score
self.help = help
def getKey(x):
return x.score * 100 + x.help
max_iter = 100
init_comments = 100
GT_data = pd.read_csv("../data/res.csv")
PR_data = pd.read_csv("../data/res.csv")
size_of_data = len(list(GT_data['Score'])) # size_of_data comments in the pool
# suppose we have list Seq, Score, and Help
pos_error = []
help_error = []
for iter in range(max_iter):
GT_list = []
PR_list = []
for i in range(init_comments): # randomly select init_comments comments
index = np.random.randint(size_of_data)
GT_list.append(Comment(GT_data['CleanedText'][index], GT_data['Score'][index], GT_data['NormalizedHelpfulness'][index]))
GT_list.sort(key=getKey, reverse=True)
PR_list = GT_list[:]
index = np.random.randint(size_of_data)
GT_add = Comment(GT_data['CleanedText'][index], GT_data['Score'][index], GT_data['NormalizedHelpfulness'][index])
GT_help = GT_data['NormalizedHelpfulness'][index]
GT_list.append(GT_add)
GT_list.sort(key=getKey, reverse=True)
GT_position = GT_list.index(GT_add)
GT_list.remove(GT_add)
PR_add = Comment(PR_data['CleanedText'][index], GT_data['Score'][index], PR_data['helpfulness_preds'][index])
PR_help = PR_data['helpfulness_preds'][index]
PR_list.append(PR_add)
PR_list.sort(key=getKey, reverse=True)
PR_position = PR_list.index(PR_add)
PR_list.remove(PR_add)
pos_error.append(GT_position-PR_position)
help_error.append(GT_help-PR_help)
# print(score_error)
plt.figure()
sns.kdeplot(help_error, pos_error, cmap="Blues", shade=True, shade_lowest=False)
plt.xlabel('Helpfulness error')
plt.ylabel('System error (position)')
plt.show()
|
[
"racoon727@racoon727deMacBook-Pro.local"
] |
racoon727@racoon727deMacBook-Pro.local
|
0981da73cff0fee83df38f71e17c2db6d0220801
|
976abd6cc78ba212850c6b1dc872d1bfbba9815d
|
/shortener/urls.py
|
30d70120022dad58c1feabb9fe8ca052eb9a2256
|
[] |
no_license
|
Likael/DjangoShortener
|
162460a5f87f8ea911ea9b51532b4028c81c895b
|
748ba313fd382513ba44336684e7bcee1ca4044e
|
refs/heads/master
| 2023-04-16T14:45:23.336281
| 2021-04-24T22:35:07
| 2021-04-24T22:35:07
| 358,045,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('create', views.create, name='create'),
path('<str:pk>', views.go, name='go')
]
|
[
"glasekkamil@gmail.com"
] |
glasekkamil@gmail.com
|
c877d9b518282909602e37d29fb09658411051d4
|
687928e5bc8d5cf68d543005bb24c862460edcfc
|
/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnclientlessaccesspolicy_binding.py
|
c34cfbf4b0f4ccbe51d39eb85d542913ee1c45ce
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] |
permissive
|
mbs91/nitro
|
c6c81665d6abd04de8b9f09554e5e8e541f4a2b8
|
be74e1e177f5c205c16126bc9b023f2348788409
|
refs/heads/master
| 2021-05-29T19:24:04.520762
| 2015-06-26T02:03:09
| 2015-06-26T02:03:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,814
|
py
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnvserver_vpnclientlessaccesspolicy_binding(base_resource) :
""" Binding class showing the vpnclientlessaccesspolicy that can be bound to vpnvserver.
"""
def __init__(self) :
self._policy = ""
self._priority = 0
self._bindpoint = ""
self._gotopriorityexpression = ""
self._acttype = 0
self._name = ""
self._secondary = False
self._groupextraction = False
self.___count = 0
@property
def priority(self) :
"""The priority, if any, of the VPN virtual server policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""The priority, if any, of the VPN virtual server policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Next priority expression.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Next priority expression.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policy(self) :
"""The name of the policy, if any, bound to the VPN virtual server.
"""
try :
return self._policy
except Exception as e:
raise e
@policy.setter
def policy(self, policy) :
"""The name of the policy, if any, bound to the VPN virtual server.
"""
try :
self._policy = policy
except Exception as e:
raise e
@property
def groupextraction(self) :
"""Binds the authentication policy to a tertiary chain which will be used only for group extraction. The user will not authenticate against this server, and this will only be called if primary and/or secondary authentication has succeeded.
"""
try :
return self._groupextraction
except Exception as e:
raise e
@groupextraction.setter
def groupextraction(self, groupextraction) :
"""Binds the authentication policy to a tertiary chain which will be used only for group extraction. The user will not authenticate against this server, and this will only be called if primary and/or secondary authentication has succeeded.
"""
try :
self._groupextraction = groupextraction
except Exception as e:
raise e
@property
def name(self) :
"""Name of the virtual server.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the virtual server.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def secondary(self) :
"""Binds the authentication policy as the secondary policy to use in a two-factor configuration. A user must then authenticate not only via a primary authentication method but also via a secondary authentication method. User groups are aggregated across both. The user name must be exactly the same for both authentication methods, but they can require different passwords.
"""
try :
return self._secondary
except Exception as e:
raise e
@secondary.setter
def secondary(self, secondary) :
"""Binds the authentication policy as the secondary policy to use in a two-factor configuration. A user must then authenticate not only via a primary authentication method but also via a secondary authentication method. User groups are aggregated across both. The user name must be exactly the same for both authentication methods, but they can require different passwords.
"""
try :
self._secondary = secondary
except Exception as e:
raise e
@property
def bindpoint(self) :
"""Bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE, ICA_REQUEST, OTHERTCP_REQUEST.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
"""Bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE, ICA_REQUEST, OTHERTCP_REQUEST
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def acttype(self) :
try :
return self._acttype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vpnvserver_vpnclientlessaccesspolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vpnvserver_vpnclientlessaccesspolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = vpnvserver_vpnclientlessaccesspolicy_binding()
updateresource.name = resource.name
updateresource.policy = resource.policy
updateresource.secondary = resource.secondary
updateresource.groupextraction = resource.groupextraction
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [vpnvserver_vpnclientlessaccesspolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policy = resource[i].policy
updateresources[i].secondary = resource[i].secondary
updateresources[i].groupextraction = resource[i].groupextraction
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = vpnvserver_vpnclientlessaccesspolicy_binding()
deleteresource.name = resource.name
deleteresource.policy = resource.policy
deleteresource.secondary = resource.secondary
deleteresource.groupextraction = resource.groupextraction
deleteresource.bindpoint = resource.bindpoint
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [vpnvserver_vpnclientlessaccesspolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policy = resource[i].policy
deleteresources[i].secondary = resource[i].secondary
deleteresources[i].groupextraction = resource[i].groupextraction
deleteresources[i].bindpoint = resource[i].bindpoint
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch vpnvserver_vpnclientlessaccesspolicy_binding resources.
"""
try :
obj = vpnvserver_vpnclientlessaccesspolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of vpnvserver_vpnclientlessaccesspolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = vpnvserver_vpnclientlessaccesspolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count vpnvserver_vpnclientlessaccesspolicy_binding resources configued on NetScaler.
"""
try :
obj = vpnvserver_vpnclientlessaccesspolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of vpnvserver_vpnclientlessaccesspolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = vpnvserver_vpnclientlessaccesspolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Staaddresstype:
IPV4 = "IPV4"
IPV6 = "IPV6"
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
ICA_REQUEST = "ICA_REQUEST"
OTHERTCP_REQUEST = "OTHERTCP_REQUEST"
class vpnvserver_vpnclientlessaccesspolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.vpnvserver_vpnclientlessaccesspolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vpnvserver_vpnclientlessaccesspolicy_binding = [vpnvserver_vpnclientlessaccesspolicy_binding() for _ in range(length)]
|
[
"bensassimaha@gmail.com"
] |
bensassimaha@gmail.com
|
ebab06ad0e2a42021a16e2dcb7c1071bdf73b56e
|
8f0a99b4fb7bdd542cf77897b2db36536bf368a8
|
/btcexchange/urls.py
|
ebaba73b13ba05797ab9de5eec2373b67877daa5
|
[] |
no_license
|
wasuaje/btcexchange
|
e9dc326cbe696b70e9c62eb614e4dd44439bcf83
|
b2f102fb5178ad428b80e66364dda6bdc0dbc215
|
refs/heads/main
| 2023-03-29T09:59:53.031999
| 2021-03-26T16:24:21
| 2021-03-26T16:24:21
| 351,843,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
"""btcexchange URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
import app.views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('^', include('django.contrib.auth.urls')),
url(r'^$', app.views.main, name='main'),
]
|
[
"wasuaje@gmail.com"
] |
wasuaje@gmail.com
|
aab46026352578a4da1faea5beb7e1274ec2ccc6
|
d432e50b7cf01e3d9efaf6fd698e8bc966a1d1da
|
/loaded-api/flask/lib/python3.7/encodings/cp869.py
|
ff8973cc4cac060792ee89db50cc007d5efeb2df
|
[] |
no_license
|
dylangrosz/Disarming_Loaded_Words
|
ab87a01c5ff3018891cdea7afa16fa3aca00cb45
|
d61129979b946c00277c50953b64b4161a59651c
|
refs/heads/master
| 2022-12-15T08:13:50.046628
| 2019-12-05T04:20:15
| 2019-12-05T04:20:15
| 214,930,325
| 0
| 0
| null | 2022-12-08T06:55:39
| 2019-10-14T02:26:02
|
Python
|
UTF-8
|
Python
| false
| false
| 108
|
py
|
C:/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/encodings/cp869.py
|
[
"dylangrosz@gmail.com"
] |
dylangrosz@gmail.com
|
07b32ab8ed6d8d8994575a024efc6cc37c3a066e
|
11f1d9a7d7a256f7dd5af472799dd5698a43c93a
|
/pythonbase/04_函数/hm_05_函数的参数.py
|
8232b7cdfc18e4bda1b09a342b30e5e524b4d081
|
[] |
no_license
|
hejusl/My_Learn
|
13933172a198e19ef68de684d4e79aac5c9b2652
|
f327905b133e60b5c18d79303361adef63c97266
|
refs/heads/master
| 2020-04-13T17:52:42.553492
| 2019-01-08T12:23:22
| 2019-01-08T12:23:22
| 163,358,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
# !/usr/local/Cellar/python3/3.6.3/bin
# coding=utf-8
# Once a function has been defined, it only means the function wraps a block of code
# The function will not run unless it is explicitly called
def sum_2_num(num1, num2):
    """Sum of two numbers"""
# num1 = 10
# num2 = 20
result = num1 + num2
print('%d + %d = %d' % (num1, num2, result))
sum_2_num(50, 20)
|
[
"44864501@qq.com"
] |
44864501@qq.com
|
0d699012b8eeb8b27f755199578dd7ae5f3a5d8d
|
43a1b4e79a80663e5237651f6630b2a87a7b6b24
|
/api/controllers/mutant_controller.py
|
2593fc1817d76a6de4b57f1fb19d76d523234dea
|
[] |
no_license
|
gusdiazml/magneto-api
|
f5ba9ed0e21fe32c9f56d4103a7ae029f0360eaa
|
bfe342f7065b9f0eea8a86aa64d5faa64ba28ffe
|
refs/heads/master
| 2023-06-29T17:54:40.362011
| 2020-10-22T16:54:42
| 2020-10-22T16:54:42
| 306,436,978
| 0
| 0
| null | 2020-12-09T19:14:59
| 2020-10-22T19:17:28
| null |
UTF-8
|
Python
| false
| false
| 5,052
|
py
|
import re
from api.models import Dna
import datetime
class MutantController:
MUTANT_SEQUENCE_SIZE = 4
MINIMUM_MUTANTS_SEQUENCES = 1
def is_mutant(self, dna):
is_mutant = False
self._validate_dna(dna)
sequences = self._decompose_dna(dna)
count = self._count_mutant_sequences(sequences)
if count > self.MINIMUM_MUTANTS_SEQUENCES:
is_mutant = True
dna_model = Dna()
dna_model.sequences = dna
dna_model.is_mutant = is_mutant
dna_model.created_date = datetime.datetime.now()
dna_model.updated_date = datetime.datetime.now()
dna_model.save()
return is_mutant
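    # Note (comment added for clarity): is_mutant persists every analyzed DNA
    # via the Dna model as a side effect, and classifies as mutant only when
    # strictly more than MINIMUM_MUTANTS_SEQUENCES runs of four identical
    # bases are found across rows, columns, and both diagonal directions.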
    def _validate_dna(self, dna):
        # A square matrix of A/T/C/G bases is required; anything else raises.
        dna_size = len(dna)
        for seq in dna:
            if len(seq) != dna_size:
                raise ValueError("The DNA sequence is corrupt")
            if re.findall("[^ATCG]", seq, re.IGNORECASE):
                raise ValueError("The nitrogen base is corrupt")
        return True
def _decompose_dna(self, dna):
sequences = []
sequences.extend(dna)
sequences.extend(self._decompose_vertically(dna))
sequences.extend(self._decompose_diagonally(dna))
sequences.extend(self._decompose_diagonally_but_backwards(dna))
return sequences
@staticmethod
def _decompose_vertically(dna):
dna_vertors_len = len(dna)
vertical_sequences = []
for i in range(dna_vertors_len):
temporal_vertical_seq = ""
for sequences in dna:
temporal_vertical_seq += sequences[i]
vertical_sequences.append(temporal_vertical_seq)
return vertical_sequences
def _decompose_diagonally(self, dna):
sequences = []
middle_diagonal = self._decompose_middle_diagonal(dna)
down_diagonals = self._decompose_lower_diagonals(dna[1:])
upper_diagonals = self._decompose_upper_diagonals([x[1:] for x in dna[:-1]])
sequences.append(middle_diagonal)
sequences.extend(down_diagonals)
sequences.extend(upper_diagonals)
return sequences
def _decompose_middle_diagonal(self, dna):
temp_sequence = ""
for i, seq in enumerate(dna):
temp_sequence += seq[i]
return temp_sequence
def _decompose_upper_diagonals(self, dna):
diagonal_list = [self._decompose_middle_diagonal(dna)]
if len(dna) > 1:
diagonal_list.extend(self._decompose_upper_diagonals([x[1:] for x in dna[:-1]]))
return diagonal_list
def _decompose_lower_diagonals(self, dna):
diagonal_list = [self._decompose_middle_diagonal(dna)]
if len(dna) > 1:
diagonal_list.extend(self._decompose_lower_diagonals(dna[1:]))
return diagonal_list
def _decompose_diagonally_but_backwards(self, dna):
reversed_dna = self._reverse_sequences(dna)
return self._decompose_diagonally(reversed_dna)
def _reverse_sequences(self, sequences):
reversed_sequences = []
for sequence in sequences:
reversed_sequences.append("".join(reversed(sequence)))
return reversed_sequences
def _count_mutant_sequences(self, sequences):
verified_mutant_sequences_count = 0
for sequence in sequences:
verified_mutant_sequences_count += self._verify_mutant_sequence(sequence)
return verified_mutant_sequences_count
def _verify_mutant_sequence(self, sequence):
found_base_count = 0
sequences_found = 0
last_base = None
sequence_len = len(sequence)
if sequence_len >= self.MUTANT_SEQUENCE_SIZE:
for i, base in enumerate(sequence):
if i == 0:
last_base = base
continue
if last_base == base:
found_base_count += 1
else:
found_base_count = 0
if found_base_count == self.MUTANT_SEQUENCE_SIZE - 1:
sequences_found += 1
found_base_count = 0
last_base = base
return sequences_found
@staticmethod
def get_stats():
stats = Dna.get_stats()
count_mutant_dna = 0
count_human_dna = 0
        for stat in stats:
            if stat[0]:
                count_mutant_dna = stat[1]
            else:
                count_human_dna = stat[1]
stats = {
"count_mutant_dna": count_mutant_dna,
"count_human_dna": count_human_dna,
"ratio": MutantController._calculate_ratio(count_mutant_dna, count_human_dna)
}
return stats
@staticmethod
def _calculate_ratio(mutant_count, human_count):
if human_count == 0 or human_count is None:
ratio = 1
elif mutant_count == 0 or mutant_count is None:
ratio = 0
else:
ratio = mutant_count / human_count
return ratio
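# Hedged usage sketch (added; assumes a configured Django project so the
# api.models.Dna model can save). The classic matrix below contains more
# than one run of four equal bases ("AAAA" on the main diagonal, "GGGG" in
# column 4, "CCCC" in row 4), so it should be classified as mutant:
#
#     dna = ["ATGCGA", "CAGTGC", "TTATGT", "AGAAGG", "CCCCTA", "TCACTG"]
#     MutantController().is_mutant(dna)  # -> True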
|
[
"diazchg@gmail.com"
] |
diazchg@gmail.com
|
928db3ff5ae561f17567082ff3595807d26cecbc
|
daccd137b963061d96c93ec6b38e4f9a69fb44f2
|
/decorators.py
|
e492accdcd2c10e3f31baef7b2abea88d7ca7ecf
|
[
"MIT"
] |
permissive
|
Steven-Eardley/pibot_motor
|
6391ec7a3b7abef10068fd748c9477da300dc13e
|
8e3f96db47dd9b8687ad628f5a7f1129263df06a
|
refs/heads/master
| 2016-08-05T21:03:08.079362
| 2015-07-12T17:49:17
| 2015-07-12T17:49:17
| 28,645,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
from functools import wraps
def expect_kb_interrupt(fn):
@wraps(fn)
def decorated_fn(*args, **kwargs):
try:
fn(*args, **kwargs)
except KeyboardInterrupt:
pass
return decorated_fn
def expect_attribute_errors(fn):
@wraps(fn)
def decorated_fn(*args, **kwargs):
try:
fn(*args, **kwargs)
except AttributeError:
pass
return decorated_fn
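# Hedged usage sketch (added, not in the original module): a decorated loop
# exits quietly on Ctrl-C instead of printing a KeyboardInterrupt traceback.
if __name__ == "__main__":
    import time

    @expect_kb_interrupt
    def spin():
        # Stand-in for a motor-control loop; press Ctrl-C to stop it cleanly.
        while True:
            time.sleep(0.1)

    spin()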
|
[
"steve@eardley.xyz"
] |
steve@eardley.xyz
|
ce10a2b1967ba47c01da55faf0f791e338de85d3
|
b733a463ba1a21ac4c2756e8552ff4b251ce6b55
|
/Inpainting/PIC-EC/color_domain/flist_train_split.py
|
a3eaff8e0ef110a8cb1051dc296ca9228a2b3e6b
|
[] |
no_license
|
fengjiran/tensorflow_learning
|
003047cdb396572e3535043dfff8671befcd0f21
|
44dce5bb39c8d6421ea6181338b0ea4b0e5e9797
|
refs/heads/master
| 2023-04-04T07:53:55.707703
| 2022-03-30T09:35:52
| 2022-03-30T09:35:52
| 100,839,372
| 3
| 0
| null | 2023-03-24T22:50:54
| 2017-08-20T04:58:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
import os
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, help='path to the dataset')
parser.add_argument('--train', type=int, default=28, help='number of train in a iter')
parser.add_argument('--val', type=int, default=1, help='number of val in a iter')
parser.add_argument('--test', type=int, default=1, help='number of test in a iter')
parser.add_argument('--output', type=str, help='path to the three file lists')
args = parser.parse_args()
ext = {'.jpg', '.png', '.JPG'}
total = args.train + args.val + args.test
images_train = []
images_val = []
images_test = []
num = 1
for root, dirs, files in os.walk(args.path):
print('loading ' + root)
for file in files:
if os.path.splitext(file)[1] in ext:
path = os.path.join(root, file)
if num % total > (args.val + args.test) or num % total == 0:
images_train.append(path)
elif num % total <= args.val and num % total > 0:
images_val.append(path)
else:
images_test.append(path)
num += 1
images_train.sort()
images_val.sort()
images_test.sort()
print('train number =', len(images_train))
print('val number =', len(images_val))
print('test number =', len(images_test))
if not os.path.exists(args.output):
    os.makedirs(args.output)
np.savetxt(os.path.join(args.output, 'train.flist'), images_train, fmt='%s')
np.savetxt(os.path.join(args.output, 'val.flist'), images_val, fmt='%s')
np.savetxt(os.path.join(args.output, 'test.flist'), images_test, fmt='%s')
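# Hedged usage note (added; paths are illustrative): with the defaults this
# splits every 30 images into 28 train / 1 val / 1 test, e.g.:
#
#     python flist_train_split.py --path ./images --output ./flists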
|
[
"fengjiran@foxmail.com"
] |
fengjiran@foxmail.com
|
88ee67d2de1dcfcdf033dd13c68e62b6f9761986
|
136be839cd958e5e7de33e46cebee1786e7b502e
|
/appNomas/migrations/0002_cliente.py
|
e4235091fa64a2c7860799338c5fb947bdb61b8b
|
[] |
no_license
|
Joacaro/NomasAccidentes
|
623da00181fc12fe7f6d3d48dff4af86a30c8d8c
|
a791fdbdb251bee3e91bde8b6eb0186a59a416da
|
refs/heads/master
| 2023-06-09T10:28:00.522898
| 2021-07-02T05:00:12
| 2021-07-02T05:00:12
| 381,887,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
# Generated by Django 3.1.1 on 2021-07-01 03:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('appNomas', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Cliente',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rut', models.CharField(max_length=10, unique=True)),
('direccion', models.CharField(max_length=50)),
('nombres', models.CharField(max_length=50)),
('password', models.CharField(max_length=50)),
('correo', models.EmailField(max_length=50)),
('telefono', models.CharField(max_length=50)),
('last_login', models.DateTimeField(null=True)),
],
options={
'abstract': False,
},
),
]
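# Hedged note (added): 'password' here is a plain CharField, so values are
# stored as-is; Django's auth machinery (e.g. AbstractBaseUser with hashed
# passwords) is the usual way to avoid persisting raw passwords.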
|
[
"joa.caro@duocuc.cl"
] |
joa.caro@duocuc.cl
|
fe512120422d5ec22a1f1710f9b6a8d6c042e47c
|
597c7a552afc38d6df556b277f6f74d2a1029438
|
/properties/tst/test_properties.py
|
469b985536691566494253f147ca002224db57f7
|
[] |
no_license
|
hurenkam/tracker-py
|
80a5fa966ca486c82c2ccf28938101e0d4607fcb
|
b74101f33b301c9644f2a094cb18deb0a941265d
|
refs/heads/master
| 2021-01-25T12:01:57.006662
| 2009-09-27T23:18:21
| 2009-09-27T23:18:21
| 33,602,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,598
|
py
|
from properties import *
from e32 import ao_sleep as Sleep
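# Hedged note (added): this suite targets Python for S60 (PyS60) on Symbian;
# the `properties` extension and the `e32` module it imports exist only on a
# device or in the S60 emulator, which matches the Python 2-era syntax below.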
sid = 1
def Wait(text = None):
if text != None:
print text,
Sleep(1)
def RunSidTests():
global sid
print "Test GetSid... ",
sid = GetSid()
#print sid,
#if sid == 4028634395L: # emulator sid
if sid == 537013993L: # on-device sid
print "ok!"
else:
print "failed!"
def RunEIntTests():
global sid
print "Test EInt... "
Wait("Define...")
Property.Define(sid,1,Property.EInt)
Wait("Create...")
n = Property()
Wait("Attach...")
n.Attach(sid,1,Property.EInt)
Wait("Set...")
n.Set(5)
Wait("Get...")
i = n.Get()
Wait("Delete...")
Property.Delete(sid,1)
Wait()
if i == 5:
print "ok."
else:
print "failed! (i=%i)" % i
del n
del i
def RunETextTests():
global sid
print "Test EText... "
Wait("Define...")
Property.Define(sid,1,Property.EText)
Wait("Create...")
t = Property()
Wait("Attach...")
t.Attach(sid,1,Property.EText)
Wait("Set...")
t.Set("Hello world!")
Wait("Get...")
s = t.Get(32)
Wait("Delete...")
Property.Delete(sid,1)
Wait()
if s == "Hello world!":
print "ok."
else:
print "failed! (s=\"%s\")" % s
del t
del s
cbcount = 0
setcount = 0
result = [ ]
should = [ 1,-1,0x7fffffff,-0x80000000,0x10 ]
def RunSubscribeTests():
def Callback():
global result
global p
r = p.Get()
#print r,
result.append(r)
global p
global sid
global result
print "Test Subscribe & Callback... "
Wait("Define...")
Property.Define(sid,0x10,Property.EInt)
p = Property()
Wait("Attach...")
p.Attach(sid,0x10,Property.EInt)
Wait("Set(0)...")
p.Set(0)
Wait("Subscribe...")
p.Subscribe(Callback)
Wait("Set(1)...")
p.Set(1)
Wait("Set(-1)...")
p.Set(-1)
Wait("Set(0x7fffffff)...")
p.Set(0x7fffffff)
Wait("Set(-0x80000000)...")
p.Set(-0x80000000)
Wait("Set(0x10)...")
p.Set(0x10)
Wait("Cancel...")
p.Cancel()
Wait("Set(0)...")
p.Set(0)
if result == should:
print "ok!"
else:
print "failed!" #, result, should
print "Running tests for properties module..."
RunSidTests()
RunEIntTests()
RunETextTests()
RunSubscribeTests()
print Property.__dict__.keys()
print PowerState.__dict__.keys()
print SystemAgent.__dict__.keys()
print "Done!"
|
[
"mark.hurenkamp@xs4all.nl@ef54a0f8-d952-0410-8fc1-d1a2e12a3fa0"
] |
mark.hurenkamp@xs4all.nl@ef54a0f8-d952-0410-8fc1-d1a2e12a3fa0
|
330d36924cf7df5102ada150cb056163cd33e9a3
|
f0d671a7be0136f14158c78385ceaf284358d9d7
|
/data_analysis/ipython_log.py
|
57dc224cc77430125280541fdf7fec8f5d671229
|
[] |
no_license
|
epicarts/python3_practice
|
3d0bd30d53976606402159d6a507c872284cd5e7
|
e092d3adacd44fd9722050267b07630022cd11df
|
refs/heads/master
| 2021-07-10T06:43:30.651712
| 2020-07-01T14:55:54
| 2020-07-01T14:55:54
| 143,696,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,391
|
py
|
# IPython log file
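# Hedged note (added): the lines below are replayed console inputs. In
# IPython, `_` and `__` hold the last two results, `_iN` holds input N,
# and %logstart/%logstop control this log file.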
import numpy as np
import numpy as np
def f(x, y, z):
return (x + y) / z
def f(x, y, z):
return (x + y) / z
result = f(5, 6, 7.5)
a = np.random.randn(100, 100)
get_ipython().run_line_magic('timeit', 'np.dot(a, a)')
get_ipython().run_cell_magic('automagic', '', '')
get_ipython().run_line_magic('automagic', '')
get_ipython().run_line_magic('quickref', '')
get_ipython().run_line_magic('', '')
get_ipython().run_line_magic('quickref', '')
get_ipython().run_line_magic('quickref', '')
get_ipython().run_line_magic('automagic', '')
get_ipython().run_line_magic('quickref', '')
get_ipython().run_line_magic('automagic', '')
get_ipython().run_line_magic('automagic', '')
get_ipython().run_line_magic('timeit', 'np.dot(a, a)')
a = np.random.randn(10, 10)
get_ipython().run_line_magic('timeit', 'np.dot(a, a)')
get_ipython().run_line_magic('automagic', '')
get_ipython().run_line_magic('timeit', 'np.dot(a, a)')
get_ipython().run_line_magic('who', '')
get_ipython().run_line_magic('who', '')
get_ipython().run_line_magic('who_is', '')
get_ipython().run_line_magic('who_ls', '')
2 ** 27
_
__
foo = 'bar'
_
__
foo
_i27
get_ipython().run_line_magic('automagic', '')
_i27
_i27
get_ipython().run_line_magic('automagic', '')
get_ipython().run_line_magic('run', 'test1')
get_ipython().run_line_magic('run', 'test1.py')
get_ipython().run_line_magic('run', '/')
_i27
_i27
_i2
_i21
_i23
_i5
get_ipython().run_line_magic('logstart', '')
get_ipython().run_line_magic('logstop', '')
|
[
"0505zxc@gmail.com"
] |
0505zxc@gmail.com
|
351523efcc8b35f6d0ea6c19f56d447f2a7e4ac0
|
99697559d046cdd04dd9068bd518e4da4177aaa2
|
/Finish/M545_Boundary_of_Binary_Tree.py
|
88c576130740e0f831bb5332c6a40362d95be6cf
|
[] |
no_license
|
Azurisky/Leetcode
|
3e3621ef15f2774cfdfac8c3018e2e4701760c3b
|
8fa215fb0d5b2e8f6a863756c874d0bdb2cffa04
|
refs/heads/master
| 2020-03-18T22:46:35.780864
| 2018-10-07T05:45:30
| 2018-10-07T05:45:30
| 135,364,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def boundaryOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
def dfs_leftmost(node):
if not node or not node.left and not node.right:
return
boundary.append(node.val)
if node.left:
dfs_leftmost(node.left)
else:
dfs_leftmost(node.right)
def dfs_leaves(node):
if not node:
return
dfs_leaves(node.left)
if node != root and not node.left and not node.right:
boundary.append(node.val)
dfs_leaves(node.right)
def dfs_rightmost(node):
if not node or not node.left and not node.right:
return
if node.right:
dfs_rightmost(node.right)
else:
dfs_rightmost(node.left)
boundary.append(node.val)
if not root:
return []
boundary = [root.val]
dfs_leftmost(root.left)
dfs_leaves(root)
dfs_rightmost(root.right)
return boundary
|
[
"andrew0704us@gmail.com"
] |
andrew0704us@gmail.com
|
c496121f8836365b61237c1ab508a1fd4c2bf41d
|
5e239849b5588d1924f44a69f0eeac040c0454bd
|
/core/serializers.py
|
c2bf36123c0bdbe5a996af81ca76a5a5b3ccf269
|
[] |
no_license
|
mare-alta/backend
|
53be33244efeab12b1e548d499bc942807275d8f
|
d76fc822c6bdf3f2127887fab6885d8b967ac501
|
refs/heads/master
| 2022-12-10T15:31:01.419861
| 2019-12-08T15:45:49
| 2019-12-08T15:45:49
| 226,612,261
| 0
| 0
| null | 2021-09-22T18:05:45
| 2019-12-08T04:05:45
|
Python
|
UTF-8
|
Python
| false
| false
| 547
|
py
|
from rest_framework import serializers
from .models import *
class LevelSerializer(serializers.ModelSerializer):
class Meta:
model = Level
fields = '__all__'
class ComplaintSerializer(serializers.ModelSerializer):
class Meta:
model = Complaint
fields = '__all__'
class AnswerSerializer(serializers.ModelSerializer):
class Meta:
model = Answer
fields = '__all__'
class ImageSerializer(serializers.ModelSerializer):
class Meta:
model = Image
fields = '__all__'
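# Hedged note (added): fields = '__all__' serializes every model field; an
# explicit field list is the usual way to keep newly added columns from
# being exposed unintentionally.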
|
[
"thales.gibbon@gmail.com"
] |
thales.gibbon@gmail.com
|