| column | type | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M, nullable |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1 |
| author | string | length 0–175 |
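A minimal sketch of how rows with this schema could be loaded and inspected, assuming the dump is stored as Parquet shards readable by the Hugging Face `datasets` library (the file glob below is a placeholder, not the real dataset location):

from datasets import load_dataset

# Assumed layout: local Parquet shards carrying the columns listed above.
ds = load_dataset("parquet", data_files="data/*.parquet", split="train")

# Inspect a few rows using the schema's column names.
for row in ds.select(range(3)):
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])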
5835bb5009219c382f7cf2d57f0cd5d74a3e5abd
|
9d16bc0ff4d4554f6bd51718f145ab3d82467877
|
/BubbleBuster.py
|
d933ffa7e70212cee0186be0dbbdd30678e3e69d
|
[] |
no_license
|
Arisan39/BubbleBuster
|
32a5475012cb7ddede272c662e00142a134cdf7c
|
7b870c4be16f04efeee1a9c2de07a7385111b03b
|
refs/heads/master
| 2020-12-15T21:23:33.158294
| 2020-01-21T04:51:07
| 2020-01-21T04:51:07
| 235,257,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,752
|
py
|
import pygame
import sys
from pygame.locals import *
from Bubble import Bubble
from Player import Player
pygame.init()
screen = pygame.display.set_mode((640, 460))  # create the window and set its size
screen.fill((255, 255, 255))  # fill the background with white
pygame.display.set_caption('Bubble Buster!')  # set the window caption
font = pygame.font.SysFont(None, 36)
main_clock = pygame.time.Clock()
score = 0
#Adding lives
lives = 3
alive = True
#create and set up values for the player
player = Player()
player.rect.x = 250
player_speed = player.speed
draw_group = pygame.sprite.Group()
draw_group.add(player)
bubble_group = pygame.sprite.Group()
move_left = False  # these flags keep the player from moving at the beginning of the game
move_right = False
def draw_screen():
screen.fill((255, 255, 255))
def draw_player():
pygame.draw.rect(screen, (47, 216, 163), player)
def draw_text(display_string, font, surface, x, y):
text_display = font.render(display_string, 1, (178, 16, 242))
text_rect = text_display.get_rect()
text_rect.topleft = (x, y)
surface.blit(text_display, text_rect)
x_position = 320
y_position = 380
last_x = x_position
last_y = y_position
ball = pygame.draw.circle(screen, (242, 16, 99), (x_position, y_position), 5, 0)
ball_can_move = False
speed =[5, -5]
#values for all bubbles to use
all_bubbles = []
bubble_radius = 20
bubble_edge = 1
initial_bubble_position = 30
bubble_spacing = 60
def create_bubbles():# from here to...
bubble_x = initial_bubble_position
bubble_y = initial_bubble_position
for rows in range(0, 3):
for columns in range(0, 10):
bubble = Bubble(bubble_x, bubble_y)
bubble_group.add(bubble)
bubble_x += bubble_spacing
bubble_y += bubble_spacing
bubble_x = initial_bubble_position
create_bubbles()
def draw_bubbles():
bubble_group.draw(screen)  # Group.draw blits every bubble; no per-sprite loop is needed
while True:#this can be run (or exit) without crashing
#check for events
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
#Keyboard input for players
if event.type == KEYDOWN:
if event.key == K_a:
move_right = False
move_left = True
if event.key == K_d:
move_left = False
move_right = True
if event.type == KEYUP:
if event.key == K_a:  # K_a means the 'A' key
move_left = False
if event.key == K_d:
move_right = False  # these only update state flags; nothing is drawn yet
if alive:# from here, these are game over check
if event.key == K_SPACE:
ball_can_move = True
if not alive:
if event.key == K_RETURN:
lives = 3
alive = True
score = 0# from here, these are how to reset the game
ball_can_move = False
for bubble in bubble_group:
bubble_group.remove(bubble)
create_bubbles()
#Ensure consistent frames per second
main_clock.tick(50)
#Move the player
if move_left and player.rect.left > 0:  # keep the player inside the left edge of the screen
player.rect.x -= player_speed
if move_right and player.rect.right < 640:  # keep the player inside the right edge of the screen
player.rect.x += player_speed
#Move the ball
if ball_can_move:
last_x = x_position
last_y = y_position
x_position += speed[0]
y_position += speed[1]
if ball.x <= 0:
x_position = 15
speed[0] = -speed[0]
elif ball.x >= 640:
x_position = 625
speed[0] = -speed[0]
if ball.y <= 0:
y_position = 15
speed[1] = -speed[1]
#Test collisions with the player
if ball.colliderect(player):
y_position -= 15
speed[1] = -speed[1]
#Subtracting lives
elif ball.y >= 460:
lives -= 1
ball_can_move = False
#Move direction vector
move_direction = ((x_position - last_x), (y_position - last_y))
#Test collisions with the bubbles
for bubble in bubble_group:
if ball.colliderect(bubble.rect):
if move_direction[1] > 0:
speed[1] = -speed[1]
y_position -= 10
elif move_direction[1] < 0:
speed[1] = -speed[1]
y_position += 10
bubble_group.remove(bubble)
pygame.display.update()
score += 100
break
else:
x_position = player.rect.x + 30
if lives <= 0:
alive = False
draw_screen()
draw_group.draw(screen)
draw_bubbles()
ball = pygame.draw.circle(screen,(242, 16, 99), (x_position, y_position), 5, 0)
if alive:
draw_text('Score: %s' % (score), font, screen, 5, 5)
draw_text('Lives: %s' % (lives), font, screen, 540, 5)
else:
draw_text('Game Over', font, screen, 255, 5)
draw_text('Press Enter to Play Again', font, screen, 180, 50)
pygame.display.update()  # refresh the display
|
[
"noreply@github.com"
] |
noreply@github.com
|
0b09fac1656f2a6cd2b578afb6640cc93695b34a
|
76776170a8fe1c065bce42b314e77018d7a127cb
|
/home/migrations/0001_initial.py
|
65fda3fe9f85c2042cdae21d990004521914c034
|
[] |
no_license
|
himanshu98/sample-Django
|
d942e282d3ba16baeaad2e2eb54f594a4619c877
|
f8556860d7de97685da303d7da35c000e2513b31
|
refs/heads/master
| 2022-11-28T12:59:37.885537
| 2020-08-13T19:20:22
| 2020-08-13T19:20:22
| 287,359,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
# Generated by Django 3.1 on 2020-08-12 22:52
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=122)),
('email', models.CharField(max_length=122)),
('phone', models.CharField(max_length=122)),
('desc', models.TextField()),
('date', models.DateField()),
],
),
]
|
[
"tomarhimanshu98@gmail.com"
] |
tomarhimanshu98@gmail.com
|
ddd05ad17c156557bab875374be46009351bf83e
|
560567db6f9805ee2bb715f550c88cfc6e4187cf
|
/CueCreator.py
|
0d5ae722078ae4a15fe9aa8e38c0c4b6b031618f
|
[] |
no_license
|
freerainx/CueCreator
|
a9007329d5e6b0125872541bb115c03c409e71fe
|
cfa6326052ac61fca3aafbc3c995829009b6aeb8
|
refs/heads/main
| 2023-04-09T06:21:19.013391
| 2021-04-25T17:41:38
| 2021-04-25T17:41:38
| 361,485,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,685
|
py
|
import sys
from PyQt5 import QtWidgets, QtGui, QtCore
import PyQt5.sip
from PyQt5.QtWidgets import QApplication, QWidget, QLineEdit, QMessageBox, QGridLayout, QLabel, QPushButton, QFrame
from MainUI import Ui_Dialog
from Cue import cueFile
class mainWindow(QtWidgets.QWidget, Ui_Dialog):
CueDir = 'F:\\Music\\Collections\\'
def __init__(self):
super(mainWindow, self).__init__()
self.setupUi(self)
self.btnBrower.clicked.connect(self.BrowseDir)
self.btnCreate.clicked.connect(self.CreatCue)
self.btnClear.clicked.connect(self.ClearText)
def BrowseDir(self):
self.CueDir = QtWidgets.QFileDialog.getExistingDirectory(self, 'Open Directory',self.CueDir, QtWidgets.QFileDialog.ShowDirsOnly)
print(self.CueDir)
self.edtDir.setText(self.CueDir)
def ClearText(self):
self.txtCue.setPlainText("")
def CreatCue(self):
desDir=self.edtDir.text()
print(desDir)
if desDir[len(desDir)-1] != '/':
desDir += '/'
print(desDir)
myCue = cueFile("CD.cue")
if len(self.edtAlbum.text()) > 0:
myCue.SetTitle(self.edtAlbum.text())
if len(self.edtPerformer.text()) >0:
myCue.SetPerformer(self.edtPerformer.text())
myCue.CueFromDir(desDir)
cuetext=""
for str1 in myCue.GetContent():
cuetext += (str1 + "\r\n")
self.txtCue.setPlainText(cuetext)
QMessageBox.information(self, "信息", "Cue文件生成完毕!!!")
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
myDialog = mainWindow()
myDialog.show()
sys.exit(app.exec_())
|
[
"freejxt@126"
] |
freejxt@126
|
57cbb17eae32ce8daed7bf554a568c0f8d9328db
|
36e13e0219419b6a0c9d913b99b9330c7894f32a
|
/LifelongMixture_64_Dirichlet.py
|
5c20b33432794fcd54c1b387c2aa5778c3182873
|
[] |
no_license
|
WN1695173791/LifelongMixtureVAEs
|
4ef6f5c62f3a9480bd010fabce249020cca71b5b
|
b1f858cae35f8f0b91981f398ec431d9a8afb061
|
refs/heads/main
| 2023-06-15T10:09:31.087605
| 2021-07-09T15:11:53
| 2021-07-09T15:11:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,976
|
py
|
import tensorflow as tf
import mnist_data
import tensorflow.contrib.slim as slim
import time
import seaborn as sns
from Assign_Dataset import *
from tensorflow.examples.tutorials.mnist import input_data
from keras.datasets import mnist
from Support import *
from Mnist_DataHandle import *
from HSICSupport import *
from scipy.misc import imsave as ims
from utils import *
from glob import glob
import keras
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.preprocessing.text import Tokenizer
import os, gzip
from data_hand import *
os.environ['CUDA_VISIBLE_DEVICES']='7'
distributions = tf.distributions
from Mixture_Models import *
import keras.datasets.cifar10 as cifar10
def file_name2_(file_dir):
t1 = []
for root, dirs, files in os.walk(file_dir):
for a1 in dirs:
b1 = "C:/CommonData//rendered_chairs/" + a1 + "/renders/*.png"
img_path = glob(b1)
t1.append(img_path)
cc = []
for i in range(len(t1)):
a1 = t1[i]
for p1 in a1:
cc.append(p1)
return cc
def file_name_(file_dir):
t1 = []
file_dir = "E:/LifelongMixtureModel/data/images_background/"
for root, dirs, files in os.walk(file_dir):
for a1 in dirs:
b1 = "E:/LifelongMixtureModel/data/images_background/" + a1 + "/renders/*.png"
b1 = "E:/LifelongMixtureModel/data/images_background/" + a1
for root2, dirs2, files2 in os.walk(b1):
for c1 in dirs2:
b2 = b1 + "/" + c1 + "/*.png"
img_path = glob(b2)
t1.append(img_path)
cc = []
for i in range(len(t1)):
a1 = t1[i]
for p1 in a1:
cc.append(p1)
return cc
def file_name(file_dir):
t1 = []
file_dir = "../images_background/"
for root, dirs, files in os.walk(file_dir):
for a1 in dirs:
b1 = "../images_background/" + a1 + "/renders/*.png"
b1 = "../images_background/" + a1
for root2, dirs2, files2 in os.walk(b1):
for c1 in dirs2:
b2 = b1 + "/" + c1 + "/*.png"
img_path = glob(b2)
t1.append(img_path)
cc = []
for i in range(len(t1)):
a1 = t1[i]
for p1 in a1:
cc.append(p1)
return cc
def file_name2(file_dir):
t1 = []
for root, dirs, files in os.walk(file_dir):
for a1 in dirs:
b1 = "../rendered_chairs/" + a1 + "/renders/*.png"
img_path = glob(b1)
t1.append(img_path)
print('root_dir:', root)  # current directory path
print('sub_dirs:', dirs)  # all sub-directories under the current path
print('files:', files)  # all non-directory files under the current path
cc = []
for i in range(len(t1)):
a1 = t1[i]
for p1 in a1:
cc.append(p1)
return cc
# Gateway
def autoencoder(x_hat, x, dim_img, dim_z, n_hidden, keep_prob, task_state, disentangledCount):
# encoding
mu1, sigma1 = Encoder_64(x_hat, "encoder1")
mu2, sigma2 = Encoder_64(x_hat, "encoder2")
mu3, sigma3 = Encoder_64(x_hat, "encoder3")
mu4, sigma4 = Encoder_64(x_hat, "encoder4")
z1 = mu1 + sigma1 * tf.random_normal(tf.shape(mu1), 0, 1, dtype=tf.float32)
z2 = mu2 + sigma2 * tf.random_normal(tf.shape(mu2), 0, 1, dtype=tf.float32)
z3 = mu3 + sigma3 * tf.random_normal(tf.shape(mu3), 0, 1, dtype=tf.float32)
z4 = mu4 + sigma4 * tf.random_normal(tf.shape(mu4), 0, 1, dtype=tf.float32)
s1 = Generator_64(z1, "decoder1")
s2 = Generator_64(z2, "decoder2")
s3 = Generator_64(z3, "decoder3")
s4 = Generator_64(z4, "decoder4")
imageSize = 64
s1_1 = tf.reshape(s1,(-1,imageSize*imageSize*3))*task_state[:, 0:1]
s2_1 = tf.reshape(s2,(-1,imageSize*imageSize*3))*task_state[:, 1:2]
s3_1 = tf.reshape(s3,(-1,imageSize*imageSize*3))*task_state[:, 2:3]
s4_1 = tf.reshape(s4,(-1,imageSize*imageSize*3))*task_state[:, 3:4]
reco = s1_1 + s2_1 + s3_1 + s4_1
reco = reco / (task_state[0, 0] + task_state[0, 1] + task_state[0, 2] + task_state[0, 3])
reco = tf.reshape(reco,(-1,imageSize,imageSize,3))
#Calculate task relationship
# Select tasks
reco1 = tf.reduce_mean(tf.reduce_sum(tf.square(s1 - x_hat), [1, 2, 3]))
reco2 = tf.reduce_mean(tf.reduce_sum(tf.square(s2 - x_hat), [1, 2, 3]))
reco3 = tf.reduce_mean(tf.reduce_sum(tf.square(s3 - x_hat), [1, 2, 3]))
reco4 = tf.reduce_mean(tf.reduce_sum(tf.square(s4 - x_hat), [1, 2, 3]))
reco1_ = reco1 + (1 - task_state[0, 0]) * 1000000
reco2_ = reco2 + (1 - task_state[0, 1]) * 1000000
reco3_ = reco3 + (1 - task_state[0, 2]) * 1000000
reco4_ = reco4 + (1 - task_state[0, 3]) * 1000000
totalScore = tf.stack((reco1_, reco2_, reco3_, reco4_), axis=0)
mixParameter = task_state[0]
sum = mixParameter[0] + mixParameter[1] + mixParameter[2] + mixParameter[3]
mixParameter = mixParameter / sum
dist = tf.distributions.Dirichlet(mixParameter)
mix_samples = dist.sample()
b1 = mix_samples[0] * task_state[0, 0]
b2 = mix_samples[1] * task_state[0, 1]
b3 = mix_samples[2] * task_state[0, 2]
b4 = mix_samples[3] * task_state[0, 3]
mix_samples2 = tf.stack((b1,b2,b3,b4),axis=0)
# loss
reco1_loss = reco1 * mix_samples2[0]
reco2_loss = reco2 * mix_samples2[1]
reco3_loss = reco3 * mix_samples2[2]
reco4_loss = reco4 * mix_samples2[3]
# loss
marginal_likelihood = (reco1_loss + reco2_loss + reco3_loss + reco4_loss)
k1 = 0.5 * tf.reduce_sum(
tf.square(mu1) + tf.square(sigma1) - tf.log(1e-8 + tf.square(sigma1)) - 1,
1)
k2 = 0.5 * tf.reduce_sum(
tf.square(mu2) + tf.square(sigma2) - tf.log(1e-8 + tf.square(sigma2)) - 1,
1)
k3 = 0.5 * tf.reduce_sum(
tf.square(mu3) + tf.square(sigma3) - tf.log(1e-8 + tf.square(sigma3)) - 1,
1)
k4 = 0.5 * tf.reduce_sum(
tf.square(mu4) + tf.square(sigma4) - tf.log(1e-8 + tf.square(sigma4)) - 1,
1)
k1 = tf.reduce_mean(k1)
k2 = tf.reduce_mean(k2)
k3 = tf.reduce_mean(k3)
k4 = tf.reduce_mean(k4)
KL_divergence = k1 * mix_samples2[0] + k2 * mix_samples2[1] + k3 * mix_samples2[2] + k4 * mix_samples2[3]
KL_divergence = KL_divergence
p2 = 1
gamma = 4
loss = marginal_likelihood + gamma * tf.abs(KL_divergence - disentangledCount)
z = z1
y = reco
return y, z, loss, -marginal_likelihood, KL_divergence,totalScore
def decoder(z, dim_img, n_hidden):
y = bernoulli_MLP_decoder(z, n_hidden, dim_img, 1.0, reuse=True)
return y
n_hidden = 500
IMAGE_SIZE_MNIST = 28
dim_img = IMAGE_SIZE_MNIST ** 2 # number of pixels for a MNIST image
dim_z = 256
# train
n_epochs = 100
batch_size = 64
learn_rate = 0.001
train_total_data, train_size, _, _, test_data, test_labels = mnist_data.prepare_MNIST_data()
n_samples = train_size
# input placeholders
# In denoising-autoencoder, x_hat == x + noise, otherwise x_hat == x
x_hat = tf.placeholder(tf.float32, shape=[64, 64, 64, 3], name='input_img')
x = tf.placeholder(tf.float32, shape=[64, 64, 64, 3], name='target_img')
# dropout
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# input for PMLR
z_in = tf.placeholder(tf.float32, shape=[None, dim_z], name='latent_variable')
task_state = tf.placeholder(tf.float32, shape=[64, 4])
disentangledCount = tf.placeholder(tf.float32)
# network architecture
y, z, loss, neg_marginal_likelihood, KL_divergence,totalScore = autoencoder(x_hat, x, dim_img, dim_z,
n_hidden, keep_prob,
task_state, disentangledCount)
# optimization
t_vars = tf.trainable_variables()
train_op = tf.train.AdamOptimizer(learn_rate).minimize(loss, var_list=t_vars)
# train
total_batch = int(n_samples / batch_size)
min_tot_loss = 1e99
ADD_NOISE = False
train_data2_ = train_total_data[:, :-mnist_data.NUM_LABELS]
train_y = train_total_data[:, 784:784 + mnist_data.NUM_LABELS]
# MNIST dataset load datasets
img_path = glob('../img_celeba2/*.jpg')  # collect all images under the folder
data_files = img_path
data_files = sorted(data_files)
data_files = np.array(data_files) # for tl.iterate.minibatches
celebaFiles = data_files
# load 3D chairs
img_path = glob('../CACD2000/CACD2000/*.jpg')  # collect all images under the folder
data_files = img_path
data_files = sorted(data_files)
data_files = np.array(data_files) # for tl.iterate.minibatches
cacdFiles = data_files
file_dir = "../rendered_chairs/"
files = file_name2(file_dir)
data_files = files
data_files = sorted(data_files)
data_files = np.array(data_files) # for tl.iterate.minibatches
chairFiles = data_files
files = file_name(1)
data_files = files
data_files = sorted(data_files)
data_files = np.array(data_files) # for tl.iterate.minibatches
zimuFiles = data_files
saver = tf.train.Saver()
isWeight = False
currentTask = 4
def max_list(lt):
temp = 0
for i in lt:
if lt.count(i) > temp:
max_str = i
temp = lt.count(i)
return max_str
isWeight = False
with tf.Session() as sess:
sess.run(tf.global_variables_initializer(), feed_dict={keep_prob: 0.9})
if isWeight:
saver.restore(sess, 'models/LifelongMixture_64_Dirichlet')
img_path = glob('C:/CommonData/img_celeba2/*.jpg')  # collect all images under the folder
data_files = img_path
data_files = sorted(data_files)
data_files = np.array(data_files) # for tl.iterate.minibatches
myIndex = 10
celebaFiles = data_files[myIndex * batch_size:(myIndex + 2) * batch_size]
# load 3D chairs
img_path = glob('C:/CommonData/CACD2000/CACD2000/*.jpg')  # collect all images under the folder
data_files = img_path
data_files = sorted(data_files)
data_files = np.array(data_files) # for tl.iterate.minibatches
cacdFiles = data_files[myIndex * batch_size:(myIndex + 2) * batch_size]
file_dir = "C:/CommonData/rendered_chairs/"
files = file_name2_(file_dir)
data_files = files
data_files = sorted(data_files)
data_files = np.array(data_files) # for tl.iterate.minibatches
chairFiles = data_files[myIndex * batch_size:(myIndex + 2) * batch_size]
files = file_name_(1)
data_files = files
data_files = sorted(data_files)
data_files = np.array(data_files) # for tl.iterate.minibatches
zimuFiles = data_files[myIndex * batch_size:(myIndex + 2) * batch_size]
dataArray = []
for taskIndex in range(4):
taskIndex = 2
if taskIndex == 0:
x_train = celebaFiles
x_fixed = x_train[0:batch_size]
x_fixed2 = x_train[batch_size:batch_size * 2]
elif taskIndex == 1:
x_train = cacdFiles
x_fixed = x_train[0:batch_size]
x_fixed2 = x_train[batch_size:batch_size * 2]
elif taskIndex == 2:
x_train = chairFiles
x_fixed = x_train[0:batch_size]
x_fixed2 = x_train[batch_size:batch_size * 2]
elif taskIndex == 3:
x_train = zimuFiles
x_fixed = x_train[0:batch_size]
x_fixed2 = x_train[batch_size:batch_size * 2]
batchFiles = x_fixed
batchFiles2 = x_fixed2
if taskIndex == 0:
batch = [get_image(
sample_file,
input_height=128,
input_width=128,
resize_height=64,
resize_width=64,
crop=True)
for sample_file in batchFiles]
batch2 = [get_image(
sample_file,
input_height=128,
input_width=128,
resize_height=64,
resize_width=64,
crop=True)
for sample_file in batchFiles2]
elif taskIndex == 1:
batch = [get_image(
sample_file,
input_height=250,
input_width=250,
resize_height=64,
resize_width=64,
crop=True)
for sample_file in batchFiles]
batch2 = [get_image(
sample_file,
input_height=250,
input_width=250,
resize_height=64,
resize_width=64,
crop=True)
for sample_file in batchFiles2]
elif taskIndex == 2:
image_size = 64
batch = [get_image2(batch_file, 300, is_crop=True, resize_w=image_size, is_grayscale=0) \
for batch_file in batchFiles]
batch2 = [get_image2(batch_file, 300, is_crop=True, resize_w=image_size, is_grayscale=0) \
for batch_file in batchFiles2]
elif taskIndex == 3:
batch = [get_image(batch_file, 105, 105,
resize_height=64, resize_width=64,
crop=False, grayscale=False) \
for batch_file in batchFiles]
batch = np.array(batch)
batch = np.reshape(batch, (64, 64, 64, 1))
batch = np.concatenate((batch, batch, batch), axis=-1)
batch2 = [get_image(batch_file, 105, 105,
resize_height=64, resize_width=64,
crop=False, grayscale=False) \
for batch_file in batchFiles2]
batch2 = np.array(batch2)
batch2 = np.reshape(batch2, (64, 64, 64, 1))
batch2 = np.concatenate((batch2, batch2, batch2), axis=-1)
dataArray.append(batch)
x_fixed = batch
x_fixed = np.array(x_fixed)
x_fixed2 = batch2
x_fixed2 = np.array(x_fixed2)
# select the most relevant component
stateState = np.zeros((batch_size, 4))
stateState[:, 0] = 1
stateState[:, 1] = 1
stateState[:, 2] = 1
stateState[:, 3] = 1
score = sess.run(totalScore, feed_dict={x_hat: x_fixed, keep_prob: 1, task_state: stateState})
a = np.argmin(score, axis=0)
index = a
z = 0
generator_outputs = 0
if index == 0:
mu1, sigma1 = Encoder_64(x_hat, "encoder1", reuse=True)
z1 = mu1 + sigma1 * tf.random_normal(tf.shape(mu1), 0, 1, dtype=tf.float32)
Reco = Generator_64(z1, "decoder1", reuse=True)
generator_outputs = Generator_64(z_in, "decoder1", reuse=True)
z = z1
elif index == 1:
mu2, sigma2 = Encoder_64(x_hat, "encoder2", reuse=True)
z2 = mu2 + sigma2 * tf.random_normal(tf.shape(mu2), 0, 1, dtype=tf.float32)
Reco = Generator_64(z2, "decoder2", reuse=True)
generator_outputs = Generator_64(z_in, "decoder2", reuse=True)
z = z2
elif index == 2:
mu3, sigma3 = Encoder_64(x_hat, "encoder3", reuse=True)
z3 = mu3 + sigma3 * tf.random_normal(tf.shape(mu3), 0, 1, dtype=tf.float32)
Reco = Generator_64(z3, "decoder3", reuse=True)
generator_outputs = Generator_64(z_in, "decoder3", reuse=True)
z = z3
elif index == 3:
mu4, sigma4 = Encoder_64(x_hat, "encoder4", reuse=True)
z4 = mu4 + sigma4 * tf.random_normal(tf.shape(mu4), 0, 1, dtype=tf.float32)
Reco = Generator_64(z4, "decoder4", reuse=True)
generator_outputs = Generator_64(z_in, "decoder4", reuse=True)
z = z4
code1 = sess.run(z, feed_dict={x_hat: x_fixed, keep_prob: 1, task_state: stateState})
code2 = sess.run(z, feed_dict={x_hat: x_fixed2, keep_prob: 1, task_state: stateState})
recoArr = []
minV = -3
maxV = 3
tv = 6.0 / 12.0
'''
for j in range(256):
code1 = sess.run(z, feed_dict={x_hat: x_fixed, keep_prob: 1, task_state: stateState})
recoArr = []
myIndex = 0
for i in range(12):
code1[:, j] = minV + tv * i
myReco = sess.run(generator_outputs, feed_dict={z_in: code1, keep_prob: 1, task_state: stateState})
recoArr.append(myReco[myIndex])
recoArr = np.array(recoArr)
ims("results/" + "inter" + str(j) + ".png", merge2(recoArr, [1, 12]))
bc = 2
BC =0
'''
for t1 in range(64):
for j in range(256):
code1 = sess.run(z, feed_dict={x_hat: x_fixed, keep_prob: 1, task_state: stateState})
recoArr = []
j = 224
myIndex = t1
for i in range(12):
code1[:,j] = minV + tv * i
myReco = sess.run(generator_outputs, feed_dict={z_in: code1, keep_prob: 1, task_state: stateState})
recoArr.append(myReco[myIndex])
recoArr = np.array(recoArr)
ims("results/" + "inter" + str(t1) + ".png", merge2(recoArr, [1, 12]))
bc = 2
break
c=0
for t in range(2):
if t ==1 :
t = t+20
recoArr.append(x_fixed2[t])
for i in range(10):
newCode = code2 + distance*i
myReco = sess.run(generator_outputs, feed_dict={z_in: newCode, keep_prob: 1, task_state: stateState})
recoArr.append(myReco[t])
recoArr.append(x_fixed[t])
recoArr = np.array(recoArr)
ims("results/" + "inter" + str(taskIndex) + ".png", merge2(recoArr, [2, 12]))
myReco = sess.run(Reco, feed_dict={x_hat: x_fixed, keep_prob: 1, task_state: stateState})
ims("results/" + "Dataset" + str(taskIndex) + "_mini.png", merge2(x_fixed[:16], [2, 8]))
ims("results/" + "Reco" + str(taskIndex) + "_H_mini.png", merge2(myReco[:16], [2, 8]))
bc = 0
bc = 0
# training
n_epochs = 20
stateState = np.zeros((batch_size, 4))
stateState[:, 0] = 1
stateState[:, 1] = 1
stateState[:, 2] = 1
stateState[:, 3] = 1
disentangledScore = 0.5
vChange = 25.0 / n_epochs
for taskIndex in range(currentTask):
taskIndex = 1
if taskIndex == 0:
x_train = celebaFiles
x_fixed = x_train[0:batch_size]
elif taskIndex == 1:
x_train = cacdFiles
x_fixed = x_train[0:batch_size]
elif taskIndex == 2:
x_train = chairFiles
x_fixed = x_train[0:batch_size]
elif taskIndex == 3:
x_train = zimuFiles
x_fixed = x_train[0:batch_size]
disentangledScore = disentangledScore + vChange
n_samples = np.shape(np.array(x_train))[0]
total_batch = int(n_samples / batch_size)
for epoch in range(n_epochs):
# Random shuffling
index = [i for i in range(np.shape(x_train)[0])]
random.shuffle(index)
x_train = x_train[index]
image_size = 64
# Loop over all batches
for i in range(total_batch):
batchFiles = x_train[i * batch_size:i * batch_size + batch_size]
if taskIndex == 0:
batch = [get_image(
sample_file,
input_height=128,
input_width=128,
resize_height=64,
resize_width=64,
crop=True)
for sample_file in batchFiles]
elif taskIndex == 1:
batch = [get_image(
sample_file,
input_height=250,
input_width=250,
resize_height=64,
resize_width=64,
crop=True)
for sample_file in batchFiles]
elif taskIndex == 2:
batch = [get_image2(batch_file, 300, is_crop=True, resize_w=image_size, is_grayscale=0) \
for batch_file in batchFiles]
elif taskIndex == 3:
batch = [get_image(batch_file, 105, 105,
resize_height=64, resize_width=64,
crop=False, grayscale=False) \
for batch_file in batchFiles]
batch = np.array(batch)
batch = np.reshape(batch, (64, 64, 64, 1))
batch = np.concatenate((batch, batch, batch), axis=-1)
# Compute the offset of the current minibatch in the data.
batch_xs_target = batch
x_fixed = batch
batch_xs_input = batch
if ADD_NOISE:
batch_xs_input = batch_xs_input * np.random.randint(2, size=batch_xs_input.shape)
batch_xs_input += np.random.randint(2, size=batch_xs_input.shape)
_, tot_loss, loss_likelihood, loss_divergence = sess.run(
(train_op, loss, neg_marginal_likelihood, KL_divergence),
feed_dict={x_hat: batch_xs_input, x: batch_xs_target, keep_prob: 1.0, task_state: stateState,disentangledCount:disentangledScore})
print("epoch %f: L_tot %03.2f L_likelihood %03.2f L_divergence %03.2f" % (
epoch, tot_loss, loss_likelihood, loss_divergence))
y_PRR = sess.run(y, feed_dict={x_hat: x_fixed, keep_prob: 1,task_state:stateState,disentangledCount:disentangledScore})
y_RPR = np.reshape(y_PRR, (-1, 64, 64,3))
ims("results/" + "VAE" + str(epoch) + ".jpg", merge2(y_RPR[:64], [8, 8]))
if epoch > 0:
x_fixed_image = np.reshape(x_fixed, (-1, 64, 64,3))
ims("results/" + "Real" + str(epoch) + ".jpg", merge2(x_fixed_image[:64], [8, 8]))
#select the most relevant component
score = sess.run(totalScore, feed_dict={x_hat: x_fixed, keep_prob: 1, task_state: stateState})
a = np.argmin(score, axis=0)
index = a
if index == 0:
stateState[:, 0:1] = 0
elif index == 1:
stateState[:, 1:2] = 0
elif index == 2:
stateState[:, 2:3] = 0
elif index == 3:
stateState[:, 3:4] = 0
saver.save(sess, 'models/LifelongMixture_64_Dirichlet')
|
[
"noreply@github.com"
] |
noreply@github.com
|
8cf1337f8036de2054ba11a4c1ef5921ff9e2863
|
641f76328bfeb7e54f0793a18c5b7c00595b98fd
|
/apps/goods/migrations/0015_auto_20181019_1007.py
|
a9bf43d5073534905d8a89c4b1ee68ce1ac10451
|
[
"Apache-2.0"
] |
permissive
|
lianxiaopang/camel-store-api
|
1d16060af92eb01607757c0423377a8c94c3a726
|
b8021250bf3d8cf7adc566deebdba55225148316
|
refs/heads/master
| 2020-12-29T13:23:18.118617
| 2020-02-09T08:38:53
| 2020-02-09T08:38:53
| 238,621,246
| 0
| 0
|
Apache-2.0
| 2020-02-07T14:28:35
| 2020-02-06T06:17:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,439
|
py
|
# Generated by Django 2.1.2 on 2018-10-19 02:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('goods', '0014_auto_20181011_1646'),
]
operations = [
migrations.AlterModelOptions(
name='goodscategory',
options={'ordering': ('index', '-is_active'), 'verbose_name': '商品类别', 'verbose_name_plural': '商品类别'},
),
migrations.AlterModelOptions(
name='goodtype',
options={'ordering': ('index',), 'verbose_name': '商品规格', 'verbose_name_plural': '商品规格'},
),
migrations.AddField(
model_name='goodscategory',
name='index',
field=models.PositiveSmallIntegerField(default=0, verbose_name='优先级'),
),
migrations.AddField(
model_name='goodscategory',
name='is_active',
field=models.BooleanField(default=True, verbose_name='是否启用'),
),
migrations.AddField(
model_name='goodtype',
name='asset_ratio',
field=models.PositiveSmallIntegerField(default=0, help_text='单位:%', verbose_name='返利比例'),
),
migrations.AddField(
model_name='goodtype',
name='index',
field=models.PositiveSmallIntegerField(default=0, verbose_name='优先级'),
),
]
|
[
"lyh@gzqichang.com"
] |
lyh@gzqichang.com
|
3b34003880bed4318fd90ace0533ced787c31225
|
cc9405d9b7233b103e66660054db1f640ca6147a
|
/core/urls.py
|
719d616cdf74716ca76d53fcf6f2864b82983328
|
[] |
no_license
|
devjass/WebPlayGround
|
d6f5f1704fffacfe6c2a683a533b24b20d07aaff
|
8e8600078895d9e91847bcf5bb71f4bbc98ca082
|
refs/heads/master
| 2023-05-15T19:25:48.091753
| 2021-06-12T05:50:20
| 2021-06-12T05:50:20
| 376,207,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
from django.urls import path
from .views import HomePageView,SamplePageView
urlpatterns = [
path('', HomePageView.as_view(), name="home"),
path('sample/', SamplePageView.as_view(), name="sample"),
]
|
[
"development.jass@gmail.com"
] |
development.jass@gmail.com
|
d01b1468d7aaf781d587e8b861611e92d26f28dd
|
e8f99a162207cba82d4e0f969d7bcdb2b9d8b522
|
/imooc/python3_shizhan/ten/c1.py
|
6a78a3e875eb35796ea35e07c606f9f44d0ef637
|
[] |
no_license
|
TesterCC/Python3Scripts
|
edb5446278ebf13edb64336001081941ca27d67d
|
58be67e1ffc74ef50289a885aa4ad05f58e2c383
|
refs/heads/master
| 2023-08-30T21:16:38.328045
| 2023-08-17T11:23:08
| 2023-08-17T11:23:08
| 93,401,996
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'MFC'
__time__ = '18/5/2 21:48'
"""
第10章 正则表达式与JSON
正则表达式
JSON XML
正则表达式是一个特殊的字符序列,一个字符串是否与我们所设定的这样的字符序列相匹配。
快速检索文本、实现一些替换文本的操作
1.检查一串数字是否是电话号码
2.检测一个字符串是否符合email
3.把一个文本里指定的单词替换为另外一个单词
如果正则用的6,可以不用很多内置方法
"""
a = 'C|C++|Java|C#|Python|Javascript'
# Python内置函数,用来判断字符串是否包含Python
print(a.index('Python'))
print(a.index('Python') > -1)
print('Python' in a)
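# A minimal regex sketch of the use cases listed in the docstring above (illustrative only):
import re
print(re.search(r'Python', a) is not None)  # membership check via a regular expression
print(re.sub(r'Javascript', 'JS', a))       # replace one word with another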
|
[
"liyanxi07@gmail.com"
] |
liyanxi07@gmail.com
|
b07084a05c9106fed5e9f3fceaf902363990afb6
|
50c668e9e0c10c1bcfd093b824e58ab66867cf30
|
/17-POO-constructor/main.py
|
3127bcbe5c9a58779b831af0c6c681b5e346165c
|
[] |
no_license
|
bthecs/Python
|
1d4e9f424fce633c2fe50455654b21a1e56b3a19
|
b587f67bc6f999de4e80ebb53982430e48a68242
|
refs/heads/master
| 2023-03-29T00:40:36.071294
| 2021-03-30T00:31:34
| 2021-03-30T00:31:34
| 352,788,286
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
from coche import Coche
carro = Coche("Naranja","Gallardo","Ferrari",400,1000,2)
carro1 = Coche("Azul","Clio","Renault",400,1000,2)
carro2 = Coche("Blanco","Argo","Fiat",400,1000,2)
print(carro.getInfo())
print(carro1.getInfo())
print(carro2.getInfo())
# Check the object's type
carro1 = "Perro"
if type(carro1) == Coche:
print("Es un objeto correcto!!!")
else:
print("No es un objeto coche")
# Attribute visibility
print(carro.soy_publico)
print(carro.__privado)  # raises AttributeError: name mangling turns this into _Coche__privado
|
[
"fl.gimenez@alumno.um.edu.ar"
] |
fl.gimenez@alumno.um.edu.ar
|
ee99cd3db0efef6feba5b3f967b69c3244f87446
|
f6284c82a06e6a6037d7d6eb488337ce099f7566
|
/geektrust_challenges/make_space/utils/constants.py
|
8cbd4a9dde7f9d79cf613a0f6dcf7227c08aa1c0
|
[] |
no_license
|
kartiky9/machine_coding
|
3677805c8836a6f8d32a7b2af283f3fa8ce090a5
|
30045db300a36564f6d27f002438059f329cb2e0
|
refs/heads/main
| 2023-07-27T08:03:56.576660
| 2021-09-09T07:27:32
| 2021-09-09T07:27:32
| 404,340,789
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
class InputType:
BOOK = 'BOOK'
VACANCY = 'VACANCY'
class Output:
INCORRECT_INPUT = 'INCORRECT_INPUT'
NO_VACANT_ROOM = 'NO_VACANT_ROOM'
MINUTE_INTERVALS = 15
|
[
"13693180+kartiky9@users.noreply.github.com"
] |
13693180+kartiky9@users.noreply.github.com
|
8aa68c99463545c8c82d13104e1a46c6ea0065c7
|
07e12ec5f9b8eb898c0c7c67d1e0a50ea66ca14d
|
/clear.py
|
3b56cdcc871c022a6484c208bf29a9e8f90d1f20
|
[] |
no_license
|
EzAccount/LBM
|
d14566511800a330c076d2d4642e740d9a45e36f
|
8fd36968646335e3ed6389e50cdf1e4399eb167b
|
refs/heads/master
| 2022-04-18T19:32:41.252861
| 2020-03-03T20:18:35
| 2020-03-03T20:18:35
| 139,371,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 64
|
py
|
#! /usr/bin/python
import os
os.system("rm -rf results/*.dat");
|
[
"misha7322@hotmail.com"
] |
misha7322@hotmail.com
|
d8ee391707950c00d257afd550aa1669106703ba
|
66aecca0128d9823fd18e8840b8f341d028e7052
|
/account/migrations/0003_auto_20181225_1816.py
|
1f24da8d2a809860fa0d86bad72c5702b3e147ca
|
[] |
no_license
|
maksimes/my-first-blog
|
a23b3db3f789273c58c91a9cdf9a36adc5749b1b
|
c57863490e1582fa840e66dfb0ce0b17dce4fcbb
|
refs/heads/master
| 2020-04-05T12:14:31.286179
| 2019-04-03T19:46:36
| 2019-04-03T19:46:36
| 156,261,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-25 15:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0002_auto_20181225_0044'),
]
operations = [
migrations.AddField(
model_name='personprofile',
name='city',
field=models.CharField(default='', max_length=30, verbose_name='Город'),
),
migrations.AlterField(
model_name='personprofile',
name='gender',
field=models.CharField(choices=[('MAN', 'Мужской'), ('WOMAN', 'Женский')], max_length=5, verbose_name='Пол'),
),
]
|
[
"maksimes@mail.ru"
] |
maksimes@mail.ru
|
2445240430a4f61b9f76afca22102c4397f33bd7
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/gkioxari_RstarCNN/RstarCNN-master/lib/datasets/attr_bpad.py
|
1d8c0fb80696afdd175613117b34dc6d6c4573fd
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 10,478
|
py
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) Microsoft. All rights reserved.
# Written by Ross Girshick, 2015.
# Licensed under the BSD 2-clause "Simplified" license.
# See LICENSE in the project root for license information.
# --------------------------------------------------------
# --------------------------------------------------------
# R*CNN
# Written by Georgia Gkioxari, 2015.
# See LICENSE in the project root for license information.
# --------------------------------------------------------
import datasets.pascal_voc
import os
import datasets.imdb
import xml.dom.minidom as minidom
import numpy as np
import scipy.sparse
import scipy.io as sio
import utils.cython_bbox
import cPickle
import subprocess
import pdb
class attr_bpad(datasets.imdb):
def __init__(self, image_set, devkit_path=None):
datasets.imdb.__init__(self, 'bpad_' + image_set)
self._year = '2015'
self._image_set = image_set
self._devkit_path = self._get_default_path() if devkit_path is None \
else devkit_path
self._base_path = os.path.join(self._devkit_path, 'BAPD')
self._classes = ('is_male', 'has_long_hair', 'has_glasses',
'has_hat', 'has_tshirt', 'has_long_sleeves',
'has_shorts', 'has_jeans', 'has_long_pants')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.selective_search_roidb
# PASCAL specific config options
self.config = {'cleanup' : True,
'use_salt' : True}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._base_path), \
'Path does not exist: {}'.format(self._base_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._base_path, 'Images',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._base_path, 'selective_search',
'ss_attributes_' + self._image_set + '.mat')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
raw_data = sio.loadmat(image_set_file)
images = raw_data['images'].ravel()
image_index = [im[0].strip() for im in images]
return image_index
def _get_default_path(self):
"""
Return the default path where data is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
# Load all annotation file data (should take < 30 s).
gt_roidb = self._load_annotation()
# print number of ground truth classes
cc = np.zeros(len(self._classes), dtype = np.int16)
for i in xrange(len(gt_roidb)):
gt_classes = gt_roidb[i]['gt_classes']
num_objs = gt_classes.shape[0]
for n in xrange(num_objs):
valid_classes = np.where(gt_classes[n] == 1)[0]
cc[valid_classes] +=1
for ic,nc in enumerate(cc):
print "Count {:s} : {:d}".format(self._classes[ic], nc)
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} ss roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
roidb = self._merge_roidbs(gt_roidb, ss_roidb)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote ss roidb to {}'.format(cache_file)
return roidb
def _merge_roidbs(self, a, b):
assert len(a) == len(b)
for i in xrange(len(a)):
a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
a[i]['gt_classes'] = np.vstack((a[i]['gt_classes'],
b[i]['gt_classes']))
a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],
b[i]['gt_overlaps']])
return a
def _load_selective_search_roidb(self, gt_roidb):
filename = os.path.join(self._base_path, 'selective_search',
'ss_attributes_' + self._image_set + '.mat')
# filename = op.path.join(self.cache_path, 'MCG_data', self.name + '.mat')
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)
num_images = raw_data['boxes'].ravel().shape[0]
ss_roidb = []
for i in xrange(num_images):
boxes = raw_data['boxes'].ravel()[i][:, (1, 0, 3, 2)] - 1
num_boxes = boxes.shape[0]
gt_boxes = gt_roidb[i]['boxes']
num_objs = gt_boxes.shape[0]
gt_classes = gt_roidb[i]['gt_classes']
gt_overlaps = \
utils.cython_bbox.bbox_overlaps(boxes.astype(np.float),
gt_boxes.astype(np.float))
overlaps = scipy.sparse.csr_matrix(gt_overlaps)
ss_roidb.append({'boxes' : boxes,
'gt_classes' : np.zeros((num_boxes, self.num_classes),
dtype=np.int32),
'gt_overlaps' : overlaps,
'flipped' : False})
return ss_roidb
def _load_annotation(self):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
gt_roidb = []
filename = os.path.join(self._base_path, 'ground_truth',
'gt_attributes_' + self._image_set + '.mat')
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename, mat_dtype=True)
all_boxes = raw_data['boxes'].ravel()
all_images = raw_data['images'].ravel()
all_attributes = raw_data['attributes'].ravel()
num_images = len(all_images)
for imi in xrange(num_images):
num_objs = all_boxes[imi].shape[0]
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
overlaps = np.zeros((num_objs, num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for i in xrange(num_objs):
# Make pixel indexes 0-based
box = all_boxes[imi][i]
assert(not np.any(np.isnan(box)))
# Read attributes labels
attr = all_attributes[imi][i]
# Change attributes labels
# -1 -> 0
# 0 -> -1
unknown_attr = attr == 0
neg_attr = attr == -1
attr[neg_attr] = 0
attr[unknown_attr] = -1
boxes[i, :] = box - 1
gt_classes[i, :] = attr
overlaps[i, i] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
gt_roidb.append({'boxes' : boxes,
'gt_classes': gt_classes,
'gt_overlaps' : overlaps,
'flipped' : False})
return gt_roidb
def _write_results_file(self, all_boxes, comp):
path = os.path.join(self._devkit_path, 'results', 'BAPD')
print 'Writing results file'
filename = path + comp + '.txt'
with open(filename, 'wt') as f:
for i in xrange(all_boxes.shape[0]):
ind = all_boxes[i,0].astype(np.int64)
index = self.image_index[ind-1]
voc_id = all_boxes[i,1].astype(np.int64)
f.write('{:s} {:d}'.format(index, voc_id))
for cli in xrange(self.num_classes):
score = all_boxes[i,2+cli]
f.write(' {:.3f}'.format(score))
f.write('\n')
if __name__ == '__main__':
d = datasets.pascal_voc('trainval', '2012')
res = d.roidb
from IPython import embed; embed()
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
9eaf1ce6cbbbcedac5832c605917bc09ed334036
|
da154bed336f6806b3c916ba1c969099b55fcc2e
|
/Samples and Demos(For review)/basic_transmit.py
|
ab8674cd84e2272e5c8ccbb7bfc48916b4adaf02
|
[] |
no_license
|
utadahikaru/Self-CV-Practice
|
e3b7b3bda5f99335eb8f8dcf6e891a654e593ccb
|
ffc4ef3f9980f037ffb5344004752c7d43c1f13c
|
refs/heads/master
| 2020-03-29T01:07:42.988699
| 2018-11-26T10:35:15
| 2018-11-26T10:35:15
| 149,372,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,979
|
py
|
# coding:utf-8
# 0. Import modules and generate a simulated data set.
import tensorflow as tf
import numpy as np
BATCH_SIZE = 8
SEED = 23455
# Create a random number generator based on the seed
rdm = np.random.RandomState(SEED)
# Generate a 32x2 random matrix: 32 samples of (volume, weight) as the input data set
X = rdm.rand(32, 2)
# For each row of X, assign the label 1 if the two values sum to less than 1, otherwise 0.
# These labels serve as the ground truth for the input data set.
Y_ = [[int(x0 + x1 < 1)] for (x0, x1) in X]
print("X:\n", X)
print("Y_:\n", Y_)
# 1. Define the network inputs, parameters and outputs, and the forward-propagation graph.
x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
# 2. Define the loss function and the backpropagation method.
loss_mse = tf.reduce_mean(tf.square(y - y_))
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss_mse)
# train_step = tf.train.MomentumOptimizer(0.001,0.9).minimize(loss_mse)
# train_step = tf.train.AdamOptimizer(0.001).minimize(loss_mse)
# 3. Create a session and train for STEPS iterations
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Print the current (untrained) parameter values.
print("w1:\n", sess.run(w1))
print("w2:\n", sess.run(w2))
print("\n")
# Train the model.
STEPS = 3000
for i in range(STEPS):
start = (i * BATCH_SIZE) % 32
end = start + BATCH_SIZE
sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
if i % 500 == 0:
total_loss = sess.run(loss_mse, feed_dict={x: X, y_: Y_})
print("After %d training step(s), loss_mse on all data is %g" % (i, total_loss))
# Print the parameter values after training.
print("\n")
print("w1:\n", sess.run(w1))
print("w2:\n", sess.run(w2))
|
[
"kanaliushijun@gmail.com"
] |
kanaliushijun@gmail.com
|
89043c094193f8acc281258306eb8f8f0765498e
|
31766af2b2e0957e58078095d8822ffc760189ba
|
/baekjoon/Python/q1717.py
|
562d7e9d57778af114df55ce84c6238c37ac3f20
|
[] |
no_license
|
ha-yujin/algorithm
|
618d0c7c55dfee0a9b4f0ff15018feceb5f4d07f
|
3318b5d7c703f5c3cb4a6475e04b2f0aaa7e7432
|
refs/heads/master
| 2023-01-18T16:30:28.628344
| 2020-11-30T14:14:56
| 2020-11-30T14:14:56
| 279,265,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
# Representation of disjoint sets - Union-Find
def find_parent(x):
if parent[x]==x:
return x
else:
parent[x]=find_parent(parent[x])
return parent[x]
def union(x,y):
r1 = find_parent(x)
r2=find_parent(y)
if r1 > r2:
parent[r1]=r2
else:
parent[r2]=r1
def check(x,y):
r1= find_parent(x)
r2=find_parent(y)
if r1==r2:
print("YES")
else:
print("NO")
n, m = map(int,input().split())
operation = [ list(map(int,input().split())) for _ in range(m)]
parent = [ i for i in range(n+1)]
for op in operation:
if op[0]==0:
union(op[1],op[2])
elif op[0]==1:
check(op[1],op[2])
|
[
"hoj2887@dongguk.edu"
] |
hoj2887@dongguk.edu
|
6e412c2830f0c0210c5542502eff73dfa2776a76
|
1b78ca7f3250ebed418717c6ea28b5a77367f1b8
|
/411.minimum-unique-word-abbreviation/minimum-unique-word-abbreviation.py
|
70887cecba089f780017d17a96ca6739c187979c
|
[] |
no_license
|
JaniceLC/lc-all-solutions
|
ced854f31b94f44c0b03a0677988805e3b9ee718
|
3f2a4ee8c09a8890423c6a22c73f470eccf979a2
|
refs/heads/master
| 2020-04-05T19:53:31.307528
| 2018-11-12T04:18:45
| 2018-11-12T04:18:45
| 157,155,285
| 0
| 2
| null | 2018-11-12T04:13:22
| 2018-11-12T04:13:22
| null |
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
class Solution(object):
def minAbbreviation(self, target, dictionary):
"""
:type target: str
:type dictionary: List[str]
:rtype: str
"""
def dfs(w, start, res):
res.append(w)
for i in xrange(start, len(w)):
for l in reversed(xrange(1, len(w) - i + 1)):
dfs(w[:i] + [str(l)] + w[i+l:], i + 2, res)
def match(src, dest):
i = 0
for c in src:
if c.isdigit():
jump = int(c)
i += jump
else:
if c != dest[i]:
return False
i += 1
return True
if not dictionary:
return str(len(target))
wordLen = len(target)
res = []
dfs(list(target), 0, res)
res.sort(key=lambda x:len(x))
dictionary = filter(lambda s: len(s) == wordLen, dictionary)
for w in res:
allMiss = True
for d in dictionary:
if match(w, d):
allMiss = False
break
if allMiss:
return "".join(w)
return None
|
[
"jedihy@yis-macbook-pro.local"
] |
jedihy@yis-macbook-pro.local
|
3330ec5ca7f6b0fb66c55b33c5965f82536c61ca
|
72cbc497c1a36ad66cedaf6fd0a880ee331f11e7
|
/uri-problem90-100.py
|
2a28f871ddbca7d798ad7dff5b7bb15dfcbd4f3b
|
[] |
no_license
|
Anik85/uri-begineer-solution-in-python
|
9e53ce44109388f91596587f49a6c3657907c867
|
5b387c4efa007881dcc42e8bfd9e7974d2b123f3
|
refs/heads/master
| 2023-01-31T23:21:22.156292
| 2020-12-05T18:01:46
| 2020-12-05T18:01:46
| 291,981,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
'''#just practice
digit={
"1": "one ",
"2": "two",
"3": "three",
"4": "four"
}
phone=input()
output=""
for ch in phone:
output += digit.get(ch,"!")+" "
print(output)
def function(N):
for i in range(1,N):
print(i,end=" ")
print(N)
N=int(input())
if 1<=N<=1000:
function(N)
#lucky divisions
n=int(input())
if 1<=n<=1000:
arr=[4,7,47,74,44,444,447,474,477,777,774,744]
flag=0
for i in range(len(arr)):
if n%arr[i]==0:
flag=True
if flag:
print("YES")
else:
print("NO")'''
|
[
"noreply@github.com"
] |
noreply@github.com
|
e5811eaa99eb0ea2a9e3f35b55128c42962f6ab6
|
ffd1413f7ed9c78726addccb328a616e4c62a635
|
/migrations/log_ignore.py
|
53120ab5f78177fe275ba273fb81bd08b3a9d050
|
[
"MIT"
] |
permissive
|
NotSoPrivate/NotSoBot
|
4be3fd33954830887e98f0abadb3df985f8ca917
|
75a30ead638f824035bc06d93a62ba726845ceaa
|
refs/heads/master
| 2023-03-14T17:00:24.941399
| 2021-03-22T13:05:34
| 2021-03-22T13:05:34
| 350,292,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 959
|
py
|
import pymysql
import datetime
connection = pymysql.connect(host='localhost',
user='discord',
password='q3cnvtvWIy62BQlx',
db='discord',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
cursor.execute('SELECT * FROM `logs`')
result = cursor.fetchall()
sql = 'INSERT INTO `logs_ignore` (`type`, `server`, `id`) VALUES (%s, %s, %s)'
count = 0
for r in result:
try:
server, ignore_users, avatar_ignore = r['server'], r['ignore_users'], r['avatar_ignore']
if ignore_users:
for user in ignore_users.split(', '):
cursor.execute(sql, (False, server, int(user)))
if avatar_ignore:
for user in avatar_ignore.split(', '):
cursor.execute(sql, (True, server, int(user)))
connection.commit()
finally:
count += 1
print('done', count)
|
[
"root@mods.nyc"
] |
root@mods.nyc
|
0ec404b9b92a1950ead916d9356841cf3bb18eb4
|
d7bf691c35d7bf2a5707e47d7aca98b509e02eb9
|
/pddlstream/algorithms/algorithm.py
|
7a29c0eba6f399ea3752c4684788b164a65873f9
|
[
"MIT"
] |
permissive
|
himanshisyadav/pddlstream
|
7d43c16da903504a0232408a7d8077fd4da95d87
|
1038e702f1d4625791f1da7867d6226b02af8c3a
|
refs/heads/master
| 2020-04-11T11:48:19.324553
| 2018-11-14T18:28:27
| 2018-11-14T18:28:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,291
|
py
|
import time
from collections import OrderedDict, deque, namedtuple, Counter
from pddlstream.algorithms.downward import parse_domain, get_problem, task_from_domain_problem, \
parse_lisp, sas_from_pddl, parse_goal
from pddlstream.algorithms.search import abstrips_solve_from_task
from pddlstream.language.constants import get_prefix, get_args
from pddlstream.language.conversion import obj_from_value_expression, obj_from_pddl_plan, \
evaluation_from_fact, substitute_expression
from pddlstream.language.exogenous import compile_to_exogenous, replace_literals
from pddlstream.language.external import External, DEBUG, get_plan_effort
from pddlstream.language.function import parse_function, parse_predicate, Function, Predicate
from pddlstream.language.object import Object
from pddlstream.language.rule import parse_rule
from pddlstream.language.stream import parse_stream, Stream
from pddlstream.utils import elapsed_time, INF, get_mapping, find_unique, get_length, str_from_plan
from pddlstream.language.optimizer import parse_optimizer, VariableStream, ConstraintStream
# TODO: way of programmatically specifying streams/actions
INITIAL_EVALUATION = None
def parse_constants(domain, constant_map):
obj_from_constant = {}
for constant in domain.constants:
if constant.name.startswith(Object._prefix): # TODO: check other prefixes
raise NotImplementedError('Constants are not currently allowed to begin with {}'.format(Object._prefix))
if constant.name not in constant_map:
raise ValueError('Undefined constant {}'.format(constant.name))
value = constant_map.get(constant.name, constant.name)
obj_from_constant[constant.name] = Object(value, name=constant.name) # TODO: remap names
# TODO: add object predicate
for name in constant_map:
for constant in domain.constants:
if constant.name == name:
break
else:
raise ValueError('Constant map value {} not mentioned in domain :constants'.format(name))
del domain.constants[:] # So not set twice
return obj_from_constant
def check_problem(domain, streams, obj_from_constant):
for action in domain.actions + domain.axioms:
for p, c in Counter(action.parameters).items():
if c != 1:
raise ValueError('Parameter [{}] for action [{}] is not unique'.format(p.name, action.name))
# TODO: check that no undeclared parameters & constants
#action.dump()
for stream in streams:
# TODO: domain.functions
facts = list(stream.domain)
if isinstance(stream, Stream):
facts.extend(stream.certified)
for fact in facts:
name = get_prefix(fact)
if name not in domain.predicate_dict: # Undeclared predicate: {}
print('Warning! Undeclared predicate used in stream [{}]: {}'.format(stream.name, name))
elif len(get_args(fact)) != domain.predicate_dict[name].get_arity(): # predicate used with wrong arity: {}
print('Warning! predicate used with wrong arity in stream [{}]: {}'.format(stream.name, fact))
for constant in stream.constants:
if constant not in obj_from_constant:
raise ValueError('Undefined constant in stream [{}]: {}'.format(stream.name, constant))
def parse_problem(problem, stream_info={}):
# TODO: just return the problem if already written programmatically
domain_pddl, constant_map, stream_pddl, stream_map, init, goal = problem
domain = parse_domain(domain_pddl)
if len(domain.types) != 1:
raise NotImplementedError('Types are not currently supported')
obj_from_constant = parse_constants(domain, constant_map)
streams = parse_stream_pddl(stream_pddl, stream_map, stream_info)
evaluations = OrderedDict((evaluation_from_fact(obj_from_value_expression(f)), INITIAL_EVALUATION) for f in init)
goal_expression = obj_from_value_expression(goal)
check_problem(domain, streams, obj_from_constant)
parse_goal(goal_expression, domain) # Just to check that it parses
#normalize_domain_goal(domain, goal_expression)
# TODO: refactor the following?
compile_to_exogenous(evaluations, domain, streams)
compile_fluent_streams(domain, streams)
enforce_simultaneous(domain, streams)
return evaluations, goal_expression, domain, streams
##################################################
def get_predicates(expression):
import pddl.conditions
if isinstance(expression, pddl.conditions.ConstantCondition):
return set()
if isinstance(expression, pddl.conditions.JunctorCondition) or \
isinstance(expression, pddl.conditions.QuantifiedCondition):
predicates = set()
for part in expression.parts:
predicates.update(get_predicates(part))
return predicates
if isinstance(expression, pddl.conditions.Literal):
return {expression.predicate}
raise ValueError(expression)
def enforce_simultaneous(domain, externals):
axiom_predicates = set()
for axiom in domain.axioms:
axiom_predicates.update(get_predicates(axiom.condition))
for external in externals:
if (type(external) in [VariableStream, ConstraintStream]) and not external.info.simultaneous:
predicates = {get_prefix(fact) for fact in external.certified}
if predicates & axiom_predicates:
external.info.simultaneous = True
#print(external, (predicates & axiom_predicates))
##################################################
def has_costs(domain):
for action in domain.actions:
if action.cost is not None:
return True
return False
def solve_finite(evaluations, goal_expression, domain, unit_costs=None, debug=False, **kwargs):
if unit_costs is None:
unit_costs = not has_costs(domain)
problem = get_problem(evaluations, goal_expression, domain, unit_costs)
task = task_from_domain_problem(domain, problem)
sas_task = sas_from_pddl(task, debug=debug)
plan_pddl, cost = abstrips_solve_from_task(sas_task, debug=debug, **kwargs)
return obj_from_pddl_plan(plan_pddl), cost
##################################################
Solution = namedtuple('Solution', ['plan', 'cost'])
class SolutionStore(object):
def __init__(self, max_time, max_cost, verbose):
# TODO: store evaluations here as well as map from head to value?
self.start_time = time.time()
self.max_time = max_time
#self.cost_fn = get_length if unit_costs else None
self.max_cost = max_cost
self.verbose = verbose
self.best_plan = None
self.best_cost = INF
#self.best_cost = self.cost_fn(self.best_plan)
self.solutions = []
def add_plan(self, plan, cost):
# TODO: double-check that this is a solution
self.solutions.append(Solution(plan, cost))
if cost < self.best_cost:
self.best_plan = plan
self.best_cost = cost
def is_solved(self):
return self.best_cost < self.max_cost
def elapsed_time(self):
return elapsed_time(self.start_time)
def is_timeout(self):
return self.max_time <= self.elapsed_time()
def is_terminated(self):
return self.is_solved() or self.is_timeout()
def add_facts(evaluations, facts, result=None):
new_evaluations = []
for fact in facts:
evaluation = evaluation_from_fact(fact)
if evaluation not in evaluations:
evaluations[evaluation] = result
new_evaluations.append(evaluation)
return new_evaluations
def add_certified(evaluations, result):
return add_facts(evaluations, result.get_certified(), result=result)
##################################################
def get_domain_predicates(external):
return set(map(get_prefix, external.domain))
def get_certified_predicates(external):
if isinstance(external, Stream):
return set(map(get_prefix, external.certified))
if isinstance(external, Function):
return {get_prefix(external.head)}
raise ValueError(external)
def get_non_producers(externals):
# TODO: handle case where no domain conditions
pairs = set()
for external1 in externals:
for external2 in externals:
if get_certified_predicates(external1) & get_domain_predicates(external2):
pairs.add((external1, external2))
producers = {e1 for e1, _ in pairs}
non_producers = set(externals) - producers
    # TODO: these are streams that can be evaluated at the end as tests
return non_producers
##################################################
def apply_rules_to_streams(rules, streams):
    # TODO: could actually do this with multiple conditions if the stream's certified facts contain all of them
    # TODO: also do this when there are no domain conditions
processed_rules = deque(rules)
while processed_rules:
rule = processed_rules.popleft()
if len(rule.domain) != 1:
continue
[rule_fact] = rule.domain
rule.info.p_success = 0 # Need not be applied
for stream in streams:
if not isinstance(stream, Stream):
continue
for stream_fact in stream.certified:
if get_prefix(rule_fact) == get_prefix(stream_fact):
mapping = get_mapping(get_args(rule_fact), get_args(stream_fact))
new_facts = set(substitute_expression(rule.certified, mapping)) - set(stream.certified)
stream.certified = stream.certified + tuple(new_facts)
if new_facts and (stream in rules):
processed_rules.append(stream)
def parse_streams(streams, rules, stream_pddl, procedure_map, procedure_info):
stream_iter = iter(parse_lisp(stream_pddl))
assert('define' == next(stream_iter))
pddl_type, pddl_name = next(stream_iter)
assert('stream' == pddl_type)
for lisp_list in stream_iter:
name = lisp_list[0] # TODO: refactor at this point
if name in (':stream', ':wild-stream'):
externals = [parse_stream(lisp_list, procedure_map, procedure_info)]
elif name == ':rule':
externals = [parse_rule(lisp_list, procedure_map, procedure_info)]
elif name == ':function':
externals = [parse_function(lisp_list, procedure_map, procedure_info)]
elif name == ':predicate': # Cannot just use args if want a bound
externals = [parse_predicate(lisp_list, procedure_map, procedure_info)]
elif name == ':optimizer':
externals = parse_optimizer(lisp_list, procedure_map, procedure_info)
else:
raise ValueError(name)
for external in externals:
if any(e.name == external.name for e in streams):
raise ValueError('Stream [{}] is not unique'.format(external.name))
if name == ':rule':
rules.append(external)
external.pddl_name = pddl_name # TODO: move within constructors
streams.append(external)
def parse_stream_pddl(pddl_list, procedures, infos):
streams = []
if pddl_list is None:
return streams
if isinstance(pddl_list, str):
pddl_list = [pddl_list]
#if all(isinstance(e, External) for e in stream_pddl):
# return stream_pddl
if procedures != DEBUG:
procedures = {k.lower(): v for k, v in procedures.items()}
infos = {k.lower(): v for k, v in infos.items()}
rules = []
for pddl in pddl_list:
parse_streams(streams, rules, pddl, procedures, infos)
apply_rules_to_streams(rules, streams)
return streams
##################################################
def compile_fluent_streams(domain, externals):
state_streams = list(filter(lambda e: isinstance(e, Stream) and
(e.is_negated() or e.is_fluent()), externals))
predicate_map = {}
for stream in state_streams:
for fact in stream.certified:
predicate = get_prefix(fact)
assert predicate not in predicate_map # TODO: could make a conjunction condition instead
predicate_map[predicate] = stream
if not predicate_map:
return state_streams
# TODO: could make free parameters free
    # TODO: allow functions on top of the produced values?
# TODO: check that generated values are not used in the effects of any actions
# TODO: could treat like a normal stream that generates values (but with no inputs required/needed)
def fn(literal):
if literal.predicate not in predicate_map:
return literal
# TODO: other checks on only inputs
stream = predicate_map[literal.predicate]
certified = find_unique(lambda f: get_prefix(f) == literal.predicate, stream.certified)
mapping = get_mapping(get_args(certified), literal.args)
#assert all(arg in mapping for arg in stream.inputs) # Certified must contain all inputs
if not all(arg in mapping for arg in stream.inputs):
# TODO: this excludes typing. This is not entirely safe
return literal
blocked_args = tuple(mapping[arg] for arg in stream.inputs)
blocked_literal = literal.__class__(stream.blocked_predicate, blocked_args).negate()
if stream.is_negated():
# TODO: add stream conditions here
return blocked_literal
return pddl.Conjunction([literal, blocked_literal])
import pddl
for action in domain.actions:
action.precondition = replace_literals(fn, action.precondition).simplified()
# TODO: throw an error if the effect would be altered
for effect in action.effects:
if not isinstance(effect.condition, pddl.Truth):
raise NotImplementedError(effect.condition)
#assert(isinstance(effect, pddl.Effect))
#effect.condition = replace_literals(fn, effect.condition)
for axiom in domain.axioms:
axiom.condition = replace_literals(fn, axiom.condition).simplified()
return state_streams
def dump_plans(stream_plan, action_plan, cost):
print('Stream plan ({}, {:.1f}): {}\nAction plan ({}, {}): {}'.format(get_length(stream_plan),
get_plan_effort(stream_plan),
stream_plan,
get_length(action_plan), cost,
str_from_plan(action_plan)))
def partition_externals(externals):
functions = list(filter(lambda s: type(s) is Function, externals))
predicates = list(filter(lambda s: type(s) is Predicate, externals)) # and s.is_negative()
negated_streams = list(filter(lambda s: (type(s) is Stream) and s.is_negated(), externals)) # and s.is_negative()
negative = predicates + negated_streams
streams = list(filter(lambda s: s not in (functions + negative), externals))
#optimizers = list(filter(lambda s: type(s) in [VariableStream, ConstraintStream], externals))
return streams, functions, negative #, optimizers
|
[
"caelan@mit.edu"
] |
caelan@mit.edu
|
5920ba78e09eb4f5be44b465dda4879c3b817140
|
1bfebc7e1c95cd3c25024b6b1adbf518e55513bf
|
/src/pykit/strutil/test/test_hex.py
|
111d8a160a9a91f0c53b0653ae2f85d8536d8489
|
[
"MIT"
] |
permissive
|
bsc-s2/ops
|
a9a217a47dad558285ca8064fa29fdff10ab4ad7
|
6fb8ad758b328a445005627ac1e5736f17088cee
|
refs/heads/master
| 2021-06-24T09:32:49.057026
| 2020-11-02T06:50:01
| 2020-11-02T06:50:01
| 123,527,739
| 8
| 0
|
MIT
| 2020-09-03T04:58:26
| 2018-03-02T03:54:20
|
Python
|
UTF-8
|
Python
| false
| false
| 5,256
|
py
|
#!/usr/bin/env python2
# coding: utf-8
import os
import unittest
from pykit import strutil
from pykit.strutil import Hex
from pykit import ututil
from pykit import utfjson
dd = ututil.dd
class TestHex(unittest.TestCase):
def test_init(self):
byte_length = 3
cases = (
(0, 0),
('000000', 0),
('\0\0\0', 0),
(256**2 + 2*256 + 3, 0x010203),
('010203', 0x010203),
('\1\2\3', 0x010203),
)
for inp, expected in cases:
dd(inp, expected)
c = Hex(inp, byte_length)
self.assertEqual(expected, c.int)
self.assertEqual('%06x' % expected, c)
def test_attr(self):
c = Hex('010203', 3)
self.assertEqual('010203', c.hex)
self.assertEqual('\1\2\3', c.bytes)
self.assertEqual(256**2 + 2*256 + 3, c.int)
self.assertIs('010203', c.hex)
self.assertIsNot('010203', c)
def test_init_invalid(self):
byte_length = 3
cases = (
(256**3-1, None),
(256**3, ValueError),
(-1, ValueError),
('\1\2', ValueError),
('\1\2\3\4', ValueError),
('0102', ValueError),
('01020', ValueError),
('0102030', ValueError),
('01020304', ValueError),
({}, TypeError),
)
for inp, err in cases:
dd(inp, err)
if err is None:
c = Hex(inp, byte_length)
else:
self.assertRaises(err, Hex, inp, byte_length)
def test_named_length(self):
val = 0x010203
cases = (
('crc32', '00010203'),
('Crc32', '00010203'),
('CRC32', '00010203'),
('md5', '00000000000000000000000000010203'),
('Md5', '00000000000000000000000000010203'),
('MD5', '00000000000000000000000000010203'),
('sha1', '0000000000000000000000000000000000010203'),
('Sha1', '0000000000000000000000000000000000010203'),
('SHA1', '0000000000000000000000000000000000010203'),
('sha256', '0000000000000000000000000000000000000000000000000000000000010203'),
('Sha256', '0000000000000000000000000000000000000000000000000000000000010203'),
('SHA256', '0000000000000000000000000000000000000000000000000000000000010203'),
)
for typ, expected in cases:
c = Hex(val, typ)
self.assertEqual(expected, c)
def test_checksum_shortcut(self):
val = 0x010203
self.assertEqual(Hex(val, 'crc32'), Hex.crc32(val))
self.assertEqual(Hex(val, 'md5'), Hex.md5(val))
self.assertEqual(Hex(val, 'sha1'), Hex.sha1(val))
self.assertEqual(Hex(val, 'sha256'), Hex.sha256(val))
def test_prefix(self):
pref = '1234'
cases = (
('crc32', '12340000'),
('md5', '12340000000000000000000000000000'),
('sha1', '1234000000000000000000000000000000000000'),
('sha256', '1234000000000000000000000000000000000000000000000000000000000000'),
)
for typ, expected in cases:
dd('typ:', typ)
c = Hex((pref, 0), typ)
self.assertEqual(expected, c)
self.assertEqual('12340101', Hex((pref, 1), 'crc32'))
def test_str_repr(self):
c = Hex.crc32(1)
self.assertEqual('00000001', str(c))
self.assertEqual("'00000001'", repr(c))
def test_json(self):
c = Hex.crc32(('0002', 0))
rst = utfjson.dump(c)
self.assertEqual('"00020000"', rst)
self.assertEqual(c, utfjson.load(rst))
def test_arithmetic(self):
c = Hex.crc32(5)
self.assertEqual(6, (c+1).int)
self.assertEqual(10, (c*2).int)
self.assertEqual(2, (c/2).int)
self.assertEqual(0, (c/6).int)
self.assertEqual(1, (c % 2).int)
self.assertEqual(25, (c**2).int)
self.assertEqual('00000006', (c+1))
self.assertEqual('0000000a', (c*2))
self.assertEqual('00000002', (c/2))
self.assertEqual('00000000', (c/6))
self.assertEqual('00000001', (c % 2))
self.assertEqual('00000019', (c**2))
self.assertEqual(6, (c + Hex.crc32(1)).int)
# overflow protection
self.assertEqual(0, (c-5).int)
self.assertEqual(0, (c-6).int)
d = Hex.crc32(('', 0xff))
self.assertEqual(d, d+1)
def test_arithmetic_error(self):
c = Hex.crc32(5)
cases = (
[],
(),
{},
'x',
u'我',
)
for inp in cases:
with self.assertRaises(TypeError):
c + inp
with self.assertRaises(TypeError):
c - inp
with self.assertRaises(TypeError):
c * inp
with self.assertRaises(TypeError):
c / inp
with self.assertRaises(TypeError):
c % inp
with self.assertRaises(TypeError):
c ** inp
|
[
"drdr.xp@gmail.com"
] |
drdr.xp@gmail.com
|
c38c19f67d976a9f4044f56d3cdcc1eb31710082
|
c9cafe2123fd348174f36e110865dc7915c45a8f
|
/blog/models.py
|
153ed935b43349f992606f862b716cf7eb37955c
|
[] |
no_license
|
CESAREOMARIO/mi_segundo_blog
|
899e11565d3f156a631e0f0de421ad476f1b2e7c
|
229264b46435f20a9aa71ab17f73dd5f7ed9d44c
|
refs/heads/master
| 2020-08-06T18:10:36.591464
| 2019-10-06T03:08:11
| 2019-10-06T03:08:11
| 213,102,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
from django.db import models
from django.utils import timezone
# Create your models here.
class Post(models.Model):
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null= True)
imagen = models.ImageField()
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
|
[
"cguajardomur@gmail.com"
] |
cguajardomur@gmail.com
|
a502baacd568f4ec8f715ef459a5d0689434064b
|
5e557741c8867bca4c4bcf2d5e67409211d059a3
|
/torch/distributed/elastic/agent/server/local_elastic_agent.py
|
c84df1a8e434267abf07aca90210e89b834c1b00
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
Pandinosaurus/pytorch
|
a2bb724cfc548f0f2278b5af2fd8b1d2758adb76
|
bb8978f605e203fbb780f03010fefbece35ac51c
|
refs/heads/master
| 2023-05-02T20:07:23.577610
| 2021-11-05T14:01:30
| 2021-11-05T14:04:40
| 119,666,381
| 2
| 0
|
NOASSERTION
| 2021-11-05T19:55:56
| 2018-01-31T09:37:34
|
C++
|
UTF-8
|
Python
| false
| false
| 9,100
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import signal
import tempfile
from typing import Any, Dict, Optional, Tuple
from torch.distributed.elastic.agent.server.api import (
RunResult,
SimpleElasticAgent,
WorkerGroup,
WorkerSpec,
WorkerState,
)
from torch.distributed.elastic.metrics.api import prof
from torch.distributed.elastic.multiprocessing import PContext, start_processes
from torch.distributed.elastic.utils import macros
from torch.distributed.elastic.utils.logging import get_logger
log = get_logger()
class LocalElasticAgent(SimpleElasticAgent):
"""
An implementation of :py:class:`torchelastic.agent.server.ElasticAgent`
that handles host-local workers.
This agent is deployed per host and is configured to spawn ``n`` workers.
When using GPUs, ``n`` maps to the number of GPUs available on the host.
The local agent does not communicate to other local agents deployed on
other hosts, even if the workers may communicate inter-host. The worker id
is interpreted to be a local process. The agent starts and stops all worker
processes as a single unit.
The worker function and argument passed to the worker function must be
python multiprocessing compatible. To pass multiprocessing data structures
to the workers you may create the data structure in the same multiprocessing
context as the specified ``start_method`` and pass it as a function argument.
The ``exit_barrier_timeout`` specifies the amount of time (in seconds) to wait
for other agents to finish. This acts as a safety net to handle cases where
workers finish at different times, to prevent agents from viewing workers
that finished early as a scale-down event. It is strongly advised that the
user code deal with ensuring that workers are terminated in a synchronous
manner rather than relying on the exit_barrier_timeout.
Example launching function
::
def trainer(args) -> str:
return "do train"
def main():
start_method="spawn"
shared_queue= multiprocessing.get_context(start_method).Queue()
spec = WorkerSpec(
role="trainer",
local_world_size=nproc_per_process,
entrypoint=trainer,
args=("foobar",),
...<OTHER_PARAMS...>)
agent = LocalElasticAgent(spec, start_method)
results = agent.run()
if results.is_failed():
print("trainer failed")
else:
print(f"rank 0 return value: {results.return_values[0]}")
# prints -> rank 0 return value: do train
Example launching binary
::
def main():
spec = WorkerSpec(
role="trainer",
local_world_size=nproc_per_process,
entrypoint="/usr/local/bin/trainer",
args=("--trainer_args", "foobar"),
...<OTHER_PARAMS...>)
agent = LocalElasticAgent(spec)
results = agent.run()
if not results.is_failed():
print("binary launches do not have return values")
"""
def __init__(
self,
spec: WorkerSpec,
start_method="spawn",
exit_barrier_timeout: float = 300,
log_dir: Optional[str] = None,
):
super().__init__(spec, exit_barrier_timeout)
self._start_method = start_method
self._pcontext: Optional[PContext] = None
rdzv_run_id = spec.rdzv_handler.get_run_id()
self._log_dir = self._make_log_dir(log_dir, rdzv_run_id)
def _make_log_dir(self, log_dir: Optional[str], rdzv_run_id: str):
base_log_dir = log_dir or tempfile.mkdtemp(prefix="torchelastic_")
os.makedirs(base_log_dir, exist_ok=True)
dir = tempfile.mkdtemp(prefix=f"{rdzv_run_id}_", dir=base_log_dir)
log.info(f"log directory set to: {dir}")
return dir
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _stop_workers(self, worker_group: WorkerGroup) -> None:
self._shutdown()
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
spec = worker_group.spec
store = worker_group.store
assert store is not None
master_addr, master_port = super()._get_master_addr_port(store)
restart_count = spec.max_restarts - self._remaining_restarts
use_agent_store = spec.rdzv_handler.get_backend() == "static"
args: Dict[int, Tuple] = {}
envs: Dict[int, Dict[str, str]] = {}
for worker in worker_group.workers:
local_rank = worker.local_rank
worker_env = {
"LOCAL_RANK": str(local_rank),
"RANK": str(worker.global_rank),
"GROUP_RANK": str(worker_group.group_rank),
"ROLE_RANK": str(worker.role_rank),
"ROLE_NAME": spec.role,
"LOCAL_WORLD_SIZE": str(spec.local_world_size),
"WORLD_SIZE": str(worker.world_size),
"GROUP_WORLD_SIZE": str(worker_group.group_world_size),
"ROLE_WORLD_SIZE": str(worker.role_world_size),
"MASTER_ADDR": master_addr,
"MASTER_PORT": str(master_port),
"TORCHELASTIC_RESTART_COUNT": str(restart_count),
"TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts),
"TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(),
"TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store),
"NCCL_ASYNC_ERROR_HANDLING": str(1),
}
if "OMP_NUM_THREADS" in os.environ:
worker_env["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"]
envs[local_rank] = worker_env
worker_args = list(spec.args)
worker_args = macros.substitute(worker_args, str(local_rank))
args[local_rank] = tuple(worker_args)
# scaling events do not count towards restarts (gets same attempt #)
# remove existing log dir if this restart is due to a scaling event
attempt_log_dir = os.path.join(self._log_dir, f"attempt_{restart_count}")
shutil.rmtree(attempt_log_dir, ignore_errors=True)
os.makedirs(attempt_log_dir)
assert spec.entrypoint is not None
self._pcontext = start_processes(
name=spec.role,
entrypoint=spec.entrypoint,
args=args,
envs=envs,
log_dir=attempt_log_dir,
start_method=self._start_method,
redirects=spec.redirects,
tee=spec.tee,
)
return self._pcontext.pids()
def _shutdown(self, death_sig: signal.Signals = signal.SIGTERM) -> None:
if self._pcontext:
self._pcontext.close(death_sig)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult:
role = worker_group.spec.role
worker_pids = {w.id for w in worker_group.workers}
assert self._pcontext is not None
pc_pids = set(self._pcontext.pids().values())
if worker_pids != pc_pids:
log.error(
f"[{role}] worker pids do not match process_context pids."
f" Expected: {worker_pids}, actual: {pc_pids}"
)
return RunResult(state=WorkerState.UNKNOWN)
result = self._pcontext.wait(0)
if result:
if result.is_failed():
# map local rank failure to global rank
worker_failures = {}
for local_rank, failure in result.failures.items():
worker = worker_group.workers[local_rank]
worker_failures[worker.global_rank] = failure
return RunResult(
state=WorkerState.FAILED,
failures=worker_failures,
)
else:
# copy ret_val_queue into a map with a global ranks
workers_ret_vals = {}
for local_rank, ret_val in result.return_values.items():
worker = worker_group.workers[local_rank]
workers_ret_vals[worker.global_rank] = ret_val
return RunResult(
state=WorkerState.SUCCEEDED,
return_values=workers_ret_vals,
)
else:
return RunResult(state=WorkerState.HEALTHY)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
b4e2926b4134199eaadf96a67e52631ed4a9bbce
|
427200bdf814d859665f449542fe6c9c1de5a96c
|
/doc/source/conf.py
|
a9715d0ad0714672009bacc401a85b5984fd9da9
|
[
"BSD-3-Clause"
] |
permissive
|
giltis/pyRafters
|
c54f6c4c8f02370ad168a3c90d1ce490077b5d78
|
94bf0e1d671ce58f6cbc09600e99a6d2a4b0127c
|
refs/heads/master
| 2021-01-22T13:22:19.768905
| 2014-03-28T13:40:24
| 2014-03-28T13:40:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,737
|
py
|
# -*- coding: utf-8 -*-
#
# PyLight documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 30 13:08:54 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyLight'
copyright = u'2014, Brookhaven National Lab'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyLightdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PyLight.tex', u'PyLight Documentation',
u'Brookhaven National Lab', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pylight', u'PyLight Documentation',
[u'Brookhaven National Lab'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PyLight', u'PyLight Documentation',
u'Brookhaven National Lab', 'PyLight', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
[
"tcaswell@bnl.gov"
] |
tcaswell@bnl.gov
|
197926393868d21e6ae154a9dd519b9c67bbad9c
|
cd014fae6791f51a9a382f34dbdcee6d61d84e30
|
/64_eqf_fveqf_fvf_fvegf/64.py
|
64fae91ef51cb384faf818ac502876f63733d358
|
[
"Apache-2.0"
] |
permissive
|
ckclark/Hackquest
|
1505f50fc2c735db059205d1c9bbba1832cc5059
|
65ed5fd32e79906c0e36175bbd280d976c6134bd
|
refs/heads/master
| 2021-01-16T19:32:29.434790
| 2015-09-29T13:39:04
| 2015-09-29T13:39:04
| 42,388,846
| 13
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
lines = [x.strip() for x in open('64.txt').readlines()]
for shift in [16]: #range(len(lines[0])):
out_graph = []
for line in lines:
out_line = []
for i in range(len(line) - shift):
if line[i] == line[i + shift]:
out_line.append(' ')
else:
out_line.append('*')
out_line = ''.join(out_line)
out_graph.append(out_line)
print shift
print '\n'.join(out_graph)
|
[
"clark.ck@gmail.com"
] |
clark.ck@gmail.com
|
4325bb0a9a24eb4fd75d2dd52a78330a20b42d2b
|
3ca599bf6998f36e283f2024e8869a233931a965
|
/lib/output.py
|
8b74b540821afbeabbb430a110794eb7ec52133f
|
[
"BSD-2-Clause"
] |
permissive
|
johnjohnsp1/mesc
|
6b23ba0b208c084cb926ff7631087afea825a24b
|
bfc3a0e5d710f586ea75a9d23a29cd8f2307d500
|
refs/heads/master
| 2020-12-25T23:46:51.436435
| 2014-11-13T21:53:03
| 2014-11-13T21:53:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,634
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__license__ = """
███╗ ███╗███████╗███████╗ ██████╗
████╗ ████║██╔════╝██╔════╝██╔════╝
██╔████╔██║█████╗ ███████╗██║
██║╚██╔╝██║██╔══╝ ╚════██║██║
██║ ╚═╝ ██║███████╗███████║╚██████╗
╚═╝ ╚═╝╚══════╝╚══════╝ ╚═════╝
MESC: Minimun Essential Security Checks
Author: https://twitter.com/1_mod_m/
Project site: https://github.com/1modm/mesc
Copyright (c) 2014, Miguel Morillo
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of copyright holders nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
#------------------------------------------------------------------------------
# Plugins
#------------------------------------------------------------------------------
from lib.htmloutput import htmlinfo, htmltitle
from lib.txtoutput import print_result_txt, print_title_txt
from lib.consoleoutput import print_result_console, print_title_console
__all__ = [
"print_results",
"print_titles"
]
#------------------------------------------------------------------------------
def print_results(help_command, outputresult, checkresult, checkmessage,
checkhtmlmessage, commandresult, cmdresults, tableresult, txtfile,
htmlfile, outputdirectory):
print_result_txt(help_command, outputresult, checkresult, checkmessage,
commandresult, cmdresults, txtfile, outputdirectory)
print_result_console(help_command, outputresult, checkresult,
checkmessage, commandresult, cmdresults, tableresult)
htmlinfo(htmlfile, outputdirectory, help_command, outputresult,
checkresult, checkhtmlmessage, commandresult, cmdresults)
def print_titles(title_name, hr_title, hrefsection, txtfile, htmlfile,
outputdirectory, tableresult):
print_title_txt(title_name, hr_title, txtfile, outputdirectory)
print_title_console(title_name, hr_title, tableresult)
htmltitle(htmlfile, outputdirectory, title_name, hrefsection)
|
[
"miguel.morillo@gmail.com"
] |
miguel.morillo@gmail.com
|
1175d5410bea7a625814a1ad364a134aea18001a
|
64c491fbb983a2c35bc0a31e18b4797bf915525a
|
/search_folds.py
|
59b231963c6acb5ad0e8e252a8fd773cfa350f26
|
[
"MIT"
] |
permissive
|
fabiohsmachado/bn_learning_milp
|
75e7c9fd1486d46084c41db8bf11efb8650609da
|
05ec0999969ac7c439c0cd881925399beef0613a
|
refs/heads/master
| 2021-05-29T08:37:45.459331
| 2015-08-11T18:07:10
| 2015-08-11T18:07:10
| 33,931,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
import sys
from milp import ComputeMILP
def SearchFold(scoreFile, treewidth):
print "Managing dataset ", scoreFile;
ComputeMILP(scoreFile, treewidth);
print "Finished managing dataset", scoreFile, "with time.\n";
def SearchFolds(fileList, treewidth):
for scoreFile in fileList:
SearchFold(scoreFile, treewidth);
def Error():
print "Usage:", sys.argv[0], "score_files", "treewidth";
exit(0);
if __name__ == "__main__":
try:
SearchFolds(sys.argv[1:-1], int(sys.argv[-1]));
except:
Error();
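# Example invocation (hypothetical file names): every argument except the last is a score
# file and the last argument is the treewidth bound, e.g.
#   python search_folds.py fold0.scores fold1.scores fold2.scores 4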
|
[
"fabiohsmachado@gmail.com"
] |
fabiohsmachado@gmail.com
|
531af58373c2595fa690550bdb0e1fe88237820e
|
13c5a070c180a7cdac899ee40e094896694becfa
|
/employeeproject/employeeproject/settings.py
|
f0c7ff6f25cd46f51a6470d818e90333fa02f751
|
[
"Apache-2.0"
] |
permissive
|
cs-fullstack-2019-spring/django-formclassv2-cw-clyde5649
|
3a0c87be10b961b5c6b90759bfd65f1e1dc3be43
|
4986a3145c2b06d309ac9c2ebf9231b83bf3c279
|
refs/heads/master
| 2020-04-25T17:28:40.551451
| 2019-03-01T20:05:36
| 2019-03-01T20:05:36
| 172,949,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,130
|
py
|
"""
Django settings for employeeproject project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$9_#$520o4n93rcg5osuv6a660wgg8dl814_p9__cixkfbh6f^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'emplapp',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'employeeproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'employeeproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"clyde.bledsoe@yahoo.com"
] |
clyde.bledsoe@yahoo.com
|
c2f1bce72a4047a4cf454934f74b03c2b20d0bdf
|
8fd6ee528f4699559174b80fe88965486a669523
|
/Futu/trade/kline2.py
|
813aa85eefbb631fe887752e027290544a784de6
|
[] |
no_license
|
aptentity/futu_quoter
|
60ce51616b0c93e06beca4ce59a2d86641b75a7a
|
78c7df1b3de25d605415f01b5bb6cf3f235ba6df
|
refs/heads/master
| 2023-03-27T14:40:45.853176
| 2021-03-18T07:04:27
| 2021-03-18T07:04:27
| 328,276,512
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,033
|
py
|
# -*- coding: utf-8 -*-
from pyecharts import options as opts
from pyecharts.charts import Kline
data = [
[2320.26, 2320.26, 2287.3, 2362.94],
[2300, 2291.3, 2288.26, 2308.38],
[2295.35, 2346.5, 2295.35, 2345.92],
[2347.22, 2358.98, 2337.35, 2363.8],
[2360.75, 2382.48, 2347.89, 2383.76],
[2383.43, 2385.42, 2371.23, 2391.82],
[2377.41, 2419.02, 2369.57, 2421.15],
[2425.92, 2428.15, 2417.58, 2440.38],
[2411, 2433.13, 2403.3, 2437.42],
[2432.68, 2334.48, 2427.7, 2441.73],
[2430.69, 2418.53, 2394.22, 2433.89],
[2416.62, 2432.4, 2414.4, 2443.03],
[2441.91, 2421.56, 2418.43, 2444.8],
[2420.26, 2382.91, 2373.53, 2427.07],
[2383.49, 2397.18, 2370.61, 2397.94],
[2378.82, 2325.95, 2309.17, 2378.82],
[2322.94, 2314.16, 2308.76, 2330.88],
[2320.62, 2325.82, 2315.01, 2338.78],
[2313.74, 2293.34, 2289.89, 2340.71],
[2297.77, 2313.22, 2292.03, 2324.63],
[2322.32, 2365.59, 2308.92, 2366.16],
[2364.54, 2359.51, 2330.86, 2369.65],
[2332.08, 2273.4, 2259.25, 2333.54],
[2274.81, 2326.31, 2270.1, 2328.14],
[2333.61, 2347.18, 2321.6, 2351.44],
[2340.44, 2324.29, 2304.27, 2352.02],
[2326.42, 2318.61, 2314.59, 2333.67],
[2314.68, 2310.59, 2296.58, 2320.96],
[2309.16, 2286.6, 2264.83, 2333.29],
[2282.17, 2263.97, 2253.25, 2286.33],
[2255.77, 2270.28, 2253.31, 2276.22],
]
c = (Kline().add_xaxis(["2017/7/{}".format(i + 1) for i in range(len(data))])
.add_yaxis('kline', data, itemstyle_opts=opts.ItemStyleOpts(
color="#ec0000",
color0="#00da3c",
border_color="#8A0000",
border_color0="#008F28",
)).set_global_opts(
xaxis_opts=opts.AxisOpts(is_scale=True),
yaxis_opts=opts.AxisOpts(
is_scale=True,
splitarea_opts=opts.SplitAreaOpts(
is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=1)
)
),
datazoom_opts=[opts.DataZoomOpts(type_='inside')],
title_opts=opts.TitleOpts(title='Kline-ItemStyle'),
    ).render('K线图鼠标缩放.html'))
|
[
"aptentity@163.com"
] |
aptentity@163.com
|
957a10da60dedb198e915925d91593a08f1c4fba
|
67a94314b9a64078ac6463592555d80f1236a7e4
|
/KNeighborsClassifier of my_eMeL/KNeighborsClassifier of my_eMeL.py
|
a60ed17817642c01e1f3e533f70f0e7254e4354c
|
[] |
no_license
|
UlucFVardar/my_eMeL
|
b413b9171d7a599e358a16c836e7a4a2b05711e4
|
255fd6da0ff8ab3406af9b6bceecf746cc473f45
|
refs/heads/master
| 2020-04-23T12:32:36.667338
| 2019-03-01T21:07:21
| 2019-03-01T21:16:36
| 171,172,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,585
|
py
|
#!/usr/bin/env python
# coding: utf-8
# ## Needed Libs
# For this lecture I started to develop a library.
# Using this lib, the whole ML lecture can be covered.
# The lib will grow with every assignment.
# In[1]:
import my_eMeL.my_eMeL as my_eMeL
import my_eMeL.data_loader as data_loader
# ### Reading data from a file
# Using the lib, a data file can be read very easily
# In[2]:
# import some known data
iris_data_df, iris_label_df = data_loader.load_known_txt( file_path = './iris_data.txt',
delimiter = ',' ,
data_column_asList = [0,3] ,
label_column = 4 )
# #### Take a look at the data
# In[3]:
from IPython.display import display_html
df1_styler = iris_data_df[:10].style.set_table_attributes("style='display:inline'").set_caption('Data of Iris')
df2_styler = iris_label_df[:10].style.set_table_attributes("style='display:inline'").set_caption('Label of Iris')
display_html(df1_styler._repr_html_()+df2_styler._repr_html_(), raw=True)
# ### Splitting the data into Train and Test.
# Not randomly - (a random split function will be implemented for the lib)
# The assignment wants the first 30 rows as Train and the others as Test data
# In[4]:
train_data_df, train_label_df, test_data_df, test_label_df = my_eMeL.split_Train_and_Test ( data = iris_data_df ,
label = iris_label_df ,
label_col_name = 'Labels',
uniq_lables = list(iris_label_df.Labels.unique()),
first_n_number_train = 30)
df1_styler = train_data_df[:10].style.set_table_attributes("style='display:inline'").set_caption('Train Data of Iris')
df2_styler = train_label_df[:10].style.set_table_attributes("style='display:inline'").set_caption('Train Label of Iris')
df3_styler = test_data_df[:10].style.set_table_attributes("style='display:inline'").set_caption('Test Data of Iris')
df4_styler = test_label_df[:10].style.set_table_attributes("style='display:inline'").set_caption('Test Label of Iris')
display_html(df1_styler._repr_html_() +df2_styler._repr_html_() +df3_styler._repr_html_() +df4_styler._repr_html_(), raw=True)
# ---
#
# ## KNeighborsClassifier of my_eMeL
# The lib has some custom functions for testing and for building an accuracy table.
# According to the assignment, the desired k values and distance metrics are selected, and by iterating over these values
# the requested table will be created by the lib.
#
# In[5]:
table = my_eMeL.create_AccuracyTable( index = 'K-Value',
columns = ['Accuracy (%)','Error Count'] )
for distance_metric_for_clf in ['Euclidean','Manhattan','Cosine']:
table_header_column_name = str(distance_metric_for_clf + ' Distance' )
for k in [1,3,5,7,9,11,15]:
clf = my_eMeL.KNeighborsClassifier( k_number = k ,
distance_metric = distance_metric_for_clf )
clf.fit( data = train_data_df ,
label = train_label_df,
label_col_name = 'Labels')
predicted = clf.predict_test( test_data_df = test_data_df ,
test_label_df = test_label_df )
accuracy, error_count = clf.get_accuracy_values()
table.add_subTable_row( header_name = table_header_column_name,
data = [accuracy, error_count],
index_name = 'K = %s'%(k) )
table.get_table()
# ### Desired Decision Boundaries Graphs
#
# In[6]:
k = 3
distance_metric_for_clf = 'Euclidean'
my_eMeL.draw_decisionBoundries (train_data_df, train_label_df, 'Labels', k, distance_metric_for_clf , h = 0.02)
# In[7]:
k = 3
distance_metric_for_clf = 'Manhattan'
my_eMeL.draw_decisionBoundries (train_data_df, train_label_df, 'Labels', k, distance_metric_for_clf , h = 0.02)
# In[8]:
k = 3
distance_metric_for_clf = 'Cosine'
my_eMeL.draw_decisionBoundries (train_data_df, train_label_df, 'Labels', k, distance_metric_for_clf , h = 0.02)
# In[9]:
k = 1
distance_metric_for_clf = 'Euclidean'
my_eMeL.draw_decisionBoundries (train_data_df, train_label_df, 'Labels', k, distance_metric_for_clf , h = 0.02)
|
[
"ulucfurkanvardar@gmail.com"
] |
ulucfurkanvardar@gmail.com
|
d20052ac78b0218a2ba50a2894ff44eaf07bc208
|
ec5b0e75b17489c264107ea5d9152ae3d2717a5b
|
/reconstructShamir.py
|
b7013b53463a4063bfd48e1614e7a31b49ab3ef1
|
[] |
no_license
|
taabishm2/Proactive-FDH-RSA-Signature
|
b3e571914bf22c6aa692f5ddd619fad4c54b96fe
|
69f2889d4dc580b3a958dce75ff651f8cbb7c271
|
refs/heads/master
| 2020-05-07T18:46:21.694210
| 2019-07-24T15:23:14
| 2019-07-24T15:23:14
| 180,783,053
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
import fileOp
import RSAFeldmanVSS
import ShamirSS  # used by the ShamirSS.tncombine call below
def reconstruct_shamir(shares,i,t=0): #Do we have to mention which additive share these backups belong to? i.e. need for 'i'?
'''Verify first using VSS and then reconstruct, i is index of the additive share for vss_p, etc'''
vss_q = fileOp.read_list("FvssQ")[0]
vss_p = fileOp.read_list("FvssP")[0]
gen = fileOp.read_list("FvssGen")[0]
commitment_list = fileOp.read_list("FvssCommitmentList")[0]
res = True
for si in shares:
if RSAFeldmanVSS.verify_share(si,gen[i],vss_p[i],commitment_list[i]) == False:
res = False
break
if res == False:
print("Share:",si,"invalid")
raise Exception("Backup Reconstruction Failed")
return
else:
return (ShamirSS.tncombine(shares,vss_q[i],t))
|
[
"taabishm2@gmail.com"
] |
taabishm2@gmail.com
|
8cb65e961c15c1d5dd99cefb8b667cdf46ad9471
|
8d2c2f2f80204c4d90ed691dc0c8ed148cbe20af
|
/code/defaults.py
|
3d4f9dc1b8cfa1ce1bf13f88555bf351f55567d9
|
[] |
no_license
|
matttrd/information_sampler
|
1dbf434622fd383a60b8f36a03a55c0681ef0cd2
|
f2cdbd00a7828bdf526cf7b4869e0a899f559d2b
|
refs/heads/master
| 2022-04-09T06:39:20.278305
| 2020-03-31T21:29:21
| 2020-03-31T21:29:21
| 176,366,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,526
|
py
|
SAVE_EPOCHS = [0,59,119,159]
TRAINING_DEFAULTS = {
'cifar10': {
#"epochs": 180,
#"b": 128,
"save_epochs" : SAVE_EPOCHS,
#"wd":5e-4,
#"lrs": '[[0,0.1],[60,0.02],[120,0.004],[160,0.0008]]'
},
'cifar100': {
"epochs": 180,
#"b": 128,
"save_epochs" : SAVE_EPOCHS,
"wd":5e-4,
"lrs": '[[0,0.1],[60,0.02],[120,0.004],[160,0.0008]]'
},
'cinic': {
"epochs": 180,
#"b": 128,
"save_epochs" : SAVE_EPOCHS,
"wd":5e-4,
"lrs": '[[0,0.1],[50,0.01],[100,0.001],[150,0.0001]]'
},
'imagenet': {
"epochs": 350,
#"b":256,
"save_epochs" : SAVE_EPOCHS,
"wd":1e-4,
"lrs": '[[0,0.1],[150,0.01],[300,0.001]]'
},
'imagenet_lt': {
#"epochs": 150,
#"b": 256,
"save_epochs" : SAVE_EPOCHS,
"wd": 5e-4,
#"lrs": '[[0,0.1],[50,0.01],[100,0.001]]'
},
'inaturalist': {
#"epochs": 150,
#"b": 256,
"save_epochs" : SAVE_EPOCHS,
"wd": 1e-4,
#"lrs": '[[0,0.1],[50,0.01],[100,0.001]]'
},
'places_lt': {
"epochs": 180,
#"b": 256,
"save_epochs" : SAVE_EPOCHS,
"wd": 5e-4,
"lrs": '[[0,0.1],[50,0.01],[100,0.001],[150,0.0001]]'
}
}
def add_args_to_opt(dataset, opt):
'''
Set and OVERWRITES the default args
'''
defaults = TRAINING_DEFAULTS[dataset]
for k,v in defaults.items():
opt[k] = v
return opt
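# A minimal usage sketch (the `opt` values below are hypothetical): keys the caller already
# set are kept unless the dataset's defaults name them, in which case they are overwritten.
if __name__ == '__main__':
    opt = {'b': 128, 'wd': 1e-3}            # hypothetical caller options
    opt = add_args_to_opt('cifar100', opt)  # 'wd' is overwritten to 5e-4, 'b' is kept
    print(opt)                              # also gains 'epochs', 'save_epochs' and 'lrs'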
|
[
"matteoterzi.mt@gmail.com"
] |
matteoterzi.mt@gmail.com
|
a252840d3048e3d032bdb954edc31e7fcb80d614
|
4c444d7fd25c645cc48820fa103cad36ae963d81
|
/django_demo/settings.py
|
96a001869d4776c0b353dcde86f0a9a40cf8abc4
|
[] |
no_license
|
sanghee911/django-rest-api
|
8fd1ecf95b0490244f9b09c61298e950f2af4696
|
1697b26abda0493383f19e69eff912c2d1eace48
|
refs/heads/master
| 2021-09-02T01:16:58.258065
| 2017-12-29T15:28:49
| 2017-12-29T15:28:49
| 113,106,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,970
|
py
|
"""
Django settings for django_demo project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'odh43#4^^c37fj#w&)kmuv(8-e@5w20_a-85j+j*8%^0m#eei@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'rest_api',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'django-demo',
# 'USER': 'postgres',
# 'PASSWORD': 'root123',
# 'HOST': 'localhost',
# 'PORT': 5432,
# }
# }
if 'DATABASE_HOST' in os.environ:
DATABASES['default']['HOST'] = os.getenv('DATABASE_HOST')
DATABASES['default']['ENGINE'] = 'django.db.backends.postgresql_psycopg2'
DATABASES['default']['NAME'] = os.getenv('DATABASE_NAME')
DATABASES['default']['USER'] = os.getenv('DATABASE_USER')
DATABASES['default']['PASSWORD'] = os.getenv('DATABASE_PASSWORD')
DATABASES['default']['PORT'] = 5432
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
CORS_ORIGIN_ALLOW_ALL = True
CORS_ORIGIN_WHITELIST = [
'localhost:4200',
]
|
[
"sanghee911@hotmail.com"
] |
sanghee911@hotmail.com
|
b2fcdf4b6dc405488bfc98d9ae9b64c0609f42ae
|
9668a304a46a77eef55a6fb6e2a097049f088a9e
|
/newBlog/models.py
|
58a613aa551dc436b8574e7c5ffd461ca5c8c5a3
|
[] |
no_license
|
AineKiraboMbabazi/Blog-Django
|
ce030b1786905ef044bf10bf4922c1d600a54529
|
6d72aec43effd453d198c079b30afa670c7c1c97
|
refs/heads/master
| 2021-06-23T06:09:26.704381
| 2019-03-19T09:50:35
| 2019-03-19T09:50:35
| 176,450,991
| 0
| 0
| null | 2021-06-10T21:17:05
| 2019-03-19T07:29:20
|
Python
|
UTF-8
|
Python
| false
| false
| 578
|
py
|
from django.db import models
# Create your models here.
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
title = models.CharField(max_length=200)
description = models.TextField()
creation_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
|
[
"miraqomambasa@gmail.com"
] |
miraqomambasa@gmail.com
|
eee6d67932cc653ddbb810dc59c3e19fcdce12f3
|
8acb126606d430ae546fa13ebd3d6b8200b4a7d1
|
/lib/modeling/DSN.py
|
e249291d12012b67211ec8f9a2dc6489dcc63073
|
[
"MIT"
] |
permissive
|
MeowMeowLady/InstanceSeg-Without-Voxelwise-Labeling
|
7a3a65e2dc43d35655a1cd0bcc517038ace98923
|
5ac8ceb42d3c82b4c31871d14654e7444b3b1629
|
refs/heads/master
| 2020-08-22T06:44:16.602237
| 2020-04-20T15:07:53
| 2020-04-20T15:07:53
| 216,340,297
| 10
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,605
|
py
|
# -*- coding: utf-8 -*-
"""
Created on 18-11-16 at 3:34 PM
IDE PyCharm
@author: Meng Dong
"""
import torch
import torch.nn as nn
import torch.nn.functional as nnf
from core.config import cfg
class dsn_body(nn.Module):
def __init__(self):
super(dsn_body, self).__init__()
self.conv1a = nn.Conv3d(1, 32, 5, 1, 2, bias = True)
self.bn1a=nn.BatchNorm3d(32, momentum = 0.001, affine=True)
self.pool1 = nn.MaxPool3d(2, 2, padding = 0)
self.conv2a = nn.Conv3d(32, 64, 3, 1, 1, bias = True)
self.bn2a = nn.BatchNorm3d(64, momentum = 0.001, affine = True)
self.conv2b = nn.Conv3d(64, 64, 3, 1, 1, bias = True)
self.bn2b = nn.BatchNorm3d(64, momentum = 0.001, affine = True)
self.pool2 = nn.MaxPool3d(2, 2, padding = 0)
self.conv3a = nn.Conv3d(64, 128, 3, 1, 1, bias = True)
self.bn3a = nn.BatchNorm3d(128, momentum = 0.001, affine = True)
self.conv3b = nn.Conv3d(128, 128, 3, 1, 1, bias = True)
self.bn3b = nn.BatchNorm3d(128, momentum = 0.001, affine = True)
if cfg.RPN.STRIDE == 8:
self.pool3 = nn.MaxPool3d(2, 2, padding = 0)
self.conv4a = nn.Conv3d(128, 256, 3, 1, 1, bias = True)
self.bn4a = nn.BatchNorm3d(256, momentum = 0.001, affine = True)
self.conv4b = nn.Conv3d(256, 256, 3, 1, 1, bias = True)
self.bn4b = nn.BatchNorm3d(256, momentum = 0.001, affine = True)
self.dim_out = 256
else:
self.dim_out = 128
self.__weight_init()
self.spatial_scale = 1./cfg.RPN.STRIDE
#weight initialization
def __weight_init(self):
for m in self.modules():
m.name = m.__class__.__name__
if m.name.find('Conv')!=-1:
nn.init.normal_(m.weight, std = 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
if m.name.find('BatchNorm3d')!=-1:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def forward(self, main):
main = self.pool1(nnf.relu(self.bn1a(self.conv1a(main))))
main = nnf.relu(self.bn2a(self.conv2a(main)))
main = nnf.relu(self.bn2b(self.conv2b(main)))
main = self.pool2(main)
main = nnf.relu(self.bn3a(self.conv3a(main)))
main = nnf.relu(self.bn3b(self.conv3b(main)))
if cfg.RPN.STRIDE == 8:
main = self.pool3(main)
main = nnf.relu(self.bn4a(self.conv4a(main)))
main = nnf.relu(self.bn4b(self.conv4b(main)))
return main
|
[
"noreply@github.com"
] |
noreply@github.com
|
d90bbf146be2cf8d882bcac5844e04533816c778
|
4b40d911e2b3109717463437c9740f06eea9f4ce
|
/weighted_lottery.py
|
01696a38a194a2976223d334869a71cc1c683bf8
|
[] |
no_license
|
Code-JD/Python_Exercises_Notes
|
94eaba8716306324ad424d085c072e2f278a0ddd
|
b83b8391375eea6e8ea0a9ed17a635b370c2054f
|
refs/heads/master
| 2020-09-19T20:34:51.478650
| 2020-01-22T02:25:22
| 2020-01-22T02:25:22
| 224,291,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
# import numpy as np
# def weighted_lottery(weights):
# container_list = []
# for (name, weight) in weights.items():
# for _ in range(weight):
# container_list.append(name)
# return np.random.choice(container_list)
# weights = {
# 'winning': 1,
# 'losing': 1000
# }
# print(weighted_lottery(weights))
# other_weights = {
# 'winning': 1,
# 'break_even': 2,
# 'losing': 3
# }
# print(weighted_lottery(other_weights))
import numpy as np # import numpy library
def weighted_lottery(weights): # define weighted_lottery, which takes a dict of outcome weights
    container_list = [] # container list that will hold one entry per unit of weight
    for (name, weight) in weights.items(): # loop over each (name, weight) key/value pair in weights.items()
        for _ in range(weight): # repeat `weight` times (the loop counter itself is unused)
            container_list.append(name) # append the name, building up the weighted list
    return np.random.choice(container_list) # np.random.choice pulls one random sample from the weighted list
"""
# weights = {
# 'winning': 1,
# 'losing': 1000
# }
#
# print(weighted_lottery(weights))
"""
other_weights = {
'green': 1,
'yellow': 2,
'red': 3
}
print(weighted_lottery(other_weights))
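# Design note, as a minimal sketch: np.random.choice can also take the outcome names once
# plus a probability vector, which avoids building one container_list entry per unit of
# weight when the weights are large.
def weighted_lottery_p(weights):
    names = list(weights.keys())
    total = float(sum(weights.values()))
    probs = [w / total for w in weights.values()]  # normalise the weights to probabilities
    return np.random.choice(names, p=probs)

print(weighted_lottery_p(other_weights))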
|
[
"jonathan.d.herring@gmail.com"
] |
jonathan.d.herring@gmail.com
|
33570dc3d1b45740d29e24bcb7b74956bd79dec5
|
fa5006f55b612d22e8d69a006065ac0eca8fccc6
|
/track.py
|
dd87681e917f76cd260e82e9f6337a38c3a05d3d
|
[] |
no_license
|
Arrowana/vroom
|
3bbc7a00c1eccbd41c6a6813e862837342ee9289
|
fb70b81e927373e96dd433718073044912eed3f1
|
refs/heads/master
| 2021-01-12T10:41:48.208368
| 2016-11-06T10:22:14
| 2016-11-06T10:22:14
| 72,621,878
| 0
| 0
| null | 2016-11-06T10:32:19
| 2016-11-02T09:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,002
|
py
|
from matplotlib import pyplot as plt
from random import random
import math
import numpy
import pdb
def convex_hull(points_input):
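    # Convex hull via Andrew's monotone chain: sort the points by x, build the two
    # half-hulls with a cross-product turn test, then join them.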
points = points_input[:]
points = sorted(points, key=lambda x: x[0])
print(points)
U=[]
L=[]
def cross_product(o, a, b):
return (a[0] - o[0])*(b[1] - o[1]) -\
(a[1] - o[1])*(b[0] - o[0])
for point in points:
while len(L) > 1 and cross_product(L[-2], L[-1], point) <= 0:
L.pop()
L.append(point)
for point in points[::-1]:
while len(U) > 1 and cross_product(U[-2], U[-1], point) <= 0:
U.pop()
U.append(point)
return L[:-1]+U[:-1]
def add_points(points):
points_output = points[:]
for point_a, point_b in zip(points[:-1], points[1:]):
x = 10*random()
y = 10*random()
point_i = [(point_a[0]+point_b[0])/2+x, (point_a[1] + point_b[1])/2+y]
points_output.extend([point_a, point_i, point_b])
return points_output
def CatmullRomSpline(P0, P1, P2, P3, nPoints=100):
"""
P0, P1, P2, and P3 should be (x,y) point pairs that define the Catmull-Rom spline.
nPoints is the number of points to include in this curve segment.
"""
# Convert the points to numpy so that we can do array multiplication
P0, P1, P2, P3 = map(numpy.array, [P0, P1, P2, P3])
# Calculate t0 to t4
alpha = 0.5
def tj(ti, Pi, Pj):
xi, yi = Pi
xj, yj = Pj
return ( ( (xj-xi)**2 + (yj-yi)**2 )**0.5 )**alpha + ti
t0 = 0
t1 = tj(t0, P0, P1)
t2 = tj(t1, P1, P2)
t3 = tj(t2, P2, P3)
# Only calculate points between P1 and P2
t = numpy.linspace(t1,t2,nPoints)
# Reshape so that we can multiply by the points P0 to P3
# and get a point for each value of t.
t = t.reshape(len(t),1)
A1 = (t1-t)/(t1-t0)*P0 + (t-t0)/(t1-t0)*P1
A2 = (t2-t)/(t2-t1)*P1 + (t-t1)/(t2-t1)*P2
A3 = (t3-t)/(t3-t2)*P2 + (t-t2)/(t3-t2)*P3
B1 = (t2-t)/(t2-t0)*A1 + (t-t0)/(t2-t0)*A2
B2 = (t3-t)/(t3-t1)*A2 + (t-t1)/(t3-t1)*A3
C = (t2-t)/(t2-t1)*B1 + (t-t1)/(t2-t1)*B2
return C
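# --- Hedged sanity check (added for illustration) ----------------------------
# A Catmull-Rom segment interpolates its two middle control points, so the
# first and last samples returned above coincide with P1 and P2:
#
#   seg = CatmullRomSpline((0, 0), (1, 0), (1, 1), (0, 1), nPoints=10)
#   assert numpy.allclose(seg[0], (1, 0)) and numpy.allclose(seg[-1], (1, 1))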
def CatmullRomChain(P):
"""
Calculate Catmull Rom for a chain of points and return the combined curve.
"""
sz = len(P)
# The curve C will contain an array of (x,y) points.
C = []
for i in range(sz-3):
c = CatmullRomSpline(P[i], P[i+1], P[i+2], P[i+3])
C.extend(c)
return C
def generate():
width = 100
height = 100
scale = 150
points = [(scale*random(), scale*random()) for i in range(10)]
hull=convex_hull(points)
with_more = add_points(hull)
C=CatmullRomChain(with_more+with_more[0:1])
    print(C)
    print('points:', points)
    print('hull:', hull)
plt.title('Final')
plt.plot(*zip(*points))
plt.plot(*zip(*hull))
plt.plot(*zip(*with_more), marker='*')
x,y = zip(*C)
plt.plot(x,y)
plt.axis('equal')
plt.show()
if __name__ == '__main__':
generate()
|
[
"pierre.duval@gadz.org"
] |
pierre.duval@gadz.org
|
f43198ced2fc10d9bb99c03a434993608a6a4df1
|
e396c4a15caf661588cd73fdf1b46bfe7899d011
|
/Machine_Learning/linear_regression.py
|
fa089096e9717dde9fea7985ea42bb99afd472d9
|
[] |
no_license
|
VictorGulart/DataScience
|
42355bf7ce5b9e5e5f9d33238018b55a13620ceb
|
62910b8b5f3651a2d5621767071dc1c388da0003
|
refs/heads/master
| 2023-08-12T13:27:07.552707
| 2021-10-13T18:01:14
| 2021-10-13T18:01:14
| 295,250,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,874
|
py
|
import math
import numpy as np
from statistics import mean
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
'''
Defining my own linear regression algorithm.
Linear regression finds the best fit line:
 -> applying linear algebra for linear regression
y = mx + b
m = ( mean(x) * mean(y) - mean(xy) ) / ( mean(x)^2 - mean(x^2) )
'''
xs = np.array([1,2,3,4,5,6], dtype=np.float64)
ys = np.array([5,4,6,5,6,7], dtype=np.float64)
def best_fit_slope(xs,ys):
    ''' Returns the slope of the best fit line for the specified xs and ys'''
top = ( mean(xs) * mean(ys) ) - mean(xs*ys)
bottom = (mean(xs)**2) - mean(xs**2)
m = top / bottom
return m
def y_inter(xs, ys, m):
''' Returns the y intercept of the line with a specified slope
m and points xs and ys'''
return mean(ys) - ( m * mean(xs) )
def squared_error(ys_orig, ys_line):
    '''For the calculation of the R squared, to see how good a fit
the best fit line is to the data set. '''
return sum( (ys_line - ys_orig) ** 2)
def r_squared(ys_orig, ys_line):
y_mean_line = [mean(ys_orig) for y in ys_orig]
squared_error_regr = squared_error(ys_orig, ys_line)
squared_error_y_mean = squared_error(ys_orig, y_mean_line)
return 1 - (squared_error_regr / squared_error_y_mean)
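# --- Hedged cross-check (added for illustration, not part of the original) ----
# numpy's least-squares fit should agree with the closed-form slope and
# intercept computed above, e.g.:
#
#   np_m, np_b = np.polyfit(xs, ys, 1)
#   assert math.isclose(np_m, best_fit_slope(xs, ys), rel_tol=1e-6)
#   assert math.isclose(np_b, y_inter(xs, ys, best_fit_slope(xs, ys)), rel_tol=1e-6)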
m = best_fit_slope(xs, ys)
b = y_inter(xs, ys, m)
print(f'm is {m}')
print(f'b is {b}')
''' Create a line that fits the data that we have
we have m and b all we need is a list of Ys '''
regression_line = [ (m*x) + b for x in xs] # best fit line
#now we can predict
predict_x = 8
predict_y = (m*predict_x) + b
r_square = r_squared(ys, regression_line)
print(f'r squared is {r_square}')
plt.scatter(xs, ys)
plt.scatter(predict_x, predict_y, color='g')
plt.plot(xs, regression_line)
plt.show()
##plt.scatter (xs, ys)
##plt.show()
|
[
"aureumtempus@protonmail.com"
] |
aureumtempus@protonmail.com
|
4314eb2e3669ee41547bd4d12cc4d8689c34d0aa
|
74b65dee638e73b07032b4d26a9e0ce7a50b7ccc
|
/neural_network/network.py
|
a8d781a35c4b16245ae5b9b31c4d9c88e34b61b6
|
[] |
no_license
|
mpwillia/Tensorflow-Network-Experiments
|
6aec1d0a645d18536f0293185be553d67b584ad6
|
6d43f9a71c0b80a4d634de812e5141a8b295a4f8
|
refs/heads/master
| 2021-01-11T16:23:44.949529
| 2017-04-27T23:10:06
| 2017-04-27T23:10:06
| 80,074,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,465
|
py
|
import tensorflow as tf
import tensorflow.contrib as tfc
import tensorflow.contrib.layers as tfcl
print("Using Tensorflow Version: {}".format(tf.__version__))
import numpy as np
import sys
import math
import random
import os
from functools import partial
from network_util import match_tensor_shape, batch_dataset, get_num_batches, \
make_per_class_eval_tensor, print_eval_results, print_fit_results
from summary import NetworkSummary
from collections import namedtuple
EvalResults = namedtuple('EvalResults', ['overall', 'per_class'])
FitResults = namedtuple('FitResults', ['train', 'validation', 'test'])
class Network(object):
def __init__(self, input_shape, layers, pred_act_fn = None,
logdir = None, network_name = 'network'):
"""
For |layers| see:
https://www.tensorflow.org/api_docs/python/contrib.layers/higher_level_ops_for_building_neural_network_layers_
"""
self.input_shape = input_shape
self.layers = layers
self.pred_act_fn = pred_act_fn
self.network_name = network_name
self.input_shape = input_shape
self.sess = None
self.saver = None
self.train_step = None
# setup global step counter
self.global_step = tf.Variable(0, trainable = False, name = "net_global_step")
# setup the network's summaries
self.logdir = logdir
self.network_summary = NetworkSummary(logdir, max_queue = 3, flush_secs = 60)
# setup the network's input shape and input placeholder
if type(input_shape) is int:
self.net_input_shape = [None, input_shape]
else:
self.net_input_shape = (None,) + tuple(input_shape)
with tf.name_scope('net_input'):
self.net_input = tf.placeholder(tf.float32, shape = self.net_input_shape,
name = "network_input_tensor")
print("\nConstructing {} Layer Network".format(len(layers)))
print(" {:35s} : {}".format("Input Shape", self.net_input.get_shape()))
self.using_dropout = False
self.keep_prob = tf.placeholder(tf.float32, shape=[], name = "dropout_keep_prob")
# layer states are only applicable for recurrent layers
self.layer_states = []
made_kernel_images = False
prev_layer_output = self.net_input
for layer_num, layer in enumerate(layers):
layer_type = layer.func.__name__
layer_name = "layer_{:d}_{}".format(layer_num, layer_type)
layer_kwargs = {'inputs' : prev_layer_output,
'scope' : layer_name}
# handle dropout layers
if 'dropout' in layer_type:
self.using_dropout = True
layer_kwargs['keep_prob'] = self.keep_prob
with tf.name_scope(layer_name) as layer_scope:
layer_output = layer(**layer_kwargs)
try:
# check if the layer is recurrent, if so extract the state
if len(layer_output) == 2:
prev_layer_output, state = layer_output
else:
prev_layer_output = layer_output[0]
state = None
except:
prev_layer_output = layer_output
state = None
self.layer_states.append(state)
self.network_summary.add_layer_summary(layer_name, prev_layer_output, layer_scope)
layer_msg = "Layer {:d} ({}) Shape".format(layer_num, layer_type)
print(" {:35s} : {}".format(layer_msg, prev_layer_output.get_shape()))
print("")
with tf.name_scope('net_output') as output_scope:
self.net_output = prev_layer_output
self.network_summary.add_output_summary(self.net_output, scope = output_scope)
if self.pred_act_fn is not None:
self.pred_net_output = self.pred_act_fn(prev_layer_output)
else:
self.pred_net_output = prev_layer_output
self.network_summary.add_output_summary(self.pred_net_output, scope = output_scope)
self.exp_output = tf.placeholder(tf.float32, self.net_output.get_shape(),
name = "loss_expected_output")
self.eval_net_output = tf.placeholder(tf.float32, self.net_output.get_shape(),
name = "eval_net_output")
# Various Network Getters -------------------------------------------------
def get_global_step(self):
"""
Returns the current global step if the network has an active session,
otherwise returns None
"""
if self.sess is not None:
return self.sess.run(self.global_step)
def _get_weight_variables(self):
vars = tf.trainable_variables()
return [v for v in vars if 'weight' in v.name]
# Session Handling --------------------------------------------------------
def init_session(self):
"""
Initializes the network's tensorflow session along with initializing
all tensorflow variables. Will also create a new tensorflow Saver instance
for the network if needed.
If a session has already been created when this method is called, for
example through loading a saved network, then all uninitialized variables
will be initialized.
If a session has not yet been created then:
- A new Saver instance will be created
- A new session will be created
- All variables will be initialized
If a session already exists (through loading a saved network) then:
- All uninitialized variables will be initialized
"""
if self.sess is None:
sess_config = tf.ConfigProto(
log_device_placement = False,
allow_soft_placement = False)
self.saver = tf.train.Saver()
self.sess = tf.Session(config = sess_config)
self.sess.run(tf.global_variables_initializer())
else:
            list_of_variables = tf.global_variables()
            is_initialized = self.sess.run(
                [tf.is_variable_initialized(v) for v in list_of_variables])
            uninitialized_variables = [v for v, flag in
                zip(list_of_variables, is_initialized) if not flag]
            self.sess.run(tf.variables_initializer(uninitialized_variables))
def close(self):
"""
Closes and deletes this Network's session.
"""
if self.sess is not None:
self.sess.close()
self.sess = None
# Network Prediction ------------------------------------------------------
def predict(self, input_data, chunk_size = 500):
"""
Makes predictions on the given input data. Returns the index of the output
with the highest value for each item in the input data.
Arguments:
|input_data| the input data to make predictions on.
Optional:
|chunk_size| the maximum number of items to process with one run of
the network. All of the input data will be processed but it will
be broken into the smaller chunks for better memory usage. If
the chunk size is None then all of the input data will be processed
in one run of the network.
Returns the index of the output with the highest value for each item in
the input.
"""
with self.sess.as_default():
feed_dict = dict()
if self.using_dropout:
feed_dict[self.keep_prob] = 1.0
results = []
for chunk_x in batch_dataset(input_data, chunk_size, has_outputs = False):
feed_dict[self.net_input] = chunk_x
results.extend(self.pred_net_output.eval(feed_dict=feed_dict))
return np.argmax(results, 1)
def sample(self, input_data, temperature = 1.0, filter_zero = True, chunk_size = 500):
"""
Samples the networks response to the given input data. Returns a randomly
selected output index based on the network's predicted probability of each
possible output for each item in the input data. In otherwords the probability
of selecting a given output index is given by the network's predicted
probabilities.
Arguments:
|input_data| the input data to make predictions on.
Optional:
|temperature| the temperature value changes the distribution of the
network's predicted probabilities for each output. Accepts any
nonzero float value. Defaults to 1.0.
A higher temperature value makes the resulting probability
distribution more evenly spread while a lower temperature value
makes the resulting probability distribution less evenly spread.
There are four distinct effects that can be achieved with the
temperature value.
Temperature Effects (from low to high):
temp=0.0 - Has the same effect as calling predict() where the
output with the highest value will always be selected.
temp<1.0 - Higher probabilities are increased further while
lower probabilities are decreased further. Results
in less randomness in the output.
temp=1.0 - Has no effect on the network's predicted probabilities.
temp>1.0 - Higher probabilities are made smaller while lower
probabilities are made larger. Results in more
randomness in the output.
|filter_zero| if True then when applying temperature to the predicted
probabilities, probabilities of zero will be filtered out.
|chunk_size| the maximum number of items to process with one run of
the network. All of the input data will be processed but it will
be broken into the smaller chunks for better memory usage. If
the chunk size is None then all of the input data will be processed
in one run of the network.
"""
with self.sess.as_default():
feed_dict = dict()
if self.using_dropout:
feed_dict[self.keep_prob] = 1.0
results = []
for chunk_x in batch_dataset(input_data, chunk_size, has_outputs = False):
feed_dict[self.net_input] = chunk_x
results.extend(self.pred_net_output.eval(feed_dict=feed_dict))
results = np.asarray(results)
if temperature <= 0.0:
return np.argmax(results, 1)
num_choices = results.shape[1] # (batch, outputs)
if filter_zero:
choices = np.arange(num_choices)
def apply_temperature(results_1d):
non_zero = np.nonzero(results_1d)
nz_results = results_1d[non_zero]
nz_choices = choices[non_zero]
probs = np.exp(np.log(nz_results) / temperature)
probs /= np.sum(probs)
return np.random.choice(nz_choices, p = probs)
return np.apply_along_axis(apply_temperature, 1, results)
else:
probs = np.exp(np.log(results) / temperature)
probs /= np.sum(probs, 1)
f = lambda p: np.random.choice(num_choices, p=p)
return np.apply_along_axis(f, 1, probs)
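    # --- Hedged aside (added for illustration) -------------------------------
    # The temperature scaling above is the usual trick of exponentiating the
    # log-probabilities divided by T and renormalising:
    #   p_i(T) = p_i^(1/T) / sum_j p_j^(1/T)
    # so T < 1 sharpens the distribution toward the argmax and T > 1 flattens
    # it toward uniform, which matches the behaviour table in the docstring.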
# Network Training --------------------------------------------------------
def fit(self, train_data, optimizer, loss,
epochs, mb_size = None,
evaluation_freq = None, evaluation_func = None, evaluation_fmt = None,
evaluation_target = None, max_step = None,
per_class_evaluation = False,
validation_data = None,
test_data = None,
shuffle_freq = None,
l1_reg_strength = 0.0,
l2_reg_strength = 0.0,
dropout_keep_prob = 1.0,
summaries_per_epoch = None,
save_checkpoints = False, checkpoint_freq = None,
verbose = False):
"""
For |optimizer| see:
https://www.tensorflow.org/api_docs/python/train/optimizers
For |loss| see:
https://www.tensorflow.org/api_docs/python/contrib.losses/other_functions_and_classes
https://www.tensorflow.org/api_docs/python/nn/classification
"""
# reshape given data
#train_data = self._reshape_dataset(train_data)
#validation_data = self._reshape_dataset(validation_data)
#test_data = self._reshape_dataset(test_data)
train_feed_dict = dict()
# handle dropout
if self.using_dropout:
train_feed_dict[self.keep_prob] = dropout_keep_prob
        if summaries_per_epoch is not None and summaries_per_epoch <= 0:
summaries_per_epoch = None
self.network_summary.add_input_summary(self.net_input, mb_size)
# setting up our loss tensor
with tf.name_scope("loss") as loss_scope:
grad_loss = loss(self.net_output, self.exp_output)
# setup regularization
if l1_reg_strength > 0.0 or l2_reg_strength > 0.0:
l1_reg = None
if l1_reg_strength > 0.0:
l1_reg = tfcl.l1_regularizer(l1_reg_strength)
l2_reg = None
if l2_reg_strength > 0.0:
l2_reg = tfcl.l2_regularizer(l2_reg_strength)
l1_l2_reg = tfcl.sum_regularizer((l1_reg, l2_reg))
reg_penalty = tfcl.apply_regularization(l1_l2_reg, self._get_weight_variables())
loss_tensor = grad_loss + reg_penalty
else:
reg_penalty = None
loss_tensor = grad_loss
self.network_summary.add_loss_summary(loss_tensor, grad_loss, reg_penalty, loss_scope)
# adds a summary for all trainable variables
self.network_summary.add_variable_summary()
# setting up our optimizer
try:
opt_name = optimizer.__class__.__name__
except:
opt_name = 'optimizer'
with tf.name_scope(opt_name):
self.train_step = optimizer.minimize(loss_tensor, global_step = self.global_step)
# setting up our evaluation function and summaries
if evaluation_func is None:
evaluation_func = loss
with tf.name_scope('evaluation') as eval_scope:
# overall eval tensor
eval_tensor = evaluation_func(self.eval_net_output, self.exp_output)
self.network_summary.add_eval_summary(eval_tensor, 'train', eval_scope)
self.network_summary.add_eval_summary(eval_tensor, 'validation', eval_scope)
self.network_summary.add_eval_summary(eval_tensor, 'test', eval_scope)
# setting up our per class evaluation function and summaries
with tf.name_scope('per_class_evaluation') as per_class_eval_scope:
            # per class eval tensor
            per_class_eval_tensor = None
if per_class_evaluation:
per_class_eval_tensor = make_per_class_eval_tensor(evaluation_func,
self.eval_net_output,
self.exp_output,
scope = per_class_eval_scope)
def add_per_class_summary(name):
self.network_summary.add_per_class_eval_summary(per_class_eval_tensor,
max_val = 1.0,
name = name,
scope = per_class_eval_scope)
add_per_class_summary('train')
add_per_class_summary('validation')
add_per_class_summary('test')
# setting up the formating for printing the evaluation results
if evaluation_fmt is None: evaluation_fmt = ".5f"
# initialize our session
self.init_session()
# add a graph summary
self.network_summary.add_graph(self.sess.graph)
epoch_eval_results = []
initial_step = self.get_global_step()
for epoch in range(epochs):
# execute our training epoch
epoch_msg = "Training Epoch {:4d} / {:4d}".format(epoch, epochs)
self._run_training_epoch(train_data, mb_size,
feed_dict_kwargs = train_feed_dict,
summaries_per_epoch = summaries_per_epoch,
verbose = True,
verbose_prefix = epoch_msg)
# check for mid-train evaluations
if evaluation_freq is not None and epoch % evaluation_freq == 0:
# evaluate on the training dataset
if verbose > 1: print("\nMid-Train Evaluation")
train_eval = self._evaluate(train_data, eval_tensor, per_class_eval_tensor, name = 'train')
# evaluate on the validation dataset
if validation_data is not None:
validation_eval = self._evaluate(validation_data, eval_tensor, per_class_eval_tensor, name = 'validation')
else:
validation_eval = None
# check if we've met our early stopping evaluation target
if evaluation_target:
if validation_eval is not None:
met_target = validation_eval.overall >= evaluation_target
else:
met_target = train_eval.overall >= evaluation_target
else:
met_target = None
# add the mid train evaluation results to our list
epoch_fit_results = FitResults(train = train_eval, validation = validation_eval, test = None)
epoch_eval_results.append(epoch_fit_results)
# print the fit results
if verbose > 1: print_fit_results(epoch_fit_results, evaluation_fmt)
# break early if we've met our evaluation target
if met_target is not None and met_target:
print("\n\nReached Evaluation Target of {}".format(evaluation_target))
break
# break early if we've met our step target
if max_step is not None and self.get_global_step() >= max_step:
print("\n\nReached Max Step Target of {}".format(max_step))
break
if verbose > 1: print("")
# save a checkpoint if needed
if save_checkpoints and checkpoint_freq is not None and epoch % checkpoint_freq == 0:
if verbose > 1: print("Saving Mid-Train Checkpoint")
self._save_checkpoint()
# shuffle the dataset if needed
if shuffle_freq is not None and epoch % shuffle_freq == 0:
train_data = self._shuffle_dataset(train_data)
self.network_summary.flush()
# report the number of training steps taken
final_step = self.get_global_step()
total_steps = final_step - initial_step
if verbose > 0:
print("\nTrained for {:d} Steps".format(total_steps))
# save the final checkpoint
if save_checkpoints:
if verbose > 1: print("Saving Final Checkpoint")
self._save_checkpoint()
if verbose == 1: print("")
# Perform final fit result evaluations
# final training evaluation
if verbose > 1: print("Final Evaluation")
train_eval = self._evaluate(train_data, eval_tensor, per_class_eval_tensor, name = 'train')
# final validation evaluation
if validation_data is not None:
validation_eval = self._evaluate(validation_data, eval_tensor, per_class_eval_tensor, name = 'validation')
else:
validation_eval = None
# final test evaluation
if test_data is not None:
test_eval = self._evaluate(test_data, eval_tensor, per_class_eval_tensor, name = 'test')
else:
test_eval = None
# print and return the final fit results
fit_results = FitResults(train = train_eval, validation = validation_eval, test = test_eval)
if verbose > 1: print_fit_results(fit_results, evaluation_fmt)
self.network_summary.flush()
return fit_results
# Single Network Training Epoch -------------------------------------------
def _run_training_epoch(self, train_data,
mb_size = None,
feed_dict_kwargs = dict(),
summaries_per_epoch = None,
verbose = False,
verbose_prefix = None):
"""
Runs a single training epoch.
Arguments:
|train_data| the data to train on
Optional:
|mb_size| the size of the minibatches, if None then no minibatching
will be done.
|feed_dict_kwargs| any extra kwargs to be passed to the network.
|summaries_per_epoch| how many summaries to produce for this epoch.
The summaries will be evenly distributed across the minibatches.
If None then no summaries will be made.
|verbose| if True the progress information will be printed
|verbose_prefix| extra information to append to the progress messages
printed when the |verbose| argument is True.
"""
train_x, train_y = train_data
mb_total = get_num_batches(len(train_x), mb_size)
# Compute when to generate summaries
if summaries_per_epoch is None:
summary_every = None
elif summaries_per_epoch >= mb_total:
summary_every = 1
elif summaries_per_epoch == 1:
summary_every = mb_total
else:
summary_every = int(math.ceil(mb_total / float(summaries_per_epoch)))
with self.sess.as_default():
# Iterate over the batches
for mb_x, mb_y, mb_num, mb_total in self._batch_for_train(train_data, mb_size, True):
if verbose:
# print progress message if verbose
prefix = ''
if verbose_prefix is not None:
prefix = verbose_prefix + " "
mb_msg = "Mini-Batch {:5d} / {:5d}".format(mb_num, mb_total)
sys.stdout.write("{}{} \r".format(prefix, mb_msg))
sys.stdout.flush()
feed_dict_kwargs[self.net_input] = mb_x
feed_dict_kwargs[self.exp_output] = mb_y
fetches = [self.train_step]
# if this is a summary epoch then add those
if (summary_every is not None) and (mb_num >= mb_total-1 or (mb_num+1) % summary_every == 0):
train_summary = self.network_summary.get_training_summary()
if train_summary is not None:
fetches.extend([train_summary, self.global_step])
run_results = self.sess.run(fetches, feed_dict = feed_dict_kwargs)
self._process_run_results(run_results)
def _batch_for_train(self, dataset, batch_size, include_progress = False):
"""used to define batching specific to the training epochs"""
return batch_dataset(dataset, batch_size, include_progress, True)
# Network Performance Evaluation ------------------------------------------
def _evaluate(self, dataset, eval_tensor,
per_class_eval_tensor = None,
chunk_size = 2000,
name = 'eval'):
"""
Evaluates the network's performance on the given dataset.
Arguments:
|dataset| the dataset to evaluate the network's performance on
|eval_tensor| the tensor to use for evaluation. This tensor should
accept the results of the network's predictions on the dataset
and the expected outputs to produce a metric for how good the
network's predictions are. For example, it could compute the
network's accuracy.
Optional:
|per_class_eval_tensor| if the network is performing classification
then this tensor can be used to evaluate the network's performance
for each class individually. This tensor accepts the same inputs
as the |eval_tensor| but is expected to produce a vector of metrics
where each element is the metric for each class.
|chunk_size| the maximum number of items to process with one run of
the network. All of the input data will be processed but it will
be broken into the smaller chunks for better memory usage. If
the chunk size is None then all of the input data will be processed
in one run of the network.
|name| gives a name for the evaluation being performed. Used for
grouping like summaries together. For example, can be used to
group evaluation of validation data seperately from the evaluation
of the testing data.
Returns the evaluation results as an EvalResults tuple.
"""
eval_x, eval_y = dataset
with self.sess.as_default():
feed_dict = dict()
if self.using_dropout:
feed_dict[self.keep_prob] = 1.0
results = []
for chunk_x, chunk_y, in self._batch_for_eval(dataset, chunk_size):
feed_dict[self.net_input] = chunk_x
results.extend(self.net_output.eval(feed_dict=feed_dict))
feed_dict = {self.eval_net_output : results,
self.exp_output : eval_y}
fetches = [eval_tensor]
if per_class_eval_tensor is not None:
fetches.append(per_class_eval_tensor)
non_summary_size = len(fetches)
eval_summary = self.network_summary.get_evaluation_summary(name)
if eval_summary is not None:
fetches.extend([eval_summary, self.global_step])
eval_results = self.sess.run(fetches, feed_dict = feed_dict)
return self._process_eval_results(eval_results, non_summary_size)
def _batch_for_eval(self, dataset, batch_size, include_progress = False):
"""used to define batching specific to the evaluation epochs"""
return batch_dataset(dataset, batch_size, include_progress, True)
# Result Handling (Training and Evaluation) -------------------------------
def _process_eval_results(self, eval_results, non_summary_size = 1):
"""
Takes the raw, unprocessed evaluation results and extracts out the
relevant information as an EvalResults tuple.
Arguments:
|eval_results| the raw, unprocessed evaluation results
Optional:
|non_summary_size| the expected number of items not used for summaries
Returns the processed EvalResults tuple
"""
eval_results = self._process_run_results(eval_results, non_summary_size)
if len(eval_results) == 1:
            return EvalResults(overall = eval_results[0], per_class = None)
elif len(eval_results) == 2:
return EvalResults(overall = eval_results[0], per_class = eval_results[1])
else:
raise ValueError("Don't know how to process eval_results with length {:d}!".format(len(eval_results)))
def _process_run_results(self, run_results, non_summary_size = 1):
"""
Takes the raw, unprocessed run results and extracts out the nonsummary
information as a tuple.
Arguments:
|run_results| the raw, unprocessed run results
Optional:
|non_summary_size| the expected number of items not used for summaries
Returns a tuple of the non-summary items from the run
"""
if len(run_results) == non_summary_size + 2:
summary, step = run_results[-2:]
self.network_summary.write(summary, step)
elif len(run_results) != non_summary_size:
raise ValueError("Don't know how to process run_results with length {:d}!".format(len(run_results)))
return tuple(run_results[:non_summary_size])
# Dataset Utilities -------------------------------------------------------
def _reshape_dataset(self, dataset):
if dataset is None: return None
x,y = dataset
return match_tensor_shape(x, self.net_input), \
match_tensor_shape(y, self.net_output)
def _shuffle_dataset(self, dataset):
        zipped_dataset = list(zip(*dataset))
random.shuffle(zipped_dataset)
return list(zip(*zipped_dataset))
# Network Pickling and Variable I/O ---------------------------------------
def __getstate__(self):
odict = self.__dict__.copy()
# Strip Tensorflow Content
del odict['sess']
del odict['saver']
del odict['global_step']
del odict['train_step']
del odict['network_summary']
del odict['exp_output']
del odict['eval_net_output']
del odict['net_input']
del odict['net_output']
del odict['pred_net_output']
del odict['net_input_shape']
del odict['layer_states']
del odict['using_dropout']
del odict['keep_prob']
return odict
def __setstate__(self, state):
self.__init__(**state)
def save_variables(self, path):
"""
Saves the networks tensorflow variable states to the given filepath
Arguments:
|path| the filepath to save the tensorflow variable states to
"""
if self.saver is None or self.sess is None:
raise Exception("Cannot save variables without a session and saver")
self.saver.save(self.sess, path)
def load_variables(self, path):
"""
Loads the networks tensorflow variable states from the given filepath
Arguments:
|path| the filepath to load the tensorflow variable states from
"""
self.init_session()
self.saver.restore(self.sess, path)
def _save_checkpoint(self):
if self.sess is None:
raise Exception("Cannot save checkpoint without an active session!")
if self.saver is None:
raise Exception("Cannot save checkpoint without a tf.train.Saver instance!")
if self.logdir is not None:
save_path = os.path.join(self.logdir, self.network_name)
else:
save_path = os.path.join('./', self.network_name)
self.saver.save(self.sess, save_path, self.global_step)
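# --- Hedged usage sketch (added for illustration, not from the original repo) ---
# The constructor expects `layers` to be a list of functools.partial objects
# wrapping tf.contrib.layers callables (it reads layer.func.__name__ and passes
# `inputs`/`scope` kwargs). A minimal classifier might look like the following;
# the layer sizes, optimizer and loss wiring are assumptions:
#
#   layers = [partial(tfcl.fully_connected, num_outputs=128),
#             partial(tfcl.dropout),
#             partial(tfcl.fully_connected, num_outputs=10, activation_fn=None)]
#   net = Network(input_shape=784, layers=layers, pred_act_fn=tf.nn.softmax)
#   net.init_session()
#   loss = lambda logits, labels: tf.losses.softmax_cross_entropy(
#       onehot_labels=labels, logits=logits)
#   net.fit(train_data, tf.train.AdamOptimizer(1e-3), loss,
#           epochs=5, mb_size=64, dropout_keep_prob=0.8, verbose=1)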
|
[
"mike@clwill.com"
] |
mike@clwill.com
|
28bae91f06a0f3667e8316de61e7ad47890a2a95
|
5000676812f8ede0beb861c185df67b862b5be55
|
/src/get_reference_to_original_future_when_use_as_completed.py
|
313c6d58c11264f80f3a58aa367e5972da4a74d1
|
[] |
no_license
|
oleyeye/python_code
|
3cf3493dffcb23baa34deda083b488890e6cbf34
|
20ed14c5edfea4d156a5710f7f39bfdc10f2fdcc
|
refs/heads/master
| 2020-04-17T10:41:16.573058
| 2019-01-19T05:51:54
| 2019-01-19T05:51:54
| 166,510,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 852
|
py
|
import asyncio
async def coro(sec):
print(f'Coroutine {sec} is starting')
await asyncio.sleep(sec)
print(f'Coroutine {sec} is done')
return sec
async def main():
futures = {asyncio.ensure_future(coro(i)): f'item({i})' for i in range(1,5)}
for future in as_completed_hooked(futures.keys()):
real_future = await future
index = futures[real_future]
print(f'The item is {index}')
print(f'The result is {real_future.result()}')
def as_completed_hooked(futures):
wrappers = []
loop = asyncio.get_event_loop()
for future in futures:
wrapper = loop.create_future()
future.add_done_callback(wrapper.set_result)
wrappers.append(wrapper)
for x in asyncio.as_completed(wrappers):
yield x
if __name__ == '__main__':
asyncio.run(main())
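# --- Hedged note (added for illustration) -------------------------------------
# A done-callback is invoked with the finished future itself, so using
# wrapper.set_result as the callback makes each wrapper resolve to a reference
# to the ORIGINAL future; that is what lets main() look it up in the `futures`
# dict. When completion order is not needed, asyncio.wait returns the originals
# directly:
#
#   done, _ = await asyncio.wait(futures.keys())
#   for real_future in done:
#       print(futures[real_future], real_future.result())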
|
[
"tigerlee7@163.com"
] |
tigerlee7@163.com
|
b336e9d406c8e195778f6588752748e100d7e6b6
|
97445678c009b02a32975abd464ca03216d185ef
|
/django_practice_2/load_initial_data_2.py
|
93e08008fda1d6c77d9d8b50739e4a48a14b268d
|
[] |
no_license
|
jwinf843/wdc-django-practice-2
|
edca54cf43c7f8926b85fddc867937acfafb68a7
|
c26d9a1818a2b624e409b09f82cc29b24a300d3c
|
refs/heads/master
| 2020-03-15T09:22:27.417114
| 2018-05-06T14:18:45
| 2018-05-06T14:18:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
import django
def main():
django.setup()
from artists.models import Artist
from django.contrib.auth.models import User
User.objects.all().delete()
Artist.objects.all().delete()
User.objects.create_superuser(
username='admin', email='admin@example.com', password='admin')
ARTISTS = [
('Stevland', 'Judkins', 'Stevie Wonders', 'https://upload.wikimedia.org/wikipedia/commons/thumb/5/54/Stevie_Wonder_1973.JPG/600px-Stevie_Wonder_1973.JPG', 90, 'rock'),
('James', 'Hendrix', 'Jimi Hendrix', 'https://upload.wikimedia.org/wikipedia/commons/a/ae/Jimi_Hendrix_1967.png', 80, 'rock'),
('Edward', 'Sheeran', 'Ed Sheeran', 'https://upload.wikimedia.org/wikipedia/commons/thumb/5/55/Ed_Sheeran_2013.jpg/500px-Ed_Sheeran_2013.jpg', 75, 'pop'),
]
for first_name, last_name, artistic_name, picture_url, popularity, genre in ARTISTS:
Artist.objects.create(
first_name=first_name,
last_name=last_name,
artistic_name=artistic_name,
picture_url=picture_url,
popularity=popularity,
genre=genre
)
if __name__ == '__main__':
main()
|
[
"zugnoni.ivan@gmail.com"
] |
zugnoni.ivan@gmail.com
|
b4b726de7dd2fe2197a67e6aa174fe63bd1eb9a1
|
1616557beba5f845fa909950f548254bb5e1a982
|
/dictionary.py
|
7ca13d0b34b96c4e7eb8940138988922fa388391
|
[] |
no_license
|
devendrasingh143/python
|
8123a98b5bef93b53e4791a191eba25f4582d6a2
|
4ff6f30c48670dc96c73274615e0230cb7fbb49d
|
refs/heads/master
| 2021-01-21T11:45:59.885607
| 2014-08-05T08:56:55
| 2014-08-05T08:56:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
eng2sp = dict()
eng2sp['a', 's', 'd']=['z', 'x', 'c'] #the tuple ('a', 's', 'd') becomes a single key
print(eng2sp)
eng2sp = dict()
eng2sp={'one': 'uno', 'two': 'dos', 'three': 'tres'}
print('\n')
print(eng2sp)
print(len(eng2sp)) #number of keys
print('one' in eng2sp) #it tells whether something appears as a key in the dictionary.
print('uno' in eng2sp)
vals = eng2sp.values()
print('uno' in vals) #it tells whether something appears as a value in the dictionary.
def histogram(s):
d = dict()
for c in s:
if c not in d:
d[c] = 1
else:
d[c] += 1
return d
h = histogram('brontosaurus')
print('\n')
print(h)
print(h.get('a'))
print(h.get('u', 0))
def print_hist(h):
for c in h:
print(c, h[c])
h = histogram('parrot')
print('\n')
print_hist(h)
def reverse_lookup(d, v):
for k in d:
if d[k] == v:
return k
# raise ValueError
h = histogram('parrot')
print('\n')
k = reverse_lookup(h, 2)
print(k)
#l = reverse_lookup(h, 3)
#print(l)
def invert_dict(d):
inverse = dict()
for key in d:
val = d[key]
if val not in inverse:
inverse[val] = [key]
else:
inverse[val].append(key)
return inverse
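# --- Hedged aside (added for illustration) ------------------------------------
# collections.Counter produces the same frequency mapping as histogram():
#   from collections import Counter
#   assert Counter('brontosaurus') == histogram('brontosaurus')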
hist = histogram('parrot')
print('\n')
print(hist)
inverse = invert_dict(hist)
print(inverse)
verbose = True
def example1():
if verbose:
print('\nRunning example1')
example1()
|
[
"deniedchrist.rathore5@gmail.com"
] |
deniedchrist.rathore5@gmail.com
|
316dbf8c733316c4baf8d3471c2d9aaf98e2ff79
|
85fdf45f4047e78bc92818debd69c8795aa31ce9
|
/home/api_1_0/verify_code.py
|
abb9d7a6647b5f23ba1b62116e8d98300e954f4c
|
[] |
no_license
|
Lgvcc/iHome
|
15cfe3891216f7c7b848e57f2a5c9a680df54024
|
71484600bb49459cf1fc79a8b87924ce9051b8f6
|
refs/heads/master
| 2020-06-15T22:44:30.206382
| 2019-07-09T15:44:18
| 2019-07-09T15:44:18
| 195,411,768
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
# coding:utf-8
from flask import current_app, jsonify, make_response
from . import api
from home.utils.captcha import captcha
from home import redis_store
from home.constants import REDIS_IMAGE_CODE_EXPIRE
from home.utils.response_code import RET
@api.route('/image_codes/<image_code_id>')
def get_image_code(image_code_id):
"""
    Get an image captcha.
    :param image_code_id: image captcha identifier
    :return: the image captcha
    """
    # Business logic
    # Generate the image captcha: name is the captcha name, text its real value, image_code the image data
    name, text, image_code = captcha.captcha.generate_captcha()
    # Save the captcha id together with its real value in redis
try:
redis_store.setex('image_code_%s' % image_code_id, REDIS_IMAGE_CODE_EXPIRE, text)
except Exception as e:
current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, message='Failed to save the image captcha')
    # Return the image captcha
resp = make_response(image_code)
resp.headers['Content-Type'] = 'image/jpg'
return resp
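# --- Hedged usage note (added for illustration) --------------------------------
# A front-end would typically request the captcha with a randomly generated id,
# e.g. GET .../image_codes/<uuid> (the full prefix depends on how the `api`
# blueprint is registered), render the returned image/jpg bytes in an <img>
# tag, and send the same uuid back with the form so the server can compare the
# user's input against the value stored in redis under 'image_code_<uuid>'.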
|
[
"18790334713@163.com"
] |
18790334713@163.com
|
9ecc842f23895f3713c99a55702174b7192797fa
|
31e7aa5176876e6caf7ff9b37336b39292c9dd5b
|
/selfdrive/controls/lib/pathplanner.py
|
de43c041805990c89541efeab04f50f6241ea132
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
avolmensky/openpilot
|
02d822f7eb50bb74368c794a3d580f95a53c2ca4
|
dc61915529aabfad62061e784f277af311013cf1
|
refs/heads/devel
| 2021-12-15T01:43:10.994332
| 2020-02-14T01:30:43
| 2020-02-14T02:33:40
| 191,065,999
| 2
| 9
|
MIT
| 2019-06-26T10:13:29
| 2019-06-09T23:32:13
|
C
|
UTF-8
|
Python
| false
| false
| 9,158
|
py
|
import os
import math
from common.realtime import sec_since_boot, DT_MDL
from selfdrive.swaglog import cloudlog
from selfdrive.controls.lib.lateral_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import MPC_COST_LAT
from selfdrive.controls.lib.lane_planner import LanePlanner
from selfdrive.config import Conversions as CV
import cereal.messaging as messaging
from cereal import log
LaneChangeState = log.PathPlan.LaneChangeState
LaneChangeDirection = log.PathPlan.LaneChangeDirection
LOG_MPC = os.environ.get('LOG_MPC', False)
LANE_CHANGE_SPEED_MIN = 45 * CV.MPH_TO_MS
LANE_CHANGE_TIME_MAX = 10.
DESIRES = {
LaneChangeDirection.none: {
LaneChangeState.off: log.PathPlan.Desire.none,
LaneChangeState.preLaneChange: log.PathPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.PathPlan.Desire.none,
LaneChangeState.laneChangeFinishing: log.PathPlan.Desire.none,
},
LaneChangeDirection.left: {
LaneChangeState.off: log.PathPlan.Desire.none,
LaneChangeState.preLaneChange: log.PathPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.PathPlan.Desire.laneChangeLeft,
LaneChangeState.laneChangeFinishing: log.PathPlan.Desire.laneChangeLeft,
},
LaneChangeDirection.right: {
LaneChangeState.off: log.PathPlan.Desire.none,
LaneChangeState.preLaneChange: log.PathPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.PathPlan.Desire.laneChangeRight,
LaneChangeState.laneChangeFinishing: log.PathPlan.Desire.laneChangeRight,
},
}
def calc_states_after_delay(states, v_ego, steer_angle, curvature_factor, steer_ratio, delay):
states[0].x = v_ego * delay
states[0].psi = v_ego * curvature_factor * math.radians(steer_angle) / steer_ratio * delay
return states
class PathPlanner():
def __init__(self, CP):
self.LP = LanePlanner()
self.last_cloudlog_t = 0
self.steer_rate_cost = CP.steerRateCost
self.setup_mpc()
self.solution_invalid_cnt = 0
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
self.lane_change_timer = 0.0
self.prev_one_blinker = False
def setup_mpc(self):
self.libmpc = libmpc_py.libmpc
self.libmpc.init(MPC_COST_LAT.PATH, MPC_COST_LAT.LANE, MPC_COST_LAT.HEADING, self.steer_rate_cost)
self.mpc_solution = libmpc_py.ffi.new("log_t *")
self.cur_state = libmpc_py.ffi.new("state_t *")
self.cur_state[0].x = 0.0
self.cur_state[0].y = 0.0
self.cur_state[0].psi = 0.0
self.cur_state[0].delta = 0.0
self.angle_steers_des = 0.0
self.angle_steers_des_mpc = 0.0
self.angle_steers_des_prev = 0.0
self.angle_steers_des_time = 0.0
def update(self, sm, pm, CP, VM):
v_ego = sm['carState'].vEgo
angle_steers = sm['carState'].steeringAngle
active = sm['controlsState'].active
angle_offset = sm['liveParameters'].angleOffset
# Run MPC
self.angle_steers_des_prev = self.angle_steers_des_mpc
VM.update_params(sm['liveParameters'].stiffnessFactor, sm['liveParameters'].steerRatio)
curvature_factor = VM.curvature_factor(v_ego)
self.LP.parse_model(sm['model'])
# Lane change logic
one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker
below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN
if sm['carState'].leftBlinker:
self.lane_change_direction = LaneChangeDirection.left
elif sm['carState'].rightBlinker:
self.lane_change_direction = LaneChangeDirection.right
if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX) or (not one_blinker):
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
else:
torque_applied = sm['carState'].steeringPressed and \
((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or \
(sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right))
lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob
# State transitions
# off
if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:
self.lane_change_state = LaneChangeState.preLaneChange
# pre
elif self.lane_change_state == LaneChangeState.preLaneChange:
if not one_blinker or below_lane_change_speed:
self.lane_change_state = LaneChangeState.off
elif torque_applied:
self.lane_change_state = LaneChangeState.laneChangeStarting
# starting
elif self.lane_change_state == LaneChangeState.laneChangeStarting and lane_change_prob > 0.5:
self.lane_change_state = LaneChangeState.laneChangeFinishing
# finishing
elif self.lane_change_state == LaneChangeState.laneChangeFinishing and lane_change_prob < 0.2:
if one_blinker:
self.lane_change_state = LaneChangeState.preLaneChange
else:
self.lane_change_state = LaneChangeState.off
if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:
self.lane_change_timer = 0.0
else:
self.lane_change_timer += DT_MDL
self.prev_one_blinker = one_blinker
desire = DESIRES[self.lane_change_direction][self.lane_change_state]
# Turn off lanes during lane change
if desire == log.PathPlan.Desire.laneChangeRight or desire == log.PathPlan.Desire.laneChangeLeft:
self.LP.l_prob = 0.
self.LP.r_prob = 0.
self.libmpc.init_weights(MPC_COST_LAT.PATH / 10.0, MPC_COST_LAT.LANE, MPC_COST_LAT.HEADING, self.steer_rate_cost)
else:
self.libmpc.init_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.LANE, MPC_COST_LAT.HEADING, self.steer_rate_cost)
self.LP.update_d_poly(v_ego)
# account for actuation delay
self.cur_state = calc_states_after_delay(self.cur_state, v_ego, angle_steers - angle_offset, curvature_factor, VM.sR, CP.steerActuatorDelay)
v_ego_mpc = max(v_ego, 5.0) # avoid mpc roughness due to low speed
self.libmpc.run_mpc(self.cur_state, self.mpc_solution,
list(self.LP.l_poly), list(self.LP.r_poly), list(self.LP.d_poly),
self.LP.l_prob, self.LP.r_prob, curvature_factor, v_ego_mpc, self.LP.lane_width)
# reset to current steer angle if not active or overriding
if active:
delta_desired = self.mpc_solution[0].delta[1]
rate_desired = math.degrees(self.mpc_solution[0].rate[0] * VM.sR)
else:
delta_desired = math.radians(angle_steers - angle_offset) / VM.sR
rate_desired = 0.0
self.cur_state[0].delta = delta_desired
self.angle_steers_des_mpc = float(math.degrees(delta_desired * VM.sR) + angle_offset)
    # Check for infeasible MPC solution
mpc_nans = any(math.isnan(x) for x in self.mpc_solution[0].delta)
t = sec_since_boot()
if mpc_nans:
self.libmpc.init(MPC_COST_LAT.PATH, MPC_COST_LAT.LANE, MPC_COST_LAT.HEADING, CP.steerRateCost)
self.cur_state[0].delta = math.radians(angle_steers - angle_offset) / VM.sR
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning("Lateral mpc - nan: True")
if self.mpc_solution[0].cost > 20000. or mpc_nans: # TODO: find a better way to detect when MPC did not converge
self.solution_invalid_cnt += 1
else:
self.solution_invalid_cnt = 0
plan_solution_valid = self.solution_invalid_cnt < 2
plan_send = messaging.new_message()
plan_send.init('pathPlan')
plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'liveParameters', 'model'])
plan_send.pathPlan.laneWidth = float(self.LP.lane_width)
plan_send.pathPlan.dPoly = [float(x) for x in self.LP.d_poly]
plan_send.pathPlan.lPoly = [float(x) for x in self.LP.l_poly]
plan_send.pathPlan.lProb = float(self.LP.l_prob)
plan_send.pathPlan.rPoly = [float(x) for x in self.LP.r_poly]
plan_send.pathPlan.rProb = float(self.LP.r_prob)
plan_send.pathPlan.angleSteers = float(self.angle_steers_des_mpc)
plan_send.pathPlan.rateSteers = float(rate_desired)
plan_send.pathPlan.angleOffset = float(sm['liveParameters'].angleOffsetAverage)
plan_send.pathPlan.mpcSolutionValid = bool(plan_solution_valid)
plan_send.pathPlan.paramsValid = bool(sm['liveParameters'].valid)
plan_send.pathPlan.sensorValid = bool(sm['liveParameters'].sensorValid)
plan_send.pathPlan.posenetValid = bool(sm['liveParameters'].posenetValid)
plan_send.pathPlan.desire = desire
plan_send.pathPlan.laneChangeState = self.lane_change_state
plan_send.pathPlan.laneChangeDirection = self.lane_change_direction
pm.send('pathPlan', plan_send)
if LOG_MPC:
dat = messaging.new_message()
dat.init('liveMpc')
dat.liveMpc.x = list(self.mpc_solution[0].x)
dat.liveMpc.y = list(self.mpc_solution[0].y)
dat.liveMpc.psi = list(self.mpc_solution[0].psi)
dat.liveMpc.delta = list(self.mpc_solution[0].delta)
dat.liveMpc.cost = self.mpc_solution[0].cost
pm.send('liveMpc', dat)
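# --- Hedged summary (added for illustration, not part of openpilot) ------------
# The lane change logic in update() is a four-state machine driven by the
# blinker, ego speed, driver torque and the model's lane-change probability:
#
#   off                 -> preLaneChange        blinker newly on and v_ego >= LANE_CHANGE_SPEED_MIN
#   preLaneChange       -> laneChangeStarting   driver torques toward the blinker side
#   laneChangeStarting  -> laneChangeFinishing  lane_change_prob > 0.5
#   laneChangeFinishing -> preLaneChange / off  lane_change_prob < 0.2 (pre if blinker still on)
#
# Any state drops back to off when the blinker turns off, control is inactive,
# or the manoeuvre runs longer than LANE_CHANGE_TIME_MAX seconds.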
|
[
"user@comma.ai"
] |
user@comma.ai
|
50b28d0ed7daa7be97decf477b846c80cd2df47e
|
4f0385a90230c0fe808e8672bb5b8abcceb43783
|
/框架/crawler/scrapy/scrapy_demo/scrapy_demo/spiders/quotes.py
|
8c9928611b92d882b2c0eebf7d5163ee20e145da
|
[] |
no_license
|
lincappu/pycharmlearningproject
|
4084dab7adde01db9fa82a12769a67e8b26b3382
|
b501523e417b61373688ba12f11b384166baf489
|
refs/heads/master
| 2023-07-10T05:21:15.163393
| 2023-06-29T14:02:35
| 2023-06-29T14:02:35
| 113,925,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,268
|
py
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import scrapy
from scrapy_demo import items
from scrapy_demo import settings
import scrapy.settings
from scrapy.mail import MailSender
# This is the most basic form of spider,
# class QuotesSpider(scrapy.Spider):
# name = "quotes"
# start_urls = [
# 'http://quotes.toscrape.com/page/1/',
# ]
#
# def parse(self, response):
# for quote in response.css('div.quote'):
# yield {
# 'text': quote.css('span.text::text').get(),
# 'author': quote.css('small.author::text').get(),
# 'tags': quote.css('div.tags a.tag::text').getall(),
# }
#
# next_page = response.css('li.next a::attr(href)').get()
# if next_page is not None:
#             next_page = response.urljoin(next_page) # urljoin uses the domain from start_urls.
# yield scrapy.Request(next_page, callback=self.parse)
# The response.follow form; unlike Request, there is no need to call urljoin again - the url is already fully joined
# class QuotesSpider(scrapy.Spider):
# name = 'quotes'
# start_urls = [
# 'http://quotes.toscrape.com/tag/humor/',
# ]
#
# def parse(self, response):
# for quote in response.css('div.quote'):
# yield {
# 'author': quote.xpath('span/small/text()').get(),
# 'text': quote.css('span.text::text').get(),
# }
#
# next_page = response.css('li.next a::attr("href")').get()
# if next_page is not None:
# yield response.follow(next_page, self.parse)
# The follow_all form, with a second callback added.
# class AuthorSpider(scrapy.Spider):
# name = 'author'
#
# start_urls = ['http://quotes.toscrape.com/']
#
# def parse(self, response):
# author_page_links = response.css('.author + a')
# yield from response.follow_all(author_page_links, self.parse_author)
#
# pagination_links = response.css('li.next a')
# yield from response.follow_all(pagination_links, self.parse)
#
# def parse_author(self, response):
# def extract_with_css(query):
# return response.css(query).get(default='').strip()
#
# yield {
# 'name': extract_with_css('h3.author-title::text'),
# 'birthdate': extract_with_css('.author-born-date::text'),
# 'bio': extract_with_css('.author-description::text'),
# }
#
#
# Pass an argument on the command line and override start_requests, so start_urls is not needed
# class QuotesSpider(scrapy.Spider):
# name = "quotes"
#
# def start_requests(self):
# url = 'http://quotes.toscrape.com/'
# tag = getattr(self, 'tag', None)
# if tag is not None:
# url = url + 'tag/' + tag
# yield scrapy.Request(url, self.parse)
#
# def parse(self, response):
# for quote in response.css('div.quote'):
# yield {
# 'text': quote.css('span.text::text').extract_first(),
# 'author': quote.css('small.author::text').extract_first(),
# }
#
# next_page = response.css('li.next a::attr(href)').extract_first()
# if next_page is not None:
# next_page = response.urljoin(next_page)
# yield scrapy.Request(next_page, self.parse)
# class DianyingSpider(scrapy.Spider):
# MAIL_HOST = 'smtp.exmail.qq.com'
# MAIL_PORT = 25
# MAIL_USER = "monitor@icourt.cc"
# MAIL_PASS = "6bH9KPQoKD"
# MAIL_TLS = False
# MAIL_SSL = False
#
# name = "dianying"
# start_urls = [
# "https://www.dy2018.com/html/gndy/dyzz/"
#     ]
# These are the default FEED exporter settings; the item exporter options are not used here
# custom_settings = {
# 'FEED_URI': "file:///tmp/zzz.marshal",
# 'FEED_FORMAT': 'marshal',
# 'FEED_EXPORT_ENCODING':'utf8',
# 'FEED_EXPORT_FIELDS': ["url", "title"]
# }
# Entry point
# def parse(self, response):
# mailer = MailSender(
# smtphost=settings.py.MAIL_HOST,
# smtpuser=settings.py.MAIL_USER,
# mailfrom=settings.py.MAIL_USER,
# smtppass=settings.py.MAIL_PASS,
# smtpport=settings.py.MAIL_PORT,
# smtptls=settings.py.MAIL_TLS,
# smtpssl=settings.py.MAIL_SSL,
# )
# mailer = MailSender.from_settings(self.settings.py)
#
# mailer.send(to=["lincappu@163.com"], subject="北京新橙科技有限公司", body="Some body")
#
#     # Iterate over all pages under "latest movies"
# for page in response.xpath("//select/option/@value").extract():
# url = "https://www.dy2018.com" + page
# self.logger.info('aaaaa %s' % url)
# yield scrapy.Request(url, callback=self.parsePage)
#
#     # Process a single page
# def parsePage(self, response):
#         # Get the detail-page links for every movie on this page
# for link in response.xpath('//a[@class="ulink"]/@href').extract():
# url = "https://www.dy2018.com" + link
# self.logger.info('bbbbbb %s' % url)
# yield scrapy.Request(url, callback=self.parseChild)
#
#     # Process a single movie detail page
# def parseChild(self, response):
#         # Extract the movie information and pull out the data
# item = items.DianyingItem()
# item['url'] = response.url
# item['title'] = response.xpath('//div[@class="title_all"]/h1/text()').extract()
# item['magnet'] = response.xpath('//div[@id="Zoom"]//a[starts-with(@href, "magnet:")]/@href').extract()
# self.logger.info('ccccc %s' % item)
# yield item
# The ItemLoader form
# class DianyingSpider(scrapy.Spider):
# name = "dianying"
# start_urls = [
# "https://www.dy2018.com/html/gndy/dyzz/"
# ]
#
#     # Entry point
# def parse(self, response):
#         # Iterate over all pages under "latest movies"
# for page in response.xpath("//select/option/@value").extract():
# url = "https://www.dy2018.com" + page
# yield scrapy.Request(url, callback=self.parsePage)
#
#     # Process a single page
# def parsePage(self, response):
#         # Get the detail-page links for every movie on this page
# for link in response.xpath('//a[@class="ulink"]/@href').extract():
# url = "https://www.dy2018.com" + link
# yield scrapy.Request(url, callback=self.parseChild)
#
#
# def parseChild(self, response):
# l = items.ArticleItemLoader(item=items.DianyingItem(), response=response)
# l.add_value('url', response.url)
# l.add_xpath('title', '//div[@class="title_all"]/h1/text()')
# l.add_xpath('magnet', '//div[@id="Zoom"]//img/@src')
# l.add_value('date', '20200611')
# l.add_value('name','fls')
# l.add_value('create_time','test')
# yield l.load_item()
#
# class DianyingSpider(scrapy.Spider):
#
# name = "dianying"
# start_urls = [
# "https://www.thepaper.cn/allGovUsers.jsp",
# ]
#
# def parse(self, response):
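# --- Hedged minimal spider (added for illustration; every variant above is
# commented out, so this sketch shows the smallest runnable form) ---------------
# class MinimalQuotesSpider(scrapy.Spider):
#     name = "quotes_minimal"
#     start_urls = ["http://quotes.toscrape.com/"]
#
#     def parse(self, response):
#         for quote in response.css("div.quote"):
#             yield {"text": quote.css("span.text::text").get()}
#         next_page = response.css("li.next a::attr(href)").get()
#         if next_page is not None:
#             yield response.follow(next_page, callback=self.parse)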
|
[
"lincappu@163.com"
] |
lincappu@163.com
|
5a53f221c372ba4f516ce29fa0811152cfe05e26
|
7cbd54c390f57982bb0f81ae67351cf512f08ad1
|
/Scripts/Sims/SLiM/PopExpansion/PopExpansionChangedRecRate/simulate_treeseqPopExpansionNeutral.py
|
dec79a1f2309f84c4393c9b714464ef20d8570d5
|
[] |
no_license
|
dortegadelv/HaplotypeDFEStandingVariation
|
ee9eaa9a44169523349bef09d836913221bf24cb
|
eb196acf6bbaa43f475f132b667f0f74b6f7cee4
|
refs/heads/master
| 2022-05-25T03:47:39.948444
| 2022-03-07T22:41:15
| 2022-03-07T22:41:15
| 108,029,910
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,624
|
py
|
#! /usr/bin/env python3
import gzip
import numpy as np
import sys
import argparse
import os
def read_boundaries_as_dict():
bounds = gzip.open('./annotations/hg19.recomb.boundaries.txt.gz')
bounds = bounds.readlines()
bounds = [x.decode('utf_8').strip('\n').split(' ') for x in bounds]
bounds = [[x[0], {"start": int(x[1]), "stop": int(x[2])}] for x in bounds]
bounds = dict(bounds)
return bounds
def overlap(a, b):
return max(0, min(a[1], b[1]) - max(a[0], b[0]) + 1)
def make_sim_seq_info(chrom="NULL", start="NULL", size=100000,
filename="sim_seq_info.txt"):
if chrom == "NULL":
start = 0
stop = start + size - 1
recRate = 1e-8 * 5
annots = []
recomb = ["recRate {} {}".format(stop, recRate)]
else:
bounds = read_boundaries_as_dict()
stop = start + size - 1
if (start < bounds[chrom]['start']) or (stop > bounds[chrom]['stop']):
raise ValueError("outside the bounds of the recomb map")
#read annotations
annots_file = './annotations/hg19.{}.annot.txt.gz'.format(chrom)
annots_chr = gzip.open(annots_file).readlines()
annots_chr = [x.decode('utf_8').strip('\n').split(' ') for
x in annots_chr]
annots_chr = [x for x in annots_chr if
overlap([start, stop], list(map(int, x[1:3]))) > 0]
annots_chr = [[x[0], int(x[1]), int(x[2])] for x in annots_chr]
#trim minimum
annots_chr = [x if x[1] >= start else [x[0],start,x[2]] for
x in annots_chr]
#trim maximum
annots_chr = [x if x[2] <= stop else [x[0],x[1],stop] for
x in annots_chr]
#read recomb file
recomb_file = './annotations/hg19.recomb.map.txt.gz'
recomb_chr = gzip.open(recomb_file).readlines()
recomb_chr = [x.decode('utf_8').strip("\n").split(" ")
for x in recomb_chr]
recomb_chr = [x for x in recomb_chr if x[0] == chrom]
recomb_chr = [x for x in recomb_chr if
overlap([start,stop], list(map(int, x[1:3]))) > 0]
recomb_chr = [[x[0], int(x[1]), int(x[2]), x[3]] for x in recomb_chr]
#trim minimum
recomb_chr = [x if x[1] >= start else [x[0],start,x[2],x[3]] for
x in recomb_chr]
#trim maximum
recomb_chr = [x if x[2] <= stop else [x[0],x[1],stop,x[3]] for
x in recomb_chr]
#assemble chromosome
annots = ['{} {} {}'.format(x[0],x[1]-start,x[2]-start) for
x in annots_chr]
recomb = ['recRate {} {}'.format(x[2] - start, x[3]) for
x in recomb_chr]
#now combine all annotations
sequence_info = annots + recomb
outfile = open(filename, 'w')
if chrom == "NULL":
outfile.write('Test chr1:{0}-{1}\n'.format(start,stop))
else:
outfile.write('Human {0}:{1}-{2}\n'.format(chrom,start,stop))
outfile.write('\n'.join(sequence_info))
outfile.close()
def init_block_fun(mu, scalingfactor, es, shape, smin, smax, simseqinfoname, cnc=False):
init= '''
// set up a simple neutral simulation
initialize() {{
initializeMutationRate({0}*(2.31/3.31)*{1});
initializeTreeSeq();
// m1 mutation type: nonsyn
// muts added at 2.31/3.31 the mutation rate, syn muts added w/msprime
initializeMutationType("m1", 0, "f", 0.0);
// m2 mutation type: adaptive
initializeMutationType("m2", 0.5, "s", "return runif(1,{4}, {5});");
    m2.convertToSubstitution = T;
// m3 mutation type: cnc with DFE from Torgerson et al., 2009
initializeMutationType("m3", 0, "f", 0.0);
    m3.convertToSubstitution = T;
//genomic element: exon and uses a mixture of syn and nonsyn at a 1:2.31 ratio (Huber et al.)
initializeGenomicElementType("g1", c(m1), c(1.0)); // no synonymous muts
//genomic element: cnc
initializeGenomicElementType("g2", c(m3), c(1.0));
//read in exon and recomb info
info_lines = readFile("{6}");
//recombination
rec_ends = NULL;
rec_rates = NULL;
for (line in info_lines[substr(info_lines, 0, 2) == "rec"])
{{
components = strsplit(line, " ");
rec_ends = c(rec_ends, asInteger(components[1]));
rec_rates = c(rec_rates, asFloat(components[2]));
}}
//multiply rec rates by scaling factor
initializeRecombinationRate(0.5*(1-(1-2*rec_rates)^{1}), rec_ends);
//exons
for (line in info_lines[substr(info_lines, 0, 2) == "exo"])
{{
components = strsplit(line, " ");
exon_starts = asInteger(components[1]);
exon_ends = asInteger(components[2]);
initializeGenomicElement(g1, exon_starts, exon_ends);
}}
//conserved non-coding
//maybe incorporate this later
for (line in info_lines[substr(info_lines, 0, 2) == "cnc"])
{{
components = strsplit(line, " ");
cnc_starts = asInteger(components[1]);
cnc_ends = asInteger(components[2]);
initializeGenomicElement(g2, cnc_starts, cnc_ends);
}}
}}
'''.format(mu, scalingfactor, es, shape, smin, smax, simseqinfoname)
return init.replace('\n ','\n')
def fitness_block_fun(Tcurr):
fitness_block='''
1:{0} fitness(m1) {{
h = mut.mutationType.dominanceCoeff;
if (homozygous) {{
return ((1.0 + 0.5*mut.selectionCoeff)*(1.0 + 0.5*mut.selectionCoeff));
}} else {{
return (1.0 + mut.selectionCoeff * h);
}}
}}
'''.format(10100)
return fitness_block.replace('\n ','\n')
def demog_block_fun_neu(nrep, Nanc, Tafnea, Nnea, Taf, Naf, Tb, Nb, mafb, Tadm,
mafnea, Teuas, Nas0, mafas, Tcurr, ras, Tneasamp):
rand_seeds = 'c(' + ','.join([str(__import__('random').
randint(0,10000000000000)) for x in range(0,nrep)]) + ')'
demog_block='''
// burn-in for ancestral population
1 early(){{
setSeed({0}[simnum]); //define with -d simnum=$SGE_TASK_ID
sim.addSubpop("p1", 1000); }}
10000 {{
p1.setSubpopulationSize(10000);
}}
'''.format(rand_seeds)
return demog_block.replace('\n ','\n')
def demog_block_fun_ai(nrep, Nanc, Tafnea, Nnea, Taf, Naf, Tb, Nb, mafb, Tadm,
mafnea, Teuas, Nas0, mafas, Tcurr, ras, Tneasamp):
ai_times = ('c(' + ','.join([str(__import__('random').
randint(tburn+1,tburn+tsplit1+tsplit2)) for x in range(0,nrep)]) +
')')
def output_block_fun(Tcurr, treefilename):
output_block='''
{0} late() {{
sim.treeSeqOutput("{1}");
}}
'''.format(10020, treefilename)
return output_block.replace('\n ','\n')
def main(args):
scalingfactor = args.scalingfactor
#demographic parameters from Gravel et al. PNAS
Nanc = 7300. #ancestral human size
Nnea = 1000. #neanderthal population size
Naf = 14474. #African population size
Nb = 1861. #out of Africa bottleneck size
Nas0 = 550. #Asian founder bottleneck size
Nasc = 45370. #Asian current day size
Tafnea = Nanc*10. #burn in for 10Na generations
Taf = Tafnea + 10400. #neanderthal - afr split time
Tb = Taf + 3560. #out of Africa bottleneck time
Tadm = Tb + 440. #neanderthal admixture time
Tneasamp = Tadm + 80. #Neanderthal lineage sampling time (age of sample)
Teuas = Tneasamp + 600. #Asian founder bottleneck time
Tcurr = 10000 #Current day time
mafnea = 0.10 #initial Neanderthal admixture proportion
mafb = 0.00015 #migration rate between Africa and Asn-Eur progenitor popn
mafas = 0.0000078 #migration rate between Africa and Asn
ras = 1 + 0.0048*scalingfactor #growth rate of modern Asn
#now, rescale all parameters by the scaing factor
Nanc = int(round(Nanc/scalingfactor))
Nnea = int(round(Nnea/scalingfactor))
Naf = int(round(Naf/scalingfactor))
Nb = int(round(Nb/scalingfactor))
Nas0 = int(round(Nas0/scalingfactor))
Nasc = int(round(Nasc/scalingfactor))
Tafnea = int(round(Tafnea/scalingfactor))
Taf = int(round(Taf/scalingfactor))
Tb = int(round(Tb/scalingfactor))
Tadm = int(round(Tadm/scalingfactor))
Tneasamp = int(round(Tneasamp/scalingfactor))
Teuas = int(round(Teuas/scalingfactor))
Tcurr = int(round(Tcurr/scalingfactor))
#population parameters
mu=args.mu*scalingfactor #mutation rate
es=args.es*scalingfactor #fixed human DFE
shape=args.shape
smin=args.smin*scalingfactor
smax=args.smax*scalingfactor
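# Worked example, assuming the default scaling factor of 5:
#   Nanc = round(7300/5) = 1460 and Taf = round(83400/5) = 16680, while
#   mu = 1.2e-8*5 = 6.0e-8; theta = 4*N*mu is preserved
#   (4*7300*1.2e-8 = 4*1460*6.0e-8, about 3.5e-4) with ~5x fewer individuals and generations.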
#set output file names
cwd = os.getcwd()
simseqinfoname = '{0}/sim_seq_info_{1}.txt'.format(cwd, args.output)
slimfilename = '{0}/sim_{1}.slim'.format(cwd, args.output)
treefilename = '{0}/trees_{1}_" + simnum + ".trees'.format(cwd, args.output)
#generate generic chromosome structure file with hg19
make_sim_seq_info(chrom=args.chrom, start=args.start, size=args.size,
filename=simseqinfoname)
#make pieces of slim script
init_block = init_block_fun(args.mu, scalingfactor, args.es, args.shape,
args.smin, args.smax, simseqinfoname)
fitness_block = fitness_block_fun(Tcurr)
demog_block = demog_block_fun_neu(args.nrep, Nanc, Tafnea, Nnea, Taf, Naf,
Tb, Nb, mafb, Tadm, mafnea, Teuas, Nas0,
mafas, Tcurr, ras, Tneasamp)
output_block = output_block_fun(Tcurr, treefilename)
#output slim file
outputs = (init_block + '\n' + fitness_block + '\n' + demog_block + '\n' +
output_block)
outfile = open(slimfilename, 'w')
outfile.write(outputs)
outfile.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="A script for preparing SLiM simulation scripts. Random variables and seeds are hardcoded into the SLiM script. Array job numbers are passed directly to SLiM: './slim -d simnum=$SGE_TASK_ID'.")
parser.add_argument('-o', '--output', action="store", dest="output",
help="output suffix, default: 'test'", default="test",
type=str)
parser.add_argument('-c', '--chr', action="store", dest="chrom",
help="chromosome format, default: 'chr1'",
default="chr1", type=str)
parser.add_argument('-s', '--start', action="store", dest="start",
help="starting coordinate, default: 10000000",
default=10000000, type=int)
parser.add_argument('-z', '--size', action="store", dest="size",
help="size of chunk to simulate, default: 20000000",
default=20000000, type=int)
parser.add_argument('-x', '--scalingfactor', action="store",
dest="scalingfactor", help="simulation scaling factor, default: 5",
default=5, type=float)
parser.add_argument('-m', '--mu', action="store", dest="mu",
help="mutation rate, default: 1.2e-8",
default=1.2e-8, type=float)
parser.add_argument('-e', '--es', action="store", dest="es",
help="expected sel coeff of gamma DFE, E[s], default:-0.01026",
default=-0.01026, type=float)
parser.add_argument('-a', '--alpha', action="store", dest="shape",
help="shape parameter of the gamma DFE, default: 0.186",
default=0.186, type=float)
parser.add_argument('-l', '--smin', action="store", dest="smin",
help="min advantageous sel coeff, default: 0.00125",
default=0.00125, type=float)
parser.add_argument('-u', '--smax', action="store", dest="smax",
help="max advantageous sel coeff, default: 0.0125",
default=0.0125, type=float)
parser.add_argument('-n', '--nrep', action="store", dest="nrep",
help="number of simulation replicates, default: 1000",
default=1000, type=int)
parser.add_argument('-t', '--simtype', action="store", dest="simtype",
help="simulation types: 'neutral', 'ai', 'ancient', 'hardsweep', or 'softsweep'. default: 'neutral'",
default="neutral", type=str)
args = parser.parse_args()
main(args)
|
[
"gochambas@gmail.com"
] |
gochambas@gmail.com
|
b63224119f103400cd98d53d767ebf99d1f01f61
|
ba921a5286df9a2d1c66f28a8bcdd6a60eb0eb0b
|
/organization/migrations/0001_initial.py
|
8f3b08e33973e47dd94562ef85c7ef68c20e56ef
|
[] |
no_license
|
Shaurya9923/Hackathon-Case-Management-System
|
33b3ae97582f9553d11df1f11c2970d07d03f5c5
|
6eb782d6e665260cb2abb795370ea58b5764f0c3
|
refs/heads/main
| 2023-02-15T22:18:23.751424
| 2021-01-03T11:08:24
| 2021-01-03T11:08:24
| 326,386,294
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
# Generated by Django 3.0.8 on 2020-07-24 14:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='organiation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('address', models.TextField()),
('city', models.CharField(max_length=20)),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
],
),
migrations.CreateModel(
name='department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('org', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organization.organiation')),
],
),
]
|
[
"shauryamehta9923@gmail.com"
] |
shauryamehta9923@gmail.com
|
1f97596a4534396f4848c29caeee8100eb7f788e
|
de1abd0ebbb817aa5f23d369e7dda360fd6f1c32
|
/chapter3/scrapy/wikiSpider/wikiSpider/settings.py
|
9bf879252847b3f89efa7323e1c40f4f86ae3b30
|
[] |
no_license
|
CodedQuen/Web-Scraping-with-Python-
|
33aaa2e3733aa1f2b8c7a533d74f5d08ac868197
|
67f2d5f57726d5a943f5f044480e68c36076965b
|
refs/heads/master
| 2022-06-13T01:34:39.764531
| 2020-05-05T11:07:01
| 2020-05-05T11:07:01
| 261,435,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,258
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for wikiSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'wikiSpider'
SPIDER_MODULES = ['wikiSpider.spiders']
NEWSPIDER_MODULE = 'wikiSpider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'wikiSpider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'wikiSpider.middlewares.WikispiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'wikiSpider.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'wikiSpider.pipelines.WikispiderPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"noreply@github.com"
] |
noreply@github.com
|
fcd8de835cfb135f10a819e6fd43dbc457b9f1eb
|
78c110eaf5b3b89a8d609e5b9d01aeec2c86d781
|
/03_multidimensional_lists/2021.02_multidimensional_lists_lab/01_Sum Matrix Elements.py
|
6036f2771433e15cbf45eff8c0a548d4da986bf4
|
[] |
no_license
|
NPencheva/Python_Advanced_Preparation
|
08c42db6fdecae92b12c335d689433eaaa43e182
|
beee92a4e39538e873936140840a09f770ae7aeb
|
refs/heads/master
| 2023-03-02T16:35:05.045227
| 2021-02-10T21:35:29
| 2021-02-10T21:35:29
| 332,894,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
number_of_rows, number_of_columns = [int(x) for x in input().split(", ")]
matrix = []
matrix_sum = 0
for row_index in range(number_of_rows):
row = [int(y) for y in input().split(", ")]
matrix.append(row)
for index in matrix:
matrix_sum += sum(index)
print(matrix_sum)
print(matrix)
|
[
"nvpencheva@gmail.com"
] |
nvpencheva@gmail.com
|
331c011eaa5c5078287cccdaa9759838135b7f83
|
a81ab54706f673f17abaf979d30eff2c08b5cf7b
|
/scripts/handle_path.py
|
e87634523705da2bff9e20cb1f1fe90a8a8ddab7
|
[] |
no_license
|
yhusr/future
|
af1feff82dc70f904ee23590ebdc09a5801eeb85
|
250f6fa1817b3cbbf46672b629cd2b0c7d590692
|
refs/heads/master
| 2021-06-26T01:42:00.520164
| 2021-04-05T13:39:13
| 2021-04-05T13:39:13
| 225,109,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
"""
Time:2019/11/17 0017
"""
import os
# project root directory
BASEPATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# configs directory
CONFIGPATH = os.path.join(BASEPATH, 'configs')
# full path of the cases config file
YAMLPATH = os.path.join(CONFIGPATH, 'casesconf.yaml')
# data directory holding the excel files
DATAPATH = os.path.join(BASEPATH, 'datas')
# full path of the excel test cases
EXCELPATH = os.path.join(DATAPATH, 'excelcases.xlsx')
# logs directory
LOGPATH = os.path.join(BASEPATH, 'logs')
# reports directory
REPORTSPATH = os.path.join(BASEPATH, 'reports')
# yaml file with the account info of the three user roles
PERSONPATH = os.path.join(CONFIGPATH, 'register_phone.yaml')
# directory holding the test case classes
CASESPATH = os.path.join(BASEPATH, 'cases')
# yaml file holding token info
TOKENPATH = os.path.join(CONFIGPATH, 'token_infor.yaml')
|
[
"904239064@qq.com"
] |
904239064@qq.com
|
70e19baa27259958c38615665bee3f6c8ac77d48
|
b8cc6d34ad44bf5c28fcca9e0df01d9ebe0ee339
|
/入门学习/threading_dead_lock-eg.py
|
277a2b79b337003460067bedae3cb0eeca00cd29
|
[] |
no_license
|
python-yc/pycharm_script
|
ae0e72898ef44a9de47e7548170a030c0a752eb5
|
c8947849090c71e131df5dc32173ebe9754df951
|
refs/heads/master
| 2023-01-05T06:16:33.857668
| 2020-10-31T08:09:53
| 2020-10-31T08:09:53
| 296,778,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,591
|
py
|
"""
import threading
import time
lock_1 = threading.Lock()
lock_2 = threading.Lock()
def func_1():
print("func_1 starting......")
lock_1.acquire()
print("func_1 申请了 lock 1 ......")
time.sleep(2)
print("func_1 等待 lock_2 .......")
lock_2.acquire()
print("func_1 申请了 lock 2 ......")
lock_2.release()
print("func_1 释放了lock_2")
lock_1.release()
print("func_1 释放了lock_1")
print("func_1 done......")
def func_2():
time.sleep(3)
print("func_2 starting......")
lock_2.acquire()
print("func_2 申请了 lock 2 ......")
#comment out the first sleep in this function and uncomment the sleep below to reproduce the deadlock
#time.sleep(3)
print("func_2 等待 lock_1 .......")
lock_1.acquire()
print("func_2 申请了 lock 1 ......")
lock_1.release()
print("func_2 释放了lock_1")
lock_2.release()
print("func_2 释放了lock_2")
print("func_2 done......")
if __name__ == '__main__':
print("主程序启动............")
t1 = threading.Thread(target=func_1,args=())
t2 = threading.Thread(target=func_2,args=())
t1.start()
t2.start()
t1.join()
t2.join()
print("主程序结束。。。。。。。。。。")
"""
import threading
import time
lock_1 = threading.Lock()
lock_2 = threading.Lock()
def func_1():
print("func_1 starting......")
#give the acquire a timeout and give up if the lock cannot be obtained in time
lock_1.acquire(timeout=4)
print("func_1 申请了 lock 1 ......")
time.sleep(2)
print("func_1 等待 lock_2 .......")
rst = lock_2.acquire(timeout=2)
if rst:
print("func_1已经得到锁lock_2")
lock_2.release()
print("func_1 释放了lock_2")
else:
print("func_1注定没申请到lock_2....")
lock_1.release()
print("func_1 释放了lock_1")
print("func_1 done......")
def func_2():
print("func_2 starting......")
lock_2.acquire()
print("func_2 申请了 lock 2 ......")
time.sleep(3)
print("func_2 等待 lock_1 .......")
lock_1.acquire()
print("func_2 申请了 lock 1 ......")
lock_1.release()
print("func_2 释放了lock_1")
lock_2.release()
print("func_2 释放了lock_2")
print("func_2 done......")
if __name__ == '__main__':
print("主程序启动............")
t1 = threading.Thread(target=func_1,args=())
t2 = threading.Thread(target=func_2,args=())
t1.start()
t2.start()
t1.join()
t2.join()
print("主程序结束。。。。。。。。。。")
|
[
"15655982512.com"
] |
15655982512.com
|
ccf9c467b82d8823d29085edb6e3716535af8ad0
|
69f631bb9f2f1b48fd06eb57a6647dcc403addf2
|
/Face_Mask_Model.py
|
7815039579c6fbf18f26265e16a9eb544b1aee43
|
[
"MIT"
] |
permissive
|
Mridul20/Face-Mask-Detector
|
75436c552acb35426360a56fbe108f805a061a51
|
3cc9b42fb2ced83710814334308c258e1bb04770
|
refs/heads/main
| 2023-02-02T10:15:30.694803
| 2020-12-19T18:53:52
| 2020-12-19T18:53:52
| 322,915,678
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,598
|
py
|
#!/usr/bin/env python
# coding: utf-8
# <a href="https://colab.research.google.com/github/aarpit1010/Real-Time-Face-Mask-Detector/blob/master/Face_Mask_Model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# In[1]:
# IMPORTING THE REQUIRED LIBRARIES
import sys
import os
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
# uncomment the following line if 'imutils' is not installed in your python kernel
# !{sys.executable} -m pip install imutils
import imutils
from imutils import paths
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Activation, MaxPooling2D, Flatten
from keras.models import Sequential, load_model
from keras.losses import categorical_crossentropy, binary_crossentropy
from keras.optimizers import Adam
from keras.utils import np_utils
from keras.regularizers import l2
from keras import regularizers
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array
from keras.applications.mobilenet_v2 import preprocess_input
from keras.preprocessing.image import load_img
from keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
import cv2
import time
import datetime  # used for the timestamp overlay in the live-detection loop
import random
import shutil
# In[2]:
from google.colab import drive
drive.mount('/content/drive')
# # Let's have a look at our Data
# In[3]:
# Path to the folders containing images
data_path = '/content/drive/My Drive/Colab Notebooks/Face Mask Detector/dataset'
mask_path = '/content/drive/My Drive/Colab Notebooks/Face Mask Detector/train/with_mask/'
nomask_path = '/content/drive/My Drive/Colab Notebooks/Face Mask Detector/train/without_mask/'
test_path = '/content/drive/My Drive/Colab Notebooks/Face Mask Detector/test/'
train_path = '/content/drive/My Drive/Colab Notebooks/Face Mask Detector/train/'
# In[4]:
# function to show images from the input path
def view(path):
images = list()
for img in random.sample(os.listdir(path),9):
images.append(img)
i = 0
fig,ax = plt.subplots(nrows=3, ncols=3, figsize=(20,10))
for row in range(3):
for col in range(3):
ax[row,col].imshow(cv2.imread(os.path.join(path,images[i])))
i+=1
# In[5]:
# sample images of people wearing masks
view(mask_path)
# In[6]:
#sample images of people NOT wearning masks
view(nomask_path)
# # Splitting of Data
#
# - TRAINING SET
# - Mask : 658
# - No Mask : 656
#
# - TEST SET
# - Mask : 97
# - No Mask : 97
# <br><br>
# Since the dataset is pretty small, image augmentation is performed to enlarge it. Data augmentation gives us different variants of the same image without collecting more data, which is not always possible.
# <br><br>
# It is another way to reduce Overfitting on our model, where we increase the amount of training data using information only in our training data and leave the test set untouched.
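# A quick way to see what the augmentation produces: flow one image through an
# ImageDataGenerator and collect a few randomly transformed variants. (Sketch
# only; the random array below just stands in for a real photo.)
demo_gen = ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True)
demo_img = np.random.rand(1, 224, 224, 3)  # one fake RGB image, with a batch dimension
demo_flow = demo_gen.flow(demo_img, batch_size=1)
demo_variants = [next(demo_flow)[0] for _ in range(4)]
print(len(demo_variants), demo_variants[0].shape)  # 4 augmented copies of shape (224, 224, 3)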
# # Preparation of Data Pipelining
# In[7]:
batch_size = 32 # Batch Size
epochs = 50 # Number of Epochs
img_size = 224
# In[8]:
# Data Augmentation to increase training dataset size
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=20,
shear_range=0.2,
zoom_range=0.2,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
'/content/drive/My Drive/Colab Notebooks/Face Mask Detector/train',
target_size=(img_size,img_size),
batch_size=batch_size,
class_mode='binary')
test_set = test_datagen.flow_from_directory(
'/content/drive/My Drive/Colab Notebooks/Face Mask Detector/test',
target_size=(img_size,img_size),
batch_size=batch_size,
class_mode='binary')
# # Building the Model
#
# - In the next step, we build our Sequential CNN model with various layers such as Conv2D, MaxPooling2D, Flatten, Dropout and Dense.
# - In the last Dense layer, we use a single ‘**sigmoid**’ unit that outputs the probability of the positive class (binary output, matching `class_mode='binary'` and the binary cross-entropy loss).
# - Regularization is done to prevent overfitting. It is necessary since our dataset is not very large, only around 1,500 images in total.
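# The label mapping comes from the training folder names; checking it makes the
# sigmoid output unambiguous (e.g. {'with_mask': 0, 'without_mask': 1}, so
# predictions near 1 mean "no mask" in the live-detection loop further below).
print(training_set.class_indices)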
# In[9]:
model=Sequential()
model.add(Conv2D(224,(3,3), activation ='relu', input_shape=(img_size,img_size,3), kernel_regularizer=regularizers.l2(0.003)))
model.add(MaxPooling2D() )
model.add(Conv2D(100,(3,3), activation ='relu', kernel_regularizer=regularizers.l2(0.003)))
model.add(MaxPooling2D() )
model.add(Conv2D(100,(3,3), activation ='relu', kernel_regularizer=regularizers.l2(0.003)))
model.add(MaxPooling2D() )
model.add(Conv2D(50,(3,3), activation ='relu', kernel_regularizer=regularizers.l2(0.003)))
model.add(MaxPooling2D() )
model.add(Conv2D(30,(3,3), activation ='relu', kernel_regularizer=regularizers.l2(0.003)))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(90, activation ='relu'))
model.add(Dense(30, activation = 'relu'))
model.add(Dense(1, activation ='sigmoid'))
model.summary()
# In[10]:
# Optimization of the model is done via Adam optimizer
# Loss is measures in the form of Binary Categorical Cross Entropy as our output contains 2 classes, with_mask and without_mask
model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])
# In[11]:
#Model Checkpoint to save the model after training, so that it can be re-used while detecting faces
# Include the epoch in the file name (uses `str.format`)
checkpoint_path = "/content/drive/My Drive/Colab Notebooks/Face Mask Detector/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
checkpoint = ModelCheckpoint(
filepath = checkpoint_path,
monitor='val_loss',
verbose=0,
save_best_only=True,
save_weights_only=True,
mode='auto'
)
# Save the weights using the `checkpoint_path` format
model.save_weights(checkpoint_path.format(epoch=0))
# Training of the Model is done
history=model.fit(training_set, epochs=epochs, validation_data=test_set, callbacks=[checkpoint])
# In[12]:
# Plotting the loss on validation set w.r.t the number of epochs
plt.plot(history.history['loss'],'r',label='Training Loss')
plt.plot(history.history['val_loss'],label='Validation Loss')
plt.xlabel('No. of Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Plotting the accuracy on validation set w.r.t the number of epochs
plt.plot(history.history['accuracy'],'r',label='Training Accuracy')
plt.plot(history.history['val_accuracy'],label='Validation Accuracy')
plt.xlabel('No. of Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
# In[13]:
# print(model.evaluate(test_data,test_target))
# In[14]:
get_ipython().system('pip install pyyaml h5py # Required to save models in HDF5 format')
# Now, look at the resulting checkpoints and choose the latest one:
# In[15]:
# Saving the Model trained above, which will be used in future while using Real time data
model.save('/content/drive/My Drive/Colab Notebooks/Face Mask Detector/trained_model.model', history)
model.save('/content/drive/My Drive/Colab Notebooks/Face Mask Detector/trained_model.h5', history)
# In[16]:
# IMPLEMENTING LIVE DETECTION OF FACE MASK
# Importing the saved model from the IPython notebook
mymodel=load_model('/content/drive/My Drive/Colab Notebooks/Face Mask Detector/trained_model.h5')
# Importing the Face Classifier XML file containing all features of the face
face_classifier=cv2.CascadeClassifier('/content/drive/My Drive/Colab Notebooks/Face Mask Detector/haarcascade_frontalface_default.xml')
# To open a video via link to be inserted in the () of VideoCapture()
# To open the web cam connected to your laptop/PC, write '0' (without quotes) in the () of VideoCapture()
src_cap=cv2.VideoCapture(0)
while src_cap.isOpened():
_,img=src_cap.read()
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# detect MultiScale / faces
faces = face_classifier.detectMultiScale(rgb, 1.3, 5)
# Draw rectangles around each face
for (x, y, w, h) in faces:
#Save just the rectangle faces in SubRecFaces
face_img = rgb[y:y+h, x:x+w]
face_img=cv2.resize(face_img,(224,224))
face_img=face_img/255.0
face_img=np.reshape(face_img,(224,224,3))
face_img=np.expand_dims(face_img,axis=0)
pred=mymodel.predict_classes(face_img)
# print(pred)
if pred[0][0]==1:
cv2.rectangle(img, (x,y), (x+w, y+h), (0,0,255), 2)
cv2.rectangle(img, (x,y-40), (x+w,y), (0,0,255),-1)
cv2.putText(img,'NO MASK',(250,50),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),4)
else:
cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 2)
cv2.rectangle(img, (x,y-40), (x+w,y), (0,255,0),-1)
cv2.putText(img,'MASK',(250,50),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),4)
datet=str(datetime.datetime.now())
cv2.putText(img,datet,(400,450),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255),1)
# Show the image
cv2.imshow('LIVE DETECTION',img)
# if key 'q' is press then break out of the loop
if cv2.waitKey(1)==ord('q'):
break
# Stop video
src_cap.release()
# Close all started windows
cv2.destroyAllWindows()
# In[16]:
|
[
"mridulmittal20@gmail.com"
] |
mridulmittal20@gmail.com
|
2e520ceaa6db8cdadf3aa8bd40a79b659dd16308
|
2c318438747613034dfb4c3f9099fba1c3c98d30
|
/run.py
|
f1a9f2fa006080e4b2d9a83ef9ff3e719c6e9bcf
|
[] |
no_license
|
wycstar/bthub_all
|
674e00239993b636c3af4d2937a73639c5859c38
|
96f1b2adc00e0515dedfe9f9b65624b0b5203ae4
|
refs/heads/master
| 2021-07-03T23:19:55.083423
| 2017-09-26T11:29:30
| 2017-09-26T11:29:30
| 103,232,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
#!/usr/bin/env python
# -*-coding:utf-8 -*-
from spider import Master
from spider import DHTServer
from server import SITE, SERVER
from db import DataProcess
if __name__ == '__main__':
q = DataProcess()
q.start()
# master = Master()
# master.start()
# dht = DHTServer(master, "0.0.0.0", 6881, max_node_qsize=200)
# dht.start()
# dht.auto_send_find_node()
# q.join()
SERVER.run(SITE, host='0.0.0.0', debug=True, port=28000)
|
[
"wycstar@live.com"
] |
wycstar@live.com
|
ea9891c42ef6fc7f1ca7896c9b1e6aadd4fe3db7
|
38221ca553059a83ed8f64e2cb25181ed88da275
|
/deeppavlov/models/spelling_correction/levenstein/tabled_trie.py
|
502376482ef26e8fc4feca5ddd880788e9dcc39f
|
[
"Apache-2.0",
"Python-2.0"
] |
permissive
|
stenpiren/DeepPavlov
|
7153ce828225d9d1fdf1c171794efe463f2e9dea
|
fe59facab4854f2fe56ed126e27eb9696ad6dfd8
|
refs/heads/master
| 2020-03-23T10:08:53.962961
| 2018-07-16T22:10:17
| 2018-07-16T22:10:17
| 141,427,836
| 1
| 0
|
Apache-2.0
| 2018-07-18T11:50:30
| 2018-07-18T11:50:30
| null |
UTF-8
|
Python
| false
| false
| 19,969
|
py
|
import copy
from collections import defaultdict
import numpy as np
class Trie:
"""
Implementation of a prefix trie (more precisely, a rooted directed acyclic word graph)
Attributes
----------
alphabet: list, the alphabet
alphabet_codes: dict, symbol -> code mapping
compressed: bool, whether the trie has been minimized
cashed: bool, whether calls to descend are cached
root: int, index of the root node
graph: array, type=int, shape=(number of nodes, alphabet size), child matrix
graph[i][j] = k <-> node k is the child of node i along the edge labelled alphabet[j]
data: array, type=object, shape=(number of nodes,), per-node payload
final: array, type=bool, shape=(number of nodes,), indicator array
final[i] = True <-> i is a final (accepting) node
"""
NO_NODE = -1
SPACE_CODE = -1
ATTRS = ['is_numpied', 'precompute_symbols', 'allow_spaces',
'is_terminated', 'to_make_cashed']
def __init__(self, alphabet, make_sorted=True, make_alphabet_codes=True,
is_numpied=False, to_make_cashed=False,
precompute_symbols=None, allow_spaces=False, dict_storage=False):
self.alphabet = sorted(alphabet) if make_sorted else alphabet
self.alphabet_codes = ({a: i for i, a in enumerate(self.alphabet)}
if make_alphabet_codes else self.alphabet)
self.alphabet_codes[" "] = Trie.SPACE_CODE
self.is_numpied = is_numpied
self.to_make_cashed = to_make_cashed
self.dict_storage = dict_storage
self.precompute_symbols = precompute_symbols
self.allow_spaces = allow_spaces
self.initialize()
def initialize(self):
self.root = 0
self.graph = [self._make_default_node()]
self.data, self.final = [None], [False]
self.nodes_number = 1
self.descend = self._descend_simple
self.is_terminated = False
def _make_default_node(self):
if self.dict_storage:
return defaultdict(lambda: -1)
elif self.is_numpied:
return np.full(shape=(len(self.alphabet),),
fill_value=Trie.NO_NODE, dtype=int)
else:
return [Trie.NO_NODE] * len(self.alphabet)
def save(self, outfile):
"""
Saves the trie so it can be reused later
"""
with open(outfile, "w", encoding="utf8") as fout:
attr_values = [getattr(self, attr) for attr in Trie.ATTRS]
attr_values.append(any(x is not None for x in self.data))
fout.write("{}\n{}\t{}\n".format(
" ".join("T" if x else "F" for x in attr_values),
self.nodes_number, self.root))
fout.write(" ".join(str(a) for a in self.alphabet) + "\n")
for index, label in enumerate(self.final):
letters = self._get_letters(index, return_indexes=True)
children = self._get_children(index)
fout.write("{}\t{}\n".format(
"T" if label else "F", " ".join("{}:{}".format(*elem)
for elem in zip(letters, children))))
if self.precompute_symbols is not None:
for elem in self.data:
fout.write(":".join(",".join(
map(str, symbols)) for symbols in elem) + "\n")
return
def make_cashed(self):
'''
Enables caching of descend queries
'''
self._descendance_cash = [dict() for _ in self.graph]
self.descend = self._descend_cashed
def make_numpied(self):
self.graph = np.array(self.graph)
self.final = np.asarray(self.final, dtype=bool)
self.is_numpied = True
def add(self, s):
'''
Adds the string s to the trie
'''
if self.is_terminated:
raise TypeError("Impossible to add string to fitted trie")
if s == "":
self._set_final(self.root)
return
curr = self.root
for i, a in enumerate(s):
code = self.alphabet_codes[a]
next = self.graph[curr][code]
if next == Trie.NO_NODE:
curr = self._add_descendant(curr, s[i:])
break
else:
curr = next
self._set_final(curr)
return self
def fit(self, words):
for s in words:
self.add(s)
self.terminate()
def terminate(self):
if self.is_numpied:
self.make_numpied()
self.terminated = True
if self.precompute_symbols is not None:
precompute_future_symbols(self, self.precompute_symbols,
allow_spaces=self.allow_spaces)
if self.to_make_cashed:
self.make_cashed()
def __contains__(self, s):
if any(a not in self.alphabet for a in s):
return False
# word = tuple(self.alphabet_codes[a] for a in s)
node = self.descend(self.root, s)
return (node != Trie.NO_NODE) and self.is_final(node)
def words(self):
"""
Returns an iterator over the words stored in the trie
"""
branch, word, indexes = [self.root], [], [0]
letters_with_children = [self._get_children_and_letters(self.root)]
while len(branch) > 0:
if self.is_final(branch[-1]):
yield "".join(word)
while indexes[-1] == len(letters_with_children[-1]):
indexes.pop()
letters_with_children.pop()
branch.pop()
if len(indexes) == 0:
    return  # plain return: raising StopIteration inside a generator is a RuntimeError on Python 3.7+ (PEP 479)
word.pop()
next_letter, next_child = letters_with_children[-1][indexes[-1]]
indexes[-1] += 1
indexes.append(0)
word.append(next_letter)
branch.append(next_child)
letters_with_children.append(self._get_children_and_letters(branch[-1]))
def is_final(self, index):
'''
Arguments
---------
index: int, node index
Returns
-------
True: if index is the index of a final node
'''
return self.final[index]
def find_partitions(self, s, max_count=1):
"""
Finds all partitions s = s_1 ... s_m of s into dictionary words s_1, ..., s_m
with m <= max_count
"""
curr_agenda = [(self.root, [], 0)]
for i, a in enumerate(s):
next_agenda = []
for curr, borders, cost in curr_agenda:
if cost >= max_count:
continue
child = self.graph[curr][self.alphabet_codes[a]]
# child = self.graph[curr][a]
if child == Trie.NO_NODE:
continue
next_agenda.append((child, borders, cost))
if self.is_final(child):
next_agenda.append((self.root, borders + [i+1], cost+1))
curr_agenda = next_agenda
answer = []
for curr, borders, cost in curr_agenda:
if curr == self.root:
borders = [0] + borders
answer.append([s[left:borders[i+1]] for i, left in enumerate(borders[:-1])])
return answer
def __len__(self):
return self.nodes_number
def __repr__(self):
answer = ""
for i, (final, data) in enumerate(zip(self.final, self.data)):
letters, children = self._get_letters(i), self._get_children(i)
answer += "{0}".format(i)
if final:
answer += "F"
for a, index in zip(letters, children):
answer += " {0}:{1}".format(a, index)
answer += "\n"
if data is not None:
answer += "data:{0} {1}\n".format(len(data), " ".join(str(elem) for elem in data))
return answer
def _add_descendant(self, parent, s, final=False):
for a in s:
code = self.alphabet_codes[a]
parent = self._add_empty_child(parent, code, final)
return parent
def _add_empty_child(self, parent, code, final=False):
'''
Adds a child of the node parent along the symbol with code code
'''
self.graph[parent][code] = self.nodes_number
self.graph.append(self._make_default_node())
self.data.append(None)
self.final.append(final)
self.nodes_number += 1
return (self.nodes_number - 1)
def _descend_simple(self, curr, s):
'''
Descends from node curr along the string s
'''
for a in s:
curr = self.graph[curr][self.alphabet_codes[a]]
if curr == Trie.NO_NODE:
break
return curr
def _descend_cashed(self, curr, s):
'''
Descends from node curr along the string s, caching the result
'''
if s == "":
return curr
curr_cash = self._descendance_cash[curr]
answer = curr_cash.get(s, None)
if answer is not None:
return answer
# the descend code is duplicated here for speed
res = curr
for a in s:
res = self.graph[res][self.alphabet_codes[a]]
# res = self.graph[res][a]
if res == Trie.NO_NODE:
break
curr_cash[s] = res
return res
def _set_final(self, curr):
'''
Marks the state curr as final
'''
self.final[curr] = True
def _get_letters(self, index, return_indexes=False):
"""
Returns the labels of all outgoing edges of the node with index index
"""
if self.dict_storage:
answer = list(self.graph[index].keys())
else:
answer = [i for i, elem in enumerate(self.graph[index])
if elem != Trie.NO_NODE]
if not return_indexes:
answer = [(self.alphabet[i] if i >= 0 else " ") for i in answer]
return answer
def _get_children_and_letters(self, index, return_indexes=False):
if self.dict_storage:
answer = list(self.graph[index].items())
else:
answer = [elem for elem in enumerate(self.graph[index])
if elem[1] != Trie.NO_NODE]
if not return_indexes:
for i, (letter_index, child) in enumerate(answer):
answer[i] = (self.alphabet[letter_index], child)
return answer
def _get_children(self, index):
"""
Returns all children of the node with index index
"""
if self.dict_storage:
return list(self.graph[index].values())
else:
return [elem for elem in self.graph[index] if elem != Trie.NO_NODE]
class TrieMinimizer:
def __init__(self):
pass
def minimize(self, trie, dict_storage=False, make_cashed=False, make_numpied=False,
precompute_symbols=None, allow_spaces=False, return_groups=False):
N = len(trie)
if N == 0:
raise ValueError("Trie should be non-empty")
node_classes = np.full(shape=(N,), fill_value=-1, dtype=int)
order = self.generate_postorder(trie)
# processing the first node
index = order[0]
node_classes[index] = 0
class_representatives = [index]
node_key = ((), (), trie.is_final(index))
classes, class_keys = {node_key : 0}, [node_key]
curr_index = 1
for index in order[1:]:
letter_indexes = tuple(trie._get_letters(index, return_indexes=True))
children = trie._get_children(index)
children_classes = tuple(node_classes[i] for i in children)
key = (letter_indexes, children_classes, trie.is_final(index))
key_class = classes.get(key, None)
if key_class is not None:
node_classes[index] = key_class
else:
# a new equivalence class has appeared
class_keys.append(key)
classes[key] = node_classes[index] = curr_index
class_representatives.append(curr_index)
curr_index += 1
# build the new (minimized) trie
compressed = Trie(trie.alphabet, is_numpied=make_numpied,
dict_storage=dict_storage, allow_spaces=allow_spaces,
precompute_symbols=precompute_symbols)
L = len(classes)
new_final = [elem[2] for elem in class_keys[::-1]]
if dict_storage:
new_graph = [defaultdict(int) for _ in range(L)]
elif make_numpied:
new_graph = np.full(shape=(L, len(trie.alphabet)),
fill_value=Trie.NO_NODE, dtype=int)
new_final = np.array(new_final, dtype=bool)
else:
new_graph = [[Trie.NO_NODE for a in trie.alphabet] for i in range(L)]
for (indexes, children, final), class_index in\
sorted(classes.items(), key=(lambda x: x[1])):
row = new_graph[L-class_index-1]
for i, child_index in zip(indexes, children):
row[i] = L - child_index - 1
compressed.graph = new_graph
compressed.root = L - node_classes[trie.root] - 1
compressed.final = new_final
compressed.nodes_number = L
compressed.data = [None] * L
if make_cashed:
compressed.make_cashed()
if precompute_symbols is not None:
if (trie.is_terminated and trie.precompute_symbols
and trie.allow_spaces == allow_spaces):
# copy the precomputed future symbols from the source trie;
# needed so that the return from final states to the start state is identical in both tries
for i, node_index in enumerate(class_representatives[::-1]):
# future symbols for the representative of the i-th class
compressed.data[i] = copy.copy(trie.data[node_index])
else:
precompute_future_symbols(compressed, precompute_symbols, allow_spaces)
if return_groups:
node_classes = [L - i - 1 for i in node_classes]
return compressed, node_classes
else:
return compressed
def generate_postorder(self, trie):
"""
Reverse topological sort (postorder traversal)
"""
order, stack = [], []
stack.append(trie.root)
colors = ['white'] * len(trie)
while len(stack) > 0:
index = stack[-1]
color = colors[index]
if color == 'white': # the node has not been processed yet
colors[index] = 'grey'
for child in trie._get_children(index):
# check whether we have visited this child before
if child != Trie.NO_NODE and colors[child] == 'white':
stack.append(child)
else:
if color == 'grey':
colors[index] = 'black'
order.append(index)
stack = stack[:-1]
return order
def load_trie(infile):
with open(infile, "r", encoding="utf8") as fin:
line = fin.readline().strip()
flags = [x=='T' for x in line.split()]
if len(flags) != len(Trie.ATTRS) + 1:
raise ValueError("Wrong file format")
nodes_number, root = map(int, fin.readline().strip().split())
alphabet = fin.readline().strip().split()
trie = Trie(alphabet)
for i, attr in enumerate(Trie.ATTRS):
setattr(trie, attr, flags[i])
read_data = flags[-1]
final = [False] * nodes_number
#print(len(alphabet), nodes_number)
if trie.dict_storage:
graph = [defaultdict(lambda: -1) for _ in range(nodes_number)]
elif trie.is_numpied:
final = np.array(final)
graph = np.full(shape=(nodes_number, len(alphabet)),
fill_value=Trie.NO_NODE, dtype=int)
else:
graph = [[Trie.NO_NODE for a in alphabet] for i in range(nodes_number)]
for i in range(nodes_number):
line = fin.readline().strip()
if "\t" in line:
label, transitions = line.split("\t")
final[i] = (label == "T")
else:
label = line
final[i] = (label == "T")
continue
transitions = [x.split(":") for x in transitions.split()]
for code, value in transitions:
graph[i][int(code)] = int(value)
trie.graph = graph
trie.root = root
trie.final = final
trie.nodes_number = nodes_number
trie.data = [None] * nodes_number
if read_data:
for i in range(nodes_number):
line = fin.readline().strip("\n")
trie.data[i] = [set(elem.split(",")) for elem in line.split(":")]
if trie.to_make_cashed:
trie.make_cashed()
return trie
def make_trie(alphabet, words, compressed=True, is_numpied=False,
make_cashed=False, precompute_symbols=False,
allow_spaces=False, dict_storage=False):
trie = Trie(alphabet, is_numpied=is_numpied, to_make_cashed=make_cashed,
precompute_symbols=precompute_symbols, dict_storage=dict_storage)
trie.fit(words)
if compressed:
tm = TrieMinimizer()
trie = tm.minimize(trie, dict_storage=dict_storage, make_cashed=make_cashed,
make_numpied=is_numpied, precompute_symbols=precompute_symbols,
allow_spaces=allow_spaces)
return trie
def precompute_future_symbols(trie, n, allow_spaces=False):
"""
Collecting possible continuations of length <= n for every node
"""
if n == 0:
return
if trie.is_terminated and trie.precompute_symbols:
# the symbols are already precomputed
return
for index, final in enumerate(trie.final):
trie.data[index] = [set() for i in range(n)]
for index, (node_data, final) in enumerate(zip(trie.data, trie.final)):
node_data[0] = set(trie._get_letters(index))
if allow_spaces and final:
node_data[0].add(" ")
for d in range(1, n):
for index, (node_data, final) in enumerate(zip(trie.data, trie.final)):
children = set(trie._get_children(index))
for child in children:
node_data[d] |= trie.data[child][d - 1]
# in case returning to the start state via a space is allowed
if allow_spaces and final:
node_data[d] |= trie.data[trie.root][d - 1]
trie.terminated = True
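# Minimal usage sketch (illustrative only; the alphabet and word list are made up):
if __name__ == '__main__':
    demo_trie = make_trie("abc", ["ab", "c", "abc", "ba"], compressed=True)
    print("ab" in demo_trie, "bc" in demo_trie)            # True False
    print(sorted(demo_trie.words()))                       # ['ab', 'abc', 'ba', 'c']
    print(demo_trie.find_partitions("abc", max_count=2))   # [['abc'], ['ab', 'c']]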
|
[
"seliverstov.a@gmail.com"
] |
seliverstov.a@gmail.com
|
fde9e29a1d3f8167b29c9268be16769f23716b6f
|
d0c88770cac95cf837dc7ea33eb41c84588c5ee5
|
/game/collision.py
|
505730b4ac90102d2822809099a4ac2d83f0c024
|
[] |
no_license
|
bmaclean/ascii-zoo
|
bf671a49a529c9b5a57ca2710cbd3fc044d3b059
|
16e5a27f3a3d74a162b6cad2c01fff2a916444a3
|
refs/heads/master
| 2022-07-27T21:34:38.437654
| 2019-05-30T19:41:23
| 2019-05-30T19:41:23
| 159,284,851
| 0
| 0
| null | 2022-06-21T21:40:43
| 2018-11-27T06:08:58
|
Python
|
UTF-8
|
Python
| false
| false
| 785
|
py
|
import os
from app_config import root_dir
import pygame
class Collision:
sound_filepath = os.path.join(root_dir, 'assets/zapsplat_cartoon_punch_002_17900.wav')
sound = pygame.mixer.Sound(sound_filepath)
def __init__(self, animal1, animal2):
self.animal1 = animal1
self.animal2 = animal2
@property
def animals(self):
return self.animal1, self.animal2
def injure_animals_if_prey(self):
if self.animal1.wants_to_eat(self.animal2):
self.animal1.inflict_damage(self.animal2)
if self.animal2.wants_to_eat(self.animal1):
self.animal2.inflict_damage(self.animal1)
def animals_were_injured(self):
return self.animal1.wants_to_eat(self.animal2) or self.animal2.wants_to_eat(self.animal1)
|
[
"brendan.maclean94@gmail.com"
] |
brendan.maclean94@gmail.com
|
ba1cba5c8a2a1b7898a46fb6a4abeebd84541336
|
51885da54b320351bfea42c7dd629f41985454cd
|
/abc075/c.py
|
18f98c98169acb0c09d089c7c2b89ef4b8bc0bd0
|
[] |
no_license
|
mskt4440/AtCoder
|
dd266247205faeda468f911bff279a792eef5113
|
f22702e3932e129a13f0683e91e5cc1a0a99c8d5
|
refs/heads/master
| 2021-12-15T10:21:31.036601
| 2021-12-14T08:19:11
| 2021-12-14T08:19:11
| 185,161,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,777
|
py
|
#
# abc075 c
#
import sys
from io import StringIO
import unittest
from collections import deque
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """7 7
1 3
2 7
3 4
4 5
4 6
5 6
6 7"""
output = """4"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """3 3
1 2
1 3
2 3"""
output = """0"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """6 5
1 2
2 3
3 4
4 5
5 6"""
output = """5"""
self.assertIO(input, output)
def resolve():
N, M = map(int, input().split())
AB = [list(map(int, input().split())) for _ in range(M)]
ans = 0
for i in range(M):
Target = AB[:]
Target.pop(i)
G = [[i+1, 0] for i in range(N)]
for ab in Target:
a, b = ab
G[a-1][1] += 1
G[b-1][1] += 1
G[a-1].append(b)
G[b-1].append(a)
F = [False] * N
Q = deque()
Q.append(1)
F[0] = True
while Q:
p = Q.pop()
if G[p-1][1] == 0:
continue
for np in G[p-1][2:]:
if F[np-1]:
continue
Q.append(np)
F[np-1] = True
for f in F:
if f == False:
ans += 1
break
print(ans)
if __name__ == "__main__":
# unittest.main()
resolve()
|
[
"mskt4440@gmail.com"
] |
mskt4440@gmail.com
|
ec31acbdb0cf41622d1a325d3f894382ad8fd78f
|
d4fa331d7d8a00865f99ee2c05ec8efc0468fb63
|
/alg/remove_k_digits.py
|
f25427c08b7db78277402c25b6aa25fed1054238
|
[] |
no_license
|
nyannko/leetcode-python
|
5342620c789a02c7ae3478d7ecf149b640779932
|
f234bd7b62cb7bc2150faa764bf05a9095e19192
|
refs/heads/master
| 2021-08-11T04:11:00.715244
| 2019-02-05T15:26:43
| 2019-02-05T15:26:43
| 145,757,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 537
|
py
|
class Solution(object):
def removeKdigits(self, num, k):
"""
:type num: str
:type k: int
:rtype: str
"""
if len(num) <= k:
return '0'
stack = []
for i in num:
while stack and k > 0 and stack[-1] > i:
stack.pop()
k -= 1
stack.append(i)
# while k > 0:
# stack.pop()
# k -= 1
if k:
stack = stack[:-k]
return ''.join(stack).lstrip('0') or '0'
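# Quick sanity checks (classic examples; illustrative only):
if __name__ == "__main__":
    s = Solution()
    print(s.removeKdigits("1432219", 3))  # "1219": larger leading digits are dropped greedily
    print(s.removeKdigits("10200", 1))    # "200": the leading zero is stripped afterwards
    print(s.removeKdigits("10", 2))       # "0": removing every digit leaves "0"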
|
[
"9638293+nyannko@users.noreply.github.com"
] |
9638293+nyannko@users.noreply.github.com
|
a334a0c204fac1c32004bf0b488df99ca06cd6c8
|
fdb91a44b774edb78ec904e2a76edd60b3aac528
|
/ex25.py
|
6db1261d6f3604391723219fe40743dc1a5979f9
|
[] |
no_license
|
xia0m/LPTHW
|
9447cdff2a84f2a867f34d6b3b2e9d4b46bf3c0a
|
4f23b0e60d2e2e38d8f989a3a7f616c6c5e90c1d
|
refs/heads/master
| 2020-05-14T14:26:36.742772
| 2019-04-22T06:12:09
| 2019-04-22T06:12:09
| 181,833,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,392
|
py
|
def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Worts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print(word)
def print_last_word(words):
"""Prints the last word after popping if off."""
word = words.pop(-1)
print(word)
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
# 1 import ex25
# 2 sentence = "All good things come to those who wait."
# 3 words = ex25.break_words(sentence)
# 4 words
# 5 sorted_words = ex25.sort_words(words)
# 6 sorted_words
# 7 ex25.print_first_word(words)
# 8 ex25.print_last_word(words)
# 9 words
# 10 ex25.print_first_word(sorted_words)
# 11 ex25.print_last_word(sorted_words)
# 12 sorted_words
# 13 sorted_words = ex25.sort_sentence(sentence)
# 14 sorted_words
# 15 ex25.print_first_and_last(sentence)
# 16 ex25.print_first_and_last_sorted(sentence)
# STUDY DRILLS
# 1. Take the remaining lines of the What You Should See output and figure out what they are doing. Make sure you understand how you are running your functions in the ex25 module.
# 2. Try doing this: help(ex25) and also help(ex25.break_words). Notice how you get help for your module and how the help is those odd """ strings you put after each function in ex25? Those special strings are called documentation comments, and we’ll be seeing more of them.
# 3. Typing ex25. is annoying. A shortcut is to do your import like this: from ex25 import *. This is like saying, “Import everything from ex25.” Programmers like saying things backward. Start a new session and see how all your functions are right there.
# 4. Try breaking your file and see what it looks like in python when you use it. You will have to quit python with quit() to be able to reload it.
|
[
"alexma325@gmail.com"
] |
alexma325@gmail.com
|
e48c897fdb5024719e538c8eef85ba293d1b3b3b
|
77c6d0e5a25eb7b16d8c6a843b9e9915d6f6afd7
|
/apps/order/views.py
|
fc8724a12eaab415b3189f3403ae6c0a4a4dea0e
|
[] |
no_license
|
zhangwei725/shop_projects
|
28c794bcba6f79b4f017b17fc0942afb3ed5f2b1
|
24a98ae5ff4fb6552d3315f5b3690e3bc5b82ab6
|
refs/heads/master
| 2020-03-28T20:31:50.180680
| 2018-09-21T09:40:46
| 2018-09-21T09:40:46
| 149,079,157
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,585
|
py
|
import datetime
import random
from decimal import Decimal
from django.http import HttpResponse
from django.shortcuts import render
from apps.home.models import ShopCar, Order, Shop
from django.db import transaction
"""
Fetch all of the selected shopping-cart records and display them
#
# Step 1: choose a delivery address
# Step 2: choose a payment method
# Step 3: choose a shipping method
# Submitting the order writes to the order table
# 1> generate an order number
# 2> decrease the stock of each purchased product
# 3> update the shopping-cart table
# Queries across several tables -> use join queries
# Writes touching several tables -> use a transaction (atomicity, consistency, isolation (isolation level), durability)
"""
def confirm(request):
# the cart records selected for checkout
ids = [12, 13, 14]
cars = ShopCar.objects.filter(car_id__in=ids)
return render(request, 'confirm.html', {'cars': cars})
# field qualifiers
# the tables are related to one another
# checkout -> confirm order -> create order; the main table is the order table (order)
# @transaction.atomic() can also be applied as a decorator
def create_order(request):
    # operations that touch several tables need a transaction
    ids = [12, 13, 14]
    # write the order row --- update product stock --- update the cart rows with the order id
    # step 1: generate an order number (it must be unique site-wide)
    # format: year month day hour minute second plus a random suffix
    # the transaction is opened by the `with transaction.atomic()` block below
order_code = f'{datetime.datetime.strftime(datetime.datetime.now(), "%Y%m%d%H%M%S")}{random.randint(10, 99)}'
try:
# recommended: use transaction.atomic() as a context manager
with transaction.atomic():
# step 2: insert a record into the order table
order = Order(order_code=order_code, address='湖北省武汉市', mobile=110, receiver='娇娇', user_message='请帮我带个男友',
user=request.user.userprofile)
order.save()
# status 2 means the user has already purchased the item
cars = ShopCar.objects.filter(car_id__in=ids)
# running total of the order amount
total = 0.00
for car in cars:
car.status = 2
car.order_id = order.pk
car.save(update_fields=['status', 'order_id'])
# product stock
if car.shop.stock >= car.number:
car.shop.stock -= car.number
car.shop.save(update_fields=['stock'])
total += car.number * float(str(car.shop.promote_price.quantize(Decimal('0.00'))))
else:
# generate the order
pass
# initiate payment
#
except Exception as e:
return HttpResponse('2222')
return HttpResponse('11111')
|
[
"18614068889@163.com"
] |
18614068889@163.com
|
6127e057f7dff15cd81fd6834820cf5db6e6a872
|
5f123b35d63e60982b0d034c40614ea1d8f288a4
|
/AndroidGuard/examples/omegacodee.py
|
addf94b6452ab83bbc81e581f64a8a6c0efe85a8
|
[
"Apache-2.0"
] |
permissive
|
Simplewyl2000/Similarity_Detection
|
2f5d76d8b50474bb226a4ea524f97c504aa1b2ac
|
9018c120cb4023a24de8032e8aa7d55cf42f2446
|
refs/heads/master
| 2020-05-22T23:37:39.885251
| 2019-05-10T03:13:29
| 2019-05-10T03:16:31
| 186,562,541
| 0
| 0
| null | 2019-05-14T06:52:18
| 2019-05-14T06:52:17
| null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
import os
import numpy
file1="omegafrequency.txt"
file=open(file1,'r')
listomega=[]
listfrequency=[]
for eachline in file:
temp=eachline.strip('\n').strip("['").strip("]").split("',")
#print(temp)
listomega.append(int(temp[0]))
listfrequency.append(temp[1])
#print(listomega)
for j in listomega:
if int(j)<53:
filewritename='../omegacodee/'+str(j)+'.txt'
fwritename=open(filewritename,'w')
basedir="../outputremoveduplicate/"
lisfile=os.listdir(basedir)
for filename in lisfile:
#print (filename)
f=open(basedir+filename,'r')
for fline in f:
listnum=fline.strip(' ').strip('\n').strip('[').strip(']').split(',')
if int(listnum[1])==int(j):
fwritename.write(fline)
fwritename.write('\n')
f.close()
fwritename.close()
|
[
"734966463@qq.com"
] |
734966463@qq.com
|
be9ff97e74554405f78e2ae14f59d41d20871ca0
|
fd5de9c7489f38eae683582b0476a7685b09540a
|
/config.py
|
5c7740e6edfd8adda65027b1b6ac8137b8a88701
|
[] |
no_license
|
FrankArchive/ICPC_Challenges
|
8cdf979909014345c9e1198e82fce55eaa65a186
|
366c631c3464f4348c4cb07c4ff425640f84fe71
|
refs/heads/master
| 2022-03-01T11:53:11.326549
| 2019-09-19T05:49:14
| 2019-09-19T05:49:14
| 201,845,208
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
import os
JUDGE_ADDR = os.getenv('JUDGE_ADDR') or 'localhost'
JUDGE_PORT = os.getenv('JUDGE_PORT') or '5000'
JUDGE_PORT = int(JUDGE_PORT)
JUDGE_TOKEN = os.getenv('JUDGE_TOKEN') or 'set_token'
|
[
"frankli0324@hotmail.com"
] |
frankli0324@hotmail.com
|
90d662d9b82ee1a8490bdc09aa96fc25d2c0ce6e
|
832852c679816673f708860929a36a20ca8d3e32
|
/Configurations/HighMass/Full2017/configuration_mm.py
|
1ee0bb7d5dbf9cfab8779a7973ed2065f8bd52d3
|
[] |
no_license
|
UniMiBAnalyses/PlotsConfigurations
|
c4ec7376e2757b838930dfb2615e1dc99a64e542
|
578fe518cfc608169d3418bcb63a8342d3a24390
|
refs/heads/master
| 2023-08-31T17:57:45.396325
| 2022-09-01T10:13:14
| 2022-09-01T10:13:14
| 172,092,793
| 0
| 13
| null | 2023-04-27T10:26:52
| 2019-02-22T15:52:44
|
Python
|
UTF-8
|
Python
| false
| false
| 905
|
py
|
# example of configuration file
treeName= 'Events'
tag = 'Full2017_mm'
# used by mkShape to define output directory for root files
outputDir = 'rootFile_'+tag
# file with TTree aliases
aliasesFile = 'aliases.py'
# file with list of variables
variablesFile = 'variables.py'
# file with list of cuts
cutsFile = 'cuts_ee_mm.py'
# file with list of samples
samplesFile = 'samples.py'
# file with list of samples
plotFile = 'plot.py'
# luminosity to normalize to (in 1/fb)
lumi = 41.5
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
outputDirPlots = 'plot_'+tag
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacards'
# structure file for datacard
#structureFile = 'structure.py' # Is this even needed still?
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
|
[
"dennis.roy@cern.ch"
] |
dennis.roy@cern.ch
|
f0db0d024328299a986df6e4bece188d36f970c2
|
92f9fd4397d88619073c17174f3d52f5f489d4e4
|
/contrib/devtools/fix-copyright-headers.py
|
b87a96eb6f4f577fc786621653ac85686017456e
|
[
"LicenseRef-scancode-other-permissive",
"MIT"
] |
permissive
|
diablax2/bts
|
380df7562d73a292e641faaff1b0d1e17a10f0a8
|
fe3c727ce607e11bee64bb03afadb653e9bd23fd
|
refs/heads/master
| 2020-04-24T21:57:40.173603
| 2019-02-25T06:33:48
| 2019-02-25T06:33:48
| 172,295,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,336
|
py
|
#!/usr/bin/env python
'''
Run this script to update all the copyright headers of files
that were changed this year.
For example:
// Copyright (c) 2009-2012 The Bitcoin Core developers
it will change it to
// Copyright (c) 2009-2015 The Bitcoin Core developers
'''
import os
import time
import re
year = time.gmtime()[0]
CMD_GIT_DATE = 'git log --format=@%%at -1 %s | date +"%%Y" -u -f -'
CMD_REGEX= "perl -pi -e 's/(20\d\d)(?:-20\d\d)? The BTS/$1-%s The BTS/' %s"
REGEX_CURRENT= re.compile("%s The BTS" % year)
CMD_LIST_FILES= "find %s | grep %s"
FOLDERS = ["./qa", "./src"]
EXTENSIONS = [".cpp",".h", ".py"]
def get_git_date(file_path):
r = os.popen(CMD_GIT_DATE % file_path)
for l in r:
# Result is one line, so just return
return l.replace("\n","")
return ""
n=1
for folder in FOLDERS:
for extension in EXTENSIONS:
for file_path in os.popen(CMD_LIST_FILES % (folder, extension)):
file_path = os.getcwd() + file_path[1:-1]
if file_path.endswith(extension):
git_date = get_git_date(file_path)
if str(year) == git_date:
# Only update if current year is not found
if REGEX_CURRENT.search(open(file_path, "r").read()) is None:
print n,"Last git edit", git_date, "-", file_path
os.popen(CMD_REGEX % (year,file_path))
n = n + 1
|
[
"47169271+BitcoinSDN@users.noreply.github.com"
] |
47169271+BitcoinSDN@users.noreply.github.com
|
e1c8772a70ff0b7a5ead0b6c73d8adda9807dd1a
|
28c598bf75f3ab287697c7f0ff1fb13bebb7cf75
|
/testgame.mmo/genesis/spawn/spawnmain.py
|
d1a6e96ee033931ad1e1cf4df3507ff6d4965fc9
|
[] |
no_license
|
keaysma/solinia_depreciated
|
4cb8811df4427261960af375cf749903d0ca6bd1
|
4c265449a5e9ca91f7acf7ac05cd9ff2949214ac
|
refs/heads/master
| 2020-03-25T13:08:33.913231
| 2014-09-12T08:23:26
| 2014-09-12T08:23:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
import races
import animal
import npc
"""
#Critter Pack
#http://www.mmoworkshop.com/trac/mom/wiki/Store
"""
#import critters
"""
#Monster Pack Examples
#http://www.mmoworkshop.com/trac/mom/wiki/Store
"""
#import monsters
"""
Mythical Creature Pack Examples
http://www.mmoworkshop.com/trac/mom/wiki/Store
"""
#import mythical
|
[
"mixxit@soliniaonline.com"
] |
mixxit@soliniaonline.com
|
fc636c063c6ddb1fa97d59630f452e3e84f662d4
|
b51fcf9d94ad483139d5e5f17785f7dcb39404ae
|
/mdl/Guideline36Spring/QSS/run.py
|
d214ff42afa470f3d1f0bc98d8708f2972f25000
|
[] |
no_license
|
NREL/SOEP-QSS-Test
|
7d508f79dd3a49b609e6400b8c5a05757cb22928
|
bd8e3c39d0a205b7b3cf3c64bc2c500e761a76e8
|
refs/heads/main
| 2023-08-30T21:23:53.766525
| 2023-08-30T03:02:11
| 2023-08-30T03:02:11
| 97,882,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
#!/usr/bin/env python
import subprocess, sys
args = ' --zrFac=1000 --dtND=1e-4 --dtInf=0.001 --dtOut=100 --out=sSXL ' + ' '.join( sys.argv[1:] )
with open( 'run.log', 'w' ) as log:
subprocess.run( 'run_QSS.py' + args, stdout = log, stderr = subprocess.STDOUT, shell = True )
|
[
"Stuart_Mentzer@objexx.com"
] |
Stuart_Mentzer@objexx.com
|
cb7c05a54a44455c1eaa0a2c45bd633da858aa80
|
14bcdb37b818638fc9d6f2f4e4595c82685b8972
|
/network_visualizer.py
|
86481484d4093c96de66a95989456e0350b7778d
|
[] |
no_license
|
shainesh77/Testing
|
7e8b3cd135b722d8bff1ef29eedebfd862c954b5
|
45f593871657c57de4981512dcd2f6a841eb78d5
|
refs/heads/main
| 2023-07-13T23:56:10.835377
| 2021-08-18T12:19:35
| 2021-08-18T12:19:35
| 397,588,629
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
import os
from keras.engine.topology import InputLayer
from keras.models import Model
from keras.layers import Conv2D
from flask import Flask, jsonify, send_from_directory
from src.game_environment import GameEnvironment
from src.state_machine_game_environment import StateMachineGameEnvironment
import src.model_util as model_util
from src.experience_replay import ExperienceReplay
import kmri
game = GameEnvironment()
# game = StateMachineGameEnvironment()
model = model_util.get_model(
img_width=150,
img_height=38,
num_labeled_inputs=game.get_num_labeled_inputs(),
num_actions=len(game.actions),
weights_file_path='data/model_weights.h5'
)
layer_metadata = {
'dense_2': {
'max': 'auto'
}
}
exp_replay = ExperienceReplay(model=model, max_memory=200000, discount=.9)
exp_replay.load_memory()
img_inputs, labeled_inputs, targets = exp_replay.get_short_term_batch(num_frames_before_death=50, num_deaths=20)
kmri.visualize_model(model, [img_inputs, labeled_inputs])
|
[
"noreply@github.com"
] |
noreply@github.com
|
d6e82d1a43184d0b47aeb99ebe45b7630327dd6a
|
28e0e93f853e4d7f99edbbb83ceb91e4e2b50256
|
/src/rules.py
|
41746d9b1dac61ea2264e997bbefbbd865e1579b
|
[
"MIT"
] |
permissive
|
harry-124/sbsim-19
|
fe2fbfa5cf84b1e75190d0f1213169d654289dcf
|
d70f3d0caa6daa1db038c83c03cea215b49afb98
|
refs/heads/master
| 2020-07-23T15:06:25.113554
| 2020-01-28T16:55:43
| 2020-01-28T16:55:43
| 207,603,896
| 0
| 0
|
MIT
| 2020-01-28T16:55:44
| 2019-09-10T16:04:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,910
|
py
|
#!/usr/bin/env python
import sys
import physics as p
import pygame as pg
import pid
import rospy
import math as m
from geometry_msgs.msg import Pose, Twist
from sbsim.msg import goalmsg
from sbsim.msg import dribble
import controller as c
from std_msgs.msg import Int32
from std_msgs.msg import Float64
r10 = Pose()
r11 = Pose()
r20 = Pose()
r21 = Pose()
ball = Pose()
d = dribble()
def subinit():
rospy.Subscriber('ballpose',Pose,ballcallback)
rospy.Subscriber('robot1n0/pose',Pose,r10callback)
rospy.Subscriber('robot1n1/pose',Pose,r11callback)
rospy.Subscriber('robot2n0/pose',Pose,r20callback)
rospy.Subscriber('robot2n1/pose',Pose,r21callback)
rospy.Subscriber('game/dribdist',Float64,ddcallback)
rospy.Subscriber('game/dribbler',Int32,drcallback)
def boundcheck(a):
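# returns 1 if point a lies outside the field bounds (|x| >= 320 or |y| >= 303), otherwise 0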
dir = [0,0]
if a.x >= 320:
dir[0] = 1
fx= 1
elif a.x <= -320:
dir[0] = -1
fx = 1
else:
dir[0] = 0
fx = 0
if a.y >= 303:
dir[0] = 1
fy = 1
elif a.y <= -303:
dir[0] = -1
fy = 1
else:
dir[0] = 0
fy = 0
f = fx + fy
if f ==2:
f =1
return f
def ddcallback(msg):
return 0
def drcallback(msg):
return 0
def ballcallback(msg):
global ball
ball = msg
return 0
def r10callback(msg):
global r10
r10 = msg
return 0
def r11callback(msg):
global r11
r11 = msg
return 0
def r20callback(msg):
global r20
r20 = msg
return 0
def r21callback(msg):
global r21
r21 = msg
return 0
def updatebpose(a,b):
b.x = a.position.x
b.y = a.position.y
def updaterpose(a,b):
b.x = a.position.x
b.y = a.position.y
b.theta = 2 * m.atan(a.orientation.z)
if __name__ == '__main__':
rospy.init_node('rules',anonymous=True)
statuspub = rospy.Publisher('game/status', Int32, queue_size=10)
rate = rospy.Rate(30)
subinit()
b = p.ball(x = ball.position.x,y = ball.position.y)
r1 = p.robot(x =0 ,y =0,yaw =0 ,ball =b)
r2 = p.robot(x =0 ,y =0,yaw =0 ,ball =b)
r3 = p.robot(x =0 ,y =0,yaw =0 ,ball =b)
r4 = p.robot(x =0 ,y =0,yaw =0 ,ball =b)
updaterpose(r10,r1)
updaterpose(r11,r2)
updaterpose(r20,r3)
updaterpose(r21,r4)
i = 0
while(True):
i = i+1
b.x = ball.position.x
b.y = ball.position.y
updaterpose(r10,r1)
updaterpose(r11,r2)
updaterpose(r20,r3)
updaterpose(r21,r4)
# checking for out of bounds
f = boundcheck(b)
if f == 1:
# checking for goals
if b.x>320 and b.y<180 and b.y>-180:
print('goal for team 1')
f = 2
if b.x<-320 and b.y<180 and b.y>-180:
print('goal for team 2')
f = 3
statuspub.publish(f)
f = 0
rate.sleep()
|
[
"srike27@gmail.com"
] |
srike27@gmail.com
|
a56a7a18cf5105747b45d3be8f72ba207bc8a2d8
|
eb7e4b062b7fc9c6434bed24f8f9c65c96df7914
|
/filesort.py
|
bcaf483272d37b9eb192e993c56331c86ed47985
|
[] |
no_license
|
kitizl/FileSorterPy
|
0ff545b32130e348be821263fae2613d58a753a4
|
5c386d40dff4ca3a2b0ef8009ae24c6874d1fa90
|
refs/heads/master
| 2020-03-27T13:23:18.384074
| 2018-08-29T17:04:39
| 2018-08-29T17:04:39
| 146,606,133
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,360
|
py
|
#!python3
import os
import sys
import re
import glob
import shutil
START_FOLDER = os.getcwd()
def numToMonth(number):
return ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"][number-1]
def getDate(filename):
# filename = yyyymmdd-nnnnnn
date = re.findall(r"(\d{4})(\d{2})(\d{2})\_\d{6}",filename)[0]
return (date[0],numToMonth(int(date[1])),date[2])
def createFolder(path):
if not os.path.exists(path):
os.mkdir(path)
os.chdir(path)
def createTree(filename):
date_data = getDate(filename)
yypath = os.path.join(".",date_data[0])
# create yy if not exists
createFolder(yypath)
mmpath = os.path.join(".",date_data[1])
# create mm if not exists
createFolder(mmpath)
ddpath = os.path.join(".",date_data[2])
# create dd if not exists
createFolder(ddpath)
# The "." was added in the paths because the directory will be changed anyway
return os.getcwd()
def movefiles():
# return list of files in the folder
filelist = [f for f in glob.glob("*.???*")]
for file in filelist:
currentpath = createTree(file)
# move each file from folder_path to finalpathstring
shutil.move(os.path.join(START_FOLDER,file),os.path.join(currentpath,file))
# go back to main folder_path
os.chdir(START_FOLDER)
return "Success"
# running the code below
movefiles()
|
[
"nithesh.dragoon98@gmail.com"
] |
nithesh.dragoon98@gmail.com
|
41e4fc4f2a66d20510bcfed184eb34f465362860
|
d1a7dcd0ffacd057dc09b19766d96db8d54334bf
|
/HW2/code/linear_regression/main_poly_model_selection.py
|
a37292c5e3221848fe63f402a6a816fb0d8bdd2e
|
[] |
no_license
|
brg3n3r/ComputationalIntelligenceUE
|
d64ff3435258a085dacbd6bbbe90ee6abfea8d6a
|
86db363d8132ceea30a696abc6e28983f8556927
|
refs/heads/master
| 2021-05-24T14:06:50.633304
| 2020-06-30T14:55:54
| 2020-06-30T14:55:54
| 253,597,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,741
|
py
|
#!/usr/bin/env python
import numpy as np
import json
import matplotlib.pyplot as plt
from plot_poly import plot_poly, plot_errors
import poly
#!!! import os
"""
Assignment: Linear and Logistic Regression
Section: Linear Regression with polynomial features
This file:
1) loads the data from 'data_linreg.json'
2) trains and tests a linear regression model for K degrees
3) TODO: select the degree that minimizes validation error
4) plots the optimal results
TODO boxes are here and in 'poly.py'
"""
def main():
# Number of possible degrees to be tested
K = 30
#files = os.getcwd()
#print(files)
#os.chdir('C:/Users/mbuergener/Desktop/CI_Temp/CI_HW2/code/linear_regression') #!!!!!!!!!!!!!!!!!
data_path = 'data_linreg.json'
# Load the data
f = open(data_path, 'r')
data = json.load(f)
for k, v in data.items():
data[k] = np.array(v).reshape((len(v), 1))
# Init vectors storing MSE (Mean squared error) values of each set at each degrees
mse_train = np.zeros(K)
mse_val = np.zeros(K)
mse_test = np.zeros(K)
theta_list = np.zeros(K, dtype=object)
degrees = np.arange(K) + 1
# Compute the MSE values
for i in range(K):
theta_list[i], mse_train[i], mse_val[i], mse_test[i] = poly.train_and_test(data, degrees[i])
######################
#
# TODO
#
# Find the best degree that minimizes the validation error.
# Store it in the variable i_best for plotting the results
#
# TIPs:
# - use the argmin function of numpy
# - the code above is already giving the vectors of errors
i_best_train = np.argmin(mse_train)
i_best_val = np.argmin(mse_val)
#i_best_test = np.argmin(mse_test)
#mse_train_norm = mse_train / np.max(mse_train)
#mse_val_norm = mse_val / np.max(mse_val)
#mse_test_norm = mse_test / np.max(mse_test)
#
# END TODO
######################
i_plots = np.array([1, 5, 10, 22]) - 1
i_plots = np.append(i_plots,[i_best_train, i_best_val])
#Plot the training error as a function of the degrees
#plt.figure()
#plot_errors(i_best, degrees, mse_train, mse_val, mse_test)
#plot_poly(data, best_degree, best_theta)
#plt.show()
for element in i_plots:
plot_poly(data, degrees[element], theta_list[element])
plt.tight_layout()
plt.show()
#plot_errors(i_best_test, degrees, mse_train_norm, mse_val_norm, mse_test_norm)
#plt.show()
plt.figure() #!!!
plot_errors(i_best_val, degrees, mse_train, mse_val, mse_test)
plt.show()
if __name__ == '__main__':
plt.close('all')
main()
|
[
"maxbuergener@me.com"
] |
maxbuergener@me.com
|
a3719f7dffa582ca17d74a9984494b0a1d048e71
|
499f2596cd40ad5ae8510d735f1d6c699b044050
|
/GlibcGenerator.py
|
bc0d0d4fdf577902720693075822b021bdfc4344
|
[] |
no_license
|
alicja-michniewicz/crypto-break-lcg
|
bdaab30c15a6938aed180f33402613ad59cc4f61
|
4ba9139faa7904db7f9cbff74b05974998b67bbd
|
refs/heads/master
| 2021-01-24T02:53:37.931661
| 2018-03-11T12:11:58
| 2018-03-11T12:11:58
| 122,865,781
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
import random
class GlibcGenerator:
def __init__(self, seed_int:int) -> None:
super().__init__()
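# NOTE: glibc_prng is not part of Python's standard random module; this presumably
# relies on a custom/patched random module in this project that exposes a glibc-style LCG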
self.gen = random.glibc_prng(seed_int)
def generate(self):
return next(self.gen)
def generate_n(self, n):
return list([self.generate() for i in range(n)])
|
[
"alicjamichniewicz@gmail.com"
] |
alicjamichniewicz@gmail.com
|
fc9e559deb7f5bddce6f8748ac93e3cc190dfb31
|
0130533e0f40a0f1cf476f519a3673b10ceabff3
|
/teste/maximo.py
|
b0fd9c6f4d4edd354a14ef1c57bb97f12fe9654e
|
[] |
no_license
|
danielcanuto/revisao_python
|
d79c8fbf475e1cea12ca9719d02868666e0591db
|
3dbd2af74c7cc94f8e1962acb4069f40d0e71772
|
refs/heads/main
| 2023-03-02T04:37:30.777336
| 2021-02-11T11:16:54
| 2021-02-11T11:16:54
| 337,031,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
def maior(x, y):
if x > y:
return x
else:
return y
def maximo(x, y, z):
a = maior(x, y)
return maior(a, z)
|
[
"danielpscanuto83@gmail.com"
] |
danielpscanuto83@gmail.com
|
0ce5054c29d7414e6c56e074af1b1ef1b32afe58
|
f95e73867e4383784d6fdd6a1c9fe06cffbfd019
|
/CheckIO/HOME/pawn_brotherhood.py
|
4b0929a05d3c3562eadcb0a6374c8a5fdf00444c
|
[] |
no_license
|
linxiaohui/CodeLibrary
|
da03a9ed631d1d44b098ae393b4bd9e378ab38d3
|
96a5d22a8c442c4aec8a064ce383aba8a7559b2c
|
refs/heads/master
| 2021-01-18T03:42:39.536939
| 2018-12-11T06:47:15
| 2018-12-11T06:47:15
| 85,795,767
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
#!/usr/bin/env python
# *-* coding:UTF-8 *-*
def safe_pawns(pawns):
cnt=0
for l in pawns:
col,row=l.lower()
if int(row)==1:
continue
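# a pawn is safe when another pawn stands one square diagonally behind it (adjacent file, one rank lower)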
if col>='b' and chr(ord(col)-1)+str(int(row)-1) in pawns or col<='g' and chr(ord(col)+1)+str(int(row)-1) in pawns:
cnt+=1
return cnt
if __name__ == '__main__':
#These "asserts" using only for self-checking and not necessary for auto-testing
assert safe_pawns({"b4", "d4", "f4", "c3", "e3", "g5", "d2"}) == 6
assert safe_pawns({"b4", "c4", "d4", "e4", "f4", "g4", "e5"}) == 1
|
[
"llinxiaohui@126.com"
] |
llinxiaohui@126.com
|
78792c4fe3cdb3800594e5d3efa5738bab851ebf
|
7b1067f680621b84c28571ba8488308b00b055f0
|
/week1/day4/test.py
|
3b771e0ec6ddb33b587048b7691e0b8b24eff933
|
[] |
no_license
|
wangfei1000/python-study
|
ddf9149e42cff02c75bca036243b603e25188a30
|
781e9edeca1d956325e56858b4d484beff121bec
|
refs/heads/master
| 2021-09-02T16:11:01.271486
| 2018-01-03T15:05:37
| 2018-01-03T15:05:37
| 116,148,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,105
|
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# Author wangfei
# def f1():
# return a1+100
#
# f2 = 100+1
#
#
# print(callable(f2))
# print(chr(65))
# print(ord("a"))
# LIST = []
# import random
# for line in range(6):
# num = random.randrange(9)
# if num == 3 or num == 5 or num == 1:
# # num = random.randrange(4)
# LIST.append(str(num))
# else:
#
# num = random.randrange(65,90)
# Str = chr(num)
# LIST.append(Str)
#
#
#
#
#
# listnum = "".join(LIST)
# print(listnum)
# convert a string into python code
# s = 'print("hehe")'
# r = compile(s,"<string>","eval")
# print(eval(r))
# print(r)
# print(exec(r))
# print(eval(r))
# s = "8*8"
# # r = eval(s)
# r2 = exec(s)
# print(r2)
# r = divmod(100,10)
# print(r)
# r = isinstance(s,dict)
# print(r)
# the traditional approach
def f1(a1):
rli = []
for i in a1:
if i > 3:
rli.append(i)
return rli
#
# li = [1, 2, 3, 4, 5, 6]
# # r = f1(li)
# # print(r)
#
#
# # the filter approach
#
# def f2(a2):
# if a2 > 3:
# print(a2)
# return True
#
# r = filter(f2,li)
# f3 = lambda a2 : a2 > 3
# r2 = filter(f3,li)
# print(list(r2))
# print(list(f3))
#
#
# def f2(a2):
# nli = []
# for i in a2:
# nli.append(i+100)
# return nli
#
# li = [1,2,3,4,5,6,7,8]
# r = f2(li)
# print(r)
#
# def f3(a3):
# return a3 + "99"
#
# li = ["a","b","c"]
# r3 = map(f3,li)
# print(list(r3))
#
# print(list(map(lambda a4: a4+"99",li)))
# s = "name: %s ,age :%d" % ("wf",26)
# print(s)
# %[(name)][flags][width].[precision]typecode
# right-aligned
# s2 = "name:%(name) +10and age %(age)" %{'name':'Mm','age':40}
# print(s2)
# left-aligned
# s3 = "hehe%(name)-10sand" %{'name':'Mm','age':40}
# print(s3)
# decimals
# s4 = "hehe%(name) -2s and %(p)f" %{'name':'Mm','age':40,"p":1.234567}
# print(s4)
# keep only 2 decimal places
# s4 = "%(p).2f" %{"p":1.234567}
# print(s4)
#
# print("%c"%(65))
# print("%.2f" %(0.13455666))
# s = "%.2f" % (0.12345)
# s2 = "%(num).2f"%{"num":0.19345}
# s4 = "wangfei is %s" %("wangfei")
# print(s4)
# s1 = "my name is {0},i am {1} years old.".format("wangfei",20)
# print(s1)
# s2 = "my name is {name}, i am {age} year old.".format(name="wf",age=19)
# print(s2)
# s3 = "my name is {0},i am {1} years old.".format(*["wangfei",20])
# print(s3)
# s4 = "my name is {name},{age} years old.".format(name="wangfei",age=19)
# print(s4)
# s5 = "my name is {name},{age} years old.".format(**{"name":"wangfei","age":19})
# print(s5)
# def func():
# print("123")
# yield 1
# print("456")
# yield 2
# print("789")
# yield 3
#
# ret = func()
# print(ret.__next__())
# print(ret.__next__())
# print(ret.__next__())
# def f2(n):
# n+=1
# if n>10:
# return "end"
# print(n)
#
# return f2(n)
#
# print(f2(1))
def f1():
print("1")
yield 1
print("2")
yield 2
print("3")
yield 3
print("4")
yield 4
#
# r = f1()
# print(r.__next__())
# print(r.__next__())
# print(r.__next__())
# print(r.__next__())
# print(list(r)[0])
import s4
s4.logging()
|
[
"wangfei1000@yeah.net"
] |
wangfei1000@yeah.net
|
0f0de7f4f62d5363e19ca6ac55276c6a92bce3dc
|
d16292aad097ee66c356093731132ca148a39df2
|
/LeetCode_Python/Test205.py
|
4556c0f9c5a13095b83d22700504649331877601
|
[] |
no_license
|
zhuyingtao/leetcode
|
702189a521506b1d651f75e604aa98105ef7580c
|
b7c520e3fb4e487ed625733bea373f2429c217c9
|
refs/heads/master
| 2021-01-14T13:21:50.277663
| 2020-03-08T16:09:42
| 2020-03-08T16:09:42
| 39,811,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
__author__ = 'zyt'
class Solution:
# @param {string} s
# @param {string} t
# @return {boolean}
def isIsomorphic(self, s, t):
# ds = {} # can't ds=dt={}
# dt = {}
# cs = ct = 0
# for i in range(len(s)):
# if s[i] not in ds:
# ds[s[i]] = cs
# cs += 1
# if t[i] not in dt:
# dt[t[i]] = ct
# ct += 1
# for i in range(len(s)):
# if ds[s[i]] != dt[t[i]]:
# return False
# return True
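# one-liner: s and t are isomorphic iff the number of distinct (s[i], t[i]) pairs
# equals the number of distinct characters in s and in t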
return len(set(zip(s, t))) == len(set(s)) == len(set(t))
print(Solution().isIsomorphic("paper", "title"))
|
[
"yingtao.zhu@foxmail.com"
] |
yingtao.zhu@foxmail.com
|
abcfc7f85883e49ffa5113a31431886ddf533f5c
|
5b1b478b0e7b8069762855baa8a2a4f6ff48ebf4
|
/src/reviews/forms.py
|
bf83b29d371abc3b2b2686430c5fe69d7b383f5e
|
[
"MIT"
] |
permissive
|
junaidq1/greendot
|
9e4a0402fcee7182ca7531a0dd4a48edb43f79c5
|
cd9e7791523317d759e0f5f9cf544deff34a8c79
|
refs/heads/master
| 2020-04-06T06:54:07.994376
| 2016-09-11T18:33:15
| 2016-09-11T18:33:15
| 61,906,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,047
|
py
|
from django import forms
from .models import Review, Employee
from registration.forms import RegistrationFormUniqueEmail #this is to edit the registration redux form
# class ReviewForm(forms.ModelForm):
# class Meta:
# model = Review
# fields = [
# "content",
# "employee",
# "work_again",
# ]
#actual review post form
class ReviewForm2(forms.ModelForm):
class Meta:
model = Review
fields = ["length_working", "ques1", "ques2", "ques3","work_again", "content"]
# def content_clean(self):
# content = self.cleaned_data.get('content')
# print "jimmy"
# print len(content)
# if len(content) < 70:
# raise forms.ValidationError("Please provide a more impactful review")
# return content
#this form edits the registration redux form
class UserLevelRegistrationForm(RegistrationFormUniqueEmail):
LEVEL_CHOICES = (
('PPD', 'PPD'),
('BA', 'BA'),
('C', 'C'),
('SC', 'SC'),
('M', 'M'),
('SM', 'SM'),
('Other', 'other'),
)
OFFICE_CHOICES = (
('Kansas City', 'Kansas City'),
('Atlanta', 'Atlanta'),
('Austin', 'Austin'),
('Bengaluru', 'Bengaluru'),
('Boston', 'Boston'),
('Charlotte', 'Charlotte'),
('Chicago', 'Chicago'),
('Cincinnati', 'Cincinnati'),
('Cleveland', 'Cleveland'),
('Dallas', 'Dallas'),
('Denver', 'Denver'),
('Detroit', 'Detroit'),
('Gurgaon', 'Gurgaon'),
('Houston', 'Houston'),
('Los Angeles', 'Los Angeles'),
('McLean', 'McLean'),
('Miami', 'Miami'),
('Minneapolis', 'Minneapolis'),
('Mumbai', 'Mumbai'),
('New York City', 'New York City'),
('Orange County', 'Orange County'),
('Parsippany', 'Parsippany'),
('Philadelphia', 'Philadelphia'),
('Pittsburgh', 'Pittsburgh'),
('San Francisco', 'San Francisco'),
('Seattle', 'Seattle'),
('Other', 'other'),
)
ServiceArea_CHOICES = (
('S&O', 'S&O'),
('Tech', 'Tech'),
('Human Capital', 'Human Capital'),
)
level = forms.ChoiceField(choices=LEVEL_CHOICES, label="What is your level at the firm?")
office = forms.ChoiceField(choices=OFFICE_CHOICES, label="What office are you based out of?")
service_area = forms.ChoiceField(choices=ServiceArea_CHOICES, label="What Service Area are you a part of?")
# form to validate that person signing up knows the answer to the impact day question
class ValidationForm(forms.Form):
answer = forms.CharField()
class ContactForm(forms.Form):
username = forms.CharField(label="Please enter your username (if applicable)", required=False)
contact_email = forms.EmailField(label="Please provide a contact email")
message = forms.CharField(widget=forms.Textarea)
class AccessIssuesForm(forms.Form):
username = forms.CharField(label="Please enter your username", required=False)
contact_email = forms.EmailField(label="Please provide a contact email")
message = forms.CharField(label="Please describe the access issues you are having", widget=forms.Textarea)
class ReportDataForm(forms.Form):
DataReportChoices = (
('Incorrect', 'Incorrect practitioner data'),
('Missing', 'Missing practitioner data'),
)
data_issue = forms.ChoiceField(choices=DataReportChoices,
label="What kind of data issue would you like to report?")
practitioner_first_name = forms.CharField(label="First name of practitioner", max_length=120)
practitioner_last_name = forms.CharField(label="Last name of practitioner", max_length=120)
service_area = forms.CharField(label="Service Area of practitioner", max_length=120)
level = forms.CharField(label="Level of practitioner", max_length=120)
office = forms.CharField(label="Office of practitioner", max_length=120)
message = forms.CharField(label="Describe data issue", max_length=1500)
class PartnerForm(forms.Form):
service_area_options = (
('S&O', 'S&O'),
('Tech', 'Tech'),
('HCap', 'HCap'),
)
service_ar = forms.ChoiceField(choices=service_area_options,
label="What Service Area are you aligned with?")
message = forms.CharField(label="What makes you a good fit for the team?", widget=forms.Textarea)
contact_email = forms.EmailField(label="Email address")
|
[
"junaidq1@gmail.com"
] |
junaidq1@gmail.com
|
c20f62c857e46f2c593a8ca4715ae05c5d55b16e
|
0b448e2f8dc5f6637f1689ed9c3f122604ec50d5
|
/PyPoll/main.py
|
b881853c53ca1dd296e4abbbce958fa4f48aa595
|
[] |
no_license
|
mounicapokala/Python-challange
|
00b3f88683738b360b844f2e1f0c5ab8b4f54179
|
30e71cf2a9057390a0b3bc46f455ecc5baba226c
|
refs/heads/master
| 2020-04-04T21:02:24.749417
| 2019-03-13T16:48:16
| 2019-03-13T16:48:16
| 156,270,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,584
|
py
|
import os
import csv
import operator
csv_path="/Users/mouni/Documents/GitHub/election/UTAUS201810DATA2/Python/Homework/Instructions/PyPoll/Resources/election_data.csv"
output_path="/Users/mouni/Documents/GitHub/Python-challange/PyPoll/PyPoll.txt"
with open(csv_path) as csvfile:
csv_reader=csv.reader(csvfile,delimiter=",")
csv_header=next(csvfile)
count_voters=0
count=0
candidate_list=[]
all_cand_list=[]
candidate_dict={}
candidate_percent={}
for row in csv_reader:
count_voters=count_voters+1
if row[2] not in candidate_list:
candidate_list.append(row[2])
all_cand_list.append(row[2])
with open(output_path,'a') as out:
out.write("Election results\n------------\n")
out.write(f"Total votes: {count_voters}\n-------------\n")
print("Election results\n------------")
print(f"Total votes: {count_voters}\n-------------")
for candidate in candidate_list:
for vote_cand in all_cand_list:
if candidate==vote_cand:
count=count+1
candidate_dict[candidate]=count
count=0
for key,values in candidate_dict.items():
percent_vote=round((values/count_voters)*100,0)
candidate_percent[key]=percent_vote
print("%s: %.3f%% (%s)" %(key,percent_vote,values))
out.write("%s: %.3f%% (%s)\n" %(key,percent_vote,values))
winner=max(candidate_percent.items(), key=operator.itemgetter(1))[0]
print(f"----------\nWinner: {winner}\n-----------")
out.write(f"----------\nWinner: {winner}\n-----------")
|
[
"mounicadona@gmail.com"
] |
mounicadona@gmail.com
|
a7c910acf371d992b72da6e4efb6e5bbfc6eb773
|
875304da764ebd3d27491fd50852f7be5e9233b6
|
/Distance_Between_Points.py
|
3c9e45f089eb9d661861614cd3b031d327bca65a
|
[] |
no_license
|
IvetaY/PythonFundamentals
|
9ace0aaeddfc7e61f27fd3308e643627cd280875
|
cf0bb87ba399a697be335f8979d7284c8be15246
|
refs/heads/master
| 2022-01-26T00:43:00.627871
| 2019-08-14T19:13:32
| 2019-08-14T19:13:32
| 119,277,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
from math import sqrt
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def read_point():
liniq = input()
tokens = [float(num) for num in liniq.split(' ')]
x, y = tokens
point = Point(x, y)
return point
def distance_between_points(point1, point2):
delta_x = point2.x - point1.x
delta_y = point2.y - point1.y
distance = sqrt(delta_x ** 2 + delta_y ** 2)
return distance
point1 = read_point()
point2 = read_point()
distance = distance_between_points(point1, point2)
print(f'{distance:.3f}')
# def get_x(self):
# return self.x
#
# def get_y(self):
# return self.y
#
#
# def caldistance(point1, point2):
# delta_x = point2.get_x() - point1.get_x()
# delta_y = point2.get_y() - point1.get_y()
#
# distance = sqrt(delta_x**2 + delta_y**2)
# return distance
#
# p1 = Point(4,3)
# p2 = Point(0,0)
# print(f'{caldistance(p1,p2)}')
|
[
"noreply@github.com"
] |
noreply@github.com
|
dbd63181e26bf71a1fa5f35c11a8f3a74f5dc202
|
c25a6a30dcb773590669f5c5698547e9a550c460
|
/trace_gen.py
|
326d3e8c5e7d8b1bf1708489f232bd81378eea78
|
[] |
no_license
|
crusader2000/lrc_coding
|
f68eb621383399d980975f67568535d8b2ceb183
|
887fc27b72980381b27c52371ac716df9242331b
|
refs/heads/main
| 2023-07-11T02:02:44.995285
| 2021-08-12T16:10:28
| 2021-08-12T16:10:28
| 377,253,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
import csv
import requests
import re
import hashlib
import os
def cheaphash(string,length=6):
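# return the first `length` hex characters of the SHA-256 digest of `string` (which must be bytes)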
if length<len(hashlib.sha256(string).hexdigest()):
return hashlib.sha256(string).hexdigest()[:length]
else:
raise Exception("Length too long. Length of {y} when hash length is {x}.".format(x=str(len(hashlib.sha256(string).hexdigest())),y=length))
if not os.path.exists("./files"):
os.mkdir("./files")
data = []
with open('requests.txt','r') as f:
# first_line = f.readline()
# start_time = float(first_line.split(' ')[1])
# print(start_time)
count = 0
for row in f.readlines():
row = row.strip()
items = row.split(' ')
items.pop(-1)
items.pop(0)
url = items[-1]
filename = url.rsplit('/', 1)[1]
items.append(cheaphash(filename.encode('utf-8')))
if not os.path.exists("files/"+filename):
r = requests.get(url, allow_redirects=True)
open("files/"+items[-1], 'wb').write(r.content)
data.append(items)
count = count + 1
print(count)
# print(data)
data = sorted(data, key = lambda x: float(x[0]))
# print(data)
start_time = float(data[0][0])
for row in data:
row[0] = float(row[0]) - start_time
with open('trace.csv', mode='w') as trace_file:
trace_writer = csv.writer(trace_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
trace_writer.writerow(["Time","URL","File_Name(Hashed)"])
for row in data:
print(row)
trace_writer.writerow(row)
## Upload Trace
|
[
"ansh.puvvada@students.iiit.ac.in"
] |
ansh.puvvada@students.iiit.ac.in
|
8c693acb05745c20689b0a071a611d9b22ec6549
|
fa60536fbc7c0d8a2a8f08f0a5b6351c77d08054
|
/3]. Competitive Programming/03]. HackerRank/1]. Practice/12]. 10 Days of Statistics/Day_5.py
|
9205935baf59949bf266af43020d1b9d8c2f88cf
|
[
"MIT"
] |
permissive
|
poojitha2002/The-Complete-FAANG-Preparation
|
15cad1f9fb0371d15acc0fb541a79593e0605c4c
|
7910c846252d3f1a66f92af3b7d9fb9ad1f86999
|
refs/heads/master
| 2023-07-17T20:24:19.161348
| 2021-08-28T11:39:48
| 2021-08-28T11:39:48
| 400,784,346
| 5
| 2
|
MIT
| 2021-08-28T12:14:35
| 2021-08-28T12:14:34
| null |
UTF-8
|
Python
| false
| false
| 864
|
py
|
# 1st Solution---------------------------------------
from math import factorial, exp
f = float(input())
i = int(input())
eq = ((f**i) * exp(-f))/factorial(i)
print('%.3f' %eq)
# 2nd Solution-------------------------------------------
x,y = [float(i) for i in input().split(" ")]
cx = 160 + 40*(x + x**2)
cy = 128 + 40*(y + y**2)
print(round(cx, 3))
print(round(cy, 3))
# 3rd Solution---------------------------------------------
import math as m
mean, std = 20, 2
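# normal CDF via the error function: F(x) = 0.5 * (1 + erf((x - mean) / (std * sqrt(2))))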
cd = lambda x: 0.5 * (1 + m.erf((x-mean) / (std * (2**0.5))))
print('{:.3f}'.format(cd(19.5)))
print('{:.3f}'.format(cd(22)-cd(20)))
# 4th Solution--------------------------------------------
import math as m
mean, std = 70, 10
cd = lambda x: 0.5 * (1 + m.erf((x - mean) / (std * (2 ** 0.5))))
print(round((1-cd(80))*100,2))
print(round((1-cd(60))*100,2))
print(round((cd(60))*100,2))
|
[
"akashsingh27101998@gmai.com"
] |
akashsingh27101998@gmai.com
|
e1c50ce55b94d0b8974045c6d12124d2db102332
|
21b39d50e4df56ea01453001845d1580729af1df
|
/jdcloud_sdk/services/redis/apis/DescribeClientListRequest.py
|
450146bb94baa2db571d11a497779f82c80cb4ac
|
[
"Apache-2.0"
] |
permissive
|
Tanc009/jdcloud-sdk-python
|
ef46eac7731aa8a1839b1fc1efd93249b7a977f0
|
8b045c99bc5b73ca7348e950b6f01e03a27982f5
|
refs/heads/master
| 2021-08-09T14:49:16.177709
| 2021-06-25T02:38:41
| 2021-06-25T02:38:41
| 141,714,695
| 0
| 0
|
Apache-2.0
| 2018-07-20T13:21:17
| 2018-07-20T13:21:16
| null |
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeClientListRequest(JDCloudRequest):
"""
Query the list of current client IPs
"""
def __init__(self, parameters, header=None, version="v1"):
super(DescribeClientListRequest, self).__init__(
'/regions/{regionId}/cacheInstance/{cacheInstanceId}/clientList', 'GET', header, version)
self.parameters = parameters
class DescribeClientListParameters(object):
def __init__(self, regionId, cacheInstanceId, ):
"""
:param regionId: Region ID of the region where the cache Redis instance is located. Currently there are three regions: North China-Beijing, South China-Guangzhou, and East China-Shanghai, with Region IDs cn-north-1, cn-south-1, and cn-east-2 respectively
:param cacheInstanceId: ID of the cache Redis instance, the unique identifier used to access the instance
"""
self.regionId = regionId
self.cacheInstanceId = cacheInstanceId
|
[
"tancong@jd.com"
] |
tancong@jd.com
|
d2ba7f08e6dd720ddd8de76660a048f88cc8e038
|
622be10edbbab2c932c7c37ef63b6c3d88c9ed10
|
/rest/migrations/0001_initial.py
|
81bea894bbff28b14bb084c6232a6bd8d10fe9f2
|
[] |
no_license
|
Joycewaithaka/Framework
|
38ef8c872317c78175a9b774da789cb5ee10073c
|
9831b205011166b960a0773e95b34453b5d868ad
|
refs/heads/master
| 2021-07-01T05:17:08.170456
| 2017-09-22T06:32:36
| 2017-09-22T06:32:36
| 104,442,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-21 09:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('names', models.CharField(max_length=30)),
('course', models.CharField(max_length=30)),
('description', models.CharField(max_length=30)),
],
),
]
|
[
"joycewanjiruwaithaka@gmail.com"
] |
joycewanjiruwaithaka@gmail.com
|
c5a5fa1f10d00e0b202512be63246adc05209344
|
73079120d673a9ff71128049cf3d661409fc8870
|
/levelupapi/models/__init__.py
|
2ad4244c97a2ca116ea243496a11cd5a73a087f5
|
[] |
no_license
|
ConnorBlakeney/levelup-server
|
3974e80e0846e160c0fac34ca89c91ca9575a693
|
2488e03b0bae920c17f0a3ac85e97e91ceb78a59
|
refs/heads/main
| 2023-01-19T20:14:57.841307
| 2020-12-01T20:26:45
| 2020-12-01T20:26:45
| 308,676,715
| 0
| 0
| null | 2020-10-30T15:52:32
| 2020-10-30T15:52:31
| null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
from .gamer import Gamer
from .event import Event
from .eventgamer import EventGamer
from .gametype import GameType
from .game import Game
|
[
"connorblakeney@yahoo.com"
] |
connorblakeney@yahoo.com
|
1e4f57cb7ae54552f4520fc68b828043c2167752
|
e41c10e0b17265509fd460f860306784522eedc3
|
/basic_config.py
|
8e0791dbf7f899d792c04ef3414e39b0ef1d7b41
|
[
"CC0-1.0"
] |
permissive
|
hyyc116/research_paradigm_changing
|
c77ecf2533a6b2e2cd3f74fc3d3073454bffc55c
|
eac69c45a7a17eb70ace185fa22831ac785e504e
|
refs/heads/master
| 2020-11-24T05:48:07.973347
| 2019-12-18T12:17:02
| 2019-12-18T12:17:02
| 227,992,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,102
|
py
|
#coding:utf-8
import os
import sys
import json
from collections import defaultdict
from collections import Counter
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
import math
import numpy as np
import random
import logging
import networkx as nx
from itertools import combinations
import pylab
import itertools
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import spline
from multiprocessing.dummy import Pool as ThreadPool
from networkx.algorithms import isomorphism
from matplotlib import cm as CM
from collections import Counter
from scipy.signal import wiener
import matplotlib as mpl
from matplotlib.patches import Circle
from matplotlib.patheffects import withStroke
import matplotlib.colors as colors
from matplotlib.colors import LogNorm
from matplotlib.colors import LinearSegmentedColormap
from networkx.algorithms.core import core_number
from networkx.algorithms.core import k_core
import psycopg2
from cycler import cycler
import six
# from gini import gini
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',level=logging.INFO)
mpl.rcParams['agg.path.chunksize'] = 10000
color_sequence = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
'#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',
'#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',
'#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5']
mpl.rcParams['axes.prop_cycle'] = cycler('color', color_sequence)
# color = plt.cm.viridis(np.linspace(0.01,0.99,6)) # This returns RGBA; convert:
# hexcolor = map(lambda rgb:'#%02x%02x%02x' % (rgb[0]*255,rgb[1]*255,rgb[2]*255),
# tuple(color[:,0:-1]))
# mpl.rcParams['axes.prop_cycle'] = cycler('color', hexcolor)
params = {'legend.fontsize': 8,
'axes.labelsize': 8,
'axes.titlesize':10,
'xtick.labelsize':8,
'ytick.labelsize':8}
pylab.rcParams.update(params)
# from paths import *
def circle(ax,x,y,radius=0.15):
circle = Circle((x, y), radius, clip_on=False, zorder=10, linewidth=1,
edgecolor='black', facecolor=(0, 0, 0, .0125),
path_effects=[withStroke(linewidth=5, foreground='w')])
ax.add_artist(circle)
def autolabel(rects,ax,total_count=None,step=1,):
"""
Attach a text label above each bar displaying its height
"""
for index in np.arange(len(rects),step=step):
rect = rects[index]
height = rect.get_height()
# print height
if not total_count is None:
ax.text(rect.get_x() + rect.get_width()/2., 1.005*height,
'{:}\n({:.6f})'.format(int(height),height/float(total_count)),
ha='center', va='bottom')
else:
ax.text(rect.get_x() + rect.get_width()/2., 1.005*height,
'{:}'.format(int(height)),
ha='center', va='bottom')
class dbop:
def __init__(self,insert_index=0):
self._insert_index=insert_index
self._insert_values=[]
logging.debug("connect database with normal cursor.")
self._db = psycopg2.connect(database='core_data',user="buyi",password = "ruth_hardtop_isthmus_bubbly")
self._cursor = self._db.cursor()
def query_database(self,sql):
self._cursor.close()
self._cursor = self._db.cursor()
self._cursor.execute(sql)
logging.debug("query database with sql {:}".format(sql))
return self._cursor
def insert_database(self,sql,values):
self._cursor.close()
self._cursor = self._db.cursor()
self._cursor.executemany(sql,values)
logging.debug("insert data to database with sql {:}".format(sql))
self._db.commit()
def batch_insert(self,sql,row,step,is_auto=True,end=False):
if end:
if len(self._insert_values)!=0:
logging.info("insert {:}th data into database,final insert.".format(self._insert_index))
self.insert_database(sql,self._insert_values)
else:
self._insert_index+=1
if is_auto:
row[0] = self._insert_index
self._insert_values.append(tuple(row))
if self._insert_index%step==0:
logging.info("insert {:}th data into database".format(self._insert_index))
self.insert_database(sql,self._insert_values)
self._insert_values=[]
def get_insert_count(self):
return self._insert_index
def execute_del_update(self,sql):
self._cursor.execute(sql)
self._db.commit()
logging.debug("execute delete or update sql {:}.".format(sql))
def execute_sql(self,sql):
self._cursor.execute(sql)
self._db.commit()
logging.debug("execute sql {:}.".format(sql))
def close_db(self):
self._db.close()
def hist_2_bar(data,bins=50):
n,bins,patches = plt.hist(data,bins=bins)
return [x for x in bins[:-1]],[x for x in n]
|
[
"hyyc116@gmail.com"
] |
hyyc116@gmail.com
|
8aa44c49b1ccdc0c8d55e6211a30bda0f2a9ade8
|
a0458c27f9f0f946b0071c7c8bf5dbbb3bde96f3
|
/src/settings.py
|
a3ce8e9248c34ed356b6eac5ad2b84d0d8566348
|
[
"MIT"
] |
permissive
|
sergachev/litex-template
|
b10175fe98e723539ff108f2db322b21cc3910ad
|
00c7b36b8f9b380bc509b76a44f3885ffa2a932d
|
refs/heads/main
| 2023-08-03T16:05:45.258246
| 2022-12-11T20:32:32
| 2022-12-11T20:32:32
| 214,885,817
| 17
| 3
|
MIT
| 2023-07-25T20:49:53
| 2019-10-13T20:04:27
|
Python
|
UTF-8
|
Python
| false
| false
| 49
|
py
|
device_model = "xc7a200tfbg484"
speed_grade = -1
|
[
"ilia.sergachev@protonmail.ch"
] |
ilia.sergachev@protonmail.ch
|
a1ff43af09345e62519dc8bc4ca87bc75b6d115a
|
0de0f7a797738387118ac8aecdf31a696c8800d1
|
/sampler.py
|
860c54b97697144bda1bb1ce910b9cb1aaf25d00
|
[] |
no_license
|
hyzcn/metriclearningbench
|
f4aa56849e9ae19a2f2298167ae7f76727cd0e30
|
79320fdfcdce2f9e65c9ecb39c14fbce8bf8b6ab
|
refs/heads/master
| 2021-06-20T12:23:06.208100
| 2017-07-17T14:16:15
| 2017-07-17T14:16:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,786
|
py
|
import random
import itertools
def index_dataset(dataset):
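# build a mapping: class_label -> list of indices of that class's examples in dataset.imgs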
return {c : [example_idx for example_idx, (image_file_name, class_label_ind) in enumerate(dataset.imgs) if class_label_ind == c] for c in set(dict(dataset.imgs).values())}
def sample_from_class(images_by_class, class_label_ind):
return images_by_class[class_label_ind][random.randrange(len(images_by_class[class_label_ind]))]
def simple(batch_size, dataset, prob_other = 0.5):
'''lazy sampling, not like in lifted_struct. they add to the pool all positive combinations, then compute the average number of positive pairs per image, then sample for every image the same number of negative pairs'''
images_by_class = index_dataset(dataset)
while True:
example_indices = []
for i in range(0, batch_size, 2):
perm = random.sample(images_by_class.keys(), 2)
example_indices += [sample_from_class(images_by_class, perm[0]), sample_from_class(images_by_class, perm[0 if i == 0 or random.random() > prob_other else 1])]
yield example_indices[:batch_size]
def triplet(batch_size, dataset):
images_by_class = index_dataset(dataset)
while True:
example_indices = []
for i in range(0, batch_size, 3):
perm = random.sample(images_by_class.keys(), 2)
example_indices += [sample_from_class(images_by_class, perm[0]), sample_from_class(images_by_class, perm[0]), sample_from_class(images_by_class, perm[1])]
yield example_indices[:batch_size]
def pddm(batch_size, dataset):
images_by_class = index_dataset(dataset)
while True:
class0 = random.choice(images_by_class.keys())
example_indices = [sample_from_class(images_by_class, class0) for k in range(4)]
for i in range(len(example_indices), batch_size):
example_indices.append(random.randrange(len(dataset)))
yield example_indices[:batch_size]
|
[
"vadimkantorov@gmail.com"
] |
vadimkantorov@gmail.com
|
d99c9108d337ed703b7f5b6063ed0429bfb22b1c
|
ecf52346badfccf15a8959cb36618ce1edbdec6d
|
/libs/utils.py
|
5d66b20503b295a01c43ec2b0d2dda2b54316f4d
|
[
"BSD-2-Clause"
] |
permissive
|
aoxiangzhang/tuxiangshijue
|
3571279296bce17b2a896d4512d594fabfb2f490
|
4b9541f64bf6e4c8e1b6b1ce8be141aaf8c67dae
|
refs/heads/master
| 2023-01-23T06:33:57.810171
| 2020-11-26T08:07:34
| 2020-11-26T08:07:34
| 316,161,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,966
|
py
|
from __future__ import division
import os
import cv2
import time
import torch
import scipy.misc
import numpy as np
import scipy.sparse
from PIL import Image
import scipy.sparse.linalg
from cv2.ximgproc import jointBilateralFilter
from torch.utils.serialization import load_lua
from numpy.lib.stride_tricks import as_strided
def whiten(cF):
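# ZCA-style whitening of a (channels x pixels) feature matrix: subtract the per-channel mean,
# then decorrelate with V * diag(eigenvalues^-0.5) * V^T computed from the SVD of the covariance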
cFSize = cF.size()
c_mean = torch.mean(cF,1) # c x (h x w)
c_mean = c_mean.unsqueeze(1).expand_as(cF)
cF = cF - c_mean
contentConv = torch.mm(cF,cF.t()).div(cFSize[1]-1) + torch.eye(cFSize[0]).double()
c_u,c_e,c_v = torch.svd(contentConv,some=False)
k_c = cFSize[0]
for i in range(cFSize[0]):
if c_e[i] < 0.00001:
k_c = i
break
c_d = (c_e[0:k_c]).pow(-0.5)
step1 = torch.mm(c_v[:,0:k_c],torch.diag(c_d))
step2 = torch.mm(step1,(c_v[:,0:k_c].t()))
whiten_cF = torch.mm(step2,cF)
return whiten_cF
def numpy2cv2(cont,style,prop,width,height):
cont = cont.transpose((1,2,0))
cont = cont[...,::-1]
cont = cont * 255
cont = cv2.resize(cont,(width,height))
#cv2.resize(iimg,(width,height))
style = style.transpose((1,2,0))
style = style[...,::-1]
style = style * 255
style = cv2.resize(style,(width,height))
prop = prop.transpose((1,2,0))
prop = prop[...,::-1]
prop = prop * 255
prop = cv2.resize(prop,(width,height))
#return np.concatenate((cont,np.concatenate((style,prop),axis=1)),axis=1)
return prop,cont
def makeVideo(content,style,props,outf):
print('Stack transferred frames back to video...')
layers,height,width = content[0].shape
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
video = cv2.VideoWriter(os.path.join(outf,'transfer.avi'),fourcc,10.0,(width,height))
ori_video = cv2.VideoWriter(os.path.join(outf,'content.avi'),fourcc,10.0,(width,height))
for j in range(len(content)):
prop,cont = numpy2cv2(content[j],style,props[j],width,height)
cv2.imwrite('prop.png',prop)
cv2.imwrite('content.png',cont)
# TODO: this is ugly, fix this
imgj = cv2.imread('prop.png')
imgc = cv2.imread('content.png')
video.write(imgj)
ori_video.write(imgc)
# RGB or BGR, yuks
video.release()
ori_video.release()
os.remove('prop.png')
os.remove('content.png')
print('Transferred video saved at %s.'%outf)
def print_options(opt):
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.outf)
os.makedirs(expr_dir,exist_ok=True)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
|
[
"zhang_ax@qq.com"
] |
zhang_ax@qq.com
|
595c27d7b42568980e69f0ab516589558e4603c5
|
82a682480d6ab5d082360b08a158bda42ae571b8
|
/music/migrations/0001_initial.py
|
dcd4ade0bb2e2454ba30be663909dd324eebd60c
|
[
"MIT"
] |
permissive
|
saddhu1005/Viberr
|
a19edd8e71503793f6035ce06f5827bf175ef746
|
f0847d479bce72b5da593d63848ae0fa79c3165a
|
refs/heads/master
| 2021-06-24T06:13:51.838927
| 2019-07-26T19:33:50
| 2019-07-26T19:33:50
| 172,378,410
| 0
| 0
|
MIT
| 2021-06-10T21:16:36
| 2019-02-24T19:08:36
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
# Generated by Django 2.1.7 on 2019-02-24 13:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('artist', models.CharField(max_length=250)),
('album_title', models.CharField(max_length=500)),
('genre', models.CharField(max_length=100)),
('album_logo', models.CharField(max_length=1000)),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file_type', models.CharField(max_length=10)),
('song_title', models.CharField(max_length=250)),
('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='music.Album')),
],
),
]
|
[
"saddhu1005@gmail.com"
] |
saddhu1005@gmail.com
|
6fef01c2498c9a9b7a52d8a294080b7fe61d6627
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/CJ/16_2_1_Dom_ju.py
|
c726b4de6450f76ad915989d09c20461a1c9a8cd
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 538
|
py
|
DOWNLOAD_DIR = "/Users/Dom/Downloads/"
def jopen( filename ):
return open( DOWNLOAD_DIR+filename+".in", "r")
def jout( filename, results, linebreaks=False ):
f = open(DOWNLOAD_DIR+filename+".out","w")
for n in range(len(results)):
f.write( "Case #" + str(n+1) + ": " )
if isinstance(results[n], list):
if linebreaks:
f.write( "\n" )
f.write( " ".join(results[n]) + "\n" )
else:
if linebreaks:
f.write( "\n" )
f.write( str(results[n]) + "\n" )
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
b551aaf1cc0ed4d622b9137b4fb432ed71cb4f6b
|
88095fb5174ae3b0d15aa4ee56ceebe2411e8fb7
|
/dive-into-deep-learning-pytorch/3.3_linear-regression.py
|
01843a802551610d98c5b1b9fac8fd3fc5845791
|
[
"Apache-2.0"
] |
permissive
|
taotao1234abcd/machine-learning-and-artificial-intelligence-python
|
a5c04973767851ed7cef1187be50334f0326a8e3
|
04095d03a9bbe6b6189824a6a0f63b939ea04b65
|
refs/heads/master
| 2021-07-07T20:25:19.301573
| 2020-09-14T03:36:46
| 2020-09-14T03:36:46
| 180,847,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,352
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn
import torch.utils.data as Data
torch.manual_seed(1)
print(torch.__version__)
torch.set_default_tensor_type('torch.FloatTensor')
num_inputs = 1
num_examples = 2000
true_w = 2.5
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1.0, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w * features[:, 0] + true_b
labels += torch.tensor(np.random.normal(0, 1.0, size=labels.size()), dtype=torch.float)
batch_size = 32
# combine the features and labels of the training data
dataset = Data.TensorDataset(features, labels)
# put the dataset into a DataLoader
data_iter = Data.DataLoader(
dataset=dataset, # torch TensorDataset format
batch_size=batch_size, # mini batch size
shuffle=True, # whether to shuffle the data (shuffling is better)
num_workers=0, # number of worker processes for reading data; note that multiprocessing must run under if __name__ == '__main__':
)
# num_workers=0 means no extra worker processes are used to speed up data loading
# class LinearNet(nn.Module):
# def __init__(self, n_feature):
# super(LinearNet, self).__init__()
# self.linear = nn.Linear(n_feature, 1)
#
# def forward(self, x):
# y = self.linear(x)
# return y
# net = LinearNet(num_inputs)
net = nn.Sequential(
nn.Linear(num_inputs, 1)
)
loss_func = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
loss_list = []
epoch_list = []
num_epochs = 30
for epoch in range(1, num_epochs + 1):
for x, y in data_iter:
prediction = net(x)
loss = loss_func(prediction, y.view(-1, 1))
optimizer.zero_grad() # zero the gradients
loss.backward()
optimizer.step()
print('epoch %d, loss: %f' % (epoch, loss))
loss_list.append(loss.data.numpy().tolist())
epoch_list.append(int(epoch))
xx = torch.unsqueeze(torch.linspace(-4, 4, 1000), dim=1)
prediction = net(xx)
plt.subplot(211)
plt.plot(features.data.numpy(), labels.data.numpy(), '.', ms=3)
plt.plot(xx.data.numpy(), prediction.data.numpy(), 'r-', lw=1)
plt.subplot(212)
plt.plot(epoch_list, loss_list, lw=1)
plt.xlabel('Epoches')
plt.ylabel('Loss')
# plt.ylim(0, 0.2)
plt.pause(0.01)
dense = net[0]
print(true_w, dense.weight)
print(true_b, dense.bias)
|
[
"49369890+taotao1234abcd@users.noreply.github.com"
] |
49369890+taotao1234abcd@users.noreply.github.com
|
9765259fe66a9c580fb6bcac5113e9f6a5e872f3
|
2376dcbb96c9fca65c10c8f8db66822ba01d6a6a
|
/src/api2db/ingest/api2pandas.py
|
9ac5015ae0e4c0caa29bf211e20968b32de46e20
|
[
"MIT"
] |
permissive
|
TristenHarr/api2db
|
054443456b0b07e047216142d74eb2dc30dabe15
|
8c8b14280441f5153ff146c23359a0eb91022ddb
|
refs/heads/main
| 2023-05-12T07:47:17.462089
| 2021-06-02T20:32:24
| 2021-06-02T20:32:24
| 364,407,770
| 46
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,808
|
py
|
# -*- coding: utf-8 -*-
"""
Contains the Api2Pandas class
=============================
"""
from ..app.log import get_logger
from .api_form import ApiForm
import pandas as pd
import os
from typing import Union, Callable
class Api2Pandas(object):
"""Used to extract incoming data from an API into a pandas DataFrame"""
def __init__(self, api_form: Callable[[], ApiForm]):
"""
Creates a Api2Pandas object and loads its ApiForm
Args:
api_form: The function that generates the ApiForm for the associated collector
"""
self.api_form = api_form()
def dependencies_satisfied(self) -> bool:
"""
Checks to ensure any data-linking dependency files exist
This feature currently only exists for :py:class:`api2db.ingest.post_process.merge_static.MergeStatic`
Returns:
True if all dependencies are satisfied, otherwise False
"""
logger = get_logger()
res = True
for pre in self.api_form.pre_process:
if pre.ctype in []:
if not os.path.isfile(pre.path):
logger.warning(f"Missing PreProcess Dependency File: {pre.path}")
res = False
for post in self.api_form.post_process:
if post.ctype in ["merge_static"]:
if not os.path.isfile(post.path):
logger.warning(f"Missing PostProcess Dependency File: {post.path}")
res = False
return res
def extract(self, data: dict) -> Union[pd.DataFrame, None]:
"""
Performs data-extraction from data arriving from an API.
Workflow:
1. Perform all pre-processing on data
2. Perform all data-feature extraction
3. Perform all post-processing on data
4. Return a DataFrame containing the cleaned data.
Args:
data: The data arriving from an API to perform data extraction on.
Returns:
The cleaned data if it is possible to clean the data otherwise None
"""
# Global extraction dictionary
pre_2_post = {}
# For each pre-processor
for pre in self.api_form.pre_process:
# If the pre-processor is a global extraction, add the feature extracted to the global extraction dictionary
if pre.ctype == "global_extract":
pre_2_post[pre.key] = pre(lam_arg=data)
else:
# Perform the pre-processor and replace the existing data with the new data
data = pre(lam_arg=data)
if data is None:
return data
rows = []
# For each row in the data
for data_point in data:
row = {}
# Extract all the features from the row
for feat in self.api_form.data_features:
row[feat.key] = feat(data_point)
rows.append(row)
# Create the DataFrame from the rows
df = pd.DataFrame(rows)
# Cast the DataFrame to the correct dtypes
df = df.astype(self.api_form.pandas_typecast())
# Add all globally extracted data to the DataFrame
for k, v in pre_2_post.items():
df[k] = v["value"]
df[k] = df[k].astype(self.api_form.typecast(v["dtype"]))
# For each post-processor
for post in self.api_form.post_process:
if post.ctype == "futures": # FUTURES MAY REQUIRE DIFFERENT OPERATIONS
pass
else:
# Perform the post-processing operation on the DataFrame
df = post(df)
# Get rid of the data index
df = df.reset_index(drop=True)
# Return the clean Data Hooray!
return df
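# A minimal usage sketch, assuming `my_api_form` is a user-supplied function that builds this
# collector's ApiForm and `raw_api_payload` is the dict returned by the API (both names are
# illustrative, not part of the original source):
#
#     extractor = Api2Pandas(my_api_form)
#     if extractor.dependencies_satisfied():
#         clean_df = extractor.extract(raw_api_payload)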
|
[
"tjhm9c@mail.missouri.edu"
] |
tjhm9c@mail.missouri.edu
|
1b4ab7cf2f915702b202e3a75ac79732075f6950
|
2746d27fa7c6669e7782527f010c514c6ba17058
|
/Django/timetable/env/lib/python3.6/os.py
|
4614206a11b2a35caec1cb3a51a04e40fb2fbeaa
|
[] |
no_license
|
samoyl11/TimeTable_optimizer
|
4dd86f31cf9b9f7413e73dc0af60211efcb96d57
|
ddbf908121792a2335c9ecd0f8ee2bc783c44a1b
|
refs/heads/master
| 2020-05-25T17:56:54.566081
| 2019-05-21T19:21:03
| 2019-05-21T19:21:03
| 187,918,798
| 2
| 0
| null | 2019-05-21T21:53:58
| 2019-05-21T21:53:57
| null |
UTF-8
|
Python
| false
| false
| 42
|
py
|
/Users/bulat/anaconda3/lib/python3.6/os.py
|
[
"bulatuseinov@gmail.com"
] |
bulatuseinov@gmail.com
|
b1514bede7b460561ff960cecb8def7bbc963dde
|
af9a37d2ef29f49d0bc037e5397d448f3097aef6
|
/alarm/alarm_db.py
|
843f5ce3f39ce9e5135fa7c1ed5ff20bc1e1db1d
|
[] |
no_license
|
amsuredev/alarm
|
ea954f429f79c1d2c5998bec4c121e3ea914f81f
|
809be964ff7b8fa5613231a987eb37abac7568b8
|
refs/heads/master
| 2023-02-20T23:24:52.969006
| 2021-01-28T16:56:52
| 2021-01-28T16:56:52
| 332,046,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,842
|
py
|
import sqlite3
from alarm import Alarm
class AlarmDatabase:
def __init__(self):
conn = sqlite3.connect('alarm.db') # create file if not exist;connect if exist
cursor = conn.cursor()
cursor.execute("""CREATE TABLE IF NOT EXISTS alarms (
_id INTEGER PRIMARY KEY AUTOINCREMENT,
ACTIVE NUMERIC,
MIN INTEGER,
HOUR INTEGER,
DAY INTEGER,
MONTH INTEGER,
YEAR INTEGER,
MELODY_PATH TEXT
)""")
cursor.close()
conn.commit()
conn.close()
def insert_alarm(self, alarm: Alarm):
conn = sqlite3.connect('alarm.db') # create file if not exist;connect if exist
cursor = conn.cursor()
params = (alarm.active, alarm.min, alarm.hour, alarm.day, alarm.month, alarm.year, alarm.melody_path)
cursor.execute(
"INSERT INTO alarms(ACTIVE, MIN, HOUR, DAY, MONTH, YEAR, MELODY_PATH) VALUES (?, ?, ?, ?, ?, ?, ?)", params)
cursor.close()
conn.commit()
conn.close()
def get_active_alarms(self):
conn = sqlite3.connect('alarm.db') # create file if not exist;connect if exist
cursor = conn.cursor()
cursor.execute("""SELECT * FROM alarms WHERE ACTIVE = TRUE""")
active_alarms_tumple = cursor.fetchall()
cursor.close()
conn.commit()
conn.close()
return [Alarm.createFromTumple(alarm_tumpe) for alarm_tumpe in active_alarms_tumple]#list of alarm objects
def mark_alarm_as_inactive(self, id):
conn = sqlite3.connect('alarm.db') # create file if not exist;connect if exist
cursor = conn.cursor()
cursor.execute("""UPDATE alarms SET ACTIVE = FALSE
WHERE _id = :id""", {'id': id})
cursor.close()
conn.commit()
conn.close()
def print_all_lines(self):
conn = sqlite3.connect('alarm.db') # create file if not exist;connect if exist
cursor = conn.cursor()
cursor.execute("""SELECT * FROM alarms""")
lines = cursor.fetchall()
for line in lines:
print(line)
cursor.close()
conn.commit()
conn.close()
def update_alarm_time(self, alarm:Alarm):
conn = sqlite3.connect('alarm.db') # create file if not exist;connect if exist
cursor = conn.cursor()
cursor.execute("""UPDATE alarms SET MIN = :min, HOUR = :hour, DAY = :day, MONTH = :month, YEAR = :year
WHERE _id = :id;""", {'min': alarm.min, 'hour': alarm.hour, 'day': alarm.day, 'month': alarm.month, 'year': alarm.year, 'id': alarm.id})
cursor.close()
conn.commit()
conn.close()
if __name__=="__main__":
alarm_db = AlarmDatabase()
alarm_db.print_all_lines()
|
[
"71019216+amsuredev@users.noreply.github.com"
] |
71019216+amsuredev@users.noreply.github.com
|
aed9b1d04dab1509879d9b416a9b84cdf20a89e3
|
61a4d618f8b6b50863171fd52776ff6583ee5665
|
/house lease/logic/house_unit.py
|
74318e0274180b7dfb5822fedd839af7bc9d2bf0
|
[] |
no_license
|
Dark-0-forest/house_lease_system
|
231ce42678d3fd3620783c798301d5f79ec7f95a
|
b10fcd89f31deee84014990315d9db36b0aa3c94
|
refs/heads/master
| 2023-01-11T00:23:30.451356
| 2020-10-24T05:31:06
| 2020-10-24T05:31:06
| 306,812,969
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,418
|
py
|
"""
@author: 余宗源
@file: house_unit.py
@date: 2020/9/18
@description: implements a set of operations on house information and wraps them as functions, providing an interface for upper-level callers
"""
import mysql_connection as mct
# insert house information
def house_insert(he):
# initialize the MySQL connection
conn = mct.create_connection()
cur = conn.cursor()
# insert the house record
sql = "insert into houselease.house values(null, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
cur.execute(sql, (he.gethaddress(), he.gethnum(), he.getlid(), he.gethtype(), he.getfurnish(), he.getharea(),
he.getfloor(), he.getlift(), he.getmaxtenant(), he.getrent(), he.getleased(), he.getcharge()))
conn.commit()
# get the house number
sql = "select landlordID from houselease.house where houseAddress = %s and houseNum = %s"
cur.execute(sql, (he.gethaddress(), he.gethnum()))
he.sethid(cur.fetchall()[0][0])
# close the connection
mct.close_conn(conn, cur)
# update the house information
def house_update(he):
# initialize the MySQL connection
conn = mct.create_connection()
cur = conn.cursor()
# update the values
sql = "update houselease.house set houseAddress = %s, houseNum = %s, houseType = %s, furnished = %s, " \
"houseArea = %s, floor = %s, lift = %s, maxtenant = %s, rent = %s, leased = %s where houseID = %s"
cur.execute(sql, (he.gethaddress(), he.gethnum(), he.gethtype(), he.getfurnish(), he.getharea(),
he.getfloor(), he.getlift(), he.getmaxtenant(), he.getrent(), he.getleased(), he.gethid()))
conn.commit()
# close the connection
mct.close_conn(conn, cur)
# query the house table according to the given conditions
def house_select(he):
# initialize the MySQL connection
conn = mct.create_connection()
cur = conn.cursor()
# query by conditions
sql = "select * from houselease.house where 1 "
if he.gethid() != 0:
sql += "and houseID = %d " % he.gethid()
if he.gethaddress() != "":
sql += "and houseAddress like '%%%s%%' " % he.gethaddress()
if he.gethnum() != "":
sql += "and houseNum like '%%%s%%' " % str(he.gethnum())
if he.getlid() != 0:
sql += "and landlordID like '%s' " % str(he.getlid())
if he.gethtype() != "":
sql += "and houseType like '%%%s%%' " % he.gethtype()
if he.getfurnish() != "":
sql += "and furnished like '%s' " % he.getfurnish()
if he.getharea() != 0:
sql += "and houseArea >= %f and houseArea <= %f " % (he.getharea()-20, he.getharea()+20)
if he.getfloor() != "":
sql += "and floor = '%s' " % he.getfloor()
if he.getlift() != "":
sql += "and lift = '%s' " % he.getlift()
if he.getmaxtenant() != "":
sql += "and maxtenant = '%s' " % (he.getmaxtenant())
if he.getrent() != 0:
sql += "and rent >= %f and rent <= %f " % (he.getrent()-1000.0, he.getrent()+1000.0)
if he.getleased() != "":
sql += "and leased = '%s' " % he.getleased()
cur.execute(sql)
houses = cur.fetchall()
# close the connection
mct.close_conn(conn, cur)
return houses
# delete a record
def house_delete(he):
# initialize the MySQL connection
conn = mct.create_connection()
cur = conn.cursor()
# delete by house ID
sql = "delete from houselease.house where houseID = %s"
cur.execute(sql, (he.gethid(), ))
conn.commit()
# close the connection
mct.close_conn(conn, cur)
|
[
"928774025@qq.com"
] |
928774025@qq.com
|
bb54dfe78d3f41b4a773888239740f927f839580
|
a7477f153eebf6d2848beecde7ca88cedd26dfa8
|
/learning_log/settings.py
|
3f0c23d1867b5cdfe7354c0aabfa268e5d94e304
|
[] |
no_license
|
Artem19861910/learning_log
|
83f26595929fb691bccf2f4f50f0eebda6f4cede
|
e9dbfcffa9559c4cb6bafa75affb5161a18eac56
|
refs/heads/master
| 2021-01-18T02:03:25.353534
| 2016-09-15T09:17:01
| 2016-09-15T09:17:01
| 68,282,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,975
|
py
|
"""
Django settings for learning_log project.
Generated by 'django-admin startproject' using Django 1.9.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i_1v1g&#k0oi9e+-&ml(^p3$g4u8xv=f$rsezu-p1%^o+%+x@h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third-party apps
'bootstrap3',
# My apps
'learning_logs',
'users',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learning_log.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'learning_log.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'EET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# My settings
LOGIN_URL = '/users/login/'
# Settings from django-bootstrap3
BOOTSTRAP3 = {
'include_jquery': True,
}
# Heroku settings
if os.getcwd() == '/app':
import dj_database_url
DATABASES = {
'default': dj_database_url.config(default='postgres://localhost')
}
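    # dj_database_url.config() parses the DATABASE_URL environment variable
    # that Heroku provides and returns a Django DATABASES entry; the
    # 'postgres://localhost' argument above is only a local fallback used
    # when that variable is unset.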
# Honor the 'X-Forwarded-Proto' header for request.is_secure().
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
    STATICFILES_DIRS = [
        os.path.join(BASE_DIR, 'static'),
    ]
|
[
"defenite@meta.ua"
] |
defenite@meta.ua
|
40dba682efd38da2e3964fce6502dc8d0e62ec5c
|
9f59acd956f1e8985f8d1d2b93a2f69009de0fed
|
/src_panic_app/panic/tests/noses/trivial_tests/test_trivial01.py
|
71b07bca1c6d29d2637fb73b9d47ee84b83a8fe1
|
[] |
no_license
|
MarCialR/flask_docker
|
cda3aefc48c8dd9a9e9608ae4e41bf0509a9ef02
|
f78dc22f4fde98640b2adf66c5980eb68e79fd54
|
refs/heads/master
| 2020-06-05T20:22:55.136539
| 2015-10-04T18:38:58
| 2015-10-04T18:38:58
| 28,040,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
import unittest
class TC(unittest.TestCase):
pass
class TrivialTestCase(TC):
def test_01_diquesi(self):
assert "diquesi" == "diquesi"
def test_02_diqueno(self):
assert "diqueno" == "diqueno"
def test_03_faildiquesi(self):
assert "diqueSI" == "diquesi"
def test_04_faildiqueNO(self):
assert "diqueNO" == "diquesi"
|
[
"marcialemilio@gmail.com"
] |
marcialemilio@gmail.com
|
425bbfbbe5ae1399dac988c42a53fa836aa09111
|
cbfddfdf5c7fa8354162efe50b41f84e55aff118
|
/venv/lib/python3.7/site-packages/nltk/tokenize/punkt.py
|
f0dcaca359521808d4344948c5389317ab0fdec1
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
tclerico/SAAC
|
8d2245221dd135aea67c5e079ac7eaf542b25e2f
|
2f52007ae8043096662e76da828a84e87f71091e
|
refs/heads/master
| 2022-12-09T21:56:33.430404
| 2019-02-20T14:23:51
| 2019-02-20T14:23:51
| 153,152,229
| 3
| 0
|
MIT
| 2022-09-16T17:52:47
| 2018-10-15T17:13:29
|
Python
|
UTF-8
|
Python
| false
| false
| 62,162
|
py
|
# Natural Language Toolkit: Punkt sentence tokenizer
#
# Copyright (C) 2001-2018 NLTK Project
# Algorithm: Kiss & Strunk (2006)
# Author: Willy <willy@csse.unimelb.edu.au> (original Python port)
# Steven Bird <stevenbird1@gmail.com> (additions)
# Edward Loper <edloper@gmail.com> (rewrite)
# Joel Nothman <jnothman@student.usyd.edu.au> (almost rewrite)
# Arthur Darcet <arthur@darcet.fr> (fixes)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
r"""
Punkt Sentence Tokenizer
This tokenizer divides a text into a list of sentences
by using an unsupervised algorithm to build a model for abbreviation
words, collocations, and words that start sentences. It must be
trained on a large collection of plaintext in the target language
before it can be used.
The NLTK data package includes a pre-trained Punkt tokenizer for
English.
>>> import nltk.data
>>> text = '''
... Punkt knows that the periods in Mr. Smith and Johann S. Bach
... do not mark sentence boundaries. And sometimes sentences
... can start with non-capitalized words. i is a good variable
... name.
... '''
>>> sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
>>> print('\n-----\n'.join(sent_detector.tokenize(text.strip())))
Punkt knows that the periods in Mr. Smith and Johann S. Bach
do not mark sentence boundaries.
-----
And sometimes sentences
can start with non-capitalized words.
-----
i is a good variable
name.
(Note that whitespace from the original text, including newlines, is
retained in the output.)
Punctuation following sentences is also included by default
(from NLTK 3.0 onwards). It can be excluded with the realign_boundaries
flag.
>>> text = '''
... (How does it deal with this parenthesis?) "It should be part of the
... previous sentence." "(And the same with this one.)" ('And this one!')
... "('(And (this)) '?)" [(and this. )]
... '''
>>> print('\n-----\n'.join(
... sent_detector.tokenize(text.strip())))
(How does it deal with this parenthesis?)
-----
"It should be part of the
previous sentence."
-----
"(And the same with this one.)"
-----
('And this one!')
-----
"('(And (this)) '?)"
-----
[(and this. )]
>>> print('\n-----\n'.join(
... sent_detector.tokenize(text.strip(), realign_boundaries=False)))
(How does it deal with this parenthesis?
-----
) "It should be part of the
previous sentence.
-----
" "(And the same with this one.
-----
)" ('And this one!
-----
')
"('(And (this)) '?
-----
)" [(and this.
-----
)]
However, Punkt is designed to learn parameters (a list of abbreviations, etc.)
unsupervised from a corpus similar to the target domain. The pre-packaged models
may therefore be unsuitable: use ``PunktSentenceTokenizer(text)`` to learn
parameters from the given text.
:class:`.PunktTrainer` learns parameters such as a list of abbreviations
(without supervision) from portions of text. Using a ``PunktTrainer`` directly
allows for incremental training and modification of the hyper-parameters used
to decide what is considered an abbreviation, etc.
The algorithm for this tokenizer is described in::
Kiss, Tibor and Strunk, Jan (2006): Unsupervised Multilingual Sentence
Boundary Detection. Computational Linguistics 32: 485-525.
"""
from __future__ import print_function, unicode_literals, division
# TODO: Make orthographic heuristic less susceptible to overtraining
# TODO: Frequent sentence starters optionally exclude always-capitalised words
# FIXME: Problem with ending string with e.g. '!!!' -> '!! !'
import re
import math
from collections import defaultdict
from six import string_types
from nltk.compat import unicode_repr, python_2_unicode_compatible
from nltk.probability import FreqDist
from nltk.tokenize.api import TokenizerI
######################################################################
# { Orthographic Context Constants
######################################################################
# The following constants are used to describe the orthographic
# contexts in which a word can occur. BEG=beginning, MID=middle,
# UNK=unknown, UC=uppercase, LC=lowercase, NC=no case.
_ORTHO_BEG_UC = 1 << 1
"""Orthographic context: beginning of a sentence with upper case."""
_ORTHO_MID_UC = 1 << 2
"""Orthographic context: middle of a sentence with upper case."""
_ORTHO_UNK_UC = 1 << 3
"""Orthographic context: unknown position in a sentence with upper case."""
_ORTHO_BEG_LC = 1 << 4
"""Orthographic context: beginning of a sentence with lower case."""
_ORTHO_MID_LC = 1 << 5
"""Orthographic context: middle of a sentence with lower case."""
_ORTHO_UNK_LC = 1 << 6
"""Orthographic context: unknown position in a sentence with lower case."""
_ORTHO_UC = _ORTHO_BEG_UC + _ORTHO_MID_UC + _ORTHO_UNK_UC
"""Orthographic context: occurs with upper case."""
_ORTHO_LC = _ORTHO_BEG_LC + _ORTHO_MID_LC + _ORTHO_UNK_LC
"""Orthographic context: occurs with lower case."""
_ORTHO_MAP = {
('initial', 'upper'): _ORTHO_BEG_UC,
('internal', 'upper'): _ORTHO_MID_UC,
('unknown', 'upper'): _ORTHO_UNK_UC,
('initial', 'lower'): _ORTHO_BEG_LC,
('internal', 'lower'): _ORTHO_MID_LC,
('unknown', 'lower'): _ORTHO_UNK_LC,
}
"""A map from context position and first-letter case to the
appropriate orthographic context flag."""
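# For example, a word type seen capitalised at the start of one sentence and
# lower-cased mid-sentence elsewhere accumulates the combined flag value
# _ORTHO_BEG_UC | _ORTHO_MID_LC, i.e. (1 << 1) | (1 << 5) == 34.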
# } (end orthographic context constants)
######################################################################
######################################################################
# { Decision reasons for debugging
######################################################################
REASON_DEFAULT_DECISION = 'default decision'
REASON_KNOWN_COLLOCATION = 'known collocation (both words)'
REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC = 'abbreviation + orthographic heuristic'
REASON_ABBR_WITH_SENTENCE_STARTER = 'abbreviation + frequent sentence starter'
REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC = 'initial + orthographic heuristic'
REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC = 'initial + orthographic heuristic'
REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC = 'initial + special orthographic heuristic'
# } (end decision reasons for debugging)
######################################################################
######################################################################
# { Language-dependent variables
######################################################################
class PunktLanguageVars(object):
"""
Stores variables, mostly regular expressions, which may be
language-dependent for correct application of the algorithm.
An extension of this class may modify its properties to suit
a language other than English; an instance can then be passed
as an argument to PunktSentenceTokenizer and PunktTrainer
constructors.
"""
__slots__ = ('_re_period_context', '_re_word_tokenizer')
def __getstate__(self):
# All modifications to the class are performed by inheritance.
# Non-default parameters to be pickled must be defined in the inherited
# class.
return 1
def __setstate__(self, state):
return 1
sent_end_chars = ('.', '?', '!')
"""Characters which are candidates for sentence boundaries"""
@property
def _re_sent_end_chars(self):
return '[%s]' % re.escape(''.join(self.sent_end_chars))
internal_punctuation = ',:;' # might want to extend this..
"""sentence internal punctuation, which indicates an abbreviation if
preceded by a period-final token."""
re_boundary_realignment = re.compile(r'["\')\]}]+?(?:\s+|(?=--)|$)',
re.MULTILINE)
"""Used to realign punctuation that should be included in a sentence
although it follows the period (or ?, !)."""
_re_word_start = r"[^\(\"\`{\[:;&\#\*@\)}\]\-,]"
"""Excludes some characters from starting word tokens"""
_re_non_word_chars = r"(?:[?!)\";}\]\*:@\'\({\[])"
"""Characters that cannot appear within words"""
_re_multi_char_punct = r"(?:\-{2,}|\.{2,}|(?:\.\s){2,}\.)"
"""Hyphen and ellipsis are multi-character punctuation"""
_word_tokenize_fmt = r'''(
%(MultiChar)s
|
(?=%(WordStart)s)\S+? # Accept word characters until end is found
(?= # Sequences marking a word's end
\s| # White-space
$| # End-of-string
%(NonWord)s|%(MultiChar)s| # Punctuation
,(?=$|\s|%(NonWord)s|%(MultiChar)s) # Comma if at end of word
)
|
\S
)'''
"""Format of a regular expression to split punctuation from words,
excluding period."""
def _word_tokenizer_re(self):
"""Compiles and returns a regular expression for word tokenization"""
try:
return self._re_word_tokenizer
except AttributeError:
self._re_word_tokenizer = re.compile(
self._word_tokenize_fmt %
{
'NonWord': self._re_non_word_chars,
'MultiChar': self._re_multi_char_punct,
'WordStart': self._re_word_start,
},
re.UNICODE | re.VERBOSE
)
return self._re_word_tokenizer
def word_tokenize(self, s):
"""Tokenize a string to split off punctuation other than periods"""
return self._word_tokenizer_re().findall(s)
_period_context_fmt = r"""
\S* # some word material
%(SentEndChars)s # a potential sentence ending
(?=(?P<after_tok>
%(NonWord)s # either other punctuation
|
\s+(?P<next_tok>\S+) # or whitespace and some other token
))"""
"""Format of a regular expression to find contexts including possible
sentence boundaries. Matches token which the possible sentence boundary
ends, and matches the following token within a lookahead expression."""
def period_context_re(self):
"""Compiles and returns a regular expression to find contexts
including possible sentence boundaries."""
try:
return self._re_period_context
except:
self._re_period_context = re.compile(
self._period_context_fmt %
{
'NonWord': self._re_non_word_chars,
'SentEndChars': self._re_sent_end_chars,
},
re.UNICODE | re.VERBOSE)
return self._re_period_context
_re_non_punct = re.compile(r'[^\W\d]', re.UNICODE)
"""Matches token types that are not merely punctuation. (Types for
numeric tokens are changed to ##number## and hence contain alpha.)"""
# }
######################################################################
# ////////////////////////////////////////////////////////////
# { Helper Functions
# ////////////////////////////////////////////////////////////
def _pair_iter(it):
"""
Yields pairs of tokens from the given iterator such that each input
token will appear as the first element in a yielded tuple. The last
pair will have None as its second element.
"""
it = iter(it)
prev = next(it)
for el in it:
yield (prev, el)
prev = el
yield (prev, None)
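# Illustrative behaviour of the helper above:
#     list(_pair_iter([1, 2, 3]))  ->  [(1, 2), (2, 3), (3, None)]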
######################################################################
# { Punkt Parameters
######################################################################
class PunktParameters(object):
"""Stores data used to perform sentence boundary detection with Punkt."""
def __init__(self):
self.abbrev_types = set()
"""A set of word types for known abbreviations."""
self.collocations = set()
"""A set of word type tuples for known common collocations
where the first word ends in a period. E.g., ('S.', 'Bach')
is a common collocation in a text that discusses 'Johann
S. Bach'. These count as negative evidence for sentence
boundaries."""
self.sent_starters = set()
"""A set of word types for words that often appear at the
beginning of sentences."""
self.ortho_context = defaultdict(int)
"""A dictionary mapping word types to the set of orthographic
contexts that word type appears in. Contexts are represented
by adding orthographic context flags: ..."""
def clear_abbrevs(self):
self.abbrev_types = set()
def clear_collocations(self):
self.collocations = set()
def clear_sent_starters(self):
self.sent_starters = set()
def clear_ortho_context(self):
self.ortho_context = defaultdict(int)
def add_ortho_context(self, typ, flag):
self.ortho_context[typ] |= flag
def _debug_ortho_context(self, typ):
c = self.ortho_context[typ]
if c & _ORTHO_BEG_UC:
yield 'BEG-UC'
if c & _ORTHO_MID_UC:
yield 'MID-UC'
if c & _ORTHO_UNK_UC:
yield 'UNK-UC'
if c & _ORTHO_BEG_LC:
yield 'BEG-LC'
if c & _ORTHO_MID_LC:
yield 'MID-LC'
if c & _ORTHO_UNK_LC:
yield 'UNK-LC'
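    # Example: after `params.add_ortho_context('bach', _ORTHO_MID_UC)`,
    # `list(params._debug_ortho_context('bach'))` yields ['MID-UC'].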
######################################################################
# { PunktToken
######################################################################
@python_2_unicode_compatible
class PunktToken(object):
"""Stores a token of text with annotations produced during
sentence boundary detection."""
_properties = [
'parastart', 'linestart',
'sentbreak', 'abbr', 'ellipsis'
]
__slots__ = ['tok', 'type', 'period_final'] + _properties
def __init__(self, tok, **params):
self.tok = tok
self.type = self._get_type(tok)
self.period_final = tok.endswith('.')
for p in self._properties:
setattr(self, p, None)
for k in params:
setattr(self, k, params[k])
# ////////////////////////////////////////////////////////////
# { Regular expressions for properties
# ////////////////////////////////////////////////////////////
# Note: [A-Za-z] is approximated by [^\W\d] in the general case.
_RE_ELLIPSIS = re.compile(r'\.\.+$')
_RE_NUMERIC = re.compile(r'^-?[\.,]?\d[\d,\.-]*\.?$')
_RE_INITIAL = re.compile(r'[^\W\d]\.$', re.UNICODE)
_RE_ALPHA = re.compile(r'[^\W\d]+$', re.UNICODE)
# ////////////////////////////////////////////////////////////
# { Derived properties
# ////////////////////////////////////////////////////////////
def _get_type(self, tok):
"""Returns a case-normalized representation of the token."""
return self._RE_NUMERIC.sub('##number##', tok.lower())
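    # For instance, PunktToken('3,000.5').type is '##number##', while
    # PunktToken('Mr.').type is simply the lower-cased token 'mr.'.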
@property
def type_no_period(self):
"""
The type with its final period removed if it has one.
"""
if len(self.type) > 1 and self.type[-1] == '.':
return self.type[:-1]
return self.type
@property
def type_no_sentperiod(self):
"""
The type with its final period removed if it is marked as a
sentence break.
"""
if self.sentbreak:
return self.type_no_period
return self.type
@property
def first_upper(self):
"""True if the token's first character is uppercase."""
return self.tok[0].isupper()
@property
def first_lower(self):
"""True if the token's first character is lowercase."""
return self.tok[0].islower()
@property
def first_case(self):
if self.first_lower:
return 'lower'
elif self.first_upper:
return 'upper'
return 'none'
@property
def is_ellipsis(self):
"""True if the token text is that of an ellipsis."""
return self._RE_ELLIPSIS.match(self.tok)
@property
def is_number(self):
"""True if the token text is that of a number."""
return self.type.startswith('##number##')
@property
def is_initial(self):
"""True if the token text is that of an initial."""
return self._RE_INITIAL.match(self.tok)
@property
def is_alpha(self):
"""True if the token text is all alphabetic."""
return self._RE_ALPHA.match(self.tok)
@property
def is_non_punct(self):
"""True if the token is either a number or is alphabetic."""
return _re_non_punct.search(self.type)
# ////////////////////////////////////////////////////////////
# { String representation
# ////////////////////////////////////////////////////////////
def __repr__(self):
"""
A string representation of the token that can reproduce it
with eval(), which lists all the token's non-default
annotations.
"""
typestr = (' type=%s,' % unicode_repr(self.type)
if self.type != self.tok else '')
propvals = ', '.join(
'%s=%s' % (p, unicode_repr(getattr(self, p)))
for p in self._properties
if getattr(self, p)
)
return '%s(%s,%s %s)' % (self.__class__.__name__,
unicode_repr(self.tok), typestr, propvals)
def __str__(self):
"""
A string representation akin to that used by Kiss and Strunk.
"""
res = self.tok
if self.abbr:
res += '<A>'
if self.ellipsis:
res += '<E>'
if self.sentbreak:
res += '<S>'
return res
######################################################################
# { Punkt base class
######################################################################
class PunktBaseClass(object):
"""
Includes common components of PunktTrainer and PunktSentenceTokenizer.
"""
def __init__(self, lang_vars=PunktLanguageVars(), token_cls=PunktToken,
params=None):
if params is None:
params = PunktParameters()
self._params = params
self._lang_vars = lang_vars
self._Token = token_cls
"""The collection of parameters that determines the behavior
of the punkt tokenizer."""
# ////////////////////////////////////////////////////////////
# { Word tokenization
# ////////////////////////////////////////////////////////////
def _tokenize_words(self, plaintext):
"""
Divide the given text into tokens, using the punkt word
segmentation regular expression, and generate the resulting list
of tokens augmented as three-tuples with two boolean values for whether
the given token occurs at the start of a paragraph or a new line,
respectively.
"""
parastart = False
for line in plaintext.split('\n'):
if line.strip():
line_toks = iter(self._lang_vars.word_tokenize(line))
yield self._Token(next(line_toks),
parastart=parastart, linestart=True)
parastart = False
for t in line_toks:
yield self._Token(t)
else:
parastart = True
# ////////////////////////////////////////////////////////////
# { Annotation Procedures
# ////////////////////////////////////////////////////////////
def _annotate_first_pass(self, tokens):
"""
Perform the first pass of annotation, which makes decisions
        based purely on the word type of each word:
- '?', '!', and '.' are marked as sentence breaks.
- sequences of two or more periods are marked as ellipsis.
- any word ending in '.' that's a known abbreviation is
marked as an abbreviation.
- any other word ending in '.' is marked as a sentence break.
Return these annotations as a tuple of three sets:
- sentbreak_toks: The indices of all sentence breaks.
- abbrev_toks: The indices of all abbreviations.
- ellipsis_toks: The indices of all ellipsis marks.
"""
for aug_tok in tokens:
self._first_pass_annotation(aug_tok)
yield aug_tok
def _first_pass_annotation(self, aug_tok):
"""
Performs type-based annotation on a single token.
"""
tok = aug_tok.tok
if tok in self._lang_vars.sent_end_chars:
aug_tok.sentbreak = True
elif aug_tok.is_ellipsis:
aug_tok.ellipsis = True
elif aug_tok.period_final and not tok.endswith('..'):
if (tok[:-1].lower() in self._params.abbrev_types or
tok[:-1].lower().split('-')[-1] in self._params.abbrev_types):
aug_tok.abbr = True
else:
aug_tok.sentbreak = True
return
######################################################################
# { Punkt Trainer
######################################################################
class PunktTrainer(PunktBaseClass):
"""Learns parameters used in Punkt sentence boundary detection."""
def __init__(self, train_text=None, verbose=False,
lang_vars=PunktLanguageVars(), token_cls=PunktToken):
PunktBaseClass.__init__(self, lang_vars=lang_vars,
token_cls=token_cls)
self._type_fdist = FreqDist()
"""A frequency distribution giving the frequency of each
case-normalized token type in the training data."""
self._num_period_toks = 0
"""The number of words ending in period in the training data."""
self._collocation_fdist = FreqDist()
"""A frequency distribution giving the frequency of all
bigrams in the training data where the first word ends in a
period. Bigrams are encoded as tuples of word types.
Especially common collocations are extracted from this
frequency distribution, and stored in
``_params``.``collocations <PunktParameters.collocations>``."""
self._sent_starter_fdist = FreqDist()
"""A frequency distribution giving the frequency of all words
        that occur in the training data at the beginning of a sentence
(after the first pass of annotation). Especially common
sentence starters are extracted from this frequency
distribution, and stored in ``_params.sent_starters``.
"""
self._sentbreak_count = 0
"""The total number of sentence breaks identified in training, used for
calculating the frequent sentence starter heuristic."""
self._finalized = True
"""A flag as to whether the training has been finalized by finding
collocations and sentence starters, or whether finalize_training()
still needs to be called."""
if train_text:
self.train(train_text, verbose, finalize=True)
def get_params(self):
"""
Calculates and returns parameters for sentence boundary detection as
derived from training."""
if not self._finalized:
self.finalize_training()
return self._params
# ////////////////////////////////////////////////////////////
# { Customization Variables
# ////////////////////////////////////////////////////////////
ABBREV = 0.3
"""cut-off value whether a 'token' is an abbreviation"""
IGNORE_ABBREV_PENALTY = False
"""allows the disabling of the abbreviation penalty heuristic, which
exponentially disadvantages words that are found at times without a
final period."""
ABBREV_BACKOFF = 5
"""upper cut-off for Mikheev's(2002) abbreviation detection algorithm"""
COLLOCATION = 7.88
"""minimal log-likelihood value that two tokens need to be considered
as a collocation"""
SENT_STARTER = 30
"""minimal log-likelihood value that a token requires to be considered
as a frequent sentence starter"""
INCLUDE_ALL_COLLOCS = False
"""this includes as potential collocations all word pairs where the first
word ends in a period. It may be useful in corpora where there is a lot
of variation that makes abbreviations like Mr difficult to identify."""
INCLUDE_ABBREV_COLLOCS = False
"""this includes as potential collocations all word pairs where the first
word is an abbreviation. Such collocations override the orthographic
heuristic, but not the sentence starter heuristic. This is overridden by
INCLUDE_ALL_COLLOCS, and if both are false, only collocations with initials
and ordinals are considered."""
""""""
MIN_COLLOC_FREQ = 1
"""this sets a minimum bound on the number of times a bigram needs to
appear before it can be considered a collocation, in addition to log
likelihood statistics. This is useful when INCLUDE_ALL_COLLOCS is True."""
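    # Sketch of tuning these thresholds before training (the variable
    # `corpus_text` is a placeholder for your own plaintext corpus):
    #     trainer = PunktTrainer()
    #     trainer.INCLUDE_ALL_COLLOCS = True
    #     trainer.MIN_COLLOC_FREQ = 2
    #     trainer.train(corpus_text)
    #     tokenizer = PunktSentenceTokenizer(trainer.get_params())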
# ////////////////////////////////////////////////////////////
# { Training..
# ////////////////////////////////////////////////////////////
def train(self, text, verbose=False, finalize=True):
"""
Collects training data from a given text. If finalize is True, it
will determine all the parameters for sentence boundary detection. If
not, this will be delayed until get_params() or finalize_training() is
called. If verbose is True, abbreviations found will be listed.
"""
# Break the text into tokens; record which token indices correspond to
# line starts and paragraph starts; and determine their types.
self._train_tokens(self._tokenize_words(text), verbose)
if finalize:
self.finalize_training(verbose)
def train_tokens(self, tokens, verbose=False, finalize=True):
"""
Collects training data from a given list of tokens.
"""
self._train_tokens((self._Token(t) for t in tokens), verbose)
if finalize:
self.finalize_training(verbose)
def _train_tokens(self, tokens, verbose):
self._finalized = False
# Ensure tokens are a list
tokens = list(tokens)
# Find the frequency of each case-normalized type. (Don't
# strip off final periods.) Also keep track of the number of
# tokens that end in periods.
for aug_tok in tokens:
self._type_fdist[aug_tok.type] += 1
if aug_tok.period_final:
self._num_period_toks += 1
# Look for new abbreviations, and for types that no longer are
unique_types = self._unique_types(tokens)
for abbr, score, is_add in self._reclassify_abbrev_types(unique_types):
if score >= self.ABBREV:
if is_add:
self._params.abbrev_types.add(abbr)
if verbose:
print((' Abbreviation: [%6.4f] %s' %
(score, abbr)))
else:
if not is_add:
self._params.abbrev_types.remove(abbr)
if verbose:
print((' Removed abbreviation: [%6.4f] %s' %
(score, abbr)))
# Make a preliminary pass through the document, marking likely
# sentence breaks, abbreviations, and ellipsis tokens.
tokens = list(self._annotate_first_pass(tokens))
# Check what contexts each word type can appear in, given the
# case of its first letter.
self._get_orthography_data(tokens)
# We need total number of sentence breaks to find sentence starters
self._sentbreak_count += self._get_sentbreak_count(tokens)
# The remaining heuristics relate to pairs of tokens where the first
# ends in a period.
for aug_tok1, aug_tok2 in _pair_iter(tokens):
if not aug_tok1.period_final or not aug_tok2:
continue
# Is the first token a rare abbreviation?
if self._is_rare_abbrev_type(aug_tok1, aug_tok2):
self._params.abbrev_types.add(aug_tok1.type_no_period)
if verbose:
print((' Rare Abbrev: %s' % aug_tok1.type))
# Does second token have a high likelihood of starting a sentence?
if self._is_potential_sent_starter(aug_tok2, aug_tok1):
self._sent_starter_fdist[aug_tok2.type] += 1
# Is this bigram a potential collocation?
if self._is_potential_collocation(aug_tok1, aug_tok2):
self._collocation_fdist[
(aug_tok1.type_no_period, aug_tok2.type_no_sentperiod)] += 1
def _unique_types(self, tokens):
return set(aug_tok.type for aug_tok in tokens)
def finalize_training(self, verbose=False):
"""
Uses data that has been gathered in training to determine likely
collocations and sentence starters.
"""
self._params.clear_sent_starters()
for typ, ll in self._find_sent_starters():
self._params.sent_starters.add(typ)
if verbose:
print((' Sent Starter: [%6.4f] %r' % (ll, typ)))
self._params.clear_collocations()
for (typ1, typ2), ll in self._find_collocations():
self._params.collocations.add((typ1, typ2))
if verbose:
print((' Collocation: [%6.4f] %r+%r' %
(ll, typ1, typ2)))
self._finalized = True
# ////////////////////////////////////////////////////////////
# { Overhead reduction
# ////////////////////////////////////////////////////////////
def freq_threshold(self, ortho_thresh=2, type_thresh=2, colloc_thres=2,
sentstart_thresh=2):
"""
Allows memory use to be reduced after much training by removing data
about rare tokens that are unlikely to have a statistical effect with
further training. Entries occurring above the given thresholds will be
retained.
"""
if ortho_thresh > 1:
old_oc = self._params.ortho_context
self._params.clear_ortho_context()
for tok in self._type_fdist:
count = self._type_fdist[tok]
if count >= ortho_thresh:
self._params.ortho_context[tok] = old_oc[tok]
self._type_fdist = self._freq_threshold(self._type_fdist, type_thresh)
self._collocation_fdist = self._freq_threshold(
self._collocation_fdist, colloc_thres)
self._sent_starter_fdist = self._freq_threshold(
self._sent_starter_fdist, sentstart_thresh)
def _freq_threshold(self, fdist, threshold):
"""
Returns a FreqDist containing only data with counts below a given
threshold, as well as a mapping (None -> count_removed).
"""
# We assume that there is more data below the threshold than above it
# and so create a new FreqDist rather than working in place.
res = FreqDist()
num_removed = 0
for tok in fdist:
count = fdist[tok]
if count < threshold:
num_removed += 1
else:
res[tok] += count
res[None] += num_removed
return res
# ////////////////////////////////////////////////////////////
# { Orthographic data
# ////////////////////////////////////////////////////////////
def _get_orthography_data(self, tokens):
"""
Collect information about whether each token type occurs
with different case patterns (i) overall, (ii) at
sentence-initial positions, and (iii) at sentence-internal
positions.
"""
# 'initial' or 'internal' or 'unknown'
context = 'internal'
tokens = list(tokens)
for aug_tok in tokens:
# If we encounter a paragraph break, then it's a good sign
# that it's a sentence break. But err on the side of
# caution (by not positing a sentence break) if we just
# saw an abbreviation.
if aug_tok.parastart and context != 'unknown':
context = 'initial'
# If we're at the beginning of a line, then we can't decide
# between 'internal' and 'initial'.
if aug_tok.linestart and context == 'internal':
context = 'unknown'
# Find the case-normalized type of the token. If it's a
# sentence-final token, strip off the period.
typ = aug_tok.type_no_sentperiod
# Update the orthographic context table.
flag = _ORTHO_MAP.get((context, aug_tok.first_case), 0)
if flag:
self._params.add_ortho_context(typ, flag)
# Decide whether the next word is at a sentence boundary.
if aug_tok.sentbreak:
if not (aug_tok.is_number or aug_tok.is_initial):
context = 'initial'
else:
context = 'unknown'
elif aug_tok.ellipsis or aug_tok.abbr:
context = 'unknown'
else:
context = 'internal'
# ////////////////////////////////////////////////////////////
# { Abbreviations
# ////////////////////////////////////////////////////////////
def _reclassify_abbrev_types(self, types):
"""
(Re)classifies each given token if
- it is period-final and not a known abbreviation; or
- it is not period-final and is otherwise a known abbreviation
by checking whether its previous classification still holds according
to the heuristics of section 3.
Yields triples (abbr, score, is_add) where abbr is the type in question,
score is its log-likelihood with penalties applied, and is_add specifies
whether the present type is a candidate for inclusion or exclusion as an
abbreviation, such that:
- (is_add and score >= 0.3) suggests a new abbreviation; and
- (not is_add and score < 0.3) suggests excluding an abbreviation.
"""
# (While one could recalculate abbreviations from all .-final tokens at
# every iteration, in cases requiring efficiency, the number of tokens
# in the present training document will be much less.)
for typ in types:
# Check some basic conditions, to rule out words that are
# clearly not abbrev_types.
if not _re_non_punct.search(typ) or typ == '##number##':
continue
if typ.endswith('.'):
if typ in self._params.abbrev_types:
continue
typ = typ[:-1]
is_add = True
else:
if typ not in self._params.abbrev_types:
continue
is_add = False
# Count how many periods & nonperiods are in the
# candidate.
num_periods = typ.count('.') + 1
num_nonperiods = len(typ) - num_periods + 1
# Let <a> be the candidate without the period, and <b>
# be the period. Find a log likelihood ratio that
# indicates whether <ab> occurs as a single unit (high
# value of ll), or as two independent units <a> and
# <b> (low value of ll).
count_with_period = self._type_fdist[typ + '.']
count_without_period = self._type_fdist[typ]
ll = self._dunning_log_likelihood(
count_with_period + count_without_period,
self._num_period_toks, count_with_period,
self._type_fdist.N())
# Apply three scaling factors to 'tweak' the basic log
# likelihood ratio:
# F_length: long word -> less likely to be an abbrev
# F_periods: more periods -> more likely to be an abbrev
# F_penalty: penalize occurrences w/o a period
f_length = math.exp(-num_nonperiods)
f_periods = num_periods
f_penalty = (int(self.IGNORE_ABBREV_PENALTY)
or math.pow(num_nonperiods, -count_without_period))
score = ll * f_length * f_periods * f_penalty
yield typ, score, is_add
def find_abbrev_types(self):
"""
Recalculates abbreviations given type frequencies, despite no prior
determination of abbreviations.
This fails to include abbreviations otherwise found as "rare".
"""
self._params.clear_abbrevs()
tokens = (typ for typ in self._type_fdist if typ and typ.endswith('.'))
for abbr, score, is_add in self._reclassify_abbrev_types(tokens):
if score >= self.ABBREV:
self._params.abbrev_types.add(abbr)
# This function combines the work done by the original code's
# functions `count_orthography_context`, `get_orthography_count`,
# and `get_rare_abbreviations`.
def _is_rare_abbrev_type(self, cur_tok, next_tok):
"""
A word type is counted as a rare abbreviation if...
- it's not already marked as an abbreviation
- it occurs fewer than ABBREV_BACKOFF times
- either it is followed by a sentence-internal punctuation
mark, *or* it is followed by a lower-case word that
sometimes appears with upper case, but never occurs with
lower case at the beginning of sentences.
"""
if cur_tok.abbr or not cur_tok.sentbreak:
return False
# Find the case-normalized type of the token. If it's
# a sentence-final token, strip off the period.
typ = cur_tok.type_no_sentperiod
# Proceed only if the type hasn't been categorized as an
# abbreviation already, and is sufficiently rare...
count = self._type_fdist[typ] + self._type_fdist[typ[:-1]]
if (typ in self._params.abbrev_types or count >= self.ABBREV_BACKOFF):
return False
# Record this token as an abbreviation if the next
# token is a sentence-internal punctuation mark.
# [XX] :1 or check the whole thing??
if next_tok.tok[:1] in self._lang_vars.internal_punctuation:
return True
# Record this type as an abbreviation if the next
# token... (i) starts with a lower case letter,
# (ii) sometimes occurs with an uppercase letter,
        # and (iii) never occurs with an uppercase letter
# sentence-internally.
# [xx] should the check for (ii) be modified??
elif next_tok.first_lower:
typ2 = next_tok.type_no_sentperiod
typ2ortho_context = self._params.ortho_context[typ2]
if ((typ2ortho_context & _ORTHO_BEG_UC) and
not (typ2ortho_context & _ORTHO_MID_UC)):
return True
# ////////////////////////////////////////////////////////////
# { Log Likelihoods
# ////////////////////////////////////////////////////////////
# helper for _reclassify_abbrev_types:
@staticmethod
def _dunning_log_likelihood(count_a, count_b, count_ab, N):
"""
A function that calculates the modified Dunning log-likelihood
ratio scores for abbreviation candidates. The details of how
        this works are available in the paper.
"""
p1 = count_b / N
p2 = 0.99
null_hypo = (count_ab * math.log(p1) +
(count_a - count_ab) * math.log(1.0 - p1))
alt_hypo = (count_ab * math.log(p2) +
(count_a - count_ab) * math.log(1.0 - p2))
likelihood = null_hypo - alt_hypo
return (-2.0 * likelihood)
@staticmethod
def _col_log_likelihood(count_a, count_b, count_ab, N):
"""
A function that will just compute log-likelihood estimate, in
the original paper it's described in algorithm 6 and 7.
This *should* be the original Dunning log-likelihood values,
unlike the previous log_l function where it used modified
Dunning log-likelihood values
"""
p = count_b / N
p1 = count_ab / count_a
try:
p2 = (count_b - count_ab) / (N - count_a)
except ZeroDivisionError as e:
p2 = 1
try:
summand1 = (count_ab * math.log(p) +
(count_a - count_ab) * math.log(1.0 - p))
except ValueError as e:
summand1 = 0
try:
summand2 = ((count_b - count_ab) * math.log(p) +
(N - count_a - count_b + count_ab) * math.log(1.0 - p))
except ValueError as e:
summand2 = 0
if count_a == count_ab or p1 <= 0 or p1 >= 1:
summand3 = 0
else:
summand3 = (count_ab * math.log(p1) +
(count_a - count_ab) * math.log(1.0 - p1))
if count_b == count_ab or p2 <= 0 or p2 >= 1:
summand4 = 0
else:
summand4 = ((count_b - count_ab) * math.log(p2) +
(N - count_a - count_b + count_ab) * math.log(1.0 - p2))
likelihood = summand1 + summand2 - summand3 - summand4
return (-2.0 * likelihood)
# ////////////////////////////////////////////////////////////
# { Collocation Finder
# ////////////////////////////////////////////////////////////
def _is_potential_collocation(self, aug_tok1, aug_tok2):
"""
Returns True if the pair of tokens may form a collocation given
log-likelihood statistics.
"""
return ((self.INCLUDE_ALL_COLLOCS or
(self.INCLUDE_ABBREV_COLLOCS and aug_tok1.abbr) or
(aug_tok1.sentbreak and
(aug_tok1.is_number or aug_tok1.is_initial)))
and aug_tok1.is_non_punct
and aug_tok2.is_non_punct)
def _find_collocations(self):
"""
Generates likely collocations and their log-likelihood.
"""
for types in self._collocation_fdist:
try:
typ1, typ2 = types
except TypeError:
# types may be None after calling freq_threshold()
continue
if typ2 in self._params.sent_starters:
continue
col_count = self._collocation_fdist[types]
typ1_count = self._type_fdist[typ1] + self._type_fdist[typ1 + '.']
typ2_count = self._type_fdist[typ2] + self._type_fdist[typ2 + '.']
if (typ1_count > 1 and typ2_count > 1
and self.MIN_COLLOC_FREQ <
col_count <= min(typ1_count, typ2_count)):
ll = self._col_log_likelihood(typ1_count, typ2_count,
col_count, self._type_fdist.N())
# Filter out the not-so-collocative
if (ll >= self.COLLOCATION and
(self._type_fdist.N() / typ1_count >
typ2_count / col_count)):
yield (typ1, typ2), ll
# ////////////////////////////////////////////////////////////
# { Sentence-Starter Finder
# ////////////////////////////////////////////////////////////
def _is_potential_sent_starter(self, cur_tok, prev_tok):
"""
        Returns True given a token and the token that precedes it if it
seems clear that the token is beginning a sentence.
"""
        # If a token (i) is preceded by a sentence break that is
# not a potential ordinal number or initial, and (ii) is
        # alphabetic, then it is a sentence starter.
return (prev_tok.sentbreak and
not (prev_tok.is_number or prev_tok.is_initial) and
cur_tok.is_alpha)
def _find_sent_starters(self):
"""
Uses collocation heuristics for each candidate token to
determine if it frequently starts sentences.
"""
for typ in self._sent_starter_fdist:
if not typ:
continue
typ_at_break_count = self._sent_starter_fdist[typ]
typ_count = self._type_fdist[typ] + self._type_fdist[typ + '.']
if typ_count < typ_at_break_count:
# needed after freq_threshold
continue
ll = self._col_log_likelihood(self._sentbreak_count, typ_count,
typ_at_break_count,
self._type_fdist.N())
if (ll >= self.SENT_STARTER and
self._type_fdist.N() / self._sentbreak_count >
typ_count / typ_at_break_count):
yield typ, ll
def _get_sentbreak_count(self, tokens):
"""
Returns the number of sentence breaks marked in a given set of
augmented tokens.
"""
return sum(1 for aug_tok in tokens if aug_tok.sentbreak)
######################################################################
# { Punkt Sentence Tokenizer
######################################################################
class PunktSentenceTokenizer(PunktBaseClass, TokenizerI):
"""
A sentence tokenizer which uses an unsupervised algorithm to build
a model for abbreviation words, collocations, and words that start
sentences; and then uses that model to find sentence boundaries.
This approach has been shown to work well for many European
languages.
"""
def __init__(self, train_text=None, verbose=False,
lang_vars=PunktLanguageVars(), token_cls=PunktToken):
"""
train_text can either be the sole training text for this sentence
boundary detector, or can be a PunktParameters object.
"""
PunktBaseClass.__init__(self, lang_vars=lang_vars,
token_cls=token_cls)
if train_text:
self._params = self.train(train_text, verbose)
def train(self, train_text, verbose=False):
"""
Derives parameters from a given training text, or uses the parameters
given. Repeated calls to this method destroy previous parameters. For
incremental training, instantiate a separate PunktTrainer instance.
"""
if not isinstance(train_text, string_types):
return train_text
return PunktTrainer(train_text, lang_vars=self._lang_vars,
token_cls=self._Token).get_params()
# ////////////////////////////////////////////////////////////
# { Tokenization
# ////////////////////////////////////////////////////////////
def tokenize(self, text, realign_boundaries=True):
"""
Given a text, returns a list of the sentences in that text.
"""
return list(self.sentences_from_text(text, realign_boundaries))
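    # Typical use, mirroring the module docstring (training_text is a
    # placeholder for a plaintext corpus):
    #     sbd = PunktSentenceTokenizer(training_text)
    #     sentences = sbd.tokenize("Mr. Smith arrived. He sat down.")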
def debug_decisions(self, text):
"""
Classifies candidate periods as sentence breaks, yielding a dict for
each that may be used to understand why the decision was made.
See format_debug_decision() to help make this output readable.
"""
for match in self._lang_vars.period_context_re().finditer(text):
decision_text = match.group() + match.group('after_tok')
tokens = self._tokenize_words(decision_text)
tokens = list(self._annotate_first_pass(tokens))
while not tokens[0].period_final:
tokens.pop(0)
yield dict(period_index=match.end() - 1,
text=decision_text,
type1=tokens[0].type,
type2=tokens[1].type,
type1_in_abbrs=bool(tokens[0].abbr),
type1_is_initial=bool(tokens[0].is_initial),
type2_is_sent_starter=tokens[1].type_no_sentperiod in self._params.sent_starters,
type2_ortho_heuristic=self._ortho_heuristic(tokens[1]),
type2_ortho_contexts=set(self._params._debug_ortho_context(tokens[1].type_no_sentperiod)),
collocation=(tokens[0].type_no_sentperiod,
tokens[1].type_no_sentperiod) in self._params.collocations,
reason=self._second_pass_annotation(tokens[0], tokens[1]) or REASON_DEFAULT_DECISION,
break_decision=tokens[0].sentbreak,
)
def span_tokenize(self, text, realign_boundaries=True):
"""
Given a text, generates (start, end) spans of sentences
in the text.
"""
slices = self._slices_from_text(text)
if realign_boundaries:
slices = self._realign_boundaries(text, slices)
for sl in slices:
yield (sl.start, sl.stop)
def sentences_from_text(self, text, realign_boundaries=True):
"""
Given a text, generates the sentences in that text by only
testing candidate sentence breaks. If realign_boundaries is
True, includes in the sentence closing punctuation that
follows the period.
"""
return [text[s:e] for s, e in self.span_tokenize(text, realign_boundaries)]
def _slices_from_text(self, text):
last_break = 0
for match in self._lang_vars.period_context_re().finditer(text):
context = match.group() + match.group('after_tok')
if self.text_contains_sentbreak(context):
yield slice(last_break, match.end())
if match.group('next_tok'):
# next sentence starts after whitespace
last_break = match.start('next_tok')
else:
# next sentence starts at following punctuation
last_break = match.end()
# The last sentence should not contain trailing whitespace.
yield slice(last_break, len(text.rstrip()))
def _realign_boundaries(self, text, slices):
"""
Attempts to realign punctuation that falls after the period but
should otherwise be included in the same sentence.
For example: "(Sent1.) Sent2." will otherwise be split as::
["(Sent1.", ") Sent1."].
This method will produce::
["(Sent1.)", "Sent2."].
"""
realign = 0
for sl1, sl2 in _pair_iter(slices):
sl1 = slice(sl1.start + realign, sl1.stop)
if not sl2:
if text[sl1]:
yield sl1
continue
m = self._lang_vars.re_boundary_realignment.match(text[sl2])
if m:
yield slice(sl1.start, sl2.start + len(m.group(0).rstrip()))
realign = m.end()
else:
realign = 0
if text[sl1]:
yield sl1
def text_contains_sentbreak(self, text):
"""
Returns True if the given text includes a sentence break.
"""
found = False # used to ignore last token
for t in self._annotate_tokens(self._tokenize_words(text)):
if found:
return True
if t.sentbreak:
found = True
return False
def sentences_from_text_legacy(self, text):
"""
Given a text, generates the sentences in that text. Annotates all
tokens, rather than just those with possible sentence breaks. Should
produce the same results as ``sentences_from_text``.
"""
tokens = self._annotate_tokens(self._tokenize_words(text))
return self._build_sentence_list(text, tokens)
def sentences_from_tokens(self, tokens):
"""
Given a sequence of tokens, generates lists of tokens, each list
corresponding to a sentence.
"""
tokens = iter(self._annotate_tokens(self._Token(t) for t in tokens))
sentence = []
for aug_tok in tokens:
sentence.append(aug_tok.tok)
if aug_tok.sentbreak:
yield sentence
sentence = []
if sentence:
yield sentence
def _annotate_tokens(self, tokens):
"""
Given a set of tokens augmented with markers for line-start and
paragraph-start, returns an iterator through those tokens with full
annotation including predicted sentence breaks.
"""
# Make a preliminary pass through the document, marking likely
# sentence breaks, abbreviations, and ellipsis tokens.
tokens = self._annotate_first_pass(tokens)
# Make a second pass through the document, using token context
# information to change our preliminary decisions about where
        # sentence breaks, abbreviations, and ellipsis occur.
tokens = self._annotate_second_pass(tokens)
## [XX] TESTING
# tokens = list(tokens)
# self.dump(tokens)
return tokens
def _build_sentence_list(self, text, tokens):
"""
Given the original text and the list of augmented word tokens,
construct and return a tokenized list of sentence strings.
"""
# Most of the work here is making sure that we put the right
# pieces of whitespace back in all the right places.
# Our position in the source text, used to keep track of which
# whitespace to add:
pos = 0
# A regular expression that finds pieces of whitespace:
WS_REGEXP = re.compile(r'\s*')
sentence = ''
for aug_tok in tokens:
tok = aug_tok.tok
# Find the whitespace before this token, and update pos.
ws = WS_REGEXP.match(text, pos).group()
pos += len(ws)
# Some of the rules used by the punkt word tokenizer
# strip whitespace out of the text, resulting in tokens
# that contain whitespace in the source text. If our
# token doesn't match, see if adding whitespace helps.
# If so, then use the version with whitespace.
if text[pos:pos + len(tok)] != tok:
            pat = r'\s*'.join(re.escape(c) for c in tok)
m = re.compile(pat).match(text, pos)
if m: tok = m.group()
# Move our position pointer to the end of the token.
assert text[pos:pos + len(tok)] == tok
pos += len(tok)
# Add this token. If it's not at the beginning of the
# sentence, then include any whitespace that separated it
# from the previous token.
if sentence:
sentence += ws
sentence += tok
# If we're at a sentence break, then start a new sentence.
if aug_tok.sentbreak:
yield sentence
sentence = ''
        # If the last sentence is empty, discard it.
if sentence:
yield sentence
# [XX] TESTING
def dump(self, tokens):
print('writing to /tmp/punkt.new...')
with open('/tmp/punkt.new', 'w') as outfile:
for aug_tok in tokens:
if aug_tok.parastart:
outfile.write('\n\n')
elif aug_tok.linestart:
outfile.write('\n')
else:
outfile.write(' ')
outfile.write(str(aug_tok))
# ////////////////////////////////////////////////////////////
# { Customization Variables
# ////////////////////////////////////////////////////////////
PUNCTUATION = tuple(';:,.!?')
# ////////////////////////////////////////////////////////////
# { Annotation Procedures
# ////////////////////////////////////////////////////////////
def _annotate_second_pass(self, tokens):
"""
Performs a token-based classification (section 4) over the given
tokens, making use of the orthographic heuristic (4.1.1), collocation
heuristic (4.1.2) and frequent sentence starter heuristic (4.1.3).
"""
for t1, t2 in _pair_iter(tokens):
self._second_pass_annotation(t1, t2)
yield t1
def _second_pass_annotation(self, aug_tok1, aug_tok2):
"""
Performs token-based classification over a pair of contiguous tokens
updating the first.
"""
# Is it the last token? We can't do anything then.
if not aug_tok2:
return
tok = aug_tok1.tok
if not aug_tok1.period_final:
# We only care about words ending in periods.
return
typ = aug_tok1.type_no_period
next_tok = aug_tok2.tok
next_typ = aug_tok2.type_no_sentperiod
tok_is_initial = aug_tok1.is_initial
# [4.1.2. Collocation Heuristic] If there's a
# collocation between the word before and after the
# period, then label tok as an abbreviation and NOT
# a sentence break. Note that collocations with
# frequent sentence starters as their second word are
# excluded in training.
if (typ, next_typ) in self._params.collocations:
aug_tok1.sentbreak = False
aug_tok1.abbr = True
return REASON_KNOWN_COLLOCATION
# [4.2. Token-Based Reclassification of Abbreviations] If
# the token is an abbreviation or an ellipsis, then decide
# whether we should *also* classify it as a sentbreak.
if ((aug_tok1.abbr or aug_tok1.ellipsis) and
(not tok_is_initial)):
# [4.1.1. Orthographic Heuristic] Check if there's
            # orthographic evidence about whether the next word
# starts a sentence or not.
is_sent_starter = self._ortho_heuristic(aug_tok2)
if is_sent_starter == True:
aug_tok1.sentbreak = True
return REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC
            # [4.1.3. Frequent Sentence Starter Heuristic] If the
# next word is capitalized, and is a member of the
# frequent-sentence-starters list, then label tok as a
# sentence break.
if (aug_tok2.first_upper and
next_typ in self._params.sent_starters):
aug_tok1.sentbreak = True
return REASON_ABBR_WITH_SENTENCE_STARTER
# [4.3. Token-Based Detection of Initials and Ordinals]
# Check if any initials or ordinals tokens that are marked
# as sentbreaks should be reclassified as abbreviations.
if tok_is_initial or typ == '##number##':
# [4.1.1. Orthographic Heuristic] Check if there's
            # orthographic evidence about whether the next word
# starts a sentence or not.
is_sent_starter = self._ortho_heuristic(aug_tok2)
if is_sent_starter == False:
aug_tok1.sentbreak = False
aug_tok1.abbr = True
if tok_is_initial:
return REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC
else:
return REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC
            # Special heuristic for initials: if orthographic
            # heuristic is unknown, and next word is always
# capitalized, then mark as abbrev (eg: J. Bach).
if (is_sent_starter == 'unknown' and tok_is_initial and
aug_tok2.first_upper and
not (self._params.ortho_context[next_typ] & _ORTHO_LC)):
aug_tok1.sentbreak = False
aug_tok1.abbr = True
return REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC
return
def _ortho_heuristic(self, aug_tok):
"""
Decide whether the given token is the first token in a sentence.
"""
# Sentences don't start with punctuation marks:
if aug_tok.tok in self.PUNCTUATION:
return False
ortho_context = self._params.ortho_context[aug_tok.type_no_sentperiod]
# If the word is capitalized, occurs at least once with a
# lower case first letter, and never occurs with an upper case
# first letter sentence-internally, then it's a sentence starter.
if (aug_tok.first_upper and
(ortho_context & _ORTHO_LC) and
not (ortho_context & _ORTHO_MID_UC)):
return True
# If the word is lower case, and either (a) we've seen it used
# with upper case, or (b) we've never seen it used
# sentence-initially with lower case, then it's not a sentence
# starter.
if (aug_tok.first_lower and
((ortho_context & _ORTHO_UC) or
not (ortho_context & _ORTHO_BEG_LC))):
return False
# Otherwise, we're not sure.
return 'unknown'
DEBUG_DECISION_FMT = '''Text: %(text)r (at offset %(period_index)d)
Sentence break? %(break_decision)s (%(reason)s)
Collocation? %(collocation)s
%(type1)r:
known abbreviation: %(type1_in_abbrs)s
is initial: %(type1_is_initial)s
%(type2)r:
known sentence starter: %(type2_is_sent_starter)s
orthographic heuristic suggests is a sentence starter? %(type2_ortho_heuristic)s
orthographic contexts in training: %(type2_ortho_contexts)s
'''
def format_debug_decision(d):
return DEBUG_DECISION_FMT % d
def demo(text, tok_cls=PunktSentenceTokenizer, train_cls=PunktTrainer):
"""Builds a punkt model and applies it to the same text"""
cleanup = lambda s: re.compile(r'(?:\r|^\s+)', re.MULTILINE).sub('', s).replace('\n', ' ')
trainer = train_cls()
trainer.INCLUDE_ALL_COLLOCS = True
trainer.train(text)
sbd = tok_cls(trainer.get_params())
for l in sbd.sentences_from_text(text):
print(cleanup(l))
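# Illustrative call (the file name is a placeholder):
#     demo(open('corpus.txt').read())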
|
[
"timclerico@gmail.com"
] |
timclerico@gmail.com
|
db702c1abf2eafb2e4c4d19ae35c6078727e3948
|
2638bdaf737fd357cbef480ae04fe60c25c87f82
|
/0x04-python-more_data_structures/10-best_score.py
|
3a0339dd6d77d6bccfeccfc74f80e583d8d1f3c4
|
[] |
no_license
|
peytonbrsmith/holbertonschool-higher_level_programming
|
394993ba6f899768cd5e8b7a4beec31f9aba97a5
|
ac87fcb3527f73cc5c5d8214406edb2c6d47e1c7
|
refs/heads/master
| 2023-04-19T16:25:19.725354
| 2021-05-12T20:19:02
| 2021-05-12T20:19:02
| 319,422,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
#!/usr/bin/python3
def best_score(a_dictionary):
if a_dictionary is None:
return (None)
first = True
best = None
for key in a_dictionary:
if first:
prev = a_dictionary[key]
best = key
first = False
if a_dictionary[key] > prev:
prev = a_dictionary[key]
best = key
return (best)
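# Illustrative calls (the scores are made up):
#     best_score({'Ann': 81, 'Bob': 94, 'Cat': 73})   # -> 'Bob'
#     best_score(None)                                # -> None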
|
[
"peytonbrsmith@gmail.com"
] |
peytonbrsmith@gmail.com
|
8038cea1fdf0b24c8720840e9409fe133a40824e
|
710f60cb392c18345af3861690dd8a47b469bb51
|
/booksapp/serializers.py
|
92ceca6ad775205069a8db0d4757bf5b856ba86c
|
[] |
no_license
|
mkazber/books
|
1ba6817f6509f76863a68a060ffcdeada2d63580
|
be0355ccff699bd367618aa0bff43e89e21b4965
|
refs/heads/main
| 2023-07-26T08:08:45.421636
| 2021-08-25T20:48:41
| 2021-08-25T20:48:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,391
|
py
|
# from rest_framework import serializers
# from django.contrib import admin
# from .models import Book, BookType, BookIsbn, Author, Rating, Publishing
#
# class BookTypeSerializer(serializers.ModelSerializer):
# class Meta:
# model = BookType
# fields = ['id', 'name']
#
# class BookIsbnSerializer(serializers.ModelSerializer):
# class Meta:
# model = BookIsbn
# fields = ['id', 'isbn_10', 'isbn_13']
#
# class AuthorSerializer(serializers.ModelSerializer):
# class Meta:
# model = Author
# fields = ['id', 'name', 'surname']
#
# class PublishingSerializer(serializers.ModelSerializer):
# class Meta:
# model = Publishing
# fields = ['id', 'name']
#
# class BookSerializer(serializers.ModelSerializer):
# type = BookTypeSerializer(many=True)
# isbn = BookIsbnSerializer(many=False)
# authors = AuthorSerializer(many=True)
# publishing = PublishingSerializer(many=False)
# class Meta:
# model = Book
# fields = ['id','title','authors','type','publishing','isbn','numberOfPages', 'releaseDate', 'desc','cover','slug']
#
# class BookMiniSerializer(serializers.ModelSerializer):
# class Meta:
# model = Book
# fields = ['id','title']
#
# class RatingSerializer(serializers.ModelSerializer):
# class Meta:
# model = Rating
# fields = ['id']
|
[
"noreply@github.com"
] |
noreply@github.com
|
b0ca8e5f2883d66123cfd050dbb3a54d90ba92d2
|
3bd892608e67f4acc50f00714d03927825f5ca14
|
/mynews/newsfeeder/newsfeeder.py
|
4a2af342522b6bbb124c79c8c7f21fcb5f61b4c1
|
[] |
no_license
|
tobetao/webtest1
|
bca522503dc1fd69b9478ad6cfbf72ed58b7c45f
|
cff60d36b014ab5d882ea20c82b02ba36e034e82
|
refs/heads/master
| 2020-04-06T03:41:58.375014
| 2014-12-10T15:30:35
| 2014-12-10T15:30:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,328
|
py
|
#-*- coding:utf-8 -*-
import urllib, pprint
from bs4 import BeautifulSoup
import urllib2, sys
import cookielib
import feedparser
import codecs
a = codecs.open("iteye.txt", "w", "utf-8")
# pretend to be a browser: firefox 18.0
header_data = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#'Accept-Language':'en-gb,zh-cn;q=0.8,en-us;q=0.5,en;q=0.3',
'Connection':'keep-alive'}
def GetSource(url):
# enable cookie
cookie = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
opener = urllib2.build_opener(cookie,urllib2.HTTPHandler)
# install opener
urllib2.install_opener(opener)
    # pretend to be a browser
request = urllib2.Request(url=url,headers=header_data)
# send the request
content = urllib2.urlopen(request)
if content:
return content.read()
else:
return ''
def getAllTopPythonArticles():
soup = BeautifulSoup(GetSource('http://www.iteye.com/blogs'))
#print(soup.prettify())
toparticleurls=list(set([i['href'] for i in soup.find_all('a') if len(i.contents[0])>10 and '_blank' in str(i) and 'iteye.com/blog/' in str(i)]))
topblogs=list(set([i.split('/blog')[0] for i in toparticleurls]))
topblogRSSes=[i+'/rss' for i in topblogs]
return toparticleurls, topblogs, topblogRSSes
def writeToFile(str):
print str
a.write(str+'\n')
def getArticle(toparticleurls, feeds):
for feed in feeds:
d = feedparser.parse(feed)
#print d['feed']['title'], ' '.join(d.channel.title.split()), ' '.join(d.channel.description.split()), feed
for e in d.entries:
#if e.link in toparticleurls:
print e.keys()
try:
writeToFile(', '.join(map(str, [e.title, '.'.join(e.id.split('/')[-3:]), e.published])))
writeToFile(BeautifulSoup(e['summary_detail']['value']).get_text().replace('\n\n', '\n').replace('\n\n', '\n').replace('\n\n', '\n').replace('\n\n', '\n'))
writeToFile("#"*50)
except: pass
#pprint.pprint(getAllTopPythonArticles())
toparticleurls, topblogs, topblogRSSes=getAllTopPythonArticles()
getArticle(toparticleurls, topblogRSSes)
a.close()
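# Porting note (a sketch, not in the original script): the code above targets
# Python 2 (urllib2, cookielib, print statements). A rough Python 3 equivalent
# of GetSource, untested against iteye.com, would be:
#   import urllib.request, http.cookiejar
#   def get_source(url):
#       cj = http.cookiejar.CookieJar()
#       opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
#       return opener.open(urllib.request.Request(url, headers=header_data)).read()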
|
[
"Grace"
] |
Grace
|
d0698c08924aafd818b04986d915218db6465545
|
223eea1c29d87d9eb29b5133d08feb00b0dff09d
|
/hotDog/tester.py
|
078b02dedb04d8f5b626a110d62cdf2890c7e83b
|
[] |
no_license
|
hydroguy45/hotDog
|
626914e9090e912fa3d8f842a7813e490956c99c
|
a39e0a496d27707eb4aa47f25b0d7615740e6b13
|
refs/heads/master
| 2021-01-02T09:13:46.159476
| 2018-03-15T18:18:31
| 2018-03-15T18:18:31
| 99,170,313
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,048
|
py
|
import tensorflow as tf
from tflearn.layers.core import input_data, fully_connected, dropout
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.layers.normalization import local_response_normalization
from tflearn.data_utils import shuffle
import tflearn as tflearn
from PIL import Image
import numpy
with tf.Graph().as_default():
#Input
neuralNet = input_data(shape=[None, 40, 40, 3])
#Convo
neuralNet = conv_2d(neuralNet, 40, 3, activation='relu6', regularizer="L2")
neuralNet = max_pool_2d(neuralNet, 2)
neuralNet = local_response_normalization(neuralNet)
neuralNet = conv_2d(neuralNet, 80, 3, activation='relu6', regularizer="L2")
neuralNet = max_pool_2d(neuralNet, 2)
neuralNet = local_response_normalization(neuralNet)
neuralNet = conv_2d(neuralNet, 160, 3, activation='leaky_relu', regularizer="L2")
neuralNet = max_pool_2d(neuralNet, 2)
neuralNet = local_response_normalization(neuralNet)
#Fully Connected
neuralNet = fully_connected(neuralNet, 80, activation='tanh')
neuralNet = fully_connected(neuralNet, 160, activation='tanh')
neuralNet = dropout(neuralNet, 0.8)
neuralNet = fully_connected(neuralNet, 240, activation='linear')
neuralNet = dropout(neuralNet, 0.8)
#Output
neuralNet = fully_connected(neuralNet, 2, activation='sigmoid')
neuralNet = regression(neuralNet, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy', name='target')
#Model
model = tflearn.DNN(neuralNet, tensorboard_verbose=2)
model.load('myModel.tflearn')
filePath = raw_input("What picture do you want to use:\n")
with Image.open(filePath) as img:
result = model.predict(numpy.asarray(img).reshape([-1,40,40,3]))[0]
if result[0]>result[1]:
print("Cat")
else:
print("Hot Dog")
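# Note (a sketch, not in the original): model.predict above assumes the chosen
# picture is already a 40x40 RGB image; resizing it first would make arbitrary
# inputs usable:
#   img = Image.open(filePath).convert('RGB').resize((40, 40))
#   result = model.predict(numpy.asarray(img).reshape([-1, 40, 40, 3]))[0]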
|
[
"foleychriscpfjr@gmail.com"
] |
foleychriscpfjr@gmail.com
|
179681d70d25f6f5bf47fefd4828c21956e78241
|
68d135307198e316b7c66ef668cd88fc9b558c41
|
/desert_model/parse_file.py
|
c42f180cb79e97301d10501842852f7ce130c3e3
|
[] |
no_license
|
SynergisticDrugCombinationPrediction/DeepSignalingSynergy
|
64c3a14dbabae1145eadac4492842efdb4181d8c
|
1c14818050f1ba75d5fbf96c630deca79dfe7f4e
|
refs/heads/master
| 2023-01-13T08:34:21.017268
| 2020-11-21T13:44:51
| 2020-11-21T13:44:51
| 280,046,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,341
|
py
|
import numpy as np
import pandas as pd
from numpy import savetxt
from load_data import LoadData
from sklearn.model_selection import train_test_split
class ParseFile():
def __init__(self, dir_opt):
self.dir_opt = dir_opt
# use map dict to read input from deeplearning
# drug -[drug_map]-> drug_name(drug_i, drug_j)
# celllinename -[celllinemap]-> cellline_name
# -> gene_name -[drug_map][drug_target]-> (RNA, drug_i, drug_j)
# FIND THE DUPLICATE ROWS[Drug A, Drug B, Cell Line Name] THEN AVERAGE SCORE
def input_condense(self):
dir_opt = self.dir_opt
dl_input_df = pd.read_csv('.' + dir_opt + '/data/DeepLearningInput.csv')
dl_input_df = dl_input_df.groupby(['Drug A', 'Drug B', 'Cell Line Name']).agg({'Score':'mean'}).reset_index()
dl_input_df.to_csv('.' + dir_opt + '/mid_data/DeepLearningInput.txt', index = False, header = True)
# REMOVE INPUT ROWS WITH NO MAPPED DRUG NAME (48953 POINTS INPUT)
def input_drug_condense(self):
dir_opt = self.dir_opt
dl_input_df = pd.read_table('.' + dir_opt + '/mid_data/DeepLearningInput.txt', delimiter = ',')
drug_map_dict = ParseFile(dir_opt).drug_map_dict()
deletion_list = []
for row in dl_input_df.itertuples():
if pd.isna(drug_map_dict[row[1]]) or pd.isna(drug_map_dict[row[2]]):
deletion_list.append(row[0])
mid_dl_input_df = dl_input_df.drop(dl_input_df.index[deletion_list]).reset_index(drop = True)
mid_dl_input_df.to_csv('.' + dir_opt + '/mid_data/MidDeepLearningInput.txt', index = False, header = True)
# REMOVE INPUT ROWS WITH NO CORRESPONDING CELLLINE NAME ([, 37355] POINTS INPUT)
def input_cellline_condense(self, RNA_seq_filename):
dir_opt = self.dir_opt
cellline_gene_df = pd.read_csv('.' + dir_opt + '/filtered_data/' + RNA_seq_filename + '.csv')
cellline_name_list = list(cellline_gene_df.columns[2:])
mid_dl_input_df = pd.read_table('.' + dir_opt + '/mid_data/MidDeepLearningInput.txt', delimiter = ',')
cellline_map_dict = ParseFile(dir_opt).cellline_map_dict()
deletion_list = []
for row in mid_dl_input_df.itertuples():
if cellline_map_dict[row[3]] not in cellline_name_list:
deletion_list.append(row[0])
final_dl_input_df = mid_dl_input_df.drop(mid_dl_input_df.index[deletion_list]).reset_index(drop = True)
final_dl_input_df.to_csv('.' + dir_opt + '/mid_data/FinalDeepLearningInput.txt', index = False, header = True)
# REMOVE INPUT ROWS WITH ALL ZEROS ON DRUG TARGET GENE CONNECTION
def input_drug_gene_condense(self, RNA_seq_filename):
dir_opt = self.dir_opt
deletion_list = []
final_dl_input_df = pd.read_table('.' + dir_opt + '/mid_data/FinalDeepLearningInput.txt', delimiter = ',')
drug_map_dict, cellline_map_dict, drug_dict, gene_target_num_dict = LoadData(dir_opt, RNA_seq_filename).pre_load_dict()
target_index_list = gene_target_num_dict.values()
drug_target_matrix = np.load('.' + dir_opt + '/filtered_data/drug_target_matrix.npy')
for row in final_dl_input_df.itertuples():
drug_a = drug_map_dict[row[1]]
drug_b = drug_map_dict[row[2]]
cellline_name = cellline_map_dict[row[3]]
# DRUG_A AND 1130 TARGET GENES
drug_a_target_list = []
drug_index = drug_dict[drug_a]
for target_index in target_index_list:
if target_index == -1 :
effect = 0
else:
effect = drug_target_matrix[drug_index, target_index]
drug_a_target_list.append(effect)
# DRUG_B AND 1130 TARGET GENES
drug_b_target_list = []
drug_index = drug_dict[drug_b]
for target_index in target_index_list:
if target_index == -1 :
effect = 0
else:
effect = drug_target_matrix[drug_index, target_index]
drug_b_target_list.append(effect)
if all([a == 0 for a in drug_a_target_list]) or all([b == 0 for b in drug_b_target_list]):
deletion_list.append(row[0])
zero_final_dl_input_df = final_dl_input_df.drop(final_dl_input_df.index[deletion_list]).reset_index(drop = True)
zero_final_dl_input_df.to_csv('.' + dir_opt + '/mid_data/ZeroFinalDeepLearningInput.txt', index = False, header = True)
print(zero_final_dl_input_df)
# CALCULATE NUMBER OF UNIQUE DRUG IN ZEROFINAL_INPUT
def zero_final_drug_count(self):
dir_opt = self.dir_opt
zero_final_dl_input_df = pd.read_table('.' + dir_opt + '/mid_data/ZeroFinalDeepLearningInput.txt', delimiter = ',')
zero_final_drug_list = []
for drug in zero_final_dl_input_df['Drug A']:
if drug not in zero_final_drug_list:
zero_final_drug_list.append(drug)
for drug in zero_final_dl_input_df['Drug B']:
if drug not in zero_final_drug_list:
zero_final_drug_list.append(drug)
zero_final_drug_list = sorted(zero_final_drug_list)
print(zero_final_drug_list)
print(len(zero_final_drug_list))
# SPLIT DEEP LEARNING INPUT INTO TRAINING AND TEST
def split_train_test(self, test_size):
dir_opt = self.dir_opt
zero_final_dl_input_df = pd.read_table('.' + dir_opt + '/mid_data/ZeroFinalDeepLearningInput.txt', delimiter = ',')
train_input_df, test_input_df = train_test_split(zero_final_dl_input_df, test_size = test_size)
train_input_df = train_input_df.reset_index(drop = True)
test_input_df = test_input_df.reset_index(drop = True)
train_input_df.to_csv('.' + dir_opt + '/filtered_data/TrainingInput.txt', index = False, header = True)
test_input_df.to_csv('.' + dir_opt + '/filtered_data/TestInput.txt', index = False, header = True)
return train_input_df, test_input_df
# FIND UNIQUE DRUG NAME FROM DATAFRAME AND MAP
def drug_map(self):
dir_opt = self.dir_opt
dl_input_df = pd.read_table('.' + dir_opt + '/filtered_data/DeepLearningInput.txt', delimiter = ',')
drug_target_df = pd.read_table('.' + dir_opt + '/data/drug_tar_drugBank_all.txt')
drug_list = []
for drug in dl_input_df['Drug A']:
if drug not in drug_list:
drug_list.append(drug)
for drug in dl_input_df['Drug B']:
if drug not in drug_list:
drug_list.append(drug)
drug_list = sorted(drug_list)
drug_df = pd.DataFrame(data = drug_list, columns = ['Drug Name'])
drug_df.to_csv('.' + dir_opt + '/data/input_drug_name.txt', index = False, header = True)
mapped_drug_list = []
for drug in drug_target_df['Drug']:
if drug not in mapped_drug_list:
mapped_drug_list.append(drug)
mapped_drug_list = sorted(mapped_drug_list)
mapped_drug_df = pd.DataFrame(data = mapped_drug_list, columns = ['Mapped Drug Name'])
mapped_drug_df.to_csv('.' + dir_opt + '/data/mapped_drug_name.txt', index = False, header = True)
# LEFT JOIN TWO DATAFRAME
drug_map_df = pd.merge(drug_df, mapped_drug_df, how='left', left_on = 'Drug Name', right_on = 'Mapped Drug Name')
drug_map_df.to_csv('.' + dir_opt + '/data/drug_map.csv', index = False, header = True)
# AFTER AUTO MAP -> MANUAL MAP
# FROM MANUAL MAP TO DRUG MAP DICT
def drug_map_dict(self):
dir_opt = self.dir_opt
drug_map_df = pd.read_csv('.' + dir_opt + '/mid_data/drug_map.csv')
drug_map_dict = {}
for row in drug_map_df.itertuples():
drug_map_dict[row[1]] = row[2]
np.save('.' + dir_opt + '/filtered_data/drug_map_dict.npy', drug_map_dict)
return drug_map_dict
    # FORM ADJACENT MATRIX (DRUG x TARGET) (LIST -> SORTED -> DICT -> MATRIX) (ALL 5435 DRUGS <-> ALL 2775 GENES)
def drug_target(self):
dir_opt = self.dir_opt
drug_target_df = pd.read_table('.' + dir_opt + '/data/drug_tar_drugBank_all.txt')
# GET UNIQUE SORTED DRUGLIST AND TARGET(GENE) LIST
drug_list = []
for drug in drug_target_df['Drug']:
if drug not in drug_list:
drug_list.append(drug)
drug_list = sorted(drug_list)
target_list = []
for target in drug_target_df['Target']:
if target not in target_list:
target_list.append(target)
target_list = sorted(target_list)
# CONVERT THE SORTED LIST TO DICT WITH VALUE OF INDEX
drug_dict = {drug_list[i] : i for i in range((len(drug_list)))}
drug_num_dict = {i : drug_list[i] for i in range((len(drug_list)))}
target_dict = {target_list[i] : i for i in range(len(target_list))}
target_num_dict = {i : target_list[i] for i in range(len(target_list))}
        # ITERATE THE DATAFRAME TO DEFINE CONNECTIONS BETWEEN DRUG AND TARGET(GENE)
drug_target_matrix = np.zeros((len(drug_list), len(target_list))).astype(int)
for index, drug_target in drug_target_df.iterrows():
# BUILD ADJACENT MATRIX
drug_target_matrix[drug_dict[drug_target['Drug']], target_dict[drug_target['Target']]] = 1
drug_target_matrix = drug_target_matrix.astype(int)
np.save('.' + dir_opt + '/filtered_data/drug_target_matrix.npy', drug_target_matrix)
# np.savetxt("drug_target_matrix.csv", drug_target_matrix, delimiter=',')
# x, y = drug_target_matrix.shape
# for i in range(x):
# # FIND DRUG TARGET OVER 100 GENES
# row = drug_target_matrix[i, :]
# if len(row[row>=1]) >= 100: print(drug_num_dict[i])
np.save('.' + dir_opt + '/filtered_data/drug_dict.npy', drug_dict)
np.save('.' + dir_opt + '/filtered_data/drug_num_dict.npy', drug_num_dict)
np.save('.' + dir_opt + '/filtered_data/target_dict.npy', target_dict)
np.save('.' + dir_opt + '/filtered_data/target_num_dict.npy', target_num_dict)
return drug_dict, drug_num_dict, target_dict, target_num_dict
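    # Added note (not in the original): after drug_target() runs,
    # drug_target_matrix[drug_dict[d], target_dict[g]] == 1 exactly when drug d
    # is listed as targeting gene g in drug_tar_drugBank_all.txt.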
# FROM MANUAL CELLLINE NAME MAP TO DICT
def cellline_map_dict(self):
dir_opt = self.dir_opt
cellline_name_df = pd.read_table('.' + dir_opt + '/mid_data/nci60-ccle_cell_name_map1.txt')
cellline_map_dict = {}
for row in cellline_name_df.itertuples():
cellline_map_dict[row[1]] = row[2]
np.save('.' + dir_opt + '/filtered_data/cellline_map_dict.npy', cellline_map_dict)
return cellline_map_dict
    # [CCLE GENES : DRUG_TAR GENES] KEY : VALUE
def gene_target_num_dict(self, RNA_seq_filename):
dir_opt = self.dir_opt
drug_dict, drug_num_dict, target_dict, target_num_dict = ParseFile(dir_opt).drug_target()
cellline_gene_df = pd.read_csv('.' + dir_opt + '/filtered_data/' + RNA_seq_filename +'.csv')
# print(target_dict)
gene_target_num_dict = {}
for row in cellline_gene_df.itertuples():
if row[2] not in target_dict.keys():
map_index = -1
else:
map_index = target_dict[row[2]]
gene_target_num_dict[row[0]] = map_index
np.save('.' + dir_opt + '/filtered_data/gene_target_num_dict.npy', gene_target_num_dict)
return gene_target_num_dict
# FILTER DUPLICATED AND SPARSE GENES (FINALLY [1130, 1684] GENES)
def filter_cellline_gene(self, RNA_seq_filename):
dir_opt = self.dir_opt
cellline_gene_df = pd.read_table('.' + dir_opt + '/data/' + RNA_seq_filename + '.txt')
cellline_gene_df = cellline_gene_df.drop_duplicates(subset = ['geneSymbol'],
keep = 'first').sort_values(by = ['geneSymbol']).reset_index(drop = True)
threshold = int((len(cellline_gene_df.columns) - 3) / 3)
deletion_list = []
for row in cellline_gene_df.itertuples():
if list(row[3:]).count(0) > threshold:
deletion_list.append(row[0])
cellline_gene_df = cellline_gene_df.drop(cellline_gene_df.index[deletion_list]).reset_index(drop = True)
cellline_gene_df.to_csv('.' + dir_opt + '/filtered_data/' + RNA_seq_filename + '.csv', index = False, header = True)
print(cellline_gene_df)
    # FORM ADJACENT MATRIX (GENE x PATHWAY) (LIST -> SORTED -> DICT -> MATRIX) (ALL 1298 GENES <-> 16 PATHWAYS)
def gene_pathway(self, pathway_filename):
dir_opt = self.dir_opt
gene_pathway_df = pd.read_table('.' + dir_opt + '/data/' + pathway_filename + '.txt')
gene_list = sorted(list(gene_pathway_df['AllGenes']))
gene_pathway_df = gene_pathway_df.drop(['AllGenes'], axis = 1).sort_index(axis = 1)
pathway_list = list(gene_pathway_df.columns)
# CONVERT SORTED LIST TO DICT WITH INDEX
gene_dict = {gene_list[i] : i for i in range(len(gene_list))}
gene_num_dict = {i : gene_list[i] for i in range(len(gene_list))}
pathway_dict = {pathway_list[i] : i for i in range(len(pathway_list))}
pathway_num_dict = {i : pathway_list[i] for i in range(len(pathway_list))}
        # ITERATE THE DATAFRAME TO DEFINE CONNECTIONS BETWEEN GENES AND PATHWAYS
gene_pathway_matrix = np.zeros((len(gene_list), len(pathway_list))).astype(int)
for gene_row in gene_pathway_df.itertuples():
pathway_index = 0
for gene in gene_row[1:]:
if gene != 'test':
gene_pathway_matrix[gene_dict[gene], pathway_index] = 1
pathway_index += 1
np.save('.' + dir_opt + '/filtered_data/gene_pathway_matrix.npy', gene_pathway_matrix)
np.save('.' + dir_opt + '/filtered_data/gene_dict.npy', gene_dict)
np.save('.' + dir_opt + '/filtered_data/gene_num_dict.npy', gene_num_dict)
np.save('.' + dir_opt + '/filtered_data/pathway_dict.npy', pathway_dict)
np.save('.' + dir_opt + '/filtered_data/pathway_num_dict.npy', pathway_num_dict)
return gene_dict, gene_num_dict, pathway_dict, pathway_num_dict
def pre_parse():
dir_opt = '/datainfo1'
# # STABLE DICTIONARY NOT CHANGE WITH FILES
# ParseFile(dir_opt).drug_map()
# ParseFile(dir_opt).drug_map_dict()
# ParseFile(dir_opt).drug_target()
# ParseFile(dir_opt).cellline_map_dict()
RNA_seq_filename = 'nci60-ccle_RNAseq_tpm1'
# ParseFile(dir_opt).gene_target_num_dict(RNA_seq_filename)
# ParseFile(dir_opt).filter_cellline_gene(RNA_seq_filename)
pathway_filename = 'Selected_Kegg_Pathways1'
ParseFile(dir_opt).gene_pathway(pathway_filename)
def pre_input():
dir_opt = '/datainfo1'
RNA_seq_filename = 'nci60-ccle_RNAseq_tpm1'
ParseFile(dir_opt).input_condense()
ParseFile(dir_opt).input_drug_condense()
ParseFile(dir_opt).input_cellline_condense(RNA_seq_filename)
ParseFile(dir_opt).input_drug_gene_condense(RNA_seq_filename)
ParseFile(dir_opt).zero_final_drug_count()
def split_train_test():
dir_opt = '/datainfo1'
test_size = 0.2
ParseFile(dir_opt).split_train_test(test_size)
if __name__ == "__main__":
# pre_parse()
# pre_input()
split_train_test()
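# Pipeline note (a sketch based on the calls above; it assumes the raw files under
# ./datainfo1/data and ./datainfo1/mid_data are already in place):
#   pre_parse()          # build drug/gene/pathway dictionaries and adjacency matrices
#   pre_input()          # condense DeepLearningInput.csv down to ZeroFinalDeepLearningInput.txt
#   split_train_test()   # write TrainingInput.txt and TestInput.txt (80/20 split)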
|
[
"hemingzhang@wustl.edu"
] |
hemingzhang@wustl.edu
|
e6c058cc3ef8715f0a37f9a90547d7d5b7bb90ca
|
581fe2ff2aba0824902d176c4f22218a2332c649
|
/CoreEngine5.0/src/CoreServices/DeviceControlService-remote
|
9701815dd7017c3d29a62d1cbdebfee71e871ade
|
[] |
no_license
|
guanxingquan/core-engine-five-unit-test
|
dcd17216da4e0a7afd66d38c0bc01da48ee67d48
|
cf33bc397501301e8ca06d25b1be5733f0c52e6d
|
refs/heads/master
| 2021-03-12T22:56:58.980184
| 2015-05-22T08:04:36
| 2015-05-22T08:04:36
| 34,090,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,291
|
#!/usr/bin/env python
#
# Autogenerated by Thrift Compiler (0.8.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
import sys
import pprint
from urlparse import urlparse
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import THttpClient
from thrift.protocol import TBinaryProtocol
import DeviceControlService
from ttypes import *
if len(sys.argv) <= 1 or sys.argv[1] == '--help':
print ''
print 'Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] function [arg1 [arg2...]]'
print ''
print 'Functions:'
print ' string getDeviceStatus(string deviceId)'
print ' string getGPIO(string deviceId, string ioNumber)'
print ' string setGPIO(string deviceId, string ioNumber, string value)'
print ' string startPanDevice(string deviceId, string channelId, string direction)'
print ' string stopPanDevice(string deviceId, string channelId)'
print ' string startTiltDevice(string deviceId, string channelId, string direction)'
print ' string stopTiltDevice(string deviceId, string channelId)'
print ' string startZoomDevice(string deviceId, string channelId, string direction)'
print ' string stopZoomDevice(string deviceId, string channelId)'
print ' string writeData(string deviceId, string portNumber, data)'
print ' readData(string deviceId, string portNumber)'
print ''
sys.exit(0)
pp = pprint.PrettyPrinter(indent = 2)
host = 'localhost'
port = 9090
uri = ''
framed = False
http = False
argi = 1
if sys.argv[argi] == '-h':
parts = sys.argv[argi+1].split(':')
host = parts[0]
if len(parts) > 1:
port = int(parts[1])
argi += 2
if sys.argv[argi] == '-u':
url = urlparse(sys.argv[argi+1])
parts = url[1].split(':')
host = parts[0]
if len(parts) > 1:
port = int(parts[1])
else:
port = 80
uri = url[2]
if url[4]:
uri += '?%s' % url[4]
http = True
argi += 2
if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':
framed = True
argi += 1
cmd = sys.argv[argi]
args = sys.argv[argi+1:]
if http:
transport = THttpClient.THttpClient(host, port, uri)
else:
socket = TSocket.TSocket(host, port)
if framed:
transport = TTransport.TFramedTransport(socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = DeviceControlService.Client(protocol)
transport.open()
if cmd == 'getDeviceStatus':
if len(args) != 1:
print 'getDeviceStatus requires 1 args'
sys.exit(1)
pp.pprint(client.getDeviceStatus(args[0],))
elif cmd == 'getGPIO':
if len(args) != 2:
print 'getGPIO requires 2 args'
sys.exit(1)
pp.pprint(client.getGPIO(args[0],args[1],))
elif cmd == 'setGPIO':
if len(args) != 3:
print 'setGPIO requires 3 args'
sys.exit(1)
pp.pprint(client.setGPIO(args[0],args[1],args[2],))
elif cmd == 'startPanDevice':
if len(args) != 3:
print 'startPanDevice requires 3 args'
sys.exit(1)
pp.pprint(client.startPanDevice(args[0],args[1],args[2],))
elif cmd == 'stopPanDevice':
if len(args) != 2:
print 'stopPanDevice requires 2 args'
sys.exit(1)
pp.pprint(client.stopPanDevice(args[0],args[1],))
elif cmd == 'startTiltDevice':
if len(args) != 3:
print 'startTiltDevice requires 3 args'
sys.exit(1)
pp.pprint(client.startTiltDevice(args[0],args[1],args[2],))
elif cmd == 'stopTiltDevice':
if len(args) != 2:
print 'stopTiltDevice requires 2 args'
sys.exit(1)
pp.pprint(client.stopTiltDevice(args[0],args[1],))
elif cmd == 'startZoomDevice':
if len(args) != 3:
print 'startZoomDevice requires 3 args'
sys.exit(1)
pp.pprint(client.startZoomDevice(args[0],args[1],args[2],))
elif cmd == 'stopZoomDevice':
if len(args) != 2:
print 'stopZoomDevice requires 2 args'
sys.exit(1)
pp.pprint(client.stopZoomDevice(args[0],args[1],))
elif cmd == 'writeData':
if len(args) != 3:
print 'writeData requires 3 args'
sys.exit(1)
pp.pprint(client.writeData(args[0],args[1],eval(args[2]),))
elif cmd == 'readData':
if len(args) != 2:
print 'readData requires 2 args'
sys.exit(1)
pp.pprint(client.readData(args[0],args[1],))
else:
print 'Unrecognized method %s' % cmd
sys.exit(1)
transport.close()
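# Example invocations (a sketch; host, port and the device/channel arguments are
# placeholders, not values from the original project):
#   python DeviceControlService-remote -h localhost:9090 getDeviceStatus camera01
#   python DeviceControlService-remote -framed startPanDevice camera01 1 left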
|
[
"guanxingquan@kaisquare.com.cn"
] |
guanxingquan@kaisquare.com.cn
|