blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
35b823ee571526aabe931d1cf528fedc446c7de5 | 55e9f3b00fc2e488597bab5225ed321c86efbd4b | /sdk/test/test_frequency_response.py | 8e97d49e14aa132ed3efe4ee80569106b6d29d8d | [
"MIT"
] | permissive | bs-yapily/yapily-sdk-python | ad9d04c28f3d744830734c3444c1cef8215206fd | 0bba45e351b674eb655425a51190f539c4e9896f | refs/heads/master | 2020-08-26T17:18:53.156429 | 2019-10-22T11:01:16 | 2019-10-22T11:01:16 | 217,085,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | # coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
OpenAPI spec version: 0.0.155
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import yapily
from yapily.models.frequency_response import FrequencyResponse # noqa: E501
from yapily.rest import ApiException
class TestFrequencyResponse(unittest.TestCase):
    """Unit-test stubs for the generated ``FrequencyResponse`` model.

    Auto-generated by swagger-codegen; the actual assertions still need
    to be filled in (see the FIXME below).
    """
    def setUp(self):
        # No fixtures required by the generated stub.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testFrequencyResponse(self):
        """Test FrequencyResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = yapily.models.frequency_response.FrequencyResponse() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"systems@yapily.com"
] | systems@yapily.com |
71a8a66e76ca142acb18fe646e73586520d65752 | f53a307ff2cc14cebd76076384705e73e27ecbc1 | /visel.py | 624879eae93a60d6d1f1610cc279324815f11582 | [] | no_license | MrIvanushka/PaperGames | 49445e0223e95e004b1e46cc9ab409062b4e03a6 | b96424a547abb42568c6309979f4418141120d87 | refs/heads/master | 2023-04-26T08:36:09.229748 | 2021-05-12T14:34:16 | 2021-05-12T14:34:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,157 | py | from tkinter import *
import random
root = Tk()
root.title("Виселица")
canvas = Canvas(root, width=600, height=600)
canvas.pack()
def but():
    """Paint the board background: a 600x600 grid of 33px white cells,
    then the welcome/rules text in the middle of the canvas.

    Uses the module-level tkinter ``canvas``.
    """
    y = 0
    while y < 600:
        x = 0
        while x < 600:
            # One grid cell; 33px step leaves a partial cell at the edge.
            canvas.create_rectangle(x, y, x + 33, y + 33, fill="white", outline="blue")
            x = x + 33
        y = y + 33
    # Multi-line greeting/rules text (Russian), drawn centered-ish.
    fag = '''Привет, игрок! Сыграем?
Принцип игры
Загадывается слово, пишется первая
и последняя буква, и отмечаются
места для остальных букв. '''
    canvas.create_text(310, 240, text=fag, fill="purple", font=("Helvetica", "14"))
library = ["виселица", "смартфон", "маргарин", "страница", "микрофон", "мегагерц", "креветка"]
def arr():
    """Start one round of hangman: pick a word, draw the blanks and the
    on-screen alphabet keyboard, and wire up all per-round callbacks.

    All state (chosen word, guessed letters, error count, buttons) lives
    in this closure; the nested helpers below read/mutate it.
    """
    but()
    # All words in ``library`` are 8 letters; first and last are revealed.
    word = random.choice(library)
    wo = word[1: -1]
    wor = []
    for i in wo:
        wor.append(i)
    # Reveal first/last letter, draw 6 blanks for the hidden middle part.
    a0 = canvas.create_text(282, 40, text=word[0], fill="purple", font=("Helvetica", "18"))
    a1 = canvas.create_text(315, 40, text="_", fill="purple", font=("Helvetica", "18"))
    a2 = canvas.create_text(347, 40, text="_", fill="purple", font=("Helvetica", "18"))
    a3 = canvas.create_text(380, 40, text="_", fill="purple", font=("Helvetica", "18"))
    a4 = canvas.create_text(412, 40, text="_", fill="purple", font=("Helvetica", "18"))
    a5 = canvas.create_text(444, 40, text="_", fill="purple", font=("Helvetica", "18"))
    a6 = canvas.create_text(477, 40, text="_", fill="purple", font=("Helvetica", "18"))
    a7 = canvas.create_text(510, 40, text=word[-1], fill="purple", font=("Helvetica", "18"))
    list1 = [1, 2, 3, 4, 5, 6]
    # NOTE(review): first character is a Latin 'a', the rest Cyrillic --
    # presumably intentional for this word list; verify if extending it.
    alphabet = "aбвгдеёжзийклмнопрстуфхцчшщъыьэюя"
    er = []    # wrong guesses so far
    win = []   # correctly-filled blank positions
    def a(v):
        # Callback for the letter button ``v``.
        ind_alf = alphabet.index(v)
        key = alphabet[ind_alf]
        if v in wor:
            ind = wor.index(v)
            b2 = list1[ind]
            wor[ind] = '1'  # mark this position as revealed
            def krd():
                # Map a blank index (1..6) to its canvas x/y coordinates.
                if b2 == 1:
                    x1, y1 = 315, 40
                if b2 == 2:
                    x1, y1 = 347, 40
                if b2 == 3:
                    x1, y1 = 380, 40
                if b2 == 4:
                    x1, y1 = 412, 40
                if b2 == 5:
                    x1, y1 = 444, 40
                if b2 == 6:
                    x1, y1 = 477, 40
                return x1, y1
            x1, y1 = krd()
            win.append(v)
            a2 = canvas.create_text(x1, y1, text=wo[ind], fill="purple", font=("Helvetica", "18"))
            btn[key]["bg"] = "green"
            if v not in wor:
                btn[key]["state"] = "disabled"
            if v in wor:
                # Letter occurs a second time: reveal that position too.
                # NOTE(review): a third occurrence would not be revealed
                # (the second position is never marked '1').
                win.append(v)
                ind2 = wor.index(v)
                b2 = list1[ind2]
                x1, y1 = krd()
                canvas.create_text(x1, y1, text=wo[ind2], fill="purple", font=("Helvetica", "18"))
            if len(win) == 6:
                # All six hidden letters revealed -> victory.
                canvas.create_text(150, 150, text="Ты победил!", fill="purple", font=("Helvetica", "18"))
                for i in alphabet:
                    btn[i]["state"] = "disabled"
        else:
            # Wrong guess: mark the button red and draw the next body part.
            er.append(v)
            btn[key]["bg"] = "red"
            btn[key]["state"] = "disabled"
            if len(er) == 1:
                head()
            elif len(er) == 2:
                body()
            elif len(er) == 3:
                arm_r()
            elif len(er) == 4:
                arm_l()
            elif len(er) == 5:
                leg_l()
            elif len(er) == 6:
                leg_r()
                end()
        root.update()
    btn = {}
    def gen(u, x, y):
        # Create one alphabet button labelled ``u`` at (x, y).
        btn[u] = Button(root, text=u, width=3, height=1, command=lambda: a(u))
        btn[u].place(x=str(x), y=str(y))
    # Lay the alphabet out in four rows of (up to) eight buttons.
    x = 265
    y = 110
    for i in alphabet[0:8]:
        gen(i, x, y)
        x = x+33
    x = 265
    y = 137
    for i in alphabet[8:16]:
        gen(i, x, y)
        x = x + 33
    x = 265
    y = 164
    for i in alphabet[16:24]:
        gen(i, x, y)
        x = x + 33
    x = 265
    y = 191
    for i in alphabet[24:33]:
        gen(i, x, y)
        x = x + 33
    # Gallows-man drawing helpers, one per mistake.
    def head():
        canvas.create_oval(79, 59, 120, 80, width=4, fill='white')
        root.update()
    def body():
        canvas.create_line(100, 80, 100, 200, width=4)
        root.update()
    def arm_r():
        canvas.create_line(100, 80, 145, 100, width=4)
        root.update()
    def arm_l():
        canvas.create_line(100, 80, 45, 100, width=4)
        root.update()
    def leg_l():
        canvas.create_line(100, 200, 45, 300, width=4)
        root.update()
    def leg_r():
        canvas.create_line(100, 200, 145, 300, width=4)
        root.update()
    def end():
        # Sixth mistake: game over, lock the keyboard.
        canvas.create_text(150, 150, text="Ты проиграл", fill="purple", font=("Helvetica", "18"))
        for i in alphabet:
            btn[i]["state"] = "disabled"
btn01 = Button(root, text="Начать игру", width=10, height=1, command=lambda: arr())
btn01.place(x=258, y=442)
btn01["bg"] = "red"
root.mainloop()
| [
"77945441@mil.ru"
] | 77945441@mil.ru |
f924b1888a2653f0f5772334dba26dee8cade215 | 1013f2dcb64aa0ba3fb31b3e1c977e29d7ada36c | /env/bin/easy_install | ec2c06f53c26541d1ab14a3988a976a61d1f43e2 | [] | no_license | choww/dev-notebook | ef15a03410b6b03e0cc3dad7f7c2267fdc576ad5 | c1a230b162c6724e87907568df5b2b2f4c84fece | refs/heads/main | 2022-09-27T01:36:52.224469 | 2022-05-04T23:26:56 | 2022-05-05T23:48:27 | 108,310,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | #!/home/car/Desktop/dev-notebook/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"carmenn.choww@gmail.com"
] | carmenn.choww@gmail.com | |
2a6d632308a3f0c4c3ea95d3be11ae0d38c3de5b | 5de1aaa1eb3f5baa012a4f6c26460466952a2465 | /Arkanoid.py | 816d7e2c207578d0e68cbe4727706ef2fe20b119 | [] | no_license | Matth900/Arcade_Games | 765a32db279b30d1dda779072045c6e5c21a9e64 | 646b1625a3de23457b239481b6780fe672745aa9 | refs/heads/master | 2021-01-19T22:32:58.939092 | 2015-04-18T15:51:56 | 2015-04-18T15:51:56 | 32,549,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,802 | py | # FIXING...Not working
import simplegui
import random
# initialize globals - pos and vel encode vertical info fohr paddles
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
PAD_WIDTH = 15
PAD_HEIGHT = 80
HALF_PAD_WIDTH = PAD_WIDTH / 2
HALF_PAD_HEIGHT = PAD_HEIGHT / 2
brick = list(range(50)) # We're assuming the grid of bricks has 50 elements by default
# initialize ball_pos and ball_vel for new bal in middle of table
# if direction is RIGHT, the ball's velocity is upper right, else upper left
def spawn_ball(direction):
    """Place a new ball at the table center and launch it upward.

    ``direction`` is "RIGHT" or "LEFT" and selects the horizontal sign of
    the initial velocity; any other value leaves the ball motionless.
    Mutates the module-level ``ball_pos`` / ``ball_vel`` vectors.
    """
    global ball_pos, ball_vel  # vectors stored as lists
    ball_pos = [WIDTH/2, HEIGHT/2]
    ball_vel = [0, 0]
    # Draw both random speeds up front (per-frame pixel speeds after //60).
    speed_x = random.randrange(120, 240)
    speed_y = random.randrange(60, 180)
    if direction in ("RIGHT", "LEFT"):
        sign = 1 if direction == "RIGHT" else -1
        ball_vel = [sign * (speed_x // 60), -(speed_y // 60)]
# define event handlers
def new_game():
    """Reset all game state: spawn a ball, center the paddle, zero the
    scores and restore every brick in the 5x10 grid."""
    global paddle1_pos, paddle1_vel,brick # these are number
    global score1, score2 # these are ints
    spawn_ball("LEFT")
    paddle1_pos = WIDTH/2
    paddle1_vel = 0
    score1 = 0
    score2 = 0
    #Initialize grid of bars - Default GRID of BRICKS = 5 * 10 MATRIX - Later on: Regulate with user inputs
    for i in range(50):
        brick[i] = 1
def restart():
    # "Restart" button handler: simply re-initialize everything.
    new_game()
def draw(canvas):
    """Per-frame handler: draws scores, the brick grid, ball and paddle,
    and updates all physics (ball motion, wall/paddle/brick collisions).

    NOTE(review): ``position`` below comes from ``//`` on ``WIDTH/10``;
    this is integer-valued only under the Python 2 / CodeSkulptor runtime
    this file targets (``simplegui``) -- under Python 3 it would be a
    float and fail as a list index.
    """
    global score1, score2, paddle1_pos, ball_pos, ball_vel,brick
    # draw scores
    canvas.draw_text(str(score1), ((WIDTH/3), 40), 40, 'White')
    canvas.draw_text(str(score2), (2*WIDTH/3, 40), 40, 'White')
    # draw bottom gutter
    canvas.draw_line([0, HEIGHT-PAD_WIDTH],[WIDTH, HEIGHT-PAD_WIDTH], 1, "White")
    # draw GRID OF BRICKS (row l = 0..4, ten bricks per row, one color per row)
    for i in range(50):
        if brick[i] == 1 and i <10:
            l = 0
            canvas.draw_polygon([(i*WIDTH/10,l),(WIDTH*(i+1)/10,l),(WIDTH*(i+1)/10,(l+1)*HEIGHT/20),(i*WIDTH/10,(l+1)*HEIGHT/20)],2,"Yellow","Blue")
        elif brick[i] == 1 and i<20:
            l = 1
            canvas.draw_polygon([((i-10)*WIDTH/10,l*HEIGHT/20),((i-10+1)*WIDTH/10,l*HEIGHT/20),((i-10+1)*WIDTH/10,(l+1)*HEIGHT/20),((i-10)*WIDTH/10,(l+1)*HEIGHT/20)],2,"Yellow","Green")
        elif brick[i] == 1 and i<30:
            l = 2
            canvas.draw_polygon([((i-20)*WIDTH/10,l*HEIGHT/20),((i-20+1)*WIDTH/10,l*HEIGHT/20),((i-20+1)*WIDTH/10,(l+1)*HEIGHT/20),((i-20)*WIDTH/10,(l+1)*HEIGHT/20)],2,"Yellow","Violet")
        elif brick[i] == 1 and i<40:
            l = 3
            canvas.draw_polygon([((i-30)*WIDTH/10,l*HEIGHT/20),((i-30+1)*WIDTH/10,l*HEIGHT/20),((i-30+1)*WIDTH/10,(l+1)*HEIGHT/20),((i-30)*WIDTH/10,(l+1)*HEIGHT/20)],2,"Yellow","Red")
        elif brick[i] == 1 and i<50:
            l = 4
            canvas.draw_polygon([((i-40)*WIDTH/10,l*HEIGHT/20),((i-40+1)*WIDTH/10,l*HEIGHT/20),((i-40+1)*WIDTH/10,(l+1)*HEIGHT/20),((i-40)*WIDTH/10,(l+1)*HEIGHT/20)],2,"Yellow","Brown")
    # update ball
    ball_pos[0] += ball_vel[0]
    ball_pos[1] += ball_vel[1]
    # draw ball
    canvas.draw_circle(ball_pos,BALL_RADIUS,1,"Yellow","White")
    # determine whether ball collide with the paddle or it with the lower gutter
    if ball_pos[1] >= (HEIGHT-1)-BALL_RADIUS:
        ball_vel[1] = - ball_vel[1]
    # update paddle's horizontal, keep paddle on the screen
    if (paddle1_pos + paddle1_vel) < (HALF_PAD_HEIGHT) or (paddle1_pos + paddle1_vel) > (WIDTH-HALF_PAD_HEIGHT):
        paddle1_pos = paddle1_pos
    else:
        paddle1_pos += paddle1_vel
    # draw paddles
    canvas.draw_line([paddle1_pos-HALF_PAD_HEIGHT,HEIGHT], [paddle1_pos+HALF_PAD_HEIGHT,HEIGHT],PAD_WIDTH, "White")
    # determine whether ball collide with the Right or Left Wall
    if ball_pos[0] <= (BALL_RADIUS):
        ball_vel[0]= -ball_vel[0]
    if ball_pos[0] >= (WIDTH-1-PAD_WIDTH-BALL_RADIUS):
        ball_vel[0]= -ball_vel[0]
    # determine whether ball collide with some bricks (Vertical Position check)
    # Gotta start from the lowerst bricks layer. In the example positioned at 2*HEIGHT/20
    # Each nested else climbs one brick row higher (smaller y); on a hit the
    # brick is removed and the vertical speed flips with a 1.05x speed-up.
    if ball_pos[1] <= (HEIGHT/4+BALL_RADIUS):
        position = (ball_pos[0]) //( WIDTH/ 10)
        if brick[position+40] == 1:
            brick[position+40] = 0
            ball_vel[1] = -ball_vel[1]*1.05
            #print brick[position+40]
        else:
            if ball_pos[1] <= (HEIGHT/5+BALL_RADIUS):
                position = (ball_pos[0]) //( WIDTH/ 10)
                if brick[position+30] == 1:
                    brick[position+30] = 0
                    ball_vel[1] = -ball_vel[1]*1.05
                    #print brick[position+30]
                else:
                    if ball_pos[1] <= (3*HEIGHT/20+BALL_RADIUS):
                        position = (ball_pos[0]) //( WIDTH/ 10)
                        if brick[position+20] == 1:
                            brick[position+20] = 0
                            ball_vel[1] = -ball_vel[1]*1.05
                            #print brick[position+20]
                        else:
                            if ball_pos[1] <= (HEIGHT/10+BALL_RADIUS):
                                position = (ball_pos[0]) //( WIDTH/ 10)
                                if brick[position+10] == 1:
                                    brick[position+10] = 0
                                    ball_vel[1] = -ball_vel[1]*1.05
                                    #print brick[position+10]
                                else:
                                    if ball_pos[1] <= (HEIGHT/20+BALL_RADIUS):
                                        position = (ball_pos[0]) //( WIDTH/ 10)
                                        if brick[position] == 1:
                                            brick[position] = 0
                                            ball_vel[1] = -ball_vel[1]*1.05
                                            #print brick[position]
                                        else:
                                            if ball_pos[1] <= (BALL_RADIUS):
                                                ball_vel[1] = -ball_vel[1]*1.05
def keydown(key):
    """Arrow-key press handler: start moving the paddle left/right."""
    global paddle1_vel
    pixels_step = 4  # paddle speed in pixels per frame
    if key == simplegui.KEY_MAP["right"]:
        paddle1_vel += pixels_step
    elif key == simplegui.KEY_MAP["left"]:
        paddle1_vel -= pixels_step
def keyup(key):
    """Key release handler: stop the paddle regardless of which key."""
    global paddle1_vel
    paddle1_vel = 0
# create frame
frame = simplegui.create_frame("Pong", WIDTH, HEIGHT)
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
frame.set_keyup_handler(keyup)
frame.add_button("Restart",restart,100)
# start frame
new_game()
frame.start()
| [
"1404268@studbocconi.it"
] | 1404268@studbocconi.it |
fcefeb835a401bd22559b96a215199270866a0b0 | 365bedfb999e9864b949d211cbb991a519fe48ab | /images/MNIST_loader.py | 72d870d182be4edf49d15dd60a2bcef8fa7457a6 | [] | no_license | levijpuckett/handwritten-digits | 9797533281ce456d5699639643496a7e76c0fd3f | f9ecb18ffb9424f96dcd85bb7aeaaed305c04fe8 | refs/heads/master | 2020-04-17T06:23:31.944587 | 2019-01-18T02:01:09 | 2019-01-18T02:01:09 | 166,322,980 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,370 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 8 17:32:53 2018
@author: levipuckett
"""
import numpy as np
import os.path
labelsPath = 'imagestrain-labels.idx1-ubyte'
imagesPath = 'train-images.idx3-ubyte'
def from_bytes(bytez):
    """Decode *bytez* as a big-endian unsigned integer.

    Used for the 4-byte header fields and 1-byte pixel/label values of
    the MNIST IDX file format.
    """
    return int.from_bytes(bytez, 'big')
def load_images():
    '''Read the MNIST IDX files named by the module-level ``imagesPath``
    and ``labelsPath`` and return a tuple ``(images, labels)``.

    images -> numpy array of shape (num, rows*cols), pixels scaled to [0, 1]
    labels -> numpy int array of shape (num,)
    Both arrays are in file order.

    NOTE(review): the module default ``labelsPath`` is
    'imagestrain-labels.idx1-ubyte' (missing a '/'); callers are expected
    to point these globals at real files first.'''
    with open(imagesPath, 'rb') as file:
        from_bytes(file.read(4)) #get rid of the magic number.
        num = from_bytes(file.read(4)) #number of images.
        rows = from_bytes(file.read(4)) #number of rows.
        cols = from_bytes(file.read(4)) #number of columns.
        images = np.empty((num,rows * cols))
        for image in range(num):
            #update on progress.
            print ('\r%.2f percent of images loaded.' % (image / num * 100.0), end='')
            #get pixels for image (one byte per pixel, normalized to [0, 1]).
            for pixel in range(rows * cols):
                images[image, pixel] = ( from_bytes(file.read(1)) / 255.0 )
        file.close()
    print ()
    with open(labelsPath, "rb") as file:
        file.read(4)  # skip the magic number.
        num = from_bytes(file.read(4))
        labels = np.empty(num, dtype = int)
        for i in range(num):
            print ('\r%.2f percent of labels loaded.' % (i / num * 100.0), end='')
            labels[i] = from_bytes(file.read(1))
        file.close()
    print ()
    return images, labels
def make_pickle():
    """Build the ``.npy`` caches for the MNIST train and test sets and
    return ``(train_images, train_labels, test_images, test_labels)``.

    Bug fix: ``load_images()`` reads the module-level globals
    ``labelsPath`` / ``imagesPath``.  The original function rebound them
    as *locals*, so both calls silently read the module defaults (and the
    test set was never loaded from the t10k files).  Declaring them
    ``global`` makes the reassignments actually take effect.
    """
    global labelsPath, imagesPath
    # Training set: load from the IDX files, then cache as .npy.
    labelsPath = 'images/train-labels.idx1-ubyte'
    imagesPath = 'images/train-images.idx3-ubyte'
    train_images, train_labels = load_images()
    np.save('training_images', train_images)
    np.save('training_labels', train_labels)
    # Test set.  NOTE(review): label file extension kept as in the
    # original source ('.idx3-ubyte'); the standard MNIST name is
    # 't10k-labels.idx1-ubyte' -- confirm against the files on disk.
    labelsPath = 'images/t10k-labels.idx3-ubyte'
    imagesPath = 'images/t10k-images.idx3-ubyte'
    test_images, test_labels = load_images()
    np.save('test_images', test_images)
    np.save('test_labels', test_labels)
    return train_images, train_labels, test_images, test_labels
def load_pickle():
    """Load the cached MNIST arrays from disk.

    Returns (train_images, train_labels, test_images, test_labels) in
    the same order that make_pickle() saved them.
    """
    cache_files = (
        'images/training_images.npy',
        'images/training_labels.npy',
        'images/test_images.npy',
        'images/test_labels.npy',
    )
    return tuple(np.load(path) for path in cache_files)
if not os.path.isfile('images/training_images.npy'):
print ('creating pickle.')
train_images, train_labels, test_images, test_labels = make_pickle()
print ('verifying pickle...')
Vtrain_images, Vtrain_labels, Vtest_images, Vtest_labels = load_pickle()
if train_images.all() == Vtrain_images.all():
print ('training images verified.')
else:
print ('training images pickle corrupt.')
if train_labels.all() == Vtrain_labels.all():
print ('training labels verified.')
else:
print ('training labels pickle corrupt.')
if test_images.all() == Vtest_images.all():
print ('test images verified.')
else:
print ('test images pickle corrupt.')
if test_labels.all() == Vtest_labels.all():
print ('test labels verified.')
else:
print ('test labels pickle corrupt.')
| [
"noreply@github.com"
] | noreply@github.com |
71e8829afac3e0a0c65027c407736ec43eeb6262 | 0cba5529e387ba0f077b4e8ddeb96f914004f5df | /malaya/emotion.py | dcd419468d7b3fce6dc88b499f1cc790ea1925c7 | [
"MIT"
] | permissive | AsyrafAzlan/Malaya | dc78398ee6880578f40c5646a48882a5913217ae | 3d5166173cf74881f7a56fffaaf391813c55d4f1 | refs/heads/master | 2021-05-21T22:47:41.863857 | 2020-04-03T15:00:21 | 2020-04-03T15:00:21 | 252,841,526 | 1 | 0 | MIT | 2020-04-03T21:04:44 | 2020-04-03T21:04:44 | null | UTF-8 | Python | false | false | 1,861 | py | from malaya.supervised import softmax
from malaya.path import PATH_EMOTION, S3_PATH_EMOTION
from herpetologist import check_type
_emotion_label = ['anger', 'fear', 'joy', 'love', 'sadness', 'surprise']
_availability = [
'bert',
'tiny-bert',
'albert',
'tiny-albert',
'xlnet',
'alxlnet',
]
def available_transformer_model():
    """
    List available transformer emotion analysis models.

    Returns the shared module-level list (not a copy), matching the
    names accepted by :func:`transformer`.
    """
    return _availability
def multinomial(**kwargs):
    """
    Load multinomial emotion model.

    Extra keyword arguments are forwarded to the shared softmax loader.

    Returns
    -------
    BAYES : malaya._models._sklearn_model.BAYES class
    """
    # Thin wrapper: delegate to the generic loader with the
    # emotion-specific paths and label set.
    return softmax.multinomial(
        PATH_EMOTION, S3_PATH_EMOTION, 'emotion', _emotion_label, **kwargs
    )
@check_type
def transformer(model: str = 'xlnet', **kwargs):
    """
    Load Transformer emotion model.

    Parameters
    ----------
    model : str, optional (default='xlnet')
        Model architecture supported. Allowed values:

        * ``'bert'`` - BERT architecture from google.
        * ``'tiny-bert'`` - BERT architecture from google with smaller parameters.
        * ``'albert'`` - ALBERT architecture from google.
        * ``'tiny-albert'`` - ALBERT architecture from google with smaller parameters.
        * ``'xlnet'`` - XLNET architecture from google.
        * ``'alxlnet'`` - XLNET architecture from google + Malaya.

    Returns
    -------
    MODEL : Transformer class
    """
    model = model.lower()
    # Bug fix: the original referenced ``size`` / ``validate`` which are
    # not in this signature (guaranteed NameError), and dropped **kwargs.
    # Forward **kwargs instead, mirroring multinomial() above.
    if model not in _availability:
        raise Exception(
            'model not supported, please check supported models from malaya.emotion.available_transformer_model()'
        )
    return softmax.transformer(
        PATH_EMOTION,
        S3_PATH_EMOTION,
        'emotion',
        _emotion_label,
        model = model,
        **kwargs
    )
| [
"husein.zol05@gmail.com"
] | husein.zol05@gmail.com |
199651930292784cddbb1dfed250ccfcd7ba621d | d38c54d3a5c8bfe741d2b02e702599d75f7de82d | /lab16/mininet/tcp_topo_myH1Server-myH2Client-BIGFILE.py | bf86e792478c8810e35f0c46f0229d9cbe360c50 | [] | no_license | yuhengy/NetworkLab | 2845eef8ec32fe00d0397b4bc313e2a171a40022 | 605ef1df2b529cf9a7046149bbb64def3947bb17 | refs/heads/master | 2023-02-24T17:36:59.923853 | 2021-01-28T11:12:40 | 2021-01-28T11:12:40 | 296,386,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,948 | py | #!/usr/bin/python
import time
import os
import sys
import glob
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.link import TCLink
from mininet.cli import CLI
script_deps = [ 'ethtool', 'arptables', 'iptables' ]
def check_scripts():
    """Sanity-check the runtime environment (Python 2 syntax).

    1. Every helper under scripts/*.sh next to this file must be
       executable; otherwise print a hint and exit(1).
    2. Every program in ``script_deps`` must be found executable on
       $PATH; otherwise print an install hint and exit(2).
    """
    dir = os.path.abspath(os.path.dirname(sys.argv[0]))
    for fname in glob.glob(dir + '/' + 'scripts/*.sh'):
        if not os.access(fname, os.X_OK):
            print '%s should be set executable by using `chmod +x $script_name`' % (fname)
            sys.exit(1)
    for program in script_deps:
        found = False
        for path in os.environ['PATH'].split(os.pathsep):
            # Manual PATH walk (Python 2 has no shutil.which).
            exe_file = os.path.join(path, program)
            if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
                found = True
                break
        if not found:
            print '`%s` is required but missing, which could be installed via `apt` or `aptitude`' % (program)
            sys.exit(2)
class TCPTopo(Topo):
    """Minimal mininet topology: two hosts joined by one 10ms-delay link."""
    def build(self):
        h1 = self.addHost('h1')
        h2 = self.addHost('h2')
        # TCLink (passed at Mininet construction) honors the delay option.
        self.addLink(h1, h2, delay='10ms')
if __name__ == '__main__':
check_scripts()
topo = TCPTopo()
net = Mininet(topo = topo, link = TCLink, controller = None)
h1, h2 = net.get('h1', 'h2')
h1.cmd('ifconfig h1-eth0 10.0.0.1/24')
h2.cmd('ifconfig h2-eth0 10.0.0.2/24')
h1.cmd('scripts/disable_ipv6.sh')
h2.cmd('scripts/disable_ipv6.sh')
h1.cmd('scripts/disable_offloading.sh && scripts/disable_tcp_rst.sh')
h2.cmd('scripts/disable_offloading.sh && scripts/disable_tcp_rst.sh')
# XXX: If you want to run user-level stack, you should execute
# disable_[arp,icmp,ip_forward].sh first.
h1.cmd('./scripts/disable_arp.sh && ./scripts/disable_icmp.sh && ./scripts/disable_ip_forward.sh')
h2.cmd('./scripts/disable_arp.sh && ./scripts/disable_icmp.sh && ./scripts/disable_ip_forward.sh')
net.start()
#CLI(net)
h1.cmd('tshark -a duration:30 -w /STEP5-wiresharkOutput-myH1Server.pcapng > result/STEP5-tsharkOutput-myH1Server.log 2>&1 &')
h2.cmd('tshark -a duration:30 -w /STEP5-wiresharkOutput-myH2Client.pcapng > result/STEP5-tsharkOutput-myH2Client.log 2>&1 &')
time.sleep(20)
#h1.cmd("python build/tcp_stack-BIGFILE.py server 10001 > result/STEP5-refH1Server.txt 2>&1 &")
h1.cmd("stdbuf -oL -eL ./build/tcp_stack server 10001 > result/STEP5-myH1Server.txt 2>&1 &")
time.sleep(1)
#h2.cmd("python build/tcp_stack-BIGFILE.py client 10.0.0.1 10001 > result/STEP5-refH2Client.txt 2>&1 &")
h2.cmd("stdbuf -oL -eL ./build/tcp_stack client 0x0a000001 10001 > result/STEP5-myH2Client.txt 2>&1 &")
time.sleep(39)
h1.cmd('mv /STEP5-wiresharkOutput-myH1Server.pcapng result/')
h2.cmd('mv /STEP5-wiresharkOutput-myH2Client.pcapng result/')
h2.cmd('diff mininet/client-input.dat mininet/server-output.dat > result/STEP5-diff.txt 2>&1')
net.stop()
| [
"yangyuheng17@mails.ucas.ac.cn"
] | yangyuheng17@mails.ucas.ac.cn |
5283860926b860c7a1b9143ddbcc38c695bb2efb | 95b60a3f7db73ac8a6da2fc7385841067a2e6ea7 | /app/cache/RedisCache.py | 326b2b58bc6e9d924cab301bf43412a539b456f1 | [
"MIT"
] | permissive | coder-yuan/vue-template-api | 6a8ecfd46916705de56750b37fae3f615c7df5f5 | 135f13d7c32b4a2830366fc0b79a1e2a1eda6923 | refs/heads/master | 2022-05-27T05:15:29.919821 | 2019-12-06T10:59:45 | 2019-12-06T10:59:45 | 226,309,030 | 0 | 0 | MIT | 2022-05-25T03:41:22 | 2019-12-06T10:56:18 | Python | UTF-8 | Python | false | false | 352 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : icode_flask_be
# @Package :
# @Author : jackeroo
# @Time : 2019/11/21 上午6:47
# @File : RedisCache.py
# @Contact :
# @Software : PyCharm
# @Desc :
from flask_caching import Cache
from app.config.Cache.RedisCache import RedisCache
cache = Cache(config=RedisCache)
| [
"1132524215@qq.com"
] | 1132524215@qq.com |
1d2541191e621eea166d59f4e565d1ffd53ef94e | fcf4287f91bac1ecf2f9c1c007f8529fe8fa040b | /Rentify/rentify/core/.~c9_invoke_ez9l69.py | 0e241685e7e176af0581138412d43637cc709e90 | [] | no_license | citi-onboarding/rentify | 7ca96126f6b6eac871cdfed5a965581b35222cef | f98fb79f666f92a4dc9b51a6032bf742f12285e5 | refs/heads/master | 2021-08-30T07:04:45.364733 | 2017-12-16T15:56:16 | 2017-12-16T15:56:16 | 112,296,266 | 0 | 0 | null | 2017-12-14T14:51:46 | 2017-11-28T06:35:44 | Python | UTF-8 | Python | false | false | 2,729 | py | from django.shortcuts import render, redirect
from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import *
from .forms import *
# Create your views here.
def home (request):
    """Render the landing page with every car currently marked available."""
    available_cars = Car.objects.filter(Availability=True)
    return render(request, 'core/index.html', {"cars": available_cars})
def ourCars (request):
    """Paginated listing of available cars (9 per page).

    Improvements over the original: the debug ``print`` is removed, and
    the ``PageNotAnInteger`` / ``EmptyPage`` exceptions (already imported
    at the top of this file but unused) are handled so a bad ``?page=``
    query parameter no longer raises a 500.
    """
    context = dict()
    cars = Car.objects.filter(Availability=True)
    # Paginator
    paginator = Paginator(cars, 9)
    page = request.GET.get('page', 1)
    try:
        context["cars"] = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page.
        context["cars"] = paginator.page(1)
    except EmptyPage:
        # Out-of-range page parameter: show the last page instead.
        context["cars"] = paginator.page(paginator.num_pages)
    return render(request, 'core/ourCars.html', context)
def about (request):
    # Static page: no context data needed.
    return render(request, 'core/about.html')
def signin (request):
    """Log a user in.

    Fixes over the original (a half-edited Cloud9 temp file): the
    dangling empty ``else:`` blocks were a syntax error; ``login()`` was
    also called on the failure branch with ``user is None`` (which
    raises); and the ``redirect(...)`` results were computed but never
    returned.  Debug prints removed.
    """
    if request.method == 'POST':
        form = SignInForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                return redirect(home)
        # Invalid form or bad credentials: fall through and re-render the
        # bound form so its errors are shown.
    else:
        form = SignInForm()
    return render(request, 'core/login.html', {'form': form})
def signUp (request):
    """Register a new user and, if authentication succeeds, log them in.

    Robustness fix: the original called ``login(request, user)`` even when
    ``authenticate`` returned ``None``, which raises; that call is now
    guarded.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            # NOTE(review): if SignUpForm derives from UserCreationForm the
            # raw password lives in 'password1', not 'password' -- confirm
            # against forms.py; with the wrong key authenticate() always
            # returns None.
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
            return redirect(home)
    else:
        form = SignUpForm()
    return render(request, 'core/register.html', {'form': form})
@login_required(login_url='core/login.html')
def rentProfile (request):
    """Show the logged-in user's rental profile: last rentals and the
    currently active contract.

    Fixes over the original: ``request.User`` -> ``request.user`` (the
    attribute is lowercase; uppercase raises AttributeError),
    ``render(resquest, ...)`` was a NameError typo, and
    ``Contract.objects.all() is not None`` was always True (a queryset is
    never None) -- replaced with ``.exists()``.

    NOTE(review): ``login_url`` normally takes a URL path/name, not a
    template path -- confirm the intended redirect target.
    """
    context = dict()
    context["user"] = request.user
    if Contract.objects.exists():
        context["rents"] = Contract.objects.filter(UserID=request.user.username).order_by('DateContract')[:4]
        context["currentRent"] = Contract.objects.filter(UserID=request.user.username).order_by('DateContract').filter(Active=True).first()
    return render(request, 'core/rent-profile.html', context)
return render(resquest, 'core/rent-profile.html', context)
@login_required(login_url='core/login.html')
def tenantProfile (request):
    # Placeholder view -- not implemented yet.
    # NOTE(review): login_url usually takes a URL path or name, not a
    # template path -- confirm the intended redirect target.
    pass
"evfl@cin.ufpe.br"
] | evfl@cin.ufpe.br |
f68c22a3ebcff8045d9ad3131f9b30a050725a36 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_filthiness.py | d1e4e34d83d291300555681e0bf38feb72c2e796 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py |
#calss header
class _FILTHINESS():
def __init__(self,):
self.name = "FILTHINESS"
self.definitions = [u'the quality of being very dirty']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
822de060466784748690413911f1bd522c7cfdc4 | ebb0f88adedf4d5202185b27fd2b8cecc1e59ebb | /pplot/figure.py | 5f79f1facf388ef88e8cc178d7e5fd4a5acc9fd6 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | pmacosta/pplot | c528de8a6fcec83784ed92b84979a4d738444f57 | ac2e40aa1fc22a3c2aa39d894bc71c29ba33058a | refs/heads/master | 2020-12-31T04:28:02.763633 | 2019-06-11T18:35:59 | 2019-06-11T18:35:59 | 58,674,101 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45,520 | py | """
Generate presentation-quality plots.
[[[cog
import os, sys
if sys.hexversion < 0x03000000:
import __builtin__
else:
import builtins as __builtin__
sys.path.append(os.environ['TRACER_DIR'])
import trace_ex_plot_figure
exobj_plot = trace_ex_plot_figure.trace_module(no_print=True)
]]]
[[[end]]]
"""
# figure.py
# Copyright (c) 2013-2019 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,C0302,C0413,R0201,R0205,R0914,R0915,W0105,W0212
# Standard library imports
from __future__ import print_function
import math
import os
import sys
import warnings
# PyPI imports
if os.environ.get("READTHEDOCS", "") != "True": # pragma: no cover
import PIL
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.transforms import Bbox
import pmisc
import pexdoc.exh
import pexdoc.pcontracts
import peng
# Intra-package imports
from .constants import TITLE_FONT_SIZE
from .panel import Panel
from .functions import _F, _MF, _intelligent_ticks
###
# Global variables
###
INF = sys.float_info.max
SPACER = 0.2 # in inches
PANEL_SEP = 10 * SPACER
###
# Class
###
class Figure(object):
r"""
Generate presentation-quality plots.
:param panels: One or more data panels
:type panels: :py:class:`pplot.Panel` *or list of*
:py:class:`pplot.Panel` *or None*
:param indep_var_label: Independent variable label
:type indep_var_label: string
:param indep_var_units: Independent variable units
:type indep_var_units: string
:param indep_axis_tick_labels: Independent axis tick labels. If not None
overrides ticks automatically generated
or as given by the **indep_axis_ticks**
argument (ignored for figures with a
logarithmic independent axis)
:type indep_axis_tick_labels: list of strings or None
:param indep_axis_ticks: Independent axis tick marks. If not None
overrides automatically generated tick marks if
the axis type is linear. If None automatically
generated tick marks are used for the independent
axis
:type indep_axis_ticks: list, Numpy vector or None
:param fig_width: Hard copy plot width in inches. If None the width is
automatically calculated so that the figure has a 4:3
aspect ratio and there is no horizontal overlap between
any two text elements in the figure
:type fig_width: `PositiveRealNum <https://pexdoc.readthedocs.io/en/
stable/ptypes.html#positiverealnum>`_ or None
:param fig_height: Hard copy plot height in inches. If None the height is
automatically calculated so that the figure has a 4:3
aspect ratio and there is no vertical overlap between
any two text elements in the figure
:type fig_height: `PositiveRealNum <https://pexdoc.readthedocs.io/en/
stable/ptypes.html#positiverealnum>`_ or None
:param title: Plot title
:type title: string
:param log_indep_axis: Flag that indicates whether the independent
axis is linear (False) or logarithmic (True)
:type log_indep_axis: boolean
:param dpi: Dots per inch to be used while showing or displaying figure
:type dpi: positive number
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.__init__
:raises:
* RuntimeError (Argument \`dpi\` is not valid)
* RuntimeError (Argument \`fig_height\` is not valid)
* RuntimeError (Argument \`fig_width\` is not valid)
* RuntimeError (Argument \`indep_axis_tick_labels\` is not valid)
* RuntimeError (Argument \`indep_axis_ticks\` is not valid)
* RuntimeError (Argument \`indep_var_label\` is not valid)
* RuntimeError (Argument \`indep_var_units\` is not valid)
* RuntimeError (Argument \`log_indep_axis\` is not valid)
* RuntimeError (Argument \`panels\` is not valid)
* RuntimeError (Argument \`title\` is not valid)
* RuntimeError (Figure size is too small: minimum width *[min_width]*,
minimum height *[min_height]*)
* RuntimeError (Number of tick locations and number of tick labels
mismatch)
* TypeError (Panel *[panel_num]* is not fully specified)
* ValueError (Figure cannot be plotted with a logarithmic independent
axis because panel *[panel_num]*, series *[series_num]* contains
negative independent data points)
.. [[[end]]]
"""
# pylint: disable=R0902,R0913
def __init__(
self,
panels=None,
indep_var_label="",
indep_var_units="",
indep_axis_tick_labels=None,
indep_axis_ticks=None,
fig_width=None,
fig_height=None,
title="",
log_indep_axis=False,
dpi=100.0,
): # noqa
pexdoc.exh.addai(
"indep_axis_ticks",
(indep_axis_ticks is not None)
and (
(not isinstance(indep_axis_ticks, list))
and (not isinstance(indep_axis_ticks, np.ndarray))
),
)
pexdoc.exh.addai(
"indep_axis_tick_labels",
(indep_axis_tick_labels is not None)
and (
(not isinstance(indep_axis_tick_labels, list))
or (
isinstance(indep_axis_tick_labels, list)
and (indep_axis_ticks is not None)
and (len(indep_axis_tick_labels) != len(indep_axis_ticks))
)
),
)
# Private attributes
self._need_redraw = False
self._min_fig_width = None
self._min_fig_height = None
self._size_given = False
# Public attributes
self._dpi = None
self._indep_axis_ticks = None
self._indep_axis_tick_labels = None
self._fig = None
self._panels = None
self._indep_var_label = None
self._title = None
self._log_indep_axis = None
self._fig_width = None
self._fig_height = None
self._indep_var_units = None
self._indep_var_div = None
self._axes_list = []
self._scaling_done = False
self._indep_axis_dict = None
self._title_obj = None
# Assignment of arguments to attributes
self._set_dpi(dpi)
self._set_indep_var_label(indep_var_label)
self._set_indep_var_units(indep_var_units)
self._set_title(title)
self._set_log_indep_axis(log_indep_axis)
self._set_indep_axis_ticks(
indep_axis_ticks if not self.log_indep_axis else None
)
self._set_indep_axis_tick_labels(indep_axis_tick_labels)
self._set_panels(panels)
self._set_fig_width(fig_width)
self._set_fig_height(fig_height)
def __bool__(self): # pragma: no cover
"""
Test if the figure has at least a panel associated with it.
.. note:: This method applies to Python 3.x
"""
return self._panels is not None
def __iter__(self):
r"""
Return an iterator over the panel object(s) in the figure.
For example:
.. =[=cog
.. import pmisc
.. pmisc.incfile('plot_example_7.py', cog.out)
.. =]=
.. code-block:: python
# plot_example_7.py
from __future__ import print_function
import numpy as np
import pplot
def figure_iterator_example(no_print):
source1 = pplot.BasicSource(
indep_var=np.array([1, 2, 3, 4]),
dep_var=np.array([1, -10, 10, 5])
)
source2 = pplot.BasicSource(
indep_var=np.array([100, 200, 300, 400]),
dep_var=np.array([50, 75, 100, 125])
)
series1 = pplot.Series(
data_source=source1,
label='Goals'
)
series2 = pplot.Series(
data_source=source2,
label='Saves',
color='b',
marker=None,
interp='STRAIGHT',
line_style='--'
)
panel1 = pplot.Panel(
series=series1,
primary_axis_label='Average',
primary_axis_units='A',
display_indep_axis=False
)
panel2 = pplot.Panel(
series=series2,
primary_axis_label='Standard deviation',
primary_axis_units=r'$\sqrt{{A}}$',
display_indep_axis=True
)
figure = pplot.Figure(
panels=[panel1, panel2],
indep_var_label='Time',
indep_var_units='sec',
title='Sample Figure'
)
if not no_print:
for num, panel in enumerate(figure):
print('Panel {0}:'.format(num+1))
print(panel)
print('')
else:
return figure
.. =[=end=]=
.. code-block:: python
>>> import docs.support.plot_example_7 as mod
>>> mod.figure_iterator_example(False)
Panel 1:
Series 0:
Independent variable: [ 1.0, 2.0, 3.0, 4.0 ]
Dependent variable: [ 1.0, -10.0, 10.0, 5.0 ]
Label: Goals
Color: k
Marker: o
Interpolation: CUBIC
Line style: -
Secondary axis: False
Primary axis label: Average
Primary axis units: A
Secondary axis label: not specified
Secondary axis units: not specified
Logarithmic dependent axis: False
Display independent axis: False
Legend properties:
cols: 1
pos: BEST
<BLANKLINE>
Panel 2:
Series 0:
Independent variable: [ 100.0, 200.0, 300.0, 400.0 ]
Dependent variable: [ 50.0, 75.0, 100.0, 125.0 ]
Label: Saves
Color: b
Marker: None
Interpolation: STRAIGHT
Line style: --
Secondary axis: False
Primary axis label: Standard deviation
Primary axis units: $\sqrt{{A}}$
Secondary axis label: not specified
Secondary axis units: not specified
Logarithmic dependent axis: False
Display independent axis: True
Legend properties:
cols: 1
pos: BEST
<BLANKLINE>
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. [[[end]]]
"""
return iter(self._panels)
def __nonzero__(self): # pragma: no cover
"""
Test if the figure has at least a panel associated with it.
.. note:: This method applies to Python 2.x
"""
return self._panels is not None
def __str__(self):
r"""
Print figure information.
For example:
>>> from __future__ import print_function
>>> import docs.support.plot_example_7 as mod
>>> print(mod.figure_iterator_example(True)) #doctest: +ELLIPSIS
Panel 0:
Series 0:
Independent variable: [ 1.0, 2.0, 3.0, 4.0 ]
Dependent variable: [ 1.0, -10.0, 10.0, 5.0 ]
Label: Goals
Color: k
Marker: o
Interpolation: CUBIC
Line style: -
Secondary axis: False
Primary axis label: Average
Primary axis units: A
Secondary axis label: not specified
Secondary axis units: not specified
Logarithmic dependent axis: False
Display independent axis: False
Legend properties:
cols: 1
pos: BEST
Panel 1:
Series 0:
Independent variable: [ 100.0, 200.0, 300.0, 400.0 ]
Dependent variable: [ 50.0, 75.0, 100.0, 125.0 ]
Label: Saves
Color: b
Marker: None
Interpolation: STRAIGHT
Line style: --
Secondary axis: False
Primary axis label: Standard deviation
Primary axis units: $\sqrt{{A}}$
Secondary axis label: not specified
Secondary axis units: not specified
Logarithmic dependent axis: False
Display independent axis: True
Legend properties:
cols: 1
pos: BEST
Independent variable label: Time
Independent variable units: sec
Logarithmic independent axis: False
Title: Sample Figure
Figure width: ...
Figure height: ...
<BLANKLINE>
"""
# pylint: disable=C1801
self._create_figure()
fig_width, fig_height = self._fig_dims()
ret = ""
if (self.panels is None) or (len(self.panels) == 0):
ret += "Panels: None\n"
else:
for num, element in enumerate(self.panels):
ret += "Panel {0}:\n".format(num)
temp = str(element).split("\n")
temp = [3 * " " + line for line in temp]
ret += "\n".join(temp)
ret += "\n"
ret += "Independent variable label: {0}\n".format(
self.indep_var_label
if self.indep_var_label not in ["", None]
else "not specified"
)
ret += "Independent variable units: {0}\n".format(
self.indep_var_units
if self.indep_var_units not in ["", None]
else "not specified"
)
ret += "Logarithmic independent axis: {0}\n".format(self.log_indep_axis)
ret += "Title: {0}\n".format(
self.title if self.title not in ["", None] else "not specified"
)
ret += "Figure width: {0}\n".format(fig_width)
ret += "Figure height: {0}\n".format(fig_height)
return ret
def _bbox(self, obj):
"""Return bounding box of an object."""
renderer = self._fig.canvas.get_renderer()
return obj.get_window_extent(renderer=renderer).transformed(
self._fig.dpi_scale_trans.inverted()
)
    def _calculate_min_figure_size(self):
        """Calculate minimum panel and figure size."""
        # Round down to a whole number of pixels, then express in inches.
        dround = lambda x: math.floor(x) / self.dpi
        title_width = 0
        if self.title not in [None, ""]:
            title_bbox = self._bbox(self._title_obj)
            title_width = title_bbox.width
        # Minimum width must accommodate (a) the widest panel including its
        # overhangs and legend, (b) the widest y-axis annotation row, and
        # (c) the title.
        min_width = max(
            [
                (
                    max(panel._left_overhang for panel in self.panels)
                    + max(
                        max(panel._min_spine_bbox.width, panel._legend_width)
                        for panel in self.panels
                    )
                    + max(panel._right_overhang for panel in self.panels)
                ),
                max(
                    panel._prim_yaxis_annot
                    + panel._indep_label_width
                    + panel._sec_yaxis_annot
                    for panel in self.panels
                ),
                title_width,
            ]
        )
        self._min_fig_width = dround(min_width * self.dpi)
        npanels = len(self.panels)
        # Minimum height: the tallest panel replicated for every panel plus
        # the fixed separation between consecutive panels.
        self._min_fig_height = dround(
            npanels * max([panel._min_bbox.height * self.dpi for panel in self.panels])
            + ((npanels - 1) * PANEL_SEP)
        )
    def _check_figure_spec(self, fig_width=None, fig_height=None):
        """Validate given figure size against minimum dimension."""
        small_ex = pexdoc.exh.addex(
            RuntimeError,
            "Figure size is too small: minimum width *[min_width]*, "
            "minimum height *[min_height]*",
        )
        # Either dimension below its computed minimum triggers the error; a
        # dimension passed as None (or 0) is simply not checked.
        small_ex(
            bool(
                (fig_width and (fig_width < self._min_fig_width))
                or (fig_height and (fig_height < self._min_fig_height))
            ),
            [
                _F("min_width", self._min_fig_width),
                _F("min_height", self._min_fig_height),
            ],
        )
    def _create_figure(self, raise_exception=False):
        """Create and resize figure."""
        # Returns the figure bounding box re-centered to the exact pixel
        # size; returns a null box when the figure is not fully specified.
        if raise_exception:
            specified_ex = pexdoc.exh.addex(
                RuntimeError, "Figure object is not fully specified"
            )
            specified_ex(raise_exception and (not self._complete))
        if not self._complete:
            return Bbox([[0, 0], [0, 0]])
        if self._need_redraw:
            self._size_given = (self._fig_width is not None) and (
                self._fig_height is not None
            )
            # First _draw call is to calculate approximate figure size, (until
            # matplotlib actually draws the figure, all the bounding boxes of
            # the elements in the figure are null boxes. The second _draw call
            # is to draw figure with either the calculated minimum dimensions
            # or the user-given dimensions, provided they are equal or greater
            # than the minimum dimensions
            self._draw()
            if not self._size_given:
                self._draw()
            bbox = self._fig_bbox()
            fig_width, fig_height = self._fig_dims()
            self._fig.set_size_inches(fig_width, fig_height, forward=True)
            self._need_redraw = False
            # From https://github.com/matplotlib/matplotlib/issues/7984:
            # When the Figure is drawn, its Axes are sorted based on zorder
            # with a stable sort, and then drawn in that order. Then within
            # each Axes, artists are sorted based on zorder. Therefore you
            # can't interleave the drawing orders of artists from one Axes with
            # those from another.
        else:
            bbox = self._fig_bbox()
            fig_width, fig_height = self._fig_dims()
        # Get figure pixel size exact
        # Split width/height into two integer halves so the final bbox spans
        # an exact number of pixels around the bbox center.
        width = int(round(fig_width * self._dpi))
        lwidth = int(round(width / 2.0))
        rwidth = width - lwidth
        height = int(round(fig_height * self._dpi))
        bheight = int(round(height / 2.0))
        theight = height - bheight
        bbox_xcenter = bbox.xmin + 0.5 * bbox.width
        bbox_ycenter = bbox.ymin + 0.5 * bbox.height
        bbox = Bbox(
            [
                [
                    bbox_xcenter - (lwidth / self._dpi),
                    bbox_ycenter - (bheight / self._dpi),
                ],
                [
                    bbox_xcenter + (rwidth / self._dpi),
                    bbox_ycenter + (theight / self._dpi),
                ],
            ]
        )
        return bbox
    def _draw(self):
        """Scale panel data (once), lay out the panels and render the figure."""
        # pylint: disable=C0326,W0612
        num_panels = len(self.panels)
        if not self._scaling_done:
            # Find union of the independent variable data set of all panels
            indep_axis_ticks = self._get_global_xaxis()
            self._indep_var_div = indep_axis_ticks.div
            self._indep_axis_ticks = indep_axis_ticks.locs
            # Scale all panel series
            for panel_obj in self.panels:
                panel_obj._scale_indep_var(self._indep_var_div)
            # User-supplied labels (if any) win over the computed ones.
            self._indep_axis_tick_labels = (
                self._indep_axis_tick_labels or indep_axis_ticks.labels
            )
            # Shared axis description consumed by each panel's _draw().
            self._indep_axis_dict = {
                "log_indep": self.log_indep_axis,
                "indep_var_min": indep_axis_ticks.min,
                "indep_var_max": indep_axis_ticks.max,
                "indep_var_locs": indep_axis_ticks.locs,
                "indep_var_labels": self._indep_axis_tick_labels,
                "indep_axis_label": self.indep_var_label,
                "indep_axis_units": self.indep_var_units,
                "indep_axis_unit_scale": indep_axis_ticks.unit_scale,
            }
            self._scaling_done = True
        # Create required number of panels
        self._draw_panels()
        # Draw figure otherwise some bounding boxes return NaN
        FigureCanvasAgg(self._fig).draw()
        self._calculate_min_figure_size()
    def _draw_panels(self, fbbox=None):
        """Create the subplot grid and draw every panel (two-pass layout)."""
        def init_figure(num_panels, fbbox=None):
            # (Re)create the pyplot figure with one subplot row per panel.
            fig_width, fig_height = self._fig_dims()
            figsize = (fig_width, fig_height) if fig_width and fig_height else None
            plt.close("all")
            self._fig, axesh = plt.subplots(
                nrows=num_panels, ncols=1, dpi=self.dpi, figsize=figsize
            )
            plt.tight_layout(pad=0, h_pad=2, rect=fbbox)
            # subplots() returns a bare Axes for nrows=1; normalize to a list.
            axesh = [axesh] if num_panels == 1 else axesh
            if self.title not in ["", None]:
                self._title_obj = self._fig.suptitle(
                    self.title,
                    fontsize=TITLE_FONT_SIZE,
                    horizontalalignment="center",
                    verticalalignment="top",
                    multialignment="center",
                    y=1.0,
                )
            return axesh, fig_width, fig_height
        num_panels = len(self.panels)
        axesh, fig_width, fig_height = init_figure(num_panels, fbbox)
        self._axes_list = []
        top = right = -INF
        bottom = left = +INF
        # Guarantee at least one panel shows the independent axis.
        if all(not panel.display_indep_axis for panel in self.panels):
            self.panels[-1]._display_indep_axis = True
        # First pass: draw panels and accumulate the overall bounding box.
        for panel, axish in zip(self.panels, axesh):
            disp_indep_axis = (num_panels == 1) or panel.display_indep_axis
            panel._draw(disp_indep_axis, self._indep_axis_dict, axish)
            left = min(left, panel._panel_bbox.xmin)
            bottom = min(bottom, panel._panel_bbox.ymin)
            right = max(right, panel._panel_bbox.xmax)
            top = max(top, panel._panel_bbox.ymax)
        if self._title_obj:
            title_bbox = self._bbox(self._title_obj)
            left = min(title_bbox.xmin, left)
            right = max(title_bbox.xmax, right)
        if fig_width and fig_height:
            # Second pass: shrink the layout rectangle so all decorations fit
            # inside the figure, then redraw. (title_bbox is only referenced
            # when self._title_obj is set, matching its definition above.)
            xdelta_left = -left / fig_width
            ydelta_bot = -bottom / fig_height
            xdelta_right = 1 - ((right - fig_width) / fig_width)
            ydelta_top = (
                title_bbox.ymin / top
                if self._title_obj
                else 1 - ((top - fig_height) / fig_height)
            )
            fbbox = [xdelta_left, ydelta_bot, xdelta_right, ydelta_top]
            axesh, _, _ = init_figure(num_panels, fbbox)
            for panel, axish in zip(self.panels, axesh):
                disp_indep_axis = (num_panels == 1) or panel.display_indep_axis
                panel._draw(disp_indep_axis, self._indep_axis_dict, axish)
def _fig_bbox(self):
"""Return bounding box of figure."""
tleft = tbottom = +INF
tright = ttop = -INF
if self._title_obj:
title_bbox = self._bbox(self._title_obj)
tleft = title_bbox.xmin
tright = title_bbox.xmax
ttop = title_bbox.ymax
tbottom = title_bbox.ymin
left = min(tleft, min(pobj._left for pobj in self.panels))
bottom = min(tbottom, min(pobj._bottom for pobj in self.panels))
top = max(ttop, max(pobj._top for pobj in self.panels))
right = max(tright, max(pobj._right for pobj in self.panels))
fig_bbox = Bbox([[left, bottom], [right, top]])
return fig_bbox
def _fig_dims(self):
"""Get actual figure size, given or minimum calculated."""
fig_width = self._fig_width or self._min_fig_width
fig_height = self._fig_height or self._min_fig_height
return fig_width, fig_height
    def _get_axes_list(self):
        # Build (or refresh) the figure first so the list is populated.
        self._create_figure()
        return self._axes_list
def _get_complete(self):
"""Return True if figure is fully specified, otherwise returns False."""
return (self.panels is not None) and len(self.panels)
    def _get_dpi(self):
        # Getter backing the public ``dpi`` property.
        return self._dpi
    def _get_fig(self):
        # Ensure the Matplotlib figure exists before handing it out.
        self._create_figure()
        return self._fig
def _get_fig_height(self):
if self._complete and (self._fig_height is None):
self._create_figure()
self._fig_height = self._min_fig_height
return self._fig_height
def _get_fig_width(self):
if self._complete and (self._fig_width is None):
self._create_figure()
self._fig_width = self._min_fig_width
return self._fig_width
    def _get_global_xaxis(self):
        """Compute tick data for the union of all panels' independent data."""
        log_ex = pexdoc.exh.addex(
            ValueError,
            "Figure cannot be plotted with a logarithmic "
            "independent axis because panel *[panel_num]*, series "
            "*[series_num]* contains negative independent data points",
        )
        ticks_num_ex = pexdoc.exh.addex(
            RuntimeError, "Number of tick locations and number of tick labels mismatch"
        )
        glob_indep_var = []
        for panel_num, panel_obj in enumerate(self.panels):
            for series_num, series_obj in enumerate(panel_obj.series):
                # Logarithmic axes cannot represent negative data.
                log_ex(
                    bool(self.log_indep_axis and (min(series_obj.indep_var) < 0)),
                    edata=_MF("panel_num", panel_num, "series_num", series_num),
                )
                # Accumulate the sorted union of all series' independent
                # data, rounded to 10 mantissa digits to merge near-equal
                # floating-point values.
                glob_indep_var = np.unique(
                    np.append(
                        glob_indep_var,
                        np.array(
                            [
                                peng.round_mantissa(element, 10)
                                for element in series_obj.indep_var
                            ]
                        ),
                    )
                )
        indep_axis_ticks = _intelligent_ticks(
            glob_indep_var,
            min(glob_indep_var),
            max(glob_indep_var),
            tight=True,
            log_axis=self.log_indep_axis,
            # User-supplied tick locations are honored only for linear axes.
            tick_list=(None if self._log_indep_axis else self._indep_axis_ticks),
        )
        ticks_num_ex(
            (self._indep_axis_tick_labels is not None)
            and (len(self._indep_axis_tick_labels) != len(indep_axis_ticks.labels))
        )
        return indep_axis_ticks
    def _get_indep_axis_scale(self):
        # The scale divisor is computed while drawing; build first.
        self._create_figure()
        return self._indep_var_div
    def _get_indep_axis_ticks(self):
        # Tick locations are (re)computed while drawing; build first.
        self._create_figure()
        return self._indep_axis_ticks
    def _get_indep_axis_tick_labels(self):
        # Labels may be filled in from computed ticks while drawing.
        self._create_figure()
        return self._indep_axis_tick_labels
    def _get_indep_var_label(self):
        # Getter backing the public ``indep_var_label`` property.
        return self._indep_var_label
    def _get_indep_var_units(self):
        # Getter backing the public ``indep_var_units`` property.
        return self._indep_var_units
    def _get_log_indep_axis(self):
        # Getter backing the public ``log_indep_axis`` property.
        return self._log_indep_axis
    def _get_panels(self):
        # Getter backing the public ``panels`` property.
        return self._panels
    def _get_title(self):
        # Getter backing the public ``title`` property.
        return self._title
@pexdoc.pcontracts.contract(dpi="None|positive_real_num")
def _set_dpi(self, dpi):
self._dpi = float(dpi)
    @pexdoc.pcontracts.contract(fig_height="None|positive_real_num")
    def _set_fig_height(self, fig_height):
        # When the figure is fully specified, build it first so the
        # minimum-size check compares against up-to-date dimensions.
        if self._complete:
            self._create_figure()
            self._check_figure_spec(self.fig_width, fig_height)
        self._fig_height = fig_height
        self._need_redraw = True
    @pexdoc.pcontracts.contract(fig_width="None|positive_real_num")
    def _set_fig_width(self, fig_width):
        # When the figure is fully specified, build it first so the
        # minimum-size check compares against up-to-date dimensions.
        if self._complete:
            self._create_figure()
            self._check_figure_spec(fig_width, self.fig_height)
        self._fig_width = fig_width
        self._need_redraw = True
    @pexdoc.pcontracts.contract(indep_axis_ticks="None|increasing_real_numpy_vector")
    def _set_indep_axis_ticks(self, indep_axis_ticks):
        # Stores the raw (unscaled) tick locations; _draw later overwrites
        # this attribute with the scaled locations it computes.
        self._indep_axis_ticks = indep_axis_ticks
        self._need_redraw = True
    @pexdoc.pcontracts.contract(indep_axis_tick_labels="None|list(str)")
    def _set_indep_axis_tick_labels(self, indep_axis_tick_labels):
        # Labels are ignored for logarithmic axes. Unlike the other setters
        # this one rebuilds the figure immediately, so the label/location
        # count check in _get_global_xaxis runs at assignment time.
        if not self._log_indep_axis:
            self._indep_axis_tick_labels = indep_axis_tick_labels
            self._need_redraw = True
            self._create_figure()
    @pexdoc.pcontracts.contract(indep_var_label="None|str")
    def _set_indep_var_label(self, indep_var_label):
        # Stored verbatim; the redraw happens lazily on next figure access.
        self._indep_var_label = indep_var_label
        self._need_redraw = True
    @pexdoc.pcontracts.contract(indep_var_units="None|str")
    def _set_indep_var_units(self, indep_var_units):
        # Stored verbatim; the redraw happens lazily on next figure access.
        self._indep_var_units = indep_var_units
        self._need_redraw = True
    @pexdoc.pcontracts.contract(log_indep_axis="None|bool")
    def _set_log_indep_axis(self, log_indep_axis):
        # Stored verbatim; the redraw happens lazily on next figure access.
        self._log_indep_axis = log_indep_axis
        self._need_redraw = True
    @pexdoc.pcontracts.contract(title="None|str")
    def _set_title(self, title):
        # Stored verbatim; the redraw happens lazily on next figure access.
        self._title = title
        self._need_redraw = True
def _set_panels(self, panels):
self._panels = (
(panels if isinstance(panels, list) else [panels])
if panels is not None
else panels
)
if self.panels is not None:
self._validate_panels()
self._need_redraw = True
    def _validate_panels(self):
        """Verify elements of panel list are of the right type and fully specified."""
        invalid_ex = pexdoc.exh.addai("panels")
        specified_ex = pexdoc.exh.addex(
            TypeError, "Panel *[panel_num]* is not fully specified"
        )
        for num, obj in enumerate(self.panels):
            # Each entry must be a Panel instance with all required fields
            # set (Panel._complete); the offending index is reported.
            invalid_ex(not isinstance(obj, Panel))
            specified_ex(not obj._complete, _F("panel_num", num))
@pexdoc.pcontracts.contract(fname="file_name", ftype="None|str", compress=bool)
def save(self, fname, ftype=None, compress=True):
r"""
Save the figure to a file.
:param fname: File name
:type fname: `FileName <https://pexdoc.readthedocs.io/en/stable/
ptypes.html#filename>`_
:param ftype: File type, either 'PNG' or 'EPS' (case insensitive). The
PNG format is a `raster
<https://en.wikipedia.org/wiki/Raster_graphics>`_ format
while the EPS format is a
`vector <https://en.wikipedia.org/wiki/
Vector_graphics>`_ format
:type ftype: string
:param compress: Flag that indicates whether the file saved is to be
compressed (True) or not (False). Only relevant for
PNG file type
:type compress: boolean
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.save
:raises:
* RuntimeError (Argument \`compress\` is not valid)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`ftype\` is not valid)
* RuntimeError (Could not determine file type)
* RuntimeError (Figure object is not fully specified)
* RuntimeError (Incongruent file type and file extension)
* RuntimeError (Number of tick locations and number of tick labels
mismatch)
* RuntimeError (Unsupported file type: *[file_type]*)
* ValueError (Figure cannot be plotted with a logarithmic independent
axis because panel *[panel_num]*, series *[series_num]* contains
negative independent data points)
.. [[[end]]]
"""
unsupported_ex = pexdoc.exh.addex(
RuntimeError, "Unsupported file type: *[file_type]*"
)
no_ftype_ex = pexdoc.exh.addex(RuntimeError, "Could not determine file type")
incongruent_ftype = pexdoc.exh.addex(
RuntimeError, "Incongruent file type and file extension"
)
sup_ftypes = ["png", "eps", "pdf"]
unsupported_ex(
bool((ftype is not None) and (ftype.lower() not in sup_ftypes)),
_F("file_type", ftype),
)
basename, extension = os.path.splitext(fname)
extension = extension.lstrip(".")
no_ftype_ex(bool((ftype is None) and (extension.lower() not in sup_ftypes)))
incongruent_ftype(
bool(
(ftype is not None)
and extension
and (ftype.upper() != extension.upper())
)
)
ftype = (ftype or extension).upper()
extension = extension or ftype.lower()
fname = "{0}.{1}".format(basename, extension)
bbox = self._create_figure(raise_exception=True)
dpi = self.dpi if ftype == "PNG" else None
bbox = bbox if ftype == "PNG" else "tight"
# Matplotlib seems to have a problem with ~/, expand it to $HOME
fname = os.path.expanduser(fname)
pmisc.make_dir(fname)
self._fig_width, self._fig_height = self._fig_dims()
self._fig.savefig(
fname,
dpi=dpi,
bbox="tight",
format=ftype,
bbox_extra_artists=(self._title_obj,),
)
plt.close("all")
if (ftype == "PNG") and compress:
img = PIL.Image.open(fname)
# Remove alpha channel
img = img.convert("RGB")
# Move to index image if possible (maximum number of colors used
# has to be less that 256 as the palette is 8 bits)
# getcolors returns None if the number of colors exceeds the
# maxcolors argument
ncolors = img.getcolors(maxcolors=256)
if ncolors is not None:
img = img.convert("P", palette=PIL.Image.ADAPTIVE)
img.save(fname, quality=100, optimize=True)
def show(self):
"""
Display the figure.
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.show
:raises:
* RuntimeError (Figure object is not fully specified)
* RuntimeError (Number of tick locations and number of tick labels
mismatch)
* ValueError (Figure cannot be plotted with a logarithmic independent
axis because panel *[panel_num]*, series *[series_num]* contains
negative independent data points)
.. [[[end]]]
"""
self._create_figure(raise_exception=True)
self._fig_width, self._fig_height = self._fig_dims()
plt.show()
# Managed attributes
_complete = property(_get_complete)
axes_list = property(_get_axes_list, doc="Matplotlib figure axes handle list")
"""
Get Matplotlib figure axes handle list.
:code:`None` is returned if figure not fully specified. Useful if
annotations or further customizations to the panel(s) are needed. Each
panel has an entry in the list, which is sorted in the order the panels are
plotted (top to bottom). Each panel entry is a dictionary containing the
following key-value pairs:
* **number** (*integer*) -- panel number, panel 0 is the top-most panel
* **primary** (*Matplotlib axis object*) -- axis handle for the primary
axis, None if the figure has not primary axis
* **secondary** (*Matplotlib axis object*) -- axis handle for the
secondary axis, None if the figure has no secondary axis
:type: list
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.axes_list
:raises: (when retrieved)
* RuntimeError (Number of tick locations and number of tick labels
mismatch)
* ValueError (Figure cannot be plotted with a logarithmic independent
axis because panel *[panel_num]*, series *[series_num]* contains
negative independent data points)
.. [[[end]]]
"""
dpi = property(_get_dpi, _set_dpi, doc="Figure dots per inch (DPI)")
r"""
Get or set the dots per inch (DPI) of the figure.
:type: `PositiveRealNum <https://pexdoc.readthedocs.io/en/
stable/ptypes.html#positiverealnum>`_ or None
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for pplot.figure.Figure.dpi
:raises: (when assigned) RuntimeError (Argument \`dpi\` is not valid)
.. [[[end]]]
"""
fig = property(_get_fig, doc="Figure handle")
"""
Get the Matplotlib figure handle.
Useful if annotations or further customizations to the figure are needed.
:code:`None` is returned if figure is not fully specified
:type: Matplotlib figure handle or None
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for pplot.figure.Figure.fig
:raises: (when retrieved)
* RuntimeError (Number of tick locations and number of tick labels
mismatch)
* ValueError (Figure cannot be plotted with a logarithmic independent
axis because panel *[panel_num]*, series *[series_num]* contains
negative independent data points)
.. [[[end]]]
"""
fig_height = property(
_get_fig_height, _set_fig_height, doc="height of the hard copy plot"
)
r"""
Get or set the height (in inches) of the hard copy plot.
:code:`None` is returned if figure is not fully specified.
:type: `PositiveRealNum <https://pexdoc.readthedocs.io/en/
stable/ptypes.html#positiverealnum>`_ or None
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.fig_height
:raises: (when assigned)
* RuntimeError (Argument \`fig_height\` is not valid)
* RuntimeError (Figure size is too small: minimum width *[min_width]*,
minimum height *[min_height]*)
* RuntimeError (Number of tick locations and number of tick labels
mismatch)
* ValueError (Figure cannot be plotted with a logarithmic independent
axis because panel *[panel_num]*, series *[series_num]* contains
negative independent data points)
.. [[[end]]]
"""
fig_width = property(
_get_fig_width, _set_fig_width, doc="Width of the hard copy plot"
)
r"""
Get or set the width (in inches) of the hard copy plot.
:code:`None` is returned if figure is not fully specified.
:type: `PositiveRealNum <https://pexdoc.readthedocs.io/en/
stable/ptypes.html#positiverealnum>`_ or None
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.fig_width
:raises: (when assigned)
* RuntimeError (Argument \`fig_width\` is not valid)
* RuntimeError (Figure size is too small: minimum width *[min_width]*,
minimum height *[min_height]*)
* RuntimeError (Number of tick locations and number of tick labels
mismatch)
* ValueError (Figure cannot be plotted with a logarithmic independent
axis because panel *[panel_num]*, series *[series_num]* contains
negative independent data points)
.. [[[end]]]
"""
indep_axis_scale = property(_get_indep_axis_scale, doc="Independent axis scale")
"""
Get the scale of the figure independent axis.
:code:`None` is returned if figure is not fully specified.
:type: float or None if figure has no panels associated with it
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.indep_axis_scale
:raises: (when retrieved)
* RuntimeError (Number of tick locations and number of tick labels
mismatch)
* ValueError (Figure cannot be plotted with a logarithmic independent
axis because panel *[panel_num]*, series *[series_num]* contains
negative independent data points)
.. [[[end]]]
"""
indep_axis_ticks = property(
_get_indep_axis_ticks,
_set_indep_axis_ticks,
doc="Independent axis tick locations",
)
r"""
Get or set the independent axis (scaled) tick locations.
:type: list
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.indep_axis_ticks
:raises:
* When assigned
* RuntimeError (Argument \`indep_axis_ticks\` is not valid)
* When retrieved
* RuntimeError (Number of tick locations and number of tick labels
mismatch)
* ValueError (Figure cannot be plotted with a logarithmic
independent axis because panel *[panel_num]*, series *[series_num]*
contains negative independent data points)
.. [[[end]]]
"""
indep_axis_tick_labels = property(
_get_indep_axis_tick_labels,
_set_indep_axis_tick_labels,
doc="Independent axis tick labels",
)
r"""
Get or set the independent axis tick labels.
Labels are ignored for figures with a logarithmic independent axis
:type: list of strings
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.indep_axis_tick_labels
:raises:
* When assigned
* RuntimeError (Argument \`indep_axis_tick_labels\` is not valid)
* RuntimeError (Number of tick locations and number of tick labels
mismatch)
* ValueError (Figure cannot be plotted with a logarithmic
independent axis because panel *[panel_num]*, series *[series_num]*
contains negative independent data points)
* When retrieved
* RuntimeError (Number of tick locations and number of tick labels
mismatch)
* ValueError (Figure cannot be plotted with a logarithmic
independent axis because panel *[panel_num]*, series *[series_num]*
contains negative independent data points)
.. [[[end]]]
"""
indep_var_label = property(
_get_indep_var_label, _set_indep_var_label, doc="Figure independent axis label"
)
r"""
Get or set the figure independent variable label
:type: string or None
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.indep_var_label
:raises: (when assigned) RuntimeError (Argument \`indep_var_label\` is
not valid)
.. [[[end]]]
"""
indep_var_units = property(
_get_indep_var_units, _set_indep_var_units, doc="Figure independent axis units"
)
r"""
Get or set the figure independent variable units.
:type: string or None
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.indep_var_units
:raises: (when assigned) RuntimeError (Argument \`indep_var_units\` is
not valid)
.. [[[end]]]
"""
log_indep_axis = property(
_get_log_indep_axis, _set_log_indep_axis, doc="Figure log_indep_axis"
)
r"""
Get or set the figure logarithmic independent axis flag.
This flag indicates whether the independent axis is linear (False) or
logarithmic (True)
:type: boolean
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.log_indep_axis
:raises: (when assigned) RuntimeError (Argument \`log_indep_axis\` is
not valid)
.. [[[end]]]
"""
panels = property(_get_panels, _set_panels, doc="Figure panel(s)")
r"""
Get or set the figure panel(s).
:code:`None` is returned if no panels have been specified
:type: :py:class:`pplot.Panel`, list of
:py:class:`pplot.panel` or None
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.panels
:raises: (when assigned)
* RuntimeError (Argument \`panels\` is not valid)
* TypeError (Panel *[panel_num]* is not fully specified)
.. [[[end]]]
"""
title = property(_get_title, _set_title, doc="Figure title")
r"""
Get or set the figure title.
:type: string or None
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pplot.figure.Figure.title
:raises: (when assigned) RuntimeError (Argument \`title\` is not
valid)
.. [[[end]]]
"""
| [
"pmasdev@gmail.com"
] | pmasdev@gmail.com |
d8417c8ca8acec6cecbf368d3b3ad51f650efd6b | 475364e7eebdff89e2c78a07f84b098cfe7a445f | /mooc_algorithms/graph.py | 05398f2af02666d52aa02a900dd4c8e588e8ea96 | [] | no_license | ashishsubedi/DSA | 6f37d25cee857fd19faa7d24f0370bd222844381 | 9b87f88c6d9d131db4920ef0bfc6fa9d571908be | refs/heads/master | 2021-07-12T11:44:31.166006 | 2020-06-08T15:47:02 | 2020-06-08T15:47:02 | 158,732,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,313 | py | class Graph:
def __init__(self):
self.vertices = {}
self.routingTable = {}
self.MST = None
def addVertices(self, vertices, e=[]):
'''
Add array of vertices. If e is given, all vertices will be initialzed with e
@param vertices: List of all vertices
@param e: [Optional]List of vertices
'''
for v in vertices:
if v not in self.vertices:
self.vertices[v] = set(e)
def addEdges(self, v, e):
if v in self.vertices:
for edge in e:
self.vertices[v].add(edge)
else:
self.vertices[v] = set(e)
def removeVertices(self, vertices):
for v in vertices:
if v in self.vertices:
del self.vertices[v]
def removeEdges(self, v, e):
if v in self.vertices:
for edge in e:
if(edge in self.vertices[v]):
self.vertices[v].remove(edge)
def neighbours(self, v):
if v in self.vertices:
return list(self.vertices[v])
def BFS(self, startVertex, parent=None):
visited = set()
q = [(startVertex, parent)]
spanningTree = Graph()
routingTable = {}
while len(q) > 0:
v = q[0][0]
p = q[0][1]
q.remove(q[0])
if(v not in visited):
routingTable[v] = p
visited.add(v)
if p is not None:
if(v not in spanningTree.vertices):
spanningTree.addVertices([v], [p])
else:
spanningTree.addEdges(v, [p])
if(p not in spanningTree.vertices):
spanningTree.addVertices([p], [v])
else:
spanningTree.addEdges(p, [v])
for n in self.neighbours(v):
if(n not in visited):
q.append((n, v))
self.routingTable = routingTable
if self.MST is None:
self.MST = spanningTree
return spanningTree
def DFS(self, startVertex, parent=None):
visited = set()
q = [(startVertex, parent)]
spanningTree = Graph()
while len(q) > 0:
pair = q.pop()
v = pair[0]
p = pair[1]
if(v not in visited):
visited.add(v)
if p is not None:
if(v not in spanningTree.vertices):
spanningTree.addVertices([v], [p])
else:
spanningTree.addEdges(v, [p])
if(p not in spanningTree.vertices):
spanningTree.addVertices([p], [v])
else:
spanningTree.addEdges(p, [v])
for n in self.neighbours(v):
if(n not in visited):
q.append((n, v))
return spanningTree
# Demo: build a small undirected graph and print its BFS/DFS trees.
g = Graph()
# Vertex 7 is added but never connected, so it stays isolated.
g.addVertices([1, 2, 3, 4, 5, 6, 7])
g.addEdges(1, [2, 3])
g.addEdges(2, [1, 4, 5])
g.addEdges(3, [1, 4, 6])
g.addEdges(4, [2, 3, 5, 6])
g.addEdges(5, [2, 4])
g.addEdges(6, [3, 4])
# NOTE(review): BFS returns a breadth-first spanning tree, not a minimum
# spanning tree (the graph is unweighted), despite the `mst` name.
mst = g.BFS(4)
st = g.DFS(4)
print(g.routingTable)
print(mst.vertices)
print(st.vertices)
| [
"ashishsubedi10@outlook.com"
] | ashishsubedi10@outlook.com |
54eb14a012555c0ee5a15f3eb849bc2a25af597b | c137c1308bc62954e40e20e80eaa36e75a6766ef | /days/38/len-impl-py/solution.py | 1b2bc961874dfb7f57b805d1639e5dd39e9d2a31 | [] | no_license | shaversj/100-days-of-code-r2 | a7a45c07635fac52d1e506522cc18439a7e37da8 | 50dec098a60231eb4ac6972beb0adfeded215e10 | refs/heads/master | 2023-03-04T19:57:23.147768 | 2021-02-14T14:36:15 | 2021-02-14T14:36:15 | 259,045,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | from typing import List
class LenCustom:
    """A hand-rolled replacement for the built-in ``len``."""

    @staticmethod
    def find_length(string: str):
        """Count the items in *string* (any iterable) by walking it once."""
        return sum(1 for _ in string)
print(LenCustom.find_length("test"))
print(LenCustom.find_length([1, 2, 3, 4])) | [
"shaversj@gmail.com"
] | shaversj@gmail.com |
511ca25210aebdea239ec9c00fcca35b9da49554 | 71aaefa30760ecc699f533db24ebe353084cc8e0 | /src/tornado-webserver.py | 5cb8d891a7f973f9fb2c48ba6bffd64a68544abe | [] | no_license | rmessner/docker-deis-dashboard | e190fc4b450fbb8ab298cdb76eecd2f3d62a8b88 | 3e879bb12bd2f192bf4403a37f8c96fdcfb02b45 | refs/heads/master | 2021-01-01T19:42:54.864826 | 2014-12-03T17:44:17 | 2014-12-03T17:44:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
from dashboard import app
# Wrap the WSGI ``app`` so Tornado's event loop can serve it.
http_server = HTTPServer(WSGIContainer(app))
# Listen on the standard HTTP port (typically remapped by the container host).
http_server.listen(80)
enable_pretty_logging()
# Start the Tornado IOLoop; this call blocks until the process is stopped.
IOLoop.instance().start()
| [
"raphael.messner@gmail.com"
] | raphael.messner@gmail.com |
8e898266becce9d780c6884cb25e12a18f5f26ab | 79b35425287245a7beb3bc4b6d287b6e958119da | /My first model.py | 472ab8cef07d39699161015eb672a37fa4ee2a57 | [] | no_license | Jashshor/Python_AI_Model-first-tested | 9ab4fba38bccab90816d32a20b60336300ce9763 | 84387aec4a3254cf63ea44ba10a18c4d8ad6dbad | refs/heads/master | 2022-11-11T11:32:41.245704 | 2020-06-26T07:45:17 | 2020-06-26T07:45:17 | 275,100,751 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,703 | py | import pandas
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import roc_auc_score
# Load the bank-marketing dataset; drop the "duration" column up front.
df = pandas.read_csv("./data/bank-additional-full.csv", sep=";")
df = df.drop(["duration"], axis=1)

# Integer-encode every categorical column, then the target column "y".
encoder = LabelEncoder()
categorical_columns = ['job', 'marital', 'education',
                       'default', 'housing', 'loan', 'contact',
                       'month', 'day_of_week', 'poutcome']
for column in categorical_columns:
    df[column] = encoder.fit_transform(df[column])
y = encoder.fit_transform(df["y"])
df = df.drop(["y"], axis=1)

# Hold out 30% of the rows for evaluation.
x_train, x_test, y_train, y_test = train_test_split(df, y, test_size=0.3)

# Decision tree (default criterion and splitter).
clf_tree = DecisionTreeClassifier().fit(x_train, y_train)
y_predict_tree = clf_tree.predict(x_test)
accuracy_score_tree = accuracy_score(y_test, y_predict_tree)
precision_score_tree = precision_score(y_test, y_predict_tree)
recall_score_tree = recall_score(y_test, y_predict_tree)
roc_auc_tree = roc_auc_score(y_test, y_predict_tree)

# k-nearest neighbours with default settings.
clf_KNN = KNeighborsClassifier().fit(x_train, y_train)
y_predict_KNN = clf_KNN.predict(x_test)
accuracy_score_KNN = accuracy_score(y_test, y_predict_KNN)
precision_score_KNN = precision_score(y_test, y_predict_KNN)
recall_score_KNN = recall_score(y_test, y_predict_KNN)
roc_auc_KNN = roc_auc_score(y_test, y_predict_KNN)
# print(roc_auc_KNN, roc_auc_tree)
| [
"3099681787@qq.com"
] | 3099681787@qq.com |
f173160d77e0d5bea220f33633e5bb63c9668916 | 50c20d107f98eb6c78553c9a0dcc20298df5958d | /courses/udacity/Intro to Machine Learning/svm/svm_author_id.py | bbf9b60353c3d39fb2b87320cb3e84193a894362 | [] | no_license | arnaldog12/Deep-Learning | a7b9dade336f9977109e8de4de8b65db35b711e3 | b0d46d93203394692cf9ba8d5628f8edc1589b6a | refs/heads/master | 2022-05-02T16:05:40.978508 | 2022-04-29T12:34:27 | 2022-04-29T12:34:27 | 98,334,247 | 90 | 51 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | #!/usr/bin/python
"""
This is the code to accompany the Lesson 2 (SVM) mini-project.
Use a SVM to identify emails from the Enron corpus by their authors:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()

# Optional speed-up: train on 1% of the data (commented out to use the full set).
# features_train = features_train[:len(features_train)/100]
# labels_train = labels_train[:len(labels_train)/100]
#########################################################
### your code goes here ###
# RBF-kernel SVM with a large C (low regularisation, fits training data closely).
clf = SVC(C=10000.0, kernel='rbf')
t0 = time()
clf.fit(features_train, labels_train)
print("Training Time: {0:.3f}".format(time()-t0))
t1 = time()
pred = clf.predict(features_test)
print("Test Time: {0:.3f}".format(time()-t1))
# print(pred)
# Number of test emails predicted as Chris (label 1).
print(len(pred[pred == 1]))
# print(accuracy_score(pred, labels_test[10]))
#########################################################
| [
"arnaldo.g12@gmail.com"
] | arnaldo.g12@gmail.com |
2eb15e7a7809dccc58b91240a1a0bdbde8f2ea7a | 162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d | /examples/linear_model/plot_logistic_l1_l2_sparsity.py | afccba025af1f2bb50d6e3b57e30535232120bfa | [] | no_license | testsleeekGithub/trex | 2af21fa95f9372f153dbe91941a93937480f4e2f | 9d27a9b44d814ede3996a37365d63814214260ae | refs/heads/master | 2020-08-01T11:47:43.926750 | 2019-11-06T06:47:19 | 2019-11-06T06:47:19 | 210,987,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,328 | py | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1, L2 and Elastic-Net penalty are used for different values of C. We can see
that large values of C give more freedom to the model. Conversely, smaller
values of C constrain the model more. In the L1 penalty case, this leads to
sparser solutions. As expected, the Elastic-Net penalty sparsity is between
that of L1 and L2.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mrex.linear_model import LogisticRegression
from mrex import datasets
from mrex.preprocessing import StandardScaler
X, y = datasets.load_digits(return_X_y=True)
X = StandardScaler().fit_transform(X)
# classify small against large digits
# NOTE(review): np.int is removed in NumPy >= 1.24; int or np.int64 would be needed there.
y = (y > 4).astype(np.int)
l1_ratio = 0.5 # L1 weight in the Elastic-Net regularization
fig, axes = plt.subplots(3, 3)
# Set regularization parameter: one row of plots per value of C.
for i, (C, axes_row) in enumerate(zip((1, 0.1, 0.01), axes)):
    # turn down tolerance for short training time
    clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01, solver='saga')
    clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01, solver='saga')
    clf_en_LR = LogisticRegression(C=C, penalty='elasticnet', solver='saga',
                                   l1_ratio=l1_ratio, tol=0.01)
    clf_l1_LR.fit(X, y)
    clf_l2_LR.fit(X, y)
    clf_en_LR.fit(X, y)
    coef_l1_LR = clf_l1_LR.coef_.ravel()
    coef_l2_LR = clf_l2_LR.coef_.ravel()
    coef_en_LR = clf_en_LR.coef_.ravel()
    # coef_l1_LR contains zeros due to the
    # L1 sparsity inducing norm
    # Sparsity = percentage of exactly-zero coefficients.
    sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
    sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
    sparsity_en_LR = np.mean(coef_en_LR == 0) * 100
    print("C=%.2f" % C)
    print("{:<40} {:.2f}%".format("Sparsity with L1 penalty:", sparsity_l1_LR))
    print("{:<40} {:.2f}%".format("Sparsity with Elastic-Net penalty:",
                                  sparsity_en_LR))
    print("{:<40} {:.2f}%".format("Sparsity with L2 penalty:", sparsity_l2_LR))
    print("{:<40} {:.2f}".format("Score with L1 penalty:",
                                 clf_l1_LR.score(X, y)))
    print("{:<40} {:.2f}".format("Score with Elastic-Net penalty:",
                                 clf_en_LR.score(X, y)))
    print("{:<40} {:.2f}".format("Score with L2 penalty:",
                                 clf_l2_LR.score(X, y)))
    if i == 0:
        axes_row[0].set_title("L1 penalty")
        axes_row[1].set_title("Elastic-Net\nl1_ratio = %s" % l1_ratio)
        axes_row[2].set_title("L2 penalty")
    # Show the 64 coefficients of each model as an 8x8 image.
    for ax, coefs in zip(axes_row, [coef_l1_LR, coef_en_LR, coef_l2_LR]):
        ax.imshow(np.abs(coefs.reshape(8, 8)), interpolation='nearest',
                  cmap='binary', vmax=1, vmin=0)
        ax.set_xticks(())
        ax.set_yticks(())
    axes_row[0].set_ylabel('C = %s' % C)
plt.show()
| [
"shkolanovaya@gmail.com"
] | shkolanovaya@gmail.com |
223aba0f3a6f0830d35ca6c772b7bd4a586e3e03 | 5e7aee7be8f1e99129957bbd26b93a0a22638b56 | /py/model.py | e3bf74cc518d1bee778f0d12e3c3a9981afed8c8 | [
"MIT"
] | permissive | alexisperrier/rabotnik | fa0391ccc62bf1c203a227ffe83ac1c32c738821 | e629118a692ea65dc39bf323f74096eec6c120e5 | refs/heads/master | 2023-04-08T15:09:49.843081 | 2021-04-15T11:02:22 | 2021-04-15T11:02:22 | 277,247,779 | 0 | 0 | null | 2021-04-15T11:02:22 | 2020-07-05T06:53:47 | Python | UTF-8 | Python | false | false | 13,710 | py | '''
Contain classes for most major database tables
Each class offers methods to insert, update, upsert ...
For instance the class Channel has the following methods:
- create: inserts a new channel in table channel
- update: updates data for a given channel_id, data is from API
- update_from_feed: updates data for a given channel_id, data is from RSS feed
'''
from .text import *
from .job import *

# Standard library
import datetime
import html
import re  # used by Caption.get_captions; was referenced but never imported
import urllib.parse  # importing the submodule explicitly guarantees urllib.parse exists
from xml.etree import ElementTree

# Third-party
import pandas as pd  # used by Caption.get_captions; was referenced but never imported
import pytz
import requests  # used by Caption.get_captions; was referenced but never imported
class Model(object):
    """Do-nothing base class shared by the table-mapper classes below."""
    # TODO rm, not used
    def __init__(self):
        pass
class Comment(Model):
    """Mapper for the ``comments`` table."""

    @classmethod
    def create(cls, d):
        """Insert one comment row; return the number of rows inserted (0 or 1).

        ``d`` is an attribute bag with the comment fields.  Free-text fields
        go through ``TextUtils.to_db`` and are quoted with ``$$ ... $$`` to
        defuse embedded quotes.  Any failure is treated as "nothing inserted".
        """
        try:
            sql = f'''
            insert into comments (comment_id, video_id, discussion_id, parent_id,
                            author_name, author_channel_id,
                            text, reply_count, like_count,
                            published_at, created_at, updated_at)
            values ('{d.comment_id}', '{d.video_id}', {d.discussion_id}, '{d.parent_id}',
                    $${TextUtils.to_db(d.author_name)}$$, '{d.author_channel_id}',
                    $${TextUtils.to_db(d.text)}$$, {d.reply_count}, {d.like_count},
                    '{d.published_at}', now(), now())
            on conflict (comment_id) DO NOTHING
            '''
            job.execute(sql)
            return job.db.cur.rowcount
        except Exception:  # narrowed from a bare ``except`` so Ctrl-C still works
            return 0
class Discussion(Model):
    """Mapper for the ``discussions`` table (one thread row per video)."""

    @classmethod
    def create(cls, d):
        """Insert a discussion row and return its generated id.

        Returns None when the insert failed or the row already existed
        (``DO NOTHING`` produces no RETURNING row, so ``fetchone()`` raises).
        """
        try:
            sql = f'''
            insert into discussions (video_id, total_results, results_per_page, error, created_at, updated_at)
            values ('{d.video_id}', {d.total_results}, {d.results_per_page}, $${TextUtils.to_db(d.error)}$$, now(), now())
            on conflict (video_id) DO NOTHING
            RETURNING id;
            '''
            job.execute(sql)
            return job.db.cur.fetchone()[0]
        except Exception:  # narrowed from a bare ``except``
            return None
class VideoStat(Model):
    """Mapper for the ``video_stat`` time-series table."""

    @classmethod
    def create(cls, d):
        """Insert one stats snapshot for a video; return rows inserted (0 or 1).

        Only the counter fields actually present on ``d`` are written, so the
        same code handles sources that deliver partial statistics.
        NOTE(review): ``d`` is read both via ``hasattr`` and by subscription
        (``d[field]``) — presumably a dict-like attribute bag; confirm with callers.
        """
        try:
            fields = "video_id, source, viewed_at"
            values = f"'{d.video_id}', '{d.source}', '{d.viewed_at}'"
            for field in ['views','like_count','dislike_count','favorite_count','comment_count']:
                if hasattr(d,field):
                    val = int(d[field])
                    fields += f",{field}"
                    values += f", {val}"
            sql = f'''
            insert into video_stat as cs ({fields})
            values ({values})
            on conflict (video_id, viewed_at) DO NOTHING;
            '''
            job.execute(sql)
            return job.db.cur.rowcount
        except Exception:  # narrowed from a bare ``except``; keep best-effort semantics
            return 0
class Channel(object):
    """Mapper for the ``channel`` table."""

    @classmethod
    def create(cls, channel_id, origin):
        """Insert a bare channel row (no-op when it already exists)."""
        sql = f'''
        insert into channel (channel_id, origin)
        values ('{channel_id}','{origin}')
        on conflict (channel_id) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount

    @classmethod
    def update(cls, d):
        """Refresh channel metadata fetched from the YouTube API; return rowcount."""
        sql = f'''
        update channel set
        created_at = '{d.created_at}',
        title = $${TextUtils.to_db(d.title)}$$,
        description = $${TextUtils.to_db(d.description)}$$,
        thumbnail = '{d.thumbnail}',
        show_related = '{d.show_related}',
        custom_url = '{d.custom_url}',
        country = '{d.country}',
        retrieved_at = now()
        where channel_id = '{d.channel_id}'
        '''
        job.execute(sql)
        return job.db.cur.rowcount

    @classmethod
    def update_from_feed(cls, d):
        """Refresh activity columns parsed from the channel's RSS feed and
        schedule the next RSS parse ``d.frequency`` from now; return rowcount."""
        if d.activity is not None:
            str_activity = f"activity = '{d.activity}',"
        else:
            str_activity = f"activity = null,"
        # Bug fix: this branch used to test ``d.activity`` again, so a feed
        # with activity set but no score rendered ``activity_score = None``
        # (invalid SQL) instead of ``null``.
        if d.activity_score is not None:
            str_activity_score = f"activity_score = {d.activity_score},"
        else:
            str_activity_score = f"activity_score = null,"
        sql = f'''
        update channel set
        {str_activity}
        {str_activity_score}
        rss_next_parsing = NOW() + interval '{d.frequency}',
        retrieved_at = now()
        where channel_id = '{d.channel_id}'
        '''
        job.execute(sql)
        return job.db.cur.rowcount
class ChannelTopic(Model):
    """Mapper for the ``topic`` table (one topics row per channel)."""

    @classmethod
    def upsert(cls, d):
        """Insert or refresh the topics for ``d.channel_id``; return rowcount.

        The two original branches duplicated the whole statement and differed
        only in the topics literal, so the literal is computed once instead.
        """
        topics_sql = "Null" if d.topics is None else f"'{d.topics}'"
        sql = f'''
        insert into topic as tpc (channel_id, topics, created_at)
        values ('{d.channel_id}',{topics_sql}, now())
        on conflict (channel_id) do update
        set topics = {topics_sql}, created_at = now()
        where tpc.channel_id = '{d.channel_id}'
        '''
        job.execute(sql)
        # Return rowcount for consistency with the other mappers (additive change).
        return job.db.cur.rowcount
class ChannelStat(Model):
    """Mapper for the ``channel_stat`` table (latest stats per channel)."""

    @classmethod
    def upsert(cls, d):
        """Insert or refresh the stats row for ``d.channel_id``; return rowcount.

        When the channel hides its subscriber count, the ``subscribers``
        column is left untouched.  The original duplicated the whole SQL
        statement for the two cases; only the subscriber fragments differ.
        """
        if d.hidden_subscribers_count:
            sub_col, sub_val, sub_set = "", "", ""
        else:
            sub_col = ", subscribers"
            sub_val = f", {d.subscribers}"
            sub_set = f"subscribers = {d.subscribers},"
        sql = f'''
        insert into channel_stat as cs
        (channel_id, views{sub_col}, videos, retrieved_at)
        values
        ('{d.channel_id}', {d.views}{sub_val}, {d.videos}, now())
        on conflict (channel_id) do update
        set views = {d.views},
            {sub_set}
            videos = {d.videos},
            retrieved_at = now()
        where cs.channel_id = '{d.channel_id}'
        '''
        job.execute(sql)
        return job.db.cur.rowcount
class IndexSearch(Model):
    """Mapper for the ``augment`` table: per-video full-text search vectors."""
    @classmethod
    def upsert(cls,d):
        # Store the lemmatised transcript as a French tsvector so Postgres
        # full-text search can rank videos; refreshed on every upsert.
        sql = f'''
        insert into augment as au (video_id, tsv_lemma, created_at)
        values ( '{d.video_id}', to_tsvector('french', $${TextUtils.to_db(d.refined_lemma)}$$), now() )
        on conflict (video_id) do update
        set tsv_lemma = to_tsvector('french', $${TextUtils.to_db(d.refined_lemma)}$$),
        created_at = now()
        where au.video_id = '{d.video_id}'
        '''
        job.execute(sql)
        return job.db.cur.rowcount
class Video(Model):
    """Mapper for the ``video`` table."""

    @classmethod
    def update(cls, d):
        """Refresh a video row with metadata fetched from the YouTube API.

        Returns the number of rows updated.  On failure the statement is
        dumped for debugging, the DB connection is re-established and 0 is
        returned.
        """
        sql = f'''
        update video set
        published_at = '{d.published_at}',
        channel_id = '{d.channel_id}',
        title = $${TextUtils.to_db(d.title)}$$,
        summary = $${TextUtils.to_db(d.summary)}$$,
        thumbnail = '{d.thumbnail}',
        category_id = {d.category_id},
        duration = '{d.duration}',
        caption = {d.caption},
        privacy_status = '{d.privacy_status}',
        tags = $${TextUtils.to_db(d.tags)}$$,
        pubdate = '{d.pubdate}',
        live_content = '{d.live_content}',
        default_audio_language = '{d.default_audio_language}',
        default_language = '{d.default_language}',
        wikitopics = $${TextUtils.to_db(d.wikitopics)}$$,
        seconds = {d.seconds},
        retrieved_at = now()
        where video_id = '{d.video_id}'
        '''
        try:
            job.execute(sql)
            return job.db.cur.rowcount
        except Exception:  # narrowed from a bare ``except`` (keeps Ctrl-C working)
            print("=="*20)
            print("FAILED")
            print(sql)
            print("=="*20)
            job.reconnect()
            return 0

    @classmethod
    def create_from_feed(cls, d):
        """Insert a bare video row from an RSS feed entry (duplicates skipped)."""
        sql = f'''
        insert into video
        (video_id,channel_id,title,summary,origin,published_at)
        values
        ('{d.video_id}', '{d.channel_id}',$${TextUtils.to_db(d.title)}$$,$${TextUtils.to_db(d.summary)}$$,'{d.origin}','{d.published_at}')
        on conflict (video_id) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount

    @classmethod
    def create_from_id(cls, video_id, origin):
        """Insert a placeholder video row knowing only its id (duplicates skipped)."""
        sql = f'''
        insert into video
        (video_id,origin)
        values
        ('{video_id}', '{origin}')
        on conflict (video_id) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount

    @classmethod
    def bulk_create(cls, video_ids, origin):
        """Insert many placeholder video rows in a single statement.

        Bug fix: ``values`` was never initialised, so every call raised
        ``NameError``.  Also mirrors create_from_id's ON CONFLICT clause so
        duplicates are skipped instead of aborting the whole statement.
        """
        if not video_ids:
            return 0
        values = [f"('{video_id}', '{origin}')" for video_id in video_ids]
        sql = f''' insert into video (video_id,origin) values {','.join(values)} on conflict (video_id) DO NOTHING '''
        job.execute(sql)
        return job.db.cur.rowcount
class Pipeline(Model):
    """Mapper for the ``pipeline`` work-queue table.

    All methods take keyword arguments: ``idname`` is the id column name
    (e.g. 'video_id' or 'channel_id') and ``item_id`` is its value.
    """
    @classmethod
    def update_status(cls, **kwargs):
        # Set the processing status of one item.
        sql = f" update pipeline set status = '{kwargs['status']}' where {kwargs['idname']}= '{kwargs['item_id']}' "
        job.execute(sql)
        return job.db.cur.rowcount
    @classmethod
    def update_lang(cls, **kwargs):
        # Store the detected language and the detector's confidence for one item.
        sql = f" update pipeline set lang = '{kwargs['lang']}', lang_conf = '{kwargs['lang_conf']}' where {kwargs['idname']}= '{kwargs['item_id']}' "
        job.execute(sql)
        return job.db.cur.rowcount
    @classmethod
    def create(cls, **kwargs):
        # Enqueue an item with status 'incomplete'; duplicates are skipped.
        sql = f'''
        insert into pipeline ({kwargs['idname']}, status)
        values ('{kwargs['item_id']}','incomplete')
        on conflict ({kwargs['idname']}) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount
class RelatedChannels(object):
    """Mapper for the ``related_channels`` edge table (channel -> related channel)."""
    @classmethod
    def insert(cls, **kwargs):
        # Record one "related channel" edge; duplicate edges are skipped.
        sql = f'''
        insert into related_channels (channel_id, related_id, retrieved_at)
        values ('{kwargs['channel_id']}','{kwargs['related_id']}',NOW())
        on conflict (channel_id, related_id) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount
class RecommendedVideos(object):
    """Mapper for the ``video_recommendations`` edge table (source -> recommended)."""
    @classmethod
    def insert(cls, d):
        # One edge per (source, target, harvest day); repeats on the same day are skipped.
        sql = f'''
        insert into video_recommendations
        (src_video_id, tgt_video_id, harvest_date, tgt_video_harvested_at)
        values ('{d.src_video_id}','{d.tgt_video_id}', '{d.harvest_date}',NOW())
        on conflict (src_video_id, tgt_video_id, harvest_date) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount
class VideoScrape(Model):
    """Mapper for the ``video_scrape`` table that tracks scraped videos."""
    @classmethod
    def insert(cls,video_id):
        # Stamp the scrape with today's date in the Amsterdam timezone; the
        # primary key on video_id makes this record-at-most-once.
        completed_date = datetime.datetime.now(pytz.timezone('Europe/Amsterdam')).strftime("%Y-%m-%d")
        sql = f'''
        insert into video_scrape (video_id, completed_date, created_at)
        values ('{video_id}', '{completed_date}', now())
        on conflict (video_id) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount
class Caption(object):
    """Helpers for parsing YouTube timed-text (caption) URLs and payloads."""

    @classmethod
    def get_lang(cls, url):
        """Return the ``lang`` query parameter of *url*, or '' when absent."""
        params = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
        if 'lang' in params:
            return params['lang'][0]
        return ''

    @classmethod
    def get_asr(cls, url):
        """Classify the caption track from the ``kind`` query parameter.

        Returns 'a_manual' (no kind), 'b_generated' (kind=asr, i.e.
        auto-generated speech recognition) or 'c_unknown' (any other kind).
        """
        params = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
        if 'kind' not in params:
            return 'a_manual'
        return 'b_generated' if params['kind'][0] == 'asr' else 'c_unknown'

    @classmethod
    def get_expire(cls, url):
        """Return the ``expire`` query parameter (epoch seconds, as a string).

        Raises ``KeyError`` when the URL carries no ``expire`` parameter.
        """
        return urllib.parse.parse_qs(urllib.parse.urlparse(url).query)['expire'][0]

    @classmethod
    def get_captions(cls, caption_urls):
        """Download every caption track listed in the ``caption_urls`` dataframe.

        Expects rows with ``url``, ``expire``, ``caption_type`` and ``lang``
        columns; returns a DataFrame with one row per track, where ``text``
        is a list of de-tagged caption lines or None when the download failed.
        """
        HTML_TAG_REGEX = re.compile(r'<[^>]*>', re.IGNORECASE)
        # One Session for all downloads: the old code built (and leaked) an
        # unused Session plus a fresh throwaway Session per row.
        session = requests.Session()
        captions = []
        for i, u in caption_urls.iterrows():
            result = session.get(u.url)
            if (result.status_code == 200) and (len(result.text) > 0):
                # Strip markup from each <text> element of the timed-text XML.
                caption_text = [re.sub(HTML_TAG_REGEX, '', html.unescape(xml_element.text)).replace("\n",' ').replace("\'","'")
                                for xml_element in ElementTree.fromstring(result.text)
                                if xml_element.text is not None
                                ]
            else:
                caption_text = None
            captions.append({
                'code': result.status_code,
                'len_': len(result.text),
                'expire': datetime.datetime.utcfromtimestamp( int(u.expire) ).strftime('%Y-%m-%d %H:%M:%S'),
                'text': caption_text,
                'caption_type': u.caption_type,
                'lang': u.lang,
                'caption_url': u.url
            })
        captions = pd.DataFrame(captions)
        return captions
| [
"alexis.perrier@pm.me"
] | alexis.perrier@pm.me |
301e7d432329625c4c6abc24cb4ee6d962f715b5 | 311ed8e1b7d76d2dac128f853d54fd0890c6f1dc | /ee.py | 1b22dccdc0643cd5f7564acb87cb12dab25b15f8 | [] | no_license | alok1994/Python_Programs- | 445ac47ffbb4bb705ece697eca5b27bb99cc2ddc | 5da1c80f6d4e1469efdf8f849431bd8babfdc109 | refs/heads/master | 2022-06-05T11:01:23.835417 | 2022-05-13T12:54:53 | 2022-05-13T12:54:53 | 88,402,493 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | data_list=[1,3,2,9,4,6,7,8]
new_list=[]
while data_list:
minimum = data_list[0]
for x in data_list:
if x < minimum:
minimum = x
new_list.append(minimum)
data_list.remove(minimum)
print new_list
| [
"adeep@infoblox.com"
] | adeep@infoblox.com |
8f641be213c38d2d7cad0bb6497df44984f4c44f | ffe5bc9851a57851e70fbe9cf71b532482ad5813 | /CountSheep.py | 1f2256081516bc894c05223ea5450f6988fe0614 | [] | no_license | BigBricks/PythonChallenges | 0320e786cb0ceac0dce8ed098b44b3890abf1654 | 9ab39b2dfe07b0a23a205ed91d56296ac9a75828 | refs/heads/master | 2020-05-05T00:39:12.315967 | 2019-10-30T04:10:51 | 2019-10-30T04:10:51 | 179,581,968 | 0 | 0 | null | 2019-10-30T04:10:51 | 2019-04-04T21:46:50 | Python | UTF-8 | Python | false | false | 102 | py | def count_sheeps(arrayOfSheeps):
# TODO May the force be with you
return arrayOfSheeps.count(True) | [
"bsa6.23.94@gmail.com"
] | bsa6.23.94@gmail.com |
830407e09552cfb2cb0473e85960160bfe3aa607 | c6ccee43794d7aa95c81eb30afa986db1853a765 | /djangomediapil/fields.py | 9ca64ab3a447900cbde106de365ecfdd6521d6dc | [] | no_license | giorgi94/django-media-pil | b14eba7a661953aabb94e6f6959cc8ef8c77ef36 | 63dd25ecf81b0ef2b0d682c5ffeaddc016ef0249 | refs/heads/master | 2020-04-08T02:14:28.004119 | 2019-03-09T15:02:32 | 2019-03-09T15:02:32 | 158,928,252 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | import os
import json
import datetime as dt
from django import forms
from django.db import models
from django.core import exceptions
from django.utils.translation import ugettext_lazy as _
from .mediaPIL import MediaPIL
from .widgets import ImagePILWidget
class ImagePILField(models.TextField):
    """A TextField that stores image-processing settings as a JSON blob.

    The stored JSON carries ``pathway`` (the image path), ``point`` (focal
    point), ``quality`` and ``upload_to``.
    """

    description = "Image PIL Field"

    def __init__(self, pathway="", point=(50, 50), quality=90,
                 upload_to=".", *args, **kwargs):
        self.blank = kwargs.get('blank', False)

        if pathway is None:
            pathway = ""

        # These defaults double as the fallback whenever a stored value
        # cannot be decoded (see from_db_value).
        self.default_kwargs = {
            'pathway': pathway,
            'point': point,
            'quality': quality,
            'upload_to': upload_to,
        }

        kwargs['default'] = json.dumps(
            self.default_kwargs, ensure_ascii=False)

        super().__init__(*args, **kwargs)

    def from_db_value(self, value, expression=None, connection=None):
        """Decode a DB value into the settings dict.

        ``expression`` and ``connection`` are supplied by Django; they now
        default to None so ``to_python`` can delegate here (previously that
        call raised TypeError because both arguments were required).
        """
        try:
            if value is None:
                return self.default_kwargs
            if type(value) == str and '{' not in value:
                # Legacy rows stored a bare path instead of a JSON object.
                kw = self.default_kwargs.copy()
                kw['pathway'] = value
                return kw
            return json.loads(value)
        except Exception:
            # Unreadable value: fall back to the field defaults.
            return self.default_kwargs

    def clean(self, value, model_instance):
        """Reject an empty pathway unless the field allows blank values."""
        val = json.loads(value)
        if not val.get('pathway') and not self.blank:
            raise forms.ValidationError(
                _('This field is required'), code='invalid')
        return value

    def get_prep_value(self, value):
        # Accept either an already-serialised string or the settings dict.
        if type(value) == str:
            return value
        return json.dumps(value, ensure_ascii=False)

    def value_to_string(self, obj):
        # NOTE(review): assumes the model attribute is named ``image`` —
        # confirm; a field normally uses self.value_from_object(obj).
        return self.get_prep_value(obj.image)

    def to_python(self, value):
        # Works now that expression/connection default to None.
        return self.from_db_value(value)

    def formfield(self, **kwargs):
        # Swap Django admin's default Textarea for the custom image widget.
        widget = kwargs.get('widget')
        if 'AdminTextareaWidget' in str(widget):
            kwargs['widget'] = ImagePILWidget
        return super().formfield(**kwargs)
| [
"giorgik1994@gmail.com"
] | giorgik1994@gmail.com |
0d3d361b190d5c8e94559f799d122150bbc60c1d | 0ba2975b23b7c15a804eb5a87490130c83c369f8 | /paper_experiment/compare_oversampling.py | 82420af7b26de0c1809a94789039b0c3cbbe29fe | [] | no_license | IqaHaziqah/on_the_way | eaf3cbda27d9c3935a2a849231133b8883cc3ba0 | bf3ad94535c046337eb0646ab355491f20475eaf | refs/heads/master | 2021-05-20T00:31:38.819902 | 2018-05-28T08:53:52 | 2018-05-28T08:53:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,138 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 8 20:01:28 2018
@author: zhouying
"""
import sys
sys.path.append('vae')
sys.path.append('distribution_ovsampling')
import pandas as pd
import numpy as np
import scipy.io as scio
from myutil2 import create_cross_validation,get_resultNB,compute
from vae6 import mnist_vae
from ndo import normal,smote
from sklearn.naive_bayes import GaussianNB
import sklearn
'''load the dataset'''
dataset = 'ionosphere'
mydata = scio.loadmat('MNIST_data\\UCI\\'+dataset+'.mat')
data = np.array(mydata['data'])
label = np.squeeze(mydata['label'])
# Pre-tuned VAE hyper-parameters for this dataset.
para_o = pd.read_pickle('vae\\'+dataset+'.txt')
# Result logs, opened in binary-append mode for np.savetxt.
f1 = open('vae.txt','ab')
f2 = open('ndo.txt','ab')
f3 = open('smo.txt','ab')
result = create_cross_validation([data,label],1,10)
for i in range(1):
    train,train_label,test,test_label = result[str(i)]
    ######## VAE oversampling
    ov_vae,_,_ = mnist_vae(train[train_label==1],train.shape[0],para_o)
    model = sklearn.neighbors.KNeighborsClassifier()
    model.fit(train,np.arange(0,train_label.shape[0]))# fit kNN on row indices so predict() returns each sample's nearest-neighbour row id
    pre = model.predict(ov_vae)
    info_0 = len(pre[train_label[pre]==0])# generated samples whose nearest neighbour carries label 0
    info_1 = len(pre[train_label[pre]==1])# generated samples whose nearest neighbour carries label 1
    pre = model.predict(ov_vae)
    pre = np.array(list(set(pre)))
    dive_0 = len(pre[train_label[pre]==0])# distinct label-0 neighbours (diversity)
    dive_1 = len(pre[train_label[pre]==1])# distinct label-1 neighbours (diversity)
    train_1 = np.concatenate((train,ov_vae),axis=0)
    train_label1 = np.concatenate((train_label,np.ones(ov_vae.shape[0])),axis=0)
    gnb = GaussianNB()
    y_predne = gnb.fit(train_1,train_label1).predict(test)
    y_pro = gnb.predict_proba(test)[:,1]
    re = compute(test_label,y_predne,y_pro)
    print(info_0,info_1,dive_0,dive_1)
    print(re)
    # np.savetxt(f1,[info_0,info_1,dive_0,dive_1],fmt='%d')
    # np.savetxt(f1,np.array([re]),fmt='%.4f')
    ####### NDO (normal-distribution) oversampling
    ov_ndo,_,_ = normal(train,100)
    # ov_ndo,_,_ = mnist_vae(train[train_label==1],train.shape[0],para_o)
    model = sklearn.neighbors.KNeighborsClassifier()
    model.fit(train,np.arange(0,train_label.shape[0]))# fit kNN on row indices (nearest-neighbour ids)
    pre = model.predict(ov_ndo)
    info_0 = len(pre[train_label[pre]==0])# generated samples whose nearest neighbour carries label 0
    info_1 = len(pre[train_label[pre]==1])# generated samples whose nearest neighbour carries label 1
    pre = model.predict(ov_ndo)
    pre = np.array(list(set(pre)))
    dive_0 = len(pre[train_label[pre]==0])
    dive_1 = len(pre[train_label[pre]==1])
    train_1 = np.concatenate((train,ov_ndo),axis=0)
    train_label1 = np.concatenate((train_label,np.ones(ov_ndo.shape[0])),axis=0)
    gnb = GaussianNB()
    y_predne = gnb.fit(train_1,train_label1).predict(test)
    y_pro = gnb.predict_proba(test)[:,1]
    re = compute(test_label,y_predne,y_pro)
    print(info_0,info_1,dive_0,dive_1)
    print(re)
    # np.savetxt(f2,[info_0,info_1,dive_0,dive_1],fmt='%d')
    # np.savetxt(f2,np.array([re]),fmt='%.4f') #get_resultNB(1,result,ov_ndo)
    ##### SMOTE oversampling
    ov_smo,_,_ = smote(train)
    # ov_smo,_,_ = mnist_vae(train[train_label==1],train.shape[0],para_o)
    model = sklearn.neighbors.KNeighborsClassifier()
    model.fit(train,np.arange(0,train_label.shape[0]))# fit kNN on row indices (nearest-neighbour ids)
    pre = model.predict(ov_smo)
    info_0 = len(pre[train_label[pre]==0])# generated samples whose nearest neighbour carries label 0
    info_1 = len(pre[train_label[pre]==1])# generated samples whose nearest neighbour carries label 1
    pre = model.predict(ov_smo)
    pre = np.array(list(set(pre)))
    dive_0 = len(pre[train_label[pre]==0])
    dive_1 = len(pre[train_label[pre]==1])
    train_1 = np.concatenate((train,ov_smo),axis=0)
    train_label1 = np.concatenate((train_label,np.ones(ov_smo.shape[0])),axis=0)
    gnb = GaussianNB()
    y_predne = gnb.fit(train_1,train_label1).predict(test)
    y_pro = gnb.predict_proba(test)[:,1]
    re = compute(test_label,y_predne,y_pro)
    print(info_0,info_1,dive_0,dive_1)
    print(re)
    # np.savetxt(f3,[info_0,info_1,dive_0,dive_1],fmt='%d')
    # np.savetxt(f3,np.array([re]),fmt='%.4f')
f1.close()
f2.close()
f3.close()
"442049887@qq.com"
] | 442049887@qq.com |
37a0377ba0a802b58950a24f45ab331d51f47c8c | e147cf167bb9d0985b563d8aa5cad4d42b075004 | /app/libs/http.py | dbd5cf01d8bb9b646ff517c9b9e436333d241c0c | [] | no_license | pilu01/kk | b3f54b6343e178a184992af9f68a0a30f60aba9b | 02fde0c9374f3652cdd5b0458b9b878ca33a9fff | refs/heads/master | 2021-07-10T14:50:05.611996 | 2020-10-13T12:49:41 | 2020-10-13T12:49:41 | 207,334,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # -*- coding: utf-8 -*-
# @Time : 2020/9/14 15:31
# @Author : xhb
# @FileName: http.py
# @Software: PyCharm
import requests
class Http(object):
    """Minimal helper around ``requests`` for GET calls."""

    def __init__(self, url):
        # Kept for backward compatibility; ``get`` is static and ignores it.
        self.url = url

    @staticmethod
    def get(url, json_return=True, timeout=None):
        """GET *url*; return parsed JSON, or raw text when json_return=False.

        Non-200 responses yield an empty dict / empty string.  ``timeout``
        defaults to None (wait forever) to preserve the old behaviour; pass
        a number of seconds to avoid hanging on an unresponsive server.
        """
        r = requests.get(url, timeout=timeout)
        if r.status_code != 200:
            return {} if json_return else ''
        return r.json() if json_return else r.text
| [
"xinhb@vastio.com"
] | xinhb@vastio.com |
e528b43a3e6dd339eefe897b88c870322865d82a | 4e6202e6c44fcde360a5cd22972556df2c3af975 | /src/compas_testing/rhino/gom.py | eadc3787c9cac4e165fd1cfb0e080c57c3af809f | [
"MIT"
] | permissive | franaudo/compas_testing | ce60e3cde066ce484b38dba472214b5104d0499f | f5c443b3c5f420793efda1bd156319cf7b85556e | refs/heads/master | 2023-05-31T12:46:09.603946 | 2020-01-27T21:24:02 | 2020-01-27T21:24:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,990 | py | from compas.geometry import Point
from compas_rhino.artists import PointArtist
__author__ = 'Francesco Ranaudo'
__copyright__ = 'Copyright 2020, BLOCK Research Group - ETH Zurich'
__license__ = 'MIT License'
__email__ = 'ranaudo@arch.ethz.ch'
__all__ = ['draw_point_cloud_color',
]
def draw_point_cloud_color(points_history_coord, color_map, stage_index):
    """
    Draws point clouds for a chosen stage - in Rhino,
    Point color is defined by its displacement from initial position

    Parameters
    ----------
    points_history_coord : dictionary
        key: str - point geometric key
        value : list - a list of locations of a given point in three-dimensional space (XYZ coordinates of the point)

    color_map : dictionary
        key: str - point geometric key
        value : list of lists - RGB values for each point at each stage

    stage_index : int - the stages to be drawn

    """
    for key in points_history_coord.keys():
        # XYZ of this point at the requested stage.
        point = Point(points_history_coord[key][stage_index][0],
                      points_history_coord[key][stage_index][1],
                      points_history_coord[key][stage_index][2]
                      )
        # strip("()") removes the surrounding parentheses from the geometric
        # key so the Rhino object name is just "x,y,z"; one layer per stage.
        p = PointArtist(point, name=key.strip("()"), color=color_map[key][stage_index], layer='Stage_'+str(stage_index))
        p.draw()
def draw_delaunay_mesh(point_cloud, color_map):
    """Draw a colored Delaunay mesh of the point cloud.  Not implemented yet."""
    pass
# def draw_stages(points_history_coord, scaled_disp, start, stop):
# '''draw the point cloud for input stages'''
#
# """
# Draw point clouds for a sequence of stages - in Rhino,
# Point color is defined by its displacement from initial position
#
# Parameters
# ----------
# points_history_coord : dictionary
# key: *
# value : list - a list of locations of a given point in three-dimensional space (XYZ coordinates of the point)
#
# scaled_displ : dictionary
# key: *
# value : list - a list of scalars between 0 and 1
#
# start : the first stage to be drawn
#
# stop : the last stage to be drawn
#
# * condition : keys must be identical in points_history_coord and scaled_displ
#
# """
#
# for key, value in points_history_coord.items():
# for j in range(start, stop):
# point = Point(value[j][0], value[j][1], value[j][2])
# deformation = scaled_displ[key][j]
# rgb = ratio_to_rgb(deformation)
# p = PointArtist(point, name=key.strip("()"), color=rgb, layer='Stage8_' + str(j))
# p.draw()
#
# return p
#
#
# def point_trajectory(points_history, key, rgb=(255, 255, 255)):
# """
# Draw the locations in space of a point throughout the successive stages
#
# Parameters
# ----------
# points_history : dictionary
# key: string - the coordinates of a point in initial stage
# value : sequence - a sequence of tuples describing locations of a given point in three-dimensional space
# * tuple : distance to reference point, XYZ coordinates of the point, Stage of the point
#
# key : - key of the point that you want to draw
#
# color : the chosen color
#
# """
#
# values = points_history[key]
# for v in values:
# point = Point(v[0], v[1], v[2])
# p = PointArtist(point, name=key, color=rgb, layer=key.strip("()"))
# p.draw()
# return p
#
#
# def find_rhpoint_key():
# """
# Select a point on rhino and get its key in the points_history dictionary
# """
#
# points = select_points("select one point")
# coordinates = get_point_coordinates(points)
# name = get_object_names(points)
#
# parse = str(name[0])
# split = parse.split(",")
# key = '(' + split[0] + ',' + split[1] + ',' + split[2] + ')'
# return key
# ******************************************************************************
# Main
# ******************************************************************************
if __name__ == "__main__":
pass
| [
"ranaudo@arch.ethz.ch"
] | ranaudo@arch.ethz.ch |
39c2bccbbad0084903df070044113fb7721f2cfb | 24fec5484a82c7705185b3c8ff41ad4f93b7feeb | /pizza_shop/mainapp/migrations/0002_alter_cart_final_price.py | 82f7224fe27b3aea90a802ee9e7d25c950eb805e | [] | no_license | rita-mazets/isp_lab3-4 | 89d0e5ecc853b7e1630f59d49b33c7dbafff0c8d | 182ad669ac49a37249003b082abba707bb6ebf64 | refs/heads/master | 2023-06-06T09:34:31.860928 | 2021-06-14T20:16:26 | 2021-06-14T20:16:26 | 370,748,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # Generated by Django 3.2 on 2021-04-29 08:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='cart',
name='final_price',
field=models.DecimalField(decimal_places=2, default=0, max_digits=9, verbose_name='Общая цена'),
),
]
| [
"mmmazets@mail.ru"
] | mmmazets@mail.ru |
9cf8249155e099a4f2d638b13a734b6f3276c7d7 | d93cb231c2c2fdda9615d11d150045886608a8d7 | /server/admin.py | 1996b7b7dd7e4dde85e917c95b8ae503b6e31b5d | [
"Apache-2.0"
] | permissive | Kiesum/tfrs-1 | 1aea5245a4d80bdb36c637ab158f8ca2a45c68df | 12a8dff77c5b94687117e9a0822a1f1e28c453c5 | refs/heads/master | 2021-01-01T17:59:45.536698 | 2017-08-24T19:09:25 | 2017-08-24T19:09:25 | 98,216,933 | 0 | 0 | null | 2017-07-24T17:31:03 | 2017-07-24T17:31:02 | null | UTF-8 | Python | false | false | 2,394 | py | """
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.contrib import admin
from .models.Attachment import Attachment
from .models.Audit import Audit
from .models.Contact import Contact
from .models.CreditTrade import CreditTrade
from .models.CreditTradeLogEntry import CreditTradeLogEntry
from .models.FuelSupplier import FuelSupplier
from .models.Group import Group
from .models.GroupMembership import GroupMembership
from .models.History import History
from .models.LookupList import LookupList
from .models.Note import Note
from .models.Notification import Notification
from .models.NotificationEvent import NotificationEvent
from .models.Offer import Offer
from .models.Permission import Permission
from .models.Role import Role
from .models.RolePermission import RolePermission
from .models.User import User
from .models.UserFavourite import UserFavourite
from .models.UserRole import UserRole
admin.site.register(Attachment)
admin.site.register(Audit)
admin.site.register(Contact)
admin.site.register(CreditTrade)
admin.site.register(CreditTradeLogEntry)
admin.site.register(FuelSupplier)
admin.site.register(Group)
admin.site.register(GroupMembership)
admin.site.register(History)
admin.site.register(LookupList)
admin.site.register(Note)
admin.site.register(Notification)
admin.site.register(NotificationEvent)
admin.site.register(Offer)
admin.site.register(Permission)
admin.site.register(Role)
admin.site.register(RolePermission)
admin.site.register(User)
admin.site.register(UserFavourite)
admin.site.register(UserRole) | [
"gwalker@escapesystems.com"
] | gwalker@escapesystems.com |
950a91e7f352536d8c73b04d04228a01c66a4fda | 13a80359dedbf4aae47ad47ae9ae7d6dbd1bcb16 | /chat_search.py | 8b901c54b040ef6ce9c7070e7ef8804002ae8ebf | [] | no_license | tiarafreddyandika/mitm_addon | bb52569a4ed86142cfd11ec33b8f73f00ddc6053 | 8246f5d9230c0b3c9f456dd907c142d7db773c17 | refs/heads/master | 2022-11-11T19:20:53.325345 | 2020-06-18T03:55:55 | 2020-06-18T03:55:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import mitmproxy
from base.base_simple_gql_request import BaseRequest
class ChatSearch(BaseRequest):
def __init__(self):
super().__init__()
@property
def error_response_file(self) -> str:
return "./response/chat_attachment_error.json"
@property
def modified_response_file(self) -> str:
return "./response/chat_initial_search.json"
@property
def query_matcher(self) -> str:
return "query contactAndRepliesSearch"
@property
def simulate_error(self) -> bool:
return False
@property
def modify_response(self) -> bool:
return True
addons = [
ChatSearch()
] | [
"alfon.lavinski@tokopedia.com"
] | alfon.lavinski@tokopedia.com |
0458ec665e53e9ec2babef6d8d8f4eb70518e005 | cb28edc8fecba9b12de7d5798ea8f04fd99cfce1 | /LIA_test.py | a6dcad4032e4749f4d5f30dfb1f0ca75ab1054db | [] | no_license | khoidnyds/Rosanlind | 5b597e267c6a9e9b54ed2c05f72537af8160b52b | 68902285e6ca462f8cebb21eae6016bc18d02518 | refs/heads/master | 2022-08-28T00:28:40.318066 | 2020-05-25T22:40:25 | 2020-05-25T22:40:25 | 266,893,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | import unittest
from LIA import Mendel2
class MyTestCase(unittest.TestCase):
def test_seq1(self):
nuc = Mendel2(2, 1)
self.assertAlmostEqual(nuc.get_result(), 0.684, 3)
def test_seq2(self):
nuc = Mendel2(7, 31)
self.assertAlmostEqual(nuc.get_result(), 0.6142569731, 7)
if __name__ == '__main__':
unittest.main()
| [
"khoidnyds@vt.edu"
] | khoidnyds@vt.edu |
5e38d99387ab0922958a03e78b4d12fe8b189012 | 20171d2c050a727d265bb551861326d723e43322 | /2019/05/sunny_with_a_chance_of_asteroids.py | f493be77d97d3e5ce6938fde33bbf9f7a57c4546 | [] | no_license | paisuhas/AdventOfCode | d7d0e708e7672d4ec68e63fe9ec40b3eb1de320c | 31bad46f44a7c657d12bb5718d9e0903950993ff | refs/heads/master | 2020-09-24T06:39:45.694404 | 2019-12-13T06:50:20 | 2019-12-13T06:50:20 | 225,690,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | #!/usr/bin/env python3
def decode(opcode):
op = opcode % 100
modes = []
for i in [100, 1000]:
modes.append((opcode // i) % 10)
return (modes, op)
def get_operands(pc, modes):
global program
operands = []
for offset, mode in enumerate(modes, 1):
address = pc + offset if mode else program[pc+offset]
operands.append(program[address])
return operands
program = list(map(int, open("input.txt").readlines()[0].strip().split(',')))
next_pc = 0
three_op_instructions = [1, 2]
one_op_instructions = [3, 4]
jump_instructions = [5, 6]
comparison_instructions = [7, 8]
for pc, opcode in enumerate(program):
if pc == next_pc:
modes, op = decode(opcode)
if op in three_op_instructions:
operands = get_operands(pc, modes)
next_pc = pc + 4
if op == 1:
result = sum(operands)
else:
assert(op == 2)
result = operands[0] * operands[1]
program[program[pc + 3]] = result
elif op in one_op_instructions:
next_pc = pc + 2
if op == 3:
program[program[pc+1]] = 5 # 1 for Part 1
else:
assert(op == 4)
if modes[0]:
print(program[pc+1])
else:
print(program[program[pc+1]])
elif op in jump_instructions:
operands = get_operands(pc, modes)
if (op == 5 and operands[0]) or (op == 6 and not operands[0]):
next_pc = operands[1]
else:
next_pc = pc + 3
elif op in comparison_instructions:
next_pc = pc + 4
operands = get_operands(pc, modes)
if (op == 7 and operands[0] < operands[1]) or (op == 8 and operands[0] == operands[1]):
program[program[pc+3]] = 1
else:
program[program[pc+3]] = 0
else:
assert(op == 99)
break
| [
"spai@cs.wisc.edu"
] | spai@cs.wisc.edu |
b033e8f0b13e41e324b11e403739c993c52bbe7e | a4a01e251b194f6d3c6654a2947a33fec2c03e80 | /PythonWeb/Ajax/1809/Day02/1808/AjaxDemo02/run01.py | 35ac2bfbdbdab18d5da55f05332beae995cd1c85 | [] | no_license | demo112/1809 | 033019043e2e95ebc637b40eaf11c76bfd089626 | e22972229e5e7831dce2aae0b53ce19a6e3bb106 | refs/heads/master | 2020-04-09T07:10:49.906231 | 2019-02-27T13:08:45 | 2019-02-27T13:08:45 | 160,143,869 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,861 | py | from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
import json
import pymysql
pymysql.install_as_MySQLdb()
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI']="mysql://root:123456@localhost:3306/flask"
db = SQLAlchemy(app)
class Login(db.Model):
__tablename__ = "login"
id = db.Column(db.Integer,primary_key=True)
lname = db.Column(db.String(30))
lpwd = db.Column(db.String(30))
uname = db.Column(db.String(30))
def to_dict(self):
dic = {
'id':self.id,
'lname' : self.lname,
'lpwd' : self.lpwd,
'uname' : self.uname,
}
return dic
@app.route('/00-homework')
def homework():
return render_template('00-homework.html')
@app.route('/00-server')
def server00():
lname = request.args.get('lname')
login=Login.query.filter_by(lname=lname).first()
if login:
return "用户名称已经存在"
else:
return "通过"
@app.route('/01-post')
def post():
return render_template("01-post.html")
@app.route('/01-server',methods=['POST'])
def server01():
uname = request.form['uname']
uage = request.form['uage']
return "传递过来的uname的值为:%s,传递过来的uage的值为:%s" % (uname,uage)
@app.route('/02-form',methods=['GET','POST'])
def form():
if request.method == 'GET':
return render_template('02-form.html')
else:
uname = request.form['uname']
uage = request.form['uage']
return "传递过来的uname的值为:%s,传递过来的uage的值为:%s" % (uname,uage)
@app.route('/03-getlogin')
def getlogin():
return render_template('03-getlogin.html')
@app.route('/03-server')
def server03():
logins = Login.query.all()
str1 = ""
for login in logins:
str1 += str(login.id)
str1 += login.lname
str1 += login.lpwd
str1 += login.uname
return str1
@app.route('/04-json')
def json_views():
return render_template("04-json.html")
@app.route('/04-server')
def server04():
# list = ["王老六","RapWang","隔壁老顽固"]
# dic = {
# 'name':'TeacherWang',
# 'age' : 35,
# 'gender' : 'Male',
# }
# jsonStr=json.dumps(dic)
list = [
{
"name":"wangwc",
"age":35,
"gender":"Male",
},
{
'name':'RapWang',
'age':40,
'gender':'Female',
}
]
jsonStr=json.dumps(list)
return jsonStr
@app.route('/05-json-login')
def json_login():
return render_template('05-json-login.html')
@app.route('/05-server')
def server05():
#得到id为一的Login的信息
login=Login.query.filter_by(id=1).first()
jsonStr=json.dumps(login.to_dict())
return jsonStr
if __name__ == "__main__":
app.run(debug=True) | [
"huafengdongji@hotmail.com"
] | huafengdongji@hotmail.com |
35e5326d1aad1c103b3e76b9efefdd92864a2926 | 45edff14271724c5bf27e62e96eeb635840eae22 | /ML/ensemble_learning/util.py | d998161fe6c0a48ae7207841cc63d1e0147b0db8 | [] | no_license | DaiJitao/machine_learning | 1e41208dc94836a97e57a4b0f5778f8da2bb81d4 | 49e1db9ecbfbf886a11ce416eea402d214cf2049 | refs/heads/master | 2021-06-25T23:52:06.066315 | 2021-02-07T16:17:50 | 2021-02-07T16:17:50 | 209,712,507 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 347 | py |
"""
决策树常用的工具类:指标的计算、数据的加载
"""
import numpy as np
def load_data():
'''
根据《统计学习方法》第八章8.1.3产生数据.
:return:
'''
dataset_label = np.array([[0, 1], [1, 1], [2, 1], [3, -1], [4, -1], [5, -1], [6, 1], [7, 1], [8, 1], [9, -1]])
return dataset_label
| [
"hejinrong@news.cn"
] | hejinrong@news.cn |
2d62061754e25389e9568ddda07c4f09a77ac25f | db970f92ec15ff2a4221079d5b3c16c4000f3a2d | /tpot_secom_best.py | 1cf3dbfb67de19be280f5ab4a940e558e2d7a423 | [] | no_license | Ranga2904/AzureML_TS_SECOM | 390fab7b28b558bd76f5c78d14a1927f0f7197ca | 67aac95bd11a75beb1385365214aab5cfa0ba4a9 | refs/heads/main | 2023-03-19T03:24:39.160640 | 2021-03-14T15:24:27 | 2021-03-14T15:24:27 | 347,672,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,514 | py | import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from tpot.builtins import StackingEstimator
from tpot.export_utils import set_param_recursive
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1)
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['target'], random_state=1)
# Average CV score on the training set was: 0.9106825452925345
exported_pipeline = make_pipeline(
StackingEstimator(estimator=RandomForestClassifier(bootstrap=False, criterion="gini", max_features=0.3, min_samples_leaf=17, min_samples_split=12, n_estimators=100)),
StackingEstimator(estimator=ExtraTreesClassifier(bootstrap=True, criterion="entropy", max_features=0.4, min_samples_leaf=19, min_samples_split=20, n_estimators=100)),
ExtraTreesClassifier(bootstrap=True, criterion="gini", max_features=0.5, min_samples_leaf=1, min_samples_split=5, n_estimators=100)
)
# Fix random state for all the steps in exported pipeline
set_param_recursive(exported_pipeline.steps, 'random_state', 1)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| [
"noreply@github.com"
] | noreply@github.com |
d602bd37839f0456bfb7aabbb13cffddb3e2b1e3 | c4f2c58b2eb83f5bf672e82a39b6c96671f0eac1 | /iss.py | 64eeed4e973b979754e09aa5b058cac7b82be4c4 | [] | no_license | fthbrmnby/ISS-Position | 0bf36407c9384ebd02ed19e25c8687965a495bf4 | cf69b23fad97a8810b20e70ac5c144b0500c06e0 | refs/heads/master | 2021-01-19T22:34:36.257275 | 2017-07-31T07:46:57 | 2017-07-31T07:46:57 | 88,828,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
import urllib.request
import json
# basic map setup
globe = Basemap(projection='robin', resolution = 'c', lat_0=0, lon_0=0)
globe.drawcoastlines()
globe.drawcountries()
globe.fillcontinents(color="grey")
globe.drawmapboundary()
globe.drawmeridians(np.arange(0, 360, 30))
globe.drawparallels(np.arange(-90, 90, 30))
x,y = globe(0, 0)
point = globe.plot(x, y, 'ro', markersize=7)[0]
def init():
point.set_data([], [])
return point,
# animation function. This is called sequentially
def animate(i):
lons, lats = iss_position()
x, y = globe(lons, lats)
point.set_data(x, y)
return point,
# http://api.open-notify.org/iss-now.json
def iss_position():
resp = urllib.request.urlopen("http://api.open-notify.org/iss-now.json").read()
jsn = json.loads(resp.decode('utf-8'))
pos = jsn["iss_position"]
lon = pos["longitude"]
lat = pos["latitude"]
return (lon, lat)
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(plt.gcf(), animate, init_func=init,
interval=3000, blit=True)
plt.show()
| [
"fthbrmnby@gmail.com"
] | fthbrmnby@gmail.com |
1d33a72c9dcc9eadff29cb6b67e3cceb03f68a5f | 72aeb6cbad1a595e656c1469df3e27a1794d9542 | /Tarea/repaso/repaso/settings.py | dd312496f36add46bcc6cd0e3aa79d8963291163 | [] | no_license | monicanicole/DjangoRest-Angular | 87f3b83b2dd2ee035d943c04db8ebb4b3614ef3d | 1a2db5a0ae93a9fb7b800f3be0f5e92aab76aee3 | refs/heads/master | 2020-12-25T11:15:26.871064 | 2016-07-21T03:53:30 | 2016-07-21T03:53:30 | 63,833,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,352 | py | """
Django settings for repaso project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qa6ipqpw$4arzr!u%273@odw0^emkj^&p98r##0si=g4rl^ccf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'administrar',
'rest_framework',
'servicioweb',
'corsheaders',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
ROOT_URLCONF = 'repaso.urls'
CORS_ORIGIN_ALLOW_ALL = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,"template")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'repaso.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"monicanicole881@gmail.com"
] | monicanicole881@gmail.com |
85559cad23bef27acfad5460f0f619b730ca763d | 93720fa8240ed31835d53480a1db31519e5f22ea | /src/contest/migrations/setup_keyspaces.py | 1c9c5af1b564e9ce90742925a7f1c1b588b62935 | [] | no_license | riccitensor/contest-py | 788075916bbc6d78c8280977d542f78446151bef | c32f0321bd5819df9658cbeeb368aa70f3245af2 | refs/heads/master | 2021-01-25T08:55:23.822311 | 2012-06-11T19:44:05 | 2012-06-11T19:44:05 | 9,649,980 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | '''
Created on 25.12.2011
@author: christian.winkelmann@plista.com
'''
from contest.config import config_global
from contest.config import config_local
import cql
class Setup_Keyspaces(object):
def __init__(self):
dbconn = cql.connect(config_local.cassandra_host, config_local.cassandra_port )
cursor = dbconn.cursor()
try:
cql_query = """ DROP KEYSPACE :keyspace; """
cursor.execute(cql_query, dict(keyspace = config_global.cassandra_default_keyspace))
except cql.ProgrammingError as programmingError:
print cql_query
print programmingError
try:
cql_query = """ CREATE KEYSPACE :keyspace WITH strategy_class = 'SimpleStrategy'
AND strategy_options:replication_factor = 1; """
cursor.execute(cql_query, dict(keyspace = config_global.cassandra_default_keyspace))
except cql.ProgrammingError as programmingError:
print cql_query
print programmingError
if __name__ == '__main__':
sK = Setup_Keyspaces() | [
"christian.winkelmann@plista.com"
] | christian.winkelmann@plista.com |
7dbe4f4d19fda2c3257bb3c276319a61d493f7d9 | a2558e0d92c6f9e3dcd6b12410ba824cb523075d | /app.py | 7d2bb02b84a5bb24ba3b367450d3bbe9100840fb | [] | no_license | agsorganics/agsorganicsbs | 0ea2240ebfd8651bb5a5b61819739d1571c7049c | 21404976ce2ef4d57cacbc093c3d9303ee604cb7 | refs/heads/master | 2022-09-08T21:12:22.435434 | 2020-06-06T08:39:07 | 2020-06-06T08:39:07 | 269,766,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | from flask import Flask, render_template, url_for, request, redirect
import csv
app = Flask(__name__)
@app.route('/')
def hello_world():
return render_template('index.html')
@app.route('/<string:page_name>')
def html_page(page_name):
return render_template(page_name)
def write_to_csv(data):
with open('database.csv', mode='a') as database:
name = data["name"]
email = data["email"]
address = data["address"]
num = data["num"]
state = data["state"]
country = data["country"]
csv_writer = csv.writer(database, delimiter =',', quotechar ='"', quoting = csv.QUOTE_MINIMAL )
csv_writer.writerow([name,email,address,num,state,country])
@app.route('/submit_form', methods=['POST', 'GET'])
def submit_form():
if request.method == 'POST':
data = request.form.to_dict()
write_to_csv(data)
return redirect('/thanks.html')
else:
return 'try again'
| [
"eolisegun83@gmail.com"
] | eolisegun83@gmail.com |
bbae3698bee755a86e113f6ff4e7d52fe4f8a1ca | 7b12eb45c1ea76ad9c186b858b5dfebf2c5b862a | /.history/DEBER_20210905000023.py | 9516b0bda58c56e4e39bbf9f8a08dc9dc32c935e | [
"MIT"
] | permissive | Alopezm5/PROYECTO-PARTE-1 | a1dce04009b24852c1c60e69bdf602ad3af0574b | bd7a8594edf08d41c6ca544cf6bac01ea4fcb684 | refs/heads/main | 2023-07-25T11:22:17.994770 | 2021-09-07T03:27:34 | 2021-09-07T03:27:34 | 403,670,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,447 | py | import os
class Empresa():
def __init__(self,nom="",ruc=0,dire="",tele=0,ciud="",tipEmpr=""):
self.nombre=nom
self.ruc=ruc
self.direccion=dire
self.telefono=tele
self.ciudad=ciud
self.tipoEmpresa=tipEmpr
def datosEmpresa(self):#3
self.nombre=input("Ingresar nombre de la empresa: ")
self.ruc=int(input("Ingresar ruc de la empresa: "))
self.direccion=input("Ingresar la direccion de la empresa: ")
self.telefono=int(input("Ingresar el numero de telefono de la empresa: "))
self.ciudad=input("Ingresar ciudad donde esta la empresa: ")
self.tipoEmpresa=input("Ingresar tipo de empresa publica o privada: ")
def mostrarEmpresa(self):
print("")
print("Empresa")
print("La empresa de nombre {}\n De RUC #{} \n Está ubicada en {}\n Se puede comunicar al #{}\n Está empresa esta en la ciudad de {}\n Es una entidad {}".format(self.nombre,self.ruc,self.direccion, self.telefono,self.ciudad, self.tipoEmpresa))
class Empleado(Empresa):
def __init__(self,nom="",cedu=0,dire="",tele=0,email="",estado="",profe=""):
self.nombre=nom
self.cedula=cedu
self.direccion=dire
self.telefono=tele
self.correo=email
self.estadocivil=estado
self.profesion=profe
def empleado(self):
self.nombre=input("Ingresar nombre del empleado: ")
self.cedula=int(input("Ingresar numero de cedula del empleado: "))
self.direccion=input("Ingresar la direccion del empleado: ")
self.telefono=int(input("Ingresar numero de contacto del empleado: "))
self.correo=input("Ingresar correo personal del empleado: ")
def empleadoObrero(self):
self.estadocivil=input("Ingresar estado civil del empleado: ")
def empleadoOficina(self):
self.profesion=input("Ingresar profesion del empleado: ")
def mostrarempleado(self):
print("El empleado: {} con # de C.I. {} \n Con direccion {}, y numero de contacto{}\n Y correo {}".format(self.nombre,self.cedula,self.direccion,self.telefono,self.correo))
class Departamento(Empleado):
def __init__(self,dep=""):
self.departamento=dep
def departa(self):
self.departamento=input("Ingresar el departamento al que pertenece el empleado: ")
def mostrarDeparta(self):
print("El empleado pertenece al departamento de: {}".format(self.departamento))
class Pagos(Empleado):
def __init__(self, desper=0,valhora=0,hotraba=0,extra=0,suel=0,hrecar=0,hextra=0,pres=0,mcou=0,valho=0,sobtiem=0,comofi=0,antobre=0,iemple=0,cuopres=0,tot=0,liquid=0,cuota=0,anti=0,comi=0,fNomina="",fIngreso="",iess=0):
self.permisos=desper
self.valorhora=valhora
self.horastrabajadas=hotraba
self.valextra=extra
self.sueldo= suel
self.horasRecargo= hrecar
self.horasExtraordinarias=hextra
self.prestamo= pres
self.mesCuota= mcou
self.valor_hora= valho
self.sobretiempo=sobtiem
self.comEmpOficina = comofi
self.antiEmpObrero = antobre
self.iessEmpleado = iemple
self.cuotaPrestamo=cuopres
self.totdes = tot
self.liquidoRecibir = liquid
self.mesCuota=cuota
self.antiguedad=anti
self.comision=comi
self.fechaNomina=fNomina
self.fechaIngreso=fIngreso
self.iess=iess
def pagoNormal(self):
self.sueldo=float(input("Ingresar sueldo del trabajador: $ "))
self.prestamo=float(input("Ingresar monto del prestamo que ha generado el empleado: $ "))
self.mesCuota=int(input("Ingresar meses a diferir el prestamo: "))
self.comision=float(input("Ingresar valor de la comsion: "))
self.antiguedad=int(input("Ingresar antiguedad: "))
self.iess=float(input("Ingresar valor del iees recordar que debe ser porcentuado Ejemplo si quiere decir 20% debe ingresar 0.20"))
def pagoExtra(self):
self.horasRecargo=int(input("Ingresar horas de recargo: "))
self.horasExtraordinarias=int(input("Ingresar horas extraordinarias: "))
self.fechaNomina=float(input("Ingresar fecha de nomida (formato año-mes-dia): "))
self.fechaIngreso=float(input("Ingresar fecha de ingreso (formato año-mes-dia): "))
def calculoSueldo(self):
self.valor_hora=self.sueldo/240
self.sobretiempo= self.valor_hora * (self.horasRecargo*0.50+self.horasExtraordinarias*2)
self.comEmpOficina = self.comision*self.sueldo
self.antiEmpObrero = self.antiguedad*(self.fechaNomina - self.fechaIngreso)/365*self.sueldo
self.iessEmpleado = self.iess*(self.sueldo+self.sobretiempo)
self.cuotaPrestamo=self.prestamo/self.mesCuota
self.toting = self.sueldo+self.sobretiempo+ self.comEmpOficina + self.antiEmpObrero
self.totdes = self.iessEmpleado + self.prestamo
self.liquidoRecibir = self.toting - self.totdes
def mostrarSueldo(self):
print("El empleado tiene un sueldo de ${}")
emp=Empresa()
emp.datosEmpresa()
os.system ("cls")
emple=Empleado()
emple.empleado()
os.system ("cls")
emple.empleadoObrero()
emple.empleadoOficina()
os.system ("cls")
depa=Departamento()
depa.departa()
pag=Pagos()
pag.pagoNormal()
pag.pagoExtra()
pag.calculoSueldo()
os.system ("cls")
emp.mostrarEmpresa()
print("")
emple.mostrarempleado()
print("")
pag.mostrarSueldo() | [
"85761855+Alopezm5@users.noreply.github.com"
] | 85761855+Alopezm5@users.noreply.github.com |
46a790f3eecc7144206651c29840f940b5efa53c | 3ffb6d8600d767cf2e430b603a21ad9d85d1e02e | /Article/views.py | 3ee85e39abc330219488785956799d44b509b560 | [] | no_license | wyangyang1230/boke | 231ec662b5427daa4267a20126611ac159ec11da | f6da39c33aae7758fd6b4bba55b654ba533eeb90 | refs/heads/master | 2020-08-07T08:18:05.651256 | 2019-10-07T11:46:15 | 2019-10-07T11:46:15 | 207,441,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,890 | py | from django.shortcuts import render
from django.http import HttpResponse,HttpResponseRedirect,JsonResponse
from django.core.paginator import Paginator
from Back.models import *
# Create your views here.
## 父模板
def base(request):
# get请求
data=request.GET
serach=data.get('serach')
print(serach)
# 通过form表单提交的数据,判断数据库中是否存在某个文章
# 通过模型查询
article=Article.objects.filter(title__contains=serach).all()
print(article)
return render(request,'article/base.html',locals())
# 网站首页
def index(request):
article=Article.objects.order_by('-date')[:6]
recommend_article=Article.objects.filter(recommend=1)[:7]
click_article=Article.objects.order_by('-click')[:12]
return render(request,'article/index.html',locals())
# 个人相册
def listpic(request):
return render(request,'article/listpic.html')
# 个人简介
def about(request):
return render(request,'article/about.html')
# 文章分页
def newslistpic(request,page=1):
page=int(page) #1为字符串类型,需要将类型转换
article=Article.objects.order_by('-date')
paginator=Paginator(article,6) #显示每页6条数据
page_obj=paginator.page(page)
# 获取当前页
current_page=page_obj.number
start=current_page-3
if start<1:
start=0
end=current_page+2
if end > paginator.num_pages:
end = paginator.num_pages
if start==0:
end=5
if end==paginator.num_pages:
start=paginator.num_pages-5
page_range=paginator.page_range[start:end]
return render(request,'article/newslistpic.html',locals())
# 文章详情
def articledetails(request,id):
# id为字符串类型
id=int(id)
article=Article.objects.get(id=id)
print(article)
return render(request,'article/articledetails.html',locals()) | [
"root@163.com"
] | root@163.com |
e8fd309c9d59ebfbf16e05e16b272c2b4b073b2a | c3470e984f3c27766f16da46dde1467004469c1f | /venv/lib/python3.7/base64.py | 551d86a0e7946c38b9135752f36068e2ff83adcf | [] | no_license | binwei-yu/zqweb | e647077b320d14988efda60af224a37e50cb19fc | 2e3036dd254230272614fafd375e5142aacdb9d5 | refs/heads/master | 2020-05-07T22:04:13.020298 | 2019-04-25T20:14:15 | 2019-04-25T20:14:15 | 180,927,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | /Users/zhouqi/anaconda3/lib/python3.7/base64.py | [
"zqcarlos@umich.edu"
] | zqcarlos@umich.edu |
6d61713b8a648e26e0264e207a0bcba10f35cc49 | bcdd32e48435fdbcc717b300be34bccec188404f | /catkin_ws/src/camera_motor/Prediction.py | 8df13d9108796420a47c80b50af01f017da555de | [] | no_license | tuf22191/Senior_Design_Project_Spring_2017 | 8ce899a2138747f4aca9ec7f5e0f6d94dd1d56de | 21eff0ff5c2129b56c3d99ad01b9b49ffb2affbd | refs/heads/master | 2021-01-11T20:08:39.739420 | 2017-03-31T00:48:21 | 2017-03-31T00:48:21 | 79,049,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,980 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from camera_motor/Prediction.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
#import genpy
import std_msgs.msg
class Prediction(genpy.Message):
_md5sum = "f251b6023fb3143f56d892530c9c6948"
_type = "camera_motor/Prediction"
_has_header = False #flag to mark the presence of a Header object
_full_text = """std_msgs/Time msg_sent_time
float64 x_vel
float64 y_vel
std_msgs/Duration time_to_impact
================================================================================
MSG: std_msgs/Time
time data
================================================================================
MSG: std_msgs/Duration
duration data
"""
__slots__ = ['msg_sent_time','x_vel','y_vel','time_to_impact']
_slot_types = ['std_msgs/Time','float64','float64','std_msgs/Duration']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
msg_sent_time,x_vel,y_vel,time_to_impact
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Prediction, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.msg_sent_time is None:
self.msg_sent_time = std_msgs.msg.Time()
if self.x_vel is None:
self.x_vel = 0.
if self.y_vel is None:
self.y_vel = 0.
if self.time_to_impact is None:
self.time_to_impact = std_msgs.msg.Duration()
else:
self.msg_sent_time = std_msgs.msg.Time()
self.x_vel = 0.
self.y_vel = 0.
self.time_to_impact = std_msgs.msg.Duration()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_2I2d2i.pack(_x.msg_sent_time.secs, _x.msg_sent_time.nsecs, _x.x_vel, _x.y_vel, _x.time_to_impact.secs, _x.time_to_impact.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.msg_sent_time is None:
self.msg_sent_time = std_msgs.msg.Time()
if self.time_to_impact is None:
self.time_to_impact = std_msgs.msg.Duration()
end = 0
_x = self
start = end
end += 32
(_x.msg_sent_time.secs, _x.msg_sent_time.nsecs, _x.x_vel, _x.y_vel, _x.time_to_impact.secs, _x.time_to_impact.nsecs,) = _struct_2I2d2i.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_2I2d2i.pack(_x.msg_sent_time.secs, _x.msg_sent_time.nsecs, _x.x_vel, _x.y_vel, _x.time_to_impact.secs, _x.time_to_impact.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.msg_sent_time is None:
self.msg_sent_time = std_msgs.msg.Time()
if self.time_to_impact is None:
self.time_to_impact = std_msgs.msg.Duration()
end = 0
_x = self
start = end
end += 32
(_x.msg_sent_time.secs, _x.msg_sent_time.nsecs, _x.x_vel, _x.y_vel, _x.time_to_impact.secs, _x.time_to_impact.nsecs,) = _struct_2I2d2i.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_2I2d2i = struct.Struct("<2I2d2i")
| [
"tuf22191@temple.edu"
] | tuf22191@temple.edu |
6a730ff82c333d93882e1a954aba3e8f1b3fef01 | 5d3b79b7f823a7c66a61c065be83cced6073731d | /Basics/Tuple.py | 751640c9af345dd68ec4ec62ef35c22c54f63280 | [] | no_license | gtripti/PythonBasics | e4d548c34fdfd47c38f59a44a53750295a59b736 | 2a13178001888ce3093e253ed49203b958489472 | refs/heads/master | 2020-06-25T11:46:45.559838 | 2019-08-24T19:16:15 | 2019-08-24T19:16:15 | 199,299,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | t = (1,2,3)
l=[1,2,3]
print(type(t))
print(type(l))
print(len(t))
t = ('one',2)
# slicing and indexing
print(t[0])
print(t[-1])
# build in methods
# 1.count
t=('a','a','b')
print(t.count('a'))
# 2.index
print(t.index('a'))
# immutability possible with list but not with tuple
l = [1,2,3]
print(l)
l[0] = 'NEW'
print(l)
print(t)
t[0] = 'NEW' | [
"tripti.gupta97@gmail.com"
] | tripti.gupta97@gmail.com |
082fce6cf017f2b1f42c80cd64d20110852737af | 5e709e364397d8e26a8c188057b544d44b9fa2d5 | /blog/migrations/0001_initial.py | 579fcc154447c379e858f2cdbda9709ceeaed6f2 | [] | no_license | cdavis0119/my-first-blog | 56136df863f04eacb283643884e7c638d0a0da7a | 8d526cf4bf68b7bf9e9e18e544b188baabc698f2 | refs/heads/master | 2021-01-01T17:19:53.969453 | 2017-07-22T22:06:35 | 2017-07-22T22:06:35 | 98,050,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-22 18:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"courtneyldavis19@gmail.com"
] | courtneyldavis19@gmail.com |
3dca43e0102cde8dac9752705559f1b75cccde3d | 8203e42d18ea718302d19029b1df8a344d3a4ad9 | /quality/views.py | 75475e725155f9ebfceda9adf97f6b5ae7c31e0a | [] | no_license | sbsimo/quality | 07c6774d352f753aa11edc441c232dd507b53bf9 | a463cca3b223e8b135b7079c4aec9623e8b149fd | refs/heads/master | 2021-01-25T08:55:16.111596 | 2012-06-14T14:12:34 | 2012-06-14T14:12:34 | 2,779,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,129 | py | from geonode import settings
from geonode.maps.views import _perms_info, MAP_LEV_NAMES, _perms_info_json, \
LAYER_LEV_NAMES, _describe_layer
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.http import HttpResponse
from geonode.maps.models import Map, MapLayer, Layer
import json
from django.template import RequestContext, loader, Context
from django.utils.translation import ugettext as _
#from cartography.models import Document
from django.contrib.auth.decorators import login_required
from geonode.maps.views import default_map_config
from django.views.decorators.csrf import csrf_exempt
from quality.models import Subtopic, LayerSubtopic, QualityMatrix
#imgtypes = ['jpg','jpeg','tif','tiff','png','gif']
#def documentdetail(request, docid):
# """
# The view that show details of each document
# """
# document = get_object_or_404(Document, pk=docid)
# map = document.maps.all()[0]
# if not request.user.has_perm('maps.view_map', obj=map):
# return HttpResponse(loader.render_to_string('401.html',
# RequestContext(request, {'error_message':
# _("You are not allowed to view this map.")})), status=401)
# return render_to_response("cartography/docinfo.html", RequestContext(request, {
# 'map': map,
# 'permissions_json': json.dumps(_perms_info(map, MAP_LEV_NAMES)),
# 'document': document,
# 'imgtypes': imgtypes
# }))
#def newmaptpl(request):
# config = default_map_config()[0]
# return render_to_response('cartography/newmaptpl.html',RequestContext(request, {'config':json.dumps(config)}))
#@login_required
#def upload_document(request,mapid=None):
# if request.method == 'GET':
# return render_to_response('cartography/document_upload.html',
# RequestContext(request,{'mapid':mapid,}),
# context_instance=RequestContext(request)
# )
# elif request.method == 'POST':
# mapid = str(request.POST['map'])
# file = request.FILES['file']
# title = request.POST['title']
# document = Document(title=title, file=file)
# document.save()
# document.maps.add(Map.objects.get(id=mapid))
# return HttpResponse(json.dumps({'success': True,'redirect_to':'/maps/' + str(mapid)}))
@csrf_exempt
def layerController(request, layername):
DEFAULT_MAP_CONFIG, DEFAULT_BASE_LAYERS = default_map_config()
layer = get_object_or_404(Layer, typename=layername)
if (request.META['QUERY_STRING'] == "describe"):
return _describe_layer(request,layer)
if (request.META['QUERY_STRING'] == "remove"):
return _removeLayer(request,layer)
if (request.META['QUERY_STRING'] == "update"):
return _updateLayer(request,layer)
if (request.META['QUERY_STRING'] == "style"):
return _changeLayerDefaultStyle(request,layer)
else:
if not request.user.has_perm('maps.view_layer', obj=layer):
return HttpResponse(loader.render_to_string('401.html',
RequestContext(request, {'error_message':
_("You are not permitted to view this layer")})), status=401)
metadata = layer.metadata_csw()
maplayer = MapLayer(name = layer.typename, ows_url = settings.GEOSERVER_BASE_URL + "wms")
# center/zoom don't matter; the viewer will center on the layer bounds
map = Map(projection="EPSG:900913")
qualityRecord = layer.qualitymatrix
return render_to_response('quality/layer.html', RequestContext(request, {
"layer": layer,
"metadata": metadata,
"viewer": json.dumps(map.viewer_json(* (DEFAULT_BASE_LAYERS + [maplayer]))),
"permissions_json": _perms_info_json(layer, LAYER_LEV_NAMES),
"GEOSERVER_BASE_URL": settings.GEOSERVER_BASE_URL,
"qualityRecord": qualityRecord
}))
GENERIC_UPLOAD_ERROR = _("There was an error while attempting to upload your data. \
Please try again, or contact and administrator if the problem continues.")
def listSubtopics(request):
# access to the table that contains the list of subtopics
allSubtopics = Subtopic.objects.all()
return render_to_response('quality/subtopics.html', RequestContext(request, {
'allSubs' : allSubtopics,
}))
def ask4weights(request):
if request.method == 'GET':
subtopic_pk = request.GET.__getitem__("subtopic")[0]
subtopic = Subtopic.objects.get(pk=subtopic_pk)
return render_to_response('quality/ask4weights.html', RequestContext(request, {
'subtopic': subtopic,
'subtopic_pk': subtopic_pk,
}))
else:
return HttpResponse(loader.render_to_string('401.html',
RequestContext(request, {'error_message':
_("You are not permitted to view this layer")})), status=401)
def calculateBest(request):
if request.method == 'GET':
# get the weights input by the client
weightVector = [request.GET.__getitem__("geographicExtent")]
weightVector.append(request.GET.__getitem__("licensingConstraint"))
weightVector.append(request.GET.__getitem__("scaleDenominator"))
weightVector.append(request.GET.__getitem__("update"))
weightVector.append(request.GET.__getitem__("temporalExtent"))
weightVector.append(request.GET.__getitem__("fitness4use"))
weightVector.append(request.GET.__getitem__("thematicRichness"))
weightVector.append(request.GET.__getitem__("integration"))
weightVector.append(request.GET.__getitem__("dataIntegrity"))
weightVector.append(request.GET.__getitem__("positionalAccuracy"))
weightVector.append(request.GET.__getitem__("thematicAccuracy"))
weightVector.append(request.GET.__getitem__("completeness"))
# get the subtopic and the set of related layersubtopics
subtopic_id = request.GET.__getitem__("subtopic_pk")
subtopic = Subtopic.objects.get(pk=subtopic_id)
layersubtopics = subtopic.layersubtopic_set.all()
# generate a dictionary needed for storing the results of the total score calculation
results = []
# loop on layersubtopics in order to calculate the total score for each one
# and store them into the newly created dictionary
for layersubtopic in layersubtopics:
currentLayer = layersubtopic.layer
qualityVector = QualityMatrix.objects.get(layer=currentLayer)
# calculate the quality total score of the layer
currentScore = 0
unWeightedScore = 0
currentScore = qualityVector.geographicExtent*int(weightVector[0]) +\
qualityVector.licensingConstraint*int(weightVector[1])+\
qualityVector.scaleDenominator*int(weightVector[2])+\
qualityVector.update*int(weightVector[3])+\
qualityVector.temporalExtent*int(weightVector[4])+\
qualityVector.fitness4Use*int(weightVector[5])+\
qualityVector.thematicRichness*int(weightVector[6])+\
qualityVector.integration*int(weightVector[7])+\
qualityVector.dataIntegrity*int(weightVector[8])+\
qualityVector.positionalAccuracy*int(weightVector[9])+\
qualityVector.thematicAccuracy*int(weightVector[10])+\
qualityVector.completeness*int(weightVector[11])
unWeightedScore = qualityVector.geographicExtent +\
qualityVector.licensingConstraint + qualityVector.scaleDenominator +\
qualityVector.update + qualityVector.temporalExtent + \
qualityVector.fitness4Use + qualityVector.thematicRichness + \
qualityVector.integration + qualityVector.dataIntegrity + \
qualityVector.positionalAccuracy + qualityVector.thematicAccuracy + \
qualityVector.completeness
curLayerId = layersubtopic.layer.id
curLayerName = Layer.objects.get(id=curLayerId)
results.append([curLayerName, currentScore, unWeightedScore])
return render_to_response("quality/layerRanking.html", RequestContext(request, {
"results" : results,
}))
# winnerLayer = Layer.objects.get(id=winner_layer_id)
# layername = winnerLayer.typename
# return redirect("/data/" + layername)
# return layerController(request, layername)
# return render_to_response('quality/temp.html', RequestContext(request, {
# 'weightVector': weightVector,
# 'layername': layername,
# }))
else:
return HttpResponse(loader.render_to_string('401.html',
RequestContext(request, {'error_message':
_("You are not permitted to view this layer")})), status=401)
| [
"simone.blb@gmail.com"
] | simone.blb@gmail.com |
b06c0a336f7918f4804bc29c80b8474a18f07c42 | 3980219a237537ffbb2c1bdb25cbc606e2bc76dd | /teuthology/suite/placeholder.py | 4669c5faa101c6747fff433b1afbc57436791754 | [
"MIT"
] | permissive | dzedro/teuthology | 546ff04c906aaa8a846ff046a977ac55194e7494 | ed015732753d7564157f9f45c1fb1b868f88574d | refs/heads/master | 2020-03-17T21:03:07.971902 | 2018-06-04T12:29:23 | 2018-06-04T12:29:23 | 133,941,039 | 0 | 0 | MIT | 2018-05-18T10:38:30 | 2018-05-18T10:38:29 | null | UTF-8 | Python | false | false | 3,371 | py | import copy
class Placeholder(object):
"""
A placeholder for use with substitute_placeholders. Simply has a 'name'
attribute.
"""
def __init__(self, name):
self.name = name
def substitute_placeholders(input_dict, values_dict):
"""
Replace any Placeholder instances with values named in values_dict. In the
case of None values, the key is omitted from the result.
Searches through nested dicts.
:param input_dict: A dict which may contain one or more Placeholder
instances as values.
:param values_dict: A dict, with keys matching the 'name' attributes of all
of the Placeholder instances in the input_dict, and
values to be substituted.
:returns: The modified input_dict
"""
input_dict = copy.deepcopy(input_dict)
def _substitute(input_dict, values_dict):
for key, value in input_dict.items():
if isinstance(value, dict):
_substitute(value, values_dict)
elif isinstance(value, Placeholder):
if values_dict[value.name] is None:
del input_dict[key]
continue
# If there is a Placeholder without a corresponding entry in
# values_dict, we will hit a KeyError - we want this.
input_dict[key] = values_dict[value.name]
return input_dict
return _substitute(input_dict, values_dict)
# Template for the config that becomes the base for each generated job config
dict_templ = {
'branch': Placeholder('ceph_branch'),
'sha1': Placeholder('ceph_hash'),
'teuthology_branch': Placeholder('teuthology_branch'),
'archive_upload': Placeholder('archive_upload'),
'archive_upload_key': Placeholder('archive_upload_key'),
'machine_type': Placeholder('machine_type'),
'nuke-on-error': True,
'os_type': Placeholder('distro'),
'os_version': Placeholder('distro_version'),
'overrides': {
'admin_socket': {
'branch': Placeholder('ceph_branch'),
},
'ceph': {
'conf': {
'mon': {
'debug mon': 20,
'debug ms': 1,
'debug paxos': 20},
'osd': {
'debug filestore': 20,
'debug journal': 20,
'debug ms': 1,
'debug osd': 25
}
},
'log-whitelist': ['slow request'],
'sha1': Placeholder('ceph_hash'),
},
'ceph-deploy': {
'conf': {
'client': {
'log file': '/var/log/ceph/ceph-$name.$pid.log'
},
'mon': {
'osd default pool size': 2
}
}
},
'install': {
'ceph': {
'sha1': Placeholder('ceph_hash'),
}
},
'workunit': {
'sha1': Placeholder('ceph_hash'),
}
},
'repo': Placeholder('ceph_repo'),
'suite': Placeholder('suite'),
'suite_repo': Placeholder('suite_repo'),
'suite_relpath': Placeholder('suite_relpath'),
'suite_branch': Placeholder('suite_branch'),
'suite_sha1': Placeholder('suite_hash'),
'tasks': [],
}
| [
"ncutler@suse.com"
] | ncutler@suse.com |
48ae0683541c724901af2003c42a3e01a2680bd3 | 0acbec663e7b2b77f799e8f1f298d62fceecbe1c | /admin.py | e038343044d3571276f66c2871bc23ed928cea93 | [] | no_license | rashedul-islam/managebook | d264ca75b031e974337a5bdb1ffa261c381675ad | 0bdd7f01f2cfe9b436b41ac92c3a98ef62a70129 | refs/heads/master | 2021-01-13T16:58:09.363257 | 2016-12-25T12:40:20 | 2016-12-25T12:40:20 | 77,324,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from django.contrib import admin
from .models import Book, Genre, Choices
admin.site.register(Book)
admin.site.register(Genre) | [
"rashedul.islam.kth@gmail.com"
] | rashedul.islam.kth@gmail.com |
ebf8f91c4cebdb610c8c71f2511f1d32c8984cf2 | 1099175fcf3dca6d1ae00e5729c954c7838d3cce | /main.py | a34799ae095bf748fbe1221af66112b93a4061f1 | [] | no_license | thenfserver/bot-py | d990e360c8c164d329b77a72096d567d60fef95e | a8e3cbd193679281e9a8cf5dd2ef53bae814363b | refs/heads/main | 2023-04-25T14:42:49.572423 | 2021-05-07T03:29:32 | 2021-05-07T03:29:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,945 | py | import discord, os, time, random, datetime, asyncio, platform, youtube_dl
from discord.ext import commands
from discord.utils import get
from discord import FFmpegPCMAudio
start_time = time.time()
intents = discord.Intents.default()
intents.members = True
client = commands.Bot(command_prefix =["nf!", "NF!", "Nf!", "nF!", "!"], case_insensitive=True, intents=intents)
TOKEN = ''
client.remove_command('help')
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
print(f"Loaded cog.{filename[:-3]}")
@client.event
async def on_ready():
print(f"Succesfully signed in as {client.user.name} ({client.user.id}).")
channel = client.get_channel(743246479594094693)
embed = discord.Embed(description=f"{client.user.name} has booted on {time.ctime()}.",color=discord.Color.green())
await channel.send(embed=embed)
voice = client.get_channel(830314719356387359)
songsource = random.choice(os.listdir("/root/nf%20songs"))
source = FFmpegPCMAudio(songsource)
await voice.connect()
player = voice.play(source)
async def ch_pr():
await client.wait_until_ready()
fo = open("utils/lists/songs.txt", "r")
song = fo.readlines()
statuses = [f"{random.choice(song)} | nf!help", "nf.lnk.to/clouds"]
while not client.is_closed():
fo = open("utils/lists/songs.txt", "r")
song = fo.readlines()
statuses = [f"{random.choice(song)} | nf!help", "nf.lnk.to/clouds"]
status = random.choice(statuses)
await client.change_presence(activity=discord.Game(name=status))
await asyncio.sleep(30)
client.loop.create_task(ch_pr())
@client.command()
async def info(ctx):
""" The bot's info. """
current_time = time.time()
difference = int(round(current_time - start_time))
text = str(datetime.timedelta(seconds=difference))
embed = discord.Embed(color=discord.Color.green())
embed.set_author(name=f"{client.user.name}'s Info")
embed.set_footer(text=f"Ping {round(client.latency * 1000)}ms | Uptime {text} | Version 2020.20.09")
embed.add_field(name="Developer", value=f"bread#7620", inline=True)
embed.add_field(name="Language", value=f"Python {platform.python_version()}")
embed.add_field(name="Libary", value=f"discord.py {discord.__version__}", inline=True)
embed.add_field(name="Users", value=f"`{len(set(client.get_all_members()))}`", inline=True)
embed.add_field(name="Github", value=f"[Click Here](https://github.com/IronCodez/nfrealbot/)")
embed.add_field(name="Status", value="[Click Here](https://stats.uptimerobot.com/L5ZkxcPQNB)")
await ctx.send(embed=embed)
@client.command()
async def uptime(ctx):
current_time = time.time()
difference = int(round(current_time - start_time))
text = str(datetime.timedelta(seconds=difference))
embed = discord.Embed(color=discord.Color.green(), description=text)
await ctx.send(embed=embed)
client.run(TOKEN, bot=True, reconnect=True)
| [
"noreply@github.com"
] | noreply@github.com |
3bb90c581abdb121a7c377a67bb816e7b76164f4 | 5bb5cc34e3d52f5cd1f88efde0c182735f682cf4 | /inference.py | fd7f0f5b1ec3a8af6da0607a48b699dd047f62c1 | [] | no_license | jtpils/pc_mr_net | 4f3816bed3b4cd6dd2fc4481cc3374a0ac5ffca8 | 3b5c3ce563473593ebd2b90d0d57a423852f822f | refs/heads/master | 2020-05-05T05:09:23.345742 | 2019-03-21T22:56:43 | 2019-03-21T22:56:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | import torch
from argparse import ArgumentParser
import os
import numpy as np
from h5py import File
from layers.pc_mr_net import PointCloudMapRegressionNet
from data.hdf_dataset_loader import HdfDataset
class InferenceDataset(HdfDataset):
def __getitem__(self, item):
data_file = self.data_files[item]
_file = os.path.join(self.dataset_folder, data_file)
with File(_file) as f:
pcl_data = np.array(f["point_cloud"])
feature_vector = self.compute_feature_vector(pcl_data)
return feature_vector, data_file, pcl_data
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("model")
parser.add_argument("data_folder")
parser.add_argument("save_folder")
args = parser.parse_args()
net = PointCloudMapRegressionNet()
net.load_state_dict(torch.load(args.model))
files = os.listdir(args.data_folder)
data_loader = InferenceDataset(args.data_folder)
for i in range(len(data_loader)):
feature_vector, file_name, pcl = data_loader[i]
output = net(feature_vector)
output_file_name = os.path.join(args.save_folder, "out_" + file_name)
with File(output_file_name, "w") as f:
f.create_dataset("point_cloud", data=pcl)
f.create_dataset("object_vectors", data=output)
| [
"jae251@gmx.de"
] | jae251@gmx.de |
46ca9958a730d18a7f5981a994caa4ea011f3532 | 75388db141483f6aa8994df4f97e83584b93e50e | /movie/movie_app/migrations/0001_initial.py | b7a48040292ddc91dcda7436e9623e4bcc3b592a | [] | no_license | bonrg/movies | 8a2c8b44b3525cce1633b163e4879a9c8d03c3d6 | e1703a3a887d87a5524e9d71c51700b83aa7e984 | refs/heads/master | 2022-11-29T06:26:01.097169 | 2020-03-25T07:26:55 | 2020-03-25T07:26:55 | 248,919,260 | 0 | 0 | null | 2022-11-22T05:24:58 | 2020-03-21T06:22:42 | HTML | UTF-8 | Python | false | false | 7,010 | py | # Generated by Django 3.0.4 on 2020-03-21 09:23
import datetime
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Actor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Name')),
('age', models.PositiveSmallIntegerField(default=0, verbose_name='Age')),
('description', models.TextField(verbose_name='Description')),
('image', models.ImageField(upload_to='actors/', verbose_name='Image')),
],
options={
'verbose_name': 'Actors and Producers',
'verbose_name_plural': 'Actors and Producers',
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150, verbose_name='Category')),
('description', models.TextField(verbose_name='Description')),
('url', models.SlugField(max_length=160, unique=True)),
],
options={
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Name')),
('description', models.TextField(verbose_name='Description')),
('url', models.SlugField(max_length=160, unique=True)),
],
options={
'verbose_name': 'Genre',
'verbose_name_plural': 'Genres',
},
),
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Title')),
('tagline', models.CharField(default='', max_length=100, verbose_name='Slogan')),
('description', models.TextField(verbose_name='Description')),
('poster', models.ImageField(upload_to='movies/', verbose_name='Poster')),
('year', models.PositiveSmallIntegerField(default=2019, verbose_name='Issue date')),
('country', models.CharField(max_length=30, verbose_name='Country')),
('world_premiere', models.DateField(default=datetime.date.today, verbose_name='Premiere in world')),
('budget', models.PositiveIntegerField(default=0, help_text='in dollars', verbose_name='Budget')),
('fees_in_usa', models.PositiveIntegerField(default=0, help_text='in dollars', verbose_name='Fees in USA')),
('fees_in_world', models.PositiveIntegerField(default=0, help_text='in dollars', verbose_name='Fees in world')),
('url', models.SlugField(max_length=160, unique=True)),
('draft', models.BooleanField(default=False, verbose_name='Draft')),
('actors', models.ManyToManyField(related_name='film_actor', to='movie_app.Actor', verbose_name='actors')),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='movie_app.Category', verbose_name='Category')),
('directors', models.ManyToManyField(related_name='film_director', to='movie_app.Actor', verbose_name='producer')),
('genres', models.ManyToManyField(to='movie_app.Genre', verbose_name='genres')),
],
options={
'verbose_name': 'Movie',
'verbose_name_plural': 'Movies',
},
),
migrations.CreateModel(
name='RatingStar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.PositiveSmallIntegerField(default=0, verbose_name='Value')),
],
options={
'verbose_name': 'Star rating',
'verbose_name_plural': 'Stars rating',
},
),
migrations.CreateModel(
name='Reviews',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('name', models.CharField(max_length=100, verbose_name='Name')),
('text', models.TextField(max_length=5000, verbose_name='Message')),
('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movie_app.Movie', verbose_name='movie')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='movie_app.Reviews', verbose_name='Parent')),
],
options={
'verbose_name': 'Review',
'verbose_name_plural': 'Reviews',
},
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(max_length=15, verbose_name='IP address')),
('movie', models.ForeignKey(on_delete=django.db.models.fields.CharField, to='movie_app.Movie', verbose_name='movie')),
('star', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movie_app.RatingStar', verbose_name='star')),
],
options={
'verbose_name': 'Rating',
'verbose_name_plural': 'Ratings',
},
),
migrations.CreateModel(
name='MovieShots',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Title')),
('description', models.TextField(verbose_name='Description')),
('image', models.ImageField(upload_to='movie_shots/', verbose_name='Image')),
('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movie_app.Movie', verbose_name='Movie')),
],
options={
'verbose_name': 'Shot on movie',
'verbose_name_plural': 'Shots on movie',
},
),
]
| [
"a.uderbay@kazdream.kz"
] | a.uderbay@kazdream.kz |
9f08f0ce81f15f2afdcd8017b72a7d1a9acf39fd | 41976606488ba795e201c05cccdc4c39a3015875 | /app/views/perfil_views.py | 74cf6feb731dcf167f44950f98c2a49d16374f39 | [] | no_license | andersonvaler/capstone-backend-Q3-python | c16158f52ef57b47cb0e36d6f929ee3ef1c41fbf | 8cf10943e2ca98939e55f53c0a60fad882e0bc32 | refs/heads/main | 2023-06-23T10:44:30.524647 | 2021-07-24T16:24:03 | 2021-07-24T16:24:03 | 389,144,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | from app.models.lojistas_model import Lojistas
from flask_jwt_extended import jwt_required
from app.models.clientes_model import Clientes
from flask import Blueprint, jsonify
bp = Blueprint("bp_perfil", __name__)
@bp.get("/lojistas/<int:lojista_id>")
@jwt_required()
def get_lojista_id(lojista_id):
lojista = Lojistas.query.filter_by(id=lojista_id).first()
if not lojista:
return {"Error": "Lojista não encontrado."}, 404
return jsonify(lojista.serialized)
@bp.get("/clientes/<int:cliente_id>")
@jwt_required()
def get_cliente_id(cliente_id):
cliente = Clientes.query.filter_by(id=cliente_id).first()
if not cliente:
return {"Error": "Cliente não encontrado."}, 404
return jsonify(cliente.serialized)
@bp.get("/clientes")
@jwt_required()
def get_all_clientes():
clientes = Clientes.query.all()
data = [cliente.serialized for cliente in clientes]
return jsonify(data)
@bp.get("/lojistas")
@jwt_required()
def get_all_lojistas():
lojistas = Lojistas.query.all()
data = [lojista.serialized for lojista in lojistas]
return jsonify(data)
| [
"andersonvaler@gmail.com"
] | andersonvaler@gmail.com |
d3e323c429533162b102744f30b393fd5c2f8081 | 0951cb62572e75a8e8a7ef1f98092110bb73d20a | /pandas/tests/categorical/test_operators.py | 09a0607b67a88f0f3b238c65434191cfa6e3562f | [
"BSD-3-Clause"
] | permissive | ActiveState/pandas | 452de0fe049412f273caf6ebc86b8d0ffa0c68e6 | 106a04f14e0c090f95784c311f3d07c35e6ef276 | refs/heads/master | 2023-08-30T09:05:13.587536 | 2018-01-04T15:25:01 | 2018-01-04T15:25:01 | 112,227,117 | 1 | 4 | BSD-3-Clause | 2023-07-28T17:52:11 | 2017-11-27T17:32:22 | Python | UTF-8 | Python | false | false | 11,023 | py | # -*- coding: utf-8 -*-
import pytest
import pandas as pd
import numpy as np
import pandas.util.testing as tm
from pandas import Categorical, Series, DataFrame, date_range
from pandas.tests.categorical.common import TestCategorical
class TestCategoricalOpsWithFactor(TestCategorical):
    """Comparison-operator tests that rely on the shared ``self.factor``
    fixture provided by :class:`TestCategorical`."""

    def test_categories_none_comparisons(self):
        # Building without explicit categories should infer the same
        # ordered categories as the shared fixture.
        factor = Categorical(['a', 'b', 'b', 'a',
                              'a', 'c', 'c', 'c'], ordered=True)
        tm.assert_categorical_equal(factor, self.factor)

    def test_comparisons(self):
        # Masking with a scalar comparison must match masking with the
        # equivalent ndarray comparison for ==, !=, <, >, >= and <=.
        result = self.factor[self.factor == 'a']
        expected = self.factor[np.asarray(self.factor) == 'a']
        tm.assert_categorical_equal(result, expected)

        result = self.factor[self.factor != 'a']
        expected = self.factor[np.asarray(self.factor) != 'a']
        tm.assert_categorical_equal(result, expected)

        result = self.factor[self.factor < 'c']
        expected = self.factor[np.asarray(self.factor) < 'c']
        tm.assert_categorical_equal(result, expected)

        result = self.factor[self.factor > 'a']
        expected = self.factor[np.asarray(self.factor) > 'a']
        tm.assert_categorical_equal(result, expected)

        result = self.factor[self.factor >= 'b']
        expected = self.factor[np.asarray(self.factor) >= 'b']
        tm.assert_categorical_equal(result, expected)

        result = self.factor[self.factor <= 'b']
        expected = self.factor[np.asarray(self.factor) <= 'b']
        tm.assert_categorical_equal(result, expected)

        # Element-wise equality against a permuted copy, and against a
        # value that is absent from the categories (always False).
        n = len(self.factor)
        other = self.factor[np.random.permutation(n)]
        result = self.factor == other
        expected = np.asarray(self.factor) == np.asarray(other)
        tm.assert_numpy_array_equal(result, expected)

        result = self.factor == 'd'
        expected = np.repeat(False, len(self.factor))
        tm.assert_numpy_array_equal(result, expected)

        # Comparisons between categoricals must respect the declared
        # ordering of the categories, not the lexical order of the values.
        cat_rev = Categorical(
            ["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
        cat_rev_base = Categorical(
            ["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
        cat = Categorical(["a", "b", "c"], ordered=True)
        cat_base = Categorical(
            ["b", "b", "b"], categories=cat.categories, ordered=True)

        res_rev = cat_rev > cat_rev_base
        exp_rev = np.array([True, False, False])
        tm.assert_numpy_array_equal(res_rev, exp_rev)

        res_rev = cat_rev < cat_rev_base
        exp_rev = np.array([False, False, True])
        tm.assert_numpy_array_equal(res_rev, exp_rev)

        res = cat > cat_base
        exp = np.array([False, False, True])
        tm.assert_numpy_array_equal(res, exp)

        # Only categoricals with identical categories can be compared.
        with pytest.raises(TypeError):
            cat > cat_rev

        cat_rev_base2 = Categorical(
            ["b", "b", "b"], categories=["c", "b", "a", "d"])

        with pytest.raises(TypeError):
            cat_rev > cat_rev_base2

        # Only categoricals with the same ordering information can be
        # compared.
        cat_unordered = cat.set_ordered(False)
        assert not (cat > cat).any()

        with pytest.raises(TypeError):
            cat > cat_unordered

        # Comparison (in both directions) with a Series raises.
        s = Series(["b", "b", "b"])
        with pytest.raises(TypeError):
            cat > s
        with pytest.raises(TypeError):
            cat_rev > s
        with pytest.raises(TypeError):
            s < cat
        with pytest.raises(TypeError):
            s < cat_rev

        # Comparison with a plain ndarray raises in both directions, but
        # only on newer numpy versions.
        a = np.array(["b", "b", "b"])
        with pytest.raises(TypeError):
            cat > a
        with pytest.raises(TypeError):
            cat_rev > a

        # Unequal comparisons against a scalar must also take the
        # categories order into account.
        cat_rev = Categorical(
            list("abc"), categories=list("cba"), ordered=True)
        exp = np.array([True, False, False])
        res = cat_rev > "b"
        tm.assert_numpy_array_equal(res, exp)
class TestCategoricalOps(object):
    # Comparison semantics of Categorical / categorical Series: ordering,
    # category matching, and rejection of numeric and reduction operations.

    def test_datetime_categorical_comparison(self):
        # Ordered datetime categoricals compare element-wise against a scalar
        # category, in both orientations (reflected comparison included).
        dt_cat = Categorical(date_range('2014-01-01', periods=3), ordered=True)
        tm.assert_numpy_array_equal(dt_cat > dt_cat[0],
                                    np.array([False, True, True]))
        tm.assert_numpy_array_equal(dt_cat[0] < dt_cat,
                                    np.array([False, True, True]))

    def test_reflected_comparison_with_scalars(self):
        # GH8658
        cat = Categorical([1, 2, 3], ordered=True)
        tm.assert_numpy_array_equal(cat > cat[0],
                                    np.array([False, True, True]))
        tm.assert_numpy_array_equal(cat[0] < cat,
                                    np.array([False, True, True]))

    def test_comparison_with_unknown_scalars(self):
        # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
        # and following comparisons with scalars not in categories should raise
        # for unequal comps, but not for equal/not equal
        cat = Categorical([1, 2, 3], ordered=True)
        pytest.raises(TypeError, lambda: cat < 4)
        pytest.raises(TypeError, lambda: cat > 4)
        pytest.raises(TypeError, lambda: 4 < cat)
        pytest.raises(TypeError, lambda: 4 > cat)
        # Equality against a scalar outside the categories is well defined:
        # nothing matches / everything differs.
        tm.assert_numpy_array_equal(cat == 4,
                                    np.array([False, False, False]))
        tm.assert_numpy_array_equal(cat != 4,
                                    np.array([True, True, True]))

    @pytest.mark.parametrize('data,reverse,base', [
        (list("abc"), list("cba"), list("bbb")),
        ([1, 2, 3], [3, 2, 1], [2, 2, 2])]
    )
    def test_comparisons(self, data, reverse, base):
        # Same scenarios as the Categorical-level tests above, but exercised
        # through Series with categorical dtype (string and int categories).
        cat_rev = Series(
            Categorical(data, categories=reverse, ordered=True))
        cat_rev_base = Series(
            Categorical(base, categories=reverse, ordered=True))
        cat = Series(Categorical(data, ordered=True))
        cat_base = Series(
            Categorical(base, categories=cat.cat.categories, ordered=True))
        s = Series(base)
        a = np.array(base)

        # comparisons need to take categories ordering into account
        res_rev = cat_rev > cat_rev_base
        exp_rev = Series([True, False, False])
        tm.assert_series_equal(res_rev, exp_rev)
        res_rev = cat_rev < cat_rev_base
        exp_rev = Series([False, False, True])
        tm.assert_series_equal(res_rev, exp_rev)
        res = cat > cat_base
        exp = Series([False, False, True])
        tm.assert_series_equal(res, exp)

        # Scalar comparison must agree with the underlying Categorical values.
        scalar = base[1]
        res = cat > scalar
        exp = Series([False, False, True])
        exp2 = cat.values > scalar
        tm.assert_series_equal(res, exp)
        tm.assert_numpy_array_equal(res.values, exp2)
        res_rev = cat_rev > scalar
        exp_rev = Series([True, False, False])
        exp_rev2 = cat_rev.values > scalar
        tm.assert_series_equal(res_rev, exp_rev)
        tm.assert_numpy_array_equal(res_rev.values, exp_rev2)

        # Only categories with same categories can be compared
        def f():
            cat > cat_rev
        pytest.raises(TypeError, f)

        # categorical cannot be compared to Series or numpy array, and also
        # not the other way around
        pytest.raises(TypeError, lambda: cat > s)
        pytest.raises(TypeError, lambda: cat_rev > s)
        pytest.raises(TypeError, lambda: cat > a)
        pytest.raises(TypeError, lambda: cat_rev > a)
        pytest.raises(TypeError, lambda: s < cat)
        pytest.raises(TypeError, lambda: s < cat_rev)
        pytest.raises(TypeError, lambda: a < cat)
        pytest.raises(TypeError, lambda: a < cat_rev)

    @pytest.mark.parametrize('ctor', [
        lambda *args, **kwargs: Categorical(*args, **kwargs),
        lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),
    ])
    def test_unordered_different_order_equal(self, ctor):
        # https://github.com/pandas-dev/pandas/issues/16014
        # Unordered categoricals with the same category set (in any order)
        # compare by value, not by internal code.
        c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
        c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
        assert (c1 == c2).all()
        c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
        c2 = ctor(['b', 'a'], categories=['b', 'a'], ordered=False)
        assert (c1 != c2).all()
        c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
        c2 = ctor(['b', 'b'], categories=['b', 'a'], ordered=False)
        assert (c1 != c2).all()
        c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
        c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
        result = c1 == c2
        tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))

    def test_unordered_different_categories_raises(self):
        # Different category sets cannot be compared at all.
        c1 = Categorical(['a', 'b'], categories=['a', 'b'], ordered=False)
        c2 = Categorical(['a', 'c'], categories=['c', 'a'], ordered=False)
        with tm.assert_raises_regex(TypeError,
                                    "Categoricals can only be compared"):
            c1 == c2

    def test_compare_different_lengths(self):
        # Even empty categoricals must have matching category lengths.
        c1 = Categorical([], categories=['a', 'b'])
        c2 = Categorical([], categories=['a'])
        msg = "Categories are different lengths"
        with tm.assert_raises_regex(TypeError, msg):
            c1 == c2

    def test_numeric_like_ops(self):
        # Arithmetic and most reductions are undefined for categorical data
        # and must raise TypeError, on both DataFrame and Series.
        df = DataFrame({'value': np.random.randint(0, 10000, 100)})
        labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
        cat_labels = Categorical(labels, labels)
        df = df.sort_values(by=['value'], ascending=True)
        df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
                                   right=False, labels=cat_labels)

        # numeric ops should not succeed
        for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
            pytest.raises(TypeError,
                          lambda: getattr(df, op)(df))

        # reduction ops should not succeed (unless specifically defined, e.g.
        # min/max)
        s = df['value_group']
        for op in ['kurt', 'skew', 'var', 'std', 'mean', 'sum', 'median']:
            pytest.raises(TypeError,
                          lambda: getattr(s, op)(numeric_only=False))

        # mad technically works because it takes always the numeric data

        # numpy ops
        s = Series(Categorical([1, 2, 3, 4]))
        pytest.raises(TypeError, lambda: np.sum(s))

        # numeric ops on a Series
        for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
            pytest.raises(TypeError, lambda: getattr(s, op)(2))

        # invalid ufunc
        pytest.raises(TypeError, lambda: np.log(s))
| [
"jeff@reback.net"
] | jeff@reback.net |
c2154d3a5fe4c8670860e1c2b5ea7301a892ea20 | 780b6cca690a213ac908b1cd5faef5366a18dc4e | /314_print_names_to_columns/save1_nopass.py | 8cb6c53bb39aa700c4f9bc48b51e4735762b74ba | [] | no_license | katkaypettitt/pybites-all | 899180a588e460b343c00529c6a742527e4ea1bc | 391c07ecac0d92d5dc7c537bcf92eb6c1fdda896 | refs/heads/main | 2023-08-22T16:33:11.171732 | 2021-10-24T17:29:44 | 2021-10-24T17:29:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | from typing import List # not needed when we upgrade to 3.9
def print_names_to_columns(names: List[str], cols: int = 2) -> None:
    """Print *names* in `cols` columns.

    Each cell is rendered as '| <name>' with the name left-padded to
    width 9; rows are joined cell-by-cell with single spaces and the
    whole block (with its trailing blank line) is printed at once.
    """
    cells = [f'| {name:<9}' for name in names]
    rows = [' '.join(cells[start:start + cols])
            for start in range(0, len(cells), cols)]
    block = ''.join(row + '\n' for row in rows)
    print(block)
"70788275+katrinaalaimo@users.noreply.github.com"
] | 70788275+katrinaalaimo@users.noreply.github.com |
0f31bab85029d70a6e19843c3d32bb3f395b7394 | 395707d0df8dd0df9667401b4bde0b38960b1e24 | /prefect-experiments/flow-of-flows.py | 9994d5303773c859051b69a0523ae07968bcb18b | [] | no_license | agatagawad/prefect-experiments | 539114b68f2ffdb4ec3f856f3b8d90213683440a | 7e4175d02a2e94f5a9af94a97733bf3d0a1c864f | refs/heads/main | 2023-04-02T09:09:28.011771 | 2021-03-27T17:52:10 | 2021-03-27T17:52:10 | 352,137,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,853 | py |
# Example from https://docs.prefect.io/core/idioms/flow-to-flow.html
from prefect import Flow, task
# from prefect.core import task
from prefect.core.parameter import Parameter
from prefect.tasks.prefect import StartFlowRun
@task
def A_task1(val):
    # Scale the incoming parameter by ten.
    return val * 10


@task
def A_task2(val):
    # Shift the upstream result by five.
    return 5 + val


# Flow A wires A_param -> A_task1 -> A_task2, then registers itself.
with Flow(name='A') as flow_A:
    A_param = Parameter('A_param', 2)
    tens = A_task1(A_param)
    shifted = A_task2(tens)
flow_A.register(project_name='examples')
@task
def B_task1(val):
    # Scale the incoming parameter by twenty.
    return val * 20


@task
def B_task2(val):
    # Shift the upstream result by fifteen.
    return 15 + val


# Flow B wires B_param -> B_task1 -> B_task2, then registers itself.
with Flow(name='B') as flow_B:
    B_param = Parameter('B_param', 1)
    twenties = B_task1(B_param)
    shifted = B_task2(twenties)
flow_B.register(project_name='examples')
@task
def C_task1(val):
    # Scale the incoming parameter by twenty.
    return val * 20


@task
def C_task2(val):
    # Shift the upstream result by fifteen.
    return 15 + val


# Flow C wires C_param -> C_task1 -> C_task2, then registers itself.
with Flow(name='C') as flow_C:
    C_param = Parameter('C_param', 1)
    twenties = C_task1(C_param)
    shifted = C_task2(twenties)
flow_C.register(project_name='examples')
@task
def D_task1(val):
    """Scale the incoming parameter by twenty."""
    return 20*val

@task
def D_task2(val):
    """Shift the upstream result by fifteen."""
    return val + 15

@task
def D_task3(x, y, val):
    """Sum both upstream task results with the raw parameter."""
    return x + y + val

# BUG FIX: the parameter was bound to the name `C_param` while the tasks
# referenced the undefined name `D_param`, raising NameError when this flow
# was built. Bind it as `D_param` and use it consistently below.
with Flow(name='D') as flow_D:
    D_param = Parameter('D_param', 1)
    x = D_task1(D_param)
    y = D_task2(x)
    z = D_task3(x, y, D_param)
flow_D.register(project_name='examples')
# assumes you have registered the following flows in a project named "examples"
flow_a = StartFlowRun(flow_name="A", project_name="examples", wait=True)
flow_b = StartFlowRun(flow_name="B", project_name="examples", wait=True)
flow_c = StartFlowRun(flow_name="C", project_name="examples", wait=True)
flow_d = StartFlowRun(flow_name="D", project_name="examples", wait=True)

# Parent flow: run A first, then B and C (both depend on A), then D.
with Flow("parent-flow") as flow:
    run_b = flow_b(upstream_tasks=[flow_a])
    run_c = flow_c(upstream_tasks=[flow_a])
    flow_d(upstream_tasks=[run_b, run_c])
flow.register(project_name='examples')
"agata.gawad@yher.be"
] | agata.gawad@yher.be |
4669e42a6a2d00e57c11c14494944ba996bf543b | 9f55ac816c6a4bdb8ac35c4eea55ef5283c2a5cf | /homework/hw06/hw06.py | 17afe454cd9b9192798f9f042301cf27dc136aa9 | [
"MIT"
] | permissive | Nicoleyss/cs61a-self-study | ead2663df58bc5b080badd51a1d90b3e5148368f | e32d77f751af66008ff4c69ffe0b32688b275516 | refs/heads/master | 2022-01-08T17:39:00.442276 | 2018-09-08T18:22:58 | 2018-09-08T18:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,034 | py | passphrase = 'CC74EB'
def survey(p):
    """
    You do not need to understand this code.
    >>> survey(passphrase)
    '3d2eea56786a3d9e503a4c07dd667867ef3d92bfccd68b2aa0900ead'
    """
    # SHA-224 digest of the UTF-8 encoded passphrase, as lowercase hex.
    from hashlib import sha224
    return sha224(p.encode('utf-8')).hexdigest()
class Fib():
    """A Fibonacci number.
    >>> start = Fib()
    >>> start
    0
    >>> start.next()
    1
    >>> start.next().next()
    1
    >>> start.next().next().next()
    2
    >>> start.next().next().next().next()
    3
    >>> start.next().next().next().next().next()
    5
    >>> start.next().next().next().next().next().next()
    8
    >>> start.next().next().next().next().next().next() # Ensure start isn't changed
    8
    """

    def __init__(self, value=0):
        self.value = value

    def next(self):
        # A chain is assumed to start from Fib() (value 0). Each successor
        # carries a `previous` attribute so the one after it can be computed
        # without ever mutating this instance.
        if self.value == 0:
            successor = Fib(1)
        else:
            successor = Fib(self.value + self.previous)
        successor.previous = self.value
        return successor

    def __repr__(self):
        return str(self.value)
class VendingMachine:
    """A vending machine that vends some product for some price.
    >>> v = VendingMachine('candy', 10)
    >>> v.vend()
    'Machine is out of stock.'
    >>> v.deposit(15)
    'Machine is out of stock. Here is your $15.'
    >>> v.restock(2)
    'Current candy stock: 2'
    >>> v.vend()
    'You must deposit $10 more.'
    >>> v.deposit(7)
    'Current balance: $7'
    >>> v.vend()
    'You must deposit $3 more.'
    >>> v.deposit(5)
    'Current balance: $12'
    >>> v.vend()
    'Here is your candy and $2 change.'
    >>> v.deposit(10)
    'Current balance: $10'
    >>> v.vend()
    'Here is your candy.'
    >>> v.deposit(15)
    'Machine is out of stock. Here is your $15.'
    >>> w = VendingMachine('soda', 2)
    >>> w.restock(3)
    'Current soda stock: 3'
    >>> w.restock(3)
    'Current soda stock: 6'
    >>> w.deposit(2)
    'Current balance: $2'
    >>> w.vend()
    'Here is your soda.'
    """

    def __init__(self, item, cost):
        # Product name, unit price, and the machine's mutable state
        # (items on hand and money deposited toward the next purchase).
        self.item = item
        self.cost = cost
        self.stock = 0
        self.bank = 0

    def vend(self):
        """Dispense one item when stocked and fully paid, returning change."""
        if self.stock <= 0:
            return 'Machine is out of stock.'
        shortfall = self.cost - self.bank
        if shortfall > 0:
            return 'You must deposit ${0} more.'.format(shortfall)
        self.stock -= 1
        change = self.bank - self.cost
        self.bank = 0
        if change > 0:
            return 'Here is your {0} and ${1} change.'.format(self.item, change)
        return 'Here is your {0}.'.format(self.item)

    def deposit(self, money):
        """Accept money toward a purchase; refuse it when out of stock."""
        if self.stock <= 0:
            return 'Machine is out of stock. Here is your ${0}.'.format(money)
        self.bank += money
        return 'Current balance: ${0}'.format(self.bank)

    def restock(self, amt):
        """Add `amt` items to the inventory and report the new stock level."""
        self.stock += amt
        return 'Current {0} stock: {1}'.format(self.item, self.stock)
"tejashah88@gmail.com"
] | tejashah88@gmail.com |
8cf0710d3d1e894a8ffc673a018df78e74505973 | 81e84e22e5d8ce033499d382c584a57acd8af1d3 | /seconde_app/views.py | 984937b337e251b2cfde0a28b1a4f59465096650 | [] | no_license | LakhanKumarGautam/travello | 7525f3edd352995d2c2d85f40043102df78c9153 | 8ca9be9f211039f38176b2cdd71a19ccb04a2304 | refs/heads/master | 2022-07-15T09:25:31.386645 | 2020-05-18T18:05:07 | 2020-05-18T18:05:07 | 259,970,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home2(request):
    """Render the home2 template with a small fixed demo context."""
    context = {'hii_Lakhan': "this is my best friend"}
    return render(request, 'home2.html', context=context)
def add(request):
    """Sum the three integers POSTed as val1..val3 and render the result."""
    total = sum(int(request.POST[field]) for field in ('val1', 'val2', 'val3'))
    return render(request, 'result.html', {'result': total})
"50776528+LakhanKumarGautam@users.noreply.github.com"
] | 50776528+LakhanKumarGautam@users.noreply.github.com |
c2ba1834a55e267479f9cbd6ac9640b5e7397ba9 | 8fd4822d6d04fe0643b84139ddb1ee1d7d4d9f0a | /tests/test_signal/__init__.py | a5ff54368aa509e876bc36001ae564c1f61973dc | [
"MIT"
] | permissive | mcanatalay/SIMULOC | 48d4352599daa807009635953162d22fced273a0 | af32c522887dec08a0815052a5878b5a595b8d44 | refs/heads/master | 2021-08-22T17:12:30.332027 | 2017-11-18T23:33:14 | 2017-11-18T23:33:14 | 111,238,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | """Test class for Signal module."""
| [
"mcanatalay@hotmail.co.uk"
] | mcanatalay@hotmail.co.uk |
f0fb90be4126c4d7c1b3bc08502dc6de8c3bdc26 | 59405bb9af890a081e33f9f56d20a7ecb7d03853 | /02. Logistic Regression/Social Network Ads Logistic Regression.py | 899c9dbea65173d856b4baa545afc7a31c73e83f | [] | no_license | aaryankaushik/ML-Algorithms | cf9a69d15fee6c4f725f16ce4d463411e39fe474 | 38f86b5bebe1e2f50778d79c4833c050b852960b | refs/heads/master | 2020-04-24T23:19:02.879274 | 2019-02-24T15:01:53 | 2019-02-24T15:01:53 | 172,340,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | import pandas as pd
# Load the Social Network Ads dataset; the CSV is expected next to this script.
sn=pd.read_csv('Social Network Ads.csv')
# One-hot encode Gender, dropping the first level to avoid collinearity
# (leaves a single indicator column).
gen=pd.get_dummies(sn['Gender'],drop_first=True)
sn.drop(['Gender'],axis=1,inplace=True)
sn=pd.concat([sn,gen],axis=1)
# Features = everything except the target column; target = 'Purchased'.
x=sn.drop('Purchased',axis=1)
y=sn['Purchased']
from sklearn.model_selection import train_test_split
# Hold out 20% for evaluation; fixed seed for reproducibility.
xtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=0.2,random_state=0)
from sklearn.linear_model import LogisticRegression
lr=LogisticRegression()
lr.fit(xtrain,ytrain)
ypre=lr.predict(xtest)
# NOTE(review): features are unscaled; logistic regression usually benefits
# from standardization — confirm this is intentional.
print('acc:',lr.score(xtest,ytest))
from sklearn.metrics import confusion_matrix,classification_report
print('cf:',confusion_matrix(ytest,ypre))
print('cr:',classification_report(ytest,ypre)) | [
"noreply@github.com"
] | noreply@github.com |
e9d40f1152b20f9719a6a72cf80ee1684fc24f55 | 3acd83134884afb4ee92f58346162f847328623a | /django_vuetify/settings/production.py | fefe519385f435d69a8f098d00b1f04d060c707d | [] | no_license | Navaneeth-Nagesh/django_vue | a9ff8810d21518c5cd8c210cc7b716cc11d0fc22 | 0e825373327d98ece325b6b282ea9ebf20a73586 | refs/heads/master | 2020-05-19T01:56:57.852744 | 2019-05-03T14:26:46 | 2019-05-03T14:26:46 | 184,769,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py |
import os
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): [''] holds a single empty string, which is not a valid host
# entry — fill in the real production domain(s) before deploying.
ALLOWED_HOSTS = ['']
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        # MySQL backend; credentials are blank placeholders to be supplied
        # per deployment (consider reading them from environment variables).
        'ENGINE': 'django.db.backends.mysql',
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'HOST': '127.0.0.1',
        'PORT': '',
    }
}
| [
"navaneeth.webtrigon@gmail.com"
] | navaneeth.webtrigon@gmail.com |
0a34c488dbde7d4762ed53a0db5049c78bd899cf | be760ae24f4b9eb148ec3d3efe7d9490fc1c56d2 | /LPTHW/positive.py | 8699df3d8f2d7489e77826a968f0bcdd292c7839 | [] | no_license | yogicat/python-crash-course | 4a7c302cdc82647b3e2ccd42a80a9fedc9bf9c25 | af64c43aeb9a74cc794d88db9cbf6390e2e258eb | refs/heads/master | 2020-05-16T09:41:41.193500 | 2019-04-27T11:09:52 | 2019-04-29T11:09:52 | 182,958,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | def main():
i = get_positive_int("positive integer: ")
print(i)
def get_positive_int(prompt):
    """Prompt repeatedly until the user enters a positive integer.

    Re-prompts on non-numeric input instead of crashing (the original
    let int() raise ValueError on anything non-numeric).
    """
    while True:
        try:
            n = int(input(prompt))
        except ValueError:
            # Not an integer at all — ask again.
            continue
        if n > 0:
            return n
# Only run the interactive prompt when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"dahe.oh@gmail.com"
] | dahe.oh@gmail.com |
9a0d1a518ad80a316b052d7d50ffc3d5918fc703 | ddcef0ffeb4a024850252ff5c5da247c4433e207 | /src/mmgroup/tests/test_mm/test_prep_xy.py | 8ee8245cac11f4afb4009b578f4e7e43193d7ec6 | [
"MIT"
] | permissive | stratosthirios/mmgroup | 8423bc8c3d38a2478f76ba53ca99db8e8cf8dfa0 | a7a9a92a20580ecd697075f1c673989f0ad13bdc | refs/heads/master | 2023-08-29T00:01:42.038120 | 2021-10-17T20:27:46 | 2021-10-17T20:27:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,848 | py | from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import numpy as np
from random import randint
import pytest
from mmgroup.mm import mm_sub_test_prep_xy
from mmgroup import mat24 as m24
from mmgroup.tests.spaces.sparse_mm_space import SparseMmV
from mmgroup.tests.spaces.sparse_mm_space import SparseMmVector
from mmgroup.tests.groups.mgroup_n import MGroupNWord
from mmgroup.mm_space import characteristics
PRIMES = characteristics()
def _as_suboctad(v1, o):
d = m24.octad_to_gcode(o)
c = m24.ploop_cap(v1, d)
return m24.cocode_to_suboctad(c, d)
class prep_xy:
group = MGroupNWord
space = SparseMmVector
def __init__(self, eps, e, f):
self.f = f & 0x1fff
self.e = e & 0x1fff
self.eps = eps = eps & 0xfff
self.odd = (eps & 0x800) >> 11
lin = np.zeros(6, dtype = np.uint32)
mm_sub_test_prep_xy(f, e, eps, 1, lin)
self.lin_i = lin[:3]
self.lin_d = lin[3:6]
self.sign_XYZ = np.zeros(2048, dtype = np.uint32)
mm_sub_test_prep_xy(f, e, eps, 2, self.sign_XYZ)
self.s_T = np.zeros(759, dtype = np.uint32)
mm_sub_test_prep_xy(f, e, eps, 3, self.s_T)
def inv_op_unit(self, tag, d, j):
if tag == 'X':
tag1 = 'X'
d1 = d ^ self.lin_d[0]
j1 = j
sign = (self.sign_XYZ[d] & 1)
sign ^= (self.lin_i[0] >> j) & 1
if self.odd:
cc = m24.vect_to_cocode(1 << j)
sign ^= m24.scalar_prod(d, cc)
elif tag in 'ZY':
s = self.odd ^ (tag == 'Y')
tag1 = 'ZY'[s]
s += 1
d1 = d ^ self.lin_d[s]
j1 = j
sign = (self.sign_XYZ[d] >> s) & 1
sign ^= (self.lin_i[s] >> j) & 1
elif tag == 'T':
tag1 = 'T'
d1 = d
te = self.s_T[d]
so_exp = _as_suboctad(self.f, d)
assert te & 0x3f == so_exp , (hex(te), hex(so_exp))
j1 = j ^ (te & 0x3f)
sign = m24.suboctad_scalar_prod(j, (te >> 8) & 0x3f)
sign ^= (te >> 14) & 1
sign ^= m24.suboctad_weight(j) & self.odd & 1
assert ((te >> 15) ^ self.odd) & 1 == 0
else:
raise ValueError("Illegal tag " + str(tag))
return sign & 1, tag1, d1, j1
def inv_op(self, v):
w = self.space(v.p)
for value, tag, d, j in v.as_tuples():
sign, tag, d, j = self.inv_op_unit(tag, d, j)
if sign & 1:
value = -value % p
w += value * space(v.p, tag, d, j)
return w
def check_v(self, v, verbose = 0):
grp = self.group
delta_atom = grp('d', self.eps)
x_atom = grp('x', self.e)**(-1)
y_atom = grp('y', self.f)**(-1)
w_ref = v * delta_atom * x_atom * y_atom
w = self.inv_op(v)
error = w != w_ref
if error or verbose:
eps, e, f = self.eps, self.e, self.f
print("vector", v)
print("operation", "d_%xh * x_%xh * y_%xh" % (eps, e, f))
print("obtained:", w)
if error:
print(v * delta_atom , v, delta_atom)
print("expected:", w_ref)
raise ValueError("x-y operation failed")
print("Error: x-y operation failed!!!")
p = PRIMES[0]
space = SparseMmVector
def as_vector(x):
if isinstance(x, str):
data = [(tag, 'r') for tag in x]
return space(p, data)
if isinstance(x, tuple):
return space(p, *x)
if isinstance(x, list):
return space(p, x)
raise TypeError("Bad type for vector of rep")
p = PRIMES[0]
space = SparseMmVector
def prep_xy_testcases():
testcases = [
[ [("X", 3, 6)], 0, 0, 0x1171 ],
[ [("X", 3, 6)], 12, 0, 0 ],
[ [("X", 3, 6)], 12, 1111, 0 ],
[ [("X", 3, 6)], 12, 0, 1111],
[ [("Z", 0, 0)], 0, 0, 0],
[ [("Z", 0, 0)], 12, 0, 0],
[ [("Z", 0, 0)], 0, 34, 0],
[ [("Z", 0, 0)], 0x800, 0, 0],
[ [("Z", 0, 0)], 0x812, 0, 0],
[ [("Z", 0, 0)], 0x800, 34, 0],
[ [("Z", 0, 0)], 0x800, 0, 34],
]
for v, eps, e, f in testcases:
yield as_vector(v), prep_xy(eps, e, f)
v_tags = "TXZY"
for v in v_tags:
for i in range(1000):
v1 = as_vector(v)
eps = randint(0, 0xfff)
e = randint(0, 0x1fff)
f = randint(0, 0x1fff)
yield v1, prep_xy(eps, e, f)
@pytest.mark.mm
def test_prep_xy(verbose = 0):
print("Testing preparation of operation x-y...")
for v, op in prep_xy_testcases():
op.check_v(v, verbose = verbose)
if verbose: print("")
print("passed")
| [
"m.seysen@gmx.de"
] | m.seysen@gmx.de |
dd01c8f97e4ee6823d6132b76cc95d6b6bffdaac | 6eab9dfed4521d65df94da0cd2b6542793a7ba22 | /As1.py | 81349938f30450c5815e42540ddf773e1813c050 | [] | no_license | shreyansh-sinha/ML-Assignments | 430b04660773c6879885df04c4f72238724fd50e | 90eadde93e8c2e64b6c7cc6b15e7aee6c4fbb464 | refs/heads/master | 2020-12-18T12:52:08.480934 | 2020-05-23T07:15:38 | 2020-05-23T07:15:38 | 235,388,535 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,318 | py | import numpy as np
#import pandas as pd
import matplotlib.pyplot as plt
def hypothesis(theta, X, n): # h = X.B_transpose
    """Vectorized linear-regression hypothesis h = X @ theta.

    theta : parameter vector with n+1 entries (bias first); accepted as
            either shape (n+1,) or (1, n+1), like the original.
    X     : design matrix of shape (m, n+1) whose first column is ones.
    n     : number of features (so X has n+1 columns).
    Returns a float 1-D array of m predictions.

    Replaces the original per-row Python loop with a single matrix
    product — same values, one vectorized call.
    """
    return np.matmul(X, theta.reshape(n + 1)).astype(float)
# iterative updation
def gradient_descent(theta, learning_rate, iterations, h, X, Y, n):
    """Run batch gradient descent for `iterations` steps.

    theta : initial parameter vector with n+1 entries (updated in place).
    h     : hypothesis values for the initial theta (recomputed each step).
    Returns (theta reshaped to (1, n+1), per-iteration cost history).
    """
    cost = np.ones(iterations)
    for i in range(0, iterations):
        # Bias term first, then each feature weight. All updates in one
        # iteration use the h computed from the previous theta, so this is
        # a proper simultaneous update.
        theta[0] = theta[0] - (learning_rate/X.shape[0]) * sum(h - Y)
        for j in range(1, n+1):
            theta[j] = theta[j] - (learning_rate/X.shape[0]) * sum((h - Y) * X.transpose()[j])
        h = hypothesis(theta, X, n)
        # cost function = 1/(2*m) (sigma(h(x) - y) ** 2)
        cost[i] = (1/X.shape[0]) * 0.5 * sum(np.square(h - Y))
    theta = theta.reshape(1, n+1)
    return theta, cost
def linear_regression(X, y, alpha, num_iters):
    """Fit linear regression by batch gradient descent.

    X : (m, n) raw feature matrix — the bias column of ones is added here.
    y : (m,) target vector.
    alpha : learning rate; num_iters : number of gradient-descent steps.
    Returns (theta of shape (1, n+1), cost-history array).
    """
    n = X.shape[1] #size of X
    one_column = np.ones((X.shape[0],1))
    X = np.concatenate((one_column, X), axis = 1)
    # initializing the parameter vector...
    theta = np.zeros(n+1)
    #print(theta)
    # hypothesis calculation....
    h = hypothesis(theta, X, n)
    # returning the optimized parameters by Gradient Descent...
    theta, cost = gradient_descent(theta,alpha,num_iters,h,X,y,n)
    return theta, cost
# Airfoil Self-Noise dataset: 5 feature columns plus 1 target column
# (scaled sound pressure level), tab-separated.
data = np.loadtxt('airfoil_self_noise.dat', delimiter='\t')
X_train = data[:,:-1] #feature set...select all the input values
y_train = data[:,5] #label set...select the output values
mean = np.ones(X_train.shape[1]) # define mean array
std_dev = np.ones(X_train.shape[1]) # define standard deviation array
# Scaling Data
# shape attribute for numpy arrays returns dimensions of array
# if X has n rows and m columns then X.shape[0] is n and X.shape[1]
# is m
for i in range(0, X_train.shape[1]):
    mean[i] = np.mean(X_train.transpose()[i])
    std_dev[i] = np.std(X_train.transpose()[i])
    # Standardize every sample's i-th feature in place (z-score).
    for j in range(0, X_train.shape[0]):
        X_train[j][i] = (X_train[j][i] - mean[i])/std_dev[i]
iterations = 10000
learning_rate = 0.005
theta, cost = linear_regression(X_train, y_train, learning_rate, iterations)
print(theta)
print(cost[iterations-1])
# Plot the learning curve (cost versus iteration number).
cost = list(cost)
n_iterations = [x for x in range(1, 10001)]
plt.plot(n_iterations, cost)
plt.xlabel('Number of Iterations')
plt.ylabel('Cost Value')
| [
"noreply@github.com"
] | noreply@github.com |
44c17b4c002a53f8d2f4fddad9e6dde17fc7dbcb | dbebe89c24d43b54d5b0a83f04b5d5d753f08b0e | /webapi/sandbox/testapp_Https_Sim_Fianancial.py | 6511a410e12e5200af33ed8c6e86ea7603209cad | [] | no_license | jianhuayan/traffic-dispersive | 91d07bd82c86678922607624a47ff825abd64c72 | 8a5ee0272ed4a16c3c50c36b070e1100466a0a42 | refs/heads/master | 2021-07-10T09:46:19.702409 | 2017-10-13T15:24:31 | 2017-10-13T15:24:31 | 106,841,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,452 | py | from ixia.webapi import *
import ixchariotApi
import os
from subprocess import call
import dvn
IxiaIPaddr = dvn.const.IxiachariotIP
# webServerAddress = "https://'dvn.const.IxiachariotIP'"
webServerAddress = "https://" + IxiaIPaddr
print dvn.const.IxiachariotIP
print webServerAddress
apiVersion = "v1"
username = "N/A"
password = "N/A"
apiKey = "e31589d3-4cf1-4bd9-854d-18e9eac768a8" # Get the API Key from the web interface, Menu > My Account > Api Key
print "Connecting to " + webServerAddress
# api = webApi.connect(webServerAddress, apiVersion, None, username, password)
# It is also possible to connect with the API Key instead of username and password, using:
api = webApi.connect(webServerAddress, apiVersion, apiKey, None, None)
session = api.createSession("ixchariot")
print "Created session %s" % session.sessionId
print "Starting the session..."
session.startSession()
print "Configuring the test..."
# Configure few test options
testOptions = session.httpGet("config/ixchariot/testOptions")
testOptions.testDuration = 20
testOptions.consoleManagementQoS = ixchariotApi.getQoSTemplateFromResourcesLibrary(session, "Best Effort")
testOptions.endpointManagementQoS = ixchariotApi.getQoSTemplateFromResourcesLibrary(session, "Best Effort")
session.httpPut("config/ixchariot/testOptions", data = testOptions)
# Available endpoints used in test (list of 'testIP/mgmtIP' strings)
src_EndpointsList = [dvn.const.IxiaEpoint1 + "/" + dvn.const.IxiaMgmt1]
dst_EndpointsList = [dvn.const.IxiaEpoint2 + "/" + dvn.const.IxiaMgmt2]
# Create a new ApplicationMix
name = "AppMix 1"
objective = "USERS"
users = 1
direction = "SRC_TO_DEST"
topology = "FULL_MESH"
appmix = ixchariotApi.createApplicationMix(name, objective, users, direction, topology)
session.httpPost("config/ixchariot/appMixes", data = appmix)
# Configure endpoints for the AppMix
# This demonstrates how to manually assign endpoints to the test configuration using known IP addresses.
# If you want to assign an endpoint discovered by the Registration Server, use the ixchariotApi.getEndpointFromResourcesLibrary() function
# to get the data for httpPost
for src_Endpoint in src_EndpointsList:
ips = src_Endpoint.split('/')
session.httpPost("config/ixchariot/appMixes/1/network/sourceEndpoints", data = ixchariotApi.createEndpoint(ips[0], ips[1]))
for dst_Endpoint in dst_EndpointsList:
ips = dst_Endpoint.split('/')
session.httpPost("config/ixchariot/appMixes/1/network/destinationEndpoints", data = ixchariotApi.createEndpoint(ips[0], ips[1]))
# Add applications to the AppMix
# appName appRatio
appList = [
["HTTPS Simulated Financial", 100],
]
for i in range(0, len(appList)):
appData = appList[i]
appName = appData[0]
appRatio = appData[1]
appScript = ixchariotApi.getApplicationScriptFromResourcesLibrary(session, appName)
app = ixchariotApi.createApp(appScript, appRatio);
session.httpPost("config/ixchariot/appMixes/1/settings/applications", data = app)
try:
print "Starting the test..."
result = session.runTest()
print "The test ended"
#Save all results to CSV files.
print "Saving the test results into zipped CSV files...\n"
filePath = "testResults.zip"
with open(filePath, "wb+") as statsFile:
api.getStatsCsvZipToFile(result.testId, statsFile)
# Get results after test run.
# The functions below can also be used while the test is running, by using session.startTest() to start the execution,
# calling any of the results retrieval functions during the run, and using session.waitTestStopped() to wait for test end.
# You can use time.sleep() to call the results retrieval functions from time to time.
# These functions will return statistics for all the timestamps reported since the beginning of the test until the current moment.
# Get test level results.
# Note: the statistic names should be identical to those that appear in the results CSV
results = ixchariotApi.getTestLevelResults(session, ["Throughput"])
print "Test Level Results: \n"
for res in results:
# Each object in the list of results is of type Statistic (contains the statistic name and a list of StatisticValue objects).
print res.name
for val in res.values:
# The list will contain StatisticValue objects for all the reported timestamps since the beginning of the test.
# Each StatisticValue object contains the timestamp and the actual value.
print str(val.timestamp) + " " + str(val.value)
print ""
# Get group level results.
# Note: the statistic names should be identical to those that appear in the results CSV
results = ixchariotApi.getGroupLevelResults(session, ["Throughput"], "AppMix 1")
print "Group Level Results for AppMix 1:\n"
for res in results:
# Each object in the list of results has a printing function defined.
# It will print the name of the statistic and the list of timestamp - value pairs.
# For accessing each of these components separately see the example above.
print res
print ""
except Exception, e:
print "Error", e
print "Stopping the session..."
session.stopSession()
print "Deleting the session..."
session.httpDelete()
# NOTE(review): os.system returns the child's exit status, not its stdout;
# the `ls | grep | wc -l` pipeline exits 0 as long as wc runs, so `a` is
# almost always 0 whether or not testResults.zip exists. The line count was
# presumably wanted — subprocess.check_output would return it. TODO confirm.
a = int(os.system('ls | grep testResults.zip | wc -l'))
print a
# NOTE(review): $appData[0] is expanded by the shell, not Python; the Python
# variable appData is not exported, so this echoes an empty/literal string.
os.system('echo $appData[0]')
if a == 0:
    os.system('echo $appData[0]')
    # NOTE(review): `(echo $appData[0])` is not valid shell command
    # substitution (that would be $(...)); this mv target looks broken.
    os.system('mv testResults.zip (echo $appData[0])_testResults.zip')
    os.system('cp testResults.zip ./runningLog')
else:
    print "the testing is not finishing...."
| [
"jianhuayan@users.noreply.github.com"
] | jianhuayan@users.noreply.github.com |
8222458758108ced814b77fd8613c63ed0a6df86 | 395ba33c6faecc49eb3cbf32d7cc09ed4ee9c5f0 | /Person.py | ffcc9c3c461dc21d300e7524cb02c28bc96be5c5 | [] | no_license | MackRoe/Herd_Immunity_Term2 | 823d6e9ab8502efb09150529d364450a120a98dc | 236601fd9128790c9ec4d0e48163a700141f9d86 | refs/heads/master | 2020-09-26T06:23:56.950292 | 2019-12-10T18:07:25 | 2019-12-10T18:07:25 | 226,187,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | import random
from Virus import Virus
class Person:
    ''' The simulation will contain people who will make up a population.'''

    def __init__(self, is_vaccinated, infection=None):
        ''' We start out with is_alive = True
        All other values will be set by the simulation through the parameters
        when it instantiates each Person object.
        '''
        self.is_alive = True                 # boolean
        self.is_vaccinated = is_vaccinated   # boolean
        self.infection = infection           # virus object (or None)

    def did_survive_infection(self):
        ''' Generate a random number between 0.0 and 1.0 and compare to the
        virus's mortality_num.
        If the random number is smaller, person dies from the disease. Set the
        person's is alive attribute to False
        If Person survives, they become vaccinated and they have no infection
        (set the vaccinated attribute to True and the infection to None)
        Return True if they survived the infection and False if they did not.
        '''
        # BUG FIX: random.randint rejects float bounds; random.uniform draws
        # the real number in [0.0, 1.0] that the docstring describes.
        compare = random.uniform(0.0, 1.0)
        if compare < self.infection.mortality_num:
            self.is_alive = False
            return False
        else:
            # BUG FIX: the original assigned the LOCAL names `vaccinated` and
            # `infection`, leaving the instance unchanged; set the attributes
            # as documented above.
            self.is_vaccinated = True
            self.infection = None
            return True
| [
"elaine.music@students.makeschool.com"
] | elaine.music@students.makeschool.com |
15ea8659d5ebb57864269738d0afdbd7d47851c3 | 7eaf758ed8954794ddcf0d56ca247b31ce68af55 | /dapl_ckeditor/app_ckeditor/urls.py | 137a4999f6819af53ba1b45926f5892668891cc9 | [] | no_license | b4isty/Django-Ckeditor | 72a9050c464860931e0b85d2984a68fdca2bfa0e | fe11c3ecdb9706c6aade959ad688ca2f39ce4fe8 | refs/heads/master | 2020-03-23T20:07:58.233621 | 2018-11-14T15:00:16 | 2018-11-14T15:00:16 | 142,023,260 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from django.urls import path
from . import views
# URL routes for the blog app; each route name is what templates/reverse() use.
urlpatterns = [
    path('home/', views.home, name='home'),
    path('blog/', views.blog, name='blog'),
    path('blog_list/', views.blog_list, name='blog_list'),
    # The detail/edit/delete views receive the post's primary key from the URL.
    path('blog_details/<int:pk>/', views.blog_detail_view, name='blog_details'),
    path('blog_edit/<int:pk>/', views.blog_edit_view, name='blog_edit'),
    path('blog_delete/<int:pk>/', views.blog_delete, name='blog_delete')
]
| [
"baishakhi@digitalaptech.com"
] | baishakhi@digitalaptech.com |
fe4b88457337dd6b7961c723050db6e1729548f3 | 6cb4f70534e4087ef11163a1c660374784a9bb6c | /skia/skia_library.gypi | 9fc63d05a542b784b4aeb847ab61b87833d9e715 | [
"BSD-3-Clause"
] | permissive | yodamaster/engine | 07a3e576b680f6c2d0db30c0b0be763d279f5884 | 33e5611409d261f8783e762e69e82b1dfa3ac480 | refs/heads/master | 2021-01-16T22:22:17.491692 | 2015-11-13T22:02:30 | 2015-11-13T22:02:30 | 46,162,097 | 1 | 0 | null | 2015-11-14T04:58:45 | 2015-11-14T04:58:45 | null | UTF-8 | Python | false | false | 13,850 | gypi | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This gypi file contains the Skia library.
# In component mode (shared_lib) it is folded into a single shared library with
# the Chrome-specific enhancements but in all other cases it is a separate lib.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!WARNING!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# variables and defines should go in skia_common.gypi so they can be seen
# by files listed here and in skia_library_opts.gypi.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!WARNING!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
{
'dependencies': [
'skia_library_opts.gyp:skia_opts',
'../third_party/zlib/zlib.gyp:zlib',
],
'includes': [
'../third_party/skia/gyp/core.gypi',
'../third_party/skia/gyp/effects.gypi',
'../third_party/skia/gyp/pdf.gypi',
'../third_party/skia/gyp/utils.gypi',
],
'sources': [
'../third_party/skia/src/ports/SkImageDecoder_empty.cpp',
'../third_party/skia/src/images/SkScaledBitmapSampler.cpp',
'../third_party/skia/src/images/SkScaledBitmapSampler.h',
'../third_party/skia/src/ports/SkFontConfigInterface_direct.cpp',
'../third_party/skia/src/fonts/SkFontMgr_fontconfig.cpp',
'../third_party/skia/src/ports/SkFontHost_fontconfig.cpp',
'../third_party/skia/src/fonts/SkFontMgr_indirect.cpp',
'../third_party/skia/src/fonts/SkRemotableFontMgr.cpp',
'../third_party/skia/src/ports/SkRemotableFontMgr_win_dw.cpp',
'../third_party/skia/src/ports/SkImageGenerator_none.cpp',
'../third_party/skia/src/ports/SkFontHost_FreeType.cpp',
'../third_party/skia/src/ports/SkFontHost_FreeType_common.cpp',
'../third_party/skia/src/ports/SkFontHost_FreeType_common.h',
'../third_party/skia/src/ports/SkFontHost_mac.cpp',
'../third_party/skia/src/ports/SkFontHost_win.cpp',
'../third_party/skia/src/ports/SkFontMgr_android.cpp',
'../third_party/skia/src/ports/SkFontMgr_android_factory.cpp',
'../third_party/skia/src/ports/SkFontMgr_android_parser.cpp',
'../third_party/skia/src/ports/SkFontMgr_win_dw.cpp',
'../third_party/skia/src/ports/SkGlobalInitialization_chromium.cpp',
'../third_party/skia/src/ports/SkOSFile_posix.cpp',
'../third_party/skia/src/ports/SkOSFile_stdio.cpp',
'../third_party/skia/src/ports/SkOSFile_win.cpp',
'../third_party/skia/src/ports/SkScalerContext_win_dw.cpp',
'../third_party/skia/src/ports/SkScalerContext_win_dw.h',
'../third_party/skia/src/ports/SkTime_Unix.cpp',
'../third_party/skia/src/ports/SkTLS_pthread.cpp',
'../third_party/skia/src/ports/SkTLS_win.cpp',
'../third_party/skia/src/ports/SkTypeface_win_dw.cpp',
'../third_party/skia/src/ports/SkTypeface_win_dw.h',
'../third_party/skia/src/sfnt/SkOTTable_name.cpp',
'../third_party/skia/src/sfnt/SkOTTable_name.h',
'../third_party/skia/src/sfnt/SkOTUtils.cpp',
'../third_party/skia/src/sfnt/SkOTUtils.h',
'../third_party/skia/include/core/SkFontStyle.h',
'../third_party/skia/include/images/SkMovie.h',
'../third_party/skia/include/images/SkPageFlipper.h',
'../third_party/skia/include/ports/SkFontConfigInterface.h',
'../third_party/skia/include/ports/SkFontMgr.h',
'../third_party/skia/include/ports/SkFontMgr_indirect.h',
'../third_party/skia/include/ports/SkRemotableFontMgr.h',
'../third_party/skia/include/ports/SkTypeface_win.h',
],
# Exclude all unused files in skia utils.gypi file
'sources!': [
'../third_party/skia/include/utils/SkBoundaryPatch.h',
'../third_party/skia/include/utils/SkFrontBufferedStream.h',
'../third_party/skia/include/utils/SkCamera.h',
'../third_party/skia/include/utils/SkCanvasStateUtils.h',
'../third_party/skia/include/utils/SkCubicInterval.h',
'../third_party/skia/include/utils/SkCullPoints.h',
'../third_party/skia/include/utils/SkDebugUtils.h',
'../third_party/skia/include/utils/SkDumpCanvas.h',
'../third_party/skia/include/utils/SkEventTracer.h',
'../third_party/skia/include/utils/SkInterpolator.h',
'../third_party/skia/include/utils/SkLayer.h',
'../third_party/skia/include/utils/SkMeshUtils.h',
'../third_party/skia/include/utils/SkNinePatch.h',
'../third_party/skia/include/utils/SkParsePaint.h',
'../third_party/skia/include/utils/SkParsePath.h',
'../third_party/skia/include/utils/SkRandom.h',
'../third_party/skia/src/utils/SkBitmapHasher.cpp',
'../third_party/skia/src/utils/SkBitmapHasher.h',
'../third_party/skia/src/utils/SkBoundaryPatch.cpp',
'../third_party/skia/src/utils/SkFrontBufferedStream.cpp',
'../third_party/skia/src/utils/SkCamera.cpp',
'../third_party/skia/src/utils/SkCanvasStack.h',
'../third_party/skia/src/utils/SkCubicInterval.cpp',
'../third_party/skia/src/utils/SkCullPoints.cpp',
'../third_party/skia/src/utils/SkDumpCanvas.cpp',
'../third_party/skia/src/utils/SkFloatUtils.h',
'../third_party/skia/src/utils/SkInterpolator.cpp',
'../third_party/skia/src/utils/SkLayer.cpp',
'../third_party/skia/src/utils/SkMD5.cpp',
'../third_party/skia/src/utils/SkMD5.h',
'../third_party/skia/src/utils/SkMeshUtils.cpp',
'../third_party/skia/src/utils/SkNinePatch.cpp',
'../third_party/skia/src/utils/SkOSFile.cpp',
'../third_party/skia/src/utils/SkParsePath.cpp',
'../third_party/skia/src/utils/SkPathUtils.cpp',
'../third_party/skia/src/utils/SkSHA1.cpp',
'../third_party/skia/src/utils/SkSHA1.h',
'../third_party/skia/src/utils/SkTFitsIn.h',
'../third_party/skia/src/utils/SkTLogic.h',
# We don't currently need to change thread affinity, so leave out this complexity for now.
"../third_party/skia/src/utils/SkThreadUtils_pthread_mach.cpp",
"../third_party/skia/src/utils/SkThreadUtils_pthread_linux.cpp",
#windows
'../third_party/skia/include/utils/win/SkAutoCoInitialize.h',
'../third_party/skia/include/utils/win/SkHRESULT.h',
'../third_party/skia/include/utils/win/SkIStream.h',
'../third_party/skia/include/utils/win/SkTScopedComPtr.h',
'../third_party/skia/src/utils/win/SkAutoCoInitialize.cpp',
'../third_party/skia/src/utils/win/SkIStream.cpp',
'../third_party/skia/src/utils/win/SkWGL_win.cpp',
#testing
'../third_party/skia/src/fonts/SkGScalerContext.cpp',
'../third_party/skia/src/fonts/SkGScalerContext.h',
],
'include_dirs': [
'../third_party/skia/include/c',
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/include/images',
'../third_party/skia/include/lazy',
'../third_party/skia/include/pathops',
'../third_party/skia/include/pdf',
'../third_party/skia/include/pipe',
'../third_party/skia/include/ports',
'../third_party/skia/include/record',
'../third_party/skia/include/utils',
'../third_party/skia/src/core',
'../third_party/skia/src/opts',
'../third_party/skia/src/image',
'../third_party/skia/src/pdf',
'../third_party/skia/src/ports',
'../third_party/skia/src/sfnt',
'../third_party/skia/src/utils',
'../third_party/skia/src/lazy',
],
'conditions': [
['skia_support_gpu != 0', {
'includes': [
'../third_party/skia/gyp/gpu.gypi',
],
'sources': [
'<@(skgpu_null_gl_sources)',
'<@(skgpu_sources)',
],
'include_dirs': [
'../third_party/skia/include/gpu',
'../third_party/skia/src/gpu',
],
}],
['skia_support_pdf == 0', {
'sources/': [
['exclude', '../third_party/skia/src/doc/SkDocument_PDF.cpp'],
['exclude', '../third_party/skia/src/pdf/'],
],
}],
['skia_support_pdf == 1', {
'dependencies': [
'../third_party/sfntly/sfntly.gyp:sfntly',
],
}],
[ 'OS == "win"', {
'sources!': [
# Keeping _win.cpp
"../third_party/skia/src/utils/SkThreadUtils_pthread.cpp",
"../third_party/skia/src/utils/SkThreadUtils_pthread_other.cpp",
],
},{
'sources!': [
# Keeping _pthread.cpp and _pthread_other.cpp
"../third_party/skia/src/utils/SkThreadUtils_win.cpp",
],
}],
[ 'OS != "mac"', {
'sources/': [
['exclude', '/mac/']
],
}],
[ 'OS == "android" and target_arch == "arm"', {
'sources': [
'../third_party/skia/src/core/SkUtilsArm.cpp',
],
'includes': [
'../build/android/cpufeatures.gypi',
],
}],
[ 'desktop_linux == 1 or chromeos == 1', {
'dependencies': [
'../build/linux/system.gyp:fontconfig',
'../build/linux/system.gyp:freetype2',
'../third_party/icu/icu.gyp:icuuc',
],
'cflags': [
'-Wno-unused',
'-Wno-unused-function',
],
}],
[ 'use_cairo == 1 and use_pango == 1', {
'dependencies': [
'../build/linux/system.gyp:pangocairo',
],
}],
[ 'OS=="win" or OS=="mac" or OS=="ios" or OS=="android"', {
'sources!': [
'../third_party/skia/src/ports/SkFontConfigInterface_direct.cpp',
'../third_party/skia/src/ports/SkFontHost_fontconfig.cpp',
'../third_party/skia/src/fonts/SkFontMgr_fontconfig.cpp',
],
}],
[ 'OS=="win" or OS=="mac" or OS=="ios"', {
'sources!': [
'../third_party/skia/src/ports/SkFontHost_FreeType.cpp',
'../third_party/skia/src/ports/SkFontHost_FreeType_common.cpp',
],
}],
[ 'OS == "android"', {
'dependencies': [
'../third_party/expat/expat.gyp:expat',
'../third_party/freetype/freetype.gyp:ft2',
],
# This exports a hard dependency because it needs to run its
# symlink action in order to expose the skia header files.
'hard_dependency': 1,
'include_dirs': [
'../third_party/expat/files/lib',
],
}, { # not 'OS == "android"'
'sources!': [
"../third_party/skia/src/ports/SkFontMgr_android_factory.cpp",
'../third_party/skia/src/ports/SkFontMgr_android_parser.cpp',
],
}],
[ 'OS == "ios"', {
'include_dirs': [
'../third_party/skia/include/utils/ios',
'../third_party/skia/include/utils/mac',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/ImageIO.framework',
],
},
'sources': [
# This file is used on both iOS and Mac, so it should be removed
# from the ios and mac conditions and moved into the main sources
# list.
'../third_party/skia/src/utils/mac/SkStream_mac.cpp',
],
# The main skia_opts target does not currently work on iOS because the
# target architecture on iOS is determined at compile time rather than
# gyp time (simulator builds are x86, device builds are arm). As a
# temporary measure, this is a separate opts target for iOS-only, using
# the _none.cpp files to avoid architecture-dependent implementations.
'dependencies': [
'skia_library_opts.gyp:skia_opts_none',
],
'dependencies!': [
'skia_library_opts.gyp:skia_opts',
],
}],
[ 'OS == "mac"', {
'direct_dependent_settings': {
'include_dirs': [
'../third_party/skia/include/utils/mac',
],
},
'include_dirs': [
'../third_party/skia/include/utils/mac',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AppKit.framework',
],
},
'sources': [
'../third_party/skia/src/utils/mac/SkStream_mac.cpp',
],
}],
[ 'OS == "win"', {
'sources!': [
'../third_party/skia/src/ports/SkOSFile_posix.cpp',
'../third_party/skia/src/ports/SkTime_Unix.cpp',
'../third_party/skia/src/ports/SkTLS_pthread.cpp',
],
'include_dirs': [
'../third_party/skia/include/utils/win',
'../third_party/skia/src/utils/win',
],
},{ # not 'OS == "win"'
'sources!': [
'../third_party/skia/src/ports/SkFontMgr_win_dw.cpp',
'../third_party/skia/src/ports/SkRemotableFontMgr_win_dw.cpp',
'../third_party/skia/src/ports/SkScalerContext_win_dw.cpp',
'../third_party/skia/src/ports/SkScalerContext_win_dw.h',
'../third_party/skia/src/ports/SkTypeface_win_dw.cpp',
'../third_party/skia/src/ports/SkTypeface_win_dw.h',
'../third_party/skia/src/utils/win/SkDWrite.h',
'../third_party/skia/src/utils/win/SkDWrite.cpp',
'../third_party/skia/src/utils/win/SkDWriteFontFileStream.cpp',
'../third_party/skia/src/utils/win/SkDWriteFontFileStream.h',
'../third_party/skia/src/utils/win/SkDWriteGeometrySink.cpp',
'../third_party/skia/src/utils/win/SkDWriteGeometrySink.h',
'../third_party/skia/src/utils/win/SkHRESULT.cpp',
],
}],
],
'target_conditions': [
# Pull in specific Mac files for iOS (which have been filtered out
# by file name rules).
[ 'OS == "ios"', {
'sources/': [
['include', 'SkFontHost_mac\\.cpp$',],
['include', 'SkStream_mac\\.cpp$',],
['include', 'SkCreateCGImageRef\\.cpp$',],
],
'xcode_settings' : {
'WARNING_CFLAGS': [
# SkFontHost_mac.cpp uses API deprecated in iOS 7.
# crbug.com/408571
'-Wno-deprecated-declarations',
],
},
}],
],
'direct_dependent_settings': {
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/include/pdf',
'../third_party/skia/include/gpu',
'../third_party/skia/include/lazy',
'../third_party/skia/include/pathops',
'../third_party/skia/include/pipe',
'../third_party/skia/include/ports',
'../third_party/skia/include/utils',
],
},
}
| [
"jackson@google.com"
] | jackson@google.com |
76e5e742f70e3956df15fe104b869bd14bb845b2 | a68cf0acc3127303bed87d982558aa458ff5ad62 | /VRD/__init__.py | 1a1456cb40908938312912d832749d3b2c1e6dff | [] | no_license | AbhiJay-K/VRD | cf45f1a3047c0906d996c851e576c0d746b5b013 | 673ca3818a558c9ca3e5ada8290bb1f2da0fda1b | refs/heads/main | 2023-08-11T16:35:48.943967 | 2021-09-23T15:27:45 | 2021-09-23T15:27:45 | 395,602,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | __version__ = '0.22' | [
""
] | |
c6b05976675faa83c8508a32dc60ddb4607ba399 | 9975809b516d3e6ff4cf3082761fde8f2c4cdcdb | /blogengine/blog/utils.py | db7d922921be4c805f219e53a1682a3a7de65ebb | [] | no_license | ameagle/django1 | ecba2b4a93724d92c4446e2b1957163ead9cfdc3 | f7895970b4480324be332e4a16b2c807d8b88bab | refs/heads/master | 2023-08-05T02:14:10.624488 | 2021-09-26T19:07:19 | 2021-09-26T19:07:19 | 408,866,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | from django.shortcuts import render, redirect
from django.shortcuts import get_object_or_404
from .models import *
class ObjectDetailMixin:
    """Class-based-view mixin: render a single model instance found by slug."""

    model = None     # model class to look up
    template = None  # template used to render the instance

    def get(self, request, slug):
        """Fetch the object by slug (404 when absent) and render the template."""
        instance = get_object_or_404(self.model, slug__iexact=slug)
        context_key = self.model.__name__.lower()
        return render(request, self.template, context={context_key: instance})
class ObjectCreateMixin():
    """CBV mixin: show an empty model form (GET) and create an object (POST)."""

    model_form = None  # ModelForm subclass used for creation
    template = None    # template that renders the form

    def get(self, request):
        """Render an unbound form."""
        return render(request, self.template, context={'form': self.model_form()})

    def post(self, request):
        """Validate the submitted form and redirect to the new object on success."""
        bound_form = self.model_form(request.POST)
        if not bound_form.is_valid():
            # Re-render the page with validation errors attached to the form.
            return render(request, self.template, context={'form': bound_form})
        new_obj = bound_form.save()
        return redirect(new_obj)
"ao@ixi.ru"
] | ao@ixi.ru |
250e40e1c9cc2fcc4091722bca3c92a70c3ac1bf | feab2811821b0d7bcb6dc4c7b29c703757a85747 | /and.py | 3bb581c8b3225921eca07c515454b7223ba2cebd | [] | no_license | smritipillai/256314_Daily_Commits | b2e6638efde4dfdd19c4b3b8fc375160c7350080 | 90dbbc896213c9d9239b4c10eafb0e7ccbc7c2e2 | refs/heads/main | 2023-04-03T14:49:26.089096 | 2021-04-23T06:04:22 | 2021-04-23T06:04:22 | 359,127,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | if (1==1) and (2+2 >3):
print("true")
else
print("false")
| [
"smritipillai.smriti@gmail.com"
] | smritipillai.smriti@gmail.com |
5167039388d0d43817b5eb5500459c05bf2b35e8 | c181023ce9db43e957df86420d3005b677d16fde | /Boxplot_calculations.py | d0d12c4969daa087e46251bc2f6687d243be6d79 | [] | no_license | dcmuelle/Master-Thesis | 5c9184145db2c4084a06e38f5795af9fa6bf5dcf | 70cffd1ba5d3df37dfc5a54b38070c541b279af0 | refs/heads/main | 2023-07-07T21:37:01.079184 | 2021-08-10T06:51:18 | 2021-08-10T06:51:18 | 394,547,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,338 | py | {
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "olympic-particle",
"metadata": {},
"outputs": [],
"source": [
"def Boxplot_calculations(meteo_data):\n",
"meteo_data = meteo_data[meteo_data.time.dt.year == year]\n",
"hourly_average=meteo_data.groupby([meteo_data[\"time\"].dt.month, meteo_data[\"time\"].dt.day, meteo_data[\"time\"].dt.hour]).mean()\n",
"hourly_average.index.names = [\"month\", \"day\", \"hour\"]\n",
"hourly_average['Prod/m2'] = hourly_average['G(i)']*0.17/1000\n",
"hourly_average['Prod'] = hourly_average['Prod/m2']*size\n",
"yearly_PV_prod = hourly_average['Prod'].sum()\n",
"PV_production = hourly_average['Prod']\n",
"power_balance = pd.DataFrame()\n",
"power_balance['consumption'] = total_elec_load\n",
"power_balance['from PV'] = PV_production\n",
"power_balance['exchange grid'] = PV_production - total_elec_load\n",
"power_balance['to Grid'] = (PV_production - total_elec_load).clip(lower=0)\n",
"power_balance['from Grid'] = (total_elec_load - PV_production).clip(lower=0)\n",
"power_balance = power_balance.fillna(0)\n",
"total_elec_load = load_SFH_modern_full_retrofit['Total Electricity without AC']\n",
"power_balance = pd.DataFrame()\n",
"power_balance['consumption'] = total_elec_load\n",
"power_balance['from PV'] = PV_production\n",
"power_balance['exchange grid'] = PV_production - total_elec_load\n",
"power_balance['to Grid'] = (PV_production - total_elec_load).clip(lower=0)\n",
"power_balance['from Grid'] = (total_elec_load - PV_production).clip(lower=0)\n",
"power_balance = power_balance.fillna(0)\n",
"power_balance = BatteryDispatch(power_balance, battery_size, eta_discharge, max_c_charge, max_c_discharge)\n",
"power_balance['exchange grid new'] = power_balance['to Grid New'] - power_balance['from Grid New']"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| [
"PsiVmGij75"
] | PsiVmGij75 |
0c3252c4dcf7604b633a0875ad23e72836719ee2 | 75ff3b2483447ae18bffe508fe66844bf5e57199 | /course_parsers/campus_course_parser.py | afc239a915693a790d2f4822245d63c4814f5a26 | [] | no_license | SantoshSrinivas79/StudyBoi | 84bdaa4227d05abcd46a3ba22e49ad96ebec5309 | 2c56b2ff35cbb3f85efb4de0168966d7d7d47791 | refs/heads/master | 2022-04-24T20:27:53.202585 | 2020-04-27T12:51:45 | 2020-04-27T12:51:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | import requests
from bs4 import BeautifulSoup
from parameters import *
class Course:
    """Plain value object holding the scraped fields of a single course."""

    def __init__(self, title, duration, link, description):
        # Store the scraped values verbatim.
        self.title = title
        self.duration = duration
        self.link = link
        self.description = description
def parse_course(url):
    """Fetch a campus.gov.il course page and print its label/value info fields.

    Never raises: parsing problems are reported on stdout instead.
    `headers` comes from `parameters` (star import at the top of the file).
    """
    try:
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.text, 'lxml')
        workbox = soup.find('div', class_='wrap-info-single-course-inner')
        inner_workbox = workbox.find('div', class_='content-info-wrap')
        for field in inner_workbox:
            try:
                spans = field.find_all('span')
                print(f"{spans[0].text}{spans[1].text.replace(' ','').replace('', '')}")
                print('_____________________')
            except (AttributeError, IndexError):
                # Not every child of the wrapper is a tag with a span pair.
                pass
    except Exception as exc:
        # Bug fix: the original bare `except: print('a')` swallowed every
        # error with a meaningless message; report what actually went wrong.
        print(f'Failed to parse course page {url}: {exc}')
parse_course('https://campus.gov.il/course/course-v1-cs-gov_cs_selfpy101/') | [
"urigami2010@gmail.com"
] | urigami2010@gmail.com |
922c0ec014cbb7e3e0b8cf73bd810fb9a6f986a2 | 631a90a2af858b784f19b1242c91f1aaa807cd86 | /Merging-catalogs-V2/Benchmark_plotter.py | 48606cac840ba664f9ece3aefc47d89f97620209 | [] | no_license | atilapaes/PhD-PostDoc | 560430cc8aa0f845934216acf9ec42d2aed9046b | 8611e636e8d9974b3f4fdf24739f474131b9ea51 | refs/heads/master | 2022-12-19T21:06:53.738581 | 2020-09-26T03:54:42 | 2020-09-26T03:54:42 | 281,584,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 01:48:07 2020
@author: atilapaes
"""
import pandas
#%% Data importing and preparation
# Import catalog of single events (no duplicate present)
catalog = pandas.read_csv('result_catalog_merged_ES_MFA_FINAL.csv', index_col='Unnamed: 0')
catalog['datetime'] = pandas.to_datetime(catalog['datetime'])

#%% Generate the data
# Printed using set(catalog['Date'].values).  The survey days are listed
# explicitly so that days without any detected event still appear (with a
# zero count) in the final bar chart.
list_days = ['2016-10-26', '2016-10-27', '2016-10-28', '2016-10-29', '2016-10-30', '2016-10-31',
             '2016-11-01', '2016-11-02', '2016-11-03', '2016-11-04', '2016-11-05', '2016-11-06',
             '2016-11-07', '2016-11-08', '2016-11-09', '2016-11-10', '2016-11-11', '2016-11-12', '2016-11-13',
             '2016-11-14', '2016-11-15', '2016-11-16', '2016-11-17', '2016-11-18', '2016-11-19',
             '2016-11-20', '2016-11-21', '2016-11-22', '2016-11-23', '2016-11-24', '2016-11-25',
             '2016-11-26', '2016-11-27', '2016-11-28', '2016-11-29', '2016-11-30']

#%% Creating the dataframe
# Improvement: the original initialised the count columns with empty strings
# and filled them day by day with repeated boolean filtering over the whole
# catalog (O(days x catalog)); a single crosstab counts every (day, source)
# pair in one pass, and reindex restores zero-event days and column order.
benchmark = pandas.crosstab(catalog['Date'], catalog['source'])
benchmark = benchmark.reindex(index=list_days, columns=['ES', 'MFA', 'Both'], fill_value=0)
benchmark.index.name = 'Date'

#%% Plot
benchmark.plot.bar(title='Events detected', figsize=(15, 10), fontsize=12)
"atila.paes@gmail.com"
] | atila.paes@gmail.com |
8c988b95d39cf5d55d0879a8e1fb1ad9356e1543 | 843da58da462f0d82c847c12a4b67eeaee072e3d | /r2env/__init__.py | 92d4d42ea0d3c4f5fb6debe86e1689ae9b80ad18 | [
"MIT"
] | permissive | as0ler/r2env | 1864a2d716c688aaac4ed944a3ca65a75a373381 | 8796a6502741ccef82bdc7174c2ad5f04c87b3ef | refs/heads/master | 2023-05-30T08:51:02.346778 | 2021-05-24T22:41:03 | 2021-05-24T22:41:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | from r2env.package import Package
from r2env.repl import main
import r2env.ipdb
import os
def load_packages(cfg):
    """Instantiate every known package description with the given config."""
    # Imported lazily so the package database is only loaded when needed.
    from r2env.db import Radare2
    from r2env.db import R0
    return [Radare2(cfg), R0(cfg)]
# Default package configuration handed to every entry in the package database.
cfg = {
    "srcdir": "",  # depends on the pkg
    "linkdir": "/usr",  # prefix under which built artifacts are linked
    "envdir": 123,  # NOTE(review): looks like a placeholder value — confirm intended type
    "prefix": "",
}
class R2Env:
    """Facade over the package database and the on-disk .r2env workspace."""

    def __init__(self):
        # Package descriptions available for installation (see load_packages).
        self.db = load_packages(cfg)

    def init(self):
        """Create the local .r2env working directory if it does not exist."""
        if not os.path.isdir(".r2env"):
            os.mkdir(".r2env")

    def version(self):
        """Return the r2env version string."""
        return "0.2.0"

    def available_packages(self):
        """Return the list of packages that can be installed."""
        return self.db

    def installed_packages(self):
        """List currently installed packages."""
        # Bug fix: the module imports `r2env.ipdb`, so the bare name `ipdb`
        # was undefined here and raised NameError at call time.
        return r2env.ipdb.list()

    def clean_package(self, pkgname):
        """Remove an installed package's files."""
        # Bug fix: same qualification issue as installed_packages().
        return r2env.ipdb.clean(pkgname)
| [
"pancake@nopcode.org"
] | pancake@nopcode.org |
b81eacbae1cc55af0ff7165de0962951628a87e6 | 6fa14cd7be2d22553496326bce954b74dd6779bd | /ejercicios 1er año/impares hasta 100.py | 94d9029aa6a93b794e87029da74ab48b1637eed9 | [] | no_license | nucleomis/Archivos_Python | 28e93dfac08eee604f25173ecdbfc3e86eb952ef | dd3e104bb38e9763d1e5efb22614e845f21d68f1 | refs/heads/main | 2023-06-22T13:19:27.952059 | 2021-07-20T14:05:13 | 2021-07-20T14:05:13 | 387,810,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | ##Hacer un pseudocódigo que imprima los números impares hasta el
##100 y que Imprima cuantos impares hay.
# Print the odd numbers up to 100 and report how many there are.
# Bug fix: the original incremented `a` BEFORE printing, so it skipped 1,
# printed 3..99 and reported 49 odd numbers instead of 50.
cont = 0
for a in range(1, 100, 2):
    print(a)
    cont = cont + 1
print("la cantidad de veces que se repiten los impares son", cont)
"nucleo.mis@gmail.com"
] | nucleo.mis@gmail.com |
fdc34fc0a555f3f41229467a66e4e653cb445b40 | a45f69e1daf40a933a5805eddda36d59658815de | /cloudipsp/async_api.py | c8f3ef18ebb6eafc068660e7b594636dcc96f4ff | [
"MIT"
] | permissive | xen/cloudipsp_async | c97f4dba286c8999162a3854aa82427ec35bb4bf | 6e1045de16ad535d858860cd202f8bd0f887aa83 | refs/heads/master | 2022-12-04T11:37:16.820299 | 2020-08-25T21:21:37 | 2020-08-25T21:21:37 | 289,747,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | import logging
from types import MethodType
from cloudipsp import exceptions
try:
import aiohttp
except ImportError:
aiohttp = False
from cloudipsp.api import BaseAPI
log = logging.getLogger(__name__)
class AsyncAPI(BaseAPI):
    """Asynchronous variant of the API client; requires the aiohttp package."""

    is_async = True  # lets shared code distinguish this client from the sync one

    def __init__(self, **kwargs):
        """Initialise the client, failing fast when aiohttp is unavailable."""
        if aiohttp is False:
            # `aiohttp` is set to False at module level when its import failed.
            raise ModuleNotFoundError(
                "Run 'pip install -U aiohttp' to work with AsyncAPI"
            )
        super().__init__(**kwargs)
| [
"mkashkin@gmail.com"
] | mkashkin@gmail.com |
83acc1d478a46b104cd5f8f7702ae959347f562a | 2b757e74a9ec0a208a1591fd7597d2975bbc5f1d | /app.py | 3bd6635749da14fdcab2bd448f077641b760dff8 | [] | no_license | surfeatcoderepeat/multifinger | cb2110f639c9fe0aeb59b0d789e504569e1005df | 627ecebd3fa2d3d6cae58855051a230297d7b705 | refs/heads/main | 2023-07-14T18:17:41.545934 | 2021-08-27T13:17:48 | 2021-08-27T13:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,221 | py | import base64
import datetime
import io
import dash
from dash.dependencies import Input, Output, State, MATCH, ALL
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import plotly.express as px
import lasio
import pandas as pd
import plotly.graph_objects as go
import numpy as np
from dash.exceptions import PreventUpdate
import re
import webbrowser
# Bootstrap-themed Dash application; callbacks may target components that are
# created dynamically, hence suppress_callback_exceptions.
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
app.config['suppress_callback_exceptions'] = True
# WSGI entry point for deployment (e.g. `gunicorn app:server`).
server = app.server

# Fixed, scrollable left-hand control panel.
SIDEBAR_STYLE = {
    "position": "fixed",
    "top": 0,
    "left": 0,
    "bottom": 0,
    "width": "16rem",
    "padding": "2rem 1rem",
    "background-color": "#f9f9fa",
    "overflow": "scroll",
}

# Main plotting area, offset to the right of the fixed sidebar.
CONTENT_STYLE = {
    "margin-left": "18rem",
    "margin-right": "2rem",
    "padding": "2rem 1rem",
}
sidebar = html.Div([
html.H3("Visualizador Multifinger", className="display-5", style={'textAlign':'center'}),
html.Hr(),
dcc.Upload(
id='upload-data',
children=html.Div([
'Drag and Drop your .las file'
]),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '2px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
},
),
html.Hr(),
html.Div(id='select_fingers'),
html.Div(id='select_curves'),
html.Hr(),
html.Button('Graficar', id='graficar'),
],
style=SIDEBAR_STYLE,
)
content = html.Div(id="page-content",
style=CONTENT_STYLE,
children=[
dcc.Store(id='stored-data'),
dcc.Store(id='radios_units'),
dbc.Row([
dbc.Col(
[html.H3('POLAR PLOT', style={'textAlign':'center'}),
dcc.Graph(id="plot2d", figure={
'layout': go.Layout(
xaxis = {
'visible': False
},
yaxis = {
'visible': False,
}
)
}),
html.Div(
dcc.Input(id='polar_center',
type='number',
placeholder='Input an MD to plot',
),
style=dict(display='flex', justifyContent='center'),
)
], width=4),
dbc.Col([
html.H3('3D SURFACE', style={'textAlign':'center'}),
dcc.Graph(id="plot3d", figure={
'layout': go.Layout(
xaxis = {
'visible': False
},
yaxis = {
'visible': False,
}
)
}),
dcc.RangeSlider(id='slider',
tooltip = { 'always_visible': True },
),
], width=6 ),
dbc.Col([
html.H5('Z aspect ratio'),
dcc.Input(id='z-aspectratio',
type='number',
value=1
),
html.Hr(),
html.H5('X-Y aspect ratio'),
dcc.Input(id='xy-aspectratio',
type='number',
value=1
),
html.Hr(),
], width=2),
],no_gutters=True, align="center")
]
)
app.layout = html.Div([sidebar, content])
@app.callback(
    Output('select_fingers', 'children'),
    Output('stored-data', 'data'),
    Output('radios_units', 'data'),
    Input('upload-data', 'contents'),
    State('upload-data', 'filename'),
)
def update_output(contents, filename):
    """Parse an uploaded .las file and build the finger-selection sidebar.

    Returns (sidebar children, log samples as records, curve-name -> unit map
    with the sampling 'step' added).  Raises PreventUpdate when nothing has
    been uploaded yet or the file is not a .las file.
    """
    if contents is None:
        raise PreventUpdate
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    if '.las' not in filename and '.LAS' not in filename:
        # Bug fix: unsupported files used to fall through and implicitly
        # return None, which made Dash report a callback error.
        raise PreventUpdate
    las = lasio.read(io.StringIO(decoded.decode('utf-8')))
    # Map every curve mnemonic to its unit; 'step' carries the depth step.
    curvesdict = {k: las.curvesdict[k].unit for k in las.curvesdict}
    curvesdict['step'] = abs(las.well.STEP.value)
    df = las.df().reset_index()
    options = [{'label': n, 'value': n} for n in range(100)]
    data = df.to_dict('records')
    children = html.Div([
        html.H5(filename),
        html.Hr(),
        html.H5('Nominal Inner Diameter (mm)'),
        dcc.Input(id='nominal_id',
                  type='number',
                  placeholder='Input an MD to plot',
                  value=104.8,
                  ),
        html.Hr(),
        dcc.Dropdown(id='depth_index',
                     options=[{'label': c, 'value': c} for c in df.columns],
                     placeholder='pick depht'),
        html.Hr(),
        dcc.Dropdown(id='tool_rotation',
                     options=[{'label': c, 'value': c} for c in df.columns],
                     placeholder='pick tool rotation'),
        html.Hr(),
        dcc.Dropdown(id='tool_offset',
                     options=[{'label': c, 'value': c} for c in df.columns],
                     placeholder='pick tool offset'),
        dcc.Dropdown(id='tool_theta',
                     options=[{'label': c, 'value': c} for c in df.columns],
                     placeholder='pick tool angle'),
        html.Hr(),
        dcc.Dropdown(id='fingers_n',
                     options=options,
                     placeholder='pick number of fingers'),
    ])
    return children, data, curvesdict
Output("select_curves", "children"),
Input("fingers_n", "value"),
State('stored-data','data'),
)
def curves_selection(n_fingers, data):
if n_fingers is not None:
df = pd.DataFrame(data)
options = [{'label':c, 'value':c} for c in df.columns]
return [
html.Hr(),
html.Div(id='curvas', children=[
dcc.Dropdown(id={
'type': 'filter-dropdown',
'index': i
},
options=options,
placeholder='finger_{}'.format(i+1),
# value='FING{:02d}'.format(i+1),
)
for i in range(n_fingers)],
),
]
@app.callback(
Output({'type': 'filter-dropdown', 'index': ALL}, 'value'),
Input({'type': 'filter-dropdown', 'index': ALL}, 'value'),
State({'type': 'filter-dropdown', 'index': ALL}, 'id'),
State("fingers_n", "value"),
)
def find_regex(allvalues, allindex, n_fingers):
try:
index, value = [(i,v) for i,v in enumerate(allvalues) if v is not None][0]
notnumber = re.sub(r"\d+", '#$#', value)
number = re.sub(r'\D', '', value)
if index<9 and number==str(index):
final_values = [notnumber.replace('#$#', str(i)) for i in range(n_fingers)]
elif index<9 and number=='0'+str(index+1):
final_values = [notnumber.replace('#$#', '{:02d}'.format(i+1)) for i in range(n_fingers)]
elif index<9 and number=='0'+str(index):
final_values = [notnumber.replace('#$#', '{:02d}'.format(i)) for i in range(n_fingers)]
return final_values
except:
# raise PreventUpdate
return [None for i in range(n_fingers)]
@app.callback(
Output('plot3d', 'figure'),
Output('plot2d', 'figure'),
Output('slider', 'min'),
Output('slider', 'max'),
Output('slider', 'value'),
Output('polar_center', 'value'),
Output('polar_center', 'step'),
Input('graficar', 'n_clicks'),
Input('slider', 'value'),
Input('polar_center', 'value'),
Input('z-aspectratio', 'value'),
Input('xy-aspectratio', 'value'),
State({'type': 'filter-dropdown', 'index': ALL}, 'value'),
State('stored-data','data'),
State('depth_index', 'value'),
State('tool_rotation', 'value'),
State('tool_offset', 'value'),
State('tool_theta', 'value'),
State('nominal_id', 'value'),
State('radios_units', 'data'),
)
def plot_graf(n_clicks, range_values, polar_center, zratio, xyratio, fingers, data, depth, rot, offset, angle, nomid, curvesdict):
unit = curvesdict[fingers[0]]
step = curvesdict['step']
if unit=='IN':
factor = 25.4
else:
factor = 1
ctx = dash.callback_context
trigger_id = ctx.triggered[0]['prop_id'].split('.')[0]
df = pd.DataFrame(data).sort_values(depth).set_index(depth).dropna()
if trigger_id=='graficar':
radios = df[fingers]
if rot is not None:
rot3d = df[rot]
else:
i_min = np.searchsorted(df.index, range_values[0], side="left")
i_max = np.searchsorted(df.index, range_values[1], side="left")
radios = df.iloc[i_min:i_max][fingers]
if rot is not None:
rot3d = df.iloc[i_min:i_max][rot]
radios = radios*factor
min, max = radios.index.min(), radios.index.max()
nmediciones, npatines = radios.shape
radios_casing = np.full(radios.shape, nomid/2)
diff = radios - radios_casing
Z = np.vstack([radios.index]*npatines)
p = np.linspace(0, 2*np.pi, npatines)
P = np.column_stack([p]*nmediciones)
if rot is not None:
P = P + np.radians(rot3d.values)
X, Y = radios.values.transpose()*np.cos(P), radios.values.transpose()*np.sin(P)
fig3d = go.Figure(data=[go.Surface(x=X, y=Y, z=Z,
surfacecolor=diff.transpose(),
colorscale='Jet',
cmin=-5,
cmax=5,
# customdata=,
hovertemplate='z: %{z:.2f}<extra></extra>'+
'<br><b>z*2</b>: %{z:.2f}<br>',
# text=['ovalizacion: {}'.format(i) for i in Z[:,0]],
)])
fig3d.update_scenes(xaxis_visible=False,
yaxis_visible=False,
zaxis_visible=False,
xaxis_showgrid=False,
yaxis_showgrid=False,
zaxis_showgrid=False,
aspectmode='manual',
aspectratio=dict(x=xyratio, y=xyratio, z=zratio),
)
fig3d.add_trace(go.Scatter3d(x=[X[0,1]], y=[Y[0,1]], z=[Z[0,1]],
mode='markers',
marker = dict(size=10,
color='blue',
opacity=.8,)))
xtop, ytop = nomid/2, 0
fig3d.add_trace(go.Scatter3d(x=[xtop], y=[ytop], z=[Z[0,1]],
mode='markers',
marker = dict(size=10,
color='grey',
opacity=.8,)))
if polar_center is None or trigger_id!='polar_center':
radios_polar_plot = radios.iloc[0].values
polar_depth = radios.index[0]
if rot is not None:
rot2d = df[rot].loc[polar_depth]
else:
i = np.searchsorted(df.index, polar_center, side="right")
radios_polar_plot = df[fingers].iloc[i].values*factor
polar_depth = df.index[i]
if rot is not None:
rot2d = df[rot].loc[polar_depth]
radios_polar_casing = np.full(radios_polar_plot.shape, nomid/2)
diff_polar = radios_polar_plot - radios_polar_casing
if rot is not None:
p = p + np.radians(rot2d)
polar_data = pd.DataFrame({
'theta':[np.degrees(i) for i in p],
'radios':radios_polar_plot,
# 'text':['finger_{}'.format(i) for i in range(1, len(fingers)+1)],
})
fig2d = px.scatter_polar(polar_data,
r="radios",
theta="theta",
# text='text',
color=diff_polar,
color_continuous_scale='jet',
range_color=[-5,5],
)
fig2d.update_traces(marker=dict(size=10),)
fig2d.add_trace(go.Scatterpolar(
r = [0, polar_data.radios.iloc[0]],
theta = [0, polar_data.theta.iloc[0]],
name = "finger_1",
mode = "lines",
))
fig2d.add_trace(go.Scatterpolar(
r = [nomid/2]*len(fingers),
theta = [np.degrees(i) for i in p],
name = "casing ID",
mode = "lines",
line_color = 'black',
line_width = 4,
opacity = .2,
))
if offset is not None and angle is not None:
fig2d.add_trace(go.Scatterpolar(
r = [df[offset].loc[polar_depth]],
theta = [np.radians(df[angle].loc[polar_depth])],
text = 'tool_center',
marker=dict(size=15, color = "magenta", symbol='x'),
name = "tool_center",
))
fig2d.update(layout_coloraxis_showscale=False)
fig2d.update_polars(
radialaxis_range=[0, (nomid//2)+10],
radialaxis_showticklabels=False,
bgcolor='white',
angularaxis_gridcolor='grey',
radialaxis_gridcolor='white',
)
return fig3d, fig2d, df.index.min(), df.index.max(), [min, max], polar_depth, step
if __name__ == '__main__':
url = 'http://127.0.0.1:8050/'
webbrowser.open(url, new=1, autoraise=True)
app.run_server(debug=False)# ,dev_tools_ui=False,dev_tools_props_check=False)
| [
"RY15618@grupo.ypf.com"
] | RY15618@grupo.ypf.com |
e7457ef3edccd7ffa5e7a792a53e60da70576705 | aa32c9526306990f599a495919508d16d4361492 | /lesson_002/03_favorite_movies.py | 481a1866b45c67e363391500e597a80794a498f5 | [] | no_license | nnngracheducation/pyHomeWorks | 31ff46a8376fdf28bab5de4637cedc220edd31ee | 507bab7a63ae9b72d20797ca6e815783da41eaae | refs/heads/master | 2023-01-08T18:29:35.743241 | 2020-11-07T15:35:24 | 2020-11-07T15:35:24 | 310,876,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Есть строка с перечислением фильмов
my_favorite_movies = 'Терминатор, Пятый элемент, Аватар, Чужие, Назад в будущее'
# Выведите на консоль с помощью индексации строки, последовательно:
# первый фильм
# последний
# второй
# второй с конца
# Переопределять my_favorite_movies и использовать .split() нельзя.
# Запятая не должна выводиться.
# TODO здесь ваш код
print(my_favorite_movies[:10])
print(my_favorite_movies[-15:])
print(my_favorite_movies[12:25])
print(my_favorite_movies[-22: -17]) | [
"nnngrach@gmail.com"
] | nnngrach@gmail.com |
178073e46d08f76dd3f07a0d95396e86a2d69c87 | 4887a1a84e5ae0a3f4e23c41576b53e11e56840c | /parkproject/manage.py | 99de1c8a11bea0e3ef907c1d17e2f720a6d87d4f | [] | no_license | liujwplayer/python | c39dfd9d76034e9f4f8dd053442d3cbf3b220020 | 5e270a06c6c0a13cbabb409cebd64fdc6b3150d2 | refs/heads/master | 2020-04-01T08:29:03.386841 | 2018-10-28T05:41:56 | 2018-10-28T05:41:56 | 153,032,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "parkproject.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"liujwplayer@163.com"
] | liujwplayer@163.com |
584a3ac0d6ea04049204f7aaf58c6306f7ddde0d | aeca65f4c5396942d0b710d8915e1c6f53ff218a | /src/carbon_intelligence/meter/models.py | edfeee268242d98fba7385513a34f33dcaebf44c | [] | no_license | rbennell/carbon-intelligence | 9aaaf433faa68094b3f94c010c0021557dd223e1 | 73cb0786cb05e7ae13a9a9e48650e07de9fcb04c | refs/heads/master | 2023-08-11T06:02:04.359094 | 2020-04-19T14:34:28 | 2020-04-19T14:34:28 | 256,499,256 | 0 | 0 | null | 2021-09-22T18:52:51 | 2020-04-17T12:39:28 | Python | UTF-8 | Python | false | false | 1,381 | py | import datetime
from django.db import models
# Create your models here.
METER_TYPE_CHOICES = [
("electricity", "Electricity"),
("gas", "Natural Gas"),
("water", "Water"),
]
UNIT_CHOICES = [
("kWh", "kWh"),
("m3", "m3"),
]
class Meter(models.Model):
id = models.IntegerField(primary_key=True)
building = models.ForeignKey(to="building.Building", on_delete=models.CASCADE)
fuel = models.CharField(max_length=63, choices=METER_TYPE_CHOICES)
unit = models.CharField(max_length=63, choices=UNIT_CHOICES)
# these are the transformations that would need to be applied to each column of data, in order, from the csv file.
csv_transformations = [int, int, str, str]
def parse_meter_reading_datetime(dt):
return datetime.datetime.strptime(dt, "%Y-%m-%d %H:%M")
class MeterReading(models.Model):
meter = models.ForeignKey(to="meter.Meter", on_delete=models.CASCADE)
consumption = models.FloatField()
reading_date_time = models.DateTimeField()
# these are the transformations that would need to be applied to each column of data, in order, from the csv file.
csv_transformations = [float, int, parse_meter_reading_datetime]
class Meta:
get_latest_by = ["reading_date_time"]
@property
def graph_data(self):
return {"datetime": self.reading_date_time, "consumption": self.consumption}
| [
"rbennell@hotmail.co.uk"
] | rbennell@hotmail.co.uk |
aa4be24b9867a5c959b936e949960dfdea6302f6 | 6c43a8e9075d0f1a1a3b6d478731f7e303487efb | /app.py | d7efc9b87a16427ab97db0948ee2cb6aaad94e74 | [
"MIT"
] | permissive | ericavdp/flashcards | e94a88f9326387a0c061a5bbd89fa4bd0d9968a3 | f758e12efca935afb4e0af550c98654a95056963 | refs/heads/master | 2020-04-21T09:40:14.970210 | 2019-02-06T18:56:15 | 2019-02-06T18:56:15 | 169,457,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,333 | py | from flask import Flask, render_template, request
import pymysql
import pymysql.cursors
from random import randint
app = Flask(__name__)
@app.route('/')
def main():
return render_template('index.html')
@app.route('/showMakeCard')
def showMakeCard():
return render_template('makeCard.html')
@app.route('/makeCard', methods=['POST', 'GET'])
def makeCard():
# conn = mysql.connect()
conn = pymysql.connect(user='*', passwd='*', db='*', unix_socket='/tmp/mysql.sock',
cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
try:
_question = request.form['inputQuestion']
_answer = request.form['inputAnswer']
_tags = request.form['inputTags']
_confidence = request.form['inputConfidence']
if _question and _answer and _tags and _confidence:
cursor.execute(""" INSERT INTO card (question, answer, tags, confidence)
VALUES (%s, %s, %s, %s)""", (_question, _answer, _tags, _confidence))
conn.commit()
# cursor.close()
# conn.close()
print('boop')
return
else:
print('enter the required fields')
return
except BaseException:
print('it broke :(')
return
finally:
cursor.execute("SELECT COUNT(*) FROM card")
boop = cursor.fetchone()
print('count of rows is ', boop)
print(boop.values())
moo = boop['COUNT(*)']
print(moo)
shuffle = randint(0, moo)
print(shuffle)
cursor.execute("SELECT * FROM card WHERE id = %s", [shuffle])
yup = cursor.fetchone()
print(yup)
cursor.close()
conn.close()
print('fin')
return render_template("success.html")
@app.route('/showReviewCards')
def showReviewCards():
return render_template('reviewcards.html')
@app.route('/displayCard', methods=['POST', 'GET'])
def displayCard():
conn = pymysql.connect(user='*', passwd='*', db='*', unix_socket='/tmp/mysql.sock',
cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
cursor.execute("SELECT COUNT(*) FROM card")
boop = cursor.fetchone()
print('count of rows is ', boop)
print(boop.values())
moo = boop['COUNT(*)']
print(moo)
shuffle = randint(0, moo)
print(shuffle)
cursor.execute("SELECT * FROM card WHERE id = %s", [shuffle])
yup = cursor.fetchone()
print(yup)
question = yup['question']
answer = yup['answer']
tags = yup['tags']
confidence = yup['confidence']
cursor.close()
conn.close()
print('fin')
return render_template('showCard.html', question=question, answer=answer, tags=tags, confidence=confidence)
@app.route('/showAnswer', methods=['POST', 'GET'])
def showAnswer():
_confidence = request.form['confidence']
_answer = request.form['answer']
return render_template('showAnswer.html', answer=_answer, confidence=_confidence)
@app.route('/updateCard', methods=['POST', 'GET'])
def updateCard():
conn = pymysql.connect(user='*', passwd='*', db='*', unix_socket='/tmp/mysql.sock',
cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
try:
#create new table for historical confidence and timestamp
_confidence = request.form['inputConfidence']
if _confidence:
#update the confidence value to new value, archive old confidence value?
# cursor.execute(""" INSERT INTO card (question, answer, tags, confidence)
# VALUES (%s,)""", (_question, _answer, _tags, _confidence))
# conn.commit()
#insert a timestamp
print('boop')
return
else:
print('enter the required fields')
return
except BaseException:
print('it broke :(')
return
finally:
cursor.close()
conn.close()
return render_template("reviewCards.html")
# def selectNextCard():
# random selection based on index number (training case)
# selection based on time and confidence level (assumption case)
#display needs to update a time stamp
# def trainAlgorithm():
# Baysian confidence training around time vs confidence
if __name__ == '__main__':
app.run()
| [
"noreply@github.com"
] | noreply@github.com |
48259064e154e547151d473c41338ec1af6d2bd3 | c2f2c299b2dcc33229010ef77c96293059dfab61 | /classrooms/urls.py | 0041d6784e0553a9bbc50fd91b64bb597a0ac8d8 | [] | no_license | nbalrifai/Classrooms | f6895fa74e3ad84aad7fbde64e755439304f363d | 6100b9bd0e7c773cf3c4e90edef9993415cbb2b3 | refs/heads/master | 2022-01-18T09:00:07.401363 | 2019-07-21T17:39:06 | 2019-07-21T17:39:06 | 198,065,792 | 0 | 1 | null | 2019-07-21T14:21:54 | 2019-07-21T14:21:53 | null | UTF-8 | Python | false | false | 839 | py |
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from classes import views
urlpatterns = [
path('admin/', admin.site.urls),
path('classrooms/', views.classroom_list, name='classroom-list'),
path('classrooms/<int:classroom_id>/', views.classroom_detail, name='classroom-detail'),
path('classrooms/create', views.classroom_create, name='classroom-create'),
path('classrooms/<int:classroom_id>/update/', views.classroom_update, name='classroom-update'),
path('classrooms/<int:classroom_id>/delete/', views.classroom_delete, name='classroom-delete'),
]
if settings.DEBUG:
urlpatterns+=static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"lailaabdulraheem@gmail.com"
] | lailaabdulraheem@gmail.com |
d1564abb5583ba7d937b0d846491cf7aa40a1cb2 | 00ef8e1eb57b73427508b20aadf0266da6b1f900 | /rlf/exp_mgr/viz_utils.py | f323dee2afc60a42bb37336d3b28e50fe18fb7b4 | [] | no_license | amy12xx/rl-toolkit | f4643935cc8afd960356bfeae74c233d2596dea9 | 8254df8346752ea0226ae2064cc1eabc839567b0 | refs/heads/master | 2023-08-14T00:56:52.270642 | 2021-09-28T15:59:32 | 2021-09-28T15:59:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,503 | py | """
Utilities for manipulating images, rendering images, and rendering videos.
"""
import os
import os.path as osp
from argparse import Namespace
from typing import List, Optional, Union
import cv2
import matplotlib.pyplot as plt
import numpy as np
import rlf.rl.utils as rutils
try:
import wandb
except:
pass
def append_text_to_image(
image: np.ndarray, lines: List[str], from_bottom: bool = False
) -> np.ndarray:
"""
Args:
image: The NxMx3 frame to add the text to.
lines: The list of strings (new line separated) to add to the image.
Returns:
image: (np.array): The modified image with the text appended.
"""
h, w, c = image.shape
font_size = 0.5
font_thickness = 1
font = cv2.FONT_HERSHEY_SIMPLEX
blank_image = np.zeros(image.shape, dtype=np.uint8)
if from_bottom:
y = image.shape[0]
else:
y = 0
for line in lines:
textsize = cv2.getTextSize(line, font, font_size, font_thickness)[0]
if from_bottom:
y -= textsize[1] + 10
else:
y += textsize[1] + 10
x = 10
cv2.putText(
blank_image,
line,
(x, y),
font,
font_size,
(255, 255, 255),
font_thickness,
lineType=cv2.LINE_AA,
)
final = image + blank_image
return final
def save_agent_obs(frames, imdim, vid_dir, name):
use_dir = osp.join(vid_dir, name + "_frames")
if not osp.exists(use_dir):
os.makedirs(use_dir)
if imdim != 1:
raise ValueError("Only gray scale is supported right now")
for i in range(frames.shape[0]):
for frame_j in range(frames.shape[1]):
fname = f"{i}_{frame_j}.jpg"
frame = frames[i, frame_j].cpu().numpy()
cv2.imwrite(osp.join(use_dir, fname), frame)
print(f"Wrote observation sequence to {use_dir}")
def save_mp4(frames, vid_dir, name, fps=60.0, no_frame_drop=False, should_print=True):
frames = np.array(frames)
if len(frames[0].shape) == 4:
new_frames = frames[0]
for i in range(len(frames) - 1):
new_frames = np.concatenate([new_frames, frames[i + 1]])
frames = new_frames
if not osp.exists(vid_dir):
os.makedirs(vid_dir)
vid_file = osp.join(vid_dir, name + ".mp4")
if osp.exists(vid_file):
os.remove(vid_file)
w, h = frames[0].shape[:-1]
videodims = (h, w)
fourcc = cv2.VideoWriter_fourcc("m", "p", "4", "v")
video = cv2.VideoWriter(vid_file, fourcc, fps, videodims)
for frame in frames:
frame = frame[..., 0:3][..., ::-1]
video.write(frame)
video.release()
if should_print:
print(f"Rendered to {vid_file}")
def plot_traj_data(
pred: np.ndarray,
real: np.ndarray,
save_name: str,
log_name: str,
save_path_info: Union[Namespace, str],
step: int,
y_axis_name: str = "State %i",
no_wb: Optional[bool] = None,
title: str = "",
ylim=None,
):
"""
Plots each state dimension of a trajectory comparing a predicted and real trajectory.
:param pred: Shape [H, D] for a trajectory of length H and state dimension D.
D plots will be created.
:param real: Shape [H, D].
:param save_name: Appended to log_name. This should likely be unique so
files on the disk are not overriden. Include file extension.
:param log_name: Has %i in the name to dynamically insert the state dimension.
Should NOT be unique so the log key is updated.
:param save_path_info: The save path will either be extracted from the args or the
path passed as a string.
:param y_axis_name: string with %i to dynamically insert state dimension.
"""
save_name = log_name + "_" + save_name
if isinstance(save_path_info, str):
save_path = osp.join(save_path_info, save_name)
else:
save_path = osp.join(rutils.get_save_dir(save_path_info), save_name)
if no_wb is None:
if not isinstance(save_path_info, Namespace) and "no_wb" not in vars(
save_path_info
):
raise ValueError(
f"Could not find property `no_wb` in the passed `save_path_info`"
)
no_wb = save_path_info.no_wb
per_state_mse = np.mean((pred - real) ** 2, axis=0)
per_state_sqrt_mse = np.sqrt(per_state_mse)
H, state_dim = real.shape
for state_i in range(state_dim):
use_save_path = save_path % state_i
plt.plot(np.arange(H), real[:, state_i], label="Real")
plt.plot(np.arange(H), pred[:, state_i], label="Pred")
plt.grid(b=True, which="major", color="lightgray", linestyle="--")
plt.xlabel("t")
plt.ylabel(y_axis_name % state_i)
if ylim is not None:
plt.ylim(ylim)
if isinstance(title, list):
use_title = title[state_i]
else:
use_title = title
if len(use_title) != 0:
use_title += "\n"
use_title += "MSE %.4f, SQRT MSE %.4f" % (
per_state_mse[state_i],
per_state_sqrt_mse[state_i],
)
plt.title(use_title)
plt.legend()
rutils.plt_save(use_save_path)
if not no_wb:
use_full_log_name = log_name % state_i
wandb.log(
{use_full_log_name: [wandb.Image(use_save_path)]},
step=step,
)
return np.mean(per_state_mse)
| [
"me@andrewszot.com"
] | me@andrewszot.com |
432430035beb53f8a57dcc46ac91de96ad290daa | d00a51990868a5e4eb4cc3100d47bd1f8930ffa3 | /rllab/envs/mujoco/ant_env.py | c0dc8b41edf98c55fa5588c3d3b046b6de4cf8ec | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | akashratheesh/rllab | 42f1d2a21701343c317ef70c7432439236dbafd7 | 5b0232d2a1b412dd4fd7eb5835142f25ff981afe | refs/heads/master | 2023-08-28T16:20:42.932852 | 2021-10-25T00:43:33 | 2021-10-25T00:43:33 | 417,612,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,224 | py | from .mujoco_env import MujocoEnv
from rllab.core.serializable import Serializable
import numpy as np
from rllab.envs.base import Step
from rllab.misc.overrides import overrides
from rllab.misc import logger
class AntEnv(MujocoEnv, Serializable):
FILE = 'ant.xml'
def __init__(self, *args, **kwargs):
super(AntEnv, self).__init__(*args, **kwargs)
Serializable.__init__(self, *args, **kwargs)
def get_current_obs(self):
return np.concatenate([
self.model.data.qpos.flat,
self.model.data.qvel.flat,
self.get_body_xmat("torso").flat,
self.get_body_com("torso"),
self.get_body_comvel("torso"),
]).reshape(-1)
def step(self, action, collectingInitialData=False):
xposbefore = self.get_body_com("torso")[0]
self.forward_dynamics(action)
comvel = self.get_body_comvel("torso")
forward_reward = comvel[0]
xposafter = self.get_body_com("torso")[0]
lb, ub = self.action_bounds
scaling = (ub - lb) * 0.5
ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
contact_cost = 0
survive_reward = 0.05
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self._state
notdone = np.isfinite(state).all() \
and self.get_body_com("torso")[2] >= 0.3 and self.get_body_com("torso")[2] <= 1.0 #used to be 0.2, state[2]
done = not notdone
ob = self.get_current_obs()
return Step(ob, float(reward), done)
def get_my_sim_state(self):
my_sim_state=np.squeeze(np.concatenate((self.model.data.qpos, self.model.data.qvel, self.model.data.qacc, self.model.data.ctrl)))
return my_sim_state
@overrides
def log_diagnostics(self, paths):
progs = [
path["observations"][-1][-3] - path["observations"][0][-3]
for path in paths
]
logger.record_tabular('AverageForwardProgress', np.mean(progs))
logger.record_tabular('MaxForwardProgress', np.max(progs))
logger.record_tabular('MinForwardProgress', np.min(progs))
logger.record_tabular('StdForwardProgress', np.std(progs))
| [
"anusha.nagabandi@gmail.com"
] | anusha.nagabandi@gmail.com |
a24fa36e3d4cbc2f2bd776e44a28aa7d7c325484 | 3d273d7102dba56a99ba8eb2a163b160d4e882bc | /gnn.py | a1b8df84b637bbe80321af57010d083a8cf94d49 | [] | no_license | silent567/nn_parts | 0b85b2d615f040cff0fd38c402b0fa83558b3f1b | 1f0dfe1b0a0b794066220f2c0bb200bfbef605a1 | refs/heads/master | 2020-04-28T23:25:58.929950 | 2019-03-14T16:08:21 | 2019-03-14T16:08:21 | 175,655,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,548 | py | #!/usr/bin/env python
# coding=utf-8
# 20180825 by tanghao
# This file contains graph-network-related layers
import tensorflow as tf
from .init_var import *
from .fc import *
from .norm import LayerNorm
class FastGCNN:
    '''
    Chebyshev-polynomial spectral graph convolution ("ChebNet"), after
    "Convolutional Neural Networks on Graphs with Fast Localized Spectral
    Filtering" (Defferrard et al., 2016).  A (symmetrized, optionally
    trainable) Laplacian is rescaled by its largest eigenvalue and expanded
    into the Chebyshev basis T_0..T_{K-1}; the learnable filter weights F
    contract that basis into a single per-channel graph-convolution tensor.
    '''
    def __init__(self,kernel_size,input_filter_size,output_filter_size,node_num=None,dynamical_graph_flag=True,name_scope='FastGCNNCell',F=None,F_init=None,L=None,L_init=None,summ_flag=True):
        '''
        kernel_size: positive int, K in the paper -- the polynomial degree of the filters.
        input_filter_size: int, number of input channels.
        output_filter_size: int, number of output channels.
        node_num: int, number of vertices in the graph.
        dynamical_graph_flag: bool, whether the Laplacian matrix is intended to be
            updated by the optimizer.
            NOTE(review): the flag is stored but never read inside this class --
            presumably consulted by external training code; confirm.
        name_scope: str, TF name scope wrapping all ops/variables of this layer.
        F: tf.Variable of shape [input_filter_size, output_filter_size, kernel_size], or None.
        F_init: initial value for F (tf.Variable/tf.Tensor/list/numpy.ndarray) of the
            same shape, or None.
        L: tf.Variable of shape [node_num, node_num], or None.
        L_init: initial value for L (same acceptable types) of shape
            [node_num, node_num], or None.
        summ_flag: bool, whether histogram summaries of F/L/normL are created.

        One of node_num, L, L_init must not be None so the Laplacian matrix can
        be initialized.

        Sample use:
            gcnn_layer = FastGCNN(kernel_size,input_filter_size,output_filter_size,node_num)
            gcnn_layer = FastGCNN(kernel_size,input_filter_size,output_filter_size,dynamical_graph_flag=False,L=LaplacianMatrix)
        '''
        self.input_filter_size = input_filter_size
        self.output_filter_size = output_filter_size
        self.kernel_size = kernel_size
        self.dynamical_graph_flag = dynamical_graph_flag
        self.summ_flag = summ_flag
        with tf.name_scope(name_scope) as self.name_scope:
            # Start from a (possibly asymmetric) learnable matrix, then
            # symmetrize so the eigendecomposition below is well behaved.
            L = init_identity_matrix_variable(L,L_init,node_num,'UnsymmetrixLaplacianMatrix')
            self.L = tf.divide(L+tf.transpose(L),2.,name='LaplacianMatrix')
            self.node_num = self.L.shape.as_list()[-1]
            # Rescale the spectrum into [-1, 1] (the Chebyshev domain):
            # normL = 2*L/lambda_max - I, with lambda_max the largest eigenvalue.
            self.L_maxeigenvalue = tf.self_adjoint_eig(self.L)[0][-1]
            self.normL = tf.subtract(2*self.L/self.L_maxeigenvalue,tf.eye(self.node_num),name='NormedLaplacianMatrix')
            # Chebyshev recurrence: T_0 = I, T_1 = normL, T_k = 2*normL*T_{k-1} - T_{k-2}.
            if self.kernel_size == 1:
                TnormL_list = [tf.eye(self.node_num)]
            else:
                TnormL_list = [tf.eye(self.node_num),self.normL]
            for tindex in range(2,self.kernel_size):
                TnormL_list.append(2*tf.matmul(self.normL,TnormL_list[-1])-TnormL_list[-2])
            # TnormL: [kernel_size, node_num, node_num] stacked polynomial basis.
            self.TnormL = tf.stack(TnormL_list,axis=0)
            self.F = init_random_variable(F,F_init,[self.input_filter_size,self.output_filter_size,self.kernel_size],2./(self.input_filter_size*self.node_num),'filter')
            # Pre-contract the filter weights with the polynomial basis.
            # coefficents has shape [input_channel, output_channel, node, node]:
            # entry [i, j, a, b] maps input channel i at node a to output channel
            # j at node b (see the einsum in get_output).
            self.coefficents = tf.einsum('aim,mjk->aijk',self.F,self.TnormL,name='coefficents')
            if self.summ_flag:
                self.F_summ = tf.summary.histogram('F_summ',self.F)
                self.L_summ = tf.summary.histogram('L_summ',self.L)
                self.normL_summ = tf.summary.histogram('normL_summ',self.normL)
    def get_l2_loss(self,):
        # Regularizer over the filter weights only (the Laplacian is excluded).
        return tf.reduce_mean(tf.square(self.F))
    def __call__(self,input_tensor):
        # Convenience alias so the layer object is directly callable.
        return self.get_output(input_tensor)
    def get_output(self,input_tensor):
        '''
        Apply the graph convolution.

        input_tensor: tf.Tensor of shape [N, node_num, input_filter_size].
        Returns a tensor of the same type and of shape
        [N, node_num, output_filter_size].
        '''
        with tf.name_scope(self.name_scope):
            return tf.einsum('nai,ijab->nbj',input_tensor,self.coefficents)
class DenseUpdateLayer(object):
    """A stack of per-node fully-connected layers used as a node-update function.

    Each level applies Dense -> (LayerNorm) -> activation -> (dropout), with an
    optional residual connection whenever a level's input and output widths match.
    """

    def __init__(self, input_size, output_size, layer_num, norm_flag=True,
                 dropout_flag=False, res_flag=True,
                 activation_func=tf.nn.leaky_relu, summ_flag=False,
                 name_scope='DenseUpdateLayer'):
        self.input_size = input_size
        self.output_size = output_size
        self.layer_num = layer_num
        self.norm_flag = norm_flag
        self.dropout_flag = dropout_flag
        self.res_flag = res_flag
        self.activation_func = activation_func
        self.summ_flag = summ_flag
        # Reserve the enclosing name scope; the sub-layers are created below.
        with tf.name_scope(name_scope) as self.name_scope:
            pass
        self.build_model()

    def build_model(self):
        """Instantiate the Dense and LayerNorm sub-layers for every level."""
        self.name_scope_layers = []
        self.dense_layers = []
        self.norm_layers = []
        in_dim = self.input_size
        with tf.name_scope(self.name_scope):
            for level in range(self.layer_num):
                with tf.name_scope('Layer%d' % level) as level_scope:
                    self.name_scope_layers.append(level_scope)
                    self.dense_layers.append(
                        Dense(in_dim, self.output_size,
                              activation_func=linear_activation,
                              summ_flag=self.summ_flag))
                    self.norm_layers.append(
                        LayerNorm([self.output_size], summ_flag=self.summ_flag))
                # Every level after the first maps output_size -> output_size.
                in_dim = self.output_size

    def __call__(self, X, train_flag):
        """Update node attributes individually.

        X: tf.Tensor of node attributes, shape [N, C].
        train_flag: boolean tf.Tensor controlling dropout.
        Returns the updated node attributes, shape [N, C'].
        """
        with tf.name_scope(self.name_scope):
            feed = X
            for level_scope, dense, norm in zip(self.name_scope_layers,
                                                self.dense_layers,
                                                self.norm_layers):
                with tf.name_scope(level_scope):
                    out = dense(feed)
                    if self.norm_flag:
                        out = norm(out)
                    out = self.activation_func(out)
                    if self.dropout_flag:
                        out = tf.layers.dropout(out, 0.5, training=train_flag)
                    # Residual shortcut only when the shapes line up.
                    if self.res_flag and dense.input_size == dense.output_size:
                        out = tf.add(out, feed)
                    feed = out
            return out

    def get_l2_loss(self):
        """Sum of the per-layer L2 regularizers of all Dense sub-layers."""
        with tf.name_scope(self.name_scope):
            return tf.add_n([dense.get_l2_loss() for dense in self.dense_layers])
class MPNNLayer:
    """One message-passing step: update node attributes, aggregate per edge
    label, and sum the per-label results."""

    def __init__(self, update_func, aggregate_func, edge_label_num,
                 name_scope='MPNNLayer'):
        with tf.name_scope(name_scope) as self.name_scope:
            # update_func(X, train_flag): per-node attribute update
            # (similar to a conv with kernel size 1).
            # aggregate_func(A, X): neighborhood aggregation for one edge label.
            self.update_func = update_func
            self.aggregate_func = aggregate_func
            self.edge_label_num = edge_label_num

    def __call__(self, A, X, train_flag):
        """Run one message-passing step.

        A: tf.Tensor, [N, N, M] adjacency with one slice per edge label.
        X: tf.Tensor of node attributes, shape [N, C]
            (NOTE(review): the previous doc said [N, N, C], but the aggregators
            in this file consume [N, C] -- confirm against callers).
        train_flag: boolean tf.Tensor forwarded to update_func (dropout).
        Returns the updated+aggregated node attributes.
        """
        with tf.name_scope(self.name_scope):
            # update_func is deliberately invoked once per edge label, exactly
            # as before (e.g. dropout inside it is re-sampled per label).
            per_label = [
                self.aggregate_func(A[:, :, label],
                                    self.update_func(X, train_flag))
                for label in range(self.edge_label_num)
            ]
            return tf.add_n(per_label, name='output_X')
class SumAggregator:
    """Sum-pool each node's closed neighborhood (a self loop is added)."""

    def __init__(self, name_scope='SumAggregator'):
        with tf.name_scope(name_scope) as self.name_scope:
            pass

    def __call__(self, A, X):
        """Aggregate node attributes by neighborhood sum.

        A: tf.Tensor, [N, N] adjacency matrix.
        X: tf.Tensor, [N, C] node attributes.
        Returns a [N, C] tensor whose row i sums X over node i and its neighbors.
        """
        with tf.name_scope(self.name_scope):
            closed_adj = tf.add(A, tf.eye(tf.shape(A)[0]), name='self_loop_A')
            return tf.matmul(closed_adj, X, name='output_X')
class MeanAggregator:
    """Mean-pool each node's closed neighborhood (a self loop is added)."""

    def __init__(self, name_scope='MeanAggregator'):
        with tf.name_scope(name_scope) as self.name_scope:
            pass

    def __call__(self, A, X):
        """Aggregate node attributes by neighborhood mean.

        A: tf.Tensor, [N, N] adjacency matrix.
        X: tf.Tensor, [N, C] node attributes.
        Returns a [N, C] tensor of per-node neighborhood averages.
        """
        with tf.name_scope(self.name_scope):
            closed_adj = tf.add(A, tf.eye(tf.shape(A)[0]), name='self_loop_A')
            # The self loop guarantees every row sum is at least 1.
            neighborhood_size = tf.reduce_sum(closed_adj, axis=-1, keepdims=True)
            return tf.divide(tf.matmul(closed_adj, X), neighborhood_size,
                             name='output_X')
class MaxAggregator_old:
    """Max-pool each node's closed neighborhood via a dense tiled mask.

    Non-neighbors are pushed to a large negative value (-1e4) before the
    per-row maximum, so only (self-loop) neighbors can win.
    NOTE(review): features below -1e4 would be masked incorrectly.
    """

    def __init__(self, name_scope='MaxAggregator'):
        with tf.name_scope(name_scope) as self.name_scope:
            pass

    def __call__(self, A, X):
        """Aggregate node attributes by neighborhood max.

        A: tf.Tensor, [N, N] adjacency matrix.
        X: tf.Tensor, [N, C] node attributes.
        Returns a [N, C] tensor of element-wise neighborhood maxima.
        """
        with tf.name_scope(self.name_scope):
            static_shape = X.get_shape()
            n = tf.shape(X, name='output_shape')[0]
            adj = tf.add(A, tf.eye(n), name='self_loop_A')
            # Flatten the adjacency to a [N*N, 1] column so it broadcasts
            # against the row-tiled copy of X below.
            adj_col = tf.reshape(adj, [-1, 1], name='flat_self_loop_A')
            repeated_X = tf.tile(X, [n, 1], name='tiled_flat_X')
            masked = tf.reshape(repeated_X * adj_col - 1e4 * (1 - adj_col),
                                [n, n, -1], name='flat_X_dot_A')
            pooled = tf.reduce_max(masked, axis=1, keepdims=False, name='output_X')
            # Restore the static shape lost through the dynamic reshapes.
            pooled.set_shape(static_shape)
            return pooled
class MaxAggregator:
    """Neighborhood max-pooling aggregator.

    For every node, takes the element-wise maximum over the attributes of the
    node itself and its neighbors (a self loop is added).  The result is built
    one row at a time with a tf.while_loop, so it works when the node count is
    only known at run time.
    """

    def __init__(self, name_scope='MaxAggregator'):
        with tf.name_scope(name_scope) as self.name_scope:
            pass

    def _maximum_neighborhood(self, index, A, X, out):
        """Append the feature-wise max over node `index`'s neighborhood to `out`."""
        with tf.name_scope(self.name_scope):
            # BUG FIX: tf.boolean_mask requires a boolean mask, but __call__
            # passes the numeric self-loop adjacency (A + I); threshold the row
            # so graph construction no longer fails on a float mask.
            neigh = tf.boolean_mask(X, A[index] > 0)
            max_neigh = tf.reduce_max(neigh, keepdims=True, axis=0)
            out = tf.concat([out, max_neigh], axis=0)
            return out

    def __call__(self, A, X):
        '''
        input arguments:
            A is the graph adjacency matrix of type tf.Tensor and of shape [N,N]
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
            , where N is the number of nodes and C is the channel number of node attributes
        output arguments:
            aggregated new node attributes X' of type tf.Tensor and of shape [N,C]
        '''
        with tf.name_scope(self.name_scope):
            output_shape = X.get_shape()
            node_num = tf.shape(X, name='output_shape')[0]
            output_dim = int(output_shape[-1])
            # The self loop guarantees every neighborhood is non-empty, so the
            # per-row reduce_max in _maximum_neighborhood is well defined.
            self_loop_A = tf.add(A, tf.eye(node_num), name='self_loop_A')
            output_X = tf.zeros([0, output_dim])
            # Grow the [?, C] accumulator one row per node; only its leading
            # dimension changes between iterations (see shape_invariants).
            _, _, _, output_X = tf.while_loop(
                lambda index, A, X, out: index < node_num,
                lambda index, A, X, out: [index + 1, A, X,
                                          self._maximum_neighborhood(index, A, X, out)],
                loop_vars=[tf.zeros([], tf.int32), self_loop_A, X, output_X],
                shape_invariants=[tf.TensorShape([]), A.get_shape(), X.get_shape(),
                                  tf.TensorShape([None, output_dim])])
            output_X.set_shape(output_shape)
            return output_X
class GCNAggregator:
    """Symmetrically normalized aggregation D^{-1/2}(A + I)D^{-1/2} X, as in
    the GCN propagation rule."""

    def __init__(self, name_scope='GCNAggregator'):
        with tf.name_scope(name_scope) as self.name_scope:
            pass

    def __call__(self, A, X):
        """Aggregate node attributes with symmetric degree normalization.

        A: tf.Tensor, [N, N] adjacency matrix.
        X: tf.Tensor, [N, C] node attributes.
        Returns the [N, C] normalized neighborhood aggregate.
        """
        with tf.name_scope(self.name_scope):
            closed_adj = tf.add(A, tf.eye(tf.shape(A)[0]), name='self_loop_A')
            # Diagonal matrix holding 1/sqrt(degree) for the self-looped graph.
            inv_sqrt_degree = tf.linalg.diag(
                1. / tf.sqrt(tf.reduce_sum(closed_adj, axis=1)),
                name='self_loop_D_sqrt')
            normalized_adj = tf.matmul(
                inv_sqrt_degree, tf.matmul(closed_adj, inv_sqrt_degree),
                name='normalized_self_loop_A')
            return tf.matmul(normalized_adj, X, name='output_X')
class SumGraphAggregator:
    """Graph readout that sums all node attribute rows into one [1, C] row."""
    def __init__(self,name_scope='SumGraphAggregator'):
        # Capture a reusable name scope string for all ops built later.
        with tf.name_scope(name_scope) as self.name_scope:
            pass
    def __call__(self,X):
        '''
        input arguments:
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
            , where N is the number of nodes and C is the channel number of node attributes
        output arguments:
            aggregated new node attributes X' of type tf.Tensor and of shape [1,C]
        '''
        with tf.name_scope(self.name_scope):
            return tf.reduce_sum(X, axis=0, keepdims=True, name='output_X')
class MeanGraphAggregator:
    """Graph readout that averages all node attribute rows into one [1, C] row."""
    def __init__(self,name_scope='MeanGraphAggregator'):
        # Capture a reusable name scope string for all ops built later.
        with tf.name_scope(name_scope) as self.name_scope:
            pass
    def __call__(self,X):
        '''
        input arguments:
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
            , where N is the number of nodes and C is the channel number of node attributes
        output arguments:
            aggregated new node attributes X' of type tf.Tensor and of shape [1,C]
        '''
        with tf.name_scope(self.name_scope):
            return tf.reduce_mean(X, axis=0, keepdims=True, name='output_X')
class MaxGraphAggregator:
    """Graph readout taking the per-channel maximum over nodes, as [1, C]."""
    def __init__(self,name_scope='MaxGraphAggregator'):
        # Capture a reusable name scope string for all ops built later.
        with tf.name_scope(name_scope) as self.name_scope:
            pass
    def __call__(self,X):
        '''
        input arguments:
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
            , where N is the number of nodes and C is the channel number of node attributes
        output arguments:
            aggregated new node attributes X' of type tf.Tensor and of shape [1,C]
        '''
        with tf.name_scope(self.name_scope):
            return tf.reduce_max(X, axis=0, keepdims=True, name='output_X')
class CreateSubgraph:
    """Builds a subgraph by deleting one uniformly random node from (X, A)."""
    def __init__(self,name_scope='CreateSubgraph'):
        # Capture a reusable name scope string for all ops built later.
        with tf.name_scope(name_scope) as self.name_scope:
            pass
    def _remove_one_node(self,X,A):
        with tf.name_scope(self.name_scope):
            # Shuffle the node indices and drop the last one.
            keep = tf.random_shuffle(tf.range(tf.shape(A)[0]))[:-1]
            sub_X = tf.gather(X, keep)
            # Restrict both rows and columns of A to the kept nodes.
            sub_A = tf.gather(tf.gather(A, keep), keep, axis=1)
            return sub_X, sub_A
    def __call__(self,X,A):
        with tf.name_scope(self.name_scope):
            return self._remove_one_node(X,A)
| [
"silent56@sjtu.edu.cn"
] | silent56@sjtu.edu.cn |
03e2a912883ed7271a2cc5d4993b027cbcef07ec | 7df7efb0872a24471d376ceda741b3752502ebc9 | /flaskAPI/models.py | e4cac78ed42cd486872a78f09ceae07b7e61a327 | [] | no_license | EliasAguirre/Flask-Python-Api | 4546541082f656f76cc4cf017725e8a2ebeacab7 | be813fd48ecdf4b7cedda98cd0e1c7c004d6115c | refs/heads/master | 2020-12-27T06:04:31.951371 | 2020-02-02T15:06:12 | 2020-02-02T15:06:12 | 237,787,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | from main import db
class User(db.Model):
    # Minimal demo user model.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(10), index=True, unique=True)
    # NOTE(review): unique=True on `age` forbids two users sharing an age,
    # which is almost certainly unintended -- confirm before relying on it.
    age = db.Column(db.Integer, index=True, unique=True)
    def __repr__(self):
        # Debug representation, e.g. <User ad>.
        return '<User {}>'.format(self.name)
def init_db():
    """Create all tables and seed a single test user."""
    db.create_all()
    # Create a test user.  SQLAlchemy's declarative default constructor
    # accepts keyword arguments only, so User('ad', 2) raised a TypeError;
    # the fields must be named explicitly.
    new_user = User(name='ad', age=2)
    db.session.add(new_user)
    db.session.commit()
# Allow `python models.py` to bootstrap the schema with the seed user.
if __name__ == '__main__':
    init_db()
"eliasdavid.aguirre.a@gmail.com"
] | eliasdavid.aguirre.a@gmail.com |
bff7b6d57c42b3b74cbfa6b65e9e3e4fd2c58bd0 | a766f6ee10be86bd33d2cfc06c19d94247b6ad08 | /aea/cli/registry/registration.py | e2da0dfcaf986b79c130ef7afa9834bdbe712d07 | [
"Apache-2.0"
] | permissive | ejfitzgerald/agents-aea | 3b07db6c1f9f5fc8fded4ce497a2283ae88f0b84 | 6411fcba8af2cdf55a3005939ae8129df92e8c3e | refs/heads/master | 2022-12-07T05:53:55.379150 | 2020-08-14T15:22:19 | 2020-08-14T15:22:19 | 288,688,666 | 0 | 0 | Apache-2.0 | 2020-08-19T09:24:07 | 2020-08-19T09:24:06 | null | UTF-8 | Python | false | false | 2,053 | py | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Module with methods for new user registration."""
from typing import List
from click import ClickException
from aea.cli.registry.utils import request_api
def register(
    username: str, email: str, password: str, password_confirmation: str
) -> str:
    """
    Register new Registry account and automatically login if successful.

    :param username: str username.
    :param email: str email.
    :param password: str password.
    :param password_confirmation: str password confirmation.

    :return: str auth token.

    :raises ClickException: if the registration endpoint returns HTTP 400,
        with all field-level validation messages joined into the message.
    """
    data = {
        "username": username,
        "email": email,
        "password1": password,
        "password2": password_confirmation,
    }
    resp_json, status_code = request_api(
        "POST",
        "/rest-auth/registration/",
        data=data,
        handle_400=False,
        return_code=True,
    )
    if status_code == 400:
        # Collect the per-field validation messages returned by the API.
        errors: List[str] = []
        for key in ("username", "email", "password1", "password2"):
            param_errors = resp_json.get(key)
            if param_errors:
                errors.extend(param_errors)
        # Fixed typo in the user-facing message ("occured" -> "occurred").
        raise ClickException(
            "Errors occurred during registration.\n" + "\n".join(errors)
        )
    # The raise above ends the error path, so no `else` is needed here.
    return resp_json["key"]
| [
"panasevychol@gmail.com"
] | panasevychol@gmail.com |
fea85b2a070376ac73feafffcec765b84aadb0fe | f3cec139bc484a376753ac8089f000e25927d940 | /Xray_trainloop.py | 8d4678958367de1bf0ffec7d01a7353f64c729fb | [] | no_license | wisemin7/covid | e28e309c1f35eec11a886bf4f6cf0495506b64dd | f347664df8de97c1643e3a060183e8c01a3c925c | refs/heads/master | 2022-11-09T16:34:43.764813 | 2020-06-27T10:26:16 | 2020-06-27T10:26:16 | 275,346,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,831 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 11:25:33 2020
@author: hoon
"""
import torch
import torchvision
from torchvision import transforms
from torch.utils.data.dataset import Dataset
import os, sys, random
import numpy as np
import PIL
from PIL import Image
from gen_utils import *
from ds import *
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
# Per-image preprocessing: convert to tensor, then min-max normalize to [0, 1].
load_tfm = transforms.Compose([
    transforms.ToTensor(),
    # NOTE(review): divides by zero when an image is constant (max == min) --
    # confirm the datasets can never contain flat images.
    lambda x : (x-x.min())/(x.max()-x.min())
])
# XrayDset presumably comes from the local `ds` module (star-imported above) --
# verify against that module.
train_set = XrayDset('./data_new2/train/', load_tfm)
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=10, shuffle=True)
test_set = XrayDset('./data4/test_Shenzen/', load_tfm)
test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=10, shuffle=False)
class XrayResnet(torch.nn.Module):
    """ResNet-18 classifier adapted to single-channel X-ray images.

    A leading stride-2 conv maps the 1-channel input to the 3 channels a
    stock ResNet expects; the replaced head emits 2 logits.
    """
    def __init__(self):
        super(XrayResnet, self).__init__()
        # 1 -> 3 channels with a 2x spatial downsample.
        self.C1 = torch.nn.Conv2d(in_channels=1, out_channels=3, kernel_size=3, padding=1, stride=2)
        self.model_ft = torchvision.models.resnet18()
        # Replace the global average pool; the result is flattened in forward().
        self.model_ft.avgpool = torch.nn.AvgPool2d(kernel_size=4, padding=0, stride=2)
        # Two-layer linear head producing 2 class logits.
        self.model_ft.fc = torch.nn.Sequential(
            torch.nn.Linear(512,256),
            torch.nn.Linear(256,2)
        )
    def forward(self, x):
        y = x
        y = self.C1(y)
        # Run the backbone children up to (and including) avgpool, skipping fc.
        # NOTE(review): relies on resnet18's child-module ordering, which is
        # fragile across torchvision versions -- confirm.
        for lid, layer in enumerate(list(self.model_ft.children())[:9]):
            y = layer(y)
        # Drop the trailing 1x1 spatial dims before the linear head.
        y = y.squeeze(-1).squeeze(-1)
        # The last child is the Sequential head assigned in __init__.
        y = list(self.model_ft.children())[-1](y)
        return y
# ---- training configuration -------------------------------------------------
n_epochs = 30
#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')
# M is the network instance (not a matrix, despite the name).
M = XrayResnet()
M = M.to(device)
optimizer = torch.optim.Adam(M.parameters(), lr=6e-4, weight_decay=1e-2)
# Cosine-annealed learning rate over the full run.
exp_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs)
criterion = torch.nn.CrossEntropyLoss()
# Per-epoch average losses for the plots below.
train_loss_track = []
test_loss_track = []
for eph in range(n_epochs):
    print('epoch : {} ...'.format(eph))
    n_correct = 0
    avg_loss = 0
    n_samples = 0
    M.train()
    # NOTE(review): since PyTorch 1.1, scheduler.step() should be called
    # *after* the epoch's optimizer steps; calling it first shifts the LR
    # schedule by one epoch -- confirm this is intended.
    exp_lr_scheduler.step()
    for idx, xy in enumerate(train_loader):
        x, y = xy
        x, y = x.to(device), y.to(device)
        outputs = M(x)
        _, preds = torch.max(outputs, 1)
        loss = criterion(outputs, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        n_correct += torch.sum(preds.data == y.data)
        avg_loss += loss.item()
        n_samples += x.size(0)
    # Sum of per-batch mean losses divided by the sample count; this only
    # approximates the true per-sample mean when batch sizes vary.
    avg_loss = avg_loss/n_samples
    train_loss_track.append(avg_loss)
    print('train avg loss : ', avg_loss)
    print('num of correct samples : {}/{}'.format(n_correct, n_samples))
    n_correct = 0
    avg_loss = 0
    n_samples = 0
    gt_labels = []
    pred_labels = []
    M.eval()
    # NOTE(review): evaluation runs without torch.no_grad(), so autograd
    # graphs are built needlessly -- consider wrapping this loop.
    for idx, xy in enumerate(test_loader):
        x, y = xy
        # x, y = x.cuda(), y.cuda()
        x, y = x.to(device), y.to(device)
        outputs = M(x)
        _, preds = torch.max(outputs, 1)
        loss = criterion(outputs, y)
        n_correct += torch.sum(preds.data == y.data)
        gt_labels += list(y.data.cpu().numpy())
        pred_labels += list(preds.data.cpu().numpy())
        avg_loss += loss.item()
        n_samples += x.size(0)
    avg_loss = avg_loss/n_samples
    test_loss_track.append(avg_loss)
    print('test avg loss : ', avg_loss)
    print('num of correct samples : {}/{}'.format(n_correct, n_samples))
# Loss curves: train in blue, test in red.
plt.plot(train_loss_track, 'b')
plt.plot(test_loss_track, 'r')
plt.xlabel('epochs')
plt.ylabel('avg loss')
plt.show()
# NOTE(review): three class names but the network outputs only 2 logits, so
# classification_report may error or mislabel -- confirm the class count.
target_names = ['No TB', 'TB', 'COVID']
print(classification_report(gt_labels, pred_labels, target_names=target_names))
| [
"noreply@github.com"
] | noreply@github.com |
010885dad083a7b1ec9ebb80c5c3d64b92989605 | 37930870719caede967fdf6905c032e22d086e8b | /scripts/imaging/chaining/slam/light_parametric__mass_light_dark__source_parametric.py | 80e4df39df68667dc5cd365fcf51cfac21c6f9f0 | [] | no_license | Cywtim/autolens_workspace | cbede944c0f85ee95cd7362fee957ef77e701280 | da40cafee8dc26e5d8b1041888fb280598e74a5e | refs/heads/master | 2023-04-05T14:22:06.091992 | 2021-04-15T20:29:28 | 2021-04-15T20:29:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,711 | py | """
SLaM (Source, Light and Mass): Light Parametric + Mass Total + Source Parametric
================================================================================
SLaM pipelines break the analysis down into multiple pipelines which focus on modeling a specific aspect of the strong
lens, first the Source, then the (lens) Light and finally the Mass. Each of these pipelines has its own inputs
which customize the model and analysis in that pipeline.
The models fitted in earlier pipelines determine the model used in later pipelines. For example, if the SOURCE PIPELINE
uses a parametric `EllSersic` profile for the bulge, this will be used in the subsequent MASS LIGHT DARK PIPELINE.
Using a SOURCE PARAMETRIC PIPELINE, LIGHT PIPELINE and a MASS LIGHT DARK PIPELINE this SLaM script fits `Imaging` of
a strong lens system, where in the final model:
- The lens galaxy's light is a bulge `EllSersic`.
- The lens galaxy's stellar mass distribution is a bulge tied to the light model above.
- The lens galaxy's dark matter mass distribution is modeled as a `EllNFWMCRLudlow`.
- The source galaxy's light is a parametric `EllSersic`.
This runner uses the SLaM pipelines:
`source_parametric/source_parametric__with_lens_light`
`light_parametric/with_lens_light`
`mass_total/mass_light_dark`
Check them out for a detailed description of the analysis!
"""
# %matplotlib inline
# from pyprojroot import here
# workspace_path = str(here())
# %cd $workspace_path
# print(f"Working Directory has been set to `{workspace_path}`")
import os
import sys
from os import path
import autofit as af
import autolens as al
import autolens.plot as aplt
# Put the working directory on sys.path so the local `slam` helper package
# shipped alongside these scripts can be imported.
sys.path.insert(0, os.getcwd())
import slam
"""
__Dataset__
Load the `Imaging` data, define the `Mask2D` and plot them.
"""
dataset_name = "light_sersic__mass_mlr_nfw__source_sersic"
dataset_path = path.join("dataset", "imaging", "with_lens_light", dataset_name)
imaging = al.Imaging.from_fits(
image_path=path.join(dataset_path, "image.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
pixel_scales=0.1,
)
mask = al.Mask2D.circular(
shape_native=imaging.shape_native, pixel_scales=imaging.pixel_scales, radius=3.0
)
imaging = imaging.apply_mask(mask=mask)
imaging_plotter = aplt.ImagingPlotter(imaging=imaging)
imaging_plotter.subplot_imaging()
"""
__Paths__
The path the results of all chained searches are output:
"""
path_prefix = path.join("imaging", "slam", dataset_name)
"""
__Redshifts__
The redshifts of the lens and source galaxies, which are used to perform unit converions of the model and data (e.g.
from arc-seconds to kiloparsecs, masses to solar masses, etc.).
"""
redshift_lens = 0.5
redshift_source = 1.0
"""
__HYPER SETUP__
The `SetupHyper` determines which hyper-mode features are used during the model-fit.
"""
setup_hyper = al.SetupHyper(
hyper_galaxies_lens=False,
hyper_galaxies_source=False,
hyper_image_sky=None,
hyper_background_noise=None,
)
"""
__SOURCE PARAMETRIC PIPELINE (with lens light)__
The SOURCE PARAMETRIC PIPELINE (with lens light) uses three searches to initialize a robust model for the
source galaxy's light, which in this example:
- Uses a parametric `EllSersic` bulge.
- Uses an `EllIsothermal` model for the lens's total mass distribution with an `ExternalShear`.
__Settings__:
- Mass Centre: Fix the mass profile centre to (0.0, 0.0) (this assumption will be relaxed in the MASS LIGHT DARK
PIPELINE).
"""
analysis = al.AnalysisImaging(dataset=imaging)
bulge = af.Model(al.lp.EllSersic)
bulge.centre = (0.0, 0.0)
source_parametric_results = slam.source_parametric.with_lens_light(
path_prefix=path_prefix,
analysis=analysis,
setup_hyper=setup_hyper,
lens_bulge=bulge,
lens_disk=None,
mass=af.Model(al.mp.EllIsothermal),
shear=af.Model(al.mp.ExternalShear),
source_bulge=af.Model(al.lp.EllSersic),
mass_centre=(0.0, 0.0),
redshift_lens=redshift_lens,
redshift_source=redshift_source,
)
"""
__LIGHT PARAMETRIC PIPELINE__
The LIGHT PARAMETRIC PIPELINE uses one search to fit a complex lens light model to a high level of accuracy, using the
lens mass model and source light model fixed to the maximum log likelihood result of the SOURCE PARAMETRIC PIPELINE.
In this example it:
- Uses a parametric `EllSersic` bulge [Do not use the results of the SOURCE PARAMETRIC PIPELINE to initialize priors].
- Uses an `EllIsothermal` model for the lens's total mass distribution [fixed from SOURCE PARAMETRIC PIPELINE].
- Uses the `EllSersic` model representing a bulge for the source's light [fixed from SOURCE PARAMETRIC PIPELINE].
- Carries the lens redshift, source redshift and `ExternalShear` of the SOURCE PIPELINE through to the MASS
PIPELINE [fixed values].
"""
bulge = af.Model(al.lp.EllSersic)
light_results = slam.light_parametric.with_lens_light(
path_prefix=path_prefix,
analysis=analysis,
setup_hyper=setup_hyper,
source_results=source_parametric_results,
lens_bulge=bulge,
lens_disk=None,
)
"""
__MASS LIGHT DARK PIPELINE (with lens light)__
The MASS LIGHT DARK PIPELINE (with lens light) uses one search to fits a complex lens mass model to a high level of
accuracy, using the source model of the SOURCE PIPELINE and the lens light model of the LIGHT PARAMETRIC PIPELINE to
initialize the model priors . In this example it:
- Uses a parametric `EllSersic` bulge for the lens galaxy's light and its stellar mass [12 parameters: fixed from
LIGHT PARAMETRIC PIPELINE].
- The lens galaxy's dark matter mass distribution is a `EllNFWMCRLudlow` whose centre is aligned with bulge of
the light and stellar mass mdoel above [5 parameters].
- Uses the `EllSersic` model representing a bulge for the source's light [priors initialized from SOURCE
PARAMETRIC PIPELINE].
- Carries the lens redshift, source redshift and `ExternalShear` of the SOURCE PARAMETRIC PIPELINE through to the MASS
LIGHT DARK PIPELINE.
"""
analysis = al.AnalysisImaging(dataset=imaging)
lens_bulge = af.Model(al.lmp.EllSersic)
dark = af.Model(al.mp.EllNFWMCRLudlow)
dark.centre = lens_bulge.centre
mass_results = slam.mass_light_dark.with_lens_light(
path_prefix=path_prefix,
analysis=analysis,
setup_hyper=setup_hyper,
source_results=source_parametric_results,
light_results=light_results,
lens_bulge=lens_bulge,
lens_disk=None,
lens_envelope=None,
dark=dark,
)
"""
Finish.
"""
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
fcf73361e13334179a65507f2fd77fdb971b2c40 | 8ca47670ed87ff22b1086032e14175bb1b6760c7 | /scrapyCrawler/scrapycrawl/scrapycrawl/scrapy_redis/dupefilter.py | e17cd6c15b889ecc4131e8c88f9910d2803b4684 | [] | no_license | public-spider/spider | 789aad3b0f781464267f03234cd26ea4e1147978 | f1e57f3ac6548b1bb8f6a5dc1f03c39c481f9311 | refs/heads/master | 2020-12-24T14:53:38.040968 | 2014-10-20T03:15:27 | 2014-10-20T03:15:27 | 22,784,057 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | '''
Created on Aug 13, 2014
@author: whisky
'''
import redis
import time
from scrapy.dupefilter import BaseDupeFilter
from scrapy.utils.request import request_fingerprint
class RFPDupeFilter(BaseDupeFilter):
    """Redis-backed request duplicate filter.

    Request fingerprints are stored in a Redis set, so deduplication state
    lives outside the crawler process.
    """

    def __init__(self, server, key):
        """
        :param server: redis client used to store fingerprints.
        :param key: name of the Redis set holding the fingerprints.
        """
        self.server = server
        self.key = key

    @classmethod
    def from_settings(cls, settings):
        """Build a filter from settings (``REDIS_HOST`` / ``REDIS_PORT``)."""
        host = settings.get('REDIS_HOST', 'localhost')
        # Was settings.get('REDIS-PORT', ...): the hyphenated key never matched
        # the conventional REDIS_PORT setting (compare REDIS_HOST above), so a
        # configured port was silently ignored and the default always used.
        port = settings.get('REDIS_PORT', 6379)
        server = redis.Redis(host, port)
        # Timestamped key gives every crawl run a fresh fingerprint set.
        key = "dupefilter:%s" % int(time.time())
        return cls(server, key)

    def from_clawler(self, cls, crawler):
        # NOTE(review): Scrapy expects a classmethod named `from_crawler(cls,
        # crawler)`; this misspelled instance method is never invoked by the
        # framework.  Kept as-is to avoid changing the public interface.
        return cls.from_settings(crawler.settings)

    def request_seen(self, request):
        """Return True if `request`'s fingerprint is already stored,
        otherwise record it and return False."""
        fp = request_fingerprint(request)
        if self.server.sismember(self.key, fp):
            return True
        self.server.sadd(self.key, fp)
        return False

    def close(self, reson):  # (sic: Scrapy passes the close `reason` here)
        """Delete stored data on close; called by Scrapy's scheduler."""
        self.clear()

    def clear(self):
        """Drop the fingerprint set from Redis."""
        self.server.delete(self.key)
| [
"260643431@qq.com"
] | 260643431@qq.com |
a652e78af109d2bacbf8df44bfbe96159701df7b | d6375b3202143d7a0761dcb82a2ae8466ff4676a | /apps/users/migrations/0001_initial.py | 09323b8724c2cf41125463026fb876b9fb3956ef | [] | no_license | Allkoman/mxonline | 264c85d18100c3a458b103dcef16393a883d6607 | 431d524e32043aead36f4da71a8424a132a5c1bc | refs/heads/master | 2021-01-20T14:10:27.493578 | 2017-02-24T01:21:41 | 2017-02-24T01:21:41 | 82,746,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,620 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-20 06:19
from __future__ import unicode_literals
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration creating the custom `UserProfile`
    # auth user model.  Do not hand-edit an applied migration; schema fixes
    # (e.g. the misspelled `birday` field below) belong in a follow-up
    # migration.
    initial = True
    dependencies = [
        ('auth', '0007_alter_validators_add_error_messages'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('nick_name', models.CharField(default='', max_length=50, verbose_name='\u6635\u79f0')),
                # NOTE(review): field name is misspelled ('birday' -> 'birthday');
                # renaming requires a new migration, not an edit here.
                ('birday', models.DateField(blank=True, null=True, verbose_name='\u751f\u65e5')),
                ('gender', models.CharField(choices=[('male', '\u7537'), ('female', '\u5973')], default='female', max_length=5)),
                ('address', models.CharField(default='', max_length=100)),
                ('mobile', models.CharField(blank=True, max_length=11, null=True)),
                ('image', models.ImageField(default='image/default.png', upload_to='image/%Y/%m')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': '\u7528\u6237\u4fe1\u606f',
                'verbose_name_plural': '\u7528\u6237\u4fe1\u606f',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| [
"18646085515@163.com"
] | 18646085515@163.com |
1e6e066f903701f4d59405a4f73cd24b8d2114a3 | f950882940764ace71e51a1512c16a5ac3bc47bc | /src/ThirdParty/freetype/src/tools/PaxHeaders.20567/chktrcmp.py | b08e1c1af6a391977a0aba1d7924afee49d7c048 | [
"FTL"
] | permissive | ViacheslavN/GIS | 3291a5685b171dc98f6e82595dccc9f235e67bdf | e81b964b866954de9db6ee6977bbdf6635e79200 | refs/heads/master | 2021-01-23T19:45:24.548502 | 2018-03-12T09:55:02 | 2018-03-12T09:55:02 | 22,220,159 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | 30 mtime=1398010730.538379929
30 atime=1417845746.439132282
30 ctime=1398010730.538379929
| [
"nk.viacheslav@gmail.com"
] | nk.viacheslav@gmail.com |
47b47f8164ca12deea39a8616361bde823c92e50 | 09c976bf8d942bb30e284fff9f76db1845c2aa6a | /UTD_CS_6375/HW6/ScikitKmeansAndKmeans++.py | b8d692a229dbc4680cbc8e6554d8e7a633fb2639 | [] | no_license | mikexie360/UTD_CS | 232d62ca992b43c8f4917f5525fc006fdc7132df | 23f7a6266841f6c25dd649d56060b961343869f7 | refs/heads/master | 2023-04-30T06:40:55.272767 | 2021-05-25T00:48:02 | 2021-05-25T00:48:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,433 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 10 17:29:41 2020
@author: ROHITH PEDDI
"""
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
# Load the leaf dataset; presumably the first column is the true class label
# and the rest are features -- confirm against the dataset description.
data_train = pd.read_csv('leaf.data', header = None).values
M, N_c = data_train.shape
cluster_centers_actual = data_train[:, 0]
X = data_train[:, 1:N_c]
#k_list = [12, 18, 24, 36, 42]
k_list = [36]
tol = 1e-17
# NOTE(review): sklearn's KMeans expects an int for max_iter; newer versions
# reject this float -- confirm/convert to int(1e+4) if it errors.
max_iter = 1e+4
# Number of feature columns (label column excluded).
N = N_c-1
###################################################################################
####################### KMEANS ########################
###################################################################################
def get_vanilla_cluster_centers(n_clusters, n_features=None):
    """Draw random initial cluster centers with integer coordinates in [-3, 3]
    (presumably matching the standardized feature range -- confirm).

    :param n_clusters: number of centers to generate.
    :param n_features: dimensionality of each center; defaults to the
        module-level feature count ``N`` for backward compatibility.
    :return: float ndarray of shape (n_clusters, n_features).
    """
    if n_features is None:
        n_features = N
    # One vectorized draw replaces the previous per-row Python loop; cast to
    # float to keep the dtype the old np.empty-based version returned.
    return np.random.choice(np.arange(-3, 4), size=(n_clusters, n_features)).astype(float)
def Vanilla_Kmeans():
    """Run k-means once per k in `k_list` from random integer seed centers.

    :return: ndarray of shape (n_runs, len(k_list)) holding the final
        inertia (within-cluster sum of squares) of each run.
    """
    # Size the result to exactly what the loops fill.  The previous
    # np.empty((20, 5)) left most entries as uninitialized memory, which
    # then polluted the mean/variance computed from the returned array.
    n_runs = 1
    inertia_matrix = np.zeros((n_runs, len(k_list)))
    for i in range(n_runs):
        for j in range(len(k_list)):
            print('#############################################################################')
            n_clusters = k_list[j]
            print (n_clusters, ' CLUSTERS, ','ITERATION ', i)
            cluster_centers = get_vanilla_cluster_centers(n_clusters)
            kmeans = KMeans(n_clusters=n_clusters, init=cluster_centers, tol=tol, max_iter=max_iter, verbose=1, n_init=1).fit(X)
            predicted_cluster_labels = kmeans.labels_
            print(cluster_centers_actual)
            print(predicted_cluster_labels+1)
            inertia_matrix[i][j] = kmeans.inertia_
    return inertia_matrix
# Mean and variance of the final inertia across runs, per k.
vanilla_inertia_matrix = Vanilla_Kmeans()
vanilla_mean = np.mean(vanilla_inertia_matrix, axis = 0)
vanilla_var = np.var(vanilla_inertia_matrix, axis = 0)
###################################################################################
####################### KMEANS++ ########################
###################################################################################
def get_kmeans_plus_plus_cluster_centers(n_clusters):
    """k-means++ seeding: start from one random point, then repeatedly pick
    the next center with probability proportional to the squared distance
    from each point to its nearest already-chosen center.

    :return: ndarray of shape (n_clusters, N).
    """
    # Seed with one uniformly random data point.
    centers = [(X[np.random.randint(0, M), :]).reshape(1, -1)]
    for _ in range(n_clusters - 1):
        # Squared distance of every point to its nearest chosen center.
        nearest_sq_dist = []
        for row in range(M):
            point = X[row]
            nearest_sq_dist.append(min(np.sum((point - center) ** 2) for center in centers))
        # Normalize the distances so they sum to one.
        weights = np.array(nearest_sq_dist).reshape(1, M)
        weights = weights / np.sum(weights)
        # Treat the normalized distances as a sampling distribution.
        probabilities = weights.flatten().tolist()
        sampled_choice = np.random.choice(list(range(0, M)), 1, p=probabilities)
        # Append the newly sampled center.
        centers.append(X[sampled_choice])
    return np.array(centers).reshape(n_clusters, N)
def Kmeans_plus_plus():
    """Run k-means with k-means++ style seeding, 20 runs per k in `k_list`.

    :return: ndarray of shape (n_runs, len(k_list)) of final inertias.
    """
    n_runs = 20
    # Allocate exactly the slots the loops fill.  The previous
    # np.empty((20, 5)) left all unused columns as uninitialized memory.
    kmeans_plus_plus_inertia_matrix = np.zeros((n_runs, len(k_list)))
    for i in range(n_runs):
        for j in range(len(k_list)):
            print('#############################################################################')
            n_clusters = k_list[j]
            print (n_clusters, ' CLUSTERS, ','ITERATION ', i)
            cluster_centers = get_kmeans_plus_plus_cluster_centers(n_clusters)
            kmeans = KMeans(n_clusters=n_clusters, init=cluster_centers, tol=tol, max_iter=max_iter, verbose=1, n_init=1).fit(X)
            predicted_cluster_labels = kmeans.labels_
            kmeans_plus_plus_inertia_matrix[i][j] = kmeans.inertia_
    return kmeans_plus_plus_inertia_matrix
#kmeans_plus_plus_inertia_matrix = Kmeans_plus_plus()
#kmeans_plus_plus_mean = np.mean(kmeans_plus_plus_inertia_matrix, axis = 0)
#kmeans_plus_plus_var = np.var(kmeans_plus_plus_inertia_matrix, axis = 0) | [
"rohith.peddi7@gmail.com"
] | rohith.peddi7@gmail.com |
a2912b63ff16dc838e87900ce2db2d1f3a43c590 | 0d153f781d04c0fa925a864e03bf28d2bd61cb06 | /python/p7.py | 443704e9aca28ce794b8c809d7048a90e9545fa0 | [] | no_license | glovguy/project-euler-solutions | f9750cf1ca71a2aba9433f99d89838749aa9cf00 | 38f9c60d9d45f88d5d9a384404ab5d41cff491f0 | refs/heads/master | 2021-01-21T15:04:41.877811 | 2020-06-07T21:20:27 | 2020-06-07T21:20:27 | 57,855,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | '''By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
What is the 10 001st prime number?'''
def is_prime(num):
    """Return True iff `num` is prime, by trial division up to sqrt(num).

    The previous version looped over range(2, num // 2); that range is empty
    for num == 4, so is_prime(4) wrongly returned True (it also scanned far
    past sqrt(num) for large inputs).
    """
    if num < 2:
        return False
    n = 2
    while n * n <= num:
        if num % n == 0:
            return False
        n += 1
    return True
def prime_numbers(until):
    """Yield exactly `until` primes in increasing order.

    The previous `while t < until + 1` loop yielded one prime too many;
    combined with is_prime(4) == True, the two bugs cancelled out in the
    script below, so both are fixed together here to keep the script's
    printed answer (the 10,001st prime) unchanged.
    """
    count = 0
    candidate = 2
    while count < until:
        if is_prime(candidate):
            count += 1
            yield candidate
        candidate += 1
# Materialize the generator and print the last element -- the answer to
# Project Euler problem 7 (the 10,001st prime, 104743).
primes = prime_numbers(10001)
allPrimes = [j for j in primes]
print(allPrimes[len(allPrimes)-1])
| [
"karlsmith@bouzou.com"
] | karlsmith@bouzou.com |
a3143711129b88f014fda2d2ef6ac1b8d0d0f0c0 | 6a0a7269ee3cd16763510753a9b2b073accd017d | /5 Airflow/L3/dags/exercise4.py | b997cad138f3cbe2165205bae8ec154054d644fb | [] | no_license | villoro/DEND | e8a5010a916ecf70c47780f9a59b84ccc5dcbcb2 | 398d297232cc5139d9536019db2fd5d60a9ac04f | refs/heads/master | 2021-05-18T16:50:26.306945 | 2020-04-25T09:57:39 | 2020-04-25T09:57:39 | 251,325,163 | 0 | 0 | null | 2020-04-25T09:57:40 | 2020-03-30T14:10:16 | Jupyter Notebook | UTF-8 | Python | false | false | 1,782 | py | import datetime
from airflow import DAG
from airflow.operators import FactsCalculatorOperator, HasRowsOperator, S3ToRedshiftOperator
#
# The following DAG performs the following functions:
#
# 1. Loads Trip data from S3 to RedShift
# 2. Performs a data quality check on the Trips table in RedShift
# 3. Uses the FactsCalculatorOperator to create a Facts table in Redshift
# a. **NOTE**: to complete this step you must complete the FactsCalcuatorOperator
# skeleton defined in plugins/operators/facts_calculator.py
#
dag = DAG("lesson3.exercise4", start_date=datetime.datetime.utcnow())
#
# The following code will load trips data from S3 to RedShift. Use the s3_key
# "data-pipelines/divvy/unpartitioned/divvy_trips_2018.csv"
# and the s3_bucket "udacity-dend"
#
copy_trips_task = S3ToRedshiftOperator(
task_id="load_trips_from_s3_to_redshift",
dag=dag,
table="trips",
redshift_conn_id="redshift",
aws_credentials_id="aws_credentials",
s3_bucket="udacity-dend",
s3_key="data-pipelines/divvy/unpartitioned/divvy_trips_2018.csv",
)
#
# Data quality check on the Trips table
#
check_trips = HasRowsOperator(
task_id="check_trips_data", dag=dag, redshift_conn_id="redshift", table="trips"
)
#
# We use the FactsCalculatorOperator to create a Facts table in RedShift. The fact column is
# `tripduration` and the groupby_column is `bikeid`
#
calculate_facts = FactsCalculatorOperator(
task_id="calculate_facts_trips",
dag=dag,
redshift_conn_id="redshift",
origin_table="trips",
destination_table="trips_facts",
fact_column="tripduration",
groupby_column="bikeid",
)
#
# Task ordering for the DAG tasks
#
copy_trips_task >> check_trips
check_trips >> calculate_facts
| [
"villoro7@gmail.com"
] | villoro7@gmail.com |
52e17291e5c10f8c1e415d3e6968fd57a2fa3c58 | b5321f6865f91ef8fb783a3e76e15e0d13e5a711 | /lesson_11/lesson11_ex1.py | 8aeb00e829a0e0bced0a4739ba3f56ff5a8b1983 | [] | no_license | DianaChumachenko/PythonIntro | 6689772391ed7f7e3c9380cf8470ae67fd3e9dd4 | 46e7c4c8b07ebdb076073910337b18f4d7f5ac1a | refs/heads/main | 2023-02-09T09:50:51.679456 | 2021-01-04T19:31:27 | 2021-01-04T19:31:27 | 311,464,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | from pprint import pprint
# Map each ASCII code point in [32, 128) to its character.
d = {code: chr(code) for code in range(32, 128)}
pprint(d) | [
"dchumachenko0508@gmail.com"
] | dchumachenko0508@gmail.com |
b094b2109fab7c668ff7b27eeb1147aa55d6aa9c | 8e13c309b04ab6e56de828ab6f2206ba84ed00d8 | /app/models.py | 72a1eae3d26f16bbd0dea9f9f8a6a06fbf0c381c | [] | no_license | mr-Sanchez/first_project | d809cfc93c8486d51ae82fae303f264fb0130d32 | 8a5defb863833fcb6905e55f34271aaabcd7485b | refs/heads/main | 2023-05-24T13:22:45.216893 | 2021-06-03T14:28:18 | 2021-06-03T14:28:18 | 363,196,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,319 | py | from app import db
import re
from sqlalchemy.orm import backref
class PaymentMethod(db.Model):
    # Lookup table of available payment options.
    id = db.Column(db.Integer, primary_key=True)
    # Internal identifier of the method; presumably used in code/templates.
    payment_method_name = db.Column(db.String(100))
    # Human-readable label shown to the customer (presumably) -- confirm.
    payment_method_caption = db.Column(db.String(100))
class ClothesCategory(db.Model):
    # Category of clothing items (e.g. per gender via `clothes_for`).
    id = db.Column(db.Integer, primary_key=True)
    # URL/slug path of the category -- presumably; verify against the views.
    category_path = db.Column(db.String(255))
    category_name = db.Column(db.String(255))
    clothes_for = db.Column(db.String(100))
    # One-to-many: items in this category (gives ClothesItem.category).
    clothes = db.relationship('ClothesItem', backref='category')
class ClothesItem(db.Model):
    # A sellable clothing product.
    id = db.Column(db.Integer, primary_key=True)
    clothes_name = db.Column(db.String(255))
    # Price and discount stored as integers (units not shown here -- confirm
    # whether these are whole currency units or percent for the discount).
    clothes_price = db.Column(db.Integer)
    clothes_discount = db.Column(db.Integer)
    clothes_description = db.Column(db.Text)
    clothes_category_id = db.Column(db.Integer, db.ForeignKey('clothes_category.id'))
    # One-to-many: product photos (gives ClothesItemImage.clothes).
    images = db.relationship('ClothesItemImage', backref='clothes')
    # One-to-many: per-size stock rows (gives ClothesSizes.clothes).
    sizes = db.relationship('ClothesSizes', backref='clothes')
class ClothesItemImage(db.Model):
    # A single image file attached to a clothing item.
    id = db.Column(db.Integer, primary_key=True)
    clothes_image_path = db.Column(db.String(255))
    clothes_id = db.Column(db.Integer, db.ForeignKey('clothes_item.id'))
class ClothesSizes(db.Model):
    # Stock of one size of one clothing item.
    id = db.Column(db.Integer, primary_key=True)
    size = db.Column(db.String(100))
    count = db.Column(db.Integer)
    clothes_id = db.Column(db.Integer, db.ForeignKey('clothes_item.id'))
    # One-to-many: sold units of this size (gives SoldClothes.size).
    purchases = db.relationship('SoldClothes', backref='size')
class User(db.Model):
    """Registered shop customer."""
    id = db.Column(db.Integer, primary_key=True)
    user_name = db.Column(db.String(255))
    user_email = db.Column(db.String(255), unique=True)
    # NOTE(review): nothing in this model hashes the password; if callers
    # store it as-is, switch to a salted hash — confirm at call sites.
    user_password = db.Column(db.String(255))
    # One-to-many: orders placed by this user; adds Purchase.user backref.
    purchases = db.relationship('Purchase', backref='user')
class Coupon(db.Model):
    """Discount code that can be applied to a purchase."""
    id = db.Column(db.Integer, primary_key=True)
    coupon_code = db.Column(db.String(20))
    # Integer discount — presumably a percentage; confirm where applied.
    coupon_discount = db.Column(db.Integer)
    # Flags — 'is_added' looks like "already claimed"; verify the exact
    # semantics against the code that mutates these.
    coupon_is_added = db.Column(db.Boolean)
    coupon_is_active = db.Column(db.Boolean)
class Purchase(db.Model):
    """An order placed by a User; line items live in SoldClothes."""
    id = db.Column(db.Integer, primary_key=True)
    purchase_date = db.Column(db.DateTime)
    # Cost/discount — presumably same integer currency unit as
    # ClothesItem.clothes_price; TODO confirm.
    purchase_cost = db.Column(db.Integer)
    purchase_discount = db.Column(db.Integer)
    purchase_address = db.Column(db.String(255))
    purchase_payment_method_id = db.Column(db.Integer, db.ForeignKey('payment_method.id'))
    purchase_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    # NOTE(review): SoldClothes also declares its own relationship back to
    # Purchase (backref 'sold') over the same FK; check for overlap warnings.
    sizes = db.relationship('SoldClothes', backref='purchase')
class SoldClothes(db.Model):
    """Order line item: quantity of one ClothesSizes row in one Purchase."""
    id = db.Column(db.Integer, primary_key=True)
    sold_clothes_quantity = db.Column(db.Integer)
    sold_clothes_size_id = db.Column(db.Integer, db.ForeignKey('clothes_sizes.id'))
    # passive_deletes='all' tells the ORM not to touch these rows when the
    # parent is deleted (the database is expected to handle it).
    sold_clothes_size = db.relationship('ClothesSizes', backref=backref('sold', passive_deletes='all'))
    sold_clothes_purchase_id = db.Column(db.Integer, db.ForeignKey('purchase.id'))
    sold_clothes_purchase = db.relationship('Purchase', backref=backref('sold', passive_deletes='all'))
class Comment(db.Model):
    """A user comment attached to a ClothesItem."""
    id = db.Column(db.Integer, primary_key=True)
    comment_author = db.Column(db.String(100))
    comment_text = db.Column(db.Text)
    comment_publish_date = db.Column(db.DateTime)
    comment_clothes_id = db.Column(db.Integer, db.ForeignKey('clothes_item.id'))
"aleksandr.ptrk@gmail.com"
] | aleksandr.ptrk@gmail.com |
f8527e61ab34f1911b17fa049c376e9b2b0500f1 | 2bb2d5f01b1f9c77e8092f1bdbf15eb10b263b2b | /livecareer/items.py | 0e150df1eb46e48395e3d8a284e86ddb68281e46 | [] | no_license | vasarmilan/livecareer-scraper | 5ab96500ed167e319eb6814953cc8f7a885bdffd | f1b545a5de506fb223d94699cfb42c66897a9959 | refs/heads/master | 2022-04-22T09:49:54.860732 | 2020-03-17T11:58:08 | 2020-03-17T11:58:08 | 247,695,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class LivecareerItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| [
"vasarmilan@gmail.com"
] | vasarmilan@gmail.com |
fdb31dc080683eafda61d023918635e0d3993089 | 19a5407847be78fcc48dfedbfa677c78e26d39e6 | /PythonLearn/函数式编程/高阶函数/filter.py | 87444e5290e4104e6f75fc8d9fa03eae05144359 | [] | no_license | gong782008371/yuquan | f9ac943ef6f1f8a0f855eb7be289ba5f830fccfe | 93ef594ec671f3ac3a945609065bd481238cead6 | refs/heads/master | 2020-06-05T02:25:47.070041 | 2017-01-11T11:31:18 | 2017-01-11T11:31:18 | 31,074,477 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | # -*- coding:utf-8 -*-
#Python内建的filter()函数用于过滤序列。
#
#和map()类似,filter()也接收一个函数和一个序列。
#和map()不同的时,filter()把传入的函数依次作用于每个元素,然后根据返回值是True还是False决定保留还是丢弃该元素。
def is_odd(n):
    """Predicate for filter(): True when n leaves remainder 1 modulo 2."""
    remainder = n % 2
    return remainder == 1
# Python 2 print statement; filter() returns a list here. Expected: [1, 3, 5, 9]
print filter(is_odd, [1, 2, 3, 4, 5, 6, 9]) #[1, 3, 5, 9]
def not_empty(s):
    """Predicate for filter(): falsy for None/'' and whitespace-only strings.

    Returns the original falsy value unchanged, otherwise the stripped
    string — the same short-circuit result as ``s and s.strip()``.
    """
    if not s:
        return s
    return s.strip()
# Drops '', None and whitespace-only entries. Expected: ['A', 'B', 'C']
print filter(not_empty, ['A', '', 'B', None, 'C', ' ']) #['A', 'B', 'C']
#练习
#
#请尝试用filter()删除1~100的素数。
import math
def not_prime(x):
if x <= 1:
return True
for i in range(2, int(math.sqrt(x + 0.5)) + 1):
if x % i == 0:
return True
return False
# Keeps 1 and all composites, i.e. removes every prime from 1..100.
print filter(not_prime, [i for i in range(1, 101)])
"782008371@qq.com"
] | 782008371@qq.com |
9dcb940cd9146536df36cac078e567c812b0cf16 | e6c1c1352df0ff0906e23b3cd14520155b9d0e0c | /mysite/settings.py | ef3b731603b572d49fec66ecf0a1b4eb6d8e28b4 | [] | no_license | elciorodrigo/apiCep | 63645190d8439bb3c49f786bd474c0597655c707 | abdb7bfb2787b93ad5b19a989747d99d70c67538 | refs/heads/master | 2021-08-11T20:30:44.460138 | 2017-11-14T03:57:26 | 2017-11-14T03:57:26 | 110,638,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is hard-coded in source control; load it from
# an environment variable before deploying anywhere public.
SECRET_KEY = 'lng!)12@vj#m0f@zpzg%8=6(eo7ux!r64!hdcrz1_l^c+5gxvk'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True exposes stack traces and settings to visitors.
DEBUG = True
# Empty is acceptable only while DEBUG=True; list served hostnames otherwise.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'polls.apps.PollsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Development default: file-based SQLite next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"elciorodrigo@gmail.com"
] | elciorodrigo@gmail.com |
d3ae884063fc0c7dd51548c9a177d6e35488fb1e | 0687f997984b71293ba896862758f46103901b36 | /compute_prediction/cnn_test.py | 2999faa86459e04be7a87ac6426d9d8c0203540b | [] | no_license | XinYao1994/Clara | 28b6ad41428301a49401d60d36d15741857dbbdc | eea38c52beb17600dd325f465a3740f267bab2e5 | refs/heads/master | 2023-08-29T22:01:17.451714 | 2021-11-01T04:08:25 | 2021-11-01T04:08:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,830 | py | import warnings
warnings.filterwarnings("ignore")
import numpy as np
import tensorflow as tf
#import matplotlib.pyplot as plt
# (tqdm, csv and re are imported but not used below)
import tqdm
import csv
import re
import pickle
# Load pre-tokenized datasets: each pickle holds an (X, Y) pair produced
# offline. NOTE: pickle.load is only safe on locally generated, trusted files.
with open("training.pickle",'rb') as ftrain:
    dataset_train = pickle.load(ftrain)
X, Y = dataset_train
with open("testing.pickle",'rb') as ftest:
    dataset_test = pickle.load(ftest)
X_test, Y_test = dataset_test
# "nf" = real network-function (Click element) samples used for the final
# per-element evaluation at the bottom of the script.
with open("nf.pickle",'rb') as factual:
    dataset_actual = pickle.load(factual)
X_actual, Y_actual = dataset_actual
# Vocabulary lookup tables (token -> int) for source and target text.
with open("source.pickle",'rb') as fsource:
    source_text_to_int = pickle.load(fsource)
with open("target.pickle",'rb') as ftarget:
    target_text_to_int = pickle.load(ftarget)
# parameters
tf.reset_default_graph()
# Hyper-parameters for the TF1-style static graph built below.
HIDDEN_SIZE = 512
# Input sequences are padded/truncated to 70 tokens.
SENTENCE_LIMIT_SIZE = 70
EMBEDDING_SIZE = 100
source_vocab_size = 125
encoder_embedding_size = 100
# One conv branch per filter width (windows of 3 and 5 tokens).
filters_size = [3, 5]
num_filters = 50
BATCH_SIZE = 256
EPOCHES = 50
LEARNING_RATE = 0.001
# L2 weight on the output layer, added to the loss below.
L2_LAMBDA = 10
# Keep probability for the (currently commented-out) dropout layer.
KEEP_PROB = 0.8
with tf.name_scope("cnn"):
    with tf.name_scope("placeholders"):
        # Token-id sequences (batch, SENTENCE_LIMIT_SIZE) and (batch, 1) targets.
        inputs = tf.placeholder(dtype=tf.int32, shape=(None, SENTENCE_LIMIT_SIZE), name="inputs")
        targets = tf.placeholder(dtype=tf.float32, shape=(None, 1), name="targets")
    # embeddings
    with tf.name_scope("embeddings"):
        #embedding_matrix = tf.Variable(initial_value=static_embeddings, trainable=False, name="embedding_matrix")
        #embed = tf.nn.embedding_lookup(embedding_matrix, inputs, name="embed")
        # Trainable embedding lookup, then add a channel dim for conv2d.
        encoder_embed = tf.contrib.layers.embed_sequence(inputs, source_vocab_size, encoder_embedding_size)
        embed_expanded = tf.expand_dims(encoder_embed, -1, name="embed_expand")
    # max-pooling results
    pooled_outputs = []
    # iterate multiple filter
    # One convolution + max-over-time pooling branch per filter width.
    for i, filter_size in enumerate(filters_size):
        with tf.name_scope("conv_maxpool_%s" % filter_size):
            filter_shape = [filter_size, EMBEDDING_SIZE, 1, num_filters]
            W = tf.Variable(tf.truncated_normal(filter_shape, mean=0.0, stddev=0.1), name="W")
            b = tf.Variable(tf.zeros(num_filters), name="b")
            conv = tf.nn.conv2d(input=embed_expanded,
                               filter=W,
                               strides=[1, 1, 1, 1],
                               padding="VALID",
                               name="conv")
            # activation
            a = tf.nn.relu(tf.nn.bias_add(conv, b), name="activations")
            # pooling
            # Max over the whole valid region (length - filter_size + 1).
            max_pooling = tf.nn.max_pool(value=a,
                                        ksize=[1, SENTENCE_LIMIT_SIZE - filter_size + 1, 1, 1],
                                        strides=[1, 1, 1, 1],
                                        padding="VALID",
                                        name="max_pooling")
            pooled_outputs.append(max_pooling)
    # filter information
    # Concatenate both branches -> (batch, num_filters * len(filters_size)).
    total_filters = num_filters * len(filters_size)
    total_pool = tf.concat(pooled_outputs, 3)
    flattend_pool = tf.reshape(total_pool, (-1, total_filters))
    # dropout
    #with tf.name_scope("dropout"):
        #dropout = tf.nn.dropout(flattend_pool, KEEP_PROB)
    # output
    with tf.name_scope("output"):
        # Single sigmoid output unit; downstream code rescales it by 64.
        W = tf.get_variable("W", shape=(total_filters, 1), initializer=tf.contrib.layers.xavier_initializer())
        b = tf.Variable(tf.zeros(1), name="b")
        logits = tf.add(tf.matmul(flattend_pool, W), b)
        predictions = tf.nn.sigmoid(logits, name="predictions")
    # loss
    with tf.name_scope("loss"):
        # Sigmoid cross-entropy plus an L2 penalty on the output weights only.
        loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits))
        loss = loss + L2_LAMBDA * tf.nn.l2_loss(W)
        optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)
    # evaluation
    with tf.name_scope("evaluation"):
        # Threshold predictions at 0.5 and count exact matches with targets.
        correct_preds = tf.equal(tf.cast(tf.greater(predictions, 0.5), tf.float32), targets)
        accuracy = tf.reduce_sum(tf.reduce_sum(tf.cast(correct_preds, tf.float32), axis=1))
def get_batch(x, y, batch_size=BATCH_SIZE, shuffle=True):
    """Yield aligned (x, y) mini-batches of `batch_size` rows each.

    x, y: indexable arrays with matching first dimension.
    shuffle: when True, both arrays are permuted with the same random order
             before batching.

    NOTE(review): the loop runs range(n_batches - 1), so the final full batch
    (and any remainder) is never yielded. The evaluation code in this file
    normalizes by (batches - 1) to match, so the behavior is kept as-is.
    """
    # Fixed defect: the original wrote `assert cond, print("error shape!")`,
    # which passes None (print's return value) as the assertion message.
    assert x.shape[0] == y.shape[0], "error shape!"
    # shuffle
    if shuffle:
        shuffled_index = np.random.permutation(range(x.shape[0]))
        x = x[shuffled_index]
        y = y[shuffled_index]
    n_batches = int(x.shape[0] / batch_size)
    for i in range(n_batches - 1):
        x_batch = x[i*batch_size: (i+1)*batch_size]
        y_batch = y[i*batch_size: (i+1)*batch_size]
        yield x_batch, y_batch
saver = tf.train.Saver()
import time
with tf.Session() as sess:
    #sess.run(tf.global_variables_initializer())
    # Evaluation-only run: restore a previously trained checkpoint.
    saver.restore(sess, "./models/cnn_final")
    writer = tf.summary.FileWriter("./graphs/cnn_final", tf.get_default_graph())
    n_batches = int(X.shape[0] / BATCH_SIZE)
    print("n_batches: ", n_batches)
    total_ind = 0
    end_flag = 0
    # --- Mean relative error over the held-out test split ---
    test_sum = 0
    t_batches = int(X_test.shape[0] / BATCH_SIZE)
    for x_batch, y_batch in get_batch(X_test, Y_test):
        answer = sess.run(predictions, feed_dict={inputs: x_batch, targets: y_batch})
        for index in range(len(answer)):
            # Targets/predictions are scaled by 1/64; rescale before the
            # relative-error computation.
            test_sum += (abs(answer[index]*64-y_batch[index]*64))/(y_batch[index]*64)
    # get_batch yields t_batches-1 batches of 256 samples each.
    print("Test loss: {}".format(test_sum/(256*(t_batches-1))))
    answer = sess.run(predictions, feed_dict={inputs: X_test[-1:], targets: Y_test[-1:]})
    #print(answer, Y_test[-1])
    #lstm_test_accuracy.append(test_sum/(256*(t_batches-1)))
    # --- Same metric over the training split ---
    real_sum = 0
    r_batches = int(X.shape[0] / BATCH_SIZE)
    for x_batch, y_batch in get_batch(X, Y):
        answer = sess.run(predictions, feed_dict={inputs: x_batch, targets: y_batch})
        for index in range(len(answer)):
            real_sum += (abs(answer[index]*64-y_batch[index]*64))/(y_batch[index]*64)
    print("Train loss: {}".format(real_sum/(256*(r_batches-1))))
    #lstm_real_accuracy.append(real_sum/(256*(r_batches-1)))
    # --- Per-element WMAPE on the real Click network-function samples ---
    answer = sess.run(predictions, feed_dict={inputs: X_actual, targets: Y_actual})
    summation = 0
    jndex = 0
    pos = 0
    nfs = ["aggcounter", "anonipaddr", "forcetcp", "tcp_gen", "tcpack", "tcpresp", "timefilter" ,"udpipencap"]
    # Sample count per element; these sum to 89, the loop bound below.
    len_nfs = [15, 5, 17, 15, 2, 19, 12, 4]
    nn = a = b = c = 0
    temp_list = []
    for index in range(89):
        a += answer[index]
        b += Y_actual[index]
        c += abs(answer[index]-Y_actual[index])
        summation += abs(answer[index]-Y_actual[index])/Y_actual[index]
        nn += abs(answer[index]-Y_actual[index])/Y_actual[index]
        # len_nfs is consumed as a countdown: when one element's samples are
        # exhausted, record its ratio sum|err| / sum(pred) and reset.
        if len_nfs[pos] > 1:
            len_nfs[pos] -= 1
        else:
            temp_var = c/a
            temp_list.append(temp_var[0])
            pos += 1
            a = b = c = nn = 0
    print("Performance on real Click elements: ")
    for index, item in enumerate(temp_list):
        print("WMAPE of:", nfs[index], item)
    # (time_start is assigned but never used afterwards)
    time_start = time.time()
    writer.close()
| [
"qiuyimingrichard@gmail.com"
] | qiuyimingrichard@gmail.com |
bffe1c48eedf03607efaea59b6631311acfecb45 | 684cc3be4bbc6b7edfc9467e21d80b6bcfa692bc | /db/__init__.py | fc3b5e809327dc514f4fc4dd894ba88dbed3ba87 | [] | no_license | sap-ibso-t4/DemoJam2020-ML | 99ed38735ece9a95a583579c995cf7e9cde6cea5 | cb4cbe77cfb76e25b4792231b9b0444e5a236c81 | refs/heads/master | 2023-01-10T21:44:18.989212 | 2020-11-03T09:35:16 | 2020-11-03T09:35:16 | 297,948,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | from .sqliteAPI import SqliteAPI
from .dict_to_itab import data_frame_to_internal_table
| [
"zzfancitizen@gmail.com"
] | zzfancitizen@gmail.com |
2aeb217b02dbe82cdc5445f4bec4aafb01b07802 | 68049b03dbbd9a3d778571794472e07c05fb00ad | /python/courses/jose_portilla/flask/sandbox/10_databases/10_1_flask_and_databases_practice/setupdatabase.py | e2f38694c1a0eb91547cf484e4e8aa594a19934b | [] | no_license | tjkhara/notes | c9e96ecea6efed860c521eb7df562c5715091aea | 5602a25ba23104e4154700108f1b8a3a0144f712 | refs/heads/master | 2023-01-20T07:42:47.129359 | 2020-11-24T06:43:24 | 2020-11-24T06:43:24 | 285,811,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | from basic import db, Puppy
# creates all the tables
# takes the mapped classes and converts them into tables
db.create_all()
sam = Puppy('Sammy', 3)
frank = Puppy('Frankie', 4)
miles = Puppy('Miles', 10)
# These will print None because the rows are not in the database yet —
# primary keys are only assigned by the database on flush/commit.
print(sam.id)
print(frank.id)
print(miles.id)
# Stage all three objects in the session for insertion
db.session.add_all([sam, frank, miles])
# commit changes
db.session.commit()
# After commit the autoincrement ids are populated.
print(sam.id)
print(frank.id)
print(miles.id)
"tkhara@gmail.com"
] | tkhara@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.