hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
248
| max_stars_repo_name
stringlengths 5
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
248
| max_issues_repo_name
stringlengths 5
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
248
| max_forks_repo_name
stringlengths 5
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
2.06M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.03M
| alphanum_fraction
float64 0
1
| count_classes
int64 0
1.6M
| score_classes
float64 0
1
| count_generators
int64 0
651k
| score_generators
float64 0
1
| count_decorators
int64 0
990k
| score_decorators
float64 0
1
| count_async_functions
int64 0
235k
| score_async_functions
float64 0
1
| count_documentation
int64 0
1.04M
| score_documentation
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
02618a7eed33bdfbec9b651a6841eb4fcf49a22c
| 1,663
|
py
|
Python
|
utils/auth.py
|
BudzynskiMaciej/notifai_recruitment
|
56860db3a2dad6115747a675895b8f7947e7e12e
|
[
"MIT"
] | null | null | null |
utils/auth.py
|
BudzynskiMaciej/notifai_recruitment
|
56860db3a2dad6115747a675895b8f7947e7e12e
|
[
"MIT"
] | 2
|
2021-05-21T13:26:26.000Z
|
2022-02-10T10:04:55.000Z
|
utils/auth.py
|
BudzynskiMaciej/notifai_recruitment
|
56860db3a2dad6115747a675895b8f7947e7e12e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from rest_framework import authentication
from rest_framework import exceptions
from notifai_recruitment import settings
class MasterKeyNaiveAuthentication(authentication.BaseAuthentication):
    """DRF authentication backend that accepts a single shared master key.

    The key is read from the ``Bearer`` HTTP header and compared against
    ``settings.MASTER_KEY``. DRF expects every successful authentication to
    yield a user, so the first superuser found is returned as the acting
    user — at least one superuser account must therefore exist. This mirrors
    the key-agreed-by-email arrangement from the assignment; a per-user token
    scheme (DRF TokenAuthentication) or JWT would be a better fit for
    production use.
    """

    def authenticate(self, request):
        """Return ``(user, None)`` on a matching key, ``None`` when no key is
        supplied, and raise ``AuthenticationFailed`` on a wrong key."""
        supplied_key = request.META.get('HTTP_BEARER')
        if not supplied_key:
            # No credentials offered — let other authenticators have a go.
            return None
        acting_user = User.objects.filter(is_superuser=True).first()
        if supplied_key != settings.MASTER_KEY:
            raise exceptions.AuthenticationFailed('Wrong Bearer token!')
        return acting_user, None

    def authenticate_header(self, request):
        """Challenge value for the WWW-Authenticate header on 401 responses."""
        return 'Bearer: <MASTER_KEY>'
| 47.514286
| 120
| 0.736019
| 1,470
| 0.883945
| 0
| 0
| 0
| 0
| 0
| 0
| 1,006
| 0.604931
|
02623225e5d363b265ee6e56ba38be5191b44c1f
| 435
|
py
|
Python
|
scripts/issues/issue6.py
|
slamer59/awesome-panel
|
91c30bd6d6859eadf9c65b1e143952f7e64d5290
|
[
"Apache-2.0"
] | 179
|
2019-12-04T14:54:53.000Z
|
2022-03-30T09:08:38.000Z
|
scripts/issues/issue6.py
|
slamer59/awesome-panel
|
91c30bd6d6859eadf9c65b1e143952f7e64d5290
|
[
"Apache-2.0"
] | 62
|
2019-12-14T16:51:28.000Z
|
2022-03-19T18:47:12.000Z
|
scripts/issues/issue6.py
|
slamer59/awesome-panel
|
91c30bd6d6859eadf9c65b1e143952f7e64d5290
|
[
"Apache-2.0"
] | 35
|
2019-12-08T13:19:53.000Z
|
2022-03-25T10:33:02.000Z
|
import panel as pn
def main():
    """Demonstrate panel issue #6: leading indentation changes how
    pn.pane.Markdown renders a triple-quoted string."""
    text_error = """
    This is not formatted correctly by Markdown due to the indentation!"""
    text_ok = """
This is formatted correctly by Markdown!
"""
    separator = pn.pane.HTML(
        "<hr>",
        sizing_mode="stretch_width",
    )
    demo = pn.Column(
        pn.pane.Markdown(text_error),
        separator,
        pn.pane.Markdown(text_ok),
    )
    demo.servable()


main()
| 19.772727
| 75
| 0.542529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 150
| 0.344828
|
02649919ebe1649e2c617d8a536cb6343e919b0b
| 18,257
|
py
|
Python
|
Electronic_Arts_Software_Engineering_Virtual_Program/Task_1/Vaxman_in_Python/vaxman.py
|
melwyncarlo/Virtual_Internship_Programmes
|
1d1ae99abd63765d69ce930438c4bd6d15bd3d45
|
[
"CC0-1.0"
] | null | null | null |
Electronic_Arts_Software_Engineering_Virtual_Program/Task_1/Vaxman_in_Python/vaxman.py
|
melwyncarlo/Virtual_Internship_Programmes
|
1d1ae99abd63765d69ce930438c4bd6d15bd3d45
|
[
"CC0-1.0"
] | null | null | null |
Electronic_Arts_Software_Engineering_Virtual_Program/Task_1/Vaxman_in_Python/vaxman.py
|
melwyncarlo/Virtual_Internship_Programmes
|
1d1ae99abd63765d69ce930438c4bd6d15bd3d45
|
[
"CC0-1.0"
] | null | null | null |
# Vax-Man, a re-implementation of Pacman, in Python, with PyGame.
# Forked from: https://github.com/hbokmann/Pacman
# Edited by Melwyn Francis Carlo (2021)
# Video link: https://youtu.be/ZrqZEC6DvMc
import time
import pygame

# Ghosts multiply themselves every thirty seconds.
GHOST_MULTIPLICATION_TIME_GAP = 30
# Thirty-two times for each ghost type.
MAXIMUM_GHOSTS = 32 * 4;

# Colour palette (RGB tuples) used throughout the game.
indigo = ( 85, 48, 141 )
yellow = ( 255, 255, 0 )
darkRed = ( 201, 33, 30 )
darkGrey = ( 28, 28, 28 )
lightGrey = ( 238, 238, 238 )

# Window icon, set before the display itself is created below.
Vaxman_icon=pygame.image.load('images/Vaxman_Big.png')
pygame.display.set_icon(Vaxman_icon)

# Add music
# Spook4 by PeriTune | http://peritune.com
# Attribution 4.0 International (CC BY 4.0)
# https://creativecommons.org/licenses/by/4.0/
# Music promoted by https://www.chosic.com/free-music/all/
pygame.mixer.init()
pygame.mixer.music.load('peritune-spook4.mp3')
pygame.mixer.music.play(-1, 0.0)  # -1 loops the track forever, from offset 0
# Maze wall sprite.
class Wall(pygame.sprite.Sprite):
    """A solid rectangular wall of a given colour, anchored by its
    top-left corner at (x, y)."""

    def __init__(self, x, y, width, height, color):
        """Build a width x height surface filled with `color` at (x, y)."""
        pygame.sprite.Sprite.__init__(self)
        surface = pygame.Surface([width, height])
        surface.fill(color)
        self.image = surface
        self.rect = self.image.get_rect()
        self.rect.left = x
        self.rect.top = y
# Builds every wall of room one.
def setupRoomOne(all_sprites_list):
    """Create all wall sprites for room one and return them as a group.

    Every wall is also registered with `all_sprites_list` for drawing.
    """
    wall_list = pygame.sprite.RenderPlain()
    # Wall geometry, one [x, y, width, height] entry per wall.
    walls = [
        [0, 0, 6, 600],
        [0, 0, 600, 6],
        [0, 600, 606, 6],
        [600, 0, 6, 606],
        [300, 0, 6, 66],
        [60, 60, 186, 6],
        [360, 60, 186, 6],
        [60, 120, 66, 6],
        [60, 120, 6, 126],
        [180, 120, 246, 6],
        [300, 120, 6, 66],
        [480, 120, 66, 6],
        [540, 120, 6, 126],
        [120, 180, 126, 6],
        [120, 180, 6, 126],
        [360, 180, 126, 6],
        [480, 180, 6, 126],
        [180, 240, 6, 126],
        [180, 360, 246, 6],
        [420, 240, 6, 126],
        [240, 240, 42, 6],
        [324, 240, 42, 6],
        [240, 240, 6, 66],
        [240, 300, 126, 6],
        [360, 240, 6, 66],
        [0, 300, 66, 6],
        [540, 300, 66, 6],
        [60, 360, 66, 6],
        [60, 360, 6, 186],
        [480, 360, 66, 6],
        [540, 360, 6, 186],
        [120, 420, 366, 6],
        [120, 420, 6, 66],
        [480, 420, 6, 66],
        [180, 480, 246, 6],
        [300, 480, 6, 66],
        [120, 540, 126, 6],
        [360, 540, 126, 6],
    ]
    # Instantiate each wall and register it with both groups.
    for x, y, width, height in walls:
        wall = Wall(x, y, width, height, indigo)
        wall_list.add(wall)
        all_sprites_list.add(wall)
    return wall_list
def setupGate(all_sprites_list):
    """Create the thin light-grey gate above the ghost pen; returns its group."""
    gate = pygame.sprite.RenderPlain()
    gate_wall = Wall(282, 242, 42, 2, lightGrey)
    gate.add(gate_wall)
    all_sprites_list.add(gate)
    return gate
# Collectible dot sprite, derived from pygame's Sprite class.
class Block(pygame.sprite.Sprite):
    """A small filled ellipse on a transparent background; position is set
    by the caller via `rect.x` / `rect.y` after construction."""

    def __init__(self, color, width, height):
        """Draw a `color` ellipse on a width x height surface whose
        lightGrey background is made transparent via the colour key."""
        pygame.sprite.Sprite.__init__(self)
        surface = pygame.Surface([width, height])
        surface.fill(lightGrey)
        surface.set_colorkey(lightGrey)  # lightGrey pixels render transparent
        pygame.draw.ellipse(surface, color, [0, 0, width, height])
        self.image = surface
        # rect carries the sprite's on-screen position and size.
        self.rect = self.image.get_rect()
# This class represents the keyboard-controlled player sprite.
class Player(pygame.sprite.Sprite):
    """Keyboard-driven Vax-Man sprite; also the base class for Ghost."""

    # Speed vector: pixels moved per update along each axis.
    change_x=0
    change_y=0

    # Constructor function
    def __init__(self, x, y, filename):
        # Call the parent's constructor
        pygame.sprite.Sprite.__init__(self)
        # Sprite image loaded from disk; its size defines the hit box.
        self.image = pygame.image.load(filename).convert_alpha()
        # Make our top-left corner the passed-in location.
        self.rect = self.image.get_rect()
        self.rect.top = y
        self.rect.left = x
        # Previous direction components, refreshed by prevdirection().
        self.prev_x = x
        self.prev_y = y

    # NOTE(review): despite the original "clear the speed" comment, this
    # snapshots the current speed into prev_x/prev_y; nothing is cleared.
    def prevdirection(self):
        self.prev_x = self.change_x
        self.prev_y = self.change_y

    # Change the speed of the player.
    def changespeed(self,x,y):
        # Deltas accumulate: a KEYDOWN adds a component and the matching
        # KEYUP subtracts it again, so holding a key gives steady motion.
        self.change_x+=x
        self.change_y+=y

    # Find a new position for the player.
    def update(self,walls,gate):
        """Try the X move first, reverting it on wall contact; only when X
        succeeds is the Y move attempted (and likewise reverted on contact).
        `gate` is either a sprite group treated as solid, or False."""
        # Get the old position, in case we need to go back to it
        old_x=self.rect.left
        new_x=old_x+self.change_x
        prev_x=old_x+self.prev_x
        self.rect.left = new_x

        old_y=self.rect.top
        new_y=old_y+self.change_y
        prev_y=old_y+self.prev_y

        # Did the X move cause us to hit a wall?
        x_collide = pygame.sprite.spritecollide(self, walls, False)
        if x_collide:
            # Hit a wall: go back to the old X position.
            self.rect.left=old_x
            # self.rect.top=prev_y
            # y_collide = pygame.sprite.spritecollide(self, walls, False)
            # if y_collide:
            #     # Whoops, hit a wall. Go back to the old position
            #     self.rect.top=old_y
            #     print('a')
        else:
            # X move accepted; now attempt the Y move.
            self.rect.top = new_y

            # Did the Y move cause us to hit a wall?
            y_collide = pygame.sprite.spritecollide(self, walls, False)
            if y_collide:
                # Hit a wall: go back to the old Y position.
                self.rect.top=old_y
                # self.rect.left=prev_x
                # x_collide = pygame.sprite.spritecollide(self, walls, False)
                # if x_collide:
                #     # Whoops, hit a wall. Go back to the old position
                #     self.rect.left=old_x
                #     print('b')

        if gate != False:
            gate_hit = pygame.sprite.spritecollide(self, gate, False)
            if gate_hit:
                # The gate is impassable for the player: revert both axes.
                self.rect.left=old_x
                self.rect.top=old_y
# Inherits from the Player class.
class Ghost(Player):
    """A ghost that follows a pre-recorded list of [dx, dy, steps] segments."""

    def changespeed(self,list,ghost,turn,steps,l):
        """Set change_x/change_y from segment `turn` of `list`, advancing to
        the next segment once its step count is spent. `l` is the last valid
        index; Clyde ("clyde") wraps to segment 2, others to 0. Returns the
        updated [turn, steps] pair, or [0, 0] on an out-of-range access."""
        try:
            segment_steps = list[turn][2]
            if steps >= segment_steps:
                # Current segment exhausted: pick the next one (or wrap).
                if turn < l:
                    turn += 1
                elif ghost == "clyde":
                    turn = 2
                else:
                    turn = 0
                steps = 0
                self.change_x = list[turn][0]
                self.change_y = list[turn][1]
                return [turn, steps]
            self.change_x = list[turn][0]
            self.change_y = list[turn][1]
            return [turn, steps + 1]
        except IndexError:
            # Defensive reset if the route index ran off the end.
            return [0,0]
# Pre-scripted patrol routes, one table per ghost. Each entry is
# [dx, dy, steps]: move by (dx, dy) per update for `steps` updates before
# advancing to the next entry (see Ghost.changespeed).
Pinky_directions = [
    [0,-30,4],
    [15,0,9],
    [0,15,11],
    [-15,0,23],
    [0,15,7],
    [15,0,3],
    [0,-15,3],
    [15,0,19],
    [0,15,3],
    [15,0,3],
    [0,15,3],
    [15,0,3],
    [0,-15,15],
    [-15,0,7],
    [0,15,3],
    [-15,0,19],
    [0,-15,11],
    [15,0,9]
]

Blinky_directions = [
    [0,-15,4],
    [15,0,9],
    [0,15,11],
    [15,0,3],
    [0,15,7],
    [-15,0,11],
    [0,15,3],
    [15,0,15],
    [0,-15,15],
    [15,0,3],
    [0,-15,11],
    [-15,0,3],
    [0,-15,11],
    [-15,0,3],
    [0,-15,3],
    [-15,0,7],
    [0,-15,3],
    [15,0,15],
    [0,15,15],
    [-15,0,3],
    [0,15,3],
    [-15,0,3],
    [0,-15,7],
    [-15,0,3],
    [0,15,7],
    [-15,0,11],
    [0,-15,7],
    [15,0,5]
]

Inky_directions = [
    [30,0,2],
    [0,-15,4],
    [15,0,10],
    [0,15,7],
    [15,0,3],
    [0,-15,3],
    [15,0,3],
    [0,-15,15],
    [-15,0,15],
    [0,15,3],
    [15,0,15],
    [0,15,11],
    [-15,0,3],
    [0,-15,7],
    [-15,0,11],
    [0,15,3],
    [-15,0,11],
    [0,15,7],
    [-15,0,3],
    [0,-15,3],
    [-15,0,3],
    [0,-15,15],
    [15,0,15],
    [0,15,3],
    [-15,0,15],
    [0,15,11],
    [15,0,3],
    [0,-15,11],
    [15,0,11],
    [0,15,3],
    [15,0,1],
]

Clyde_directions = [
    [-30,0,2],
    [0,-15,4],
    [15,0,5],
    [0,15,7],
    [-15,0,11],
    [0,-15,7],
    [-15,0,3],
    [0,15,7],
    [-15,0,7],
    [0,15,15],
    [15,0,15],
    [0,-15,3],
    [-15,0,11],
    [0,-15,7],
    [15,0,3],
    [0,-15,11],
    [15,0,9],
]

# Last valid segment index of each route; Ghost.changespeed wraps past it.
pl = len(Pinky_directions) - 1
bl = len(Blinky_directions) - 1
il = len(Inky_directions) - 1
cl = len(Clyde_directions) - 1
# Call this function so the Pygame library can initialize itself.
pygame.init()

# Create a 606x606 sized window surface.
screen = pygame.display.set_mode([606, 606])

# Set the title of the window.
pygame.display.set_caption('Melly the Vax-Man')

# Create a surface we can draw on.
background = pygame.Surface(screen.get_size())
# Used for converting color maps and such.
background = background.convert()
# Fill it with the dark grey background colour.
background.fill(darkGrey)

# Clock limits the frame rate; font renders the score and overlay text.
clock = pygame.time.Clock()
pygame.font.init()
font = pygame.font.Font("freesansbold.ttf", 24)

# Default spawn coordinates for Vax-Man and the ghosts.
w = 303 - 16           # shared X for Vax-Man, Blinky and Pinky
p_h = 19 + (7 * 60)    # Vax-Man spawn Y
m_h = 19 + (4 * 60)    # ghost-pen spawn Y (Pinky, Inky, Clyde)
b_h = 19 + (3 * 60)    # Blinky spawn Y
i_w = 303 - 16 - 32    # Inky spawn X
c_w = 303 + (32 - 16)  # Clyde spawn X
def startGame():
    """Build the maze, spawn Vax-Man and the first ghosts, then run the main
    loop until the player wins (all dots eaten), loses (ghost cap reached),
    or closes the window."""
    all_sprites_list = pygame.sprite.RenderPlain()
    block_list = pygame.sprite.RenderPlain()
    ghosts_list = pygame.sprite.RenderPlain()
    vaxman_collide = pygame.sprite.RenderPlain()
    wall_list = setupRoomOne(all_sprites_list)
    gate = setupGate(all_sprites_list)

    # Create the player sprite.
    Vaxman = Player(w, p_h, "images/Vaxman_Small.png")
    all_sprites_list.add(Vaxman)
    vaxman_collide.add(Vaxman)

    # Per-type ghost records: {"entity": Ghost, "turn": idx, "steps": count}.
    Blinkies = []
    Pinkies = []
    Inkies = []
    Clydes = []

    # Lay out the grid of collectible dots.
    for row in range(19):
        for column in range(19):
            if (row == 7 or row == 8) and (column == 8 or column == 9 or column == 10):
                # Leave the ghost-pen tiles free of dots.
                continue
            else:
                block = Block(yellow, 4, 4)
                # Position the dot at the centre of its grid cell.
                block.rect.x = (30*column+6)+26
                block.rect.y = (30*row+6)+26
                b_collide = pygame.sprite.spritecollide(block, wall_list, False)
                p_collide = pygame.sprite.spritecollide(block, vaxman_collide, False)
                if b_collide:
                    # Dot would sit inside a wall: drop it.
                    continue
                elif p_collide:
                    # Dot would sit on Vax-Man's start tile: drop it.
                    continue
                else:
                    # Keep the dot.
                    block_list.add(block)
                    all_sprites_list.add(block)

    bll = len(block_list)  # total number of dots == winning score
    score = 0
    done = False
    i = 0  # NOTE(review): never used below
    previousTime = 0;

    while done == False:
        # ---- EVENT PROCESSING ----
        currentTime = time.time();
        deltaTime = currentTime - previousTime;
        # First pass (previousTime == 0) spawns one ghost of each type; after
        # that, every GHOST_MULTIPLICATION_TIME_GAP seconds each type that
        # still has survivors gains one more ghost.
        if previousTime == 0 or deltaTime > GHOST_MULTIPLICATION_TIME_GAP:
            if previousTime == 0 or Blinkies:
                Blinkies.append( { "entity" : Ghost(w, b_h, "images/Blinky.png"), "turn" : 0, "steps" : 0 } )
                ghosts_list.add(Blinkies[-1]["entity"])
                all_sprites_list.add(Blinkies[-1]["entity"])
            if previousTime == 0 or Pinkies:
                Pinkies.append( { "entity" : Ghost(w, m_h, "images/Pinky.png"), "turn" : 0, "steps" : 0 } )
                ghosts_list.add(Pinkies[-1]["entity"])
                all_sprites_list.add(Pinkies[-1]["entity"])
            if previousTime == 0 or Inkies:
                Inkies.append( { "entity" : Ghost(i_w, m_h, "images/Inky.png"), "turn" : 0, "steps" : 0 } )
                ghosts_list.add(Inkies[-1]["entity"])
                all_sprites_list.add(Inkies[-1]["entity"])
            if previousTime == 0 or Clydes:
                Clydes.append( { "entity" : Ghost(c_w, m_h, "images/Clyde.png"), "turn" : 0, "steps" : 0 } )
                ghosts_list.add(Clydes[-1]["entity"])
                all_sprites_list.add(Clydes[-1]["entity"])
            previousTime = currentTime

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done=True
            # KEYDOWN adds a velocity component; the matching KEYUP below
            # subtracts it again, so holding a key gives steady movement.
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    Vaxman.changespeed(-30,0)
                if event.key == pygame.K_RIGHT:
                    Vaxman.changespeed(30,0)
                if event.key == pygame.K_UP:
                    Vaxman.changespeed(0,-30)
                if event.key == pygame.K_DOWN:
                    Vaxman.changespeed(0,30)
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT:
                    Vaxman.changespeed(30,0)
                if event.key == pygame.K_RIGHT:
                    Vaxman.changespeed(-30,0)
                if event.key == pygame.K_UP:
                    Vaxman.changespeed(0,30)
                if event.key == pygame.K_DOWN:
                    Vaxman.changespeed(0,-30)

        # ---- GAME LOGIC ----
        Vaxman.update(wall_list, gate)

        # Advance each ghost along its scripted route.
        # NOTE(review): changespeed is invoked twice per ghost; the second
        # call's return value is discarded, so it only re-applies the speed
        # for the already-updated turn/steps values.
        for Pinky in Pinkies:
            returned = Pinky["entity"].changespeed(Pinky_directions, False, Pinky["turn"], Pinky["steps"], pl)
            Pinky["turn"] = returned[0]
            Pinky["steps"] = returned[1]
            Pinky["entity"].changespeed(Pinky_directions, False, Pinky["turn"], Pinky["steps"], pl)
            Pinky["entity"].update(wall_list, False)

        for Blinky in Blinkies:
            returned = Blinky["entity"].changespeed(Blinky_directions, False, Blinky["turn"], Blinky["steps"], bl)
            Blinky["turn"] = returned[0]
            Blinky["steps"] = returned[1]
            Blinky["entity"].changespeed(Blinky_directions, False, Blinky["turn"], Blinky["steps"], bl)
            Blinky["entity"].update(wall_list, False)

        for Inky in Inkies:
            returned = Inky["entity"].changespeed(Inky_directions, False, Inky["turn"], Inky["steps"], il)
            Inky["turn"] = returned[0]
            Inky["steps"] = returned[1]
            Inky["entity"].changespeed(Inky_directions, False, Inky["turn"], Inky["steps"], il)
            Inky["entity"].update(wall_list, False)

        for Clyde in Clydes:
            returned = Clyde["entity"].changespeed(Clyde_directions, "clyde", Clyde["turn"], Clyde["steps"], cl)
            Clyde["turn"] = returned[0]
            Clyde["steps"] = returned[1]
            Clyde["entity"].changespeed(Clyde_directions, "clyde", Clyde["turn"], Clyde["steps"], cl)
            Clyde["entity"].update(wall_list, False)

        # Dots eaten this frame are removed from the group (dokill=True)
        # and counted into the score.
        blocks_hit_list = pygame.sprite.spritecollide(Vaxman, block_list, True)
        if len(blocks_hit_list) > 0:
            score +=len(blocks_hit_list)

        # ---- DRAWING ----
        screen.fill(darkGrey)
        wall_list.draw(screen)
        gate.draw(screen)
        text=font.render("Score: "+str(score)+"/"+str(bll), True, darkRed)
        screen.blit(text, [10, 10])

        # Win: every dot eaten.
        if score == bll:
            userWantsToExit = doNext("Congratulations, you won!", 145, all_sprites_list, block_list, ghosts_list, vaxman_collide, wall_list, gate)
            if userWantsToExit:
                break

        # Touching a ghost removes it from the group (dokill=True); its
        # record is then dropped from the matching per-type list.
        ghosts_hit_list = pygame.sprite.spritecollide(Vaxman, ghosts_list, True)
        if ghosts_hit_list:
            for refBlinky in Blinkies:
                if refBlinky["entity"] in ghosts_hit_list:
                    Blinkies = [Blinky for Blinky in Blinkies if Blinky != refBlinky]
            for refPinky in Pinkies:
                if refPinky["entity"] in ghosts_hit_list:
                    Pinkies = [Pinky for Pinky in Pinkies if Pinky != refPinky]
            for refInky in Inkies:
                if refInky["entity"] in ghosts_hit_list:
                    Inkies = [Inky for Inky in Inkies if Inky != refInky]
            for refClyde in Clydes:
                if refClyde["entity"] in ghosts_hit_list:
                    Clydes = [Clyde for Clyde in Clydes if Clyde != refClyde]

        all_sprites_list.draw(screen)
        ghosts_list.draw(screen)

        # Lose: the ghost population reached the cap.
        if len(ghosts_list) >= MAXIMUM_GHOSTS:
            userWantsToExit = doNext("Game Over", 235, all_sprites_list, block_list, ghosts_list, vaxman_collide, wall_list, gate)
            if userWantsToExit:
                break

        pygame.display.flip()
        clock.tick(10)
def doNext(message, left, all_sprites_list, block_list, ghosts_list, vaxman_collide, wall_list, gate):
    """Show the end-of-game overlay with `message` blitted at x-offset `left`.

    Returns True when the user quits (window close or ESCAPE), or False
    after a replay — ENTER drops the old session's groups and starts a new
    game recursively via startGame()."""
    while True:
        # ---- EVENT PROCESSING ----
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                return True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    pygame.quit()
                    return True
                if event.key == pygame.K_RETURN:
                    # Drop references to the finished session's sprite groups
                    # before recursing into a fresh game.
                    del all_sprites_list
                    del block_list
                    del ghosts_list
                    del vaxman_collide
                    del wall_list
                    del gate
                    startGame()
                    return False

        # Grey translucent backdrop for the overlay text.
        # NOTE(review): this local `w` shadows the module-level spawn
        # coordinate `w`.
        w = pygame.Surface((400,200))  # the size of your rect
        w.set_alpha(10)                # alpha level
        w.fill((128,128,128))          # this fills the entire surface
        screen.blit(w, (100,200))      # (0,0) are the top-left coordinates

        # Won-or-lost message plus replay/quit instructions.
        text1=font.render(message, True, lightGrey)
        screen.blit(text1, [left, 233])
        text2=font.render("To play again, press ENTER.", True, lightGrey)
        screen.blit(text2, [135, 303])
        text3=font.render("To quit, press ESCAPE.", True, lightGrey)
        screen.blit(text3, [165, 333])
        pygame.display.flip()
        clock.tick(10)
# Kick off the first game; replays are handled inside doNext().
startGame()
pygame.quit()
| 29.025437
| 143
| 0.570083
| 4,397
| 0.240839
| 0
| 0
| 0
| 0
| 0
| 0
| 4,319
| 0.236567
|
02672c292331f32c5416bda0b2eba29281a17676
| 1,320
|
py
|
Python
|
examples/ecr/rl_formulations/common/state_shaper.py
|
zhawan/maro
|
d8c98deea4296cdcb90efd1fb59bc571cec3a2ef
|
[
"MIT"
] | null | null | null |
examples/ecr/rl_formulations/common/state_shaper.py
|
zhawan/maro
|
d8c98deea4296cdcb90efd1fb59bc571cec3a2ef
|
[
"MIT"
] | null | null | null |
examples/ecr/rl_formulations/common/state_shaper.py
|
zhawan/maro
|
d8c98deea4296cdcb90efd1fb59bc571cec3a2ef
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
from maro.rl import AbstractStateShaper
class ECRStateShaper(AbstractStateShaper):
    """Flattens an ECR decision event plus env snapshots into a 1-D state vector."""

    def __init__(self, *, look_back, max_ports_downstream, port_attributes, vessel_attributes):
        # look_back: number of past ticks of port attributes to include.
        # max_ports_downstream: downstream ports considered per vessel.
        # port_attributes / vessel_attributes: snapshot attribute names read.
        super().__init__()
        self._look_back = look_back
        self._max_ports_downstream = max_ports_downstream
        self._port_attributes = port_attributes
        self._vessel_attributes = vessel_attributes
        # One slice of port attributes per (tick, port) combination, plus the
        # vessel attributes appended at the end.
        self._dim = (look_back + 1) * (max_ports_downstream + 1) * len(port_attributes) + len(vessel_attributes)

    def __call__(self, decision_event, snapshot_list):
        """Build the state for one decision event; returns (agent_id, state)."""
        tick, port_idx, vessel_idx = decision_event.tick, decision_event.port_idx, decision_event.vessel_idx
        # NOTE(review): range(self._look_back-1) yields look_back-1 ticks, but
        # _dim above budgets for look_back+1 tick slices — confirm which is
        # intended before relying on self.dim.
        ticks = [tick - rt for rt in range(self._look_back-1)]
        future_port_idx_list = snapshot_list["vessels"][tick: vessel_idx: 'future_stop_list'].astype('int')
        port_features = snapshot_list["ports"][ticks: [port_idx] + list(future_port_idx_list): self._port_attributes]
        vessel_features = snapshot_list["vessels"][tick: vessel_idx: self._vessel_attributes]
        state = np.concatenate((port_features, vessel_features))
        return str(port_idx), state

    @property
    def dim(self):
        # Expected length of the state vector produced by __call__.
        return self._dim
| 45.517241
| 117
| 0.724242
| 1,184
| 0.89697
| 0
| 0
| 53
| 0.040152
| 0
| 0
| 119
| 0.090152
|
0267eac0bf1a3be3319a75260f8b10b9d6a39d75
| 2,834
|
py
|
Python
|
src/runner.py
|
Shahrukh-Badar/DeepLearning
|
5f6bbd6f8ace06014f10e35183442901d984b231
|
[
"MIT"
] | null | null | null |
src/runner.py
|
Shahrukh-Badar/DeepLearning
|
5f6bbd6f8ace06014f10e35183442901d984b231
|
[
"MIT"
] | null | null | null |
src/runner.py
|
Shahrukh-Badar/DeepLearning
|
5f6bbd6f8ace06014f10e35183442901d984b231
|
[
"MIT"
] | null | null | null |
from os import listdir
from os.path import join, isfile
import json
from random import randint
#########################################
## START of part that students may change
from code_completion_baseline import Code_Completion_Baseline

training_dir = "./../../programs_800/"  # tokenized programs used for training
query_dir = "./../../programs_200/"     # held-out programs used for evaluation
model_file = "./../../trained_model"    # where the trained model is saved/loaded
use_stored_model = False                # True: load model_file instead of training
max_hole_size = 2                       # at most this many consecutive tokens removed
simplify_tokens = True                  # collapse literal token values to placeholders
## END of part that students may change
#########################################
def simplify_token(token):
    """Collapse a token's literal value to a canonical placeholder, in place.

    Identifiers become "ID", strings "\"STR\"", regexes "/REGEXP/" and
    numerics "5"; any other token type is left untouched.
    """
    placeholders = {
        "Identifier": "ID",
        "String": "\"STR\"",
        "RegularExpression": "/REGEXP/",
        "Numeric": "5",
    }
    replacement = placeholders.get(token["type"])
    if replacement is not None:
        token["value"] = replacement
# Load sequences of tokens from files.
def load_tokens(token_dir):
    """Parse every *_tokens.json file in `token_dir` into a list of token
    lists, simplifying literal values when `simplify_tokens` is set."""
    token_lists = []
    for name in listdir(token_dir):
        path = join(token_dir, name)
        if isfile(path) and name.endswith("_tokens.json"):
            token_lists.append(json.load(open(path)))
    if simplify_tokens:
        for token_list in token_lists:
            for token in token_list:
                simplify_token(token)
    return token_lists
# Removes up to max_hole_size tokens.
def create_hole(tokens):
    """Cut a random span of 1..max_hole_size tokens out of `tokens`.

    Returns (prefix, removed_span, suffix); the hole never starts at
    index 0, so the prefix is always non-empty.
    """
    hole_size = randint(1, max_hole_size)
    start = randint(1, len(tokens) - hole_size)
    end = start + hole_size
    return (tokens[0:start], tokens[start:end], tokens[end:])
# Checks whether two sequences of tokens are identical.
def same_tokens(tokens1, tokens2):
    """Return True iff both sequences have equal length and match pairwise
    on both "type" and "value"."""
    return len(tokens1) == len(tokens2) and all(
        a["type"] == b["type"] and a["value"] == b["value"]
        for a, b in zip(tokens1, tokens2)
    )
#########################################
## START of part that students may change
code_completion = Code_Completion_Baseline()
## END of part that students may change
#########################################

# Train the network (or load a previously trained model).
training_token_lists = load_tokens(training_dir)
if use_stored_model:
    code_completion.load(training_token_lists, model_file)
else:
    code_completion.train(training_token_lists, model_file)

# Query the network and measure its accuracy.
query_token_lists = load_tokens(query_dir)
correct = incorrect = 0
for tokens in query_token_lists:
    # Punch a random hole into the sequence and ask the model to fill it.
    (prefix, expected, suffix) = create_hole(tokens)
    completion = code_completion.query(prefix, suffix)
    if same_tokens(completion, expected):
        correct += 1
    else:
        incorrect += 1
accuracy = correct / (correct + incorrect)
print("Accuracy: " + str(correct) + " correct vs. " + str(incorrect) + " incorrect = " + str(accuracy))
| 32.953488
| 127
| 0.650318
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 786
| 0.277347
|
0268d3f7d9cf4572520e699a426fa385cc8944bc
| 4,491
|
py
|
Python
|
superhelp/formatters/cli_formatter.py
|
grantps/superhelp
|
d8e861bf1ad91571ac23b9c833a8cd461bb1952f
|
[
"MIT"
] | 27
|
2020-05-17T20:48:43.000Z
|
2022-01-08T21:32:30.000Z
|
superhelp/formatters/cli_formatter.py
|
grantps/superhelp
|
d8e861bf1ad91571ac23b9c833a8cd461bb1952f
|
[
"MIT"
] | null | null | null |
superhelp/formatters/cli_formatter.py
|
grantps/superhelp
|
d8e861bf1ad91571ac23b9c833a8cd461bb1952f
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from textwrap import dedent
from superhelp import conf
from superhelp.conf import Level, Theme
from superhelp.formatters.cli_extras import md2cli
from superhelp.formatters.cli_extras.cli_colour import set_global_colours
from superhelp.gen_utils import (get_code_desc, get_intro,
get_line_numbered_snippet, layout_comment as layout)
"""
Note - displays properly in the terminal but not necessarily in other output
e.g. Eclipse console.
Lots in common with md displayer but risks of DRYing probably outweigh benefits
at this stage.
Probably should swap out for https://github.com/willmcgugan/rich
"""
# Fixed wrap width for rendered terminal output.
TERMINAL_WIDTH = 80
# Fenced-code marker substituted for conf.PYTHON_CODE_START/END.
MDV_CODE_BOUNDARY = "```"
def get_message(message_dets, detail_level: Level):
    """Render one message at `detail_level` to terminal-formatted text.

    EXTRA detail is prefixed with the MAIN content; the project's Python
    code markers are swapped for fenced-code boundaries before rendering.
    """
    message = dedent(message_dets.message[detail_level])
    if detail_level == Level.EXTRA:
        message = dedent(message_dets.message[Level.MAIN]) + message
    message = dedent(message)
    message = message.replace(f" {conf.PYTHON_CODE_START}", MDV_CODE_BOUNDARY)
    message = message.replace(f"\n {conf.PYTHON_CODE_END}", MDV_CODE_BOUNDARY)
    ## Backticks create problems in formatting, so strip them before rendering.
    return md2cli.main(md=message.replace('`', ''))
def _need_snippet_displayed(overall_messages_dets, block_messages_dets, *,
multi_block=False):
"""
Don't need to see the code snippet displayed when it is already visible:
* because there is only one block in snippet and there is a block message
for it (which will display the block i.e. the entire snippet) UNLESS there
is an overall message separating them
Otherwise we need it displayed.
"""
mono_block_snippet = not multi_block
if mono_block_snippet and block_messages_dets and not overall_messages_dets:
return False
return True
def get_formatted_help(code: str, file_path: Path, messages_dets, *,
        detail_level: Level = Level.BRIEF, theme_name: Theme = Theme.LIGHT,
        warnings_only=False, multi_block=False) -> str:
    """
    Show by code blocks.

    Renders an intro banner, optionally the line-numbered snippet, any
    overall messages, then per-block messages grouped under one heading per
    code block (with a warnings sub-heading emitted once per block when
    needed). Returns the assembled terminal-formatted text.
    """
    set_global_colours(theme_name)
    md2cli.term_columns = TERMINAL_WIDTH
    if warnings_only:
        options_msg = conf.WARNINGS_ONLY_MSG
    else:
        options_msg = conf.ALL_HELP_SHOWING_MSG
    intro = get_intro(file_path, multi_block=multi_block)
    text = [
        md2cli.main(layout(f"""\
            # SuperHELP - Help for Humans!
            {intro}
            Currently showing {detail_level} content as requested.
            {options_msg}.
            {conf.MISSING_ADVICE_MESSAGE}
            ## Help by spreading the word about SuperHELP on social media.
            {conf.FORCE_SPLIT}Twitter: {conf.TWITTER_HANDLE}. Thanks!
            """
        )),
    ]
    overall_messages_dets, block_messages_dets = messages_dets
    display_snippet = _need_snippet_displayed(
        overall_messages_dets, block_messages_dets, multi_block=multi_block)
    if display_snippet:
        line_numbered_snippet = get_line_numbered_snippet(code)
        code_desc = get_code_desc(file_path)
        text.append(md2cli.main(dedent(
            f"## {code_desc}"
            f"\n{MDV_CODE_BOUNDARY}\n"
            + line_numbered_snippet
            + f"\n{MDV_CODE_BOUNDARY}")))
    for message_dets in overall_messages_dets:
        message = get_message(message_dets, detail_level)
        text.append(message)
    # Group block messages by starting line; warnings sort after non-warnings.
    block_messages_dets.sort(key=lambda nt: (nt.first_line_no, nt.warning))
    prev_line_no = None
    for message_dets in block_messages_dets:
        ## display code for line number (once ;-))
        line_no = message_dets.first_line_no
        new_block = (line_no != prev_line_no)
        if new_block:
            block_has_warning_header = False
            text.append(md2cli.main(dedent(
                f'## Code block starting line {line_no:,}'
                f"\n{MDV_CODE_BOUNDARY}\n"
                + message_dets.code_str
                + f"\n{MDV_CODE_BOUNDARY}")))
            prev_line_no = line_no
        if message_dets.warning and not block_has_warning_header:
            text.append(md2cli.main(layout("""\
                ### Questions / Warnings
                There may be some issues with this code block you want to
                address.
                """)))
            block_has_warning_header = True
        ## process message
        message = get_message(message_dets, detail_level)
        text.append(message)
    formatted_help = '\n'.join(text)
    return formatted_help
| 37.115702
| 93
| 0.674905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,497
| 0.333333
|
0268e7698751adcedb3a0f8d62ab2e3667fd33f3
| 4,941
|
py
|
Python
|
atom/instance.py
|
enthought/atom
|
1f194e3550d62c4ca1d79521dff97531ffe3f0ac
|
[
"BSD-3-Clause"
] | null | null | null |
atom/instance.py
|
enthought/atom
|
1f194e3550d62c4ca1d79521dff97531ffe3f0ac
|
[
"BSD-3-Clause"
] | 1
|
2020-12-04T10:11:07.000Z
|
2020-12-04T10:13:46.000Z
|
atom/instance.py
|
enthought/atom
|
1f194e3550d62c4ca1d79521dff97531ffe3f0ac
|
[
"BSD-3-Clause"
] | 1
|
2020-12-04T10:05:32.000Z
|
2020-12-04T10:05:32.000Z
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from .catom import (
Member, DEFAULT_FACTORY, DEFAULT_VALUE, USER_DEFAULT, VALIDATE_INSTANCE,
USER_VALIDATE
)
class Instance(Member):
    """ A value which allows objects of a given type or types.

    Values will be tested using the `PyObject_IsInstance` C API call.
    This call is equivalent to `isinstance(value, kind)` and all the
    same rules apply. The value of an Instance may also be set to None.

    """
    __slots__ = ()

    def __init__(self, kind, factory=None, args=None, kwargs=None):
        """ Initialize an Instance.

        Parameters
        ----------
        kind : type or tuple of types
            The allowed type or types for the instance.

        factory : callable, optional
            Factory for the default value; takes precedence over
            'args'/'kwargs'. When neither it nor they are given, the
            default value is None.

        args : tuple, optional
            Positional arguments passed to 'kind' (which must then be a
            callable type) to build the default value when no factory is
            given.

        kwargs : dict, optional
            Keyword arguments passed to 'kind' alongside 'args' to build
            the default value when no factory is given.

        """
        if factory is not None:
            # An explicit factory wins outright.
            self.set_default_kind(DEFAULT_FACTORY, factory)
        elif args is None and kwargs is None:
            # No construction information at all: default to None.
            self.set_default_kind(DEFAULT_VALUE, None)
        else:
            ctor_args = args or ()
            ctor_kwargs = kwargs or {}
            # Build the default lazily by calling the kind itself.
            self.set_default_kind(
                DEFAULT_FACTORY, lambda: kind(*ctor_args, **ctor_kwargs))
        self.set_validate_kind(VALIDATE_INSTANCE, kind)
class ForwardInstance(Instance):
""" An Instance which delays resolving the type definition.
The first time the value is accessed or modified, the type will
be resolved and the forward instance will behave identically to
a normal instance.
"""
__slots__ = ()
def __init__(self, resolve, factory=None, args=None, kwargs=None):
""" Initialize a ForwardInstance.
resolve : callable
A callable which takes no arguments and returns the type or
tuple of types to use for validating the values.
factory : callable, optional
An optional factory to use for creating the default value.
If this is not provided and 'args' and 'kwargs' is None,
then the default value will be None.
args : tuple, optional
If 'factory' is None, then 'resolve' will return a callable
type and these arguments will be passed to the constructor
to create the default value.
kwargs : dict, optional
If 'factory' is None, then 'resolve' will return a callable
type and these keywords will be passed to the constructor to
create the default value.
"""
if factory is not None:
self.set_default_kind(DEFAULT_FACTORY, factory)
elif args is not None or kwargs is not None:
args = args or ()
kwargs = kwargs or {}
self.set_default_kind(USER_DEFAULT, (args, kwargs))
else:
self.set_default_kind(DEFAULT_VALUE, None)
self.set_validate_kind(USER_VALIDATE, resolve)
def default(self, owner, name):
""" Called to retrieve the default value.
This will resolve and instantiate the type. It will then update
the internal default and validate handlers to behave like a
normal instance member.
"""
resolve = self.validate_kind[1]
kind = resolve()
args, kwargs = self.default_kind[1]
value = kind(*args, **kwargs)
self.set_default_kind(DEFAULT_FACTORY, lambda: kind(*args, **kwargs))
self.set_validate_kind(VALIDATE_INSTANCE, kind)
return value
def validate(self, owner, name, old, new):
""" Called to validate the value.
This will resolve the type and validate the new value. It will
then update the internal default and validate handlers to behave
like a normal instance member.
"""
resolve = self.validate_kind[1]
kind = resolve()
if not isinstance(new, kind):
raise TypeError('invalid instance type')
self.set_validate_kind(VALIDATE_INSTANCE, kind)
if self.default_kind[0] == USER_DEFAULT:
args, kwargs = self.default_kind[1]
factory = lambda: kind(*args, **kwargs)
self.set_default_kind(DEFAULT_FACTORY, factory)
return new
| 36.065693
| 79
| 0.608379
| 4,593
| 0.929569
| 0
| 0
| 0
| 0
| 0
| 0
| 2,885
| 0.58389
|
0268f15772e163a48707362a23538e64ee3c364e
| 4,744
|
py
|
Python
|
operators/elastic-cloud-eck/python/pulumi_pulumi_kubernetes_crds_operators_elastic_cloud_eck/_tables.py
|
pulumi/pulumi-kubernetes-crds
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
[
"Apache-2.0"
] | null | null | null |
operators/elastic-cloud-eck/python/pulumi_pulumi_kubernetes_crds_operators_elastic_cloud_eck/_tables.py
|
pulumi/pulumi-kubernetes-crds
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
[
"Apache-2.0"
] | 2
|
2020-09-18T17:12:23.000Z
|
2020-12-30T19:40:56.000Z
|
operators/elastic-cloud-eck/python/pulumi_pulumi_kubernetes_crds_operators_elastic_cloud_eck/_tables.py
|
pulumi/pulumi-kubernetes-crds
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
SNAKE_TO_CAMEL_CASE_TABLE = {
"access_modes": "accessModes",
"api_group": "apiGroup",
"api_version": "apiVersion",
"app_protocol": "appProtocol",
"association_status": "associationStatus",
"available_nodes": "availableNodes",
"change_budget": "changeBudget",
"client_ip": "clientIP",
"cluster_ip": "clusterIP",
"config_ref": "configRef",
"daemon_set": "daemonSet",
"data_source": "dataSource",
"elasticsearch_association_status": "elasticsearchAssociationStatus",
"elasticsearch_ref": "elasticsearchRef",
"expected_nodes": "expectedNodes",
"external_i_ps": "externalIPs",
"external_name": "externalName",
"external_traffic_policy": "externalTrafficPolicy",
"file_realm": "fileRealm",
"health_check_node_port": "healthCheckNodePort",
"ip_family": "ipFamily",
"kibana_association_status": "kibanaAssociationStatus",
"kibana_ref": "kibanaRef",
"last_probe_time": "lastProbeTime",
"last_transition_time": "lastTransitionTime",
"load_balancer_ip": "loadBalancerIP",
"load_balancer_source_ranges": "loadBalancerSourceRanges",
"match_expressions": "matchExpressions",
"match_labels": "matchLabels",
"max_surge": "maxSurge",
"max_unavailable": "maxUnavailable",
"min_available": "minAvailable",
"node_port": "nodePort",
"node_sets": "nodeSets",
"pod_disruption_budget": "podDisruptionBudget",
"pod_template": "podTemplate",
"publish_not_ready_addresses": "publishNotReadyAddresses",
"remote_clusters": "remoteClusters",
"rolling_update": "rollingUpdate",
"secret_name": "secretName",
"secret_token_secret": "secretTokenSecret",
"secure_settings": "secureSettings",
"self_signed_certificate": "selfSignedCertificate",
"service_account_name": "serviceAccountName",
"session_affinity": "sessionAffinity",
"session_affinity_config": "sessionAffinityConfig",
"storage_class_name": "storageClassName",
"subject_alt_names": "subjectAltNames",
"target_port": "targetPort",
"timeout_seconds": "timeoutSeconds",
"topology_keys": "topologyKeys",
"update_strategy": "updateStrategy",
"volume_claim_templates": "volumeClaimTemplates",
"volume_mode": "volumeMode",
"volume_name": "volumeName",
}
CAMEL_TO_SNAKE_CASE_TABLE = {
"accessModes": "access_modes",
"apiGroup": "api_group",
"apiVersion": "api_version",
"appProtocol": "app_protocol",
"associationStatus": "association_status",
"availableNodes": "available_nodes",
"changeBudget": "change_budget",
"clientIP": "client_ip",
"clusterIP": "cluster_ip",
"configRef": "config_ref",
"daemonSet": "daemon_set",
"dataSource": "data_source",
"elasticsearchAssociationStatus": "elasticsearch_association_status",
"elasticsearchRef": "elasticsearch_ref",
"expectedNodes": "expected_nodes",
"externalIPs": "external_i_ps",
"externalName": "external_name",
"externalTrafficPolicy": "external_traffic_policy",
"fileRealm": "file_realm",
"healthCheckNodePort": "health_check_node_port",
"ipFamily": "ip_family",
"kibanaAssociationStatus": "kibana_association_status",
"kibanaRef": "kibana_ref",
"lastProbeTime": "last_probe_time",
"lastTransitionTime": "last_transition_time",
"loadBalancerIP": "load_balancer_ip",
"loadBalancerSourceRanges": "load_balancer_source_ranges",
"matchExpressions": "match_expressions",
"matchLabels": "match_labels",
"maxSurge": "max_surge",
"maxUnavailable": "max_unavailable",
"minAvailable": "min_available",
"nodePort": "node_port",
"nodeSets": "node_sets",
"podDisruptionBudget": "pod_disruption_budget",
"podTemplate": "pod_template",
"publishNotReadyAddresses": "publish_not_ready_addresses",
"remoteClusters": "remote_clusters",
"rollingUpdate": "rolling_update",
"secretName": "secret_name",
"secretTokenSecret": "secret_token_secret",
"secureSettings": "secure_settings",
"selfSignedCertificate": "self_signed_certificate",
"serviceAccountName": "service_account_name",
"sessionAffinity": "session_affinity",
"sessionAffinityConfig": "session_affinity_config",
"storageClassName": "storage_class_name",
"subjectAltNames": "subject_alt_names",
"targetPort": "target_port",
"timeoutSeconds": "timeout_seconds",
"topologyKeys": "topology_keys",
"updateStrategy": "update_strategy",
"volumeClaimTemplates": "volume_claim_templates",
"volumeMode": "volume_mode",
"volumeName": "volume_name",
}
| 39.533333
| 80
| 0.707841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,795
| 0.799958
|
026b557b15ada072d61283c89f10a088c8637df4
| 1,416
|
py
|
Python
|
webapp/app.py
|
aleksandergurin/news
|
9e7d3c35857600445cb6df42ba18d289dc0e37a9
|
[
"BSD-3-Clause"
] | 3
|
2015-08-20T11:08:28.000Z
|
2018-01-28T21:22:53.000Z
|
webapp/app.py
|
aleksandergurin/news
|
9e7d3c35857600445cb6df42ba18d289dc0e37a9
|
[
"BSD-3-Clause"
] | null | null | null |
webapp/app.py
|
aleksandergurin/news
|
9e7d3c35857600445cb6df42ba18d289dc0e37a9
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import Flask, render_template
from config import configs
from .extensions import login_manager, db
from .account import account
from .frontend import frontend
from webapp.session import RedisSessionInterface
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(configs[config_name])
register_session_storage(app, configs[config_name])
register_blueprints(app)
init_extensions(app)
add_error_pages(app)
return app
def register_session_storage(app, conf):
if hasattr(conf, 'REDIS'):
from redis import Redis
host = conf.REDIS['host']
port = conf.REDIS['port']
db_num = conf.REDIS['db']
app.session_interface = RedisSessionInterface(Redis(host, port, db_num))
def register_blueprints(app):
app.register_blueprint(frontend)
app.register_blueprint(account)
def init_extensions(app):
login_manager.init_app(app)
db.init_app(app)
def add_error_pages(app):
@app.errorhandler(401)
def unauthorized(e):
return render_template('errors/401.html'), 401
@app.errorhandler(403)
def forbidden(e):
return render_template('errors/403.html'), 403
@app.errorhandler(404)
def not_found(e):
return render_template('errors/404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('errors/500.html'), 500
| 24.413793
| 80
| 0.711864
| 0
| 0
| 0
| 0
| 411
| 0.290254
| 0
| 0
| 91
| 0.064266
|
026bd83279fbac0f51bacbf47138a5022a5dd278
| 27,723
|
py
|
Python
|
src/ezcode/knapsack/__init__.py
|
zheng-gao/ez_code
|
fbf48990291aa57d6436d4548b0a6c25dfb8f82d
|
[
"MIT"
] | null | null | null |
src/ezcode/knapsack/__init__.py
|
zheng-gao/ez_code
|
fbf48990291aa57d6436d4548b0a6c25dfb8f82d
|
[
"MIT"
] | null | null | null |
src/ezcode/knapsack/__init__.py
|
zheng-gao/ez_code
|
fbf48990291aa57d6436d4548b0a6c25dfb8f82d
|
[
"MIT"
] | null | null | null |
from typing import Callable
class Knapsack:
@staticmethod
def best_value(
capacity: int,
sizes: list,
values: list,
quantities,
min_max: Callable = max,
zero_capacity_value=0,
fill_to_capacity=True,
output_item_list=True
):
if capacity < 0:
raise ValueError(f"Capacity cannot be negative: {capacity}")
for s in sizes:
if s <= 0:
raise ValueError(f"Item sizes must be positive: {sizes}")
if len(sizes) != len(values):
raise ValueError(f"The length of sizes {sizes} not match the length of values {values}")
if quantities:
if isinstance(quantities, list):
if len(quantities) != len(sizes):
raise ValueError(f"The length of quantities {quantities} not match the length of sizes {sizes}")
for q in quantities:
if q < 0:
raise ValueError(f"Item quantities cannot contain negative: {quantities}")
elif quantities < 0:
raise ValueError(f"Item quantities cannot be negative: {quantities}")
return Knapsack.best_value_with_limited_items_1d(
capacity=capacity,
sizes=sizes,
values=values,
quantities=quantities,
min_max=min_max,
zero_capacity_value=zero_capacity_value,
fill_to_capacity=fill_to_capacity,
output_dp_table=False,
output_item_list=output_item_list
)
else:
return Knapsack.best_value_with_unlimited_items_1d(
capacity=capacity,
sizes=sizes,
values=values,
min_max=min_max,
zero_capacity_value=zero_capacity_value,
fill_to_capacity=fill_to_capacity,
output_dp_table=False,
output_item_list=output_item_list
)
@staticmethod
def ways_to_fill(
capacity: int,
sizes: list,
quantities,
output_item_list=True
):
if capacity < 0:
raise ValueError(f"Capacity cannot be negative: {capacity}")
for s in sizes:
if s <= 0:
raise ValueError(f"Item sizes must be positive: {sizes}")
if quantities:
if isinstance(quantities, list):
if len(quantities) != len(sizes):
raise ValueError(f"The length of quantities {quantities} not match the length of sizes {sizes}")
for q in quantities:
if q < 0:
raise ValueError(f"Item quantities cannot contain negative: {quantities}")
elif quantities < 0:
raise ValueError(f"Item quantities cannot be negative: {quantities}")
return Knapsack.number_of_ways_to_fill_to_capacity_with_limited_items_1d(
capacity=capacity,
sizes=sizes,
quantities=quantities,
output_dp_table=False,
output_item_list=output_item_list
)
else:
return Knapsack.number_of_ways_to_fill_to_capacity_with_unlimited_items_1d(
capacity=capacity,
sizes=sizes,
output_dp_table=False,
output_item_list=output_item_list
)
@staticmethod
def best_value_with_limited_items_2d(
capacity: int,
sizes: list,
values: list,
min_max: Callable = max,
zero_capacity_value=0,
fill_to_capacity=True,
iterate_sizes_first=True,
output_dp_table=False,
output_item_list=True
):
"""
0-1 Knapsack
Bag Capacity = C
Items Sizes = [s0, s1, ... ]
Items Values = [v0, v1, ... ]
2D-Array "max_value" Init
Cap=[0, 1, 2, ... c_j-1, c_j, ..., C]
s_0 0, 0, 0, ... 0, v_0, ..., v_0 (where c_j-1 < w0 < c_j)
s_1 0,
... 0, Max Value
s_i 0, Other cells will be overwritten later
... 0,
s_N 0,
The meaning of max_value[i][c]:
Given the FIRST "i + 1" items, the max value of a bag of size "c" can make
value_without_item_i = max_value[i-1][c]
value_with_item_i = max_value[i - 1][c - w[i]] + v[i]
max_value[i - 1][c - w[i]] means if we put item i into the bag (+v[i]),
the max value that the rest of the capacity "c - w[i]" can make with a selection of the previous items
if the capacity of the bag is not large enough for the item i, max_value[i][c] = max_value[i - 1][c]
otherwise max_value[i][c] = max( value_without_item_i + value_with_item_i )
"""
infinity = float("-inf") if min_max == max else float("inf")
knapsack_init_value = infinity if fill_to_capacity else zero_capacity_value
knapsack_value = [[knapsack_init_value for _ in range(capacity + 1)] for _ in range(len(sizes))]
item_lists = None
if output_item_list:
item_lists = [[list() for _ in range(capacity + 1)] for _ in range(len(sizes))]
for i in range(len(sizes)): # init first column
knapsack_value[i][0] = zero_capacity_value
if fill_to_capacity:
knapsack_value[0][sizes[0]] = values[0] # init first row, c != w means not filled
if output_item_list:
item_lists[0][sizes[0]].append(0)
else:
for c in range(sizes[0], capacity + 1): # init first row, c < w means the bag is empty
knapsack_value[0][c] = values[0]
if output_item_list:
item_lists[0][c].append(0)
if iterate_sizes_first: # we can iterate either of the sizes or capacity first
for i in range(1, len(sizes)):
for c in range(1, capacity + 1):
if c < sizes[i]:
knapsack_value[i][c] = knapsack_value[i - 1][c]
else:
knapsack_value[i][c] = min_max(knapsack_value[i - 1][c], knapsack_value[i - 1][c - sizes[i]] + values[i])
if output_item_list:
if knapsack_value[i][c] == knapsack_init_value:
item_lists[i][c] = list()
elif knapsack_value[i][c] == knapsack_value[i - 1][c]:
item_lists[i][c] = item_lists[i - 1][c].copy()
else:
item_lists[i][c] = item_lists[i - 1][c - sizes[i]] + [i]
else:
for c in range(1, capacity + 1):
for i in range(1, len(sizes)):
if c < sizes[i]:
knapsack_value[i][c] = knapsack_value[i - 1][c]
else:
knapsack_value[i][c] = min_max(knapsack_value[i - 1][c], knapsack_value[i - 1][c - sizes[i]] + values[i])
if output_item_list:
if knapsack_value[i][c] == knapsack_init_value:
item_lists[i][c] = list()
elif knapsack_value[i][c] == knapsack_value[i - 1][c]:
item_lists[i][c] = item_lists[i - 1][c].copy()
else:
item_lists[i][c] = item_lists[i - 1][c - sizes[i]] + [i]
if output_dp_table:
return (knapsack_value, item_lists) if output_item_list else knapsack_value
else:
best_value = knapsack_value[len(sizes) - 1][capacity]
if output_item_list:
item_list = item_lists[len(sizes) - 1][capacity]
return (None, item_list) if best_value == knapsack_init_value else (best_value, item_list)
else:
return None if best_value == knapsack_init_value else best_value
@staticmethod
def best_value_with_limited_items_1d(
capacity: int,
sizes: list,
values: list,
quantities=1,
min_max: Callable = max,
zero_capacity_value=0,
fill_to_capacity=True,
output_dp_table=False,
output_item_list=True
):
"""
Rolling dp array: copy row i-1 to row i
We just need one row:
knapsack_value[c] means the max value that a bag with capacity c can make
Each loop will overwrite the knapsack_value[c]
Cannot swap loops
"""
if isinstance(quantities, int):
quantities_list = list()
for i in range(len(sizes)):
quantities_list.append(quantities)
quantities = quantities_list
else:
assert len(sizes) == len(quantities)
infinity = float("-inf") if min_max == max else float("inf")
knapsack_init_value = infinity if fill_to_capacity else zero_capacity_value
knapsack_value = [knapsack_init_value for _ in range(capacity + 1)]
knapsack_value[0] = zero_capacity_value
item_lists = None
if output_item_list:
item_lists = [list() for _ in range(capacity + 1)]
for i in range(len(sizes)): # must loop item sizes first, because we are rolling the rows not columns
for q in range(1, quantities[i] + 1): # it is same as flatten the items: sizes=[2,3] quantities=[1,2] ==> sizes=[2, 3, 3]
# c < sizes[i], knapsack_value[c] won't change
# Capacity is looping backward, otherwise the item will be put in to the knapsack multiple times
for c in range(capacity, sizes[i] - 1, -1):
knapsack_value[c] = min_max(knapsack_value[c], knapsack_value[c - sizes[i]] + values[i])
if output_item_list:
if knapsack_value[c] == knapsack_init_value:
item_lists[c] = list()
elif knapsack_value[c] == knapsack_value[c - sizes[i]] + values[i]:
item_lists[c] = item_lists[c - sizes[i]] + [i]
# Another solution
# for c in range(capacity, sizes[i] - 1, -1):
# for q in range(1, min(quantities[i], c // sizes[i]) + 1):
# knapsack_value[c] = min_max(knapsack_value[c], knapsack_value[c - q * sizes[i]] + q * values[i])
# if output_item_list:
# if knapsack_value[c] == knapsack_init_value:
# item_lists[c] = list()
# elif knapsack_value[c] == knapsack_value[c - q * sizes[i]] + q * values[i]:
# item_lists[c] = item_lists[c - q * sizes[i]] + [i] * q
if output_dp_table:
return (knapsack_value, item_lists) if output_item_list else knapsack_value
else:
best_value = knapsack_value[capacity]
if output_item_list:
return (None, item_lists[capacity]) if best_value == knapsack_init_value else (best_value, item_lists[capacity])
else:
return None if best_value == knapsack_init_value else best_value
@staticmethod
def best_value_with_unlimited_items_1d(
capacity: int,
sizes: list,
values: list,
min_max: Callable = max,
zero_capacity_value=0,
fill_to_capacity=True,
iterate_sizes_first=True,
output_dp_table=False,
output_item_list=True
):
""" Similar to rolling row solution, but the two loops can swap the order """
infinity = float("-inf") if min_max == max else float("inf")
knapsack_init_value = infinity if fill_to_capacity else zero_capacity_value
knapsack_value = [knapsack_init_value for _ in range(capacity + 1)]
knapsack_value[0] = zero_capacity_value
item_lists = None
if output_item_list:
item_lists = [list() for _ in range(capacity + 1)]
if iterate_sizes_first:
for i in range(len(sizes)):
for c in range(sizes[i], capacity + 1): # Looping forward, so items can be added multiple times
knapsack_value[c] = min_max(knapsack_value[c], knapsack_value[c - sizes[i]] + values[i])
if output_item_list:
if knapsack_value[c] == knapsack_init_value:
item_lists[c] = list()
elif knapsack_value[c] == knapsack_value[c - sizes[i]] + values[i]:
item_lists[c] = item_lists[c - sizes[i]] + [i]
else:
for c in range(1, capacity + 1): # Looping forward, so items can be added multiple times
for i in range(len(sizes)):
if c >= sizes[i]: # c < sizes[i], knapsack_value[c] won't change
knapsack_value[c] = min_max(knapsack_value[c], knapsack_value[c - sizes[i]] + values[i])
if output_item_list:
if knapsack_value[c] == knapsack_init_value:
item_lists[c] = list()
elif knapsack_value[c] == knapsack_value[c - sizes[i]] + values[i]:
item_lists[c] = item_lists[c - sizes[i]] + [i]
if output_dp_table:
return (knapsack_value, item_lists) if output_item_list else knapsack_value
else:
best_value = knapsack_value[capacity]
if output_item_list:
return (None, item_lists[capacity]) if best_value == knapsack_init_value else (best_value, item_lists[capacity])
else:
return None if best_value == knapsack_init_value else best_value
@staticmethod
def best_value_with_unlimited_items_2d(
capacity: int,
sizes: list,
values: list,
min_max: Callable = max,
zero_capacity_value=0,
fill_to_capacity=True,
iterate_sizes_first=True,
output_dp_table=False,
output_item_list=True
):
infinity = float("-inf") if min_max == max else float("inf")
knapsack_init_value = infinity if fill_to_capacity else zero_capacity_value
knapsack_value = [[knapsack_init_value for _ in range(capacity + 1)] for _ in range(len(sizes))]
item_lists = None
if output_item_list:
item_lists = [[list() for _ in range(capacity + 1)] for _ in range(len(sizes))]
for i in range(len(sizes)): # init first column
knapsack_value[i][0] = zero_capacity_value
for c in range(sizes[0], capacity + 1): # init first row, c < w means the bag is empty, c != w means not fill
if c % sizes[0] == 0 or not fill_to_capacity:
knapsack_value[0][c] = values[0] * (c // sizes[0])
if output_item_list:
item_lists[0][c].extend([0] * (c // sizes[0]))
if iterate_sizes_first: # we can iterate either of the sizes or capacity first
for i in range(1, len(sizes)):
for c in range(1, capacity + 1):
# if c < sizes[i]:
# knapsack_value[i][c] = knapsack_value[i - 1][c]
# else:
# best_value = knapsack_init_value
# for k in range(1, (c // sizes[i]) + 1):
# best_value = min_max(best_value, knapsack_value[i - 1][c - k * sizes[i]] + k * values[i])
# knapsack_value[i][c] = min_max(knapsack_value[i - 1][c], best_value)
knapsack_value[i][c] = knapsack_value[i - 1][c]
if output_item_list:
item_lists[i][c] = item_lists[i - 1][c].copy()
if c >= sizes[i]:
knapsack_value[i][c] = min_max(knapsack_value[i][c], knapsack_value[i][c - sizes[i]] + values[i])
if output_item_list:
if knapsack_value[i][c] == knapsack_init_value:
item_lists[i][c] = list()
elif knapsack_value[i][c] == knapsack_value[i][c - sizes[i]] + values[i]:
item_lists[i][c] = item_lists[i][c - sizes[i]] + [i]
else:
for c in range(1, capacity + 1):
for i in range(1, len(sizes)):
# if c < sizes[i]:
# knapsack_value[i][c] = knapsack_value[i - 1][c]
# else:
# best_value = knapsack_init_value
# for k in range(1, (c // sizes[i]) + 1):
# best_value = min_max(best_value, knapsack_value[i - 1][c - k * sizes[i]] + k * values[i])
# knapsack_value[i][c] = min_max(knapsack_value[i - 1][c], best_value)
knapsack_value[i][c] = knapsack_value[i - 1][c]
if output_item_list:
item_lists[i][c] = item_lists[i - 1][c].copy()
if c >= sizes[i]:
knapsack_value[i][c] = min_max(knapsack_value[i][c], knapsack_value[i][c - sizes[i]] + values[i])
if output_item_list:
if knapsack_value[i][c] == knapsack_init_value:
item_lists[i][c] = list()
elif knapsack_value[i][c] == knapsack_value[i][c - sizes[i]] + values[i]:
item_lists[i][c] = item_lists[i][c - sizes[i]] + [i]
if output_dp_table:
return (knapsack_value, item_lists) if output_item_list else knapsack_value
else:
best_value = knapsack_value[len(sizes) - 1][capacity]
if output_item_list:
item_list = item_lists[len(sizes) - 1][capacity]
return (None, item_list) if best_value == knapsack_init_value else (best_value, item_list)
else:
return None if best_value == knapsack_init_value else best_value
@staticmethod
def number_of_ways_to_fill_to_capacity_with_unlimited_items_2d(
capacity: int,
sizes: list,
output_dp_table=False,
output_item_list=True
):
"""
number_of_ways[i][c] means given the FIRST i + 1 items, the number of ways to make capacity c
number_of_ways[i][c] = number_of_ways[i - 1][c] + number_of_ways[i][c - sizes[i]]
"""
number_of_ways = [[0 for _ in range(capacity + 1)] for _ in range(len(sizes))]
combo_lists = None
if output_item_list:
combo_lists = [[None for _ in range(capacity + 1)] for _ in range(len(sizes))]
for i in range(len(sizes)): # init first column
number_of_ways[i][0] = 1 # no item for 0 capacity is 1 way
if output_item_list:
combo_lists[i][0] = [[]] # empty list for no item combo
for c in range(sizes[0], capacity + 1): # init first row
if c % sizes[0] == 0:
number_of_ways[0][c] = 1
if output_item_list:
combo_lists[0][c] = [[0] * (c // sizes[0])] # one combo of all the item 0
for i in range(1, len(sizes)):
for c in range(1, capacity + 1):
number_of_ways[i][c] = number_of_ways[i - 1][c]
if c >= sizes[i]:
number_of_ways[i][c] += number_of_ways[i][c - sizes[i]] # On the same line, no i - 1
if output_item_list:
combo_lists[i][c] = combo_lists[i - 1][c]
if c >= sizes[i] and combo_lists[i][c - sizes[i]] is not None:
new_combo_list = list()
for combo in combo_lists[i][c - sizes[i]]:
new_combo_list.append(combo + [i])
combo_lists[i][c] = combo_lists[i][c] + new_combo_list if combo_lists[i][c] is not None else new_combo_list
if output_dp_table:
return (number_of_ways, combo_lists) if output_item_list else number_of_ways
else:
best_value = number_of_ways[len(sizes) - 1][capacity]
if output_item_list:
combo_list = combo_lists[len(sizes) - 1][capacity]
return (best_value, combo_list)
else:
return best_value
@staticmethod
def number_of_ways_to_fill_to_capacity_with_unlimited_items_1d(
capacity: int,
sizes: list,
output_dp_table=False,
output_item_list=True
):
"""
number_of_ways[c] means the number of ways to make capacity c
rolling row[i-1] over to row[i]
number_of_ways[c] = number_of_ways[c] + number_of_ways[c - sizes[i]]
"""
number_of_ways = [0 for _ in range(capacity + 1)]
combo_lists = None
if output_item_list:
combo_lists = [None for _ in range(capacity + 1)]
number_of_ways[0] = 1 # no item for 0 capacity is 1 way
if output_item_list:
combo_lists[0] = [[]] # empty list for no item combo
for i in range(len(sizes)):
for c in range(sizes[i], capacity + 1): # c starts from sizes[i] (c >= sizes[i])
number_of_ways[c] += number_of_ways[c - sizes[i]] # + (c > sizes[i] and c % sizes[i] == 0)
if output_item_list:
if combo_lists[c - sizes[i]] is not None:
new_combo_list = list()
for combo in combo_lists[c - sizes[i]]:
new_combo_list.append(combo + [i])
combo_lists[c] = combo_lists[c] + new_combo_list if combo_lists[c] is not None else new_combo_list
if output_dp_table:
return (number_of_ways, combo_lists) if output_item_list else number_of_ways
else:
best_value = number_of_ways[capacity]
if output_item_list:
combo_list = combo_lists[capacity]
return (best_value, combo_list)
else:
return best_value
@staticmethod
def number_of_ways_to_fill_to_capacity_with_limited_items_2d(
capacity: int,
sizes: list,
output_dp_table=False,
output_item_list=True
):
"""
number_of_ways[i][c] means given the FIRST i items, the number of ways to make capacity c
number_of_ways[i][c] = number_of_ways[i - 1][c] + number_of_ways[i - 1][c - sizes[i]]
number_of_ways[i - 1][c] means without item[i], only use FIRST i - 1 items, the number of combos
number_of_ways[i - 1][c - sizes[i]] means with item[i], every combo that make c-sizes[i] can add item[i] to get a new combo that make
"""
number_of_ways = [[0 for _ in range(capacity + 1)] for _ in range(len(sizes))]
combo_lists = None
if output_item_list:
combo_lists = [[None for _ in range(capacity + 1)] for _ in range(len(sizes))]
for i in range(len(sizes)): # init first column
number_of_ways[i][0] = 1 # no item for 0 capacity is 1 way
if output_item_list:
combo_lists[i][0] = [[]] # empty list for no item combo
if sizes[0] <= capacity: # init first row
number_of_ways[0][sizes[0]] = 1
if output_item_list:
combo_lists[0][sizes[0]] = [[0]]
for i in range(1, len(sizes)):
for c in range(1, capacity + 1):
# if c < sizes[i]:
# number_of_ways[i][c] = number_of_ways[i - 1][c]
# elif c == sizes[i]:
# number_of_ways[i][c] = number_of_ways[i - 1][c] + number_of_ways[i - 1][c - sizes[i]]
number_of_ways[i][c] = number_of_ways[i - 1][c]
if c >= sizes[i]:
number_of_ways[i][c] += number_of_ways[i - 1][c - sizes[i]]
if output_item_list:
if combo_lists[i - 1][c] is not None:
combo_lists[i][c] = combo_lists[i - 1][c]
if c >= sizes[i] and combo_lists[i - 1][c - sizes[i]] is not None:
new_combo_list = list()
for combo in combo_lists[i - 1][c - sizes[i]]:
new_combo_list.append(combo + [i])
combo_lists[i][c] = combo_lists[i][c] + new_combo_list if combo_lists[i][c] is not None else new_combo_list
if output_dp_table:
return (number_of_ways, combo_lists) if output_item_list else number_of_ways
else:
best_value = number_of_ways[len(sizes) - 1][capacity]
if output_item_list:
combo_list = combo_lists[len(sizes) - 1][capacity]
return (best_value, combo_list)
else:
return best_value
@staticmethod
def number_of_ways_to_fill_to_capacity_with_limited_items_1d(
capacity: int,
sizes: list,
quantities=1,
output_dp_table=False,
output_item_list=True
):
"""
number_of_ways[c] means the number of ways to make capacity c
number_of_ways[c] = number_of_ways[c] + number_of_ways[c - sizes[i]]
"""
if isinstance(quantities, int):
quantities_list = list()
for i in range(len(sizes)):
quantities_list.append(quantities)
quantities = quantities_list
else:
assert len(sizes) == len(quantities)
number_of_ways = [0 for _ in range(capacity + 1)]
combo_lists = None
if output_item_list:
combo_lists = [None for _ in range(capacity + 1)]
number_of_ways[0] = 1 # no item for 0 capacity is 1 way
if output_item_list:
combo_lists[0] = [[]] # empty list for no item combo
for i in range(len(sizes)):
for q in range(1, quantities[i] + 1):
for c in range(capacity, sizes[i] - 1, -1): # c >= sizes[i]
number_of_ways[c] += number_of_ways[c - sizes[i]]
if output_item_list:
if combo_lists[c - sizes[i]] is not None:
new_combo_list = list()
for combo in combo_lists[c - sizes[i]]:
new_combo_list.append(combo + [i])
combo_lists[c] = combo_lists[c] + new_combo_list if combo_lists[c] is not None else new_combo_list
if output_dp_table:
return (number_of_ways, combo_lists) if output_item_list else number_of_ways
else:
best_value = number_of_ways[capacity]
if output_item_list:
combo_list = combo_lists[capacity] # It might have duplicates
unique_combo_list = list()
if combo_list:
combo_set = set()
for i, combo in enumerate(combo_list):
t = tuple(combo.sort())
if t not in combo_set:
unique_combo_list.append(combo)
combo_set.add(t)
return (len(unique_combo_list), unique_combo_list)
return (best_value, combo_list)
else:
return best_value
| 48.046794
| 145
| 0.537171
| 27,675
| 0.998269
| 0
| 0
| 27,601
| 0.995599
| 0
| 0
| 5,818
| 0.209862
|
026d6883b4b4ef48ca95ca7facd1d38932ace6a3
| 26
|
py
|
Python
|
env/lib/python3.7/site-packages/grpc/_grpcio_metadata.py
|
PrudhviGNV/speechemotion
|
c99b4a7f644e1fd495cb5e6750ada0dd50d8b86f
|
[
"MIT"
] | 5
|
2019-04-16T20:43:47.000Z
|
2020-10-24T22:35:39.000Z
|
Lib/site-packages/grpc/_grpcio_metadata.py
|
caiyongji/Anaconda-py36.5-tensorflow-built-env
|
f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2
|
[
"PSF-2.0"
] | 2
|
2021-04-30T20:43:55.000Z
|
2021-06-10T21:34:23.000Z
|
Lib/site-packages/grpc/_grpcio_metadata.py
|
caiyongji/Anaconda-py36.5-tensorflow-built-env
|
f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2
|
[
"PSF-2.0"
] | 3
|
2019-08-03T13:47:09.000Z
|
2021-08-03T14:20:25.000Z
|
__version__ = """1.19.0"""
| 26
| 26
| 0.576923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.461538
|
027134b2e08ff17613c7279b030cfe1fcf0d8e8e
| 309
|
py
|
Python
|
pycon/tutorials/urls.py
|
azkarmoulana/pycon
|
931388e6f640c35b892bb4b2d12581ba7ec8cf4e
|
[
"BSD-3-Clause"
] | 154
|
2015-01-17T02:29:24.000Z
|
2022-03-20T20:37:24.000Z
|
pycon/tutorials/urls.py
|
azkarmoulana/pycon
|
931388e6f640c35b892bb4b2d12581ba7ec8cf4e
|
[
"BSD-3-Clause"
] | 316
|
2015-01-10T04:01:50.000Z
|
2020-09-30T20:18:08.000Z
|
pycon/tutorials/urls.py
|
azkarmoulana/pycon
|
931388e6f640c35b892bb4b2d12581ba7ec8cf4e
|
[
"BSD-3-Clause"
] | 89
|
2015-01-10T05:25:21.000Z
|
2022-02-27T03:28:59.000Z
|
from django.conf.urls import url, patterns
from .views import tutorial_email, tutorial_message
urlpatterns = patterns("", # flake8: noqa
url(r"^mail/(?P<pk>\d+)/(?P<pks>[0-9,]+)/$", tutorial_email, name="tutorial_email"),
url(r"^message/(?P<pk>\d+)/$", tutorial_message, name="tutorial_message"),
)
| 34.333333
| 88
| 0.679612
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 114
| 0.368932
|
027194484ee86822b39b3b119ff07d71c83e4daa
| 895
|
py
|
Python
|
setup.py
|
oleks/gigalixir-cli
|
d1b1c303e24be548ddc895165e34652c378f4347
|
[
"MIT"
] | null | null | null |
setup.py
|
oleks/gigalixir-cli
|
d1b1c303e24be548ddc895165e34652c378f4347
|
[
"MIT"
] | null | null | null |
setup.py
|
oleks/gigalixir-cli
|
d1b1c303e24be548ddc895165e34652c378f4347
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='gigalixir',
url='https://github.com/gigalixir/gigalixir-cli',
author='Jesse Shieh',
author_email='jesse@gigalixir.com',
version='1.1.10',
packages=find_packages(),
include_package_data=True,
install_requires=[
'click~=6.7',
'requests~=2.20.0',
'stripe~=1.51.0',
'rollbar~=0.13.11',
'pygments~=2.2.0',
],
entry_points='''
[console_scripts]
gigalixir=gigalixir:cli
''',
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
'HTTPretty',
'sure',
],
extras_require={
'dev': [
'Sphinx',
'sphinx_rtd_theme',
'sphinx-tabs',
],
'test': [
'pytest',
'HTTPretty',
'sure',
],
}
)
| 20.813953
| 53
| 0.492737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 362
| 0.404469
|
02723743e00e16a13861c25c7c9d6a4bb4b3f93e
| 254
|
py
|
Python
|
runTest.py
|
Amedeo91/cushypost_integration
|
fc7ffc9daf535ed5bcfdee4933a7a57340a583b2
|
[
"MIT"
] | 1
|
2021-10-06T06:23:40.000Z
|
2021-10-06T06:23:40.000Z
|
runTest.py
|
Amedeo91/cushypost_integration
|
fc7ffc9daf535ed5bcfdee4933a7a57340a583b2
|
[
"MIT"
] | null | null | null |
runTest.py
|
Amedeo91/cushypost_integration
|
fc7ffc9daf535ed5bcfdee4933a7a57340a583b2
|
[
"MIT"
] | null | null | null |
import os
import unittest
dir_path = os.path.dirname(os.path.realpath(__file__))
suite = unittest.TestLoader().discover(dir_path, pattern='test_*.py')
result = unittest.TextTestRunner(verbosity=3).run(suite)
print(result)
assert result.wasSuccessful()
| 25.4
| 69
| 0.787402
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.043307
|
02735e99efa8906c66196996cdf60aedba9354a2
| 6,145
|
py
|
Python
|
tests/test_pydent/test_models/models/test_plan.py
|
aquariumbio/trident
|
d1712cae544103fb145e3171894e4b35141f6813
|
[
"MIT"
] | 5
|
2019-01-21T11:12:05.000Z
|
2020-03-05T20:52:14.000Z
|
tests/test_pydent/test_models/models/test_plan.py
|
aquariumbio/pydent
|
d1712cae544103fb145e3171894e4b35141f6813
|
[
"MIT"
] | 28
|
2020-11-18T02:07:09.000Z
|
2021-06-08T15:49:41.000Z
|
tests/test_pydent/test_models/models/test_plan.py
|
aquariumbio/trident
|
d1712cae544103fb145e3171894e4b35141f6813
|
[
"MIT"
] | 2
|
2021-02-27T19:23:45.000Z
|
2021-09-14T10:29:07.000Z
|
import pytest
from pydent.models import Plan
def test_plan_constructor(fake_session):
g = fake_session.Plan.new()
assert g.name is not None
print(g.plan_associations)
assert g.operations is None
assert g.wires == []
g = Plan(name="MyPlan", status="running")
assert g.name == "MyPlan"
assert g.status == "running"
def test_add_operation(fake_session):
op = fake_session.Operation.load({"id": 4})
p = fake_session.Plan.new()
# add first operation
assert p.operations is None
p.add_operation(op)
assert p.operations == [op]
# add second operation
op2 = fake_session.Operation.load({"id": 5})
p.add_operation(op2)
assert p.operations == [op, op2]
def test_add_operations(fake_session):
op = fake_session.Operation.load({"id": 4})
op2 = fake_session.Operation.load({"id": 5})
ops = [op, op2]
p = fake_session.Plan.new()
p.add_operations(ops)
assert p.operations == [op, op2]
@pytest.fixture(scope="function")
def fake_plan(fake_session):
p = fake_session.Plan.new()
op1 = fake_session.Operation.load({})
op2 = fake_session.Operation.load({})
src = fake_session.FieldValue.load(
{
"name": "myinput",
"parent_class": "Operation",
"operation": op1,
"role": "output",
}
)
dest = fake_session.FieldValue.load(
{
"name": "myoutput",
"parent_class": "Operation",
"operation": op2,
"role": "input",
}
)
op1.field_values = [src]
op2.field_values = [dest]
return p, src, dest
def test_wire(fake_plan):
p, src, dest = fake_plan
p.add_operations([src.operation, dest.operation])
p.wire(src, dest)
assert len(p.wires) == 1
assert p.wires[0].source.name == "myinput"
assert p.wires[0].destination.name == "myoutput"
print(p.wires)
def test_plan_copy(example_plan):
"""Copying plans should anonymize operations and wires."""
copied_plan = example_plan.copy()
assert copied_plan.operations
for op in copied_plan.operations:
assert op.id is None
assert op.operation_type_id is not None
assert op.field_values is not None
for fv in op.field_values:
assert fv.id is None
assert fv.parent_id is None
assert fv.field_type_id is not None
# TODO: make this adeterministic test
"""def test_new_plan(session):
p = fake_session.Plan.new()
p.connect_to_session(session)
assert p.operations is None
assert p.plan_associations is None
p.id = 1000000
assert p.operations == []
assert p.plan_associations == []"""
# def test_submit(session):
# primer = session.SampleType.find(1).samples[-1]
#
# # get Order Primer operation type
# ot = session.OperationType.find(328)
#
# # create an operation
# order_primer = ot.instance()
#
# # set io
# order_primer.set_output("Primer", sample=primer)
# order_primer.set_input("Urgent?", value="no")
#
# # create a new plan and add operations
# p = session.Plan(name="MyPlan")
# p.add_operation(order_primer)
#
# # save the plan
# p.create()
#
# # estimate the cost
# p.estimate_cost()
#
# # show the plan
# p.show()
#
# # submit the plan
# p.submit(session.current_user, session.current_user.budgets[0])
# def test_submit_pcr(session):
# def get_op(name):
# return session.OperationType.where(
# {'name': name, 'deployed': True})[-1].instance()
#
# make_pcr_fragment = get_op('Make PCR Fragment')
# pour_gel = get_op('Pour Gel')
# run_gel = get_op('Run Gel')
# extract_gel_slice = get_op('Extract Gel Slice')
# purify_gel = get_op('Purify Gel Slice')
#
# # setup pcr
# make_pcr_fragment.set_input('Forward Primer',
# item=session.Item.find(81867))
# make_pcr_fragment.set_input('Reverse Primer',
# item=session.Item.find(57949))
# make_pcr_fragment.set_input('Template', item=session.Item.find(61832))
# make_pcr_fragment.set_output('Fragment',
# sample=session.Sample.find(16976))
#
# # setup outputs
# # run_gel.set_output(sample=session.Sample.find(16976))
# # extract_gel_slice.set_output(sample=session.Sample.find(16976))
# # purify_gel.set_output(sample=session.Sample.find(16976))
# # purify_gel.pour_gel(sample=session.Sample.find(16976))
#
# # new plan
# p = session.fake_session.Plan.new()
# p.add_operations([make_pcr_fragment, pour_gel, run_gel,
# extract_gel_slice, purify_gel])
#
# p.add_wires([
# (make_pcr_fragment.output("Fragment"), run_gel.input("Fragment")),
# (pour_gel.output("Lane"), run_gel.input("Gel")),
# (run_gel.output("Fragment"), extract_gel_slice.input("Fragment")),
# (extract_gel_slice.output("Fragment"), purify_gel.input("Gel"))
# ])
#
# make_pcr_fragment.set_output("Fragment",
# sample=session.Sample.find(16976))
#
#
# pdata = p.to_save_json()
#
# # wire up the operations
# # p.wire(make_pcr_fragment.outputs[0], run_gel.input('Fragment'))
# # p.wire(pour_gel.outputs[0], run_gel.input('Gel'))
# # p.wire(run_gel.outputs[0], extract_gel_slice.input('Fragment'))
# # p.wire(extract_gel_slice.outputs[0], purify_gel.input('Gel'))
#
# # save the plan
# p.create()
#
# # estimate the cost
# p.estimate_cost()
#
# p.validate()
#
# # show the plan
# p.show()
#
# # submit the plan
# p.submit(session.current_user, session.current_user.budgets[0])
# # TODO: having difficulty patching plans/operations here...
# def test_replan(session):
#
# p = session.Plan.find(79797)
# newplan = p.replan()
# newplan.print()
#
# for op in newplan.operations:
# if op.operation_type.name == "Make PCR Fragment":
# op.set_input('Template', item=session.Item.find(57124))
# newplan.patch(newplan.to_save_json())
| 28.581395
| 76
| 0.621318
| 0
| 0
| 0
| 0
| 661
| 0.107567
| 0
| 0
| 3,936
| 0.640521
|
0273c9fe7bf28f09a7dc46bd636570ab46c8a8fa
| 611
|
py
|
Python
|
FusionIIIT/applications/gymkhana/migrations/0007_auto_20200608_2210.py
|
sabhishekpratap5/sonarcubeTest2
|
9bd8105e457f6feb8c38fa94b335e54783fca99e
|
[
"bzip2-1.0.6"
] | 2
|
2020-01-24T16:34:54.000Z
|
2020-08-01T05:09:24.000Z
|
FusionIIIT/applications/gymkhana/migrations/0007_auto_20200608_2210.py
|
sabhishekpratap5/sonarcubeTest2
|
9bd8105e457f6feb8c38fa94b335e54783fca99e
|
[
"bzip2-1.0.6"
] | 1
|
2021-05-05T09:50:22.000Z
|
2021-05-05T09:50:22.000Z
|
FusionIIIT/applications/gymkhana/migrations/0007_auto_20200608_2210.py
|
sabhishekpratap5/sonarcubeTest2
|
9bd8105e457f6feb8c38fa94b335e54783fca99e
|
[
"bzip2-1.0.6"
] | 4
|
2020-01-16T17:00:08.000Z
|
2020-06-30T15:58:32.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-06-08 22:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gymkhana', '0006_form_available'),
]
operations = [
migrations.RemoveField(
model_name='form_available',
name='id',
),
migrations.AddField(
model_name='form_available',
name='roll',
field=models.CharField(default=2016001, max_length=7, primary_key=True, serialize=False),
),
]
| 24.44
| 101
| 0.610475
| 452
| 0.739771
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.237316
|
027588263d8cfcf1854016d6bcb09a5b8fcae300
| 1,899
|
py
|
Python
|
config/presets/Modes/Python/T - Bits H/main.py
|
The-XOR/EYESY_OS
|
6a5e3d0bc5574ba2311e0c7e81c600c3af7a3e34
|
[
"BSD-3-Clause"
] | 18
|
2021-03-06T05:39:30.000Z
|
2022-03-25T17:59:23.000Z
|
presets/Modes/Python/T - Bits H/main.py
|
jqrsound/EYESY_OS_for_RasPiSound
|
ac117b91cd84ad4c0566bd1a7d4c7b1ccc01cf62
|
[
"BSD-3-Clause"
] | null | null | null |
presets/Modes/Python/T - Bits H/main.py
|
jqrsound/EYESY_OS_for_RasPiSound
|
ac117b91cd84ad4c0566bd1a7d4c7b1ccc01cf62
|
[
"BSD-3-Clause"
] | 4
|
2021-03-14T18:38:42.000Z
|
2021-07-11T14:31:18.000Z
|
import os
import pygame
import random
trigger = False
x = 0
y = 0
height = 720
width = 1280
linelength = 50
lineAmt = 20
displace = 10
xpos = [random.randrange(-200,1280) for i in range(0, lineAmt + 2)]
xpos1 = [(xpos[i]+displace) for i in range(0, lineAmt + 2)]
xr = 360
yr = 240
def setup(screen, etc):
global trigger, x, y, height, width, xpos, lineAmt, xpos1, linelength, displace, xr, yr
xr = etc.xres
yr = etc.yres
height = yr
width = xr
linelength = ((50*xr)/1280)
lineAmt = ((20*xr)/1280)
displace = ((10*xr)/1280)
xpos = [random.randrange(int((-200*xr)/1280),xr) for i in range(0, lineAmt + 2)]
xpos1 = [(xpos[i]+displace) for i in range(0, lineAmt + 2)]
pass
def draw(screen, etc):
global trigger, x, y, height, width, xpos, lineAmt, xpos1, linelength, displace, xr, yr
etc.color_picker_bg(etc.knob5)
displace = ((10*xr)/1280)
linewidth = (height / lineAmt)
linelength = int(etc.knob2*((300*xr)/1280)+1)
color = etc.color_picker(etc.knob4)
minus = (etc.knob3*0.5)+0.5
shadowColor = (etc.bg_color[0]*minus, etc.bg_color[1]*minus, etc.bg_color[2]*minus)
if etc.audio_trig or etc.midi_note_new :
trigger = True
if trigger == True :
lineAmt = int(etc.knob1*((100*yr)/720) + 2)
xpos = [random.randrange(int((-200*xr)/1280),xr) for i in range(0, lineAmt + 2)]
xpos1 = [(xpos[i]+displace) for i in range(0, lineAmt + 2)]
for k in range(0, lineAmt + 2) :
x = xpos1[k] + linelength
y = (k * linewidth) + int(linewidth/2)- 1
pygame.draw.line(screen, shadowColor, (xpos1[k], y+displace), (x, y+displace), linewidth)
for j in range(0, lineAmt + 2) :
x = xpos[j] + linelength
y = (j * linewidth) + int(linewidth/2)- 1
pygame.draw.line(screen, color, (xpos[j], y), (x, y), linewidth)
trigger = False
| 31.131148
| 97
| 0.604529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0275d85ad826b0b81b83f4f373f69ae66117d9ed
| 2,577
|
py
|
Python
|
ext/std/code/mi.py
|
iazarov/metrixplusplus
|
322777cba4e089502dd6053749b07a7be9da65b2
|
[
"MIT"
] | null | null | null |
ext/std/code/mi.py
|
iazarov/metrixplusplus
|
322777cba4e089502dd6053749b07a7be9da65b2
|
[
"MIT"
] | null | null | null |
ext/std/code/mi.py
|
iazarov/metrixplusplus
|
322777cba4e089502dd6053749b07a7be9da65b2
|
[
"MIT"
] | null | null | null |
#
# Metrix++, Copyright 2009-2019, Metrix++ Project
# Link: https://github.com/metrixplusplus/metrixplusplus
#
# This file is a part of Metrix++ Tool.
#
import mpp.api
class Plugin(mpp.api.Plugin,
mpp.api.IConfigurable,
mpp.api.Child,
mpp.api.MetricPluginMixin):
def declare_configuration(self, parser):
self.parser = parser
parser.add_option("--std.code.maintindex.simple", "--scmis",
action="store_true", default=False,
help="Enables collection of simple maintainability index metric."
" It uses std.code.line:code, std.code.complexity:cyclomatic"
" metrics to rank level of maintainability."
" Lower value of this metric indicates better maintainability."
" [default: %default]")
def configure(self, options):
self.is_active_simple = options.__dict__['std.code.maintindex.simple']
if self.is_active_simple == True:
required_opts = ['std.code.complexity.cyclomatic', 'std.code.lines.code']
for each in required_opts:
if options.__dict__[each] == False:
self.parser.error('option --std.code.maintindex.simple: requires --{0} option'.
format(each))
def initialize(self):
self.declare_metric(self.is_active_simple,
self.Field('simple', int),
{
'std.code.complexity':(None, self.RankedComplexityCounter),
'std.code.lines':(None, self.RankedLinesCounter),
},
# set none, because this plugin is not interested in parsing the code
marker_type_mask=mpp.api.Marker.T.NONE)
super(Plugin, self).initialize(fields=self.get_fields())
if self.is_active() == True:
self.subscribe_by_parents_name('std.code.complexity')
self.subscribe_by_parents_name('std.code.lines')
class RankedComplexityCounter(mpp.api.MetricPluginMixin.RankedCounter):
rank_source = ('std.code.complexity', 'cyclomatic')
rank_ranges = [(None, 7), (8, 11), (12, 19), (20, 49), (50, None)]
class RankedLinesCounter(mpp.api.MetricPluginMixin.RankedCounter):
rank_source = ('std.code.lines', 'code')
rank_ranges = [(None, 124), (125, 249), (250, 499), (500, 999), (1000, None)]
| 45.210526
| 100
| 0.568879
| 2,379
| 0.923166
| 0
| 0
| 0
| 0
| 0
| 0
| 819
| 0.317811
|
027b51903bbc31466f05349aa598a39bb4d2919d
| 447
|
py
|
Python
|
6.00.1x/quiz/flatten.py
|
NicholasAsimov/courses
|
d60981f25816445578eb9e89bbbeef2d38eaf014
|
[
"MIT"
] | null | null | null |
6.00.1x/quiz/flatten.py
|
NicholasAsimov/courses
|
d60981f25816445578eb9e89bbbeef2d38eaf014
|
[
"MIT"
] | null | null | null |
6.00.1x/quiz/flatten.py
|
NicholasAsimov/courses
|
d60981f25816445578eb9e89bbbeef2d38eaf014
|
[
"MIT"
] | null | null | null |
def flatten(aList):
'''
aList: a list
Returns a copy of aList, which is a flattened version of aList
'''
if aList == []:
return aList
if type(aList[0]) == list:
return flatten(aList[0]) + flatten(aList[1:])
return aList[:1] + flatten(aList[1:])
aList = [[1, 'a', ['cat'], 2], [[[3]], 'dog'], 4, 5]
print flatten(aList)
testCase = [1, 'a', 'cat', 2, 3, 'dog', 4, 5]
print flatten(aList) == testCase
| 22.35
| 66
| 0.548098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 122
| 0.272931
|
027cdd147516550681b095c7591faaa5e2b26a2b
| 9,960
|
py
|
Python
|
copo_code/copo/algo_svo/svo_env.py
|
decisionforce/CoPO
|
3a06a48522b901db2e380a62a0efb5e8a30cd079
|
[
"Apache-2.0"
] | 37
|
2021-11-01T03:30:30.000Z
|
2022-03-29T08:38:12.000Z
|
copo_code/copo/algo_svo/svo_env.py
|
decisionforce/CoPO
|
3a06a48522b901db2e380a62a0efb5e8a30cd079
|
[
"Apache-2.0"
] | null | null | null |
copo_code/copo/algo_svo/svo_env.py
|
decisionforce/CoPO
|
3a06a48522b901db2e380a62a0efb5e8a30cd079
|
[
"Apache-2.0"
] | 4
|
2021-11-05T06:55:34.000Z
|
2022-01-04T07:08:37.000Z
|
"""
Usage: Call get_svo_env(env_class) to get the real env class!
"""
from collections import defaultdict
from math import cos, sin
import numpy as np
from gym.spaces import Box
from metadrive.envs.marl_envs.marl_tollgate import TollGateObservation, MultiAgentTollgateEnv
from metadrive.obs.state_obs import LidarStateObservation
from metadrive.utils import get_np_random, norm, clip
from copo.utils import get_rllib_compatible_env
class SVOObsForRound(LidarStateObservation):
@property
def observation_space(self):
space = super(SVOObsForRound, self).observation_space
assert isinstance(space, Box)
assert len(space.shape) == 1
length = space.shape[0] + 1
space = Box(
low=np.array([space.low[0]] * length),
high=np.array([space.high[0]] * length),
shape=(length,),
dtype=space.dtype
)
return space
class SVOObsForRoundForTollgate(TollGateObservation):
@property
def observation_space(self):
space = super(SVOObsForRoundForTollgate, self).observation_space
assert isinstance(space, Box)
assert len(space.shape) == 1
length = space.shape[0] + 1
space = Box(
low=np.array([space.low[0]] * length),
high=np.array([space.high[0]] * length),
shape=(length,),
dtype=space.dtype
)
return space
class SVOObsForRoundForTollgate(TollGateObservation):
@property
def observation_space(self):
space = super(SVOObsForRoundForTollgate, self).observation_space
assert isinstance(space, Box)
assert len(space.shape) == 1
length = space.shape[0] + 1
space = Box(
low=np.array([space.low[0]] * length),
high=np.array([space.high[0]] * length),
shape=(length, ),
dtype=space.dtype
)
return space
class SVOEnv:
@classmethod
def default_config(cls):
config = super(SVOEnv, cls).default_config()
config.update(
dict(
neighbours_distance=20,
# Two mode to compute utility for each vehicle:
# "linear": util = r_me * svo + r_other * (1 - svo), svo in [0, 1]
# "angle": util = r_me * cos(svo) + r_other * sin(svo), svo in [0, pi/2]
# "angle" seems to be more stable!
svo_mode="angle",
svo_dist="uniform", # "uniform" or "normal"
svo_normal_std=0.3, # The initial STD of normal distribution, might change by calling functions.
return_native_reward=False,
include_ego_reward=False,
# Whether to force set the svo
force_svo=-100
)
)
return config
def __init__(self, config=None):
super(SVOEnv, self).__init__(config)
self.svo_map = {}
if hasattr(super(SVOEnv, self), "_update_distance_map"):
# The parent env might be CCEnv, so we don't need to do this again!
self._parent_has_distance_map = True
else:
self.distance_map = defaultdict(lambda: defaultdict(lambda: float("inf")))
self._parent_has_distance_map = False
assert self.config["svo_mode"] in ["linear", "angle"]
assert self.config["svo_dist"] in ["uniform", "normal"]
assert self.config["svo_normal_std"] > 0.0
self.force_svo = self.config["force_svo"]
# Only used in normal SVO distribution
# SVO is always in range [0, 1], but the real SVO degree is in [-pi/2, pi/2].
self.current_svo_mean = 0.0 # Set to 0 degree.
self.current_svo_std = self.config["svo_normal_std"]
def get_single_observation(self, vehicle_config):
# TODO we should generalize this function in future!
if issubclass(self.__class__, MultiAgentTollgateEnv):
return SVOObsForRoundForTollgate(vehicle_config)
else:
return SVOObsForRound(vehicle_config)
def _get_reset_return(self):
self.svo_map.clear()
self._update_distance_map()
obses = super(SVOEnv, self)._get_reset_return()
ret = {}
for k, o in obses.items():
svo, ret[k] = self._add_svo(o)
self.svo_map[k] = svo
return ret
def step(self, actions):
# step the environment
o, r, d, i = super(SVOEnv, self).step(actions)
self._update_distance_map()
# add SVO into observation, also update SVO map and info.
ret = {}
for k, v in o.items():
svo, ret[k] = self._add_svo(v, self.svo_map[k] if k in self.svo_map else None, k)
if k not in self.svo_map:
self.svo_map[k] = svo
if i[k]:
i[k]["svo"] = svo
if self.config["return_native_reward"]:
return ret, r, d, i
# compute the SVO-weighted rewards
new_rewards = {}
for k, own_r in r.items():
other_rewards = []
if self.config["include_ego_reward"]:
other_rewards.append(own_r)
# neighbours = self._find_k_nearest(k, K)
neighbours = self._find_in_range_for_svo(k, self.config["neighbours_distance"])
for other_k in neighbours:
if other_k is None:
break
else:
other_rewards.append(r[other_k])
if len(other_rewards) == 0:
other_reward = own_r
else:
other_reward = np.mean(other_rewards)
# svo_map stores values in [-1, 1]
if self.config["svo_mode"] == "linear":
new_r = self.svo_map[k] * own_r + (1 - self.svo_map[k]) * other_reward
elif self.config["svo_mode"] == "angle":
svo = self.svo_map[k] * np.pi / 2
new_r = cos(svo) * own_r + sin(svo) * other_reward
else:
raise ValueError("Unknown SVO mode: {}".format(self.config["svo_mode"]))
new_rewards[k] = new_r
return ret, new_rewards, d, i
def set_force_svo(self, v):
self.force_svo = v
def _add_svo(self, o, svo=None, agent_name=None):
if self.force_svo != -100:
if self.config["svo_dist"] == "normal":
svo = get_np_random().normal(loc=self.force_svo, scale=self.current_svo_std)
else:
svo = self.force_svo
elif svo is not None:
pass
else:
if self.config["svo_dist"] == "normal":
svo = get_np_random().normal(loc=self.current_svo_mean, scale=self.current_svo_std)
svo = clip(svo, -1, 1)
else:
svo = get_np_random().uniform(-1, 1)
# print("For agent {}, we assign new SVO {} deg! Current mean {} deg, std {}.".format(
# agent_name, svo * 90, self.current_svo_mean * 90, self.current_svo_std))
output_svo = (svo + 1) / 2
return svo, np.concatenate([o, [output_svo]])
def set_svo_dist(self, mean, std):
assert self.config["svo_dist"] == "normal"
self.current_svo_mean = mean
self.current_svo_std = std
assert std > 0.0
def _find_in_range_for_svo(self, v_id, distance):
if distance <= 0:
return []
max_distance = distance
dist_to_others = self.distance_map[v_id]
dist_to_others_list = sorted(dist_to_others, key=lambda k: dist_to_others[k])
ret = [
dist_to_others_list[i] for i in range(len(dist_to_others_list))
if dist_to_others[dist_to_others_list[i]] < max_distance
]
return ret
def _update_distance_map(self):
if self._parent_has_distance_map:
return super(SVOEnv, self)._update_distance_map()
self.distance_map.clear()
keys = list(self.vehicles.keys())
for c1 in range(0, len(keys) - 1):
for c2 in range(c1 + 1, len(keys)):
k1 = keys[c1]
k2 = keys[c2]
p1 = self.vehicles[k1].position
p2 = self.vehicles[k2].position
distance = norm(p1[0] - p2[0], p1[1] - p2[1])
self.distance_map[k1][k2] = distance
self.distance_map[k2][k1] = distance
def get_svo_env(env_class, return_env_class=False):
name = env_class.__name__
class TMP(SVOEnv, env_class):
pass
TMP.__name__ = name
TMP.__qualname__ = name
if return_env_class:
return TMP
return get_rllib_compatible_env(TMP)
if __name__ == '__main__':
# env = SVOEnv({"num_agents": 8, "neighbours_distance": 3, "svo_mode": "angle", "force_svo": 0.9})
env = get_svo_env(
MultiAgentTollgateEnv, return_env_class=True
)({
"num_agents": 8,
"neighbours_distance": 3,
"svo_mode": "angle",
"svo_dist": "normal"
})
o = env.reset()
assert env.observation_space.contains(o)
assert all([0 <= oo[-1] <= 1.0 for oo in o.values()])
total_r = 0
ep_s = 0
for i in range(1, 100000):
o, r, d, info = env.step({k: [0.0, 1.0] for k in env.vehicles.keys()})
assert env.observation_space.contains(o)
assert all([0 <= oo[-1] <= 1.0 for oo in o.values()])
for r_ in r.values():
total_r += r_
print("SVO: {}".format({kkk: iii["svo"] if "svo" in iii else None for kkk, iii in info.items()}))
ep_s += 1
if d["__all__"]:
print(
"Finish! Current step {}. Group Reward: {}. Average reward: {}".format(
i, total_r, total_r / env.agent_manager.next_agent_count
)
)
break
if len(env.vehicles) == 0:
total_r = 0
print("Reset")
env.reset()
env.close()
| 35.44484
| 113
| 0.570482
| 8,008
| 0.804016
| 0
| 0
| 2,196
| 0.220482
| 0
| 0
| 1,622
| 0.162851
|
027d3d4607b5f1e18cfb2663664c754672a047c8
| 1,995
|
py
|
Python
|
tests/test_renderer.py
|
derlin/get-html
|
ea6d81f424ed0a60a37a52b95dd5b27c85cf0852
|
[
"Apache-2.0"
] | 11
|
2020-03-02T08:38:37.000Z
|
2021-11-19T05:03:20.000Z
|
tests/test_renderer.py
|
derlin/get-html
|
ea6d81f424ed0a60a37a52b95dd5b27c85cf0852
|
[
"Apache-2.0"
] | 2
|
2020-03-02T11:43:12.000Z
|
2020-03-10T07:59:07.000Z
|
tests/test_renderer.py
|
derlin/get-html
|
ea6d81f424ed0a60a37a52b95dd5b27c85cf0852
|
[
"Apache-2.0"
] | 2
|
2020-03-02T08:13:53.000Z
|
2020-03-09T21:15:26.000Z
|
from get_html.html_renderer import HtmlRenderer
import pytest
import re
@pytest.fixture(scope='module')
def renderer():
r = HtmlRenderer()
try:
yield r
finally:
r.close()
def test_response(renderer: HtmlRenderer):
url = 'http://www.twitter.com' # this will redirect to https://twitter.com
r = renderer.render(url)
assert r.status_code == 200
assert r.reason == 'OK', 'reason not ok'
assert r.text == r.content.decode(r.encoding), 'wrong encoding'
assert len(r.history) > 0, 'no redirect ??'
def test_404(renderer: HtmlRenderer):
r = renderer.render('https://github.com/afadfadfaf/adsfa-not-exist')
assert r.status_code == 404
assert r.reason == 'Not Found'
@pytest.mark.parametrize(
'url', [
'https://meertjes-stuff.blogspot.com/search/label/Link%20Your%20Stuff', # whut ?? this one is strange
'https://www.investing.com/indices/major-indices', # has websocket
# 'http://data.fis-ski.com/dynamic/athlete-biography.html?sector=AL&competitorid=147749&type=result', # don't remember, but was problematic
])
def test_potentially_problematic_urls(renderer: HtmlRenderer, url):
r = renderer.render(url)
assert r.status_code == 200
assert len(r.content) > 0
def test_manipulate_page(renderer: HtmlRenderer):
url = 'https://9gag.com/'
r = renderer.render(url)
num_articles = len(re.findall('<article', r.text))
# async def fn(page):
# assert page is not None
# for _ in range(10):
# await page._keyboard.down('PageDown')
async def fn(page):
# https://github.com/miyakogi/pyppeteer/issues/205#issuecomment-470886682
await page.evaluate('{window.scrollBy(0, document.body.scrollHeight);}')
r = renderer.render(url, manipulate_page_func=fn)
num_articles_after_scroll = len(re.findall('<article', r.text))
print(num_articles, num_articles_after_scroll)
assert num_articles_after_scroll > num_articles
| 32.704918
| 148
| 0.680201
| 0
| 0
| 94
| 0.047118
| 666
| 0.333835
| 182
| 0.091228
| 777
| 0.389474
|
027e6a3b136fbe978f346957d7b86c2022fa6ea2
| 724
|
py
|
Python
|
resources/include-lists/string_manipulator_util.py
|
e-loughlin/CppCodeGenerator
|
638f80f9df21d709d1240bb3bd43f9d43dd2e3ac
|
[
"MIT"
] | 6
|
2019-09-30T10:27:15.000Z
|
2020-12-20T14:46:24.000Z
|
resources/include-lists/string_manipulator_util.py
|
e-loughlin/CppCodeGenerator
|
638f80f9df21d709d1240bb3bd43f9d43dd2e3ac
|
[
"MIT"
] | 4
|
2019-11-25T18:14:29.000Z
|
2019-12-09T20:47:29.000Z
|
resources/include-lists/string_manipulator_util.py
|
emloughl/CppCodeGenerator
|
638f80f9df21d709d1240bb3bd43f9d43dd2e3ac
|
[
"MIT"
] | 1
|
2021-12-01T07:03:31.000Z
|
2021-12-01T07:03:31.000Z
|
import sys
import os
import ntpath
def readFile(filePath):
with open(filePath, "r") as file:
return file.read()
def writeToDisk(filePath, stringToSave):
with open(filePath, "w+") as newFile:
newFile.write(stringToSave)
def main():
filePath = os.path.abspath("./qt-includes.txt")
stuff = readFile(filePath)
stuff = stuff.replace(" ", "\n")
lines = stuff.split("\n")
newLines = []
for line in lines:
line = line.replace("(", "").replace(")", "")
if line[0] != "Q":
line = line[1:]
newLines.append(line)
stuff = "\n".join(newLines)
writeToDisk("qt-includes-new.txt", stuff)
if __name__ == "__main__":
main()
| 20.111111
| 53
| 0.574586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.117403
|
027e798c00ba61f438e908e5871d0e08cf7a12f8
| 2,205
|
py
|
Python
|
build/lib/henmedlib/functions/hounsfield.py
|
schmitzhenninglmu/henmedlib
|
196b63710f092470ab21173cfcc0b14e65778f33
|
[
"MIT"
] | null | null | null |
build/lib/henmedlib/functions/hounsfield.py
|
schmitzhenninglmu/henmedlib
|
196b63710f092470ab21173cfcc0b14e65778f33
|
[
"MIT"
] | null | null | null |
build/lib/henmedlib/functions/hounsfield.py
|
schmitzhenninglmu/henmedlib
|
196b63710f092470ab21173cfcc0b14e65778f33
|
[
"MIT"
] | 1
|
2019-09-20T10:59:25.000Z
|
2019-09-20T10:59:25.000Z
|
__author__ = "Henning Schmitz"
import numpy as np
def calculate_hounsfield_unit(mu, mu_water, mu_air):
"""
Given linear attenuation coefficients the function calculates the corresponding Hounsfield units.
:param mu: Attenuation coefficient to determine corresponding Hounsfield unit.
:param mu_water: Constant linear attenuation coefficient for water
:param mu_air: Constant linear attenuation coefficient for air
:return: Hounsfield unit corresponding to mu
"""
HU = 1000 * ((mu - mu_water) / (mu_water - mu_air))
return HU
def calculate_hounsfield_unit_parameterless(mu):
"""
Given linear attenuation coefficients the function calculates the corresponding Hounsfield units.
:param mu: Attenuation coefficient to determine corresponding Hounsfield unit.
:return: Hounsfield unit corresponding to mu
"""
HU = mu * 65536-1024
return HU
def create_array_with_hounsfield_units(image_data, mu_water, mu_air):
"""
Given 3d array with linear attenuation coefficients the function calculates the corresponding Hounsfield units.
:param image_data: 3d array corresponding to image
:param mu: Attenuation coefficient to determine corresponding Hounsfield unit.
:param mu_water: Constant linear attenuation coefficient for water
:param mu_air: Constant linear attenuation coefficient for air
:return: 3d array calculated in Hounsfield unit
"""
# print dimensions of array
dim_x = np.size(image_data, 0)
dim_y = np.size(image_data, 1)
dim_slice = np.size(image_data, 2)
# loop through array
count = 0
iterations = dim_x * dim_y * dim_slice
# loop through x direction
for i in range(0, dim_x):
# loop through y direction
for j in range(0, dim_y):
# loop through slices
for k in range(0, dim_slice):
image_data[i][j][k] = calculate_hounsfield_unit(image_data[i][j][k], mu_water, mu_air)
count += 1
if count % (0.1 * iterations) == 0:
print(round(count / iterations, 1) * 100, "% progress")
return image_data
| 38.017241
| 116
| 0.677098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,254
| 0.568707
|
027ff59d51aedead00128b3b38fec073cc323ee3
| 1,028
|
py
|
Python
|
coaddExtract.py
|
rbliu/LSST_DM_Scripts
|
0a32ba629a2b52d3add407e92ab8ff4bc3cbd64d
|
[
"MIT"
] | null | null | null |
coaddExtract.py
|
rbliu/LSST_DM_Scripts
|
0a32ba629a2b52d3add407e92ab8ff4bc3cbd64d
|
[
"MIT"
] | null | null | null |
coaddExtract.py
|
rbliu/LSST_DM_Scripts
|
0a32ba629a2b52d3add407e92ab8ff4bc3cbd64d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
## last modified by Robert Liu at 7/29/2019
## This script is used to extract data (and WCS info) in the image extension of a coadd patch.
## Ext 0 = primaryHDU, Ext 1 = image, Ext 2 = mask, Ext 3 = variancce.
## Then, the output fits file can be used by SWarp to assemble a mosaic coadd image.
import re
import sys
import numpy as np
from astropy.io import fits
from astropy import wcs
if len(sys.argv) != 3:
print("Usage: python coaddExtract.py {coadd_image} {extracted_image}", file=sys.stderr)
exit(1);
coadd_patch = sys.argv[1]
extracted_patch = sys.argv[2]
# Open the fits image
hdu = fits.open(coadd_patch)
# Create a new HDU. Save the data of Ext1 to it.
hdu1 = fits.PrimaryHDU(hdu[1].data)
print('Coadd patch loaded.')
# Extract WCS info and append to the new HDU.
w = wcs.WCS(hdu[1].header)
wcs_keys = w.to_header()
hdu1.header += wcs_keys
print('WCS information appened.')
# Write the new HDU
hdu1.writeto(extracted_patch)
print('New coadd image saved!\n')
| 28.555556
| 94
| 0.715953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 604
| 0.587549
|
02824286d75d00e50642afe49b18a9fd9681523d
| 22
|
py
|
Python
|
backend_server/backend_globals.py
|
MSNLAB/SmartEye
|
40b38190aeff5d5b970c8cbf43e8781634b38028
|
[
"MIT",
"Unlicense"
] | 17
|
2021-06-27T04:33:13.000Z
|
2022-03-21T02:54:52.000Z
|
backend_server/backend_globals.py
|
MSNLAB/SmartEye
|
40b38190aeff5d5b970c8cbf43e8781634b38028
|
[
"MIT",
"Unlicense"
] | null | null | null |
backend_server/backend_globals.py
|
MSNLAB/SmartEye
|
40b38190aeff5d5b970c8cbf43e8781634b38028
|
[
"MIT",
"Unlicense"
] | 2
|
2021-10-31T05:14:24.000Z
|
2022-03-25T18:53:49.000Z
|
global loaded_model
| 5.5
| 19
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
02842784fc821e743357ee9efac57212bf1f6827
| 326
|
py
|
Python
|
src/utils.py
|
fabiob/wwwsqldesigner-aws
|
5518eae682e8228be30b094c6015054b3cddf8f3
|
[
"MIT"
] | null | null | null |
src/utils.py
|
fabiob/wwwsqldesigner-aws
|
5518eae682e8228be30b094c6015054b3cddf8f3
|
[
"MIT"
] | null | null | null |
src/utils.py
|
fabiob/wwwsqldesigner-aws
|
5518eae682e8228be30b094c6015054b3cddf8f3
|
[
"MIT"
] | 1
|
2021-04-04T09:41:51.000Z
|
2021-04-04T09:41:51.000Z
|
from .env import S3_PREFIX
def respond(body=None, mime="text/plain", code=200, headers={}):
h = {"Content-Type": mime}
h.update(headers)
return {"statusCode": code, "body": body, "headers": h}
def fn(keyword):
    """Map a keyword to its S3 object key: <prefix><keyword>.xml."""
    return '{}{}.xml'.format(S3_PREFIX, keyword)
def fix(filename):
    """Inverse of fn(): strip the S3 prefix and the '.xml' suffix."""
    prefix_len = len(S3_PREFIX)
    return filename[prefix_len:-4]
| 20.375
| 64
| 0.650307
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 80
| 0.245399
|
028456bd34d14ef1d7f23ca7f443c4b9f0404a35
| 4,071
|
py
|
Python
|
waferscreen/inst_control/inactive/agilent_34970A.py
|
chw3k5/WaferScreen
|
c0ca7fe939fe7cd0b722b7d6129b148c03a7505c
|
[
"Apache-2.0"
] | 1
|
2021-07-30T19:06:07.000Z
|
2021-07-30T19:06:07.000Z
|
waferscreen/inst_control/inactive/agilent_34970A.py
|
chw3k5/WaferScreen
|
c0ca7fe939fe7cd0b722b7d6129b148c03a7505c
|
[
"Apache-2.0"
] | 8
|
2021-04-22T20:47:48.000Z
|
2021-07-30T19:06:01.000Z
|
waferscreen/inst_control/inactive/agilent_34970A.py
|
chw3k5/WaferScreen
|
c0ca7fe939fe7cd0b722b7d6129b148c03a7505c
|
[
"Apache-2.0"
] | null | null | null |
import serial
class Agilent34970A:
    """Driver for an Agilent 34970A data-acquisition/switch unit over RS-232.

    Commands use the instrument's SCPI syntax; channels are addressed as
    '(@<board><switch>)' with the switch number zero-padded to two digits.

    NOTE(review): pyserial 3.x under Python 3 requires ``bytes`` for
    ``write()``; these calls pass ``str`` -- confirm the target pyserial
    and Python versions before deployment.
    """

    def __init__(self):
        # Serial-line parameters for the instrument's RS-232 interface.
        self.timeout = 10
        self.baudrate = 4800
        self.bytesize = serial.EIGHTBITS
        self.parity = serial.PARITY_NONE
        self.stopbits = serial.STOPBITS_ONE
        # NOTE(review): port is hard-coded; assumes the 34970A is the
        # fourth USB-serial adapter -- confirm on the target machine.
        self.s = serial.Serial(port='/dev/ttyUSB3', timeout=self.timeout, baudrate=self.baudrate,
                               bytesize=self.bytesize, parity=self.parity, stopbits=self.stopbits, xonxoff=True)

    @staticmethod
    def _channel(board, switch):
        """Format the SCPI channel list terminator '(@<board><switch>)\\n'."""
        return '(@' + str(board) + str(switch).zfill(2) + ')\n'

    def reset(self):
        """Return the instrument to its power-on defaults (*RST)."""
        self.s.write('*RST\n')

    def closeSwitch(self, board, switch):
        """Close (connect) the relay at the given board/switch."""
        self.s.write('ROUT:CLOS ' + self._channel(board, switch))

    def checkClosed(self, board, switch):
        """Query and print whether the relay at board/switch is closed."""
        self.s.write('ROUT:CLOS? ' + self._channel(board, switch))
        sto = self.s.readline()
        # Fixed: the original Python-2 `print 'x'` statements are syntax
        # errors under Python 3.
        if int(sto) == 0:
            print('Switch open')
        elif int(sto) == 1:
            print('Switch closed')

    def _measure(self, quantity, board, switch, Range, Resolution):
        """Issue a one-shot 'MEAS:<quantity>?' query and return the reading.

        When Resolution is "AUTO" it is omitted from the command, matching
        the instrument's default-resolution form.  Shared by all public
        measure* methods (they were copy-paste duplicates).
        """
        if Resolution == "AUTO":
            cmd = 'MEAS:' + quantity + '? ' + str(Range) + ',' + self._channel(board, switch)
        else:
            cmd = ('MEAS:' + quantity + '? ' + str(Range) + ',' + str(Resolution) +
                   ',' + self._channel(board, switch))
        self.s.write(cmd)
        return float(self.s.readline())

    def measureResistance(self, board, switch, Range="AUTO", Resolution="AUTO"):
        """Measure 2-wire resistance on the given channel."""
        return self._measure('RES', board, switch, Range, Resolution)

    def measureFrequency(self, board, switch, Range="AUTO", Resolution="AUTO"):
        """Measure frequency on the given channel."""
        return self._measure('FREQ', board, switch, Range, Resolution)

    def measurePeriod(self, board, switch, Range="AUTO", Resolution="AUTO"):
        """Measure period on the given channel."""
        return self._measure('PER', board, switch, Range, Resolution)

    def measureACCurrent(self, board, switch, Range="AUTO", Resolution="AUTO"):
        """Measure AC current on the given channel."""
        return self._measure('CURR:AC', board, switch, Range, Resolution)

    def measureDCCurrent(self, board, switch, Range="AUTO", Resolution="AUTO"):
        """Measure DC current on the given channel."""
        return self._measure('CURR:DC', board, switch, Range, Resolution)

    def measureACVoltage(self, board, switch, Range="AUTO", Resolution="AUTO"):
        """Measure AC voltage on the given channel."""
        return self._measure('VOLT:AC', board, switch, Range, Resolution)

    def measureDCVoltage(self, board, switch, Range="AUTO", Resolution="AUTO"):
        """Measure DC voltage on the given channel."""
        return self._measure('VOLT:DC', board, switch, Range, Resolution)
| 39.911765
| 119
| 0.503316
| 4,055
| 0.99607
| 0
| 0
| 0
| 0
| 0
| 0
| 578
| 0.14198
|
0285c8a2ee84e232d1b5d465f4047d255ab9153e
| 2,318
|
py
|
Python
|
force_wfmanager/gui/tests/test_click_run.py
|
force-h2020/force-wfmanager
|
bcd488cd37092cacd9d0c81b544ee8c1654d1d92
|
[
"BSD-2-Clause"
] | 1
|
2019-08-19T16:02:20.000Z
|
2019-08-19T16:02:20.000Z
|
force_wfmanager/gui/tests/test_click_run.py
|
force-h2020/force-wfmanager
|
bcd488cd37092cacd9d0c81b544ee8c1654d1d92
|
[
"BSD-2-Clause"
] | 396
|
2017-07-18T15:19:55.000Z
|
2021-05-03T06:23:06.000Z
|
force_wfmanager/gui/tests/test_click_run.py
|
force-h2020/force-wfmanager
|
bcd488cd37092cacd9d0c81b544ee8c1654d1d92
|
[
"BSD-2-Clause"
] | 2
|
2019-03-05T16:23:10.000Z
|
2020-04-16T08:59:11.000Z
|
# (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
import unittest
import sys
import os
from unittest import mock
from click.testing import CliRunner
import force_wfmanager.gui.run
from force_wfmanager.tests.dummy_classes.dummy_wfmanager import \
DummyWfManager
from force_wfmanager.version import __version__
def mock_run_constructor(*args, **kwargs):
    # Stand-in used as a mock side_effect in test_click_cli_main.
    # NOTE(review): the mock built here is neither returned nor stored, so
    # this function is effectively a no-op -- confirm whether it should
    # `return mock_wf_run`.
    mock_wf_run = mock.Mock(spec=force_wfmanager.gui.run)
    mock_wf_run.main = lambda: None
class TestClickRun(unittest.TestCase):
    """Tests for the click-based CLI entry point of force_wfmanager."""

    def test_click_cli_version(self):
        """The CLI should accept --version without raising."""
        clirunner = CliRunner()
        clirunner.invoke(force_wfmanager.gui.run.force_wfmanager,
                         args="--version")
    def test_click_cli_main(self):
        """Invoking the CLI entry point should call through to run()."""
        with mock.patch('force_wfmanager.gui.run') as mock_run:
            mock_run.side_effect = mock_run_constructor
            force_wfmanager.gui.run.force_wfmanager()
            self.assertTrue(mock_run.force_wfmanager.called)
    def test_run_with_debug(self):
        """debug=True should lower the module logger to DEBUG (level 10)."""
        with mock.patch('force_wfmanager.gui.run.WfManager') as mock_wf:
            # DummyWfManager avoids starting a real GUI application.
            mock_wf.return_value = DummyWfManager()
            force_wfmanager.gui.run.main(
                window_size=(1650, 1080),
                debug=True,
                profile=False,
                workflow_file=None
            )
            self.log = force_wfmanager.gui.run.logging.getLogger(__name__)
            self.assertEqual(self.log.getEffectiveLevel(), 10)
    def test_run_with_profile(self):
        """profile=True should write .pstats/.prof files named after the
        package and interpreter versions; remove them after checking."""
        with mock.patch('force_wfmanager.gui.run.WfManager') as mock_wf:
            mock_wf.return_value = DummyWfManager()
            force_wfmanager.gui.run.main(
                window_size=(1650, 1080), debug=False,
                profile=True, workflow_file=None
            )
            # Expected profile file stem, e.g. force_wfmanager-1.0-3.8.2
            root = ('force_wfmanager-{}-{}.{}.{}'
                    .format(__version__,
                            sys.version_info.major,
                            sys.version_info.minor,
                            sys.version_info.micro))
            exts = ['.pstats', '.prof']
            files_exist = [False] * len(exts)
            for ind, ext in enumerate(exts):
                files_exist[ind] = os.path.isfile(root + ext)
                os.remove(root + ext)
            self.assertTrue(all(files_exist))
| 34.088235
| 74
| 0.615617
| 1,831
| 0.789905
| 0
| 0
| 0
| 0
| 0
| 0
| 228
| 0.098361
|
0286818653d925685a7dbe2ea01784b7a5521b18
| 675
|
py
|
Python
|
menu.py
|
shaolinbertrand/RPG
|
77292c54baa14baf9e09d036be67592bb8f2c093
|
[
"MIT"
] | null | null | null |
menu.py
|
shaolinbertrand/RPG
|
77292c54baa14baf9e09d036be67592bb8f2c093
|
[
"MIT"
] | null | null | null |
menu.py
|
shaolinbertrand/RPG
|
77292c54baa14baf9e09d036be67592bb8f2c093
|
[
"MIT"
] | null | null | null |
from cadastrarJogador import cadastra_jogador
from cadastrarMonstros import cadastra_monstro
from atualizaJogador import atualiza
from combate import combate_iniciado
# Main menu loop: dispatch each numbered option to its action.
while True:
    print('Bem vindo ao RPG selecione a opção desenjada')
    print('[0] - Cadastrar Novo Jogador\n[1] - Atualizar Jogador\n[2] - Cadastrar Novo Monstro\n[3] Iniciar Combate\n[4]-Sair do sistema')
    try:
        o = int(input('Entre com o numero da opção desejada: '))
    except ValueError:
        # Non-numeric input used to crash the menu; treat it as invalid.
        print('Opção invalida')
        continue
    # Bug fix: the dispatch was out of step with the menu text -- option 1
    # ran cadastra_monstro() and option 2 ran atualiza().  Route each
    # option to the action its label announces.
    if o == 0:
        cadastra_jogador()
    elif o == 1:
        atualiza()
    elif o == 2:
        cadastra_monstro()
    elif o == 3:
        combate_iniciado()
    elif o == 4:
        break
    else:
        print('Opção invalida')
| 33.75
| 138
| 0.665185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 235
| 0.345081
|
02869a45220bc3cd768ae9f192b46417fa96c690
| 4,354
|
py
|
Python
|
plugin_manager/accounts/models.py
|
ahharu/plugin-manager
|
43d5e2c6e25ed8f50eedf7fd876fbc04f75d94bb
|
[
"MIT"
] | null | null | null |
plugin_manager/accounts/models.py
|
ahharu/plugin-manager
|
43d5e2c6e25ed8f50eedf7fd876fbc04f75d94bb
|
[
"MIT"
] | null | null | null |
plugin_manager/accounts/models.py
|
ahharu/plugin-manager
|
43d5e2c6e25ed8f50eedf7fd876fbc04f75d94bb
|
[
"MIT"
] | null | null | null |
"""
Custom user model for deployments.
"""
import urllib
import hashlib
import base64
import random
from authtools.models import AbstractEmailUser
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save
from .managers import DeployUserManager
from plugin_manager.hosts.models import Host
from plugin_manager.accounts.model_managers import DeployUserActiveManager
from plugin_manager.core.mixins.models import TrackingFields
class DeployUser(AbstractEmailUser, TrackingFields):
    """
    Custom user class for deployments. Email as username using
    django-custom-user.
    """
    # Bootswatch stylesheet filenames the user may pick as a UI theme.
    AMELIA = 'amelia.min.css'
    CERULEAN = 'cerulean.min.css'
    COSMO = 'cosmo.min.css'
    CYBORG = 'cyborg.min.css'
    DARKLY = 'darkly.min.css'
    FLATLY = 'flatly.min.css'
    JOURNAL = 'journal.min.css'
    LUMEN = 'lumen.min.css'
    READABLE = 'readable.min.css'
    SIMPLEX = 'simplex.min.css'
    SLATE = 'slate.min.css'
    SPACELAB = 'spacelab.min.css'
    SUPERHERO = 'superhero.min.css'
    UNITED = 'united.min.css'
    YETI = 'yeti.min.css'
    # (value, label) choices for the `template` field below.
    TEMPLATES = (
        (AMELIA, 'Amelia'),
        (CERULEAN, 'Cerulean'),
        (COSMO, 'Cosmo'),
        (CYBORG, 'Cyborg'),
        (DARKLY, 'Darkly'),
        (FLATLY, 'Flatly'),
        (JOURNAL, 'Journal'),
        (LUMEN, 'Lumen'),
        (READABLE, 'Readable'),
        (SIMPLEX, 'Simplex'),
        (SLATE, 'Slate'),
        (SPACELAB, 'Spacelab'),
        (SUPERHERO, 'Superhero'),
        (UNITED, 'United'),
        (YETI, 'Yeti'),
    )
    # Secondary manager; `objects` (below) remains the default manager.
    active_records = DeployUserActiveManager()
    first_name = models.CharField(_('first name'), max_length=30, blank=False)
    last_name = models.CharField(_('last name'), max_length=30,
                                 blank=False)
    template = models.CharField(max_length=255, blank=True,
                                choices=TEMPLATES, default=YETI)
    objects = DeployUserManager()
    def __unicode__(self):
        # Python-2/Django-1.x era display name ("First Last").
        return u'{} {}'.format(self.first_name, self.last_name)
    @property
    def role(self):
        """
        Assume the user is assigned to a single role and return its name.
        """
        return self.group_strigify()
    def _get_groups(self):
        # Cache the group-name list per instance to avoid repeated queries.
        if not hasattr(self, '_cached_groups'):
            self._cached_groups = list(self.groups.values_list("name",
                                                               flat=True))
        return self._cached_groups
    def user_is_admin(self):
        # Unsaved users (no pk) belong to no groups.
        if not self.pk:
            return False
        return "Admin" in self._get_groups()
    def user_is_deployer(self):
        if not self.pk:
            return False
        return "Deployer" in self._get_groups()
    def user_is_historian(self):
        if not self.pk:
            return False
        return "Historian" in self._get_groups()
    def group_strigify(self):
        """
        Converts this user's group(s) to a string and returns it.
        """
        return "/".join(self._get_groups())
    def gravatar(self, size=20):
        """
        Construct a gravatar image address for the user
        """
        # "mm" = gravatar's "mystery man" fallback image.
        default = "mm"
        # NOTE(review): urllib.urlencode and md5() of a str are Python-2
        # only; Python 3 needs urllib.parse.urlencode and bytes input.
        gravatar_url = "http://www.gravatar.com/avatar/" + hashlib.md5(
            self.email.lower()).hexdigest() + "?"
        gravatar_url += urllib.urlencode({'d': default, 's': str(size)})
        return gravatar_url
class APIKey(models.Model):
    """API key issued to a user; created automatically on user creation
    (see generate_APIKey below)."""
    apikey = models.CharField(max_length=255, primary_key=True)
    deployuser = models.ForeignKey(DeployUser)
    class Meta:
        unique_together = (("apikey", "deployuser"),)
class PermissionHost(models.Model):
    """Grants a user access to a specific host (user <-> host link table)."""
    user = models.ForeignKey(DeployUser)
    host = models.ForeignKey(Host)
    def __unicode__(self):
        return u'User: {} Host: {}'.format(self.user, self.host)
def generate_APIKey(sender, instance, created, **kwargs):
    """post_save hook: mint a random API key for each newly created user.

    256 random bits -> SHA-256 -> base64 (random altchars pair for '+'
    and '/'), with trailing '=' padding stripped.

    NOTE(review): `random` is not cryptographically secure -- the
    `secrets` module would be the right source for API keys.  Also
    Python-2 era: under Python 3, sha256() needs bytes and b64encode
    returns bytes, so the str operations here would fail.
    """
    if created:
        apikey = APIKey()
        apikey.apikey = base64.b64encode(hashlib.sha256(
            str(random.getrandbits(256))).digest(),
            random.choice(
                ['rA', 'aZ', 'gQ', 'hH', 'hG',
                 'aR', 'DD'])).rstrip('==')
        apikey.deployuser = instance
        apikey.save()
post_save.connect(generate_APIKey, sender=DeployUser)
| 29.221477
| 78
| 0.595315
| 3,312
| 0.76068
| 0
| 0
| 158
| 0.036288
| 0
| 0
| 949
| 0.21796
|
02871af56c42a72cf7ba11b3dac2fc5de68923f2
| 1,007
|
py
|
Python
|
heads/fc1024_normalize.py
|
ahmdtaha/tf_retrieval_baseline
|
31b1588f888cecc1d4287f77bd046314956482d5
|
[
"Apache-2.0"
] | 37
|
2019-06-01T02:11:48.000Z
|
2021-12-31T06:27:42.000Z
|
heads/fc1024_normalize.py
|
ahmdtaha/tf_retrieval_baseline
|
31b1588f888cecc1d4287f77bd046314956482d5
|
[
"Apache-2.0"
] | 1
|
2019-06-21T03:20:59.000Z
|
2019-09-03T14:20:04.000Z
|
heads/fc1024_normalize.py
|
ahmdtaha/tf_retrieval_baseline
|
31b1588f888cecc1d4287f77bd046314956482d5
|
[
"Apache-2.0"
] | 6
|
2019-10-11T10:21:56.000Z
|
2022-03-09T06:22:57.000Z
|
import tensorflow as tf
from tensorflow.contrib import slim
def head(endpoints, embedding_dim, is_training, weights_regularizer=None):
    """Embedding head: FC(1024) with batch-norm, then an L2-normalized
    linear embedding layer.

    Args:
        endpoints: dict holding the backbone's 'model_output' tensor; new
            tensors are added to it in place.
        embedding_dim: size of the final embedding vector.
        is_training: controls batch-norm's training/inference behavior.
        weights_regularizer: optional slim-style regularizer for both layers.

    Returns:
        The same `endpoints` dict, now with 'head_output', 'emb_raw' and
        the unit-length 'emb'.
    """
    # Extra output units for variance prediction; 0 disables the feature.
    predict_var = 0
    # NOTE: `input` shadows the builtin; kept for byte-compatibility.
    input = endpoints['model_output']
    endpoints['head_output'] = slim.fully_connected(
        input, 1024, normalizer_fn=slim.batch_norm,
        normalizer_params={
            'decay': 0.9,
            'epsilon': 1e-5,
            'scale': True,
            'is_training': is_training,
            'updates_collections': tf.GraphKeys.UPDATE_OPS,
        },
        weights_regularizer=weights_regularizer
    )
    input_1 = endpoints['head_output']
    endpoints['emb_raw'] = slim.fully_connected(
        input_1, embedding_dim + predict_var, activation_fn=None,weights_regularizer=weights_regularizer,
        weights_initializer=tf.orthogonal_initializer(), scope='emb')
    # Project embeddings onto the unit hypersphere (L2 norm along last axis).
    endpoints['emb'] = tf.nn.l2_normalize(endpoints['emb_raw'], -1)
    # endpoints['data_sigma'] = None
    print('Normalize batch embedding')
    return endpoints
| 33.566667
| 105
| 0.675273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 184
| 0.182721
|
02875f5951726e518af5547e018727a57f4c2846
| 1,144
|
py
|
Python
|
vendor/github.com/elastic/beats/topbeat/tests/system/test_base.py
|
ninjasftw/libertyproxybeat
|
b8acafe86ad285f091bf69b59d2ebd1da80dcf5e
|
[
"Apache-2.0"
] | 37
|
2016-01-25T10:52:59.000Z
|
2021-05-08T11:44:39.000Z
|
vendor/github.com/elastic/beats/topbeat/tests/system/test_base.py
|
ninjasftw/libertyproxybeat
|
b8acafe86ad285f091bf69b59d2ebd1da80dcf5e
|
[
"Apache-2.0"
] | 35
|
2016-01-25T09:19:28.000Z
|
2017-11-20T23:29:35.000Z
|
vendor/github.com/elastic/beats/topbeat/tests/system/test_base.py
|
ninjasftw/libertyproxybeat
|
b8acafe86ad285f091bf69b59d2ebd1da80dcf5e
|
[
"Apache-2.0"
] | 23
|
2016-01-25T09:15:05.000Z
|
2020-12-14T06:08:31.000Z
|
from topbeat import BaseTest
import os
import shutil
import time
"""
Contains tests for base config
"""
class Test(BaseTest):
    def test_invalid_config(self):
        """
        Checks stop when input and topbeat defined
        """
        # This config declares both 'input' and 'topbeat' sections, which
        # the beat rejects at startup.
        shutil.copy("./config/topbeat-input-invalid.yml",
                    os.path.join(self.working_dir, "invalid.yml"))
        # -N: do not publish events.  The process must exit with code 1.
        exit_code = self.run_beat(config="invalid.yml", extra_args=["-N"])
        assert exit_code == 1
        assert self.log_contains(
            "'topbeat' and 'input' are both set in config.") is True
    def test_old_config(self):
        """
        Test that old config still works with deprecation warning
        """
        shutil.copy("./config/topbeat-old.yml",
                    os.path.join(self.working_dir, "topbeat-old.yml"))
        topbeat = self.start_beat(config="topbeat-old.yml", extra_args=["-N"])
        # Give the beat a moment to emit the deprecation notice.
        time.sleep(1)
        topbeat.check_kill_and_wait()
        assert self.log_contains(
            "Using 'input' in configuration is deprecated and is scheduled to "
            "be removed in Topbeat 6.0. Use 'topbeat' instead.") is True
| 28.6
| 79
| 0.612762
| 1,035
| 0.90472
| 0
| 0
| 0
| 0
| 0
| 0
| 480
| 0.41958
|
0289963af258cded39c2b0dcfaad0d26f59c24b0
| 7,133
|
py
|
Python
|
JapanSize.py
|
AleksanderLidtke/XKCD
|
47c5029d9737390a910184adc66efc1347b84441
|
[
"MIT"
] | null | null | null |
JapanSize.py
|
AleksanderLidtke/XKCD
|
47c5029d9737390a910184adc66efc1347b84441
|
[
"MIT"
] | null | null | null |
JapanSize.py
|
AleksanderLidtke/XKCD
|
47c5029d9737390a910184adc66efc1347b84441
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Throughout my travels I've discovered that most people, including myself, do not
realise many things about our Planet's size. For example, the latitude and
longitude of certain regions (South America is much further east than the US)
or the relative size of countries (Japan is surprisingly long).
Thus, I've created this script to understand such things a bit better. It
compares the sizes of Japan and Europe, which is the most recent surprise
I came across.
The shape data were aquired from [Global Administrative Areas](http://www.gadm.org/country)
website. Thus, their **redistribution, or commercial use is not allowed without
prior permission**.
Created on Sun May 7 14:13:47 2017
@author: Alek
"""
from mpl_toolkits.basemap import Basemap
import numpy, shapefile, os, matplotlib.pyplot
matplotlib.pyplot.xkcd() # Here we go.
def plotPrefecture(*, shp, colour, bMap, axes, latOff=0, longOff=0, lwdth=0.5):
    """Draw one shapefile shape (e.g. a prefecture outline) on a basemap.

    Kwargs
    -------
    * shp - shape as returned by :func:`shapefile.Reader.shapes`,
    * colour - colour accepted by :func:`matplotlib.pyplot.Axes.plot',
    * bMap - :class:`mpl_toolkits.basemap.Basemap` used to project the shape,
    * axes - :class:`matplotlib.pyplot.Axes` instance where to plot,
    * latOff,longOff - deg, offsets applied to every vertex before plotting
      (lets the caller translate the shape elsewhere on the map),
    * lwdth - line width as accepted by :func:`matplotlib.pyplot.Axes.plot'.
    """
    def _draw(points):
        # Project and plot one contiguous ring of vertices.
        coords = numpy.array(points)
        bMap.plot(coords[:, 0] + longOff, coords[:, 1] + latOff,
                  color=colour, lw=lwdth, ls='-', latlon=True, ax=axes)

    if len(shp.parts) == 1:
        # Single-ring shape: all points form one outline.
        _draw(shp.points)
    else:
        # Multi-part shape (islands, disjoint regions): shp.parts holds the
        # start index of each ring; pair each start with the next start
        # (or with the end of the point list for the final ring).
        cuts = list(shp.parts) + [len(shp.points)]
        for begin, end in zip(cuts, cuts[1:]):
            _draw(shp.points[begin:end])
# Various font sizes.
ticksFontSize=18
labelsFontSizeSmall=20
labelsFontSize=30
titleFontSize=34
legendFontSize=20
matplotlib.rc('xtick',labelsize=ticksFontSize)
matplotlib.rc('ytick',labelsize=ticksFontSize)
cm=matplotlib.pyplot.cm.get_cmap('viridis')
# Read a shapefile with Japan's cartography data.
shapeRdr0=shapefile.Reader(os.path.join('borders','JPN_adm0')) # Country.
shapeRdr1=shapefile.Reader(os.path.join('borders','JPN_adm1')) # Prefectures.
shapeRdr2=shapefile.Reader(os.path.join('borders','JPN_adm2')) # Towns.
shape=shapeRdr0.shapes()[0]
if shape.shapeType != shapefile.POLYGON:
raise ValueError('Shape not polygon with shapeType={}'.format(shape.shapeType ))
vertices=numpy.array(shape.points) # 2D array of coordinates.
# Where to centre different maps and where to translate Japan to.
latJpn=37 # Where to centre one map, i.e. over Japan. Lat/lon in degrees.
lonJpn=138
latCtr=40 # Where to centre the Europe's map. Lat/lon in degrees.
lonCtr=10
dLonJ=10 # Plot Japan at these coordinates over the map of Europe.
dLatJ=50
' Mercator projection, a.k.a. "the things you learn in schools".'
fig,ax=matplotlib.pyplot.subplots(1,2,figsize=(16,8))
# The whole Planet.
mercMapP=Basemap(projection='merc',llcrnrlat=-80,urcrnrlat=80,llcrnrlon=-180,
urcrnrlon=180,lat_ts=10,ax=ax[0],resolution='c')
mercMapP.drawcoastlines(linewidth=0.5)
mercMapP.drawcountries(linewidth=0.25)
mercMapP.drawparallels(numpy.arange(-90.,91.,30.))
mercMapP.drawmeridians(numpy.arange(-180.,181.,60.))
ax[0].set_title(r'$Our\ Planet$',fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=1,bMap=mercMapP,axes=ax[0])
# Only Europe.
mercMapE=Basemap(projection='merc',llcrnrlat=30,urcrnrlat=75,llcrnrlon=-25,
urcrnrlon=40,lat_ts=10,ax=ax[1],resolution='l')
mercMapE.drawcoastlines(linewidth=0.5)
mercMapE.drawcountries(linewidth=0.25)
mercMapE.drawparallels(numpy.arange(mercMapE.latmin,mercMapE.latmax,10.))
mercMapE.drawmeridians(numpy.arange(mercMapE.lonmin,mercMapE.lonmax,15.))
ax[1].set_title(r'$Europe$',fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=mercMapE,axes=ax[1],
latOff=dLatJ-latJpn,longOff=dLonJ-lonJpn)
fig.show()
' One figure with orthonormal maps centred on Japan and Europe.'
fig,ax=matplotlib.pyplot.subplots(1,2,figsize=(16,8))
# Centred on Japan.
ortnMapJ=Basemap(projection='ortho',lat_0=latJpn,lon_0=lonJpn,resolution='c',
ax=ax[0])
ortnMapJ.drawcoastlines(linewidth=0.5)
ortnMapJ.drawcountries(linewidth=0.25)
ortnMapJ.drawmeridians(numpy.arange(0,360,30))
ortnMapJ.drawparallels(numpy.arange(-90,90,30))
ax[0].set_title(r'${}$'.format(shapeRdr0.records()[0][4]),fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=ortnMapJ,axes=ax[0])
# Plot all the prefectures.
cNorm=matplotlib.colors.Normalize(vmin=0,vmax=shapeRdr1.numRecords)
scalarMap=matplotlib.cm.ScalarMappable(norm=cNorm,cmap=cm)
prefectures=shapeRdr1.shapes()
prefRecords=shapeRdr1.records()
for i in range(shapeRdr1.numRecords):
if prefRecords[i][9]=='Prefecture':
plotPrefecture(shp=prefectures[i],colour=scalarMap.to_rgba(i),
lwdth=0.5,bMap=ortnMapJ,axes=ax[0])
# Centred on Europe.
ortnMapE=Basemap(projection='ortho',lat_0=latCtr,lon_0=lonCtr,resolution='c',
ax=ax[1])
ortnMapE.drawcoastlines(linewidth=0.5)
ortnMapE.drawcountries(linewidth=0.25)
ortnMapE.drawmeridians(numpy.arange(0,360,30))
ortnMapE.drawparallels(numpy.arange(-90,90,30))
ax[1].set_title(r'${}\ over\ Europe$'.format(shapeRdr0.records()[0][4]),
fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=ortnMapE,axes=ax[1],
latOff=dLatJ-latJpn,longOff=dLonJ-lonJpn)
fig.show()
' Japan and Kitakyushu overlaid on Europe.'
fig,ax=matplotlib.pyplot.subplots(1,1,figsize=(16,8))
mercMapE=Basemap(projection='merc',llcrnrlat=30,urcrnrlat=75,llcrnrlon=-25,
urcrnrlon=40,lat_ts=10,ax=ax,resolution='l')
mercMapE.drawcoastlines(linewidth=0.5)
mercMapE.drawcountries(linewidth=0.25)
mercMapE.drawparallels(numpy.arange(mercMapE.latmin,mercMapE.latmax,10.))
mercMapE.drawmeridians(numpy.arange(mercMapE.lonmin,mercMapE.lonmax,15.))
ax.set_title(r'$Europe,\ true\ lat.$',fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=mercMapE,axes=ax,
latOff=0,longOff=dLonJ-lonJpn)
# Show annotation at the true latitude.
xKIT,yKIT=mercMapE.projtran(130.834730+dLonJ-lonJpn,33.8924837)
xTXT,yTXT=mercMapE.projtran(110.834730+dLonJ-lonJpn,45.8924837)
ax.scatter([xKIT],[yKIT],s=50,c='crimson')
ax.annotate('Here', xy=(xKIT,yKIT),xytext=(xTXT,yTXT),color='crimson',
arrowprops=dict(facecolor='crimson', shrink=0.05))
fig.show()
| 43.493902
| 91
| 0.728305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,550
| 0.357493
|
028a79224d1b3b0d7d2cc26a3b2408f89ff5f8c5
| 7,252
|
py
|
Python
|
lstm_toyexample.py
|
dsriaditya999/LSTM-Toy-Example
|
850f7923122b547c1fd25b3b1dc739e8c5db2570
|
[
"MIT"
] | null | null | null |
lstm_toyexample.py
|
dsriaditya999/LSTM-Toy-Example
|
850f7923122b547c1fd25b3b1dc739e8c5db2570
|
[
"MIT"
] | null | null | null |
lstm_toyexample.py
|
dsriaditya999/LSTM-Toy-Example
|
850f7923122b547c1fd25b3b1dc739e8c5db2570
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Importing Libraries
"""
# Commented out IPython magic to ensure Python compatibility.
import torch
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import torch.nn as nn
from tqdm import tqdm_notebook
from sklearn.preprocessing import MinMaxScaler
# %matplotlib inline
torch.manual_seed(0)
"""# Loading Dataset"""
sns.get_dataset_names()
flight_data = sns.load_dataset("flights")
flight_data.head()
"""# Preprocessing"""
# Changing the plot size
figsize = plt.rcParams["figure.figsize"]
figsize[0] = 15
figsize[1] = 5
plt.rcParams["figure.figsize"] = figsize
# Plotting the data
plt.title("Time Series Representation of Data")
plt.xlabel("Months")
plt.ylabel("Passengers")
plt.grid(True)
plt.autoscale(axis = "x",tight=True)
plt.plot(flight_data["passengers"])
#Please note that this is univariate time series data : consisting of one variable passengers
#
data = flight_data["passengers"].values.astype(float)
print(data)
print(len(data))
# Train-Test Split
# Consider the last the 12 months data as evaluation data for testing the model's behaviour
train_window = 12
train_data = data[:-train_window]
test_data = data[-train_window:]
print(len(train_data))
print(len(test_data))
# Normalizing the train-data
scaler = MinMaxScaler(feature_range=(-1,1))
train_data_normalized = scaler.fit_transform(train_data.reshape(-1,1))
print(train_data_normalized[:10])
# Converting to Torch Tensor
train_data_normalized = torch.FloatTensor(train_data_normalized).view(-1)
print(train_data_normalized)
# Final step is creating sequences of length 12 (12 months data) from the train-data and
# the label for each sequence is the passenger_data for the (12+1)th Month
def create_in_sequences(input_data, tw):
    """Slide a window of length `tw` over `input_data`.

    Returns a list of (window, label) pairs, where the label is the
    length-1 slice immediately following each window.
    """
    return [(input_data[start:start + tw], input_data[start + tw:start + tw + 1])
            for start in range(len(input_data) - tw)]
# Therefore, we get 120 train sequences along with the label value
train_in_seq = create_in_sequences(train_data_normalized,train_window)
print(len(train_in_seq))
print(train_in_seq[:5])
"""# The Model
Please note that the model considered here is:
1. LSTM layer with a univariate input sequence of length 12 and LSTM's previous hidden cell consisting of previous hidden state and previous cell state of length 100 and also , the size of LSTM's output is 100
2. The second layer is a Linear layer of 100 inputs from the LSTM's output and a single output size
"""
class LSTM(nn.Module):
    """Single-layer LSTM (hidden size 100) plus a linear read-out that
    produces one scalar prediction per input sequence."""
    def __init__(self,input_size = 1,hidden_layer_size = 100, output_size = 1):
        super().__init__()
        self.hidden_layer_size = hidden_layer_size  # size of h(t) and c(t)
        # NOTE(review): dropout on a single-layer nn.LSTM is applied only
        # between stacked layers, so here it is a no-op and PyTorch warns
        # about it -- confirm whether multiple layers were intended.
        self.lstm = nn.LSTM(input_size,hidden_layer_size,dropout = 0.45)
        # Maps the LSTM's 100-dim output to a single predicted value.
        self.linear = nn.Linear(hidden_layer_size,output_size)
        # Initial (h, c) pair; the training loop re-initializes this to
        # zeros before every sequence.
        self.hidden_cell = (torch.ones(1,1,self.hidden_layer_size),
                            torch.ones(1,1,self.hidden_layer_size))
    def forward(self,input_seq):
        # Reshape to (seq_len, batch=1, features=1) as nn.LSTM expects.
        lstm_out, self.hidden_cell = self.lstm(input_seq.view(len(input_seq),1,-1),self.hidden_cell)
        # Flatten per-step outputs for the linear layer.
        predictions = self.linear(lstm_out.view(len(input_seq),-1))
        return predictions[-1]  # prediction for the step after the sequence
model = LSTM()
print(model)
"""# Loss Function and Learning Algorithm (Optimizer)
Please note that for this simple model ,
* Loss Function considered is *Mean Squared Error* and
* Optimization Function used is Stochastic Version of **Adam** *Optimizer*.
"""
loss_fn = nn.MSELoss() # Mean Squared Error Loss Function
optimizer = torch.optim.Adam(model.parameters(),lr = 0.0002) # Adam Learning Algorithm
"""# Training"""
epochs = 450
loss_plot = []
for epoch in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
for seq,label in train_in_seq:
optimizer.zero_grad() # makes the gradients zero for each new sequence
model.hidden_cell = (torch.zeros(1,1,model.hidden_layer_size), # Initialising the previous hidden state and cell state for each new sequence
torch.zeros(1,1,model.hidden_layer_size))
y_pred = model(seq) # Automatically calls the forward pass
loss = loss_fn(y_pred,label) # Determining the loss
loss.backward() # Backpropagation of loss and gradients computation
optimizer.step() # Weights and Bias Updation
loss_plot.append(loss.item()) # Some Bookkeeping
plt.plot(loss_plot,'r-')
plt.xlabel("Epochs")
plt.ylabel("Loss : MSE")
plt.show()
print(loss_plot[-1])
"""# Making Prediction
Please note that for comparison purpose we use the training data's values and predicted data values to predict the number of passengers for the test data months and then compare them
"""
fut_pred = 12
test_inputs = train_data_normalized[-train_window: ].tolist()
print(test_inputs)
print(len(test_inputs))
model.eval() # Makes the model ready for evaluation
for i in range(fut_pred):
seq = torch.FloatTensor(test_inputs[-train_window: ]) # Converting to a tensor
with torch.no_grad(): # Stops adding to the computational flow graph (stops being prepared for backpropagation)
model.hidden_cell = (torch.zeros(1,1,model.hidden_layer_size),
torch.zeros(1,1,model.hidden_layer_size))
test_inputs.append(model(seq).item())
predicted_outputs_normalized = []
predicted_outputs_normalized = test_inputs[-train_window: ]
print(predicted_outputs_normalized)
print(len(predicted_outputs_normalized))
"""# Postprocessing"""
predicted_outputs = scaler.inverse_transform(np.array(predicted_outputs_normalized).reshape(-1,1))
print(predicted_outputs)
x = np.arange(132, 144, 1)
print(x)
"""# Final Output"""
figsize = plt.rcParams["figure.figsize"]
figsize[0] = 15
figsize[1] = 5
plt.rcParams["figure.figsize"] = figsize
plt.title('Month vs Passenger')
plt.ylabel('Total Passengers')
plt.grid(True)
plt.autoscale(axis='x', tight=True)
plt.plot(flight_data['passengers'])
plt.plot(x,predicted_outputs)
plt.show()
figsize = plt.rcParams["figure.figsize"]
figsize[0] = 15
figsize[1] = 5
plt.rcParams["figure.figsize"] = figsize
plt.title('Month vs Passenger')
plt.ylabel('Total Passengers')
plt.grid(True)
plt.autoscale(axis='x', tight=True)
plt.plot(flight_data['passengers'][-train_window-5: ])
plt.plot(x,predicted_outputs)
plt.show()
"""**Please observe that the model is able to get the trend of the passengers but it can be further fine-tuned by adding appropriate regularization methods**"""
| 34.046948
| 212
| 0.734694
| 1,387
| 0.191258
| 0
| 0
| 0
| 0
| 0
| 0
| 3,469
| 0.478351
|
028b6c5908aab150cc0d4d671ccfb977919ebe32
| 22,929
|
py
|
Python
|
api/chat.py
|
Jecosine/blivechat
|
d398e4913e0c76d93d3f5402938dc59ea1424ec6
|
[
"MIT"
] | null | null | null |
api/chat.py
|
Jecosine/blivechat
|
d398e4913e0c76d93d3f5402938dc59ea1424ec6
|
[
"MIT"
] | null | null | null |
api/chat.py
|
Jecosine/blivechat
|
d398e4913e0c76d93d3f5402938dc59ea1424ec6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import asyncio
import enum
import json
import logging
import random
import time
import uuid
from typing import *
import aiohttp
import tornado.websocket
import api.base
import blivedm.blivedm as blivedm
import config
import models.avatar
import models.translate
import models.log
logger = logging.getLogger(__name__)
class Command(enum.IntEnum):
    """Opcode carried in the 'cmd' field of every websocket message
    (see Room.send_message, which serializes {'cmd': ..., 'data': ...})."""
    HEARTBEAT = 0
    JOIN_ROOM = 1
    ADD_TEXT = 2
    ADD_GIFT = 3
    ADD_MEMBER = 4
    ADD_SUPER_CHAT = 5
    DEL_SUPER_CHAT = 6
    UPDATE_TRANSLATION = 7
_http_session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=10))
room_manager: Optional['RoomManager'] = None
def init():
    """Create the module-level RoomManager singleton; call once at startup."""
    global room_manager
    room_manager = RoomManager()
class Room(blivedm.BLiveClient):
    """Connection to one Bilibili live room that fans events out to web clients."""
    HEARTBEAT_INTERVAL = 10

    # The parse_XXX handlers are redefined here to reduce dependence on raw
    # field names, protecting against Bilibili renaming fields.
    def __parse_danmaku(self, command):
        info = command['info']
        if info[3]:
            room_id = info[3][3]
            medal_level = info[3][0]
        else:
            room_id = medal_level = 0
        return self._on_receive_danmaku(blivedm.DanmakuMessage(
            None, None, None, info[0][4], None, None, info[0][9], None,
            info[1],
            info[2][0], info[2][1], info[2][2], None, None, info[2][5], info[2][6], None,
            medal_level, None, None, room_id, None, None,
            info[4][0], None, None,
            None, None,
            info[7]
        ))

    def __parse_gift(self, command):
        data = command['data']
        return self._on_receive_gift(blivedm.GiftMessage(
            data['giftName'], data['num'], data['uname'], data['face'], None,
            data['uid'], data['timestamp'], None, None,
            None, None, None, data['coin_type'], data['total_coin']
        ))

    def __parse_buy_guard(self, command):
        data = command['data']
        return self._on_buy_guard(blivedm.GuardBuyMessage(
            data['uid'], data['username'], data['guard_level'], None, None,
            None, None, data['start_time'], None
        ))

    def __parse_super_chat(self, command):
        data = command['data']
        return self._on_super_chat(blivedm.SuperChatMessage(
            data['price'], data['message'], None, data['start_time'],
            None, None, data['id'], None,
            None, data['uid'], data['user_info']['uname'],
            data['user_info']['face'], None,
            None, None,
            None, None, None,
            None
        ))

    # Route raw Bilibili commands to the local parsers above; everything else
    # falls through to the parent's handlers.
    _COMMAND_HANDLERS = {
        **blivedm.BLiveClient._COMMAND_HANDLERS,
        'DANMU_MSG': __parse_danmaku,
        'SEND_GIFT': __parse_gift,
        'GUARD_BUY': __parse_buy_guard,
        'SUPER_CHAT_MESSAGE': __parse_super_chat
    }

    def __init__(self, room_id):
        super().__init__(room_id, session=_http_session, heartbeat_interval=self.HEARTBEAT_INTERVAL)
        # Connected web clients subscribed to this room.
        self.clients: List['ChatHandler'] = []
        # Number of connected clients that requested auto translation.
        self.auto_translate_count = 0

    async def init_room(self):
        # NOTE(review): the parent's result is discarded and True is always
        # returned — presumably intentional so the room opens even when the
        # room-info lookup fails; confirm against blivedm's contract.
        await super().init_room()
        return True

    def stop_and_close(self):
        """Stop the client loop if it is running, then close the connection."""
        if self.is_running:
            future = self.stop()
            future.add_done_callback(lambda _future: asyncio.ensure_future(self.close()))
        else:
            asyncio.ensure_future(self.close())

    def send_message(self, cmd, data):
        """Broadcast a command to every connected web client (and log it)."""
        body = json.dumps({'cmd': cmd, 'data': data})
        models.log.add_danmaku(self.room_id, body)
        for client in self.clients:
            try:
                client.write_message(body)
            except tornado.websocket.WebSocketClosedError:
                room_manager.del_client(self.room_id, client)

    def send_message_if(self, can_send_func: Callable[['ChatHandler'], bool], cmd, data):
        """Broadcast a command only to clients for which ``can_send_func`` is true."""
        body = json.dumps({'cmd': cmd, 'data': data})
        for client in filter(can_send_func, self.clients):
            try:
                client.write_message(body)
            except tornado.websocket.WebSocketClosedError:
                room_manager.del_client(self.room_id, client)

    async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage):
        # Fire-and-forget so avatar fetching/translation never blocks parsing.
        asyncio.ensure_future(self.__on_receive_danmaku(danmaku))

    async def __on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage):
        if danmaku.uid == self.room_owner_uid:
            author_type = 3  # streamer
        elif danmaku.admin:
            author_type = 2  # room admin
        elif danmaku.privilege_type != 0:  # 1 governor, 2 admiral, 3 captain
            author_type = 1  # guard member
        else:
            author_type = 0
        need_translate = self._need_translate(danmaku.msg)
        if need_translate:
            translation = models.translate.get_translation_from_cache(danmaku.msg)
            if translation is None:
                # Not cached; translate asynchronously and push the result later.
                translation = ''
            else:
                need_translate = False
        else:
            translation = ''
        id_ = uuid.uuid4().hex
        # A list is used instead of a dict to save bandwidth.
        self.send_message(Command.ADD_TEXT, make_text_message(
            await models.avatar.get_avatar_url(danmaku.uid),
            int(danmaku.timestamp / 1000),
            danmaku.uname,
            author_type,
            danmaku.msg,
            danmaku.privilege_type,
            danmaku.msg_type,
            danmaku.user_level,
            danmaku.urank < 10000,
            danmaku.mobile_verify,
            0 if danmaku.room_id != self.room_id else danmaku.medal_level,
            id_,
            translation
        ))
        if need_translate:
            await self._translate_and_response(danmaku.msg, id_)

    async def _on_receive_gift(self, gift: blivedm.GiftMessage):
        avatar_url = models.avatar.process_avatar_url(gift.face)
        models.avatar.update_avatar_cache(gift.uid, avatar_url)
        if gift.coin_type != 'gold':  # ignore free (non-gold) gifts
            return
        id_ = uuid.uuid4().hex
        self.send_message(Command.ADD_GIFT, {
            'id': id_,
            'avatarUrl': avatar_url,
            'timestamp': gift.timestamp,
            'authorName': gift.uname,
            'totalCoin': gift.total_coin,
            'giftName': gift.gift_name,
            'num': gift.num
        })

    async def _on_buy_guard(self, message: blivedm.GuardBuyMessage):
        asyncio.ensure_future(self.__on_buy_guard(message))

    async def __on_buy_guard(self, message: blivedm.GuardBuyMessage):
        id_ = uuid.uuid4().hex
        self.send_message(Command.ADD_MEMBER, {
            'id': id_,
            'avatarUrl': await models.avatar.get_avatar_url(message.uid),
            'timestamp': message.start_time,
            'authorName': message.username,
            'privilegeType': message.guard_level
        })

    async def _on_super_chat(self, message: blivedm.SuperChatMessage):
        avatar_url = models.avatar.process_avatar_url(message.face)
        models.avatar.update_avatar_cache(message.uid, avatar_url)
        need_translate = self._need_translate(message.message)
        if need_translate:
            translation = models.translate.get_translation_from_cache(message.message)
            if translation is None:
                # Not cached; translate asynchronously and push the result later.
                translation = ''
            else:
                need_translate = False
        else:
            translation = ''
        id_ = str(message.id)
        self.send_message(Command.ADD_SUPER_CHAT, {
            'id': id_,
            'avatarUrl': avatar_url,
            'timestamp': message.start_time,
            'authorName': message.uname,
            'price': message.price,
            'content': message.message,
            'translation': translation
        })
        if need_translate:
            asyncio.ensure_future(self._translate_and_response(message.message, id_))

    async def _on_super_chat_delete(self, message: blivedm.SuperChatDeleteMessage):
        # Fix: previously this sent Command.ADD_SUPER_CHAT, so clients received
        # a malformed "add" and never removed deleted super chats.
        # DEL_SUPER_CHAT is the matching client-side command.
        self.send_message(Command.DEL_SUPER_CHAT, {
            'ids': list(map(str, message.ids))
        })

    def _need_translate(self, text):
        """Return True when auto translation should be attempted for ``text``."""
        cfg = config.get_config()
        return (
            cfg.enable_translate
            and (not cfg.allow_translate_rooms or self.room_id in cfg.allow_translate_rooms)
            and self.auto_translate_count > 0
            and models.translate.need_translate(text)
        )

    async def _translate_and_response(self, text, msg_id):
        """Translate ``text`` and push the result to auto-translate clients."""
        translation = await models.translate.translate(text)
        if translation is None:
            return
        self.send_message_if(
            lambda client: client.auto_translate,
            Command.UPDATE_TRANSLATION, make_translation_message(
                msg_id,
                translation
            )
        )
def make_text_message(avatar_url, timestamp, author_name, author_type, content, privilege_type,
                      is_gift_danmaku, author_level, is_newbie, is_mobile_verified, medal_level,
                      id_, translation):
    """Pack a danmaku into the positional list sent to web clients.

    A list (rather than a dict) is used to save bandwidth. Index meanings:
    0 avatarUrl, 1 timestamp, 2 authorName, 3 authorType, 4 content,
    5 privilegeType, 6 isGiftDanmaku, 7 authorLevel, 8 isNewbie,
    9 isMobileVerified, 10 medalLevel, 11 id, 12 translation.
    Boolean flags are encoded as 0/1 integers.
    """
    return [
        avatar_url,
        timestamp,
        author_name,
        author_type,
        content,
        privilege_type,
        int(bool(is_gift_danmaku)),
        author_level,
        int(bool(is_newbie)),
        int(bool(is_mobile_verified)),
        medal_level,
        id_,
        translation,
    ]
def make_translation_message(msg_id, translation):
    """Pack a finished translation as the positional pair [id, translation]."""
    return [msg_id, translation]
class RoomManager:
    """Owns the Room instances and the web clients attached to each room."""
    def __init__(self):
        # room_id -> Room for every currently open room.
        self._rooms: Dict[int, Room] = {}
    async def get_room(self, room_id):
        """Return the Room for ``room_id``, creating it if needed; None on failure."""
        if room_id not in self._rooms:
            if not await self._add_room(room_id):
                return
        room = self._rooms.get(room_id, None)
        return room
    async def add_client(self, room_id, client: 'ChatHandler'):
        """Attach a web client to a room, creating the room on first join."""
        if room_id not in self._rooms:
            if not await self._add_room(room_id):
                # Room could not be initialized; drop the client.
                client.close()
                return
        room = self._rooms.get(room_id, None)
        if room is None:
            return
        room.clients.append(client)
        logger.info('%d clients in room %s', len(room.clients), room_id)
        if client.auto_translate:
            room.auto_translate_count += 1
        await client.on_join_room()
    def del_client(self, room_id, client: 'ChatHandler'):
        """Detach a web client; tear the room down when it becomes empty."""
        room = self._rooms.get(room_id, None)
        if room is None:
            return
        try:
            room.clients.remove(client)
        except ValueError:
            # _add_room has not finished yet, so room.clients.append never ran.
            pass
        else:
            logger.info('%d clients in room %s', len(room.clients), room_id)
            if client.auto_translate:
                room.auto_translate_count = max(0, room.auto_translate_count - 1)
        if not room.clients:
            self._del_room(room_id)
    async def _add_room(self, room_id):
        """Create and start a Room; return True on success, False on init failure."""
        if room_id in self._rooms:
            return True
        logger.info('Creating room %d', room_id)
        self._rooms[room_id] = room = Room(room_id)
        if await room.init_room():
            # start new log file
            room.start()
            logger.info('%d rooms', len(self._rooms))
            return True
        else:
            self._del_room(room_id)
            return False
    def _del_room(self, room_id):
        """Close all clients of a room, stop it, and forget it."""
        room = self._rooms.get(room_id, None)
        if room is None:
            return
        logger.info('Removing room %d', room_id)
        for client in room.clients:
            client.close()
        room.stop_and_close()
        self._rooms.pop(room_id, None)
        logger.info('%d rooms', len(self._rooms))
# noinspection PyAbstractClass
class ChatHandler(tornado.websocket.WebSocketHandler):
    """WebSocket endpoint that streams room events to one browser client.

    Sends a heartbeat every HEARTBEAT_INTERVAL seconds and closes the
    connection when nothing is received for RECEIVE_TIMEOUT seconds.
    """
    HEARTBEAT_INTERVAL = 10
    RECEIVE_TIMEOUT = HEARTBEAT_INTERVAL + 5
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._heartbeat_timer_handle = None
        self._receive_timeout_timer_handle = None
        # Room this client joined; None until a JOIN_ROOM command arrives.
        self.room_id = None
        # Whether this client wants translation updates pushed to it.
        self.auto_translate = False
    def open(self):
        logger.info('Websocket connected %s', self.request.remote_ip)
        self._heartbeat_timer_handle = asyncio.get_event_loop().call_later(
            self.HEARTBEAT_INTERVAL, self._on_send_heartbeat
        )
        self._refresh_receive_timeout_timer()
    def _on_send_heartbeat(self):
        # Send a heartbeat and reschedule the next one.
        self.send_message(Command.HEARTBEAT, {})
        self._heartbeat_timer_handle = asyncio.get_event_loop().call_later(
            self.HEARTBEAT_INTERVAL, self._on_send_heartbeat
        )
    def _refresh_receive_timeout_timer(self):
        # Restart the receive-timeout countdown from zero.
        if self._receive_timeout_timer_handle is not None:
            self._receive_timeout_timer_handle.cancel()
        self._receive_timeout_timer_handle = asyncio.get_event_loop().call_later(
            self.RECEIVE_TIMEOUT, self._on_receive_timeout
        )
    def _on_receive_timeout(self):
        logger.warning('Client %s timed out', self.request.remote_ip)
        self._receive_timeout_timer_handle = None
        self.close()
    def on_close(self):
        # Detach from the room and cancel both timers.
        logger.info('Websocket disconnected %s room: %s', self.request.remote_ip, str(self.room_id))
        if self.has_joined_room:
            room_manager.del_client(self.room_id, self)
        if self._heartbeat_timer_handle is not None:
            self._heartbeat_timer_handle.cancel()
            self._heartbeat_timer_handle = None
        if self._receive_timeout_timer_handle is not None:
            self._receive_timeout_timer_handle.cancel()
            self._receive_timeout_timer_handle = None
    def on_message(self, message):
        try:
            # Also disconnect on timeout if the client never joined a room.
            if self.has_joined_room:
                self._refresh_receive_timeout_timer()
            body = json.loads(message)
            cmd = body['cmd']
            if cmd == Command.HEARTBEAT:
                pass
            elif cmd == Command.JOIN_ROOM:
                if self.has_joined_room:
                    return
                self._refresh_receive_timeout_timer()
                self.room_id = int(body['data']['roomId'])
                logger.info('Client %s is joining room %d', self.request.remote_ip, self.room_id)
                try:
                    cfg = body['data']['config']
                    self.auto_translate = cfg['autoTranslate']
                except KeyError:
                    pass
                asyncio.ensure_future(room_manager.add_client(self.room_id, self))
            else:
                logger.warning('Unknown cmd, client: %s, cmd: %d, body: %s', self.request.remote_ip, cmd, body)
        except Exception:
            logger.exception('on_message error, client: %s, message: %s', self.request.remote_ip, message)
    # For cross-origin testing
    def check_origin(self, origin):
        if self.application.settings['debug']:
            return True
        return super().check_origin(origin)
    @property
    def has_joined_room(self):
        return self.room_id is not None
    def send_message(self, cmd, data):
        """Send a single command to this client, closing on a dead socket."""
        body = json.dumps({'cmd': cmd, 'data': data})
        try:
            self.write_message(body)
        except tornado.websocket.WebSocketClosedError:
            self.close()
    async def on_join_room(self):
        if self.application.settings['debug']:
            await self.send_test_message()
        # Notice shown when auto translation is not allowed in this room
        if self.auto_translate:
            cfg = config.get_config()
            if cfg.allow_translate_rooms and self.room_id not in cfg.allow_translate_rooms:
                self.send_message(Command.ADD_TEXT, make_text_message(
                    models.avatar.DEFAULT_AVATAR_URL,
                    int(time.time()),
                    'blivechat',
                    2,
                    'Translation is not allowed in this room. Please download to use translation',
                    0,
                    False,
                    60,
                    False,
                    True,
                    0,
                    uuid.uuid4().hex,
                    ''
                ))
    # For testing: emit one sample message of every kind
    async def send_test_message(self):
        base_data = {
            'avatarUrl': await models.avatar.get_avatar_url(300474),
            'timestamp': int(time.time()),
            'authorName': 'xfgryujk',
        }
        text_data = make_text_message(
            base_data['avatarUrl'],
            base_data['timestamp'],
            base_data['authorName'],
            0,
            '我能吞下玻璃而不伤身体',
            0,
            False,
            20,
            False,
            True,
            0,
            uuid.uuid4().hex,
            ''
        )
        member_data = {
            **base_data,
            'id': uuid.uuid4().hex,
            'privilegeType': 3
        }
        gift_data = {
            **base_data,
            'id': uuid.uuid4().hex,
            'totalCoin': 450000,
            'giftName': '摩天大楼',
            'num': 1
        }
        sc_data = {
            **base_data,
            'id': str(random.randint(1, 65535)),
            'price': 30,
            'content': 'The quick brown fox jumps over the lazy dog',
            'translation': ''
        }
        self.send_message(Command.ADD_TEXT, text_data)
        text_data[2] = '主播'
        text_data[3] = 3
        text_data[4] = "I can eat glass, it doesn't hurt me."
        text_data[11] = uuid.uuid4().hex
        self.send_message(Command.ADD_TEXT, text_data)
        self.send_message(Command.ADD_MEMBER, member_data)
        self.send_message(Command.ADD_SUPER_CHAT, sc_data)
        sc_data['id'] = str(random.randint(1, 65535))
        sc_data['price'] = 100
        sc_data['content'] = '敏捷的棕色狐狸跳过了懒狗'
        self.send_message(Command.ADD_SUPER_CHAT, sc_data)
        # self.send_message(Command.DEL_SUPER_CHAT, {'ids': [sc_data['id']]})
        self.send_message(Command.ADD_GIFT, gift_data)
        gift_data['id'] = uuid.uuid4().hex
        gift_data['totalCoin'] = 1245000
        gift_data['giftName'] = '小电视飞船'
        self.send_message(Command.ADD_GIFT, gift_data)
# noinspection PyAbstractClass
class RoomInfoHandler(api.base.ApiHandler):
    """REST endpoint: resolve a room id to its real id, owner uid and danmaku hosts."""
    # Cached/fallback danmaku server list.
    _host_server_list_cache = blivedm.DEFAULT_DANMAKU_SERVER_LIST
    async def get(self):
        room_id = int(self.get_query_argument('roomId'))
        logger.info('Client %s is getting room info %d', self.request.remote_ip, room_id)
        room_id, owner_uid = await self._get_room_info(room_id)
        host_server_list = await self._get_server_host_list(room_id)
        if owner_uid == 0:
            # Lookup failed; cache for only 3 minutes
            self.set_header('Cache-Control', 'private, max-age=180')
        else:
            # Cache for 1 day
            self.set_header('Cache-Control', 'private, max-age=86400')
        self.write({
            'roomId': room_id,
            'ownerUid': owner_uid,
            'hostServerList': host_server_list
        })
    @staticmethod
    async def _get_room_info(room_id):
        """Resolve a (possibly short) room id to (real_room_id, owner_uid).

        Returns (room_id, 0) on any network or API failure.
        """
        try:
            async with _http_session.get(blivedm.ROOM_INIT_URL, params={'room_id': room_id}
                                         ) as res:
                if res.status != 200:
                    logger.warning('room %d _get_room_info failed: %d %s', room_id,
                                   res.status, res.reason)
                    return room_id, 0
                data = await res.json()
        except (aiohttp.ClientConnectionError, asyncio.TimeoutError):
            logger.exception('room %d _get_room_info failed', room_id)
            return room_id, 0
        if data['code'] != 0:
            logger.warning('room %d _get_room_info failed: %s', room_id, data['message'])
            return room_id, 0
        room_info = data['data']['room_info']
        return room_info['room_id'], room_info['uid']
    @classmethod
    async def _get_server_host_list(cls, _room_id):
        # Always returns the static list; the live lookup below is disabled.
        return cls._host_server_list_cache
        # A key is required to connect to other hosts
        # try:
        #     async with _http_session.get(blivedm.DANMAKU_SERVER_CONF_URL, params={'id': room_id, 'type': 0}
        #                                  ) as res:
        #         if res.status != 200:
        #             logger.warning('room %d _get_server_host_list failed: %d %s', room_id,
        #                            res.status, res.reason)
        #             return cls._host_server_list_cache
        #         data = await res.json()
        # except (aiohttp.ClientConnectionError, asyncio.TimeoutError):
        #     logger.exception('room %d _get_server_host_list failed', room_id)
        #     return cls._host_server_list_cache
        #
        # if data['code'] != 0:
        #     logger.warning('room %d _get_server_host_list failed: %s', room_id, data['message'])
        #     return cls._host_server_list_cache
        #
        # host_server_list = data['data']['host_list']
        # if not host_server_list:
        #     logger.warning('room %d _get_server_host_list failed: host_server_list is empty')
        #     return cls._host_server_list_cache
        #
        # cls._host_server_list_cache = host_server_list
        # return host_server_list
# noinspection PyAbstractClass
class AvatarHandler(api.base.ApiHandler):
    """REST endpoint returning the avatar URL for a uid, with a default fallback."""
    async def get(self):
        uid = int(self.get_query_argument('uid'))
        url = await models.avatar.get_avatar_url_or_none(uid)
        if url is not None:
            # Resolved successfully; cache for 1 day
            self.set_header('Cache-Control', 'private, max-age=86400')
        else:
            # Unknown avatar; serve the default and cache for only 3 minutes
            url = models.avatar.DEFAULT_AVATAR_URL
            self.set_header('Cache-Control', 'private, max-age=180')
        self.write({
            'avatarUrl': url
        })
# noinspection PyAbstractClass
# handle reply message
class ReplyHandler(api.base.ApiHandler):
    """REST endpoint that injects a streamer reply message into a room's chat."""
    def get(self):
        # Health-check / liveness probe.
        self.write('pong')
    async def post(self):
        # uid == -1 means "no uid"; fall back to the default avatar lookup.
        uid = None if self.json_args['uid'] == -1 else self.json_args['uid']
        avatar_url = await models.avatar.get_avatar_url(uid)
        text_message = make_text_message(
            avatar_url=avatar_url,
            timestamp=int(time.time()),
            author_name=self.json_args['name'],
            author_type=3,  # streamer
            content=self.json_args['content'],
            author_level=0,
            id_=uuid.uuid4().hex,
            privilege_type=0,
            is_newbie=0,
            is_gift_danmaku=0,
            is_mobile_verified=True,
            medal_level=0,
            translation=''  # fix: was 0; the translation slot is a string
        )
        # get room; get_room returns None when the room cannot be initialized
        room: Room = await room_manager.get_room(room_id=self.json_args['room_id'])
        if room is None:
            # fix: previously crashed with AttributeError on a failed room lookup
            self.set_status(404)
            return
        room.send_message(Command.ADD_TEXT, text_message)
| 34.019288
| 111
| 0.584326
| 21,544
| 0.925469
| 0
| 0
| 2,325
| 0.099875
| 12,763
| 0.548262
| 3,863
| 0.165944
|
65f43d030f26c2fcb657f044a4435543df49146f
| 954
|
py
|
Python
|
gan.py
|
AtlantixJJ/LBSGAN
|
e91d500d4a9c02dd5e3bfcbd9a9eca96dc60102a
|
[
"BSD-2-Clause"
] | 1
|
2019-06-09T02:43:35.000Z
|
2019-06-09T02:43:35.000Z
|
gan.py
|
AtlantixJJ/LBSGAN
|
e91d500d4a9c02dd5e3bfcbd9a9eca96dc60102a
|
[
"BSD-2-Clause"
] | null | null | null |
gan.py
|
AtlantixJJ/LBSGAN
|
e91d500d4a9c02dd5e3bfcbd9a9eca96dc60102a
|
[
"BSD-2-Clause"
] | null | null | null |
import argparse
import os
import sys
import time
import torch
import torch.nn.functional as F
import torchvision
import models, lib
# Parse command-line/config options.
cfg = lib.config.BaseConfig()
cfg.parse()

print('Preparing model')
# Build generator and discriminator from the config's factory functions.
gen_model = cfg.gen_function(
    upsample=cfg.upsample,
    map_size=cfg.map_size,
    out_dim=cfg.out_dim)
disc_model = cfg.disc_function(
    downsample=cfg.downsample,
    in_dim=cfg.out_dim)
if cfg.num_gpu > 1:
    gen_model = torch.nn.DataParallel(gen_model)
    disc_model = torch.nn.DataParallel(disc_model)
gen_model.cuda()
disc_model.cuda()
# Fix: each network was previously printed twice (once unlabeled, once
# labeled); print each exactly once with its label.
print("=> Generator")
print(gen_model)
print("=> Discriminator")
print(disc_model)
# Use the delayed large-batch-simulation trainer when delayed_batch_size is set.
if cfg.args.delayed_batch_size > -1:
    trainer = lib.train.DelayLBSTrainer(gen_model=gen_model, disc_model=disc_model, dataloader=cfg.dl, cfg=cfg)
else:
    trainer = lib.train.BaseGANTrainer(gen_model=gen_model, disc_model=disc_model, dataloader=cfg.dl, cfg=cfg)
trainer.train()
| 24.461538
| 111
| 0.765199
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 0.051363
|
65f47a4c6cbf9c3cbfef8996d91a66023d1ce4f0
| 1,475
|
py
|
Python
|
leetcode/minimumAreaRectangle.py
|
federicoemartinez/problem_solving
|
d0352f76bc21ed67d6851a159a00f70a892934b9
|
[
"MIT"
] | null | null | null |
leetcode/minimumAreaRectangle.py
|
federicoemartinez/problem_solving
|
d0352f76bc21ed67d6851a159a00f70a892934b9
|
[
"MIT"
] | null | null | null |
leetcode/minimumAreaRectangle.py
|
federicoemartinez/problem_solving
|
d0352f76bc21ed67d6851a159a00f70a892934b9
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/minimum-area-rectangle/description/
"""
Given a set of points in the xy-plane, determine the minimum area of a rectangle formed from these points, with sides parallel to the x and y axes.
If there isn't any rectangle, return 0.
Example 1:
Input: [[1,1],[1,3],[3,1],[3,3],[2,2]]
Output: 4
Example 2:
Input: [[1,1],[1,3],[3,1],[3,3],[4,1],[4,3]]
Output: 2
Note:
1 <= points.length <= 500
0 <= points[i][0] <= 40000
0 <= points[i][1] <= 40000
All points are distinct.
"""
from collections import defaultdict
class Solution:
    def minAreaRect(self, points):
        """Return the minimum area of an axis-parallel rectangle whose four
        corners are all in ``points``, or 0 if no such rectangle exists.

        Treats every pair of points as a potential diagonal and checks the
        other two corners with O(1) set lookups — O(n^2) overall, replacing
        the original triple scan over per-row/per-column buckets.

        :type points: List[List[int]]
        :rtype: int
        """
        point_set = {(x, y) for x, y in points}
        pts = list(point_set)
        best_area = None
        for i, (x1, y1) in enumerate(pts):
            for x2, y2 in pts[i + 1:]:
                # Only diagonal pairs can define a rectangle (area > 0 is
                # implied because both coordinates differ).
                if x1 == x2 or y1 == y2:
                    continue
                if (x1, y2) in point_set and (x2, y1) in point_set:
                    area = abs(x2 - x1) * abs(y2 - y1)
                    if best_area is None or area < best_area:
                        best_area = area
        return 0 if best_area is None else best_area
| 27.314815
| 147
| 0.553898
| 887
| 0.601356
| 0
| 0
| 0
| 0
| 0
| 0
| 583
| 0.395254
|
65f96718aa17ce886b225fbdf113223d6df0b594
| 3,002
|
py
|
Python
|
code/google_sheet_writing.py
|
BastinFlorian/BoondManager-Auto-Holidays-Validation
|
28ae01d997132745018666952829771d5f8d99a3
|
[
"MIT"
] | null | null | null |
code/google_sheet_writing.py
|
BastinFlorian/BoondManager-Auto-Holidays-Validation
|
28ae01d997132745018666952829771d5f8d99a3
|
[
"MIT"
] | 18
|
2020-03-24T17:24:10.000Z
|
2022-03-12T00:29:56.000Z
|
code/google_sheet_writing.py
|
BastinFlorian/BoondManager-Auto-Holidays-Validation
|
28ae01d997132745018666952829771d5f8d99a3
|
[
"MIT"
] | null | null | null |
'''Functions that write the needed information into the Google Drive spreadsheet.
From the CP, RTT and holiday requests: create a worksheet per employee --
write_info_in_worksheet(info_paie, out_attente, out_valide, name, sh, problemes_date, problemes_type_conge)
'''
from google_sheet_access import *
# Write in worksheet at the specific cells
def write_info_in_worksheet(info_paie, out_attente, out_valide, name, sh, problemes_date, problemes_type_conge):
    """Fill one employee's worksheet: problems, pending/validated CP & RTT, payslip balances.

    :param info_paie: payslip DataFrame ("NOM Prénom", "nom", "prenom", balance columns)
    :param out_attente: dict name -> DataFrame of pending leave (rows "CP"/"RTT")
    :param out_valide: dict name -> DataFrame of validated leave (rows "CP"/"RTT")
    :param name: employee name, also used as the worksheet name
    :param sh: spreadsheet handle
    :param problemes_date: dict name -> list of date problems to report
    :param problemes_type_conge: dict name -> list of leave-type problems to report
    """
    wks = acces_worksheet(sh, name)
    cp_to_add = out_attente[name].loc["CP"].tolist()
    rtt_to_add = out_attente[name].loc["RTT"].tolist()
    # Fixed cells where detected problems are reported.
    indice_row_pb_date = 8
    indice_col_pb_date = 9
    indice_row_pb_conge = 9
    indice_col_pb_conge = 9
    if (name in problemes_date.keys()):
        pb_date_name = problemes_date[name]
        update_cell(wks, indice_row_pb_date, indice_col_pb_date, " ".join(pb_date_name))
    if (name in problemes_type_conge.keys()):
        pb_type_conge_name = problemes_type_conge[name]
        update_cell(wks, indice_row_pb_conge, indice_col_pb_conge, " ".join(pb_type_conge_name))
    # Pending CP/RTT go on row 13, validated on row 12; columns advance in pairs.
    indice_col_cp = 2
    indice_col_rtt = 3
    indice_row_cp_rtt = 13
    indice_row_cp_rtt_validees = 12
    for (x, y) in zip(cp_to_add, rtt_to_add):
        indice_col_cp += 2
        indice_col_rtt += 2
        if (x + y == 0 or indice_col_rtt > 13):
            continue
        update_cell(wks, indice_row_cp_rtt, indice_col_cp, x)
        update_cell(wks, indice_row_cp_rtt, indice_col_rtt, y)
    if name in out_valide.keys():
        cp_valide_to_add = out_valide[name].loc["CP"].tolist()
        rtt_valide_to_add = out_valide[name].loc["RTT"].tolist()
        indice_col_cp = 2
        indice_col_rtt = 3
        for (x, y) in zip(cp_valide_to_add, rtt_valide_to_add):
            indice_col_cp += 2
            indice_col_rtt += 2
            if (x + y == 0):
                continue
            update_cell(wks, indice_row_cp_rtt_validees, indice_col_cp, x)
            update_cell(wks, indice_row_cp_rtt_validees, indice_col_rtt, y)
    # Locate the employee's payslip row: exact match, then last name, then first name.
    df_name = info_paie[info_paie["NOM Prénom"] == name]
    if (df_name.empty):
        df_name = info_paie[info_paie["nom"].str.contains(name.upper().replace(" ", "|"), regex=True)]
        if len(df_name) != 1:
            # Fix: the original double-indexed the DataFrame with a DataFrame
            # (info_paie[info_paie[<filtered frame>]]), which raises; index
            # once with the boolean mask over the lower-cased first name.
            df_name = info_paie[info_paie["prenom"].apply(lambda x: x.lower())
                                .str.contains(name.lower().replace(" ", "|"), regex=True)]
            if len(df_name) != 1:
                df_name = info_paie[info_paie["NOM Prénom"] == name]
    if (df_name.empty):
        print("ERROR : FICHE DE PAIE NOT FOUND FOR --- %s." % name)
    else:
        # Payslip balances: CP year N, CP year N-1, and RTT.
        row_cp_n = 6
        row_cp_n1 = 7
        col_cp_n_n1 = 6
        row_rtt = 6
        col_rtt = 7
        update_cell(wks, row_cp_n, col_cp_n_n1, float(df_name["Congé_N_solde"]))
        update_cell(wks, row_cp_n1, col_cp_n_n1, float(df_name["Congé_N_1_solde"]))
        update_cell(wks, row_rtt, col_rtt, float(df_name["RTT_solde"]))
    print("%s done"%(name))
| 37.061728
| 112
| 0.651899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 478
| 0.159015
|
65f9d6849276abc9d2abce58b864383e8eca894c
| 531
|
py
|
Python
|
madlib.py
|
Yukthi-C/python_learing
|
340579e2bb767e8fdb209f705fdf12058e8e150f
|
[
"MIT"
] | null | null | null |
madlib.py
|
Yukthi-C/python_learing
|
340579e2bb767e8fdb209f705fdf12058e8e150f
|
[
"MIT"
] | null | null | null |
madlib.py
|
Yukthi-C/python_learing
|
340579e2bb767e8fdb209f705fdf12058e8e150f
|
[
"MIT"
] | null | null | null |
# Collect the fill-in words from the player.
adjective_one = input("Adjective1: ")
adjective_two = input("Adjective2: ")
body_part = input("body part: ")
dish_name = input("Dish: ")

# Assemble the story sentence by sentence (text identical to the original).
story = (
    f"One day, a {adjective_one} fox invited a stork for dinner. "
    f"Stork was very {adjective_two} with the invitation – she reached the fox’s home on time and knocked at the door with her {body_part}."
    f"The fox took her to the dinner table and served some {dish_name} in shallow bowls for both of them."
    "As the bowl was too shallow for the stork, she couldn’t have soup at all. But, the fox licked up his soup quickly."
)
print(story)
| 59
| 121
| 0.706215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 458
| 0.852886
|
65fb489a3669c5076b79a0d2bdaf7df0aec3faeb
| 3,114
|
py
|
Python
|
algofi/v1/send_keyreg_online_transaction.py
|
Algofiorg/algofi-py-sdk
|
6100a6726d36db4d4d3287064f0ad1d0b9a05e03
|
[
"MIT"
] | 38
|
2021-12-30T02:32:57.000Z
|
2022-03-23T22:09:16.000Z
|
algofi/v1/send_keyreg_online_transaction.py
|
Algofiorg/algofi-py-sdk
|
6100a6726d36db4d4d3287064f0ad1d0b9a05e03
|
[
"MIT"
] | 4
|
2021-11-03T00:14:46.000Z
|
2022-03-28T02:17:33.000Z
|
algofi/v1/send_keyreg_online_transaction.py
|
Algofiorg/algofi-py-sdk
|
6100a6726d36db4d4d3287064f0ad1d0b9a05e03
|
[
"MIT"
] | 8
|
2021-12-15T05:29:55.000Z
|
2022-02-08T03:45:11.000Z
|
from algosdk.future.transaction import ApplicationNoOpTxn
from .prepend import get_init_txns
from ..utils import Transactions, TransactionGroup, int_to_bytes
from ..contract_strings import algofi_manager_strings as manager_strings
def prepare_send_keyreg_online_transactions(sender, suggested_params, storage_account, vote_pk, selection_pk, state_proof_pk, vote_first, vote_last, vote_key_dilution,
                                            manager_app_id, supported_market_app_ids, supported_oracle_app_ids):
    """Build a :class:`TransactionGroup` that registers the algo vault online.

    The sender instructs the algofi protocol's storage account to register
    itself online so it can participate in Algorand consensus.

    NOTE: The storage account address must be registered with a participation
    node in order for the account to participate in consensus. It is unsafe to
    register an account online without registering it with a participation
    node. See https://developer.algorand.org/docs/run-a-node/participate/generate_keys

    :param sender: account address for the sender
    :type sender: string
    :param suggested_params: suggested transaction params
    :type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
    :param storage_account: storage account address for sender
    :type storage_account: string
    :param vote_pk: vote key
    :type vote_pk: bytes
    :param selection_pk: selection key
    :type selection_pk: bytes
    :param state_proof_pk: state proof key
    :type state_proof_pk: bytes
    :param vote_first: first round to vote in consensus
    :type vote_first: int
    :param vote_last: last round to vote in consensus
    :type vote_last: int
    :param vote_key_dilution: vote key dilution
    :type vote_key_dilution: int
    :param manager_app_id: id of the manager application
    :type manager_app_id: int
    :param supported_market_app_ids: list of supported market application ids
    :type supported_market_app_ids: list
    :param supported_oracle_app_ids: list of supported oracle application ids
    :type supported_oracle_app_ids: list
    :return: :class:`TransactionGroup` object representing a send keyreg transaction
    :rtype: :class:`TransactionGroup`
    """
    # Standard protocol preamble transactions for this operation type.
    setup_txns = get_init_txns(
        transaction_type=Transactions.SEND_KEYREG_ONLINE_TXN,
        sender=sender,
        suggested_params=suggested_params,
        manager_app_id=manager_app_id,
        supported_market_app_ids=supported_market_app_ids,
        supported_oracle_app_ids=supported_oracle_app_ids,
        storage_account=storage_account
    )
    # Application arguments: the keyreg opcode followed by the participation keys.
    app_args = [
        manager_strings.send_keyreg_txn.encode(),
        vote_pk,
        selection_pk,
        state_proof_pk,
        int_to_bytes(vote_first),
        int_to_bytes(vote_last),
        int_to_bytes(vote_key_dilution),
    ]
    keyreg_call = ApplicationNoOpTxn(
        sender=sender,
        sp=suggested_params,
        index=manager_app_id,
        app_args=app_args,
        accounts=[storage_account],
    )
    return TransactionGroup(setup_txns + [keyreg_call])
| 48.65625
| 168
| 0.756583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,777
| 0.570649
|
65fdd0400541291beac65b8a408eaf8121f2b56b
| 402
|
py
|
Python
|
server/resources/platform.py
|
simon-dube/CARMIN-server
|
1481d2c4231458d33119c57ab2e3e480375da63b
|
[
"MIT"
] | 1
|
2018-03-12T23:08:12.000Z
|
2018-03-12T23:08:12.000Z
|
server/resources/platform.py
|
simon-dube/CARMIN-server
|
1481d2c4231458d33119c57ab2e3e480375da63b
|
[
"MIT"
] | 15
|
2018-03-15T04:23:31.000Z
|
2018-06-28T21:46:15.000Z
|
server/resources/platform.py
|
simon-dube/CARMIN-server
|
1481d2c4231458d33119c57ab2e3e480375da63b
|
[
"MIT"
] | null | null | null |
from flask_restful import Resource
from server.platform_properties import PLATFORM_PROPERTIES
from server.resources.models.platform_properties import PlatformPropertiesSchema
from server.resources.decorators import marshal_response
class Platform(Resource):
    """CARMIN ``platform`` endpoint exposing the platform properties."""

    @marshal_response(PlatformPropertiesSchema())
    def get(self):
        # Deserialize the static properties so marshalling receives a model object.
        schema = PlatformPropertiesSchema()
        return schema.load(PLATFORM_PROPERTIES).data
| 36.545455
| 80
| 0.840796
| 167
| 0.415423
| 0
| 0
| 137
| 0.340796
| 0
| 0
| 0
| 0
|
65febfc830676365453c5d43b397d3e86ac87c5f
| 471
|
py
|
Python
|
invenio_flow/decorators.py
|
egabancho/invenio-flow
|
583e55d17ab6aabd20bc4a46d098f034c0d0f693
|
[
"MIT"
] | null | null | null |
invenio_flow/decorators.py
|
egabancho/invenio-flow
|
583e55d17ab6aabd20bc4a46d098f034c0d0f693
|
[
"MIT"
] | null | null | null |
invenio_flow/decorators.py
|
egabancho/invenio-flow
|
583e55d17ab6aabd20bc4a46d098f034c0d0f693
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Esteban J. G. Gabancho.
#
# Invenio-Flow is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Useful decorators."""
from celery import shared_task
from .api import Task
def task(*args, **kwargs):
    """Drop-in wrapper for ``celery.shared_task`` that defaults ``base`` to :class:`Task`."""
    if 'base' not in kwargs:
        kwargs['base'] = Task
    return shared_task(*args, **kwargs)
| 23.55
| 73
| 0.694268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 303
| 0.643312
|
65ff6cff89c7853c15b51290646017146b4909fa
| 2,460
|
py
|
Python
|
backend/offchain/types/fund_pull_pre_approval_types.py
|
tanshuai/reference-wallet
|
e8efec4acc6af6e319cf075c10693ddf7754cc83
|
[
"Apache-2.0"
] | 14
|
2020-12-17T08:03:51.000Z
|
2022-03-26T04:21:18.000Z
|
backend/offchain/types/fund_pull_pre_approval_types.py
|
tanshuai/reference-wallet
|
e8efec4acc6af6e319cf075c10693ddf7754cc83
|
[
"Apache-2.0"
] | 20
|
2020-12-15T12:02:56.000Z
|
2021-05-19T23:37:34.000Z
|
backend/offchain/types/fund_pull_pre_approval_types.py
|
tanshuai/reference-wallet
|
e8efec4acc6af6e319cf075c10693ddf7754cc83
|
[
"Apache-2.0"
] | 12
|
2020-12-10T16:35:27.000Z
|
2022-02-01T04:06:10.000Z
|
import typing
from dataclasses import dataclass, field as datafield
from .command_types import CommandType
class FundPullPreApprovalStatus:
    """String constants for the lifecycle status of a funds pull pre-approval."""
    # Pending user/VASP approval
    pending = "pending"
    # Approved by the user/VASP and ready for use
    valid = "valid"
    # User/VASP did not approve the pre-approval request
    rejected = "rejected"
    # Approval has been closed by the user/VASP and can no longer be used
    closed = "closed"
class TimeUnit:
    """String constants for the time-window unit of a cumulative amount limit."""
    day = "day"
    week = "week"
    month = "month"
    year = "year"
@dataclass(frozen=True)
class CurrencyObject:
    """An amount expressed in a specific currency."""
    amount: int
    currency: str
@dataclass(frozen=True)
class ScopedCumulativeAmountObject:
    """Cumulative spending limit over a repeating time window."""
    # Window unit; allowed values are restricted via field metadata.
    unit: str = datafield(
        metadata={
            "valid-values": [TimeUnit.day, TimeUnit.week, TimeUnit.month, TimeUnit.year]
        }
    )
    # Number of `unit`s making up one window.
    value: int
    # Maximum total amount allowed within the window.
    max_amount: CurrencyObject
class FundPullPreApprovalType:
    """String constants for the kind of funds pull pre-approval."""
    save_sub_account = "save_sub_account"
    consent = "consent"
@dataclass(frozen=True)
class FundPullPreApprovalScopeObject:
    """Scope of a pre-approval: its type, expiry, and optional amount caps."""
    # Pre-approval kind; allowed values are restricted via field metadata.
    type: str = datafield(
        metadata={
            "valid-values": [
                FundPullPreApprovalType.save_sub_account,
                FundPullPreApprovalType.consent,
            ]
        }
    )
    # Timestamp after which the approval is no longer valid.
    expiration_timestamp: int
    # Optional cumulative cap per repeating time window.
    max_cumulative_amount: typing.Optional[ScopedCumulativeAmountObject] = datafield(
        default=None
    )
    # Optional cap per single transaction.
    max_transaction_amount: typing.Optional[CurrencyObject] = datafield(default=None)
@dataclass(frozen=True)
class FundPullPreApprovalObject:
    """A funds pull pre-approval exchanged via the off-chain protocol."""
    # Unique id of this pre-approval; marked write-once via metadata.
    funds_pull_pre_approval_id: str = datafield(metadata={"write_once": True})
    # Account address; write-once. (Which party it denotes is defined by the
    # off-chain spec — not determinable from this file.)
    address: str = datafield(metadata={"write_once": True})
    # Biller's account address; write-once.
    biller_address: str = datafield(metadata={"write_once": True})
    scope: FundPullPreApprovalScopeObject
    # Current lifecycle status; allowed values restricted via metadata.
    status: str = datafield(
        metadata={
            "valid-values": [
                FundPullPreApprovalStatus.pending,
                FundPullPreApprovalStatus.valid,
                FundPullPreApprovalStatus.rejected,
                FundPullPreApprovalStatus.closed,
            ]
        }
    )
    # Optional human-readable description.
    description: typing.Optional[str] = datafield(default=None)
@dataclass(frozen=True)
class FundPullPreApprovalCommandObject:
    """Off-chain command wrapper carrying a :class:`FundPullPreApprovalObject`."""
    fund_pull_pre_approval: FundPullPreApprovalObject
    # Discriminator tag; must equal CommandType.FundPullPreApprovalCommand.
    _ObjectType: str = datafield(
        default=CommandType.FundPullPreApprovalCommand,
        metadata={"valid-values": [CommandType.FundPullPreApprovalCommand]},
    )
| 27.032967
| 88
| 0.681301
| 2,207
| 0.897154
| 0
| 0
| 1,806
| 0.734146
| 0
| 0
| 371
| 0.150813
|
65ffb62169d811cc14af150c5eafa69ec8772792
| 19,924
|
py
|
Python
|
data/battle_animation_scripts.py
|
kielbasiago/WorldsCollide
|
5aa7cffdecd14754c9eaa83cd0ad4d0282cc2cc2
|
[
"MIT"
] | 7
|
2022-01-15T02:53:53.000Z
|
2022-02-17T00:51:32.000Z
|
data/battle_animation_scripts.py
|
asilverthorn/WorldsCollide
|
5aa7cffdecd14754c9eaa83cd0ad4d0282cc2cc2
|
[
"MIT"
] | 8
|
2022-01-16T02:45:24.000Z
|
2022-03-21T02:08:27.000Z
|
data/battle_animation_scripts.py
|
asilverthorn/WorldsCollide
|
5aa7cffdecd14754c9eaa83cd0ad4d0282cc2cc2
|
[
"MIT"
] | 5
|
2022-01-15T02:53:38.000Z
|
2022-01-19T17:42:10.000Z
|
# List of addresses within the Battle Animation Scripts for the following commands, which cause screen flashes:
#   B0 - Set background palette color addition (absolute)
#   B5 - Add color to background palette (relative)
#   AF - Set background palette color subtraction (absolute)
#   B6 - Subtract color from background palette (relative)
# By changing the byte at address + 1 to E0 (for absolute) or F0 (for relative), the command causes
# no change to the background color (that is, no flash).
# Keys are animation names; values are the ROM addresses of the flash commands in that animation.
BATTLE_ANIMATION_FLASHES = {
    "Goner": [
        0x100088, # AF E0 - set background color subtraction to 0 (black)
        0x10008C, # B6 61 - increase background color subtraction by 1 (red)
        0x100092, # B6 31 - decrease background color subtraction by 1 (yellow)
        0x100098, # B6 81 - increase background color subtraction by 1 (cyan)
        0x1000A1, # B6 91 - decrease background color subtraction by 1 (cyan)
        0x1000A3, # B6 21 - increase background color subtraction by 1 (yellow)
        0x1000D3, # B6 8F - increase background color subtraction by 15 (cyan)
        0x1000DF, # B0 FF - set background color addition to 31 (white)
        0x100172, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Final KEFKA Death": [
        0x10023A, # B0 FF - set background color addition to 31 (white)
        0x100240, # B5 F4 - decrease background color addition by 4 (white)
        0x100248, # B0 FF - set background color addition to 31 (white)
        0x10024E, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "Atom Edge": [ # Also True Edge
        0x1003D0, # AF E0 - set background color subtraction to 0 (black)
        0x1003DD, # B6 E1 - increase background color subtraction by 1 (black)
        0x1003E6, # B6 E1 - increase background color subtraction by 1 (black)
        0x10044B, # B6 F1 - decrease background color subtraction by 1 (black)
        0x100457, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "Boss Death": [
        0x100476, # B0 FF - set background color addition to 31 (white)
        0x10047C, # B5 F4 - decrease background color addition by 4 (white)
        0x100484, # B0 FF - set background color addition to 31 (white)
        0x100497, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "Transform into Magicite": [
        0x100F30, # B0 FF - set background color addition to 31 (white)
        0x100F3F, # B5 F2 - decrease background color addition by 2 (white)
        0x100F4E, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Purifier": [
        0x101340, # AF E0 - set background color subtraction to 0 (black)
        0x101348, # B6 62 - increase background color subtraction by 2 (red)
        0x101380, # B6 81 - increase background color subtraction by 1 (cyan)
        0x10138A, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "Wall": [
        0x10177B, # AF E0 - set background color subtraction to 0 (black)
        0x10177F, # B6 61 - increase background color subtraction by 1 (red)
        0x101788, # B6 51 - decrease background color subtraction by 1 (magenta)
        0x101791, # B6 81 - increase background color subtraction by 1 (cyan)
        0x10179A, # B6 31 - decrease background color subtraction by 1 (yellow)
        0x1017A3, # B6 41 - increase background color subtraction by 1 (magenta)
        0x1017AC, # B6 91 - decrease background color subtraction by 1 (cyan)
        0x1017B5, # B6 51 - decrease background color subtraction by 1 (magenta)
    ],
    "Pearl": [
        0x10190E, # B0 E0 - set background color addition to 0 (white)
        0x101913, # B5 E2 - increase background color addition by 2 (white)
        0x10191E, # B5 F1 - decrease background color addition by 1 (white)
        0x10193E, # B6 C2 - increase background color subtraction by 2 (blue)
    ],
    "Ice 3": [
        0x101978, # B0 FF - set background color addition to 31 (white)
        0x10197B, # B5 F4 - decrease background color addition by 4 (white)
        0x10197E, # B5 F4 - decrease background color addition by 4 (white)
        0x101981, # B5 F4 - decrease background color addition by 4 (white)
        0x101984, # B5 F4 - decrease background color addition by 4 (white)
        0x101987, # B5 F4 - decrease background color addition by 4 (white)
        0x10198A, # B5 F4 - decrease background color addition by 4 (white)
        0x10198D, # B5 F4 - decrease background color addition by 4 (white)
        0x101990, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "Fire 3": [
        0x1019FA, # B0 9F - set background color addition to 31 (red)
        0x101A1C, # B5 94 - decrease background color addition by 4 (red)
    ],
    "Sleep": [
        0x101A23, # AF E0 - set background color subtraction to 0 (black)
        0x101A29, # B6 E1 - increase background color subtraction by 1 (black)
        0x101A33, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "7-Flush": [
        0x101B43, # AF E0 - set background color subtraction to 0 (black)
        0x101B47, # B6 61 - increase background color subtraction by 1 (red)
        0x101B4D, # B6 51 - decrease background color subtraction by 1 (magenta)
        0x101B53, # B6 81 - increase background color subtraction by 1 (cyan)
        0x101B59, # B6 31 - decrease background color subtraction by 1 (yellow)
        0x101B5F, # B6 41 - increase background color subtraction by 1 (magenta)
        0x101B65, # B6 91 - decrease background color subtraction by 1 (cyan)
        0x101B6B, # B6 51 - decrease background color subtraction by 1 (magenta)
    ],
    "H-Bomb": [
        0x101BC5, # B0 E0 - set background color addition to 0 (white)
        0x101BC9, # B5 E1 - increase background color addition by 1 (white)
        0x101C13, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Revenger": [
        0x101C62, # AF E0 - set background color subtraction to 0 (black)
        0x101C66, # B6 81 - increase background color subtraction by 1 (cyan)
        0x101C6C, # B6 41 - increase background color subtraction by 1 (magenta)
        0x101C72, # B6 91 - decrease background color subtraction by 1 (cyan)
        0x101C78, # B6 21 - increase background color subtraction by 1 (yellow)
        0x101C7E, # B6 51 - decrease background color subtraction by 1 (magenta)
        0x101C84, # B6 81 - increase background color subtraction by 1 (cyan)
        0x101C86, # B6 31 - decrease background color subtraction by 1 (yellow)
        0x101C8C, # B6 91 - decrease background color subtraction by 1 (cyan)
    ],
    "Phantasm": [
        0x101DFD, # AF E0 - set background color subtraction to 0 (black)
        0x101E03, # B6 E1 - increase background color subtraction by 1 (black)
        0x101E07, # B0 FF - set background color addition to 31 (white)
        0x101E0D, # B5 F4 - decrease background color addition by 4 (white)
        0x101E15, # B6 E2 - increase background color subtraction by 2 (black)
        0x101E1F, # B0 FF - set background color addition to 31 (white)
        0x101E27, # B5 F4 - decrease background color addition by 4 (white)
        0x101E2F, # B6 E2 - increase background color subtraction by 2 (black)
        0x101E3B, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "TigerBreak": [
        0x10240D, # B0 FF - set background color addition to 31 (white)
        0x102411, # B5 F2 - decrease background color addition by 2 (white)
        0x102416, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Metamorph": [
        0x102595, # AF E0 - set background color subtraction to 0 (black)
        0x102599, # B6 61 - increase background color subtraction by 1 (red)
        0x1025AF, # B6 71 - decrease background color subtraction by 1 (red)
    ],
    "Cat Rain": [
        0x102677, # B0 FF - set background color addition to 31 (white)
        0x10267B, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Charm": [
        0x1026EE, # B0 FF - set background color addition to 31 (white)
        0x1026FB, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Mirager": [
        0x102791, # B0 FF - set background color addition to 31 (white)
        0x102795, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "SabreSoul": [
        0x1027D3, # B0 FF - set background color addition to 31 (white)
        0x1027DA, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Back Blade": [
        0x1028D3, # AF FF - set background color subtraction to 31 (black)
        0x1028DF, # B6 F4 - decrease background color subtraction by 4 (black)
    ],
    "RoyalShock": [
        0x102967, # B0 FF - set background color addition to 31 (white)
        0x10296B, # B5 F2 - decrease background color addition by 2 (white)
        0x102973, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Overcast": [
        0x102C3A, # AF E0 - set background color subtraction to 0 (black)
        0x102C55, # B6 E1 - increase background color subtraction by 1 (black)
        0x102C8D, # B6 F1 - decrease background color subtraction by 1 (black)
        0x102C91, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "Disaster": [
        0x102CEE, # AF E0 - set background color subtraction to 0 (black)
        0x102CF2, # B6 E1 - increase background color subtraction by 1 (black)
        0x102D19, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "ForceField": [
        0x102D3A, # B0 E0 - set background color addition to 0 (white)
        0x102D48, # B5 E1 - increase background color addition by 1 (white)
        0x102D64, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Terra/Tritoch Lightning": [
        0x102E05, # B0 E0 - set background color addition to 0 (white)
        0x102E09, # B5 81 - increase background color addition by 1 (red)
        0x102E24, # B5 61 - increase background color addition by 1 (cyan)
    ],
    "S. Cross": [
        0x102EDA, # AF E0 - set background color subtraction to 0 (black)
        0x102EDE, # B6 E2 - increase background color subtraction by 2 (black)
        0x102FA8, # B6 F2 - decrease background color subtraction by 2 (black)
        0x102FB1, # B0 E0 - set background color addition to 0 (white)
        0x102FBE, # B5 E2 - increase background color addition by 2 (white)
        0x102FD9, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Mind Blast": [
        0x102FED, # B0 E0 - set background color addition to 0 (white)
        0x102FF1, # B5 81 - increase background color addition by 1 (red)
        0x102FF7, # B5 91 - decrease background color addition by 1 (red)
        0x102FF9, # B5 21 - increase background color addition by 1 (blue)
        0x102FFF, # B5 31 - decrease background color addition by 1 (blue)
        0x103001, # B5 C1 - increase background color addition by 1 (yellow)
        0x103007, # B5 91 - decrease background color addition by 1 (red)
        0x10300D, # B5 51 - decrease background color addition by 1 (green)
        0x103015, # B5 E2 - increase background color addition by 2 (white)
        0x10301F, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Flare Star": [
        0x1030F5, # B0 E0 - set background color addition to 0 (white)
        0x103106, # B5 81 - increase background color addition by 1 (red)
        0x10310D, # B5 E2 - increase background color addition by 2 (white)
        0x103123, # B5 71 - decrease background color addition by 1 (cyan)
        0x10312E, # B5 91 - decrease background color addition by 1 (red)
    ],
    "Quasar": [
        0x1031D2, # AF E0 - set background color subtraction to 0 (black)
        0x1031D6, # B6 E1 - increase background color subtraction by 1 (black)
        0x1031FA, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "R.Polarity": [
        0x10328B, # B0 FF - set background color addition to 31 (white)
        0x103292, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Rippler": [
        0x1033C6, # B0 FF - set background color addition to 31 (white)
        0x1033CA, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Step Mine": [
        0x1034D9, # B0 FF - set background color addition to 31 (white)
        0x1034E0, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "L.5 Doom": [
        0x1035E6, # B0 FF - set background color addition to 31 (white)
        0x1035F6, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "Megazerk": [
        0x103757, # B0 80 - set background color addition to 0 (red)
        0x103761, # B5 82 - increase background color addition by 2 (red)
        0x10378F, # B5 92 - decrease background color addition by 2 (red)
        0x103795, # B5 92 - decrease background color addition by 2 (red)
        0x10379B, # B5 92 - decrease background color addition by 2 (red)
        0x1037A1, # B5 92 - decrease background color addition by 2 (red)
        0x1037A7, # B5 92 - decrease background color addition by 2 (red)
        0x1037AD, # B5 92 - decrease background color addition by 2 (red)
        0x1037B3, # B5 92 - decrease background color addition by 2 (red)
        0x1037B9, # B5 92 - decrease background color addition by 2 (red)
        0x1037C0, # B5 92 - decrease background color addition by 2 (red)
    ],
    "Schiller": [
        0x103819, # B0 FF - set background color addition to 31 (white)
        0x10381D, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "WallChange": [
        0x10399E, # B0 FF - set background color addition to 31 (white)
        0x1039A3, # B5 F2 - decrease background color addition by 2 (white)
        0x1039A9, # B5 F2 - decrease background color addition by 2 (white)
        0x1039AF, # B5 F2 - decrease background color addition by 2 (white)
        0x1039B5, # B5 F2 - decrease background color addition by 2 (white)
        0x1039BB, # B5 F2 - decrease background color addition by 2 (white)
        0x1039C1, # B5 F2 - decrease background color addition by 2 (white)
        0x1039C7, # B5 F2 - decrease background color addition by 2 (white)
        0x1039CD, # B5 F2 - decrease background color addition by 2 (white)
        0x1039D4, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Ultima": [
        0x1056CB, # AF 60 - set background color subtraction to 0 (red)
        0x1056CF, # B6 C2 - increase background color subtraction by 2 (blue)
        0x1056ED, # B0 FF - set background color addition to 31 (white)
        0x1056F5, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Bolt 3": [ # Also Giga Volt
        0x10588E, # B0 FF - set background color addition to 31 (white)
        0x105893, # B5 F4 - decrease background color addition by 4 (white)
        0x105896, # B5 F4 - decrease background color addition by 4 (white)
        0x105899, # B5 F4 - decrease background color addition by 4 (white)
        0x10589C, # B5 F4 - decrease background color addition by 4 (white)
        0x1058A1, # B5 F4 - decrease background color addition by 4 (white)
        0x1058A6, # B5 F4 - decrease background color addition by 4 (white)
        0x1058AB, # B5 F4 - decrease background color addition by 4 (white)
        0x1058B0, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "X-Zone": [
        0x105A5D, # B0 FF - set background color addition to 31 (white)
        0x105A6A, # B5 F2 - decrease background color addition by 2 (white)
        0x105A79, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Dispel": [
        0x105DC2, # B0 FF - set background color addition to 31 (white)
        0x105DC9, # B5 F1 - decrease background color addition by 1 (white)
        0x105DD2, # B5 F1 - decrease background color addition by 1 (white)
        0x105DDB, # B5 F1 - decrease background color addition by 1 (white)
        0x105DE4, # B5 F1 - decrease background color addition by 1 (white)
        0x105DED, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Muddle": [ # Also L.3 Muddle, Confusion
        0x1060EA, # B0 FF - set background color addition to 31 (white)
        0x1060EE, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Shock": [
        0x1068BE, # B0 FF - set background color addition to 31 (white)
        0x1068D0, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Bum Rush": [
        0x106C3E, # B0 E0 - set background color addition to 0 (white)
        0x106C47, # B0 E0 - set background color addition to 0 (white)
        0x106C53, # B0 E0 - set background color addition to 0 (white)
        0x106C7E, # B0 FF - set background color addition to 31 (white)
        0x106C87, # B0 E0 - set background color addition to 0 (white)
        0x106C95, # B0 FF - set background color addition to 31 (white)
        0x106C9E, # B0 E0 - set background color addition to 0 (white)
    ],
    "Stunner": [
        0x1071BA, # B0 20 - set background color addition to 0 (blue)
        0x1071C1, # B5 24 - increase background color addition by 4 (blue)
        0x1071CA, # B5 24 - increase background color addition by 4 (blue)
        0x1071D5, # B5 24 - increase background color addition by 4 (blue)
        0x1071DE, # B5 24 - increase background color addition by 4 (blue)
        0x1071E9, # B5 24 - increase background color addition by 4 (blue)
        0x1071F2, # B5 24 - increase background color addition by 4 (blue)
        0x1071FD, # B5 24 - increase background color addition by 4 (blue)
        0x107206, # B5 24 - increase background color addition by 4 (blue)
        0x107211, # B5 24 - increase background color addition by 4 (blue)
        0x10721A, # B5 24 - increase background color addition by 4 (blue)
        0x10725A, # B5 32 - decrease background color addition by 2 (blue)
    ],
    "Quadra Slam": [ # Also Quadra Slice
        0x1073DC, # B0 FF - set background color addition to 31 (white)
        0x1073EE, # B5 F2 - decrease background color addition by 2 (white)
        0x1073F3, # B5 F2 - decrease background color addition by 2 (white)
        0x107402, # B0 5F - set background color addition to 31 (green)
        0x107424, # B5 54 - decrease background color addition by 4 (green)
        0x107429, # B5 54 - decrease background color addition by 4 (green)
        0x107436, # B0 3F - set background color addition to 31 (blue)
        0x107458, # B5 34 - decrease background color addition by 4 (blue)
        0x10745D, # B5 34 - decrease background color addition by 4 (blue)
        0x107490, # B0 9F - set background color addition to 31 (red)
        0x1074B2, # B5 94 - decrease background color addition by 4 (red)
        0x1074B7, # B5 94 - decrease background color addition by 4 (red)
    ],
    "Slash": [
        0x1074F4, # B0 FF - set background color addition to 31 (white)
        0x1074FD, # B5 F2 - decrease background color addition by 2 (white)
        0x107507, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Flash": [
        0x107850, # B0 FF - set background color addition to 31 (white)
        0x10785C, # B5 F1 - decrease background color addition by 1 (white)
    ]
}
| 58.428152
| 133
| 0.630546
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 14,428
| 0.724152
|
65ffed323033ff0ac5225d3d784dead8adf418b4
| 2,643
|
py
|
Python
|
Jumpscale/tools/capacity/reality_parser.py
|
threefoldtech/JumpscaleX
|
5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa
|
[
"Apache-2.0"
] | 2
|
2019-05-09T07:21:25.000Z
|
2019-08-05T06:37:53.000Z
|
Jumpscale/tools/capacity/reality_parser.py
|
threefoldtech/JumpscaleX
|
5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa
|
[
"Apache-2.0"
] | 664
|
2018-12-19T12:43:44.000Z
|
2019-08-23T04:24:42.000Z
|
Jumpscale/tools/capacity/reality_parser.py
|
threefoldtech/jumpscale10
|
5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa
|
[
"Apache-2.0"
] | 7
|
2019-05-03T07:14:37.000Z
|
2019-08-05T12:36:52.000Z
|
"""
this module contain the logic of parsing the actual usage of the ressource unit of a zero-os node
"""
from .units import GiB
from sal_zos.disks.Disks import StorageType
class RealityParser:
    """Parse the actual resource-unit usage of a zero-os node into a Report."""

    def __init__(self):
        # running totals per resource unit: memory, compute, HDD, SSD
        self._ressources = {"mru": 0.0, "cru": 0.0, "hru": 0.0, "sru": 0.0}

    def get_report(self, disks, storage_pools, total_cpu_nr, used_cpu, used_memory):
        """Build a Report of the currently used resource units.

        :param disks: disk objects used to map partitions to disk types
        :param storage_pools: storage pool objects to account for
        :param total_cpu_nr: number of CPUs on the node
        :param used_cpu: per-CPU usage aggregates (see _parse_cpu)
        :param used_memory: amount of used memory in bytes
        :return: a Report instance with the rounded usage figures
        """
        storage = _parse_storage(disks, storage_pools)
        self._ressources["mru"] = _parse_memory(used_memory)
        self._ressources["cru"] = _parse_cpu(total_cpu_nr, used_cpu)
        self._ressources["sru"] = storage["sru"]
        self._ressources["hru"] = storage["hru"]
        return Report(**self._ressources)
class Report:
    """Read-only resource-usage report; all figures are rounded to two decimals."""

    def __init__(self, cru, mru, hru, sru):
        # round once at construction so every accessor returns the same value
        self._cru = round(cru, 2)
        self._mru = round(mru, 2)
        self._hru = round(hru, 2)
        self._sru = round(sru, 2)

    @property
    def CRU(self):
        # compute resource units
        return self._cru

    @property
    def MRU(self):
        # memory resource units
        return self._mru

    @property
    def SRU(self):
        # SSD storage resource units
        return self._sru

    @property
    def HRU(self):
        # HDD storage resource units
        return self._hru

    def __repr__(self):
        # same key order as the original dict(cru=..., mru=..., hru=..., sru=...)
        return str({"cru": self.CRU, "mru": self.MRU, "hru": self.HRU, "sru": self.SRU})

    __str__ = __repr__
def _parse_storage(disks, storage_pools):
    """Compute used SSD ("sru") and HDD ("hru") storage in GiB.

    :param disks: disk objects exposing ``partitions`` and a ``type`` (StorageType)
    :param storage_pools: pool objects exposing ``devices``, ``mountpoint`` and ``fsinfo``
    :return: dict with keys "sru" and "hru" (used space in GiB)
    :raises ValueError: if a pool sits on a disk of unrecognized type
    """
    # map every partition device name to the type of the disk that holds it
    disk_mapping = {}
    for disk in disks:
        for part in disk.partitions:
            if part.devicename not in disk_mapping:
                disk_mapping[part.devicename] = disk.type
    resources = {"sru": 0, "hru": 0}
    for sp in storage_pools:
        if len(sp.devices) <= 0:
            continue
        # the zero-os cache pool is not user capacity; skip it
        if sp.mountpoint == "/mnt/storagepools/sp_zos-cache":
            continue
        # NOTE(review): assumes every pool device appears in disk_mapping — KeyError otherwise
        disk_type = disk_mapping[sp.devices[0]]
        size = sp.fsinfo["data"]["used"]
        if disk_type in [StorageType.HDD, StorageType.ARCHIVE]:
            resources["hru"] += size / GiB
        elif disk_type in [StorageType.SSD, StorageType.NVME]:
            resources["sru"] += size / GiB
        else:
            # bug fix: previously referenced the leaked loop variable ``disk``
            # (last disk iterated, or NameError when ``disks`` is empty) instead
            # of the type of the disk this pool actually lives on
            raise ValueError("disk type %s is not valid" % disk_type.name)
    return resources
def _parse_cpu(total_cpu_nr, used_cpu):
# self._node.client.aggregator.query("machine.CPU.percent")
cpu_percentages = [value["current"]["3600"]["avg"] for value in used_cpu.values()]
return (total_cpu_nr * sum(cpu_percentages)) / 100
def _parse_memory(used_memory):
    """
    Convert the used memory in bytes to resource units (MRU).

    :param used_memory: amount of used memory in bytes
    :type used_memory: float
    :return: number of MRU (used memory expressed in GiB)
    :rtype: float
    """
    # GiB is the bytes-per-gibibyte constant imported from .units
    return used_memory / GiB
| 26.69697
| 97
| 0.626182
| 1,091
| 0.412788
| 0
| 0
| 212
| 0.080212
| 0
| 0
| 523
| 0.197881
|
5a01dafbd8cdef4d174904ccd475a2627ada858d
| 3,314
|
py
|
Python
|
fsem/similarity_measures/jaro.py
|
sajith-rahim/fs-em
|
2e8dde8b5f36ee1e1dfc5407611ec2fb91630c2a
|
[
"BSD-3-Clause"
] | null | null | null |
fsem/similarity_measures/jaro.py
|
sajith-rahim/fs-em
|
2e8dde8b5f36ee1e1dfc5407611ec2fb91630c2a
|
[
"BSD-3-Clause"
] | null | null | null |
fsem/similarity_measures/jaro.py
|
sajith-rahim/fs-em
|
2e8dde8b5f36ee1e1dfc5407611ec2fb91630c2a
|
[
"BSD-3-Clause"
] | null | null | null |
import math
__all__ = ['get_jaro_distance']
__author__ = 'Jean-Bernard Ratte - jean.bernard.ratte@unary.ca'
""" Find the Jaro Winkler Distance which indicates the similarity score between two Strings.
The Jaro measure is the weighted sum of percentage of matched characters from each file and transposed characters.
Winkler increased this measure for matching initial characters.
This implementation is based on the Jaro Winkler similarity algorithm from
http://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance
This Python implementation is based on the Apache StringUtils implementation from
http://commons.apache.org/proper/commons-lang/apidocs/src-html/org/apache/commons/lang3/StringUtils.html#line.7141
"""
def get_jaro_distance(first, second, winkler=True, winkler_ajustment=True, scaling=0.1):
    """Jaro similarity of two words, optionally with the Winkler prefix boost.

    :param first: word to calculate distance for
    :param second: word to calculate distance with
    :param winkler: must be truthy together with ``winkler_ajustment`` to apply the boost
    :param winkler_ajustment: add an adjustment factor to the Jaro of the distance
    :param scaling: scaling factor for the Winkler adjustment (default 0.1)
    :return: Jaro distance adjusted (or not)
    :raises JaroDistanceException: if either input is falsy (None or empty)
    """
    if not first or not second:
        raise JaroDistanceException("Cannot calculate distance from NoneType ({0}, {1})".format(
            first.__class__.__name__,
            second.__class__.__name__))
    base = _score(first, second)
    # Winkler uses at most the first four characters of the common prefix
    prefix_len = min(len(_get_prefix(first, second)), 4)
    if winkler and winkler_ajustment:
        boosted = base + (scaling * prefix_len * (1.0 - base))
        # round to two decimal places
        return round(boosted * 100.0) / 100.0
    return base
def _score(first, second):
    """Raw (unadjusted) Jaro similarity of *first* and *second*, case-insensitive."""
    shorter, longer = first.lower(), second.lower()
    if len(first) > len(second):
        # keep ``shorter`` the shorter of the two lowercased words
        longer, shorter = shorter, longer
    matches_short = _get_matching_characters(shorter, longer)
    matches_long = _get_matching_characters(longer, shorter)
    if len(matches_short) == 0 or len(matches_long) == 0:
        return 0.0
    transposed = _transpositions(matches_short, matches_long)
    return (float(len(matches_short)) / len(shorter) +
            float(len(matches_long)) / len(longer) +
            float(len(matches_short) - transposed) / len(matches_short)) / 3.0
def _get_diff_index(first, second):
if first == second:
return -1
if not first or not second:
return 0
max_len = min(len(first), len(second))
for i in range(0, max_len):
if not first[i] == second[i]:
return i
return max_len
def _get_prefix(first, second):
    """Common leading substring of *first* and *second* ("" when none)."""
    if not first or not second:
        return ""
    diff_at = _get_diff_index(first, second)
    if diff_at == -1:
        # identical strings: the whole word is the prefix
        return first
    if diff_at == 0:
        return ""
    return first[:diff_at]
def _get_matching_characters(first, second):
common = []
limit = math.floor(min(len(first), len(second)) / 2)
for i, l in enumerate(first):
left, right = int(max(0, i - limit)), int(min(i + limit + 1, len(second)))
if l in second[left:right]:
common.append(l)
second = second[0:second.index(l)] + '*' + second[second.index(l) + 1:]
return ''.join(common)
def _transpositions(first, second):
return math.floor(len([(f, s) for f, s in zip(first, second) if not f == s]) / 2.0)
class JaroDistanceException(Exception):
    """Raised when a Jaro distance cannot be computed (e.g. empty or None input)."""

    def __init__(self, message):
        # Fix: the original called super(Exception, self).__init__, which skips
        # Exception in the MRO and delegates to BaseException. Zero-argument
        # super() initializes the correct base class.
        super().__init__(message)
| 31.561905
| 118
| 0.658117
| 125
| 0.037719
| 0
| 0
| 0
| 0
| 0
| 0
| 1,130
| 0.340978
|
5a0258dc0630fde008fae59e8ca2f2322000aca2
| 732
|
py
|
Python
|
UnitTests/FullAtomModel/PDB2Coords/test.py
|
dendisuhubdy/TorchProteinLibrary
|
89f0f6c311658b9313484cd92804682a251b1b97
|
[
"MIT"
] | null | null | null |
UnitTests/FullAtomModel/PDB2Coords/test.py
|
dendisuhubdy/TorchProteinLibrary
|
89f0f6c311658b9313484cd92804682a251b1b97
|
[
"MIT"
] | null | null | null |
UnitTests/FullAtomModel/PDB2Coords/test.py
|
dendisuhubdy/TorchProteinLibrary
|
89f0f6c311658b9313484cd92804682a251b1b97
|
[
"MIT"
] | null | null | null |
import sys
import os
import matplotlib.pylab as plt
import numpy as np
import mpl_toolkits.mplot3d.axes3d as p3
import seaborn as sea
import torch
from TorchProteinLibrary import FullAtomModel
if __name__=='__main__':
	# Smoke test: parse a PDB file into coordinates and scatter-plot the atoms in 3D.
	# p2c = FullAtomModel.PDB2Coords.PDB2CoordsBiopython()
	p2c = FullAtomModel.PDB2CoordsUnordered()
	# coords holds a flat coordinate buffer; res/anames are residue and atom-name
	# tensors; num_atoms is presumably the atom count per structure — TODO confirm
	coords, res, anames, num_atoms = p2c(["f4TQ1_B.pdb"])
	print (coords.size())
	print (res.size())
	print (anames.size())
	print (num_atoms)
	# reshape the flat (1, 3*N) buffer into (N, 3) xyz rows
	coords = coords.numpy()
	coords = coords.reshape(int(coords.shape[1]/3), 3)
	fig = plt.figure()
	ax = fig.add_subplot(111, projection='3d')
	x = coords[:,0]
	y = coords[:,1]
	z = coords[:,2]
	ax.scatter(x,y,z)
	plt.show()
| 24.4
| 58
| 0.674863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 81
| 0.110656
|
5a025cdbfc11bf834d39b1a16efe1582cdd5e329
| 4,306
|
py
|
Python
|
vae/scripts/gm_vae_fc_toy.py
|
ondrejba/vae
|
23f179637ca45c20d4e5f74e8c56b62f57554ef4
|
[
"MIT"
] | 1
|
2019-11-23T20:51:58.000Z
|
2019-11-23T20:51:58.000Z
|
vae/scripts/gm_vae_fc_toy.py
|
ondrejba/vae
|
23f179637ca45c20d4e5f74e8c56b62f57554ef4
|
[
"MIT"
] | null | null | null |
vae/scripts/gm_vae_fc_toy.py
|
ondrejba/vae
|
23f179637ca45c20d4e5f74e8c56b62f57554ef4
|
[
"MIT"
] | 1
|
2021-12-01T07:29:39.000Z
|
2021-12-01T07:29:39.000Z
|
import argparse
import collections
import os
import numpy as np
import matplotlib.pyplot as plt
from .. import toy_dataset
from .. import gm_vae_fc
def main(args):
    """Train a Gaussian-mixture VAE on a 2D toy dataset and plot diagnostics.

    ``args`` must provide: gpus, weight_decay, learning_rate, clip_z_prior,
    gpu_memory_fraction, batch_size, num_training_steps (see the argparse setup).
    Shows several matplotlib figures along the way (blocking).
    """
    # gpu settings
    if args.gpus is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
    # generate and show dataset
    train_data = toy_dataset.get_dataset()
    print("training data:")
    plt.scatter(train_data[:, 0], train_data[:, 1])
    plt.show()
    # build model
    num_clusters = 4
    x_size = 2
    w_size = 2
    input_size = 2
    model = gm_vae_fc.GM_VAE(
        input_size, [120, 120], [120, 120, input_size], [120], num_clusters, x_size, w_size,
        gm_vae_fc.GM_VAE.LossType.L2, args.weight_decay, args.learning_rate, clip_z_prior=args.clip_z_prior
    )
    model.build_all()
    model.start_session(gpu_memory=args.gpu_memory_fraction)
    # train model; losses are averaged per epoch (one epoch = one pass over train_data)
    epoch_size = len(train_data) // args.batch_size
    losses = collections.defaultdict(list)
    epoch_losses = collections.defaultdict(list)
    for train_step in range(args.num_training_steps):
        epoch_step = train_step % epoch_size
        if train_step > 0 and train_step % 1000 == 0:
            print("step {:d}".format(train_step))
        # at each epoch boundary, fold the per-step losses into per-epoch averages
        if epoch_step == 0 and train_step > 0:
            losses["total"].append(np.mean(epoch_losses["total"]))
            losses["output"].append(np.mean(epoch_losses["output"]))
            losses["w KL divergence"].append(np.mean(epoch_losses["w KL divergence"]))
            losses["x KL divergence"].append(np.mean(epoch_losses["x KL divergence"]))
            losses["z KL divergence"].append(np.mean(epoch_losses["z KL divergence"]))
            losses["regularization"].append(np.mean(epoch_losses["regularization"]))
            epoch_losses = collections.defaultdict(list)
        samples = train_data[epoch_step * args.batch_size : (epoch_step + 1) * args.batch_size]
        loss, output_loss, w_kl_loss, x_kl_loss, z_kl_loss, reg_loss = model.train(samples)
        epoch_losses["total"].append(loss)
        epoch_losses["output"].append(output_loss)
        epoch_losses["w KL divergence"].append(w_kl_loss)
        epoch_losses["x KL divergence"].append(x_kl_loss)
        epoch_losses["z KL divergence"].append(z_kl_loss)
        epoch_losses["regularization"].append(reg_loss)
    # plot losses
    for key, value in losses.items():
        plt.plot(list(range(1, len(value) + 1)), value, label=key)
    plt.legend()
    plt.xlabel("epoch")
    plt.show()
    x_encodings, w_encodings = model.encode(train_data, args.batch_size)
    y_decodings = model.predict_from_x_sample(x_encodings)
    # plot x
    print("encodings to x:")
    plt.scatter(x_encodings[:, 0], x_encodings[:, 1])
    plt.show()
    # plot w
    print("encodings to w:")
    plt.scatter(w_encodings[:, 0], w_encodings[:, 1])
    plt.show()
    # plot reconstruction
    print("reconstructions:")
    plt.scatter(y_decodings[:, 0], y_decodings[:, 1])
    plt.show()
    # plot samples from mixtures: draw w from the prior, map to cluster parameters
    w_samples = np.random.normal(0, 1, size=(100, w_size))
    c_mu, c_sd = model.get_clusters(w_samples)
    print("cluster centroids:")
    for c_idx in range(num_clusters):
        plt.scatter(c_mu[:, c_idx, 0], c_mu[:, c_idx, 1], label="cluster {:d}".format(c_idx + 1))
    plt.legend()
    plt.show()
    print("cluster samples:")
    for c_idx in range(num_clusters):
        # reparameterized sample: mu + eps * sd, then decode to data space
        x_samples = c_mu[:, c_idx, :] + np.random.normal(0, 1, size=(100, x_size)) * c_sd[:, c_idx, :]
        y_samples = model.predict_from_x_sample(x_samples)
        plt.scatter(y_samples[:, 0], y_samples[:, 1], label="cluster {:d}".format(c_idx + 1))
    plt.legend()
    plt.show()
    model.stop_session()
if __name__ == "__main__":
    # command-line entry point: parse hyperparameters and run training
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-training-steps", type=int, default=100000)
    parser.add_argument("--learning-rate", type=float, default=0.0001)
    parser.add_argument("--batch-size", type=int, default=50)
    parser.add_argument("--weight-decay", type=float, default=0.0)
    parser.add_argument("--clip-z-prior", type=float, default=1.4)
    # GPU selection and memory cap are optional; None means library defaults
    parser.add_argument("--gpus", default=None)
    parser.add_argument("--gpu-memory-fraction", default=None, type=float)
    parsed = parser.parse_args()
    main(parsed)
| 31.202899
| 107
| 0.65699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 691
| 0.160474
|
5a042bec8f40b0fa98c21fc7bd7c2df868d70903
| 520
|
py
|
Python
|
Python/354.py
|
JWang169/LintCodeJava
|
b75b06fa1551f5e4d8a559ef64e1ac29db79c083
|
[
"CNRI-Python"
] | 1
|
2020-12-10T05:36:15.000Z
|
2020-12-10T05:36:15.000Z
|
Python/354.py
|
JWang169/LintCodeJava
|
b75b06fa1551f5e4d8a559ef64e1ac29db79c083
|
[
"CNRI-Python"
] | null | null | null |
Python/354.py
|
JWang169/LintCodeJava
|
b75b06fa1551f5e4d8a559ef64e1ac29db79c083
|
[
"CNRI-Python"
] | 3
|
2020-04-06T05:55:08.000Z
|
2021-08-29T14:26:54.000Z
|
class Solution:
    def maxEnvelopes(self, envelopes: List[List[int]]) -> int:
        """Russian-doll envelopes: longest chain of strictly increasing (w, h).

        Sort by width ascending with height DESCENDING on ties, so two
        envelopes of equal width can never chain; then run an O(n log n)
        longest-strictly-increasing-subsequence over the heights.
        """
        if not envelopes:
            return 0
        ordered = sorted(envelopes, key=lambda e: (e[0], -e[1]))
        # tails[k] = smallest height that can end an increasing chain of length k + 1
        tails = []
        for _, h in ordered:
            if not tails or h > tails[-1]:
                tails.append(h)
            else:
                pos = bisect.bisect_left(tails, h)
                tails[pos] = h
        return len(tails)
| 34.666667
| 62
| 0.492308
| 503
| 0.967308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5a054c6f2f48cad9dc180b59f6e0034f5b144f73
| 331
|
py
|
Python
|
codes/day06/03.py
|
Youngfellows/HPyBaseCode
|
94d11872795d85b8c4387b650e82edcd20da0667
|
[
"Apache-2.0"
] | null | null | null |
codes/day06/03.py
|
Youngfellows/HPyBaseCode
|
94d11872795d85b8c4387b650e82edcd20da0667
|
[
"Apache-2.0"
] | null | null | null |
codes/day06/03.py
|
Youngfellows/HPyBaseCode
|
94d11872795d85b8c4387b650e82edcd20da0667
|
[
"Apache-2.0"
] | null | null | null |
class Dog:
    """Minimal demo class: a dog with a color that can bark and report its color."""
    def __init__(self, newColor):
        # newColor: the dog's color (string), stored as self.color
        self.color = newColor
    def bark(self):
        # prints a bark sound (Chinese onomatopoeia for "woof woof")
        print("---旺旺叫----")
    def printColor(self):
        # prints "color is: <color>" in Chinese
        print("颜色为:%s"%self.color)
def test(AAA):
    # duck-typing demo: any object with a printColor() method works here
    AAA.printColor()
# wangcai is a white dog, xiaoqiang is a black dog
wangcai = Dog("白")
#wangcai.printColor()
xiaoqiang = Dog("黑")
#xiaoqiang.printColor()
test(wangcai)
| 15.045455
| 34
| 0.592145
| 203
| 0.581662
| 0
| 0
| 0
| 0
| 0
| 0
| 88
| 0.252149
|
5a05e2efcbe249cfc654b1e6e98561ecca3c15b5
| 1,158
|
py
|
Python
|
LC_problems/699.py
|
Howardhuang98/Blog
|
cf58638d6d0bbf55b95fe08e43798e7dd14219ac
|
[
"MIT"
] | null | null | null |
LC_problems/699.py
|
Howardhuang98/Blog
|
cf58638d6d0bbf55b95fe08e43798e7dd14219ac
|
[
"MIT"
] | null | null | null |
LC_problems/699.py
|
Howardhuang98/Blog
|
cf58638d6d0bbf55b95fe08e43798e7dd14219ac
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 699.py
@Contact : huanghoward@foxmail.com
@Modify Time : 2022/5/26 17:22
------------
"""
from typing import List
class Solution:
    def fallingSquares(self, positions: List[List[int]]) -> List[int]:
        """Falling squares: for each dropped square (left edge, side length),
        return the running maximum stack height after it lands.

        O(n^2): each square's landing height is its side plus the tallest
        earlier square whose horizontal interval overlaps it (touching
        edges do not count as overlap).
        """
        heights = [side for _, side in positions]
        for i, (left, side) in enumerate(positions):
            if i == 0:
                continue
            right = left + side
            for j in range(i):
                prev_left, prev_side = positions[j]
                # strict interval intersection: [left, right) vs [prev_left, prev_left+prev_side)
                if left < prev_left + prev_side and right > prev_left:
                    heights[i] = max(heights[i], heights[j] + side)
        # the answer is the running maximum of landing heights
        ans = []
        running = 0
        for h in heights:
            running = max(running, h)
            ans.append(running)
        return ans
if __name__ == '__main__':
    # Smoke test; given the overlap logic above this prints [6, 6, 8].
    s = Solution()
    print(s.fallingSquares([[9, 6], [2, 2], [2, 6]]))
| 28.95
| 105
| 0.443005
| 857
| 0.740069
| 0
| 0
| 0
| 0
| 0
| 0
| 243
| 0.209845
|
5a05e7be3fed210c95055f9564a15535552003ac
| 5,150
|
py
|
Python
|
plastid/test/functional/test_metagene.py
|
joshuagryphon/plastid
|
e63a818e33766b01d84b3ac9bc9f55e6a1ece42f
|
[
"BSD-3-Clause"
] | 31
|
2016-04-05T09:58:29.000Z
|
2022-01-18T11:58:30.000Z
|
plastid/test/functional/test_metagene.py
|
joshuagryphon/plastid
|
e63a818e33766b01d84b3ac9bc9f55e6a1ece42f
|
[
"BSD-3-Clause"
] | 49
|
2015-09-15T19:50:13.000Z
|
2022-01-06T18:17:35.000Z
|
plastid/test/functional/test_metagene.py
|
joshuagryphon/plastid
|
e63a818e33766b01d84b3ac9bc9f55e6a1ece42f
|
[
"BSD-3-Clause"
] | 14
|
2017-02-08T09:38:57.000Z
|
2020-09-16T02:32:46.000Z
|
#!/usr/bin/env python
"""Test suite for :py:mod:`plastid.bin.metagene`"""
import tempfile
import os
from pkg_resources import resource_filename, cleanup_resources
from nose.plugins.attrib import attr
from plastid.test.functional.base import execute_helper
from plastid.test.ref_files import (
RPATH,
REF_FILES,
COUNT_OPTIONS,
ANNOTATION_OPTIONS,
MASK_OPTIONS,
)
from plastid.bin.test_table_equality import main as table_test
from plastid.bin.metagene import main
from plastid.util.services.decorators import catch_stderr
#===============================================================================
# INDEX: global constants used by tests
#===============================================================================
TEST_INFO = {
"test_method": catch_stderr()(main),
"module_name": "plastid.bin.metagene",
"ref_file_path": resource_filename("plastid", "test/data/command_line"),
"temp_file_path": tempfile.mkdtemp(prefix="metagene"),
}
_basename = os.path.join(TEST_INFO["temp_file_path"], "test_metagene")
#===============================================================================
# INDEX: tests
#===============================================================================
tests = [
# test generate cds start
(
"generate %s_cds_start --downstream 100 %s %s" %
(_basename, ANNOTATION_OPTIONS, MASK_OPTIONS),
[REF_FILES["yeast_metagene_cds_start"], REF_FILES["yeast_metagene_cds_start_bed"]], [
_basename + "_cds_start_rois.txt",
_basename + "_cds_start_rois.bed",
], ["", "--no_header"]
),
# test generate cds stop
(
"generate %s_cds_stop --upstream 100 --landmark cds_stop %s %s" %
(_basename, ANNOTATION_OPTIONS, MASK_OPTIONS), [
REF_FILES["yeast_metagene_cds_stop"],
REF_FILES["yeast_metagene_cds_stop_bed"],
], [
_basename + "_cds_stop_rois.txt",
_basename + "_cds_stop_rois.bed",
], ["", "--no_header"]
),
# test count cds start with --norm_region
(
"count %s %s_cds_start --keep --norm_region 70 150 %s" %
(REF_FILES["yeast_metagene_cds_start"], _basename, COUNT_OPTIONS), [
REF_FILES["yeast_metagene_cds_start_profile"],
REF_FILES["yeast_metagene_cds_start_normcounts"],
REF_FILES["yeast_metagene_cds_start_rawcounts"],
], [
_basename + "_cds_start_metagene_profile.txt",
_basename + "_cds_start_normcounts.txt.gz", _basename + "_cds_start_rawcounts.txt.gz"
], ["", "--no_header", "--no_header"]
),
# test count cds stop with --norm_region
(
"count %s %s_cds_stop --keep --norm_region 0 80 %s" %
(REF_FILES["yeast_metagene_cds_stop"], _basename, COUNT_OPTIONS), [
REF_FILES["yeast_metagene_cds_stop_profile"],
REF_FILES["yeast_metagene_cds_stop_normcounts"],
REF_FILES["yeast_metagene_cds_stop_rawcounts"],
], [
_basename + "_cds_stop_metagene_profile.txt", _basename + "_cds_stop_normcounts.txt.gz",
_basename + "_cds_stop_rawcounts.txt.gz"
], ["", "--no_header", "--no_header"]
),
# test count cds start, using --normalize_over
(
"count %s %s_cds_start --keep --normalize_over 20 100 %s" %
(REF_FILES["yeast_metagene_cds_start"], _basename, COUNT_OPTIONS), [
REF_FILES["yeast_metagene_cds_start_profile"],
REF_FILES["yeast_metagene_cds_start_normcounts"],
REF_FILES["yeast_metagene_cds_start_rawcounts"],
], [
_basename + "_cds_start_metagene_profile.txt",
_basename + "_cds_start_normcounts.txt.gz", _basename + "_cds_start_rawcounts.txt.gz"
], ["", "--no_header", "--no_header"]
),
# test count cds stop, using --normalize_over
(
"count %s %s_cds_stop --keep --normalize_over '-100' '-20' %s" %
(REF_FILES["yeast_metagene_cds_stop"], _basename, COUNT_OPTIONS), [
REF_FILES["yeast_metagene_cds_stop_profile"],
REF_FILES["yeast_metagene_cds_stop_normcounts"],
REF_FILES["yeast_metagene_cds_stop_rawcounts"],
], [
_basename + "_cds_stop_metagene_profile.txt", _basename + "_cds_stop_normcounts.txt.gz",
_basename + "_cds_stop_rawcounts.txt.gz"
], ["", "--no_header", "--no_header"]
),
]
"""Functional tests of :py:mod:`plastid.bin.metagene`.
Tests are specified as tuples of:
1. Command-line style arguments to pass to :py:func:`main`
2. A list of reference files that output should be compared against
3. A list of output files created by running :py:func:`main`
with the arguments provided in (1)
4. A list of strings specifying how equality should be evaluated
"""
#===============================================================================
# INDEX: test functions
#===============================================================================
@attr(test="functional")
@attr(speed="slow")
def do_test():
    """Generate one functional test per entry in ``tests`` via execute_helper."""
    yield from execute_helper(TEST_INFO, tests)
| 39.615385
| 100
| 0.594175
| 0
| 0
| 77
| 0.014951
| 122
| 0.023689
| 0
| 0
| 2,981
| 0.578835
|
5a0619271eef494f524dc719a9ba4f63c1373613
| 4,967
|
py
|
Python
|
tests/router/test_router.py
|
macneiln/ombott
|
f18f6e0e639f20efb63b137edbab8c8b3871d354
|
[
"MIT"
] | null | null | null |
tests/router/test_router.py
|
macneiln/ombott
|
f18f6e0e639f20efb63b137edbab8c8b3871d354
|
[
"MIT"
] | null | null | null |
tests/router/test_router.py
|
macneiln/ombott
|
f18f6e0e639f20efb63b137edbab8c8b3871d354
|
[
"MIT"
] | null | null | null |
import pytest
from ombott.router import RadiRouter, Route
from ombott.router.errors import RouteMethodError
route_meth_handler_path = [
('/foo/bar', 'GET', 'foo_bar:get', '/foo/bar'),
('/foo/bar', 'POST', 'foo_bar:post', '/foo/bar/'),
('/foo/bar', ['PUT', 'PATCH'], 'foo_bar:put,patch', 'foo/bar'),
('foo@/named/foo', ['PUT', 'PATCH'], 'foo@:put,patch', '/named/foo'),
('bar@/named/bar', ['PUT', 'PATCH'], 'bar@:put,patch', '/named/bar'),
('/foo/bar1', 'GET', 404, ['/foo/ba', '/foo/ba12']),
('/foo/bar1', 'POST', 405, '/foo/bar1:PATCH'),
('/foo/<re(pro.+?(?=l))>le/:user/bar', 'GET', dict(user='tom'), '/foo/profile/tom/bar'),
('/re/{re(to.)}/bar', 'GET', 're:get', '/re/tom/bar'),
('/re/{:re(to.)}/bar', 'PUT', 're:put', '/re/tom/bar'),
('/re/{name:re(to.)}/bar', 'POST', dict(name='tom'), '/re/tom/bar'),
('/re/{name:re(to.)}/bar1', 'GET', dict(name='tos'), '/re/tos/bar1'),
('/re/{surname:re(to.)}/bar2', 'GET', dict(surname='tok'), '/re/tok/bar2/'),
('/path/{pth:path()}/end', 'GET', dict(pth='this/path/to'), '/path/this/path/to/end'),
('/path1/{pth:path()}end', 'GET', dict(pth='this/path/to-'), '/path1/this/path/to-end'),
]
def expand_params():
    """Flatten route_meth_handler_path into [name, rule, meth, handler, path]
    rows, emitting one row per test path (entries may list several paths)."""
    rows = []
    for rule, meth, handler, path in route_meth_handler_path:
        # Rules of the form "name@rule" carry an explicit route name.
        name = None
        if '@' in rule:
            name, rule = rule.split('@', 1)
        paths = path if isinstance(path, list) else [path]
        for single_path in paths:
            rows.append([name, rule, meth, handler, single_path])
    return rows
def make_router():
    # Build a RadiRouter populated with every entry of route_meth_handler_path.
    # Rules spelled "name@rule" are registered under that route name.
    router = RadiRouter()
    for it in route_meth_handler_path:
        rule, meth, handler, path = it
        name = None
        if '@' in rule:
            name, rule = rule.split('@', 1)
        router.add(rule, meth, handler, name=name or None)
    return router
def exist_rule_paths():
    """Return [name, rule, path] for each distinct, resolvable rule.

    Entries whose expected outcome is a 404/405 error are excluded, and
    rules are de-duplicated by their compiled pattern so each route appears
    once, paired with the first path that exercises it.
    """
    # Fix: dropped the leftover `print(name or '<no>', rule)` debug line —
    # this function runs at pytest collection time (it feeds parametrize),
    # so the print spammed stdout on every run.
    seen = set()
    ret = []
    for name, rule, meth, handler, path in expand_params():
        pattern = RadiRouter.to_pattern(rule)
        if handler not in (404, 405) and pattern not in seen:
            ret.append([name, rule, path])
            seen.add(pattern)
    return ret
@pytest.fixture
def fresh_router():
    # Function-scoped: a brand-new router per test (for tests that mutate it).
    return make_router()
@pytest.fixture(scope='class')
def router():
    # Class-scoped: one router shared by all tests in a test class.
    return make_router()
@pytest.fixture
def routes():
    # Shallow copy of the route table so tests may modify their copy safely.
    return route_meth_handler_path[:]
@pytest.fixture(params = expand_params())
def routes_iter(request):
    # Parametrized: yields one [name, rule, meth, handler, path] row per test.
    return request.param
class TestRoutes:
    """Resolve every table path and check handler / method / error expectations."""
    def test_routes(self, router, routes_iter):
        name, rule, meth, handler, path = routes_iter
        # A path may carry a method override after ':' (e.g. '/foo/bar1:PATCH').
        path, _, path_meth = path.partition(':')
        end_point, err404_405 = router.resolve(path, path_meth or meth)
        if end_point is None:
            # Expected failure: the table stores the error code in the
            # `handler` slot (404 = no route, 405 = method not allowed).
            assert handler in {404, 405}
            assert err404_405[0] == handler
        else:
            assert handler is not None
            route_meth, params, hooks = end_point
            assert route_meth.handler == handler
            if isinstance(meth, str):
                assert route_meth.name == meth
            else:
                # Multi-method registration: the route method reports the
                # first method of the list as its name (per this assertion).
                assert route_meth.name == meth[0]
            if params:
                # For parametrized rules the table stores the expected
                # captured-params dict in the `handler` slot.
                assert params == handler
            if name:
                # Named routes are addressable via router[name][method].
                assert router[name][meth] is route_meth
def test_overwrite_error():
    """Re-adding an already-registered method for a rule must raise
    RouteMethodError mentioning the rule and 'already registered'."""
    router = RadiRouter()
    route = '/foo/bar'
    def h():
        pass
    router.add(route, ['GET', 'POST'], h)
    # Second GET registration on the same rule must be rejected.
    with pytest.raises(RouteMethodError) as exc_info:
        router.add(route, 'GET', h)
    assert route in str(exc_info.value)
    assert 'already registered' in str(exc_info.value)
def test_str_repr():
    """str()/repr() of a resolved route method must include the handler's
    qualified name; repr() must additionally include the rule."""
    router = RadiRouter()
    route = '/foo/bar'
    def h():
        pass
    router.add(route, ['GET', 'POST'], h)
    end_point, err404_405 = router.resolve(route, 'GET')
    route_meth, *_ = end_point
    assert h.__qualname__ in str(route_meth)
    assert 'GET' in str(route_meth)
    assert h.__qualname__ in repr(route_meth)
    assert route in repr(route_meth)
class TestRemove:
    """Removing a rule must unregister it from path, pattern and name lookups."""
    @pytest.mark.parametrize(
        'name, rule, path',
        exist_rule_paths()
    )
    def test_remove_route(self, router: RadiRouter, name, rule, path):
        # NOTE(review): indexing with a set literal (router[{rule}]) appears
        # to be RadiRouter's lookup-by-rule-pattern API — confirm upstream.
        route = router[{rule}]
        assert route
        assert router.resolve(path) is route
        if name:
            assert router[name] is route
        router.remove(rule)
        # After removal: path, pattern, and name lookups all come back empty.
        assert router.resolve(path) is None
        assert router[{rule}] is None
        if name:
            assert router[name] is None
        route_meth, err = router.resolve(path, 'GET')
        assert err[0] == 404
def test_remove_method(fresh_router: RadiRouter):
    """Removing one method leaves the route resolvable but returns 405
    (method not allowed, rather than 404) for the removed method."""
    router = fresh_router
    route: Route = router.resolve('/foo/bar')
    assert route['GET']
    route.remove_method('GET')
    assert 'GET' not in route.methods
    route_meth, err = router.resolve('/foo/bar', 'GET')
    assert err[0] == 405
    # The route itself still exists (other methods remain registered).
    assert router.resolve('/foo/bar')
| 29.742515
| 92
| 0.582444
| 1,385
| 0.27884
| 0
| 0
| 850
| 0.171129
| 0
| 0
| 934
| 0.188041
|
5a06baf447f7c7644ae324b314d4d848bee4ba67
| 12,225
|
py
|
Python
|
app_api/serializers.py
|
pkucsie/SIEPServer
|
00b0637eb8302135dfc772fccd18cd749a93e5c6
|
[
"Apache-2.0"
] | 2
|
2021-02-12T10:02:42.000Z
|
2021-03-15T13:08:04.000Z
|
app_api/serializers.py
|
pkucsie/SIEPServer
|
00b0637eb8302135dfc772fccd18cd749a93e5c6
|
[
"Apache-2.0"
] | null | null | null |
app_api/serializers.py
|
pkucsie/SIEPServer
|
00b0637eb8302135dfc772fccd18cd749a93e5c6
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import time
from utils import utils
from rest_framework import serializers
from rest_framework.relations import StringRelatedField
from app_api.models import Album, Info, Order, Coupon, Integral, Notice, Lesson, Question, Cart, Setup, User, Bill, Address, Catalog, Log, \
ReadType, Teacher, Comment, \
Hot, Recharge, LabelFollow, Student, Navigation, Read, Article, History, Qa, ArticleType, UserNotice, Slider, \
UserLesson, Nav, LabelType, \
IntegralType, Label, Footer, CommonPathConfig, StudentType, LessonType, LessonHardType, Chapter, Term, QaType, \
RechargeAction, RechargePay, \
CouponRange, CouponStatus, OrderItem, OrderStatus, Consult, ReadChapterItem, ReadChapter, LogType, VipGuest, Judge, \
Organization, TaskTimeline, Project, Score, WXAdmin, WXUser
class ConsultSerializer(serializers.ModelSerializer):
class Meta:
model = Consult
fields = "__all__"
class OrderStatusSerializer(serializers.ModelSerializer):
class Meta:
model = OrderStatus
fields = ["text", "code"]
class OrderItemSerializer(serializers.ModelSerializer):
class Meta:
model = OrderItem
fields = "__all__"
class OrderWaySerializer(serializers.ModelSerializer):
class Meta:
model = RechargePay
fields = ["text", "code"]
class OrderListSerializer(serializers.ModelSerializer):
status = OrderStatusSerializer()
way = OrderWaySerializer()
list = OrderItemSerializer(many=True)
class Meta:
model = Order
fields = "__all__"
class OrderSerializer(serializers.ModelSerializer):
class Meta:
model = Order
fields = "__all__"
class OrderInfoSerializer(serializers.ModelSerializer):
list = OrderItemSerializer(many=True)
class Meta:
model = Order
fields = "__all__"
class CouponRangeSerializer(serializers.ModelSerializer):
class Meta:
model = CouponRange
fields = ["text", "code"]
class CouponStatusSerializer(serializers.ModelSerializer):
class Meta:
model = CouponStatus
fields = ["text", "code"]
class CouponSerializer(serializers.ModelSerializer):
range = CouponRangeSerializer()
status = CouponStatusSerializer()
starttime = serializers.DateTimeField(format="%Y.%m.%d")
endtime = serializers.DateTimeField(format="%Y.%m.%d")
class Meta:
model = Coupon
fields = "__all__"
class IntegralSerializer(serializers.ModelSerializer):
class Meta:
model = Integral
fields = "__all__"
class UserNoticeSerializer(serializers.ModelSerializer):
class Meta:
model = UserNotice
fields = "__all__"
class NoticeSerializer(serializers.ModelSerializer):
class Meta:
model = Notice
fields = "__all__"
class LabelTypeHomeSerializer(serializers.ModelSerializer):
class Meta:
model = LabelType
fields = ["code", "title"]
class LabelTypeSerializer(serializers.ModelSerializer):
text = serializers.CharField(source="title")
class Meta:
model = LabelType
fields = ["code", "text"]
class LessonTypeSerializer(serializers.ModelSerializer):
class Meta:
model = LessonType
fields = ["code", "text"]
class LabelSerializer(serializers.ModelSerializer):
type = LabelTypeSerializer()
class Meta:
model = Label
fields = "__all__"
class LabelInnerLessonSerializer(serializers.ModelSerializer):
class Meta:
model = Label
fields = ["title"]
class LessonHardTypeSerializer(serializers.ModelSerializer):
class Meta:
model = LessonHardType
fields = ["code", "text"]
class TaskTimelineSerializer(serializers.ModelSerializer):
class Meta:
model = TaskTimeline
fields = "__all__"
class OrganizationSerializer(serializers.ModelSerializer):
class Meta:
model = Organization
fields = "__all__"
class VipGuestSerializer(serializers.ModelSerializer):
class Meta:
model = VipGuest
fields = "__all__"
class JudgeSerializer(serializers.ModelSerializer):
class Meta:
model = Judge
fields = "__all__"
class ScoreSerializer(serializers.ModelSerializer):
class Meta:
model = Score
fields = "__all__"
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = "__all__"
class TeacherSerializer(serializers.ModelSerializer):
class Meta:
model = Teacher
fields = "__all__"
class LessonSerializer(serializers.ModelSerializer):
category = LabelTypeSerializer()
type = LessonTypeSerializer()
labels = StringRelatedField(many=True)
teacher = TeacherSerializer()
class Meta:
model = Lesson
fields = "__all__"
class LessonInfoSerializer(serializers.ModelSerializer):
category = LabelTypeSerializer()
type = LessonTypeSerializer()
labels = StringRelatedField(many=True)
hard = LessonHardTypeSerializer()
teacher = TeacherSerializer()
class Meta:
model = Lesson
fields = "__all__"
class TermSerializer(serializers.ModelSerializer):
class Meta:
model = Term
fields = "__all__"
class ChapterSerializer(serializers.ModelSerializer):
term = TermSerializer(many=True)
class Meta:
model = Chapter
fields = "__all__"
class QuestionSerializer(serializers.ModelSerializer):
class Meta:
model = Question
fields = "__all__"
class CartSerializer(serializers.ModelSerializer):
class Meta:
model = Cart
fields = "__all__"
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = "__all__"
class AddressSerializer(serializers.ModelSerializer):
class Meta:
model = Address
fields = "__all__"
class CatalogSerializer(serializers.ModelSerializer):
class Meta:
model = Catalog
fields = "__all__"
class LogTypeSerializer(serializers.ModelSerializer):
class Meta:
model = LogType
fields = ["text", "code"]
class LogSerializer(serializers.ModelSerializer):
type = LogTypeSerializer()
class Meta:
model = Log
fields = "__all__"
class ReadTypeSerializer(serializers.ModelSerializer):
class Meta:
model = ReadType
fields = "__all__"
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = "__all__"
class HotSerializer(serializers.ModelSerializer):
class Meta:
model = Hot
fields = "__all__"
class RechargeActionSerializer(serializers.ModelSerializer):
class Meta:
model = RechargeAction
fields = ["text", "code"]
class RechargePaySerializer(serializers.ModelSerializer):
action = RechargeActionSerializer
class Meta:
model = RechargePay
fields = ["text", "code"]
class RechargeListSerializer(serializers.ModelSerializer):
action = RechargeActionSerializer()
way = RechargePaySerializer()
class Meta:
model = Recharge
fields = "__all__"
class BillSerializer(serializers.ModelSerializer):
    """Serializer for Bill records; adds a generated `orderno` field."""
    way = RechargePaySerializer()
    orderno = serializers.SerializerMethodField()

    class Meta:
        model = Bill
        fields = "__all__"

    @staticmethod
    def get_orderno(obj):
        """Build an order number as YYYYMMDD + current unix timestamp.

        NOTE(review): the value is derived from the clock, not from `obj`
        (presumably intentional — a fresh number per serialization); confirm
        with callers.
        """
        # Fix: capture `now` once — the original called datetime.now() three
        # separate times, so year/month/day could disagree if the date rolled
        # over between calls. `:02d` replaces the manual zero-padding.
        now = datetime.datetime.now()
        return f'{now.year}{now.month:02d}{now.day:02d}{int(time.time())}'
class RechargeSerializer(serializers.ModelSerializer):
class Meta:
model = Recharge
fields = "__all__"
class LabelFollowSerializer(serializers.ModelSerializer):
class Meta:
model = LabelFollow
fields = "__all__"
class StudentTypeSerializer(serializers.ModelSerializer):
class Meta:
model = StudentType
fields = ["code", "text"]
class StudentSerializer(serializers.ModelSerializer):
type = StudentTypeSerializer()
class Meta:
model = Student
fields = "__all__"
class NavigationSerializer(serializers.ModelSerializer):
class Meta:
model = Navigation
fields = "__all__"
class ReadChapterItemSerializer(serializers.ModelSerializer):
class Meta:
model = ReadChapterItem
fields = "__all__"
class ReadChapterSerializer(serializers.ModelSerializer):
data = ReadChapterItemSerializer(source="chapter_item", many=True)
class Meta:
model = ReadChapter
fields = "__all__"
class ReadSerializer(serializers.ModelSerializer):
author = TeacherSerializer()
tryRead = serializers.SerializerMethodField()
class Meta:
model = Read
fields = "__all__"
@staticmethod
def get_tryRead(obj):
chapters = ReadChapter.objects.filter(read=obj).values_list("id", flat=True)
chapter_items = ReadChapterItem.objects.filter(read_chapter_id__in=chapters, isTry=True).values_list("title", flat=True)
return chapter_items
class ReadInfoSerializer(serializers.ModelSerializer):
author = TeacherSerializer()
chapter = ReadChapterSerializer(many=True)
class Meta:
model = Read
fields = "__all__"
class ArticleSerializer(serializers.ModelSerializer):
type = serializers.IntegerField(source="type.code")
class Meta:
model = Article
fields = "__all__"
class HistorySerializer(serializers.ModelSerializer):
class Meta:
model = History
fields = "__all__"
class QaTypeSerializer(serializers.ModelSerializer):
class Meta:
model = QaType
fields = ["text", "code"]
class QaSerializer(serializers.ModelSerializer):
status = QaTypeSerializer(source="type")
class Meta:
model = Qa
fields = "__all__"
class ArticleTypeSerializer(serializers.ModelSerializer):
class Meta:
model = ArticleType
fields = "__all__"
class SliderSerializer(serializers.ModelSerializer):
class Meta:
model = Slider
fields = "__all__"
class UserLessonSerializer(serializers.ModelSerializer):
type = LessonTypeSerializer()
class Meta:
model = UserLesson
fields = "__all__"
class NavSerializer(serializers.ModelSerializer):
class Meta:
model = Nav
fields = "__all__"
class IntegralTypeSerializer(serializers.ModelSerializer):
class Meta:
model = IntegralType
fields = "__all__"
class FooterSerializer(serializers.ModelSerializer):
class Meta:
model = Footer
fields = "__all__"
class CommonPathConfigSerializer(serializers.ModelSerializer):
class Meta:
model = CommonPathConfig
fields = "__all__"
class AlbumSerializer(serializers.ModelSerializer):
class Meta:
model = Album
fields = "__all__"
class InfoSerializer(serializers.ModelSerializer):
class Meta:
model = Info
fields = "__all__"
class SetupSerializer(serializers.ModelSerializer):
class Meta:
model = Setup
fields = "__all__"
class WxuserSerializer(serializers.ModelSerializer):
    # Serializer for WXUser with humanized output fields.
    def to_representation(self, instance):
        """Post-process the serialized dict: USER_LOGIN_TIME is replaced via
        utils.get_agostr (presumably a relative "time ago" string) and
        USER_BIRTH via utils.get_age (presumably an age in years) — confirm
        against the utils module.

        NOTE(review): the previous docstring ("Convert `username` to
        lowercase.") did not describe this code.
        """
        ret = super().to_representation(instance)
        ret['USER_LOGIN_TIME'] = utils.get_agostr(ret['USER_LOGIN_TIME'])
        ret['USER_BIRTH'] = utils.get_age(ret['USER_BIRTH'])
        return ret
    class Meta:
        model = WXUser
        fields = "__all__"
class WxuserFullSerializer(serializers.ModelSerializer):
    # Full WXUser serializer: keeps USER_BIRTH as a formatted date.
    def to_representation(self, instance):
        """Post-process the serialized dict: USER_BIRTH is replaced via
        utils.get_ymd (presumably a year-month-day string) — confirm against
        the utils module.

        NOTE(review): the previous docstring ("Convert `username` to
        lowercase.") did not describe this code.
        """
        ret = super().to_representation(instance)
        ret['USER_BIRTH'] = utils.get_ymd(ret['USER_BIRTH'])
        return ret
    class Meta:
        model = WXUser
        fields = "__all__"
class WxadminSerializer(serializers.ModelSerializer):
class Meta:
model = WXAdmin
fields = "__all__"
| 23.41954
| 140
| 0.674683
| 11,203
| 0.916401
| 0
| 0
| 612
| 0.050061
| 0
| 0
| 954
| 0.078037
|
5a07b3f93f0df160b35b13e2ca081e2f2413ce44
| 718
|
py
|
Python
|
6_API/pytorch/configure.py
|
misoA/DeepCalendar
|
50cafc1e70f125f3b6b42cd88e1e9dd071676b49
|
[
"MIT"
] | null | null | null |
6_API/pytorch/configure.py
|
misoA/DeepCalendar
|
50cafc1e70f125f3b6b42cd88e1e9dd071676b49
|
[
"MIT"
] | 3
|
2019-01-14T06:59:24.000Z
|
2019-01-14T07:48:38.000Z
|
6_API/pytorch/configure.py
|
misoA/DeepCalendar
|
50cafc1e70f125f3b6b42cd88e1e9dd071676b49
|
[
"MIT"
] | 5
|
2019-01-08T05:01:26.000Z
|
2021-05-17T23:34:51.000Z
|
# -*- coding: utf-8 -*-
# Single place for project-wide configuration values.

# Training-environment selector: AWS = 0, own PC = 1.
PC = 1
path_list = ["/jet/prs/workspace/", "."]
url = path_list[PC]

# Label vocabularies used by the classifier.
clothes = [
    'shirt', 'jeans', 'blazer', 'chino-pants', 'jacket', 'coat', 'hoody',
    'training-pants', 't-shirt', 'polo-shirt', 'knit', 'slacks',
    'sweat-shirt',
]
schedule = [
    'party', 'trip', 'sport', 'work', 'speech', 'daily', 'school', 'date',
]
weather = ['snow', 'sunny', 'cloudy', 'rain']
| 17.95
| 63
| 0.431755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 371
| 0.516713
|
5a0835b17e7c0f765c8aa93d7341da5395fe71d2
| 32
|
py
|
Python
|
provider/__init__.py
|
depop/django-oauth2-provider
|
afcdef72747233dc0259a4bc068a8086ba7a69d3
|
[
"MIT"
] | 1
|
2020-05-10T00:11:05.000Z
|
2020-05-10T00:11:05.000Z
|
provider/__init__.py
|
depop/django-oauth2-provider
|
afcdef72747233dc0259a4bc068a8086ba7a69d3
|
[
"MIT"
] | 1
|
2016-05-23T15:22:41.000Z
|
2016-05-23T15:22:41.000Z
|
provider/__init__.py
|
depop/django-oauth2-provider
|
afcdef72747233dc0259a4bc068a8086ba7a69d3
|
[
"MIT"
] | null | null | null |
# Package version; "+depop.6.1" is a PEP 440 local-version label.
__version__ = "0.2.7+depop.6.1"
| 16
| 31
| 0.65625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0.53125
|
5a0841fa1b97d80f5fc2c97be82b59ce57dfb2d4
| 7,381
|
py
|
Python
|
python/craftassist/voxel_models/subcomponent_classifier.py
|
kayburns/craftassist
|
07909493d320afc2c9ff428d0891bc3acd4dc68f
|
[
"MIT"
] | null | null | null |
python/craftassist/voxel_models/subcomponent_classifier.py
|
kayburns/craftassist
|
07909493d320afc2c9ff428d0891bc3acd4dc68f
|
[
"MIT"
] | null | null | null |
python/craftassist/voxel_models/subcomponent_classifier.py
|
kayburns/craftassist
|
07909493d320afc2c9ff428d0891bc3acd4dc68f
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import logging
import queue
from multiprocessing import Queue, Process
import sys
import os
from mc_memory_nodes import InstSegNode, PropSegNode
from heuristic_perception import all_nearby_objects
from shapes import get_bounds
VISION_DIR = os.path.dirname(os.path.realpath(__file__))
CRAFTASSIST_DIR = os.path.join(VISION_DIR, "../")
SEMSEG_DIR = os.path.join(VISION_DIR, "semantic_segmentation/")
sys.path.append(CRAFTASSIST_DIR)
sys.path.append(SEMSEG_DIR)
import build_utils as bu
from semseg_models import SemSegWrapper
# TODO all "subcomponent" operations are replaced with InstSeg
class SubcomponentClassifierWrapper:
    """Agent-side wrapper around a background SubComponentClassifier process.

    Feeds nearby block objects into the classifier's input queue, drains its
    output queue, announces found labels in chat, and stores uncontaminated
    segmentations as InstSegNode memories.
    """
    def __init__(self, agent, model_path, vocab_path, perceive_freq=0):
        self.agent = agent
        self.memory = self.agent.memory
        # Run perceive() every `perceive_freq` agent steps; 0 = only when forced.
        self.perceive_freq = perceive_freq
        self.true_temp = 1
        if model_path is not None:
            self.subcomponent_classifier = SubComponentClassifier(
                voxel_model_path=model_path, vocab_path=vocab_path,
            )
            self.subcomponent_classifier.start()
        else:
            self.subcomponent_classifier = None

    def perceive(self, force=False):
        """Queue nearby block objects for segmentation and ingest any results
        the classifier process has finished since the last call."""
        if self.perceive_freq == 0 and not force:
            return
        if self.perceive_freq > 0 and self.agent.count % self.perceive_freq != 0 and not force:
            return
        if self.subcomponent_classifier is None:
            return
        # TODO don't all_nearby_objects again, search in memory instead
        to_label = []
        # add all blocks in marked areas
        for pos, radius in self.agent.areas_to_perceive:
            for obj in all_nearby_objects(self.agent.get_blocks, pos, radius):
                to_label.append(obj)
        # add all blocks near the agent
        for obj in all_nearby_objects(self.agent.get_blocks, self.agent.pos):
            to_label.append(obj)
        for obj in to_label:
            self.subcomponent_classifier.block_objs_q.put(obj)
        # every time, retrieve as many recognition results as are available
        while not self.subcomponent_classifier.loc2labels_q.empty():
            loc2labels, obj = self.subcomponent_classifier.loc2labels_q.get()
            loc2ids = dict(obj)
            label2blocks = {}

            def contaminated(blocks):
                """
                Check if blocks are still consistent with the current world
                """
                mx, Mx, my, My, mz, Mz = get_bounds(blocks)
                yzxb = self.agent.get_blocks(mx, Mx, my, My, mz, Mz)
                for b, _ in blocks:
                    x, y, z = b
                    if loc2ids[b][0] != yzxb[y - my, z - mz, x - mx, 0]:
                        return True
                return False

            # group predicted locations by label
            for loc, labels in loc2labels.items():
                b = (loc, loc2ids[loc])
                for l in labels:
                    if l in label2blocks:
                        label2blocks[l].append(b)
                    else:
                        label2blocks[l] = [b]
            found_labels = list(label2blocks.keys())
            labels_str = " ".join(found_labels)
            # BUG FIX: the original tested len(labels_str) — the *character*
            # count of the joined string — so any single label longer than one
            # character produced the plural message. Compare the number of
            # labels instead.
            if len(found_labels) == 1:
                self.agent.send_chat(
                    "I found this in the scene: " + labels_str
                )
            elif len(found_labels) > 1:
                self.agent.send_chat(
                    "I found these in the scene: " + labels_str
                )
            for l, blocks in label2blocks.items():
                ## if the blocks are contaminated we just ignore
                if not contaminated(blocks):
                    InstSegNode.create(
                        self.memory, blocks, [l, 'semseg'])

    def update(self, label, blocks, house):
        # intentionally a no-op; online model updates are disabled
        pass
        #self.subcomponent_classifier.to_update_q.put((label, blocks, house))
class SubComponentClassifier(Process):
    """
    A classifier class that calls a voxel model to output object tags.

    Runs as a daemon child process: block objects arrive on `block_objs_q`,
    per-location label predictions are returned on `loc2labels_q`.
    """
    def __init__(self, voxel_model_path=None, vocab_path=None, true_temp=1):
        super().__init__()
        if voxel_model_path is not None:
            logging.info(
                "SubComponentClassifier using voxel_model_path={}".format(voxel_model_path)
            )
            self.model = SemSegWrapper(voxel_model_path, vocab_path)
        else:
            raise Exception("specify a segmentation model")
        self.block_objs_q = Queue()  # store block objects to be recognized
        self.loc2labels_q = Queue()  # store loc2labels dicts to be retrieved by the agent
        #self.to_update_q = Queue()
        self.daemon = True
    def run(self):
        """
        The main recognition loop of the classifier
        """
        while True: # run forever
            #for _ in range(100):
            # print("If I print here, it solves the bug ¯\_(ツ)_/¯, priority thing?")
            # blocks until the agent enqueues a block object
            tb = self.block_objs_q.get(block=True, timeout=None)
            loc2labels = self._watch_single_object(tb)
            # every location additionally gets the catch-all "house" tag
            for k in loc2labels.keys():
                loc2labels[k].append("house")
            self.loc2labels_q.put((loc2labels, tb))
            #try:
            # label, blocks, house = self.to_update_q.get_nowait()
            # self.update(label, blocks, house)
            #except queue.Empty:
            # pass
    def _watch_single_object(self, tuple_blocks, t=1):
        """
        Input: a list of tuples, where each tuple is ((x, y, z), [bid, mid]). This list
        represents a block object.
        Output: a dict of (loc, [tag1, tag2, ..]) pairs for all non-air blocks.

        `t` is the softmax temperature forwarded to the model as `T`.
        """
        def get_tags(p):
            """
            convert a list of tag indices to a list of tags
            """
            return [self.model.tags[i][0] for i in p]
        def apply_offsets(cube_loc, offsets):
            """
            Convert the cube location back to world location
            """
            return (cube_loc[0] + offsets[0], cube_loc[1] + offsets[1], cube_loc[2] + offsets[2])
        np_blocks, offsets = bu.blocks_list_to_npy(blocks=tuple_blocks, xyz=True)
        pred = self.model.segment_object(np_blocks, T=t)
        # convert prediction results to string tags
        return dict([(apply_offsets(loc, offsets), get_tags([p])) for loc, p in pred.items()])
    def recognize(self, list_of_tuple_blocks):
        """
        Multiple calls to _watch_single_object
        """
        tags = dict()
        for tb in list_of_tuple_blocks:
            tags.update(self._watch_single_object(tb))
        return tags
    def update(self, label, blocks, house):
        # changes can come in from adds or removals, if add, update house
        logging.info("Updated label {}".format(label))
        # positive x of the first block — presumably the add/remove sentinel;
        # TODO confirm against callers
        if blocks[0][0][0] > 0:
            house += blocks
            blocks = [(xyz, (1, 0)) for xyz, _ in blocks]
        np_house, offsets = bu.blocks_list_to_npy(blocks=house, xyz=True)
        np_blocks, _ = bu.blocks_list_to_npy(
            blocks=blocks, xyz=False, offsets=offsets, shape=np_house.shape) # shape is still xyz bc of shape arg
        self.model.update(label, np_blocks, np_house)
| 38.243523
| 116
| 0.582441
| 6,731
| 0.911442
| 0
| 0
| 0
| 0
| 0
| 0
| 1,941
| 0.26283
|
5a0b2d031fe808c99bfba67eaa85c3e839cc5992
| 197
|
py
|
Python
|
tests/test_problem16.py
|
nolanwrightdev/blind-75-python
|
b92ef3449eb0143c760ddd339897a3f0a2972830
|
[
"MIT"
] | 6
|
2020-02-01T23:29:51.000Z
|
2022-02-20T20:46:56.000Z
|
tests/test_problem16.py
|
nolanwrightdev/blind-75-python
|
b92ef3449eb0143c760ddd339897a3f0a2972830
|
[
"MIT"
] | null | null | null |
tests/test_problem16.py
|
nolanwrightdev/blind-75-python
|
b92ef3449eb0143c760ddd339897a3f0a2972830
|
[
"MIT"
] | null | null | null |
import unittest
from problems.problem16 import solution
class Test(unittest.TestCase):
    def test(self):
        """Exercise problem16's solution with one truthy and one falsy input."""
        # NOTE(review): these are the canonical Jump Game examples (is the
        # last index reachable?) — confirm against problems/problem16.
        self.assertTrue(solution([2, 3, 1, 1, 4]))
        self.assertFalse(solution([3, 2, 1, 0, 4]))
| 21.888889
| 45
| 0.71066
| 138
| 0.700508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5a0b50f8318c63395085bc807823eccbb8a5e4b9
| 510
|
py
|
Python
|
project/dynamic.py
|
andresitodeguzman/smspy
|
29b9feb4356de5dbd1a5d222d38d45396a349d23
|
[
"Apache-2.0"
] | 4
|
2017-01-27T05:15:09.000Z
|
2020-12-08T13:24:19.000Z
|
project/dynamic.py
|
andresitodeguzman/smspy
|
29b9feb4356de5dbd1a5d222d38d45396a349d23
|
[
"Apache-2.0"
] | 1
|
2019-05-20T15:09:53.000Z
|
2019-05-20T15:09:53.000Z
|
project/dynamic.py
|
andresitodeguzman/smspy
|
29b9feb4356de5dbd1a5d222d38d45396a349d23
|
[
"Apache-2.0"
] | null | null | null |
##
## DYNAMIC
##
## Import the module explicitly (import dynamics.<module_name> as module_name)
import dynamics.root as root
## Register all modules for checking here. If something interferes, rearrange the order
## module_name_ = module_name.do(params)
def responseQuery(number, body):
    """Dispatch an incoming SMS to the registered dynamic modules.

    Each registered module's ``do(number, body)`` is tried in order; the
    first truthy response wins.  Returns False when no module handled
    the message.
    """
    # `root` is currently the only registered module.
    result = root.do(number, body)
    return result if result else False
| 21.25
| 88
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 295
| 0.578431
|
5a0bac916180eec03144ad684ddb2ec3547f8ee7
| 288
|
py
|
Python
|
accounts/urls.py
|
mishrakeshav/Django-Real-Estate-Website
|
4f6146ad8d13003f890677c2c1af82b26c69678b
|
[
"MIT"
] | null | null | null |
accounts/urls.py
|
mishrakeshav/Django-Real-Estate-Website
|
4f6146ad8d13003f890677c2c1af82b26c69678b
|
[
"MIT"
] | 7
|
2021-04-08T20:21:35.000Z
|
2022-01-13T03:27:33.000Z
|
accounts/urls.py
|
mishrakeshav/Django-Real-Estate-Website
|
4f6146ad8d13003f890677c2c1af82b26c69678b
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
# Every route's URL name matches its path, so build the patterns from one
# table instead of repeating each name three times.
urlpatterns = [
    path(route, view, name=route)
    for route, view in (
        ('login', views.login),
        ('register', views.register),
        ('logout', views.logout),
        ('dashboard', views.dashboard),
    )
]
| 28.8
| 59
| 0.645833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 72
| 0.25
|
5a0e378937b9fd8ab97a5e345d693d92224ab800
| 4,333
|
py
|
Python
|
src/past/types/oldstr.py
|
kianmeng/python-future
|
80523f383fbba1c6de0551e19d0277e73e69573c
|
[
"MIT"
] | 908
|
2015-01-01T21:20:45.000Z
|
2022-03-29T20:47:16.000Z
|
src/past/types/oldstr.py
|
kianmeng/python-future
|
80523f383fbba1c6de0551e19d0277e73e69573c
|
[
"MIT"
] | 402
|
2015-01-04T01:30:19.000Z
|
2022-03-24T11:56:38.000Z
|
src/past/types/oldstr.py
|
kianmeng/python-future
|
80523f383fbba1c6de0551e19d0277e73e69573c
|
[
"MIT"
] | 305
|
2015-01-18T19:29:37.000Z
|
2022-03-24T09:40:09.000Z
|
"""
Pure-Python implementation of a Python 2-like str object for Python 3.
"""
from numbers import Integral
from past.utils import PY2, with_metaclass
if PY2:
from collections import Iterable
else:
from collections.abc import Iterable
_builtin_bytes = bytes
class BaseOldStr(type):
    # Metaclass for oldstr: makes isinstance(x, oldstr) succeed for any
    # native bytes object, not just actual oldstr instances.
    def __instancecheck__(cls, instance):
        return isinstance(instance, _builtin_bytes)
def unescape(s):
    r"""Interpret backslash escape sequences in *s* and return the result.

    The string is round-tripped through bytes with the ``unicode_escape``
    codec, so escape sequences are decoded the way a Python source literal
    would be: ``'abc\\ndef'`` becomes ``'abc\ndef'`` and ``r'a\\b'``
    becomes ``'a\b'``.
    """
    encoded = s.encode()
    return encoded.decode('unicode_escape')
class oldstr(with_metaclass(BaseOldStr, _builtin_bytes)):
    """
    A forward port of the Python 2 8-bit string object to Py3
    """

    # Python 2 strings have no __iter__ method:
    @property
    def __iter__(self):
        raise AttributeError

    def __dir__(self):
        return [thing for thing in dir(_builtin_bytes) if thing != '__iter__']

    def __repr__(self):
        # Drop the leading "b" of the Py3 bytes repr (e.g. b'abc' -> 'abc')
        # so the object reprs like a Python 2 str.
        s = super(oldstr, self).__repr__()
        return s[1:]

    def __str__(self):
        # bytes.__str__ gives "b'abc'"; strip the b'...' wrapper and decode
        # escapes so printing behaves like a Py2 str ('abc' or 'abc\ndef').
        s = super(oldstr, self).__str__()
        # TODO: fix this:
        assert s[:2] == "b'" and s[-1] == "'"
        return unescape(s[2:-1])

    def __getitem__(self, y):
        # Py2 str indexing returns a length-1 string, whereas Py3 bytes
        # indexing returns an int, so integer indices become 1-item slices.
        if isinstance(y, Integral):
            return super(oldstr, self).__getitem__(slice(y, y+1))
        else:
            return super(oldstr, self).__getitem__(y)

    def __getslice__(self, *args):
        # Py2-only slicing protocol; delegate to __getitem__ with a slice.
        return self.__getitem__(slice(*args))

    def __contains__(self, key):
        # Py2 semantics: ints are never "in" a str; byte strings are tested
        # for substring containment.  (Previously every non-int key fell
        # through and returned None, so substring tests were always False.)
        if isinstance(key, int):
            return False
        if isinstance(key, _builtin_bytes):
            return super(oldstr, self).__contains__(key)
        # Other key types keep the historical falsy result.
        return False

    def __native__(self):
        # Hook used by the compatibility layer to get a native bytes object.
        return bytes(self)


__all__ = ['oldstr']
| 31.860294
| 95
| 0.558505
| 3,714
| 0.857143
| 0
| 0
| 62
| 0.014309
| 0
| 0
| 2,766
| 0.638357
|
5a0e75196f538319c5078d09117599bf367b0df0
| 1,208
|
py
|
Python
|
app/api/utlis/models.py
|
jurekpawlikowski/flask-boilerplate
|
15b7e6c4e0241a7d59dbca543e023a22b17b9903
|
[
"MIT"
] | 3
|
2017-08-05T08:57:37.000Z
|
2021-03-03T09:09:03.000Z
|
app/api/utlis/models.py
|
jurekpawlikowski/flask-boilerplate
|
15b7e6c4e0241a7d59dbca543e023a22b17b9903
|
[
"MIT"
] | null | null | null |
app/api/utlis/models.py
|
jurekpawlikowski/flask-boilerplate
|
15b7e6c4e0241a7d59dbca543e023a22b17b9903
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from sqlalchemy.event import listen
from app.factory import db
class BaseModel(db.Model):
    """
    Base model with `created_at` and `updated_at` fields
    """
    __abstract__ = True

    # Attribute names that serialize() exposes by default; subclasses override.
    fields_to_serialize = []

    created_at = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    updated_at = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)

    def save(self):
        """Add this instance to the session and commit."""
        db.session.add(self)
        db.session.commit()

    def delete(self):
        """Remove this instance from the session and commit."""
        db.session.delete(self)
        db.session.commit()

    @classmethod
    def all(cls):
        """Return every row of this model's table."""
        return cls.query.all()

    def serialize(self, fields=None):
        """Return a dict of the requested fields, ISO-formatting datetimes."""
        def _json_safe(value):
            # datetimes are not JSON-serializable; emit ISO-8601 strings.
            return value.isoformat() if isinstance(value, datetime) else value

        names = fields or self.fields_to_serialize
        return {name: _json_safe(getattr(self, name)) for name in names}
def set_updated_at(mapper, connection, target):
    """
    Refresh ``updated_at`` on the row being flushed.

    Registered below as a SQLAlchemy ``before_update`` mapper event, whose
    listeners are invoked as ``fn(mapper, connection, target)``.  The
    previous signature ``(target, value, oldvalue)`` matched the
    attribute-``set`` event instead, so the timestamp was being written onto
    the *connection* object rather than the updated row.  ``utcnow`` keeps
    the value consistent with the UTC defaults on ``created_at``/``updated_at``.
    """
    target.updated_at = datetime.utcnow()
listen(BaseModel, "before_update", set_updated_at)
| 23.686275
| 80
| 0.647351
| 885
| 0.732616
| 0
| 0
| 61
| 0.050497
| 0
| 0
| 163
| 0.134934
|
5a0ebed0bb4e1667aef392ee3608c9732dd33560
| 278
|
py
|
Python
|
systest/tests/test_rm.py
|
devconsoft/pycred
|
d72bdae2e703a87a7424f08af326834281b83fee
|
[
"MIT"
] | null | null | null |
systest/tests/test_rm.py
|
devconsoft/pycred
|
d72bdae2e703a87a7424f08af326834281b83fee
|
[
"MIT"
] | 5
|
2018-07-01T22:53:24.000Z
|
2018-07-17T21:54:10.000Z
|
systest/tests/test_rm.py
|
devconsoft/pycred
|
d72bdae2e703a87a7424f08af326834281b83fee
|
[
"MIT"
] | null | null | null |
def test_rm_long_opt_help(pycred):
    # `rm --help` must be accepted (default expectation: successful exit).
    pycred('rm --help')
def test_rm_short_opt_help(pycred):
    # The short form `-h` must behave like `--help`.
    pycred('rm -h')
def test_rm_none_existing_store_gives_exit_code_2(pycred, workspace):
    # In a clean workspace, removing a user from a store that was never
    # created must fail with exit code 2.
    with workspace():
        pycred('rm non-existing-store user', expected_exit_code=2)
| 23.166667
| 69
| 0.73741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 46
| 0.165468
|
5a0fd6978a62253af90bdbf0d79e056e97e5921d
| 1,391
|
py
|
Python
|
source/tweaks/cms_plugins.py
|
mverleg/svsite
|
5c9dbcacf81020cf0c1960e337bdd33113acd597
|
[
"BSD-3-Clause"
] | null | null | null |
source/tweaks/cms_plugins.py
|
mverleg/svsite
|
5c9dbcacf81020cf0c1960e337bdd33113acd597
|
[
"BSD-3-Clause"
] | 142
|
2015-06-05T07:53:09.000Z
|
2020-03-31T18:37:07.000Z
|
source/tweaks/cms_plugins.py
|
mdilli/svsite
|
5c9dbcacf81020cf0c1960e337bdd33113acd597
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Raw HTML widget.
Adapted/copied from https://github.com/makukha/cmsplugin-raw-html
"""
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.template import Template
from django.utils.safestring import mark_safe
from .models import RawHtmlModel, CMSMember
from django.utils.translation import ugettext as _
class RawHtmlPlugin(CMSPluginBase):
    """CMS plugin that renders the stored body as raw, unescaped HTML."""

    model = RawHtmlModel
    name = 'HTML'
    render_template = 'cms/raw_html_widget.html'
    text_enabled = True

    def render(self, context, instance, placeholder):
        # The stored body is itself treated as a Django template, then
        # marked safe so the output is not auto-escaped.
        rendered_body = Template(instance.body).render(context)
        context['body'] = mark_safe(rendered_body)
        context['object'] = instance
        context['placeholder'] = placeholder
        return context


plugin_pool.register_plugin(RawHtmlPlugin)
class MemberPlugin(CMSPluginBase):
    """
    This needs to be defined in `tweaks` because it has to be after `cms`, whereas
    `AUTH_USER_MODEL` needs to be loaded before `cms`.
    """
    model = CMSMember  # model where plugin data are saved
    module = _('Member')
    name = _('Member info')  # name of the plugin in the interface
    render_template = 'members_widget.html'

    def render(self, context, instance, placeholder):
        # Expose the plugin instance plus the member list to the template.
        context.update({
            'inst': instance,
            'title': instance.title,
            'description': instance.description,
            'users': instance.members.all(),
        })
        return context


plugin_pool.register_plugin(MemberPlugin)  # register the plugin
| 24.839286
| 79
| 0.757009
| 918
| 0.659957
| 0
| 0
| 0
| 0
| 0
| 0
| 424
| 0.304817
|
5a1046d61cc7585c8ffb76dc65a2afa1c14d62a9
| 3,296
|
py
|
Python
|
tests/test_trackings.py
|
EugeneLiu/aftership-sdk-python
|
37184272869452734d616b31295a4ac883051f5d
|
[
"MIT"
] | null | null | null |
tests/test_trackings.py
|
EugeneLiu/aftership-sdk-python
|
37184272869452734d616b31295a4ac883051f5d
|
[
"MIT"
] | null | null | null |
tests/test_trackings.py
|
EugeneLiu/aftership-sdk-python
|
37184272869452734d616b31295a4ac883051f5d
|
[
"MIT"
] | null | null | null |
from unittest import TestCase, mock
import pytest
from requests import Response
import aftership
class TrackingTestCase(TestCase):
    # Exercises the core tracking endpoints.  No assertions are made on the
    # responses: each test simply calls the SDK under @pytest.mark.vcr and
    # passes as long as no exception is raised — presumably the decorator
    # replays recorded HTTP cassettes (verify against the project's VCR
    # configuration).
    def setUp(self):
        # Fixed identifiers that must match the recorded cassettes.
        self.slug = "4px"
        self.tracking_number = "HH19260817"
        self.tracking_id = "k5lh7dy7vvqeck71p5loe011"
    @pytest.mark.vcr()
    def test_create_tracking(self):
        response = aftership.tracking.create_tracking(
            tracking={"slug": self.slug, "tracking_number": self.tracking_number}
        )
    @pytest.mark.vcr()
    def test_get_tracking(self):
        response = aftership.tracking.get_tracking(slug=self.slug, tracking_number=self.tracking_number)
    # @pytest.mark.vcr()
    # def test_delete_tracking(self):
    #     response = aftership.tracking.delete_tracking(slug='china-ems',tracking_number='1234567890')
    @pytest.mark.vcr()
    def test_list_trackings(self):
        response = aftership.tracking.list_trackings(slug=self.slug, limit=1)
    @pytest.mark.vcr()
    def test_update_tracking(self):
        response = aftership.tracking.update_tracking(tracking_id=self.tracking_id, tracking={"title": "new title"})
    @pytest.mark.vcr()
    def test_retrack(self):
        response = aftership.tracking.retrack(tracking_id=self.tracking_id)
    @pytest.mark.vcr()
    def test_get_last_checkpoint(self):
        response = aftership.tracking.get_last_checkpoint(tracking_id=self.tracking_id)
class TrackingWithAdditionalFieldsTestCase(TestCase):
    # Same endpoints as TrackingTestCase, but for a carrier (postnl-3s) that
    # requires extra identifying fields (destination country + postal code).
    # Responses are not asserted on; tests pass when the call completes
    # without raising under @pytest.mark.vcr.
    def setUp(self):
        # Fixed identifiers that must match the recorded cassettes.
        self.tracking_id = "wuuxyb7ohjx55kmpt5r7y017"
        self.slug = "postnl-3s"
        self.tracking_number = "3SKAAG5995399"
        self.destination_country = "ESP"
        self.postal_code = "46970"
    @pytest.mark.vcr()
    def test_create_tracking(self):
        response = aftership.tracking.create_tracking(
            tracking={
                "slug": self.slug,
                "tracking_number": self.tracking_number,
                "tracking_destination_country": self.destination_country,
                "tracking_postal_code": self.postal_code,
            }
        )
    @pytest.mark.vcr()
    def test_get_tracking(self):
        response = aftership.tracking.get_tracking(
            slug=self.slug,
            tracking_number=self.tracking_number,
            tracking_destination_country=self.destination_country,
            tracking_postal_code=self.postal_code,
        )
    @pytest.mark.vcr()
    def test_get_tracking_by_id(self):
        response = aftership.tracking.get_tracking(tracking_id=self.tracking_id)
    @pytest.mark.vcr()
    def test_update_tracking(self):
        response = aftership.tracking.update_tracking(tracking_id=self.tracking_id, tracking={"title": "new title"})
    @pytest.mark.vcr()
    def test_get_last_checkpoint(self):
        response = aftership.tracking.get_last_checkpoint(tracking_id=self.tracking_id)
    @pytest.mark.vcr()
    def test_get_tracking_with_internal_error(self):
        # This cassette is expected to replay a server-side failure that the
        # SDK surfaces as InternalError.
        with self.assertRaises(aftership.exception.InternalError):
            response = aftership.tracking.get_tracking(
                slug=self.slug,
                tracking_number=self.tracking_number,
                tracking_destination_country=self.destination_country,
                tracking_postal_code=self.postal_code,
            )
| 34.694737
| 116
| 0.681129
| 3,191
| 0.968143
| 0
| 0
| 2,491
| 0.755765
| 0
| 0
| 392
| 0.118932
|
5a105c110cc6114a77deee02c167af5066ada602
| 1,089
|
py
|
Python
|
071_caixaeletronico.py
|
laissilveira/python-exercises
|
906f7e46878b296ecb9b9df9fd39ec1e362ce3a4
|
[
"MIT"
] | null | null | null |
071_caixaeletronico.py
|
laissilveira/python-exercises
|
906f7e46878b296ecb9b9df9fd39ec1e362ce3a4
|
[
"MIT"
] | null | null | null |
071_caixaeletronico.py
|
laissilveira/python-exercises
|
906f7e46878b296ecb9b9df9fd39ec1e362ce3a4
|
[
"MIT"
] | null | null | null |
# ATM simulator: compute how many banknotes of each denomination are needed
# to pay out a requested withdrawal amount.
print('=' * 30)
print('{:^30}'.format('CAIXA ELETRÔNICO'))
print('=' * 30)
valor = int(input('Valor a ser sacado: R$ '))

# Brazilian Real (R$) note denominations, largest first, so the greedy
# divmod pass always uses the fewest notes.  This replaces the previous
# seven hand-chained modulo expressions and the pointless `while True`
# loop that executed exactly once before breaking.
CEDULAS = (200, 100, 50, 20, 10, 5, 2)

restante = valor
for cedula in CEDULAS:
    quantidade, restante = divmod(restante, cedula)
    if quantidade > 0:
        print(f'Total de {quantidade} cédula(s) de R$ {cedula},00.')
| 36.3
| 85
| 0.543618
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 476
| 0.433515
|
5a1066f50cc73cde3a7e5f65b4cffbe41ddedc46
| 575
|
py
|
Python
|
2020/04/Teil 1 - V01.py
|
HeWeMel/adventofcode
|
90acb10f03f21ef388673bbcf132d04972175970
|
[
"MIT"
] | 1
|
2020-12-12T19:34:59.000Z
|
2020-12-12T19:34:59.000Z
|
2020/04/Teil 1 - V01.py
|
HeWeMel/adventofcode
|
90acb10f03f21ef388673bbcf132d04972175970
|
[
"MIT"
] | null | null | null |
2020/04/Teil 1 - V01.py
|
HeWeMel/adventofcode
|
90acb10f03f21ef388673bbcf132d04972175970
|
[
"MIT"
] | null | null | null |
import sys  # NOTE(review): unused; kept to avoid changing module side effects

# Advent of Code 2020 day 4, part 1: count passports containing every
# required field.  Passports are separated by blank lines; a single
# passport's fields may span several lines.
REQUIRED_FIELDS = ('byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid')

# Read the whole file and split on blank lines.  The previous line-by-line
# parser stripped the last character of every line (corrupting the final
# field when the file lacks a trailing newline) and desynchronized its
# paragraph index on consecutive blank lines, which raised IndexError.
with open('input.txt', 'r') as f:
    passports = f.read().split('\n\n')

# A passport is valid when every required field key appears somewhere in it
# ('cid' is deliberately optional, matching the puzzle rules).
valids = sum(
    1 for passport in passports
    if all(field in passport for field in REQUIRED_FIELDS)
)
print(valids)
| 19.166667
| 66
| 0.452174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.132174
|
5a10f64e9ccb60f772e7fb4e5d093560ebd8cdb4
| 9,366
|
py
|
Python
|
src/falconpy/quick_scan.py
|
CrowdStrike/falconpy
|
e7245202224647a2c8d134e72f27d2f6c667a1ce
|
[
"Unlicense"
] | 111
|
2020-11-19T00:44:18.000Z
|
2022-03-03T21:02:32.000Z
|
src/falconpy/quick_scan.py
|
CrowdStrike/falconpy
|
e7245202224647a2c8d134e72f27d2f6c667a1ce
|
[
"Unlicense"
] | 227
|
2020-12-05T03:02:27.000Z
|
2022-03-22T14:12:42.000Z
|
src/falconpy/quick_scan.py
|
CrowdStrike/falconpy
|
e7245202224647a2c8d134e72f27d2f6c667a1ce
|
[
"Unlicense"
] | 47
|
2020-11-23T21:00:14.000Z
|
2022-03-28T18:30:19.000Z
|
"""Falcon Quick Scan API Interface Class
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
from ._util import force_default, process_service_request, handle_single_argument
from ._payload import generic_payload_list, aggregate_payload
from ._service_class import ServiceClass
from ._endpoint._quick_scan import _quick_scan_endpoints as Endpoints
class QuickScan(ServiceClass):
    """The only requirement to instantiate an instance of this class is one of the following:
    - a valid client_id and client_secret provided as keywords.
    - a credential dictionary with client_id and client_secret containing valid API credentials
      {
          "client_id": "CLIENT_ID_HERE",
          "client_secret": "CLIENT_SECRET_HERE"
      }
    - a previously-authenticated instance of the authentication service class (oauth2.py)
    - a valid token provided by the authentication service class (oauth2.py)
    """

    # force_default guarantees that `body` / `parameters` arrive as dicts
    # even when the caller omits them, so the payload helpers below can
    # populate them safely.
    @force_default(defaults=["body"], default_types=["dict"])
    def get_scans_aggregates(self: object, body: dict = None, **kwargs) -> dict:
        """Get scans aggregations as specified via json in request body.
        Keyword arguments:
        body -- full body payload, not required when using other keywords.
                {
                    "date_ranges": [
                        {
                            "from": "string",
                            "to": "string"
                        }
                    ],
                    "field": "string",
                    "filter": "string",
                    "interval": "string",
                    "min_doc_count": 0,
                    "missing": "string",
                    "name": "string",
                    "q": "string",
                    "ranges": [
                        {
                            "From": 0,
                            "To": 0
                        }
                    ],
                    "size": 0,
                    "sort": "string",
                    "sub_aggregates": [
                        null
                    ],
                    "time_zone": "string",
                    "type": "string"
                }
        date_ranges -- List of dictionaries.
        field -- String.
        filter -- FQL syntax. String.
        interval -- String.
        min_doc_count -- Minimum number of documents required to match. Integer.
        missing -- String.
        name -- Scan name. String.
        q -- FQL syntax. String.
        ranges -- List of dictionaries.
        size -- Integer.
        sort -- FQL syntax. String.
        sub_aggregates -- List of strings.
        time_zone -- String.
        type -- String.
        This method only supports keywords for providing arguments.
        This method does not support body payload validation.
        Returns: dict object containing API response.
        HTTP Method: POST
        Swagger URL
        https://assets.falcon.crowdstrike.com/support/api/swagger.html#/quick-scan/GetScansAggregates
        """
        # Build the aggregate body from keywords only when the caller did
        # not supply a full payload of their own.
        if not body:
            body = aggregate_payload(submitted_keywords=kwargs)
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="GetScansAggregates",
            body=body
            )

    @force_default(defaults=["parameters"], default_types=["dict"])
    def get_scans(self: object, *args, parameters: dict = None, **kwargs) -> dict:
        """Check the status of a volume scan. Time required for
        analysis increases with the number of samples in a volume
        but usually it should take less than 1 minute.
        Keyword arguments:
        ids -- One or more remediation IDs. String or list of strings.
        parameters - full parameters payload, not required if ids is provided as a keyword.
        Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
                   All others are ignored.
        Returns: dict object containing API response.
        HTTP Method: GET
        Swagger URL
        https://assets.falcon.crowdstrike.com/support/api/swagger.html#/quick-scan/GetScans
        """
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="GetScans",
            keywords=kwargs,
            params=handle_single_argument(args, parameters, "ids")
            )

    @force_default(defaults=["body"], default_types=["dict"])
    def scan_samples(self: object, *args, body: dict = None, **kwargs) -> dict:
        """Get scans aggregations as specified via json in request body.
        Keyword arguments:
        body -- full body payload, not required when samples keyword is provided.
                {
                    "samples": [
                        "string"
                    ]
                }
        samples -- SHA256(s) of the samples to scan. Must have been previously submitted using
                   SampleUploadV3 (SampleUploads class). String or list of strings.
        Arguments: When not specified, the first argument to this method is assumed to be
                   'samples'. All others are ignored.
        Returns: dict object containing API response.
        HTTP Method: POST
        Swagger URL
        https://assets.falcon.crowdstrike.com/support/api/swagger.html#/quick-scan/ScanSamples
        """
        # Accept the sample list positionally or as a keyword; wrap it into
        # the {"samples": [...]} body shape the API expects.
        if not body:
            body = generic_payload_list(submitted_arguments=args,
                                        submitted_keywords=kwargs,
                                        payload_value="samples"
                                        )
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="ScanSamples",
            body=body,
            body_validator={"samples": list} if self.validate_payloads else None,
            body_required=["samples"] if self.validate_payloads else None
            )

    @force_default(defaults=["parameters"], default_types=["dict"])
    def query_submissions(self: object, parameters: dict = None, **kwargs) -> dict:
        """Find IDs for submitted scans by providing an FQL filter and paging details.
        Returns a set of volume IDs that match your criteria.
        Keyword arguments:
        filter -- The filter expression that should be used to limit the results. FQL syntax.
        limit -- The maximum number of records to return. [integer, 1-5000]
        offset -- The integer offset to start retrieving records from.
        parameters - full parameters payload, not required if using other keywords.
        sort -- The property to sort by. FQL syntax.
        This method only supports keywords for providing arguments.
        Returns: dict object containing API response.
        HTTP Method: GET
        Swagger URL
        https://assets.falcon.crowdstrike.com/support/api/swagger.html#/quick-scan/QuerySubmissionsMixin0
        """
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="QuerySubmissionsMixin0",
            keywords=kwargs,
            params=parameters
            )

    # These method names align to the operation IDs in the API but
    # do not conform to snake_case / PEP8 and are defined here for
    # backwards compatibility / ease of use purposes
    GetScansAggregates = get_scans_aggregates
    GetScans = get_scans
    ScanSamples = scan_samples
    QuerySubmissionsMixin0 = query_submissions


# The legacy name for this class does not conform to PascalCase / PEP8
# It is defined here for backwards compatibility purposes only.
Quick_Scan = QuickScan  # pylint: disable=C0103
| 40.025641
| 105
| 0.604527
| 7,228
| 0.771728
| 0
| 0
| 6,303
| 0.672966
| 0
| 0
| 7,052
| 0.752936
|
5a118345944c61aa57f158d2bab247572f49c59f
| 353
|
py
|
Python
|
images/auth-service/settings.d/00-settings.py
|
ESGF/esgf-docker
|
95f5b76c85be65920810795484786a13865f4ac1
|
[
"Apache-2.0"
] | 3
|
2018-04-16T00:58:30.000Z
|
2020-10-07T17:58:02.000Z
|
images/auth-service/settings.d/00-settings.py
|
ESGF/esgf-docker
|
95f5b76c85be65920810795484786a13865f4ac1
|
[
"Apache-2.0"
] | 115
|
2017-01-10T20:12:42.000Z
|
2021-03-03T16:11:48.000Z
|
images/auth-service/settings.d/00-settings.py
|
ESGF/esgf-docker
|
95f5b76c85be65920810795484786a13865f4ac1
|
[
"Apache-2.0"
] | 21
|
2017-08-28T15:20:24.000Z
|
2021-02-09T00:08:49.000Z
|
# Application definition
INSTALLED_APPS = [
    'django.contrib.staticfiles',
    'django.contrib.sessions',
    'authenticate',
]
ROOT_URLCONF = 'auth_service.urls'
WSGI_APPLICATION = 'auth_service.wsgi.application'
# Use a non database session engine
# (signed cookies keep session data entirely client-side, so no session
# table or cache backend is required)
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
# NOTE(review): False permits the session cookie over plain HTTP —
# presumably intentional for in-cluster/dev traffic; confirm this is
# overridden in TLS-terminated deployments.
SESSION_COOKIE_SECURE = False
| 23.533333
| 66
| 0.776204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 225
| 0.637394
|
5a12d4be2ea76f2966c05949af40280a754ab4f5
| 3,641
|
py
|
Python
|
tests/test_gru.py
|
nsuke/hyrnn
|
b3efcc7b004d8402344467bf319f1d86324d11e5
|
[
"Apache-2.0"
] | 73
|
2019-04-08T08:17:39.000Z
|
2022-03-29T03:48:07.000Z
|
tests/test_gru.py
|
nsuke/hyrnn
|
b3efcc7b004d8402344467bf319f1d86324d11e5
|
[
"Apache-2.0"
] | 10
|
2019-03-19T04:24:07.000Z
|
2021-02-25T00:19:24.000Z
|
tests/test_gru.py
|
nsuke/hyrnn
|
b3efcc7b004d8402344467bf319f1d86324d11e5
|
[
"Apache-2.0"
] | 14
|
2019-05-06T09:42:37.000Z
|
2021-07-17T17:18:05.000Z
|
import hyrnn
import torch.nn
def test_MobiusGRU_no_packed_just_works():
    """Smoke test: a 1-layer MobiusGRU on an unpacked batch yields the
    expected output and hidden-state shapes."""
    input_size, hidden_size, batch_size, timesteps = 4, 3, 5, 10
    gru = hyrnn.nets.MobiusGRU(input_size, hidden_size, hyperbolic_input=False)
    sequence = torch.randn(timesteps, batch_size, input_size)
    out, ht = gru(sequence)
    # out: (seq_len, batch, num_directions * hidden_size)
    # ht: (num_layers * num_directions, batch, hidden_size)
    assert out.shape == (timesteps, batch_size, hidden_size)
    assert ht.shape == (1, batch_size, hidden_size)
def test_MobiusGRU_2_layers_no_packed_just_works():
    """Smoke test: a stacked (2-layer) MobiusGRU on an unpacked batch
    yields the expected output and hidden-state shapes."""
    input_size, hidden_size, batch_size = 4, 3, 5
    num_layers, timesteps = 2, 10
    gru = hyrnn.nets.MobiusGRU(
        input_size, hidden_size, num_layers=num_layers, hyperbolic_input=False
    )
    sequence = torch.randn(timesteps, batch_size, input_size)
    out, ht = gru(sequence)
    # out: (seq_len, batch, num_directions * hidden_size)
    # ht: (num_layers * num_directions, batch, hidden_size)
    assert out.shape == (timesteps, batch_size, hidden_size)
    assert ht.shape == (num_layers, batch_size, hidden_size)
def test_mobius_gru_loop_just_works():
    """Smoke test: the low-level mobius_gru_loop runs on packed input."""
    input_size, hidden_size, num_sequences = 4, 3, 3
    seqs = torch.nn.utils.rnn.pack_sequence([
        torch.zeros(10, input_size),
        torch.zeros(5, input_size),
        torch.zeros(1, input_size),
    ])
    # Assemble every loop argument in one literal instead of item-by-item.
    loop_params = dict(
        h0=torch.zeros(num_sequences, hidden_size, requires_grad=False),
        input=seqs.data,
        weight_ih=torch.nn.Parameter(torch.randn(3 * hidden_size, input_size)),
        weight_hh=torch.nn.Parameter(torch.randn(3 * hidden_size, hidden_size)),
        bias=torch.randn(3, hidden_size),
        c=1.0,
        nonlin=None,
        hyperbolic_input=True,
        hyperbolic_hidden_state0=True,
        batch_sizes=seqs.batch_sizes,
    )
    hyrnn.nets.mobius_gru_loop(**loop_params)
def test_MobiusGRU_with_packed_just_works():
    """Smoke test: a 1-layer MobiusGRU accepts a PackedSequence input."""
    input_size, hidden_size = 4, 3
    gru = hyrnn.nets.MobiusGRU(input_size, hidden_size, hyperbolic_input=False)
    lengths = (10, 5, 1)
    seqs = torch.nn.utils.rnn.pack_sequence(
        [torch.zeros(n, input_size) for n in lengths]
    )
    h, ht = gru(seqs)
    # Packed output holds one row per timestep across all sequences.
    assert h.data.shape == (sum(lengths), hidden_size)  # 16 packed steps
    # ht: (num_layers * num_directions, batch, hidden_size)
    assert ht.shape == (1, len(lengths), hidden_size)
def test_MobiusGRU_2_layers_with_packed_just_works():
    """Smoke test: a stacked (2-layer) MobiusGRU accepts a PackedSequence."""
    input_size, hidden_size = 4, 3
    gru = hyrnn.nets.MobiusGRU(
        input_size, hidden_size, num_layers=2, hyperbolic_input=False
    )
    lengths = (10, 5, 1)
    seqs = torch.nn.utils.rnn.pack_sequence(
        [torch.zeros(n, input_size) for n in lengths]
    )
    h, ht = gru(seqs)
    # Packed output holds one row per timestep across all sequences.
    assert h.data.shape == (sum(lengths), hidden_size)  # 16 packed steps
    # ht: (num_layers * num_directions, batch, hidden_size)
    assert ht.shape == (2, len(lengths), hidden_size)
| 31.938596
| 84
| 0.651744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 509
| 0.139797
|
5a12f3dfdcd98b07c6a9e2f6f164d8d44b462308
| 1,190
|
py
|
Python
|
ecogvis/signal_processing/common_referencing.py
|
NKI-ECOG/ecogVIS
|
f65212fc238e5b2588a4674a6aa1236f99e7d833
|
[
"BSD-3-Clause"
] | 13
|
2020-04-01T22:39:24.000Z
|
2022-03-04T13:27:51.000Z
|
ecogvis/signal_processing/common_referencing.py
|
NKI-ECOG/ecogVIS
|
f65212fc238e5b2588a4674a6aa1236f99e7d833
|
[
"BSD-3-Clause"
] | 56
|
2020-04-01T14:27:21.000Z
|
2022-03-23T21:33:06.000Z
|
ecogvis/signal_processing/common_referencing.py
|
luiztauffer/ecogVIS
|
c97e79a20b3af1074a3a5e1f1ad864a580c97e04
|
[
"BSD-3-Clause"
] | 11
|
2020-05-15T17:48:53.000Z
|
2022-02-01T23:55:12.000Z
|
from __future__ import division
import numpy as np
__all__ = ['subtract_CAR',
'subtract_common_median_reference']
def subtract_CAR(X, b_size=16):
    """Subtract a common average reference computed per block of channels.

    Channels are grouped into consecutive blocks of ``b_size``; each block
    has its own mean (NaN-aware) removed.  Channels left over when the
    count is not a multiple of ``b_size`` form one final, smaller block.

    Parameters
    ----------
    X : ndarray (n_channels, n_time)
    b_size : int
        Number of channels per referencing block (default 16).

    Returns
    -------
    ndarray (n_channels, n_time)
        Re-referenced copy; the input array is not modified.
    """
    channels, time_points = X.shape
    n_blocks, remainder = divmod(channels, b_size)

    full = X[:channels - remainder].copy()
    full = full.reshape((n_blocks, b_size, time_points))
    full -= np.nanmean(full, axis=1, keepdims=True)
    full = full.reshape((n_blocks * b_size, time_points))

    if remainder == 0:
        return full

    # Leftover channels get their own (smaller) common average.
    tail = X[channels - remainder:].copy()
    tail -= np.nanmean(tail, axis=0, keepdims=True)
    return np.vstack([full, tail])
def subtract_common_median_reference(X, channel_axis=-2):
    """Subtract the common median reference across the whole grid.

    Parameters
    ----------
    X : ndarray (..., n_channels, n_time)
        Data to common median reference.
    channel_axis : int
        Axis holding the channels (default: second to last).

    Returns
    -------
    ndarray (..., n_channels, n_time)
        Common median referenced data.
    """
    # NaNs are ignored when computing the per-timepoint median.
    return X - np.nanmedian(X, axis=channel_axis, keepdims=True)
| 23.8
| 71
| 0.616807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 437
| 0.367227
|
5a13150c841953f716f3e772e7c48bc269734ed8
| 3,701
|
py
|
Python
|
rackspace/heat_store/catalog/tests.py
|
rohithkumar-rackspace/rcbops
|
fb690bc528499bbf9aebba3ab0cce0b4dffd9e35
|
[
"Apache-2.0"
] | null | null | null |
rackspace/heat_store/catalog/tests.py
|
rohithkumar-rackspace/rcbops
|
fb690bc528499bbf9aebba3ab0cce0b4dffd9e35
|
[
"Apache-2.0"
] | null | null | null |
rackspace/heat_store/catalog/tests.py
|
rohithkumar-rackspace/rcbops
|
fb690bc528499bbf9aebba3ab0cce0b4dffd9e35
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import unittest
import mox
import six.moves.urllib.request as urlrequest
from six import StringIO
from solution import Solution
class TestSolution(unittest.TestCase):
    # Tests Solution's YAML metadata parsing.  test_incomplete_solution uses
    # mox record/replay, which is strictly order-dependent: every urlopen
    # expectation recorded before ReplayAll() must be consumed in the same
    # order, and tearDown's VerifyAll() fails the test otherwise.
    def setUp(self):
        self.mox = mox.Mox()
        pass
    def tearDown(self):
        # Verify every recorded expectation was consumed before unstubbing.
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
    def test_create_solution(self):
        # Parse the local example fixture and check each derived attribute.
        s = Solution('test_data/example/info.yaml')
        self.assertEqual(len(s.id), 32)
        self.assertEqual(s.title, 'the_title')
        self.assertEqual(s.logo, 'test_data/example/the_logo.png')
        self.assertEqual(s.release, '0.1')
        self.assertEqual(s.short_description, '<p>The short description</p>')
        self.assertIn('This is the <em>long</em> description',
                      s.long_description)
        self.assertIn('src="test_data/example/diagram.png"', s.long_description)
        self.assertIn('alt="here is a diagram"', s.long_description)
        self.assertIn('<strong>architecture</strong>', s.architecture)
        self.assertIn('Design spec #1', s.design_specs)
        self.assertIn('Design spec #2', s.design_specs)
        self.assertEqual(s.heat_template, 'example.yaml')
        self.assertEqual(s.env_file, 'env.yaml')
    def test_incomplete_solution(self):
        lines = open('test_data/example/info.yaml').readlines()
        # ensure the solution isn't imported if any of the items
        # below are missing
        missing_list = ['name', 'short_desc', 'long_desc', 'heat_template',
                        'release']
        self.mox.StubOutWithMock(urlrequest, 'urlopen')
        # Record one stubbed fetch per missing key, each returning the
        # fixture with the matching lines filtered out.
        for missing in missing_list:
            yaml = [line for line in lines if missing not in line]
            urlrequest.urlopen(
                'http://example.com/no-{0}.yaml'.format(missing)) \
                .AndReturn(StringIO('\n'.join(yaml)))
        self.mox.ReplayAll()
        # Replay in the same order; each incomplete document must raise.
        for missing in missing_list:
            with self.assertRaises(KeyError):
                Solution('http://example.com/no-{0}.yaml'.format(missing))
    def test_parameter_types(self):
        s = Solution('test_data/example/info.yaml')
        params = s.get_parameter_types(None)
        self.assertEqual(len(params), 5)
        self.assertEqual(params[0]['name'], 'floating-network-id')
        self.assertEqual(params[0]['type'], 'comma_delimited_list')
        self.assertIn(params[0]['default'],
                      params[0]['constraints'][0]['allowed_values'])
        self.assertIn('_mapping', params[0])
        self.assertEqual(params[0]['_mapping'], {'first_network': '11',
                                                 'second_network': '22',
                                                 'third_network': '33'})
        self.assertEqual(s.map_parameter(params, 'floating-network-id',
                                         'second_network'), '22')
        self.assertEqual(params[1]['name'], 'flavor')
        self.assertEqual(params[1]['type'], 'comma_delimited_list')
        # NOTE(review): this asserts the default is a *substring* of
        # 'm1.small' — assertEqual was probably intended; confirm fixture.
        self.assertIn(params[1]['default'], 'm1.small')
        self.assertEqual(params[2]['name'], 'image')
        self.assertEqual(params[2]['type'], 'comma_delimited_list')
        self.assertIn(params[2]['default'],
                      params[2]['constraints'][0]['allowed_values'])
        self.assertEqual(params[3]['name'], 'image-count')
        self.assertEqual(params[3]['type'], 'number')
        self.assertEqual(params[4]['name'], 'keyname')
        self.assertEqual(params[4]['type'], 'comma_delimited_list')
        self.assertIn(params[4]['default'],
                      params[4]['constraints'][0]['allowed_values'])
if __name__ == '__main__':
    unittest.main()
| 39.795699
| 80
| 0.599838
| 3,497
| 0.94488
| 0
| 0
| 0
| 0
| 0
| 0
| 1,055
| 0.285058
|
5a13d8b3614f878639aab1f5c25f37f50a754ad3
| 17
|
py
|
Python
|
tests/errors/semantic/ex4.py
|
toddrme2178/pyccel
|
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
|
[
"MIT"
] | null | null | null |
tests/errors/semantic/ex4.py
|
toddrme2178/pyccel
|
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
|
[
"MIT"
] | null | null | null |
tests/errors/semantic/ex4.py
|
toddrme2178/pyccel
|
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
|
[
"MIT"
] | null | null | null |
x is 1
y is None
| 5.666667
| 9
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5a15278549975dbd09dff5e97bcd011523d42479
| 4,784
|
py
|
Python
|
GetTopK.py
|
unsuthee/SemanticHashingWeekSupervision
|
2b2498c70ad3184203855222efde861211edcaea
|
[
"MIT"
] | 19
|
2018-10-30T08:36:49.000Z
|
2020-09-11T08:08:47.000Z
|
GetTopK.py
|
unsuthee/SemanticHashingWeekSupervision
|
2b2498c70ad3184203855222efde861211edcaea
|
[
"MIT"
] | 1
|
2019-10-12T07:03:06.000Z
|
2020-03-08T09:22:00.000Z
|
GetTopK.py
|
unsuthee/SemanticHashingWeekSupervision
|
2b2498c70ad3184203855222efde861211edcaea
|
[
"MIT"
] | 6
|
2018-09-05T09:07:34.000Z
|
2020-04-07T16:58:08.000Z
|
################################################################################################################
# Author: Suthee Chaidaroon
# schaidaroon@scu.edu
################################################################################################################
import numpy as np
import os
from utils import *
from tqdm import tqdm
import scipy.io
import torch
import torch.autograd as autograd
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
#################################################################################################################
def GetTopK_UsingCosineSim(outfn, queries, documents, TopK, queryBatchSize=10, docBatchSize=100):
n_docs = documents.shape[0]
n_queries = queries.shape[0]
query_row = 0
with open(outfn, 'w') as out_fn:
for q_idx in tqdm(range(0, n_queries, queryBatchSize), desc='Query', ncols=0):
query_batch_s_idx = q_idx
query_batch_e_idx = min(query_batch_s_idx + queryBatchSize, n_queries)
queryMats = torch.cuda.FloatTensor(queries[query_batch_s_idx:query_batch_e_idx].toarray())
queryNorm2 = torch.norm(queryMats, 2, dim=1)
queryNorm2.unsqueeze_(1)
queryMats.unsqueeze_(2)
scoreList = []
indicesList = []
#print('{}: perform cosine sim ...'.format(q_idx))
for idx in tqdm(range(0, n_docs, docBatchSize), desc='Doc', leave=False, ncols=0):
batch_s_idx = idx
batch_e_idx = min(batch_s_idx + docBatchSize, n_docs)
n_doc_in_batch = batch_e_idx - batch_s_idx
#if batch_s_idx > 1000:
# break
candidateMats = torch.cuda.FloatTensor(documents[batch_s_idx:batch_e_idx].toarray())
candidateNorm2 = torch.norm(candidateMats, 2, dim=1)
candidateNorm2.unsqueeze_(0)
candidateMats.unsqueeze_(2)
candidateMats = candidateMats.permute(2, 1, 0)
# compute cosine similarity
queryMatsExpand = queryMats.expand(queryMats.size(0), queryMats.size(1), candidateMats.size(2))
candidateMats = candidateMats.expand_as(queryMatsExpand)
cos_sim_scores = torch.sum(queryMatsExpand * candidateMats, dim=1) / (queryNorm2 * candidateNorm2)
K = min(TopK, n_doc_in_batch)
scores, indices = torch.topk(cos_sim_scores, K, dim=1, largest=True)
del cos_sim_scores
del queryMatsExpand
del candidateMats
del candidateNorm2
scoreList.append(scores)
indicesList.append(indices + batch_s_idx)
all_scores = torch.cat(scoreList, dim=1)
all_indices = torch.cat(indicesList, dim=1)
_, indices = torch.topk(all_scores, TopK, dim=1, largest=True)
topK_indices = torch.gather(all_indices, 1, indices)
#all_topK_indices.append(topK_indices)
#all_topK_scores.append(scores)
del queryMats
del queryNorm2
del scoreList
del indicesList
topK_indices = topK_indices.cpu().numpy()
for row in topK_indices:
out_fn.write("{}:".format(query_row))
outtext = ','.join([str(col) for col in row])
out_fn.write(outtext)
out_fn.write('\n')
query_row += 1
torch.cuda.empty_cache()
#################################################################################################################
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--gpunum")
parser.add_argument("--dataset")
parser.add_argument("--usetrain", action='store_true')
args = parser.parse_args()
if args.gpunum:
print("Use GPU #:{}".format(args.gpunum))
gpunum = args.gpunum
else:
print("Use GPU #0 as a default gpu")
gpunum = "0"
os.environ["CUDA_VISIBLE_DEVICES"]=gpunum
if args.dataset:
print("load {} dataset".format(args.dataset))
dataset = args.dataset
else:
parser.error("Need to provide the dataset.")
data = Load_Dataset("data/ng20.mat")
print("num train:{} num tests:{}".format(data.n_trains, data.n_tests))
if args.usetrain:
print("use train as a query corpus")
query_corpus = data.train
out_fn = "bm25/{}_train_top101.txt".format(dataset)
else:
print("use test as a query corpus")
query_corpus = data.test
out_fn = "bm25/{}_test_top101.txt".format(dataset)
print("save the result to {}".format(out_fn))
GetTopK_UsingCosineSim(out_fn, query_corpus, data.train, TopK=101, queryBatchSize=500, docBatchSize=100)
| 36.519084
| 114
| 0.567517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,037
| 0.216764
|
5a16e96d11bf3bbabd290d8e7eb17ada9e705ea1
| 1,065
|
py
|
Python
|
migrate_from_fnordcredit.py
|
stratum0/Matekasse
|
9b48a8a07978a150e1df1b13b394791044cce82e
|
[
"MIT"
] | 1
|
2019-07-13T16:25:06.000Z
|
2019-07-13T16:25:06.000Z
|
migrate_from_fnordcredit.py
|
stratum0/Matekasse
|
9b48a8a07978a150e1df1b13b394791044cce82e
|
[
"MIT"
] | 10
|
2020-01-09T16:14:19.000Z
|
2021-03-07T17:04:30.000Z
|
migrate_from_fnordcredit.py
|
stratum0/Matekasse
|
9b48a8a07978a150e1df1b13b394791044cce82e
|
[
"MIT"
] | 1
|
2021-06-01T07:21:03.000Z
|
2021-06-01T07:21:03.000Z
|
from matekasse import create_app, db
from matekasse.models import User, Transaction
import sqlite3
import argparse
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("-p", "--path", action='store', type=str, required=True, help="Path to fnordcredit database")
inp = parser.parse_args()
app = create_app()
ctx = app.app_context()
ctx.push()
try:
conn = sqlite3.connect(inp.path)
cursor = conn.cursor()
cursor.execute('SELECT * FROM user')
rows = cursor.fetchall()
for r in rows:
user = r[5]
credit = r[1] * 100
newuser = User(username=user, credit=credit)
db.session.add(newuser)
'''cursor.execute('SELECT * FROM transaction')
rows = cursor.fetchall()
for r in rows:
user = r[5]
trans = r[2] * 100
newtrans = Transaction(userid=user, credit=trans)
db.session.add(newtrans)'''
db.session.commit()
except sqlite3.Error as error:
print(error)
finally:
if conn:
conn.close()
print('Migration complete')
ctx.pop()
exit()
| 25.97561
| 113
| 0.650704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 324
| 0.304225
|
5a17b8b4d053d2409ae3602977dee83dcbebc0b2
| 4,340
|
py
|
Python
|
scripts/lc/ARES/testing/run_rose_tool.py
|
ouankou/rose
|
76f2a004bd6d8036bc24be2c566a14e33ba4f825
|
[
"BSD-3-Clause"
] | 488
|
2015-01-09T08:54:48.000Z
|
2022-03-30T07:15:46.000Z
|
scripts/lc/ARES/testing/run_rose_tool.py
|
ouankou/rose
|
76f2a004bd6d8036bc24be2c566a14e33ba4f825
|
[
"BSD-3-Clause"
] | 174
|
2015-01-28T18:41:32.000Z
|
2022-03-31T16:51:05.000Z
|
scripts/lc/ARES/testing/run_rose_tool.py
|
ouankou/rose
|
76f2a004bd6d8036bc24be2c566a14e33ba4f825
|
[
"BSD-3-Clause"
] | 146
|
2015-04-27T02:48:34.000Z
|
2022-03-04T07:32:53.000Z
|
#!/usr/bin/env python
"""Runs a ROSE tool. If the tool does not return status 0, then runs the
corresponding non-ROSE compiler. Records whether the tool succeeded, in
passed.txt and failed.txt, but always returns status 0.
"""
import argparse
import inspect
import os
from support.local_logging import Logger
from support.runner import Runner
_SEPARATOR = "================================================================================"
class ROSERunner (object):
def __init__(self):
# Will be a Namespace (e.g. can refer to self._args_defined.command_args):
self._args_defined = None
# Will be a list:
self._args_remaining = None
self._current_dir = ""
self._failed_file = None
self._failed_file_path = ""
self._logger = Logger("run_rose_tool.ROSERunner")
self._parser = None
self._passed_file = None
self._passed_file_path = ""
self._primary_command = ""
self._runner = Runner()
self._script_dir = ""
self._secondary_command = ""
self._define_args()
def _define_args(self):
""" This script passes all its arguments on to the called
programs, so there are no argus defined.
"""
parser = argparse.ArgumentParser(
description="""Runs a ROSE tool. If the tool does not return status 0, then runs the
corresponding non-ROSE compiler. Records whether the tool succeeded, in
passed.txt and failed.txt, but always returns status 0.
""")
# We want ALL the arguments, so, we are using parse_known_arguments
# below instead and commenting this out for now:
## This matches the first positional and all remaining/following args:
#parser.add_argument('command_args', nargs=argparse.REMAINDER)
self._parser = parser
def _process_args(self):
self._args_defined, self._args_remaining = self._parser.parse_known_args()
self._logger.debug("defined args\n" + str(self._args_defined))
self._logger.debug("remaining args\n" + str(self._args_remaining))
self._current_dir = os.getcwd()
#self._script_dir = os.path.dirname(os.path.abspath(__file__))
# Robustly get this script's directory, even when started by exec or execfiles:
script_rel_path = inspect.getframeinfo(inspect.currentframe()).filename
self._script_dir = os.path.dirname(os.path.abspath(script_rel_path))
self._primary_command = "/g/g17/charles/code/ROSE/rose-0.9.10.64-intel-18.0.1.mpi/tutorial/identityTranslator"
self._secondary_command = "/usr/tce/packages/mvapich2/mvapich2-2.2-intel-18.0.1/bin/mpicxx"
self._passed_file_path = os.path.join (self._script_dir, "passed.txt")
self._failed_file_path = os.path.join (self._script_dir, "failed.txt")
def _log_success(self, args):
self._logger.success("\n" + _SEPARATOR + "\nPASSED")
self._logger.debug("Will log to passed file:")
self._logger.debug(args)
self._passed_file.write(str(args) + '\n')
def _log_failure(self, args):
self._logger.problem("\n" + _SEPARATOR + "\nFAILED")
self._logger.debug("Will log to failed file:")
self._logger.debug(args)
self._failed_file.write(str(args) + '\n')
def _run_command (self, args, dir):
self._logger.info("\n" + _SEPARATOR)
self._runner.callOrLog(args, dir)
def run(self):
""" Run the primary command. If it fails, run the secondary command. If
that fails, let the exception (Runner.Failed) propagate.
"""
self._logger.set_debug_off()
#self._logger._logger.setLevel(Logger.ERROR)
self._process_args()
self._passed_file = open(self._passed_file_path, 'a')
self._failed_file = open(self._failed_file_path, 'a')
try:
primary_args = [self._primary_command] + self._args_remaining
self._run_command(primary_args, self._current_dir)
self._log_success(primary_args)
except Runner.Failed, e:
self._log_failure(primary_args)
secondary_args = [self._secondary_command] + self._args_remaining
self._run_command(secondary_args, self._current_dir)
def main():
ROSERunner().run()
if __name__ == '__main__':
main()
| 41.333333
| 118
| 0.653917
| 3,818
| 0.879724
| 0
| 0
| 0
| 0
| 0
| 0
| 1,665
| 0.383641
|
5a18641e63b3fcad6914df89d4ba92c48cbaed17
| 951
|
py
|
Python
|
source/odp/migrations/0003_auto_20201121_0919.py
|
kssvrk/BhoonidhiODP
|
e222087629250ea4ccd1ae8d8903d9ff400c13b4
|
[
"BSD-3-Clause"
] | null | null | null |
source/odp/migrations/0003_auto_20201121_0919.py
|
kssvrk/BhoonidhiODP
|
e222087629250ea4ccd1ae8d8903d9ff400c13b4
|
[
"BSD-3-Clause"
] | null | null | null |
source/odp/migrations/0003_auto_20201121_0919.py
|
kssvrk/BhoonidhiODP
|
e222087629250ea4ccd1ae8d8903d9ff400c13b4
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-11-21 09:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('odp', '0002_auto_20201121_0659'),
]
operations = [
migrations.CreateModel(
name='ProcessGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group_name', models.CharField(max_length=100, unique=True)),
('group_description', models.CharField(blank=True, max_length=5000, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.AddField(
model_name='processcatalogue',
name='group_id',
field=models.ManyToManyField(to='odp.ProcessGroup'),
),
]
| 32.793103
| 114
| 0.59306
| 858
| 0.902208
| 0
| 0
| 0
| 0
| 0
| 0
| 200
| 0.210305
|
5a18bfbbcf6c30bc2b6197bebec5c6f5638d264b
| 935
|
py
|
Python
|
test/auth/test_client_credentials.py
|
membranepotential/mendeley-python-sdk
|
0336f0164f4d409309e813cbd0140011b5b2ff8f
|
[
"Apache-2.0"
] | 103
|
2015-01-12T00:40:51.000Z
|
2022-03-29T07:02:06.000Z
|
test/auth/test_client_credentials.py
|
membranepotential/mendeley-python-sdk
|
0336f0164f4d409309e813cbd0140011b5b2ff8f
|
[
"Apache-2.0"
] | 26
|
2015-01-10T04:08:41.000Z
|
2021-02-05T16:31:37.000Z
|
test/auth/test_client_credentials.py
|
membranepotential/mendeley-python-sdk
|
0336f0164f4d409309e813cbd0140011b5b2ff8f
|
[
"Apache-2.0"
] | 43
|
2015-03-04T18:11:06.000Z
|
2022-03-13T02:33:34.000Z
|
from oauthlib.oauth2 import InvalidClientError, MissingTokenError
import pytest
from test import configure_mendeley, cassette
def test_should_get_authenticated_session():
mendeley = configure_mendeley()
auth = mendeley.start_client_credentials_flow()
with cassette('fixtures/auth/client_credentials/get_authenticated_session.yaml'):
session = auth.authenticate()
assert session.token['access_token']
assert session.host == 'https://api.mendeley.com'
def test_should_throw_exception_on_incorrect_credentials():
mendeley = configure_mendeley()
mendeley.client_secret += '-invalid'
auth = mendeley.start_client_credentials_flow()
# We should never get an access token back
# and the OAuth library should be unhappy about that
with cassette('fixtures/auth/client_credentials/incorrect_credentials.yaml'), pytest.raises(MissingTokenError):
auth.authenticate()
| 34.62963
| 115
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 270
| 0.28877
|
5a18ed1bc8e6b13c94274ea7e8252580407f9a6b
| 338
|
py
|
Python
|
problem/01000~09999/06137/6137.py3.py
|
njw1204/BOJ-AC
|
1de41685725ae4657a7ff94e413febd97a888567
|
[
"MIT"
] | 1
|
2019-04-19T16:37:44.000Z
|
2019-04-19T16:37:44.000Z
|
problem/01000~09999/06137/6137.py3.py
|
njw1204/BOJ-AC
|
1de41685725ae4657a7ff94e413febd97a888567
|
[
"MIT"
] | 1
|
2019-04-20T11:42:44.000Z
|
2019-04-20T11:42:44.000Z
|
problem/01000~09999/06137/6137.py3.py
|
njw1204/BOJ-AC
|
1de41685725ae4657a7ff94e413febd97a888567
|
[
"MIT"
] | 3
|
2019-04-19T16:37:47.000Z
|
2021-10-25T00:45:00.000Z
|
s=[]
for i in range(int(input())):
s.append(input())
cnt=0
while s:
flag=True
for i in range(len(s)//2):
if s[i]<s[-(i+1)]:
print(s[0],end='')
s.pop(0)
flag=False
break
elif s[-(i+1)]<s[i]:
print(s[-1],end='')
s.pop()
flag=False
break
if flag:
print(s[-1],end='')
s.pop()
cnt+=1
if cnt%80==0: print()
| 15.363636
| 29
| 0.52071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.017751
|
5a1bae372e9a9d499e2d0814cd4b789a6fdb51ad
| 2,072
|
py
|
Python
|
test/test_thirty.py
|
jakubtuchol/dailycodingproblem
|
9f0f3193f1746e949e16febace5aa5622dc5d4dc
|
[
"MIT"
] | 1
|
2020-10-13T20:54:37.000Z
|
2020-10-13T20:54:37.000Z
|
test/test_thirty.py
|
jakubtuchol/dailycodingproblem
|
9f0f3193f1746e949e16febace5aa5622dc5d4dc
|
[
"MIT"
] | null | null | null |
test/test_thirty.py
|
jakubtuchol/dailycodingproblem
|
9f0f3193f1746e949e16febace5aa5622dc5d4dc
|
[
"MIT"
] | null | null | null |
from src.thirty import edit_distance
from src.thirty import find_second_largest_node
from src.thirty import make_palindrome
from src.thirty import powerset
from src.data_structures import BinaryNode
class TestEditDistance:
"""
Problem #31
"""
def test_provided_example(self):
assert 3 == edit_distance('kitten', 'sitting')
def test_empty_examples(self):
assert 7 == edit_distance('', 'sitting')
assert 6 == edit_distance('kitten', '')
def test_equal_examples(self):
assert 0 == edit_distance('', '')
assert 0 == edit_distance('kitten', 'kitten')
def test_none_in_common(self):
assert 3 == edit_distance('abc', 'xyz')
class TestMakePalindrome:
"""
Problem #34
"""
def test_provided_example(self):
assert 'ecarace' == make_palindrome('race')
def test_another_example(self):
assert 'elgoogle' == make_palindrome('google')
class TestFindSecondLargestNode:
"""
Problem #36
"""
def test_on_left(self):
root = BinaryNode(5)
root.left = BinaryNode(3)
assert 3 == find_second_largest_node(root).val
def test_on_right(self):
root = BinaryNode(2)
root.right = BinaryNode(4)
assert 2 == find_second_largest_node(root).val
def test_balanced(self):
root = BinaryNode(2)
root.left = BinaryNode(1)
root.right = BinaryNode(3)
assert 2 == find_second_largest_node(root).val
def test_less_than_two_elements(self):
root = BinaryNode(2)
assert not find_second_largest_node(root)
class TestPowerset:
"""
Problem #37
"""
def test_three(self):
expected_set = [
[],
[1], [2], [3],
[1, 2], [1, 3], [2, 3],
[1, 2, 3],
]
calculated_set = powerset([1, 2, 3])
assert len(calculated_set) == len(expected_set)
for elt in calculated_set:
assert elt in expected_set
def test_empty(self):
assert [[]] == powerset([])
| 22.769231
| 55
| 0.605695
| 1,861
| 0.898166
| 0
| 0
| 0
| 0
| 0
| 0
| 209
| 0.100869
|
5a1be255168c22e03a6a98004add6394315035a9
| 3,947
|
py
|
Python
|
src/google_music_proto/musicmanager/utils.py
|
ddboline/google-music-proto
|
d3af3a1fe911edcd083482c9a6e8bde5a2902462
|
[
"MIT"
] | null | null | null |
src/google_music_proto/musicmanager/utils.py
|
ddboline/google-music-proto
|
d3af3a1fe911edcd083482c9a6e8bde5a2902462
|
[
"MIT"
] | null | null | null |
src/google_music_proto/musicmanager/utils.py
|
ddboline/google-music-proto
|
d3af3a1fe911edcd083482c9a6e8bde5a2902462
|
[
"MIT"
] | null | null | null |
__all__ = [
'generate_client_id',
'get_album_art',
'get_transcoder',
'transcode_to_mp3',
]
import os
import shutil
import subprocess
from base64 import b64encode
from binascii import unhexlify
from hashlib import md5
import audio_metadata
# The id is found by: getting md5sum of audio, base64 encode md5sum, removing trailing '='.
def generate_client_id(song):
if not isinstance(song, audio_metadata.Format):
song = audio_metadata.load(song)
md5sum = None
if isinstance(song, audio_metadata.FLAC):
md5sum = unhexlify(song.streaminfo.md5)
else:
m = md5()
audio_size = song.streaminfo._size
with open(song.filepath, 'rb') as f:
f.seek(song.streaminfo._start)
# Speed up by reading in chunks
read = 0
while True:
read_size = min(audio_size - read, 65536)
if not read_size:
break
read += read_size
data = f.read(read_size)
m.update(data)
md5sum = m.digest()
client_id = b64encode(md5sum).rstrip(b'=').decode('ascii')
return client_id
def get_album_art(song):
if not isinstance(song, audio_metadata.Format):
song = audio_metadata.load(song)
album_art = next(
(
picture.data
for picture in song.pictures
if picture.type == 3
),
None
)
return album_art
def get_transcoder():
"""Return the path to a transcoder (ffmpeg or avconv) with MP3 support."""
transcoders = ['ffmpeg', 'avconv']
transcoder_details = {}
for transcoder in transcoders:
command_path = shutil.which(transcoder)
if command_path is None:
transcoder_details[transcoder] = 'Not installed.'
continue
stdout = subprocess.run(
[command_path, '-codecs'],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
universal_newlines=True,
).stdout
mp3_encoding_support = (
'libmp3lame' in stdout
and 'disable-libmp3lame' not in stdout
)
if mp3_encoding_support:
transcoder_details[transcoder] = "MP3 encoding support."
break
else:
transcoder_details[transcoder] = "No MP3 encoding support."
else:
raise ValueError(
f"ffmpeg or avconv must be in the path and support mp3 encoding."
"\nDetails: {transcoder_details}"
)
return command_path
def _transcode(command, input_=None):
try:
transcode = subprocess.run(
command,
input=input_,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
transcode.check_returncode()
except (OSError, subprocess.CalledProcessError) as e:
error_msg = f"Transcode command '{' '.join(command)}' failed: {e}. "
if 'No such file or directory' in str(e):
error_msg += '\nffmpeg or avconv must be installed PATH.'
if transcode.stderr is not None:
error_msg += f"\nstderr: '{transcode.stderr}'"
e.message = error_msg
raise
else:
return transcode.stdout
def transcode_to_mp3(song, *, slice_start=None, slice_duration=None, quality='320k'):
command_path = get_transcoder()
input_ = None
if isinstance(song, audio_metadata.Format):
if hasattr(song.filepath, 'read'):
raise ValueError("Audio metadata must be from a file.")
# command = [command_path, '-i', '-']
# input_ = song.filepath.read()
else:
command = [command_path, '-i', song.filepath]
elif isinstance(song, bytes):
command = [command_path, '-i', '-']
input_ = song
elif isinstance(song, str):
command = [command_path, '-i', song]
elif isinstance(song, os.PathLike):
command = [command_path, '-i', song.__fspath__()]
else:
raise ValueError(
"'song' must be os.PathLike, filepath string, a file/bytes-like object, or binary data."
)
if slice_duration is not None:
command.extend(['-t', str(slice_duration)])
if slice_start is not None:
command.extend(['-ss', str(slice_start)])
if isinstance(quality, int):
command.extend(['-q:a', str(quality)])
elif isinstance(quality, str):
command.extend(['-b:a', str(quality)])
# Use 's16le' to not output id3 headers.
command.extend(['-f', 's16le', '-c', 'libmp3lame', '-'])
return _transcode(command, input_=input_)
| 23.777108
| 91
| 0.701292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 975
| 0.247023
|
5a1c4a115bd07e61146d8a14b7bb3639da60f1ea
| 8,731
|
py
|
Python
|
Other/SocialNetwork/Solver.py
|
lesyk/Evolife
|
8e3dd1aab84061f7ce082f3a4b1bac0b2e31bc4a
|
[
"MIT"
] | null | null | null |
Other/SocialNetwork/Solver.py
|
lesyk/Evolife
|
8e3dd1aab84061f7ce082f3a4b1bac0b2e31bc4a
|
[
"MIT"
] | null | null | null |
Other/SocialNetwork/Solver.py
|
lesyk/Evolife
|
8e3dd1aab84061f7ce082f3a4b1bac0b2e31bc4a
|
[
"MIT"
] | null | null | null |
## {{{ http://code.activestate.com/recipes/303396/ (r1)
'''equation solver using attributes and introspection'''
from __future__ import division
class Solver(object):
'''takes a function, named arg value (opt.) and returns a Solver object'''
def __init__(self,f,**args):
self._f=f
self._args={}
# see important note on order of operations in __setattr__ below.
for arg in f.func_code.co_varnames[0:f.func_code.co_argcount]:
self._args[arg]=None
self._setargs(**args)
def __repr__(self):
argstring=','.join(['%s=%s' % (arg,str(value)) for (arg,value) in
self._args.items()])
if argstring:
return 'Solver(%s,%s)' % (self._f.func_code.co_name, argstring)
else:
return 'Solver(%s)' % self._f.func_code.co_name
def __getattr__(self,name):
'''used to extract function argument values'''
self._args[name]
return self._solve_for(name)
def __setattr__(self,name,value):
'''sets function argument values'''
# Note - once self._args is created, no new attributes can
# be added to self.__dict__. This is a good thing as it throws
# an exception if you try to assign to an arg which is inappropriate
# for the function in the solver.
if self.__dict__.has_key('_args'):
if name in self._args:
self._args[name]=value
else:
raise KeyError, name
else:
object.__setattr__(self,name,value)
def _setargs(self,**args):
'''sets values of function arguments'''
for arg in args:
self._args[arg] # raise exception if arg not in _args
setattr(self,arg,args[arg])
def _solve_for(self,arg):
'''Newton's method solver'''
TOL=0.0000001 # tolerance
ITERLIMIT=1000 # iteration limit
CLOSE_RUNS=10 # after getting close, do more passes
args=self._args
if self._args[arg]:
x0=self._args[arg]
else:
x0=1
if x0==0:
x1=1
else:
x1=x0*1.1
def f(x):
'''function to solve'''
args[arg]=x
return self._f(**args)
fx0=f(x0)
n=0
while 1: # Newton's method loop here
fx1 = f(x1)
if fx1==0 or x1==x0: # managed to nail it exactly
break
if abs(fx1-fx0)<TOL: # very close
close_flag=True
if CLOSE_RUNS==0: # been close several times
break
else:
CLOSE_RUNS-=1 # try some more
else:
close_flag=False
if n>ITERLIMIT:
print "Failed to converge; exceeded iteration limit"
break
slope=(fx1-fx0)/(x1-x0)
if slope==0:
if close_flag: # we're close but have zero slope, finish
break
else:
print 'Zero slope and not close enough to solution'
break
x2=x0-fx0/slope # New 'x1'
fx0 = fx1
x0=x1
x1=x2
n+=1
self._args[arg]=x1
return x1
## end of http://code.activestate.com/recipes/303396/ }}}
######### Example ############
##from math import cos
##
##def toto(x,A):
## return A-cos(x)
##
##T = Solver(toto)
##T.A = 0
##print 2 * T.x
######### Fin Example ############
def competence(BottomCompetence, Quality):
return BottomCompetence + (1-BottomCompetence)*Quality
def Profit(b, K, friendQuality, r, NFriends):
Risk = 1
for f in range(NFriends):
Risk *= (1 - K * r**f * competence(b, friendQuality))
return 1 - Risk
def IntegralProfit(b, K, friendQuality, r, NFriends):
Sum = 0
for FQ in range(int(friendQuality * 100.01)):
Sum += Profit(b, K, FQ/100.1, r, NFriends)
return Sum / 100.01
def CompetitiveSignal(b, K, q, r, NFriends, cost):
profit = Profit(b, K, q, r, NFriends)
integralProfit = IntegralProfit(b, K, q, r, NFriends)
return (competence(b, q) * profit - (1-b) * ralProfit) / cost
def CompetitiveSignal1FriendsB0(K, q, cost):
# special case 1 friend and no bottom competence
return K*q**2/(2*cost)
def CompetitiveSignal2FriendsB0(K, q, r, cost):
# special case 2 friends and no bottom competence
return (-2*K**2*r*q**3/(3*cost)+K*q**2*(1+r)/(2*cost))
def CompetitiveSignal3Friends(b, q, r, cost):
# special case 3 friends
return (1-b)*((1+r+r**2)*(b*q+(1-b)*q**2/2) - 2*r*(1+r+r**2)*(b**2*q + (1-b)**2*q**3/3 +2*b*(1-b)*q**2/2) + 3*r**3 *(b**3*q + 3*b**2*(1-b)*q**2/2 + 3*b*(1-b)**2*q**3/3 + (1-b)**3*q**4/4) )/cost
def CompetitiveSignal3FriendsB0(q, r, cost):
# special case 3 friends and no bottom competence
return (1+r+r**2) *q**2/(2*cost) - 2*r*(1+r+r**2) *q**3/(3*cost) + 3*r**3* q**4/(4*cost)
def Equil2Friends(b, K, C, eta, r, deltag):
# for two friends
ro = 0
S_eta = competence(b, eta)
S_tau = competence(b,(1+eta)/2)
bh = K * S_tau *(1+r) - K**2 * r * S_tau**2 - C * deltag
bl = (1 - (1 - (1-ro)*K*S_tau - ro * K * S_eta)*(1 - K*r*(1-ro)*S_tau - ro*K*r*S_eta)) + C * deltag
# return bh-bl
#return (1-S_eta)*(1 + r/2 - 3*r*S_eta/2) - 4 * C * deltag
return K*(1-b)*(1-eta)*(1+r-K*r*(2*b + (1-b)*(3*eta+1)/2))/4 - C*deltag
def EquilManyFriends(b, K, C, eta, r, deltag, NF):
S_eta = competence(b, eta)
S_tau = competence(b,(1+eta)/2)
return Profit(b,K,S_tau,r,NF) - Profit(b,K,S_eta,r,NF) - 2*C*deltag
def SubEquilManyFriends(b, K, C, eta, tau, theta, r, deltag, NF):
S_eta = competence(b, eta)
S_tau = competence(b, tau)
SubMiddle = competence(b, (theta+eta)/2)
## return Profit(b,K,S_tau,r,NF) - Profit(b,K,SubMiddle,r,NF) - 2*C*deltag / S_eta
return Profit(b,K,S_tau,r,NF) - Profit(b,K,SubMiddle,r,NF) - 2*C*deltag
def UniformSignalB0(K, C, eta, r, deltag, NF):
b = 0 # otherwise false
S_eta = competence(b, eta)
S_tau = competence(b,(1+eta)/2)
Pu = (Profit(b,K,S_tau,r,NF) + Profit(b,K,S_eta,r,NF)) /2
Pc = IntegralProfit(b, K, eta, r, NF)
return ( S_eta * Pu - Pc ) / C
def UniformBenefitB0(K, C, eta, theta, r, deltag, NF, sm, ro):
b = 0 # otherwise false
S_eta = competence(b, eta)
S_theta = competence(b, theta)
S_tau = competence(b,(1+eta)/2)
Ptau = (1 + ro + ro*ro/4)/2
#Ptau = (1 + ro)/2
return Ptau * Profit(b,K,S_tau,r,NF) + (1-Ptau) * Profit(b,K,theta,r,NF) - (C*sm+ro*deltag)/S_theta
def DiffBenefitB0(K, C, eta, theta, r, deltag, NF, sm, ro):
S_theta = competence(b, theta)
return UniformBenefitB0(K, C, eta, theta, r, deltag, NF, sm, ro) \
- IntegralProfit(b, K, theta, r, NF)/S_theta
Equil = Solver(EquilManyFriends)
#Equil = Solver(Equil2Friends)
Equil.deltag = deltag = 1.2 * 0.11 # Learning noise
Equil.b = b = 0.0 # BottomCompetence
Equil.K = K = 1.0
Equil.r = r = 0.6 # RankEffect
Equil.NF = NF = 2 # Number of friends
Equil.C = C = 0.6 # Cost
#Equil.ro = 0.0 # shift from uniform signal
Equil.eta = 0.1 # threshold for uniform signal (initialization)
ETA = Equil.eta
OffSet = 0
print Equil
#print '%d\t%d' % (int(round(100* ETA)),(100*CompetitiveSignal(b,K,ETA, r, NF, C)))
#print 'Eta: %d\t competitive signal in Eta: %d' % (int(round(100* ETA)),(100*CompetitiveSignal3FriendsB0(ETA, r, C))),
print 'Eta: %d\t competitive signal in Eta: %d' % (int(round(100* ETA)),(100*CompetitiveSignal2FriendsB0(1,ETA, r, C))),
# sm = CompetitiveSignal3FriendsB0(ETA, r, C)
sm = UniformSignalB0(K, C, ETA, r, deltag, NF)
print 'sm: %d' % (100 * sm),
SubEquil = Solver(SubEquilManyFriends)
SubEquil.deltag = deltag # Learning noise
SubEquil.b = b # BottomCompetence
SubEquil.K = K
SubEquil.r = r # RankEffect
SubEquil.NF = NF # Number of friends
SubEquil.C = C # Cost
SubEquil.eta = ETA
SubEquil.tau = ETA
SubEquil.theta = 0.1 # initialization
THETA = SubEquil.theta
print 'Theta: %d' % (100 * THETA),
### 2nd iteration
##SubEquil.eta = THETA
##SubEquil.theta = 0.1 # initialization
##THTHETA = SubEquil.theta
##print 'ThTheta: %d' % (100 * THTHETA),
print
#print SubEquil
"""
for q in range(1,int(round(100* THETA)),1):
# print CompetitiveSignal3Friends(0,q/100.0,0.6,0.6),
print "%01.1f\t" % (100 * CompetitiveSignal1FriendsB0(K,q/100.0,C)),
#print "%01.1f\t" % (10000 * CompetitiveSignal3FriendsB0(q/100.0,r,C)/q),
for q in range(int(round(100* THETA)),101,1):
print "%01.1f\t" % (100 * sm),
#print "%01.1f\t" % (10000 * sm/q),
print
"""
##for q in range(1,int(round(100* ETA)),1):
## print "%01.2f\t" % (100 * IntegralProfit(b,K,q/100.0,r,NF)),
##print
##for ro in range(-190, 190, 1):
## Equil.ro = ro/100.0
## Equil.eta = 0.01
## ETA = Equil.eta
## print '%d\t' % (int(round(100* ETA))),
##print
##for dtheta in range(-5, 10):
## theta = ETA - dtheta / 100.0
## print theta
## for ro in range(-5,5,1):
## print "%01.1f\t" % (100 * DiffBenefitB0(K, C, ETA, theta, r, deltag, NF, sm, ro/100.0)),
## print
#print "%01.1f\t" % (100 * DiffBenefitB0(K, C, ETA, ETA, r, deltag, NF, sm, 0.0))
__author__ = 'Dessalles'
| 31.293907
| 195
| 0.611499
| 2,588
| 0.296415
| 0
| 0
| 0
| 0
| 0
| 0
| 3,460
| 0.396289
|
5a1c596e4aa4f0daea1821382fed5edc2f1a2f2c
| 15,459
|
py
|
Python
|
server/graph.py
|
Alpacron/vertex-cover
|
cfdace128f1578f9613e30990b9a87cc64ffb988
|
[
"MIT"
] | null | null | null |
server/graph.py
|
Alpacron/vertex-cover
|
cfdace128f1578f9613e30990b9a87cc64ffb988
|
[
"MIT"
] | 15
|
2021-04-03T08:28:58.000Z
|
2021-06-07T15:08:08.000Z
|
server/graph.py
|
Alpacron/vertex-cover
|
cfdace128f1578f9613e30990b9a87cc64ffb988
|
[
"MIT"
] | 1
|
2021-05-21T13:16:51.000Z
|
2021-05-21T13:16:51.000Z
|
import json
import random
class Graph:
    """
    Graph data structure G = (V, E). Vertices contain the information about the edges.

    `self.graph` maps str(vertex) -> list of neighbouring vertex ids (ints);
    every undirected edge appears in the adjacency list of both endpoints.

    Fixes in this revision:
      * highest_degree_vertex() now tracks the running maximum degree
        (previously its `k` was never updated, so the last vertex was returned).
      * decrease_isolated_vertices() no longer raises IndexError when the
        graph has no isolated vertex.
      * approximation() compares vertex ids with != instead of `is not`
        (identity comparison on ints is unreliable outside the small-int cache).
    """
    def __init__(self, graph=None):
        """Normalize the input mapping; accepts weighted ([v, w]) or plain neighbour lists."""
        if graph is None:
            graph = {}
        is_weighted = graph is not None and any(
            True for x in graph if len(graph[x]) > 0 and isinstance(graph[x][0], list))
        graph2 = {}
        for vertex in graph.keys():
            if is_weighted:
                # Weighted form stores [vertex, weight]; keep only the vertex id.
                graph2.update({str(vertex): [int(e[0]) for e in graph[vertex]]})
            else:
                graph2.update({str(vertex): [int(e) for e in graph[vertex]]})
        self.graph = graph2
    def __str__(self):
        return json.dumps(self.graph)
    def to_adj_matrix(self):
        """Return the adjacency matrix (2 marks a self-loop on the diagonal).

        NOTE(review): rows follow lexicographic key order ("10" < "2"), not
        numeric order — confirm callers expect this.
        """
        keys = sorted(self.graph.keys())
        size = len(keys)
        matrix = [[0] * size for _ in range(size)]
        for a, b in [(keys.index(str(a)), keys.index(str(b))) for a, row in self.graph.items() for b in row]:
            matrix[a][b] = 2 if (a == b) else 1
        return matrix
    def generate_graph(self, n: int, p: float):
        """
        Initialize from n vertices; each possible edge is added with probability p.
        """
        # Add vertices
        for i in range(n):
            self.add_vertex(i)
        # Add edges according to probability
        e = [False, True]
        probability = [1 - p, p]
        for v in self.vertices():
            for u in self.vertices():
                if u > v and not self.is_connected(u, v) and random.choices(e, probability)[0]:
                    self.add_edge(u, v)
        return self.graph
    def vertices(self):
        """
        Returns a list of all vertices in the graph.
        """
        return [int(i) for i in self.graph]
    def edges(self):
        """
        Returns a list of all edges in the graph (each undirected edge exactly once).
        """
        edges = []
        for vertex in self.graph:
            for neighbour in self.graph[vertex]:
                if not ((int(neighbour), int(vertex)) in edges or (int(vertex), int(neighbour)) in edges):
                    edges += [(int(vertex), int(neighbour))]
        return edges
    def add_vertex(self, u: int):
        """
        Add a vertex to the graph (no-op if already present).
        """
        if u not in self.vertices():
            self.graph[str(u)] = []
    def remove_vertex(self, u: int):
        """
        Remove vertex from graph.

        NOTE(review): references to u in other adjacency lists are not removed.
        """
        if u in self.vertices():
            del self.graph[str(u)]
    def add_edge(self, u: int, v: int):
        """
        Add an edge to the graph (both endpoints must exist).
        """
        assert u in self.vertices() and v in self.vertices()
        self.graph[str(u)].append(v)
        self.graph[str(v)].append(u)
    def remove_edge(self, u: int, v: int):
        """
        Remove an edge from the graph.
        """
        assert u in self.vertices() and v in self.vertices()
        self.graph[str(u)].remove(v)
        self.graph[str(v)].remove(u)
    def remove_all_edges(self, v: int):
        """Remove every edge incident on v."""
        if v in self.vertices():
            edges = list(self.graph[str(v)])
            for e in edges:
                self.remove_edge(e, v)
    def is_connected(self, u: int, v: int):
        """
        Check if two vertices share an edge.
        """
        assert u in self.vertices() and v in self.vertices()
        if v not in self.graph[str(u)]:
            return False
        return True
    def connect_two_random_vertices(self):
        """
        Randomly connect two vertices (no-op when the graph is complete).
        """
        vertices = [v for v in self.vertices() if len(self.graph[str(v)]) < len(self.vertices()) - 1]
        if len(vertices) > 0:
            v1 = random.choice(vertices)
            items = [v for v in vertices if v not in [v1] + self.graph[str(v1)]]
            if len(items) > 0:
                v2 = random.choice(items)
                if not self.is_connected(v1, v2):
                    self.add_edge(v1, v2)
    def connect_vertex_to_random(self, v: int):
        """Connect v to a random non-neighbour, preferring currently isolated vertices."""
        assert v in self.vertices()
        vertices = [u for u in self.vertices() if
                    len(self.graph[str(u)]) < len(self.vertices()) - 1 and u not in [v] + self.graph[str(v)]]
        if len(vertices) > 0:
            v2 = random.choice(vertices)
            not_connected = [u for u in vertices if len(self.graph[str(u)]) == 0]
            if len(not_connected) > 0:
                v2 = random.choice(not_connected)
            if not self.is_connected(v, v2):
                self.add_edge(v, v2)
    def remove_random_edge(self, v: int):
        """Remove one randomly chosen edge incident on v (no-op if v has none)."""
        vertices = [u for u in self.vertices() if u in self.graph[str(v)]]
        if len(vertices) > 0:
            self.remove_edge(v, random.choice(vertices))
    def find_sub_graph(self, vertex: int, sub_graph: [int]):
        """
        Find subgraph connected to vertex (depth-first; `sub_graph` must contain vertex).
        """
        for i in self.graph[str(vertex)]:
            if i not in sub_graph:
                sub_graph = self.find_sub_graph(i, sub_graph + [i])
        return sub_graph
    def connect_all_sub_graphs(self):
        """
        Find all disconnected sub graphs, select a random vertex in each of them and add an edge between
        those two vertices. Repeats until the graph is connected.
        """
        vertex = random.choice(self.vertices())
        while True:
            sub = self.find_sub_graph(vertex, [vertex])
            if len(sub) == len(self.vertices()):
                break
            for v in self.vertices():
                if v not in sub:
                    self.add_edge(random.choice(sub), v)
                    break
    def connect_two_sub_graphs(self):
        """
        Find two disconnected sub graphs, select a random vertex in each of them and add an edge between
        those two vertices.
        """
        vertices = self.vertices()
        vertex = random.choice(vertices)
        sub = self.find_sub_graph(vertex, [vertex])
        for v in vertices:
            if v not in sub:
                self.add_edge(random.choice(sub), v)
                break
    def vertex_cover_brute(self, k: int, depth: int = 1, vertices: [int] = None, edges: [(int, int)] = None,
                           best: [int] = None, best_covered: [(int, int)] = None,
                           current: [int] = None, current_covered: [(int, int)] = None):
        """
        Find minimum required vertices that cover all edges.

        k == -1 searches for a minimum cover; k >= 0 searches for the best
        cover of exactly k vertices. Returns (cover, covered_edges).
        """
        # All edges in graph
        if edges is None:
            edges = self.edges()
        # All vertices in graph
        if vertices is None:
            vertices = self.vertices()
        # Best case result [vertex]
        if best is None:
            best = []
        # Edges best vertices cover [(vertex, vertex)]
        if best_covered is None:
            best_covered = []
        # Current result in recursion [vertex]
        if current is None:
            current = []
        # Edges current vertices in recursion cover [(vertex, vertex)]
        if current_covered is None:
            current_covered = []
        # If there are more vertices > k, return all vertices
        if k >= len(vertices):
            return vertices, edges
        # If current has less vertices than result and contains all edges, return
        if k == -1 and len(current_covered) == len(edges) and (best == [] or len(current) < len(best)):
            return current, current_covered
        # If k is equal to current and current covers more edges than best, return
        if k == len(current) and len(current_covered) > len(best_covered):
            return current, current_covered
        # Get all vertices that have not been covered and shuffle them
        ver = [u for u in vertices if len(current) == 0 or u > current[-1]]
        random.shuffle(ver)
        # Recursively do this for all vertices, until a solution is found.
        if (k == -1 or len(current) < k) and (best == [] or len(current) < len(best)):
            for v in ver:
                c = current_covered + [e for e in self.vertex_cover(v, depth) if
                                       not (e in current_covered or (e[1], e[0]) in current_covered)]
                best, best_covered = self.vertex_cover_brute(k, depth, vertices, edges,
                                                             best, best_covered, current + [v], c)
        return best, best_covered
    def vertex_cover(self, v: int, reach: int = 1, current_depth: int = 0, covered: [(int, int)] = None):
        """Return the edges reachable from v within `reach` hops (as (from, to) pairs)."""
        if covered is None:
            covered = []
        if current_depth < reach:
            for u in [e for e in self.graph[str(v)] if not ((v, e) in covered or (e, v) in covered)]:
                covered = self.vertex_cover(u, reach, current_depth + 1, covered + [(v, u)])
        return covered
    def increase_pendant_vertices(self):
        """Mutate a random non-pendant vertex (adding/removing edges) until it is pendant."""
        non_pendant_vertices = [u for u in self.vertices() if not self.is_pendant(u)]
        if len(non_pendant_vertices) > 0:
            v = random.choice(non_pendant_vertices)
            while not self.is_pendant(v):
                remaining_non_pendant_vertices = [u for u in self.graph[str(v)] if
                                                  not self.is_pendant(u) and not u == v]
                if len(remaining_non_pendant_vertices) > 0:
                    if self.degree(v) > 1:
                        self.remove_edge(v, random.choice(remaining_non_pendant_vertices))
                    else:
                        self.add_edge(v, random.choice(remaining_non_pendant_vertices))
                else:
                    if self.degree(v) > 1:
                        self.remove_edge(v, random.choice(self.graph[str(v)]))
                    else:
                        self.connect_vertex_to_random(v)
    def decrease_pendant_vertices(self):
        """Turn one random pendant vertex into an isolated vertex."""
        pendant_vertices = [v for v in self.vertices() if self.is_pendant(v)]
        if len(pendant_vertices) > 0:
            vertex = random.choice(pendant_vertices)
            self.remove_edge(vertex, random.choice(self.graph[str(vertex)]))
    def increase_tops_vertices(self, k: int):
        """Add edges to a random non-tops vertex until its degree exceeds k."""
        non_tops_vertices = [v for v in self.vertices() if not self.is_tops(v, k)]
        if len(non_tops_vertices) > 0:
            v = random.choice(non_tops_vertices)
            while not self.is_tops(v, k) and self.degree(v) + 1 < len(self.vertices()):
                self.connect_vertex_to_random(v)
    def decrease_tops_vertices(self, k: int):
        """Remove edges from a random tops vertex until its degree is <= k."""
        tops_vertices = [v for v in self.vertices() if self.is_tops(v, k)]
        if len(tops_vertices) > 0:
            v = random.choice(tops_vertices)
            while self.is_tops(v, k) and self.degree(v) > 0:
                self.remove_random_edge(v)
    def decrease_isolated_vertices(self):
        """Connect one isolated vertex to another vertex.

        Bug fix: guards against an empty candidate list, which previously
        raised IndexError from random.choice.
        """
        isolated_vertices = [v for v in self.vertices() if self.is_isolated(v)]
        if len(isolated_vertices) > 0:
            self.connect_vertex_to_random(random.choice(isolated_vertices))
    def increase_isolated_vertices(self):
        """Isolate one random non-isolated vertex by removing all its edges."""
        non_isolated_vertices = [v for v in self.vertices() if not self.is_isolated(v)]
        if len(non_isolated_vertices) > 0:
            v = random.choice(non_isolated_vertices)
            self.remove_all_edges(v)
    def degree(self, v: int, depth: int = 1):
        """Degree of v; for depth > 1 counts edges reachable within `depth` hops."""
        if depth == 1:
            return len(self.graph[str(v)])
        return len(self.vertex_cover(v, depth))
    def is_isolated(self, vertex: int):
        return self.degree(vertex) == 0
    def is_pendant(self, vertex: int):
        return self.degree(vertex) == 1
    def is_tops(self, vertex: int, k: int):
        return self.degree(vertex) > k
    def highest_degree_vertex(self, vertices: [int] = None):
        """Return a vertex of maximal degree among `vertices`.

        Bug fix: the original never updated its running maximum, so every
        vertex passed the `> -1` test and the *last* vertex was returned.
        """
        if vertices is None:
            vertices = self.vertices()
        best_degree = -1
        vertex = random.choice(vertices)
        for v in vertices:
            if len(self.graph[str(v)]) > best_degree:
                best_degree = len(self.graph[str(v)])
                vertex = v
        return vertex
    def visualize_kernelization(self, k: int):
        """Classify vertices as isolated / pendant / tops(k) for display."""
        isolated = [v for v in self.vertices() if self.is_isolated(v)]
        pendant = [v for v in self.vertices() if self.is_pendant(v)]
        tops = [v for v in self.vertices() if self.is_tops(v, k)]
        return {"isolated": isolated, "pendant": pendant, "tops": tops}
    def kernelization(self, k: int):
        """Apply the classic vertex-cover kernelization rules; returns (graph, forced_cover)."""
        covered = []
        # 1. If k > 0 and v is a vertex of degree greater than k, remove v from the graph and decrease k by one.
        # Every vertex cover of size k must contain v, since other wise too many of its neighbours would have to
        # be picked to cover the incident edges. Thus, an optimal vertex cover for the original graph may be
        # formed from a cover of the reduced problem by adding v back to the cover.
        while k > 0:
            # Get all tops for k.
            tops = [v for v in self.vertices() if self.is_tops(v, k)]
            # If tops is not empty.
            if len(tops) > 0:
                # Take the first tops vertex and decrease k by one.
                v = tops[0]
                self.remove_vertex(v)
                covered.append(v)
                k -= 1
            else:
                break
        # 2. If v is an isolated vertex, remove it. Since, any v cannot cover any edges it is not a part of the
        # minimal vertex cover.
        isolated = [v for v in self.vertices() if self.is_isolated(v)]
        for vertex in isolated:
            self.remove_vertex(vertex)
        # 3. If more than k^2 edges remain in the graph, and neither of the previous two rules can be applied,
        # then the graph cannot contain a vertex cover of size k. For, after eliminating all vertices of degree
        # greater than k, each remaining vertex can only cover at most k edges and a set of k vertices could only
        # cover at most k^2 edges. In this case, the instance may be replaced by an instance with two vertices,
        # one edge, and k = 0, which also has no solution.
        if len(self.edges()) > k ** 2 and k != -1:
            return {}, None
        return self.graph, covered
    def approximation(self):
        """2-approximation for vertex cover: repeatedly take both ends of an arbitrary edge."""
        # Initialize the empty cover.
        cover = []
        edges = self.edges()
        # Consider a set of all edges in a graph.
        while edges:
            # Pick an arbitrary edges (u, v) from that set and add both u and v to the cover.
            u, v = random.choice(edges)
            cover.append(u)
            cover.append(v)
            # Remove all edges from that set that are incident on u or v.
            # Bug fix: compare ids with != rather than `is not` — identity
            # comparison on ints only works inside CPython's small-int cache.
            edges = [e for e in edges if e[0] != u and e[0] != v and e[1] != u and e[1] != v]
        # Return the result
        return cover
    def tree_approximation(self):
        """Tree approximation: repeatedly add all parents of leaves to the cover."""
        # Initialize the empty cover
        cover = []
        leaves = [v for v in self.vertices() if self.is_pendant(v)]
        parents = [node for parents in [self.graph[str(leave)] for leave in leaves] for node in parents]
        # While there exists leaves in the graph.
        while leaves:
            # Add all parents to the cover.
            for parent in parents:
                cover.append(parent)
            # Remove all leaves and their parents from the graph.
            for node in leaves + parents:
                self.remove_all_edges(node)
                self.remove_vertex(node)
            # Recalculate leaves and parents
            leaves = [node for node in self.vertices() if self.is_pendant(node)]
            parents = [node for parents in [self.graph[str(leave)] for leave in leaves] for node in parents]
        return cover
| 37.982801
| 113
| 0.559545
| 15,430
| 0.998124
| 0
| 0
| 0
| 0
| 0
| 0
| 3,129
| 0.202406
|
5a1caac07eb9f441668b6c4d0592a3fd8fa4aefc
| 576
|
py
|
Python
|
ex4.py
|
AyeAyeZin/python_exercises
|
77079dcd7809dd2967180ffd30df0166dd53edb4
|
[
"MIT"
] | null | null | null |
ex4.py
|
AyeAyeZin/python_exercises
|
77079dcd7809dd2967180ffd30df0166dd53edb4
|
[
"MIT"
] | null | null | null |
ex4.py
|
AyeAyeZin/python_exercises
|
77079dcd7809dd2967180ffd30df0166dd53edb4
|
[
"MIT"
] | null | null | null |
# Fleet / carpool arithmetic exercise.
# Fixes: the original crashed with NameError three times —
#   * `space_in_a_car` was never defined (the value lives in `cars_in_space`),
#   * `passengers` was misspelled `pasengers` at definition time,
#   * `average_passengers_percar` was defined but printed as
#     `average_passengers_per_car`.
# All names are now consistent; computed values are unchanged.
cars = 100
cars_in_space = 5          # seats available per car
drivers = 20
passengers = 70
car_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * cars_in_space
average_passengers_per_car = passengers / cars_driven
print("There are", cars, "cars availble")
print("There are only", drivers, "drivers availble")
print("There will be", car_not_driven, "empty cars today")
print("There are", cars_in_space, "space availble in car")
print("We can transport", carpool_capacity, "peopletoday.")
print("We have", passengers, "to carpool today.")
print("We need to put about", average_passengers_per_car, "in each car.")
| 36
| 72
| 0.805556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 223
| 0.387153
|
5a1cb185553a265ef90a6017854334865e3cc339
| 304
|
py
|
Python
|
python_docs/05Functions/01Definition.py
|
Matheus-IT/lang-python-related
|
dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9
|
[
"MIT"
] | null | null | null |
python_docs/05Functions/01Definition.py
|
Matheus-IT/lang-python-related
|
dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9
|
[
"MIT"
] | null | null | null |
python_docs/05Functions/01Definition.py
|
Matheus-IT/lang-python-related
|
dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9
|
[
"MIT"
] | null | null | null |
# Definição de função
def soma(a2, b2):
    """Print both operands and their sum (parameters deliberately named
    differently from the module-level globals `a` and `b`)."""
    print(f'A = {a2} e B = {b2}')
    total = a2 + b2
    print(f'A soma vale A + B = {total}')
# Main program: read two integers from stdin and print their sum via soma().
a = int(input('Digite um valor para A: '))  # first operand
b = int(input('Digite um valor para B: '))  # second operand
soma(a, b)
| 25.333333
| 63
| 0.582237
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 193
| 0.624595
|
5a1d0c02ec27af98a78a24a6f4e896b2268b6a0f
| 852
|
py
|
Python
|
python/number.py
|
Dahercode/datalumni-test
|
9587400bddafd1c32e97655727c5d3dbbfd17574
|
[
"MIT"
] | 1
|
2020-02-18T16:56:38.000Z
|
2020-02-18T16:56:38.000Z
|
python/number.py
|
Dahercode/datalumni-test
|
9587400bddafd1c32e97655727c5d3dbbfd17574
|
[
"MIT"
] | null | null | null |
python/number.py
|
Dahercode/datalumni-test
|
9587400bddafd1c32e97655727c5d3dbbfd17574
|
[
"MIT"
] | null | null | null |
# Find the "mystery number": the unique value in 0..999 satisfying every rule
# below. Rewritten idiomatically — the original built seven intermediate lists
# (tab..tab6), each a full pass over the previous one; a single filtering loop
# produces the identical result.
tab7 = []
for num in range(1000):
    digits = str(num)
    # Rule 1: digit sum at most 10.
    if sum(int(d) for d in digits) > 10:
        continue
    # Rules 2+3: at least two digits and the second-to-last digit is 4.
    # (The original applied the index check before the length check, relying on
    # negative-index wraparound for single-digit numbers; the combined effect
    # is exactly this condition.)
    if len(digits) < 2 or digits[-2] != '4':
        continue
    # Rule 4: contains neither 7 nor 1.
    if '7' in digits or '1' in digits:
        continue
    # Rule 5: the sum of the first two digits is odd.
    if (int(digits[0]) + int(digits[1])) % 2 == 0:
        continue
    # Rule 6: the last digit equals the number of digits.
    if digits[-1] != str(len(digits)):
        continue
    tab7.append(num)
print(tab7)
mystery_number = tab7[0]
print(f'Le nombre mystère est le : {mystery_number}')
| 18.933333
| 53
| 0.543427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 77
| 0.09027
|
5a1d385aaac2b104c89e97a052215f1dccd44141
| 3,885
|
py
|
Python
|
backend/src/baserow/contrib/database/migrations/0016_token_tokenpermission.py
|
ashishdhngr/baserow
|
b098678d2165eb7c42930ee24dc6753a3cb520c3
|
[
"MIT"
] | null | null | null |
backend/src/baserow/contrib/database/migrations/0016_token_tokenpermission.py
|
ashishdhngr/baserow
|
b098678d2165eb7c42930ee24dc6753a3cb520c3
|
[
"MIT"
] | null | null | null |
backend/src/baserow/contrib/database/migrations/0016_token_tokenpermission.py
|
ashishdhngr/baserow
|
b098678d2165eb7c42930ee24dc6753a3cb520c3
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.11 on 2020-10-23 08:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (Django 2.2.11, 2020-10-23).

    Adds the ``Token`` model (per-user API keys scoped to a group) and the
    ``TokenPermission`` model (CRUD rights of a token on a database/table).

    NOTE: generated code — change the schema via a new migration rather than
    by editing this file in place.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("core", "0001_initial"),
        ("database", "0015_emailfield"),
    ]
    operations = [
        migrations.CreateModel(
            name="Token",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "name",
                    models.CharField(
                        max_length=100,
                        help_text="The human readable name of the token for the user.",
                    ),
                ),
                (
                    "key",
                    models.CharField(
                        db_index=True,
                        max_length=32,
                        unique=True,
                        help_text="The unique token key that can be used to authorize "
                        "for the table row endpoints.",
                    ),
                ),
                ("created", models.DateTimeField(auto_now=True)),
                (
                    "group",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="core.Group",
                        help_text="Only the tables of the group can be accessed.",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        help_text="The user that owns the token.",
                    ),
                ),
            ],
            options={
                "ordering": ("id",),
            },
        ),
        migrations.CreateModel(
            name="TokenPermission",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "type",
                    models.CharField(
                        choices=[
                            ("create", "Create"),
                            ("read", "Read"),
                            ("update", "Update"),
                            ("delete", "Delete"),
                        ],
                        max_length=6,
                    ),
                ),
                (
                    "database",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="database.Database",
                    ),
                ),
                (
                    "table",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="database.Table",
                    ),
                ),
                (
                    "token",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="database.Token"
                    ),
                ),
            ],
        ),
    ]
| 32.647059
| 88
| 0.344916
| 3,725
| 0.958816
| 0
| 0
| 0
| 0
| 0
| 0
| 548
| 0.141055
|
5a1f561a631ec5529e54bc7090d1958be5eb6f6f
| 1,639
|
py
|
Python
|
setup.py
|
davidharvey1986/rrg
|
26b4658f14279af21af1a61d57e9936daf315a71
|
[
"MIT"
] | 2
|
2019-11-18T12:51:09.000Z
|
2019-12-11T03:13:51.000Z
|
setup.py
|
davidharvey1986/rrg
|
26b4658f14279af21af1a61d57e9936daf315a71
|
[
"MIT"
] | 5
|
2017-06-09T10:06:27.000Z
|
2019-07-19T11:28:18.000Z
|
setup.py
|
davidharvey1986/rrg
|
26b4658f14279af21af1a61d57e9936daf315a71
|
[
"MIT"
] | 2
|
2017-07-19T15:48:33.000Z
|
2017-08-09T16:07:20.000Z
|
#!/usr/local/bin/python3
"""Packaging script for pyRRG (HST galaxy shape measurement via RRG)."""
import sys,os,string,glob,subprocess
from setuptools import setup,Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.install import install
import numpy
long_description = """\
This module uses the RRG method to measure the shapes of galaxies
in Hubble Space Telescope data
"""
#sudo python3 setup.py sdist upload -r pypi
version = '0.1.2'
INCDIRS = ['.']
packages = ['pyRRG', 'RRGtools', 'asciidata']
package_dir = {'RRGtools': './lib/RRGtools',
               'pyRRG': './src',
               'asciidata': './lib/asciidata'}
# Non-Python payload shipped with the package (PSF library, SExtractor config).
package_data = {'pyRRG': ['psf_lib/*/*', 'sex_files/*', '*.pkl']}
setup(name="pyRRG",
      version=version,
      author="David Harvey",
      author_email="david.harvey@epfl.ch",
      description="pyRRG module",
      # Fix: long_description was defined above but never passed to setup(),
      # so PyPI had no long description for the package.
      long_description=long_description,
      license='MIT',
      packages=packages,
      package_dir=package_dir,
      package_data=package_data,
      scripts=['scripts/pyRRG'],
      url='https://github.com/davidharvey1986/pyRRG',  # use the URL to the github repo
      download_url='https://github.com/davidharvey1986/pyRRG/archive/' + version + '.tar.gz',
      install_requires=['scikit-learn',
                        'numpy',
                        'ipdb', 'pyraf',
                        'scipy'],
      )
| 32.78
| 101
| 0.494814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 626
| 0.38194
|
5a2032ec76e617ec97b415d06f3a42408d534a65
| 635
|
py
|
Python
|
restbed/core/api.py
|
mr-tenders/restbed
|
68d36536286203048ce01f1467d3db7ee108bebb
|
[
"MIT"
] | null | null | null |
restbed/core/api.py
|
mr-tenders/restbed
|
68d36536286203048ce01f1467d3db7ee108bebb
|
[
"MIT"
] | null | null | null |
restbed/core/api.py
|
mr-tenders/restbed
|
68d36536286203048ce01f1467d3db7ee108bebb
|
[
"MIT"
] | null | null | null |
"""
restbed core api
"""
import pyinsane2
from typing import List
class CoreApi(object):
    # Scanner selected by select_device(); None until then.
    scanner: pyinsane2.Scanner = None
    # Scan session created by start_scanning(); None until then.
    scan_session: pyinsane2.ScanSession = None
    @staticmethod
    def initialize():
        """
        initialize SANE, don't call if unit testing
        (well that's the hope...)
        """
        pyinsane2.init()
    def select_device(self):
        # Picks the first device SANE reports; multi-scanner setups are not
        # handled. Raises IndexError if no scanner is attached.
        devs: List[pyinsane2.Scanner] = pyinsane2.get_devices()
        self.scanner = devs[0]
        return self.scanner
    def start_scanning(self):
        # Expand to the full scan bed, then start a session on the selected
        # scanner. Requires select_device() to have been called first.
        pyinsane2.maximize_scan_area(self.scanner)
        self.scan_session = self.scanner.scan()
| 22.678571
| 63
| 0.63937
| 566
| 0.891339
| 0
| 0
| 170
| 0.267717
| 0
| 0
| 125
| 0.19685
|
5a20458f16a895f14563ad81b494f0d3c1292dbf
| 736
|
py
|
Python
|
secure_notes_client/thread_pool.py
|
rlee287/secure-notes-client
|
56d5fcce1d2eeb46de22aac63131fe7214b6f185
|
[
"MIT"
] | null | null | null |
secure_notes_client/thread_pool.py
|
rlee287/secure-notes-client
|
56d5fcce1d2eeb46de22aac63131fe7214b6f185
|
[
"MIT"
] | 4
|
2019-07-10T01:34:12.000Z
|
2019-08-20T01:52:31.000Z
|
secure_notes_client/thread_pool.py
|
rlee287/secure-notes-client
|
56d5fcce1d2eeb46de22aac63131fe7214b6f185
|
[
"MIT"
] | null | null | null |
from concurrent.futures import ThreadPoolExecutor
import time
from PySide2.QtCore import QCoreApplication
# Module-level singleton executor; None until init_thread_pool() is called.
thread_pool=None
def init_thread_pool():
    """Create the shared ThreadPoolExecutor (call once at startup)."""
    global thread_pool
    thread_pool=ThreadPoolExecutor()
def deinit_thread_pool():
    """Shut down the shared executor, waiting for queued tasks to finish."""
    global thread_pool
    thread_pool.shutdown()
def submit_task(function, *args, **kwargs):
    """Schedule function(*args, **kwargs) on the shared pool; returns a Future."""
    # Reading the module global needs no `global` declaration.
    return thread_pool.submit(function, *args, **kwargs)
#TODO: find a less hacky way to keep events processed
def async_run_await_result(function,*args,**kwargs):
    """Run `function` on the shared pool, blocking until it finishes while
    keeping the Qt event loop responsive (polls every 0.1 s)."""
    future_obj=thread_pool.submit(function,*args,**kwargs)
    while not future_obj.done():
        # Pump Qt events so the GUI does not freeze during the busy-wait.
        QCoreApplication.processEvents()
        time.sleep(0.1)
    # Re-raises any exception the task raised.
    future_result=future_obj.result()
    return future_result
| 26.285714
| 58
| 0.762228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 0.072011
|
5a2274118fccaff1a7fc9becbc4b24e208209e91
| 10,463
|
py
|
Python
|
Fairness_attack/data_utils.py
|
Ninarehm/attack
|
0d5a6b842d4e81484540151d879036e9fe2184f1
|
[
"MIT"
] | 8
|
2021-03-08T17:13:42.000Z
|
2022-03-31T00:57:53.000Z
|
Fairness_attack/data_utils.py
|
lutai14/attack
|
773024c7b86be112521a2243f2f809a54891c81f
|
[
"MIT"
] | null | null | null |
Fairness_attack/data_utils.py
|
lutai14/attack
|
773024c7b86be112521a2243f2f809a54891c81f
|
[
"MIT"
] | 1
|
2022-02-10T22:36:11.000Z
|
2022-02-10T22:36:11.000Z
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import numpy as np
import scipy.sparse as sparse
import defenses
import upper_bounds
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes a few numpy types: 1-D arrays become
    lists, numpy floats become native floats, and int16 values become
    strings (original behavior, preserved as-is)."""
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            assert len(np.shape(obj)) == 1  # Can only handle 1D ndarrays
            return obj.tolist()
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.int16):
            return str(obj)
        # Anything else falls through to the base class (raises TypeError).
        return super(NumpyEncoder, self).default(obj)
def get_class_map():
    """Map the binary labels -1/+1 to row indices 0/1."""
    return {-1: 0, 1: 1}
def get_centroids(X, Y, class_map):
    """Per-class feature means; row class_map[y] holds the centroid of class y."""
    centroids = np.zeros((len(set(Y)), X.shape[1]))
    for label in set(Y):
        centroids[class_map[label], :] = X[Y == label, :].mean(axis=0)
    return centroids
def get_centroid_vec(centroids):
    """Unit vector from centroid 1 toward centroid 0, shaped (1, d)."""
    assert centroids.shape[0] == 2
    diff = centroids[0, :] - centroids[1, :]
    return (diff / np.linalg.norm(diff)).reshape(1, -1)
# Can speed this up if necessary
def get_sqrt_inv_cov(X, Y, class_map):
    """Per-class inverse square root of the (1e-6-regularized) covariance.

    For each class y, computes U diag(S)^(-1/2) U^T from the SVD of
    cov(X_y) + 1e-6*I, stored at index class_map[y].
    """
    num_features = X.shape[1]
    sqrt_inv_covs = np.zeros((len(set(Y)), num_features, num_features))
    for label in set(Y):
        cov = np.cov(X[Y == label, :], rowvar=False)
        U, S, _ = np.linalg.svd(cov + 1e-6 * np.eye(num_features))
        print(' min eigenvalue of cov after 1e-6 reg is %s' % np.min(S))
        sqrt_inv_covs[class_map[label], ...] = U.dot(np.diag(1 / np.sqrt(S)).dot(U.T))
    return sqrt_inv_covs
# Can speed this up if necessary
def get_data_params(X, Y, percentile):
    """Feasible-set parameters for the sphere and slab defenses.

    Returns (class_map, centroids, centroid_vec, sphere_radii, slab_radii)
    where each radius is the given percentile of the class's distances.
    """
    class_map = get_class_map()
    centroids = get_centroids(X, Y, class_map)
    # Sphere radii: percentile of L2 distances to the class centroid.
    sphere_radii = np.zeros(2)
    dists = defenses.compute_dists_under_Q(
        X, Y,
        Q=None,
        centroids=centroids,
        class_map=class_map,
        norm=2)
    for label in set(Y):
        sphere_radii[class_map[label]] = np.percentile(dists[Y == label], percentile)
    # Slab radii: percentile of distances along the inter-centroid direction.
    centroid_vec = get_centroid_vec(centroids)
    slab_radii = np.zeros(2)
    for label in set(Y):
        proj_dists = np.abs(
            X[Y == label, :].dot(centroid_vec.T)
            - centroids[class_map[label], :].dot(centroid_vec.T))
        slab_radii[class_map[label]] = np.percentile(proj_dists, percentile)
    return class_map, centroids, centroid_vec, sphere_radii, slab_radii
def vstack(A, B):
    """Stack A on top of B, staying sparse (CSR) if either input is sparse."""
    if sparse.issparse(A) or sparse.issparse(B):
        return sparse.vstack((A, B), format='csr')
    return np.concatenate((A, B), axis=0)
def add_points(x, y, X, Y, num_copies=1):
    """Append num_copies copies of row x (label y) to (X, Y).

    Returns the inputs unchanged when num_copies == 0; preserves sparsity of X.
    """
    if num_copies == 0:
        return X, Y
    x = np.array(x).reshape(-1)
    tiled = np.tile(x, num_copies).reshape(-1, len(x))
    if sparse.issparse(X):
        X_modified = sparse.vstack((X, sparse.csr_matrix(tiled)))
    else:
        X_modified = np.append(X, tiled, axis=0)
    return X_modified, np.append(Y, np.tile(y, num_copies))
def copy_random_points(X, Y, mask_to_choose_from=None, target_class=1, num_copies=1,
                       random_seed=18, replace=False):
    """Duplicate num_copies randomly chosen rows of target_class.

    Candidates are rows of the target class, further restricted to
    mask_to_choose_from when given. Seeds the global numpy RNG.
    """
    np.random.seed(random_seed)
    eligible = (np.array(Y, dtype=int) == target_class)
    if mask_to_choose_from is not None:
        eligible = eligible & mask_to_choose_from
    idx_to_copy = np.random.choice(
        np.where(eligible)[0],
        size=num_copies,
        replace=replace)
    if sparse.issparse(X):
        X_modified = sparse.vstack((X, X[idx_to_copy, :]))
    else:
        X_modified = np.append(X, X[idx_to_copy, :], axis=0)
    return X_modified, np.append(Y, Y[idx_to_copy])
def threshold(X):
    """Clamp negative entries of X to zero.

    Bug fix: the original used np.clip(X, 0, np.max(X)), which is identical
    whenever max(X) >= 0 but collapses an all-negative array to max(X)
    everywhere (np.clip returns the upper bound when lower > upper).
    np.maximum implements the intended clamp-below-zero directly.
    """
    return np.maximum(X, 0)
def rround(X, random_seed=3, return_sparse=True):
    """Randomized rounding: round each entry up with probability equal to its
    fractional part, down otherwise.

    Bug fix: random_seed was accepted but never used, so results were not
    reproducible. The global numpy RNG is now seeded with it (note: this
    mutates global RNG state, matching the convention of
    copy_random_points() above).
    """
    np.random.seed(random_seed)
    if sparse.issparse(X):
        X = X.toarray()
    X_frac, X_int = np.modf(X)
    # Each entry rounds up iff an independent uniform draw falls below its
    # fractional part.
    X_round = X_int + (np.random.random_sample(X.shape) < X_frac)
    if return_sparse:
        return sparse.csr_matrix(X_round)
    return X_round
def rround_with_repeats(X, Y, repeat_points, random_seed=3, return_sparse=True):
    """Randomized rounding where, within each class, every run of
    `repeat_points` consecutive rows is forced to equal the first rounded row
    of that run (rows are grouped per class in encounter order)."""
    X_round = rround(X, random_seed=random_seed, return_sparse=return_sparse)
    assert Y.shape[0] == X.shape[0]
    if repeat_points > 1:
        # Independent position counters per class.
        pos_idx = 0
        neg_idx = 0
        for i in range(X_round.shape[0]):
            if Y[i] == 1:
                if pos_idx % repeat_points == 0:
                    # First row of a positive-class run: keep it as the template.
                    last_pos_x = X_round[i, :]
                else:
                    X_round[i, :] = last_pos_x
                pos_idx += 1
            else:
                if neg_idx % repeat_points == 0:
                    # First row of a negative-class run: keep it as the template.
                    last_neg_x = X_round[i, :]
                else:
                    X_round[i, :] = last_neg_x
                neg_idx += 1
    return X_round
def project_onto_sphere(X, Y, radii, centroids, class_map):
    """Project each class's points back inside its L2 ball around the class
    centroid (modifies X in place and returns it)."""
    for label in set(Y):
        idx = class_map[label]
        radius = radii[idx]
        center = centroids[idx, :]
        offsets = X[Y == label, :] - center
        norms = np.linalg.norm(offsets, axis=1)
        outside = norms > radius
        # Rescale only the offending rows back onto the sphere surface.
        offsets[outside, :] *= radius / np.reshape(norms[outside], (-1, 1))
        X[Y == label, :] = offsets + center
        print("Number of (%s) points projected onto sphere: %s" % (label, np.sum(outside)))
    return X
def project_onto_slab(X, Y, v, radii, centroids, class_map):
    """
    v^T x needs to be within radius of v^T centroid.
    v is 1 x d and normalized.

    Modifies X in place and returns it.
    """
    v = (v / np.linalg.norm(v)).reshape(1, -1)
    for label in set(Y):
        idx = class_map[label]
        radius = radii[idx]
        center = centroids[idx, :]
        # Signed distance along v: positive when v^T x is too large,
        # negative when too small.
        dists_along_v = (X[Y == label, :] - center).dot(v.T)
        excess = np.reshape(
            dists_along_v - np.clip(dists_along_v, -radius, radius),
            (1, -1))
        X[Y == label, :] -= excess.T.dot(v)
        print("Number of (%s) points projected onto slab: %s" % (label, np.sum(np.abs(dists_along_v) > radius)))
    return X
def get_projection_fn(
        X_clean,
        Y_clean,
        sphere=True,
        slab=True,
        non_negative=False,
        less_than_one=False,
        use_lp_rounding=False,
        percentile=90):
    """Build a function that projects (X, Y) onto the feasible set derived
    from the clean data (sphere and/or slab constraints at the given
    percentile, optionally with non-negativity / <=1 / LP-rounding).

    Fixes: the original constructed the LP-rounding Minimizer twice in a row
    with identical arguments (the first instance was discarded immediately)
    and left a bare `print(X_clean)` debug statement; both removed.
    """
    goal = 'find_nearest_point'
    class_map, centroids, centroid_vec, sphere_radii, slab_radii = \
        get_data_params(X_clean, Y_clean, percentile)
    if use_lp_rounding or non_negative or less_than_one or (sphere and slab):
        if use_lp_rounding:
            # LP rounding needs access to the clean data matrix.
            projector = upper_bounds.Minimizer(
                d=X_clean.shape[1],
                use_sphere=sphere,
                use_slab=slab,
                non_negative=non_negative,
                less_than_one=less_than_one,
                constrain_max_loss=False,
                goal=goal,
                X=X_clean
            )
        else:
            projector = upper_bounds.Minimizer(
                d=X_clean.shape[1],
                use_sphere=sphere,
                use_slab=slab,
                non_negative=non_negative,
                less_than_one=less_than_one,
                constrain_max_loss=False,
                goal=goal
            )

        # Add back low-rank projection if we move back to just sphere+slab
        def project_onto_feasible_set(
            X, Y,
            theta=None,
            bias=None,
        ):
            # Project each example independently onto its class's feasible set.
            num_examples = X.shape[0]
            proj_X = np.zeros_like(X)
            for idx in range(num_examples):
                x = X[idx, :]
                y = Y[idx]
                class_idx = class_map[y]
                centroid = centroids[class_idx, :]
                sphere_radius = sphere_radii[class_idx]
                slab_radius = slab_radii[class_idx]
                proj_X[idx, :] = projector.minimize_over_feasible_set(
                    None,
                    x,
                    centroid,
                    centroid_vec,
                    sphere_radius,
                    slab_radius)
            num_projected = np.sum(np.max(X - proj_X, axis=1) > 1e-6)
            print('Projected %s examples.' % num_projected)
            return proj_X
    else:
        def project_onto_feasible_set(X, Y, theta=None, bias=None):
            # Cheap closed-form projections; slab is only applied when the
            # sphere constraint is disabled (original behavior preserved).
            if sphere:
                X = project_onto_sphere(X, Y, sphere_radii, centroids, class_map)
            elif slab:
                X = project_onto_slab(X, Y, centroid_vec, slab_radii, centroids, class_map)
            return X
    return project_onto_feasible_set
def filter_points_outside_feasible_set(X, Y,
                                       centroids, centroid_vec,
                                       sphere_radii, slab_radii,
                                       class_map):
    """Drop rows violating either the sphere or slab constraint of their class;
    returns the filtered (X, Y). Prints the number of surviving rows."""
    # Distances to the class centroid (sphere constraint, Q=None -> L2).
    sphere_dists = defenses.compute_dists_under_Q(
        X,
        Y,
        Q=None,
        centroids=centroids,
        class_map=class_map)
    # Distances along the inter-centroid direction (slab constraint).
    slab_dists = defenses.compute_dists_under_Q(
        X,
        Y,
        Q=centroid_vec,
        centroids=centroids,
        class_map=class_map)
    idx_to_keep = np.array([True] * X.shape[0])
    for y in set(Y):
        # Mark rows of class y whose distance exceeds either radius.
        idx_to_keep[np.where(Y == y)[0][sphere_dists[Y == y] > sphere_radii[class_map[y]]]] = False
        idx_to_keep[np.where(Y == y)[0][slab_dists[Y == y] > slab_radii[class_map[y]]]] = False
    print(np.sum(idx_to_keep))
    return X[idx_to_keep, :], Y[idx_to_keep]
| 31.610272
| 136
| 0.581
| 423
| 0.040428
| 0
| 0
| 0
| 0
| 0
| 0
| 682
| 0.065182
|
5a23d3f4e52679a350233bbde834e4fd8f3310ec
| 74
|
py
|
Python
|
pytracetable/__init__.py
|
filwaitman/pytracetable
|
eb884953e179fc65677a9e3b3c70fde1b1439ccb
|
[
"MIT"
] | 1
|
2016-02-10T20:28:00.000Z
|
2016-02-10T20:28:00.000Z
|
pytracetable/__init__.py
|
filwaitman/pytracetable
|
eb884953e179fc65677a9e3b3c70fde1b1439ccb
|
[
"MIT"
] | 1
|
2020-05-27T18:12:10.000Z
|
2020-05-27T18:12:10.000Z
|
pytracetable/__init__.py
|
filwaitman/pytracetable
|
eb884953e179fc65677a9e3b3c70fde1b1439ccb
|
[
"MIT"
] | null | null | null |
from pytracetable.core import tracetable
__all__ = [
'tracetable',
]
| 12.333333
| 40
| 0.716216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.162162
|
5a24938eab3876854f2631917fd72abe26cefe64
| 1,518
|
py
|
Python
|
quandl_data_retriever/server.py
|
fabiomolinar/quandl-data-retriever
|
d9359922cb222ac519f7d9e4dd892bbcf6b1b2d0
|
[
"MIT"
] | null | null | null |
quandl_data_retriever/server.py
|
fabiomolinar/quandl-data-retriever
|
d9359922cb222ac519f7d9e4dd892bbcf6b1b2d0
|
[
"MIT"
] | null | null | null |
quandl_data_retriever/server.py
|
fabiomolinar/quandl-data-retriever
|
d9359922cb222ac519f7d9e4dd892bbcf6b1b2d0
|
[
"MIT"
] | null | null | null |
""" Server module
Quandl API limits:
Authenticated users have a limit of 300 calls per 10 seconds,
2,000 calls per 10 minutes and a limit of 50,000 calls per day.
"""
import urllib
import logging
from twisted.internet import reactor
from twisted.web.client import Agent, readBody
from . import settings
from . import resources
logger = logging.getLogger(settings.LOG_NAME + ".server")
def main():
    """Fetch the first configured Quandl resource and save its response body.

    Resolves the Resource class and API key named by the first entry of
    ``data_list``, issues an async HTTP request via Twisted, saves the body
    on a 200 response, and runs the reactor until the request completes.
    """
    from .data import data_list
    try:
        resource_name = data_list[0]["resource"]
        resource = resources.__dict__[resource_name]
        key = data_list[0]["api_key"]
        key = settings.__dict__[key]
        resource = resource(key)
    except KeyError as e:
        logger.warning("KeyError while trying to instantiate Resource class with : " + str(e))
        # Bail out: `resource` is unbound (or only partially resolved) here,
        # so falling through would raise NameError below.
        # TODO: go to next item in data_list instead of aborting.
        return
    url = resource.get_url(data_list[0]["url"])
    agent = Agent(reactor)
    d = agent.request(
        str.encode(data_list[0]["method"], "utf-8"),
        str.encode(url, "ascii")
    )

    def cbResponse(response):
        # Only persist the payload on HTTP 200; other codes are ignored.
        if response.code == 200:
            def cbBody(body):
                resource.save(body)
            d = readBody(response)
            d.addCallback(cbBody)
            return d

    d.addCallback(cbResponse)

    def cbShutdown(ignored):
        # NOTE(review): `ignored` is the chained result — on the errback path
        # it is a truthy Failure, on success it is typically None, so this
        # condition appears inverted; confirm the intended semantics.
        if not ignored:
            logger.warning("request failed.")
        reactor.stop()

    d.addBoth(cbShutdown)
    reactor.run()
if __name__ == "__main__":
main()
| 26.172414
| 94
| 0.614625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 334
| 0.220026
|
5a24a53e97cdff184ba28a85fbb3b5ee4e244277
| 4,696
|
py
|
Python
|
actingweb/deprecated_db_gae/db_subscription_diff.py
|
gregertw/actingweb
|
e1c8f66451f547c920c64c4e2a702698e3a0d299
|
[
"BSD-3-Clause"
] | null | null | null |
actingweb/deprecated_db_gae/db_subscription_diff.py
|
gregertw/actingweb
|
e1c8f66451f547c920c64c4e2a702698e3a0d299
|
[
"BSD-3-Clause"
] | null | null | null |
actingweb/deprecated_db_gae/db_subscription_diff.py
|
gregertw/actingweb
|
e1c8f66451f547c920c64c4e2a702698e3a0d299
|
[
"BSD-3-Clause"
] | null | null | null |
from builtins import object
from google.appengine.ext import ndb
import logging
"""
DbSubscriptionDiff handles all db operations for a subscription diff
DbSubscriptionDiffList handles list of subscriptions diffs
Google datastore for google is used as a backend.
"""
__all__ = [
'DbSubscriptionDiff',
'DbSubscriptionDiffList',
]
class SubscriptionDiff(ndb.Model):
    """Datastore entity holding one diff recorded for a subscription."""
    id = ndb.StringProperty(required=True)  # actor id owning the subscription
    subid = ndb.StringProperty(required=True)  # subscription the diff belongs to
    timestamp = ndb.DateTimeProperty(auto_now_add=True)  # set when first stored
    diff = ndb.TextProperty()  # serialized diff payload
    seqnr = ndb.IntegerProperty()  # sequence number within the subscription
class DbSubscriptionDiff(object):
    """
    DbSubscriptionDiff does all the db operations for subscription diff objects

    The actor_id must always be set.
    """

    def __init__(self):
        # Handle to the underlying SubscriptionDiff entity once loaded/created.
        self.handle = None

    def get(self, actor_id=None, subid=None, seqnr=None):
        """ Retrieves the subscriptiondiff from the database """
        if not self.handle:
            if not actor_id:
                return None
            if not subid:
                logging.debug("Attempt to get subscriptiondiff without subid")
                return None
            # Build the filter list; seqnr is an optional extra constraint.
            filters = [SubscriptionDiff.id == actor_id,
                       SubscriptionDiff.subid == subid]
            if seqnr:
                filters.append(SubscriptionDiff.seqnr == seqnr)
            self.handle = SubscriptionDiff.query(*filters).get()
        if not self.handle:
            return None
        entity = self.handle
        return {
            "id": entity.id,
            "subscriptionid": entity.subid,
            "timestamp": entity.timestamp,
            "data": entity.diff,
            "sequence": entity.seqnr,
        }

    def create(self, actor_id=None,
               subid=None,
               diff=None,
               seqnr=None):
        """ Create a new subscription diff """
        if not actor_id or not subid:
            logging.debug("Attempt to create subscriptiondiff without actorid or subid")
            return False
        # Defaults: empty diff payload, sequence numbering starts at 1.
        self.handle = SubscriptionDiff(id=actor_id,
                                       subid=subid,
                                       diff=diff or '',
                                       seqnr=seqnr or 1)
        self.handle.put(use_cache=False)
        return True

    def delete(self):
        """ Deletes the subscription diff in the database """
        if not self.handle:
            return False
        self.handle.key.delete()
        self.handle = None
        return True
class DbSubscriptionDiffList(object):
    """
    DbSubscriptionDiffList does all the db operations for list of diff objects

    The actor_id must always be set.
    """

    def fetch(self, actor_id=None, subid=None):
        """ Retrieves the subscription diffs of an actor_id from the database as an array

        Results are ordered by sequence number; subid optionally narrows the
        query to a single subscription. Returns None when actor_id is missing
        and [] when nothing matched.
        """
        if not actor_id:
            return None
        if not subid:
            self.handle = SubscriptionDiff.query(SubscriptionDiff.id == actor_id).order(SubscriptionDiff.seqnr).fetch(use_cache=False)
        else:
            self.handle = SubscriptionDiff.query(SubscriptionDiff.id == actor_id,
                                                 SubscriptionDiff.subid == subid).order(SubscriptionDiff.seqnr).fetch(use_cache=False)
        self.diffs = []
        if self.handle:
            for t in self.handle:
                self.diffs.append(
                    {
                        "id": t.id,
                        "subscriptionid": t.subid,
                        "timestamp": t.timestamp,
                        "diff": t.diff,
                        "sequence": t.seqnr,
                    })
            return self.diffs
        else:
            return []

    def delete(self, seqnr=None):
        """ Deletes all the fetched subscription diffs in the database

        Optional seqnr deletes up to (and including) a specific seqnr
        (the comparison below is `<=`).
        """
        if not self.handle:
            return False
        # Non-int or missing seqnr means "delete everything fetched".
        if not seqnr or not isinstance(seqnr, int):
            seqnr = 0
        for p in self.handle:
            if seqnr == 0 or p.seqnr <= seqnr:
                p.key.delete()
        self.handle = None
        return True

    def __init__(self):
        # handle: raw fetched entities; diffs: their dict projections.
        self.handle = None
| 33.304965
| 134
| 0.5296
| 4,336
| 0.923339
| 0
| 0
| 0
| 0
| 0
| 0
| 1,098
| 0.233816
|
5a24c50dca0ab02ce229e044f402eb5085a1288a
| 1,703
|
py
|
Python
|
azure-mgmt-iothub/azure/mgmt/iothub/models/ip_filter_rule.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-iothub/azure/mgmt/iothub/models/ip_filter_rule.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-iothub/azure/mgmt/iothub/models/ip_filter_rule.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IpFilterRule(Model):
    """The IP filter rules for the IoT hub.

    All required parameters must be populated in order to send to Azure.

    :param filter_name: Required. The name of the IP filter rule.
    :type filter_name: str
    :param action: Required. The desired action for requests captured by this
     rule. Possible values include: 'Accept', 'Reject'
    :type action: str or ~azure.mgmt.iothub.models.IpFilterActionType
    :param ip_mask: Required. A string that contains the IP address range in
     CIDR notation for the rule.
    :type ip_mask: str
    """

    # msrest enforces these constraints at serialization time, not on init.
    _validation = {
        'filter_name': {'required': True},
        'action': {'required': True},
        'ip_mask': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'filter_name': {'key': 'filterName', 'type': 'str'},
        'action': {'key': 'action', 'type': 'IpFilterActionType'},
        'ip_mask': {'key': 'ipMask', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(IpFilterRule, self).__init__(**kwargs)
        # Required fields default to None here; validation happens when the
        # model is serialized for the Azure API call.
        self.filter_name = kwargs.get('filter_name', None)
        self.action = kwargs.get('action', None)
        self.ip_mask = kwargs.get('ip_mask', None)
| 36.234043
| 77
| 0.603641
| 1,187
| 0.697005
| 0
| 0
| 0
| 0
| 0
| 0
| 1,225
| 0.719319
|
5a29b2a7e94aa859d4fcd87428416a71deaf7e01
| 551
|
py
|
Python
|
4344.py
|
yzkim9501/Baekjoon
|
222e55d0bd65cbb66f1f5486652ad8c697817844
|
[
"Unlicense"
] | null | null | null |
4344.py
|
yzkim9501/Baekjoon
|
222e55d0bd65cbb66f1f5486652ad8c697817844
|
[
"Unlicense"
] | null | null | null |
4344.py
|
yzkim9501/Baekjoon
|
222e55d0bd65cbb66f1f5486652ad8c697817844
|
[
"Unlicense"
] | null | null | null |
# 대학생 새내기들의 90%는 자신이 반에서 평균은 넘는다고 생각한다. 당신은 그들에게 슬픈 진실을 알려줘야 한다.
# 첫째 줄에는 테스트 케이스의 개수 C가 주어진다.
# 둘째 줄부터 각 테스트 케이스마다 학생의 수 N(1 ≤ N ≤ 1000, N은 정수)이 첫 수로 주어지고, 이어서 N명의 점수가 주어진다. 점수는 0보다 크거나 같고, 100보다 작거나 같은 정수이다.
# 각 케이스마다 한 줄씩 평균을 넘는 학생들의 비율을 반올림하여 소수점 셋째 자리까지 출력한다.
def avg(l):
    """Return the mean score of a test case row.

    l[0] is the number of scores N and l[1:] are the N scores, so the mean
    is (sum of all elements minus the leading count) divided by that count.
    """
    # sum(l) includes the leading count l[0], so subtract it back out.
    return (sum(l) - l[0]) / l[0]
t = int(input())
for _ in range(t):
    # a[0] is the number of students N; a[1:] the N scores.
    a = list(map(int, input().split()))
    # Hoist the mean out of the inner loop: it is loop-invariant, and
    # recomputing avg(a) per student made each test case O(n^2).
    mean = avg(a)
    c = 0
    for j in range(1, a[0] + 1):
        if a[j] > mean:
            c += 1
    # Percentage of students strictly above the mean, 3 decimal places.
    print(str('{:,.3f}'.format(round(c / a[0] * 100, 3))) + "%")
| 25.045455
| 114
| 0.555354
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 605
| 0.685164
|
5a29f8551225fbef514f169502222d4f73af2984
| 4,531
|
py
|
Python
|
tests/dualtor/test_standby_tor_upstream_mux_toggle.py
|
AndoniSanguesa/sonic-mgmt
|
bac8b9bf7c51008ceab75e83ce68fa9473a7d2ec
|
[
"Apache-2.0"
] | 1
|
2021-09-24T08:40:57.000Z
|
2021-09-24T08:40:57.000Z
|
tests/dualtor/test_standby_tor_upstream_mux_toggle.py
|
AndoniSanguesa/sonic-mgmt
|
bac8b9bf7c51008ceab75e83ce68fa9473a7d2ec
|
[
"Apache-2.0"
] | null | null | null |
tests/dualtor/test_standby_tor_upstream_mux_toggle.py
|
AndoniSanguesa/sonic-mgmt
|
bac8b9bf7c51008ceab75e83ce68fa9473a7d2ec
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import logging
import ipaddress
import json
import re
import time
from tests.common.dualtor.dual_tor_mock import *
from tests.common.helpers.assertions import pytest_assert as pt_assert
from tests.common.dualtor.dual_tor_utils import rand_selected_interface, verify_upstream_traffic, get_crm_nexthop_counter
from tests.common.utilities import compare_crm_facts
from tests.common.config_reload import config_reload
from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports
from tests.common.fixtures.ptfhost_utils import change_mac_addresses, run_garp_service, run_icmp_responder
logger = logging.getLogger(__file__)
pytestmark = [
pytest.mark.topology('t0'),
pytest.mark.usefixtures('apply_mock_dual_tor_tables', 'apply_mock_dual_tor_kernel_configs', 'run_garp_service', 'run_icmp_responder')
]
PAUSE_TIME = 10
def get_l2_rx_drop(host, itfs):
"""
Return L2 rx packet drop counter for given interface
"""
res = {}
stdout = host.shell("portstat -j")['stdout']
match = re.search("Last cached time was.*\n", stdout)
if match:
stdout = re.sub("Last cached time was.*\n", "", stdout)
data = json.loads(stdout)
return int(data[itfs]['RX_DRP'])
def clear_portstat(dut):
    """Reset all portstat counters on the DUT."""
    dut.shell("portstat -c")
@pytest.fixture(scope='module', autouse=True)
def test_cleanup(rand_selected_dut):
    """
    Issue a config reload at the end of module
    """
    # Run the module's tests first; the reload afterwards restores the DUT
    # to a clean configuration after the mock dual-tor setup mutated it.
    yield
    config_reload(rand_selected_dut)
def test_standby_tor_upstream_mux_toggle(
        rand_selected_dut, tbinfo, ptfadapter, rand_selected_interface,
        require_mocked_dualtor, toggle_all_simulator_ports, set_crm_polling_interval):
    """Verify upstream traffic handling across standby/active/standby mux toggles.

    Standby state must drop upstream traffic (ACL) and bump RX_DRP; active
    state must forward it; CRM facts must be unchanged after the full cycle
    (no stale nexthop objects).
    """
    itfs, ip = rand_selected_interface
    # Number of packets sent per verification pass; RX_DRP must grow by at
    # least this much on the drop passes.
    PKT_NUM = 100
    # Step 1. Set mux state to standby and verify traffic is dropped by ACL rule and drop counters incremented
    set_mux_state(rand_selected_dut, tbinfo, 'standby', [itfs], toggle_all_simulator_ports)
    # Wait sometime for mux toggle
    time.sleep(PAUSE_TIME)
    # Baseline CRM facts, compared against after the full toggle cycle.
    crm_facts0 = rand_selected_dut.get_crm_facts()
    # Verify packets are not go up
    verify_upstream_traffic(host=rand_selected_dut,
                            ptfadapter=ptfadapter,
                            tbinfo=tbinfo,
                            itfs=itfs,
                            server_ip=ip['server_ipv4'].split('/')[0],
                            pkt_num=PKT_NUM,
                            drop=True)
    # Allow counters to settle before reading them.
    time.sleep(5)
    # Verify dropcounter is increased
    drop_counter = get_l2_rx_drop(rand_selected_dut, itfs)
    pt_assert(drop_counter >= PKT_NUM,
              "RX_DRP for {} is expected to increase by {} actually {}".format(itfs, PKT_NUM, drop_counter))

    # Step 2. Toggle mux state to active, and verify traffic is not dropped by ACL and fwd-ed to uplinks; verify CRM show and no nexthop objects are stale
    set_mux_state(rand_selected_dut, tbinfo, 'active', [itfs], toggle_all_simulator_ports)
    # Wait sometime for mux toggle
    time.sleep(PAUSE_TIME)
    # Verify packets are not go up
    verify_upstream_traffic(host=rand_selected_dut,
                            ptfadapter=ptfadapter,
                            tbinfo=tbinfo,
                            itfs=itfs,
                            server_ip=ip['server_ipv4'].split('/')[0],
                            pkt_num=PKT_NUM,
                            drop=False)

    # Step 3. Toggle mux state to standby, and verify traffic is dropped by ACL; verify CRM show and no nexthop objects are stale
    set_mux_state(rand_selected_dut, tbinfo, 'standby', [itfs], toggle_all_simulator_ports)
    # Wait sometime for mux toggle
    time.sleep(PAUSE_TIME)
    # Verify packets are not go up again
    verify_upstream_traffic(host=rand_selected_dut,
                            ptfadapter=ptfadapter,
                            tbinfo=tbinfo,
                            itfs=itfs,
                            server_ip=ip['server_ipv4'].split('/')[0],
                            pkt_num=PKT_NUM,
                            drop=True)
    # Verify dropcounter is increased
    drop_counter = get_l2_rx_drop(rand_selected_dut, itfs)
    pt_assert(drop_counter >= PKT_NUM,
              "RX_DRP for {} is expected to increase by {} actually {}".format(itfs, PKT_NUM, drop_counter))
    # CRM facts after the cycle must match the standby baseline exactly.
    crm_facts1 = rand_selected_dut.get_crm_facts()
    unmatched_crm_facts = compare_crm_facts(crm_facts0, crm_facts1)
    pt_assert(len(unmatched_crm_facts)==0, 'Unmatched CRM facts: {}'.format(json.dumps(unmatched_crm_facts, indent=4)))
| 41.568807
| 154
| 0.670492
| 0
| 0
| 146
| 0.032222
| 192
| 0.042375
| 0
| 0
| 1,182
| 0.26087
|
5a2a01adbfb1b632775069e902a5a1facd9c2f69
| 3,308
|
py
|
Python
|
birdsong_recognition/dataset.py
|
YingyingF/birdsong_recognition
|
4f8a2ccb900898a02d4454a5f1c206125f23fa44
|
[
"Apache-2.0"
] | null | null | null |
birdsong_recognition/dataset.py
|
YingyingF/birdsong_recognition
|
4f8a2ccb900898a02d4454a5f1c206125f23fa44
|
[
"Apache-2.0"
] | null | null | null |
birdsong_recognition/dataset.py
|
YingyingF/birdsong_recognition
|
4f8a2ccb900898a02d4454a5f1c206125f23fa44
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: dataset.ipynb (unless otherwise specified).
__all__ = ['load_mp3', 'get_sample_label', 'preprocess_file', 'pad_by_zeros', 'split_file_by_window_size',
'wrapper_split_file_by_window_size', 'create_dataset_fixed_size', 'get_spectrogram', 'add_channel_dim']
# Cell
def load_mp3(file):
    """Read an mp3 file from disk and decode it into an audio tensor."""
    raw_bytes = tf.io.read_file(file)
    return tfio.audio.decode_mp3(raw_bytes)
# Cell
def get_sample_label(file):
    """Load the audio for *file* and derive its integer class label.

    The label is the index within the global ``ebirds`` list of the species
    code found in the second path component of *file*.
    """
    audio = load_mp3(file)
    species = tf.strings.split(file, '/')[1]
    label = tf.argmax(species == np.array(ebirds))
    return audio, label
# Cell
def preprocess_file(sample_audio, label):
    """Keep the first audio channel and rescale it linearly into [-1, 1]."""
    # Only look at the first channel.
    mono = sample_audio[:, 0]
    lo = tf.math.reduce_min(mono)
    hi = tf.math.reduce_max(mono)
    # Min-max normalize to [0, 1], then shift/stretch to [-1, 1].
    normalized = (mono - lo) / (hi - lo)
    return 2 * (normalized - 0.5), label
# Cell
def pad_by_zeros(sample, min_file_size, last_sample_size):
    """Right-pad a 1-D *sample* with zeros so its tail fills a full window."""
    missing = min_file_size - last_sample_size
    return tf.pad(sample, paddings=[[tf.constant(0), missing]])
# Cell
def split_file_by_window_size(sample, label):
    """Split one audio tensor into fixed-size, non-overlapping subsamples.

    Returns a (subsample_count, min_file_size) sample tensor and a label
    vector repeating *label* once per subsample.

    NOTE(review): relies on a module-level ``min_file_size`` (window length
    in samples) defined elsewhere in the file — confirm it is set before this
    runs. Uses ``.numpy()``/``int()``, so it must execute eagerly, typically
    via ``tf.py_function`` (see the wrapper below).
    """
    # number of subsamples given none overlapping window size.
    subsample_count = int(np.round(sample.shape[0]/min_file_size))
    # ignore extremely long files for now
    subsample_limit = 75
    if subsample_count <= subsample_limit:
        # if the last sample is at least half the window size, then pad it, if not, clip it.
        last_sample_size = sample.shape[0]%min_file_size
        if last_sample_size/min_file_size > 0.5:
            sample = pad_by_zeros(sample, min_file_size, last_sample_size)
        else:
            sample = sample[:subsample_count*min_file_size]
        sample = tf.reshape(sample, shape=[subsample_count, min_file_size])
        # Repeat the scalar label so each subsample carries it.
        label = tf.pad(tf.expand_dims(label, axis=0), paddings=[[0, subsample_count-1]], constant_values=label.numpy())
    else:
        # Over-long file: clip to exactly 75 windows (74 = limit - 1 label pads).
        sample = tf.reshape(sample[:subsample_limit*min_file_size], shape=[subsample_limit, min_file_size])
        label = tf.pad(tf.expand_dims(label, axis=0), paddings=[[0, 74]], constant_values=label.numpy())
    return sample, label
# Cell
def wrapper_split_file_by_window_size(sample, label):
    """Bridge split_file_by_window_size into graph-mode tf.data pipelines.

    The split needs eager execution (it calls ``.numpy()``/``int()``), so it
    cannot be traced directly; ``tf.py_function`` runs it eagerly per element.
    """
    sample, label = tf.py_function(split_file_by_window_size, inp=(sample, label),
                                   Tout=(sample.dtype, label.dtype))
    return sample, label
# Cell
def create_dataset_fixed_size(ds):
    """Flatten a dataset of (samples, labels) batches into two tensor lists.

    Iterates *ds* to exhaustion, unstacking every batch along axis 0 and
    concatenating the pieces.

    Returns:
        (samples_all, labels_all): all subsamples / labels from every batch,
        concatenated along axis 0.
    """
    iterator = iter(ds)
    sample, label = iterator.next()
    samples_all = tf.unstack(sample)
    labels_all = tf.unstack(label)
    while True:
        # Only the fetch can legitimately signal end-of-data; keep the try
        # body minimal so real errors in unstack/concat surface instead of
        # being swallowed (the original bare `except` masked them).
        try:
            sample, label = iterator.next()
        except StopIteration:
            break
        samples_all = tf.concat([samples_all, tf.unstack(sample)], axis=0)
        labels_all = tf.concat([labels_all, tf.unstack(label)], axis=0)
    return samples_all, labels_all
# Cell
def get_spectrogram(sample, label):
    """Turn a waveform into a spectrogram (nfft=512, window=512, hop=256)."""
    spec = tfio.experimental.audio.spectrogram(
        sample, nfft=512, window=512, stride=256)
    return spec, label
# Cell
def add_channel_dim(sample, label):
    """Append a trailing channel axis (e.g. HxW spectrogram -> HxWx1)."""
    return tf.expand_dims(sample, axis=-1), label
| 39.380952
| 145
| 0.703144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 537
| 0.162334
|
5a2a02d3be8c76f34df0d751d6f767892052893d
| 492
|
py
|
Python
|
Lib/objc/_SplitKit.py
|
kanishpatel/Pyto
|
feec7a1a54f635a6375fa7ede074ff35afbfbb95
|
[
"MIT"
] | null | null | null |
Lib/objc/_SplitKit.py
|
kanishpatel/Pyto
|
feec7a1a54f635a6375fa7ede074ff35afbfbb95
|
[
"MIT"
] | null | null | null |
Lib/objc/_SplitKit.py
|
kanishpatel/Pyto
|
feec7a1a54f635a6375fa7ede074ff35afbfbb95
|
[
"MIT"
] | null | null | null |
'''
Classes from the 'SplitKit' framework.
'''
try:
    from rubicon.objc import ObjCClass
except (ImportError, ValueError):
    # rubicon is missing (ImportError) or failed to initialize (ValueError);
    # the original only caught ValueError, so an absent rubicon crashed the
    # import instead of engaging this stub. Fall back to a lookup that
    # resolves every class name to None.
    def ObjCClass(name):
        return None


def _Class(name):
    """Resolve an Objective-C class by name, returning None when unavailable."""
    try:
        return ObjCClass(name)
    except NameError:
        return None


PodsDummy_SplitKit = _Class('PodsDummy_SplitKit')
InstantPanGestureRecognizer = _Class('SplitKit.InstantPanGestureRecognizer')
HandleView = _Class('SplitKit.HandleView')
SPKSplitViewController = _Class('SPKSplitViewController')
| 21.391304
| 76
| 0.731707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 149
| 0.302846
|
5a2a102330d36f9fe8e0e169c14680aef835ac84
| 3,743
|
py
|
Python
|
wiki_music/gui_lib/search_and_replace.py
|
marian-code/wikipedia-music-tags
|
e8836c23b7b7e43661b59afd1bfc18d381b95d4a
|
[
"MIT"
] | 5
|
2019-01-28T21:53:14.000Z
|
2020-06-27T08:52:36.000Z
|
wiki_music/gui_lib/search_and_replace.py
|
marian-code/wikipedia-music-tags
|
e8836c23b7b7e43661b59afd1bfc18d381b95d4a
|
[
"MIT"
] | 4
|
2019-01-15T16:33:59.000Z
|
2020-05-20T08:09:02.000Z
|
wiki_music/gui_lib/search_and_replace.py
|
marian-code/wikipedia-music-tags
|
e8836c23b7b7e43661b59afd1bfc18d381b95d4a
|
[
"MIT"
] | 1
|
2020-04-15T11:00:20.000Z
|
2020-04-15T11:00:20.000Z
|
"""Module controling search and replace tab."""
import logging
from wiki_music.constants import GUI_HEADERS
from wiki_music.gui_lib import BaseGui, CheckableListModel
from wiki_music.gui_lib.qt_importer import QMessageBox, QPushButton, QIcon, QStyle
__all__ = ["Replacer"]
log = logging.getLogger(__name__)
log.debug("finished gui search & replace imports")
class Replacer(BaseGui):
    """Controls the search and replace tab in GUI.

    Warnings
    --------
    This class is not meant to be instantiated, only inherited.
    """

    def __init__(self) -> None:
        super().__init__()
        # Model backing the checkable list of table columns to search in.
        self.replace_tag_selector_model = CheckableListModel()
        self._fill_tags_list()

    def _fill_tags_list(self):
        """Create a checkable list with table column name headers."""
        for tag in GUI_HEADERS:
            self.replace_tag_selector_model.add(tag)

        # NOTE(review): replace_tag_selector_view is presumably created by
        # the inherited UI setup — confirm it exists before this runs.
        self.replace_tag_selector_view.setModel(
            self.replace_tag_selector_model)

    def _setup_search_replace(self):
        """Connect to signals essential for search and replace tab."""
        # re-run search when search columns are reselected
        self.replace_tag_selector_model.itemChanged.connect(
            self._search_replace_run)
        # re-run search when options are checked
        self.search_support_re.stateChanged.connect(
            self._search_replace_run)
        self.search_support_wildcard.stateChanged.connect(
            self._search_replace_run)
        self.search_case_sensitive.stateChanged.connect(
            self._search_replace_run)
        # connect to control buttons
        self.search_next.clicked.connect(self.tableView.search_next)
        self.search_previous.clicked.connect(self.tableView.search_previous)
        self.replace_one.clicked.connect(
            lambda: self.tableView.replace_one(
                self.search_string_input.text(),
                self.replace_string_input.text()))
        self.replace_all.clicked.connect(
            lambda: self.tableView.replace_all(
                self.search_string_input.text(),
                self.replace_string_input.text()
            ))
        # search is run interacively as user is typing
        self.search_string_input.textChanged.connect(
            self._search_replace_run)
        # on tab change change selection and search highlight mode
        self.tool_tab.currentChanged.connect(
            self.tableView.set_search_visibility)
        # seems that filtering is done by rows
        # self.search_string_input.textChanged.connect(
        #    self.proxy.setFilterFixedString)

    def _search_replace_run(self, string: str):
        """Process search parameters and call table search method.

        Parameters
        ----------
        string: str
            string to search for

        Notes
        -----
        NOTE(review): this slot is also connected to ``itemChanged`` and
        ``stateChanged`` signals, so *string* may actually be an item or a
        check-state int; the body ignores it and re-reads the input field.
        """
        # Regex and wildcards together can interact badly; warn and let the
        # user back out before running the combined search.
        if (self.search_support_re.isChecked() and
            self.search_support_wildcard.isChecked()):
            msg = QMessageBox(QMessageBox.Warning, "Warning",
                              "Attempting to use regex and wildcards at once "
                              "may return unexpected results. "
                              "Do you want to proceed?",
                              QMessageBox.Yes | QMessageBox.No)
            if msg.exec_() == QMessageBox.No:
                return
            else:
                log.warning("Wildcard and regex used at once in search")

        self.tableView.search_string(
            self.search_string_input.text(),
            self.search_case_sensitive.isChecked(),
            self.search_support_re.isChecked(),
            self.search_support_wildcard.isChecked(),
            self.replace_tag_selector_model.get_checked_indices()
        )
| 35.647619
| 82
| 0.6428
| 3,379
| 0.902752
| 0
| 0
| 0
| 0
| 0
| 0
| 1,028
| 0.274646
|