hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5445ab0135e0f3ff0f80b808bab631bc81bb1f98 | 804 | py | Python | nsapiwrapper/exceptions.py | DolphDev/nsapiwrapper | cd67be445cfc4845f822ff815f3fb265f75061c9 | [
"MIT"
] | null | null | null | nsapiwrapper/exceptions.py | DolphDev/nsapiwrapper | cd67be445cfc4845f822ff815f3fb265f75061c9 | [
"MIT"
] | null | null | null | nsapiwrapper/exceptions.py | DolphDev/nsapiwrapper | cd67be445cfc4845f822ff815f3fb265f75061c9 | [
"MIT"
] | null | null | null | """Exceptions for this library"""
| 18.272727 | 46 | 0.705224 |
544703b0ead742e49b1d2aa2223e76a2cd97299b | 62,639 | py | Python | src.py | edbezci/mapOverlayHumanoid | 95d5e16fb983a7384abea6f51599483274ff0f62 | [
"MIT"
] | null | null | null | src.py | edbezci/mapOverlayHumanoid | 95d5e16fb983a7384abea6f51599483274ff0f62 | [
"MIT"
] | null | null | null | src.py | edbezci/mapOverlayHumanoid | 95d5e16fb983a7384abea6f51599483274ff0f62 | [
"MIT"
] | null | null | null | # lines 1-4 imports the necessary libraries
import pygame
import os
import random
import math
import sys
import hlp
import intro
import dsb # this is the last module with the description files
'''
Shared application state and one-time Pygame setup.

The boolean flags below are toggled by the button callbacks used in the
main display loops; the lists accumulate the line segments, their colours
and the intersection bookkeeping while the program runs.
'''
cursor = False        # True while the user is placing a new segment
randomLine = False    # True requests one randomly generated segment
randomTimer = True
run = False           # True while the sweep-line animation is running
stop = False
start = False
clear = False         # True requests a reset of all drawn state
lines = []            # each entry is [(x1, y1), (x2, y2)]
colours = []          # per-line colours (Bentley-Ottmann screen)
brutecolours = []     # per-line colours (brute-force screen)
points = []           # intersection points found so far
line_name = []        # display label for each line
intersect_name = []   # display label for each intersection
orderList = []        # intersection labels in sweep order
# initialise Pygame before any display/font/image call
pygame.init()
line_colour = pygame.Color(50, 50, 120)
# create the 1280 x 550 fullscreen window with double buffering
display = pygame.display.set_mode((1280, 550), pygame.FULLSCREEN |
                                  pygame.DOUBLEBUF | pygame.HWSURFACE)
pygame.display.set_caption("Line Segment Intersection Visualisation Tool")
# clock caps the frame rate via clock.tick() in the main loops
clock = pygame.time.Clock()
# custom cursor image shown while inserting a line segment
pointer = pygame.image.load(os.path.join("resources", "pointer.png"))
# font file used for all text rendered by the program
bitterfont = os.path.abspath("resources/bitterfont.otf")
def AddPoints(p):
    '''
    Record the intersection point p in the global `points` list.

    p -- an (x, y) tuple.

    Duplicates are ignored so each intersection is stored exactly once.
    '''
    global points
    # idiomatic membership test replaces the manual scan-and-return loop
    if p not in points:
        points.append(p)
def TransValue(OldValue, oldMax, oldMin, newMax=350, newMin=0):
    '''
    Linearly rescale OldValue from the range [oldMin, oldMax] into
    [newMin, newMax] and return the result truncated to an int.

    The target range was previously hard-coded to (0, 350); it is now a
    pair of keyword arguments with those same defaults, so existing
    callers are unaffected while other ranges become possible.

    Raises ZeroDivisionError when oldMax == oldMin (empty source range),
    matching the original behaviour.
    '''
    OldRange = (oldMax - oldMin)
    NewRange = (newMax - newMin)
    # standard linear interpolation, truncated to int as before
    return int((((OldValue - oldMin) * NewRange) / OldRange) + newMin)
def GenerateRandomLine():
    '''
    Create one line segment with random endpoints inside the drawable
    grid area and register it via AddNewLine.
    '''
    # both endpoints are confined to the visible 400x400 grid region;
    # the four randrange calls happen in the same order as before
    start = (random.randrange(51, 450), random.randrange(50, 450))
    end = (random.randrange(51, 450), random.randrange(50, 450))
    AddNewLine([start, end])
def CheckIntersect(p1, p2, q1, q2):
    '''
    Test whether segment p1-p2 intersects segment q1-q2.

    Each segment's supporting line is written as a*x + b*y = c and the
    resulting 2x2 system is solved with Cramer's rule; the candidate
    point is then checked against both segments' bounding boxes.

    Side effect: a found intersection is registered via AddPoints().
    Returns True when the segments intersect, otherwise False.  (The
    parallel/collinear branch previously fell off with a bare `return`,
    yielding None; it now returns False for a consistent boolean result.)
    '''
    # coefficients of the line through p1 and p2: a1*x + b1*y = c1
    a1 = p2[1] - p1[1]
    b1 = p1[0] - p2[0]
    c1 = a1 * p1[0] + b1 * p1[1]
    # coefficients of the line through q1 and q2
    a2 = q2[1] - q1[1]
    b2 = q1[0] - q2[0]
    c2 = a2 * q1[0] + b2 * q1[1]
    d = (a1 * b2 - a2 * b1)  # determinant of the 2x2 system
    if d == 0:
        # parallel or collinear lines: no unique crossing point
        return False
    x = int((c1 * b2 - c2 * b1) / d)  # Cramer's rule, x component
    y = int((a1 * c2 - a2 * c1) / d)  # Cramer's rule, y component
    # the infinite lines cross at (x, y); accept it only when it lies
    # inside both segments' bounding boxes
    if min(p1[0], p2[0]) <= x <= max(p1[0], p2[0]) and min(p1[1], p2[1]) <= y <= max(p1[1], p2[1]):
        if min(q1[0], q2[0]) <= x <= max(q1[0], q2[0]) and min(q1[1], q2[1]) <= y <= max(q1[1], q2[1]):
            AddPoints((x, y))
            return True
    return False
def BruteForceMain():
    '''
    Brute-force algorithm screen with its own display loop.

    Lets the user draw or randomly generate line segments, marks every
    pairwise intersection, and shows the order list together with the
    O(n*n) efficiency figure.  Returns to the introduction screen when
    the Back button is pressed.

    Fixes applied: the click handler now uses the freshly fetched
    mouse position instead of a stale `pos` from the previous frame,
    and `colours` is declared global so the Clear button actually
    resets the shared colour list (previously `colours = []` only
    created a dead local).
    '''
    global cursor, lines, colours, brutecolours, points, randomLine, randomTimer, run, stop, clear, intersect_name
    global display, line_name, orderList
    pygame.display.set_caption("Brute-Force Algorithm")
    display = pygame.display.set_mode((1280, 550), pygame.FULLSCREEN)
    cursor = False          # not inserting a segment yet
    randomLine = False      # no random segment requested yet
    clickedPos = []         # endpoints clicked while inserting a segment
    orderList = []          # intersection labels ordered by x coordinate
    efficiency = 0          # n*n cost figure shown on screen
    eventQueue = []         # kept for symmetry with the other screens
    back = 0                # becomes positive when Back is clicked
    while True:
        display.fill((0, 0, 0))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_ESCAPE:
                    exit()
            # while inserting, a left click inside the grid records an endpoint
            if cursor == True and event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    # bug fix: test and store the position fetched for THIS
                    # event rather than the stale `pos` of the previous frame
                    mouse_pos = pygame.mouse.get_pos()
                    if 50 < mouse_pos[0] < 450 and 50 < mouse_pos[1] < 450:
                        clickedPos.append(mouse_pos)
            if event.type == pygame.MOUSEBUTTONUP:
                randomTimer = True
        # draw the 40x40 background grid
        for i in range(0, 41):
            pos = i * 10 + 50
            pygame.draw.line(display, line_colour, (50, pos), (450, pos), 1)
            pygame.draw.line(display, line_colour, (pos, 50), (pos, 450), 1)
        # draw every stored segment in its own colour with its label
        i = 0
        for line in lines:
            pygame.draw.line(display, brutecolours[i], line[0], line[1], 1)
            hlp.AddText(line_name[i], line[0])
            i += 1
        # rebuild the order list: sweep every x column on the grid and
        # collect the labels of intersections found in that column
        orderList = []
        i = 50
        while i < 450:
            j = 0
            for point in points:
                if point[0] == i:
                    orderList.append(intersect_name[j])
                j += 1
            i += 1
        n = len(lines)
        # mark every intersection with a small red circle
        for point in points:
            pygame.draw.circle(display, hlp.red, point, 3)
        efficiency = n * n  # brute force compares every pair: O(n*n)
        if cursor == True:
            # replace the system cursor with the crosshair image
            pygame.mouse.set_visible(False)
            pos = pygame.mouse.get_pos()
            display.blit(pointer, pos)
            if len(clickedPos) > 0:
                # rubber-band preview from the first endpoint to the mouse
                pygame.draw.circle(display, hlp.white, clickedPos[0], 2)
                pygame.draw.line(display, hlp.white, clickedPos[0], pos, 1)
            if len(clickedPos) >= 2:
                # both endpoints chosen: store the segment, stop inserting
                AddNewLine([clickedPos[0], clickedPos[1]])
                cursor = False
                clickedPos = []
        else:
            pygame.mouse.set_visible(True)
        if randomLine == True:
            GenerateRandomLine()
            randomLine = False
            randomTimer = False
        if clear == True:
            # reset all shared and local state back to the defaults
            lines = []
            colours = []
            brutecolours = []
            points = []
            orderList = []
            efficiency = 0
            eventQueue = []
            intersect_name = []
            line_name = []
            clear = False
        # static corner labels and the control buttons for this screen
        hlp.AddText("(0,0)", (30, 25))
        hlp.AddText("(50,0)", (430, 25))
        hlp.AddText("(0,50)", (30, 450))
        hlp.AddText("(50,50)", (430, 450))
        hlp.Button("Clear", 200, 5, 100, 30, ClearActive)
        hlp.Button("Random Segment", 50, 500, 180, 30, RandomActive)
        hlp.Button("Insert Segment", 280, 500, 180, 35, CursorActive)
        hlp.Button("Exit", 500, 5, 100, 30, sys.exit)
        back = hlp.ButtonWithReturn("Back", 900, 5, 100, 30, 1)
        if back > 0:
            break  # leave the loop and return to the introduction screen
        nxt = hlp.ButtonWithReturn("Next", 700, 5, 100, 30, 1)
        if nxt > 0:
            # show the algorithm description page
            hlp.Description(dsb.bf_desc)
        # right-hand information panel
        pygame.draw.rect(display, line_colour, [500, 50, 750, 490], 2)
        hlp.AddText("Brute-Force Algorithm", (520, 70))
        hlp.AddText("Order List:", (520, 120))
        # lay the order-list labels out in rows of 14 so they never overlap
        i, o_x, o_y = 0, 540, 150
        for val in orderList:
            hlp.AddText(val, (o_x, o_y), (255, 255, 255))
            o_x += 50
            i += 1
            if i % 14 == 0:
                o_x = 540   # wrap back to the left edge of the panel
                o_y += 20   # and continue on the next row
        hlp.AddText("Efficiency O(n*n):", (520, 480))
        hlp.AddText(str(efficiency), (540, 505), (255, 255, 255))
        # flush the frame to the screen
        pygame.display.flip()
        # cap the loop at 90 frames per second (the old comment said 30,
        # which did not match clock.tick(90))
        clock.tick(90)
    intro.Introduction2()
def BentleyMain():
    '''
    Bentley-Ottmann algorithm screen with its own display loop.

    Animates a vertical sweep line across the drawn segments, maintains
    the event queue and order list as the sweep advances, and shows the
    O((n+k)log n) efficiency figure.  Returns to the introduction screen
    when the Back button is pressed.

    Fix applied: the click handler now uses the freshly fetched mouse
    position instead of a stale `pos` left over from the previous frame;
    the unused `text` local has been removed.
    '''
    global cursor, lines, colours, points, randomLine, randomTimer, run, stop, clear, intersect_name
    global display, line_name, orderList
    pygame.display.set_caption("Bentley-Ottmann Algorithm")
    display = pygame.display.set_mode((1280, 550), pygame.FULLSCREEN)
    cursor = False          # not inserting a segment yet
    randomLine = False      # no random segment requested yet
    clickedPos = []         # endpoints clicked while inserting a segment
    efficiency = 0          # (n+k)*log(n) cost figure shown on screen
    eventQueue = []         # line labels currently crossed by the sweep
    orderList = []          # intersection labels in sweep order
    x = 50                  # current x position of the sweep line
    back = 0                # becomes positive when Back is clicked
    while True:
        display.fill((0, 0, 0))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_ESCAPE:
                    exit()
            # while inserting, a left click inside the grid records an endpoint
            if cursor == True and event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    # bug fix: test and store the position fetched for THIS
                    # event rather than the stale `pos` of the previous frame
                    mouse_pos = pygame.mouse.get_pos()
                    if 50 < mouse_pos[0] < 450 and 50 < mouse_pos[1] < 450:
                        clickedPos.append(mouse_pos)
            if event.type == pygame.MOUSEBUTTONUP:
                randomTimer = True
        # draw the 40x40 background grid
        for i in range(0, 41):
            pos = i * 10 + 50
            pygame.draw.line(display, line_colour, (50, pos), (450, pos), 1)
            pygame.draw.line(display, line_colour, (pos, 50), (pos, 450), 1)
        # draw every stored segment and update the event queue: a line
        # enters the queue when the sweep reaches its left endpoint and
        # leaves it at its right endpoint
        i = 0
        for line in lines:
            pygame.draw.line(display, colours[i], line[0], line[1], 1)
            hlp.AddText(line_name[i], line[0])
            if x == line[0][0]:
                eventQueue.append(line_name[i])
            if x == line[1][0]:
                eventQueue.remove(line_name[i])
            i += 1
        if stop == True:
            # Stop button: halt the sweep and rewind it to the left edge
            run = False
            x = 50
            stop = False  # consume the flag so it does not stick
        if run == True:
            # no drawing while the animation is running
            cursor = False
            randomLine = False
            x += 1  # advance the sweep one pixel per frame
            pygame.draw.line(display, hlp.red, (x, 50), (x, 450), 1)
            # j indexes intersect_name; k counts intersections passed so far
            j = 0
            k = 0
            for point in points:
                if point[0] <= x:
                    # the sweep has passed this intersection: mark it
                    pygame.draw.circle(display, hlp.white, point, 3)
                    k += 1
                if point[0] == x:
                    # intersection exactly under the sweep: record its label
                    orderList.append(intersect_name[j])
                j += 1
            if k > 0:
                n = len(lines)
                if n > 0:
                    # Bentley-Ottmann runs in O((n + k) log n)
                    efficiency = (n + k) * math.log10(n)
            # convert the pixel position to grid coordinates (the grid
            # starts at pixel 50; // 8 maps pixels onto grid units)
            c = (x - 50) // 8
            hlp.AddText("(X, Y) = (" + str(c) + ", 0)",
                        (200, 470), (255, 255, 255))
        if cursor == True:
            # replace the system cursor with the crosshair image
            pygame.mouse.set_visible(False)
            pos = pygame.mouse.get_pos()
            display.blit(pointer, pos)
            if len(clickedPos) > 0:
                # rubber-band preview from the first endpoint to the mouse
                pygame.draw.circle(display, hlp.white, clickedPos[0], 2)
                pygame.draw.line(display, hlp.white, clickedPos[0], pos, 1)
            if len(clickedPos) >= 2:
                # both endpoints chosen: store the segment, stop inserting
                AddNewLine([clickedPos[0], clickedPos[1]])
                cursor = False
                clickedPos = []
        else:
            pygame.mouse.set_visible(True)
        if randomLine == True:
            GenerateRandomLine()
            randomLine = False
            randomTimer = False
        if run == True and x > 450:
            # the sweep ran off the right edge: rewind and stop
            x = 50
            run = False
        if clear == True:
            # reset all shared and local state back to the defaults
            lines = []
            colours = []
            points = []
            orderList = []
            efficiency = 0
            eventQueue = []
            intersect_name = []
            line_name = []
            x = 50
            run = False
            clear = False
        # static corner labels and the control buttons for this screen
        hlp.AddText("(0,0)", (30, 25))
        hlp.AddText("(50,0)", (430, 25))
        hlp.AddText("(0,50)", (30, 450))
        hlp.AddText("(50,50)", (430, 450))
        hlp.Button("Run", 80, 5, 100, 35, RunActive)
        hlp.Button("Stop", 200, 5, 100, 35, StopActive)
        hlp.Button("Clear", 320, 5, 100, 30, ClearActive)
        hlp.Button("Random Segment", 50, 500, 180, 30, RandomActive)
        hlp.Button("Insert Segment", 280, 500, 180, 35, CursorActive)
        hlp.Button("Exit", 500, 5, 100, 30, sys.exit)
        back = hlp.ButtonWithReturn("Back", 900, 5, 100, 30, 1)
        if back > 0:
            break  # leave the loop and return to the introduction screen
        nxt = hlp.ButtonWithReturn("Next", 700, 5, 100, 30, 1)
        if nxt > 0:
            # show the algorithm description page
            hlp.Description(dsb.bo_desc)
        # right-hand information panel
        pygame.draw.rect(display, line_colour, [500, 50, 750, 490], 2)
        hlp.AddText("Bentley-Ottmann Algorithm", (520, 70))
        hlp.AddText("Event Queue:", (520, 120))
        # lay the event-queue labels out in rows of 23 so they never overlap
        i, o_x, o_y = 0, 540, 150
        for val in eventQueue:
            hlp.AddText(val, (o_x, o_y), (255, 255, 255))
            o_x += 30
            i += 1
            if i % 23 == 0:
                o_x = 540   # wrap back to the left edge of the panel
                o_y += 20   # and continue on the next row
        hlp.AddText("Order List:", (520, 200))
        # same layout for the order list, in wider rows of 14
        i, o_x, o_y = 0, 540, 230
        for val in orderList:
            hlp.AddText(val, (o_x, o_y), (255, 255, 255))
            o_x += 50
            i += 1
            if i % 14 == 0:
                o_x = 540
                o_y += 20
        hlp.AddText("Efficiency O((n+k)logn):", (520, 480))
        hlp.AddText(str(efficiency), (540, 505), (255, 255, 255))
        # flush the frame to the screen
        pygame.display.flip()
        # cap the loop at 30 frames per second
        clock.tick(30)
    intro.Introduction2()
def ShamosHoeyMain():
    '''
    Main display loop for the Shamos-Hoey sweep-line demo.

    Draws a 50x50 grid, lets the user insert segments (by hand or randomly),
    then sweeps a vertical line left-to-right until the first intersection
    point is reached, reporting the n*log10(n) efficiency figure.
    '''
    global cursor, lines, colours, points, randomLine, randomTimer, run, stop, clear, intersect_name
    global display, line_name
    pygame.display.set_caption("Shamos-Hoey Algorithm")
    display = pygame.display.set_mode((1280, 550), pygame.FULLSCREEN)
    # Per-session state, reset every time this screen is entered.
    cursor = False        # True while the user is placing segment endpoints
    randomLine = False    # one-shot request flag for a random segment
    clickedPos = []       # endpoints clicked so far for the segment being drawn
    firstPoint = None     # first intersection point found by the sweep
    efficiency = 0        # displayed n*log10(n) figure
    eventQueue = []       # names of segments currently crossed by the sweep line
    run = False           # True while the sweep animation is active
    x = 50                # sweep-line x position in pixels (grid starts at 50)
    back = 0              # set >0 by the Back button to leave this screen
    while True:  # main display loop
        display.fill((0, 0, 0))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_ESCAPE:  # ESC also quits
                    exit()
            if cursor == True and event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    # BUG FIX: the original sampled the mouse into `mouse_pos`
                    # but then tested and stored the stale `pos` left over from
                    # the previous frame; use the fresh click position instead.
                    pos = pygame.mouse.get_pos()
                    # Only accept clicks inside the 400x400 grid area.
                    if 50 < pos[0] < 450 and 50 < pos[1] < 450:
                        clickedPos.append(pos)
            if event.type == pygame.MOUSEBUTTONUP:
                randomTimer = True  # releasing the mouse re-arms the random-segment button
        # Draw the grid: 41 horizontal and 41 vertical lines, 10 px apart (50..450).
        for i in range(0, 41):
            pos = i * 10 + 50
            pygame.draw.line(display, line_colour, (50, pos), (450, pos), 1)
            pygame.draw.line(display, line_colour, (pos, 50), (pos, 450), 1)
        # Draw every stored segment and keep the event queue in sync with the sweep.
        i = 0
        for line in lines:
            pygame.draw.line(display, colours[i], line[0], line[1], 1)
            hlp.AddText(line_name[i], line[0])
            if x == line[0][0]:
                # Sweep line reached the segment's left endpoint: enqueue it.
                eventQueue.append(line_name[i])
            if x == line[1][0]:
                # Sweep line passed the segment's right endpoint: dequeue it.
                eventQueue.remove(line_name[i])
            i += 1
        if stop == True:
            # Stop button pressed: halt the sweep and rewind it.
            run = False
            x = 50
            stop = False  # one-shot flag; must be cleared after handling
            eventQueue = []
        if run == True:
            # While sweeping, block any editing of the scene.
            cursor = False
            randomLine = False
            x += 1  # advance the sweep line one pixel per frame
            pygame.draw.line(display, hlp.red, (x, 50), (x, 450), 1)
            for point in points:
                if point[0] == x:
                    # Shamos-Hoey only reports whether an intersection exists,
                    # so the sweep ends at the first intersection reached.
                    firstPoint = point
                    run = False
                    x = 50
                    eventQueue = []
                    efficiency = 0
                    break
            n = len(lines)  # number of existing segments
            if n > 0:
                efficiency = n * math.log10(n)  # O(n log n) cost estimate
        # Convert the sweep pixel position back to grid coordinates (8 px per unit).
        c = (x - 50) // 8
        hlp.AddText("(X, Y) = (" + str(c) + ", 0)", (200, 470),
                    hlp.white)
        if firstPoint != None:
            # Mark the intersection that stopped the sweep.
            pygame.draw.circle(display, hlp.white, firstPoint, 3)
        if cursor == True:
            # Hide the OS cursor and draw the custom pointer plus a preview line.
            pygame.mouse.set_visible(False)
            pos = pygame.mouse.get_pos()
            display.blit(pointer, pos)
            if len(clickedPos) > 0:
                pygame.draw.circle(display, hlp.white, clickedPos[0], 2)
                pygame.draw.line(display, hlp.white, clickedPos[0], pos, 1)
            if len(clickedPos) >= 2:
                # Two endpoints collected: commit the new segment.
                AddNewLine([clickedPos[0], clickedPos[1]])
                cursor = False
                clickedPos = []  # reset for the next segment
        else:
            pygame.mouse.set_visible(True)
        if randomLine == True:
            GenerateRandomLine()
            randomLine = False   # one-shot: draw a single random segment
            randomTimer = False  # disarm until the next mouse release
        if run == True and x > 450:
            # The sweep ran off the right edge without finding an intersection.
            x = 50
            run = False
        if clear == True:
            # Clear button pressed: reset the whole scene to its initial state.
            lines = []
            colours = []
            points = []
            efficiency = 0
            firstPoint = None
            eventQueue = []
            intersect_name = []
            line_name = []
            x = 50
            run = False
            clear = False
        # Static corner labels and control buttons.
        hlp.AddText("(0,0)", (30, 25))
        hlp.AddText("(50,0)", (430, 25))
        hlp.AddText("(0,50)", (30, 450))
        hlp.AddText("(50,50)", (430, 450))
        hlp.Button("Run", 80, 5, 100, 35, RunActive)
        hlp.Button("Stop", 200, 5, 100, 35, StopActive)
        hlp.Button("Clear", 320, 5, 100, 30, ClearActive)
        hlp.Button("Random Segment", 50, 500, 180, 30, RandomActive)
        hlp.Button("Insert Segment", 280, 500, 180, 35, CursorActive)
        hlp.Button("Exit", 500, 5, 100,
                   30, sys.exit)
        back = hlp.ButtonWithReturn("Back", 900, 5, 100, 30, 1)
        if back > 0:
            break  # leave this screen and fall through to the intro menu
        nxt = hlp.ButtonWithReturn("Next", 700, 5, 100, 30, 1)
        if nxt > 0:
            # Show the descriptive text for this algorithm.
            hlp.Description(dsb.sh_desc)
        # Right-hand information panel.
        pygame.draw.rect(display, line_colour, [500, 50, 750, 490], 2)
        hlp.AddText("Shamos-Hoey Algorithm", (520, 70))
        hlp.AddText("Event Queue:", (520, 120))
        # Lay the queue entries out in rows of 23 so the labels never overlap.
        i, o_x, o_y = 0, 540, 150
        for val in eventQueue:
            hlp.AddText(val, (o_x, o_y), hlp.white)
            o_x += 30  # next column
            i += 1
            if i % 23 == 0:
                o_x = 540  # reached the right edge: wrap
                o_y += 20  # next row
        hlp.AddText("Efficiency O(nlogn):", (520, 200))
        hlp.AddText(str(efficiency), (540, 230), hlp.white)
        pygame.display.flip()  # present this frame
        clock.tick(30)         # cap at 30 FPS
    intro.Introduction2()  # return to the introduction menu
def Efficiency():
    '''
    Bar-chart comparison screen.

    The user enters a segment count (n) and an intersection count (k); the
    screen then draws bars for the theoretical costs of Brute Force (n^2),
    Bentley-Ottmann ((n+k)*log10 n) and Shamos-Hoey (n*log10 n), scaled so
    the tallest bar fits the 350-pixel axis.
    '''
    pygame.display.set_caption("Efficiency Comparison")
    display = pygame.display.set_mode(
        (1280, 550), pygame.FULLSCREEN | pygame.DOUBLEBUF)
    n = 0         # number of segments entered by the user
    k = 0         # number of intersections entered by the user
    posX1 = 180   # x of the "Insert Line" button
    posX2 = 400   # x of the "Insert Intersect" button
    posY = 20     # y of the button row
    bPos = 450    # y of the chart baseline; bars grow upwards from here
    bo = 0        # Bentley-Ottmann bar height in pixels
    bf = 0        # Brute-Force bar height in pixels
    sh = 0        # Shamos-Hoey bar height in pixels
    bog = 0       # Bentley-Ottmann raw figure printed above its bar
    bfg = 0       # Brute-Force raw figure printed above its bar
    shg = 0       # Shamos-Hoey raw figure printed above its bar
    while True:  # main display loop
        display.fill((0, 0, 0))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_ESCAPE:  # ESC also quits
                    exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    pos = pygame.mouse.get_pos()
                    # Click inside the "Insert Line" button area?
                    if posX1 < pos[0] < posX1 + 130 and posY < pos[1] < posY + 60:
                        lineTxt = hlp.InsertNumber("Line Number:")
                        if lineTxt != "":
                            try:
                                n = int(lineTxt)
                            except ValueError:
                                # BUG FIX: was a bare `except:` that swallowed
                                # every error; only non-numeric input should
                                # fall back to zero.
                                n = 0
                    # Click inside the "Insert Intersect" button area?
                    elif posX2 < pos[0] < posX2 + 170 and posY < pos[1] < posY + 60:
                        intersectTxt = hlp.InsertNumber("Intersect Number :")
                        if intersectTxt != "":
                            try:
                                k = int(intersectTxt)
                            except ValueError:
                                # BUG FIX: narrowed from a bare `except:`.
                                k = 0
        if n > 0:
            # Standard efficiency formulas for each algorithm.
            bo = int((n + k) * math.log10(n))
            bog = bo  # raw number shown in the graph label
            bf = int(n * n)
            bfg = bf  # raw number shown in the graph label
            sh = int(n * math.log10(n))
            shg = sh  # raw number shown in the graph label
        if bo > 350 or bf > 350 or sh > 350:
            # Rescale so the tallest bar is exactly 350 px high.
            m = max(bo, bf, sh)
            bo = int((bo / m) * 350)
            bf = int((bf / m) * 350)
            sh = int((sh / m) * 350)
        # Keep every bar at least one pixel tall so it stays visible.
        if bo == 0:
            bo = 1
        if bf == 0:
            bf = 1
        if sh == 0:
            sh = 1
        # Input buttons and current values.
        hlp.Button("Insert Line", posX1, posY, 130, 30, None)
        hlp.Button("Insert Intersect", posX2, posY, 160, 30, None)
        hlp.AddText("Line: " + str(n), (600, 20), hlp.white)
        hlp.AddText("Intersect: " + str(k), (600, 50), hlp.white)
        hlp.AddText("BF", (180, 460), hlp.white)
        hlp.AddText("BO", (330, 460), hlp.white)
        hlp.AddText("SH", (480, 460), hlp.white)
        # Chart axes.
        pygame.draw.line(display, line_colour, (100, 100), (100, 500), 2)
        pygame.draw.line(display, line_colour, (50, 450), (650, 450), 2)
        if bf > 0:
            # Brute-Force bar with its raw figure above it.
            hlp.AddText(str(bfg), (165, bPos - bf - 30), hlp.white)
            pygame.draw.rect(display, hlp.button_colour, (165, bPos - bf, 50, bf))
        if bo > 0:
            # Bentley-Ottmann bar with its raw figure above it.
            hlp.AddText(str(bog), (315, bPos - bo - 30), hlp.white)
            pygame.draw.rect(display, hlp.button_colour, (315, bPos - bo, 50, bo))
        if sh > 0:
            # Shamos-Hoey bar with its raw figure above it.
            hlp.AddText(str(shg), (465, bPos - sh - 30), hlp.white)
            pygame.draw.rect(display, hlp.button_colour,
                             (465, bPos - sh, 50, sh))
        # Navigation buttons.
        hlp.Button("Exit", 350, 500, 100,
                   30, sys.exit)
        back = hlp.ButtonWithReturn("Back", 650, 500, 100, 30, 1)
        if back > 0:
            break  # leave this screen and return to the intro menu
        nxt = hlp.ButtonWithReturn("Next", 500, 500, 100, 30, 1)
        if nxt > 0:
            hlp.Description(dsb.effic_desc)
        pygame.display.flip()  # present this frame
        clock.tick(60)         # cap at 60 FPS
    intro.Introduction2()  # return to the introduction menu
def Efficiency2():
    '''
    Line-graph comparison screen.

    Pre-computes the theoretical cost of each algorithm for n = 10..1000
    segments, normalises all three curves onto a shared 0..max scale, and
    draws them (with a legend and spot values at n=10, 100, 1000) once the
    user clicks "Start".
    '''
    pygame.display.set_caption("Efficiency Comparison")
    display = pygame.display.set_mode(
        (1280, 550), pygame.FULLSCREEN | pygame.DOUBLEBUF)
    n = range(10, 1001)  # segment counts the curves are evaluated over
    bet = False          # becomes True when "Start" is clicked; gates the curve drawing
    posX1 = 180          # x of the first button slot (NOTE(review): appears unused here — confirm)
    posX2 = 400          # x of the "Start" button
    posY = 20            # y of the button row
    bPos = 450           # chart baseline y (NOTE(review): appears unused in this function — confirm)
    # Theoretical cost of each algorithm for every n in the range.
    sheffc = [i * math.log10(i) for i in n]    # Shamos-Hoey: n*log10(n)
    bfeffc = [i**2 for i in n]                 # Brute Force: n^2
    boeffc = [((i + (((i**2) - i) / 2)) * math.log10(i)) for i in n]  # Bentley-Ottmann with worst-case k
    topalg = sheffc + bfeffc + boeffc  # all three curves pooled for a shared scale
    mx = max(topalg)  # global maximum across the pooled values
    mn = min(topalg)  # global minimum across the pooled values
    # Normalise the Shamos-Hoey curve onto the shared scale, then thin it to
    # 550 samples (one per horizontal pixel of the plot area).
    transsheffc = [TransValue(i, mx, mn) for i in sheffc]
    transshefc2 = random.sample(transsheffc, 550)
    transshefc2.sort()  # ascending, so the curve rises left to right
    shno = 0       # running pixel offset along the x axis
    shpoints = []  # (x, y) screen points for the Shamos-Hoey polyline
    # Build the polyline in three bands; the later bands get a small extra
    # vertical offset (+2, +4) so the curve stays visually separated from the
    # others where they would otherwise overlap.
    for i in transshefc2[:200]:
        shpoints.append((100 + shno, 450 - int(i)))  # y is flipped: larger cost sits higher
        shno += 1
    for i in transshefc2[200:349]:
        shpoints.append((100 + shno, 450 - (int(i + 2))))
        shno += 1
    for i in transshefc2[349:]:
        shpoints.append((100 + shno, 450 - (int(i + 4))))
        shno += 1
    # Same normalise/thin/sort pipeline for the Brute-Force curve (no offset bands).
    transbfeffc = [TransValue(i, mx, mn) for i in bfeffc]
    transbfeffc2 = random.sample(transbfeffc, 550)
    transbfeffc2.sort()
    bfno = 0
    bfpoints = []
    for i in(transbfeffc2):
        bfpoints.append((100 + bfno, 450 - int(i)))
        bfno += 1
    # And again for the Bentley-Ottmann curve.
    transboeffc = [TransValue(i, mx, mn) for i in boeffc]
    transboeffc2 = random.sample(transboeffc, 550)
    transboeffc2.sort()
    bono = 0
    bopoints = []
    for i in(transboeffc2):
        bopoints.append((100 + bono, 450 - int(i)))
        bono += 1
    while True:  # main display loop
        display.fill((0, 0, 0))
        # display.blit(hlp.dscbg,(0,0))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_ESCAPE:  # ESC also quits
                    exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    pos = pygame.mouse.get_pos()
                    # Click inside the "Start" button area enables the curves.
                    if posX2 < pos[0] < posX2 + 170 and posY < pos[1] < posY + 60:
                        bet = True
        # Static chrome: Start button, axis tick labels, and a rotated y-axis title.
        hlp.Button("Start", posX2, posY, 160, 30, None)
        hlp.AddText("Lines: 10, 100, 1000", (600, 20), hlp.white)
        hlp.AddText("10", (115, 460), hlp.white)
        hlp.AddText("100", (350, 460), hlp.white)
        hlp.AddText("1000", (650, 460), hlp.white)
        hlp.AddText("max", (50, 100), hlp.white)
        hlp.AddText("0", (50, 460), hlp.white)
        sidefont = pygame.font.Font(bitterfont, 16)
        sidetext = sidefont.render("Algorithm Efficiency", True, hlp.white)
        sidetext = pygame.transform.rotate(sidetext, 90)  # vertical axis label
        display.blit(sidetext, (70, 235))
        # Chart axes.
        pygame.draw.line(display, line_colour, (100, 100), (100, 500), 2)
        pygame.draw.line(display, line_colour, (50, 450), (650, 450), 2)
        if bet:
            # Curves plus legend, shown only after "Start" has been clicked.
            pygame.draw.lines(display, (62, 150, 81), False, bfpoints, 4)
            pygame.draw.lines(display, (255, 255, 0), False, shpoints, 4)
            pygame.draw.lines(display, (255, 0, 0), False, bopoints, 4)
            hlp.AddText("Brute Force", (750, 150), hlp.white)
            hlp.AddText("Bentley-Ottmann", (750, 250), hlp.white)
            hlp.AddText("Shamos-Hoey", (750, 350), hlp.white)
            pygame.draw.line(display, (62, 150, 81), (720, 160), (740, 160), 4)
            pygame.draw.line(display, (255, 0, 0), (720, 260), (740, 260), 4)
            pygame.draw.line(display, (255, 255, 0), (720, 360), (740, 360), 4)
            # Spot values of each cost function at n = 10, 100, 1000
            # (list indices 9, 499, 989 of the n = 10..1000 range).
            hlp.AddText("n=10;100;1000", (720, 390), hlp.white)
            hlp.AddText("Brute Force = " + str(round(bfeffc[9])) + "; " + str(
                round(bfeffc[499])) + "; " + str(round(bfeffc[989])), (720, 405), hlp.white)
            hlp.AddText("Bentley-Ottmann = " + str(round(boeffc[9])) + "; " + str(
                round(boeffc[499])) + "; " + str(round(boeffc[989])), (720, 420), hlp.white)
            hlp.AddText("Shamos-Hoey = " + str(round(sheffc[9])) + "; " + str(
                round(sheffc[499])) + "; " + str(round(sheffc[989])), (720, 435), hlp.white)
        # Navigation buttons.
        hlp.Button("Exit", 350, 500, 100,
                   30, sys.exit)
        back = hlp.ButtonWithReturn("Back", 650, 500, 100, 30, 1)
        if back > 0:
            break  # leave this screen and return to the intro menu
        nxt = hlp.ButtonWithReturn("Next", 500, 500, 100, 30, 1)
        if nxt > 0:
            hlp.Description(dsb.effic_desc)
        pygame.display.flip()  # present this frame
        clock.tick(60)         # cap at 60 FPS
    intro.Introduction2()  # return to the introduction menu
def AddNewColour():
    '''
    Append a randomly generated pygame colour to the global colour list.

    Each stored segment gets its own colour; this is called once per new line.
    '''
    global colours
    # Draw three channel values in red, green, blue order (1..254 each).
    red, green, blue = (random.randrange(1, 255) for _ in range(3))
    colours.append(pygame.Color(red, green, blue))
def AddNewLine(newLine):
    '''
    Register a new segment in the global scene state.

    Records an intersection name ("existing.new") for every stored segment
    the new one crosses, stores the segment with its endpoints ordered
    left-to-right, names it after its 1-based position, and refreshes the
    colour tables.
    '''
    global lines, line_name, intersect_name
    label = str(1 + len(lines))  # 1-based name for the new segment
    # Record every crossing between the new segment and the existing ones.
    for idx, existing in enumerate(lines):
        if CheckIntersect(newLine[0], newLine[1], existing[0], existing[1]):
            intersect_name.append(line_name[idx] + "." + label)
    # Store endpoints ordered by x so the sweep always meets the left end first.
    start, end = newLine[0], newLine[1]
    ordered = [end, start] if start[0] > end[0] else newLine
    lines.append(ordered)
    line_name.append(label)
    AddNewColour()   # give the segment its own colour
    ChangeColour()   # refresh the brute-force highlight colours
def ChangeColour():
    '''
    Rebuild the brute-force colour table from the normal colours.

    Every segment that takes part in a recorded intersection (names are of
    the form "a.b", 1-based) is recoloured white in the copy.
    '''
    global intersect_name, colours, brutecolours
    brutecolours = list(colours)  # work on a copy; `colours` stays untouched
    for pair in intersect_name:
        parts = pair.split(".")  # "a.b" -> the two segment numbers
        for index in (int(parts[0]), int(parts[1])):
            brutecolours[index - 1] = hlp.white  # names are 1-based
def CursorActive():
    '''
    Enable the drawing cursor so the user can insert a segment by hand.

    Button callback; the main loop reads the flag each frame.
    '''
    global cursor
    cursor = True
def RandomActive():
    '''
    Request a random segment, but only while the random timer is armed.

    Button callback; the timer debounces repeated clicks.
    '''
    global randomLine, randomTimer
    if randomTimer:
        randomLine = True
def RunActive():
    '''
    Start the sweep and reset the order list for a fresh run.

    Button callback; the main loop reads both flags each frame.
    '''
    global run, orderList
    orderList = []  # start every run with an empty order list
    run = True
def StopActive():
    '''
    Signal the main loop to halt the sweep on the next frame.

    Button callback; the loop clears the flag once handled.
    '''
    global stop
    stop = True
def ClearActive():
    '''
    Signal the main loop to wipe all segments and reset the scene.

    Button callback; the loop clears the flag once handled.
    '''
    global clear
    clear = True
# activate flag for introduction menu
| 56.279425 | 200 | 0.61765 |
544732e628a00b56caac8c9cd412468f1e74169a | 8,514 | py | Python | iologik/e2210.py | shannon-jia/iologik | bda254ee1cdb3f4d724fbb9d6fe993257f1cce52 | [
"MIT"
] | null | null | null | iologik/e2210.py | shannon-jia/iologik | bda254ee1cdb3f4d724fbb9d6fe993257f1cce52 | [
"MIT"
] | null | null | null | iologik/e2210.py | shannon-jia/iologik | bda254ee1cdb3f4d724fbb9d6fe993257f1cce52 | [
"MIT"
] | null | null | null | import aiohttp
import asyncio
import async_timeout
import logging
from collections import namedtuple, deque
from .events import Events
from html.parser import HTMLParser
log = logging.getLogger(__name__)
| 38.7 | 161 | 0.499765 |
5448e80da68c244752c3380cbc4f039308ae3d65 | 7,009 | py | Python | apps/cmdb/verify/operate.py | yanshicheng/super-ops | dd39fe971bfd0f912cab155b82e41a09aaa47892 | [
"Apache-2.0"
] | null | null | null | apps/cmdb/verify/operate.py | yanshicheng/super-ops | dd39fe971bfd0f912cab155b82e41a09aaa47892 | [
"Apache-2.0"
] | 1 | 2022-01-17T09:34:14.000Z | 2022-01-18T13:32:20.000Z | apps/cmdb/verify/operate.py | yanshicheng/super_ops | dd39fe971bfd0f912cab155b82e41a09aaa47892 | [
"Apache-2.0"
] | null | null | null | from ..models import Classify, Fields, Asset, AssetBind, ClassifyBind
from django.db.models import Q
from collections import OrderedDict
from django.forms.models import model_to_dict
def get_c_classify_bind(cid):
    """Return the ClassifyBind rows whose child classify id is *cid*.

    Yields the matching queryset when at least one row exists, otherwise an
    empty list (keeps the falsy-but-iterable contract of the original).
    """
    bindings = ClassifyBind.objects.filter(child_classify_id=cid)
    return bindings if bindings else []
544b2254aa27aedc58e9f1dae64e313ac23e420d | 525 | py | Python | glass/mirror.py | fwcd/glass | eba5321753a41e4ebb28f6933ec554c104cb0f4c | [
"MIT"
] | 2 | 2021-02-01T23:06:35.000Z | 2022-01-12T15:39:30.000Z | glass/mirror.py | fwcd/glass | eba5321753a41e4ebb28f6933ec554c104cb0f4c | [
"MIT"
] | 1 | 2022-03-18T04:07:58.000Z | 2022-03-19T18:00:08.000Z | glass/mirror.py | fwcd/glass | eba5321753a41e4ebb28f6933ec554c104cb0f4c | [
"MIT"
] | null | null | null | import subprocess
from pathlib import Path
from urllib.parse import urlparse
| 35 | 88 | 0.67619 |
544bbee47e78ee286a199342f8cffdd22f773ed2 | 3,880 | py | Python | modeling/__init__.py | WinstonHuTiger/BOEMD-UNet | f81a0506b8b8a90fd783afcda61f28acb113fc77 | [
"MIT"
] | 2 | 2021-10-03T11:49:32.000Z | 2021-12-15T11:40:52.000Z | modeling/__init__.py | WinstonHuTiger/BOEMD-UNet | f81a0506b8b8a90fd783afcda61f28acb113fc77 | [
"MIT"
] | null | null | null | modeling/__init__.py | WinstonHuTiger/BOEMD-UNet | f81a0506b8b8a90fd783afcda61f28acb113fc77 | [
"MIT"
] | null | null | null | import os
import torch
from modeling.unet import *
from modeling.bAttenUnet import MDecoderUNet, MMultiBAUNet, MMultiBUNet
def build_transfer_learning_model(args, nchannels, nclass, pretrained, model='unet'):
"""
param args:
param nclass: number of classes
param pretrained: path to the pretrained model parameters
"""
# hard coded class number for pretained UNet on BraTS
pre_model = UNet(
n_channels=args.nchannels,
n_classes=3,
bilinear=True,
dropout=args.dropout,
dropp=args.drop_p
)
if not os.path.isfile(pretrained):
raise RuntimeError("no checkpoint found at {}".format(pretrained))
params = torch.load(pretrained)
pre_model.load_state_dict(params['state_dict'])
m = UNet(
n_channels=args.nchannels,
n_classes=nclass,
bilinear=pre_model.bilinear,
dropout=args.dropout,
dropp=args.drop_p
)
assert args.nchannels == pre_model.n_channels
m.inc = pre_model.inc
m.down1 = pre_model.down1
m.down2 = pre_model.down2
m.down3 = pre_model.down3
m.down4 = pre_model.down4
m.up1 = pre_model.up1
m.up2 = pre_model.up2
m.up3 = pre_model.up3
m.up4 = pre_model.up4
return m
| 27.51773 | 85 | 0.559794 |
544c328461515102957fb6ba2f7ecaadd80e93ff | 1,356 | py | Python | A.py | JK-Incorporated/EYN-DOS | 6dc331655b5fd04e6d37651ea79ac4e204bfd52e | [
"BSD-3-Clause"
] | null | null | null | A.py | JK-Incorporated/EYN-DOS | 6dc331655b5fd04e6d37651ea79ac4e204bfd52e | [
"BSD-3-Clause"
] | null | null | null | A.py | JK-Incorporated/EYN-DOS | 6dc331655b5fd04e6d37651ea79ac4e204bfd52e | [
"BSD-3-Clause"
] | null | null | null | import os
from os import listdir
from os.path import isfile, join
dir_path = os.path.dirname(os.path.realpath(__file__))
filesys = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
size=0
for path, dirs, files in os.walk(dir_path):
for f in files:
fp = os.path.join(path, f)
size += os.path.getsize(fp)
while True:
command_lineA=input("A:\> ")
if command_lineA==("B:"):
print("")
os.system("python3 B.py")
print("")
if command_lineA==("C:"):
print("")
os.system("python3 C.py")
print("")
if command_lineA==("D:"):
print("")
os.system("python3 D.py")
print("")
if command_lineA==("E:"):
print("")
os.system("python3 E.py")
print("")
if command_lineA==("dir"):
print("")
print("ERROR EYN_A1")
print("")
if command_lineA==("listdir"):
print("")
print("ERROR EYN_A1")
print("")
if command_lineA==("end"):
print("")
exit()
| 22.229508 | 69 | 0.526549 |
544ec34dfb38023e11066f7adf551926d37772c9 | 3,111 | py | Python | api_site/src/api_x/application/entry/bankcard_views.py | webee/pay | b48c6892686bf3f9014bb67ed119506e41050d45 | [
"W3C"
] | 1 | 2019-10-14T11:51:49.000Z | 2019-10-14T11:51:49.000Z | api_site/src/api_x/application/entry/bankcard_views.py | webee/pay | b48c6892686bf3f9014bb67ed119506e41050d45 | [
"W3C"
] | null | null | null | api_site/src/api_x/application/entry/bankcard_views.py | webee/pay | b48c6892686bf3f9014bb67ed119506e41050d45 | [
"W3C"
] | null | null | null | # coding=utf-8
from __future__ import unicode_literals
from api_x.utils import response
from api_x.utils.entry_auth import verify_request
from flask import request
from . import application_mod as mod
from .. import dba
from .. import bankcard
from api_x.utils.parser import to_bool
from pytoolbox.util.log import get_logger
logger = get_logger(__name__)
| 34.566667 | 104 | 0.724204 |
544eed2f5a6fd341973e64324b8db14d8a1824d5 | 2,928 | py | Python | httpd/httpd.py | protocollabs/dmprd | c39e75532ae73458b8239b2d21ca69e42b68929f | [
"MIT"
] | 1 | 2018-09-05T08:16:00.000Z | 2018-09-05T08:16:00.000Z | httpd/httpd.py | protocollabs/dmprd | c39e75532ae73458b8239b2d21ca69e42b68929f | [
"MIT"
] | 8 | 2017-01-08T19:11:16.000Z | 2018-09-24T12:20:40.000Z | httpd/httpd.py | protocollabs/dmprd | c39e75532ae73458b8239b2d21ca69e42b68929f | [
"MIT"
] | 2 | 2017-08-23T12:41:02.000Z | 2018-08-17T08:11:35.000Z | import asyncio
import os
try:
from aiohttp import web
except ImportError:
web = None
| 34.046512 | 100 | 0.565574 |
5451d6245307e0c41240f5d6be7ea9013b165899 | 196 | py | Python | SImple-Number.py | TonikaHristova/Loops | 55b3f1608cf81d185fe98366450b527350d86f3b | [
"MIT"
] | null | null | null | SImple-Number.py | TonikaHristova/Loops | 55b3f1608cf81d185fe98366450b527350d86f3b | [
"MIT"
] | null | null | null | SImple-Number.py | TonikaHristova/Loops | 55b3f1608cf81d185fe98366450b527350d86f3b | [
"MIT"
] | null | null | null | import math
num = int(input())
is_prime = True
if num < 2:
print("Not prime")
for i in range(2, int(math.sqrt(num)+1)):
if num / i == 0:
is_prime = False
print(is_prime)
| 9.8 | 41 | 0.571429 |
54520f95709f73e2e760152d29139cc05ba229e9 | 218 | py | Python | badgify/apps.py | BrendanBerkley/django-badgify | 61203e92cb76982f778caf168d371a72a401db10 | [
"MIT"
] | null | null | null | badgify/apps.py | BrendanBerkley/django-badgify | 61203e92cb76982f778caf168d371a72a401db10 | [
"MIT"
] | null | null | null | badgify/apps.py | BrendanBerkley/django-badgify | 61203e92cb76982f778caf168d371a72a401db10 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 19.818182 | 42 | 0.674312 |
545268aad6cd438a8b86741579655c5f5b28ba41 | 249 | py | Python | test/test_i18n.py | timgates42/uliweb | 80c0459c5e5d257b665eb2e1d0b5f68ad55c42f1 | [
"BSD-2-Clause"
] | 202 | 2015-01-12T08:10:48.000Z | 2021-11-08T09:04:32.000Z | test/test_i18n.py | timgates42/uliweb | 80c0459c5e5d257b665eb2e1d0b5f68ad55c42f1 | [
"BSD-2-Clause"
] | 30 | 2015-01-01T09:07:17.000Z | 2021-06-03T12:58:45.000Z | test/test_i18n.py | timgates42/uliweb | 80c0459c5e5d257b665eb2e1d0b5f68ad55c42f1 | [
"BSD-2-Clause"
] | 58 | 2015-01-12T03:28:54.000Z | 2022-01-14T01:58:08.000Z | from uliweb.i18n import ugettext_lazy as _
def test_1():
"""
>>> x = _('Hello')
>>> print repr(x)
ugettext_lazy('Hello')
"""
def test_1():
"""
>>> x = _('Hello {0}')
>>> print x.format('name')
Hello name
""" | 16.6 | 42 | 0.48996 |
545316d49d38f35bdeec6536c47e60475a119d98 | 1,041 | py | Python | KeyBoardControlImageCaptue.py | Prashant-1305/Tello-Drone | 11c3f845a9887c66ac7e52e042dfd28f23555d2e | [
"MIT"
] | null | null | null | KeyBoardControlImageCaptue.py | Prashant-1305/Tello-Drone | 11c3f845a9887c66ac7e52e042dfd28f23555d2e | [
"MIT"
] | null | null | null | KeyBoardControlImageCaptue.py | Prashant-1305/Tello-Drone | 11c3f845a9887c66ac7e52e042dfd28f23555d2e | [
"MIT"
] | null | null | null | import KeyPressModule as kp
from djitellopy import tello
import time
import cv2
global img
kp.init()
skynet = tello.Tello()
skynet.connect()
print(skynet.get_battery())
skynet.streamon()
while True:
keyVals = getKeyboardInput()
skynet.send_rc_control(keyVals[0], keyVals[1], keyVals[2], keyVals[3])
img = skynet.get_frame_read().frame
#timg = cv2.resize(img, (360, 240))
cv2.imshow("Image", img)
cv2.waitKey(1) | 22.148936 | 74 | 0.616715 |
545376512fee3de8e6da4487e774ee09c7ad912d | 1,479 | py | Python | cnns/foolbox/foolbox_2_3_0/tests/test_model_zoo.py | anonymous-user-commits/perturb-net | 66fc7c4a1234fa34b92bcc85751f0a6e23d80a23 | [
"MIT"
] | 12 | 2021-07-27T07:18:24.000Z | 2022-03-09T13:52:20.000Z | cnns/foolbox/foolbox_2_3_0/tests/test_model_zoo.py | anonymous-user-commits/perturb-net | 66fc7c4a1234fa34b92bcc85751f0a6e23d80a23 | [
"MIT"
] | 2 | 2021-08-03T09:21:33.000Z | 2021-12-29T14:25:30.000Z | cnns/foolbox/foolbox_2_3_0/tests/test_model_zoo.py | anonymous-user-commits/perturb-net | 66fc7c4a1234fa34b92bcc85751f0a6e23d80a23 | [
"MIT"
] | 3 | 2021-11-18T14:46:40.000Z | 2022-01-03T15:47:23.000Z | from foolbox import zoo
import numpy as np
import foolbox
import sys
import pytest
from foolbox.zoo.model_loader import ModelLoader
from os.path import join, dirname
test_data = [
# private repo won't work on travis
# ('https://github.com/bethgelab/AnalysisBySynthesis.git', (1, 28, 28)),
# ('https://github.com/bethgelab/convex_adversarial.git', (1, 28, 28)),
# ('https://github.com/bethgelab/mnist_challenge.git', 784)
(join("file://", dirname(__file__), "data/model_repo"), (3, 224, 224))
]
| 27.90566 | 76 | 0.694388 |
54538684df9453f633582e0d87edd283242082a7 | 8,464 | py | Python | tests/unit/nistbeacon/test_nistbeacon.py | urda/py_nist_beacon | 0251970ec31bc370c326c4c3c3b93a5513bdc028 | [
"Apache-2.0"
] | 11 | 2017-05-06T02:42:34.000Z | 2021-02-11T10:13:09.000Z | tests/unit/nistbeacon/test_nistbeacon.py | urda/nistbeacon | 0251970ec31bc370c326c4c3c3b93a5513bdc028 | [
"Apache-2.0"
] | 31 | 2015-12-13T12:04:10.000Z | 2021-01-27T02:34:34.000Z | tests/unit/nistbeacon/test_nistbeacon.py | urda/py_nist_beacon | 0251970ec31bc370c326c4c3c3b93a5513bdc028 | [
"Apache-2.0"
] | 1 | 2015-12-25T03:50:25.000Z | 2015-12-25T03:50:25.000Z | """
Copyright 2015-2017 Peter Urda
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from unittest import TestCase
from unittest.mock import (
Mock,
patch,
)
import requests.exceptions
from requests import Response
from nistbeacon import (
NistBeacon,
NistBeaconValue,
)
from tests.test_data.nist_records import local_record_json_db
| 34.129032 | 79 | 0.69258 |
54539ddc987a464c0db1b706648667e1f538fd7a | 5,417 | py | Python | aae/auto_pose/visualization/render_pose.py | shbe-aau/multi-pose-estimation | 0425ed9dcc7969f0281cb435615abc33c640e157 | [
"MIT"
] | 4 | 2021-12-28T09:25:06.000Z | 2022-01-13T12:55:44.000Z | aae/auto_pose/visualization/render_pose.py | shbe-aau/multi-view-pose-estimation | 22cea6cd09684fe655fb2214bc14856f589048e1 | [
"MIT"
] | null | null | null | aae/auto_pose/visualization/render_pose.py | shbe-aau/multi-view-pose-estimation | 22cea6cd09684fe655fb2214bc14856f589048e1 | [
"MIT"
] | 1 | 2022-01-13T13:00:15.000Z | 2022-01-13T13:00:15.000Z |
import cv2
import numpy as np
import os
from auto_pose.meshrenderer import meshrenderer
from auto_pose.ae.utils import lazy_property
| 47.517544 | 158 | 0.567288 |
5454b8f602a3ea5235a7102af61b547b5c4c3b31 | 1,128 | py | Python | client/nodes/common/docker_subsriber.py | CanboYe/BusEdge | 2e53e1d1d82559fc3e9f0029b2f0faf4e356b210 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 | 2021-08-17T14:14:28.000Z | 2022-02-02T02:09:33.000Z | client/nodes/common/docker_subsriber.py | cmusatyalab/gabriel-BusEdge | 528a6ee337882c6e709375ecd7ec7e201083c825 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | client/nodes/common/docker_subsriber.py | cmusatyalab/gabriel-BusEdge | 528a6ee337882c6e709375ecd7ec7e201083c825 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2021-09-01T16:18:29.000Z | 2021-09-01T16:18:29.000Z | # SPDX-FileCopyrightText: 2021 Carnegie Mellon University
#
# SPDX-License-Identifier: Apache-2.0
import cv2
import numpy as np
import rospy
from gabriel_protocol import gabriel_pb2
from std_msgs.msg import UInt8MultiArray
| 29.684211 | 72 | 0.720745 |
54562608a59ce9476a71d70e032f5d5bf8f6d75b | 138 | py | Python | datx/base_station.py | ipipdotnet/datx-python | 68d6e99363abc6ae48714be38aa90a5ae6e20fd4 | [
"Apache-2.0"
] | 39 | 2018-03-13T02:48:36.000Z | 2021-03-18T07:51:54.000Z | datx/base_station.py | ipipdotnet/datx-python | 68d6e99363abc6ae48714be38aa90a5ae6e20fd4 | [
"Apache-2.0"
] | 1 | 2018-11-06T08:30:31.000Z | 2018-11-06T08:30:31.000Z | datx/base_station.py | ipipdotnet/datx-python | 68d6e99363abc6ae48714be38aa90a5ae6e20fd4 | [
"Apache-2.0"
] | 10 | 2018-04-28T02:07:08.000Z | 2020-11-09T04:26:47.000Z | # -*- coding: utf-8 -*-
"""
:copyright: 2018 by IPIP.net
"""
from .district import District | 15.333333 | 33 | 0.623188 |
5456722cbb51619ad54be3201718c3cfa01f24c7 | 13,034 | py | Python | cogs/user.py | billydevyt/RoboBilly | 6d79ab9626a6d6b487dd73688ad7187212e7864c | [
"MIT"
] | 6 | 2020-11-07T16:46:18.000Z | 2021-01-03T11:52:39.000Z | cogs/user.py | billyeatcookies/RoboBilly | 6d79ab9626a6d6b487dd73688ad7187212e7864c | [
"MIT"
] | 3 | 2020-11-30T01:52:41.000Z | 2021-01-03T11:53:18.000Z | cogs/user.py | billyeatcookies/RoboBilly | 6d79ab9626a6d6b487dd73688ad7187212e7864c | [
"MIT"
] | 7 | 2021-04-17T07:27:58.000Z | 2021-08-31T15:21:42.000Z | """
User module
"""
import discord
import random
import asyncio
from discord.ext import commands
from discord.ext.commands import has_permissions, MissingPermissions, BadArgument
import requests, json, pyfiglet
from datetime import timedelta, datetime
#===================================== ADD COG ======================================#
def setup(bot):
bot.add_cog(User(bot))
| 43.15894 | 490 | 0.603192 |
5459131a00c531976bbf1bad787c4cbce19610f5 | 622 | py | Python | wsu/tools/simx/simx/python/simx/protomap/util.py | tinyos-io/tinyos-3.x-contrib | 3aaf036722a2afc0c0aad588459a5c3e00bd3c01 | [
"BSD-3-Clause",
"MIT"
] | 1 | 2020-02-28T20:35:09.000Z | 2020-02-28T20:35:09.000Z | wsu/tools/simx/simx/python/simx/protomap/util.py | tinyos-io/tinyos-3.x-contrib | 3aaf036722a2afc0c0aad588459a5c3e00bd3c01 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | wsu/tools/simx/simx/python/simx/protomap/util.py | tinyos-io/tinyos-3.x-contrib | 3aaf036722a2afc0c0aad588459a5c3e00bd3c01 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | def sync_read(socket, size):
"""
Perform a (temporary) blocking read.
The amount read may be smaller than the amount requested if a
timeout occurs.
"""
timeout = socket.gettimeout()
socket.settimeout(None)
try:
return socket.recv(size)
finally:
socket.settimeout(timeout)
def sync_write(socket, data):
"""
Perform a (temporary) blocking write.
"""
timeout = socket.gettimeout()
socket.settimeout(None)
try:
while data:
sent = socket.send(data)
data = data[sent:]
finally:
socket.settimeout(timeout)
| 22.214286 | 65 | 0.607717 |
545b4ee6fb3b667ccf9bf2aadc9dfb4077e4dee6 | 976 | py | Python | mergeKsortedlist.py | ZhouLihua/leetcode | 7a711e450756fb7b5648e938879d690e583f5957 | [
"MIT"
] | 2 | 2019-05-16T03:11:44.000Z | 2019-10-25T03:20:05.000Z | mergeKsortedlist.py | ZhouLihua/leetcode | 7a711e450756fb7b5648e938879d690e583f5957 | [
"MIT"
] | null | null | null | mergeKsortedlist.py | ZhouLihua/leetcode | 7a711e450756fb7b5648e938879d690e583f5957 | [
"MIT"
] | null | null | null | #Definition for singly-linked list.
import sys
| 27.885714 | 50 | 0.482582 |
545c039475e437fcfe31a7978e08b358e2864ddd | 1,334 | py | Python | f5/bigip/tm/vcmp/test/unit/test_virtual_disk.py | nghia-tran/f5-common-python | acb23a6e5830a119b460c19a578654113419f5c3 | [
"Apache-2.0"
] | 272 | 2016-02-23T06:05:44.000Z | 2022-02-20T02:09:32.000Z | f5/bigip/tm/vcmp/test/unit/test_virtual_disk.py | nghia-tran/f5-common-python | acb23a6e5830a119b460c19a578654113419f5c3 | [
"Apache-2.0"
] | 1,103 | 2016-02-11T17:48:03.000Z | 2022-02-15T17:13:37.000Z | f5/bigip/tm/vcmp/test/unit/test_virtual_disk.py | nghia-tran/f5-common-python | acb23a6e5830a119b460c19a578654113419f5c3 | [
"Apache-2.0"
] | 167 | 2016-02-11T17:48:21.000Z | 2022-01-17T20:13:05.000Z | # Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
import pytest
from f5.bigip.tm.vcmp.virtual_disk import Virtual_Disk
from f5.sdk_exception import UnsupportedMethod
| 29.644444 | 74 | 0.749625 |
545c6c254ab620127f5ce9a6e6a0f63adc08b458 | 1,281 | py | Python | tinylinks/admin.py | lavindiuss/django-shorter | 50bc018e762b396cd9bc71991f6ea1329aaceddd | [
"MIT"
] | null | null | null | tinylinks/admin.py | lavindiuss/django-shorter | 50bc018e762b396cd9bc71991f6ea1329aaceddd | [
"MIT"
] | null | null | null | tinylinks/admin.py | lavindiuss/django-shorter | 50bc018e762b396cd9bc71991f6ea1329aaceddd | [
"MIT"
] | null | null | null | """Admin sites for the ``django-tinylinks`` app."""
from django.contrib import admin
from django.template.defaultfilters import truncatechars
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from tinylinks.forms import TinylinkAdminForm
from tinylinks.models import Tinylink, TinylinkLog
admin.site.register(Tinylink, TinylinkAdmin)
admin.site.register(TinylinkLog, TinylinkLogAdmin)
| 28.466667 | 76 | 0.699454 |
545c8aae9bf713a7f6422a8269de2049905dd92f | 562 | py | Python | wk11frontend.py | alvaro-root/pa2_2021 | fee3931f9e10a7d39af9bf2ce5f033e41621bbda | [
"MIT"
] | null | null | null | wk11frontend.py | alvaro-root/pa2_2021 | fee3931f9e10a7d39af9bf2ce5f033e41621bbda | [
"MIT"
] | null | null | null | wk11frontend.py | alvaro-root/pa2_2021 | fee3931f9e10a7d39af9bf2ce5f033e41621bbda | [
"MIT"
] | null | null | null | import requests
import json
if __name__ == "__main__":
main()
| 21.615385 | 82 | 0.551601 |
545fd8631d933f37ee5ed9022359f6f1a7a06f4b | 73 | py | Python | software/python/XilinxKcu1500Pgp3/__init__.py | ejangelico/cryo-on-epix-hr-dev | 354bf205a67d3c43b4e815823dd78cec85d3b672 | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-05-24T22:01:54.000Z | 2021-05-24T22:01:54.000Z | software/python/XilinxKcu1500Pgp3/__init__.py | ejangelico/cryo-on-epix-hr-dev | 354bf205a67d3c43b4e815823dd78cec85d3b672 | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-02-25T20:27:36.000Z | 2021-03-31T17:55:08.000Z | software/python/XilinxKcu1500Pgp3/__init__.py | ejangelico/cryo-on-epix-hr-dev | 354bf205a67d3c43b4e815823dd78cec85d3b672 | [
"BSD-3-Clause-LBNL"
] | 4 | 2020-10-21T21:39:37.000Z | 2021-07-24T02:19:34.000Z | #!/usr/bin/env python
from XilinxKcu1500Pgp3.XilinxKcu1500Pgp3 import *
| 18.25 | 49 | 0.808219 |
545fe80c1b80eb166756266947e1f74465ae48f6 | 2,517 | py | Python | files/files.py | StevenKangWei/tools | f0de7d2202dbe979b06ba8344addad6df6e96320 | [
"MIT"
] | 15 | 2021-07-06T13:03:09.000Z | 2022-03-05T04:18:13.000Z | files/files.py | StevenKangWei/tools | f0de7d2202dbe979b06ba8344addad6df6e96320 | [
"MIT"
] | 1 | 2021-12-03T05:39:24.000Z | 2021-12-03T05:39:24.000Z | files/files.py | StevenKangWei/tools | f0de7d2202dbe979b06ba8344addad6df6e96320 | [
"MIT"
] | 5 | 2021-07-30T09:31:31.000Z | 2022-01-03T06:30:25.000Z | #!/usr/bin/python
import os
import glob
import traceback
import datetime
import dandan
from flask import Flask
from flask import abort
from flask import send_file
from flask import send_from_directory
from flask import render_template
from werkzeug.routing import BaseConverter
import config
__VERSION__ = "0.0.1.1"
dirname = os.path.dirname(os.path.abspath(__file__))
favicon = os.path.join(dirname, "static/images/favicon.ico")
server = Flask(__name__)
server.url_map.converters['regex'] = RegexConverter
if __name__ == "__main__":
main()
| 25.683673 | 90 | 0.642034 |
546042473af828587af78168aa3e36324191b2db | 2,961 | py | Python | jdcloud_sdk/services/iotcore/models/DeviceVO.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | jdcloud_sdk/services/iotcore/models/DeviceVO.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | jdcloud_sdk/services/iotcore/models/DeviceVO.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
| 43.544118 | 401 | 0.69875 |
54607def7c2c2dd5026968fee33155a24a8770a7 | 155 | py | Python | satyrus/sat/types/__init__.py | lucasvg/Satyrus3-FinalProject-EspTopsOTM | 024785752abdc46e3463d8c94df7c3da873c354d | [
"MIT"
] | null | null | null | satyrus/sat/types/__init__.py | lucasvg/Satyrus3-FinalProject-EspTopsOTM | 024785752abdc46e3463d8c94df7c3da873c354d | [
"MIT"
] | null | null | null | satyrus/sat/types/__init__.py | lucasvg/Satyrus3-FinalProject-EspTopsOTM | 024785752abdc46e3463d8c94df7c3da873c354d | [
"MIT"
] | null | null | null | from .array import Array
from .string import String
from .problem import Constraint, Loop
from .main import SatType, Var, Number
from .expr import Expr | 31 | 39 | 0.780645 |
54615497a597809e722b75e586e88b607f457119 | 470 | py | Python | magma/backend/util.py | Kuree/magma | be2439aa897768c5810be72e3a55a6f772ac83cf | [
"MIT"
] | null | null | null | magma/backend/util.py | Kuree/magma | be2439aa897768c5810be72e3a55a6f772ac83cf | [
"MIT"
] | null | null | null | magma/backend/util.py | Kuree/magma | be2439aa897768c5810be72e3a55a6f772ac83cf | [
"MIT"
] | null | null | null | import os
__magma_codegen_debug_info = False
if os.environ.get("MAGMA_CODEGEN_DEBUG_INFO", False):
__magma_codegen_debug_info = True
| 21.363636 | 53 | 0.77234 |
546277ddd1038ab1b79d6538508e871a2186c14c | 3,560 | py | Python | src/backend/main.py | tuimac/servertools | ceda2685a248d700f48aea4f93887b0f89a264a8 | [
"MIT"
] | null | null | null | src/backend/main.py | tuimac/servertools | ceda2685a248d700f48aea4f93887b0f89a264a8 | [
"MIT"
] | null | null | null | src/backend/main.py | tuimac/servertools | ceda2685a248d700f48aea4f93887b0f89a264a8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from subprocess import Popen, PIPE, DEVNULL, run
import socket
import sys
import traceback
import argparse
import time
import logging
import os
logger = logging.getLogger("django")
if __name__ == '__main__':
main()
| 30.169492 | 108 | 0.538202 |
5463fe7521a3910ac70e77bb4ec4fc1c354e171b | 35 | py | Python | pyble/const/characteristic/sensor_location.py | bgromov/PyBLEWrapper | 8a5d016e65b3c259391ddc97c371ab4b1b5c61b5 | [
"MIT"
] | 14 | 2015-03-30T23:11:36.000Z | 2020-04-07T00:57:12.000Z | pyble/const/characteristic/sensor_location.py | bgromov/PyBLEWrapper | 8a5d016e65b3c259391ddc97c371ab4b1b5c61b5 | [
"MIT"
] | 3 | 2016-05-17T06:11:07.000Z | 2017-05-15T16:43:09.000Z | pyble/const/characteristic/sensor_location.py | bgromov/PyBLEWrapper | 8a5d016e65b3c259391ddc97c371ab4b1b5c61b5 | [
"MIT"
] | 11 | 2016-03-11T08:53:03.000Z | 2019-03-11T21:32:13.000Z | NAME="Sensor Location"
UUID=0x2A5D
| 11.666667 | 22 | 0.8 |
546484ce8b5ed762d88a0033bf3308f52967f631 | 296 | py | Python | active-learning/seq_data.py | ansunsujoe/ml-research | 7ab529a5ec1d420385e64b9eebf87e0847b85afd | [
"MIT"
] | null | null | null | active-learning/seq_data.py | ansunsujoe/ml-research | 7ab529a5ec1d420385e64b9eebf87e0847b85afd | [
"MIT"
] | null | null | null | active-learning/seq_data.py | ansunsujoe/ml-research | 7ab529a5ec1d420385e64b9eebf87e0847b85afd | [
"MIT"
] | null | null | null | import random
from tqdm import tqdm
if __name__ == "__main__":
with open("sequences-1-train.txt", "w") as f:
for i in tqdm(range(5000)):
f.write(",".join(random_seq()) + "\n") | 29.6 | 76 | 0.614865 |
546488ac5fe6da6a714985e1c5c6692b62df9032 | 3,585 | py | Python | datatest/main.py | ajhynes7/datatest | 78742e98de992807286655f5685a2dc33a7b452e | [
"Apache-2.0"
] | 277 | 2016-05-12T13:22:49.000Z | 2022-03-11T00:18:32.000Z | datatest/main.py | ajhynes7/datatest | 78742e98de992807286655f5685a2dc33a7b452e | [
"Apache-2.0"
] | 57 | 2016-05-18T01:03:32.000Z | 2022-02-17T13:48:43.000Z | datatest/main.py | ajhynes7/datatest | 78742e98de992807286655f5685a2dc33a7b452e | [
"Apache-2.0"
] | 16 | 2016-05-22T11:35:19.000Z | 2021-12-01T19:41:42.000Z | """Datatest main program"""
import sys as _sys
from unittest import TestProgram as _TestProgram
from unittest import defaultTestLoader as _defaultTestLoader
try:
from unittest.signals import installHandler
except ImportError:
installHandler = None
from datatest import DataTestRunner
__unittest = True
__datatest = True
if _sys.version_info[:2] == (3, 1): # Patch methods for Python 3.1.
DataTestProgram.__init__ = __init__
elif _sys.version_info[:2] == (2, 6): # Patch runTests() for Python 2.6.
DataTestProgram.__init__ = __init__
main = DataTestProgram
| 38.138298 | 80 | 0.538633 |
54655fd5e9013ea6eec439615853e317aa7b100b | 17,503 | py | Python | zvmsdk/vmops.py | jasealpers/python-zvm-sdk | feb19dd40915b1a6cad74e7ccda17bc76d015ea5 | [
"Apache-2.0"
] | 9 | 2017-06-13T17:46:33.000Z | 2019-01-08T03:00:00.000Z | zvmsdk/vmops.py | jasealpers/python-zvm-sdk | feb19dd40915b1a6cad74e7ccda17bc76d015ea5 | [
"Apache-2.0"
] | 4 | 2018-07-18T21:41:21.000Z | 2019-01-07T06:05:15.000Z | zvmsdk/vmops.py | jasealpers/python-zvm-sdk | feb19dd40915b1a6cad74e7ccda17bc76d015ea5 | [
"Apache-2.0"
] | 20 | 2017-02-27T09:46:13.000Z | 2019-05-29T23:17:52.000Z | # Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import six
from zvmsdk import config
from zvmsdk import dist
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import smtclient
from zvmsdk import database
from zvmsdk import utils as zvmutils
_VMOPS = None
CONF = config.CONF
LOG = log.LOG
| 40.144495 | 79 | 0.588642 |
546685a1cd267c088cdbed690f4354973078c4ca | 3,481 | py | Python | Q146.py | Linchin/python_leetcode_git | 3d08ab04bbdbd2ce268f33c501fbb149662872c7 | [
"MIT"
] | null | null | null | Q146.py | Linchin/python_leetcode_git | 3d08ab04bbdbd2ce268f33c501fbb149662872c7 | [
"MIT"
] | null | null | null | Q146.py | Linchin/python_leetcode_git | 3d08ab04bbdbd2ce268f33c501fbb149662872c7 | [
"MIT"
] | null | null | null | """
Q146
LRU Cache
Medium
Author: Lingqing Gan
Date: 08/06/2019
Question:
Design and implement a data structure for Least Recently Used (LRU) cache.
It should support the following operations: get and put.
get(key) - Get the value (will always be positive) of the key if the key
exists in the cache, otherwise return -1.
put(key, value) - Set or insert the value if the key is not already present.
When the cache reached its capacity, it should invalidate the least
recently used item before inserting a new item.
The cache is initialized with a positive capacity.
Follow up:
Could you do both operations in O(1) time complexity?
notes:
linked list + dict(hash map)
12/24/2019
Merry Xmas~~
Now the code I wrote is working correctly. Just not very efficient.
Time to learn how the tutorial did it.
"""
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
capacity = 2
cache = LRUCache(capacity)
cache.put(1,1)
cache.put(2,2)
print(cache.get(1))
cache.put(3,3)
print(cache.get(2))
cache.put(4,4)
print(cache.get(1))
print(cache.get(3))
print(cache.get(4))
| 26.172932 | 76 | 0.600402 |
54670eac7c97edca8f6b8dd01151c748a6156511 | 9,940 | py | Python | bin/genparams.py | neonkingfr/VizBench | e41f559cb6e761d717f2f5b202482d5d8dacd2d8 | [
"MIT"
] | 7 | 2015-01-05T06:32:49.000Z | 2020-10-30T19:29:07.000Z | bin/genparams.py | neonkingfr/VizBench | e41f559cb6e761d717f2f5b202482d5d8dacd2d8 | [
"MIT"
] | null | null | null | bin/genparams.py | neonkingfr/VizBench | e41f559cb6e761d717f2f5b202482d5d8dacd2d8 | [
"MIT"
] | 4 | 2016-03-09T22:29:26.000Z | 2021-04-07T13:52:28.000Z | # This script reads *VizParams.list files that define Vizlet parameters
# and generates .h files for them, making runtime access to them much faster.
# This allows new parameters to be added just by editing one file.
import sys
import os
import re
types={"bool":"BOOL","int":"INT","double":"DBL","string":"STR"}
realtypes={"bool":"bool","int":"int","double":"double","string":"std::string"}
paramtypes={"bool":"BoolParam","int":"IntParam","double":"DoubleParam","string":"StringParam"}
## utility to make sure floating-point values are printed with a decimal point
## so function calls/etc get disambiguated between double and int.
if __name__ != "__main__":
print "This code needs to be invoked as a main program."
sys.exit(1)
if len(sys.argv) < 2:
print("Usage: %s {paramlist}" % sys.argv[0])
sys.exit(1)
# We expect this program to be invoked from the VizBench/bin directory
# so everything can be full paths without depending on environment variables
paramdir = "../src/params"
if not os.path.isdir(paramdir):
print("No directory "+paramdir+" !?")
sys.exit(1)
os.chdir(paramdir)
force = False
if len(sys.argv) > 2 and sys.argv[1] == "-f":
force = True
parambase = sys.argv[2]
else:
parambase = sys.argv[1]
paramclass = parambase+"VizParams"
paramlist = parambase+"VizParams.list"
paramtouch = parambase+"VizParams.touch"
paramnames = parambase+"VizParamsNames"
file_h = parambase + "VizParams.h"
file_cpp = parambase + "VizParams.cpp"
changed = force or (modtime(paramlist) > modtime(paramtouch) ) or not os.path.exists(file_h) or not os.path.exists(file_cpp)
if not changed:
print "No change in "+paramlist
sys.exit(0)
do_not_edit = "/************************************************\n" \
" *\n" \
" * This file is generated from '"+paramlist+"' by genparams.py\n" \
" *\n" \
" * DO NOT EDIT!\n" \
" *\n" \
" ************************************************/\n";
f = open(file_h,"w")
f.write(do_not_edit)
sys.stdout = f
params = readparams(paramlist)
genparamheader(params,paramclass)
f.close()
f = open(file_cpp,"w")
f.write(do_not_edit);
sys.stdout = f
genparamcpp(paramclass)
f.close()
touch(paramtouch)
| 27.458564 | 124 | 0.609557 |
54685a8741677f7fae5e8b83b5e24b77c1c400f9 | 712 | py | Python | notebooks/session_4/s3-sobelAndmatplotlib.py | bigmpc/cv-spring-2021 | 81d9384f74f5411804cdbb26be5b7ced0d0f5958 | [
"Apache-2.0"
] | 3 | 2021-03-09T10:00:50.000Z | 2021-12-26T07:19:09.000Z | notebooks/session_4/s3-sobelAndmatplotlib.py | bigmpc/cv-spring-2021 | 81d9384f74f5411804cdbb26be5b7ced0d0f5958 | [
"Apache-2.0"
] | null | null | null | notebooks/session_4/s3-sobelAndmatplotlib.py | bigmpc/cv-spring-2021 | 81d9384f74f5411804cdbb26be5b7ced0d0f5958 | [
"Apache-2.0"
] | 1 | 2021-02-27T16:09:30.000Z | 2021-02-27T16:09:30.000Z | import cv2
import numpy as np
import matplotlib.pyplot as plt
#Read the image as grayscale:
image = cv2.imread('building.jpg', 0)
#Compute the gradient approximations using the Sobel operator:
dx = cv2.Sobel(image, cv2.CV_32F, 1, 0)
dy = cv2.Sobel(image, cv2.CV_32F, 0, 1)
#Visualize the results:
plt.figure()
plt.subplot(141)
plt.axis('off')
plt.title('image')
plt.imshow(image, cmap='gray')
plt.subplot(142)
plt.axis('off')
plt.imshow(dx, cmap='gray')
plt.title('dx')
plt.subplot(143)
plt.axis('off')
plt.imshow(dy, cmap='gray')
plt.title('dx')
plt.subplot(144)
plt.axis('off')
plt.title('dy + dx')
plt.imshow(np.absolute(dx)+np.absolute(dy), cmap='gray')
plt.show()
| 19.777778 | 63 | 0.671348 |
5468626a4d8739106b686cc86e072541eeccc86e | 956 | py | Python | reporter-cli/sql-pdf/python/src/reporterprimary/__init__.py | rgolubtsov/reporter-multilang | 6d7e04bbd57342ea80e1beccea3c4de1b1c4e203 | [
"Unlicense"
] | 3 | 2017-04-28T16:40:22.000Z | 2019-02-22T16:57:12.000Z | reporter-cli/sql-pdf/python/src/reporterprimary/__init__.py | rgolubtsov/reporter-multilang | 6d7e04bbd57342ea80e1beccea3c4de1b1c4e203 | [
"Unlicense"
] | 46 | 2017-01-17T01:10:15.000Z | 2019-06-13T20:45:12.000Z | reporter-cli/sql-pdf/python/src/reporterprimary/__init__.py | rgolubtsov/reporter-multilang | 6d7e04bbd57342ea80e1beccea3c4de1b1c4e203 | [
"Unlicense"
] | 1 | 2017-07-06T14:18:55.000Z | 2017-07-06T14:18:55.000Z | # -*- coding: utf-8 -*-
# reporter-cli/sql-pdf/python/src/reporterprimary/__init__.py
# =============================================================================
# Reporter Multilang. Version 0.5.9
# =============================================================================
# A tool to generate human-readable reports based on data from various sources
# with the focus on its implementation using a series of programming languages.
# =============================================================================
# Written by Radislav (Radicchio) Golubtsov, 2016-2021
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# (See the LICENSE file at the top of the source tree.)
#
# vim:set nu et ts=4 sw=4:
| 45.52381 | 79 | 0.561715 |
5468c394ce1fe6e2cc2dd6fce2fd7d4c6e567c44 | 3,494 | py | Python | bem/teq_planet.py | DanielAndreasen/bem | c4cca79322f08b5e9a3f3d39749c11d9f6296aae | [
"MIT"
] | null | null | null | bem/teq_planet.py | DanielAndreasen/bem | c4cca79322f08b5e9a3f3d39749c11d9f6296aae | [
"MIT"
] | null | null | null | bem/teq_planet.py | DanielAndreasen/bem | c4cca79322f08b5e9a3f3d39749c11d9f6296aae | [
"MIT"
] | null | null | null | import numpy as np
from uncertainties import umath as um
def getTeqpl(Teffst, aR, ecc, A=0, f=1/4.):
"""Return the planet equilibrium temperature.
Relation adapted from equation 4 page 4 in http://www.mpia.de/homes/ppvi/chapter/madhusudhan.pdf
and https://en.wikipedia.org/wiki/Stefan%E2%80%93Boltzmann_law
and later updated to include the effect of excentricity on the average stellar planet distance
according to equation 5 p 25 of Laughlin & Lissauer 2015arXiv150105685L (1501.05685)
Plus Exoplanet atmospheres, physical processes, Sara Seager, p30 eq 3.9 for f contribution.
:param float/np.ndarray Teffst: Effective temperature of the star
:param float/np.ndarray aR: Ration of the planetary orbital semi-major axis over the stellar
radius (without unit)
:param float/np.ndarray A: Bond albedo (should be between 0 and 1)
:param float/np.ndarray f: Redistribution factor. If 1/4 the energy is uniformly redistributed
over the planetary surface. If f = 2/3, no redistribution at all, the atmosphere immediately
reradiate whithout advection.
:return float/np.ndarray Teqpl: Equilibrium temperature of the planet
"""
return Teffst * (f * (1 - A))**(1 / 4.) * np.sqrt(1 / aR) / (1 - ecc**2)**(1/8.)
def getTeqpl_error(Teffst, aR, ecc, A=0, f=1/4.):
    """Compute the planet equilibrium temperature with error propagation.

    Same relation as ``getTeqpl`` (see its docstring for references), but
    uses ``uncertainties.umath.sqrt`` so that inputs carrying uncertainties
    propagate their errors through the calculation.

    :param float/np.ndarray Teffst: Effective temperature of the star
    :param float/np.ndarray aR: Ratio of the planetary orbital semi-major
        axis over the stellar radius (without unit)
    :param float/np.ndarray ecc: Orbital eccentricity
    :param float/np.ndarray A: Bond albedo (should be between 0 and 1)
    :param float/np.ndarray f: Redistribution factor (1/4 uniform, 2/3 none)
    :return float/np.ndarray: Equilibrium temperature of the planet
    """
    absorbed = (f * (1 - A)) ** (1 / 4.)
    dilution = um.sqrt(1 / aR)
    ecc_correction = (1 - ecc ** 2) ** (1 / 8.)
    return Teffst * absorbed * dilution / ecc_correction
| 48.527778 | 100 | 0.660561 |
5469add1bc5b0732388dfd9a2adc569e52915599 | 1,656 | py | Python | poppy/data_preprocess.py | phanxuanphucnd/BertTextClassification | c9a0500f07d831f924f56cc8211569b035c6e47a | [
"MIT"
] | 1 | 2021-06-14T21:03:04.000Z | 2021-06-14T21:03:04.000Z | poppy/data_preprocess.py | phanxuanphucnd/BertTextClassification | c9a0500f07d831f924f56cc8211569b035c6e47a | [
"MIT"
] | null | null | null | poppy/data_preprocess.py | phanxuanphucnd/BertTextClassification | c9a0500f07d831f924f56cc8211569b035c6e47a | [
"MIT"
] | null | null | null | import pandas as pd
import re
import os
from tqdm import tqdm
## Cleaning train raw dataset
train = open('./data/raw/train.crash').readlines()
train_ids = []
train_texts = []
train_labels = []
for id, line in tqdm(enumerate(train)):
line = line.strip()
if line.startswith("train_"):
train_ids.append(id)
elif line == "0" or line == "1":
train_labels.append(id)
for id, lb in tqdm(zip(train_ids, train_labels)):
line_id = train[id].strip()
label = train[lb].strip()
text = ' '.join(train[id + 1: lb])
text = re.sub('\s+', ' ', text).strip()[1: -1].strip()
train_texts.append(text)
train_df = pd.DataFrame({
'id': train_ids,
'text': train_texts,
'label': train_labels
})
if not os.path.exists('./data'):
os.makedirs('./data')
train_df.to_csv('./data/train.csv', encoding='utf-8', index=False)
## Clean test raw dataset
test = open("./data/raw/test.crash").readlines()
test_ids = []
test_texts = []
for id, line in tqdm(enumerate(test)):
line = line.strip()
if line.startswith("test_"):
test_ids.append(id)
for i, id in tqdm(enumerate(test_ids)):
if i >= len(test_ids) - 1:
end = len(test)
else:
end = test_ids[i + 1]
line_id = test[id].strip()
text = re.sub('\s+', ' ', ' '.join(test[id + 1: end])).strip()[1:-1].strip()
test_texts.append(text)
test_df = pd.DataFrame({
'id': test_ids,
'text': test_texts
})
submission = pd.read_csv('./data/raw/sample_submission.csv', encoding='utf-8')
result = pd.concat([test_df, submission], axis=1, sort=False)
result.to_csv('./data/test.csv', encoding='utf-8', index=False) | 23.323944 | 80 | 0.618357 |
546a32ceac58022d2ad2cfb8c9d2804371eb31f5 | 6,456 | py | Python | websaw/core/app.py | valq7711/websaw | fb5718ad3ecd011d7fbb3f24fa007d84951bd58c | [
"MIT"
] | 1 | 2022-02-25T15:02:25.000Z | 2022-02-25T15:02:25.000Z | websaw/core/app.py | valq7711/websaw | fb5718ad3ecd011d7fbb3f24fa007d84951bd58c | [
"MIT"
] | null | null | null | websaw/core/app.py | valq7711/websaw | fb5718ad3ecd011d7fbb3f24fa007d84951bd58c | [
"MIT"
] | null | null | null | import functools
from types import SimpleNamespace
from typing import List
from . import globs
from .context import BaseContext
from .exceptions import FixtureProcessError
from .reloader import Reloader
from .static_registry import static_registry
| 31.960396 | 118 | 0.577912 |
546beba67c891d71b93c4df6d7f37c550d736d00 | 1,772 | py | Python | observations/r/chest_sizes.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | observations/r/chest_sizes.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | observations/r/chest_sizes.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def chest_sizes(path):
    """Chest measurements of 5738 Scottish Militiamen.

    Quetelet's data on chest measurements of 5738 Scottish Militiamen,
    used by Quetelet (1846) as a demonstration of the normal distribution
    of physical characteristics.

    The data frame has 16 observations on 2 variables:

    `chest`
        Chest size (in inches)
    `count`
        Number of soldiers with this chest size

    Velleman, P. F. and Hoaglin, D. C. (1981). *Applications, Basics, and
    Computing of Exploratory Data Analysis*. Belmont. CA: Wadsworth.
    Retrieved from Statlib:
    `https://www.stat.cmu.edu/StatDat/Datafiles/MilitiamenChests.html`

    Args:
      path: str.
        Path to directory which either stores file or otherwise file will
        be downloaded and extracted there.
        Filename is `chest_sizes.csv`.

    Returns:
      Tuple of np.ndarray `x_train` with 16 rows and 2 columns and
      dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd
    filename = 'chest_sizes.csv'
    target_dir = os.path.expanduser(path)
    csv_path = os.path.join(target_dir, filename)
    if not os.path.exists(csv_path):
        # Fetch the CSV once; later calls reuse the cached local copy.
        maybe_download_and_extract(
            target_dir,
            'http://dustintran.com/data/r/HistData/ChestSizes.csv',
            save_file_name=filename,
            resume=False)
    frame = pd.read_csv(csv_path, index_col=0, parse_dates=True)
    return frame.values, {'columns': frame.columns}
546e4ec20d3fdf8c1c5f8ed657bb3f80549f9803 | 1,365 | py | Python | setup.py | google/ads-api-reports-fetcher | de0bacc3ab520b020cf19985284b7e3dbc9778b0 | [
"Apache-2.0"
] | 4 | 2022-02-16T12:42:26.000Z | 2022-03-30T17:14:32.000Z | setup.py | google/ads-api-reports-fetcher | de0bacc3ab520b020cf19985284b7e3dbc9778b0 | [
"Apache-2.0"
] | null | null | null | setup.py | google/ads-api-reports-fetcher | de0bacc3ab520b020cf19985284b7e3dbc9778b0 | [
"Apache-2.0"
] | 1 | 2022-03-28T05:51:57.000Z | 2022-03-28T05:51:57.000Z | import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(name="google-ads-api-report-fetcher",
      version="0.1",
      description="Library for fetching reports from Google Ads API and saving them locally / BigQuery.",
      long_description=README,
      long_description_content_type="text/markdown",
      url="https://github.com/google/ads-api-reports-fetcher",
      author="Google Inc. (gTech gPS CSE team)",
      author_email="no-reply@google.com",
      license="Apache 2.0",
      classifiers=[
          "Programming Language :: Python :: 3",
          "Intended Audience :: Developers",
          "Topic :: Software Development :: Libraries :: Python Modules",
          "Operating System :: OS Independent",
          "License :: OSI Approved :: Apache Software License"
      ],
      packages=find_packages(include=["runner", "runner.*"]),
      install_requires=[
          "google-ads==14.1.0", "google-cloud-bigquery==2.26.0",
          "pandas==1.3.4", "pyarrow==6.0.1", "tabulate"
      ],
      setup_requires=["pytest-runner"],
      # Fixed: the setuptools keyword is `tests_require`; the previous
      # `tests_requires` was an unknown keyword and was silently ignored,
      # so pytest was never declared as a test dependency.
      tests_require=["pytest"],
      entry_points={
          "console_scripts": [
              "fetch-reports=runner.fetcher:main",
              "post-process-queries=runner.post_processor:main",
          ]
      })
546e73d201a7995e9aa7205db669d55b27e2e940 | 2,880 | py | Python | scan_service/scan_service/utils/stats.py | kkkkv/tgnms | a3b8fd8a69b647a614f9856933f05e50a4affadf | [
"MIT"
] | 12 | 2021-04-06T06:27:18.000Z | 2022-03-18T10:52:29.000Z | scan_service/scan_service/utils/stats.py | kkkkv/tgnms | a3b8fd8a69b647a614f9856933f05e50a4affadf | [
"MIT"
] | 6 | 2022-01-04T13:32:16.000Z | 2022-03-28T21:13:59.000Z | scan_service/scan_service/utils/stats.py | kkkkv/tgnms | a3b8fd8a69b647a614f9856933f05e50a4affadf | [
"MIT"
] | 7 | 2021-09-27T13:14:42.000Z | 2022-03-28T16:24:15.000Z | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import asyncio
import logging
import time
from collections import defaultdict
from typing import DefaultDict, Dict, List
from tglib.clients.prometheus_client import PrometheusClient, consts
from tglib.exceptions import ClientRuntimeError
from .topology import Topology
def reshape_values(network_name: str, values: Dict) -> DefaultDict:
    """Reshape the Prometheus results and map to other node's MAC address.

    Args:
        network_name: Network whose link-name -> (MAC, MAC) mapping is used.
        values: Mapping of metric name -> list of Prometheus link results.

    Returns:
        Mapping of the *other* node's MAC address -> {metric name: latest value}.
    """
    node_metrics: DefaultDict = defaultdict(dict)
    # Hoisted out of the loops: the link-name mapping is invariant per network.
    link_name_to_mac = Topology.link_name_to_mac.get(network_name, {})
    for metric, result in values.items():
        for link_result in result:
            link_name = link_result["metric"]["linkName"]
            node_pair = link_name_to_mac.get(link_name)
            if node_pair is None:
                logging.error(f"Missing node_mac mapping for {link_name}")
                continue
            # Identify which end of the link this result belongs to, and
            # attribute the metric to the node at the *other* end.
            radio_mac = link_result["metric"]["radioMac"]
            if radio_mac == node_pair[0]:
                other_node = node_pair[1]
            elif radio_mac == node_pair[1]:
                other_node = node_pair[0]
            else:
                logging.error(f"Incorrect node_mac mapping for {link_name}")
                continue
            if link_result["values"]:
                # Keep only the most recent sample for this metric.
                node_metrics[other_node][metric] = link_result["values"][-1][1]
    return node_metrics
| 32.727273 | 87 | 0.594097 |
547084a7679711993b0e3d30495458fce0c7f40b | 1,866 | py | Python | multithread_pipeline.py | kapitsa2811/smartOCR | 6ecca79b29778778b1458ea28763a39920a3d58a | [
"MIT"
] | null | null | null | multithread_pipeline.py | kapitsa2811/smartOCR | 6ecca79b29778778b1458ea28763a39920a3d58a | [
"MIT"
] | null | null | null | multithread_pipeline.py | kapitsa2811/smartOCR | 6ecca79b29778778b1458ea28763a39920a3d58a | [
"MIT"
] | null | null | null | import glob
import os
from io import StringIO
from threading import Thread
import logging
from logger import TimeHandler
from costants import THREADS, INFERENCE_GRAPH
from pipeline import pipeline
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(TimeHandler().handler)
if __name__ == '__main__':
    # Collect every PDF under the Polizze directory tree.
    path_list = []
    for path in glob.iglob("..\\Polizze\\" + '/**/*.pdf', recursive=True):
        path_list.append(path)
    if len(path_list) == 1:
        # Single document: no need to spread work across threads.
        new_thread = MyThread('Thread_{}'.format(0), path_list)
        new_thread.start()
        new_thread.join()
    else:
        # Partition the paths into THREADS contiguous chunks. Python slice
        # ends are exclusive, so no `- 1` is needed: the previous version
        # silently dropped the last element of every chunk (and the final
        # path overall), and `i < THREADS - 2` gave the last two threads
        # overlapping tail slices.
        el_per_list = int(len(path_list) / THREADS)
        thread_list = []
        for i in range(THREADS):
            start = el_per_list * i
            # The last chunk absorbs the remainder of the division.
            end = el_per_list * (i + 1) if i < THREADS - 1 else len(path_list)
            new_thread = MyThread('Thread_{}'.format(i), path_list[start:end])
            new_thread.start()
            thread_list.append(new_thread)
        for new_thread in thread_list:
            new_thread.join()
5470a342899892808b0ad450ef5da5a2f9cf5b36 | 12,319 | py | Python | src/keys_server/GMO/GMOKeysLookup.py | OasisLMF/gem | 95c755a1cb76a2bbc41e5dd7bc503c59123ca3ac | [
"BSD-2-Clause"
] | null | null | null | src/keys_server/GMO/GMOKeysLookup.py | OasisLMF/gem | 95c755a1cb76a2bbc41e5dd7bc503c59123ca3ac | [
"BSD-2-Clause"
] | null | null | null | src/keys_server/GMO/GMOKeysLookup.py | OasisLMF/gem | 95c755a1cb76a2bbc41e5dd7bc503c59123ca3ac | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Python 2 standard library imports
import csv
import io
import logging
import os
# Python 2 non-standard library imports
import pandas as pd
# Imports from Oasis core repos + subpackages or modules within keys_server
from oasislmf.utils.coverages import COVERAGE_TYPES
from oasislmf.utils.peril import PERILS
from oasislmf.utils.status import OASIS_KEYS_STATUS
KEYS_STATUS_FAIL = OASIS_KEYS_STATUS['fail']
KEYS_STATUS_NOMATCH = OASIS_KEYS_STATUS['nomatch']
KEYS_STATUS_SUCCESS = OASIS_KEYS_STATUS['success']
from oasislmf.model_preparation.lookup import OasisBaseKeysLookup
from oasislmf.utils.log import oasis_log
KEYS_STATUS_FAIL = OASIS_KEYS_STATUS['fail']['id']
KEYS_STATUS_NOMATCH = OASIS_KEYS_STATUS['nomatch']['id']
KEYS_STATUS_SUCCESS = OASIS_KEYS_STATUS['success']['id']
from .utils import (
AreaPerilLookup,
VulnerabilityLookup,
)
#
# Public entry point
#
__all__ = [
'GMOKeysLookup'
]
#
# START - deprecated oasislmf.utils.values
#
from datetime import datetime
import pytz
NULL_VALUES = [None, '', 'n/a', 'N/A', 'null', 'Null', 'NULL']
def get_timestamp(thedate=None, fmt='%Y%m%d%H%M%S'):
    """Format ``thedate`` (default: current local time) using ``fmt``."""
    target = thedate or datetime.now()
    return target.strftime(fmt)
def get_utctimestamp(thedate=None, fmt='%Y-%b-%d %H:%M:%S'):
    """
    Returns a UTC timestamp for a given ``datetime.datetime`` in the
    specified string format - the default format is::

        YYYY-MMM-DD HH:MM:SS
    """
    # Use the stdlib UTC tzinfo instead of pytz: for a plain conversion to
    # UTC the two are equivalent, and this drops a third-party dependency.
    from datetime import timezone
    d = thedate.astimezone(timezone.utc) if thedate else datetime.utcnow()
    return d.strftime(fmt)
def to_string(val):
    """Render ``val`` as a string; ``None`` becomes the empty string."""
    if val is None:
        return ''
    return str(val)
def to_int(val):
    """Parse ``val`` to ``int``; null-like values map to ``None``."""
    if val in NULL_VALUES:
        return None
    return int(val)
def to_float(val):
    """Parse ``val`` to ``float``; null-like values map to ``None``."""
    if val in NULL_VALUES:
        return None
    return float(val)
#
# END - deprecated oasislmf.utils.values
#
""" ---- Implementation note ----
In the original lookup implementation each location can map to multiple vulnerability ids,
each with difference levels of ductility and or material type.
Note from Malcolm:
Ductility is largely a product of materials, with unreinforced
masonry being the worst and wood the best. The reason its probably
not explicitly included in commercial cat models is
likely that the ductility for a given material is largely a function of age,
since better construction codes usually leads to more ductile structures.
Age usually is explicitly included in cat models wheres
the GEM functions capture this through the construction itself.
Original taxonomy:
gem_taxonomy_by_oed_occupancy_and_number_of_storeys_df = pd.DataFrame.from_dict({
'constructioncode': ['5156', '5150', '5150', '5150', '5150', '5150', '5150', '5109', '5109', '5109', '5109', '5109', '5109', '5109', '5105', '5105', '5105', '5105', '5105', '5105', '5105', '5105', '5101', '5103', '5103', '5103', '5000', '5050', '5050', '5050', '5050', '5050'],
'numberofstoreys': [1, 2, 2, 3, 2, 3, 1, 2, 3, 2, 3, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 2, 2, 1, 2, 1, 1, 1, 2, 1, -1],
'taxonomy': ['CR-PC_LWAL-DNO_H1', 'CR_LFINF-DNO_H2', 'CR_LFINF-DUH_H2', 'CR_LFINF-DUH_H3', 'CR_LFINF-DUM_H2', 'CR_LFINF-DUM_H3', 'CR_LFM-DNO_H1', 'MCF_LWAL-DNO_H2', 'MCF_LWAL-DNO_H3', 'MCF_LWAL-DUH_H2', 'MCF_LWAL-DUH_H3', 'MCF_LWAL-DUM_H2','MCF_LWAL-DUM_H3', 'MR_LWAL-DNO_H1','MR_LWAL-DNO_H2', 'MR_LWAL-DNO_H3','MR_LWAL-DUH_H1', 'MR_LWAL-DUH_H2', 'MR_LWAL-DUH_H3', 'MR_LWAL-DUM_H1', 'MR_LWAL-DUM_H2', 'MR_LWAL-DUM_H3', 'MUR-ADO_LWAL-DNO_H2', 'MUR-ST_LWAL-DNO_H2', 'MUR_LWAL-DNO_H1', 'MUR_LWAL-DNO_H2', 'UNK_H1', 'W-WBB_LPB-DNO_H1', 'W-WLI_LWAL-DNO_H1', 'W-WLI_LWAL-DNO_H2', 'W-WS_LPB-DNO_H1', 'W-']
})
The below was changed so that each unique combination of ('constructioncode', 'numberofstoreys')
maps to a single 'taxonomy' code
"""
# Lookup table mapping each unique (OED construction code, number of storeys)
# pair to a single GEM taxonomy string. See the implementation note above:
# the original one-to-many mapping was collapsed so each key yields exactly
# one taxonomy.
gem_taxonomy_by_oed_occupancy_and_number_of_storeys_df = pd.DataFrame.from_dict({
    'constructioncode': ['5156', '5150', '5150', '5150', '5109', '5109', '5109', '5105', '5105', '5105', '5101', '5103', '5103', '5000', '5050', '5050', '5050'],
    'numberofstoreys': [1, 2, 3, 1, 2, 3, 1, 1, 2, 3, 2, 1, 2, 1, 1, 2, -1],
    'taxonomy': ['CR-PC_LWAL-DNO_H1', 'CR_LFINF-DUM_H2', 'CR_LFINF-DUM_H3', 'CR_LFM-DNO_H1', 'MCF_LWAL-DNO_H2', 'MCF_LWAL-DNO_H3', 'MR_LWAL-DNO_H1', 'MR_LWAL-DUM_H1', 'MR_LWAL-DUM_H2', 'MR_LWAL-DUM_H3', 'MUR-ADO_LWAL-DNO_H2', 'MUR_LWAL-DNO_H1', 'MUR_LWAL-DNO_H2', 'UNK_H1', 'W-WLI_LWAL-DNO_H1', 'W-WLI_LWAL-DNO_H2', 'W-']
})
def _get_location_record(self, loc_item):
"""
Construct a location record (dict) from the location item, which in this
case is a row in a Pandas dataframe.
"""
# print("!! _get_location_record: {0}".format(loc_item))
meta = self._LOCATION_RECORD_META
return dict((
k,
meta[k]['validator'](loc_item[meta[k]['source_header'].lower()])
) for k in meta
)
| 36.554896 | 606 | 0.596396 |
5470aea747a6878071245059e1de2776baa03338 | 18,485 | py | Python | pandemic_eval.py | aypan17/value_learning | 240a67ecf99b178fe0c4ced2bfd1dd50453fbdfe | [
"MIT"
] | null | null | null | pandemic_eval.py | aypan17/value_learning | 240a67ecf99b178fe0c4ced2bfd1dd50453fbdfe | [
"MIT"
] | null | null | null | pandemic_eval.py | aypan17/value_learning | 240a67ecf99b178fe0c4ced2bfd1dd50453fbdfe | [
"MIT"
] | null | null | null | import time
import sys
import json
import argparse
from tqdm import trange
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import numpy as np
from scipy.spatial.distance import jensenshannon
import gym
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.ticker import MaxNLocator
from matplotlib.lines import Line2D
import pandemic_simulator as ps
from pandemic_simulator.environment.reward import RewardFunction, SumReward, RewardFunctionFactory, RewardFunctionType
from pandemic_simulator.environment.interfaces import InfectionSummary
from pandemic_simulator.viz import PandemicViz
from pandemic_simulator.environment import PandemicSimOpts
from stable_baselines3.common import base_class
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv
def evaluate_policy(
    name: str,
    model: "base_class.BaseAlgorithm",
    base_model: "base_class.BaseAlgorithm",
    env: Union[gym.Env, VecEnv],
    n_eval_episodes: int = 32,
    deterministic: bool = True,
    render: bool = False,
    viz: Optional[PandemicViz] = None,
    reward_threshold: Optional[float] = None,
    return_episode_rewards: bool = False,
    warn: bool = True,
) -> Union[Tuple[float, float], Tuple[List[float], List[int]]]:
    """
    Roll out ``model`` for a fixed 200-step horizon on ``env`` (wrapped into a
    ``DummyVecEnv`` if needed), accumulating three reward streams (proxy,
    true, true2) read from the env, and comparing ``model``'s policy against
    ``base_model`` step-by-step via approximate KL, Jensen-Shannon and
    Hellinger distances over the action distributions.

    :param name: Label used for visualization output (only referenced by the
        commented-out ``viz.plot`` call below).
    :param model: The RL agent being evaluated.
    :param base_model: Reference agent whose policy distances are measured
        against ``model``.
    :param env: The gym environment or ``VecEnv`` environment.
    :param viz: Optional visualizer; if given, per-step infection/testing
        summaries, stages and rewards are recorded into it.
    :return: Tuple of (reward sum, reward std, true reward sum, true reward
        std, true reward2 sum, true reward2 std, kls, js, hellinger values,
        log probs, base log probs, value estimates, base value estimates).

    NOTE(review): ``n_eval_episodes``, ``deterministic``, ``render``,
    ``reward_threshold``, ``return_episode_rewards`` and ``warn`` are
    currently unused by the body (``predict`` hard-codes
    ``deterministic=True`` and the horizon is fixed at 200 steps) — confirm
    whether they are kept only for API compatibility with
    stable-baselines3's ``evaluate_policy``.
    """
    if not isinstance(env, VecEnv):
        env = DummyVecEnv([lambda: env])
    episode_rewards = []
    reward_std = []
    episode_true_rewards = []
    true_reward_std = []
    episode_true_rewards2 = []
    true_reward_std2 = []
    # Per-step policy statistics for the evaluated model and the base model.
    vfs = []
    log_probs = []
    ents = []
    base_vfs = []
    base_log_probs = []
    base_ents = []
    kls = []
    js = []
    h = []
    numpy_obs = env.reset()
    states = None
    for t in range(200):
        actions, states = model.predict(numpy_obs, state=states, deterministic=True)
        # Evaluate the *same* actions under both policies so the statistics
        # are directly comparable.
        vf, logp, ent = model.policy.evaluate_actions(torch.as_tensor(numpy_obs), torch.as_tensor(actions))
        base_vf, base_logp, base_ent = base_model.policy.evaluate_actions(torch.as_tensor(numpy_obs), torch.as_tensor(actions))
        vfs.append(torch.mean(vf).detach().item())
        log_probs.append(torch.mean(logp).detach().item())
        ents.append(torch.mean(ent).detach().item())
        base_vfs.append(torch.mean(base_vf).detach().item())
        base_log_probs.append(torch.mean(base_logp).detach().item())
        base_ents.append(torch.mean(base_ent).detach().item())
        # Distances
        log_ratio = logp - base_logp
        # Estimator of KL from http://joschu.net/blog/kl-approx.html
        kls.append(torch.mean(torch.exp(log_ratio) - 1 - log_ratio).item())
        latent_pi, _, latent_sde = model.policy._get_latent(torch.as_tensor(numpy_obs))
        model_dist = model.policy._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde).distribution.probs.detach().numpy()
        latent_pi, _, latent_sde = base_model.policy._get_latent(torch.as_tensor(numpy_obs))
        base_dist = base_model.policy._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde).distribution.probs.detach().numpy()
        js.append(np.mean(jensenshannon(model_dist, base_dist, axis=1)).item())
        # NOTE(review): `hellinger` is not defined in this file as shown —
        # confirm it is provided elsewhere in the module.
        h.append(np.mean(hellinger(model_dist, base_dist)).item())
        numpy_obs, _, done, info = env.step(actions)
        # The env exposes the proxy reward and two "true" reward signals as
        # attributes rather than through the step return value.
        rew = env.get_attr("last_reward")
        true_rew = env.get_attr("get_true_reward")
        true_rew2 = env.get_attr("get_true_reward2")
        episode_rewards.append(np.mean(rew))
        reward_std.append(rew)
        episode_true_rewards.append(np.mean(true_rew))
        true_reward_std.append(true_rew)
        episode_true_rewards2.append(np.mean(true_rew2))
        true_reward_std2.append(true_rew2)
        obs = env.get_attr("observation")
        infection_data = np.zeros((1, 5))
        threshold_data = np.zeros(len(obs))
        for o in obs:
            infection_data += o.global_infection_summary[-1]
        gis = np.array([o.global_infection_summary[-1] for o in obs]).squeeze(1)
        gts = np.array([o.global_testing_summary[-1] for o in obs]).squeeze(1)
        stage = np.array([o.stage[-1].item() for o in obs])
        if viz:
            viz.record_list(obs[0], gis, gts, stage, rew, true_rew, true_rew2=true_rew2)
    # Sum rewards over the horizon; std is taken across parallel envs of the
    # per-env cumulative sums.
    reward = np.sum(episode_rewards).item()
    true_reward = np.sum(episode_true_rewards).item()
    true_reward2 = np.sum(episode_true_rewards2).item()
    #if viz:
    #    viz.plot(name=name, evaluate=True, plots_to_show=['critical_summary', 'stages', 'cumulative_reward', 'cumulative_true_reward2'])
    #    viz.reset()
    return reward, np.std(np.sum(np.array(reward_std), axis=0)).item(), \
        true_reward, np.std(np.sum(np.array(true_reward_std), axis=0)).item(), \
        true_reward2, np.std(np.sum(np.array(true_reward_std2), axis=0)).item(), \
        kls, js, h, log_probs, base_log_probs, vfs, base_vfs
if __name__ == '__main__':
    # Script entry point. NOTE(review): main() is not defined in the visible
    # portion of this file — confirm it exists elsewhere in the module.
    main()
| 47.51928 | 182 | 0.648526 |
5471ef5e2041074700733cd254f4357bec345d93 | 3,289 | py | Python | WagerBrain/odds.py | sedemmler/WagerBrain | b1cc33f5eb7a6130106bf8251b554718e2d22172 | [
"MIT"
] | 83 | 2020-03-26T22:14:24.000Z | 2022-03-22T19:00:48.000Z | website.py | rax-v/XSS | ff70b89c9fb94a19caaf84e81eddeeca052344ea | [
"MIT"
] | 2 | 2020-03-26T19:34:03.000Z | 2020-03-27T19:56:14.000Z | website.py | rax-v/XSS | ff70b89c9fb94a19caaf84e81eddeeca052344ea | [
"MIT"
] | 19 | 2020-04-06T10:47:30.000Z | 2022-03-30T19:16:42.000Z | from fractions import Fraction
from math import gcd
import numpy as np
"""
Convert the style of gambling odds to Function Name (Decimal, American, Fractional).
TO DO: Fix edge case related to Fraction module that causes weird rounding / slightly off output
"""
def american_odds(odds):
    """
    Convert decimal (float) or fractional (str, e.g. '3/1') odds to American.

    :param odds: Float (e.g., 2.25) or String (e.g., '3/1' or '5/4');
        integers are assumed to already be American and pass through.
    :return: Numeric. Odds expressed in American terms.
    """
    if isinstance(odds, int):
        # Already in American form.
        return odds
    if isinstance(odds, float):
        # Decimal odds: underdogs (> 2.0) map to positive American odds,
        # favorites to negative.
        if odds > 2.0:
            return round((odds - 1) * 100, 0)
        return round(-100 / (odds - 1), 0)
    if "/" in odds:
        frac = Fraction(odds)
        ratio = frac.numerator / frac.denominator
        if frac.numerator > frac.denominator:
            return ratio * 100
        return -100 / ratio
def decimal_odds(odds):
    """
    Convert American (int) or fractional (str, e.g. '3/1') odds to decimal.

    :param odds: Integer (e.g., -350) or String (e.g., '3/1' or '5/4');
        floats are assumed to already be decimal and pass through.
    :return: Float. Odds expressed in Decimal terms.
    """
    if isinstance(odds, float):
        # Already in decimal form.
        return odds
    if isinstance(odds, int):
        if odds >= 100:
            # Underdog: +150 -> 2.5
            return abs(1 + (odds / 100))
        if odds <= -101:
            # Favorite: -350 -> 100/350 + 1
            return 100 / abs(odds) + 1
        # Values between -100 and 99 pass through unconverted.
        return float(odds)
    if "/" in odds:
        frac = Fraction(odds)
        return round((frac.numerator / frac.denominator) + 1, 2)
def fractional_odds(odds):
    """
    Convert American (int) or decimal (float) odds to fractional.

    :param odds: Numeric (e.g., 2.25 or -350); strings such as '3/1' are
        assumed to already be fractional and are parsed directly.
    :return: Fraction. Odds expressed in Fractional terms.
    """
    if isinstance(odds, str):
        return Fraction(odds)
    if isinstance(odds, int):
        if odds > 0:
            # Underdog: e.g. +300 -> 3/1.
            divisor = gcd(odds, 100)
            return Fraction(odds // divisor, 100 // divisor)
        # Favorite: e.g. -200 -> 1/2.
        divisor = gcd(100, odds)
        return -Fraction(100 // divisor, odds // divisor)
    if isinstance(odds, float):
        # Decimal odds: e.g. 2.5 -> 3/2.
        scaled = int((odds - 1) * 100)
        divisor = gcd(scaled, 100)
        return Fraction(scaled // divisor, 100 // divisor)
def parlay_odds(odds):
    """
    :param odds: List. A list of odds for wagers to be included in parlay
    :return: Parlay odds in Decimal terms (product of the decimal odds)
    """
    decimals = [decimal_odds(wager) for wager in odds]
    return np.prod(np.array(decimals))
def convert_odds(odds, odds_style='a'):
    """
    :param odds: Stated odds from bookmaker (American, Decimal, or Fractional)
    :param odds_style: American ('a', 'amer', 'american'), Decimal ('d',
        'dec', 'decimal'), Fractional ('f', 'frac', 'fractional')
    :return: Numeric. Odds converted to selected style; None on bad input
        or unknown style.
    """
    try:
        style = odds_style.lower()
        if style in ('american', 'amer', 'a'):
            return american_odds(odds)
        if style in ('decimal', 'dec', 'd'):
            return decimal_odds(odds)
        if style in ('fractional', 'frac', 'f'):
            return fractional_odds(odds)
    except (ValueError, KeyError, NameError):
        return None
| 28.850877 | 123 | 0.578291 |
5472180161d7e60f43fc9232da207e59fa3cb086 | 16,438 | py | Python | GANs/jsigan/ops.py | JonathanLehner/nnabla-examples | 2971b987484945e12fb171594181908789485a0f | [
"Apache-2.0"
] | null | null | null | GANs/jsigan/ops.py | JonathanLehner/nnabla-examples | 2971b987484945e12fb171594181908789485a0f | [
"Apache-2.0"
] | null | null | null | GANs/jsigan/ops.py | JonathanLehner/nnabla-examples | 2971b987484945e12fb171594181908789485a0f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.initializer as I
import numpy as np
from utils import depth_to_space
def box_filter(x, szf):
    """
    Box (mean) filter over the spatial dims of a channel-last tensor.

    Implemented as a depthwise convolution with a constant ``1/szf**2``
    kernel; reflect padding keeps the output the same spatial size as the
    input.

    :param x: input Variable in NHWC layout
    :param szf: box-filter side length (odd)
    :return: filtered Variable in NHWC layout
    """
    y = F.identity(x)
    szy = list(y.shape)
    b_filt = nn.Variable((szf, szf, 1, 1))
    b_filt.data.fill(1.)
    b_filt = b_filt / (szf ** 2)
    # Replicate the averaging kernel for every input channel (depthwise).
    b_filt = F.tile(b_filt, [1, 1, szy[3], 1])
    b_filt = F.transpose(b_filt, (3, 2, 0, 1))
    # Fixed: the reshape was hard-coded to (6, 5, 5), which only worked for
    # a 6-channel input with szf == 5; derive it from the actual shapes so
    # the filter works for any channel count / filter size.
    b_filt = F.reshape(b_filt, (szy[3], szf, szf))
    pp = int((szf - 1) / 2)
    y = F.pad(y, (0, 0, pp, pp, pp, pp, 0, 0), mode='reflect')
    y_chw = F.transpose(y, (0, 3, 1, 2))
    y_chw = F.depthwise_convolution(y_chw, b_filt, multiplier=1, stride=(1, 1))
    y_hwc = F.transpose(y_chw, (0, 2, 3, 1))
    return y_hwc
def guided_filter(img, r, eps):
    """
    Edge preserving filter (self-guided: the image is its own guide).

    :param img: input Variable in NHWC layout with 3 channels
    :param r: box-filter radius used for the local means
    :param eps: regularization; larger values give stronger smoothing
    :return: filtered Variable, same shape as ``img``
    """
    # Compute local means of img and img^2 in one box-filter pass
    # (channels 0-2: mean_i, channels 3-5: mean_ii).
    img2 = F.concatenate(img, img * img, axis=3)
    img2 = box_filter(img2, r)
    mean = F.split(img2, axis=3)
    mean_i = F.stack(mean[0], mean[1], mean[2], axis=3)
    mean_ii = F.stack(mean[3], mean[4], mean[5], axis=3)
    # Local variance: E[x^2] - E[x]^2.
    var_i = mean_ii - mean_i * mean_i
    a = var_i / (var_i + eps)
    b = mean_i - a * mean_i
    # Smooth the per-pixel linear coefficients before applying them.
    ab = F.concatenate(a, b, axis=3)
    ab = box_filter(ab, r)
    mean_ab = F.split(ab, axis=3)
    mean_a = F.stack(mean_ab[0], mean_ab[1], mean_ab[2], axis=3)
    mean_b = F.stack(mean_ab[3], mean_ab[4], mean_ab[5], axis=3)
    q = mean_a * img + mean_b
    return q
def conv_2d(x, o_ch, kernel, name=None):
    """Stride-1, pad-1, channel-last convolution with zero-initialized bias."""
    zero_bias = I.ConstantInitializer(0.)
    return PF.convolution(x, o_ch, kernel=kernel, stride=(1, 1), pad=(1, 1),
                          channel_last=True, b_init=zero_bias, name=name)
def res_block(x, out_ch, name):
    """Residual block: x + conv(relu(conv(relu(x))))."""
    with nn.parameter_scope(name):
        branch = conv_2d(F.relu(x), out_ch, kernel=(3, 3), name='conv/0')
        branch = conv_2d(F.relu(branch), out_ch, kernel=(3, 3), name='conv/1')
        out = x + branch
    return out
def dyn_2d_filter(x, lf_2d, k_sz):
    """
    Dynamic 2d filtering: apply a different k_sz[0] x k_sz[1] kernel at every
    spatial position of the single-channel input ``x``.

    :param x: input Variable, [B, H, W] (single channel)
    :param lf_2d: per-pixel local filters, [B, H, W, k_sz[0]*k_sz[1], out]
    :param k_sz: (kh, kw) local kernel size
    :return: filtered Variable, [B, H, W, out]
    """
    with nn.parameter_scope('Dynamic_2D_Filtering'):
        # Identity kernel bank: convolving with it gathers each pixel's
        # k_sz[0] x k_sz[1] neighborhood into the channel dimension.
        f_localexpand = nn.Variable.from_numpy_array(
            np.eye(k_sz[0] * k_sz[1], k_sz[0] * k_sz[1]))
        f_localexpand = F.reshape(f_localexpand,
                                  (k_sz[0], k_sz[1], 1, k_sz[0] * k_sz[1]))  # (9,9,1,81)
        f_localexpand = F.transpose(f_localexpand, (3, 0, 1, 2))  # (81,9,9,1)
        x_sz = x.shape
        x = F.reshape(x, (x_sz[0], x_sz[1], x_sz[2], 1))  # (1,100,170,1)
        # NOTE(review): pad=(4, 4) matches k_sz == (9, 9) only — confirm
        # callers never pass a different kernel size.
        x_localexpand = F.convolution(x, f_localexpand, stride=(1, 1), pad=(4, 4),
                                      channel_last=True)  # (1,100,170,81)
        x_le_sz = x_localexpand.shape
        x_localexpand = F.reshape(x_localexpand,
                                  (x_le_sz[0], x_le_sz[1], x_le_sz[2], 1, x_le_sz[3]))
        # Per-pixel (1 x 81) @ (81 x out) product applies the local filter.
        y = F.batch_matmul(x_localexpand, lf_2d)
        y_sz = y.shape
        y = F.reshape(y, (y_sz[0], y_sz[1], y_sz[2], y_sz[4]))
    return y
def dyn_2d_up_operation(x, lf_2d, k_sz, sf=2):
    """
    Dynamic 2d upsampling: filter each YUV channel with softmax-normalized
    per-pixel 2D kernels, then upscale by ``sf`` via depth-to-space.

    :param x: input Variable, [B, H, W, 3] (YUV)
    :param lf_2d: raw local filters, [B, H, W, k_sz[0]*k_sz[0]*sf*sf]
    :param k_sz: local kernel size (kh, kw)
    :param sf: spatial upscaling factor
    :return: upsampled Variable, [B, H*sf, W*sf, 3]
    """
    with nn.parameter_scope("Dynamic_2D_Upsampling"):
        y = []
        sz = lf_2d.shape
        lf_2d_new = F.reshape(
            lf_2d, (sz[0], sz[1], sz[2], k_sz[0] * k_sz[0], sf ** 2))
        # Normalize each local kernel so its taps sum to 1.
        lf_2d_new = F.softmax(lf_2d_new, axis=3)
        for ch in range(3):  # loop over YUV channels
            # apply dynamic filtering operation
            temp = dyn_2d_filter(x[:, :, :, ch], lf_2d_new, k_sz)
            temp = depth_to_space(temp, sf)
            y += [temp]
        y = F.concatenate(*y, axis=3)
    return y
def dyn_sep_up_operation(x, dr_k_v, dr_k_h, k_sz, sf):
    """
    Dynamic separable upsampling operation with 1D separable local kernels.
    x: [B, H, W, C], dr_k_v: [B, H, W, 41*sf*sf], dr_k_h: [B, H, W, 41*sf*sf]
    out: [B, H*sf, W*sf, C]

    Applies a per-pixel vertical 1D filter, then a per-pixel horizontal 1D
    filter, and finally rearranges the sf*sf filter outputs into space
    (depth-to-space) per YUV channel.
    """
    sz = x.shape
    pad = k_sz // 2  # local filter pad size
    # [B, H, W, C*sf*sf]
    out_v = nn.Variable((sz[0], sz[1], sz[2], sz[3] * sf ** 2))
    out_v.data.zero()
    # [B, H, W, C*sf*sf]
    out_h = nn.Variable((sz[0], sz[1], sz[2], sz[3] * sf ** 2))
    out_h.data.zero()
    img_pad = F.pad(x, (0, 0, pad, pad, 0, 0, 0, 0))
    # Replicate each YUV channel sf*sf times so every sub-pixel position has
    # its own copy to filter.
    img_pad_y = F.reshape(img_pad[:, :, :, 0],
                          (img_pad.shape[0], img_pad.shape[1], img_pad.shape[2], 1))
    img_pad_y = F.tile(img_pad_y, [1, 1, 1, sf ** 2])
    img_pad_u = F.reshape(img_pad[:, :, :, 1],
                          (img_pad.shape[0], img_pad.shape[1], img_pad.shape[2], 1))
    img_pad_u = F.tile(img_pad_u, [1, 1, 1, sf ** 2])
    img_pad_v = F.reshape(img_pad[:, :, :, 2],
                          (img_pad.shape[0], img_pad.shape[1], img_pad.shape[2], 1))
    img_pad_v = F.tile(img_pad_v, [1, 1, 1, sf ** 2])
    img_pad = F.concatenate(img_pad_y, img_pad_u, img_pad_v, axis=3)
    # vertical 1D filter: accumulate the k_sz taps along the height axis
    for i in range(k_sz):
        out_v = out_v + img_pad[:, i:i + sz[1], :, :] * F.tile(
            dr_k_v[:, :, :, i:k_sz * sf ** 2:k_sz], [1, 1, 1, 3])
    img_pad = F.pad(out_v, (0, 0, 0, 0, pad, pad, 0, 0))
    # horizontal 1D filter: same accumulation along the width axis
    for i in range(k_sz):
        out_h = out_h + img_pad[:, :, i:i + sz[2], :] * F.tile(
            dr_k_h[:, :, :, i:k_sz * sf ** 2:k_sz], [1, 1, 1, 3])
    # depth to space upsampling (YUV)
    out = depth_to_space(out_h[:, :, :, 0:sf ** 2], sf)
    out = F.concatenate(out, depth_to_space(
        out_h[:, :, :, sf ** 2:2 * sf ** 2], sf), axis=3)
    out = F.concatenate(out, depth_to_space(
        out_h[:, :, :, 2 * sf ** 2:3 * sf ** 2], sf), axis=3)
    return out
def res_block_concat(x, out_ch, name):
    """
    Residual block for a widened (concatenated) input.

    Two pre-activation conv layers form the residual branch; the skip
    connection keeps only the first ``out_ch`` channels of the input so the
    shapes match before the addition.
    """
    with nn.parameter_scope(name):
        branch = conv_2d(F.relu(x), out_ch, kernel=(3, 3), name='conv/0')
        branch = conv_2d(F.relu(branch), out_ch, kernel=(3, 3), name='conv/1')
        skip = x[:, :, :, :out_ch]
        out = skip + branch
    return out
def model(img, sf):
    """
    Define JSInet model.

    Three subnets operate on a base/detail decomposition of the input and are
    recombined at the end:
      * local contrast enhancement -> multiplicative mask ``pred_C``
      * detail restoration         -> additive detail image ``pred_D``
      * image reconstruction       -> upscaled image ``pred_I``

    :param img: input image batch, channel-last ([B, H, W, 3]; the filtering
        code treats the channels as YUV)
    :param sf: int, spatial upscaling factor
    :return: namedtuple ``jsinet`` with field ``pred`` = (pred_I + pred_D) * pred_C

    NOTE: parameter scope names ('Network', 'conv/0', 'res_block/%d', ...)
    must stay exactly as-is to remain compatible with saved parameter files.
    """
    with nn.parameter_scope('Network'):
        with nn.parameter_scope('local_contrast_enhancement'):
            ## ================= Local Contrast Enhancement Subnet ============================ ##
            ch = 64
            # Base (smoothed) layer of the input.
            b = guided_filter(img, 5, 0.01)
            n1 = conv_2d(b, ch, kernel=(3, 3), name='conv/0')
            for i in range(4):
                n1 = res_block(n1, ch, 'res_block/%d' % i)
            n1 = F.relu(n1, inplace=True)
            local_filter_2d = conv_2d(n1, (9 ** 2) * (sf ** 2), kernel=(3, 3),
                                      name='conv_k')  # [B, H, W, (9x9)*(sfxsf)]
            # dynamic 2D upsampling with 2D local filters
            pred_C = dyn_2d_up_operation(b, local_filter_2d, (9, 9), sf)
            # local contrast mask, squashed into the range (0, 2)
            pred_C = 2 * F.sigmoid(pred_C)
        ## ================= Detail Restoration Subnet ============================ ##
        ch = 64
        # Detail layer = input divided by its base layer (epsilon avoids /0).
        d = F.div2(img, b + 1e-15)
        with nn.parameter_scope('detail_restoration'):
            n3 = conv_2d(d, ch, kernel=(3, 3), name='conv/0')
            for i in range(4):
                n3 = res_block(n3, ch, 'res_block/%d' % i)
                # Feature map after the first res block is reused by the
                # image reconstruction subnet below.
                if i == 0:
                    d_feature = n3
            n3 = F.relu(n3, inplace=True)
            # separable 1D filters
            dr_k_h = conv_2d(n3, 41 * sf ** 2, kernel=(3, 3), name='conv_k_h')
            dr_k_v = conv_2d(n3, 41 * sf ** 2, kernel=(3, 3), name='conv_k_v')
            # dynamic separable upsampling with with separable 1D local filters
            pred_D = dyn_sep_up_operation(d, dr_k_v, dr_k_h, 41, sf)
        ## ================= Image Reconstruction Subnet ============================ ##
        with nn.parameter_scope('image_reconstruction'):
            n4 = conv_2d(img, ch, kernel=(3, 3), name='conv/0')
            for i in range(4):
                if i == 1:
                    # Inject the detail feature; the concat widens the input,
                    # so the residual block variant that crops the skip is used.
                    n4 = F.concatenate(n4, d_feature, axis=3)
                    n4 = res_block_concat(n4, ch, 'res_block/%d' % i)
                else:
                    n4 = res_block(n4, ch, 'res_block/%d' % i)
            n4 = F.relu(n4, inplace=True)
            n4 = F.relu(conv_2d(n4, ch * sf * sf, kernel=(3, 3),
                                name='conv/1'), inplace=True)
            # (1,100,170,1024) -> (1,100,170,4,4,64) -> (1,100,4,170,4,64)
            # pixel shuffle
            n4 = depth_to_space(n4, sf)
            pred_I = conv_2d(n4, 3, kernel=(3, 3), name='conv/2')
        # Final combination: (reconstruction + detail) modulated by contrast mask.
        pred = F.add2(pred_I, pred_D, inplace=True) * pred_C
    jsinet = namedtuple('jsinet', ['pred'])
    return jsinet(pred)
def truncated_normal(w_shape, mean, std):
    """
    NumPy-based approximation of a truncated-normal initializer.

    Draws 4 standard-normal candidates per weight element and keeps, for each
    element, the first candidate that lies inside (-2, 2); the result is then
    scaled by ``std`` and shifted by ``mean``.

    NOTE: if none of an element's 4 candidates is inside (-2, 2),
    ``np.argmax`` returns index 0 and the (out-of-range) first candidate is
    kept, so the truncation is best-effort rather than exact.

    :param w_shape: tuple, shape of the weight tensor to initialize
    :param mean: float, mean of the resulting values
    :param std: float, standard deviation of the resulting values
    :return: np.ndarray of shape ``w_shape``
    """
    init = I.NormalInitializer()
    # Sample 4 candidates per element along an extra trailing axis.
    tmp = init(w_shape + (4,))
    valid = np.logical_and(np.less(tmp, 2), np.greater(tmp, -2))
    # Index of the first in-range candidate along the trailing axis.
    ind = np.argmax(valid, axis=-1)
    ind1 = np.expand_dims(ind, -1)
    # axis=-1 (instead of the previous hard-coded axis=4) selects along the
    # candidate axis for any rank of ``w_shape``, not only 4-D conv weights;
    # for the existing 4-D callers the behavior is identical.
    trunc_norm = np.take_along_axis(tmp, ind1, axis=-1).squeeze(-1)
    trunc_norm = trunc_norm * std + mean
    return trunc_norm
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, scope='conv_0'):
    """
    Convolution layer for the discriminator (channel-last).

    Weights are initialized from the truncated-normal helper above; bias is
    zero-initialized.

    :param x: input tensor [B, H, W, C]
    :param channels: int, number of output channels
    :param kernel: int, square kernel size
    :param stride: int, stride in both spatial dimensions
    :param pad: int, requested padding; when > 0 the actual amount is
        recomputed from the input height (see below)
    :param pad_type: 'zero' or 'reflect'; any other value applies no padding
    :param use_bias: bool, whether to add a bias term
    :param scope: str, parameter scope name (must match saved parameters)
    """
    w_n_shape = (channels, kernel, kernel, x.shape[-1])
    w_init = truncated_normal(w_n_shape, mean=0.0, std=0.02)
    b_init = I.ConstantInitializer(0.)
    with nn.parameter_scope(scope):
        if pad > 0:
            # Padding is derived from the input height only (width is assumed
            # to need the same amount) — this mirrors TensorFlow-style 'SAME'
            # padding arithmetic; TODO confirm against the reference model.
            h = x.shape[1]
            if h % stride == 0:
                pad = pad * 2
            else:
                pad = max(kernel - (h % stride), 0)
            # Split asymmetrically: the extra pixel (if any) goes to the
            # bottom/right edge.
            pad_top = pad // 2
            pad_bottom = pad - pad_top
            pad_left = pad // 2
            pad_right = pad - pad_left
            if pad_type == 'zero':
                x = F.pad(x, (0, 0, pad_top, pad_bottom,
                              pad_left, pad_right, 0, 0))
            if pad_type == 'reflect':
                x = F.pad(x, (0, 0, pad_top, pad_bottom, pad_left,
                              pad_right, 0, 0), mode='reflect')
        # ``apply_w`` is defined elsewhere in the original project (not in
        # this excerpt); presumably a weight-transform hook such as spectral
        # normalization — TODO confirm.
        x = PF.convolution(x, channels, kernel=(kernel, kernel), stride=(
            stride, stride), apply_w=apply_w, w_init=w_init, b_init=b_init, with_bias=use_bias,
            channel_last=True)
    return x
def dis_block(n, c, i, train=True):
    """
    One discriminator stage: a strided 4x4 conv -> BN -> LeakyReLU (whose
    activation is returned separately for feature matching), followed by a
    3x3 conv -> BN -> LeakyReLU that doubles the channel count.

    Returns ``(out, out_fm)`` where ``out_fm`` is the intermediate feature map.
    """
    # Scope/parameter names are index-derived and must match saved parameters.
    conv_name_a = 'd_conv/' + str(2 * i + 2)
    bn_name_a = 'd_bn/' + str(2 * i + 1)
    conv_name_b = 'd_conv/' + str(2 * i + 3)
    bn_name_b = 'd_bn/' + str(2 * i + 2)
    h = conv(n, channels=c, kernel=4, stride=2, pad=1, use_bias=False,
             scope=conv_name_a)
    h = PF.batch_normalization(h, axes=[3], batch_stat=train, name=bn_name_a)
    out_fm = F.leaky_relu(h, alpha=0.2)
    h = conv(out_fm, channels=c * 2, kernel=3, stride=1, pad=1, use_bias=False,
             scope=conv_name_b)
    h = PF.batch_normalization(h, axes=[3], batch_stat=train, name=bn_name_b)
    out = F.leaky_relu(h, alpha=0.2)
    return out, out_fm
def discriminator_fm(x, sf, scope="Discriminator_FM"):
    """
    Feature matching discriminator.

    Stacks one stem conv, four ``dis_block`` stages (collecting each stage's
    intermediate feature map for the feature-matching loss), and a small head
    that reduces to a single-channel logit map.

    :param x: input image batch [B, H, W, C]
    :param sf: int, scaling factor; when 1 the head's 5x5 conv is padded
    :param scope: str, parameter scope (calls with the same scope share
        parameters)
    :return: (sigmoid output, raw logits, list of 4 feature maps)
    """
    with nn.parameter_scope(scope):
        fm_list = []
        ch = 32
        n = F.leaky_relu(conv(x, ch, 3, 1, 1, scope='d_conv/1'), alpha=0.2)
        # Four downsampling stages; channel count doubles each stage.
        for i in range(4):
            n, out_fm = dis_block(n, ch, i, train=True)
            ch = ch * 2
            fm_list.append(out_fm)
        n = F.leaky_relu(PF.batch_normalization(
            conv(n, channels=ch, kernel=4, stride=2,
                 pad=1, use_bias=False, scope='d_conv/10'),
            axes=[3], batch_stat=True, name='d_bn/9'), alpha=0.2,
            inplace=True)
        # For sf == 1 the spatial extent is smaller, so the 5x5 conv keeps
        # pad=1; otherwise it runs unpadded.
        if sf == 1:
            n = F.leaky_relu(PF.batch_normalization(
                conv(n, channels=ch, kernel=5, stride=1,
                     pad=1, use_bias=False, scope='d_conv/11'),
                axes=[3], batch_stat=True, name='d_bn/10'), alpha=0.2, inplace=True)
        else:
            n = F.leaky_relu(PF.batch_normalization(
                conv(n, channels=ch, kernel=5, stride=1,
                     use_bias=False, scope='d_conv/11'),
                axes=[3], batch_stat=True, name='d_bn/10'), alpha=0.2, inplace=True)
        # 1x1 conv down to a single-channel logit map.
        n = PF.batch_normalization(
            conv(n, channels=1, kernel=1, stride=1,
                 use_bias=False, scope='d_conv/12'),
            axes=[3], batch_stat=True, name='d_bn/11')
        out_logit = n
        out = F.sigmoid(out_logit)  # [B,1]
    return out, out_logit, fm_list
def discriminator_loss(real, fake):
    """
    Hinge loss for the discriminator, where each logit map is compared
    against the broadcast mean logit of the opposite class before the hinge
    is applied.
    """
    mean_fake = F.reshape(F.mean(fake), (1, 1, 1, 1))
    mean_real = F.reshape(F.mean(real), (1, 1, 1, 1))
    loss_on_real = F.mean(F.relu(1.0 - (real - mean_fake)))
    loss_on_fake = F.mean(F.relu(1.0 + (fake - mean_real)))
    return loss_on_real + loss_on_fake
def generator_loss(real, fake):
    """
    Hinge loss for the generator; mirrors ``discriminator_loss`` with the
    hinge signs swapped so the generator pushes fake logits above real ones.
    """
    mean_fake = F.reshape(F.mean(fake), (1, 1, 1, 1))
    mean_real = F.reshape(F.mean(real), (1, 1, 1, 1))
    loss_on_real = F.mean(F.relu(1.0 + (real - mean_fake)))
    loss_on_fake = F.mean(F.relu(1.0 - (fake - mean_real)))
    return loss_on_real + loss_on_fake
def feature_matching_loss(x, y, num=4):
    """
    Feature matching loss: sum over the first ``num`` layers of the mean
    squared error between corresponding discriminator feature maps.

    :param x: list of feature maps from one discriminator pass
    :param y: list of feature maps from the other pass
    :param num: int, how many leading layer pairs to compare
    """
    per_layer = [F.mean(F.squared_error(x[idx], y[idx])) for idx in range(num)]
    fm_loss = 0.0
    for term in per_layer:
        fm_loss += term
    return fm_loss
def gan_model(label_ph, pred, conf):
    """
    Define GAN model with adversarial and discriminator losses and their orchestration.

    Builds two feature-matching discriminators — one on the full images and
    one on their detail layers — and returns the six loss terms. All losses
    are marked ``persistent`` so their values survive graph clearing during
    forward/backward.

    :param label_ph: ground-truth image batch (placeholder/variable)
    :param pred: generator output
    :param conf: config object providing ``scaling_factor`` and ``detail_lambda``
    :return: namedtuple ``jsigan`` with the six loss variables
    """
    # Define Discriminator
    _, d_real_logits, d_real_fm_list = discriminator_fm(
        label_ph, conf.scaling_factor, scope="Discriminator_FM")
    # output of D for fake images (same scope -> shared parameters)
    _, d_fake_logits, d_fake_fm_list = discriminator_fm(
        pred, conf.scaling_factor, scope="Discriminator_FM")
    # Define Detail Discriminator
    # compute the detail layers for the discriminator (reuse)
    base_gt = guided_filter(label_ph, 5, 0.01)
    detail_gt = F.div2(label_ph, base_gt + 1e-15)
    base_pred = guided_filter(pred, 5, 0.01)
    detail_pred = F.div2(pred, base_pred + 1e-15)
    # detail layer output of D for real images
    _, d_detail_real_logits, d_detail_real_fm_list = \
        discriminator_fm(detail_gt, conf.scaling_factor,
                         scope="Discriminator_Detail")
    # detail layer output of D for fake images
    _, d_detail_fake_logits, d_detail_fake_fm_list = \
        discriminator_fm(detail_pred, conf.scaling_factor,
                         scope="Discriminator_Detail")
    # Loss
    # original GAN (hinge GAN)
    d_adv_loss = discriminator_loss(d_real_logits, d_fake_logits)
    d_adv_loss.persistent = True
    g_adv_loss = generator_loss(d_real_logits, d_fake_logits)
    g_adv_loss.persistent = True
    # detail GAN (hinge GAN), weighted by detail_lambda
    d_detail_adv_loss = conf.detail_lambda * \
        discriminator_loss(d_detail_real_logits, d_detail_fake_logits)
    d_detail_adv_loss.persistent = True
    g_detail_adv_loss = conf.detail_lambda * \
        generator_loss(d_detail_real_logits, d_detail_fake_logits)
    g_detail_adv_loss.persistent = True
    # feature matching (FM) loss over the 4 collected feature maps
    fm_loss = feature_matching_loss(d_real_fm_list, d_fake_fm_list, 4)
    fm_loss.persistent = True
    fm_detail_loss = conf.detail_lambda * feature_matching_loss(d_detail_real_fm_list,
                                                                d_detail_fake_fm_list, 4)
    fm_detail_loss.persistent = True
    jsigan = namedtuple('jsigan',
                        ['d_adv_loss', 'd_detail_adv_loss', 'g_adv_loss', 'g_detail_adv_loss',
                         'fm_loss', 'fm_detail_loss'])
    return jsigan(d_adv_loss, d_detail_adv_loss, g_adv_loss, g_detail_adv_loss, fm_loss,
                  fm_detail_loss)
| 37.359091 | 98 | 0.55828 |
5475f0c326a3f8de3e388b70e03c71cc3faf4139 | 2,973 | py | Python | neptune/internal/hardware/gpu/gpu_monitor.py | neptune-ml/neptune-client | 7aea63160b5149c3fec40f62d3b0da7381a35748 | [
"Apache-2.0"
] | 13 | 2019-02-11T13:18:38.000Z | 2019-12-26T06:26:07.000Z | neptune/internal/hardware/gpu/gpu_monitor.py | neptune-ml/neptune-client | 7aea63160b5149c3fec40f62d3b0da7381a35748 | [
"Apache-2.0"
] | 39 | 2019-03-07T13:40:10.000Z | 2020-01-07T17:19:24.000Z | neptune/internal/hardware/gpu/gpu_monitor.py | neptune-ml/neptune-client | 7aea63160b5149c3fec40f62d3b0da7381a35748 | [
"Apache-2.0"
] | 4 | 2019-02-11T13:07:23.000Z | 2019-11-26T08:20:24.000Z | #
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from neptune.vendor.pynvml import (
NVMLError,
nvmlDeviceGetCount,
nvmlDeviceGetHandleByIndex,
nvmlDeviceGetMemoryInfo,
nvmlDeviceGetUtilizationRates,
nvmlInit,
)
_logger = logging.getLogger(__name__)
| 35.392857 | 100 | 0.664649 |
54768720b8a58a3c4d1cf1c8c265ceea8f6fc111 | 5,219 | py | Python | tests/redis_map.py | jaredlunde/redis_structures | b9cce5f5c85db5e12c292633ff8d04e3ae053294 | [
"MIT"
] | 2 | 2016-04-05T08:40:47.000Z | 2016-06-27T14:03:26.000Z | tests/redis_map.py | jaredLunde/redis_structures | b9cce5f5c85db5e12c292633ff8d04e3ae053294 | [
"MIT"
] | 1 | 2015-10-27T14:30:53.000Z | 2015-11-09T17:54:33.000Z | tests/redis_map.py | jaredlunde/redis_structures | b9cce5f5c85db5e12c292633ff8d04e3ae053294 | [
"MIT"
] | null | null | null | #!/usr/bin/python3 -S
# -*- coding: utf-8 -*-
"""
`Redis Map Tests`
------------------------------------------------------
2015 Jared Lunde The MIT License (MIT)
http://github.com/jaredlunde
"""
import datetime
import time
import pickle
import unittest
from redis_structures.debug import RandData, gen_rand_str
from redis_structures import StrictRedis, RedisMap
# Discover and run all TestCase classes in this module when executed directly.
if __name__ == '__main__':
    unittest.main()
| 30.881657 | 80 | 0.57923 |
5477f31f091eaba6d081dd15b6e4e452029c17e6 | 4,480 | py | Python | examples/parser_example.py | pibico/beacontools | 513e1c7ff2aaf74b6c7d7b10805c2f6ca4384e3d | [
"MIT"
] | 139 | 2017-06-09T17:15:23.000Z | 2022-03-15T03:02:17.000Z | examples/parser_example.py | pibico/beacontools | 513e1c7ff2aaf74b6c7d7b10805c2f6ca4384e3d | [
"MIT"
] | 71 | 2017-06-20T03:20:56.000Z | 2022-02-13T22:47:53.000Z | examples/parser_example.py | pibico/beacontools | 513e1c7ff2aaf74b6c7d7b10805c2f6ca4384e3d | [
"MIT"
] | 59 | 2017-06-20T03:10:00.000Z | 2022-03-15T23:54:44.000Z | # -*- coding: utf-8 -*-
from beacontools import parse_packet
# Eddystone UID packet
uid_packet = b"\x02\x01\x06\x03\x03\xaa\xfe\x17\x16\xaa\xfe\x00\xe3\x12\x34\x56\x78\x90\x12" \
b"\x34\x67\x89\x01\x00\x00\x00\x00\x00\x01\x00\x00"
uid_frame = parse_packet(uid_packet)
print("Namespace: %s" % uid_frame.namespace)
print("Instance: %s" % uid_frame.instance)
print("TX Power: %s" % uid_frame.tx_power)
print("-----")
# Eddystone URL packet
url_packet = b"\x03\x03\xAA\xFE\x13\x16\xAA\xFE\x10\xF8\x03github\x00citruz"
url_frame = parse_packet(url_packet)
print("TX Power: %d" % url_frame.tx_power)
print("URL: %s" % url_frame.url)
print("-----")
# Eddystone TLM packet (unencrypted)
tlm_packet = b"\x02\x01\x06\x03\x03\xaa\xfe\x11\x16\xaa\xfe\x20\x00\x0b\x18\x13\x00\x00\x00" \
b"\x14\x67\x00\x00\x2a\xc4\xe4"
tlm_frame = parse_packet(tlm_packet)
print("Voltage: %d mV" % tlm_frame.voltage)
print("Temperature: %f C" % tlm_frame.temperature)
print("Advertising count: %d" % tlm_frame.advertising_count)
print("Seconds since boot: %d" % tlm_frame.seconds_since_boot)
print("-----")
# Eddystone TLM packet (encrypted)
enc_tlm_packet = b"\x02\x01\x06\x03\x03\xaa\xfe\x11\x16\xaa\xfe\x20\x01\x41\x41\x41\x41\x41" \
b"\x41\x41\x41\x41\x41\x41\x41\xDE\xAD\xBE\xFF"
enc_tlm_frame = parse_packet(enc_tlm_packet)
print("Data: %s" % enc_tlm_frame.encrypted_data)
print("Salt: %d" % enc_tlm_frame.salt)
print("Mic: %d" % enc_tlm_frame.mic)
print("-----")
# iBeacon Advertisement
ibeacon_packet = b"\x02\x01\x06\x1a\xff\x4c\x00\x02\x15\x41\x41\x41\x41\x41\x41\x41\x41\x41" \
b"\x41\x41\x41\x41\x41\x41\x41\x00\x01\x00\x01\xf8"
adv = parse_packet(ibeacon_packet)
print("UUID: %s" % adv.uuid)
print("Major: %d" % adv.major)
print("Minor: %d" % adv.minor)
print("TX Power: %d" % adv.tx_power)
print("-----")
# Cypress iBeacon Sensor
cypress_packet = b"\x02\x01\x04\x1a\xff\x4c\x00\x02\x15\x00\x05\x00\x01\x00\x00\x10\x00\x80" \
b"\x00\x00\x80\x5f\x9b\x01\x31\x00\x02\x6c\x66\xc3"
sensor = parse_packet(cypress_packet)
print("UUID: %s" % sensor.uuid)
print("Major: %d" % sensor.major)
print("Temperature: %d C" % sensor.cypress_temperature)
print("Humidity: %d %%" % sensor.cypress_humidity)
print("TX Power: %d" % sensor.tx_power)
print("-----")
# Estimote Telemetry Packet (Subframe A)
telemetry_a_packet = b"\x02\x01\x04\x03\x03\x9a\xfe\x17\x16\x9a\xfe\x22\x47\xa0\x38\xd5"\
b"\xeb\x03\x26\x40\x00\x00\x01\x41\x44\x47\xfa\xff\xff\xff\xff"
telemetry = parse_packet(telemetry_a_packet)
print("Identifier: %s" % telemetry.identifier)
print("Protocol Version: %d" % telemetry.protocol_version)
print("Acceleration (g): (%f, %f, %f)" % telemetry.acceleration)
print("Is moving: %s" % telemetry.is_moving)
# ... see packet_types/estimote.py for all available attributes and units
print("-----")
# Estimote Telemetry Packet (Subframe B)
telemetry_b_packet = b"\x02\x01\x04\x03\x03\x9a\xfe\x17\x16\x9a\xfe\x22\x47\xa0\x38\xd5"\
b"\xeb\x03\x26\x40\x01\xd8\x42\xed\x73\x49\x25\x66\xbc\x2e\x50"
telemetry_b = parse_packet(telemetry_b_packet)
print("Identifier: %s" % telemetry_b.identifier)
print("Protocol Version: %d" % telemetry_b.protocol_version)
print("Magnetic field: (%f, %f, %f)" % telemetry_b.magnetic_field)
print("Temperature: %f C" % telemetry_b.temperature)
# ... see packet_types/estimote.py for all available attributes and units
# Estimote Nearable Advertisement
nearable_packet = b"\x02\x01\x04\x03\x03\x0f\x18\x17\xff\x5d" \
b"\x01\x01\x1e\xfe\x42\x7e\xb6\xf4\xbc\x2f" \
b"\x04\x01\x68\xa1\xaa\xfe\x05\xc1\x45\x25" \
b"\x53\xb5"
nearable_adv = parse_packet(nearable_packet)
print("Identifier: %s" % nearable_adv.identifier)
print("Hardware_version: %d" % nearable_adv.hardware_version)
print("Firmware_version: %d" % nearable_adv.firmware_version)
print("Temperature: %d" % nearable_adv.temperature)
print("Is moving: %i" % nearable_adv.is_moving)
print("-----")
# CJ Monitor packet
cj_monitor_packet = b"\x02\x01\x06\x05\x02\x1A\x18\x00\x18" \
b"\x09\xFF\x72\x04\xFE\x10\xD1\x0C\x33\x61" \
b"\x09\x09\x4D\x6F\x6E\x20\x35\x36\x34\x33"
cj_monitor = parse_packet(cj_monitor_packet)
print("Name: %s" % cj_monitor.name)
print("Temperature: %f C" % cj_monitor.temperature)
print("Humidity: %d %%" % cj_monitor.humidity)
print("Light: %f" % cj_monitor.light)
| 40 | 94 | 0.690625 |
547c48103894763c6518d10f40329e0d7d4eaefd | 1,228 | py | Python | mlsurvey/sl/workflows/multiple_learning_workflow.py | jlaumonier/mlsurvey | 373598d067c7f0930ba13fe8da9756ce26eecbaf | [
"MIT"
] | null | null | null | mlsurvey/sl/workflows/multiple_learning_workflow.py | jlaumonier/mlsurvey | 373598d067c7f0930ba13fe8da9756ce26eecbaf | [
"MIT"
] | null | null | null | mlsurvey/sl/workflows/multiple_learning_workflow.py | jlaumonier/mlsurvey | 373598d067c7f0930ba13fe8da9756ce26eecbaf | [
"MIT"
] | null | null | null | from kedro.io import DataCatalog, MemoryDataSet
from kedro.pipeline import Pipeline
from kedro.runner import SequentialRunner
import mlsurvey as mls
from mlsurvey.workflows.learning_workflow import LearningWorkflow
| 34.111111 | 87 | 0.653094 |
547cd68f734cef8dede708252277b864855b2580 | 2,542 | py | Python | backend/apps/cmdb/migrations/0001_initial.py | renmcc/SA2 | a524124c140ae0b291b10dafc11d38744dd93bd9 | [
"MIT"
] | 4 | 2020-06-25T05:57:39.000Z | 2021-06-26T04:58:16.000Z | backend/apps/cmdb/migrations/0001_initial.py | renmcc/SA2 | a524124c140ae0b291b10dafc11d38744dd93bd9 | [
"MIT"
] | null | null | null | backend/apps/cmdb/migrations/0001_initial.py | renmcc/SA2 | a524124c140ae0b291b10dafc11d38744dd93bd9 | [
"MIT"
] | 1 | 2020-12-10T15:12:11.000Z | 2020-12-10T15:12:11.000Z | # Generated by Django 2.2.12 on 2020-06-15 16:55
import datetime
from django.db import migrations, models
import django.db.models.deletion
| 59.116279 | 215 | 0.632179 |
547d39324fd1deeba259dcc2ee665fe787ad6b6c | 1,055 | py | Python | sphecius/ciphers/base.py | douglasdaly/sphecius | df8fc8dd2add157c6360c2b66cb22ac6f0241051 | [
"MIT"
] | 1 | 2019-09-26T01:08:20.000Z | 2019-09-26T01:08:20.000Z | sphecius/ciphers/base.py | douglasdaly/sphecius | df8fc8dd2add157c6360c2b66cb22ac6f0241051 | [
"MIT"
] | null | null | null | sphecius/ciphers/base.py | douglasdaly/sphecius | df8fc8dd2add157c6360c2b66cb22ac6f0241051 | [
"MIT"
] | 1 | 2019-09-26T01:08:19.000Z | 2019-09-26T01:08:19.000Z | # -*- coding: utf-8 -*-
"""
base.py
Base Cipher Object class
@author: Douglas Daly
@date: 1/12/2017
"""
#
# Imports
#
from abc import ABCMeta, abstractmethod
from ..alphabets import English
#
# Classes
#
| 16.484375 | 50 | 0.57346 |
547ee9e4da4b047390b557dc16580a853bcc3c8e | 281 | py | Python | setup.py | codewars/python-unittest | 5a6cc27a51a9d91ce997c953099515c701b76057 | [
"MIT"
] | 4 | 2020-06-20T12:36:09.000Z | 2021-10-31T22:04:48.000Z | setup.py | codewars/python-unittest | 5a6cc27a51a9d91ce997c953099515c701b76057 | [
"MIT"
] | null | null | null | setup.py | codewars/python-unittest | 5a6cc27a51a9d91ce997c953099515c701b76057 | [
"MIT"
] | 3 | 2020-07-11T13:46:24.000Z | 2022-02-23T20:55:19.000Z | from setuptools import setup
setup(
name="codewars_unittest",
version="0.1.0",
packages=["codewars_unittest"],
license="MIT",
description="unittest runner with Codewars output",
install_requires=[],
url="https://github.com/Codewars/python-unittest",
)
| 23.416667 | 55 | 0.690391 |
547f16545ac590cbce83d8fc70ff6fbb32f028e2 | 16,628 | py | Python | code/python/FactSetFunds/v1/fds/sdk/FactSetFunds/model/classifications.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/FactSetFunds/v1/fds/sdk/FactSetFunds/model/classifications.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/FactSetFunds/v1/fds/sdk/FactSetFunds/model/classifications.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | null | null | null | """
FactSet Funds API
FactSet Mutual Funds data offers over 50 fund- and share class-specific data points for mutual funds listed in the United States. <p>FactSet Mutual Funds Reference provides fund-specific reference information as well as FactSet's proprietary classification system. It includes but is not limited to the following coverage * Fund descriptions * A seven-tier classification system * Leverage information * Fees and expenses * Portfolio managers FactSet Mutual Funds Time Series provides quantitative data items on a historical basis. It includes but is not limited to the following coverage * Net asset value * Fund flows * Assets under management * Total return # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: api@factset.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FactSetFunds.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FactSetFunds.exceptions import ApiAttributeError
| 57.536332 | 709 | 0.619016 |
547ff536693b82874299f521ef54379c7a3ee663 | 1,637 | py | Python | tests/test_drc.py | atait/lymask | a047bee386e7c9c7f04030277cdfaf7b3c731d14 | [
"MIT"
] | 3 | 2020-12-01T07:55:50.000Z | 2022-03-16T22:18:07.000Z | tests/test_drc.py | atait/lymask | a047bee386e7c9c7f04030277cdfaf7b3c731d14 | [
"MIT"
] | null | null | null | tests/test_drc.py | atait/lymask | a047bee386e7c9c7f04030277cdfaf7b3c731d14 | [
"MIT"
] | 2 | 2020-12-01T22:56:35.000Z | 2021-05-03T09:30:09.000Z | import os, sys
import subprocess
import xmltodict
import lymask
from lymask import batch_drc_main
from conftest import test_dir
# Fixture paths for the DRC regression check: rule deck, input layout,
# freshly generated report, and the stored reference report to compare against.
drc_file = os.path.join(test_dir, 'tech', 'lymask_example_tech', 'drc', 'default.yml')
layout_file = os.path.join(test_dir, '2_drc_src.oas')
outfile = os.path.join(test_dir, '2_drc_run.lyrdb')
reffile = os.path.join(test_dir, '2_drc_answer.lyrdb')
def assert_equal(rdb_file1, rdb_file2):
    ''' Raise DRC_difference when the two report databases differ.

        The comparison is structural: each XML file is parsed into nested
        dictionaries, so dict key order is irrelevant while the order of
        repeated elements (lists) still matters — same semantics as before.
    '''
    def _load(path):
        # Parse one .lyrdb XML report into a nested dict structure.
        with open(path, 'r') as handle:
            return xmltodict.parse(handle.read(), process_namespaces=True)

    if _load(rdb_file1) != _load(rdb_file2):
        raise DRC_difference()
# This one need Technology working
| 30.886792 | 115 | 0.722053 |
5480da3b737fa2ac8f9665bf668142513e4bbaba | 1,731 | py | Python | graphviz/parameters/formatters.py | boeddeker/graphviz | acf79bca4518781cad02c102e89ec4e9ce757088 | [
"MIT"
] | 1 | 2022-01-19T04:02:46.000Z | 2022-01-19T04:02:46.000Z | graphviz/parameters/formatters.py | boeddeker/graphviz | acf79bca4518781cad02c102e89ec4e9ce757088 | [
"MIT"
] | 1 | 2021-11-19T07:21:48.000Z | 2021-11-19T07:21:48.000Z | graphviz/parameters/formatters.py | boeddeker/graphviz | acf79bca4518781cad02c102e89ec4e9ce757088 | [
"MIT"
] | 1 | 2022-01-14T17:15:38.000Z | 2022-01-14T17:15:38.000Z | """Rendering formatter parameter handling."""
import typing
from . import base
# Public API of this module (verify_formatter and Formatter are defined
# later in the file, outside this excerpt).
__all__ = ['FORMATTERS', 'verify_formatter', 'Formatter']
# Names of the Graphviz output formatters this parameter accepts.
FORMATTERS = {'cairo',
              'core',
              'gd',
              'gdiplus',
              'gdwbmp',
              'xlib'}
# Whether a formatter value is mandatory; False makes the parameter optional
# (presumably consumed by the shared parameter base class — TODO confirm).
REQUIRED = False
| 28.377049 | 84 | 0.60312 |
5480e17b073b3d2de7a418823c0645c307bf4d95 | 183 | py | Python | reward/utils/device.py | lgvaz/torchrl | cfff8acaf70d1fec72169162b95ab5ad3547d17a | [
"MIT"
] | 5 | 2018-06-21T14:33:40.000Z | 2018-08-18T02:26:03.000Z | reward/utils/device.py | lgvaz/reward | cfff8acaf70d1fec72169162b95ab5ad3547d17a | [
"MIT"
] | null | null | null | reward/utils/device.py | lgvaz/reward | cfff8acaf70d1fec72169162b95ab5ad3547d17a | [
"MIT"
] | 2 | 2018-05-08T03:34:49.000Z | 2018-06-22T15:04:17.000Z | import torch
CONFIG = {"device": torch.device("cuda" if torch.cuda.is_available() else "cpu")}
| 22.875 | 81 | 0.704918 |
548192ff87fcf5b59d3f5cc728048383ca680545 | 5,727 | py | Python | Source/Functions/RPSLS.Python.Api/NextMove/next_move.py | ivan-b-ivanov/RockPaperScissorsLizardSpock | 9167bcbe5ad2937e834408475c2ec66cf92fef84 | [
"MIT"
] | null | null | null | Source/Functions/RPSLS.Python.Api/NextMove/next_move.py | ivan-b-ivanov/RockPaperScissorsLizardSpock | 9167bcbe5ad2937e834408475c2ec66cf92fef84 | [
"MIT"
] | null | null | null | Source/Functions/RPSLS.Python.Api/NextMove/next_move.py | ivan-b-ivanov/RockPaperScissorsLizardSpock | 9167bcbe5ad2937e834408475c2ec66cf92fef84 | [
"MIT"
] | null | null | null | import logging
import random
import os
import json
from typing import Tuple, List
import requests
R_rock, P_paper, S_scissors, V_spock, L_lizard = ('R', 'P', 'S', 'V', 'L')
INTERNAL_MOVES_ENCODING = [R_rock, P_paper, S_scissors, V_spock, L_lizard]
| 39.226027 | 101 | 0.596124 |
5481ba7b076cad5057871b2955d0e7140c538c8a | 5,410 | py | Python | examples/trials/nas_cifar10/src/cifar10/nni_child_cifar10.py | runauto/nni | 30152b04c4739f5b4f95087dee5f1e66ee893078 | [
"MIT"
] | 2 | 2019-12-30T20:42:17.000Z | 2021-01-24T16:51:56.000Z | examples/trials/nas_cifar10/src/cifar10/nni_child_cifar10.py | runauto/nni | 30152b04c4739f5b4f95087dee5f1e66ee893078 | [
"MIT"
] | null | null | null | examples/trials/nas_cifar10/src/cifar10/nni_child_cifar10.py | runauto/nni | 30152b04c4739f5b4f95087dee5f1e66ee893078 | [
"MIT"
] | 1 | 2020-01-11T13:19:26.000Z | 2020-01-11T13:19:26.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import logging
import tensorflow as tf
from src.cifar10.data_utils import read_data
from src.cifar10.general_child import GeneralChild
import src.cifar10_flags
from src.cifar10_flags import FLAGS
# Module-level logger; ``build_logger`` comes from elsewhere in the original
# project (its definition is not part of this excerpt — TODO confirm source).
logger = build_logger("nni_child_cifar10")
def build_trial(images, labels, ChildClass):
    '''Build child class.

    Instantiates ``ChildClass`` with the dataset tensors and every
    child-model hyperparameter taken from the global ``FLAGS`` object.

    :param images: training/eval image tensors (as produced by ``read_data``)
    :param labels: corresponding label tensors
    :param ChildClass: child-model class to instantiate (e.g. GeneralChild)
    :return: the constructed child model instance
    '''
    child_model = ChildClass(
        images,
        labels,
        use_aux_heads=FLAGS.child_use_aux_heads,
        cutout_size=FLAGS.child_cutout_size,
        num_layers=FLAGS.child_num_layers,
        num_cells=FLAGS.child_num_cells,
        num_branches=FLAGS.child_num_branches,
        fixed_arc=FLAGS.child_fixed_arc,
        out_filters_scale=FLAGS.child_out_filters_scale,
        out_filters=FLAGS.child_out_filters,
        keep_prob=FLAGS.child_keep_prob,
        drop_path_keep_prob=FLAGS.child_drop_path_keep_prob,
        num_epochs=FLAGS.num_epochs,
        l2_reg=FLAGS.child_l2_reg,
        data_format=FLAGS.data_format,
        batch_size=FLAGS.batch_size,
        clip_mode="norm",
        grad_bound=FLAGS.child_grad_bound,
        lr_init=FLAGS.child_lr,
        lr_dec_every=FLAGS.child_lr_dec_every,
        lr_dec_rate=FLAGS.child_lr_dec_rate,
        lr_cosine=FLAGS.child_lr_cosine,
        lr_max=FLAGS.child_lr_max,
        lr_min=FLAGS.child_lr_min,
        lr_T_0=FLAGS.child_lr_T_0,
        lr_T_mul=FLAGS.child_lr_T_mul,
        optim_algo="momentum",
        sync_replicas=FLAGS.child_sync_replicas,
        num_aggregate=FLAGS.child_num_aggregate,
        num_replicas=FLAGS.child_num_replicas
    )
    return child_model
def get_child_ops(child_model):
    '''Collect the child model's tensors, ops and callbacks into one dict.

    ``eval_every`` is derived (batches per epoch times the eval period in
    epochs); every other entry is taken directly from the model.
    '''
    ops = {}
    ops["global_step"] = child_model.global_step
    ops["loss"] = child_model.loss
    ops["train_op"] = child_model.train_op
    ops["lr"] = child_model.lr
    ops["grad_norm"] = child_model.grad_norm
    ops["train_acc"] = child_model.train_acc
    ops["optimizer"] = child_model.optimizer
    ops["num_train_batches"] = child_model.num_train_batches
    ops["eval_every"] = child_model.num_train_batches * FLAGS.eval_every_epochs
    ops["eval_func"] = child_model.eval_once
    return ops
# Delegate to the module's main() via the TF1 app runner when run as a script.
if __name__ == "__main__":
    tf.app.run()
| 33.190184 | 85 | 0.64085 |
5481d023ae1cb5111f38843d186a6cb4876d216a | 175 | py | Python | apps/oper/apps.py | dryprojects/MyBlog | ec04ba2bc658e96cddeb1d4766047ca8e89ff656 | [
"BSD-3-Clause"
] | 2 | 2021-08-17T13:29:21.000Z | 2021-09-04T05:00:01.000Z | apps/oper/apps.py | dryprojects/MyBlog | ec04ba2bc658e96cddeb1d4766047ca8e89ff656 | [
"BSD-3-Clause"
] | 1 | 2020-07-16T11:22:32.000Z | 2020-07-16T11:22:32.000Z | apps/oper/apps.py | dryprojects/MyBlog | ec04ba2bc658e96cddeb1d4766047ca8e89ff656 | [
"BSD-3-Clause"
] | 1 | 2020-09-18T10:41:59.000Z | 2020-09-18T10:41:59.000Z | from django.apps import AppConfig
| 17.5 | 34 | 0.64 |
5481e05c5889a5fab05aff46f53912b82371d733 | 1,952 | py | Python | stella/core/interpreter/lexer.py | xabinapal/stella | ae02055749f997323390d642c99a37b80aa5df68 | [
"MIT"
] | null | null | null | stella/core/interpreter/lexer.py | xabinapal/stella | ae02055749f997323390d642c99a37b80aa5df68 | [
"MIT"
] | null | null | null | stella/core/interpreter/lexer.py | xabinapal/stella | ae02055749f997323390d642c99a37b80aa5df68 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import io
import collections
from stella.core.utils import RewindableIterator
from stella.core.interpreter.productions import Token
__all__ = ['Tokenizer', 'Lexer']
################################################################################
### Tokenizer
################################################################################
################################################################################
### Lexer
################################################################################
| 29.134328 | 82 | 0.483607 |
548332d9c8a9e409da8648383e49cb1b1c4dbca5 | 12,628 | py | Python | tensorflow_v1/10_-_Sequence-to-sequence/03_-_Dynamic_attention_with_par-inject.py | mtanti/deeplearningtutorial | a6fef37c77216e4f98dba2bde7c62d6aa6292476 | [
"MIT"
] | 5 | 2019-05-31T08:30:28.000Z | 2020-02-13T20:17:13.000Z | tensorflow_v1/10_-_Sequence-to-sequence/03_-_Dynamic_attention_with_par-inject.py | mtanti/deeplearningtutorial | a6fef37c77216e4f98dba2bde7c62d6aa6292476 | [
"MIT"
] | null | null | null | tensorflow_v1/10_-_Sequence-to-sequence/03_-_Dynamic_attention_with_par-inject.py | mtanti/deeplearningtutorial | a6fef37c77216e4f98dba2bde7c62d6aa6292476 | [
"MIT"
] | 6 | 2019-04-12T15:34:05.000Z | 2019-10-01T16:57:39.000Z | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# Silence TF1 informational logging.
tf.logging.set_verbosity(tf.logging.ERROR)
# Training hyperparameters and (tiny) model sizes for this tutorial example.
max_epochs = 6000
init_stddev = 0.0001
source_embedding_size = 2
target_embedding_size = 2
source_state_size = 2
preattention_size = 2
target_state_size = 2
max_seq_len = 10
# Toy parallel corpus: each source sentence maps to its negated counterpart.
source_tokens = [
    'i like it'.split(' '),
    'i hate it'.split(' '),
    'i don\'t hate it'.split(' '),
    'i don\'t like it'.split(' '),
]
target_tokens = [
    'i don\'t like it'.split(' '),
    'i don\'t hate it'.split(' '),
    'i hate it'.split(' '),
    'i like it'.split(' '),
]
# Source-side vocabulary: reserved 'EDGE' token at index 0 (also used as the
# padding index), followed by the sorted unique tokens of the corpus.
source_vocab = [ 'EDGE' ] + sorted({ token for sent in source_tokens for token in sent })
source_token2index = { token: index for (index, token) in enumerate(source_vocab) }
source_index2token = { index: token for (index, token) in enumerate(source_vocab) }
source_max_len = max(len(sent) for sent in source_tokens)
# Convert every source sentence to a fixed-width index row (zero padded) and
# record its true length for the dynamic RNN.
index_source_indexes = []
index_source_lens = []
for sent in source_tokens:
    source_lens = len(sent)
    source_index = [ source_token2index[token] for token in sent ] + [ 0 for _ in range(source_max_len - source_lens) ]
    index_source_lens.append(source_lens)
    index_source_indexes.append(source_index)
# Target-side vocabulary, mirroring the source side ('EDGE' at index 0).
target_vocab = [ 'EDGE' ] + sorted({ token for sent in target_tokens for token in sent })
target_token2index = { token: index for (index, token) in enumerate(target_vocab) }
target_index2token = { index: token for (index, token) in enumerate(target_vocab) }
target_max_len = max(len(sent) for sent in target_tokens) + 1 #Plus edge token
# For each target sentence build the decoder input ("prefix": EDGE + tokens)
# and the prediction target (tokens + EDGE as end-of-sentence), zero padded.
index_target_prefixes = []
index_target_lens = []
index_target_targets = []
for sent in target_tokens:
    target_len = len(sent) + 1 #Plus edge token
    target_index = [ target_token2index[token] for token in sent ]
    target_prefix = [ target_token2index['EDGE'] ] + target_index + [ 0 for _ in range(target_max_len - target_len) ]
    target_target = target_index + [ target_token2index['EDGE'] ] + [ 0 for _ in range(target_max_len - target_len) ]
    index_target_prefixes.append(target_prefix)
    index_target_lens.append(target_len)
    index_target_targets.append(target_target)
# Build the TF1 sequence-to-sequence graph: a bidirectional GRU encoder over
# the source, an attention-conditioned decoder RNN over the target prefixes,
# and a masked cross-entropy training objective.
g = tf.Graph()
with g.as_default():
    # Placeholders for the padded index matrices and true sequence lengths.
    source_indexes = tf.placeholder(tf.int32, [None, None], 'source_indexes')
    source_lens = tf.placeholder(tf.int32, [None], 'source_lens')
    target_prefixes = tf.placeholder(tf.int32, [None, None], 'target_prefixes')
    target_lens = tf.placeholder(tf.int32, [None], 'target_lens')
    target_targets = tf.placeholder(tf.int32, [None, None], 'target_targets')
    batch_size = tf.shape(source_indexes)[0]
    source_seq_width = tf.shape(source_indexes)[1]
    target_seq_width = tf.shape(target_prefixes)[1]
    with tf.variable_scope('source'):
        with tf.variable_scope('embedding'):
            embedding_matrix = tf.get_variable('embedding_matrix', [len(source_vocab), source_embedding_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
            embedded = tf.nn.embedding_lookup(embedding_matrix, source_indexes)
        with tf.variable_scope('init_state'):
            # Learned initial states for the forward/backward encoder GRUs,
            # tiled over the batch.
            init_state_fw = tf.get_variable('init_state_fw', [source_state_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
            batch_init_fw = tf.tile(tf.reshape(init_state_fw, [1, source_state_size]), [batch_size, 1])
            init_state_bw = tf.get_variable('init_state_bw', [source_state_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
            batch_init_bw = tf.tile(tf.reshape(init_state_bw, [1, source_state_size]), [batch_size, 1])
        with tf.variable_scope('rnn'):
            cell_fw = tf.contrib.rnn.GRUCell(source_state_size)
            cell_bw = tf.contrib.rnn.GRUCell(source_state_size)
            ((outputs_fw, outputs_bw), _) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, embedded, sequence_length=source_lens, initial_state_fw=batch_init_fw, initial_state_bw=batch_init_bw)
            # Project the concatenated fw/bw outputs back down to
            # source_state_size per position.
            outputs_ = tf.concat([ outputs_fw, outputs_bw ], axis=2)
            outputs_2d_ = tf.reshape(outputs_, [batch_size*source_seq_width, 2*source_state_size])
            W = tf.get_variable('W', [2*source_state_size, source_state_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
            b = tf.get_variable('b', [source_state_size], tf.float32, tf.zeros_initializer())
            source_outputs_2d = tf.matmul(outputs_2d_, W) + b
            source_outputs = tf.reshape(source_outputs_2d, [batch_size, source_seq_width, source_state_size])
    with tf.variable_scope('targets'):
        with tf.variable_scope('embedding'):
            embedding_matrix = tf.get_variable('embedding_matrix', [len(target_vocab), target_embedding_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
            embedded = tf.nn.embedding_lookup(embedding_matrix, target_prefixes)
        with tf.variable_scope('init_state'):
            init_state = tf.get_variable('init_state', [target_state_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
            batch_init = tf.tile(tf.reshape(init_state, [1, target_state_size]), [batch_size, 1])
        with tf.variable_scope('rnn'):
            #Custom RNN cell for producing attention vectors that condition the language model via par-inject
            # NOTE: CellAttention is defined elsewhere in the original file
            # (its definition is not part of this excerpt).
            cell = CellAttention()
            ((attentions, outputs), _) = tf.nn.dynamic_rnn(cell, embedded, sequence_length=target_lens, initial_state=batch_init)
        with tf.variable_scope('output'):
            # Per-position projection to vocabulary logits.
            W = tf.get_variable('W', [target_state_size, len(target_vocab)], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
            b = tf.get_variable('b', [len(target_vocab)], tf.float32, tf.zeros_initializer())
            outputs_2d = tf.reshape(outputs, [batch_size*target_seq_width, target_state_size])
            logits_2d = tf.matmul(outputs_2d, W) + b
            logits = tf.reshape(logits_2d, [batch_size, target_seq_width, len(target_vocab)])
            probs = tf.nn.softmax(logits)
            # Distribution over the next word after the given prefix.
            next_word_probs = probs[:, -1, :]
    # Mask out padded positions so they do not contribute to the loss, then
    # average the cross entropy over the real tokens.
    mask = tf.sequence_mask(target_lens, target_seq_width, tf.float32)
    error = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target_targets, logits=logits)*mask)/tf.cast(tf.reduce_sum(target_lens), tf.float32)
    step = tf.train.AdamOptimizer().minimize(error)
    init = tf.global_variables_initializer()
    g.finalize()
with tf.Session() as s:
s.run([ init ], { })
(fig, ax) = plt.subplots(1, 1)
plt.ion()
train_errors = list()
print('epoch', 'train error', sep='\t')
for epoch in range(1, max_epochs+1):
s.run([ step ], { source_indexes: index_source_indexes, source_lens: index_source_lens, target_prefixes: index_target_prefixes, target_lens: index_target_lens, target_targets: index_target_targets })
[ train_error ] = s.run([ error ], { source_indexes: index_source_indexes, source_lens: index_source_lens, target_prefixes: index_target_prefixes, target_lens: index_target_lens, target_targets: index_target_targets })
train_errors.append(train_error)
if epoch%100 == 0:
print(epoch, train_error, sep='\t')
ax.cla()
ax.plot(np.arange(len(train_errors)), train_errors, color='red', linestyle='-', label='train')
ax.set_xlim(0, max_epochs)
ax.set_xlabel('epoch')
ax.set_ylim(0.0, 2.0)
ax.set_ylabel('XE') #Cross entropy
ax.grid(True)
ax.set_title('Error progress')
ax.legend()
fig.tight_layout()
plt.draw()
plt.pause(0.0001)
print()
for sent in source_tokens:
source = [ source_token2index[token] for token in sent ]
prefix_prob = 1.0
index_prefix = [ target_token2index['EDGE'] ]
for _ in range(max_seq_len):
[ curr_probs ] = s.run([ next_word_probs ], { source_indexes: [ source ], source_lens: [ len(source) ], target_prefixes: [ index_prefix ], target_lens: [ len(index_prefix) ] })
selected_index = np.argmax(curr_probs[0, :])
prefix_prob = prefix_prob*curr_probs[0, selected_index]
index_prefix.append(selected_index)
if selected_index == target_token2index['EDGE']:
break
index_generated = index_prefix[1:]
generated = [ target_index2token[i] for i in index_generated ]
[ curr_attentions ] = s.run([ attentions ], { source_indexes: [ source ], source_lens: [ len(source) ], target_prefixes: [ index_generated ], target_lens: [ len(index_generated) ] })
print('Input sentence: ', ' '.join(sent))
print('Generated sentence:', ' '.join(generated))
print('Sentence probability:', prefix_prob)
print('Attention:')
print('', '\t', *sent)
for i in range(len(generated)):
print('', generated[i]+'\t', np.round(curr_attentions[0, i, :], 2))
print()
fig.show() | 52.83682 | 230 | 0.633275 |
54835562ea5262f2ee7bb00d7ceac361aa51a6f1 | 226 | py | Python | lnd/utils.py | gsmadi/lightningpy | 14f4cc2dd5eb8726a06db8798944302974b890aa | [
"MIT"
] | null | null | null | lnd/utils.py | gsmadi/lightningpy | 14f4cc2dd5eb8726a06db8798944302974b890aa | [
"MIT"
] | 3 | 2019-08-21T11:51:52.000Z | 2019-10-07T11:51:45.000Z | lnd/utils.py | smadici-labs/pylnd | 14f4cc2dd5eb8726a06db8798944302974b890aa | [
"MIT"
] | null | null | null | import codecs | 20.545455 | 53 | 0.738938 |
5483a8653b465908b4e7a3a5f68321bd151006ac | 1,649 | py | Python | ctapipe/image/muon/ring_fitter.py | chaimain/ctapipe | ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5 | [
"BSD-3-Clause"
] | 53 | 2015-06-23T15:24:20.000Z | 2021-09-23T22:30:58.000Z | ctapipe/image/muon/ring_fitter.py | chaimain/ctapipe | ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5 | [
"BSD-3-Clause"
] | 1,537 | 2015-06-24T11:27:16.000Z | 2022-03-31T16:17:08.000Z | ctapipe/image/muon/ring_fitter.py | chaimain/ctapipe | ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5 | [
"BSD-3-Clause"
] | 275 | 2015-07-09T14:09:28.000Z | 2022-03-17T22:25:51.000Z | import numpy as np
from ctapipe.core import Component
from ctapipe.containers import MuonRingContainer
from .fitting import kundu_chaudhuri_circle_fit, taubin_circle_fit
import traitlets as traits
# the fit methods do not expose the same interface, so we
# force the same interface onto them, here.
# we also modify their names slightly, since the names are
# exposed to the user via the string traitlet `fit_method`
def kundu_chaudhuri(x, y, weights, mask):
    """Adapt ``kundu_chaudhuri_circle_fit`` to the common (x, y, weights, mask) interface."""
    # Keep only the pixels selected by the mask before delegating to the fit.
    masked = (arr[mask] for arr in (x, y, weights))
    return kundu_chaudhuri_circle_fit(*masked)
def taubin(x, y, weights, mask):
    """Adapt ``taubin_circle_fit`` to the common (x, y, weights, mask) interface.

    ``weights`` is accepted only so both fit methods share one signature;
    the Taubin fit does not use per-pixel weights.
    """
    del weights  # unused; present for a uniform call signature
    return taubin_circle_fit(x, y, mask)
# Registry mapping each user-facing method name (the value accepted by the
# string traitlet `fit_method`) to its implementation.
FIT_METHOD_BY_NAME = {m.__name__: m for m in [kundu_chaudhuri, taubin]}
# Only the fitter component is part of this module's public API.
__all__ = ["MuonRingFitter"]
| 32.333333 | 71 | 0.691935 |
5484be9bfb8cd5688ba3f0f969954eaa83e32875 | 1,873 | py | Python | Main.py | dalwindercheema/FWPython | 4c5d4d6d0b29a199dbf37d16bd4ed9bb2ac22d19 | [
"BSD-2-Clause"
] | 2 | 2021-12-18T17:08:02.000Z | 2021-12-22T04:19:15.000Z | Main.py | dalwindercheema/FWPython | 4c5d4d6d0b29a199dbf37d16bd4ed9bb2ac22d19 | [
"BSD-2-Clause"
] | null | null | null | Main.py | dalwindercheema/FWPython | 4c5d4d6d0b29a199dbf37d16bd4ed9bb2ac22d19 | [
"BSD-2-Clause"
] | null | null | null | import pandas as pd
from os import listdir
import numpy
from sklearn.model_selection import StratifiedKFold
from FS_ALO import WFS
from FW_ALO import WFW
from WFSWFW_ALO import WFSWFW
import matplotlib.pyplot as plt
# Main program: run the cross-validation driver and unpack its results.
# NOTE(review): `main_CV` is not defined or imported in this excerpt --
# presumably it runs the cross-validated feature selection/weighting
# experiments; confirm it is defined at module scope before this executes.
Cost,Best_WFS,Best_WFW,Best_WFSWFW,CC=main_CV()
| 36.019231 | 107 | 0.645489 |
548634bd7f60817d2246c17acdb44bb98affa644 | 1,189 | py | Python | demo/demo/models.py | dracarysX/django-rest-query | 62fe8ee8f72251a1a8982265fff57870f2d43ca9 | [
"MIT"
] | 2 | 2017-06-28T02:51:52.000Z | 2017-06-28T09:28:33.000Z | demo/demo/models.py | dracarysX/django-rest-query | 62fe8ee8f72251a1a8982265fff57870f2d43ca9 | [
"MIT"
] | null | null | null | demo/demo/models.py | dracarysX/django-rest-query | 62fe8ee8f72251a1a8982265fff57870f2d43ca9 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*-coding: utf-8 -*-
__author__ = 'dracarysX'
from django.db import models
| 22.018519 | 48 | 0.652649 |
54870fd0b78e5e716753c262ab01d38621a1dd9c | 4,796 | py | Python | feedback-api/src/api/services/feedback/feedback_camunda_service.py | josekudiyirippil/queue-management | e56a987e14cfd2b50b820f679c7669060450da8e | [
"Apache-2.0"
] | 30 | 2018-09-19T03:30:51.000Z | 2022-03-07T02:57:05.000Z | feedback-api/src/api/services/feedback/feedback_camunda_service.py | ann-aot/queue-management | 8ac8353a1e5f3f27fea74e70831ab5f0590d1805 | [
"Apache-2.0"
] | 159 | 2018-09-17T23:45:58.000Z | 2022-03-30T17:35:05.000Z | feedback-api/src/api/services/feedback/feedback_camunda_service.py | ann-aot/queue-management | 8ac8353a1e5f3f27fea74e70831ab5f0590d1805 | [
"Apache-2.0"
] | 52 | 2018-05-18T18:30:06.000Z | 2021-08-25T12:00:29.000Z | # Copyright 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Submit Citizen feedback.
This module consists of API that calls Camunda BPM to save citizen feedback comments.
"""
import os, requests, json
from typing import Dict
from jinja2 import Environment, FileSystemLoader
from .feedback_base_service import FeedbackBaseService
from flask import jsonify
| 51.021277 | 111 | 0.641785 |
5489ae18fd1a18ba304d5257203fc13d1b20346d | 2,334 | py | Python | dezede/urls.py | dezede/dezede | 985ed1b42a2a6bab996e26c1b92444ae04afcc2c | [
"BSD-3-Clause"
] | 15 | 2015-02-10T21:16:31.000Z | 2021-03-25T16:46:20.000Z | dezede/urls.py | dezede/dezede | 985ed1b42a2a6bab996e26c1b92444ae04afcc2c | [
"BSD-3-Clause"
] | 4 | 2021-02-10T15:42:08.000Z | 2022-03-11T23:20:38.000Z | dezede/urls.py | dezede/dezede | 985ed1b42a2a6bab996e26c1b92444ae04afcc2c | [
"BSD-3-Clause"
] | 6 | 2016-07-10T14:20:48.000Z | 2022-01-19T18:34:02.000Z | from django.conf import settings
from django.conf.urls import *
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from ajax_select import urls as ajax_select_urls
from .views import (
HomeView, CustomSearchView, autocomplete, ErrorView, BibliographieView,
RssFeed, GlobalSitemap,
)
# Register each installed app's admin module with the admin site.
admin.autodiscover()
# Site-wide URL routes. Order matters: the catch-all r'^' includes fall
# through to the included app URLconfs when earlier patterns do not match.
urlpatterns = [
    # Home page.
    url(r'^$', HomeView.as_view(), name='home'),
    url(r'^', include('libretto.urls')),
    url(r'^examens/', include('examens.urls')),
    # Static informational pages rendered directly from templates.
    url(r'^presentation$',
        TemplateView.as_view(template_name='pages/presentation.html'),
        name='presentation'),
    url(r'^contribuer$',
        TemplateView.as_view(template_name='pages/contribute.html'),
        name='contribuer'),
    url(r'^bibliographie$', BibliographieView.as_view(), name='bibliographie'),
    url(r'^', include('accounts.urls')),
    url(r'^dossiers/', include('dossiers.urls')),
    # Admin, with ajax_select lookups registered before the admin catch-all.
    url(r'^admin/lookups/', include(ajax_select_urls)),
    url(r'^admin/', admin.site.urls),
    # Third-party apps: i18n switching, rich-text editor, admin skin.
    url(r'^i18n/', include('django.conf.urls.i18n')),
    url(r'^tinymce/', include('tinymce.urls')),
    url(r'^grappelli/', include('grappelli.urls')),
    # Haystack-backed full-text search.
    url(r'^recherche/', CustomSearchView(), name='haystack_search'),
    url(r'^api-auth/', include('rest_framework.urls',
                               namespace='rest_framework')),
    url(r'^autocomplete$', autocomplete, name='autocomplete'),
    # Feeds and sitemap; the sitemap view is cached for 24 hours.
    url(r'^rss\.xml$', RssFeed(), name='rss_feed'),
    url(r'^sitemap.xml$', cache_page(24*60*60)(sitemap),
        {'sitemaps': {'global': GlobalSitemap}},
        name='django.contrib.sitemaps.views.sitemap'),
    # Preview page for the 404 error template.
    url(r'^404$', ErrorView.as_view(status=404)),
]
# Development-only routes: serve static/media files directly and expose the
# debug toolbar plus preview pages for the error templates.
if settings.DEBUG:
    # In production a web server serves these; Django only does so in DEBUG.
    urlpatterns += static(settings.STATIC_URL,
                          document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
    # Imported here so debug_toolbar is only required in development.
    import debug_toolbar
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
        # Preview pages for the 403/500/503 error templates.
        url(r'^403$', ErrorView.as_view(status=403)),
        url(r'^500$', ErrorView.as_view(status=500)),
        url(r'^503$', ErrorView.as_view(status=503)),
    ]
| 38.9 | 79 | 0.662811 |
548afc21b16ee46ad8044ba3566ba260b8c8d71a | 899 | py | Python | database/chemtrack/contacts.py | mshobair/invitro_cheminformatics | 17201496c73453accd440646a1ee81726119a59c | [
"MIT"
] | null | null | null | database/chemtrack/contacts.py | mshobair/invitro_cheminformatics | 17201496c73453accd440646a1ee81726119a59c | [
"MIT"
] | null | null | null | database/chemtrack/contacts.py | mshobair/invitro_cheminformatics | 17201496c73453accd440646a1ee81726119a59c | [
"MIT"
] | null | null | null | import datetime
from database.database_schemas import Schemas
from sqlalchemy import Column, Integer, String, DateTime
from database.base import Base
| 31 | 80 | 0.72525 |
548ba908b52f98060805c6474bd241356237c223 | 7,487 | py | Python | otter/generate/autograder.py | drjbarker/otter-grader | 9e89e1675b09cf7889995b5f1bc8e1648bf6c309 | [
"BSD-3-Clause"
] | null | null | null | otter/generate/autograder.py | drjbarker/otter-grader | 9e89e1675b09cf7889995b5f1bc8e1648bf6c309 | [
"BSD-3-Clause"
] | null | null | null | otter/generate/autograder.py | drjbarker/otter-grader | 9e89e1675b09cf7889995b5f1bc8e1648bf6c309 | [
"BSD-3-Clause"
] | null | null | null | """
Gradescope autograder configuration generator for Otter Generate
"""
import os
import json
import shutil
# import subprocess
import zipfile
import tempfile
import pathlib
import pkg_resources
import yaml
from glob import glob
from subprocess import PIPE
from jinja2 import Template
from .token import APIClient
from .utils import zip_folder
from ..plugins import PluginCollection
from ..run.run_autograder.constants import DEFAULT_OPTIONS
TEMPLATE_DIR = pkg_resources.resource_filename(__name__, "templates")
MINICONDA_INSTALL_URL = "https://repo.anaconda.com/miniconda/Miniconda3-py38_4.9.2-Linux-x86_64.sh"
OTTER_ENV_NAME = "otter-env"
def main(tests_path, output_path, config, lang, requirements, overwrite_requirements, environment,
        username, password, files, assignment=None, plugin_collection=None, **kwargs):
    """
    Runs Otter Generate

    Renders the autograder setup templates for the assignment's language and bundles them,
    the test files, the Otter configuration, and any support files into ``autograder.zip``
    written under ``output_path``.

    Args:
        tests_path (``str``): path to directory of test files for this assignment
        output_path (``str``): directory in which to write output zip file
        config (``str``): path to an Otter configuration JSON file
        lang (``str``): the language of the assignment; one of ``["python", "r"]``
        requirements (``str``): path to a Python or R requirements file for this assignment
        overwrite_requirements (``bool``): whether to overwrite the default requirements instead of
            adding to them
        environment (``str``): path to a conda environment file for this assignment
        username (``str``): a username for Gradescope for generating a token
        password (``str``): a password for Gradescope for generating a token
        files (``list[str]``): list of file paths to add to the zip file
        assignment (``otter.assign.assignment.Assignment``, optional): the assignment configurations
            if used with Otter Assign
        plugin_collection (``otter.plugins.PluginCollection``, optional): plugins to run during
            generation; built from the config's ``plugins`` list if not provided
        **kwargs: ignored kwargs (a remnant of how the argument parser is built)

    Raises:
        ``FileNotFoundError``: if the specified Otter configuration JSON file could not be found
        ``ValueError``: if the configurations specify a Gradescope course ID or assignment ID but not
            both
        ``AssertionError``: if a specified requirements/environment file is missing, or a support
            file lies outside the working directory
    """
    # Fall back to ./otter_config.json when no config path was given explicitly.
    if config is None and os.path.isfile("otter_config.json"):
        config = "otter_config.json"

    if config is not None and not os.path.isfile(config):
        raise FileNotFoundError(f"Could not find otter configuration file {config}")

    if config:
        with open(config) as f:
            otter_config = json.load(f)
    else:
        otter_config = {}

    # A Gradescope token is only added when both IDs are present; specifying
    # exactly one of them is a configuration error.
    if "course_id" in otter_config and "assignment_id" in otter_config:
        client = APIClient()
        if username is not None and password is not None:
            client.log_in(username, password)
            token = client.token
        else:
            # No credentials supplied -- obtain a token interactively.
            token = client.get_token()
        otter_config["token"] = token
    elif "course_id" in otter_config or "assignment_id" in otter_config:
        raise ValueError("Otter config contains 'course_id' or 'assignment_id' but not both")

    # User configuration overrides the defaults.
    options = DEFAULT_OPTIONS.copy()
    options.update(otter_config)

    # update language
    options["lang"] = lang.lower()

    # Load every jinja2 setup template for this language.
    template_dir = os.path.join(TEMPLATE_DIR, options["lang"])
    templates = {}
    for fn in os.listdir(template_dir):
        fp = os.path.join(template_dir, fn)
        if os.path.isfile(fp):  # prevents issue w/ finding __pycache__ in template dirs
            with open(fp) as f:
                templates[fn] = Template(f.read())

    template_context = {
        "autograder_dir": options['autograder_dir'],
        "otter_env_name": OTTER_ENV_NAME,
        "miniconda_install_url": MINICONDA_INSTALL_URL,
        "ottr_branch": "stable",
    }

    if plugin_collection is None:
        plugin_collection = PluginCollection(otter_config.get("plugins", []), None, {})
    else:
        plugin_collection.add_new_plugins(otter_config.get("plugins", []))
    plugin_collection.run("during_generate", otter_config, assignment)

    # create tmp directory to zip inside
    with tempfile.TemporaryDirectory() as td:
        # copy tests into tmp
        # NOTE(review): the zip below is written from ``tests_path`` directly,
        # so this copy into the temp dir appears vestigial -- confirm before removing.
        test_dir = os.path.join(td, "tests")
        os.mkdir(test_dir)
        pattern = ("*.py", "*.[Rr]")[options["lang"] == "r"]
        for file in glob(os.path.join(tests_path, pattern)):
            shutil.copy(file, test_dir)

        # use the default requirements file for the language if it exists and
        # none was specified
        reqs_filename = f"requirements.{'R' if options['lang'] == 'r' else 'txt'}"
        if requirements is None and os.path.isfile(reqs_filename):
            requirements = reqs_filename

        if requirements:
            assert os.path.isfile(requirements), f"Requirements file {requirements} not found"
            with open(requirements) as f:
                template_context["other_requirements"] = f.read()
        else:
            # No user requirements -- render the templates with an empty addition.
            template_context["other_requirements"] = ""
        template_context["overwrite_requirements"] = overwrite_requirements

        # open environment if it exists
        # unlike requirements.txt, we will always overwrite, not append by default
        env_filename = "environment.yml"
        if environment is None and os.path.isfile(env_filename):
            environment = env_filename

        if environment:
            assert os.path.isfile(environment), f"Environment file {environment} not found"
            with open(environment) as f:
                data = yaml.safe_load(f)
            # Force the environment name expected by the setup scripts.
            data['name'] = template_context["otter_env_name"]
            template_context["other_environment"] = yaml.safe_dump(data, default_flow_style=False)
        else:
            template_context["other_environment"] = None

        # Render every template with the assembled context.
        rendered = {fn: tmpl.render(**template_context) for fn, tmpl in templates.items()}

        if os.path.isabs(output_path):
            zip_path = os.path.join(output_path, "autograder.zip")
        else:
            zip_path = os.path.join(os.getcwd(), output_path, "autograder.zip")
        if os.path.exists(zip_path):
            os.remove(zip_path)

        with zipfile.ZipFile(zip_path, mode="w") as zf:
            # rendered setup scripts at the archive root
            for fn, contents in rendered.items():
                zf.writestr(fn, contents)

            # test files under tests/ in the archive
            test_dir = "tests"
            pattern = ("*.py", "*.[Rr]")[options["lang"] == "r"]
            for file in glob(os.path.join(tests_path, pattern)):
                zf.write(file, arcname=os.path.join(test_dir, os.path.basename(file)))

            zf.writestr("otter_config.json", json.dumps(otter_config, indent=2))

            # support files under files/ in the archive
            if len(files) > 0:
                for file in files:
                    full_fp = os.path.abspath(file)
                    assert os.getcwd() in full_fp, f"{file} is not in a subdirectory of the working directory"
                    if os.path.isfile(full_fp):
                        zf.write(file, arcname=os.path.join("files", file))
                    elif os.path.isdir(full_fp):
                        zip_folder(zf, full_fp, prefix="files")
                    else:
                        raise ValueError(f"Could not find file or directory '{full_fp}'")

        # Expose the final configuration on the assignment for Otter Assign.
        if assignment is not None:
            assignment._otter_config = otter_config
| 38.792746 | 110 | 0.631762 |
548be68a4be4ce8e389208606dd772dad630cd84 | 4,947 | py | Python | kanka-manager/test.py | davidbradlycurtis/kanka-manager | f44f814c6d9433a40cb1edc558baac12f26b31ad | [
"MIT"
] | null | null | null | kanka-manager/test.py | davidbradlycurtis/kanka-manager | f44f814c6d9433a40cb1edc558baac12f26b31ad | [
"MIT"
] | null | null | null | kanka-manager/test.py | davidbradlycurtis/kanka-manager | f44f814c6d9433a40cb1edc558baac12f26b31ad | [
"MIT"
] | null | null | null | import requests
import yaml
import json
import os
import sys
import logging
from kankaclient.client import KankaClient
# Emit log records as "<timestamp> LEVEL: message" on the root logger.
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s')
# Module-level logger for this script.
LOGGER = logging.getLogger('KankaManagement')
token = 'Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJhdWQiOiIxIiwianRpIjoiNjUxYzNkNDk1ZjVjZTUzMWQxMjc3MTk5Y2NlMzE1N2U4ZTFkMzZlOWRiYWZiOTY1ZGEyYmI5MTVkZjhkZDFkNTNkZGZlNDhmZTFmZWMzYjMiLCJpYXQiOjE2NDY0NTU3MDguMDA2Mjc4LCJuYmYiOjE2NDY0NTU3MDguMDA2MjgzLCJleHAiOjE2Nzc5OTE3MDcuOTk1NDY5LCJzdWIiOiIzMzM2MiIsInNjb3BlcyI6W119.BsK_qRFoPIlDnNG7DemtD_cVfN98LS-i3f9QUhfm_J7mS7_ltzuJ3typrPL_4lyqbnkrjjx0r5oICRqvgs902AmIDzt-bCGxsyesMWGQcQXFfoahGyJlYfRe4QSNsjlj3cLsM22dn0limMtnKB0I-7XcrbmNU15UJAN0MYJDOZ2pfCmjpn-5GnhgJQNwZrCZc33afUZSVvN_FAYT54GMPExMY0z1J1Zo49uUfs6FQhSG_SNrQ8zbPArCaGgH9hwMIEEhk0dn8-Kv-7SjJu1y4utWs3i9F08-WmIZ9YjDerJsrySc_N6TCgFn2GIeEnb_c-S3RpG4K3PMCTSrOGIKvy_S5zLYZOn6lNXaJ2RTaOhpZvHQHX_OeccoRJ5H9_K5ma1DXBPWaXgujCdaAi5S860ZRqsa8OUSQvHEsq03TNaOKupImBSKLGN6r3Qc57iBTfk6VrOIAO3cFG5Qej7t0gKQdpkDDPAK8dnLvC9QxrfKQCJcfwOrXz7dmUNb-XAKydU2brpqRzJyP3EScShrwPpYgXvE1BJNxtejpPhpE8GCM5TS6-qmHymHILYG0SsoM5HMrA70vFGu3DAJVkRzRavGEBsh_0mFzKR64zNT4hFFEzLyLha5c0FnkgKIFjUfZyrmskRW0t0DifJF5ZGX95PRezeNQHpRZ4yM5G3YseQ'
# NOTE(review/security): `token` above is a hardcoded bearer credential
# committed to source control -- it should be revoked and loaded from an
# environment variable instead.
campaign = 'Journey to Morrivir'
# Client wrapping the Kanka API, scoped to the campaign named above.
kanka_client = KankaClient(token=token, campaign=campaign, verbose=True)
# NOTE(review): `test_characters` is not defined or imported in this
# excerpt -- confirm it exists at module scope before running this script.
test_characters(kanka_client)
print()
# camp_id = 107538
# base_url = 'https://kanka.io/api/1.0/campaigns'
# char_url = '%s/%s/characters' % (base_url, camp_id)
# header = {'Authorization': token, 'Content-type': 'application/json'}
# result = requests.get(url=char_url, headers=header)
# if result.reason == 'OK':
# _characters = json.loads(result.text)['data']
# characters = list()
# for char in _characters:
# character = {
# "id" : char.get('id', None),
# "name" : char.get('name', None),
# "entry" : char.get('entry', None),
# "entry_parsed" : char.get('entry_parsed', None),
# "image" : char.get('image', None),
# "image_full" : char.get('image_full', None),
# "image_thumb" : char.get('image_thumb', None),
# "is_private" : char.get('is_private', None),
# "tags" : char.get('tags', []),
# "title" : char.get('title', None),
# "age" : char.get('age', None),
# "pronouns" : char.get('pronouns', None),
# "type" : char.get('type', None),
# "family_id" : char.get('family_id', None),
# "location_id" : char.get('location_id', None),
# "races" : char.get('races', []),
# "is_dead" : char.get('is_dead', None),
# "image_url" : char.get('image_url', None),
# "personality_name" : char.get('personality_name', []),
# "personality_entry" : char.get('personality_entry', []),
# "appearance_name" : char.get('appearance_name', []),
# "appearance_entry" : char.get('appearance_entry', []),
# "is_personality_visible" : char.get('is_personality_visible', None),
# }
# # Prep character for dump
# for field in character.copy():
# if character[field] == None or character[field] == []:
# del character[field]
# del character['id']
# characters.append(character)
# file = 'C:\\Users\\quazn\\Documents\\dev\\kanka-manager\\morrivir\\characters.yaml'
# code = write_data(file, characters)
# file_characters = read_data(file)
#print(file_characters) | 46.233645 | 1,002 | 0.686275 |
548e7df7f685de5e09edd46875612218fa28a72f | 1,788 | py | Python | setup.py | m-aciek/python-sdk | ab447b58ae5f45ce2d5beb4bfc4d7063e42b4311 | [
"MIT"
] | null | null | null | setup.py | m-aciek/python-sdk | ab447b58ae5f45ce2d5beb4bfc4d7063e42b4311 | [
"MIT"
] | null | null | null | setup.py | m-aciek/python-sdk | ab447b58ae5f45ce2d5beb4bfc4d7063e42b4311 | [
"MIT"
] | 2 | 2018-03-30T10:10:56.000Z | 2018-05-25T09:27:36.000Z | #!/usr/bin/env python
import os
import re
import codecs
from setuptools import setup, find_packages
# Directory containing this setup.py, so file paths resolve regardless of cwd.
ground = os.path.abspath(os.path.dirname(__file__))
# Text of the package's __init__.py, which embeds the project metadata.
# NOTE(review): `read` and `extract_metaitem` are helpers not shown in this
# excerpt -- presumably `read` returns a file's text and `extract_metaitem`
# pulls a metadata field from it; confirm they are defined above.
metadata = read(os.path.join(ground, 'hyperwallet', '__init__.py'))
setup(
    name = 'hyperwallet-sdk',
    url = extract_metaitem('url'),
    author = extract_metaitem('author'),
    author_email = extract_metaitem('email'),
    version = extract_metaitem('version'),
    license = extract_metaitem('license'),
    description = extract_metaitem('description'),
    # Long description shown on PyPI: README followed by the changelog.
    long_description = (read('README.rst') + '\n\n' +
                        read('CHANGELOG.rst')),
    maintainer = extract_metaitem('author'),
    maintainer_email = extract_metaitem('email'),
    packages = find_packages(exclude = ('tests', 'doc')),
    install_requires = ['requests', 'requests-toolbelt', 'jwcrypto', 'python-jose'],
    test_suite = 'nose.collector',
    tests_require = [ 'mock', 'nose'],
    keywords='hyperwallet api',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Sphinx',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| 33.735849 | 114 | 0.636465 |
548fac7398ada6cb536131133e9e9aa0af68eb01 | 7,850 | py | Python | big-picture-spectra/big-picture-spectra.py | aibhleog/plotting-playground | 84c19698e659de97c263362c7440faa3f873476e | [
"MIT"
] | null | null | null | big-picture-spectra/big-picture-spectra.py | aibhleog/plotting-playground | 84c19698e659de97c263362c7440faa3f873476e | [
"MIT"
] | null | null | null | big-picture-spectra/big-picture-spectra.py | aibhleog/plotting-playground | 84c19698e659de97c263362c7440faa3f873476e | [
"MIT"
] | null | null | null | '''
This script makes an image very similar to Figure 2 of Hutchison et al. 2019 (https://arxiv.org/pdf/1905.08812.pdf). Undoubtedly, there are likely simpler ways to make this figure -- this is how I chose to code it up.
Because the figure in the paper uses some proprietary data, the code below will generate fake data to be plotted.
Credit: Taylor Hutchison
aibhleog@tamu.edu
Texas A&M University
'''
__author__ = 'Taylor Hutchison'

import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as fits
import matplotlib.gridspec as gridspec
from matplotlib.patches import Polygon
import matplotlib.patheffects as PathEffects
from mpl_toolkits.axes_grid.inset_locator import inset_axes
from matplotlib.lines import Line2D
from matplotlib import patches

# -- Generating fake data -- #
# -------------------------- #
np.random.seed(seed=3) # fixing the random seed so we can get the same result
gauss2d = np.loadtxt('gaussian2D_sig2_kernel7.txt') # 2D gaussian kernel, used to paint the 2D lines
gauss1d = np.loadtxt('gaussian1D_sig2_kernel7.txt') # 1D gaussian kernel, used to paint the 1D lines
# 1D & 2D gaussian pulled from here (because it's faster for this exercise):
# http://dev.theomader.com/gaussian-kernel-calculator/
noise1d = np.random.uniform(-1,1,250) # noise for 1D spectrum
noise2d = np.random.uniform(-1,1,(250,70)) # noise for 2D spectrum
shape = noise2d.shape
# spectral (x) and spatial (y) centers of the 2D noise frame
xcen, ycen = int(shape[0]/2), int(shape[1]/2)

# first spectrum: a single emission line at the frame center
galspec2d_line1 = noise2d.copy()
galspec2d_line1[xcen-3:xcen+4,ycen-3:ycen+4] += gauss2d * 35 # 2D emission line
galspec1d_line1 = noise1d.copy()
galspec1d_line1[xcen-3:xcen+4] += gauss1d * 15 # Lya 1D emission line

# second spectrum: reuses the first and adds a redder line
galspec2d_line2 = galspec2d_line1.copy()
galspec2d_line2[xcen+17:xcen+24,ycen-3:ycen+4] += gauss2d * 35 # 2D emission line
galspec1d_line2 = galspec1d_line1.copy()
galspec1d_line2[xcen+17:xcen+24] += gauss1d * 10 # CIII] 1D doublet emission line

noisegal = np.random.uniform(-1,1,(50,35)) # noise for photometry of 'galaxy'
galaxy = noisegal.copy()
galaxy[22:29,13:20] += gauss2d * 25 # add signal for galaxy shape
galaxy[24:31,16:23] += gauss2d * 25 # add signal for galaxy shape
wavelength = np.arange(len(galspec1d_line1)) # fake wavelength range

# fake errors
np.random.seed(seed=13) # fixing the random seed so we can get the same result
error1d = np.random.random(len(noise1d)) + 0.4

# ---------------------------- #
# -- Initializing the image -- #
# ---------------------------- #
f = plt.figure(figsize=(10.5,9))
gs0 = gridspec.GridSpec(2,1,height_ratios=[1,0.9],hspace=0.1) # the main subplots

# ------------- #
# -- TOP ROW -- #
# ------------- #
gs01 = gridspec.GridSpecFromSubplotSpec(1,2,subplot_spec=gs0[0], # the top panel's subplots
                                        width_ratios=[1.2,2],wspace=0.22)

# --> RIGHT SIDE: the Lya spectrum
line = 'lya'
band = 'Y'

# The subplot gs001 is made up of 3 subplots where the top and bottom are just used to
# center the middle one more accurately -- they aren't necessary if you don't care THAT much :)
gs001 = gridspec.GridSpecFromSubplotSpec(3,1,subplot_spec=gs01[1],
                                         height_ratios=[0.05,1,0.12],hspace=0.0)
# This is the real subplot for the data (the middle one from gs001), split into 2 subplots
# so that we can have the 2D spectrum on top and the 1D on the bottom
gs011 = gridspec.GridSpecFromSubplotSpec(2,1,subplot_spec=gs001[1],
                                         height_ratios=[1.25,2],hspace=0.0)

# 2D spectrum
ax01 = plt.Subplot(f, gs011[0])
ax01.imshow(galspec2d_line1[75:175,28:42].T, # zooming in for the sake of the example
            aspect='auto',origin='lower',cmap='gray',clim=(-1.5,2.3))
# removing the tickmarks and labels for the 2D spectrum
ax01.xaxis.set_ticks_position('none')
ax01.yaxis.set_ticks_position('none')
ax01.set_yticklabels([])
ax01.set_xticklabels([])
# white text with black outline
txt = ax01.text(0.023,0.73,'%s-band'%(band), size=20.5, color='w',transform=ax01.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='k')])
f.add_subplot(ax01) # adds the subplot to the image

# 1D spectrum
ax02 = plt.Subplot(f, gs011[1])
ax02.step(wavelength,galspec1d_line1,where='mid',lw=2.3)
ax02.fill_between(wavelength,error1d,error1d*-1,alpha=0.2)
ax02.set_xlim(wavelength[74],wavelength[174])
ax02.set_ylabel(r'F$_{\lambda}$ [10$^{-18}$ erg/s/cm$^2$/$\AA$]',fontsize=16)
ax02.set_xlabel('observed wavelength [microns]',labelpad=5,fontsize=16)
f.add_subplot(ax02) # adds the subplot to the image

# --> LEFT SIDE: F160W STAMP
gs002 = gridspec.GridSpecFromSubplotSpec(1,1,subplot_spec=gs01[0])
ax002 = plt.Subplot(f, gs002[0]) # no need to add extra tiny subplots for padding here!
ax002.imshow(galaxy,aspect='auto',origin='upper',cmap='gray',clim=(-1,2))
# removing the tickmarks and labels for the photometry stamp
ax002.xaxis.set_ticks_position('none')
ax002.yaxis.set_ticks_position('none')
ax002.set_yticklabels([])
ax002.set_xticklabels([])
# white text with black outline
txt = ax002.text(0.03,0.90,'F160W',ha='left',size=22.5, color='w',transform=ax002.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='k')])
# adding years for the slit layouts, using the set_path_effects to "bold" the text
txt = ax002.text(0.04,0.13,'2016',size=19.5, color='#CF6060',transform=ax002.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=1.18, foreground='#CF6060')])
txt = ax002.text(0.04,0.22,'2014',size=19.5, color='#F4D03F',transform=ax002.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=1.18, foreground='#F4D03F')])
txt = ax002.text(0.04,0.04,'2017',size=19.5, color='#70B5E3',transform=ax002.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=1.18, foreground='#70B5E3')])
# plotting slits over the regions in the image
# loc: 2, 3, 4, 1
ax002.add_patch(Polygon([[7,7],[22,45],[25.5,43],[11,5]], # 2016 slit
                        zorder=3,facecolor='none',lw=1.8,edgecolor='#CF6060'))
ax002.add_patch(Polygon([[15,5],[15,45],[20,45],[20,5]], # 2014 slit
                        zorder=3,facecolor='none',lw=1.8,edgecolor='#F4D03F'))
ax002.add_patch(Polygon([[5,23],[5,28],[28,28],[28,23]], # 2017 slit
                        zorder=3,facecolor='none',lw=1.8,edgecolor='#70B5E3'))
f.add_subplot(ax002) # adds the subplot to the figure
# ------------------------------------------------------------------------- #

# ---------------- #
# -- BOTTOM ROW -- #
# ---------------- #
# --> the CIII] spectrum
line = 'ciii'
band = 'H'
# similar padding process done as with the Lya spectrum (where only the middle one matters)
gs02 = gridspec.GridSpecFromSubplotSpec(1,3,subplot_spec=gs0[1],width_ratios=[0.28,2,0.13],wspace=0.0)
# splitting the middle subplot from above into two, so that we can have 2D on top and 1D on bottom
gs003 = gridspec.GridSpecFromSubplotSpec(2,1,subplot_spec=gs02[1],height_ratios=[1.75,2],hspace=0.0)

# 2D spectrum
ax21 = plt.Subplot(f, gs003[0])
ax21.imshow(galspec2d_line2[:,15:55].T,aspect='auto',origin='lower',cmap='gray',clim=(-1.5,2.2))
# removing the tickmarks and labels for the 2D spectrum
ax21.xaxis.set_ticks_position('none')
ax21.yaxis.set_ticks_position('none')
ax21.set_yticklabels([])
ax21.set_xticklabels([])
# white text with black outline
txt = ax21.text(0.02,0.75,'%s-band'%(band), size=16+8.5, color='w',transform=ax21.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='k')])
f.add_subplot(ax21) # adds subplot to the figure

# 1D spectrum
ax22 = plt.Subplot(f, gs003[1])
ax22.step(wavelength,galspec1d_line2,where='mid',lw=2.7)
ax22.fill_between(wavelength,error1d,error1d*-1,alpha=0.2)
ax22.set_xlim(wavelength[0],wavelength[-1])
ax22.set_ylabel(r'F$_{\lambda}$ [10$^{-19}$ erg/s/cm$^{2}$/$\AA$]',fontsize=16)
ax22.set_xlabel('observed wavelength [microns]',fontsize=16)
f.add_subplot(ax22) # adds subplot to the figure

# saving figure
plt.savefig('figure.pdf')
#plt.show()
plt.close('all')
| 39.25 | 217 | 0.707771 |
54902b07fce1f2bf2bcf246ab039ab703861aaf3 | 8,517 | py | Python | pesummary/core/plots/corner.py | pesummary/pesummary | 99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8 | [
"MIT"
] | 1 | 2021-08-03T05:58:20.000Z | 2021-08-03T05:58:20.000Z | pesummary/core/plots/corner.py | pesummary/pesummary | 99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8 | [
"MIT"
] | 1 | 2020-06-13T13:29:35.000Z | 2020-06-15T12:45:04.000Z | pesummary/core/plots/corner.py | pesummary/pesummary | 99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8 | [
"MIT"
] | 3 | 2021-07-08T08:31:28.000Z | 2022-03-31T14:08:58.000Z | # Licensed under an MIT style license -- see LICENSE.md
import numpy as np
from scipy.stats import gaussian_kde
from matplotlib.colors import LinearSegmentedColormap, colorConverter
import corner
__author__ = ["Charlie Hoy <charlie.hoy@ligo.org>"]
def hist2d(
    x, y, bins=20, range=None, weights=None, levels=None, smooth=None, ax=None,
    color=None, quiet=False, plot_datapoints=True, plot_density=True,
    plot_contours=True, no_fill_contours=False, fill_contours=False,
    contour_kwargs=None, contourf_kwargs=None, data_kwargs=None,
    pcolor_kwargs=None, new_fig=True, kde=None, kde_kwargs={},
    density_cmap=None, label=None, grid=True, **kwargs
):
    """Extension of the corner.hist2d function. Allows the user to specify the
    kde used when estimating the 2d probability density

    Parameters
    ----------
    x : array_like[nsamples,]
        The samples.
    y : array_like[nsamples,]
        The samples.
    bins : int, optional
        Number of bins passed to ``np.histogram2d``; only the resulting bin
        edges are used (as the KDE evaluation grid), not the counts.
    range : list, optional
        ``[[xmin, xmax], [ymin, ymax]]``; derived from the data when None.
    weights : array_like, optional
        Sample weights forwarded to ``np.histogram2d``.
    levels : array_like
        The contour levels to draw.
    smooth : float, optional
        If given, standard deviation for a Gaussian filter applied to the
        evaluated KDE surface (requires scipy).
    ax : matplotlib.Axes
        A axes instance on which to add the 2-D histogram. Required.
    color : str, optional
        Base color for points/contours; defaults to black ("k").
    quiet : bool
        If true, suppress warnings for small datasets.
        NOTE(review): currently unused in this implementation.
    plot_datapoints : bool
        Draw the individual data points.
    plot_density : bool
        Draw the density colormap.
    plot_contours : bool
        Draw the contours.
    no_fill_contours : bool
        Add no filling at all to the contours (unlike setting
        ``fill_contours=False``, which still adds a white fill at the densest
        points). NOTE(review): currently unused in this implementation.
    fill_contours : bool
        Fill the contours. NOTE(review): currently unused here.
    contour_kwargs : dict
        Any additional keyword arguments to pass to the `contour` method.
    contourf_kwargs : dict
        Any additional keyword arguments to pass to the `contourf` method.
        NOTE(review): currently unused in this implementation.
    data_kwargs : dict
        Any additional keyword arguments to pass to the `plot` method when
        adding the individual data points.
    pcolor_kwargs : dict
        Any additional keyword arguments to pass to the `pcolor` method when
        adding the density colormap.
    new_fig : bool, optional
        Forwarded to the module-level ``_set_xlim``/``_set_ylim`` helpers.
    kde: func, optional
        KDE you wish to use to work out the 2d probability density
    kde_kwargs: dict, optional
        kwargs passed directly to kde. Recognised keys also include
        "xlow"/"xhigh"/"ylow"/"yhigh" (contour clipping bounds) and
        "transform". NOTE(review): the mutable default ``{}`` is shared
        across calls; safe only while this function never mutates it.
    density_cmap : Colormap or str, optional
        Colormap for the density layer; built from `color` when None.
    label : str, optional
        Legend label attached to the first drawn contour path only.
    grid : bool, optional
        NOTE(review): currently unused in this implementation.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    if kde is None:
        kde = gaussian_kde
    if ax is None:
        raise ValueError("Please provide an axis to plot")
    # Set the default range based on the data range if not provided.
    if range is None:
        range = [[x.min(), x.max()], [y.min(), y.max()]]
    # Set up the default plotting arguments.
    if color is None:
        color = "k"
    # Choose the default "sigma" contour levels.
    if levels is None:
        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
    # This is the color map for the density plot, over-plotted to indicate the
    # density of the points near the center.
    if density_cmap is None:
        density_cmap = LinearSegmentedColormap.from_list(
            "density_cmap", [color, (1, 1, 1, 0)]
        )
    elif isinstance(density_cmap, str):
        from matplotlib import cm
        density_cmap = cm.get_cmap(density_cmap)
    # This color map is used to hide the points at the high density areas.
    # NOTE(review): white_cmap is built but never used below (leftover from
    # the upstream corner.hist2d implementation).
    white_cmap = LinearSegmentedColormap.from_list(
        "white_cmap", [(1, 1, 1), (1, 1, 1)], N=2
    )
    # This "color map" is the list of colors for the contour levels if the
    # contours are filled.
    rgba_color = colorConverter.to_rgba(color)
    contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
    for i, l in enumerate(levels):
        # Fade the alpha channel towards the outermost contour level.
        contour_cmap[i][-1] *= float(i) / (len(levels) + 1)
    # We'll make the 2D histogram to directly estimate the density.
    # Only the bin-edge arrays X, Y are kept; the counts are discarded and the
    # density itself comes from the KDE below.
    try:
        _, X, Y = np.histogram2d(
            x.flatten(),
            y.flatten(),
            bins=bins,
            range=list(map(np.sort, range)),
            weights=weights,
        )
    except ValueError:
        raise ValueError(
            "It looks like at least one of your sample columns "
            "have no dynamic range. You could try using the "
            "'range' argument."
        )
    # Fit the KDE on the raw samples and evaluate it on the bin-edge grid.
    values = np.vstack([x.flatten(), y.flatten()])
    kernel = kde(values, **kde_kwargs)
    # NOTE(review): xmin/xmax/ymin/ymax are computed but never used below.
    xmin, xmax = np.min(x.flatten()), np.max(x.flatten())
    ymin, ymax = np.min(y.flatten()), np.max(y.flatten())
    X, Y = np.meshgrid(X, Y)
    pts = np.vstack([X.ravel(), Y.ravel()])
    z = kernel(pts)
    H = z.reshape(X.shape)
    if smooth is not None:
        if kde_kwargs.get("transform", None) is not None:
            from pesummary.utils.utils import logger
            logger.warning(
                "Smoothing PDF. This may give unwanted effects especially near "
                "any boundaries"
            )
        try:
            from scipy.ndimage import gaussian_filter
        except ImportError:
            raise ImportError("Please install scipy for smoothing")
        H = gaussian_filter(H, smooth)
    # NOTE(review): this branch is a no-op (dead code), apparently left over
    # from the upstream corner.hist2d contour-level bookkeeping.
    if plot_contours or plot_density:
        pass
    # Defensive defaults. NOTE(review): kde_kwargs=None would already have
    # failed at the kde(values, **kde_kwargs) call above, so this check is
    # effectively unreachable for that case.
    if kde_kwargs is None:
        kde_kwargs = dict()
    if contour_kwargs is None:
        contour_kwargs = dict()
    if plot_datapoints:
        if data_kwargs is None:
            data_kwargs = dict()
        data_kwargs["color"] = data_kwargs.get("color", color)
        data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
        data_kwargs["mec"] = data_kwargs.get("mec", "none")
        data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
        ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)
    # Plot the base fill to hide the densest data points.
    # Drawn with alpha=0: only the contour *paths* are wanted, the visible
    # lines are re-plotted manually below after clipping.
    cs = ax.contour(
        X, Y, H, levels=(1 - np.array(levels)) * np.max(H), alpha=0.
    )
    contour_set = []
    for _contour in cs.collections:
        _contour_set = []
        for _path in _contour.get_paths():
            data = _path.vertices
            transpose = data.T
            for idx, axis in enumerate(["x", "y"]):
                # Optional clipping bounds, e.g. kde_kwargs["xlow"].
                limits = [
                    kde_kwargs.get("{}low".format(axis), -np.inf),
                    kde_kwargs.get("{}high".format(axis), np.inf)
                ]
                if kde_kwargs.get("transform", None) is None:
                    # Clamp contour vertices into [low, high] in place.
                    if limits[0] is not None:
                        transpose[idx][
                            np.argwhere(transpose[idx] < limits[0])
                        ] = limits[0]
                    if limits[1] is not None:
                        transpose[idx][
                            np.argwhere(transpose[idx] > limits[1])
                        ] = limits[1]
                else:
                    # NOTE(review): the transformed vertices are computed but
                    # never used -- the untransformed `transpose` is appended
                    # below regardless. Looks like an unfinished feature;
                    # confirm intended behavior upstream.
                    _transform = kde_kwargs["transform"](transpose)
            _contour_set.append(transpose)
        contour_set.append(_contour_set)
    # Plot the density map. This can't be plotted at the same time as the
    # contour fills.
    if plot_density:
        if pcolor_kwargs is None:
            pcolor_kwargs = dict()
        pcolor_kwargs["shading"] = "auto"
        ax.pcolor(X, Y, np.max(H) - H, cmap=density_cmap, **pcolor_kwargs)
    # Plot the contour edge colors.
    if plot_contours:
        colors = contour_kwargs.pop("colors", color)
        linestyles = kwargs.pop("linestyles", "-")
        _list = [colors, linestyles]
        # Broadcast a single color/linestyle to every contour; validate that
        # explicit sequences cover all contours.
        for num, (prop, default) in enumerate(zip(_list, ['k', '-'])):
            if prop is None:
                _list[num] = default * len(contour_set)
            elif isinstance(prop, str):
                _list[num] = [prop] * len(contour_set)
            elif len(prop) < len(contour_set):
                raise ValueError(
                    "Please provide a color/linestyle for each contour"
                )
        for idx, _contour in enumerate(contour_set):
            for _idx, _path in enumerate(_contour):
                # Attach the legend label to the first path only so the
                # legend shows a single entry.
                if idx == 0 and _idx == 0:
                    _label = label
                else:
                    _label = None
                ax.plot(
                    *_path, color=_list[0][idx], label=_label,
                    linestyle=_list[1][idx]
                )
    # Module-level helpers (defined elsewhere in this file) that honor new_fig.
    _set_xlim(new_fig, ax, range[0])
    _set_ylim(new_fig, ax, range[1])
| 36.242553 | 80 | 0.589996 |
549070123669b37704f083b9611ce10258a9d787 | 2,240 | py | Python | tests/test_tokenizer.py | mkartawijaya/dango | 9cc9d498c4eac851d6baa96ced528c1d91a87216 | [
"BSD-3-Clause"
] | null | null | null | tests/test_tokenizer.py | mkartawijaya/dango | 9cc9d498c4eac851d6baa96ced528c1d91a87216 | [
"BSD-3-Clause"
] | null | null | null | tests/test_tokenizer.py | mkartawijaya/dango | 9cc9d498c4eac851d6baa96ced528c1d91a87216 | [
"BSD-3-Clause"
] | null | null | null | from typing import List
import pytest
import dango
# Since extracting the reading of the dictionary form depends on knowledge
# of the internal workings of SudachiPy we treat this functionality as a
# black box and just perform a smoke test if we get some plausible output.
# This test could break depending on the dictionary used as the readings
# for the words might change.
| 36.129032 | 82 | 0.525446 |
5490a142b6dfe4a57805f7133f0d2ea9a4a1539c | 2,829 | py | Python | neutron_lib/db/sqlalchemytypes.py | rolaya/neutron-lib | 41a2226dfb93a0e6138de260f5126fa7c954178c | [
"Apache-2.0"
] | null | null | null | neutron_lib/db/sqlalchemytypes.py | rolaya/neutron-lib | 41a2226dfb93a0e6138de260f5126fa7c954178c | [
"Apache-2.0"
] | null | null | null | neutron_lib/db/sqlalchemytypes.py | rolaya/neutron-lib | 41a2226dfb93a0e6138de260f5126fa7c954178c | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Custom SQLAlchemy types."""
import netaddr
from sqlalchemy import types
from neutron_lib._i18n import _
| 33.678571 | 78 | 0.607282 |
5491d3f5c105c58d0e54d67614d6a8faed7a1e75 | 256 | py | Python | Algorithm/Array/217. Contains Duplicate.py | smsubham/Data-Structure-Algorithms-Questions | 45da68231907068ef4e4a0444ffdac69b337fa7c | [
"Apache-2.0"
] | null | null | null | Algorithm/Array/217. Contains Duplicate.py | smsubham/Data-Structure-Algorithms-Questions | 45da68231907068ef4e4a0444ffdac69b337fa7c | [
"Apache-2.0"
] | null | null | null | Algorithm/Array/217. Contains Duplicate.py | smsubham/Data-Structure-Algorithms-Questions | 45da68231907068ef4e4a0444ffdac69b337fa7c | [
"Apache-2.0"
] | null | null | null | # https://leetcode.com/problems/contains-duplicate/
# We are forming whole set always which isn't optimal though time complexity is O(n). | 36.571429 | 85 | 0.710938 |
54940d248d43c1725fcc0fa869fadb3c0a38e2a1 | 1,488 | py | Python | script/check_conf_whitelist.py | Kaiyuan-Zhang/Gravel-public | ff3f7dc7d5ac63d91e26f03ae4e49a7451c6cb22 | [
"MIT"
] | 4 | 2020-04-11T19:11:25.000Z | 2021-02-06T10:46:39.000Z | script/check_conf_whitelist.py | Kaiyuan-Zhang/Gravel-public | ff3f7dc7d5ac63d91e26f03ae4e49a7451c6cb22 | [
"MIT"
] | 1 | 2021-11-01T20:19:23.000Z | 2021-11-01T20:19:43.000Z | script/check_conf_whitelist.py | Kaiyuan-Zhang/Gravel-public | ff3f7dc7d5ac63d91e26f03ae4e49a7451c6cb22 | [
"MIT"
] | 1 | 2020-04-18T03:36:03.000Z | 2020-04-18T03:36:03.000Z | import sys
import os
def _read_stripped_lines(path):
    """Return the lines of *path* with trailing whitespace/newlines removed."""
    with open(path, 'r') as f:
        return [line.rstrip() for line in f]


def main(argv=None):
    """Check each configuration against the white-listed elements and report.

    argv: optional argument vector ``[prog, conf_list, conf_dir,
        *white_list_files]``; defaults to ``sys.argv``. Accepting it as a
    parameter makes the routine unit-testable.

    Prints ``Support <n> / <total> (<pct>%) Confs`` followed by every
    non-white-listed element and its occurrence count, most frequent first.
    Exits with status -1 when too few arguments are supplied.
    """
    argv = sys.argv if argv is None else argv
    if len(argv) < 3:
        print("Usage: {} <conf-list> <conf-dir> [white-list-files]".format(argv[0]))
        sys.exit(-1)
    conf_list_file = argv[1]
    conf_dir = argv[2]
    white_list_files = argv[3:]
    # Union of the allowed elements from every white-list file.
    ele_white_list = set()
    for fn in white_list_files:
        ele_white_list.update(_read_stripped_lines(fn))
    # Map each configuration (one name per line of the list file) to the
    # elements it uses, read from <conf_dir>/<name>.
    conf_list = {}
    for conf_name in _read_stripped_lines(conf_list_file):
        conf_list[conf_name] = _read_stripped_lines(os.path.join(conf_dir, conf_name))
    offensive = {}   # element -> number of occurrences outside the white list
    supported = []   # configurations whose elements are all white-listed
    for conf, eles in conf_list.items():
        bad = [e for e in eles if e not in ele_white_list]
        for e in bad:
            offensive[e] = offensive.get(e, 0) + 1
        if not bad:
            supported.append(conf)
    # Same arithmetic order as the historical implementation.
    ratio = len(supported) / len(conf_list) * 100.0
    print("Support {} / {} ({}%) Confs".format(len(supported), len(conf_list), ratio))
    # Most problematic (highest-count) elements first.
    for element, count in sorted(offensive.items(), key=lambda kv: kv[1], reverse=True):
        print(element, count)


if __name__ == '__main__':
    main()
| 31.659574 | 93 | 0.536962 |
54944c0a9b4c84df76cbc3d9fc9c516394ab50a2 | 4,383 | py | Python | models/joint_inference_model.py | pnsuau/neurips18_hierchical_image_manipulation | 712ff8008f8d4c38626bd556fc44adfbcde8fa28 | [
"MIT"
] | null | null | null | models/joint_inference_model.py | pnsuau/neurips18_hierchical_image_manipulation | 712ff8008f8d4c38626bd556fc44adfbcde8fa28 | [
"MIT"
] | null | null | null | models/joint_inference_model.py | pnsuau/neurips18_hierchical_image_manipulation | 712ff8008f8d4c38626bd556fc44adfbcde8fa28 | [
"MIT"
] | null | null | null | import torch
from torch.autograd import Variable
from util.util import *
from util.data_util import *
import numpy as np
from PIL import Image
from data.base_dataset import get_transform_params, get_raw_transform_fn, \
get_transform_fn, get_soft_bbox, get_masked_image
from util.data_util import crop_canvas, paste_canvas
| 42.553398 | 94 | 0.620123 |
549626fa07a7cc95e2aa2428a235bbc1adf539d5 | 2,102 | py | Python | solutions/051_n_queens.py | abawchen/leetcode | 41d3b172a7694a46a860fbcb0565a3acccd000f2 | [
"MIT"
] | null | null | null | solutions/051_n_queens.py | abawchen/leetcode | 41d3b172a7694a46a860fbcb0565a3acccd000f2 | [
"MIT"
] | null | null | null | solutions/051_n_queens.py | abawchen/leetcode | 41d3b172a7694a46a860fbcb0565a3acccd000f2 | [
"MIT"
] | null | null | null |
# Timing harness for the N-Queens solver (Python 2 syntax: bare `print`
# statements). NOTE(review): relies on a `Solution` class that is not
# visible in this excerpt -- presumably defined earlier in the file.
import time
start_time = time.time()
s = Solution()
# Small boards: print the full list of solutions.
print s.solveNQueens(1)
print s.solveNQueens(2)
print s.solveNQueens(3)
print (4, s.solveNQueens(4))
# Larger boards: print only the number of solutions found.
print (5, len(s.solveNQueens(5)))
print (6, len(s.solveNQueens(6)))
print (7, len(s.solveNQueens(7)))
print (8, len(s.solveNQueens(8)))
print (9, len(s.solveNQueens(9)))
print (10, len(s.solveNQueens(10)))
print (11, len(s.solveNQueens(11)))
# Total wall-clock time for all of the runs above.
print("--- %s seconds ---" % (time.time() - start_time))
# s.solveNQueens(4)
# qs = s.solveNQueens(5)
# for q in qs:
#     print "-------------------"
#     for r in q:
#         print r
#     print "-------------------"
| 28.794521 | 108 | 0.471456 |
5497a6164438dad00ba23076949d1e3d84fd4868 | 3,812 | py | Python | tests/v2/parties/test_parties.py | jama5262/Politico | 7292f604723cf115004851b9767688cf1a956bb1 | [
"MIT"
] | null | null | null | tests/v2/parties/test_parties.py | jama5262/Politico | 7292f604723cf115004851b9767688cf1a956bb1 | [
"MIT"
] | 2 | 2019-02-19T12:43:32.000Z | 2019-03-04T16:15:38.000Z | tests/v2/parties/test_parties.py | jama5262/Politico | 7292f604723cf115004851b9767688cf1a956bb1 | [
"MIT"
] | null | null | null | import unittest
import json
from app import createApp
from app.api.database.migrations.migrations import migrate
| 36.304762 | 128 | 0.635887 |
5497dc6a086f32d3001f4b0c68ed070534942148 | 179 | py | Python | tests/_compat.py | lanius/hunk | bba04d9fb7f37c378ea41bc934c3a02401e34fe6 | [
"MIT"
] | 1 | 2015-04-03T08:35:41.000Z | 2015-04-03T08:35:41.000Z | tests/_compat.py | lanius/hunk | bba04d9fb7f37c378ea41bc934c3a02401e34fe6 | [
"MIT"
] | null | null | null | tests/_compat.py | lanius/hunk | bba04d9fb7f37c378ea41bc934c3a02401e34fe6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
# True when running under a Python 2 interpreter.
PY2 = sys.version_info[0] == 2
if PY2:
    # Python 2: the response body is already a text-compatible byte string.
    def json_text(rv):
        return rv.data
else:
    # Python 3: the raw body is bytes and must be decoded with the
    # response's declared charset.
    def json_text(rv):
        return rv.data.decode(rv.charset)
| 12.785714 | 53 | 0.625698 |
549905ffeca6d09d599080cd848b9e365ea51dd3 | 763 | py | Python | oriskami/test/resources/test_router_data.py | oriskami/oriskami-python | 2b0d81f713a9149977907183c67eec136d49ee8c | [
"MIT"
] | 4 | 2017-05-28T19:37:31.000Z | 2017-06-13T11:34:26.000Z | oriskami/test/resources/test_router_data.py | ubivar/ubivar-python | 2b0d81f713a9149977907183c67eec136d49ee8c | [
"MIT"
] | null | null | null | oriskami/test/resources/test_router_data.py | ubivar/ubivar-python | 2b0d81f713a9149977907183c67eec136d49ee8c | [
"MIT"
] | null | null | null | import os
import oriskami
import warnings
from oriskami.test.helper import (OriskamiTestCase)
| 38.15 | 69 | 0.714286 |
54990a8312bff53b0e8f90e7a2361334c451c834 | 1,625 | py | Python | osbot_aws/helpers/IAM_Policy.py | artem7902/OSBot-AWS | 4b676b8323f18d3d9809d41263f3a71745ec2828 | [
"Apache-2.0"
] | null | null | null | osbot_aws/helpers/IAM_Policy.py | artem7902/OSBot-AWS | 4b676b8323f18d3d9809d41263f3a71745ec2828 | [
"Apache-2.0"
] | null | null | null | osbot_aws/helpers/IAM_Policy.py | artem7902/OSBot-AWS | 4b676b8323f18d3d9809d41263f3a71745ec2828 | [
"Apache-2.0"
] | null | null | null | from osbot_aws.apis.IAM import IAM
| 36.111111 | 123 | 0.649846 |
5499335d4a53f32fd4ee6cd0b97b91f92adeec0e | 3,959 | py | Python | data_visualization.py | vashineyu/Common_tools | b933660e007ae104910c975d074523012bb7b58e | [
"Apache-2.0"
] | 1 | 2018-10-26T09:33:26.000Z | 2018-10-26T09:33:26.000Z | data_visualization.py | vashineyu/Common_tools | b933660e007ae104910c975d074523012bb7b58e | [
"Apache-2.0"
] | null | null | null | data_visualization.py | vashineyu/Common_tools | b933660e007ae104910c975d074523012bb7b58e | [
"Apache-2.0"
] | null | null | null | # Visualization function
import numpy as np
import matplotlib.pyplot as plt
from math import ceil
from PIL import Image
from scipy.ndimage.filters import gaussian_filter
def img_combine(img, ncols=5, size=1, path=False):
    """Draw a batch of images on a single grid of subplots.

    Args:
        img: image array to plot, shape (n, im_h, im_w, 3)
        ncols: number of grid columns; rows are derived from len(img)
        size: per-subplot figure scale factor (inches)
        path: if truthy, also save the figure to this file path (dpi=300)
    """
    nimg = img.shape[0]
    nrows = int(ceil(nimg / ncols))
    if nrows == 0:
        # Nothing to draw; bail out *before* creating an invalid 0-row figure
        # (plt.subplots rejects nrows=0).
        return
    # squeeze=False makes plt.subplots always return a 2-D array of Axes, so
    # one loop covers the 1x1, single-row, single-column and general cases.
    # The previous per-shape branches crashed for a 1x1 grid, where
    # matplotlib returns a bare Axes object instead of an array.
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, sharey=True,
                             figsize=(ncols * size, nrows * size), squeeze=False)
    # Row-major traversal reproduces the original left-to-right,
    # top-to-bottom fill order.
    for nth, ax in enumerate(axes.ravel()):
        if nth < nimg:
            ax.imshow(img[nth])
        ax.set_axis_off()
    if path:
        plt.tight_layout()
        plt.savefig(path, dpi=300)
    plt.show()
def get_image_for_paper(original_image_object, prediction_map, IHC_map=None,
                        activation_threshold=0.3, overlay_alpha=0.6, sigma_filter=128,
                        mix=False, colormap_style="coolwarm"):
    """
    Get paper used images (raw, overlay_only, raw+overlay, IHC responding region)
    Args:
    - original_image_object: PIL image object
    - prediction_map: Array of prediction (2-D, values compared against
      activation_threshold; assumed in [0, 1] -- TODO confirm with caller)
    - IHC_map: PIL object of IHC
    - activation_threshold: predictions above this become visible overlay
    - overlay_alpha: control overlay transparency (0. - 1.0)
    - sigma_filter: Use a Gaussian filter to smooth the prediction map
      (prevent grid-like looking); <= 0 disables smoothing
    - mix: True/False, True: return combined 2x2 map instead of a tuple
    - colormap_style: matplotlib colormap name for the overlay
    Returns:
        Tuple of PIL images
        - (raw, overlay, raw+overlay, IHC)
        or a single combined PIL image when mix=True
    """
    # Prediction map filtering
    if sigma_filter > 0:
        pred_smooth = gaussian_filter(prediction_map, sigma=sigma_filter)
    else:
        pred_smooth = prediction_map
    # Create a overlap map: colormap RGBA scaled to 0-255.
    cm = plt.get_cmap(colormap_style)
    overlay = cm(pred_smooth) * 255
    # Pixels above threshold keep the colormap color; pixels below become
    # fully transparent white. NOTE(review): pixels exactly equal to the
    # threshold fall in neither mask and keep the colormap's own alpha.
    mr, mc = np.where(pred_smooth > activation_threshold)
    nr, nc = np.where(pred_smooth < activation_threshold)
    overlay[nr, nc, :] = 255
    overlay[nr, nc, 3] = 0
    # Alpha scales with prediction strength, capped by overlay_alpha.
    overlay[mr, mc, 3] = pred_smooth[mr, mc] * 255 * overlay_alpha
    overlay = Image.fromarray(overlay.astype('uint8'))
    # Render overlay to original image (alpha-composited via mask=overlay).
    render = original_image_object.copy()
    render.paste(im=overlay, box=(0, 0), mask=overlay)
    if not mix:
        return (original_image_object, overlay, render, IHC_map)
    else:
        """
        raw         | overlay
        ---------------------
        raw+overlay | IHC
        """
        # Downscale each panel to 1/4 size before tiling the 2x2 montage.
        sz = tuple([int(i / 4) for i in original_image_object.size])
        raw_arr = np.array(original_image_object.resize(sz)) # RGBA
        overlay = np.array(overlay.resize(sz)) # RGBA
        render = np.array(render.resize(sz)) # RGBA
        # Black placeholder panel when no IHC image was supplied.
        IHC_map = np.array(IHC_map.resize(sz)) if IHC_map is not None else np.zeros((sz + (4,)))
        r1 = np.hstack((raw_arr, overlay))
        r2 = np.hstack((render, IHC_map))
        mixed = np.vstack((r1, r2))
        return Image.fromarray(mixed.astype('uint8'))
def plot_mask_on_image(img, mask, color=(0, 255, 255), alpha=0.3):
    '''Plot a colorful, semi-transparent mask on the image.

    img: cv2 image (numpy array, H x W x 3, BGR channel order)
    mask: boolean array or np.where result selecting the pixels to color
    color: BGR triplet. Default (0, 255, 255) is yellow.
        (A tuple replaces the former list default to avoid the shared
        mutable-default-argument pitfall; numpy accepts either.)
    alpha: transparency. float [0, 1]
    Returns the blended image; the input `img` is left unmodified.
    Ref: http://www.pyimagesearch.com/2016/03/07/transparent-overlays-with-opencv/
    '''
    # NOTE(review): this module never imports cv2 -- add `import cv2` at the
    # top of the file (opencv-python) before calling this function.
    out = img.copy()
    img_layer = img.copy()
    img_layer[mask] = color
    # Weighted blend: alpha * colored layer + (1 - alpha) * original.
    out = cv2.addWeighted(img_layer, alpha, out, 1-alpha, 0, out)
    return out
| 34.12931 | 113 | 0.602172 |
5499a0762a3bf6035430062da7d86593750133d8 | 2,037 | py | Python | src/CIA_History.py | Larz60p/WorldFactBook | c2edb4c8b0b9edab4a41b7384aade6d1d8ce6128 | [
"MIT"
] | 1 | 2019-03-29T03:33:43.000Z | 2019-03-29T03:33:43.000Z | src/CIA_History.py | Larz60p/WorldFactBook | c2edb4c8b0b9edab4a41b7384aade6d1d8ce6128 | [
"MIT"
] | null | null | null | src/CIA_History.py | Larz60p/WorldFactBook | c2edb4c8b0b9edab4a41b7384aade6d1d8ce6128 | [
"MIT"
] | null | null | null | # copyright (c) 2018 Larz60+
from lxml import html
import ScraperPaths
import CIA_ScanTools
import GetPage
import os
import json
import sys
from bs4 import BeautifulSoup
# Script entry point when the module is executed directly.
# NOTE(review): CIA_History is not defined in this excerpt -- presumably a
# class or function declared earlier in the file; verify before running.
if __name__ == '__main__':
    CIA_History()
| 31.828125 | 103 | 0.522337 |
549b59fe62af96d3a0abf31ed9194bf5c91e167c | 301 | py | Python | tests/thumbnail_tests/urls.py | roojoom/sorl-thumbnail | f10fd48f8b33efe4f468ece056fd545be796bf72 | [
"BSD-3-Clause"
] | 2 | 2019-04-09T16:07:23.000Z | 2019-04-09T16:07:26.000Z | tests/thumbnail_tests/urls.py | roojoom/sorl-thumbnail | f10fd48f8b33efe4f468ece056fd545be796bf72 | [
"BSD-3-Clause"
] | null | null | null | tests/thumbnail_tests/urls.py | roojoom/sorl-thumbnail | f10fd48f8b33efe4f468ece056fd545be796bf72 | [
"BSD-3-Clause"
] | 1 | 2020-02-18T13:00:55.000Z | 2020-02-18T13:00:55.000Z | from django.conf.urls import patterns
from django.conf import settings
# URL routes for the thumbnail test project: serve files below MEDIA_ROOT
# under /media/, and render any *.html path with the direct_to_template view.
urlpatterns = patterns(
    '',
    (r'^media/(?P<path>.+)$', 'django.views.static.serve',
     {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
    (r'^(.*\.html)$', 'thumbnail_tests.views.direct_to_template'),
)
| 27.363636 | 67 | 0.671096 |
549b88a77a4a74ecdad5b7ba7eb748aea0547a53 | 822 | py | Python | data/mapper.py | GhostBadger/Kurien_G_DataViz_Fall2020 | 817f1a352027d4d81db0260393912e78a2a5e596 | [
"MIT"
] | null | null | null | data/mapper.py | GhostBadger/Kurien_G_DataViz_Fall2020 | 817f1a352027d4d81db0260393912e78a2a5e596 | [
"MIT"
] | 1 | 2020-12-13T03:46:44.000Z | 2020-12-13T03:46:44.000Z | data/mapper.py | GhostBadger/Kurien_G_DataViz_Fall2020 | 817f1a352027d4d81db0260393912e78a2a5e596 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
# Font settings applied to the chart title below.
hfont = {'fontname':'Lato'}
# Draw a simple line chart showing world population growth over the last 115 years.
years = [1900, 1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015]
pops = [1.6, 2.5, 2.6, 3.0, 3.3, 3.6, 4.2, 4.4, 4.8, 5.3, 5.7, 6.1, 6.5, 6.9, 7.3]
# Plot the data above, and also format the line color (a dark teal) and width.
plt.plot(years, pops, color=(0/255, 100/255, 100/255), linewidth=3.0)
# Label on the left hand side (y axis).
plt.ylabel("World population by Billions")
# Label on the bottom of the chart (x axis).
plt.xlabel("Population growth by year")
# Add a title to the chart, rendered in the Lato font defined above.
plt.title("World Population Growth", pad="20", **hfont)
# Run the show method (this lives inside the pyplot package);
# this will generate a graphic in a new window.
plt.show()
| 31.615385 | 98 | 0.69708 |
549b92a869131a02e61a4b0496d5ecab3305509e | 28,057 | py | Python | classification/train_classifier_tf.py | dnarqq/WildHack | 4fb9e4545cb47a4283ebc1dec955c0817b1664c0 | [
"MIT"
] | 402 | 2019-05-08T17:28:25.000Z | 2022-03-27T19:30:07.000Z | classification/train_classifier_tf.py | dnarqq/WildHack | 4fb9e4545cb47a4283ebc1dec955c0817b1664c0 | [
"MIT"
] | 72 | 2019-05-07T18:33:32.000Z | 2022-03-10T07:48:39.000Z | classification/train_classifier_tf.py | dnarqq/WildHack | 4fb9e4545cb47a4283ebc1dec955c0817b1664c0 | [
"MIT"
] | 162 | 2019-05-18T15:45:27.000Z | 2022-03-25T20:17:45.000Z | r"""Train an EfficientNet classifier.
Currently implementation of multi-label multi-class classification is
non-functional.
During training, start tensorboard from within the classification/ directory:
tensorboard --logdir run --bind_all --samples_per_plugin scalars=0,images=0
Example usage:
python train_classifier_tf.py run_idfg /ssd/crops_sq \
-m "efficientnet-b0" --pretrained --finetune --label-weighted \
--epochs 50 --batch-size 512 --lr 1e-4 \
--seed 123 \
--logdir run_idfg
"""
from __future__ import annotations
import argparse
from collections import defaultdict
from collections.abc import Callable, Mapping, MutableMapping, Sequence
from datetime import datetime
import json
import os
from typing import Any, Optional
import uuid
import numpy as np
import sklearn.metrics
import tensorflow as tf
from tensorboard.plugins.hparams import api as hp
import tqdm
from classification.train_utils import (
HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img,
imgs_with_confidences, load_dataset_csv, prefix_all_keys)
from visualization import plot_utils
AUTOTUNE = tf.data.experimental.AUTOTUNE
# match pytorch EfficientNet model names
EFFICIENTNET_MODELS: Mapping[str, Mapping[str, Any]] = {
'efficientnet-b0': dict(cls='EfficientNetB0', img_size=224, dropout=0.2),
'efficientnet-b1': dict(cls='EfficientNetB1', img_size=240, dropout=0.2),
'efficientnet-b2': dict(cls='EfficientNetB2', img_size=260, dropout=0.3),
'efficientnet-b3': dict(cls='EfficientNetB3', img_size=300, dropout=0.3),
'efficientnet-b4': dict(cls='EfficientNetB4', img_size=380, dropout=0.4),
'efficientnet-b5': dict(cls='EfficientNetB5', img_size=456, dropout=0.4),
'efficientnet-b6': dict(cls='EfficientNetB6', img_size=528, dropout=0.5),
'efficientnet-b7': dict(cls='EfficientNetB7', img_size=600, dropout=0.5)
}
def create_dataset(
        img_files: Sequence[str],
        labels: Sequence[Any],
        sample_weights: Optional[Sequence[float]] = None,
        img_base_dir: str = '',
        transform: Optional[Callable[[tf.Tensor], Any]] = None,
        target_transform: Optional[Callable[[Any], Any]] = None,
        cache: bool | str = False
        ) -> tf.data.Dataset:
    """Create a tf.data.Dataset.

    The dataset returns elements (img, label, img_file, sample_weight) if
    sample_weights is not None, or (img, label, img_file) if
    sample_weights=None.
        img: tf.Tensor, shape [H, W, 3], type uint8
        label: tf.Tensor
        img_file: tf.Tensor, scalar, type str
        sample_weight: tf.Tensor, scalar, type float32

    Possible TODO: oversample the imbalanced classes
        see tf.data.experimental.sample_from_datasets

    Args:
        img_files: list of str, relative paths from img_base_dir
        labels: list of int if multilabel=False
        sample_weights: optional list of float
        img_base_dir: str, base directory for images
        transform: optional transform to apply to a single uint8 JPEG image
        target_transform: optional transform to apply to a single label
        cache: bool or str, cache images in memory if True, cache images to
            a file on disk if a str

    Returns: tf.data.Dataset
    """
    # images dataset
    # String concat (not os.path.join) because `p` is a tf string tensor
    # inside the graph.
    img_ds = tf.data.Dataset.from_tensor_slices(img_files)
    img_ds = img_ds.map(lambda p: tf.io.read_file(img_base_dir + os.sep + p),
                        num_parallel_calls=AUTOTUNE)
    # for smaller disk / memory usage, we cache the raw JPEG bytes instead
    # of the decoded Tensor
    if isinstance(cache, str):
        img_ds = img_ds.cache(cache)
    elif cache:
        img_ds = img_ds.cache()
    # convert JPEG bytes to a 3D uint8 Tensor
    # keras EfficientNet already includes normalization from [0, 255] to [0, 1],
    # so we don't need to do that here
    img_ds = img_ds.map(lambda img: tf.io.decode_jpeg(img, channels=3))
    if transform:
        img_ds = img_ds.map(transform, num_parallel_calls=AUTOTUNE)
    # labels dataset
    labels_ds = tf.data.Dataset.from_tensor_slices(labels)
    if target_transform:
        labels_ds = labels_ds.map(target_transform, num_parallel_calls=AUTOTUNE)
    # img_files dataset (kept so each element carries its source path)
    img_files_ds = tf.data.Dataset.from_tensor_slices(img_files)
    if sample_weights is None:
        return tf.data.Dataset.zip((img_ds, labels_ds, img_files_ds))
    # weights dataset
    weights_ds = tf.data.Dataset.from_tensor_slices(sample_weights)
    return tf.data.Dataset.zip((img_ds, labels_ds, img_files_ds, weights_ds))
def create_dataloaders(
        dataset_csv_path: str,
        label_index_json_path: str,
        splits_json_path: str,
        cropped_images_dir: str,
        img_size: int,
        multilabel: bool,
        label_weighted: bool,
        weight_by_detection_conf: bool | str,
        batch_size: int,
        augment_train: bool,
        cache_splits: Sequence[str]
        ) -> tuple[dict[str, tf.data.Dataset], list[str]]:
    """Build one batched tf.data.Dataset per split.

    Args:
        dataset_csv_path: str, path to CSV file with columns
            ['dataset', 'location', 'label'], where label is a comma-delimited
            list of labels
        label_index_json_path: str, path to label-index JSON file
        splits_json_path: str, path to JSON file
        cropped_images_dir: str, base directory of the cropped images
        img_size: int, target image size
            NOTE(review): unused in this function body; presumably consumed
            by the train/test transforms -- confirm upstream.
        augment_train: bool, whether to shuffle/augment the training set
        cache_splits: list of str, splits to cache
            training set is cached at /mnt/tempds/random_file_name
            validation and test sets are cached in memory

    Returns:
        datasets: dict, maps split to DataLoader
        label_names: list of str, label names in order of label id
    """
    df, label_names, split_to_locs = load_dataset_csv(
        dataset_csv_path, label_index_json_path, splits_json_path,
        multilabel=multilabel, label_weighted=label_weighted,
        weight_by_detection_conf=weight_by_detection_conf)
    # define the transforms
    # NOTE(review): train_transform / test_transform are referenced below but
    # not defined in this excerpt -- verify they exist at module level.
    # efficientnet data preprocessing:
    # - train:
    #   1) random crop: aspect_ratio_range=(0.75, 1.33), area_range=(0.08, 1.0)
    #   2) bicubic resize to img_size
    #   3) random horizontal flip
    # - test:
    #   1) center crop
    #   2) bicubic resize to img_size
    dataloaders = {}
    for split, locs in split_to_locs.items():
        # Only the training split is shuffled/augmented, and only on request.
        is_train = (split == 'train') and augment_train
        split_df = df[df['dataset_location'].isin(locs)]
        weights = None
        if label_weighted or weight_by_detection_conf:
            # weights sums to:
            # - if weight_by_detection_conf: (# images in split - conf delta)
            # - otherwise: (# images in split)
            weights = split_df['weights'].tolist()
            if not weight_by_detection_conf:
                assert np.isclose(sum(weights), len(split_df))
        # Train split caches to a uniquely-named file on disk; other cached
        # splits cache in memory (cache=True).
        cache: bool | str = (split in cache_splits)
        if split == 'train' and 'train' in cache_splits:
            unique_filename = str(uuid.uuid4())
            os.makedirs('/mnt/tempds/', exist_ok=True)
            cache = f'/mnt/tempds/{unique_filename}'
        ds = create_dataset(
            img_files=split_df['path'].tolist(),
            labels=split_df['label_index'].tolist(),
            sample_weights=weights,
            img_base_dir=cropped_images_dir,
            transform=train_transform if is_train else test_transform,
            target_transform=None,
            cache=cache)
        if is_train:
            ds = ds.shuffle(1000, reshuffle_each_iteration=True)
        ds = ds.batch(batch_size).prefetch(buffer_size=AUTOTUNE)
        dataloaders[split] = ds
    return dataloaders, label_names
def build_model(model_name: str, num_classes: int, img_size: int,
                pretrained: bool, finetune: bool) -> tf.keras.Model:
    """Build a classifier: an EfficientNet backbone with average pooling,
    followed by a dropout layer and a dense logits head."""
    spec = EFFICIENTNET_MODELS[model_name]
    backbone_cls = getattr(tf.keras.applications, spec['cls'])
    image_input = tf.keras.layers.Input(shape=(img_size, img_size, 3))
    backbone = backbone_cls(
        input_tensor=image_input,
        weights='imagenet' if pretrained else None,
        include_top=False,
        pooling='avg')
    if finetune:
        # Freeze the backbone's weights, including BatchNorm statistics, so
        # only the new head trains; see
        # https://www.tensorflow.org/guide/keras/transfer_learning#fine-tuning
        backbone.trainable = False
    # Attach a fresh classification head on top of the pooled features.
    dropped = tf.keras.layers.Dropout(spec['dropout'], name='top_dropout')(
        backbone.output)
    logits = tf.keras.layers.Dense(
        num_classes,
        kernel_initializer=tf.keras.initializers.VarianceScaling(
            scale=1. / 3., mode='fan_out', distribution='uniform'),
        name='logits')(dropped)
    model = tf.keras.Model(image_input, logits, name='complete_model')
    model.base_model = backbone  # kept so callers can later unfreeze it
    return model
def main(dataset_dir: str,
         cropped_images_dir: str,
         multilabel: bool,
         model_name: str,
         pretrained: bool,
         finetune: int,
         label_weighted: bool,
         weight_by_detection_conf: bool | str,
         epochs: int,
         batch_size: int,
         lr: float,
         weight_decay: float,
         seed: Optional[int] = None,
         logdir: str = '',
         cache_splits: Sequence[str] = ()) -> None:
    """Trains and evaluates the classifier, logging results to TensorBoard.

    Args:
        dataset_dir: str, path to directory containing classification_ds.csv,
            label_index.json and splits.json
        cropped_images_dir: str, path to local directory of image crops
        multilabel: bool, whether to train a multi-label classifier
        model_name: str, key into EFFICIENTNET_MODELS
        pretrained: bool, whether the base model starts from ImageNet weights
        finetune: int, number of initial epochs during which only the final
            fully-connected layer is trained
        label_weighted: bool, whether to weight samples to balance labels
        weight_by_detection_conf: bool or str, whether to weight training
            examples by detection confidence; a str is a path to a
            calibration .npz file
        epochs: int, number of training epochs, 0 for eval-only
        batch_size: int, batch size for both training and eval
        lr: float, initial learning rate
        weight_decay: float, L2-regularization constant
        seed: optional int, random seed, drawn randomly if None
        logdir: str, directory in which a timestamped run directory is made
        cache_splits: list of str, which dataset splits to cache
    """
    # input validation
    assert os.path.exists(dataset_dir)
    assert os.path.exists(cropped_images_dir)
    if isinstance(weight_by_detection_conf, str):
        assert os.path.exists(weight_by_detection_conf)
    # set seed
    seed = np.random.randint(10_000) if seed is None else seed
    np.random.seed(seed)
    tf.random.set_seed(seed)
    # create logdir and save params
    # locals() is copied here, before further locals are defined, so that
    # params contains exactly the (resolved) function arguments
    params = dict(locals())  # make a copy
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')  # '20200722_110816'
    logdir = os.path.join(logdir, timestamp)
    os.makedirs(logdir, exist_ok=True)
    print('Created logdir:', logdir)
    with open(os.path.join(logdir, 'params.json'), 'w') as f:
        json.dump(params, f, indent=1)
    # let GPU memory grow on demand instead of pre-allocating all of it
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    img_size = EFFICIENTNET_MODELS[model_name]['img_size']
    # create dataloaders and log the index_to_label mapping
    loaders, label_names = create_dataloaders(
        dataset_csv_path=os.path.join(dataset_dir, 'classification_ds.csv'),
        label_index_json_path=os.path.join(dataset_dir, 'label_index.json'),
        splits_json_path=os.path.join(dataset_dir, 'splits.json'),
        cropped_images_dir=cropped_images_dir,
        img_size=img_size,
        multilabel=multilabel,
        label_weighted=label_weighted,
        weight_by_detection_conf=weight_by_detection_conf,
        batch_size=batch_size,
        augment_train=True,
        cache_splits=cache_splits)
    writer = tf.summary.create_file_writer(logdir)
    writer.set_as_default()
    model = build_model(
        model_name, num_classes=len(label_names), img_size=img_size,
        pretrained=pretrained, finetune=finetune > 0)
    # define loss function and optimizer
    loss_fn: tf.keras.losses.Loss
    if multilabel:
        loss_fn = tf.keras.losses.BinaryCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
    else:
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
    # using EfficientNet training defaults
    # - batch norm momentum: 0.99
    # - optimizer: RMSProp, decay 0.9 and momentum 0.9
    # - epochs: 350
    # - learning rate: 0.256, decays by 0.97 every 2.4 epochs
    # - weight decay: 1e-5
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        lr, decay_steps=1, decay_rate=0.97, staircase=True)
    optimizer = tf.keras.optimizers.RMSprop(
        learning_rate=lr, rho=0.9, momentum=0.9)
    best_epoch_metrics: dict[str, float] = {}
    for epoch in range(epochs):
        print(f'Epoch: {epoch}')
        # the schedule is stepped manually once per epoch (not per batch)
        optimizer.learning_rate = lr_schedule(epoch)
        tf.summary.scalar('lr', optimizer.learning_rate, epoch)
        if epoch > 0 and finetune == epoch:
            print('Turning off fine-tune!')
            model.base_model.trainable = True
        print('- train:')
        # TODO: change weighted to False if oversampling minority classes
        train_metrics, train_heaps, train_cm = run_epoch(
            model, loader=loaders['train'], weighted=label_weighted,
            loss_fn=loss_fn, weight_decay=weight_decay, optimizer=optimizer,
            finetune=finetune > epoch, return_extreme_images=True)
        train_metrics = prefix_all_keys(train_metrics, prefix='train/')
        log_run('train', epoch, writer, label_names,
                metrics=train_metrics, heaps=train_heaps, cm=train_cm)
        print('- val:')
        val_metrics, val_heaps, val_cm = run_epoch(
            model, loader=loaders['val'], weighted=label_weighted,
            loss_fn=loss_fn, return_extreme_images=True)
        val_metrics = prefix_all_keys(val_metrics, prefix='val/')
        log_run('val', epoch, writer, label_names,
                metrics=val_metrics, heaps=val_heaps, cm=val_cm)
        if val_metrics['val/acc_top1'] > best_epoch_metrics.get('val/acc_top1', 0):  # pylint: disable=line-too-long
            filename = os.path.join(logdir, f'ckpt_{epoch}.h5')
            # BUGFIX: the checkpoint path was missing from this message
            print(f'New best model! Saving checkpoint to {filename}')
            model.save(filename)
            best_epoch_metrics.update(train_metrics)
            best_epoch_metrics.update(val_metrics)
            best_epoch_metrics['epoch'] = epoch
            print('- test:')
            test_metrics, test_heaps, test_cm = run_epoch(
                model, loader=loaders['test'], weighted=label_weighted,
                loss_fn=loss_fn, return_extreme_images=True)
            test_metrics = prefix_all_keys(test_metrics, prefix='test/')
            log_run('test', epoch, writer, label_names,
                    metrics=test_metrics, heaps=test_heaps, cm=test_cm)
        # stop training after 8 epochs without improvement;
        # .get() avoids a KeyError when no epoch has ever improved
        if epoch >= best_epoch_metrics.get('epoch', 0) + 8:
            break
    hparams_dict = {
        'model_name': model_name,
        'multilabel': multilabel,
        'finetune': finetune,
        'batch_size': batch_size,
        'epochs': epochs
    }
    hp.hparams(hparams_dict)
    writer.close()
def log_run(split: str, epoch: int, writer: tf.summary.SummaryWriter,
            label_names: Sequence[str], metrics: MutableMapping[str, float],
            heaps: Mapping[str, Mapping[int, list[HeapItem]]], cm: np.ndarray
            ) -> None:
    """Writes one epoch's metrics, confusion matrix and extreme-example
    images to TensorBoard.

    Args:
        metrics: dict, keys already prefixed with {split}/; updated in-place
            with per-class recall entries
    """
    # fold per-class recall into the metrics dict (mutates the caller's dict)
    recalls = recall_from_confusion_matrix(cm, label_names)
    metrics.update(prefix_all_keys(recalls, f'{split}/label_recall/'))

    # scalar metrics
    for name, value in metrics.items():
        tf.summary.scalar(name, value, epoch)

    # normalized confusion matrix rendered as an image, shape [1, H, W, C]
    fig = plot_utils.plot_confusion_matrix(cm, classes=label_names,
                                           normalize=True)
    fig_tensor = tf.convert_to_tensor(fig_to_img(fig)[np.newaxis, ...])
    tf.summary.image(f'confusion_matrix/{split}', fig_tensor, step=epoch)

    # most extreme tp/fp/fn example crops
    for heap_type, heap_dict in heaps.items():
        log_images_with_confidence(heap_dict, label_names, epoch=epoch,
                                   tag=f'{split}/{heap_type}')
    writer.flush()
def log_images_with_confidence(
        heap_dict: Mapping[int, list[HeapItem]],
        label_names: Sequence[str],
        epoch: int,
        tag: str) -> None:
    """Logs each label's extreme-example images and file names to TensorBoard.

    Args:
        heap_dict: dict, maps label_id to list of HeapItem, where each
            HeapItem data is a list [img, target, top3_conf, top3_preds,
            img_file], and img is a tf.Tensor of shape [H, W, 3]
        label_names: list of str, label names in order of label id
        epoch: int
        tag: str
    """
    for label_id, heap in heap_dict.items():
        name = label_names[label_id]
        # highest-priority (most extreme) examples first
        examples = [item.data for item in sorted(heap, reverse=True)]
        fig, img_files = imgs_with_confidences(examples, label_names)
        # tf.summary.image requires input of shape [N, H, W, C]
        fig_tensor = tf.convert_to_tensor(fig_to_img(fig)[np.newaxis, ...])
        tf.summary.image(f'{name}/{tag}', fig_tensor, step=epoch)
        tf.summary.text(f'{name}/{tag}_files', '\n\n'.join(img_files),
                        step=epoch)
def track_extreme_examples(tp_heaps: dict[int, list[HeapItem]],
                           fp_heaps: dict[int, list[HeapItem]],
                           fn_heaps: dict[int, list[HeapItem]],
                           inputs: tf.Tensor,
                           labels: tf.Tensor,
                           img_files: tf.Tensor,
                           logits: tf.Tensor) -> None:
    """Updates the 5 most extreme true-positive (tp), false-positive (fp)
    and false-negative (fn) examples with examples from this batch.

    Each HeapItem's data attribute is a tuple with:
    - img: np.ndarray, shape [H, W, 3], type uint8
    - label: int
    - top3_conf: list of float
    - top3_preds: list of float
    - img_file: str

    Args:
        *_heaps: dict, maps label_id (int) to heap of HeapItems
        inputs: tf.Tensor, shape [batch_size, H, W, 3], type float32
        labels: tf.Tensor, shape [batch_size]
        img_files: tf.Tensor, shape [batch_size], type tf.string
        logits: tf.Tensor, shape [batch_size, num_classes]
    """
    probs = tf.nn.softmax(logits, axis=1)
    examples = zip(labels.numpy().tolist(),
                   inputs.numpy().astype(np.uint8),
                   img_files.numpy().astype(str).tolist(),
                   probs)
    for target, img, img_file, confs in examples:
        target_conf = confs[target].numpy().item()
        conf3, pred3 = tf.math.top_k(confs, k=3, sorted=True)
        conf3 = conf3.numpy().tolist()
        pred3 = pred3.numpy().tolist()
        data = (img, target, conf3, pred3, img_file)
        if pred3[0] == target:
            # true positive: priority is the margin between the target's
            # confidence and the runner-up prediction
            item = HeapItem(priority=target_conf - conf3[1], data=data)
            add_to_heap(tp_heaps[target], item, k=5)
        else:
            # false positive for pred3[0], false negative for target;
            # priority is how far the top prediction beat the true label
            item = HeapItem(priority=conf3[0] - target_conf, data=data)
            add_to_heap(fp_heaps[pred3[0]], item, k=5)
            add_to_heap(fn_heaps[target], item, k=5)
def run_epoch(model: tf.keras.Model,
              loader: tf.data.Dataset,
              weighted: bool,
              top: Sequence[int] = (1, 3),
              loss_fn: Optional[tf.keras.losses.Loss] = None,
              weight_decay: float = 0,
              finetune: bool = False,
              optimizer: Optional[tf.keras.optimizers.Optimizer] = None,
              return_extreme_images: bool = False
              ) -> tuple[
                  dict[str, float],
                  dict[str, dict[int, list[HeapItem]]],
                  np.ndarray
              ]:
    """Runs for 1 epoch.

    Args:
        model: tf.keras.Model
        loader: tf.data.Dataset
        weighted: bool, whether to use sample weights in calculating loss and
            accuracy
        top: tuple of int, list of values of k for calculating top-K accuracy
        loss_fn: optional loss function, calculates the mean loss over a batch
        weight_decay: float, L2-regularization constant
        finetune: bool, if true sets model's dropout and BN layers to eval mode
        optimizer: optional optimizer; training mode iff optimizer is given

    Returns:
        metrics: dict, metrics from epoch, contains keys:
            'loss': float, mean per-example loss over entire epoch,
                only included if loss_fn is not None
            'acc_top{k}': float, accuracy@k over the entire epoch
        heaps: dict, keys are ['tp', 'fp', 'fn'], values are heap_dicts,
            each heap_dict maps label_id (int) to a heap of <= 5 HeapItems with
            data attribute (img, target, top3_conf, top3_preds, img_file)
            - 'tp': priority is the difference between target confidence and
                2nd highest confidence
            - 'fp': priority is the difference between highest confidence and
                target confidence
            - 'fn': same as 'fp'
        confusion_matrix: np.ndarray, shape [num_classes, num_classes],
            C[i, j] = # of samples with true label i, predicted as label j
    """
    # if evaluating or finetuning, set dropout & BN layers to eval mode
    is_train = False
    train_dropout_and_bn = False
    if optimizer is not None:
        assert loss_fn is not None
        is_train = True
        if not finetune:
            train_dropout_and_bn = True
            # kernels (not biases) of layers before the final FC get L2 reg
            reg_vars = [
                v for v in model.trainable_variables if 'kernel' in v.name]

    if loss_fn is not None:
        losses = tf.keras.metrics.Mean()
    accuracies_topk = {
        k: tf.keras.metrics.SparseTopKCategoricalAccuracy(k) for k in top
    }

    # for each label, track 5 most-confident and least-confident examples
    tp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
    fp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
    fn_heaps: dict[int, list[HeapItem]] = defaultdict(list)

    all_labels = []
    all_preds = []
    tqdm_loader = tqdm.tqdm(loader)
    for batch in tqdm_loader:
        if weighted:
            inputs, labels, img_files, weights = batch
        else:
            # even if batch contains sample weights, don't use them
            inputs, labels, img_files = batch[0:3]
            weights = None
        all_labels.append(labels.numpy())
        desc = []
        # during eval the tape records nothing (watch_accessed_variables=False)
        with tf.GradientTape(watch_accessed_variables=is_train) as tape:
            outputs = model(inputs, training=train_dropout_and_bn)
            if loss_fn is not None:
                loss = loss_fn(labels, outputs)
                if weights is not None:
                    loss *= weights
                # we do not track L2-regularization loss in the loss metric
                losses.update_state(loss, sample_weight=weights)
                desc.append(f'Loss {losses.result().numpy():.4f}')
            if optimizer is not None:
                loss = tf.math.reduce_mean(loss)
                if not finetune:  # only regularize layers before the final FC
                    # BUGFIX: tf.add_n requires a list of tensors;
                    # a bare generator expression raises ValueError
                    loss += weight_decay * tf.add_n(
                        [tf.nn.l2_loss(v) for v in reg_vars])
        all_preds.append(tf.math.argmax(outputs, axis=1).numpy())
        if optimizer is not None:
            gradients = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        for k, acc in accuracies_topk.items():
            acc.update_state(labels, outputs, sample_weight=weights)
            desc.append(f'Acc@{k} {acc.result().numpy() * 100:.3f}')
        tqdm_loader.set_description(' '.join(desc))
        if return_extreme_images:
            track_extreme_examples(tp_heaps, fp_heaps, fn_heaps, inputs,
                                   labels, img_files, outputs)

    confusion_matrix = sklearn.metrics.confusion_matrix(
        y_true=np.concatenate(all_labels), y_pred=np.concatenate(all_preds))

    metrics = {}
    if loss_fn is not None:
        metrics['loss'] = losses.result().numpy().item()
    for k, acc in accuracies_topk.items():
        metrics[f'acc_top{k}'] = acc.result().numpy().item() * 100
    heaps = {'tp': tp_heaps, 'fp': fp_heaps, 'fn': fn_heaps}
    return metrics, heaps, confusion_matrix
def _parse_args() -> argparse.Namespace:
    """Parses command-line arguments for classifier training."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Trains classifier.')
    arg = parser.add_argument  # shorthand, every option below goes through it

    # positional arguments
    arg('dataset_dir',
        help='path to directory containing: 1) classification dataset CSV, '
             '2) label index JSON, 3) splits JSON')
    arg('cropped_images_dir',
        help='path to local directory where image crops are saved')

    # model / task options
    arg('--multilabel', action='store_true',
        help='for multi-label, multi-class classification')
    arg('-m', '--model-name', default='efficientnet-b0',
        choices=list(EFFICIENTNET_MODELS.keys()),
        help='which EfficientNet model')
    arg('--pretrained', action='store_true',
        help='start with pretrained model')
    arg('--finetune', type=int, default=0,
        help='only fine tune the final fully-connected layer for the first '
             '<finetune> epochs')

    # sample-weighting options
    arg('--label-weighted', action='store_true',
        help='weight training samples to balance labels')
    arg('--weight-by-detection-conf', nargs='?', const=True, default=False,
        help='weight training examples by detection confidence. '
             'Optionally takes a .npz file for isotonic calibration.')

    # optimization options
    arg('--epochs', type=int, default=0,
        help='number of epochs for training, 0 for eval-only')
    arg('--batch-size', type=int, default=256,
        help='batch size for both training and eval')
    arg('--lr', type=float, default=None,
        help='initial learning rate, defaults to (0.016 * batch_size / 256)')
    arg('--weight-decay', type=float, default=1e-5,
        help='weight decay')

    # bookkeeping options
    arg('--seed', type=int,
        help='random seed')
    arg('--logdir', default='.',
        help='directory where TensorBoard logs and a params file are saved')
    arg('--cache', nargs='*', choices=['train', 'val', 'test'], default=(),
        help='which splits of the dataset to cache')
    return parser.parse_args()
if __name__ == '__main__':
    parsed = _parse_args()
    # default learning rate follows the linear scaling rule from the
    # TensorFlow models repo: 0.016 at batch size 256
    if parsed.lr is None:
        parsed.lr = 0.016 * parsed.batch_size / 256
    main(dataset_dir=parsed.dataset_dir,
         cropped_images_dir=parsed.cropped_images_dir,
         multilabel=parsed.multilabel,
         model_name=parsed.model_name,
         pretrained=parsed.pretrained,
         finetune=parsed.finetune,
         label_weighted=parsed.label_weighted,
         weight_by_detection_conf=parsed.weight_by_detection_conf,
         epochs=parsed.epochs,
         batch_size=parsed.batch_size,
         lr=parsed.lr,
         weight_decay=parsed.weight_decay,
         seed=parsed.seed,
         logdir=parsed.logdir,
         cache_splits=parsed.cache)
| 40.13877 | 116 | 0.641729 |
549bb5431eeb75a8dbdf100c69a7b7af3cb1061c | 4,704 | py | Python | pyreach/impl/constraints_impl_test.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 13 | 2021-09-01T01:10:22.000Z | 2022-03-05T10:01:52.000Z | pyreach/impl/constraints_impl_test.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | null | null | null | pyreach/impl/constraints_impl_test.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 6 | 2021-09-20T21:17:53.000Z | 2022-03-14T18:42:48.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils.py."""
from typing import Optional
import unittest
from pyreach import constraints
from pyreach.common.python import types_gen
from pyreach.impl import constraints_impl as impl
from pyreach.impl import test_data
if __name__ == "__main__":
    # Discover and run every unittest.TestCase defined in this module.
    unittest.main()
| 39.864407 | 74 | 0.688776 |
549d785cbbd7f0e2ec80896ebc16b20cd8e0ba82 | 3,400 | py | Python | qplan/parse.py | mackstann/qplaniso | 97c4fbeeb529dfef0778cedc3e79087f6a87f5c4 | [
"CC0-1.0"
] | null | null | null | qplan/parse.py | mackstann/qplaniso | 97c4fbeeb529dfef0778cedc3e79087f6a87f5c4 | [
"CC0-1.0"
] | null | null | null | qplan/parse.py | mackstann/qplaniso | 97c4fbeeb529dfef0778cedc3e79087f6a87f5c4 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
import itertools
if __name__ == '__main__':
import pprint
with open('example-plan.txt') as f:
pprint.pprint(parse(f.read()).as_dict())
| 26.5625 | 98 | 0.523529 |
549e3c5ec51f517db74f9b45d00df6b1a26198eb | 2,397 | py | Python | 10054 - The Necklace/main.py | Shree-Gillorkar/uva-onlinejudge-solutions | df64f5c3a136827b5ca7871df1cf8aafadcf5c9b | [
"MIT"
] | 24 | 2017-10-15T04:04:55.000Z | 2022-01-31T17:14:29.000Z | 10054 - The Necklace/main.py | ashishrana080699/uva-onlinejudge-solutions | d2d0a58e53e3d9acf6d20e56a40900423ae705c4 | [
"MIT"
] | 1 | 2019-07-11T04:22:55.000Z | 2019-07-14T19:34:41.000Z | 10054 - The Necklace/main.py | ashishrana080699/uva-onlinejudge-solutions | d2d0a58e53e3d9acf6d20e56a40900423ae705c4 | [
"MIT"
] | 27 | 2017-01-06T17:33:57.000Z | 2021-11-25T00:07:54.000Z | from sys import stdin
from collections import defaultdict, deque
MAX_COLORS = 51
def build_necklace(beads):
    """Arrange the given beads into a closed necklace, if possible.

    Each bead is a (color, color) pair, i.e. an undirected edge in a
    multigraph over colors; a valid necklace is an Euler circuit of that
    graph.  Returns a deque of oriented (from, to) beads, or None when no
    circuit can exist (some color has odd degree).
    """
    # Multigraph adjacency: remaining[a][b] == number of unused a--b beads.
    remaining = [defaultdict(int) for _ in range(MAX_COLORS)]
    degree = defaultdict(int)
    for a, b in beads:
        remaining[a][b] += 1
        remaining[b][a] += 1
        degree[a] += 1
        degree[b] += 1

    # An Euler circuit requires every vertex to have even degree.
    if any(count % 2 for count in degree.values()):
        return None

    def take_bead(color):
        """Consume and return a bead starting at `color`, preferring the
        neighbour with the most unused beads; None if all are used up."""
        neighbours = remaining[color]
        if not neighbours:
            return None
        # max() returns the first key attaining the maximum count, which
        # matches the original greedy tie-breaking (insertion order).
        best = max(neighbours, key=neighbours.get)
        if neighbours[best] <= 0:
            return None
        neighbours[best] -= 1
        remaining[best][color] -= 1
        return (color, best)

    # Greedy walk; whenever a sub-cycle closes before all beads are used,
    # rotate the last bead to the front and continue from the other end.
    chain = deque([take_bead(beads[0][1])])
    while True:
        bead = take_bead(chain[-1][1])
        if bead is not None:
            chain.append(bead)
        elif len(chain) != len(beads):
            chain.appendleft(chain.pop())
        else:
            return chain
if __name__ == '__main__':
    # NOTE(review): load_num() / load_case() are not visible in this chunk --
    # presumably stdin readers for the case count and bead list; confirm.
    ncases = load_num()
    for c in range(ncases):
        beads = load_case()
        necklace = build_necklace(beads)
        # Print result
        print("Case #{}".format(c+1))
        if necklace:
            # Print all necklace beads together for faster IO (damn timelimits)
            # Almost a third of the time is wasted on IO
            necklace_str = ""
            for b in necklace:
                necklace_str += "{} {}\n".format(b[0], b[1])
        else:
            necklace_str = "some beads may be lost\n"
        # Blank line between cases but not after the last one, hence the
        # trailing-newline strip on the final case.
        if c+1 == ncases:
            print(necklace_str[:-1])
        else:
            print(necklace_str)
549ee02e71d944702ec6c3b3ab3e03cf388c6552 | 458 | py | Python | tests/test_eeg.py | y1ngyang/NeuroKit.py | 867655f84bf210626649bca72258af6a2b5a2791 | [
"MIT"
] | null | null | null | tests/test_eeg.py | y1ngyang/NeuroKit.py | 867655f84bf210626649bca72258af6a2b5a2791 | [
"MIT"
] | null | null | null | tests/test_eeg.py | y1ngyang/NeuroKit.py | 867655f84bf210626649bca72258af6a2b5a2791 | [
"MIT"
] | null | null | null | import pytest
import doctest
import os
import numpy as np
import pandas as pd
import neurokit as nk
run_tests_in_local = False
#==============================================================================
# data
#==============================================================================
#def test_read_acqknowledge():
#
# assert 3 == 3
if __name__ == '__main__':
    # nose.run(defaultTest=__name__)
    # Run doctests embedded in this module, then hand off to pytest.
    doctest.testmod()
    pytest.main()
| 16.962963 | 79 | 0.458515 |
549fb62cea23b9b1c82de165b05b9e48e6855b9f | 231,371 | py | Python | tests/semantics/models.py | dnikolay-ebc/FiLiP | 9a84979da8dff4523cb91e40869070bd02aa91fe | [
"BSD-3-Clause"
] | 6 | 2021-11-21T21:57:38.000Z | 2022-02-22T08:20:30.000Z | tests/semantics/models.py | RWTH-EBC/FiLiP | e294c5ef94b2b6ad9611316e50b5c550bcd77c1b | [
"BSD-3-Clause"
] | 83 | 2021-04-08T18:34:20.000Z | 2022-03-30T12:18:32.000Z | tests/semantics/models.py | dnikolay-ebc/FiLiP | 9a84979da8dff4523cb91e40869070bd02aa91fe | [
"BSD-3-Clause"
] | 5 | 2021-10-04T08:39:21.000Z | 2022-03-30T07:30:57.000Z | """
Autogenerated Models for the vocabulary described by the ontologies:
http://www.semanticweb.org/redin/ontologies/2020/11/untitled-ontology-25 (ParsingTesterOntology)
https://w3id.org/saref (saref.ttl)
"""
from enum import Enum
from typing import Dict, Union, List
from filip.semantics.semantics_models import\
SemanticClass,\
SemanticIndividual,\
RelationField,\
DataField,\
SemanticDeviceClass,\
DeviceAttributeField,\
CommandField
from filip.semantics.semantics_manager import\
SemanticsManager,\
InstanceRegistry
semantic_manager: SemanticsManager = SemanticsManager(
instance_registry=InstanceRegistry(),
)
# ---------CLASSES--------- #
# ---------Individuals--------- #
# ---------Datatypes--------- #
semantic_manager.datatype_catalogue = {
'customDataType1': {
'type': 'enum',
'enum_values': ['0', '15', '30'],
},
'customDataType2': {
'type': 'string',
},
'customDataType3': {
'type': 'string',
},
'customDataType4': {
'type': 'enum',
'enum_values': ['1', '2', '3', '4'],
},
'rational': {
'type': 'number',
'number_decimal_allowed': True,
},
'real': {
'type': 'number',
},
'PlainLiteral': {
'type': 'string',
},
'XMLLiteral': {
'type': 'string',
},
'Literal': {
'type': 'string',
},
'anyURI': {
'type': 'string',
},
'base64Binary': {
'type': 'string',
},
'boolean': {
'type': 'enum',
'enum_values': ['True', 'False'],
},
'byte': {
'type': 'number',
'number_range_min': -128,
'number_range_max': 127,
'number_has_range': True,
},
'dateTime': {
'type': 'date',
},
'dateTimeStamp': {
'type': 'date',
},
'decimal': {
'type': 'number',
'number_decimal_allowed': True,
},
'double': {
'type': 'number',
'number_decimal_allowed': True,
},
'float': {
'type': 'number',
'number_decimal_allowed': True,
},
'hexBinary': {
'allowed_chars': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'],
'type': 'string',
},
'int': {
'type': 'number',
'number_range_min': -2147483648,
'number_range_max': 2147483647,
'number_has_range': True,
},
'integer': {
'type': 'number',
},
'language': {
'type': 'string',
},
'long': {
'type': 'number',
'number_range_min': -9223372036854775808,
'number_range_max': 9223372036854775807,
'number_has_range': True,
},
'Name': {
'type': 'string',
},
'NCName': {
'forbidden_chars': [':'],
'type': 'string',
},
'negativeInteger': {
'type': 'number',
'number_range_max': -1,
'number_has_range': True,
},
'NMTOKEN': {
'type': 'string',
},
'nonNegativeInteger': {
'type': 'number',
'number_range_min': 0,
'number_has_range': True,
},
'nonPositiveInteger': {
'type': 'number',
'number_range_max': -1,
'number_has_range': True,
},
'normalizedString': {
'type': 'string',
},
'positiveInteger': {
'type': 'number',
'number_range_min': 0,
'number_has_range': True,
},
'short': {
'type': 'number',
'number_range_min': -32768,
'number_range_max': 32767,
'number_has_range': True,
},
'string': {
'type': 'string',
},
'token': {
'type': 'string',
},
'unsignedByte': {
'type': 'number',
'number_range_min': 0,
'number_range_max': 255,
'number_has_range': True,
},
'unsignedInt': {
'type': 'number',
'number_range_min': 0,
'number_range_max': 4294967295,
'number_has_range': True,
},
'unsignedLong': {
'type': 'number',
'number_range_min': 0,
'number_range_max': 18446744073709551615,
'number_has_range': True,
},
'unsignedShort': {
'type': 'number',
'number_range_min': 0,
'number_range_max': 65535,
'number_has_range': True,
},
}
# ---------Class Dict--------- #
semantic_manager.class_catalogue = {
'Actuating_Function': Actuating_Function,
'Actuator': Actuator,
'Appliance': Appliance,
'Building_Related': Building_Related,
'Class1': Class1,
'Class123': Class123,
'Class13': Class13,
'Class1a': Class1a,
'Class1aa': Class1aa,
'Class1b': Class1b,
'Class2': Class2,
'Class3': Class3,
'Class3a': Class3a,
'Class3aa': Class3aa,
'Class4': Class4,
'Close_Command': Close_Command,
'Close_State': Close_State,
'Coal': Coal,
'Command': Command,
'Commodity': Commodity,
'Currency': Currency,
'Device': Device,
'Door_Switch': Door_Switch,
'Electricity': Electricity,
'Energy': Energy,
'Energy_Meter': Energy_Meter,
'Energy_Related': Energy_Related,
'Energy_Unit': Energy_Unit,
'Event_Function': Event_Function,
'Function': Function,
'Function_Related': Function_Related,
'Gas': Gas,
'Generator': Generator,
'Gertrude': Gertrude,
'Get_Command': Get_Command,
'Get_Current_Meter_Value_Command': Get_Current_Meter_Value_Command,
'Get_Meter_Data_Command': Get_Meter_Data_Command,
'Get_Meter_History_Command': Get_Meter_History_Command,
'Get_Sensing_Data_Command': Get_Sensing_Data_Command,
'Humidity': Humidity,
'Hvac': Hvac,
'Illuminance_Unit': Illuminance_Unit,
'Level_Control_Function': Level_Control_Function,
'Light': Light,
'Light_Switch': Light_Switch,
'Lighting_Device': Lighting_Device,
'Load': Load,
'Measurement': Measurement,
'Meter': Meter,
'Metering_Function': Metering_Function,
'Micro_Renewable': Micro_Renewable,
'Motion': Motion,
'Multi_Level_State': Multi_Level_State,
'Multimedia': Multimedia,
'Network': Network,
'Notify_Command': Notify_Command,
'Occupancy': Occupancy,
'Off_Command': Off_Command,
'Off_State': Off_State,
'On_Command': On_Command,
'On_Off_Function': On_Off_Function,
'On_Off_State': On_Off_State,
'On_State': On_State,
'Open_Close_Function': Open_Close_Function,
'Open_Close_State': Open_Close_State,
'Open_Command': Open_Command,
'Open_State': Open_State,
'Pause_Command': Pause_Command,
'Power': Power,
'Power_Unit': Power_Unit,
'Pressure': Pressure,
'Pressure_Unit': Pressure_Unit,
'Price': Price,
'Profile': Profile,
'Property': Property,
'Sensing_Function': Sensing_Function,
'Sensor': Sensor,
'Service': Service,
'Set_Absolute_Level_Command': Set_Absolute_Level_Command,
'Set_Level_Command': Set_Level_Command,
'Set_Relative_Level_Command': Set_Relative_Level_Command,
'Smoke': Smoke,
'Smoke_Sensor': Smoke_Sensor,
'Start_Command': Start_Command,
'Start_State': Start_State,
'Start_Stop_Function': Start_Stop_Function,
'Start_Stop_State': Start_Stop_State,
'State': State,
'Step_Down_Command': Step_Down_Command,
'Step_Up_Command': Step_Up_Command,
'Stop_Command': Stop_Command,
'Stop_State': Stop_State,
'Storage': Storage,
'Switch': Switch,
'Switch_On_Service': Switch_On_Service,
'Task': Task,
'Temperature': Temperature,
'Temperature_Sensor': Temperature_Sensor,
'Temperature_Unit': Temperature_Unit,
'Thing': Thing,
'Time': Time,
'Toggle_Command': Toggle_Command,
'Washing_Machine': Washing_Machine,
'Water': Water,
}
semantic_manager.individual_catalogue = {
'Individual1': Individual1,
'Individual2': Individual2,
'Individual3': Individual3,
'Individual4': Individual4,
'United_States_Dollar': United_States_Dollar,
'Bar': Bar,
'Degree_Celsius': Degree_Celsius,
'Degree_Fahrenheit': Degree_Fahrenheit,
'Euro': Euro,
'Kelvin': Kelvin,
'Kilowatt': Kilowatt,
'Kilowatt_Hour': Kilowatt_Hour,
'Lux': Lux,
'Pascal': Pascal,
'Great_Britain_Pound_Sterling': Great_Britain_Pound_Sterling,
'Watt': Watt,
'Cleaning': Cleaning,
'Close': Close,
'Comfort': Comfort,
'Drying': Drying,
'Energyefficiency': Energyefficiency,
'Entertainment': Entertainment,
'Get_Current_Meter_Value': Get_Current_Meter_Value,
'Get_Meter_Data': Get_Meter_Data,
'Get_Meter_History': Get_Meter_History,
'Get_Sensing_Data': Get_Sensing_Data,
'Lighting': Lighting,
'Meter_Reading': Meter_Reading,
'Notify': Notify,
'Off_': Off_,
'On': On,
'Open': Open,
'Pause': Pause,
'Safety': Safety,
'Set_Absolute_Level': Set_Absolute_Level,
'Set_Relative_Level': Set_Relative_Level,
'Start': Start,
'Step_Down': Step_Down,
'Step_Up': Step_Up,
'Stop': Stop,
'Toggle': Toggle,
'Washing': Washing,
'Wellbeing': Wellbeing,
'Watt_Hour': Watt_Hour,
}
| 29.788979 | 165 | 0.73243 |
549fd848dd75d3c337cc6b1655249d58340ef912 | 2,744 | py | Python | plotting/trackTurnOn.py | will-fawcett/trackerSW | fc097b97539d0b40a15e1d6e112f4048cb4122b4 | [
"MIT"
] | null | null | null | plotting/trackTurnOn.py | will-fawcett/trackerSW | fc097b97539d0b40a15e1d6e112f4048cb4122b4 | [
"MIT"
] | null | null | null | plotting/trackTurnOn.py | will-fawcett/trackerSW | fc097b97539d0b40a15e1d6e112f4048cb4122b4 | [
"MIT"
] | null | null | null |
from utils import prepareLegend
from colours import colours
from ROOT import *
gROOT.SetBatch(1)
gStyle.SetPadLeftMargin(0.15) # increase space for left margin
gStyle.SetPadBottomMargin(0.15) # increase space for left margin
gStyle.SetGridStyle(3)
gStyle.SetGridColor(kGray)
gStyle.SetPadTickX(1) # add tics on top x
gStyle.SetPadTickY(1) # add tics on right y
OUTPUT_DIR = 'plots/'
REBIN = 2
if __name__ == "__main__":
main()
| 27.717172 | 94 | 0.623178 |
54a054f1ed42ee815b1ac8ae21d88b15ea91f8bb | 154 | py | Python | pybo/inits/__init__.py | hfukada/pybo | 3be57adad901fcd8d45b8ee2af7c6032ab47611d | [
"BSD-2-Clause"
] | 115 | 2015-01-21T21:31:22.000Z | 2021-08-08T17:10:16.000Z | pybo/inits/__init__.py | hfukada/pybo | 3be57adad901fcd8d45b8ee2af7c6032ab47611d | [
"BSD-2-Clause"
] | 5 | 2016-02-24T16:00:01.000Z | 2020-12-21T00:28:30.000Z | pybo/inits/__init__.py | hfukada/pybo | 3be57adad901fcd8d45b8ee2af7c6032ab47611d | [
"BSD-2-Clause"
] | 35 | 2015-02-27T15:27:36.000Z | 2020-08-19T07:43:53.000Z | """
Initialization methods.
"""
# pylint: disable=wildcard-import
from .methods import *
from . import methods
__all__ = []
__all__ += methods.__all__
| 12.833333 | 33 | 0.714286 |
54a07034e31ea393994499d210b41085f8ae28cb | 2,362 | py | Python | src/Process/Process.py | mauriciocarvalho01/pln_api | 06743f1ae9e084ad15f1c91b32eb3719344f4a4b | [
"MIT"
] | 1 | 2021-12-14T19:10:44.000Z | 2021-12-14T19:10:44.000Z | src/Process/Process.py | mauriciocarvalho01/pln_api | 06743f1ae9e084ad15f1c91b32eb3719344f4a4b | [
"MIT"
] | null | null | null | src/Process/Process.py | mauriciocarvalho01/pln_api | 06743f1ae9e084ad15f1c91b32eb3719344f4a4b | [
"MIT"
] | null | null | null | import spacy
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from string import punctuation
from tqdm import tqdm
from rank_bm25 import BM25Okapi
import time
from collections import defaultdict
from heapq import nlargest
import nltk
nltk.download('punkt')
nltk.download('stopwords')
from operator import itemgetter
from .ProcessFiles import ProcessFiles
from src.Entity.ChatResponse import ChatResponse
from src.Entity.Files import Files
from .Thread import Thread
from .Resume import Resume
from .Tools import Tools
| 34.735294 | 148 | 0.600762 |