blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8bb37bb464d97c50faca4fb42dc99deb5756e409 | Python | dlin94/leetcode | /array/496_next_greater_element.py | UTF-8 | 863 | 3.671875 | 4 | [] | no_license | def next_greater_element(nums1, nums2):
    # Brute force (LeetCode 496): for each query value in nums1, locate it in
    # nums2, then scan rightwards for the first strictly larger value.
    # Returns -1 for a query with no greater element. O(len(nums1)*len(nums2)^2).
    max_num = 0  # NOTE(review): never used after initialisation
    return_list = []
    for i in range(0, len(nums1)):
        next_greater = -1
        for j in range(0, len(nums2)):
            if nums2[j] == nums1[i]:
                # Found the query value; look right of it for something bigger.
                for k in range (j+1, len(nums2)):
                    if nums2[k] > nums2[j]:
                        next_greater = nums2[k]
                        break
                break
        return_list.append(next_greater)
    return return_list
def next_greater_element_alt(nums1, nums2):
    """Monotonic-stack solution to LeetCode 496.

    One pass over nums2 records, for every element, the first strictly
    greater element to its right; queries from nums1 are then answered by
    dictionary lookup (-1 when no greater element exists).
    O(len(nums1) + len(nums2)) time.
    """
    next_greater = {}
    pending = []  # decreasing stack of values still waiting for a greater one
    for value in nums2:
        # Every pending value smaller than `value` has found its answer.
        while pending and pending[-1] < value:
            next_greater[pending.pop()] = value
        pending.append(value)
    return [next_greater.get(query, -1) for query in nums1]
# Demo: for queries [2, 4] over [1, 2, 3, 4] both versions print [3, -1].
print(next_greater_element([2,4], [1,2,3,4]))
print(next_greater_element_alt([2,4], [1,2,3,4]))
| true |
98ad43be156da186afbc5c39d3557c4d1243e8b8 | Python | Arvintian/pretty-log-py | /pretty_logging/escape.py | UTF-8 | 969 | 3.15625 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import sys
# True when running under Python 3; used to alias the py2-only string types.
PY3 = sys.version_info >= (3,)
if PY3:
    unicode_type = str
    basestring_type = str
else:
    # The names unicode and basestring don't exist in py3 so silence flake8.
    unicode_type = unicode # noqa
    basestring_type = basestring # noqa
# Types that to_unicode() passes through unchanged.
_TO_UNICODE_TYPES = (unicode_type, type(None))
def to_unicode(value):
    """Coerce *value* to a unicode string.

    Unicode strings and None pass through unchanged; byte strings are
    decoded as UTF-8. Anything else raises TypeError.
    """
    if isinstance(value, _TO_UNICODE_TYPES):
        return value
    if isinstance(value, bytes):
        return value.decode("utf-8")
    raise TypeError(
        "Expected bytes, unicode, or None; got %r" % type(value)
    )
# Historical alias: the old name avoided clashing with the built-in
# unicode() function/type on Python 2, not privacy.
_unicode = to_unicode
| true |
c7a6262ea89c52031e393343e8fa9224c18dd1fc | Python | lekha-badarinath/CodingForInterview | /Concepts/linkedLists.py | UTF-8 | 1,349 | 4.09375 | 4 | [] | no_license | class Element(): #Creating a container for linked list
    def __init__(self,value):
        # One list node: the stored payload plus a link to the next Element.
        self.value = value
        self.next = None
class LinkedList():
    """Minimal singly linked list built from Element nodes."""

    def __init__(self, head=None):
        # Optional starting node; an empty list has head None.
        self.head = head

    def atBeginning(self, beginning):
        """Prepend a new Element holding `beginning`."""
        beginningNode = Element(beginning)
        beginningNode.next = self.head
        self.head = beginningNode

    def atEnd(self, end):
        """Append a new Element holding `end`.

        Bug fix: the original fell through after installing the node as the
        head of an empty list, then walked to that same node and linked it to
        itself, creating a cycle (and an infinite loop in printVal). Return
        immediately instead.
        """
        endNode = Element(end)
        if self.head is None:
            self.head = endNode
            return
        last = self.head
        while last.next:
            last = last.next
        last.next = endNode

    def printVal(self):
        """Print every stored value, one per line.

        Bug fix: the original only printed nodes that had a successor, so the
        final value was always skipped.
        """
        node = self.head
        while node is not None:
            print(node.value)
            node = node.next
# Demo: hand-wire 11 -> 12 -> 15 -> 14, then exercise the list helpers.
node0 = LinkedList()
node0.head = Element(11) #Inserting the first element to the head of the LL
node1 = Element(12)
node2 = Element(15)
node3 = Element(14)
print (node1.next)
node0.head.next = node1 #Connecting the head to the next value
node1.next = node2
node2.next = node3
node0.atBeginning(10)
node0.atBeginning(9)
node0.atEnd(20)
print (node0.head.value,node0.head.next)
print (node0.printVal())
| true |
85e1cdb1ebd34b383eb560e002b4258488bdcc9e | Python | BaoAdrian/interview-prep | /Algorithms/merge_sort.py | UTF-8 | 1,885 | 3.984375 | 4 | [] | no_license | class Node:
    def __init__(self, value):
        # Singly linked list node: payload plus next pointer.
        self.value = value
        self.next = None
    def __str__(self):
        # Render the whole chain from this node, e.g. "[ 3 ] > [ 6 ] > ".
        return_str = ""
        curr = self
        while curr:
            return_str += "[ {} ] > ".format(curr.value)
            curr = curr.next
        return return_str
def merge_sort_linked_list(ll):
    """Merge-sort a Node chain in place; returns the new head."""
    # A chain of zero or one node is already sorted.
    if ll is None or ll.next is None:
        return ll
    front, back = split_list(ll)
    sorted_front = merge_sort_linked_list(front)
    sorted_back = merge_sort_linked_list(back)
    return merge(sorted_front, sorted_back)
def split_list(ll):
    """Split a Node chain at its midpoint with slow/fast pointers.

    Returns (front_half, back_half) with the link between the halves
    severed. Expects at least two nodes.
    """
    slow, fast = ll, ll.next
    while fast is not None:
        fast = fast.next
        if fast is not None:
            slow = slow.next
            fast = fast.next
    back = slow.next
    slow.next = None  # cut the chain between the halves
    return ll, back
def merge(left, right):
    """Merge two sorted Node chains; returns the head of the merged chain.

    Robustness fix: the original dereferenced curr.next after the loop even
    when one input was None and the loop never ran (curr still None),
    raising AttributeError. Short-circuit empty inputs instead.
    """
    if left is None:
        return right
    if right is None:
        return left
    head = None
    curr = None
    # Repeatedly take the smaller front node; <= keeps the merge stable.
    while left and right:
        if left.value <= right.value:
            if head is None:
                head = curr = left
            else:
                curr.next = left
                curr = left
            left = left.next
        else:
            if head is None:
                head = curr = right
            else:
                curr.next = right
                curr = right
            right = right.next
    # Exactly one side is exhausted; append the remainder of the other.
    curr.next = left if right is None else right
    return head
if __name__ == "__main__":
head = Node(3)
second = Node(6)
third = Node(2)
fourth = Node(1)
head.next = second
second.next = third
third.next = fourth
print("Input: {}".format(head))
head = merge_sort_linked_list(head)
print("Sorted: {}".format(head))
| true |
bf8cc6c5f6f83a48677231bc47847156e6e46bee | Python | dxt9140/CV_Frogger | /src/BlueMSX.py | UTF-8 | 2,146 | 2.53125 | 3 | [] | no_license | import threading
import os
from pynput.keyboard import Controller, Key
import shutil
import subprocess
from definitions import PROJECT_DIR
import time
class BlueMSX(threading.Thread):
    # Background thread that launches the blueMSX emulator and drives it with
    # synthetic keystrokes through a pynput keyboard Controller (`kb`).
    def __init__(self, kb):
        threading.Thread.__init__(self)
        self._stop_event = threading.Event()
        self.should_stop = False  # polled by run(); set via stop()
        self.running = False      # True while the emulator loop is alive
        self.kb = kb              # pynput.keyboard.Controller
    def run(self):
        # Launch the emulator executable shipped next to the project.
        try:
            pipe = subprocess.Popen(PROJECT_DIR + "\\blueMSX.exe")
        except WindowsError:
            print("Exception thrown when opening pipe. Exiting.")
            return
        # clear the screenshots
        if os.path.isdir("../Screenshots"):
            shutil.rmtree("../Screenshots")
        self.running = True
        # NOTE(review): busy-wait pins a CPU core; Event.wait() would be the
        # usual fix.
        while not self.should_stop:
            continue
        if self.should_stop and not self.is_stopped():
            # Shift+Esc asks blueMSX to quit.
            # NOTE(review): Key(Key.shift) re-resolves the enum member to
            # itself; plain Key.shift is presumably what was meant -- confirm.
            self.kb.press(Key(Key.shift))
            self.kb.press(Key(Key.esc))
            self.kb.release(Key(Key.esc))
            self.kb.release(Key(Key.shift))
        self._stop_event.set()
        self.running = False
    def stop(self):
        # Request shutdown; run() notices on its next poll.
        self.should_stop = True
    def is_stopped(self):
        return self._stop_event.is_set()
    def take_screenshot(self):
        # blueMSX captures a frame when PrintScreen is pressed.
        self.kb.press(Key.print_screen.value)
        self.kb.release(Key.print_screen.value)
    def pause(self):
        # F9 toggles pause in blueMSX.
        self.send_keys([Key.f9])
    def send_keys(self, keys):
        # Tap each key once (press + release).
        for key in keys:
            self.kb.press(key)
            self.kb.release(key)
    # WASD movement taps; the sleeps give the emulator time to register the
    # press and debounce consecutive moves.
    def up(self):
        #time.sleep(0.010)
        self.kb.press('w')
        time.sleep(0.020)
        self.kb.release('w')
        time.sleep(0.010)
    def down(self):
        time.sleep(0.010)
        self.kb.press('s')
        time.sleep(0.020)
        self.kb.release('s')
        time.sleep(0.010)
    def left(self):
        time.sleep(0.010)
        self.kb.press('a')
        time.sleep(0.020)
        self.kb.release('a')
        time.sleep(0.010)
    def right(self):
        time.sleep(0.010)
        self.kb.press('d')
        time.sleep(0.020)
        self.kb.release('d')
        time.sleep(0.010)
| true |
0c2a77ad7445960599d610087de6e2f5bed6f2f0 | Python | dsacchet/domot-api | /src/handlers/vmc/unelvent/mode/put | UTF-8 | 1,061 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/python
import minimalmodbus
import sys
value=['low','boost','bypass']
instrument = minimalmodbus.Instrument('/dev/ttyVMC1',0)
instrument.serial.baudrate = 19200
instrument.serial.bytesize = 8
instrument.serial.parity = 'E'
instrument.serial.stopbits = 1
def read_value(address):
while True:
try:
result = instrument.read_register(address,0,3,False)
except ValueError, IOError:
time.sleep(1)
continue
break
return result
def write_value(address,value):
while True:
try:
result = instrument.write_register(address,value)
except ValueError, IOError:
time.sleep(1)
continue
break
return result
if len(sys.argv) == 2:
newvalue=int(sys.argv[1])
result = read_value(15)
print "Current setting : ",value[result]
if newvalue != result:
print "Change setting to : ",value[newvalue]
write_value(15,newvalue)
result = read_value(15)
print "New setting : ",value[result]
else:
print "Same value, nothing to do"
else:
print "Usage : ",sys.argv[0]," <0|1|2>"
| true |
34752204492c137e3e26f70884ce958ce33ff736 | Python | dibsonthis/Movie-Randomizer | /movie_randomizer.py | UTF-8 | 4,152 | 2.875 | 3 | [
"MIT"
] | permissive | import requests
from bs4 import BeautifulSoup
import json
import random
genres = ['action-and-adventure', 'animation', 'anime', 'biography', 'children', 'comedy', 'crime', 'cult', 'documentary', 'drama', 'family', 'fantasy', 'history', 'horror', 'mystery', 'romance', 'science-fiction', 'thriller', 'all']
def get_titles(genre, amount=100):
    """Scrape up to `amount` movie titles for a genre from reelgood.com
    (50 titles per page, hence the stepped offset)."""
    all_titles = []
    for offset in range(amount)[::50]:
        if genre == "all":
            url = "https://reelgood.com/movies/source/netflix?offset=" + str(offset)
        else:
            url = "https://reelgood.com/movies/genre/" + genre + "/on-netflix?offset=" + str(offset)
        raw_html = requests.get(url).content
        html = BeautifulSoup(raw_html, 'html.parser')
        all_title_blocks = html.select('tr')
        for title_block in all_title_blocks:
            if title_block.select('td'):
                # Second table cell holds the title text.
                title = title_block.select('td')[1].get_text()
                all_titles.append(title)
                print(title + ' - Title Added')
    return all_titles
def get_images(genre, amount=100):
    """Scrape poster image URLs for the same listing pages; rows without an
    image fall back to a local placeholder."""
    all_images = []
    for offset in range(amount)[::50]:
        if genre == "all":
            url = "https://reelgood.com/movies/source/netflix?offset=" + str(offset)
        else:
            url = "https://reelgood.com/movies/genre/" + genre + "/on-netflix?offset=" + str(offset)
        raw_html = requests.get(url).content
        html = BeautifulSoup(raw_html, 'html.parser')
        all_title_blocks = html.select('tr')
        for title_block in all_title_blocks:
            if title_block.select('td'):
                title = title_block.select('td')
                all_images.append(title)
    image_links = []
    for index, image in enumerate(all_images):
        try:
            link = image[0].select('img')[0]['src']
        except IndexError:
            link = "static/images/no-image.png"
        image_links.append(link)
        print('Image Added - ({}/{})'.format(index+1,len(all_images)))
    return image_links
def get_description(title):
    """Scrape a short movie summary from a Google search result page.

    Filters result divs by Google's (fragile) CSS class names; falls back to
    a placeholder string when fewer than three matches are found.
    """
    search_url = "https://www.google.com/search?q=" + title + " movie summary"
    raw_html = requests.get(search_url).content
    html = BeautifulSoup(raw_html, 'html.parser')
    all_divs = html.select('div')
    result = []
    for i in all_divs:
        if i.get('class') == ['BNeawe', 's3v9rd', 'AP7Wnd']:
            result.append(i)
    try:
        result = result[2].get_text()
    except IndexError:
        result = 'No Description Available'
    return result
def get_descriptions(titles):
    """Fetch a Google-sourced summary for every title, logging progress.

    Network-bound: one get_description() call (an HTTP request) per title.
    Fix: removed the dead local `search_url`, which was assigned and never
    used (get_description builds its own URL).
    """
    descriptions = []
    for index, title in enumerate(titles):
        description = get_description(title)
        descriptions.append(description)
        print(title + ' - Description Added ({}/{})'.format(index+1, len(titles)))
    return descriptions
def get_genre(genre, amount):
    """Scrape titles, images and descriptions for one genre and cache them
    as a JSON list of {title, description, img} dicts in genres/<genre>.txt."""
    titles = get_titles(genre, amount)
    images = get_images(genre, amount)
    descriptions = get_descriptions(titles)
    result = []
    for index, title in enumerate(titles):
        result.append( {'title': title, 'description': descriptions[index], 'img': images[index]} )
    with open('genres/' + genre + '.txt', 'w') as file:
        json.dump(result, file)
def get_all(genres, amount=100):
    """Build the cache file for every genre in `genres`."""
    for index, genre in enumerate(genres):
        get_genre(genre, amount)
        print(genre + ' - Added ({}/{})'.format(index+1, len(genres)))
def get_genre_data(genre):
    """Load the cached JSON entry list for one genre from genres/<genre>.txt."""
    path = 'genres/' + genre + '.txt'
    with open(path) as fh:
        return json.load(fh)
def get_all_genre_data(genres):
    """Load every genre's cached entry list; returns a list of lists."""
    return [get_genre_data(genre) for genre in genres]
def randomize(genre):
    """Return one randomly chosen movie entry from the cached genre file.

    Fix: random.choice is the idiomatic stdlib way to pick an element,
    replacing the manual randint-then-index pair. (An empty cache file now
    raises IndexError instead of ValueError.)
    """
    with open('genres/' + genre + '.txt') as fh:
        entries = json.load(fh)
    return random.choice(entries)
| true |
56a733ee86926b08cc3306a37725d587d3128455 | Python | jawang35/project-euler | /python/lib/problem28.py | UTF-8 | 1,000 | 4.0625 | 4 | [
"MIT"
] | permissive | '''
Problem 28 - Number Spiral Diagonals
Starting with the number 1 and moving to the right in a clockwise direction a 5
by 5 spiral is formed as follows:
21 22 23 24 25
20 7 8 9 10
19 6 1 2 11
18 5 4 3 12
17 16 15 14 13
It can be verified that the sum of the numbers on the diagonals is 101.
43 44 45 46 47 48 49
42 21 22 23 24 25 26
41 20 7 8 9 10 27
40 19 6 1 2 11 28
39 18 5 4 3 12 29
38 17 16 15 14 13 30
37 36 35 34 33 32 31
What is the sum of the numbers on the diagonals in a 1001 by 1001 spiral formed
in the same way?
'''
from functools import partial
from lib.helpers.runtime import print_answer_and_elapsed_time
def sum_spiral_diagonals(size):
assert size % 2 == 1
sum = 1
n = 1
radius = 1
while n < size**2:
for _ in range(4):
n += 2 * radius
sum += n
radius += 1
return sum
answer = partial(sum_spiral_diagonals, size=1001)
if __name__ == '__main__':
print_answer_and_elapsed_time(answer)
| true |
6f1ea24d336ab20139733dbbbefbbac92cdd6224 | Python | quake0day/oj | /tree_S_expression.py | UTF-8 | 2,066 | 3.40625 | 3 | [
"MIT"
] | permissive | class TreeNode(object):
    # NOTE(review): TreeNode is never used by SExp below.
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution(object):
    # Python 2 code (xrange). Parses "(P,C);(P,C);..." edges over nodes
    # 'A'-'Z', validates the binary tree, and returns its S-expression with
    # the lexicographically smaller child printed last.
    # Error codes: E1 >2 children, E2 duplicate edge, E3 cycle/no root,
    # E4 node with two parents, E5 invalid node / multiple trees.
    def SExp(self, pair):
        # links[i] = [parent, child1, child2] for node chr(65+i).
        links = [[None, None, None] for _ in xrange(26)]
        edges = pair.split(";")
        for edge in edges:
            edge = edge.replace('(','').replace(')','').replace(' ','')
            a,b = edge.split(',')
            # Change node value into index
            a = ord(a) - ord('A')
            b = ord(b) - ord('A')
            if a < 0 or a > 25 or b < 0 or b > 25:
                raise Exception("E5: invalid node value")
            # b already has a parent -> two parents.
            # NOTE(review): truthiness check misbehaves when the parent is
            # node 'A' (index 0, falsy) -- confirm intended.
            if links[b][0]:
                raise Exception ('E4')
            # record the father node for a certain node
            links[b][0] = a
            # record father's child node; child1 always keeps the larger
            # letter so output() can emit the smaller child last.
            if links[a][1] == None:
                links[a][1] = b
            elif links[a][2] == None:
                if b < links[a][1]:
                    links[a][2] = b
                else:
                    links[a][2] = links[a][1]
                    links[a][1] = b
            elif links[a][1] == b or links[a][2] == b:
                raise Exception('E2')
            else:
                raise Exception('E1')
        # find root: the only referenced node without a parent.
        root = None
        for idx in range(26):
            parent, child1, child2 = links[idx]
            if parent == None and (child1 != None or child2 != None):
                if root:
                    raise Exception('E5: multiple tree!')
                root = idx
        if root == None:
            raise Exception("E3") # no root -> cycle present
        # avoid cycle: BFS level by level; revisiting any node means a cycle.
        seen = [False for i in xrange(26)]
        layer = [root]
        while layer:
            tmpLayer = []
            for node in layer:
                if seen[node]:
                    raise Exception("E3")
                seen[node] = True
                if links[node][1] != None:
                    tmpLayer.append(links[node][1])
                if links[node][2] != None:
                    tmpLayer.append(links[node][2])
            layer = tmpLayer
        # Recursively emit "(<node><smaller-child><larger-child>)".
        def output(node):
            if links[node][1] == None:
                return "(%s)" % (chr(65+node))
            tmp = output(links[node][1])
            if links[node][2] == None:
                return "(%s%s)" % (chr(65+node), tmp)
            tmp2 = output(links[node][2])
            return "(%s%s%s)" % (chr(65 + node), tmp2, tmp)
        return output(root)
# Demo (Python 2 print statement).
a = Solution()
print a.SExp("(A,B);(A,C);(B,G);(C,H);(E,F);(B,D);(C,E)")
121e1baf276dcf7c15946471e55d7c5a87c292e9 | Python | Centpledge/BUILDING-2 | /asd.py | UTF-8 | 76 | 2.625 | 3 | [] | no_license | a = ['1']
b = ['1']
# Python 2 demo: both conditions hold, so 'a' and 'b' are printed.
if a !=[] :
    print 'a'
if len(b) ==1 :
    print 'b'
| true |
62354d19eba554c96444766fd29eb4011a0e2fdb | Python | kalleaaltonen/csolve | /chess.py | UTF-8 | 8,019 | 2.75 | 3 | [] | no_license | from itertools import chain,product,combinations
import copy
import operator
import string
import datrie
import time
# R Rook
# N knight
# B Bishop
# Q Queen
# K King
PIECES = set("RNBQK")
def prune(iter, bx, by):
    """Lazily filter coordinate pairs down to those on the bx-by board."""
    for x, y in iter:
        if 0 <= x < bx and 0 <= y < by:
            yield (x, y)
def threatens(piece, x, y, bx, by):
    """Yield every square threatened by `piece` standing on (x, y).

    piece is one of R/N/B/K/Q; bx, by are the board dimensions. Off-board
    squares are pruned. Returns a generator, or None (after printing a
    warning) for an unknown piece code.
    """
    if piece == "R":
        # Whole rank and file, excluding the piece's own square.
        return chain(((x,j) for j in range(by) if j != y),
                     ((i,y) for i in range(bx) if i != x))
    elif piece == "K":
        # The 3x3 neighbourhood minus the centre.
        return prune(((x+i-1,y+j-1) for i in range(3) for j in range(3) if (i,j) != (1,1)), bx, by)
    elif piece == "N":
        # All (+-1, +-2) and (+-2, +-1) jumps.
        a=([-1,1],[-2,2])
        return prune(((x+i,y+j) for (i,j) in chain(product(*a), product(*a[::-1]))), bx, by)
    elif piece == "B":
        # Both diagonals, excluding the piece's own square.
        return prune(chain(((x+i,y+i) for i in range(-bx,bx) if i!=0), ((x+i,y-i) for i in range(-bx,bx) if i!=0)), bx, by)
    elif piece == "Q":
        # Queen = rook + bishop moves.
        return chain(threatens("R",x,y,bx,by), threatens("B",x,y,bx,by))
    else:
        # Bug fix: the original passed the piece as a second print argument,
        # so the literal "%s" was never interpolated.
        print('unknown piece %s' % piece)
# Board-transform helpers: each takes a list of rows and returns a new one.
FLIP = lambda a: list(reversed(a))
NOFLIP = lambda a: a
ROTATE0 = lambda a: a
# Rotate 90 degrees clockwise: transpose of the vertically reversed rows.
ROTATE90 = lambda a: [list(t) for t in zip(*a[::-1])]
ROTATE180 = lambda a: ROTATE90(ROTATE90(a))
ROTATE270 = lambda a: ROTATE90(ROTATE90(ROTATE90(a)))
class Board(object):
    """Snapshot of a bx-by board for the piece-placement search.

    data -- list of bx rows, each a list of by characters ('.' = empty).
    free -- set of (x, y) squares still unoccupied and unthreatened.
    repr -- cached string form, also used for equality and hashing.
    """
    def __init__(self, bx, by, data=None, free=None):
        self.bx = bx
        self.by = by
        self.data = data or [list('.'*by) for j in range(bx)]
        if free == None and not data:
            # Fresh board: every square is available.
            self.free = set(product(range(bx), range(by)))
        else:
            self.free = free
        self.repr = "\n".join("".join(row) for row in self.data)

    def __repr__(self):
        return self.repr

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and self.__repr__() == other.__repr__())

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return self.__repr__().__hash__()

    def rotations(self):
        """All flip/rotation images of this board.

        Doesn't translate free lists. NOTE(review): a rotated non-square
        board keeps the old bx/by -- confirm boards are always square here.
        """
        return set(Board(self.bx,self.by,data=list(fs[0](fs[1](self.data)))) for fs in product([FLIP, NOFLIP], [ROTATE0,ROTATE90,ROTATE180,ROTATE270]))

    def get_canonical(self):
        """Lexicographically smallest repr over all symmetries."""
        return min(r.__repr__() for r in self.rotations())

    def is_canonical(self):
        # Bug fix: the original called the bare name get_canonical(self),
        # which raises NameError -- it is a method of this class.
        return self.__repr__() == self.get_canonical()

    def add_piece(self, moves):
        """Try to place [(piece, x, y), ...]; return a new Board or None."""
        if not moves or any(move[1:] not in self.free for move in moves):
            return None
        # Every square attacked by the new pieces.
        threats = set(chain( *[threatens(*(t+(self.bx,self.by))) for t in moves]))
        # The new pieces may not attack any existing piece...
        if any(self.data[i][j] != '.' for (i,j) in threats):
            return None
        # ...nor each other.
        if any(move[1:] in threats for move in moves):
            return None
        newFree = self.free - threats - set(move[1:] for move in moves)
        newData = copy.deepcopy(self.data)
        for (p,x,y) in moves:
            newData[x][y] = p
        return Board(self.bx, self.by, data=newData, free=newFree)
def impact(piece, x, y):
    """How many squares `piece` threatens from the centre of an x-by-y board."""
    return sum(1 for _ in threatens(piece, x/2, y/2, x, y))
def get_ordering(pieces, board):
    """Default placement order for (piece, count) pairs.

    Pieces later in "BNKRQ" come first (queens, rooks, kings, knights,
    bishops). `board` is unused but kept for signature parity with the
    other ordering strategies.
    """
    rank = "BNKRQ"
    return sorted(pieces, key=lambda piece: rank.index(piece[0]), reverse=True)
# Alternative piece orderings benchmarked in start(); each returns a new list.
def most_impactful_first(pieces, board):
    # Pieces that threaten the most squares from the board centre first.
    return sorted(pieces, key=lambda x: impact(x[0], board.bx, board.by), reverse=True)
def least_first(pieces, board):
    # Small piece counts first; higher impact breaks ties downwards.
    return sorted(pieces, key=lambda x: x[1]*100-impact(x[0], board.bx, board.by))
def fixed_order(pieces, board):
    # Static order: queens, rooks, kings, knights, bishops.
    return sorted(pieces, key=lambda x: list("BNKRQ").index(x[0]), reverse=True)
def most_impact_per_move(pieces, board):
    # Total threatened squares per candidate placement, cheapest first.
    return sorted(pieces, key=lambda x: x[1]*impact(x[0], board.bx, board.by)/len(list(combinations(board.free, x[1]))), reverse=False)
def free_rows_and_columns(data):
    """Return (#completely-empty rows, #completely-empty columns) of the grid."""
    def is_empty(line):
        return all(square == '.' for square in line)
    empty_rows = sum(1 for row in data if is_empty(row))
    empty_cols = sum(1 for col in zip(*data[::-1]) if is_empty(col))
    return (empty_rows, empty_cols)
def filterNode(n,pieces):
    """Cheap feasibility check: can the remaining `pieces` still fit on n?"""
    # Not enough safe squares left for the remaining piece count.
    if len(n.free) < sum(p[1] for p in pieces):
        return False
    # Each queen/rook still to place needs an entirely empty row AND column.
    queens_and_rooks = sum(p[1] for p in pieces if p[0] in {'Q','R'})
    if queens_and_rooks > min(free_rows_and_columns(n.data)):
        return False
    return True
def solve(board,pieces):
    # Python 2. Breadth-first placement: expand every candidate board by all
    # placements of the next piece type, deduplicating by canonical form.
    candidates=[board]
    next_candidates = iter([])
    pieces = get_ordering(pieces, board)
    print "pieces %s" % (pieces,)
    # NOTE(review): the datrie is immediately shadowed by a plain dict.
    trie = datrie.Trie("%s.\n"%pieces)
    trie = {}
    while pieces:
        (piece, count) = pieces.pop()
        print "processing %s %i" % (piece, count)
        for c in candidates:
            cform = unicode(c.get_canonical().__repr__())
            if cform not in trie:
                trie[cform] = True
                # All ways to drop `count` copies of `piece` on free squares.
                # NOTE(review): the generator variable c shadows the loop's c.
                moves = ( [(piece,) + t for t in c] for c in combinations(c.free, count))
                next_candidates = chain(next_candidates, [c.add_piece(move) for move in moves])
        # Keep only boards that survived placement and can still finish.
        candidates = (n for n in next_candidates if n != None and filterNode(n, pieces))
        next_candidates = iter([])
    return candidates
def solve_dfs(board, pieces,ordering):
    # Python 2. Depth-first variant: a stack of (board, remaining pieces),
    # with canonical-form dedup and filterNode pruning; returns all solutions.
    t = time.time()
    pieces = ordering(pieces, board)
    stack = [(board, pieces)]
    solutions = []
    discovered = datrie.Trie("%s.\n"%pieces)
    print "order: %s" % pieces
    while stack:
        b, ps = stack.pop()
        (p, count), left = ps[0], ps[1:]
        moves = ( [(p,) + t for t in c] for c in combinations(b.free, count))
        for move in moves:
            c = b.add_piece(move)
            if not c:
                continue
            cform = unicode(c.get_canonical().__repr__())
            if cform in discovered:
                continue
            discovered[cform] = True
            if not filterNode(c, left):
                continue
            if not left:
                # All pieces placed: record a solution.
                solutions.append(c)
                continue
            stack.append((c, left))
    print "took %f" %(time.time()-t)
    return solutions
def start(bx, by, pieces):
    # Python 2. Run the DFS under each ordering strategy and report the
    # number of distinct (canonical) solutions found by each.
    board = Board(bx,by)
    results = solve_dfs(board, pieces, most_impactful_first)
    print "===== RESULTS =============="
    print len(list(results))
    results = solve_dfs(board, pieces, least_first)
    print "===== RESULTS =============="
    print len(list(results))
    results = solve_dfs(board, pieces, most_impact_per_move)
    print "===== RESULTS =============="
    print len(list(results))
    results = solve_dfs(board, pieces, fixed_order)
    print len(list(results))
if __name__ == "__main__":
    #start(7,8,[("K",3),('Q',1),('B',2),('R',2), ('N',3)])
    #start(6,6,[("K",2),('Q',1),('B',3),('R',2), ('N',1)])
    #start(6,6,[('Q',2), ('R', 2), ('N',2)])
    #start(3,3,[('K',1), ('R', 2)])
    start(5,5,[("K",1),('Q',1),('B',1),('R',1), ('N',1)])
| true |
a16f6727b5453f125540b1546271fd1137b6c799 | Python | jtsherba/db-factfinder | /factfinder/special.py | UTF-8 | 4,393 | 2.5625 | 3 | [
"MIT"
] | permissive | import math
import numpy as np
import pandas as pd
def pivot(df: pd.DataFrame, base_variables: list) -> pd.DataFrame:
    """Reshape long-format (census_geoid, pff_variable, e, m) rows into one
    wide row per census_geoid with '<var>e' / '<var>m' columns for every
    variable in base_variables."""
    wide = df[["census_geoid", "pff_variable", "e", "m"]].pivot(
        index="census_geoid", columns="pff_variable", values=["e", "m"]
    )
    out = pd.DataFrame({"census_geoid": wide.index})
    for var in base_variables:
        out[var + "e"] = wide["e"].loc[out["census_geoid"], var].to_list()
        out[var + "m"] = wide["m"].loc[out["census_geoid"], var].to_list()
    return out
def hovacrtm(hovacue, vacsalee, vacsalem, hovacum):
    """Margin of error (in percent) for the homeowner vacancy rate.

    Standard ACS ratio-MOE formula; when the usual difference under the
    square root would be negative, the sum is used instead. Returns 0 when
    either the denominator estimate or the numerator estimate is 0.
    """
    if hovacue == 0 or vacsalee == 0:
        return 0
    cross = vacsalee * hovacum / hovacue
    diff = vacsalem ** 2 - cross ** 2
    inner = diff if diff >= 0 else vacsalem ** 2 + cross ** 2
    return math.sqrt(inner) / hovacue * 100
# Each "special" measure below pivots the long-format input wide, then
# derives an estimate column "e" and margin-of-error column "m" from the
# base variables using the ACS ratio/derived-MOE formulas.
def percapinc(df: pd.DataFrame, base_variables: list) -> pd.DataFrame:
    # Per-capita income: aggregate income / population.
    df = pivot(df, base_variables)
    df["e"] = df.agip15ple / df.pop_6e
    df["m"] = (
        1
        / df.pop_6e
        * np.sqrt(df.agip15plm ** 2 + (df.agip15ple * df.pop_6m / df.pop_6e) ** 2)
    )
    return df
def mntrvtm(df: pd.DataFrame, base_variables: list) -> pd.DataFrame:
    # Mean travel time to work: aggregate minutes / workers not working at home.
    df = pivot(df, base_variables)
    df["e"] = df["agttme"] / (df["wrkr16ple"] - df["cw_wrkdhme"])
    df["m"] = (
        1
        / df["wrkrnothme"]
        * np.sqrt(
            df["agttmm"] ** 2
            + (df["agttme"] * df["wrkrnothmm"] / df["wrkrnothme"]) ** 2
        )
    )
    return df
def mnhhinc(df: pd.DataFrame, base_variables: list) -> pd.DataFrame:
    # Mean household income: aggregate income / households.
    df = pivot(df, base_variables)
    df["e"] = df["aghhince"] / df["hh2e"]
    df["m"] = (
        1
        / df["hh5e"]
        * np.sqrt(df["aghhincm"] ** 2 + (df["aghhince"] * df["hh5m"] / df["hh5e"]) ** 2)
    )
    return df
def avghhsooc(df: pd.DataFrame, base_variables: list) -> pd.DataFrame:
    # Average household size of owner-occupied units.
    df = pivot(df, base_variables)
    df["e"] = df["popoochue"] / df["oochu1e"]
    df["m"] = (
        df["popoochum"] ** 2 + (df["popoochue"] * df["oochu4m"] / df["oochu4e"]) ** 2
    ) ** 0.5 / df["oochu4e"]
    return df
def avghhsroc(df: pd.DataFrame, base_variables: list) -> pd.DataFrame:
    # Average household size of renter-occupied units.
    df = pivot(df, base_variables)
    df["e"] = df["poprtochue"] / df["rochu1e"]
    df["m"] = (
        df["poprtochum"] ** 2 + (df["poprtochue"] * df["rochu2m"] / df["rochu2e"]) ** 2
    ) ** 0.5 / df["rochu2e"]
    return df
def avghhsz(df: pd.DataFrame, base_variables: list) -> pd.DataFrame:
    # Average household size: household population / households.
    df = pivot(df, base_variables)
    df["e"] = df["hhpop1e"] / df["hh1e"]
    df["m"] = (
        df["hhpop1m"] ** 2 + (df["hh4m"] * df["hhpop1e"] / df["hh4e"]) ** 2
    ) ** 0.5 / df["hh4e"]
    return df
def avgfmsz(df: pd.DataFrame, base_variables: list) -> pd.DataFrame:
    # Average family size: population in families / families.
    df = pivot(df, base_variables)
    df["e"] = df["popinfmse"] / df["fam1e"]
    df["m"] = (
        df["popinfmsm"] ** 2 + (df["fam3m"] * df["popinfmse"] / df["fam3e"]) ** 2
    ) ** 0.5 / df["fam3e"]
    return df
def hovacrt(df: pd.DataFrame, base_variables: list) -> pd.DataFrame:
    # Homeowner vacancy rate (percent), MOE via hovacrtm above.
    df = pivot(df, base_variables)
    df["e"] = 100 * df["vacsalee"] / df["hovacue"]
    df["m"] = df.apply(
        lambda row: hovacrtm(
            row["hovacue"], row["vacsalee"], row["vacsalem"], row["hovacum"]
        ),
        axis=1,
    )
    return df
def rntvacrt(df: pd.DataFrame, base_variables: list) -> pd.DataFrame:
    # Rental vacancy rate (percent); reuses the same MOE helper.
    df = pivot(df, base_variables)
    df["e"] = 100 * df["vacrnte"] / df["rntvacue"]
    df["m"] = df.apply(
        lambda row: hovacrtm(
            row["rntvacue"], row["vacrnte"], row["vacrntm"], row["rntvacum"]
        ),
        axis=1,
    )
    return df
def wrkrnothm(df: pd.DataFrame, base_variables: list) -> pd.DataFrame:
    # Workers not working at home: difference of two estimates, MOE by
    # root-sum-of-squares.
    df = pivot(df, base_variables)
    df["e"] = df["wrkr16ple"] - df["cw_wrkdhme"]
    df["m"] = (df["wrkr16plm"] ** 2 + df["cw_wrkdhmm"] ** 2) ** 0.5
    return df
# Dispatch table: pff_variable name -> calculation function.
special_variable_options = {
    "percapinc": percapinc,
    "mntrvtm": mntrvtm,
    "mnhhinc": mnhhinc,
    "avghhsooc": avghhsooc,
    "avghhsroc": avghhsroc,
    "avghhsz": avghhsz,
    "avgfmsz": avgfmsz,
    "hovacrt": hovacrt,
    "rntvacrt": rntvacrt,
    "wrkrnothm": wrkrnothm,
}
| true |
0742e552432ce87e37388e14aa4dda441a27df90 | Python | Lee-121/sHIeR | /sHIeR_hogwarts/homework_0731/homework_1.py | UTF-8 | 661 | 4.3125 | 4 | [] | no_license |
# 用类和面向对象的思想,“描述”生活中任意接触到的东西
# 比如动物、小说里面的人物,不做限制,随意发挥),数量为5个
# 定义House类
class House:
window = "明亮的"
door = "安全的"
people = "有人"
ceiling = "天花板"
def people(self):
print("房间里有人吗?")
def high_wind(self):
print("要关窗吗?")
def open_door(self):
print("谁开的门?")
def upstairs(self):
print("楼上噪音好大呀!!!")
def back_home(self):
print("要回家了!!!")
me = House()
me.back_home()
me.high_wind()
| true |
db602103bce918c27603a7dacc77356b8e2c013d | Python | 0x913/python-practice-projects | /python practice projects/Control Structures/List Functions.py | UTF-8 | 328 | 4 | 4 | [] | no_license | nums = [1, 2, 3]
nums.append(4)
print(nums)  # [1, 2, 3, 4]
#
nums = [1, 3, 5, 2, 4]
print(len(nums))  # 5
#
words = ["Python", "fun"]
index = 1
words.insert(index, "is")
print(words)  # ['Python', 'is', 'fun']
#
letters = ['p', 'q', 'r', 's', 'p', 'u']
print(letters.index('r'))  # 2
print(letters.index('p'))  # 0 -- index() returns the first occurrence
print(letters.index('z'))  # raises ValueError: 'z' is not in the list
acdc661ef48286e117c4624e8f2c92cf26484436 | Python | rogeriomfneto/compgeo_algorithms | /geocomp/closest/divide.py | UTF-8 | 4,977 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
"""Algoritmo por divisão e conquista"""
from geocomp.common.segment import Segment
from geocomp.common import control
from geocomp.common import prim
from geocomp.common import guiprim
import math
# COMPATING FUNCTIONS
def compareX(p1, p2):
    """Comparator: order points by x, breaking ties on y.

    Returns a negative/zero/positive number like a classic cmp function.
    """
    if p1.x != p2.x:
        return p1.x - p2.x
    return p1.y - p2.y
def compareY(p1, p2):
    """Comparator: order points by y, breaking ties on x."""
    if p1.y != p2.y:
        return p1.y - p2.y
    return p1.x - p2.x
# SORTING FUNCTIONS
def swap(v, i, j):
    """Exchange elements i and j of v in place."""
    v[j], v[i] = v[i], v[j]
def partition(v, l, r, compare):
    """Lomuto-style partition of v[l:r) around the pivot v[r-1].

    Elements comparing <= pivot end up left of it; returns the pivot's
    final index. (The element swap is done inline with tuple assignment.)
    """
    pivot = v[r - 1]
    store = l - 1
    for i in range(l, r - 1):
        if compare(v[i], pivot) <= 0:
            store += 1
            v[i], v[store] = v[store], v[i]
    store += 1
    v[r - 1], v[store] = v[store], v[r - 1]
    return store
def sort_rec(v, l, r, compare):
    """Quicksort v[l:r) in place using the given comparator."""
    if r - l <= 1:
        return
    mid = partition(v, l, r, compare)
    sort_rec(v, l, mid, compare)
    sort_rec(v, mid, r, compare)
def sort(v, compare):
    """Sort the entire list in place with the comparator."""
    sort_rec(v, 0, len(v), compare)
# PLOTTING FUNCTIONS (visualisation via the geocomp `control` module).
def plot_vertical_lines(pm, dmin):
    # Draw the split line at pm.x and the +-dmin strip boundaries.
    vl1 = control.plot_vert_line(pm.x, "orange", 2)
    vl2 = control.plot_vert_line(pm.x - dmin, "orange", 2)
    vl3 = control.plot_vert_line(pm.x + dmin, "orange", 2)
    return vl1, vl2, vl3
def delete_vertical_lines(vl1, vl2, vl3):
    control.plot_delete(vl1)
    control.plot_delete(vl2)
    control.plot_delete(vl3)
def plot_horizontal_lines(p, dmin):
    # Horizontal band [p.y, p.y + dmin] scanned for candidate partners.
    hl1 = control.plot_horiz_line(p.y, "blue", 2)
    hl2 = control.plot_horiz_line(p.y + dmin, "blue", 2)
    return hl1, hl2
def delete_horizontal_lines(hl1, hl2):
    control.plot_delete(hl1)
    control.plot_delete(hl2)
def hilight_candidates(f):
    # Highlight every strip candidate; returns the ids for later removal.
    hi = []
    for p in f:
        hi.append(p.hilight("cyan"))
    return hi
def unhilight_candidates(f, hi):
    for i in range(len(f)):
        f[i].unhilight(hi[i])
# CLOSEST PAIR FUNCTIONS
def merge(v, l, q, r, compare):
    """Stable merge of the sorted runs v[l:q) and v[q:r) back into v[l:r)."""
    left_run = v[l:q]
    right_run = v[q:r]
    i = j = 0
    k = l
    # Take the smaller head each step; <= keeps the merge stable.
    while i < len(left_run) and j < len(right_run):
        if compare(left_run[i], right_run[j]) <= 0:
            v[k] = left_run[i]
            i += 1
        else:
            v[k] = right_run[j]
            j += 1
        k += 1
    # Copy whichever run still has elements.
    while i < len(left_run):
        v[k] = left_run[i]
        i += 1
        k += 1
    while j < len(right_run):
        v[k] = right_run[j]
        j += 1
        k += 1
def update_points(p1, p2):
    """Record (p1, p2) as the new closest pair if it beats the current one,
    and update the on-screen highlights/connecting line accordingly.

    Globals: a, b = current closest pair; hia, hib = their highlight ids;
    id = the plotted segment between them.
    """
    global a, b, id, hia, hib
    # Ignore the candidate if it is not strictly closer than the best pair.
    if (a != None and b != None):
        if (prim.dist2(p1, p2) >= prim.dist2(a, b)): return
    control.freeze_update()
    # Clear the previous pair's decorations before drawing the new ones.
    if a != None: a.unhilight(hia)
    if b != None: b.unhilight(hib)
    if id != None: control.plot_delete(id)
    a = p1
    b = p2
    hia = a.hilight()
    hib = b.hilight()
    id = a.lineto(b)
    control.thaw_update()
    control.update()
def candidates(p, l, r, dmin, pm):
    """Points of p[l:r) lying within the vertical strip of half-width dmin
    centred on the median point pm."""
    return [p[i] for i in range(l, r) if abs(p[i].x - pm.x) < dmin]
def combine(p, l, r, p1, p2, pm):
    """Conquer step: check pairs straddling the split line at pm.

    (p1, p2) is the best pair found in the two halves; only strip points
    within the current minimum distance of pm.x can improve it, and each is
    compared against at most the points less than dmin above it.
    """
    dmin2 = guiprim.dist2(p1, p2)
    dmin = math.sqrt(dmin2)
    f = candidates(p, l, r, dmin, pm)
    t = len(f)
    vl1, vl2, vl3 = plot_vertical_lines(pm, dmin)
    hi = hilight_candidates(f)
    for i in range(t):
        hl1, hl2 = plot_horizontal_lines(f[i], dmin)
        j = i + 1
        # f is sorted by y, so stop once the vertical gap reaches dmin.
        while j < t and (f[j].y - f[i].y) < dmin:
            d = guiprim.dist2(f[i], f[j])
            if (d < dmin2):
                p1, p2, dmin2 = f[i], f[j], d
                update_points(p1, p2)
            j += 1
        delete_horizontal_lines(hl1, hl2)
    delete_vertical_lines(vl1, vl2, vl3)
    unhilight_candidates(f, hi)
    return p1, p2
def divide_rec(p, l, r, compare):
    """Closest pair of p[l:r) by divide and conquer.

    Precondition: p[l:r) is sorted by x on entry; on exit it is sorted by
    the given comparator (y order, for the strip scan).
    """
    # Base cases: 2 or 3 points are solved by direct comparison.
    if r - l == 2:
        sort_rec(p, l, r, compare)
        guiprim.dist2(p[l], p[l+1])
        update_points(p[l], p[l+1])
        return p[l], p[l+1]
    if r - l == 3:
        sort_rec(p, l, r, compare)
        d1 = guiprim.dist2(p[l], p[l+1])
        d2 = guiprim.dist2(p[l], p[l+2])
        d3 = guiprim.dist2(p[l+1], p[l+2])
        if (d1 <= d2 and d1 <= d3):
            update_points(p[l], p[l+1])
            return p[l], p[l+1]
        if (d2 <= d1 and d2 <= d3):
            update_points(p[l], p[l+2])
            return p[l], p[l+2]
        if (d3 <= d1 and d3 <= d2):
            update_points(p[l+1], p[l+2])
            return p[l+1], p[l+2]
    # 4 points or more: split at the median x, recurse on both halves.
    q = (l+r)//2
    pm = p[q] #median point
    p1, p2 = divide_rec(p, l, q, compare)
    de = prim.dist2(p1, p2)
    p3, p4 = divide_rec(p, q, r, compare)
    dr = prim.dist2(p3, p4)
    p1, p2 = (p1, p2) if (de <= dr) else (p3, p4)
    update_points(p1, p2)
    # Re-establish y order across the whole range, then scan the strip.
    merge(p, l, q, r, compare)
    return combine(p, l, r, p1, p2, pm)
def Divide (p):
    """Entry point: closest pair of the point list p (None-safe highlights)."""
    global a, b, id, hia, hib
    a, b, id, hia, hib = None, None, None, None, None
    # Sort by x so divide_rec can split at the median.
    sort(p, compareX)
    n = len(p)
    if n == 1: return
    p1, p2 = divide_rec(p, 0, n, compareY)
    p1.hilight()
    p2.hilight()
    return p1, p2
| true |
34abe73092995fe669ca57734e8daa8459b52d7e | Python | mickeyhoang/SchoolAnalysis | /graphs.py | UTF-8 | 4,307 | 2.9375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import json
# Load each school's JSON profile once at import time into `data`.
schools = ['PaloAltoHighSchool', 'MontaVistaHighSchool', 'Menlo-AthertonHighSchool', 'WoodsideHighSchool', 'ApolloHighSchool', 'NationalAverages']
# Scatter colours: blue for the five schools, orange for national averages.
colors = ['#4286f4', '#4286f4', '#4286f4', '#4286f4', '#4286f4', '#fcb65a']
data = []
for name in schools:
    with open(name + '.json') as f:
        data.append(json.loads(f.read()))
def getNames():
    # Each school's 'Name' with its last 12 characters stripped --
    # presumably a fixed suffix; confirm against the JSON files.
    names = []
    for school in data:
        names.append(school['Name'][0:len(school['Name'])-12])
    return names
def getScores(key):
    # One integer score per school from the nested 'Scores' dict.
    scores = []
    for school in data:
        scores.append(int(school['Scores'][key]))
    return scores
def getData(key):
    # One integer top-level field per school.
    dataPoints = []
    for school in data:
        dataPoints.append(int(school[key]))
    return dataPoints
def getRaceData(threshold):
    # Per school: how many race entries have a percentage >= threshold
    # (race[1] is a string like "12%"; the trailing '%' is sliced off).
    raceData = []
    for school in data:
        schoolData = []
        for race in school['Race Data']:
            if int(race[1][0:-1]) >= threshold:
                schoolData.append(race)
        raceData.append(len(schoolData))
    return raceData
def compareScoreGraph(crit0, crit1):
    """Scatter-plot a school statistic (``crit0``) against a score category
    (``crit1``) with a least-squares fit line, save the chart as a PNG and
    display it."""
    crit0Data = getData(crit0)
    crit1Data = getScores(crit1)
    # Degree-1 polynomial fit = linear regression line over the points.
    plt.plot(np.unique(crit0Data), np.poly1d(np.polyfit(crit0Data, crit1Data, 1))(np.unique(crit0Data)))
    plt.title(crit0 + " vs. " + crit1 + " Scores")
    plt.xlabel(crit0)
    plt.ylabel(crit1)
    plt.scatter(crit0Data, crit1Data, None, colors)
    # Axis limits depend on the score scale (SAT is 0-2400, others 0-100)
    # and on whether the x-axis is a ratio rather than a percentage.
    if crit1 == 'SAT':
        if crit0 == 'Teacher/Student Ratio: ':
            plt.axis([10, 25, 0, 2400])
        else:
            plt.axis([0, 100, 0, 2400])
    elif crit0 == 'Teacher/Student Ratio: ':
        plt.axis([10, 25, 0, 100])
    else:
        plt.axis([0, 100, 0, 100])
    fileName = crit0 + "vs" + crit1 + "Scores.png"
    # Strip characters that are awkward in file names.
    # Bug fix: the original contained a stray duplicated line
    # `ileName = fileName.replace("%", "")` (typo) whose result was never used.
    fileName = fileName.replace("%", "")
    fileName = fileName.replace("/", "")
    fileName = fileName.replace(" ", "")
    fileName = fileName.replace(':', "")
    plt.savefig(fileName)
    plt.show()
def compareScoresGraph(crit0, crit1):
    """Scatter-plot one score category against another with a linear fit,
    save the chart as a PNG and display it."""
    crit0Data = getScores(crit0)
    crit1Data = getScores(crit1)
    # Degree-1 polynomial fit = linear regression line over the points.
    plt.plot(np.unique(crit0Data), np.poly1d(np.polyfit(crit0Data, crit1Data, 1))(np.unique(crit0Data)))
    plt.title(crit0 + " Scores vs. " + crit1 + " Scores")
    plt.xlabel(crit0)
    plt.ylabel(crit1)
    plt.scatter(crit0Data, crit1Data, None, colors)
    # SAT scores run 0-2400; everything else is a 0-100 percentage.
    if crit0 == 'SAT':
        plt.axis([0, 2400, 0, 100])
    else:
        plt.axis([0, 100, 0, 100])
    fileName = crit0 + "ScoresVs" + crit1 + "Scores.png"
    fileName = fileName.replace("%", "")
    plt.savefig(fileName)
    plt.show()
def compareGraph(crit0, crit1):
    """Scatter-plot two top-level school statistics against each other with a
    linear fit, save the chart as a PNG and display it."""
    crit0Data = getData(crit0)
    crit1Data = getData(crit1)
    # Degree-1 polynomial fit = linear regression line over the points.
    plt.plot(np.unique(crit0Data), np.poly1d(np.polyfit(crit0Data, crit1Data, 1))(np.unique(crit0Data)))
    plt.title(crit0 + " vs. " + crit1)
    plt.xlabel(crit0)
    plt.ylabel(crit1)
    plt.scatter(crit0Data, crit1Data, None, colors)
    # Teacher/student ratios live on a different x-scale than percentages.
    if crit0 == 'Teacher/Student Ratio: ':
        plt.axis([15, 25, 0, 100])
    else:
        plt.axis([0, 100, 0, 100])
    fileName = crit0 + "ScoresVs" + crit1 + ".png"
    # Strip characters that are awkward in file names.
    fileName = fileName.replace("%", "")
    fileName = fileName.replace("/", "")
    fileName = fileName.replace(" ", "")
    fileName = fileName.replace(':', "")
    plt.savefig(fileName)
    plt.show()
def compareRaceGraph(threshold, crit1):
    """Plot the number of races above ``threshold`` percent per school
    against a score category, save the chart and display it."""
    crit0Data = getRaceData(threshold)
    crit1Data = getScores(crit1)
    # Degree-1 polynomial fit = linear regression line over the points.
    plt.plot(np.unique(crit0Data), np.poly1d(np.polyfit(crit0Data, crit1Data, 1))(np.unique(crit0Data)))
    plt.title('# of Races with over ' + str(threshold) + "% vs. " + crit1 + ' Scores')
    plt.xlabel('# of Races with over ' + str(threshold) + '%')
    plt.ylabel(crit1)
    plt.scatter(crit0Data, crit1Data, None, colors)
    plt.axis([0, 5, 0, 100])
    # SAT scores need the wider 0-2400 y-axis.
    if crit1 == 'SAT':
        plt.axis([0, 5, 0, 2400])
    fileName = 'RaceOver' + str(threshold) + "Vs" + crit1 + 'Scores'
    fileName = fileName.replace("%", "")
    fileName = fileName.replace("/", "")
    plt.savefig(fileName)
    plt.show()
'''
compareGraph('Teacher/Student Ratio: ', 'Grad %')
compareScoreGraph('Teacher/Student Ratio: ', 'SAT')
compareScoreGraph('Teacher/Student Ratio: ', 'English')
compareScoreGraph('Teacher/Student Ratio: ', 'Math')
compareScoreGraph('Low Income %', 'English')
compareScoreGraph('Low Income %', 'Math')
compareScoreGraph('Low Income %', 'SAT')
compareGraph('Low Income %', 'Grad %')
compareGraph('Teacher/Student Ratio: ', 'Grad %')
compareScoresGraph('SAT', 'English')
compareScoresGraph('SAT', 'Math')
compareRaceGraph(10, 'English')
compareRaceGraph(10, 'Math')
compareRaceGraph(10, 'SAT')
''' | true |
class Solution:
    def climbStairs(self, n: int) -> int:
        """Count the distinct ways to climb ``n`` stairs taking 1 or 2 steps.

        Uses the Fibonacci-style recurrence ways(k) = ways(k-1) + ways(k-2),
        keeping only the last two values for O(1) space.  (Dead commented-out
        tabulation code from the original has been removed.)
        """
        # Base cases: 1 way for one stair, 2 ways for two stairs.
        if n <= 2:
            return n
        one_step, two_steps = 2, 1  # ways(2), ways(1)
        for _ in range(3, n + 1):
            one_step, two_steps = one_step + two_steps, one_step
        return one_step
98581dca0e9cb64b13dcb71e03674ba2a2faa1df | Python | gdcfornari/recuperacao02 | /soma.py | UTF-8 | 236 | 3.15625 | 3 | [] | no_license | class Soma:
@staticmethod
def calcula(array):
result = 0
for numero in array:
result = result + numero
return result
# Fix: the original named this list ``bytearray``, shadowing the builtin.
numeros = [5, 8, 3]
resultado = Soma.calcula(numeros)
print(resultado)
| true |
e9cc13aadedbbe42af4e98f5bf91d48696f35fac | Python | jehoons/sbie_weinberg | /module/ifa/tutorial/boolean2/projects/immune/localdefs.py | UTF-8 | 2,356 | 3 | 3 | [] | no_license | """
Bordetella Bronchiseptica simulation
- local function definitions that are loaded into the generated code
"""
import time, sys
from random import random, randint, seed
from boolean2.plde.defs import *
seed(100)  # fixed seed so simulation runs are reproducible
#
# There is a stochasticty in the expiration, each number gets
# and expiration between MIN_AGE and MAX_AGE.
#
#MIN_AGE = 0.2
#MAX_AGE = 1.2
MIN_AGE = 0.2
MAX_AGE = 2.0
DIFF_AGE = float(MAX_AGE) - MIN_AGE  # width of the random expiration window
STORE = {}  # per-label cache: label -> (last value, time drawn, lifetime)
def slow_prop( label, rc, r, t):
    """
    Rate-limited prop(): a fresh random proportion is drawn only after the
    cached value for `label` expires (see slow_func); between expirations the
    same value is returned for a given label.
    """
    return slow_func(func=prop, label=label, t=t, rc=rc, r=r)
def slow_sticky_prop( label, rc, r, t):
    """
    Rate-limited sticky_prop(): a fresh sticky proportion is drawn only after
    the cached value for `label` expires (see slow_func); between expirations
    the same value is returned for a given label.
    """
    return slow_func(func=sticky_prop, label=label, t=t, rc=rc, r=r)
def slow_func( label, func, t, **kwds):
    """
    Generates a function slowly, providing a new value for the function
    after a certain expiration time.
    """
    global STORE, MIN_AGE, DIFF_AGE
    # Cached triple for this label: (last value, time it was drawn, lifetime).
    # The (0, -10, 0) default forces a fresh draw on the first call.
    lastV, lastT, expT = STORE.get( label, (0, -10, 0) )
    if abs(t - lastT) > expT:
        # Cache expired: draw a new value and a new random lifetime.
        lastV = func( **kwds )
        lastT = t
        expT = MIN_AGE + random() * DIFF_AGE
        STORE[label] = (lastV, lastT, expT)
    return lastV
def prop(rc, r):
    """Return rc shifted up or down by a random amount in [0, r)."""
    delta = random() * r
    if randint(0, 1):
        return rc + delta
    return rc - delta
LAST_S = 0
def sticky_prop(rc, r):
    """Return a 'sticky' proportion around rc: the deviation feeds back into
    the next call through the module-global LAST_S."""
    global LAST_S
    delta = r - 2 * random() * (r + LAST_S / 2)
    LAST_S = delta
    return rc + delta
def make_slow_prop( node, indexer, param ):
    """Build the source text of a slow_prop() call for ``node`` using its
    rc/r parameters from ``param`` (``indexer`` is unused)."""
    template = 'slow_prop(label="%s", rc=%s, r=%s, t=t)'
    return template % (node, param[node].rc, param[node].r)
def positive(x):
    """
    Clamp to zero: values that went negative (e.g. through rounding errors)
    are returned as 0, everything else unchanged.
    """
    return x if x >= 0 else 0
if __name__ == '__main__':
    # Smoke test: successive sticky proportions around rc=10 (Python 2 print).
    for i in range(10):
        print slow_sticky_prop( label='A', rc=10, r=1, t=i)
f292afb55ed9c1d05c40c8453dd819ce5bc24a15 | Python | linlicro/python100 | /day14/t04-server.py | UTF-8 | 1,708 | 3.296875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3
"""
实现TCP服务器: 服务器是能够同时接纳和处理多个用户请求的。
设计一个使用多线程技术处理多个用户请求的服务器,该服务器会向连接到服务器的客户端发送一张图片。
version: 0.1
author: icro
"""
from socket import socket
from base64 import b64encode
from json import dumps
from threading import Thread
def main():
    """Start a TCP server that sends an image (base64-wrapped in JSON) to
    every connecting client, handling each connection on its own thread."""

    # Worker thread class: one instance per connected client.
    class FileTransferHandler(Thread):
        def __init__(self, cclient):
            # Bug fix: the original wrote ``super.__init__()`` which never
            # calls the Thread initializer (``super`` must be called).
            super().__init__()
            self._cclient = cclient

        def run(self):
            my_dict = {}
            my_dict['filename'] = 'xxx.icon'
            # JSON is plain text, so the image bytes were base64-encoded in
            # ``main`` below; ``data`` is captured from the enclosing scope.
            my_dict['filedata'] = data
            # Serialize the dict to a JSON string and ship it to the client.
            json_str = dumps(my_dict)
            self._cclient.send(json_str.encode('utf-8'))
            self._cclient.close()

    # 1. Create the server socket (defaults: IPv4, TCP).
    server = socket()
    # 2. Bind address and port.  Bug fix: ``bind`` takes a single
    #    (host, port) tuple, not two positional arguments.
    server.bind(('127.0.0.1', 5566))
    # 3. Listen for client connections (backlog of 512).
    server.listen(512)
    print('服务器启动开始监听...')
    with open('xxxx.icon', 'rb') as f:
        # Encode the binary payload as base64 text for JSON transport.
        data = b64encode(f.read()).decode('utf-8')
    while True:
        client, addr = server.accept()
        # Hand each client off to a dedicated worker thread.
        FileTransferHandler(client).start()
if __name__ == "__main__":
main()
| true |
676f264d116a7b5d32cf32697470f44b6b0277b4 | Python | Shatrugna-Strife/N-Gram-Extractor | /chisquare.py | UTF-8 | 2,541 | 3.015625 | 3 | [] | no_license | # import these modules
import nltk
from collections import Counter
from nltk.tokenize import RegexpTokenizer
import re
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize, sent_tokenize
import pandas as pd
from nltk.corpus import stopwords
from nltk import ngrams
# Tokenizer pattern: abbreviations, decimal numbers, words (optionally ending
# in a single non-final dot), dates, and the dollar sign.
# NOTE(review): the embedded spaces inside the pattern look like artifacts of
# collapsing the verbose (commented) pattern below -- confirm against the
# intended regex.
tokenizer = RegexpTokenizer(r'(?:[^\W\d_]\.)+| \d+(?:[.,]\d+)*(?:[.,]\d+)|\w+(?:\.(?!\.|$))?| \d+(?:[-\\/]\d+)*| \$')
# tokenizer = RegexpTokenizer(r'\w+')
'''
(?:[^\W\d_]\.)+| # one letter abbreviations, e.g. E.U.A.
\d+(?:[.,]\d+)*(?:[.,]\d+)| # numbers in format 999.999.999,99999
\w+(?:\.(?!\.|$))?| # words with numbers (including hours as 12h30),
# followed by a single dot but not at the end of sentence
\d+(?:[-\\/]\d+)*| # dates. 12/03/2012 12-03-2012
\$| # currency sign
-+| # any sequence of dashes
\S # any non space characters
'''
# Load the wiki dump, strip markup tags, tokenize, lowercase, and drop
# English stopwords before collocation scoring.
f = open("wiki_06", 'r', encoding = "utf8").read()
data = re.sub(r'<.*?>', '', f)
tokenize = tokenizer.tokenize(data)
tokenize = [w.lower() for w in tokenize ]
stop_words = set(stopwords.words('english'))
filtered_sentence = [w for w in tokenize if not w in stop_words]
# bigram = ngrams(filtered_sentence, 2)
# Score every bigram by chi-square and dump the full ranking to CSV.
bigrams = nltk.collocations.BigramAssocMeasures()
bigramFinder = nltk.collocations.BigramCollocationFinder.from_words(filtered_sentence)
bigramChiTable = pd.DataFrame(list(bigramFinder.score_ngrams(bigrams.chi_sq)), columns=['bigram','chi-sq']).sort_values(by='chi-sq', ascending=False)
bigramChiTable.to_csv ('Chi-square collocation.csv', index = None, header=True)
# Ignore bigrams occurring fewer than 3 times, then write the top 20 under
# four different association measures.
bigramFinder.apply_freq_filter(3)
fw = open("Top20bigrams.txt", 'w',encoding = "utf8")
fw.write("Using Student's t Test\n")
# print(bigramFinder.nbest(bigrams.student_t, 20))
fw.write(str(bigramFinder.nbest(bigrams.student_t, 20)))
fw.write('\n\n')
fw.write("Using Pointwise Mutual Exclusion(PMI) Test\n")
# print(bigramFinder.nbest(bigrams.pmi, 20))
fw.write(str(bigramFinder.nbest(bigrams.pmi, 20)))
fw.write('\n\n')
fw.write("Using Likelihood ratio Test\n")
# print(bigramFinder.nbest(bigrams.likelihood_ratio, 20))
fw.write(str(bigramFinder.nbest(bigrams.likelihood_ratio, 20)))
fw.write('\n\n')
fw.write("Using Chi-square Test\n")
# print(bigramFinder.nbest(bigrams.chi_sq, 20))
fw.write(str(bigramFinder.nbest(bigrams.chi_sq, 20)))
fw.write('\n\n')
| true |
2b090097336428b76e8e303dbe28ef6af3c79d47 | Python | keiouok/atcoder | /2020/0423/ki.py | UTF-8 | 1,288 | 2.703125 | 3 | [] | no_license | import sys, re, os
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import permutations, combinations, product, accumulate
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from heapq import heapify, heappop, heappush
# Competitive-programming helpers: fast input readers.  ``input`` deliberately
# shadows the builtin so trailing newlines are stripped from stdin lines.
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def S_MAP(): return map(str, input().split())
def LIST(): return list(map(int, input().split()))
def S_LIST(): return list(map(str, input().split()))

sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7  # common contest modulus (unused in this solution)
# Read a tree with N nodes (N-1 edges) and Q (node, value) operations.
N, Q = MAP()
A = [LIST() for i in range(N-1)]
P = [LIST() for i in range(Q)]
# Undirected adjacency list over 0-based node ids.
graph = defaultdict(list)
for a, b in A:
    graph[a-1].append(b-1)
    graph[b-1].append(a-1)
cnt = [0] * N
ans = [0] * N
# Apply each operation at its target node first...
for node, point in P:
    cnt[node-1] += point
# ...then push the accumulated totals down from the root (node 0) with an
# iterative DFS, so each node ends up with the sum over itself and all
# of its ancestors.
check = [False] * N
q = deque([])
q.append(0)
check[0] = True
while q:
    a = q.pop()
    for node in graph[a]:
        if check[node] == True:
            continue
        cnt[node] += cnt[a]
        check[node] = True
        q.append(node)
print(*cnt)
| true |
e4d6370c00e0765cbcb72e5328b93a67e39d5a9a | Python | azimjohn/leetcode | /algorithms/reverse_words.py | UTF-8 | 207 | 3.359375 | 3 | [] | no_license | # https://leetcode.com/problems/reverse-words-in-a-string/submissions/
class Solution:
    def reverseWords(self, s: str) -> str:
        """Return the words of ``s`` in reverse order, single-space separated."""
        # split() with no arguments already drops leading/trailing and
        # repeated whitespace, so no explicit strip() is needed.
        return " ".join(s.split()[::-1])
| true |
2599587e914343909cc6a822102e2ee81e86334e | Python | 1Mr-Styler/ner-spacy | /model/snert.py | UTF-8 | 144 | 2.53125 | 3 | [] | no_license | import spacy
import sys

# Load spaCy's small English pipeline and run named-entity recognition over
# the (templated) input text, printing each entity as "LABEL---text".
nlp = spacy.load("en_core_web_sm")
doc = nlp("--text--")
for ent in doc.ents:
    print(ent.label_ + "---" +ent.text)
0e088c21a75ac73cd3b1d46b498f75fe2273a822 | Python | vijju3335/MovieTrailer | /fresh_tomatoes.py | UTF-8 | 3,676 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python
import webbrowser
import os
import re
# Styles and scripting for the start page
start_page_content = '''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- The above 3 meta tags *must* come first in the head;
any other head content must come *after* these tags -->
<title>Movie Trailer</title>
<!-- Bootstrap Core CSS -->
<link rel="stylesheet"
href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
<link rel="stylesheet" href="css/custom.css">
<link rel="stylesheet" href="css/modal.css">
<!-- Custom CSS: You can use this stylesheet to override
any Bootstrap styles and/or apply your own styles -->
</head>
<body style="padding-top:0px;">
<!-- Content -->
<div class="container">
<!-- Heading -->
<div class="row">
<div class="col-lg-10">
<h1 class="page-header">Movie Trailer</h1>
</div>
</div>
<div>
<!-- The Modal -->
<div id="myModal" class="modal">
<!-- Modal content -->
<div class="modal-content">
<span class="close"> ×</span>
<iframe width="500px" height="350px"
src="" frameborder="0"
allow="autoplay; encrypted-media" allowfullscreen></iframe>
</div>
</div>
</div>
<!-- Projects Row -->
<div class="row">
'''
''' The end page layout and title bar'''
end_page_content = '''
</div>
</div>
<!-- /.container -->
<footer>
<div class="copyright">
<div class="container">
<p style="background-color: skyblue;">
Copyright ©   vijju3335   #2018</p>
</div>
</div>
</footer>
</body>
<script src="js/main.js"></script>
</html>
'''
''' A single movie main entry html template middle page'''
movie_tile_content = '''
<div class="col-md-4 portfolio-item" onclick="onc('{trailer_youtube_url}')">
<img class="img-responsive" src="{poster_image_url}" alt="{movie_title}">
<h3 ><b>{movie_title}</b></h3>
</div>
'''
def create_movie_tiles_content(movies):
    """Build the concatenated HTML tile markup for every movie."""
    tiles = []
    for movie in movies:
        # Pull the YouTube video id out of either a ?v=... or a youtu.be/...
        # style trailer URL (second pattern is only tried if the first misses).
        match = (re.search(r'(?<=v=)[^&#]+', movie.trailer_youtube_url)
                 or re.search(r'(?<=be/)[^&#]+', movie.trailer_youtube_url))
        trailer_youtube_id = match.group(0) if match else None
        # Fill the tile template with this movie's fields.
        tiles.append(movie_tile_content.format(
            movie_title=movie.movie_title,
            poster_image_url=movie.poster_image_url,
            trailer_youtube_url=trailer_youtube_id
        ))
    return ''.join(tiles)
def open_movies_page(movies):
    """Render the movie-trailer HTML page to disk and open it in a browser."""
    # Create or overwrite the output file
    output_file = open('fresh_tomatoes.html', 'w')
    # Replace the movie tiles placeholder generated content
    rendered_content = create_movie_tiles_content(movies)
    # Output the file
    output_file.write(start_page_content + rendered_content + end_page_content)
    output_file.close()
    # open the output file in the browser (in a new tab, if possible)
    url = os.path.abspath(output_file.name)
    webbrowser.open('file://' + url, new=2)
| true |
b42f792a14bba5dab11cdbebe0c0b559936ffef7 | Python | matanbroner/StocksPlatform | /data/nlp/retrieve_news.py | UTF-8 | 1,663 | 2.828125 | 3 | [] | no_license | import concurrent.futures
import pandas as pd
from nlp.news_sources import GeneralNewsData, RedditData
from multiprocessing import Lock
lock = Lock() # used in pipeline
from nlp.nlp_pipeline import to_pipeline
def retrieve_news_data(src):
    """
    Called when subprocess is started.
    Retrieves news data using the supplied source, retrying up to 3 times.
    @param src: source object (must provide retrieve_data() and get_stock())
    @return: the retrieved DataFrame, or None after three failed attempts
             (the original docstring incorrectly said "None")
    """
    response_df = None
    attempts = 0
    while attempts < 3:
        try:
            response_df = src.retrieve_data()
            break
        except RuntimeError:
            # NOTE(review): only RuntimeError triggers a retry; any other
            # exception propagates to the caller -- confirm that is intended.
            attempts += 1
    if attempts >= 3:
        print("Failed to grab news data for %s." % (src.get_stock()))
    # Fix: `response_df if response_df is not None else None` is equivalent
    # to returning response_df directly.
    return response_df
def main(fmp_key, stock_list):
    """
    Creates a source list and sets up subprocesses for retrieving news data.
    Each stock gets its own GeneralNewsData source; retrieval is fanned out
    over a process pool and successful results are fed to the NLP pipeline.
    """
    sources = []
    for stock in stock_list:
        sources.append(GeneralNewsData(fmp_key, stock))
    with concurrent.futures.ProcessPoolExecutor() as executor:
        # Map each future back to its stock symbol for error reporting.
        future_to_stock = {executor.submit(retrieve_news_data, source):source.get_stock() for source in sources}
        for future in concurrent.futures.as_completed(future_to_stock):
            stock = future_to_stock[future]
            try:
                data = future.result()
            except Exception as e:
                print('%s news retrieval generated an exception: %s' % (stock, e))
            else:
                # retrieve_news_data returns None after repeated failures.
                if data is not None:
                    #print("Sending", stock, "news data to pipeline...")
                    to_pipeline(data)
    return 1
| true |
589fb730a229be2b098acef6b243b9e6cc53f02c | Python | williamneto/twitter-capture | /src/stream.py | UTF-8 | 2,592 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Stream tweets by keywords and send to API.
Requires API key/secret and token key/secret.
More information on query operators can be read at:
https://dev.twitter.com/rest/public/search
"""
from requests import post
from twython import TwythonStreamer
from config import APP_KEY, APP_SECRET
from config import OAUTH_TOKEN, OAUTH_TOKEN_SECRET
from .api import post_tweets
try: # import JSON lib
import json
except ImportError:
import simplejson as json
try: # capture @-messages
from config import STREAM_ATS
except: STREAM_ATS = True
try: # capture retweets
from config import STREAM_RTS
except: STREAM_RTS = True
class Stream(TwythonStreamer):
    '''
    Execute action on every streamed tweet.
    '''
    def on_success(self, data):
        # Only full tweet payloads (with a 'text' field) are processed.
        if 'text' in data:
            load_tweet(data)
    def on_error(self, status_code, data):
        print(status_code, data)
        return True # don't quit streaming
        # self.disconnect() # quit streaming
    def on_timeout(self):
        # Bug fix: the original used the Python 2 statement
        # ``print >> sys.stderr, 'Timeout...'`` which is invalid under this
        # file's Python 3 shebang (and ``sys`` was never imported).
        import sys
        print('Timeout...', file=sys.stderr)
        return True # don't quit streaming
def stream(query, post_url):
    '''
    Start streaming tweets matching ``query``; captured batches are POSTed
    to ``post_url``.  Blocks on the Twitter streaming connection.
    '''
    global API_URL, TWEETS
    API_URL = post_url # URL to send tweets
    TWEETS = [] # array for sending tweets
    print('Authenticating...')
    # requires authentication as of Twitter API v1.1
    stream = Stream(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    print('Streaming...\n\nTweets: True\nRetweets: '+str(STREAM_RTS)+'\n@-messages: '+str(STREAM_ATS)+'\n')
    stream.statuses.filter(track=query)
    # stream.site(follow='twitter')
    # stream.user()
def print_tweet(data):
    '''
    Print captured tweet on terminal screen, padding the handle to a
    20-character column and stripping newlines / non-ASCII from the text.
    '''
    handle = '@' + data['user']['screen_name']
    body = data['text'].encode('utf8', 'ignore').decode('ascii', 'ignore')
    body = body.replace("\n", "")
    padding = ' ' * (20 - len(handle))
    print(handle, padding, body, '(' + data['id_str'] + ')')
def load_tweet(data):
    '''
    Store tweet to array in JSON format; send in batches of 10.
    '''
    # Classify the payload: reply (@-message), retweet, or plain tweet.
    is_at = True if data['in_reply_to_status_id'] else False # tweet_text.startswith('@')
    is_rt = True if 'retweeted_status' in data else False # tweet_text.startswith('RT @')
    is_tweet = all(not i for i in [is_at, is_rt])
    # Respect the STREAM_ATS / STREAM_RTS capture flags from config.
    if is_tweet or (is_at and STREAM_ATS) or (is_rt and STREAM_RTS):
        print_tweet(data)
        tweet = json.dumps(data)
        TWEETS.append(tweet)
        # Ship the accumulated tweets to the API once 10 are collected.
        if len(TWEETS) == 10:
            send_tweets()
            reset_tweets()
def send_tweets():
    '''
    Send tweets array to API endpoint (API_URL set by stream()).
    '''
    post_tweets(TWEETS, API_URL)
def reset_tweets():
    '''
    Reset tweets array after successful post.
    '''
    global TWEETS
    # Rebind (rather than clear) so any batch already handed off is untouched.
    TWEETS = []
f7b6acbb6c09bc56aceb800d57a1864c24d171a5 | Python | christophmeise/OOP | /2_übung/u2.py | UTF-8 | 3,921 | 3.703125 | 4 | [] | no_license | import math
import time
import random
# 1. Aufgabe
def apply_if(f, p, xs):
    """Return a copy of ``xs`` where ``f`` is applied to every element that
    satisfies the predicate ``p``; all other elements are kept unchanged.
    """
    # Idiom fix: test the predicate's truthiness directly instead of the
    # fragile ``p(x) == True`` comparison (which rejects truthy non-True
    # return values), and build the result with a comprehension.
    return [f(x) if p(x) else x for x in xs]
# Hilfsfunktion für 1. Aufgabe
def odd(x):
    """Return True when ``x`` is odd (helper predicate for exercise 1)."""
    # Idiom fix: return the boolean comparison directly instead of an
    # explicit if/else returning True/False.
    return x % 2 == 1
# Testmethode der 1. Aufgabe
def test_apply_if():
    """Exercise 1 check: factorial must be applied to odd elements only."""
    if apply_if(math.factorial, odd, [2,5,7,4,9,6]) == [2, 120, 5040, 4, 362880, 6]:
        print("Test bestanden")
    else:
        print("Test nicht bestanden")
# 2. Aufgabe Teil a
def zipWith(f, xs, ys):
    """Recursively combine ``xs`` and ``ys`` element-wise with ``f``,
    stopping when either list is down to its last element (both lists are
    assumed non-empty)."""
    head = [f(xs[0], ys[0])]
    if len(xs) == 1 or len(ys) == 1:
        return head
    return head + zipWith(f, xs[1:], ys[1:])
# 2. Aufgabe Teil b
def zipWith2(f, xs, ys):
    """Combine ``xs`` and ``ys`` element-wise with ``f`` (iterative form);
    stops at the shorter of the two lists."""
    # map() over two iterables calls f pairwise, exactly like zip would.
    return list(map(f, xs, ys))
def test_zipWith():
start = time.time()
test1 = zipWith(divmod, [1,2,3,15], [1,2,3,4])
end = time.time()
if test1 == [(1, 0), (1, 0), (1, 0), (3, 3)]:
print("Test zipWith bestanden, die Ausführungszeit betrug:", (end-start))
else:
print("Test zipWith nicht bestanden, die Ausführungszeit betrug:", (end-start))
start = time.time()
test2 = zipWith2(divmod, [1,2,3,15], [1,2,3,4])
end = time.time()
if test2 == [(1, 0), (1, 0), (1, 0), (3, 3)]:
print("Test zipWith2 bestanden, die Ausführungszeit betrug:", (end-start))
else:
print("Test zipWith2 nicht bestanden, die Ausführungszeit betrug:", (end-start))
# 3. Aufgabe
def my_random(lower, upper):
    """Draw uniform random ints from [lower, upper] until one repeats and
    return how many additional draws the loop performed.
    """
    count = 0
    # Idiom fix: the original abused a dict with str(x) keys as a membership
    # structure; a set of the ints themselves is the natural choice.
    seen = set()
    value = random.randint(lower, upper)
    while value not in seen:
        seen.add(value)
        value = random.randint(lower, upper)
        count += 1
    return count
# Testmethode der 3. Aufgabe
def test_my_random():
    """Smoke test: my_random always needs at least one draw before a repeat."""
    lower = -100
    upper = 100
    if my_random(lower, upper) > 0:
        print("Test my_random bestanden")
    else:
        print("Test my_random nicht bestanden")
# 4. Aufgabe Teil a
def double_birthday():
    """Draw random birthdays until one repeats; return the number of draws.

    Bug fix: the original's first assignment was ``birthday = createBD``
    (missing call parentheses), so the function object itself was stored as
    a dict key instead of an actual birthday string.
    """
    i = 1
    birthdays = {}
    birthday = createBD()
    while birthday not in birthdays:
        birthdays[birthday] = 1
        i += 1
        birthday = createBD()
    return i
# Hilfsfunktion zum generieren von Geburtstagen für Teil a
def createBD():
    """Build a random birthday string in DDMMYYYY form (days capped at 28
    so every month is valid)."""
    year = random.randint(1940, 2017)
    month = random.randint(1, 12)
    day = random.randint(1, 28)
    # Zero-pad day and month to two digits each.
    return '{:02d}{:02d}{}'.format(day, month, year)
# Testmethode der 4. Aufgabe Teil a
def test_double_birthday():
    """Smoke test: a duplicate birthday must take at least one draw."""
    if double_birthday() > 0:
        print("Test double_birthday bestanden")
    else:
        print("Test double_birthday nicht bestanden")
# Aufgabe 4 Teil b
def repeat_double_birthday():
    """Run double_birthday 1000 times and collect every result <= 365."""
    duplicates = []
    for _ in range(1000):
        bd = double_birthday()
        if bd <= 365:
            duplicates.append(bd)
    return duplicates
# Testmethode der 4. Aufgabe Teil b
def test_repeat_double_birthday():
    """Smoke test: 1000 runs should yield at least one duplicate count <= 365."""
    result = repeat_double_birthday()
    if len(result) > 0:
        print("Test repeat_double_birthday bestanden")
    else:
        print("Test repeat_double_birthday nicht bestanden")
# Aufgabe 4 Teil c
#def birthday_paradox(n):
# listOfDuplicates = []
# for x in range(0, n):
# duplicates = repeat_double_birthday()
# listOfDuplicates.append(duplicates)
# numberOfDuplicates = 0
# for x in range(0, listOfDuplicates.count()):
# for y in range(0, listOfDuplicates[x].count())
# listOfDuplicates[x][]
# duplicates = repeat_double_birthday()
# if str(n) in duplicates:
# res = duplicates
# return "Die Wahrscheinlichkeit liegt bei " + duplicates[str(n)] + "%."
# else:
# return "Kein Datensatz vorhanden!"
| true |
c7a345cf637c8f7d05aae9a5cdac6d633a5c5add | Python | daniel-reich/ubiquitous-fiesta | /uKPc5faEzQkMwLYPP_14.py | UTF-8 | 154 | 2.765625 | 3 | [] | no_license |
def end_corona(recovers, new_cases, active_cases):
    """Days until active cases are exhausted: the ceiling of active cases
    divided by the net daily recovery rate (returns a float when the
    division is exact, matching the original)."""
    quotient = active_cases / (recovers - new_cases)
    if quotient % 1 == 0:
        return quotient
    return int(quotient) + 1
| true |
def proc3(n):
    """Tribonacci-style recurrence: every n <= 3 counts as a single base
    case, larger n sums the three preceding values."""
    if n > 3:
        return proc3(n - 1) + proc3(n - 2) + proc3(n - 3)
    return 1
print proc3(1)
print proc3(0)
print proc3(-1)
print proc3(4)
print proc3(3) | true |
b5332ffb9de4324983670a8de4e67ef7ea7b3c37 | Python | Aasthaengg/IBMdataset | /Python_codes/p03544/s646363318.py | UTF-8 | 138 | 3.109375 | 3 | [] | no_license | N = int(input())
lucas = (N+2)*[0]
lucas[0] = 2
lucas[1] = 1
for i in range(2,N+2):
lucas[i] = lucas[i-1]+lucas[i-2]
print(lucas[N]) | true |
c921a4781950dc97810d54789ecde22c44a1180c | Python | imNKnavin/google-foobar | /solutions/bomb_baby/test.py | UTF-8 | 805 | 2.75 | 3 | [] | no_license | import unittest
from . import solution
class TestCase(unittest.TestCase):
    """Known input/output pairs for the bomb_baby ``solution.answer``
    function (inputs are string pairs; output is the step count or
    'impossible')."""

    def test_case_1(self):
        self.assertEqual(
            solution.answer('2', '1'),
            '1'
        )

    def test_case_2(self):
        self.assertEqual(
            solution.answer('4', '7'),
            '4'
        )

    def test_case_3(self):
        self.assertEqual(
            solution.answer('2', '4'),
            'impossible'
        )

    def test_case_4(self):
        self.assertEqual(
            solution.answer('4', '31'),
            '10'
        )

    def test_case_5(self):
        self.assertEqual(
            solution.answer('9', '68'),
            '12'
        )

    def test_case_6(self):
        self.assertEqual(
            solution.answer('95', '302'),
            '14'
        )
| true |
98f009378cc2930a1bce0c3b91c5ebfa69d0fb72 | Python | scrapehero/selectorlib-scrapy-example | /scrapeme_shop/spiders/scrapeme_with_formatter.py | UTF-8 | 1,351 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import scrapy
import os
import selectorlib
from selectorlib.formatter import Formatter
class Price(Formatter):
    """Selectorlib formatter: turn a scraped price string like '£12.50'
    into a float."""

    def format(self, text):
        cleaned = text.replace('£', '').strip()
        return float(cleaned)
class ScrapemeSpider(scrapy.Spider):
    """Crawl the scrapeme.live demo shop: follow listing pagination, visit
    each product page, and extract product data via selectorlib YAML
    definitions (with the Price formatter applied)."""
    name = 'scrapeme_with_formatter'
    allowed_domains = ['scrapeme.live']
    start_urls = ['http://scrapeme.live/shop/']
    # Create Extractor for listing page
    listing_page_extractor = selectorlib.Extractor.from_yaml_file(os.path.join(os.path.dirname(__file__),'../selectorlib_yaml/ListingPage.yml'))
    # Create Extractor for product page
    product_page_extractor = selectorlib.Extractor.from_yaml_file(os.path.join(os.path.dirname(__file__),'../selectorlib_yaml/ProductPage_with_Formatter.yml'),formatters = [Price])

    def parse(self, response):
        """Listing page: queue the next page and every product link."""
        # Extract data using Extractor
        data = self.listing_page_extractor.extract(response.text)
        if 'next_page' in data:
            yield scrapy.Request(data['next_page'],callback=self.parse)
        for p in data['product_page']:
            yield scrapy.Request(p,callback=self.parse_product)

    def parse_product(self, response):
        """Product page: yield the extracted product dict, if any."""
        # Extract data using Extractor
        product = self.product_page_extractor.extract(response.text)
        if product:
            yield product
a0f63d03956e1f91394cf847db16650ad1a0c5fb | Python | pombreda/comp304 | /Assignment4/atom3/Kernel/ATOM3Types/ATOM3Enum.py | UTF-8 | 11,802 | 3.09375 | 3 | [] | no_license | # __ File: ATOM3Enum.py __________________________________________________________________________________________________
# Implements : class ATOM3Enum
# Author : Juan de Lara
# Description : A class for the ATOM3 Enum type.
# Modified : 23 Oct 2001
# Changes :
# - 19 DEc 2001 : Modified the setValue(). If the second part of the tuple is < 0, then it is interpreted as setNone().
# ________________________________________________________________________________________________________________________
from Tkinter import *
from ATOM3Type import *
from ATOM3List import *
from ATOM3Exceptions import *
from types import *
import copy
class ATOM3Enum (ATOM3Type):
  def __init__(self, values = None, sel = None, config = 0 ):
    """ - values: is a tuple of strings
        - sel   : Initially selected item (0-based index into values)
        - config: 1 = if we will configure the item, 0 = if we will use the item """
    if values:                                  # a list of values is given...
       self.enumValues = values                 # store enumerate values and selected value
       self.selected = IntVar()                 # Tk variable holding the 1-based selection
       if sel:                                  # a selected element is given...
          if (sel < 0) or (sel > len(values)-1):
             raise ATOM3BadAssignmentValue, "ATOM3Enum: selection out of range"
          self.selected.set(sel+1)              # select it (IntVar is 1-based)
       else:
          self.selected.set(1)                  # else select the first
    else:                                       # no enumerated values yet...
       self.enumValues = []                     # attribute to store the possible values
       self.selected = None                     # selected item
    self.config = config                        # flag: configuring vs. using the widget
    if self.config:                             # if we are configuring...
       self.configItems = ATOM3List([1,1,1,0], ATOM3String, None )  # list widget to edit the items
       # add each element to the list...
       if values:                               # if we have some values yet...
          vl = []                               # auxiliary list of wrapped items
          for item in values: vl.append(ATOM3String( item ))  # wrap each item in an ATOM3String
          self.configItems.setValue( vl )       # populate the config widget
       self.enumValues = []                     # unused while configuring the widget
    else:
       self.configItems = None                  # not configuring
    self.enumValuesWidget = []                  # list of radiobuttons to select one
    self.containerFrame = None                  # frame with all the widgets
    self.enumFrame = None
    ATOM3Type.__init__(self)
  def clone(self):
    "makes an exact copy of the self object (a new IntVar is created so the selections stay independent)"
    cloneObject = ATOM3Enum(self.enumValues )
    cloneObject.parent = self.parent
    cloneObject.config = self.config
    cloneObject.mode = self.mode
    if self.selected:
       cloneObject.selected = IntVar()
       cloneObject.selected.set(self.selected.get())
    else: cloneObject.selected = None
    if self.enumValuesWidget:
       # shallow copy: the radiobutton widgets themselves are shared
       cloneObject.enumValuesWidget = copy.copy(self.enumValuesWidget)
    else: cloneObject.enumValuesWidget = []
    cloneObject.containerFrame = self.containerFrame
    return cloneObject
  def copy(self, other):
    "copies each field of the other object into its own state"
    ATOM3Type.copy(self, other)                 # call the ancestor (copies the parent field)
    self.enumValues = other.enumValues
    self.config = other.config
    if other.selected:
       # fresh IntVar so the two objects' selections stay independent
       self.selected = IntVar()
       self.selected.set(other.selected.get())
    else: self.selected = None
    if other.enumValuesWidget:
       # shallow copy: the radiobutton widgets themselves are shared
       self.enumValuesWidget = copy.copy(other.enumValuesWidget)
    else: self.enumValuesWidget = []
    self.containerFrame = other.containerFrame
def isNone(self):
"checks if the value is none"
if not self.selected or self.selected.get() < 0: return 1
return 0
def setNone(self):
"sets the value to None"
if self.selected: self.selected.set(-1)
def unSetNone (self):
"sets to selected attribute value to 0"
if self.selected and self.selected.get() == -1: self.selected.set(0)
  def setValue(self, value):
    "value is a tuple ([values...], selected). [values...] can be None, and then only the selection is changed."
    if value and type(value) == TupleType:      # if we have a tuple as argument...
       if value[0]:
          if type(value[0]) != ListType and type(value[0]) != TupleType:  # we expect a list or tuple of elements
             raise ATOM3BadAssignmentValue, "ATOM3Constraint: Bad type in setValue(), "+str(value)
          self.enumValues = value[0]            # store enumerate values and selected value, if present
       if value[1] != None:                     # if we have a second value...
          if type(value[1]) != IntType:         # we expect the index of the selected element
             raise ATOM3BadAssignmentValue, "ATOM3Constraint: Bad type in setValue(), "+str(value)
          # check that values are inside the limits...
          if (value[1] > len(self.enumValues)-1):  # outside the limits! : raise exception
             raise ATOM3BadAssignmentValue, "ATOM3Constraint: Bad type in setValue(), "+str(value)
          elif (value[1] < 0):                  # a negative value is interpreted as setNone()
             self.setNone()
             return
          selected = value[1]                   # obtain index of selected element
          self.selected = IntVar()              # create an IntVar with the selected value
          self.selected.set(selected+1)         # IntVar selection is 1-based
       if self.enumValuesWidget:                # if we are visible...
          for rb in self.enumValuesWidget:      # delete each radiobutton
              rb.grid_forget()
          self.createRadioButtons(self.enumFrame)  # create buttons with the new values
    elif type(value) == NoneType:               # call setNone
       self.setNone()
    else:
       raise ATOM3BadAssignmentValue, "ATOM3Constraint: Bad type in setValue(), "+str(value)
  def getValue(self):
    "returns a tuple ([values...], selected->integer); selected is 0-based or None"
    if self.config:
       # when configuring, refresh enumValues from the config list widget
       self.enumValues = []
       for item in self.configItems.getValue(): self.enumValues.append(item.toString())
    if self.selected:
       return (self.enumValues, self.selected.get()-1 )
    else:
       return (self.enumValues, None)
  def createRadioButtons(self, frame):
    "creates a radiobutton with each value (only when the widget is shown and values exist)"
    if self.containerFrame and self.enumValues and self.selected:
       counter = 1                              # IntVar values are 1-based
       for item in self.enumValues:             # create a radioButton with each value
           rb = Radiobutton ( frame, text = item, variable = self.selected, value = counter)
           rb.grid(row = counter-1, sticky = W)
           self.enumValuesWidget.append(rb)     # add widget to list
           counter = counter + 1
  def show(self, parent, topWindowParent = None ):
    """Build and return the Tkinter widget used to edit the selected value.

    Creates a container frame holding (optionally) the configuration
    widget and the radiobutton group for the enumerate values.
    ``topWindowParent`` is accepted for interface compatibility but unused here.
    """
    ATOM3Type.show(self, parent)
    self.containerFrame = Frame(parent)                                              # create the container frame
    self.enumFrame = Frame(self.containerFrame)                                      # A frame to put the enumerate widget
    if self.config:                                                                  # if we are configuring the object
       self.configFrame = Frame(self.containerFrame)                                 # A frame to put the configuration widget (if it is the case)
       widget = self.configItems.show(self.configFrame)                              # obtain the widget
       widget.pack()
       self.configFrame.pack(side=TOP)
    self.createRadioButtons(self.enumFrame)
    self.enumFrame.pack(side=TOP)
    return self.containerFrame
def invalid(self):
"decides if we have a valid enumerate type"
if not self.selected and not self.config:
return "A value must be selected"
return None
  def destroy(self):
    """Drop references to the graphical widgets and refresh cached values.

    Clears the radiobutton list and container frame; for configurable
    enums the item list is re-read from the configuration widget so the
    latest edits survive the widget teardown.
    """
    if self.containerFrame:
       self.enumValuesWidget = []
       self.containerFrame = None
       if self.config:                                                               # if it is a configurable enumerate list...
          self.enumValues = []                                                       # update the value of the enumerate items
          for item in self.configItems.getValue(): self.enumValues.append(item.toString())
def toString(self, maxWide = None, maxLines = None ):
"Shows the widget as a string"
if self.config: retValue = str(self.enumValues) # if it is configurable, show the item values
elif self.selected: retValue = str(self.enumValues[self.selected.get()-1]) # if not and there is a selected item, return its value
else: retValue = str(self.enumValues) # else return the items
if maxWide: return retValue[0:maxWide]
else: return retValue
  def writeConstructor2File(self, file, indent, objName='at', depth = 0, generatingCode = 0):
    """Write the ATOM3Enum constructor call (and value) for this object to *file*.

    If generatingCode == 1 we are generating code, otherwise we are saving;
    the generated constructor always uses config=0, while the saved one
    preserves self.config and also persists the configuration items.
    """
    if self.selected: selec = str(self.selected.get()-1)
    else: selec = 'None'
    if generatingCode:
       if self.configItems:
          self.enumValues = []                                                       # update the value of the enumerate items
          for item in self.configItems.getValue(): self.enumValues.append(item.toString())
       # before writing, check if we have a None value!
       if not self.selected or self.selected.get() < 0:                              # None value!
          file.write(indent+objName+"=ATOM3Enum("+str(self.enumValues)+", None, 0)\n")
          file.write(indent+objName+".setNone()\n")
       else:                                                                         # Value is not None
          file.write(indent+objName+"=ATOM3Enum("+str(self.enumValues)+", "+selec+", 0)\n")
    else:
       file.write(indent+objName+"=ATOM3Enum("+str(self.enumValues)+","+selec+","+str(self.config)+")\n")
       if self.configItems:
          self.configItems.writeValue2File(file, indent, objName+".configItems", depth, generatingCode )
  def writeValue2File(self, file, indent, objName='at', depth = 0, generatingCode = 0):
    """Write the statements that restore this object's value into *file*.

    Emits either a ``setNone()`` call (no selection) or a ``setValue(...)``
    call, then the ``config`` flag (forced to 0 when generating code).
    """
    if not self.selected or self.selected.get() < 0:                                 # We have a None value!
       file.write(indent+objName+".setNone()\n")
    else:                                                                            # Value is NOT None
       if self.selected: selec = str(self.selected.get()-1)
       else: selec = 'None'
       file.write(indent+objName+".setValue( "+str(self.getValue())+" )\n")
    if generatingCode:
       file.write(indent+objName+".config = 0\n")
    else:
       file.write(indent+objName+".config = "+str(self.config)+"\n")
    if self.isNone():
       file.write(indent+objName+".setNone()\n")
| true |
68f14ee63e4336a811876927d075ef0307d053d4 | Python | DyassKhalid007/MIT-6.001-Codes | /Week5Part1/Why_OPP.py | UTF-8 | 1,537 | 4.28125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 24 11:18:22 2018
@author: Dyass
"""
"""
Topics:
Why OOP
"""
"""
The power of OOP:
Bundle together objects that share:
common attributes and
procedures that operate on those attributes
Use abstraction to make a distinction between how to
implement an object vs how to use the object
Build layers of object abstraction that inherit behaviors
from other classes of objects
Create our own classes of objects on top of python's
basic classes
"""
"""
Implementing the class versus Using the class:
Write code from two different perspectives
All class examples we have seen so far were numerical
Implementing a new object type with a class:
define the class
define data attributes
define methods
Using the new object type in code:
create instances of the object type
do operations with them
"""
class Animal(object):
    """A simple animal with an age and an optional name (teaching example)."""

    def __init__(self, age):
        self.age = age      # age in years
        self.name = None    # unnamed until set_name() is called

    def get_age(self):
        """Return the animal's age."""
        return self.age

    def get_name(self):
        """Return the animal's name (None if it was never set)."""
        return self.name

    def set_age(self, age):
        """Replace the animal's age."""
        self.age = age

    def set_name(self, name=""):
        """Replace the animal's name; defaults to the empty string."""
        self.name = name

    def __str__(self):
        return f"animal:{self.name}:{self.age}"
# Demo: create an Animal, name it, and print its state at each step.
myanimal=Animal(3)
print(myanimal)  # name is still None at this point
myanimal.set_name("foobar")
print(myanimal)
print(myanimal.get_age())
| true |
c2ab05f19ded99cf722f628aaf03f427c4f75508 | Python | SuperGuy10/LeetCode_Practice | /Python/443. String Compression.py | UTF-8 | 1,766 | 4.03125 | 4 | [] | no_license | '''
Given an array of characters, compress it in-place.
The length after compression must always be smaller than or equal to the original array.
Every element of the array should be a character (not int) of length 1.
After you are done modifying the input array in-place, return the new length of the array.
Follow up:
Could you solve it using only O(1) extra space?
Example 1:
Input:
["a","a","b","b","c","c","c"]
Output:
Return 6, and the first 6 characters of the input array should be: ["a","2","b","2","c","3"]
Explanation:
"aa" is replaced by "a2". "bb" is replaced by "b2". "ccc" is replaced by "c3".
Example 2:
Input:
["a"]
Output:
Return 1, and the first 1 characters of the input array should be: ["a"]
Explanation:
Nothing is replaced.
Example 3:
Input:
["a","b","b","b","b","b","b","b","b","b","b","b","b"]
Output:
Return 4, and the first 4 characters of the input array should be: ["a","b","1","2"].
Explanation:
Since the character "a" does not repeat, it is not compressed. "bbbbbbbbbbbb" is replaced by "b12".
Notice each digit has it's own entry in the array.
Note:
All characters have an ASCII value in [35, 126].
1 <= len(chars) <= 1000.
'''
class Solution(object):
    def compress(self, chars):
        """Compress *chars* in place using run-length encoding.

        Each run of identical characters is rewritten as the character
        followed by its count's digits (counts of 1 omit the digits).
        Returns the length of the compressed prefix.

        :type chars: List[str]
        :rtype: int
        """
        write = 0   # next position to write compressed output
        read = 0    # start of the current run
        n = len(chars)
        while read < n:
            ch = chars[read]
            # Advance run_end to the last index of this run.
            run_end = read
            while run_end + 1 < n and chars[run_end + 1] == ch:
                run_end += 1
            run_len = run_end - read + 1
            chars[write] = ch
            write += 1
            if run_len > 1:
                digits = str(run_len)
                # Slice-assign the count digits; length is unchanged since
                # we replace exactly len(digits) elements.
                chars[write:write + len(digits)] = digits
                write += len(digits)
            read = run_end + 1
        return write
| true |
0122646c11b2363409fa30ef52974b436231396a | Python | huanghyw/akshare | /akshare/futures_derivative/nh_index_volatility.py | UTF-8 | 8,194 | 2.75 | 3 | [
"MIT"
] | permissive | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/10/14 16:52
Desc: 南华期货-商品指数历史走势-收益率指数-波动率
http://www.nanhua.net/nhzc/varietytrend.html
1000 点开始, 用收益率累计
目标地址: http://www.nanhua.net/ianalysis/volatility/20/NHCI.json?t=1574932291399
"""
import time
import requests
import pandas as pd
def num_to_str_data(str_date: int) -> str:
str_date = str_date / 1000
str_date = time.localtime(str_date) # 生成一个元组的时间
strp_time = time.strftime("%Y-%m-%d %H:%M:%S", str_date) # 格式化元组
return strp_time
def get_nh_list_table() -> pd.DataFrame:
    """Fetch the full list of Nanhua futures index varieties.

    Downloads http://www.nanhua.net/ianalysis/plate-variety.json and
    returns a DataFrame with the columns:

    - code: variety/board code (e.g. "NHCI", "CU")
    - exchange: exchange name
    - start_date: first trading day (from the payload's "firstday")
    - category: "板块" (board) or "品种" (variety), from "indexcategory"
    - name: human-readable Chinese name

    :return: pandas.DataFrame with columns code/exchange/start_date/category/name
    """
    url_name = "http://www.nanhua.net/ianalysis/plate-variety.json"
    res = requests.get(url_name)
    # Parse the JSON payload ONCE; the previous version re-parsed it for
    # every column (five times).
    items = res.json()
    futures_df = pd.DataFrame(
        {
            "code": [item["code"] for item in items],
            "exchange": [item["exchange"] for item in items],
            "start_date": [item["firstday"] for item in items],
            "category": [item["indexcategory"] for item in items],
            "name": [item["name"] for item in items],
        }
    )
    return futures_df
def nh_volatility_index(code: str = "NHCI", day_count: int = 20) -> pd.DataFrame:
    """Fetch the historical volatility series for one Nanhua index.

    :param code: variety code; must appear in get_nh_list_table()["code"]
    :param day_count: volatility window, one of [5, 20, 60, 120]
    :return: DataFrame indexed by date with a single "value" column, or
             None (implicitly) when *code* is not a known variety
    """
    if code in get_nh_list_table()["code"].tolist():
        t = time.time()
        base_url = f"http://www.nanhua.net/ianalysis/volatility/{day_count}/{code}.json?t={int(round(t * 1000))}"
        res = requests.get(base_url)
        # Parse the JSON body once; the previous version parsed it twice.
        payload = res.json()
        date = [num_to_str_data(item[0]).split(" ")[0] for item in payload]
        data = [item[1] for item in payload]
        df_all = pd.DataFrame([date, data]).T
        df_all.columns = ["date", "value"]
        df_all.index = pd.to_datetime(df_all["date"])
        del df_all["date"]
        return df_all
if __name__ == "__main__":
    # Manual smoke test: fetch the 5-day volatility series for the IC index.
    nh_volatility_index_df = nh_volatility_index(code="IC", day_count=5)
    print(nh_volatility_index_df)
| true |
9f48263193b6395b34827e10ff548f7a267a012c | Python | anthony2v/InternetRelayChat | /tests/test_server.py | UTF-8 | 3,280 | 2.515625 | 3 | [] | no_license | import asyncio
import socket
from irc_server.server import Server
import pytest
from unittest import mock
def test_server_send_sends_message_to_all_connections_when_no_exclude():
    """server.send() without *exclude* must broadcast to every connection."""
    server = Server()
    server._connections = [
        mock.MagicMock() for _ in range(5)
    ]

    server.send('PING')

    for conn in server._connections:
        # BUGFIX: the original called "asset_called_with", a typo that a
        # MagicMock silently accepts, so this assertion never actually ran.
        conn.send_message.assert_called_with(b'::6667 PING')
def test_server_send_sends_message_to_all_connections_except_the_one_specified_by_exclude():
    """server.send(exclude=conn) must skip exactly that one connection."""
    server = Server()
    exclude = mock.MagicMock()
    server._connections = [
        mock.MagicMock() for _ in range(5)
    ] + [exclude]

    server.send('PING', exclude=exclude)

    for conn in server._connections:
        if conn != exclude:
            conn.send_message.assert_called_with(b'::6667 PING')
        else:
            conn.send_message.assert_not_called()
@pytest.mark.asyncio
async def test_remove_connection():
    """remove_connection() must drop the connection and shut it down."""
    server = Server()
    connection = mock.MagicMock()
    server._connections = [connection]
    await server.remove_connection(connection)
    assert server._connections == []
    connection.shutdown.assert_called()
@pytest.mark.asyncio
async def test_server_accepts_connections():
    """A TCP client connecting to the server should be registered."""
    with Server('0.0.0.0', port=6667) as server:
        server_task = asyncio.create_task(server.start())
        s = socket.create_connection(('0.0.0.0', 6667))
        # Sleep to allow time for connection to be accepted
        await asyncio.sleep(0.1)
        # Cancel server task
        server_task.cancel()
        try:
            await server_task
        except asyncio.CancelledError:
            # Expected: awaiting a cancelled task raises CancelledError.
            # (Was a bare `except:` that would also hide real failures.)
            pass
        # Shut down test client
        s.shutdown(socket.SHUT_RDWR)
        s.close()

        assert len(server._connections) == 1
@pytest.mark.asyncio
async def test_server_processes_messages():
    """A CRLF-terminated line from a client reaches handle_message()."""
    with Server('0.0.0.0', port=6667) as server:
        server.handle_message = mock.AsyncMock()
        server_task = asyncio.create_task(server.start())
        s = socket.create_connection(('0.0.0.0', 6667))
        s.sendall(b'NICK\r\n')
        # Sleep to allow time for connection to be accepted
        await asyncio.sleep(0.1)
        # Cancel server task
        server_task.cancel()
        try:
            await server_task
        except asyncio.CancelledError:
            # Expected: awaiting a cancelled task raises CancelledError.
            # (Was a bare `except:` that would also hide real failures.)
            pass
        # Shut down test client
        s.shutdown(socket.SHUT_RDWR)
        s.close()

        server.handle_message.assert_called_with(server._connections[0], b'NICK')
@pytest.mark.asyncio
async def test_server_writes_back_messages():
    """send_message() on a connection must reach the client with CRLF appended."""
    with Server('0.0.0.0', port=6667) as server:
        server.handle_message = mock.AsyncMock()
        server_task = asyncio.create_task(server.start())
        s = socket.create_connection(('0.0.0.0', 6667))
        s.settimeout(1.0)
        s.sendall(b'PING\r\n')
        # Sleep to allow time for connection to be accepted
        await asyncio.sleep(0.1)
        server._connections[0].send_message(b'PONG')
        await asyncio.sleep(0.1)

        assert s.recv(512) == b'PONG\r\n'

        # Shut down test client
        s.shutdown(socket.SHUT_RDWR)
        s.close()
        # Cancel server task
        server_task.cancel()
        try:
            await server_task
        except asyncio.CancelledError:
            # Expected: awaiting a cancelled task raises CancelledError.
            # (Was a bare `except:` that would also hide real failures.)
            pass
| true |
142a6019222b2ae247918305ed9fb41f44e693d6 | Python | Capocaccia/amazon-giveaway-bot | /amazoncontest.py | UTF-8 | 19,238 | 2.640625 | 3 | [] | no_license | from myimports import os
from myimports import sys
from myimports import time
from myimports import datetime
from myimports import random
from myimports import webdriver
from myimports import Keys
from myimports import Select
from myimports import Options
from myimports import get
from myimports import put
from myimports import post
from myimports import BeautifulSoup
from myimports import sqlite3
from myimports import getpass
import captchachecker
import localhandler
#Script the opens amazon, enters user information, and enters in every contest
def amazon_bot(email, password, name, want_follow, firefox_profile_path, amazon_pass):
    """Scrape giveawaylisting.com for Amazon giveaways and enter each one.

    Loads the giveaway table, removes URLs already entered (tracked in the
    local SQLite database), then drives a headless Firefox through each
    remaining giveaway page, handling the four known entry mechanisms
    (Amazon video, YouTube video, sponsor follow, animated box) plus free
    kindle-book claims. Wins and entry counts are reported to
    primegiveaway.com.

    NOTE(review): this function "loops" by calling itself recursively at
    the end and on failure paths; a long-enough run will presumably hit
    Python's recursion limit — confirm and consider a loop instead.
    """
    print ("Loading prizes")
    try:
        #Go to website with all items in one table
        response = get("https://www.giveawaylisting.com/index2.html")
        amazon_soup = BeautifulSoup(response.text, 'lxml')
        type(amazon_soup)
        #Find table, and then all rows
        all_giveaways_table = amazon_soup.find('table', id='giveaways')
        all_giveaways_row = all_giveaways_table.findChildren('tr')
    except:
        print("Could not load items")
        print ("")
        time.sleep(5)
        amazon_bot(email, password, name, want_follow, firefox_profile_path, amazon_pass)
    #Pages to index to retrieve items, add giveaways urls list
    item_urls_list = {}
    item_count = 1
    total_count = len(all_giveaways_row)
    #Loop through each row and add item URL to dictionary
    for row in all_giveaways_row:
        try:
            row_sections = row.findAll('td') #All columns of that row
            price = row_sections[4].text[1:] #Price of item excluding the dollar sign
            link = row.find('a')['href'] #Link data
            item_urls_list[link] = float(price) #Adding to dictionary
        except:
            pass
        loading_percentage(item_count, total_count)
        item_count += 1
    print ("Removing prizes that you have already entered into")
    #Load enteredurls database and grab all the previously entered urls, delete old ones, and load into list
    local_database = sqlite3.connect('localdatabase.db', detect_types=sqlite3.PARSE_DECLTYPES)
    cursor = local_database.cursor()
    entered_urls_database = cursor.execute("SELECT * FROM enteredurls") #Find all rows in enteredurls table
    entered_urls_database_loop = cursor.fetchall()
    entered_urls = []
    for row in entered_urls_database_loop:
        time_since = datetime.date.today() - row[2] #Compare date of url
        if time_since.days >= 10: #If url is older than 10 days delete it
            cursor.execute("DELETE FROM enteredurls WHERE url=?", (row[1],))
        else:
            entered_urls.append(row[1])
    #Save changes and close database connection
    local_database.commit()
    local_database.close()
    #Used for loading percentage when removing old giveaways
    item_count = 1
    total_count = len(entered_urls)
    #Remove urls that are in entered_urls from item_urls_list
    for url in entered_urls:
        if url in item_urls_list:
            del item_urls_list[url]
        #Show loading percentage
        loading_percentage(item_count, total_count)
        item_count += 1
    #If fewer than 100 prizes left wait 6 hours and check again
    if len(item_urls_list) < 100:
        time_count = 0
        time_wait = 21600
        while time_count < time_wait:
            time_message = time_wait - time_count
            time_count += 1
            print ("Entered into all the giveaways, will check again in "+str(time_message), end="\r")
            time.sleep(1)
        print ("Restarting...")
        print ("")
        #Restart the program
        amazon_bot(email, password, name, want_follow, firefox_profile_path, amazon_pass)
    else:
        print ("Entering in "+str(len(item_urls_list))+" new giveaways!")
        print ("")
        #Sort items from highest price down
        item_urls_list = sorted(item_urls_list, key=item_urls_list.get, reverse=True) #reverse=True makes it start from highest to lowest
        #Item number
        item_number = 1
        #Runs through each giveaway item in item_urls_list
        for link in item_urls_list:
            #Open Firefox with the current url for the item
            try:
                options = Options()
                options.headless = True #Currently on, turn off if you notice multiple prizes that are unreadable in a row, CAPTCHA could be enabled
                profile = webdriver.FirefoxProfile(firefox_profile_path) #Add your own path, google create firefox profile
                profile.set_preference("media.volume_scale", "0.0") #Mutes sound coming videos
                browser = webdriver.Firefox(firefox_profile=profile, executable_path=os.path.join(os.path.dirname(sys.argv[0]), 'geckodriver.exe'), options=options)
                browser.get((link))
                item_page_loaded = True
            except:
                item_page_loaded = False
            #Run through the prize cycle if browser loads
            if item_page_loaded is True:
                #Variable for sponsor follow giveaway and user does not want to enter
                is_follow_no_want = False
                #Find Email and password boxes and log in to account and clicks the Sign in button
                try:
                    browser.find_element_by_id('ap_email').send_keys(email)
                    browser.find_element_by_id('ap_password').send_keys(amazon_pass)
                    time.sleep(random.randint(2,3))
                    login_button = browser.find_element_by_id('signInSubmit').click()
                    print ("Logged in")
                except:
                    already_logged = True
                #Run captcha test, check for captcha and solve it
                captchachecker.check_for_captcha(browser)
                #Print the item number
                print ("Item #"+str(item_number))
                #Find item name and price
                try:
                    giveaway_item_name = browser.find_element_by_id("prize-name").text
                    giveaway_item_price = browser.find_element_by_class_name("qa-prize-cost-value").text
                    print (giveaway_item_name+"-" +giveaway_item_price)
                except:
                    print ("Could not find item name")
                time.sleep(random.randint(2,5))
                #Check if contest has already ended
                try:
                    contest_ended = browser.find_element_by_id('giveaway-ended-header')
                except:
                    contest_ended = False
                #Check if contest has ended, if not continue
                if contest_ended is False:
                    #Looks for videos, follow sponsor button, or regualar giveaway box
                    #Amazon video
                    try:
                        amazon_video = browser.find_element_by_id("enter-video-button-announce")
                    except:
                        amazon_video = False
                    #Youtube video
                    try:
                        youtube_video = browser.find_element_by_id("videoSubmitForm")
                    except:
                        youtube_video = False
                    #Sponsor follow button
                    try:
                        follow_button = browser.find_element_by_name('follow')
                    except:
                        follow_button = False
                    #Standard animated giveaway box
                    try:
                        #Find animated contest box to click on
                        click_to_win = browser.find_element_by_id('box_click_target')
                    except:
                        #Could not find the animated box
                        click_to_win = False
                    try:
                        claim_kindle_book = browser.find_element_by_name("ClaimMyPrize")
                    except:
                        claim_kindle_book = False
                    #Click video, follow button, or animated box if present
                    if amazon_video != False:
                        #Did not enter in the contest yet
                        skip_wait_time = False
                        try:
                            click_video = browser.find_element_by_id("airy-outer-container").click()
                            print ("Waiting 15 seconds for amazon video")
                            time.sleep(random.randint(16,18))
                            browser.find_element_by_name('continue').click()
                            print ("Entered giveaway")
                        except:
                            print ("Amazon video failed")
                    elif youtube_video != False:
                        #Did not enter in the contest yet
                        skip_wait_time = False
                        try:
                            print ("Waiting 15 seconds for youtube video")
                            time.sleep(random.randint(16,18))
                            browser.find_element_by_name('continue').click()
                            print ("Entered giveaway")
                        except:
                            print ("Youtube video script failed")
                    elif follow_button != False:
                        #Check if want_follow is true
                        if want_follow == 1:
                            skip_wait_time = False
                            try:
                                follow_button.click()
                                print ("Followed the sponsor, Entered giveaway")
                            except:
                                print ("Could not follow sponsor")
                        else:
                            is_follow_no_want = True
                            skip_wait_time = True
                            print ("Is a sponsor follow giveaway, skipping")
                    elif click_to_win != False:
                        time.sleep(2)
                        #Did not enter the contest yet
                        skip_wait_time = False
                        try:
                            click_to_win.click()
                            print ("Entered giveaway")
                        except:
                            print ("Could not click bouncing box")
                    elif claim_kindle_book != False:
                        try:
                            claim_kindle_book.click()
                            claim_kindle_book = True
                        except:
                            print ("Could not claim free kindle book")
                        skip_wait_time = True
                    else:
                        print ("Previously entered")
                        skip_wait_time = True
                    #If entering giveaway and need time, wait
                    if skip_wait_time is False:
                        time.sleep(random.randint(12,15))
                    #If not a sponsor follow and user does not want, look for giveaway text
                    if is_follow_no_want is False:
                        try:
                            giveaway_results_text = browser.find_element_by_id('title').text.lower()
                        except:
                            giveaway_results_text = False
                        #Check giveaway results and see if they are a winner
                        if giveaway_results_text != False:
                            #Check if you already lost
                            if giveaway_results_text != name+", you didn't win":
                                #Check to see if placed an entry into raffle, if not try to claim prize
                                if giveaway_results_text != name+", your entry has been received":
                                    #Check if amazon changed the prize collection page
                                    browser.get_screenshot_as_file('pics/'+str(item_number)+'.png')
                                    try:
                                        #Look for claim item button and click it
                                        claim_prize = browser.find_element_by_name('ShipMyPrize')
                                    except:
                                        claim_prize = False
                                    #If not already claimed prize
                                    if claim_prize != False:
                                        try:
                                            claim_prize.click()
                                            print ("***WINNER!***")
                                            #Update the win column in stats table
                                            post("http://www.primegiveaway.com/add_winning_prize", data={'email':email,'giveaway_item_name':giveaway_item_name,'giveaway_item_price':giveaway_item_price,'link':link})
                                            #Update winning stats
                                            post("http://www.primegiveaway.com/update_wins_stats", data={'email':email})
                                        except:
                                            print ("Could not claim prize")
                                            return
                                    else:
                                        #If free kindle book tell user
                                        if claim_kindle_book is True:
                                            print ("You claimed a kindle book!")
                                            #Update the win column in stats table
                                            post("http://www.primegiveaway.com/add_winning_prize", data={'email':email,'giveaway_item_name':giveaway_item_name,'giveaway_item_price':giveaway_item_price,'link':link})
                                            #Update winning stats
                                            post("http://www.primegiveaway.com/update_wins_stats", data={'email':email})
                                        else:
                                            print ("You have already won this prize!")
                                else:
                                    print ("Entered into raffle giveaway")
                            else:
                                print ('-Not a winner-')
                        else:
                            print ("Could not find winning status")
                else:
                    print ("Contest has already ended")
            else:
                print ("Could not load page")
            #Add link to enteredurls database if page loaded and found giveaway results
            if item_page_loaded is True:
                if contest_ended is True or giveaway_results_text != False:
                    database = sqlite3.connect('localdatabase.db', detect_types=sqlite3.PARSE_DECLTYPES)
                    cursor = database.cursor()
                    cursor.execute('INSERT INTO enteredurls(url, day) VALUES(?, ?)', (link, datetime.date.today(), ))
                    database.commit()
                    database.close()
            #Wait some time before closing window
            browser.quit()
            time.sleep(random.randint(1,3))
            item_number += 1
            print ("")
        print ("End of prizes, restarting...")
        print ("")
        #Update entries stats
        #Open and find last entry count in enteredurls table from local database
        local_database = sqlite3.connect('localdatabase.db', detect_types=sqlite3.PARSE_DECLTYPES)
        local_cursor = local_database.cursor()
        local_cursor.execute("""SELECT * FROM enteredurls ORDER BY id DESC LIMIT 1""")
        for x in local_cursor:
            entries = x[0]
        local_database.close()
        post("http://www.primegiveaway.com/update_entries_stats", data={'email':email,'entries':entries})
        #Starts the script over once it completes the last item
        amazon_bot(email, password, name, want_follow, firefox_profile_path, amazon_pass)
#Loading percentage function
def loading_percentage(item_count, total_count):
    """Print an in-place progress line for *item_count* of *total_count* items."""
    percent = int(item_count * (100 / total_count))
    if percent >= 100:
        # Finished: overwrite the progress line, then leave two blank lines.
        print("100% complete", end='\r')
        print("")
        print("")
    else:
        print(f"{percent}% completed...", end='\r')
#Loads the user input questions, email, password, follow, correct info
def load_login_info():
    """Prompt for FinessePrime credentials, sync stats, then start the bot.

    On a successful login (the service replies with the literal text
    'True'), local storage is created/updated, the latest entry count is
    pushed to primegiveaway.com, the user may update local settings, and
    amazon_bot() is launched. On failure this function calls itself again
    (NOTE(review): unbounded recursion on repeated failed logins).
    """
    print ("Please sign in to your FinessePrime Account:")
    email = input("Email: ")
    password = getpass.getpass("Password: ")
    #Run login_account function to check if user has account with FinessePrime
    if post("http://www.primegiveaway.com/programlogin", data={'email':email,'password':password}).text == 'True':
        #Create local storage if needed
        localhandler.create_local_account(email)
        #Open and find last entry count in enteredurls table from local database
        local_database = sqlite3.connect('localdatabase.db', detect_types=sqlite3.PARSE_DECLTYPES)
        local_cursor = local_database.cursor()
        #Get entry data
        local_cursor.execute("""SELECT * FROM enteredurls ORDER BY id DESC LIMIT 1""")
        entries = local_cursor.fetchone()
        local_database.close()
        if entries is None:
            entries = 0
        else:
            entries = entries[0]
        #Update user stats
        post("http://www.primegiveaway.com/update_entries_stats", data={'email':email,'entries':entries})
        #Gather account settings
        account_settings = localhandler.find_local_account_settings()
        #Continue if able to find settings for user
        if account_settings != False:
            print ("")
            #Prompt user for settings update, move past if not
            change_settings = input("Would you like to change your settings? (Y/N): ").lower()
            while (change_settings != "yes") and (change_settings != "y") and (change_settings != "no") and (change_settings != "n"):
                print ("")
                print ("Invalid input please try again")
                change_settings = input("Would you like to change your settings? (Y/N): ").lower()
            if change_settings == "yes" or change_settings == "y":
                localhandler.update_local_settings() #Update the settings
                account_settings = localhandler.find_local_account_settings() #Load the newly saved settings
            #Account settings: [name, want_follow, firefox_profile_path, amazon_pass]
            name = account_settings[0]
            want_follow = account_settings[1]
            firefox_profile_path = account_settings[2]
            amazon_pass = account_settings[3]
            #Reset amazon cookies
            localhandler.reset_amazon_cookies(email,password,firefox_profile_path, amazon_pass) #Turned off for now, amazon login captcha issues
            print ("")
            amazon_bot(email, password, name, want_follow, firefox_profile_path, amazon_pass)
        else:
            print ("Failed to find settings, please close program and try again.")
    else:
        print ("Login failed")
        print ("")
        load_login_info()
#Greeting message when first opened, then hand control to the login prompt.
print ("Welcome to the Amazon Giveaways Bot!")
print ("")
load_login_info()
| true |
08142ed1672f83593ff551cd0b6984eb1ed4e5b7 | Python | moudii04/urban-winner | /game.py | UTF-8 | 3,098 | 3.25 | 3 | [] | no_license | import pygame
from comet_event import CometEvent
from player import Player
from monster import Mummy
from random import randint
from sounds import SoundManager
class Game:
    """Top-level game state: the player, monsters, comets, score and sound."""

    def __init__(self):
        self.is_playing = False
        self.all_players = pygame.sprite.Group()
        self.player = Player(self)
        self.all_players.add(self.player)
        self.all_monsters = pygame.sprite.Group()
        self.spawn_monster(Mummy)
        self.spawn_monster(Mummy)
        self.comet_fall = CometEvent(self)
        self.score = 0
        self.sound = SoundManager()

    def start_game(self):
        """Switch to playing state and spawn an initial wave of monsters."""
        self.is_playing = True
        self.spawn_rand_monster()

    def game_over(self):
        """Reset all game state (monsters, comets, health, score) and play the end sound."""
        self.all_monsters = pygame.sprite.Group()
        self.comet_fall.all_comets = pygame.sprite.Group()
        self.comet_fall.reset_percent()
        self.player.health = 100
        self.is_playing = False
        self.score = 0
        self.sound.play("end")

    def add_score(self, score_amount):
        """Increase the score by *score_amount*."""
        self.score += score_amount

    def update_game(self, screen):
        """Advance and draw one frame of the game onto *screen*."""
        # NOTE(review): the font is recreated every frame; presumably it
        # could be built once in __init__ — confirm before changing.
        arial_font = pygame.font.SysFont("arial", 20, False, False)
        score_text = arial_font.render(f"Score : {self.score}", 1, (0, 0, 0))

        # draw the player's image
        screen.blit(self.player.image, self.player.rect)

        # draw the score
        screen.blit(score_text, (20, 20))

        # animation
        self.player.update_animation()

        # draw the health bar
        self.player.max_health_bar(screen)
        self.player.update_health_bar(screen)

        # event (comet) progress bar
        self.comet_fall.update_bar(screen)

        # move projectiles; drop those past the right edge of the screen
        for projectile in self.player.all_projectiles:
            projectile.move()
            if projectile.rect.x > 1080:
                projectile.player.all_projectiles.remove(projectile)

        # draw and update monsters
        self.all_monsters.draw(screen)
        for monster in self.all_monsters:
            monster.forward()
            monster.max_health_bar(screen)
            monster.update_health_bar(screen)
            monster.update_animation()

        # draw and advance comets
        self.comet_fall.all_comets.draw(screen)
        for comet in self.comet_fall.all_comets:
            comet.fall()

        # draw projectiles
        self.player.all_projectiles.draw(screen)

        # keep the player inside the 0..1080 play area
        if 0 < self.player.rect.x < 1080 - self.player.rect.width:
            self.player.move()
        elif self.player.rect.x == 0:
            self.player.rect.x += 2
        elif self.player.rect.x == 1080 - self.player.rect.width:
            self.player.rect.x -= 2

    def spawn_monster(self, monster_class_name):
        """Instantiate *monster_class_name* with this game and add it to the monster group."""
        self.all_monsters.add(monster_class_name.__call__(self))

    def check_collision(self, sprite, group):
        """Return the list of sprites in *group* colliding with *sprite* (pixel-mask test)."""
        return pygame.sprite.spritecollide(
            sprite, group, False, pygame.sprite.collide_mask)

    def spawn_rand_monster(self):
        """Spawn a random number (2-3) of Mummy monsters."""
        for x in range(randint(2, 3)):
            self.spawn_monster(Mummy)
| true |
d8fa13b97f4e995020ddcbb6dbb8372730046a30 | Python | ArchLaelia/MiniProject | /mini_main.py | UTF-8 | 2,844 | 3.484375 | 3 | [] | no_license | # FindFiles, första funktionen
# FindFileExt, andra funktionen
# FindInfo, tredje funktionen
#
#
# Saker som behöver fixas
# #1: Optimera FindInfo med de två for loopar
# Kanske kan kombinera de två i en enda loop
# #2: Ska se om jag kan kombinera FindFileExt med FindInfo
# Det gäller när man filtrerar extension, då de båda gör typ samma sak
#
#
import os
import os.path
import re
from pathlib import Path
def FindFiles(road):
    """Recursively collect the full paths of all files under *road*.

    :param road: directory to walk (string path)
    :return: list of absolute/joined file paths, in os.walk order
    """
    list_of_files = []
    # Walk every directory from road downwards and record each file path.
    # (The previous version wrapped os.path.join in a UnicodeDecodeError
    # handler, but joining str paths cannot raise that in Python 3.)
    for root, dirs, files in os.walk(road, topdown=True):
        for name in files:
            list_of_files.append(os.path.join(root, name))
    return list_of_files
def FindFileExt(ext, folder):
    """Print and return the names of files in *folder* ending with *ext*.

    :param ext: extension suffix to match, e.g. ".txt"
    :param folder: directory to list (non-recursive)
    :return: list of matching file names, in os.listdir order
    """
    # Filter directly over the directory listing instead of indexing
    # with range(len(...)).
    matches = [name for name in os.listdir(folder) if name.endswith(ext)]
    print(matches)
    return matches
def FindInfo(pattern, folder):
    """Print every ``.txt`` file below *folder* whose contents contain *pattern*.

    Files that vanish between listing and reading, or that we lack permission
    to access, are silently skipped.
    """
    # Single pass: examine each .txt file as os.walk yields it.  The original
    # collected paths in one loop and read them in a second (its own header
    # TODO asked for the loops to be merged); it also bound the resolved path
    # to an unused variable ``pat``.
    for root, dirs, files in os.walk(folder, topdown=True):
        for name in files:
            if not name.endswith(".txt"):
                continue
            path = os.path.join(root, name)
            # Confirm the file still exists and is accessible.
            try:
                Path(path).resolve(strict=True)
            except (FileNotFoundError, PermissionError):
                continue
            contents = ReadFile(path)
            if pattern in contents:
                print(path + " contains " + pattern)
def ReadFile(file_list):
    """Read a text file, trying the locale encoding, then utf-8, then latin-1.

    Returns the file contents as a string.  If every attempted encoding fails,
    the original printed "send help" and then crashed with a NameError on an
    undefined variable; here the message is kept and an empty string returned.
    """
    # Try encodings in order.  latin-1 can decode any byte sequence, so in
    # practice the last attempt always succeeds.  ``with`` guarantees each
    # handle is closed, replacing the original's nested open/close ladder.
    for encoding in (None, "utf-8", "latin-1"):
        try:
            with open(file_list, "r", encoding=encoding) as f:
                return f.read()
        except UnicodeDecodeError:
            continue
    print("send help")
    return ""
# FindFiles(str(r"C:\Python Code"))
# Searching from C: takes a long time.
# NOTE(review): this call scans the entire C: drive whenever the module is
# run or imported - consider a narrower default path or a __main__ guard.
FindInfo("a", r"C:/")
| true |
c5c823eaad65ae42e25f64c29510b1c1b519d7e6 | Python | sathulkiran/CS313E-A0 | /A11/Triangle.py | UTF-8 | 4,273 | 3.75 | 4 | [] | no_license |
# File: Triangle.py
# Description: Min path sum for triangle
# Student Name: Athul Srinivasaraghavan
# Student UT EID: as84444
# Partner Name: None
# Partner UT EID: N/a
# Course Name: CS 313E
# Unique Number:
# Date Created: 03/28/2021
# Date Last Modified:
import sys
from timeit import timeit
# returns the greatest path sum using exhaustive search
def brute_force (grid):
    """Return the greatest top-to-bottom path sum in *grid* by exhaustive search.

    Path enumeration is delegated to brute_helper().  As in the original
    (whose running max started at 0), the result is floored at 0, so an
    all-negative triangle yields 0.
    """
    possibles = []
    brute_helper(grid, 0, 0, possibles, 0)
    # The original ended with an unreachable second ``return`` statement and
    # a manual max loop; both are replaced by one builtin max call.
    return max([0] + possibles)
def brute_helper (grid, idx, adj, possibles, count):
if idx == len(grid):
possibles.append(count)
else:
count += grid[idx][adj]
return (brute_helper(grid, idx+1, adj, possibles, count)) or (brute_helper(grid, idx+1, adj+1, possibles, count))
# returns the greatest path sum using greedy approach
def greedy (grid):
    """Descend the triangle greedily, always stepping toward the larger child.

    Returns the sum of the visited cells (not necessarily the global optimum).
    """
    col = 0
    total = 0
    last_row = len(grid) - 1
    for row in range(len(grid)):
        total += grid[row][col]
        # Shift right only when the diagonal child strictly beats the
        # straight-down child.
        if row < last_row and grid[row + 1][col + 1] > grid[row + 1][col]:
            col += 1
    return total
# returns the greatest path sum using divide and conquer (recursive) approach
def divide_conquer (grid):
    """Return the greatest path sum via recursive divide and conquer.

    Path enumeration is delegated to div_helper().  As in the original
    (whose running max started at 0), the result is floored at 0.
    """
    possibles = []
    div_helper(grid, possibles, 0)
    # Replaces the original's manual max-tracking loop with builtin max.
    return max([0] + possibles)
def div_helper (grid, possibles, count):
    """Accumulate every root-to-leaf path sum of *grid* into *possibles*.

    The triangle is split into two subtriangles below the apex: one dropping
    the first column and one dropping the last.
    """
    if len(grid) == 1:
        possibles.append(count + grid[0][0])
        return None
    right = [row[1:] for row in grid[1:]]
    left = [row[:-1] for row in grid[1:]]
    running = count + grid[0][0]
    # Recurse right first, then left, matching the original append order.
    div_helper(right, possibles, running)
    div_helper(left, possibles, running)
    return None
# returns the greatest path sum and the new grid using dynamic programming
def dynamic_prog (grid):
    """Collapse *grid* bottom-up in place and return the greatest path sum.

    NOTE: mutates *grid*.  Zero cells are treated as padding outside the
    triangle and are left untouched, exactly as in the original.
    """
    width = len(grid[0])
    # Work upward from the second-to-last row to the apex.
    for row in reversed(range(len(grid) - 1)):
        for col in range(width):
            if grid[row][col]:
                grid[row][col] += max(grid[row + 1][col], grid[row + 1][col + 1])
    return grid[0][0]
# reads the file and returns a 2-D list that represents the triangle
def read_file ():
    """Read the triangle from stdin into an n x n grid padded with zeros.

    First line: n.  Each following line: the whitespace-separated entries of
    one triangle row.
    """
    n = int(sys.stdin.readline().strip())
    # Square grid; positions beyond each row's triangle entries remain 0.
    grid = [[0] * n for _ in range(n)]
    for row_idx in range(n):
        values = [int(token) for token in sys.stdin.readline().split()]
        for col_idx, value in enumerate(values):
            grid[row_idx][col_idx] += value
    return grid
def main ():
    """Read the triangle from stdin, then report the greatest path sum and
    the average runtime (over 10 runs) of each of the four strategies.
    """
    # read triangular grid from file
    grid = read_file()
    # One (print-label, function-name) pair per strategy.  The original
    # repeated the same time/print stanza four times; it is folded into a
    # single loop with identical output.
    strategies = [
        ('exhaustive', 'brute_force'),
        ('greedy', 'greedy'),
        ('divide and conquer', 'divide_conquer'),
        ('dynamic programming', 'dynamic_prog'),
    ]
    for label, func_name in strategies:
        # Average of 10 timed runs; the statement re-creates the grid literal
        # each run, so dynamic_prog's in-place mutation does not accumulate.
        elapsed = timeit('{}({})'.format(func_name, grid),
                         'from __main__ import {}'.format(func_name),
                         number = 10) / 10
        print('The greatest path sum through {} search is'.format(label))
        print(globals()[func_name](grid))
        print('The time taken for {} search in seconds is'.format(label))
        print(elapsed)
# Standard script entry point.
if __name__ == "__main__":
    main()
| true |
22f525c3d6b4b5a0d28e6d54266fbe2fb6a90aaa | Python | papazianz/Trading | /Bot.py | UTF-8 | 1,057 | 2.90625 | 3 | [] | no_license | """
Created on Aug 5th, 2018
-Nick Papazian
"""
from Keys import *
import datetime
from time import sleep
from binance.client import Client
client = Client(api_key, api_secret)
def sys():
    """Print the Binance exchange status, or a fallback message on failure.

    NOTE(review): this function shadows the stdlib ``sys`` module name.
    """
    # Check system status.
    try:
        status = client.get_system_status()
        print("\nExchange Status: ", status)
    except Exception:
        # The original ``except():`` matched an empty tuple of exception
        # types, i.e. nothing, so this fallback was unreachable.
        print('\nNo connection to server')
def BTC_Bot():
    """Poll 15-minute BTCUSDT klines and trade .0025 BTC on large close moves.

    Buys on a rise of more than 500 between the last two closes, sells on a
    drop of more than 500, and loops until a sell order is placed.
    NOTE(review): ``sleep`` is imported at module level but never used -
    consider sleeping between polls to avoid hammering the API.
    """
    symbol = 'BTCUSDT'
    quantity = .0025
    order = False
    while order == False:
        BTC = client.get_historical_klines(symbol=symbol, interval='15m', start_str='1 hour ago utc')
        # Index 4 of a kline is the close price.
        delta = float(BTC[-1][4]) - float(BTC[-2][4])
        if delta > 500:
            print('Buying .0025 BTC')
            client.order_market_buy(symbol=symbol, quantity=quantity)
            order = False
        elif delta < -500:
            # Bug fix: the original repeated the ``> 500`` buy condition
            # here, so the sell branch could never execute.
            print('Selling .0025 BTC')
            client.order_limit_sell(symbol=symbol, quantity=quantity)
            order = True
        else:
            print('doing nothing')
sys()
BTC_Bot() | true |
0424de1bec02a139f3dfba650849d909ad834367 | Python | Ciasterix/NEO-Revisited | /model/run.py | UTF-8 | 2,023 | 2.5625 | 3 | [] | no_license | import tensorflow as tf
from model.Attention import Attention
from model.Decoder import Decoder
from model.Encoder import Encoder
if __name__ == "__main__":
    # Smoke test for the seq2seq components: push one random batch through
    # Encoder, Attention and Decoder and print the resulting shapes.
    BATCH_SIZE = 64
    vocab_inp_size = 32
    vocab_tar_size = 32
    embedding_dim = 256
    units = 1024

    # Encoder
    encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
    example_input_batch = tf.random.uniform(shape=(64, 16), minval=0, maxval=31,
                                            dtype=tf.int64)
    example_target_batch = tf.random.uniform(shape=(64, 11), minval=0,
                                             maxval=31, dtype=tf.int64)
    print(example_input_batch.shape, example_target_batch.shape)
    # sample input
    sample_hidden = encoder.initialize_hidden_state()
    sample_cell = encoder.initialize_cell_state()
    sample_output, sample_hidden, cell_hidden = encoder(example_input_batch, [sample_hidden, sample_cell])
    print(
        'Encoder output shape: (batch size, sequence length, units) {}'.format(
            sample_output.shape))
    print('Encoder Hidden state shape: (batch size, units) {}'.format(
        sample_hidden.shape))
    # Bug fix: this line previously printed ``sample_hidden.shape`` again
    # instead of the cell state's shape.
    print('Encoder Cell state shape: (batch size, units) {}'.format(
        cell_hidden.shape))

    # Attention
    attention_layer = Attention()
    attention_result, attention_weights = attention_layer(sample_hidden,
                                                          sample_output)
    print("Attention result shape: (batch size, units) {}".format(
        attention_result.shape))
    print("Attention weights shape: (batch_size, sequence_length, 1) {}".format(
        attention_weights.shape))

    # Decoder
    decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
    sample_decoder_output, _, _, _ = decoder(tf.random.uniform((BATCH_SIZE, 1)),
                                             sample_hidden, sample_output)
    print('Decoder output shape: (batch_size, vocab size) {}'.format(
        sample_decoder_output.shape))
| true |
8e525259a1b13647c64a6f944f91649df6b2d9b6 | Python | videan42/cs280_final_project | /annotate_db.py | UTF-8 | 4,542 | 2.640625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python2
# Standard lib
import os
import json
import argparse
# 3rd party
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# Constants
THISDIR = os.path.dirname(os.path.realpath(__file__))
# Class
class ImageTagger(object):
    """Interactive matplotlib tool for drawing bounding-box tags on images.

    Boxes are drawn with the mouse; space/enter closes the window keeping the
    boxes, 'd' closes it and deletes the image.  All tags are persisted to a
    ``tags.json`` file inside the image directory, keyed by image basename
    and tag name.
    """
    def __init__(self, imgdir):
        # Directory holding the images and the tags.json sidecar file.
        self.imgdir = imgdir
        self.tagfile = os.path.join(imgdir, 'tags.json')
        self.tags = None
        # Transient per-image UI state (figure, axes, drag-in-progress box).
        self._fig = None
        self._ax = None
        self._mouse_down = None
        self._mouse_rects = []
        self._mouse_cur = None
        self._status = None
        self._bbox = []
    def load_tags(self):
        """Load tags.json into ``self.tags`` (no-op if already loaded)."""
        if self.tags is not None:
            return
        if os.path.isfile(self.tagfile):
            with open(self.tagfile, 'rt') as fp:
                tags = json.load(fp)
        else:
            tags = {}
        self.tags = tags
    def save_tags(self):
        """Write ``self.tags`` back to tags.json (no-op if never loaded)."""
        if self.tags is None:
            return
        with open(self.tagfile, 'wt') as fp:
            # NOTE(review): json.dump returns None; this assignment is dead.
            tags = json.dump(self.tags, fp)
    def on_mouse_down(self, event):
        """Left-button press inside the axes starts a new rectangle."""
        if self._mouse_down is None and event.button == 1 and event.inaxes:
            self._mouse_down = (event.xdata, event.ydata)
            b0, b1 = self._mouse_down
            self._mouse_cur = self._ax.add_patch(
                plt.Rectangle((b0, b1), 1, 1, fill=False,
                              edgecolor='red', linewidth=3.5))
    def on_mouse_up(self, event):
        """Button release finalizes the in-progress rectangle into _bbox."""
        if self._mouse_down is not None and event.inaxes:
            sx, sy = self._mouse_down
            ex, ey = (event.xdata, event.ydata)
            self._bbox.append((sx, sy, ex, ey))
            self._mouse_rects.append(self._mouse_cur)
            self._mouse_cur = None
        # Reset drag state even when the release lands outside the axes.
        self._mouse_down = None
        self._mouse_cur = None
    def on_mouse_move(self, event):
        """While dragging, resize the in-progress rectangle and redraw."""
        if self._mouse_down is not None and event.inaxes:
            b0, b1 = self._mouse_down
            b2, b3 = (event.xdata, event.ydata)
            self._mouse_cur.set_width(b2 - b0)
            self._mouse_cur.set_height(b3 - b1)
            self._mouse_cur.figure.canvas.draw()
    def on_key_press(self, event):
        """Space/enter accepts the image; 'd' marks it for deletion."""
        if event.key in (' ', '\n', '\r', '\r\n'):
            plt.close()
        if event.key in ('d', ):
            plt.close()
            self._status = 'delete'
    def tag_img(self, imgfile, tag):
        """Display *imgfile* for interactive tagging; save or delete on close."""
        print('tagging {}: {}'.format(tag, imgfile))
        imgname = os.path.basename(imgfile)
        img_tags = self.tags.get(imgname, {})
        bbox = img_tags.get(tag, [])
        # Reset per-image UI state, starting from any previously saved boxes.
        self._bbox = bbox
        self._mouse_down = None
        self._mouse_rects = []
        self._mouse_cur = None
        self._ax = None
        self._fig = None
        self._status = None
        img = np.asarray(Image.open(imgfile))
        self._fig, self._ax = plt.subplots(1, 1, figsize=(16, 16))
        self._ax.imshow(img, aspect='equal')
        # Draw the already-saved boxes for this tag.
        for b0, b1, b2, b3 in self._bbox:
            self._ax.add_patch(
                plt.Rectangle((b0, b1), b2-b0, b3-b1, fill=False,
                              edgecolor='red', linewidth=3.5))
        self._fig.canvas.mpl_connect('button_press_event', self.on_mouse_down)
        self._fig.canvas.mpl_connect('button_release_event', self.on_mouse_up)
        self._fig.canvas.mpl_connect('motion_notify_event', self.on_mouse_move)
        self._fig.canvas.mpl_connect('key_press_event', self.on_key_press)
        # Blocks until the figure window is closed by a key handler above.
        plt.show()
        if self._status == 'delete':
            print('Removing: {}'.format(imgfile))
            os.remove(imgfile)
            if imgname in self.tags:
                del self.tags[imgname]
        else:
            img_tags[tag] = self._bbox
            self.tags[imgname] = img_tags
    def tag_all(self, tag):
        """Tag every .jpg/.jpeg in ``self.imgdir`` and persist the results."""
        self.load_tags()
        imgs = [os.path.join(self.imgdir, tf)
                for tf in os.listdir(self.imgdir)
                if tf.lower().endswith(('.jpg', '.jpeg'))]
        for imgfile in imgs:
            self.tag_img(imgfile, tag)
        self.save_tags()
# Functions
def parse_args(args=None):
    """Parse the two positional CLI arguments: animal name and tag name."""
    cli = argparse.ArgumentParser()
    for positional in ('animal', 'tag'):
        cli.add_argument(positional)
    return cli.parse_args(args=args)
def main(args=None):
    """CLI entry point: tag all validation images for the requested animal."""
    parsed = parse_args(args=args)
    animal = parsed.animal.lower().strip()
    tag = parsed.tag.lower().strip()
    # Images live under images/val/<first-letter>/<animal>/.
    image_dir = os.path.join(THISDIR, 'images', 'val', animal[0], animal)
    ImageTagger(image_dir).tag_all(tag)
# Standard script entry point.
if __name__ == '__main__':
    main()
| true |
747de288e536179e0b844baf4518337849e461d8 | Python | ramyasutraye/Guvi_Python | /set4/31.py | UTF-8 | 79 | 3.15625 | 3 | [] | no_license | a=input("Enter the String:").split(' ')
# Print the number of non-space characters (the tokens joined back together).
print(len("".join(str(x) for x in a)))
| true |
a49b2906de30bf70e8cb9bfff79b882ea1ae90be | Python | Kal103/Algorithm | /string/count_char_in_string.py | UTF-8 | 158 | 3.03125 | 3 | [] | no_license | s=str(input())
# For each distinct character (in order of first appearance): count its
# occurrences, record the count, then strip it from the string.
ans=[]
# len(set(s)) is evaluated once up front, so the loop runs exactly once per
# distinct character of the original input.
for i in range(len(set(s))):
    ans.append(s.count(s[0]))
    s=s.replace(s[0],"")
print(ans)
"""
input:
aaabbc
output:
3 2 1
"""
| true |
d491b7668c25d6276ef5d0e24c71dd97b5d8f9fa | Python | oplatek/tdb | /tdb/debug_session.py | UTF-8 | 6,803 | 2.625 | 3 | [
"Apache-2.0"
] | permissive |
from .ht_op import HTOp
from . import op_store
import tensorflow as tf
# debug status codes
INITIALIZED = 'INITIALIZED'  # session created, run() not yet called
RUNNING = 'RUNNING'          # stepping/continuing through the graph
PAUSED = 'PAUSED'            # stopped at a breakpoint
FINISHED = 'FINISHED'        # every node in the execution order evaluated
class DebugSession(object):
    """Stepping debugger wrapped around a tf.Session.

    Evaluates the graph node-by-node in a precomputed execution order,
    caching intermediate values so execution can pause at breakpoints and
    resume.  Public entry points: run(), s() (single step), c() (continue).
    State transitions follow the module-level status constants.
    """
    def __init__(self, session=None):
        super(DebugSession, self).__init__()
        if session is None:
            session = tf.InteractiveSession()
        self.step = 0 # index into execution order
        self.session = session
        self.state = INITIALIZED
        self._original_evals = [] # evals passed into self.debug, in order
        self._evalset = set() # string names to evaluate
        self._bpset = set() # breakpoint names
        self._cache = {} # key: node names in evalset -> np.ndarray
        self._exe_order = [] # list of HTOps, tf.Tensors to be evaluated
    ###
    # PUBLIC METHODS
    ###
    def run(self, evals, feed_dict=None, breakpoints=None,
            break_immediately=False):
        """
        starts the debug session
        """
        if not isinstance(evals, list):
            evals = [evals]
        if feed_dict is None:
            feed_dict = {}
        if breakpoints is None:
            breakpoints = []
        self.state = RUNNING
        self._original_evals = evals
        self._original_feed_dict = feed_dict
        self._exe_order = op_store.compute_exe_order(evals)
        self._init_evals_bps(evals, breakpoints)
        # convert cache keys to strings so tensors and names share one keyspace
        for k, v in feed_dict.items():
            if not isinstance(k, str):
                k = k.name
            self._cache[k] = v
        op_store.register_dbsession(self)
        if break_immediately:
            return self._break()
        else:
            return self.c()
    def s(self):
        """
        step to the next node in the execution order
        """
        next_node = self._exe_order[self.step]
        self._eval(next_node)
        self.step += 1
        if self.step == len(self._exe_order):
            return self._finish()
        else:
            # if stepping, return the value of the node we just
            # evaled
            return self._break(value=self._cache.get(next_node.name))
    def c(self):
        """
        continue
        """
        # NOTE(review): c() recurses once per remaining eval node; a very
        # long execution order could hit Python's recursion limit.
        i, node = self._get_next_eval()
        if node.name in self._bpset:
            if self.state == RUNNING:
                return self._break()
        self.state = RUNNING
        self._eval(node)
        # increment to next node
        self.step = i+1
        if self.step < len(self._exe_order):
            return self.c()
        else:
            return self._finish()
    def get_values(self):
        """
        returns final values (same result as tf.Session.run())
        """
        return [self._cache.get(i.name, None) for i in self._original_evals]
    def get_exe_queue(self):
        """Return the nodes that have not been evaluated yet, in order."""
        return self._exe_order[self.step:]
    def get_value(self, node):
        """
        retrieve a node value from the cache
        """
        if isinstance(node, tf.Tensor):
            return self._cache.get(node.name, None)
        elif isinstance(node, tf.Operation):
            return None
        else: # handle ascii, unicode strings
            return self._cache.get(node, None)
    ###
    # PRIVATE METHODS
    ###
    def _cache_value(self, tensor, ndarray):
        """
        store tensor ndarray value in cache. this is called by python ops
        """
        self._cache[tensor.name] = ndarray
    def _init_evals_bps(self, evals, breakpoints):
        """Build the eval/breakpoint name sets used during execution."""
        # If an eval or bp is the tf.Placeholder output of a
        # tdb.PythonOp, replace it with its respective PythonOp node
        evals2 = [op_store.get_op(t) if op_store.is_htop_out(t)
                  else t for t in evals]
        breakpoints2 = [op_store.get_op(t) if op_store.is_htop_out(t)
                        else t for t in breakpoints]
        # compute execution order
        self._exe_order = op_store.compute_exe_order(evals2) # list of nodes
        # compute evaluation set
        """
        HTOps may depend on tf.Tensors that are not in eval. We need
        to have all inputs to HTOps ready upon evaluation.
        1. all evals that were originally specified are added
        2. each HTOp in the execution closure needs to be in eval
        (they won't be eval'ed automatically by Session.run)
        3. if an input to an HTOp is a tf.Tensor (not a HT placeholder tensor)
        it needs to be in eval as well (it's not tensorflow so we'll have
        to manually evaluate it). Remember, we don't track Placeholders
        because we instead run the HTOps that generate their values.
        """
        self._evalset = set([e.name for e in evals2])
        for e in self._exe_order:
            if isinstance(e, HTOp):
                self._evalset.add(e.name)
                for t in e.inputs:
                    if not op_store.is_htop_out(t):
                        self._evalset.add(t.name)
        # compute breakpoint set
        self._bpset = set([bp.name for bp in breakpoints2])
    def _get_next_eval(self):
        """Return (index, node) of the next node needing evaluation.

        Raises StopIteration if no remaining node is in the eval/bp sets.
        """
        n = len(self._exe_order)
        o = self._exe_order
        return next((i, o[i]) for i in range(self.step, n) if
                    (o[i].name in self._evalset or o[i].name in self._bpset))
    def _eval(self, node):
        """
        node is a TensorFlow Op or Tensor from self._exe_order
        """
        # if node.name == 'Momentum':
        # 	db.set_trace()
        if isinstance(node, HTOp):
            # All Tensors MUST be in the cache.
            feed_dict = dict((t, self._cache[t.name]) for t in node.inputs)
            node.run(feed_dict) # this will populate self._cache on its own
        else: # is a TensorFlow node
            if isinstance(node, tf.Tensor):
                result = self.session.run(node, self._cache)
                self._cache[node.name] = result
            else:
                # is an operation
                if (node.type == 'Assign' or node.type == 'AssignAdd'
                    or node.type == 'AssignSub'):
                    # special operation that takes in a tensor ref and
                    # mutates it unfortunately, we end up having to execute
                    # nearly the full graph? alternatively, find a way to pass
                    # the tensor_ref thru the feed_dict rather than the tensor
                    # values.
                    self.session.run(node, self._original_feed_dict)
    def _break(self, value=None):
        """Pause the session and report the next node to be evaluated."""
        self.state = PAUSED
        i, next_node = self._get_next_eval()
        print('Breakpoint triggered. Next Node: ', next_node.name)
        return (self.state, value)
    def _finish(self):
        """Mark the session finished and return the final eval values."""
        self.state = FINISHED
        return (self.state, self.get_values())
| true |
472ed9779c54a9dbd2d6c75eab3d9b01ca2da715 | Python | bdcolosi/pythonexercises | /tip_calculator.py | UTF-8 | 391 | 3.734375 | 4 | [] | no_license | bill_amount = int(input("How much was the bill? "))
# Prompt for the service quality ("good", "fair" or "bad").
service_level = input("Level of service? ")
def service(service_level):
    """Print the bill total (module-level bill_amount plus tip) for the level.

    Unrecognised levels print nothing, as in the original.
    """
    # Tip rate per recognised service level.
    rates = {"good": .2, "fair": .15, "bad": .1}
    if service_level in rates:
        print(rates[service_level] * bill_amount + bill_amount)
service(service_level) | true |
61185649ce31951fba9feb746a5659db53b5f3fa | Python | akshala/Data-Structures-and-Algorithms | /graph/journey_2.py | UTF-8 | 1,068 | 3.21875 | 3 | [] | no_license | class Graph:
    def __init__(self, n):
        # n: number of vertex slots (the script below passes n+1 -
        # presumably for 1-indexed vertex ids; confirm against the input).
        self.vertices=n
        self.graph={}
        # NOTE(review): incomingGraph is never used anywhere in this file.
        self.incomingGraph={}
        self.ans = 0
def addEdge(self, u, v):
if u in self.graph.keys() and v not in self.graph.values():
self.graph[u].append(v)
else:
self.graph[u]=[v]
def remainingEdge(self):
for vertex in range(0, self.vertices):
if vertex not in self.graph:
self.graph[vertex]=[]
def journey_call(self):
visited = []
for i in range(0, self.vertices):
visited.append(False)
journey(0, 0, 1, visited)
def journey(self, vertex, distance, prob, visited):
print("yes")
visited[vertex] = True
children = 0
for neighbour in self.graph[u]:
if(not visited[neighbour]):
children += 1
print(children)
for neighbour in self.graph[u]:
if(not visited[neighbour]):
journey(neighbour, distance + 1, prob/children, visited)
if(children == 0):
print(ans)
self.ans += p*d
# Read the vertex count and the n-1 edges of the tree, then run the walk.
n = int(input())
g = Graph(n+1)
for i in range(0, n-1):
    a = input()
    a = list(map(int, a.split()))
    u = a[0]
    v = a[1]
    g.addEdge(u, v)
# Make sure isolated vertices have adjacency lists before traversing.
g.remainingEdge()
# Bug fix: the original read ``g.journey_call`` without parentheses, which
# never invoked the traversal.
g.journey_call()
print(g.ans)
| true |
c8d594008a7f01e9e8ab7b47aef469a135d90e15 | Python | samsonleegh/poem_generator | /scripts/RNN_utils.py | UTF-8 | 8,128 | 3.34375 | 3 | [] | no_license | from __future__ import print_function
import numpy as np
from random import random
# method for generating text, using model
def generate_text(model, length, vocab_size, ix_to_char, use_subwords, temp = 0.8, end_symbol = "$"):
    """Sample up to *length* symbols from *model*, starting from a random one.

    With probability *temp* the next symbol is drawn from the predicted
    distribution; otherwise the most probable symbol is taken.  Generation
    stops early when *end_symbol* is produced.  Returns the generated text
    (symbols joined with a space for subword models, nothing for char models).
    """
    # starting with random character
    ix = np.random.randint(vocab_size)
    y_char = [ix_to_char[ix]]
    X = np.zeros((1, length, vocab_size))
    if use_subwords:
        end = ' '
    else:
        end = ''
    for i in range(length):
        # appending the last predicted character to sequence
        X[0, i, :][ix] = 1
        print(ix_to_char[ix], end=end)
        pred = model.predict(X[:, :i+1, :])[0]
        rand_nr = random()
        if temp > rand_nr: # next symbol predicted based on distribution
            ix = np.random.choice(np.arange(vocab_size), p = pred[-1]) # Chooses prediction with probability of next char
        else: # Most probable char
            # Bug fix: reuse ``pred`` instead of running a second, identical
            # model.predict() call just to take the argmax.
            ix = np.argmax(pred, 1)[-1] # Only last index needed
        if end_symbol == ix_to_char[ix]:
            break
        y_char.append(ix_to_char[ix])
    return (end).join(y_char)
# Read data and generate vocabulary
def load_vocabulary(data_dir, seq_length, batch_size, use_subwords):
    """Read *data_dir* and build the char/subword vocabulary.

    Returns (vocab_size, ix_to_char, char_to_ix, steps_per_epoch, data) where
    *data* is the raw text (or the list of subwords when *use_subwords*).
    """
    # Bug fix: the original ``open(...).read()`` never closed the file handle.
    with open(data_dir, 'r', encoding="utf-8") as fp:
        data = fp.read()
    if use_subwords: # Split data into subwords
        data = data.split()
    chars = sorted(list(set(data))) # get possible chars
    VOCAB_SIZE = len(chars)
    print('Data length: {} chars/subwords'.format(len(data)))
    print('Vocabulary size: {} chars/subwords'.format(VOCAB_SIZE))
    ix_to_char = {ix:char for ix, char in enumerate(chars)} # index to char map # can also be subwords here
    char_to_ix = {char:ix for ix, char in enumerate(chars)} # char to index map
    steps_per_epoch = int(len(data)/seq_length/batch_size)
    return VOCAB_SIZE, ix_to_char, char_to_ix, steps_per_epoch, data
# Load vocabulary poem by poem
def load_vocabulary_poem(data_dir, batch_size, poem_end, use_subwords, end_symbol):
    """Read *data_dir*, split it into poems, and build the vocabulary.

    *end_symbol* is appended to the vocabulary as the padding/end marker.
    Returns (vocab_size, ix_to_word, word_to_ix, steps_per_epoch, data).
    """
    # Bug fix: the original ``open(...).read()`` never closed the file handle.
    with open(data_dir, 'r', encoding="utf-8") as fp:
        data = fp.read()
    poems = data.split(poem_end) # list with all the poems in data
    poems = [s for s in poems if len(s) >= 2] # Leave out empty poems.
    # NOTE(review): seq_length is computed but neither returned nor used here.
    seq_length = len(max(poems, key=len)) + 1 # +1 so the longest poem has also end symbol
    if use_subwords: # Split data into subwords
        data_new = data.split() # Later initial data needed
        chars = sorted(list(set(data_new))) # get possible subwords
    else:
        chars = sorted(list(set(data))) # get possible chars
    chars.append(end_symbol)
    VOCAB_SIZE = len(chars)
    print('Data length: {} poems'.format(len(poems)))
    print('Vocabulary size: {} chars/subwords'.format(VOCAB_SIZE))
    ix_to_word = {ix:char for ix, char in enumerate(chars)} # index to char map
    word_to_ix = {char:ix for ix, char in enumerate(chars)} # char to index map
    steps_per_epoch = int(len(poems)/batch_size) # One poem per batch
    print("Steps per epoch:", steps_per_epoch)
    return VOCAB_SIZE, ix_to_word, word_to_ix, steps_per_epoch, data
# Read in data by batches, atm only for char-to-char
def data_generator(data, seq_length, batch_size, steps_per_epoch):
    """Yield (X, y) one-hot batches over *data* forever (char-to-char).

    X has shape (batch_size, seq_length, vocab); y is X shifted left by one
    character.  After *steps_per_epoch* batches the read cursor wraps back
    to the start of *data*, so the generator can loop indefinitely.
    """
    chars = sorted(list(set(data))) # get possible chars
    VOCAB_SIZE = len(chars)
    ix_to_char = {ix:char for ix, char in enumerate(chars)} # index to char map
    char_to_ix = {char:ix for ix, char in enumerate(chars)} # char to index map
    batch_nr = 0
    while True:
        X = np.zeros((batch_size, seq_length, VOCAB_SIZE)) # input data
        y = np.zeros((batch_size, seq_length, VOCAB_SIZE))
        pos_start = batch_nr*batch_size*seq_length # Continue where the previous batch left off
        for i in range(0, batch_size):
            X_sequence = data[pos_start + i*seq_length:pos_start + (i+1)*seq_length]
            X_sequence_ix = [char_to_ix[value] for value in X_sequence]
            input_sequence = np.zeros((seq_length, VOCAB_SIZE))
            for j in range(len(X_sequence)): # Last sequence otherwise shorter
                input_sequence[j][X_sequence_ix[j]] = 1.
            X[i] = input_sequence
            y_sequence = data[pos_start+i*seq_length+1:pos_start + (i+1)*seq_length+1] # next character, as we want to predict next character
            y_sequence_ix = [char_to_ix[value] for value in y_sequence]
            target_sequence = np.zeros((seq_length, VOCAB_SIZE))
            for j in range(len(y_sequence)):
                target_sequence[j][y_sequence_ix[j]] = 1.
            y[i] = target_sequence
        if batch_nr == (steps_per_epoch-1): # Because we start from zero
            batch_nr = 0 # Back to beginning - so we could loop indefinitely
        else:
            batch_nr += 1
        yield(X, y)
# Read in data in poem by poem
def data_generator_poem(data, batch_size, poem_end, use_subwords, end_symbol = "$"):
    """Yield (X, y) one-hot batches forever, one poem per batch slot.

    Every poem is right-padded with *end_symbol* up to the longest poem's
    length; X is the padded poem minus its last element and y is the same
    sequence shifted left by one.
    """
    poems = data.split(poem_end)
    poems = [s for s in poems if len(s) >= 2] # Leave out empty poems.
    # Get longest poem to set the sequence length
    seq_length = len(max(poems, key=len)) + 1 # +1 so the longest poem has also end symbol
    print("Subwords:", use_subwords)
    if use_subwords:
        data = data.split()
    chars = sorted(list(set(data))) # get possible chars/subwords
    chars.append(end_symbol)
    VOCAB_SIZE = len(chars)
    print('Data length: {} poems'.format(len(poems)))
    print('Vocabulary size: {} chars/subwords'.format(VOCAB_SIZE))
    ix_to_word = {ix:char for ix, char in enumerate(chars)} # index to char/subword map
    word_to_ix = {char:ix for ix, char in enumerate(chars)} # char/subword to index map
    batch_nr = 0
    steps_per_epoch = int(len(poems)/batch_size)
    # Generate data matrices
    while True:
        for i in range(0, batch_size):
            poem = poems[batch_nr*batch_size + i]
            if use_subwords:
                elements = poem.split()
                len_poem = len(elements)
                elements = elements + (seq_length-len(elements))*[end_symbol] # Add end_symbol + phantom symbols
            else:
                elements = poem
                len_poem = len(elements)
                elements = elements + (seq_length-len(elements))*end_symbol # Add end_symbol + phantom symbols
            #seq_length = len(elements) - 1 # One less to predict
            # NOTE(review): X and y are re-created here *inside* the per-poem
            # loop, so every earlier slot of the batch is zeroed again and
            # the yielded batch only carries the last poem's data; the
            # allocation probably belongs before the ``for i`` loop.
            X = np.zeros((batch_size, seq_length-1, VOCAB_SIZE)) # input data
            y = np.zeros((batch_size, seq_length-1, VOCAB_SIZE))
            X_sequence = elements[:-1] # Take all but last subword to learn
            X_sequence_ix = [word_to_ix[value] for value in X_sequence]
            input_sequence = np.zeros((seq_length-1, VOCAB_SIZE))
            for j in range(len(X_sequence)):
                inp = 1.
                # Phantom symbols
                # NOTE(review): this masks only the single position
                # len_poem + 2 (and len_poem in the target loop below), not
                # the whole padded tail - looks like an off-by-one; confirm.
                if j == len_poem + 2:
                    inp = 0.
                input_sequence[j][X_sequence_ix[j]] = inp
            X[i] = input_sequence # Batch size 1
            y_sequence = elements[1:] # Next subword to predict
            y_sequence_ix = [word_to_ix[value] for value in y_sequence]
            target_sequence = np.zeros((seq_length-1, VOCAB_SIZE))
            for j in range(len(y_sequence)):
                inp = 1.
                # Phantom symbols
                if j == len_poem:
                    inp = 0.
                target_sequence[j][y_sequence_ix[j]] = inp
            y[i] = target_sequence
        if batch_nr == (steps_per_epoch-1): # Because we start from zero (in case many epochs learnt together)
            batch_nr = 0 # Back to beginning - so we could loop indefinitely
        else:
            batch_nr += 1
        yield(X, y)
| true |
1cc6077fe53733223dd281f4ab8c1f28a44f3f39 | Python | rohitkeshav/stack_question_match | /classification.py | UTF-8 | 13,322 | 2.9375 | 3 | [] | no_license | # use MultinomialNB algorithm
import pandas as pd
import re
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_validate
from sklearn.linear_model import LogisticRegression
from nltk.tokenize import RegexpTokenizer
from sklearn import metrics
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report
# creating a general class for all the classifiers
# TODO: Ongoing
class ClassifyStackData:
    """Wrapper that trains a chosen classifier on Stack Overflow question data
    and predicts the language of ``cval``.

    NOTE(review): fit_data() hard-codes the ``title`` and ``p_lang`` columns
    and overwrites self.x / self.y, ignoring the ``text``/``label`` values
    captured in __init__ (the file marks this class "TODO: Ongoing").
    """
    # title, p_num
    def __init__(self, fname, text, label, cval):
        # fname: CSV path; text/label: column names; cval: text(s) to classify.
        self.stack_data = pd.read_csv(fname)
        self.text = text
        self.x = self.stack_data[text]
        self.y = self.stack_data[label]
        self.cval = cval
    def fit_data(self):
        """Stem/clean titles, split train/test, and build count-vector DTMs."""
        stemmer = SnowballStemmer('english')
        words = stopwords.words("english")
        self.x = self.stack_data.title.apply(
            lambda x: " ".join([stemmer.stem(i) for i in re.sub("[^a-zA-Z]", " ", x).split() if i not in words]).lower())
        #X = stack_data.title
        self.y = self.stack_data.p_lang
        x_train, x_test, y_train, y_test = train_test_split(self.x, self.y, random_state=2)
        c_vect = CountVectorizer(lowercase=True, stop_words='english')
        c_vect.fit(x_train)
        # NOTE(review): the fit() above is redundant - fit_transform() refits.
        x_train_dtm = c_vect.fit_transform(x_train)
        x_test_dtm = c_vect.transform(x_test)
        return c_vect, x_train_dtm, x_test_dtm, y_train, y_test
    def multinomial_nb(self):
        """Train and evaluate a Multinomial naive Bayes classifier."""
        nb = MultinomialNB()
        return self.predict(nb)
    def logistic_regression(self):
        """Train and evaluate a logistic-regression classifier."""
        lr = LogisticRegression()
        return self.predict(lr)
    def linear_svc(self):
        """Train and evaluate a linear SVM classifier."""
        lsv = LinearSVC()
        return self.predict(lsv)
    def check_text(self):
        """Wrap ``cval`` (a string or a sequence) in a one-column DataFrame."""
        if type(self.cval) == str:
            return pd.DataFrame({self.text: [self.cval]}, index=[0])
        return pd.DataFrame({self.text: self.cval}, index=[idx for idx in range(len(self.cval))])
    def predict(self, c_obj):
        """Fit *c_obj* on the training DTM; print cval predictions and accuracy."""
        vect, x_train_dtm, x_test_dtm, y_train, y_test = self.fit_data()
        c_obj.fit(x_train_dtm, y_train)
        y_pred_class = c_obj.predict(x_test_dtm)
        print(c_obj.predict(vect.transform(self.check_text())))
        print(metrics.accuracy_score(y_test, y_pred_class))
def multinomial(data):
    """Train/evaluate a TF-IDF + MultinomialNB pipeline on the title column.

    Adds a stemmed, stopword-free 'cleaned' column to *data* as a side effect.
    """
    snow = SnowballStemmer('english')
    stop_set = stopwords.words("english")
    # Strip non-letters, drop stopwords, stem, and lowercase each title.
    data['cleaned'] = data['title'].apply(
        lambda x: " ".join([snow.stem(i) for i in re.sub("[^a-zA-Z]", " ", x).split() if i not in stop_set]).lower())
    train_x, test_x, train_y, test_y = train_test_split(data['cleaned'], data.p_lang, test_size=0.1)
    model = Pipeline([('vect', TfidfVectorizer(ngram_range=(1, 2), stop_words="english", sublinear_tf=True)),
                      ('clf', MultinomialNB(alpha=1, class_prior=None, fit_prior=True))]).fit(train_x, train_y)
    predicted = model.predict(test_x)
    print(predicted)
    print("accuracy score - Multinomial: " + str(model.score(test_x, test_y)))
    print('Confusion Matrix - MultinomialNB - ','\n',metrics.confusion_matrix(test_y,predicted))
    # print('Classification Report - MultinomialNB - ','\n',classification_report(test_y,predicted))
def linear_svc(data, ques):
    """Train a TF-IDF + chi2 + LinearSVC pipeline and classify *ques*.

    Prints evaluation output and returns the predicted label for *ques*.
    Adds a stemmed 'cleaned' column to *data* as a side effect.
    """
    snow = SnowballStemmer('english')
    stop_set = stopwords.words("english")
    data['cleaned'] = data['title'].apply(
        lambda x: " ".join([snow.stem(i) for i in re.sub("[^a-zA-Z]", " ", x).split() if i not in stop_set]).lower())
    train_x, test_x, train_y, test_y = train_test_split(data['cleaned'], data.p_lang, test_size=0.1)
    stages = [('vect', TfidfVectorizer(ngram_range=(1, 2), stop_words="english", sublinear_tf=True)),
              ('chi', SelectKBest(chi2, k=1500)),
              ('clf', LinearSVC(C=1.0, penalty='l2', max_iter=3000, dual=False, random_state=0))]
    model = Pipeline(stages).fit(train_x, train_y)
    predicted = model.predict(test_x)
    print(predicted)
    print("accuracy score - LinearSVC: " + str(model.score(test_x, test_y)))
    print('Confusion Matrix - LinearSVC - ', '\n',metrics.confusion_matrix(test_y,predicted))
    print('Classification Report - LinearSVC - ', '\n',classification_report(test_y,predicted))
    # Classify the caller-supplied question and return its predicted label.
    return model.predict([ques])[0]
def logistic_regression(data):
    """Train/evaluate a TF-IDF + chi2 + LogisticRegression pipeline.

    Adds a stemmed 'cleaned' column to *data* as a side effect.
    """
    stemmer = SnowballStemmer('english')
    words = stopwords.words("english")
    data['cleaned'] = data['title'].apply(lambda x: " ".join([stemmer.stem(i) for i in re.sub("[^a-zA-Z]", " ", x).split() if i.lower() not in words]).lower()) #mine
    X_train, X_test, y_train, y_test = train_test_split(data['cleaned'], data.p_lang, test_size=0.1)
    pipeline = Pipeline([('vect', TfidfVectorizer(ngram_range=(1, 2), stop_words="english", sublinear_tf=True)),
                         ('chi', SelectKBest(chi2, k=1500)),
                         ('clf', LogisticRegression())])
    model = pipeline.fit(X_train, y_train)
    clas_pred = model.predict(X_test)
    print(clas_pred)
    # Bug fix: these report lines were copy-pasted from a decision-tree
    # variant and mislabelled the model being evaluated.
    print("accuracy score - LogisticRegression: " + str(model.score(X_test, y_test)))
    print('Confusion Matrix - LogisticRegression - ','\n',metrics.confusion_matrix(y_test,clas_pred))
    print('Classification Report - LogisticRegression - ','\n',classification_report(y_test,clas_pred))
def __try():
    """Scratch experiment: MultinomialNB on stemmed titles from ./data_set.csv,
    predicting the ``p_num`` column; prints the test accuracy.
    """
    # NOTE(review): ``features`` is never used.
    features = ['p_lang', 'title', 'p_num']
    stack_data = pd.read_csv('./data_set.csv')
    # define X, y
    stemmer = SnowballStemmer('english')
    words = stopwords.words("english")
    X = stack_data.title.apply(
        lambda x: " ".join([stemmer.stem(i) for i in re.sub("[^a-zA-Z]", " ", x).split() if i not in words]).lower())
    #X = stack_data.title
    y = stack_data.p_num
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2)
    vect = CountVectorizer(lowercase=True, stop_words='english')
    vect.fit(X_train)
    # transform training data
    X_train_dtm = vect.fit_transform(X_train)
    X_test_dtm = vect.transform(X_test)
    nb = MultinomialNB()
    nb.fit(X_train_dtm, y_train)
    y_pred_class = nb.predict(X_test_dtm)
    # print(nb.predict(vect.transform(testcase())))
    print(metrics.accuracy_score(y_test, y_pred_class))
# Linear SVC
def _c_try(testdata = None):
    """LinearSVC baseline on the stack-question data set.

    Prints the unique language labels and held-out accuracy. If *testdata*
    (a DataFrame with a 'title' column) is given, its titles are cleaned the
    same way and classified with the fitted model.
    """
    stack_data = pd.read_csv('./data_set.csv')
    stemmer = SnowballStemmer('english')
    words = stopwords.words("english")
    X = stack_data.title.apply(
        lambda x: " ".join([stemmer.stem(i) for i in re.sub("[^a-zA-Z]", " ", x).split() if i not in words]).lower())
    y = stack_data.p_num
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2)
    vect = CountVectorizer(lowercase=True, stop_words='english')
    X_train_dtm = vect.fit_transform(X_train)  # single fit+transform (was fit, then fit_transform)
    X_test_dtm = vect.transform(X_test)
    lsv = LinearSVC()
    lsv.fit(X_train_dtm, y_train)
    y_pred_class = lsv.predict(X_test_dtm)
    print('hey')
    print(stack_data.p_lang.unique())
    print(metrics.accuracy_score(y_test, y_pred_class))
    # BUG FIX: `if testdata:` raises "truth value of a DataFrame is ambiguous";
    # the sibling _c_try_mod already uses the identity test.
    if testdata is not None:
        testdata.title = testdata.title.apply(lambda x: " ".join([stemmer.stem(i) for i in re.sub("[^a-zA-Z]", " ", x).split() if i not in words]).lower())
        # BUG FIX: transform with the already-fitted vocabulary; fit_transform
        # here refitted the vectorizer on the test titles, producing a matrix
        # that is incompatible with the trained model.
        print(lsv.predict(vect.transform(testdata.title)))
# Random forest
def _d_try():
    """Random-forest baseline on the raw (uncleaned) question titles.

    Prints the test-set predictions and accuracy.
    """
    stack_data = pd.read_csv('./data_set.csv')
    X = stack_data.title
    y = stack_data.p_num
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2)
    vect = CountVectorizer(lowercase=True, stop_words='english')
    X_train_dtm = vect.fit_transform(X_train)
    X_test_dtm = vect.transform(X_test)
    rf = RandomForestClassifier(n_estimators= 1000, random_state= 40)
    # BUG FIX: the original vectorized the *labels* (y) with CountVectorizer
    # and then predicted on the raw text series instead of the document-term
    # matrix; both steps fail / produce nonsense. Fit on labels as-is and
    # predict on the transformed test matrix.
    rf.fit(X_train_dtm, y_train)
    prediction = rf.predict(X_test_dtm)
    print(prediction)
    # BUG FIX: the trailing prints referenced undefined names (`model`,
    # `clas_pred`) and mislabelled the classifier; report the forest's score.
    print("accuracy score - RandomForest: " + str(metrics.accuracy_score(y_test, prediction)))
# logistic regression
"""
def __logistic_regression(df):
stack_data = pd.read_csv('./data_set.csv')
# define X, y
stemmer = SnowballStemmer('english')
words = stopwords.words("english")
X = stack_data.title.apply(
lambda x: " ".join([stemmer.stem(i) for i in re.sub("[^a-zA-Z]", " ", x).split() if i not in words]).lower())
#X = stack_data.title
y = stack_data.p_num
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2)
vect = CountVectorizer(lowercase=True, stop_words='english')
vect.fit(X_train)
# transform training data
X_train_dtm = vect.fit_transform(X_train)
X_test_dtm = vect.transform(X_test)
nb = LogisticRegression()
nb.fit(X_train_dtm, y_train)
y_pred_class = nb.predict(X_test_dtm)
print(nb.predict(vect.transform(test_case())))
print(metrics.accuracy_score(y_test, y_pred_class))
"""
# Neural Nets
def __nn_try(hidden_layer_size):
    """MLP baseline; *hidden_layer_size* is an int or tuple of layer widths.

    Cleans titles, fits an lbfgs MLPClassifier on bag-of-words features and
    prints the layer spec together with the held-out accuracy.
    """
    stack_data = pd.read_csv('./data_set.csv')
    stemmer = SnowballStemmer('english')
    words = stopwords.words("english")
    X = stack_data.title.apply(
        lambda x: " ".join([stemmer.stem(i) for i in re.sub("[^a-zA-Z]", " ", x).split() if i not in words]).lower())
    y = stack_data.p_num
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2)
    vect = CountVectorizer(lowercase=True, stop_words='english')
    X_train_dtm = vect.fit_transform(X_train)  # single fit+transform (was fit, then fit_transform)
    X_test_dtm = vect.transform(X_test)
    clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes = hidden_layer_size, random_state = 1)
    clf.fit(X_train_dtm, y_train)
    y_pred_class = clf.predict(X_test_dtm)
    print(hidden_layer_size, metrics.accuracy_score(y_test, y_pred_class))
def _c_try_mod(testdata=None):
    """LinearSVC with RegexpTokenizer-based cleaning against p_lang.

    Prints the held-out accuracy and, when *testdata* (DataFrame with a
    'title' column) is given, one "title | predicted as : label" line per row.
    """
    print('\n\n Modded SVM code')
    stemmer = SnowballStemmer('english')
    tokenizer = RegexpTokenizer(r'\w+')
    # PERF FIX: the original rebuilt the NLTK stopword list for every token
    # via stopwords.words(...) inside the lambda; hoist it once into a set.
    stopset = set(stopwords.words("english"))

    def _clean(series):
        # One helper so train and test titles get byte-identical cleaning.
        return series.apply(lambda x: ' '.join(
            [stemmer.stem(tok.lower()) for tok in tokenizer.tokenize(x) if tok.lower() not in stopset]))

    stack_data = pd.read_csv('./data_set.csv')
    X = _clean(stack_data.title)
    y = stack_data.p_lang
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2)
    vect = CountVectorizer(lowercase=True)
    X_train_dtm = vect.fit_transform(X_train)  # single fit+transform (was fit, then fit_transform)
    X_test_dtm = vect.transform(X_test)
    lsv = LinearSVC()
    lsv.fit(X_train_dtm, y_train)
    y_pred_class = lsv.predict(X_test_dtm)
    print(metrics.accuracy_score(y_test, y_pred_class))
    if testdata is not None:
        test_case = _clean(testdata.title)
        for title, label in zip(testdata.title.values, lsv.predict(vect.transform(test_case))):
            print(title, ' | predicted as : ', label)
def testcase(stringList):
return pd.DataFrame({'title': stringList}, index=[i for i in range(len(stringList))])
# if __name__ == "__main__":
# df = pd.read_csv("data_set.csv")
# print('Sample analysis -')
# # multinomial(df)
# __nn_try((20)) # Pass number and size of hidden layers as tuple inputs
# __nn_try((40))
# __nn_try((20, 10))
# __nn_try((10, 5))
# __nn_try((20, 10, 5))
# # linear_svc(df)
# # decision_tree(df)
# print('Classifiers -')
#
# print(logistic_regression(df))
# csd = ClassifyStackData('./data_set.csv', 'title', 'p_num', [''])
# print(csd.multinomial_nb())
# print(csd.logistic_regression())
# print(csd.linear_svc())
# Entry point: trains/evaluates the tweaked LinearSVC pipeline on run/import.
print(_c_try_mod())
| true |
52e52e06a68e91824ad7929eded8f09868e2d2d7 | Python | alan010/MyBrain | /cell_maker.py | UTF-8 | 1,337 | 2.875 | 3 | [] | no_license | #! /usr/bin/python
import sys, os, random
# Template file every generated cell is instantiated from.
TEMP='/root/MyBrain/cell_temp.py'
# Root directory under which generated cells are stored.
BASIC_DIR='/MyBrain'
def open_temp(cell_temp):
    """Return the template file's contents as a list of lines (no newlines)."""
    # Use a context manager so the handle is closed; the original leaked it.
    with open(cell_temp) as handle:
        return handle.read().splitlines()
def gen_axon():
    """Pick a random cell path under BASIC_DIR that is not already taken.

    Layout: BASIC_DIR/<0-255>/<0-255>/<0-255>/cell_<0-65535>.
    """
    while True:
        octets = [str(random.randint(0, 255)) for _ in range(3)]
        folder = '/'.join([BASIC_DIR] + octets)
        candidate = folder + '/' + 'cell_' + str(random.randint(0, 65535))
        if not os.path.isfile(candidate):
            return candidate
def set_cell(opened_temp, axon, dendrons, data):
    """Fill the placeholder lines of a cell template, in place.

    *opened_temp* is mutated: the lines reading exactly 'axon', 'dendrons'
    and 'data' are replaced by assignments embedding the given values.
    Raises ValueError if a placeholder is missing (list.index semantics).
    """
    # BUG FIX: the original used Python-2-only `print x` statements; the
    # parenthesized form below works under both Python 2 and 3.
    index_axon = opened_temp.index('axon')
    print(index_axon)
    index_dendrons = opened_temp.index('dendrons')
    print(index_dendrons)
    index_data = opened_temp.index('data')
    print(index_data)
    opened_temp[index_axon] = "axon = '" + axon + "'"
    opened_temp[index_dendrons] = 'dendrons = ' + dendrons
    opened_temp[index_data] = 'data = ' + data
def make_cell(axon, temp):
    """Write the template lines to *axon*, creating parent directories first.

    Each entry of *temp* becomes one newline-terminated line of the file.
    """
    # os.makedirs replaces the shell call `os.system('mkdir -p ' + ...)`:
    # portable and not vulnerable to shell metacharacters in the path.
    target_dir = os.path.dirname(axon)
    if target_dir and not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    with open(axon, 'w') as cell_file:
        for line in temp:
            cell_file.write(line + '\n')
#----------- main --------------
# Usage: cell_maker.py <dendrons> <data>   (Python 2 script; argv values are
# spliced verbatim into the generated cell file.)
opened_cell_temp = open_temp(TEMP)
new_cell_axon = gen_axon()
# Python-2 print statement: echoes the freshly generated cell path.
print new_cell_axon
set_cell(opened_cell_temp, new_cell_axon, sys.argv[1], sys.argv[2])
make_cell(new_cell_axon, opened_cell_temp)
| true |
b6a2ee13fa7304ba1f0afeeed1dcf7efb215a43c | Python | jell0213/2048game | /2048game.py | UTF-8 | 14,501 | 2.71875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 26 23:08:07 2021
@author: li
"""
from Tkinter import *
import time
# --- Board / UI setup (Python 2 + Tkinter) ---
print '程式執行中...'
window = Tk()
window.title('2048game')
window.geometry('330x330')
import random
aicontrol = 1  # 1 while the AI driver may take over; cleared by manual moves
gameover = 0   # set to 1 by end() once no move is possible
i=0
l=[]           # the 16 tile Labels, row-major
rec = []       # snapshot of tile texts for the undo (Backspace) feature
recnum = 0     # 1 when a snapshot is available to restore
while i < 16 : # build the 16 tile labels (original comment: 做標籤)
    l.append(1)
    rec.append('1')
    l[i] = Label(window,
                 text='0',
                 font=('Arial', 12),
                 bd = 2,
                 relief=RAISED,
                 width=4, height=2
                 )
    i=i+1
i=0
j=0
k=0
while i < 4 :  # place the labels on a 4x4 grid
    j=0
    while j < 4 :
        l[k].grid(row=i,column=j)
        k=k+1
        j=j+1
    i=i+1
# Status label (shows the game-over message) in the side column.
result = Label(window,
               text='',
               font=('Arial', 12),
               bd = 2,
               relief=GROOVE,
               width=10, height=1
               )
result.grid(row=0,column=4)
def chcolor():  # recolour tiles (original comment: 換顏色)
    """Recolour every tile label according to the value it displays.

    Unknown values (anything above 2048) fall back to 'plum', exactly as
    the original if/elif chain did.
    """
    palette = {
        '0': 'white',
        '2': 'lavender',
        '4': 'lightblue',
        '8': 'khaki',
        '16': 'lightsalmon',
        '32': 'gold',
        '64': 'deeppink',
        '128': 'firebrick',
        '256': 'fuchsia',
        '512': 'coral',
        '1024': 'burlywood',
        '2048': 'red',
    }
    for tile in l:
        tile['bg'] = palette.get(tile['text'], 'plum')
def toempty():
    """Cosmetic pass: hide zero tiles by blanking their text."""
    global l
    for tile in l:
        if tile['text'] == '0':
            tile['text'] = ''
def tozero():
    """Inverse of toempty(): restore '0' on blanked tiles so int() works."""
    global l
    for tile in l:
        if tile['text'] == '':
            tile['text'] = '0'
def record():  # snapshot the previous move (original comment: 記錄上一步)
    """Snapshot the current board into rec and enable the Backspace button."""
    global recnum
    recnum = 1
    bb['text']='Backspace'
    for idx, tile in enumerate(l):
        rec[idx] = tile['text']
def together(arr):  # merge numbers (original comment: 合併數字)
    """Collapse one 2048 row/column toward index 0.

    Non-zero values slide left, each adjacent equal pair merges exactly
    once, and the result is zero-padded back to the input length.
    """
    size = len(arr)
    pending = [value for value in arr if value != 0]
    pending += [0] * (size - len(pending))
    merged = []
    while len(pending) > 1:
        first = pending.pop(0)
        if first == pending[0]:
            merged.append(first * 2)
            pending.pop(0)
        else:
            merged.append(first)
    merged.extend(pending)
    merged += [0] * (size - len(merged))
    return merged
def _final_score(max_tile):
    """Map the largest tile reached onto the score shown in the popup."""
    if max_tile < 128:
        return 0
    return {128: 5, 256: 10, 512: 20, 1024: 30}.get(max_tile, 40)

def end():  # detect game over (original comment: 判斷結束)
    """Declare game over when no direction can change the board.

    Replaces the original's four copy-pasted direction checks (and the
    magic k == 5 counter) with one test: a direction is playable iff
    merging any of its lines with together() would alter that line.
    """
    board = [int(tile['text']) for tile in l]
    rows = [board[r * 4:r * 4 + 4] for r in range(4)]
    cols = [board[c::4] for c in range(4)]
    directions = (
        rows,                          # left
        [row[::-1] for row in rows],   # right
        cols,                          # up
        [col[::-1] for col in cols],   # down
    )
    can_move = any(together(line) != line
                   for direction in directions
                   for line in direction)
    global gameover
    if not can_move and gameover == 0:
        gameover = 1
        result['text'] = '遊戲結束'
        bb['text'] = ''
        global recnum
        recnum = 0
        score = _final_score(max(board))
        # Pop up a result window showing the score.
        # NOTE(review): creating a second Tk() root and calling mainloop()
        # again is fragile Tkinter usage -- confirm a Toplevel was intended.
        resultwindow = Tk()
        resultwindow.title('Game Over')
        resultwindow.geometry('300x100')
        resultlabel = Label(resultwindow,
                            text='分數 : ' + str(score),
                            font=('Arial', 12),
                            bd=2,
                            relief=RAISED,
                            width=12, height=2
                            )
        resultlabel.pack()
        resultwindow.mainloop()
def _shift(lines):
    """Shared implementation of the four move buttons.

    *lines* is a list of four 4-element lists of label indices, each given
    in merge order (the first index is the edge tiles slide toward).
    Merges every line with together(), writes the result back, snapshots
    the board for undo when something moved, spawns one random 2/4 tile on
    an empty cell, then recolours and re-checks for game over.
    """
    global aicontrol
    aicontrol = 0          # a manual move disables the AI driver
    ai['text'] = ''
    tozero()               # restore '0' placeholders so int() parses
    before = [[int(l[idx]['text']) for idx in line] for line in lines]
    after = [together(values) for values in before]
    moved = after != before
    if moved:
        record()           # snapshot so Backspace can undo this move
    for line, values in zip(lines, after):
        for idx, value in zip(line, values):
            l[idx]['text'] = str(value)
    has_gap = any(l[idx]['text'] == '0' for idx in range(16))
    # Keep sampling random cells until an empty one receives a new 2 or 4.
    while has_gap and moved:
        spot = random.randint(0, 15)
        if l[spot]['text'] == '0':
            l[spot]['text'] = str(random.randint(1, 2) * 2)
            has_gap = False
    chcolor()
    end()
    toempty()

def left():  # button actions (original comment: 按鈕功能)
    """Merge every row toward the left edge."""
    _shift([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])

def right():
    """Merge every row toward the right edge."""
    _shift([[3, 2, 1, 0], [7, 6, 5, 4], [11, 10, 9, 8], [15, 14, 13, 12]])

def up():
    """Merge every column toward the top edge."""
    _shift([[0, 4, 8, 12], [1, 5, 9, 13], [2, 6, 10, 14], [3, 7, 11, 15]])

def down():
    """Merge every column toward the bottom edge."""
    _shift([[12, 8, 4, 0], [13, 9, 5, 1], [14, 10, 6, 2], [15, 11, 7, 3]])
def back() :
    """Undo the last move by restoring the snapshot taken in record()."""
    # BUG FIX: recnum is assigned below, so without this declaration Python
    # treats it as a local and `if recnum == 1` raises UnboundLocalError
    # every time the Backspace button is pressed.
    global recnum
    tozero()
    if recnum == 1 :
        bb['text']=''
        recnum = 0
        for idx in range(16):
            l[idx]['text'] = rec[idx]
        chcolor()
        toempty()
def ai2048(): # AI driver (original comment: AI操控)
    """Cycle moves left -> up -> right -> down until the game is over.

    NOTE(review): this loop runs on the Tk main thread, so the window cannot
    repaint or process events while it spins -- confirm whether freezing the
    UI until game over is intended.
    """
    global aicontrol
    if aicontrol == 1 :
        global gameover
        randomai = 0  # round-robin direction selector, 0..3
        while gameover == 0 :
            if randomai == 0 :
                left()
                randomai = 1
            elif randomai == 1 :
                up()
                randomai = 2
            elif randomai == 2 :
                right()
                randomai = 3
            else :
                down()
                randomai = 0
# Direction buttons arranged as a D-pad below the board.
lb=Button(window, # buttons (original comment: 按鈕)
          text='左',
          font=('Arial', 12),
          bd = 2,
          command=left,
          relief=RAISED,
          width=3, height=1
          )
lb.grid(row=5,column=0)
rb=Button(window,
          text='右',
          font=('Arial', 12),
          command=right,
          bd = 2,
          relief=RAISED,
          width=3, height=1
          )
rb.grid(row=5,column=2)
ub=Button(window,
          text='上',
          font=('Arial', 12),
          command=up,
          bd = 2,
          relief=RAISED,
          width=3, height=1
          )
ub.grid(row=4,column=1)
db=Button(window,
          text='下',
          font=('Arial', 12),
          command=down,
          bd = 2,
          relief=RAISED,
          width=3, height=1
          )
db.grid(row=6,column=1)
# Undo button; its label is blank until record() arms it with 'Backspace'.
bb=Button(window,
          text='',
          font=('Arial', 12),
          command=back,
          bd = 2,
          relief=RAISED,
          width=12, height=1
          )
bb.grid(row=4,column=4)
# Hands control to the round-robin AI driver (ai2048).
ai=Button(window,
          text='AI控制',
          font=('Arial', 12),
          command=ai2048,
          bd = 2,
          relief=RAISED,
          width=12, height=1
          )
ai.grid(row=5,column=4)
# Game start (original comment: 開始): drop the first random 2/4 tile.
ran = random.randint(0,15)
if l[ran]['text'] == '0' :
    ran2 = random.randint(1,2)
    l[ran]['text'] = str(ran2*2)
chcolor()
toempty()
window.resizable(0,0)
window.mainloop() | true |
bddb3f457b9b65773f04fe05366bd247ad0ee003 | Python | jiangshanmeta/lintcode | /src/0103/solution.py | UTF-8 | 1,051 | 3.65625 | 4 | [
"MIT"
] | permissive | """
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
    """
    @param head: The first node of linked list.
    @return: The node where the cycle begins. if there is no cycle, return null
    """
    def detectCycle(self, head):
        """Return the node where the cycle starts, or None for an acyclic list.

        Standard Floyd cycle detection: once slow and fast meet inside the
        cycle, the distance from the head to the cycle start equals the
        distance from the meeting point to the cycle start, so advancing two
        pointers one step at a time from those positions meets at the start.
        This replaces the original three-pass version (detect, count the
        cycle length, then offset) with the same results in two passes.
        Uses identity (`is`) rather than `==` since nodes are compared as
        objects, not by value.
        """
        slow = fast = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            if slow is fast:
                break
        else:
            # fast ran off the end: the list is finite, hence acyclic.
            return None
        slow = head
        while slow is not fast:
            slow = slow.next
            fast = fast.next
        return slow
03d742ce4c7e45ecbbdc3e434ff42c96d3465d01 | Python | Kawser-nerd/CLCDSA | /Source Codes/AtCoder/abc035/B/4831046.py | UTF-8 | 293 | 3.25 | 3 | [] | no_license | hoge=input()
t=input()
kyori_1=abs(hoge.count("U")-hoge.count("D"))
kyori_2=abs(hoge.count("R")-hoge.count("L"))
hatena=hoge.count("?")
if(t=="1"):
print(kyori_1+kyori_2+hatena)
elif(t=="2" and hatena>kyori_1+kyori_2):
print(len(hoge)%2)
else:
print(kyori_1+kyori_2-hatena) | true |
9a6a3f7e5fb799433b40b8d5c4f62109d491d552 | Python | tonysosos/leetcode | /leetcode-py/two-sum.py | UTF-8 | 272 | 2.9375 | 3 | [] | no_license | class Solution:
# @return a tuple, (index1, index2)
def twoSum(self, num, target):
map = {}
for x in range(len(num)):
if num[x] in map:
return map[num[x]]+1, x+1
else:
map[target - num[x]] = x | true |
04f160c29901f8ed8f6df7edb1f6bf5032c171d4 | Python | NgocVTran/daily-coding | /200. Number of Island/main.py | UTF-8 | 2,481 | 4.28125 | 4 | [] | no_license | # Number of Island
from test_data import input_matrices
class Island():
def __init__(self, input_matrix):
self.input_matrix = input_matrix
self.row = len(input_matrix) # number of matrix row
self.col = len(input_matrix[0]) # number of matrix column
self.nr_of_island = 0
def counting_island(self):
"""
This main algorithm function will take input with format:
input_matrix = List[ List[int] ]
and return number of island.
:return: nr_of_island
"""
for i in range(self.row):
for j in range(self.col):
if self.input_matrix[i][j] == 1:
self.nr_of_island += 1
self.explore(i, j)
return self.nr_of_island
def explore(self, i, j):
"""
This function will check area around position (i,j)
If this area is a land, it will be marked as visited
:param i: row i of the input matrix
:param j: column j of the input matrix
"""
# initial list of surrounding land
expand_area = []
expand_area.append([i, j])
while expand_area:
x, y = expand_area.pop()
self.input_matrix[x][y] = 0 # mark current position as visited
# add land around current position to this list until only water around
self.check_neighbour(expand_area, x, y)
def check_neighbour(self, expand_area, x, y):
"""
This function will check lef, right, above and under current position
if they're a land or water
:param expand_area: list of land
:param x: current position in x-axis
:param y: current position in y-axis
"""
# check left and right position
if (x + 1 < self.row) and (self.input_matrix[x + 1][y] == 1):
expand_area.append([x + 1, y])
if (x - 1 >= 0) and (self.input_matrix[x - 1][y] == 1):
expand_area.append([x - 1, y])
# check above and under position
if (y + 1 < self.col) and (self.input_matrix[x][y + 1] == 1):
expand_area.append([x, y + 1])
if (y - 1 >= 0) and (self.input_matrix[x][y - 1] == 1):
expand_area.append([x, y - 1])
if __name__ == "__main__":
for i in range(len(input_matrices)):
print("Total number of Island(s) in matrix {}: {}"
.format(i+1, Island(input_matrices[i]).counting_island()))
| true |
22e611dbe8d30be2a99b99ceca818c1d3f013db6 | Python | iam3mer/mTICP172022 | /Ciclo I/Unidad 1 y 2/primos2.py | UTF-8 | 273 | 3.734375 | 4 | [] | no_license | def esPrimo(num: int, n: int):
if n >= num:
return print('Es primo.')
elif num % n != 0:
return esPrimo(num, n+1) # Recursividad
else:
return print(f"{num} No es primo. {n} es divisor")
esPrimo(555555412154746465465456874946, 2) | true |
197e57fffaa1c3c23f85daac6406e346ee5094f9 | Python | n5g/Py | /letskodeit/126windowSize.py | UTF-8 | 521 | 2.96875 | 3 | [] | no_license | from selenium import webdriver
import time
class Screenshots():
def test(self):
driver = webdriver.Chrome()
driver.maximize_window()
#driver.get("https://learn.letskodeit.com/p/practice")
driver.implicitly_wait(3)
height = driver.execute_script("return window.innerHeight;")
width = driver.execute_script("return window.innerWidth;")
print("Height: " + str(height))
print("Width: " + str(width))
driver.quit()
run = Screenshots()
run.test()
| true |
a2d3bfbbfc460a5d89911265d7545bd4682b83b0 | Python | rresender/python-samples | /emailformart.py | UTF-8 | 174 | 3.109375 | 3 | [
"MIT"
] | permissive | import re
n = int(input())
regex = '<[a-z][a-z0-9_.-]+@[a-z]+\.[a-z]{1,3}>'
for x in range(n):
in_put = input()
if re.search(regex, in_put):
print(in_put)
| true |
2b0293a0bd0452e9e94a7c6aea0d13a803cc9dbd | Python | Demesaikiran/MyCaptainAI | /Fibonacci.py | UTF-8 | 480 | 4.03125 | 4 | [] | no_license | def fibonacci(r, a, b):
if r == 0:
return
else:
print("{0} {1}".format(a, b), end = ' ')
r -= 1
fibonacci(r, a+b, a+ 2*b)
return
if __name__ == "__main__":
num = int(input("Enter the number of fibonacci series you want: "))
if num == 0 or num < 0:
print("Incorrect choice")
elif num == 1:
print("0")
else:
fibonacci(num//2, 0, 1)
| true |
8a31ccc0f2d704fc3a93320c196073df8027dd64 | Python | tjian123/OnosSystemTest | /TestON/tests/FUNC/FUNCgroup/dependencies/group-bucket.py | UTF-8 | 1,090 | 2.65625 | 3 | [] | no_license | def addBucket( main , egressPort = "" ):
"""
Description:
Create a single bucket which can be added to a Group.
Optional:
* egressPort: port of egress device
Returns:
* Returns a Bucket
* Returns None in case of error
Note:
The ip and port option are for the requests input's ip and port
of the ONOS node.
"""
try:
bucket = {
"treatment":{ "instructions":[] }
}
if egressPort:
bucket[ 'treatment' ][ 'instructions' ].append( {
"type":"OUTPUT",
"port":egressPort } )
return bucket
except ( AttributeError, TypeError ):
main.log.exception( self.name + ": Object not as expected" )
return None
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanup()
main.exit()
| true |
84c62fc83085eb221f64c45ad111d04bf6e78e05 | Python | Patel-Jenu-1991/SQLite3_basics | /hw_cars.py | UTF-8 | 587 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env python3
# Create a new database called cars
# that has a table inventory
# I'm gonna use a functional approach this time
import sqlite3
conn = sqlite3.connect("cars.db")
cursor = conn.cursor()
def main(): create_inventory()
def create_inventory():
''' This function creates a table in
the cars database called inventory '''
query = """CREATE TABLE IF NOT EXISTS inventory
(Make TEXT, Model TEXT, Quantity INT)"""
cursor.execute(query)
if __name__ == "__main__": main()
# close the cursor and connection
cursor.close()
conn.close()
| true |
cc891a1e43208c40267c169a68b213f9ee17b857 | Python | Melwyna/Algoritmos-Taller | /77.py | UTF-8 | 352 | 3.171875 | 3 | [] | no_license | usua="g0812"
cotr="081215"
for x in range(0,3):
usuario=str(input("Ingrese su usuario:"))
contraseña=str(input("Ingrese su contraseña:"))
if usuario==usua and contraseña==cotr:
print("SU USUARIO Y CONTRASEÑA SON CORRECTOS")
else:
print("SU USUARIO Y CONTRASEÑA SON INCORRECTOS")
print("YA LLEVA 3 INTENTOS, VUELVA A INTENTAR EN 1 MINUTO")
| true |
00b5b1fea6118b56eca14237f21325a24cd1101a | Python | Athenian-ComputerScience-Fall2020/functions-practice-yesak1 | /return_practice.py | UTF-8 | 418 | 4.125 | 4 | [
"Apache-2.0"
] | permissive | # Add comments to explain what the output from this program will be and how you know.
def math1():
    """Return the sum of the two fixed operands (50 + 5 = 55)."""
    left, right = 50, 5
    return left + right

def math2():
    """Return the difference of the two fixed operands (50 - 5 = 45)."""
    left, right = 50, 5
    return left - right

def math3():
    """Return the product of the two fixed operands (50 * 5 = 250)."""
    left, right = 50, 5
    return left * right
# math2() returns 50 - 5, so this prints 45.
output_num = math2()
print(output_num)
'''
Add prediction(s) here:
-the output will be 45 because the function math2 is called which is 50-5.
'''
| true |
4b93f9e804ffca8fa905acf5342dbdd4b75802bc | Python | wistbean/learn_python3_spider | /stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/progress/helpers.py | UTF-8 | 2,931 | 2.53125 | 3 | [
"MIT"
] | permissive | # Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class WriteMixin(object):
    """Mixin for single-line progress display: repeatedly rewrites one
    string in place on a TTY using backspace characters.

    Expects the composed class to provide ``self.file`` (output stream).
    All drawing is skipped when the stream is not a terminal.
    """
    hide_cursor = False

    def __init__(self, message=None, **kwargs):
        super(WriteMixin, self).__init__(**kwargs)
        self._width = 0  # widest string written so far (for padding)
        if message:
            self.message = message

        if self.file and self.file.isatty():
            if self.hide_cursor:
                print(HIDE_CURSOR, end='', file=self.file)
            print(self.message, end='', file=self.file)
            self.file.flush()

    def write(self, s):
        """Overwrite the previously written text with *s* in place."""
        if self.file and self.file.isatty():
            # Backspace over the old text; pad with spaces so a shorter
            # string fully covers a longer one.
            b = '\b' * self._width
            c = s.ljust(self._width)
            print(b + c, end='', file=self.file)
            self._width = max(self._width, len(s))
            self.file.flush()

    def finish(self):
        """Restore the cursor if it was hidden in __init__."""
        if self.file and self.file.isatty() and self.hide_cursor:
            print(SHOW_CURSOR, end='', file=self.file)
class WritelnMixin(object):
    """Mixin for line-oriented progress display: clears and redraws the
    current terminal line with ANSI escapes (``\\r\\x1b[K``).

    Expects the composed class to provide ``self.file`` (output stream);
    drawing is skipped when the stream is not a terminal.
    """
    hide_cursor = False

    def __init__(self, message=None, **kwargs):
        super(WritelnMixin, self).__init__(**kwargs)
        if message:
            self.message = message

        if self.file and self.file.isatty() and self.hide_cursor:
            print(HIDE_CURSOR, end='', file=self.file)

    def clearln(self):
        """Move to column 0 and erase to end of line."""
        if self.file and self.file.isatty():
            print('\r\x1b[K', end='', file=self.file)

    def writeln(self, line):
        """Replace the current terminal line with *line*."""
        if self.file and self.file.isatty():
            self.clearln()
            print(line, end='', file=self.file)
            self.file.flush()

    def finish(self):
        """Emit the final newline and restore the cursor if hidden."""
        if self.file and self.file.isatty():
            print(file=self.file)
            if self.hide_cursor:
                print(SHOW_CURSOR, end='', file=self.file)
from signal import signal, SIGINT
from sys import exit
class SigIntMixin(object):
    """Registers a signal handler that calls finish on SIGINT"""

    def __init__(self, *args, **kwargs):
        super(SigIntMixin, self).__init__(*args, **kwargs)
        # Install the handler process-wide; expects the composed class to
        # provide finish().
        signal(SIGINT, self._sigint_handler)

    def _sigint_handler(self, signum, frame):
        # Let the progress display clean up its line, then exit cleanly.
        self.finish()
        exit(0)
| true |
c27e3b46b22a7db182a3062ad934a052c9743de6 | Python | taboomz/TaxxLenguaje | /Taxx.py | UTF-8 | 521 | 2.546875 | 3 | [] | no_license | import taxxLexico
import codecs
import ply.lex as lex
class Taxx(object):
    """Minimal driver that tokenises a source file with the taxxLexico rules."""

    def __init__(self):
        super(Taxx, self).__init__()

    def compilar(self, archivo):
        """Run the lexer over *archivo*, echoing every token and a crude
        '/'-bar progress indicator (one slash per token, via '\\r')."""
        fuente = codecs.open(archivo, 'r')
        contenido = fuente.read()
        lexer = lex.lex()
        contador = 0
        lexer.input(contenido)
        print('[' + '/' * contador + ']', contador, '%', end='\r')
        while True:
            token = lexer.token()
            if not token:
                break
            contador = contador + 1
            print('\n', token)
            print('[' + '/' * contador + ']', contador, '%', end='\r')
        fuente.close()
| true |
4bfaa30cf9509526bf58b0240fa26d86bbf399ec | Python | lucaseduardo101/MetodosII | /Integrais Duplas/leitura.py | UTF-8 | 927 | 3.5 | 4 | [] | no_license | # -*- coding: utf-8 -*-
def ler(arq):
a = open(arq,"r") #Abre um arquivo chamado dados.txt
m = a.readline().split() #Le a primeira linha do arquivo, salva o valor dela na variavel m e a ponta para a segunda linha do arquivo
for i in range (0,len(m)):
m[i] = int(m[i])
l = []#Declara uma lista vazia que ira guardar os valores das linhas seguintes
l.append(m)
for i in range(0, (m[0]+1) * (m[1]+1) ):
i = a.readline().split() #Recebe o valor da linha seguinte em formato de String e aponta para a proxima linha se houver
x = 0 #Variavel auxiliar que ira ajudar na etapa de transformar as strings em variaves float
while (x < len(i)): #Verifica se a proxima string da lista pode ser convertida para numeral
i[x] = float(i[x]) #converte a String para float
x = x + 1 #incrementa x
l.append(i) # a lista l so recebe os valores que foram convertidos
a.close()
return l
| true |
d55eadb490c217a412ee41bd7c0b6711552553a2 | Python | giuggy/Thesis | /Project/controllers/venv/lib/python3.6/site-packages/pypacker/statemachine.py | UTF-8 | 3,823 | 2.953125 | 3 | [] | no_license | """
Logic to build state machines. Borrowed from Scapy's Automata concept.
"""
import threading
import collections
import logging
logger = logging.getLogger("pypacker")
STATE_TYPE_BEGIN = 0
STATE_TYPE_INTERM = 1 # default
STATE_TYPE_END = 2
class TimedCallback(threading.Thread):
	"""Worker thread that fires a callback when its timeout expires.

	The timeout can be re-armed (retrigger), disabled (set_inactive) or the
	thread shut down (stop) at any time; each of these sets the internal
	event, which interrupts the current wait. The callback only fires when
	the wait elapses *without* the event being set.
	"""
	def __init__(self):
		self._obj = None          # argument handed to the callback
		self._is_running = True   # cleared by stop() to end the run loop
		self._cb = None           # callable fired on timeout, or None
		# assume this will never trigger
		self._timeout = 9999999
		self._event = threading.Event()
		super().__init__()
		self.start()

	def run(self):
		"""Wait/fire loop; exits once stop() clears _is_running."""
		# logger.debug("starting cb iterator")
		while self._is_running:
			# logger.debug("cb: next round")
			self._event.clear()
			self._event.wait(timeout=self._timeout)
			# wait was interrupted
			if self._event.is_set():
				continue
			if self._cb is not None:
				# logger.debug("executing timeout cb")
				self._cb(self._obj)
		# logger.debug("cb iterator finished")

	def retrigger(self, obj, timeout, cb):
		"""Re-arm: call cb(obj) after *timeout* seconds, restarting the wait."""
		self._obj = obj
		self._timeout = timeout
		self._cb = cb
		self._event.set()

	def set_inactive(self):
		"""Disable the pending callback and fall back to the idle timeout."""
		self._cb = None
		self._timeout = 9999999
		self._event.set()

	def stop(self):
		"""Terminate the worker loop (the thread then exits)."""
		self._is_running = False
		self._event.set()
_cb_threads = collections.defaultdict(TimedCallback)
def sm_state(state_type=STATE_TYPE_INTERM, timeout=None, timeout_cb=None):
	"""Decorator factory marking a method as a state of a StateMachine.

	The wrapped method cancels any pending state timeout on entry and, if
	*timeout* is given, arms timeout_cb to fire that many seconds after the
	method returns. STATE_TYPE_BEGIN tags the method so AutomateMeta picks
	it as the machine's initial state.
	"""
	def gen(old_f):
		if timeout is not None and timeout_cb is None:
			logger.warning(
				"timeout set to %d but no timeout action for %r",
				timeout,
				old_f.__name__)

		# replace with new method to store state infos
		def new_f(self, *args, **kwds):
			# end of function (state) -> clear old one
			# Note: one TimedCallback is shared per *class*, not per instance.
			# logger.debug("getting cb class via %r", self.__class__)
			cb_thread = _cb_threads[self.__class__]
			cb_thread.set_inactive()
			ret = old_f(self, *args, **kwds)
			# start timeout after method reaches end
			if timeout is not None:
				# logger.debug("restarting timeout: %ds", timeout)
				cb_thread.retrigger(self, timeout, timeout_cb)
			return ret

		if state_type == STATE_TYPE_BEGIN:
			#logger.debug("setting inital state cb: %r", old_f)
			new_f._state_method_begin = True
		return new_f
	return gen
class AutomateMeta(type):
    """Metaclass recording the initial state method's name as `_state`.

    The first class attribute carrying `_state_method_begin` (set by the
    sm_state decorator) is taken as the machine's starting state.
    """

    def __new__(cls, clsname, clsbases, clsdict):
        new_cls = type.__new__(cls, clsname, clsbases, clsdict)
        initial = next(
            (name for name, member in clsdict.items()
             if getattr(member, "_state_method_begin", None) is not None),
            None)
        if initial is not None:
            new_cls._state = initial
        return new_cls
class StateMachine(object, metaclass=AutomateMeta):
    """
    This state machine allows to react on network stimulus (incoming packets)
    and imitate/build protocols.
    State_1 -> event: decide next state -> State_2 ...
    """
    def __init__(self, receive_cb):
        # receive_cb: blocking callable returning the next incoming packet.
        self._states = set()
        self._actions = set()
        self._receive_cb = receive_cb
        self._is_running = True
        self._old_f = None
        # AutomateMeta stored the initial state's *name* in self._state;
        # resolve it to the bound method here.
        self._state = getattr(self, self._state, None)
        if self._state is None:
            logger.exception("no initial state defined!")
        else:
            logger.debug("initial state: %r", self._state)
        # Dedicated thread pulls packets and feeds them to the current state.
        self._receive_thread = threading.Thread(
            target=StateMachine.receive_cycler,
            args=[self]
        )
        self._receive_thread.start()

    @staticmethod
    def receive_cycler(obj):
        """Receive loop: hand every incoming packet to the current state method."""
        while obj._is_running:
            pkt = obj._receive_cb()
            try:
                obj._state(pkt)
            except:
                # Swallow state errors so one bad packet does not kill the loop.
                logger.warning(
                    "could not execute callback: %r",
                    obj._state
                )
                #ex.printstacktrace()
        # logger.debug("receive cycler finished")

    def stop(self):
        """Stop the receive loop and this class's shared timeout thread."""
        self._is_running = False
        # _receive_cb() (eg sockets) needs to be stopped first or this will likely hang
        self._receive_thread.join()
        try:
            _cb_thread = _cb_threads.get(self.__class__, None)
            _cb_thread.stop()
        except AttributeError:
            # No TimedCallback was ever created for this class -> nothing to stop.
            pass
            # logger.debug("no cb thread found")
        except Exception as ex:
            ex.printstacktrace()
| true |
bf42205bccf7fce2d9b99351860f3610fe8d02c8 | Python | juliusdeane/beginningfrida | /simple/3/create_struct_in_memory64.py | UTF-8 | 1,224 | 2.53125 | 3 | [
"MIT"
] | permissive | import frida
# Attach to the already-running target process named "simple3".
session = frida.attach("simple3")
# The invented struct we want to build, but in a 64bit architecture:
#
# Now we are on 64bits so:
# short - 2 bytes
# long - 8 bytes
#
# typedef struct my_INVENTED_STRUCT {
# USHORT counter;
# ULONG starCount;
# ULONG blackholeCount;
# } INVENTED_STRUCT, *INVENTED_HEADER;
# The injected JS allocates 18 bytes in the target, writes the three fields
# at offsets 0 (u16), 2 (u64) and 10 (u64), then hexdumps the buffer.
script = session.create_script("""
const INVENTED_STRUCT_SIZE = 18;
var myStruct = Memory.alloc(INVENTED_STRUCT_SIZE);
console.log('[myStruct] BASE address: ' + myStruct);
myStruct.writeU16(0x0000);
var mystruct_plus_2 = myStruct.add(0x02);
console.log('[myStruct] BASE address: ' + mystruct_plus_2 + ' +2 bytes');
mystruct_plus_2.writeU64(0x00000000FE00FE00);
var mystruct_plus_2_plus_8 = mystruct_plus_2.add(0x08);
console.log('[myStruct] BASE address: ' + mystruct_plus_2_plus_8 + ' +10 bytes');
mystruct_plus_2_plus_8.writeU64(0x00000000000000fe);
// Now read:
var buffer_read = Memory.readByteArray(myStruct, INVENTED_STRUCT_SIZE);
console.log(hexdump(buffer_read, {
offset: 0,
length: INVENTED_STRUCT_SIZE,
header: true,
ansi: false
}));
""")
# Inject and run the script, then detach from the process.
script.load()
session.detach()
| true |
eb2850761b8420c8019072a981dd4d4b772a9a93 | Python | viktorpi/algorithms | /algorithms/max_slice/max_profit.py | UTF-8 | 341 | 2.71875 | 3 | [
"Apache-2.0"
def solution(A):
    """Max profit over a price series (Codility MaxProfit).

    Kadane's max-slice algorithm applied to consecutive day-to-day price
    differences; returns 0 when no profitable slice exists.
    """
    best_here = best_total = 0
    for prev_price, price in zip(A, A[1:]):
        delta = price - prev_price
        best_here = max(delta, best_here + delta)
        best_total = max(best_total, best_here)
    return best_total
| true |
3f7f6d19d6ae6d99f12e4dc5495dd709af893dce | Python | george-galli/python-usp | /imprimirfatorial.py | UTF-8 | 174 | 3.875 | 4 | [] | no_license | n = int(input("Digite um número natural: "))
n_fat = 1
i = 1
while i <= n:
n_fat *= i
i += 1
print(n_fat)
| true |
# Problem 1: read a number and print it counting down to 1 on one line.
print("\n === 문제 1번 ===")
num = int(input("숫자 입력:"))
for value in reversed(range(1, num + 1)):
    print(value, end=" ")
| true |
f998ebd926f014ad8ddb3feaa80198390098503c | Python | GuillaumeLagrange/advent-of-code | /2018/2.py | UTF-8 | 839 | 3.375 | 3 | [] | no_license | #!/bin/python3
# Puzzle input, one box ID per line, read once at import time.
data = [x.strip() for x in open("input/2.txt", "r").readlines()]

def main():
    """Advent of Code 2018 day 2: checksum (part 1) and near-duplicate IDs (part 2)."""
    # Part 1: count IDs containing any letter exactly twice / exactly thrice.
    two = 0
    three = 0
    for line in data:
        # Per-letter frequency table for this ID.
        letters = dict.fromkeys(line, 0)
        for letter in line:
            letters[letter] += 1
        if 2 in letters.values():
            two += 1
        if 3 in letters.values():
            three += 1
    print("two is %d three is %d answer is %d" % (two, three, two * three))
    # Part 2: find the two IDs differing in exactly one position and print
    # their common characters.
    for line in data:
        for string in data:
            diff = 0
            for i in range(len(line)):
                if line[i] != string[i]:
                    diff += 1
            if diff == 1:
                # Keep only the positions where both IDs agree.
                ans = [x for i, x in enumerate(line) if string[i] == x]
                print("part two answer is %s" % "".join(ans))
                return

if __name__ == '__main__':
    main()
481610301f018ee6908c3200b6645530ac6edc4c | Python | onesMas46/BCS-2021 | /src/chapter8/exercise5.py | UTF-8 | 331 | 3.234375 | 3 | [
"MIT"
# Print the second word of every line starting with 'From' in the mbox file,
# then report how many such lines were seen.
fname = "mbox_short.txt"
file = open(fname)
index = 0
count = 0
for line in file:
    line = line.rstrip()
    if not line.startswith('From'):
        continue
    count += 1
    # For a line starting with 'From', find() returns 0, so index is 1:
    # the word right after 'From' (the sender address).
    # NOTE(review): startswith('From') also matches 'From:' header lines,
    # so each message is likely counted twice — confirm intended behavior.
    index = line.find('From') + 1
    word = line.split()
    print(word[index])
print("There were",count,"lines in the file with From as the first word")
ca16e54fdca65f98ce674b6a1cda60d82f2e1cfa | Python | mipt-m06-803/Slava-Inderiakin | /test6/ex2.1.py | UTF-8 | 61 | 2.703125 | 3 | [] | no_license | for a, b in zip(A, B):
print(' '.join([str(a), str(b)]))
| true |
1551f3ff91e1424b0d1eba1c185d754e65e8d881 | Python | yamachu/codecheck-asahi-coef | /app/main.py | UTF-8 | 3,646 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python3
import json
import collections
import datetime
import asyncio
import aiohttp
from .AsahiNewsArchives.api import AsahiNewsAPI
import numpy
# for debug
# from pprint import pprint
def _strdate_to_datetime(strdate):
return datetime.date(*[int(part_ymd) for part_ymd in strdate.split('-')])
def _parse_keywords(base_keyword):
    """Split a '[kw1,kw2,...]'-style argument into a list of keyword strings.

    The outer [1:-1] drops the surrounding brackets; each entry's [1:-1]
    drops its surrounding quotes.  The encode/decode round-trip with
    'surrogateescape' repairs non-ASCII keywords whose UTF-8 bytes arrived
    through the command line as surrogate escapes.
    """
    return [''.join(words.strip()[1:-1]).encode('utf-8', 'surrogateescape').decode('utf-8', 'surrogateescape')
            for words in base_keyword[1:-1].split(',')]
def _generate_floor_day_week_list(str_start_day, str_end_day):
    """Split [start, end] into whole 7-day windows (partial trailing week dropped).

    Returns a list of {"start": date, "end": date} dicts; "end" is the last
    day inside each window (start + 6 days).
    """
    first_day = _strdate_to_datetime(str_start_day)
    last_day = _strdate_to_datetime(str_end_day)
    n_weeks = ((last_day - first_day).days + 1) // 7
    weeks = []
    for w in range(n_weeks):
        begin = first_day + datetime.timedelta(days=7 * w)
        weeks.append({
            "start": begin,
            "end": begin + datetime.timedelta(days=6)
        })
    return weeks
async def _search_per_week(keyword, week_list):
    """Query the Asahi news API once per week window; return per-week hit counts.

    Results are ordered like week_list (asyncio.gather preserves order).
    """
    api = AsahiNewsAPI("869388c0968ae503614699f99e09d960f9ad3e12")
    async def _inner_search_per_week(keyword, week):
        # rows=1: we only need the total hit count ('numFound'), not articles.
        response = await api.search_async(
            query='Body:{} AND ReleaseDate:[{} TO {}]'
            .format(keyword,
                    week['start'].strftime('%Y-%m-%d'),
                    week['end'].strftime('%Y-%m-%d')),
            rows=1
        )
        return int(response['response']['result']['numFound'])
    # One concurrent request per week window.
    tasks = [_inner_search_per_week(keyword, week) for week in week_list]
    result = await asyncio.gather(*tasks)
    return result
def _calc_coef_to_get_tri_mat(responses):
non_same_pos = 0
items = list(responses.items())
keyword_sums = numpy.zeros(len(responses), numpy.int)
coef_mat = numpy.empty(
(1 + len(responses)) * len(responses) // 2,
numpy.double
)
for i, item in enumerate(responses):
keyword_sums[i] = sum(responses[item])
for i in range(len(responses)):
for j in range(i):
if keyword_sums[i] != 0 and keyword_sums[j] != 0:
coef_mat[non_same_pos] = numpy.corrcoef(
items[i][1], items[j][1]
)[0, 1]
else:
coef_mat[non_same_pos] = numpy.NaN
non_same_pos += 1
return coef_mat
def main(argv):
    """Entry point: argv = [keyword_list, start_day, end_day].

    Fetches weekly hit counts per keyword, computes pairwise correlations,
    and prints a hand-built JSON object {"coefficients": [...], "posChecker": true}.
    """
    from pprint import pprint
    result = collections.OrderedDict()
    keywords = _parse_keywords(argv[0])
    week_list = _generate_floor_day_week_list(*argv[1:3])
    loop = asyncio.get_event_loop()
    # Sequentially gather the weekly counts for each keyword.
    for keyword in keywords:
        result.update({keyword: loop.run_until_complete(_search_per_week(keyword, week_list))})
    coef_mat = _calc_coef_to_get_tri_mat(result)
    tmp_len = len(result)
    # Expand the packed lower triangle into a full symmetric string matrix:
    # '1' on the diagonal, rounded coefficients elsewhere, 'null' for NaN.
    coef_arr = [[None for _ in range(tmp_len)] for _ in range(tmp_len)]
    idx = 0
    for i in range(tmp_len):
        coef_arr[i][i] = '1'
        for j in range(i):
            if not numpy.isnan(coef_mat[idx]):
                coef_arr[i][j] = str(round(coef_mat[idx],3))
                coef_arr[j][i] = coef_arr[i][j]
            else:
                coef_arr[i][j] = 'null'
                coef_arr[j][i] = coef_arr[i][j]
            idx += 1
    # Serialize the matrix by hand (values are already JSON-ready strings).
    output = '{"coefficients":['
    for outter in coef_arr:
        output += '['
        for val in outter:
            output += val
            output += ','
        # Drop the trailing comma of the row.
        output = output[:-1]
        output += '],'
    # Drop the trailing comma of the last row.
    output = output[:-1]
    output += ']'
    output += ',"posChecker":'
    # do check — NOTE(review): always emits 'true'; the actual check is not implemented.
    output += 'true'
    output += '}'
    print(output)
| true |
95ec9e069ce937deaa3a50816b0a68dd0b3de59b | Python | InoveAlumnos/mongodb_python | /ejemplos_clase.py | UTF-8 | 4,938 | 3.25 | 3 | [] | no_license | #!/usr/bin/env python
'''
MongoDB [Python]
Ejemplos de clase
---------------------------
Autor: Inove Coding School
Version: 1.2
Descripcion:
Programa creado para mostrar ejemplos prácticos de los visto durante la clase
'''
__author__ = "Inove Coding School"
__email__ = "alumnos@inove.com.ar"
__version__ = "1.2"
import json
import tinymongo as tm
import tinydb
# Bug: https://github.com/schapman1974/tinymongo/issues/58
class TinyMongoClient(tm.TinyMongoClient):
    """Subclass pinning tinydb's JSON storage backend (tinymongo issue #58 workaround)."""
    @property
    def _storage(self):
        return tinydb.storages.JSONStorage
# Name of the backing database (tinymongo persists it as a JSON file).
db_name = 'personas'
def clear():
    """Delete every document from the ``persons`` collection."""
    client = TinyMongoClient()
    database = client[db_name]
    # An empty filter matches (and removes) every document.
    database.persons.remove({})
    client.close()
def insert_persona(name, age, nationality=""):
conn = TinyMongoClient()
db = conn[db_name]
# Insertar un documento
persona_json = {"name": name, "age": age, "nationality": nationality}
db.persons.insert_one(persona_json)
# Cerrar la conexión con la base de datos
conn.close()
def insert_grupo(group):
    """Insert a list of person documents (JSON dicts) in a single call."""
    client = TinyMongoClient()
    database = client[db_name]
    database.persons.insert_many(group)
    client.close()
def show(fetch_all=True):
    """Print all ``persons`` documents: one JSON dump, or one document per line."""
    # Connect to the database
    conn = TinyMongoClient()
    db = conn[db_name]
    # Read all documents and print them together as pretty-printed JSON
    if fetch_all is True:
        cursor = db.persons.find()
        data = list(cursor)
        json_string = json.dumps(data, indent=4)
        print(json_string)
    else:
        # Read all documents and print them one at a time
        cursor = db.persons.find()
        for doc in cursor:
            print(doc)
    # Close the database connection
    conn.close()
def find_persona(name):
    """Return the first document whose ``name`` field matches, or None."""
    client = TinyMongoClient()
    database = client[db_name]
    match = database.persons.find_one({"name": name})
    client.close()
    return match
def count_by_country(country):
    """Return how many documents carry the given ``nationality`` value."""
    client = TinyMongoClient()
    database = client[db_name]
    total = database.persons.find({"nationality": country}).count()
    client.close()
    return total
def lookfor_older_than(age):
    """Print every document whose ``age`` field is strictly greater than *age*."""
    conn = TinyMongoClient()
    db = conn[db_name]
    # "$gt" selects documents with age strictly greater than the threshold.
    cursor = db.persons.find({"age": {"$gt": age}})
    for doc in cursor:
        print(doc)
    # Close the connection — was missing here, unlike every sibling helper.
    conn.close()
def update_persona_address(name, address):
    """Set the given fields (via $set) on the first document matching ``name``."""
    client = TinyMongoClient()
    database = client[db_name]
    database.persons.update_one({"name": name}, {"$set": address})
    client.close()
def remove_persona(name):
    """Delete every document whose ``name`` field matches."""
    client = TinyMongoClient()
    database = client[db_name]
    database.persons.remove({"name": name})
    client.close()
if __name__ == '__main__':
    print("Bienvenidos a otra clase de Inove con Python")
    # Wipe the database so the demo starts from a clean state.
    clear()
    # Fill database with sample people.
    insert_persona('Inove', 12, 'Argentina')
    insert_persona('Python', 29, 'Holanda')
    insert_persona('Max', 35, 'Estados Unidos')
    insert_persona('Mirta', 93, 'Argentina')
    # Show current contents.
    show()
    # Modify "Inove": add an address field, reading the document before/after.
    # ------------------------------------------------
    inove_data = find_persona('Inove')
    address = {"address": {"street": "Monroe", "number": 500}}
    update_persona_address('Inove', address)
    inove_data_2 = find_persona('Inove')
    # ------------------------------------------------
    # Count how many Argentinians are in the db.
    print('Cantidad de argentinos:', count_by_country("Argentina"))
    # Print everyone older than 25.
    lookfor_older_than(25)
    # Insert a batch of documents.
    # NOTE(review): the key "nationality:" below has a stray colon (the rest
    # of the module uses "nationality"), and the first dict lacks "name" —
    # looks unintentional; confirm before relying on these records.
    # ------------------------------------------------
    group = [{"age": 40, "nationality:": "Estados Unidos"},
             {"name": "SQL", "age": 13, "nationality:": "Inglaterra"},
             {"name": "SQLite", "nationality:": "Estados Unidos"}
             ]
    insert_grupo(group)
    print('\n\nMostrar nuevos datos insertados por grupo')
    show(False)
| true |
2b2ff28b02d38a53bd1e75bfe15c22bb1ef7dfe0 | Python | JOravetz/Data_Analysis | /read_csv.py | UTF-8 | 686 | 2.578125 | 3 | [] | no_license | import unicodecsv
def read_csv(filename):
    """Load *filename* as a list of row dicts using unicodecsv.DictReader."""
    with open(filename, 'rb') as handle:
        return list(unicodecsv.DictReader(handle))
# Load the three Udacity data files as lists of row dicts.
enrollments = read_csv('enrollments.csv')
daily_engagement = read_csv('daily_engagement.csv')
project_submissions = read_csv('project_submissions.csv')

# Count raw lines per file (header included).  Use `with` so each file
# handle is closed deterministically — the original generator expressions
# left the files open.
with open('enrollments.csv', 'rb') as f:
    row_count = sum(1 for row in f)
print("Number of rows in enrollments.csv = ", row_count)
with open('daily_engagement.csv', 'rb') as f:
    row_count = sum(1 for row in f)
print("Number of rows in daily_engagement.csv = ", row_count)
with open('project_submissions.csv', 'rb') as f:
    row_count = sum(1 for row in f)
print("Number of rows in project_submissions.csv = ", row_count)
| true |
def menu():
    """Print the agenda menu and return the user's raw choice string."""
    print("Programa da Agenda:\n\t1 - Inserir\n\t2 - Apagar\n\t3 - Listar\n\t0 - sair")
    return input("Digite uma opção: ")
def inserir():
    # Placeholder: should collect contact data and insert it into the database.
    print ("Aqui voce deve recuperar os dados da agenda e inserir no banco")
def apagar():
    # Placeholder: should ask for a contact and delete it from the database.
    print ("Aqui voce deve receber o contato que vc queira apagar e apagar no banco")
def listar():
    # Placeholder: should list every contact stored in the database.
    print ("Aqui voce deve listar todos os contatos no banco")
# Main loop: keep showing the menu and dispatching until '0' (quit) is chosen.
opcao = menu()
while opcao != '0':
    if opcao == '1':
        print ("1 - Inserir")
        inserir()
    elif opcao == '2':
        print ("2 - Apagar")
        apagar()
    elif opcao == '3':
        print ("3 - Listar")
        listar()
    opcao = menu()
| true |
6c874727afa3c28817af1dbbc14be7e19d400e64 | Python | nathanstuart01/coding_assessment | /app/business_logic/helper_functions.py | UTF-8 | 2,283 | 3.203125 | 3 | [] | no_license | import pandas as pd
import math
def create_df(file_path, columns: list, sep='\t'):
    """Read *file_path* into a DataFrame restricted to *columns* (tab-separated by default)."""
    return pd.read_csv(file_path, usecols=columns, sep=sep)
def merge_dfs(df_1, df_2, left_on='tconst', right_on='tconst'):
    """Inner-join two DataFrames on the given key columns (default 'tconst')."""
    return df_1.merge(df_2, left_on=left_on, right_on=right_on)
def process_genres_counts(genre, file_path, columns):
    """Count how many movies in the IMDb basics file belong to *genre*.

    Returns the count, or an explanatory string when the genre is absent.
    """
    df = create_df(file_path, columns)
    # Keep feature films only.
    df = df[df.titleType == 'movie']
    # 'genres' holds a comma-separated list; split and explode to one row
    # per (title, genre) pair.
    # NOTE(review): a missing/NaN genres value would make split() raise —
    # confirm the input file has no NaN in this column.
    df['genres'] = df['genres'].apply(lambda x: x.split(','))
    df = df.explode('genres')
    # After count(), 'tconst' carries the number of titles per genre.
    df = df.groupby('genres', as_index=False).count()
    df = df[['genres', 'tconst']]
    values = dict(zip(df.genres, df.tconst))
    if genre in values.keys():
        return values[genre]
    else:
        return 'Provided genre does not exist in movie data'
def get_movie_rating(title, data_file_paths: dict, data_columns: dict):
    """Return the mean 'averageRating' across every movie row titled *title*.

    data_file_paths/data_columns supply the IMDb basics and ratings file
    locations and the column subsets to load.  Returns an explanatory
    string when the title is absent or has no usable rating.
    """
    df_1 = create_df(data_file_paths['basics_data_loc'], data_columns['basics_data_cols_ratings_titles'])
    # Keep feature films only.
    df_1 = df_1[df_1.titleType == 'movie']
    df_2 = create_df(data_file_paths['ratings_data_loc'], data_columns['ratings_data_cols'])
    merged_df = merge_dfs(df_1, df_2)
    values = merged_df.loc[merged_df['primaryTitle'] == f'{title}']
    if len(values) == 0:
        return 'Provided movie title does not exist in movie data'
    values = values[['primaryTitle', 'averageRating']]
    ratings = list(values['averageRating'].values)
    # Plain sum/len mean: a NaN rating among duplicates propagates to NaN,
    # which is reported as "no average rating" below.
    avg_rating = sum(ratings) / len(ratings)
    if math.isnan(avg_rating):  # idiom fix: was `math.isnan(...) == True`
        return 'Provided movie title does not have an average rating'
    return avg_rating
def get_top_rated_title_genre(genre, data_file_paths: dict, data_columns: dict):
    """Return the movie titles holding the highest averageRating within *genre*."""
    df_1 = create_df(data_file_paths['basics_data_loc'], data_columns['basics_data_cols_ratings_titles'])
    # Movies only; explode the comma-separated genres to one row per genre.
    df_1 = df_1[df_1.titleType == 'movie']
    df_1['genres'] = df_1['genres'].apply(lambda x: x.split(','))
    df_1 = df_1.explode('genres')
    df_2 = create_df(data_file_paths['ratings_data_loc'], data_columns['ratings_data_cols'])
    df_3 = merge_dfs(df_1, df_2)
    df_3 = df_3.loc[df_3['genres'] == f'{genre}']
    # Keep only the rows that match the genre's maximum average rating
    # (after the filter above, the groupby yields a single group).
    df_3 = df_3.loc[df_3['averageRating'] == df_3.groupby(['genres']).agg({'averageRating':'max'}).values[0][0]]
    titles = list(df_3.primaryTitle.values)
    return titles
| true |
8cf6e47ee5fca9874e39944417325b5cb13f60cc | Python | TheUninvitedGuest/tmh-challenge | /challenge/src/hh_sim/hh_sim.py | UTF-8 | 1,246 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env python3
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from broker.broker import Publisher
class HHSim:
    """ Simple household simulator that generates random uniform numbers between -9 and 0 for a given time range.
    The output corresponds to the household consumption in kilowatts, loads are assumed to have negative values."""
    publisher: Publisher       # broker connection used to emit meter readings
    times: pd.DatetimeIndex    # sample timestamps for the simulation window

    def __init__(self, start_time, end_time, freq):
        # Fixed seed makes every simulation run reproducible.
        np.random.seed(0)
        self.times = pd.date_range(start_time, end_time, freq=freq)
        self.publisher = Publisher()

    def run(self):
        """Run the whole simulation: publish one reading per timestamp."""
        self._send_meter_data()

    def _get_pac_kw(self):
        # Loads are negative by convention: uniform draw in (-9, 0] kW.
        return -np.random.uniform(0.0, 9.0)

    def _send_meter_data(self):
        # Emit one (timestamp, power) sample per tick, then signal completion
        # and close the publisher.
        for timestamp in self.times:
            self.publisher.send_data(timestamp=timestamp, power=self._get_pac_kw())
        self.publisher.send_ctrl("done")
        self.publisher.close()
if __name__ == '__main__':
    # Smoke test: one minute at 5-second resolution, printed and plotted
    # (does not call run(), so nothing is published to the broker here).
    hhsim = HHSim("20190629 000000", "20190629 000100", freq="5s")
    res_arr = [hhsim._get_pac_kw() for _ in hhsim.times]
    res_df = pd.DataFrame(res_arr, columns=['Pac[kW]'], index=hhsim.times)
    print(res_df)
    res_df.plot()
    plt.show()
| true |
2d15b6ca04b01b31c69f0c87ba59797740aff2d2 | Python | wakafengfan/Leetcode | /tree/same_tree.py | UTF-8 | 877 | 3.734375 | 4 | [] | no_license | """
Given two binary trees, write a function to check if they are the same or not.
Two binary trees are considered the same if they are structurally identical and the nodes have the same value.
Example 1:
Input: 1 1
/ \ / \
2 3 2 3
[1,2,3], [1,2,3]
Output: true
Example 2:
Input: 1 1
/ \
2 2
[1,2], [1,null,2]
Output: false
Example 3:
Input: 1 1
/ \ / \
2 1 1 2
[1,2,1], [1,1,2]
Output: false
"""
from tree.tree_node import TreeNode
def same(root1: TreeNode, root2: TreeNode):
    """Return True iff both binary trees are structurally identical with equal values."""
    if not root1:
        # Both empty -> identical; only one empty -> different.
        return not root2
    if not root2:
        return False
    return (root1.val == root2.val
            and same(root1.left, root2.left)
            and same(root1.right, root2.right))
| true |
f2e589acb68f11a500fc097856414baf6e202f59 | Python | lavanya2495/seattleu_projects | /chord_node.py | UTF-8 | 15,761 | 2.6875 | 3 | [] | no_license | """
CPSC 5520, Seattle University
Lab 4: DHT
Author: Sai Lavanya Kanakam
Usage: python chord_node.py 0
"""
import sys
import pickle
import hashlib
import threading
import socket
import time
import ast
from datetime import datetime
TIME_FORMAT = '%H:%M:%S.%f'
NODE_NAME_FORMAT = '{}:{}'
M = 3 # FIXME: Test environment, normally = hashlib.sha1().digest_size * 8
NODES = 2**M
BUF_SZ = 4096 # socket recv arg
BACKLOG = 100 # socket listen arg
TEST_BASE = 43545 # for testing use port numbers on localhost at TEST_BASE+n
SLEEP_TIME_IN_SECS = 5
BATCH_SIZE = 10
"""
def generate_hash(str):
result = hashlib.md5(str.encode())
x = int(result.hexdigest(), 16)
return x
def generate_hash(str):
sha1 = hashlib.sha1()
sha1.update(str.encode('utf-8'))
result = sha1.hexdigest()
return int(result, 16)
"""
def generate_hash(str):
    """Return the SHA-1 digest of *str* (UTF-8) as an integer."""
    digest = hashlib.sha1(str.encode('utf-8')).hexdigest()
    return int(digest, 16)
def in_range(id, start, end):
    """True iff *id* lies in the half-open ring interval [start, end) mod NODES.

    When the interval wraps around zero (start >= end) membership means
    "at/after start or before end"; start == end covers the whole ring.
    """
    lo = start % NODES
    hi = end % NODES
    pos = id % NODES
    if lo < hi:
        return lo <= pos < hi
    return pos >= lo or pos < hi
class Address:
    """An endpoint/port pair plus its hashed position on the Chord ring."""
    def __init__(self, endpoint, port):
        self.endpoint = endpoint
        self.port = int(port)
        # Ring position is derived from the "endpoint:port" string.
        self.hash_val = generate_hash(NODE_NAME_FORMAT.format(self.endpoint, self.port))

    def __str__(self):
        return '{}:{}'.format(self.endpoint, self.port)

    def get_hash(self):
        """Return the precomputed hash of this address."""
        return self.hash_val
def connection(func):
    """Decorator: open this RemoteNode's TCP connection around *func*.

    The instance mutex serializes concurrent remote calls on one node.
    NOTE(review): no try/finally — an exception inside *func* leaves the
    mutex held and the socket open; consider hardening.
    """
    def inner(self, *args, **kwargs):
        self._mutex.acquire()
        self.create_connection()
        ret = func(self, *args, **kwargs)
        self.close_connection()
        self._mutex.release()
        return ret
    return inner
class RemoteNode(object):
    """Client-side proxy for another Chord node, speaking pickled text commands over TCP."""

    def __init__(self, remote_addr=None):
        self.my_address = remote_addr          # Address of the remote peer
        self._mutex = threading.Lock()         # serializes remote calls

    def __str__(self):
        return 'Remote {}'.format(self.my_address)

    def create_connection(self):
        """Open a fresh TCP connection to the remote node."""
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.my_address.endpoint, self.my_address.port))

    def close_connection(self):
        self.sock.close()
        self.sock = None

    def get_id(self, offset = 0):
        """Ring id of the remote node, optionally offset (mod NODES)."""
        return (self.my_address.get_hash() + offset) % NODES

    def send_message(self, message):
        # Protocol: every message is a pickled text command.
        self.sock.sendall(pickle.dumps(message))

    def recv_message(self):
        raw_data = self.sock.recv(BUF_SZ)
        return pickle.loads(raw_data)

    @connection
    def get_remote_node(self, message):
        """Send *message*, expect an (endpoint, port) reply, wrap it as a RemoteNode."""
        self.send_message(message)
        response = self.recv_message()
        addr = Address(response[0], response[1])
        return RemoteNode(addr)

    def find_successor(self, id):
        """Ask the remote node for the successor of ring position *id*."""
        return self.get_remote_node('find_successor {}'.format(id))

    def successor(self):
        return self.get_remote_node('successor')

    def predecessor(self):
        return self.get_remote_node('get_predecessor')

    def closest_preceding_node(self, id):
        return self.get_remote_node('closest_preceding_node {}'.format(id))

    def ping(self):
        """Liveness check: True iff a TCP connection can be made and used."""
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((self.my_address.endpoint, self.my_address.port))
            s.sendall(pickle.dumps('ping'))
            s.close()
            return True
        except socket.error:
            return False

    @connection
    def notify(self, node):
        """Tell the remote node that *node* might be its predecessor."""
        self.send_message('notify {} {}'.format(node.my_address.endpoint, node.my_address.port))

    @connection
    def look_up_key(self, key):
        """Fetch *key*'s value from the remote node's local store."""
        self.send_message('final_look_up_key {}'.format(key))
        return self.recv_message()

    @connection
    def insert_key_value(self, key, value):
        """Store key/value directly in the remote node's local store."""
        self.send_message('final_insert_key_val {} {}'.format(key, value))
        return self.recv_message()
"""
Takes a port number of an existing node (or 0 to indicate it should start a new network).
This program joins a new node into the network using a system-assigned port number for itself.
The node joins and then listens for incoming connections (other nodes or queriers).
You can use blocking TCP for this and pickle for the marshaling.
"""
class ChordNode(object):
    """A Chord DHT node: accepts peer/query connections and maintains its
    successor, predecessor and finger table via periodic background threads.

    NOTE(review): this block was reconstructed from whitespace-mangled
    source; the nesting of a few statements (marked below) follows the
    conventional Chord reading — confirm against the original.
    """

    def __init__(self, my_address, remote_node_address=None):
        # Listen on a system-assigned port; SO_REUSEADDR eases restarts.
        self.listener_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.listener_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.listener_socket.bind(('localhost', 0))
        self.listener_socket.listen(BACKLOG)
        # The node's own address uses the assigned port; the `my_address`
        # parameter is kept only for interface compatibility (unused).
        self.my_address = Address('localhost', int(self.listener_socket.getsockname()[1]))
        self.n_id = self.my_address.get_hash() % NODES
        self.threads = {}
        # Finger table entries 1..NODES (only 1..M are really used).
        self.finger = {}
        for i in range(1, NODES + 1):
            self.finger[i] = None
        self.predecessor_node = None
        self.kv_store = {}  # this node's share of the distributed data
        print('n_id = {} for endpoint = {} joining network using remote node with endpoint = {}'.format(self.n_id, self.my_address, remote_node_address))
        self.join(remote_node_address)

    def get_id(self, offset = 0):
        """Ring id of this node, optionally offset (mod NODES)."""
        return (self.my_address.get_hash() + offset) % NODES

    def join(self, remote_node_address=None):
        """Join via a known peer, or start a new ring (successor = self)."""
        if remote_node_address:
            remote_node = RemoteNode(remote_node_address)
            self.finger[1] = remote_node.find_successor(self.get_id())
            print('Successor node upon joining the network = {}'.format(self.finger[1]))
        else:
            self.finger[1] = self

    def put_key_value(self, key, value):
        self.kv_store[key] = value

    def get_key_hash(self, key):
        """Map a key onto the ring."""
        return generate_hash(key) % NODES

    def get_key(self, key):
        """Return the stored value, or the sentinel '-1' when absent."""
        if key in self.kv_store:
            return self.kv_store[key]
        else:
            return '-1'

    def __str__(self):
        return 'node id = {}, endpoint = {}'.format(self.n_id, self.my_address.endpoint + ':' + str(self.my_address.port))

    def stabilize(self):
        """Periodic Chord stabilization: adopt a closer successor and notify it."""
        while True:
            if self.predecessor() != None:
                print('In stabilize: n_id = {}, predecessor = ({})'.format(self.n_id, self.predecessor().__str__()))
            if self.successor() != None:
                print('In stabilize: n_id = {}, successor = ({})'.format(self.n_id, self.successor().__str__()))
            succ = self.successor()
            if succ == self and self.predecessor() != None:
                # Single-node ring with a known predecessor: adopt it.
                self.finger[1] = self.predecessor()
            else:
                # If our successor's predecessor sits between us and the
                # successor, it is a closer successor.
                node = succ.predecessor()
                if node != None and in_range(node.get_id(), self.get_id(), succ.get_id()) and (self.get_id() != succ.get_id()) and (node.get_id() != self.get_id()) and (node.get_id() != succ.get_id()):
                    self.finger[1] = node
            # Tell the (possibly new) successor about us each round.
            self.successor().notify(self)
            time.sleep(SLEEP_TIME_IN_SECS)

    def successor(self):
        return self.finger[1]

    def notify(self, remote):
        """Adopt *remote* as predecessor when appropriate and hand over its keys."""
        if (self.predecessor() == None or self.predecessor() == self) or (((in_range(remote.get_id(), self.predecessor().get_id(), self.get_id())) and (self.predecessor().get_id() != self.get_id()) and (remote.get_id() != self.predecessor().get_id()) and (remote.get_id() != self.get_id()))):
            self.predecessor_node = remote
            # Transfer every key that now belongs to the new predecessor.
            for key in self.kv_store.keys():
                if self.get_key_hash(key) <= remote.get_id():
                    remote.insert_key_value(key, self.kv_store[key])

    def insert_key_value(self, key, value):
        """Store key/value locally (final insertion point)."""
        print('INSERT key: {}'.format(key))
        self.put_key_value(key, value)

    def predecessor(self):
        return self.predecessor_node

    def fix_fingers(self):
        """Periodically refresh one finger entry per round."""
        index = 1
        while True:
            index = index + 1
            if index > M:
                index = 1
            self.finger[index-1] = self.find_successor(self.get_id(1 << (index-1)))
            time.sleep(SLEEP_TIME_IN_SECS)

    def pr_finger_table(self):
        """Print the first M finger entries for debugging."""
        for index in range(1, M+1):
            if self.finger[index] != None:
                print('Node ID = {} with Finger Entry[{}]: remote node id = {} and remote node address = {}'.format(self.get_id(), index, self.finger[index].get_id(), self.finger[index].my_address))
            else:
                print('Node ID = {} with Finger Entry[{}]: None'.format(self.get_id(), index))

    def check_predecessor(self):
        """Periodically drop the predecessor if it stops answering pings."""
        while True:
            if self.predecessor() != None:
                if self.predecessor().my_address.get_hash() != self.my_address.get_hash():
                    if self.predecessor().ping() == False:
                        print('Predecessor ping returned False')
                        self.predecessor_node = None
            time.sleep(SLEEP_TIME_IN_SECS)

    def process_dictionary(self, dict):
        """Route each key/value of an uploaded dict to its owning node, throttled per batch."""
        count = 0
        for key in dict:
            value = dict[key]
            hash_key = self.get_key_hash(key)
            node = self.find_successor(hash_key)
            print('Target node address: {} and target node id = {}'.format(node.my_address, node.get_id()))
            if node.get_id() == self.get_id():
                self.insert_key_value(key, value)
            else:
                print('inserting in remote node')
                node.insert_key_value(key, value)
            count = count + 1
            # Brief pause after each batch to avoid flooding peers.
            if count % BATCH_SIZE == 0:
                time.sleep(1)

    def run(self):
        """Accept loop: handle one pickled text command per connection."""
        while True:
            try:
                sock, addr = self.listener_socket.accept()
            except socket.error:
                print("Listener socket accept error")
                continue  # fix: don't fall through with no/stale socket
            # Drain the request (pickled string command).
            raw_data = bytearray()
            while True:
                data = sock.recv(BUF_SZ)
                if not data: break
                raw_data.extend(data)
                if len(data) < BUF_SZ: break
            request_received = pickle.loads(raw_data)
            if request_received:
                req = request_received.split()
                cmd = req[0]
                print('cmd = {}'.format(cmd))
                remaining_req = request_received[len(cmd)+1:]
                resp = ''
                if cmd == 'dictionary':
                    # Bulk upload: parse the dict literal and route it in the
                    # background so this accept loop stays responsive.
                    dict = ast.literal_eval(remaining_req)
                    upload_dict_thread = threading.Thread(target=self.process_dictionary, args=[dict])
                    upload_dict_thread.start()
                    resp = 'UPLOADED'
                if cmd == 'insert_key_val':
                    # Route a single key/value to its owning node.
                    key = req[1]
                    value = ''.join(req[2:])
                    hash_key = self.get_key_hash(key)
                    node = self.find_successor(hash_key)
                    if node.get_id() == self.get_id():
                        self.insert_key_value(key, value)
                    else:
                        node.insert_key_value(key, value)
                    resp = 'INSERTED'
                if cmd == 'final_insert_key_val':
                    # This node is the owner: store locally.
                    key = req[1]
                    value = ''.join(req[2:])
                    self.insert_key_value(key, value)
                    resp = 'INSERTED'
                if cmd == 'look_up_key':
                    key = req[1]
                    hash_key = self.get_key_hash(key)
                    print('lookup hash key = {}'.format(hash_key))
                    node = self.find_successor(hash_key)
                    print('Target node address: {} and target node id = {}'.format(node.my_address, node.get_id()))
                    if node.get_id() == self.get_id():
                        resp = self.look_up_key(key)
                    else:
                        resp = node.look_up_key(key)
                if cmd == 'final_look_up_key':
                    key = req[1]
                    resp = self.look_up_key(key)
                if cmd == 'get_finger_table':
                    self.pr_finger_table()
                    resp = 'Finger table printed'
                if cmd == 'successor':
                    succ = self.successor()
                    # fix: reply with the plain (endpoint, port) tuple like every
                    # other command — the original pre-pickled it here and then
                    # pickled again below, which broke RemoteNode.successor().
                    resp = (succ.my_address.endpoint, succ.my_address.port)
                if cmd == 'get_predecessor':
                    if self.predecessor_node != None:
                        pred = self.predecessor()
                        resp = (pred.my_address.endpoint, pred.my_address.port)
                if cmd == 'find_successor':
                    succ = self.find_successor(int(remaining_req))
                    resp = (succ.my_address.endpoint, succ.my_address.port)
                if cmd == 'closest_preceding_node':
                    # fix: the method is named closest_preceding_node (the
                    # original called non-existent 'closes_preceding_node').
                    closest = self.closest_preceding_node(int(remaining_req))
                    resp = (closest.my_address.endpoint, closest.my_address.port)
                if cmd == 'notify':
                    npredecessor = Address(remaining_req.split(' ')[0], int(remaining_req.split(' ')[1]))
                    self.notify(RemoteNode(npredecessor))
                sock.sendall(pickle.dumps(resp))

    def pr_now(self):
        """Current wall-clock time, formatted for log lines."""
        return datetime.now().strftime(TIME_FORMAT)

    def find_successor(self, id):
        """ Ask this node to find id's successor = successor(predecessor(id))"""
        if (in_range(id, self.get_id(), self.successor().get_id()) and (self.n_id != self.successor().get_id()) and (id != self.n_id)):
            return self.successor()
        else:
            remote = self.closest_preceding_node(id)
            if self.my_address.get_hash() != remote.my_address.get_hash():
                return remote.find_successor(id)
            else:
                # No closer node known: we are the best answer.
                return self

    def closest_preceding_node(self, id):
        """Return the closest known finger preceding *id*, or self."""
        for index in range(M+1, 0, -1):
            # fix: the original compared the bound method `self.get_id` to the
            # integer id (always unequal); the intended check is on the value.
            if (self.finger[index] != None and in_range(self.finger[index].get_id(), self.get_id(), id) and self.get_id() != id and self.finger[index].get_id() != self.get_id() and self.finger[index].get_id() != id):
                return self.finger[index]
        return self

    def look_up_key(self, key):
        """Look up *key* in the local store; '-1' signals absence."""
        print("Look up for key = {}".format(key))
        val = self.get_key(key)
        if (val != '-1'):
            print('Key found')
        else:
            print('Key does not exist')
        return val

    def inesrt_key_value(self, key, value):
        # NOTE(review): misspelled duplicate of insert_key_value, kept for
        # backward compatibility with any caller using this name.
        print('Insert key = {} and value = {}'.format(key, value))
        self.put_key_value(key, value)

    def start(self):
        """Launch the listener and the three periodic maintenance threads."""
        self.threads['run'] = threading.Thread(target=self.run)
        self.threads['fix_fingers'] = threading.Thread(target=self.fix_fingers)
        self.threads['stabilize'] = threading.Thread(target=self.stabilize)
        self.threads['check_predecessor'] = threading.Thread(target=self.check_predecessor)
        for key in self.threads:
            self.threads[key].start()
        print('started all threads successfully')
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: python chord_node.py port_number\nEnter port_number as 0 to start a new network")
        exit(1)
    remote_addr = None
    if int(sys.argv[1]) != 0:
        remote_addr = Address('localhost', int(sys.argv[1]))
    # fix: ChordNode's signature is (my_address, remote_node_address=None);
    # the original `ChordNode(remote_addr)` bound the peer address to the
    # unused my_address parameter, so the node never actually joined an
    # existing network.  Pass it as the remote node address instead.
    cn = ChordNode(None, remote_addr)
    cn.start()
| true |
990e9cc37100d3bea07d9f89c2c56c5047d8a350 | Python | yxzhang2/Projects | /AI_ML/CS440_mp1code/mp1-code/search.py | UTF-8 | 10,789 | 3.5 | 4 | [] | no_license | # search.py
# ---------------
# Licensing Information: You are free to use or extend this projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to the University of Illinois at Urbana-Champaign
#
# Created by Michael Abir (abir2@illinois.edu) on 08/28/2018
# Modified by Rahul Kunji (rahulsk2@illinois.edu) on 01/16/2019
"""
This is the main entry point for MP1. You should only modify code
within this file -- the unrevised staff files will be used for all other
files and classes when code is run, so be careful to not modify anything else.
"""
# Search should return the path and the number of states explored.
# The path should be a list of tuples in the form (row, col) that correspond
# to the positions of the path taken by your search algorithm.
# Number of states explored should be a number.
# maze is a Maze object based on the maze from the file specified by input filename
# searchMethod is the search method specified by --method flag (bfs,dfs,greedy,astar)
import collections
import queue
import heapq
import copy
import time
class SearchTree:
    """Parent-pointer tree recorded during search.

    Maps each discovered cell to the cell it was reached from, so the path
    from start to goal can be rebuilt by walking parent pointers backwards
    from the goal.
    """
    def __init__(self, start, end):
        self.root_key = start
        self.goal_key = end
        self.tree = dict()  # child cell -> parent cell it was discovered from
    def insert(self, cell_coor, nlist):
        """Record that cell *cell_coor* was discovered from parent *nlist*."""
        self.tree[cell_coor] = nlist
    def recursive_path(self,node_key, path, visited):
        """Walk parent pointers from *node_key* toward the root, appending the
        visited cells onto *path*; return True iff the root was reached."""
        if node_key in visited:
            return False
        visited[node_key] = True
        #path.append(((int(node_key/maze_size[0])), node_key % maze_size[0]))
        path.append( node_key )
        #print ( node_key % self.maze_size[0], (int (node_key/self.maze_size[0]) ))
        if node_key == self.root_key:
            #print(path)
            return True
        if node_key not in self.tree:
            # dead end: this cell was never linked to a parent — backtrack
            path.pop()
            return False
        #print(a, i)
        b = self.recursive_path(self.tree[node_key], path, visited)
        #print(path)
        if b:
            return True
        path.pop()
        return False
    def find_path(self, start, goal):
        """Return the goal->start list of cells by following parent pointers
        (callers reverse it to obtain start->goal order)."""
        self.root_key = start
        path = list()
        visited = dict()
        self.recursive_path(goal, path, visited)
        return path
def search(maze, searchMethod):
    """Dispatch to the search algorithm named by *searchMethod*
    ("bfs", "dfs", "greedy" or "astar") and return its result."""
    algorithms = {"bfs": bfs, "dfs": dfs, "greedy": greedy, "astar": astar}
    algorithm = algorithms.get(searchMethod)
    return algorithm(maze)
def bfs(maze):
    """Breadth-first search from the maze start to its first objective.

    Returns (path, states_explored) where path is the list of (row, col)
    cells from start to goal inclusive.
    """
    current_pos = maze.getStart()
    goal_pos = maze.getObjectives()[0]
    visited = dict()
    search_tree = SearchTree(current_pos, goal_pos)
    search_tree.insert(current_pos, None)
    # FIFO frontier of cells still to expand
    q = collections.deque()
    q.append(current_pos)
    while (q):
        current_pos = q.popleft()
        if current_pos == goal_pos:
            break
        elif current_pos not in visited:
            possible_moves = maze.getNeighbors(current_pos[0], current_pos[1])
            for i in possible_moves:
                q.append(i)
                # don't re-parent cells that have already been expanded
                if i not in visited:
                    search_tree.insert(i, current_pos)
            visited[current_pos] = True
    path = search_tree.find_path(maze.getStart(), goal_pos)
    path.reverse()
    # NOTE: removed the unreachable error print / `return [], 0` that followed
    # this return, plus the unused maze_size and initial `path = []` locals.
    return path, len(visited)
def dfs(maze):
    """Depth-first search from the maze start to its first objective.

    Returns (path, states_explored); the path runs from start to goal
    inclusive but is not necessarily the shortest one.
    """
    current_pos = maze.getStart()
    goal_pos = maze.getObjectives()[0]
    visited = dict()
    search_tree = SearchTree(current_pos, goal_pos)
    search_tree.insert(current_pos, None)
    # LIFO frontier (stack): popping from the right gives depth-first order
    q = collections.deque()
    q.append(current_pos)
    while (q):
        current_pos = q.pop()
        if current_pos == goal_pos:
            break
        elif current_pos not in visited:
            possible_moves = maze.getNeighbors(current_pos[0], current_pos[1])
            for i in possible_moves:
                q.append(i)
                # don't re-parent cells that have already been expanded
                if i not in visited:
                    search_tree.insert(i, current_pos)
            visited[current_pos] = True
    path = search_tree.find_path(maze.getStart(), goal_pos)
    path.reverse()
    # NOTE: removed the unreachable error print / `return [], 0` that followed
    # this return, plus the unused maze_size and initial `path = []` locals.
    return path, len(visited)
def greedy(maze):
    """Greedy best-first search: identical to DFS except that neighbors are
    pushed sorted by Manhattan distance to the goal, so the cell closest to
    the goal is expanded next. Returns (path, states_explored)."""
    current_pos = maze.getStart()
    goal_pos = maze.getObjectives()[0]
    maze_size = maze.getDimensions()
    #Set to keep track of visited nodes
    visited = dict()
    search_tree = SearchTree(current_pos, goal_pos)
    search_tree.insert(current_pos, None)
    #Queue to keep track of all paths taken
    q = collections.deque()
    q.append(current_pos)
    while (q):
        #pop the first path in the queue
        current_pos = q.pop()
        #get current position as last node in path
        if current_pos == goal_pos:
            break
        elif current_pos not in visited:
            #For each neighbor check if move is possible, make new path and added it to the queue
            possible_moves = maze.getNeighbors(current_pos[0], current_pos[1])
            # sorted in DESCENDING distance order so the stack pops the
            # closest cell first
            possible_moves = sort_list(goal_pos, possible_moves)
            for i in possible_moves:
                q.append(i)
                if i not in visited:
                    search_tree.insert(i, current_pos)
            visited[current_pos] = True
    path = search_tree.find_path(maze.getStart(),goal_pos)
    path.reverse()
    return path, len(visited)
def astar(maze):
    """A* for one or many objectives.

    Phase 1 precomputes pairwise shortest paths between the start and every
    goal with single-pair A* (helper_astar). Phase 2 runs A* over goal-visit
    orderings, using the MST weight of the still-unvisited goals as the
    admissible heuristic. Returns (path, states_explored).
    """
    goals = maze.getObjectives()
    start = maze.getStart()
    dots = goals.copy()
    dots.append(start)
    span_tree = dict.fromkeys(dots)
    explored_goals = dict.fromkeys(dots, False)
    start_time = time.time()
    # pairwise shortest paths between every "dot" (goals + start)
    for source in dots:
        span_tree[source] = {}
        for dest in goals :
            if source != dest:
                res = helper_astar(maze, source, dest)
                path = res[0]
                span_tree[source][dest] = path
    last_goal = maze.getStart()
    frontier = []
    f_n = 0
    g_n = 0
    explored_queue = collections.deque()
    states = 0
    results = []
    # frontier entry: [f, g, (current goal, visit order so far, visited map)]
    heapq.heappush(frontier, [f_n, g_n,(last_goal, explored_queue.copy(), explored_goals.copy())])
    while frontier:
        states += 1
        curr = heapq.heappop(frontier)
        g_n = curr[1]
        last_goal = curr[2][0]
        explored_queue = curr[2][1]
        explored_goals = curr[2][2]
        explored_queue.append(last_goal)
        explored_goals[last_goal] = True
        if is_completed(explored_goals):
            break
        for edge in span_tree[last_goal].keys():
            if explored_goals[edge] == False:
                temp = len(span_tree[last_goal][edge])
                # heuristic: MST weight of the goals still unvisited, with
                # *edge* tentatively marked visited for the estimate
                explored_goals[edge] = True
                f_n = MST(explored_goals.copy(), span_tree.copy()) + g_n + temp
                explored_goals[edge] = False
                heapq.heappush(frontier, [f_n ,g_n + temp, (edge, explored_queue.copy(), explored_goals.copy())])
    print(explored_queue)
    print ('Time: ' + str(time.time() - start_time))
    # stitch the per-pair paths together following the chosen goal order,
    # dropping the duplicated junction cell between consecutive segments
    path = []
    prev = explored_queue.popleft()
    curr = explored_queue.popleft()
    while explored_queue:
        if path:
            path.pop(0)
        path = span_tree[prev][curr] + path
        prev = curr
        curr = explored_queue.popleft()
    if path:
        path.pop(0)
    path = span_tree[prev][curr] + path
    path.reverse()
    return path, states
def helper_astar(maze, start, end):
    """Single-pair A* from *start* to *end* using Manhattan distance as the
    heuristic. Returns (path in goal->start order, goal, states_explored)."""
    curr = start
    goal = end
    maze_size = maze.getDimensions()
    visited = {}
    tree = SearchTree(curr, goal)
    # frontier entries are [f, g, cell]; heapq pops the lowest f first
    frontier = []
    g_n = 1
    f_n = g_n + calc_manhattan(curr, goal)
    heapq.heappush(frontier,[f_n, g_n, curr])
    fin_g = goal
    while frontier:
        current_node = heapq.heappop(frontier)
        curr = current_node[2]
        g_n = current_node[1]
        if curr == goal:
            break
        h_n = calc_manhattan(goal, curr)
        f_n = g_n + h_n
        if curr not in visited:
            visited[curr] = f_n
            g_n += 1
            for neighbor in maze.getNeighbors(curr[0], curr[1]):
                f_n = g_n + calc_manhattan(goal, neighbor)
                if neighbor not in visited:
                    heapq.heappush(frontier, [f_n, g_n, neighbor])
                    tree.insert(neighbor, curr)
    return tree.find_path(start, goal) , goal, len(visited)
def neighbor_list(maze, current_pos):
    """Return the maze's neighbors of the given (row, col) position."""
    row, col = current_pos[0], current_pos[1]
    return maze.getNeighbors(row, col)
def sort_list(goal, neighbor_list):
    """Return *neighbor_list* ordered by DECREASING Manhattan distance to
    *goal*; ties are broken by comparing the coordinates themselves in
    descending order (i.e. (distance, coordinate) tuples sorted in reverse)."""
    keyed = [(calc_manhattan(goal, cell), cell) for cell in neighbor_list]
    keyed.sort(reverse=True)
    return [cell for _, cell in keyed]
def calc_manhattan(goal, coordinate):
    """Manhattan (L1) distance between two (row, col) points."""
    row_delta = goal[0] - coordinate[0]
    col_delta = goal[1] - coordinate[1]
    return abs(row_delta) + abs(col_delta)
def is_completed(explored):
    """Return True when every goal in the *explored* map has been visited.

    Replaces the manual key loop with the idiomatic all() over the boolean
    values (callers store only True/False in this dict).
    """
    return all(explored.values())
class Uptree:
    """Union-find (disjoint set) over a fixed set of goal nodes, with path
    compression; -1 marks a root."""
    def __init__(self, goals):
        self.tree = dict.fromkeys(goals, -1)
    def find(self, key):
        """Return the root of *key*'s set, compressing the path as we go."""
        root = key
        while self.tree[root] != -1:
            root = self.tree[root]
        # second pass: point every node on the path directly at the root
        while key != root:
            parent = self.tree[key]
            self.tree[key] = root
            key = parent
        return root
    def union(self, left, right):
        """Merge the set containing *right* into the set containing *left*."""
        right_root = self.find(right)
        self.tree[right_root] = self.find(left)
def MST(explored_map, path_map):
    """Kruskal minimum-spanning-tree weight over the goals not yet visited.

    The weight of an edge between two goals is the length of their
    precomputed path in *path_map*; the total is used as the admissible
    heuristic for multi-goal A*.
    """
    vertices = []
    for goal in explored_map:
        if explored_map[goal] == False:
            vertices.append(goal)
    # min-heap of [weight, source, dest] over every goal pair
    frontier = []
    for source in vertices:
        for dest in vertices:
            if source != dest:
                heapq.heappush(frontier, [len(path_map[source][dest]), source, dest])
    edge_lim = len(vertices) - 1
    retVal = 0
    edge_num = 0
    tree = Uptree(vertices)
    while frontier and edge_num != edge_lim:
        edge = heapq.heappop(frontier)
        source = edge[1]
        dest = edge[2]
        # accept the edge only if it joins two different components
        if tree.find(source) != tree.find(dest):
            tree.union(source, dest)
            retVal += len(path_map[source][dest])
            edge_num += 1
    return retVal
eef1843039d386b62a9c6f3d91fcb52cf61e69b5 | Python | icevivian/Hello_offer | /567.字符串的排列.py | UTF-8 | 1,096 | 3.015625 | 3 | [] | no_license | #
# @lc app=leetcode.cn id=567 lang=python3
#
# [567] 字符串的排列
#
# @lc code=start
class Solution:
    def checkInclusion(self, s1: str, s2: str) -> bool:
        """Return True if some permutation of s1 occurs as a substring of s2.

        Sliding window of character counts over s2, compared against the
        counts required by s1; O(len(s2)) time. Removed the unused
        `minlen` local and replaced the if/else count updates with the
        dict.get idiom.
        """
        left = right = 0
        # characters (and counts) the window must contain
        need = {}
        for ch in s1:
            need[ch] = need.get(ch, 0) + 1
        window = {}
        valid = 0  # number of characters whose required count is fully met
        while right < len(s2):
            word = s2[right]
            right += 1
            if word in need:
                window[word] = window.get(word, 0) + 1
                if window[word] == need[word]:
                    valid += 1
            # shrink once the window reaches len(s1)
            while right - left >= len(s1):
                if valid == len(need):
                    return True
                word = s2[left]
                left += 1
                if word in need:
                    if window[word] == need[word]:
                        valid -= 1
                    window[word] -= 1
        return False
# @lc code=end
| true |
d3c4d997c65d474f233511a3fafe10d4227a930b | Python | GeoMukkath/python_programs | /All_python_programs/anagram.py | UTF-8 | 210 | 4.125 | 4 | [] | no_license | #Q. Check whether the given string is an anagram or not.
str1 = input("Enter string1 : ")
str2 = input("Enter string2 : ")
# Two strings are anagrams iff their sorted characters are identical.
if sorted(str1) == sorted(str2):
    print("The given strings are anagrams")
else:
    # BUG FIX: the original printed nothing at all in the negative case.
    print("The given strings are not anagrams")
| true |
d76738d968cfebc2c5bfe151d7fa035d8a131912 | Python | iotgopigo/gopigo1st_season | /array.py | UTF-8 | 251 | 3.359375 | 3 | [] | no_license |
def array(rect):
    """Replace the contents of *rect* in place with four fixed coordinate
    pairs; always returns True."""
    del rect[:]
    rect.extend([[1, 2], [3, 4], [5, 6], [7, 8]])
    return True
if __name__ == "__main__":
rect = []
for num in range(2):
array(rect)
print rect
| true |
aafe37d2ff453d5f6a816f6b66929008a72177d0 | Python | CutiePizza/holbertonschool-higher_level_programming | /0x0F-python-object_relational_mapping/14-model_city_fetch_by_state.py | UTF-8 | 744 | 2.59375 | 3 | [] | no_license | #!/usr/bin/python3
"""
Start link class to table in database
"""
import sys
from model_city import Base, City
from model_state import Base, State
from sqlalchemy import (create_engine)
from sqlalchemy.orm import sessionmaker
if __name__ == "__main__":
engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(
sys.argv[1],
sys.argv[2],
sys.argv[3]
), pool_pre_ping=True)
Base.metadata.create_all(engine)
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
for row1, row2 in session.query(City, State).filter(
City.state_id == State.id
).order_by(City.id).all():
print("{}: ({}) {}".format(row2.name, row1.id, row1.name))
| true |
d91bf20756de79ae39e3bbefdbeac1ee15f0bc6b | Python | elonca/LWB-benchmark-generator | /defs.py | UTF-8 | 8,282 | 3.234375 | 3 | [] | no_license | import sys
sys.setrecursionlimit(1000001)
class Formula:
    """Abstract base for all modal-logic formula nodes."""
    pass
class TRUE_(Formula):
    """The propositional constant "true" (use the shared TRUE instance)."""
    def __init__(self):
        pass
    def __str__(self):
        return "true"
    def write(self, file):
        file.write("true")
class FALSE_(Formula):
    """The propositional constant "false" (use the shared FALSE instance)."""
    def __init__(self):
        pass
    def __str__(self):
        return "false"
    def write(self, file):
        file.write("false")
# shared singleton instances of the two constants
TRUE=TRUE_()
FALSE=FALSE_()
class Lit(Formula):
    """A propositional variable, rendered as p<num> in both formats."""
    def __init__(self, num):
        self.num = num
    def __str__(self):
        return "p{}".format(self.num)
    def write(self, file):
        file.write(str(self))
class Not(Formula):
    """Logical negation. Note that __str__ ("(~x)") and write() ("~(x)")
    deliberately use different surface syntaxes."""
    def __init__(self, a1):
        self.a1 = a1
    def __str__(self):
        return "(~" + str(self.a1) + ")"
    def write(self, file):
        file.write("~(")
        self.a1.write(file)
        file.write(")")
class And(Formula):
    """Binary conjunction; rendered with "&" in both formats."""
    def __init__(self, a1, a2):
        self.a1 = a1
        self.a2 = a2
    def __str__(self):
        return "({} & {})".format(self.a1, self.a2)
    def write(self, file):
        file.write("(")
        self.a1.write(file)
        file.write(" & ")
        self.a2.write(file)
        file.write(")")
class Or(Formula):
    """Binary disjunction; "v" in __str__, "|" in write()."""
    def __init__(self, a1, a2):
        self.a1 = a1
        self.a2 = a2
    def __str__(self):
        return "({} v {})".format(self.a1, self.a2)
    def write(self, file):
        file.write("(")
        self.a1.write(file)
        file.write(" | ")
        self.a2.write(file)
        file.write(")")
class Implies(Formula):
    """Material implication; rendered with "->" in both formats."""
    def __init__(self, a1, a2):
        self.a1 = a1
        self.a2 = a2
    def __str__(self):
        return "({} -> {})".format(self.a1, self.a2)
    def write(self, file):
        file.write("(")
        self.a1.write(file)
        file.write(" -> ")
        self.a2.write(file)
        file.write(")")
class Iff(Formula):
    """Biconditional (if and only if); rendered with "<->" in both formats."""
    def __init__(self, a1, a2):
        self.a1 = a1
        self.a2 = a2
    def __str__(self):
        return "({} <-> {})".format(self.a1, self.a2)
    def write(self, file):
        file.write("(")
        self.a1.write(file)
        file.write(" <-> ")
        self.a2.write(file)
        file.write(")")
class Box(Formula):
    """Modal necessity. __str__ renders "box", write() renders "[r1]"; a
    separating space is inserted only before atomic operands
    (literal / true / false)."""
    def __init__(self, a1):
        self.a1 = a1
    def _atomic(self):
        # True when the operand is a literal or a propositional constant
        return isinstance(self.a1, (Lit, TRUE_, FALSE_))
    def __str__(self):
        sep = " " if self._atomic() else ""
        return "(box{}{})".format(sep, self.a1)
    def write(self, file):
        sep = " " if self._atomic() else ""
        file.write("([r1]" + sep)
        self.a1.write(file)
        file.write(")")
class Dia(Formula):
    """Modal possibility. __str__ renders "dia", write() renders "<r1>"; a
    separating space is inserted only before atomic operands
    (literal / true / false)."""
    def __init__(self, a1):
        self.a1 = a1
    def _atomic(self):
        # True when the operand is a literal or a propositional constant
        return isinstance(self.a1, (Lit, TRUE_, FALSE_))
    def __str__(self):
        sep = " " if self._atomic() else ""
        return "(dia{}{})".format(sep, self.a1)
    def write(self, file):
        sep = " " if self._atomic() else ""
        file.write("(<r1>" + sep)
        self.a1.write(file)
        file.write(")")
# cache of already-created propositional variables, keyed by index
done = {}
def p(n):
    """Return the (memoized) propositional variable p<n>; repeated calls with
    the same index yield the identical Lit object."""
    global done
    if n in done:
        return done[n]
    lit = Lit(n)
    done[n] = lit
    return lit
def mbox(n, formula):
    """Wrap *formula* in *n* nested Box operators."""
    result = formula
    for _ in range(n):
        result = Box(result)
    return result
def mdia(n, formula):
    """Wrap *formula* in *n* nested Dia operators."""
    result = formula
    for _ in range(n):
        result = Dia(result)
    return result
def list2conj(lst):
    """Fold *lst* into a left-nested conjunction; TRUE for an empty list."""
    if not lst:
        return TRUE
    return functools.reduce(And, lst)
def list2disj(lst):
    """Fold *lst* into a left-nested disjunction; FALSE for an empty list."""
    if not lst:
        return FALSE
    return functools.reduce(Or, lst)
from functools import partial
# Infix code from http://tomerfiliba.com/blog/Infix-Operators/
class Infix(object):
    """Wrap a two-argument function so it can be used as `x |op| y`.

    `x |op` triggers __ror__ (capturing the left operand in a partial
    application); the following `| y` triggers __or__, which applies the
    function to the right operand.
    """
    def __init__(self, func):
        self.func = func
    def __ror__(self, other):
        # left operand seen first: capture it
        return Infix(partial(self.func, other))
    def __or__(self, other):
        # right operand: apply the (possibly partially applied) function
        return self.func(other)
    def __call__(self, v1, v2):
        return self.func(v1, v2)
# Infix constructors for formulas: `a |AND| b` builds And(a, b), etc.
@Infix
def AND(x, y): return And(x, y)
@Infix
def OR(x, y): return Or(x, y)
@Infix
def IMPLIES(x, y): return Implies(x, y)
@Infix
def IFF(x, y): return Iff(x, y)
###############################################################################
# Standard modal-axiom schemas (LWB benchmark style). Each returns the axiom
# instantiated with the given propositional variable(s); the p(i) defaults
# are evaluated once at definition time and shared.
def D(p0=p(0)):
    # seriality: box p -> dia p
    return Box(p0) |IMPLIES| Dia(p0)
def D2(p0=p(0)):
    # seriality, alternate form: dia true
    return Dia(TRUE)
def B(p0=p(0)):
    # symmetry: p -> box dia p
    return p0 |IMPLIES| Box(Dia(p0))
def T(p0=p(0)):
    # reflexivity: box p -> p
    return Box(p0) |IMPLIES| p0
def A4(p0=p(0)):
    # transitivity (axiom 4): box p -> box box p
    return Box(p0) |IMPLIES| Box(Box(p0))
def A5(p0=p(0)):
    # euclideanness (axiom 5): ~box p -> box ~box p
    return Not(Box(p0)) |IMPLIES| Box(Not(Box(p0)))
def H(p0=p(0), p1=p(1)):
    return (Box(p0 |OR| p1) |AND| Box(Box(p0) |OR| p1) |AND| Box(p0 |OR| Box(p1))) |IMPLIES| \
           (Box(p0) |OR| Box(p1))
def L(p0=p(0), p1=p(1)):
    return Box(p0 |AND| Box(p0) |IMPLIES| p1) |OR| Box(p1 |AND| Box(p1) |IMPLIES| p0)
def Lplus(p0=p(0), p1=p(1)):
    return Box(Box(p0) |IMPLIES| p1) |OR| Box(Box(p1) |IMPLIES| p0)
def Grz(p0=p(0)):
    # Grzegorczyk axiom
    return Box(Box(p0 |IMPLIES| Box(p0)) |IMPLIES| p0) |IMPLIES| p0
def Grz1(p0=p(0)):
    return Box(Box(p0 |IMPLIES| Box(p0)) |IMPLIES| p0) |IMPLIES| Box(p0)
def Dum(p0=p(0)):
    # Dummett axiom
    return Box(Box(p0 |IMPLIES| Box(p0)) |IMPLIES| p0) |IMPLIES| (Dia(Box(p0)) |IMPLIES| p0)
def Dum1(p0=p(0)):
    return Box(Box(p0 |IMPLIES| Box(p0)) |IMPLIES| p0) |IMPLIES| (Dia(Box(p0)) |IMPLIES| Box(p0))
def Dum4(p0=p(0)):
    return Box(Box(p0 |IMPLIES| Box(p0)) |IMPLIES| p0) |IMPLIES| (Dia(Box(p0)) |IMPLIES| (p0 |OR| Box(p0)))
def nnf(f):
    """Return *f* converted to negation normal form: implications and
    biconditionals eliminated, negations pushed down to the literals.

    Raises TypeError for an unknown node type. (The original ended with
    `assert "Missing case in nnf"`, which is a no-op on a truthy string, so
    unknown nodes silently fell through and returned None.) Iff support is
    new: a <-> b is expanded to (a -> b) & (b -> a) before conversion.
    """
    if isinstance(f, Lit): return f
    if isinstance(f, TRUE_): return f
    if isinstance(f, FALSE_): return f
    if isinstance(f, And): return And(nnf(f.a1), nnf(f.a2))
    if isinstance(f, Or): return Or(nnf(f.a1), nnf(f.a2))
    if isinstance(f, Implies): return nnf(Or(Not(f.a1), f.a2))
    if isinstance(f, Iff): return nnf(And(Implies(f.a1, f.a2), Implies(f.a2, f.a1)))
    if isinstance(f, Box): return Box(nnf(f.a1))
    if isinstance(f, Dia): return Dia(nnf(f.a1))
    if isinstance(f, Not):
        if isinstance(f.a1, Not):
            return nnf(f.a1.a1)
        if isinstance(f.a1, Box):
            return nnf(Dia(Not(f.a1.a1)))
        if isinstance(f.a1, Dia):
            return nnf(Box(Not(f.a1.a1)))
        if isinstance(f.a1, And):
            return nnf(Or(Not(f.a1.a1), Not(f.a1.a2)))
        if isinstance(f.a1, Or):
            return nnf(And(Not(f.a1.a1), Not(f.a1.a2)))
        if isinstance(f.a1, Implies):
            return nnf(And(f.a1.a1, Not(f.a1.a2)))
        if isinstance(f.a1, Iff):
            return nnf(Not(And(Implies(f.a1.a1, f.a1.a2), Implies(f.a1.a2, f.a1.a1))))
        if isinstance(f.a1, Lit):
            return Not(f.a1)
        if isinstance(f.a1, TRUE_): return FALSE
        if isinstance(f.a1, FALSE_): return TRUE
    raise TypeError("Missing case in nnf: {!r}".format(f))
def size(f):
    """Return the number of nodes (operators + atoms) in formula *f*.

    Raises TypeError for an unknown node type. (The original ended with
    `assert "Missing case in size"`, a no-op on a truthy string, so unknown
    nodes silently returned None.) Iff is now counted like the other binary
    connectives.
    """
    if isinstance(f, Lit): return 1
    if isinstance(f, TRUE_): return 1
    if isinstance(f, FALSE_): return 1
    if isinstance(f, And): return 1 + size(f.a1) + size(f.a2)
    if isinstance(f, Or): return 1 + size(f.a1) + size(f.a2)
    if isinstance(f, Implies): return 1 + size(f.a1) + size(f.a2)
    if isinstance(f, Iff): return 1 + size(f.a1) + size(f.a2)
    if isinstance(f, Box): return 1 + size(f.a1)
    if isinstance(f, Dia): return 1 + size(f.a1)
    if isinstance(f, Not): return 1 + size(f.a1)
    raise TypeError("Missing case in size: {!r}".format(f))
import collections
import functools
# Code from https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
class memoized(object):
    '''Decorator. Caches a function's return value each time it is called.
    If called later with the same arguments, the cached value is returned
    (not reevaluated). Calls with unhashable arguments are executed
    directly and not cached.
    '''
    def __init__(self, func):
        self.func = func
        self.cache = {}
    def __call__(self, *args):
        # BUG FIX: the original used `isinstance(args, collections.Hashable)`,
        # which (a) breaks on Python >= 3.10 where Hashable moved to
        # collections.abc, and (b) is always True for a tuple regardless of
        # its contents, so unhashable arguments still crashed the dict lookup.
        # EAFP handles both: a TypeError from hashing means "uncacheable".
        try:
            return self.cache[args]
        except KeyError:
            value = self.func(*args)
            self.cache[args] = value
            return value
        except TypeError:
            # uncacheable (e.g. a list argument): better to not cache than blow up
            return self.func(*args)
    def __repr__(self):
        '''Return the function's docstring.'''
        return self.func.__name__
    def __get__(self, obj, objtype):
        '''Support instance methods.'''
        return functools.partial(self.__call__, obj)
| true |
d2da9d74cfc052af1835c8c549ad4e6ac544a7e3 | Python | Rorodu29/monClasseurNSI | /Robin NSI/mouvements.py | UTF-8 | 647 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import Motor
from pybricks.robotics import DriveBase
from pybricks.parameters import Port, Stop, Direction
from time import sleep
ev3 = EV3Brick()
# drive motors on ports B and C, combined into one differential-drive base
# (wheel diameter and axle track in millimetres)
left_motor = Motor(Port.B)
right_motor = Motor(Port.C)
robot = DriveBase(left_motor, right_motor, wheel_diameter=55.5, axle_track=104)
def avancer(distance) :
    """Drive straight for *distance* millimetres (negative = backwards)."""
    robot.straight(distance)
def stop() :
    """Stop the drive base."""
    robot.stop()
def tourner(angle):
    """Turn in place by *angle* degrees (negative = opposite direction)."""
    robot.turn(angle)
#TEST
if __name__ == '__main__' :
    # smoke test: forward 100 mm, pause, quarter turn, half turn back, reverse
    avancer(100)
    stop()
    sleep(2)
    tourner(90)
    tourner(-180)
    avancer(-100)
| true |
e247a495e2ffbe3fa434b19486ebbf5fdc3de3df | Python | HarshKothari21/Covid-19_System_and_Analysis | /India_StateAnalysis_Notifications.py | UTF-8 | 1,004 | 2.875 | 3 | [] | no_license | from plyer import notification
import requests
from bs4 import BeautifulSoup
import time
def notifyMe(title, message):
    """Show a desktop notification via plyer with a 15-second timeout."""
    notification.notify(
        title = title,
        message = message,
        app_icon = None,
        timeout =15
    )
def getData(url):
    """Fetch *url* over HTTP and return the response body as text."""
    return requests.get(url).text
if __name__ == "__main__":
notifyMe("Harsh", "Hey, Updates in Corona Data")
myHtmlData = getData('https://www.mohfw.gov.in/')
soup = BeautifulSoup(myHtmlData, 'html.parser')
myDataStr = ""
for tr in soup.find_all('tbody')[0].find_all('tr'):
myDataStr += tr.get_text()
myDataStr = myDataStr[1:]
itemList = myDataStr.split("\n\n")
states = ['Gujarat', 'Uttar Pradesh']
for item in itemList[0:29]:
dataList = item.split("\n")
if dataList[1] in states:
print(dataList)
nTitle = 'Cases of Covid-19'
nText = f"{dataList[1]}- Total Cases : {dataList[2]}"
notifyMe(nTitle, nText)
#IF you want to get notificaton every hour then write time.sleep(3600)
#and apply an while(True) loop to whole body | true |
1061443c2482979e7f63a4ddcc7434f7eea3b5b6 | Python | soarhigh03/baekjoon-solutions | /solutions/prob3009/solution_python.py | UTF-8 | 292 | 3.375 | 3 | [] | no_license | """
Baekjoon Online Judge #3009
https://www.acmicpc.net/problem/3009
"""
a = []
b = []
# Three corners of an axis-parallel rectangle are read from stdin. For each
# axis, the coordinate of the missing fourth corner is the one that appears
# an odd number of times (i.e. exactly once) among the three given corners.
for _ in range(3):
    x, y = map(int, input().split())
    if x in a:
        a.remove(x)
    else:
        a.append(x)
    if y in b:
        b.remove(y)
    else:
        b.append(y)
# exactly one x and one y survive the pairing-off above
print(a[0], b[0])
| true |
9da2ace699b2aa242eed15c3a4f5bedb3817b086 | Python | ximet/algoset | /src/datastructures/hashTable/test/HashTableNode_test.py | UTF-8 | 351 | 3.171875 | 3 | [] | no_license | from src.datastructures.hashTable.HashTableNode import HashTableNode
def test_linkedListNodeWithoutLink():
    # a freshly constructed node keeps its key/value and has no successor
    node = HashTableNode(1, 2)
    assert node.key == 1
    assert node.value == 2
    assert node.next == None
def test_stringPresentation():
    # __str__ should expose the key, value and next pointer verbatim
    node = HashTableNode(1, 2)
    assert str(node) == 'HashTableNode(key=1, value=2, next=None)'
1cc44bad904bef774a73a26ed3b69b6fe9bf916b | Python | Nigam-Niti/deep_learning_practice | /pytorch/pytorch_practice_1/02.logistic_regression.py | UTF-8 | 1,863 | 2.703125 | 3 | [] | no_license | import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Hyperparameters
inp_size = 28*28       # flattened 28x28 MNIST image
num_classes = 10       # digits 0-9
num_epochs = 2
batch_size = 64
learning_rate = 0.001

# MNIST dataset (downloaded on first run)
train_dataset = torchvision.datasets.MNIST(
    root='~/.pytorch-datasets/',
    train=True,
    transform=transforms.ToTensor(),
    download=True
)
test_dataset = torchvision.datasets.MNIST(
    root='~/.pytorch-datasets/',
    train=False,
    transform=transforms.ToTensor()
)
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=batch_size,
    shuffle=True
)
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset,
    batch_size=batch_size,
    shuffle=False
)

# Logistic regression = one linear layer; CrossEntropyLoss applies the
# softmax internally, so no activation is needed here.
model = nn.Linear(inp_size, num_classes)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# Model training
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Flatten each image to (batch, 784)
        images = images.reshape(-1, inp_size)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))

# Model test — no gradient computation needed
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, inp_size)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        # BUG FIX: .item() converts the 0-d tensor sum to a plain int, so the
        # accuracy below prints as a number instead of a tensor repr.
        correct += (predicted == labels).sum().item()
    print("Accuracy of the model on the 10000 test images: {} %".format(100 * correct // total))

# Save the model checkpoint
torch.save(model.state_dict(), 'log_reg.ckpt')
| true |
74f47d75436d5ee6120a89d38445f902a1a35a45 | Python | boringlee24/combinatorial_optimization | /bruteforce.py | UTF-8 | 1,529 | 2.9375 | 3 | [] | no_license | import itertools
import random
from time import time
import pdb
import json
from joblib import Parallel, delayed
import os
def bruteforce(x_list, target):
    """Return the largest subset sum of *x_list* that does not exceed *target*.

    Enumerates subsets by increasing size (capped at 10 minutes of wall
    time, returning the best found so far). Changes from the original:
    removed a leftover `pdb.set_trace()` debug hook that fired for
    target == 2000, and added an early exit when the target is hit exactly
    (no better answer is possible).
    """
    optimal = 0
    start_t = time()
    time_lim = 600  # seconds (10 min)
    for size in range(len(x_list) + 1):
        for combo in itertools.combinations(x_list, size):
            total = sum(combo)
            if optimal < total <= target:
                optimal = total
                if optimal == target:
                    # exact hit — cannot do better
                    return optimal
            if time() - start_t > time_lim:
                return optimal
    return optimal
def powerset(iterable):
    """Yield every subset of *iterable* as a tuple, from size 0 up to
    the full set."""
    items = list(iterable)
    by_size = (itertools.combinations(items, size) for size in range(len(items) + 1))
    return itertools.chain.from_iterable(by_size)
def inner_loop(line, index):
    """Solve one "target,x1,x2,..." input line.

    Returns (best_subset_sum, number_of_candidates); *index* is only used
    for the progress message.
    """
    # pdb.set_trace()
    split = line.split(',')
    # first field is the target; the remaining fields are the candidates
    target = int(split.pop(0))
    split[-1] = split[-1].replace('\n','')
    x_list = [int(i) for i in split]
    bf = bruteforce(x_list, target)
    print(f'finished line {index}')
    return (bf, len(x_list))
def main():
    """Solve every line of input.txt in parallel and dump the results
    (as [best_sum, n_items] pairs) to data/bruteforce.json."""
    # throwaway random instance — generated but never used below; kept as-is
    x_list = []
    for i in range(0,5):
        num = random.randint(0,100)
        x_list.append(num)
    target = random.randint(0,100)
    input_f = open('input.txt', 'r')
    rline = input_f.readlines()
    input_f.close()
    usable_cores = ['0']#os.sched_getaffinity(0) #TODO
    # one joblib worker per "usable core"; each worker handles one line
    bf_opt = Parallel(n_jobs=len(usable_cores))(delayed(inner_loop)(line, rline.index(line)) for line in rline)
    with open(f'data/bruteforce.json', 'w') as f:
        json.dump(bf_opt, f, indent=4)
if __name__ == '__main__':
    main()
| true |
ad84e502aee8ad2ab67a58ec0665b395ed0d20fb | Python | vishnusak/DojoAssignments | /10-MAY-2016_Assignment/python/alphaorder.py | UTF-8 | 1,136 | 4.59375 | 5 | [] | no_license | # Is Word Alphabetical
# Nikki, a queen of gentle sarcasm, loves the word facetiously. Lance helpfully points out that it is the only known English word that contains all five vowels in alphabetical order, and it even has a 'y' on the end! Nikki takes a break from debugging to turn and give him an acid stare that could only be described as arsenious. Given a string, return a boolean indicating whether all letters contained in that string are in alphabetical order.
# steps:
# ---- ignoring capitalization.
# 1. start reading the string char by char
# 2. if current char is < previous char, return false. else return true
def order(string):
    """Return True if the letters of *string* appear in alphabetical order.

    Case is ignored and non-letter characters are skipped. BUG FIX: the
    original had `else: return True` inside the loop, so it returned after
    comparing only the FIRST pair of letters; it also returned None for
    strings with fewer than two letters. Now every adjacent pair is checked
    and short strings are trivially ordered (True).
    """
    letters = [ch.lower() for ch in string if 'a' <= ch.lower() <= 'z']
    for pos in range(1, len(letters)):
        if letters[pos] < letters[pos - 1]:
            return False
    return True
myStr = "Abcd efijkl nop st"
# myStr = "facetiously"
print("The string is '{}'").format(myStr)
myResult = order(myStr)
if (myResult):
print("The alphabet is in order")
else:
print("The alphabet is not in order")
| true |
e91be4977481e7f46e6cedbe4c258047cc681036 | Python | RyanBusby/fishery | /image_prep.py | UTF-8 | 2,574 | 2.921875 | 3 | [] | no_license | import numpy as np
from skimage import exposure
from skimage import filters
from skimage.color.adapt_rgb import adapt_rgb, each_channel
from skimage.transform import resize
from skimage.util import pad
def fix_nv(image):
    '''
    INPUT: numpy.3darray
    OUTPUT: numpy.3darray
    if an image has a green or blue/green tint, changes the correlation of the color channels to reduce the tint
    '''
    h, w, ch = image.shape
    im2 = image.reshape(h*w, 3)
    # mean green minus mean red / mean blue over all pixels (channel order RGB)
    g_less_r = np.mean(im2, axis=0)[1] - np.mean(im2, axis=0)[0]
    g_less_b = np.mean(im2, axis=0)[1] - np.mean(im2, axis=0)[2]
    if g_less_r > 25 or g_less_b > 25:
        # NOTE(review): im_adj aliases image, so the INPUT array is modified
        # in place; the abs(int - k) trick presumably guards against uint8
        # underflow when subtracting — assumes uint8 pixel data, verify.
        if g_less_b > g_less_r:
            # blue/green tint: boost blue, subtract then scale green
            # (the second green assignment reads the already-shifted values,
            # because im_adj and image are the same array)
            im_adj = image
            im_adj[:,:,2] = image[:,:,2] * 1.6
            im_adj[:,:,1] = np.abs(image[:,:,1].astype(int) - 25).astype(np.uint8)
            im_adj[:,:,1] = image[:,:,1] * .8
        else:
            # green tint: subtract then scale green, subtract from blue
            im_adj = image
            im_adj[:,:,1] = np.abs(image[:,:,1].astype(int) - 40).astype(np.uint8)
            im_adj[:,:,1] = image[:,:,1] * .75
            im_adj[:,:,2] = np.abs(image[:,:,2].astype(int) - 25).astype(np.uint8)
        return im_adj
    else:
        return image
# adapt_rgb(each_channel) applies the single-channel filter to each of the
# three color channels independently and recombines the result
@adapt_rgb(each_channel)
def scharr_each(image):
    '''
    implements skimage scharr filter which finds edges of an image, and adapts the filter to three color channels
    '''
    return filters.scharr(image)
def resize_and_pad(image):
    '''
    INPUT: numpy.3darray
    OUTPUT: numpy.3darray
    Reduce the image to fit a 256x144 frame while keeping its proportions:
    scale so one side matches the target, then zero-pad (letterbox) the other.
    BUG FIX: the size/padding computations used true division, which under
    Python 3 produces float shapes and pad widths that resize/pad reject;
    floor division (//) matches the original Python 2 semantics on both.
    '''
    h, w = image.shape[0], image.shape[1]
    if w > h:
        image = resize(image, (144, 144 * w // h, 3))
    else:
        image = resize(image, (256 * h // w, 256, 3))
    h, w = image.shape[0], image.shape[1]
    h_pad = (256 - w) // 2
    v_pad = (144 - h) // 2
    # when one axis already fits and the other needs an odd amount of
    # padding, put the extra pixel on the leading side
    if (256 - w) == 0 and (144 - h) % 2 != 0:
        image = pad(image, ((v_pad + 1, v_pad), (h_pad, h_pad), (0, 0)), 'constant', constant_values=(0,))
    elif (256 - w) % 2 != 0 and (144 - h) == 0:
        image = pad(image, ((v_pad, v_pad), (h_pad + 1, h_pad), (0, 0)), 'constant', constant_values=(0,))
    else:
        image = pad(image, ((v_pad, v_pad), (h_pad, h_pad), (0, 0)), 'constant', constant_values=(0,))
    return image
def prep_image(image):
    '''
    implement functions and skimage methods to prepare image for processing
    '''
    # de-tint, darken slightly (gamma > 1), boost local contrast (CLAHE),
    # then letterbox to the fixed 256x144 frame
    image = fix_nv(image)
    image = exposure.adjust_gamma(image, gamma=1.2)
    image = exposure.equalize_adapthist(image)
    image = resize_and_pad(image)
    return image
| true |
09fc95c912b43ac99aed5b63f96b436cb33dbf46 | Python | Radmirkus/MelBo | /simplevk.py | UTF-8 | 2,903 | 2.546875 | 3 | [
"MIT"
] | permissive | import logging
import json
import time
from html.parser import HTMLParser
try:
import requests
except ImportError:
print('установите библиотеку requests')
class vk:
    """Minimal VK (vk.com) API client: form-based OAuth login plus raw
    HTTPS method calls against the public API."""
    app_id = ''
    user_id = ''
    access_token = ''
    v = '5.64'  # default API version
    def authorize(self, app_id, login, password, scope, v):
        """Log in through VK's OAuth web form and store access_token/user_id.

        Scrapes the login form with vkParser, submits the credentials, and —
        if VK answers with a second (confirmation) form — submits that too;
        the token is then extracted from the redirect URL fragment. Raises
        AuthorizationError when no user id comes back (bad credentials).
        """
        self.app_id = app_id
        self.v = v
        with requests.Session() as vk_session:
            r = vk_session.get('https://oauth.vk.com/authorize?client_id='+app_id+'&display=page&redirect_uri=https://vk.com&scope='+scope+'&response_type=token&v='+v)
            p = vkParser()
            p.feed(r.text)
            p.close()
            p.login_data['email'] = login
            p.login_data['pass'] = password
            if p.method == 'get':
                r = vk_session.get(p.url, params=p.login_data)
            elif p.method == 'post':
                r = vk_session.post(p.url, data=p.login_data)
            if r.url.find('access_token=') >= 0:
                self.access_token = r.url.partition('access_token=')[2].split('&')[0]
                self.user_id = r.url.partition('user_id=')[2]
            else:
                # a follow-up confirmation form: submit it and re-read the URL
                p = vkParser()
                p.feed(r.text)
                p.close()
                if p.method == 'get':
                    r = vk_session.get(p.url)
                if p.method == 'post':
                    r = vk_session.post(p.url)
                self.access_token = r.url.partition('access_token=')[2].split('&')[0]
                self.user_id = r.url.partition('user_id=')[2]
            if not self.user_id:
                raise AuthorizationError('Неправильный логин или пароль')
    def request(self, method, params=''):
        """Call API *method* with an already URL-encoded *params* string and
        return the decoded JSON response (the token is appended when set)."""
        access_param = '&access_token='+str(self.access_token) if self.access_token else ''
        api_request = requests.get('https://api.vk.com/method/'+method+'?'+params+access_param+'&v='+str(self.v))
        return api_request.json()
    def encode_cyrilic(self, text):
        """Percent-encode *text*'s UTF-8 bytes for a query string.

        NOTE(review): this hand-rolled rewrite of the bytes repr only
        converts \\x escapes; urllib.parse.quote is the proper tool.
        """
        return str(text.encode("utf-8")).replace("\\x", "%")[2:-1]
class vkParser(HTMLParser):
    """Scrapes a VK login page's <form>: its action URL, submit method, and
    every named <input> pre-filled with its value attribute (or "")."""
    def __init__(self):
        HTMLParser.__init__(self)
        self.login_data = {}
        self.method = "GET"
        self.url = ""
    def handle_starttag(self, tag, atribs):
        attrs = dict(atribs)
        if tag == 'form':
            self.url = attrs['action']
            if 'method' in attrs:
                self.method = attrs['method']
        elif tag == 'input' and 'name' in attrs:
            # inputs without an explicit value default to the empty string
            self.login_data[attrs['name']] = attrs.get('value', "")
class AuthorizationError(Exception):
    """Raised by vk.authorize when VK returns no user id (bad credentials);
    the human-readable reason is kept in .value."""
    def __init__(self, value):
        self.value = value
| true |