blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
6f245d49ec4bf16f337ecdc6a7fcce05da953851 | Python | drgarcia1986/coding | /algorithms/search/string_pattern_matching/python/string_pattern_matching.py | UTF-8 | 401 | 3.203125 | 3 | [] | no_license | """
>>> matching('aababba', 'abba')
3
>>> matching('cormen', 'skiena')
-1
"""
def matching(t: str, p: str) -> int:
    """Return the index of the first occurrence of pattern p in text t, or -1.

    Naive scan of every alignment; an empty pattern matches at index 0.
    """
    window_count = len(t) - len(p) + 1
    for start in range(window_count):
        if t[start:start + len(p)] == p:
            return start
    return -1
if __name__ == '__main__':
    # Run the doctest examples embedded in the module docstring above.
    import doctest
    doctest.testmod()
| true |
65d0a8394f7a5f8f6a3aa56c1c7fb80c821a718f | Python | naghamghanim/facodersPython | /Python/try2.py | UTF-8 | 184 | 3.640625 | 4 | [] | no_license | def add_to_list(list2,n):
L=len(list2)
x=0
while(x<L):
list2[x]+=2
x=x+1
return list2
# Demo: bump every element by 2 (the function mutates list1 in place) and print it.
list1 = [10, 5, 2, 6, 9, 11, 23]
print(add_to_list(list1,2))
| true |
4cea034974a96105e88416f56f36e576faafadc7 | Python | fandiarfa26/auto-summ | /app/models.py | UTF-8 | 1,049 | 2.65625 | 3 | [] | no_license | from datetime import datetime
from app import db
class Book(db.Model):
    """A book; parent of a collection of Chapter rows."""
    id = db.Column(db.Integer, primary_key=True)
    # Short external identifier (not enforced unique at the DB level here).
    code = db.Column(db.String(100))
    title = db.Column(db.String(100))
    # One-to-many: chapters pointing back via Chapter.book_id.
    chapters = db.relationship('Chapter')
    def __repr__(self):
        return '<Book ID:{}>'.format(self.id)
class Chapter(db.Model):
    """A chapter of a Book; parent of a collection of Summary rows."""
    id = db.Column(db.Integer, primary_key=True)
    code = db.Column(db.String(100))
    title = db.Column(db.String(100))
    # Number of pages in the chapter.
    pages = db.Column(db.Integer())
    # Owning book (foreign key to book.id).
    book_id = db.Column(db.Integer, db.ForeignKey('book.id'))
    # One-to-many: summaries pointing back via Summary.chapter_id.
    summaries = db.relationship('Summary')
    def __repr__(self):
        return '<Chapter ID:{}>'.format(self.id)
class Summary(db.Model):
    """A generated text summary of a Chapter."""
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.Text())
    # Keyword string; defaults to "-" when none are stored.
    keywords = db.Column(db.String(255), default="-")
    # Creation time (UTC); indexed.
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    # Owning chapter (foreign key to chapter.id).
    chapter_id = db.Column(db.Integer, db.ForeignKey('chapter.id'))
    def __repr__(self):
        # NOTE(review): the trailing "| true |" on the next line is dataset-dump
        # contamination, not part of the original source file.
        return '<Summary {}>'.format(self.id) | true |
be506c39c0a39961c1c17ad715895d651b169be8 | Python | Twosiders/University_Year_2 | /CE235-5-SP/15a4 Programming Assignment 3/Submissions/decryptor.py | UTF-8 | 2,870 | 3.203125 | 3 | [] | no_license | #****************************
#* Decryptor by Alex Mezodi *
#****************************
# Student Number: 1401665
def swap(s, i, j):
    """Return a copy of string s with the characters at indices i and j exchanged."""
    chars = list(s)
    chars[i], chars[j] = chars[j], chars[i]
    return ''.join(chars)
def swapChar(s, i, j):
    """Return a copy of s in which the character at index i is replaced by j.

    Despite the name, this is a single-position overwrite, not a swap.
    """
    chars = list(s)
    chars[i] = j
    return ''.join(chars)
def swapNibble(s, i, j, n):
    """Return a copy of s with positions i and j overwritten by n[6] and n[7].

    Nothing is swapped: the two target positions of s are replaced with the
    last two characters of the 8-character bit string n.
    """
    chars = list(s)
    chars[i] = n[6]
    chars[j] = n[7]
    return ''.join(chars)
# Ciphertext bytes to decode.
array1 = [132, 201, 141, 74, 140, 94, 141, 140,
141, 15, 31, 164, 90, 229, 201, 141,
78, 114, 241, 217, 141, 217, 140, 180,
141, 164, 51, 141, 188, 221, 31, 164,
241, 177, 141, 140, 51, 217, 141,
201, 229, 152, 141, 78, 241, 114,
78, 102, 94, 141, 74, 152, 31, 152,
141, 94, 201, 31, 164, 102, 164, 51,
90, 141, 201, 229, 164, 31, 201, 152,
152, 51, 115]
# Decoded 8-bit binary strings accumulate here.
array2 = []
# XOR key used in steps 1, 4 and 8.
key = 84
# Decrypt one byte at a time; each "#stepN" undoes one stage of the cipher:
# XOR -> bit swaps -> adjacent-pair substitution -> XOR -> nibble arithmetic
# -> bit swaps -> adjacent-pair substitution -> XOR.
for number in array1:
    #result = int(bin(number^key)[2:].zfill(8),2) #do XOR and also convert to int
    result = bin(number^key)[2:].zfill(8) #step1
    result = swap(result,4,6) #step2
    result = swap(result,5,7)
    # Step 3: rewrite each non-overlapping bit pair (10->11, 11->01, 00->10, 01->00).
    for i in range(0,8): #step3
        if(i%2 == 0):
            if(result[i:i+2] == '10'):
                result = swapChar(result,i+1,'1')
            elif(result[i:i+2] == '11'):
                result = swapChar(result,i,'0')
            elif(result[i:i+2] == '00'):
                result = swapChar(result,i,'1')
            elif(result[i:i+2] == '01'):
                result = swapChar(result,i+1,'0')
    result = bin(int(result,2)^key)[2:].zfill(8) #step4
    # Step 5: undo the nibble-addition stage -- subtract pair nb3 from nb1 and
    # nb4 from nb2, borrowing a leading '1' when needed.
    nb1 = result[0:2] #step5
    nb2 = result[2:4]
    nb3 = result[4:6]
    nb4 = result[6:8]
    # NOTE: the borrow test compares the bit strings as *decimal* ints
    # (int(nb1), not int(nb1, 2)) -- this mirrors the encryptor's quirk.
    if(int(nb1) < int(nb3)):
        nb1 = "1" + nb1
    if(int(nb2) < int(nb4)):
        nb2 = "1" + nb2
    nb1 = int(nb1,2) - int(nb3,2)
    nb1 = bin(nb1)[2:].zfill(8)
    nb2 = int(nb2,2) - int(nb4,2)
    nb2 = bin(nb2)[2:].zfill(8)
    # Write the low two bits of each difference back into result[0:2] and [2:4].
    result = swapNibble(result,0,1,nb1)
    result = swapNibble(result,2,3,nb2)
    result = swap(result,4,6) #step6
    result = swap(result,5,7)
    # Step 7: same pair substitution as step 3.
    for i in range(0,8): #step7
        if(i%2 == 0):
            if(result[i:i+2] == '10'):
                result = swapChar(result,i+1,'1')
            elif(result[i:i+2] == '11'):
                result = swapChar(result,i,'0')
            elif(result[i:i+2] == '00'):
                result = swapChar(result,i,'1')
            elif(result[i:i+2] == '01'):
                result = swapChar(result,i+1,'0')
    result = bin(int(result,2)^key)[2:].zfill(8) #step8
    array2.append(result)
    #print result
# Show the decoded byte values, then render them as ASCII text.
print("The numbers decrypted are:")
for number in array2:
    #chr(int(number))
    print(int(number,2),end=', ')
print("")
print("And the message is:")
for number in array2:
    print(chr(int(number,2)), end="")
| true |
c2aad504dc1969e82376cb85699b7b97f9e9a7ac | Python | MartinMa28/Algorithms_review | /concurrency/1117_building_H2O.py | UTF-8 | 787 | 2.78125 | 3 | [
"MIT"
] | permissive | from threading import Semaphore, Lock
class H2O:
    """Thread-coordination primitive for LeetCode 1117 ("Building H2O").

    hy_sema starts at 2, so at most two hydrogen threads emit per molecule;
    oxy_sema is released once per emitted "H" and acquired twice per oxygen,
    so each "O" waits for two hydrogens and then re-opens the hydrogen gate.
    oxy_mutex keeps two oxygen threads from interleaving their acquires.
    """
    def __init__(self):
        # Two hydrogen "slots" per molecule.
        self.hy_sema = Semaphore(value=2)
        # Counts hydrogens emitted since the last oxygen.
        self.oxy_sema = Semaphore(value=0)
        # Serializes oxygen threads.
        self.oxy_mutex = Lock()
    def hydrogen(self, releaseHydrogen: 'Callable[[], None]') -> None:
        """Emit one "H", blocking until the current molecule has room for it."""
        self.hy_sema.acquire()
        # releaseHydrogen() outputs "H". Do not change or remove this line.
        releaseHydrogen()
        self.oxy_sema.release()
    def oxygen(self, releaseOxygen: 'Callable[[], None]') -> None:
        """Emit one "O" once two hydrogens of this molecule have been emitted."""
        with self.oxy_mutex:
            self.oxy_sema.acquire()
            self.oxy_sema.acquire()
            # releaseOxygen() outputs "O". Do not change or remove this line.
            releaseOxygen()
            self.hy_sema.release()
            # NOTE(review): the trailing "| true |" below is dataset-dump
            # contamination, not part of the original source file.
            self.hy_sema.release() | true |
be7968728f3f02d545965f4739c2507c9414a28d | Python | shibinbshaji/card | /script.py | UTF-8 | 1,234 | 3 | 3 | [] | no_license | import sys
card_no = list(sys.argv[1])
######################################################
def double_it(dig):
    """Luhn-double a single digit 0-9 and return the digit sum of the result.

    For dig < 5 the doubled value is still one digit, so it is returned as-is.
    For dig >= 5, 2*dig is a two-digit number whose digit sum equals
    2*dig - 9 (e.g. 7 -> 14 -> 1+4 = 5).

    BUG FIX: the original lookup table omitted dig == 8, returning 8 unchanged
    instead of 7, which broke the checksum for any card with an 8 in a
    doubled position.
    """
    if dig < 5:
        return dig * 2
    return dig * 2 - 9
######################################################
def check_card(numb):
    """Luhn-validate a card number given as a list of single-digit ints.

    Doubles every digit at an even index (0, 2, 4, ...) via double_it, sums
    all digits, and returns 1 when the total is a multiple of 10.  Returns
    None (falsy) otherwise, preserving the original implicit behaviour that
    callers rely on via truthiness.  The input list is copied, never mutated.

    Generalized from the original's hardcoded indices 0..14 (16-digit cards)
    to any even-length prefix; behaviour for 16-digit input is unchanged.
    """
    num = numb.copy()
    # Apply the Luhn doubling table to each even-indexed position.
    for i in range(0, len(num), 2):
        num[i] = double_it(num[i])
    if sum(num) % 10 == 0:
        ##print("Valid Card Number")
        return 1
#######################################################
# Convert the 16 card-number characters (from sys.argv[1]) into ints.
for i in range(0,16):
    card_no[i] = int(card_no[i])
num = card_no.copy()
check_card(num)
#########################temp code#################3
# Brute force: keep the first 12 digits and try every 4-digit suffix,
# printing each candidate that passes the Luhn check.
temp = []
for i in range(0,12):
    temp.append(card_no[i])
temp2 = temp.copy()
print(temp2)
for i in range(0,10):
    for j in range(0,10):
        for k in range(0,10):
            for l in range(0,10):
                temp2 = temp.copy()
                temp2.append(i)
                temp2.append(j)
                temp2.append(k)
                temp2.append(l)
                if (check_card(temp2)):
                    print(temp2)
| true |
8224e68e7972f2352b1df4a88c382021b1cc51cc | Python | mtlynch/sia_load_tester | /sia_load_tester/progress.py | UTF-8 | 6,762 | 3.0625 | 3 | [
"MIT"
] | permissive | """Monitors upload progress to make sure Sia is still making real progress.
This offers a collection of classes meant to monitor Sia's upload progress to
ensure progress has not stalled.
"""
import collections
import datetime
import logging
import threading
import time
import sia_client as sc
logger = logging.getLogger(__name__)
# Sia must average at least 3 Mbps upload speed in the past hour window.
TIME_WINDOW_MINUTES = 60
MINIMUM_PROGRESS_THRESHOLD = 1350000000 # ~1.26 GiB
_CHECK_FREQUENCY_IN_SECONDS = 60
def start_monitor_async(exit_event):
    """Build a production Monitor and run its loop on a daemon thread."""
    worker = threading.Thread(target=make_monitor(exit_event).monitor)
    worker.daemon = True
    logger.info('Starting background thread to monitor upload progress.')
    worker.start()
def make_monitor(exit_event):
    """Creates a Monitor instance using production defaults.

    Wires the Tracker to the real Sia client and wall-clock UTC time, and
    paces the monitor loop with time.sleep.
    """
    return Monitor(
        Tracker(sc.make_sia_client(), datetime.datetime.utcnow), time.sleep,
        exit_event)
class Monitor(object):
    """Monitor that tracks upload progress and fires an event when it slows."""

    def __init__(self, tracker, sleep_fn, exit_event):
        """Creates a new Monitor instance.

        Args:
            tracker: A tracker for upload progress.
            sleep_fn: A callback function for putting the thread to sleep for
                a given number of seconds.
            exit_event: If this event is set, monitoring stops. Monitor will set
                this event if progress falls below minimum.
        """
        self._tracker = tracker
        self._sleep_fn = sleep_fn
        self._exit_event = exit_event

    def monitor(self):
        """Monitors progress until exit event or progress falls below min.

        Polls Sia regularly to track upload progress. If upload progress falls
        below the minimum required threshold, sets the exit event. If another
        thread sets the exit event, monitoring exits gracefully.
        """
        while not self._exit_event.is_set():
            if self._progress_is_below_minimum():
                logger.critical('Signaling for load test to end')
                self._exit_event.set()
                return
            self._sleep_fn(_CHECK_FREQUENCY_IN_SECONDS)
        logger.info('Exit event is set. Terminating progress monitoring.')

    def _progress_is_below_minimum(self):
        """Returns True if window progress is below the minimum threshold.

        A None reading means the tracker does not yet have a full time window
        of history, which is not treated as a stall.
        """
        bytes_uploaded = self._tracker.bytes_uploaded_in_window()
        # BUG FIX: the original tested `if not bytes_uploaded`, which also
        # matched a legitimate measurement of 0 bytes -- so a completely
        # stalled upload (zero progress in a full window) was never reported.
        # Only None (incomplete window) should be skipped.
        if bytes_uploaded is None:
            return False
        if bytes_uploaded < MINIMUM_PROGRESS_THRESHOLD:
            logger.critical(
                'Upload progress has slowed below minimum: %d bytes in last %d minutes (minimum=%d)',
                bytes_uploaded, TIME_WINDOW_MINUTES, MINIMUM_PROGRESS_THRESHOLD)
            return True
        return False
class Tracker(object):
    """Keeps track of changes in Sia aggregate upload progress over time."""

    def __init__(self, sia_client, time_fn):
        # sia_client: object exposing renter_files(); time_fn: zero-arg callable
        # returning "now" (a datetime in production; injectable for tests).
        self._sia_client = sia_client
        self._time_fn = time_fn
        # A list of history entries, with the oldest at position 0.
        self._progress_history = []

    def bytes_uploaded_in_window(self):
        """Returns number of bytes uploaded in current time window.

        Tracker maintains a trailing time window of upload progress. This
        function returns the delta of upload progress within the window. In
        other words:

            bytes_uploaded_in_window = (bytes uploaded now) -
                                       (bytes uploaded at window start)

        Bytes uploaded data is based on Sia API information, so if files are
        deleted from Sia, progress can be negative.

        Returns:
            Number of bytes uploaded since time window start or None if tracker
            does not have enough history for a full time window.
        """
        # Sample the current total first, then drop entries older than the window.
        self._record_latest()
        self._prune_history()
        bytes_uploaded = self._window_bytes()
        if self._has_complete_time_window():
            logger.info(
                '%d bytes uploaded in time window (averaging %.2f Mbps)',
                bytes_uploaded, self._get_upload_mbps_in_time_window())
            return bytes_uploaded
        else:
            logger.info(
                '%d bytes uploaded since tracking began (averaging %.2f Mbps)',
                bytes_uploaded, self._get_upload_mbps_in_time_window())
            return None

    def _get_upload_mbps_in_time_window(self):
        """Average upload rate (megabits/sec) over the recorded history span."""
        # Need at least two samples to have a non-zero span.
        if len(self._progress_history) < 2:
            return 0
        bytes_uploaded = self._window_bytes()
        time_window_seconds = (self._window_end_timestamp() -
                               self._window_start_timestamp()).total_seconds()
        megabits_uploaded = (bytes_uploaded * 8.0) / pow(10, 6)
        return megabits_uploaded / time_window_seconds

    def _record_latest(self):
        """Appends a (now, total uploaded bytes) sample to the history."""
        self._progress_history.append(
            HistoryEntry(
                timestamp=self._time_fn(),
                uploaded_bytes=self._count_uploaded_bytes()))

    def _count_uploaded_bytes(self):
        """Sums uploadedbytes across all files the Sia renter reports."""
        uploaded_bytes = 0
        for f in self._sia_client.renter_files():
            uploaded_bytes += f[u'uploadedbytes']
        return uploaded_bytes

    def _prune_history(self):
        """Removes all history entries before start of time window."""
        # Walk backwards until we find a record >= TIME_WINDOW_MINUTES earlier
        # than the latest record.
        for i in range(len(self._progress_history) - 1, 0, -1):
            entry = self._progress_history[i]
            if ((self._window_end_timestamp() - entry.timestamp) >=
                    datetime.timedelta(minutes=TIME_WINDOW_MINUTES)):
                # Trim all entries prior to the current entry. Make this entry
                # the oldest.
                self._progress_history = self._progress_history[i:]
                return

    def _window_bytes(self):
        # Uploaded-bytes delta across the current history (end minus start).
        return self._window_end_bytes() - self._window_start_bytes()

    def _window_start_bytes(self):
        return self._progress_history[0].uploaded_bytes

    def _window_end_bytes(self):
        return self._progress_history[-1].uploaded_bytes

    def _window_start_timestamp(self):
        return self._progress_history[0].timestamp

    def _window_end_timestamp(self):
        return self._progress_history[-1].timestamp

    def _has_complete_time_window(self):
        # True once the oldest and newest samples span the full window.
        return (self._window_end_timestamp() - self._window_start_timestamp()
                ) >= datetime.timedelta(minutes=TIME_WINDOW_MINUTES)
# One progress sample: when it was taken and the cumulative uploaded bytes.
HistoryEntry = collections.namedtuple('HistoryEntry',
                                      ['timestamp', 'uploaded_bytes'])
| true |
66648749eb3c6031410e6ca212077b7274a2c023 | Python | flvSantos15/pythonExercicies | /Mundo1/Exercicios/ex015.py | UTF-8 | 118 | 3.734375 | 4 | [] | no_license | k = float(input('Kilometros? '))
# Car-rental cost: 60 per day plus 0.15 per kilometre (k is read above).
d = int(input('Quantos dias? '))
vd = 60 * d      # charge for the rental days
kr = 0.15 * k    # charge for the kilometres driven
print('Valor: ', vd + kr)
| true |
b4be77f7bc0ed0955d0b41bc9bc86e368cb4a6d7 | Python | siyulu14/12_web_scraping | /scrape_mars.py | UTF-8 | 3,830 | 2.90625 | 3 | [] | no_license | # dependency
from bs4 import BeautifulSoup
from splinter import Browser
import pandas as pd
import time
def init_browser():
    """Return a visible (non-headless) Chrome splinter Browser.

    NOTE(review): the chromedriver path is hard-coded for this machine;
    adjust /usr/local/bin/chromedriver for other hosts.
    """
    executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
    return Browser("chrome", **executable_path, headless=False)
def scrape():
    """Scrape Mars data from NASA, JPL, Twitter and USGS and return it as a dict.

    Returns a dict with keys: news_date, news_title, summary,
    featured_image_url, mars_weather, mars_table (HTML string) and
    mars_hemis (list of {"title", "img_url"} dicts).  Drives a real Chrome
    browser via splinter, so chromedriver and network access are required.
    """
    browser = init_browser()
    # Collected results for all of the scraped sources.
    mars_data = {}

    # --- Most recent NASA Mars news item: date, headline and teaser. ---
    browser.visit("https://mars.nasa.gov/news/")
    soup = BeautifulSoup(browser.html, 'html.parser')
    news = soup.find("div", class_="list_text")
    mars_data["news_date"] = news.find("div", class_="list_date").text
    mars_data["news_title"] = news.find("div", class_="content_title").text
    mars_data["summary"] = news.find("div", class_="article_teaser_body").text

    # --- JPL featured Mars image. ---
    browser.visit("https://jpl.nasa.gov/spaceimages/?search=&category=Mars")
    soup = BeautifulSoup(browser.html, 'html.parser')
    image = soup.find("img", alt="Landing in Oxia Palus")["src"]
    mars_data["featured_image_url"] = "https://jpl.nasa.gov" + image

    # --- Latest Mars weather tweet. ---
    browser.visit("https://twitter.com/marswxreport?lang=en")
    soup = BeautifulSoup(browser.html, 'html.parser')
    tweets = soup.find('div', class_="js-tweet-text-container")
    mars_data["mars_weather"] = tweets.find(
        'p', class_="TweetTextSize TweetTextSize--normal js-tweet-text tweet-text").text

    # --- Mars facts table, converted to an HTML snippet via pandas. ---
    # (Local renamed from the original's reused `url3` to avoid shadowing.)
    facts_url = "http://space-facts.com/mars/"
    mars_dt = pd.read_html(facts_url)[0]
    # BUG FIX: the original wrote `mars_dt.colums = [...]` (typo), which only
    # set a stray attribute; the rename below is what actually sets the names.
    mars_dt = mars_dt.rename(columns={0: 'parameter', 1: 'data'})
    mars_dt = mars_dt.set_index("parameter")
    marsdt = mars_dt.to_html(classes='marsdata').replace('\n', ' ')
    mars_data["mars_table"] = marsdt

    # --- USGS Astrogeology hemisphere images: title + full-size image URL. ---
    browser.visit("https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars")
    soup = BeautifulSoup(browser.html, 'html.parser')
    mars_hemis = []
    hemispheres = soup.find_all('div', class_='description')
    # Note: `time` is imported at module level; the original re-imported it here.
    for hemis in hemispheres:
        time.sleep(1)  # give each page a moment before navigating
        img_title = hemis.find("h3").text
        # Follow the item link to the hemisphere's detail page.
        partial = hemis.find('a', class_='itemLink product-item')["href"]
        browser.visit('https://astrogeology.usgs.gov' + partial)
        soup = BeautifulSoup(browser.html, 'html.parser')
        img_url = soup.find('div', class_='downloads').find('ul').find('li').find('a')['href']
        mars_hemis.append({"title": img_title, "img_url": img_url})
        browser.back()
    mars_data["mars_hemis"] = mars_hemis

    browser.quit()
    return mars_data
| true |
a542c28168e1d849dfb3e5a94a62c3bf549828d5 | Python | leogemetric/cryptoctf-2016 | /MLG-Crypto_90/solution.py | UTF-8 | 874 | 2.640625 | 3 | [] | no_license | lines = open("mlg_crypto.txt", "r").readlines()[1:-1]
# Count how often each underscore-separated token appears in the ciphertext.
subs = {}
for line in lines:
    line = line.strip()
    for word in line.split("_"):
        if word in subs:
            subs[word] += 1
        else:
            subs[word] = 1
# NOTE: Python 2 print statements -- run this script with python2.
print len(subs)
print subs
# Assume the most frequent token encodes the space character.
space = max(subs, key=lambda x: subs[x])
del subs[space]
total = "\n".join(lines)
total = total.replace(space, " ") # Most common character is " "
# Do some bs substitutions
alphabet = "abcdefghijklmnopqrstuvwxyz"
i = 0
# Map each remaining token to an arbitrary letter; the result is a
# monoalphabetic substitution cipher that quipqiup can solve.
for sub in subs:
    total = total.replace(sub, alphabet[i])
    i += 1
total = total.replace("_", "").replace("\n\n", "\n")
print total
# Given the hint, we need to crack the substitution cipher on the ciphertext.
# This script will assign each word a letter and print it out
# Plugging the output into quipqiup, we kinda decode the message and the flag:
# flag{leet_smoked_memes_bro}
| true |
b20f8d7cae859ee0ffe48e03dd4dee616c5c271b | Python | aarthisandhiya/codekata_player-py- | /pg79.py | UTF-8 | 195 | 2.78125 | 3 | [] | no_license | n=int(input())
# Read n integers (n was read above) and print the largest absolute pairwise
# difference.  Mathematically this equals max(l) - min(l); the code below
# brute-forces all n*n pairs instead.
l=[int(x) for x in input().split()]
tl=[]
t=0
if(n==len(l)):
    for i in range(0,n):
        for j in range(0,n):
            t=abs(l[j]-l[i])
            tl.append(t)
            t=0
# NOTE(review): if n != len(l), tl stays empty and max(tl) raises ValueError.
print(max(tl))
| true |
9f342be1d33fea3efa174e9c9fbf3b61529e9ab7 | Python | johshisha/selective_search | /program/selective.py | UTF-8 | 3,052 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.image as mpimg
import selectivesearch,sys,os
sys.path.append('/home/dl-box/study/.package/python_util/')
import util,Feature
class Feature_from_array(Feature.Classify):
    """Classifier variant whose classify() takes an in-memory image array."""
    def classify(self, input_file):
        # Despite the name, `input_file` is a preprocessed image array.
        inputs = [input_file]
        predictions = self.classifier.predict(inputs)
        result = self.judge_result(predictions)
        # Highest class probability of the single input serves as the score.
        score = sorted(predictions[0], reverse=True)
        return result, score[0]
def main(argv):
    """Run selective search on argv[1], classify each region, and display
    the image with boxes/labels for regions scoring >= 0.5."""
    filename = argv[1]
    classify = Feature_from_array('imagenet')
    # Candidate regions (cropped, normalized arrays) and their (x, y, w, h) boxes.
    images, candidates = selective_image(filename)
    results = [classify.classify(image) for image in images]
    #print results
    img = plt.imread(filename)
    fig, ax = plt.subplots(1,1)
    ax.imshow(img)
    labels = []
    boxes = []
    # Keep only confidently classified regions (score threshold 0.5).
    for res, locates in zip(results,candidates):
        if res[1] >= 0.5:
            labels.append(res[0])
            boxes.append(locates)
    ax = plot_boxes(ax,boxes,labels)
    plt.show()
def plot_boxes(ax, boxes, labels=None):
    """Add a rectangle (and optional label) to `ax` for each (x, y, w, h) box."""
    if labels is None:
        labels = [None] * len(boxes)
    # Boxes already drawn, used to suppress near-duplicates.
    history = []
    from matplotlib.patches import FancyBboxPatch
    for box, label in zip(boxes, labels):
        coords = (box[0], box[1])
        b = FancyBboxPatch(coords, box[2], box[3],
                           boxstyle="square,pad=0.", ec="b", fc="none", lw=0.5)
        # Squared distance to the closest previously drawn box.
        mindist = 100000
        if len(history) > 0:
            mindist = min([sum((np.array(box) - np.array(h)) ** 2) for h in history])
        # Skip rectangles that almost coincide with one already drawn.
        if mindist > 30000:
            if label is not None:
                ax.text(coords[0], coords[1], label, color='b')
            ax.add_patch(b)
            history.append(box)
    return ax
def selective_image(filename):
    """Run selective search on an image file.

    Returns (normed, candidates): normalized 0-1 crops for each candidate
    region, and the set of (x, y, w, h) region rectangles.
    NOTE(review): in Python 3 `map` returns lazy iterators, so `normed` is
    single-use -- confirm which Python version this targets.
    """
    # loading lena image
    pil_img = Image.open(filename).convert('RGB')
    img = np.asarray(pil_img)
    # perform selective search
    img_lbl, regions = selectivesearch.selective_search(
        img, scale=500, sigma=0.6, min_size=10)
    candidates = set()
    for r in regions:
        # excluding same rectangle (with different segments)
        if r['rect'] in candidates:
            continue
        # excluding regions smaller than 2000 pixels
        if r['size'] < 2000:
            continue
        # distorted rects (aspect ratio further than 1.2 from square)
        x, y, w, h = r['rect']
        if w / h > 1.2 or h / w > 1.2:
            continue
        candidates.add(r['rect'])
    # Crop each candidate to 256x256 and scale pixel values into [0, 1].
    images = [crop_image(pil_img,locate) for locate in candidates]
    array_images = map(np.asarray,images)
    normed = map(lambda x:x/255.0, array_images)
    return normed, candidates
def crop_image(img,locates):
    """Crop the (x, y, w, h) region from a PIL image and resize it to 256x256."""
    x,y,w,h = locates
    # +1 so the crop includes the right/bottom edge pixels of the box.
    cropped = img.crop((x,y,x+w+1,y+h+1))
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use Image.LANCZOS
    # on newer Pillow) -- confirm the installed version.
    resized = cropped.resize((256,256), Image.ANTIALIAS)
    return resized
if __name__ == "__main__":
    # Expects the target image path as the first CLI argument.
    main(sys.argv)
| true |
c8ea9af9e932587d62891d22607a76cd02b2596c | Python | Grant-W-Code92/E-Dice_Terminal_V.1 | /E-Dice.py | UTF-8 | 3,829 | 4.0625 | 4 | [] | no_license | import random
#This is my first ever project#
#Help Screen#
def help_screen():
    """Show the help text, then route the user to the game or the main menu."""
    print("""
    ~Help~
    How to play: Once you have started the program,
    you will choose a dice to roll from the provided
    list, once selected the program will roll said
    dice and give you a randomly generated result.
    Press 1 to play the game
    Press 2 to return to menu
    **This Program was written by Grant Walker April 2020**
    """)
    help_screen_number = input("Please choose a option: ")
    try:
        help_screen_int = int(help_screen_number)
        print("you have selected: ", help_screen_int)
    except ValueError:
        print(" Invalid Selection! ")
        # BUG FIX: return after re-prompting. The original fell through with
        # help_screen_int unbound, so the comparisons below raised NameError
        # once the recursive call returned.
        return help_screen()
    # Dispatch on the validated choice (elif keeps the branches exclusive;
    # the original's separate `if`s could misroute after a nested call returned).
    if help_screen_int == 1:
        gamescreen()
    elif help_screen_int == 2:
        menu_screen()
    else:
        print("Invalid Selection! Please try again")
        menu_screen()
# GameScreen#
def gamescreen():
    """Show the dice menu, roll the chosen die, and loop until the user leaves."""
    print("""
    Welcome brave adventurer to the dice roller!
    May the rolls forever be in your favor
    Please select a dice to roll:
    Press 1 for a D4
    Press 2 for a D6
    Press 3 for a D10
    Press 4 for a D12
    Press 5 for a D20
    Press 6 to return to menu
    """)
    gamescreen_number = input("Please select an option: ")
    try:
        gamescreen_int = int(gamescreen_number)
        print("you have selected: ", gamescreen_int)
    except ValueError:
        print(" Invalid Selection! ")
        # BUG FIX: return after re-prompting. The original fell through with
        # gamescreen_int unbound, so the comparisons below raised NameError
        # once the recursive call returned.
        return gamescreen()
    # Menu choice -> number of die faces (100 is the hidden easter-egg die).
    sides = {1: 4, 2: 6, 3: 10, 4: 12, 5: 20}
    if gamescreen_int in sides:
        selected = random.randint(1, sides[gamescreen_int])
        print("Your dice roll is: ", selected)
        gamescreen()
    elif gamescreen_int == 6:
        menu_screen()
    elif gamescreen_int == 100:
        selected = random.randint(1,100)
        print("""Haha! You have found the fabled 100D!
        It has rolled:""", selected)
        gamescreen()
    else:
        print("Invalid Selection! Please try again")
        gamescreen()
#Menu Screen#
def menu_screen():
    """Main menu: dispatch to play, help, quit, or the hidden meme responses."""
    print("""
    ~Main Menu~
    Press 1 to Play
    Press 2 for Help
    Press 3 to Quit
    """)
    menu_selection_number = input("Please choose a option: ")
    try:
        menu_selection_int = int(menu_selection_number)
        print("you have selected: ", menu_selection_int)
    except ValueError:
        print(" Invalid Selection! ")
        # BUG FIX: return after re-prompting. The original fell through with
        # menu_selection_int unbound, so the comparisons below raised
        # NameError once the recursive call returned.
        return menu_screen()
    if menu_selection_int == 1:
        gamescreen()
    elif menu_selection_int == 2:
        help_screen()
    elif menu_selection_int == 3:
        exit()
    elif menu_selection_int == 69: #Keeping this for meme reasons
        print("Nice...")
        menu_screen()
    elif menu_selection_int == 42: #Keeping this for meme reasons
        print("The ultimate answer to life, the universe and everything...")
        menu_screen()
    else:
        print("Invalid Selection! Please try again")
        menu_screen()
#Opening#
print("""
##### #### ##### #### #####
# # ## # ## # #
#### ### # ## # ## ####
# # ## # ## # #
##### #### ##### #### #####
""")
# Greet the player by name, then hand control to the main menu loop.
name = input("Please type your name:")
print("Welcome, " +name+ ", what can we do for you today?" )
menu_screen()
56b30ad77b0326ba6d867779985a38a5b27b31d1 | Python | aamini/chemprop | /chemprop/data/data.py | UTF-8 | 17,436 | 2.875 | 3 | [
"MIT"
] | permissive | from argparse import Namespace
import random
from typing import Callable, List, Union
import numpy as np
from torch.utils.data.dataset import Dataset
from rdkit import Chem
from .scaler import StandardScaler
from chemprop.features import get_features_generator
from schnetpack.datasets import QM9
class MoleculeDatapoint:
"""A MoleculeDatapoint contains a single molecule and its associated features and targets."""
def __init__(self,
line: List[str],
args: Namespace = None,
features: np.ndarray = None,
use_compound_names: bool = False):
"""
Initializes a MoleculeDatapoint, which contains a single molecule.
:param line: A list of strings generated by separating a line in a data CSV file by comma.
:param args: Arguments.
:param features: A numpy array containing additional features (ex. Morgan fingerprint).
:param use_compound_names: Whether the data CSV includes the compound name on each line.
"""
if args is not None:
self.features_generator = args.features_generator
self.args = args
else:
self.features_generator = self.args = None
if features is not None and self.features_generator is not None:
raise ValueError('Currently cannot provide both loaded features and a features generator.')
self.features = features
if use_compound_names:
self.compound_name = line[0] # str
line = line[1:]
else:
self.compound_name = None
self.smiles = line[0] # str
self.mol = Chem.MolFromSmiles(self.smiles)
# Generate additional features if given a generator
if self.features_generator is not None:
self.features = []
for fg in self.features_generator:
features_generator = get_features_generator(fg)
if self.mol is not None and self.mol.GetNumHeavyAtoms() > 0:
self.features.extend(features_generator(self.mol))
self.features = np.array(self.features)
# Fix nans in features
if self.features is not None:
replace_token = 0
self.features = np.where(np.isnan(self.features), replace_token, self.features)
# Create targets
self.targets = [float(x) if x != '' else None for x in line[1:]]
def set_features(self, features: np.ndarray):
"""
Sets the features of the molecule.
:param features: A 1-D numpy array of features for the molecule.
"""
self.features = features
def num_tasks(self) -> int:
"""
Returns the number of prediction tasks.
:return: The number of tasks.
"""
return len(self.targets)
def set_targets(self, targets: List[float]):
"""
Sets the targets of a molecule.
:param targets: A list of floats containing the targets.
"""
self.targets = targets
class MoleculeDataset(Dataset):
"""A MoleculeDataset contains a list of molecules and their associated features and targets."""
def __init__(self, data: List[MoleculeDatapoint]):
"""
Initializes a MoleculeDataset, which contains a list of MoleculeDatapoints (i.e. a list of molecules).
:param data: A list of MoleculeDatapoints.
"""
self.data = data
self.args = self.data[0].args if len(self.data) > 0 else None
self.scaler = None
def compound_names(self) -> List[str]:
"""
Returns the compound names associated with the molecule (if they exist).
:return: A list of compound names or None if the dataset does not contain compound names.
"""
if len(self.data) == 0 or self.data[0].compound_name is None:
return None
return [d.compound_name for d in self.data]
def smiles(self) -> List[str]:
"""
Returns the smiles strings associated with the molecules.
:return: A list of smiles strings.
"""
return [d.smiles for d in self.data]
def mols(self) -> List[Chem.Mol]:
"""
Returns the RDKit molecules associated with the molecules.
:return: A list of RDKit Mols.
"""
return [d.mol for d in self.data]
def features(self) -> List[np.ndarray]:
"""
Returns the features associated with each molecule (if they exist).
:return: A list of 1D numpy arrays containing the features for each molecule or None if there are no features.
"""
if len(self.data) == 0 or self.data[0].features is None:
return None
return [d.features for d in self.data]
def targets(self) -> List[List[float]]:
"""
Returns the targets associated with each molecule.
:return: A list of lists of floats containing the targets.
"""
return [d.targets for d in self.data]
def num_tasks(self) -> int:
"""
Returns the number of prediction tasks.
:return: The number of tasks.
"""
return self.data[0].num_tasks() if len(self.data) > 0 else None
def features_size(self) -> int:
"""
Returns the size of the features array associated with each molecule.
:return: The size of the features.
"""
return len(self.data[0].features) if len(self.data) > 0 and self.data[0].features is not None else None
def sample(self, sample_size: int):
"""
Samples a random subset of the dataset.
:param sample_size: The size of the sample to produce.
"""
self.data = random.sample(self.data, sample_size)
def shuffle(self, seed: int = None):
"""
Shuffles the dataset.
:param seed: Optional random seed.
"""
if seed is not None:
random.seed(seed)
random.shuffle(self.data)
def normalize_features(self, scaler: StandardScaler = None, replace_nan_token: int = 0) -> StandardScaler:
"""
Normalizes the features of the dataset using a StandardScaler (subtract mean, divide by standard deviation).
If a scaler is provided, uses that scaler to perform the normalization. Otherwise fits a scaler to the
features in the dataset and then performs the normalization.
:param scaler: A fitted StandardScaler. Used if provided. Otherwise a StandardScaler is fit on
this dataset and is then used.
:param replace_nan_token: What to replace nans with.
:return: A fitted StandardScaler. If a scaler is provided, this is the same scaler. Otherwise, this is
a scaler fit on this dataset.
"""
if len(self.data) == 0 or self.data[0].features is None:
return None
if scaler is not None:
self.scaler = scaler
elif self.scaler is None:
features = np.vstack([d.features for d in self.data])
self.scaler = StandardScaler(replace_nan_token=replace_nan_token)
self.scaler.fit(features)
for d in self.data:
d.set_features(self.scaler.transform(d.features.reshape(1, -1))[0])
return self.scaler
def set_targets(self, targets: List[List[float]]):
"""
Sets the targets for each molecule in the dataset. Assumes the targets are aligned with the datapoints.
:param targets: A list of lists of floats containing targets for each molecule. This must be the
same length as the underlying dataset.
"""
assert len(self.data) == len(targets)
for i in range(len(self.data)):
self.data[i].set_targets(targets[i])
def sort(self, key: Callable):
"""
Sorts the dataset using the provided key.
:param key: A function on a MoleculeDatapoint to determine the sorting order.
"""
self.data.sort(key=key)
def sample_inds(self, inds: List[float]):
"""
Samples the dataset according to specified indicies and returns new
dataset.
:param inds: A list of desired inds of the dataset to keep.
"""
data = [self.data[i] for i in inds]
return MoleculeDataset(data)
def sample_task_ind(self, inds: List[int]):
"""
Samples the dataset and keeps only the specified task target inds
:param inds: The indices of the desired targets to sub-sample.
"""
for i in range(len(self.data)):
all_targets = self.data[i].targets
new_targets = [all_targets[ind] for ind in inds]
self.data[i].targets = new_targets
def __len__(self) -> int:
"""
Returns the length of the dataset (i.e. the number of molecules).
:return: The length of the dataset.
"""
return len(self.data)
def __getitem__(self, item) -> Union[MoleculeDatapoint, List[MoleculeDatapoint]]:
"""
Gets one or more MoleculeDatapoints via an index or slice.
:param item: An index (int) or a slice object.
:return: A MoleculeDatapoint if an int is provided or a list of MoleculeDatapoints if a slice is provided.
"""
return self.data[item]
class AtomisticDataset(MoleculeDataset):
    """AtomisticDataset contains schnetpack QM9 dataset."""
    def __init__(self, data, preload=True, data_ram=None):
        """
        Initializes a AtomisticDataset, which contains a schnetpack QM9 dataset.
        :param data: A schnetpack QM9 dataset object.
        :param preload: If the data should be pre-fetched and stored into RAM
        :param data_ram: The RAM version of data, if already available (e.g. from sub-sampling)
        """
        self.data = data
        self.preload = preload
        self.data_ram = data_ram
        if self.preload and self.data_ram is None:
            import schnetpack as spk
            self.data_ram = []
            load_batch_size = 1280
            loader = spk.AtomsLoader(self.data, batch_size=load_batch_size, shuffle=False, num_workers=10)
            # Loop through the dataset and store into RAM
            for batch in loader:
                keys = batch.keys()
                for i in range(len(batch[QM9.U0])):
                    atom_dict = {k: batch[k][i] for k in keys}
                    self.data_ram.append(atom_dict)
            # convert from array of dicts to a collated dict of arrays
            # we do this since some of the size of some attributes are dependent
            # on the molecule, so we need to standardize the sizes for training
            self.data_ram = spk.data.loader._collate_aseatoms(self.data_ram)
            # convert back to array of dicts now that we've collated in line above
            def dict_to_list(DL):
                return [dict(zip(DL, t)) for t in zip(*DL.values())]
            self.data_ram = dict_to_list(self.data_ram)
    def get_atomref(self, *args, **kwargs):
        """
        Inherit the get atomref function of the QM9 data
        """
        return self.data.get_atomref(*args, **kwargs)
    def compound_names(self) -> List[str]:
        """
        Returns the compound names associated with the molecule (if they exist).
        :return: A list of compound names or None if the dataset does not contain compound names.
        """
        return None
    def smiles(self) -> List[str]:
        """
        Returns the smiles strings associated with the molecules.
        TODO: actually extract smiles
        :return: A list of smiles strings.
        """
        return [None for i in range(len(self.data))]
    def mols(self) -> List[Chem.Mol]:
        """
        Returns the RDKit molecules associated with the molecules.
        :return: A list of RDKit Mols.
        """
        return []
    def features(self) -> List[np.ndarray]:
        """
        Returns the features associated with each molecule (if they exist).
        :return: A list of 1D numpy arrays containing the features for each molecule or None if there are no features.
        """
        return None
    def targets(self) -> List[List[float]]:
        """
        Returns the targets associated with each molecule.
        :return: A list of lists of floats containing the targets.
        """
        if self.preload:
            return [[j[QM9.U0].item()] for j in self.data_ram]  # if data_ram is a array of dicts
            # return self.data_ram[QM9.U0].numpy() # if data_ram is a dict of arrays
        else:
            return [[j[QM9.U0].item()] for j in self.data]
    def num_tasks(self) -> int:
        """
        Returns the number of prediction tasks.
        TODO: Remove hardcoding of 1
        :return: The number of tasks.
        """
        return 1
    def features_size(self) -> int:
        """
        Returns the size of the features array associated with each molecule.
        :return: The size of the features.
        """
        return None
    def sample(self, sample_size: int):
        """
        Samples a random subset of the dataset.
        :param sample_size: The size of the sample to produce.
        """
        # Fixed: the original raised the misspelled name "NotImplemetedError",
        # which produced a NameError instead of the intended exception.
        raise NotImplementedError("AtomisticDataset.sample is not implemented.")
        # self.data = random.sample(self.data, sample_size)
    def shuffle(self, seed: int = None):
        """
        Shuffles the dataset.
        :param seed: Optional random seed.
        """
        if self.preload:
            if seed is not None:
                random.seed(seed)
            random.shuffle(self.data_ram)
        else:
            # Fixed misspelled exception name (was "NotImplemetedError" -> NameError).
            raise NotImplementedError("AtomisticDataset.shuffle is not implemented for non-preloaded datasets.")
    def normalize_features(self, scaler: StandardScaler = None, replace_nan_token: int = 0) -> StandardScaler:
        """
        Normalizes the features of the dataset using a StandardScaler (subtract mean, divide by standard deviation).
        If a scaler is provided, uses that scaler to perform the normalization. Otherwise fits a scaler to the
        features in the dataset and then performs the normalization.
        :param scaler: A fitted StandardScaler. Used if provided. Otherwise a StandardScaler is fit on
        this dataset and is then used.
        :param replace_nan_token: What to replace nans with.
        :return: A fitted StandardScaler. If a scaler is provided, this is the same scaler. Otherwise, this is
        a scaler fit on this dataset.
        """
        # Fixed misspelled exception name (was "NotImplemetedError" -> NameError).
        raise NotImplementedError("AtomisticDataset.normalize_features is not implemented.")
        # if len(self.data) == 0 or self.data[0].features is None:
        #     return None
        #
        # if scaler is not None:
        #     self.scaler = scaler
        #
        # elif self.scaler is None:
        #     features = np.vstack([d.features for d in self.data])
        #     self.scaler = StandardScaler(replace_nan_token=replace_nan_token)
        #     self.scaler.fit(features)
        #
        # for d in self.data:
        #     d.set_features(self.scaler.transform(d.features.reshape(1, -1))[0])
        #
        # return self.scaler
    def set_targets(self, targets: List[List[float]]):
        """
        Sets the targets for each molecule in the dataset. Assumes the targets are aligned with the datapoints.
        :param targets: A list of lists of floats containing targets for each molecule. This must be the
        same length as the underlying dataset.
        """
        if self.preload:
            for i in range(len(self.data_ram)):
                for j in range(len(targets[i])):
                    self.data_ram[i][QM9.U0][j] = targets[i][j]
        else:
            for i in range(len(self.data)):
                for j in range(len(targets[i])):
                    self.data[i][QM9.U0][j] = targets[i][j]
    def sort(self, key: Callable):
        """
        Sorts the dataset using the provided key.
        :param key: A function on a MoleculeDatapoint to determine the sorting order.
        """
        self.data.sort(key=key)
    def sample_inds(self, inds: List[float]):
        """
        Samples the dataset according to specified indices and returns new
        dataset.
        :param inds: A list of desired inds of the dataset to keep.
        """
        data_subset = self.data.create_subset(inds)
        if self.preload:
            data_ram_subset = [self.data_ram[i] for i in inds]
            return AtomisticDataset(data_subset, data_ram=data_ram_subset)
        else:
            return AtomisticDataset(data_subset)
    def __len__(self) -> int:
        """
        Returns the length of the dataset (i.e. the number of molecules).
        :return: The length of the dataset.
        """
        if self.preload:
            return len(self.data_ram)
        else:
            return len(self.data)
    def __getitem__(self, item) -> Union[MoleculeDatapoint, List[MoleculeDatapoint]]:
        """
        Gets one or more schnetpack QM9 datapoint via an index or slice.
        :param item: An index (int) or a slice object.
        :return: A QM9 datapoint if an int is provided or a list of MoleculeDatapoints if a slice is provided.
        """
        if self.preload:
            return self.data_ram[item]
        else:
            return self.data[item]
| true |
2ec2c3d60141ea2d78fc08e37e4a86a51de4ecb4 | Python | Rymou/Projet-RI | /TransformationsMethodes.py | UTF-8 | 2,880 | 2.828125 | 3 | [] | no_license | from string import *
from math import *
import os
import collections,re
def fichierInverse():
    """Build an inverted index over documents D1.txt .. D4.txt.

    Returns a dict mapping (term, doc_number) -> term frequency in that
    document. Terms are lowercased; punctuation is replaced by spaces;
    stopwords (from stopwords_fr.txt) and single-character tokens are skipped.

    Fixes over the original: files are closed via context managers (the
    stoplist handle was never closed), the O(n^2) per-character replace loop
    and the O(n^2) per-word .count() are replaced by single passes, and the
    unused variable `nb` was removed.
    """
    N = 4  # number of documents in the corpus
    freq = {}
    punctuation = {".", ",", "!", "?", "'"}
    with open('stopwords_fr.txt', 'r') as stop_file:
        stoplist = set(stop_file.read().lower().split())
    for k in range(1, N + 1):
        # f = open('./DocsFatim/D'+str(k)+'.txt','r')
        with open('D' + str(k) + '.txt', 'r') as doc_file:
            text = doc_file.read().lower()
        # Replace punctuation with spaces in a single pass.
        text = ''.join(' ' if c in punctuation else c for c in text)
        words = text.split()
        # Incremental tally yields the same final counts as a.count(w).
        for w in words:
            if w not in stoplist and len(w) > 1:
                key = (w, k)
                freq[key] = freq.get(key, 0) + 1
    return freq
#cette fonction retourne un dictionnaire de frequence d'un fichier numDoc (terme): freq
def indexDoc(freq, numDoc):
    """Return {term: frequency} for every entry of the given document number."""
    return {term: count for (term, doc), count in freq.items() if doc == numDoc}
def indexMot(freq, mot):
    """Return {(word, doc): frequency} restricted to entries for the given word."""
    return {(mot, doc): count for (word, doc), count in freq.items() if word == mot}
def cleanQuery(query):
    """Normalize a query string.

    Lowercases the text, replaces punctuation with spaces, drops stopwords
    (from stopwords_fr.txt) and single-character tokens, and de-duplicates
    words while preserving their first-seen order. Returns the cleaned words
    joined by single spaces.

    Fixes over the original: (1) the query was tokenized *before* the
    punctuation-removal loop, so tokens kept their punctuation and the loop
    was dead code; (2) duplicates were detected with `w not in f` on the
    accumulated *string*, a substring test that wrongly dropped words
    contained in earlier ones (e.g. "chat" after "chateau").
    """
    query = query.lower()
    punctuation = {'.', ',', '!', '?', '"', ':', ';', "'"}
    with open('stopwords_fr.txt', 'r') as stop_file:
        stoplist = set(stop_file.read().lower().split())
    # Strip punctuation BEFORE tokenizing so the tokens are actually clean.
    query = ''.join(' ' if c in punctuation else c for c in query)
    seen = []
    for w in query.split():
        if w not in stoplist and len(w) > 1 and w not in seen:
            seen.append(w)
    return " ".join(seen)
def myMax(freq):
    """Return {doc: highest term frequency observed in that document}."""
    best = {}
    for (word, doc), count in freq.items():
        if doc not in best or count > best[doc]:
            best[doc] = count
    return best
def ni(freq):
    """Return {term: number of documents that contain the term}."""
    doc_counts = {}
    for (word, doc) in freq:
        doc_counts[word] = doc_counts.get(word, 0) + 1
    return doc_counts
def N():
    """Return the corpus size (fixed at 4 documents)."""
    return 4
#Retourne un dictionnaire contenant (terme, numeroDuFichier):poids
def tfIdf(freq):
    """Compute TF*IDF weights: {(term, doc_number): weight}.

    TF is the term's frequency divided by the document's maximum frequency;
    IDF is log10(N / document_frequency + 1).

    Fix: the original recomputed myMax(freq) and ni(freq) inside the loop for
    every single (term, doc) entry, making the function O(n^2); the
    loop-invariant values are now computed once.
    """
    maxima = myMax(freq)
    doc_freq = ni(freq)
    total_docs = N()
    poids = {}
    for (w, d) in freq:
        poids[w, d] = (float(freq[w, d]) / float(maxima[d])) * log10(float(total_docs) / float(doc_freq[w]) + 1)
    return poids
#print("**************************ponderation TF*IDF*************************")
def poidFichier(numDoc, poids):
    """Return {term: weight} for all weight entries of the given document."""
    return {w: weight for (w, d), weight in poids.items() if d == numDoc}
def poidWord(word, poids):
    """Return {doc_number: weight} for the given word across all documents."""
    return {d: weight for (w, d), weight in poids.items() if w == word}
| true |
3220ef99c1900071dad9464c8de85d5faa892d9c | Python | jeongyongwon/Algo_Coding | /swexpert/캐슬 디펜스.py | UTF-8 | 194 | 3.09375 | 3 | [] | no_license |
# Read grid dimensions R x C and the archers' attack range D from stdin.
R,C,D = map(int,input().split())
# Read the R x C battlefield grid, one row per line.
mat = []
for i in range(R):
    mat.append(list(map(int,input().split())))
# The number of ways the archers can stand is C choose 3 (e.g. 5C3).
# NOTE(review): `permute` is not defined anywhere in this file, so the line
# below raises NameError at runtime; presumably itertools.permutations or
# itertools.combinations was intended -- TODO confirm.
print(permute([1,2,3,4]))
| true |
69d98c7405a923f267ffbaed102493259932febd | Python | aokellermann/fcrypt | /fcrypt.py | UTF-8 | 3,163 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python3
# Copyright Antony Kellermann 2020
# Usage: fcrypt.py [--encrypt|--decrypt] [<receiver_public_key>|<receiver_private_key>] <plaintext_file> <encrypted_file>
import sys
import zlib
from Crypto.Random import get_random_bytes
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import pkcs1_15
from Crypto.Cipher import AES, PKCS1_v1_5
def write_plaintext(to_write: bytes, fname: str):
    """Write raw bytes to `fname`, overwriting any existing content."""
    with open(fname, "wb") as out:
        out.write(to_write)
def write_encrypted(to_write: list, fname: str):
    """Write each message to `fname` as an 8-byte little-endian length prefix
    followed by the message bytes."""
    with open(fname, "wb") as out:
        for msg in to_write:
            out.write(len(msg).to_bytes(8, byteorder='little'))
            out.write(msg)
def read_encrypted(fname: str, count: int = 3):
    """Read length-prefixed messages from `fname` (inverse of write_encrypted).

    Each message is stored as an 8-byte little-endian length followed by the
    message bytes. Generalized: the original hard-coded exactly 3 messages;
    `count` now defaults to 3 (the fcrypt on-disk format) but can be changed.

    :param fname: Path of the file to read.
    :param count: Number of messages to read.
    :return: List of `count` byte strings.
    """
    with open(fname, "rb") as r:
        msgs = [r.read(int.from_bytes(r.read(8), byteorder='little')) for _ in range(count)]
    return msgs
def read_plaintext(fname: str):
    """Return the entire contents of `fname` as bytes."""
    with open(fname, "rb") as src:
        contents = src.read()
    return contents
if __name__ == '__main__':
    if len(sys.argv) != 5:
        print("Usage:")
        print("\tfcrypt --encrypt <receiver_public_key> <plaintext_file> <encrypted_file>")
        print("\tfcrypt --decrypt <receiver_private_key> <encrypted_file> <decrypted_file>")
        exit(1)
    # Sender keys are fixed to alice's; the receiver key comes from argv.
    sender_public_key = RSA.import_key(read_plaintext("alice.crt"))
    sender_private_key = RSA.import_key(read_plaintext("alice.key"))
    receiver_key = RSA.import_key(read_plaintext(sys.argv[2]))
    if sys.argv[1] == "--encrypt":
        # Sign-then-compress-then-encrypt: SHA256 signature, zlib, AES session
        # key wrapped with the receiver's RSA public key.
        message = read_plaintext(sys.argv[3])
        message_hash = SHA256.new(message)
        encrypted_hash = pkcs1_15.new(sender_private_key).sign(message_hash)
        to_zip = [encrypted_hash, message]
        zipped = [zlib.compress(msg) for msg in to_zip]
        session_key = get_random_bytes(32)
        encrypted_zipped = [AES.new(session_key, AES.MODE_OPENPGP).encrypt(z) for z in zipped]
        final_message = [PKCS1_v1_5.new(receiver_key).encrypt(session_key)] + encrypted_zipped
        write_encrypted(final_message, sys.argv[4])
        print("Successful encryption!")
    elif sys.argv[1] == "--decrypt":
        encrypted_session_key, encrypted_hash, ciphertext = tuple(read_encrypted(sys.argv[3]))
        # PKCS1 v1.5 decrypt returns the sentinel on failure instead of raising.
        sentinel = get_random_bytes(16)
        session_key = PKCS1_v1_5.new(receiver_key).decrypt(encrypted_session_key, sentinel)
        if session_key == sentinel:
            print("Failed to decrypt session key!")
            exit(1)
        zipped_plaintexts = []
        for to_decrypt in [encrypted_hash, ciphertext]:
            try:
                # First 18 bytes are the OPENPGP-mode IV/encrypted IV block.
                zipped_plaintexts.append(AES.new(session_key, AES.MODE_OPENPGP, iv=to_decrypt[:18]).decrypt(to_decrypt[18:]))
            # Fixed: the original wrote `except ValueError or KeyError:`, which
            # evaluates to `except ValueError:` and silently never caught KeyError.
            except (ValueError, KeyError):
                print("Failed to decrypt!")
                exit(1)
        plaintext_hash, plaintext_message = tuple(zlib.decompress(plaintext) for plaintext in zipped_plaintexts)
        try:
            pkcs1_15.new(sender_public_key).verify(SHA256.new(plaintext_message), plaintext_hash)
        except ValueError:
            print("Failed to authenticate!")
            exit(1)
        write_plaintext(plaintext_message, sys.argv[4])
        print("Successful decryption!")
| true |
fa0cdaf5f8fc14cb3c633bf6736ad073391caa8f | Python | Aasthaengg/IBMdataset | /Python_codes/p03090/s085619297.py | UTF-8 | 861 | 3.3125 | 3 | [] | no_license | import sys
def input():
    """Read one line from stdin without the trailing newline.

    Deliberately shadows the builtin for faster competitive-programming I/O.
    """
    line = sys.stdin.readline()
    return line.strip()
sys.setrecursionlimit(20000000)
def main():
    """Read N from stdin and print a list of pairs (i, j), 1 <= i < j <= N.

    For odd N: every pair (N, i) plus all pairs (j, k) with j + k != N.
    For even N: pairs (N, i) and (1, i) for i in 2..N-1, plus all pairs
    (j, k) with 2 <= j < k < N and j + k != N + 1.
    NOTE(review): this is a contest solution (AtCoder ABC p03090 per the
    file path); the pairing rule above is what the code literally builds --
    the underlying graph property it guarantees is not stated here.
    """
    N = int(input())
    answer = []
    if N % 2 == 1:
        # Connect N to every other vertex.
        for i in range(1, N):
            answer.append((N, i))
        # Connect all remaining pairs except those summing to N.
        for j in range(1, N):
            for k in range(j + 1, N):
                if j + k == N:
                    continue
                else:
                    answer.append((j, k))
    else:
        # Connect both N and 1 to every middle vertex.
        for i in range(2, N):
            answer.append((N, i))
            answer.append((1, i))
        # Connect middle pairs except those summing to N + 1.
        for j in range(2, N):
            for k in range(j + 1, N):
                if j + k == N + 1:
                    continue
                else:
                    answer.append((j, k))
    # Output: edge count followed by one edge per line.
    M = len(answer)
    print(M)
    for i in range(M):
        print(*answer[i], sep=" ")
if __name__ == "__main__":
main()
| true |
63ed008bb3d24e4e9be0945cac649965e98c006f | Python | ooscar2/Oscar | /holamundo.py | UTF-8 | 340 | 3.84375 | 4 | [] | no_license | #Se utiliza el "def" para definir una función, a la cual se le colocara un nombre, en este caso "imprimir"
def imprimir():
    """Print a fixed hello-world greeting; returns None."""
    # The function's only action is emitting this message to stdout.
    print("HOLA MUNDOOOOOO, ya se usar python!!:)")
# Call the function so it prints the greeting.
# Fixed: the original was `print(imprimir())`, which also printed "None"
# because imprimir() returns nothing.
imprimir()
99d3fca1e9935bc7108674a0d534d6a1d2a17cbe | Python | Aasthaengg/IBMdataset | /Python_codes/p02790/s446309454.py | UTF-8 | 63 | 2.578125 | 3 | [] | no_license | f = list(map(int, input().split()))
print(str(min(f)) * max(f)) | true |
5dff86391d03cfe79073cbf44619acf4cb2a9596 | Python | wtbsw/AnomalyDetection | /docs/graph_laplacian/test.py | UTF-8 | 601 | 2.921875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import numpy as np
from sklearn.utils.graph import graph_laplacian
def assign_undirected_weight(W, i, j, v):
    """Set the symmetric edge weight W[i, j] = W[j, i] = v (undirected graph)."""
    W[i, j] = v
    W[j, i] = v
# Build a 5-node weighted undirected adjacency matrix and print its
# normalized graph Laplacian together with the diagonal returned by sklearn.
# NOTE(review): this is Python 2 (`print W` statements) and
# sklearn.utils.graph.graph_laplacian was removed from modern scikit-learn;
# scipy.sparse.csgraph.laplacian is the current equivalent -- TODO confirm
# before reusing.
n = 5
W = np.zeros((n,n))
assign_undirected_weight(W,0,1,0.08)
assign_undirected_weight(W,0,2,0.09)
assign_undirected_weight(W,1,2,0.45)
assign_undirected_weight(W,1,3,0.22)
assign_undirected_weight(W,1,4,0.24)
assign_undirected_weight(W,2,3,0.2)
assign_undirected_weight(W,2,4,0.19)
assign_undirected_weight(W,3,4,1)
adjacency = W;
print W
laplacian, dd = graph_laplacian(adjacency, normed=True, return_diag=True)
print laplacian
print dd
| true |
761ad083a5e5f80b958eaf96818964c869e18bf0 | Python | srinivasshingade/Custom-Object-detection-using-tensorflow | /Find_Phone/train_phone_finder.py | UTF-8 | 2,325 | 2.734375 | 3 | [] | no_license | import os
import cv2
import numpy as np
import tensorflow as tf
import sys
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util
# Grab path to current working directory
CWD_PATH = os.getcwd()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def get_graph(image):
    """Run the frozen TF1 object-detection model on one image.

    Loads find_mobile_graph/frozen_inference_graph.pb relative to the current
    working directory, feeds `image` (expanded to a batch of one) through the
    graph, and returns the raw 'detection_boxes:0' tensor values.

    NOTE(review): the model graph is re-loaded from disk on every call --
    callers invoking this in a loop would want to hoist the session setup.
    """
    # Name of the directory containing the object detection module we're using
    MODEL_NAME = 'find_mobile_graph'
    # Path to frozen detection graph .pb file, which contains the model that is used
    # for object detection.
    PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
    # Path to label map file
    PATH_TO_LABELS = os.path.join(CWD_PATH,'training','object-detection.pbtxt')
    # Number of classes the object detector can identify
    NUM_CLASSES = 1
    # Label map / category index are built but only the boxes are returned.
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    # Deserialize the frozen GraphDef into a fresh TF1 graph and session.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
        sess = tf.Session(graph=detection_graph)
    # Input tensor is the image
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    # Output tensors are the detection boxes
    # Each box represents a part of the image where a particular object was detected
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    image_expanded = np.expand_dims(image, axis=0)
    boxes = sess.run(
        detection_boxes,
        feed_dict={image_tensor: image_expanded})
    return boxes
"""def main():
IMAGE_NAME = 'image13.jpg'
# Path to image
PATH_TO_IMAGE = os.path.join(CWD_PATH,'test_images',IMAGE_NAME)
image = cv2.imread(PATH_TO_IMAGE)
box = get_graph(image)
# Perform the actual detection by running the model with the image as input
print(box[0][0])
main()
"""
| true |
3fde1885933bb865a06754827be034bca5530ad2 | Python | prateek27/python-dev-march-18 | /Lecture-7/crawler.py | UTF-8 | 711 | 3.25 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
def get_links(url):
    """Fetch `url` and return every absolute (http/https) hyperlink target found."""
    response = requests.get(url)
    soup = BeautifulSoup(response.content, "html5lib")
    found = []
    for anchor in soup.findAll('a'):
        if anchor.has_attr('href') and anchor['href'].startswith('http'):
            found.append(anchor['href'])
    return found
def spider(url, limit=100):
    """Breadth-first crawl starting from `url`, visiting at most `limit` pages.

    Prints each page as it is visited; queues only links not already visited
    or queued.
    """
    queue = [url]
    visited = []
    while len(visited) < limit and queue != []:
        current = queue.pop(0)
        visited.append(current)
        print("Visiting " + current)
        for link in get_links(current):
            if link not in visited and link not in queue:
                queue.append(link)
if __name__ == "__main__":
spider("https://indianpythonista.wordpress.com") | true |
877d24102590ae40cf5bcb4057be30bacd4d4615 | Python | davidjamesbeck/slexil | /LineDataFrame.py | UTF-8 | 6,848 | 2.625 | 3 | [
"MIT",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"AGPL-3.0-or-later"
] | permissive | import pandas as pd
from xml.etree import ElementTree as etree
pd.set_option('display.max_columns', 500)
class DataFrame:
    """Builds a pandas table for one ELAN (.eaf) line and its child tiers.

    NOTE(review): the class name shadows the common pandas.DataFrame name;
    callers must take care with imports -- renaming would be an API change.
    """
    def __init__(self, doc, allElements):
        '''doc = .eaf file; allElements = line element and its children'''
        self.doc = doc
        self.allElements = allElements
        # The table is built eagerly at construction time.
        self.tbl = self.buildTable(doc,self.allElements)
    def getTbl(self):
        # Accessor for the table built in __init__.
        return self.tbl
    # def getTimeSlotIDs(self, doc, tbl_elements):
    #     '''next step asks for row 0 of dataframe (speech), get value of TSRef1 (start time)'''
    #     startTimeSlotID = tbl_elements.iloc[0, tbl_elements.columns.values.tolist().index('TIME_SLOT_REF1')]
    #     endTimeSlotID = tbl_elements.iloc[0, tbl_elements.columns.values.tolist().index('TIME_SLOT_REF2')]
    #     return startTimeSlotID, endTimeSlotID
    def getTimeSlotIDs(self, doc, tbl_elements):
        '''next step asks for row 0 of dataframe (speech), get value of TSRef1 (start time)'''
        # Directly time-aligned tier: read the time-slot refs from row 0.
        if 'TIME_SLOT_REF1' in tbl_elements.columns:
            startTimeSlotID = tbl_elements.iloc[0, tbl_elements.columns.values.tolist().index('TIME_SLOT_REF1')]
            endTimeSlotID = tbl_elements.iloc[0, tbl_elements.columns.values.tolist().index('TIME_SLOT_REF2')]
        else:
            # Not time-aligned: walk up ANNOTATION_REF parents until a
            # time-aligned ancestor is found, then graft its time-slot refs
            # onto this table as new columns.
            startTimeSlotID = False
            parentRefID = tbl_elements.iloc[0, tbl_elements.columns.values.tolist().index('ANNOTATION_REF')]
            while startTimeSlotID == False:
                # print(parentRefID)
                parentAnnotation = doc.find('TIER/ANNOTATION/ALIGNABLE_ANNOTATION[@ANNOTATION_ID="%s"]' %parentRefID)
                if not parentAnnotation:
                    parentAnnotation = doc.find('TIER/ANNOTATION/REF_ANNOTATION[@ANNOTATION_ID="%s"]' % parentRefID)
                if 'TIME_SLOT_REF1' in parentAnnotation.attrib:
                    startTimeSlotID = parentAnnotation.attrib['TIME_SLOT_REF1']
                    endTimeSlotID = parentAnnotation.attrib['TIME_SLOT_REF2']
                    # Only row 0 gets real IDs; the remaining rows are padded
                    # with the string "NaN" placeholders.
                    newTSR1_column = [startTimeSlotID]
                    newTSR2_column = [endTimeSlotID]
                    for i in range(1,tbl_elements.shape[0]):
                        newTSR1_column.append("NaN")
                        newTSR2_column.append("NaN")
                    tbl_elements.insert(0,'TIME_SLOT_REF1',newTSR1_column)
                    tbl_elements.insert(0, 'TIME_SLOT_REF2', newTSR2_column)
                    # row = tbl_elements.loc[tbl_elements['TIER_ID'] == self.speechTier]
                    # print("speech tier row is %s" %row)
                else:
                    try:
                        parentRefID = parentAnnotation.attrib[('ANNOTATION_REF')]
                    except KeyError:
                        '''this will happen if the speech tier is not time-aligned or the child
                        of a time-aligned tier; this will probably crash SLEXIL, but this is an inadmissible
                        file type anyway, we can figure out how to warn the user later'''
                        print('bailing')
                        startTimeSlotID = float('NaN')
                        endTimeSlotID = float('NaN')
        return startTimeSlotID, endTimeSlotID
    def buildTable(self, doc, lineElements):
        #doc = .eaf file; lineElements = line element and its children
        # Assemble four sub-tables (element attribs, tier attribs, times,
        # text) and concatenate them column-wise.
        tbl_elements = pd.DataFrame(e.attrib for e in lineElements)
        startTimeSlotID, endTimeSlotID = self.getTimeSlotIDs(doc, tbl_elements)
        pattern = "TIME_ORDER/TIME_SLOT[@TIME_SLOT_ID='%s']" % startTimeSlotID
        startTime = int(doc.find(pattern).attrib["TIME_VALUE"])
        startTimes = [startTime]
        rowCount = tbl_elements.shape[0]
        '''next step fills in NaN for all the children of the time-aligned tier, but since that
        messes us up with the getStart/End methods in IjalLine if the *speech tier* isn't aligned,
        let's just give every row a copy of the start and end times'''
        for i in range(1, rowCount):
            # startTimes.append(float('NaN'))
            startTimes.append(startTime)
        '''repeat previous for end times'''
        pattern = "TIME_ORDER/TIME_SLOT[@TIME_SLOT_ID='%s']" % endTimeSlotID
        endTime = int(doc.find(pattern).attrib["TIME_VALUE"])
        endTimes = [endTime]
        for i in range(1, rowCount):
            # endTimes.append(float('NaN'))
            endTimes.append(endTime)
        tbl_times = pd.DataFrame({"START": startTimes, "END": endTimes}) #dataframe of timecodes speech & children
        # print(tbl_times)
        ids = [e.attrib["ANNOTATION_ID"] for e in lineElements] #list of ids
        tierInfo = []
        text = []
        for id in ids:
            # Grandparent holds the tier attributes; child holds the text.
            parentPattern = "*/*/*/[@ANNOTATION_ID='%s']/../.." % id
            tierAttributes = doc.find(parentPattern).attrib
            tierInfo.append(tierAttributes)
            childPattern = "*/*/*/[@ANNOTATION_ID='%s']/ANNOTATION_VALUE" % id
            elementText = doc.find(childPattern).text
            if (elementText is None):
                elementText = ""
            # print("elementText: %s" % elementText)
            text.append(elementText.strip())
        tbl_tierInfo = pd.DataFrame(tierInfo) #a dataframe of the attributes of speech & children
        tbl_text = pd.DataFrame({"TEXT": text}) #dataframe of text contents of speech & children
        # print("---- tbl_elements")
        # print(tbl_elements)
        #
        # print("---- tbl_tierInfo")
        # print(tbl_tierInfo)
        #
        # print("---- tbl_times")
        # print(tbl_times)
        #
        # print("---- tbl_text")
        # print(tbl_text)
        tbl = pd.concat([tbl_elements, tbl_tierInfo, tbl_times, tbl_text], axis=1)
        # Try the full column set first; fall back to the reduced set for
        # tiers that lack ANNOTATION_REF / PARENT_REF columns.
        preferredColumnOrder = ["ANNOTATION_ID", "LINGUISTIC_TYPE_REF", "START", "END", "TEXT", "ANNOTATION_REF",
                                "TIME_SLOT_REF1", "TIME_SLOT_REF2",
                                "PARENT_REF", "TIER_ID"]
        try:
            tbl = tbl[preferredColumnOrder]
        except KeyError:
            preferredColumnOrder = ["ANNOTATION_ID", "LINGUISTIC_TYPE_REF", "START", "END", "TEXT",
                                    "TIME_SLOT_REF1", "TIME_SLOT_REF2", "TIER_ID"]
            tbl = tbl[preferredColumnOrder]
        # Derived text-diagnostic columns.
        textLengths = [len(t) for t in tbl["TEXT"].tolist()]
        tbl["TEXT_LENGTH"] = textLengths
        hasTabs = ["\t" in t for t in tbl["TEXT"].tolist()]
        tbl["HAS_TABS"] = hasTabs
        hasSpaces = [" " in t for t in tbl["TEXT"].tolist()]
        tbl["HAS_SPACES"] = hasSpaces
        # eliminate rows with no text
        # leave it in for now, take the tiers at face value, handle empty lines in toHTML
        tbl = tbl.query("TEXT != ''").reset_index(drop=True)
        # print("---- tbl")
        # print(tbl)
        return (tbl)
| true |
638c162e138950833fb3d155612e511ee5066df9 | Python | RaishavHanspal/PythonBeginner | /cat_talk.py | UTF-8 | 331 | 3.265625 | 3 | [] | no_license | def greeting(variable):
print('{}{}'.format(' '*len(' /'),'_'*len(variable)))
print('{} {}'.format(' '*len(' '),'< '+ variable +' >'))
print('{}{}'.format(' '*len(' /'),'-'*len(variable)))
print(' /')
print(' /\_/\ /')
print('( o.o )')
print(' > ^ <')
print('')
| true |
a6cce496ea19f07ad7b0b9bb3cf7a3a56d18d116 | Python | eejd/course-content | /tutorials/W3D1_BayesianDecisions/solutions/W3D1_Tutorial2_Solution_797c061a.py | UTF-8 | 1,165 | 3.28125 | 3 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | permissive |
# 1. The minima of the different loss functions correspond to the mean, median,
#. and mode of the posterior (just as in Interactive Demo 3). If we have a bi-modal
#. prior, those properties of the posterior can be distinct.
#. 2. The posterior is just another probability distribution, so all the properties we
#. saw in Interactive Demo 3 are true of the posterior, too—even though in this case
#. the posterior inherited the non-symmetric properties from the prior. So, in this
#. example, any prior that itself has a distinct mean, median, and mode will also
#. produce differences across their equivalent loss functions.
#. 3. As long as the posterior probability densities are symmetric around the true mean
#. (hidden state), the MSE and ABS loss functions will look the same as for a Gaussian
#. prior. The mean and the median are the same for symmetric distributions (when the
#. mean exists—look up the Cauchy distribution). The mode will be the same as the
#. mean and median when the distribution is unimodal (and therefore when the mixture
#. means are the same. There can also be two modes with the mixture prior! | true |
ef335f7cb7123886289b2c036a8a9101dbe16e9c | Python | Bipul-Harsh/Code-Chef-Solutions | /SIMPSTAT.py | UTF-8 | 195 | 2.84375 | 3 | [] | no_license | # cook your dish here
# For each test case: read n and k, sort the n values, drop the k smallest
# and k largest, and print the mean of the remainder to 6 decimal places.
for _ in range(int(input())):
    n, k = map(int, input().split())
    a = sorted(map(int, input().split()))
    ans = a[k:n-k]
    print('{:.6f}'.format(sum(ans)/len(ans)))
266ef6b3bf1a9c8dc8c7bf1e055a65ddb8ced58b | Python | murrutia/ngwallpaper | /modules/Displays.py | UTF-8 | 2,258 | 2.765625 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
import Script
from DatabaseActions import DatabaseActions
class Displays(object):
    """Models the Mac's physical displays as read from com.apple.spaces.plist,
    reconciling their UUIDs against the wallpaper database.

    NOTE(review): on Python 3 `filter(...)` in filterout_virtual_display
    returns a lazy iterator, which would break the later indexing/len calls;
    this module appears to target Python 2 -- confirm before porting.
    """
    def __init__(self, ):
        self.db = DatabaseActions()
        spaces_display_configuration = Script.load_json_file("~/Library/Preferences/com.apple.spaces.plist")
        self.displays = spaces_display_configuration["SpacesDisplayConfiguration"]["Management Data"]["Monitors"]
        self.filterout_virtual_display()
        self.determine_main_display_uuid()
    def filterout_virtual_display(self):
        # Keep only monitors that carry a "Current Space" entry (real displays).
        self.displays = filter(lambda m: "Current Space" in m, self.displays)
    def determine_main_display_uuid(self, retry=False):
        # In the plist we've loaded earlier, on a MBP the Main Display's UUID isn't listed
        # so before we alter the wallpaper database, we retrieve it by getting them all
        # and removing those we know. By elimination, the last one should be the main.
        uuids = self.db.get_display_uuids()
        main_display = None
        for display in self.displays:
            uuid = display["Display Identifier"]
            if uuid == 'Main':
                main_display = display
            elif uuid in uuids:
                uuids.remove(display["Display Identifier"])
        if len(uuids) == 1 and main_display != None:
            # Exactly one unexplained UUID remains: assign it to the main display.
            main_display["Display Identifier"] = uuids[0]
        elif not retry:
            # Ambiguous result: force the wallpaper DB to refresh, then retry once.
            self._try_refreshing_wallpaper_database()
            self.determine_main_display_uuid(True)
        else:
            Script.print_error('''Error while determining main display uuid !
                Try resetting manually a desktop backgoung and relaunching this command
                ''')
            sys.exit(1)
    def _try_refreshing_wallpaper_database(self):
        # Clear the displays table, then set a desktop picture via Finder so
        # macOS repopulates the wallpaper database.
        self.db.sqlite('delete from displays where 1')
        Script.shell('''osascript -e 'tell application "Finder" to set desktop picture to POSIX file "/Library/Desktop Pictures/Snow.jpg"' ''')
    def __getitem__(self, item):
        return self.displays[item]
    def __len__(self):
        return len(self.displays)
    def displayCount(self):
        # Alias for len(self).
        return len(self.displays)
    def spaceCount(self):
        # Total number of Spaces across all displays.
        return sum(len(display['Spaces']) for display in self.displays)
| true |
7705d3f7374eba3641e710c6c67e7318726d9367 | Python | 2016102050016/n_gram_graph | /datasets/prepare_muv.py | UTF-8 | 3,285 | 2.546875 | 3 | [
"MIT"
] | permissive | from __future__ import print_function
import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem, MolFromSmiles, MolFromMolBlock, MolToSmarts
from sklearn.model_selection import StratifiedKFold
from data_preprocess import *
import os
np.random.seed(123)
target_names = [
'MUV-466', 'MUV-548', 'MUV-600', 'MUV-644', 'MUV-652', 'MUV-689',
'MUV-692', 'MUV-712', 'MUV-713', 'MUV-733', 'MUV-737', 'MUV-810',
'MUV-832', 'MUV-846', 'MUV-852', 'MUV-858', 'MUV-859'
]
max_atom_num = 55
K = 5
def prepare_fingerprints_muv(dataset_name):
    """Build per-target MUV CSV folds with Morgan fingerprints.

    Reads '<dataset_name>.csv.gz', and for each MUV target: drops rows with
    missing values, skips molecules with more than `max_atom_num` atoms,
    attaches 1024-bit radius-2 Morgan fingerprints, and writes K stratified
    folds to '<dataset_name>/<target>/<fold>.csv.gz'.

    Fix: the deprecated (and removed in pandas >= 1.0) `.ix` indexer was
    replaced by `.loc`; `valid_index` holds index labels, for which `.loc`
    is the correct accessor.
    """
    whole_data_pd = pd.read_csv('{}.csv.gz'.format(dataset_name))
    for target_name in target_names:
        print(target_name)
        column = [target_name, 'mol_id', 'smiles']
        data_pd = whole_data_pd.dropna(how='any', subset=column)[column]
        data_pd = data_pd.rename(columns={"smiles": "SMILES", "mol_id": "Molecule"})
        morgan_fps = []
        valid_index = []
        index_list = data_pd.index.tolist()
        smiles_list = data_pd['SMILES'].tolist()
        for idx, smiles in zip(index_list, smiles_list):
            mol = Chem.MolFromSmiles(smiles)
            # Skip molecules that exceed the atom-count cap.
            if len(mol.GetAtoms()) > max_atom_num:
                print('Outlier {} has {} atoms'.format(idx, mol.GetNumAtoms()))
                continue
            valid_index.append(idx)
            fingerprints = AllChem.GetMorganFingerprintAsBitVect(mol, radius=2, nBits=1024)
            morgan_fps.append(fingerprints.ToBitString())
        # .loc selects by the index labels collected above (was .ix).
        data_pd = data_pd.loc[valid_index]
        data_pd['Fingerprints'] = morgan_fps
        data_pd = data_pd[['Molecule', 'SMILES', 'Fingerprints', target_name]]
        y_label = data_pd[target_name].tolist()
        y_label = np.array(y_label)
        directory = '{}/{}'.format(dataset_name, target_name)
        if not os.path.exists(directory):
            os.makedirs(directory)
        print('total shape\t', data_pd.shape)
        # Stratified K-fold split; each fold is written as its own csv.gz.
        skf = StratifiedKFold(n_splits=K, shuffle=True)
        for i, (_, index) in enumerate(skf.split(y_label, y_label)):
            temp_pd = data_pd.iloc[index]
            print(i, '\t', temp_pd.shape)
            temp_pd.to_csv('{}/{}.csv.gz'.format(directory, i), compression='gzip', index=None)
    return
def get_hit_ratio():
    """Print, for each MUV target, the fraction of positive labels
    across graph folds 0-3 (formatted as a dict-literal line)."""
    for target_name in target_names:
        directory = 'muv/{}'.format(target_name)
        labels = []
        for fold in range(4):
            data_path = '{}/{}_graph.npz'.format(directory, fold)
            archive = np.load(data_path)
            labels.extend(archive['label_name'])
        labels = np.stack(labels)
        hit_ratio = 1.0 * sum(labels) / len(labels)
        print('\'{}\': {},'.format(target_name, hit_ratio))
if __name__ == '__main__':
    # Pipeline: build fingerprint folds, convert each fold csv to a graph
    # .npz via extract_graph, then report per-target positive-label ratios.
    dataset_name = 'muv'
    prepare_fingerprints_muv(dataset_name)
    for target_name in target_names:
        directory = '{}/{}'.format(dataset_name, target_name)
        for i in range(K):
            extract_graph(data_path='{}/{}.csv.gz'.format(directory, i),
                          out_file_path='{}/{}_graph.npz'.format(directory, i),
                          label_name=target_name,
                          max_atom_num=max_atom_num)
    get_hit_ratio()
| true |
4d2af918325de8763a1ce0bc3e1d3df9956cd72f | Python | kermitt/challenges | /py/Tester.py | UTF-8 | 220 | 3.15625 | 3 | [] | no_license | def log(s):
print("|{}|".format(s))
def verdict(actual, expected):
    """Print PASS/FAIL for an equality check, followed by both values."""
    outcome = "PASS" if actual == expected else "FAIL"
    print("{} |{}| ---> |{}|".format(outcome, actual, expected))
| true |
764a03e20c62c3ad42c858f508c949383aa8f4b3 | Python | vakisan/Python-Summer-2020 | /Python Syntax/Exponents.py | UTF-8 | 185 | 3.984375 | 4 | [] | no_license | # Calculation of squares for:
# Number of squares in a 6x6 quilt:
print(6 ** 2)
# Number of squares in a 7x7 quilt:
print(7 ** 2)
# Number of squares in an 8x8 quilt:
print(8 ** 2)
# Squares needed for 6 people to each have 6 quilts of size 6x6 (6**4):
print(6 ** 4)
a8abb8042b0982f5ef76092dc2d2fec24b8cb6ce | Python | webclinic017/TraderSoftwareRP | /EXE_RP/PROJECT.programmingTools/PROJECT.DailyControls/copy_invoices_from_finance.py | UTF-8 | 770 | 2.6875 | 3 | [] | no_license | ######### VARIABLE TO CHANGE ###############
############################################
# NOTE(review): this first assignment is immediately overwritten below and
# is dead code; the "<----- Amend this path" instruction refers to it.
FinanceDrive = "C:/Work/test/" ############# <----- Amend this path only
############################################
############################################
# Effective source directory (Windows network share).
FinanceDrive = 'Y:\FINANCE_ALL\INVOICING\\2011_InvoicesINC\December'
# Python 2 print statements throughout this script.
print FinanceDrive
##### DATE SETTINGS #####
import datetime, shutil, os
path = os.getcwd() + '/'
# todayf is today's date formatted as YYYYMMDD.
today = datetime.date.today()
todayf = today.strftime('%Y%m%d')
todaystring = str(todayf)
print todaystring
##### COPY TO Y:\ DRIVE #####
filedate = todayf
monthtoday = todayf[0:6]
SfArea = 'Y:\EXE\DATA\INVOICES\\'
sfmonth = SfArea + 'Invoices from ' + todayf
# Recursively copies the whole source tree; fails if the target exists.
shutil.copytree(FinanceDrive, sfmonth)
print 'WELL DONE'
| true |
3fc2c395f04cce3bad4254f7bce987ffa9301ee5 | Python | r-nulled/interview-questions | /numbers/combination_w_weights.py | UTF-8 | 1,993 | 3.859375 | 4 | [] | no_license | """
Given 4 separate lists of prices (A_1, A_2, A_3, A_4), each for a different product (eg hats, shoes, shirts, and pants), how many combinations (sets of clothing including 1 item from each product category) can one make
given a (budget) total price limitation of B?
example input:
hats: [1, 2, 3]
shoes: [2, 3, 4]
pants: [1,3, 5]
shirts: [1, 2]
[1,2][3,4][5,6][7,8]
[1,3][1,4][2,3][2,4],[5,7][5,8][6,7][6,8]
Simple solution:
Find and check every possible combination. Time complexity: O(klmn)*O(1)
Better solutions:
- Dynamic programming is a tempting tool to reach for, but the potentially large input size of B makes it very memory expensive.
- We want to avoid testing all klmn combinations.
- We can merge our four lists into two list of (hat, shoes) and (pants, shirts) combinations in O(kl) + O(mn) time.
- We map the prices of the two combined lists onto a hashmap of (price : number of combinations)
- For each price pair between the two lists satisfying the budgetary constraint, add the product to the total.
- Run time is now contingent not upon the length of the lists, but the prices of the items: O(max(price_1*price_2)*max(price_3*price*4))
"""
def inc_counts(counter, prices):
    """Tally each value in *prices* into *counter* (a dict of price -> count).

    Bug fix: ``dict.get`` only takes its default positionally, so the original
    ``counter.get(price, default=0)`` raised
    ``TypeError: get() takes no keyword arguments`` on first use.
    """
    for price in prices:
        counter[price] = counter.get(price, 0) + 1
def num_combinations(one, two, three, four, budget):
    """Count outfits (one item per category) whose total price fits in *budget*.

    The four price lists are merged pairwise into combined-price lists, each
    combined list is tallied into a price -> count map, and the maps are
    cross-multiplied for every price pair that stays within the budget.

    Returns a tuple ``(total, map_one_two, map_three_four)`` where the maps
    are the tallies of the two merged lists.
    """
    pair_left = [x + y for y in one for x in two]
    pair_right = [x + y for x in three for y in four]

    def tally(values):
        # Local histogram helper: price -> number of combinations at that price.
        counts = {}
        for value in values:
            counts[value] = counts.get(value, 0) + 1
        return counts

    left_counts = tally(pair_left)
    right_counts = tally(pair_right)

    # For every left-half price, every right-half price from 1 up to the
    # remaining budget contributes (left count * right count) outfits.
    total = sum(
        left_counts[cost_a] * right_counts.get(cost_b, 0)
        for cost_a in left_counts
        for cost_b in range(1, budget - cost_a + 1)
    )
    return (total, left_counts, right_counts)
# Example price lists for the four product categories, matching the module
# docstring. Nothing below calls num_combinations with them; they exist for
# manual experimentation in a REPL.
hats = [1, 2, 3]
shirts = [1, 2]
shoes = [2, 3, 4]
pants = [1,3, 5]
| true |
f75c32cca0de8f159dfadb2d3a8eb4bc15412b16 | Python | Teftelly/Cloude_service | /NeuroPlusPoop/Picture/OCR.py | UTF-8 | 1,409 | 2.6875 | 3 | [] | no_license | from PIL import Image
import pytesseract
import logging
import cv2
import os
import pathlib
class Picture_to_text():
    '''Wrapper around OpenCV and Tesseract that extracts Russian text from an
    image file and writes the recognised text to "Output.txt".'''

    def Get_text_from_picture(self, image):
        '''OCR the image at the given path and return the name of the text
        file ("Output.txt") the result was written to. On any failure a
        placeholder message is written instead.'''
        logging.basicConfig(filename="Text_Graber.log", level=logging.INFO)
        logging.debug("initializing...")
        try:
            logging.info("Opening image: {}".format(image))
            image = cv2.imread(image)
            logging.info("Cleare image from junk")
            # Greyscale conversion plus a median blur to reduce noise before OCR.
            cleaned = cv2.medianBlur(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), 3)
            # Temporary file named after the process id; written for Tesseract
            # to read back. NOTE(review): it is never deleted afterwards.
            temp_name = "{}.png".format(os.getpid())
            print(temp_name)
            cv2.imwrite(temp_name, cleaned)
            # Russian language model, page-segmentation mode 6.
            tesseract_options = r'-l rus --psm 6'
            logging.info("Try to read russian text from picture")
            recognised = pytesseract.image_to_string(Image.open(temp_name), config=tesseract_options)
        except Exception as err:
            print(err)
            logging.info("Error! Check input file, it should be here: {}".format(image))
            recognised = "Кажеться ты обосрался"
        out_name = "Output.txt"
        logging.info("Create output file with text")
        out_file = open(out_name, "w")
        out_file.write(recognised)
        out_file.close()
        return out_name
21f5f6eda4c9f566a71fa6e0bf10ff275e2b5b3e | Python | RossMeikleham/DailyProgrammer | /Easy/Challenge227/Challenge227.py | UTF-8 | 3,145 | 3.96875 | 4 | [
"MIT"
] | permissive | import math
from decimal import Decimal
def getPoint(s, n) :
    '''Given an odd spiral size s and a cell number n (1 <= n <= s*s), return
    the (x, y) coordinates of cell n in an s-by-s outward square spiral
    numbered from the centre. Coordinates are floats because of the true
    divisions below. Prints an error (and implicitly returns None) for
    invalid input.'''
    if n > (s**2) or n <= 0 or (s & 0x1 != 0x1):
        print("Error expected n > 0 and n < s^2, and s to be odd")
    else:
        #Obtain co-ordinates for center of the spiral
        center = (s + 1) / 2
        #Next obtain the "level" that the given n is in,
        #Low represents the lowest value, and high the highest
        # e.g. level 0 is for n =1,
        #level 1 is between 2 and 9, then level 2 is between 10 and 25 etc.
        high = (math.ceil(math.sqrt(n)))**2
        low = 0
        #Highest must be the next odd square number above or equal to n
        if (high & 0x1 != 0x1):
            high = (math.sqrt(high) + 1)**2
        sqrtHigh = math.sqrt(high)
        low = (sqrtHigh - 2)**2 + 1
        # Each ring has four sides of this many cells
        sideLength = (high - low + 1)/4
        offset = (sqrtHigh - 1)/2 # Offset from center where the perimiter is
        # End Of Spiral: the ring's highest number sits at the bottom-right corner
        if (n == high):
            return (center + offset, center + offset)
        # Right side of spiral
        elif (n <= low + sideLength - 1):
            y = low - n + ((sqrtHigh - 3)/2)
            return (center + offset, center + y)
        # Top side of spiral (excluding top right positon)
        elif (n <= low + (sideLength * 2) - 1):
            x = low - n + sideLength + ((sqrtHigh - 3)/2)
            return (center + x, center - offset)
        # Left side of spiral (excluding top left positon)
        elif (n <= low + (sideLength * 3) - 1):
            y = low - n + (2 * sideLength) + ((sqrtHigh - 3)/2)
            return (center - offset, center - y)
        # Bottom side of spiral (excluding bottom left position)
        else:
            x = low - n + (3 * sideLength) + ((sqrtHigh - 3)/2)
            return (center - x, center + offset)
def getN(s, x, y):
    '''Inverse of getPoint: given an odd spiral size s and 1-based coordinates
    (x, y), return the spiral number stored at that position. Off-diagonal
    results are Decimal (presumably to avoid float rounding -- the centre
    returns the plain int 1). Prints an error (and implicitly returns None)
    for invalid input.'''
    if (x < 1 or x > s) or (y < 1 or y > s) or (s & 0x1 != 0x1):
        print("Error, expected 1 <= x,y <= s, and s to be odd")
    else:
        center = Decimal((s + 1)/2)
        #Use Chebyshev distance to work out how many "levels" out the point is
        level = Decimal(max(abs(x - center), abs(y - center)))
        # Side length of the square ring at this level
        squareLevel = Decimal(2 * level + 1)
        # Before Top Left Corner
        if (x > y):
            low = Decimal((squareLevel - 2)**2 + 1)
            n = Decimal(low + squareLevel - 2 - ((x - center) + (y - center)))
            return n
        # After Top Left Corner
        elif (y > x):
            low = Decimal((squareLevel - 1)**2 + 2)
            n = Decimal(low + squareLevel - 2 + ((x - center) + (y - center)))
            return n
        ##On the main Diagonal
        else:
            if (y < center):
                # Upper-left diagonal: first cell of the ring's top-left run
                return (squareLevel - 1)**2 + 1
            elif (y > center):
                # Lower-right diagonal: the ring's highest number
                return squareLevel ** 2
            else:
                return 1
def points():
    '''Read a spiral size and a cell number from stdin, then print the
    (x, y) coordinates of that cell.'''
    size, cell = int(input()), int(input())
    print(getPoint(size, cell))
s = int(input())
x = int(input())
y = int(input())
print(getN(s, x, y))
n()
| true |
4929156dc836f9ea9e06decb44d15381620b7c67 | Python | jack8daniels2/bouncer | /bouncer/storage/__init__.py | UTF-8 | 2,194 | 2.734375 | 3 | [] | no_license | from abc import ABCMeta, abstractmethod, abstractproperty
from tornado import gen
class VerdictBase(object):
    '''Abstract base for verdict stores (databases and caches), defining the
    schema-encapsulation hooks shared by both.'''
    # NOTE(review): ``__metaclass__`` is the Python 2 spelling; Python 3
    # ignores this attribute, so the abstract methods below would not be
    # enforced there -- confirm which interpreter this project targets.
    __metaclass__ = ABCMeta
    @abstractmethod
    def generate_query_parameters(self, domain, path_parameters):
        '''
        Generate a dict of database query parameters from
        domain and path_parameters. This encapsulates the db schema.
        '''
        pass
    @abstractmethod
    def insert(self, domain, path, query_parameters, payload):
        '''
        Insert key into the cache
        '''
        pass
    @abstractmethod
    def parse_response(self, res):
        '''
        Map response to generic format that the caller expects. Another encapsulation
        over the schema.
        '''
        pass
    # NOTE(review): abstractproperty is deprecated in favour of stacking
    # @property over @abstractmethod -- left as-is for compatibility.
    @abstractproperty
    def is_whitelist(self):
        '''
        In case we want to maintain separate DBs for whitelists too, this
        property will help differentiate them from blacklists.
        '''
        pass
    @abstractproperty
    def priority(self):
        '''
        In case we need to aggregate multiple dbs, priority will help
        break a tie
        '''
        pass
class VerdictDB(VerdictBase):
    """
    VerdictDB.lookup is a coroutine and returns a Future to support asynchronous lookups
    """
    __metaclass__ = ABCMeta
    # NOTE(review): the abc docs recommend @abstractmethod be the innermost
    # decorator when combined with others; here it wraps @gen.coroutine.
    # Inert while __metaclass__ has no effect on Python 3, but worth checking.
    @abstractmethod
    @gen.coroutine
    def lookup(self, domain, path, query_parameters):
        '''
        Lookup domain and path_parameters *asynchronously* and return a Future
        that either yields a verdict if one is available, or None.
        Use generate_query_parameters to map the query arguments to db query
        arguments.
        '''
        pass
class VerdictCache(VerdictBase):
    """
    VerdictCache.lookup should be really fast since it is a blocking call.
    """
    __metaclass__ = ABCMeta
    @abstractmethod
    def lookup(self, domain, path, query_parameters):
        '''
        Look up domain and path_parameters *synchronously* and return a
        verdict if one is available, or None. This call blocks the caller,
        so implementations must be fast (e.g. in-memory lookups).
        Use generate_query_parameters to map the query arguments to db query
        arguments.
        '''
        pass
| true |
d9a88bada67a8e966b34c7ff3a89780d01e929e4 | Python | gglue/Super-Dodger-Pygame | /pySprites.py | UTF-8 | 34,281 | 3.515625 | 4 | [] | no_license | '''
Name: Victor Li
Date: 5/5/2017
Description:
'''
import pygame, random
# The mixer must be initialised before any Sound objects can be created below.
pygame.mixer.init()
# Sound effect when the player uses its pickaxe
pickAxeSound = pygame.mixer.Sound("sound/pickAxeHit.wav")
pickAxeSound.set_volume(0.5)
# Sound effect when the player uses its mop
mopSound = pygame.mixer.Sound("sound/mopHit.wav")
mopSound.set_volume(0.5)
class Player(pygame.sprite.Sprite):
    ''' This class defines the sprite the player controls in the game.'''
    def __init__(self, screen, yPosition):
        '''This initializer takes the screen surface and a starting y position
        as parameters, initializes the image and rect attributes and the
        other state variables used for the player'''
        # Call the sprite __init__() method
        pygame.sprite.Sprite.__init__(self)
        # List of sprite images used by the class
        self.__playerImage = ['images/playerStandingLeft.gif', 'images/playerStandingRight.gif', 'images/playerLeft.gif', 'images/playerRight.gif', 'images/playerMopRight.gif', 'images/playerMopRight2.gif'\
                              , 'images/playerMopLeft.gif', 'images/playerMopLeft2.gif', 'images/playerAxeRight.gif', 'images/playerAxeRight2.gif', 'images/playerAxeLeft.gif', 'images/playerAxeLeft2.gif', 'images/playerHit.gif']
        # Set the image and rect attributes for the player
        self.image = pygame.image.load(self.__playerImage[1])
        self.rect = self.image.get_rect()
        self.rect.center = (screen.get_width()/2, yPosition)
        # Set instance variables
        self.__screen = screen
        # Walking speed in pixels per frame; savedSpeed keeps the base value
        # so slow() can scale from it rather than compounding.
        self.__currentSpeed = 6
        self.__savedSpeed = self.__currentSpeed
        # Vertical velocity used while jumping/falling
        self.__velocity = 0
        self.__currentPosition = self.rect.center
        self.__standState = True
        self.__jumpState = False
        self.__axeState = False
        self.__mopState = False
        self.__invincibleState = False
        self.__mineSprite = False
        # These variables are returned to check if the player is currently using a mop, pickaxe or none
        self.__animationMop = False
        self.__animationAxe = False
        # Posture determines whether the in-between animations should be played on the right/left side in
        # the update() method.
        # 0 = Idle/Walking facing Right, 1 = Mining/Mopping facing Right,
        # 2 = Idle/Walking facing Left, 3 = Mining/Mopping facing Left
        self.__posture = 0
        # This allows me to count the time for switching sprite images
        self.__startTime = pygame.time.get_ticks()
    def go_left(self):
        ''' This method will make the player's sprite go left by the walking speed,
        change the posture and the sprite image.'''
        self.image = pygame.image.load(self.__playerImage[2])
        self.rect.left -= self.__currentSpeed
        self.__posture = 2
    def go_right(self):
        ''' This method will make the player's sprite go right by the walking speed,
        change the posture and the sprite image.'''
        self.image = pygame.image.load(self.__playerImage[3])
        self.rect.right += self.__currentSpeed
        self.__posture = 0
    def slow(self, slowMultipler):
        ''' This method will set the player's speed to the base speed
        multiplied by slowMultipler '''
        self.__currentSpeed = self.__savedSpeed * slowMultipler
    def useMop(self):
        '''This method sets the mopState variable to True and changes the posture and
        sprite image to the mop wind-up frame for the current facing.'''
        self.__mopState = True
        if self.__posture == 0:
            self.image = pygame.image.load(self.__playerImage[4])
            self.__posture = 1
        elif self.__posture == 2:
            self.image = pygame.image.load(self.__playerImage[6])
            self.__posture = 3
    def useAxe(self):
        '''This method sets the axeState variable to True and changes the posture and
        sprite image to the pickaxe wind-up frame for the current facing.'''
        self.__axeState = True
        if self.__posture == 0:
            self.image = pygame.image.load(self.__playerImage[8])
            # Moved the sprite image a little, as the new sprite image is not the same size as the other sprites,
            # There will be would be a noticable shift in the sprite image if not.
            self.rect.right += 8
            self.__posture = 1
        elif self.__posture == 2:
            self.image = pygame.image.load(self.__playerImage[10])
            # Moved the sprite image a little, as the new sprite image is not the same size as the other sprites,
            # There will be would be a noticable shift in the sprite image if not.
            self.rect.right -= 8
            self.__posture = 3
    def jump(self):
        ''' This method will set the upward velocity to 30, set the jumpState
        to True and set the standState to False'''
        self.__standState = False
        self.__jumpState = True
        self.__velocity = 30
    def jumpFall(self, gravity):
        ''' This method makes the player jump/fall by subtracting the player's rect bottom
        by the velocity and also decreases the velocity by the gravity parameter '''
        self.rect.bottom -= self.__velocity
        self.__velocity -= gravity
    def getJump(self):
        ''' This method will return the jumpState variable '''
        return self.__jumpState
    def getStand(self):
        ''' This method will return the standState variable '''
        return self.__standState
    def getMop(self):
        ''' This method returns the animationMop flag: True only on the frame
        the mop swing animation lands (not the raw mopState). '''
        return self.__animationMop
    def getAxe(self):
        ''' This method returns the animationAxe flag: True only on the frame
        the pickaxe swing animation lands (not the raw axeState). '''
        return self.__animationAxe
    def setInvincible(self, stateBool):
        ''' This method will set the invincibleState to a bool based on the parameter '''
        self.__invincibleState = stateBool
    def getInvincible(self):
        ''' This method returns the invincibleState variable '''
        return self.__invincibleState
    def update(self):
        ''' This method will make the player stay in the screen boundaries, update the sprite image
        for in-between animations while moving, and stop the player from falling.'''
        self.__animationMop = False
        self.__animationAxe = False
        # If player reached left border, move the player back to the edge
        if (self.rect.left <= 0):
            self.rect.left = 0
        # If player reached right border, move the player back to the edge
        if (self.rect.right >= self.__screen.get_width()):
            self.rect.right = self.__screen.get_width()
        # If finished jumping, disable jumping and re-enable jumping, and set the velocity back to 0
        if self.__velocity == -35 and self.__jumpState == True:
            self.__velocity = 0
            self.__jumpState = False
            self.__standState = True
        # If the player is currently invincible, update the sprite image with an invincibility image
        if self.__invincibleState:
            self.image = pygame.image.load(self.__playerImage[12])
        # In-between sprite image switches for left and right walking/idle (every 250 ms)
        if pygame.time.get_ticks() - self.__startTime >= 250 and (self.__posture == 0 or self.__posture == 2):
            # If one of the many mining sprites were blit before walking, move the sprite image based on posture #,
            # As there would be a noticable shift if not.
            if self.__mineSprite:
                if self.__posture == 0:
                    self.rect.right -= 13
                else:
                    self.rect.right += 13
                self.__mineSprite = False
            # Update with right sided walking sprite images
            if self.__posture == 0:
                self.image = pygame.image.load(self.__playerImage[1])
            # Update with left sided walking sprite images
            else:
                self.image = pygame.image.load(self.__playerImage[0])
            # Reset the timer
            self.__startTime = pygame.time.get_ticks()
        # In-between sprite images for left and right mining/mopping (every 500 ms)
        if pygame.time.get_ticks() - self.__startTime >= 500 and (self.__posture == 1 or self.__posture == 3):
            # Update with right sided mining/mopping sprite images
            if self.__posture == 1:
                if self.__mopState:
                    self.image = pygame.image.load(self.__playerImage[5])
                    # Moves the sprite so it looks like the player moves during animation
                    self.rect.right += 5
                    self.__animationMop = True
                    mopSound.play()
                elif self.__axeState:
                    self.image = pygame.image.load(self.__playerImage[9])
                    # Moves the sprite so it looks like the player moves during animation
                    self.rect.right += 10
                    self.__mineSprite = True
                    self.__animationAxe = True
                    pickAxeSound.play()
                # Changes the posture to right side walking/idle
                self.__posture = 0
            # Update with left sided mining/mopping sprite images
            if self.__posture == 3:
                if self.__mopState:
                    self.image = pygame.image.load(self.__playerImage[7])
                    # Moves the sprite so it looks like the player moves during animation
                    self.rect.right -= 5
                    self.__animationMop = True
                    mopSound.play()
                elif self.__axeState:
                    self.image = pygame.image.load(self.__playerImage[11])
                    # Moves the sprite so it looks like the player moves during animation
                    self.rect.right -= 10
                    self.__mineSprite = True
                    self.__animationAxe = True
                    pickAxeSound.play()
                # Changes the posture to left side walking/idle
                self.__posture = 2
            # Set the axeState variable and mopState variable to False so there will be no iteration
            self.__axeState = False
            self.__mopState = False
            # Reset the timer
            self.__startTime = pygame.time.get_ticks()
        # Saves the current position of the sprite
        self.__currentPosition = self.rect.center
        # Obtain the new/updated sprite image rect attributes
        self.rect = self.image.get_rect()
        # Uses saved position and moves the sprite to that location, as everytime you get_rect(),
        # the sprite will always move back to (0,0)
        self.rect.center = self.__currentPosition
class Ground(pygame.sprite.Sprite):
    '''Static ground strip anchored to the bottom of the screen; the player
    stands on it and falling objects collide with it.'''

    def __init__(self, screen):
        '''Load the ground image and anchor it to the bottom edge of the
        given screen surface.'''
        super().__init__()
        self.image = pygame.image.load('images/ground.gif')
        self.rect = self.image.get_rect()
        self.rect.bottom = screen.get_height()
        # Pre-computed y coordinate 31 px above the sprite's top edge,
        # handed to callers as the standing level.
        self.__walkLevel = self.rect.top - 31

    def returnTopGround(self):
        '''Return the y coordinate where sprites should rest on the ground.'''
        return self.__walkLevel
class LiveCounter(pygame.sprite.Sprite):
    '''Heads-up display sprite that shows the player's remaining lives as a
    row of hearts.'''

    def __init__(self):
        '''Start with five lives and load the matching five-hearts image at
        the top of the window.'''
        pygame.sprite.Sprite.__init__(self)
        # Image at index i shows (5 - i) remaining hearts.
        self.__livesImages = ['images/fiveHearts.gif', 'images/fourHearts.gif', 'images/threeHearts.gif', 'images/twoHearts.gif', 'images/oneHeart.gif', 'images/noHeart.gif']
        # Index of the image currently displayed.
        self.__animationCount = 0
        self.image = pygame.image.load(self.__livesImages[self.__animationCount])
        self.rect = self.image.get_rect()
        self.rect.top = 5
        self.__lives = 5

    def loseLife(self, live):
        '''Remove up to ``live`` lives, never dropping below zero.'''
        for _ in range(live):
            if self.__lives > 0:
                self.__animationCount += 1
                self.__lives -= 1

    def gainLife(self):
        '''Restore one life, capped at five.'''
        if self.__lives < 5:
            self.__animationCount -= 1
            self.__lives += 1

    def getLife(self):
        '''Return the number of lives left.'''
        return self.__lives

    def update(self):
        '''Refresh the heart image to match the current life total.'''
        if 0 <= self.__animationCount <= 5:
            self.image = pygame.image.load(self.__livesImages[self.__animationCount])
class Meteor(pygame.sprite.Sprite):
    ''' This class defines the meteor sprite in which the player has to avoid colliding with '''
    def __init__(self, rowNum, colNum):
        ''' Initializer takes the rowNum and colNum parameters to set the position of it on the screen.
        colNum picks the 80 px wide column; rowNum (0 or 1, stored as 1 or 2)
        selects the speed behaviour. '''
        # Call the sprite __init__() method
        pygame.sprite.Sprite.__init__(self)
        # List of sprite images used by the class (spin animation frames)
        self.__meteorImages = ['images/meteorOne.gif', 'images/meteorTwo.gif', 'images/meteorThree.gif', 'images/meteorFour.gif', 'images/meteorFive.gif',\
                               'images/meteorSix.gif', 'images/meteorSeven.gif', 'images/meteorEight.gif', 'images/meteorNine.gif']
        # This instance variable counts which image the sprite should load
        self.__animationCount = 0
        # Set the sprite's image and sets the rect attributes
        self.image = pygame.image.load(self.__meteorImages[self.__animationCount])
        self.rect = self.image.get_rect()
        self.rect.left = ((colNum)*80)
        # Set instance variables
        self.__rowNum = rowNum + 1
        self.setSpeed()
        self.setRespawn(random.randrange(0,2))
        self.reset()
    def setSpeed(self):
        ''' This method sets the speed of the meteor: row 1 falls at a fixed 5,
        row 2 at a random 1-3.
        NOTE(review): rows other than 1 or 2 leave the speed unset -- confirm
        callers only ever pass rowNum 0 or 1 to the constructor. '''
        if self.__rowNum == 1:
            self.__meteorSpeed = 5
        elif self.__rowNum == 2:
            self.__meteorSpeed = random.randrange(1,4)
    def setRespawn(self, boolSpawn):
        ''' This method sets whether the meteor should respawn (start falling
        again) on the next reset '''
        if boolSpawn:
            self.__respawn = True
        else:
            self.__respawn = False
    def reset(self):
        ''' This method resets the position of the meteor above the screen.
        Row-1 meteors either resume falling or hold still (speed 0) depending
        on the respawn flag, which is then re-randomised; row-2 meteors always
        get a fresh random speed. '''
        self.rect.centery = -78
        if self.__rowNum == 1:
            if self.__respawn:
                self.setSpeed()
            else:
                self.__meteorSpeed = 0
            self.setRespawn(random.randrange(0,2))
        elif self.__rowNum == 2:
            self.setSpeed()
    def update(self):
        ''' This method advances the meteor's spin animation one frame (cycling
        through indices 1-8) and moves it down by its current speed '''
        self.__animationCount += 1
        self.image = pygame.image.load(self.__meteorImages[self.__animationCount])
        if self.__animationCount == 8:
            self.__animationCount = 0
        self.rect.centery += self.__meteorSpeed
class Puddle(pygame.sprite.Sprite):
    '''Oil puddle left on the ground where a falling stain landed.'''

    def __init__(self, screen, groundX):
        '''Place the puddle image on the bottom of the screen, centred
        horizontally at groundX.'''
        super().__init__()
        self.image = pygame.image.load('images/puddle.gif')
        self.rect = self.image.get_rect()
        self.rect.centerx = groundX
        self.rect.bottom = screen.get_height()
class Lakitu(pygame.sprite.Sprite):
    ''' This class defines the lakitu sprite that throws oil stains towards the player '''
    def __init__(self, screen):
        ''' Initializer takes the screen surface as a parameter to set rect attributes '''
        # Call the sprite __init__() method
        pygame.sprite.Sprite.__init__(self)
        # List of sprite images used by the class: idle frame plus 17 throw frames
        self.__lakituImages = ['images/lakitu.gif', 'images/lakituThrow1.gif', 'images/lakituThrow2.gif', 'images/lakituThrow3.gif', 'images/lakituThrow4.gif', 'images/lakituThrow5.gif', \
                               'images/lakituThrow6.gif', 'images/lakituThrow7.gif', 'images/lakituThrow8.gif', 'images/lakituThrow9.gif', 'images/lakituThrow10.gif', 'images/lakituThrow11.gif', \
                               'images/lakituThrow12.gif', 'images/lakituThrow13.gif', 'images/lakituThrow14.gif', 'images/lakituThrow15.gif', 'images/lakituThrow16.gif', 'images/lakituThrow17.gif']
        # Set the sprite's image and sets the rect attributes
        self.__animationCount = 0
        self.image = pygame.image.load(self.__lakituImages[self.__animationCount])
        self.rect = self.image.get_rect()
        self.rect.top = 20
        self.rect.left = 240
        # Set instance variables
        self.__screen = screen
        # Timestamp of the last throw-frame switch (frames advance every 30 ms)
        self.__startTime = pygame.time.get_ticks()
        self.__thrown = False
        # Horizontal patrol speed; sign flips when an edge is hit
        self.__speed = 5
    def throw(self):
        ''' This method updates the sprite image to make it look like lakitu is throwing oil stains '''
        if pygame.time.get_ticks() - self.__startTime >= 30:
            self.__animationCount += 1
            self.image = pygame.image.load(self.__lakituImages[self.__animationCount])
            # After the final frame, return to idle and end the throw
            if self.__animationCount == 17:
                self.__animationCount = 0
                self.__thrown = False
            self.__startTime = pygame.time.get_ticks()
    def getAnimationFrame(self):
        ''' This method returns the animationCount variable '''
        return self.__animationCount
    def setThrown(self, boolThrow):
        ''' This method sets the thrown flag, starting (or cancelling) a throw '''
        self.__thrown = boolThrow
    def getCoords(self):
        ''' This method returns the bottom left rect attribute of the lakitu '''
        return self.rect.bottomleft
    def update(self):
        ''' This method patrols the lakitu horizontally, reversing direction at
        the screen edges, and plays the throw animation while a throw is in
        progress '''
        self.rect.left += self.__speed
        if (self.rect.right >= self.__screen.get_width()) or (self.rect.left <= 0):
            self.__speed = -(self.__speed)
        if self.__thrown:
            self.throw()
class Stain(pygame.sprite.Sprite):
    '''Falling oil stain thrown by the lakitu; it creates a puddle when it
    reaches the ground and slows the player on contact.'''

    def __init__(self, stainCoords):
        '''Spawn the stain with its bottom-left corner at stainCoords
        (typically the lakitu's bottom-left).'''
        super().__init__()
        # Two flicker frames repeated so the animation index can run 0-5.
        self.__stainImages = ['images/stainOne.gif', 'images/stainTwo.gif'] * 3
        self.__animationCount = 0
        self.image = pygame.image.load(self.__stainImages[self.__animationCount])
        self.rect = self.image.get_rect()
        self.rect.bottomleft = stainCoords
        # Downward speed in pixels per frame.
        self.__stainSpeed = 3

    def getCoords(self):
        '''Return the stain's horizontal centre coordinate.'''
        return self.rect.centerx

    def update(self):
        '''Advance the flicker animation one frame and move the stain down.'''
        self.__animationCount += 1
        self.image = pygame.image.load(self.__stainImages[self.__animationCount])
        if self.__animationCount == 5:
            self.__animationCount = 0
        self.rect.top += self.__stainSpeed
class Bomb(pygame.sprite.Sprite):
    ''' This class defines a bomb that drops down from the top of the screen and spawns an explosion when collided with
    either the player or the ground '''
    def __init__(self, screen, bombInt):
        ''' Initializer takes the screen surface (for edge clamping) and bombInt,
        which selects the bomb type: 0 -> pink (type 1, drifts sideways),
        anything else -> black (type 0, falls straight down) '''
        # Call the sprite __init__() method
        pygame.sprite.Sprite.__init__(self)
        self.__bombInt = bombInt
        self.setBombType()
        self.__screen = screen
        self.__inAir = True
        self.__safeSpace = False
        # This allows me to count the time for switching sprite images
        self.__startTime = pygame.time.get_ticks()
        # Choose the bomb colour
        if self.__bombType == 1:
            self.pinkBomb()
        else:
            self.blackBomb()
        self.__animationCount = 0
        # Sets the image and the rect attributes (spawn above the screen)
        self.image = pygame.image.load(self.__bombImage[self.__animationCount])
        self.rect = self.image.get_rect()
        self.rect.centery = -100
        self.rect.left = random.randrange(0,640)
        self.__previousX = self.rect.centerx
        # Set x and y direction of bomb
        self.setSpeed()
    def pinkBomb(self):
        ''' This method sets the images of the bomb to a pink colour '''
        self.__bombImage = ['images/pinkBomb.gif', 'images/pinkBomb1.gif', 'images/pinkBomb2.gif', 'images/pinkBomb3.gif', 'images/pinkBomb4.gif']
    def blackBomb(self):
        ''' This method sets the images of the bomb to a black colour '''
        self.__bombImage = ['images/blackBomb.gif', 'images/blackBomb1.gif', 'images/blackBomb2.gif', 'images/blackBomb3.gif', 'images/blackBomb4.gif']
    def setBombType(self):
        ''' This method sets the bomb type from bombInt: only 0 yields the
        pink, sideways-drifting type 1 (a 1-in-5 chance after reset()) '''
        if self.__bombInt == 0:
            self.__bombType = 1
        else:
            self.__bombType = 0
    def reset(self):
        ''' This method re-randomises the bomb (type, colour, position and
        speed) and drops it from the top of the screen again '''
        self.__safeSpace = False
        self.__bombInt = random.randrange(0,5)
        self.setBombType()
        if self.__bombType == 1:
            self.pinkBomb()
        else:
            self.blackBomb()
        self.__animationCount = 0
        self.image = pygame.image.load(self.__bombImage[self.__animationCount])
        self.__inAir = True
        self.rect.bottom = 0
        self.rect.left = random.randrange(0,640)
        self.setSpeed()
    def setSpeed(self):
        ''' This method sets the fall speed: type 0 drops straight down,
        type 1 also drifts horizontally at a random speed '''
        if self.__bombType == 0:
            self.__dx = 0
            self.__dy = random.randrange(5, 11)
        elif self.__bombType == 1:
            self.__dx = random.randrange(-5,6)
            self.__dy = random.randrange(5, 11)
    def getCoords(self):
        ''' This method returns the x coordinate rect attribute of the sprite '''
        return self.rect.centerx
    def safeSpace(self):
        ''' This method moves the bomb to the top of the screen'''
        self.rect.centery = -100
        self.__safeSpace = True
    def getSafeSpace(self):
        ''' This method returns the safeSpace variable '''
        return self.__safeSpace
    def getAnimationFrame(self):
        ''' This method returns the animationCount variable '''
        return self.__animationCount
    def getBombType(self):
        ''' This method returns the bombType variable '''
        return self.__bombType
    def land(self, groundY):
        ''' This method parks the bomb on the ground at groundY and, every
        1500 ms, advances the fuse animation one frame (the caller is expected
        to reset/explode the bomb once the last frame is reached) '''
        self.__dx = 0
        self.__dy = 0
        self.__inAir = False
        if pygame.time.get_ticks() - self.__startTime >= 1500:
            self.__animationCount += 1
            # Remember the centre so the new (possibly differently sized)
            # frame stays in place after get_rect() resets the position
            self.__previousX = self.rect.centerx
            self.image = pygame.image.load(self.__bombImage[self.__animationCount])
            self.__startTime = pygame.time.get_ticks()
            self.rect = self.image.get_rect()
            self.rect.bottom = groundY + 33
            self.rect.centerx = self.__previousX
    def update(self):
        ''' This method moves the bomb by its direction speeds, clamps it to
        the screen edges, and re-randomises a drifting bomb whose speed
        component has dropped to zero '''
        # Move the bomb by the direction speed
        self.rect.centery += self.__dy
        self.rect.centerx += self.__dx
        # If bomb reached left border, move the bomb back to the edge
        if (self.rect.left <= 0):
            self.rect.left = 0
        # If bomb reached right border, move the bomb back to the edge
        if (self.rect.right >= self.__screen.get_width()):
            self.rect.right = self.__screen.get_width()
        # If the direction speed is 0 and is still in the air, randomize the speed once more
        if self.__inAir and self.__bombType == 1:
            if self.__dx == 0:
                self.__dx = random.randrange(-10, 11)
            elif self.__dy == 0:
                self.__dy = random.randrange(1, 11)
class Explosion(pygame.sprite.Sprite):
    ''' This class defines an explosion that is created when a bomb explodes.

    bombType 0 (circle blast) plays a 15-frame animation centred on the
    impact point; bombType 1 (line blast) plays a 5-frame animation spanning
    the middle of the screen. The sprite removes itself from all groups once
    its final frame has been shown.
    '''
    def __init__(self, screen, groundX, bombType):
        ''' Initializer takes the screen surface, the x position of the
        impact and the bomb type to pick the animation frames and position '''
        # Call the sprite __init__() method
        pygame.sprite.Sprite.__init__(self)
        # Pick the frame list for this bomb type
        if bombType == 0:
            self.__explosionImage = ['images/circleExplosion.gif', 'images/circleExplosion1.gif', 'images/circleExplosion2.gif', 'images/circleExplosion3.gif', 'images/circleExplosion4.gif', 'images/circleExplosion5.gif', 'images/circleExplosion6.gif',\
                                     'images/circleExplosion7.gif', 'images/circleExplosion8.gif', 'images/circleExplosion9.gif', 'images/circleExplosion10.gif', 'images/circleExplosion11.gif', 'images/circleExplosion12.gif',\
                                     'images/circleExplosion13.gif', 'images/circleExplosion14.gif']
        else:
            self.__explosionImage = ['images/lineExplosion.gif', 'images/lineExplosion1.gif', 'images/lineExplosion2.gif', 'images/lineExplosion3.gif', 'images/lineExplosion4.gif']
        self.__bombType = bombType
        self.__animationCount = 0
        # Timestamp of the last frame switch (frames advance every 50 ms)
        self.__startTime = pygame.time.get_ticks()
        self.image = pygame.image.load(self.__explosionImage[self.__animationCount])
        self.rect = self.image.get_rect()
        # Circle blasts appear at the impact point; line blasts span the middle
        if bombType == 0:
            self.rect.centerx = groundX
        else:
            self.rect.centerx = 320
        self.rect.bottom = screen.get_height() - 30
    def update(self):
        ''' Advance the animation one frame every 50 ms and remove the sprite
        once its final frame has been displayed '''
        # Index of the last frame for this bomb type
        lastFrame = 14 if self.__bombType == 0 else 4
        if self.__animationCount == lastFrame:
            self.kill()
            # Bug fix: return immediately after kill(). The original fell
            # through to the timer check and could advance the counter past
            # the end of the frame list in the same call, raising IndexError
            # (most easily for bombType 1, whose list has only 5 frames).
            return
        if pygame.time.get_ticks() - self.__startTime >= 50:
            self.__animationCount += 1
            self.image = pygame.image.load(self.__explosionImage[self.__animationCount])
            self.__startTime = pygame.time.get_ticks()
class ScoreKeeper(pygame.sprite.Sprite):
    '''Text sprite that renders the current score near the top-left corner.'''

    def __init__(self, screen):
        '''Create the score display with a starting score of zero.'''
        super().__init__()
        self.__screen = screen
        self.__score = 0
        self.__message = ""
        # 18 pt font; swapped for a smaller one once the score grows
        # past five digits (see update()).
        self.__font = pygame.font.Font('goodbyeDespair.ttf', 18)

    def addScore(self, amount):
        '''Increase the score by the given amount.'''
        self.__score += amount

    def getScore(self):
        '''Return the current score.'''
        return self.__score

    def update(self):
        '''Re-render the score text and anchor it near the top-left corner.
        Called automatically each frame by the sprite group.'''
        self.__message = "Score: %.f" % (self.__score)
        self.image = self.__font.render(self.__message, 1, (255, 255, 255))
        self.rect = self.image.get_rect()
        self.rect.left = 2
        self.rect.centery = 45
        # Drop to a smaller font once the score needs more room.
        if self.__score > 10000:
            self.__font = pygame.font.Font('goodbyeDespair.ttf', 16)
class PowerUp(pygame.sprite.Sprite):
    '''Falling pick-up that grants the player an extra life on contact.'''

    def __init__(self, screen):
        '''Spawn the power-up at a random x position above the play field.'''
        super().__init__()
        self.image = pygame.image.load('images/powerUp.gif')
        self.rect = self.image.get_rect()
        self.rect.left = random.randrange(0, 640)
        # Downward speed in pixels per frame (0 once landed or parked).
        self.__fallSpeed = 2
        self.__landBool = False
        self.__safeSpace = False
        # Timestamp used to expire the power-up 3 s after it lands.
        self.__startTime = pygame.time.get_ticks()

    def land(self):
        '''Stop falling and start the 3-second despawn timer (first call only).'''
        if not self.__landBool:
            self.__startTime = pygame.time.get_ticks()
            self.__fallSpeed = 0
            self.__landBool = True

    def safeSpace(self):
        '''Park the power-up off-screen and mark it as consumed/expired.'''
        self.__fallSpeed = 0
        self.rect.centerx = -100
        self.__safeSpace = True

    def getSafeSpace(self):
        '''Return True once the power-up has been moved off-screen.'''
        return self.__safeSpace

    def reset(self):
        '''Respawn the power-up falling from the top at a new random x.'''
        self.rect.left = random.randrange(0, 640)
        self.rect.centery = 0
        self.__landBool = False
        self.__safeSpace = False
        self.__fallSpeed = 2

    def update(self):
        '''Fall each frame; despawn 3 seconds after landing.'''
        self.rect.centery += self.__fallSpeed
        if self.__landBool and pygame.time.get_ticks() - self.__startTime >= 3000:
            self.safeSpace()
class Button(pygame.sprite.Sprite):
    '''Main-menu button that swaps between a normal and an enlarged image
    depending on whether it is highlighted.'''

    # Resting / highlighted centre positions for button types 0 (start),
    # 1 (controls) and 2 (quit).
    __SMALL_POS = {0: (320, 200), 1: (320, 280), 2: (320, 360)}
    __BIG_POS = {0: (295, 200), 1: (295, 280), 2: (295, 360)}

    def __init__(self, screen, buttonType):
        '''Load the artwork for the given button type and place the button in
        its resting position.'''
        super().__init__()
        # Indices 0-2 are the normal images, 3-5 the enlarged variants.
        self.__buttonImages = ['images/startButton.png', 'images/controlButton.png', 'images/quitButton.png', 'images/startButtonEnlarge.png', 'images/controlButtonEnlarge.png', 'images/quitButtonEnlarge.png']
        self.image = pygame.image.load(self.__buttonImages[buttonType])
        self.rect = self.image.get_rect()
        self.__buttonType = buttonType
        self.__enlargeBool = False
        self.minimize()

    def enlarge(self):
        '''Switch to the enlarged artwork at the highlighted position.'''
        if self.__buttonType in self.__BIG_POS:
            self.image = pygame.image.load(self.__buttonImages[self.__buttonType + 3])
            self.rect.center = self.__BIG_POS[self.__buttonType]

    def minimize(self):
        '''Switch to the normal artwork at the resting position.'''
        if self.__buttonType in self.__SMALL_POS:
            self.image = pygame.image.load(self.__buttonImages[self.__buttonType])
            self.rect.center = self.__SMALL_POS[self.__buttonType]

    def setEnlarge(self, enlarge):
        '''Record whether the button should currently be highlighted.'''
        self.__enlargeBool = enlarge

    def getRect(self):
        '''Return the button's current rect (used for hit testing).'''
        return self.rect

    def getButtonType(self):
        '''Return the button type index (0 = start, 1 = controls, 2 = quit).'''
        return self.__buttonType

    def update(self):
        '''Apply the highlighted or resting appearance each frame.'''
        if self.__enlargeBool:
            self.enlarge()
        else:
            self.minimize()
class ControlMenu(pygame.sprite.Sprite):
    """Static sprite that displays the controls-screen artwork."""

    def __init__(self):
        # Initialise the sprite machinery, then attach the controls image;
        # the rect stays at its default position since it is never moved here.
        super().__init__()
        self.image = pygame.image.load('images/controls.png')
        self.rect = self.image.get_rect()
| true |
c8860457b5e16afde2b035aba761609bc0a77650 | Python | MuMuPatrick/4TB6 | /MachineLearning/take_training_pictures.py | UTF-8 | 806 | 2.796875 | 3 | [] | no_license | #in terminal do
#pip3 install opencv-python
#pip3 install
#then run the script
#press "space" to take a picture
#press "q" or "ctrl+z" to quit the program
import cv2
import imutils
import time
# Open the default camera (device 0).
cam = cv2.VideoCapture(0)
# Specify image resolution.
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 352)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 288)
img_counter = 0
while True:
    ret, frame = cam.read()
    if not ret:
        print("failed to grab frame")
        break
    cv2.imshow("camera", frame)
    # Poll the keyboard exactly once per frame. The original called
    # cv2.waitKey(1) separately in each branch, so the 'q' check examined a
    # *different* poll than the space check and quitting was unreliable.
    key = cv2.waitKey(1) % 256
    if key == 32:
        # SPACE pressed: save the current frame as a training picture.
        img_name = "opencv_frame_{}.jpg".format(img_counter)
        cv2.imwrite(img_name, frame)
        print("{} written!".format(img_name))
        img_counter += 1
    elif key == ord("q"):
        break
cam.release()
cv2.destroyAllWindows()
| true |
c2691f821d70a5d53ccc4e673503102f4f21084f | Python | JHaller27/star_trader | /ts/pyCommodities/commodityCsv2Json.py | UTF-8 | 2,070 | 3.0625 | 3 | [] | no_license | import csv
import json
import argparse
# Command-line interface: input CSV path, output JSON path, optional vice filter.
parser = argparse.ArgumentParser()
parser.add_argument('csv_path', type=str, help='Path to CSV to read from')
parser.add_argument('json_path', type=str, help='Path to JSON to write to')
parser.add_argument('--no-vice', '-v', dest='novice', action='store_true', help='Set this flag to exclude vice')
args = parser.parse_args()
# Accumulates one {location, commodities} record per trading location.
commodities = []
def convert_row(line) -> list:
    """Map raw CSV cells to typed values.

    Empty string -> None (no data); numeric text -> float; anything else
    is kept as the original string.
    """
    def _coerce(cell):
        if cell == '':
            return None
        try:
            return float(cell)
        except ValueError:
            return cell

    return [_coerce(cell) for cell in line]
def buySellLines(itr) -> (str, list, list):
    """Yield (commodity name, buy row, sell row) triples.

    Consumes the row iterator two rows at a time: the first row of each
    pair holds buy prices, the second sell prices; the commodity name is
    the first cell of the buy row. Stops cleanly when rows run out.
    """
    while True:
        try:
            buy_row = convert_row(next(itr))
            sell_row = convert_row(next(itr))
        except StopIteration:
            return
        yield buy_row[0], buy_row, sell_row
with open(args.csv_path, newline='') as fin:
    reader = csv.reader(fin)
    i = iter(reader)
    # Header row: first cell is a label, the rest are " > "-separated locations.
    raw_locs = next(i)
    for loc in raw_locs[1:]:
        commodities.append({
            'location': ['Stanton'] + loc.split(' > '),
            'commodities': []
        })
    for name, buy_prices, sell_prices in buySellLines(i):
        # Optionally drop any commodity whose name mentions "vice".
        if args.novice and 'vice' in name.lower():
            continue
        # Swap buy to selling and sell to buying due to semantics
        # CSV assumes perspective of player (e.g. 'player can buy this')
        # JSON assumes perspective of port (e.g. 'port is selling this')
        for pidx in range(len(buy_prices) - 1):
            buy = buy_prices[pidx+1]
            sell = sell_prices[pidx+1]
            # Only record the commodity at ports where at least one price exists.
            if buy is not None or sell is not None:
                commodities[pidx]['commodities'].append({
                    'name': name,
                    'buying': sell,
                    'selling': buy
                })
json_text = json.JSONEncoder(indent=4).encode(commodities)
with open(args.json_path, 'w') as fout:
    fout.write(json_text)
| true |
5fc34133a06b1fc15fb65f04918e29fc706ff7ab | Python | V4p1d/Week3-Exercises | /Ex5.py | UTF-8 | 186 | 3.484375 | 3 | [] | no_license | # Write a program that, given an integer input, prints the prime numbers up to that integer.
# Hint, you might want to use two nested loops. Also, check how the modulo operation works.
| true |
7351c18a99723c59071410e75443502f9838bc5f | Python | TomekBarabasz/Game-AI | /python/tracer.py | UTF-8 | 6,292 | 2.8125 | 3 | [] | no_license | from subprocess import run
from itertools import count
def num2Suites(cards, separator=' '):
    """Render encoded card numbers as value+suit strings joined by *separator*.

    Encoding: the tens digit (9..14, offset by 9) selects the card value,
    the units digit (1..4) selects the suit.
    """
    suits = '♥♠♣♦'
    values = ['9', '10', 'W', 'D', 'K', 'A']
    rendered = [values[card // 10 - 9] + suits[card % 10 - 1] for card in cards]
    return separator.join(rendered)
def move2str(move):
    """Human-readable description of a move tuple (kind, payload).

    Unknown kinds fall through and yield None, matching the original.
    """
    kind = move[0]
    if kind == 'play':
        return kind + ' ' + num2Suites(move[1])
    if kind == 'take':
        return kind + ' ' + str(move[1])
    if kind == 'noop':
        return kind
class GameLogger:
    """File-backed logger for game states.

    ``GameLogger.create(None)`` returns a silent null-object logger so
    callers never need to branch on whether logging is enabled.
    """

    class DummyLogger:
        """No-op stand-in used when logging is disabled."""

        def log(self, *args):
            pass

        def logState(self, state):
            pass

    @classmethod
    def create(cls, filename):
        # No filename means logging is disabled.
        if filename is None:
            return GameLogger.DummyLogger()
        return GameLogger(filename)

    def __init__(self, filename):
        self.fp = open(filename, 'w')

    def log(self, *args):
        self.fp.write(*args)

    def logState(self, state):
        """Dump the state hash, stack and every player's hand."""
        self.fp.write('state hash is {0}\n'.format(state.hash()))
        self.fp.write('stack is {0}\n'.format(state.stack))
        for pi, hand in enumerate(state.hand):
            self.fp.write('p{0} hand is {1}\n'.format(pi, hand))
class Tracer:
    """No-op tracing interface; subclasses override the hooks they need."""

    def __init__(self):
        pass

    def enabled(self):
        # The base tracer reports itself inactive.
        return False

    def traceNode(self, node):
        pass

    def traceEdge(self, from_, to_, label):
        pass

    def trace(self, *txt):
        pass

    def restart(self, idxToHighlight):
        pass

    def close(self):
        pass
class ConsoleTracer(Tracer):
    """Tracer that dumps node details to stdout."""

    def __init__(self, lvl):
        self.lvl = lvl  # verbosity level (stored; not currently consulted)

    def enabled(self):
        return True

    def traceNode(self, n):
        """Print the player, best value, stack and both hands of a node."""
        print('player', n.state.current)
        print('bestv=', n.bestValue)
        print('stack=', n.state.stack)
        print('p0 hand=', n.state.hand[0])
        print('p1 hand=', n.state.hand[1])
        if hasattr(n, 'moves'):
            print('valid moves=', n.moves)

    def trace(self, *args):
        # NOTE: deliberately prints the argument tuple itself (its repr),
        # mirroring the original behaviour.
        print(args)
class FileTracer(Tracer):
    """Tracer that appends search diagnostics to a log file."""

    def __init__(self, lvl, logfile):
        self.lvl = lvl  # verbosity level (stored; not currently consulted)
        print('opening trace file', logfile)
        self.fn = open(logfile, 'w')

    def enabled(self):
        return True

    def traceNode(self, n):
        """Log the node name and best value, plus the best move when known."""
        line = 'backtracking node {0} bv {1}'.format(n.name, n.bestValue)
        if hasattr(n, 'bestMove'):
            line += ' bm {0}\n'.format(n.bestMove)
        else:
            line += '\n'
        self.fn.write(line)

    def trace(self, *args):
        self.fn.write(' '.join(map(str, args)) + '\n')

    def traceEdge(self, from_, to_, label):
        self.fn.write('node {0} trying move {1}\n'.format(from_, label))
class GraphTracer(Tracer):
    # Emits the search tree as a Graphviz .gv file (one file per restart) and
    # can optionally render it to SVG through an external Graphviz executable.
    def __init__(self, path, name_, exe):
        self.path = path        # output directory for the generated .gv/.svg files
        self.calcnum = 1        # restart counter, embedded in the file names
        self.name = name_
        self.exe = exe          # Graphviz executable path, or None for raw .gv only
        # Rich HTML-style node labels are only worthwhile when rendering.
        self.nodedesc = GraphTracer.nodeDescHtml if exe is not None else GraphTracer.nodeDesc
    def enabled(self):return True
    def restart(self,idxToHighlight):
        # Open a fresh graph file for the next search run.
        self.calcnum += 1
        self.fn = fn = self.path + '\\' + '{0}-{1}.gv'.format(self.name, self.calcnum)
        self.gf = open(fn,'w')
        self.gf.write('digraph g {\n')
        self.idxToHighlight = idxToHighlight  # player index to render highlighted
    @staticmethod
    def nodeName2Str(name):
        # Node names are lists of ints; join as a dotted path, e.g. [1,2] -> "1.2".
        return '.'.join(map(str,name))
    @staticmethod
    def nodeDescHtml(node,idxToHighlight):
        # Build an HTML-table Graphviz label; the highlighted player's row is inverted.
        if node.state.current == idxToHighlight:
            pn = '<tr><td align="left" bgcolor="black"><font color="white">player {0}</font></td></tr>'.format(node.state.current)
        else:
            pn = '<tr><td align="left">player {0}</td></tr>'.format(node.state.current)
        # The root node ([1]) gets a thick red border.
        beg = '"node{0}" [color="red" penwidth=4.0 ' if node.name==[1] else '"node{0}" [ '
        nd = beg.format(GraphTracer.nodeName2Str(node.name))
        nd += 'shape="box" fontname="Courier New" label=<<table border="0" cellborder="0">' + pn
        nd += '<tr><td align="left">best={0}</td></tr>'.format(node.bestValue)
        nd += '<tr><td align="left">stack={0}</td></tr>'.format(node.state.stack)
        nd += '<tr><td align="left">p0 hand={0}</td></tr>'.format(node.state.hand[0])
        nd += '<tr><td align="left">p1 hand={0}</td></tr>'.format(node.state.hand[1])
        # Optional attributes are rendered only when present on the node.
        if hasattr(node,'numMoves'):
            nd +='<tr><td align="left">number of moves={0}</td></tr>'.format(node.numMoves)
        if hasattr(node,'bestMove'):
            nd +='<tr><td align="left">best move={0}</td></tr>'.format(node.bestMove)
        if hasattr(node,'alpha'):
            nd +='<tr><td align="left">alpha={0} beta={1}</td></tr>'.format(node.alpha, node.beta)
        nd += '</table>>];\n'
        return nd
    @staticmethod
    def nodeDesc(node,idxToHighlight):
        # Plain-text fallback label (used when no Graphviz executable is configured).
        return '"node{0}" [label="player{1}\\nbest={2}\\nstack={3}\\np0 hand={4}\\np1 hand={5}\\nbest move={6}"];\n'.format(
            GraphTracer.nodeName2Str(node.name), node.state.current, node.bestValue,node.state.stack, node.state.hand[0], node.state.hand[1], node.bestMove if hasattr(node,'bestMove') else '')
    def traceNode(self, node):
        self.gf.write(self.nodedesc(node, self.idxToHighlight))
    def traceEdge(self, from_, to_, label):
        self.gf.write('"node{0}" -> "node{1}" [label="{2} {3}"]\n'.format(GraphTracer.nodeName2Str(from_), GraphTracer.nodeName2Str(to_), *label))
    def close(self):
        # Terminate the digraph, then optionally render it to SVG.
        self.gf.write('}\n')
        self.gf.close()
        self.gf = None
        if self.exe is not None:
            svg = self.fn[ 0 : self.fn.rfind('.')] + '.svg'
            run([self.exe,'-Tsvg', self.fn, '-o', svg,'-Goverlap=prism'])
class CompositeTracer(Tracer):
    """Fans every tracing call out to a collection of child tracers."""

    def __init__(self, tracers):
        self.tracers = tracers

    def enabled(self):
        return True

    def traceNode(self, n):
        for child in self.tracers:
            child.traceNode(n)

    def traceEdge(self, f, to, l):
        for child in self.tracers:
            child.traceEdge(f, to, l)

    def trace(self, *txt):
        for child in self.tracers:
            child.trace(*txt)

    def restart(self, idxToHighlight):
        for child in self.tracers:
            child.restart(idxToHighlight)

    def close(self):
        for child in self.tracers:
            child.close()
def createTracer(args,type,pIdx):
    """Build the tracer stack for player ``pIdx`` from the options dict ``args``.

    Recognised keys (all optional):
      * 'trace'    -- list of "p<N> <level> [<dir>]" strings; a matching entry
                      selects console logging (no dir) or file logging.
      * 'graph'    -- list of "p<N> <dir>" strings enabling Graphviz output.
      * 'graphexe' -- path to a Graphviz renderer, or None.

    Returns a ConsoleTracer/FileTracer, a GraphTracer, a CompositeTracer of
    both, or a no-op Tracer when nothing matches this player.
    """
    name = '{0}-p{1}'.format(type, pIdx)
    Trace = args.get('trace',None)
    if Trace is None: Trace=[]
    trace = (0,None)
    # Entries look like "p1 2 logs"; the digit after 'p' is the 1-based player.
    for t in Trace:
        tt = t.split(' ')
        pi = int(tt[0][1])-1
        if pi == pIdx:
            trace = ( int(tt[1]),tt[2] if len(tt)==3 else None )
            break
    Graph = args.get('graph',[])
    if Graph is None: Graph=[]
    #else: print("Graph is",Graph)
    graph = None
    # Same "p<N> <dir>" convention as the trace entries; last match wins.
    for g in Graph:
        gg = g.split(' ')
        pi = int( gg[0][1] )-1
        if pi == pIdx:
            graph = gg[1]
    graphExe = args.get('graphexe',None)
    # A trace directory selects file logging; otherwise log to the console.
    if trace[0] > 0:
        logger = ConsoleTracer(trace[0]) if trace[1] is None else FileTracer(trace[0], trace[1]+'\\'+name+'.log')
    if graph is not None:
        grapher = GraphTracer(graph,name,graphExe)
    # Combine whichever tracers were requested; fall back to the no-op Tracer.
    if trace[0] > 0 and graph is not None:
        tracer = CompositeTracer( [logger, grapher] )
    elif trace[0] > 0:
        tracer = logger
    elif graph is not None:
        tracer = grapher
    else:
        tracer = Tracer()
    return tracer
def createLogger(filename):
    """Factory for a game logger; a None filename yields a silent no-op logger."""
    return GameLogger.create(filename)
| true |
5926b6b8cd315292cafd904e637234f7f60ea4b1 | Python | tatiana-kim/contest-algorithms | /contest5/A.py | UTF-8 | 1,958 | 3.609375 | 4 | [] | no_license | # n = t-shorts
# m = pents
def find_min_diff_btwn_shortspents(shorts, pents):
    """Two-pointer scan over two sorted price lists.

    Returns the (shirt price, pants price) pair with the smallest absolute
    difference. Assumes both lists are sorted in ascending order.
    """
    best_shirt = best_pants = 0
    best_gap = 10 ** 7 + 1  # larger than any possible price difference
    i = j = 0
    while i < len(shorts) and j < len(pents):
        gap = abs(shorts[i] - pents[j])
        if gap < best_gap:
            best_gap, best_shirt, best_pants = gap, shorts[i], pents[j]
        # Advance the pointer at the smaller value to try to shrink the gap.
        if shorts[i] > pents[j]:
            j += 1
        else:
            i += 1
    return best_shirt, best_pants
def tests(algo):
shorts = [3, 4]
pents = [1, 2, 3]
assert algo(shorts, pents) == (3, 3), "WA :("
shorts = [4, 5]
pents = [1, 2, 3]
assert algo(shorts, pents) == (4, 3), "WA :("
shorts = [1, 2, 3, 4, 5]
pents = [1, 2, 3, 4, 5]
assert algo(shorts, pents) == (1, 1), "WA :("
shorts = [1, 2, 3, 4, 100000]
pents = [100, 101, 102, 103, 104, 105, 106, 107, 108, 99949]
assert algo(shorts, pents) == (100000, 99949), "WA :("
def main():
    """Read two sorted price lists from stdin and print the closest pair."""
    n = int(input())  # declared length of the shirts list (not used below)
    shorts = [int(i) for i in input().split()] # 3 4
    m = int(input())  # declared length of the pants list (not used below)
    pents = list(map(int, input().split())) # 1 2 3
    print(*find_min_diff_btwn_shortspents(shorts, pents))
if __name__ == "__main__":
    main()
"""
About commented lines:
if minval == 0:
return minshorts, minpents
Эта проверка в худшем случае замедлит ход программы,
так как на каждом шаге цикла будет выполнятся проверка.
Но в среднем это может дать выгоду и единственный способ
узнать будет ли это полезно или нет - запустить на данных,
похожих на реальные. в учебных задачах худший случай обязательно
будет
"""
# to launch tests: tests(find_min_diff_btwn_shortspents)
| true |
66fb3d74954fff394a57e8a8008190f42f3d74f2 | Python | nekapoor7/Python-and-Django | /PythonNEW/Practice/StringFormattedText.py | UTF-8 | 493 | 3.90625 | 4 | [] | no_license | """Write a Python program to display formatted text (width=50) as output."""
import textwrap
sample_text ='''
Python is a widely used high-level, general-purpose, interpreted,
dynamic programming language. Its design philosophy emphasizes
code readability, and its syntax allows programmers to express
concepts in fewer lines of code than possible in languages such
as C++ or Java.
'''
text = textwrap.dedent(sample_text)
wrap = textwrap.fill(text,width=50)
print(wrap) | true |
bad6c82884f81b45aeb2082b1b5f00abbb0f50a4 | Python | SSaratKS/Python-Projects | /100 Python Exercises/exercise97.py | UTF-8 | 610 | 4.1875 | 4 | [] | no_license | #Question:
'''
Create a program that ask the user to submit text repeatedly. The program saves
the changes when User submits SAVE, but doesn't close the program. Program
saves the changes and closes when user sumbits CLOSE.
Hint: Like the previous exercise, but here you need more conditional lines.
'''
#Answer:
'''
file = open("user_data_save_close", "a+")
while True:
line = input("Write a value: ")
if line == "SAVE":
file.close()
file = open("user_data_save_close", "a+")
elif line == "CLOSE":
file.close()
break
else:
file.write(line + "\n")
'''
| true |
5f61bede1ee142a5aa30b959588188c566c8954e | Python | chenhaoenen/FCTest | /python/yield.py | UTF-8 | 639 | 3.171875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Author: chenhao
# Date: 2020/7/4
# Description:
#-------------------------------------------------------------------------------
def foo():
    """Generator demo: yields 4 forever, printing whatever send() injects."""
    print("starting...")
    while True:
        received = yield 4
        # ``received`` is the value passed via send(), or None for plain next().
        print("res:", received)
g = foo()
print(next(g))  # primes the generator: prints "starting..." then yields 4
print("*"*20)
print(next(g))  # plain next(): res is None, then 4 is yielded again
print('-'*20)
print(g.send(7))  # injects 7 as the result of the pending yield; prints "res: 7"
def func():
    """Infinite counter generator: prints then yields 0, 1, 2, ..."""
    counter = 0
    while True:
        print('num=', counter)
        yield counter
        counter += 1
g = func()
print(next(g))  # 0
print('-'*10)
print(next(g))  # 1
import time
# WARNING: ``func`` never terminates, so this loop runs forever, printing
# one number every 2 seconds until the process is interrupted.
for i in g:
    time.sleep(2)
    print(i)
bd958cfd2d4e662e5533256f73cd1db4ab24c9f7 | Python | farnswj1/ChromeDinosaurGameNEAT | /main.py | UTF-8 | 1,248 | 3.390625 | 3 | [
"MIT"
] | permissive | '''
Justin Farnsworth
Google Chrome Dinosaur Game (with NEAT)
November 12, 2020
This is a Python-based implementation of the dinosaur game featured on
Google Chrome. The user can choose to play the game manually or the user
can allow the NEAT algorithm to play the game. If the NEAT algorithm is
used, the AI will try to survive as long as possible. This project was
inspired by Code Bullet, who conceived the idea and made an implementation
in Processing.
To play the game manually, type in the following command:
python main.py
To run the game using NEAT, add the argument 'neat' to the command. For example:
python main.py neat
To enable night mode, add the argument 'night' to the command. For example:
python main.py night
To enable both NEAT and night mode, add both arguments to the command.
The order of these arguments do not matter.
'''
# Imported modules
import chrome_dinosaur_game_neat
from sys import argv
if __name__ == "__main__":
# Check if the user has enabled or disabled the NEAT implementation
enable_neat = ("neat" in argv)
# Check if the user has enabled night mode
night_mode = ("night" in argv)
# Run the game
chrome_dinosaur_game_neat.run(enable_neat=enable_neat, night_mode=night_mode)
| true |
568f44850730ebfbf2a7c368fc514e34656ba1ad | Python | LorinChen/lagom | /lagom/experiment/config.py | UTF-8 | 4,725 | 3.359375 | 3 | [
"MIT"
] | permissive | from itertools import product
class Grid(list):
    r"""Marks a list of values for exhaustive grid search in :class:`Config`."""

    def __init__(self, values):
        # Behaves exactly like ``list``; only the type identity matters,
        # which Config uses to recognise grid-search items.
        list.__init__(self, values)
class Sample(object):
    r"""Wraps a zero-argument sampler; calling the wrapper draws one value."""

    def __init__(self, f):
        self.f = f

    def __call__(self):
        # Delegate to the stored sampler every time a config is generated.
        return self.f()
class Condition(object):
    r"""Wraps a callable that derives an item's value from the assembled config."""

    def __init__(self, f):
        # Fail fast on non-callables instead of at config-generation time.
        assert callable(f)
        self.f = f

    def __call__(self, config):
        return self.f(config)
class Config(object):
    r"""Defines a set of configurations for the experiment.
    The configuration includes the following possible items:
    * Hyperparameters: learning rate, batch size etc.
    * Experiment settings: training iterations, logging directory, environment name etc.
    All items are stored in a dictionary. It is a good practice to semantically name each item
    e.g. `network.lr` indicates the learning rate of the neural network.
    For hyperparameter search, we support both grid search (:class:`Grid`) and random search (:class:`Sample`).
    Call :meth:`make_configs` to generate a list of all configurations, each is assigned
    with a unique ID.
    note::
        For random search over small positive float e.g. learning rate, it is recommended to
        use log-uniform distribution, i.e.
        .. math::
            \text{logU}(a, b) \sim \exp(U(\log(a), \log(b)))
        An example: `np.exp(np.random.uniform(low=np.log(low), high=np.log(high)))`
        Because direct uniform sampling is very `numerically unstable`_.
    .. warning::
        The random seeds should not be set here. Instead, it should be handled by
        :class:`BaseExperimentMaster` and :class:`BaseExperimentWorker`.
    Example::
        >>> config = Config({'log.dir': 'some path', 'network.lr': Grid([1e-3, 5e-3]), 'env.id': Grid(['CartPole-v1', 'Ant-v2'])}, num_sample=1, keep_dict_order=False)
        >>> import pandas as pd
        >>> print(pd.DataFrame(config.make_configs()))
               ID       env.id    log.dir  network.lr
            0   0  CartPole-v1  some path       0.001
            1   1       Ant-v2  some path       0.001
            2   2  CartPole-v1  some path       0.005
            3   3       Ant-v2  some path       0.005
    Args:
        items (dict): a dictionary of all configuration items.
        num_sample (int): number of samples for random configuration items.
            If grid search is also provided, then the grid will be repeated :attr:`num_sample`
            of times.
        keep_dict_order (bool): if ``True``, then each generated configuration has the same
            key ordering with :attr:`items`.
    .. _numerically unstable:
        http://cs231n.github.io/neural-networks-3/#hyper
    """
    def __init__(self, items, num_sample=1, keep_dict_order=False):
        assert isinstance(items, dict), f'dict type expected, got {type(items)}'
        self.items = items
        self.num_sample = num_sample
        self.keep_dict_order = keep_dict_order
    def make_configs(self):
        r"""Generate a list of all possible combinations of configurations, including
        grid search and random search.
        Returns:
            list: a list of all possible configurations
        """
        keys_fixed = []
        keys_grid = []
        keys_sample = []
        # Partition item keys by search type: fixed value, grid search, random search.
        for key in self.items.keys():
            x = self.items[key]
            if isinstance(x, Grid):
                keys_grid.append(key)
            elif isinstance(x, Sample):
                keys_sample.append(key)
            else:
                keys_fixed.append(key)
        if len(keys_sample) == 0:  # if no random search defined, set num_sample=1 to avoid repetition
            self.num_sample = 1
        product_grid = list(product(*[self.items[key] for key in keys_grid]))  # len >= 1, [()]
        list_config = []
        # One config per (grid combination x random repeat); n cycles the grid.
        for n in range(len(product_grid)*self.num_sample):
            x = {'ID': n}
            x = {**x, **{key: self.items[key] for key in keys_fixed}}
            # Index into the grid combination for this config via modulo.
            for idx, key in enumerate(keys_grid):
                x[key] = product_grid[n % len(product_grid)][idx]
            # Draw a fresh random value for every sampled item, per config.
            for key in keys_sample:
                x[key] = self.items[key]()
            if self.keep_dict_order:
                x = {**{'ID': x['ID']}, **{key: x[key] for key in self.items.keys()}}
            # Conditional items are resolved last, from the fully assembled config.
            for key, value in x.items():
                if isinstance(value, Condition):
                    x[key] = value(x)
            list_config.append(x)
        return list_config
| true |
15cac5bfa8a6e91b81ca1cbb84ace664701b090a | Python | daviz888/python_works | /city_country.py | UTF-8 | 207 | 3.984375 | 4 | [] | no_license | import screen
screen.clear()
# Prints city and country using function
# Exercise 8.6
def city_country(city, country):
print(f'{city.title()}, {country.title()}')
city_country('manila', 'philippines') | true |
d23926f9f6623fdddf41a4c547f74e6a458d3963 | Python | robertcalvertphd/Atomic_NN | /KerasExample.py | UTF-8 | 2,882 | 2.9375 | 3 | [] | no_license |
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import random
class ExamplePlayer():
    """Synthetic QB/WR pairing whose game stats are drawn at random.

    ``week1`` holds one simulated game's stat tuple; ``week2Score`` holds the
    binary label of a SECOND, independently simulated game.
    """
    def __init__(self):
        # qbSkill is the sum of two d10 rolls (range 2..20); wrSkill one d10.
        self.qbSkill = random.randint(1, 10)
        self.qbSkill += random.randint(1, 10)
        self.wrSkill = random.randint(1,10)
        # Two separate simulations: features from game 1, label from game 2.
        self.week1 = self.getStats()[0]
        self.week2Score = self.getStats()[1]
    def getStats(self):
        """Simulate one game; returns ((atts, cmpl, yards, ints, tds, 1), label).

        label is 1 when the fantasy score (yards/25 + 6*tds - 2*ints) > 20.
        """
        drives = random.randint(5,10)
        atts = 0
        cmpl = 0
        yards = 0
        ints = 0
        tds = 0
        for d in range(drives):
            # Five plays per drive, outcomes weighted by qb/wr skill.
            for i in range(5):
                if random.randint(1,self.qbSkill+2) > 1:
                    atts += random.randint(1,4)
                    if random.randint(1,self.qbSkill+2) > 2 and random.randint(1,self.wrSkill+2)>2:
                        yards += random.randint(1,10)
                        # 10% long-TD chance for skilled QBs.
                        if random.randint(1,10) == 10 and self.qbSkill > 5:
                            yards += 40
                            tds += 1
                            # NOTE(review): reassigning the loop variable does NOT
                            # end the drive; this line has no effect.
                            i = 8
                        cmpl += 1
                    else:
                        if random.randint(1,self.qbSkill+1) == 1 and random.randint(1,4) == 1:
                            ints += 1
                else:
                    # NOTE(review): i ranges over 0..4, so i == 7 is never true
                    # and this rushing-TD branch is unreachable.
                    if i ==7 and random.randint(1, self.qbSkill) > 2 and random.randint(1, self.wrSkill) > 2:
                        tds += 1
        # Standard fantasy scoring, collapsed to a binary label at 20 points.
        score = yards/25 + tds*6 - ints*2
        if score > 20: score = 1
        else: score = 0
        return (atts, cmpl, yards, ints, tds, 1), score
class TestObject:
    """Synthetic person record: random weight/height plus a male size bonus."""

    def __init__(self):
        # The RNG call order below must stay fixed so seeded runs reproduce.
        self.weight = random.randint(25, 40) + 10
        self.height = random.randint(1, 7) + 52
        self.male = random.randint(0, 1)
        if self.male:
            # Males get extra height, and proportionally more weight (x1.8).
            extra_height = random.randint(5, 15)
            self.height += extra_height
            self.weight += int(extra_height * 1.8)

    def getStats(self):
        """Return ([weight, height], male) as a (features, label) pair."""
        return [self.weight, self.height], self.male
def createDataSet():
    """Build 1000 (features, label) pairs from random TestObject instances."""
    samples = []
    labels = []
    for _ in range(1000):
        features, label = TestObject().getStats()
        samples.append(features)
        labels.append(label)
    return samples, labels
def createPlayerDataSet():
    """Build 10000 (week-1 stats, week-2 label) pairs; prints the positive rate."""
    samples = []
    labels = []
    positive = 0
    for _ in range(10000):
        player = ExamplePlayer()
        samples.append(player.week1)
        labels.append(player.week2Score)
        positive += player.week2Score
    # 10000 samples / 100 -> percentage of positive labels.
    print(positive / 100)
    return samples, labels
# Three-layer classifier over the 6 per-game stat features; the final
# softmax yields a probability over the two score classes {0, 1}.
model = Sequential([
    Dense(6, input_shape=(6,), activation='relu'),
    Dense(4, activation="relu"),
    Dense(2, activation="softmax")
])
model.compile(Adam(lr=.01), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
d = createPlayerDataSet()
# NOTE(review): samples is wrapped in an extra list here — verify Keras
# receives the intended (10000, 6) array rather than a length-1 outer axis.
samples = [d[0]]
labels = d[1]
model.fit(samples, labels, batch_size=50, epochs=25, shuffle=True, verbose=2)
| true |
31788a0f65fe702c01bc4bd6e77c0907942b9cc2 | Python | evaneill/vae | /VAE/models/loss.py | UTF-8 | 5,175 | 2.59375 | 3 | [] | no_license |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions.multinomial import Multinomial
import numpy as np
from torch import Tensor as T
import torch
import math
def VRBound(alpha,model,q_samples,q_mu, q_log_sigma,K=None,optimize_on='full_bound'):
    """ Monte-carlo estimate of variational renyi bound
    Args:
        alpha (float): alpha of renyi alpha-divergence
        model (VRalphaNet): net from models.network
        q_samples (list): list of the output latent samples from training, with the (sampled) data as the first element.
            (i.e. should be the result of model.forward(data))
        q_mu (list): output mu of network forward() method
        q_log_sigma (list): resulting log_sigma output list of network forward() method
        K (int): # of importance samples. If None, use model approximation choice.
        optimize_on (str, optional): "full_bound": sum over all samples inside log
            "sample": sample according to alpha importance weight
            "max": specifically VR-max
    """
    # alpha = torch.float(alpha)
    if K is None:
        K=model.encoder.K
    # coerce to float
    alpha = float(alpha)
    prior_mu = Variable(torch.zeros_like(q_samples[-1]),requires_grad=False) # Prior is N(0,1) latent distribution in the ultimate encoder layer
    prior_log_sigma = Variable(torch.zeros_like(q_samples[-1]),requires_grad=False) # To work with innard of the LL function just use log(sigma^2) instead of sigma
    # Start the ratio with log p(h_L) under the standard-normal prior.
    log_pq_ratio=gaussian_log_likelihood(q_samples[-1],(prior_mu,prior_log_sigma))
    #log_pq_ratio=torch.zeros_like(q_samples[-1].sum(axis=1))
    # Walk encoder samples against the decoder layers (decoder runs in reverse).
    for current_sample, next_sample, qmu , qlog_sigma, p_layer in zip(q_samples,q_samples[1:],q_mu,q_log_sigma,model.decoder.layers[::-1]):
        p_out = next_sample
        for unit in p_layer:
            p_out, pmu, plog_sigma = unit.forward(p_out)
        if plog_sigma is not None:
            # then this unit is a stochastic gaussian decoder layer. want LL p(h_i | h_(i+1)) - LL q(h_(i+1) | h(i))
            log_pq_ratio+=gaussian_log_likelihood(current_sample,(pmu,plog_sigma)) - gaussian_log_likelihood(next_sample,(qmu,qlog_sigma))
        elif pmu is not None and plog_sigma is None:
            # then pmu is actually theta of a bernoulli distribution
            log_pq_ratio+=bernoulli_log_likelihood(current_sample,pmu) - gaussian_log_likelihood(next_sample,(qmu,qlog_sigma))
    # At this point log_pq_ratio is log(p(*)/q(*)) for each observation
    if abs(alpha-1)<=1e-3:
        # alpha ~ 1 degenerates to the ELBO; optimize_on doesn't matter here.
        return torch.sum(log_pq_ratio)/K
    elif optimize_on=='full_bound':
        # Log-sum-exp over the K importance samples (max-subtracted for stability).
        log_pq_ratio = log_pq_ratio.reshape([-1,K]) * (1-alpha)
        log_pq_minus_max = log_pq_ratio - log_pq_ratio.max(axis=1,keepdim=True).values
        log_pq_sum = torch.log(torch.sum(torch.exp(log_pq_minus_max),axis=1,keepdim=True)/K)+log_pq_ratio.max(axis=1,keepdim=True).values
        return (1/(1-alpha))*torch.sum(log_pq_sum)
    elif optimize_on=="sample":
        # Draw one of the K samples per observation, weighted by the alpha weights.
        log_pq_matrix = log_pq_ratio.reshape([-1, K]) * (1-alpha)
        log_pq_minus_max = log_pq_matrix - log_pq_matrix.max(axis=1, keepdim=True).values
        ws = torch.exp(log_pq_minus_max)
        ws_normalized = ws / torch.sum(ws, axis=1, keepdim=True)
        sample_dist = Multinomial(1,ws_normalized)
        log_pq_matrix = log_pq_matrix.gather(1,sample_dist.sample().argmax(1,keepdim=True))
        return (1/(1-alpha))*torch.sum(log_pq_matrix)
    elif optimize_on=='max':
        # VR-max: keep only the largest weighted ratio per observation.
        log_ws_matrix = log_pq_ratio.reshape([-1, K]) * (1-alpha)
        log_ws_matrix = log_ws_matrix.max(axis=1).values
        # BUG FIX: this branch previously returned the undefined name
        # ``log_pq_matrix`` (a NameError at runtime); it must sum log_ws_matrix.
        return (1/(1-alpha))*torch.sum(log_ws_matrix)
def gaussian_log_likelihood(sample, params):
    """Per-observation log-likelihood of *sample* under a diagonal Gaussian.

    Args:
        sample: tensor of shape (batch, dim) to evaluate
        params (tuple): (mu, log_sigma) tensors broadcastable to sample's shape,
            where log_sigma is the log of the standard deviation

    Returns:
        torch.Tensor: vector of length batch with the log-likelihoods
    """
    mu, log_sigma = params
    sigma = torch.exp(log_sigma)
    n_dims = sample.shape[1]
    # log N(x; mu, sigma^2) summed over dimensions:
    #   -D/2 log(2*pi) - sum(log sigma) - 1/2 sum(((x - mu)/sigma)^2)
    const = -.5 * n_dims * torch.log(torch.tensor(2 * np.pi))
    quad = .5 * torch.sum(((sample - mu) / sigma) ** 2, axis=1)
    return const - torch.sum(log_sigma, axis=1) - quad
def bernoulli_log_likelihood(sample, theta):
    """Per-observation log-likelihood of binary *sample* under Bernoulli(theta).

    Args:
        sample: tensor of shape (batch, dim) with values in {0, 1}
        theta (Tensor): success probabilities, same shape as sample

    Returns:
        torch.Tensor: vector of length batch with the log-likelihoods
    """
    # 1e-19 guards against log(0) when theta saturates at 0 or 1.
    per_element = sample * torch.log(theta + 1e-19) + (1 - sample) * torch.log(1 - theta + 1e-19)
    return torch.sum(per_element, axis=1)
| true |
d4fcd1a5c75aa42345a9f334d20f701200760f9d | Python | fpaissan/raman_spectrograms_analysis | /src/features/utils.py | UTF-8 | 468 | 2.625 | 3 | [] | no_license | # Written by Francesco Paissan
from progress.bar import ShadyBar
import pickle
import glob
def save_feat_files(feat, path):
    """Serialize a feature payload to *path* with pickle (binary mode)."""
    with open(path, "wb") as out_file:
        pickle.dump(feat, out_file)
def load_features(path):
    """Load pickled feature files found directly under ``path``.

    NOTE(review): the loop rebinds ``f`` (the loop variable) to the open file
    handle, and ``data_x`` is overwritten on every iteration — so despite
    walking (and progress-barring) every file, only the LAST file's payload
    is returned. Looks like a bug; confirm whether callers expect a list of
    all payloads before changing the return type.
    """
    file_list = glob.glob('{0}/*'.format(path))
    with ShadyBar(f"Loading dataset...", max=len(file_list)) as bar:
        for f in file_list:
            with open(f, "rb") as f:  # shadows the loop variable ``f``
                data_x = pickle.load(f)
            bar.next()
    return data_x
f52cd5e61ebb425f8107c7447f76bb5371ddfcf0 | Python | communitysnowobs/validation | /validation/Elevation.py | UTF-8 | 2,736 | 3.0625 | 3 | [] | no_license | import pandas as pd
import requests
import validation.utils as ut
import validation.creds as creds
BASE_ELEVATION_URL = 'https://maps.googleapis.com/maps/api/elevation/json'
def el_data(points=[]):
    """Retrieves elevation data from Google Elevation API.

    Keyword arguments:
    points -- List of (lat, long) coordinates to retrieve elevation data at

    Returns a DataFrame with one row per input point: lat, long, elevation.
    """
    records = []
    # Split into batches for API requests
    for batch in ut.batches(points, 256):
        params = {
            # BUG FIX: this previously serialized *all* points in every batch's
            # request; only the current batch belongs in this request.
            'locations': "|".join([",".join([str(point[0]), str(point[1])]) for point in batch]),
            'key': creds.get_credential('google_key')
        }
        # (debug print of params removed: it leaked the API key to stdout)
        response = requests.get(BASE_ELEVATION_URL, params=params)
        data = response.json()
        if 'results' not in data:
            raise ValueError(data)
        records.extend(data['results'])
    # Batches preserve input order, so records line up with points positionally.
    parsed = [{ 'lat' : point[0], 'long' : point[1], **parse_elevation(record)} for point, record in zip(points, records)]
    df = pd.DataFrame.from_records(parsed)
    return df
def parse_elevation(record):
    """Extract the fields we keep from one Google Elevation API result.

    Keyword arguments:
    record -- Segment of JSON returned by Google Elevation API
    """
    return {'elevation': record['elevation']}
def average_elevation(box, grid_size = 16):
    """Approximates elevation over a bounding box using a grid of points.

    Keyword arguments:
    box -- Dictionary with 'xmin'/'xmax'/'ymin'/'ymax' bounds
    grid_size -- Number of intervals used in each direction; capped at 16 so
                 the whole grid fits in a single API request
    """
    # Restrict grid size to fit in API request
    grid_size = min(grid_size, 16)
    points = []
    for lat in ut.intervals(box['ymin'], box['ymax'], grid_size):
        for long in ut.intervals(box['xmin'], box['xmax'], grid_size):
            points.append((lat, long))
    params = {
        'locations': "|".join([",".join(['%.4f' % point[0], '%.4f' % point[1]]) for point in points]),
        # BUG FIX: this referenced the undefined name ``config`` (the module
        # imports ``creds``, not ``config``), raising NameError at runtime.
        # Use the same credential helper as el_data().
        'key': creds.get_credential('google_key')
    }
    # (debug prints of params/response removed: params leaked the API key)
    response = requests.get(BASE_ELEVATION_URL, params=params)
    data = response.json()
    if 'results' not in data:
        raise ValueError(data)
    records = data['results']
    elevations = [record['elevation'] for record in records]
    return sum(elevations) / len(elevations)
def merge_el_data(df):
    """Merges elevation data with snow depth observations data.

    Keyword arguments:
    df -- Dataframe with 'latitude' and 'longitude' columns to enrich
    """
    coords = list(zip(df['latitude'], df['longitude']))
    return pd.merge(df, el_data(coords))
| true |
2c10d40dcf5ec98620e940cec63d72c255a4e1d3 | Python | haramrit09k/HappyEarth | /app.py | UTF-8 | 6,077 | 2.5625 | 3 | [] | no_license | import streamlit as st
from PIL import Image
from clf import predict2
import time
classes=['beer-bottle','book', 'can', 'cardboard', 'egg', 'flower', 'food-peels', 'fruit', 'jute', 'leaf', 'meat', 'newspaper', 'paper-plate', 'pizza-box', 'plant', 'plastic-bag', 'plastic-bottle', 'spoilt-food', 'steel-container', 'thermocol']
st.set_option('deprecation.showfileUploaderEncoding', False)
st.title("Dont Trash Me")
st.markdown("## Upload an image and I will tell you how you can reuse/recycle it (i.e. if it is recyclable :stuck_out_tongue:)\n<br>", unsafe_allow_html=True)
st.write("Currently only supports the following objects: \n")
st.write(classes)
file_up = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg", "webp"])
plastic_bottles = {
"name": "Plastic Bottle",
"reusable":True,
"use":"bird feeders, storage containers, stationary stands, etc.",
"cfoot":"82.8 grams CO2 (for one 500ml plastic bottle)"
}
plastic_bags = {
"name": "Plastic Bag",
"reusable":True,
"use":"storing objects while moving, substitute trash can liners.",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
can = {
"name": "Soda Can",
"reusable":True,
"use":"To plant, to hold writing utensils",
"cfoot":"142 grams CO2 (for one 355 ml aluminum soda can)"
}
cardboard = {
"name": "Cardboard",
"reusable":True,
"use":"",
"cfoot":"3.31 tonnes CO2 (for one tonne of cardboard)"
}
pizza_box = {
"name": "Pizza Box",
"reusable":True,
"use":"",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
steel_container = {
"name": "Steel Container",
"reusable":True,
"use":"",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
beer_bottle = {
"name": "Beer Bottle",
"reusable":True,
"use":"",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
book = {
"name": "Book",
"reusable":True,
"use":"Art crafts",
"cfoot":"2-3 kg CO2 equivalent (for one book)"
}
egg = {
"name": "Egg",
"reusable":False,
"use":"",
"cfoot":"Unknown"
}
flower = {
"name": "Flower",
"reusable":False,
"use":"",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
food_peels = {
"name": "Food Peels",
"reusable":False,
"use":"",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
fruit = {
"name": "Fruits",
"reusable":False,
"use":"",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
jute = {
"name": "Jute",
"reusable":False,
"use":"",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
leaf = {
"name": "Leaf",
"reusable":False,
"use":"",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
meat = {
"name": "Meat",
"reusable":False,
"use":"",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
newspaper = {
"name": "Newspaper",
"reusable":True,
"use":"Art crafts",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
plants = {
"name": "Plants",
"reusable":False,
"use":"",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
spoilt_food = {
"name": "Spoilt Food",
"reusable":False,
"use":"",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
thermocol = {
"name": "Thermocol",
"reusable":True,
"use":"",
"cfoot":"33 grams CO2 (for one average plastic grocery bag)"
}
if file_up is not None:
image = Image.open(file_up)
st.image(image, caption='Uploaded Image.', use_column_width=True)
st.write("")
# st.write("Just a second...")
labels = predict2(file_up)
st.write(labels[0][1])
with st.spinner('Processing...'):
time.sleep(5)
# print out the top prediction label with score
if labels[0][1] < 70:
st.markdown("<div style='color: blue; font-size: 40px;'> I am not sure what this object is! </div>", unsafe_allow_html=True)
st.markdown("<div style='color: red; font-size: 20px;'> I am not smart enough... *cries in corner* </div>", unsafe_allow_html=True)
st.image('media/crying.gif')
else:
res = {}
if labels[0][0] == 'can':
res = can
elif labels[0][0] == 'plastic-bottle':
res = plastic_bottles
elif labels[0][0] == 'plastic-bag':
res = plastic_bags
elif labels[0][0] == 'beer-bottle':
res = beer_bottle
elif labels[0][0] == 'cardboard':
res = cardboard
elif labels[0][0] == 'book':
res = book
elif labels[0][0] == 'egg':
res = egg
elif labels[0][0] == 'flower':
res = flower
elif labels[0][0] == 'food-peels':
res = food_peels
elif labels[0][0] == 'fruit':
res = fruit
elif labels[0][0] == 'jute':
res = jute
elif labels[0][0] == 'leaf':
res = meat
elif labels[0][0] == 'newspaper':
res = newspaper
elif labels[0][0] == 'pizza-box':
res = pizza_box
elif labels[0][0] == 'plant':
res = plants
elif labels[0][0] == 'spoilt-food':
res = spoilt_food
elif labels[0][0] == 'steel-container':
res = steel_container
elif labels[0][0] == 'thermocol':
res = thermocol
st.write("This looks like...")
st.markdown("<div style='color: blue; font-size: 40px;'>"+res.get('name')+"</div>", unsafe_allow_html=True)
st.markdown("<div style='color: green; font-size: 20px;'> Reusable? : "+str(res.get('reusable'))+"</div>", unsafe_allow_html=True)
if res.get('reusable') == True:
st.markdown("<div style='color: green; font-size: 20px;'> How can I reuse it?: "+str(res.get('use'))+"</div>", unsafe_allow_html=True)
st.markdown("<div style='color: green; font-size: 20px;'> Carbon footprint: "+str(res.get('cfoot'))+"</div>", unsafe_allow_html=True)
| true |
cc1ff6f23f118545e18ebb999d01570117f146e7 | Python | lishuwen88/cython_stuff | /ksmith/ch05/python_particle.py | UTF-8 | 239 | 3.15625 | 3 | [] | no_license | class Particle(object):
"""Simple Particle type."""
def __init__(self, m, p, v):
self.mass = m
self.position = p
self.velocity = v
def get_momentum(self):
return self.mass * self.velocity
| true |
d825b37373c63f647f1c4a1397e37ee303b3197b | Python | jcartus/SCFInitialGuess | /butadien/scripts/aimd_runs.py | UTF-8 | 1,916 | 2.84375 | 3 | [
"MIT"
] | permissive | """This script will run md runs for all mol files in a folder.
Author:
Johannes Cartus, QCIEP, TU Graz
"""
from os import listdir
from os.path import join
from pyQChem import inputfile
from pyQChem.utilities import _readinput
from SCFInitialGuess.utilities.usermessages import Messenger as msg
from SCFInitialGuess.utilities.misc import cd
import multiprocessing as mp
def worker(rem, molecule, index):
msg.info("Starting Job: " + str(index))
try:
job = inputfile()
job.add(rem)
job.add(molecule)
job.run(name="Butadien_{0}.mol".format(index))
msg.info("Finished JOb: " + str(index))
except:
msg.warn("Job failed: " + str(index))
def fetch_rem(inp_file):
"""Read rem from a settings file or another input file"""
return _readinput(inp_file).rem
def fetch_molecules(folder):
"""Read all molecules from .mol files in a folder
(in pyqchem molecule format)
"""
# TODO put this funciton into the SCFInitialGuess.utilities.dataset.Molcule
# class!
files = [file for file in listdir(folder) if ".mol" in file]
return [_readinput(join(folder, f)).molecule for f in files]
def main(folder="/home/jcartus/Repos/SCFInitialGuess/butadien/data/MDRuns/", number_of_processes = 6):
msg.info("Fetching aimd run settings ...", 1)
rem = fetch_rem(join(folder, "MDRunSettings.inp"))
msg.info("Fetching qchem molecules ...", 1)
molecules = fetch_molecules(folder)
msg.info("Setting up parallelisation ...", 1)
pool = mp.Pool(processes=number_of_processes)
with cd(folder):
msg.info("Starting the calculations ...", 1)
for i, mol in enumerate(molecules):
pool.apply_async(worker, (rem, mol, i + 1))
pool.close()
pool.join()
msg.info("Closed worker pool.")
msg.info("Calculations Done", 1)
if __name__ == '__main__':
main() | true |
6291d4b1c9dce136988a2e9f08fb48ab781f49f2 | Python | beidou9313/deeptest | /第一期/杭州-咫尺/Day1/dictiionary/func_Dic.py | UTF-8 | 331 | 3.203125 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding:utf-8 -*-
__author__ = u'Heatherwyz'
if __name__ == "__main__":
    # Renamed from `dict` so the builtin dict type is not shadowed.
    demo_dict = {u"DeepTest": u"开源优测", u"book": u"快学Python3"}
    # Number of key/value pairs.
    print(len(demo_dict))
    # Convert the dictionary into its string representation.
    str_d = str(demo_dict)
    print(str_d)
    print(demo_dict)
    # Inspect the types of both objects.
    print(type(demo_dict))
    print(type(str_d))
| true |
d53fbb3d279834cdc71351f834fc937e17f489b6 | Python | py503/win10_pycharm_project | /blog/views.py | UTF-8 | 6,603 | 2.796875 | 3 | [] | no_license | # 分页插件包
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from .models import Article, Category, Banner, Tag, Link
def hello(request):
return HttpResponse('欢迎使用Django!')
def index_test(request):
'''主页'''
# 对Article进行声时并实例化,然后生成对象allarticle
allarticle = Article.objects.all()
print(allarticle)
# 把查询到的对象,封装到一下文
context = {
'allarticle' : allarticle,
}
# 把上下文传到模板页面index.html里
return render(request, 'index_test.html', context)
# 首页
def index(request):
allcategory = Category.objects.all() # 通过Category表查出所有分类
banner = Banner.objects.filter(is_active=True)[0:4] # 查询所有幻灯图数据,并进行切片
tui = Article.objects.filter(tui__id=1)[:3] # 查询推荐位ID为1的文章
allarticle = Article.objects.all().order_by('-id')[0:10] # order_by('-id')为数据排序方式,[0:10]为只获取10索引切片,只获取最新的10篇文章
# hot = Article.objects.all().order_by('?')[:10]#随机推荐
# hot = Article.objects.filter(tui__id=3)[:10] #通过推荐进行查询,以推荐ID是3为例
hot = Article.objects.all().order_by('views')[:10] # 通过浏览数进行排序
remen = Article.objects.filter(tui__id=2)[:6] # 右侧热门推荐
tags = Tag.objects.all() # 右侧所有标签
link = Link.objects.all() # 尾部的友情链接
# 把查询出来的分类封装到上下文里
context = {
'allcategory' : allcategory,
'banner' : banner,
'tui' : tui,
'allarticle' : allarticle, # 最新文章
'hot' : hot,
'remen' : remen,
'tags' : tags,
'link' : link,
}
return render(request, 'index.html', context) #把上下文传到index.html页面
# 列表页
def list(request, lid):
list = Article.objects.filter(category_id=lid) # 获取通过URL传进来的lid,然后筛选出对应文章
cname = Category.objects.get(id=lid) # 获取当前文章的栏目名
remen = Article.objects.filter(tui__id=2)[:6] # 右侧的热门推荐
allcategory = Category.objects.all() # 导航所有分类
tags = Tag.objects.all() # 右侧所有文章标签
page = request.GET.get('page') # 在URL中获取当前页面数
paginator = Paginator(list, 5) # 对查询到的数据对象list进行分页,设置超过5条数据就分页
try:
list = paginator.page(page) # 获取当前页码的记录
except PageNotAnInteger:
list = paginator.page(1) # 如果用户输入的页码不是整数时,显示第1页的内容
except EmptyPage:
list = paginator.page(paginator.num_pages) # 如果用户输入的页数不在系统的页码列表中时,显示最后一页内容
return render(request, 'list.html', locals()) # locals()的作用是返回一个包含当前作用域里面的所有变量和它们的值的字典
# 内容
def show(request, sid):
'''
代码里Article.objexts.get(id=sid), 因为获取的是单个对象,所以用get方法,id=sid查询URL传过来的指定id文章
previous_blog和netx_blog是文章上一篇下一篇,我们通过发布文章时间来进行筛选文章的,比当肖文章发布的时间小就是上一
篇,比当前文章发布时间大就是下一篇
category=show.category.id,则是指定查询的文章为当前分类下的文章
文章的浏览数,我们先通过show.views查询到当前浏览数,然后对这个数进行加1操作,意思是每访问一次页面(视图函数),
就进行加1操作.然后通过show.save()进行保存.
:param request:
:param sid:
:return:
'''
show = Article.objects.get(id=sid) # 查询指定ID的文章
allcategory = Category.objects.all() # 导航上的分类
tags = Tag.objects.all() # 右侧所有标签
remen = Article.objects.filter(tui__id=2)[:6] # 右侧热门推荐
hot = Article.objects.all().order_by('?')[:10] # 内容下面的您可能感兴趣的文章,随机推荐
previous_blog = Article.objects.filter(created_time__gt=show.created_time, category=show.category.id).first()
netx_blog = Article.objects.filter(created_time__lt=show.created_time, category=show.category.id).last()
show.views = show.views + 1
show.save()
return render(request, 'show.html', locals()) # locals()的作用是返回一个包含当前作用域里面的所有变量和它们的值的字典
# 标签页
def tag(request, tag):
list = Article.objects.filter(tags__name=tag) # 获取通过URL传进来的tag,然后进行查询文章
tname = Tag.objects.get(name=tag) # 获取当前搜索的标签名
remen = Article.objects.filter(tui__id=2)[:6] # 右侧的热门推荐
allcategory = Category.objects.all() # 导航所有分类
tags = Tag.objects.all() # 右侧所有文章标签
page = request.GET.get('page') # 在URL中获取当前页面数
paginator = Paginator(list, 5) # 对查询到的数据对象list进行分页,设置超过5条数据就分页
try:
list = paginator.page(page) # 获取当前页码的记录
except PageNotAnInteger:
list = paginator.page(1) # 如果用户输入的页码不是整数时,显示第1页的内容
except EmptyPage:
list = paginator.page(paginator.num_pages) # 如果用户输入的页数不在系统的页码列表中时,显示最后一页内容
return render(request, 'list.html', locals()) # locals()的作用是返回一个包含当前作用域里面的所有变量和它们的值的字典
# 搜索页
def search(request):
ss = request.GET.get('search') # 获取搜索的关键词
list = Article.objects.filter(title__icontains=ss) # 获取到搜索关键词通过标题进行匹配
remen = Article.objects.filter(tui__id=2)[:6]
allcategory = Category.objects.all()
page = request.GET.get('page')
tags = Tag.objects.all()
paginator = Paginator(list, 10)
try:
list = paginator.page(page) # 获取当前页码的记录
except PageNotAnInteger:
list = paginator.page(1) # 如果用户输入的页码不是整数时,显示第1页的内容
except EmptyPage:
list = paginator.page(paginator.num_pages) # 如果用户输入的页数不在系统的页码列表中时,显示最后一页的内容
return render(request, 'search.html', locals())
# 关于我们
def about(request):
pass
| true |
1d880f24962104979be397dc45e6becdaf62307c | Python | agautam-git/HaikuJAM-spellChecker | /output.py | UTF-8 | 538 | 3.140625 | 3 | [] | no_license | import requests, argparse
from flask import Flask, request, jsonify
parser = argparse.ArgumentParser(description='Enter the word: ')
parser.add_argument('-WORD','--word', help='input word', required=True)
def main(args):
    """POST the word from the parsed CLI args to the local spell-correct
    service and print/return its formatted response.

    args -- argparse Namespace with a `word` attribute
    Returns the formatted "Word: ... | Sub words: ..." string.
    """
    url = 'http://localhost:5000/spellCorrect/'
    # The service expects a JSON body of the form {"word": "<input>"}.
    data = {'word': args.word}
    output = requests.post(url, json=data)
    out_data = output.json()
    # NOTE(review): assumes the response JSON contains 'word' and
    # 'sub_words' keys — confirm against the service implementation.
    out_text = 'Word: '+str(out_data['word'])+' | Sub words: '+str(out_data['sub_words'])
    print(out_text)
    return out_text
if __name__ == '__main__':
main(parser.parse_args()) | true |
db5a7beaac205748d20bca165465b16ccc7b27b6 | Python | abhishekmishragithub/python2-basic-exercises | /reverse of a string.py | UTF-8 | 224 | 4.125 | 4 | [] | no_license | '''
To reverse a string without var[::-1]
'''
# input() replaces Python 2's raw_input so the script runs on Python 3.
string = input("Enter a value: ")
reversed_string = ''
# Walk the characters back to front (still without using var[::-1]).
for ch in reversed(string):
    reversed_string += ch
print(reversed_string)
| true |
94a09b9653f0de7e1737bf146b14ecd6d16d732c | Python | yi-guo/coding-interview | /leetcode/python/150-evaluateReversePolishNotation.py | UTF-8 | 1,022 | 4.5625 | 5 | [] | no_license | #!/usr/bin/python
# Evaluate the value of an arithmetic expression in Reverse Polish Notation.
# Valid operators are +, -, *, /. Each operand may be an integer or another expression.
# Some examples:
# ["2", "1", "+", "3", "*"] -> ((2 + 1) * 3) -> 9
# ["4", "13", "5", "/", "+"] -> (4 + (13 / 5)) -> 6
def evalRPN(tokens):
    """Evaluate an arithmetic expression given in Reverse Polish Notation.

    Valid operators are +, -, * and /; every other token is an integer
    operand.  Division truncates toward zero (e.g. 7 / -2 == -3).

    tokens -- list of operand/operator strings, e.g. ["2", "1", "+", "3", "*"]
    Returns the integer value of the expression.
    """
    stack = []
    for token in tokens:
        # Token is an integer or negative integer (e.g. "-2")
        if token.isdigit() or token[1:].isdigit():
            stack.append(int(token))
            continue
        if token == '+':
            stack.append(stack.pop() + stack.pop())
        elif token == '-':
            n = stack.pop()
            stack.append(stack.pop() - n)
        elif token == '*':
            stack.append(stack.pop() * stack.pop())
        else:
            n = stack.pop()
            m = stack.pop()
            # Integer division truncating toward zero.  The original
            # `m / n` relied on Python 2 floor division; under Python 3
            # it would produce a float, so truncate explicitly using
            # exact integer arithmetic.
            quotient = abs(m) // abs(n)
            stack.append(-quotient if (m < 0) != (n < 0) else quotient)
    return stack.pop()
def main():
    """Demo entry point: evaluate a sample RPN expression and print it."""
    # print() call form keeps this runnable on Python 3 (the original
    # used the Python 2 print statement).
    print(evalRPN(["4", "-2", "/", "2", "-3", "-", "-"]))
main()
| true |
2c375c40bdfc23679cccd96653ad254b13961d7c | Python | azedlee/fizz_buzz | /fizzbuzz_extra.py | UTF-8 | 328 | 4.1875 | 4 | [] | no_license | min_num = input("Please input min number: ")
# Read the bounds as integers.  Under Python 3 input() returns a string
# (the original relied on Python 2's input() evaluating to an int), so
# convert both bounds explicitly before using them with range().
max_num = int(input("Please input max number: "))
min_num = int(min_num)
for i in range(min_num, max_num):
    # Check the combined case first; the per-number output is unchanged.
    if i % 15 == 0:
        print("FizzBuzz")
    elif i % 3 == 0:
        print("Fizz")
    elif i % 5 == 0:
        print("Buzz")
    else:
        print(i)
| true |
ab3b4fd1e5d3beeec449c6f237f53af66646574b | Python | Roelio69/Workflow-course-11 | /GeneFilter1.py | UTF-8 | 548 | 2.734375 | 3 | [] | no_license | # filters genes containing numeric information
class class1():
    """Filters genes whose numeric value lies strictly between -1 and 1."""

    def filterLPs(self, userInput, userOutput):
        """Copy lines of `userInput` whose second tab-separated field is
        strictly between -1.0 and 1.0 into the file `userOutput` + ".txt".

        Each written line is the repr of the split field list (the third
        field is stripped of its trailing newline first), matching the
        original output format.

        userInput  -- path of the tab-separated input file; its first two
                      lines are headers and are skipped
        userOutput -- output path without the ".txt" extension
        """
        # Context managers guarantee both files are closed even if a
        # malformed line raises while parsing (the original leaked the
        # handles in that case).
        with open(userInput, "r") as bestand, \
                open(userOutput + ".txt", "w") as output:
            next(bestand)  # skip the two header lines
            next(bestand)
            for line in bestand:
                splitline = line.split("\t")
                value = float(splitline[1])
                # Keep only values in the open interval (-1.0, 1.0).
                if -1.0 < value < 1.0:
                    splitline[2] = splitline[2].strip('\n')
                    output.write("%s\n" % splitline)
| true |
2d429702cae39266d87834aa08c96efc679e40be | Python | Mittenss2010/PythonPractice | /tests/工具-计算bbox中心坐标--宽高--两点距离.py | UTF-8 | 887 | 3.296875 | 3 | [] | no_license |
import math
class Point():
def __init__(self, x, y):
self.x = x
self.y = y
class BboxUtils():
def __init__(self):
pass
@staticmethod
def get_bbox_center(bbox):
'''
输入: bbox 信息
返回: bbox 中心坐标
'''
xmin, ymin, xmax, ymax = [int(x) for x in bbox]
xcenter = (xmin + xmax)/2.
ycenter = (ymin + ymax)/2.
return Point(xcenter,ycenter)
@staticmethod
def get_bbox_width_height(bbox):
'''
输入: bbox 信息
返回: bbox 中心坐标
'''
xmin, ymin, xmax, ymax = [int(x) for x in bbox]
return [xmax-xmin, ymax-ymin]
@staticmethod
def get_2D_distence(point_a, point_b):
return math.sqrt(math.pow((point_a.x - point_b.x),2) + math.pow((point_a.y-point_b.y),2))
| true |
9b793b79ed5c82b7b78e87fe35f1f0e95e425676 | Python | curly-bois/Chips | /chipSolver/scripts/wire.py | UTF-8 | 345 | 3.34375 | 3 | [] | no_license |
class Wire(object):
'''
A wire in the grid, nice to save some essentials
'''
def __init__(self, start, end, route):
self.start = start
self.end = end
self.route = route
self.length = len(route) - 1
self.min_len = (abs(start[0] - end[0]) +
abs(start[1] - end[1]))
| true |
93d241d999b799aea678feb524c42b9060eead55 | Python | Jdporter2/rockPaperScissors | /rockPaperScissors.py | UTF-8 | 3,990 | 4.34375 | 4 | [] | no_license |
#set variable keepPlaying to true
keepPlaying = True
#While keepPlaying is true:
while keepPlaying == True:
print("Welcome to Rock, Paper, Scissors!")
print("The way you will play is you will choose 1 for rock, 2 for paper, 3 for scissors. Rock beats scissors, paper beats rock, and scissors beats paper. The game will be best 2 out of 3. If you wish to quit, you can type q.")
#print statement welcoming players to the game
#print statement stating the rules (best 2 out of 3. Press 'q' to quit)
#make a key that assigns a number to each choice for the computer
#(rock is 1, scissors is 2, paper is 3)
'''rock = 1
paper = 2
scissors = 3 '''
#import the random function - the computer makes its choice randomly from this function
import random
#set computer's score to 0
cpu = 0
#set player's score to 0
player = 0
#while player's score is less than 2 and computer's score is less than 2:
while((player < 2) and (cpu < 2)):
cpuChoice = random.randint(1,3)
#computer's choice = random number between 1 and 3 (random function gets used here)
#player's choice = input(ask player to select Rock, Paper, or Scissors)
playerChoice = input("Pick 1, 2, or 3: ")
#start checking user options
#userChoice = userChoice.lower()
#if players inputs 'q':
if(playerChoice == "q"):
keepPlaying = False
break
#set keepPlaying to False
#stop the loop
elif(((playerChoice == "1") and (cpuChoice == 1)) or ((playerChoice == "2") and (cpuChoice == 2)) or ((playerChoice == "3") and (cpuChoice == 3))):
print("Draw!")
print("Computer Score:" + str( cpu))
print("Player Score:" + str( player))
#else if (player inputs rock and computer chooses rock) or
#(player inputs paper and computer chooses paper) or
#(player inputs scissors and computer chooses scissors):
#print out DRAW
#print out player's score and computer's score
elif(((playerChoice == "1") and (cpuChoice == 3)) or ((playerChoice == "2") and (cpuChoice == 1)) or ((playerChoice == "3") and (cpuChoice == 2))):
player += 1
print("You won this round!")
print("Computer Score:" + str( cpu))
print("Player Score:" + str( player))
#else if (player inputs rock and computer scissors) or
#(player inputs scissors and computer chooses paper) or
#(player inputs papers and computer chooses rock):
#add one to the player's score
#print out player's score and computer's score
elif(((playerChoice == "1") and (cpuChoice == 2)) or ((playerChoice == "2") and (cpuChoice == 3)) or ((playerChoice == "3") and (cpuChoice == 1))):
cpu += 1
print("You lost this round :(")
print("Computer Score:" + str( cpu))
print("Player Score:" + str( player))
#else if (player inputs rock and computer paper) or
#(player inputs scissors and computer rock) or
#(player inputs paper and computer scissors):
#add onne to the computer's score
#print out player's score and computer's score
else:
print("Your input was invalid, try again...")
#else:
#tell the user their input was invalid
if player >= 2:
print("Congrats! You won... thanks for playing!")
elif cpu >= 2:
print("Oh no! You lost... thanks for playing.")
print("Computer Score: " + str(cpu))
print("Player Score: " + str(player))
#print statement tanking the players for playing
#if player's score is 2:
#print statement letting the player know they won
#if computer's score is 2:
#print statement letting player know that the computer won
#print out the player's score and computer's score | true |
c17d080ddf962d3644c8cac27f06e11684867308 | Python | MMohan1/leaderboard-python | /test/leaderboard/reverse_competition_ranking_leaderboard_test.py | UTF-8 | 4,458 | 2.59375 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | from leaderboard.leaderboard import Leaderboard
from leaderboard.competition_ranking_leaderboard import CompetitionRankingLeaderboard
import unittest
import sure
class ReverseCompetitionRankingLeaderboardTest(unittest.TestCase):
def setUp(self):
self.leaderboard = CompetitionRankingLeaderboard(
'ties', order=Leaderboard.ASC, decode_responses=True)
def tearDown(self):
self.leaderboard.redis_connection.flushdb()
def test_leaders(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.rank_member('member_2', 50)
self.leaderboard.rank_member('member_3', 30)
self.leaderboard.rank_member('member_4', 30)
self.leaderboard.rank_member('member_5', 10)
leaders = self.leaderboard.leaders(1)
leaders[0]['rank'].should.equal(1)
leaders[1]['rank'].should.equal(2)
leaders[2]['rank'].should.equal(2)
leaders[3]['rank'].should.equal(4)
leaders[4]['rank'].should.equal(4)
def test_correct_rankings_for_leaders_with_different_page_sizes(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.rank_member('member_2', 50)
self.leaderboard.rank_member('member_3', 30)
self.leaderboard.rank_member('member_4', 30)
self.leaderboard.rank_member('member_5', 10)
self.leaderboard.rank_member('member_6', 50)
self.leaderboard.rank_member('member_7', 50)
self.leaderboard.rank_member('member_8', 30)
self.leaderboard.rank_member('member_9', 30)
self.leaderboard.rank_member('member_10', 10)
leaders = self.leaderboard.leaders(1, page_size=3)
leaders[0]['rank'].should.equal(1)
leaders[1]['rank'].should.equal(1)
leaders[2]['rank'].should.equal(3)
leaders = self.leaderboard.leaders(2, page_size=3)
leaders[0]['rank'].should.equal(3)
leaders[1]['rank'].should.equal(3)
leaders[2]['rank'].should.equal(3)
leaders = self.leaderboard.leaders(3, page_size=3)
leaders[0]['rank'].should.equal(7)
leaders[1]['rank'].should.equal(7)
leaders[2]['rank'].should.equal(7)
def test_correct_rankings_for_around_me(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.rank_member('member_2', 50)
self.leaderboard.rank_member('member_3', 30)
self.leaderboard.rank_member('member_4', 30)
self.leaderboard.rank_member('member_5', 10)
self.leaderboard.rank_member('member_6', 50)
self.leaderboard.rank_member('member_7', 50)
self.leaderboard.rank_member('member_8', 30)
self.leaderboard.rank_member('member_9', 30)
self.leaderboard.rank_member('member_10', 10)
leaders = self.leaderboard.around_me('member_4')
leaders[0]['rank'].should.equal(1)
leaders[4]['rank'].should.equal(3)
leaders[9]['rank'].should.equal(7)
def test_retrieve_the_rank_of_a_single_member_using_rank_for(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.rank_member('member_2', 50)
self.leaderboard.rank_member('member_3', 30)
self.leaderboard.rank_for('member_3').should.equal(1)
self.leaderboard.rank_for('member_1').should.equal(2)
self.leaderboard.rank_for('member_2').should.equal(2)
def test_retrieve_the_score_and_rank_for_a_single_member_using_score_and_rank_for(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.rank_member('member_2', 50)
self.leaderboard.rank_member('member_3', 30)
self.leaderboard.score_and_rank_for('member_3')['rank'].should.equal(1)
self.leaderboard.score_and_rank_for('member_1')['rank'].should.equal(2)
self.leaderboard.score_and_rank_for('member_2')['rank'].should.equal(2)
def test_correct_rankings_and_scores_when_using_change_score_for(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.rank_member('member_2', 50)
self.leaderboard.rank_member('member_3', 30)
self.leaderboard.rank_member('member_4', 30)
self.leaderboard.rank_member('member_5', 10)
self.leaderboard.change_score_for('member_3', 10)
self.leaderboard.rank_for('member_3').should.equal(3)
self.leaderboard.rank_for('member_4').should.equal(2)
self.leaderboard.score_for('member_3').should.equal(40.0)
| true |
07f8d824c6a92db8433e85b8fc201b5dcc168369 | Python | EMilborn/earthquake | /utils/player.py | UTF-8 | 783 | 2.828125 | 3 | [] | no_license | from lagcomp import LagComp
from vector import Vector
from tick import TICKMULT
class PlayerInput:
def __init__(self):
self.up = False
self.down = False
self.right = False
self.left = False
self.mouse1 = False
self.mousePos = None
self.lockTime = 0 # used for stopping movement at beginning of a round
class Player:
SPEED = 3 * TICKMULT
RADIUS = 25
HEALTH = 100
def __init__(self, id):
self.pos = Vector(-1, -1)
self.input = PlayerInput()
self.userid = id
self.health = self.HEALTH
self.cooldown = 0
self.lagcomp = LagComp()
def restartInput(self):
self.input = PlayerInput()
self.input.lockTime = 360 * TICKMULT # 3 second lock
| true |
1b142464bec0d71d560c440d3513f959d3e2a93b | Python | Aasthaengg/IBMdataset | /Python_codes/p02381/s882620811.py | UTF-8 | 274 | 3.3125 | 3 | [] | no_license | import math
# Read datasets until a count of 0 is given; for each dataset print the
# population standard deviation of the scores to four decimal places.
while True:
    n = int(input())
    if n == 0:
        break
    scores = [int(v) for v in input().split()]
    mean = sum(scores) / n
    # Population variance: mean of the squared deviations.
    variance = sum((v - mean) ** 2 for v in scores) / n
    print("{0: .4f}".format(math.sqrt(variance)))
| true |
a148439f34b553e5923b01c64c065b54e6ece79c | Python | ChinaChenp/Knowledge | /interview/interview_python/mianshixinde/lesson4/4.5.1.py | UTF-8 | 1,230 | 4.375 | 4 | [] | no_license | # 局部最大(小)值
# 给定无序数组arr,已知arr中任意两个相邻的数都不相等。写一个函数,只需返回arr中任意一个局部最小出现的位置即可
# 解法
# 1)arr长度为1时,arr[0]是局部最小。
# 2)arr的长度为N(N>1)时,
# ①如果arr[0]<arr[1],那么arr[0]是局部最小;
# ②如果arr[N-1]<arr[N-2],那么arr[N-1]是局部最小;
# ③如果0<i<N-1,既有arr[i]<arr[i-1],又有arr[i]<arr[i+1],那么arr[i]是局部最小
#考虑找最小值
def getLessIndex(arr):
    """Return the index of some local minimum of arr, or -1 when arr is empty.

    Adjacent elements are assumed to be distinct; arr[i] is a local
    minimum when it is smaller than each neighbour that exists.
    """
    if not arr:
        return -1
    last = len(arr) - 1
    # Boundary cases: a lone element, or an edge element smaller than
    # its only neighbour, is already a local minimum.
    if last == 0 or arr[0] < arr[1]:
        return 0
    if arr[last] < arr[last - 1]:
        return last
    # Binary search over the interior: stepping toward a descending
    # side always keeps a local minimum inside the window.
    lo, hi = 1, last - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if arr[mid] > arr[mid - 1]:
            hi = mid - 1
        elif arr[mid] > arr[mid + 1]:
            lo = mid + 1
        else:
            return mid
    return -1
arr = [2, 9, 10, 4, 2, 1, 11, 14, 8, 25]
#arr = [4, 2, 1, 0, 5]
re = getLessIndex(arr)
print(re, arr[re]) | true |
679ef64c347faf740397a81d5a96f93adcf77eaf | Python | fekocinas/EP | /Ex 3.py | UTF-8 | 2,367 | 3.53125 | 4 | [] | no_license | import json
with open("Estoque.json","r") as EstoqueFile:
estoque = json.loads(EstoqueFile.read())
print("Controle de estoque" "\n 0 - sair" "\n 1 - adicionar item" "\n 2 - remover item" "\n 3 - alterar quantidade do produto" "\n 4 - imprimir estoque \n 5 - alterar preço do produto")
a = int(input("Faça sua escolha: "))
while a != 0:
if a == 1:
b=input('Nome do produto:')
while b in estoque:
print('Produto já está cadastrado')
b=input('Nome do produto:')
c=int(input('Quantidade inicial:'))
while c < 0:
print ('A quantidade inicial não pode ser negativa')
c=int(input('Quantidade inicial:'))
estoque[b]={"Quantidade":c}
g=float(input("Preço: "))
while g<0:
print ('O preço não pode ser negativa')
g=float(input('Preço:'))
estoque[b]["Preço"]=g
elif a == 2:
d = input("Nome do produto:")
if d in estoque:
del estoque [d]
elif d not in estoque:
print ( " Elemento não encontrado")
elif a==3:
f=input("Nome do produto: ")
if f in estoque:
g=int(input("Quantidade: "))
s=-g
if s>estoque[f]["Quantidade"] :
while s>estoque[f]["Quantidade"]:
g=int(input("Quantidade: "))
s=-g
if s<=estoque[f]["Quantidade"]:
estoque[f]["Quantidade"]+=g
else:
estoque[f]["Quantidade"]+=g
print ("Novo estoque de {0} é {1} ".format(f,estoque[f]["Quantidade"]))
elif f not in estoque:
print ("Elemento não encontrado")
elif a == 4:
print(estoque)
elif a==5:
h=input("Nome do produto: ")
while h not in estoque:
h=input("Nome do produto: ")
i=float(input("Preço: "))
estoque[h]["Preço"]=i
print("Controle de estoque" "\n 0 - sair" "\n 1 - adicionar item" "\n 2 - remover item" "\n 3 - alterar quantidade do produto" "\n 4 - imprimir estoque \n 5 - alterar preço do produto")
a = int(input("Faça sua escolha:"))
if a == 0:
print ('Até mais')
with open("Estoque.json","w") as EstoqueFile:
EstoqueFile.write(json.dumps(estoque, sort_keys = True, indent=4))
| true |
63e976da11f045b635de6e38c0d0cdf15064db9c | Python | siamsalman/Imposter | /imposter.py | UTF-8 | 1,636 | 3.125 | 3 | [] | no_license | import turtle
body_color = 'red'
glass_color = '#9acedc'
screen = turtle.getscreen()
imp = turtle.Turtle()
def body():
imp.pensize(18)
imp.fillcolor(body_color)
imp.begin_fill()
imp.right(90)
imp.forward(50)
imp.right(180)
imp.circle(40, -180)
imp.right(180)
imp.forward(200)
imp.right(180)
imp.circle(100, -180)
imp.backward(20)
imp.left(15)
imp.circle(500, -20)
imp.backward(15)
imp.circle(40, -180)
imp.left(7)
imp.backward(50)
imp.up()
imp.left(90)
imp.forward(10)
imp.right(90)
imp.down()
imp.right(240)
imp.circle(50, -70)
imp.end_fill()
def glass():
imp.up()
imp.right(230)
imp.forward(100)
imp.left(90)
imp.forward(20)
imp.right(90)
imp.down()
imp.fillcolor(glass_color)
imp.begin_fill()
imp.right(150)
imp.circle(90,-55)
imp.right(180)
imp.forward(1)
imp.right(180)
imp.circle(10, -65)
imp.right(180)
imp.forward(110)
imp.right(180)
imp.circle(50, -190)
imp.right(170)
imp.forward(80)
imp.right(180)
imp.circle(40, -30)
imp.end_fill()
def backpack():
imp.up()
imp.right(60)
imp.forward(100)
imp.right(90)
imp.forward(75)
imp.fillcolor(body_color)
imp.begin_fill()
imp.down()
imp.forward(30)
imp.right(255)
imp.circle(300, -30)
imp.right(260)
imp.forward(30)
imp.end_fill()
body()
glass()
backpack()
imp.screen.exitonclick()
#Thank You
#Siam Salman
| true |
3387867a6ae3e336d8d9ba3bd1facfb2e1a644cd | Python | Mohamed-y-ph/Python-scripts | /udacity.py | UTF-8 | 20,144 | 3.765625 | 4 | [] | no_license |
'''import the required libraries needed for running the program without errors'''
import pandas as pd
import time
import numpy as np
# Greet the user and show the available cities.
print('\nWelcome to this program which was designed by Mohamed Yaser to compute some statistics about bikeshare\n')
print("Which city do you want to deal with? Washington, New York, or Chicago?")
# Maps the short city code typed by the user to its CSV data file.
cities={ 'c': 'chicago.csv',
    'ny': 'new_york_city.csv',
    'w': 'washington.csv' }
def filter_data(file):
    """Ask the user for month/day filters, load the chosen city's CSV,
    apply the filters and print (and return) the resulting DataFrame.

    NOTE(review): the `file` argument is immediately overwritten — the
    data is always re-read from `cities[city]`, where `city` is the
    module-level choice made elsewhere in this script.

    file -- unused; kept for backward compatibility with existing callers
    Returns the filtered DataFrame, or None when the input was invalid.
    """
    try:
        month = input('\nPlease enter the first 3 letters of the month you want to filter (e.g. Feb,Mar...etc), or type all\nYour answer: ').lower()
        day = input('\nPlease enter the day you want to filter (e.g. Saturday,Sunday...etc), or type all\nYour answer: ')
        file = pd.read_csv(cities[city])
        file['Start Time'] = pd.to_datetime(file['Start Time'])
        # extract month and day of week from Start Time to create new columns
        file['Month'] = file['Start Time'].dt.month
        file['Weekday'] = file['Start Time'].dt.day_name()
        if month != 'all':
            # Only the first six months exist in the data set.
            months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun']
            month = months.index(month) + 1
            file = file[file['Month'] == month]
        if day != 'all':
            file = file[file['Weekday'] == day.title()]
        print(file)
        return file
    except (ValueError, KeyError, FileNotFoundError):
        # Unknown month abbreviation, bad city/column key or missing file
        # (narrowed from a bare except that hid unrelated failures).
        print('This is not a valid response')
        return None
def view_data(file):
    """Interactively page through `file` five rows at a time.

    file -- DataFrame whose rows are shown on request
    """
    answer = input('\nWould you like to view the first 5 rows of data?\nEnter (y/n): ').lower()
    start_loc = 0
    while answer == 'y':
        # Show only the next five rows.  The original sliced iloc[0:n],
        # re-printing every previously shown row on each iteration.
        print(file.iloc[start_loc:start_loc + 5])
        start_loc += 5
        answer = input("Do you want to show more five rows? (y/n): ").lower()
def most_popular_month(x):
    """Print and return the most frequent month in x['Start Time'].

    x -- DataFrame with a 'Start Time' column of parseable timestamps
    Returns the month number (1-12), or None when the column is missing
    or cannot be parsed.
    """
    try:
        pm = pd.to_datetime(x['Start Time']).dt.month.mode()[0]
        print('-'*20, '\nThe most popular month is: ', pm)
        return pm
    except (KeyError, ValueError, TypeError):
        # Missing 'Start Time' column or unparseable timestamp values
        # (narrowed from a bare except).
        print('\nOops!!\nIt seems that this is not the correct name')
        return None
def most_popular_day(file):
    '''Print and return the most popular day in file['Start Time'].

    NOTE(review): `.dt.day` is the day of the month (1-31), not the
    weekday — kept as-is to preserve the original behaviour.

    file -- DataFrame with a 'Start Time' column of parseable timestamps
    Returns the day number, or None when the column is missing/invalid.
    '''
    try:
        pdd = pd.to_datetime(file['Start Time']).dt.day.mode()[0]
        print('-'*20, '\nThe most popular day is: ', pdd, '\n')
        return pdd
    except (KeyError, ValueError, TypeError):
        # Narrowed from a bare except that hid unrelated failures.
        print('\nThis is not the right name of the column (Please consider upper and lower case)')
        return None
def most_popular_hour(file):
    '''Print the most frequent start hour (0-23) found in file['Start Time'].'''
    try:
        hours = pd.to_datetime(file['Start Time']).dt.hour
        busiest = hours.mode()[0]
        print('-'*20,'\nThe most crowded hour is: ', busiest, '\n')
    except:
        print('This is not the right name of the column (Please consider upper and lower case)')
def most_ststaion(file):
    '''Prompt for the start-station column name and print its most common value.

    Interactive: reads the column name from stdin, so the caller cannot
    choose it programmatically. Any lookup failure (wrong column name) is
    reported instead of raised.
    '''
    try:
        s = input('\nPlease enter the name of the column containg the start station in the file: ')
        # mode()[0] is the most frequent start station in the chosen column.
        ss=file[s].mode()[0]
        print('-'*20,'\nThe most common start station is', ss)
    except:
        # NOTE(review): bare except also hides typos/bugs, not just bad column names.
        print('This is not the right name of the column (Please consider upper and lower case)')
def most_enstation(file):
    '''Prompt for the end-station column name and print its most common value.

    Interactive twin of most_ststaion(); reads the column name from stdin.
    '''
    try:
        endstation = input('\nPlease enter the name of the column containg the end station in the file: ')
        # mode()[0] is the most frequent end station in the chosen column.
        most_common=file[endstation].mode()[0]
        print('-'*20,'\nThe most common end station is ',most_common)
    except:
        # NOTE(review): bare except also hides typos/bugs, not just bad column names.
        print('This is not the right name of the column (Please consider upper and lower case)')
def most_cotrip(file):
    '''Prompt for start/end station column names and print the most common trip.

    The trip key is the row-wise concatenation "<start> to <end>", so the
    mode is the most frequent (start, end) pair.
    '''
    try:
        starttime=input('\nPlease enter the name of the column containg the start station in the file: ')
        endtime=input('\nPlease enter the name of the column containg the end station in the file: ')
        most_common_trip=(file[starttime]+' to '+file[endtime]).mode()[0]
        print('-'*20,'\nThe most common trip is from',most_common_trip)
    except:
        # NOTE(review): bare except also hides typos/bugs, not just bad column names.
        print('This is not the right name of the column (Please consider upper and lower case)')
def total_travel_time(file):
    '''Print the total travel time (hours and minutes) over all trips.

    BUG FIX: the original summed the hour components and minute components
    of Start/End Time *separately*, which goes wrong (even negative) for any
    trip crossing an hour or day boundary. This version sums real
    timedeltas and then splits into hours and minutes.
    '''
    try:
        durations = pd.to_datetime(file['End Time']) - pd.to_datetime(file['Start Time'])
        total_minutes = int(durations.sum().total_seconds() // 60)
        st = total_minutes // 60
        sm = total_minutes % 60
        print('-'*20,'\nThe total travel time is ',st,'hours and',sm,'minutes')
    # Narrowed from a bare `except:`; only column/parse problems are expected.
    except (KeyError, TypeError, ValueError):
        print('This is not the right name of the column (Please consider upper and lower case)')
def average_travel_time(file):
    '''Print the mean trip duration computed from the Start/End Time columns.

    NOTE(review): the printed message says "total" but the value is the mean
    duration -- message preserved byte-for-byte from the original.
    '''
    try:
        durations = pd.to_datetime(file['End Time']) - pd.to_datetime(file['Start Time'])
        print('\nThe total travel time is', np.mean(durations))
    except:
        print('This is not the right name of the column (Please consider upper and lower case)')
def c_user_type(file):
    '''Print the number of users of each type from the 'User Type' column.'''
    try:
        counts = file['User Type'].value_counts()
        print('-'*20,'\nThe counts of each user type are ',counts)
    except KeyError:  # narrowed from a bare `except:` -- only a missing column is expected
        print('Something went wrong')
def gender_count(file):
    '''Print the number of users of each gender from the 'Gender' column.

    Only meaningful for datasets that actually include a Gender column
    (e.g. not Washington).
    '''
    try:
        counts = file['Gender'].value_counts()
        print('-'*20,'\nThe gender distribution is as following:',counts)
    except KeyError:  # narrowed from a bare `except:` -- only a missing column is expected
        print('Something went wrong')
def birth_years(file):
    '''Print the oldest, youngest and most common birth years ('Birth Year').

    Uses pandas Series.min()/max() (NaN-aware) instead of the builtin
    min()/max(), and drops the unused `year` variable from the original.
    '''
    try:
        birth = file['Birth Year']
        oldest = birth.min()
        youngest = birth.max()
        common = birth.mode()[0]
        print('-'*20,'\nThe youngest user was born in',youngest,'and the oldest user was born in',oldest,'\n',common,'is the most common birth year')
    except KeyError:  # narrowed from a bare `except:` -- only a missing column is expected
        print('Something went wrong')
# Top-level REPL: pick a city, optionally view/filter the data, then drill
# into nested statistics menus. Each city branch repeats the same menu
# structure; 's' backs out of a sub-menu, and selecting 's' at the city
# prompt ends the program.
while True:
    city = input("\nFor Washington press (W), for New York press (NY), for Chicago press (C), to exit press (s)\nYour choice is: ").lower()
    if city =='w':
        file=pd.read_csv(cities[city])
        print('\nYou have selected Washington')
        view_data(file)
        # NOTE(review): `filter` shadows the builtin of the same name.
        filter=input('\nDo you want to filter data by certain month or day? (y/n)').lower()
        if filter == 'y':
            filter_data(file)
        print('Please select the statistics you want to compute\n')
        try:
            sts=input('\nFor travel times statistics, press (1)\nFor stations and trips statistics, press (2)\nFor trip duration statistics, press (3)\n\nTo return to the main menu, press (S)\nYour Answer is: ')
        except:
            print('This is an unidentified letter')
        if sts=='1':
            #choose the type of sts to be calculated from the data
            # NOTE(review): this first prompt's answer is discarded -- the
            # loop below immediately asks again and overwrites `tsts`.
            tsts = input(
                '\n\nFor calculating the most common month, press (m)\nFor calculating the most popular day of week, press(d)\nFor calculating the most popular hour, press(h)\nFor returning to the main menu, press(s)\nYour Choice is: ')
            while True:
                tsts = input(
                    '\n\nFor calculating the most common month, press (m)\nFor calculating the most popular day of week, press(d)\nFor calculating the most popular hour, press(h)\nFor returning to the main menu, press(s)\nYour Choice is: ')
                if tsts=="m":
                    most_popular_month(file)
                    q = input('\nDo you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tsts=='d':
                    most_popular_day(file)
                    q = input('\nDo you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tsts=='h' :
                    most_popular_hour(file)
                    q = input('\nDo you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tsts=='s':
                    break
                else:
                    print('An unidentified letter!!')
        elif sts=='2':
            while True:
                print('\nWhat is the type of statistics would you like to compute?')
                type=input('\nThe most common start station (1)\nThe most common end station (2)\nThe most common trip (3)\nTo return back, press (s)\nYour choice is: ')
                if type=='1':
                    most_ststaion(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif type=='2':
                    most_enstation(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif type=='3':
                    most_cotrip(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif type =='s':
                    break
                else:
                    print('Something is wrong')
        elif sts== '3':
            while True:
                tst=input('\nFor computing the total travel time, press (1)\nFor computing the average travel time, press (2),\nTo return back, press (s)\nYour answer: ')
                if tst == '1':
                    total_travel_time(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tst =='2':
                    average_travel_time(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                # NOTE(review): `type` here is stale (left over from a previous
                # menu); this branch should test `tst`, so pressing 's' may not
                # exit as intended.
                elif type =='s':
                    break
                else:
                    print('Please enter either (1) or (2)')
    elif city=='c':
        file=pd.read_csv(cities[city])
        print('\nYou have selected Chicago\n')
        view_data(file)
        filter = input('\nDo you want to filter data by certain month or day? (y/n)').lower()
        if filter == 'y':
            filter_data(file)
        print('\nPlease select the statistics you want to compute')
        sts=input('\nFor travel times statistics, press (1)\nFor stations and trips statistics, press (2)\nFor trip duration statistics, press (3)\nFor User Info, press (4)\nTo return to the main menu, press (s)\nYour answer: ')
        if sts=='1':
            #choose the type of sts to be calculated from the data
            while True:
                tsts = input(
                    '\nFor calculating the most common month, press (m)\nFor calculating the most popular day of week, press(d)\nFor calculating the most popular hour, press(h)\nFor returning to the main menu, press(s). ')
                if tsts=="m":
                    most_popular_month(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tsts=='d':
                    most_popular_day(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tsts == 'h':
                    most_popular_hour(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tsts=='s':
                    break
                else:
                    print('\nSorry, this is an unidentfied letter')
        elif sts=='2':
            while True:
                print('\nWhat is the type of statistics would you like to compute?')
                # NOTE(review): unlike the Washington branch, this menu offers
                # no 's' option, so the only way out is answering 'n' to the
                # follow-up prompt.
                type=input('\nThe most common start station (1)\nThe most common end station (2)\nThe most common trip (3)\nYour answer: ')
                if type=='1':
                    most_ststaion(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif type=='2':
                    most_enstation(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif type=='3':
                    most_cotrip(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                else:
                    print('Something is wrong')
        elif sts== '3':
            while True:
                tst=input('\nFor computing the total travel time, press (1)\nFor computing the average travel time, press (2),\nTo exit, press (3)\nYour answer: ')
                if tst == '1':
                    total_travel_time(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tst =='2':
                    average_travel_time(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tst == '3':
                    break
                else:
                    print('Please enter either (1) or (2)')
        elif sts =='4':
            while True:
                tst = input(
                    '\nFor user types counts, press (1)\nFor the number of males and females, press (2)\nFor birth date statistics, press (3)\nYour answer: ')
                if tst == '1':
                    c_user_type(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tst =='2':
                    gender_count(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tst=='3':
                    birth_years(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                else:
                    print('Something went wrong')
        else:
            print('Sorry, something went wrong')
    elif city =='ny':
        file=pd.read_csv(cities['ny'])
        view_data(file)
        print('\nYou have selected New York City\n')
        filter = input('\nDo you want to filter data by certain month or day? (y/n)').lower()
        if filter == 'y':
            filter_data(file)
        print('Please select the statistics you want to compute\n')
        sts=input('\nFor travel times statistics, press (1)\nFor stations and trips statistics, press (2)\nFor trip duration statistics, press (3)\nFor User Info , press (4)\nTo exit the program, press (s).\nYour answer: ')
        if sts=='1':
            while True:
                tsts=input('\nFor calculating the most common month, press (m)\nFor calculating the most popular day of week, press(d)\nFor calculating the most popular hour, press(h)\nFor returning to the main menu, press(s).\nYour answer: ')
                if tsts=="m":
                    most_popular_month(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q=='n':
                        break
                elif tsts=='d':
                    most_popular_day(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q=='n':
                        break
                elif tsts=='h' :
                    most_popular_hour(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q=='n':
                        break
                elif tsts=='s':
                    break
                else:
                    print('Sorry, this is an unidentfied letter')
        elif sts=='2':
            while True:
                print('\nWhat is the type of statistics would you like to compute?')
                type=input('\nThe most common start station (1)\nThe most common end station (2)\nThe most common trip (3)\nYour answer: ')
                if type=='1':
                    most_ststaion(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif type=='2':
                    most_enstation(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif type=='3':
                    most_cotrip(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                else:
                    print('Something is wrong')
        elif sts== '3':
            while True:
                tst=input('\nFor computing the total travel time, press (1)\nFor computing the average travel time, press (2),\nTo exit, press (3)\nYour answer: ')
                if tst == '1':
                    total_travel_time(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tst =='2':
                    average_travel_time(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tst == '3':
                    break
                else:
                    print('Please enter either (1) or (2)')
        elif sts == '4':
            while True:
                tst=input('\nFor user types counts, press (1)\nFor the number of males and females, press (2)\nFor birth date statistics, press (3)\nYour answer: ')
                if tst =='1':
                    c_user_type(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tst =='2':
                    gender_count(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                elif tst=='3':
                    birth_years(file)
                    q = input('Do you want to compute another stat? (y/n)')
                    if q == 'n':
                        break
                else:
                    print('Something went wrong')
        else:
            print('Sorry, something went wrong')
    elif city =='s':
        break
        # NOTE(review): unreachable -- `break` above always exits first.
        exit()
    else:
        print('This is an unidentified letter')
| true |
6370415adc38d844f702dcb00dbfe357add9aac4 | Python | rileyjmurray/RandomizedLS | /Haoyun/randomized_least_square_solver/Test/LSRN/LSRN_over_for_error_test.py | UTF-8 | 3,026 | 2.609375 | 3 | [] | no_license | from math import ceil, sqrt
import numpy as np
from scipy.sparse.linalg import LinearOperator
from numpy.linalg import svd
from Haoyun.randomized_least_square_solver.Iter_Solver.Scipy_LSQR import lsqr_copy
def LSRN_over_for_error_test(A, b, tol=1e-8, gamma=2, iter_lim=1000):
    """
    LSRN computes the min-length solution of linear least squares via LSQR
    with randomized preconditioning, collecting per-iteration error
    histories from the instrumented LSQR copy.

    Parameters
    ----------
    A : {matrix, sparse matrix, ndarray, LinearOperator} of size m-by-n, m > n
    b : (m,) ndarray
    tol : float, tolerance such that norm(A*x-A*x_opt)<tol*norm(A*x_opt)
    gamma : float (>1), oversampling factor
    iter_lim : integer, the max iteration number

    Returns
    -------
    x : (n,) ndarray, the min-length solution
    itn : int, iteration number
    flag : int, LSQR stop reason
    r : int, the (numerical) rank of A
    plus five per-iteration error-history lists from lsqr_copy.

    Raises
    ------
    NotImplementedError
        If A is square or under-determined (m <= n). The original printed a
        message and then crashed with a NameError on the return statement.
    """
    m, n = A.shape
    if m <= n:
        raise NotImplementedError("The under-determined case is not implemented.")
    # Sketch A with a blocked Gaussian random projection: A_tilde = G @ A.
    s = ceil(gamma * n)
    A_tilde = np.zeros([s, n])
    blk_sz = 128
    for i in range(int(ceil(1.0 * s / blk_sz))):
        blk_begin = i * blk_sz
        blk_end = np.min([(i + 1) * blk_sz, s])
        blk_len = blk_end - blk_begin
        G = np.random.randn(blk_len, m)
        A_tilde[blk_begin:blk_end, :] = G.dot(A)
    # NOTE(review): a uniform scaling changes only the singular-value scale,
    # not the conditioning of the preconditioned system -- confirm this
    # factor is intentional (it affects the rank cutoff below).
    A_tilde = A_tilde * 1000
    U_tilde, Sigma_tilde, VH_tilde = svd(A_tilde, False)
    # Numerical rank from a machine-precision cutoff on the sketch's spectrum.
    rcond = Sigma_tilde[0] * np.min(A.shape) * np.finfo(float).eps
    r = np.sum(Sigma_tilde > rcond)
    # Preconditioner N: with high probability A @ N is well conditioned.
    N = VH_tilde[:r, :].T / Sigma_tilde[:r]

    def LSRN_matvec(v):
        # (A N) v
        return A.dot(N.dot(v))

    def LSRN_rmatvec(v):
        # (A N)^T v
        return N.T.dot(A.T.dot(v))

    # Re-estimate gamma and bound the condition number of A @ N.
    gamma_new = s / r
    cond_AN = (sqrt(gamma_new) + 1) / (sqrt(gamma_new) - 1)
    AN = LinearOperator(shape=(m, r), matvec=LSRN_matvec, rmatvec=LSRN_rmatvec)
    result = lsqr_copy(AN, b, atol=tol / cond_AN, btol=tol / cond_AN, iter_lim=iter_lim)
    y = result[0]
    flag = result[1]
    itn = result[2]
    absolute_normal_equation_error_list = result[-5]
    relative_normal_equation_error_list = result[-4]
    S2_stopping_criteria_error_list = result[-3]
    relative_residual_error_list = result[-2]
    relative_error_list = result[-1]
    # Map the preconditioned solution back to the original variables: x = N y.
    x = N.dot(y)
    return x, itn, flag, r, absolute_normal_equation_error_list, relative_normal_equation_error_list, \
           S2_stopping_criteria_error_list, relative_residual_error_list, relative_error_list
| true |
fac70b5e9aa85da1e4ea508fd8f923ba654a46d3 | Python | omerfarballi/Python-Dersleri | /15.lists-methods.py | UTF-8 | 501 | 3.296875 | 3 | [] | no_license | number =[0,1,2,3,6,5,98,65,2,3,5]
letters = ["a","b","c","k","p","ü","k","a"]
# min/max work on both numbers and strings (strings compare lexicographically);
# each assignment overwrites the previous `values`.
values = min(number)
values = min(letters)
values = max(number)
values = max(letters)
# Slicing returns a new list of the first five elements.
values =number[:5]
# In-place mutation: replace, append (note: appends `letters` as one nested
# element), insert at the front, pop from both ends, remove by value.
number[5] = 9999
number.append(letters)
number.insert(0,963946556468)
number.pop()
number.pop(0)
number.remove(9999)
# sort() mutates in place; reverse=True gives descending order.
number.sort(reverse=True)
# number.reverse()
letters.sort()
letters.reverse()
# number.clear()
print(len(number))
print(number)
print(letters)
print(number.count(3)) | true |
ac727a2a3d1a35e06006d8636a24a8159a579d1a | Python | SabrinaMB/softdes-desafios | /src/interface_tests.py | UTF-8 | 1,624 | 2.53125 | 3 | [] | no_license | import os
from selenium import webdriver
address = os.getenv("ADDRESS")
def test_login_success(): # Student logs in successfully
    """Open the app with valid basic-auth credentials; any exception fails.

    NOTE(review): an AssertionError raised inside the `try` would be caught
    by the bare `except` below, so this pattern cannot distinguish a failed
    assertion from a browser error.
    """
    driver = webdriver.Firefox()
    try:
        driver.get(f"http://admin:admin@{address}/")
        assert True
    except:
        assert False
    # Close the browser regardless of outcome.
    driver.close()
def test_wrong_password(): # Student enters an incorrect password
    """Expect the page not to load with bad credentials.

    NOTE(review): `page_source` is a property, so calling it with `()` always
    raises TypeError, which makes this test pass regardless of the login
    outcome -- and an `assert False` in the try block would also be swallowed
    by the bare `except`. This test needs a rewrite to be meaningful.
    """
    driver = webdriver.Firefox()
    try:
        driver.get(f"http://admin:adm@{address}/")
        driver.page_source()
        assert False
    except:
        assert True
    driver.close()
def test_wrong_answer(): # Student submits a challenge with an incorrect answer
    """Upload a file that is not the expected solution and expect "Erro".

    NOTE(review): the bare `except: assert False` converts any browser/DOM
    error into a generic failure, hiding the real cause.
    """
    driver = webdriver.Firefox()
    try:
        driver.get(f"http://admin:admin@{address}/")
        # Submit a deliberately wrong file as the answer.
        driver.find_element_by_id("resposta").send_keys(os.getcwd() + "/src/adduser.py")
        driver.find_element_by_id("submit").click()
        # The newest submission is the first row of the answers table.
        answers_table = driver.find_element_by_id("answers_table")
        last_answer = answers_table.find_elements_by_id("date")[0]
        assert last_answer.text == "Erro"
    except:
        assert False
    driver.close()
def test_right_answer(): # Student submits a challenge with the correct answer
    """Upload the expected solution file and expect "OK!".

    NOTE(review): same upload path as test_wrong_answer -- presumably one of
    the two paths is wrong; verify against the challenge definition.
    """
    driver = webdriver.Firefox()
    try:
        driver.get(f"http://admin:admin@{address}/")
        # Submit the solution file as the answer.
        driver.find_element_by_id("resposta").send_keys(os.getcwd() + "/src/adduser.py")
        driver.find_element_by_id("submit").click()
        # The newest submission is the first row of the answers table.
        answers_table = driver.find_element_by_id("answers_table")
        last_answer = answers_table.find_elements_by_id("date")[0]
        assert last_answer.text == "OK!"
    except:
        assert False
    driver.close()
| true |
65f6077d88c17e2dd90c39adf869d54b09f995c4 | Python | ProtegerPW/Python-automatization | /ch_8_regex_search.py | UTF-8 | 1,097 | 3.59375 | 4 | [] | no_license | #! /usr/bin/python3
#Usage: Program to open any .txt in folder
# and searches for any line that matches a user-suplied regex.
# All results are then printed
# ./ch_8_regex_search.py <folder path> <regex>
import sys, re, os, glob
# Validate the command-line arguments before touching the filesystem.
if len(sys.argv) != 3:
    print("Usage: ./ch_8_regex_search.py <folder path> <regex>")
    sys.exit()
if not os.path.isdir(sys.argv[1]):
    print("Folder path argument is not a directory")
    sys.exit()
# Search every .txt file in the folder, printing each line's matches with
# the (1-based) line number.
os.chdir(sys.argv[1])
argRegex = re.compile(sys.argv[2])
for fileName in glob.glob("*.txt"):
    # `with` guarantees every file is closed; the original only closed the
    # last file, leaking a handle per .txt file.
    with open(fileName) as activeFile:
        for lineNumber, line in enumerate(activeFile, start=1):
            findRegex = argRegex.findall(line)
            if findRegex:
                print("In file: " + fileName + " at line: " + str(lineNumber) + ":")
                print(findRegex)
                print("") # empty string for pretty printing
| true |
e435c48dfaa50c6b7ef4fdebfaa3601c91912af4 | Python | andremartinon/pyCML | /cml/couplings.py | UTF-8 | 2,408 | 2.90625 | 3 | [
"MIT"
] | permissive | import numpy as np
from abc import ABC, abstractmethod
from scipy.signal import convolve2d
from .lattice import Lattice
class Coupling(ABC):
    """Abstract base for coupled-map-lattice neighborhood couplings.

    Holds a convolution kernel, a neighbor count used for normalization, a
    coupling constant `e`, and the most recently produced lattice. Concrete
    subclasses define the kernel weights and must implement __str__.
    """
    def __init__(self, lattice: Lattice = None):
        # Default plus-shaped (von Neumann) kernel; subclasses typically
        # overwrite this with an e-weighted version.
        self._kernel = np.array([[0, 1, 0],
                                 [1, 1, 1],
                                 [0, 1, 0]])
        self._neighbors = 4
        self._e = 0.1
        self._lattice = None
        self._with_boundaries = True
        # Apply immediately if an initial lattice is supplied.
        if lattice:
            self.apply(lattice)
    @property
    def e(self) -> float:
        # Coupling constant.
        return self._e
    @e.setter
    def e(self, value: float):
        self._e = value
    @property
    def neighbors(self) -> int:
        # Number of neighbors used to normalize the convolution result.
        return self._neighbors
    @neighbors.setter
    def neighbors(self, value: int):
        self._neighbors = value
    @property
    def kernel(self) -> np.ndarray:
        # Convolution kernel applied by `apply`.
        return self._kernel
    @kernel.setter
    def kernel(self, array: np.ndarray):
        self._kernel = array
    @property
    def lattice(self) -> Lattice:
        """Last result of apply(); boundary ring stripped unless requested."""
        if self._with_boundaries:
            return self._lattice
        else:
            # Return a copy shrunk by one cell on every side (drops the
            # zero-filled convolution border).
            lattice = Lattice(tuple(np.array(self._lattice.shape) - 2))
            lattice.u = self._lattice.u[1:-1, 1:-1]
            return lattice
    @lattice.setter
    def lattice(self, lattice: Lattice):
        self._lattice = lattice
    def apply(self, lattice: Lattice, with_boundaries=False) -> Lattice:
        """Convolve lattice.u with the kernel (zero-filled borders),
        normalize by `neighbors`, store and return the resulting lattice."""
        self._with_boundaries = with_boundaries
        updated_lattice = Lattice(lattice.shape)
        updated_lattice.u = convolve2d(lattice.u,
                                       self.kernel,
                                       mode='same',
                                       boundary='fill')
        updated_lattice.u = updated_lattice.u / self.neighbors
        self.lattice = updated_lattice
        return self.lattice
    @abstractmethod
    def __str__(self) -> str:
        # Human-readable description of the concrete coupling.
        pass
class FourNeighborCoupling(Coupling):
    """Diffusive coupling over the four von Neumann neighbors with weight e.

    NOTE(review): when a lattice is passed, super().__init__ runs *after*
    the weighted kernel is assigned and resets _kernel/_e to the base-class
    defaults before calling apply() -- so the e-weighted kernel built here is
    clobbered. Confirm whether the weighted kernel was meant to be used.
    """
    def __init__(self, coupling_constant: float = 0.1, lattice: Lattice = None):
        self.e = coupling_constant
        self.neighbors = 4
        # Center keeps 4*(1-e) so that dividing by `neighbors` yields
        # (1-e)*u + (e/4)*sum(neighbors).
        self.kernel = np.array([[0, self.e, 0],
                                [self.e, 4 * (1 - self.e), self.e],
                                [0, self.e, 0]])
        if lattice:
            super().__init__(lattice)
    def __str__(self) -> str:
        return f'Four Neighbors Coupling: e={self.e}'
| true |
3b47eb6b2ac40ee198a7e317fcc50dd22a081210 | Python | ethanwheatthin/COMP-262-final | /Flippy.py | UTF-8 | 1,881 | 3.421875 | 3 | [] | no_license | # coding=utf-8
# Math is being cited from this website:
# http://math.ucr.edu/home/baez/games/games_9.html
# and
# http://www.probabilityformula.org/empirical-probability-formula.html
# Could check out this also
# 3. If you repeat the experiment of flipping a coin ten times 10,000 times, (so 100,000 flips
# in all), about how many times do you expect to get the sequence HTHHTTHHHT?
# Answer: Since probability represents “long-range frequency”, we expect this particular
# sequence to occur about once for every 1000 repeats of the experiment of flipping a
# coin 10 times. So if we repeat the experiment 10,000 times, then we expect to get this
# particular sequence about 10 times.
#
# intuition behind coin flipping
# https://math.stackexchange.com/questions/151262/looking-for-intuition-behind-coin-flipping-pattern-expectation
import random
import time
from multiprocessing.dummy import Pool as ThreadPool
# The two possible coin faces.
coinChoices = ["H", "T"]
# Target sequence of flips we keep trying to reproduce (16 flips).
idealFlip = "HHTHTHHHTHHHTHTH"
# Sequence built up during the current attempt.
actualFlip = ""
# Number of attempts made so far.
counter = 0
# Loop sentinel: set to 0 once idealFlip has been reproduced.
flag = 1
probability = 0
# pool = ThreadPool(4)
def flip_coin(coins):
    """Return one face chosen uniformly at random from *coins*, as a str."""
    face = random.choice(coins)
    return str(face)
def compute_probability():
    """Unfinished stub for the empirical-probability calculation.

    NOTE(review): `size` is computed from the module-level idealFlip but
    never used; the function only prints a placeholder message.
    """
    size = len(idealFlip)
    print("do something damnit")
# make number of threads based on the length of the ideal flip
def make_threads():
    """Create a thread pool sized to the target flip sequence length.

    NOTE(review): the pool is created but never stored, used, or closed --
    this function currently has no lasting effect.
    """
    pool = ThreadPool(len(idealFlip))
if __name__ == "__main__":
    # Keep generating 16-flip sequences until one matches idealFlip exactly;
    # `counter` counts the attempts.
    while flag != 0:
        # then = time.time()
        for i in range(len(idealFlip)):
            # Each iteration represents a independent flip of the coin
            actualFlip += flip_coin(coinChoices)
        print("Actual flip = " + actualFlip + "   Ideal flip = " + idealFlip)
        if actualFlip == idealFlip:
            # now = time.time()
            flag = 0
        counter += 1
        # Reset for the next attempt.
        actualFlip = ""
        # diff = now - then
        # print(str(diff % 60))
    print("FOUND ON ITERATION: " + str(counter))
| true |
1b4330aebd40bcbec542d9860785f8cacf0b96c9 | Python | answerth3question/backend | /web/app/blueprints/post.py | UTF-8 | 1,681 | 2.609375 | 3 | [] | no_license | '''
post_bp endpoints should be used to get and submit posts.
to get posts specific to a given user, use the user_bp endpoints
'''
from flask import Blueprint, request, jsonify, abort
from app.util.jwt_manager import with_permission, get_jwt_identity
from app.database.models import Post
post_bp = Blueprint('post_bp', __name__)
@post_bp.route('/create', methods=['POST'])
@with_permission('contributer')
def create_post():
    """Create a new post authored by the authenticated contributor.

    Expects a JSON body with 'content' and 'prompt_id'. Returns 201 on
    success; any failure is logged and reported as a 500.
    """
    try:
        user_id = get_jwt_identity()
        body = request.get_json()
        post = Post(created_by=user_id,
                    content=body['content'],
                    prompt_id=body['prompt_id'])
        post.save_to_db()
        return 'success', 201
    # Narrowed from BaseException: catching BaseException would also swallow
    # SystemExit/KeyboardInterrupt.
    except Exception as e:
        print(e)
        abort(500)
@post_bp.route('/approved', methods=['GET'])
def get_approved_posts():
    """Return all approved posts as JSON (public endpoint)."""
    try:
        return jsonify(Post.get_approved())
    except BaseException as e:
        print(e)
        abort(500)
@post_bp.route('/pending', methods=['GET'])
@with_permission('reviewer')
def get_pending_posts():
    """Return all posts awaiting review as JSON (reviewers only)."""
    try:
        return jsonify(Post.get_pending())
    except BaseException as e:
        print(e)
        abort(500)
@post_bp.route('/rejected', methods=['GET'])
@with_permission('reviewer')
def get_rejected_posts():
    """Return all rejected posts as JSON (reviewers only)."""
    try:
        return jsonify(Post.get_rejected())
    except BaseException as e:
        print(e)
        abort(500)
@post_bp.route('/all', methods=['GET'])
@with_permission('reviewer')
def get_all_posts():
    """Return pending, rejected and approved posts in one JSON payload."""
    try:
        payload = {
            'pending': Post.get_pending(),
            'rejected': Post.get_rejected(),
            'approved': Post.get_approved(),
        }
        return jsonify(payload)
    except BaseException as e:
        print(e)
        abort(500)
| true |
183f97f0041f0bdcb496aca9a731a76da9ec108f | Python | OpenDSA/OpenDSA | /AV/Development/CommandLineBased/common/exercise-generator/generator.py | UTF-8 | 3,174 | 2.765625 | 3 | [
"MIT"
] | permissive | from pathlib import Path
def parseExercises(exercises):
    """Expand (NAME, title) pairs into (NAME, file-name, title) triples.

    The file name is the NAME lower-cased with underscores turned into
    hyphens (e.g. "PWD_2" -> "pwd-2").
    """
    parsed = []
    for name, title in exercises:
        file_name = name.lower().replace("_", "-")
        parsed.append((name, file_name, title))
    return parsed
COMMAND_LINE_EXERCISES = parseExercises([
("PWD", "pwd 1"),
("PWD_2", "pwd 2"),
("PWD_3", "pwd 3"),
("LS", "ls"),
("CD", "cd 1"),
("CD_2", "cd 2"),
("CD_3", "cd 3"),
("TOUCH", "touch"),
("MKDIR", "mkdir"),
("RM", "rm"),
("RM_R", "rm -r"),
("RMDIR", "rmdir"),
("MV", "mv"),
("CP", "cp"),
("CHALLENGE_1", "Challenge 1"),
("CHALLENGE_2", "Challenge 2"),
("CHALLENGE_3", "Challenge 3"),
])
GIT_EXERCISES = parseExercises([
("CLONE", "git clone"),
("STATUS", "git status"),
("ADD", "git add"),
("GIT_RM", "git rm"),
("COMMIT", "git commit"),
("PUSH", "git push"),
("RESTORE", "git restore"),
("RESTORE_STAGED", "git restore --staged"),
("PULL", "git pull"),
("COMMIT_A", "git commit -a"),
("COMMIT_PATH", "git commit (path)"),
("BRANCH", "git branch"),
("SWITCH", "git switch"),
("SWITCH_C", "git switch -c"),
("SWITCH_DIVERGED", "git switch diverged branches"),
("GIT_CHALLENGE_1", "Challenge 1"),
("GIT_CHALLENGE_2", "Challenge 2"),
("GIT_CHALLENGE_3", "Challenge 3"),
])
def createFile(replacement, filename, extension, path):
    """Instantiate template.<extension>, substituting EXERCISE_NAME.

    Reads ``template.<extension>`` from the current directory, replaces every
    EXERCISE_NAME placeholder with *replacement*, and writes the result to
    ``<path>/<filename>-exercise.<extension>``. Returns the output file name.

    BUG FIX: the original ignored the *filename* parameter and hard-coded
    "(unknown)" into the output name, and it never closed either file handle
    (so the written content was not guaranteed to be flushed).
    """
    with open(f"template.{extension}", "r") as template_file:
        template = template_file.read()
    output_string = template.replace("EXERCISE_NAME", replacement)
    output_file_name = f"{path}/{filename}-exercise.{extension}"
    with open(output_file_name, "w") as output_file:
        output_file.write(output_string)
    return output_file_name
def createExerciseFiles(exerciseConfigName, exerciseFileName, exerciseTitle, path):
    """Create the js/html/json files for one exercise under path/<file name>/.

    The js template gets the config name, the html template the file name,
    and the json template the human-readable title.
    """
    exercise_dir = Path(f"{path}/{exerciseFileName}")
    exercise_dir.mkdir(exist_ok=True)
    for replacement, extension in ((exerciseConfigName, "js"),
                                   (exerciseFileName, "html"),
                                   (exerciseTitle, "json")):
        createFile(replacement, exerciseFileName, extension, exercise_dir)
def createAllExerciseFiles(exerciseNames, path):
    """Ensure *path* exists and generate the files for every exercise triple."""
    root = Path(path)
    root.mkdir(exist_ok=True)
    for config_name, file_name, title in exerciseNames:
        createExerciseFiles(config_name, file_name, title, path)
def createRST(exerciseNames, outputFileName):
    """Build an RST page: header.rst followed by one section per exercise.

    Sections (and the header) are separated by blank lines, with a trailing
    blank-line pair, exactly as the original produced. Returns the output
    file name.

    FIX: file handles are now closed via `with` (the original leaked both
    and relied on GC to flush the output).
    """
    with open("header.rst", "r") as header_file:
        parts = [header_file.read()]
    for _, exercise_file_name, exercise_title in exerciseNames:
        parts.append(createRSTExerciseSection(exercise_file_name, exercise_title))
    output_string = "\n\n".join(parts) + "\n\n"
    with open(outputFileName, "w") as output_file:
        output_file.write(output_string)
    return outputFileName
def createRSTExerciseSection(exerciseFileName, exerciseTitle):
    """Render template.rst with EXERCISE_NAME / EXERCISE_TITLE substituted.

    FIX: the template file is now closed via `with` (the original leaked
    the handle on every call).
    """
    with open("template.rst", "r") as template_file:
        template = template_file.read()
    return template.replace("EXERCISE_NAME", exerciseFileName).replace("EXERCISE_TITLE", exerciseTitle)
# Generate the exercise file trees and the RST index pages for both the
# command-line and the git exercise sets (paths are relative to this script).
createAllExerciseFiles(COMMAND_LINE_EXERCISES, "../../exercises")
createRST(COMMAND_LINE_EXERCISES, "../../../../../RST/en/CommandLine/Exercises.rst")
createAllExerciseFiles(GIT_EXERCISES, "../../exercises")
createRST(GIT_EXERCISES, "../../../../../RST/en/Git/Exercises.rst")
| true |
26c8798f8668bec457f67ce82a35da28582428a3 | Python | matthewdefranco94/FirstProjects | /Weapon/WOW/WeapSimBackend.py | UTF-8 | 3,148 | 2.875 | 3 | [] | no_license | #practice
import sys
sys.path.append ('../../../../../../Desktop/Projects')
import random
# import WeaponSimulation
import numpy as np
import random
import matplotlib
from dataclasses import dataclass
#Attacks have a 40% chance to glance for 30% less damage, weaponskill reduces the 30% damage penatly
#Weapon skill affecting glancing penalty
def damage(weapon_top_end, weapon_bottom_end, added_weapon_skill, total_hit , crit_chance):
glancing_chance_eff = 0.40
glancing_dam_red_base = 0.30
weapon_skill_reduction = 0.03
base_miss = 0.28
critical_strike_modifier = 1.5
weapon_top_end = int(weapon_top_end)
weapon_bottom_end = int(weapon_bottom_end)
added_weapon_skill = int(added_weapon_skill)
total_hit = int(total_hit)
crit_chance = int(crit_chance)
eff_miss = base_miss - total_hit / 100
# Check if the attack landed at all
hit_type = random.uniform(.01, 1.0)
if hit_type > eff_miss:
random_weapon_damage = random.randint(weapon_bottom_end , weapon_top_end)
# Check if a regular or glancing attack
# Regular hit
if hit_type > eff_miss + glancing_chance_eff:
# Critical strike
if random.uniform(.01 , 1.0) <= crit_chance:
effective_weapon_damage = random_weapon_damage * critical_strike_modifier
# Non Crit
else:
effective_weapon_damage = random_weapon_damage
# Glancing hit
else:
glancing_penalty = (glancing_dam_red_base) - added_weapon_skill * weapon_skill_reduction
effective_weapon_damage = random_weapon_damage * (1 - glancing_penalty)
else:
effective_weapon_damage = 0
return effective_weapon_damage
@dataclass
class SimulationResult():
    """Outcome of one combat simulation run."""
    # Number of swings performed over the fight.
    number_of_attacks : int
    # Per-swing damage values (0 for misses).
    total_damage : list[int]
    # Fight length in seconds.
    fight_duration : float
    # Mean damage per swing over the fight.
    average_DPS : float
    # Static (paper) weapon DPS: mean weapon damage / weapon speed.
    weapon_DPS : float
#see this as a template for below
def do_simulation(weapon_bottom_end,
                  weapon_top_end,
                  weapon_speed,
                  total_hit,
                  crit_chance,
                  added_weapon_skill,
                  fight_duration):
    """Simulate auto-attacking for `fight_duration` seconds and summarize.

    Rolls one `damage()` call per swing (one swing every `weapon_speed`
    seconds) and returns a SimulationResult with the per-swing damages,
    the observed average DPS and the weapon's static paper DPS.
    """
    # Paper DPS of the weapon itself: average hit divided by speed.
    static_weap_dps = round(((weapon_top_end + weapon_bottom_end) / 2) / weapon_speed)
    # One swing every `weapon_speed` seconds over the fight.
    num_attacks = round(float(fight_duration) / float(weapon_speed))
    attack_damages = [
        damage(weapon_top_end, weapon_bottom_end, added_weapon_skill, total_hit, crit_chance)
        for _ in range(num_attacks)
    ]
    # Guard against a zero-length fight (the original divided by zero here).
    average_dps = sum(attack_damages) / num_attacks if num_attacks else 0.0
    return SimulationResult(num_attacks, attack_damages, fight_duration,
                            average_dps, static_weap_dps)
| true |
714ce63f57678b557ea2bef8178298afb604762c | Python | Aravindandeva/Python-files | /string.py | UTF-8 | 41 | 2.875 | 3 | [] | no_license | stringrev=input()
print(stringrev[::-1])
| true |
7aeffb35f02b80d5c456d958e10c74776362d3dd | Python | Range0122/CapsNet_for_MusicTagging | /augmentation.py | UTF-8 | 3,223 | 2.625 | 3 | [] | no_license | import librosa
import librosa.display
import numpy as np
import matplotlib.pyplot as plt
import random
def compute_melspectrogram(audio_path, sr=22050, mels=96):
    """Load ~29.12 s of audio and return a dB-scaled mel spectrogram.

    The signal is zero-padded (if shorter) or center-cropped (if longer) to
    exactly DURA seconds, then converted to a mel spectrogram and to dB.
    Returns an array of shape (mels, frames, 1) -- the trailing singleton
    axis is the channel dimension for downstream CNN input.
    """
    SR = sr
    N_FFT = 512
    N_MELS = mels
    HOP_LEN = 256
    DURA = 29.12
    src, sr = librosa.load(audio_path, sr=SR)
    n_sample = src.shape[0]
    n_sample_fit = int(DURA * SR)
    # Pad short clips with trailing zeros; center-crop long ones.
    if n_sample < n_sample_fit:
        src = np.hstack((src, np.zeros((int(DURA * SR) - n_sample,))))
    elif n_sample > n_sample_fit:
        src = src[(n_sample - n_sample_fit) // 2:(n_sample + n_sample_fit) // 2]
    logam = librosa.amplitude_to_db
    melgram = librosa.feature.melspectrogram
    ret = logam(melgram(y=src, sr=SR, hop_length=HOP_LEN,
                        n_fft=N_FFT, n_mels=N_MELS))
    # Add the channel axis expected by the model.
    ret = ret[:, :, np.newaxis]
    return ret
def draw_signal(audio_path):
    """Plot the raw waveform of an audio file (amplitude over time)."""
    samples, rate = librosa.load(audio_path)
    figure, axis = plt.subplots()
    librosa.display.waveplot(y=samples, sr=rate, ax=axis)
    plt.xlabel("Time(s)")
    plt.ylabel("Amplitude")
    plt.show()
def draw_melspectrogram(audio_path):
    """Display the mel spectrogram of an audio file, optionally augmented.

    Exactly one augmentation is active at a time: uncomment the desired
    loader line below (dropout / gaussian noise / pitch shift / time
    stretch) or the `loudness` line after the dB conversion.
    """
    # data augmentation or not
    y, sr = librosa.load(audio_path, sr=None)
    # y, sr = dropout(audio_path, None, 0.05)
    # y, sr = gaussian_noise(audio_path, None, 0.05, 0)
    y, sr = pitch_shifting(audio_path, sr=None, n_steps=6, bins_per_octave=12)
    # y, sr = time_stretching(audio_path, sr=None, rate=2)
    # Power spectrogram -> mel scale -> dB.
    D = np.abs(librosa.stft(y)) ** 2
    S = librosa.feature.melspectrogram(S=D, sr=sr)
    # S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128, fmax=8000)
    fig, ax = plt.subplots()
    S_dB = librosa.power_to_db(S, ref=np.max)
    # Loudness
    # S_dB = loudness(S_dB, 10)
    img = librosa.display.specshow(S_dB, x_axis='time', y_axis='mel', sr=sr, ax=ax)
    fig.colorbar(img, ax=ax, format='%+2.0f dB')
    ax.set(title='Mel-frequency spectrogram')
    plt.xlabel("Time(s)")
    plt.ylabel("Frequency(Hz)")
    plt.show()
def dropout(audio_path, sr=None, p=0.05):
    """Load the audio file and zero out each sample with probability *p*.

    TODO: p = 0.05, 0.10, 0.15, 0.20

    Args:
        audio_path: file readable by librosa.load.
        sr: target sample rate (None keeps the file's native rate).
        p: independent per-sample drop probability.

    Returns:
        (samples, sample_rate) with approximately a fraction p of the
        samples set to 0.
    """
    y, sr = librosa.load(audio_path, sr)
    for i in range(len(y)):
        # random.random() is uniform on [0, 1), so this is an exact
        # Bernoulli(p) draw.  The previous test, randint(0, 100) < 100*p,
        # was biased because randint's upper bound is inclusive
        # (101 equally likely outcomes instead of 100).
        if random.random() < p:
            y[i] = 0
    return y, sr
def gaussian_noise(audio_path, sr=None, sigma=0.05, mu=0):
    """Load the audio and add Gaussian noise with mean *mu*.

    TODO: sigma = 0.05, 0.1, 0.2

    NOTE(review): the noise amplitude is scaled by sigma * sigma (the
    variance) rather than by sigma itself -- confirm whether sigma was
    meant to be the standard deviation (then the factor should be sigma).
    """
    y, sr = librosa.load(audio_path, sr)
    noise = sigma * sigma * np.random.randn(y.shape[0],) + mu
    y += noise
    return y, sr
def pitch_shifting(audio_path, sr=None, n_steps=0.0, bins_per_octave=12):
    """Load the audio and shift its pitch by *n_steps* fractional steps.

    Thin wrapper around librosa.effects.pitch_shift.
    TODO: choose the n_steps values to evaluate.
    """
    y, sr = librosa.load(audio_path, sr)
    y = librosa.effects.pitch_shift(y, sr, n_steps=n_steps, bins_per_octave=bins_per_octave)
    return y, sr
def time_stretching(audio_path, sr=None, rate=1):
    """Load the audio and time-stretch it by *rate* (>1 speeds it up).

    Thin wrapper around librosa.effects.time_stretch.
    TODO: choose the rate values to evaluate.
    """
    y, sr = librosa.load(audio_path, sr)
    y = librosa.effects.time_stretch(y, rate)
    return y, sr
def loudness(S_dB, db=10):
    """Return a copy of *S_dB* with every value shifted by *db* dB.

    TODO: db = +-5, +-10, +-20
    """
    # S_dB + db allocates a new array, so the caller's input is untouched.
    shifted = S_dB + db
    # Pin one cell to -80 dB; presumably this anchors the colour scale of
    # the later spectrogram plot -- confirm before removing.
    shifted[0][0] = -80
    return shifted
if __name__ == "__main__":
    # Smoke test on a local sample file (path is machine-specific).
    path = '/Users/range/Code/Data/example/1.mp3'
    melspectrogram = compute_melspectrogram(audio_path=path, sr=22050, mels=96)
    print(melspectrogram.shape)
    # draw_signal(audio_path=path)
    # draw_melspectrogram(audio_path=path)
| true |
bc542c6184734725a9c30d54ce21d7af94a14f51 | Python | srirachanaachyuthuni/Movie-Reviews-Analysis | /merge_datasets.py | UTF-8 | 5,850 | 2.609375 | 3 | [] | no_license | import sys
import os
from os.path import isdir

import numpy as np
import pandas as pd
from pymongo import MongoClient
name_basics = "name.basics.tsv.gz"
title_basics = "title.basics.tsv.gz"
title_principals = "title.principals.tsv.gz"
ml_links = "links.csv"
ml_ratings = "ratings.csv"
def parse_argv(argv):
    """Validate CLI arguments and return (imdb_dir, ml_dir, mongo_uri).

    Exits with status 1 (after printing a message) when the argument
    count is wrong or either directory does not exist; mongo_uri is None
    when the optional third argument was omitted.
    """
    if len(argv) not in (3, 4):
        print("Usage: python3 merge_datasets.py <directory containing the IMDb dataset>"
              " <directory containing the MovieLens dataset> [MongoDB connection string]")
        sys.exit(1)

    imdb_dir, ml_dir = argv[1], argv[2]
    mongodb_connection_string = argv[3] if len(argv) == 4 else None

    if not (isdir(imdb_dir) and isdir(ml_dir)):
        print("Invalid directory/directories")
        sys.exit(1)

    return imdb_dir, ml_dir, mongodb_connection_string
def get_imdb_gz_df(imdb_dir, imdb_file, columns_to_read):
    """Read a gzipped, tab-separated IMDb dump into a DataFrame.

    Only *columns_to_read* are loaded.  The path is built with
    os.path.join so the code is portable; the previous version
    hard-coded a Windows backslash separator.
    """
    return pd.read_csv(os.path.join(imdb_dir, imdb_file), sep="\t",
                       usecols=columns_to_read, low_memory=False,
                       compression='gzip')
def get_ml_csv_df(ml_dir, ml_file, columns_to_read):
    """Read a MovieLens CSV into a DataFrame, loading only *columns_to_read*.

    The path is built with os.path.join so the code is portable; the
    previous version hard-coded a Windows backslash separator.
    """
    return pd.read_csv(os.path.join(ml_dir, ml_file),
                       usecols=columns_to_read, low_memory=False)
def get_imdb_dfs(imdb_dir):
    """Load and normalize the three IMDb dumps from *imdb_dir*.

    Returns (name_basics_df, title_basics_df, title_principals_df) with:
      * string ids ('nm…' / 'tt…') converted to plain ints,
      * IMDb's '\\N' placeholders replaced by NaN,
      * titles filtered down to movie / short / tvMovie / tvShort,
      * id columns renamed (id / movieId / personId) for later insertion.
    """
    # --- name.basics: people -------------------------------------------
    name_basics_columns = ['nconst', 'primaryName', 'birthYear', 'deathYear']
    name_basics_df = get_imdb_gz_df(imdb_dir, name_basics, name_basics_columns)
    # Strip the 'nm' prefix and parse the remaining digits as an int id.
    nconst_list = [int(nconst[2:]) for nconst in name_basics_df['nconst']]
    birth_year_list = [np.nan if birth_year == '\\N' else int(birth_year) for birth_year in name_basics_df['birthYear']]
    death_year_list = [np.nan if death_year == '\\N' else int(death_year) for death_year in name_basics_df['deathYear']]
    name_basics_df['nconst'] = nconst_list
    name_basics_df['birthYear'] = birth_year_list
    name_basics_df['deathYear'] = death_year_list
    name_basics_df.rename(columns={'nconst': 'id'}, inplace=True)

    # --- title.basics: titles ------------------------------------------
    title_basics_columns = ['tconst', 'titleType', 'primaryTitle', 'startYear', 'runtimeMinutes', 'genres']
    title_basics_df = get_imdb_gz_df(imdb_dir, title_basics, title_basics_columns)
    # Keep only movie-like title types, then drop the type column itself.
    titles_to_drop = title_basics_df[~(title_basics_df['titleType'] == 'movie')
                                     & ~(title_basics_df['titleType'] == 'short')
                                     & ~(title_basics_df['titleType'] == 'tvMovie')
                                     & ~(title_basics_df['titleType'] == 'tvShort')].index
    title_basics_df.drop(titles_to_drop, inplace=True)
    title_basics_df.drop(['titleType'], axis=1, inplace=True)
    tconst_list = [int(tconst[2:]) for tconst in title_basics_df['tconst']]
    start_year_list = [np.nan if start_year == '\\N' else int(start_year)
                       for start_year in title_basics_df['startYear']]
    # Non-numeric runtimes (including '\\N') become NaN.
    runtime_list = [np.nan if not str(runtime).isdigit() else int(runtime)
                    for runtime in title_basics_df['runtimeMinutes']]
    # Comma-separated genre string -> list of genre names.
    genres_list = [str(genres).split(',') for genres in title_basics_df['genres']]
    title_basics_df['tconst'] = tconst_list
    title_basics_df['startYear'] = start_year_list
    title_basics_df['runtimeMinutes'] = runtime_list
    title_basics_df['genres'] = genres_list
    title_basics_df.rename(columns={'tconst': 'id'}, inplace=True)

    # --- title.principals: person-to-title roles -----------------------
    title_principals_columns = ['tconst', 'nconst', 'category']
    title_principals_df = get_imdb_gz_df(imdb_dir, title_principals, title_principals_columns)
    nconst_list = [int(nconst[2:]) for nconst in title_principals_df['nconst']]
    tconst_list = [int(tconst[2:]) for tconst in title_principals_df['tconst']]
    title_principals_df['nconst'] = nconst_list
    title_principals_df['tconst'] = tconst_list
    title_principals_df.rename(columns={'tconst': 'movieId', 'nconst': 'personId'}, inplace=True)

    return name_basics_df, title_basics_df, title_principals_df
def get_ml_dfs(ml_dir):
    """Load MovieLens ratings and re-key them by IMDb movie id."""
    # links.csv maps MovieLens movie ids to IMDb ids.
    links_df = get_ml_csv_df(ml_dir, ml_links, ['movieId', 'imdbId'])
    movielens_to_imdb = {row['movieId']: row['imdbId']
                         for row in links_df.to_dict('records')}

    ratings_df = get_ml_csv_df(ml_dir, ml_ratings,
                               ['userId', 'movieId', 'rating', 'timestamp'])
    # Replace every MovieLens movie id with its IMDb counterpart.
    ratings_df['movieId'] = [movielens_to_imdb[movie_id]
                             for movie_id in ratings_df['movieId']]
    return ratings_df
def create_collections(imdb_dir, ml_dir, mongodb_connection_string):
    """Insert the merged datasets into the 'MapReduce' MongoDB database.

    Creates four collections: Person, Movie, Person_Roles and Ratings.
    A None connection string falls back to pymongo's default (localhost).
    """
    name_basics_df, title_basics_df, title_principals_df = get_imdb_dfs(imdb_dir)
    client = MongoClient() if mongodb_connection_string is None else MongoClient(mongodb_connection_string)
    database = client['MapReduce']
    person_collection = database['Person']
    # Build documents while dropping NaN fields ('value == value' is the
    # NaN self-inequality test).
    person_list = [dict((key, value) for key, value in zip(name_basics_df.columns, row)
                        if value is not np.nan and value == value) for row in name_basics_df.to_numpy()]
    person_collection.insert_many(person_list)
    movie_collection = database['Movie']
    movie_list = [dict((key, value) for key, value in zip(title_basics_df.columns, row)
                       if value is not np.nan and value == value) for row in title_basics_df.to_numpy()]
    movie_collection.insert_many(movie_list)
    person_role_collection = database['Person_Roles']
    person_role_collection.insert_many(title_principals_df.to_dict('records'))
    ml_ratings_df = get_ml_dfs(ml_dir)
    ratings_collection = database['Ratings']
    ratings_collection.insert_many(ml_ratings_df.to_dict('records'))
ratings_collection.insert_many(ml_ratings_df.to_dict('records'))
def main():
    """Entry point: parse CLI arguments, then load both datasets into MongoDB."""
    imdb_dir, ml_dir, mongodb_connection_string = parse_argv(sys.argv)
    create_collections(imdb_dir, ml_dir, mongodb_connection_string)
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| true |
58df67d974ab05cecf30badf904d390ebd56e947 | Python | regenalgrant/ROS_Py | /Topics/src/topic_publisher.py | UTF-8 | 437 | 2.71875 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int32  # bug fix: was split into two broken lines
                                # ('import Int32' + bare 'from std_msgs.msg'),
                                # which is a SyntaxError

# Setup: initialize node, register topic, set rate
rospy.init_node('topic_publisher')
# Register the /counter topic publishing std_msgs/Int32, queue size 3.
pub = rospy.Publisher(
    'counter',
    Int32,
    queue_size=3
)
# Publish at 2 Hz.
rate = rospy.Rate(2)

# Loop: publish, count, sleep
count = 0
while not rospy.is_shutdown():
    pub.publish(count)
    count += 1
rate.sleep() | true |
100a030bb3f3be0bb04bc3720c2a9ca4490f961d | Python | IceIceRabbit/trajectory | /q_learning_agent.py | UTF-8 | 2,073 | 2.78125 | 3 | [] | no_license | import numpy as np
import random
from collections import defaultdict
import motion_model_reward as mmr
import matplotlib.pyplot as plt
import math
import matplotlib.pyplot as plt
import numpy.linalg as la
class QLearningAgent:
    """Tabular Q-learning agent with an epsilon-greedy behaviour policy."""

    def __init__(self, actions):
        # actions: list of discrete action indices available to the agent.
        self.actions = actions
        self.learning_rate = 0.01
        self.discount_factor = 0.9
        self.epsilon = 0.1
        # Default Q-row sized to the actual action set.  The previous
        # version hard-coded a 5-entry row, which silently broke for any
        # other number of actions.
        self.q_table = defaultdict(lambda: [0.0] * len(self.actions))

    def learn(self, state, action, reward, next_state):
        """One Q-learning update: Q(s,a) += lr * (r + gamma*max Q(s',·) - Q(s,a))."""
        current_q = self.q_table[state][action]
        new_q = reward + self.discount_factor * max(self.q_table[next_state])
        self.q_table[state][action] += self.learning_rate * (new_q - current_q)

    def get_action(self, state):
        """Epsilon-greedy action selection for *state*."""
        if np.random.rand() < self.epsilon:
            # Explore: uniformly random action.
            return np.random.choice(self.actions)
        # Exploit: best known action (ties broken at random).
        return self.arg_max(self.q_table[state])

    @staticmethod
    def arg_max(state_action):
        """Return a uniformly random index among the maxima of *state_action*."""
        best = max(state_action)
        candidates = [i for i, v in enumerate(state_action) if v == best]
        return random.choice(candidates)
if __name__ == "__main__":
    # Train against the motion-model environment (project module `mmr`).
    agent = QLearningAgent(actions = list(range(5)))
    for episode in range(10):
        mmr.reset()
        state = [mmr.x1_in,mmr.y1_in] #initialize state to env values.
        timestep = 0 #initialize iterator
        while True:
            # States are stringified because q_table keys must be hashable.
            action = agent.get_action(str(state))
            next_state,reward ,done,timestep = mmr.step(action,timestep)
            agent.learn(str(state), action, reward, str(next_state)) #reward defined by the sum of heuristics
            state = next_state
            mmr.plots()
            if done:
                break
| true |
87ac14e860b1a8426b53df96d063246e8bc451aa | Python | seamustuohy/openThreads | /testSuite.py | UTF-8 | 2,211 | 2.6875 | 3 | [] | no_license | import unittest
import openthreads
class testFunctions(unittest.TestCase):
    # Integration tests for openthreads.openThread, driven by the fixture
    # files under tests/ (testEmail, rawSmall, testOpenJSON, testSaveJSON).

    def setUp(self):
        # A fresh parser per test case, built from the sample archive.
        self.listSrv = openthreads.openThread("tests/testEmail")

    def test_init(self):
        #Test that messages and first messages are created upon class instatiation.
        self.assertNotEqual(self.listSrv.messages, [])
        self.assertIsNotNone(self.listSrv.messages)
        self.assertIsNotNone(self.listSrv.First)

    def test_compactDate(self):
        #test that computed date is being correctly computed
        dateResult = self.listSrv.compactDate("Sun, 13 Jul 2008 22:11:01 -0700")
        testDate = '2008713221101'
        self.assertEqual(dateResult, testDate)

    def test_getRaw(self):
        # getArchive should return the raw file contents verbatim.
        raw = self.listSrv.getArchive('tests/rawSmall')
        self.assertEqual(raw, 'abcdefg')

    def test_split(self):
        """Test that the split function splits a list-serv into seperate e-mails"""
        raw = self.listSrv.getArchive('tests/testEmail')
        split = self.listSrv.split(raw)
        self.assertEqual(len(split), 5)

    def test_dictifying(self):
        # The second fixture message, fully parsed into a dict.
        test = {
            'Body': '\n\n\nI am a second messsage. I am From: testie McTesterson.\n\nLOVE Testie\n',
            'From': 'testie at cs.testuni.edu (Testie McTesterson)',
            'Name': 'Testie McTesterson',
            'compactDate': '2008713221101',
            'References': [],
            'Date': ' Sun, 13 Jul 2008 22:11:01 -0700',
            'Reply': [],
            'ID': ' <a5ca47180807251726k34b5a5b5wda031814a3c36a4f@mail.gmail.com>',
            'Subject': '[email-list] The Second Message'}
        self.assertEqual(test, self.listSrv.messages[1])

    def test_couchDB(self):
        # Placeholder: no CouchDB assertions yet.
        print("TODO Create a CouchDB test")

    def test_json(self):
        #test open
        openedJSON = self.listSrv.jsonMaker("open", "tests/testOpenJSON")
        self.assertEqual(openedJSON, {'test':'data'})
        #test save
        self.listSrv.jsonMaker("save", "tests/testSaveJSON", {'item':'content'})
        openSavedJSON = self.listSrv.jsonMaker("open", "tests/testSaveJSON")
        self.assertEqual(openSavedJSON, {u'item':u'content'})

    def test_first(self):
        """Test that first produces the first message by each user."""
        # NOTE(review): empty body -- the docstring alone makes this a
        # silently passing placeholder test.
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| true |
04521b718389ddb64c122eeb6d6fa8a0936f2f70 | Python | eddiegz/Personal-C | /DMOJ/CCC/escape room.py | UTF-8 | 683 | 2.875 | 3 | [
"MIT"
] | permissive | import collections
def cal(num):
    """Memoize the usable divisors of *num* in the global `factor` dict.

    Every divisor i of num with i*i <= num is appended to factor[num],
    but only when both i and num//i could serve as 1-based room
    coordinates, i.e. neither exceeds max(n, m).  Presence of num as a
    key in `factor` also acts as a processed/visited marker for dfs.
    Returns num unchanged.
    """
    i=1
    f=factor[num]
    while i*i<=num:
        if num%i==0 and i<=max(n,m) and num//i<=max(n,m):
            f.append(i)
        i+=1
    return num
def dfs(i, j):
    """Return True when the exit cell (m-1, n-1) is reachable from (i, j).

    From a cell holding value v you may jump to any cell (r-1, c-1) with
    r * c == v; `factor[v]` (filled lazily by cal) lists the candidate
    divisors.  Membership of v in `factor` doubles as a visited marker,
    which is sound because the moves depend only on the cell value.
    """
    if i == m - 1 and j == n - 1:
        return True
    # Bug fix: the bounds guard used `and`, so a coordinate out of range
    # in only ONE dimension fell through to grid[i][j] and could raise an
    # IndexError on rectangular grids.  Either coordinate being out of
    # range makes the cell invalid.
    if i >= m or j >= n or grid[i][j] in factor:
        return False
    num = cal(grid[i][j])
    for p in factor[num]:
        nj = num // p
        # Try the divisor pair in both orientations: (row, col) and (col, row).
        if dfs(p - 1, nj - 1) or dfs(nj - 1, p - 1):
            return True
    return False
# Read the grid dimensions and the room values from stdin.
m=int(input())
n=int(input())
grid=[]
for i in range(m):
    grid.append(list(map(int,input().split())))
# factor maps a cell value to its usable divisors; it also serves as the
# visited set for the depth-first search.
factor=collections.defaultdict(list)
print('yes' if dfs(0, 0) else 'no')
| true |
1bf9f3e572261a9d8161bfd737d91730cff863b0 | Python | Ana-geek/Python_test_XML | /drivers.py | UTF-8 | 2,957 | 3.578125 | 4 | [] | no_license | from xml.dom import minidom
class XMLDriver(object):
def __init__(self, path):
self._doc = minidom.parse(path)
# Красивенький принт
def nice_print(self, value):
print('>' * 50)
print(value)
# Метод вывода навания теста
def get_name(self):
root = self._doc.getElementsByTagName('test')[0]
name = root.attributes['name'].value
self.nice_print(name)
# Сдесь я обьеденила в один метод вывод вопроса и вариантов его отвата. Просто, как по мне, это логичней)
def get_question(self, number: str):
questions = self._doc.getElementsByTagName('question')
for q in questions:
if q.attributes['number'].value == number:
answers = q.getElementsByTagName('answer')
result = f"Вопрос {q.attributes['number'].value}. {q.attributes['text'].value}"
self.nice_print(result)
# Вот эта часть отвечает за вывод вариантов для вопроса, который запрашиваеться
for a in answers:
result2 = f"->>> {a.attributes['number'].value}. {a.attributes['text'].value}"
print(result2)
"""
После многочисленых попыток, я наконец то сделала рабочий метод для
ввода варианта ответа и его проверки. Наш метод принимает новер вопроса,
который идет по умолчанию в main и номер варианта ответа.
Далее идет проверка на правильность ести тэг correct имеет значение True.
И результат сразу выводиться в консоль.
Возможно его можно написать попроще и покороче, но я художник, я так вижу)
"""
def set_answer(self, question_number, answer_number):
questions = self._doc.getElementsByTagName('question')
for q in questions:
if q.attributes['number'].value == question_number:
answers = self._doc.getElementsByTagName('answer')
for a in answers:
if a.attributes['number'].value == answer_number:
if a.attributes['correct'].value == "True":
result = f"Ответ {a.attributes['number'].value} правильный. Ураа!!!"
self.nice_print(result)
else:
print(f"Ответ {a.attributes['number'].value} НЕ правильный. Плак-плак( ")
def final(self):
self.nice_print('Текст окончен!')
| true |
f370dfc5bee0167c369010d78097a622cba838ab | Python | mokamotosan/extr_actions_jp | /src/create_crosstable.py | UTF-8 | 1,537 | 2.828125 | 3 | [
"MIT"
] | permissive | import sqlite3
import pandas as pd
def __extract_crosstable(fullpath_to_db):
    """Build a predicate-by-trait cross table from the dependency table.

    Reads the rows of ``dpnd_datatable`` whose 親文節ID (parent phrase id)
    is -1 (dependency roots) from the SQLite database at *fullpath_to_db*,
    drops rows without a ``trait40`` value, and returns a pandas crosstab
    of 子見出し-親見出し (child/parent headword pair) against ``trait40``.

    Args:
        fullpath_to_db (str): path to the SQLite database file.

    Returns:
        pandas.DataFrame: the frequency cross table.
    """
    conn = sqlite3.connect(fullpath_to_db)
    dpnd_df = pd.read_sql_query("SELECT * FROM dpnd_datatable WHERE 親文節ID == -1", conn)
    conn.close()
    # drop NA: 20190809
    dpnd_df2 = dpnd_df.dropna(subset=["trait40"])
    # 20190808: row key changed from the parent normalized representative
    # form (親正規化代表表記) to the parent headword (親見出し).
    cross_df = pd.crosstab(dpnd_df["子見出し-親見出し"], dpnd_df2["trait40"])
    return cross_df
def create_datatable(fullpath_to_db):
    """Build the correspondence-analysis cross table and store it."""
    # Assemble the predicate x trait cross table, then write it back into
    # the same database as `crosstable_predicate_traitq`.
    table = __extract_crosstable(fullpath_to_db)
    print('Creating a data table for CA...done')

    connection = sqlite3.connect(fullpath_to_db)
    table.to_sql("crosstable_predicate_traitq", connection,
                 if_exists="replace", index=True)
    connection.close()
def add_total(fullpath_to_db):
    """Append a per-row total column to the CA cross table.

    Reads ``crosstable_predicate_traitq``, sums the trait frequency
    columns of every row into a new ``total_count`` column and stores
    the result as ``crosstable_predicate_traitq_total``.
    """
    # read a data table for CA
    conn = sqlite3.connect(fullpath_to_db)
    cross_df = pd.read_sql_query("SELECT * FROM crosstable_predicate_traitq", conn)
    conn.close()

    # Sum each row across the numeric trait columns only; the first
    # column holds the text row key and must not enter the sum (recent
    # pandas raises on a row-wise sum over mixed str/number dtypes,
    # whereas older pandas skipped strings silently).
    cross_df["total_count"] = cross_df.sum(axis="columns", numeric_only=True)

    # save the table
    conn = sqlite3.connect(fullpath_to_db)
    cross_df.to_sql("crosstable_predicate_traitq_total", conn,
                    if_exists="replace", index=False)
    conn.close()
| true |
d733e9fff056a6c5a03b7d21035a745ac51162f9 | Python | tttienthinh/AlgoTrade | /Tests/test.py | UTF-8 | 1,592 | 2.921875 | 3 | [] | no_license | from IaObject import Ia
from PriceObject import Price
from random import randint
from time import time
from datetime import datetime
import matplotlib.pyplot as plt
def single_test():
    """Plot one random 120-sample price window against the 30-step forecast."""
    my_ia = Ia()
    my_ia.load('7')  # model checkpoint '7'
    my_price = Price(180)
    # Random starting index inside the available price history.
    start = randint(1, my_price.born)
    print(start)
    data = my_price.get_several_data(start, 120)
    label = my_price.get_several_data(start+120, 30)
    # NOTE(review): predict() appears to return a batch (hence
    # predictions[0]) -- confirm against IaObject.Ia.
    predictions = my_ia.predict(data)
    # Prefix both curves with the input window so they align on the plot.
    label = data+label
    predictions = data+list(predictions[0])
    print(label)
    print(predictions)
    plt.plot(label, label='Price')
    plt.plot(predictions, label='Predict')
    plt.legend()
    plt.show()
def bunch_test():
    """Plot forecasts of checkpoints 1-14 at six fixed history offsets.

    One figure per checkpoint is saved under Backtest/ (nothing is shown).
    """
    my_price = Price(180)
    my_ia = Ia()
    # Fixed starting offsets into the price history.
    liste = [6_421_500, 5_250_000, 4_000_000, 2_750_000, 1_500_000, 750_000]
    plt.figure(figsize=(15, 10))
    plt.subplots_adjust(hspace=0.4)
    for i in range(1, 15):
        my_ia.load(f'{i}')
        for position in range(len(liste)):
            start = liste[position]
            data = my_price.get_several_data(start, 120)
            label = my_price.get_several_data(start+120, 30)
            predictions = my_ia.predict(data)
            label = data+label
            predictions = data+list(predictions[0])
            # One subplot (2x3 grid) per offset.
            plt.subplot(2, 3, position+1)
            plt.plot(label, label='Price')
            plt.plot(predictions, label='Predict')
            plt.legend()
            # NOTE(review): assumes my_price.price rows start with a
            # timestamp/label field -- confirm against PriceObject.
            plt.title(my_price.price[start][0])
        plt.savefig(f'Backtest/{i}-{str(datetime.now())[:10]}')
        plt.clf()
if __name__ == '__main__':
    # Generate back-test figures for every saved checkpoint.
    bunch_test()
| true |
964464061757a83d2658424f180fda713bb39c22 | Python | averagehuman/mezzanine-invites | /invites/tests/conftest.py | UTF-8 | 966 | 2.53125 | 3 | [
"BSD-2-Clause"
] | permissive |
import os
from django.test.client import Client, RequestFactory
from django.core.urlresolvers import reverse
import pytest
@pytest.fixture()
def user(db):
    """A non-admin User"""
    from django.contrib.auth.models import User
    try:
        # Reuse the user when it already exists (e.g. across tests).
        # Bug fix: the fetched object was previously discarded, so
        # `return user` raised UnboundLocalError whenever the user had
        # already been created by an earlier test.
        user = User.objects.get(username='test')
    except User.DoesNotExist:
        # Password must match the credentials the auth_client fixture uses.
        user = User.objects.create_user(
            'test', 'test@example.com', 'password'
        )
        user.is_staff = False
        user.is_superuser = False
        user.save()
    return user
@pytest.fixture()
def auth_client(user):
    """A Django test client logged in as an authenticated user"""
    client = Client()
    # Password matches the one set by the `user` fixture above.
    client.login(username=user.username, password='password')
    return client
# Root of the on-disk fixture data shipped next to this module.
data_root = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')

@pytest.fixture
def File():
    """Factory fixture: open a file located under the test data directory."""
    def FileOpener(relpath, mode="rb"):
        # Leading slashes are stripped so '/a/b' and 'a/b' are equivalent.
        return open(os.path.join(data_root, relpath.lstrip('/')), mode)
    return FileOpener
| true |
870e5b0d2e790af35775ca4f0e264218f8fab919 | Python | artslob/cyberbox | /cyberbox/config.py | UTF-8 | 1,929 | 2.75 | 3 | [
"MIT"
] | permissive | import os
from pathlib import Path
from typing import Callable
import yaml
from pydantic import BaseModel, DirectoryPath, Field, PostgresDsn, root_validator
from cyberbox.const import CONFIG_ENV_NAME
from cyberbox.env import Env
class DatabaseConfig(BaseModel):
    """Database connection settings."""
    url: PostgresDsn
    # When True every transaction is rolled back; only allowed in tests
    # (enforced by Config.check_force_rollback_only_in_testing).
    force_rollback: bool = False
class JwtConfig(BaseModel):
    """JWT signing settings."""
    algorithm: str = "HS256"
    access_token_expire_minutes: int = Field(60, gt=0)
    # Signing secret; pydantic enforces a minimum length of 10.
    secret_key: str = Field(..., min_length=10)
class Config(BaseModel):
    """Top-level application configuration."""
    environment: Env
    jwt: JwtConfig
    database: DatabaseConfig
    # Directory where uploaded files are stored; must already exist
    # (DirectoryPath validates existence).
    files_dir: DirectoryPath

    @root_validator
    def check_force_rollback_only_in_testing(cls, values: dict):
        """Reject database.force_rollback outside the test environment."""
        # fields didnt pass validation
        if "environment" not in values or "database" not in values:
            return values
        if values["environment"] is not Env.test and values["database"].force_rollback:
            raise ValueError("force_rollback should be enabled only in testing environment")
        return values
def default_loader(file: Path) -> dict:
    """ Default loader parses .yaml config file. """
    loaded = yaml.safe_load(file.read_text())
    # A scalar or list at the top level is not a usable configuration.
    if isinstance(loaded, dict):
        return loaded
    raise ValueError("Loaded config is not a mapping")
def parse_config(
    env_name: str = CONFIG_ENV_NAME, loader: Callable[[Path], dict] = default_loader
) -> Config:
    """ Parses file and returns config. Filename is provided by environment variable. """
    path_value = os.environ.get(env_name)
    if not path_value:
        msg = f"Provide environment variable with name {env_name!r} and value - path to config file"
        raise ValueError(msg)

    candidate = Path(path_value)
    if not candidate.is_file():
        raise ValueError(f"File {path_value} does not exist or not a file")

    # Delegate the actual parsing to the (pluggable) loader, then validate.
    return Config.parse_obj(loader(candidate))
| true |
a58f69863d93d7602952c27db401a7858f27c531 | Python | Hana-Luong/practiceFeb2021 | /frequencyCounter.py | UTF-8 | 2,684 | 3.734375 | 4 | [] | no_license | #FREQUENCY COUNTER
#All solutions here are not the right solution
# An anagram is a word or phrase formed by rearranging the letters of a different word or phrase,
# typically using all the original letters exactly once.
# Is there a quick way to determine if they aren't an anagram before spending more time?
# Given two strings return whether or not they are anagrams
# Input: "yes", "eys"
# Output: True
# Input: "yes", "eYs"
# Output: True
# Input: "no", "noo"
# Output: False
# For Hana's reference
# for i in range(len(keys)):
# direct[keys[i]] = values[i]
""" def anagram(str1, str2):
if len(str1) != len(str2):
return False
else:
dict = {}
dict[char1 in str1] = char2 in str2
print(anagram("no", "noo")) """
# thanks
# kkstha
def anagram(str1, str2):
    """Return True when *str1* and *str2* are anagrams (case-insensitive).

    Per the examples documented above, comparison ignores case
    ("yes" / "eYs" is an anagram) and every letter must be used exactly
    once.
    """
    # Quick rejection: anagrams must have the same length.
    if len(str1) != len(str2):
        return False
    # Compare letter frequencies.  The previous membership-count approach
    # mis-handled repeated letters (e.g. "aab" vs "abb" passed) and was
    # case-sensitive, contradicting the stated examples.
    return sorted(str1.lower()) == sorted(str2.lower())
print(anagram("nooo","oono"))
# Given a string that may have extra spaces at the start and the end,
# return a new string that has the extra spaces at the start and the end trimmed (removed)
# do not remove any other spaces.
# Input: " hello world "
# Output: "hello world"
string_length = len(" hello world ")
print(string_length)
# Needed to add the quote marks otherwise I got an error. However this returns 19#
# meaning it does count the empty space???
# you know, we need to keep the empty space between the two words
#
# appears so.
# according to some reading I did, Java will identify the unicode value, /u0020, and
# use that to id the leading and trailing whitespavce and remove it thath way.
# Not sure how we would implement that there tho
# WHAT ABOUT MAKING AN EMPTY DIC
# IF THE ELEMENTS ARE IN THE EMPTY DICT, MEANING IT IS AN EMPTY CHARACTER, REMOVE IT?
# If we do that, would Python identify the whitespace or just grab the first character we see? ~L
# We don't know
# Just give it a try to kill time
# Let communicate through writing only and turn off your mic so we don't get distracted!
def extraspaces(str3):
    """Return *str3* with leading and trailing spaces removed.

    Interior spaces are preserved, as the exercise statement above
    requires; the previous version stripped every space in the string.
    """
    # str.strip with an explicit ' ' removes only spaces at the two ends,
    # leaving the spaces between words intact.
    return str3.strip(' ')
result = extraspaces(" Hello World ")
print(result)
def strip(str4):
    """Return *str4* without leading/trailing spaces (interior kept).

    The previous body was incomplete (`str4.pop` is not valid on a
    string); strings are immutable, so a new trimmed string is returned
    instead of mutating in place.

    NOTE(review): this shadows the built-in ``str.strip`` name; kept for
    compatibility with the exercise.
    """
    start = 0
    end = len(str4)
    # Advance past leading spaces, then retreat past trailing ones.
    while start < end and str4[start] == ' ':
        start += 1
    while end > start and str4[end - 1] == ' ':
        end -= 1
    return str4[start:end]
d29c21e80f81498926e42165adcc1e4da4992fa4 | Python | Diderikdm/Advent-of-Code-2018 | /day 12 - part 1 & 2.py | UTF-8 | 958 | 2.828125 | 3 | [] | no_license | from collections import defaultdict
# Advent of Code 2018, day 12: cellular-automaton plant simulation.
with open("2018day12.txt", 'r') as file:
    data = [x for x in file.read().splitlines()]
# Pot index -> '#'/'.'; a defaultdict so unseen pots read as empty ('.').
states = defaultdict(lambda: '.', {e : x for e,x in enumerate(data[0].split(': ')[1])})
# Five-pot pattern -> next-generation state of the centre pot.
keys = {x.split(' => ')[0] : x.split(' => ')[1] for x in data[2:]}
sums = []
i = 0
# Iterate generations until at least 20 have run AND the last five sums
# grow by a constant difference (the pattern has stabilised into a glider,
# so further growth is linear).
while not sums[20:] or not all(sums[x] - sums[x-1] == sums[x-1] - sums[x-2] for x in range(-1, -6, -1)):
    i += 1
    # First and last occupied pots; the next generation can only extend
    # two pots beyond them.
    mn = next(iter(x for x in range(min(states), max(states) + 1) if states[x] == '#'))
    mx = next(iter(x for x in range(max(states), min(states) - 1, -1) if states[x] == '#'))
    new_states = {}
    for e in range(mn - 2, mx + 3):
        new_states[e] = keys[''.join([states[x] for x in range(e - 2, e + 3)])]
    states.update(new_states)
    sums.append(sum(k for k,v in states.items() if v == '#'))
# Part 1: sum of plant positions after generation 20.
print(sums[19])
# Part 2: extrapolate the now-linear growth to fifty billion generations.
print(sums[-1] + (sums[-1] - sums[-2]) * (50000000000 - i))
| true |
8421d36bd2cc50fa0e77478a76bd4264ab0c237e | Python | micka-sudo/cours | /udemy/formation_python/formation_complete_python/exr-007_nombre-mystere-comparaison/01-sources/devine_un_nombre_02.py | UTF-8 | 229 | 3.578125 | 4 | [] | no_license | nombre_mystere = 7
# Read the user's guess (input() returns a str; convert before comparing).
nombre_utilisateur = input("Quel est le nombre mystère ? ")

# TODO (translated from French): using a conditional structure, display
# whether the number entered by the user is greater than, less than or
# equal to the mystery number.
| true |
22c11d0a9ffbcde6d9f7d03e3958bb0165b9fa2e | Python | cduck/qutrits | /cirq/google/decompositions.py | UTF-8 | 4,438 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility methods related to optimizing quantum circuits."""
import cmath
import math
from typing import List, Tuple, cast
import numpy as np
from cirq import ops, linalg
from cirq.decompositions import single_qubit_op_to_framed_phase_form
from cirq.google.xmon_gates import ExpWGate
def _signed_mod_1(x: float) -> float:
    """Reduces x modulo 1 into the half-open interval [-0.5, 0.5)."""
    return (x + 0.5) % 1 - 0.5
def _deconstruct_single_qubit_matrix_into_gate_turns(
        mat: np.ndarray) -> Tuple[float, float, float]:
    """Breaks down a 2x2 unitary into gate parameters.

    Args:
        mat: The 2x2 unitary matrix to break down.

    Returns:
        A tuple containing the amount to rotate around an XY axis, the phase of
        that axis, and the amount to phase around Z. All results will be in
        fractions of a whole turn, with values canonicalized into the range
        [-0.5, 0.5).
    """
    pre_phase, rotation, post_phase = (
        linalg.deconstruct_single_qubit_matrix_into_angles(mat))

    # Figure out parameters of the actual gates we will do.
    # Convert radians into turns (fractions of 2*pi).
    tau = 2 * np.pi
    xy_turn = rotation / tau
    xy_phase_turn = 0.25 - pre_phase / tau
    total_z_turn = (post_phase + pre_phase) / tau

    # Normalize turns into the range [-0.5, 0.5).
    return (_signed_mod_1(xy_turn), _signed_mod_1(xy_phase_turn),
            _signed_mod_1(total_z_turn))
def single_qubit_matrix_to_native_gates(
        mat: np.ndarray, tolerance: float = 0
) -> List[ops.SingleQubitGate]:
    """Implements a single-qubit operation with few native gates.

    Args:
        mat: The 2x2 unitary matrix of the operation to implement.
        tolerance: A limit on the amount of error introduced by the
            construction.

    Returns:
        A list of gates that, when applied in order, perform the desired
        operation.
    """
    xy_turn, xy_phase_turn, total_z_turn = (
        _deconstruct_single_qubit_matrix_into_gate_turns(mat))

    # Build the intended operation out of non-negligible XY and Z rotations.
    result = [
        ExpWGate(half_turns=2*xy_turn, axis_half_turns=2*xy_phase_turn),
        ops.RotZGate(half_turns=2 * total_z_turn)
    ]
    # Drop rotations whose effect is negligible at the given tolerance.
    result = [
        g for g in result
        if cast(ops.BoundedEffect, g).trace_distance_bound() > tolerance
    ]

    # Special case: XY half-turns can absorb Z rotations.
    if len(result) == 2 and abs(xy_turn) >= 0.5 - tolerance:
        return [
            ExpWGate(axis_half_turns=2*xy_phase_turn + total_z_turn)
        ]

    return result
def controlled_op_to_native_gates(
        control: ops.QubitId,
        target: ops.QubitId,
        operation: np.ndarray,
        tolerance: float = 0.0) -> List[ops.Operation]:
    """Decomposes a controlled single-qubit operation into Z/XY/CZ gates.

    Args:
        control: The control qubit.
        target: The qubit to apply an operation to, when the control is on.
        operation: The single-qubit operation being controlled.
        tolerance: A limit on the amount of error introduced by the
            construction.

    Returns:
        A list of Operations that apply the controlled operation.
    """
    u, z_phase, global_phase = single_qubit_op_to_framed_phase_form(operation)
    # If the framed phase is (approximately) 1, the controlled operation
    # is (approximately) the identity and nothing needs to be emitted.
    if abs(z_phase - 1) <= tolerance:
        return []

    u_gates = single_qubit_matrix_to_native_gates(u, tolerance)
    if u_gates and isinstance(u_gates[-1], ops.RotZGate):
        # Don't keep border operations that commute with CZ.
        del u_gates[-1]

    # Conjugate a partial CZ with the framing operation on the target.
    ops_before = [gate(target) for gate in u_gates]
    ops_after = ops.inverse(ops_before)
    effect = ops.CZ(control, target) ** (cmath.phase(z_phase) / math.pi)
    # A non-trivial global phase of the framed form shows up as a Z
    # rotation on the control ("phase kickback").
    kickback = ops.Z(control) ** (cmath.phase(global_phase) / math.pi)

    return list(ops.flatten_op_tree((
        ops_before,
        effect,
        kickback if abs(global_phase - 1) > tolerance else [],
        ops_after)))
| true |
64ff32b95a946935ea8459f405e44dfc459cc4bd | Python | gomba66/holbertonschool-higher_level_programming | /0x04-python-more_data_structures/12-roman_to_int.py | UTF-8 | 1,068 | 3.46875 | 3 | [] | no_license | #!/usr/bin/python3
def roman_to_int(roman_string):
    """Convert a Roman numeral string to an integer.

    Returns 0 for None or non-string input.  Characters that are not
    valid Roman numerals are ignored (matching the original behaviour).
    Subtractive notation (IV, CM, ...) is handled by subtracting any
    value that precedes a strictly larger one.
    """
    if roman_string is None or type(roman_string) is not str:
        return 0
    # Symbol values via a lookup table instead of a seven-branch if/elif.
    values = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}
    digits = [values[c] for c in roman_string if c in values]
    total = 0
    for idx, value in enumerate(digits):
        # A symbol followed by a strictly larger one is subtractive.
        if idx + 1 < len(digits) and value < digits[idx + 1]:
            total -= value
        else:
            total += value
    return total
| true |
f476b9121386cf278745b6e55cd15dd9f4ec2efb | Python | cmdbdu/little | /countcodelines.py | UTF-8 | 958 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python
# coding:utf8
# By:dub
import os
import pprint
def get_file_list(code_dir):
    """Return the entries of *code_dir*, each joined with the directory path."""
    # os.listdir yields bare names; prepend the directory so callers get
    # usable paths (order is whatever listdir reports).
    return [os.path.join(code_dir, entry) for entry in os.listdir(code_dir)]
def countcodelines(filename):
    """Count the lines of *filename*.

    Returns a dict with:
        'len'   -- total number of lines,
        'notes' -- lines starting with '#' (comments, incl. shebang),
        'codes' -- all remaining lines.
    """
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filename, 'r') as f:
        lines = f.readlines()
    notes = sum(1 for line in lines if line.startswith('#'))
    return {'len': len(lines), 'notes': notes, 'codes': len(lines) - notes}
def main(listfile):
    """Walk *listfile* recursively, recording line counts of Python files.

    Results go into the module-level ``record`` dict, keyed by path.
    NOTE(review): the 'py' suffix test also matches extension-less names
    such as 'happy'; '.py' may be the intended filter -- confirm.
    """
    for path in listfile:
        if os.path.isdir(path):
            # Recurse into sub-directories.
            main(get_file_list(path))
        elif path.endswith('py'):
            # Bug fix: countcodelines() was called twice per file (the
            # first result was discarded); compute once and store it.
            record[path] = countcodelines(path)
if __name__ == "__main__":
    # Collected results: {file path: {'len': ..., 'notes': ..., 'codes': ...}}
    record = {}
    base_dir = './'
    ll = get_file_list(base_dir)
    main(ll)
    pprint.pprint(record)
| true |
286fdc1c4495d3be91bb15e879425f7d6888abb3 | Python | PCS1000/login_system | /project_1.py | UTF-8 | 1,442 | 3.78125 | 4 | [] | no_license | import random
import requests
# REST endpoint of a local json-server exposing a {'users': [...]} document.
url = 'http://localhost:3000/users'
def login_system():
    """Interactive login check against the local json-server user list.

    Fetches every user record and compares the entered name/password.

    NOTE(review): the password loop checks the entered password against
    EVERY user's password (not only the matched user's) and prints one
    verdict per user until the first mismatch breaks out -- the matching
    logic appears unfinished and should be tied to the matched record.
    The name loop likewise breaks after the first non-matching user.
    """
    name_input = input('Enter your name: ')
    response = requests.get(url = url)
    data = response.json()
    for name in data['users']:
        #print('input_name =', name_input, 'user_name', name)
        if(name_input == name['name']):
            pass_input = input('Enter your pass: ')
            for password in data['users']:
                if(pass_input == password['password']):
                    print('success')
                else:
                    print('Invalid Password,try again!')
                    break
        else:
            print('invalid name')
            break
# Runs immediately when the module is executed (requires the server).
login_system()
##############################Python Random Walk Algorithym##############################################
# import random
# def random_walk(n):
# x,y = 0,0
# for i in range(n):
# (dx,dy) = random.choice([(0,1), (0,-1), (1,0), (-1,0)])
# y += dy
# x += dx
# return (x,y)
# number_of_walks = 20000
# for walk_length in range(1,31):
# no_transport = 0
# for i in range(number_of_walks):
# (x,y) = random_walk(walk_length)
# distance = abs(x) + abs(y)
# if distance <= 4:
# no_transport += 1
# no_transport_percentage = float(no_transport) / number_of_walks
# print("Walk size =", walk_length, "/% of no transport = ", 100 * no_transport_percentage)
| true |