blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ff202ee5660531d20bc6e051b67a155b979d24a2
|
Python
|
pikestefan/AdventofCode2020
|
/day21.py
|
UTF-8
| 3,163
| 3.234375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 28 04:29:48 2020
@author: Lucio
"""
import re
def find_intersection(allergen, food_list, allergen_list):
    """Return the set of ingredients common to every food listing *allergen*.

    food_list and allergen_list are parallel lists: food_list[i] holds the
    ingredients of food i and allergen_list[i] its declared allergens.
    Note: if no food lists the allergen, set.intersection is called with no
    arguments and raises TypeError (same as the original behaviour).
    """
    candidate_sets = [
        set(ingredients)
        for ingredients, listed_allergens in zip(food_list, allergen_list)
        if allergen in listed_allergens
    ]
    return set.intersection(*candidate_sets)
def remove_from_list_of_lists(element, list_of_lists):
    """Return a copy of *list_of_lists* with every occurrence of *element* removed.

    The input rows are never mutated; a brand-new list of new rows is
    returned (same contract as before, expressed as a nested comprehension
    instead of a manual append loop).
    """
    return [[item for item in row if item != element] for row in list_of_lists]
# Parse the puzzle input: each line is "ing1 ing2 ... (contains a1, a2)".
with open('inputs/day21.txt') as file:
    food_list = []      # one ingredient list per food
    allergen_list = []  # the matching declared-allergen list per food
    for line in file.readlines():
        line = line.strip().split(' (')
        ingredients = line[0].split(' ')
        allergens = line[-1].replace('contains ','').replace(')','')
        allergens = allergens.split(', ')
        food_list.append( ingredients )
        allergen_list.append( allergens )

# Flatten both lists to count occurrences and extract unique values.
flattened_allergens = []
for listy in allergen_list:
    flattened_allergens += listy
flattened_ingredients = []
for listy in food_list:
    flattened_ingredients += listy
# dict.fromkeys de-duplicates while preserving first-seen order.
unique_allergens = list(dict.fromkeys(flattened_allergens))
unique_ingredients = list(dict.fromkeys(flattened_ingredients))

### Part 1
# Every ingredient that could carry some allergen is in the intersection of
# the foods declaring that allergen; the rest are allergen-free.
intersection_foods = []
for allergen in unique_allergens:
    all_intersec = find_intersection(allergen, food_list, allergen_list)
    intersection_foods.append(all_intersec)
food_w_allergens = set.union( *intersection_foods )
without_allergens = [ingredient for ingredient in unique_ingredients
                     if not ingredient in food_w_allergens]
# Count total appearances (with multiplicity) of allergen-free ingredients.
no_allergen_counter = 0
for ingredient in flattened_ingredients:
    if ingredient in without_allergens:
        no_allergen_counter += 1
print(no_allergen_counter)

### Part 2
food_list_for_id = food_list[:]
#First remove the ingredients without allergens
for no_allergen in without_allergens:
    food_list_for_id = remove_from_list_of_lists(no_allergen, food_list_for_id)
# Now, identify the allergens by constraint propagation: whenever an
# allergen's candidate set shrinks to one ingredient, lock it in and remove
# that ingredient everywhere, cycling until all allergens are resolved.
identified_foods = {}
allergens_to_identify = unique_allergens[:]
ii = 0
while len(allergens_to_identify) > 0:
    ii %= len(allergens_to_identify)  # wrap around the shrinking list
    allergen = allergens_to_identify[ii]
    intersy = find_intersection(allergen, food_list_for_id, allergen_list)
    if len(intersy) == 1:
        identified_food = list(intersy)[0]
        identified_foods[allergen] = identified_food
        food_list_for_id = remove_from_list_of_lists(identified_food,
                                                     food_list_for_id)
        allergens_to_identify.remove(allergen)
    ii += 1
# Emit the "canonical dangerous ingredient list": ingredients ordered by
# their allergen's alphabetical order, comma-joined.
sorted_allergen = []
sorted_ingredient = []
for allergen, ingredient in sorted(identified_foods.items()):
    sorted_allergen.append(allergen)
    sorted_ingredient.append(ingredient)
print(','.join(sorted_ingredient))
| true
|
76f898dcfb4c21a7665fe655c1beb39c6c239bdb
|
Python
|
boppreh/cryptopals-challenge
|
/Set 2 - Block crypto/10 - Implement CBC mode.py
|
UTF-8
| 384
| 2.796875
| 3
|
[] |
no_license
|
from utils import *

# Cryptopals Set 2, Challenge 10: implement CBC mode.
# NOTE(review): aes_ecb_encrypt/aes_ecb_decrypt, aes_cbc_decrypt,
# from_base64, read and AES all come from the project-local `utils`
# module via the star import above.
key = b'YELLOW SUBMARINE'
plaintext = b'beatles' * 10
# Sanity check: the ECB round-trip must be the identity.
assert aes_ecb_decrypt(key, aes_ecb_encrypt(key, plaintext)) == plaintext
# Decrypt the base64 challenge ciphertext in CBC mode with an all-zero IV.
ciphertext = from_base64(read('10.txt'))
plaintext = aes_cbc_decrypt(key, ciphertext, iv=b'\x00'*AES.BLOCK_SIZE)
assert plaintext.startswith(b"I'm back and I'm ringin' the bell")
assert plaintext.endswith(b"Play that funky music \n")
| true
|
cb57d9bebf8046139e557d61edac3ee5b5c9caa6
|
Python
|
osantana19/Temperature_Monitor
|
/dht11.py
|
UTF-8
| 6,954
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/python
#--------------------------------------
# ___ ___ _ ____
# / _ \/ _ \(_) __/__ __ __
# / , _/ ___/ /\ \/ _ \/ // /
# /_/|_/_/ /_/___/ .__/\_, /
# /_/ /___/
#
# dht11.py
# Basic example script to read DHT11 sensor using
# Adafruit DHT library:
# https://github.com/adafruit/Adafruit_Python_DHT
#
# Based on examples by Tony DiCola
#
# Author : Matt Hawkins
# Date : 04/09/2017
#
# http://www.raspberrypi-spy.co.uk/tag/dht11/
#
#--------------------------------------
#Didn't want to remove credit from the guy above who created the base of the code that I expanded on incase you guys want to check out the original
#Created this code to check the temperature and send out emails/text alerts when over a certain amount of degrees
#There is a cronjob that runs this script everyday at 5:01pm after everyone leaves for the day
#The script is designed to run for 23 hours and 58 minutes
#To view the cronjob type crontab -l
#To change the cronjob settings type crontab -e and change the one corresponding to this file
#Use the website: https://crontab.guru/ to help when changing the time for the cron schedule
#For anything else cronjob related resort to this article: https://www.ostechnix.com/a-beginners-guide-to-cron-jobs/
import os
import re
import Adafruit_DHT
import time
import threading
from datetime import datetime
import smtplib, ssl
# Set sensor type : Options are DHT11,DHT22 or AM2302
sensor = Adafruit_DHT.DHT22
# Set GPIO sensor is connected to.
gpio = 17
#Set date and time
now = datetime.now()
dt_string = now.strftime("%m/%d/%Y %H:%M")
#method for opening and appending text to the temperature log file
def tempfile(text=""):
    """Append *text* twice to temperature.txt in the working directory.

    Bug fix: the original body wrote the builtin ``str`` type object
    (``f.write(str)`` -- a TypeError at runtime) and never closed the file
    handle.  The handle is now managed by a context manager and the payload
    is an explicit parameter; the default keeps the old ``tempfile()``
    call signature working.
    """
    with open("temperature.txt", 'a+') as f:
        for _ in range(2):
            f.write(str(text))
SMTP_PORT = 587  # SMTP submission port used with STARTTLS (despite the original "For SSL" note)
SMTP_SERVER = "smtp.gmail.com"  # Enter type of email server run
# SECURITY NOTE(review): account credentials are hardcoded in source
# control below; move them to environment variables or a secrets store
# and rotate this password.
GMAIL_USERNAME = "noreply@easternia.com"  # Enter your address
GMAIL_PASSWORD = "newaccount12"  # Enter your password
class Emailer:
    """Minimal HTML email sender over Gmail SMTP with STARTTLS."""

    def sendmail(self, recipient, subject, content):
        """Send *content* as an HTML email to *recipient* with *subject*.

        Uses the module-level SMTP_SERVER/SMTP_PORT and the (hardcoded)
        GMAIL_USERNAME/GMAIL_PASSWORD credentials.
        """
        #Create Headers
        headers = ["From: " + GMAIL_USERNAME, "Subject: " + subject, "To: " + recipient,
                   "MIME-Version: 1.0", "Content-Type: text/html"]
        headers = "\r\n".join(headers)
        #Connect to Gmail Server
        session = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
        try:
            session.ehlo()
            session.starttls()
            session.ehlo()
            #Login to Gmail
            session.login(GMAIL_USERNAME, GMAIL_PASSWORD)
            #Send Email
            session.sendmail(GMAIL_USERNAME, recipient, headers + "\r\n\r\n" + content)
        finally:
            # Bug fix: the original wrote `session.quit` without parentheses,
            # which never called quit() and leaked the SMTP connection.  The
            # finally block also closes it when login/send raises.
            session.quit()
# Sets up where to send Emails
sender = Emailer()
sendTo = 'it@easternia.com' #It email chain
sendTo2 = '3392292459@vtext.com' #Oliver
sendTo3 = '3392224287@vtext.com' #Steve
sendTo4 = '6175195579@vtext.com' #Chris
sendTo5 = '7814241042@vtext.com' #Bob
# Sets up the email subject and contents
emailSubject = "Waltham Server Rooms Hot"
emailContent = "This is a test of my Email of temperature alerts"
emailSubject2 = "Temperature Gauge Offline!!"
emailContent2 = "Temperature gauge has been knocked offline. Failed to get reading."
# Sets the maximum temperature for room before Alerts are sent out
tempmax = (75.9) #Change value to set temperature
# Creates a function called run_check to be called upon and executed to send the emails/texts out
def run_check():
    """Read the sensor once and fan out alert emails/SMS if the room is hot.

    Reads module globals: sensor, gpio, sender, sendTo..sendTo5,
    emailSubject/emailSubject2, emailContent/emailContent2, tempmax.
    """
    humidity, temperature = Adafruit_DHT.read_retry(sensor, gpio) #Assigns humidty and temperature output to respective variables
    # NOTE(review): if read_retry returns temperature=None, the conversion
    # below raises a TypeError before the None check further down can run.
    temperature = float(temperature * 1.8 + 32) #Changes the temperature value from celsius to farenheit
    now = datetime.now() #Assigns the date and time to variable now
    dt_string = now.strftime("%m/%d/%Y %H:%M") #Formats the now variable and assigns to a new variable dt_string
    if humidity is not None and temperature is not None: #if statement to make sure sensor is working
        print('Temp={0:0.1f} Humidity={1:0.1f}% '.format(temperature, humidity),dt_string) #Placed this here for testing purposes prints out if works
        if temperature > float(tempmax): #If the temperature is over the amount set in tempmax it will send emails/texts
            # Sends an email to the "sendTo" address with the specified "emailSubject" as the subject and "emailContent" as the email content.
            sender.sendmail(sendTo, emailSubject, emailContent)
            sender.sendmail(sendTo2, emailSubject, emailContent)
            sender.sendmail(sendTo3, emailSubject, emailContent)
            sender.sendmail(sendTo4, emailSubject, emailContent)
            sender.sendmail(sendTo5, emailSubject, emailContent)
            print ('fire sent')
        else:
            print ('ok')
    else:
        sender.sendmail(sendTo, emailSubject2, emailContent2) #Sends out the notice that the reader is down to IT email
        print('Failed to get reading. Try again!')
#Sets the counter values for the loops
# NOTE(review): the two inline comments below are stale -- counter2 starts
# at 5 and triggers at >= 6, and the loop runs 143 iterations with a ~10
# minute (599 s) sleep, i.e. roughly 23.8 hours, not the values described.
counter2 = (5) #This counter is set to 29 because the counter2 if statement is triggered once it hits 30. it then takes an hour with this setup to trigger again
counter = (0) #For the while loop so our main loop keeps going for 23 hrs and 58min because the sleep time is set to 2 minutes and runs 719 times
while counter < 143:
    #runs the runcheck command and sends out emails and texts alerts
    #Needed to run temperature check seperate to save it to temp1 variable
    humidity, temperature = Adafruit_DHT.read_retry(sensor, gpio)
    # NOTE(review): a failed reading (temperature=None) makes this line
    # raise a TypeError and kills the whole monitoring loop -- confirm
    # whether that is acceptable or should be guarded.
    temperature = float(temperature * 1.8 + 32)
    now = datetime.now()
    dt_string = now.strftime("%m/%d/%Y %H:%M")
    #Saves Temperature to a variable
    temp1 = ('Temp={0:0.1f} Humidity={1:0.1f}% '.format(temperature, humidity),dt_string)
    #Turns temperature output into a string
    temp2 = (str(temp1))
    #expression to remove weird characters from string (quotes, parens, commas
    #introduced by str() on a tuple)
    temp3 = re.sub("'|\(|\)|,", '', temp2)
    print(temp3)
    # Append the reading to the rolling log file.
    f = open("temperature.txt",'a+')
    f.write(temp3)
    f.write("\r\n")
    f.close()
    if temperature > float(tempmax):
        print('fire')
        counter2 = counter2 + 1
        counter = counter + 1
    else:
        counter = counter + 1
    # Only alert after several consecutive hot readings, then reset the streak.
    if counter2 >= (6):
        emailContent = "Waltham server room temperature is high, the current temp is: " + str(int(temperature)) + " degrees"
        run_check()
        counter2 = (0)
    else:
        print ('ok')
    time.sleep(599) #Amount of seconds before the code runs again
print ('finished')
#humidity, temperature = Adafruit_DHT.read_retry(sensor, gpio)
# Reading the DHT11 is very sensitive to timings and occasionally
# the Pi might fail to get a valid reading. So check if readings are valid.
#if humidity is not None and temperature is not None:
    #print('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))
#else:
    #print('Failed to get reading. Try again!')
| true
|
46e0585bcfab64ac677ad3576c28f2b3cc798f7e
|
Python
|
JGuymont/ift2015
|
/3_tree/Tree.py
|
UTF-8
| 3,536
| 3.625
| 4
|
[] |
no_license
|
from ListQueue import ListQueue
#ADT Tree (base class)
class Tree:
    """Abstract base class for a general tree ADT.

    Concrete subclasses must implement root, parent, num_children, children
    and __len__, plus the inner Position accessors.  The traversal and
    query methods below are written purely in terms of that interface.
    """

    #inner class Position
    class Position:
        """Abstraction for the location of a single node within the tree."""

        def element( self ):
            """Return the element stored at this Position (abstract)."""
            pass

        def __eq__( self, other ):
            """Return True if *other* represents the same node (abstract)."""
            pass

        def __ne__( self, other):
            # Derived from __eq__ so subclasses only need to override one.
            return not( self == other )

    # Return the root Position of the tree (abstract).
    def root( self ):
        pass

    def _validate(self, p):
        """Return the node wrapped by Position *p*, raising if *p* is invalid."""
        if not isinstance(p, self.Position):
            raise TypeError('p must be proper Position type')
        if p._container is not self:
            raise ValueError('p does not belong to this container')
        if p._node._parent is p._node:
            # Convention: a deprecated node is made its own parent.
            raise ValueError('p is no longer valid')
        return p._node

    def _make_position(self, node):
        """Wrap *node* in a Position instance, or return None for no node."""
        return self.Position(self, node) if node is not None else None

    # Return the parent Position of p (abstract).
    def parent( self, p ):
        pass

    # Return the number of children of Position p (abstract).
    def num_children( self, p ):
        pass

    # Return an iteration of the children of Position p (abstract).
    def children( self, p ):
        pass

    # Return the total number of nodes in the tree (abstract).
    def __len__( self ):
        pass

    # Ask whether a Position is the root.
    def is_root( self, p ):
        return self.root() == p

    # Ask whether a Position is a leaf.
    def is_leaf( self, p ):
        return self.num_children( p ) == 0

    # Ask whether the tree is empty.
    def is_empty( self ):
        return len( self ) == 0

    # Return the depth of a Position (its number of ancestors).
    def depth( self, p ):
        if self.is_root( p ):
            return 0
        else:
            # Bug fix: the original called self.parent() with no argument,
            # which raised a TypeError since parent() requires a Position.
            return 1 + self.depth(self.parent( p ))

    # Return the height of a Position using depth (inefficient).
    def height1( self, p ):
        # Maximum depth over all leaves; self.positions() is not implemented
        # here and this runs in O(n^2) worst case.
        return max( self.depth( p ) for p in self.positions() if self.is_leaf( p ))

    # Return the height of a Position by descending the tree (efficient).
    def height( self, p ):
        # Height of the subtree rooted at Position p.
        if self.is_leaf( p ):
            return 0
        else:
            return 1 + max( self.height( c ) for c in self.children( p ) )

    # Print the subtree rooted at Position p using a preorder traversal.
    def preorder_print( self, p, indent = "" ):
        # Visit the current node first...
        print( indent + str( p ) )
        # ...then each child, recursively.
        for c in self.children( p ):
            self.preorder_print( c, indent + " " )

    # Print the subtree rooted at Position p using a postorder traversal.
    def postorder_print( self, p ):
        # Visit the children first...
        for c in self.children( p ):
            self.postorder_print( c )
        # ...then the node itself.
        print( p )

    # Print the subtree rooted at Position p in breadth-first order,
    # using a queue (project-local ListQueue).
    def breadth_first_print( self, p ):
        Q = ListQueue()
        # Enqueue the starting Position.
        Q.enqueue( p )
        # While there are nodes left in the queue:
        while not Q.is_empty():
            # take the next one and visit it,
            q = Q.dequeue()
            print( q )
            # then enqueue the visited node's children.
            for c in self.children( q ):
                Q.enqueue( c )
| true
|
8981873d26ecc3c8d66ba3a1ffa04589564c9f10
|
Python
|
sergiorgiraldo/Python-lang
|
/sqlite/hello.py
|
UTF-8
| 403
| 3.078125
| 3
|
[] |
no_license
|
#!/usr/bin/python
import sqlite3

# Open (or create) the SQLite database file in the current directory.
# NOTE(review): the script assumes a DEPTO(id, name) table already exists
# in foo.sqlite.db -- confirm it is created elsewhere.
conn = sqlite3.connect('foo.sqlite.db')
print('Opened database successfully')
# Insert a fixture row and persist it.
conn.execute('insert into DEPTO (name) values ("john doe") ' )
conn.commit()
print('Insert executed sucessfully')
# Read everything back and echo it.
cursor = conn.execute("SELECT id, name from DEPTO")
for row in cursor:
    print("ID = ", row[0])
    print("NAME = ", row[1])
print('Operation done successfully')
conn.close()
| true
|
65776a0e91197d9370861498746b0f8b061f5255
|
Python
|
lizzij/EI
|
/El_compiled.py
|
UTF-8
| 4,357
| 2.796875
| 3
|
[] |
no_license
|
########## All URLs ############
# Scratch project URLs for each experimental stimulus; names encode the
# lightbulb colours involved (Y/G/R) and condition prefixes (c=..., e=...)
# -- presumably control/experimental variants; confirm with the study design.
demo = 'https://scratch.mit.edu/projects/151017985/#fullscreen'
cY = 'https://scratch.mit.edu/projects/151030464/#fullscreen'
cG = 'https://scratch.mit.edu/projects/151031590/#fullscreen'
cR = 'https://scratch.mit.edu/projects/151031688/#fullscreen'
eY = 'https://scratch.mit.edu/projects/151034263/#fullscreen'
eG = 'https://scratch.mit.edu/projects/151034195/#fullscreen'
eR = 'https://scratch.mit.edu/projects/151034096/#fullscreen'
YGR = 'https://scratch.mit.edu/projects/151027157/#fullscreen'
YRG = 'https://scratch.mit.edu/projects/151028622/#fullscreen'
GRY = 'https://scratch.mit.edu/projects/151036783/#fullscreen'
GYR = 'https://scratch.mit.edu/projects/151029293/#fullscreen'
RYG = 'https://scratch.mit.edu/projects/151029942/#fullscreen'
RGY = 'https://scratch.mit.edu/projects/151036611/#fullscreen'
RG_Y = 'https://scratch.mit.edu/projects/151030159/#fullscreen'
GR_Y = 'https://scratch.mit.edu/projects/151034348/#fullscreen'
RY_G = 'https://scratch.mit.edu/projects/151036192/#fullscreen'
YR_G = 'https://scratch.mit.edu/projects/151035899/#fullscreen'
GY_R = 'https://scratch.mit.edu/projects/151036247/#fullscreen'
YG_R = 'https://scratch.mit.edu/projects/151036340/#fullscreen'
########## DEMO ############
# open browser
import webbrowser
webbrowser.open_new(demo)
# randomize lightbulb order
import random
light = ['Y', 'G', 'R']
random.shuffle(light)
print("This is the demo order: " + str(light))
########## PRACTICE ############
# open pratice trials in the order of YR_G, GYR, cG, eR
import webbrowser, os
webbrowser.open('file://' + os.path.realpath('EI_practices.html'))
############# TEST ###########
# original puzzles (9, 12, 27), i.e. set A - 1, 3, 5: (YGR, cY), (YGR, GR_Y), (eY, RY_G)
# answer: G, Y, G
# exchange G with R
# new puzzles, i.e., set B - 2, 4, 6: (YRG, cY), (YRG, RG_Y), (eY, GY_R)
# answer: R, Y, R
# two equivalent problem sets
## set A: puzzle 1, 3, 5
testPair_A = [(YGR, cY), (YGR, GR_Y), (eY, RY_G)]
puzzle_A = [1, 3, 5]
correctIV_A = ['G', 'Y', 'G']
## set B: puzzle 2, 4, 6
testPair_B = [(YRG, cY), (YRG, RG_Y), (eY, GY_R)]
puzzle_B = [2, 4, 6]
correctIV_B = ['R', 'Y', 'R']
# scramble problem order within set (zip keeps pair/number/answer aligned)
## set A
orderTest_A = list(zip(testPair_A, puzzle_A, correctIV_A))
random.shuffle(orderTest_A)
testPair_A, puzzle_A, correctIV_A = zip(*orderTest_A)
## set B
orderTest_B = list(zip(testPair_B, puzzle_B, correctIV_B))
random.shuffle(orderTest_B)
testPair_B, puzzle_B, correctIV_B = zip(*orderTest_B)
## randomize the order of the two sets (counterbalancing)
counterbalance = random.choice([1,2])
if counterbalance == 1:
    setAB = "A";
    testPair = testPair_A + testPair_B
    puzzle = puzzle_A + puzzle_B
    correctIV = correctIV_A + correctIV_B
else:
    setAB = "B";
    testPair = testPair_B + testPair_A
    puzzle = puzzle_B + puzzle_A
    correctIV = correctIV_B + correctIV_A
print ("Set " + setAB + " comes first")
## print puzzle order
print("Prepare these pictures: " + str(puzzle))
# randomly choose a graph as the correct answer for each pair
testAnswer = [0, 0, 0, 0, 0, 0]
j = 0
while j < 6:
    testAnswer[j] = random.choice(testPair[j])
    j += 1
# and open the corresponding URLs
k = 0
while k < 6:
    webbrowser.open_new(testAnswer[k])
    k += 1
# which picture comes first in a pair?
choices= [(1, 2), (1, 2), (1, 2), (1, 2), (1, 2), (1, 2)]
choiceOrder=[0,0,0,0,0,0]
l = 0
while l < 6:
    choiceOrder[l] = random.choice(choices[l])
    l += 1
print("First choice to show in each puzzle: " + str(choiceOrder))
# print correct answers
print("Correct intervention for each puzzle: " + str(correctIV))
########## update EI temp google sheet with exp set-up ############
# https://docs.google.com/spreadsheets/d/1gyluz7Whrr3KHMEmoGBosAV59_CDIm48DF90d6LghU4/edit?usp=sharing
# coding: utf-8
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# Authorize against Google Sheets with the service-account key file and
# write the generated experiment configuration to row 2 of "EI temp".
scope = ['https://spreadsheets.google.com/feeds',
         'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name('EI_clientSecret.json', scope)
ra = gspread.authorize(credentials)
sheet = ra.open('EI temp').sheet1
sheet.update_cell(2, 1, str(light))
sheet.update_cell(2, 3, str(setAB))
sheet.update_cell(2, 4, str(puzzle))
sheet.update_cell(2, 5, str(choiceOrder))
sheet.update_cell(2, 6, str(correctIV))
| true
|
b727f008e43f3adbd2083ad0e4277acd311f416d
|
Python
|
ShakteeSamant/my_program
|
/prime_numbers.py
|
UTF-8
| 814
| 4.3125
| 4
|
[] |
no_license
|
# Write a program to check given number is prime number or not.
num = int(input('Enter the number: '))
count = 0
# Count the divisors of num in [2, num]; a prime has exactly one (itself).
for i in range(2,num+1):
    if num % i == 0:
        count += 1
if count == 1:
    print ('Prime number')
else:
    print('Not Prime')
# prime numbers between the range of 100 numbers
for num in range(101):
    count = 0
    for i in range(2,num+1):
        if num % i == 0:
            count += 1
    if count == 1:
        # NOTE(review): this prints the leftover inner-loop variable i.
        # When count == 1 the inner loop ended at i == num, so the right
        # value is printed, but print(num) would be far less fragile.
        print (i)
# WAP to find prime numbers between two numbers.
def prime(l, u):
    """Print every prime in the inclusive range [l, u], one per line.

    Improvement: trial division only up to sqrt(num) with an early break,
    instead of counting every divisor up to num -- O(sqrt(n)) per number
    rather than O(n).  Printed output is identical to the original
    divisor-counting version (numbers below 2 are never printed).
    """
    for num in range(l, u+1):
        if num < 2:
            continue  # 0, 1 and negatives are not prime
        is_prime = True
        for i in range(2, int(num ** 0.5) + 1):
            if num % i == 0:
                is_prime = False
                break
        if is_prime:
            print(num)
# Read the range bounds from the user and print the primes between them.
low = int(input('Enter the lower number: '))
upp = int(input('Enter the upper number: '))
prime(low,upp)
| true
|
9d3341269579686bfd7907d22ed30826c8cb9e68
|
Python
|
hatimabualrub/COVID-Tracker
|
/ContryScreen.py
|
UTF-8
| 6,311
| 2.640625
| 3
|
[] |
no_license
|
import pandas as pd
from tkinter import *
from Components import header, footer
from requestData import requestGlobalData, requestContryData
from figures import generateLinePlot
def countryScreen(master):
    """Open a Toplevel window showing COVID statistics for a selected country.

    Builds a country dropdown from the global summary data; when OK is
    pressed with a real selection, shows the country's latest Active/
    Confirmed/Recovered/Deaths counts plus buttons that plot each series.
    """
    try:
        CountryWindow = Toplevel(master)
        CountryWindow.title("Country Statistics")
        CountryWindow.configure(background='gray90')
        CountryWindow.state('zoomed')
        CountryWindow.iconbitmap('./icon.ico')
        header(CountryWindow)
        title = Label(CountryWindow, text="Country Statistics", font="Helvetica 28 bold", fg='#009933', bg='gray90')
        title.configure(anchor=NW, pady=10)
        title.pack()
        # Fetch the list of available countries for the dropdown.
        countriesData = requestGlobalData()
        countriesDF = pd.DataFrame(countriesData)
        countries = countriesDF['Country'].tolist()
        tkvar= StringVar(CountryWindow)
        tkvar.set('Select')
        # Placeholder prompt shown until a country is selected.
        noInput = Label(CountryWindow, text="Please Select A Country", width=200, font="Helvetica 40", fg='#B00000',
                        bg='gray90')
        noInput.configure(anchor="center", pady=150)
        noInput.pack()
        menuLabel = Label(CountryWindow, text="Select Country:", font="Helvetica 18", fg='gray18')
        menuLabel.place(x=1010, y=610, anchor="nw")
        menu = OptionMenu(CountryWindow, tkvar, *countries)
        menu.config( width=15, height=2)
        menu.place(x=1200, y=600, anchor="nw")
        def onClick():
            # OK-button callback; ignores the 'Select' placeholder value.
            if not(tkvar.get() == 'Select'):
                noInput.destroy()
                data = requestContryData(tkvar.get())
                df = pd.DataFrame(data)
                # Keep only the date part of the ISO timestamp (before 'T').
                df.Date = df.Date.str.split('T')
                df.Date = df.Date.str[0]
                countryName = df['Country'][0]
                # One handler per figure button, closing over df/countryName.
                def activeBtnHandler():
                    generateLinePlot(df, 'Active', countryName)
                def confirmedBtnHandler():
                    generateLinePlot(df, 'Confirmed', countryName)
                def recoveredBtnHandler():
                    generateLinePlot(df, 'Recovered', countryName)
                def deathsBtnHandler():
                    generateLinePlot(df, 'Deaths', countryName)
                def generalBtnHandler():
                    generateLinePlot(df, ['Confirmed','Deaths', 'Recovered', 'Active'], countryName)
                title['text'] = countryName+ ' Statistics'
                # Latest (last-row) count labels for each series.
                ActiveLabel = Label(CountryWindow, text='Active Cases:', font="Helvetica 14 bold", fg='#009933', bg='gray90')
                ActiveLabel.configure(anchor="center")
                ActiveLabel.place(x=100, y=150)
                ActiveValue = Label(CountryWindow, text=df.Active.iloc[-1], font="Helvetica 14", fg='gray18', bg='gray90', width=9)
                ActiveValue.configure(anchor="center")
                ActiveValue.place(x=250, y=150)
                ConfirmedLabel = Label(CountryWindow, text='Confirmed Cases:', font="Helvetica 14 bold", fg='#009933', bg='gray90')
                ConfirmedLabel.configure(anchor="center")
                ConfirmedLabel.place(x=390, y=150)
                ConfirmedValue = Label(CountryWindow, text=df.Confirmed.iloc[-1], font="Helvetica 14", fg='gray18', bg='gray90', width=9)
                ConfirmedValue.configure(anchor="center")
                ConfirmedValue.place(x=580, y=150)
                RecoveredLabel = Label(CountryWindow, text='Recovered Cases:', font="Helvetica 14 bold", fg='#009933', bg='gray90')
                RecoveredLabel.configure(anchor="center")
                RecoveredLabel.place(x=730, y=150)
                RecoveredValue = Label(CountryWindow, text=df.Recovered.iloc[-1], font="Helvetica 14", fg='gray18', bg='gray90', width=9)
                RecoveredValue.configure(anchor="center")
                RecoveredValue.place(x=920, y=150)
                DeathsLabel = Label(CountryWindow, text='Death Cases:', font="Helvetica 14 bold", fg='#009933', bg='gray90')
                DeathsLabel.configure(anchor="center")
                DeathsLabel.place(x=1060, y=150)
                DeathsValue = Label(CountryWindow, text=df.Deaths.iloc[-1], font="Helvetica 14", fg='gray18', bg='gray90', width=9)
                DeathsValue.configure(anchor="center")
                DeathsValue.place(x=1200, y=150)
                # Per-series plot buttons plus the combined "general" plot.
                btnActive = Button(CountryWindow,
                                   text="Active Figure",
                                   command= activeBtnHandler,
                                   font="Helvetica 12 bold", bg='#D8D8D8', fg='#009933', width=18)
                btnActive.place(x=120, y=200, anchor="nw")
                btnConfirmed = Button(CountryWindow,
                                      text="Confirmed Figure",
                                      command=confirmedBtnHandler,
                                      font="Helvetica 12 bold", bg='#D8D8D8', fg='#009933', width=18)
                btnConfirmed.place(x=440, y=200, anchor="nw")
                btnRecovered= Button(CountryWindow,
                                     text="Recovered Figure",
                                     command=recoveredBtnHandler,
                                     font="Helvetica 12 bold", bg='#D8D8D8', fg='#009933', width=18)
                btnRecovered.place(x=760, y=200, anchor="nw")
                btnDeaths= Button(CountryWindow,
                                  text="Deaths Figure",
                                  command=deathsBtnHandler,
                                  font="Helvetica 12 bold", bg='#D8D8D8', fg='#009933', width=18)
                btnDeaths.place(x=1070, y=200, anchor="nw")
                btnGeneral= Button(CountryWindow,
                                   text="General Statistics Figure",
                                   command=generalBtnHandler,
                                   font="Helvetica 16 bold", bg='#009933', fg='white', width=26, height=1)
                btnGeneral.place(x=530, y=320, anchor="nw")
        btnOK = Button(CountryWindow,
                       text="OK",
                       command=onClick,
                       font="Helvetica 9 bold", bg='#009933', fg='white', height=2)
        btnOK.place(x=1330, y=603,anchor="nw")
        footer(CountryWindow)
    except:
        # NOTE(review): bare except silently swallows every error (including
        # KeyboardInterrupt/SystemExit) and just closes the window; consider
        # catching Exception, logging the traceback, and surfacing a message.
        CountryWindow.destroy()
| true
|
17345168004530c2d51c48189d29333629abf197
|
Python
|
lucgiffon/psm-nets
|
/code/data/make_dataset.py
|
UTF-8
| 2,625
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
Functions for downloading data set.
"""
import tempfile
import urllib.request
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import os
import numpy as np
import scipy.io as sio
from palmnet.data import Mnist, Cifar10, Cifar100
from skluc.utils.osutils import read_matfile, download_file
from skluc.utils import logger
def load_svhn_data():
    """Download the SVHN train/test .mat files and return their parsed contents.

    Returns a (train, test) tuple as produced by ``read_matfile``.  The
    files are fetched into a temporary directory that is removed when the
    function returns; only the parsed arrays survive.
    """
    data_root_url = "http://ufldl.stanford.edu/housenumbers/"
    data_leaf_values = {
        "train": "train_32x32.mat",
        "test": "test_32x32.mat",
    }
    data_arrays = {}
    with tempfile.TemporaryDirectory() as d_tmp:
        for leaf_name, leaf in data_leaf_values.items():
            leaf_url = data_root_url + leaf
            matfile_path = download_file(leaf_url, d_tmp, leaf)
            data_arrays[leaf_name] = read_matfile(matfile_path)
    return data_arrays["train"], data_arrays["test"]
def _download_single_dataset(output_dirpath, dataname):
    """Load dataset *dataname* and save it under <project_dir>/<output_dirpath>/<dataname>.npz.

    Labeled datasets (per MAP_NAME_CLASSES_PRESENCE) are stored as
    x_train/y_train/x_test/y_test arrays; unlabeled ones as a single
    x_train array.

    NOTE(review): relies on the module-level global ``project_dir``, which
    is only assigned under the __main__ guard -- importing this module and
    calling this function directly would raise NameError.
    """
    if MAP_NAME_CLASSES_PRESENCE[dataname]:
        (x_train, y_train), (x_test, y_test) = MAP_NAME_DATASET[dataname]()
        map_savez = {"x_train": x_train,
                     "y_train": y_train,
                     "x_test": x_test,
                     "y_test": y_test
                     }
    else:
        X = MAP_NAME_DATASET[dataname]()
        map_savez = {"x_train": X}
    output_path = project_dir / output_dirpath / dataname
    logger.info(f"Save {dataname} to {output_path}")
    np.savez(output_path, **map_savez)
@click.command()
@click.argument('dataset', default="all")
@click.argument('output_dirpath', type=click.Path())
def main(output_dirpath, dataset):
    """ Runs data processing scripts to turn raw data from (../raw) into
    cleaned data ready to be analyzed (saved in ../processed).
    """
    # NOTE(review): the default dataset value "all" is not a key of
    # MAP_NAME_DATASET, so invoking the command without an explicit
    # dataset argument raises KeyError instead of downloading everything.
    _download_single_dataset(output_dirpath, dataset)
# Registry mapping dataset names to their loader callables.
MAP_NAME_DATASET = {
    "svhn": load_svhn_data,
    "cifar100": Cifar100.load_data,
    "cifar10": Cifar10.load_data,
    "mnist": Mnist.load_data,
}
# Whether each dataset ships with class labels (True -> train/test split).
MAP_NAME_CLASSES_PRESENCE = {
    "svhn": True,
    "cifar100": True,
    "cifar10": True,
    "mnist": True,
}
if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # not used in this stub but often useful for finding various files
    project_dir = Path(__file__).resolve().parents[2]
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    main()
| true
|
1746a52075b2869f9f6326cf8073ff683c181894
|
Python
|
JustinBis/UCF-COP-3223
|
/Programs - Python/diapers.py
|
UTF-8
| 802
| 3.9375
| 4
|
[] |
no_license
|
#######################
# Diaper Money #
# By Justin Bisignano #
# 8/29/2012 #
#######################
# Constants
DAYS_PER_MONTH = 30
CALS_PER_LB = 3500        # calories in one pound of body weight
CALSLOST_PER_BEER = 20    # calories avoided per beer not drunk
DOZEN = 12
# Get inputs
diapercost = float(input("What is the cost of a dozen diapers?\n"))
diapersperday = int(input("How many diapers does the baby go through a day?\n"))
beercost = float(input("What is the cost of a single beer?\n"))
# Calculations
# Monthly diaper spend = per-diaper price * diapers/day * days/month.
diaperspend = diapercost/DOZEN*DAYS_PER_MONTH*diapersperday
# Whole beers that spend would have bought (floor division).
fewerbeers = diaperspend//beercost
lbslost = fewerbeers*CALSLOST_PER_BEER/CALS_PER_LB
# Print outputs
print('You will spend $'+str(diaperspend),'on diapers in a month.')
print('You will drink',int(fewerbeers),'fewer beers in a month.')
# NOTE(review): "loose" should read "lose" in the message below.
print('As a result, you will loose',lbslost,'pounds in a month.')
| true
|
38a7317f60fae41ee02e201888527b564924ddf3
|
Python
|
sebischair/ThesaurusLabelPropagation
|
/src/baselines/helpers.py
|
UTF-8
| 1,309
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
from os.path import join
import numpy as np
import pandas as pd
def get_train_test(path):
    """Load the train and test synset label series from directory *path*.

    Each file (y_train.txt / y_test.txt) is space-separated with the word
    in the first (index) column and an int32 ``synset`` column.

    Returns:
        (train, test): two pandas Series of synset ids indexed by word.
    """
    def _read(filename):
        # Improvement: pd.read_table is deprecated in favour of
        # read_csv(sep=...); the shared helper also removes the duplicated
        # reader configuration.
        return pd.read_csv(
            join(path, filename),
            sep=" ",
            dtype={"synset": np.int32},
            index_col=0)["synset"]

    return _read("y_train.txt"), _read("y_test.txt")
def df_evaluation_init(index, train, pred, test):
    """Assemble the evaluation frame joining train labels, predictions and test labels.

    Args:
        index: iterable of words that forms the frame's "word" index.
        train: Series of training synset labels; words absent from it get
            the sentinel value -1 in y_train.
        pred: DataFrame with "synset" and "current_max_similarity" columns
            aligned to the same words.
        test: Series of test synset labels, joined as y_test (NaN if absent).

    Returns:
        DataFrame with columns y_train, y_pred, y_conf, y_top3_classes, y_test.
    """
    df_predicted = pd.DataFrame(index=index).rename_axis("word")
    # Left-join train labels; missing words become the -1 sentinel.
    df_predicted = df_predicted.join(train, how="left").fillna(-1).rename(columns={"synset": "y_train"})
    df_predicted = df_predicted.assign(y_pred=pred["synset"])
    df_predicted = df_predicted.assign(y_conf=pred["current_max_similarity"])
    # Placeholder [-1, -1, -1] top-3 lists -- presumably filled in later by
    # the caller; confirm against the evaluation pipeline.
    df_predicted = df_predicted.assign(y_top3_classes=[[-1, -1, -1]] * len(df_predicted))
    df_evaluation = df_predicted.join(test, how="left")
    df_evaluation.rename(columns={"synset": "y_test"}, inplace=True)
    return df_evaluation
def apply_synset_prediction_if_more_similar(pred, synset_id, predictions):
    """Record *synset_id* for every word whose new similarity beats its
    current best, updating the stored maximum as well.

    Mutates the *pred* DataFrame in place; *predictions* is an iterable of
    (word, similarity) pairs.
    """
    for word, similarity in predictions:
        current_best = pred.loc[word].current_max_similarity
        if current_best < similarity:
            pred.loc[word, "synset"] = synset_id
            pred.loc[word, "current_max_similarity"] = similarity
| true
|
f9b85fdba98cdcdfd0a1b3aa90eadf523b7c7e3c
|
Python
|
kns94/coding-practice
|
/russianDoll_dp.py
|
UTF-8
| 2,407
| 3.015625
| 3
|
[] |
no_license
|
"""The russian doll algorithm"""
import operator
class Solution(object):
    def maxEnvelopes(self, envelopes):
        """
        :type envelopes: List[List[int]]
        :rtype: int

        Return the maximum number of envelopes that can be nested, where an
        envelope fits inside another only if BOTH its width and height are
        strictly smaller.

        Bug fix: the original single-pass greedy (with a one-step lis[-2]
        backtrack) does not compute a true longest increasing subsequence
        and returns wrong answers on some inputs; replaced with the
        standard approach -- sort by width ascending (height descending on
        ties so equal widths can never chain), then patience-sorting LIS on
        heights.  O(n log n), and the debug prints are removed.
        """
        from bisect import bisect_left  # local import: keeps module-level deps unchanged

        if not envelopes:
            return 0
        # sorted() (not in-place sort) preserves the caller's list order,
        # matching the original, which never mutated its argument.
        ordered = sorted(envelopes, key=lambda e: (e[0], -e[1]))
        # tails[k] = smallest height that can end an increasing chain of
        # length k+1 seen so far.
        tails = []
        for _, height in ordered:
            pos = bisect_left(tails, height)
            if pos == len(tails):
                tails.append(height)
            else:
                tails[pos] = height
        return len(tails)
# Sample inputs; only the first is active, the rest are kept for manual testing.
# NOTE(review): `input` shadows the builtin of the same name.
input = [[2,100],[3,200],[4,300],[5,500],[5,400],[5,250],[6,370],[6,360],[7,380]]
#input = [[2,1],[4,1],[6,2],[8,3],[10,5],[12,8],[14,13],[16,21],[18,34],[20,55]]
#input = [[30,50],[12,2],[3,4],[12,15]]
#input = [[15,22],[8,34],[44,40],[9,17],[43,23],[4,7],[20,8],[30,46],[39,36],[45,14],[24,19],[24,36],[31,34],[32,19],[29,13],[16,48],[8,36],[44,2],[11,5],[2,50],[29,6],[18,38],[15,49],[22,37],[6,20],[25,11],[1,50],[19,40],[45,35],[37,21],[4,29],[40,5],[4,49],[1,45],[14,32],[14,37],[23,22],[31,21],[2,36],[43,4],[21,32],[41,2],[44,32],[36,20],[22,45],[3,41],[44,29],[29,33],[42,2],[38,17],[43,26],[30,15],[28,12],[33,30],[49,7],[8,14],[1,9],[41,25],[7,15],[26,32],[11,33],[12,45],[33,7],[16,34],[39,1],[20,49],[50,45],[14,29],[50,41],[1,45],[14,43],[49,20],[41,37],[43,22],[45,19],[20,21],[28,19],[2,1],[7,49],[3,3],[49,48],[34,35],[10,2]]
print(Solution().maxEnvelopes(input))
| true
|
a346864f1325ffe55c6667399cf122f364d003ce
|
Python
|
MoAshraf601/recommending-meals-for-diabetes-graduation-project-
|
/visuals.py
|
UTF-8
| 2,500
| 3.078125
| 3
|
[] |
no_license
|
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
# Robustness fix: get_ipython() returns None outside an IPython/Jupyter
# session, so the original unconditional run_line_magic call raised
# AttributeError when this module was imported from a plain interpreter.
_ipython = get_ipython()
if _ipython is not None:
    _ipython.run_line_magic('matplotlib', 'inline')
###########################################
import itertools
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from time import time

# Apply seaborn's default plot styling globally.
sns.set()
def plot_histogram(df, features, preprocessed=False):
    """Draw one histogram per feature in a 2x3 subplot grid.

    Args:
        df: DataFrame holding the feature columns.
        features: iterable of (up to 6) column names to plot.
        preprocessed: if True, values are assumed scaled to [0, 1]
            (30 bins); otherwise the raw range (0, 1000) with 50 bins.
    """
    fig = plt.figure(figsize=(20,10))
    for i, feature in enumerate(features, 1):
        ax = fig.add_subplot(2, 3, i)
        if(preprocessed):
            ax.hist(df[feature], bins=30, range=(0, 1))
        else:
            ax.hist(df[feature], bins=50, range=(0, 1000))
        ax.set_title('%s feature histogram' %feature)
    fig.suptitle("Histogram viasuals", fontsize = 16, y = 1.03)  # NOTE(review): "viasuals" typo shows in the rendered title
    fig.tight_layout()
def plot_box_plot(df, features):
    """Draw one seaborn box plot per feature in a 2x3 subplot grid.

    Args:
        df: DataFrame holding the feature columns.
        features: iterable of (up to 6) column names to plot.
    """
    fig = plt.figure(figsize=(20,10))
    for i, feature in enumerate(features, 1):
        ax = fig.add_subplot(2, 3, i)
        sns.boxplot(data=df[feature])
        ax.set_title('%s feature box plot' %feature)
    fig.suptitle("Box plot viasuals", fontsize = 16, y = 1.03)  # NOTE(review): "viasuals" typo shows in the rendered title
    fig.tight_layout()
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render the confusion matrix *cm* as an annotated heatmap.

    classes   : axis tick labels, in matrix order.
    normalize : if True, convert each row to fractions before plotting.
    title     : figure title.
    cmap      : matplotlib colormap for the heatmap.
    The matrix is also echoed to stdout.
    """
    if normalize:
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        cm = cm.astype('float') / row_totals
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    positions = np.arange(len(classes))
    plt.xticks(positions, classes, rotation=45)
    plt.yticks(positions, classes)

    # Integer cells for raw counts, two decimals for normalized fractions.
    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > cutoff else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
| true
|
d0531284e28a070f411678838ec5c03381d991c7
|
Python
|
icdatanalysis/MachineLearning-Python-2020.1
|
/classification/random_forest/eval_random_forest.py
|
UTF-8
| 2,650
| 3.25
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 27 12:55:37 2019
@author: Jairo Souza
"""
# Importando os pacotes
from sklearn.metrics import f1_score, recall_score, accuracy_score, precision_score
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
# Loading the data
# The dataset holds information on Indian companies collected by auditors, with the goal
# of building a model that classifies suspicious firms. Features are audit metrics:
# scores, risks, etc.
df = pd.read_csv('data/audit_risk.csv')
# Describing the dataset
df.info()
df.describe()
# Visualizing the dataset
df.head(5)
# Dropping the location column:
df = df.drop('LOCATION_ID', axis=1)
# Checking whether any rows contain null values:
df[df.isnull().values.any(axis=1)]
# Filling null values with the median:
df = df.fillna(df.median())
# Defining the dependent/independent variables.
X = df.iloc[:, :17].values
y = df.iloc[:, -1].values
# Creating the training and test subsets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Dictionary holding every classifier to be compared
estimators = {'Decision Tree': DecisionTreeClassifier(criterion = 'entropy', random_state = 0),
              'KNN': KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2),
              'Random Forest': RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0),
              'SVC': SVC(kernel = 'rbf', random_state = 0)}
# DataFrame that will store the final results of each classifier
df_results = pd.DataFrame(columns=['clf', 'acc', 'prec', 'rec', 'f1'], index=None)
# Iterating over the classifiers
for name, estim in estimators.items():
    # print("Treinando Estimador {0}: ".format(name))
    # Fitting the classifier on the training set
    estim.fit(X_train, y_train)
    # Predicting with the fitted model on the test set
    y_pred = estim.predict(X_test)
    # Storing each classifier's metrics (macro-averaged) in the results dataframe
    df_results.loc[len(df_results), :] = [name, accuracy_score(y_test, y_pred), precision_score (y_test, y_pred, average = 'macro'),
                                          recall_score(y_test, y_pred, average = 'macro'), f1_score(y_test, y_pred, average = 'macro')]
# Showing the final results
df_results
| true
|
8cf5754f83491bd33107247a47521eae2b2b9ceb
|
Python
|
AyodejiAfolabi/pythoncodes
|
/getUserInput.py
|
UTF-8
| 5,614
| 3.953125
| 4
|
[] |
no_license
|
# salary=input("please input your salary")
# bonus=input('please input your bonus')
# payCheck=float(salary)+float(bonus)
# print(payCheck*5)
# print('{0:d}+{1:d}'.format(salary,bonus))
# Trying to get and display todays dates
# import datetime
# currentTime=datetime.date.today()
# print(currentTime.strftime('%d,%b,%Y'))
if 5>2:
#do Not put space between your 5 and 2;
print("Five is greater than two!")
#you can know the type of data you are working with
# number=34
# print(number)
# compl=-2+5j
# newcompl=4j
# print(compl*newcompl)
#string operations in Python
# word="Mkhitaryan Roma"
# print(word[9:12])
# print(word.strip())
# print(word.__len__())
# print(len(word))
# If statement with OR and 'AND'
# if 3<5 and 2==8:
# print('Yes')
if 3<5 or 2==8:
print('YEP')
# print(word.split('a '))
# PYTHON OPERATORS
data=18
data=data|6
# print(data/3)
print(data)
# and, or, is, not, is not. STATEMENT is PYTHON
a=6
b=6
print(a is not b)
#Arrays are of 4 types in Python
# 1. List
# A List is a type of array that is ordered and changeable
# e.g
thislist=list(('Mango','PawPaw','Oranges','Tangerine','Cashew','Coconut'))
# print(len(thislist))
# myList=['Apple','dinosaur','Cyrup','Ball']
# thislist.append('Tangerine')
# print(thislist)
# x=(thislist.count('Tangerine'))
# print(x)
# def myFunc(e):
# return len(e)
# myList.sort(reverse=False)
# print(myList)
# Dictionaries
diction={
}
diction['eyes']=2
diction['nose']=2
diction['sex']='Male'
diction['hair']='infinity'
del diction['hair']
print(diction)
# TUPLE TYPE OF ARRAY (UNCHANGEABLE)
thistuple=tuple(("Banana","orange","cocnjut","lemon","lime"))
print(thistuple[1])
print(thistuple)
#SET TYPE OF ARRAY (UNORDERED UNINDEXED)
thisset=set(("OYO","Ibadan","kogi","lokoja","Kwara"))
thisset.add('Borno')
thisset.remove('OYO')
print(thisset)
print(len(thisset))
# DICTIONARY TYPE OF ARRAY INDEXED UNORDERED CHANGEABLE
dicti={
"banana":"yellow",
"egg":"Brown"
}
dicti['name']="Fruit"
print(dicti)
#PYthon Conditions
a=44
b=54
if a>b :
print('a is greater than b')
elif a==b:
print('a is equal to b')
else:
print('b is greater than a')
# print(i)
# a=10
# while a<20:
# if a==15:
# break
# a+=1
# print(a)
# nam=input('Please input your name ')
# age=input('pleae type you age ')
# town=input('where are you from ')
# myDiction={'name':nam,
# 'age':age,
# "city":town
# }
# print(myDiction)
# if name=='Giroud':
# print('Benz is better than you')
# FOr Loop in PYthon
array=['david','oba','buzzy','tchilas','tobi']
for x in array:
if x=='buzzy':
print('Its your birthday dude')
break
print(x)
for x in range(0,10,2):
print(x)
# for x in array:
# if x=='buzzy':
# break
# print('Its your birthday dude')
# print(x)
# def tri_recursion(k):
# if(k>0):
# result = k+tri_recursion(k-1)
# print(result)
# else:
# result = 0
# return result
# print("\n\nRecursion Example Results")
# tri_recursion(5)
def func(e=2):
    """Print the sum of 2 and *e* (demo of a default parameter)."""
    total = 2 + e
    print(total)

func(6)
# DEfault function parameters
def check(country='Nigeria'):
    """Print a greeting naming *country* (demo of a default parameter value)."""
    print("I'm from "+country)
check('Britain')
check()
# Return values
def ret(x=8):
    """Return *x* multiplied by five (defaults to 8, i.e. 40)."""
    return 5 * x

print(ret())
# LANMBDA function
lamn=lambda i: i*2
print(lamn(2))
lamn2=lambda x,y,z: (x+y)/z
print(lamn2(2,6,4))
def my(x):
return 8*3
print(my(6))
# def myfunc(n):
# return lambda i: i*n
# doubler = myfunc(2)
# tripler = myfunc(3)
# val = 11
# print(i)
# print("Doubled: " + str(doubler(val)) + ". Tripled: " + str(tripler(val)))
def fun(n):
return lambda i: i*n
double = fun(4)
print(double(3))
def myfunc(n):
return lambda i: i*n
doubler = myfunc(2)
print(doubler(6))
# Classes and Objects
class turtl:
    """Toy class storing an age and a name, with a greeting method."""

    def __init__(self, age, name):
        self.age = age
        self.name = name

    def par(self):
        """Print a 'Hello Mr.' greeting using the stored name."""
        print('Hello Mr.' + self.name)
p2=turtl(14,'KLI')
p2.par()
class joy:
    """Toy class mirroring `turtl`, with a plain 'Hello' greeting."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def par(self):
        """Print a greeting built from the stored name."""
        greeting = 'Hello ' + self.name
        print(greeting)
p1=joy('Adeola',344)
p1.par()
import mymodule
mymodule.hel('Jonathan')
# p1=turtl(12,'TY')
# print(p1.age)
# print(p1.name)
# print(p1)
myName='Faithful'
class lautech:
    """Toy class with default constructor arguments and a two-line greeting."""

    def __init__(self, name='Daniel', age=35):
        self.name = name
        self.age = age

    def prince(self):
        """Print the stored name and age on two lines."""
        print('Hello ' + self.name)
        print("I'm {0:d} years old".format(self.age))
faith=lautech(myName,22)
faith.prince()
Tolu=lautech('Tchu',22)
Tolu.age=21
# Tolu.prince()
Tolu.prince()
tayo=lautech()
tayo.prince()
def mul(x,y):
return x*y
ret=mul(3,2)
print(ret)
import mults as mul
print(mul.multiply(2,10))
squa=mul.square(7)
name=mul.person1['name']
nose=mul.person1['nose']
print(squa)
print('my name is '+name+' and I have {0:d} nose'.format(nose))
multsDir=dir(mul)
print(multsDir)
import platform as pt
dit=dir(pt)
print(dit)
div=pt.system()
from mults import set2019 as glorius
leng=len(glorius)
bestPlayer=glorius[leng-1]
print(bestPlayer+' is the best player in our set')
trial=0
def game(k=5):
    """Interactive number-guessing game with *k* remaining attempts.

    Reads a guess from stdin, compares it with a random number in [0, 10],
    and recurses with one fewer attempt on a miss.
    NOTE(review): when the guess is within 2 of the target, the close-guess
    branch recurses even after k has reached 0, so the attempt limit is not
    enforced on that path — confirm intended behaviour.
    """
    k=k-1
    guess=int(input('Try your luck and win $100,000 by guessing a number between 1 and 10 Guess Here: '))
    # Local import keeps the demo self-contained.
    import random as rd
    numb=rd.randint(0,10)
    print('The computer guessed:',numb)
    if guess==numb:
        print("SO lucky you have a great guessing power and you've won the Cash price ")
    elif guess-numb==1 or guess-numb==2 or guess-numb==-1 or guess-numb==-2 :
        print(k)
        print('{0:d} trials more'.format(k))
        print('So close try again bruhhhhhhhhhhhhhhhh')
        game(k)
    elif (k==0):
        print('SORRY GAMEOVER')
    else:
        print('{0:d} trials more'.format(k))
        print('Try again')
        game(k)
game()
| true
|
4f0ab635cd8520f8e4ddcf629cb71216a019a0fd
|
Python
|
MahmoudHegazi/excel_api
|
/my_first_AI.py
|
UTF-8
| 2,922
| 2.859375
| 3
|
[] |
no_license
|
# Data Preprocessing Template
# Importing the libraries
import numpy as np
import excel
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
import pandas as pd
from tablib import Dataset
import numpy as np
import matplotlib.pyplot as plt
# Importing the dataset
# NOTE(review): this module uses Python 2 print statements below; it only runs
# under Python 2.
# NOTE(review): read_csv on an .xlsx file will not parse the sheet correctly;
# `dataset` is unused afterwards (read_excel below is the real load) — confirm.
dataset = pd.read_csv('Data.xlsx', error_bad_lines=False)
#X = dataset.iloc[:, :-1].values
#y = dataset.iloc[:, -1].values
#dates_row = dataset.loc[4, :]
#xt = pd.Series(dates_row).values
nx = pd.read_excel('Data.xlsx')
lf = pd.DataFrame(nx)
table_names = []
#df = pd.DataFrame(nx, columns= ['Name', 'P Number', 'age', 'love','python'])
# Collect the column names (iterating a DataFrame yields its column labels).
for d in lf:
    table_names.append(d)
df = pd.DataFrame(nx, columns= table_names)
# Number of rows available in the selected columns.
min_length = len(df[table_names])
#print len(df[table_names])
# Dump every cell, column by column.
for i in table_names:
    for xxx in range(min_length):
        print(df[i][xxx])
# Raw per-column values and their cleaned ('nan' -> '------') counterparts.
Names = []
pNumbers = []
age = []
love = []
python = []
filtered_names = []
filtered_pNumbers = []
filtered_age = []
filtered_love = []
filtered_python = []
def show_table(names, numbers, ages, loves, pythons):
    # Print one pipe-separated row per record, then a separator line.
    for i in range(len(names)):
        print names[i] + " | " + numbers[i] + " | " + ages[i] + " | " + loves[i] + " | " + pythons[i]
    print "---------------------------------------------------------------"
# Copy each DataFrame column into its plain-list counterpart.
for i in range(len(df['Name'])):
    #print df['Name'][i]
    Names.append(df['Name'][i])
    pNumbers.append(df['P Number'][i])
    age.append(df['age'][i])
    love.append(df['love'][i])
    python.append(df['python'][i])
print "Append to database one row values using forloop"
# Replace missing cells (stringified NaN) with a '------' placeholder.
for x in range(len(Names)):
    name = str(Names[x])
    pr = str(pNumbers[x])
    m_age = str(age[x])
    m_love = str(love[x])
    m_python = str(python[x])
    #filter
    if name == 'nan':
        name = '------'
        filtered_names.append(name)
    else:
        filtered_names.append(name)
    if pr == 'nan':
        pr = '------'
        filtered_pNumbers.append(pr)
    else:
        filtered_pNumbers.append(pr)
    if m_age == 'nan':
        m_age = '------'
        filtered_age.append(m_age)
    else:
        filtered_age.append(m_age)
    if m_love == 'nan':
        m_love = '------'
        filtered_love.append(m_love)
    else:
        filtered_love.append(m_love)
    if m_python == 'nan':
        m_python = '------'
        filtered_python.append(m_python)
    else:
        # NOTE(review): this appends `pr` (the phone number) instead of
        # `m_python` — looks like a copy-paste bug; confirm intended value.
        filtered_python.append(pr)
def runner():
    # Render the cleaned table.
    show_table(filtered_names, filtered_pNumbers, filtered_age, filtered_love, filtered_python)
runner()
#print df.iloc[0]
#print df['Name']
# Splitting the dataset into the Training set and Test set
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
| true
|
c6e68bac35e4ec7dd7880e504d02e518493e73d7
|
Python
|
tracyqan/TBSpider
|
/process.py
|
UTF-8
| 4,127
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: tracyqan time:2018/12/19
import pandas as pd
import pyecharts
from wordcloud import WordCloud
import jieba
import re
import matplotlib.pyplot as plt
def make_cloud(data):
    """Build a word-cloud PNG ('wordcloud.png') from the 'name' column of *data*."""
    print('开始绘制商品信息词云')
    name = ''.join(list(data['name']))
    # Strip leftover search-highlight markup from the scraped names.
    # NOTE(review): the pattern has no space in '<spanclass=H>' — confirm it
    # actually matches the raw markup in the data.
    word = re.sub(r'<spanclass=H>|</span>', '', name)
    # Segment the Chinese text into words for the cloud.
    words = ' '.join(jieba.cut(word))
    w = WordCloud(
        font_path= 'simhei.ttf',
        width=800,
        height=400,
        mask=plt.imread('mask.jpg')
    )
    w.generate(words)
    filename = 'wordcloud.png'
    w.to_file(filename)
    print('图表已保存至{}'.format(filename))
def create_bar(data):
    """
    Plot the relationship between price and units sold as a bar chart
    (output strings and axis labels are intentionally kept in Chinese).
    """
    print('开始绘制价格与出售数量的关系图')
    # Units sold at 1 yuan or less
    count_1 = data[data['price']<=1]['saleCount'].sum()
    # Units sold at 1-3 yuan
    count_2 = data[(data['price']>1) & (data['price']<=3)]['saleCount'].sum()
    # Units sold at 3-5 yuan
    count_3 = data[(data['price'] > 3) & (data['price'] <= 5)]['saleCount'].sum()
    # Units sold at 5-7 yuan
    count_4 = data[(data['price'] > 5) & (data['price'] <= 7)]['saleCount'].sum()
    # Units sold at 7-9 yuan
    count_5 = data[(data['price'] > 7) & (data['price'] <= 9)]['saleCount'].sum()
    # Units sold at 9-12 yuan
    count_6 = data[(data['price'] > 9) & (data['price'] <= 12)]['saleCount'].sum()
    # Units sold at 12-15 yuan
    count_7 = data[(data['price'] > 12) & (data['price'] <= 15)]['saleCount'].sum()
    # Units sold above 15 yuan
    count_8 = data[data['price'] > 15]['saleCount'].sum()
    title = '圣诞商品价格与出售数量的关系图'
    x = ['1元以下', '1元-3元', '3元-5元', '5元-7元', '7元-9元', '9元-12元', '12元-15元', '15元以上']
    y = [count_1, count_2, count_3, count_4, count_5, count_6, count_7, count_8]
    bar = pyecharts.Bar()
    bar.add(title, x, y, mark_point=['max', 'min', 'average'])
    bar.render(title+'.html')
    print('图表已保存至{}.html'.format(title))
def make_bar(data):
    """
    Plot the relationship between discount level and units sold as a bar chart
    (output strings and axis labels are intentionally kept in Chinese).
    """
    print('开始绘制折扣与出售数量的关系图')
    # Discount ratio = real price / list price (0 when the list price is 0).
    discount = [x/y if y!=0 else 0 for x,y in zip(data['realPrice'], data['price'] )]
    data['discount'] = discount
    # Units sold with no discount
    count_1 = data[data['discount'] == 0]['saleCount'].sum()
    # Units sold at 0-20% of list price
    count_2 = data[(data['discount'] > 0) & (data['discount'] <= 0.2)]['saleCount'].sum()
    # Units sold at 20-40% of list price
    count_3 = data[(data['discount'] > 0.2) & (data['discount'] <= 0.4)]['saleCount'].sum()
    # Units sold at 40-60% of list price
    count_4 = data[(data['discount'] > 0.4) & (data['discount'] <= 0.6)]['saleCount'].sum()
    # Units sold at 60-80% of list price
    count_5 = data[(data['discount'] > 0.6) & (data['discount'] <= 0.8)]['saleCount'].sum()
    # Units sold above 80% of list price
    count_6 = data[data['discount'] > 0.8]['saleCount'].sum()
    title = '圣诞商品折扣与出售数量的关系图'
    x = ['打不折', '0-2折', '2-4折', '4-6折', '6-8折', '8折以']
    y = [count_1, count_2, count_3, count_4, count_5, count_6]
    bar = pyecharts.Bar()
    bar.add(title, x, y, mark_point=['max', 'min', 'average'])
    bar.render(title+'.html')
    print('图表已保存至{}.html'.format(title))
def create_map(data):
    """Aggregate sales per region and render them on a China map."""
    print('开始绘制全国销售量示例图')
    # The first space-separated token of 'place' is used as the region name.
    countrys = [x.split(' ')[0] for x in data['place']]
    data['countrys'] = countrys
    countrys_set = list(set(countrys))
    values = []
    title = '全国销售量示例图'
    for i in countrys_set:
        values.append(data[data['countrys'] == i]['saleCount'].sum())
    # NOTE(review): the local name `map` shadows the builtin inside this function.
    map = pyecharts.Map(title, width=800, height=400)
    map.add('', countrys_set, values, maptype='china', is_visualmap=True, visual_text_color="#000")
    map.render(title+'.html')
    print('图表已保存至{}.html'.format(title))
def main():
    """Load the scraped goods CSV and generate the word cloud.

    The other chart builders are present but currently disabled.
    """
    filename = 'goods.csv'
    data = pd.read_csv(filename)
    make_cloud(data)
    #create_bar(data)
    #make_bar(data)
    #create_map(data)
if __name__ == '__main__':
    main()
| true
|
d6215f5163adbe5da5f0351bd37eeb8bf3265b87
|
Python
|
Nicolas-31/IKT441_DataMining_2018
|
/CNN - Convolutional Neural Network/main.py
|
UTF-8
| 1,513
| 2.65625
| 3
|
[] |
no_license
|
import pickle
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
import keras
import numpy as np
# Training hyper-parameters.
batch_size = 25
num_classes = 2
epochs = 10
# Load a pre-pickled ((x_train, y_train), (x_test, y_test)) split from disk.
# NOTE(review): unpickling is only safe for trusted files; the file handle is
# never closed explicitly.
#(x_train, y_train),(x_test, y_test) = pickle.load(open('female.p', 'rb'))
(x_train, y_train),(x_test, y_test) = pickle.load(open('male.p', 'rb'))
def reshape(x):
    """Convert *x* to a float32 ndarray and scale values from [0, 255] into [0, 1]."""
    arr = np.asarray(x).astype('float32')
    return arr / 255
# Scale inputs into [0, 1] as float32.
x_train = reshape(x_train)
x_test = reshape(x_test)
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)
# One-hot encode the labels for categorical crossentropy.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Stack of conv layers with growing filter counts, then a small dense head.
model = Sequential()
model.add(Conv2D(8, 3, activation='relu', input_shape=np.shape(x_train)[1:]))
model.add(Conv2D(16, 3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(24, 3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, 3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.25))
model.add(Dense(128,activation='relu'))
model.add(Dropout(0.5))
# NOTE(review): sigmoid output combined with categorical_crossentropy on a
# 2-class one-hot target is unusual; softmax is the conventional pairing — confirm.
model.add(Dense(num_classes, activation='sigmoid'))
model.summary()
model.compile(
    loss=keras.losses.categorical_crossentropy,
    optimizer=keras.optimizers.Adadelta(),
    metrics=['accuracy'])
model.fit(x_train,y_train,batch_size=batch_size,epochs=epochs,verbose=2,validation_data=(x_test,y_test))
score = model.evaluate(x_test,y_test,verbose=2)
| true
|
048c67bbfc1879a4fab26b3a14139c65d9d3b0c8
|
Python
|
simonfong6/service-manager
|
/examples/launch/launch_async.py
|
UTF-8
| 2,877
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
"""
Service launcher
"""
from subprocess import Popen
from subprocess import PIPE
from shlex import split
from typing import List
def run(command):
    """Spawn *command* (a shell-style string) with piped text-mode
    stdin/stdout and return the Popen handle."""
    argv = split(command)
    return Popen(argv, stdout=PIPE, stdin=PIPE, universal_newlines=True)
def ask():
    """Run ask.py and feed it lines typed by the user until it exits.

    NOTE(review): Popen.communicate() waits for process exit and closes the
    pipes, so at most one message is delivered; a second loop iteration
    would fail — confirm intended behaviour.
    """
    out = run('python3 ask.py')
    while out.poll() is None:
        msg = input("Need input: ")
        output, errors = out.communicate(msg)
        print(output, end='')
    # Final exit code of the child.
    print(out.poll())
def kill_procs(procs: List[Popen]):
    """Forcibly terminate every process in *procs*."""
    for running in procs:
        running.kill()
def forever(num_procs: int = 2):
    """Spawn *num_procs* copies of forever.py and interactively relay one
    stdin message to a chosen process.

    Enter -1 at the proc prompt to exit the loop.
    NOTE: Popen.communicate() can only be called once per process, so each
    proc accepts a single message.
    """
    procs = []
    for _ in range(num_procs):
        proc = run('python3 forever.py')
        procs.append(proc)

    indexes = list(range(num_procs))
    # BUG FIX: str.join requires strings; joining the raw ints raised
    # TypeError (servers() below already uses the correct str-mapped form).
    index_options = ','.join(str(index) for index in indexes)

    while True:
        proc_index = input(f"Choose proc [{index_options}]: ")
        proc_index = int(proc_index)
        if proc_index == -1:
            break
        print(f"Chosen proc({proc_index})")
        proc = procs[proc_index]
        msg = input("Input for proc: ")
        output, errors = proc.communicate(msg)
        print(f"Output from proc({proc_index})")
        print(output)
def ask_forever():
    """Run forever.py and feed it user input until it exits.

    NOTE(review): as in ask(), communicate() closes the pipes after the
    first message, so the loop effectively runs once — confirm.
    """
    out = run('python3 forever.py')
    while out.poll() is None:
        msg = input("Need input: ")
        output, errors = out.communicate(msg)
        print((output,errors), end='')
    # Final exit code of the child.
    print(out.poll())
def run_no_in(command, log_file_name):
    """Spawn *command* with stdout redirected to *log_file_name*; return the Popen.

    The child process inherits the log file's descriptor, so the parent's
    handle is closed right after spawning — the original kept it open,
    leaking one file handle per launched process.
    """
    cmd_seq = split(command)
    log_file = open(log_file_name, 'w+')
    try:
        proc = Popen(cmd_seq, stdout=log_file)
    finally:
        # Child keeps its own inherited fd; only the parent's copy closes.
        log_file.close()
    return proc
def servers(num_procs: int = 2):
    """Spawn *num_procs* server.py instances on ports 8000+index, then run an
    interactive command loop (pid / kill / quit) against a chosen process.

    Enter -1 at the proc prompt to exit the loop.
    NOTE(review): 'quit' kills every process but does not break out of the
    loop; the trailing `continue` is redundant — confirm intended behaviour.
    """
    procs = []
    for index in range(num_procs):
        port = 8000 + index
        logfile_name = f'server_{index}.log'
        proc = run_no_in(f'python3 server.py --port {port} --log_file {logfile_name}', logfile_name)
        procs.append(proc)
    indexes = list(range(num_procs))
    index_options = ','.join([str(index) for index in indexes])
    while True:
        proc_index = input(f"Choose proc [{index_options}]: ")
        proc_index = int(proc_index)
        if proc_index == -1:
            break
        print(f"Chosen proc({proc_index})")
        proc = procs[proc_index]
        msg = input("Command for proc: ")
        if msg == 'pid':
            pid = proc.pid
            print(f"PID: {pid}")
        elif msg == 'kill':
            print(f"Killing proc({proc_index})")
            proc.kill()
        elif msg == 'quit':
            print("Killing processes...")
            kill_procs(procs)
        else:
            print("Invalid command, skipping...")
            continue
def main(args):
    """Entry point: run the interactive server manager."""
    servers()
if __name__ == '__main__':
    from argparse import ArgumentParser
    # The parser currently defines no options; kept for future flags.
    parser = ArgumentParser()
    args = parser.parse_args()
    main(args)
| true
|
02709c674dc39b165c103ff0b6902ed3d032432a
|
Python
|
Brac24/GameOfLife
|
/board/board_creator.py
|
UTF-8
| 1,334
| 3.90625
| 4
|
[] |
no_license
|
import random
from cell.cell import Cell
def randomstate(rows, cols):
    """Return a rows x cols board (list of row lists) where each cell is 1
    with probability ~0.06 (random() >= .94) and 0 otherwise."""
    board = []
    for _ in range(rows):
        row_cells = [1 if random.random() >= .94 else 0 for _ in range(cols)]
        board.append(row_cells)
    return board
def render(board):
    """Print *board* to stdout: 'c' marks a live cell (value 1), space a dead one."""
    rows = len(board)
    cols = len(board[0])
    for row in range(rows):
        line = ''
        for col in range(cols):
            # BUG FIX: the original used `is 1`, an identity check that only
            # works because CPython caches small ints; use value equality.
            if board[row][col] == 1:
                line += 'c'
            else:
                line += ' '
        print(line)
def render_world(cell_board):
    """Print a board of Cell objects: '0' marks a living cell, space a dead one."""
    total_rows = len(cell_board)
    total_cols = len(cell_board[0])
    board_display = ''
    for r in range(total_rows):
        rendered = ''
        for c in range(total_cols):
            rendered += '0' if cell_board[r][c].alive else ' '
        board_display += rendered + '\n'
    print(board_display)
# Demo entry point: build and print one random 10 x 70 board.
if __name__ == '__main__':
    board_random = randomstate(10,70)
    render(board_random)
| true
|
f32cc91b0c158aaeefd2a019c606107f5632a70c
|
Python
|
Billuc/3DEngine
|
/viewer.py
|
UTF-8
| 9,549
| 3.703125
| 4
|
[] |
no_license
|
from node import NamedNode3D
from matrix import Matrix
from math import cos, sin, pi, tan, atan
# Object corresponding to a set of points and data to transform them
class Viewer():
    """Maintains a set of named 3D nodes plus camera state, and projects them
    onto a 2D screen of width x height pixels.

    The camera sits on the x-axis at distance `radius` from the origin and
    looks towards it; all rotations/translations are accumulated into
    `transformation_matrix` rather than moving the camera itself.
    """
    def __init__(self, p_width, p_height, p_v_fov = pi/2, p_radius = 10):
        # Width and height of display
        self.width = float(p_width)
        self.height = float(p_height)
        # Fields of view (vertical and horizontal)
        self.v_fov = float(p_v_fov)
        self.h_fov = 2 * atan( tan(self.v_fov / 2) * p_width / p_height )
        # Distance between the origin and the "camera"
        self.radius = float(p_radius)
        # The transformation matrix (rotations)
        # The zz element is -1 because the vertical axis on displays is towards the bottom
        self.transformation_matrix = Matrix(
            1,0,0,0,
            0,1,0,0,
            0,0,-1,0,
            0,0,0,1
        )
        # Arrays containing the original nodes, their transformed and projected nodes
        self.nodes = []
        self.transformed_nodes = []
        self.nodes_2d = []

    # Method to add a new node (names must be unique)
    def add_node(self, node):
        if self.find(node.name) is None:
            self.nodes.append(node)
        else:
            raise BaseException("A node with this name already exists")

    # Calculating the intersection of the screen plan and the line passing through the camera and the transformed node
    # Plan's equation : x = 0 (the constant doesn't really matter as long as it is lower than self.radius; 0 is just convenient)
    # Line's equation : (x,y,z) = (camera pos) + t * ((transformed node) - (camera pos))
    # Camera position = (self.radius, 0, 0)
    def transform_to_2d(self, node):
        # To avoid dividing by 0
        # Basically, if the transformed node is at the same x as the camera, we won't add the node
        if (node.x / node.w) == self.radius:
            t = 0
        else:
            # Solving
            t = - self.radius / ( (node.x / node.w) - self.radius )
        # If we are in front of the camera, the node is added
        if t > 0:
            # Calculating the relative coordinates on the screen
            y = ( t * node.y / node.w ) / ( 2 * self.radius * tan(self.h_fov / 2.) ) + 1 / 2.
            z = ( t * node.z / node.w ) / ( 2 * self.radius * tan(self.v_fov / 2.) ) + 1 / 2.
            distance = ( (node.x / node.w) - self.radius )**2 + (node.y / node.w)**2 + (node.z / node.w)**2
            distance = distance**(1/2.)
            # Creating the projected node (with absolute coordinates)
            # We add the distance to camera in the z coordinate
            node_2d = NamedNode3D(self.width * y, self.height * z, distance, 1, node.name)
            return node_2d
        else:
            return None

    # From a 3D node calculate the transformed point then the 2D node
    def calc_one_node(self, node):
        # Calculating the transformed node by multiplying the transform matrix by the node
        tranformed = self.transformation_matrix.multiply_by_node(node)
        # Calculating the 2d coordinates on the screen
        return self.transform_to_2d(tranformed)

    # Method that transforms and projects the nodes
    def calc_nodes_2d(self):
        self.nodes_2d = []
        # Looping through all the nodes
        for n in self.nodes:
            node_2d = self.calc_one_node(n)
            if node_2d is not None:
                # Looking for the index to insert the node to (ordered by distance decreasing)
                index = 0
                while index < len(self.nodes_2d) and self.nodes_2d[index].z > node_2d.z:
                    index += 1
                # Adding the node
                self.nodes_2d.insert(index, node_2d)

    # Rotating the view horizontally (around the z-axis)
    # Updating the transform matrix
    # NOTE(review): defaults to radians here but to degrees in rotate_vertically — confirm.
    def rotate_horizontally(self, angle, is_radians = True):
        # If the angle is in degrees, we convert it to radians
        if not is_radians:
            angle = angle * pi / 180
        # Updating the transform matrix (rotating it by multiplicating by the rotation matrix)
        self.transformation_matrix = get_rot_z_matrix(angle).multiply(self.transformation_matrix)

    # Rotating the view vertically (around the y-axis)
    # Updating the transform matrix
    def rotate_vertically(self, angle, is_radians = False):
        # If the angle is in degrees, we convert it to radians
        if not is_radians:
            angle = angle * pi / 180
        # Updating the transform matrix (rotating it by multiplicating by the rotation matrix)
        self.transformation_matrix = get_rot_y_matrix(angle).multiply(self.transformation_matrix)

    # Rotating the camera horizontally (around the z-axis)
    # Updating the transform matrix
    def rotate_camera_horizontally(self, angle, is_radians = True):
        # If the angle is in degrees, we convert it to radians
        if not is_radians:
            angle = angle * pi / 180
        # Updating the transform matrix
        # Translate so the camera is at the origin, rotate, translate back.
        self.transformation_matrix = get_translate_x_matrix(-self.radius).multiply(self.transformation_matrix)
        self.transformation_matrix = get_rot_z_matrix(angle).multiply(self.transformation_matrix)
        self.transformation_matrix = get_translate_x_matrix(self.radius).multiply(self.transformation_matrix)

    # Rotating the camera vertically (around the y-axis)
    # Updating the transform matrix
    def rotate_camera_vertically(self, angle, is_radians = False):
        # If the angle is in degrees, we convert it to radians
        if not is_radians:
            angle = angle * pi / 180
        # Updating the transform matrix
        # Translate so the camera is at the origin, rotate, translate back.
        self.transformation_matrix = get_translate_x_matrix(-self.radius).multiply(self.transformation_matrix)
        self.transformation_matrix = get_rot_y_matrix(angle).multiply(self.transformation_matrix)
        self.transformation_matrix = get_translate_x_matrix(self.radius).multiply(self.transformation_matrix)

    # Translating the view horizontally (y-axis)
    # Updating the transform matrix
    def translate_horizontally(self, distance):
        # Updating the transform matrix (translating it by multiplicating by the translation matrix)
        self.transformation_matrix = get_translate_y_matrix(distance).multiply(self.transformation_matrix)

    # Translating the view vertically (z-axis)
    # Updating the transform matrix
    def translate_vertically(self, distance):
        # Updating the transform matrix (translating it by multiplicating by the translation matrix)
        self.transformation_matrix = get_translate_z_matrix(distance).multiply(self.transformation_matrix)

    # Zooming out by increasing the distance between the camera and the origin
    def increase_radius(self, delta):
        self.radius += delta

    # Zooming in by decreasing the distance between the camera and the origin (floored at 1)
    def decrease_radius(self, delta):
        if self.radius - delta <= 1:
            self.radius = 1
        else:
            self.radius -= delta

    # Updating width and height and horizontal field of view
    # Used when resize
    def update_dimensions(self, p_width, p_height):
        self.width = float(p_width)
        self.height = float(p_height)
        self.h_fov = 2 * atan( tan(self.v_fov / 2) * p_width / p_height )

    # Returns to the original view by resetting the transform matrix
    def reset_transform_matrix(self):
        self.transformation_matrix = Matrix(
            1,0,0,0,
            0,1,0,0,
            0,0,-1,0,
            0,0,0,1
        )

    # Returns the calculated (projected) node corresponding to the given name, or None
    def find(self, pname):
        for node in self.nodes_2d:
            if node.name == pname:
                return node
        return None
# Function to get the transform matrix of a rotation around the x-axis
def get_rot_x_matrix(angle):
    """Return the 4x4 homogeneous matrix for a rotation of *angle* radians about x."""
    return Matrix(
        1, 0, 0, 0,
        0, cos(angle), -sin(angle), 0,
        0, sin(angle), cos(angle), 0,
        0, 0, 0, 1
    )
# Function to get the transform matrix of a rotation around the y-axis
def get_rot_y_matrix(angle):
    """Return the 4x4 homogeneous matrix for a rotation of *angle* radians about y."""
    return Matrix(
        cos(angle), 0, sin(angle), 0,
        0, 1, 0, 0,
        -sin(angle), 0, cos(angle), 0,
        0, 0, 0, 1
    )
# Function to get the transform matrix of a rotation around the z-axis
def get_rot_z_matrix(angle):
    """Return the 4x4 homogeneous matrix for a rotation of *angle* radians about z."""
    return Matrix(
        cos(angle), -sin(angle), 0, 0,
        sin(angle), cos(angle), 0, 0,
        0, 0, 1, 0,
        0, 0, 0, 1
    )
# Function to get the transform matrix of a translation along the x-axis
# (original comment said "rotation" — copy-paste error)
def get_translate_x_matrix(distance):
    """Return the 4x4 homogeneous matrix translating by *distance* along x."""
    return Matrix(
        1, 0, 0, distance,
        0, 1, 0, 0,
        0, 0, 1, 0,
        0, 0, 0, 1
    )
# Function to get the transform matrix of a translation along the y-axis
# (original comment said "rotation" — copy-paste error)
def get_translate_y_matrix(distance):
    """Return the 4x4 homogeneous matrix translating by *distance* along y."""
    return Matrix(
        1, 0, 0, 0,
        0, 1, 0, distance,
        0, 0, 1, 0,
        0, 0, 0, 1
    )
# Function to get the transform matrix of a translation along the z-axis
# (original comment said "rotation" — copy-paste error)
def get_translate_z_matrix(distance):
    """Return the 4x4 homogeneous matrix translating by *distance* along z."""
    return Matrix(
        1, 0, 0, 0,
        0, 1, 0, 0,
        0, 0, 1, distance,
        0, 0, 0, 1
    )
| true
|
60b2cf2d2e7b229dab63d551ac8205d3c067e6d1
|
Python
|
samanthaalcantara/codingbat2
|
/warm-2/array_count9.py
|
UTF-8
| 226
| 3.046875
| 3
|
[] |
no_license
|
"""
Date: 06 07 2020
Author: Samantha Alcantara
Question: Given an array of ints, return the number of 9's in the array.
"""
#Answer
def array_count9(nums):
    """Return the number of 9's in *nums*.

    BUG FIX: the previous version returned True/False based only on the
    first four elements (the logic of CodingBat's array_front9), while the
    stated problem asks for a count of 9s over the whole list.
    """
    return nums.count(9)
| true
|
d3089247e2bd44d22f2b8e14d264da4142822d44
|
Python
|
dektox/btc_trade
|
/threading_test.py
|
UTF-8
| 1,857
| 2.703125
| 3
|
[] |
no_license
|
import threading
import requests
import time
from csv import reader
class Parser(threading.Thread):
    """Daemon thread that polls btc-trade.com.ua for new deals on one
    currency pair and appends them to '<pair>.csv'."""
    def __init__(self, pair):
        super(Parser, self).__init__()
        self.daemon = True
        self.pair = pair

    def run(self):
        # Column order used both for reading existing rows and writing new ones.
        header = ['pub_date', 'amnt_base', 'amnt_trade', 'id', 'price', 'type', 'user']
        prev_ids = []
        # Seed prev_ids with the order ids already in the CSV so the same
        # deal is not appended twice across restarts (skips the header row).
        with open('{}.csv'.format(self.pair), 'r') as file:
            order_ids = reader(file, delimiter=';')
            for order_id in order_ids:
                if order_id[3] != 'id':
                    prev_ids.append(int(order_id[3]))
        # Poll loop: fetch deals, append unseen ones, sleep, repeat.
        # NOTE(review): prev_ids grows without bound over a long run.
        while True:
            try:
                resp = requests.get(url='https://btc-trade.com.ua/api/deals/{}'.format(self.pair))
                resp = resp.json()
                offers_loaded = 0
                csv = open(file='{}.csv'.format(self.pair), mode='a+', encoding='cp1251')
                for offer in resp:
                    if offer['id'] not in prev_ids:
                        offers_loaded = offers_loaded + 1
                        prev_ids.append(offer['id'])
                        for key in header:
                            csv.write(str(offer[key]) + ';')
                        csv.write('\n')
                csv.close()
                print('{} {} Loaded {} new offers'.format(self.pair, time.ctime(), offers_loaded))
            except KeyboardInterrupt:
                break
            except Exception as ex:
                # Best-effort error logging; back off longer and keep polling.
                with open('errors_{}.log'.format(self.pair), 'a+', encoding='utf8') as log_file:
                    log_file.write(str(ex) + '\n')
                time.sleep(5)
            else:
                time.sleep(2)
time.sleep(2)
def main():
    """Start one parser thread per currency pair and block on them.

    The threads only stop on error/interrupt, since Parser.run loops forever.
    """
    p1 = Parser('btc_uah')
    p2 = Parser('krb_uah')
    p1.start()
    p2.start()
    p1.join()
    p2.join()
if __name__ == '__main__':
    main()
| true
|
ca0c47286a9566b1ea2315497c36684221c0a425
|
Python
|
brendenlake/capture-tablet
|
/objects/viz_objects.py
|
UTF-8
| 919
| 2.65625
| 3
|
[] |
no_license
|
import glob
import re
import os
import matplotlib.pyplot as plt
import math
from PIL import Image
#
# Display all the instances of each object type
#
# output : files in folder imgs_by_type
#
# dirs = ['balloon','bowlingpin','butterfly','horseshoe']
# Object categories to aggregate (one output page per category).
dirs = ['car','butterfly','horse','airplane']
imgs_by_type = 'objects_by_type' # directory to store aggregated images
if not os.path.exists(imgs_by_type): os.makedirs(imgs_by_type)
for d in dirs:
    mydir = d + '_shadow'
    # NOTE(review): existence is checked on `d` but the files are globbed
    # from `mydir` (d + '_shadow') — confirm which directory is intended.
    if os.path.exists(d):
        fns = glob.glob(mydir+'/*.png')
        plt.figure(1,figsize=(30,30))
        plt.clf()
        # Square-ish subplot grid large enough for every instance.
        nrow = math.ceil(math.sqrt(len(fns)))
        for s,fn in enumerate(fns):
            plt.subplot(nrow, nrow, s+1)
            # 'LA' = grayscale with alpha channel.
            IMG = Image.open(fn).convert('LA')
            plt.imshow(IMG)
            frame = plt.gca()
            frame.axes.get_xaxis().set_visible(False)
            frame.axes.get_yaxis().set_visible(False)
            plt.title(str(s+1))
        plt.savefig(imgs_by_type + '/' + d + '.pdf')
| true
|
e3c6172b06294cd0fa89ff7037801ef55525cd79
|
Python
|
weningerleon/InformationContent_HCP
|
/dataloader.py
|
UTF-8
| 6,235
| 2.578125
| 3
|
[] |
no_license
|
################################################################################
# Copyright (C) 2021 by RWTH Aachen University #
# License: #
# #
# This software is dual-licensed under: #
# • Commercial license (please contact: lfb@lfb.rwth-aachen.de) #
# • AGPL (GNU Affero General Public License) open source license #
################################################################################
# Author: Leon Weninger #
################################################################################
################################################################################
import settings
from os.path import join as opj
import numpy as np
import pandas as pd
import nibabel as nib
#%% Diffusion Connectivity Matrices
def get_dCM(subject, parcellation, zero_diagonal=True, symmetric=True):
    """Load one subject's structural (diffusion) connectivity matrix.

    Loads <subject>_<parcellation>_matrix.npy from settings.image_dir_sc,
    optionally symmetrizes it / zeroes its diagonal, and normalizes it so
    that the absolute entries sum to 1.

    Raises Exception for the (zero_diagonal=False) combination, which is
    not implemented.
    """
    dcm_path = opj(settings.image_dir_sc, subject + "_" + parcellation + "_matrix.npy")
    dcm = np.load(dcm_path)
    if symmetric and zero_diagonal:
        # Keep the strict upper triangle and mirror it: result is symmetric
        # with a zero diagonal.
        dcm = np.triu(dcm, k=1)
        dcm = dcm + dcm.transpose()
    elif zero_diagonal:
        # set diagonal elements to zero (flat indexing with stride n+1 hits
        # exactly the diagonal of a square matrix)
        s = dcm.shape
        dcm = dcm.flatten()
        dcm[0::s[0]+1] = 0
        dcm = dcm.reshape(s)
    else:
        raise Exception("Not implemented!!!")
    # Normalize total absolute connectivity to 1.
    dcm = dcm / np.sum(np.abs(dcm))
    return dcm
# Return the complete (unclipped) fMRI time series for one subject/task.
# task="REST" concatenates REST1+REST2; phase_encoding="both" concatenates LR+RL.
def get_fMRI_ts_unclipped(subject, parcellation, task, phase_encoding="LR"):
    """Load a parcellated fMRI time series as a (regions, timepoints) array.

    Recurses to concatenate runs along the time axis for the composite
    task "REST" and for phase_encoding="both". The series is demeaned
    per region before being returned.
    """
    if task == "REST":
        ts1 = get_fMRI_ts_unclipped(subject, parcellation, "REST1", phase_encoding)
        ts2 = get_fMRI_ts_unclipped(subject, parcellation, "REST2", phase_encoding)
        return np.concatenate((ts1, ts2), axis=1)
    if phase_encoding == "both":
        ts1 = get_fMRI_ts_unclipped(subject, parcellation, task, phase_encoding="LR")
        ts2 = get_fMRI_ts_unclipped(subject, parcellation, task, phase_encoding="RL")
        return np.concatenate((ts1, ts2), axis=1)
    assert parcellation in ("yeo_100", "yeo_400"), "wrong parcellation"
    assert task in settings.datasets, "wrong task"
    assert phase_encoding in ("LR", "RL"), "wrong phase encoding"
    file = parcellation + "_" + subject + "_" + task + "_" + phase_encoding + "_ts.npy"
    ts_path = opj(settings.image_dir_ts, file)
    ts = np.load(ts_path)
    # demean ts, sometimes there was a slight offset
    ts = ts - np.mean(ts, axis=1, keepdims=True)
    return ts
# Build a dict of all task time series for one subject, optionally clipped
# per region using thresholds derived from the REST1 run.
def get_fMRI_dict(subject, parcellation, phase_encoding, clipped=True):
    """Return {task: time series} for every task in settings.datasets."""
    data = {}
    for t in settings.datasets:
        ts = get_fMRI_ts_unclipped(subject, parcellation, t, phase_encoding=phase_encoding)
        data[t] = ts
    if clipped:
        # Per-region thresholds from REST1: clamp every dataset to REST1's
        # [min, max] range, tightened to at most +/- 4 standard deviations.
        for i in range(data["REST1"].shape[0]):
            thr_p = min(data["REST1"][i, :].max(), data["REST1"][i, :].std() * 4)
            thr_n = max(data["REST1"][i, :].min(), -data["REST1"][i, :].std() * 4)
            for t in settings.datasets:
                data[t][i, :] = np.clip(data[t][i, :], thr_n, thr_p)
    return data
# Return one task's time series, clipped per region.
def get_fMRI_ts_clipped(subject, parcellation, task, phase_encoding="LR"):
    """Return the requested time series clipped row-wise at min/max/±4 std.

    NOTE(review): despite the variable name `rest1_data`, the thresholds are
    computed from the *requested task itself*, not from REST1 (compare
    get_fMRI_dict, which does use REST1) — confirm this is intended.
    """
    ts = get_fMRI_ts_unclipped(subject, parcellation, task, phase_encoding=phase_encoding)
    rest1_data = get_fMRI_ts_unclipped(subject, parcellation, task, phase_encoding=phase_encoding)
    for i in range(rest1_data.shape[0]):
        thr_p = min(rest1_data[i, :].max(), rest1_data[i, :].std() * 4)
        thr_n = max(rest1_data[i, :].min(), -rest1_data[i, :].std() * 4)
        # Clip once per row. The original re-clipped the same row once per
        # entry of settings.datasets, which is redundant: np.clip with fixed
        # bounds is idempotent, so repeated application changes nothing.
        ts[i, :] = np.clip(ts[i, :], thr_n, thr_p)
    return ts
def get_atlas(parcellation):
    """Load the Schaefer 17-network parcellation volume.

    Returns (voxel data array, affine). Raises Exception for parcellations
    other than "yeo_100" / "yeo_400".
    """
    parc_files = {
        "yeo_100": "Schaefer2018_100Parcels_17Networks_order_FSLMNI152_1mm.nii.gz",
        "yeo_400": "Schaefer2018_400Parcels_17Networks_order_FSLMNI152_1mm.nii.gz",
    }
    if parcellation not in parc_files:
        raise Exception("sorry, not implemented")
    base = opj(settings.image_dir_base, "Schaefer2018_LocalGlobal", "Parcellations", "MNI")
    atlas = nib.load(opj(base, parc_files[parcellation]))
    return atlas.get_fdata(), atlas.affine
def get_mni152():
    """Load the MNI ICBM 152 2009a symmetric T1 template; return voxel data."""
    template = opj(
        settings.image_dir_base,
        "mni_icbm152_nlin_sym_09a",
        "mni_icbm152_t1_tal_nlin_sym_09a.nii",
    )
    return nib.load(template).get_fdata()
# Returns a randomized matrix preserving the degree, weight, and strength
# distributions of the input (simultaneous row/column permutations).
def randomMatrix(w, iter=100):
    """Apply `iter` random simultaneous row/column permutations to a copy of w."""
    size = w.shape[0]
    result = w.copy()
    for _ in range(iter):
        perm = np.random.permutation(size)
        # Both orderings below are equivalent (rows and columns commute);
        # the coin flip is kept to mirror the original's random stream.
        if np.random.uniform() < 0.5:
            result = result[:, perm][perm, :]
        else:
            result = result[perm, :][:, perm]
    return result
# Splits the fixation / initiation blocks from the t-fMRI time series
def remove_fixblocks(data, task_name, tr=0.72):
    """Split a task-fMRI series into (task frames, fixation/initiation frames).

    Keyword arguments:
    data      -- array whose last axis is time (1-D input is promoted to 2-D)
    task_name -- key into the per-task fixation-block table below (seconds)
    tr        -- repetition time in seconds, used to convert seconds to frames
    """
    switcher = {
        "WM": [(0, 8), (64, 79), (135, 150), (206, 221), (277, 291)],
        "GAMBLING": [(0, 8), (36, 51), (79, 94), (122, 137), (165, 180)],
        "MOTOR": [(0, 8)],
        "LANGUAGE": [(0, 8)],
        "SOCIAL": [(0, 8), (31, 46), (69, 84), (107, 122), (145, 160), (183, 198)],
        "RELATIONAL": [(0, 8), (44, 60), (98, 114), (150, 166)],
        "EMOTION": [(0, 8)],
    }
    fix_blocks = switcher[task_name]
    # Bug fix: np.bool and np.int were deprecated aliases of the builtins and
    # were removed in NumPy 1.24 — use bool/int directly (same behavior).
    istask = np.ones(data.shape[-1]).astype(bool)
    for block in fix_blocks:
        start = int(np.rint(block[0] / tr))
        end = int(np.rint(block[1] / tr))
        istask[start:end] = False
    if settings.pe == "both":
        # LR+RL runs are concatenated, so the block table repeats halfway in.
        for block in fix_blocks:
            start = int(np.rint(block[0] / tr) + data.shape[-1] / 2)
            end = int(np.rint(block[1] / tr) + data.shape[-1] / 2)
            istask[start:end] = False
    if len(data.shape) == 1:
        data = np.expand_dims(data, axis=0)
    onlytask = data[:, istask]
    onlyfix = data[:, ~istask]
    return onlytask, onlyfix
| true
|
618bfa17f16a648edff7fada3951f6b3922f9c7a
|
Python
|
l33tdaima/l33tdaima
|
/pr1029m/two_city_sched_cost.py
|
UTF-8
| 1,334
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
from functools import reduce
class Solution:
    """LeetCode 1029: fly exactly half the people to each city at minimum cost."""

    def twoCitySchedCostV1(self, costs: list[list[int]]) -> int:
        """Greedy: sort by (cost_A - cost_B); cheapest-for-A half goes to A."""
        half = len(costs) // 2
        by_savings = sorted(costs, key=lambda pair: pair[0] - pair[1])
        total_a = sum(pair[0] for pair in by_savings[:half])
        total_b = sum(pair[1] for pair in by_savings[-half:])
        return total_a + total_b

    def twoCitySchedCostV2(self, costs: list[list[int]]) -> int:
        """Same greedy, accumulated with an explicit running total."""
        half = len(costs) // 2
        by_savings = sorted(costs, key=lambda pair: pair[0] - pair[1])
        total = 0
        for a_cost, _ in by_savings[:half]:
            total += a_cost
        for _, b_cost in by_savings[-half:]:
            total += b_cost
        return total
# TESTS — each case is (costs, expected minimum total cost).
for costs, expected in [
    ([[10, 20], [30, 200]], 50),
    ([[10, 20], [30, 200], [400, 50], [30, 20]], 110),
    (
        [
            [259, 770],
            [448, 54],
            [926, 667],
            [184, 139],
            [840, 118],
            [577, 469],
        ],
        1859,
    ),
    (
        [
            [515, 563],
            [451, 713],
            [537, 709],
            [343, 819],
            [855, 779],
            [457, 60],
            [650, 359],
            [631, 42],
        ],
        3086,
    ),
]:
    sol = Solution()
    actual = sol.twoCitySchedCostV1(costs)
    print("The minimum cost to fly in", costs, "->", actual)
    assert actual == expected
    # Coverage fix: V2 was defined but never exercised by these tests.
    assert sol.twoCitySchedCostV2(costs) == expected
| true
|
198b55e9dab45e2e45c4e75909c767ad764b062f
|
Python
|
Flouzr/Dijkstra-Bellman-Ford
|
/graph.py
|
UTF-8
| 15,821
| 3.609375
| 4
|
[] |
no_license
|
from collections import deque
import math
import random
from disjointsets import DisjointSets
from pq import PQ
from timeit import timeit
# Programming Assignment 3
# (5) After doing steps 1 through 4 below (look for relevant comments), return up here.
# Given the output of steps 3 and 4, how does the runtime of Dijkstra's algorithm compare to Bellman-Ford?
# Does graph density affect performance? Does size of the graph otherwise affect performance?
# Is Dijkstra always faster than Bellman-Ford? If not, when is Bellman-Ford faster?
def generate_random_weighted_digraph(v,e,min_w,max_w) :
    """Generates and returns a random weighted directed graph with v vertices and e different edges.

    All vertices are reachable from vertex 0 by construction. Weights are
    uniform random integers in [min_w, max_w].

    Keyword arguments:
    v - number of vertices
    e - number of edges
    min_w - minimum weight
    max_w - maximum weight
    """
    # ensure all vertices reachable from 0: visit vertices in a random order,
    # attaching each new vertex to some earlier-visited one (a random tree).
    temp = [ x for x in range(1,v) ]
    random.shuffle(temp)
    temp.append(0)
    temp.reverse()
    edges = [ (temp[random.randrange(0,i)],temp[i]) for i in range(1,v) ]
    # if desired number of edges greater than length of current edge list, then add more edges
    if e > len(edges) :
        edgeSet = { x for x in edges }
        # candidate pool: every ordered pair not yet used (no self-loops)
        notYetUsedEdges = [ (x,y) for x in range(v) for y in range(v) if x != y and (x,y) not in edgeSet ]
        random.shuffle(notYetUsedEdges)
        count = e - len(edges)
        count = min(count, len(notYetUsedEdges))
        for i in range(count) :
            edges.append(notYetUsedEdges.pop())
    # generate random edge weights
    weights = [ random.randint(min_w, max_w) for x in range(len(edges)) ]
    # construct a Digraph with the lists of edges and weights generated
    G = Digraph(v, edges, weights)
    return G
def time_shortest_path_algs() :
    """Generates a table of timing results comparing Dijkstra's algorithm with
    Bellman-Ford on random digraphs of increasing size and density."""
    g1632 = generate_random_weighted_digraph(16, 32, 1, 10)
    g1660 = generate_random_weighted_digraph(16, 60, 1, 10)
    g16240 = generate_random_weighted_digraph(16, 240, 1, 10)
    g64128 = generate_random_weighted_digraph(64, 128, 1, 10)
    g64672 = generate_random_weighted_digraph(64, 672, 1, 10)
    g644032 = generate_random_weighted_digraph(64, 4032, 1, 10)
    g256512 = generate_random_weighted_digraph(256, 512, 1, 10)
    g2568160 = generate_random_weighted_digraph(256, 8160, 1, 10)
    g25665280 = generate_random_weighted_digraph(256, 65280, 1, 10)
    df = "%.7f"
    print('%0s %10s %8s %8s' % ("Algorithm", "Vertices", "Edges", "Time"))
    print('—' *43)
    print("%0s %5s %10s %15s" % ("Dijkstra ", "16", "32", df % (timeit(lambda:g1632.dijkstra(0), number=10000)/10000)))
    print("%0s %6s %10s %15s" % ("Bellman ", "16", "32", df % (timeit(lambda:g1632.bellman_ford(0), number=10000)/10000)))
    print("%0s %5s %10s %15s" % ("Dijkstra ", "16", "60", df % (timeit(lambda:g1660.dijkstra(0), number=10000)/10000)))
    print("%0s %6s %10s %15s" % ("Bellman ", "16", "60", df % (timeit(lambda:g1660.bellman_ford(0), number=10000)/10000)))
    print("%0s %5s %10s %15s" % ("Dijkstra ", "16", "240", df % (timeit(lambda:g16240.dijkstra(0), number=10000)/10000)))
    print("%0s %6s %10s %15s" % ("Bellman ", "16", "240", df % (timeit(lambda:g16240.bellman_ford(0), number=10000)/10000)))
    print("%0s %5s %10s %15s" % ("Dijkstra ", "64", "128", df % (timeit(lambda:g64128.dijkstra(0), number=10000)/10000)))
    print("%0s %6s %10s %15s" % ("Bellman ", "64", "128", df % (timeit(lambda:g64128.bellman_ford(0), number=10000)/10000)))
    print("%0s %5s %10s %15s" % ("Dijkstra ", "64", "672", df % (timeit(lambda:g64672.dijkstra(0), number=10000)/10000)))
    print("%0s %6s %10s %15s" % ("Bellman ", "64", "672", df % (timeit(lambda:g64672.bellman_ford(0), number=10000)/10000)))
    print("%0s %5s %10s %15s" % ("Dijkstra ", "64", "4032", df % (timeit(lambda:g644032.dijkstra(0), number=1000)/1000)))
    print("%0s %6s %10s %15s" % ("Bellman ", "64", "4032", df % (timeit(lambda:g644032.bellman_ford(0), number=1000)/1000)))
    # Bug fix: the two rows below were labeled "6512" although g256512 has
    # 512 edges; the remaining rows now also reuse the shared format `df`.
    print("%0s %5s %10s %15s" % ("Dijkstra ", "256", "512", df % (timeit(lambda:g256512.dijkstra(0), number=1000)/1000)))
    print("%0s %6s %10s %15s" % ("Bellman ", "256", "512", df % (timeit(lambda:g256512.bellman_ford(0), number=1000)/1000)))
    print("%0s %5s %10s %15s" % ("Dijkstra ", "256", "8160", df % (timeit(lambda:g2568160.dijkstra(0), number=1000)/1000)))
    print("%0s %6s %10s %15s" % ("Bellman ", "256", "8160", df % (timeit(lambda:g2568160.bellman_ford(0), number=1000)/1000)))
    print("%0s %5s %10s %15s" % ("Dijkstra ", "256", "65280", df % (timeit(lambda:g25665280.dijkstra(0), number=2)/2)))
    print("%0s %6s %10s %15s" % ("Bellman ", "256", "65280", df % (timeit(lambda:g25665280.bellman_ford(0), number=2)/2)))
class Graph :
    """Graph represented with adjacency lists (undirected)."""
    __slots__ = ['_adj']
    def __init__(self, v=10, edges=[], weights=[]) :
        """Initializes a graph with a specified number of vertices.

        NOTE(review): edges/weights use mutable default arguments; harmless
        here because they are only iterated, never mutated.

        Keyword arguments:
        v - number of vertices
        edges - any iterable of ordered pairs indicating the edges
        weights - (optional) list of weights, same length as edges list
        """
        self._adj = [ _AdjacencyList() for i in range(v) ]
        i=0
        # Weights are applied only when one weight per edge was supplied.
        hasWeights = len(edges)==len(weights)
        for a, b in edges :
            if hasWeights :
                self.add_edge(a,b,weights[i])
                i = i + 1
            else :
                self.add_edge(a, b)
    def add_edge(self, a, b, w=None) :
        """Adds an edge to the graph (stored in both endpoints' lists).

        Keyword arguments:
        a - first end point
        b - second end point
        w - weight for the edge (optional)
        """
        self._adj[a].add(b, w)
        self._adj[b].add(a, w)
    def num_vertices(self) :
        """Gets number of vertices of graph."""
        return len(self._adj)
    def degree(self, vertex) :
        """Gets degree of specified vertex.

        Keyword arguments:
        vertex - integer id of vertex
        """
        return self._adj[vertex]._size
    def bfs(self, s) :
        """Performs a BFS of the graph from a specified starting vertex.

        Returns a list of objects, one per vertex, containing the vertex's distance
        from s in attribute d, and vertex id of its predecessor in attribute pred.
        Unreachable vertices keep d == math.inf and pred == None.

        Keyword arguments:
        s - the integer id of the starting vertex.
        """
        class VertexData :
            __slots__ = [ 'd', 'pred' ]
            def __init__(self) :
                self.d = math.inf
                self.pred = None
        vertices = [VertexData() for i in range(len(self._adj))]
        vertices[s].d = 0
        q = deque([s])
        while len(q) > 0 :
            u = q.popleft()
            for v in self._adj[u] :
                # d == inf doubles as the "not yet discovered" marker.
                if vertices[v].d == math.inf :
                    vertices[v].d = vertices[u].d + 1
                    vertices[v].pred = u
                    q.append(v)
        return vertices
    def dfs(self) :
        """Performs a DFS of the graph. Returns a list of objects, one per vertex, containing
        the vertex's discovery time (d), finish time (f), and predecessor in the depth first forest
        produced by the search (pred).
        """
        class VertexData :
            # NOTE(review): f is declared in __slots__ but only assigned when
            # the vertex is finished; every vertex is visited by the loop
            # below, so f is always set before the method returns.
            __slots__ = [ 'd', 'f', 'pred' ]
            def __init__(self) :
                self.d = 0
                self.pred = None
        vertices = [VertexData() for i in range(len(self._adj))]
        time = 0
        def dfs_visit(u) :
            nonlocal time
            nonlocal vertices
            time = time + 1
            vertices[u].d = time
            for v in self._adj[u] :
                # d == 0 marks an undiscovered vertex.
                if vertices[v].d == 0 :
                    vertices[v].pred = u
                    dfs_visit(v)
            time = time + 1
            vertices[u].f = time
        for u in range(len(vertices)) :
            if vertices[u].d == 0 :
                dfs_visit(u)
        return vertices
    def print_graph(self, with_weights=False) :
        """Prints the graph as adjacency lists, one vertex per line."""
        for v, vList in enumerate(self._adj) :
            print(v, end=" -> ")
            if with_weights :
                for u, w in vList.__iter__(True) :
                    print(u, "(" + str(w) + ")", end="\t")
            else :
                for u in vList :
                    print(u, end="\t")
            print()
    def get_edge_list(self, with_weights=False) :
        """Returns a list of the edges of the graph
        as a list of tuples. Default is of the form
        [ (a, b), (c, d), ... ] where a, b, c, d, etc are
        vertex ids. If with_weights is True, the generated
        list includes the weights in the following form
        [ ((a, b), w1), ((c, d), w2), ... ] where w1, w2, etc
        are the edge weights.

        Keyword arguments:
        with_weights -- True to include weights
        """
        edges = []
        for v, vList in enumerate(self._adj) :
            if with_weights :
                for u, w in vList.__iter__(True) :
                    edges.append(((v,u),w))
            else :
                for u in vList :
                    edges.append((v,u))
        return edges
    def mst_kruskal(self) :
        """Returns the set of edges in some
        minimum spanning tree (MST) of the graph,
        computed using Kruskal's algorithm.
        """
        A = set()
        forest = DisjointSets(len(self._adj))
        edges = self.get_edge_list(True)
        # Consider edges cheapest-first; take an edge iff it joins two trees.
        edges.sort(key=lambda x : x[1])
        for e, w in edges :
            if forest.find_set(e[0]) != forest.find_set(e[1]) :
                A.add(e)
                #A = A | {e}
                forest.union(e[0],e[1])
        return A
    def mst_prim(self, r=0) :
        """Returns the set of edges in some
        minimum spanning tree (MST) of the graph,
        computed using Prim's algorithm.

        Keyword arguments:
        r - vertex id to designate as the root (default is 0).
        """
        parent = [ None for x in range(len(self._adj))]
        Q = PQ()
        Q.add(r, 0)
        for u in range(len(self._adj)) :
            if u != r :
                Q.add(u, math.inf)
        while not Q.is_empty() :
            u = Q.extract_min()
            # Relax u's neighbors still outside the tree: a cheaper
            # connecting edge lowers their key in the priority queue.
            for v, w in self._adj[u].__iter__(True) :
                if Q.contains(v) and w < Q.get_priority(v) :
                    parent[v] = u
                    Q.change_priority(v, w)
        A = set()
        for v, u in enumerate(parent) :
            if u != None :
                A.add((u,v))
                #A = A | {(u,v)}
        return A
class Digraph(Graph) :
    """Directed graph: add_edge inserts a one-way edge only."""

    def __init__(self, v=10, edges=[], weights=[]) :
        super(Digraph, self).__init__(v, edges, weights)

    def add_edge(self, a, b, w=None) :
        """Adds the directed edge a -> b with optional weight w."""
        self._adj[a].add(b, w)

    def bellman_ford(self,s) :
        """Bellman Ford Algorithm for single source shortest path.

        Returns a list of tuples (v, d, pred): vertex id, shortest distance
        from s, and the predecessor's id (None for the source and for
        unreachable vertices). Returns an empty list if a negative-weight
        cycle is detected.

        Keyword Arguments:
        s - The source vertex.
        """
        class VertexData:
            __slots__ = ['d', 'pred']
            def __init__(self):
                self.d = math.inf
                self.pred = None
        vertices = [VertexData() for i in range(len(self._adj))]
        vertices[s].d = 0
        def relax(u, v, w):
            if vertices[v].d > vertices[u].d + w:
                vertices[v].d = vertices[u].d + w
                # Bug fix: store the predecessor's *index*. The original stored
                # the VertexData object and recovered the index later with an
                # O(n) vertices.index() call per vertex.
                vertices[v].pred = u
        # Bug fix: Bellman-Ford must relax every edge |V|-1 times. The
        # original performed a single pass over the edges, which only
        # propagates distances along edges relaxed in vertex order.
        for _ in range(len(vertices) - 1):
            for u in range(len(vertices)):
                for v, w in self._adj[u].__iter__(True):
                    relax(u, v, w)
        # One more pass: any further improvement implies a negative cycle.
        for u in range(len(vertices)):
            for v, w in self._adj[u].__iter__(True):
                if vertices[v].d > vertices[u].d + w:
                    return []
        return [(u, vertices[u].d, vertices[u].pred) for u in range(len(vertices))]

    def dijkstra(self,s) :
        """Dijkstra's Algorithm using a binary heap as the PQ.

        Returns a list of tuples (v, d, pred) in the order vertices were
        finalized. Assumes non-negative edge weights.

        Keyword Arguments:
        s - The source vertex.
        """
        class VertexData:
            __slots__ = ['d', 'pred']
            def __init__(self):
                self.d = math.inf
                self.pred = None
        vertices = [VertexData() for i in range(len(self._adj))]
        vertices[s].d = 0
        Q = PQ()
        Q.add(s, 0)
        for u in range(len(self._adj)):
            if u != s:
                Q.add(u, math.inf)
        finalized = []
        while not Q.is_empty():
            u = Q.extract_min()
            finalized.append(u)
            for v, w in self._adj[u].__iter__(True):
                d = vertices[u].d + w
                # Bug fix: decrease v's key in the priority queue when a
                # shorter path is found. The original never updated the PQ,
                # so vertices were extracted in effectively arbitrary order
                # and distances could be finalized prematurely.
                # (Same PQ API as mst_prim: contains / change_priority.)
                if Q.contains(v) and d < vertices[v].d:
                    vertices[v].d = d
                    vertices[v].pred = u
                    Q.change_priority(v, d)
        return [(v, vertices[v].d, vertices[v].pred) for v in finalized]
class _AdjacencyList :
    """Singly linked list of adjacent vertices, with optional edge weights."""
    __slots__ = [ '_first', '_last', '_size']
    def __init__(self) :
        # Empty list: no head, no tail, zero elements.
        self._first = self._last = None
        self._size = 0
    def add(self, node, w=None) :
        """Appends vertex id `node` (with weight w) at the tail, preserving
        insertion order during iteration."""
        if self._first == None :
            self._first = self._last = _AdjListNode(node, w)
        else :
            self._last._next = _AdjListNode(node, w)
            self._last = self._last._next
        self._size = self._size + 1
    def __iter__(self, weighted=False):
        # weighted=True yields (vertex, weight) pairs; plain iteration
        # yields bare vertex ids.
        if weighted :
            return _AdjListIterWithWeights(self)
        else :
            return _AdjListIter(self)
class _AdjListNode :
    """One element of an _AdjacencyList: vertex id, weight, next pointer."""
    __slots__ = [ '_next', '_data', '_w' ]
    def __init__(self, data, w=None) :
        self._next = None
        self._data = data  # adjacent vertex id
        self._w = w        # edge weight (None for unweighted graphs)
class _AdjListIter :
__slots__ = [ '_next', '_num_calls' ]
def __init__(self, adj_list) :
self._next = adj_list._first
self._num_calls = adj_list._size
def __iter__(self) :
return self
def __next__(self) :
if self._num_calls == 0 :
raise StopIteration
self._num_calls = self._num_calls - 1
data = self._next._data
self._next = self._next._next
return data
class _AdjListIterWithWeights :
__slots__ = [ '_next', '_num_calls' ]
def __init__(self, adj_list) :
self._next = adj_list._first
self._num_calls = adj_list._size
def __iter__(self) :
return self
def __next__(self) :
if self._num_calls == 0 :
raise StopIteration
self._num_calls = self._num_calls - 1
data = self._next._data
w = self._next._w
self._next = self._next._next
return data, w
if __name__ == "__main__" :
    # here is where you will implement any code necessary to confirm that your
    # methods work correctly.
    # Code in this if block will only run if you run this module, and not if you load this module with
    # an import for use by another module.
    # (4) Run the benchmark comparing Dijkstra and Bellman-Ford (step 3 output).
    time_shortest_path_algs()
| true
|
ac87a744954656024b8eef031b28a389c1c24a97
|
Python
|
Shah-Shishir/URI-Solutions
|
/1079 - Weighted Averages.py
|
UTF-8
| 158
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
# URI 1079 — Weighted Averages: for each of tc cases, read three grades and
# print their weighted average (weights 2, 3, 5) with one decimal place.
tc = int(input())
for pos in range (1,tc+1):
    a,b,c = input().split()
    a,b,c = [float(a),float(b),float(c)]
    print("%0.1f" %((a*2+b*3+c*5)/10))
| true
|
38ca6ac0a5e6bbc2d9697475fba9b629e4f9fe99
|
Python
|
hsyeon4001/algorithm_records
|
/Python/boj/grade3.py
|
UTF-8
| 1,314
| 3.21875
| 3
|
[] |
no_license
|
# BOJ practice solutions, run top to bottom; each section reads its own input.
# Bug fix: the original rebound the builtins `len` and `list` to local values;
# problem 8393 assigned a list object to `list`, so problem 10871's call
# `list(map(...))` raised TypeError ('list' object is not callable).

# 2739번 — multiplication table
a = int(input(""))
for i in range(1, 10):
    print(a, '*', i, '=', a * i)

# 10950번 — A+B over t test cases
t = int(input(""))
for i in range(t):
    a, b = map(int, input("").split(" "))
    print(a+b)

# 8393번 — sum of 1..n
n = int(input(""))
nums = []
for i in range(1, n+1):
    nums.append(i)
print(sum(nums))

# 15552번 — fast A+B via sys.stdin
import sys
t = int(input())
for i in range(t):
    a, b = map(int, sys.stdin.readline().split())
    print(a + b)

# 2741번 — count up
n = int(input())
for i in range(1, n+1):
    print(i)

# 2742번 — count down
n = int(input())
for i in range(n, 0, -1):
    print(i)

# 11021번 — A+B with case labels
import sys
t = int(input())
for i in range(1, t+1):
    a, b = map(int, sys.stdin.readline().split())
    print("Case #{}: {}".format(i, a+b))

# 11022번 — A+B with full expression labels
import sys
t = int(input())
for i in range(1, t+1):
    a, b = map(int, sys.stdin.readline().split())
    print("Case #{}: {} + {} = {}".format(i, a, b, a+b))

# 2438번 — left-aligned star triangle
t = int(input())
for i in range(1, t+1):
    print("*" * i)

# 2439번 — right-aligned star triangle
t = int(input())
for i in range(1, t+1):
    print(' '*(t - i) + '*'*i)

# 10871번 — print elements smaller than X (a[1])
a = list(map(int, input().split()))
arr = list(map(int, input().split()))
answer = []
for ele in arr:
    if a[1] > ele:
        answer.append(str(ele))
print(" ".join(answer))
| true
|
20a1ea85261737bebc169f9179f6da7bacd2515f
|
Python
|
atulanandnitt/questionsBank
|
/basicDataStructure/array_list/extra/KthChar_InStr.py
|
UTF-8
| 1,145
| 3.484375
| 3
|
[] |
no_license
|
#https://practice.geeksforgeeks.org/problems/find-k-th-character-in-string/0/?ref=self
def binaryFun(m):
    """Return the binary digits of m, least-significant bit first.

    Note this is the *reverse* of bin(m)[2:], e.g. binaryFun(6) == '011'.
    """
    digits = ""
    while m > 1:
        digits += str(m % 2)
        m //= 2
    # Append the final (most significant) bit — also handles m <= 1 inputs.
    return digits + str(m % 2)
def sol(m, k, n):
    """Return the k-th character (0-indexed) of the string produced from
    bin(m) after n doubling steps, where each '1' -> '10' and '0' -> '01'.

    Bug fix: removed the dead `bin_m = binaryFun(m)` call — its result was
    immediately overwritten by the bin() conversion below, so it was pure
    wasted work. Debug prints are kept to preserve observable behavior.
    """
    str1 = bin(m)
    bin_m = str1[2:]
    print(bin_m)
    print("binary val is :", bin(m)[2:])
    print("decimal val is ", int(bin(m), 2))
    temp = ""
    for _ in range(n):
        # Expand every bit: the string doubles in length each iteration.
        for item in bin_m:
            if item == '1':
                temp += '10'
            elif item == '0':
                temp += '01'
        bin_m = temp
        temp = ""
    return bin_m[k]
# Demo / scratch driver for sol(); alternate inputs left commented out.
print(" {} is mt name","atul")
print("%s %s is my full name" %("Atul"," Anand"))
t=1#int(input())
for _ in range(t):
    m,k,n=42,47,8#map(int,input().strip().split())
    #m,k,n=32,17,9#map(int,input().strip().split())
    #m,k,n=11,6,4#map(int,input().strip().split())
    print("sol is ",sol(m,k,n))
| true
|
084d83b9d8c3fc3c6d6493bf19e27f30893802c1
|
Python
|
real-ariful/Bangla-Digit-Recognition
|
/morpht.py
|
UTF-8
| 3,121
| 3.21875
| 3
|
[] |
no_license
|
# Morphological Transformations on a live webcam feed.
# Each #%% cell is self-contained: it opens the camera, masks a color range
# in HSV space, applies morphological operators, and shows the results until
# 'q' is pressed.
#%%
# Erosion is where we will "erode" the edges
#Dilation-does the opposite
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while(1):
    _, frame = cap.read()
    # Convert to HSV so the color range can be expressed as hue/sat/value.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_red = np.array([30,150,50])
    upper_red = np.array([255,255,180])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame,frame, mask= mask)
    # 5x5 all-ones structuring element for the morphological ops below.
    kernel = np.ones((5,5),np.uint8)
    erosion = cv2.erode(mask,kernel,iterations = 1)
    dilation = cv2.dilate(mask,kernel,iterations = 1)
    cv2.imshow('Original',frame)
    cv2.imshow('Mask',mask)
    cv2.imshow('Erosion',erosion)
    cv2.imshow('Dilation',dilation)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
cap.release()
#%% Opening and Closing
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while(1):
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #hav hue a value
    lower_red = np.array([30,150,50])
    upper_red = np.array([255,255,180])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame,frame, mask= mask)
    kernel = np.ones((5,5),np.uint8)
    erosion = cv2.erode(mask,kernel,iterations = 1)
    dilation = cv2.dilate(mask,kernel,iterations = 1)
    # Opening = erosion then dilation (removes small noise);
    # Closing = dilation then erosion (fills small holes).
    opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    cv2.imshow('Original',frame)
    cv2.imshow('Mask',mask)
    cv2.imshow('Erosion',erosion)
    cv2.imshow('Dilation',dilation)
    cv2.imshow('opening',opening)
    cv2.imshow('closing',closing)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
cap.release()
#%% tophat and blackhat
#not required but can be used if project requirement
# NOTE(review): the tophat/blackhat computations and their imshow calls are
# commented out below, so this cell currently duplicates the previous one.
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while(1):
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #hav hue a value
    lower_red = np.array([30,150,50])
    upper_red = np.array([255,255,180])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame,frame, mask= mask)
    kernel = np.ones((5,5),np.uint8)
    erosion = cv2.erode(mask,kernel,iterations = 1)
    dilation = cv2.dilate(mask,kernel,iterations = 1)
    opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    cv2.imshow('Original',frame)
    cv2.imshow('Mask',mask)
    cv2.imshow('Erosion',erosion)
    cv2.imshow('Dilation',dilation)
    cv2.imshow('opening',opening)
    cv2.imshow('closing',closing)
    # It is the difference between input image and Opening of the image
    #cv2.imshow('Tophat',tophat)
    # It is the difference between the closing of the input image and input image.
    #cv2.imshow('Blackhat',blackhat)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
cap.release()
| true
|
aba2d55aeb1e2171447b23cfe44d9b6d511eb981
|
Python
|
andrewdonato/cityscape
|
/cityscape_with_classes/cityscape_with_classes.pyde
|
UTF-8
| 5,455
| 2.734375
| 3
|
[] |
no_license
|
import math
# Module-level registries populated by createStreet()/the classes below.
streets = []
curbs = []
buildings = []
# Map extents; filled in (partially) by setup().
mapWidth = None
mapHeight = None
mapDepth = None
mapTop = None
mapBottom = None
mapleft = None
mapRight = None
# Side length of the square tile; street coordinates below are expressed
# as eighths of this wall length.
tileWall = 600
# Each entry: (xStart, yStart, zStart, xEnd, yEnd, zEnd) for one street.
desiredStreets = [
    (1*tileWall/8, 0*tileWall/8, 0, 1*tileWall/8, 8*tileWall/8, 0),
    (0*tileWall/8, 4*tileWall/8, 0, 4*tileWall/8, 4*tileWall/8, 0),
    (4*tileWall/8, 4*tileWall/8, 0, 8*tileWall/8, 8*tileWall/8, 0),
    (2*tileWall/8, 0*tileWall/8, 0, 2*tileWall/8, 8*tileWall/8, 0),
    (3*tileWall/8, 0*tileWall/8, 0, 3*tileWall/8, 8*tileWall/8, 0),
    (0*tileWall/8, 5*tileWall/8, 0, 3*tileWall/8, 5*tileWall/8, 0),
    (0*tileWall/8, 6*tileWall/8, 0, 6*tileWall/8, 6*tileWall/8, 0),
    (0*tileWall/8, 7*tileWall/8, 0, 3*tileWall/8, 7*tileWall/8, 0),
    (0*tileWall/8, 2*tileWall/8, 0, 8*tileWall/8, 2*tileWall/8, 0),
    (4*tileWall/8, 2*tileWall/8, 0, 8*tileWall/8, 6*tileWall/8, 0),
    (4*tileWall/8, 0*tileWall/8, 0, 4*tileWall/8, 2*tileWall/8, 0),
    (5*tileWall/8, 2*tileWall/8, 0, 8*tileWall/8, 5*tileWall/8, 0),
    (5*tileWall/8, 3*tileWall/8, 0, 8*tileWall/8, 3*tileWall/8, 0),
    (6*tileWall/8, 4*tileWall/8, 0, 8*tileWall/8, 4*tileWall/8, 0),
    (6*tileWall/8, 2*tileWall/8, 0, 8*tileWall/8, 4*tileWall/8, 0),
    (4*tileWall/8, 1*tileWall/8, 0, 1*tileWall/8, 1*tileWall/8, 0),
    (5*tileWall/8, 2*tileWall/8, 0, 5*tileWall/8, 0*tileWall/8, 0),
    (6*tileWall/8, 2*tileWall/8, 0, 6*tileWall/8, 0*tileWall/8, 0),
    (5*tileWall/8, 1*tileWall/8, 0, 8*tileWall/8, 1*tileWall/8, 0),
    (5*tileWall/8, 3*tileWall/8, 0, 4*tileWall/8, 4*tileWall/8, 0),]
def setup():
    # Processing (Python mode) entry point: size(), width, height and P3D
    # are Processing builtins, not defined in this file.
    global mapTop, mapBottom, mapleft, mapRight, streets, curbs, buildings
    size(601, 601, P3D)
    mapWidth = width
    mapHeight = height
    mapDepth = height
    mapTop = None
    mapBottom = None
    mapleft = None
    mapRight = None
    # Build one Street per tuple in desiredStreets; the tuple index doubles
    # as the street's creation index.
    for i in range(0, len(desiredStreets)):
        streetLine = desiredStreets[i]
        createStreet(streetLine[0], streetLine[1], streetLine[2], streetLine[3], streetLine[4], streetLine[5], i, 30, "both", "two-way")
    # for street in streets:
    #     if street.visited == False:
    #         street.findIntersections()
    for street in streets:
        street.drawStreet()
    # print streets[0].streetWidth
    # print streets[0].buildingOrientation
    # print streets[0].directionality
# (Original signature sketch kept defaults: streetWidth=30,
#  buildingOrientation="both", directionality="two-way".)
def createStreet(xStart, yStart, zStart, xEnd, yEnd, zEnd, index, streetWidth, buildingOrientation, directionality):
    """Construct a Street and register it in the module-level `streets` list."""
    global streets
    streets.append(
        Street(xStart, yStart, zStart, xEnd, yEnd, zEnd, index,
               streetWidth, buildingOrientation, directionality)
    )
class Street:
    """A straight street segment between two 3D endpoints.

    Most analysis methods are placeholders to be implemented later.
    """
    def __init__(self, xStart, yStart, zStart, xEnd, yEnd, zEnd, streetCreationIndex, streetWidth, buildingOrientation, directionality):
        # Endpoints.
        self.xStart, self.yStart, self.zStart = xStart, yStart, zStart
        self.xEnd, self.yEnd, self.zEnd = xEnd, yEnd, zEnd
        # Identity and configuration.
        self.streetIndex = streetCreationIndex
        self.streetWidth = streetWidth
        self.buildingOrientation = buildingOrientation
        self.directionality = directionality
        # Derived structures, filled in by later processing.
        self.curbs = []
        self.intersections = []
        self.intersectsWith = []
        self.angle = None
        self.lookedForIntersections = False
    def createCurbs(self):
        """Not yet implemented."""
        pass
    def connectStreet(self, street):
        """Not yet implemented."""
        pass
    def getBuildings(self):
        """Not yet implemented."""
        pass
    def findIntersections(self, streets):
        """Not yet implemented."""
        pass
    def findCorners(self):
        """Not yet implemented."""
        pass
    def findAngle(self):
        """Not yet implemented."""
        pass
    def findVelocity(self):
        """Not yet implemented."""
        pass
    def drawStreet(self):
        """Render the street's center line (Processing `line` builtin)."""
        line(self.xStart, self.yStart, self.zStart, self.xEnd, self.yEnd, self.zEnd)
class Curb:
    """One side of a street where buildings may be placed."""
    def __init__(self, xStart, yStart, zStart, xEnd, yEnd, zEnd, buildingOrientation, curbCreationIndex, streetIndex):
        # Endpoints.
        self.xStart, self.yStart, self.zStart = xStart, yStart, zStart
        self.xEnd, self.yEnd, self.zEnd = xEnd, yEnd, zEnd
        # Configuration and identity.
        self.buildingOrientation = buildingOrientation
        self.curbIndex = curbCreationIndex
        self.streetIndex = streetIndex
        # Filled in by later processing.
        self.buildings = []
        self.angle = None
    def createBuildings(self):
        """Not yet implemented."""
        pass
    def findAngle(self):
        """Not yet implemented."""
        pass
class Intersection:
    """A meeting point of two or more streets."""
    def __init__(self, streets, intersectionCreationIndex):
        # Bug fix: the `streets` argument was previously discarded
        # (self.streets was always reset to []); keep a copy of it instead.
        self.streets = list(streets)
        self.intersectionIndex = intersectionCreationIndex
        self.address = []
    # def findAddress():
    #     pass
class Building:
    """An axis-aligned building placed along a curb."""
    def __init__(self, xCoordinate, yCoordinate, zCoordinate, xSize, ySize, zSize, buildingCreationIndex, curbIndex, streetIndex):
        # Position; the address is the raw coordinate triple.
        self.x, self.y, self.z = xCoordinate, yCoordinate, zCoordinate
        self.address = [xCoordinate, yCoordinate, zCoordinate]
        # Dimensions.
        self.xSize, self.ySize, self.zSize = xSize, ySize, zSize
        # Identity / back references.
        self.buildingIndex = buildingCreationIndex
        self.curbIndex = curbIndex
        self.streetIndex = streetIndex
| true
|
2ccd11d88e4d9b1307a9c915a09326e70d141a9c
|
Python
|
snack-boomz/Treehouse-Python-Unit3-OOP-PhraseHunter-Game
|
/phrasehunter/game.py
|
UTF-8
| 5,800
| 3.828125
| 4
|
[] |
no_license
|
import random
from phrasehunter.phrase import Phrase
class Game():
def __init__(self):
self.missed = 0
# -- https://randomwordgenerator.com/ --
self.phrases = [Phrase("hello world"), Phrase("Amazing pie"), Phrase("Black cat"), Phrase("Sparkling water"), Phrase("large chair")]
self.active_phrase = None
self.guesses = []
def start(self):
# Calls the welcome method, creates the game loop, calls the get_guess method, adds the user's guess to guesses,
# increments the number of missed by one if the guess is incorrect, calls the game_over method.
# Upon new game, reset values
self.guesses = []
self.active_phrase = None
self.missed = 0
self.welcome()
random_phrase = self.get_random_phrase()
new_phrase = random_phrase
new_phrase.active_phrase = random_phrase
while True:
new_phrase.display()
if new_phrase.check_complete():
print("\nYou won! You have guessed the phrase!")
# Referenced from Unit 1 Project
loop = 1
while loop:
play_again = input("Would you like to play again? (Y/N): ")
play_again = play_again.upper()
if play_again == 'Y':
return play_again
elif play_again == 'N':
print("Be seeing you!")
quit()
else:
print("Please enter Y or N.")
user_guess = self.get_guess()
self.guesses.append(user_guess)
print("Current guesses: ", self.guesses, "\n")
if new_phrase.check_letter(user_guess) == False:
self.missed += 1
print("\nIncorrect!")
print("Incorrect Guesses: ", self.missed, "\n")
if self.missed == 5:
if self.game_over() == True:
loop = 1
while loop:
play_again = input("Would you like to play again? (Y/N): ")
play_again = play_again.upper()
if play_again == 'Y':
return play_again
elif play_again == 'N':
print("Be seeing you!")
quit()
else:
print("Please enter Y or N.")
def get_random_phrase(self):
# this method randomly retrieves one of the phrases stored in the phrases list and returns it.
return random.choice(self.phrases)
def welcome(self):
print("""
╭━━━╮╱╱╱╭╮╭╮╱╱╱╱╱╱╱╱╭━━━━╮╱╱╱╱╭╮╱╱╱╭╮╱╱╱╱╱╱╱╱╱╱╱╱╱╱╭╮╱╭╮╱╱╱╭╮╱╭━━━╮
┃╭━╮┃╱╱╭╯╰┫┃╱╱╱╱╱╱╱╱┃╭╮╭╮┃╱╱╱╱┃┃╱╱╱┃┃╱╱╱╱╱╱╱╱╱╱╱╱╱╱┃┃╱┃┃╱╱╭╯╰╮┃╭━╮┃
┃╰━╯┣╮╱┣╮╭┫╰━┳━━┳━╮╱╰╯┃┃┣┻━┳━━┫╰━┳━╯┣━━┳━━┳━┳━━┳━━╮┃┃╱┃┣━╮┣╮╭╯╰╯╭╯┃
┃╭━━┫┃╱┃┃┃┃╭╮┃╭╮┃╭╮╮╱╱┃┃┃┃━┫╭━┫╭╮┃╭╮┃┃━┫╭╮┃╭┫┃━┫┃━┫┃┃╱┃┃╭╮╋┫┃╱╭╮╰╮┃
┃┃╱╱┃╰━╯┃╰┫┃┃┃╰╯┃┃┃┃╱╱┃┃┃┃━┫╰━┫┃┃┃╰╯┃┃━┫╰╯┃┃┃┃━┫┃━┫┃╰━╯┃┃┃┃┃╰╮┃╰━╯┃
╰╯╱╱╰━╮╭┻━┻╯╰┻━━┻╯╰╯╱╱╰╯╰━━┻━━┻╯╰┻━━┻━━┻━╮┣╯╰━━┻━━╯╰━━━┻╯╰┻┻━╯╰━━━╯
╱╱╱╱╭━╯┃╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╭━╯┃
╱╱╱╱╰━━╯╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╱╰━━╯
""", "\n")
print("== Welcome to Phrase Hunters. ==\n")
def get_guess(self):
guessing = True
while guessing:
# referenced from Unit 1 Project
try:
user_guess = input("\n\nGuess a letter: ").lower()
if len(user_guess) > 1:
raise ValueError("Your guess guess is longer than a single letter. Please enter a single letter.")
elif user_guess.isalpha() == False:
raise ValueError("Your guess is not a string/character. Please enter a string character.")
for character in self.guesses:
if character == user_guess:
raise ValueError("You've already guessed this character, try another character.")
# raise
# except
except ValueError as err:
print("That guess is not valid. Please enter a valid guess.")
print("Error: {}".format(err))
else:
return user_guess
def game_over(self):
    """Announce defeat; always returns True so the caller can offer a restart."""
    defeat_message = "Game over. You have failed humanity."
    print(defeat_message)
    return True
| true
|
201a551e189a1a911a0a1b00f18bbb5b755b56a8
|
Python
|
Anvi2520/Python-programming-Lab
|
/factr.py
|
UTF-8
| 183
| 3.859375
| 4
|
[] |
no_license
|
def fact(x):
    """Return x! (factorial) computed recursively.

    Generalized to accept 0 (fact(0) == 1); the original recursed forever
    for x <= 0.  Raises ValueError for negative input instead of blowing
    the recursion limit.
    """
    if x < 0:
        raise ValueError("factorial is undefined for negative numbers")
    if x <= 1:  # base case; also makes fact(0) well-defined
        return 1
    return x * fact(x - 1)
# Interactive driver: read an integer from the user and print its factorial.
n = int(input("Enter the number-"))
f=fact(n);
print("The factorial of the given number:\n",f)
| true
|
f2640b96fab090bda53ff61bdc47fd21845c5d57
|
Python
|
talitz/digital-humanities-course-assignments
|
/Q3/main.py
|
UTF-8
| 4,593
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###### global imports ######
import re
from lxml import etree
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
import datetime
from xml.etree import ElementTree
###### classes ######
class TaggedWords(object):
    """Simple record pairing a surface token with its NER label."""

    def __init__(self, word, ner):
        # store both fields exactly as given; no normalisation is applied
        self.word = word
        self.ner = ner
class BiblElement(object):
    """Bibliographic record parsed from a '. '-separated citation string.

    Expected input shape: "Surname, Forename. Title. Publisher. Date. Pages".
    NOTE(review): the .decode('utf-8') calls only work on byte strings,
    i.e. this class was written for Python 2 — confirm before running on 3.
    """
    def __init__(self, str):
        # split the citation into its five '. '-separated fields
        # (the parameter name shadows the builtin `str`; kept for interface
        # compatibility)
        help = str.split('. ')
        self.firstname = help[0].split(', ')[1].decode('utf-8')
        self.surename = help[0].split(', ')[0].decode('utf-8')
        self.title = help[1].decode('utf-8')
        self.publisher = help[2].decode('utf-8')
        self.date = help[3].decode('utf-8')
        self.pages = help[4].decode('utf-8')
###### functions ######
def prettify():
    """Pretty-print the file Q4.XML to stdout using BeautifulSoup."""
    from bs4 import BeautifulSoup
    # context manager closes the handle; the original leaked it
    with open("Q4.XML", "rb") as x:
        print(BeautifulSoup(x, "xml").prettify())
def split_file_to_tagged_sentences(filename):
    """Read *filename* and group its lines into sentences.

    A blank line ends the current sentence.  Returns a list of sentences,
    each a list of raw lines, with empty sentences dropped.
    """
    # Text mode + 'with': the original opened in 'rb' and never closed the
    # handle, and bytes.split('\n') raises TypeError under Python 3.
    with open(filename, 'r') as file:
        seperated_text = file.read().split('\n')
    ret = [[]]
    index = 0
    for line in seperated_text:
        if len(line) == 0:
            # blank line: start a fresh sentence bucket
            index = index + 1
            ret.append([])
        else:
            ret[index].append(line)
    return [x for x in ret if len(x) != 0]
def split_to_words(string):
    """Split *string* on single spaces into its tokens."""
    tokens = string.split(' ')
    return tokens
def get_ner_only(elm):
    """Return the NER tag (last field of *elm*), stripped of tabs and 'null'."""
    last_field = elm[-1]
    return last_field.replace('\t', '').replace('null', '')
def get_place_in_setence_only(elm):
    """Return the position field (first field of *elm*), stripped of tabs and 'null'."""
    first_field = elm[0]
    return first_field.replace('\t', '').replace('null', '')
def get_word_only(elm):
    """Return the surface word (field index 3 of *elm*), stripped of tabs and 'null'."""
    word_field = elm[3]
    return word_field.replace('\t', '').replace('null', '')
def ner_tag_to_tei_tag(elm):
    """Translate an 'I_*' NER tag into the corresponding TEI element name.

    Unknown tags raise ValueError (via .index), as before.
    NOTE(review): 'I_PERS' maps to 'placeName' rather than 'persName' —
    looks suspicious; confirm the intended mapping.
    """
    source_tags = ('I_LOC', 'I_PERS', 'I_ORG', 'I_DATE')
    target_tags = ('placeName', 'placeName', 'orgName', 'date')
    return target_tags[source_tags.index(elm)]
def get_dic_of_word_and_ner(list):
    """Map each word to the list of its 'I_*' NER tags, in input order.

    Items whose tag does not start with 'I_' are ignored.  (The parameter
    name shadows the builtin `list`; kept for interface compatibility.)
    """
    dic = {}
    for tagged in list:
        tag = tagged.ner
        # only entity tags of the form 'I_*' are collected
        if tag[:2] != 'I_':
            continue
        dic.setdefault(tagged.word, []).append(tag)
    return dic
def seperate_file_to_paragraph(filename):
    """Read *filename* and return its lines, keeping only lines longer than
    three characters (short separator/blank lines are dropped)."""
    # Text mode + 'with': the original leaked the handle and split bytes
    # with a str separator, which fails under Python 3.
    with open(filename, "r") as file:
        text = file.read().split('\n')
    return [p for p in text if len(p) > 3]
def get_bibl_of_lex(lex):
    """Return the ';'-separated entries of the next-to-last element of *lex*."""
    bibliography_field = lex[-2]
    return bibliography_field.split(';')
def get_all_brackets(string):
    """Return every substring enclosed in double quotes (shortest match)."""
    quoted = re.compile(r'"(.*?)"')
    return quoted.findall(string)
def tag_word(word, tag):
    """Wrap *word* in an XML element named *tag*, padded with single spaces."""
    return ' '.join(['<' + tag + '>', word, '</' + tag + '>'])
def create_paragraph(string, dic):
    """Wrap *string* in <p>, tag every word found in *dic* with its TEI
    element, write the result to Q3.XML, then re-pretty-print Q4.XML.

    NOTE(review): writing Q3.XML but re-parsing Q4.XML looks inconsistent —
    confirm which file is intended.
    """
    xml_string = '<p> ' + string + ' </p>'
    ret_string = ''
    for word in xml_string.split(' '):
        # BUG FIX: the lookup key previously dropped '?' in the membership
        # test but not in the dict access, so words ending in '?' raised
        # KeyError.  Normalise once and use the same key in both places.
        key = word.replace('.', '').replace(',', '').replace('?', '')
        if key in dic:
            ret_string = ret_string + tag_word(word, ner_tag_to_tei_tag(dic[key][0])) + ' '
        else:
            ret_string = ret_string + word + ' '
    root = etree.fromstring(ret_string)
    ET.ElementTree(root).write("Q3.XML",encoding="UTF-8",xml_declaration=True)
    tree = etree.parse("Q4.XML")
    tree.write("Q4.XML", pretty_print=True, encoding='utf-8')
def create_xml_bibl_element(bibl):
    """Serialise one BiblElement as TEI-like <bibl> XML into Q3.XML.

    Writes the tree with xml.etree first, then re-reads the file with lxml
    and rewrites it pretty-printed.
    """
    top2 = Element('xml')   # wrapper root so <bibl> nests under <xml>
    top = Element('bibl')
    persName = SubElement(top, 'persName')
    forename = SubElement(persName, 'forename')
    # NOTE(review): TEI spells this element 'surname' — confirm the
    # 'surename' tag (and attribute) spelling is intentional.
    surename = SubElement(persName, 'surename')
    forename.text = bibl.firstname
    surename.text = bibl.surename
    title = SubElement(top, 'title')
    title.text = bibl.title
    publisher = SubElement(top, 'publisher')
    publisher.text = bibl.publisher
    date = SubElement(top, 'date')
    date.text = bibl.date
    biblScope = SubElement(top, 'biblScope')
    biblScope.set('unit','page')
    biblScope.text = bibl.pages.replace('.','')  # drop the trailing period
    top2.append(top)
    ET.ElementTree(top2).write("Q3.XML",encoding="UTF-8",xml_declaration=True)
    tree = etree.parse("Q3.XML")
    tree.write("Q3.XML", pretty_print=True, encoding='utf-8')
def get_gender(filename):
    """Guess grammatical gender from a Hebrew biography text file.

    Returns 'male' if the file contains 'נולד ' (masculine "was born"),
    'female' for 'נולדה ', otherwise 'unknown'.  The trailing space in the
    masculine needle keeps it from matching inside the feminine form.
    """
    # Decode as UTF-8 text and close the handle: the original opened in
    # 'rb' and never closed it, and `str in bytes` raises under Python 3.
    with open(filename, 'r', encoding='utf-8') as source:
        string = source.read()
    if 'נולד ' in string:
        return 'male'
    elif 'נולדה ' in string:
        return 'female'
    else:
        return 'unknown'
#print get_gender('lex2.txt')
###### main ######
"""
list_of_tuples = []
tagged_sentences = split_file_to_tagged_sentences('output.txt')
for sentence in tagged_sentences:
for parsed_word in sentence:
list_of_tuples.append(TaggedWords(get_word_only(split_to_words(parsed_word)) , get_ner_only(split_to_words(parsed_word))))
dic = get_dic_of_word_and_ner(list_of_tuples)
lex = seperate_file_to_paragraph("lex2.txt")
create_paragraph(lex[4],dic)
"""
#for x in get_bibl_of_lex(lex):
# create_xml_bibl_element(BiblElement(x))
"""
for word in lex[5].split(' '):
print word
if word.replace('.','').replace(',', '').replace('?', '') in dic:
print dic[word.replace('.','').replace(',','')] , word
"""
"""
print "###"
for x in lex:
for z in get_all_brackets(x):
print z
"""
"""
"""
"""
"""
| true
|
570c56df3601ab353abe9b2094212ce08ba8beab
|
Python
|
sandance/Algorithms
|
/Graphs/walking.py
|
UTF-8
| 395
| 3.125
| 3
|
[] |
no_license
|
# Walk Function will traverse a single connected component( assuming the graph is connected)
# To Find all the components , you need to wrap it in over the nodes
#
#
def walk(G, s, S=frozenset()):
    """Traverse the connected component of *s* in graph *G*.

    G maps each node to a set of neighbours; S is an optional collection of
    forbidden (already claimed) nodes.  Returns the predecessor map
    P: node -> parent, with P[s] is None.

    FIX: the default for S is now an immutable frozenset instead of the
    original mutable `set()` default (shared-mutable-default pitfall);
    behaviour is unchanged because S is only ever read.
    """
    P, Q = dict(), set()          # P: discovered nodes w/ parents; Q: frontier
    P[s] = None
    Q.add(s)
    while Q:
        u = Q.pop()               # pick an arbitrary frontier node
        # neighbours not yet discovered (in P) and not forbidden (in S)
        for v in G[u].difference(P, S):
            Q.add(v)
            P[v] = u
    return P
| true
|
ebce54f9544768a1cd5b60c96e3ffabb8a3a4c52
|
Python
|
sofiazenzola/Python-INFO1-CE9990
|
/NYC_Water_Consumption.py
|
UTF-8
| 1,050
| 3.734375
| 4
|
[] |
no_license
|
"""
NycWaterConsumption.py
Reads csv from NYC Open Data URL
Puts fields in a list of strings
Outputs the water consumption per capita (gallons per person per day) for a given year
"""
import sys
import csv
import urllib.request
# Interactive filter: which year to report on.
year=input("Select a year from 1979-2016: ")
# NYC Open Data CSV export of the water-consumption dataset.
url = "https://data.cityofnewyork.us/api/views/ia2d-e54m/rows.csv" \
    "?accessType=DOWNLOAD"
try:
    lines = urllib.request.urlopen(url)
except urllib.error.URLError as error:
    print("urllib.error.URLError", error)
    sys.exit(1)
hopLines = []          # rows whose year column matches the chosen year
for line in lines:
    try:
        s = line.decode("utf-8")
    except UnicodeError as unicodeError:
        print(unicodeError)
        sys.exit(1)
    r = csv.reader([s]) #[s] is a list containing one string
    fields = next(r) #fields is a list of strings
    if fields[0] == year:
        hopLines.append(fields)
lines.close()
for line in hopLines:
    # BUG FIX: output previously misspelled "consumption" as "consumputer"
    print("In", line[0], "the water consumption per capita was", line[3], "gallons per person per day")
sys.exit(0)
| true
|
fb50223586a5e6fd1738ff17590431323eb08005
|
Python
|
FeminaAnsar/luminarpython
|
/datastrctr/queue.py
|
UTF-8
| 883
| 4.0625
| 4
|
[] |
no_license
|
# Fixed-size FIFO queue demo driven by console input.
size=int(input("Enter the size : "))   # maximum number of queued items
queue=[]    # backing list; live elements occupy indexes [front, rear)
rear=0      # index where the next element will be inserted
front=0     # index of the oldest (next-to-delete) element
n=1         # menu-loop sentinel; 0 exits
def insertion():
    """Prompt for one value and append it at the rear, if space remains."""
    global rear
    global front
    if rear < size:
        item = int(input("Enter the element : "))
        queue.insert(rear, item)
        rear += 1
    else:
        # BUG FIX: removed a stray debug `print(rear)` that leaked into the
        # user-facing output right before the "full" message.
        print("Queue is full...!!!")
def deletion():
    """Remove and report the front element, or warn when the queue is empty."""
    global rear
    global front
    if rear == front:
        print("Queue is empty..!!")
        return
    print(queue[front], "Deleted...!!")
    front += 1
def display():
    """Print the live elements, oldest first."""
    for position in range(front, rear):
        print(queue[position])
# Menu loop: run tasks until the user enters 0 at the continue prompt.
while(n!=0):
    option=int(input("Enter the Task : 1)INSERT 2)DELETE 3)DISPLAY "))
    if(option==1):
        insertion()
    elif(option==2):
        deletion()
    elif(option==3):
        display()
    else:
        print("Invalid option entered...!!!!")
    # any non-zero entry continues the loop
    n=int(input("EXIT==press'0' CONTINUE==press any key"))
| true
|
86271b342c796035e040806d66ae130f036ba33c
|
Python
|
eishagoel15/GettingStartedAWS-2021
|
/myLambdaDemo/app.py
|
UTF-8
| 840
| 2.5625
| 3
|
[] |
no_license
|
import tempfile
import boto3
from PIL import Image
from chalice import Chalice
# Chalice app wired to thumbnail images dropped into the input bucket.
app = Chalice(app_name='chalice_image_thumbnails-2', debug=True)
s3_client = boto3.client('s3')   # created at import time (module-level side effect)
input_bucket = "demo-bucket-input-2021"    # bucket whose uploads trigger the handler
output_bucket = "demo-bucket-output-2021"  # bucket receiving the thumbnails
@app.on_s3_event(bucket=input_bucket)
def resize_image(event):
    """S3-triggered handler: download the new object, shrink it to fit
    256x256, and upload the result (<key>.thumbnail.jpg) to the output
    bucket."""
    app.log.debug(f'Resizing the image from s3://{event.bucket}/{event.key}')
    # NOTE(review): the temp file is opened in text mode ('w') and only its
    # *path* is reused for the binary download — works on POSIX, but confirm
    # it is safe on the target runtime (Windows keeps the file locked).
    with tempfile.NamedTemporaryFile('w') as f:
        s3_client.download_file(event.bucket, event.key, f.name)
        resized_file = f.name + '.thumbnail.jpg'
        with Image.open(f.name) as image:
            image.thumbnail((256,256))   # in-place, preserves aspect ratio
            image.save(resized_file)
        s3_client.upload_file(
            Filename=resized_file,
            Bucket=output_bucket,
            Key=resized_file.rsplit("/", 1)[-1]   # strip the directory part
        )
| true
|
627a281f65eaa02fd071340367a27da649867c55
|
Python
|
wsfjonah/data_science
|
/test/reporting_test/service/echarts/test_heatmap.py
|
UTF-8
| 629
| 2.65625
| 3
|
[] |
no_license
|
import random
from example.commons import Faker
from pyecharts import options as opts
from pyecharts.charts import HeatMap
def heatmap_base() -> HeatMap:
    """Build a 24x7 (hour x weekday) HeatMap filled with random counts."""
    # one random value in [0, 50] per (hour, weekday) cell
    value = [[i, j, random.randint(0, 50)] for i in range(24) for j in range(7)]
    # NOTE(review): the three prints below look like leftover debug output
    print(Faker.clock)
    print(Faker.week)
    print(value)
    c = (
        HeatMap()
        .add_xaxis(Faker.clock)                  # x axis: 24 clock labels
        .add_yaxis("series", Faker.week, value)  # y axis: weekday labels
        .set_global_opts(title_opts=opts.TitleOpts(title="HeatMap-基本示例"), visualmap_opts=opts.VisualMapOpts(), )
    )
    return c


if __name__ == "__main__":
    # Render the demo chart to an HTML file when run directly.
    heatmap_base().render("heatMap.html")
| true
|
2ea732b1eeb0afbb231ab7f32ff024c08a105b98
|
Python
|
shwetgarg/algorithms
|
/Trees/tree.py
|
UTF-8
| 1,972
| 3.34375
| 3
|
[] |
no_license
|
import sys
from collections import deque
class Tree:
    """Binary tree node with traversal generators.

    Attributes: v (value), l (left child or None), r (right child or None).

    FIX: the original used the Python-2-only statement `print self.v`,
    which is a syntax error on Python 3; `print(self.v)` behaves the same
    on both interpreters.
    """

    def __init__(self, v, l=None, r=None):
        self.v = v
        self.l = l
        self.r = r

    def print_inorder_traversal(self):
        """Print node values in-order (left, node, right)."""
        if self is None:
            return
        if self.l is not None:
            self.l.print_inorder_traversal()
        print(self.v)
        if self.r is not None:
            self.r.print_inorder_traversal()

    def get_inorder_traversal(self):
        """Yield nodes in-order: left subtree, this node, right subtree."""
        if not self:
            return
        if self.l:
            for node in self.l.get_inorder_traversal():
                yield node
        yield self
        if self.r:
            for node in self.r.get_inorder_traversal():
                yield node

    def get_preorder_traversal(self):
        """Yield nodes pre-order: this node, left subtree, right subtree."""
        if not self:
            return
        yield self
        if self.l:
            for node in self.l.get_preorder_traversal():
                yield node
        if self.r:
            for node in self.r.get_preorder_traversal():
                yield node

    def get_level_order_traversal(self):
        """Yield the traversal level by level.

        For each level this yields the 1-based level number first and then
        every node on that level, so callers must expect a mixed int/Tree
        stream (kept for interface compatibility).
        """
        if not self:
            return
        level = 0
        this_level_queue = deque([self])
        while this_level_queue:
            next_level_queue = []
            level += 1
            yield level
            for node in this_level_queue:
                yield node
                if node.l:
                    next_level_queue.append(node.l)
                if node.r:
                    next_level_queue.append(node.r)
            this_level_queue = next_level_queue

    def copy_tree(self):
        """Return a structural deep copy of this subtree (values are shared)."""
        if self is None:
            return
        left = self.l.copy_tree() if (self.l is not None) else None
        right = self.r.copy_tree() if (self.r is not None) else None
        return Tree(self.v, left, right)
| true
|
eed167750babc9c50cf99a1f852fa827caeea8ac
|
Python
|
sharat7j/python-project
|
/findSumzero.py
|
UTF-8
| 302
| 3.28125
| 3
|
[] |
no_license
|
def find_zero_sum(arr):
    """Find and print the first zero-sum sub-array of *arr*.

    Returns True (after printing the sub-array) when one exists, else
    False.  Uses prefix sums: two equal prefix sums bracket a zero-sum
    span.

    FIXES vs. the original: ported from Python 2 (xrange / print
    statements); seeded the prefix-sum map with {0: -1} so a zero-sum
    prefix that starts at index 0 is also detected; dropped the O(n)
    `in H.keys()` lookup and the shadowing of the builtin `sum`.
    """
    seen = {0: -1}          # prefix sum -> index where it last occurred
    total = 0
    for i in range(len(arr)):
        total += arr[i]
        if total in seen:
            print("Find the first sub-array with zero sum:")
            print(arr[seen[total] + 1:i + 1])
            return True
        seen[total] = i
    return False
| true
|
7d5a93c52fabf4f246fa9676bb76e55a350afe78
|
Python
|
mahespunshi/maheshpunshi
|
/linear serach.py
|
UTF-8
| 734
| 4.46875
| 4
|
[] |
no_license
|
# search a value from list, binary search is faster than linear search
# Linear-search demo: scan the list `a` for the value `n` and record the
# matching index in the module-level variable `pos`.
pos = -1


def search():
    """Scan `a` for `n`; on success store the index in `pos` and return True."""
    for index, value in enumerate(a):
        if value == n:
            globals()['pos'] = index
            return True
    return False


a = [5, 8, 4, 6, 9, 2]
# the value to look for
n = 8
if search():
    print("Found at", pos + 1)
else:
    print("Not Found")
| true
|
1f5296a74ae813492a98fc70321b8fa576a3f529
|
Python
|
NoJeong/TID
|
/doit_python/02. 기초문법/19단_곱셈표.py
|
UTF-8
| 84
| 3.6875
| 4
|
[] |
no_license
|
# Print the "19 times table": every product for factors 1 through 19.
for left in range(1, 20):
    for right in range(1, 20):
        print(f'{left} x {right} = {right*left}')
| true
|
d86e3b799515707ad3a38b7f57ed855d9c822dd5
|
Python
|
bravoo84/AI_Workshop
|
/trialPy.py
|
UTF-8
| 117
| 3.96875
| 4
|
[] |
no_license
|
# Echo each character of the user's string one per line, skipping spaces.
var = input("Enter a string:")
for letter in var:
    # BUG FIX: the original tested `letter == ''`, which can never be true
    # for a character drawn from a string, so the skip branch was dead;
    # the evident intent was to skip spaces.
    if letter == ' ':
        continue
    else:
        print(letter)
| true
|
5a148b51a3e9f93a92131176da93be1452b81e87
|
Python
|
steven85048/Networks_Client_Server
|
/messaging_system/client/client_setup.py
|
UTF-8
| 3,176
| 2.609375
| 3
|
[] |
no_license
|
import socket
import sys
import threading
import traceback
from messaging_system.client.config import client_config
from messaging_system.client.input_handler import InputHandler
from messaging_system.client.response_handler import ResponseHandler
from messaging_system.client.state_transition_manager import StateTransitionManager
from messaging_system.client.exceptions import MalformedRequestException, MalformedUserInputException
import messaging_system.socket_holder
class ClientSetup:
    """UDP messaging client wiring: one console-input thread and one
    server-response thread share a single UDP socket and a
    StateTransitionManager."""

    def __init__(self):
        self.udp_ip = client_config['SERVER_IP_ADDR']
        self.port = client_config['UDP_PORT']
        self.state_transition_manager = StateTransitionManager()
        self.input_handler = InputHandler(self.state_transition_manager)
        self.response_handler = ResponseHandler(self.state_transition_manager)
        self.sock = socket.socket(socket.AF_INET,
        socket.SOCK_DGRAM)
        self.sock.settimeout(client_config['SOCKET_TIMEOUT'])
        # NOTE(review): the client binds the *server's* IP/port from config —
        # confirm this is intended (it works when client and server share a host).
        self.sock.bind((self.udp_ip, self.port))
        # publish the socket module-wide so handlers can send through it
        messaging_system.socket_holder.socket = self.sock

    # Starts two threads:
    # (a) User_input_thread continually listens for user input and handles
    # (b) Server_response_thread continually listens for messages from server and handles
    def start_client(self):
        """Spawn both worker threads, then block until they exit."""
        self.user_input_thread = threading.Thread(name = 'user_input_thread', target = self._user_input_thread, args = ( ) )
        self.user_input_thread.start()
        self.server_response_thread = threading.Thread(name = 'server_response_thread', target = self._server_response_thread, args = ( ) )
        self.server_response_thread.start()
        self.user_input_thread.join()
        self.server_response_thread.join()

    def _user_input_thread(self):
        """Loop forever: read one console line and dispatch it; malformed
        input is reported and the loop continues."""
        while( True ):
            try:
                # cleared each iteration; set only by unexpected exceptions
                exc_info = None
                user_input = input()
                self.input_handler.handle_input(user_input)
            except MalformedUserInputException as err:
                print("Malformed input: {}".format(str(err)))
                print("Enter another input")
            except MalformedRequestException as err:
                print("Error in processing request: {}".format(str(err)))
                self.state_transition_manager.reset()
            except Exception as err:
                # remember the traceback so `finally` reports it without
                # killing the thread
                exc_info = sys.exc_info()
            finally:
                if( not exc_info is None ):
                    traceback.print_exception(*exc_info)

    def _server_response_thread(self):
        """Loop forever: receive one datagram from the server and dispatch it."""
        while( True ):
            try:
                exc_info = None
                data, addr = self.sock.recvfrom(client_config['BUFFER_MAX_SIZE'])
                self.response_handler.handle_response(data)
            except MalformedRequestException as err:
                print("Error in processing request: {}".format(str(err)))
                self.state_transition_manager.reset()
            except Exception as err:
                exc_info = sys.exc_info()
            finally:
                if( not exc_info is None ):
                    traceback.print_exception(*exc_info)
| true
|
7086248555aeb4bf66ab6b7ee0c4ab77c761086a
|
Python
|
ZoranPandovski/al-go-rithms
|
/games/Python/Pong Game/scoreboard.py
|
UTF-8
| 690
| 3.484375
| 3
|
[
"CC0-1.0"
] |
permissive
|
from turtle import Turtle
class ScoreBoard(Turtle):
    """Turtle-based two-player score display for Pong."""

    def __init__(self):
        super().__init__()
        self.penup()
        self.color("white")
        self.hideturtle()
        self.l_score = 0
        self.r_score = 0
        self.update_scoreboard()

    def update_scoreboard(self):
        """Erase and redraw both scores at the top of the screen."""
        self.clear()
        # left score at x=-90, right score at x=90 (drawn in that order)
        for x_position, score in ((-90, self.l_score), (90, self.r_score)):
            self.goto(x_position, 200)
            self.write(score, align="center", font=("Courier", 70, "normal"))

    def l_point(self):
        """Award a point to the left player and refresh the display."""
        self.l_score += 1
        self.update_scoreboard()

    def r_point(self):
        """Award a point to the right player and refresh the display."""
        self.r_score += 1
        self.update_scoreboard()
| true
|
1696a0dc00f8511341c8cd17ceb100e7e08e7859
|
Python
|
neerajrp1999/library-Management
|
/BookDetails.py
|
UTF-8
| 5,379
| 2.625
| 3
|
[] |
no_license
|
import pyodbc
from tkinter import *
import tkinter
from tkinter import messagebox
import HomePage
# Module-level DB connection (side effect at import time): connects to the
# local SQL Server instance using Windows authentication.
cnxn = pyodbc.connect("Driver={SQL Server};"
                      "Server=.\;"
                      "Database=tester;"
                      "Trusted_Connection=yes;")
cursor = cnxn.cursor()   # shared cursor used by every query in this module
def ClearList(Bookid, Bookname, Authername, NoOfBookAvilable, NoOfBook):
    """Empty all five result listboxes before they are repopulated."""
    for listbox in (Bookid, Bookname, Authername, NoOfBookAvilable, NoOfBook):
        listbox.delete(0, tkinter.END)
def Page(r):
    """Destroy window *r* and open the Book Details browser window.

    Builds the search bar, column headers and five parallel listboxes,
    loads every row of the BookDetail table, and wires the Search /
    Refresh / Go-Back buttons.  Blocks in root.mainloop().
    """
    r.destroy()
    root=Tk()
    root.title("Book Details")
    root.geometry("650x550")
    Label(root, text="Book Details",fg='red', font=("Helvetica", 16)).pack()
    Label(root, text="",fg='red', font=("Helvetica", 16)).pack()
    # --- search bar: radio buttons select which column to search ---
    ra = PanedWindow()
    var = IntVar()
    R1 = Radiobutton(ra, text="Book ID", variable=var, value=1)
    R2 = Radiobutton(ra, text="Book Name", variable=var, value=2)
    R3 = Radiobutton(ra, text="Auther Name", variable=var, value=3)
    search_entry = Entry(ra)
    b1=Button(ra, text="Search", height=1,command=lambda:Search_query(var,search_entry))
    b2=Button(ra, text="Refresh", height=1,command=lambda:AllData())
    b3=Button(ra, text="GO Back", height=1,command=lambda:GoBack(root))
    ra.add(R1)
    ra.add(R2)
    ra.add(R3)
    ra.add(search_entry)
    ra.add(b1)
    ra.add(b2)
    ra.add(b3)
    ra.pack()
    var.set(1)   # default search mode: by Book ID
    # --- column header row ---
    head = PanedWindow()
    l1=Label(head,text="Book ID\t\t ",fg='blue')
    l2=Label(head,text=" Book Name \t",fg='blue')
    l3=Label(head,text=" Auther Name\t",fg='blue')
    l4=Label(head,text="\tNumber Of Books\t",fg='blue')
    l5=Label(head,text="Number Of Books Avilable",fg='blue')
    head.add(l1)
    head.add(l2)
    head.add(l3)
    head.add(l4)
    head.add(l5)
    head.pack()
    # --- five parallel listboxes act as the result-grid columns ---
    m1 = PanedWindow()
    scrollbar = Scrollbar(root)
    Bookid = Listbox(m1)
    Bookname = Listbox(m1)
    Authername = Listbox(m1)
    NoOfBook = Listbox(m1)
    NoOfBookAvilable = Listbox(m1)
    def AllData():
        # Reload every row of BookDetail into the listboxes.
        ClearList(Bookid,Bookname,Authername,NoOfBookAvilable,NoOfBook)
        cursor.execute('SELECT * FROM BookDetail ORDER BY BookId DESC')
        for row in cursor:
            # insert(0, ...) reverses the DESC query order back to ascending
            Bookid.insert(0, row[0])
            Bookname.insert(0, str(row[1]).strip())
            Authername.insert(0, str(row[4]).strip())
            NoOfBook.insert(0, row[2])
            NoOfBookAvilable.insert(0, row[3])
    def yview(*args):
        # Scroll all five columns in lockstep from the shared scrollbar.
        Bookid.yview(*args)
        Bookname.yview(*args)
        Authername.yview(*args)
        NoOfBook.yview(*args)
        NoOfBookAvilable.yview(*args)
    try:
        AllData()
    except:
        print("Database connection problem!!!")
    Bookid.config(yscrollcommand = scrollbar.set)
    Bookname.config(yscrollcommand = scrollbar.set)
    Authername.config(yscrollcommand = scrollbar.set)
    NoOfBook.config(yscrollcommand = scrollbar.set)
    NoOfBookAvilable.config(yscrollcommand = scrollbar.set)
    m1.add(Bookid)
    m1.add(Bookname)
    m1.add(Authername)
    m1.add(NoOfBook)
    m1.add(NoOfBookAvilable)
    scrollbar.config(command =yview)
    scrollbar.pack(side=RIGHT, fill=Y)
    m1.pack(side=LEFT, fill=Y)
    def Search_query(var,search_entry):
        # Run the search selected by the radio buttons (1=ID, 2=name, 3=author).
        varNo=var.get()
        if(len(search_entry.get())==0):
            messagebox.showerror("Error", "Enter something first")
            return
        if(varNo==1):
            try:
                toSearch=int(search_entry.get())
            except:
                messagebox.showerror("Error", "Enter number only")
                return
            ClearList(Bookid,Bookname,Authername,NoOfBookAvilable,NoOfBook)
            # parameterized query: safe from SQL injection
            cursor.execute("SELECT * FROM BookDetail WHERE BookId= ? ORDER BY BookId DESC",(toSearch))
            for row in cursor:
                Bookid.insert(0, row[0])
                Bookname.insert(0, str(row[1]).strip())
                Authername.insert(0, str(row[4]).strip())
                NoOfBook.insert(0, row[2])
                NoOfBookAvilable.insert(0, row[3])
        if(varNo==2):
            toSearch='%'+search_entry.get()+'%'
            ClearList(Bookid,Bookname,Authername,NoOfBookAvilable,NoOfBook)
            # NOTE(review): this query is built with %-interpolation, so user
            # input is injected into the SQL string — switch to a '?' parameter.
            cursor.execute("SELECT * FROM BookDetail WHERE BookName LIKE '%s' ORDER BY BookId DESC"%toSearch)
            for row in cursor:
                Bookid.insert(0, row[0])
                Bookname.insert(0, str(row[1]).strip())
                Authername.insert(0, str(row[4]).strip())
                NoOfBook.insert(0, row[2])
                NoOfBookAvilable.insert(0, row[3])
        if(varNo==3):
            toSearch='%'+search_entry.get()+'%'
            ClearList(Bookid,Bookname,Authername,NoOfBookAvilable,NoOfBook)
            # NOTE(review): same string-built SQL issue as the name search above
            cursor.execute("SELECT * FROM BookDetail WHERE AuthorName LIKE '%s' ORDER BY BookId DESC"%toSearch)
            print(toSearch)
            for row in cursor:
                Bookid.insert(0, row[0])
                Bookname.insert(0, str(row[1]).strip())
                Authername.insert(0, str(row[4]).strip())
                NoOfBook.insert(0, row[2])
                NoOfBookAvilable.insert(0, row[3])
    root.mainloop()
def GoBack(root):
    """Return to the application home page, replacing this window."""
    HomePage.Page(root)
| true
|
478c19b2bc96038e528fff2c4727fd3be00034ff
|
Python
|
PatrickMugayaJoel/SendIt
|
/app/utilities/utils.py
|
UTF-8
| 176
| 2.96875
| 3
|
[] |
no_license
|
def serialize(objt):
return objt.__dict__
def serialize_list(mylist):
listtwo = []
for item in mylist:
listtwo.append(serialize(item))
return listtwo
| true
|
b2a82713f65464456ccee312b7150fbaa6bc7c99
|
Python
|
ido10en/http_server_finaly
|
/http_server_shell.py
|
UTF-8
| 7,240
| 2.75
| 3
|
[] |
no_license
|
import socket
import os
# constants
IP = '127.0.0.1'      # loopback: the server only serves local clients
PORT = 8090           # TCP listening port
SOCKET_TIMEOUT = 5.0 # the time the server waits before raising error if there is no Get
#Get data from file
def get_file_data(file_root):
    """Return the raw bytes of the file at *file_root*."""
    # 'with' guarantees the handle is closed even if read() raises;
    # the original only closed it on the success path.
    with open(file_root, 'rb') as file_data:
        return file_data.read()
def create_file(file_name, client_socket):
    """create a file using the data recieved from client
    Args:
        file_name (str)
        client_socket ([type]): the connection with the client
    """
    file_path = "webroot\\imgs\\" + file_name
    with open(file_path, 'wb') as image:
        # Sentinel: a 4096-char str only primes the loop condition below;
        # it is never written, only measured.
        data = 4096 * 'i'
        print("file opened")
        #run until reading all of the data
        # NOTE(review): a chunk of exactly 4096 bytes keeps the loop waiting
        # for more; transfers whose size is a multiple of 4096 rely on the
        # peer closing the connection (recv then returns b'').
        while len(data) == 4096:
            print("recieving data...")
            data = client_socket.recv(4096)
            image.write(data)
    print("file recieved")
#Get content type
def get_content_type(type):
    """Map a file extension to its HTTP Content-Type header value.

    Unknown extensions raise KeyError.  (The parameter shadows the builtin
    `type`; kept for interface compatibility.)
    """
    contents = {
        "html": "text/html; charset=utf-8",
        "txt": "text/html; charset=utf-8",
        "jpg": "image/jpeg",
        "js": "text/javascript; charset=UTF-8",
        "css": "text/css",
        "ico": "image/x-icon",
        "gif": "image/gif",
    }
    return contents[type]
#Check the required resource, generate proper HTTP response and send to client
def handle_client_request(file_name, client_socket):
    """Build and send the HTTP response for GET *file_name*.

    Serves three dynamic endpoints (image?, calculate-next?,
    calculate-area?) and otherwise static files from webroot\\, answering
    200/302/403/500 per the hard-coded response_dict policy table.
    """
    http_header = "HTTP/1.1"
    data = ""
    if "image?" in file_name:
        # /image?image-name=<name>  ->  serve webroot\imgs\<name>.jpg
        image_name = file_name.split('=')[1]
        url = "webroot\\imgs\\" + image_name + '.jpg'
        with open(url, 'rb') as hamutzi:
            data = hamutzi.read()
        status = " 200 OK\r\nContent-Length: " + str(os.path.getsize(url)) + "\r\nContent-Type: " + "image/jpeg" + "\r\n\r\n"
    #if one of the functions was called
    elif "calculate-next?" in file_name:
        # /calculate-next?num=<n>  ->  plain-text n+1
        num_sent = int(file_name.split('=')[-1])
        num_to_send = num_sent + 1
        status = " 200 OK\r\nContent-Length: " + str(len(str(num_to_send))) + "\r\nContent-Type: text/plain\r\n\r\n" + str(num_to_send)
    elif "calculate-area?" in file_name:
        # /calculate-area?height=<h>&width=<w>  ->  triangle area h*w/2
        parts = file_name.split('=')
        width = int(parts[-1])
        length = int(parts[-2].split('&')[0])
        area = (length * width) / 2
        status = " 200 OK\r\nContent-Length: " + str(len(str(area))) + "\r\nContent-Type: text/plain\r\n\r\n" + str(area)
    else:
        directory = 'webroot\\'
        DEFAULT_URL = directory + "index.html"
        if file_name == '': # in case no specific file was requested
            url = DEFAULT_URL
            file_name = 'index.html'
        else:
            url = directory + file_name # generating the url
        if os.path.isfile(url):# if the requested file exists
            print(url)
            # per-resource response policy table
            response_dict = {"index.html": "ok",
            "css\\doremon.css": "ok",
            "js\\box.js": "ok",
            "js\\jquery.min.js": "ok",
            "js\\submit.js": "ok",
            "imgs\\abstract.jpg": "ok",
            "imgs\\favicon.ico": "ok",
            "imgs\\loading.gif": "ok",
            "imgs\\1546435417505.jpg" : "ok",
            "ido.html": "forbidden",
            "index1.html": "moved"}
            if response_dict[file_name] == "ok":
                file_type = url.split(".")[-1]
                status = " 200 OK\r\nContent-Length: " + str(os.path.getsize(url)) + "\r\nContent-Type: " + get_content_type(file_type) + "\r\n\r\n"
                data = get_file_data(url)
                print(data)
            elif response_dict[file_name] == "forbidden":
                status = " 403 Forbidden"
            elif response_dict[file_name] == "moved":
                status = " 302 Temprarily Moved\r\nLocation: index.html"
        else:# if the request was not understood
            status = " 500 Internal Server Error"
    http_header += status
    print(http_header)
    print("the file name is " + file_name)
    # generating the response properly
    if not isinstance(data, bytes):
        data = data.encode()
    http_header = http_header.encode()
    http_response = http_header + data
    print(http_response)
    client_socket.send(http_response)
#saving an image the client sent and generating propet HTTP response
def handle_post_request(request, client_socket):
    """Handle POST /upload?file-name=<name>: receive the file body over the
    socket via create_file() and acknowledge with a 200 response."""
    str_request = str(request)
    #if the client sent post request
    if "upload?" in str_request:
        # everything after 'file-name=' in the request-line path is the name
        image_name = str_request.split(' ')[1].split("file-name=")[1]
        print("the image name is: " + image_name)
        create_file(image_name, client_socket)
        status = " 200 OK\r\nContent-Length: " + "20" + "\r\nContent-Type: text/plain\r\n\r\n" + "successfuly uploaded"
        client_socket.send(("HTTP/1.1" + status).encode())
#Check if request is a valid HTTP request and returns TRUE / FALSE and the requested file name
def validate_http_request(request):
    """Check that raw *request* bytes form a well-formed HTTP/1.1 GET.

    Returns (is_valid, resource) where resource is the requested path with
    the leading '/' stripped and '/' converted to '\\' for Windows paths.
    """
    # BUG FIX: the original split on the raw string r'\r\n' (the literal
    # four characters backslash-r-backslash-n) instead of the CRLF line
    # terminator; it only worked by accident because element [0] was then
    # the whole text.  Split on the real '\r\n' to isolate the request line.
    web_root = request.decode().split('\r\n')[0].split(' ')[1][1:]
    if '/' in web_root:
        web_root = web_root.replace('/', '\\')
    decoded_request = str(request.decode())# it been received binary
    #ensure the request is valid
    if decoded_request[0:3] != 'GET':
        return False, web_root
    if decoded_request[3] != ' ':
        return False, web_root
    if "HTTP/1.1" not in decoded_request:
        return False, web_root
    return True, web_root
# checks if the request is a valid post request and return true or false
def validate_post_request(request):
    """Return True when the raw *request* bytes begin with 'POST'."""
    return request[0:4].decode() == 'POST'
#Handles client requests: verifies client's requests are legal HTTP, calls function to handle the requests
def handle_client(client_socket):
    """Serve one connection: read a single request, dispatch it to the POST
    or GET handler, then close the socket."""
    print('Client connected')
    client_request = client_socket.recv(4096)# receiving the data the client sent
    print(client_request)
    valid_post = validate_post_request(client_request)
    if valid_post:
        print('Got a valid POST request')
        handle_post_request(client_request, client_socket)
    else:
        valid_http, file_name = validate_http_request(client_request)# valid the request
        if valid_http:# the request is valid
            print('Got a valid HTTP request')
            handle_client_request(file_name, client_socket)
        else:
            print('Error: Not a valid request')
    print('Closing connection')
    client_socket.close()
#main function - Open a socket and loop forever while waiting for clients
def main():
    """Bind the listening TCP socket on IP:PORT and serve clients one at a
    time, forever (single-threaded: a slow client blocks the next one)."""
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)# defining the socket
    server_socket.bind((IP, PORT))# setting the current ip and port
    server_socket.listen()# the time it listen to client until closing the socket
    print("Listening for connections on port %d" % PORT)
    while True:
        client_socket, client_address = server_socket.accept()# accept the request
        print('New connection received')
        # client_socket.settimeout(SOCKET_TIMEOUT)# defining the time until the socket will be shut down
        handle_client(client_socket)


if __name__ == "__main__":
    # Call the main handler function
    main()
| true
|
2796b8183e65f9f9693d1cb5121571f95cec187b
|
Python
|
luigirizzo/netmap
|
/extra/python/pktman.py
|
UTF-8
| 9,231
| 2.703125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env python
#
# Packet generator written in Python, providing functionalities
# similar to the netmap pkt-gen written in C
#
# Author: Vincenzo Maffione
#
import netmap # our module
import time # time measurements
import select # poll()
import argparse # program argument parsing
import multiprocessing # thread management
import re
# import scapy suppressing the initial WARNING message
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import Ether, IP, UDP # packet forgery
def help_quit(parser):
    """Print a blank line plus the argparse usage help, then exit."""
    print()  # print() with no args emits the same empty line as print("")
    parser.print_help()
    quit()
def build_packet(args, parser):
    """Forge the UDP-over-IP-over-Ethernet frame to transmit with scapy.

    Pads the payload with a repeated greeting so the frame is exactly
    args.length bytes; on malformed address parameters prints the parser
    help and exits.  Returns the assembled frame.
    """
    src = args.src.split(':')   # "ip:port" -> [ip, port]
    dst = args.dst.split(':')
    # create the payload
    base = "Hello from Python"
    header_len = 14 + 20 + 8    # Ethernet + IPv4 + UDP header sizes
    # NOTE(review): '/' here is integer division only on Python 2; use '//'
    # if this script is ever ported to Python 3.
    data = base * ((args.length-header_len)/len(base) + 1)
    data = data[0:args.length-header_len]
    scap = Ether(src = args.srcmac, dst = args.dstmac)
    scap = scap / IP(src = src[0], dst = dst[0])
    scap = scap / UDP(sport = int(src[1]), dport = int(dst[1]))
    scap = scap / data
    try:
        # checksum is computed when calling str(scap), e.g. when the packet is
        # assembled
        ret = str(scap)
    except:
        print("Packet parameters are invalid\n")
        help_quit(parser)
    if args.dump:
        scap.show2()
    return ret
def transmit(idx, ifname, args, parser, queue):
    """Worker process: flood tx ring *idx* of *ifname* with one pre-built
    packet at maximum rate.

    Runs until Ctrl-C (KeyboardInterrupt), then reports
    [packet_count, elapsed_seconds] back through *queue*.
    """
    # use nm_open() to open the netmap device and register an interface
    # using an extended interface name
    nmd = netmap.NetmapDesc(ifname)
    time.sleep(args.wait_link)
    # build the packet that will be transmitted
    pkt = build_packet(args, parser)
    # fill in the netmap slots and netmap buffers for tx ring 0
    txr = nmd.transmit_rings[idx]
    num_slots = txr.num_slots
    for i in range(num_slots):
        txr.slots[i].buf[0:len(pkt)] = pkt
        txr.slots[i].len = len(pkt)
    # transmit at maximum speed until Ctr-C is pressed
    cnt = 0 # packet counter
    batch = args.batch
    poller = select.poll()
    poller.register(nmd.getfd(), select.POLLOUT)
    t_start = time.time()
    try:
        cur = txr.cur
        while 1:
            ready_list = poller.poll(2)
            if len(ready_list) == 0:
                print("Timeout occurred")
                break;
            n = txr.tail - cur # avail
            if n < 0:
                # ring indexes wrap around num_slots
                n += num_slots
            if n > batch:
                n = batch
            cur += n
            if cur >= num_slots:
                cur -= num_slots
            txr.cur = txr.head = cur # lazy update txr.cur and txr.head
            nmd.txsync()
            cnt += n
    except KeyboardInterrupt:
        # report the result to the main process
        queue.put([cnt, time.time() - t_start])
        pass
def receive(idx, ifname, args, parser, queue):
    """Worker process: drain rx ring *idx* of *ifname*, counting packets.

    Blocks until the first packet arrives, then counts until Ctrl-C and
    reports [packet_count, elapsed_seconds] through *queue*; if
    interrupted before any packet, reports [0, None].
    """
    # use nm_open() to open the netmap device and register an interface
    # using an extended interface name
    nmd = netmap.NetmapDesc(ifname)
    time.sleep(args.wait_link)
    # select the right ring
    rxr = nmd.receive_rings[idx]
    num_slots = rxr.num_slots
    cnt = 0 # packet counter
    poller = select.poll()
    poller.register(nmd.getfd(), select.POLLIN)
    # wait for the first packet
    try:
        poller.poll()
    except KeyboardInterrupt:
        # report the result to the main process
        queue.put([cnt, None])
        return
    # receive (throwing away everything) until Ctr-C is pressed
    t_start = time.time()
    try:
        cur = rxr.cur
        while 1:
            ready_list = poller.poll()
            if len(ready_list) == 0:
                print("Timeout occurred")
                break;
            n = rxr.tail - cur # avail
            if n < 0:
                # ring indexes wrap around num_slots
                n += num_slots
            cur += n
            if cur >= num_slots:
                cur -= num_slots
            rxr.cur = rxr.head = cur # lazy update rxr.cur and rxr.head
            cnt += n
    except KeyboardInterrupt:
        # report the result to the main process
        queue.put([cnt, time.time() - t_start])
        pass
############################## MAIN ###########################
if __name__ == '__main__':
    # map each CLI-selectable function name to its worker implementation
    handler = {'tx': transmit, 'rx': receive}
    # program arguments
    parser = argparse.ArgumentParser(description = 'Send or receive packets using the netmap API')
    parser.add_argument('-i', '--interface', help = 'the interface to register with netmap; '
                        'can be in the form netmap:<OSNAME>[<EXT>] or <VALENAME>[<EXT>], where '
                        'OSNAME is the O.S. name for a network interface (e.g. "eth0"), '
                        '<VALENAME> is a valid VALE port name (e.g. "vale18:2") and <EXT> is an '
                        'optional extension suffix, specified using the nm_open() syntax '
                        '(e.g. "^", "-5", "{44", ...)',
                        required = True)
    parser.add_argument('-f', '--function', help = 'the function to perform',
                        choices = ['tx', 'rx'], default = 'rx')
    parser.add_argument('-b', '--batchsize', help = 'number of packets to send with each TXSYNC '
                        'operation', type=int, default = 512, dest = 'batch')
    parser.add_argument('-l', '--length', help = 'length of the ethernet frame sent',
                        type = int, default = 60)
    parser.add_argument('-D', '--dstmac', help = 'destination MAC of tx packets',
                        default = 'ff:ff:ff:ff:ff:ff')
    parser.add_argument('-S', '--srcmac', help = 'source MAC of tx packets',
                        default = '00:00:00:00:00:00')
    parser.add_argument('-d', '--dst', help = 'destination IP address and UDP port of tx packets',
                        default = '10.0.0.2:54322', metavar = 'IP:PORT')
    parser.add_argument('-s', '--src', help = 'source IP address and UDP port of tx packets',
                        default = '10.0.0.1:54321', metavar = 'IP:PORT')
    parser.add_argument('-w', '--wait-link', help = 'time to wait for the link before starting '
                        'transmit/receive operations (in seconds)', type = int, default = 1)
    parser.add_argument('-X', '--dump', help = 'dump the packet', action = 'store_true')
    parser.add_argument('-p', '--threads', help = 'number of threads to used for tx/rx '
                        'operations', type = int, default = 1)
    # parse the input
    args = parser.parse_args()
    # bound checking: ethernet frames cannot be shorter than 60 bytes
    if args.length < 60:
        print('Invalid packet length\n')
        help_quit(parser)
    if args.threads < 1:
        print('Invalid number of threads\n')
        help_quit(parser)
    # Temporarily open a netmap descriptor to get some info about the
    # number of involved rings or the specific ring couple involved
    d = netmap.NetmapDesc(args.interface)
    if d.getflags() in [netmap.RegAllNic, netmap.RegNicSw]:
        # all-NIC registration: one worker per (tx, rx) ring couple
        max_couples = min(len(d.receive_rings), len(d.transmit_rings))
        if d.getflags() == netmap.RegAllNic:
            # the last couple is the host stack couple; exclude it
            max_couples -= 1
        ringid_offset = 0
        suffix_required = True
    else:
        # single-ring registration: only that couple can be used
        max_couples = 1
        ringid_offset = d.getringid()
        suffix_required = False
    del d
    if args.threads > max_couples:
        print('You cannot use more than %s (tx,rx) rings couples with "%s"' % (max_couples, args.interface))
        quit(1)
    jobs = []    # worker processes
    queues = []  # one IPC queue per worker, used to report (count, elapsed)
    for i in range(args.threads):
        queue = multiprocessing.Queue()
        queues.append(queue)
        # 'ring_id' contains the ring idx on which the process below will operate
        ring_id = i + ringid_offset
        # it may also be necessary to add an extension suffix to the interface
        # name specified by the user
        ifname = args.interface
        if suffix_required:
            ifname += '-' + str(ring_id)
        print("Run worker #%d on %s, ring_id %d" % (i, ifname, ring_id))
        # create a new process that will execute the user-selected handler
        # function, with the arguments specified by the 'args' tuple
        job = multiprocessing.Process(name = 'worker-' + str(i),
                                      target = handler[args.function],
                                      args = (ring_id, ifname, args, parser, queue))
        job.daemon = True  # ensure worker termination with the main process
        jobs.append(job)
    # start all the workers
    for job in jobs:
        job.start()
    # Wait for the user pressing Ctrl-C
    try:
        while 1:
            time.sleep(1000)
    except KeyboardInterrupt:
        pass
    # collect and print the result returned by the workers
    tot_rate = 0.0
    for i in range(len(jobs)):
        result = queues[i].get()
        jobs[i].join()
        cnt = result[0]
        delta = result[1]
        if delta is None:
            # worker was interrupted before the first packet: no rate available
            rate = None
        else:
            rate = 0.001 * cnt / delta
            # bug fix: only accumulate real rates — the original executed
            # 'tot_rate += rate' unconditionally and raised a TypeError
            # whenever a worker reported delta == None
            tot_rate += rate
        print('[%d] Packets processed: %s, Avg rate %s Kpps' % (i, cnt, rate))
    print('Total rate: %s' % (tot_rate, ))
| true
|
0bc456507a518f3fb571504ebb53ed478f723250
|
Python
|
WashHolanda/Curso-Python
|
/exercicios_Youtube/ex057.py
|
UTF-8
| 422
| 3.921875
| 4
|
[] |
no_license
|
'''
Read a person's sex and accept only the values 'M' or 'F'.
On invalid input, keep asking until a correct value is entered.
'''
sexo = input('Informe seu sexo [M/F]: ').strip().upper()[0]
while True:
    if sexo in 'MF':
        break
    # invalid answer: warn (in red) and ask again
    sexo = input('\033[31mDados Inválidos!\033[m Por favor informe seu sexo [M/F]: ').strip().upper()[0]
print(f'\033[32mSexo {sexo} registrado com sucesso!\033[m')
| true
|
35e0da2ba60407f971aa3b8200d64f01ceefc970
|
Python
|
BAGPALLAB7/HackerRank-Solutions-Python3
|
/HackerRank - Problem Solving/anagram-hackerrank(easy).py
|
UTF-8
| 372
| 3.5
| 4
|
[] |
no_license
|
def anagram(s):
    """Return the minimum number of character changes needed to make the two
    halves of *s* anagrams of each other, or -1 if len(s) is odd.

    Fixes over the original: the accidental double assignment 's1=s1=...'
    is removed, and the O(n^2) list.remove() scan is replaced by multiset
    subtraction with collections.Counter (one O(n) pass).
    """
    from collections import Counter
    if len(s) % 2 != 0:
        return -1
    half = len(s) // 2
    first = Counter(s[:half])
    second = Counter(s[half:])
    # characters of the first half that cannot be matched in the second half
    return sum((first - second).values())
# quick manual check: 'a' vs 'b' requires one change
s='ab'
print(anagram(s))
| true
|
286ff42f93d973e05447f3cfe94769ba09057f42
|
Python
|
tempflip/pyxel_stuff
|
/engine.py
|
UTF-8
| 1,435
| 3.796875
| 4
|
[] |
no_license
|
import pyxel
import math
def rot(x_, y_, angle, cx=0, cy=0):
    """Rotate point (x_, y_) by *angle* degrees around center (cx, cy)."""
    # translate so the rotation center becomes the origin
    dx = x_ - cx
    dy = y_ - cy
    cos_a = math.cos(math.radians(angle))
    sin_a = math.sin(math.radians(angle))
    # apply the rotation matrix to the translated point
    rx = dx * cos_a + dy * sin_a
    ry = -dx * sin_a + dy * cos_a
    # translate back to the original frame
    return (rx + cx, ry + cy)
class Point:
    """A single 3-D point that can render itself as one pixel."""

    def __init__(self, x, y, z):
        self.x, self.y, self.z = x, y, z

    def draw(self, camera, color):
        """Project through *camera* and plot one pixel of *color*."""
        screen_x, screen_y = camera.show_point(self)
        pyxel.pset(screen_x, screen_y, color)
class Shape:
    """A closed polygon defined by an ordered list of 3-D points."""

    def __init__(self, points=None):
        # bug fix: the original used a mutable default argument (points=[]),
        # so every Shape() created without arguments shared one single list
        self.points = [] if points is None else points

    def draw(self, camera, color):
        """Draw the outline, connecting the last point back to the first."""
        prev_point = self.points[-1]
        for p in self.points:
            (x1, y1) = camera.show_point(prev_point)
            (x2, y2) = camera.show_point(p)
            pyxel.line(x1, y1, x2, y2, color)
            prev_point = p
class Model:
    """A mesh: faces plus the point objects they reference."""

    def __init__(self, faces=None, points=None):
        # bug fix: the original used mutable default arguments ([]), shared
        # between every Model() created without explicit lists
        self.faces = [] if faces is None else faces
        self.points = [] if points is None else points

    def rot_z(self, cx, cy, angle):
        """Rotate every point about (cx, cy) around the z axis, in place."""
        for p in self.points:
            (p.x, p.y) = rot(p.x, p.y, angle, cx = cx, cy = cy)

    def rot_y(self, cx, cz, angle):
        """Rotate every point about (cx, cz) around the y axis, in place."""
        for p in self.points:
            (p.x, p.z) = rot(p.x, p.z, angle, cx = cx, cy = cz)
class Camera:
    """Simple perspective projection camera."""

    # fl -- focal length
    def __init__(self, fl = 100, angle = 45, fx = 100, fy = 100):
        self.fl = fl          # focal length
        self.angle = angle    # viewing angle in degrees
        self.fy = fy          # projection center y
        self.fx = fx          # projection center x

    def show_point(self, p):
        """Project 3-D point *p* onto the screen, returning (x, y)."""
        # depth scaling: points with larger z shrink toward (fx, fy)
        base_depth = math.radians(self.angle) * self.fl
        point_depth = math.radians(self.angle) * self.fl + p.z
        z_prop = base_depth / point_depth
        screen_x = self.fx - (self.fx - p.x) * z_prop
        screen_y = self.fy - (self.fy - p.y) * z_prop
        return (screen_x, screen_y)
| true
|
b3432adc2e18640d451e977207a6a1c6ccf8f10f
|
Python
|
pzengseu/leetcode
|
/MultiplyStrings.py
|
UTF-8
| 839
| 2.65625
| 3
|
[] |
no_license
|
class Solution(object):
    def multiply(self, num1, num2):
        """
        Multiply two non-negative integers given as decimal strings, using
        grade-school long multiplication on digit arrays (never converting
        the full numbers to int).

        Bug fix: the original was Python-2-only — it used xrange and true
        division '/'. range and floor division '//' behave identically
        under Python 2 for these int operands and also work under Python 3.

        :type num1: str
        :type num2: str
        :rtype: str
        """
        if num1 == '0' or num2 == '0': return '0'
        m = len(num1)
        n = len(num2)
        # the product of an m-digit and an n-digit number has at most m+n digits
        res = [0] * (m + n)
        # least-significant digit first simplifies index arithmetic
        num1 = list(reversed(num1))
        num2 = list(reversed(num2))
        for i in range(m):
            multiFlag = 0  # carry from the digit products
            addFlag = 0    # carry from the additions into res
            for j in range(n):
                t1 = int(num1[i]) * int(num2[j]) + multiFlag
                multiFlag = t1 // 10
                t2 = res[i + j] + t1 % 10 + addFlag
                addFlag = t2 // 10
                res[i + j] = t2 % 10
            # leftover carries land one position past the last partial digit
            res[i + n] = res[i + n] + multiFlag + addFlag
        res.reverse()
        # at most one leading zero is possible
        if res[0] == 0: res = res[1:]
        return ''.join(map(str, res))
| true
|
e486c02858064c752bcd6ec909d64f8c36eeccb1
|
Python
|
dustinpfister/examples-python
|
/for-post/python-data-types/s5-lists/basic-list.py
|
UTF-8
| 66
| 2.953125
| 3
|
[] |
no_license
|
# a list literal may freely mix element types (here int and str)
l = [1, 'two', 3]
print(l, type(l)) # [1, 'two', 3] <class 'list'>
| true
|
d8f1eecca08c9725f56d15ab3e10f06c4d8a259a
|
Python
|
baijifeilong/MilkPlayer
|
/widgets/media_player_buttons.py
|
UTF-8
| 2,212
| 2.578125
| 3
|
[] |
no_license
|
from PyQt5 import QtWidgets, QtCore, QtGui
import os
class MediaPlayerButtons(QtWidgets.QWidget):
    """Previous / play-pause / next button row bound to a media player.

    The player's stateChanged signal drives the play/pause icon swap.
    """

    def __init__(self, media_player):
        # media_player: an object exposing QMediaPlayer-style state(),
        # play(), pause(), stateChanged and PlayingState — TODO confirm type
        super().__init__()
        self._media_player = media_player
        self._media_player.stateChanged.connect(self._change_button_icon)
        self.init_ui()

    @property
    def media_player(self):
        """Read-only access to the wrapped player."""
        return self._media_player

    def init_ui(self):
        """Create the icons, the three buttons and the horizontal layout."""
        self.main_layout = QtWidgets.QHBoxLayout()
        self.main_layout.setContentsMargins(0, 0, 0, 0)
        # icons are loaded relative to the working directory
        self.previous_icon = QtGui.QIcon('images/previous.png')
        self.next_icon = QtGui.QIcon('images/next.png')
        self.play_icon = QtGui.QIcon('images/play.png')
        self.pause_icon = QtGui.QIcon('images/pause.png')
        # fixed-size buttons so the row does not stretch them
        self.previous_song_button = QtWidgets.QPushButton(self.previous_icon, '')
        self.previous_song_button.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        self.next_song_button = QtWidgets.QPushButton(self.next_icon, '')
        self.next_song_button.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        self.play_pause_button = QtWidgets.QPushButton(self.play_icon, '')
        self.play_pause_button.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        self.play_pause_button.clicked.connect(self.toggle_play)
        # stretches on both sides keep the three buttons centered
        self.main_layout.addStretch()
        self.main_layout.addWidget(self.previous_song_button)
        self.main_layout.addWidget(self.play_pause_button)
        self.main_layout.addWidget(self.next_song_button)
        self.main_layout.addStretch()
        self.setStyleSheet('QPushButton{ '
                           '  background: transparent;'
                           '}')
        self.setLayout(self.main_layout)

    def toggle_play(self):
        """Pause when playing; otherwise start playback."""
        if self.media_player.state() == self.media_player.PlayingState:
            self.media_player.pause()
        else:
            self.media_player.play()

    def _change_button_icon(self, state):
        """Swap the play/pause icon to match the player's new *state*."""
        if state == self.media_player.PlayingState:
            self.play_pause_button.setIcon(self.pause_icon)
        else:
            self.play_pause_button.setIcon(self.play_icon)
| true
|
3f6c8ae7d6f8e29c16443382bf5b8659728c29de
|
Python
|
candyer/leetcode
|
/2020 June LeetCoding Challenge/26_sumNumbers.py
|
UTF-8
| 1,419
| 3.84375
| 4
|
[] |
no_license
|
# https://leetcode.com/explore/featured/card/june-leetcoding-challenge/542/week-4-june-22nd-june-28th/3372/
# Sum Root to Leaf Numbers
# Given a binary tree containing digits from 0-9 only, each root-to-leaf path could represent a number.
# An example is the root-to-leaf path 1->2->3 which represents the number 123.
# Find the total sum of all root-to-leaf numbers.
# Note: A leaf is a node with no children.
# Example:
# Input: [1,2,3]
# 1
# / \
# 2 3
# Output: 25
# Explanation:
# The root-to-leaf path 1->2 represents the number 12.
# The root-to-leaf path 1->3 represents the number 13.
# Therefore, sum = 12 + 13 = 25.
# Example 2:
# Input: [4,9,0,5,1]
# 4
# / \
# 9 0
# / \
# 5 1
# Output: 1026
# Explanation:
# The root-to-leaf path 4->9->5 represents the number 495.
# The root-to-leaf path 4->9->1 represents the number 491.
# The root-to-leaf path 4->0 represents the number 40.
# Therefore, sum = 495 + 491 + 40 = 1026.
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node holding a digit value and two child links."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    def dfs(self, root, res):
        """Sum the root-to-leaf numbers below *root*; *res* is the prefix built so far."""
        if not root:
            return 0
        # extend the number formed along the current path by one digit
        res = res * 10 + root.val
        # a leaf terminates one complete root-to-leaf number
        if not root.left and not root.right:
            return res
        left_total = self.dfs(root.left, res)
        right_total = self.dfs(root.right, res)
        return left_total + right_total

    def sumNumbers(self, root: TreeNode) -> int:
        """Return the total of every number formed by a root-to-leaf path."""
        return self.dfs(root, 0)
| true
|
04cef3376226ddc9b9135f2a0ee6056ef9ad26b9
|
Python
|
FireCARES/firecares
|
/firecares/firestation/management/commands/match_districts.py
|
UTF-8
| 3,055
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.geos import MultiPolygon, Polygon
from firecares.firestation.models import FireStation
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Match district geometries from a GeoJSON file to their fire stations.

    The state code is parsed from the file name (second dash-separated
    token), stations in that state are loaded, and each district polygon is
    assigned to the single intersecting station — or, on multiple hits, to
    the station whose centroid is closest to the district centroid.
    """
    help = 'Matches district geometry within GeoJSON files with appropriate fire station.'
    # NOTE(review): never assigned anywhere in this command;
    # options.get('queryset', ...) below always falls back to the default
    queryset = None

    def add_arguments(self, parser):
        parser.add_argument('geojson_file')
        parser.add_argument('verbose', default=False, nargs='?')

    def handle(self, *args, **options):
        geojson_file = options.get('geojson_file')
        verbose = options.get('verbose')
        # file name convention: '<prefix>-<state>-...' → state is token 1
        state_filter = geojson_file.split('/')[-1].split('-')[1]
        ds = DataSource(geojson_file)
        print('Extracted State code: {0}'.format(state_filter.upper()))
        filter_stations = options.get('queryset', FireStation.objects.filter(state=state_filter.upper()))
        print(filter_stations.count())
        for layer in ds:
            geom_list = layer.get_geoms(geos=True)
            num_geoms = len(geom_list)
            num_updated = 0
            print('Number of Districts: {0}'.format(num_geoms))
            for geom in geom_list:
                # bug fix: test truthiness instead of 'is True' — an identity
                # comparison against True silently drops any non-bool truthy
                # result from intersects()
                match_stations = [station for station in filter_stations
                                  if geom.intersects(station.geom)]
                matched_station = None
                if len(match_stations) == 1:
                    matched_station = match_stations[0]
                elif len(match_stations) > 1:
                    # several candidates: pick the station whose centroid is
                    # nearest the district centroid (EPSG:3857 → meters)
                    geom.set_srid(4326)
                    meter_geom = geom.centroid.transform(3857, clone=True)
                    shortest_dist = meter_geom.distance(match_stations[0].geom.centroid.transform(3857, clone=True))
                    matched_station = match_stations[0]
                    for station in match_stations:
                        station_dist = meter_geom.distance(station.geom.centroid.transform(3857, clone=True))
                        if station_dist < shortest_dist:
                            shortest_dist = station_dist
                            matched_station = station
                if matched_station is not None and matched_station.district is None:
                    if verbose:
                        print('Updated district for {0}'.format(matched_station.name))
                    # the model field stores a MultiPolygon; wrap plain polygons
                    if isinstance(geom, MultiPolygon):
                        matched_station.district = geom
                    elif isinstance(geom, Polygon):
                        matched_station.district = MultiPolygon(geom)
                    matched_station.save()
                    num_updated += 1
                elif matched_station is not None and matched_station.district is not None:
                    if verbose:
                        print('District already set: No Update')
            print('Successfully Updated {0}/{1} Stations'.format(num_updated, num_geoms))
| true
|
69981bd78ce3934247216ebaaf59e9e9b87d55cf
|
Python
|
jiyali/python-target-offer
|
/64_求1+2+…+n.py
|
UTF-8
| 554
| 3.921875
| 4
|
[] |
no_license
|
# 题目:求 1+2+…+n,要求不能使用乘除法、for、while、if、else、switch、case 等关键字及条件判断语句(A?B:C)。
# 思路:等差求和
class Solution(object):
    """Closed-form variant: n*(n+1)/2 written with power and right-shift,
    keeping within the puzzle's ban on multiply/divide operators."""

    def Sum_Solution(self, n):
        """Return 1 + 2 + ... + n."""
        doubled_total = n ** 2 + n
        return doubled_total >> 1
class Solution1(object):
    """Recursive variant: sums n + (n-1) + ... + 1 without loops or an
    explicit if, using 'and' short-circuiting to stop the recursion."""

    def __init__(self):
        self.sum = 0

    def Sum_Solution(self, n):
        """Return 1 + 2 + ... + n.

        Bug fix: the accumulator is reset on every call — the original
        kept adding onto the previous result, so a second call on the
        same instance returned a wrong total.
        """
        self.sum = 0
        self.getsum(n)
        return self.sum

    def getsum(self, n):
        # recursion stops when 'n > 0' is False, short-circuiting the 'and'
        self.sum += n
        n -= 1
        return n > 0 and self.getsum(n)
# quick manual check of the recursive variant (expects 15)
s = Solution1()
print(s.Sum_Solution(5))
| true
|
9344b300d7a03bf88427b6e94651178d9b25ba01
|
Python
|
pengyuhou/git_test1
|
/leetcode/2的幂.py
|
UTF-8
| 412
| 3.59375
| 4
|
[] |
no_license
|
import math
class Solution(object):
    def isPowerOfTwo(self, n):
        """Return True when n is a power of two (n == 2**k for some k >= 0).

        Bug fix: the original compared the last character of the float
        repr of log(n)/log(2) against '0'. That is fragile — floating
        point rounding can make an exact power of two print with a
        non-zero final digit, yielding a wrong False. The bit trick below
        is exact: a positive power of two has exactly one set bit, so
        n & (n - 1) clears it to zero.
        """
        return n > 0 and n & (n - 1) == 0
if __name__ == '__main__':
    # quick manual checks: isPowerOfTwo on the zero edge case,
    # then an f-string formatting demo
    solver = Solution()
    print(solver.isPowerOfTwo(0))
    left, right = 'ada', 'jj'
    print(f'{left}哈哈{right}')
| true
|
82a33c9a9b9c86f110d1cc0b95d742651046f649
|
Python
|
Jingboguo/python
|
/ps1/ps1b.py
|
UTF-8
| 950
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MIT 6.0001 ps1b: count the months needed to save the down payment on a
house, with the salary raised every six months.
"""
annal_salary = float(input('Enter your starting annual salary:'))
portion_saved = float(input('Enter the percent of your salary to save, as a decimal:'))
total_cost = float(input('Enter the cost of your dream home:'))
semi_annual_raise = float(input('Enter the semi-annual raise, as a decimal:'))

portion_down_payment = 0.25   # down payment as a fraction of the house cost
current_savings = 0
r = 0.04                      # annual return earned on savings
number_of_month = 0
target = total_cost * portion_down_payment
monthly_interest_rate = r / 12

while current_savings < target:
    number_of_month += 1
    previous_savings = current_savings
    monthly_savings = annal_salary / 12 * portion_saved
    # this month's balance: old balance + deposit + interest on old balance
    current_savings = previous_savings + monthly_savings + previous_savings * monthly_interest_rate
    # apply the semi-annual raise every sixth month
    if number_of_month % 6 == 0:
        annal_salary = annal_salary * (1 + semi_annual_raise)

print('Number of months: ', number_of_month)
| true
|
a757cc21a98e4f0acf9c97f1f84364b6b1be439c
|
Python
|
carolinesargent/molssi-python-tutorial
|
/test_geom_analysis.py
|
UTF-8
| 938
| 2.671875
| 3
|
[] |
no_license
|
import geom_analysis as ga
import pytest
def test_calculate_distance():
    """Distance from the origin to a unit-x point is exactly 1."""
    origin = [0, 0, 0]
    unit_x = [1, 0, 0]
    assert ga.calculate_distance(origin, unit_x) == 1.0
def test_bond_check_1():
    """Just above the default maximum bond length -> not a bond."""
    assert ga.bond_check(1.501) == False
def test_bond_check_2():
    """A zero distance is not a bond."""
    assert ga.bond_check(0) == False
def test_bond_check_3():
    """Exactly the default maximum bond length -> counts as a bond."""
    assert ga.bond_check(1.5) == True
def test_bond_check_neg():
    """Negative distances are rejected with a ValueError."""
    with pytest.raises(ValueError):
        ga.bond_check(-1)
def test_open_xyz():
    """Non-.xyz file names are rejected with a ValueError."""
    with pytest.raises(ValueError):
        ga.open_xyz('hello.txt')
| true
|
ee254927141454c7385b4fec59098b1808f478e5
|
Python
|
nileshnmahajan/helth
|
/temp/bulk_rog.py
|
UTF-8
| 291
| 2.578125
| 3
|
[] |
no_license
|
import sqlite3

# Load the first two disease names from rog.csv into the Disease table.
conn = sqlite3.connect('hos_data.db')
c = conn.cursor()
# bug fix: the original opened rog.csv and never closed it; the context
# manager guarantees the file handle is released
with open('rog.csv', 'r', encoding="utf-8") as fp1:
    for count, row in enumerate(fp1, start=1):
        dname = row.replace("\n", '')
        c.execute('insert into Disease(dname) values (?)', (dname,))
        # NOTE(review): only the first two rows are inserted — looks like a
        # debugging limit; confirm before removing
        if count == 2:
            break
conn.commit()
conn.close()
| true
|
346e2d757607c95d1b8e2b6b5eb0717a77bc48cf
|
Python
|
itsolutionscorp/AutoStyle-Clustering
|
/all_data/exercism_data/python/bob/1654f40a0e1d45dcbfc8c4c54c282aeb.py
|
UTF-8
| 388
| 3.6875
| 4
|
[] |
no_license
|
def is_question(phrase):
    """Return True when the phrase ends with a question mark."""
    ends_with_qmark = phrase.endswith("?")
    return ends_with_qmark
def is_yelling(phrase):
    """Return True when the phrase is written entirely in caps (shouting)."""
    shouting = phrase.isupper()
    return shouting
def hey(phrase):
    """Bob's reply: yelling beats questioning, silence beats both,
    anything else gets a shrug."""
    phrase = phrase.strip()
    if not phrase:
        return "Fine. Be that way!"
    if phrase.isupper():
        return "Woah, chill out!"
    if phrase.endswith("?"):
        return "Sure."
    return "Whatever."
| true
|
3bbae030819c3f9fd7c87a6dd7ac3ee311d0e68e
|
Python
|
CristianoSouza/tcc
|
/server/ArduinoUDPClient.py
|
UTF-8
| 420
| 3.171875
| 3
|
[] |
no_license
|
import socket

# Create a UDP socket (the original comment said TCP, but SOCK_DGRAM is UDP)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

# Address of the listening Arduino — LAN IP and port, adjust to your setup
server_address = ('192.168.25.177', 8888)

# NOTE(review): this loop never terminates, so the sock.close() below is
# unreachable — presumably intentional for a continuous test sender; confirm.
while 1:
    print ("connecting to " ,server_address)
    message = 'This is the message'
    print ('sending :', message)
    # sendto returns the number of bytes transmitted
    a = sock.sendto(message.encode(), server_address)
    print ("result: ", a)
sock.close()
| true
|
4be8882539901195f1daa27546d519fdf50a1379
|
Python
|
innovatorved/Web-Scrapping
|
/b6-comment.py
|
UTF-8
| 211
| 3.03125
| 3
|
[] |
no_license
|
# python b6-comment.py
from bs4 import BeautifulSoup

# parse the sample page with the lxml backend
with open("example.html" ,"r") as page:
    soup = BeautifulSoup(page ,"lxml")

# .string on the first <p> tag yields its text node (a Comment if the tag
# holds an HTML comment)
comment = soup.p.string
print(comment)
print(type(comment))
print(soup.p.prettify())
| true
|
af33c18456d120d59040d698b857237f9592a33b
|
Python
|
obscuresc/aah-hp
|
/module_controller/hat.py
|
UTF-8
| 1,644
| 2.5625
| 3
|
[] |
no_license
|
# install 'sudo pip3 install adafruit-circuitpython-servokit'
# see hat reference
import board
import busio
import adafruit_pca9685
from adafruit_servokit import ServoKit

# settings
PWM_FREQ = 60
PWM_DUTYCYCLE_MAX = 0xffff

# PCA9685 channel assignment per module:
# 'a'/'b' are the pan/tilt servos, 'v' is the vibration motor
m1a_ch = 0
m1b_ch = 1
m1v_ch = 2
m2a_ch = 3
m2b_ch = 4
m2v_ch = 5
m3a_ch = 6
m3b_ch = 7
m3v_ch = 8

# vibration intensities (throttle fraction per module)
m1v_vibration = 0.2
m2v_vibration = 0.4
m3v_vibration = 0.6

# motor angle ranges (degrees)
m1a_range = 180
m1b_range = 180
m2a_range = 180
m2b_range = 180
m3a_range = 180
m3b_range = 180

#### main ####
i2c = busio.I2C(board.SCL, board.SDA)
hat = adafruit_pca9685.PCA9685(i2c)
# bug fix: PCA9685.frequency is a property, not a method — calling it as
# hat.frequency(PWM_FREQ) raised a TypeError; assign the frequency instead
hat.frequency = PWM_FREQ
kit = ServoKit(channels=16)
def vibration_init():
    """ Vibration Init
    Sets vibration motors to predescribed levels of intensity.
    Levels are not (yet) tailored to represent specific frequencies.
    """
    # bug fix: ServoKit's continuous_servo collection does not support item
    # assignment; the speed is set via the ContinuousServo.throttle attribute
    kit.continuous_servo[m1v_ch].throttle = m1v_vibration
    kit.continuous_servo[m2v_ch].throttle = m2v_vibration
    kit.continuous_servo[m3v_ch].throttle = m3v_vibration
def motor_init():
    """ Motor Init
    Sets controls ranges for each module's pan/tilt stage.
    """
    # bug fix: the Servo attribute is 'actuation_range'; the misspelled
    # 'actuation_rangle' silently created a useless new attribute and left
    # the real range at its default
    kit.servo[m1a_ch].actuation_range = m1a_range
    kit.servo[m1b_ch].actuation_range = m1b_range
    kit.servo[m2a_ch].actuation_range = m2a_range
    kit.servo[m2b_ch].actuation_range = m2b_range
    kit.servo[m3a_ch].actuation_range = m3a_range
    kit.servo[m3b_ch].actuation_range = m3b_range
def motor_update(m1_dev, m2_dev, m3_dev):
    """ Motor Update
    Takes positional deviation and updates the angular rotation of each module.
    """
    # bug fix: 'angle++' is not valid Python (SyntaxError); increment with +=.
    # TODO(review): the deviation arguments are not used yet — presumably each
    # module's angle should change by its own deviation; confirm intent.
    kit.servo[m1a_ch].angle += 1
| true
|
48251f4b5a725289e7de7175534fc40526e118cb
|
Python
|
dtewitt/Wflow_Volta_Project
|
/RS_assessment_funcs.py
|
UTF-8
| 19,637
| 2.75
| 3
|
[] |
no_license
|
### Write GRACE assessment in a script that assesses GRACE performance after saving of the TWSA netcdf file
# GRACE is quite fast (coarse spatial and temporal resolution) so can be done outside of the loop, also because of the rescaling (to monhtly) makes things very difficult inside the loop...
def TWSA_assessment_spatially(loc_TWSA_sim):
    """Spatial-pattern efficiency (Esp) of simulated TWSA against observations.

    Compares simulated terrestrial water storage anomalies (netCDF at
    *loc_TWSA_sim*) with the observations in 'TWSA_obs.nc' month by month on
    an 11x9 grid. Cells are split into calibration and validation catchments
    (hard-coded flat indices below) and the 62 months into a calibration
    (first 38) and a validation period.

    Returns a 4-tuple of mean Esp values:
    (cal catchment/cal period, cal catchment/val period,
     val catchment/cal period, val catchment/val period).
    """
    ## IMPORT FUNCTIONS
    import xarray as xr
    import numpy as np
    from scipy.stats import spearmanr
    from sklearn.metrics import mean_squared_error
    ## PREPARE DATA
    # Open files
    TWSA_sim_file = xr.open_dataset(loc_TWSA_sim)
    TWSA_obs_file = xr.open_dataset('TWSA_obs.nc')
    # Open TWSA data
    TWSA_sim_xr = TWSA_sim_file['TWSA']
    TWSA_obs_xr = TWSA_obs_file['TWSA']
    # turn into numpy arrays (62 months x 11 rows x 9 cols);
    # the row axis is flipped ([::-1]) for both datasets
    TWSA_sim_np = np.zeros(((62, 11, 9)))
    TWSA_obs_np = np.zeros(((62, 11, 9)))
    TWSA_sim_np[:, :, :] = TWSA_sim_xr[:, ::-1, :]
    TWSA_obs_np[:, :, :] = TWSA_obs_xr[:, ::-1, :]
    # reshape arrays: one flat 99-cell vector per month
    TWSA_sim_flat = np.reshape(TWSA_sim_np, (62, 99))
    TWSA_obs_flat = np.reshape(TWSA_obs_np, (62, 99))
    # create nan values in observations outside basin
    nan1_arr = np.where(np.isnan(TWSA_sim_flat), np.nan, 1)
    TWSA_obs_flat = TWSA_obs_flat * nan1_arr
    ## PERFORM Esp CALCULATION (this cannot be done in 2D and is also very fast like this!)
    Esp_TWSA_cal = np.zeros(62)
    Esp_TWSA_val = np.zeros(62)
    for i in range(62):
        # presumably months without an observation solution — TODO confirm
        if i == 0 or i == 1 or i == 2 or i == 5 or i == 6 or i == 17:
            Esp_TWSA_cal[i] = np.nan
            Esp_TWSA_val[i] = np.nan
        else:
            # delete nan values (keeps only in-basin cells, in flat order)
            TWSA_sim = TWSA_sim_flat[i][~np.isnan(TWSA_sim_flat[i])]
            TWSA_obs = TWSA_obs_flat[i][~np.isnan(TWSA_obs_flat[i])]
            # Validation cells are 3, 6, 7, 10, and 11 in count
            # Seperate calibration and validation cells
            TWSA_sim_val = np.array([TWSA_sim[3], TWSA_sim[6], TWSA_sim[10], TWSA_sim[11], TWSA_sim[16], TWSA_sim[17]])
            TWSA_obs_val = np.array([TWSA_obs[3], TWSA_obs[6], TWSA_obs[10], TWSA_obs[11], TWSA_obs[16], TWSA_obs[17]])
            TWSA_sim_cal = np.delete(TWSA_sim, np.array([17, 16, 11, 10, 6, 3]))
            TWSA_obs_cal = np.delete(TWSA_obs, np.array([17, 16, 11, 10, 6, 3]))
            # 1. Calculate spearman rank correlation of the flattened array of validation and calibration array
            rs_cal = spearmanr(a=TWSA_obs_cal, b=TWSA_sim_cal, axis=0, nan_policy='omit')[0]
            rs_val = spearmanr(a=TWSA_obs_val, b=TWSA_sim_val, axis=0, nan_policy='omit')[0]
            # 2. Calculate variability ratio
            # (assume means and std's are calculated wrt to space, not to time, because it is a spatial matching term)
            std_obs_cal = np.nanstd(TWSA_obs_cal)
            std_sim_cal = np.nanstd(TWSA_sim_cal)
            std_obs_val = np.nanstd(TWSA_obs_val)
            std_sim_val = np.nanstd(TWSA_sim_val)
            mean_obs_cal = np.nanmean(TWSA_obs_cal)
            mean_sim_cal = np.nanmean(TWSA_sim_cal)
            mean_obs_val = np.nanmean(TWSA_obs_val)
            mean_sim_val = np.nanmean(TWSA_sim_val)
            # coefficients of variation and their simulated/observed ratio
            CV_obs_cal = std_obs_cal / mean_obs_cal
            CV_sim_cal = std_sim_cal / mean_sim_cal
            CV_obs_val = std_obs_val / mean_obs_val
            CV_sim_val = std_sim_val / mean_sim_val
            gamma_cal = CV_sim_cal / CV_obs_cal
            gamma_val = CV_sim_val / CV_obs_val
            # 3. spatial location matching term alpha (z-scores, then 1 - RMSE)
            Z_obs_cal = (TWSA_obs_cal - mean_obs_cal) / std_obs_cal
            Z_sim_cal = (TWSA_sim_cal - mean_sim_cal) / std_sim_cal
            Z_obs_val = (TWSA_obs_val - mean_obs_val) / std_obs_val
            Z_sim_val = (TWSA_sim_val - mean_sim_val) / std_sim_val
            Z_obs_cal = Z_obs_cal[~np.isnan(Z_obs_cal)]
            Z_sim_cal = Z_sim_cal[~np.isnan(Z_sim_cal)]
            Z_obs_val = Z_obs_val[~np.isnan(Z_obs_val)]
            Z_sim_val = Z_sim_val[~np.isnan(Z_sim_val)]
            alpha_cal = 1 - np.sqrt(mean_squared_error(y_true=Z_obs_cal, y_pred=Z_sim_cal))
            alpha_val = 1 - np.sqrt(mean_squared_error(y_true=Z_obs_val, y_pred=Z_sim_val))
            # 4. Return Esp from its three components
            Esp_TWSA_cal[i] = 1 - np.sqrt((rs_cal - 1) ** 2 + (gamma_cal - 1) ** 2 + (alpha_cal - 1) ** 2)
            Esp_TWSA_val[i] = 1 - np.sqrt((rs_val - 1) ** 2 + (gamma_val - 1) ** 2 + (alpha_val - 1) ** 2)
    # Divide the TWSA dataset into two periods, one for calibration and one for evaluation
    # calibration period (2002 t/m feb 2005) (32 months with data)
    # evaluation period (mar 2005 - feb 2007) (24 months)
    # Take mean of arrays
    Esp_TWSA_mean_cal_cal = np.nanmean(Esp_TWSA_cal[0:38])  # calibration catchment, calibration period
    Esp_TWSA_mean_cal_val = np.nanmean(Esp_TWSA_cal[38:])   # calibration catchment, validation period
    Esp_TWSA_mean_val_cal = np.nanmean(Esp_TWSA_val[0:38])  # validation catchment, calibration period
    Esp_TWSA_mean_val_val = np.nanmean(Esp_TWSA_val[38:])   # validation catchment, validation period
    TWSA_sim_file.close()
    TWSA_obs_file.close()
    return Esp_TWSA_mean_cal_cal, Esp_TWSA_mean_cal_val, Esp_TWSA_mean_val_cal, Esp_TWSA_mean_val_val
## Here, also a function for GRACE assessment temporally is given. This may take much longer!
def TWSA_assessment_temporally(loc_TWSA_sim):
    """Temporal KGE of simulated TWSA against observations, per grid cell.

    Computes a Kling-Gupta-style efficiency over time for every cell of the
    11x9 grid, separately for the calibration period (first 38 months) and
    the validation period (remaining months), then averages the KGE map over
    the validation cells (hard-coded indices) and the remaining calibration
    cells.

    Returns (mean_KGE_cal_cal, mean_KGE_cal_val,
             mean_KGE_val_cal, mean_KGE_val_val), where the first index is
    catchment (cal/val) and the second the period (cal/val).
    """
    ## IMPORT FUNCTIONS
    import xarray as xr
    import numpy as np
    # Get files and open datasets
    TWSA_obs_file = xr.open_dataset('TWSA_obs.nc')
    TWSA_sim_file = xr.open_dataset(loc_TWSA_sim)
    # Open TWSA dataset
    TWSA_obs_xr = TWSA_obs_file['TWSA']
    TWSA_sim_xr = TWSA_sim_file['TWSA']
    # Turn into numpy arrays (62 months x 11 rows x 9 cols, rows flipped)
    TWSA_obs_np = np.zeros(((62, 11, 9)))
    TWSA_sim_np = np.zeros(((62, 11, 9)))
    TWSA_sim_np[:, :, :] = TWSA_sim_xr[:, ::-1, :]
    TWSA_obs_np[:, :, :] = TWSA_obs_xr[:, ::-1, :]
    # Divide in calibration and validation period
    # calibration period
    TWSA_sim_np_cal = TWSA_sim_np[0:38, :, :]
    TWSA_obs_np_cal = TWSA_obs_np[0:38, :, :]
    # validation period
    TWSA_sim_np_val = TWSA_sim_np[38:, :, :]
    TWSA_obs_np_val = TWSA_obs_np[38:, :, :]
    ## Calculate KGE value in 3D!!!
    # Prepare means and standard deviations (over the time axis)
    mean_TWSA_sim_cal = np.round(np.nanmean(TWSA_sim_np_cal, axis=0), decimals=2)
    mean_TWSA_sim_val = np.round(np.nanmean(TWSA_sim_np_val, axis=0), decimals=2)
    std_TWSA_sim_cal = np.nanstd(TWSA_sim_np_cal, axis=0)
    std_TWSA_sim_val = np.nanstd(TWSA_sim_np_val, axis=0)
    mean_TWSA_obs_cal = np.round(np.nanmean(TWSA_obs_np_cal, axis=0), decimals=2)
    mean_TWSA_obs_val = np.round(np.nanmean(TWSA_obs_np_val, axis=0), decimals=2)
    std_TWSA_obs_cal = np.nanstd(TWSA_obs_np_cal, axis=0)
    std_TWSA_obs_val = np.nanstd(TWSA_obs_np_val, axis=0)
    # 1. Calculate Pearson r correlation coefficients (cell-wise, over time)
    cov_TWSA_cal = np.nanmean((TWSA_sim_np_cal - mean_TWSA_sim_cal) * (TWSA_obs_np_cal - mean_TWSA_obs_cal), axis=0)
    cov_TWSA_val = np.nanmean((TWSA_sim_np_val - mean_TWSA_sim_val) * (TWSA_obs_np_val - mean_TWSA_obs_val), axis=0)
    Pearson_r_cal = cov_TWSA_cal / (std_TWSA_sim_cal * std_TWSA_obs_cal)
    Pearson_r_val = cov_TWSA_val / (std_TWSA_sim_val * std_TWSA_obs_val)
    # 2. bias ratio beta # means are probably both zero so they are shifted with 5!!
    beta_cal = (mean_TWSA_sim_cal + 5) / (mean_TWSA_obs_cal + 5)
    beta_val = (mean_TWSA_sim_val + 5) / (mean_TWSA_obs_val + 5)  # everything + 5
    # 3. variability ratio gamma (same +5 shift on the means)
    gamma_cal = (std_TWSA_sim_cal / (mean_TWSA_sim_cal + 5)) / (std_TWSA_obs_cal / (mean_TWSA_obs_cal + 5))
    gamma_val = (std_TWSA_sim_val / (mean_TWSA_sim_val + 5)) / (std_TWSA_obs_val / (mean_TWSA_obs_val + 5))  # every mean + 5
    # 4. KGE calculation
    KGE_cal = 1 - np.sqrt((Pearson_r_cal - 1) ** 2 + (beta_cal - 1) ** 2 + (gamma_cal - 1) ** 2)
    KGE_val = 1 - np.sqrt((Pearson_r_val - 1) ** 2 + (beta_val - 1) ** 2 + (gamma_val - 1) ** 2)
    # Divide area in calibration and validation catchtments:
    # average the six validation cells first, then blank them out so the
    # remaining mean covers only the calibration catchment
    mean_KGE_val_cal = np.nanmean([KGE_cal[2, 4], KGE_cal[3, 4], KGE_cal[4, 4], KGE_cal[5, 4], KGE_cal[4, 5], KGE_cal[5, 5]])
    mean_KGE_val_val = np.nanmean([KGE_val[2, 4], KGE_val[3, 4], KGE_val[4, 4], KGE_val[5, 4], KGE_val[4, 5], KGE_val[5, 5]])
    KGE_cal[2, 4], KGE_cal[3, 4], KGE_cal[4, 4], KGE_cal[5, 4], KGE_cal[4, 5], KGE_cal[5, 5] = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
    KGE_val[2, 4], KGE_val[3, 4], KGE_val[4, 4], KGE_val[5, 4], KGE_val[4, 5], KGE_val[5, 5] = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
    mean_KGE_cal_cal = np.nanmean(KGE_cal)
    mean_KGE_cal_val = np.nanmean(KGE_val)
    # mean_KGE_cal_cal = calibration catchment, calibration period
    # mean_KGE_cal_val = calibration catchment, validation period
    # mean_KGE_val_cal = validation catchment, calibration period
    # mean_KGE_val_val = validation catchment, validation period
    TWSA_sim_file.close()
    TWSA_obs_file.close()
    return mean_KGE_cal_cal, mean_KGE_cal_val, mean_KGE_val_cal, mean_KGE_val_val
def ET_assessment_spatially(ET_sim_dayi, NDVI_obs_dayi):
    """Spatial-pattern efficiency (Esp) of one day of simulated ET vs NDVI.

    *ET_sim_dayi* and *NDVI_obs_dayi* are single-day 2-D fields (220x180);
    the simulated grid's row axis is flipped to match the observations.
    Cells are masked to the basin, split into calibration and validation
    catchments via the masks in wflow_Volta_hbv2_0/staticmaps, and Esp is
    computed from (spearman rank correlation, variability ratio, z-score
    RMSE term) for each catchment.

    Returns (Esp_NDVI_ET_cal, Esp_NDVI_ET_val).
    """
    ## IMPORT FUNCTIONS
    import xarray as xr
    import numpy as np
    from scipy.stats import spearmanr
    from sklearn.metrics import mean_squared_error
    # Load calibration and validation catchments (1/NaN cell masks)
    calibration_catch = np.loadtxt('wflow_Volta_hbv2_0/staticmaps/calibration_catch.txt')
    validation_catch = np.loadtxt('wflow_Volta_hbv2_0/staticmaps/validation_catch.txt')
    ## PREPARE DATA
    # Open files
    ET_sim_xr = ET_sim_dayi
    NDVI_obs_xr = NDVI_obs_dayi
    # make numpy arrays to store and modify the data
    ET_sim_np = np.zeros((220, 180))
    NDVI_obs_np = np.zeros((220, 180))
    # turn array around to be able to plot it right and insert the right values
    ET_sim_np[:, :] = ET_sim_xr[::-1, :]
    NDVI_obs_np[:, :] = NDVI_obs_xr
    # create nan values in observations outside basin
    nan1_arr = np.where(np.isnan(ET_sim_np), np.nan, 1)
    NDVI_obs_np = NDVI_obs_np * nan1_arr
    # Create nan values in simulation inside basin (Lake Volta) # this works great!
    nan2_arr = np.where(np.isnan(NDVI_obs_np), np.nan, 1)
    ET_sim_np = ET_sim_np * nan2_arr
    # Introduce calibration and validation catchments
    ET_sim_np_cal = ET_sim_np * calibration_catch
    ET_sim_np_val = ET_sim_np * validation_catch
    NDVI_obs_np_cal = NDVI_obs_np * calibration_catch
    NDVI_obs_np_val = NDVI_obs_np * validation_catch
    # reshape arrays to flat vectors (220 * 180 = 39600 cells)
    ET_sim_cal_flat = np.reshape(ET_sim_np_cal, (39600))
    ET_sim_val_flat = np.reshape(ET_sim_np_val, (39600))
    NDVI_obs_cal_flat = np.reshape(NDVI_obs_np_cal, (39600))
    NDVI_obs_val_flat = np.reshape(NDVI_obs_np_val, (39600))
    # delete nan values (keeps only in-catchment cells, in flat order)
    ET_sim_cal = ET_sim_cal_flat[~np.isnan(ET_sim_cal_flat)]
    ET_sim_val = ET_sim_val_flat[~np.isnan(ET_sim_val_flat)]
    NDVI_obs_cal = NDVI_obs_cal_flat[~np.isnan(NDVI_obs_cal_flat)]
    NDVI_obs_val = NDVI_obs_val_flat[~np.isnan(NDVI_obs_val_flat)]
    ## PERFORM Esp CALCULATION in 1D (input is daily xr.dataset sim and obs)
    # 1. Calculate spearman rank correlation of the flattened array
    rs_cal = spearmanr(a=NDVI_obs_cal, b=ET_sim_cal, axis=0, nan_policy='omit')[0]
    rs_val = spearmanr(a=NDVI_obs_val, b=ET_sim_val, axis=0, nan_policy='omit')[0]
    # 2. Calculate variability ratio
    # (assume means and std's are calculated wrt to space, not to time, because it is a spatial matching term)
    std_obs_cal = np.nanstd(NDVI_obs_cal)
    std_obs_val = np.nanstd(NDVI_obs_val)
    std_sim_cal = np.nanstd(ET_sim_cal)
    std_sim_val = np.nanstd(ET_sim_val)
    mean_obs_cal = np.nanmean(NDVI_obs_cal)
    mean_obs_val = np.nanmean(NDVI_obs_val)
    mean_sim_cal = np.nanmean(ET_sim_cal)
    mean_sim_val = np.nanmean(ET_sim_val)
    # coefficients of variation and their simulated/observed ratio
    CV_obs_cal = std_obs_cal / mean_obs_cal
    CV_obs_val = std_obs_val / mean_obs_val
    CV_sim_cal = std_sim_cal / mean_sim_cal
    CV_sim_val = std_sim_val / mean_sim_val
    gamma_cal = CV_sim_cal / CV_obs_cal
    gamma_val = CV_sim_val / CV_obs_val
    # 3. spatial location matching term alpha (z-scores, then 1 - RMSE)
    Z_obs_cal = (NDVI_obs_cal - mean_obs_cal) / std_obs_cal
    Z_obs_val = (NDVI_obs_val - mean_obs_val) / std_obs_val
    Z_sim_cal = (ET_sim_cal - mean_sim_cal) / std_sim_cal
    Z_sim_val = (ET_sim_val - mean_sim_val) / std_sim_val
    alpha_cal = 1 - np.sqrt(mean_squared_error(y_true=Z_obs_cal, y_pred=Z_sim_cal))
    alpha_val = 1 - np.sqrt(mean_squared_error(y_true=Z_obs_val, y_pred=Z_sim_val))
    # Return Esp from its three components
    Esp_NDVI_ET_cal = 1 - np.sqrt((rs_cal - 1) ** 2 + (gamma_cal - 1) ** 2 + (alpha_cal - 1) ** 2)
    Esp_NDVI_ET_val = 1 - np.sqrt((rs_val - 1) ** 2 + (gamma_val - 1) ** 2 + (alpha_val - 1) ** 2)
    # Divide the ET/NDVI dataset into two periods, outside this function!
    return Esp_NDVI_ET_cal, Esp_NDVI_ET_val
def ET_assessment_temporally(loc_ET_sim):
    """Score simulated ET against observed NDVI with a per-pixel KGE.

    Parameters
    ----------
    loc_ET_sim : str
        Path to the simulated-ET NetCDF file (must contain an 'ET' variable).

    Returns
    -------
    tuple of float
        Catchment-mean KGE for (calibration period, calibration catchments),
        (cal period, val catchments), (val period, cal catchments) and
        (val period, val catchments), in that order.
    """
    ## Import functions
    import xarray as xr
    import numpy as np
    # Load calibration and validation catchments
    # (assumed to be masks on the 220x180 model grid -- TODO confirm)
    calibration_catch = np.loadtxt('wflow_Volta_hbv2_0/staticmaps/calibration_catch.txt')
    validation_catch = np.loadtxt('wflow_Volta_hbv2_0/staticmaps/validation_catch.txt')
    ## Open xr datasets
    NDVI_obs = xr.open_dataset('NDVI_obs.nc')
    ET_sim = xr.open_dataset(loc_ET_sim)
    NDVI_obs_xr = NDVI_obs['NDVI']
    ET_sim_xr = ET_sim['ET']
    # Turn into numpy arrays (2616 daily steps on a 220x180 grid -- hard-coded)
    NDVI_obs_np = np.zeros(((2616, 220, 180)))
    ET_sim_np = np.zeros(((2616, 220, 180)))
    # The simulation's latitude axis is flipped to match the observations.
    ET_sim_np[:, :, :] = ET_sim_xr[:, ::-1, :] # this may take quite some memory!
    NDVI_obs_np[:, :, :] = NDVI_obs_xr
    # calculate normalized ET_sim_np (in total or per ts?) now it is in total
    max_ET = np.nanmax(ET_sim_np)
    min_ET = np.nanmin(ET_sim_np)
    ET_sim_np_norm = (ET_sim_np - min_ET) / (max_ET - min_ET)
    # Divide the assessment in a calibration and validation period
    # (split at time step 1461; presumably four years of daily data -- TODO confirm)
    NDVI_obs_np_cal = NDVI_obs_np[0:1461, :, :]
    NDVI_obs_np_val = NDVI_obs_np[1461:, :, :]
    ET_sim_np_norm_cal = ET_sim_np_norm[0:1461, :, :]
    ET_sim_np_norm_val = ET_sim_np_norm[1461:, :, :]
    ## Calculate KGE value in 3D!!!
    # Prepare means and standard deviations (over the time axis, per pixel)
    mean_ET_sim_cal = np.nanmean(ET_sim_np_norm_cal, axis=0)
    mean_ET_sim_val = np.nanmean(ET_sim_np_norm_val, axis=0)
    std_ET_sim_cal = np.nanstd(ET_sim_np_norm_cal, axis=0)
    std_ET_sim_val = np.nanstd(ET_sim_np_norm_val, axis=0)
    mean_NDVI_obs_cal = np.nanmean(NDVI_obs_np_cal, axis=0)
    mean_NDVI_obs_val = np.nanmean(NDVI_obs_np_val, axis=0)
    std_NDVI_obs_cal = np.nanstd(NDVI_obs_np_cal, axis=0)
    std_NDVI_obs_val = np.nanstd(NDVI_obs_np_val, axis=0)
    # 1. Calculate Pearson r correlation coefficients
    cov_NDVI_ET_cal = np.nanmean((ET_sim_np_norm_cal - mean_ET_sim_cal) * (NDVI_obs_np_cal - mean_NDVI_obs_cal), axis=0)
    cov_NDVI_ET_val = np.nanmean((ET_sim_np_norm_val - mean_ET_sim_val) * (NDVI_obs_np_val - mean_NDVI_obs_val), axis=0)
    Pearson_r_cal = cov_NDVI_ET_cal / (std_ET_sim_cal * std_NDVI_obs_cal)
    Pearson_r_val = cov_NDVI_ET_val / (std_ET_sim_val * std_NDVI_obs_val)
    # 2. bias ratio beta
    beta_cal = mean_ET_sim_cal / mean_NDVI_obs_cal
    beta_val = mean_ET_sim_val / mean_NDVI_obs_val
    # 3. variability ratio gamma
    gamma_cal = (std_ET_sim_cal / mean_ET_sim_cal) / (std_NDVI_obs_cal / mean_NDVI_obs_cal)
    gamma_val = (std_ET_sim_val / mean_ET_sim_val) / (std_NDVI_obs_val / mean_NDVI_obs_val)
    # 4. KGE calculation
    KGE_cal = 1 - np.sqrt((Pearson_r_cal - 1) ** 2 + (beta_cal - 1) ** 2 + (gamma_cal - 1) ** 2) # w/o loop still 51 seconds
    KGE_val = 1 - np.sqrt((Pearson_r_val - 1) ** 2 + (beta_val - 1) ** 2 + (gamma_val - 1) ** 2)
    ## Calculate KGE and its components in calibration and validation periods and catchments
    # (multiplying by the catchment grids masks pixels outside each catchment)
    KGE_cal_cal = KGE_cal * calibration_catch # calibration period, calibration catchment
    KGE_cal_val = KGE_cal * validation_catch # calibration period, validation catchment
    KGE_val_cal = KGE_val * calibration_catch # validation period, calibration catchment
    KGE_val_val = KGE_val * validation_catch # validation period, validation catchment
    Pearson_r_cal_cal = Pearson_r_cal * calibration_catch
    Pearson_r_cal_val = Pearson_r_cal * validation_catch
    Pearson_r_val_cal = Pearson_r_val * calibration_catch
    Pearson_r_val_val = Pearson_r_val * validation_catch
    beta_cal_cal = beta_cal * calibration_catch
    beta_cal_val = beta_cal * validation_catch
    beta_val_cal = beta_val * calibration_catch
    beta_val_val = beta_val * validation_catch
    gamma_cal_cal = gamma_cal * calibration_catch
    gamma_cal_val = gamma_cal * validation_catch
    gamma_val_cal = gamma_val * calibration_catch
    gamma_val_val = gamma_val * validation_catch
    # Calculate means of KGE and its components
    # NOTE(review): only the KGE means are returned below; the Pearson/beta/gamma
    # means are computed but currently unused (kept for future reporting).
    mean_KGE_cal_cal = np.nanmean(KGE_cal_cal)
    mean_KGE_cal_val = np.nanmean(KGE_cal_val)
    mean_KGE_val_cal = np.nanmean(KGE_val_cal)
    mean_KGE_val_val = np.nanmean(KGE_val_val)
    mean_Pearson_r_cal_cal = np.nanmean(Pearson_r_cal_cal)
    mean_Pearson_r_cal_val = np.nanmean(Pearson_r_cal_val)
    mean_Pearson_r_val_cal = np.nanmean(Pearson_r_val_cal)
    mean_Pearson_r_val_val = np.nanmean(Pearson_r_val_val)
    mean_beta_cal_cal = np.nanmean(beta_cal_cal)
    mean_beta_cal_val = np.nanmean(beta_cal_val)
    mean_beta_val_cal = np.nanmean(beta_val_cal)
    mean_beta_val_val = np.nanmean(beta_val_val)
    mean_gamma_cal_cal = np.nanmean(gamma_cal_cal)
    mean_gamma_cal_val = np.nanmean(gamma_cal_val)
    mean_gamma_val_cal = np.nanmean(gamma_val_cal)
    mean_gamma_val_val = np.nanmean(gamma_val_val)
    # Close datasets
    NDVI_obs.close()
    ET_sim.close()
    # Return only KGE's for now, but more is possible of course
    return mean_KGE_cal_cal, mean_KGE_cal_val, mean_KGE_val_cal, mean_KGE_val_val
## function that takes as input the ICF and creates a numpy Su_max map as output
def ICF_transformer(ICF_nf, ICF_f):
    """Map interception-capacity factors to per-subcatchment Su_max values.

    Su_max = offset + ICF * slope for each of the 13 subcatchments, evaluated
    once with the non-forest factor (ICF_nf) and once with the forest factor
    (ICF_f).

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Length-13 arrays: Su_max for non-forest and for forest land use.
    """
    import numpy as np
    # Linear-regression coefficients per subcatchment (13 entries each).
    offset = np.array([599.19503772, 625.03274585, 562.259546, 522.95253363, 401.11391317, 509.64177338, 534.37225299, 475.73219414, 533.26170669, 558.97329238, 513.43244853, 497.85879029, 426.11175142])
    slope = np.array([-41.78375641, -41.23973506, -36.63365806, -33.08527239, -24.1950782, -45.04986909, -44.78285965, -31.07722248, -37.7781033, -32.37036803, -30.7590952, -32.34913525, -25.17571431])
    # Vectorised evaluation replaces the original per-index loop.
    Su_max_nf = offset + ICF_nf * slope
    Su_max_f = offset + ICF_f * slope
    return Su_max_nf, Su_max_f
| true
|
7bf6c165209bea1931c5db13c4339e0bb6e5c870
|
Python
|
mjziebarth/gmt-python-extensions
|
/gmt_extensions/axis.py
|
UTF-8
| 788
| 2.828125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Supply a simple axis.
class Axis:
"""
Make plotting a little bit like matplotlib.
"""
def __init__(self, fig, region, projection, frame=True):
    """Bind a GMT figure plus its plotting region/projection to this axis."""
    self._region = region
    # BUG FIX: this was stored as 'self._projecion' (typo); coast() reads
    # 'self._projection', which therefore raised AttributeError.
    self._projection = projection
    self._fig = fig
    self._frame = frame
def coast(self, land=None, water=None, shownationalborders=False,
          showstateborders=False):
    """Plot coastlines/land/water via the figure's GMT ``coast`` call,
    translating the boolean border flags into a GMT borders string
    ('1' = national borders, '2' = state borders)."""
    # Preprocess:
    kwargs = dict()
    # Borders (-N): only pass the keyword when at least one flag is set.
    if not (shownationalborders is False and showstateborders is False):
        kwargs['borders'] = ''
        if shownationalborders is True:
            kwargs['borders'] += '1'
        if showstateborders is True:
            kwargs['borders'] += '2'
    # NOTE(review): __init__ assigns 'self._projecion' (typo), so this read
    # of self._projection raises AttributeError until that is fixed.
    self._fig.coast(region=self._region, projection=self._projection,
                    land=land, water=water, **kwargs)
def grdimage(self, grid, cmap):
if
| true
|
ca34ed57b1e00ec36c57bb53a2e9966c60c98b93
|
Python
|
TeamSkyHackathon/blue
|
/loader.py
|
UTF-8
| 2,278
| 3.140625
| 3
|
[] |
no_license
|
import numpy as np
from sklearn.cross_validation import KFold
from settings import *
##################### HELPERS ############################
def load_data(path, count, with_labels=False):
    """
    Load a comma-separated feature file and split each row into two blocks.

    :param path: CSV file whose first line is a header (skipped)
    :param count: feature count to load for one subset (the A block)
    :param with_labels: indicate whether labels are present in column 0
    :rtype: 2/3-element tuple - [labels], first_feature_array, second_feature_array
    """
    labels, a_features, b_features = [], [], []
    with open(path) as f:
        offset = 1 if with_labels else 0
        # BUG FIX: file objects have no .next() on Python 3; the builtin
        # next() works on Python 2.6+ and 3.  The header row is discarded.
        next(f)
        for line in f:
            values = line.rstrip().split(',')
            if with_labels:
                labels.append(values[0])
            a_features.append([float(x) for x in values[offset:count + offset]])
            b_features.append([float(x) for x in values[count + offset:]])
    if with_labels:
        return np.array(labels), np.array(a_features), np.array(b_features)
    else:
        return np.array(a_features), np.array(b_features)
################# TRAINING #######################
def predict(a_features, b_features):
    """
    Magical functions that makes all predictions.
    return: A user is more influential than B
    rtype: bool
    """
    # Not implemented yet; callers currently receive None.
    return None
#################### RESULTS ######################
def produce_results(fname):
    """Write one prediction per test row to ``fname``.

    Label '1' means A is more influential than B, '0' means B is more
    influential than A.
    """
    with open(fname, 'w+') as f:
        af, bf = load_data(TEST_PATH, 11)
        for a_features, b_features in zip(list(af), list(bf)):
            a_better = predict(a_features, b_features)
            # BUG FIX: file.write() requires a string; writing the bool
            # object raised TypeError.  Emit '1'/'0', one prediction per line.
            f.write('1\n' if a_better else '0\n')
#################### MAIN ##########################
if __name__ == "__main__":
    # Load data: labels plus the two 11-feature blocks per row.
    labels, A, B = load_data(TRAIN_PATH, 11, with_labels=True)
    # split original set into training and testing (2-fold cross-validation)
    # NOTE(review): sklearn.cross_validation was removed in modern scikit-learn;
    # the replacement API is sklearn.model_selection.KFold.
    kf = KFold(len(A), 2)
    for train_indices, test_indices in kf:
        train_labels, train_A, train_B = labels[train_indices], A[train_indices], B[train_indices]
        # learn model using training data (not implemented yet)
        test_labels, test_A, test_B = labels[test_indices], A[test_indices], B[test_indices]
        # test model using test data (not implemented yet)
    # produce results
    produce_results('submission.csv')
| true
|
2ddff464b08a99514ccbc693763a52b724fc94e2
|
Python
|
LaurensVergote/Robotic-Hand
|
/build/debug/Serializer.py
|
UTF-8
| 934
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
import serial
ser = serial.Serial()
class Serializer:
def _init_(self):
self.baudrate = 9600
self.comPort = None
def createSerialPort(self):
try:
ser = serial.Serial(self.comPort, self.baudrate)
except Exception,e: print "createSerialPort error: ", str(e)
def setBaudRate(self, baudrate):
self.baudrate = baudrate
if (ser != None):
ser.baudrate = baudrate
def setComPort(self, comPort):
self.comPort = comPort
if (ser != None):
ser.port = comPort
def createConnection(self):
try:
ser = serial.Serial(self.comPort,self.baudrate)
except Exception,e: print "createConnection error: ", str(e)
def openConnection(self):
ser.open()
def closeConnection(self):
ser.close()
def sendData(self, b0, b1):
try:
print 'Sending ', hex(b0), 'and', hex(b1)
s = str(unichr(b0))
r = str(unichr(b1))
ser.write(s+r)
except Exception,e: print "sendData error: ", str(e)
| true
|
310e777251747a5dfafbc9f3be2637e2b6cd9f69
|
Python
|
hcourt/biology
|
/alignment.py
|
UTF-8
| 10,061
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/python
import sys
#A program to calculate a best alignment for two given ACTG sequences.
## One cell of the dynamic-programming matrix: a score plus three traceback
## pointers (up/left/diag), each 0 when unused and 1 when used.
class Position:
    def __init__(self, score=0):
        self.score = score
        # All pointers start unset; the fill phase flips them to 1.
        self.up = 0
        self.left = 0
        self.diag = 0
## Build an r-by-c matrix: Position cells for type 'p', integer zeros for
## type 's' (any other type yields r empty rows, matching the original).
def construct_matrix(r, c, type):
    matrix = []
    for _ in range(0, r):
        if type == 'p':
            row = [Position() for _ in range(0, c)]
        elif type == 's':
            row = [0 for _ in range(0, c)]
        else:
            row = []
        matrix.append(row)
    return matrix
## Map a position in sequence A (left_side) or B (up_side) to the index of
## that character in the global alphabet string; -1 when not found.
def get_index(seq_index, seq):
    letter = ''
    if seq == 'A':
        letter = left_side[seq_index]
    elif seq == 'B':
        letter = up_side[seq_index]
    for position, symbol in enumerate(alphabet):
        if symbol == letter:
            return position
    return -1
## Given the three candidate scores (up, left, diag), return a list whose
## first element is the maximum and whose remaining elements are the codes
## of every direction achieving it, always in D, L, U order.
def get_best_point(up, left, diag):
    best = max(diag, left, up)
    result = [best]
    for code, value in (('D', diag), ('L', left), ('U', up)):
        if value == best:
            result.append(code)
    return result
## the traceback function for all types of alignments
def traceback (cur_r, cur_c, seq_matrix, prev_align_A, prev_align_B, aligns, left_side, up_side):
    """Follow stored pointers from (cur_r, cur_c) back to the start and
    append the reconstructed alignment strings to aligns['A']/aligns['B'].

    prev_align_A/prev_align_B accumulate the alignment in reverse; they are
    un-reversed ([::-1]) when the recursion bottoms out.  Relies on the
    module-level global align_type for the stop condition.
    """
    pos = seq_matrix[cur_r][cur_c]
    ## end case for 0,0
    # Local alignment (align_type==2) also stops at any first-row/column cell.
    if ((cur_r == 0 and cur_c == 0) or (align_type==2 and (cur_r==0 or cur_c==0))):
        aligns['A'].append(prev_align_A[::-1])
        aligns['B'].append(prev_align_B[::-1])
        return aligns
    ## resolve diagonal tree
    # NOTE(review): the elif-chain follows only ONE stored pointer per cell
    # (diag preferred over left over up), so co-optimal paths through a cell
    # with several pointers set are not all enumerated.
    if (pos.diag == 1):
        working_align_A = prev_align_A
        working_align_B = prev_align_B
        working_align_A += left_side[cur_r]
        working_align_B += up_side[cur_c]
        aligns=traceback (cur_r-1, cur_c-1, seq_matrix, working_align_A, working_align_B, aligns, left_side, up_side)
    ## resolve left tree (gap in A)
    elif (pos.left == 1):
        working_align_A = prev_align_A
        working_align_B = prev_align_B
        working_align_A+='-'
        working_align_B+=up_side[cur_c]
        aligns=traceback (cur_r, cur_c-1, seq_matrix, working_align_A, working_align_B, aligns, left_side, up_side)
    ## resolve up tree (gap in B)
    elif (pos.up == 1):
        working_align_A= prev_align_A
        working_align_B= prev_align_B
        working_align_A+=left_side[cur_r]
        working_align_B+='-'
        aligns=traceback (cur_r-1, cur_c, seq_matrix, working_align_A, working_align_B, aligns, left_side, up_side)
    return aligns
## MAIN FUNCTION ##
# Read the problem file named on the command line.
# NOTE(review): the handle is never closed and 'file' shadows a builtin.
file = open(sys.argv[1])
lines = file.readlines()
for l in range(0,len(lines)):
    lines[l]=lines[l].rstrip()
## sequence of letters indicating sequence A (padded with a leading '-')
seqA = lines[1]
left_side = "-"+seqA
## sequence of letters indicating sequence B (padded with a leading '-')
seqB = lines[0]
up_side = "-"+seqB
## an argument for the type of alignment to perform (0 is global, 1 is semi-global, 2 is local)
align_type = int(lines[2])
## gap penalty (asumming a constant gap penalty for both seqs)
gap_penalty = int(lines[3])
## sequence of letters indicating the alphabet for the strings
alphabet = lines[4]
n = len(alphabet)
## series of n lines, each consisting of n space-separated values, indicating the score of matching characters between the two sequences in matrix form. Characters are in the same order as alphabet given above. Matrix is symmetric.
score_matrix = construct_matrix(n, n, 's')
for i in range (0, n):
    line_list = lines[i+5].split()
    for j in range (0, n):
        score_matrix[i][j] = int(line_list[j])
best_score=0
align_As = []
align_Bs = []
## the matrix into which the sequence scores will be outputted. Assuming seqB is on top and seqA is to left
seq_matrix = construct_matrix(len(left_side), len(up_side), 'p')
seq_matrix[0][0].score=0
if align_type == 0:
    ## global alignment: fill first column and first row with cumulative gap penalty
    for i in range (1, len(up_side)):
        seq_matrix[0][i].score = gap_penalty + seq_matrix[0][i-1].score
        seq_matrix[0][i].left = 1
    for j in range (1, len(left_side)):
        seq_matrix[j][0].score = gap_penalty + seq_matrix[j-1][0].score
        seq_matrix[j][0].up = 1
elif align_type == 1 or align_type == 2:
    ## semi-global/local: fill first column and first row with 0 (free end gaps)
    for i in range (1, len(up_side)):
        seq_matrix[0][i].score = 0
        seq_matrix[0][i].left = 1
    for j in range (1, len(left_side)):
        seq_matrix[j][0].score = 0
        seq_matrix[j][0].up = 1
## work through each row, calculating the resulting score for the position
for row in range (1, len(left_side)):
    for col in range (1, len(up_side)):
        p = seq_matrix[row][col]
        ## get the predicted scores for each pointer
        p_up_score = 0
        p_left_score = 0
        if align_type == 1:
            ##semi-global has no penalty for end gaps (last row/column)
            p_up_score = seq_matrix[row - 1][col].score if (col == len(up_side)-1) else seq_matrix[row - 1][col].score + gap_penalty
            p_left_score = seq_matrix[row][col - 1].score if (row == len(left_side)-1) else seq_matrix[row][col - 1].score + gap_penalty
        else:
            p_up_score = seq_matrix[row - 1][col].score + gap_penalty
            p_left_score = seq_matrix[row][col - 1].score + gap_penalty
        indexA = get_index(row, 'A')
        indexB = get_index(col, 'B')
        # Diagonal = previous diagonal cell plus the substitution-matrix score.
        p_diag_score = seq_matrix[row - 1][col - 1].score + score_matrix[get_index(row, 'A')][get_index(col, 'B')]
        if indexA == -1 or indexB == -1:
            print "ERROR: letter combination not in score matrix"
        ## set any negative scores to 0, if doing local alignment
        if align_type == 2:
            if p_up_score < 0: p_up_score = 0
            if p_left_score < 0: p_left_score = 0
            if p_diag_score < 0: p_diag_score = 0
        ## get the best, and assign pointers appropriately
        # Arguments are positional (up, left, diag), matching get_best_point.
        best = get_best_point(p_up_score, p_left_score, p_diag_score)
        for i in range (1, len(best)):
            if (best[i]=='D'):
                p.diag=1
            elif (best[i]=='L'):
                p.left=1
            elif (best[i]=='U'):
                p.up=1
        p.score=int(best[0])
        seq_matrix[row][col] = p
## tracebacks for global and semi-global alignment
if align_type < 2:
    if len(up_side)>=len(left_side):
        # B is the longer (or equal) sequence: scan the last column.
        end_score_list=[]
        best_score = seq_matrix[0][len(up_side)-1].score
        ## get best score and all positions to start from
        for i in range (0,len(left_side)):
            pos = seq_matrix[i][len(up_side)-1]
            s = pos.score
            if s > best_score:
                best_score = s
                end_score_list=[]
                end_score_list.append(i)
            elif s == best_score:
                end_score_list.append(i)
        ## begin tracebacks from each start position
        for start in end_score_list:
            ## start traceback call
            aligns={'A':[], 'B':[]}
            aligns=traceback(start, len(up_side) - 1, seq_matrix, "", "", aligns, left_side, up_side)
            align_As.append(aligns['A'])
            align_Bs.append(aligns['B'])
    else:
        # A is the longer sequence: scan the last row instead.
        end_score_list=[]
        best_score = seq_matrix[len(left_side) - 1][0].score
        ## get best score and all positions to start from
        for i in range (0,len(up_side)):
            pos = seq_matrix[len(left_side) - 1][i]
            s = pos.score
            if s > best_score:
                best_score = s
                end_score_list=[]
                end_score_list.append(i)
            elif s == best_score:
                end_score_list.append(i)
        ## begin tracebacks from each start position
        for start in end_score_list:
            ## start traceback call
            aligns={'A':[], 'B':[]}
            aligns=traceback(len(left_side) - 1, start, seq_matrix, "", "", aligns, left_side, up_side)
            align_As.append(aligns['A'])
            align_Bs.append(aligns['B'])
##tracebacks for local alignment
elif align_type == 2:
    score_list=[]
    best_score = seq_matrix[0][0].score
    ## get best score and all associated positions to start from
    for row in range(0, len(left_side)):
        for col in range(0, len(up_side)):
            pos = seq_matrix[row][col]
            s = pos.score
            if s > best_score:
                best_score = s
                score_list=[]
                score_list.append((row,col))
            elif s == best_score:
                score_list.append((row,col))
    ## select the furthest to the right, and the furthest down
    furthest=(0,0)
    for (x,y) in score_list:
        if x>=furthest[0] and y>=furthest[1]:
            furthest=(x,y)
    ## traceback call (only a single start position is used for local)
    aligns={'A':[], 'B':[]}
    aligns = traceback(furthest[0],furthest[1], seq_matrix, "", "", aligns, left_side, up_side)
    align_As.append(aligns['A'])
    align_Bs.append(aligns['B'])
## output, via print:
## 1. score of the best alignment
## 2. alignment of seqA
## 3. alignment of seqB
## toprint seq_matrix, remove both """
"""
sys.stdout.write(" ")
for i in range (0, len(up_side)):
    sys.stdout.write(" "+str(up_side[i]))
print ""
for row in range (0, len(left_side)):
    sys.stdout.write(left_side[row]+" ")
    for col in range(0, len(up_side)):
        sys.stdout.write(str(seq_matrix[row][col].score)+" ")
    print
"""
print("Best score for all alignments: "+str(best_score))
for i in range (0, len(align_As)):
    # Each traceback yields a list of alignments; only the first is printed.
    print(align_Bs[i][0])
    print(align_As[i][0])
| true
|
da70674bece2ff983896f7e16d1418eda48fa730
|
Python
|
legendary-jld/schema-org
|
/schema_org.py
|
UTF-8
| 2,292
| 2.8125
| 3
|
[] |
no_license
|
import datetime
# Local Libraries
import definitions
class Thing(object):
    """Base schema.org type; subclasses swap in their own schema definition
    and re-run setAttributes with a parent Thing instance."""
    def __init__(self, **kwargs):
        self.itemStructure = None
        self.parentStructure = None
        self.schema = definitions.Thing()
        # BUG FIX: kwargs were previously dropped (setAttributes() was called
        # with no arguments), so Thing(**kwargs) lost every attribute value.
        self.setAttributes(**kwargs)
    def setAttributes(self, parent=None, **kwargs):
        """Record the type-hierarchy strings and copy every canonical
        attribute from kwargs (missing ones default to None).

        ``parent`` must be an *instance* whose .schema is set; passing the
        class itself raises AttributeError (schema is assigned in __init__).
        """
        if parent:
            self.itemStructure = "{0} > {1}".format(parent.schema.itemType, self.schema.itemType)
            self.parentStructure = parent.schema.itemType
        else:
            self.itemStructure = self.schema.itemType
        for attribute in self.schema.canonicalAttributes:
            setattr(self, attribute, kwargs.get(attribute, None))
    def validate(self):
        # TODO: not implemented; returns None, so toJSON/toHTML are no-ops.
        pass
    def toJSON(self):
        if self.validate(): # Must validate first
            pass
    def toHTML(self):
        if self.validate(): # Must validate first
            pass
    def __repr__(self):
        # identifier/name come from the schema's canonicalAttributes.
        return "<SCHEMA:{0} - ID: {1} | Name: {2}>".format(
            self.itemStructure, self.identifier, self.name
        )
class Action(Thing):
    """schema.org Action; its hierarchy parent is Thing."""
    def __init__(self, **kwargs):
        Thing.__init__(self, **kwargs)
        self.schema = definitions.Action()
        # A Thing *instance* is required here: setAttributes reads
        # parent.schema, which is only assigned in Thing.__init__.
        self.setAttributes(parent=Thing(), **kwargs) # Would prefer not to create a Thing Instance just to get the itemType
class CreativeWork(Thing):
    """schema.org CreativeWork; its hierarchy parent is Thing."""
    def __init__(self, **kwargs):
        Thing.__init__(self, **kwargs)
        self.schema = definitions.CreativeWork()
        # BUG FIX: was 'parent=Thing' (the class).  setAttributes reads
        # parent.schema.itemType and 'schema' only exists on instances, so
        # passing the class raised AttributeError; pass an instance.
        self.setAttributes(parent=Thing(), **kwargs)
class Event(Thing):
    """schema.org Event; its hierarchy parent is Thing."""
    def __init__(self, **kwargs):
        Thing.__init__(self, **kwargs)
        self.schema = definitions.Event()
        # BUG FIX: was 'parent=Thing' (the class); setAttributes needs an
        # instance because it reads parent.schema.itemType.
        self.setAttributes(parent=Thing(), **kwargs)
class Intangible(Thing):
    """schema.org Intangible; its hierarchy parent is Thing."""
    def __init__(self, **kwargs):
        Thing.__init__(self, **kwargs)
        self.schema = definitions.Intangible()
        # BUG FIX: was 'parent=Thing' (the class); setAttributes needs an
        # instance because it reads parent.schema.itemType.
        self.setAttributes(parent=Thing(), **kwargs)
class Organization(Thing):
    """schema.org Organization; its hierarchy parent is Thing."""
    def __init__(self, **kwargs):
        Thing.__init__(self, **kwargs)
        self.schema = definitions.Organization()
        # BUG FIX: was 'parent=Thing' (the class); setAttributes needs an
        # instance because it reads parent.schema.itemType.
        self.setAttributes(parent=Thing(), **kwargs)
class Person(Thing):
    """schema.org Person; its hierarchy parent is Thing."""
    def __init__(self, **kwargs):
        Thing.__init__(self, **kwargs)
        self.schema = definitions.Person()
        # BUG FIX: was 'parent=Thing' (the class); setAttributes needs an
        # instance because it reads parent.schema.itemType.
        self.setAttributes(parent=Thing(), **kwargs)
| true
|
2f155ddf253abf8bc0809b55a63169b6463fd042
|
Python
|
niranjan-nagaraju/Development
|
/python/interviewbit/arrays/find_permutation/find_permutation.py
|
UTF-8
| 2,016
| 4.09375
| 4
|
[] |
no_license
|
'''
https://www.interviewbit.com/problems/find-permutation/
Find Permutation
Given a positive integer n and a string s consisting only of letters D or I, you have to find any permutation of first n positive integer that satisfy the given input string.
D means the next number is smaller, while I means the next number is greater.
Notes
Length of given string s will always equal to n - 1
Your solution should run in linear time and space.
Example :
Input 1:
n = 3
s = ID
Return: [1, 3, 2]
'''
'''
Solution Outline:
1. Generate an increasing sequence, seq [1..n]
2. Since the sequence is already increasing, the Is dont need any change.
3. For each contiguous sequence of Ds {i..j} reverse seq[i..j]
seq[j+1] is automatically increasing because seq[i]<seq[i+1]<..seq[j]<seq[j+1]
'''
class Solution:
    """interviewbit 'Find Permutation': build a permutation of 1..n matching
    a D/I pattern by reversing each maximal run of D's inside 1..n."""
    # reverse a[i..j] in place
    def reverse(self, a, i, j):
        while i < j:
            a[i], a[j] = a[j], a[i]
            i += 1
            j -= 1
    def find_permutation(self, n, s):
        """Return a list of 1..n such that s[k] == 'D' implies out[k] > out[k+1]
        and s[k] == 'I' implies out[k] < out[k+1].  O(n) time and space."""
        # BUG FIX: range() is not item-assignable on Python 3, so reverse()
        # crashed; materialise the sequence as a list (correct on 2 and 3).
        seq = list(range(1, n + 1))
        i = 0
        while i < n - 1:  # s length is n-1
            curr_dec_seq_start = None
            # Scan one maximal run of consecutive 'D's.
            while i < n - 1 and s[i] == 'D':
                if curr_dec_seq_start is None:
                    curr_dec_seq_start = i
                i += 1
            if curr_dec_seq_start is not None:
                # End of 'D' run; i sits at the start of the next 'I' run.
                # Reversing seq[curr_dec_seq_start..i] turns the increasing
                # slice into the required decreasing one while both boundary
                # comparisons remain increasing.
                self.reverse(seq, curr_dec_seq_start, i)
            else:
                # Current position is an 'I'; the increasing base sequence
                # already satisfies it.
                i += 1
        return seq
if __name__ == '__main__':
    # Smoke tests: single steps, all-D, and mixed D/I patterns.
    s = Solution()
    assert s.find_permutation(2, 'I') == [1,2]
    assert s.find_permutation(2, 'D') == [2,1]
    assert s.find_permutation(3, 'ID') == [1,3,2]
    assert s.find_permutation(3, 'DI') == [2,1,3]
    assert s.find_permutation(6, 'IDDID') == [1,4,3,2,6,5]
    assert s.find_permutation(6, 'DDDDD') == [6,5,4,3,2,1]
    assert s.find_permutation(6, 'DIDID') == [2,1,4,3,6,5]
    assert s.find_permutation(6, 'IDIDI') == [1,3,2,5,4,6]
| true
|
23221def284aa4e39a15db7e4d1a9144661a7c8a
|
Python
|
jklypchak13/TrojanHorse
|
/src/trojan/crypto.py
|
UTF-8
| 1,666
| 3.21875
| 3
|
[] |
no_license
|
from pathlib import Path
from pyAesCrypt import encryptFile
from pyAesCrypt import decryptFile
password = "trojan"
bufferSize = 64 * 1024 # 64k
def encryptAndDeletePlaintext(input_file_path_str):
    """
    encrypt the file at input_file_path_str
    input_file_path_str can be relative or absolute
    deletes the input file at the end
    """
    # The ciphertext sits next to the source file with .aes appended.
    output_file_path_str = input_file_path_str + ".aes"
    try:
        encryptFile(input_file_path_str, output_file_path_str,
                    password, bufferSize)
        # Remove the plaintext only after a successful encryption.
        Path(input_file_path_str).unlink()
    except FileNotFoundError:
        # Missing/unreadable source: report and carry on.
        print('ERROR: File Could Not Be read')
def decryptAndDeleteCipherText(input_file_path_str):
    """
    decrypt the file at input_file_path_str
    input_file_path_str can be relative or absolute
    deletes the input file at the end
    """
    # output filename = input filename + ".decrypted"
    output_file_path_str = input_file_path_str + ".decrypted"
    # If the input ends with ".aes", strip that extension instead.
    # (str.endswith is the idiomatic, self-documenting form of the original
    # slice comparison input_file_path_str[-4:] == ".aes".)
    if input_file_path_str.endswith(".aes"):
        output_file_path_str = input_file_path_str[:-4]
    try:
        # decrypt the file
        decryptFile(input_file_path_str, output_file_path_str,
                    password, bufferSize)
        # delete the input ciphertext file
        Path(input_file_path_str).unlink()
    except FileNotFoundError:
        # on file read error, do nothing (deliberate best-effort)
        pass
| true
|
b678daa44551a7749097ab56a40715489dfcd86b
|
Python
|
hector81/Aprendiendo_Python
|
/Panda/Ejercicio4_pago_media_vivos_muertos.py
|
UTF-8
| 637
| 3.703125
| 4
|
[] |
no_license
|
'''
4. What did the survivors pay on average?  And those who did not survive?
(Original Spanish prompt: "¿Cuánto pagaron de media los supervivientes?
¿y los que no se salvaron?")
'''
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# NOTE(review): numpy and matplotlib are imported but unused in this script.
fichero = Path('titanic.csv')
# sep='\t' is the delimiter: the file is tab-separated, not comma-separated
df = pd.read_csv(fichero, sep='\t')
# Survived == 1 means survivor, 0 means deceased
pago_media_supervivientes = df.Fare[df.Survived == 1].mean()
print(f"¿Cuánto pagaron de media los supervivientes? = {pago_media_supervivientes}")
pago_media_muertos = df.Fare[df.Survived == 0].mean()
print(f"¿y los que no se salvaron? = {pago_media_muertos}")
| true
|
f9d12ed8605663d69a21e91eb917e614b9140648
|
Python
|
terryhu08/MachingLearning
|
/src/python/FP-Growth/FP-Growth.py
|
UTF-8
| 5,607
| 2.984375
| 3
|
[] |
no_license
|
#coding:utf-8
'''
Created on 2017/11/14
author: fdh
FP—Growth: 韩家炜教授提出的基于FP-tree挖掘频繁项的算法,相对于Apriori只扫描
2遍数据,效率更高, FP = Frequent Pattern
算法需要创建两个数据结构:
1: FP-Tree
2: Header Table
参考博客: http://www.cnblogs.com/jiangzhonglian/p/7778830.html
https://www.cnblogs.com/datahunter/p/3903413.html#undefined
'''
print(__doc__)
class TreeNode:
    """FP-tree node: item name, support count, parent link, same-item
    sibling link (nextlink, the header-table chain) and children by name."""
    def __init__(self, name, count, parentNode):
        self.name = name
        self.count = count
        self.parent = parentNode
        self.nextlink = None  # next node carrying the same item
        self.children = {}
    def inc(self, count):
        # Increase this node's support count.
        self.count+=count
    def disp(self, depth=1):
        """disp (render the subtree as indented text; Python 2 print)."""
        print ' '*depth, self.name,' ', self.count
        for child in self.children.values():
            child.disp(depth+1)
def loadDataSet():
    """Return the classic FP-growth sample transactions (Han et al.)."""
    return [
        ['f', 'a', 'c', 'd', 'g', 'i', 'm', 'p'],
        ['a', 'b', 'c', 'f', 'l', 'm', 'o'],
        ['b', 'f', 'h', 'j', 'o'],
        ['b', 'c', 'k', 's', 'p'],
        ['a', 'f', 'c', 'e', 'l', 'p', 'm', 'n'],
    ]
def calDataSetCnt(dataSet):
    """calDataSetCnt (deduplicate transactions into counts).

    Args:
        dataSet: raw list of transactions (lists of items)
    Return:
        dataSetCnt: dict mapping frozenset(transaction) -> occurrence count
    """
    dataSetCnt = {}
    for trans in dataSet:
        # BUG FIX for Python 3: dict.has_key() was removed; .get() with a
        # default is equivalent and works on Python 2 and 3.
        key = frozenset(trans)
        dataSetCnt[key] = dataSetCnt.get(key, 0) + 1
    return dataSetCnt
def updateTable(treeNode, item, FPHeaderTable):
    """Append treeNode to the tail of the header table's node-link chain
    for ``item`` (the chain threads all tree nodes holding that item)."""
    cursor = FPHeaderTable[item][1]
    if cursor is None:
        # First node carrying this item becomes the chain head.
        FPHeaderTable[item][1] = treeNode
        return
    # Otherwise walk to the end of the chain and link the node there.
    while cursor.nextlink:
        cursor = cursor.nextlink
    cursor.nextlink = treeNode
def updateTreeAndTable(orderItem, cnt, FPHeaderTable, FPTree):
    """updateTreeAndTable (insert one sorted transaction into the FP-tree
    and thread any newly created nodes onto the header table).

    Args:
        orderItem: one transaction's items, sorted by global support
        cnt: occurrence count of this transaction
        FPHeaderTable: header table {item: [support, node-link head]}
        FPTree: FP-tree root node
    """
    treeNode = FPTree
    # Walk/extend the path item by item.
    for item in orderItem:
        # BUG FIX for Python 3: dict.has_key() was removed; use 'in'.
        if item in treeNode.children:
            treeNode.children[item].count += cnt
        else:
            treeNode.children[item] = TreeNode(item, cnt, treeNode)
            updateTable(treeNode.children[item], item, FPHeaderTable)
        treeNode = treeNode.children[item]
def buildTreeAndTable(dataSetCnt, minSupport=1):
    '''buildTreeAndTable (create the tree and header table used by FP-Growth).

    Args:
        dataSetCnt: counted data set {frozenset(transaction): count}
        minSupport: minimum support as an absolute integer count
    Return:
        FPTree: tree built from transactions filtered by minSupport
        FPHeaderTable: header table {item: [support, node-link head]}
        (returns (None, None) when no item is frequent)
    '''
    # First pass: accumulate global support per item.
    FPHeaderTable={}
    for trans,cnt in dataSetCnt.items():
        for item in trans:
            FPHeaderTable[item]=FPHeaderTable.get(item,0)+cnt
    #print 'before filtering:',FPHeaderTable
    # Drop infrequent items; each surviving entry becomes [support, link].
    FPHeaderTable = {k:[v,None] for k,v in FPHeaderTable.items() if v>=minSupport}
    # print 'FPTable:',FPHeaderTable
    freqItemSet = set(FPHeaderTable.keys())
    # print 'frequent item set',freqItemSet
    if len(freqItemSet)==0:
        return None, None
    # FP-tree root (a dummy node)
    FPTree = TreeNode("NULL", 1, None)
    # Second pass over the data: insert each filtered, support-sorted
    # transaction into the tree.
    for trans,cnt in dataSetCnt.items():
        localD = {}
        for item in trans:
            if item in freqItemSet:
                localD[item] = FPHeaderTable[item][0]
        #print 'localD', localD.keys()
        # Only insert when at least one frequent item survived.
        if len(localD)>0:
            orderItem = [k for k,v in sorted(localD.items(), key=lambda x:x[1], reverse=True)]
            # print 'orderItem cnt:',cnt,' items:', orderItem
            updateTreeAndTable(orderItem, cnt, FPHeaderTable, FPTree)
    return FPTree, FPHeaderTable
def findPrefixPath(treeNode):
    """Collect the conditional pattern base for every node on treeNode's
    node-link chain: {frozenset(prefix-path items): count of that node}."""
    conditional_base = {}
    node = treeNode
    while node is not None:
        # Climb towards the root, gathering ancestor item names.
        # The root itself (whose parent is None) is excluded.
        prefix = []
        ancestor = node.parent
        while ancestor.parent:
            prefix.append(ancestor.name)
            ancestor = ancestor.parent
        conditional_base[frozenset(prefix)] = node.count
        node = node.nextlink
    return conditional_base
def FP_Growth(dataSetCnt, minSupport, prelist):
    '''FP_Growth (mine frequent itemsets recursively).

    Args:
        dataSetCnt: counted (conditional) data set {frozenset: count}
        minSupport: minimum absolute support
        prelist: suffix items accumulated on the current recursion path
    Return: prints the mined frequent itemsets (Python 2 print statements)
    '''
    FPTree, FPTable = buildTreeAndTable(dataSetCnt, minSupport)
    if FPTable is None:
        return
    #print 'tree view:'
    #FPTree.disp()
    # Header-table node links, ordered by ascending support.
    orderFPTable = [v[1] for k,v in sorted(FPTable.items(), key=lambda x:x[1][0])]
    #print 'orderFPTable', [v.name for v in orderFPTable]
    # print '******************************'
    for tableNode in orderFPTable:
        print tableNode.name, prelist
    if len(orderFPTable):
        print prelist
    # print '****************************'
    for tableNode in orderFPTable:
        # print 'visiting element', tableNode.name
        # Build the conditional pattern base for this item and recurse with
        # the item appended to the suffix list.
        newDataSetCnt = findPrefixPath(tableNode)
        # print 'element', tableNode.name, 'conditional data', newDataSetCnt
        newPrelist = [v for v in prelist]
        newPrelist.append(tableNode.name)
        FP_Growth(newDataSetCnt, minSupport, newPrelist)
def main():
    # Run FP-Growth on the sample data with an absolute min support of 3.
    dataSet = loadDataSet();
    dataSetCnt = calDataSetCnt(dataSet)
    prelist= []
    FP_Growth(dataSetCnt, 3, prelist)
if __name__ == '__main__':
    main()
| true
|
2ad3edccc0f1d649d1780aa020b8ecd7575d88e4
|
Python
|
Beelthazad/PythonExercises
|
/EjerciciosBasicos/ejercicio1.py
|
UTF-8
| 217
| 3.609375
| 4
|
[] |
no_license
|
# Result of reading integers, reals or strings from the console
# (the user-facing prompts stay in Spanish on purpose).
num = int(input("Introduce un número\n"))
print(num)
char = input("Introduce un carácter\n")
print(char)
string = input("Introduce una cadena\n")
print(string)
| true
|
3b89ba980f95aace259b5f3a2fa6c2cea671f7b2
|
Python
|
SLR1999/ML_Assignment
|
/outlier.py
|
UTF-8
| 1,381
| 3.359375
| 3
|
[] |
no_license
|
import numpy as np
def MahalanobisDist(data):
    """Return the Mahalanobis distance of every row of ``data`` (2-D array)
    to the column-mean vector, as a list of floats.

    The pseudo-inverse of the covariance matrix is used, so a singular
    covariance does not raise.
    """
    # Covariance of the features (columns), hence rowvar=False.
    covariance_matrix = np.cov(data, rowvar=False)
    inv_covariance_matrix = np.linalg.pinv(covariance_matrix)
    # Broadcasting centres every row at once; this replaces the original
    # O(rows) Python loop that built a repeated list of column means.
    diff = data - data.mean(axis=0)
    # Distance of each centred row d: sqrt(d . S^+ . d)
    return [np.sqrt(d.dot(inv_covariance_matrix).dot(d)) for d in diff]
def MD_detectOutliers(data, extreme=False):
    """Indices of rows whose Mahalanobis distance exceeds 2x (or 3x when
    extreme=True) the mean distance — one popular threshold choice."""
    MD = MahalanobisDist(data)
    threshold = (3. if extreme else 2.) * np.mean(MD)
    outlier_indices = [idx for idx, dist in enumerate(MD) if dist > threshold]
    return np.array(outlier_indices)
'''
or according to the 68–95–99.7 rule
std = np.std(MD)
k = 3. * std if extreme else 2. * std
m = np.mean(MD)
up_t = m + k
low_t = m - k
outliers = []
for i in range(len(MD)):
if (MD[i] >= up_t) or (MD[i] <= low_t):
outliers.append(i) # index of the outlier
return np.array(outliers)
'''
| true
|
1547be792bfb60d8d879d9b6fd95736d1047ce18
|
Python
|
kanishka100/gitdemo
|
/tests/test_swag_lab.py
|
UTF-8
| 2,963
| 2.984375
| 3
|
[] |
no_license
|
import pytest
from Page_object.login_page import Login_page
from test_data.home_page_data import Test_Data
from utilities.bAse_class import Base_Class
class Test_Sauce(Base_Class):
def test_sauce_website(self, get_login_data, get_homepage_data, get_checkout_data):
url = 'https://www.saucedemo.com/'
log = self.get_logger()
self.driver.get(url=url)
login_obj = Login_page(self.driver)
log.info("Entering values in the homepage")
login_obj.send_user_name().send_keys(get_login_data['username'])
login_obj.send_password().send_keys(get_login_data['password'])
# home page object is made in the login page file and is returned to the testcase
home_page_obj = login_obj.click_login_button()
# second page
log.info("Getting all the products from homepage.")
products = home_page_obj.get_all_products()
price, price_list, i = 0, [], -1
for product in products:
# NOTE: starting index =0
i = i + 1
if product.text == get_homepage_data['product_name']:
# find all the elements individually as done here (find elements) not a single element but multiple then u can use index to move
price = home_page_obj.product_price()[i].text
log.info(f"Product selected: {product.text}\n Price of product: {price}")
price_list = price.split("$")
home_page_obj.add_to_cart().click()
# object of the checkout page
check_out_obj = home_page_obj.cart_link()
# 3rd page-checkout page
print(check_out_obj.get_inventory_item_name())
log.info(f"The item name selected {get_homepage_data['product_name']}")
assert check_out_obj.get_inventory_item_name() == get_homepage_data['product_name']
check_out_obj.click_checkout().click()
log.info("Entering checkout details")
check_out_obj.get_name().send_keys(get_checkout_data['first_name'])
check_out_obj.get_lastname().send_keys(get_checkout_data['last_name'])
check_out_obj.get_postal_code().send_keys(get_checkout_data['postal_code'])
check_out_obj.click_continue_button().click()
price_got = check_out_obj.get_total_price()
print(price)
log.info(f"The total price of product (price+tax) : {str(float(price_list[1]) + 4.00)}")
assert price_got.split()[1].split("$")[1] == str(float(price_list[1]) + 4.00)
check_out_obj.click_finish_button().click()
# they are the data driven fixtures
    # Data-driven fixture: yields one login record (username/password) per run.
    @pytest.fixture(params=Test_Data.test_login_data)
    def get_login_data(self, request):
        return request.param
    # Data-driven fixture: yields one home-page record (product_name) per run.
    @pytest.fixture(params=Test_Data.test_home_page_data)
    def get_homepage_data(self, request):
        return request.param
    # Data-driven fixture: yields one checkout record (first/last name, postal code).
    @pytest.fixture(params=Test_Data.check_out_data)
    def get_checkout_data(self, request):
        return request.param
| true
|
fed0220d8de3bf71ec98085ba30581cdf7bfd88a
|
Python
|
V-E/scraping
|
/worldometer/worldometer/spiders/countries_data.py
|
UTF-8
| 1,843
| 2.703125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import scrapy
def sanitize(value):
    """Convert a scraped numeric string such as '+1,234' to a float.

    Returns 0 for None, empty, or whitespace-only input. A leading '+'
    (new-cases columns) and thousands separators are stripped.
    """
    if not value or not value.strip():
        return 0
    part = value.strip().split('+')[1 if '+' in value else 0]
    return float(part.replace(',', ''))
def toggle_check(check1, check2):
    """Return check1 when it is truthy, otherwise check2 (fallback value)."""
    return check1 or check2
class CountriesDataSpider(scrapy.Spider):
    """Scrape per-country COVID-19 figures from the worldometers table."""
    name = 'countries_data'
    # NOTE(review): allowed_domains conventionally holds bare domain names
    # (e.g. 'www.worldometers.info'), not full URLs — confirm this works
    # with the offsite middleware.
    allowed_domains = ['https://www.worldometers.info/coronavirus/']
    start_urls = ['https://www.worldometers.info/coronavirus/']

    def parse(self, response):
        """Yield one dict of sanitized numeric fields per visible table row."""
        # Visible rows of the "today" table; the slice below skips the
        # first and last rows.
        rows = response.xpath('//table[@id="main_table_countries_today"]/tbody[1]/tr[not(contains(@style,"display: none"))]')
        for row in rows[1:-1]:
            yield {
                # The country name may sit inside an <a> or a <span>.
                'country': toggle_check(row.xpath('td[2]/a/text()').get(), row.xpath('td[2]/span/text()').get()),
                'total_cases': sanitize(row.xpath('td[3]/text()').get()),
                'new_cases': sanitize(row.xpath('td[4]/text()').get()),
                'total_death': sanitize(row.xpath('td[5]/text()').get()),
                'new_death': sanitize(row.xpath('td[6]/text()').get()),
                'total_recovered': sanitize(toggle_check(row.xpath('td[7]/span/text()').get(), row.xpath('td[7]/text()').get())),
                'active_cases': sanitize(row.xpath('td[8]/text()').get()),
                'serious_critical': sanitize(row.xpath('td[9]/text()').get()),
                'total_cases_per_million': sanitize(row.xpath('td[10]/text()').get()),
                'death_per_million': sanitize(row.xpath('td[11]/text()').get()),
                'total_tests': sanitize(row.xpath('td[12]/text()').get()),
                'tests_per_million': sanitize(row.xpath('td[13]/text()').get()),
                'population': sanitize(row.xpath('td[14]/text()').get()),
            }
| true
|
87776657dd851506aa2c9e4e3f1960e2bcb2dc0a
|
Python
|
AdamZhouSE/pythonHomework
|
/Code/CodeRecords/2623/60594/243798.py
|
UTF-8
| 565
| 3
| 3
|
[] |
no_license
|
def panduan(z) -> bool:
    """Return True if z parses as a base-10 int, else False.

    Fix: the original converted z and then tested isinstance(z, int),
    which is always True once int() succeeds — the check was redundant.
    """
    try:
        int(z)
    except ValueError:
        return False
    return True
def sort(A: list) -> list:
    """Return A's elements as a new list in ascending order.

    Fix: replaces an O(n^2) selection sort with the built-in Timsort.
    The original drained its input (A ended up empty), so that side
    effect is preserved for any caller that relies on it.
    """
    oc = sorted(A)
    del A[:]  # original selection sort consumed its input
    return oc
# Read comma-separated tokens, keep only those that parse as ints, then
# print the n-th largest value (arr1 is ascending, so index len-n).
ar1=input().split(",")
arr1=[]
for index in range(len(ar1)):
    if panduan(ar1[index]):
        arr1.append((int)(ar1[index]))
n=(int)(input())
arr1=sort(arr1)
print(arr1[len(arr1)-n])
| true
|
013038a7d13d0c376cc1309eb0bc4296dfe6a197
|
Python
|
kstudzin/pub_sub_mq
|
/latency_analysis.py
|
UTF-8
| 1,533
| 2.71875
| 3
|
[] |
no_license
|
import os
import pathlib
import re
import pandas as pd
from matplotlib import pyplot as plt
# Directory containing the per-run latency CSV files.
source = "latency"
# Matches e.g. "sub-8_broker-r": group 2 = subscriber count, group 3 = r|d.
filename_pattern = re.compile(".*(sub-(\\d+)_broker-([rd])).*")
output_plot = "boxplot.png"
output_stat = "boxplot.txt"
def import_data():
    """Read every matching latency CSV in `source` into one DataFrame.

    Column names follow "<zero-padded sub count>_<r|d>". Returns the
    DataFrame and the sorted list of column names.
    """
    df = pd.DataFrame()
    names = []
    for file in os.listdir(source):
        match = filename_pattern.match(file)
        if not match:
            continue
        num_subs = int(match.group(2))
        # assumes each subscriber contributed 1000 samples — TODO confirm
        rows = num_subs * 1000
        name = "{0:03}_{1}".format(num_subs, match.group(3))
        # Only the first CSV column holds the latency values.
        df_curr = pd.read_csv(os.path.join(source, file), header=None, usecols=[0], names=[name], nrows=rows)
        df[name] = df_curr[name]
        names.append(name)
    names.sort()
    return df, names
def main():
    """Load latency CSVs, render a box plot, and write summary statistics.

    Side effects: writes `output_plot` (PNG) and `output_stat` (text)
    in the current directory.
    """
    print("Analyzing...")
    df, cols = import_data()
    plt.figure()
    plt.title("PubSub Latency")
    boxplot = df.boxplot(column=cols, return_type='axes')
    boxplot.set_xlabel("Test Run (<number subscribers>_<routing|direct>)")
    boxplot.set_ylabel("Time (seconds)")
    plt.savefig(output_plot, format="png")
    print(f"Generated plot: {output_plot}")
    # Per-column modes (DataFrame.mode pads ragged columns with NaN).
    mode_df = pd.DataFrame()
    for name in cols:
        mode_df[name] = df[name].mode()
    # Fix: the original called `df.sort_index(axis=1)` and discarded the
    # result (it is not in-place); the describe() output below is already
    # sorted explicitly, so the dead statement is removed.
    print(f"Statistics: {output_stat}")
    # Fix: use a context manager so the file is closed even if a write fails.
    with open(output_stat, "w") as f:
        f.write("Statistics:\n\n")
        f.write(df.describe().sort_index(axis=1).to_string())
        f.write("\n\nMode:\n\n")
        f.write(mode_df.to_string())
# Script entry point.
if __name__ == "__main__":
    main()
| true
|
fbf0bec49e487ab4a6fccb12b875ea4223613641
|
Python
|
PennaLai/patch_sampling
|
/patch_samping.py
|
UTF-8
| 1,978
| 3.125
| 3
|
[] |
no_license
|
from matplotlib.image import imread
from matplotlib import pyplot as plt
import numpy as np
def read_image(file_name):
    """Load an image file into a numpy array and report its shape."""
    data = imread(file_name)
    print(data.shape)
    return data
def output_image(image_arr):
    """Display an image array with matplotlib (blocking until closed)."""
    plt.imshow(image_arr, interpolation='nearest')
    plt.show()
def patch_sampling(image_arr, patch_width, patch_heigh, patch_distance):
    """Cut (patch_heigh x patch_width) patches on a patch_distance grid.

    Returns a list of (patch, (row, col)) with row/col the patch's
    top-left pixel, in row-major order.

    Fix: step the grid directly with range() instead of scanning every
    pixel and testing `% patch_distance` — O(H*W / d^2) iterations
    instead of O(H*W), with identical output.

    NOTE(review): bounds are kept exactly as the original — rows allow
    row + patch_heigh == height, but columns require
    col + patch_width < width (asymmetric; confirm intent).
    """
    image_heigh = image_arr.shape[0]
    image_width = image_arr.shape[1]
    result = []
    for row in range(0, image_heigh - patch_heigh + 1, patch_distance):
        for col in range(0, image_width - patch_width, patch_distance):
            patch = image_arr[row:row + patch_heigh, col:col + patch_width]
            result.append((patch, (row, col)))
    return result
if __name__ == '__main__':
    # Patch geometry: 224x224 tiles sampled every 8 pixels.
    patch_width = 224
    patch_heigh = 224
    patch_distance = 8
    # NOTE(review): data_size appears unused; the loop below hard-codes 40.
    data_size = 40
    result = []
    for i in range(1, 41):
        image_data = read_image("graph_data/Images/test_{}.png".format(i))
        annotation_data = read_image("graph_data/Annotation/test_{}.png".format(i))
        image_patch_result = patch_sampling(image_data, patch_width, patch_heigh, patch_distance)
        annotation_patch_result = patch_sampling(annotation_data, patch_width, patch_heigh, patch_distance)
        result.append((image_patch_result, annotation_patch_result))
    # result: an array, element: (origin_data, annotation_data), length=number of origin image
    # origin_data/annotation_data: an array, element: (patch_data, index_of_left_top_pixel), length=number of patch
    # patch_data: three dimension array
    # each patch_data in origin data should relate to the same index patch_data in annotation_data
| true
|
830ce2e97a871c3bcda6bd5bacb1db4da672ac8b
|
Python
|
jbwillis/marblemaze
|
/tools/captureImage.py
|
UTF-8
| 1,050
| 2.796875
| 3
|
[] |
no_license
|
#! /usr/bin/env python3
import cv2
import numpy as np
import argparse
# Interactive capture loop: show the cropped camera feed; 'c' saves a still
# under a user-supplied filename, 'q' quits. With --video, frames are also
# appended to an XVID video file.
parser = argparse.ArgumentParser(description='Capture an image from the given camera number')
parser.add_argument('cam', help='OpenCV camera number (0 indexed)', type=int)
parser.add_argument('--video', help='Capture and save video', type=str)
args = parser.parse_args()
print("Press 'c' to capture an image or 'q' to quit")
cap = cv2.VideoCapture(args.cam)
vOut = None
while True:
    ret, frame = cap.read()
    # Crop to the region of interest (rows 125:350, cols 225:450).
    frame = frame[125:350 , 225:450]
    cv2.imshow('Frame', frame)
    kp = cv2.waitKey(1)
    if (kp & 0xFF) == ord('c'):
        fn = input("Enter the filename: ")
        cv2.imwrite(fn, frame)
    elif (kp & 0xFF) == ord('q'):
        break
    if args.video is not None and vOut is None:
        fourcc = cv2.VideoWriter_fourcc(*"XVID")
        # NOTE(review): VideoWriter expects (width, height); frame.shape is
        # (rows, cols) = (height, width) — confirm the tuple order here.
        vOut = cv2.VideoWriter(args.video, fourcc, 30, (frame.shape[0], frame.shape[1]))
    if args.video is not None:
        vOut.write(frame)
# NOTE(review): vOut.release() stays commented out — the video file may not
# be finalized properly when recording.
# vOut.release()
cap.release()
cv2.destroyAllWindows()
| true
|
be29c53ce6ec699d6f39b0ce6bd8928ee8e7bda9
|
Python
|
OpenDingux/tests
|
/vsync/buffersim.py
|
UTF-8
| 3,235
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
def render_constant(duration):
    """Renderer model whose frames always take `duration` to draw,
    regardless of the time stamp passed in."""
    def renderer(now):
        return duration
    return renderer
# An application model should yield (buf_count, render_start) pairs.
# buf_count is a sequence number identifying the render, or -1 for
# frames already in the framebuffer before simulation started.
# render_start is the timestamp at which the render started. For
# computing latency, we assume that input was processed immediately
# before the rendering started.
# The simulator sends the timestamp of the next vblank back the
# application model.
def swap_chain(num_buffers, renderer):
    """Model of an application that renders via a swap chain.

    Generator protocol: yields the (buf_count, render_start) pair for the
    front buffer at each vblank and receives the next vblank timestamp
    via send(). With all buffers full, rendering blocks until a buffer is
    freed at the vblank.
    """
    now = 0
    render_start = None
    render_done = None
    buf_count = 0
    # Buffers ready to be displayed.
    buffers = [(-1, 0)]
    while True:
        # Display the front buffer; resume when the next vblank time arrives.
        vblank = yield buffers[0]
        while True:
            if render_done is None:
                if len(buffers) >= num_buffers:
                    # Block on acquiring buffer.
                    now = vblank
                render_start = now
                render_done = render_start + renderer(now)
            if render_done > vblank:
                # Frame still rendering at this vblank; keep showing the front buffer.
                break
            else:
                # Frame finished before the vblank: queue it for display.
                buffers.append((buf_count, render_start))
                buf_count += 1
                now = render_done
                render_done = None
        # Retire the displayed frame once a newer one is queued.
        if len(buffers) > 1:
            del buffers[0]
def swap_discard(renderer):
    """Model of an application that renders as many frames as possible
    via triple buffering, potentially discarding some frames.

    Same generator protocol as swap_chain: yields (buf_count,
    render_start) for the front buffer, receives the next vblank via
    send(). Instead of blocking when all three buffers are full, the
    oldest queued (not yet displayed) frame is dropped.
    """
    now = 0
    render_start = None
    render_done = None
    buf_count = 0
    buffers = [(-1, 0)]
    while True:
        vblank = yield buffers[0]
        while True:
            if render_done is None:
                if len(buffers) >= 3:
                    # Discard oldest waiting buffer.
                    del buffers[1]
                render_start = now
                render_done = render_start + renderer(now)
            if render_done > vblank:
                # Still rendering at this vblank; show the front buffer again.
                break
            else:
                buffers.append((buf_count, render_start))
                buf_count += 1
                now = render_done
                render_done = None
        # Retire the displayed frame once a newer one is queued.
        if len(buffers) > 1:
            del buffers[0]
def simulate(num_frames, frame_rate, **apps):
    """Step each app generator through num_frames vblanks and print a
    table of displayed buffer id and input-to-display latency per frame.

    apps maps a column label (underscores shown as spaces) to an
    application-model generator (swap_chain / swap_discard).
    """
    labels = f" {frame_rate:3} Hz refresh"
    column_headers = "frame: vblank:"
    for label, app in apps.items():
        # Prime each generator up to its first yield.
        app.send(None)
        labels += f" | {label.replace('_', ' '):17}"
        column_headers += ' | buffer: latency:'
    print(labels)
    print(column_headers)
    for frame in range(num_frames):
        # Vblank timestamp in ms for this frame.
        vblank = int((frame / frame_rate) * 1000)
        line = f"{frame:4} {vblank:8}"
        for app in apps.values():
            buf_count, input_time = app.send(vblank)
            # Latency assumes input was sampled right as the render started.
            latency = vblank - input_time
            line += f" | {buf_count:5} {latency:8} "
        print(line)
# Simulate 50 frames at 60 Hz with a constant 25 ms render time under
# three buffering strategies side by side.
renderer = render_constant(25)
simulate(50, 60,
         double_chain=swap_chain(2, renderer),
         triple_chain=swap_chain(3, renderer),
         triple_discard=swap_discard(renderer)
         )
| true
|
a9a5977273ce884e08369c92951eb06bd194eec6
|
Python
|
Halacky/parsRos
|
/screenShot.py
|
UTF-8
| 790
| 3.203125
| 3
|
[] |
no_license
|
import cv2
class Image:
    """Wrapper around an image file path with read and crop helpers (OpenCV)."""

    def __init__(self, imageLink):
        # Path of the image file on disk.
        self.imageLink = imageLink

    def readImage(self):
        """Read the image from disk; return the array, or False on failure."""
        try:
            imgRead = cv2.imread(self.imageLink)
            return imgRead
        except Exception as err:
            # Fix: the original printed the Exception *class* object
            # (`print(Exception)`) instead of the caught error instance.
            print(err)
            return False

    def cropImage(self):
        """Drop the left third of the image and overwrite the file in place."""
        image = self.readImage()
        imageHeight, imageWidth, channels = image.shape
        # Keep only the right two thirds; the left part is not needed.
        targetWidth = int(imageWidth / 3)
        croppedImage = image[0: 0 + imageHeight, targetWidth: targetWidth + targetWidth*2]
        cv2.imwrite(self.imageLink, croppedImage)
        return croppedImage
| true
|
82a4691b73c28b372704796beef097703fba5477
|
Python
|
faylau/PythonStudy
|
/src/Decorator/DecoratorExample01.py
|
UTF-8
| 7,765
| 4.03125
| 4
|
[] |
no_license
|
#!/usr/bin/python
# encoding:utf-8
__authors__ = ['"Liu Fei" <liufei83@163.com>']
__version__ = "V0.1"
import functools
'''
1.关于Decorator,这里并没有做特别详细和深入的讲解,只是给了一些常规的Decorator用法;
2.具体Decorator的学习,可以参考文章 http://coolshell.cn/articles/11265.html;
3.Decorator的使用很广泛,建议在平时coding过程中思考,哪些操作可以通过Decorator的方式解决,积累经验。
'''
'''-------------------------------------------------------------------------------------------------------
Example-01:最基本的Decorator(装饰器)
@note: (1)被装饰的函数 foo1() 不带任何参数;
@note: (2)装饰器 hello(fn) 本身也不带任何参数(严格上说只带有一个被装饰的function的名称)。
----------------------------------------------------------------------------------------------------------
1.结合输出结果可以看到如下的东西:
(1)函数 foo1 前面有个 @hello 的“注解”,hello 就是我们前面定义的装饰器hello(由此可知装饰器其实就是一个函数)。
(2)在 hello 函数中,其需要一个 fn 的参数(这就用来做回调的函数)
(3)hello 函数中返回了一个 inner 函数 wrapper,这个 wrapper 函数回调了传进来的 fn,并在回调前后加了两条语句。
2.Decorator的本质:
(1)当你在用某个@decorator来修饰某个函数func时,其解释器会解释成下面这样的语句:
func = decorator(func)
(2)由下面的例子可以看到,hello(foo1)返回了wrapper()函数,所以,foo1其实变成了wrapper的一个变量,而后面的foo1()执行其实变成了wrapper()。
-------------------------------------------------------------------------------------------------------'''
def hello1(fn):
    # Basic decorator: logs before and after calling the wrapped function.
    # NOTE(review): wrapper discards fn's return value, so `print foo1()`
    # below prints None.
    @functools.wraps(fn)
    def wrapper():
        print "Hello function '%s'." % fn.__name__
        fn()
        print "Goodbye function '%s'" % fn.__name__
    return wrapper
@hello1
def foo1():
    '''
    @summary: Print a sentence.
    '''
    print "I am function 'foo1'."
'''-------------------------------------------------------------------------------------------------------
Example-02:被装饰的函数有固定个数的参数(如 foo2(a, b))
@note: (1)被装饰的函数 foo2(a, b) 带有固定个数的参数;
@note: (2)装饰器 hello(fn) 本身不带任何参数(严格上说只带有一个被装饰的function的名称);
----------------------------------------------------------------------------------------------------------
1.说明:
(1)function的参数在装饰器中以 wrapper(*args, **kwargs) 的形式传递;
(2)关于function(*args, **kwargs)的意义请参阅这两个参数相关的说明;
(3)*args参数表示可以同时接收n个参数,这n个参数与tuple的形式传递给函数;
(4)*kwargs表示可以同时传n个key-value形式的参数,这n个key-value形式的参数以list形式传递给函数。
-------------------------------------------------------------------------------------------------------'''
def hello2(fn):
    # Decorator for functions taking exactly two positional arguments;
    # logs the function name and both parameters around the call.
    def wrapper(a, b):
        print "Hello function '%s'." % fn.__name__
        print "Parameters are '%s' and '%s'." % (a, b)
        fn(a, b)
        print "Goodbye function '%s'." % fn.__name__
    return wrapper
# Example: a two-argument function wrapped by hello2.
@hello2
def foo2(a, b):
    print "Function 'foo2' parameters are '%s' and '%s'." % (a, b)
'''-------------------------------------------------------------------------------------------------------
Example-03:被装饰的函数带有不固定个数的参数
@note: (1)被装饰的函数 foo3(a, b) 带有2参数,foo3_1(a, b, c)带有3个参数;
@note: (2)装饰器 hello(fn)接受动态个数的参数;
----------------------------------------------------------------------------------------------------------
1.说明:
(1)function的不固定个数参数在装饰器中以 wrapper(*args, **kwargs) 的形式传递;
(2)*args参数表示可以同时接收n个参数(如(1, 2, 3)),这n个参数以tuple的形式传递给函数;
(3)*kwargs表示可以同时传n个key-value形式的参数(如(a=1, b=2, c=3)),这n个key-value形式的参数以list形式传递给函数。
-------------------------------------------------------------------------------------------------------'''
def hello3(fn):
def wrapper(*args, **kwargs):
print "Hello functiono '%s'." % fn.__name__
fn(*args, **kwargs)
print "Goodbye function '%s'." % fn.__name__
return wrapper
# Example: hello3 forwarding two positional arguments.
@hello3
def foo3(a, b):
    print "Function 'foo3(%s, %s)' is called. Result is '%s'." % (a, b, a+b)
# Example: the same hello3 decorator also handles three arguments.
@hello3
def foo3_1(a, b, c):
    print "Function 'foo3_1(%s, %s, %s)' is called. Result is '%s'." % (a, b, c, a+b+c)
'''-------------------------------------------------------------------------------------------------------
Example-04:装饰器带参数
@note: (1)被装饰的函数 foo4(a) 带有1个参数,但调用该函数的时候不需要传参;
@note: (2)装饰器带hello4(arg)带有1个参数;
@note: (3)foo4(a)需要的参数由装饰器 hello4("xyz")中的参数来传递;
----------------------------------------------------------------------------------------------------------
1.说明:
(1)function的不固定个数参数在装饰器中以 wrapper(*args, **kwargs) 的形式传递;
(2)*args参数表示可以同时接收n个参数,这n个参数与tuple的形式传递给函数;
(3)*kwargs表示可以同时传n个key-value形式的参数,这n个key-value形式的参数以list形式传递给函数。
-------------------------------------------------------------------------------------------------------'''
def hello4(arg):
    # Parameterized decorator: `arg` given at decoration time is the value
    # that gets passed to the wrapped function when it is called.
    def wrapper(fn):
        def _wrapper():
            print "Hello function '%s'." % fn.__name__
            fn(arg)
            print "Goodbye function '%s'." % fn.__name__
        return _wrapper
    return wrapper
# Example: foo4 is called with no args; "xyz" is supplied by the decorator.
@hello4("xyz")
def foo4(a):
    print "Function 'foo4(a)' is called. Parameter is '%s'." % a
'''-------------------------------------------------------------------------------------------------------
Example-05:以class作为装饰器参数
@note: (1)作为参数的class中带有Static Method,用于在装饰器中以cls.method的形式调用;
@note: (2)本质上与Example-04差不多。
----------------------------------------------------------------------------------------------------------
1.说明:
(1)
-------------------------------------------------------------------------------------------------------'''
class mylocker(object):
def __init__(self):
print "mylocker.__init__() called."
@staticmethod
def acquire():
print "mylocker.acuire() called."
@staticmethod
def unlock(self):
print "mylocker.unlock() called."
class lockerex(mylocker):
    # Subclass overriding both static hooks with its own messages.
    @staticmethod
    def acquire():
        print "lockerex.acuire() called."

    @staticmethod
    def unlock():
        print "lockerex.unlock() called."
def decorator_locker(cls):
    '''
    @note: cls must implement the static methods acquire and unlock
    '''
    def _deco(fn):
        def __deco(*args, **kwargs):
            # Bracket the call with the lock-class hooks.
            print "Before '%s' called." % fn.__name__
            cls.acquire()
            fn(*args, **kwargs)
            cls.unlock()
            print "After '%s' called." % fn.__name__
        return __deco
    return _deco
# Example: calls are bracketed by lockerex.acquire()/unlock().
@decorator_locker(lockerex)
def foo5():
    print "Function 'foo5()' is called."
if __name__ == '__main__':
    # foo1() returns None (hello1's wrapper drops the return value), so the
    # first print emits None after the wrapper's own output.
    print foo1()
    print foo1.__doc__
    # foo2(1, 2)
    # foo3(1, 2)
    # foo3_1(1, 2, 3)
    # foo4()
    # foo5()
| true
|
32327fdedaed80675002f85f360746e1f5719c89
|
Python
|
Ptrak/AdventOfCode2019
|
/problem7/solution.py
|
UTF-8
| 5,912
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
import itertools
def add(program, pc, mode1, mode2):
    """Opcode 1: store lhs + rhs at the cell addressed by program[pc+3].

    mode 0 = positional (the parameter is an address), mode 1 = immediate.
    Returns the next program counter (pc + 4). Cells are strings.
    """
    lhs_val = int(program[pc + 1]) if mode1 == 1 else int(program[int(program[pc + 1])])
    rhs_val = int(program[pc + 2]) if mode2 == 1 else int(program[int(program[pc + 2])])
    program[int(program[pc + 3])] = str(lhs_val + rhs_val)
    return pc + 4
def multiply(program, pc, mode1, mode2):
    """Opcode 2: store lhs * rhs at the cell addressed by program[pc+3].

    mode 0 = positional, mode 1 = immediate. Returns pc + 4.
    """
    lhs_val = int(program[pc + 1]) if mode1 == 1 else int(program[int(program[pc + 1])])
    rhs_val = int(program[pc + 2]) if mode2 == 1 else int(program[int(program[pc + 2])])
    program[int(program[pc + 3])] = str(lhs_val * rhs_val)
    return pc + 4
def read(program, pc, mode1):
    """Opcode 4 (output): print the value selected by mode1; return pc + 2."""
    location = pc + 1 if mode1 == 1 else int(program[pc + 1])
    print(" Value: {}".format(program[location]))
    return pc + 2
def jit(program, pc, mode1, mode2):
    """Opcode 5 (jump-if-true): jump to the second parameter when the
    first is nonzero; otherwise fall through to pc + 3."""
    test_val = int(program[pc + 1]) if mode1 == 1 else int(program[int(program[pc + 1])])
    jump_val = int(program[pc + 2]) if mode2 == 1 else int(program[int(program[pc + 2])])
    return jump_val if test_val != 0 else pc + 3
def jnt(program, pc, mode1, mode2):
    """Opcode 6 (jump-if-false): jump to the second parameter when the
    first is zero; otherwise fall through to pc + 3."""
    test_val = int(program[pc + 1]) if mode1 == 1 else int(program[int(program[pc + 1])])
    jump_val = int(program[pc + 2]) if mode2 == 1 else int(program[int(program[pc + 2])])
    return jump_val if test_val == 0 else pc + 3
def lt(program, pc, mode1, mode2):
    """Opcode 7 (less-than): write '1' if lhs < rhs else '0' to the cell
    addressed by program[pc+3]; return pc + 4."""
    lhs_val = int(program[pc + 1]) if mode1 == 1 else int(program[int(program[pc + 1])])
    rhs_val = int(program[pc + 2]) if mode2 == 1 else int(program[int(program[pc + 2])])
    program[int(program[pc + 3])] = str(int(lhs_val < rhs_val))
    return pc + 4
def eq(program, pc, mode1, mode2):
    """Opcode 8 (equals): write '1' if lhs == rhs else '0' to the cell
    addressed by program[pc+3]; return pc + 4."""
    lhs_val = int(program[pc + 1]) if mode1 == 1 else int(program[int(program[pc + 1])])
    rhs_val = int(program[pc + 2]) if mode2 == 1 else int(program[int(program[pc + 2])])
    program[int(program[pc + 3])] = str(int(lhs_val == rhs_val))
    return pc + 4
def run_program(program, program_input, program_counter, phase_tmp):
    """Run the intcode `program` (list of string cells) from `program_counter`.

    program_input: list of ints consumed in order by opcode 3.
    phase_tmp: phase id; only used for debug printing when == 4.

    Suspends at the first opcode-4 output and returns
    [output, pc, program] so the caller can resume later; returns
    [output, -1, -1] when the program halts (opcode 99).
    """
    input_index = 0
    output = None
    pc = program_counter
    while(1):
        instruction = program[pc]
        # Left-pad to 5 chars: 3 mode digits + 2 opcode digits.
        while(len(instruction) < 5) :
            instruction = "0" + instruction
        mode3 = int(instruction[0])
        mode2 = int(instruction[1])
        mode1 = int(instruction[2])
        opcode = int(instruction[3:])
        # add
        if opcode == 1:
            pc = add(program, pc, mode1, mode2)
        elif opcode == 2:
            pc = multiply(program, pc, mode1, mode2)
        elif opcode == 3:
            location = int(program[pc+1])
            # val = input("Enter your value: ")
            # Read input given and automatically enter it
            val = program_input[input_index]
            input_index += 1
            program[location] = val
            if phase_tmp == 4:
                print("inputting", val)
            pc += 2
        elif opcode == 4:
            if mode1 == 0:
                location = int(program[pc+1])
            elif mode1 == 1:
                location = pc + 1
            #print(" Value: {}".format(program[location]))
            output = int(program[location])
            pc += 2
            return [output, pc, program] # Halt the program here. Resume after
        elif opcode == 5:
            pc = jit(program, pc, mode1, mode2)
        elif opcode == 6:
            pc = jnt(program, pc, mode1, mode2)
        elif opcode == 7:
            pc = lt(program, pc, mode1, mode2)
        elif opcode == 8:
            pc = eq(program, pc, mode1, mode2)
        elif opcode == 99:
            break
        else:
            print("invalid opcode!!!", opcode)
            break
    return [output, -1, -1]
## MAIN
# Brute-force search over phase permutations for the amplifier feedback
# loop (AoC 2019 day 7 part 2): run the five amplifier copies round-robin,
# resuming each from its saved pc, until the last one halts.
f = open('program.txt', 'r')
#f = open('test_program.txt', 'r')
og_program = f.readline().rstrip().split(',')
f.close()
max_output = -1
max_sequence = [0,0,0,0,0]
sequences = itertools.permutations(range(5,10))
#sequences = itertools.permutations(range(5))
# for each sequence
for sequence in sequences:
    current_output = 0 # current output accumulated
    programs = [og_program.copy(), og_program.copy(), og_program.copy(), og_program.copy(), og_program.copy()] # for tracking programs mid execution
    pcs = [0,0,0,0,0] # for tracking program counters mid execution
    while pcs[len(sequence)-1] > -1:
        phase_count = 0
        for phase in sequence:
            # Phase is supplied only on each amplifier's first activation.
            program_input = [phase, current_output]
            if pcs[phase_count] != 0:
                program_input = [current_output]
            [output, program_counter, program_state] = run_program(programs[phase_count], program_input, pcs[phase_count], phase)
            # save the state of the program back
            programs[phase_count] = program_state
            pcs[phase_count] = program_counter
            # NOTE(review): `if output:` also skips a legitimate output of 0
            # (falsy) — confirm intent.
            if output:
                current_output = output # save module output
            phase_count += 1
    print ("Sequence: {} Output: {}".format(sequence, current_output))
    if current_output > max_output :
        max_output = current_output
        max_sequence = sequence
print("max output: {}  Max Sequence: {}".format(max_output, max_sequence))
program = og_program.copy()
| true
|
b4819c07d5926b771165f74ec24f9b5070f15554
|
Python
|
YanWangNYUSH/CalcVIX
|
/graph.py
|
UTF-8
| 1,209
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# use the commented code to generate to_daily.csv and true_vix.csv
# result = pd.read_csv('Calculated_result.csv')
# result['datetime']=pd.to_datetime(result['time'])
# result.set_index(result['datetime'],inplace = True)
# dayvix = result.resample('D').mean()
# dayvix = dayvix.dropna()
# dayvix.to_csv('to_daily.csv')
#
# true_vix = pd.read_csv('ivixx.csv',usecols = [u'日期',u'收盘价(元)'],encoding='GBK')
# true_vix['date'] = pd.to_datetime(true_vix[u'日期'].copy())
# true_vix.set_index(true_vix['date'],inplace=True)
# true_vix = true_vix.reindex(pd.date_range('2015-02-10','2019-10-22')).fillna(method='ffill')
# true_vix = true_vix.drop(columns = [u'日期'])
# true_vix.columns = ['price','date']
# true_vix.to_csv('true_vix.csv')
# Plot the calculated daily VIX against the official index series (the
# commented-out code above shows how the two CSVs were produced).
result = pd.read_csv('to_daily.csv')
true_vix = pd.read_csv('true_vix.csv')
plt.title("Calculated VIX")
plt.plot(result['datetime'], result['Calculated_VIX'], label='Calculated VIX')
# NOTE(review): this series is plotted against the integer row index while
# the line above uses the 'datetime' column — confirm the x-axes align.
plt.plot(true_vix.index,true_vix['price'],label='true vix')
plt.yticks(np.arange(0, 80, step=5))
plt.legend()
fig = plt.gcf()
fig.set_size_inches(28.5, 10.5)
fig.savefig('test2png.png', dpi=100)
plt.show()
| true
|
28f9fab3e39f901aa33dab1b59a11f80193058cb
|
Python
|
pankaj-lewagon/mltoolbox
|
/mltoolbox/clean_data.py
|
UTF-8
| 534
| 3.5625
| 4
|
[] |
no_license
|
import string
def remove_punctuation(text):
    """Return text with every ASCII punctuation character removed."""
    return ''.join(ch for ch in text if ch not in string.punctuation)
def lowercase(text):
    """Return text converted to lower case."""
    return text.lower()
def remove_num(text):
    """Return text with every digit character removed."""
    return ''.join(ch for ch in text if not ch.isdigit())
if __name__ == '__main__':
    # Fix: the original bare `print` statements were Python-2-only syntax
    # and made the whole module a SyntaxError under Python 3; the
    # parenthesized single-argument calls below behave identically on both.
    print(remove_num("121212 Pankaj 121212"))
    print(remove_punctuation("12121?????2 Pank2!!!aj 121212????"))
    print(lowercase("12121?????2 Pank2!!!aj XXXXXXXXX121212????"))
| true
|
89aad3d9d252c31de5f85d7722de94145432e99c
|
Python
|
dmitryhits/learning_python3
|
/reloadall3.py
|
UTF-8
| 677
| 2.875
| 3
|
[] |
no_license
|
"""
reloadall3.py: transitively reload nested modules (nested stack)
"""
import types
from reloadall import status, tryreload, tester
def transitive_reload(modules, visited):
    """Reload every module on the `modules` stack, pushing each reloaded
    module's module-type attributes that have not been visited yet."""
    while modules:
        nextmod = modules.pop()
        status(nextmod)
        tryreload(nextmod)
        visited.add(nextmod)
        #print('VISITED:', visited)
        # Queue submodules found in the module namespace, skipping ones
        # already visited or already queued.
        modules.extend(x for x in nextmod.__dict__.values()
                    if type(x) == types.ModuleType and x not in visited and x not in modules)
        #print('MODULES;', modules)
def reload_all(*modules):
    """Entry point: transitively reload each of the given modules."""
    pending = list(modules)
    transitive_reload(pending, set())
# Self-test: reload this module's own dependency graph.
if __name__ == '__main__':
    tester(reload_all, 'reloadall3')
| true
|
03d9bf5cfdc9457715409c89c49dd0c30b7e741d
|
Python
|
Herohonour/Dict
|
/Dict/dict_client_v02.py
|
UTF-8
| 2,062
| 3.75
| 4
|
[] |
no_license
|
"""
客户端 v2.0
1. 与服务端建立连接
2. 给用户显示功能菜单
3. 给服务端发送注册请求
"""
from socket import *
from getpass import getpass
# Server address.
ADDR = ("127.0.0.1", 12306)

# Create the client socket and connect to the server.
# NOTE: this runs at import time.
s = socket()
s.connect(ADDR)
def do_register():
    """
    Registration flow (client side):
      - prompt for a username and a password (entered twice)
      - reject input when the two passwords differ
      - reject usernames/passwords containing spaces
      - send "R <username> <password>" to the server and report the
        server's OK / failure reply to the user
    """
    while True:
        username = input("请输入用户名:")
        password = getpass("请输入密码:")
        password_again = getpass("请再次输入密码:")
        if password != password_again:
            print("两次输入的密码不一致")
            continue
        if (" " in username) or (" " in password):
            print("用户名或密码中有特殊符号")
            continue
        # Send the registration request to the server.
        msg = "R {} {}".format(username, password)
        s.send(msg.encode())
        # NOTE(review): recv(1028) looks like a typo for 1024 — confirm.
        data = s.recv(1028).decode()
        if data == "OK":
            print("注册成功")
        else:
            print("注册失败")
        break
def main():  # Main client loop: show the menu and dispatch on the choice.
    while True:
        print("欢迎您: 1.注册  2.登录  3.退出")
        cmd = input("请选择功能(输入序号,并回车即可~)")
        if cmd == "1":
            do_register()
        elif cmd == "2":
            # Login is not implemented yet; just notify the server.
            s.send("登录".encode())
        elif cmd == "3":
            s.send("退出".encode())
        else:
            print("请输入正确的序号~")
# Script entry point.
if __name__ == '__main__':
    main()
| true
|
785a4748e33e7c97b6d672bc1e0a9ec9a2bc286d
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03993/s527132366.py
|
UTF-8
| 143
| 3.140625
| 3
|
[] |
no_license
|
# Count positions i where a points back at itself through one hop
# (a[a[i]-1] == i+1, with 1-based values); each pair is seen from both
# ends, so the count is halved before printing.
n = int(input())
a = [int(s) for s in input().split()]
ans = 0
for i in range(n):
    if a[a[i]-1] == i + 1:
        ans += 1
print(ans // 2)
| true
|
1b77b79c865f3b047e24fcee2f6408b13bcedbae
|
Python
|
maftuna-coder/Python-Programming-3
|
/exercise3solution.py
|
UTF-8
| 478
| 3.75
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 7 10:01:01 2019
@author: Maftuna
"""
def XOR(a, b, c):
    """Return 1 if exactly one of a, b is truthy once parsed as ints, else 0.

    `c` is accepted for interface compatibility but unused (the original
    overwrote it with the constant "XOR").

    Fix: the original evaluated `(not a and b)`, which yields `b` itself,
    and then compared the whole expression to 1 — so e.g. XOR(0, 2, _)
    computed 2 == 1 and wrongly returned 0.
    """
    return 1 if bool(int(a)) != bool(int(b)) else 0
def Main():
    """Prompt for two input values and print their XOR."""
    a = input("enter value for first input -> ")
    b = input("enter value for second input -> ")
    c ='XOR'
    x = XOR(a,b,c)
    print ("Output of {} XOR {} is {}".format(a, b, x))

# Runs on import/execution (no __main__ guard in the original).
Main()
| true
|
fca047c54adb9e66960bfda79cb32a1126e1f9ae
|
Python
|
k0syan/Kattis
|
/booking_a_room.py
|
UTF-8
| 365
| 3.453125
| 3
|
[] |
no_license
|
if __name__ == "__main__":
    # First line: r = number of rooms, n = count of already-booked rooms.
    first = input().split()
    r, n = int(first[0]), int(first[1])
    # Read the n booked room numbers, one per line.
    booked = [int(input()) for _ in range(n)]
    if len(booked) == r:
        # Every room is taken.
        print("too late")
    else:
        # Fix: membership tests against a list were O(r) each (O(r*n)
        # overall); a set makes each test O(1). The manual decrementing
        # while-loop is replaced by a comprehension.
        taken = set(booked)
        # Print the lowest-numbered free room.
        for room in range(1, r + 1):
            if room not in taken:
                print(room)
                break
| true
|
e5e84ebe45d6b3ee03e7dcd5ebd5f5594ae5eba6
|
Python
|
pastly/tor-ircbot
|
/member.py
|
UTF-8
| 4,374
| 3.109375
| 3
|
[] |
no_license
|
from time import time
class Member:
    """A single channel member, identified by the prefix nick!user@host."""

    def __init__(self, nick, user=None, host=None):
        self.nick = nick  # current nickname
        self.user = user  # username (ident); may be None
        self.host = host  # hostname/mask; may be None

    def __str__(self):
        # Standard IRC prefix representation.
        return '%s!%s@%s' % (self.nick, self.user, self.host)

    def set(self, nick=None, user=None, host=None):
        """Update each of nick/user/host that was passed as truthy."""
        for setter, value in ((self._set_nick, nick),
                              (self._set_user, user),
                              (self._set_host, host)):
            if value:
                setter(value)

    def _set_nick(self, nick):
        self.nick = nick

    def _set_user(self, user):
        self.user = user

    def _set_host(self, host):
        self.host = host
class MemberList:
    """A set of Members with case-insensitive lookups and a short-lived
    record of recent joins (kept for `recent_until` seconds)."""

    def __init__(self, recent_until=10.00):
        self._members = set()
        # List of (join_timestamp, Member) pairs for recent joins.
        self._recent = []
        # How long (seconds) an entry stays in the recent list.
        self._recent_until = recent_until

    def __len__(self):
        return len(self._members)

    def __iter__(self):
        return self._members.__iter__()

    def add(self, nick, user=None, host=None):
        """Add a new member, or update user/host of an existing nick."""
        if not self.contains(nick=nick):
            m = Member(nick, user, host)
            self._members.add(m)
            self._recent.append((time(), m))
        else:
            member = self.__getitem__(nick)
            member.set(user=user, host=host)
        self._trim_recent()

    def remove(self, nick):
        """Remove the member with this nick (no-op if absent)."""
        member = self.__getitem__(nick)
        if not member:
            return
        self._members.discard(member)
        self._recent = [(at, m) for at, m in self._recent if m.nick != nick]
        self._trim_recent()

    def discard(self, nick):
        # Alias for remove().
        return self.remove(nick)

    def contains(self, nick=None, user=None, host=None):
        """True when one single member matches every criterion supplied.

        Each match_* below is the first matching Member (or False); when
        several criteria are given, the found members must be the same
        object for the result to be True.
        """
        assert nick is not None or user is not None or host is not None
        match_nick, match_user, match_host = False, False, False
        if nick:
            match_nick = self._contains_nick(nick)
        if user:
            match_user = self._contains_user(user)
        if host:
            match_host = self._contains_host(host)
        # Single-criterion cases.
        if nick and not user and not host:
            return True if match_nick else False
        if user and not nick and not host:
            return True if match_user else False
        if host and not nick and not user:
            return True if match_host else False
        # Two-criterion cases: both lookups must find the same member.
        if nick and user and not host:
            return True if match_nick == match_user and match_nick else False
        if nick and host and not user:
            return True if match_nick == match_host and match_nick else False
        if user and host and not nick:
            return True if match_user == match_host and match_user else False
        # All three supplied.
        if match_nick == match_user and match_nick == match_host:
            return True if match_nick else False
        return False

    def _contains_nick(self, nick):
        # First member with this nick, or False.
        m = self.__getitem__(nick)
        return m if m else False

    def _contains_user(self, user):
        # First member with this user (case-insensitive), or False.
        # NOTE(review): assumes every member has a non-None user.
        user = user.lower()
        for m in self:
            if m.user.lower() == user:
                return m
        return False

    def _contains_host(self, host):
        # First member with this host (case-insensitive), or False.
        # NOTE(review): assumes every member has a non-None host.
        host = host.lower()
        for m in self:
            if m.host.lower() == host:
                return m
        return False

    def __getitem__(self, nick):
        """Case-insensitive nick lookup; returns the Member or None."""
        nick = nick.lower()
        for m in self:
            if m.nick.lower() == nick:
                return m
        return None

    def matches(self, user=None, host=None):
        """All members matching the given user and/or host.

        Returns (user_matches, host_matches) when both criteria are
        given, otherwise just the single list for the supplied one.
        """
        assert user is not None or host is not None
        matching_users = []
        matching_hosts = []
        if user:
            user = user.lower()
        if host:
            host = host.lower()
        for m in self:
            if user and m.user.lower() == user:
                matching_users.append(m)
            if host and m.host.lower() == host:
                matching_hosts.append(m)
        if user and host:
            return matching_users, matching_hosts
        if user:
            return matching_users
        return matching_hosts

    def _trim_recent(self):
        # Drop recent-join entries older than recent_until seconds.
        new_recent = []
        now = time()
        for at, m in self._recent:
            if at + self._recent_until >= now:
                new_recent.append((at, m))
        self._recent = new_recent

    def get_joined_since(self, t):
        """Set of members whose (still-remembered) join time is >= t."""
        members = set()
        for at, m in self._recent:
            if at >= t:
                members.add(m)
        return members
| true
|
99821a5e24fced3323c4b73c9fc578ac822a1f9f
|
Python
|
kexinyi/crowdsource
|
/python/nebulosity_mask.py
|
UTF-8
| 4,772
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
from __future__ import print_function, division
import keras
import keras.models as kmodels
import numpy as np
def equalize_histogram(img, n_bins=256, asinh_stretch=False):
    """Histogram-equalize `img` onto the 0..255 range.

    With asinh_stretch, the image is first arcsinh-compressed (offset by
    its min, scaled by the median of the offset image) so the histogram
    is more even. Returns (equalized_image, cdf).
    """
    # (adapted from janeriksolem.net histogram-equalization post)
    if asinh_stretch:
        offset = np.nanmin(img)
        scale = np.nanpercentile(img - offset, 50.)
        img = np.arcsinh((img - offset) / scale)
    # Histogram counts -> cumulative distribution, normalized to 0..255.
    counts, edges = np.histogram(img.flatten(), n_bins, density=False)
    cdf = counts.cumsum()
    cdf = 255 * cdf / cdf[-1]
    # Map pixel values through the cdf by linear interpolation.
    flat = np.interp(img.flatten(), edges[:-1], cdf)
    return flat.reshape(img.shape), cdf
def equalize_histogram_wise(img, n_bins=256, asinh_stretch=False):
    """Histogram-equalize `img` onto 0..255, with a WISE-tuned stretch.

    Differs from equalize_histogram only in the asinh stretch: centered
    on the median and scaled by the 10th-30th percentile spread, clipped
    to at least 100. Returns (equalized_image, cdf).

    Fix: removed a redundant function-local `import numpy as np`;
    numpy is already imported at module level.
    """
    if asinh_stretch:
        vmed = np.nanmedian(img)
        scale = np.nanpercentile(img, 30.) - np.nanpercentile(img, 10)
        scale = np.clip(scale, 100, np.inf)
        img = np.arcsinh((img - vmed) / scale)
    # get image histogram
    img_histogram, bins = np.histogram(img.flatten(), n_bins, density=False)
    cdf = img_histogram.cumsum()  # cumulative distribution function
    cdf = 255 * cdf / cdf[-1]  # normalize
    # use linear interpolation of cdf to find new pixel values
    img_equalized = np.interp(img.flatten(), bins[:-1], cdf)
    return img_equalized.reshape(img.shape), cdf
def load_model(fname_base):
    """Load a Keras model from <fname_base>.json (architecture) plus
    <fname_base>.h5 (weights)."""
    with open(fname_base + '.json', 'r') as f:
        model_json = f.read()
    model = kmodels.model_from_json(model_json)
    model.load_weights(fname_base + '.h5')
    return model
def subimages(img, shape):
    """Yield (row0, col0, tile) covering img in tiles of size `shape`.

    Tiles at the right/bottom edges may be smaller than `shape`.
    """
    rows = list(range(0, img.shape[0] + shape[0] - 1, shape[0]))
    cols = list(range(0, img.shape[1] + shape[1] - 1, shape[1]))
    for r0, r1 in zip(rows, rows[1:]):
        for c0, c1 in zip(cols, cols[1:]):
            yield r0, c0, img[r0:r1, c0:c1]
def gen_mask(model, img):
    """Classify every model-sized tile of ``img`` and return a per-pixel
    class mask (uint8) of the same shape as ``img``.

    The image is first padded by one pixel with its median; the pad is
    stripped again before returning. Class 0 scores are down-weighted by
    0.25 before the argmax.
    """
    img = np.pad(img, 1, mode='constant', constant_values=np.median(img))
    _, tile_h, tile_w, _ = model.layers[0].input_shape
    mask = np.empty(img.shape, dtype='u1')
    for row0, col0, tile in subimages(img, (tile_h, tile_w)):
        stretched, _ = equalize_histogram(tile.astype('f8'),
                                          asinh_stretch=True, n_bins=3000)
        stretched /= 255.
        batch = stretched.reshape(1, stretched.shape[0], stretched.shape[1], 1)
        scores = model.predict(batch, batch_size=1)[0]
        mask[row0:row0 + tile_h, col0:col0 + tile_w] = np.argmax(scores * [0.25, 1, 1, 1])
    return mask[1:-1, 1:-1]
def gen_mask_wise(model, img):
    """WISE variant of gen_mask: no padding, WISE-specific histogram
    equalization, and class 0 merged into class 1 at the end.

    Per the original comments the classes are: light, normal, nebulosity;
    the nebulosity score is down-weighted by 0.5 before the argmax.
    """
    _, tile_h, tile_w, _ = model.layers[0].input_shape
    mask = np.empty(img.shape, dtype='u1')
    for row0, col0, tile in subimages(img, (tile_h, tile_w)):
        stretched, _ = equalize_histogram_wise(tile.astype('f8'),
                                               asinh_stretch=True, n_bins=3000)
        stretched /= 255.
        batch = stretched.reshape(1, stretched.shape[0], stretched.shape[1], 1)
        scores = model.predict(batch, batch_size=1)[0]
        mask[row0:row0 + tile_h, col0:col0 + tile_w] = np.argmax(scores * [1., 1., 0.5])
    mask[mask == 0] = 1  # nebulosity_light -> normal
    return mask
def test_plots(model, imfns, extname='N26'):
    """Generate diagnostic mask overlay PNGs for a list of FITS images.

    For each file in ``imfns``, read the ``extname`` extension, compute the
    mask with gen_mask, and — whenever any pixel differs from class 2 —
    print the fraction of pixels in classes 0, 1 and 3 and save an overlay
    PNG (image + semi-transparent mask) in the current directory.

    BUG FIX: ``extname`` was previously ignored — the extension name was
    hard-coded to 'S7' in the ``fits.getdata`` call.
    """
    from matplotlib import pyplot as p
    from astropy.io import fits
    import os
    for timfn in imfns:
        tim = fits.getdata(timfn, extname=extname)
        mask = gen_mask(model, tim)
        if np.any(mask != 2):
            # Fractions of pixels falling in classes 0, 1 and 3.
            print(timfn, np.sum(mask == 0)/1./np.sum(np.isfinite(mask)),
                  np.sum(mask == 1)/1./np.sum(np.isfinite(mask)),
                  np.sum(mask == 3)/1./np.sum(np.isfinite(mask)))
            p.clf()
            # Median-subtracted image in grayscale, mask overlaid at alpha=0.2.
            p.imshow(((tim-np.median(tim))).T, aspect='equal', vmin=-50,
                     vmax=50, interpolation='none', cmap='binary',
                     origin='lower')
            p.imshow(mask.T, cmap='jet', alpha=0.2, vmin=0, vmax=3,
                     interpolation='none', origin='lower')
            p.draw()
            p.savefig(os.path.basename(timfn)+'.mask.png')
def main():
    """Demo entry point: run the toy model over toy_data/test_image.png and
    save the resulting class mask as a grayscale PNG."""
    from PIL import Image
    net = load_model('toy_data/19th_try')
    image = np.array(Image.open('toy_data/test_image.png'))
    mask = gen_mask(net, image)
    # Scale classes {0, 1, 2} onto the 8-bit range for visualization.
    out = Image.fromarray((255. * mask / 2.).astype('u1'), mode='L')
    out.save('toy_data/test_image_mask.png')
    return 0


if __name__ == '__main__':
    main()
| true
|
a64337580de37dbd4fb4c6c6e09c8e45a9e26420
|
Python
|
rghiglia/Yelp
|
/yelp_start_20160620b_bkp.py
|
UTF-8
| 10,054
| 2.609375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 10 08:23:23 2016
@author: rghiglia
"""
import matplotlib.pyplot as plt
# Wasn't necessary. Used winzip and now files are in JSON
# Use this to unzip
#import gzip
#
## Extract files
#dnm = r'C:\Users\rghiglia\Documents\ML_ND\Yelp'
#fnz = 'sample_submission.csv.gz'
#fnzL = dnm + '\\' + fnz
#fnmL = fnzL[:-3] # remove the '.gz' from the filename
#
## Read from .gz
#with gzip.open(fnzL, 'rb') as in_file:
# s = in_file.read()
#
## Store uncompressed file data from 's' variable
#with open(fnmL, 'w') as f:
# f.write(s)
# ----------------------------------------------------------------------------
# Extract data
# ----------------------------------------------------------------------------
#dnm = r'C:\Users\rghiglia\Documents\ML_ND\Yelp'
#fnm = 'yelp_academic_dataset_checkin.json'
#fnmL = dnm + '\\' + fnm
## Trials
##import pandas as pd
##df = pd.read_json(fnmL)
### Error:
###ValueError: Trailing data
##
##import sys
##sys.path.append(dnm)
##import json_to_csv_converter
### Worked now
##
### nope: json_to_csv_converter(fnmL)
##
##fnmO = 'yelp_academic_dataset_checkin.csv'
##fnmOL = dnm + '\\' + fnmO
## List files
#import glob
#fnmO = glob.glob(dnm + '\\' + '*.json')
#
#for f in fnmO:
# print "Processing file '{}'".format(f)
# clmns = json_to_csv_converter.get_superset_of_column_names_from_file(f)
# read_and_write_file(f, f.replace('.json','.csv'), clmns)
#import pandas as pd
#dnm = r'C:\Users\rghiglia\Documents\ML_ND\Yelp'
#
#
#fnm = 'yelp_academic_dataset_checkin.csv'
#df_checkin = pd.read_csv(dnm + '\\' + fnm)
## Worked!
#
#fnm = 'yelp_academic_dataset_business.csv'
#df_busines = pd.read_csv(dnm + '\\' + fnm, dtype=object)
### Error:
##C:\Users\rghiglia\Anaconda2\lib\site-packages\IPython\core\interactiveshell.py:2723: DtypeWarning: Columns (1,4,7,17,29,49,60,62,79,86,94) have mixed types. Specify dtype option on import or set low_memory=False.
## interactivity=interactivity, compiler=compiler, result=result)
## Ok, worked
#
#fnm = 'yelp_academic_dataset_review.csv'
#df_review = pd.read_csv(dnm + '\\' + fnm)
#
#fnm = 'yelp_academic_dataset_tip.csv'
#df_tip = pd.read_csv(dnm + '\\' + fnm)
#
#fnm = 'yelp_academic_dataset_user.csv'
#df_usr = pd.read_csv(dnm + '\\' + fnm)
# DO NOT ERASE THIS PART!
# Load every Yelp CSV in `dnm` into the dict `dfs`, keyed by dataset name
# (e.g. 'business', 'review'), derived from the filename. Python-2 script.
import pandas as pd
dnm = r'C:\Users\rghiglia\Documents\ML_ND\Yelp'
import time
start_time = time.time()
# Assign data to dataframes
import glob
fnm = glob.glob(dnm + '\\' + '*.csv')
# Strip directory and the 'yelp_academic_dataset_' prefix to get short names.
nm_df = [f.replace(dnm,'').replace('\\yelp_academic_dataset_','').replace('.csv','') for f in fnm]
dfs = {nm: None for nm in nm_df}
for nm, f in zip(nm_df, fnm):
    print "Processing file '{0}' ...".format(nm)
    # dtype=object avoids mixed-type inference warnings; everything is read as strings.
    dfs[nm] = pd.read_csv(f, dtype=object)
print("--- %s seconds ---" % (time.time() - start_time))
## Try to save data to file
#
#fnmO = (dnm + '\\' + 'yelp_data.pkl')
#import time
#import dill #pip install dill --user
#start_time = time.time()
#dill.dump_session(fnmO)
#print("--- %s seconds ---" % (time.time() - start_time))
## Load session
#import time
#start_time = time.time()
#dnm = r'C:\Users\rghiglia\Documents\ML_ND\Yelp'
#fnmO = (dnm + '\\' + 'yelp_data.pkl')
#import dill #pip install dill --user
#dill.load_session(fnmO)
#print("--- %s seconds ---" % (time.time() - start_time))
## Takes almost 10 times than reading data in ...
# See Yelp.ptx for data structure
# Now study the data
# dfs is a dict mapping dataset name -> DataFrame (built by the loading loop above)
# -----------------------------------------------------------------------------
# Business
# -----------------------------------------------------------------------------
# Exploratory pass over the business dataset: summary stats, then group
# counts by category and by city.
df = dfs['business']
df.info()
df.shape
df.count() # already a summary of non-nulls but the code below extracts more info
# 77k entries, with 98 columns
# Reorder
# Unique business_id?
# Order by non-nulls
# This is a good way to summarize data:
# First column is the column name
# Second column is how many non-null entries
# Third column is how many distinct values it has
# Use following to import
import sys
sys.path.append(r'C:\Users\rghiglia\Documents\ML_ND\Toolbox')
from rg_toolbox_data import df_summarize
nn = df_summarize(df)
# Notes:
# busID is unique nB IDs
# things like full_address:
# all have an entry but not nB uniques
# since they are all different businesses, either you have two businesses
# at the same address or there might be a default value that is not a null
# There are 9710 categories
# I want to know how many entries we have for each category
# I think that's a groupby thingy
#grp = pd.groupby(df, 'categories')
#df_tmp = grp.count()
#df_tmp.info()
# Much better implementation: businesses per category string, descending.
grp_cat = df.groupby('categories').size().sort_values(ascending=False)
# I see, so the value for category is actually a multiple thing already:
# NOTE(review): DataFrame.ix is deprecated/removed in modern pandas; use .iloc/.loc.
df.ix[0,'categories']
# If you want to start analysing the data I think you need to split those apart
# One of the questions has to do with cities. Can I get that info from somewhere?
grp_city = df.groupby('city').size().sort_values(ascending=False)
# Usual crap ...
# there might be some bad character, I can't use the variable explorer
# Then: different spelling of Montreal, maybe Mathews, etc.
#Cities:
#
# U.K.: Edinburgh
# Germany: Karlsruhe
# Canada: Montreal and Waterloo
# U.S.: Pittsburgh, Charlotte, Urbana-Champaign, Phoenix, Las Vegas, Madison
#So you might want to add country as a tag
# I think I want to stick with restaurants, so you'll need to filter them out
# The first question has to do with preferences: parking, type of food, etc.
# is there a bias US vs. EU?
# Second question has to do with location. I guess you'd try to relate business
# success with location, how do you define success? stars?
# Third question about seasonal effects. This is across categories. Could be
# interesting, but is it machine learning? I guess ML might just be: find a
# relationship ...
# Fourth question: non-intuitive categories. Complex, might need NLP
# Fifth question: NLP. Guess review rating from text. Wow!
# Sixth question: VERY VERY IMPORTANT!!!!!!!!
#Changepoints and Events: Can you detect when things change suddenly (i.e. a business coming under new management)? Can you see when a city starts going nuts over cronuts?
# Jeez, I thought the underlying assumption is of stationarity!
# Welcome to the real world!!!!!
#Social Graph Mining: Can you figure out who the trend setters are and who found the best waffle joint before waffles were cool? How much influence does my social circle have on my business choices and my ratings?
# Also very interesting and complex!!
# All-in-all very rich! I think I will stick with this data set. There is plenty of stuff to work on
# Start with a simple thing:
# Is stuff time-varying?
# Seasonalities?
# Interests and country bias?
# Let's see reviews per busID
# Start with most common busID
# Look into Review data set
# Count reviews per business, pick one business, and plot its star ratings
# over time together with a rolling mean.
df = dfs['review'].copy()
df.info()
df.head()
# Discard text for now
df.drop('text', axis=1, inplace=True)
df.info()
df.head()
nn = df_summarize(df)
#grp = df.groupby(['business_id', 'review_id'])
#grp_s = grp.size().sort_values(ascending=False)
#grp_s[0:5]
#!df.groupby(['business_id', 'review_id']).count()
# Review count per business, sorted most-reviewed first.
bID_grp = df[['business_id', 'review_id']].groupby(['business_id']).count()
bID_grp.sort_values('review_id', ascending=False, inplace=True)
# Let's look at the most popular business
rec = dfs['business'][dfs['business']['business_id']==bID_grp.index[0]]
# Plot the average star rating over time
fgsz = (5, 3)
i0 = 145  # index into the popularity-sorted business list
revs = dfs['review'][dfs['review']['business_id']==bID_grp.index[i0]]
# NOTE(review): `revs` is a slice of dfs['review']; dropping inplace on it may
# trigger SettingWithCopyWarning — confirm this does not mutate the original.
if 'text' in revs.columns: revs.drop('text', axis=1, inplace=True)
x = revs['stars'].astype(int)
#s = pd.Series(revs['stars'].astype(int), index=revs['date']) # doesn't work ... not sure why
# Build a date-indexed Series of integer star ratings, sorted chronologically.
s = revs['stars'].astype(int)
s.index = revs['date']
s.sort_index(axis=0, ascending=True, inplace=True)
fig = plt.figure(figsize=fgsz)
s.plot()
# NOTE(review): pd.rolling_mean was removed in pandas 0.18+/0.23; the modern
# equivalent is s.rolling(100).mean().plot(color='r').
pd.rolling_mean(s, 100).plot(color='r')
# Time-varying nature of ratings seems quite interesting!
# Review
# Further aggregation experiments on the review dataset, then convert the
# string dates to datetime objects.
df.info()
bID_grp = df['business_id'].count()
# NOTE(review): df['business_id'].count() returns a scalar, so the next line
# (.count() on it) likely raises AttributeError — confirm intent.
bID_grp.count()
#pd.value_counts(d[["col_title1","col_title2"]].values.ravel())
grp = pd.value_counts(df[['business_id']].values.ravel()) # .ravel() flattens the dataframe
grp = pd.value_counts(df['business_id'])
# I think what you had in mind was:
bID_grp = df.groupby(['business_id', 'review_id']) # or
bID_grp = df.groupby(['business_id', 'user_id', 'review_id']) # possibly same reviewer made multiple reviews to the same place; that could be an indication of change in management or other relevant change especially if dates are farther apart
bID_grp.count()
df['stars'] = df['stars'].astype(int)
# Per-(business, user) star statistics.
bID_grp = df.groupby(['business_id', 'user_id'])
stat = bID_grp['stars']
stat.agg('mean')
df_tmp = stat.agg('count')
type(df_tmp) # Series with a multi-index I assume
# Can you sort it?
# Sort by most popular b_id then reviewer largest number of reviews
# Possible, will look into if necessary
# In simplest form:
grp = pd.value_counts(df['business_id'])
type(grp) # Series
df_b_id = pd.DataFrame({'b_id': grp.index, 'cnt': grp.values})
df_b_id.head()
df_b_id['cnt'].plot(figsize=fgsz)
# Thing is you have to control for age, so maybe you want to look at reviews per week
# There might be methods for aggregating over time
#by_week = df['date'].groupby(lambda x: x.week)
## Didn't work
# Probably need to convert date to appropriate date object
#https://docs.python.org/2/library/datetime.html#datetime.datetime.strptime
from datetime import datetime
date_object = datetime.strptime('Jun 1 2005 1:33PM', '%b %d %Y %I:%M%p')
n = len(df)
## This will take an ENORMOUS amount of time!
#for (i, d) in enumerate(df['date']):
#    print "%i %i" % (i, n)
#    df['date'].iloc[i] = datetime.strptime(d, '%Y-%m-%d')
# Faster: parse each unique date string once, then assign it to all rows
# sharing that date.
d_tmp = set(df['date'])
nU = len(d_tmp)
for (i, d) in enumerate(d_tmp):
    print "%i %i" % (i, nU)
    d_tpp = datetime.strptime(d, '%Y-%m-%d')
    # NOTE(review): chained assignment (df['date'][mask] = ...) may fail to
    # write through and raises SettingWithCopyWarning; prefer df.loc[mask, 'date'].
    df['date'][df['date']==d] = d_tpp
df_tmp
| true
|