blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a4ee1ab4b5c9ea9f311c299832626bdd964c6ed0
|
Python
|
dilipRathoreRepo/Selenium_Game_Playing_Bot
|
/cookie_cutter_main.py
|
UTF-8
| 1,550
| 3.078125
| 3
|
[] |
no_license
|
from selenium import webdriver
from datetime import datetime as dt

# Machine-specific path to the chromedriver binary.
selenium_driver_path = "/Users/diliprathore/Downloads/chromedriver"
driver = webdriver.Chrome(executable_path=selenium_driver_path)
driver.get("http://orteil.dashnet.org/experiments/cookie/")

# The big clickable cookie element.
cookie = driver.find_element_by_id("cookie")
# Each <b> in #store holds "ItemName - price" (empty strings for separators).
store_item_prices = driver.find_elements_by_css_selector("#store b")
item_details = [price.text for price in store_item_prices]
# Map item name -> integer price, e.g. {"Cursor": 15, ...}.
# NOTE(review): assumes the store text is "name - 1,234" with a single '-' —
# an item name containing '-' would break the split.
item_details_dict = {}
for item in item_details:
    if item:
        name = ''.join(item.split('-')[0]).strip()
        price = int(''.join(item.split('-')[1].replace(',', '')).strip())
        item_details_dict[name] = price
print(item_details_dict)
def count_cookies():
    """Return the current cookie balance shown in the page's money counter."""
    balance_text = driver.find_element_by_id("money").text
    return int(balance_text)
def buy_item():
    """Buy the most expensive store item we can currently afford, if any.

    Fixes two defects of the original: the two status f-strings were built
    and immediately discarded (never printed), and when nothing was
    affordable `find_element_by_id("")` raised NoSuchElementException.
    """
    money = count_cookies()
    max_value_item = ""
    for itm, price in item_details_dict.items():
        if money >= price:
            # Store items appear in ascending price order (presumably — TODO
            # confirm), so the last affordable one is the priciest we can buy.
            max_value_item = f'{"buy"}{itm}'
    if not max_value_item:
        return  # nothing affordable yet
    print(f'money available was {money} and chosen item is {max_value_item}')
    print(f'Buying item {max_value_item}')
    chosen_item = driver.find_element_by_id(max_value_item)
    chosen_item.click()
now = dt.now()
original_time = now
print(f'time is {now}')
running = True
# Click continuously; every 5 seconds try to buy an upgrade, and stop the
# whole run once 60 seconds have elapsed.
while running:
    cookie.click()
    new_time = dt.now()
    updated_time = new_time - now            # time since the last purchase attempt
    time_since_original = new_time - original_time
    if time_since_original.total_seconds() > 60:
        running = False
    if updated_time.total_seconds() > 5:
        buy_item()
        now = new_time                       # reset the 5-second purchase timer
driver.quit()
| true
|
098c937482faa2a65dec482bcfb179ba64ef31df
|
Python
|
VitorArthur/Pratica-Python
|
/ex014.py
|
UTF-8
| 112
| 3.953125
| 4
|
[] |
no_license
|
# Read a temperature in Celsius and print its Fahrenheit equivalent (F = 9C/5 + 32).
c = float(input('Informe a temperatura em °C: '))
# Fixed typo in the user-facing message: "correponde" -> "corresponde".
print(f'A temperatura de {c}ºC corresponde a {9*c/5+32}ºF')
| true
|
39f02cf7ad5b6925f28cf97a72be1d3c37dfed09
|
Python
|
201600050-comillas/TFG-Business-Analytics--201600050
|
/Graficos (3).py
|
UTF-8
| 2,153
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8

# Jupyter-exported script: plots three pairs of sustainability indices
# against their conventional benchmarks (DJSI/S&P, Euro Stoxx, IBEX).

# In[35]:
# LOAD LIBRARIES
import pandas as pd
import matplotlib.pyplot as plt

# In[36]:
# LOAD DJSI Europe and S&P Europe BMI DATA
dfES = pd.read_excel (r'/Users/rocioartiaga/Desktop/indices/djsi-returns.xlsx')

# In[37]:
# SET THE COLUMN NAMES
# NOTE(review): set_axis(..., inplace=True) is deprecated in recent pandas.
dfES.set_axis(['Date', 'DJ Sustainability Europe', 'S&P Europe BMI'],
              axis='columns', inplace=True)

# In[38]:
# CONVERT THE DATE COLUMN TO DATETIME
dfES['Date']= pd.to_datetime(dfES['Date'])

# In[39]:
# SET THE INDEX
dfES.set_index('Date',inplace=True)

# In[40]:
# PLOT DJSI Europe and S&P Europe BMI on shared axes
ax=dfES['S&P Europe BMI'].plot(title='EVOLUCIÓN DEL RENDIMIENTO DE ÍNDICES EUROPEOS',figsize=(20, 12),c='blue',legend='S&P Europe BMI')
dfES['DJ Sustainability Europe'].plot(ax=ax,c="grey",legend='DJ Sustainability Europe')

# In[52]:
# LOAD EURO STOXX Sustainability and Euro Stoxx DATA (dfES is reused/overwritten)
dfES = pd.read_excel (r'/Users/rocioartiaga/Desktop/indices/return-EuroStoxx.xlsx')

# In[53]:
# SET THE COLUMN NAMES
dfES.set_axis(['Date', 'Euro Stoxx Sustainability', 'Euro Stoxx'],
              axis='columns', inplace=True)

# In[54]:
# CONVERT THE DATE FORMAT
dfES['Date']= pd.to_datetime(dfES['Date'])

# In[55]:
# SET THE INDEX
dfES.set_index('Date',inplace=True)

# In[57]:
# PLOT
ax=dfES['Euro Stoxx Sustainability'].plot(title='EVOLUCIÓN DEL RENDIMIENTO DE ÍNDICES EUROPEOS',figsize=(20, 12),c='grey',legend='Euro Stoxx Sustainability')
dfES['Euro Stoxx'].plot(ax=ax,c="blue",legend='Euro Stoxx')
ax.legend()

# In[58]:
# LOAD FTSE 4GOOD IBEX and IBEX35 DATA
dfIBEX = pd.read_excel (r'/Users/rocioartiaga/Desktop/indices/ibex/ibex-juntos.xlsx')

# In[59]:
# COLUMN NAMES (note: no to_datetime step here, unlike the other two datasets)
dfIBEX.set_axis(['Date', 'FTSE4GOOD_IBEX', 'IBEX35'],
                axis='columns', inplace=True)

# In[60]:
# INDEX
dfIBEX.set_index('Date',inplace=True)

# In[61]:
# PLOT
ax=dfIBEX['FTSE4GOOD_IBEX'].plot(title='RENDIMIENTO ÍNDICES ESPAÑOLES',figsize=(20, 12),c='grey',legend='FTSE 4 Good IBEX')
dfIBEX['IBEX35'].plot(ax=ax,c="blue",legend='IBEX35')
ax.legend()

# In[ ]:
| true
|
0734c67455e1a1be16c45ffa2b0879320c0f7346
|
Python
|
AndreyRysistov/BeeVSWasp
|
/dataloaders/datagenerator.py
|
UTF-8
| 1,691
| 2.75
| 3
|
[] |
no_license
|
import tensorflow as tf
import os
class DataLoader:
    """Builds a labeled tf.data pipeline from ./datasets/data/<class>/<image>.jpg."""

    def __init__(self, config):
        self.config = config
        self.path = './datasets/data'
        # One sub-directory per class; the folder names double as class labels.
        self.class_names = os.listdir(self.path)
        self.ds = self.set_generator()

    def get_data(self):
        """Return the (train, validation) dataset pair."""
        train, val = self.split_data()
        return train, val

    def split_data(self):
        """Shuffle self.ds and split it according to the configured ratios."""
        data = self.ds.shuffle(1000)
        # Stream-count the elements.  The original built an index list via
        # enumerate and took [-1] + 1, which materialized the whole list and
        # raised IndexError on an empty dataset; sum() handles both.
        labeled_all_length = sum(1 for _ in data)
        train_size = int(self.config.dataloader.train_size * labeled_all_length)
        val_size = int(self.config.dataloader.validation_size * labeled_all_length)
        print('Train: ', train_size)
        print('Validation :', val_size)
        train = data.take(train_size).cache().repeat().batch(self.config.glob.batch_size)
        # NOTE(review): validation is emitted as one single batch of val_size
        # elements — confirm that is intended rather than glob.batch_size.
        val = data.skip(train_size).cache().batch(val_size)
        return train, val

    def set_generator(self):
        """List all jpgs and map them to (image, one-hot label) pairs."""
        filenames = tf.data.Dataset.list_files("./datasets/data/*/*.jpg")
        ds = filenames.map(self.process_path).shuffle(buffer_size=1000)
        return ds

    def get_label(self, file_path):
        """Derive a one-hot float64 label from the file's parent directory."""
        parts = tf.strings.split(file_path, os.path.sep)
        one_hot = parts[-2] == self.class_names
        return tf.cast(one_hot, 'float64', name=None)

    def decode_img(self, img):
        """Decode a jpeg, scale it, and resize to the configured size."""
        img = tf.image.decode_jpeg(img, self.config.glob.image_channels)
        # NOTE(review): convert_image_dtype already rescales uint8 to [0, 1],
        # so the extra / 255 squashes pixels into [0, 1/255].  Kept as-is to
        # preserve training behavior — confirm whether this is intentional.
        img = tf.image.convert_image_dtype(img, tf.float32) / 255
        return tf.image.resize(img, self.config.glob.image_size)

    def process_path(self, file_path):
        """Read one file path and return its (image, label) pair."""
        label = self.get_label(file_path)
        img = tf.io.read_file(file_path)
        img = self.decode_img(img)
        return img, label
| true
|
10c0e4ea39701d08cbbd1aa513db6c784a035f2e
|
Python
|
letitbezh/redis-mongodb-cache
|
/main.py
|
UTF-8
| 278
| 2.5625
| 3
|
[] |
no_license
|
import process
import os
import processinsert
choose = raw_input("what you want to do ")
choose = int(choose)
if choose==1:
print process.find()
elif choose==2:
processinsert.insert()
elif choose==3:
flag = process.find()
if(flag):
processinsert.delete()
| true
|
c77a7050ddc9fe0781091fa12430cafc8cddac68
|
Python
|
WRansohoff/nmigen_ice40_spi_flash
|
/helpers.py
|
UTF-8
| 643
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
# Convert a 32-bit word to little-endian byte format.
# 0x1234ABCD -> 0xCDAB3412
def LITTLE_END( v ):
    """Byte-swap a 32-bit word (endianness conversion)."""
    swapped = 0
    # Pull the bytes out least-significant first so they land most-significant.
    for shift in (0, 8, 16, 24):
        swapped = (swapped << 8) | ((v >> shift) & 0xFF)
    return swapped

# Helper methods / values for generating test ROM images:
# pre-swapped command words for the red/green/blue LED channels.
R_ON  = LITTLE_END( 0x00000009 )
R_OFF = LITTLE_END( 0x00000001 )
G_ON  = LITTLE_END( 0x0000000A )
G_OFF = LITTLE_END( 0x00000002 )
B_ON  = LITTLE_END( 0x0000000B )
B_OFF = LITTLE_END( 0x00000003 )
RET   = LITTLE_END( 0x00000000 )

def DELAY( cycles ):
    """Encode a delay command: opcode 0x4 with the cycle count in bits [31:4]."""
    word = ( 0x4 | ( cycles << 4 ) ) & 0xFFFFFFFF
    return LITTLE_END( word )
| true
|
6cf16136cd35f69714f4c93c3059f3b450f6fd22
|
Python
|
GuilhermeLaraRusso/python_work
|
/ch_10_files_and_exceptions/10_1_reading_an_entire_file.py
|
UTF-8
| 2,826
| 4.625
| 5
|
[] |
no_license
|
# To try the following examples yourself, you can enter these lines in an
# editor and save the file as pi_digits.txt, or you can download the file from the
# book’s resources through https://www.nostarch.com/pythoncrashcourse/. Save
# the file in the same directory where you’ll store this chapter’s programs.
# Here’s a program that opens this file, reads it, and prints the contents
# of the file to the screen:
# Open pi_digits.txt via a context manager so the file is closed automatically.
with open('pi_digits.txt') as file_object:
    contents = file_object.read()
    # rstrip() removes the blank line that read()'s trailing newline would print.
    print(contents.rstrip())
# The first line of this program has a lot going on. Let’s start by looking
# at the open() function. To do any work with a file, even just printing its contents,
# you first need to open the file to access it. The open() function needs
# one argument: the name of the file you want to open. Python looks for this
# file in the directory where the program that’s currently being executed is
# stored. In this example, file_reader.py is currently running, so Python looks
# for pi_digits.txt in the directory where file_reader.py is stored. The open()
# function returns an object representing the file. Here, open('pi_digits.txt')
# returns an object representing pi_digits.txt. Python stores this object in
# file_object, which we’ll work with later in the program.
# The keyword with closes the file once access to it is no longer needed.
# Notice how we call open() in this program but not close(). You could open
# Files and Exceptions 191
# and close the file by calling open() and close(), but if a bug in your program
# prevents the close() statement from being executed, the file may never
# close. This may seem trivial, but improperly closed files can cause data
# to be lost or corrupted. And if you call close() too early in your program,
# you’ll find yourself trying to work with a closed file (a file you can’t access),
# which leads to more errors. It’s not always easy to know exactly when you
# should close a file, but with the structure shown here, Python will figure that
# out for you. All you have to do is open the file and work with it as desired,
# trusting that Python will close it automatically when the time is right.
# Once we have a file object representing pi_digits.txt, we use the read()
# method in the second line of our program to read the entire contents of
# the file and store it as one long string in contents. When we print the value
# of contents, we get the entire text file back:
#
# The only difference between this output and the original file is the
# extra blank line at the end of the output. The blank line appears because
# read() returns an empty string when it reaches the end of the file; this empty
# string shows up as a blank line. If you want to remove the extra blank line,
# you can use rstrip() in the print statement:
| true
|
440089dd4e3ee613b0a2cffba2af747867327d98
|
Python
|
katiepicha/Numpy_Project
|
/MyArrays1.py
|
UTF-8
| 3,345
| 4.125
| 4
|
[] |
no_license
|
import numpy as np
import random

# two dimensional array (has rows and columns)
# [1,2,3] is the first row with 3 elements and [4,5,6] is the second row with 3 more elements
arr01 = np.array([[1,2,3],
                  [4,5,6]])
# one dimensional array
arr02 = np.array([0.0, 0.1, 0.2, 0.3, 0.4])

# Print each row, then its elements on one line.
# NOTE(review): indentation was lost in this copy; this nesting (inner loop
# inside the row loop) is the reconstruction — confirm against the original.
for row in arr01:
    print(row)
    for col in row:
        print(col, end = ' ')
    print()

# flat disregards elements in rows and just prints each element one at a time
for i in arr01.flat:
    print(i)

arr03 = np.zeros(5)
arr04 = np.ones((2, 4), dtype = int)
arr05 = np.full((3, 5), 13)

# ------------------------- EXERCISE ------------------------------
# create a 2 dimensional array of 5 integer elements each using the random module and list comprehension
a = np.array([[random.randint(1,10) for n in range(5)],
              [random.randint(1,10) for n in range(5)]])
print(a)
b = np.array(np.random.randint(1, 10, size = (2, 5)))
print(b)
# ----------------------------------------------------------------

# range function in numpy is known as arange()
# start at 0, end at 5 (not included)
arr06 = np.arange(5)
# 5 is start value, but 10 is end value (not included)
arr07 = np.arange(5,10)
# start at 10, end at 1 (not included) and goes down by 2
arr08 = np.arange(10, 1, -2)
# ----------------------------------------------------------------

# linspace() allows you to create an array of numbers that are equally spaced out
# starting number and end number ARE included
arr09 = np.linspace(0.0, 1.0, num = 5)
# reshape must be a factor/multiple of the number
arr10 = np.arange(1, 21).reshape(4, 5)

# broadcasting - takes a scalar value and expands it to match the number of elements in the original array
# broadcasting does not affect the original array
num01 = np.arange(1, 6)
num02 = num01 * 2 # multiplies the number two with every element in num01
num03 = num01 ** 3
# augmented assignment changes the original array
num01 += 10
num04 = num01 * num02 # arrays must be the same size/length
# will return True or False if it meets criteria
num05 = num01 > 13
num06 = num03 > num01

# Here we have an array of 4 students grades on 3 exams
# row = student
# col = exam
grades = np.array([[87, 96, 70], [100, 87, 90],
                   [94, 77, 90], [100, 81, 82]])
# summaries/aggregate functions across the board - NOT for a particular student
print(grades.sum())
print(grades.mean())
print(grades.std())
print(grades.var())
# calculate average on all rows for each column (specific for exam)
grades_by_exam = grades.mean(axis = 0) # will produce a 1 dimensional array with 3 elements (averages for each exam)
# calculate average on all columns for each row (specific for student)
grades_by_student = grades.mean(axis = 1) # will produce a 1 dimensional array with 4 elements (averages for each student)

# universal functions in Numpy
# square root
num07 = np.array([1, 4, 9, 16, 25, 36])
num08 = np.sqrt(num07)
# add (same as concatenation)
num09 = np.array([10, 20, 30, 40, 50, 60])
num10 = np.add(num07, num09)
# same as
num10 = num07 + num09
# multiply
num11 = np.multiply(num09, 5)
# same as
num11 = num09 * 5
# number of rows do NOT have to match up, but the number of columns do
num12 = num09.reshape(2,3)
num13 = np.array([2, 4, 6])
num14 = np.multiply(num12, num13)
print()
| true
|
8301dab1fff11110c7001f30776c3ed5916acd6a
|
Python
|
addam/pyg
|
/examples/tetris.py
|
UTF-8
| 2,300
| 2.734375
| 3
|
[] |
no_license
|
import pyg
from random import choice as randchoice

#TODO resolve: is Square positioned from its own centre, or from its bottom-left corner?
# How are drawables positioned? Which positioning is the default and which must be set manually?
#For inspiration: with manual positioning of some actors it would help to open a transaction and abort it on collision

# 12x6 playfield; `view` is the render target, `game` the scene graph root.
view, game = pyg.createGame(12, 6)

# Prototype cell: a fixed (non-falling) unit square that bricks are cloned from.
box = pyg.Actor(kinetics=pyg.kinetics.Fixed())
#maybe: box = pyg.Actor(pyg.kinetics.FIXED)
box.drawable = pyg.drawables.Square(gameSize=(1,1))
# the first parameter means: no input connected yet; save to a user-defined variable
box.var.outlineShader = pyg.Shaders.Outline(None, 3, "#000")
# fancy bubble reflection shading
# NOTE(review): `pointLight` is not defined anywhere in this file.
box.drawable.shader = pyg.Shaders.Bubble(box.var.outlineShader, pointLight)

def resetBrick(brick):
    # Rebuild `brick` as a random tetromino shape with a random color,
    # spawned at the top of the playfield.
    brickShapes = [[(0,0), (1,0), (2,0), (3,0)],
                   [(0,0), (1,0), (1,1), (2,1)],
                   [(0,0), (1,0), (1,1), (2,0)]]
    colors = [(1,0,0), (0,1,0), (0,0,1)]
    randColor = randchoice(colors)
    for x, y in randchoice(brickShapes):
        newBox = box.clone()
        newBox.position = x, y
        # connect the free socket
        box.var.outlineShader.inputs[0] = pyg.Shaders.Color(randColor)
        brick.children.append(newBox)
    brick.position = 3, 12

# Falling brick: gravity applied one whole cell per tick.
brick = pyg.Actor()
kinetics = pyg.kinetics.Quantized(ticksPerSecond=1.0)
kinetics.addForce(pyg.forces.Gravity(-1))
brick.kinetics = kinetics

# The floor: everything below y=0 is solid.
bottom = pyg.Actor(kinetics=pyg.kinetics.Fixed())
bottom.shape = pyg.shapes.halfPlane("y<0")
game.append(bottom)

brick = pyg.Actor()
resetBrick(brick)

@pyg.collide(selfParent=brick, parent=bottom) # parent: parent node of the obstacle
@pyg.collide(selfParent=brick, hit=bottom)
def land(collision):
    # Transfer the brick's boxes into the pile, spawn a new brick, and
    # clear the bottom row when it is full.
    # NOTE(review): `brick` is assigned below without a `global` statement,
    # which makes it local and would raise UnboundLocalError on the first
    # use here; `randomBrick` is also undefined (resetBrick was likely meant).
    if collision.direction.y == 0:
        return
    bottom.children.extend(brick.children)
    for box in brick.children:
        box.position = pyg.relPosition(bottom, collision.previousPosition) + box.position
    brick.children.clear()
    brick = randomBrick()
    bottomRow = [box for box in bottom.children if box.y == 0]
    if len(bottomRow) == game.width:
        # clear the bottom row and shift everything down
        bottom.children.removeAll(bottomRow)
        for box in bottom.children:
            box.y -= 1

@pyg.input(self=brick, gesture='QUANTIZED_MOVEMENT')
def move(event):
    # Horizontal movement follows the quantized input gesture.
    brick.x += event.direction.x

game.append(brick)
pyg.launch(game, windowSize='FULL')
| true
|
201626bfed3023c3286c747b496e34e6a246f908
|
Python
|
amishapagare25/python_codes
|
/main.py
|
UTF-8
| 594
| 2.6875
| 3
|
[] |
no_license
|
from tkinter import *

# Simple registration form: four labels, three entries, two radio buttons
# and one submit button laid out on a grid.
window = Tk()

Label(window,text='Fullname').grid(row=0)
Label(window,text='email').grid(row=1)
Label(window,text='age').grid(row=2)
Label(window,text='Gender').grid(row=3)

e1= Entry(window)
e2= Entry(window)
e3=Entry(window)
e5=Radiobutton(window,text="male",value=0)
e6=Radiobutton(window,text="female",value=1)
# The original created this Button twice, leaking the first instance;
# a single construction is enough.
# NOTE(review): the two Radiobuttons share no `variable=`, so selecting one
# does not deselect the other — confirm whether a shared IntVar was intended.
e4=Button(window,text='submit',width=10)

e1.grid(row=0,column=1)
e2.grid(row=1,column=1)
e3.grid(row=2,column=1)
e4.grid(row=5,column=2)
e5.grid(row=3,column=1)
e6.grid(row=4,column=1)

mainloop()
| true
|
466e108165fd0ae24790087d8a5e60762ccbe9af
|
Python
|
pradeepshetty1/python-Pandas-and-Numpy-
|
/Module3/4_exception.py
|
UTF-8
| 378
| 3.515625
| 4
|
[] |
no_license
|
# Python 2 demo of import-time and runtime exception handling.
try:
    import mylib  # project-local module; may legitimately be absent
except ImportError as ie:
    print 'module not found: ',ie

def exception_example(a,b):
    # Divide a by b and print the result; on any failure (e.g. division by
    # zero) print the exception instead and fall through.  Returns the
    # quotient, or 0 when the division failed.
    c = 0
    try:
        c=a/b
        print 'result is ',c
    except Exception as exp:
        print 'encountered exception',exp
    return c

#exception_example(10,3)
#print 'printing while calling the function',exception_example(10,3)
exception_example(10,0)  # b == 0 exercises the exception path
| true
|
2911b22b0e1aafc5afcc5bd5bfbc2f70f13ce99c
|
Python
|
CCorazza/Intra42
|
/users/Ldap.py
|
UTF-8
| 2,234
| 2.53125
| 3
|
[] |
no_license
|
import ldap
class Ldap(object):
    """Thin wrapper around the python-ldap module (Python 2)."""

    # Server URI and DN template; %s is filled with the username.
    __host = 'ldaps://ldap.42.fr:636'
    __base_dn = 'uid=%s,ou=2013,ou=people,dc=42,dc=fr'

    def __init__(self, username, password):
        'Constructor: Ldap(username, password)'
        self.handler = None
        self.username = username
        self.password = password
        self.dn = self.__base_dn % username

    def __del__(self):
        'Destructor: make sure the connection is released.'
        self.disconnect()

    def connect(self):
        'Return True if the bind succeeds, False otherwise.'
        try:
            self.handler = ldap.initialize(self.__host)
            self.handler.simple_bind_s(self.dn, self.password)
            return True
        except ldap.LDAPError, e:
            self.handler = None
            return False

    def disconnect(self):
        'Close the connection to the LDAP server (no-op when not connected).'
        if (self.handler is not None):
            self.handler.unbind()
            self.handler = None

    def get_by_uid(self, uid=None):
        'Return every LDAP field available for the given login (defaults to self).'
        ret = []
        if (self.handler is None):
            return {}
        if (uid is None):
            uid = self.username
        # __base_dn[7:] strips the leading 'uid=%s,' to get the search base.
        result = self.handler.search(self.__base_dn[7:], ldap.SCOPE_SUBTREE,
                                     'uid=%s' % uid, ['*'])
        # Drain the async search until no more entries arrive.
        while (True):
            r_type, r_data = self.handler.result(result, 0)
            if (r_data == []): break
            elif (r_type == ldap.RES_SEARCH_ENTRY): ret.append(r_data)
        if (ret != []):
            # Keep only the first entry's attribute dict; LDAP attribute
            # names use '-', which is mapped to '_' for Python-friendly keys.
            ret = ret[0][0][1]
            clean_ret = {}
            for (k, v) in ret.items():
                clean_ret[k.replace('-', '_')] = v[0]
            return clean_ret
        return {}

    def search(self, search_attr=['*'], search_filter='uid=*'):
        'Return the requested fields for a given search filter.'
        # NOTE(review): mutable default argument search_attr=['*'] is shared
        # across calls — harmless while it is never mutated, but fragile.
        ret = []
        data = []
        if (self.handler is None):
            return []
        result = self.handler.search(self.__base_dn[7:], ldap.SCOPE_SUBTREE,
                                     search_filter, search_attr)
        while (True):
            r_type, r_data = self.handler.result(result, 0)
            if (r_data == []): break
            elif (r_type == ldap.RES_SEARCH_ENTRY): data.append(r_data)
        for user in data:
            dico = {}
            for k,v in user[0][1].items():
                dico[k.replace('-', '_')] = v[0]
            ret.append(dico)
        return ret
| true
|
85b9b1bee7ce98ba5d3860efa8c89541328d352f
|
Python
|
LUMC/OvoGrowth-dataprocessor
|
/scripts/validate_input_files.py
|
UTF-8
| 468
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
import sys
from os import path

# Raw command-line arguments; argv[0] is the script path itself.
arg = sys.argv
def validate_input(files_dataset, required_files):
    """Check that every dataset directory contains every required file.

    Prints one message per problem found.  Returns True only when at least
    one dataset directory was supplied and no required file is missing.
    (The original returned True unconditionally, so the caller wrote its
    success marker even for broken input.)
    """
    valid = True
    if len(files_dataset) < 1:
        print("No valid dataset files detected")
        valid = False
    for file_set in files_dataset:
        for rfile in required_files:
            if (not path.exists(file_set+"/"+rfile)):
                print(file_set+"/"+rfile+ " are required but missing")
                valid = False
    return valid
# sys.argv[0] is the script path, so the real arguments start at index 1:
# argv[1] = dataset directories, argv[2] = required files, argv[3] = marker file.
# NOTE(review): argv entries are plain strings; the original indexed from 0
# and therefore iterated the script path character-by-character.  Confirm the
# intended CLI format (comma-separated lists?) with the workflow that calls this.
if (validate_input(arg[1], arg[2])):
    open(arg[3], 'w').write("correct")
| true
|
2802744bdb6ba909cbfd96435ace75bcf266b514
|
Python
|
akshayravichandran/Foundations-of-Software-Science
|
/w5/unsuper.py
|
UTF-8
| 2,038
| 2.640625
| 3
|
[] |
no_license
|
from w3.num import Num
from w4.w4 import rows
from w4.w4 import Data
class Unsuper:
    def __init__(self, data):
        """
        Initialize an unsupervised learner.
        """
        # NOTE(review): data.rows.values() is a dict view, which is not
        # indexable; the __main__ block replaces self.rows with a sorted
        # list before band/argmin/cuts run, so indexing works in practice.
        self.rows = data.rows.values()
        self.enough = len(self.rows) ** 0.5 # magic constant

    def band(self, c, lo, hi):
        # Render the value range rows[lo..hi] of column c as "lo..hi";
        # open-ended at either extreme of the sorted rows.
        # NOTE(review): `most` is a module-level global set in __main__ —
        # this method only works after that assignment.
        if lo == 0:
            return '..' + str(self.rows[hi][c])
        elif hi == most:
            return str(self.rows[lo][c]) + '..'
        else:
            return str(self.rows[lo][c]) + '..' + str(self.rows[hi][c])

    def argmin(self, c, lo, hi):
        # Find the index in [lo, hi] that best splits column c, i.e. that
        # minimizes the expected spread of the two halves; False if the
        # span is too small or no split beats the unsplit spread.
        cut = False
        if hi - lo > 2*self.enough:
            l, r = Num(), Num()
            # Start with everything on the right, then move items left one
            # by one, updating both running stats incrementally.
            for i in range(lo, hi+1):
                r.numInc(self.rows[i][c])
            best = r.sd
            for i in range(lo, hi+1):
                x = self.rows[i][c]
                l.numInc(x)
                r.numDec(x)
                if l.n >= self.enough and r.n >= self.enough:
                    tmp = Num.numXpect(l,r) * 1.05 # magic constant
                    if tmp < best:
                        cut, best = i, tmp
        return cut

    def cuts(self, c, lo, hi, pre):
        # Recursively split rows[lo..hi] on column c, printing the tree;
        # leaves overwrite the raw numeric values with their band string.
        txt = pre + str(self.rows[lo][c]) + '..' + str(self.rows[hi][c])
        cut = self.argmin(c, lo, hi)
        if cut:
            print(txt)
            self.cuts(c, lo, cut, pre + '|..')
            self.cuts(c, cut+1, hi, pre + '|..')
        else:
            b = self.band(c, lo, hi)
            print(txt + ' (' + b + ') ')
            for r in range (lo, hi+1):
                self.rows[r][c] = b
def stop(c, t):
    """Return the index of the last row in *t* whose column *c* is not '?' (0 if none)."""
    for idx in reversed(range(len(t))):
        if t[idx][c] != '?':
            return idx
    return 0
if __name__== "__main__":
data = rows("w5\\weatherLong.csv")
u = Unsuper(data)
for c in data.use:
if data.indep(c) and c in data.nums:
u.rows = sorted(u.rows, key=lambda r: r[c])
most = stop(c, u.rows)
print('\n-- ', data.name[c], most, '----------')
u.cuts(c, 0, most, "|.. ")
for _, name in data.name.items():
print(name.replace('$','') + '\t', end = '')
print()
for row in u.rows:
for i, v in enumerate(row.values()):
if i == 1:
print(v.ljust(10), end = "\t")
else:
print(v, end = "\t")
print()
| true
|
a9389dd4e93d9c1a13b9911d032ffa9b85bb9964
|
Python
|
danielfrgs/python-machine-learning
|
/adaline-gd.py
|
UTF-8
| 2,285
| 3.28125
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.evaluate import plot_decision_regions
class AdalineGD(object):
    """ADAptive LInear NEuron classifier.

    Batch gradient descent on the sum-of-squared-errors cost; weights start
    at zero, w_[0] is the bias unit.
    """

    def __init__(self, eta=0.01, n_iter=50):
        self.eta = eta          # learning rate
        self.n_iter = n_iter    # number of epochs

    def fit(self, X, y):
        """Learn weights from training matrix X and targets y; returns self."""
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []
        for _ in range(self.n_iter):
            errors = y - self.net_input(X)
            # One batch update: gradient of the SSE cost w.r.t. each weight.
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            self.cost_.append((errors ** 2).sum() / 2.0)
        return self

    def net_input(self, X):
        """Weighted sum of inputs plus bias."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        """Identity activation (linear neuron)."""
        return self.net_input(X)

    def predict(self, X):
        """Class label: 1 where the activation is non-negative, else -1."""
        return np.where(self.activation(X) >= 0.0, 1, -1)
# Load the Iris dataset straight from the UCI repository (requires network).
df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data')
# First 100 rows are setosa + versicolor: a linearly separable 2-class subset.
y = df.iloc[0:100, 4].values
y = np.where( y == 'Iris-setosa', -1, 1)
# Features: sepal length (col 0) and petal length (col 2).
X = df.iloc[0:100, [0, 2]].values

# Without feature scaling
# fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
# ada1 = AdalineGD(n_iter=10, eta=0.01).fit(X, y)
# ax[0].plot(range(1, len(ada1.cost_) + 1),
#            np.log10(ada1.cost_), marker='o')
# ax[0].set_xlabel('Epoches')
# ax[0].set_ylabel('log(Sum-squared-error)')
# ax[0].set_title('Adaline - Learning rate 0.01')
# ada2 = AdalineGD(n_iter=10, eta=0.0001).fit(X, y)
# ax[1].plot(range(1, len(ada1.cost_) + 1),
#            np.log10(ada2.cost_), marker='o')
# ax[1].set_xlabel('Epoches')
# ax[1].set_ylabel('log(Sum-squared-error)')
# ax[1].set_title('Adaline - Learning rate 0.0001')
# plt.show()

# With feature scaling: standardize each feature to zero mean, unit variance
# so gradient descent converges with a larger learning rate.
X_std = np.copy(X)
X_std[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()
X_std[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()

ada = AdalineGD(n_iter=15, eta=0.01)
ada.fit(X_std, y)

# Decision-region plot (mlxtend helper), then the cost curve per epoch.
plot_decision_regions(X_std, y, clf=ada)
plt.title('Adaline - Gradient descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.show()
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Sum-square-error')
plt.show()
| true
|
e15d010d84b29cde52dfcbe8541c183972291e58
|
Python
|
GeorgiyZhuravlevRudn/gzhuravlev.pfur
|
/2020-2021/Pr./lab03/ex 5.1.py
|
UTF-8
| 333
| 4.03125
| 4
|
[] |
no_license
|
# Read N integers from the user, then report the maximum, the array itself,
# and its reverse.
arr_1 = []
element_count = int(input('Input a number of elements for array: '))
for i in range(element_count):
    arr_1.append(int(input(f"element { i } of an array = ")))
print("the largest number in array is ", max(arr_1))
print("normal array", arr_1)
print("reversed array", list(reversed(arr_1)))
| true
|
f83c84cbcdc3c18f94be97c0d406db8b9dfb1050
|
Python
|
jackrushie/02m
|
/02m/PDF_editting/PDF_Merger.py
|
UTF-8
| 382
| 2.75
| 3
|
[] |
no_license
|
import PyPDF2
import sys
import os

# Directory scanned for the PDFs to merge.
filepath = './02m/PDF_editting/PDFs/'
inputs = os.listdir(filepath)
print(inputs)

def pdf_combined(filepath):
    """Merge every .pdf in *filepath* into MergedFile.pdf (written to the CWD)."""
    merger = PyPDF2.PdfFileMerger()
    try:
        for pdf in os.listdir(filepath):
            if pdf.endswith((".pdf")):
                print(pdf)
                merger.append(f'{filepath}{pdf}')
        merger.write('MergedFile.pdf')
    finally:
        # Release the source file handles even if a write fails — the
        # original never closed the merger.
        merger.close()

pdf_combined(filepath)
| true
|
63313c60c690b38fe8220131dbcf592af96a7e22
|
Python
|
efchakim/Python
|
/mastermind.py
|
UTF-8
| 2,741
| 4.125
| 4
|
[] |
no_license
|
#This program simulates the code-breaking game Mastermind.
#This program checks the players guess against the initial for random numbers between 1-6.
#Enter four guesses from 1-6, example: 1234.
#By: Diana Hakim
def GenCode():
    """Return the secret code: a list of four random digits in 1..6."""
    import random
    return [random.randrange(1, 7) for _ in range(4)]
def blackbox(gen, sguess):
    """Mark exact matches (right digit, right position).

    Mutates *gen* in place: every exact match becomes a filled square, and
    the corresponding guess slot becomes 'x' so whitebox() skips it.
    Returns (gen, copy of the marked guess, pristine copy of the code).
    """
    ans = list(gen)                   # untouched copy of the secret code
    guess = [int(ch) for ch in sguess]
    for pos, digit in enumerate(guess):
        if gen[pos] == digit:
            gen[pos] = u"\u25A0"      # filled square: right value, right place
            guess[pos] = "x"          # consume this slot
    return gen, list(guess), ans
def whitebox(gen, guess, ans):
    'Mark digits present in both lists (right value, wrong position) with an empty square.'
    # NOTE(review): gen and guess are mutated *while* gen is being iterated,
    # and gen.index(i) / guess.index(i) find the *first* occurrence, which may
    # not be the current iteration position when digits repeat.  The marking
    # outcome depends on this exact statement order — do not reorder.
    #print(gen)
    for i in gen:
        if i in guess:
            new= gen.index(i)
            newguess= guess.index(i)
            guess[newguess]="x"       # consume the matched guess slot
            gen[new]=u"\u25A1"        # empty square: right value, wrong place
            #print(guess)
            #print(gen)
    return gen, guess, ans
def mastermind():
    'this game checks the players guess against the initial four random numbers between 1-6, ex to be entered is 1436.'
    print('\n{:^30}\n{}\n1. {}\n2. {}'.format('Mastermind','Type four guesses from 1-6 ex. 1236',u"\u25A0 means 1 of your ans is in the right position",u"\u25A1 means 1 of your ans is in the wrong position"))
    guessnumber = 0; gen = GenCode()
    List = gen[0:] #'Variables & Assignments' lists are mutable
    while True:
        boxlist = []
        guessnumber += 1
        guess=(input('Enter your guess: '))
        # Work on a fresh copy of the code each round; List keeps the original.
        gen = List [0:]
        gen, guess, ans = blackbox(gen, guess)
        # Cheat code: "7777" can never match digits 1-6, so guess survives
        # blackbox unchanged and this reveals the answer.
        if guess == [7,7,7,7]:
            print(ans)
        final = whitebox(gen, guess, ans)
        gen, guess, ans = final
        #print(gen) #testing [6, '□', '□', '■']
        #print(List) #testing [6, 1, 4, 4]
        # Collect the square markers (the only str entries left in gen).
        for i in gen:
            if type(i).__name__ == 'str':
                boxlist += [i]
        for i in sorted(boxlist): #print out all boxes
            print(i, end=' ')
        print()
        # Four filled squares means every digit was exactly right.
        if gen.count(u"\u25A0")==4:
            if guessnumber == 1: #1st try
                print('Hey! Good job! You won in {} try!'.format(guessnumber))
                break
            else: #many tries
                print('Hey! Good job! You won in {} tries!'.format(guessnumber))
                break
mastermind()
| true
|
f33f4b069a7def053f2ae0c97bc892ee8f22e8d6
|
Python
|
CoderQingli/MyLeetCode
|
/290. Word Pattern.py
|
UTF-8
| 601
| 3.140625
| 3
|
[] |
no_license
|
def wordPattern(self, pattern, str):
    """
    :type pattern: str
    :type str: str
    :rtype: bool

    True when the letters of *pattern* and the words of *str* are in
    one-to-one correspondence.
    """
    # Degenerate inputs: both empty match; exactly one empty cannot.
    if not pattern and not str:
        return True
    if not pattern or not str:
        return False
    words = str.split()
    if len(pattern) != len(words):
        return False
    # Bijection test: distinct letters, distinct words and distinct
    # (letter, word) pairs must all have the same count.
    return len(set(pattern)) == len(set(words)) == len(set(zip(pattern, words)))
| true
|
b82abf1f80c4375dc0c9508b32e7b04d24df58b7
|
Python
|
StarkTan/Python
|
/Pandas/multiple_data.py
|
UTF-8
| 3,542
| 3.25
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
def _append():
"""
DataFrame.append(other, ignore_index=False, verify_integrity=False, sort=None)
"""
df = pd.DataFrame(np.arange(6).reshape(2, 3), index=[0, 1], columns=list('ABC'))
print(df)
df = df.append([{'A': 6, 'B': 7, 'C': 8}])
print(df)
df = df.append(pd.Series({'A': 9, 'B': 10, 'C': 11}, name=0), ignore_index=True)
print(df)
df['D'] = list("1234")
print(df)
return
def _concat():
"""
pd.concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
copy=True)
"""
df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index=[0, 1, 2, 3])
df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
'B': ['B4', 'B5', 'B6', 'B7'],
'C': ['C4', 'C5', 'C6', 'C7'],
'D': ['D4', 'D5', 'D6', 'D7']},
index=[4, 5, 6, 7])
df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
'B': ['B8', 'B9', 'B10', 'B11'],
'C': ['C8', 'C9', 'C10', 'C11'],
'D': ['D8', 'D9', 'D10', 'D11']},
index=[8, 9, 10, 11])
frames = [df1, df2, df3]
result = pd.concat(frames)
print(result)
result = pd.concat(frames, keys=['x', 'y', 'z'])
print(result)
print('-' * 20)
df4 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'],
'D': ['D2', 'D3', 'D6', 'D7'],
'F': ['F2', 'F3', 'F6', 'F7']},
index=[2, 3, 6, 7])
result = pd.concat([df1, df4], axis=1)
print(result)
print('*' * 40)
result = pd.concat([df1, df4], axis=1, join='inner') # 取交集
print(result)
result = pd.concat([df1, df4], axis=1, join_axes=[df1.index])
print(result)
def _join():
"""
join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
'B': ['B0', 'B1', 'B2']})
print(df.join(other, lsuffix='_caller', rsuffix='_other')) # 为重复 column 添加前缀
print(df.set_index('key').join(other.set_index('key')))
print(df.join(other.set_index('key'), on='key', how='right')) # left,right表示以哪边的index为准
print(df.join(other.set_index('key'), on='key', how='inner')) # inner,outer 表示交集、并集
def _merge():
"""
merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
"""
df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 5]})
df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
'value': [5, 6, 7, 8]})
print(df1)
print(df2)
print(df1.merge(df2, left_on='lkey', right_on='rkey'))
print(df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=('_left', '_right')))
# Uncomment individual demos as needed; only the merge demo runs by default.
# _append()
# _concat()
# _join()
_merge()
| true
|
9a3a1dff2994dd37e225916b40221c64af366245
|
Python
|
huffmp2/python
|
/numbers.py
|
UTF-8
| 887
| 3.578125
| 4
|
[] |
no_license
|
# Count 1 through 4, one value per line.
for n in range(1, 5):
    print(n)

# range() materialized into lists.
numbers = list(range(1, 6))
print(numbers)

evenumbers = list(range(2, 11, 2))
print(evenumbers)

# Squares of 1..10, printed twice — matching the original's two
# build-and-print passes.
squares = [n ** 2 for n in range(1, 11)]
print(squares)

squares = [n ** 2 for n in range(1, 11)]
print(squares)

# Aggregates over a digit list.
digits = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
print(min(digits))
print(max(digits))
print(sum(digits))

squares = [n ** 2 for n in range(1, 11)]
print(squares)

numbers = list(range(1, 21))
print(numbers)

# One million numbers: min, max and sum.
mill = list(range(1, 1000001))
print(min(mill))
print(max(mill))
print(sum(mill))

odds = list(range(1, 21, 2))
print(odds)

threes = list(range(0, 31, 3))
print(threes)

# Cubes of 1..10, again printed twice like the squares above.
squares = [n ** 3 for n in range(1, 11)]
print(squares)

squares = [n ** 3 for n in range(1, 11)]
print(squares)
| true
|
311e7afb428f755aaa666b624dc4190595b06673
|
Python
|
thomasvangoidsenhoven/Raytracer-3D-Cplus
|
/mesh/meshpython/lib/lib/meshwriter.py
|
UTF-8
| 1,788
| 2.96875
| 3
|
[] |
no_license
|
import json
def writeJson(data, outfile="C:\\Users\\Thomas\\Desktop\\3D 2019\\3dcg1819-team\\mesh\\meshpython\\lib\\bunnyOP2.json"):
    """Serialize the BVH node list to a line-oriented text file.

    :param data: list of nodes as produced by the mesh builder; data[0] is the root
    :param outfile: destination path (defaults to the original hard-coded location)
    """
    lines = make_array(data)
    lines.reverse()
    # 'with' guarantees the handle is closed even if a write raises
    # (the original open()/close() pair leaked the file on error).
    with open(outfile, "w") as out:
        for line in lines:
            out.write(line + " \n")
def make_array(data):
    """Flatten the BVH node list into text lines via an explicit depth-first walk.

    Inner nodes contribute a ``box <id>`` line; leaf nodes contribute a
    ``box <id>`` line followed by one ``triangle [[x y z],[x y z],[x y z]]``
    line per triangle.  Child entries hold indices into *data*.
    """
    lines = []
    pending = [data[0]]  # nodes still to visit; front of the list is the top
    while pending:
        node = pending.pop(0)
        if "children" in node:
            left_idx, right_idx = node["children"]
            # Depth-first: push the right child, then the left on top of it,
            # so the left subtree is fully emitted first.
            pending.insert(0, data[right_idx])
            pending.insert(0, data[left_idx])
            lines.append("box " + str(node["id"]))
        elif "triangles" in node:
            lines.append("box " + str(node["id"]))
            for tri in node["triangles"]:
                corners = ",".join(
                    "[" + " ".join('{:f}'.format(coord) for coord in corner) + "]"
                    for corner in (tri[0], tri[1], tri[2])
                )
                lines.append("triangle [" + corners + "]")
        else:
            print("wtf?")
    return lines
# write to file format
# box -> root
# box -> root child 1
# triangle
# triangle
# triangle
# box -> root child 2
# box -> child2 child
# triangle
# triangle
# triangle
# box -> child2 child
# triangle
# triangle
# triangle
| true
|
247f26de58c53246d4f814c439b03829a022cf98
|
Python
|
todokku/deepsea-frameextractor
|
/src/main/extractor.py
|
UTF-8
| 18,390
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
__author__ = "Danelle Cline, Nathanial Yee"
__copyright__ = "Copyright 2020, MBARI"
__credits__ = ["MBARI"]
__license__ = "GPL"
__maintainer__ = "Danelle Cline"
__email__ = "dcline at mbari.org"
__doc__ = '''
Utility module for converting video to still frames and deinterlacing using the ffmpeg module
@author: __author__
@status: __status__
@license: __license__
'''
import logging
from logging import handlers
import sys
import os
import utils
import multiprocessing
import subprocess
import cv2
import signal
import re
from datetime import datetime, timedelta
import glob
class Extractor():
    """Extracts still PNG frames from a video with ffmpeg.

    Frames are written under ``<output_dir>/<video key>/imgs``.  Deinterlacing
    is done either by dropping fields ('drop'), with ffmpeg's yadif filter
    ('yadif'), or not at all (any other value).  When possible, the recording
    start time and dive name are recovered from the video file name and
    stamped into each frame's PNG metadata with exiftool.
    """

    def __init__(self, input_video_path, output_dir, deinterlace='drop', step=5, duration=1000, start=None, end=None, prefix=None):
        '''
        the Extractor class contains all the necessary information to extract
        images from a video using ffmpeg. By default, it will extract a frame
        every step (5) seconds from the video
        :param input_video_path: full path to the video
        :param output_dir: directory to store transcoded frames to
        :param deinterlace: 'drop' (drop fields), 'yadif' (ffmpeg filter), anything else for none
        :param step: extract a frame every 'step' seconds
        :param duration: duration in milliseconds to extract every step seconds
        :param start: starting time to extracting images
        :param end: ending time to finish extracting images
        :param prefix: frame starting prefix to prepend to extracted frames
        :Example:
        Extract every 5th frame using drop deinterlacing
        Extractor('/Volumes/data/D008_03HD.mov', '/Volumes/data/out/D008_03HD', 'drop', 5)
        '''
        self.input_video_path = input_video_path
        _, fname = os.path.split(input_video_path)
        self.key = fname.split('.')[0]  # video file name without its extension
        self.output_dir = '{0}/{1}/imgs'.format(output_dir, self.key)
        utils.ensure_dir(self.output_dir)
        self.seconds_counter = 0
        self.duration = duration
        self.step = step
        self.start = start
        self.end = end
        self.prefix = prefix
        self.video_length = utils.get_length(input_video_path)
        self.fps = 29.97  # utils.get_framerate(input_video_path)
        self.single_frame = False
        if duration is None:
            self.duration = 1e3/self.fps + 1  # default to a single frame
            self.single_frame = True
        if prefix is None:
            self.prefix = "f"
        # To time methods, uncomment line below
        #self.times = []
        self.deinterlace = deinterlace
        # Recover the recording start time (ISO-8601 token in the name) and
        # the dive identifier (e.g. D0232_20170113T002324Z.mov).
        self.start_iso_time = datetime.now()
        self.dive = 'Unknown'
        p = fname.split('_')
        for f in p:
            if utils.validate_iso8601(f):
                self.start_iso_time = datetime.strptime(f, '%Y%m%dT%H%M%SZ')
                break
        if 'D' in fname:
            p = re.compile(r'(?P<dive>\w+)_(?P<timecode>\w+)?')
            match = re.search(pattern=p, string=fname)
            if (match):
                if match.group('timecode') and utils.validate_iso8601(match.group('timecode')):
                    self.start_iso_time = datetime.strptime(match.group('timecode'), '%Y%m%dT%H%M%SZ')
                else:
                    self.start_iso_time = datetime.now()
                if match.group('dive'):
                    self.dive = match.group('dive')
        print('Dive {} timecode start {} length {} seconds'.format(self.dive, self.start_iso_time, self.video_length))

    def __del__(self):
        """Announce completion (method-timing scaffolding kept for reference)."""
        print('Done')
        # To time methods, uncomment line below
        #if self.times and len(self.times) > 0:
        #    import numpy as np
        #    m = np.array(self.times)
        #    print('Mean {}'.format(m.mean()))

    def extract_all_images(self, start, end):
        '''
        extracts image(s) from video between timecodes and saves it to output_path
        :param start: starting time (datetime offset within the video)
        :param end: ending time
        :return: total frames extracted
        :Example:
        self.extract_all_images('00:11:40', '00:11:41')
        '''
        timecode_str = '{0:02}:{1:02}:{2:02}.{3:03}'.format(start.hour, start.minute, start.second,
                                                            int(start.microsecond / 1e3))
        output_path = '{}/{}%06d.png'.format(self.output_dir, self.prefix)
        frames = int((end - start).total_seconds() * self.fps)
        if self.deinterlace == 'drop':
            shell_string = 'ffmpeg -y -re -loglevel error -accurate_seek -ss {} -i {} -an -frames:v {} {}'.format(
                timecode_str, self.input_video_path, frames, output_path)
            print(shell_string)
            subprocess.call(shell_string, shell=True)
            for i in glob.glob('{}/{}*.png'.format(self.output_dir, self.prefix)):
                # retain 16-bit depth if exists
                img = cv2.imread(i, cv2.IMREAD_UNCHANGED)
                # Keep every other row (drops one field) AND every other
                # column -- presumably to preserve the aspect ratio after the
                # field drop; TODO confirm the intended output resolution.
                de_interlaced_img = img[::2, 1::2]
                cv2.imwrite(i, de_interlaced_img)
        elif self.deinterlace == 'yadif':
            shell_string = 'ffmpeg -y -re -loglevel error -accurate_seek -ss {} -i {} -vf yadif=1:-1:0 -an -frames:v {} {}'.format(
                timecode_str, self.input_video_path, frames, output_path)
            subprocess.call(shell_string, shell=True)
        else:
            # No deinterlacing: a straight frame dump.
            shell_string = 'ffmpeg -y -re -loglevel error -accurate_seek -ss {} -i {} -an -frames:v {} {}'.format(
                timecode_str, self.input_video_path, frames, output_path)
            subprocess.call(shell_string, shell=True)
        return int((end - start).total_seconds() * self.fps)

    def extract_images(self, start, end):
        ''''
        extracts image(s) from video at timecode, saves them to the output
        directory and stamps dive/datetime metadata into each PNG
        :param start: starting time
        :param end: ending time
        :return: number of frames extracted
        :Example:
        self.extract_images('00:11:40', '00:11:41')
        '''
        file_out = open('/dev/null', 'w')
        filename_prefix = '{0}_{1:02}-{2:02}-{3:02}'.format(self.key, start.hour, start.minute, start.second)
        timecode_str = '{0:02}:{1:02}:{2:02}.{3:03}'.format(start.hour, start.minute, start.second, int(start.microsecond/1e3))
        frames = int((end - start).total_seconds() * self.fps)
        if self.single_frame:
            output_path = '{0}/{1}.png'.format(self.output_dir, filename_prefix)
        else:
            output_path = '{0}/{1}_%03d.png'.format(self.output_dir, filename_prefix)
        if self.deinterlace == 'drop':
            shell_string = 'ffmpeg -y -re -loglevel error -accurate_seek -ss {} -i {} -frames:v {} -an {}'.format(
                timecode_str, self.input_video_path, frames, output_path)
            subprocess.call(shell_string, shell=True, stdout=file_out, stderr=file_out)
            # BUGFIX: previously globbed 'f*.png', which silently skipped
            # deinterlacing unless the video key happened to start with 'f';
            # match the files this call actually produced.
            for i in glob.glob('{}/{}*.png'.format(self.output_dir, filename_prefix)):
                # retain 16-bit depth if exists (consistent with extract_all_images)
                img = cv2.imread(i, cv2.IMREAD_UNCHANGED)
                de_interlaced_img = img[::2, 1::2]
                cv2.imwrite(i, de_interlaced_img)
        elif self.deinterlace == 'yadif':
            shell_string = 'ffmpeg -y -re -loglevel error -accurate_seek -ss {} -i {} -vf yadif=1:-1:0 -frames:v {} -an {}'.format(
                timecode_str, self.input_video_path, frames, output_path)
            subprocess.call(shell_string, shell=True, stdout=file_out, stderr=file_out)
        else:
            shell_string = 'ffmpeg -y -re -loglevel error -accurate_seek -ss {} -i {} -frames:v {} -an {}'.format(
                timecode_str, self.input_video_path, frames, output_path)
            print(shell_string)
            subprocess.call(shell_string, shell=True, stdout=file_out, stderr=file_out)
        # Stamp each extracted frame with the dive and an interpolated
        # timestamp, advancing one frame period per image.
        inc_microseconds = int(1e6/self.fps)
        dt = self.start_iso_time + timedelta(seconds=start.second)
        for i in range(frames):
            if self.single_frame:
                output_png = '{0}/{1}.png'.format(self.output_dir, filename_prefix)
            else:
                output_png = '{0}/{1}_{2:03}.png'.format(self.output_dir, filename_prefix, i+1)
            s = dt.strftime('%Y%m%dT%H%M%S.%f')
            dt_iso_str = s[:-3] + 'Z'  # here we simply truncate; this may be off by half a millisecond
            print('Datetime {}'.format(dt_iso_str))
            shell_string = 'exiftool -config /app/mbari.config -PNG:Dive="{}" -PNG:Datetime="{}" {}'.format(self.dive, dt_iso_str, output_png)
            print(shell_string)
            subprocess.call(shell_string, shell=True, stdout=file_out, stderr=file_out)
            # exiftool leaves a *_original backup behind; drop it.
            original = '{}_original'.format(output_png)
            os.remove(original)
            dt += timedelta(microseconds=inc_microseconds)
        return frames

    def process_video(self):
        '''
        extract all the frames from the video configured in __init__
        :return: True on success, False if any exception was raised
        '''
        try:
            # if not stepping through incrementally, process the whole range
            if self.step is None:
                if self.end:
                    end = self.end
                    #TODO put in check if this is within bounds of ending
                else:
                    # NOTE(review): self.duration is documented in milliseconds,
                    # so *1e3 treats it as seconds here -- confirm intent.
                    end = self.start + timedelta(milliseconds=self.duration*1e3)
                start_str = self.start.strftime('%H:%M:%S.%f')[:-3]
                end_str = end.strftime('%H:%M:%S.%f')[:-3]
                print('Extracting image frames {} from {} to {} and saving to {}'.format(self.input_video_path,
                                                                                        start_str, end_str,
                                                                                        self.output_dir))
                total = self.extract_all_images(self.start, end)
            else:
                # BUGFIX: was hour*3600 * minute*60 + second, which multiplied
                # hours by minutes; H:M:S converts to seconds by summing.
                self.seconds_counter += self.start.hour*3600 + self.start.minute*60 + self.start.second
                start = self.start
                end = start + timedelta(milliseconds=self.duration)
                total = 0
                while self.seconds_counter < self.video_length:
                    # To time methods, uncomment line below
                    # import timeit
                    #func = utils.wrapper(self.extract_images, output_path, timecode)
                    #self.times.append(timeit.timeit(func, number=1))
                    if self.end and end > self.end:
                        break
                    start_str = start.strftime('%H:%M:%S.%f')[:-3]
                    end_str = end.strftime('%H:%M:%S.%f')[:-3]
                    # BUGFIX: format arguments were in the wrong order (path
                    # printed as the end time); align args with the message.
                    print('Extracting image frame {} from {} to {} and saving to {}'.format(self.input_video_path,
                                                                                            start_str, end_str,
                                                                                            self.output_dir))
                    frames = self.extract_images(start, end)
                    start = start + timedelta(seconds=self.step)
                    end = start + timedelta(milliseconds=self.duration)
                    self.seconds_counter += self.step
                    print('Seconds {}'.format(self.seconds_counter))
                    total += frames
            print('Extracted {} total frames'.format(total))
            return True
        except Exception as ex:
            print(ex)
            return False
def process_command_line():
import argparse
from argparse import RawTextHelpFormatter
examples = 'Examples:' + '\n\n'
examples += sys.argv[0] + "-s 'python extractor.py' " \
"-i /Volumes/DeepLearningTests/benthic/ " \
"-o /Volumes/Tempbox/danelle/benthic/" \
"-k D0232_03HD_10s \n"
examples += sys.argv[0] + "-s 'python extractor.py' " \
"-i /Volumes/DeepLearningTests/benthic/D0232_03HD_10s.mov " \
"-o /Volumes/Tempbox/danelle/benthic/ \n"
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
description='Extract still jpeg frames from video',
epilog=examples)
parser.add_argument('-i', '--input', action='store', help='full path to base directory where video files are, or single video file. If using directory, specify glob to search for files', default='', required=True)
parser.add_argument('-o', '--output_dir', action='store', help='full path to output directory to store frames', default='', required=False)
# default to drop fields interlace if none specified
parser.add_argument('-d', '--deinterlace', action='store', help='deinterlace choice, drop = drop fields, yadif = ffmpeg filter', required=False)
parser.add_argument('-g', '--glob', action='store', help='List of glob search parameters to use to find files', nargs='*', required=False)
parser.add_argument('-m', '--milliseconds', action='store', help='Number of milliseconds to capture every step', required=False, type=int)
parser.add_argument('-s', '--step', action='store', help='Step size in seconds to grab a frame', required=False, type=int)
parser.add_argument('-t', '--start', action='store', help='Start time in HH:MM:SS format', default="00:00:00", required=False)
parser.add_argument('-e', '--end', action='store', help='End time in HH:MM:SS format', required=False)
parser.add_argument('-p', '--prefix', action='store', help='Optional prefix to prepend to extracted filest', required=False)
args = parser.parse_args()
return args
def process_video(video, output_dir, deinterlace, milliseconds, step, start_time=None, end_time=None, prefix='f'):
    '''
    Run one extraction job for a single video file.
    :param video: absolute path or url to the input video to be processed
    :param output_dir: absolute path of the output directory
    :param deinterlace: deinterlacing method: drop or yadif
    :param milliseconds: total milliseconds to extract every step seconds
    :param step: step through the video this many seconds at a time
    :param start_time: time at which to begin extracting images
    :param end_time: time at which to stop extracting images
    :param prefix: prefix prepended to each extracted frame file
    :return: True on success, False on failure
    :Example:
    process_video('/Volumes/DeepLearningTests/benthic/D0232_03HD_10s/D0232_03HD_10s.mov',
                  '/Volumes/Tempbox/danelle/benthic/D0232_03HD_10s)
    '''
    print("Starting: {} saving to {} using deinterlacing method {} prefix {}".format(video, output_dir, deinterlace, prefix))
    job = Extractor(input_video_path=video,
                    output_dir=output_dir,
                    deinterlace=deinterlace,
                    step=step,
                    duration=milliseconds,
                    start=start_time,
                    end=end_time,
                    prefix=prefix)
    succeeded = job.process_video()
    print("Finished: {}".format(video))
    return succeeded
def process_helper(args):
    """Multiprocessing adapter: unpack one job tuple and forward it to process_video."""
    print('Running process helper with args {}'.format(args))
    result = process_video(*args)
    return result
def sigterm_handler(signum, frame):
    """SIGTERM handler: announce completion and exit cleanly.

    Parameters renamed from the original (signal, frame): naming the first
    parameter ``signal`` shadowed the ``signal`` module inside the handler.
    Signal handlers are invoked positionally, so callers are unaffected.
    :param signum: signal number delivered by the OS
    :param frame: current stack frame at delivery time
    """
    print("extractor.py done")
    exit(0)
if __name__ == '__main__':
    args = process_command_line()
    if len(args.input) < 1:
        print ('Need to specify input directory -i or --input option')
        exit(-1)
    # Parse optional start/end times, accepting fractional seconds when present.
    start_time = None
    end_time = None
    if args.start:
        if '.' in args.start:
            start_time = datetime.strptime(args.start, '%H:%M:%S.%f')
        else:
            start_time = datetime.strptime(args.start, '%H:%M:%S')
    if args.end:
        if '.' in args.end:
            end_time = datetime.strptime(args.end, '%H:%M:%S.%f')
        else:
            end_time = datetime.strptime(args.end, '%H:%M:%S')
    # Default the output directory to the current working directory.
    if not args.output_dir:
        output_dir = os.getcwd()
    else:
        output_dir = args.output_dir
    deinterlace_choices = ['drop', 'yadif']
    if args.deinterlace and args.deinterlace not in deinterlace_choices:
        print('{} not in {}'.format(args.deinterlace, deinterlace_choices))
        exit(-1)
    signal.signal(signal.SIGTERM, sigterm_handler)
    utils.ensure_dir(output_dir)
    try:
        if args.glob:
            # With glob patterns, fan the matched videos out over all CPUs but one.
            print('CPU pool count {}; using {} CPUs'.format(multiprocessing.cpu_count(), multiprocessing.cpu_count() - 1))
            pool = multiprocessing.Pool(processes=multiprocessing.cpu_count() - 1)
            for pattern in args.glob:
                video_files = glob.iglob('{}/{}'.format(args.input,pattern))
                print(video_files)
                # One argument tuple per video, consumed by process_helper.
                process_args = [(f, output_dir, args.deinterlace, args.milliseconds, args.step, start_time, end_time, args.prefix) for f in video_files]
                results = pool.map(process_helper, process_args)
        else:
            # Single input file: process it in this process.
            process_video(args.input, output_dir, args.deinterlace, args.milliseconds, args.step, start_time, end_time, args.prefix)
    except Exception as ex:
        print(ex)
| true
|
543931a3694b8f78753b64d48ef72022a232e6f2
|
Python
|
xiaosean/leetcode_python
|
/Q692_Top-K-Frequent-Words.py
|
UTF-8
| 256
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
class Solution:
    def topKFrequent(self, words: list[str], k: int) -> list[str]:
        """Return the k most frequent words, most frequent first; ties are
        broken lexicographically (smallest word first).

        :param words: the words to rank
        :param k: how many to return (assumed <= number of distinct words)
        """
        # Local import keeps this file runnable outside the LeetCode sandbox,
        # where Counter is pre-injected into the global namespace.
        from collections import Counter
        counts = Counter(words)
        # Negate the count so a single ascending sort handles both criteria.
        ranked = sorted(counts, key=lambda w: (-counts[w], w))
        return ranked[:k]
| true
|
f0fb22e559b1c9efc8ebe8b8e12105f7507017f8
|
Python
|
lisennku/web_spider
|
/practise 1: simplified web scraper.py
|
UTF-8
| 3,173
| 3.28125
| 3
|
[] |
no_license
|
#coding: utf-8
# update: 1.去掉了与书籍相关字符串左右的空格
# update: 2.采用了OOP的方式进行统计
# update:3 利用各个书籍的链接去统计详细信息,并分别写入文件中
'''
针对豆瓣读书中“新书速递”(URL:http://book.douban.com/latest?icn=index-latestbook-all)进行简单的爬虫,并生成TXT文件,
纪录书名、作者以及短评
简单的实现,仍然需要对代码进行优化,现存问题是由于网页源代码中含有过多的换行符('\n'),由此产生的正则表达式结果也会有影响,
需要去解决
'''
# coding: utf-8
import re
import requests
import os
import time
class Douban(object):
    """Scraper for douban.com's "new books" page (Python 2 / requests)."""
    def __init__(self):
        print u'这次是详细的喽~~~'
    # Fetch the page source; the header mimics a real browser so the server
    # does not refuse the request (fill in 'cookie' with your own if needed).
    def get_html(self, url):
        header = {
            'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding':'gzip, deflate, sdch',
            'Accept-Language':'zh-CN,zh;q=0.8,en;q=0.6',
            'Cache-Control':'max-age=0',
            'Connection':'keep-alive',
            #'cookie':
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.85 Safari/537.36',
            'Host': 'book.douban.com',
            'Upgrade-Insecure-Requests': '1'
        }
        response = requests.get(url, headers=header)
        return response.text
    # Collect each book's detail-page URL from the listing page via regex.
    def get_detailed_url(self, html):
        link = []
        l = re.findall('(http://book.douban.com/subject/[0-9]{8})', html, re.S)
        print len(l)
        for i in l:
            i = i.encode('utf-8') # the raw regex matches are unicode strings
            link.append(i)
        print len(link)
        return link
    # Scrape one book's title, author and translators from its detail page.
    def get_book_detail(self, book_url):
        book_info = {}
        book_html = requests.get(book_url)
        book_info['Title'] = re.search('<span property="v:itemreviewed">(.*?)</span>', book_html.text, re.S).group(1)
        People = re.findall('<a class="" href="/search/.*?">(.*?)</a>', book_html.text, re.S)
        book_info['Author'] = People[0]
        book_info['Translator'] = People[1:]
        return book_info
    # Save one book's info to its own TXT file, named after the title.
    def save_info(self, book_info):
        filename = book_info['Title'] + '.txt'
        f = open(filename, 'w')
        f.writelines(('title:' + ''.join(book_info['Title']) +'\n').encode('utf-8'))
        f.writelines(('author' + ''.join(book_info['Author']) + '\n').encode('utf-8'))
        f.writelines(('tanslator' + ''.join(book_info['Translator']) + '\n').encode('utf-8'))
        f.close()
# Scrape every book linked from the "new books" page into the working
# directory, one TXT file per book, pausing 1s between requests.
os.chdir('C:\Users\zhangla\PycharmProjects\PydoubanFM\dbbook')
db = Douban()
url = "http://book.douban.com/latest?icn=index-latestbook-all"
html = db.get_html(url)
link = db.get_detailed_url(html)
for item in link:
    print "now system is processing ",
    print item
    each = db.get_book_detail(item)
    db.save_info(each)
    time.sleep(1)
| true
|
10e288e9ccba90b5bc31eb71380adfc42fca69ec
|
Python
|
alanbato/proyecto-sisops
|
/SRT.py
|
UTF-8
| 6,078
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
'''
Sistemas Operativos LuJu 10
Proyecto Final
Equipo #8
SRT:
Se corre el proceso con el menor tiempo estimado restante (expropiativo).
Si llega un proceso con un menor tiempo restante expulsa al proceso en ejecucion.
'''
import iohelp as io
def srt_scheduling(setup, processes):
    '''SRT (Shortest Remaining Time) scheduling policy: the process with the
    least estimated remaining time runs (preemptive); a newly ready process
    with less remaining time evicts the one currently executing.
    Returns (average turnaround time, average waiting time).'''
    # Unpack the simulator parameters.
    policy, context_switch, cpus = setup
    cpu_dict = {'cpu_{}'.format(i): None for i in range(1, cpus + 1)}
    context_dict = {'cpu_{}'.format(i): 0 for i in range(1, cpus + 1)}
    # Make sure the requested policy really is SRT.
    assert(policy == 'SRT')
    # Ready queue, kept ordered by SRT.
    processes_ready = []
    # Queue of processes blocked on I/O.
    processes_blocked = []
    # Processes that have already finished executing.
    processes_finished = []
    # Total elapsed simulation time (one loop iteration = one time unit).
    time = 0
    # Table used to display the per-tick results.
    output_table = io.OutputTable(num_cpus=cpus)
    # Helper that reports how many processes are still unfinished anywhere.
    def pending_processes():
        return (len(processes) +
                len(processes_ready) +
                len(processes_blocked) +
                len([proc for proc in cpu_dict.values()
                     if proc is not None]))
    # Run the simulation until every process has finished.
    while pending_processes() > 0:
        for cpu, process in cpu_dict.items():
            if process is not None:
                # Pay down any pending context-switch delay first.
                if context_dict[cpu] > 0:
                    context_dict[cpu] -= 1
                    continue
                else:
                    process.remaining_time -= 1
                # Rules for taking a process off the cpu:
                # it finished during this time unit.
                if process.remaining_time == 0:
                    cpu_dict[cpu] = None
                    process.finish_time = time
                    processes_finished.append(process)
                # It has pending I/O, so block it.
                elif process.has_io():
                    cpu_dict[cpu] = None
                    process.perform_io()
                    processes_blocked.append(process)
                # Preempt if a ready process has less remaining time (the
                # ready queue is sorted descending, so [-1] is the smallest).
                elif (len(processes_ready) > 0 and process.remaining_time >
                        processes_ready[-1].remaining_time):
                    cpu_dict[cpu] = None
                    processes_ready.append(process)
        # Move processes whose I/O completed back to the ready queue.
        for process in processes_blocked:
            process.io_operation_duration -= 1
            if process.io_operation_duration < 0:
                processes_ready.append(process)
        # Drop everything that finished its I/O from the blocked list.
        processes_blocked = [process for process in processes_blocked
                             if process.io_operation_duration >= 0]
        # Admit processes whose arrival time has passed.
        for process in processes:
            if process.arrival_time <= time:
                processes_ready.append(process)
        # Remove the processes that were just admitted.
        processes = [process for process in processes
                     if process not in processes_ready]
        # Sort the ready queue by remaining execution time (SRT); break ties
        # with arrival time, then PID.  reverse=True so pop() takes the
        # smallest element off the end.
        processes_ready.sort(key=lambda x: (x.remaining_time,
                                            x.arrival_time,
                                            x.pid),
                             reverse=True)
        # Load the first "N" elements of the ready queue, where "N" is the
        # number of free CPUs; each load pays the context-switch cost.
        for cpu, slot in cpu_dict.items():
            if slot is None and len(processes_ready) > 0:
                cpu_dict[cpu] = processes_ready.pop()
                context_dict[cpu] = context_switch
        # Snapshot every queue into this tick's row of the output table.
        output_data = {}
        waiting_status_list = [proc.status() for proc in processes]
        output_data['waiting'] = [', '.join(waiting_status_list)]
        ready_status_list = [proc.status() for proc in processes_ready]
        output_data['ready'] = [', '.join(ready_status_list)]
        # Add the cpus to the output row.
        for cpu, process in cpu_dict.items():
            if process is not None:
                output_data[cpu] = [process.status()]
            else:
                output_data[cpu] = ''
        blocked_status_list = [proc.status() for proc in processes_blocked]
        output_data['blocked'] = [', '.join(blocked_status_list)]
        output_table.update(output_data)
        time += 1
    # Print the full trace of the policy.
    print(output_table)
    # Print each process' turnaround time and the average turnaround time.
    turnaround_time_total = 0
    print("Turnaround Times:")
    for process in processes_finished:
        turnaround_time_total += process.finish_time - process.arrival_time
        print("Process {}: {}".format(process.pid,
                                      process.finish_time - process.arrival_time))
    print("Average: {}".format(turnaround_time_total / len(processes_finished)))
    print("\n")
    # Print each process' waiting time and the average waiting time
    # (turnaround minus CPU time minus any time spent in I/O).
    wait_time_total = 0
    print("\nWaiting Times:")
    for process in processes_finished:
        wait_time = process.finish_time - process.arrival_time - process.cpu_time
        if process.io_operations is not None:
            wait_time -= sum(process.io_operations.values())
        wait_time_total += wait_time
        print("Process {}: {}".format(process.pid, wait_time))
    print("Average: {}".format(wait_time_total / len(processes_finished)))
    print("\n")
    return (turnaround_time_total / len(processes_finished),
            wait_time_total / len(processes_finished))
| true
|
02130d5c8fde80a3cd0043fc51975079c14fba17
|
Python
|
WSHoekstra/experiments_in_reinforcement_learning
|
/experiments/01_cartpole/cartpole.py
|
UTF-8
| 2,007
| 2.71875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# DQN agent playing CartPole-v0: run episodes, store transitions in replay
# memory, learn between steps, and periodically render and checkpoint.
# import os
# os.chdir('C:/Users/Walter/Documents/GitHub/experiments_in_reinforcement_learning')
import gym
import numpy as np
from experiments.agents.dqn import DQNAgent
env = gym.make('CartPole-v0')
state = env.reset()
# Agent sized to the environment; resumes from a saved model when present.
agent = DQNAgent(observation_space_size=env.observation_space.shape[0],
                 action_space_size=env.action_space.n,
                 load_model_from_disk=True,
                 model_filepath='experiments/01_cartpole/model.h5')
done = False
max_n_steps = 2000  # hard cap on steps per episode
render_every_n_episodes = 20  # rendering slows training, so do it rarely
save_model_every_n_episodes = 10
episodes = range(1000)
for episode in episodes:
    episode_rewards = 0
    # Reshape to (1, obs_size): the network expects a leading batch axis.
    state = np.reshape(env.reset(), [1, agent.observation_space_size])
    done = False
    step_i = 0
    while step_i < max_n_steps and not done:
        action = agent.choose_action(state)
        next_state, reward, done, info = env.step(action)
        next_state = np.reshape(next_state, [1,agent.observation_space_size])
        agent.memorybank.commit_experience_to_memory(state, action, reward, next_state, done)
        episode_rewards += reward
        step_i += 1
        state = next_state
        if episode % render_every_n_episodes == 0:
            env.render()
        if episode % render_every_n_episodes != 0:
            agent.learn_from_memories() # learning + rendering = lag
    if done:
        agent.memorybank.commit_rewards_to_memory(episode_rewards)
    calculate_avg_rewards_over_n_episodes = 100
    running_avg_rewards = agent.memorybank.calculate_running_avg_of_recent_rewards(calculate_avg_rewards_over_n_episodes)
    print(f'episode {episode} / epsilon {agent.epsilon} / reward: {episode_rewards} / running avg rewards {running_avg_rewards} ({calculate_avg_rewards_over_n_episodes} episodes)')
    if episode % save_model_every_n_episodes == 0 and episode > 0:
        agent.save_model_to_disk()
env.close()
| true
|
40011d3e5b5eb2be9f5c700d4ccf5c4096434200
|
Python
|
renjieliu/leetcode
|
/0600_0999/606.py
|
UTF-8
| 3,141
| 3.78125
| 4
|
[] |
no_license
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def tree2str(self, root: 'Optional[TreeNode]') -> str:  # O( N | N )
        """Serialize a binary tree to its compact parenthesized preorder form.

        Each node is rendered as ``val(left)(right)``; an empty right subtree
        is omitted, and an empty left subtree is kept as ``()`` only when a
        right subtree exists (so the string stays unambiguous).

        Robustness fix: an empty tree (root is None) now returns "" instead
        of raising AttributeError.
        """
        def serialize(node):
            # Empty subtree contributes nothing by itself.
            if node is None:
                return ""
            text = str(node.val)
            # Emit the left subtree whenever either child exists: a bare
            # "()" placeholder disambiguates a right-only node.
            if node.left or node.right:
                text += "(" + serialize(node.left) + ")"
            if node.right:
                text += "(" + serialize(node.right) + ")"
            return text
        return serialize(root)
# previous solution
# # Definition for a binary tree node.
# # class TreeNode:
# # def __init__(self, val=0, left=None, right=None):
# # self.val = val
# # self.left = left
# # self.right = right
# class Solution:
# def tree2str(self, root: 'Optional[TreeNode]') -> str: # O( N | N )
# def helper(output, node):
# output[0] += str(node.val) if node else ""
# if node.left and node.right: #if both subtree exist
# output[0] += "("
# helper(output, node.left)
# output[0] += ")"
# output[0] += "("
# helper(output, node.right)
# output[0] += ")"
# elif node.left == None and node.right: #only right subtree exists
# output[0] += "()"
# output[0] += "("
# helper(output, node.right)
# output[0] += ")"
# elif node.left and node.right == None: #only left subtree exists
# output[0] += "("
# helper(output, node.left)
# output[0] += ")"
# output = [""]
# helper(output, root)
# return output[0]
# previous solution
# # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# class Solution:
# def tree2str(self, t: TreeNode) -> str:
# def dfs(output, node):
# if node.left == None and node.right == None:
# output.append(str(node.val))
# else:
# output.append(str(node.val))
# if node.left != None:
# output.append("(")
# dfs(output, node.left)
# output.append(")")
# else:
# output.append("()")
# if node.right != None:
# output.append("(")
# dfs(output, node.right)
# output.append(")")
# if t == None: return ""
# output = []
# dfs(output, t)
# return "".join(output)
| true
|
ea55f70ff55535b0a7007647ec6f1a09f29db5d5
|
Python
|
mihahauke/scinol_icml2019
|
/plot_distributions.py
|
UTF-8
| 1,875
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
from distributions import normal_dist_outliers, normal_scaled
import numpy as np
from argparse import ArgumentParser
from matplotlib import pyplot as plt
import seaborn as sns
plt.style.use("ggplot")
def plot_dist(x, labels, probs, jitter, alpha, name):
    """Show diagnostic plots for one synthetic dataset: a histogram of the
    label probabilities and a strip plot of the feature columns.

    :param x: feature matrix (one strip per column in the seaborn plot)
    :param labels: binary labels (only used by the commented-out scatter)
    :param probs: per-sample probability of the positive label
    :param jitter: whether to jitter points in the strip plot
    :param alpha: point transparency
    :param name: window-title prefix for the figures
    """
    # Label noise = mean distance of each prob from a hard 0/1 decision.
    print("Noise:",np.mean(np.minimum(probs, 1 - probs)))
    # plt.scatter(probs, labels, alpha=0.1)
    # plt.gcf().canvas.set_window_title(name + " probs/labels")
    # plt.yticks([0, 1])
    # plt.xlabel("probability")
    # plt.ylabel("label")
    # plt.show()
    # NOTE(review): `normed=True` was removed in matplotlib 3.x in favor of
    # `density=True`; this call fails on modern matplotlib — confirm version.
    plt.hist(probs, bins=20, normed=True)
    plt.xlabel("(probability dist)")
    plt.gcf().canvas.set_window_title(name + "probabilities")
    plt.show()
    sns.stripplot(data=x,
                  jitter=jitter,
                  alpha=alpha)
    plt.gcf().canvas.set_window_title(name + "(features)")
    plt.show()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-n", "--size", default=10000, help="dataset size", type=int)
parser.add_argument("-f", "--num-features", default=10, type=int)
parser.add_argument("-s", "--seed", default=None)
parser.add_argument("-nj", "--no-jitter", dest="no_jitter", action="store_true")
parser.add_argument("-a", "--alpha", default=0.5, type=float)
args = parser.parse_args()
def plot(dist_func,name, **kwargs):
x, labels, probs = dist_func(
size=args.size,
num_features=args.num_features,
loc=0,
seed=args.seed,
return_probs=True,
**kwargs)
plot_dist(x,
labels,
probs,
not args.no_jitter,
alpha=args.alpha,
name=name)
plot(normal_dist_outliers,name="Normal with outliers")
# plot(normal_scaled, name="Normal scaled 2^10", max_exponent=10)
| true
|
d4357cfac8facf3e6e56ea419751e6a6a1c717dc
|
Python
|
rzhou10/Leetcode
|
/300/349.py
|
UTF-8
| 196
| 3.0625
| 3
|
[] |
no_license
|
'''
Intersection of Two Arrays
Runtime: 40 ms
'''
class Solution:
    def intersection(self, nums1: list[int], nums2: list[int]) -> list[int]:
        """Return the distinct values present in both lists, in no particular
        order (set semantics: duplicates collapse to one occurrence).

        Annotations use builtin ``list[int]``: the original ``List[int]``
        raised NameError outside LeetCode, where ``typing.List`` is injected.
        """
        return list(set(nums1) & set(nums2))
| true
|
8441a11e3aead242f895cce7aa2b8379813b68bd
|
Python
|
camilogs1/Scripts_UCLA
|
/Taller1/Parte1/punto3.py
|
UTF-8
| 503
| 3.8125
| 4
|
[] |
no_license
|
# Read the hours worked and the hourly rate, then report gross pay, the 31%
# deductions, and net pay; net salaries of 300 or less earn a 20% bonus.
horas = float(input("Ingrese el total de horas trabajadas: "))
valor = float(input("Ingrese el valor por cada hora: "))
sueldo = horas * valor
print("El salario bruto es de: ", sueldo)
deducciones = sueldo * 0.31
print("Las deducciones son de: ", deducciones)
sueldo = sueldo - deducciones
if sueldo > 300:
    print("El salario neto es de: ", sueldo)
else:
    bonificacion = sueldo * 0.2
    print("El salario neto sin la bonificación es de: ", sueldo)
    sueldo = sueldo + bonificacion
    print("El salario neto más la bonificación es de: ", sueldo)
| true
|
fd7b47e7ef5e60f76f0358134b069d1a81e40215
|
Python
|
DrDrei/Project-Ueler-Solutions
|
/python/Problem024.py
|
UTF-8
| 599
| 3.171875
| 3
|
[] |
no_license
|
import sys
import math
numbers = list(range(0,10))
def inc(numbers):
    """Advance *numbers* in place to its next lexicographic permutation and
    return it (standard next-permutation algorithm).

    Robustness: if *numbers* is already the last permutation (fully
    descending), it wraps around to the first (sorted ascending); the
    original implementation looped incorrectly in that case.  The quadratic
    remove/append shuffling and the unused ``maxi`` local are gone.

    :param numbers: mutable sequence of comparable items; mutated in place
    :return: the same list object, now holding the next permutation
    """
    # Find the rightmost ascent: numbers[pivot] < numbers[pivot + 1].
    pivot = len(numbers) - 2
    while pivot >= 0 and numbers[pivot] >= numbers[pivot + 1]:
        pivot -= 1
    if pivot < 0:
        # Fully descending: wrap around to the smallest permutation.
        numbers.reverse()
        return numbers
    # Find the rightmost element greater than the pivot value and swap.
    succ = len(numbers) - 1
    while numbers[succ] <= numbers[pivot]:
        succ -= 1
    numbers[pivot], numbers[succ] = numbers[succ], numbers[pivot]
    # The suffix is descending; reversing it yields the smallest tail.
    numbers[pivot + 1:] = reversed(numbers[pivot + 1:])
    return numbers
# Advance the identity permutation 999,999 times: the result is the
# millionth lexicographic permutation of the digits 0-9 (Project Euler 24).
count = 0
while count != 999999:
    inc(numbers)
    count += 1
print(numbers)
| true
|
87afa4be82aaa18e5f66cfe120e7b7aa6dcaef75
|
Python
|
chrstphrbtchr/01-Introduction
|
/main.py
|
UTF-8
| 865
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
"""Course intro script: verify the interpreter, clear the screen, print a bio."""
import utils

utils.check_version((3,7))
utils.clear()

print('Hello, my name is Christopher Butcher!')

import random

gamez = [
    'Earthbound',
    'Mother 3',
    'Until Dawn',
    'The Legend of Zelda: A Link to the Past',
    'Castlevania: Symphony of the Night',
    'Super Mario World',
    'Fallout 4',
    'Borderlands 2',
    'Horizon Zero Dawn',
]
# A different favorite is picked on every run.
favgame = random.choice(gamez)
print('My favorite game is', favgame, ', but this will change if you ask me again.')

for line in (
    'I am slightly concerned about keeping up with learning these new languages and concepts.',
    'However, I am excited to learn and to be surrounded by those who can help me!',
    'I enjoy performing and writing music, mostly for myself and my dog.',
    'My stackoverflow.com user number is 11981611',
    'Find me on GITHUB at https://github.com/chrstphrbtchr (That is my name without vowels if that helps) ',
):
    print(line)
| true
|
337340cc9cdd16f2b90ed46a1513decfba546716
|
Python
|
ptq204/LearningPython
|
/ServerClientSelect/client.py
|
UTF-8
| 420
| 2.828125
| 3
|
[] |
no_license
|
# Minimal interactive TCP chat client: connects to port 5000 on localhost and
# alternates blocking receive/send with a server that speaks first.
import sys
import os
# NOTE(review): `sys` and `os` appear unused in this script.
import socket
s = socket.socket()
host = socket.gethostname()
port = 5000
print("client will be connected to {}".format(host))
# NOTE(review): the message above prints the hostname, but the connection
# actually targets the loopback address -- confirm which is intended.
s.connect(('127.0.0.1',port))
print("connected to server")
while(1):
    incoming_msg = s.recv(1024)  # blocking read, up to 1 KiB per message
    incoming_msg = incoming_msg.decode()
    print("server: {}".format(incoming_msg))
    msg = input("your: ")
    msg = msg.encode()
    s.send(msg)
    print("sent")
| true
|
905be0589b5a3638c845bbcf5ed35573dfaf700b
|
Python
|
ZubritskiyAlex/Python-Tasks
|
/HW_10/task 10_03.py
|
UTF-8
| 465
| 3.75
| 4
|
[] |
no_license
|
"""Дан файл, содержащий различные даты. Каждая дата - это число, месяц и
год. Найти самую раннюю дату. [02-8.1-ML-29]"""
from datetime import datetime
import csv
with open("10_03.csv",'r') as csv_file:
csvreader = csv.reader(csv_file)
data = []
for line in csvreader:
data.append(datetime.strptime(line[0], "%d.%m.%Y"))
print(f"The earliest date is {min(data)}")
| true
|
85ca295170863177a9a2c03aa1e921145e65b2eb
|
Python
|
betty29/code-1
|
/recipes/Python/436229_RecordJar_Parser/recipe-436229.py
|
UTF-8
| 2,013
| 3.328125
| 3
|
[
"MIT",
"Python-2.0"
] |
permissive
|
#!/usr/bin/env python
# recordjar.py - Parse a Record-Jar into a list of dictionaries.
# Copyright 2005 Lutz Horn <lutz.horn@gmx.de>
# Licensed unter the same terms as Python.
def parse_jar(flo):
    """Parse a Record-Jar from a file like object into a list of dictionaries.

    This method parses a file like object as described in "The Art of Unix
    Programming" <http://www.faqs.org/docs/artu/ch05s02.html#id2906931>.
    The records are divided by lines containing '%%'. Each record consists of
    one or more lines, each containing a key, a colon, and a value. Whitespace
    around both key and value are ignored.

    >>> import io
    >>> parse_jar(io.StringIO("a:b\\nc:d\\n%%\\nx:y\\n"))
    [{'a': 'b', 'c': 'd'}, {'x': 'y'}]

    If a record contains a key more than once, the value for this key is a list
    containing the values in their order of occurence.

    >>> parse_jar(io.StringIO("a:b\\nc:d\\n%%\\nx:y\\nx:z\\n"))
    [{'a': 'b', 'c': 'd'}, {'x': ['y', 'z']}]

    Leading or trailing separator lines ('%%') and lines containing only
    whitespace are ignored.

    >>> parse_jar(io.StringIO("%%\\na:b\\nc:d\\n%%\\n\\nx:y\\nx:z\\n"))
    [{'a': 'b', 'c': 'd'}, {'x': ['y', 'z']}]

    :param flo: file-like object supporting ``read()``
    :return: list of dicts, one per non-empty record; a key seen more than
        once maps to a list of its values in order of appearance
    """
    records = []
    for record in flo.read().split("%%"):
        fields = {}  # renamed from `dict` to stop shadowing the builtin
        for line in record.split("\n"):
            if not line.strip():
                continue  # skip blank / whitespace-only lines
            key, value = line.split(":", 1)
            key, value = key.strip(), value.strip()
            if key not in fields:
                fields[key] = value
            elif isinstance(fields[key], list):
                fields[key].append(value)
            else:
                # Second occurrence of the key: promote the value to a list.
                fields[key] = [fields[key], value]
        if fields:
            records.append(fields)
    return records
def _test():
    """Run the module's doctests (expects the file to be importable as ``recordjar``)."""
    import doctest, recordjar
    return doctest.testmod(recordjar)

if __name__ == "__main__":
    _test()
| true
|
cce75c91dc41565e359fe6e142de44c5ff800ab2
|
Python
|
jharman25/python-for-scientists
|
/docs/_examples/plot_sns_hist.py
|
UTF-8
| 365
| 3.40625
| 3
|
[] |
no_license
|
"""
Histogram
=============
Example histogram
"""
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set()
# Initialize a figure and axes object
fig, ax = plt.subplots(figsize=(3,3))
# Data
x = np.random.normal(0, 0.1, 1000)
# Add data a scatter points onto axes
ax.hist(x)
# Name axes
ax.set_xlabel('x')
# Show figure.
fig.show()
| true
|
4c4d6b4b5267d61d547b2c87d969703a01592ff1
|
Python
|
laCorse/ReTracker
|
/models/ResNet.py
|
UTF-8
| 1,491
| 2.578125
| 3
|
[] |
no_license
|
from __future__ import absolute_import
import torch
import torchvision
from torch import nn
from torch.nn import functional as F
# For debug
from IPython import embed
class ResNet50(nn.Module):
    """ResNet-50 backbone with a linear classifier head.

    In eval mode ``forward`` always returns the pooled 2048-d feature
    vector; in training mode the return value depends on ``loss``:
    logits for {'softmax'}, features for {'metric'}, and a
    (logits, features) pair for {'softmax', 'metric'}.
    """
    def __init__(self, num_classes, loss = {'softmax', 'metric'}, **kwargs):
        # NOTE(review): the default `loss` is a mutable set shared across
        # calls; it is never mutated here, so this works but is fragile.
        super(ResNet50, self).__init__()
        resnet50 = torchvision.models.resnet50(pretrained=True)
        # Drop the last two layers of the stock network:
        # (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
        # (fc): Linear(in_features=2048, out_features=1000, bias=True)
        self.base = nn.Sequential(*list(resnet50.children())[:-2])
        self.classifier = nn.Linear(2048, num_classes)
        self.loss = loss
    def forward(self, x):
        x = self.base(x)
        x = F.avg_pool2d(x, x.size()[2:])  # global average pool down to 1x1
        x = x.view(x.size(0), -1)          # flatten to (batch, 2048)
        # L2 normalization (left disabled)
        # x = 1. * x / (torch.norm(x, 2, dim=-1, keepdim=True).expand_as(x) + 1e-12)
        if not self.training:
            return x
        if self.loss == {'softmax'}:
            y = self.classifier(x)
            return y
        elif self.loss == {'metric'}:
            return x
        elif self.loss == {'softmax', 'metric'}:
            y = self.classifier(x)
            return y, x
        else:
            raise NotImplementedError("No such loss function!")
# embed()
if __name__ == '__main__':
    # Smoke test: build the model for 751 classes and push one dummy batch
    # of 32 RGB images (3 x 256 x 128) through it.
    model = ResNet50(num_classes=751)
    imgs = torch.Tensor(32, 3, 256, 128)
    f = model(imgs)
| true
|
ea5b48e19dbcead768e17ccce99c7fd2e550c722
|
Python
|
PRIYANSUPULAK/Face_Detector
|
/face_recognition.py
|
UTF-8
| 480
| 2.5625
| 3
|
[] |
no_license
|
# Detect faces in an image given on the command line using OpenCV's
# pre-trained Haar cascade, draw green boxes around them, and show the result.
import cv2
import sys
image_path=sys.argv[1]  # path to the input image (no argument validation)
cascPath = "haarcascade_frontalface_default.xml"  # cascade file expected beside the script
faceCascade=cv2.CascadeClassifier(cascPath)
image=cv2.imread(image_path)
gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)  # the cascade operates on grayscale
faces= faceCascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=9, minSize=(30, 30), flags= cv2.CASCADE_SCALE_IMAGE)
for (x,y,w,h) in faces:
    cv2.rectangle(image, (x,y),(x+w,y+h),(0,255,0),2)  # green box, 2px thick
cv2.imshow("Faces found",image)
#cv2.waitkey(0)
cv2.waitKey(0)  # block until a key press so the window stays open
| true
|
2b1cd918865e93de587e78a2404aa7fbaeb51280
|
Python
|
eguerrero13/MovieTrailerWebsite
|
/entertainment_center.py
|
UTF-8
| 2,679
| 3.3125
| 3
|
[] |
no_license
|
import media
import fresh_tomatoes

# Each media.Movie takes: title, storyline, poster image URL, YouTube trailer URL.
# Create PI movie object
pi = media.Movie("PI: faith in chaos",
                 "The story about a mathematician and the obsession with "
                 "mathematical regularity contrasts two seemingly "
                 "irreconcilable entities: the imperfect, irrational "
                 "humanity and the rigor and regularity of mathematics, "
                 "specifically number theory.",
                 "https://upload.wikimedia.org/wikipedia/en/5/5a/Piposter.jpg",
                 "https://www.youtube.com/watch?v=GsAHXMcXgFA")
# Create School of Rock movie object
school_of_rock = media.Movie("School of Rock",
                             "Using rock music to learn",
                             "https://upload.wikimedia.org/wikipedia/en/1/11/School_of_Rock_Poster.jpg",  # NOQA
                             "https://www.youtube.com/watch?v=XCwy6lW5Ixc")
# Create Dobermann movie object
dobermann = media.Movie("Dobermann",
                        "The charismatic criminal Dobermann, leads a gang of "
                        "brutal robbers.",
                        "https://upload.wikimedia.org/wikipedia/en/1/17/DobermannPoster.jpg",  # NOQA
                        "https://www.youtube.com/watch?v=eijRyGWoSW4")
# Create The Godfather movie object
godfather = media.Movie("The Godfather",
                        "Chronicles the family under the patriarch Vito "
                        "Corleone",
                        "https://upload.wikimedia.org/wikipedia/en/1/1c/Godfather_ver1.jpg",  # NOQA
                        "https://www.youtube.com/watch?v=sY1S34973zA")
# Create Pulp Fiction movie object
pulp_fiction = media.Movie("Pulp Fiction",
                           "Connects intersecting storylines of LA mobsters, "
                           "fringe players and small-time criminals",
                           "https://upload.wikimedia.org/wikipedia/en/3/3b/Pulp_Fiction_%281994%29_poster.jpg",  # NOQA
                           "https://www.youtube.com/watch?v=s7EdQ4FqbhY")
# Create Requiem For A Dream movie object
requiem_dream = media.Movie("Requiem for a Dream",
                            "The film depicts four different forms of drug "
                            "addiction",
                            "https://upload.wikimedia.org/wikipedia/en/9/92/Requiem_for_a_dream.jpg",  # NOQA
                            "https://www.youtube.com/watch?v=jzk-lmU4KZ4")
# Create array and add movies to it
movies = [dobermann, godfather,
          pi, school_of_rock,
          pulp_fiction, requiem_dream]
# Create webpage with favorite movies array
fresh_tomatoes.open_movies_page(movies)
| true
|
9dcc31941d937312d6f3a834f4c26884a1bcb4ee
|
Python
|
jie-meng/jie-meng.github.io
|
/tools/image2webp.py
|
UTF-8
| 716
| 2.71875
| 3
|
[] |
no_license
|
import os
import shlex
def findFilesRecursively(path, pred=None, ls=None):
    """Collect files under *path*, descending into subdirectories.

    :param path: directory to walk
    :param pred: optional predicate on the full file path; when given, only
        matching files are collected
    :param ls: optional list to extend with results (also returned); a
        fresh list is created when omitted -- never use a mutable default
    :return: list of full paths of the matching files
    """
    if ls is None:  # was `ls == None`; identity is the correct None check
        ls = []
    for name in os.listdir(path):
        full = os.path.join(path, name)
        if os.path.isdir(full):
            findFilesRecursively(full, pred, ls)
        elif os.path.isfile(full) and (pred is None or pred(full)):
            ls.append(full)
    return ls
def processImagesInDirRecursively(dir):
    """Losslessly convert every PNG/JPEG under *dir* to WebP via ``cwebp``.

    The output file is written next to the input with a ``.webp`` extension.
    Fixes two defects: the old suffix test used ``endswith('jpg')`` /
    ``endswith('jpeg')`` without a leading dot (matching names like
    ``notajpg``), and the shell command broke on paths containing spaces.
    """
    ls = findFilesRecursively(
        dir, lambda x: x.lower().endswith(('.png', '.jpg', '.jpeg')))
    for f in ls:
        print('\n\n>>> Process {} ... '.format(f))
        target = '{}.webp'.format(os.path.splitext(f)[0])
        # shlex.quote keeps paths with spaces/metacharacters intact.
        os.system('cwebp -m 6 -pass 10 -mt -lossless -v {0} -o {1}'.format(
            shlex.quote(f), shlex.quote(target)))
| true
|
6727bdc422920e3e15e9e2a1708eb0cfd0380f25
|
Python
|
lucasfazzib/covid19_python_chart
|
/project1.py
|
UTF-8
| 3,732
| 2.96875
| 3
|
[] |
no_license
|
from typing import final
import requests as r
import datetime as dt
import csv
from PIL import Image
from IPython.display import display
from urllib.parse import quote
# Fetch the full day-one COVID-19 time series for Brazil.
url = 'https://api.covid19api.com/dayone/country/brazil'
resp = r.get(url)
#print(resp.status_code)
raw_data = resp.json()
#print(raw_data[0])
#{'ID': '5b679794-2952-4c4c-a873-af6ff457b0fd', 'Country': 'Brazil', 'CountryCode': 'BR', 'Province': '', 'City': '', 'CityCode': '', 'Lat': '-14.24', 'Lon': '-51.93', 'Confirmed': 1, 'Deaths': 0, 'Recovered': 0, 'Active': 1, 'Date': '2020-02-26T00:00:00Z'}
# Keep only the numeric series plus the date, with a Portuguese header row.
final_data = []
for obs in raw_data:
    final_data.append([obs['Confirmed'], obs['Deaths'], obs['Recovered'], obs['Active'], obs['Date']])
final_data.insert(0, ['confirmados', 'obitos', 'recuperados', 'ativos', 'data'])
#print(final_data)
# Column indices for the rows built above.
CONFIRMADOS = 0
OBITOS = 1
RECUPERADOS = 2
ATIVOS = 3
DATA = 4
# Trim the ISO timestamps down to their date part (YYYY-MM-DD).
for i in range(1, len(final_data)):
    final_data[i][DATA] = final_data[i][DATA][:10]
#print(final_data)
#print(dt.time(12, 6, 21, 7), 'Hora:minuto:segundo.microsegundo')
#print('---------')
#print(dt.date(2020, 4, 25), 'Ano-mês-dia')
#print('---------')
#print(dt.datetime(2020, 4, 25, 12, 6, 21, 7), 'Ano-mês-dia Hora:minuto:segundo.microsegundo')
# Scratch examples of date arithmetic (not used below).
natal = dt.date(2020, 12, 25)
reveillon = dt.date(2011, 1, 1)
#print(reveillon - natal)
#print((reveillon - natal).days)
#print((reveillon - natal).seconds)
#print((reveillon - natal).microseconds)
# Dump the table (header included) to CSV.
with open('brasil-covid.csv', 'w') as file:
    writer = csv.writer(file)
    writer.writerows(final_data)
# Re-parse the trimmed date strings into datetime objects for charting.
for i in range(1, len(final_data)):
    final_data[i][DATA] = dt.datetime.strptime(final_data[i][DATA], '%Y-%m-%d')
#print(final_data)
def get_dataset(y, labels):
    """Build the Chart.js ``datasets`` list.

    ``y`` is either a single series (list of values) paired with
    ``labels[0]``, or a list of series paired element-wise with ``labels``.
    """
    if type(y[0]) == list:
        return [{'label': labels[i], 'data': series}
                for i, series in enumerate(y)]
    return [{'label': labels[0], 'data': y}]
def set_title(title=''):
    """Build the Chart.js ``options`` mapping; the title is displayed only
    when a non-empty string is supplied."""
    shown = 'true' if title != '' else 'false'
    return {'title': title, 'display': shown}
def create_chart(x, y, labels, kind='bar', title=''):
    """Assemble a complete Chart.js configuration dict from x labels,
    one or more y series, their legend labels, a chart kind and a title."""
    return {
        'type': kind,
        'data': {
            'labels': x,
            'datasets': get_dataset(y, labels),
        },
        'options': set_title(title),
    }
def get_api_chart(chart):
    """Render a Chart.js config dict via the quickchart.io API and return the image bytes."""
    url_base = 'https://quickchart.io/chart'
    # The dict's repr is passed straight into the query string; quickchart accepts it.
    resp = r.get(f'{url_base}?c={str(chart)}')
    return resp.content
def save_image(path, content):
    """Write raw image bytes to *path* (binary mode)."""
    with open(path, 'wb') as out:
        out.write(content)
def display_image(path):
    """Open the image at *path* with PIL and show it via IPython's display hook."""
    img_pil = Image.open(path)
    display(img_pil)
# Sample every 10th observation (skipping the header row) to keep the chart readable.
y_data_1 = []
for obs in final_data[1::10]:
    y_data_1.append(obs[CONFIRMADOS])
y_data_2 = []
for obs in final_data[1::10]:
    y_data_2.append(obs[RECUPERADOS])
labels = ['Confirmados', 'Recuperados']
x = []
for obs in final_data[1::10]:
    x.append(obs[DATA].strftime('%d/%m/%Y'))
# Build the chart config, render it remotely, save and show the PNG.
chart = create_chart(x, [y_data_1, y_data_2], labels, title='Gráfico Confirmados vs Recuperados')
chart_content = get_api_chart(chart)
save_image('meu-grafico-covid.png', chart_content)
display_image('meu-grafico-covid.png')
def get_api_qrcode(link):
    """Render *link* as a QR code via the quickchart.io API and return the image bytes."""
    text = quote(link)  # percent-encode so the link survives as a query parameter
    url_base = 'https://quickchart.io/qr'
    resp = r.get(f'{url_base}?text={text}')
    return resp.content
# Build a QR code that points at the hosted chart URL and save it locally.
url_base = 'https://quickchart.io/chart'
link = f'{url_base}?c={str(chart)}'
save_image('qr-code.png', get_api_qrcode(link))
| true
|
1a0dc49074129f9c77237499ff5a0a0bc93dc0d7
|
Python
|
dizpers/python-address-book-assignment
|
/address_book/person.py
|
UTF-8
| 3,766
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
__all__ = ['Person']
class Person(object):
    """Address-book entry: a name plus lists of addresses, phone numbers and
    emails, and back-references to the groups the person belongs to.

    NOTE(review): this class targets Python 2 (`dict.iteritems`, `unicode`).
    """
    # Field names that match() is expected to be queried with.
    searchable_fields = ['first_name', 'last_name', 'email', 'emails']

    def __init__(self, first_name, last_name, addresses, phone_numbers, emails):
        """
        Constructor of Person class
        :param first_name: first name of the person
        :type first_name: str or unicode
        :param last_name: last name of the person
        :type last_name: str or unicode
        :param addresses: list of person's addresses (list of strings)
        :type addresses: list
        :param phone_numbers: list of person's phone numbers (list of strings)
        :type phone_numbers: list
        :param emails: list of person's emails (list of strings)
        :type emails: list
        """
        self.first_name = first_name
        self.last_name = last_name
        self.addresses = addresses
        self.phone_numbers = phone_numbers
        self.emails = emails
        self.groups = []

    def add_address(self, address):
        """
        Add the address string to the list of addresses of current person
        :param address: address string to be added
        :type address: str or unicode
        """
        self.addresses.append(address)

    def add_phone_number(self, phone_number):
        """
        Add the phone number string to the list of phone numbers of current person
        :param phone_number: phone number string to be added
        :type phone_number: str or unicode
        """
        self.phone_numbers.append(phone_number)

    def add_email(self, email):
        """
        Add email string to the list of emails of current person
        :param email: email to be added
        :type email: str or unicode
        """
        self.emails.append(email)

    def add_to_group(self, group, update_group=True):
        """
        Connects current person and given group
        :param group: group to be extended with current person instance
        :param update_group: indicates if we also must update give group with current person
        :type group: address_book.Group
        :type update_group: bool
        """
        self.groups.append(group)
        if update_group:
            # update_person=False on the reciprocal call stops the recursion.
            group.add_person(self, update_person=False)

    def match(self, **match_fields):
        """
        Match curren person object with set of fields
        :param match_fields: set of fields to be matched with current instance
        :return: does current person match given set of fields or not
        :rtype: bool
        """
        matches = {}
        for field, value in match_fields.iteritems():
            #TODO: sounds like the hack :3
            # A singular 'email' query becomes a prefix search over `emails`.
            if field == 'email':
                field = 'emails'
                value = [value]
            self_value = getattr(self, field)
            if type(value) == list:
                if field == 'emails':
                    # Every searched email must be a prefix of some stored email.
                    matched = True
                    for search_email in value:
                        for actual_email in self_value:
                            if actual_email.startswith(search_email):
                                break
                        else:
                            # No stored email starts with this search term.
                            matched = False
                            break
                else:
                    # Other list-valued fields match by set containment.
                    matched = set(self_value).issuperset(set(value))
            else:
                matched = self_value == value
            matches[field] = matched
        if all(matches.values()):
            return True
        return False

    def __unicode__(self):
        return u'Person<{first_name} {last_name}>'.format(
            first_name=self.first_name,
            last_name=self.last_name
        )

    def __str__(self):
        return unicode(self)

    def __repr__(self):
        return unicode(self)
| true
|
2621df087984fbba914d70dd5bd22417f814f42f
|
Python
|
matthewmjm/100-days-of-code-days-1-through-10
|
/day4/main.py
|
UTF-8
| 1,275
| 3.890625
| 4
|
[] |
no_license
|
# Rock-paper-scissors against the computer: 1 = rock, 2 = paper, 3 = scissors.
import random
# ASCII drawings for the three hand signs.
rock = '''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
paper = '''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
scissors = '''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
opponent = random.randint(1, 3)  # computer's pick
yourself = int(input("What do you choose? Type 1 for Rock, 2 for Paper, and 3 for Scissors.\n"))
# Map numeric picks to their drawings.
# NOTE(review): any input other than 1 or 2 silently falls through to scissors.
if opponent == 1:
    opponent_choice = rock
elif opponent == 2:
    opponent_choice = paper
else:
    opponent_choice = scissors
if yourself == 1:
    yourself_choice = rock
elif yourself == 2:
    yourself_choice = paper
else:
    yourself_choice = scissors
print(yourself_choice)
print("\nComputer chose:\n")
print(opponent_choice)
# Decide the result: rock beats scissors, paper beats rock, scissors beats paper.
if yourself == 1:
    if opponent == 1:
        print("It is a DRAW!")
    elif opponent == 2:
        print("You LOSE!")
    else:
        print("You WIN!")
elif yourself == 2:
    if opponent == 1:
        print("You WIN!")
    elif opponent == 2:
        print("It is a DRAW!")
    else:
        print("You LOSE!")
else:
    if opponent == 1:
        print("You LOSE!")
    elif opponent == 2:
        print("You WIN!")
    else:
        print("It is a DRAW!")
| true
|
74be2eba6fa3cdabcb8076e6c651fea89420686f
|
Python
|
kamyu104/LeetCode-Solutions
|
/Python/linked-list-random-node.py
|
UTF-8
| 766
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
# Time: O(n)
# Space: O(1)
from random import randint
class Solution(object):
    def __init__(self, head):
        """
        @param head The linked list's head. Note that the head is guanranteed to be not null, so it contains at least one node.
        :type head: ListNode
        """
        self.__head = head

    # Reservoir sampling (k = 1): each visited node replaces the current
    # pick with probability 1 / (nodes seen so far), which makes every node
    # equally likely after a full pass.
    # Proof: https://discuss.leetcode.com/topic/53753/brief-explanation-for-reservoir-sampling
    def getRandom(self):
        """
        Returns a random node's value.
        :rtype: int
        """
        chosen = -1
        node, seen = self.__head, 0
        while node:
            seen += 1
            if randint(1, seen) == 1:
                chosen = node.val
            node = node.next
        return chosen
| true
|
47990e21d654f85580ca9b07ce38535128895112
|
Python
|
ek360/Movie-web-app-with-sql-database
|
/movie/domain/movie.py
|
UTF-8
| 4,256
| 2.890625
| 3
|
[] |
no_license
|
from movie.domain.genre import Genre
from movie.domain.actor import Actor
from movie.domain.director import Director
from movie.domain.review import Review
from datetime import date, datetime
from typing import List, Iterable
class Movie:
    """Movie entity: identity is the dataset ``rank``; carries title, year,
    description, director, cast, genres, runtime, scores and user reviews.

    NOTE(review): ``__eq__`` compares by rank while ``__hash__`` hashes
    (title, year), so equal objects are not guaranteed equal hashes.
    """
    def __init__(self, rank, title: str, year: int, description, director, runtime_minutes, rating, metascore):
        self.__rank = rank
        # Invalid titles/years are normalised to None rather than rejected.
        if title == "" or type(title) is not str:
            self.__title = None
        else:
            self.__title = title.strip()
        if year < 1900:
            self.__year = None
        else:
            self.__year = year
        self.__description: str = description
        self.__director: Director = director
        self.__actors: list[Actor] = []
        self.__genres: list[Genre] = []
        self.__runtime_minutes: int = runtime_minutes
        self.__rating: float = rating
        self.__metascore: str = metascore
        self.__reviews: list[Review] = []

    @property
    def reviews(self) -> Iterable[Review]:
        # Read-only view: callers get an iterator, not the underlying list.
        return iter(self.__reviews)

    @property
    def number_of_reviews(self) -> int:
        return len(self.__reviews)

    @property
    def metascore(self):
        return self.__metascore

    @property
    def rating(self):
        return self.__rating

    @property
    def rank(self):
        return self.__rank

    @property
    def title(self):
        return self.__title

    @title.setter
    def title(self, title):
        # Same normalisation as in __init__: blank/non-str becomes None.
        if title == "" or type(title) is not str:
            self.__title = None
        else:
            self.__title = title.strip()

    @property
    def year(self):
        return self.__year

    @year.setter
    def year(self, year):
        if year < 1900:
            self.__year = None
        else:
            self.__year = year

    @property
    def description(self):
        return self.__description

    @description.setter
    def description(self, description):
        if description == "" or type(description) is not str:
            self.__description = None
        else:
            self.__description = description

    @property
    def director(self):
        return self.__director

    @director.setter
    def director(self, director):
        # A non-Director value clears the field instead of raising.
        if type(director) == Director:
            self.__director = director
        else:
            self.__director = None

    @property
    def actors(self):
        return self.__actors

    @actors.setter
    def actors(self, actors):
        # NOTE(review): appends to the existing list rather than replacing it.
        for act in actors:
            if type(act) == Actor:
                self.__actors.append(act)

    @property
    def genres(self):
        return self.__genres

    @genres.setter
    def genres(self, genres):
        # NOTE(review): appends to the existing list rather than replacing it.
        for gen in genres:
            if type(gen) == Genre:
                self.__genres.append(gen)

    @property
    def runtime_minutes(self):
        return self.__runtime_minutes

    @runtime_minutes.setter
    def runtime_minutes(self, runtime_minutes):
        if runtime_minutes > 0:
            self.__runtime_minutes = runtime_minutes
        else:
            raise ValueError("runtime should be a positive number")

    def __repr__(self):
        return f"<Movie {self.__title}, {self.__year},{self.__rank}>"

    def __eq__(self, other):
        # Equality is based solely on the dataset rank.
        return self.__rank == other.__rank

    def __lt__(self, other):
        # Order by rank; break rank ties by year.
        if self.__rank < other.__rank:
            return True
        elif self.__rank > other.__rank:
            return False
        else:
            if self.__year < other.__year:
                return True
            else:
                return False

    def __hash__(self):
        return hash((self.__title, self.__year))

    def add_actor(self, actor):
        if type(actor) == Actor:
            self.__actors.append(actor)

    def remove_actor(self, actor):
        # Silently ignores actors that are absent or of the wrong type.
        if type(actor) == Actor:
            if actor in self.__actors:
                self.__actors.remove(actor)

    def add_genre(self, genre):
        if type(genre) == Genre:
            self.__genres.append(genre)

    def remove_genre(self, genre):
        if type(genre) == Genre:
            if genre in self.__genres:
                self.__genres.remove(genre)

    def add_review(self, review: Review):
        self.__reviews.append(review)
| true
|
e36b5345d8450aba39c6e6ba93cf9642ce42c056
|
Python
|
deepbuzin/humpback-whale-identification
|
/loss/triplet_loss.py
|
UTF-8
| 16,295
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# def euclidean_distance(embeddings):
# """Compute the 2D matrix of pairwise euclidean distances between embeddings.
#
# :param embeddings: tensor of shape (batch_size, embedding_size)
# :return dist: tensor of shape (batch_size, batch_size)
# """
# prod = tf.matmul(embeddings, tf.transpose(embeddings))
# sq_norms = tf.diag_part(prod)
#
# dist = tf.expand_dims(sq_norms, 0) - 2.0*prod + tf.expand_dims(sq_norms, 1)
# dist = tf.maximum(dist, 0.0)
#
# zeros_mask = tf.to_float(tf.equal(dist, 0.0))
# dist = tf.sqrt(dist + zeros_mask*1e-16)
# dist = dist * (1.0-zeros_mask)
# return dist
# def valid_triplets_mask(labels):
# """Compute the 3D boolean mask where mask[a, p, n] is True if (a, p, n) is a valid triplet,
# as in a, p, n are distinct and labels[a] == labels[p], labels[a] != labels[n].
#
# :param labels: tensor of shape (batch_size,)
# :return mask: tf.bool tensor of shape (batch_size, batch_size, batch_size)
# """
# indices_equal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)
# indices_not_equal = tf.logical_not(indices_equal)
# i_not_equal_j = tf.expand_dims(indices_not_equal, 2)
# i_not_equal_k = tf.expand_dims(indices_not_equal, 1)
# j_not_equal_k = tf.expand_dims(indices_not_equal, 0)
# distinct_indices = tf.logical_and(tf.logical_and(i_not_equal_j, i_not_equal_k), j_not_equal_k)
#
# label_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
# i_equal_j = tf.expand_dims(label_equal, 2)
# i_equal_k = tf.expand_dims(label_equal, 1)
# valid_labels = tf.logical_and(i_equal_j, tf.logical_not(i_equal_k))
#
# mask = tf.logical_and(distinct_indices, valid_labels)
# return mask
# def valid_anchor_positive_mask(labels):
# """Compute a 2D boolean mask where mask[a, p] is True if a and p are distinct and have the same label.
#
# :param labels: tensor of shape (batch_size,)
# :return mask: tf.bool tensor of shape (batch_size, batch_size)
# """
# indices_equal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)
# indices_not_equal = tf.logical_not(indices_equal)
#
# labels_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
# mask = tf.logical_and(indices_not_equal, labels_equal)
# return mask
#
#
# def valid_anchor_negative_mask(labels):
# """Compute a 2D boolean mask where mask[a, n] is True if a and n have distinct label.
#
# :param labels: tensor of shape (batch_size,)
# :return mask: tf.bool tensor of shape (batch_size, batch_size)
# """
# labels_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
# mask = tf.logical_not(labels_equal)
# return mask
# def triplet_loss_(margin=0.2, strategy='batch_all', metric=euclidean_distance):
# """Compute the triplet loss over the batch of embeddings.
#
# :param margin: margin that is going to be enforced by the triplet loss
# :param strategy: string, that indicated whether we're using the 'batch hard' or the 'batch all' mining strategy
# :param metric: a callback function that we use to calculate the distance between each pair of vectors
# :return: a callback function that calculates the loss according to the specified mining strategy
# """
# def batch_all(labels, embeddings):
# """Compute the loss by generating all the valid triplets and averaging over the positive ones
#
# :param labels: tensor of shape (batch_size,)
# :param embeddings: tensor of shape (batch_size, embedding_size)
# :return loss: scalar tensor
# """
# dist = metric(embeddings)
#
# anchor_positive_dist = tf.expand_dims(dist, 2)
# anchor_negative_dist = tf.expand_dims(dist, 1)
#
# loss = anchor_positive_dist - anchor_negative_dist + margin
#
# mask = tf.to_float(valid_triplets_mask(labels))
# loss = tf.multiply(loss, mask)
# loss = tf.maximum(loss, 0.0)
#
# num_non_easy_triplets = tf.reduce_sum(tf.to_float(tf.greater(loss, 1e-16)))
# loss = tf.reduce_sum(loss) / (num_non_easy_triplets + 1e-16)
# return loss
#
# def batch_hard(labels, embeddings):
# """Compute the loss on the triplet consisting of the hardest positive and the hardest negative
#
# :param labels: tensor of shape (batch_size,)
# :param embeddings: tensor of shape (batch_size, embedding_size)
# :return loss: scalar tensor
# """
# dist = metric(embeddings)
#
# ap_mask = tf.to_float(valid_anchor_positive_mask(labels))
# ap_dist = tf.multiply(dist, ap_mask)
# hardest_positive_dist = tf.reduce_max(ap_dist, axis=1, keepdims=True)
#
# an_mask = tf.to_float(valid_anchor_negative_mask(labels))
# an_dist = dist + tf.reduce_max(dist, axis=1, keepdims=True) * (1.0-an_mask)
# hardest_negative_dist = tf.reduce_min(an_dist, axis=1, keepdims=True)
#
# loss = tf.reduce_mean(tf.maximum(hardest_positive_dist - hardest_negative_dist + margin, 0.0))
# return loss
#
# if strategy == 'batch_all':
# return batch_all
# elif strategy == 'batch_hard':
# return batch_hard
# else:
# raise ValueError('unknown strategy: %s' % strategy)
# def euclidean_dist(embeddings):
# prod = tf.matmul(embeddings, tf.transpose(embeddings))
# #sq_norms = tf.reduce_sum(tf.square(embeddings), axis=1)
# sq_norms = tf.diag_part(prod)
# dist = tf.reshape(sq_norms, (-1, 1)) - 2 * prod + tf.reshape(sq_norms, (1, -1))
# return dist
#
#
# def soft_margin_triplet_loss(labels, embeddings):
# inf = tf.constant(1e+9, tf.float32)
# epsilon = tf.constant(1e-6, tf.float32)
# zero = tf.constant(0, tf.float32)
#
# #dist = tf.sqrt(tf.maximum(zero, epsilon + euclidean_dist(embeddings)))
# dist = tf.maximum(zero, epsilon + euclidean_dist(embeddings)) # sqeuclidean
# # mask matrix showing equal labels of embeddings
# equal_label_mask = tf.cast(tf.equal(tf.reshape(labels, (-1, 1)), tf.reshape(labels, (1, -1))), tf.float32)
#
# pos_dist = tf.reduce_max(equal_label_mask * dist, axis=1)
# neg_dist = tf.reduce_min((equal_label_mask * inf) + dist, axis=1)
#
# loss = tf.reduce_mean(tf.nn.softplus(pos_dist - neg_dist))
#
# #hard loss
# #margin = tf.constant(1.5, tf.float32)
# #loss = tf.reduce_mean(tf.maximum(pos_dist - neg_dist + margin, 0.0))
# return loss
# ----------------------------------------------------------------------------------------------------------------------
def valid_triplets_mask(labels):
    """Compute the 3D boolean mask where mask[a, p, n] is True if (a, p, n) is a valid triplet,
    as in a, p, n are distinct and labels[a] == labels[p], labels[a] != labels[n].
    :param labels: tensor of shape (batch_size,)
    :return mask: tf.bool tensor of shape (batch_size, batch_size, batch_size)
    """
    # distinct_indices[i, j, k] is True iff i, j, k are pairwise distinct.
    indices_equal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)
    indices_not_equal = tf.logical_not(indices_equal)
    i_not_equal_j = tf.expand_dims(indices_not_equal, 2)
    i_not_equal_k = tf.expand_dims(indices_not_equal, 1)
    j_not_equal_k = tf.expand_dims(indices_not_equal, 0)
    distinct_indices = tf.logical_and(tf.logical_and(i_not_equal_j, i_not_equal_k), j_not_equal_k)
    # valid_labels[a, p, n] is True iff labels[a] == labels[p] and labels[a] != labels[n].
    label_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
    i_equal_j = tf.expand_dims(label_equal, 2)
    i_equal_k = tf.expand_dims(label_equal, 1)
    valid_labels = tf.logical_and(i_equal_j, tf.logical_not(i_equal_k))
    mask = tf.logical_and(distinct_indices, valid_labels)
    return mask
def euclidean_distance(embeddings, squared=False):
    """Computes pairwise euclidean distance matrix with numerical stability.
    output[i, j] = || feature[i, :] - feature[j, :] ||_2
    :param embeddings: 2-D Tensor of size [number of data, feature dimension].
    :param squared: Boolean, whether or not to square the pairwise distances.
    :return dist: 2-D Tensor of size [number of data, number of data].
    """
    # ||a - b||^2 = ||a||^2 - 2<a, b> + ||b||^2, computed for all pairs at once.
    dist_squared = tf.add(tf.reduce_sum(tf.square(embeddings), axis=1, keepdims=True),
                          tf.reduce_sum(tf.square(tf.transpose(embeddings)), axis=0, keepdims=True)
                          ) - 2.0 * tf.matmul(embeddings, tf.transpose(embeddings))
    # Deal with numerical inaccuracies. Set small negatives to zero.
    dist_squared = tf.maximum(dist_squared, 0.0)
    # Get the mask where the zero distances are at.
    error_mask = tf.less_equal(dist_squared, 0.0)
    # Optionally take the sqrt; the 1e-16 shift keeps the gradient finite at zero.
    dist = dist_squared if squared else tf.sqrt(dist_squared + tf.cast(error_mask, dtype=tf.float32) * 1e-16)
    # Undo conditionally adding 1e-16.
    dist = tf.multiply(dist, tf.cast(tf.logical_not(error_mask), dtype=tf.float32))
    n_data = tf.shape(embeddings)[0]
    # Explicitly set diagonals to zero.
    mask_offdiagonals = tf.ones_like(dist) - tf.diag(tf.ones([n_data]))
    dist = tf.multiply(dist, mask_offdiagonals)
    return dist
def masked_maximum(data, mask, dim=1):
    """Axis-wise maximum over the elements selected by *mask*.

    Shifting by the per-axis minimum first makes all considered values
    non-negative, so masked-out entries (multiplied to zero) can never win.

    :param data: 2-D float `Tensor` of size [n, m].
    :param mask: 2-D Boolean `Tensor` of size [n, m].
    :param dim: The dimension over which to compute the maximum.
    :return: tensor whose maximized dimension has size 1.
    """
    floor = tf.reduce_min(data, axis=dim, keepdims=True)
    shifted = tf.multiply(data - floor, mask)
    return tf.reduce_max(shifted, axis=dim, keepdims=True) + floor
def masked_minimum(data, mask, dim=1):
    """Axis-wise minimum over the elements selected by *mask*.

    Shifting by the per-axis maximum first makes all considered values
    non-positive, so masked-out entries (multiplied to zero) can never win.

    :param data: 2-D float `Tensor` of size [n, m].
    :param mask: 2-D Boolean `Tensor` of size [n, m].
    :param dim: The dimension over which to compute the minimum.
    :return: tensor whose minimized dimension has size 1.
    """
    ceiling = tf.reduce_max(data, axis=dim, keepdims=True)
    shifted = tf.multiply(data - ceiling, mask)
    return tf.reduce_min(shifted, axis=dim, keepdims=True) + ceiling
def triplet_loss(margin=1.0, strategy='batch_semi_hard'):
    """Compute the triplet loss over the batch of embeddings. tf contrib inspired:
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/losses/python/metric_learning/metric_loss_ops.py
    :param margin: margin that is going to be enforced by the triplet loss
    :param strategy: string, that indicated whether we're using the 'batch hard', 'batch all' or 'batch_semi_hard' mining strategy
    :return: a callback function that calculates the loss according to the specified strategy
    """
    # NOTE(review): written against TF1-era APIs (tf.to_float, tf.diag, tf.div_no_nan)
    # and relies on euclidean_distance / valid_triplets_mask defined elsewhere in this
    # module — confirm availability before porting to TF2.
    def get_loss_tensor(positive_dists, negative_dists):
        """Compute the triplet loss function tensor using specified margin:
        :param positive_dists: positive distances tensor
        :param negative_dists: negative distances tensor
        :return: resulting triplet loss tensor
        """
        # margin == 'soft' swaps the hinge for a smooth softplus: log(1 + exp(d_ap - d_an)).
        if margin == 'soft':
            return tf.nn.softplus(positive_dists - negative_dists)
        return tf.maximum(positive_dists - negative_dists + margin, 0.0)
    def batch_semi_hard(labels, embeddings):
        """Computes the triplet loss with semi-hard negative mining.
        The loss encourages the positive distances (between a pair of embeddings with
        the same labels) to be smaller than the minimum negative distance among
        which are at least greater than the positive distance plus the margin constant
        (called semi-hard negative) in the mini-batch. If no such negative exists,
        uses the largest negative distance instead.
        See: https://arxiv.org/abs/1503.03832.
        :param labels: 1-D tf.int32 `Tensor` with shape [batch_size] of multiclass integer labels.
        :param embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
        :return loss: tf.float32 scalar.
        """
        labels = tf.reshape(labels, [-1, 1])
        batch_size = tf.size(labels)
        # Build pairwise squared distance matrix.
        dist = euclidean_distance(embeddings, squared=True)
        # Build pairwise binary adjacency matrix (equal label mask).
        adjacency = tf.equal(labels, tf.transpose(labels))
        # Invert so we can select negatives only.
        adjacency_not = tf.logical_not(adjacency)
        # Compute the mask.
        dist_tile = tf.tile(dist, [batch_size, 1])  # stack dist matrix batch_size times, axis=0
        # mask[i*B + a, n] is True when n is a valid negative for anchor a AND
        # D_an is strictly greater than the candidate positive distance D_ai.
        mask = tf.logical_and(tf.tile(adjacency_not, [batch_size, 1]), tf.greater(dist_tile, tf.reshape(dist, [-1, 1])))
        mask = tf.cast(mask, dtype=tf.float32)
        is_negatives_outside = tf.reshape(tf.greater(tf.reduce_sum(mask, axis=1, keepdims=True), 0.0), [batch_size, batch_size])
        is_negatives_outside = tf.transpose(is_negatives_outside)
        # negatives_outside: smallest D_an where D_an > D_ap.
        negatives_outside = tf.reshape(masked_minimum(dist_tile, mask), [batch_size, batch_size])
        negatives_outside = tf.transpose(negatives_outside)
        # negatives_inside: largest D_an.
        adjacency_not = tf.cast(adjacency_not, dtype=tf.float32)
        negatives_inside = tf.tile(masked_maximum(dist, adjacency_not), [1, batch_size])
        # Pick the semi-hard negative when one exists, else fall back to the hardest one.
        semi_hard_negatives = tf.where(is_negatives_outside, negatives_outside, negatives_inside)
        # In lifted-struct, the authors multiply 0.5 for upper triangular
        # in semihard, they take all positive pairs except the diagonal.
        mask_positives = tf.cast(adjacency, dtype=tf.float32) - tf.diag(tf.ones([batch_size]))
        n_positives = tf.reduce_sum(mask_positives)
        # div_no_nan keeps the loss at 0 when the batch contains no positive pair.
        loss_mat = get_loss_tensor(dist, semi_hard_negatives)
        loss = tf.div_no_nan(tf.reduce_sum(tf.multiply(loss_mat, mask_positives)), n_positives)
        return loss
    def batch_all(labels, embeddings):
        """Compute the loss by generating all the valid triplets and averaging over the positive ones
        :param labels: 1-D tf.int32 `Tensor` with shape [batch_size] of multiclass integer labels.
        :param embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
        :return loss: tf.float32 scalar.
        """
        dist = euclidean_distance(embeddings, squared=True)
        mask = tf.to_float(valid_triplets_mask(labels))
        # Broadcasting yields a [B, B, B] tensor of D_ap - D_an for every (a, p, n).
        anchor_positive_dist = tf.expand_dims(dist, 2)
        anchor_negative_dist = tf.expand_dims(dist, 1)
        loss_tensor = get_loss_tensor(anchor_positive_dist, anchor_negative_dist)
        loss_tensor = tf.multiply(loss_tensor, mask)
        # Average only over triplets that are not already satisfied (loss > ~0).
        num_non_easy_triplets = tf.reduce_sum(tf.to_float(tf.greater(loss_tensor, 1e-16)))
        loss = tf.div_no_nan(tf.reduce_sum(loss_tensor), num_non_easy_triplets)
        return loss
    def batch_hard(labels, embeddings):
        """Compute the loss by generating only hardest valid triplets and averaging over the positive ones.
        One triplet per embedding, i.e. per anchor
        :param labels: 1-D tf.int32 `Tensor` with shape [batch_size] of multiclass integer labels.
        :param embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
        :return loss: tf.float32 scalar.
        """
        dist = euclidean_distance(embeddings, squared=True)
        adjacency = tf.cast(tf.equal(tf.reshape(labels, (-1, 1)), tf.reshape(labels, (1, -1))), tf.float32)
        # Hardest positive per anchor: the farthest same-label embedding.
        pos_dist = tf.reduce_max(adjacency * dist, axis=1)
        # Hardest negative per anchor: adding +inf to same-label entries excludes them from the min.
        inf = tf.constant(1e+9, tf.float32)
        neg_dist = tf.reduce_min((adjacency * inf) + dist, axis=1)
        loss_mat = get_loss_tensor(pos_dist, neg_dist)
        num_non_easy_triplets = tf.reduce_sum(tf.to_float(tf.greater(loss_mat, 1e-16)))
        loss = tf.div_no_nan(tf.reduce_sum(loss_mat), num_non_easy_triplets)
        return loss
    # Dispatch on the mining strategy; anything unrecognized falls back to batch_all.
    if strategy == 'batch_semi_hard':
        return batch_semi_hard
    elif strategy == 'batch hard':
        return batch_hard
    else:
        return batch_all
| true
|
112415cde6d3dc34c54456edc40a4f05697a18ed
|
Python
|
pooja89299/list
|
/list tatola even.py
|
UTF-8
| 294
| 3.203125
| 3
|
[] |
no_license
|
# num=[23,14,56,12,19,9,15,31,42,43]
# i=0
# y=[]
# a=[]
# sum=0
# sum1=0
# while i<len(num):
# b=num[i]
# if b%2==0:
# sum=sum+b
# y.append(b)
# else:
# a.append(b)
# sum1=sum1+b
# i=i+1
# print("even",y,"sum:",sum)
# print("odd",a,"sum1:",sum1)
| true
|
a545b1861ad7812848066fb74f9d16db4b1b3df2
|
Python
|
wrschneider/project-euler
|
/p006.py
|
UTF-8
| 161
| 3.296875
| 3
|
[] |
no_license
|
# Project Euler #6: difference between the square of the sum and the sum
# of the squares of the first n natural numbers.
n = 100
sum_of_squares = 0
_sum = 0
for value in range(1, n + 1):
    sum_of_squares += value * value
    _sum += value
square_of_sum = _sum * _sum
print(square_of_sum - sum_of_squares)
| true
|
bae1ef8ffb49baa3ff27acb58af57f2f8a5a68fb
|
Python
|
hucarlos/OptimalControl
|
/Scripts/Plots/BoxPlotSELQR.py
|
UTF-8
| 1,257
| 2.859375
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
if __name__ == '__main__':
    # NOTE(review): this is Python 2 code (print statements below) — needs
    # porting before running under Python 3.
    # Embed fonts as editable text in PDF/PS exports instead of Type 3 glyphs.
    matplotlib.rcParams['pdf.fonttype'] = 42
    matplotlib.rcParams['ps.fonttype'] = 42
    # Columns (by use below): 0-1 times, 2-3 costs, 4-5 iteration counts.
    # Presumably one row per experiment run — confirm against the generator script.
    data = np.loadtxt('Results4.txt')
    # print data.shape
    # data = [data[m] for d, m in zip(data.T, mask.T)]
    # mask = ~np.isinf(data)
    # Filter data using np.isnan
    times = [data[:, 0], data[:, 1]]
    # Drop rows whose E-LQR cost diverged (inf) from both cost series.
    mask = ~np.isinf(data[:, 2])
    costSELQR = data[:, 2]
    costSELQR = costSELQR[mask]
    costRSELQR = data[:, 3]
    costRSELQR = costRSELQR[mask]
    cost = [costSELQR, costRSELQR]
    iters = [data[:, 4], data[:, 5]]
    # multiple box plots on one figure
    f, (ax1, ax2) = plt.subplots(1, 2, sharex=False, sharey=False)
    bp = ax1.boxplot(cost, showmeans=True, showfliers=False)
    ax1.set_xticklabels(['E-LQR', 'RE-LQR'])
    ax1.set_title("Cost")
    ax1.grid()
    ax2.boxplot(times, showmeans=True, showfliers=False)
    ax2.set_xticklabels(['E-LQR', 'RE-LQR'])
    ax2.set_title("Time (ms)")
    ax2.grid()
    # Note: iteration/cost means below use the unfiltered columns (inf rows included).
    print 'Mean iters: ', np.mean(data[:, 4]), np.mean(data[:, 5])
    print 'Mean costs: ', np.mean(costSELQR), np.mean(costRSELQR)
    print 'Mean time: ', np.mean(data[:, 0]), np.mean(data[:, 1])
    plt.show()
| true
|
efead44c4d9c3ea1b6de8a1868cffc3030911343
|
Python
|
mateenjameel/Python-Course-Mathematics-Department
|
/Week 14_Integration with Plotting Responses/integration_six.py
|
UTF-8
| 301
| 3.296875
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
# Plot f(x) = x^2 on [-1, 3] and shade the area under the curve on (0, 2).
x = np.linspace(-1,3,1000)
def f(x):
    """Return the square of x (vectorized via numpy broadcasting)."""
    return x**2
plt.plot(x,f(x))
plt.axhline(color='red')
# The comprehension's loop variable deliberately shadows the array name:
# it builds a boolean mask selecting samples with 0 < x < 2.
plt.fill_between(x, f(x), where = [(x>0) and (x<2) for x in x], color='blue', alpha =0.3)
# alpha is for transparency
plt.show()
| true
|
a0681bfc4259002b7e739bfa27cff2773b738834
|
Python
|
ras-2004-shaikh/MultiLang
|
/multilang.py
|
UTF-8
| 1,974
| 2.9375
| 3
|
[] |
no_license
|
import os
import time
import json
#solutions=[wherever the solutions are maybe discord?? this will be a list of strings]
# Each solution is a string whose FIRST line names the language; the rest is code.
test_solution='''java
public static int solution(){
return 5;
}
'''
test_solution_2='''brainf*ck
>+++++[<++++++++++>-]<+++.
'''
solutions=[test_solution,test_solution_2]
test_file='tests.json'
# Per-language scaffold template files and result files.
temp_files={'java':'java_temp.java'}
result_files={'java':'test_fails.dat'}
temporary='Main' #this must be the name of the class in java_temp.java
remove_files={'java':[temporary+'.java',temporary+'.class',"MySecurity.class"]}
# Line of java_temp.java where the submitted method body gets spliced in.
line_no_java=76
i=1
# Pop and judge each solution (note: pop() processes the list back-to-front).
while solutions:
    sol=solutions.pop()
    print(f"Solution: {i}")
    i+=1
    # Re-split keeping a trailing newline on every line, so splicing into the
    # template preserves the file's line structure.
    sol_lines=[*map(lambda s:s+'\n',sol.split('\n'))]
    if sol_lines[0]=='java\n':
        # Splice the solution into the template, compile and run it; the java
        # harness is expected to write its verdict into result_files['java'].
        lines=[]
        with open(temp_files['java'],'r')as f:
            lines=f.readlines()
        lines[line_no_java:line_no_java+1]=sol_lines[1:]
        with open(temporary+'.java','w')as f:
            f.writelines(lines)
        os.system(f'javac {temporary+".java"}')
        os.system(f'java {temporary} {test_file}')
        for s in remove_files['java']:
            os.remove(s)
        with open(result_files['java'],'r')as f:
            res=f.readlines()
        print(res[0])
        # Show at most the first few failing cases.
        for line in res[1:10]:
            print(line)
        os.remove(result_files['java'])
    elif sol_lines[0]=='brainf*ck\n':
        # brainF_temp is a local module providing interpret(code, input) -> output.
        interpreter=__import__('brainF_temp').interpret
        code=''.join(sol_lines[1:])
        # tests.json layout: {"cases": [[input, expected], ...]} — inferred from use below.
        with open(test_file,'r') as f:
            cases=json.load(f)['cases']
        failed_cases=[]
        start=time.time()
        for test in cases:
            try:
                ans=interpreter(code,str(test[0]))
                if(ans!=str(test[1])):
                    failed_cases.append(test+["Your answer: "+ans])
            except Exception as e:
                failed_cases.append(test+["Error: "+str(e)])
        end=time.time()
        print(f"""Passed : {len(cases)-len(failed_cases)}/{len(cases)}
        time: {end-start}""")
        if len(failed_cases):
            print('Some failed cases.')
            # Report up to three failures with input, expected and actual.
            for test in failed_cases[:3]:
                print(f"""Input: {test[0]}
                Actual answer: {test[1]}
                {test[2]}""")
| true
|
c1db36698c2c90c7b0338b39f8442b83178938ef
|
Python
|
kleinerman/dobie
|
/server/back_end/crudresndr.py
|
UTF-8
| 11,251
| 2.671875
| 3
|
[] |
no_license
|
import pymysql
import queue
import logging
import json
import re
import time
import threading
import genmngr
import database
from config import *
from msgheaders import *
class CrudReSndr(genmngr.GenericMngr):
    '''
    This thread has two responsibilities.
    It periodically check which controllers has some CRUD not yet
    committed and send to them a RRC (Request Resend CRUD) message.
    When a controller which now is alive answer to the previous message with a
    RRRC (Ready to Receive Resended CRUD) message, this thread send the not comitted
    CRUDs to this controller.
    '''
    def __init__(self, exitFlag):
        '''
        :param exitFlag: shutdown flag handed to GenericMngr; presumably polled
        by checkExit() to stop the thread — confirm in genmngr.
        '''
        #Invoking the parent class constructor, specifying the thread name,
        #to have a understandable log file.
        super().__init__('CrudReSender', exitFlag)
        #Database object to answer the CRUDs not committed.
        #The creation of this object was moved to the run method to avoid
        #freezing the main thread when there is no connection to database.
        self.dataBase = None
        #Controller Messanger to resend the corresponding CRUDs.
        #As the "ctrllerMsger" use the only "netMngr" object and the "netMngr" has to
        #know this object to send the RRRE message. This attribute is setted after
        #creating this object in the main thread.
        self.ctrllerMsger = None
        #When the network thread receives a RRRC message it puts the
        #MAC of the controller which sent this message in this queue.
        #Also, MsgReceiver thread can put the MAC of the controller
        #which need to be resynced here.
        self.toCrudReSndr = queue.Queue()
        #Calculating the number of iterations before sending the message to request
        #resync the controller.
        self.ITERATIONS = RE_SEND_TIME // EXIT_CHECK_TIME
        #This is the actual iteration. This value is incremented in each iteration
        #and is initializated to 0.
        self.iteration = 0
        #Lock to protect self.iteration attribute
        self.lockIteration = threading.Lock()
    def resetReSendTime(self):
        '''
        This method will be executed by network thread reseting the iterations
        everytime the network thread sends a message to the controller.
        This is to avoid "CrudReSender" thread resends CRUDs when a CRUD
        message has just been sent to the controller and the controller
        didn't answer yet.
        '''
        #self.iteration is protected with self.lockIteration lock as it is
        #modified by this thread (CrudReSender) and by NetMngr Thread
        with self.lockIteration:
            self.iteration = 0
    def run(self):
        '''
        This is the main method of the thread. Most of the time it is blocked waiting
        for queue messages coming from the "Network" thread.
        The queue message is the MAC address of the controller which need CRUDs to be
        resended.
        When a MAC address is received, this method send all the doors, access,
        limited access and persons CRUDs for this controller in this order to avoid
        inconsistency in the controller database.
        Also, after "self.ITERATIONS" times, it send a RRRE message to all the
        controllers which have uncommitted CRUDs
        '''
        #First of all, the database should be connected by the execution of this thread
        self.dataBase = database.DataBase(DB_HOST, DB_USER, DB_PASSWD, DB_DATABASE, self)
        while True:
            try:
                #Blocking until Network thread sends an msg or EXIT_CHECK_TIME expires
                ctrllerMac = self.toCrudReSndr.get(timeout=EXIT_CHECK_TIME)
                self.checkExit()
                try:
                    #Resend order matters: doors first, then schedules, accesses
                    #and finally persons, so the controller DB stays consistent.
                    for door in self.dataBase.getUncmtDoors(ctrllerMac, database.TO_ADD):
                        #Fields below are server-side only and must not reach the controller.
                        door.pop('name')
                        door.pop('controllerId')
                        door.pop('zoneId')
                        door.pop('isVisitExit')
                        self.ctrllerMsger.addDoor(ctrllerMac, door)
                    for door in self.dataBase.getUncmtDoors(ctrllerMac, database.TO_UPDATE):
                        door.pop('name')
                        door.pop('controllerId')
                        door.pop('zoneId')
                        door.pop('isVisitExit')
                        self.ctrllerMsger.updDoor(ctrllerMac, door)
                    for door in self.dataBase.getUncmtDoors(ctrllerMac, database.TO_DELETE):
                        self.ctrllerMsger.delDoor(ctrllerMac, door['id'])
                    self.checkExit()
                    for unlkDoorSkd in self.dataBase.getUncmtUnlkDoorSkds(ctrllerMac, database.TO_ADD):
                        self.ctrllerMsger.addUnlkDoorSkd(ctrllerMac, unlkDoorSkd)
                    for unlkDoorSkd in self.dataBase.getUncmtUnlkDoorSkds(ctrllerMac, database.TO_UPDATE):
                        unlkDoorSkd.pop('doorId')
                        self.ctrllerMsger.updUnlkDoorSkd(ctrllerMac, unlkDoorSkd)
                    for unlkDoorSkd in self.dataBase.getUncmtUnlkDoorSkds(ctrllerMac, database.TO_DELETE):
                        self.ctrllerMsger.delUnlkDoorSkd(ctrllerMac, unlkDoorSkd['id'])
                    self.checkExit()
                    for excDayUds in self.dataBase.getUncmtExcDayUdss(ctrllerMac, database.TO_ADD):
                        self.ctrllerMsger.addExcDayUds(ctrllerMac, excDayUds)
                    for excDayUds in self.dataBase.getUncmtExcDayUdss(ctrllerMac, database.TO_UPDATE):
                        excDayUds.pop('doorId')
                        self.ctrllerMsger.updExcDayUds(ctrllerMac, excDayUds)
                    for excDayUds in self.dataBase.getUncmtExcDayUdss(ctrllerMac, database.TO_DELETE):
                        self.ctrllerMsger.delExcDayUds(ctrllerMac, excDayUds['id'])
                    self.checkExit()
                    for access in self.dataBase.getUncmtAccesses(ctrllerMac, database.TO_ADD):
                        self.ctrllerMsger.addAccess(ctrllerMac, access)
                    for access in self.dataBase.getUncmtAccesses(ctrllerMac, database.TO_UPDATE):
                        #The following parameters should not be sent when updating an access.
                        access.pop('doorId')
                        access.pop('personId')
                        access.pop('allWeek')
                        access.pop('cardNumber')
                        self.ctrllerMsger.updAccess(ctrllerMac, access)
                    for access in self.dataBase.getUncmtAccesses(ctrllerMac, database.TO_DELETE):
                        self.ctrllerMsger.delAccess(ctrllerMac, access['id'])
                    self.checkExit()
                    for liAccess in self.dataBase.getUncmtLiAccesses(ctrllerMac, database.TO_ADD):
                        self.ctrllerMsger.addLiAccess(ctrllerMac, liAccess)
                    for liAccess in self.dataBase.getUncmtLiAccesses(ctrllerMac, database.TO_UPDATE):
                        #The following parameters should not be sent when updating an access.
                        liAccess.pop('accessId')
                        liAccess.pop('doorId')
                        liAccess.pop('personId')
                        liAccess.pop('cardNumber')
                        self.ctrllerMsger.updLiAccess(ctrllerMac, liAccess)
                    for liAccess in self.dataBase.getUncmtLiAccesses(ctrllerMac, database.TO_DELETE):
                        self.ctrllerMsger.delLiAccess(ctrllerMac, liAccess['id'])
                    self.checkExit()
                    #Persons never colud be in state TO_ADD. For this reason,
                    #only TO_UPDATE or TO_DELETE state is retrieved
                    for person in self.dataBase.getUncmtPersons(ctrllerMac, database.TO_UPDATE):
                        person.pop('names')
                        person.pop('lastName')
                        person.pop('orgId')
                        person.pop('visitedOrgId')
                        person.pop('isProvider')
                        #"updPerson" method receive a list of MAC addresses to update. Because in this case only one
                        #controller is being updated, a list with only the MAC address of the controller is created.
                        self.ctrllerMsger.updPerson([ctrllerMac], person)
                    for person in self.dataBase.getUncmtPersons(ctrllerMac, database.TO_DELETE):
                        #"delPerson" method receive a list of MAC addresses to update. Because in this case only one
                        #controller is being updated, a list with only the MAC address of the controller is created.
                        self.ctrllerMsger.delPerson([ctrllerMac], person['id'])
                    self.checkExit()
                except database.DoorError as doorError:
                    logMsg = 'Error retransmitting uncommitted doors: {}'.format(str(doorError))
                    self.logger.warning(logMsg)
                except database.AccessError as accessError:
                    logMsg = 'Error retransmitting uncommitted accesses: {}'.format(str(accessError))
                    self.logger.warning(logMsg)
                except database.PersonError as personError:
                    logMsg = 'Error retransmitting uncommitted persons: {}'.format(str(personError))
                    self.logger.warning(logMsg)
            except queue.Empty:
                #Cheking if Main thread ask as to finish.
                self.checkExit()
                #self.iteration is protected with self.lockIteration lock every time it is
                #accessed, as it is modified by this thread (CrudReSender) and by NetMngr Thread
                #Keep "self.iteration" locked during all below code block won't be optimal as
                #there are methods inside this block which may spend some time accessing to
                #database or sending things over the network. (Sending things over the network
                #keeping the lock would cause a deadlock if not using queues)
                #To avoid keeping "self.iteration" locked too much time, it is copied.
                with self.lockIteration:
                    iteration = self.iteration
                if iteration >= self.ITERATIONS:
                    logMsg = 'Checking if there are controllers which need to be resynced.'
                    self.logger.debug(logMsg)
                    #Getting the MAC addresses of controllers which has uncommitted CRUDs.
                    unCmtCtrllers = self.dataBase.getUnCmtCtrllers()
                    unCmtCtrllersMacs = [unCmtCtrller['macAddress'] for unCmtCtrller in unCmtCtrllers]
                    if unCmtCtrllersMacs:
                        logMsg = 'Sending Request Resync Message to: {}'.format(', '.join(unCmtCtrllersMacs))
                        self.logger.info(logMsg)
                        self.ctrllerMsger.requestReSendCruds(unCmtCtrllersMacs)
                    #Restart the countdown whether or not a resync request was sent.
                    with self.lockIteration:
                        self.iteration = 0
                else:
                    with self.lockIteration:
                        self.iteration +=1
| true
|
de4d2ecf7c2e79f6003c26c4c1dccac8305feb8d
|
Python
|
tmacjx/flask-quickstart
|
/common/utils/date_formatter.py
|
UTF-8
| 2,123
| 3.03125
| 3
|
[] |
no_license
|
"""
# @Author wk
# @Time 2020/4/23 10:39
"""
from datetime import datetime, date
import time
import numbers
# Default strftime/strptime patterns used across this module.
DATE_FORMAT_DEFAULT = "%Y-%m-%d"
DATETIME_FORMAT_DEFAULT = "%Y-%m-%d %H:%M:%S"
def format_date(dt, formatter=DATE_FORMAT_DEFAULT):
    """Render a date or datetime as a string.

    :param dt: datetime.datetime or datetime.date instance
    :param formatter: strftime pattern
    :return: formatted string, or None for any other type
    """
    # datetime is tested first because it is a subclass of date.
    if isinstance(dt, datetime):
        return datetime.strftime(dt, formatter)
    if isinstance(dt, date):
        return date.strftime(dt, formatter)
    return None
def format_datetime(dt, formatter=DATETIME_FORMAT_DEFAULT):
    """Render a datetime (or numeric epoch timestamp) as a string.

    :param dt: datetime instance, or a real number taken as a POSIX
               timestamp (converted via the local timezone)
    :param formatter: strftime pattern
    :return: formatted string, or None for unsupported input
    """
    if isinstance(dt, numbers.Real):
        dt = datetime.fromtimestamp(float(dt))
    return datetime.strftime(dt, formatter) if isinstance(dt, datetime) else None
def to_timestamp(dt=None, formatter=DATETIME_FORMAT_DEFAULT):
    """Convert a datetime (or datetime string) to a millisecond timestamp string.

    :param dt: datetime, string matching *formatter*, or falsy for "now"
    :param formatter: strptime pattern used when dt is a string
    :return: str of epoch milliseconds (local-time interpretation, second precision)
    """
    if not dt:
        dt = datetime.now()
    elif isinstance(dt, str):
        dt = datetime.strptime(dt, formatter)
    seconds = int(time.mktime(dt.timetuple()))
    return str(seconds * 1000)
def to_datetime(o, formatter=DATETIME_FORMAT_DEFAULT):
    """Parse a string into a datetime; None for non-string input."""
    if not isinstance(o, str):
        return None
    return datetime.strptime(o, formatter)
def to_date(o, formatter=DATE_FORMAT_DEFAULT):
    """Parse a string into a date; None for non-string input."""
    if not isinstance(o, str):
        return None
    return datetime.strptime(o, formatter).date()
# if __name__ == "__main__":
# # res = format_date(datetime.now())
# # print(res)
# # res = format_date(datetime.now().date())
# # print(res)
# # res = format_datetime(datetime.now())
# # print(res)
# # res = format_datetime(1587623323.000)
# # print(res)
# # res = to_timestamp(datetime.now())
# # print(res)
# # res = to_timestamp()
# # print(res)
# # res = to_datetime("2020-02-02 11:11:11")
# # print(res)
# # res = to_date("2020-02-02")
# # print(res)
# res = to_timestamp("2020-02-02 11:11:11")
# print(res)
| true
|
62458a8e3381f5b20e953d97dc710696ee030b65
|
Python
|
peterbarla/AlgoExpertExercises
|
/ArraysCategory/ZigzagTraverse/my_solution.py
|
UTF-8
| 1,670
| 4.125
| 4
|
[] |
no_license
|
# O(n) time | O(n) space
def zigzagTraverse(array):
    """Return the elements of a 2-D array in zigzag (anti-diagonal) order.

    Starts at the top-left corner heading down the first column, then walks
    diagonals alternately up-right and down-left, bouncing off the edges.
    """
    last_row = len(array) - 1
    last_col = len(array[0]) - 1
    row, col = 0, 0
    going_down = True
    result = []
    while row <= last_row and col <= last_col:
        result.append(array[row][col])
        if going_down:
            if col == 0 or row == last_row:
                # Hit the left edge or the bottom: turn around.
                going_down = False
                if row == last_row:
                    col += 1
                else:
                    row += 1
            else:
                row += 1
                col -= 1
        else:
            if row == 0 or col == last_col:
                # Hit the top edge or the right side: turn around.
                going_down = True
                if col == last_col:
                    row += 1
                else:
                    col += 1
            else:
                row -= 1
                col += 1
    return result
'''arr = [ [1, 3, 4, 10, 3, 3],
[2, 5, 9, 11, 4, 4],
[6, 8, 12, 15, 4, 4],
[7, 13, 14, 16, 4, 4]]'''
'''arr = [
[1, 3, 4, 10, 11],
[2, 5, 9, 12, 20],
[6, 8, 13, 19, 21],
[7, 14, 18, 22, 27],
[15, 17, 23, 26, 28],
[16, 24, 25, 29, 30]
]'''
arr = [[1, 2, 3, 4, 5]]
print(zigzagTraverse(arr))
| true
|
6515f4dfbe4531a4db69bdf1b95dfa34b9f2e0a7
|
Python
|
KunBaoLin/coding-dojo-hw
|
/band_together/flask_app/models/band.py
|
UTF-8
| 2,417
| 2.953125
| 3
|
[] |
no_license
|
from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
class Band:
    """Active-record style model over the `bands` table of the
    band_together schema. Class methods run their own queries via
    connectToMySQL; validation flashes messages under the "band" category."""

    db = 'band_together'

    # Columns copied from a result row onto each instance, in declaration order.
    _FIELDS = ('id', 'name', 'genre', 'city', 'created_at', 'updated_at', 'user_id')

    def __init__(self, data):
        """Hydrate a Band from a row dict whose keys match _FIELDS."""
        for field in self._FIELDS:
            setattr(self, field, data[field])

    @classmethod  # insert a new band row
    def save(cls, data):
        """Persist a new band; returns whatever query_db reports (presumably the new id)."""
        query = "INSERT INTO bands (name, genre, city, user_id) VALUES (%(name)s,%(genre)s,%(city)s,%(user_id)s);"
        return connectToMySQL(cls.db).query_db(query, data)

    @classmethod  # read every row of the bands table
    def get_all(cls):
        """Return every band as a list of Band instances."""
        rows = connectToMySQL(cls.db).query_db("SELECT * FROM bands;")
        return [cls(row) for row in rows]

    @classmethod  # fetch a single band by primary key
    def get_one(cls, data):
        """Return the Band whose id matches data['id']."""
        query = "SELECT * FROM bands WHERE id = %(id)s;"
        rows = connectToMySQL(cls.db).query_db(query, data)
        return cls(rows[0])

    @classmethod  # rewrite name/genre/city of an existing band
    def update(cls, data):
        """Update a band row in place; bumps updated_at server-side."""
        query = "UPDATE bands SET name=%(name)s, genre=%(genre)s, city=%(city)s, updated_at=NOW() WHERE id = %(id)s;"
        return connectToMySQL(cls.db).query_db(query, data)

    @classmethod  # remove a band row
    def destroy(cls, data):
        """Delete the band whose id matches data['id']."""
        query = "DELETE FROM bands WHERE id = %(id)s;"
        return connectToMySQL(cls.db).query_db(query, data)

    @classmethod  # bands that belong to a single user
    def get_user_bands(cls, data):
        """Return the bands created by the user with id data['id']."""
        query = 'select * from bands left join users on bands.user_id = users.id where users.id = %(id)s;'
        rows = connectToMySQL(cls.db).query_db(query, data)
        return [cls(row) for row in rows]

    @staticmethod
    def validate_band(band):
        """Flash one message per invalid field; return True only when all pass."""
        checks = (
            ('name', "Band Name must be at least 3 characters"),
            ('genre', "Music Genre must be at least 3 characters"),
            ('city', "Home City must be at least 3 characters"),
        )
        is_valid = True
        for field, message in checks:
            if len(band[field]) < 3:
                is_valid = False
                flash(message, "band")
        return is_valid
| true
|
5b9d05ac39b7bb38e425ec1c6c6965d177038173
|
Python
|
Ayhan-Huang/CMDB
|
/auto_client/src/plugin/memory.py
|
UTF-8
| 1,915
| 2.6875
| 3
|
[] |
no_license
|
from lib.config import settings
import os
from lib import convert # 转化MB显示的
class Memory:
    """Plugin that collects RAM (memory device) information.

    `process` obtains raw `dmidecode`-style output — from a canned file in
    debug mode, or from the supplied shell runner — and `parse` turns it
    into a dict keyed by memory slot.
    """
    def __init__(self):
        pass

    @classmethod  # factory hook so extra setup logic can be attached later
    def initial(cls):
        return cls()

    def process(self, execute_cmd, debug):
        """Return parsed memory information.

        :param execute_cmd: callable running a shell command and returning its output
        :param debug: when truthy, read canned output from files/memory.out instead
        :return: dict mapping slot name to device details (see parse)
        """
        if debug:
            with open(os.path.join(settings.BASE_DIR, 'files/memory.out'), 'r', encoding='utf-8')as f:
                output = f.read()
        else:
            # NOTE(review): '命令' ("command") is a placeholder — replace with
            # the real dmidecode invocation before production use.
            output = execute_cmd('命令')
        res = self.parse(output)
        return res

    def parse(self, content):
        """Parse `dmidecode --type memory`-style output.

        :param content: raw shell command output
        :return: dict mapping slot name (Locator) to a dict with keys
                 capacity, slot, model, speed, manufacturer, sn
        """
        ram_dict = {}
        key_map = {
            'Size': 'capacity',
            'Locator': 'slot',
            'Type': 'model',
            'Speed': 'speed',
            'Manufacturer': 'manufacturer',
            'Serial Number': 'sn',
        }
        devices = content.split('Memory Device')
        for item in devices:
            item = item.strip()
            if not item:
                continue
            if item.startswith('#'):
                continue
            segment = {}
            lines = item.split('\n\t')
            for line in lines:
                if not line.strip():
                    continue
                # Split on the first ':' only so values containing ':' survive.
                # (The original `if len(line.split(':')):` was always true: the
                # fallback branch was dead and lines without ':' raised
                # ValueError on tuple unpacking.)
                parts = line.split(':', 1)
                if len(parts) == 2:
                    key, value = parts
                else:
                    key, value = parts[0], ""
                key = key.strip()
                if key in key_map:
                    if key == 'Size':
                        segment[key_map['Size']] = convert.convert_mb_to_gb(value, 0)
                    else:
                        segment[key_map[key]] = value.strip()
            # Skip fragments that carry no slot identifier instead of raising KeyError.
            if 'slot' in segment:
                ram_dict[segment['slot']] = segment
        return ram_dict
| true
|
d755b3439b762747c9e100fde6e67a7a8ca09bb4
|
Python
|
approximata/greenfox-weeks
|
/week-04/day-4/4.py
|
UTF-8
| 263
| 3.765625
| 4
|
[] |
no_license
|
# 4. Given base and n that are both 1 or more, compute recursively (no loops)
# the value of base to the n power, so powerN(3, 2) is 9 (3 squared).
def power(x, n):
    """Return x raised to the n-th power, computed recursively.

    Fixes the original base case (`if n <= 2: return x * x`), which returned
    x**2 for n == 1 — e.g. power(2, 1) gave 4 instead of 2.

    :param x: the base
    :param n: the exponent (spec says >= 1; n <= 0 yields 1)
    :return: x ** n
    """
    if n <= 0:
        return 1  # x**0 == 1; also terminates the recursion cleanly for n == 1
    return x * power(x, n - 1)
print(power(2, 4))
| true
|
66607c860f0a213e0b516abcb5a667a21bc162b5
|
Python
|
carlos3g/URI-solutions
|
/categorias/iniciante/python/1019.py
|
UTF-8
| 119
| 3
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
t = int(input())
m = t%3600//60
h = t//3600
s = t%3600%60
print('{}:{}:{}'.format(h, m, s))
| true
|
9b562e802acbb135f0ddf0dbda4b3f999fa65ec4
|
Python
|
codio-content/ms-m12-conditionals-and-repetition
|
/.guides/content/ExFor1.py
|
UTF-8
| 136
| 3.21875
| 3
|
[] |
no_license
|
# range(1, 10) yields 1..9, so the action repeats nine times.
for i in range(1,10):
    print("pump bellows")
# range(start, stop, step) with step 2 yields 1, 3, 5, 7, 9.
for i in range(1,10,2):
    print("Example of stepping by 2")
    print (i)
| true
|
f21e5e1b717aafdc415ce0384e76b2bf8551dfef
|
Python
|
kenaryn/arkheia
|
/book.py
|
UTF-8
| 970
| 3.734375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
"""Define a set of classes representing a book."""
class Book:
    """A simple class book."""
    def __init__(self, title, author, publisher, year):
        """Store the book's basic bibliographic data."""
        self.title = title
        self.author = author
        self.publisher = publisher
        self.year = year
    def read_book(self):
        """Show information about a single book."""
        print(f"Title: {self.title.capitalize()}")
        print(f"Author: {self.author.title()}")
        print(f"Publisher: {self.publisher.title()}")
        print(f"Year: {self.year}")
    def remove_book(self, title):
        """Remove a title-related book from the library.

        The original body did not compile (`open({file_path}book)` is a syntax
        error and referenced the undefined names `book`/`books`). This version
        reads the library file, drops lines matching *title* (case-insensitive),
        and rewrites the file; a missing match reports the same message.

        :param title: book title to remove, one title per line in the file
        """
        # NOTE(review): assumed library file path — confirm the intended name.
        file_path = 'text_files/library.txt'
        try:
            with open(file_path) as file_object:
                library = [line.rstrip('\n') for line in file_object]
        except FileNotFoundError:
            library = []
        remaining = [entry for entry in library if entry.lower() != title.lower()]
        if len(remaining) == len(library):
            print(f"'{title.capitalize()}' does not exist in your library.")
        else:
            with open(file_path, 'w') as file_object:
                file_object.writelines(entry + '\n' for entry in remaining)
| true
|
a43ecb53122e03529f7d603ddf61f650b75fbdb1
|
Python
|
goldan/pytils
|
/pytils/test/test_utils.py
|
UTF-8
| 5,129
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
Unit-tests for pytils.utils
"""
import unittest
import pytils
import decimal
class ASPN426123TestCase(unittest.TestCase):
    """
    Test case for third-party library from ASPN cookbook recipe #426123
    This unit-test don't cover all code from recipe
    """
    # NOTE(review): assertEquals is the deprecated unittest alias of assertEqual;
    # kept as-is to preserve the file byte-for-byte.
    def testTakesPositional(self):
        # @takes enforces the declared argument types positionally.
        @pytils.utils.takes(int, str)
        def func(i, s):
            return i + len(s)
        self.assertEquals(func(2, 'var'), 5)
        self.assertEquals(func(2, 'var'), 5)
        self.assertRaises(pytils.err.InputParameterError, func, 2, 5)
        self.assertRaises(pytils.err.InputParameterError, func, 2, ('var',))
        self.assertRaises(pytils.err.InputParameterError, func, 'var', 5)
    def testTakesNamed(self):
        # A keyword-declared check (s=str) only accepts s passed by keyword.
        @pytils.utils.takes(int, s=str)
        def func(i, s):
            return i + len(s)
        self.assertEquals(func(2, s='var'), 5)
        self.assertEquals(func(2, s='var'), 5)
        self.assertRaises(pytils.err.InputParameterError, func, 2, 'var')
        self.assertRaises(pytils.err.InputParameterError, func, 2, 5)
        self.assertRaises(pytils.err.InputParameterError, func, 2, ('var',))
        self.assertRaises(pytils.err.InputParameterError, func, 'var', 5)
    def testTakesOptional(self):
        # optional(str) lets the argument be omitted, positional or keyword.
        @pytils.utils.takes(int,
                            pytils.utils.optional(str),
                            s=pytils.utils.optional(str))
        def func(i, s=''):
            return i + len(s)
        self.assertEquals(func(2, 'var'), 5)
        self.assertEquals(func(2, s='var'), 5)
        self.assertEquals(func(2, s='var'), 5)
        self.assertRaises(pytils.err.InputParameterError, func, 2, 5)
        self.assertRaises(pytils.err.InputParameterError, func, 2, ('var',))
        self.assertRaises(pytils.err.InputParameterError, func, 'var', 5)
    def testTakesMultiplyTypesAndTupleOf(self):
        # (int, int) allows alternative types; tuple_of(str) checks element types.
        @pytils.utils.takes((int, int),
                            pytils.utils.tuple_of(str))
        def func(i, t=tuple()):
            return i + sum(len(s) for s in t)
        self.assertEquals(func(2, ('var', 'var2')), 9)
        self.assertEquals(func(2, ('var', 'var2')), 9)
        self.assertEquals(func(2, t=('var', 'var2')), 9)
        self.assertEquals(func(2, t=('var', 'var2')), 9)
        self.assertRaises(pytils.err.InputParameterError, func, 2, (2, 5))
class ChecksTestCase(unittest.TestCase):
    """
    Test case for check_* utils
    """
    def testCheckLength(self):
        """
        Unit-test for pytils.utils.check_length
        """
        # check_length returns None on success and raises on any mismatch.
        self.assertEquals(pytils.utils.check_length("var", 3), None)
        self.assertRaises(ValueError, pytils.utils.check_length, "var", 4)
        self.assertRaises(ValueError, pytils.utils.check_length, "var", 2)
        self.assertRaises(ValueError, pytils.utils.check_length, (1,2), 3)
        self.assertRaises(TypeError, pytils.utils.check_length, 5)
    def testCheckPositive(self):
        """
        Unit-test for pytils.utils.check_positive
        """
        # Non-strict mode accepts zero; strict=True rejects it.
        self.assertEquals(pytils.utils.check_positive(0), None)
        self.assertEquals(pytils.utils.check_positive(1), None)
        self.assertEquals(pytils.utils.check_positive(1, False), None)
        self.assertEquals(pytils.utils.check_positive(1, strict=False), None)
        self.assertEquals(pytils.utils.check_positive(1, True), None)
        self.assertEquals(pytils.utils.check_positive(1, strict=True), None)
        self.assertEquals(pytils.utils.check_positive(decimal.Decimal("2.0")), None)
        self.assertEquals(pytils.utils.check_positive(2.0), None)
        self.assertRaises(ValueError, pytils.utils.check_positive, -2)
        self.assertRaises(ValueError, pytils.utils.check_positive, -2.0)
        self.assertRaises(ValueError, pytils.utils.check_positive, decimal.Decimal("-2.0"))
        self.assertRaises(ValueError, pytils.utils.check_positive, 0, True)
class SplitValuesTestCase(unittest.TestCase):
    def testClassicSplit(self):
        """
        Unit-test for pytils.utils.split_values, classic split
        """
        # Whitespace (including newlines) around items is stripped.
        self.assertEquals(("Раз", "Два", "Три"), pytils.utils.split_values("Раз,Два,Три"))
        self.assertEquals(("Раз", "Два", "Три"), pytils.utils.split_values("Раз, Два,Три"))
        self.assertEquals(("Раз", "Два", "Три"), pytils.utils.split_values(" Раз, Два, Три "))
        self.assertEquals(("Раз", "Два", "Три"), pytils.utils.split_values(" Раз, \nДва,\n Три "))
    def testEscapedSplit(self):
        """
        Unit-test for pytils.utils.split_values, split with escaping
        """
        # A backslash-escaped comma stays inside the value instead of splitting.
        self.assertEquals(("Раз,Два", "Три,Четыре", "Пять,Шесть"), pytils.utils.split_values("Раз\,Два,Три\,Четыре,Пять\,Шесть"))
        self.assertEquals(("Раз, Два", "Три", "Четыре"), pytils.utils.split_values("Раз\, Два, Три, Четыре"))
# Allow running this test module directly: python test_utils.py
if __name__ == '__main__':
    unittest.main()
| true
|
44054cf9c1dbd5b2698d5f0f081e2d97b3f51265
|
Python
|
suryakencana007/kensaku
|
/rak/example-mongo.py
|
UTF-8
| 2,053
| 2.59375
| 3
|
[] |
no_license
|
"""
# Copyright (c) 11 2014 | surya
# 17/11/14 nanang.ask@kubuskotak.com
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# example-mongo.py
"""
import os
from whoosh.fields import Schema, ID, KEYWORD, TEXT
from whoosh.index import create_in
from whoosh.query import Term
# NOTE(review): pymongo.Connection was removed in pymongo 3.x in favour of
# MongoClient -- this script targets an old pymongo; confirm before reuse.
from pymongo import Connection
from bson.objectid import ObjectId
# Set index, we index title and content as texts and tags as keywords.
# We store inside index only titles and ids.
schema = Schema(title=TEXT(stored=True), content=TEXT,
                nid=ID(stored=True), tags=KEYWORD)
# Create index dir if it does not exists.
if not os.path.exists("index"):
    os.mkdir("index")
# Initialize index
index = create_in("index", schema)
# Initiate db connection
connection = Connection('localhost', 27017)
db = connection["cozy-home"]
posts = db.posts
# Fill index with posts from DB
writer = index.writer()
for post in posts.find():
    # NOTE(review): update_document normally requires a field declared
    # unique=True in the schema; nid is a plain ID here -- confirm this
    # does not raise for whoosh's version in use.
    writer.update_document(title=post["title"],
                           content=post["content"],
                           nid=str(post["_id"]),
                           tags=post["tags"])
writer.commit()
# Search inside index for post containing "test", then it displays
# results.
with index.searcher() as searcher:
    result = searcher.search(Term("content", u"test"))[0]
    post = posts.find_one(ObjectId(result["nid"]))
    print(result["title"])
    print(post["content"])
| true
|
277d54c6dd709bea99cc2d9d8315910e5af524d4
|
Python
|
Madhur0/MGNREGA
|
/venv/src/run.py
|
UTF-8
| 1,746
| 3.296875
| 3
|
[] |
no_license
|
""" Initial file for stating the project."""
from block_development_officer import BlockDevelopmentOfficer
from gram_panchayat_member import GramPanchayatMember
from member import Member
from schema import Schema
import sqlite3
def sql_connection():
    """Open (or create) the application's SQLite database file.

    :return: an open ``sqlite3.Connection`` to ``database.db``
    """
    connection = sqlite3.connect('database.db')
    return connection
class Run:
    """Interactive login menu dispatching to the role-specific login flows."""

    def __init__(self, connection):
        # Shared sqlite3 connection handed to every role handler.
        self.conn = connection

    def login_menu(self):
        """Display login options and keep prompting until the user exits.

        :return: True
        """
        print("\n**** LOGIN MENU ****")
        print("1. Login as BDO \n2. Login as GPM \n3. Login as Member\n4. Exit")
        choice = input("Choose: ")
        # Role handlers are built lazily so no object is created unless chosen.
        handlers = {
            '1': lambda: BlockDevelopmentOfficer(self.conn).login_bdo(),
            '2': lambda: GramPanchayatMember(self.conn).login_gpm(),
            '3': lambda: Member(self.conn).login_member(),
        }
        if choice in handlers:
            handlers[choice]()
        elif choice == '4':
            print("\nExiting...")
            self.conn.close()
        else:
            print("\nWrong Input! Try again.")
        # Re-show the menu (by recursion) for any choice other than Exit.
        if choice != '4':
            self.login_menu()
        return True
def main():
    """Bootstrap the schema and start the login loop.

    :return: True on a clean run, False when sqlite raises.
    """
    try:
        conn = sql_connection()
        if conn is not None:
            Schema(conn).setup_admin()
            Schema(conn).create_tables()
            Run(conn).login_menu()
            conn.close()
        else:
            # Defensive branch: sqlite3.connect normally raises rather than
            # returning None, but the safeguard is kept.
            print("Error while connecting with database")
            print("Retry after sometime!!!")
        return True
    except sqlite3.Error as e:
        print(type(e), ": ", e)
        return False
# Script entry point.
if __name__ == "__main__":
    main()
| true
|
775842cce700a5ce8c6cebefc8d6a4fec794fcb7
|
Python
|
Chetchita641/QT_Tutorials
|
/advancedpyqt5/examples/modelview/modelindex.py
|
UTF-8
| 2,178
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
ZetCode Advanced PyQt5 tutorial
In this example, we work with QModelIndex
and QTreeView.
Author: Jan Bodnar
Website: zetcode.com
Last edited: August 2017
'''
from PyQt5.QtWidgets import (QWidget, QApplication, QTreeView,
QVBoxLayout, QAbstractItemView, QLabel)
from PyQt5.QtGui import QStandardItemModel, QStandardItem
import sys
data = ( ["Jessica Alba", "Pomona", "1981"],
["Angelina Jolie", "New York", "1975"],
["Natalie Portman", "Yerusalem", "1981"],
["Scarlett Jonahsson", "New York", "1984"] )
class Example(QWidget):
    """Window with a table of actresses; clicking a row echoes it in a label."""

    def __init__(self):
        super().__init__()

        self.setGeometry(300, 300, 350, 250)
        self.setWindowTitle("Actresses")

        self.initData()
        self.initUI()

    def initData(self):
        """Build the item model from the module-level ``data`` tuple."""
        self.model = QStandardItemModel()
        self.model.setHorizontalHeaderLabels(("Name", "Place", "Born"))

        for row in data:
            # One QStandardItem per cell, appended as a whole row.
            self.model.appendRow([QStandardItem(cell) for cell in row])

    def initUI(self):
        """Create the tree view, label and layout, and wire the click handler."""
        tv = QTreeView(self)
        tv.setRootIsDecorated(False)
        tv.setModel(self.model)
        tv.setSelectionBehavior(QAbstractItemView.SelectRows)

        self.label = QLabel(self)

        layout = QVBoxLayout()
        layout.addWidget(tv)
        layout.addWidget(self.label)
        self.setLayout(layout)

        tv.clicked.connect(self.onClicked)

    def onClicked(self, idx):
        """Show the clicked row's cells, comma-separated, in the label."""
        row = idx.row()
        texts = [self.model.item(row, col).text()
                 for col in range(self.model.columnCount())]
        self.label.setText(", ".join(texts))
# Start the Qt application and enter its event loop.
app = QApplication([])
ex = Example()
ex.show()
sys.exit(app.exec_())
| true
|
242590806441f1028cad9f22db706671fa613550
|
Python
|
krishnadhara/programs-venky
|
/decorators_py/func_dec.py
|
UTF-8
| 384
| 3.546875
| 4
|
[] |
no_license
|
def smart_division(func):
    """Decorator guarding a two-argument division-like function.

    Prints a trace of the call; if the second argument is 0 it prints a
    warning and returns None instead of calling ``func``.
    """
    from functools import wraps

    # functools.wraps preserves the wrapped function's __name__ and
    # __doc__, which the original bare wrapper lost.
    @wraps(func)
    def inner(a, b):
        print("i am going to divide", a, "and", b)
        if b == 0:
            print("b canot be divided by zero")
            return
        return func(a, b)
    return inner
# Plain true division; smart_division short-circuits to None when b == 0.
@smart_division
def division(a,b):
    return a/b
# Commented-out experiment kept as a string literal (evaluated and
# discarded at import time): manual application of the decorator.
'''val = smart_division(division)
print(val)
babu = val(10,0)
print(babu)'''
# division is already decorated, so this prints the trace and then 0.5.
val = division(10,20)
print(val)
| true
|
0297a1268aeb6d959b4a46065fb3e97b37f99c7b
|
Python
|
Roberto-Mota/CursoemVideo
|
/exercicios/ex087.py
|
UTF-8
| 2,261
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
# Desafio 087 -> Aprimore o desafio anterior, mostrando no final:
# A) A soma de todos os valores pares digitados.
# B)A soma dos valores da terceira coluna
# C)O maior valor da segunda coluna
# 3x3 matrix built row by row from user input; tracks the sum of even
# values, the sum of the third column and the maximum of the second column.
posição = [[], [], []]
linha = 0
coluna = 0
soma_par = 0
soma_coluna3 = 0
maior_coluna2 = 0
for pos, valor in enumerate(range(0, 9)):
    # Restart the row counter every three inputs.
    if pos % 3 == 0:
        linha = 0
    linha += 1
    valor = int(input(f'Digite um valor para a Coluna ({coluna+1}), Linha ({linha}):'))
    if valor % 2 == 0:
        soma_par += valor
    # Positions 1, 4 and 7 form the second column.
    # BUGFIX: the original `pos == 4 or pos == 7 and valor > maior_coluna2`
    # parsed as `pos == 4 or (pos == 7 and ...)`, so the value at pos 4
    # always overwrote the running maximum.
    if pos == 1:
        maior_coluna2 = valor
    if (pos == 4 or pos == 7) and valor > maior_coluna2:
        maior_coluna2 = valor
    if 0 <= pos <= 2:
        posição[0].append(valor)
        if pos == 2:
            coluna = 1
    elif 3 <= pos <= 5:
        posição[1].append(valor)
        if pos == 5:
            coluna = 2
    elif 6 <= pos <= 8:
        posição[2].append(valor)
# Third-column sum computed once, after all nine values are stored
# (inside the loop it could index rows that were still incomplete).
soma_coluna3 = posição[0][2] + posição[1][2] + posição[2][2]
print(f'.____.____.____.\n'
      f'|{posição[0][0]:^4}|{posição[0][1]:^4}|{posição[0][2]:^4}|\n'
      f'|____|____|____|\n'
      f'|{posição[1][0]:^4}|{posição[1][1]:^4}|{posição[1][2]:^4}|\n'
      f'|____|____|____|\n'
      f'|{posição[2][0]:^4}|{posição[2][1]:^4}|{posição[2][2]:^4}|\n'
      f'|____|____|____|\n')
print('-=-'*10)
print(f'A soma de todos os valores pares digitados: {soma_par}.\n'
      f'A soma dos valores da terceira coluna: {soma_coluna3}\n'
      f'O maior valor da segunda coluna: {maior_coluna2}')
#Jeito do professor
# Reference solution: read a 3x3 matrix, then derive the three statistics.
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
spar = mai = scol = 0
for l in range(0, 3):
    for c in range(0, 3):
        matriz[l][c] = int(input(f'Digite um valor para a posição [{c},{l}]: '))
print('-=-'*20)
# Print the matrix and accumulate the even values at the same time.
for l in range(0, 3):
    for c in range(0, 3):
        print(f'[{matriz[l][c]:^4}]', end='')
        if matriz[l][c] % 2 == 0:
            spar += matriz[l][c]
    print()
print(f'A soma dos pares é: {spar}')
# Sum of the third column (index 2 of every row).
for l in range(0, 3):
    scol += matriz[l][2]
print(f'A soma dos valores da terceira coluna: {scol}')
# Maximum of the second row; c == 0 seeds the maximum unconditionally.
for c in range(0, 3):
    if c == 0 or matriz[1][c] > mai:
        mai = matriz[1][c]
print(f'O maior valor da segunda linha é: {mai}')
| true
|
9ab7485c142dea162a0f202654570032071551a5
|
Python
|
yurigsilva/modulo_introducao_python_e_programacao_python_full
|
/23_listas.py
|
UTF-8
| 87
| 3.078125
| 3
|
[] |
no_license
|
# Assignment vs. copy: y aliases the same list object as x,
# while z is a new (shallow) copy.
x = [1, 2, 3]
y = x
z = x.copy()
# x and y print the same id; z has its own.
print(hex(id(x)))
print(hex(id(y)))
print(hex(id(z)))
| true
|
750cf2ba192a6334f69ef5c806dc015e0edb5e9e
|
Python
|
wlh320/shuaOJ
|
/AdventOfCode/2020/day21/21.py
|
UTF-8
| 1,287
| 2.6875
| 3
|
[] |
no_license
|
from collections import defaultdict
from copy import deepcopy
# Puzzle input: one food per line, e.g. "a b c (contains dairy, fish)".
lines = open('input').readlines()
def split_input(line):
    """Parse one food line into (ingredients, allergens).

    ``"a b (contains x, y)"`` -> (['a', 'b'], ['x', 'y'])
    """
    ingredients_part, allergens_part = line.strip().split('(contains ')
    ingredients = ingredients_part.split()
    # Drop the trailing ')' before splitting the allergen list.
    allergens = allergens_part[:-1].split(', ')
    return ingredients, allergens
# Part 1: count appearances of ingredients that cannot contain any allergen.
Gs = set()   # all ingredients
As = set()   # all allergens
cnt = defaultdict(int)   # ingredient -> number of foods it appears in
for line in lines: # count
    gres, als = split_input(line)
    Gs |= set(gres)
    As |= set(als)
    for g in gres:
        cnt[g] += 1
# notin[g]: allergens that g provably cannot contain.
notin = {g: set() for g in Gs}
for line in lines:
    gres, als = split_input(line)
    for a in als: # this al
        for g in Gs:
            if g not in gres: # ingrediants not in this line must have no this al
                notin[g].add(a)
ans = 0
for g in Gs:
    # Excluded from every allergen -> allergen-free ingredient.
    if notin[g] == As:
        ans += cnt[g]
print(ans)
# 2
# just like this year's another problem that match ticket field
# suspect[g]: allergens g may still carry; peel off singletons until
# every allergen is uniquely assigned.
suspect = {g: deepcopy(As)-notin[g] for g in Gs if notin[g] != As}
kv = []
while len(kv) != len(As):
    for g in suspect:
        if len(suspect[g]) == 1:
            kv.append((g, suspect[g].pop()))
            for gg in suspect:
                if g != gg:
                    if kv[-1][-1] in suspect[gg]:
                        suspect[gg].remove(kv[-1][-1])
# Canonical dangerous ingredient list: ingredients sorted by allergen name.
ans2 = ','.join(map(lambda x: x[0], sorted(kv, key=lambda x: x[1])))
print(ans2)
| true
|
016d45ae3c7c74966b720b451ac9fb71ad6c953c
|
Python
|
pariyapariya/pariya_GW_HW_Python
|
/Part-1-Mini-Assignment/02-HW_CerealCleaner/solved_pariya/cereal_solved.py
|
UTF-8
| 348
| 3.03125
| 3
|
[] |
no_license
|
import os
import csv
# Path is resolved relative to the script's working directory.
cereal_csv = os.path.join('..','Resources', 'cereal.csv')
with open(cereal_csv) as csv_file:
    csv_reader = csv.reader(csv_file)
    # Skip the header row (None avoids StopIteration on an empty file).
    next(csv_reader, None)
    for x in csv_reader:
        # Column 7 holds the fiber content; report cereals with 5+.
        if float(x[7]) >= 5:
            print(x[0] + ' has ' + x[7] + ' of Fiber.')
| true
|
bb93f0b4b259da7469e20fe28f5cbf26b81530c3
|
Python
|
kapilnegi67/PythonSeleniumDemo
|
/src/test_folder/raise_exception.py
|
UTF-8
| 226
| 3.53125
| 4
|
[] |
no_license
|
# Demo: raise an exception explicitly and catch it by type.
a = int(input("Enter a dividend: "))
try:
    if a == 0:
        raise ZeroDivisionError("Can not divide by number 0")
    # raise ValueError("That is not a positive number!")
except ZeroDivisionError as ve:
    # Printing the exception shows its message.
    print(ve)
| true
|
caec599dce7de63937b4de300c978a0ddb9a7960
|
Python
|
fedebrest/curso_python
|
/Resoluciones/TP 3/simpson.py
|
UTF-8
| 264
| 3.953125
| 4
|
[] |
no_license
|
# Collect `cantidad` character names typed by the user into a list.
simpson=[]
cantidad=int(input("ingrese la cantidad de personajes que reconozcas: "))
for i in range(cantidad):
    personaje=input("ingrese el nombre del personaje que reconozca: ")
    simpson.append(personaje)
print("Los personajes reconocidos son:", simpson)
| true
|
291fdf6e87e2c3189ec318e1b05408053a361058
|
Python
|
chatterjeelab2022/VADER
|
/v2_plotting_tools.py
|
UTF-8
| 6,674
| 3.03125
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
import numpy as np
import math
def heat_map(data, colormap, ** keywords):
    """Stub for a future heat-map plot; currently does nothing and
    returns None.  The planned keywords are listed below."""
    # keywords
    # colormap
    # Title
    # X_title
    # Y_title
    # X_labels
    # Y_labels
    # FigSize
    return None
def scatter_plot(x_data, y_data, **keywords): #expects list for y_data
    '''Plot one or more y-series against a shared x-axis as markers.

    keywords:
    Colors = 'rainbow' 'ACGT' 'AGTN'
    Title
    X_title
    Y_title
    FigSize
    Y_log
    Returns (fig, ax).
    '''
    # colors: named presets expand to concrete matplotlib color lists
    Colors = keywords.pop('Colors','rainbow')
    if Colors == 'rainbow':
        color_list = ['xkcd:red','xkcd:orange','xkcd:yellow',
                      'xkcd:green','xkcd:blue','xkcd:purple']
        Colors = color_list[:len(y_data)]
    elif Colors == 'ACGT':
        Colors = ['g','b','0','r']
    elif Colors == 'ACGTN':
        Colors = ['g','b','0','r','0.6']
    # plotting keywords
    FigSize = keywords.pop('FigSize',None)
    Marker = keywords.pop('Marker','o')
    MarkerSize = keywords.pop('MarkerSize',5)
    LineStyle = keywords.pop('LineStyle','')
    Y_log = keywords.pop('Y_log',False)
    # plot each series, find bounds
    y_min, y_max = 1, 0
    x_min, x_max = min(x_data), max(x_data)
    fig, ax = plt.subplots(figsize = FigSize)
    for y_series, color in zip (y_data, Colors):
        y_series_min, y_series_max = min(y_series), max(y_series)
        if y_series_min < y_min:
            y_min = y_series_min
        if y_series_max > y_max:
            y_max = y_series_max
        ax.plot(x_data, y_series, marker=Marker, markersize=MarkerSize, markerfacecolor=color,
                linestyle=LineStyle, markeredgewidth=0)
    # Axis bounds: pad by ~5% around the observed data range.
    # NOTE(review): int() truncation in the bound math behaves oddly for
    # negative minima (e.g. int(y_min) - int(y_min)*0.05) -- confirm the
    # intended padding for negative-valued data.
    if Y_log:
        lower_bound_y = 10**(int(math.log10(y_min))-1)
        upper_bound_y = 10**(int(math.log10(y_max)))
        ax.set_yscale('log')
    else:
        lower_bound_y = int(y_min) - int(y_min)*0.05
        upper_bound_y = int(y_max)*1.05
        if upper_bound_y < y_max:
            upper_bound_y = int(y_max)+1
        if lower_bound_y == 0:
            lower_bound_y = upper_bound_y*-0.05
    lower_bound_x = int(x_min) - int(x_min)*0.05
    upper_bound_x = int(x_max) + int(x_max)*0.05
    if upper_bound_x < x_max:
        upper_bound_x = int(x_max)+1
    if lower_bound_x == 0:
        lower_bound_x = upper_bound_x*-0.05
    ax.set_ylim(lower_bound_y, upper_bound_y)
    ax.set_xlim(lower_bound_x, upper_bound_x)
    # axis labels and titles
    Title = keywords.pop('Title',None)
    if Title:
        ax.set_title(Title)
    X_title = keywords.pop('X_title',None)
    if X_title:
        ax.set_xlabel(X_title)
    Y_title = keywords.pop('Y_title',None)
    if Y_title:
        ax.set_ylabel(Y_title)
    plt.tight_layout()
    return fig, ax
def bar_chart(category_labels, data_series, ** keywords):
    '''Grouped bar chart: one bar per series within each category.

    keywords:
    Colors = 'rainbow' 'ACGT' 'AGTN'
    Title
    X_title
    Y_title
    FigSize
    Y_log
    Returns (fig, ax).
    '''
    # colors: named presets expand to concrete matplotlib color lists
    Colors = keywords.pop('Colors','rainbow')
    if Colors == 'rainbow':
        color_list = ['xkcd:red','xkcd:orange','xkcd:yellow',
                      'xkcd:green','xkcd:blue','xkcd:purple']
        Colors = color_list[:len(data_series)]
    elif Colors == 'ACGT':
        Colors = ['g','b','0.2','r']
    elif Colors == 'ACGTN':
        Colors = ['g','b','0.2','r','0.6']
    # plotting keywords (Marker/MarkerSize/LineStyle are popped for API
    # symmetry with scatter_plot but are not used by bar charts)
    FigSize = keywords.pop('FigSize',None)
    Marker = keywords.pop('Marker','o')
    MarkerSize = keywords.pop('MarkerSize',5)
    LineStyle = keywords.pop('LineStyle','')
    Y_log = keywords.pop('Y_log',False)
    # plot each series, offset within each category slot
    y_max = 0
    x_pos = np.arange(len(data_series[0]))
    series_count = len(data_series) + 1
    x_pos_subs = np.arange(0,1,1/series_count)
    # BUGFIX: FigSize was popped but never passed to subplots(), so the
    # documented keyword was silently ignored (scatter_plot honours it).
    fig, ax = plt.subplots(figsize = FigSize)
    for series, Color, x_pos_sub in zip(data_series, Colors, x_pos_subs):
        plt.bar(x_pos + x_pos_sub, series, color=Color, edgecolor='0',
                width=(1/len(x_pos_subs)), align='edge')
        if max(series) > y_max:
            y_max = max(series)
    if Y_log:
        ax.set_yscale('log')
    # axis labels and titles
    Title = keywords.pop('Title',None)
    if Title:
        ax.set_title(Title)
    X_title = keywords.pop('X_title',None)
    if X_title:
        ax.set_xlabel(X_title)
    Y_title = keywords.pop('Y_title',None)
    if Y_title:
        ax.set_ylabel(Y_title)
    # x axis labels centered under each category's group of bars
    x_label_pos = []
    for x in x_pos:
        x_label_pos.append(x+0.5-(1/len(x_pos_subs)/2))
    ax.set_xticks(x_label_pos)
    ax.set_xticklabels(category_labels)
    # add error bar capability, log capability
    plt.tight_layout()
    return fig, ax
def stacked_bar_chart(category_labels, data_series, ** keywords):
    '''Stacked bar chart: series are stacked on top of each other per category.

    keywords:
    Colors = 'rainbow' 'ACGT' 'AGTN'
    Title
    X_title
    Y_title
    FigSize
    Y_log
    Returns (fig, ax).
    '''
    # colors: named presets expand to concrete matplotlib color lists
    Colors = keywords.pop('Colors','rainbow')
    if Colors == 'rainbow':
        color_list = ['xkcd:red','xkcd:orange','xkcd:yellow',
                      'xkcd:green','xkcd:blue','xkcd:purple']
        Colors = color_list[:len(data_series)]
    elif Colors == 'ACGT':
        Colors = ['g','b','0.2','r']
    elif Colors == 'ACGTN':
        Colors = ['g','b','0.2','r','0.6']
    # plotting keywords (Marker/MarkerSize/LineStyle are popped for API
    # symmetry with scatter_plot but are not used by bar charts)
    FigSize = keywords.pop('FigSize',None)
    Marker = keywords.pop('Marker','o')
    MarkerSize = keywords.pop('MarkerSize',5)
    LineStyle = keywords.pop('LineStyle','')
    Y_log = keywords.pop('Y_log',False)
    # plot each series on top of the accumulated bottoms
    x_pos = np.arange(len(data_series[0]))
    bottoms = list(np.zeros(len(data_series[0])))
    # BUGFIX: FigSize was popped but never passed to subplots(), so the
    # documented keyword was silently ignored (scatter_plot honours it).
    fig, ax = plt.subplots(figsize = FigSize)
    for series, Color in zip(data_series, Colors):
        plt.bar(x_pos, series, color=Color, edgecolor='0', bottom=bottoms)
        for d in range(len(series)):
            bottoms[d] += series[d]
    # BUGFIX: Y_log was popped but never applied; honour it like the
    # other plotting helpers do.
    if Y_log:
        ax.set_yscale('log')
    # axis labels and titles
    Title = keywords.pop('Title',None)
    if Title:
        ax.set_title(Title)
    X_title = keywords.pop('X_title',None)
    if X_title:
        ax.set_xlabel(X_title)
    Y_title = keywords.pop('Y_title',None)
    if Y_title:
        ax.set_ylabel(Y_title)
    # x axis labels
    ax.set_xticks(x_pos)
    ax.set_xticklabels(category_labels)
    plt.tight_layout()
    return fig, ax
if __name__ == "__main__":
    # Smoke test: three x positions, four y-series, small linear-scale figure.
    test_x_data = [10,20,30]
    test_y_data = [[1,2,3],[4,5,6],[7,7,7],[1,1,6]]
    fig, ax = scatter_plot(test_x_data, test_y_data, FigSize=(4, 2), Y_log=False)
| true
|
392304d39b990da1668bb8fc41a1a484e7e09a39
|
Python
|
17621251436/leetcode_program
|
/1数组/offer 矩阵中的路径.py
|
UTF-8
| 652
| 2.8125
| 3
|
[] |
no_license
|
class Solution:
    def exist(self, board: List[List[str]], word: str) -> bool:
        """Return True if `word` can be traced in `board` by moving
        up/down/left/right without reusing a cell (DFS + backtracking).
        """
        def dfs(i, j, k):
            # Fail on: out of bounds, visited cell ('/'), or char mismatch.
            # BUGFIX: the column check used len(board) (row count); for
            # non-square boards it must be len(board[0]) (column count).
            if not 0 <= i < len(board) or not 0 <= j < len(board[0]) or board[i][j] != word[k]:
                return False
            if k == len(word) - 1:
                return True
            # Mark visited, explore the four neighbours, then restore.
            tmp, board[i][j] = board[i][j], '/'
            res = dfs(i+1, j, k+1) or dfs(i, j+1, k+1) or dfs(i-1, j, k+1) or dfs(i, j-1, k+1)
            board[i][j] = tmp
            return res

        for i in range(len(board)):
            for j in range(len(board[0])):
                if dfs(i, j, 0):
                    return True
        return False
| true
|
387097535d42180522d5336ce4efe33a721be3e9
|
Python
|
gomkyungmin/KMTNet_MVC
|
/LoadData.py
|
UTF-8
| 2,892
| 2.609375
| 3
|
[] |
no_license
|
from os.path import dirname
from os.path import join
import numpy as np
from astropy.io import fits
from sklearn import preprocessing
class Bunch(dict):
    """Container object for datasets: a dict whose entries are also
    readable and writable as attributes."""

    def __init__(self, **kwargs):
        super(Bunch, self).__init__(**kwargs)
        # Aliasing __dict__ to the dict itself makes attribute access and
        # item access share the same storage.
        self.__dict__ = self
def LoadData(**args):
    """Load a FITS catalog and selected features into a Bunch.

    Expected args: 'data_file' (FITS catalog), 'feature_file' (CSV of
    feature names), 'class_cut' (CLASS_STAR threshold for the binary
    target).  NOTE: this module uses Python 2 print statements.
    """
    module_path = dirname(__file__)
    base_dir = module_path
    print "Data is loaded from %s" % (join(base_dir,args['data_file']))
    # HDU index 2 holds the catalog table in this data product.
    hdulist = fits.open(join(base_dir,args['data_file']))
    catalog_data = hdulist[2].data
    classes_original = catalog_data['CLASS_STAR']
    # Binary target: 1 where CLASS_STAR >= class_cut, else 0.
    # NOTE(review): np.int is removed in NumPy >= 1.24 -- this pins an
    # old NumPy; confirm before upgrading.
    classes_filtered = classes_original >= args['class_cut']
    target = classes_filtered.astype(np.int)
    features = np.genfromtxt(join(base_dir, args['feature_file']), delimiter=',', dtype=str)
    print "# of data: %d, # of features: %d" % (len(catalog_data),len(features))
    print "features:"
    print features
    # Assemble an (n_samples, n_features) matrix one column at a time.
    for j,feature in enumerate(features):
        if j == 0:
            flat_data = catalog_data[feature].reshape((len(catalog_data),1))
        else:
            flat_data = np.append(flat_data,catalog_data[feature].reshape((len(catalog_data),1)),1)
    return Bunch(features=features,\
                 data=flat_data,\
                 target=target)
def DataScaler(X_train, X_test, scaler):
    """Scale features: fit on the training set, then apply to both sets.

    scaler: 'Standard' (zero mean / unit variance) or 'MinMax' ([0, 1]).
    Returns the scaled (X_train, X_test) pair.
    """
    if scaler == 'Standard':
        scaler = preprocessing.StandardScaler()
    elif scaler == 'MinMax':
        scaler = preprocessing.MinMaxScaler()
    scaled_train = scaler.fit_transform(X_train)
    scaled_test = scaler.transform(X_test)
    return scaled_train, scaled_test
def DataGeneration(samples,**args):
    """Split `samples` (a Bunch with .data/.target) into train and test
    partitions and optionally scale the feature matrices.

    Expected args: 'training_size' (fraction), 'training_part'
    ('first' or 'second' half used for training), 'scaler' (or None),
    optional 'param_search_file' (suppresses the progress prints).
    NOTE(review): under Python 3, len(...) * training_size is a float and
    would fail as a slice index -- this module is Python 2 (print
    statements); keep that in mind when porting.
    """
    X = samples.data
    y = samples.target
    train_samples = len(samples.data)*args['training_size']
    if args['training_part'] == 'first':
        X_train = X[:train_samples]
        X_test = X[train_samples:]
        y_train = y[:train_samples]
        y_test = y[train_samples:]
    elif args['training_part'] == 'second':
        X_train = X[train_samples:]
        X_test = X[:train_samples]
        y_train = y[train_samples:]
        y_test = y[:train_samples]
    dataset = Bunch(X_train=X_train,\
                    X_test=X_test,\
                    y_train=y_train,\
                    y_test=y_test)
    # Preprocessing (Scaling) for X_train and X_test
    if args['scaler'] is not None:
        if 'param_search_file' in args:
            pass
        else:
            print "\nA scaler, %s, is applied in data generation." % args['scaler']
        dataset.X_train, dataset.X_test\
            = DataScaler(dataset.X_train, dataset.X_test, args['scaler'])
    else:
        if 'param_search_file' in args:
            pass
        else:
            print "\nNo scaler is applied in data generation."
    return dataset
| true
|
f4cefb05a7e40ea8e86ef22debee7f864960e933
|
Python
|
NathanHenrySchneider/personalForFun
|
/CellularAutomata/brians_brain.py
|
UTF-8
| 4,738
| 2.984375
| 3
|
[] |
no_license
|
#Brian's Brain
import time
# Cell states for Brian's Brain. (`global` at module level is a no-op;
# kept as in the original.)
global on
on = 1
global dying
dying = 2
global off
off = 3
import colorama
import sys
import random
def print_finite_map():
    """Render the latest map state: 'X' = on, '*' = dying, blank = off."""
    symbols = {on: "X", dying: "*", off: " "}
    latest = map_instances[-1]
    width = x_bound + 1
    rows = []
    for y in range(y_bound + 1):
        row = " "
        for x in range(width):
            # Unknown states render as empty, matching the original's
            # fall-through behaviour.
            row += symbols.get(latest[x + y * width], "") + " "
        rows.append(row)
    print("\n".join(rows) + "\n")
def generate_initial_table(l, side_one, side_two):
    """Initialise the global grid (side_one columns x side_two rows) with
    every cell off, then switch on the cells listed in `l` ([x, y] pairs).

    Invalid entries/points are reported and skipped.  Resets the global
    map_instances history and appends the initial map to it.
    """
    global map_instances
    map_instances = []
    global x_bound
    x_bound = side_one - 1
    global y_bound
    y_bound = side_two - 1
    global finite_map
    finite_map = [off] * (side_one * side_two)
    global len_map
    len_map = len(finite_map)
    if(type(l) != type([])):
        return
    if(len(l) == 0):
        return
    for point in l:
        if (type(point) != type([]) or len(point) != 2):
            print("invalid entry: "+str(point))
            continue
        x = point[0]
        y = point[1]
        if (x > x_bound or y > y_bound):
            print("invalid point: "+str(point))
        else:
            # BUGFIX: rows are (x_bound + 1) == side_one cells wide; the
            # original indexed with (y_bound + 1) * y, which corrupts the
            # layout whenever side_one != side_two.
            finite_map[x + ((x_bound + 1) * y)] = on
    map_instances.append(finite_map)
def generate_initial_table_square(l, square):
    # Convenience wrapper for a square (square x square) grid.
    generate_initial_table(l, square, square)
def generate_initial_table_random(side):
    """Seed a side x side grid where each cell is on with probability 1/2
    (randint(0, 3) is even for 0 and 2)."""
    random_points = []
    x = 0
    while(x < side):
        y = 0
        while (y < side):
            rand_num = random.randint(0,3)
            if (rand_num % 2 == 0):
                random_points.append([x,y])
            y += 1
        x += 1
    generate_initial_table(random_points,side,side)
def find_score_for_point(point):
    """Return (score, value) for the cell at flat index `point`:
    score = number of 'on' neighbours (8-neighbourhood, non-wrapping),
    value = the cell's current state in the latest map."""
    x = point % (x_bound + 1)
    # NOTE: true division; y is only used in ==/!= comparisons against
    # ints, where equal values compare equal.
    y = (point - x) / (x_bound + 1)
    current_map = map_instances[len(map_instances) - 1]
    value = current_map[point]
    score = 0
    if (y != 0):
        top = current_map[point - x_bound - 1]
        if (top == on):
            score += 1
    if (x != x_bound and y != 0):
        top_right = current_map[point - x_bound]
        if (top_right == on):
            score += 1
    if (x != x_bound):
        right = current_map[point + 1]
        if (right == on):
            score += 1
    if (x != x_bound and y != y_bound):
        bottom_right = current_map[point + x_bound + 2]
        if (bottom_right == on):
            score += 1
    if (y != y_bound):
        bottom = current_map[point + x_bound + 1]
        if (bottom == on):
            score += 1
    if (y != y_bound and x != 0):
        bottom_left = current_map[point + x_bound]
        if (bottom_left == on):
            score += 1
    if (x != 0):
        left = current_map[point - 1]
        if (left == on):
            # BUGFIX: was `score += left`; that only worked because
            # on == 1.  Count consistently with the other branches.
            score += 1
    if (x != 0 and y != 0):
        top_left = current_map[point - x_bound - 2]
        if (top_left == on):
            score += 1
    return(score, value)
def new_value_from_score(point):
    """Apply the update rule: on -> dying -> off; off -> on when the
    neighbour score is high enough.

    NOTE(review): canonical Brian's Brain births a cell with exactly two
    'on' neighbours; this uses `score > 1` (two or more) -- confirm intended.
    """
    score_value = find_score_for_point(point)
    score = score_value[0]
    value = score_value[1]
    if (value == on):
        return dying
    if (value == dying):
        return off
    if (value == off and score >1):
        return on
    else:
        return off
def run_tick():
    """Compute the next generation from the latest map and append it to
    the map_instances history (maps are never mutated in place)."""
    temp_new_finite_map = [off] * ((x_bound + 1) * (y_bound + 1))
    n = 0
    while (n < len_map):
        temp_new_finite_map[n] = new_value_from_score(n)
        n += 1
    map_instances.append(temp_new_finite_map)
def run_cycle():
    """Animate the simulation in the terminal until no 'on' cells remain
    or 100 generations have run (one frame every 0.3 s)."""
    # Push previous output off-screen before the first frame.
    str_clear_screen = "\n" * (x_bound + 25)
    print(str_clear_screen)
    cycles_count = 0
    while (on in map_instances[cycles_count] and cycles_count < 100):
        #print("cycle:"+str(cycles_count))
        print_finite_map()
        run_tick()
        cycles_count += 1
        time.sleep(.3)
    # ANSI escape: move the cursor far down so "END" prints below the grid.
    sys.stdout.write("\033[1000000;3H"+"\n")
    print("\nEND\n")
#generate_initial_table_square([[25,20],[26,20],[25,21],[26,21],[25,22],[26,22],[25,23],[26,23],[25,24],[26,24],[25,25],[26,25],[25,26],[26,26],[25,27],[26,27],[25,28],[26,28],[25,29],[26,29],[25,30],[26,30], [12,12],[13,12],[12,13],[13,13],[37,37],[38,37],[37,38],[38,38],[37,12],[38,12],[37,13],[38,13],[12,37],[13,37],[12,38],[13,38]],52)
# Seed a random 25x25 grid and run the simulation.
generate_initial_table_random(25)
run_cycle()
#print("butt")
#cross looking thing [25,25],[26,25],[25,26],[26,26],[12,12],[13,12],[12,13],[13,13],[37,37],[38,37],[37,38],[38,38],[37,12],[38,12],[37,13],[38,13],[12,37],[13,37],[12,38],[13,38]
| true
|
120cc6b59c89cb9efbea5d955934a940ffd6f27a
|
Python
|
joaothomaz23/Basic_Python_Journey
|
/embaralha_string.py
|
UTF-8
| 302
| 3.953125
| 4
|
[] |
no_license
|
import random
def embaralhaString(a):
    """Return a new string with the characters of `a` in random order."""
    chars = list(a)
    random.shuffle(chars)
    return ''.join(chars)
# Read a string from the user and print a shuffled version of it.
val = input('Entre com uma string aleatoria: ')
aux = embaralhaString(val)
print(aux)
| true
|
002cb10ee3c772a3b05215a69ec734faf48b40b5
|
Python
|
abpocklington/AtomSortingAlgorithms
|
/AlgorithmTest.py
|
UTF-8
| 2,297
| 3.015625
| 3
|
[] |
no_license
|
import BalanceCompressAlgorithm
import Snake
import Hungarian
import MakeBoolArray
import datetime
import xlsxwriter
import matplotlib
from Animator import Animator
# these are the 'settings'
title = "practice"
ArrayDim = 10
TargetDim = 'max'
trials = 1
LoadProbability = .6
algorithm = 2
RecordData = False
MakeAnimation = True
# Algorithm number's:
# 1 -> Balance&Compress
# 2 -> Hungarian
# 3 -> snake
# Per-trial results. NOTE: `time` here is a list of elapsed microseconds
# and shadows the common stdlib module name.
time = []
fidelity = []
moves = []
StaticArray = []
DummyRow = []
DummyVar = True
i = 0
while i < trials:
    Array = MakeBoolArray.MakeBoolArray(ArrayDim,LoadProbability)
    # Manual deep copy of the generated array, kept for the animation,
    # since the sorting algorithms mutate Array in place.
    k = 0
    j = 0
    while j < ArrayDim:
        while k < ArrayDim:
            DummyVar = Array[j][k]
            DummyRow.append(DummyVar)
            k += 1
        StaticArray.append(DummyRow)
        DummyRow = []
        k = 0
        j += 1
    # Run the selected algorithm; each returns (duration, moves, fidelity).
    if algorithm == 1:
        placeholder = BalanceCompressAlgorithm.BalanceCompress(Array, ArrayDim,TargetDim)
    if algorithm == 2:
        placeholder = Hungarian.Hungarian(Array,ArrayDim,TargetDim)
    if algorithm == 3:
        placeholder = Snake.snake(Array,ArrayDim,TargetDim)
    # Total elapsed time in microseconds (timedelta fields combined).
    time.append(placeholder[0].microseconds + placeholder[0].seconds*(10**6))
    moves.append(placeholder[1])
    fidelity.append(placeholder[2])
    i += 1
# Optionally dump every trial (stats + move list) to a spreadsheet,
# three columns per trial.
if RecordData == True:
    i = 0
    j = 0
    workbook = xlsxwriter.Workbook(title)
    worksheet = workbook.add_worksheet()
    while i<trials:
        worksheet.write(0,3*i,"Trial:")
        worksheet.write(0,3*i + 1,i)
        worksheet.write(1,3*i,"Time:")
        worksheet.write(1,3*i + 1,time[i])
        worksheet.write(2,3*i,"Fidelity:")
        worksheet.write(2,3*i + 1,fidelity[i])
        worksheet.write(3,3*i, "Moves:")
        worksheet.write(3,3*i + 1,len(moves[i]))
        while j < len(moves[i]):
            # Pair moves are written as two cells; others as one.
            if len(moves[i][j]) == 2:
                worksheet.write(4+j,3*i,str(moves[i][j][0]))
                worksheet.write(4+j,3*i + 1,str(moves[i][j][1]))
            else:
                worksheet.write(4+j,3*i,moves[i][j])
            j += 1
        i += 1
        j = 0
    workbook.close()
# Optionally animate the first trial's move sequence.
if MakeAnimation == True:
    moves = moves[0]
    Animator(StaticArray,moves,ArrayDim)
| true
|
3a6b789120ab1a4e5387bb370b5309770154f361
|
Python
|
SlidingSteven/Portfolio
|
/NumericalAnalysis-lab1.py
|
UTF-8
| 2,222
| 3.953125
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
#not sure if the shebang above is necessary but I want to be as thorough as possible
#Name: Steven Tucker
#Date: 9/12/19
# 4x5 augmented matrix [A | b] for the linear system Ax = b.
arr = [ [1, 3, 2, 1, -2],
        [4, 2, 1, 2, 2],
        [2, 1, 2, 3, 1],
        [1, 2, 4, 1, -1]]
# function to print the matrix from GeeksForGeeks https://www.geeksforgeeks.org/take-matrix-input-from-user-in-python/
def printMat(mat):
    """Print an n x (n+1) augmented matrix, one row per line."""
    n = len(mat)
    for row in range(n):
        line = ""
        for col in range(n + 1):
            # Two trailing spaces per entry, matching print(value, " ").
            line += str(mat[row][col]) + "  "
        print(line)
#function to print the array
def printarr(arr):
    """Print the elements of a 1-D list, one per line-chunk, then newline."""
    pieces = [str(item) + "  " for item in arr]
    print("".join(pieces))
# Show the system before elimination.
print("Original Matrix:")
printMat(arr)
def NaiveGauss(arr):
    """Forward elimination on the augmented matrix `arr` (modified in
    place) toward upper-triangular form; returns `arr`.
    """
    # NOTE(review): this pre-pass compares every entry against arr[0][0]
    # and swaps whole rows each time the test fires; it is not standard
    # partial pivoting (which compares absolute values within one
    # column) -- confirm the intended pivoting strategy.
    for col in range(0,len(arr)):
        for row in range(0, len(arr)):
            if arr[col][row] < arr[0][0]:
                temp = arr[0]
                arr[0] = arr[col]
                arr[col]=temp
    # Eliminate below the diagonal, one pivot row at a time.
    for row in range(0, len(arr)-1):
        for col in range(row+1, len(arr)):
            alpha = - arr[col][row]/arr[row][row]
            for k in range (0, len(arr)+1):
                arr[col][k] = arr[col][k] + (alpha * arr[row][k])
    return(arr)
def BackwardsSubstitution(mat):
    """Solve an upper-triangular augmented system [U | b] by back substitution.

    mat: n rows of length n+1; column n is the right-hand side.
    Returns the solution vector as a list of length n.

    BUGFIX: the original only subtracted strictly positive coefficients
    (`if mat[i][col] > 0`), silently ignoring negative ones, and mutated
    `mat` while solving -- producing wrong answers.  This version applies
    the standard recurrence x[i] = (b[i] - sum(U[i][j]*x[j])) / U[i][i]
    and leaves `mat` untouched.
    """
    n = len(mat)
    solution = [0] * n
    for i in range(n - 1, -1, -1):
        total = mat[i][n]
        for col in range(i + 1, n):
            total -= mat[i][col] * solution[col]
        solution[i] = total / mat[i][i]
    return solution
arr1 = NaiveGauss(arr)
print("After NGE: ")
printMat(arr1)
end = BackwardsSubstitution(arr1)
print("Final Array: ")
printarr(end)
#---Output---
"""Original Matrix:
1 3 2 1 -2
4 2 1 2 2
2 1 2 3 1
1 2 4 1 -1
After NGE:
1 3 2 1 -2
0.0 -10.0 -7.0 -2.0 10.0
0.0 0.0 1.5 2.0 0.0
0.0 0.0 0.0 -3.4 0.0
Final Array:
1
-1.0
1
-0.0"""
| true
|
debe494df348b2fcdd14ea5f636ec472005052a5
|
Python
|
PexoDev/Python_2048
|
/main.py
|
UTF-8
| 2,638
| 3.171875
| 3
|
[] |
no_license
|
import GameManager
from os import system
import random
from Block import Block
map = []
freeSpots = []
isGameOver = False
def clearMap():
    """Populate the 4x4 grid with empty cells and register every
    coordinate as free.

    NOTE(review): this appends to the existing module-level lists rather
    than resetting them, so calling it twice would duplicate entries.
    """
    for i in range(4):
        m = []
        for j in range(4):
            m.append(None)
            freeSpots.append((i, j))
        map.append(m)
def generateBlocks(count):
    """Place `count` new blocks on randomly chosen free cells and mark
    those cells as occupied."""
    for i in range(count):
        block = Block();
        rand = random.randint(0,len(freeSpots)-1)
        spot = freeSpots[rand]
        map[spot[0]][spot[1]] = block
        freeSpots.remove(spot);
def move():
    """Read one key (w/a/s/d) and shift the board in that direction.

    NOTE(review): each block moves at most one cell per keypress (no
    full 2048-style sliding) -- confirm this is the intended mechanic.
    """
    def MoveBlocksProcess(i,j, direction):
        # Move or merge the block at (i, j) one step along `direction`.
        if map[i][j] is not None:
            # Stop at the board edges (4x4 grid, indices 0..3).
            if i + direction[0] > 3: return
            if j + direction[1] > 3: return
            if i + direction[0] < 0: return
            if j + direction[1] < 0: return
            nextI = i + direction[0]
            nextJ = j + direction[1]
            if (map[nextI][nextJ] is None):
                # Target cell empty: move the block and swap free spots.
                map[nextI][nextJ] = map[i][j]
                freeSpots.remove((nextI, nextJ))
                map[i][j] = None
                freeSpots.append((i, j))
            else:
                # Equal values merge (Stack); otherwise the block stays put.
                if map[nextI][nextJ].value == map[i][j].value:
                    map[nextI][nextJ].Stack(map[i][j])
                    map[i][j] = None
                    freeSpots.append((i, j))
                else:
                    return
    def MoveVertically(i,direction):
        # Traverse row i in the order that avoids double-moving a block.
        if direction[1] >= 0:
            for j in range(len(map[i])):
                MoveBlocksProcess(i, j, direction)
        else:
            for j in range(len(map[i]) - 1, -1, -1):
                MoveBlocksProcess(i, j, direction)
    def MoveBlocks(direction):
        # Traverse rows in the order that avoids double-moving a block.
        if direction[0] >= 0:
            for i in range(len(map)):
                MoveVertically(i,direction)
        else:
            for i in range(len(map)-1,-1,-1):
                MoveVertically(i,direction)
    # Direction vectors are [row delta, column delta].
    userInput = input()
    if userInput == 'd': MoveBlocks([0, 1])
    if userInput == 'a': MoveBlocks([0, -1])
    if userInput == 'w': MoveBlocks([-1, 0])
    if userInput == 's': MoveBlocks([1, 0])
def render():
    """Clear the (Windows) console and draw the board, one cell per [..]."""
    # Windows-specific console commands: resize, then clear the screen.
    system('mode con: cols=100 lines=40')
    system("cls")
    for i in range(len(map)):
        for j in range(len(map[i])):
            if(map[i][j] is not None):
                print("["+str(map[i][j].value)+"]", end='')
            else:
                print ("[ ]", end='')
        print("\n", end='')
# Main game loop: start with two blocks, then render/move/spawn until
# the board has no free cell left.
clearMap()
generateBlocks(2)
while not isGameOver:
    render()
    move()
    generateBlocks(1)
    if len(freeSpots) <= 0:
        isGameOver = True
# NOTE(review): string + GameManager.Score raises TypeError unless Score
# is already a str -- confirm its type in GameManager.
print("GAME OVER! \n Your score: " + GameManager.Score)
input();
|
e7eba81e0f7c097c51bd2c9ef2218606d48d93dc
|
Python
|
NickYuu/Python-YouTube
|
/modles/item.py
|
UTF-8
| 2,450
| 2.53125
| 3
|
[] |
no_license
|
import re
import requests
from bs4 import BeautifulSoup
import youtube_dl
def find_search_content(name):
    """Fetch the YouTube search-results page for `name` and parse it."""
    url = "https://www.youtube.com/results?search_query={}".format(name)
    response = requests.get(url)
    return BeautifulSoup(response.content, 'html.parser')
def find_page_content(search):
    """Fetch a YouTube results page by raw query string and parse it."""
    url = "https://www.youtube.com/results?{}".format(search)
    response = requests.get(url)
    return BeautifulSoup(response.content, 'html.parser')
def find_all_video(soup: BeautifulSoup) -> list:
    """Extract video entries (title, watch link, thumbnail, duration)
    from a parsed YouTube results page.
    """
    all_item = list()
    # The thumbnail <img> tags do not depend on the current anchor, so
    # the find_all + str() conversion is hoisted out of the loop
    # (the original recomputed both on every iteration).
    all_image = soup.find_all("img",
                              {"width": True,
                               "alt": True,
                               "height": True,
                               "data-ytimg": True,
                               "onload": True})
    all_image_text = str(all_image)
    for element in soup.find_all('a', {"rel": "spf-prefetch"}):
        title = element.get("title")
        link = element.get("href")
        # Video id is the value after "watch?v=".
        img_name = element.get("href").split("=")[1]
        img = re.findall(r"https://i.ytimg.com/vi/{}/[\S]+".format(img_name), all_image_text)
        img = img[0].strip("\"")
        img = img.replace("&amp;", "&")
        all_item.append({"title": title,
                         "link": "https://www.youtube.com{}".format(link),
                         "img": img})
    # Duration spans appear in the same document order as the anchors.
    for index, time in enumerate(soup.find_all("span", {"class": "video-time"})):
        all_item[index]["time"] = str(time.text)
    return all_item
def page_bar(soup: BeautifulSoup) -> dict:
    """Map pagination labels (e.g. '2', 'Next') to their result-page URLs."""
    attrs = {"class": True,
             "data-sessionlink": True,
             "data-visibility-tracking": True,
             "aria-label": True}
    # Later duplicates overwrite earlier ones, as in a plain assignment loop.
    return {anchor.text: anchor.get("href") for anchor in soup.find_all("a", attrs)}
def download_mp3(url: str):
    """Download the best audio track of `url` into ./music/ and convert
    it to a 192 kbps MP3 (the post-processor requires ffmpeg)."""
    ydl_opts = {
        'format': 'bestaudio/best',
        "outtmpl": "music/%(title)s.%(ext)s",
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }]
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download([url])
def download_mp4(url: str):
    """Download the best available video of `url` into ./video/."""
    ydl_opts = {'format': 'best', "outtmpl": "video/%(title)s.%(ext)s"}
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download([url])
| true
|
5949f2aab324a374a649f495ad2cf2d5398e630c
|
Python
|
vasilisbaltas/Customer-Risk-Profiling-and-Loan-Default-Prediction
|
/Data_Cleaning#1.py
|
UTF-8
| 3,706
| 2.828125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
################ checking the Loan_Attributes csv file ###################
loan_attributes = pd.read_csv('Loan_Attributes.csv')
### we will replace the NaNs in ACCSTARTDATE column with the values of FIRST_MONTH
### column since they are almost identical
loan_attributes['ACCSTARTDATE'] = loan_attributes['ACCSTARTDATE'].fillna(loan_attributes[loan_attributes['ACCSTARTDATE'].isna()]['FIRST_MONTH'])
### replace NaNs of OPENBALANCE and REPAYPERIOD with the respective mean feature values
loan_attributes['OPENBALANCE'] = loan_attributes['OPENBALANCE'].fillna(int(loan_attributes['OPENBALANCE'].mean()))
loan_attributes['REPAYPERIOD'] = loan_attributes['REPAYPERIOD'].fillna(int(loan_attributes['REPAYPERIOD'].mean()))
### removing outliers with respect to the OPENBALANCE and REPAYPERIOD attributes
low_balance = loan_attributes['OPENBALANCE'].quantile(0.05)   ### subjectively choose quantiles
high_balance = loan_attributes['OPENBALANCE'].quantile(0.99)
loan_attributes = loan_attributes[(loan_attributes['OPENBALANCE'] > low_balance) & (loan_attributes['OPENBALANCE'] < high_balance)]
loan_attributes = loan_attributes[(loan_attributes['REPAYPERIOD'] >= 12) & (loan_attributes['REPAYPERIOD'] <= 60)]   ### according to the case study
### drop the FIRST_MONTH column since it's almost identical to the ACCSTARTDATE column
### and the SEARCHDATE column because its data does not make sense and does not add value
### to the analysis
final_loan_attributes = loan_attributes.drop(['FIRST_MONTH','SEARCHDATE'],axis=1)
#final_loan_attributes.to_csv('Modified_Loan_Attributes.csv',encoding='utf-8',index=False)
################ checking the Client_Features csv file ###################
client_features = pd.read_csv('Client_Features.csv')
### deleting duplicate columns
### NOTE(review): the double transpose drops duplicate columns but casts
### every dtype to object in the round trip -- confirm downstream code
### re-coerces numeric columns.
client_features = client_features.T.drop_duplicates().T
### defining columns to remove because of high correlation or low variance
columns_to_remove = ['F_2','F_22','F_29','F_42','F_41','F_45','F_46','F_47','F_48','F_49','F_50','F_51','F_52',
                     'F_54','F_55','F_56','F_57','F_58','F_60','F_61','F_63','F_64','F_66','F_73','F_76','F_86',
                     'F_93','F_96','F_99','F_105','F_106','F_111','F_110','F_113','F_115','F_116','F_117','F_118',
                     'F_119','F_120','F_121','F_122','F_124','F_24','F_25','F_26','F_27','F_28','F_126','F_127',
                     'F_129','F_130','F_139','F_140','F_145','F_150','F_151','F_152','F_153','F_155','F_156',
                     'F_157','F_158','F_167','F_170','F_171','F_174','F_175','F_176','F_177','F_178','F_179',
                     'F_180','F_181','F_182','F_184','F_185','F_187','F_188','F_190','F_191','F_192','F_194',
                     'F_195','F_198','F_201','F_203','F_204','F_205','F_206','F_215','F_216','F_219','F_220',
                     'F_221','F_222','F_223','F_224','F_225','F_226','F_227','F_228','F_231','F_232','F_233',
                     'F_234','F_235','F_237','F_243','F_249','F_255','F_256','F_261','F_267','F_273']
client_features = client_features.drop(columns = columns_to_remove)
### replace -999997 and -999999 that don't make sense with features' median values
### replacing with mean value would still give us a negative number making no sense
for col in client_features.columns:
    client_features[col] = client_features[col].apply(lambda x: int(client_features[col].median()) if x ==-999997 or x ==-999999 else x )
#client_features.to_csv('Modified_Client_Features.csv',encoding='utf-8',index=False)
| true
|
74e1b5e4877ca274f3e19b4614f66a4bd1cc12df
|
Python
|
nowa360/my_leetcode
|
/JulyChallenge/IslandPerimeter.py
|
UTF-8
| 1,941
| 4.0625
| 4
|
[] |
no_license
|
"""
July 7 Challenge - 463. Island Perimeter
You are given a map in form of a two-dimensional integer grid where 1 represents land and 0 represents water.
Grid cells are connected horizontally/vertically (not diagonally). The grid is completely surrounded by water,
and there is exactly one island (i.e., one or more connected land cells).
The island doesn't have "lakes" (water inside that isn't connected to the water around the island).
One cell is a square with side length 1. The grid is rectangular, width and height don't exceed 100.
Determine the perimeter of the island.
Example:
Input:
[[0,1,0,0],
[1,1,1,0],
[0,1,0,0],
[1,1,0,0]]
Output: 16
"""
class IslandPerimeter(object):
    """Computes the perimeter of the (single) island in a 0/1 grid."""

    def __init__(self):
        # Kept for interface compatibility; it is reset on every call so an
        # instance can be reused. (Bug fix: the original never cleared it,
        # so a second call on the same instance returned a wrong result.)
        self.visited = set()

    def islandPerimeter(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        """
        self.visited = set()  # bug fix: discard stale state from prior calls
        if not grid or not grid[0]:
            return 0
        height, width = len(grid), len(grid[0])

        # Locate the first land cell; the problem guarantees one island.
        start = None
        for ri, row in enumerate(grid):
            for ci, sq in enumerate(row):
                if sq == 1:
                    start = (ci, ri)  # stored as (x, y) like the original
                    break
            if start is not None:
                break
        if start is None:
            return 0

        # Iterative DFS (the recursive version could exceed Python's default
        # recursion limit on a 100x100 island). Each land cell contributes
        # one unit of perimeter per adjacent water cell or grid border.
        perimeter = 0
        stack = [start]
        self.visited.add(start)
        while stack:
            x, y = stack.pop()
            if x == 0:
                perimeter += 1
            if x == width - 1:
                perimeter += 1
            if y == 0:
                perimeter += 1
            if y == height - 1:
                perimeter += 1
            for dx, dy in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                nx, ny = x + dx, y + dy
                if 0 <= nx < width and 0 <= ny < height:
                    if grid[ny][nx] == 0:
                        perimeter += 1
                    elif (nx, ny) not in self.visited:
                        self.visited.add((nx, ny))
                        stack.append((nx, ny))
        return perimeter
| true
|
8f2a69eda834820102763a4e4eb8b08887f18e9c
|
Python
|
srinisekaran/anthrolink
|
/RF Detector/scripts/voltage_plot.py
|
UTF-8
| 166
| 2.796875
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt

# Read one voltage sample per line. Fixes vs the original:
#  - a context manager closes the file (the original leaked the handle);
#  - values are converted to float so matplotlib plots them numerically
#    instead of as categorical strings (required for the 0.8-1.3 y-limits
#    to be meaningful); blank lines are skipped to avoid float('') errors.
with open('values.txt') as voltage_file:
    voltage_data = [float(line) for line in voltage_file.read().splitlines() if line.strip()]

plt.ylabel('Voltage (V)')
plt.plot(voltage_data)
plt.ylim(0.8, 1.3)
plt.show()
| true
|
e79afffb7fbe4e215715cdddaa4758ac18aebd4e
|
Python
|
gariel/lazyetl
|
/etl/steps/file.py
|
UTF-8
| 1,131
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
import abc
import io
import os
from typing import List, Optional
from etl.common import StatefulStepController
class File(StatefulStepController, abc.ABC):
    """Base ETL step that owns a text-file handle for the duration of a run.

    Subclasses fix the open *mode*; the pipeline assigns ``filename`` before
    ``__start__`` is invoked.
    """

    filename: str = ""  # target path, set externally before the step starts

    def __init__(self, mode: str):
        super().__init__()
        self.mode = mode
        # No handle until the step actually starts.
        self.file: Optional[io.TextIOWrapper] = None

    def __start__(self):
        # Open lazily so `filename` can be assigned after construction.
        self.file = io.open(self.filename, self.mode)

    def __finish__(self):
        handle = self.file
        handle.flush()
        handle.close()
class FileRead(File):
    """Step that reads from a file opened in text-read mode."""

    def __init__(self):
        super().__init__("r")

    def read(self) -> str:
        """Return the file's entire contents as one string."""
        return self.file.read()

    def read_lines(self) -> List[str]:
        """Return the contents as a list of lines (line endings kept)."""
        return self.file.readlines()
class FileWrite(File):
    """Step that writes to a file opened in text-write (truncate) mode."""

    def __init__(self):
        super().__init__("w")

    def write(self, data: str) -> None:
        """Write *data* verbatim."""
        self.file.write(data)

    def write_lines(self, data: List[str]) -> None:
        """Write the lines joined by the platform separator.

        Trailing separator characters are stripped from each line first so
        the output contains exactly one separator between lines.
        """
        stripped = [line.rstrip(os.linesep) for line in data]
        self.write(os.linesep.join(stripped))

    def new_line(self) -> None:
        """Emit a single platform line separator."""
        self.file.write(os.linesep)
class FileAppend(FileWrite):
    # Reuses FileWrite's write/write_lines/new_line behaviour but opens in
    # append mode.
    def __init__(self):
        # Deliberately bypasses FileWrite.__init__ (which hard-codes mode
        # "w") and initialises the File base directly with mode "a".
        File.__init__(self, "a")
| true
|
6ada8c884b99f653a0cecc8ed8c13848f7b4c824
|
Python
|
Uthmanhere/RPiWorkshop
|
/face2.py
|
UTF-8
| 506
| 2.640625
| 3
|
[] |
no_license
|
import cv2

# Load the photo and convert it to grayscale, which the Haar cascade expects.
image = cv2.imread('league.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Frontal-face Haar cascade shipped with OpenCV on the Pi.
face_cascade = cv2.CascadeClassifier('/home/pi/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
detections = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=2)

print('Faces found: ', len(detections))

# Outline every detection with a green box on the original colour image.
for (left, top, box_w, box_h) in detections:
    cv2.rectangle(image, (left, top), (left + box_w, top + box_h), (0, 255, 0), 2)

#cv2.imshow('face', img)
#cv2.waitKey(0)
cv2.imwrite('faceDetect.jpg', image)
| true
|
75f80454c8d5eda2d3b17e33c04bfde4c795b562
|
Python
|
15327311512/driver-analysis
|
/Desktop/code/my_driving/gradient
|
UTF-8
| 1,854
| 2.890625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 21 20:50:05 2019

@author: cyq
"""
## 1 gradient
'''
from sympy import S
from sympy.calculus import finite_diff_weights

if __name__ == "__main__":
    g2 = x**2 + 3*y
    l=g2.diff(x)
    print(l)
'''
import scipy.io as scio
# Bug fix: Matrix is used at the bottom of the script but was never
# imported, so the final three lines raised NameError.
from sympy import symbols, sin, cos, pi, Matrix
from sympy.diffgeom import Manifold, Patch, CoordSystem
from sympy.simplify import simplify

if __name__ == '__main__':
    dataFile = '/home/cyq/Desktop/code/my_driving/data/encourter_data.mat'
    data1 = scio.loadmat(dataFile)
    dataFile2 = '/home/cyq/Desktop/code/my_driving/data/encourter_name.mat'
    data2 = scio.loadmat(dataFile2)
    print (type(data1))
    print(data1['encourter_data'][8][8])

    '''
    r, theta1,theta2 = symbols('r, theta1,theta2')
    m = Manifold('M', 3)
    patch = Patch('P', m)
    rect = CoordSystem('rect', patch)
    polar = CoordSystem('polar', patch)
    rect in patch.coord_systems
    polar.connect_to(rect, [r, theta1,theta2], [r*cos(theta1), r*sin(theta2),cos(theta1), sin(theta2)])
    print(polar.jacobian(rect, [r, theta1,theta2]))
    print(polar.jacobian(rect, [1, theta1,theta2]))
    '''
    #def jabi()
    '''
    r,theta1,theta2 = symbols('r, theta1,theta2')
    m = Manifold('M', 2)
    patch = Patch('P', m)
    rect = CoordSystem('rect', patch)
    polar = CoordSystem('polar', patch)
    rect in patch.coord_systems
    #polar.connect_to(rect, [theta1,theta2], [r*cos(theta1), r*sin(theta2),cos(theta1), sin(theta2)])
    polar.connect_to(rect, [theta1,theta2], [r*cos(theta1)+r*sin(theta2)+cos(theta1)+sin(theta2)])
    g=polar.jacobian(rect, [theta1,theta2])
    print(polar.jacobian(rect, [theta1,theta2]))
    print(g.shape())
    '''
    # Bug fix: r/theta1/theta2 were only defined inside the commented-out
    # experiments above, so the Jacobian demo below also raised NameError.
    r, theta1, theta2 = symbols('r, theta1,theta2')
    x = Matrix([r*cos(theta1)+r*sin(theta2)+cos(theta1)+sin(theta2)])
    y = Matrix([theta1])
    print(x.jacobian(y))
| true
|
777c7215714f43fa2dec1b299a3d76ef87f65b77
|
Python
|
raviolican/HTML-Builder
|
/HTML-Builder.py
|
UTF-8
| 2,833
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Interactive REPL for building HTML tables via TableGen (from mods)."""
from mods import *

# All tables created so far, keyed by the name the user typed.
myObjects = {}
while True:
    # Split the command
    typedCMD = input("> ")
    cmd = typedCMD.split(" ",1)
    # Initiate table object and store it in the list
    if(cmd[0] == "table"):
        # NOTE(review): "table" with no name raises IndexError on pop(1)
        # *before* the try below, so the except never helps -- confirm.
        name = cmd.pop(1)
        try:
            myObjects[name] = TableGen(name)
        except IndexError:
            # NOTE(review): both branches are identical; the except is a no-op.
            myObjects[name] = TableGen(name)
        continue
    # Get ID's of current objects
    elif(cmd[0] == "idinfo"):
        cmd.pop(0)
        try:
            # Stripping spacers
            inptObjects = list(map(str.strip, cmd[0].split(",")))
            if not all(inptObjects):
                raise IndexError
        except IndexError:
            # Trying to fix False instances in list
            # NOTE(review): if "idinfo" was typed with no arguments,
            # inptObjects is unbound here and this raises NameError.
            length = len(inptObjects)
            newitems = []
            for v in range(length):
                if inptObjects[v]:
                    newitems.append(inptObjects[v])
                # String is false
                else:
                    continue
            else:
                # List empty now? Stop this process
                # (for-else: runs after the loop completes normally)
                if len(inptObjects) == 0:
                    raise AttributeError
                inptObjects = newitems
        except AttributeError:
            # NOTE(review): an AttributeError raised inside the IndexError
            # handler above is NOT caught here (same-try handlers don't
            # catch each other); this only catches one raised in the try body.
            print("Please provide one or more name(s) seperated by comma of "
                  + "your table or use tables() to get all tables.")
            continue
        finally:
            # Runs even after `continue` above; prints id info per table name.
            for i in inptObjects:
                if i not in myObjects:
                    print ("Cannot find table \""+i +"\"")
                else:
                    idinfo = list(myObjects[i].get_ids())
                    print("Id for table \""+idinfo[0]+"\"",format(idinfo[1]))
    # Is it an object?
    elif cmd[0] in myObjects:
        # Re-split without maxsplit so subcommand arguments are separate tokens.
        cmd = typedCMD.split(" ")
        if cmd[1] == "getclass":
            print(myObjects[cmd[0]]._ClassName)
        elif cmd[1] == "setclass":
            # NOTE(review): `cmd[2] in cmd` is always True when cmd[2]
            # exists, and IndexErrors for missing args are not handled.
            if(cmd[2] in cmd):
                myObjects[cmd[0]]._ClassName = cmd[2]
            else:
                print("Expecting 3rd paramenter")
        # Add a new Tag to the table
        elif cmd[1] == "add":
            if cmd[2] == "header":
                result = myObjects[cmd[0]].add_html("header",cmd[3:])
                print(result)
            elif cmd[2] == "row":
                try:
                    # Two-argument form first (cell + extra); fall back to one.
                    result = myObjects[cmd[0]].add_html("row",cmd[3],cmd[4])
                except:
                    result = myObjects[cmd[0]].add_html("row",cmd[3])
                print(result)
        elif cmd[1] == "mkcode":
            # Emit the generated HTML for this table.
            print(myObjects[cmd[0]].HtmlCode[0])
            pass
        else:
            print("Command not found")
            continue
    else:
        print("Command not found")
        continue
| true
|
d3b8a4db3ba671bc0a35e27c6d4568008f29dc6f
|
Python
|
prativadas/python-basics
|
/5. Chapter 5/07_sets.py
|
UTF-8
| 224
| 3.59375
| 4
|
[] |
no_license
|
# Prompt for five values; collecting them in a set keeps only the unique ones.
my_set = {input(f'number {position} is:') for position in range(1, 6)}
print(my_set)
| true
|
9d490c1b9a27d680e966b2d76a92389a93353d1c
|
Python
|
djanshuman/Algorithms-Data-Structures
|
/Hashing/Linear_Probing_Remove_modified.py
|
UTF-8
| 2,305
| 3.53125
| 4
|
[] |
no_license
|
'''
Created on 26-Jan-2021
@author: dibyajyoti
'''
''' Open Addressing method for Hashing . Below is the Linear probing implementation'''
def OpenAddressing(list1):
    """Dump the hash table, one "index -> value" line per slot."""
    for slot, value in enumerate(list1):
        print(slot, "->", value)
def hash_calc(key):
    # Home slot for `key`: simple modulo hash over the module-level table
    # `list1` (defined later in this script).
    return key % len(list1)
def insert(key):
    """Insert `key` into the global table `list1` with linear probing.

    Prints the outcome. A full table (no -1 slots; tombstones -2 are not
    reused, matching the original behaviour) and a duplicate found in the
    home slot are rejected.
    """
    if list1.count(-1) == 0:
        print("Hash table is full")
        return
    index = hash_calc(key)
    if list1[index] == key:
        print("Element " + str(key) + " is already present in the table")
        return
    if list1[index] == -1:
        list1[index] = key
        print("Element " + str(key) + " is inserted in hash table at index " + str(index))
    else:
        while list1[index] != -1:
            # Bug fix: the original wrap (set index to 0, then index += 1)
            # skipped slot 0 entirely and could spin forever when slot 0
            # was the only free one; modular arithmetic probes every slot.
            index = (index + 1) % len(list1)
        list1[index] = key
        print("Element " + str(key) + " is inserted in hash table at index " + str(index))
def Search(key):
    """Linear-probe the global table for `key`.

    Prints the result; returns the slot index when found, else False.
    An empty slot (-1) ends the probe early; tombstones (-2) are probed
    past, so removed-then-shifted elements stay reachable.
    """
    index = hash_calc(key)
    # Probe at most len(list1) slots so a table without -1 slots terminates.
    for _ in range(len(list1)):
        if list1[index] == key:
            print("Element " + str(key) + " found at index " + str(index))
            return index
        if list1[index] == -1:
            break
        # Bug fix: the original wrap skipped slot 0 (index = 0 then += 1).
        index = (index + 1) % len(list1)
    print(str(key) + " not found")
    return False
def remove(key):
    """Remove `key` from the global table by writing the -2 tombstone.

    Returns True when removed, False when the key was not found.
    """
    index_of_element = Search(key)
    # Bug fix: the original used `!= False`, which treated a found index of
    # 0 as "not found" (0 == False). Identity comparison keeps slot 0 usable.
    if index_of_element is not False:
        list1[index_of_element] = -2
        print("Element " + str(key) + " removed at index " + str(index_of_element))
        return True
    return False
# Build a 10-slot table with every slot empty (-1), then exercise the API.
list1 = [-1] * 10

insert(10)
insert(20)
insert(90)
#insert(6)
insert(99)
insert(69)
#insert(66)
insert(89)
insert(3)
print("\n")
OpenAddressing(list1)
remove(3)
OpenAddressing(list1)
insert(65)
print("\n")
OpenAddressing(list1)
remove(99)
print("\n")
OpenAddressing(list1)
Search(99)
remove(69)
print("\n")
OpenAddressing(list1)
Search(20)
Search(69)
remove(20)
print("\n")
OpenAddressing(list1)
insert(909)
insert(9091)
print("\n")
OpenAddressing(list1)
Search(1)
remove(69)
remove(99)
print("\n")
OpenAddressing(list1)
| true
|
10340763d2c2740bc07a38e15d93f50a58851813
|
Python
|
Environmental-Informatics/building-more-complex-programs-with-python-aggarw82
|
/program_7.1.py
|
UTF-8
| 1,010
| 4.15625
| 4
|
[] |
no_license
|
""" Program to generate a table of comparison for
own sqrt function vs math.sqrt()
Implementation of Exercise 7.1
Book: Think Python 2nd Edition by Allen B. Downey
Edition: 2e
Link: https://greenteapress.com/wp/think-python-2e/
"""
import math
def mysqrt(a):
    """Approximate the square root of *a* with Newton's method.

    Bug fixes vs the original: the initial guess `a - 0.9` was exactly
    zero for a == 0.9 (ZeroDivisionError) and negative for smaller inputs;
    negative *a* made the loop spin forever. Negative input now raises
    ValueError and a == 0 returns 0.0 directly.
    """
    if a < 0:
        raise ValueError("square root of a negative number is undefined")
    if a == 0:
        return 0.0
    x = a if a < 1 else a / 2  # any positive starting guess converges
    for _ in range(100):  # quadratic convergence; 100 iterations is ample
        y = (x + a / x) / 2
        if y == x:
            break
        x = y
    return x
def test_square_root():
    """Print a comparison table of mysqrt() against math.sqrt() for a = 1..9."""
    a = 1
    # format output
    print('\n\na mysqrt(a) math.sqrt(a) diff')
    print('- --------- ------------ ----')
    # calculate and print for a = 1 to 9
    while a != 10:
        second_col = mysqrt(a)
        third_col = math.sqrt(a)
        # diff is the absolute error of the hand-rolled Newton estimate
        diff = abs(third_col - second_col)
        print('{0:.1f} {1:.5f} {2:.5f} {3}'.format(a,second_col,third_col,diff))
        a += 1
    print('\n')
print('\n')
# Idiomatic entry-point guard: run the demo only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
    test_square_root()
"""
Author: Varun Aggarwal
Username: aggarw82
Github: https://github.com/Environmental-Informatics/building-more-complex-programs-with-python-aggarw82
"""
| true
|
e877b10abe8b7ed82d2603df89bba0c7db5eb348
|
Python
|
jimmesh518/Python_beginner
|
/while.py
|
UTF-8
| 115
| 3.8125
| 4
|
[] |
no_license
|
# Label each number in the tuple as even or odd.
for num in (1, 13, 11, 45, 35, 25):
    label = "Even " if num % 2 == 0 else "Odd "
    print(label, num)
| true
|
9c62af5ea532c9859b224771374f5b1113546fdd
|
Python
|
RadkaValkova/SoftUni-Web-Developer
|
/Programming Fundamentals Python/08 Data Types and Variables Exercise/Print Part of the ASCII Table.py
|
UTF-8
| 166
| 3.765625
| 4
|
[] |
no_license
|
# Print the ASCII characters between two user-supplied code points, inclusive,
# separated by spaces on a single line.
first_code = int(input())
last_code = int(input())
for code in range(first_code, last_code + 1):
    print(chr(code), end=' ')
| true
|
74edcdf95d9856fefdba675a458946b6643e3aad
|
Python
|
mkumarsgnr/python-reff
|
/divide apple.py
|
UTF-8
| 631
| 4.0625
| 4
|
[] |
no_license
|
# Report which numbers in a user-supplied range divide the apple count.
try:
    n = int(input("Enter the Number of Apples Harry has Got :"))
    mn = int(input("\nEnter the Minimum Range :"))
    mx = int(input("\nEnter the Maximum Range :"))
except ValueError:
    print("Intiger values Only!")
    exit()

if mx < mn:
    print("Maximum value can not be less then Minimum!")
elif mx == mn:
    # Degenerate range: test the single candidate divisor.
    message = (f"Number {mn} is a Divisor of {n}"
               if n % mn == 0
               else f"Number {mn} is not a Divisor of {n}")
    print(message)
else:
    for i in range(mn, mx + 1):
        if n % i == 0:
            print(f"--Number {i} is a Divisor of {n}")
        else:
            print(f"Number {i} is not a Divisor of {n}")
| true
|
40220aa94ff13233f90c0f5e491fb13e3d43de26
|
Python
|
alexcrawford0927/rainonsnow
|
/7D_ChiSquareDistance_RelativeTrackDen.py
|
UTF-8
| 9,086
| 2.8125
| 3
|
[] |
no_license
|
'''
Author: Alex Crawford
Date Created: 24 Apr 2019
Date Modified: 31 May 2019 --> Added more parameterizaton
11 Jun 2019 --> Switch from absolute to relative measure of storm density for determining area of interest
4 Jul 2019 --> Make it relative
Purpose: Runs a chi-square distance test for grid cells around Alaska for storm
track presence/absence.
'''
'''********************
Import Modules
********************'''
import numpy as np
import pandas as pd
from osgeo import gdal, gdalnumeric
import CycloneModule_11_1 as md
def chidist(inArr):
    '''Pairwise chi-square distance between the row profiles of a count array.

    Rows of *inArr* are observations (sites) and columns are measured
    parameters; the input should be discrete count data (this metric is a
    poor fit for binary or continuous data, though it will run on any
    numeric input).

    Returns a square 2-D numpy array with one distance per pair of rows in
    the lower triangle; the diagonal and upper triangle are left as NaN to
    avoid redundant computation.
    '''
    n_rows, n_cols = inArr.shape
    # Normalise each row to relative proportions of its own total.
    row_totals = np.array([float(np.sum(inArr[r, :])) for r in range(n_rows)])
    profiles = inArr / row_totals.reshape(n_rows, 1)
    # Mean proportion of each parameter (column) across all observations.
    col_means = np.array([np.mean(profiles[:, c]) for c in range(n_cols)])
    # Chi distance for every pair of rows (lower triangle only).
    out = np.zeros((n_rows, n_rows)) * np.nan
    for i in range(n_rows):
        for j in range(i):
            terms = (profiles[i, :] - profiles[j, :]) ** 2 / col_means
            out[i, j] = np.nansum(terms) ** 0.5
    return out
'''*******************************************
Define Variables
*******************************************'''
### Location Variables ###
# v indexes the parallel lists names/rows/cols below (which station to run).
v = 19 # (0-4, 5-9, 10-15, 16-19)
names = ["Aniak","Pargon Creek","Kelly Station","Fort Yukon","Teuchet Creek",\
    "Indian Pass","Monument Creek","Cooper Lake","Kenai Moose Pens","Mt Ryan",\
    "Bethel","Nome","Anchorage","Juneau","Fairbanks","Utqiagvik",\
    "Prudhoe Bay","Coleville Village","Kotzebue","Galena"]
# Grid row/col of each station (parallel to names) -- presumably on the
# EASE2 100-km grid referenced by latN below; TODO confirm.
rows = [122, 129, 135, 132, 129, 121, 129, 120, 120, 129,\
    121, 128, 121, 116, 129, 142, 140, 140, 133, 127]
cols = [33, 27, 28, 56, 55, 49, 55, 48, 47, 54,\
    29, 23, 48, 73, 51, 37, 50, 47, 28, 37]

kSize = 2 # smoothing radius of cyclone tracks
sigma = 1000. # std dev for Gaussian; larger means slower fall-off
td_thresh = 5 # Relative Threshold for area of interest (between 0 and 100) -- exclusive
ct_thresh = 2 # Absolute Threshold for area of interest (between 0 and positive infinity) -- inclusive
nMC = 1000 # Number of Monte Carlo Simulations

V = "V6"
T1, T2 = "ROS", "SOS"  # the two storm categories being compared

### Path Variables ###
path = "/Volumes/Miranda/RainOnSnow"
inpath = path+"/PrecipDetection_Aggregation/"
cpath = "/Volumes/Ferdinand/ArcticCyclone/detection11_3AM2/SystemTracks"
dtype = gdal.GDT_Float64

suppath = "/Volumes/Ferdinand/Projections"
latN = "EASE2_N0_100km_Lats.tif"

### Time Variables ###
# [year, month, day, hour, minute, second]
starttime = [1980,1,1,0,0,0]
endtime = [2019,1,1,0,0,0]
reftime = [1900,1,1,0,0,0]

months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
mons = ["01","02","03","04","05","06","07","08","09","10","11","12"]
days = ["01","02","03","04","05","06","07","08","09","10","11","12","13",\
    "14","15","16","17","18","19","20","21","22","23","24","25","26","27",\
    "28","29","30","31"]
hours = ["0000","0100","0200","0300","0400","0500","0600","0700","0800",\
    "0900","1000","1100","1200","1300","1400","1500","1600","1700","1800",\
    "1900","2000","2100","2200","2300"]

'''*******************************************
Main Analysis
*******************************************'''
########## READ IN ROS DATA ############
print("Load Data")
# Build date strings like "19800101" for file naming.
SDAY = str(starttime[0]) + mons[starttime[1]-1] + days[starttime[2]-1]
EDAY = str(endtime[0]) + mons[endtime[1]-1] + days[endtime[2]-1]

name = names[v]
# Per-storm aggregated statistics for each category at this station.
pdf1 = pd.read_csv(inpath+"/"+name+"_"+V+"/"+T1+"_AggregatedStats"+SDAY+"_"+EDAY+".csv")
pdf2 = pd.read_csv(inpath+"/"+name+"_"+V+"/"+T2+"_AggregatedStats"+SDAY+"_"+EDAY+".csv")

# Identify the number of storms in each category
n1 = pdf1.shape[0]
n2 = pdf2.shape[0]

# Create Gaussian Kernel (footprint used to smear each storm observation).
x = np.arange(-1*kSize,kSize+1,1)
y = np.arange(-1*kSize,kSize+1,1)
xx, yy = np.meshgrid(x,y)
k = np.exp(-1/(2*sigma**2) * (xx**2 + yy**2))

# Identify Locations of Interest (track-density rasters for each category).
td1 = gdalnumeric.LoadFile(inpath+"/"+name+"_"+V+"/"+T1+"_trkdenField"+SDAY+"_"+EDAY+".tif")
td2 = gdalnumeric.LoadFile(inpath+"/"+name+"_"+V+"/"+T2+"_trkdenField"+SDAY+"_"+EDAY+".tif")
ref = gdal.Open(suppath+"/"+latN)

########### AGGREGATE CYCLONE LOCATIONS ############
print("Calculate Observed Dissimilarity")
# create an list to store the counts
counts1 = [] # For First Category
counts2 = [] # For Second Category

# Find each storm for PDF #1
print (T1 + " Count: " + str(n1))
for i in range(n1):
    if i%10 == 0:
        print(i)
    sid = pdf1.iloc[i]['sid']
    y = int(pdf1.iloc[i]['year'])
    m = int(pdf1.iloc[i]['month'])

    # Read in Cyclone data, extract the data frame for locations
    cycs = pd.read_pickle(cpath+"/"+str(y)+"/systemtracks"+str(y)+mons[m-1]+".pkl")
    cyc = [c for c in cycs if c.sid == sid][0]
    cdata = cyc.data.loc[cyc.data.type != 0]

    counts1a = np.zeros_like(td1) # Create empty count field for storm

    # For each observation...
    for j in range(cdata.shape[0]):
        col = int(cdata.iloc[j].x)
        row = int(cdata.iloc[j].y)
        # Add to the count (stamp the Gaussian kernel around the location)
        counts1a[(row-kSize):(row+kSize+1),(col-kSize):(col+kSize+1)] = k + counts1a[(row-kSize):(row+kSize+1),(col-kSize):(col+kSize+1)]

    # Append to list (presence/absence mask for this storm)
    counts1.append(counts1a > 0)

# Find each storm for PDF #2
print (T2 + " Count: " + str(n2))
for i in range(n2):
    if i%10 == 0:
        print(i)
    sid = pdf2.iloc[i]['sid']
    y = int(pdf2.iloc[i]['year'])
    m = int(pdf2.iloc[i]['month'])

    # Read in Cyclone data, extract the data frame for locations
    cycs = pd.read_pickle(cpath+"/"+str(y)+"/systemtracks"+str(y)+mons[m-1]+".pkl")
    cyc = [c for c in cycs if c.sid == sid][0]
    cdata = cyc.data.loc[cyc.data.type != 0]

    counts2a = np.zeros_like(td2) # Create empty count field for storm

    # For each observation...
    for j in range(cdata.shape[0]):
        col = int(cdata.iloc[j].x)
        row = int(cdata.iloc[j].y)
        # Add to the count (stamp the Gaussian kernel around the location)
        counts2a[(row-kSize):(row+kSize+1),(col-kSize):(col+kSize+1)] = k + counts2a[(row-kSize):(row+kSize+1),(col-kSize):(col+kSize+1)]

    # Append to list (presence/absence mask for this storm)
    counts2.append(counts2a > 0)

# Find total counts for both categories of storm
sum1 = np.apply_along_axis(np.sum,0,np.array(counts1))
sum2 = np.apply_along_axis(np.sum,0,np.array(counts2))

# Write to File
md.writeNumpy_gdalObj(sum1,inpath+"/"+name+"_"+V+"/"+T1+"_trkdenRAW_"+SDAY+"_"+EDAY+".tif",ref,dtype)
md.writeNumpy_gdalObj(sum2,inpath+"/"+name+"_"+V+"/"+T2+"_trkdenRAW_"+SDAY+"_"+EDAY+".tif",ref,dtype)

# Area of interest: cells exceeding both the relative (td_thresh %) and
# absolute (ct_thresh) track-count thresholds.
aoi = np.where( ( (sum1+sum2)/(n1+n2)*100 > td_thresh) & (sum1+sum2 >= ct_thresh) )
sums = np.vstack((sum1[aoi],sum2[aoi]))

# Calculate Dissimilarity of Real Data
chi = chidist(sums)[1,0]

# Remove unnecessary files
#del sum1, sum2, sums, col, row, i, j, cdata, cycs, sid, counts1a, counts2a

############ MONTE CARLO ############
print("Start Monte Carlo Simulation")
# Combine all storms into one list
counts = np.array(counts1 + counts2)

# Create array to fill with chi distance results from Monte Carlo
chiMC = np.zeros((nMC))

# Perform Monte Carlo Simulation: repeatedly relabel the pooled storms into
# two random groups of the observed sizes and recompute the chi distance.
for mc in range(nMC):
    if mc%10 == 0:
        print(mc)
    # Generate a set of random integers to subsample the full cyclone population
    i1 = np.random.choice(n1+n2,size=n1,replace=0)
    i2 = np.delete(np.arange(n1+n2),i1)

    # Subset the total population
    mcounts1 = counts[i1]
    mcounts2 = counts[i2]

    # Recount storms from MC population 1
    sum1 = np.apply_along_axis(np.sum,0,mcounts1[:,aoi[0],aoi[1]])
    sum2 = np.apply_along_axis(np.sum,0,mcounts2[:,aoi[0],aoi[1]])
    sums = np.vstack((sum1,sum2))

    # Calculate Dissimilarity of MC Data
    chiMC[mc] = chidist(sums)[1,0]

# Write output to file, making the observations the first row of the file
output = pd.DataFrame(data=np.hstack((np.array(chi),chiMC)),columns=["chi"])
output.to_csv(inpath+"/"+name+"_"+V+"/MonteCarloRelativeTrackDen"+str(td_thresh)+"_Tracks_"+T1+"v"+T2+"_"+SDAY+"_"+EDAY+".csv",index=False)

print(names[v]+": " + T1 + " v. " + T2)
print("Observed Chi: " + str(round(chi,2)))
print("Percentile: " + str(np.sum(chiMC <= chi)/10))
print("Max Monte Carlo Chi: " + str(np.round(np.max(chiMC),2)))
print("TrkDen (" + str(td_thresh) + "), AOI: " + str(aoi[0].shape[0]))
| true
|
6c5a3fe050cbf818b7ef4badfd3c5d23e036df8c
|
Python
|
olanseverson/feature_detection
|
/feature_detection.py
|
UTF-8
| 5,256
| 2.671875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 30 11:50:31 2019
@author: olanseverson
https://realpython.com/python-logging/
https://pysource.com/2018/03/23/feature-matching-brute-force-opencv-3-4-with-python-3-tutorial-26/
https://www.life2coding.com/resize-opencv-window-according-screen-resolution/
https://computer-vision-talks.com/2011-07-13-comparison-of-the-opencv-feature-detection-algorithms/
https://arxiv.org/pdf/1710.02726.pdf
"""
import cv2
import logging

#%% Initiate logger for DEBUGGING
logging.basicConfig(level = logging.WARNING,
                    format='[%(levelname)s] => (%(name)s||%(threadName)-s): %(message)s')
# logging.root.setLevel(logging.WARNING)

FORMATTER = logging.Formatter("[%(levelname)s] => (%(name)s||%(threadName)-s): %(message)s")
c_handler = logging.StreamHandler() # handler
c_handler.setFormatter(FORMATTER)

logger = logging.getLogger(__name__)
# logger.addHandler(c_handler) # ADD HANDLER TO LOGGER
logger.setLevel(logging.DEBUG) # change DEBUG to another value to remove the debug logger

MAX_CTRL_FRAME = 13 # number of control frame
SKIPPED_NUM = 3 #skip reading the frame every Skipped_num frame

## GET THE IMAGE
# Per-control-frame detection thresholds, parallel to list_img (13 entries
# each): minimum ORB match count and maximum Hamming distance per frame.
list_img = []
th_dict = {'n_match': [100,280,170,130,200,120,250,150,180,100,200,210,170], #[180,280,170,130,240,120,250,150,180,100,200,210,170],
           'distance': [35,35,30,35,20,35,35,35,40,20,35,35,30]}
print(th_dict['distance'][0])

# Load the reference ("control") frames and a bookkeeping record for each.
for i in range (1,MAX_CTRL_FRAME + 1):
    temp = {}
    temp['img'] = cv2.imread("./control_frame/" + str(i) + ".jpg") # get control image
    temp['isFound'] = False
    temp['foundIdx'] = 0
    temp['matchVal'] = 0
    temp['foundImg'] = None
    list_img.append(temp)
cv2.waitKey(0)

matches_list = []
max_match = 0
## GET THE VIDEO
try:
    cap = cv2.VideoCapture('tet.mp4') # read Video that we want to check
    if not cap.isOpened():
        raise NameError("can not open")
except cv2.error as e:
    print("cv2.error:", e)
except Exception as e:
    logger.error("Exception: %s", e)
else:
    # Capture opened successfully: scan the video for the control frames
    # in order, advancing `idx` only when the current control frame matches.
    logger.info('frame count: %d', cap.get(cv2.CAP_PROP_FRAME_COUNT))
    logger.info('fps : %d', cap.get(cv2.CAP_PROP_FPS))

    idx = 0
    while True:
        _, frame = cap.read()
        logger.debug("frame nth: %d", cap.get(cv2.CAP_PROP_POS_FRAMES))
        if frame is None:
            break

        ## SHOW FRAME
        cv2.namedWindow('app', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('app', 400,600)
        cv2.imshow('app', frame)

        ## ORB Detector: keypoints/descriptors for the current control frame
        ## and the current video frame.
        orb = cv2.ORB_create()
        ctrl_image = list_img[idx]['img']
        kp1, des1 = orb.detectAndCompute(ctrl_image, None)
        kp2, des2 = orb.detectAndCompute(frame, None)

        ## Brute Force Matching (Hamming distance suits ORB's binary descriptors)
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = bf.match(des1, des2)
        # logger.debug('%d %d', th_dict['distance'][idx], th_dict['n_match'][idx])
        matches = list(filter(lambda x: x.distance<th_dict['distance'][idx],
                              matches)) # ignore value if smaller than match_distance
        logger.debug('feature found: %d', len(matches))

        ## Find most similar picture: enough close matches marks this control
        ## frame as found and moves on to the next one.
        if(len(matches)>th_dict['n_match'][idx]):
            list_img[idx]['isFound'] = True
            list_img[idx]['foundIdx'] = cap.get(cv2.CAP_PROP_POS_FRAMES)
            list_img[idx]['matchVal'] = len(matches)
            list_img[idx]['foundImg'] = frame
            logger.info("frame %d is found at idx %d", idx, list_img[idx]['foundIdx'])
            idx = idx + 1

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if (idx>=MAX_CTRL_FRAME):
            break
        cap.set(cv2.CAP_PROP_POS_FRAMES,
                cap.get(cv2.CAP_PROP_POS_FRAMES)+SKIPPED_NUM) # skip every SKIPPED_NUM frames
    #end while

#cv2.destroyWindow('app')

## SHOW FRAME
#for img in list_img:
#
#    cv2.namedWindow('CONTROL IMG', cv2.WINDOW_NORMAL)
#    cv2.resizeWindow('CONTROL IMG', 500,700)
#    cv2.moveWindow('CONTROL IMG', 0,0)
#    cv2.imshow('CONTROL IMG', img['img'])
#
#    if (img['isFound'] == True):
#
#        cv2.namedWindow('FOUND', cv2.WINDOW_NORMAL)
#        cv2.resizeWindow('FOUND', 500,700)
#        cv2.moveWindow('FOUND', 600,0)
#        cv2.imshow('FOUND', img['foundImg'])
#
#    cv2.waitKey(0)
#    cv2.destroyAllWindows()
def drawing_feature(img1, img2):
    """Match ORB features between *img1* and *img2* and display the result.

    Blocks until a key is pressed, then closes all OpenCV windows.
    """
    # Detect keypoints and binary descriptors on both images.
    detector = cv2.ORB_create()
    kp_a, des_a = detector.detectAndCompute(img1, None)
    kp_b, des_b = detector.detectAndCompute(img2, None)

    # Brute-force Hamming matching; keep only close matches (distance < 35),
    # sorted best-first.
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    ordered = sorted(matcher.match(des_a, des_b), key=lambda m: m.distance)
    good = [m for m in ordered if m.distance < 35]
    matching_result = cv2.drawMatches(img1, kp_a, img2, kp_b, good, None, flags=2)
    logger.debug("total value is [%d]", len(good))

    ## Draw
    cv2.namedWindow('result', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('result', 800,800)
    cv2.moveWindow('result', 300,0)
    cv2.imshow("result", img2)
    cv2.imshow("result", matching_result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
cv2.destroyAllWindows()
#cap.set(cv2.CAP_PROP_POS_FRAMES, 13)
#_, img2 = cap.read()
#drawing_feature(list_img[0]['img'], img2)
# Bug fix: `cap.release` only referenced the bound method and never released
# the capture device; it must be called.
cap.release()
| true
|
02035e8430752e3cbf92cd1a7378754106c4ff0a
|
Python
|
jyu001/Old_LeetCode_Python_solutions
|
/085_maximal_rectangle.py
|
UTF-8
| 2,609
| 3.671875
| 4
|
[] |
no_license
|
"""
Given a 2D binary matrix filled with 0's and 1's, find the largest rectangle containing only 1's and return its area.
For example, given the following matrix:
1 0 1 0 0
1 0 1 1 1
1 1 1 1 1
1 0 0 1 0
Return 6
"""
class Solution(object):
    def maximalRectangle(self, matrix):
        """Print and return the area of the largest all-ones rectangle.

        :type matrix: List[str] -- each row is a string of '0'/'1'
        :rtype: int

        Fixed from the original: Python 2 `print` statements, the
        builtin-shadowing variable name `all`, and `/` (float division
        under Python 3) for the column count. Also returns the answer
        instead of only printing it.
        """
        cells = []
        for row_string in matrix:
            cells.extend(row_string)
        print(cells)
        n_rows = len(matrix)  # number of rows
        n_cols = len(cells) // n_rows  # number of columns (integer division)
        nums = [int(value) for value in cells]
        best = self.check(nums, n_cols, 0, n_rows, 0, n_cols)
        print("answer: ", best)
        return best

    def check(self, nums, ncol, rowu, rowd, coll, colr):
        """Largest all-ones rectangle area within rows [rowu, rowd) x cols [coll, colr).

        Splits on the first 0 found: a rectangle of 1s cannot contain that
        cell, so it must lie wholly above, below, left, or right of it.
        (Exponential in the number of zeros -- fine for the small demos.)
        `nums` is the flattened grid; `ncol` is the full grid's column count.
        """
        if rowu == rowd or coll == colr:
            return 0
        for i in range(rowu, rowd):
            for j in range(coll, colr):
                if nums[i * ncol + j] == 0:
                    return max(
                        self.check(nums, ncol, rowu, i, coll, colr),
                        self.check(nums, ncol, i + 1, rowd, coll, colr),
                        self.check(nums, ncol, rowu, rowd, coll, j),
                        self.check(nums, ncol, rowu, rowd, j + 1, colr),
                    )
        # No zero in the window: the whole window is the rectangle.
        return (rowd - rowu) * (colr - coll)
# Exercise the solver on two sample grids (rows given as '0'/'1' strings).
solution = Solution()

grid_a = ["01101",
          "11010",
          "01110",
          "11110",
          "11111",
          "00000"]
solution.maximalRectangle(grid_a)

grid_b = ["111",
          "011",
          "111"]
solution.maximalRectangle(grid_b)
| true
|
b4e1ef8f58b322c3b51abcd551159c9a75e03fa9
|
Python
|
alinkak311/10-
|
/Домашняя работа/number_1_42.py
|
UTF-8
| 116
| 3.40625
| 3
|
[] |
no_license
|
# Read three integers and print the largest. The original's chained
# comparisons (a<=b>=c / b<=a>=c / else) always print the maximum value,
# so max() is behaviourally identical.
first = int(input())
second = int(input())
third = int(input())
print(max(first, second, third))
| true
|
19f792c1c691c43c8ad39cf80bf3a4a40a4400d1
|
Python
|
Asurada2015/Python-Data-Analysis-Learning-Notes
|
/Pythontutorials/18_19class_demo.py
|
UTF-8
| 666
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
"""讲解关于类的定义和使用"""
class Calculator:
name = 'Good calculator' # 固有属性,但是如果在init函数中被赋值的话则固有属性会被覆盖掉
price = 18
# 定义类的属性
def __init__(self, name, price, height, width, weight):
self.name = name
self.price = price
self.h = height
self.wi = width
self.we = weight
def add(self, x, y):
# 定义类的方法,其中self是一个保留参数
print(x+y)
def minus(self, x, y):
print(x-y)
def times(self, x, y):
print(x*y)
def divide(self, x, y):
print(x/y)
"""init"""
| true
|
00145335fc8519340b16ad2e287b4b9fc6ce8a06
|
Python
|
Marllonviny/progr1ads
|
/Media1URI.py
|
UTF-8
| 190
| 3.203125
| 3
|
[] |
no_license
|
# Weighted average of two exam scores (weights 3.5 and 7.5, total 11);
# the printed result is capped at 10.
a = float(input(''))
b = float(input(''))
media = (a * 3.5 + b * 7.5) / 11
if media <= 10:
    print('MEDIA = %.5f' % media)
else:
    print('MEDIA = %.5f' % 10)
| true
|