blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
62fef5873656879088b78fad186c4a6d4f0019b1 | Python | Slugskickass/Teaching_python | /Introduction/7.) Text.py | UTF-8 | 148 | 3.453125 | 3 | [] | no_license | my_input = input("Please give me a name ")
# Greet the user with the name collected above.
print("Hello", my_input, "how are you")
# Index of the last character (len - 1, since indexing is zero-based).
sv = len(my_input) - 1
# Show the first and last characters of the name.
print(my_input[0])
print(my_input[sv])
861a956960cf0357a5f0ac3f840b3809ac302578 | Python | santokalayil/my_python_programs | /search_with_ext_then_copy_files_to_directory.py | UTF-8 | 772 | 3.53125 | 4 | [] | no_license | print("Welcome to Santo's file utility python script".center(150,'*'))
print()
import glob
import shutil
import os

# Collect every file under the current directory tree matching the extension.
# NOTE: Windows-style '\\' separators are hard-coded throughout.
ext = input('Please provide the file extension to searched and copied:\n')
ipynb = glob.glob(f'**\\*.{ext}',recursive=True)
for i in ipynb:
    print(i)

# Copy each match into the destination folder, prefixing a 1-based index so
# same-named files from different folders do not collide.
destination_folder = input('Input the Destination Folder path:\n')
for i, file in enumerate(ipynb,start=1):
    outputfile = (destination_folder+'\\'+str(i)+'_'+(file.split('\\')[-1]))
    print(outputfile)
    shutil.copyfile(file,outputfile)

# Record the list of copied source paths in a notes file in the destination.
os.chdir(destination_folder)
print('CWD'+os.getcwd())
with open('notes.txt','w') as f:
    f.write('COPIED FILES\n'+'\n'.join(ipynb))
print(f" Searched files with extension '.{ext}' and copied files to the input path of folder ".center(150,'='))
3a2c1e4a596e2d649805a01836e7c8fbffa3adeb | Python | OwenDostie/adventofcode | /day08.py | UTF-8 | 829 | 2.828125 | 3 | [] | no_license | import re
import sys
# Advent of Code 2020 day 8: find the infinite loop (part one) and the single
# nop/jmp swap that lets the program terminate (part two).
# NOTE(review): leading indentation was lost in this copy; the nesting below
# is reconstructed from the control flow and should be confirmed against the
# original source.
f = open('day08.txt').read().rstrip().split('\n')
# print(len(f))
# print(f[611])
# sys.exit()
cands = set()
# Candidate lines to swap: every nop or jmp instruction.
for l,_ in enumerate(f):
    if re.search('nop|jmp', _):
        cands.add(l)
for swap in cands:
    print(swap)
    effs = set(); acc = 0; line = 0
    while True:
        if line == len(f):
            # Ran off the end of the program: this swap fixed it.
            print('Part two: ', acc)
            sys.exit()
        if line in effs:
            # Revisited an instruction: infinite loop detected.
            print('Part one: ', acc);
            break
        effs.add(line)
        if re.match('nop', f[line]):
            if line == swap:
                # Treat this nop as a jmp.
                # NOTE(review): no `continue` here, so the trailing
                # `line += 1` also fires -- confirm against the original.
                line += int(f[line][4:])
        if re.match('acc', f[line]):
            acc += int(f[line][4:])
        if re.match('jmp', f[line]):
            if line != swap:
                line += int(f[line][4:])
                continue
        line += 1
| true |
0c1901feb4d044f4c55bf3cd0301207c2602b50e | Python | rvdmtr/python | /Eric_Matthes/chapter_1/8/describe_pet.py | UTF-8 | 1,168 | 3.9375 | 4 | [] | no_license | #Использование позиционных аргументов, строгий порядок передачи
def describe_pet(animal_type, pet_name):
    """Show which pet I have and its name (positional-argument demo)."""
    print(f'\nI have a {animal_type}.')
    print(f'My {animal_type}`s name is {pet_name.title()}.')


describe_pet('hamster', 'harry')
describe_pet('dog', 'willie')

print('\n\n################')
print('################\n\n')
# Demonstrates keyword arguments and a default parameter value.
def describe_pet(pet_name, animal_type='dog'):
    """Show which pet I have and its name; animal_type defaults to 'dog'."""
    print(f'\nI have a {animal_type}.')
    print(f'My {animal_type}`s name is {pet_name.title()}.')


describe_pet(animal_type='hamster', pet_name='harry')
# Keyword arguments may be passed in any order.
describe_pet(pet_name='gillie', animal_type='cat')

print('\n#############')
describe_pet('rex')
describe_pet('rex', 'dinosaur')
describe_pet(pet_name='rex', animal_type='fish')
78af3a839a7f5d37f42fc6cb19f10590c6103746 | Python | Punsach/Coding-Interview-Questions | /CTCI-Chapter1/CTCI-Chapter1-Problem6.py | UTF-8 | 771 | 4.28125 | 4 | [] | no_license | #Compress a string by using the frequency of each letter
import sys
from collections import OrderedDict
def compress(someString):
    """Return the shorter of *someString* and its letter-frequency encoding.

    Each distinct character is emitted once, in first-appearance order,
    followed by its total count (e.g. "aabcccccaaa" -> "a5b1c5").  If the
    encoding is not shorter, the original string is returned unchanged.
    """
    # Count total frequency of each character, preserving insertion order.
    characters = OrderedDict()
    for c in someString:
        characters[c] = characters.get(c, 0) + 1
    # BUG FIX: dict.iteritems() exists only in Python 2 and raised
    # AttributeError under Python 3; items() is the correct call.
    result = ''.join('%s%s' % (key, value) for key, value in characters.items())
    # Return the shorter of the two strings (ties favor the encoding, as
    # min() returns its first argument on equal lengths).
    return min(result, someString, key=len)
# CLI entry: expects exactly one argument, the string to compress.
if(len(sys.argv) != 2):
    print("Please enter the proper number of inputs")
else:
    print("The result of the string compression is " + compress(sys.argv[1]) + ". Solution took O(n) time and O(n) space")
| true |
c14b9932ce09fe5116db4508ec20e267ba8cddce | Python | Tiltmeka/RIP | /lab1/ku.py | UTF-8 | 1,087 | 3.25 | 3 | [] | no_license | import sys
from cmath import sqrt
def vku(a, b, c):
    """Solve the biquadratic equation a*x^4 + b*x^2 + c = 0.

    Substituting t = x^2 reduces it to a quadratic in t; the four returned
    roots are the complex square roots (positive and negative) of t1 and t2.
    When a == 0 the equation degenerates to b*t + c = 0 with a single t.
    """
    if a == 0:
        # Degenerate (linear in t) case: t = -c / b.
        root = sqrt(-c / b)
        x1 = x2 = root
        x3 = x4 = -root
    else:
        disc = b * b - 4 * a * c
        t1 = (-b + sqrt(disc)) / (2 * a)
        t2 = (-b - sqrt(disc)) / (2 * a)
        x1, x2 = sqrt(t1), -sqrt(t1)
        x3, x4 = sqrt(t2), -sqrt(t2)
    return x1, x2, x3, x4
if __name__ == '__main__':
    # Read the three coefficients from argv; fall back to interactive input
    # when they are missing or not integers, and exit(1) if that fails too.
    try:
        a1 = int(sys.argv[1])
        a2 = int(sys.argv[2])
        a3 = int(sys.argv[3])
    except Exception:
        print('Ошибка аргументов командной строки, ввод будет произведен с клавиатуры')
        try:
            a1 = int(input('a1: '))
            a2 = int(input('a2: '))
            a3 = int(input('a3: '))
        except Exception:
            print('Вы ввели неверное значение, программа завершатает свою работу')
            exit(1)
    x1, x2, x3, x4 = vku(a1, a2, a3)
    print(x1, x2, x3, x4)
| true |
75a1faaf277d7c7ab62d9c8a6dd334da80285950 | Python | TheNiska/DigitRecogn | /Digit_Recogn.py | UTF-8 | 2,964 | 2.578125 | 3 | [] | no_license | import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import time
start = time.time()

# Hyper-parameters. BETA / LAMBD / EPSILON are declared but never used below
# (momentum / regularization were presumably planned -- TODO confirm).
ALFA_ZERO = 0.009
BETA = 0.70
LAMBD = 0
EPSILON = 0.00000000001
K_ITER = 400
cost_func = []
x_iter = []

# Load a Kaggle MNIST-style CSV: a 'label' column plus 784 pixel columns.
data = pd.read_csv('train.csv', delimiter=',')
y = data[['label']]
y = y.to_numpy()
y = y.T # 1 x 42000
# One-hot encode the labels into a 10 x 42000 matrix, one row per digit.
form = np.ones((10,1))
y = y * form
isZero = y[0,:] == 0
y[0,:] = (y[0,:] + 1) * isZero
for j in range(1,10):
    isNumber = y[j,:] == j
    y[j,:] = (y[j,:] * isNumber) / j
print(y.shape)
y_whole = y
y = y[:,:34000] # 1 x 34000
x = data.drop('label', axis=1)
x = x.to_numpy()
x = x.T # 784 x 42000
x_whole = x
x = x[:,:34000]

# Network shape: 784 inputs -> 12 -> 6 -> 10 outputs.
n0 = 784
n1 = 12
n2 = 6
n3 = 10
np.random.seed(1)
m = 34000
a0 = x
w1 = np.random.randn(n1,n0) * math.sqrt(1/n0) # xavier initialization
b1 = np.random.randn(n1,1)
w2 = np.random.randn(n2,n1) * math.sqrt(1/n1)
b2 = np.random.randn(n2,1)
w3 = np.random.randn(n3,n2) * math.sqrt(1/n2)
b3 = np.random.randn(n3,1)
mini_batch_size = 256
# Mini-batch gradient descent training loop.
for o in range(K_ITER):
    proc = (o / K_ITER) * 100
    print( '{:.2f}'.format(proc), '%')
    ALFA = (1 / (1 + 0.005*o))*ALFA_ZERO # learning rate decay
    t = 0
    while t < 34000:
        y_mini = y[:,t:t+mini_batch_size]
        a_mini = a0[:,t:t+mini_batch_size]
        m_mini = y_mini.shape[1]
        # Forward pass: two tanh hidden layers, softmax output.
        z1 = np.dot(w1, a_mini) + b1
        a1 = np.tanh(z1)
        z2 = np.dot(w2, a1) + b2
        a2 = np.tanh(z2)
        z3 = np.dot(w3, a2) + b3
        ti = math.e**z3
        # BUG FIX: the softmax activation a3 was never computed inside the
        # training loop (the line only existed in the validation section
        # below), so the cost and gradient lines referenced a3 before
        # assignment and raised a NameError on the first iteration.
        a3 = ti / np.sum(ti, axis=0)
        # Cross-entropy cost for this mini-batch.
        J = (1 / m_mini) * (- np.sum(y_mini * np.log(a3)))
        # Backward pass (softmax + cross-entropy gives dz3 = a3 - y).
        dz3 = a3 - y_mini
        dw3 = (1/m_mini) * (np.dot(dz3, a2.T))
        db3 = np.sum(dz3, axis=1, keepdims=True) * (1/m_mini)
        dz2 = np.dot(w3.T, dz3) * (1 - np.power(a2, 2))
        dw2 = (1/m_mini) * (np.dot(dz2, a1.T))
        db2 = np.sum(dz2, axis=1, keepdims=True) * (1/m_mini)
        dz1 = np.dot(w2.T, dz2) * (1 - np.power(a1, 2))
        dw1 = (1/m_mini) * (np.dot(dz1, a_mini.T))
        db1 = np.sum(dz1, axis=1, keepdims=True) * (1/m_mini)
        # Gradient-descent parameter update.
        w3 = w3 - ALFA * dw3
        b3 = b3 - ALFA * db3
        w2 = w2 - ALFA * dw2
        b2 = b2 - ALFA * db2
        w1 = w1 - ALFA * dw1
        b1 = b1 - ALFA * db1
        # Record the (rounded) cost for plotting.
        Jtemp = '{:.4f}'.format(J)
        cost_func.append(float(Jtemp))
        x_iter.append(o)
        t += mini_batch_size
print(J)
# Plot the per-mini-batch training cost curve.
fig = plt.subplots()
plt.plot(x_iter, cost_func)
plt.show()
#-----VALIDATION---------------------------------------------------------------------------------
# Evaluate on the held-out 8000 columns (34000:42000).
y_dev = y_whole[:,34000:]
x_dev = x_whole[:,34000:]
z1 = np.dot(w1, x_dev) + b1
a1 = np.tanh(z1)
z2 = np.dot(w2, a1) + b2
a2 = np.tanh(z2)
z3 = np.dot(w3, a2) + b3
ti = math.e**z3
a3 = ti / np.sum(ti, axis=0)  # softmax over the 10 classes
np.set_printoptions(precision=4, suppress=True)
# Show the class probabilities and predicted digits for the last 5 examples.
print(a3[:,7995:])
print(np.argmax(a3[:,7995:], axis=0))
J_dev = (1 / 8000) * (- np.sum(y_dev * np.log(a3)))
print('Cost function in dev examples: ', J_dev)
#-------------------------------------------------------------------------------------------------
end = time.time()
print('RunTime = ' + '{:.2f}'.format((end-start)/60))
76f6aba276194d188944c4709d607d5ee1d26a5c | Python | AlyssaYelle/python_practice | /software_design/LinkedLists/TestDenseMatrix.py | UTF-8 | 1,695 | 3.765625 | 4 | [] | no_license | class Matrix(object):
def __init__(self, row = 0, col = 0):
self.row = row
self.col = col
self.matrix = []
# perform matrix addition
def __add__(self, other):
if self.row != other.row or self.col != other.col:
return None
mat = Matrix(self.row, self.col)
for i in range (self.row):
new_row = []
for j in range (self.col):
new_row.append(self.matrix[i][j] + other.matrix[i][j])
mat.matrix.append(new_row)
return mat
def __mul__ (self, other):
if self.col != other.row:
return None
mat = Matrix(self.row, other.col)
for i in range (self.row):
new_row = []
for j in range (other.col):
sum_mult = 0
for k in range (other.row):
sum_mult += (self.matrix[i][k]*other.matrix[k][j])
new_row.append(sum_mult)
mat.matrix.append(new_row)
return mat
def __str__(self):
s = ''
for i in range(self.row):
for j in range(self.col):
s = s + str(self.matrix[i][j]).rjust(4)
s = s + '\n'
return s
def read_matrix(in_file):
    """Read one matrix from an open text file.

    Expected layout: a header line "rows cols", then `rows` lines of
    whitespace-separated integers, then one separator line (consumed).
    """
    header = in_file.readline().rstrip('\n').split()
    n_rows = int(header[0])
    n_cols = int(header[1])
    mat = Matrix(n_rows, n_cols)
    for _ in range(n_rows):
        values = in_file.readline().rstrip('\n').split()
        for j in range(n_cols):
            values[j] = int(values[j])
        mat.matrix.append(values)
    in_file.readline()  # consume the blank separator line
    return mat
def main():
    # Driver: reads fixture matrices from a text file and exercises the
    # Matrix operators. Expects ./text_files/matrix.txt to exist.
    in_file = open('./text_files/matrix.txt', 'r')

    print('Test Matrix Addition')
    matA = read_matrix(in_file)
    print(matA)
    matB = read_matrix(in_file)
    print(matB)
    matC = matA + matB
    print(matC)

    print('\nTest Matrix Multiplication')
    matP = read_matrix(in_file)
    print(matP)
    matQ = read_matrix(in_file)
    print(matQ)
    matR = matP * matQ
    print(matR)

    in_file.close()

main()
| true |
23dde8a3866517f3d8d675b86bd61130ecadf893 | Python | mfpankau/python-euler | /projecteuler2.py | UTF-8 | 194 | 3.3125 | 3 | [] | no_license | fib = [1, 2]
i = 1
while fib[i] + fib[i - 1] < 4000000:
fib.append(fib[i] + fib[i - 1])
i = i + 1
total = 0
for val in fib:
if val % 2 == 0:
total = total + val
print(total)
| true |
3eff5781bf6f5dfd22b3122805f1c6583b29cb87 | Python | TaplierShiru/ComputerNetworkUniversityCourse | /1/data.py | UTF-8 | 2,095 | 2.75 | 3 | [] | no_license | x = 5
y = 8
z = 3
S = z / 5.0 + 0.5
M = 2 * x + y + z + 15
G = 2 * x + 4 * y - z + 10
print('S = ', S)
print('M = ', M)
print('Средне значение интенсиввности сообщ: ', G)
print('-------------------------------------------------------')
print('| ')
print('| Задание 1. Шина ')
print('| ')
B = 10
V = 2.3 * (10 ** 5)
n_p = 2
L_p = 14
L_n = 1600
L_c = 320
#1
t_cp = S / V
print(t_cp * (10 ** 6))
#2
t_pt = n_p * (L_p / (B * (10 ** 6)))
print(t_pt * (10 ** 6))
#3
t = t_pt + t_cp
print(t * (10 ** 6))
#4
t_N = L_n / (B * (10 ** 6))
print(t_N * (10 ** 6))
#5
t_C = L_c / (B * (10 ** 6))
print(t_C * (10 ** 6))
#6
t_cp_sum = t_N + t_C
print(t_cp_sum * (10 ** 6))
#7
v_cp = t_N / t_cp_sum
print(v_cp)
#8
gamma = M * G
print(gamma)
#9
R = gamma * t_cp_sum
print(R)
#10
alpha = t / t_cp_sum
print(alpha)
#11
t_N_by_t_cp = R * (1 + v_cp ** 2) * ( (1 + alpha * (1 + 2 * 2.7)) / (2 * (1 - R * (1 + alpha * (1 + 2 * 2.7))))) + 1 + alpha / 2.0
print(t_N_by_t_cp)
#12
t_nn = t_N_by_t_cp * t_cp_sum
print(t_nn * (10 ** 6))
#13
C = 1 / (1 + 6.44 * alpha)
print(C)
#14
g_max = C / t_cp_sum
print(g_max)
#15
t_n_min = (1 + alpha / 2.0) * t_cp_sum
print(t_n_min * (10 ** 6))
print('-------------------------------------------------------')
print('| ')
print('| Задание 2. Кольоцо ')
print('| ')
L_C = 1600
h = 22
d = 48
b = 2
#1
toe = S / (M * V)
print(toe * (10 ** 6))
#2
t_cp = L_C / (B * (10 ** 6))
print(t_cp * (10 ** 6))
#3
print(gamma)
#4
R = gamma * t_cp
print(R)
#5
L_k = M * (b + B * (10 ** 6) * toe)
print(L_k)
#6
N = int(L_k / (h + d))
print(N)
#7
g = L_k / N
print(g)
#8
C = d / g
print(C)
#9
t_n_by_toe = 1 / (C - R)
print(t_n_by_toe)
#10
t_n = t_n_by_toe * t_cp
print(t_n * (10 ** 6))
#11
t_n_min = t_cp / C
print(t_n_min * (10 ** 6))
| true |
a098f02c32df762a4c90637e6f481964bbd145c7 | Python | gemchen/python | /chinese.py | UTF-8 | 153 | 2.609375 | 3 | [] | no_license | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
import re
# Python 2 script (note the `print` statements and the `unicode`/`ur""`
# literals; this will not run under Python 3).
s="123中文xxx"
print s
# Decode the UTF-8 byte string to a unicode object, then strip all CJK
# ideographs (U+4E00..U+9FA5) with a unicode-range regex.
s1 = unicode(s, "utf-8")
m =re.sub(ur"[\u4e00-\u9fa5]",'',s1)
print m
| true |
16b13ca5c9159d56b65373b98e8141551449acf3 | Python | chromium/chromium | /tools/site_compare/drivers/win32/keyboard.py | UTF-8 | 6,910 | 3.171875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
# Copyright 2011 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""SiteCompare module for simulating keyboard input.
This module contains functions that can be used to simulate a user
pressing keys on a keyboard. Support is provided for formatted strings
including special characters to represent modifier keys like CTRL and ALT
"""
import time # for sleep
import win32api # for keybd_event and VkKeyCode
import win32con # Windows constants
# TODO(jhaas): Ask the readability people if this would be acceptable:
#
# from win32con import VK_SHIFT, VK_CONTROL, VK_MENU, VK_LWIN, KEYEVENTF_KEYUP
#
# This is a violation of the style guide but having win32con. everywhere
# is just plain ugly, and win32con is a huge import for just a handful of
# constants
def PressKey(down, key):
  """Press (down=True) or release (down=False) a single key.

  Args:
    down: Whether the key is to be pressed or released
    key: Virtual key code of key to press or release
  """

  # keybd_event injects key events at a very low level (it's the Windows API
  # keyboard device drivers call), so this is a very reliable way of
  # simulating user input.
  flags = 0 if down else win32con.KEYEVENTF_KEYUP
  win32api.keybd_event(key, 0, flags)
def TypeKey(key, keystroke_time=0):
  """Simulate a keypress of a virtual key.

  Args:
    key: which key to press
    keystroke_time: length of time (in seconds) to "hold down" the key
      Note that zero works just fine

  Returns:
    None
  """

  # This just wraps a pair of PressKey calls with an intervening delay
  PressKey(True, key)
  time.sleep(keystroke_time)
  PressKey(False, key)
def TypeString(string_to_type,
               use_modifiers=False,
               keystroke_time=0,
               time_between_keystrokes=0):
  """Simulate typing a string on the keyboard.

  Args:
    string_to_type: the string to print
    use_modifiers: specifies whether the following modifier characters
      should be active:
      {abc}: type characters with ALT held down
      [abc]: type characters with CTRL held down
      \ escapes {}[] and treats these values as literal
      standard escape sequences are valid even if use_modifiers is false
      \p is "pause" for one second, useful when driving menus
      \1-\9 is F-key, \0 is F10

      TODO(jhaas): support for explicit control of SHIFT, support for
      nonprintable keys (F-keys, ESC, arrow keys, etc),
      support for explicit control of left vs. right ALT or SHIFT,
      support for Windows key

    keystroke_time: length of time (in secondes) to "hold down" the key
    time_between_keystrokes: length of time (seconds) to pause between keys

  Returns:
    None
  """

  # Start from the physical keyboard's current modifier state so we don't
  # double-press or wrongly release SHIFT/CTRL/ALT.
  shift_held = win32api.GetAsyncKeyState(win32con.VK_SHIFT ) < 0
  ctrl_held = win32api.GetAsyncKeyState(win32con.VK_CONTROL) < 0
  alt_held = win32api.GetAsyncKeyState(win32con.VK_MENU ) < 0

  next_escaped = False
  escape_chars = {
    'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v'}

  for char in string_to_type:
    vk = None
    handled = False

    # Check to see if this is the start or end of a modified block (that is,
    # {abc} for ALT-modified keys or [abc] for CTRL-modified keys
    if use_modifiers and not next_escaped:
      handled = True
      if char == "{" and not alt_held:
        alt_held = True
        PressKey(True, win32con.VK_MENU)
      elif char == "}" and alt_held:
        alt_held = False
        PressKey(False, win32con.VK_MENU)
      elif char == "[" and not ctrl_held:
        ctrl_held = True
        PressKey(True, win32con.VK_CONTROL)
      elif char == "]" and ctrl_held:
        ctrl_held = False
        PressKey(False, win32con.VK_CONTROL)
      else:
        handled = False

    # If this is an explicitly-escaped character, replace it with the
    # appropriate code
    if next_escaped and char in escape_chars: char = escape_chars[char]

    # If this is \p, pause for one second.
    if next_escaped and char == 'p':
      time.sleep(1)
      next_escaped = False
      handled = True

    # If this is \(d), press F key
    if next_escaped and char.isdigit():
      fkey = int(char)
      if not fkey: fkey = 10   # \0 means F10
      next_escaped = False
      vk = win32con.VK_F1 + fkey - 1

    # If this is the backslash, the next character is escaped
    if not next_escaped and char == "\\":
      next_escaped = True
      handled = True

    # If we make it here, it's not a special character, or it's an
    # escaped special character which should be treated as a literal
    if not handled:
      next_escaped = False
      if not vk: vk = win32api.VkKeyScan(char)

      # VkKeyScan() returns the scan code in the low byte. The upper
      # byte specifies modifiers necessary to produce the given character
      # from the given scan code. The only one we're concerned with at the
      # moment is Shift. Determine the shift state and compare it to the
      # current state... if it differs, press or release the shift key.
      new_shift_held = bool(vk & (1<<8))

      if new_shift_held != shift_held:
        PressKey(new_shift_held, win32con.VK_SHIFT)
        shift_held = new_shift_held

      # Type the key with the specified length, then wait the specified delay
      TypeKey(vk & 0xFF, keystroke_time)
      time.sleep(time_between_keystrokes)

  # Release the modifier keys, if held
  if shift_held: PressKey(False, win32con.VK_SHIFT)
  if ctrl_held: PressKey(False, win32con.VK_CONTROL)
  if alt_held: PressKey(False, win32con.VK_MENU)
def main():
  # We're being invoked rather than imported. Let's do some tests

  # Press command-R to bring up the Run dialog
  PressKey(True, win32con.VK_LWIN)
  TypeKey(ord('R'))
  PressKey(False, win32con.VK_LWIN)

  # Wait a sec to make sure it comes up
  time.sleep(1)

  # Invoke WordPad through the Run dialog
  TypeString("wordpad\n")

  # Wait another sec, then start typing
  time.sleep(1)
  TypeString("This is a test of SiteCompare's Keyboard.py module.\n\n")
  TypeString("There should be a blank line above and below this one.\n\n")
  TypeString("This line has control characters to make "
             "[b]boldface text[b] and [i]italic text[i] and normal text.\n\n",
             use_modifiers=True)
  TypeString(r"This line should be typed with a visible delay between "
             "characters. When it ends, there should be a 3-second pause, "
             "then the menu will select File/Exit, then another 3-second "
             "pause, then No to exit without saving. Ready?\p\p\p{f}x\p\p\pn",
             use_modifiers=True,
             keystroke_time=0.05,
             time_between_keystrokes=0.05)
if __name__ == "__main__":
  # BUG FIX: `sys` was used here but never imported anywhere in this module,
  # so running the file as a script raised a NameError before main() ran.
  import sys

  sys.exit(main())
| true |
ea9be5217946f4a024528240b8642695f94f99d8 | Python | DavidCarricondo/selenium-NLP | /src/scrapping.py | UTF-8 | 2,332 | 2.84375 | 3 | [] | no_license | #SELENIUM TUTORIAL:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
import time
#https://sites.google.com/a/chromium.org/chromedriver/downloads
PATH = '/home/david/Chrome_web_driver/chromedriver'
'''
driver = webdriver.Chrome(PATH)
#Open web and get title:
driver.get('https://techwithtim.net')
print(driver.title)
##Navigate through buttom clicking:
link = driver.find_element_by_link_text("Python Programming")
link.click()
try:
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.LINK_TEXT, "Beginner Python Tutorials"))
)
element.click()
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "sow-button-19310003"))
)
element.click()
except:
driver.quit()
driver.back()
driver.back()
driver.back()
##Search bars and retrieve results:
search = driver.find_element_by_name('s')
search.send_keys("test")
search.send_keys(Keys.RETURN)
#Wait until the key has been found:
try:
main = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "main"))
)
articles = main.find_elements_by_tag_name("article")
for article in articles:
header = article.find_element_by_class_name("entry-summary")
print(header.text)
finally:
driver.quit()
time.sleep(5)
driver.quit()
'''
############## NEW SITE
driver = webdriver.Chrome(PATH)
driver.get("https://orteil.dashnet.org/cookieclicker/")
driver.implicitly_wait(5)
cookie = driver.find_element_by_id("bigCookie")
cookie_count = driver.find_element_by_id('cookies')
items = [driver.find_element_by_id("productPrice" + str(i)) for i in range(1, -1, -1)]
actions = ActionChains(driver)
actions.click(cookie)
for i in range(5000):
actions.perform()
count = int(cookie_count.text.split(" ")[0])
for item in items:
value = int(item.text)
if value <= count:
upgrade_actions = ActionChains(driver)
upgrade_actions.move_to_element(item)
upgrade_actions.click()
upgrade_actions.perform() | true |
52d56e20d1695c8e963f6353b647411f568d6277 | Python | clivejan/python_fundamental | /exercises/ex_8_2_favourite_book.py | UTF-8 | 108 | 3.203125 | 3 | [] | no_license | def favourite_book(title):
print(f"One of my favourite books is {title}.")
favourite_book("Found a Job.")
| true |
1132fe84fefc8b6ee3b544fe9be02ab6464a274a | Python | heeki/portfolio | /src/scriptlets/scriptlet_dynamodb.py | UTF-8 | 5,462 | 2.75 | 3 | [] | no_license | import boto3
import botocore
import pprint
from boto3.dynamodb.conditions import Key
from scriptlets.scriptlet_global import Global
pp = pprint.PrettyPrinter(indent=4)
class ScriptletDynamoDB:
    """Thin convenience wrapper around a boto3 DynamoDB client/resource pair,
    with logging via the project's Global helper."""

    def __init__(self, app_name, profile=""):
        # Logs go to /tmp/<app_name>.log via the project logger helper.
        log_file = "{}.log".format(app_name)
        log_dir = "/tmp"
        self.log = Global.get_logger(app_name, log_file, log_dir)
        self.profile = profile
        # With no profile name, fall back to the default boto3 credential chain.
        if profile == "":
            self.ddb_client = boto3.client('dynamodb')
            self.ddb_resource = boto3.resource('dynamodb')
        else:
            self.ddb_client = boto3.Session(profile_name=self.profile).client('dynamodb')
            self.ddb_resource = boto3.Session(profile_name=self.profile).resource('dynamodb')

    def table_exists(self, table_name):
        """ Check if a DynamoDB table exists
        :param table_name: name of the DynamoDB table to check
        :return: True|False
        """
        try:
            response = self.ddb_client.describe_table(TableName=table_name)
            # NOTE(review): pretty-prints the full response to stdout on every
            # existence check -- possibly leftover debugging output.
            pp.pprint(response)
            return True
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                self.log.info("table_exists(): {} not found".format(table_name))
            else:
                self.log.error("table_exists(): undefined error checking for {}".format(table_name))
            return False

    def create_table(self, table_name):
        """ Create a table and return the Table object
        :param table_name: name of the DynamoDB table to create (idempotent:
            an existing table is reused)
        :return: Table object
        """
        self.log.info("create_table(): {}".format(table_name))
        if self.table_exists(table_name):
            return self.ddb_resource.Table(table_name)
        else:
            # Fixed schema: 'id' (string) hash key + 'timestamp' (string)
            # range key, provisioned at 5/5 read/write capacity units.
            table = self.ddb_resource.create_table(
                TableName=table_name,
                KeySchema=[
                    {
                        'AttributeName': 'id',
                        'KeyType': 'HASH'
                    },
                    {
                        'AttributeName': 'timestamp',
                        'KeyType': 'RANGE'
                    }
                ],
                AttributeDefinitions=[
                    {
                        'AttributeName': 'id',
                        'AttributeType': 'S'
                    },
                    {
                        'AttributeName': 'timestamp',
                        'AttributeType': 'S'
                    }
                ],
                ProvisionedThroughput={
                    'ReadCapacityUnits': 5,
                    'WriteCapacityUnits': 5
                }
            )
            # Block until the table is actually ready for use.
            table.meta.client.get_waiter('table_exists').wait(TableName=table_name)
            self.log.info("create_table(): table created, {} items".format(table.item_count))
            return table

    def put_item(self, table_name, item):
        """ Put an item into a DynamoDB table
        :param table_name: table_name
        :param item: item to be inserted (low-level DynamoDB attribute-value
            format, e.g. {'id': {'S': '...'}})
        :return: the raw put_item response
        """
        self.log.info("put_item(): putting item {}".format(item))
        response = self.ddb_client.put_item(
            TableName=table_name,
            Item=item
        )
        return response

    def get_item(self, table_name, key):
        """ Get an item from a DynamoDB table
        :param table_name: DynamoDB table from which the item will retrieved
        :param key: key of the item to be retrieved (attribute-value format)
        :return: the raw get_item response (caller checks for 'Item')
        """
        self.log.info("get_item(): getting item with key {}".format(key))
        response = self.ddb_client.get_item(
            TableName=table_name,
            Key=key
        )
        if response['ResponseMetadata']['HTTPStatusCode'] == 200 and 'Item' in response.keys():
            self.log.info("get_item(): retrieved item {}".format(response['Item']))
        else:
            self.log.info("get_item(): no items found with that key")
        return response

    def query(self, table_name, key, value):
        """ Query for a list of items from a DynamoDB table
        :param table_name: DynamoDB table from which the items will retrieved
        :param key: name of the key attribute to match
        :param value: value the key attribute must equal
        :return: the raw query response
        """
        self.log.info("query(): querying with key={}, value={}".format(key, value))
        # Resource-level query with a high-level key condition expression.
        fexp = Key(key).eq(value)
        table = self.ddb_resource.Table(table_name)
        response = table.query(
            KeyConditionExpression=fexp
        )
        # TODO: Need to figure out how to do client-based query()
        # response = self.ddb_client.query(
        #     TableName=table_name,
        #     KeyConditionExpression="user_id = :user",
        #     ExpressionAttributeValues={
        #         ':user': {
        #             'S': key
        #         }
        #     }
        # )
        return response

    def update(self, table_name, key, value):
        # Overwrites the 'content' attribute of the addressed item with a
        # string value and returns the updated attributes.
        self.log.info("update(): updating {} with {}".format(key, value))
        response = self.ddb_client.update_item(
            TableName=table_name,
            Key=key,
            ReturnValues="UPDATED_NEW",
            UpdateExpression="SET content = :content",
            ExpressionAttributeValues={
                ':content': {
                    'S': value
                }
            }
        )
        return response
| true |
f7a68f07196f52701d5c3aa1e27767bb9ed0c0be | Python | wangyongfei0306/Data-structure-and-algorithm | /table/table2.py | UTF-8 | 2,802 | 3.96875 | 4 | [] | no_license | class Node:
def __init__(self, data=None):
self.data = data
self.next = None
class MyLinkList:
    """A toy singly linked list with a dummy head node.

    `size` counts the data nodes; `rear` tracks the tail during tail
    insertion.
    """

    def __init__(self):
        self.head = Node()
        self.size = 0
        self.rear = 0

    """ 尾插法 """
    # Build the list by tail insertion (preserves input order).
    def create(self, A):
        self.head = Node(-1)
        self.rear = self.head
        for i in range(len(A)):
            self.rear.next = Node(A[i])
            self.rear = self.rear.next
            self.size += 1
        self.rear.next = None
        return self.head

    """ 头插法 """
    # Build the list by head insertion (reverses input order).
    def create2(self, A):
        self.head = Node(-1)
        for i in range(len(A)):
            node = Node(A[i])
            node.next = self.head.next
            self.head.next = node
            self.size += 1
        return self.head

    """ 寻找第 i 个结点 """
    @staticmethod
    def get_node(node, i):
        """
        :param node: head node of the list
        :param i: index of the wanted node (0 is the head itself)
        :return: the i-th node, or None for a negative index / short list
        """
        count, p = 0, node
        if i < 0:
            return None
        while (p is not None) and count < i:
            p = p.next
            count += 1
        return p

    """ 倒数第k个结点 """
    @staticmethod
    def get_node2(node, k):
        # Two-pointer scan: p runs ahead by k nodes, so when p reaches the
        # tail, q points at the k-th node from the end. Prints the value and
        # returns 1 on success, 0 when k is out of range.
        # NOTE(review): assumes a non-empty list; an empty list would raise
        # AttributeError on p.next.
        q = p = node.next
        count, n = 1, 1
        while p.next:
            p = p.next
            n += 1
            if count < k:
                count += 1
            else:
                q = q.next
        if k <= 0 or k > n:
            return 0
        else:
            print('倒数第', k, '个结点值是:', q.data)
            return 1

    def Max_Node(self, Head):
        # Maximum data value in the list, or None for an empty list.
        # NOTE(review): the -1 seed assumes the stored values exceed -1.
        if Head.next is None:
            return None
        p = Head.next  # first data node
        max_node = -1
        while p:
            max_node = max(max_node, p.data)
            p = p.next
        return max_node

    def count_link(self, Head):
        """
        :param Head: head (dummy) node
        :return: number of data nodes in the list
        """
        p = Head.next
        count = 0
        while p is not None:
            count += 1
            p = p.next
        return count

    """ 指定值出现的次数 """
    def count_node(self, Head, x):
        # Count occurrences of x; returns -1 for an empty list.
        p = Head.next
        count = 0
        if not p:
            return -1
        while p:
            if p.data == x:
                count += 1
            p = p.next
        return count
if __name__ == '__main__':
    # Manual smoke tests; each experiment was run once and then commented out.
    A = [1, 3, 5, 7, 9]
    link = MyLinkList()
    # print(link.create(A))
    # print('--------')
    # print(link.size)
    # print('--------')
    # print('头结点:', link.head.data, link.head.next)
    # print('首元结点:', link.head.next.data, link.head.next.next)
    # print('第二结点:', link.head.next.next.data, link.head.next.next.next)
    # print('第三结点:', link.head.next.next.next.data)
    # print('第四结点:', link.head.next.next.next.next.data)
    # print('第五结点:', link.head.next.next.next.next.next.data)
    # print(link.create2(A), link.size)
    # print(link.head.data, link.head.next.data, link.head.next.next.data)
    # head = link.create(A)
    # print(link.get_node(head, 3).data)
    # head = link.create(A)
    # link.get_node2(head, 2)
    # head = link.create(A)
    # print(link.Max_Node(head))
    # head = link.create(A)
    # print(link.count_link(head))
    # head = link.create(A)
    # print(link.count_node(head, 5))
| true |
ead501b60cabb4280cc8056d617c055f68364457 | Python | lemon234071/TransformerBaselines | /tasks/catslu/dual.py | UTF-8 | 1,832 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | import os
import json
import logging
import collections
logger = logging.getLogger(__file__)
def get_datasets(dir_path):
    """Load the train/valid/dev/test splits found in *dir_path*.

    Files whose names contain "json" are parsed as JSON; anything else is
    read as one stripped line per entry.  Files not matching any split
    keyword are ignored.
    """
    datasets = {}
    for fname in os.listdir(dir_path):
        split = None
        # Last matching keyword wins (e.g. "train_dev" maps to "dev").
        for candidate in ("train", "valid", "dev", "test"):
            if candidate in fname:
                split = candidate
        if split is None:
            continue
        full_path = os.path.join(dir_path, fname)
        if "json" in full_path:
            with open(full_path, encoding='UTF-8') as fh:
                datasets[split] = json.load(fh)
        else:
            with open(full_path, encoding='UTF-8', errors='ignore') as fh:
                datasets[split] = [ln.strip() for ln in fh.readlines() if len(ln) > 0]
    return datasets
def build_dataset(name, dataset, tokenizer):
    """Encode (utterance, semantics) pairs for a seq2seq model, in both the
    "query -> semantic" and the reversed "semantic -> query" direction."""
    logger.info("Tokenize and encode the dataset {} ".format(name))
    instances = collections.defaultdict(list)

    def encode(text):
        # Tokenize a string and map its tokens to vocabulary ids.
        return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))

    for pair in dataset:
        src_ids = encode(pair[0])
        tgt_ids = encode(pair[1])

        forward_input = encode("query: ") + src_ids
        instances["pad_input"].append(forward_input)
        instances["pad_input_mask"].append([1 for _ in range(len(forward_input))])
        instances["pad_label"].append(tgt_ids + [tokenizer.eos_token_id])

        backward_input = encode("semantic: ") + tgt_ids
        instances["pad_reverse_input"].append(backward_input)
        instances["pad_reverse_input_mask"].append([1 for _ in range(len(backward_input))])
        instances["pad_reverse_label"].append(src_ids + [tokenizer.eos_token_id])
    return instances
| true |
aa72e06607cab1ba131a649cbaa50dac2ac42b61 | Python | dnjohnstone/orix | /orix/quaternion/quaternion.py | UTF-8 | 7,660 | 3.109375 | 3 | [
"GPL-3.0-only"
] | permissive | # -*- coding: utf-8 -*-
# Copyright 2018-2020 the orix developers
#
# This file is part of orix.
#
# orix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# orix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with orix. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from orix.base import check, Object3d
from orix.scalar import Scalar
from orix.vector import Vector3d
def check_quaternion(obj):
    # Delegates validation/coercion to the generic orix.base.check helper,
    # constraining the result to the Quaternion class.
    return check(obj, Quaternion)
class Quaternion(Object3d):
"""Basic quaternion object.
Quaternions support the following mathematical operations:
- Unary negation.
- Inversion.
- Multiplication with other quaternions and vectors.
Attributes
----------
data : numpy.ndarray
The numpy array containing the quaternion data.
a, b, c, d : Scalar
The individual elements of each vector.
conj : Quaternion
The conjugate of this quaternion: :math:`q^* = a - bi - cj - dk`
"""
dim = 4
    @property
    def a(self):
        """Scalar (real) component of the quaternion."""
        return Scalar(self.data[..., 0])

    @a.setter
    def a(self, value):
        self.data[..., 0] = value

    @property
    def b(self):
        """First imaginary (i) component."""
        return Scalar(self.data[..., 1])

    @b.setter
    def b(self, value):
        self.data[..., 1] = value

    @property
    def c(self):
        """Second imaginary (j) component."""
        return Scalar(self.data[..., 2])

    @c.setter
    def c(self, value):
        self.data[..., 2] = value

    @property
    def d(self):
        """Third imaginary (k) component."""
        return Scalar(self.data[..., 3])

    @d.setter
    def d(self, value):
        self.data[..., 3] = value

    @property
    def conj(self):
        """Conjugate q* = a - bi - cj - dk (vector part negated)."""
        a = self.a.data
        b, c, d = -self.b.data, -self.c.data, -self.d.data
        q = np.stack((a, b, c, d), axis=-1)
        return Quaternion(q)

    def __neg__(self):
        # Negate all four components.
        return self.__class__(-self.data)

    def __invert__(self):
        # Inverse q^-1 = q* / |q|^2; `norm` is provided by the Object3d base.
        return self.__class__(self.conj.data / (self.norm.data ** 2)[..., np.newaxis])

    def __mul__(self, other):
        # Quaternion * Quaternion: Hamilton product, expanded per component.
        if isinstance(other, Quaternion):
            sa, oa = self.a.data, other.a.data
            sb, ob = self.b.data, other.b.data
            sc, oc = self.c.data, other.c.data
            sd, od = self.d.data, other.d.data
            a = sa * oa - sb * ob - sc * oc - sd * od
            b = sb * oa + sa * ob - sd * oc + sc * od
            c = sc * oa + sd * ob + sa * oc - sb * od
            d = sd * oa - sc * ob + sb * oc + sa * od
            q = np.stack((a, b, c, d), axis=-1)
            # Result takes the right operand's class (supports subclasses).
            return other.__class__(q)
        # Quaternion * Vector3d: component expansion of q v q^-1 -- the
        # standard rotation action for unit quaternions (TODO confirm the
        # unit-norm assumption with callers).
        elif isinstance(other, Vector3d):
            a, b, c, d = self.a.data, self.b.data, self.c.data, self.d.data
            x, y, z = other.x.data, other.y.data, other.z.data
            x_new = (a ** 2 + b ** 2 - c ** 2 - d ** 2) * x + 2 * (
                (a * c + b * d) * z + (b * c - a * d) * y
            )
            y_new = (a ** 2 - b ** 2 + c ** 2 - d ** 2) * y + 2 * (
                (a * d + b * c) * x + (c * d - a * b) * z
            )
            z_new = (a ** 2 - b ** 2 - c ** 2 + d ** 2) * z + 2 * (
                (a * b + c * d) * y + (b * d - a * c) * x
            )
            return other.__class__(np.stack((x_new, y_new, z_new), axis=-1))
        return NotImplemented
def outer(self, other):
"""Compute the outer product of this quaternion and the other object."""
def e(x, y):
return np.multiply.outer(x, y)
if isinstance(other, Quaternion):
q = np.zeros(self.shape + other.shape + (4,), dtype=float)
sa, oa = self.data[..., 0], other.data[..., 0]
sb, ob = self.data[..., 1], other.data[..., 1]
sc, oc = self.data[..., 2], other.data[..., 2]
sd, od = self.data[..., 3], other.data[..., 3]
q[..., 0] = e(sa, oa) - e(sb, ob) - e(sc, oc) - e(sd, od)
q[..., 1] = e(sb, oa) + e(sa, ob) - e(sd, oc) + e(sc, od)
q[..., 2] = e(sc, oa) + e(sd, ob) + e(sa, oc) - e(sb, od)
q[..., 3] = e(sd, oa) - e(sc, ob) + e(sb, oc) + e(sa, od)
return other.__class__(q)
elif isinstance(other, Vector3d):
a, b, c, d = self.a.data, self.b.data, self.c.data, self.d.data
x, y, z = other.x.data, other.y.data, other.z.data
x_new = e(a ** 2 + b ** 2 - c ** 2 - d ** 2, x) + 2 * (
e(a * c + b * d, z) + e(b * c - a * d, y)
)
y_new = e(a ** 2 - b ** 2 + c ** 2 - d ** 2, y) + 2 * (
e(a * d + b * c, x) + e(c * d - a * b, z)
)
z_new = e(a ** 2 - b ** 2 - c ** 2 + d ** 2, z) + 2 * (
e(a * b + c * d, y) + e(b * d - a * c, x)
)
v = np.stack((x_new, y_new, z_new), axis=-1)
return other.__class__(v)
raise NotImplementedError(
"This operation is currently not avaliable in orix, please use outer with other of type: Quaternion or Vector3d"
)
def dot(self, other):
"""Scalar : the dot product of this quaternion and the other."""
return Scalar(np.sum(self.data * other.data, axis=-1))
def dot_outer(self, other):
"""Scalar : the outer dot product of this quaternion and the other."""
dots = np.tensordot(self.data, other.data, axes=(-1, -1))
return Scalar(dots)
@classmethod
def triple_cross(cls, q1, q2, q3):
"""Pointwise cross product of three quaternions.
Parameters
----------
q1, q2, q3 : Quaternion
Three quaternions for which to find the "triple cross".
Returns
-------
q : Quaternion
"""
q1a, q1b, q1c, q1d = q1.a.data, q1.b.data, q1.c.data, q1.d.data
q2a, q2b, q2c, q2d = q2.a.data, q2.b.data, q2.c.data, q2.d.data
q3a, q3b, q3c, q3d = q3.a.data, q3.b.data, q3.c.data, q3.d.data
a = (
+q1b * q2c * q3d
- q1b * q3c * q2d
- q2b * q1c * q3d
+ q2b * q3c * q1d
+ q3b * q1c * q2d
- q3b * q2c * q1d
)
b = (
+q1a * q3c * q2d
- q1a * q2c * q3d
+ q2a * q1c * q3d
- q2a * q3c * q1d
- q3a * q1c * q2d
+ q3a * q2c * q1d
)
c = (
+q1a * q2b * q3d
- q1a * q3b * q2d
- q2a * q1b * q3d
+ q2a * q3b * q1d
+ q3a * q1b * q2d
- q3a * q2b * q1d
)
d = (
+q1a * q3b * q2c
- q1a * q2b * q3c
+ q2a * q1b * q3c
- q2a * q3b * q1c
- q3a * q1b * q2c
+ q3a * q2b * q1c
)
q = cls(np.vstack((a, b, c, d)).T)
return q
@property
def antipodal(self):
return self.__class__(np.stack([self.data, -self.data], axis=0))
def mean(self):
"""
Calculates the mean quarternion with unitary weights
Notes
-----
The method used here corresponds to the Equation (13) of http://www.acsu.buffalo.edu/~johnc/ave_quat07.pdf
"""
q = self.flatten().data.T
qq = q.dot(q.T)
w, v = np.linalg.eig(qq)
w_max = np.argmax(w)
return self.__class__(v[:, w_max])
| true |
0a432eabe6c6970a8338846ce8f1806a35c14bb4 | Python | neetikakhurana/Intent-Mining-on-Amazon-Reviews | /product.py | UTF-8 | 1,908 | 2.953125 | 3 | [] | no_license | import json
import readdata
import util
product_file = 'meta_Cell_Phones_and_Accessories.json.gz'
product_feature_file = 'product_features.txt'
def extractProductFeatures(product):
    """Build a one-line summary (id, title, price, image url, sales rank)
    for a product dict, or return None when the product has no usable
    sales rank in the Cell Phones & Accessories category."""
    # Every valid record carries an 'asin'; let a KeyError propagate otherwise.
    product_id = product['asin']
    # Optional attributes fall back to placeholder values.
    product_title = product.get('title', "NO TITLE")
    product_url = product.get('imUrl', "IMAGE UNAVAILABLE")
    product_price = product.get('price', 0.0)
    # Products listed under the wrong category carry no rank here and are
    # dropped; a falsy rank value is dropped as well.
    product_rank = product.get('salesRank', {}).get('Cell Phones & Accessories')
    if not product_rank:
        return None
    return (product_id + ', ' + product_title + ', ' + str(product_price) +
            ', ' + product_url + ', ' + str(product_rank) + '\n')
# Stream the product metadata, clean each line, and write one feature row
# per valid product. Bug fix: the output handle was never closed; ``with``
# guarantees the file is flushed and closed even on error.
with open(product_feature_file, 'wt') as pf:
    for line in readdata.parse(product_file):
        line = util.dataCleanup(line)
        product = readdata.dataInJson(line)
        if product is not None:
            product_features = extractProductFeatures(product)
            if product_features is not None:
                pf.write(product_features)
| true |
c7aca1a9ef9f31c22e1d77addeb50d26dc3d9184 | Python | mispower/weather-spider | /tools/db_connector.py | UTF-8 | 2,246 | 2.53125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from pymongo import MongoClient, IndexModel, HASHED, ASCENDING, GEOSPHERE
from pymongo.database import Database
from pymongo.collection import Collection
from abc import abstractmethod
HISTORY_COLLECTION_NAME = "weather_history"
BASIC_COLLECTION_NAME = "weather_basic"
class AutoBasicCollection:
    """Base class owning a MongoDB connection to the ``basicdata`` database.

    Subclasses supply the concrete collection (creating it on first use)
    via :meth:`_get_create_collection`. Instances are context managers so
    the client connection is closed when the ``with`` block exits.
    """
    def __init__(self):
        # NOTE(review): host/port and credentials are hard-coded here --
        # consider externalising them to configuration.
        self.__conn = MongoClient("10.10.11.75", 27017)
        self.__authorized_database = self.__conn.get_database("basicdata")
        self.__authorized_database.authenticate("basicdata_write", "basicdata_write")
        # Resolved once at construction time via the subclass hook below.
        self.__coll = self._get_create_collection(self.__authorized_database)
    @abstractmethod
    def _get_create_collection(self, authorized_database: Database)->Collection: pass
    @property
    def collection(self) -> Collection:
        """The ready-to-use pymongo collection managed by this object."""
        return self.__coll
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release the client's socket pool on leaving the ``with`` block.
        self.__conn.close()
class WeatherBasicCollection(AutoBasicCollection):
    """Accessor for ``weather_basic`` (unique ``weather_code`` index plus a
    geospatial index on ``gps_info``)."""
    def __init__(self):
        super().__init__()
    def _get_create_collection(self, authorized_database: Database):
        # Create the collection with its indexes only if it does not exist yet.
        if BASIC_COLLECTION_NAME not in authorized_database.list_collection_names():
            coll_create = authorized_database.get_collection(BASIC_COLLECTION_NAME)
            coll_create.create_indexes([
                IndexModel([("weather_code", ASCENDING)], unique=True),
                IndexModel([("gps_info", GEOSPHERE)])
            ])
        return authorized_database.get_collection(BASIC_COLLECTION_NAME)
class WeatherHistoryCollection(AutoBasicCollection):
    """Accessor for ``weather_history`` (hashed ``weather_code`` index and a
    unique compound index on (``datetime``, ``weather_code``))."""
    def __init__(self):
        super().__init__()
    def _get_create_collection(self, authorized_database: Database):
        # Create the collection with its indexes only if it does not exist yet.
        if HISTORY_COLLECTION_NAME not in authorized_database.list_collection_names():
            coll_create = authorized_database.get_collection(HISTORY_COLLECTION_NAME)
            coll_create.create_indexes([
                IndexModel([("weather_code", HASHED)]),
                IndexModel([
                    ("datetime", ASCENDING),
                    ("weather_code", ASCENDING)
                ], unique=True),
            ])
        return authorized_database.get_collection(HISTORY_COLLECTION_NAME)
| true |
34ddb228caa5a77af67b9da50778c573129da3e5 | Python | jmmshn/api | /mp_api/client/routes/alloys.py | UTF-8 | 2,428 | 2.578125 | 3 | [
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-hdf5",
"BSD-2-Clause"
] | permissive | from typing import List, Optional, Union
from collections import defaultdict
from mp_api.client.core import BaseRester
from mp_api.client.core.utils import validate_ids
from emmet.core.alloys import AlloyPairDoc
class AlloysRester(BaseRester[AlloyPairDoc]):

    suffix = "alloys"
    document_model = AlloyPairDoc  # type: ignore
    primary_key = "material_id"

    def search(
        self,
        material_ids: Optional[Union[str, List[str]]] = None,
        formulae: Optional[List[str]] = None,
        sort_fields: Optional[List[str]] = None,
        num_chunks: Optional[int] = None,
        chunk_size: int = 1000,
        all_fields: bool = True,
        fields: Optional[List[str]] = None,
    ) -> List[AlloyPairDoc]:
        """
        Query for hypothetical alloys formed between two commensurate
        crystal structures, following the methodology in
        https://doi.org/10.48550/arXiv.2206.10715
        Please cite the relevant publication if data provided by this
        endpoint is useful.
        Arguments:
            material_ids (str, List[str]): Search for alloys containing the specified Material IDs
            formulae (List[str]): Search for alloys containing the specified formulae
            sort_fields (List[str]): Fields used to sort results. Prefix with '-' to sort in descending order.
                NOTE(review): currently accepted but not forwarded to the
                underlying query -- confirm whether that is intentional.
            num_chunks (int): Maximum number of chunks of data to yield. None will yield all possible.
            chunk_size (int): Number of data entries per chunk.
            all_fields (bool): Whether to return all fields in the document. Defaults to True.
            fields (List[str]): List of fields in AlloyPairDoc to return data for.
        Returns:
            ([AlloyPairDoc]) List of alloy pair documents.
        """
        # Dead-code fix: the original built a defaultdict and immediately
        # re-filtered it into an (empty) plain dict; a plain dict suffices.
        query_params = {}  # type: dict

        if material_ids:
            if isinstance(material_ids, str):
                material_ids = [material_ids]

            query_params.update({"material_ids": ",".join(validate_ids(material_ids))})

        return super()._search(
            formulae=formulae,
            num_chunks=num_chunks,
            chunk_size=chunk_size,
            all_fields=all_fields,
            fields=fields,
            **query_params
        )
| true |
cff35eac511c23fe31cafdf8d9033cf821368675 | Python | GuiRodriguero/pythonFIAP | /1 Semestre/Aula1 - exercicios/exer2.py | UTF-8 | 261 | 3.921875 | 4 | [] | no_license | print ("Digite 3 Número e descubra o quadrado da soma deles!")
n1 = int(input("Digite o primeiro número: "))
n2 = int(input("Digite o segundo número: "))
n3 = int(input("Digite o terceiro número: "))
# Bug fix: the original computed (n1+n2+n3) * (n1+n2+3) -- the second
# factor dropped the 'n' of n3, so the result was not the square of the sum.
res = (n1 + n2 + n3) ** 2
print("Resultado: ", res)
a20eeb89c23dcaf2930342a46a04ceeb93107d8b | Python | adamoses/STN | /stn_parser.py | UTF-8 | 3,455 | 3.109375 | 3 | [] | no_license | import numpy as np
import sys
from STN import *
from algo import *
from STNU import *
def stringToSTN(input):
    """Read a standardized STN text file and build an STN object.

    Expected layout (lines starting with '#' are section headers and are
    discarded): network kind, number of time-points, number of ordinary
    edges, one line of space-separated time-point names, then one edge per
    line in the form ``from_time_point value to_time_point``.
    """
    with open(input, "r") as stn_file:
        raw_lines = stn_file.read().splitlines()
    # Drop the '#' header lines; what remains is purely positional data.
    arr = np.array([entry for entry in raw_lines if not entry.startswith('#')])
    num_tp = int(arr[1])
    num_edges = int(arr[2])
    strings = arr[3]
    edges = arr[4:]
    return STN(num_tp, num_edges, strings, edges)
def string_to_stnu(input):
    """Read a standardized STNU text file and build an STNU object.

    Same layout as an STN file, plus a contingent-link count and one
    four-token line per contingent link after the ordinary edges.
    """
    with open(input, "r") as stnu_file:
        raw_lines = stnu_file.read().splitlines()
    # Drop the '#' header lines; what remains is purely positional data.
    arr = np.array([entry for entry in raw_lines if not entry.startswith('#')])
    num_tp = int(arr[1])
    num_edges = int(arr[2])
    num_cont_edges = int(arr[3])
    strings = arr[4]
    edges = []
    contingent_links = []
    for line in arr[5:]:
        # Contingent links have four tokens (from, low, high, to);
        # ordinary edges have three.
        target = contingent_links if len(line.split()) == 4 else edges
        target.append(line)
    return STNU(num_tp, num_edges, num_cont_edges, strings, edges, contingent_links)
def stnu_to_string(input):
    """Serialize an STNU object back into the standardized text format.

    Bug fix: the "KIND OF NETWORK" header used to say "STN" even though
    this writer emits the STNU sections (contingent-link count and links),
    which the STNU reader expects under the "STNU" kind.
    """
    edges = [' '.join(edge) for edge in input.get_ordered_edges()]
    cont_edges = [' '.join(edge) for edge in input.get_cont_links()]
    names = ' '.join(input.get_names())
    string = "# KIND OF NETWORK\nSTNU\n# Num Time-Points\n" + str(input.get_num_tp())
    string += "\n# Num Ordinary Edges\n" + str(input.get_num_edges()) + "\n"
    string += "# Num Contingent Links\n" + str(input.get_num_cont_links()) + '\n'
    string += "# Time-Point Names\n" + names + "\n# Ordinary Edges\n"
    string += '\n'.join(edges) + '\n' + '# Contingent Links\n' + "\n".join(cont_edges)
    return string
def stn_to_string(input):
    """Serialize an STN object back into the standardized text format.

    Bug fix: the "KIND OF NETWORK" header used to say "STNU" even though
    this writer emits only the plain STN sections (no contingent links),
    which the STN reader expects under the "STN" kind.
    """
    edges = [' '.join(edge) for edge in input.get_ordered_edges()]
    names = ' '.join(input.get_names())
    string = "# KIND OF NETWORK\nSTN\n# Num Time-Points\n" + str(input.get_num_tp())
    string += "\n# Num Ordinary Edges\n" + str(input.get_num_edges()) + "\n"
    string += "# Time-Point Names\n" + names + "\n# Ordinary Edges\n"
    string += '\n'.join(edges)
    return string
788ab75320bbef850c067887f4ebb939f4b9e4b7 | Python | osmanemresener/Python-Studies | /If Elif Else/problem1.py | UTF-8 | 471 | 3.8125 | 4 | [] | no_license | print("""
****************************************
Boy ve Kilo Endeksi Hesaplama Programı
****************************************
""")
boy = float(input("Boyunuzu Giriniz(metre):"))
kilo = float(input("Kilonuzu giriniz(kg):"))
# Body-mass index: weight (kg) divided by height (m) squared.
endeks = kilo / boy ** 2
# Classify against the upper bound of each BMI band, lowest band first.
for sinir, etiket in ((18.5, "Zayıf"), (25, "Normal"), (30, "Fazla kilolu")):
    if endeks <= sinir:
        print(etiket)
        break
else:
    print("Obez")
print("Vücut kitle endeksiniz {}'dir.".format(endeks))
| true |
6b49d8e3a9e85d831e032bfcef909c5bf8e259b4 | Python | xinqiaozhang/python-circuit-testability-measures | /src/calculate_observability.py | UTF-8 | 2,191 | 2.9375 | 3 | [
"MIT"
def calculateObservability(levels, circuitDescription, testability):
    """Fill testability[line]["obs"] for every line, walking the levels in
    reverse so downstream observabilities exist before they are needed."""
    for level in reversed(levels):
        for line_index, line_info in level.items():
            testability[line_index]["obs"] = calculateLineObservability(
                line_info, line_index, circuitDescription, testability)
def calculateLineObservability(lineInfo, lineInd, circuitDescription, testability):
    """Return the observability cost of circuit line *lineInd*.

    ``lineInfo["entering"]`` names the gate this line feeds (None for a
    primary output). ``circuitDescription[2]`` maps gate name -> dict with
    'type', 'inputs' and 'outputs'; *testability* maps line -> dict with
    'obs', 'control0' and 'control1' values. Returns None (after printing
    an error) for an unknown gate type.
    """
    gate = lineInfo["entering"]
    if gate is None:
        # A primary output is directly observable.
        return 0
    gate_info = circuitDescription[2][gate]
    gateType = gate_info["type"]
    outputLine = gate_info["outputs"]
    if gateType == "and" or gateType == "nand":
        # The other inputs must be driven to 1 for this line to propagate.
        obs = testability[outputLine[0]]["obs"]
        for line in gate_info["inputs"]:
            if line != lineInd:
                obs = obs + testability[line]["control1"]
        return obs + 1
    elif gateType == "or" or gateType == "nor":
        # The other inputs must be driven to 0.
        obs = testability[outputLine[0]]["obs"]
        for line in gate_info["inputs"]:
            if line != lineInd:
                obs = obs + testability[line]["control0"]
        return obs + 1
    elif gateType == "xor":
        # The other inputs may take whichever value is cheaper to control.
        obs = testability[outputLine[0]]["obs"]
        for line in gate_info["inputs"]:
            if line != lineInd:
                obs += min(testability[line]["control0"], testability[line]["control1"])
        return obs + 1
    elif gateType == "not":
        return testability[outputLine[0]]["obs"] + 1
    elif gateType == "fanout":
        # A fanout stem is observable through its cheapest branch.
        obs = testability[outputLine[0]]["obs"]
        for line in gate_info["outputs"]:
            obs = min(obs, testability[line]["obs"])
        return obs
    elif gateType == "buf":
        return testability[outputLine[0]]["obs"]
    else:
        # Bug fix: the original used the Python 2 print statement (a syntax
        # error on Python 3); behavior otherwise unchanged (returns None).
        print("error no gateType:" + gateType)
        return None
| true |
a2b76c33ea0b87a66063c6e2b2f8d5bd8f4c6710 | Python | DaHuO/Supergraph | /codes/CodeJamCrawler/16_2_1/theed/problem_a.py | UTF-8 | 1,483 | 3.484375 | 3 | [] | no_license | import sys
from collections import OrderedDict
def sheep(number):
    """Recover the sorted digit string hidden in the scrambled letters of
    *number* (Google Code Jam style).

    Greedy elimination: each marker letter below uniquely identifies its
    digit once the letters of earlier digits have been stripped out.
    """
    marker_order = [
        ('Z', '0'), ('W', '2'), ('G', '8'), ('X', '6'), ('H', '3'),
        ('U', '4'), ('F', '5'), ('V', '7'), ('O', '1'), ('N', '9'),
    ]
    words = {
        '0': "ZERO", '1': "ONE", '2': "TWO", '3': "THREE", '4': "FOUR",
        '5': "FIVE", '6': "SIX", '7': "SEVEN", '8': "EIGHT", '9': "NINE",
    }
    digits = []
    for marker, digit in marker_order:
        while marker in number:
            # Strip one spelled-out occurrence of the digit word.
            for ch in words[digit]:
                number = number.replace(ch, '', 1)
            digits.append(digit)
    return "".join(sorted(digits))
def main():
    """Solve every case in the file named on the command line and write
    the answers next to it, with 'in' in the name replaced by 'sol'."""
    in_path = sys.argv[1]
    with open(in_path, 'r') as handle:
        lines = handle.read().splitlines()
    case_count = int(lines[0])
    pieces = []
    for case_number, case in enumerate(lines[1:case_count + 1], start=1):
        pieces.append('Case #{}: {}\n'.format(case_number, sheep(case)))
    with open(in_path.replace('in', 'sol'), 'w') as handle:
        handle.write(''.join(pieces))
def test():
    """Sanity-check sheep() against the known sample cases."""
    expectations = [
        ('OZONETOWER', '012'),
        ('WEIGHFOXTOURIST', '2468'),
        ('OURNEONFOE', '114'),
        ('ETHER', '3'),
    ]
    for inp, expected in expectations:
        actual = sheep(inp)
        assert actual == expected, 'Got {}, expected {} on {}'.format(actual, expected, inp)
# Script entry point: python problem_a.py <input-file>
if __name__ == '__main__':
    main()
| true |
f8e3358b75b6c28a361adc29118c5914e995a0e3 | Python | dnguyen0304/roomlistwatcher | /clare/clare/models/player_record.py | UTF-8 | 801 | 3.09375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import re
from . import IRecord
class PlayerRecord(IRecord):

    """One battle participant parsed from a ``|player|`` protocol message."""

    def __init__(self, position, name):
        # A record_id of 0 marks a record that has not been persisted yet.
        self.record_id = 0
        self.position = int(position)
        self.name = name

    @classmethod
    def from_message(cls, message):
        """Build a record from a ``|player|pN|name|...`` line.

        Raises ValueError when the message does not match that shape.
        """
        match = re.match(r'\|player\|p(?P<position>\d)\|(?P<name>[^\n\r|]+)\|.+',
                         message)
        if match is None:
            raise ValueError
        return cls(**match.groupdict())

    def __repr__(self):
        template = '{}(record_id={}, position={}, name="{}")'
        return template.format(self.__class__.__name__,
                               self.record_id,
                               self.position,
                               self.name)
| true |
4d7a404df810008e8a452d875ea956c55fcdbc27 | Python | in-tandem/algorithm | /hacker_rank/largest_rectangle.py | UTF-8 | 1,122 | 3.484375 | 3 | [] | no_license | ##https://www.hackerrank.com/challenges/largest-rectangle/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=stacks-queues&h_r=next-challenge&h_v=zen&h_r=next-challenge&h_v=zen
import itertools
sequence = [1,2,3,4,5]
# sequence = list(range(1000))
# def find_area()
def produce_combinations_preserving_order(sequence):
    """Print every contiguous slice of *sequence* (per start index,
    longest slice first) and collect each slice's rectangle area
    (slice length times its minimum value)."""
    slices = []
    largest_rectangle = []
    for start in range(len(sequence)):
        for stop in range(len(sequence), start, -1):
            window = sequence[start:stop]
            slices.append(window)
            largest_rectangle.append(len(window) * min(window))
    print(slices)
    # print(max(largest_rectangle))
def produce_combinations_faster(sequence):
    """Print the largest rectangle area over all contiguous slices of
    *sequence* (area = slice length * minimum value in the slice).

    Bug fix: the original used the list slice itself as a dict key, which
    raises TypeError (lists are unhashable) on the very first lookup; the
    dedup keys are now tuples.
    """
    largest_rectangle = []
    seen = {}
    for i, j in itertools.combinations(range(len(sequence) + 1), 2):
        window = sequence[i:j]
        key = tuple(window)  # tuples are hashable, lists are not
        if key not in seen:
            seen[key] = 1
            largest_rectangle.append(len(window) * min(window))
    print(max(largest_rectangle))
produce_combinations_preserving_order(sequence) | true |
bd96b6108bb76445ceb52d06a26f31f1fe026fed | Python | webclinic017/fa-absa-py3 | /Python modules/repair_dma_trades.py | UTF-8 | 6,352 | 2.53125 | 3 | [] | no_license | import acm
from at_ael_variables import AelVariableHandler
ALLOCTEXT = "Allocation Process"
class Instr:
    """Collects all trades booked against one instrument name."""

    def __init__(self, name, trade):
        self.name = name
        self.trades = [trade]

    def add_trade(self, trade):
        self.trades.append(trade)

    def get_positive_trades(self):
        """Return (trades with positive quantity, their summed quantity)."""
        positives = []
        total = 0
        for trade in self.trades:
            quantity = trade.Quantity()
            if quantity > 0:
                positives.append(trade)
                total += quantity
        return (positives, total)

    def get_negative_trades(self):
        """Return (trades with negative quantity, their summed quantity)."""
        negatives = []
        total = 0
        for trade in self.trades:
            quantity = trade.Quantity()
            if quantity < 0:
                negatives.append(trade)
                total += quantity
        return (negatives, total)
def get_dmas(date, status, portfolio_list):
    """Return AMBA-created trades with *status*, trade-timed on *date*,
    booked in any portfolio from *portfolio_list*.

    The GREATER_EQUAL/LESS_EQUAL pair on the same value restricts
    TradeTime to that single date.
    """
    query = acm.CreateFASQLQuery('FTrade', 'AND')
    query.AddAttrNode('Status', 'EQUAL', acm.EnumFromString('TradeStatus', status))
    query.AddAttrNode('TradeTime', 'GREATER_EQUAL', date)
    query.AddAttrNode('TradeTime', 'LESS_EQUAL', date)
    query.AddAttrNode('CreateUser.Name', 'EQUAL', 'AMBA')
    # Any of the supplied portfolios matches (OR sub-node).
    orNode = query.AddOpNode('OR')
    for portf in portfolio_list:
        orNode.AddAttrNode('Portfolio.Name', 'EQUAL', portf.Name())
    trades = query.Select()
    return trades
def get_allocs(date, portfolio_list):
    """Return voided allocation trades (Text1 == 'Allocation Process',
    not created by AMBA) trade-timed on *date* in any of the portfolios.
    """
    query = acm.CreateFASQLQuery('FTrade', 'AND')
    query.AddAttrNode('Status', 'EQUAL', acm.EnumFromString('TradeStatus', 'VOID'))
    query.AddAttrNode('TradeTime', 'GREATER_EQUAL', date)
    query.AddAttrNode('TradeTime', 'LESS_EQUAL', date)
    query.AddAttrNode('CreateUser.Name', 'NOT_EQUAL', 'AMBA')
    query.AddAttrNode('Text1', 'EQUAL', 'Allocation Process')
    # Any of the supplied portfolios matches (OR sub-node).
    orNode = query.AddOpNode('OR')
    for portf in portfolio_list:
        orNode.AddAttrNode('Portfolio.Name', 'EQUAL', portf.Name())
    trades = query.Select()
    return trades
def create_instrument_objs(trades):
    """Group *trades* by instrument name into Instr buckets.

    Returns a dict mapping instrument name -> Instr holding every trade
    booked against that instrument. (A never-used local ``insname1`` in
    the original was removed.)
    """
    dic_trades = {}
    for t in trades:
        insname = t.Instrument().Name()
        ins = dic_trades.get(insname)
        if ins is None:
            dic_trades[insname] = Instr(insname, t)
        else:
            ins.add_trade(t)
    return dic_trades
def apply_changes(sum_dma, sum_alloc, trades_allocs, trades_dmas):
    """Link DMA trades to their aggregated allocation trade.

    Requires the DMA quantities to sum exactly to the allocation quantity,
    otherwise a RuntimeError is raised. Trades currently in 'Void' are
    flipped to 'Simulated' so a later pass can modify them; other trades
    are stamped with the allocation text, pointed at the allocation trade
    via Contract, and voided again. Every touched trade is committed.
    """
    allocnbr = trades_allocs[0].Oid()
    if sum_dma == sum_alloc:
        for tr in trades_dmas:
            if tr.Status() == 'Void':
                # Change trade status so that it can be modified
                tr.Status('Simulated')
            else:
                # Modify trade and change the status back
                tr.Text1(ALLOCTEXT)
                tr.Contract(allocnbr)
                tr.Status('Void')
            tr.Commit()
        print("Trades {0} have contract to {1}".format([trd.Oid() for trd in trades_dmas], allocnbr))
    else:
        raise RuntimeError("Overall quantities do not match for dma trades {0} and "\
            "allocation trade {1}".format([trd.Oid() for trd in trades_dmas], allocnbr))
def modify_trades(date, current_dma_status, portfolio_list, dry_run=True):
    """Pair each instrument's DMA trades with its allocation trade and
    apply the contract/status changes inside one acm transaction.

    Positive and negative quantities are matched separately; at most one
    allocation trade is allowed per side. With dry_run=True (or on any
    error) the transaction is aborted so nothing is persisted.
    """
    dma_trades = get_dmas(date, current_dma_status, portfolio_list)
    allocs_trades = get_allocs(date, portfolio_list)
    dic_dmas = create_instrument_objs(dma_trades)
    dic_allocs = create_instrument_objs(allocs_trades)
    acm.BeginTransaction()
    print('Starting amending {0} trades...'.format(len(dma_trades)))
    try:
        for t in dic_dmas.values():
            pos_plus_trades, sum_plus = t.get_positive_trades()
            pos_minus_trades, sum_minus = t.get_negative_trades()
            alloc_ins = dic_allocs.get(t.name)
            if not alloc_ins:
                raise RuntimeError('Nonexistent allocation trade for instrument "%s"' %t.name)
            pos_plus_allocs, sum_plus_alloc = alloc_ins.get_positive_trades()
            pos_minus_allocs, sum_minus_alloc = alloc_ins.get_negative_trades()
            if len(pos_plus_allocs) > 1 or len(pos_minus_allocs) > 1:
                raise RuntimeError('More than 1 allocation trade found for instrument "{0}" : {1}'.format(
                    t.name,
                    [trd.Oid() for trd in pos_plus_allocs + pos_minus_allocs]))
            # Match the buy side and the sell side independently.
            if sum_plus > 0:
                apply_changes(sum_plus, sum_plus_alloc, pos_plus_allocs, pos_plus_trades)
            if sum_minus < 0:
                apply_changes(sum_minus, sum_minus_alloc, pos_minus_allocs, pos_minus_trades)
        if not dry_run:
            acm.CommitTransaction()
        else:
            acm.AbortTransaction()
    except Exception as exc:
        acm.AbortTransaction()
        print("ERROR: {0}".format(str(exc)))
        print("No trade was changed. Please, contact Ondrej Bahounek with log details.")
# Task dialog parameters (date range, allocation portfolios, dry-run switch).
ael_variables = AelVariableHandler()
ael_variables.add('start_date',
                  label='Start date',
                  default='2014-11-14')
ael_variables.add('end_date',
                  label='End date',
                  default='2014-11-17')
ael_variables.add('allocation_portfolio',
                  label='Allocation portfolio',
                  cls=acm.FPhysicalPortfolio,
                  multiple=True)
ael_variables.add_bool('dry_run',
                       label='Dry run',
                       default=True)
def ael_main(config):
    """Entry point invoked by the task framework.

    Walks banking days from start to end date; on each day it first
    releases voided DMA trades ('Void' -> 'Simulated') and then links the
    released ('Simulated') trades to their allocation trades.
    """
    print('Dry run: {0}'.format(config['dry_run']))
    calendar = acm.FCalendar['ZAR Johannesburg']
    date = config['start_date']
    # Start on the first banking day at or after the configured start date.
    if calendar.IsNonBankingDay(None, None, date):
        date = calendar.AdjustBankingDays(date, 1)
    end_date = config['end_date']
    while date <= end_date:
        # Change status of trades we need to modify (current status is 'Void')
        modify_trades(date,
                      'Void',
                      config['allocation_portfolio'],
                      config['dry_run'])
        # Connect DMA trades to aggregated trades (current status is 'Simulated')
        modify_trades(date,
                      'Simulated',
                      config['allocation_portfolio'],
                      config['dry_run'])
        date = calendar.AdjustBankingDays(date, 1)
| true |
11dc8e7b1d3a436bdb29b884bacd9409eb8a7aa7 | Python | nehagarg/ada_teleoperation | /src/ada_teleoperation/RobotState.py | UTF-8 | 2,960 | 2.640625 | 3 | [] | no_license | #RobotState.py
#keeps track of the state of the robot
import copy
import numpy as np
import rospy
from Utils import *
#TODO make this dynamic
#NUM_FINGER_DOFS = rospy.get_param('/ada/num_finger_dofs', 2)
class RobotState(object):
    """Tracks the robot end-effector pose, finger positions and the
    current teleoperation control mode."""

    def __init__(self, ee_trans, finger_dofs, mode=0, num_modes=2):
        # Copy the inputs so later mutation by the caller cannot alias in.
        self.ee_trans = ee_trans.copy()
        self.finger_dofs = finger_dofs.copy()
        self.mode = mode
        self.num_modes = num_modes

    def get_pos(self):
        """Translation (x, y, z) component of the end-effector transform."""
        return self.ee_trans[0:3, 3]

    def get_finger_dofs(self):
        return self.finger_dofs

    def switch_mode(self):
        """Advance to the next mode (wrapping around)."""
        self.mode = self.next_mode()

    def next_mode(self):
        return (self.mode + 1) % self.num_modes

    def set_mode(self, mode):
        assert mode >= 0 and mode <= self.num_modes
        self.mode = mode

    def mode_after_action(self, action):
        """Mode this state would be in after *action* (no state change)."""
        if action.is_no_mode_switch():
            return self.mode
        else:
            return action.switch_mode_to

    def state_after_action(self, action, time):
        """Return a deep copy of this state advanced by *action* over *time*."""
        state_copy = copy.deepcopy(self)
        if not action.is_no_move():
            state_copy.ee_trans = ApplyTwistToTransform(action.twist, state_copy.ee_trans, time)
        if not action.is_no_mode_switch():
            state_copy.mode = action.switch_mode_to
        return state_copy

    def num_finger_dofs(self):
        # Bug fix: the original referenced the bare name ``finger_dofs``
        # (NameError at call time); the instance attribute was intended.
        return len(self.finger_dofs)
#actions we can enact on the state
#corresponds to a mode switch and a twist
#TODO handling an unspecified number of fingers is messy here. Handle this better
class Action(object):
    """A commanded action: an end-effector twist, finger velocities, and an
    optional mode switch.

    NOTE(review): ``Action.no_finger_vel`` only exists after
    :meth:`set_no_finger_vel` has been called; constructing a default
    Action before that raises AttributeError (unchanged from the original).
    """
    no_mode_switch = -1
    no_move = np.zeros(6)

    def __init__(self, twist=None, finger_vel=None, switch_mode_to=no_mode_switch):
        # Bug fix: the twist default used to be a single array evaluated
        # once at def time, so mutating one default-constructed action's
        # twist silently changed every other one. Allocate per instance.
        self.twist = np.zeros(6) if twist is None else twist
        if finger_vel is None:
            self.finger_vel = Action.no_finger_vel
        else:
            self.finger_vel = finger_vel
        self.switch_mode_to = switch_mode_to

    def as_tuple(self):
        return (self.twist, self.switch_mode_to)

    def is_no_action(self):
        # Bug fix: ``==`` on numpy arrays is elementwise, so using it in a
        # boolean ``and`` raised ValueError; compare arrays explicitly.
        return (np.array_equal(self.twist, self.no_move)
                and self.switch_mode_to == self.no_mode_switch
                and np.array_equal(self.finger_vel, Action.no_finger_vel))

    def __eq__(self, other):
        # Same elementwise-comparison fix as in is_no_action().
        return (np.array_equal(self.twist, other.twist)
                and self.switch_mode_to == other.switch_mode_to
                and np.array_equal(self.finger_vel, other.finger_vel))

    def is_no_mode_switch(self):
        return self.switch_mode_to == self.no_mode_switch

    def is_no_move(self):
        return np.linalg.norm(self.twist) < 1e-10

    def is_no_finger_move(self):
        return np.linalg.norm(self.finger_vel) < 1e-10

    def __str__(self):
        return 'twist:' + str(self.twist) + ' finger:' + str(self.finger_vel) + ' mode to:' + str(self.switch_mode_to)

    @staticmethod
    def set_no_finger_vel(num_finger_dofs):
        """Configure the class-wide zero finger velocity for robots with
        *num_finger_dofs* finger degrees of freedom."""
        Action.no_finger_vel = np.zeros(num_finger_dofs)
| true |
740536495b3161e41ddb5e8c6fdf0f0bdd99f71a | Python | michaelstrefeler/integer_sequences | /padovan.py | UTF-8 | 668 | 3.890625 | 4 | [] | no_license | # a(n) = a(n-2) + a(n-3) with a(0)=1, a(1)=a(2)=0
def padovan(n=5, output=None, number=1):
    """Interactively build the first *n* Padovan numbers.

    Recurrence: a(n) = a(n-2) + a(n-3) with a(0)=1, a(1)=a(2)=0.
    Prompts the user for how many terms to produce (minimum 5, default
    used when the prompt yields fewer) and returns them as a list.

    Bug fix: ``output`` used to default to a shared mutable list, so each
    subsequent call kept appending to the previous call's results.
    """
    if output is None:
        output = []
    amount = input('Choose a number (at least 5): ')
    try:
        amount = int(amount)
        if amount < 5:
            n = 5
            print('I chose 5 for you because you can\'t follow instructions')
        else:
            n = amount
    except ValueError:
        print('That was not a number. Try again later')
        exit()
    while n > 0:
        output.append(number)
        if len(output) >= 3:
            # Next term: a(k) = a(k-2) + a(k-3).
            number = output[len(output) - 2] + output[len(output) - 3]
        else:
            number = 0
        n -= 1
    return output
# Demo run: prompt once and print the resulting sequence.
print(padovan())
| true |
def divisorCounter(n):
    """Count the divisors of *n* that are at most n // 2 (i.e. every
    divisor except *n* itself, for n > 1)."""
    return sum(1 for candidate in range(1, n // 2 + 1) if n % candidate == 0)
# print('count :',divisorCounter(28))
# Search for the first triangular number tn whose proper-divisor count
# exceeds 500 (Project Euler #12 style; the full divisor count of tn is
# result + 1, since divisorCounter excludes tn itself).
n = 1
tn = 1
while tn > 0:
    result = divisorCounter(tn)
    if result > 500:
        # NOTE(review): this prints the divisor count rather than the
        # triangular number -- printing ``tn`` was probably intended.
        print(result)
        break
    else:
        # NOTE(review): divisorCounter(tn) is recomputed here although
        # ``result`` already holds that value.
        print(tn,divisorCounter(tn))
    n+=1
    tn += n
# print(divisorCounter(76576500))
# Copy duration-feature lines whose second column is the boundary marker
# "_" from dur.feats into bound.feats.
# NOTE(review): the output is opened in append mode, so re-running the
# script duplicates lines -- confirm that is intended.
with open("dur.feats") as f:
    with open("bound.feats", "a") as g:
        for line in f:
            token = line.split()
            # Robustness fix: blank/short lines used to raise IndexError.
            if len(token) > 1 and token[1] == "_":
                g.write(line)
| true |
6462b802793b6b181e61b5d533f125fc582f5b75 | Python | Raghu150999/algorithms | /scc/scripts/parse.py | UTF-8 | 259 | 2.859375 | 3 | [] | no_license | import sys
f = open(sys.argv[1], "r")
mp = {}
cnt = 0
while True:
l = f.readline().split()
if not l:
break
for node in l:
if mp.get(node) == None:
cnt += 1
mp[node] = cnt
val = int(node)
print(cnt) | true |
204a02ee6c4590b3dfd81497ecc498f5f3604c73 | Python | thirtywang/OpenPNM | /OpenPNM/Network/__DelaunayCubic__.py | UTF-8 | 5,124 | 3.1875 | 3 | [
"MIT"
] | permissive | """
===============================================================================
DelaunayCubic: Generate semi-random networks based on Delaunay Tessellations and
perturbed cubic lattices
===============================================================================
"""
import OpenPNM
import scipy as sp
import sys
import numpy as np
from OpenPNM.Network.__Delaunay__ import Delaunay
from OpenPNM.Base import logging
logger = logging.getLogger(__name__)
class DelaunayCubic(Delaunay):
    r"""
    This class contains the methods for creating a *Delaunay* network topology
    based connecting pores with a Delaunay tessellation.
    This Subclass of Delaunay generates points on a cubic lattice and then perturbs
    them to prevent degeneracy
    Parameters
    ----------
    name : string
        A unique name for the network
    shape : tuple of ints
        The (i,j,k) size and shape of the network.
    spacing : 3 x 1 array defining the base lattice spacing of the network
    perturbation : float between 0 and 1 controlling the maximum perturbation
        of lattice points as a fraction of the lattice spacing
    arrangement : string
        usage: 'SC'- Simple Cubic (default if left blank)
               'O' - Orthorhombic
               'BCC' - Body Centred Cubic
               'FCC' - Face Centred Cubic
    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.DelaunayCubic(shape=[5, 5, 5],
    ...                                    spacing=[4e-5, 4e-5, 4e-5],
    ...                                    jiggle_factor=0.01)
    >>> pn.num_pores()
    125
    """
    def __init__(self, shape=None, spacing=[1, 1, 1],
                 perturbation=0.1, arrangement='SC', **kwargs):
        # The (empty) array is only used for its shape / index enumeration;
        # a 3x3x3 lattice is the fallback when no shape is given.
        if shape is not None:
            self._arr = np.atleast_3d(np.empty(shape))
        else:
            self._arr = np.atleast_3d(np.empty([3, 3, 3]))
        # Store original network shape
        self._shape = sp.shape(self._arr)
        # Store network spacing instead of calculating it
        self._spacing = sp.asarray(spacing)
        self._num_pores = np.prod(np.asarray(self._shape))
        self._domain_size = np.asarray(self._shape) * self._spacing
        self._perturbation = perturbation
        self._arrangement = arrangement
        super().__init__(num_pores=self._num_pores,
                         domain_size=self._domain_size,
                         **kwargs)
    def _generate_pores(self, prob=None):
        r"""
        Generate the pores with numbering scheme.
        """
        # One point per lattice cell, shifted to the cell centre.
        points = np.array([i for i, v in np.ndenumerate(self._arr)], dtype=float)
        points += 0.5
        # 2D Orthorhombic adjustment - shift even rows back a bit and odd rows
        # forward a bit
        " 0 0 0 "
        " 0 0 0 0 "
        " 0 0 0 "
        if self._arrangement == 'O':
            shift_y = np.array([0, 0.25, 0])
            shift_x = np.array([0.25, 0, 0])
            points[(points[:, 0] % 2 == 0)] -= shift_y
            points[(points[:, 2] % 2 != 0)] -= shift_x
            points[(points[:, 0] % 2 != 0)] += shift_y
            points[(points[:, 2] % 2 == 0)] += shift_x
        # BCC = Body Centre Cubic
        if self._arrangement == 'BCC':
            # Add one extra point at the centre of every interior cell corner.
            body_points = []
            for i in range(1, self._shape[0]):
                for j in range(1, self._shape[1]):
                    for k in range(1, self._shape[2]):
                        body_points.append([i, j, k])
            body_points = np.asarray(body_points)
            points = np.concatenate((points, body_points))
        # FCC = Face Centre Cubic
        if self._arrangement == 'FCC':
            # Add the six half-offset face points around each interior corner,
            # skipping duplicates shared between neighbouring cells.
            # NOTE(review): the ``not in`` list membership test is O(n) per
            # candidate; kept because replacing it (e.g. with a set) would
            # change the point ordering and hence pore numbering.
            face_points = []
            for i in range(1, self._shape[0]):
                for j in range(1, self._shape[1]):
                    for k in range(1, self._shape[2]):
                        left = [i-0.5, j, k]
                        right = [i+0.5, j, k]
                        back = [i, j-0.5, k]
                        front = [i, j+0.5, k]
                        bottom = [i, j, k-0.5]
                        top = [i, j, k+0.5]
                        if left not in face_points:
                            face_points.append(left)
                        if right not in face_points:
                            face_points.append(right)
                        if back not in face_points:
                            face_points.append(back)
                        if front not in face_points:
                            face_points.append(front)
                        if bottom not in face_points:
                            face_points.append(bottom)
                        if top not in face_points:
                            face_points.append(top)
            face_points = np.asarray(face_points)
            points = np.concatenate((points, face_points))
        # Random perturbation (uniform in +/- perturbation/2 lattice units)
        # breaks the lattice degeneracy before the Delaunay tessellation.
        jiggle = (np.random.rand(len(points), 3)-0.5)*self._perturbation
        points += jiggle
        # Scale from lattice units into physical coordinates.
        points *= self._spacing
        self['pore.coords'] = points
        logger.debug(sys._getframe().f_code.co_name + ': End of method')
| true |
bd471310d4803a44a5acbbb449c16f956f33812f | Python | mangalagb/Leetcode | /Medium/CloneGraph.py | UTF-8 | 1,430 | 3.953125 | 4 | [] | no_license | # Given a reference of a node in a connected undirected graph.
#
# Return a deep copy (clone) of the graph.
#
# Each node in the graph contains a val (int) and a list (List[Node]) of its neighbors.
# Definition for a Node.
class Node(object):
    """A vertex of an undirected graph: an integer value plus the list of
    adjacent nodes."""

    def __init__(self, val=0, neighbors=None):
        """Create a node; a fresh empty neighbour list is used when
        ``neighbors`` is not supplied."""
        self.val = val
        if neighbors is None:
            neighbors = []
        self.neighbors = neighbors
class Solution(object):
    """Produces a deep copy of a connected undirected graph (LeetCode 133)."""

    def cloneGraph(self, node):
        """
        :type node: Node
        :rtype: Node
        """
        # An empty graph clones to itself (None / falsy input).
        return self.visit(node, {}) if node else node

    def visit(self, node, visited):
        """Return the clone of ``node``, recursively cloning its neighbours.

        ``visited`` maps each original node to its already-built clone so
        that cycles terminate and shared nodes are cloned exactly once.
        """
        already_cloned = visited.get(node)
        if already_cloned is not None:
            return already_cloned
        clone = Node(node.val)
        # Register before recursing so cycles find the clone in progress.
        visited[node] = clone
        clone.neighbors = [self.visit(nbr, visited) for nbr in node.neighbors]
        return clone

    def make_input_graph(self):
        """Build the 4-cycle 1-2-3-4-1 used as the demo input; returns node 1."""
        node1, node2, node3, node4 = Node(1), Node(2), Node(3), Node(4)
        node1.neighbors = [node2, node4]
        node2.neighbors = [node1, node3]
        node3.neighbors = [node2, node4]
        node4.neighbors = [node1, node3]
        return node1
# Demo: build the 4-cycle sample graph and deep-copy it.
my_sol = Solution()
root = my_sol.make_input_graph()
ans = my_sol.cloneGraph(root)
| true |
d658cf858d0ce8830ba41f9f2ee1e5f57fc46dbe | Python | antodipar/learning-spark | /logistic_regression_a.py | UTF-8 | 1,142 | 3.265625 | 3 | [] | no_license | from pyspark.sql import SparkSession
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import BinaryClassificationEvaluator, MulticlassClassificationEvaluator
# Start (or reuse) a Spark session for this example.
spark = SparkSession.builder.appName('log_regression').getOrCreate()
# Load data
# (libsvm format: label followed by sparse index:value feature pairs)
my_data = spark.read.format('libsvm').load('data/Logistic_Regression/sample_libsvm_data.txt')
# Create instance and fitted
model = LogisticRegression(labelCol='label', featuresCol='features', predictionCol='prediction')
fitted_model = model.fit(my_data)
# Summary
# Training summary exposes per-row label/probability/prediction columns.
summary = fitted_model.summary
predictions_df = summary.predictions
predictions_df.show()
# Let's split data by train and test dataset
train_data, test_data = my_data.randomSplit([0.7, 0.3])
# Retrain
final_model = LogisticRegression()
fitted_final_model = final_model.fit(train_data)
# Evaluate
# evaluate() on a held-out DataFrame returns a summary over the test split.
prediction_and_labels = fitted_final_model.evaluate(test_data)
prediction_and_labels.predictions.show()
# Let's use evaluators
# BinaryClassificationEvaluator's default metric is areaUnderROC.
my_eval = BinaryClassificationEvaluator()
auc = my_eval.evaluate(prediction_and_labels.predictions)
print(f'AUC {auc}')
# Another way
# The evaluation summary exposes the same metric directly (rebinds ``auc``).
auc = prediction_and_labels.areaUnderROC
| true |
6e6652ffabf33a744c6cb37ce8eff57fd8f12730 | Python | killshotrevival/qzzo | /qazzoo/Users/temp.py | UTF-8 | 1,193 | 2.65625 | 3 | [] | no_license | from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
from Crypto import Random
def new_keys(key_size):
    """Generate a fresh RSA key pair of ``key_size`` bits.

    Returns a ``(public_key, private_key)`` tuple.
    """
    rng = Random.new().read
    private = RSA.generate(key_size, rng)
    public = private.publickey()
    return public, private
def import_key(externKey):
    """Deserialise an exported RSA key (PEM/DER) back into a key object."""
    key = RSA.importKey(externKey)
    return key
def export_private_key(private_key):
    """Write ``private_key`` in exported (PEM) form to the file ``private_key``."""
    exported = private_key.exportKey()
    with open("private_key", "wb") as key_file:
        key_file.write(exported)
def export_public_key(public_key):
    """Write ``public_key`` in exported (PEM) form to the file ``public_key``."""
    exported = public_key.exportKey()
    with open("public_key", "wb") as key_file:
        key_file.write(exported)
def getpublickey(priv_key):
    """Derive the public half of an RSA private key."""
    public = priv_key.publickey()
    return public
def encrypt(message, pub_key):
    """RSA-OAEP encrypt ``message`` (bytes) under ``pub_key``."""
    return PKCS1_OAEP.new(pub_key).encrypt(message)
def decrypt(ciphertext, priv_key):
    """RSA-OAEP decrypt ``ciphertext`` with the matching private key."""
    return PKCS1_OAEP.new(priv_key).decrypt(ciphertext)
def verify_data(data, priv_key, ciphertext):
    """Return True if ``ciphertext`` decrypts (with ``priv_key``) to ``data``.

    ``data`` is expected to be an ASCII ``str``; the decrypted bytes are
    decoded as ASCII before comparison.  Raises ``UnicodeDecodeError`` for
    non-ASCII plaintext, as the original did.
    """
    # Removed leftover debug print() calls that leaked plaintext type info
    # to stdout on every verification.
    decrypted = decrypt(ciphertext, priv_key)
    return decrypted.decode('ASCII') == data
# if __name__ == "__main__":
# pb_k, pr_k = new_keys(1024)
# export_private_key(pr_k)
# export_public_key(pb_k)
| true |
d995e17aa249938e7f97b0daa7f2bfcaf2b8c9e1 | Python | mchoopani/Image-To-Pdf-Telegram-Bot | /bot/views.py | UTF-8 | 6,105 | 2.546875 | 3 | [] | no_license | import os
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from json import loads
import requests
from urllib.request import urlretrieve as download
import img2pdf
from bot.ModelClasses import Message
# constant string that is used to created urls
TELEGRAM_URL = "https://api.telegram.org/bot"
FILE_URL = "https://api.telegram.org/file/bot"
# token of bot will be here
TOKEN = "<BOTTOKEN>"
def url_creator(method_name):
    """Build the full Bot API endpoint URL for ``method_name``."""
    return "{base}{token}/{method}".format(
        base=TELEGRAM_URL, token=TOKEN, method=method_name)
def send_document(chat_id, file_path, caption):
    """Send the file at ``file_path`` to ``chat_id`` as a Telegram document.

    ``caption`` is attached when not None.  The HTTP response is ignored,
    matching the original best-effort behaviour.
    """
    data = {
        "chat_id": chat_id,
    }
    if caption is not None:
        data["caption"] = caption
    # ``with`` guarantees the handle is closed even if the POST raises;
    # the original only closed it on the success path.
    with open(file_path, "rb") as file:
        files = {"document": file}
        requests.post(url_creator("sendDocument"), data, files=files)
def send_message(chat_id, text, reply_to_message_id=None):
    """Send a plain-text Telegram message, optionally as a reply.

    The HTTP response is ignored (fire-and-forget).
    """
    payload = {"chat_id": chat_id, "text": text}
    if reply_to_message_id is not None:
        payload["reply_to_message_id"] = reply_to_message_id
    requests.post(url_creator("sendMessage"), payload)
# get data of file from telegram servers
def get_file(file_id, chat_id):
    """Ask Telegram's getFile for the server-side path of ``file_id``.

    Returns the reported ``file_path``, ``None`` when the response carried
    no path, or ``""`` when the request itself failed (in which case an
    apology message is sent to the chat).
    """
    params = {
        "file_id": file_id.replace("\n", "")
    }
    file_path = ""
    try:
        response = requests.get(url_creator("getFile"), data=params)
        file_path = response.json().get("result").get("file_path", None)
    except Exception:
        # Network / JSON errors: tell the user instead of crashing.
        send_message(chat_id, "there is a trouble in connection with telegram servers...")
    return file_path
def download_file(file_path, user_id, index):
    """Fetch a Telegram-hosted file and save it locally as
    ``./<user_id>_<index>.<extension>``; returns the local path."""
    extension = file_path.split(".")[-1]
    local_path = f"./{user_id}_{index}.{extension}"
    remote_url = f"{FILE_URL}{TOKEN}/{file_path}"
    download(remote_url, local_path)
    return local_path
# convert list of images to pdf by img2pdf library
def convert_image_to_pdf(images: list, user_id):
    """Bundle the image files listed in ``images`` into ``<user_id>.pdf``
    (written to the working directory) and return that file name."""
    pdf_path = f"{user_id}.pdf"
    with open(pdf_path, "wb") as pdf_file:
        pdf_file.write(img2pdf.convert(images))
    return pdf_path
# delete photos and pdf to optimize memory and respect to user personal data
def delete_files(downloaded_pictures, user_id):
    """Best-effort cleanup: remove every downloaded image plus the generated
    ``<user_id>.pdf``, silently ignoring files that are already gone."""
    targets = list(downloaded_pictures)
    targets.append(f"{user_id}.pdf")
    for target in targets:
        try:
            os.remove(target)
        except Exception:
            # Missing (or otherwise unremovable) files are ignored,
            # exactly as in the original.
            pass
# main function that telegram requests will call it
@csrf_exempt
def telegram_webhook(request, *args, **kwargs):
    """Webhook entry point for Telegram updates.

    Photos are queued per user; the commands ``/start``, ``create`` and
    ``export`` (or ``!``) drive the image-to-PDF workflow.  Always answers
    Telegram with an "ok" JSON so the update is not redelivered.
    """
    # use Message class to translate message data to an object to easier accessibility
    message = Message(translate_request(request))
    if message.is_photo_message():
        # add photo ids to database (txt file) to use it whenever user want to export pdf
        insert_photo_to_database(message.sender.id, message.photo.photo_id)
        return JsonResponse({"ok": "POST request processed"})
    else:
        if message.text == "/start":
            # Welcome Message
            send_message(message.sender.id, "Hello welcome to your bot.\n"
                                            "to start a project send: create\n"
                                            "to end last active project send: export\n"
                                            "after sending 'create' before history of projects will clear, "
                                            "and start new project.\n"
                                            "after send `export` all of pictures that send after last 'create'"
                                            " and before 'export' will packed and converted to PDF.\n"
                                            "shortcut: you can send your photos, at last send: !\n"
                                            "this shortcut (!) collects before pictures and finally"
                                            "clears the list of photos and waits for new project.")
            return JsonResponse({"ok": "POST request processed"})
        if message.text == "create":
            send_message(message.sender.id, "new project created.")
            # TODO: delete from real database
            # delete old data to have a new project (truncate the user's file)
            open(f"{message.sender.id}.txt", "w").close()
            return JsonResponse({"ok": "POST request processed"})
        if message.text == "export" or message.text == "!":
            send_message(message.sender.id, "bot will export pdf of this project soon.\n"
                                            "please wait...")
            downloaded_pictures = []
            try:
                # get all photo ids from database
                photos = select_photo_from_database(message.sender.id)
                for i in range(len(photos)):
                    # download each photo that contains in data base and add its name to convert list
                    downloaded_pictures \
                        .append(str(download_file(get_file(photos[i], message.sender.id), message.sender.id, i)))
                # convert downloaded photos to pdf
                convert_image_to_pdf(downloaded_pictures, message.sender.id)
                # send converted pdf to user
                send_document(message.sender.id, f"{message.sender.id}.pdf", None)
                # TODO: delete from real database
                open(f"{message.sender.id}.txt", "w").close()
                # delete pdf and photos
                delete_files(downloaded_pictures, message.sender.id)
            except Exception:
                # NOTE(review): any failure in the export pipeline is silently
                # swallowed -- the user gets no error message; consider at
                # least logging here.
                pass
            return JsonResponse({"ok": "POST request processed"})
    # Fallthrough for any other text message: acknowledge and do nothing.
    return JsonResponse({"ok": "POST request processed"})
def insert_photo_to_database(user_id, photo_id):
    """Append ``photo_id`` to the user's pending-photo file ``<user_id>.txt``.

    One photo id per line; the file is created when absent.
    """
    # TODO: replace the flat text file with a real database.
    # Mode "a" already creates the file when it does not exist, so the
    # original's separate create-then-append dance (open "x" + open "a")
    # was redundant; ``with`` also closes the handle even on error.
    with open(f"{user_id}.txt", "a") as db:
        db.write(f"{photo_id}\n")
def select_photo_from_database(user_id):
    """Return every stored photo-id line (trailing newline included) from
    the user's ``<user_id>.txt`` file."""
    # TODO: implement real database
    with open(f"{user_id}.txt") as db:
        return db.readlines()
# returns request body
def translate_request(request):
    """Parse the JSON body of an incoming Django request into Python data."""
    raw_body = request.body
    return loads(raw_body)
| true |
c0b9c2fbb9f7177502690bf9f6015430529ead1c | Python | webeautiful/ipy | /samples/func_args.py | UTF-8 | 609 | 3.5 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# 必选参数>默认参数
def power(x, n=2):
    """Demo of a default argument: return ``x`` multiplied by itself ``n``
    times (``n`` defaults to 2; ``n <= 0`` yields 1)."""
    result = 1
    remaining = n
    while remaining > 0:
        result = result * x
        remaining = remaining - 1
    return result
# 可变参数
def varargs(age, *extras):
    """Demo of variadic positionals: return the tuple of extra positional
    arguments captured after ``age``."""
    return extras
# 关键字参数
def kwargs(age, sex='female', **extra_keywords):
    """Demo of keyword arguments: return the dict of extra keyword
    arguments not consumed by ``age``/``sex``."""
    return extra_keywords
# 参数组合
def func(required , default='默认', *args, **kw):
    # Demo combining a required arg, a default arg, varargs and keyword
    # args; prints them all (Python 2 print statement -- this module is
    # Python 2 code).
    print '组合参数:required =', required,'default =', default, 'args =', args, 'kw =', kw
if __name__ == '__main__':
    # Exercise each parameter demo when run as a script
    # (Python 2 print statements).
    print power(5, 3)
    print varargs(*[1,3,3,4,8])
    print kwargs(30, name='xiong', sex='male')
    func('必选',4,5,6, count=4, age=30)
| true |
9a0ec338ddd44b60a49f5196f8d7c42fd7ef609d | Python | kashifmin/foo_bar | /prepare_the_bunnies_escape.py | UTF-8 | 3,125 | 3.890625 | 4 | [] | no_license |
# Problem: Given a grid, find the shortest path distance from upper left corner to bottom right corner
# Spaces with a 1 cannot be passed, spaces with a 0 can
# Up to one wall can be removed
# Basically uses Dijkstra's method to search for shortest path
# Runtime complexity: O(h*w*n), where h is grid height, w is grid width
# and n is number of walls
# Space complexity: O(h*w)
# Gets the minimum distance of all unvisited spaces
# Or returns None if no remaining locations unvisited and
# have a viable path
def get_min_dist(distances, visited):
    """Return the ``[x, y]`` position of the unvisited cell with the
    smallest tentative distance, or ``None`` when no unvisited cell has a
    distance below the 999999999 "infinity" sentinel."""
    best_dist = 999999999
    best_pos = None
    for x, dist_row in enumerate(distances):
        for y, dist in enumerate(dist_row):
            if visited[x][y] == 0 and dist < best_dist:
                best_dist = dist
                best_pos = [x, y]
    return best_pos
# Returns the shortest possible distance given a maze
# or "infinity" if not possible
def get_shortest(maze):
    """Return the length of the shortest path from the top-left cell to the
    bottom-right cell of ``maze`` (0 = open, 1 = wall), counting both
    endpoints, or the 999999999 "infinity" sentinel when no path exists.

    Dijkstra's algorithm on a uniform-cost grid.  Positions are ``[a, b]``
    where ``a`` indexes against ``len(maze)`` and ``b`` against
    ``len(maze[0])``, matching get_min_dist's indexing.
    """
    # Essentially infinity in this problem.
    infinity = 999999999
    height = len(maze)
    width = len(maze[0])
    # Tentative distance to each cell; the start cell counts as distance 1.
    distances = [[infinity] * width for _ in range(height)]
    visited = [[0] * width for _ in range(height)]
    distances[0][0] = 1
    current = [0, 0]
    while True:
        # Dijkstra: once a cell is selected its distance is final, so we can
        # return as soon as the goal comes up.
        if current[0] == height - 1 and current[1] == width - 1:
            return distances[height - 1][width - 1]
        current_dist = distances[current[0]][current[1]]
        # Relax the four orthogonal neighbours that are in bounds,
        # unvisited and open.
        for step_a, step_b in ((1, 0), (-1, 0), (0, -1), (0, 1)):
            na = current[0] + step_a
            nb = current[1] + step_b
            if 0 <= na < height and 0 <= nb < width \
                    and visited[na][nb] == 0 and maze[na][nb] == 0 \
                    and current_dist + 1 < distances[na][nb]:
                distances[na][nb] = current_dist + 1
        visited[current[0]][current[1]] = 1
        current = get_min_dist(distances, visited)
        if current is None:
            # Every reachable cell was processed without hitting the goal.
            return infinity
def answer(maze):
    """Shortest top-left to bottom-right path length in ``maze`` when at
    most one wall (a 1 cell) may be removed.

    Brute force: try removing every wall in turn and keep the best result.
    The unmodified maze is included as the "remove nothing" candidate,
    which also fixes a crash -- the original called ``min()`` on an empty
    list whenever the maze contained no walls at all.
    """
    # Baseline: remove no wall ("up to one" includes zero).
    shortest_paths = [get_shortest(maze)]
    # Not the most optimal algorithm: temporarily knock out each wall and
    # re-run the search, restoring the wall afterwards.
    for x in range(0, len(maze)):
        for y in range(0, len(maze[0])):
            if maze[x][y] == 1:
                maze[x][y] = 0
                shortest_paths.append(get_shortest(maze))
                maze[x][y] = 1
    return min(shortest_paths)
8c57291f6154dbca8989b6c026f5c6bd4ec1fee0 | Python | RafalO754/WizualizacjaDanych | /Pusto.py | UTF-8 | 885 | 3.703125 | 4 | [] | no_license | class NaZakupy:
    def __init__(self, nazwaProduktu, ilosc, jednostkaMiary, cenaJed):
        """Shopping-list item: product name, quantity, unit of measure and
        unit price.  Arguments are stored as-is (no validation)."""
        self.nazwaProduktu = nazwaProduktu
        self.ilosc = ilosc
        self.jednostkaMiary = jednostkaMiary
        self.cenaJed = cenaJed
def wyświetlProdukt(self):
print('Nazwa produktu : ' + str(self.nazwaProduktu))
print('Ilosc : ' + str(self.ilosc))
print('Jednostka Miary : ' + str(self.jednostkaMiary))
print('Cena jednostkowa : ' + str(self.cenaJed))
def ileProduktu(self):
return str(self.ilosc) + ' ' + str(self.jednostkaMiary)
def ileKosztuje(self):
return self.ilosc * self.cenaJed
# Demo: create one shopping-list item and print its details.
obiekt = NaZakupy('Pieczarka z Mozabiku', 2.5, 'kg', 250)
# NOTE(review): the class defines the method with the accented spelling
# 'wyświetlProdukt', so this ASCII call raises AttributeError unless the
# class is fixed or an alias is added.
obiekt.wyswietlProdukt()
print('ile powinno być produktu: ' + obiekt.ileProduktu())
print('ile kosztuje produkt: ' + str(obiekt.ileKosztuje()) + ' PLN')
e42174477f26ebda91d53deca61d38f26099afa2 | Python | pmg102/pmg102.github.io | /py/loadmap.py | UTF-8 | 2,006 | 2.90625 | 3 | [] | no_license | import png
import string
# Read in a png
# Split into cells 8x8
# read 64 px in each cell
# Put into a hash table
# Report what cells and how many
# output cells from hashtable into sprite sbeet
# output map as grid of indices into sprite sheet
# (2400, 288, <map object at 0x00C37B50>, {
# 'gamma': 0.45455, 'bitdepth': 8, 'size': (2400, 288), 'greyscale': False, 'alpha': True, 'interlace': 0, 'planes': 4
# })
# Open the level image; png.Reader.read() yields (width, height, rows, info).
r = png.Reader('_mario-1-1.png')
image = r.read()
width = image[0]
height = image[1]
pixels = image[2]
metadata = image[3]
# Tiles are 8x8 pixels.
CELL_SIZE = 8
# Values per pixel in the row stream (e.g. 4 for RGBA).
VALUES_PER_PIXEL = metadata['planes']
# Current cell-row index (incremented per processed strip).
y = 0
# De-duplicated list of 8x8 sprites found so far.
sprites = []
# Level map: one row of sprite-sheet indices per cell row.
grid = []
def process(cells_rows):
    # Turn one horizontal strip of CELL_SIZE pixel rows into a grid row of
    # sprite indices, registering previously unseen sprites along the way.
    global y
    row = []
    for x in range(0, len(cells_rows[0])):
        sprite = []
        for row_idx in range(0, CELL_SIZE):
            # Keep only the first channel of each pixel (greyscale output).
            sprite.append([pixel[0] for pixel in cells_rows[row_idx][x]])
        try:
            sprite_idx = sprites.index(sprite)
        except ValueError:
            # First occurrence of this sprite: append and use the new index.
            sprite_idx = len(sprites)
            sprites.append(sprite)
        row.append(sprite_idx)
    y = y + 1
    grid.append(row)
cells_rows = []
for row in pixels:
    # Flush a finished strip of CELL_SIZE pixel rows before starting the next.
    if len(cells_rows) == CELL_SIZE:
        process(cells_rows)
        cells_rows = []
    cells_row = []
    cell_row = []
    pixel = []
    values_index = 0
    for value in row:
        # A pixel is complete every VALUES_PER_PIXEL values; a cell every
        # CELL_SIZE pixels.
        if values_index % VALUES_PER_PIXEL == 0 and len(pixel):
            cell_row.append(pixel)
            pixel = []
            if len(cell_row) == CELL_SIZE:
                cells_row.append(cell_row)
                cell_row = []
        pixel.append(value)
        values_index = values_index + 1
    cells_rows.append(cells_row)
    cells_row = []
# NOTE(review): the final pixel of each row (and hence the last cell of each
# row) is never flushed, and the last strip of CELL_SIZE rows is never passed
# to process() after the loop -- confirm whether dropping the trailing tile
# column/row is intentional.
f = open('sprites.png', 'wb') # binary mode is important
w = png.Writer(CELL_SIZE, CELL_SIZE * len(sprites), greyscale=True)
w.write(f, [row for sprite in sprites for row in sprite])
f.close()
# Encode each sprite index as one character (supports up to 62 sprites).
chars = string.digits + string.ascii_letters
f = open('grid.json', 'w')
f.write('[\n')
for row in grid:
    f.write('  "')
    for cell in row:
        f.write(chars[cell])
    f.write('",\n')
f.write(']\n')
f.close()
print('%s distinct sprites' % len(sprites))
2f36027769efb490a7b5f4272507db0a9d4a4da1 | Python | onlined/imdb-importer | /import_tsv.py | UTF-8 | 3,281 | 2.84375 | 3 | [
"MIT"
] | permissive | """ Import imdb open .tsv data to PostgreSQL database.
Files to import (name.basics.tsv, title.basics.tsv)
should be in the same path as import_tsv.py script.
"""
import psycopg2
import csv
import time
import io
# Wall-clock start time, reported at the end of the run.
st = time.time()
connection = psycopg2.connect(
    host='',
    dbname='',
    port='',
    user='',
    password=''
)
cursor = connection.cursor()
# Named (server-side) cursor: the big SELECT below is streamed in batches
# instead of being loaded into client memory; withhold=True keeps the cursor
# usable across the commits issued inside the loop.
server_side_cursor = connection.cursor('names_to_titles', withhold=True)
print('Dropping old database tables...')
cursor.execute('DROP TABLE IF EXISTS titles')
cursor.execute('DROP TABLE IF EXISTS names')
cursor.execute('DROP TABLE IF EXISTS names_to_titles')
connection.commit()
print('Creating database tables...')
cursor.execute(
    '''CREATE TABLE names
    (
        nconst text PRIMARY KEY,
        primary_name text,
        birth_year integer,
        death_year integer,
        primary_profession text,
        known_for_titles text
    )'''
)
cursor.execute(
    '''CREATE TABLE titles
    (
        tconst text PRIMARY KEY,
        title_type text,
        primary_title text,
        original_title text,
        is_adult bool,
        start_year integer,
        end_year integer,
        runtime_mins integer,
        genres text
    )'''
)
cursor.execute(
    '''CREATE TABLE names_to_titles
    (
        PRIMARY KEY(id_names, id_titles),
        id_names text,
        id_titles text
    )'''
)
print('Importing name.basics.tsv...')
with open('name.basics.tsv') as names:
    # Omit header
    names.readline()
    cursor.copy_from(names, 'names')
# Convert the comma-joined titles column into a real text[] array.
cursor.execute('ALTER TABLE names ALTER COLUMN known_for_titles TYPE text[] USING string_to_array(known_for_titles, \',\')')
connection.commit()
print('Creating names table indexes...')
cursor.execute('CREATE INDEX primary_name_idx ON names (primary_name)')
print('Importing title.basics.tsv...')
with open('title.basics.tsv') as titles:
    # Omit header
    titles.readline()
    cursor.copy_from(titles, 'titles')
# Same conversion for the genres column.
cursor.execute('ALTER TABLE titles ALTER COLUMN genres TYPE text[] USING string_to_array(genres, \',\')')
connection.commit()
print('Creating titles table indexes...')
cursor.execute('CREATE INDEX genres_idx ON titles (genres)')
cursor.execute('CREATE INDEX start_year_idx ON titles (start_year)')
print('Creating many-to-many relationships (names_to_titles)...')
server_side_cursor.execute("SELECT nconst, known_for_titles FROM names")
counter = 0
while True:
    # NOTE(review): counter is bumped before fetchmany, so the final
    # progress line can overstate the processed count by up to 5000.
    counter += 5000
    names_to_titles = server_side_cursor.fetchmany(5000)
    if not names_to_titles:
        break
    data = io.StringIO() # anything can be used as a file if it has .read() and .readline() methods
    for name in names_to_titles:
        if name[1]:
            for tconst in name[1]:
                relationship = '\t'.join([name[0], tconst])+'\n'
                data.write(relationship)
    data.seek(0)
    server_side_cursor.copy_from(data, 'names_to_titles')
    connection.commit()
    print('{} names has been processed'.format(counter), end='\r')
print('Creating names_to_titles indexes...')
cursor.execute('CREATE INDEX id_titles_idx ON names_to_titles (id_titles)')
cursor.execute('CREATE INDEX id_names_idx ON names_to_titles (id_names)')
connection.close()
print()
print('Done.\n Executed in (sec):', time.time() - st)
| true |
0b4b3a4e86b62ea0b2e881721f9be6322be8d00c | Python | JosephLevinthal/Research-projects | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4138/codes/1675_1952.py | UTF-8 | 239 | 3.046875 | 3 | [] | no_license | x = float(input("insira o salario bruto:"))
# Aliquota no salário bruto (social-security contribution brackets).
a = 8/100
b = 9/100
c = 11/100
d = 608.44
# aliquota do imposto de renda (income-tax brackets).
# BUG FIX: the original read ``a1 = 7,5/100`` which builds the tuple
# (7, 0.05) instead of the rate 0.075 -- a decimal-comma typo.
a1 = 7.5/100
b1 = 15/100
c1 = 22.5/100
d1 = 27.5/100
# Apply the 8% bracket to the gross salary ``x`` (read above) and print the
# deduction followed by the remaining amount.
A = x * a
A1 = x - A
print(A)
print(A1)
b6b94e6b96a48743b2d9dfa8aba1e48c5b715135 | Python | Ahuge/distant-vfx | /python/distant_vfx/sequences.py | UTF-8 | 1,067 | 3 | 3 | [] | no_license | import os
class ImageSequence:
    """A file sequence of frames such as ``shot.0001.exr`` in ``parent_path``.

    ``frames`` is a list of file names following the ``base.FRAME.ext``
    convention: the frame number is the second-to-last dot-separated field.
    """

    def __init__(self, frames, parent_path):
        self.parent_path = parent_path
        self.frames = frames
        # Cache the split of the first frame for basename/extension lookups.
        # NOTE(review): assumes a single-field basename (exactly two dots in
        # each name) -- confirm behaviour for multi-dot basenames.
        self._split = frames[0].split('.')
        # Frames are sorted lazily, on first access to start/end.
        self._sorted = False

    def __repr__(self):
        return self.name

    @property
    def path(self):
        """Full sequence path, e.g. ``/shots/shot.[0001-0010].exr``."""
        return os.path.join(self.parent_path, self.name)

    @property
    def name(self):
        """Sequence name in ``base.[first-last].ext`` notation.

        BUG FIX: the original interpolated the *whole* first/last file names
        into the bracketed range (producing e.g.
        ``base.[base.0001.ext-base.0010.ext].ext``); only the frame-number
        fields belong there.
        """
        first_frame = self.start.split('.')[-2]
        last_frame = self.end.split('.')[-2]
        return self.basename + '.[' + first_frame + '-' + last_frame + '].' + self.extension

    @property
    def basename(self):
        """Base name of the sequence (first dot-separated field)."""
        return self._split[0]

    @property
    def extension(self):
        """File extension (last dot-separated field)."""
        return self._split[-1]

    @property
    def start(self):
        """File name of the first frame (frames sorted numerically on demand)."""
        if not self._sorted:
            self._sort_frames()
        return self.frames[0]

    @property
    def end(self):
        """File name of the last frame (frames sorted numerically on demand)."""
        if not self._sorted:
            self._sort_frames()
        return self.frames[-1]

    def _sort_frames(self):
        # Sort by the integer frame-number field, not lexicographically.
        self.frames.sort(key=lambda x: int(x.split('.')[-2]))
        self._sorted = True
| true |
daca0c189df2944eafb5870a2db259be13cef6dc | Python | fajardomj/SocialNetworking | /SocialNetworking/NetworkSite/loginhelpers.py | UTF-8 | 416 | 2.734375 | 3 | [] | no_license | from django.contrib.auth.models import User
#gets the current user that is logged in
def get_user_logged_in(request):
id = request.session.get('logged_in_user')
user =''
if id is not None:
user = User.objects.get(pk=id)
return user
return None
#gets the State of a user
def getState(user):
if user is not None:
return "You are logged in!"
else:
return ""
| true |
f9add43db69da393b1199bd58119ab56de9aab4d | Python | DiegoSilvaHoffmann/Curso-de-Python | /Meus_dessafios/Exercicios2021/ex100.py | UTF-8 | 436 | 3.875 | 4 | [
"MIT"
] | permissive | from random import randint
from time import sleep
def sorteio(lista):
for cont in range(0, 5):
n = randint(1, 22)
lista.append(n)
print(f'{n}', end=' ', flush=True)
sleep(0.4)
def somapar(lista):
soma = 0
for valor in lista:
if valor % 2 == 0:
soma += valor
print(f'\nA soma entre numeros pares é de {soma}.')
numeros = list()
sorteio(numeros)
somapar(numeros)
| true |
083acd4de919d259e1e5e92392bb404e88e49b35 | Python | samtae13/pygame | /snake05.py | UTF-8 | 878 | 3.328125 | 3 | [] | no_license | import pygame
import time
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 300
SCREEN_SIZE = (SCREEN_WIDTH, SCREEN_HEIGHT)
BLOCK_SIZE = 10
def draw_block(screen, color, position):
block_rect = pygame.Rect((position[0] * BLOCK_SIZE, position[1] * BLOCK_SIZE),
(BLOCK_SIZE, BLOCK_SIZE))
pygame.draw.rect(screen, color, block_rect)
pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE)
for bx in range(0, SCREEN_WIDTH // BLOCK_SIZE + 1, 2):
for by in range(0, SCREEN_HEIGHT // BLOCK_SIZE + 1, 2):
draw_block(screen, GREEN, (bx , by))
draw_block(screen, RED, (bx + 1, by))
draw_block(screen, RED, (bx, by + 1))
draw_block(screen, GREEN, (bx + 1, by + 1))
time.sleep(0.0000001)
pygame.display.flip()
time.sleep(3) | true |
30cde06e81148bdea7d110febd4926fbca5f94a9 | Python | harryvu/pyalgo101 | /ds/DoubleLinkedList.py | UTF-8 | 981 | 4.125 | 4 | [] | no_license | class Node:
"""A singly linked list node."""
def __init__(self, data=None):
self.data = data
self.next = None
self.prev = None
class DoubleLinkedList:
def __init__(self):
self.head = Node()
self.tail = Node()
self.head.next = self.tail
self.tail.prev = self.head
self.count = 0
def prepend(self, data):
node = Node(data)
node.prev = self.head
node.next = self.head.next
self.head.next.prev = node
self.head.next = node
self.count += 1
def append(self, data):
node = Node(data)
node.next = self.tail
node.prev = self.tail.prev
self.tail.prev.next = node
self.tail.prev = node
self.count += 1
def print_list(self):
if self.count > 0:
curr = self.head
while curr.next.next != None:
curr = curr.next
print('{} :: '.format(curr.data)) | true |
48811ab3ba7807a3b0b100d685e391aa53014a3b | Python | jmlb/Udacity-RoboticsND | /RoboND-Perception-Project/Exercises/Exercise-2/sensor_stick/scripts/segmentation.py | UTF-8 | 7,170 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env python
# Import modules
from pcl_helper import *
# TODO: Define functions as required
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
# TODO: Convert ROS msg to PCL data
cloud = ros_to_pcl(pcl_msg)
# TODO: Voxel Grid Downsampling
vox = cloud.make_voxel_grid_filter()
# Choose a voxel (also known as leaf) size
# Note: this (1) is a poor choice of leaf size
# Experiment and find the appropriate size!
LEAF_SIZE = 0.01
# Set the voxel (or leaf) size
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
# Call the filter function to obtain the resultant downsampled point cloud
cloud_filtered = vox.filter()
# TODO: PassThrough Filter
# PassThrough filter
# Create a PassThrough filter object.
passthrough = cloud_filtered.make_passthrough_filter()
# Assign axis and range to the passthrough filter object.
filter_axis = 'z'
passthrough.set_filter_field_name(filter_axis)
axis_min = 0.6
axis_max = 1.1
passthrough.set_filter_limits(axis_min, axis_max)
# Finally use the filter function to obtain the resultant point cloud.
cloud_filtered = passthrough.filter()
# TODO: RANSAC Plane Segmentation
# RANSAC plane segmentation
# Create the segmentation object
seg = cloud_filtered.make_segmenter()
# Set the model you wish to fit
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
# Max distance for a point to be considered fitting the model
# Experiment with different values for max_distance
# for segmenting the table
max_distance = 0.01
seg.set_distance_threshold(max_distance)
# Call the segment function to obtain set of inlier indices and model coefficients
inliers, coefficients = seg.segment()
# Extract inliers
# Extract inliers
cloud_table = cloud_filtered.extract(inliers, negative=False)
cloud_objects = cloud_filtered.extract(inliers, negative=True)
# TODO: Euclidean Clustering
#Euclidean Clustering
#In order to perform Euclidean Clustering, you must first
#construct a k-d tree from the cloud_objects point cloud.
#The k-d tree data structure is used in the Euclidian Clustering algorithm
#to decrease the computational burden of searching for neighboring points.
#While other efficient algorithms/data structures for nearest neighbor
#search exist, PCL's Euclidian Clustering algorithm only supports k-d trees.
#To construct a k-d tree, you first need to convert your XYZRGB point cloud to
#XYZ, because PCL's Euclidean Clustering algorithm requires a point cloud with only
#spatial information. To create this colorless cloud, (which I'll call white_cloud),
#search again in pcl_helper.py to find the function you need to convert XYZRGB to XYZ.
#Next, construct a k-d tree from it. To accomplish this, add the following code
#to the pcl_callback() function in your node:
# Euclidean Clustering
white_cloud = XYZRGB_to_XYZ(cloud_objects)
# Apply function to convert XYZRGB to XYZ
tree = white_cloud.make_kdtree()
#Once your k-d tree has been constructed, you can perform the cluster extraction like this:
# Create a cluster extraction object
ec = white_cloud.make_EuclideanClusterExtraction()
# Set tolerances for distance threshold
# as well as minimum and maximum cluster size (in points)
# NOTE: These are poor choices of clustering parameters
# Your task is to experiment and find values that work for segmenting objects.
ec.set_ClusterTolerance(0.05)
ec.set_MinClusterSize(100)
ec.set_MaxClusterSize(1550)
# Search the k-d tree for clusters
ec.set_SearchMethod(tree)
# Extract indices for each of the discovered clusters
cluster_indices = ec.Extract()
#Visualization
#Cluster Visualization
#Now that we have lists of points for each object (cluster_indices), you can perform
#the final step of this exercise, visualizing the results in RViz!
#Choosing a unique color for each segmented Object
#In order to visualize the results in RViz, you need to create one final point cloud,
#let's call it "cluster_cloud" of type PointCloud_PointXYZRGB.
#This cloud will contain points for each of the segmented objects, with each set of points having a unique color.
#Assign a color corresponding to each segmented object in scene
cluster_color = get_color_list(len(cluster_indices))
color_cluster_point_list = []
for j, indices in enumerate(cluster_indices):
for i, indice in enumerate(indices):
color_cluster_point_list.append([white_cloud[indice][0],
white_cloud[indice][1],
white_cloud[indice][2],
rgb_to_float(cluster_color[j])])
#Create new cloud containing all clusters, each with unique color
cluster_cloud = pcl.PointCloud_PointXYZRGB()
cluster_cloud.from_list(color_cluster_point_list)
# TODO: Convert PCL data to ROS messages
ros_cloud_objects = pcl_to_ros(cloud_objects)
ros_cloud_table = pcl_to_ros(cloud_table)
ros_cluster_cloud = pcl_to_ros(cluster_cloud)
# TODO: Publish ROS messages
#Publish ROS messages from your
#pcl_callback(). For now you'll just be publishing
#the original pointcloud itself, but later you'll change
#these to be the point clouds associated with the table
#and the objects.
# TODO: Publish ROS msg
pcl_objects_pub.publish(ros_cloud_objects)
pcl_table_pub.publish(ros_cloud_table)
pcl_cluster_pub.publish(ros_cluster_cloud)
if __name__ == '__main__':
# TODO: ROS node initialization
#Initialize your ROS node.
#In this step you are initializing a
#new node called "clustering".
rospy.init_node('clustering', anonymous=True)
# TODO: Create Subscribers
#Create Subscribers. Here we're subscribing our
#node to the "sensor_stick/point_cloud" topic.
#Anytime a message arrives, the message data
#(a point cloud!) will be passed to the pcl_callback() function for processing.
pcl_sub = rospy.Subscriber("/sensor_stick/point_cloud", pc2.PointCloud2, pcl_callback, queue_size=1)
#Create Publishers. Here you're creating two new publishers to publish the point cloud data for the table and the objects on the table to topics called pcl_table and pcl_objects, respectively.
# TODO: Create Publishers
pcl_objects_pub = rospy.Publisher("/pcl_objects", PointCloud2, queue_size=1)
pcl_table_pub = rospy.Publisher("/pcl_table", PointCloud2, queue_size=1)
pcl_cluster_pub = rospy.Publisher("/pcl_cluster", PointCloud2, queue_size=1)
# Initialize color_list
get_color_list.color_list = []
# TODO: Spin while node is not shutdown
#Spin while node is not shutdown. Here you're
#preventing your node from exiting until an intentional shutdown is invoked.
# TODO: Spin while node is not shutdown
while not rospy.is_shutdown():
rospy.spin() | true |
f5f6a6cbe7d481d79008dff4f4bfefdd8cc68f3b | Python | kelsi0/Game | /Game/excess_files/questions.py | UTF-8 | 9,217 | 3.484375 | 3 | [] | no_license | from random import randint
import random
list_of_questions = [
['''
What body parts do northern leopard frogs use to help swallow their prey?
A)Feet B)Eyes
C)Ears D)Nostrils
''','''
Correct! Northern leopard frogs use their ears to help swallow their prey.
''','''
Wrong! Northern leopard frogs use their ears to help swallow their prey.
''','''b''', False],
['''What colour is the solid form of oxygen?
A)Red B)Green
C)Yellow D)Blue
''','''
Correct! The solid colour for of oxygen is infact blue.
''','''
Wrong! The solid colour for of oxygen is infact blue''','''d''', False],
['''
What colour is the sunset on Mars?
A)Red B)Green
C)Blue D)Orange
''','''
Correct! The sunset on mars is blue (And really cool!).
''','''
Wrong! The sunset on mars is blue (And really cool!).
''','''c''', False],
['''
Which part of the human body does not have a blood supply?
A) Kneecap B)Earlobe
C)Fingernails D)Cornea
''','''
Correct! The cornea have no blood supply!.
''','''
Wrong! The cornea have no blood supply!.
''','''d''', False],
['''
In 2015, Merlin the rescue cat set a new world record for what?
A)Longest Fur B)Loudest Purr
C)Best Mouser D)Heaviest Cat
''','''
Correct! The most adorable world record ever for the loudest purr.
''','''
Wrong! The most adorable world record ever for the loudest purr.
''','''b''', False],
['''
What was William Shakespeare's last play called?
A)The Tempest B)A Midsummer Night's Dream
C)Romeo and Juliet D)As you like it
''','''
Correct - Legend has it that this is the first of two plays Shakespeare wrote for a mysterious figure that gifted him his powers of writing.
''','''
Wrong! Perhaps you should reaquaint yourself with the Bard from Stratford upon Avon.
''',
'''a''', False],
['''
What is the primary reason Millennials are unable to buy houses?
A)Poor economic management and policy decision making from the previous generation B)Corporate greed
C)Avocados D)The free market
''','''
Correct - Poor money management and lack of get up and go is the reason that many in the 'Millennial' generation are unable to afford houses. Other reasons include: cereal cafes, inability to open a can of beans, being a snowflake and not working as hard as the previous generation, apparently.
''','''
Wrong! Back to the Northern Quarter with you.
'''
,'''c''', False],
['''
Which of the following video game franchises is the brain child of the famous director Hideo Kojima?
A)Plastic Wrench Liquid B)Metal Gear Solid
C)Call of Duty D)Shout of Obligation
''','''
Correct - Despite it's convoluted story and awkward name. Metal Gear Solid remains the defining franchise of the stealth genre.
''','''
Snake what happened? Snake? SSSSSSNNNNNNNNAAAAAAAAAAAAAAAAAAKKKKKKKKKKKKKKKKKKEEEEEEEEEE!
'''
,'''b''', False],
['''
Which of the following US Presidents suffered from extreme stage fright?
A)Alexander Hamilton B)Abraham Lincoln
C)Thomas Jefferson D)Ulysses Grant
''','''
Correct - Despite his portrayal in the hit musical 'Hamilton' Thomas Jefferson only gave two speeches in his life and even then audiences had to strain to hear him. This was despite kicking ass as the ambassador to France.
''','''
I afraid you will never be president, or in the room where it happens.
'''
,'''c''', False],
['''
Recent research suggests that dinosaurs who once ruled the planet has what kind of skin?
A)Leathery B)Smooth
C)Furry D)Feathery
''','''
Correct - Recent research suggests that dinosaurs, as they were more closely related to birds were feathered. Even the fearsome T-Rex is considered to have had feathers along it's head, neck and tail. The velociraptor was about the size of a chicken and feathered as such. Clever girl...
''','''
You have been eaten by a T-Rex.
'''
,'''d''', False],
['''
Who is England's all time top goal scorer?
A) Bobby CHarlton B) Gary Lineker
C) Wayne Rooney D) Alan Shearer
''','''
Correct! Wayne Rooney overtook Bobby Charlton's recored of 49 international goals on Tuesday 8th September 2015.
''','''
Wrong! Wayne Rooney overtook Bobby Charlton's recored of 49 international goals on Tuesday 8th September 2015.
''',
'c', False],
['''
Who holds the record for most appearances in the Premier League?
A) Ryan Giggs B) Gareth Barry
C) James Milner D) David James
''','''
Correct! Gareth Barry holds the record after 653 career Premier League appearances for Aston Villa, Manchester City, Everton & West Brom.
''','''
Wrong! Gareth Barry holds the record after 653 career Premier League appearances for Aston Villa, Manchester City, Everton & West Brom.
''',
'b', False],
['''
Who was the first person in space?
A) Yuri gagarin B) Buzz Aldrin
C) Neil Armstrong D) Alan Shepherd
''','''
Correct! Although Neil Armstrong was the first man on the moon and Alan Shepherd was the first American in space, Yuri Gagarin was the first person to go into space.
''','''
Wrong! Although Neil Armstrong was the first man on the moon and Alan Shepherd was the first American in space, Yuri Gagarin was the first person to go into space.
''',
'a', False],
['''
Which nation has won the most World Cups?
A) Italy B) France
C) Germany D) Brazil
''','''
Correct! Brazil have won the most World Cups with 5, despite not winning any of the last 4 tournaments.
''','''
Wrong! Brazil have won the most World Cups with 5, despite not winning any of the last 4 tournaments.
''',
'd', False],
['''
Which of these is not a metal?
A) Magnesium B) Lithium
C) Uranium D) Nitrogen
''','''
Correct! Nitrogen is the only element that is not a metal.
''','''
Wrong! Nitrogen is the only element that is not a metal.
''',
'd', False],
['''
Who is often called the father of the computer?
A) Eben Upton B)David Montgomery
C) Charles Babbage D) William Shockley
''','''
Correct!
''','''
Wrong!
''','''c''', False],
['''
Which of these is not a seconday primary colour
A)RED B)YELLOW
C)CYAN D)MAGENTA
''','''
Correct! The primary colours are RED GREEN and BLUE
and the secondary primaries are MAGENTA CYAN and YELLOW
''','''
Wrong! The primary colours are RED GREEN and BLUE
and the secondary primaries are MAGENTA CYAN and YELLOW
''','''a''', False],
['''
What is the answer to this maths question 2**8
A)256 B)64
C)1024 D)512
''','''
Correct! 2 x 2 x 2 x 2 x 2 x 2 x 2 x2 = 256
Two to the power of 8
''','''
Wrong! 2 x 2 x 2 x 2 x 2 x 2 x 2 x2 = 256
Two to the power of 8
''','''a''', False],
['''
What is Morrissey the singer with the Smiths middle name
A)Patrick B)Robert
C)Albert D)Marvin
''','''
Correct! Morrisseys middle name is Patrick
''','''
Wrong! The most adorable world record ever for the loudest purr.
''','''a''', False],
['''
Who is credited with creating the first compiler for a computer programming language
A)Grace Hopper B)Ada Lovelace
C)Dennis Hopper D)Sophie Wilson
''','''
Correct! Grace Brewster Murray Hopper (née Murray December 9, 1906 – January 1, 1992)
was an American computer scientist and United States Navy rear admiral.[1] One of the
first programmers of the Harvard Mark I computer,
''','''
Wrong! Grace Brewster Murray Hopper (née Murray December 9, 1906 – January 1, 1992)
was an American computer scientist and United States Navy rear admiral.[1] One of the
first programmers of the Harvard Mark I computer,
''','''a''', False]
]
from time import sleep
import sys
# function which asks a question
def question(pos):
    """Ask question number *pos* from ``list_of_questions``.

    Prints the question with a typewriter effect, reads the contestant's
    answer from stdin and prints the matching right/wrong explanation.
    Marks the question as used (index 4 of the entry).
    """
    list_of_questions[pos][4] = True  # mark this question as asked

    def _typewriter(text):
        # Print one character at a time for a typewriter effect.
        for ch in text:
            sys.stdout.write(ch)
            sys.stdout.flush()
            sleep(0.03)

    _typewriter(list_of_questions[pos][0])
    ans = input('\n\nEnter your answer here :')  # get answer from contestant
    ans = ans.lower()
    # Entry layout: [question, correct-feedback, wrong-feedback, answer-letter, used]
    if ans == list_of_questions[pos][3]:
        _typewriter(list_of_questions[pos][1])  # answer is correct
    else:
        _typewriter(list_of_questions[pos][2])  # answer is wrong


# randrange(n) yields 0..n-1.  The previous randint(0, len(list_of_questions))
# is inclusive on both ends and could return len(list_of_questions),
# raising IndexError inside question().
question(random.randrange(len(list_of_questions)))
d7a96f80bf8dd833cee55eb7aef012e5a8bbdd21 | Python | jamesandjim/wisdom_site | /commTools/toBASE64.py | UTF-8 | 1,187 | 3.125 | 3 | [] | no_license | # 将图片用BASE64转换为字符串
# 将图片转化的字符串进行url编码
import base64
import os
from urllib import parse
BaseDIR = os.path.abspath(os.path.dirname(__file__))
# photofile = os.path.join(BaseDIR, 'photos', '1.jpg')
# textfile = os.path.join(BaseDIR, 'outjpg', '1.txt')
outjpg = os.path.join(BaseDIR, 'outjpg', 'out.jpg')
# 图片转为BASE64码
def jpgtostr(photofile):
    """Encode the file at *photofile* as URL-safe Base64 text.

    Reads the file as raw bytes, Base64-encodes it, and percent-encodes
    the result so it can be embedded in a URL (``/`` becomes ``%2F``,
    ``+`` becomes ``%2B``, ``=`` becomes ``%3D``).
    """
    with open(photofile, "rb") as f:  # raw image bytes
        base64_data = base64.b64encode(f.read())
    str_base64_data = str(base64_data, encoding="utf-8")
    # safe='' makes quote() escape '/' too, which replaces the former
    # manual .replace('/', '%2F') pass (quote keeps '/' by default).
    return parse.quote(str_base64_data, safe="", encoding="utf-8")
# BASE64转换为图片
def strtojpg(textfile):
    """Decode the Base64 text stored in *textfile* and write the bytes to ``outjpg``.

    NOTE(review): the input is expected to be plain Base64 (not URL-quoted);
    verify against whatever produced the text file.
    """
    with open(textfile, 'r') as source:
        raw_bytes = base64.b64decode(source.read())
    with open(outjpg, 'wb') as target:
        target.write(raw_bytes)
| true |
d5e8011341642992895d9425cfbe329d0f0ac2b7 | Python | ciepielajan/WarsztatPythonDataScience | /simple_script.py | UTF-8 | 126 | 2.671875 | 3 | [] | no_license | import sys
if __name__ == '__main__':
    # Count how many times each command-line argument occurs.
    rez = {}
    for x in sys.argv[1:]:  # skip argv[0], the script name
        rez[x] = rez.get(x,0)+1
    print (rez)
02e4abf3bacc4b48cfc527a1de0f93e75a14671f | Python | eirikhoe/advent-of-code | /2020/02/sol.py | UTF-8 | 1,714 | 3.515625 | 4 | [] | no_license | from pathlib import Path
import re
data_folder = Path(".").resolve()
# Pattern for one policy+password line: "<low>-<high> <letter>: <password>"
reg_password = re.compile(r"(\d+)-(\d+) ([a-z]): ([a-z]+)")


def get_password_components(password_info):
    """Split a policy line into ``(low, high, letter, password)``."""
    low, high, letter, pwd = reg_password.match(password_info).groups()
    return int(low), int(high), letter, pwd
def is_password_valid_sled(password_info):
    """Sled policy: the letter must occur between low and high times (inclusive)."""
    lo, hi, letter, pwd = get_password_components(password_info)
    return lo <= pwd.count(letter) <= hi
def is_password_valid_toboggin(password_info):
    """Toboggin policy: exactly one of the two 1-based positions holds the letter."""
    first, second, letter, pwd = get_password_components(password_info)
    # '!=' on the two membership tests acts as XOR.
    return (pwd[first - 1] == letter) != (pwd[second - 1] == letter)
def get_n_valid_passwords(passwords_info, policy_type):
    """Count entries of *passwords_info* valid under *policy_type*.

    *policy_type* is either ``"sled"`` or ``"toboggin"``; any other value
    raises RuntimeError as soon as an entry is examined.
    """
    total = 0
    for entry in passwords_info:
        if policy_type == "sled":
            total += is_password_valid_sled(entry)
        elif policy_type == "toboggin":
            total += is_password_valid_toboggin(entry)
        else:
            raise RuntimeError("Unknown policy type")
    return total
def main():
    """Solve both parts of Advent of Code 2020 day 2 and print the answers."""
    # Puzzle input: one "<a>-<b> <char>: <password>" line per row.
    data = data_folder.joinpath("input.txt").read_text()
    data = data.split("\n")

    print("Part 1")
    n = get_n_valid_passwords(data, "sled")
    print(f"There are {n} valid passwords in the list when using sled policies")
    print()
    print("Part 2")
    n = get_n_valid_passwords(data, "toboggin")
    print(f"There are {n} valid passwords in the list when using toboggin policies")


if __name__ == "__main__":
    main()
| true |
39b7e652aa254cc91455b1c3ff11dbfcbb582db4 | Python | kenny5he/docnet | /Services/BigData/Hadoop/mapreduce/practive/mr_compression/red.py | UTF-8 | 800 | 2.78125 | 3 | [] | no_license | #!/usr/bin/python
import sys
def reduer_func():
    # Hadoop-streaming reducer (Python 2): stdin is tab-separated
    # "word\tcount" lines, pre-sorted by word.  Counts of consecutive
    # identical words are accumulated and one "word\ttotal" line is
    # printed per distinct word.
    current_word = None
    count_pool = []   # counts collected for the word currently being reduced
    sum = 0           # NOTE: shadows the builtin sum()
    for line in sys.stdin:
        word, val = line.strip().split('\t')
        if current_word == None:
            current_word = word
        if current_word != word:
            # Word changed: flush the total of the previous word.
            for count in count_pool:
                sum += count
            print "%s\t%s" % (current_word, sum)
            current_word = word
            count_pool = []
            sum = 0
        count_pool.append(int(val))
    # Flush the final word.
    # NOTE(review): with empty stdin this prints "None\t0" -- confirm intended.
    for count in count_pool:
        sum += count
    print "%s\t%s" % (current_word, str(sum))
if __name__ == "__main__":
    # Dispatch: argv[1] names a function defined in this module
    # (Hadoop-streaming convention); the remaining argv entries become
    # its positional arguments.
    module = sys.modules[__name__]
    func = getattr(module, sys.argv[1])
    args = None
    # len(sys.argv) > 1 always holds here (argv[1] was just read above),
    # so args becomes sys.argv[2:] (possibly the empty list).
    if len(sys.argv) > 1:
        args = sys.argv[2:]
    func(*args)
| true |
def frobenius(A):
    """Return the Frobenius norm of matrix *A* (a list of rows).

    The Frobenius norm is sqrt(sum of |a_ij|**2).  The previous version
    summed absolute values instead of squares, which is the entrywise
    L1 norm, not the Frobenius norm.
    """
    total = 0
    for row in A:
        for entry in row:
            total += abs(entry) ** 2  # abs() keeps this correct for complex entries
    return total ** (1 / 2)
# Quick demo on a 4x4 example matrix.
A = [[1,2,1,4],[2,4,7,8],[6,3,6,5],[9,8,7,6]]
print(frobenius(A))
15a23d7762118b5f05d5151bd42e37b1442f2883 | Python | mattbellis/matts-work-environment | /PyROOT/playRooKeysPdf_andPSF/trial_rpsf_2D.py | UTF-8 | 3,664 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env python
from ROOT import *
from array import *
###############################################################
# RooParametricStepFunction
###############################################################
# Python 2 / PyROOT script (uses raw_input below): fits a 2-D model --
# a parametric step function in x times a Gaussian in y -- to toy data.

# Here's a fake variable over which the data will be generated
# and fit.
x = RooRealVar("x","x variable", 0.0, 10.0)
y = RooRealVar("y","y variable", -5.0, 5.0)

#############################
# Set up the RPSF
#############################
nbins = 10

# These are the bin edges
limits = TArrayD(nbins+1)
for i in range(0, nbins+1):
    limits[i] = 0.0 + i*1.0

# These will hold the values of the bin heights.
# NOTE: "list" shadows the builtin list() for the rest of the script.
list = RooArgList("list")
binHeight = []
for i in range(0,nbins-1):
    name = "binHeight%d" % (i)
    title = "bin %d Value" % (i)
    binHeight.append(RooRealVar(name, title, 0.01, 0.0, 1.0))
    list.add(binHeight[i]) # up to binHeight8, ie. 9 parameters

# Declare the RPSF
aPdf = RooParametricStepFunction("aPdf","PSF", x, list, limits, nbins)

nbkg = RooRealVar("nbkg","number of background events,",150)
aPdf_ex = RooAddPdf("aPdf_ex","Extended aPdf",RooArgList(aPdf), RooArgList(nbkg))
#
########################################################################
#
# Gaussian in y
########################################################################
mean = RooRealVar("mean","#mu of Gaussian", 0.000)
sigma = RooRealVar("sigma","Width of Gaussian", 0.8)
gauss = RooGaussian("gauss", "Gaussian PDF", y, mean, sigma)

########################################################################
########################################################################
c = TCanvas("c","c", 10, 10, 800, 800)
c.Divide(3,2)

# Frames 0-2 are in x, 3-5 in y.
# NOTE(review): under Python 2 "i/3" is integer division (i=0,1,2 -> x
# frames); under Python 3 it is float division and only i==0 takes the
# first branch -- this script relies on Python 2 semantics.
frames = []
for i in range(0, 6):
    title = "frame%d" % (i)
    if i/3==0:
        frames.append(x.frame( RooFit.Title(title), RooFit.Bins(20))) # RooPlot
    elif i/3==1:
        frames.append(y.frame( RooFit.Title(title), RooFit.Bins(20))) # RooPlot
    else:
        frames.append(y.frame( RooFit.Title(title), RooFit.Bins(20))) # RooPlot

# Plot the starting values of the RPSF (flat)
c.cd(1)
aPdf.plotOn(frames[0])
frames[0].Draw()
gPad.Update()

c.cd(4)
gauss.plotOn(frames[3])
frames[3].Draw()
gPad.Update()

#######################################################
# Generate some fake data from a polynomial(x) * Gaussian(y) model
#######################################################
p0 = RooRealVar("p0", "p0", 5.0)
p1 = RooRealVar("p1", "p1", -2.0)
p2 = RooRealVar("p2", "p2", 3.0)
f = RooPolynomial("f", "Polynomial PDF", x, RooArgList(p0,p1,p2))

total_for_gen = RooProdPdf("total_for_gen","f*gauss",RooArgList(f, gauss))
# NOTE(review): the fit model reuses the internal name "total_for_gen".
total_for_fit = RooProdPdf("total_for_gen","aPdf_ex*gauss",RooArgList(aPdf_ex, gauss))

data = total_for_gen.generate(RooArgSet(x,y), 1000)

# Plot the data
c.cd(2)
data.plotOn(frames[1], RooLinkedList())
frames[1].Draw()
gPad.Update()

c.cd(5)
data.plotOn(frames[4], RooLinkedList())
frames[4].Draw()
gPad.Update()

########################################################
# Run the fit (mean/sigma/nbkg floated from deliberately-off start values)
########################################################
mean.setVal(0.1)
sigma.setVal(0.1)
mean.setConstant(kFALSE)
sigma.setConstant(kFALSE)
nbkg.setConstant(kFALSE)

fit_results = total_for_fit.fitTo(data, RooFit.Extended(), RooFit.Save(kTRUE))
#fit_results = total_for_fit.fitTo(data, RooFit.Save(kTRUE))

# Plot the RPSF after the fit has converged.
# NOTE(review): the fit curves are overlaid on the data frames (1 and 4)
# and redrawn in panes 3 and 6 -- confirm this layout is intended.
c.cd(3)
total_for_fit.plotOn(frames[1])
frames[1].Draw()
gPad.Update()

c.cd(6)
total_for_fit.plotOn(frames[4])
frames[4].Draw()
gPad.Update()

## Wait for input to keep the GUI (which lives on a ROOT event dispatcher) alive
if __name__ == '__main__':
    rep = ''
    while not rep in [ 'q', 'Q' ]:
        rep = raw_input( 'enter "q" to quit: ' )
        if 1 < len(rep):
            rep = rep[0]
| true |
print("""
Write a short Python function that takes a string s, representing a sentence,
and returns a copy of the string with all punctuation removed.
For example, if given the string "Let's try, Mike.",
this function would return "Lets try Mike".
""")

s = input('Type your sentence here: ')
# Keep only alphanumeric and whitespace characters; everything else
# (punctuation, symbols) is dropped.
clean = ''.join([i for i in s if i.isalnum() or i.isspace()])
print('Sentence without punctuation: {0}'.format(clean))
2680ba99824e82087492caaed68ee6cb7c15024a | Python | EmjayAhn/DailyAlgorithm | /01_baekjoon/53_problem_2908.py | UTF-8 | 230 | 3.09375 | 3 | [] | no_license | # https://www.acmicpc.net/problem/2908
import sys
# Baekjoon 2908: reverse both numbers and print the larger reversed value.
# NOTE(review): max() on strings is lexicographic; it matches numeric max
# only because the problem guarantees equal-length (three-digit) inputs.
input_numbers = sys.stdin.readline().rstrip('\n').split()
input_number1 = input_numbers[0][::-1]
input_number2 = input_numbers[1][::-1]
print(max(input_number1, input_number2))
387b1519f9ad0bec84a4fec0f592212a62b1ebaf | Python | magnet-cl/py-excel-handler | /excel_handler/handler.py | UTF-8 | 13,383 | 2.734375 | 3 | [] | no_license | """ This document defines the excel_handler module """
from __future__ import print_function, absolute_import
from builtins import str, object
import xlsxwriter
import datetime
from .fields import Field
from collections import namedtuple
from future.utils import with_metaclass
from openpyxl.utils.datetime import from_excel
from openpyxl import load_workbook
class FieldNotFound(Exception):
    """Raised when a referenced field name is not defined on the handler."""

    pass


class ReapeatedColumn(Exception):
    """Raised when two Field declarations claim the same column index."""

    pass


# One recorded read failure: the row index, the partially-parsed row data,
# the error message and the name of the field that failed to cast.
RowError = namedtuple("RowError", "row, row_data, error, field_name")
class ExcelHandlerMetaClass(type):
    """Collects ``Field`` attributes declared on ``ExcelHandler`` subclasses.

    Builds two class attributes on the new type:

    * ``fieldname_to_field`` -- attribute name -> Field, including fields
      inherited from base classes.
    * ``fields`` -- the fields sorted by column index.

    Fields declared with a negative ``col`` are positioned relative to the
    end of the row (``col=-1`` means the last column) once the total number
    of fields is known.
    """

    def __new__(cls, name, bases, attrs):
        fieldname_to_field = {}
        # Walk the bases right-to-left so fields from earlier (higher
        # priority) bases overwrite those from later ones.
        for base in bases[::-1]:
            if hasattr(base, "fieldname_to_field"):
                fieldname_to_field.update(base.fieldname_to_field)

        cols = {}
        for k, v in list(attrs.items()):
            if isinstance(v, Field):
                field = attrs.pop(k)
                field.name = k
                if field.verbose_name == "":
                    field.verbose_name = field.name
                if field.col < 0:
                    # Remember the end-relative offset; it is resolved to an
                    # absolute column below once all fields are known.
                    field._distance_from_last = field.col
                if field.col in cols:
                    raise ReapeatedColumn(
                        "{} collides with field {} on column {}".format(
                            field.name, cols[field.col].name, field.col
                        )
                    )
                cols[field.col] = field
                fieldname_to_field[k] = field

        attrs["fieldname_to_field"] = fieldname_to_field
        # NOTE(review): end-relative fields are sorted here by their raw
        # (still negative) col, before the offsets are resolved below.
        attrs["fields"] = sorted(
            list(fieldname_to_field.values()), key=lambda field: field.col
        )
        sup = super(ExcelHandlerMetaClass, cls)
        this = sup.__new__(cls, name, bases, attrs)

        # Resolve end-relative columns: col=-1 becomes field_count - 1.
        # getattr() replaces the previous bare ``try/except: pass`` which
        # silently swallowed *any* exception, not just a missing attribute.
        field_count = len(fieldname_to_field)
        for field_name, field in list(fieldname_to_field.items()):
            distance = getattr(field, "_distance_from_last", None)
            if distance is not None and distance < 0:
                field.col = field_count + distance
        return this
class ExcelHandler(with_metaclass(ExcelHandlerMetaClass, object)):
    """ExcelHandler is a class that is used to wrap common operations in
    excel files.

    Reading (``mode="r"``) is backed by openpyxl; writing (any other mode)
    is backed by xlsxwriter.  The column layout is declared with ``Field``
    attributes on subclasses and collected by ``ExcelHandlerMetaClass``
    into ``self.fields`` / ``self.fieldname_to_field``.
    """

    def __init__(self, path=None, excel_file=None, mode="r", on_demand=False):
        """Open a workbook from *path* or a file object *excel_file*.

        Exactly one of *path* / *excel_file* must be given.  ``mode="r"``
        loads an existing workbook for reading; any other mode creates a
        new xlsxwriter workbook at *path* for writing.  *on_demand* is
        accepted for backwards compatibility but currently unused.
        """
        if path is None and excel_file is None:
            raise Exception("path or excel_file requried")

        if path is not None and excel_file is not None:
            raise Exception("Only specify path or excel_file, not both")

        if mode == "r":
            if path:
                self.workbook = load_workbook(
                    filename=path,
                )
            else:
                self.workbook = load_workbook(
                    filename=excel_file,
                )
            self.sheet = self.workbook.worksheets[0]
        else:
            self.path = path
            self.workbook = xlsxwriter.Workbook(self.path, {"nan_inf_to_errors": True})
            self.set_default_formats()
        self.parser = None

    def set_default_formats(self):
        # Default cell formats for date-like values (write mode only).
        self.date_format = self.workbook.add_format({"num_format": "YYYY-MM-DD"})
        self.datetime_format = self.workbook.add_format(
            {"num_format": "YYYY-MM-DD HH:MM:SS"}
        )
        self.time_format = self.workbook.add_format({"num_format": "HH:MM:SS"})

    def set_row_formats_from_example(self, row):
        # Set per-column formats by inspecting the types of one example row.
        # NOTE(review): datetime.datetime instances are also datetime.date
        # instances, so the first branch captures them and the datetime
        # elif never fires for them -- confirm this is intended.
        i = 0
        for cel in row:
            if isinstance(cel, datetime.date):
                self.sheet.set_column(i, i, 18, cell_format=self.date_format)
            elif isinstance(cel, datetime.datetime):
                self.sheet.set_column(i, i, 18, cell_format=self.datetime_format)
            elif isinstance(cel, datetime.time):
                self.sheet.set_column(i, i, 18, cell_format=self.time_format)
            i += 1

    def add_sheet(self, name):
        """Create a new worksheet named *name* and make it current (write mode)."""
        self.sheet = self.workbook.add_worksheet(name)

    def set_sheet(self, sheet_index):
        """sets the current sheet with the given sheet_index"""
        self.sheet = self.workbook.worksheets[sheet_index]

    def set_sheet_by_name(self, sheet_name):
        """sets the current sheet with the given sheet name"""
        self.sheet = self.workbook[sheet_name]

    def parse_date(self, value):
        """Convert an Excel serial number *value* to a datetime.date."""
        return from_excel(value).date()

    def read_rows(self, column_structure, starting_row=1, max_rows=None):
        """Reads the current sheet from the starting row to the last row or up
        to a max of max_rows if greater than 0

        returns an array with the data.  *column_structure* maps column
        names positionally: its N-th key labels the N-th sheet column.
        """
        data = []

        rows = self.sheet.iter_rows(
            min_row=starting_row,
            max_row=max_rows,
            max_col=len(column_structure),
        )

        for row in rows:
            column_data = {}
            for cell in row:
                value = cell.value
                # col_idx is 1-based; map it onto the structure's key order.
                column_name = list(column_structure)[cell.col_idx - 1]
                column_data[column_name] = value
            data.append(column_data)
        return data

    def _read(
        self,
        skip_titles=False,
        failfast=False,
        ignore_blank_rows=True,
        include_rowx=False,
        return_errors=False,
        starting_row=0,
    ):
        """
        Using the structure defined with the Field attributes, reads the excel
        and returns the data in an array of dicts

        NOTE(review): this appears to be a legacy xlrd-based reader -- it
        calls ``sheet.cell(colx=..., rowx=...)``, which the openpyxl
        workbook loaded in __init__ does not provide, and ``data_read`` is
        set True unconditionally after the field loop so the terminating
        branch looks unreachable.  Prefer ``read()``; verify before use.
        """
        data = []
        errors = []
        row = starting_row
        if skip_titles:
            row += 1

        # prepare the read for each field
        for field in self.fields:
            field.prepare_read()

        while True:
            row_data = {}
            data_read = False
            continue_while = False
            blank_row = True
            for field in self.fields:
                field_name = field.name
                try:
                    value = self.sheet.cell(colx=field.col, rowx=row).value
                except:
                    # Cell missing: fall back to the field default, if any.
                    if hasattr(field, "default"):
                        row_data[field.name] = field.default
                else:
                    if value != "":
                        blank_row = False
                    try:
                        row_data[field.name] = field.cast(
                            value,
                            self.workbook,
                            row_data,
                        )
                    except Exception as err:
                        if not err.args:
                            err.args = ("",)
                        msg = 'Cannot read row "{}" : Column {}, {}'.format(
                            row + 1, str(field.verbose_name), err.args[0]
                        )
                        err.args = (msg,) + err.args[1:]
                        if failfast:
                            raise
                        else:
                            # Keep the raw value and record/print the error.
                            row_data[field.name] = value
                            if return_errors:
                                errors.append(
                                    RowError(
                                        row=row,
                                        row_data=row_data,
                                        error=msg,
                                        field_name=field_name,
                                    )
                                )
                            else:
                                print(msg)
                            continue_while = True
                            break
            data_read = True
            if include_rowx:
                row_data["rowx"] = row
            row += 1
            if continue_while:
                continue
            if not data_read:
                if return_errors:
                    return data, errors
                return data
            if not blank_row or not ignore_blank_rows:
                data.append(row_data)

        if return_errors:
            return data, errors
        return data

    def read(
        self,
        skip_titles=False,
        failfast=False,
        ignore_blank_rows=True,
        include_rowx=False,
        return_errors=False,
        starting_row=1,
    ):
        """
        Using the structure defined with the Field attributes, reads the excel
        and returns the data in an array of dicts

        If *return_errors* is true, returns ``(data, errors)`` where errors
        is a list of RowError tuples.  *include_rowx* is currently unused
        in this implementation.
        """
        data = []
        errors = []
        # Rows are 1-based in openpyxl; skip_titles skips the header row,
        # and an explicit starting_row overrides both.
        min_row = 1
        if skip_titles:
            min_row += 1
        if not starting_row == 1:
            min_row = starting_row

        # prepare the read for each field
        for field in self.fields:
            field.prepare_read()

        for row in self.sheet.iter_rows(min_row=min_row):
            row_data = {}
            empty_fields = []
            has_errors = False
            for cell in row:
                value = cell.value
                try:
                    # get fields by column
                    field = self.fields[cell.column - 1]
                except Exception:
                    # More sheet columns than declared fields: stop here.
                    break

                if value is None:
                    empty_fields.append(value)

                if value is None and hasattr(field, "default"):
                    # Empty cell: use the field default (callables are invoked).
                    default_value = field.default
                    if callable(default_value):
                        value = default_value()
                    else:
                        value = default_value
                else:
                    try:
                        value = field.cast(
                            value,
                            self.workbook,
                            row_data,
                        )
                    except Exception as err:
                        has_errors = True
                        if failfast:
                            raise

                        if return_errors:
                            row_number = 0
                            if len(row) > 0:
                                row_number = row[0].row
                            msg = f'Cannot read row "{row_number}" : Column {str(field.verbose_name)}, {err.args[0]}'
                            errors.append(
                                RowError(
                                    row=row,
                                    row_data=row_data,
                                    error=msg,
                                    field_name=field.name,
                                )
                            )
                        break
                row_data[field.name] = value

            if has_errors:
                # Skip rows with cast failures entirely.
                continue
            if ignore_blank_rows:
                # Append only if at least one field held a value.
                if not len(empty_fields) == len(row_data):
                    data.append(row_data)
            else:
                data.append(row_data)
        if return_errors:
            return data, errors
        return data

    def save(self):
        """Save document"""
        # xlwt save
        # self.workbook.save(self.path)
        # Closes the xlsxwriter workbook, which writes it to disk
        # (write mode only).
        self.workbook.close()

    def set_title_format(self, formt):
        # Hook for subclasses: mutate *formt* to style the title row.
        pass

    def set_row_format(self):
        # Hook for subclasses: return a format for data rows (default None).
        return None

    def write_rows(self, rows, col_offset=0, row_offset=0, set_titles=False):
        """Write rows in the current sheet"""
        title_formt = self.workbook.add_format()
        row_formt = self.set_row_format()
        if set_titles:
            self.set_title_format(title_formt)
        else:
            # No title styling requested: first row uses the plain row format.
            title_formt = row_formt

        for y, row in enumerate(rows):
            # set titles
            if y == 0:
                formt = title_formt
            else:
                formt = row_formt
            row_y = row_offset + y
            for x, value in enumerate(row):
                row_x = col_offset + x
                self.sheet.write(row_y, row_x, value, formt)

    def write_columns(self, columns, row_offset=0, col_offset=0, set_titles=False):
        """Write columns in the current sheet"""
        if set_titles:
            formt = self.workbook.add_format()
            self.set_title_format(formt)
        else:
            formt = None

        for x, column in enumerate(columns):
            # set titles
            # NOTE(review): only the first *column* keeps the title format
            # here (x > 0 resets it), unlike write_rows which styles the
            # first *row* -- confirm intended.
            if x > 0:
                formt = None
            column_x = col_offset + x
            for y, value in enumerate(column):
                column_y = row_offset + y
                self.sheet.write(column_y, column_x, value, formt)

    def write(self, data, set_titles=False):
        """Write *data* (an iterable of field-name -> value dicts) using the
        declared Field layout; unknown keys are silently ignored."""
        row = 0
        # set titles
        if set_titles:
            formt = self.workbook.add_format()
            self.set_title_format(formt)
            for field_name, field in list(self.fieldname_to_field.items()):
                self.sheet.write(0, field.col, str(field.verbose_name), formt)
            row = 1

        # set format and prepare the write for each field
        for field_name, field in self.fieldname_to_field.items():
            field.set_column_format(self)
            field.prepare_write()

        for row_data in data:
            for field_name, value in row_data.items():
                try:
                    field = self.fieldname_to_field[field_name]
                except KeyError:
                    # Key has no matching Field declaration: skip it.
                    pass
                else:
                    field.write(self.workbook, self.sheet, row, value)
            row += 1
| true |
3083464d0c4fbfb1893f373077343cc6a40a6f45 | Python | jayadams011/data-structures-and-algorithms | /challenges/quicksort/test_quicksort.py | UTF-8 | 419 | 2.875 | 3 | [
"MIT"
] | permissive | """Test and test imports."""
from .quicksort import quicksort
import pytest
def test_empty_quick_sort():
    """Sorting an empty list returns an empty list."""
    assert quicksort([]) == ([])


def test_small_quick_sort():
    """A small unsorted list comes back in ascending order."""
    assert quicksort([2, 3, 1]) == ([1, 2, 3])


def test_large_quick_sort():
    """A descending list of larger values comes back in ascending order."""
    assert quicksort([910, 78, 56, 34, 12]) == ([12, 34, 56, 78, 910])
| true |
407acded353f07acd7904f710e57fe5a89e1737c | Python | legmartini/pythonPOO | /exPOO008.py | UTF-8 | 2,892 | 4.09375 | 4 | [] | no_license | from time import sleep
class SimOuNao(Exception):
    """Raised when the player's answer is not 's' (sim/yes) or 'n' (não/no)."""
    def __str__(self):
        return 'Digite somente "s" para SIM e "n" para NÃO.'
class Jogo(object):
    """Binary-card mind-reading game (in Portuguese).

    Each of the six cards lists the numbers 1-63 that have a particular
    bit set; the first number on each card is the corresponding power of
    two.  Summing the first number of every card the player says "yes"
    to reconstructs the secret number.  The game runs immediately from
    the constructor.
    """
    def __init__(self):
        # Six cards: card i holds every number whose bit i is set.
        self.__cartoes = ('''
        1 3 5 7 9 11 13 15
        17 19 21 23 25 27 29 31
        33 35 37 39 41 43 45 47
        49 51 53 55 57 59 61 63
        ''', '''
        2 3 6 7 10 11 14 15
        18 19 22 23 26 27 30 31
        34 35 38 39 42 43 46 47
        50 51 54 55 58 59 62 63
        ''', '''
        4 5 6 7 12 13 14 15
        20 21 22 23 28 29 30 31
        36 37 38 39 44 45 46 47
        52 53 54 55 60 61 62 63
        ''', '''
        8 9 10 11 12 13 14 15
        24 25 26 27 28 29 30 31
        40 41 42 43 44 45 46 47
        56 57 58 59 60 61 62 63
        ''', '''
        16 17 18 19 20 21 22 23
        24 25 26 27 28 29 30 31
        48 49 50 51 52 53 54 55
        56 57 58 59 60 61 62 63
        ''', '''
        32 33 34 35 36 37 38 39
        40 41 42 43 44 45 46 47
        48 49 50 51 52 53 54 55
        56 57 58 59 60 61 62 63
        ''')
        self.__card = 0   # index of the card currently shown
        self.__num = 0    # accumulated guess of the secret number
        self.main()
    def apresentacao(self):
        # Intro: ask the player to think of a number between 1 and 63.
        print('Pense em um número entre 1 e 63.')
        sleep(5)
        print('Vou lhe mostrar diversos cartões e você deve me dizer')
        print('se o cartão contêm ou não o número que você pensou.')
        sleep(3)
    def recebeEntradaDoUsuario(self):
        # Loop until a valid yes/no answer is typed; returns True for 's'.
        while True:
            try:
                resp = input('\nEssa cartela contêm o número que você pensou? ').lower()
                if not resp.isalpha():
                    print('Digite apenas letras!!')
                elif resp.startswith('s'):
                    return True
                elif resp.startswith('n'):
                    return False
                else:
                    raise SimOuNao
            except SimOuNao:
                print(SimOuNao())
    def imprimeNumeroSecreto(self):
        # Reveal the reconstructed number with dramatic pauses.
        print('\nDeixa eu adivinhar...')
        sleep(2)
        print(f'Você pensou no número...')
        sleep(2)
        print(f'{self.__num}')
    def main(self):
        # Game loop: show each card and accumulate its weight on a "yes".
        self.apresentacao()
        for i in range(len(self.__cartoes)):
            self.__card = i
            self.mostraCartao()
            self.adicionaNumero(self.recebeEntradaDoUsuario())
        self.imprimeNumeroSecreto()
    def mostraCartao(self):
        # Display the current card.
        print(self.__cartoes[self.__card])
    def adicionaNumero(self, contem):
        # On "yes", add the card's first number (its power of two).
        if contem:
            self.__num += int(self.__cartoes[self.__card].split()[0])
if __name__ == '__main__':
    # Constructing Jogo starts the interactive game immediately.
    x = Jogo()
| true |
132af53aaea86a2b49f400e328cac9e6d5813785 | Python | bimri/programming_python | /chapter_11/textEditorNoConsole.pyw | UTF-8 | 611 | 2.765625 | 3 | [
"MIT"
] | permissive | "Windows (and other) launch files"
'''
gives the .pyw launching file used to suppress a DOS pop up on
Windows when run in some modes (for instance, when double-clicked), but still allow
for a console when the .py file is run directly.
Clicking this directly is similar to the behavior when PyEdit
is run from the PyDemos or PyGadgets demo launcher bars.
'''
"""
run without a DOS pop up on Windows; could use just a .pyw for both
imports and launch, but .py file retained for seeing any printed text
"""
exec(open('textEditor.py').read()) # as if pasted here (or textEditor.main())
| true |
a7087cb82db43a7aa5d4ae0d250517be862982ec | Python | itsolutionscorp/AutoStyle-Clustering | /all_data/exercism_data/python/meetup/f3aecf0d786e4466840d706004b641eb.py | UTF-8 | 1,934 | 3.4375 | 3 | [] | no_license | import datetime
from datetime import date
from datetime import timedelta
import re
# Weekday index (date.weekday()) -> weekday name.
days = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday',
        4: 'Friday', 5: 'Saturday', 6: 'Sunday'}


class MeetupDayException(Exception):
    """Raised when the requested meetup day is invalid or does not exist."""
    pass


def checkInputs(year, month, day, position):
    """Validate the meetup arguments.

    Returns the matched position token: a digit (from '1st', '2nd', ...),
    'teenth' or 'last'.  Raises MeetupDayException on any invalid input.
    """
    try:
        int(year)
        int(month)
    except ValueError:
        raise MeetupDayException("Wrong year/month inputs, please check the values entered")
    if day not in days.values():
        raise MeetupDayException("Wrong day input, please check the value entered")
    posi = re.search('[0-9]|teenth|last', position)
    if not posi:
        raise MeetupDayException("Wrong position input, please check the value entered")
    return posi.group(0)


def meetup_day(year, month, day, position):
    """Return the date of the meetup.

    *day* is a weekday name ('Monday'...), *position* is '1st', '2nd',
    '3rd', '4th', 'teenth' or 'last'.

    Bug fixes over the previous version:
    * 'last' in December no longer crashes (date(year, 13, 1) was built).
    * 'last' no longer returns the 1st of the *next* month when that day
      happens to fall on the requested weekday; the search now starts on
      the month's final day.
    """
    pos = checkInputs(year, month, day, position)

    if pos == 'last':
        # Start from the last day of the month and walk backwards.
        if month == 12:
            referenceDate = date(year + 1, 1, 1) - timedelta(days=1)
        else:
            referenceDate = date(year, month + 1, 1) - timedelta(days=1)
        while days[referenceDate.weekday()].upper() != day.upper():
            referenceDate = referenceDate - timedelta(days=1)
        return referenceDate

    # Find the first occurrence of the requested weekday in the month.
    referenceDate = date(year, month, 1)
    while days[referenceDate.weekday()].upper() != day.upper():
        referenceDate = referenceDate + timedelta(days=1)

    if pos == 'teenth':
        # The 'teenth' day is the one falling on the 13th-19th.
        while referenceDate.day < 13:
            referenceDate = referenceDate + timedelta(days=7)
        return referenceDate

    # Numeric position: advance week by week from the first occurrence.
    count = 1
    while count != int(pos):
        referenceDate = referenceDate + timedelta(days=7)
        if referenceDate.month != month:
            raise MeetupDayException("This day does not exist!")
        count = count + 1
    return referenceDate
| true |
541b49980e6597bf2b13395e529e9c81ad4b8071 | Python | Gallop-w/powderbed_detec | /Tools/get_piexl_position.py | UTF-8 | 793 | 2.65625 | 3 | [] | no_license | import cv2
# Image to sample pixel coordinates from (path relative to this script).
img = cv2.imread("../1_0_persp.jpg")
# print img.shape


def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):
    # Mouse callback: on left click, mark the point on the image and
    # draw its "x,y" pixel coordinates next to it.
    if event == cv2.EVENT_LBUTTONDOWN:
        xy = "%d,%d" % (x, y)
        cv2.circle(img, (x, y), 1, (255, 0, 0), thickness=-1)
        cv2.putText(img, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,
                    3, (0, 0, 255), thickness=4)
        cv2.imshow("image", img)


# Show the image at one-fifth size and install the click handler.
height, width = img.shape[:2]
print(int(height/5), int(width/5))
cv2.namedWindow("image", 0)
cv2.setMouseCallback("image", on_EVENT_LBUTTONDOWN)
cv2.resizeWindow('image', int(width/5), int(height/5))
cv2.imshow("image", img)
# NOTE(review): waitKey does not normally raise, so this loop is expected
# to run until the process is killed -- confirm the intended exit path.
while (True):
    try:
        cv2.waitKey(1)
    except Exception:
        cv2.destroyAllWindows()
        break
cv2.waitKey(0)
cv2.destroyAllWindows()
| true |
7c8ac2b67cd7403dfdf8c6bac6d310f7bc04a47d | Python | JakubSzwajka/geo-data-api | /app/test/test_ip_address_model_delete.py | UTF-8 | 2,084 | 2.5625 | 3 | [] | no_license |
from app.test.base import BaseTestCase
from app.main.utils import get_ip_of_url
import json
from app.test.utils import *
class Ip_address_delete_test_case(BaseTestCase):
    """Tests for the DELETE endpoints handling stored IP address objects."""

    def test_delete_ip_address_obj(self):
        """Deleting a previously stored address returns 200."""
        ip = "123.123.123.113"
        with self.client:
            add_new_obj_by_ip(self, ip)
            deletion = delete_obj(self, ip)
            self.assertEqual(deletion.status_code, 200)

    def test_delete_multiple_addresses_from_db(self):
        """Bulk deletion reports every removed address in the 'deleted' list."""
        ips = ["123.123.123.113", "123.123.123.103"]
        with self.client:
            add_multiple_objs(self, ips)
            deletion = delete_multiple_obj(self, ips)
            payload = json.loads(deletion.data.decode())
            self.assertEqual(deletion.status_code, 200)
            deleted_ips = [entry['ip'] for entry in payload['deleted']]
            for ip in ips:
                self.assertIn(ip, deleted_ips)

    def test_delete_ip_address_which_is_not_in_db(self):
        """Deleting an unknown address yields 404 plus an explanatory message."""
        ip = "123.123.123.113"
        with self.client:
            deletion = delete_obj(self, ip)
            payload = json.loads(deletion.data.decode())
            self.assertIn('there is no ip : 123.123.123.113', payload['message'])
            self.assertEqual(deletion.status_code, 404)

    def test_delete_ip_addresses_which_one_is_not_in_db(self):
        """A mixed batch reports both the deleted address and the miss message."""
        ips = ["123.123.123.113", "123.123.123.103"]
        with self.client:
            add_new_obj_by_ip(self, ips[0])
            deletion = delete_multiple_obj(self, ips)
            payload = json.loads(deletion.data.decode())
            self.assertEqual(deletion.status_code, 200)
            deleted_ips = [entry['ip'] for entry in payload['deleted']]
            self.assertIn(ips[0], deleted_ips)
            # The API apparently embeds the miss message inside the same
            # 'deleted' list -- verify against the endpoint implementation.
            self.assertIn('there is no ip : 123.123.123.103', deleted_ips)
| true |
18e3e1f23ca32e7fd3b0ec4082ca1174764f1280 | Python | gabriellaec/desoft-analise-exercicios | /backup/user_268/ch36_2020_03_28_15_36_20_562519.py | UTF-8 | 94 | 3.109375 | 3 | [] | no_license | def fatorial (n):
    """Return the factorial of n, computed iteratively.

    Any n <= 0 yields 1, because the loop body never runs.
    """
    fat=1
    # Multiply in n, n-1, ..., 1.
    while n>0:
        fat*=n
        n-=1
    return fat
| true |
74d1647be22d5d9a53f1463d7845608e95501802 | Python | PartIII-Student/GAN_essay_repo | /SALR.py | UTF-8 | 6,208 | 2.796875 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import pandas as pd
import os
import seaborn as sns
import matplotlib.pyplot as plt
import time
from tqdm import tqdm
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow INFO/WARNING output
# Structure: in each trial generate parameters, then for number_of_epochs
# generate a batch of size 'batch_size' each time from the input distribution and the real distribution
# and train the GAN on it
number_of_trails = 1000  # independent hyper-parameter trials (sic: "trails")
number_of_epochs = 50000  # training steps per (phi, gamma) pair
batch_size = 2000  # samples per training batch
hidden_layer_size_d = 12  # discriminator hidden-layer width
hidden_layer_size_g = 6  # generator hidden-layer width
# define actual distribution
real_mean = 6  # mean of the target Gaussian
real_sd = 1  # standard deviation of the target Gaussian
# discriminator and generator NNs
def discriminator(input, parameters):
    """Three-layer discriminator: two ReLU hidden layers, sigmoid output.

    *parameters* holds [W1, b1, W2, b2, W3, b3] in that order.
    """
    hidden_1 = tf.nn.relu(tf.add(tf.matmul(tf.to_float(input), parameters[0]), parameters[1]))
    hidden_2 = tf.nn.relu(tf.add(tf.matmul(hidden_1, parameters[2]), parameters[3]))
    logits = tf.add(tf.matmul(hidden_2, parameters[4]), parameters[5])
    return tf.sigmoid(logits)
def generator(input, parameters):
    """Single-hidden-layer generator: tanh hidden layer, linear output.

    *parameters* holds [W1, b1, W2, b2] in that order.
    """
    hidden = tf.tanh(tf.add(tf.matmul(tf.to_float(input), parameters[0]), parameters[1]))
    return tf.add(tf.matmul(hidden, parameters[2]), parameters[3])
# Create weights and biases variables
# Discriminator: 1 -> hidden -> hidden -> 1, all initialised uniformly in [-1, 1].
weight_d_1 = tf.Variable(tf.random_uniform([1, hidden_layer_size_d], minval=-1, maxval=1, dtype=tf.float32))
bias_d_1 = tf.Variable(tf.random_uniform([hidden_layer_size_d], minval=-1, maxval=1, dtype=tf.float32))
weight_d_2 = tf.Variable(tf.random_uniform([hidden_layer_size_d, hidden_layer_size_d], minval=-1, maxval=1, dtype=tf.float32))
bias_d_2 = tf.Variable(tf.random_uniform([hidden_layer_size_d], minval=-1, maxval=1, dtype=tf.float32))
weight_d_3 = tf.Variable(tf.random_uniform([hidden_layer_size_d, 1], minval=-1, maxval=1, dtype=tf.float32))
bias_d_3 = tf.Variable(tf.random_uniform([1], minval=-1, maxval=1, dtype=tf.float32))
d_parameters = [weight_d_1,bias_d_1, weight_d_2, bias_d_2, weight_d_3, bias_d_3]
# Generator: 1 -> hidden -> 1.
weight_g_1 = tf.Variable(tf.random_uniform([1, hidden_layer_size_g], minval=-1, maxval=1, dtype=tf.float32))
bias_g_1 = tf.Variable(tf.random_uniform([hidden_layer_size_g], minval=-1, maxval=1, dtype=tf.float32))
weight_g_2 = tf.Variable(tf.random_uniform([hidden_layer_size_g, 1], minval=-1, maxval=1, dtype=tf.float32))
bias_g_2 = tf.Variable(tf.random_uniform([1], minval=-1, maxval=1, dtype=tf.float32))
g_parameters = [weight_g_1,bias_g_1, weight_g_2, bias_g_2]
# losses
real_dist_placeholder = tf.placeholder(tf.float32, shape=(None, 1))
generator_input_placeholder = tf.placeholder(tf.float32, shape=(None, 1))
with tf.variable_scope("Discriminator") as scope:
    d_output_real = discriminator(real_dist_placeholder, d_parameters)
    scope.reuse_variables()
    d_output_fake = discriminator(generator(generator_input_placeholder, g_parameters), d_parameters)
loss_d = tf.reduce_mean(-tf.log(d_output_real) - tf.log(1 - d_output_fake))
loss_g = tf.reduce_mean(tf.log(1-d_output_fake))
# Score Adaptive Learning Rate
# phi/gamma are fed per run; psi and V modulate both learning rates based on
# the current discriminator score (see the learning_rate_* formulas below).
phi_g = tf.placeholder(tf.float32)
phi_d = tf.placeholder(tf.float32)
gamma_g = tf.placeholder(tf.float32)
gamma_d = tf.placeholder(tf.float32)
psi_1 = tf.log((gamma_d - gamma_g - (2*phi_g) - phi_d)/(-gamma_d+gamma_g-phi_d))/tf.log(16.0)
psi = tf.maximum(0.0, psi_1)
V = tf.minimum(tf.reduce_mean(tf.log(d_output_real)+tf.log(1-d_output_fake)),0)
learning_rate_d = gamma_d-phi_d*tf.tanh(psi*V)
learning_rate_g = gamma_g + phi_g*(1 + tf.tanh(psi*V))
# Train step
train_g = tf.train.GradientDescentOptimizer(learning_rate_g).minimize(loss_g, var_list=g_parameters)
train_d = tf.train.GradientDescentOptimizer(learning_rate_d).minimize(loss_d, var_list=d_parameters)
for it in range(1,number_of_trails+1):
    # sample parameters
    gamma_vec = np.random.uniform(0.0000001,0.1,4)
    phi_vec = np.random.uniform(0.0, 0.1, 4)
    phi_vec[0] = 0.0  # first phi is pinned to 0.0
    res_matrix = np.zeros((len(gamma_vec) * len(phi_vec), batch_size))
    gamma_out_vec, phi_out_vec = np.zeros((len(gamma_vec) * len(phi_vec))), np.zeros((len(gamma_vec) * len(phi_vec)))
    row =0
    for i, p in enumerate(phi_vec):
        for j, k in enumerate(gamma_vec):
            print 'Trial: {}/{}'.format(it,number_of_trails)
            print 'Step: {}/{}'.format(row+1, len(gamma_vec) * len(phi_vec))
            print 'Phi: {0}'.format(p)
            print 'Gamma: {0}'.format(k)
            # Fresh session per (phi, gamma) pair: variables re-initialised below.
            with tf.Session() as sess:
                tf.global_variables_initializer().run()
                # writer = tf.summary.FileWriter('./graphs', sess.graph)
                for step in tqdm(range(1, number_of_epochs+1)):
                    generator_input = np.random.uniform(0, 1, (batch_size, 1))
                    real_dist = np.random.normal(real_mean, real_sd, (batch_size, 1))
                    # One discriminator update followed by one generator update.
                    sess.run(train_d, feed_dict={real_dist_placeholder: real_dist,
                                                 generator_input_placeholder: generator_input, phi_g: p,phi_d:p,
                                                 gamma_g:k,gamma_d:k })
                    sess.run(train_g, feed_dict={real_dist_placeholder: real_dist,
                                                 generator_input_placeholder: generator_input, phi_g: p,phi_d:p,
                                                 gamma_g:k,gamma_d:k })
                # Draw one final batch from the trained generator for evaluation.
                generator_input = np.random.uniform(0, 1, (batch_size, 1))
                real_dist = np.random.normal(real_mean, real_sd, (batch_size, 1))
                generated = sess.run(generator(generator_input,g_parameters))
                res_matrix[row] = generated.reshape(batch_size)
                gamma_out_vec[row] = k
                phi_out_vec[row] = p
                row = row + 1
                print 'Mean of generated sample: {0}'.format(np.mean(generated))
                print 'Standard Deviation of generated sample: {0}'.format(np.std(generated))
                # writer.close()
                # sns.distplot(generated, hist=False, rug=False)
                # sns.distplot(real_dist, hist=False, rug=False)
                # plt.show()
    # Append this trial's samples (prefixed by the gamma and phi columns) to the CSV.
    res_dataframe = pd.DataFrame(data=res_matrix.astype(float))
    gamma_dataframe = pd.DataFrame(data=gamma_out_vec.astype(float))
    phi_dataframe = pd.DataFrame(data=phi_out_vec.astype(float))
    output_dataframe1 = pd.concat([gamma_dataframe.reset_index(drop=True), phi_dataframe], axis=1)
    output_dataframe2 = pd.concat([output_dataframe1.reset_index(drop=True), res_dataframe], axis=1)
    script_path = os.path.abspath(__file__)
    script_dir = os.path.split(script_path)[0]
    file = os.path.join(script_dir,'SALR_data/gd/output.csv')
    with open(file, 'a') as f:
        output_dataframe2.to_csv(f, sep=',', header=False, float_format='%.9f', index=False)
| true |
33086031c3fe45cf02d0a8f66b317616b07b3295 | Python | aarongerig/python-leap | /leap.py | UTF-8 | 206 | 3.625 | 4 | [] | no_license | def leap_year(year: int) -> bool:
if not isinstance(year, int):
raise Exception(f'The given year "{year}" is not an integer.')
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
| true |
8c4a9fc9ed2184d709cb7f762bbe3dfd33f02222 | Python | Dhinessplayz/Python | /chess/chess_state.py | UTF-8 | 2,733 | 3.3125 | 3 | [] | no_license | import chess
import sys
class ChessState(chess.Board):
    """
    Chessboard subclass implementing the interface needed for minimax

    Adds a heuristic evaluation hook, optional memoization of evaluated
    positions, and a small search API (winner/value/moves/do/is_terminal).
    """
    def __init__(self, evaluate=(lambda _: 0), memoize=False, fen=None):
        # evaluate is an heuristic function taking a board state
        # and returning an approximate value.
        self.evaluate = evaluate
        self.memoize = memoize
        if memoize:
            # Cache of hashable() -> value, only allocated when memoizing.
            self.values = dict()
        super().__init__(fen=fen)
    def __str__(self):
        # Board representation: ranks 8..1 top to bottom, '#' for empty squares.
        result = [' ABCDEFGH']
        for i in range(8):
            line = [str(i+1), ' ']
            for j in range(8):
                piece = self.piece_at(8*i+j)
                if piece:
                    line.append(piece.symbol())
                else:
                    line.append('#')
            result.append(''.join(line))
        return '\n'.join(reversed(result))
    def winner(self):
        """Return None for a draw, False while the game is running, else the winning color."""
        if (self.is_stalemate() or
            self.is_insufficient_material() or
            self.can_claim_draw()):
            return None
        if not self.is_game_over():
            return False
        # Game over and not caught by the draw checks above: the side to move
        # is assumed mated, so the other color wins.
        # NOTE(review): automatic draws (e.g. 75-move rule) are not filtered
        # above and would be scored as a win here -- confirm intended.
        return chess.WHITE if self.turn == chess.BLACK else chess.BLACK
    def hashable(self):
        """Position key for memoization, built from python-chess board fields."""
        return (self.occupied_co[chess.WHITE],
                self.occupied_co[chess.BLACK],
                self.pawns,
                self.knights,
                self.bishops,
                self.rooks,
                self.queens,
                self.kings)
    def value(self):
        """Get ground value of state, if exists, or evaluate(state) if not."""
        h = self.hashable()
        if self.memoize and h in self.values:
            return self.values[h]
        result = None
        winner = self.winner()
        if winner == False:
            # Game's not over
            result = self.evaluate(self)
        elif winner is None:
            # Draws are neutral
            result = 0
        else:
            # Good for winner, bad for loser
            result = float("inf" if winner == chess.BLACK else "-inf")
        if self.memoize:
            self.values[h] = result
        return result
    def moves(self):
        """Yield (move, board-after-move) for each legal move.

        NOTE: yields *self* mutated in place (push before yield, pop after);
        a yielded board is only valid until the generator is advanced.
        """
        for move in self.generate_legal_moves():
            self.push(move)
            yield (move, self)
            self.pop()
    def do(self, move):
        """Return a new board resulting from the current player taking move"""
        result = ChessState(evaluate=self.evaluate, fen=self.fen(), memoize=self.memoize)
        result.push(move)
        return result
    def is_terminal(self):
        """Return True when the game is over."""
        return self.is_game_over()
| true |
3278fce51fb4233676caea0ea00bb8c435995209 | Python | WIPACrepo/iceprod | /resources/call_graph.py | UTF-8 | 3,706 | 3.484375 | 3 | [
"MIT"
] | permissive | """
Make a call graph for async functions.
Does not catch regular function calls.
"""
import string
from collections import OrderedDict
# utils
def is_func(line):
    """Return True if *line* (ignoring surrounding whitespace) opens a function definition."""
    stripped = line.strip(string.whitespace)
    # str.startswith accepts a tuple of prefixes -- one call covers both forms.
    return stripped.startswith(('async def', 'def'))
def all_whitespace(line):
    """Return True when *line* contains no non-whitespace characters."""
    # PEP 8 discourages assigning a lambda to a name; a def keeps the
    # same callable name with a proper signature and docstring.
    return not line.strip(string.whitespace)
def get_indent(line):
    """Return the column of the first non-whitespace character, or -1 for a blank line."""
    for position, character in enumerate(line):
        if character not in string.whitespace:
            return position
    return -1
def find_func_names(lines):
    """Find all function names

    Collects names of defs opened at the shallowest visible level; defs
    nested inside a recorded function's body are skipped.
    """
    names = []
    inside = False      # currently scanning the body of a recorded function
    base_indent = -1
    for raw in lines:
        line = raw.strip('\r\n')
        # A dedent back to (or past) the recorded def's column ends its body.
        if inside and not all_whitespace(line) and get_indent(line) <= base_indent:
            inside = False
        if not inside and is_func(line):
            inside = True
            base_indent = get_indent(line)
            names.append(line.split('def ', 1)[-1].split('(', 1)[0])
    return names
def find_func(lines, name):
    """Find the lines of a specific function

    Generator yielding the body lines of the first function whose def-line
    name contains *name*; stops at the first dedent back to the def's
    indent level. Raises only when the whole input is consumed without a
    match.
    """
    # NOTE: substring match -- 'foo' also matches a def named 'foobar'.
    is_my_func = lambda line: is_func(line) and name in line.split('(')[0]
    found = False
    indent = -1
    last_line = ''
    for line in lines:
        line = line.strip('\r\n')
        # Re-join lines that continue after a trailing comma (multi-line
        # signatures/calls) before matching.
        if last_line:
            line = last_line+line.strip()
        if line.strip().endswith(','):
            last_line = line
        else:
            last_line = ''
        if (not found) and is_my_func(line):
            found = True
            indent = get_indent(line)
        elif found and not all_whitespace(line):
            if get_indent(line) > indent:
                yield line
            else:
                return # end of function
    if not found:
        # Inside a generator, this raise only surfaces once it is iterated.
        raise Exception(f'{name} not found')
def process_func(lines, func_names):
    """Search for function calls

    Returns an OrderedDict mapping detected call names to occurrence
    counts. Two detections per line:
      1. any known name from *func_names* followed by '(' -- the token
         directly in front of the name (e.g. 'self.') is glued onto it;
      2. whatever expression follows an 'await' keyword.
    """
    ret = OrderedDict()
    for line in lines:
        #print(f':: {line}')
        for n in func_names:
            if n+'(' in line:
                # Prefix the name with the token preceding the call
                # ('self.', 'obj.', ...), taken from the text before 'n('.
                name = line.split(n+'(')[0].split('(')[-1].split()[-1]+n
                if name in ret:
                    ret[name] += 1
                else:
                    ret[name] = 1
        if 'await' in line:
            line = line.split('await',1)[-1].strip()
            if line.startswith('asyncio.ensure_future'):
                # Unwrap ensure_future(...) so the inner call is recorded.
                line = line.split('(',1)[-1]
            if 'rest_client.request' in line:
                # Keep the full request(...) expression up to its first ')'.
                line = line.split(')',1)[0]+')'
            else:
                # Otherwise record only the dotted name of the awaited callable.
                line = line.split('(',1)[0]
            if line in ret:
                ret[line] += 1
            else:
                ret[line] = 1
    return ret
def analyze_calls(lines, funcname, indent=0, recurse=True):
    """Print the calls made by *funcname*, recursing into plain and self.* calls."""
    known_names = find_func_names(lines)
    calls = process_func(find_func(lines, funcname), known_names)
    for call in calls:
        # Skip calls on foreign objects; keep bare names and self.* methods.
        if '.' in call and not call.startswith('self'):
            continue
        print(' '*indent+call)
        if recurse:
            target = call[5:] if call.startswith('self.') else call
            try:
                analyze_calls(lines, target, indent=indent+2, recurse=True)
            except Exception:
                # Best effort: names we cannot resolve are silently skipped.
                pass
def main():
    """CLI entry point: parse arguments, read the file, print its call graph."""
    import argparse
    cli = argparse.ArgumentParser()
    cli.add_argument('filename')
    cli.add_argument('function')
    cli.add_argument('-r','--recurse',default=True)
    args = cli.parse_args()
    print(f'searching for {args.function} in file {args.filename}')
    with open(args.filename) as source:
        source_lines = source.readlines()
    print('')
    print('Calls: ')
    analyze_calls(source_lines, args.function, indent=0, recurse=args.recurse)
if __name__ == '__main__':
main() | true |
514cf4213360f3644da51a65b96e6f245c08cd91 | Python | toggame/Python_learn | /第二章/case_test.py | UTF-8 | 623 | 3.78125 | 4 | [] | no_license | a = 'our domain is crazyit.org'
# Title-case: capitalise the first letter of every word
print(a.title())
# All lower-case
print(a.lower())
# All upper-case
print(a.upper())
s = ' this is a puppy '
# Strip whitespace from the left only
print(s.lstrip())
# Strip whitespace from the right only
print(s.rstrip())
# Strip whitespace from both sides
print(s.strip())
s2 = 'i think it is a scarecrow'
# With an argument, lstrip/rstrip remove any leading/trailing characters
# CONTAINED in the given set -- not the literal substring.
print(s2.lstrip('itow')) # prints ' think it is a scarecrow' (only the leading 'i' is removed)
print(s2.lstrip('tow')) # prints 'i think it is a scarecrow' ('i' is not in the set)
print(s2.rstrip('itow')) # prints 'i think it is a scarecr'
print(s2.strip('itow')) # prints ' think it is a scarecr'
| true |
6405c3378feec05213a913732fa614b2b000c522 | Python | VB6Hobbyst7/Contour3D | /dataset/shapes.py | UTF-8 | 1,022 | 3.46875 | 3 | [] | no_license | import numpy as np
class SphericalCap:
    """Radial profile built from a sphere of radius *r* centred at (x0, y0).

    __call__(x, y) evaluates min(sqrt(max(r^2 - d^2, 0)) - r + a, 0), where
    d is the distance from (x0, y0); the result is clipped to be
    non-positive (0 inside the cap region, down to a - r far away).
    """

    def __init__(self, a, x0, y0, r):
        # The construction only makes sense when a does not exceed r; the
        # original raised a bare ValueError -- a message aids debugging.
        if a > r:
            raise ValueError("parameter a must not exceed radius r")
        self.a = a
        self.x0 = x0
        self.y0 = y0
        self.r = r

    def __call__(self, x, y):
        """Evaluate the (non-positive) profile at coordinates (x, y)."""
        # Squared sphere height above the plane; clipped to zero outside the
        # sphere's footprint so the square root stays real.
        t = self.r ** 2 - (x - self.x0) ** 2 - (y - self.y0) ** 2
        t = np.clip(t, 0, None)
        return np.clip(t ** .5 - self.r + self.a, None, 0)
class SinusoidWaves:
    """Plane sinusoid over (x, y) with amplitude *a* and wave vector (x0, y0)."""

    def __init__(self, a, x0, y0):
        self.a = a
        self.x0 = x0
        self.y0 = y0
        # Squared length of the wave vector; normalises the phase below.
        self.lambda_squared = x0 ** 2 + y0 ** 2

    def __call__(self, x, y):
        """Evaluate the sinusoid at (x, y)."""
        phase = (x * self.x0 + y * self.y0) / self.lambda_squared
        return self.a * np.sin(phase * 2 * np.pi)
class Rings:
    """Gaussian ridge of amplitude *a* along the circle of radius *r* around (x0, y0)."""

    def __init__(self, a, x0, y0, r, sigma):
        self.A = a
        self.x0 = x0
        self.y0 = y0
        self.r = r
        self.sigma = sigma

    def __call__(self, x, y):
        """Evaluate the ring profile at (x, y)."""
        radial = np.sqrt((x - self.x0) ** 2 + (y - self.y0) ** 2)
        # Gaussian of the distance from the ring, width sigma, peak A.
        return self.A * np.exp(-(radial - self.r) ** 2 / (2 * (self.sigma ** 2)))
| true |
dbb7e09bda996ede7be312e7a9b3fa5c6b0c0af8 | Python | LewisT543/Notes | /Learning_Tkinter/6Building-a-GUI-from-scratch.py | UTF-8 | 2,191 | 3.875 | 4 | [] | no_license | import tkinter as tk
from tkinter import messagebox
def Click():
    """Ask the user to confirm quitting; destroy the main window on 'yes'."""
    answer = messagebox.askquestion('Quit?', 'Are, you sure?')
    if answer == 'yes':
        window.destroy()
# --- Build the demo window and its widgets; mainloop() at the bottom
# blocks until the window is closed. ---
window = tk.Tk()
# Label
label = tk.Label(window, text = "Little label:")
label.pack()
# Frame
frame = tk.Frame(window, height=30, width=100, bg="#000099")
frame.pack()
# Button
button = tk.Button(window, text="Button", command = Click)
button.pack(fill=tk.X)
# Switch
switch = tk.IntVar()
switch.set(1)
# Switch is not visible.
# IntVar objects are set to hold integer values and control internal communication between different widgets;
# to set a value on an IntVar obj, we must use the set() method.
# Checkbutton
checkbutton = tk.Checkbutton(window, text="Check Button", variable=switch)
checkbutton.pack()
# If you check or uncheck the checkbutton, because of the variable=switch argument above, the switch will change its
# state from a 1 (checked), to a 0 (unchecked) and vice-versa.
# If you change the state of the SWITCH object, the CHECKBUTTON object would IMMEDIATELY reflect the change. This means
# we do not have to access the checkbutton object directly, we can modify the switch value instead.
# Entry
entry = tk.Entry(window, width=30)
entry.pack()
# This allows us to input small data, of width 30 chars.
# Radio Buttons
radiobutton_1 = tk.Radiobutton(window, text="Steak", variable=switch, value=0)
radiobutton_1.pack()
radiobutton_2 = tk.Radiobutton(window, text="Salad", variable=switch, value=1)
radiobutton_2.pack()
# Radiobuttons are similar to switches but work in groups, while 1 is active, the other(s) is/are not.
# ONLY ONE OF THE PAIR (OR MORE) OF RADIOBUTTONS MAY BE ACTIVE AT ONCE
# Radiobutton arguments:
# The VARIABLE argument binds a SWITCH object to both of the widgets,
# and this is the clue – the fact that both Radiobuttons are bound to
# the SAME OBJECT creates the GROUP. Don't forget that!
# The value argument distinguishes the Radiobuttons inside the group,
# and thus each of the Radiobuttons has to use a different value (we've used 0 and 1)
# all pack()'ed so potentially messy.
window.mainloop() | true |
d837ab47eed989ab25af66c550fb3691d4c27972 | Python | neer1304/CS-Reference | /Scripting/Python_Scripting/Python_Basics/operators/first.py | UTF-8 | 183 | 3.203125 | 3 | [] | no_license | #!usr/bin/python
'''A very simple program,
showing how short a Python program can be!
Authors: Sibu Cyriac
'''
print 'Hello world!' # Python 2 print statement: writes "Hello world!" to stdout
| true |
b59cb3042f801c7841329075e74ca5fcf8efed47 | Python | s5suzuki/autd3-paper | /analyze/xy_field.py | UTF-8 | 4,032 | 2.546875 | 3 | [
"MIT"
] | permissive | '''
File: xy_field.py
Project: analyze
Created Date: 17/02/2021
Author: Shun Suzuki
-----
Last Modified: 24/02/2021
Modified By: Shun Suzuki (suzuki@hapis.k.u-tokyo.ac.jp)
-----
Copyright (c) 2021 Hapis Lab. All rights reserved.
'''
from shared import setup_pyplot, get_40kHz_amp, print_progress
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mpl_toolkits.axes_grid1
import os
import glob
import re
def plot_acoustic_field_2d(axes, acoustic_pressures_2d, observe_x, observe_y, resolution, ticks_step, cmap='jet'):
    """Draw a 2D pressure map on *axes* and label its ticks in physical units.

    observe_x / observe_y are (min, max) ranges, *resolution* is the grid
    spacing and *ticks_step* the physical distance between axis labels.
    Returns the heatmap artist (e.g. for attaching a colorbar).
    """
    heatmap = axes.pcolor(acoustic_pressures_2d, cmap=cmap)
    num_x = int(math.floor((observe_x[1] - observe_x[0]) / ticks_step)) + 1
    num_y = int(math.floor((observe_y[1] - observe_y[0]) / ticks_step)) + 1
    labels_x = [observe_x[0] + ticks_step * i for i in range(num_x)]
    labels_y = [observe_y[0] + ticks_step * i for i in range(num_y)]
    ticks_x = [ticks_step / resolution * i for i in range(num_x)]
    ticks_y = [ticks_step / resolution * i for i in range(num_y)]
    # Half-cell offset applied to tick positions (pcolor draws cells between
    # integer coordinates).
    axes.set_xticks(np.array(ticks_x) + 0.5, minor=False)
    axes.set_yticks(np.array(ticks_y) + 0.5, minor=False)
    axes.set_xticklabels(labels_x, minor=False)
    axes.set_yticklabels(labels_y, minor=False)
    return heatmap
def calc(data_path):
    """Convert raw measurement CSVs in *data_path* into an RMS pressure grid saved as xy.csv.

    Expects a cond.txt whose column-1 values hold the sample rate (row 0)
    and mV-per-Pa calibration (row 2), plus one x<X>y<Y>z<Z>.csv file per
    measurement point.
    """
    cond = pd.read_csv(filepath_or_buffer=os.path.join(data_path, 'cond.txt'), sep=",", header=None)
    sample_rate = cond.at[0, 1]
    mV_per_Pa = cond.at[2, 1]
    dt = 1.0 / sample_rate
    # Filenames encode the measurement position, e.g. x10.0y-5.0z150.0.csv
    p = re.compile(r'x([+-]?\d+\.?\d+?)y([+-]?\d+\.?\d+?)z([+-]?\d+\.?\d+?).csv')
    x_axis = []
    y_axis = []
    total = 0
    # First pass: collect the grid coordinates and count measurement files.
    for filepath in glob.glob(os.path.join(data_path, '*')):
        m = p.match(filepath.split(os.path.sep)[-1])
        if m is None:
            continue
        x = float(m.group(1))
        y = float(m.group(2))
        x_axis.append(x)
        y_axis.append(y)
        total += 1
    x_axis = sorted(set(x_axis))
    y_axis = sorted(set(y_axis))
    rms = pd.DataFrame(index=y_axis, columns=x_axis)
    c = 0
    # Second pass: extract the 40 kHz amplitude per point, convert mV -> Pa,
    # and divide by sqrt(2) to get the RMS value.
    for filepath in glob.glob(os.path.join(data_path, '*')):
        m = p.match(filepath.split(os.path.sep)[-1])
        if m is None:
            continue
        x = float(m.group(1))
        y = float(m.group(2))
        df = pd.read_csv(filepath_or_buffer=filepath, sep=",")
        sound = df[' A Max [mV]']
        rms.at[y, x] = get_40kHz_amp(sound, dt) / mV_per_Pa / np.sqrt(2)
        c += 1
        print_progress(c, total)
    print()
    rms.to_csv('xy.csv')
def plot(plot_r):
    """Plot a centred plot_r x plot_r crop of the xy.csv pressure map and save it.

    Relies on the module-level DPI and ext globals being set beforehand
    (done in the __main__ block).
    """
    rms = pd.read_csv('xy.csv', index_col=0)
    # Grid spacing inferred from the first two column coordinates.
    resolution = float(rms.columns[1]) - float(rms.columns[0])
    nx = len(rms.columns)
    ny = len(rms.index)
    # Index window for a centred crop of plot_r (in the same units as the
    # column/row labels).
    nx_min = int((nx - 1) / 2 - plot_r / resolution / 2)
    nx_max = int((nx - 1) / 2 + plot_r / resolution / 2) + 1
    ny_min = int((ny - 1) / 2 - plot_r / resolution / 2)
    ny_max = int((ny - 1) / 2 + plot_r / resolution / 2) + 1
    rms = rms.to_numpy().transpose().astype(np.float32)
    rms = rms[nx_min:nx_max, ny_min:ny_max]
    plot_xr = (-plot_r / 2, plot_r / 2)
    plot_yr = (-plot_r / 2, plot_r / 2)
    print('max [Pa]: ', rms.max())
    fig = plt.figure(figsize=(7, 6), dpi=DPI)
    axes = fig.add_subplot(111, aspect='equal')
    heat_map = plot_acoustic_field_2d(axes, rms, plot_xr, plot_yr, resolution, ticks_step=10.0)
    divider = mpl_toolkits.axes_grid1.make_axes_locatable(axes)
    plt.ylabel(r'$y\,[\mathrm{mm}]$', fontname='Arial', fontsize=18)
    plt.xlabel(r'$x\,[\mathrm{mm}]$', fontname='Arial', fontsize=18)
    # Colorbar on a dedicated axis to the right of the map.
    cax = divider.append_axes('right', size='5%', pad='3%')
    cax.tick_params(labelsize=16)
    fig.colorbar(heat_map, cax=cax)
    cax.set_ylabel(r'$\mathrm{RMS\ of\ acoustic\ pressure}\,[\mathrm{Pa}]$', fontsize=18)
    plt.tight_layout()
    plt.savefig(os.path.join('plot', 'xy' + ext), bbox_inches='tight', pad_inches=0)
if __name__ == '__main__':
    os.makedirs('plot', exist_ok=True)
    setup_pyplot()
    # Module-level globals consumed by plot():
    DPI = 300
    ext = '.pdf'
    calc('./raw_data/xy')
    plot(80)
| true |
1c03cf10099333cc68f8b4c86fe67e166fbe2267 | Python | Ninlives/pam-remote-otp | /client/validate.py | UTF-8 | 964 | 2.5625 | 3 | [] | no_license | from yubiotp.otp import decode_otp
from binascii import unhexlify
class Validator:
    """Validates Yubico OTP tokens against a stored device identity and replay state."""

    def __init__(self, public_id, private_id, key, session, counter):
        # Normalise credentials once: public id as bytes, hex secrets decoded
        # to their raw byte form.
        self.public_id = public_id.encode('utf-8')
        self.private_id = unhexlify(private_id)
        self.key = unhexlify(key)
        self.session = session
        self.counter = counter

    def verify(self, token):
        """Return True for a genuine, not-previously-seen token; updates replay state."""
        if isinstance(token, str):
            token = token.encode('utf-8')
        try:
            public_id, otp = decode_otp(token, self.key)
        except Exception:
            # Malformed or wrongly-encrypted tokens are simply invalid.
            return False
        if public_id != self.public_id or otp.uid != self.private_id:
            return False
        # Replay protection: the (session, counter) pair must strictly
        # increase (lexicographic tuple comparison).
        if (otp.session, otp.counter) <= (self.session, self.counter):
            return False
        self.session = otp.session
        self.counter = otp.counter
        return True
| true |
68f508f036741223a5a94e79118b187c98112dac | Python | smanjil/Python-Crypto | /euclid.py | UTF-8 | 193 | 3.515625 | 4 | [] | no_license |
def numInput():
    """Prompt for two numbers and print their greatest common divisor."""
    # Python 2: input() evaluates the typed expression, so numbers arrive as ints.
    a , b = input("Enter a : ") , input("Enter b : ")
    # Python 2 print statement.
    print euclidGCD(a , b)
def euclidGCD(a , b):
    """Return the greatest common divisor of a and b (iterative Euclidean algorithm)."""
    while b:
        a, b = b, a % b
    return a
# Run the interactive prompt when this module is executed.
numInput()
| true |
82922ec053c90db584fe9b57c12146d16d7860a3 | Python | Sumanshu-Nankana/DLG | /app.py | UTF-8 | 744 | 3.203125 | 3 | [] | no_license | from flask import Flask
from flask_restful import Resource, Api
# Flask application and the Flask-RESTful wrapper used to register resources.
app = Flask(__name__)
api = Api(app)
class SumofNumbers(Resource):
    """GET /total -- sum of the integers 0..10,000,000."""

    def get(self):
        # Closed-form n*(n+1)/2 with integer division. The original built a
        # ten-million element list only to take its length; the JSON result
        # is unchanged.
        n = 10000000
        total = n * (n + 1) // 2
        return {"total" : total}
class SumofNumbers1(Resource):
    """GET /total/<n> -- sum of the integers 0..n."""

    def get(self, n):
        # Integer division keeps the result exact for arbitrarily large n
        # (float-based int(n*(n+1)/2) loses precision past 2**53);
        # n*(n+1) is always even, so // never truncates.
        total = n * (n + 1) // 2
        return {"total" : total}
# Route registrations: fixed endpoint and the parameterised /total/<n>.
api.add_resource(SumofNumbers, '/total')
api.add_resource(SumofNumbers1, '/total/<int:n>')
if __name__ == "__main__":
    app.run()
# Usage examples:
# curl http://127.0.0.1:5000/total
# curl localhost:5000/total/n # where n could be any positive integer,
# i.e. upto which we need sum | true |
f24f66f9d4511503ff298d9decf3420c5a62f3a7 | Python | eisenhart-andrew/Ml-gateway | /src/main.py | UTF-8 | 573 | 2.515625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 9 20:21:17 2021
@author: Andrew
"""
from data_generator import generator_regression
from data_loader_sorter import data_loader
from data_loader_sorter import remove_missing
from trainers import sk_regression_trainer
from trainers import sk_classification_trainer
generator_regression()
raw = data_loader('DATA.csv',3)
X,y = remove_missing(raw,10)
#X, y = standardize(data)
sk_regression_trainer(X,y)
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
y = iris.target
sk_classification_trainer(X, y)
| true |
6ba184d6f5891da0f0a539115fcdff7172337b79 | Python | eter0000/learningnotes | /Leetcode/707_Design Linked List_06170210.py | UTF-8 | 2,989 | 3.71875 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
class MyLinkedList:
    """Singly linked list in which the head node doubles as the list object.

    An empty list has val is None and next is None; a one-element list
    stores the element directly in self.val.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.val = None
        self.next = None

    def get(self, index: int) -> int:
        """
        Get the value of the index-th node in the linked list. If the index is invalid, return -1.
        """
        if self.val is None:
            return -1
        if index == 0:
            return self.val
        node = self.next
        i = 1
        while node is not None:
            if i == index:
                return node.val
            node = node.next
            i += 1
        return -1

    def addAtHead(self, val: int) -> None:
        """
        Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.
        """
        if self.val is None:
            self.val = val
            return
        # Shift the current head value into a fresh second node.
        old_val = self.val
        self.val = val
        old_next = self.next
        self.next = MyLinkedList()
        self.next.val = old_val
        self.next.next = old_next

    def addAtTail(self, val: int) -> None:
        """
        Append a node of value val to the last element of the linked list.
        """
        if self.val is None:
            self.val = val
            return
        tail = self
        while tail.next is not None:
            tail = tail.next
        tail.next = MyLinkedList()
        tail.next.val = val

    def addAtIndex(self, index: int, val: int) -> None:
        """
        Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. If index is greater than the length, the node will not be inserted.
        """
        if index <= 0:
            # Indices at or before the head insert at the head.
            self.addAtHead(val)
            return
        i = 0
        node = self
        prev = node
        while i < index:
            i += 1
            prev = node
            if node is not None and node.val is not None:
                node = node.next
            else:
                # index is past the end of the list: do nothing.
                return
        prev.next = MyLinkedList()
        prev.next.val = val
        prev.next.next = node

    def deleteAtIndex(self, index: int) -> None:
        """
        Delete the index-th node in the linked list, if the index is valid.
        """
        if index < 0:
            return
        if index == 0:
            if self.val is None:
                return
            if self.next is None:
                # BUG FIX: the original did `self = None`, which only rebinds
                # the local name and leaves the old value in place; clearing
                # val marks the list empty again.
                self.val = None
                return
            # Copy the second node's payload into the head and unlink it.
            self.val = self.next.val
            self.next = self.next.next
            return
        i = 0
        node = self
        prev = node
        while i < index:
            i += 1
            prev = node
            if prev is None:
                # index is past the end of the list.
                return
            node = node.next
        if node is not None:
            prev.next = node.next
        else:
            prev.next = None
# In[ ]:
| true |
6b8278db992f15dd635749058cc088761e74e9f9 | Python | janiszewskibartlomiej/Python_Code_Me_Gda | /Python - advanced/zajecia14/funk/sorted01.py | UTF-8 | 398 | 3.234375 | 3 | [] | no_license | ludzie = ['Jakub Malinowski',
'Jadwiga Brzezińska',
'Roman Sawicki',
'Marcin Szymczak',
'Joanna Baranowska',
'Maciej Szczepański',
'Czesław Wróbel',
'Grażyna Górska',
'Wanda Krawczyk',
'Renata Urbańska']
# Sort people by their last name.
def zwroc_nazwisko(czlowiek):
    """Return the surname (second word) of a 'First Last' name string."""
    return czlowiek.split()[1]
# Sort the list using the surname as the key and show the result.
posortowani_ludzie = sorted(ludzie, key=zwroc_nazwisko)
print(posortowani_ludzie)
| true |
687ec7879dce9c8a076052a3d175839fa62a1c18 | Python | HPI-MachineIntelligence-MetaLearning/Utilities | /plot_log.py | UTF-8 | 2,327 | 2.671875 | 3 | [] | no_license | import json
import string
from collections import defaultdict
import matplotlib.pyplot as plt
from os import makedirs
from os.path import exists
import yaml
# Per-class labels tracked in the training log (Berlin landmark names).
LABELS = ['siegessaeule',
          'fernsehturm',
          'funkturm',
          'berlinerdom',
          'other',
          'brandenburgertor',
          'reichstag',
          'rotesrathaus']
# Non-per-class log entries that are plotted as well.
OTHER_RESULTS = ['main/loss',
                 'main/loss/conf',
                 'main/loss/loc']
# NOTE(review): yaml.load without an explicit Loader is deprecated and unsafe
# on untrusted input; yaml.safe_load would be the usual choice here.
with open('plot_config.yml', 'r') as cfg:
    CONFIG = yaml.load(cfg)
def format_filename(s):
    """Sanitise *s* into a safe filename: drop disallowed characters, then replace spaces with underscores."""
    # Set membership is O(1) per character (the original scanned a string).
    valid_chars = set("-_.() " + string.ascii_letters + string.digits)
    filename = ''.join(c for c in s if c in valid_chars)
    return filename.replace(' ', '_')
def plot_for_key(data, label, desc):
    """Plot iteration-vs-value for data[label] on a log y-axis.

    Saves to CONFIG['output'] as a sanitised PNG when CONFIG['save_plots']
    is truthy, otherwise shows the figure interactively. The figure is
    closed in either case.
    """
    fig, ax = plt.subplots(nrows=1, ncols=1)
    # data[label] is a list of (iteration, value) pairs (built in main()).
    ax.plot([x[0] for x in data[label]],
            [x[1] for x in data[label]])
    plt.ylabel(desc)
    plt.xlabel('iterations')
    plt.semilogy()
    plt.tight_layout()
    if CONFIG['save_plots']:
        if not exists(CONFIG['output']):
            makedirs(CONFIG['output'])
        fig.savefig(CONFIG['output'] + format_filename(label) + '.png')
    else:
        plt.show()
    plt.close(fig)
def main():
    """Read the JSON training log named in CONFIG['input_file'] and plot every tracked metric."""
    data_rows = defaultdict(list)
    with open(CONFIG['input_file'], 'r') as log:
        log_data = json.load(log)
        for log_item in log_data:
            # The 'validation/main/ap/none' key presumably marks records that
            # carry the per-class AP values -- verify against the log format.
            if 'validation/main/ap/none' in log_item.keys():
                for label in LABELS:
                    data_rows[label].append((log_item['iteration'],
                                             log_item['validation/main/ap/{}'
                                             .format(label)]))
                data_rows['validation/main/map'] \
                    .append((log_item['iteration'],
                             log_item['validation/main/map']))
            for key in OTHER_RESULTS:
                data_rows[key].append((log_item['iteration'],
                                       log_item[key]))
    print(data_rows)
    # One figure per class AP, per loss entry, and one for the mAP.
    for label in LABELS:
        plot_for_key(data_rows, label, 'average precision {}'.format(label))
    for key in OTHER_RESULTS:
        plot_for_key(data_rows, key, key)
    plot_for_key(data_rows, 'validation/main/map', 'mean avg precision')
# Script entry point.
if __name__ == '__main__':
    main()
| true |
ff02953acfbe4201c5ae5c1eeb87369e095e7eb4 | Python | gva-jjoyce/gva_data | /gva/flows/operators/split_text_operator.py | UTF-8 | 460 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | """
Split Text Operator
Splits a text payload into multiple messages but a given separator.
"""
from .internals.base_operator import BaseOperator
class SplitTextOperator(BaseOperator):
    """Operator that splits a text payload into one message per separator-delimited chunk."""

    def __init__(self, separator='\n'):
        self.separator = separator
        super().__init__()

    def execute(self, data, context):
        """Yield (chunk, context) for every separator-delimited piece of *data*."""
        for piece in data.split(self.separator):
            yield piece, context
| true |
b5dd46829f724a9e0aad9666eb2d6a3e7b4b2e84 | Python | sehrbaz/MITx6.00 | /pset01/problem1.py | UTF-8 | 160 | 3.71875 | 4 | [
"MIT"
] | permissive | # Paste your code into this box
# Count the vowels in the string `s` supplied by the grading environment.
vowels = 0
for i in s:
    # Single-character membership test against the vowel list.
    if i in ['a', 'e', 'i', 'o', 'u']:
        vowels +=1
print("Number of vowels:", vowels)
| true |
54de1b031d711236f3dc8ff9e31ee0daecf3e3f7 | Python | defland/looncode | /serve/application/dev_tools.py | UTF-8 | 1,482 | 2.53125 | 3 | [] | no_license | # coding:utf-8
from config.config import *
import commands,re
# Development-only helpers.
# Show git version info in the development environment.
def show_git_data(flag=config["default"].GIT_VERSION_DISPLAY):
    """Collect the latest commit hash/date/message and total commit count via `git log`.

    Returns a dict with keys version/commit/time/count, or None when *flag*
    is falsy. NOTE(review): the default for *flag* is evaluated once at
    import time, so later config changes are not picked up.
    """
    # git log --pretty=oneline -1
    # returns e.g. 451ecd160187ab7ea0c8bcef85a906967dd95d6a added: model add colmmn structure
    if flag == True:
        # Fetch the latest commit's hash, date and message.
        print "show_git_data func is enable"
        (status, output) = commands.getstatusoutput('git log --pretty=fuller -1')
        # print status, output
        recomm = r'commit.(\w{20,})\nAuthor:\s*.*\n.*\n.*\nCommitDate:(.*)\n*(.*)'
        git_data = re.findall(recomm,output)
        # print git_data
        git_data = git_data[0] # take the first (only) match tuple
        # print git_data
        # print type(git_data)
        # Count the total number of commits.
        (status, output) = commands.getstatusoutput('git log --pretty=oneline')
        recomm = r'(\w{20,}) '
        result = re.findall(recomm,output)
        # print result
        # print "total commit count", len(result)
        return {'version':git_data[0],'commit':git_data[2],'time':git_data[1],'count':len(result)}
# Enable or disable the debug/version info block based on config.
# show_git_data()
if config["default"].GIT_VERSION_DISPLAY:
    git_data = show_git_data()
    dev_data = {
        'flag':config["default"].GIT_VERSION_DISPLAY,
        'git': git_data
    }
else:
    dev_data = {
        'flag':config["default"].GIT_VERSION_DISPLAY,
    }
| true |
63c2681558af280dd399bc1d73df21606aed5a55 | Python | slowlightx/ad-peps | /adpeps/ipeps/ctm.py | UTF-8 | 18,319 | 2.8125 | 3 | [
"MIT"
] | permissive | """
Main CTM code
The individual site and boundary tensors come in a
special list-type object (TList), which has extra
indexing features, such as periodic boundary
conditions and shift contexts
All ncon contractions are defined in contractions.yaml
"""
import time
from typing import Tuple
import jax
import jax.numpy as np
import adpeps.ipeps.config as sim_config
from adpeps.tensor.contractions import ncon
from adpeps.tensor.ops import diag_inv, svd
from adpeps.utils.ctmtensors import CTMTensors
from adpeps.utils.nested import Nested
from adpeps.utils.tlist import TList, cur_loc, set_pattern
def run_ctm(tensors, chi, conv_fun=None):
    """Build a CTM driver for *tensors* with boundary dimension *chi* and run it to convergence."""
    driver = CTM(tensors, chi, conv_fun)
    return driver()
class CTM:
    """CTM class

    Iterative driver: each update() renormalizes the boundary tensors;
    convergence is judged either by a user-supplied conv_fun or by the
    boundary singular values.
    """

    def __init__(self, tensors: CTMTensors, chi: int, conv_fun=None):
        """
        Args:
            tensors: input ctmtensors
            chi: boundary bond dimension
        """
        self.tensors = tensors
        self.chi = chi
        self.conv_fun = conv_fun
        self.tol = sim_config.ctm_conv_tol  # Convergence tolerance (singular values)
        self.min_iter = sim_config.ctm_min_iter
        self.max_iter = sim_config.ctm_max_iter
        self.singular_values = None
        self.last_convergence = None
        self.diffs = [None]  # per-step convergence differences (index 0 unused)
        self.n_steps = 0
        self.last_ctm_time = None  # wall-clock seconds of the last update
        self.convergence = np.nan
        self.condition_number = None

    def __call__(self):
        """Run updates until converged; return (tensors, final convergence value)."""
        while not self.converged:
            self.show_progress()
            self.update()
        return self.tensors, self.convergence

    def update(self):
        """Perform an update of all boundary tensors"""
        start = time.time()
        self.tensors, s = renormalize(self.tensors, self.chi)
        end = time.time()
        try:
            # Diagnostic condition estimate: smallest nonzero singular value
            # relative to the largest; gradients are stopped first.
            s = jax.lax.stop_gradient(s)
            s_nz = s[s != 0] / np.max(s)
            cond_s = np.min(s_nz)
        except:
            # NOTE(review): bare except -- deliberately best-effort, but
            # `except Exception` would avoid swallowing interrupts.
            cond_s = np.nan
        self.n_steps += 1
        self.singular_values = s
        self.last_ctm_time = round(end - start, 2)
        self.condition_number = cond_s

    def show_progress(self):
        """Print out the current progress"""
        if self.n_steps > 0 and sim_config.disp_level > 0:
            if self.conv_fun is not None:
                print(
                    f" | CTM step {self.n_steps} conv: {self.diffs[-1]:.3e} time: {self.last_ctm_time} obj: {self.convergence:.6f}"
                )
            else:
                print(
                    f" | CTM step {self.n_steps} conv: {self.diffs[-1]:.3e} time: {self.last_ctm_time}"
                )

    @property
    def converged(self):
        """Check convergence with supplied convergence function

        NOTE: reading this property mutates state (last_convergence,
        convergence, diffs), so each read advances the convergence history.
        """
        if self.conv_fun is not None:
            s = jax.lax.stop_gradient(self.conv_fun(self.tensors))
        else:
            s = self.singular_values
        self.last_convergence = self.convergence
        self.convergence = s
        try:
            diff = np.linalg.norm(self.convergence - self.last_convergence)
            self.diffs.append(diff)
        except:
            # NOTE(review): bare except; `except Exception` would avoid
            # swallowing interrupts.
            diff = np.nan
        if self.n_steps >= self.min_iter and self.diffs[-1] < self.tol:
            return True
        elif self.n_steps >= self.max_iter:
            return True
        else:
            return False
return False
def renormalize(tensors: CTMTensors, chi: int) -> Tuple[CTMTensors, np.ndarray]:
    """
    Performs a CTM iteration
    Updates all sites in the unit cell
    Args:
        tensors: input ctmtensors
        chi: boundary bond dimension
    Returns:
        A tuple containing
        - **tensors** (*CTMTensors*): updated tensors
        - **S** (*np.ndarray*): singular values of C1 (for convergence)
    """
    # All four directional moves share the unit-cell pattern of the A tensors
    with set_pattern(tensors.A.pattern):
        tensors, sl = do_left(tensors, chi)
        tensors = do_right(tensors, chi)
        tensors = do_top(tensors, chi)
        tensors = do_bottom(tensors, chi)
    # Singular values of C1[0,0] - to check for convergence
    S = sl[0]
    return (tensors, S)
"""
---------------------
Individual left, right, top and bottom moves
---------------------
Each move consists of a loop through the sites of the unit cell
in which first the projectors are computed and then the boundary
tensors are updated.
The loops are optimized for readability with a few tricks that are
implemented in the TList or CTMTensors classes:
- cur_loc(x,y): with this context enabled, all TList objects have a
shift applied in their coordinates.
For example:
A[0,0] = 1
A[1,0] = 2
with cur_loc(1,0):
print(A[0,0]) # => 2 (retrieves element A([0,0]+[1,0]) = A[1,0])
Using this context, the operations in the inner loops can be written
without reference to the (i,j) loop indices, as if it's just written
for one site in the unit cell.
- CTMTensors.hold(tensor1, ...): with this context enabled, any values
stored in tensor1 (and other designated tensors) are only put in a
temporary location, so that reading the tensor still yields the
original values. After the context exits, the values will be
overwritten by the temporary values.
Example:
# ts is a CTMTensors object containing site/boundary tensors
ts.C1[0,0] = [1]
ts.C2[0,0] = [2]
with ts.hold('C1'):
ts.C1[0,0] = [10]
ts.C2[0,0] = [20]
print(ts.C1[0,0]) # => [1] since the value [10] is not yet stored
print(ts.C2[0,0]) # => [20]
print(ts.C1[0,0]) # => [10] since the context has exited
With this context, there is no need to store the updated boundary
tensors in the inner loops in temporary objects (since in CTM each
update step should be performed separately).
- TList.is_changed(x,y): tracks whether any of the tensors in the TList
have been updated since the last call to TList.reset_changed().
This is useful for unit cells with pattern restrictions, so that
boundary tensors that correspond to equivalent sites are only
computed once.
"""
def do_left(ts: CTMTensors, chi: int) -> Tuple[CTMTensors, np.ndarray]:
    """
    Perform left CTM move
    Args:
        ts: input tensors
        chi: boundary bond dimension
    Returns:
        A tuple containing
        - **tensors** (*CTMTensors*): updated tensors
        - **sl** (*np.ndarray*): singular values of C1 (for convergence)
    """
    A = ts.A
    unit_cell = A.size
    # Reset change-tracking so tensors of pattern-equivalent sites are
    # computed only once per sweep (see the module docstring on is_changed)
    ts.C1.reset_changed()
    ts.C4.reset_changed()
    ts.T4.reset_changed()
    Pl = TList(shape=unit_cell)  # Upper projectors
    Plb = TList(shape=unit_cell)  # Lower projectors
    sl = TList(shape=unit_cell)  # Singular values (for convergence check)
    # Loop over x direction of the unit cell
    for i in range(A.size[0]):
        # Loop over y direction
        for j in range(A.size[1]):
            # Change the relative shift of the lists
            with cur_loc(i, j):
                if not Pl.is_changed(0, 1):
                    Pl[0, 1], Plb[0, 1], sl[0, 1] = get_projectors_left(ts, chi)
        # Only update the lists after the loop over j is completed
        with ts.hold("all_C1", "all_C4", "all_T4"):
            for j in range(A.size[1]):
                with cur_loc(i, j):
                    if not ts.C1.is_changed(0, 0):
                        ts.update(
                            ("C1", "C4", "T4"),
                            ([0, 0], [0, 0], [0, 0]),
                            renorm_left(ts, Pl, Plb),
                        )
    return ts, sl
def do_right(ts: CTMTensors, chi: int) -> CTMTensors:
    """
    Perform right CTM move
    Args:
        ts: input tensors
        chi: boundary bond dimension
    Returns:
        ts: updated tensors
    """
    A = ts.A
    unit_cell = A.size
    ts.C2.reset_changed()
    ts.C3.reset_changed()
    ts.T2.reset_changed()
    Pr = TList(shape=unit_cell)  # upper projectors
    Prb = TList(shape=unit_cell)  # lower projectors
    for i in range(A.size[0]):
        # First compute all projectors for this column of the unit cell
        for j in range(A.size[1]):
            with cur_loc(i, j):
                if not Pr.is_changed(0, 1):
                    Pr[0, 1], Prb[0, 1], _ = get_projectors_right(ts, chi)
        # hold() delays writing the updated tensors until the column is done
        with ts.hold("all_C2", "all_C3", "all_T2"):
            for j in range(A.size[1]):
                with cur_loc(i, j):
                    if not ts.C2.is_changed(1, 0):
                        ts.update(
                            ("C2", "C3", "T2"),
                            ([1, 0], [1, 0], [1, 0]),
                            renorm_right(ts, Pr, Prb),
                        )
    return ts
def do_top(ts: CTMTensors, chi: int) -> CTMTensors:
    """
    Perform top CTM move
    Args:
        ts: input tensors
        chi: boundary bond dimension
    Returns:
        ts: updated tensors
    """
    A = ts.A
    unit_cell = A.size
    ts.C1.reset_changed()
    ts.C2.reset_changed()
    ts.T1.reset_changed()
    Pt = TList(shape=unit_cell)
    Ptb = TList(shape=unit_cell)
    # For top/bottom moves the outer loop runs over the y direction
    for j in range(A.size[1]):
        for i in range(A.size[0]):
            with cur_loc(i, j):
                if not Pt.is_changed(0, 0):
                    Pt[0, 0], Ptb[0, 0], _ = get_projectors_top(ts, chi)
        # hold() delays writing the updated tensors until the row is done
        with ts.hold("all_C1", "all_C2", "all_T1"):
            for i in range(A.size[0]):
                with cur_loc(i, j):
                    if not ts.C1.is_changed(-1, 0):
                        ts.update(
                            ("C1", "C2", "T1"),
                            ([-1, 0], [2, 0], [0, 0]),
                            renorm_top(ts, Pt, Ptb),
                        )
    return ts
def do_bottom(ts: CTMTensors, chi: int) -> CTMTensors:
    """
    Perform bottom CTM move
    Args:
        ts: input tensors
        chi: boundary bond dimension
    Returns:
        ts: updated tensors
    """
    A = ts.A
    unit_cell = A.size
    ts.C3.reset_changed()
    ts.C4.reset_changed()
    ts.T3.reset_changed()
    Pb = TList(shape=unit_cell)
    Pbb = TList(shape=unit_cell)
    # Outer loop over the y direction, same sweep structure as do_top
    for j in range(A.size[1]):
        for i in range(A.size[0]):
            with cur_loc(i, j):
                if not Pb.is_changed(0, 0):
                    Pb[0, 0], Pbb[0, 0], _ = get_projectors_bottom(ts, chi)
        # hold() delays writing the updated tensors until the row is done
        with ts.hold("all_C3", "all_C4", "all_T3"):
            for i in range(A.size[0]):
                with cur_loc(i, j):
                    if not ts.C3.is_changed(2, 1):
                        ts.update(
                            ("C3", "C4", "T3"),
                            ([2, 1], [-1, 1], [0, 1]),
                            renorm_bottom(ts, Pb, Pbb),
                        )
    return ts
"""
---------------------
Individual left, right, top and bottom projectors
---------------------
The projectors are computed by contracting a corner of the
system (C-tensor + 2 T-tensors + A and Adagger tensors)
in the top (/left) half with a corner in the bottom (/right)
half and performing an svd
"""
def get_projectors_left(
    ts: CTMTensors, chi: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Returns the left projectors
    Gathers the boundary/site tensors of the upper-left and lower-left
    corners (relative to the current location) and delegates the actual
    contraction to _get_projectors_left_impl.
    """
    tensors = (
        ts.C1[-1, -1],
        ts.C4[-1, 2],
        ts.T1[0, -1],
        ts.T3[0, 2],
        ts.T4[-1, 0],
        ts.T4[-1, 1],
        ts.A[0, 0],
        ts.Ad[0, 0],
        ts.A[0, 1],
        ts.Ad[0, 1],
    )
    return _get_projectors_left_impl(*tensors, chi)
def _get_projectors_left_impl(C1, C4, T1, T3, T4u, T4d, Au, Adu, Ad, Add, chi):
    """Contract the upper-left (Q1) and lower-left (Q4) corner halves and
    compute the projectors (contraction networks are in contractions.yaml)."""
    Cs1 = ncon([C1, T1], "proj_left_Cs1")
    Q1 = ncon([Cs1, T4u, Au, Adu], "proj_left_Q1")
    Cs4 = ncon([C4, T3], "proj_left_Cs4")
    Q4 = ncon([Cs4, T4d, Ad, Add], "proj_left_Q4")
    # Reorder legs so the first three are the ones contracted with Q1
    # inside get_projectors
    Q4 = Q4.transpose([3, 4, 5, 0, 1, 2])
    return get_projectors(Q1, Q4, chi)
def get_projectors_right(
    ts: CTMTensors, chi: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Returns the right projectors
    Gathers the boundary/site tensors of the upper-right and lower-right
    corners and delegates to _get_projectors_right_impl.
    """
    tensors = (
        ts.C2[2, -1],
        ts.C3[2, 2],
        ts.T1[1, -1],
        ts.T2[2, 0],
        ts.T2[2, 1],
        ts.T3[1, 2],
        ts.A[1, 0],
        ts.Ad[1, 0],
        ts.A[1, 1],
        ts.Ad[1, 1],
    )
    return _get_projectors_right_impl(*tensors, chi)
def _get_projectors_right_impl(C2, C3, T1, T2u, T2d, T3, Au, Adu, Ad, Add, chi):
    """Contract the upper-right (Q2) and lower-right (Q3) corner halves and
    compute the projectors (contraction networks are in contractions.yaml)."""
    Cs2 = ncon([C2, T1], "proj_right_Cs2")
    Q2 = ncon([Cs2, T2u, Au, Adu], "proj_right_Q2")
    Cs3 = ncon([C3, T3], "proj_right_Cs3")
    Q3 = ncon([Cs3, T2d, Ad, Add], "proj_right_Q3")
    # Reorder legs so the first three are contracted with Q2 in get_projectors
    Q3 = Q3.transpose([3, 4, 5, 0, 1, 2])
    return get_projectors(Q2, Q3, chi)
def get_projectors_top(
    ts: CTMTensors, chi: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Returns the top projectors
    Gathers the boundary/site tensors of the upper-left and upper-right
    corners and delegates to _get_projectors_top_impl.
    """
    tensors = (
        ts.C1[-1, -1],
        ts.C2[2, -1],
        ts.T1[0, -1],
        ts.T1[1, -1],
        ts.T2[2, 0],
        ts.T4[-1, 0],
        ts.A[0, 0],
        ts.Ad[0, 0],
        ts.A[1, 0],
        ts.Ad[1, 0],
    )
    return _get_projectors_top_impl(*tensors, chi)
def _get_projectors_top_impl(C1, C2, T1l, T1r, T2, T4, Al, Adl, Ar, Adr, chi):
    """Contract the upper-left (Q1) and upper-right (Q2) corner halves and
    compute the projectors (contraction networks are in contractions.yaml)."""
    Cs1 = ncon([C1, T4], "proj_top_Cs1")
    Q1 = ncon([Cs1, T1l, Al, Adl], "proj_top_Q1")
    Cs2 = ncon([C2, T2], "proj_top_Cs2")
    Q2 = ncon([Cs2, T1r, Ar, Adr], "proj_top_Q2")
    # Reorder legs so the first three are contracted with Q1 in get_projectors
    Q2 = Q2.transpose([3, 4, 5, 0, 1, 2])
    return get_projectors(Q1, Q2, chi)
def get_projectors_bottom(
    ts: CTMTensors, chi: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Returns the bottom projectors
    Gathers the boundary/site tensors of the lower-right and lower-left
    corners and delegates to _get_projectors_bottom_impl.
    """
    tensors = (
        ts.C3[2, 2],
        ts.C4[-1, 2],
        ts.T2[2, 1],
        ts.T3[0, 2],
        ts.T3[1, 2],
        ts.T4[-1, 1],
        ts.A[0, 1],
        ts.Ad[0, 1],
        ts.A[1, 1],
        ts.Ad[1, 1],
    )
    return _get_projectors_bottom_impl(*tensors, chi)
def _get_projectors_bottom_impl(C3, C4, T2, T3l, T3r, T4, Al, Adl, Ar, Adr, chi):
    """Contract the lower-left (Q4) and lower-right (Q3) corner halves and
    compute the projectors (contraction networks are in contractions.yaml)."""
    Cs4 = ncon([C4, T4], "proj_bottom_Cs4")
    Q4 = ncon([Cs4, T3l, Al, Adl], "proj_bottom_Q4")
    Cs3 = ncon([C3, T2], "proj_bottom_Cs3")
    Q3 = ncon([Cs3, T3r, Ar, Adr], "proj_bottom_Q3")
    # Reorder legs so the first three are contracted with Q4 in get_projectors
    Q3 = Q3.transpose([3, 4, 5, 0, 1, 2])
    return get_projectors(Q4, Q3, chi)
def get_projectors(T1: np.ndarray, T2: np.ndarray, chi: int):
    """Contracts the corners together and computes the
    projectors by performing an svd
    Args:
        T1, T2: rank-6 corner-half tensors; legs 3-5 of T1 are contracted
            with legs 0-2 of T2 (annotation fixed: T1 was mislabelled `int`)
        chi: maximal boundary bond dimension kept in the truncation
    Returns:
        (P1, P2, s): the two projectors and the singular values
    """
    # Cap the new boundary dimension at the full (untruncated) dimension
    full_chi = T1.shape[3] * T1.shape[4] * T1.shape[5]
    new_chi = min(full_chi, chi)
    Rho = ncon([T1, T2], ([-1, -2, -3, 1, 2, 3], [1, 2, 3, -4, -5, -6]))
    Rho_shape = Rho.shape
    # Flatten into a matrix for the SVD
    Rho = np.reshape(Rho, [Rho_shape[0] * Rho_shape[1] * Rho_shape[2], -1])
    u, s, v = svd(Rho, new_chi, "n")
    u = np.reshape(u, [Rho_shape[0], Rho_shape[1], Rho_shape[2], -1])
    v = np.reshape(v.T, [Rho_shape[3], Rho_shape[4], Rho_shape[5], -1])
    # Weight the isometries by 1/sqrt(s) so that P1 @ P2 approximates identity
    inv_s = diag_inv(np.sqrt(s))
    P1 = ncon([T2, v, inv_s], "proj_P1")
    P2 = ncon([T1, u, inv_s], "proj_P2")
    P1 = P1.transpose([3, 0, 1, 2])
    P2 = P2.transpose([3, 0, 1, 2])
    return P1, P2, s
"""
---------------------
Individual left, right, top and bottom boundary tensor updates
---------------------
The boundary tensors are updated for one site at a time from the
tensors of the previous iteration with the site tensors and truncated
by using the projectors
In these functions, the boundary tensors can be wrapped as Nested tensors,
containing both ground-state and excited-state tensors.
When using these Nested tensors, all different combinations are computed
automatically.
For example:
Nested({C1,B_C1,Bd_C1,BB_C1}) * Nested({T1,B_T1,Bd_T1,BB_T1}) ->
Nested({
(C1 * T1),
(B_C1 * T1 + C1 * B_T1),
(Bd_C1 * T1 + C1 * Bd_T1),
(BB_C1 * T1 + B_C1 * Bd_T1 + Bd_C1 * B_T1 + C1 * BB_T1)
})
The phase shifts are only applied to the B and Bd parts of the Nested tensors
"""
def renorm_left(
    ts: CTMTensors, Pl: np.ndarray, Plb: np.ndarray
) -> Tuple[Nested, Nested, Nested]:
    """Absorb one column into the left boundary and truncate with the
    projectors; returns the new (C1, C4, T4) as Nested tensors.
    The .shift(-px) applies the momentum phase to the B/Bd parts of the
    Nested tensors only (see the module-level note above)."""
    new_T4 = ncon(
        [Plb[0, 0], ts.all_T4[-1, 0], ts.all_A[0, 0], ts.all_Ad[0, 0], Pl[0, 1]],
        "doleft_T4",
        normalize=True,
    ).shift(-sim_config.px)
    Cs1 = ncon([ts.all_C1[-1, 0], ts.all_T1[0, 0]], "doleft_Cs1")
    new_C1 = ncon([Cs1, Pl[0, 1]], "doleft_C1", normalize=True).shift(-sim_config.px)
    Cs4 = ncon([ts.all_C4[-1, 0], ts.all_T3[0, 0]], "doleft_Cs4")
    new_C4 = ncon([Cs4, Plb[0, 0]], "doleft_C4", normalize=True).shift(-sim_config.px)
    return new_C1, new_C4, new_T4
def renorm_right(ts, Pr, Prb):
    """Absorb one column into the right boundary and truncate with the
    projectors; returns the new (C2, C3, T2) as Nested tensors (the
    .shift(px) applies the momentum phase to the B/Bd parts only)."""
    new_T2 = ncon(
        [Prb[0, 0], ts.all_T2[2, 0], ts.all_A[1, 0], ts.all_Ad[1, 0], Pr[0, 1]],
        "doright_T2",
        normalize=True,
    ).shift(sim_config.px)
    Cs2 = ncon([ts.all_C2[2, 0], ts.all_T1[1, 0]], "doright_Cs2")
    new_C2 = ncon([Cs2, Pr[0, 1]], "doright_C2", normalize=True).shift(sim_config.px)
    Cs3 = ncon([ts.all_C3[2, 0], ts.all_T3[1, 0]], "doright_Cs3")
    new_C3 = ncon([Cs3, Prb[0, 0]], "doright_C3", normalize=True).shift(sim_config.px)
    return new_C2, new_C3, new_T2
def renorm_top(ts, Pt, Ptb):
    """Absorb one row into the top boundary and truncate with the
    projectors; returns the new (C1, C2, T1) as Nested tensors (the
    .shift(-py) applies the momentum phase to the B/Bd parts only)."""
    new_T1 = ncon(
        [Ptb[-1, 0], ts.all_T1[0, -1], ts.all_A[0, 0], ts.all_Ad[0, 0], Pt[0, 0]],
        "dotop_T1",
        normalize=True,
    ).shift(-sim_config.py)
    Cs1 = ncon([ts.all_C1[-1, -1], ts.all_T4[-1, 0]], "dotop_Cs1")
    new_C1 = ncon([Cs1, Pt[-1, 0]], "dotop_C1", normalize=True).shift(-sim_config.py)
    Cs2 = ncon([ts.all_C2[2, -1], ts.all_T2[2, 0]], "dotop_Cs2")
    new_C2 = ncon([Cs2, Ptb[1, 0]], "dotop_C2", normalize=True).shift(-sim_config.py)
    return new_C1, new_C2, new_T1
def renorm_bottom(ts, Pb, Pbb):
    """Absorb one row into the bottom boundary and truncate with the
    projectors; returns the new (C3, C4, T3) as Nested tensors (the
    .shift(py) applies the momentum phase to the B/Bd parts only)."""
    new_T3 = ncon(
        [Pbb[-1, 0], ts.all_T3[0, 2], ts.all_A[0, 1], ts.all_Ad[0, 1], Pb[0, 0]],
        "dobottom_T3",
        normalize=True,
    ).shift(sim_config.py)
    Cs3 = ncon([ts.all_C3[2, 2], ts.all_T2[2, 1]], "dobottom_Cs3")
    new_C3 = ncon([Cs3, Pbb[1, 0]], "dobottom_C3", normalize=True).shift(sim_config.py)
    Cs4 = ncon([ts.all_C4[-1, 2], ts.all_T4[-1, 1]], "dobottom_Cs4")
    new_C4 = ncon([Cs4, Pb[-1, 0]], "dobottom_C4", normalize=True).shift(sim_config.py)
    return new_C3, new_C4, new_T3
| true |
ab5e00fc194e29ac88ef3c29a258e5d877868e5d | Python | IspML/Euclidean-tsp-playground | /box.py | UTF-8 | 478 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env python3
import random
class Box:
def __init__(self, xy):
x = [c[0] for c in xy]
y = [c[1] for c in xy]
self.xmin = min(x)
self.xmax = max(x)
self.ymin = min(y)
self.ymax = max(y)
self.dx = self.xmax - self.xmin
self.dy = self.ymax - self.ymin
def random_xy(self):
rx = random.random() * self.dx + self.xmin
ry = random.random() * self.dy + self.ymin
return rx, ry
| true |
f504e29c79ee47a02802c9b197722369afa27c07 | Python | Hopw06/Python | /Python_Deep_Dive/Part 1/8.TuplesAsDataRecords/5.NamedTuples-Application_AlternativeToDictionaries.py | UTF-8 | 1,486 | 3.515625 | 4 | [] | no_license | from collections import namedtuple
# Demo: building namedtuples from dictionaries.
data_dict = dict(key1=100, key2=200, key3=300)
Data = namedtuple('Data', data_dict.keys())
print(Data._fields)
# We could try the following (bad idea - relies on value order):
d1 = Data(*data_dict.values())
print(d1)
# It works here, but try a dict whose keys were inserted in another order:
data_dict_2 = dict(key1=100, key3=300, key2=200)
d2 = Data(*data_dict_2.values())
print(d2)  # wrong value - positional values no longer line up with fields
# Instead, we should unpack the dictionary itself, resulting in keyword arguments that will be passed to the Data constructor:
d2 = Data(**data_dict_2)
print(d2)
data_dict = dict(first_name='John', last_name='Cleese', age=42, complaint='dead parrot')
print(data_dict.keys())
print(sorted(data_dict.keys()))
# Sorting the keys gives a deterministic field order
Struct = namedtuple('Struct', sorted(data_dict.keys()))
print(Struct._fields)
d1 = Struct(**data_dict)
print(d1)
print(d1.complaint)
print(data_dict['complaint'])
# getattr is the namedtuple analogue of dict[key] / dict.get
key_name = 'age'
getattr(d1, key_name)
print(data_dict.get('age', None), data_dict.get('invalid_key', None))
print(getattr(d1, 'age', None), getattr(d1, 'invalid_key', None))
# Sample input for tuplify_dicts: dicts with differing key sets
data_list = [
    {'key1': 1, 'key2': 2},
    {'key1': 3, 'key2': 4},
    {'key1': 5, 'key2':6, 'key3': 7},
    {'key2': 100},
]
def tuplify_dicts(dicts):
    """Convert a list of dicts (possibly with differing keys) into a list
    of namedtuples sharing one field set.

    Fields are the union of all keys, sorted so the field order is
    deterministic (iterating a set would vary across runs because of
    string-hash randomization). Keys missing from a dict default to None.
    """
    keys = {key for dict_ in dicts for key in dict_.keys()}
    Struct = namedtuple('Struct', sorted(keys))
    # Give every field a None default so sparse dicts still construct
    Struct.__new__.__defaults__ = (None, ) * len(Struct._fields)
    return [Struct(**dict_) for dict_ in dicts]
print(tuplify_dicts(data_list))  # keys missing from a dict appear as None (defaults set in tuplify_dicts)
0ef4edf6c3d1d0a00cd6945abcf9a724f04e7a54 | Python | yushu-liu/GerogiaTech | /CS4803-MLT/assess_learners/DTLearner.py | UTF-8 | 1,963 | 2.953125 | 3 | [] | no_license | import numpy as np
import scipy.stats as stats
class DTLearner(object):
def __init__(self,leaf_size = 1, verbose = False):
self.verbose = verbose
self.leaf_size = leaf_size
self.tree = {}
def author(self):
return 'nlerner3'
def addEvidence(self, dataX, dataY):
self.tree = self.build_tree(dataX, dataY)
print (self.tree)
def query(self, points):
result = []
for point in points:
result.append(traverse_tree(self.tree, point))
return result
def build_tree(self, dataX, dataY):
if dataX.shape[0] <= self.leaf_size or np.all(dataY == dataY[0]):
return [
np.mean(dataY), None, None
]
correlation = 0
split_index= -1
for i in range(dataX.shape[1]):
corr = abs(stats.pearsonr(dataX[:,i], dataY)[0])
if corr > correlation:
correlation = corr
split_index = i
split_val = np.median(dataX[:,split_index])
left_ind = np.where(dataX[:,split_index] <= split_val)
right_ind = np.where(dataX[:,split_index] > split_val)
if left_ind[0].shape[0] == 0 or right_ind[0].shape[0] == 0:
return [
np.mean(dataY), None, None
]
left_tree = self.build_tree(dataX[left_ind], dataY[left_ind])
right_tree = self.build_tree(dataX[right_ind], dataY[right_ind])
root = [split_index, split_val]
return [root, left_tree, right_tree]
# Indices into the [root, left, right] node representation.
ROOT = 0
ROOT_SPLIT_INDEX = 0
ROOT_SPLIT_VALUE = 1
LEFT = 1
RIGHT = 2

def traverse_tree(tree, point):
    """Walk the decision tree for `point`; return the value at the leaf.

    A leaf is a node whose children are both None; its ROOT slot holds the
    prediction. Internal nodes store [split_index, split_value] in ROOT.
    """
    node = tree
    while node[LEFT] is not None or node[RIGHT] is not None:
        split_index = node[ROOT][ROOT_SPLIT_INDEX]
        split_value = node[ROOT][ROOT_SPLIT_VALUE]
        # Go left when the point's split feature is at or below the split value
        node = node[LEFT] if point[split_index] <= split_value else node[RIGHT]
    return node[ROOT]
| true |
dd3db1f0233dfd8ab84b527f5bcfffa90d543ab4 | Python | niharika210400/practice.python | /Zero-sum-triplet.py | UTF-8 | 2,066 | 3.40625 | 3 | [] | no_license | # Ques: https://practice.geeksforgeeks.org/problems/find-triplets-with-zero-sum/1
# Soln:
''' Your task is to returns 1 if there is triplet with sum equal
to 0 present in arr[], else return 0'''
def findTriplets(arr, n):
    """Return 1 if some triplet in arr[:n] sums to zero, else 0.

    For each i, does a hash-set pair-sum search over arr[i+1:] for two
    elements adding up to -arr[i] (O(n^2) time, O(n) space).
    """
    for i in range(0, n - 1):
        seen = set()
        target = -arr[i]
        for j in range(i + 1, n):
            # arr[i] + arr[k] + arr[j] == 0 for some earlier k > i?
            if (target - arr[j]) in seen:
                return 1
            seen.add(arr[j])
    return 0
#{
# Driver Code Starts
#Initial Template for Python 3
import atexit
import io
import sys
_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER
@atexit.register
def write():
sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())
if __name__=='__main__':
t = int(input())
for i in range(t):
n=int(input())
a=list(map(int,input().strip().split()))
print(findTriplets(a,n))
# } Driver Code Ends
# Alt Soln:
''' Your task is to returns 1 if there is triplet with sum equal
to 0 present in arr[], else return 0'''
def findTriplets(arr,n):
    """Return 1 if any triplet of arr[:n] sums to zero, else 0.

    Alternative formulation: for each pair (i, j) checks whether the value
    -(arr[i] + arr[j]) was already seen strictly between i and j. Unlike
    the first variant, scanning continues even after a triplet is found.
    """
    found = 0
    for i in range(n - 1):
        partners = set()
        for j in range(i + 1, n):
            needed = -(arr[i] + arr[j])
            if needed in partners:
                found = 1
            else:
                # Only remember values that did not complete a triplet,
                # mirroring the original's else-branch behaviour.
                partners.add(arr[j])
    return found
#{
# Driver Code Starts
#Initial Template for Python 3
import atexit
import io
import sys
_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER
@atexit.register
def write():
sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())
if __name__=='__main__':
t = int(input())
for i in range(t):
n=int(input())
a=list(map(int,input().strip().split()))
print(findTriplets(a,n))
# } Driver Code Ends
| true |
b9664793447af2c55bd4353a37dbcd77664f244f | Python | axie66/QuantumCrypto | /quantum_protocols.py | UTF-8 | 6,755 | 3.40625 | 3 | [] | no_license | #########################################################################
# quantum_protocols.py
#
# 15-251 Project
# Quantum Cryptography Protocols in IBM Qiskit
#
# Written by Alex Xie (alexx)
#########################################################################
from qiskit import QuantumCircuit, execute, Aer
import numpy as np
#########################################################################
# B92 Implementation
# Written by me
# Inspired by BB84 implementation in Qiskit textbook:
# https://qiskit.org/textbook/ch-algorithms/quantum-key-distribution.html
#########################################################################
def encode_bit(b):
    """Encode one classical bit as a B92 photon polarization state:
    0 -> 0-degree polarization (|0>), 1 -> 45-degree polarization (|+>)."""
    # The photon is modelled as a single qubit in a 1-qubit circuit.
    qc = QuantumCircuit(1, 1)
    if b != 0:
        qc.h(0)  # Hadamard rotates |0> to the 45-degree state
    # b == 0: leave the qubit in |0> (0-degree polarization)
    qc.barrier()
    return qc
def encode_bits(bits):
    """Encode each bit via encode_bit; returns one circuit per bit."""
    return list(map(encode_bit, bits))
def decode_states(states):
    '''Decodes a sequence of polarization states into bits
    Returns (bits, successes): the conclusively decoded bits and the
    indices of the photons they came from. In B92 only measurements that
    yield outcome 1 are conclusive (see the comment below).
    NOTE(review): measurement gates are appended to the input circuits,
    so the circuits are mutated in place.
    '''
    backend = Aer.get_backend('qasm_simulator')
    bits = []
    successes = []
    for index, state in enumerate(states):
        # Randomly pick one of the two measurement filters per photon
        choice = np.random.randint(2)
        if choice == 1:
            # measure with rectilinear filter
            state.measure(0, 0)
        else:
            # measure with diagonal filter
            state.h(0)
            state.measure(0, 0)
        result = execute(state, backend, shots=1, memory=True).result()
        out = int(result.get_memory()[0])
        if out == 1:
            # We can only get a 1 if the filter used the measure was NOT the
            # same as the basis the photon was in.
            # If choice == 1, then the photon must have been 45 degrees, which
            # corresponds to a 1 bit.
            # If choice == 0, then the photon must have been 0 degrees, which
            # corresponds to a 0 bit.
            bits.append(choice)
            successes.append(index)
    return bits, successes
######################
# B92 Demo
######################
print("******************************")
print("B92 Demo")
print("******************************\n")
n = 2000  # number of photons Annie sends (this takes a bit to run)
annie_original_secret = np.random.randint(2, size=n)  # Annie's original secret bits
annie_photons = encode_bits(annie_original_secret)  # Annie's photons, which get sent to Britta
britta_secret, success_indices = decode_states(annie_photons)
# Annie keeps the indices that Britta was able to draw conclusions from.
annie_new_secret = [annie_original_secret[i] for i in success_indices]
assert britta_secret == annie_new_secret  # Sanity check
print('Britta and Annie\'s secret keys are the same.')
# We should expect the secret key to be n/4 bits long.
# For each photon, we have a 1/2 chance of measuring with the opposite filter.
# Then, given that we measured with the opposite filter, we have a 1/2 chance
# of getting the basis state orthogonal to 0 or 45 degrees. Thus, we have a
# 1/2 * 1/2 = 1/4 chance of being able to determine any given photon's
# polarization and in doing so obtain the bit Annie sent.
print('The length of the B92 secret key is:', len(britta_secret))
print('\n\n')
#########################################################################
# Kak's Protocol Implementation
# Written by me
#########################################################################
def message2bits(message):
    """Convert a string into a flat list of bits, 8 per (ASCII) character."""
    bits = []
    for ch in message:
        # Binary representation, zero-padded on the left to (at least) 8 bits
        bits.extend(int(digit) for digit in format(ord(ch), 'b').zfill(8))
    return bits
def bits2message(bits):
    """Convert a flat bit list (8 bits per character, MSB first) to a string."""
    chars = []
    for start in range(0, len(bits), 8):
        value = 0
        # Fold the 8 bits of this character MSB-first into an integer
        for offset in range(8):
            value = (value << 1) | bits[start + offset]
        chars.append(chr(value))
    return "".join(chars)
def prepare_states(bits):
    '''Encodes bits as polarization basis states
    0 -> |0> (0-degree polarization), 1 -> |1> (90-degree polarization);
    returns one 1-qubit circuit per bit.
    '''
    states = []
    for b in bits:
        qc = QuantumCircuit(1, 1)  # photon represented as qubit
        if b == 0:
            pass  # 0 degree polarization
        else:
            qc.x(0)  # 90 degree polarization
        qc.barrier()
        states.append(qc)
    return states
def decode_states(states):
    '''Converts polarization basis states back to bits
    NOTE(review): measurement gates are appended to the circuits in place,
    so the input circuits are mutated (this is what lets the Kak demo show
    Eve's interception perturbing Bob's message).
    '''
    bits = []
    backend = Aer.get_backend('qasm_simulator')
    for state in states:
        state.measure(0, 0)
        result = execute(state, backend, shots=1, memory=True).result()
        bit = int(result.get_memory()[0])
        bits.append(bit)
    return bits
def apply_rotation_operators(states, angles):
    '''For all i, applies rotation of angle angles[i] on
    polarization state states[i]
    The circuits are modified in place and also returned as a new list.
    '''
    new_states = []
    for state, angle in zip(states, angles):
        state.ry(angle, 0)  # perform rotation about y-axis
        state.barrier()
        new_states.append(state)
    return new_states
######################
# Kak's Protocol Demo
######################
print("******************************")
print("Kak's Protocol Demo")
print("******************************\n")
is_adversary = 1  # Can toggle whether an adversary is trying to intercept message.
alice_message = 'I hate cilantro :('  # The message that Alice wants to send to Bob.
print('The original message:', alice_message)
alice_bits = message2bits(alice_message)
# Angles of Alice's and Bob's secret rotation operators (one per bit)
alice_angles = np.random.random(size=len(alice_bits)) * np.pi/2
bob_angles = np.random.random(size=len(alice_bits)) * np.pi/2
# Stage 1: Alice encodes her bits as polarizations and applies her rotation
# operators on all the bits.
initial_photons = prepare_states(alice_bits)
step1_photons = apply_rotation_operators(initial_photons, alice_angles)
# Stage 2: Bob applies his rotation operators on all the bits Alice sent.
step2_photons = apply_rotation_operators(step1_photons, bob_angles)
print(f'There is {"an" if is_adversary else "no"} adversary.')
if is_adversary:
    # decode_states measures (and thus mutates) the circuits, so Eve's
    # interception disturbs the states Bob later decodes.
    eve_message = bits2message(decode_states(step2_photons))
    print('What Eve sees:', eve_message)
# Stage 3: Alice applies the inverses of her rotation operators on all the bits.
# Bob can now apply the inverses of his rotation operators and decode the photons.
step3_photons = apply_rotation_operators(step2_photons, -alice_angles)
bob_photons = apply_rotation_operators(step3_photons, -bob_angles)
bob_bits = decode_states(bob_photons)
bob_message = bits2message(bob_bits)
print('What Bob receives:', bob_message)
89014bb267cef9ca56b2280fb5560c2e0f01723d | Python | goldcerebrum/Getting_Started_with_Python | /ass3_1.py | UTF-8 | 191 | 3.15625 | 3 | [] | no_license | hrs = input("Enter Hours:")
h = float(hrs)
rate = input("Enter Rate:")
r = float(rate)
if h > 40 :
th = h-40
rh = 40
rr = r*1.5
print (rh*r+th*rr)
else :
print (h*r)
| true |
acfe7ed06c0d089c5445d8430e00d9e655e0f8cc | Python | GBoshnakov/SoftUni-Fund | /RegEx/Extract the Links.py | UTF-8 | 228 | 3.09375 | 3 | [] | no_license | import re
text = input()
links = []
regex = r"www.[a-zA-Z0-9\-]+(\.[a-zA-Z]+)+"
while text:
result = [el.group() for el in re.finditer(regex, text)]
links.extend(result)
text = input()
print(*links, sep="\n")
| true |
d9abfb7c8449fd650b3caa28f0d8a209ba8b0a4a | Python | Gaurav14cs17/Tracker | /people counting using sort/Direction.py | UTF-8 | 787 | 2.90625 | 3 | [
"MIT"
] | permissive | def compute_drone_action((x1,y1), (x2,y2)):
#define the possible turning and moving action as strings
turning = ""
moving = ""
raise = ""
area, center = compute_area_and_center((x1,y1), (x2, y2))
#obtain a x center between 0.0 and 1.0
normalized_center[x] = center[x] / image.width
#obtain a y center between 0.0 and 1.0
normalized_center[y] = center[y] / image.width
if normalized_center[x] > 0.6 :
turning = "turn_right"
elif normalized_center[x] < 0.4 :
turning = "turn_left"
if normalized_center[y] > 0.6 :
raise = "upwards"
elif normalized_center[y] < 0.4 :
raise = "downwards"
#if the area is too big move backwards
if area > 100 :
moving = "backwards"
elif area < 80 :
moving = "ahead"
return turning, moving, raise
| true |
8f2ab7a791743907315de597518af9062836bc29 | Python | rajkiran485/machine-learning | /word2vec/utils.py | UTF-8 | 1,069 | 3.390625 | 3 | [] | no_license | from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
def review_to_words(review):
    """Clean one raw review: strip HTML tags, keep letters only, lowercase,
    drop English stopwords; returns a single space-joined string."""
    review_text = BeautifulSoup(review).get_text()
    letters_only = re.sub("[^a-zA-Z]"," ", review_text)
    words = letters_only.lower().split()
    # NOTE(review): the stopword set is rebuilt on every call - consider
    # hoisting it to module level for speed.
    stops = set(stopwords.words("english"))
    meaningful_words = [w for w in words if not w in stops]
    return (" ".join(meaningful_words))
def review_to_wordlist(review, remove_stopwords=False):
    """Like review_to_words, but returns a list of words and makes the
    stopword removal optional (word2vec training usually keeps stopwords)."""
    review_text = BeautifulSoup(review).get_text()
    review_text = re.sub("[^a-zA-Z]"," ", review_text)
    words = review_text.lower().split()
    if remove_stopwords:
        stops = set(stopwords.words("english"))
        words = [w for w in words if not w in stops]
    return(words)
def review_to_sentences(review, tokenizer, remove_stopwords=False):
    """Split a review into sentences with `tokenizer` (e.g. an NLTK punkt
    tokenizer) and convert each non-empty sentence to a word list."""
    raw_sentences = tokenizer.tokenize(review.strip())
    sentences = []
    for raw_sentence in raw_sentences:
        # Skip empty sentences produced by the tokenizer
        if len(raw_sentence) > 0:
            sentences.append(review_to_wordlist(raw_sentence, remove_stopwords))
    return sentences
| true |
8f4159927a99fd6b9afeef568547c6d3abc44b91 | Python | Lyasinkovska/BeetRootPython | /lesson_34/task_2.py | UTF-8 | 1,540 | 3.40625 | 3 | [] | no_license | """
Requests using concurrent and multiprocessing libraries
Download all comments from a subreddit of your choice using URL: https://api.pushshift.io/reddit/comment/search/ .
As a result, store all comments in chronological order in JSON and dump it to a file. For this task use concurrent and
multiprocessing libraries for making requests to Reddit API.
"""
import datetime
import json
import multiprocessing
from functools import partial
import requests
from lesson_34.task_1 import time_execution
def get_request_text(url: str, parameter: str) -> dict:
    """GET `url` filtered by subreddit `parameter`; return the parsed JSON body."""
    resp = requests.get(url, {'subreddit': parameter})
    return resp.json()
def get_comments(request_text: dict) -> list:
    """Extract comments from a pushshift API response.

    Returns a list of one-entry dicts {formatted_time: comment_body} in the
    reverse of the API order.

    NOTE(review): datetime.fromtimestamp() uses the local timezone - confirm
    UTC is not required here.
    """
    comments = []
    for res in request_text["data"]:
        time = datetime.datetime.fromtimestamp(res['created_utc']).strftime("%d %b %Y, %H:%M:%S")
        comments.append({time: res["body"]})
    # append + reverse is O(n) overall; the original insert(0, ...) per
    # item was O(n^2).
    comments.reverse()
    return comments
@time_execution
def get_comments_from_reddit(url: str, params) -> list:
    """Fetch the comment payload for each subreddit in `params` in parallel.

    Returns one parsed-JSON dict per subreddit, in input order.
    """
    # Bug fix: use the pool as a context manager so the worker processes are
    # always terminated (the original never closed/joined the pool).
    with multiprocessing.Pool(processes=4) as pool:
        return pool.map(partial(get_request_text, url), params)
def save_to_json_file(text: list, filename: str) -> None:
    """Write `text` to `filename` as pretty-printed (indent=2) JSON."""
    with open(filename, 'w') as out:
        out.write(json.dumps(text, indent=2))
if __name__ == '__main__':
    # Pushshift comment-search endpoint; one request per subreddit below
    URL = 'https://api.pushshift.io/reddit/comment/search/'
    subreddits = ("socialskills", "CasualIreland", "RoastMe")
    file_name = 'reddit_comments_socialskills.json'
    comments = get_comments_from_reddit(URL, params=subreddits)
    save_to_json_file(comments, file_name)
63c1c44c0957a99adcc941fdb5b6ceed487b378d | Python | DemondLove/Python-Programming | /CodeFights/12. Sort by Height.py | UTF-8 | 1,146 | 4.0625 | 4 | [] | no_license | '''
Some people are standing in a row in a park. There are trees between them which cannot be moved. Your task is to rearrange the people by their heights in a non-descending order without moving the trees. People can be very tall!
Example
For a = [-1, 150, 190, 170, -1, -1, 160, 180], the output should be
sortByHeight(a) = [-1, 150, 160, 170, -1, -1, 180, 190].
Input/Output
[execution time limit] 4 seconds (py3)
[input] array.integer a
If a[i] = -1, then the ith position is occupied by a tree. Otherwise a[i] is the height of a person standing in the ith position.
Guaranteed constraints:
1 ≤ a.length ≤ 1000,
-1 ≤ a[i] ≤ 1000.
[output] array.integer
Sorted array a with all the trees untouched.
'''
def sortByHeight(a):
trees = []
people = []
for i in range(len(a)):
if a[i] == -1:
trees.append(i)
else:
people.append(a[i])
li = []
for i in range(len(a)):
if i in trees:
li.append(-1)
else:
nextPerson = min(people)
li.append(nextPerson)
people.remove(nextPerson)
return li
| true |
d1c62c198af8a985ffda96be2c783d7f1b4caef9 | Python | peterheim1/robbie_ros | /robbie_test/nodes/patrol_smach.py | UTF-8 | 4,628 | 2.578125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
""" patrol_smach.py - Version 1.0 2013-04-12
Control a robot to patrol a square area using SMACH
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2013 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.5
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.htmlPoint
"""
import rospy
from smach import State, StateMachine
from smach_ros import SimpleActionState, IntrospectionServer
from geometry_msgs.msg import Twist
from rbx2_tasks.task_setup import *
class Patrol():
    """Drive the robot around a list of waypoints with a SMACH state machine.

    Builds one move_base SimpleActionState per waypoint, chains them into a
    linear state machine, and executes the patrol loop ``self.n_patrols``
    times (forever when it is -1), tracking the success rate of goals.

    Assumes ``setup_task_environment`` populates ``self.waypoints``,
    ``self.n_patrols``, ``self.patrol_count`` and ``self.cmd_vel_pub``
    (defined in rbx2_tasks.task_setup -- TODO confirm).
    """

    def __init__(self):
        rospy.init_node('patrol_smach', anonymous=False)

        # Set the shutdown function (stop the robot)
        rospy.on_shutdown(self.shutdown)

        # Initialize a number of parameters and variables
        setup_task_environment(self)

        # Track success rate of getting to the goal locations
        self.n_succeeded = 0
        self.n_aborted = 0
        self.n_preempted = 0

        # A list to hold the navigation waypoint states
        nav_states = list()

        # Turn the waypoints into SMACH move_base action states
        for waypoint in self.waypoints:
            nav_goal = MoveBaseGoal()
            nav_goal.target_pose.header.frame_id = 'map'
            nav_goal.target_pose.pose = waypoint
            move_base_state = SimpleActionState('move_base', MoveBaseAction, goal=nav_goal, result_cb=self.move_base_result_cb,
                                                exec_timeout=rospy.Duration(40.0),
                                                server_wait_timeout=rospy.Duration(10.0))
            nav_states.append(move_base_state)

        # Initialize the patrol state machine
        self.sm_patrol = StateMachine(outcomes=['succeeded','aborted','preempted'])

        # Chain the states; every outcome (even aborted/preempted) advances
        # to the next waypoint so one failed goal does not end the patrol.
        # NAV_STATE_4 revisits waypoint 0 to close the loop.
        with self.sm_patrol:
            StateMachine.add('NAV_STATE_0', nav_states[0], transitions={'succeeded':'NAV_STATE_1','aborted':'NAV_STATE_1','preempted':'NAV_STATE_1'})
            StateMachine.add('NAV_STATE_1', nav_states[1], transitions={'succeeded':'NAV_STATE_2','aborted':'NAV_STATE_2','preempted':'NAV_STATE_2'})
            StateMachine.add('NAV_STATE_2', nav_states[2], transitions={'succeeded':'NAV_STATE_3','aborted':'NAV_STATE_3','preempted':'NAV_STATE_3'})
            StateMachine.add('NAV_STATE_3', nav_states[3], transitions={'succeeded':'NAV_STATE_4','aborted':'NAV_STATE_4','preempted':'NAV_STATE_4'})
            StateMachine.add('NAV_STATE_4', nav_states[0], transitions={'succeeded':'','aborted':'','preempted':''})

        # Create and start the SMACH introspection server (for smach_viewer)
        intro_server = IntrospectionServer('patrol', self.sm_patrol, '/SM_ROOT')
        intro_server.start()

        # Execute the state machine for the specified number of patrols
        while (self.n_patrols == -1 or self.patrol_count < self.n_patrols) and not rospy.is_shutdown():
            sm_outcome = self.sm_patrol.execute()
            self.patrol_count += 1
            rospy.loginfo("FINISHED PATROL LOOP: " + str(self.patrol_count))

        rospy.loginfo('State Machine Outcome: ' + str(sm_outcome))

        intro_server.stop()

    def move_base_result_cb(self, userdata, status, result):
        """SimpleActionState result callback: tally goal outcomes and log the
        running success rate. Any other terminal status is ignored."""
        if status == actionlib.GoalStatus.SUCCEEDED:
            self.n_succeeded += 1
        elif status == actionlib.GoalStatus.ABORTED:
            self.n_aborted += 1
        elif status == actionlib.GoalStatus.PREEMPTED:
            self.n_preempted += 1

        # All three counters can still be zero (e.g. a RECALLED status),
        # so guard only against the division by zero that would cause.
        try:
            rospy.loginfo("Success rate: " + str(100.0 * self.n_succeeded / (self.n_succeeded + self.n_aborted + self.n_preempted)))
        except ZeroDivisionError:
            pass

    def shutdown(self):
        """Preempt the state machine and publish a zero Twist to halt the base."""
        rospy.loginfo("Stopping the robot...")

        self.sm_patrol.request_preempt()

        self.cmd_vel_pub.publish(Twist())

        rospy.sleep(1)
if __name__ == '__main__':
    try:
        Patrol()
    except rospy.ROSInterruptException:
        # Normal exit path when ROS shuts the node down mid-patrol.
        rospy.loginfo("SMACH test finished.")
| true |