| blob_id (stringlengths 40-40) | language (stringclasses, 1 value) | repo_name (stringlengths 5-133) | path (stringlengths 2-333) | src_encoding (stringclasses, 30 values) | length_bytes (int64, 18-5.47M) | score (float64, 2.52-5.81) | int_score (int64, 3-5) | detected_licenses (listlengths 0-67) | license_type (stringclasses, 2 values) | text (stringlengths 12-5.47M) | download_success (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
a13a93497e52fbd9c31295b3cd413ccbc04e8f29
|
Python
|
KSrinuvas/ALL
|
/aa/ref.py
|
UTF-8
| 514
| 3.375
| 3
|
[] |
no_license
|
class American(object):
    @staticmethod
    def __init__ (name):
        self.__name = name
        print (self.__name)
    def printNationality():
        print ("America")
    def Add(self,a,b):
        sum = a +b
        return sum
    def __As(self,bb):
        print (bb)
        self.__name = bb
#anAmerican = American(20,100)
#anAmerican.printNationality()
#American.printNationality()
#gg = anAmerican.Add(10,20)
#print (gg)
#anAmerican._American__As(10)
#printNationality()
| true
|
b793caf9ccebfa217e33a4dabf8a2f3afe38d2e7
|
Python
|
DaHuO/Supergraph
|
/codes/CodeJamCrawler/16_0_3/baguspewe/sol.py
|
UTF-8
| 1,100
| 3.234375
| 3
|
[] |
no_license
|
def is_prime(n):
    if n%2 == 0 or n%3 == 0:
        return False
    else:
        i = 5
        while i*i < n:
            if n%i == 0 or n%(i+2) == 0:
                return False
            i+=6
        return True
def non_trivial_divisor(n):
    if n%2 == 0:
        return 2
    elif n%3 == 0:
        return 3
    else:
        i = 5
        while i < 1000000: #i*i < n:
            if n%i == 0:
                return i
            elif n%(i+2) == 0:
                return i+2
            i+=6
        return 1
t = int(raw_input()) # read a line with a single integer
for i in xrange(1, t+1):
    [N, J] = [int(n) for n in raw_input().split(" ")]
    print "Case #1:"
    j = 0
    v = 0
    ntds = None
    while j < J:
        val = (1 << N-1) + (v << 1) + 1
        ntds = []
        base = {}
        coin_str = "{0:b}".format(val)
        coin = [int(i) for i in list(coin_str)]
        for x in xrange(2, 11):
            mult = 1
            base[x] = 0
            for y in xrange(1, N+1):
                base[x] += coin[-y]*mult
                mult *= x
            ntd = non_trivial_divisor(base[x])
            if (ntd > 1):
                ntds.append(ntd)
            else:
                break
        if len(ntds) == 9:
            j += 1
            print "%s %d %d %d %d %d %d %d %d %d" % (coin_str, ntds[0], ntds[1], ntds[2], ntds[3], ntds[4], ntds[5], ntds[6], ntds[7], ntds[8])
        v += 1
| true
|
af75520b585510df8bd7ec64866709de7dc4abe1
|
Python
|
matthew-carpenter/nplib
|
/scratch2.py
|
UTF-8
| 975
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from lib import to_hex
import codecs
import sys
encoding = sys.argv[0]
filename = encoding + '.txt'
text = u'pi: π'
encoded = text.encode('utf-8')
decoded = encoded.decode('utf-8')
print 'Raw :', repr(text)
print 'UTF-8 :', to_hex(text.encode('utf-8'), 1)
print 'UTF-16:', to_hex(text.encode('utf-16'), 2)
print 'Original :', repr(text)
print 'Encoded :', to_hex(encoded, 1), type(encoded)
print 'Decoded :', repr(decoded), type(decoded)
print 'Writing to', filename
with codecs.open(filename, mode='wt', encoding=encoding) as f:
    f.write(u'pi: \u03c0')
# Determine the byte grouping to use for to_hex()
nbytes = {'utf-8': 1,
          'utf-16': 2,
          'utf-32': 4,
          }.get(encoding, 1)
# Show the raw bytes in the file
print 'File contents:'
with open(filename, mode='rt') as f:
    print to_hex(f.read(), nbytes)
def main():
    pass
# Execute as a script
if __name__ == '__main__':
    main()
| true
|
f01f52cc315c6ae23b1f5eaf0220e97bffda869b
|
Python
|
jacob-brown/python_AdventureGame
|
/mapbuilding.py
|
UTF-8
| 1,439
| 3.0625
| 3
|
[] |
no_license
|
import numpy as np
import room
class mapOfWorld:
    def __init__(self, mapSizeBlocks):
        self.mapSizeBlocks = mapSizeBlocks
        self.worldMap = []
        self.worldRooms = {}
    @property
    def worldMap(self):
        return self.__worldMap
    @worldMap.setter
    def worldMap(self, newMap):
        self.__worldMap = newMap
    def makeRandomMap(self):
        self.__worldMap = np.arange(self.mapSizeBlocks)
        np.random.shuffle(self.__worldMap)
    def addRoomsToWorld(self):
        roomTypes = [
            "kitchen",
            "dinning",
            "lounge",
            "bathroom",
            "cave",
            "library",
        ]
        descriptionTypes = [
            "kitchen description",
            "dinning description",
            "lounge description",
            "bathroom description",
            "cave description",
            "library description",
        ]
        for i in self.worldMap:
            # randomly select room and description
            roomSelectedRandomly = np.random.choice(roomTypes, replace=False)
            descriptionSelectedRandomly = np.random.choice(
                descriptionTypes, replace=False
            )
            # make a dictionary of room objects
            self.worldRooms[i] = room.room(
                roomID=i,
                name=roomSelectedRandomly,
                description=descriptionSelectedRandomly,
            )
| true
|
0ac74626d58b1a49402e4cdda475dcc0f1380981
|
Python
|
pschauhan480/coding_python
|
/program_28.py
|
UTF-8
| 502
| 3.703125
| 4
|
[] |
no_license
|
class Chef:
    def make_chicken(self):
        print("The chef is making chicken")
        return
    def make_salad(self):
        print("The chef is making salad")
        return
    def make_special_dish(self):
        print("The chef is making bbq ribs")
        return
class ChineseChef(Chef):
    def make_fried_rice(self):
        print("The Chinese chef is making fried rice")
my_chef = Chef()
my_chef.make_special_dish()
my_chinese_chef = ChineseChef()
my_chinese_chef.make_fried_rice()
| true
|
5d9978cdcf3f0f704c413a27472996de8521c698
|
Python
|
umunusb1/Python_for_interview_preparation
|
/all/Interview_Questions/spell_checker.py
|
UTF-8
| 1,191
| 4.15625
| 4
|
[] |
no_license
|
#!/usr/bin/python
"""
Purpose: Write a (bad) spell checker
The product department needs a quick and dirty spell-checker.
Should return a correctly spelled word if found, otherwise return None.
1. A correctly spelled word is defined as one that appears in the list
2. A matching input word would have
a. The same combination of characters are the valid word
b. The same number of total characters as the valid word
OR
c. Beginning of partial word i.e., like '%word' search in SQL
"""
valid_words = ('cat', 'bat')
def spell_checker(word):
    for each_word in valid_words:
        if (each_word == word or
                each_word.startswith(word) or
                sorted(each_word) == sorted(word)):
            return each_word
if __name__ == '__main__':
    assert spell_checker('cat') == 'cat'
    assert spell_checker('act') == 'cat'
    assert spell_checker('tac') == 'cat'
    assert spell_checker('atc') == 'cat'
    assert spell_checker('ca') == 'cat'
    assert spell_checker('ba') == 'bat'
    assert spell_checker('acct') == None
    assert spell_checker('batty') == None
    assert spell_checker('bats') == None
    assert spell_checker('at') == None
| true
|
dca86c568bce779f59644d393cf0315034d26c08
|
Python
|
hakopako/algorithm-and-ds-note
|
/Algorithm/Stack/SortWithTowStacks.py
|
UTF-8
| 968
| 3.703125
| 4
|
[] |
no_license
|
class Stack:
    def __init__(self):
        self.__stack = []
    def push(self, data):
        self.__stack.append(data)
    def pop(self):
        return self.__stack.pop()
    def peek(self):
        return self.__stack[-1]
    def is_empty(self):
        return len(self.__stack) == 0
    def print_stack(self):
        print(self.__stack)
class SortStack:
    def __init__(self):
        self.tmp_stack = Stack()
    def execute(self, stack):
        while not stack.is_empty():
            item = stack.pop()
            while not self.tmp_stack.is_empty() and int(self.tmp_stack.peek()) < int(item):
                stack.push(self.tmp_stack.pop())
            self.tmp_stack.push(item)
        while not self.tmp_stack.is_empty():
            stack.push(self.tmp_stack.pop())
        return stack
########################################
## Execute
########################################
sort_stack = SortStack()
stack = Stack()
for i in [4,3,6,1,8,9,5]:
    stack.push(i)
stack.print_stack()
sort_stack.execute(stack).print_stack()
| true
|
a561137ccb4441995073bb807dbcbd128e81ff23
|
Python
|
sumanth-fl/Census-automation
|
/census project.py
|
UTF-8
| 981
| 2.671875
| 3
|
[] |
no_license
|
import openpyxl
import os
os.chdir('g:')
wb = openpyxl.load_workbook('censuspopdata.xlsx')
sheet = wb.get_sheet_by_name('Population by Census Tract')
wb2=openpyxl.Workbook()
sheet2=wb2.active
sheet2.title = 'final population list'
sheet2.cell(row=1,column=1).value='county'
sheet2.cell(row=1,column=2).value= 'no. of tracts'
sheet2.cell(row=1,column=3).value = 'Population'
countyTracts = 1
j=2
countyPop = sheet.cell(row= 2,column= 4).value
countyName= sheet.cell(row= 2,column= 3).value
for i in range(3,int(sheet.max_row)):
    if sheet.cell(row= i,column=3).value==countyName:
        countyTracts+=1
        countyPop += sheet.cell(row= i,column= 4).value
    else:
        sheet2.cell(row=j,column=1).value= countyName
        sheet2.cell(row=j,column=2).value= countyTracts
        sheet2.cell(row=j,column=3).value= countyPop
        j+=1
        countyName = sheet.cell(row= i,column=3).value
        countyTracts = 1
        countyPop = sheet.cell(row= i,column=4).value
wb2.save('results.xlsx')
| true
|
e8df30d6541c7aa38e44aba166d01bebf06b6f36
|
Python
|
ajithkalluvettukuzhiyil/Udacity-Self-Driving-Car-Nanodegree
|
/Term1/Keras_conv2D.py
|
UTF-8
| 1,681
| 2.96875
| 3
|
[] |
no_license
|
###
# Build from the previous network ( Keras_nn.py).
# Add a convolutional layer with 32 filters, a 3x3 kernel, and valid padding before the flatten layer.
# Add a ReLU activation after the convolutional layer.
# Train for 3 epochs again, should be able to get over 50% accuracy.
###
#in[1]:
import pickle
import numpy as np
import tensorflow as tf
# Load pickled data
with open('small_train_traffic.p', mode='rb') as f:
    data = pickle.load(f)
#in[2]:
# split data
X_train, y_train = data['features'], data['labels']
#in[3]:
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Conv2D
#in[4]:
# TODO: Build Convolutional Neural Network in Keras Here
model = Sequential()
model.add(Conv2D(32,(3,3),
                 padding='valid',
                 input_shape=(32,32,3)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(5))
model.add(Activation('softmax'))
#in[5]:
# Preprocess data
X_normalized = np.array(X_train / 255.0 - 0.5 )
from sklearn.preprocessing import LabelBinarizer
label_binarizer = LabelBinarizer()
y_one_hot = label_binarizer.fit_transform(y_train)
# compile and train model
# Training for 3 epochs should result in > 50% accuracy
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, epochs=3, validation_split=0.2)
#in[6]:
### DON'T MODIFY ANYTHING BELOW ###
### Be sure to run all cells above before running this cell ###
import grader
try:
    grader.run_grader(model, history)
except Exception as err:
    print(str(err))
| true
|
2d559e7c5fe17871f8195a6c4e17795cc4e52a63
|
Python
|
cmastalli/crocoddyl
|
/unittest/bindings/test_utils.py
|
UTF-8
| 642
| 3.125
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
import numpy as np
EPS = np.finfo(float).eps
NUMDIFF_MODIFIER = 3e4
class NumDiffException(Exception):
"""Raised when the NumDiff values are too high"""
pass
def assertNumDiff(A, B, threshold):
"""Assert analytical derivatives against NumDiff using the error norm.
:param A: analytical derivatives
:param B: NumDiff derivatives
:param threshold: absolute tolerance
"""
if not np.allclose(A, B, atol=threshold):
value = np.linalg.norm(A - B)
raise NumDiffException(
f"NumDiff exception, with residual of {value:.4g}, above threshold "
f"{threshold:.4g}"
)
| true
|
9b3d695cb2748725ef334828e8831c2e56ac559f
|
Python
|
YigaoFan/ProgrammingLanguage
|
/Lesson4WritingReduction.py
|
UTF-8
| 1,022
| 3.6875
| 4
|
[] |
no_license
|
# Writing Reductions
# We are looking at chart[i] and we see x => ab . cd from j.
# Hint: Reductions are tricky, so as a hint, remember that you only want to do
# reductions if cd == []
# Hint: You'll have to look back previously in the chart.
def reductions(chart, i, x, ab, cd, j):
    if len(cd) == 0:
        candidates = [item for item in chart[j] if len(item[2]) != 0 and item[2][0] == x]
        moveds = []
        for item in candidates:
            moveds.append((item[0], item[1] + [x], item[2][1:], item[3]))
        return moveds
    return []
chart = {
    0: [
        ('exp', ['exp'], ['+', 'exp'], 0),
        ('exp', [], ['num'], 0),
        ('exp', [], ['(', 'exp', ')'], 0),
        ('exp', [], ['exp', '-', 'exp'], 0),
        ('exp', [], ['exp', '+', 'exp'], 0)],
    1: [('exp', ['exp', '+'], ['exp'], 0)],
    2: [('exp', ['exp', '+', 'exp'], [], 0)]
}
print reductions(chart, 2, 'exp', ['exp', '+', 'exp'], [], 0) == [('exp', ['exp'], ['-', 'exp'], 0), ('exp', ['exp'], ['+', 'exp'], 0)]
| true
|
eb397e98a501d4e3738d4cb74171a257a9913e76
|
Python
|
AWangHe/Python-basis
|
/13.tkinter与银行系统实战/thinker/21.树状数据.py
|
UTF-8
| 1,040
| 3.4375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import tkinter
from tkinter import ttk
# Create the main window
win = tkinter.Tk()
# Set the title
win.title("魔兽世界")
# Set size and position: 600x400, 400 from the left edge, 100 from the top
win.geometry("600x400+400+100")
tree = ttk.Treeview(win)
tree.pack()
# Add first-level branches
treeF1 = tree.insert("",0,"中国",text="中国CHI",values=("F1"))
treeF2 = tree.insert("",1,"美国",text="美国USA",values=("F2"))
treeF3 = tree.insert("",2,"英国",text="英国UK",values=("F3"))
# Add second-level branches
treeF1_1 = tree.insert(treeF1,0,"黑龙江",text="中国黑龙江",values=("F1_1"))
treeF1_2 = tree.insert(treeF1,1,"吉林",text="中国吉林",values=("F1_2"))
treeF1_3 = tree.insert(treeF1,2,"辽宁",text="中国辽宁",values=("F1_3"))
# Add third-level branches
treeF1_1_1 = tree.insert(treeF1_1,0,"哈尔滨",text="黑龙江哈尔滨",values=("F1_1_1"))
treeF1_1_2 = tree.insert(treeF1_1,1,"五常",text="黑龙江五常",values=("F1_1_2"))
win.mainloop()
| true
|
826b7006e34b11df7b2d10849bed4b50eb7999e5
|
Python
|
JnyJny/bitvector
|
/tests/test_bitvector_display.py
|
UTF-8
| 1,331
| 3.0625
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""
"""
import pytest
from bitvector import BitVector
@pytest.mark.fast
def test_bitvector_repr():
    bv = BitVector()
    repr_value = repr(bv)
    assert "BitVector" in repr_value
    assert "128" in repr_value
@pytest.mark.fast
@pytest.mark.parametrize("given", [0, 0xDEADBEEFBADC0FFEE, (1 << 128) - 1,])
def test_bitvector_str(given):
    bv = BitVector(given)
    str_value = str(bv)
    assert str_value.startswith("0x")
    assert int(str_value, 16) == bv.value
@pytest.mark.fast
@pytest.mark.parametrize("given", [0, 0xDEADBEEFBADC0FFEE, (1 << 128) - 1,])
def test_bitvector_bin_property(given):
    bv = BitVector()
    bin_value = bv.bin
    assert bin_value.startswith("0b")
    assert int(bin_value, 2) == bv.value
@pytest.mark.fast
@pytest.mark.parametrize("given", [0, 0xDEADBEEFBADC0FFEE, (1 << 128) - 1,])
def test_bitvector_hex_property(given):
    bv = BitVector()
    hex_value = bv.hex
    assert hex_value.startswith("0x")
    assert int(hex_value, 16) == bv.value
@pytest.mark.fast
@pytest.mark.parametrize("value", [1 << p for p in range(0, 128)])
def test_bitvector_bytes_property(value):
    bv = BitVector(value)
    bv_bytes = bv.bytes
    test = int.from_bytes(bv_bytes, "big")
    assert isinstance(bv_bytes, bytes)
    assert len(bv_bytes) == len(bv) / 8
    assert test == bv
| true
|
00a16645c7a3eb1cfc7f6a868cbfb4342d1d4b48
|
Python
|
rajansaini691/gtzan-beat-tracking
|
/dataset.py
|
UTF-8
| 1,817
| 2.71875
| 3
|
[] |
no_license
|
"""
Use tf Dataset API to process audio so that we can shuffle, prefetch, etc.
"""
import tensorflow as tf
import tensorflow_io as tfio
import os
import numpy as np
import cfg
import json
def get_data_from_filename(wav_path):
    # Process audio
    raw_data = tf.io.read_file(wav_path)
    wav_data, fs = tf.audio.decode_wav(raw_data)
    wav_data = tfio.audio.resample(wav_data, tf.cast(fs, tf.int64), cfg.SAMPLE_RATE)
    wav_data = tf.squeeze(wav_data)
    wav_data = tf.slice(wav_data, begin=[0], size=[(tf.math.floordiv(tf.size(wav_data), cfg.SAMPLE_SIZE)) * cfg.SAMPLE_SIZE])
    wav_data_split = tf.reshape(wav_data, [tf.size(wav_data) / cfg.SAMPLE_SIZE, cfg.SAMPLE_SIZE])
    fft_raw = tf.map_fn(fn=tf.signal.fft, elems=tf.cast(wav_data_split, tf.complex64))
    fft_float = tf.map_fn(fn=tf.abs, elems=fft_raw, dtype=tf.float32)
    fft = tf.slice(fft_float, begin=[0, 0], size=[-1, tf.shape(fft_float)[1] // 2 - 1])
    # TODO Look into whether to normalize fft here
    # Process annotations
    wav_filename = tf.strings.split(wav_path, '/')[-1]
    annotation_path = cfg.numpy_annotations_root + '/' + wav_filename + ".npy"
    annotations = tf.py_function(lambda x: tf.convert_to_tensor(np.load(x.numpy())), inp=[annotation_path], Tout=tf.uint8)
    return fft, annotations
# Add zero padding to ensure input vector always has constant size
def add_padding(fft, annotations):
    # Get metadata
    with open(cfg.dataset_metadata_path) as f:
        metadata = json.load(f) # TODO If called more than once, use cache decorator
    max_sequence_length = metadata['max_sequence_length']
    fft = tf.pad(fft, [[0, max_sequence_length - tf.shape(fft)[0]], [0,0]])
    annotations = tf.pad(annotations, [[0, max_sequence_length - tf.shape(annotations)[0]]])
    return fft, annotations
| true
|
3232c8b1ec78398b0e1d60e59be4821f5cca8d5b
|
Python
|
simraan9/dv.pset-simran
|
/dv.pset/0041/temperature conversion.py
|
UTF-8
| 100
| 3.359375
| 3
|
[] |
no_license
|
def convertFToC(f):
    c=(f-32)*(5/9)
    return c
f=int(input("Enter F "))
print (convertFToC(f))
| true
|
f5dad8eb72646d77c7a1ff16ff59b94cae8224c4
|
Python
|
kundeng123/code_pratice
|
/find_highest_average.py
|
UTF-8
| 799
| 3.40625
| 3
|
[] |
no_license
|
def findHighestAverage(pointsAndNames):
    nameMap = {}
    for pairs in pointsAndNames:
        #print(pairs)
        if pairs[0] not in nameMap:
            nameMap.update({pairs[0]:[pairs[1]]})
        else:
            nameMap[pairs[0]].append(pairs[1])
    maxAverage = -float('inf')
    for key in nameMap:
        print(nameMap[key])
        if sum(nameMap[key])/len(nameMap[key]) > maxAverage :
            maxAverage = sum(nameMap[key])/len(nameMap[key])
            #print(maxAverage)
    return maxAverage
pair1 = ["aa", -100]
pair2 = ["bb", -88]
pair3 = ["cc", -90]
pair4 = ["aa", -5]
pair5 = ["dd", -19]
listOfPair = []
listOfPair.append(pair1)
listOfPair.append(pair2)
listOfPair.append(pair3)
listOfPair.append(pair4)
listOfPair.append(pair5)
print(findHighestAverage(listOfPair))
| true
|
cafeadb1c944852f9e699a21a1cfc40598c97606
|
Python
|
Tirth257/Game
|
/Gussing_Game.py
|
UTF-8
| 3,073
| 3.703125
| 4
|
[] |
no_license
|
import random
# Lavels
easy = random.randint(1,50)
normal = random.randint(1,100)
hard = random.randint(1,500)
ultra = random.randint(1,1000)
# Lavel suggetions
# if statement numbers
e1 = 10
e2 = 12
e3 = 10
e4 = 8
a = '''
easy = 1 to 50
10 guesses
normal = 1 to 100
12 gusses
hard = 1 to 500
10 gusses
ultra = 1 to 1000
8 gusses
'''
# Starter fail fix
print(a)
b = input("Enter Your Difficulty :- ")
while True:
    if b == 'easy' or b == 'normal' or b == 'hard' or b == 'ultra':
        break
    else:
        print("invelid difficulty , try again ")
        b = input("Enter Your Difficulty :- ")
        continue
left = "Chances Left = "
# if statements big small
big = "Too Big \ "
small = "Too Small \ "
# if statemants
guess = int(input("Take a number :- "))
# Easy Lavel
while b == "easy":
    e1 = e1 - 1
    br = (big) + left + str(e1)
    sr = (small) + left + str(e1)
    if guess == easy:
        print("You Gussed Right , You Won The Game!!")
        break
    if e1 == 0:
        print("Your all chanses is Finished , Game Over")
        print("ans = " + str(easy))
        break
    if guess > easy:
        print(br)
        guess = int(input("Take a number :- "))
    elif guess < easy:
        print(sr)
        guess = int(input("Take a number :- "))
# Normal Lavel
while b == "normal":
    e2 = e2 - 1
    br = (big) + left + str(e2)
    sr = (small) + left + str(e2)
    if guess == normal:
        print("You Gussed Right , You Won The Game!!")
        break
    if e2 == 0:
        print("Your all chanses is Finished , Game Over")
        print("ans = " + str(normal))
        break
    if guess > normal:
        print(br)
        guess = int(input("Take a number :- "))
    elif guess < normal:
        print(sr)
        guess = int(input("Take a number :- "))
# Hard lavel
while b == "hard":
    e3 = e3 - 1
    br = (big) + left + str(e3)
    sr = (small) + left + str(e3)
    if guess == hard:
        print("You Gussed Right , You Won The Game!!")
        break
    if e3 == 0:
        print("Your all chanses is Finished , Game Over")
        print("ans = " + str(hard))
        break
    if guess > hard:
        print(br)
        guess = int(input("Take a number :- "))
    elif guess < hard:
        print(sr)
        guess = int(input("Take a number :- "))
# Ultra Lavel
while b == "ultra":
    e4 = e4 - 1
    br = (big) + left + str(e4)
    sr = (small) + left + str(e4)
    if guess == ultra:
        print("You Gussed Right , You Won The Game!!")
        break
    if e4 == 0:
        print("Your all chanses is Finished , Game Over")
        print("ans = " + str(ultra))
        break
    if guess > ultra:
        print(br)
        guess = int(input("Take a number :- "))
    elif guess < ultra:
        print(sr)
        guess = int(input("Take a number :- "))
| true
|
bbd3941c8fcd5ff4e36788c70256224be3aa2d07
|
Python
|
Alkhithr/Mary
|
/think-python/chapter17_classes_methods/kangaroo.py
|
UTF-8
| 877
| 3.65625
| 4
|
[] |
no_license
|
"""Example 17.2"""
class Kangaroo:
"""Kangaroo has pouch"""
def __init__(self, name, contents):
self.name = name
self.pouch_contents = contents
def __str__(self):
return 'Name: {}, contents: {}'.format(self.name, self.pouch_contents)
def put_in_pouch(self, item):
if isinstance(item, Kangaroo):
for i in item.pouch_contents:
self.pouch_contents.append(i)
else:
self.pouch_contents.append(item)
def main():
"""test"""
kanga = Kangaroo('Kanga', [])
roo = Kangaroo('roo', [])
kanga.put_in_pouch('wallet')
kanga.put_in_pouch('keys')
kanga.put_in_pouch('gun')
roo.put_in_pouch('this is roo')
roo.put_in_pouch('this is roo too')
print(kanga)
print(roo)
kanga.put_in_pouch(roo)
print(kanga)
if __name__ == '__main__':
main()
| true
|
14890104d91049415308d11a9bdaaf3e7db75cff
|
Python
|
Alan19922015/wrangells
|
/downhillVelocityFilter_v2_05sep2016.py
|
UTF-8
| 5,844
| 2.9375
| 3
|
[] |
no_license
|
'''
Testing script to filter velocities based of downhill-direction.
Requires digitized centerline profiles.
Input = x-direction velocity, y-direction velocity, centerline profiles
Output = "cleaned x, y, and total velocities
05 Sep 2016
William Armstrong
'''
from osgeo import ogr, gdal
import numpy as np
import sys, os
import matplotlib.pyplot as plt
# Function to write a geotiff
def writeGeotiff(geoImageFilepath,writeArray,outFn):
noDataValue = -999
# Opening files
geoImage = gdal.Open(geoImageFilepath) # just used for georeferencing information
#imageArray = image.ReadAsArray().astype(np.float32)
# Generating output
[cols,rows] = writeArray.shape
proj = geoImage.GetProjection()
trans = geoImage.GetGeoTransform()
# Create the file, using the information from the original file
outDriver = gdal.GetDriverByName("GTiff")
outData = outDriver.Create(str(outFn), rows, cols, 1, gdal.GDT_Float32)
# Write the array to the file
outData.GetRasterBand(1).WriteArray(writeArray)
# Set a no data value if required
outData.GetRasterBand(1).SetNoDataValue(noDataValue)
# Georeference the image
outData.SetGeoTransform(trans)
# Write projection information
outData.SetProjection(proj)
# clear memory
outData.FlushCache()
# Function to thin vector field
# Inputs = x coord vector, y coord vector, vx matx, vy matx, vv matx, plot frequency in x, plot frequency in y
def thinVectors(x,y,vx,vy,vv,plotEveryX,plotEveryY=plotEveryX):
xThin = x[::plotEveryX]
yThin = y[::plotEveryY]
YT, XT = np.meshgrid(xThin,yThin)
vxT = vx[::plotEveryY,::plotEveryX]
vyT = vy[::plotEveryY,::plotEveryX]
vvT = vv[::plotEveryY,::plotEveryX]
return XT, YT, vxT, vyT, vvT
# Creates transformation parameters for future work
# Outputs xyTfm vector that is [minX,maxY,pxSizeX,pxSizeY]
def createTransformation(rasterIn):
rast = gdal.Open(rasterIn) # open raster
gt = rast.GetGeoTransform() # get image to ground geotransformation params
numPxX = rast.RasterXSize # number of pixels in x direction
numPxY = rast.RasterYSize # number of pixels in y direction
pxSizeX = gt[1] # pixel size in x
pxSizeY = gt[5] # pixel size in y
# upper left coordinates
minX = gt[0] # minimum x value
maxY = gt[3] # maximum y value
maxX = minX + numPxX*pxSizeX # maximum x value
minY = maxY + numPxY*pxSizeY # minimum y value
xyTfm = np.array([minX,maxY,pxSizeX,pxSizeY]).astype('float')
return xyTfm
# Function to convert pixel location to ground location
def imageToXy(i,j):
outX = []
outY = []
it = np.nditer([i,j])
for ii,jj in it:
outX.append( xyTfm[0] + ( (ii+0.5) * xyTfm[2] ) )
outY.append( xyTfm[1] + ( (jj+0.5) * xyTfm[3] ) )
outArray = np.array((outX,outY))
outArrayTranspose = outArray.transpose()
return outArrayTranspose
# Function to convert ground coordinates to image pixel location
def xyToImageIj(x,y):
outI = []
outJ = []
it = np.nditer([x,y])
for xx,yy in it:
outI.append( (xx - xyTfm[0])/xyTfm[2] - 0.5 )
outJ.append( (yy - xyTfm[1])/xyTfm[3] - 0.5 )
outArray = np.array((outI,outJ))
outArrayTranspose = outArray.transpose()
return outArrayTranspose
# Function to get coordinates from shapefile from study glaciers shapefile
# Outputs array of x, y coordinates of shapefile vertices
def getShapefileCoordinatesFromMultiline(shapefileIn,transName=None):
driver = ogr.GetDriverByName("ESRI Shapefile")
ds = driver.Open(shapefileIn,0)
lyr = ds.GetLayer(0)
numFeats = lyr.GetFeatureCount()
for i in range(0,numFeats):
featNow = lyr.GetFeature(i)
transNow = featNow.GetField(1)
if transName is None or transName == transNow:
geom = featNow.geometry()
numPoints = geom.GetPointCount()
x = []
y = []
for j in range(0,numPoints):
x.append( geom.GetX(j) )
y.append( geom.GetY(j) )
coordArray = np.array((x,y))
coordArrayTranspose = coordArray.transpose()
return coordArrayTranspose
# Function to get coordinates from shapefile from evenly space shapefile
# Outputs array of x, y coordinates of shapefile vertices
def getShapefileCoordinatesFromMultipoint(shapefileIn):
driver = ogr.GetDriverByName("ESRI Shapefile")
ds = driver.Open(shapefileIn,0)
lyr = ds.GetLayer(0)
numPoints = lyr.GetFeatureCount()
x = []
y = []
for i in range(0,numPoints):
pointNow = lyr.GetFeature(i)
geom = pointNow.geometry()
x.append( geom.GetX() )
y.append( geom.GetY() )
coordArray = np.array((x,y))
coordArrayTranspose = coordArray.transpose()
return coordArrayTranspose
# Function to sample raster at specified coordinates
# Returns values of raster at x,y
def sampleRasterAtXY(rasterIn,x,y):
z = []
imageIj = xyToImageIj(x,y)
rast = gdal.Open(rasterIn)
band = rast.GetRasterBand(1) # assumes single band raster
bandArr = band.ReadAsArray() # convert band into array
samplePtNum = imageIj.shape[0] # number of sample points
for i in range(0,samplePtNum):
z.append( bandArr[ np.round(imageIj[i,1]), np.round(imageIj[i,0] ) ] )
return z
# Calculate the distance along a line, given vertex coordinates
def distanceAlongLine(x,y):
dist = [0] # starts at 0 distance
numPoints = len(x)
for i in range(1,numPoints):
oldEasting = x[i-1]
oldNorthing = y[i-1]
nowEasting = x[i]
nowNorthing = y[i]
dE = nowEasting - oldEasting
dN = nowNorthing - oldNorthing
distChange = np.sqrt(dE**2 + dN**2)
dist.append( distChange + dist[i-1] )
return dist
# shapefile containing glacier profiles
shapefileIn='/Users/wiar9509/Documents/generalScripts/swath/shapefileInputs/newWStETransects_19nov15_fromQGIS_EGPSG102006.shp'
# Open shapefile
shp = ogr.Open(shapefileIn)
lyr = shp.GetLayer(0)
nFeat = lyr.GetFeatureCount()
# Iterate over centerline profiles
for i in range(0,nFeat):
feat = lyr.GetFeature(i)
transName = feat.GetField(1)
print i, transName
| true
|
6835e0933359e0c7e2f2ab88e225a38ad5fff51d
|
Python
|
bea3/ai_summer2017
|
/mod_6/unification.py
|
UTF-8
| 3,666
| 3.5
| 4
|
[] |
no_license
|
import tokenize
from StringIO import StringIO
'''
IMPORTANT NOTES:
* constants - values and predicates
* values start with uppercase letter (Fred)
* predicates use lowercase letters (loves)
* variables - lowercase and start with ? (?x)
* expressions (lists) - these use the S-expression syntax
'''
def atom(next, token):
    if token[1] == '(':
        out = []
        token = next()
        while token[1] != ')':
            out.append(atom(next, token))
            token = next()
            if token[1] == ' ':
                token = next()
        return out
    elif token[1] == '?':
        token = next()
        return "?" + token[1]
    else:
        return token[1]
def parse(exp):
    src = StringIO(exp).readline
    tokens = tokenize.generate_tokens(src)
    return atom(tokens.next, tokens.next())
def is_variable(exp):
    return isinstance(exp, str) and exp[0] == "?"
def is_constant(exp):
    return isinstance(exp, str) and not is_variable(exp)
def apply(result, exp1, exp2):
    if result != "{}":
        symbols = result.split('/')
        symbols = [symbols[0].replace('{', ''), symbols[1].replace('}', '')]
        for n, i in enumerate(exp2):
            if i == symbols[0]:
                exp2[n] = symbols[1]
    return exp1, exp2
def unify(s_expression1, s_expression2):
    return unification(parse(s_expression1), parse(s_expression2))
def unification(list_expression1, list_expression2):
    if (is_constant(list_expression1) and is_constant(list_expression2)) or (
            len(list_expression1) == 0 and len(list_expression2) == 0):
        if list_expression1 == list_expression2:
            return "{}"
        else:
            return "FAIL"
    elif is_variable(list_expression1):
        if list_expression1 in list_expression2:
            return "FAIL"
        else:
            return "{" + str(list_expression1) + "/" + str(list_expression2) + "}"
    elif is_variable(list_expression2):
        if list_expression2 in list_expression1:
            return "FAIL"
        else:
            return "{" + str(list_expression2) + "/" + str(list_expression1) + "}"
    if type(list_expression1) is list:
        first1 = list_expression1.pop(0)
    else:
        first1 = list_expression1
    if type(list_expression2) is list:
        first2 = list_expression2.pop(0)
    else:
        first2 = list_expression2
    result1 = unification(first1, first2)
    if result1 == "FAIL":
        return "FAIL"
    list_expression1, list_expression2 = apply(result1, list_expression1, list_expression2)
    result2 = unification(list_expression1, list_expression2)
    if result2 == "FAIL":
        return "FAIL"
    return result1 + " " + result2
def prettify_result(exp):
    if exp != "FAIL":
        exp = exp.replace('\'', '')
        exp = exp.split("}")
        nonempty = []
        results = dict()
        for i in exp:
            if i != "{}":
                nonempty.append(i)
        for i in nonempty:
            exp = i.replace('{', '')
            exp_list = exp.split('/')
            if len(exp_list) == 2:
                value = exp_list[1].strip()
                value = value.replace('[', '(')
                value = value.replace(']', ')')
                value = value.replace(',', ':')
                results[exp_list[0].strip()] = value
        return results
    else:
        return "FAIL"
def clean_exp(exp1, exp2):
    exp1 = exp1.replace("-", "_")
    exp2 = exp2.replace("-", "_")
    return exp1, exp2
exp1 = "(son Barney Bam-Bam)"
exp2 = "(son ?y (son Barney))"
exp1, exp2 = clean_exp(exp1, exp2)
stuff = unify(exp1, exp2)
print (prettify_result(stuff))
| true
|
83eff1212cce0cb959ea29dedab4cacd4a2fc901
|
Python
|
peragro/peragro-at
|
/src/damn_at/analyzers/audio/acoustid_analyzer.py
|
UTF-8
| 2,506
| 2.6875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
"""Analyzer for audio files using AcoustID"""
from __future__ import print_function
import os
import mimetypes
import subprocess
import uuid
from damn_at import logger
from damn_at import AssetId, FileId, FileDescription, AssetDescription
from damn_at.pluginmanager import IAnalyzer
from damn_at.analyzers.audio import metadata
from acoustid import fingerprint_file
def get_supported_formats():
    try:
        pro = subprocess.Popen(['ffmpeg', '-formats'], stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
        out, err = pro.communicate()
        if pro.returncode != 0:
            logger.debug(
                'GetAcoustIDTypes failed with error code %d! '
                % pro.returncode,
                out,
                err
            )
            return []
    except OSError as oserror:
        logger.debug('GetAcoustIDTypes failed! %s', oserror)
        return []
    extensions = [
        line.split()[1] for line in out.decode('utf-8').split('\n')[4:]
        if len(line.split()) > 1]
    mimes = []
    for ext in extensions:
        mime = mimetypes.guess_type('file.' + ext, False)[0]
        if mime and mime.startswith('audio/'):
            mimes.append(mime)
    return mimes
class SoundAnalyzer(IAnalyzer):
    """Class for sound analyzer called in the analyzer"""
    handled_types = get_supported_formats()
    def __init__(self):
        IAnalyzer.__init__(self)
    def activate(self):
        pass
    def analyze(self, anURI):
        fileid = FileId(filename=os.path.abspath(anURI))
        file_descr = FileDescription(file=fileid)
        file_descr.assets = []
        asset_descr = AssetDescription(asset=AssetId(
            subname=os.path.basename(anURI),
            mimetype=mimetypes.guess_type(anURI, False)[0],
            file=fileid))
        try:
            duration, fingerprint = fingerprint_file(anURI)
            fingerprint_uuid = uuid.uuid5(uuid.NAMESPACE_DNS,
                                          str(duration) + str(fingerprint))
        except Exception as e:
            print(('E: AcoustID analyzer failed %s with error %s'
                   % (anURI, e)))
            return False
        meta = {
            'duration': str(duration) + 's',
            'fingerprint': fingerprint,
            'fingerprint_uuid': fingerprint_uuid
        }
        asset_descr.metadata = metadata.MetaDataAcoustID.extract(meta)
        file_descr.assets.append(asset_descr)
        return file_descr
| true
|
c4ad0c9e8b99682c7040343068b0015388ab0f72
|
Python
|
Sebski123/Network
|
/ITT1/ass22SynchronisedTransmissionFromClientToServer/code/client.py
|
UTF-8
| 937
| 2.71875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import socket
from time import sleep
from random import randint
HOST = '192.168.43.242' # The server's hostname or IP address
PORT = 65433 # The port to send data to on the server
mySensorReadings = 'go' # The application layer protoll
def temperatureSensor():
with open("/sys/class/thermal/thermal_zone0/temp", "r") as f:
t = f.readline()
#out = randint(0, 100)
return str(t) + "q"
def pad(i):
if len(i) == 1:
return "00" + str(i)
elif len(i) == 2:
return "0" + str(i)
else:
return str(i)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
input("Press enter to begin")
while True:
try:
mySensorReadings = temperatureSensor()
print(mySensorReadings)
s.sendall(mySensorReadings.encode('utf-8'))
sleep(1)
except KeyboardInterrupt:
s.close()
exit()
| true
|
f520dc956fd17512e4167550ad3493f4c3e2fe12
|
Python
|
Nimitkothari/custom-machine-learning
|
/just.py
|
UTF-8
| 1,266
| 2.703125
| 3
|
[] |
no_license
|
from flask import Flask
from flask import request,Response
import os
import json
import pandas as pd
path = os.getcwd()
app = Flask(__name__)
column_data = pd.read_csv(path + '/data/columns.csv')
column_1 = (column_data.columns[0])
column_2 = (column_data.columns[1])
column_3 = (column_data.columns[2])
column_4 = (column_data.columns[3])
column_5 = (column_data.columns[4])
print("column 1",column_1)
print("column 2",column_2)
print("column 3",column_3)
print("column 4",column_4)
print("column 5",column_5)
@app.route('/testthe', methods=['POST'])
def justtest():
    try:
        req_body = request.get_json(force=True)
        print("before param")
        print("req_body",req_body)
        print("column_1",column_1)
        #param1 = req_body['Size']
        param1 = req_body[column_2]
        print("after param")
        print("value",param1)
        result = param1
        # for i in req_body:
        #     key[i]
        msg = {
            "Predicted size is": "%s" % (result)
        }
        resp = Response(response=json.dumps(msg),
                        status=200,
                        mimetype="application/json")
        return resp
    except Exception as e:
        print(e)
if __name__ == '__main__':
    app.run(debug=True,port=3000)
| true
|
e946b0783435bd0401e09144272c59a1678b823b
|
Python
|
zhoushujian001/homework
|
/python一些小代码/一些算法.py
|
UTF-8
| 3,756
| 3.59375
| 4
|
[] |
no_license
|
import time
# 选择排序
# def findsmallest(arr):
# smellest=arr[0]
# smellest_index=0
# for i in range(1,len(arr)):
# if arr[i]<smellest:
# smellest=arr[i]
# smellest_index=i
# return smellest_index
# def sel(arr):
# newarr=[]
# for i in range(len(arr)):
# smallest=findsmallest(arr)
# newarr.append(arr.pop(smallest))
# return newarr
# print(sel([5,10,20,6,8]))
# 冒泡排序
# def bubble_sort(alist):
# for j in range(len(alist)-1,0,-1):
# # j表示每次遍历需要比较的次数,是逐渐减小的
# for i in range(j):
# if alist[i] > alist[i+1]:
# alist[i], alist[i+1] = alist[i+1], alist[i]
# li = [54,26,93,17,77,31,44,55,20]
# bubble_sort(li)
# print(li)
# 鸡尾酒排序
a=[13,67,432,6865,342,3,4,5,21]
def cocktail_sort(l):
    size = len(l)
    sign = 1
    for i in range(int(size/2)):
        if sign:
            sign = 0
            for j in range(i, size - 1 - i):
                if l[j] > l[j + 1]:
                    l[j], l[j + 1] = l[j + 1], l[j]
            for k in range(size - 2 - i, i, -1):
                if l[k] < l[k - 1]:
                    l[k], l[k - 1] = l[k - 1], l[k]
                    sign = 1
        else:
            break
cocktail_sort(a)
print(a)
# # 快速排序
# def quicksort(array):
# if len(array)<2:
# return array
# else:
# pivot=array[0]
# less=[i for i in array[1:] if i<=pivot]
# greater=[i for i in array[1:] if i>pivot]
# print(array)
# print(less)
# print(greater)
# return quicksort(less)+[pivot]+quicksort(greater)
# print(quicksort([10,222,46,77,32,3]))
#二分法
# def bin(list,item):
# low=0
# high=len(list)-1
# while high>=low:
# mid=(high+low)//2
# guess=list[mid]
# if guess==item:
# return mid
# if guess>item:
# high = mid - 1
# else:
# low = mid + 1
# list=[1,2,3,4,5,6,7,8,9]
# print(bin(list,8))
# 汉诺塔
# def move(n, a, b, c):
# if n == 1: # 如果a只有1盘子
# print(a, '-->', c); # 直接把盘子从a移到c
# else: # 如果a有n个盘子(n > 1),那么分三步
# move(n - 1, a, c, b) # 先把上面n-1个盘子,借助c,从a移到b
# move(1, a, b, c) # 再把最下面的1个盘子,借助b,从a移到c
# move(n - 1, b, a, c) # 最后把n-1个盘子,借助a,从b移到c
# move(4, 'A', 'B', 'C') # 测试
# 约舍夫环
# def monkey(n,m):
# l=[]
# i = 0
# for j in range(1,n+1):
# l.append(j)
# while len(l)>1:
# k=l.pop(0)
# i += 1
# if i%m!=0:
# l.append(k)
# if len(l)==m-1:
# return k
# print(monkey(16,3))
# start_time=time.time()
# for a in range(0,1001):
# for b in range(0, 1001):
# c=1000-a-b
# if a+b+c==1000 and a**2+b**2==c**2:
# print(a,b,c)
# end_time=time.time()
# print('time:%d'%(end_time-start_time))
# print('结束')
# start_time=time.time()
# for a in range(0,1001):
# for b in range(0,1001):
# if 2000*(a+b) - 2*a*b == 1000**2:
# print(a,b,1000-a-b)
# end_time=time.time()
# print('time:%d'%(end_time-start_time))
# print('结束')
# for a in range(1, a_range):
# b_high =b_range
# b_low = a + 1
# while True:
# b = (b_low + b_high) // 2
# c = 1000000 - a - b
# if a ** 2 + b ** 2 == c ** 2:
# print("a=%d, b=%d, c=%d" % (a, b, c))
# break
# elif a ** 2 + b ** 2 > c ** 2:
# b_high = b
# elif a ** 2 + b ** 2 < c ** 2:
# b_low = b + 1
# if b_low == b_high :
# break
# end_time=time.time()
# print('time:%d'%(end_time-start_time))
# print('结束')
| true
|
1e3cf9218313db41dc6ebd2f619cc982fc337e33
|
Python
|
student-jjh/code_it
|
/다항회귀.py
|
UTF-8
| 950
| 3.0625
| 3
|
[] |
no_license
|
# import the required libraries
from sklearn import datasets
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import pandas as pd
diabetes_dataset = datasets.load_diabetes()
# Bring over your code from the previous assignment.
polynomial_transformer=PolynomialFeatures(2)
polynomial_data=polynomial_transformer.fit_transform(diabetes_dataset.data)
X=pd.DataFrame(polynomial_data,columns=polynomial_transformer.get_feature_names(diabetes_dataset.feature_names))
# target variable
y = pd.DataFrame(diabetes_dataset.target, columns=['diabetes'])
# write your code here
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=5)
model=LinearRegression()
model.fit(X_train,y_train)
y_test_predict=model.predict(X_test)
mse = mean_squared_error(y_test, y_test_predict)
mse ** 0.5
| true
|
a54e3a39b7bc7cbaf7de3a225675e588b3f57e15
|
Python
|
trungdong/datasets-provanalytics-dmkd
|
/analytics.py
|
UTF-8
| 4,009
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
import pandas as pd
import numpy as np
from scipy import stats
from sklearn import model_selection, tree
from imblearn.over_sampling import SMOTE
from collections import Counter
import warnings
### List of metrics analysed in the paper ###
# The 'combined' list has all the 22 metrics
feature_names_combined = (
    'entities', 'agents', 'activities', # PROV types (for nodes)
    'nodes', 'edges', 'diameter', 'assortativity', # standard metrics
    'acc', 'acc_e', 'acc_a', 'acc_ag', # average clustering coefficients
    'mfd_e_e', 'mfd_e_a', 'mfd_e_ag', # MFDs
    'mfd_a_e', 'mfd_a_a', 'mfd_a_ag',
    'mfd_ag_e', 'mfd_ag_a', 'mfd_ag_ag',
    'mfd_der', # MFD derivations
    'powerlaw_alpha' # Power Law
)
# The 'generic' list has 6 generic network metrics (that do not take provenance information into account)
feature_names_generic = (
    'nodes', 'edges', 'diameter', 'assortativity', # standard metrics
    'acc',
    'powerlaw_alpha' # Power Law
)
# The 'provenance' list has 16 provenance-specific network metrics
feature_names_provenance = (
    'entities', 'agents', 'activities', # PROV types (for nodes)
    'acc_e', 'acc_a', 'acc_ag', # average clustering coefficients
    'mfd_e_e', 'mfd_e_a', 'mfd_e_ag', # MFDs
    'mfd_a_e', 'mfd_a_a', 'mfd_a_ag',
    'mfd_ag_e', 'mfd_ag_a', 'mfd_ag_ag',
    'mfd_der', # MFD derivations
)
# The utitility of above threes set of metrics will be assessed in our experiements to
# understand whether provenance type information help us improve data classification performance
feature_name_lists = (
    ('combined', feature_names_combined),
    ('generic', feature_names_generic),
    ('provenance', feature_names_provenance)
)
def balance_smote(df):
    X = df.drop('label', axis=1)
    Y = df.label
    print('Original data shapes:', X.shape, Y.shape)
    smoX, smoY = X, Y
    c = Counter(smoY)
    while (min(c.values()) < max(c.values())): # check if all classes are balanced, if not balance the first minority class
        smote = SMOTE(ratio="auto", kind='regular')
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            smoX, smoY = smote.fit_sample(smoX, smoY)
        c = Counter(smoY)
    print('Balanced data shapes:', smoX.shape, smoY.shape)
    df_balanced = pd.DataFrame(smoX, columns=X.columns)
    df_balanced['label'] = smoY
    return df_balanced
def t_confidence_interval(an_array, alpha=0.95):
    s = np.std(an_array)
    n = len(an_array)
    return stats.t.interval(alpha=alpha, df=(n - 1), scale=(s / np.sqrt(n)))
def cv_test(X, Y, n_iterations=1000, test_id=""):
    accuracies = []
    importances = []
    while len(accuracies) < n_iterations:
        skf = model_selection.StratifiedKFold(n_splits=10, shuffle=True)
        for train, test in skf.split(X, Y):
            clf = tree.DecisionTreeClassifier()
            clf.fit(X.iloc[train], Y.iloc[train])
            accuracies.append(clf.score(X.iloc[test], Y.iloc[test]))
            importances.append(clf.feature_importances_)
    print("Accuracy: %.2f%% ±%.4f <-- %s" % (np.mean(accuracies) * 100, t_confidence_interval(accuracies)[1] * 100, test_id))
    return accuracies, importances
def test_classification(df, n_iterations=1000, test_id=''):
    results = pd.DataFrame()
    imps = pd.DataFrame()
    Y = df.label
    for feature_list_name, feature_names in feature_name_lists:
        X = df[list(feature_names)]
        accuracies, importances = cv_test(
            X, Y, n_iterations, '-'.join((test_id, feature_list_name)) if test_id else feature_list_name
        )
        rs = pd.DataFrame(
            {
                'Metrics': feature_list_name,
                'Accuracy': accuracies
            }
        )
        results = results.append(rs, ignore_index=True)
        if feature_list_name == "combined": # we are interested in the relevance of all features (i.e. 'combined')
            imps = pd.DataFrame(importances, columns=feature_names)
    return results, imps
| true
|
5e1b7f66042d97142f97501dd5fe5a4c18a365b8
|
Python
|
palomabareli/520_2
|
/01_Class_20180901/2_InOutData.py
|
UTF-8
| 123
| 3.34375
| 3
|
[] |
no_license
|
#/usr/bin/python3
message1 = 'Welcome'
message2 = input('Enter you name: ')
print(message1, message2, sep='.', end='\n\n')
| true
|
4622cbd5f71f9d9409c619275e2d97599d068a02
|
Python
|
NCPlayz/screen
|
/screen/controls/stack.py
|
UTF-8
| 1,312
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
from typing import List, Union
from screen.controls import Control, property
from screen.controls.primitives import Bullet, Orientation
from screen.utils import len
def _bullet_invalidate_measure(before, after):
    return not (isinstance(before, str) and isinstance(after, str) and len(before) == len(after))
class Stack(Control):
    """
    Represents a control used to display a stack of controls.
    |parameters|
    .. container:: operations
        .. describe:: x == y
        .. describe:: x != y
            Compares two :class:`~.Stack` objects.
        .. describe:: hash(x)
            Returns the hash of the :class:`~.Stack` object.
    """
    # fmt: off
    bullet = property(Union[Bullet, str], Bullet.none, True, _bullet_invalidate_measure, True)
    children = property(List[Control], None, False, True, True)
    orientation = property(Orientation, Orientation.horizontal, True, True, True)
    spacing = property(int, 0, True, True, True)
    # fmt: on
    def measure_core(self, h, w):
        raise NotImplementedError
    def render_core(self, h, w):
        raise NotImplementedError
__all__ = [
    "Stack",
]
| true
|
a617fd59c0cf80e3f5e3069826448eaa11fefefc
|
Python
|
Chezacar/AmurTiger2.0
|
/not_so_strong_baseline_for_video_based_person_reID-master/lr_schedulers.py
|
UTF-8
| 11,858
| 3.0625
| 3
|
[] |
no_license
|
import math
from bisect import bisect_right
import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import Optimizer
class _LRScheduler(object):
def __init__(self, optimizer, last_epoch=-1):
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
if last_epoch == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
self.step(last_epoch + 1)
self.last_epoch = last_epoch
def get_lr(self):
raise NotImplementedError
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
class CyclicLR(_LRScheduler):
def __init__(self, optimizer, base_lr, max_lr, step_size, gamma=0.99, mode='triangular', last_epoch=-1):
self.optimizer = optimizer
self.base_lr = base_lr
self.max_lr = max_lr
self.step_size = step_size
self.gamma = gamma
self.mode = mode
assert mode in ['triangular', 'triangular2', 'exp_range']
super(CyclicLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
new_lr = []
# make sure that the length of base_lrs doesn't change. Dont care about the actual value
for base_lr in self.base_lrs:
cycle = np.floor(1 + self.last_epoch / (2 * self.step_size))
x = np.abs(float(self.last_epoch) / self.step_size - 2 * cycle + 1)
if self.mode == 'triangular':
lr = self.base_lr + (self.max_lr - self.base_lr) * np.maximum(0, (1 - x))
elif self.mode == 'triangular2':
lr = self.base_lr + (self.max_lr - self.base_lr) * np.maximum(0, (1 - x)) / float(2 ** (cycle - 1))
elif self.mode == 'exp_range':
lr = self.base_lr + (self.max_lr - self.base_lr) * np.maximum(0, (1 - x)) * (self.gamma ** (
self.last_epoch))
new_lr.append(lr)
return new_lr
class CyclicCosAnnealingLR(_LRScheduler):
r"""
Implements reset on milestones inspired from CosineAnnealingLR pytorch
Set the learning rate of each parameter group using a cosine annealing
schedule, where :math:`\eta_{max}` is set to the initial lr and
:math:`T_{cur}` is the number of epochs since the last restart in SGDR:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
\cos(\frac{T_{cur}}{T_{max}}\pi))
When last_epoch > last set milestone, lr is automatically set to \eta_{min}
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only
implements the cosine annealing part of SGDR, and not the restarts.
Args:
optimizer (Optimizer): Wrapped optimizer.
milestones (list of ints): List of epoch indices. Must be increasing.
decay_milestones(list of ints):List of increasing epoch indices. Ideally,decay values should overlap with milestone points
gamma (float): factor by which to decay the max learning rate at each decay milestone
eta_min (float): Minimum learning rate. Default: 1e-6
last_epoch (int): The index of last epoch. Default: -1.
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""
def __init__(self, optimizer, milestones, decay_milestones=None, gamma=0.5, eta_min=1e-6, last_epoch=-1):
if not list(milestones) == sorted(milestones):
raise ValueError('Milestones should be a list of'
' increasing integers. Got {}', milestones)
self.eta_min = eta_min
self.milestones = milestones
self.milestones2 = decay_milestones
self.gamma = gamma
super(CyclicCosAnnealingLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
if self.last_epoch >= self.milestones[-1]:
return [self.eta_min for base_lr in self.base_lrs]
idx = bisect_right(self.milestones, self.last_epoch)
left_barrier = 0 if idx == 0 else self.milestones[idx - 1]
right_barrier = self.milestones[idx]
width = right_barrier - left_barrier
curr_pos = self.last_epoch - left_barrier
if self.milestones2:
return [
self.eta_min + (base_lr * self.gamma ** bisect_right(self.milestones2, self.last_epoch) - self.eta_min) *
(1 + math.cos(math.pi * curr_pos / width)) / 2
for base_lr in self.base_lrs]
else:
return [self.eta_min + (base_lr - self.eta_min) *
(1 + math.cos(math.pi * curr_pos / width)) / 2
for base_lr in self.base_lrs]
class CyclicLinearLR(_LRScheduler):
r"""
Implements reset on milestones inspired from Linear learning rate decay
Set the learning rate of each parameter group using a linear decay
schedule, where :math:`\eta_{max}` is set to the initial lr and
:math:`T_{cur}` is the number of epochs since the last restart:
.. math::
\eta_t = \eta_{min} + (\eta_{max} - \eta_{min})(1 -\frac{T_{cur}}{T_{max}})
When last_epoch > last set milestone, lr is automatically set to \eta_{min}
Args:
optimizer (Optimizer): Wrapped optimizer.
milestones (list of ints): List of epoch indices. Must be increasing.
decay_milestones(list of ints):List of increasing epoch indices. Ideally,decay values should overlap with milestone points
gamma (float): factor by which to decay the max learning rate at each decay milestone
eta_min (float): Minimum learning rate. Default: 1e-6
last_epoch (int): The index of last epoch. Default: -1.
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""
def __init__(self, optimizer, milestones, decay_milestones=None, gamma=0.5, eta_min=1e-6, last_epoch=-1):
if not list(milestones) == sorted(milestones):
raise ValueError('Milestones should be a list of'
' increasing integers. Got {}', milestones)
self.eta_min = eta_min
self.gamma = gamma
self.milestones = milestones
self.milestones2 = decay_milestones
super(CyclicLinearLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
if self.last_epoch >= self.milestones[-1]:
return [self.eta_min for base_lr in self.base_lrs]
idx = bisect_right(self.milestones, self.last_epoch)
left_barrier = 0 if idx == 0 else self.milestones[idx - 1]
right_barrier = self.milestones[idx]
width = right_barrier - left_barrier
curr_pos = self.last_epoch - left_barrier
if self.milestones2:
return [self.eta_min + (
base_lr * self.gamma ** bisect_right(self.milestones2, self.last_epoch) - self.eta_min) *
(1. - 1.0 * curr_pos / width)
for base_lr in self.base_lrs]
else:
return [self.eta_min + (base_lr - self.eta_min) *
(1. - 1.0 * curr_pos / width)
for base_lr in self.base_lrs]
class GradualWarmupScheduler(_LRScheduler):
""" Gradually warm-up(increasing) learning rate in optimizer.
Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
Args:
optimizer (Optimizer): Wrapped optimizer.
multiplier: target learning rate = base lr * multiplier
total_epoch: target learning rate is reached at total_epoch, gradually
after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
"""
def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
self.multiplier = multiplier
if self.multiplier <= 1.:
raise ValueError('multiplier should be greater than 1.')
self.total_epoch = total_epoch
self.after_scheduler = after_scheduler
self.finished = False
super().__init__(optimizer)
def get_lr(self):
if self.last_epoch > self.total_epoch:
if self.after_scheduler:
if not self.finished:
self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
self.finished = True
return self.after_scheduler.get_lr()
return [base_lr * self.multiplier for base_lr in self.base_lrs]
return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
def step_ReduceLROnPlateau(self, metrics, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch if epoch != 0 else 1 # ReduceLROnPlateau is called at the end of epoch, whereas others are called at beginning
if self.last_epoch <= self.total_epoch:
warmup_lr = [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
param_group['lr'] = lr
else:
self.after_scheduler.step(metrics, epoch)
def step(self, epoch=None, metrics=None):
if type(self.after_scheduler) != ReduceLROnPlateau:
if self.finished and self.after_scheduler:
return self.after_scheduler.step(epoch)
else:
return super(GradualWarmupScheduler, self).step(epoch)
else:
self.step_ReduceLROnPlateau(metrics, epoch)
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
warmup_factor=1.0 / 3,
warmup_iters=500,
warmup_method="linear",
last_epoch=-1,
):
if not list(milestones) == sorted(milestones):
raise ValueError(
"Milestones should be a list of" " increasing integers. Got {}",
milestones,
)
if warmup_method not in ("constant", "linear"):
raise ValueError(
"Only 'constant' or 'linear' warmup_method accepted"
"got {}".format(warmup_method)
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = self.last_epoch / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr
* warmup_factor
* self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
| true
|
0f7d394edf982787e24eef136e8195242b9fd713
|
Python
|
ajtrexler/rando
|
/cloudfunc_gcp_landing.py
|
UTF-8
| 1,383
| 2.546875
| 3
|
[] |
no_license
|
from google.cloud import storage
import hmac
from hashlib import sha1
import requests
import subprocess
import zipfile
import os
def hello_world(request):
    secret = bytes(os.environ.get("secret"),encoding='utf=8')
    #verify header sig using sha
    header_signature = request.headers.get('X-Hub-Signature').replace('sha1=','')
    header = request.headers
    data = request.data
    payload = request.get_json()
    mac = hmac.new(secret, msg=data, digestmod=sha1)
    if str(mac.hexdigest()) == str(header_signature):
        bucket_name = 'www.hardscrabblelabs.com'
        storage_client = storage.Client()
        bucket = storage_client.get_bucket(bucket_name)
        url = "https://github.com/ajtrexler/gcp_landing/archive/master.zip"
        github_handle = requests.get(url)
        tmp_raw = github_handle.content
        with open('/tmp/tmpzip.zip','wb') as fid:
            fid.write(tmp_raw)
        zipfile.ZipFile('/tmp/tmpzip.zip','r').extractall('/tmp')
        rootdir = '/tmp/gcp_landing-master/_site/'
        for folder,subs,files in os.walk(rootdir):
            for f in files:
                blob = bucket.blob(os.path.join(folder.replace(rootdir,'')+f))
                blob.upload_from_filename(os.path.join(folder,f))
        return 'blob up success'
    else:
        return "{x} : {y}".format(x=mac.hexdigest(),y=header_signature)
| true
|
c7ed937e09ca716e0578a2b2661cf04e783bfa20
|
Python
|
rachellonchar/bell_algae_game
|
/code/game_objects/eddy.py
|
UTF-8
| 1,689
| 3.21875
| 3
|
[] |
no_license
|
##ALIEN INVASION
# IMAGE/SPRITE
#bullet image
import pygame
from pygame.sprite import Sprite
#import random
#print(random.randrange(0,1000,1))
#print(random.randrange(0,700,1))
class Eddy(Sprite):
    '''a class to manage bullets leaving the ship'''
    def __init__(self,ai_settings,screen):
        '''create bullet object a ship's current position'''
        super().__init__()
        self.screen = screen
        self.screen_width = ai_settings.screen_width
        self.screen_height = ai_settings.screen_height
        self.rect = pygame.Rect(0, 0, ai_settings.ed_px_width, ai_settings.ed_px_height)
        self.color = ai_settings.eddy_color
        self.ed_speedx = ai_settings.ed_speed_factor
        self.ed_speedy = ai_settings.ed_speed_factor
    #def set_speed(self):
        #fac_x = random.randrange(-1,2,2)
        #self.bullet_speedx = fac_x*self.bullet_speedx
        #fac_y = random.randrange(-1,2,2)
        #self.bullet_speedy = fac_y*self.bullet_speedy
    def update(self,place_x,place_y):
        self.rect.centerx = place_x
        self.rect.centery = place_y
    #def update(self):
        #'''move bullet up the screen'''
        #if self.rect.x == self.screen_width-10 or self.rect.x == 10:
            #self.bullet_speedx = -1*self.bullet_speedx
        #self.rect.x += self.bullet_speedx
        #if self.rect.y == self.screen_height-10 or self.rect.y <= 10:
            #self.bullet_speedy = -1*self.bullet_speedy
        #self.rect.y += self.bullet_speedy
    def draw_pix(self):
        '''draw the bullet to the screen'''
        pygame.draw.rect(self.screen, self.color, self.rect)
| true
|
89d88ae7941710c7f7a66c5d7ef768b0df634da6
|
Python
|
OmarTahoun/competitive-programming
|
/Code Forces/PY/calculatingFunction.py
|
UTF-8
| 98
| 2.984375
| 3
|
[] |
no_license
|
import math
n = float(input())
res = math.ceil(n/2)
if n % 2 !=0:
    res = 0-res
print int(res)
| true
|
6fec119b9681493847cf16da01f8a0f75e9f57d6
|
Python
|
brkyydnmz/Python
|
/Workshops/hesapMakinesi.py
|
UTF-8
| 824
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
def topla(sayi1,sayi2):
    return sayi1 + sayi2
def cikar(sayi1,sayi2):
    return sayi1 - sayi2
def carp(sayi1,sayi2):
    return sayi1 * sayi2
def bol(sayi1,sayi2):
    return sayi1 / sayi2
print("Operasyon:")
print("=======================")
print("1 : Topla")
print("2 : Çıkar")
print("3 : Çarp")
print("4 : Böl")
secenek = input("Operasyon seçiniz?")
sayi1 = int(input("Birinci sayı?"))
sayi2 = int(input("İkinci sayı?"))
if secenek == "1":
print("Toplam : " +str(topla(sayi1, sayi2)))
elif secenek == "2":
print("Çıkarma : " +str(cikar(sayi1, sayi2)))
elif secenek == "3":
print("Çarpma : " +str(carp(sayi1, sayi2)))
elif secenek == "4"and sayi2 != 0:
print("Bölme : " +str(bol(sayi1, sayi2)))
else:
print("Hatalı kodladınız")
| true
|
20b3e13151e03ccb630ff6d3ae0d08342e2fd30e
|
Python
|
acken/dotfiles
|
/bin/OpenIDE/.OpenIDE/languages/C#-files/scripts/create-files/preserved-data/copydir.py
|
UTF-8
| 1,991
| 2.515625
| 3
|
[] |
no_license
|
import os
import sys
import uuid
def copyFile(projectName, replacements, source, destination):
if os.path.basename(source) == "project.file":
destination = os.path.join(os.path.dirname(destination), projectName + ".csproj")
f1 = open(source, 'r')
f2 = open(destination, 'w')
for line in f1:
newLine = line
for replacement in replacements:
newLine = newLine.replace(replacement, replacements[replacement])
newLine = newLine.replace("{NEW_GUID}", "{" + str(uuid.uuid1()) + "}")
newLine = newLine.replace("{NEW_NOBRACES_GUID}", str(uuid.uuid1()))
f2.write(newLine)
f1.close()
f2.close()
def recurseDir(projectName, replacements, source, destination):
if destination != "" and os.path.isdir(destination) == False:
os.makedirs(destination)
for file in os.listdir(source):
if os.path.isdir(os.path.join(source, file)):
if file == "bin":
continue
recurseDir(projectName, replacements, os.path.join(source, file), os.path.join(destination, file))
else:
copyFile(projectName, replacements, os.path.join(source, file), os.path.join(destination, file))
def copy(projecttype, filepath):
path_chunks = filepath.split(os.sep)
if len(path_chunks) > 1 and filepath.lower().endswith(".csproj") == False:
if path_chunks[len(path_chunks)-1] != path_chunks[len(path_chunks)-2]:
filepath = os.path.join(filepath, path_chunks[len(path_chunks)-1])
if filepath.lower().endswith(".csproj") == False:
filepath = filepath + ".csproj"
directory = os.path.dirname(filepath)
projectName = os.path.splitext(os.path.basename(filepath))[0]
projDir = os.path.join(os.path.dirname(__file__),projecttype)
projectGUID = "{" + str(uuid.uuid1()) + "}"
replacements = { '{PROJECT_NAME}': projectName, '{PROJECT_GUID}': projectGUID }
recurseDir(projectName, replacements, projDir, directory)
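
# Hedged usage sketch (added; not part of the original script). The template
# name and target path below are illustrative only - `copy` expects a template
# directory with that name next to this script:
#
#   copy("console-project", os.path.join("src", "MyApp"))
#
# which would expand the template into src/MyApp and name the project MyApp.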
| true
|
c04e026702f2f2955356cdf14a25fec28fda90e1
|
Python
|
vichuda/business_card_parser
|
/business_card_filter.py
|
UTF-8
| 6,638
| 3.171875
| 3
|
[] |
no_license
|
######################################################################
# business_card_filter.py
#
# Classes that parse the results of the
# optical character recognition (OCR) component
# in order to extract the name, phone number, and email address
# from the processed business card image
######################################################################
import re, sys
import pandas as pd
from business_card_interface import IContactInfo, IBusinessCardParser
class ContactInfo(IContactInfo):
# regex patterns used for finding the email and phone numbers
email_pattern = re.compile(r'([_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4}))')
phone_pattern = re.compile(r'((\d{1,3}\D*)?\d{3}\D*\d{3}\D*\d{4})\D*(\d*)') # Last part is for the extension
# Constructor for the ContactInfo class
# Extracts the name, email, and phone information from a given business card
#
# Argument 1: document: a String containing the parsed business card to extract the contact info,
# separated into multiple lines
# Argument 2: firstname_series: pandas Series containing the list of firstnames and their penalties
# Argument 3: nonname_count: pandas Series containing the list of companies and occupations
# and their penalties
# Argument 4: stripCh: a regex string containing the characters that should be removed from the
# beginning and ending of each word for each line
def __init__(self,document,firstname_series,nonname_count,stripCh):
self.name = ''
self.email = ''
self.phone = ''
if not document:
return # business card info must be provided
# Office emails/phone #s are not always given first nor last, so set a variable
# that when one is found, then stop looking for (the respective) emails/phone #s.
office_email = False
office_phone = False
possible_names = [] # List of possible lines that contain the employee's name
for line in document.split('\n'):
line = line.strip()
lowercaseLine = line.lower()
# Ignore fax numbers
if lowercaseLine.startswith("fax"):
continue
# Check if line contains email address
email_match = self.email_pattern.search(line)
if email_match:
if not office_email: # only add email if an office email has not yet been given
self.email = email_match.group(1)
if lowercaseLine.startswith("office") or lowercaseLine.startswith("work") or lowercaseLine.startswith("main"):
office_email = True
continue # Line contains email, so should not contain other useful info
# Check if line contains phone number
phone_match = self.phone_pattern.search(line)
if phone_match:
if not office_phone: # only add phone # if an office phone has not yet been given
self.phone = re.sub(r'[^0-9]','',phone_match.group(1)) # Remove all non-digit characters
if lowercaseLine.startswith("office") or lowercaseLine.startswith("work") or lowercaseLine.startswith("main"):
office_phone = True
continue # Line contains phone #, so should not contain other useful info
# Valid names do not have numbers
if bool(re.search(r'\d', line)):
continue
possible_names.append(line)
minPenalty = sys.maxsize
for possible_name in possible_names:
penalty = 0
for word in possible_name.split():
word = word.strip(stripCh).lower()
if len(word)<=1:
continue
if word in nonname_count.index: # line contains company/occupation name
penalty += nonname_count[word]
if word in firstname_series.index: # line contains first name
penalty += firstname_series[word]
if penalty < minPenalty:
self.name = possible_name
minPenalty = penalty
# Returns: a String of the Employee's Full Name
def getName(self):
return self.name
# Returns: a String of the Employee's phone number
def getPhoneNumber(self):
return self.phone
# Returns: a String of the Employee's email address
def getEmailAddress(self):
return self.email
class BusinessCardParser(IBusinessCardParser):
stripCh = ',.(){}[]'
splitPattern = r' |/'
# Constructor for the BusinessCardParser class
# Prepares the pandas Series of firstnames and the pandas Series of companies/occupations to be used
# with the ContactInfo class
def __init__(self):
#Prepare pandas Series of first names
firstname_df = pd.read_csv('CSV_Database_of_First_Names.csv',header=0)
firstname_df.drop_duplicates(inplace=True)
firstname_df['firstname'] = firstname_df['firstname'].str.lower()
firstname_df['penalty'] = -50
self.firstname_series = pd.Series(firstname_df['penalty'].values,index=firstname_df['firstname'].values)
#Prepare pandas Series of company and occupation information
company_df = pd.read_csv('List_of_US_companies.csv',header=None)
company_df.drop_duplicates(inplace=True)
occupation_df = pd.read_csv('List_of_occupations.csv',header=None)
occupation_df.drop_duplicates(inplace=True)
company_list = company_df[0].values.tolist()
occupation_list = occupation_df[0].values.tolist()
#Combine the company and occupation lists into one list of nonnames
nonname_list = [company_part.strip(self.stripCh).lower() for company in company_list\
for company_part in re.split(self.splitPattern,company)\
if len(company_part.strip(self.stripCh))>1]
nonname_list += [occupation_part.strip(self.stripCh).lower() for occupation in occupation_list\
for occupation_part in re.split(self.splitPattern,occupation)\
if len(occupation_part.strip(self.stripCh))>1]
nonname_series = pd.Series(nonname_list, name='nonname')
self.nonname_count = nonname_series.value_counts()
# Returns: an instance of the ContactInfo class
def getContactInfo(self,document):
return ContactInfo(document,self.firstname_series,self.nonname_count,self.stripCh)
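
# Hedged usage sketch (added; not part of the original module). It assumes the
# CSV files referenced in the constructor are present next to this script; the
# card text below is made up purely for illustration.
if __name__ == '__main__':
    sample_card = ("ASYMMETRIK LTD\n"
                   "Mike Smith\n"
                   "Senior Software Engineer\n"
                   "(410)555-1234\n"
                   "msmith@asymmetrik.com")
    parser = BusinessCardParser()
    info = parser.getContactInfo(sample_card)
    print("Name: ", info.getName())
    print("Phone:", info.getPhoneNumber())
    print("Email:", info.getEmailAddress())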
| true
|
25e124489b378f87d1a903a1c8f4edad7b001c3f
|
Python
|
TheNeuralBit/aoc2017
|
/10/sol1.py
|
UTF-8
| 792
| 3.390625
| 3
|
[] |
no_license
|
def tie_knots(l, lengths):
idx = 0
skip = 0
for length in lengths:
knot(l, length, idx)
idx = (idx + length + skip) % len(l)
skip += 1
return l
def knot(l, length, idx):
halflen = int(length/2)
lindices = (i % len(l) for i in range(idx, idx+halflen))
rindices = (i % len(l) for i in range(idx+length - 1, idx+length-1-halflen, -1))
for left, right in zip(lindices, rindices):
tmp = l[right]
l[right] = l[left]
l[left] = tmp
return l
assert knot([0, 1, 2, 3, 4], 3, 1) == [0, 3, 2, 1, 4]
assert knot([0, 1, 2, 3, 4], 4, 3) == [4, 3, 2, 1, 0]
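
# Example from the 2017 day 10 puzzle statement (added as a sanity check):
# lengths 3, 4, 1, 5 applied to the list 0..4 leave 3 and 4 in front, 3*4 == 12.
example = tie_knots(list(range(5)), [3, 4, 1, 5])
assert example[0] * example[1] == 12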
with open('input', 'r') as fp:
lengths = map(int, fp.readline().strip().split(','))
l = list(range(256))
tie_knots(l, lengths)
print(l[0]*l[1])
| true
|
9eb9e9ac04ed72824a1c8cc6c9832ebc894dcc3d
|
Python
|
dimmxx/codecademy
|
/AdvancedTopicsInPython_ListSlicing.py
|
UTF-8
| 454
| 3.90625
| 4
|
[] |
no_license
|
l = [i ** 2 for i in range(1, 11)]
# Should be [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
print l
print l[2:9:2]
to_five = ['A', 'B', 'C', 'D', 'E']
print to_five[3:]
# prints ['D', 'E']
print to_five[:2]
# prints ['A', 'B']
print to_five[::2]
# prints ['A', 'C', 'E']
my_list = range(1, 11)
print my_list[::2]
backwards = my_list
print backwards[::-1]
to_one_hundred = range(101)
backwards_by_tens = to_one_hundred[::-10]
print backwards_by_tens
| true
|
49c4d6f0b325ce41cb57704cc05a057b68dd72b9
|
Python
|
A01172971/Tarea_Python
|
/dates.py
|
UTF-8
| 1,910
| 3.828125
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime as dt
import re
def make_a_date(from_date="", until_hour=""):
day_names = ["Lun", "Mar", "Mie", "Jue", "Vie", "Sab", "Dom"]
today = dt.datetime.now()
y = today.year
m = today.month
d = today.day
w = today.weekday()
if from_date != "":
        # split the text on "-"
        # "\s" would match any whitespace (space, tab, ...)
        # get year, month and day as strings
ys, ms, ds = re.split("-", from_date)
        # convert the strings to ints
y = int(ys)
m = int(ms)
d = int(ds)
        # build the date and get the day of the week
fecha = dt.datetime(y, m, d)
w = fecha.weekday()
offset = 1
if day_names[w] in ["Vie", "Sab"]:
if day_names[w] == "Vie":
offset = 3
else:
offset = 2
try:
new_date = dt.datetime(y, m, d + offset)
except:
if m + 1 <= 12:
x=1
while True:
new_date = dt.datetime(y, m + 1, x)
if not new_date.weekday() in [0,1,2,3,4]:
x=x+1
continue
else:
break
else:
x=1
while True:
new_date = dt.datetime(y+1, 1, x)
if not new_date.weekday() in [0,1,2,3,4]:
x=x+1
continue
else:
break
ny = new_date.year
nm = new_date.month
nd = new_date.day
nw = new_date.weekday()
text = new_date.strftime("%Y-%m-%d")
wday = day_names[nw]
text_return = f"{text} ({wday})"
return text_return
if __name__ == '__main__':
dates = ['', '2020-07-09', '2020-07-10', '2020-07-11', '2020-07-12']
for d in dates:
c = make_a_date(d)
print("From:", d, "To:", c)
| true
|
8e743ee151161a49a47887ed945d434ddbc9e8ea
|
Python
|
duynguyenhoang/manga-crawler
|
/Scrapers/TruyenTranhTuan.py
|
UTF-8
| 8,009
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
# /usr/bin/python3.5
from __main__ import print_info
from bs4 import BeautifulSoup
from Scrapers.Crawler import Crawler
import gzip
import io
import logging
import re
import urllib.request, urllib.error, urllib.parse
from functools import cmp_to_key
class TruyenTranhTuan(Crawler):
site_name = 'TruyenTranhTuan'
uses_groups = False
def __init__(self, url):
self.url = url
super(TruyenTranhTuan, self).__init__(url)
match_chapter = re.match(r'(.+)truyentranhtuan\.com\/(.+)-chuong-(\d+)', url, flags=re.IGNORECASE)
if match_chapter:
self.chapter_number = match_chapter.group(3)
self.page = BeautifulSoup(self.open_url(self.chapter_series(url)), "html.parser")
self.init_with_chapter = True
logging.debug('Object initialized with chapter')
else:
self.page = BeautifulSoup(self.open_url(url), "html.parser")
self.init_with_chapter = False
self.chapter_number = 0
logging.debug('Object initialized with series')
logging.debug('Object created with ' + url)
def chapter_series(self, url):
"""Returns the series page for an individual chapter URL.
Useful for scraping series metadata for an individual chapter"""
return url
# Returns a dictionary containing chapter number, chapter name and chapter URL.
def chapter_info(self, chapter_data):
logging.debug('Fetching chapter info')
chapter_url = str(chapter_data['href'])
chapter_number = re.search(r'(\w+)-chuong-(\w+)', chapter_url, flags=re.IGNORECASE).group(2)
logging.debug('Manga Name: {}'.format(self.series_info('title')))
logging.debug('Chapter number: {}'.format(chapter_number))
logging.debug('Chapter name: ' + str(chapter_data.text))
logging.debug('Chapter URL: ' + chapter_url)
return {"chapter": chapter_number, "name": chapter_data.text, "url": chapter_url}
# Returns the image URL for the page.
def chapter_images(self, chapter_url):
logging.debug('Fetching chapter images')
image_list = []
page = BeautifulSoup(self.open_url(chapter_url.encode('ascii', 'ignore').decode('utf-8')), "html.parser")
scripts = page.find("div", {"id": "containerRoot"}).find_all('script')
for script in scripts:
if re.search(r'lstImages', script.text):
for match in re.findall(r'lstImages\.push\(".*"\);', script.text):
image_list.append(re.search(r'lstImages\.push\("(.*)"\);', match).group(1))
break
logging.debug('Chapter images: ' + str(image_list))
return image_list
def download_chapter(self, chapter, download_directory, download_name):
files = []
warnings = []
logging.debug('\n************************************************')
logging.debug('Downloading chapter {}.'.format(chapter["url"]))
page = BeautifulSoup(self.open_url(chapter["url"].encode('ascii', 'ignore').decode('utf-8')), "html.parser")
scripts = page.find_all('script')
# TODO
chapter_name = chapter["url"].strip('/').split('/')
chapter_name = chapter_name[len(chapter_name) - 1]
image_name = 1
for script in scripts:
if re.search(r'(var slides_page_path = \[")(.+)("\];)', script.text):
image_url = re.search(r'(var slides_page_path = \[")(.+)("\];)', script.text).group(2)
need_short = 1
elif re.search(r'(var slides_page_url_path = \[")(.+)("\];)', script.text):
image_url = re.search(r'(var slides_page_url_path = \[")(.+)("\];)', script.text).group(2)
need_short = 0
else:
continue
image_urls = image_url.split('","')
if need_short == 1:
image_urls = sorted(image_urls, key=cmp_to_key(cmp_items))
for image_url in image_urls:
if image_url == '':
continue
file_extension = re.search(r'.*\.([A-Za-z]*)', image_url).group(1)
logging.debug('Downloading image ' + image_url)
req = urllib.request.Request(image_url, headers={
'User-agent': self.default_user_agent(),
'Accept-encoding': 'gzip'})
try:
response = urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
print_info('WARNING: Unable to download file ({}).'.format(str(e)))
warnings.append(
'Download of page {}, chapter {:g}, series "{}" failed.'.format(image_name, chapter["chapter"],
self.series_info('title')))
continue
filename = '{}/{}-{:06d}.{}'.format(download_directory, chapter_name, image_name, file_extension)
f = open(filename, 'wb')
f.write(response.read())
f.close()
logging.debug('Saved image ' + filename)
files.append(filename)
image_name += 1
break
filename = download_directory + '/' + download_name
self.zip_files(files, filename)
logging.debug('Finished {} Chapter'.format(chapter_name))
return warnings
# Function designed to create a request object with correct headers, open the URL and decompress it if it's gzipped.
def open_url(self, url):
logging.debug("Opening URL: " + url)
headers = {
'User-agent': self.default_user_agent(),
'Accept-encoding': 'gzip', 'Cookie': 'vns_Adult=yes'}
req = urllib.request.Request(url, headers=headers)
response = urllib.request.urlopen(req)
if response.info().get('Content-Encoding') == 'gzip':
buf = io.BytesIO(response.read())
data = gzip.GzipFile(fileobj=buf, mode="rb")
return data
else:
return response.read()
def series_chapters(self):
chapters = []
if self.init_with_chapter:
logging.debug('Fetching single chapters')
chapters.append(
{"chapter": self.chapter_number, "name": "Chapter " + str(self.chapter_number), "url": self.url}
)
else:
# If the object was initialized with a chapter, only return the chapters.
logging.debug('Fetching series chapters')
chapter_row = self.page.find("div", {"id": "manga-chapter"}).find_all("span", {"class": "chapter-name"})
for chapter in chapter_row:
chapters.append(self.chapter_info(chapter.find("a")))
return chapters[::-1]
def series_info(self, search):
def title():
if not self.init_with_chapter:
return self.page.find("h1", {"itemprop": "name"}).text.strip()
else:
return self.page.find("a", {"class": "mangaName"}).text.strip()
def description():
if not self.init_with_chapter:
return self.page.find("div", {"id": "manga-summary"}).find("p").text.strip('\n')
else:
# @todo Get for specific chapter
return ""
def author():
return self.page.select('a[href*="/danh-sach-truyen/"]')[0].text.title()
options = {"title": title, "description": description, "author": author}
return options[search]()
def cmp_items(a, b):
image_index_a = re.search(r'.*-([0-9]*)\.([A-Za-z]*)', a).group(1)
image_index_a = int(image_index_a)
image_index_b = re.search(r'.*-([0-9]*)\.([A-Za-z]*)', b).group(1)
image_index_b = int(image_index_b)
if image_index_a > image_index_b:
return 1
elif image_index_a == image_index_b:
return 0
else:
return -1
| true
|
908e083615b9bc053c2edec28af8c732c353e588
|
Python
|
kyleperales/exam-assembler
|
/Parser/Practice Notes/basics.py
|
UTF-8
| 740
| 4.53125
| 5
|
[] |
no_license
|
#<- start with this for comments
#variables
#greet = "Hello"
#print(greet)
#computation
#print (10 * 10)
#print(1 + 1)
#inputs
#print("Please provide a name")
#name = input()
#print("Please provide your age")
#age = int(input())
#print("Welcome {0}, aged {1}".format(name, age))
# for loops
#for i in range(0, 20):
# print("i is now {0}".format(i))
#arrays
#arr = [1,2,3,4,5,6]
#print(arr[-1])
#arr = [1, "hello", 2]
#print(arr[1])
#if conditions
import random
number = random.randint(1,10)
tries = 1
print("I'm thinking of a number between 1 and 10")
guess = int(input("Have a guess: "))
if guess > number:
    print("Guess lower...")
if guess < number:
    print("Guess higher...")
if guess == number:
    print("You got it in {0} tries!".format(tries))
| true
|
517a6943216ec9c36f064e3d3b2fef75eed4e6f7
|
Python
|
cob0013/Kattis-Problems
|
/Python/Zamka.py
|
UTF-8
| 407
| 3.359375
| 3
|
[] |
no_license
|
def sum(n):
sum = 0
for digit in n:
sum += int(digit)
return sum
def main():
l = int(input())
d = int(input())
x = int(input())
output = []
for i in range(l, d + 1):
if sum(str(i)) == x:
output.append(str(i))
break
for i in range(d, l - 1, -1):
if sum(str(i)) == x:
output.append(str(i))
break
print("\n".join((output)))
if __name__ == "__main__":
main()
| true
|
7b74b79028d3d5d0786bfd86485641e0e67b0865
|
Python
|
omerfarukfidan/VSCode-Python-Basics
|
/01_python_giris/0117_degiskenler.py
|
UTF-8
| 141
| 3.234375
| 3
|
[] |
no_license
|
sehir = "İstabul"
sehir_2 = "Danimarka"
mesafe = "2600"
birim = "km"
print(sehir + " ile " + sehir_2 + " arası " + mesafe + " " + birim)
| true
|
9f062c673fc392307cad4c120be6847acb1a4d7d
|
Python
|
liming2013/python-resourses
|
/Web_Spider/urlopen.py
|
UTF-8
| 153
| 2.515625
| 3
|
[] |
no_license
|
#! -*- encoding:utf-8 -*-
import urllib
import urllib2
response = urllib2.urlopen('http://www.baidu.com/')
html = response.read()
print html
| true
|
5961340dc5681e6ecbed5b73bf0e83c79a0144a0
|
Python
|
FrankYLai/nlp_predictor
|
/character_model/interface.py
|
UTF-8
| 768
| 2.75
| 3
|
[] |
no_license
|
from includes import *
import preprocessing
def main():
infile = open("model",'rb')
model = pickle.load(infile)
    infile.close()
while True:
print("enter input text: ")
intext = input()
print(intext)
for i in range(30):
#encode
encoded = [preprocessing.MAP[char] for char in intext]
encoded = pad_sequences([encoded], maxlen=29, truncating='pre')
prediction = model.predict_classes(encoded, verbose=0)
output = ''
for char, index in preprocessing.MAP.items():
if index == prediction:
output = char
break
intext+=output
print (intext)
if __name__ == "__main__":
main()
| true
|
28bf0d90eded6bf727d0ddec4f84aded70364720
|
Python
|
MinasA1/AirBnB_clone_v3
|
/api/v1/views/places.py
|
UTF-8
| 1,970
| 2.828125
| 3
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/usr/bin/python3
"""api.v1.views.place"""
from api.v1.views import app_views
from flask import jsonify, request, abort
from models import storage
from models.place import Place
@app_views.route('/places/<place_id>', methods=['GET', 'DELETE', 'PUT'])
@app_views.route('/places', methods=['GET', 'POST'],
defaults={'place_id': None})
def get_places(place_id):
"""method for retrieving, deleting, creating, updating place objects"""
if place_id:
place = storage.get(Place, place_id) # retrieves obj
if place is None:
return jsonify({'error': 'Not found'}), 404
if request.method == 'DELETE':
storage.delete(place) # deletes
storage.save()
return jsonify({}), 200
elif request.method == 'PUT':
js = request.get_json()
if js is None:
return jsonify({'error': 'Not a JSON'}), 400
js.pop('id', None)
js.pop('user_id', None)
js.pop('city_id', None)
js.pop('created_at', None)
js.pop('updated_at', None)
for key, value in js.items():
setattr(place, key, value) # updates
place.save()
return jsonify(place.to_dict()), 200
else:
return jsonify(place.to_dict()), 200
if request.method == 'POST':
js = request.get_json()
if js is None:
return jsonify({'error': 'Not a JSON'}), 400
if js.get('user_id', None) is None:
return jsonify({'error': 'Missing user_id'}), 400
if js.get('name', None) is None:
return jsonify({'error': 'Missing name'}), 400
obj = Place(**js) # creates
obj.save()
return jsonify(obj.to_dict()), 201
places = []
places_obj = storage.all('Place') # retrieves list obj
for obj in places_obj:
places.append(places_obj[obj].to_dict())
return jsonify(places)
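
# Hedged usage sketch (added; not part of the original view). Assuming the
# app_views blueprint is registered under '/api/v1' and the API listens on
# 0.0.0.0:5000, the route above can be exercised with, for example:
#
#   curl http://0.0.0.0:5000/api/v1/places/<place_id>
#   curl -X PUT http://0.0.0.0:5000/api/v1/places/<place_id> \
#        -H 'Content-Type: application/json' -d '{"name": "New name"}'
#   curl -X POST http://0.0.0.0:5000/api/v1/places \
#        -H 'Content-Type: application/json' \
#        -d '{"user_id": "<user_id>", "name": "Cozy flat"}'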
| true
|
8cd52f5e55570d3fcaa859a1b94385cc92e6ad30
|
Python
|
SquareKnight/onb-math
|
/solutions/euler040.py
|
UTF-8
| 857
| 4.15625
| 4
|
[] |
no_license
|
"""Champernowne's constant
limit;last significant digit;int;1000000
#Champernowne
An irrational decimal fraction is created by concatenating the positive integers:
0.123456789101112131415161718192021...
It can be seen that the 12th digit of the fractional part is 1.
If dn represents the nth digit of the fractional part, find the value of the following expression.
d1 × d10 × d100 × d1000 × d10000 × d100000 × d1000000
"""
def attempt_1(limit):
digs = [limit]
while limit != 1:
limit /= 10
digs.append(int(limit))
champ, i = "0", 1
while len(champ) <= max(digs):
champ += str(i)
i += 1
p = 1
for d in digs:
p *= int(champ[d])
return p
def run(limit):
return attempt_1(limit)
if __name__ == '__main__':
print(run(1000000))
| true
|
cd3444f89950cb7c62be01472638bd960ac4482b
|
Python
|
dirrgang/esp32-aqs
|
/main.py
|
UTF-8
| 3,946
| 2.890625
| 3
|
[] |
no_license
|
import time
import adafruit_sgp30 # Library for the usage of the SGP30 VOC sensor
import dht # Library for the usage of the DHT22 temperature/humidity sensor
import json
import os
from machine import Pin, I2C
import logging
import logging.handlers as handlers
class BaselineValues:
def __init__(self, sensor):
self.values = {}
self.values["co2eq_base"] = 0
self.values["tvoc_base"] = 0
self.sensor = sensor
def restore(self):
# Initiialize SGP30 sensor and restore baseline variables
logging.info(
"Initiating SGP30 sensor and attempting to restore baseline values.")
self.sensor.iaq_init()
with open("data_file.json") as infile:
self.values = json.load(infile)
self.sensor.set_iaq_baseline(
self.values["co2eq_base"], self.values["tvoc_base"])
logging.info("Restored baseline values co2eq %d and tvoc %d",
self.values["co2eq_base"], self.values["tvoc_base"])
def store(self):
self.values["co2eq_base"] = self.sensor.baseline_co2eq
self.values["tvoc_base"] = self.sensor.baseline_tvoc
with open("data_file.json", "w") as outfile:
json.dump(self.values, outfile)
logging.info("Stored baselines to flash.")
def measure(time_delay):
def checkAirRequirement(log, co2eq_warning_level):
if log["co2eq"] >= co2eq_warning_level and led_onboard.value() != 0:
logging.info("CO2eq level exceeded %d ppm!", co2eq_warning_level)
led_onboard.value(0)
        elif log["co2eq"] < co2eq_warning_level and led_onboard.value() != 1:
logging.info("CO2eq back below %d ppm.", co2eq_warning_level)
led_onboard.value(1)
# Before the DHT22 values can be used measure() needs to be called
# seperately - limited to once every 2 seconds!
dht.measure()
# Call values of the sensors and write them into a dictionary to allow
# storage as JSON later. SGP30 does not need a separate measure call
log = {"temperature": dht.temperature(), "humidity": dht.humidity(),
"co2eq": sgp30.co2eq, "tvoc": sgp30.tvoc}
# Append dictionary log as JSON
with open("history.json", "a+") as outfile:
json.dump(log, outfile)
# If CO2eq levels exceed 1500 ppm, turn on LED to signal it's time to
# crack open a window
checkAirRequirement(log, co2eq_warning_level)
# Time between measurements in seconds
time_delay = 2
# Time between saving baseline values to flash in seconds - recommended 1h
baseline_save_delay = 3600
# co2eq level in ppm after which the warning is triggered
co2eq_warning_level = 1500
# Set up logging
logger = logging.getLogger('aqs')
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
with open("messages.log", "a+") as outfile:
outfile.write("System Start.")
logHandler = handlers.RotatingFileHandler(
'messages.log', maxBytes=256, backupCount=2)
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)
# construct an I2C bus
i2c = I2C(scl=Pin(22), sda=Pin(21), freq=100000)
# Create SGP30 sensor object and restore baseline values from flash
sgp30 = adafruit_sgp30.Adafruit_SGP30(i2c)
baseline = BaselineValues(sgp30)
baseline.restore()
# Create temperature sensor object
dht = dht.DHT22(Pin(0))
# Create LED object for on-board LED
led_onboard = Pin(5, Pin.OUT)
led_onboard.value(1)
elapsed_sec = 0
while True:
measure(time_delay)
if elapsed_sec % baseline_save_delay == 0 and elapsed_sec != 0:
# According to the sensor documentation of the SGP30 it automatically
# calibrates itself depending on the environment. These values should
# be stored once every hour and restored after boot
elapsed_sec = 0
baseline.store()
# Wait - minimum 2 seconds before DHT22 sensor can be measured again
time.sleep(time_delay)
elapsed_sec += time_delay
| true
|
2177f9445f9dbf2f4b3cac7f745e7faf9065241c
|
Python
|
aandyberg/school-projects
|
/Chalmers python/lab2/gamegraphics.py
|
UTF-8
| 8,605
| 3.5
| 4
|
[] |
no_license
|
#------------------------------------------------------
#This module contains all graphics-classes for the game
#Most classes are wrappers around model classes, e.g.
# * GraphicGame is a wrappoer around Game
# * GraphicPlayer is a wrapper around Player
# * GraphicProjectile is a wrapper around Projectile
#In addition there are two UI-classes that have no
#counterparts in the model:
# * Button
# * InputDialog
#------------------------------------------------------
# This is the only place where graphics should be imported!
from graphics import *
# TODO: There needs to be a class called GraphicGame here.
# Its constructor should take only a Game object.
# TODO: In addition to the methods of Game, GraphicGame needs to have a getWindow method that returns the main GraphWin object the game is played in
# HINT: Look at the other classes in this file, the GraphicGame class should "wrap around" a Game object the same way GraphicPlayer wraps around a Player
# HINT: These lines are good for creating a window:
# win = GraphWin("Cannon game" , 640, 480, autoflush=False)
# win.setCoords(-110, -10, 110, 155)
# HINT: Don't forget to call draw() on every component you create, otherwise they will not be visible
# HINT: You need to get the Players from the Game object (the model), wrap them into GraphicPlayers and store them, and all get-methods for players (e.g. getCurrentPlayer) must return the Graphical versions
class GraphicGame:
def __init__(self, game):
win = GraphWin("Cannon game" , 640, 480, autoflush=False)
win.setCoords(-110, -10, 110, 155)
Line(Point(-110,0), Point(110,0)).draw(win)
self.win = win
self.model = game
self.model.players[0] = GraphicPlayer(self.model.players[0], self, self.win)
self.model.players[1] = GraphicPlayer(self.model.players[1], self, self.win)
def getPlayers(self):
return self.model.getPlayers()
def getCurrentPlayer(self):
return self.model.getCurrentPlayer()
def getCurrentPlayerNumber(self):
return self.model.getCurrentPlayerNumber()
def getOtherPlayer(self):
return self.model.getOtherPlayer()
def nextPlayer(self):
return self.model.nextPlayer()
def getCurrentWind(self):
return self.model.getCurrentWind()
def setCurrentWind(self, wind):
self.model.setCurrentWind(wind)
def newRound(self):
self.model.newRound()
def getCannonSize(self):
return self.model.getCannonSize()
def getBallSize(self):
return self.model.getBallSize()
def getWindow(self):
return self.win
class GraphicPlayer:
# TODO: We need a constructor here! The constructor needs to take a Player object as parameter and store it in self.player for the methods below to work.
# HINT: The constructor should create and draw the graphical elements of the player (score and cannon)
# HINT: The constructor probably needs a few additional parameters e.g. to access the game window.
def __init__(self, player, ggame, win):
self.player = player
self.ggame = ggame
self.gProjectile = None
self.win = win
self.rectangle = Rectangle(Point(self.getX() - (self.ggame.getCannonSize()/2), 0), Point(self.getX() + (self.ggame.getCannonSize()/2), 10))
self.rectangle.setFill(self.getColor())
self.rectangle.draw(win)
self.scoreText = Text(Point(self.getX(), -5), "Score: " + str(self.getScore()))
self.scoreText.draw(win)
def fire(self, angle, vel):
# Fire the cannon of the underlying player object
proj = self.player.fire(angle, vel)
proj = GraphicProjectile(proj, self.getColor(), self.ggame, self.win)
#TODO: We need to undraw the old GraphicProjectile for this player (if there is one).
if self.gProjectile != None:
self.gProjectile.undraw()
self.gProjectile = proj
return proj
def getAim(self):
return self.player.getAim()
def getColor(self):
return self.player.getColor()
def getX(self):
return self.player.getX()
def getScore(self):
return self.player.getScore()
def projectileDistance(self, proj):
return self.player.projectileDistance(proj)
def increaseScore(self):
self.player.increaseScore()
self.scoreText.setText("Score: " + str(self.getScore()))
""" A graphic wrapper around the Projectile class (adapted from ShotTracker in book)"""
class GraphicProjectile:
# TODO: This one also needs a constructor, and it should take a Projectile object as parameter and store it in self.proj.
# Hint: We are also going to need access to the game window
# Hint: There is no color attribute in the Projectile class, either it needs to be passed to the constructor here or Projectile needs to be modified.
def __init__(self, proj, color, ggame, win):
self.proj = proj
self.ggame = ggame
self.ball = Circle(Point(proj.getX(), proj.getY()), self.ggame.getBallSize())
self.ball.setFill(color)
self.ball.draw(win)
def update(self, dt):
# update the projectile
self.proj.update(dt)
center = self.ball.getCenter()
dx = self.proj.getX() - center.getX()
dy = self.proj.getY() - center.getY()
self.ball.move(dx,dy)
def getX(self):
return self.proj.getX()
def getY(self):
return self.proj.getY()
def isMoving(self):
return self.proj.isMoving()
def undraw(self):
self.ball.undraw()
""" A somewhat specific input dialog class (adapted from the book) """
class InputDialog:
""" Takes the initial angle and velocity values, and the current wind value """
def __init__ (self, angle, vel, wind):
self.win = win = GraphWin("Fire", 200, 300)
win.setCoords(0,4.5,4,.5)
Text(Point(1,1), "Angle").draw(win)
self.angle = Entry(Point(3,1), 5).draw(win)
self.angle.setText(str(angle))
Text(Point(1,2), "Velocity").draw(win)
self.vel = Entry(Point(3,2), 5).draw(win)
self.vel.setText(str(vel))
Text(Point(1,3), "Wind").draw(win)
self.height = Text(Point(3,3), 5).draw(win)
self.height.setText("{0:.2f}".format(wind))
self.fire = Button(win, Point(1,4), 1.25, .5, "Fire!")
self.fire.activate()
self.quit = Button(win, Point(3,4), 1.25, .5, "Quit")
self.quit.activate()
""" Runs a loop until the user presses either the quit or fire button """
def interact(self):
while True:
pt = self.win.getMouse()
if self.quit.clicked(pt):
return "Quit"
if self.fire.clicked(pt):
return "Fire!"
""" Returns the current values of (angle, velocity) as entered by the user"""
def getValues(self):
a = float(self.angle.getText())
v = float(self.vel.getText())
return a,v
def close(self):
self.win.close()
""" A general button class (from the book) """
class Button:
"""A button is a labeled rectangle in a window.
It is activated or deactivated with the activate()
and deactivate() methods. The clicked(p) method
returns true if the button is active and p is inside it."""
def __init__(self, win, center, width, height, label):
""" Creates a rectangular button, eg:
qb = Button(myWin, Point(30,25), 20, 10, 'Quit') """
w,h = width/2.0, height/2.0
x,y = center.getX(), center.getY()
self.xmax, self.xmin = x+w, x-w
self.ymax, self.ymin = y+h, y-h
p1 = Point(self.xmin, self.ymin)
p2 = Point(self.xmax, self.ymax)
self.rect = Rectangle(p1,p2)
self.rect.setFill('lightgray')
self.rect.draw(win)
self.label = Text(center, label)
self.label.draw(win)
self.deactivate()
def clicked(self, p):
"RETURNS true if button active and p is inside"
return self.active and \
self.xmin <= p.getX() <= self.xmax and \
self.ymin <= p.getY() <= self.ymax
def getLabel(self):
"RETURNS the label string of this button."
return self.label.getText()
def activate(self):
"Sets this button to 'active'."
self.label.setFill('black')
self.rect.setWidth(2)
self.active = 1
def deactivate(self):
"Sets this button to 'inactive'."
self.label.setFill('darkgrey')
self.rect.setWidth(1)
self.active = 0
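
# Hedged usage sketch (added; not part of the lab skeleton). The model module
# name and constructor arguments below are illustrative only - the real Game
# class lives in the accompanying model file for this lab:
#
#   from gamemodel import Game
#   game = GraphicGame(Game(...))       # wrap the model in its graphical view
#   player = game.getCurrentPlayer()    # a GraphicPlayer
#   proj = player.fire(45, 40)          # returns a GraphicProjectile
#   while proj.isMoving():
#       proj.update(1 / 50)             # advance the model and move the circle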
| true
|
6e3d4abef7d1c6bc20c6774d306892cb6525ebe2
|
Python
|
JuanRaveC/AgentSearch
|
/main.py
|
UTF-8
| 2,632
| 2.84375
| 3
|
[] |
no_license
|
from crawler import Crawler
from utils import *
from index_agent import IndexAgent
from process_agent import ProcessAgent
from join_agent import JoinAgent
from queue import Queue
import threading
import tkinter as tk # python 3
import pygubu
FOLDER_NAME = 'HTML'
#creación de colas
process_agent_queue = Queue()
index_agent_queue = Queue()
join_agent_queue = Queue()
class Application:
def __init__(self, master):
        # 1: create a builder
self.builder = builder = pygubu.Builder()
        # 2: load the ui file
builder.add_from_file('mainframe.ui')
        # 3: create the widgets
self.mainwindow = builder.get_object('Frame_1', master)
        # connect callbacks and let the UI event loop keep running
builder.connect_callbacks(self)
    # read the contents of the text field
def retrieve_input(self):
text_input_object = self.builder.get_variable('search_input')
keyword_input = text_input_object.get()
        # push it onto the queue
join_agent_queue.put(keyword_input)
text_input_object.set(' ')
    # search button click handler
def search_on_click(self):
Application.retrieve_input(self)
if __name__ == '__main__':
    print('Creating directory: ' + FOLDER_NAME)
create_data_dir(FOLDER_NAME)
flag = True
    # one-time (singleton) initialization
if flag:
        # join (integrator) agent
join_agent_instance = JoinAgent(join_agent_queue, index_agent_queue, process_agent_queue, FOLDER_NAME)
join_agent_thread = threading.Thread(target=join_agent_instance.work)
join_agent_thread.daemon = True
join_agent_thread.start()
        # index agent
index_agent_instance = IndexAgent(index_agent_queue, join_agent_queue, process_agent_queue)
index_agent_thread = threading.Thread(target=index_agent_instance.work)
index_agent_thread.daemon = True
index_agent_thread.start()
        # process agent
process_agent_instance = ProcessAgent(process_agent_queue, index_agent_queue, join_agent_queue, FOLDER_NAME)
process_agent_thread = threading.Thread(target=process_agent_instance.work)
process_agent_thread.daemon = True
process_agent_thread.start()
        # create the main crawler
crawler = threading.Thread(target=Crawler.work)
crawler.daemon = True
crawler.start()
        # never initialize again
flag = False
    # start the main window
root = tk.Tk()
app = Application(root)
root.mainloop()
| true
|
60e752c8a45c5d5d82ed74db7595ee76253d4efd
|
Python
|
Environmental-Informatics/06-graphing-data-with-python-aggarw82
|
/number_visualize.py
|
UTF-8
| 2,016
| 3.484375
| 3
|
[] |
no_license
|
""" Program to read data from file, process it
and draw graphs using matplotlib
The program should read the input and output
file names from system arguements
Author: Varun Aggarwal
Username: aggarw82
Github: https://github.com/Environmental-Informatics/06-graphing-data-with-python-aggarw82
"""
# import library
import numpy as np
import matplotlib.pyplot as plt
import sys
# checking for valid arguments
if len(sys.argv) != 3:
print("Incorrect Syntax")
print("Usage: python number_visualize.py [Source file] [Destination File] ... ")
sys.exit()
# reading arguements
inFileName = sys.argv[1]
outFilename = sys.argv[2]
# read file
data = np.genfromtxt(inFileName,
dtype=['int','float','float','float','float','float','float'],
names=True,
delimiter='\t',
autostrip=True)
# reserving space with subplot
fig = plt.figure(figsize=(20,20)) # set figure size
ax1 = fig.add_subplot(311) # subplot 1
ax2 = fig.add_subplot(312) # subplot 2
ax3 = fig.add_subplot(313) # subplot 3
# Plot 1 - specify color with argument in ax1.plot
ax1.plot(data['Year'],data['Mean'],'k')
ax1.plot(data['Year'],data['Max'],'r')
ax1.plot(data['Year'],data['Min'],'b')
ax1.legend(['Mean','Max','Min']) # legend
ax1.set_xlabel('Year')
ax1.set_ylabel('Streamflow (cfs)')
ax1.set_xticks(data['Year'][np.linspace(0, len(data['Year']) - 1, 12, dtype='int')]) # xticks for year
# Plot 2 - specify symbol with arguement in ax2.plot
ax2.plot(data['Year'],data['Tqmean']*100,'o') # symbol is a circle
ax2.set_xlabel('Year')
ax2.set_ylabel('Tqmean (%)')
ax2.set_xticks(data['Year'][np.linspace(0, len(data['Year']) - 1, 12, dtype='int')]) # xticks for year
# Plot 3
ax3.bar(data['Year'],data['RBindex'])
ax3.set_xlabel('Year')
ax3.set_ylabel('R-B Index (ratio)')
ax3.set_xticks(data['Year'][np.linspace(0, len(data['Year']) - 1, 15, dtype='int')]) # xticks for year
# saving figure and pdf
plt.savefig(outFilename)
| true
|
a046ce17650a52b26501a7449876343d431ce393
|
Python
|
scout719/adventOfCode
|
/2018/adventOfCode.py
|
UTF-8
| 85,633
| 2.71875
| 3
|
[] |
no_license
|
# pylint: disable=unused-import
# pylint: disable=import-error
# pylint: disable=wrong-import-position
import functools
import math
import multiprocessing as mp
import os
import re
import string
import sys
import time
from collections import Counter, deque
import heapq
from enum import IntEnum
from struct import pack
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
print(FILE_DIR)
sys.path.insert(0, FILE_DIR + "/../")
sys.path.insert(0, FILE_DIR + "/../../")
from common.utils import execute_day, read_input, main # NOQA: E402
# pylint: enable=unused-import
# pylint: enable=import-error
# pylint: enable=wrong-import-position
""" DAY 1 """
def day1_split_change(change):
sign = change[0]
value = int(change[1:])
if sign == "+":
return value
else:
return -value
def day1_1(data):
frequency = 0
for change in data:
frequency += day1_split_change(change)
return frequency
def day1_2(data):
parsed_data = [day1_split_change(c) for c in data]
frequency = 0
found_frequencies = {"0": True}
i = 0
while True:
frequency += parsed_data[i]
key = str(frequency)
if key in found_frequencies:
return frequency
else:
found_frequencies[key] = True
i += 1
i = i % len(data)
""" DAY 2 """
def day2_sort_letters(box_id):
letters = [l for l in box_id]
return sorted(letters)
def day2_count_twice_thrice(box_id):
sorted_letters = day2_sort_letters(box_id)
counter = 0
letter = ""
has_twice = ""
has_thrice = ""
for curr_letter in sorted_letters:
if letter == curr_letter:
counter += 1
else:
if counter == 3:
has_thrice = letter
elif counter == 2:
has_twice = letter
letter = curr_letter
counter = 1
if counter == 3 and has_thrice == "":
has_thrice = letter
if counter == 2 and has_twice == "":
has_twice = letter
return (has_twice, has_thrice)
def day2_1(data):
twice_counter = 0
thrice_counter = 0
for box_id in data:
(twice, thrice) = day2_count_twice_thrice(box_id)
if twice != "":
twice_counter += 1
if thrice != "":
thrice_counter += 1
return twice_counter * thrice_counter
def day2_letter_difference(box1, box2):
if len(box1) != len(box2):
raise ValueError(
"Boxes with different lengths: {0} & {1}".format(box1, box2))
counter_diff = 0
common_letters = ""
for i, _ in enumerate(box1):
if box1[i] != box2[i]:
counter_diff += 1
else:
common_letters += box1[i]
return (counter_diff, common_letters)
def day2_2(data):
for box_id1 in data:
for box_id2 in data:
(diff, letters) = day2_letter_difference(box_id1, box_id2)
if diff == 1:
return letters
raise ValueError
""" DAY 3 """
def day3_build_fabric(size):
return [[0 for y in range(size)] for x in range(size)]
def day3_process_claim(line):
# #1 @ 1,3: 4x4
return tuple([int(x) for x in re.findall(r"#(\d+) @ (\d+),(\d+): (\d+)x(\d+)", line)[0]])
def day3_fill_claim(claim, fabric):
(_, left, top, width, height) = claim
for i in range(width):
for j in range(height):
fabric[top + j][left + i] += 1
def day3_process_position(acc, pos):
if pos > 1:
return acc + 1
return acc
def day3_process_row(acc, row):
return functools.reduce(day3_process_position, row, acc)
def day3_1(data):
size = 1000
#data, size = (read_input(2018, 301), 8)
fabric = day3_build_fabric(size)
for claim in data:
day3_fill_claim(day3_process_claim(claim), fabric)
return functools.reduce(day3_process_row, fabric, 0)
def day3_check_prestine(claim, fabric):
(id_, left, top, width, height) = claim
for i in range(width):
for j in range(height):
if fabric[top + j][left + i] > 1:
return ""
return id_
def day3_2(data):
size = 1000
#data, size = (read_input(2018, 301), 8)
fabric = day3_build_fabric(size)
for claim in data:
day3_fill_claim(day3_process_claim(claim), fabric)
for claim in data:
res = day3_check_prestine(day3_process_claim(claim), fabric)
if res != "":
return res
raise ValueError
""" DAY 4 """
def day4_process_log(log):
shift = re.findall(r"Guard #(\d+) begins shift", log)
if shift:
return ("shift", int(shift[0]))
wake = re.findall(r"(wakes up)", log)
if wake:
return ("wake", 0)
asleep = re.findall(r"(falls asleep)", log)
if asleep:
return ("asleep", 0)
raise ValueError
def day4_parse_and_sort(data):
# [1518-07-18 23:57] Guard #157 begins shift
# [1518-04-18 00:44] wakes up
# [1518-10-26 00:20] falls asleep
parsed_data = [re.findall(
r"\[(\d+)-(\d+)-(\d+) (\d+):(\d+)\] (.+)", line)[0] for line in data]
parsed_data.sort(key=lambda elem: str(
elem[1]) + str(elem[2] + str(elem[3]) + str(elem[4])))
parsed_data = [
tuple([
int(log[0]),
int(log[1]),
int(log[2]),
int(log[3]),
int(log[4]),
day4_process_log(log[5])])
for log in parsed_data]
return parsed_data
def day4_process(data):
ordered_log = day4_parse_and_sort(data)
history = {}
last_guard = -1
last_asleep = -1
sleeping = False
for (_, _, _, _, minute, log) in ordered_log:
if log[0] == "shift":
last_guard = log[1]
if not last_guard in history:
history[last_guard] = [0 for i in range(60)]
elif log[0] == "wake":
if sleeping:
for i in range(last_asleep, minute):
history[last_guard][i] += 1
sleeping = False
elif log[0] == "asleep":
sleeping = True
last_asleep = minute
return history
def day4_1(data):
#data = read_input(2018, 401)
history = day4_process(data)
sleepiest_guard = sorted(
[(k, sum(v)) for k, v in history.items()],
key=lambda elem: elem[1],
reverse=True)[0][0]
m = 0
for i in range(60):
if history[sleepiest_guard][i] > history[sleepiest_guard][m]:
m = i
return m * sleepiest_guard
def day4_2(data):
#data = read_input(2018, 401)
history = day4_process(data)
sleepiest_guard = sorted(
[(k, max(v)) for k, v in history.items()],
key=lambda elem: elem[1],
reverse=True)[0][0]
m = 0
for i in range(60):
if history[sleepiest_guard][i] > history[sleepiest_guard][m]:
m = i
return m * sleepiest_guard
""" DAY 5 """
def day5_should_destroy(first, second):
return first.lower() == second.lower() and first != second
def day5_colapse(polymer):
new_polymer = list(polymer[:])
i = 0
while i < len(new_polymer) - 1:
if day5_should_destroy(new_polymer[i], new_polymer[i + 1]):
del new_polymer[i]
del new_polymer[i]
                i = max(i - 1, 0)  # step back to recheck, but never below index 0
else:
i += 1
return new_polymer
def day5_collapse_and_count(polymer):
reduced_polymer = day5_colapse(polymer)
return len(reduced_polymer)
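
# Example from the 2018 day 5 puzzle statement (added as a sanity check):
# "dabAcCaCBAcCcaDA" fully reacts down to 10 remaining units.
assert day5_collapse_and_count(list("dabAcCaCBAcCcaDA")) == 10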
def day5_1(data):
#data = read_input(2018, 501)
polymer = list(data[0])
return day5_collapse_and_count(polymer)
def day5_remove_unit(polymer, letter):
return [c for c in polymer if c != letter and c != letter.upper()]
def day5_2(data):
#data = read_input(2018, 501)
polymer = data[0]
letters = list(string.ascii_lowercase)
reduced_polymer = day5_colapse(list(polymer))
min_val = sys.maxsize
for letter in letters:
temp_polymer = day5_remove_unit(reduced_polymer, letter)
size = day5_collapse_and_count(temp_polymer)
if size < min_val:
min_val = size
return min_val
""" DAY 6 """
def day6_debug_grid(grid):
for row in grid:
row_str = ""
for pos in row:
char = str(pos[0])
if pos[1] == 0 and pos[0] != "_":
char = chr(ord('A') + pos[0])
row_str += " " + char
print(row_str)
print()
def day6_fill_grid(grid, coordinates):
for i, _ in enumerate(grid):
for j, _ in enumerate(grid[i]):
dists = [abs(coordinate[1] - i) + abs(coordinate[0] - j)
for coordinate in coordinates]
min_dist = sys.maxsize
min_id = 0
for id_, _ in enumerate(dists):
if dists[id_] < min_dist:
min_id = id_
min_dist = dists[id_]
elif dists[id_] == min_dist:
min_id = "_"
min_dist = dists[id_]
grid[i][j] = (min_id, min_dist)
def day6_1(data):
#data = read_input(2018, 601)
coordinates = [(int(entry.split(", ")[0]), int(
entry.split(", ")[1])) for entry in data]
max_x = max([coordinate[0] for coordinate in coordinates])
max_y = max([coordinate[1] for coordinate in coordinates])
grid = [[(".", 0) for j in range(max_x + 1)] for i in range(max_y + 1)]
day6_fill_grid(grid, coordinates)
# day6_debug_grid(grid)
ids = [i for i in range(len(coordinates))]
for i, _ in enumerate(grid):
if i in (0, len(grid) - 1):
for j, _ in enumerate(grid[i]):
value = grid[i][j][0]
if value in ids:
ids.remove(value)
else:
value = grid[i][0][0]
if value in ids:
ids.remove(value)
value = grid[i][len(grid[i]) - 1][0]
if value in ids:
ids.remove(value)
max_val = 0
for id_ in ids:
temp_count = 0
for i, _ in enumerate(grid):
for j, _ in enumerate(grid[i]):
if grid[i][j][0] == id_:
temp_count += 1
if temp_count > max_val:
max_val = temp_count
return max_val
def day6_2(data):
size = 10000
#data, size = (read_input(2018, 601), 32)
coordinates = [(int(entry.split(", ")[0]), int(
entry.split(", ")[1])) for entry in data]
max_x = max([coordinate[0] for coordinate in coordinates])
max_y = max([coordinate[1] for coordinate in coordinates])
counter = 0
for i in range(max_y + 1):
for j in range(max_x + 1):
dists = functools.reduce(
lambda acc, coordinate, i_=i, j_=j:
acc + abs(coordinate[1] - i_) + abs(coordinate[0] - j_),
coordinates,
0)
if dists < size:
counter += 1
return counter
""" DAY 7 """
def day7_parse_inst(data):
# Step C must be finished before step A can begin.
parsed_data = [
re.findall(
r"Step ([A-Z]) must be finished before step ([A-Z]) can begin.", line)[0]
for line in data]
dependencies = {}
letters = []
for (req, step) in parsed_data:
if not step in dependencies:
dependencies[step] = []
if not req in letters:
letters.append(req)
dependencies[step].append(req)
for step, reqs in dependencies.items():
reqs.sort()
return (dependencies, letters)
def day7_inst_order(dependencies, letters):
stack = []
for letter in letters:
if not letter in dependencies.keys():
stack.append(letter)
stack.sort()
completed = []
while stack:
next_letter = stack.pop(0)
completed.append(next_letter)
for step, deps in dependencies.items():
if not step in completed and \
not step in stack and \
all([dep in completed for dep in deps]):
stack.append(step)
stack.sort()
return completed
def day7_1(data):
#data = read_input(2018, 701)
(dependencies, _) = day7_parse_inst(data)
return ''.join(day7_inst_order(dependencies, list(string.ascii_uppercase)))
def day7_worker(worker, completed):
if not worker is None:
(step, duration) = worker
duration -= 1
worker = (step, duration)
if duration == 0:
worker = None
completed.append(step)
return (completed, worker)
def day7_inst_order2(dependencies, letters, step_duration, workers):
stack = []
for letter in letters:
if not letter in dependencies.keys():
stack.append(
(letter, (ord(letter) - ord("A") + 1) + step_duration))
stack.sort(key=lambda e: (e[1], e[0]))
completed = []
workers = [None for i in range(workers)]
counter = 0
while stack or any([not w is None for w in workers]):
counter += 1
for i, _ in enumerate(workers):
if workers[i] is None and stack:
workers[i] = stack.pop(0)
completed, worker = day7_worker(workers[i], completed)
workers[i] = worker
for step, deps in dependencies.items():
if not step in completed and \
all([let[0] != step for let in stack]) and \
all([dep in completed for dep in deps]) and \
not any([not w is None and w[0] == step for w in workers]):
stack.append(
(step, (ord(step) - ord("A") + 1) + step_duration))
stack.sort()
return counter
def day7_2(data):
step_duration = 60
workers = 5
letters = list(string.ascii_uppercase)
# data, step_duration, letters, workers = \
# (read_input(2018, 701), 0, ["A", "B", "C", "D", "E", "F"], 2)
(dependencies, letters) = day7_parse_inst(data)
return day7_inst_order2(dependencies, letters, step_duration, workers)
""" DAY 8 """
class PropertyDescription(IntEnum):
Header = 1
Metadata_Definition = 3
Nodes_End = 2
def day8_process_operation(data, operation, curr_node, nodes, operations):
if operation == PropertyDescription.Header:
nodes = data.pop(0)
metadata = data.pop(0)
operations.insert(0, PropertyDescription.Nodes_End)
for _ in range(metadata):
operations.insert(0, PropertyDescription.Metadata_Definition)
for _ in range(nodes):
operations.insert(0, PropertyDescription.Header)
new_node = ([], [], curr_node)
curr_node[1].append(new_node)
return (data, new_node, operations)
if operation == PropertyDescription.Metadata_Definition:
metadata = data.pop(0)
curr_node[0].append(metadata)
return (data, curr_node, operations)
if operation == PropertyDescription.Nodes_End:
return (data, curr_node[2], operations)
raise ValueError
def day8_parse_tree(data):
nodes = []
# Metadata, children, parent
curr_node = ([], [], None)
operations = [PropertyDescription.Header]
while operations:
operation = operations.pop(0)
data, curr_node, operations = \
day8_process_operation(
data, operation, curr_node, nodes, operations)
return curr_node[1][0]
def day8_sum_meta(node):
counter = sum(node[0])
for child in node[1]:
counter += day8_sum_meta(child)
return counter
def day8_1(data):
#data = read_input(2018, 801)
data = data[0].split(" ")
tree = day8_parse_tree([int(n) for n in data])
total = day8_sum_meta(tree)
return total
def day8_node_value(node):
metadata, children, _ = node
if not children:
return sum(metadata)
total = 0
for meta in metadata:
idx = meta - 1
if 0 <= idx < len(children):
total += day8_node_value(children[idx])
return total
def day8_2(data):
#data = read_input(2018, 801)
data = data[0].split(" ")
tree = day8_parse_tree([int(n) for n in data])
total = day8_node_value(tree)
return total
""" DAY 9 """
class ListNode:
def __init__(self, data):
"constructor class to initiate this object"
# store data
self.data = data
# store reference (next item)
self.next = None
# store reference (previous item)
self.previous = None
def day9_debug_marbles(marble_0, current_marble):
curr = marble_0
output = []
while True:
text = int(curr.data)
if curr.data == current_marble.data:
text = "({0})".format(curr.data)
output.append(text)
curr = curr.next
if curr.data == marble_0.data:
break
print(output)
def day9_remove_marble(current_marble):
prev_marble = current_marble.previous
next_marble = current_marble.next
prev_marble.next = next_marble
next_marble.previous = prev_marble
return next_marble
def day9_add_marble_before(current_marble, marble_value):
new_node = ListNode(marble_value)
new_node.previous = current_marble.previous
new_node.next = current_marble
new_node.previous.next = new_node
current_marble.previous = new_node
return new_node
def day9_play_game_mine(players, highest_marble):
scores = [0 for i in range(players)]
current_marble = ListNode(0)
current_marble.previous = current_marble
current_marble.next = current_marble
#marble_0 = current_marble
next_marble = 1
player = 0
while next_marble <= highest_marble:
#day9_debug_marbles(marble_0, current_marble)
if next_marble % 23 != 0:
for _ in range(2):
current_marble = current_marble.next
current_marble = day9_add_marble_before(
current_marble, next_marble)
else:
scores[player] += next_marble
for _ in range(7):
current_marble = current_marble.previous
scores[player] += current_marble.data
current_marble = day9_remove_marble(current_marble)
next_marble += 1
player = (player + 1) % players
return scores
def day9_play_game_optimized(players, highest_marble):
scores = [0 for i in range(players)]
marbles = deque([0])
next_marble = 1
player = 0
while next_marble <= highest_marble:
#day9_debug_marbles2(marble_0, current_marble)
if next_marble % 23 != 0:
marbles.rotate(-2)
marbles.appendleft(next_marble)
else:
scores[player] += next_marble
marbles.rotate(7)
scores[player] += marbles.popleft()
next_marble += 1
player = (player + 1) % players
return scores
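
# Example from the 2018 day 9 puzzle statement (added as a sanity check):
# with 9 players and a last marble worth 25 points the high score is 32.
assert max(day9_play_game_optimized(9, 25)) == 32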
def day9_parse_input(data):
# 9 players; last marble is worth 25 points
return tuple([int(a)
for a in
re.findall(r"(\d+) players; last marble is worth (\d+) points", data)[0]])
def day9_1(data):
line = data[0]
#data = read_input(2018, 901)
#line = data[3]
players, highest_marble = day9_parse_input(line)
scores = day9_play_game_optimized(players, highest_marble)
return max(scores)
def day9_2(data):
line = data[0]
#data = read_input(2018, 901)
#line = data[5]
players, highest_marble = day9_parse_input(line)
scores = day9_play_game_optimized(players, highest_marble * 100)
return max(scores)
""" DAY 10 """
def day10_parse_line(line):
# position=< 32923, 43870> velocity=<-3, -4>
res = re.findall(
r"position=<\s*(-?\d+),\s*(-?\d+)> velocity=<\s*(-?\d+),\s*(-?\d+)>", line)[0]
return tuple([int(a) for a in res])
def day10_parse_input(data):
return [day10_parse_line(line) for line in data]
def day10_update_particle(particle):
px, py, vx, vy = particle
px += vx
py += vy
return (px, py, vx, vy)
def day10_board_size(particles):
min_x = min([pos[0] for pos in particles])
min_y = min([pos[1] for pos in particles])
max_x = max([pos[0] for pos in particles])
max_y = max([pos[1] for pos in particles])
return (max_x - min_x, max_y - min_y)
def day10_print_particles(particles):
min_x = min([pos[0] for pos in particles])
min_y = min([pos[1] for pos in particles])
max_x = max([pos[0] for pos in particles])
max_y = max([pos[1] for pos in particles])
for i in range(max_y - min_y + 1):
line = ""
for j in range(max_x - min_x + 1):
found = False
for particle in particles:
if particle[0] == (j + min_x) and particle[1] == (min_y + i):
found = True
break
if found:
line += " #"
else:
line += " ."
print(line)
print()
return (min_x, max_x, min_y, max_y)
def day10_resolve(particles):
size_x, size_y = day10_board_size(particles)
counter = 0
while True:
new_particles = [day10_update_particle(
particle) for particle in particles]
new_size_x, new_size_y = day10_board_size(new_particles)
if (new_size_x > size_x and new_size_y > size_y):
break
size_x = new_size_x
size_y = new_size_y
particles = new_particles
counter += 1
return (particles, counter)
def day10_1(data):
#data = read_input(2018, 1001)
particles, _ = day10_resolve(day10_parse_input(data))
day10_print_particles(particles)
def day10_2(data):
#data = read_input(2018, 1001)
_, counter = day10_resolve(day10_parse_input(data))
return counter
""" DAY 11 """
def day11_cell_value(serial, x, y):
rack_id = x + 10
level = rack_id * y
level += serial
level *= rack_id
level = math.floor((level % 1000) / 100)
level -= 5
return level
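
# Worked examples from the 2018 day 11 puzzle statement (added as sanity checks):
# cell (3, 5) with serial 8 has power 4; cell (122, 79) with serial 57 has -5.
assert day11_cell_value(8, 3, 5) == 4
assert day11_cell_value(57, 122, 79) == -5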
def day11_grid(serial):
return [[day11_cell_value(serial, x + 1, y + 1) for x in range(300)] for y in range(300)]
def day11_solve_exact_size_aux(memoization, grid, x, y, size):
if size == 1:
return grid[y][x]
#current_total = day11_solve_exact_size(grid, x, y, size-1)
key = str(x) + "_" + str(y) + "_" + str(size)
if key in memoization:
return memoization[key]
around_size = 0
for i in range(size - 1):
around_size += grid[y + i][x + size - 1]
around_size += grid[y + size - 1][x + i]
around_size += grid[y + size - 1][x + size - 1]
# if around_size > 0:
value = day11_solve_exact_size_aux(
memoization, grid, x, y, size - 1) + around_size
memoization[key] = value
return value
# else:
# return -99999999999999
def day11_solve_exact_size(memoization, grid, size):
max_total = -99999999999
coordinate = (1, 1)
y = 0
while y <= len(grid) - size:
x = 0
while x <= len(grid) - size:
key = str(x) + "_" + str(y) + "_" + str(size)
if key in memoization:
current_total = memoization[key]
else:
around_size = 0
for i in range(size - 1):
around_size += grid[y + i][x + size - 1]
around_size += grid[y + size - 1][x + i]
around_size += grid[y + size - 1][x + size - 1]
if around_size > 0:
current_total = day11_solve_exact_size_aux(
memoization, grid, x, y, size)
else:
x += 1
continue
if current_total > max_total:
max_total = current_total
coordinate = (x + 1, y + 1)
x += 1
y += 1
return (coordinate, max_total)
def day11_solve_range(grid, min_size, max_size):
max_total = -9999999999
coordinate = (1, 1)
max_size_total = min_size
memoization = {}
for size in range(min_size, max_size + 1):
current_coordinate, current_total = day11_solve_exact_size(
memoization, grid, size)
if current_total > max_total:
max_total = current_total
coordinate = current_coordinate
max_size_total = size
return (coordinate, max_size_total)
def day11_1(data):
#data = read_input(2018, 1101)
memoization = {}
serial = int(data[0])
grid = day11_grid(serial)
return day11_solve_exact_size(memoization, grid, 3)[0]
def day11_2(data):
#data = read_input(2018, 1101)
serial = int(data[0])
grid = day11_grid(serial)
return day11_solve_range(grid, 2, 299)
""" DAY 12 """
def day12_parse_input(data):
initial_state = deque(re.findall(r"initial state: ([\.|#]*)", data[0])[0])
rules = deque([])
for rule in data[2:]:
parts = rule.split(" => ")
rules.append((parts[0], parts[1]))
return (initial_state, rules)
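# Input format handled above (per the puzzle, as I recall): a header such as
# "initial state: #..#.#..##......###...###" followed by a blank line and rules
# like "...## => #"; the state is kept as a deque of '#'/'.' characters and
# each rule as a (pattern, result) pair.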
def day12_get_rule(pots, rules, position):
for rule in rules:
prev = rule[0]
match = True
for i, _ in enumerate(prev):
match &= prev[i] == pots[position + i - 2]
if match:
return rule[1]
return None
def day12_process_generation(pots, rules, start):
new_pots = deque([])
pots.appendleft(".")
pots.appendleft(".")
pots.appendleft(".")
pots.appendleft(".")
pots.append(".")
pots.append(".")
pots.append(".")
pots.append(".")
for i in range(2, len(pots) - 2):
result = day12_get_rule(pots, rules, i)
if result is None:
new_pots.append(pots[i])
else:
new_pots.append(result)
start = start - 2
curr = new_pots[0]
while curr == ".":
new_pots.popleft()
start += 1
curr = new_pots[0]
curr = new_pots[-1]
while curr == ".":
new_pots.pop()
curr = new_pots[-1]
return (new_pots, start)
def day12_solve(pots, rules, generations):
memoization = {}
history = deque([])
start = 0
for i in range(generations):
history.append(("".join(pots), start))
memoization["".join(pots)] = i
# print("".join(pots))
# print(start)
pots, start = day12_process_generation(pots, rules, start)
new_pots = "".join(pots)
if new_pots in memoization:
itera = memoization[new_pots]
j = 0
while j < itera:
history.popleft()
j += 1
pos = (generations - (i + 1)) % len(history)
pos_shift = start - history[pos][1]
iterations = math.floor((generations - (i + 1)) / len(history))
pots = history[pos][0]
start += pos_shift * iterations
break
total = 0
for i, _ in enumerate(pots):
if pots[i] == "#":
total += start + i
return total
def day12_1(data):
#data = read_input(2018, 1201)
pots, rules = day12_parse_input(data)
return day12_solve(pots, rules, 20)
def day12_2(data):
#data = read_input(2018, 1201)
pots, rules = day12_parse_input(data)
return day12_solve(pots, rules, 50000000000)
""" Day 13 """
class Turn:
Left = 0
Straight = 1
Right = 2
class Direction:
Left = 0
Up = 1
Right = 2
Down = 3
def day13_print_direction(dir_):
if dir_ == Direction.Up:
return "^"
elif dir_ == Direction.Down:
return "v"
elif dir_ == Direction.Left:
return "<"
elif dir_ == Direction.Right:
return ">"
raise ValueError
def day13_debug_map(map_, positions):
for y, _ in enumerate(map_):
line = ""
for x, _ in enumerate(map_[y]):
cart = [positions[k]
for k in range(len(positions))
if positions[k][0] == x and positions[k][1] == y]
if len(cart) == 1:
cart = cart[0]
else:
cart = None
if cart is None:
line += map_[y][x]
else:
line += day13_print_direction(cart[2])
print(line)
print()
time.sleep(0.5)
def day13_get_cart_direction(pos):
if pos == "^":
return Direction.Up
elif pos == "v":
return Direction.Down
elif pos == "<":
return Direction.Left
elif pos == ">":
return Direction.Right
return None
def day13_parse_input(data):
map_ = []
positions = []
for row, _ in enumerate(data):
new_row = []
for column, _ in enumerate(data[row]):
direction = day13_get_cart_direction(data[row][column])
if direction is None:
new_row.append(data[row][column])
else:
positions.append((column, row, direction, Turn.Right))
if direction in (Direction.Left, Direction.Right):
new_row.append("-")
else:
new_row.append("|")
map_.append(new_row)
return map_, sorted(positions, key=lambda v: (v[0], v[1]))
def day13_direction_on_turn(direction, turn):
if turn == Turn.Straight:
return direction
elif turn == Turn.Right:
return (direction + 1) % 4
else:
return (direction + 3) % 4
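# With Left=0, Up=1, Right=2, Down=3, a right turn is a +1 step around the
# cycle and a left turn a +3 (i.e. -1) step, e.g.
#   day13_direction_on_turn(Direction.Up, Turn.Right) == Direction.Right
#   day13_direction_on_turn(Direction.Up, Turn.Left) == Direction.Left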
def day13_next_turn(location, direction, last_turn):
if location == "/":
if direction == Direction.Up:
return (Direction.Right, last_turn)
if direction == Direction.Left:
return (Direction.Down, last_turn)
if direction == Direction.Right:
return (Direction.Up, last_turn)
if direction == Direction.Down:
return (Direction.Left, last_turn)
elif location == "\\":
if direction == Direction.Up:
return (Direction.Left, last_turn)
if direction == Direction.Left:
return (Direction.Up, last_turn)
if direction == Direction.Right:
return (Direction.Down, last_turn)
if direction == Direction.Down:
return (Direction.Right, last_turn)
elif location == "+":
turn = (last_turn + 1) % 3
return (day13_direction_on_turn(direction, turn), turn)
return direction, last_turn
def day13_get_direction_delta(direction):
if direction == Direction.Up:
return (0, -1)
elif direction == Direction.Down:
return (0, 1)
elif direction == Direction.Left:
return (-1, 0)
elif direction == Direction.Right:
return (1, 0)
    raise ValueError
def day13_move_cart(map_, positions, cart):
x = positions[cart][0]
y = positions[cart][1]
direction = positions[cart][2]
last_turn = positions[cart][3]
delta_x, delta_y = day13_get_direction_delta(direction)
new_x, new_y = x + delta_x, y + delta_y
for i, _ in enumerate(positions):
if i != cart and positions[i][0] == new_x and positions[i][1] == new_y:
return "X", new_x, new_y, i
new_direction, new_turn = day13_next_turn(
map_[new_y][new_x], direction, last_turn)
return (new_x, new_y, new_direction, new_turn)
def day13_solve(map_, positions):
while True:
#day13_debug_map(map, positions)
for i, _ in enumerate(positions):
new_position = day13_move_cart(map_, positions, i)
if new_position[0] == "X":
return new_position[1], new_position[2]
positions[i] = new_position
positions = sorted(positions, key=lambda v: (v[0], v[1]))
def day13_solve2(map_, positions):
while True:
#day13_debug_map(map, positions)
crashing_carts = []
for i, _ in enumerate(positions):
new_position = day13_move_cart(map_, positions, i)
if new_position[0] == "X":
crashing_carts.append(new_position[3])
crashing_carts.append(i)
else:
positions[i] = new_position
positions = [positions[i]
for i in range(len(positions)) if not i in crashing_carts]
if len(positions) == 1:
return positions[0][0], positions[0][1]
positions = sorted(positions, key=lambda v: (v[0], v[1]))
def day13_1(data):
#data = read_input(2018, 1301)
map_, positions = day13_parse_input(data)
return day13_solve(map_, positions)
def day13_2(data):
#data = read_input(2018, 1302)
map_, positions = day13_parse_input(data)
return day13_solve2(map_, positions)
""" DAY 14 """
def day14_debug_recipes(recipes, elves):
out = ""
for i, _ in enumerate(recipes):
recipe = recipes[i]
if elves[0] == i:
recipe = "({0})".format(recipe)
elif elves[1] == i:
recipe = "[{0}]".format(recipe)
else:
recipe = " {0} ".format(recipe)
out += " " + str(recipe)
print(out)
def day14_break_number(number):
return [int(v) for v in str(number)]
def day14_solve(nr_recipes):
recipes = [3, 7]
elves = [0, 1]
recipes_len = len(recipes)
for _ in range(nr_recipes + 10):
#day14_debug_recipes(recipes, elves)
recipe_0 = recipes[elves[0]]
recipe_1 = recipes[elves[1]]
new_recipe = recipe_0 + recipe_1
new_recipes = day14_break_number(new_recipe)
recipes.extend(new_recipes)
recipes_len += len(new_recipes)
elves[0] = (elves[0] + recipe_0 + 1) % recipes_len
elves[1] = (elves[1] + recipe_1 + 1) % recipes_len
return recipes[nr_recipes:nr_recipes + 10]
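# Expected results for the commented-out test inputs in day14_1 below (the
# puzzle's examples, as I recall): after 9 recipes the next ten scores are
# 5158916779, after 18 they are 9251071085, and after 2018 they are 5941429882.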
def day14_solve2(nr_recipes, value):
recipes = [3, 7]
elves = [0, 1]
start = 0
#value = str(nr_recipes)
size = len(value)
recipes_len = len(recipes)
while True:
#day14_debug_recipes(recipes, elves)
recipe_0 = recipes[elves[0]]
recipe_1 = recipes[elves[1]]
new_recipe = recipe_0 + recipe_1
new_recipes = day14_break_number(new_recipe)
recipes.extend(new_recipes)
recipes_len += len(new_recipes)
elves[0] = (elves[0] + recipe_0 + 1) % recipes_len
elves[1] = (elves[1] + recipe_1 + 1) % recipes_len
if recipes_len - start > size:
#day14_debug_recipes(recipes, [start, -1])
while start < recipes_len - size:
if str(recipes[start]) == value[0]:
tmp_value = "".join([str(r)
for r in recipes[start:start + size]])
if tmp_value == value:
return start
start += 1
return recipes[nr_recipes:nr_recipes + 10]
def day14_1(data):
#data = ["9"]
#data = ["5"]
#data = ["18"]
#data = ["2018"]
nr_recipes = int(data[0])
return "".join([str(i) for i in day14_solve(nr_recipes)])
def day14_2(data):
#data = ["9"]
#data = ["5"]
#data = ["18"]
#data = ["2018"]
#data = ["59414"]
nr_recipes = int(data[0])
return day14_solve2(nr_recipes, data[0])
""" DAY 15 """
def day15_parse_input(data):
data = [[c for c in line] for line in data]
return data
def day15_print(grid):
for rr, row in enumerate(grid):
for cc, p in enumerate(row):
print(p, end="")
print()
def day15_solve(data, part1):
data = day15_parse_input(data)
data_back = [row[:] for row in data]
power = 3
if not part1:
power = 30
while True:
data = [row[:] for row in data_back]
healths = {}
for rr, row in enumerate(data):
for cc, p in enumerate(row):
if p != "." and p != "#":
healths[(rr, cc)] = 200
stalled = False
t = 0
while not stalled:
stalled = True
units = []
# print(t)
for rr, row in enumerate(data):
for cc, p in enumerate(row):
if p != "." and p != "#":
units.append((rr, cc))
abort = False
for r, c in units:
data, healths, acted, elf_died = day15_turn(
data, healths, r, c, power)
if elf_died and not part1:
abort = True
break
p = data[r][c]
if not acted:
if p == "G" and not part1:
abort = True
break
print(t, power)
day15_print(data)
print(t, sum(healths.values()))
return (t) * sum(healths.values())
stalled = stalled and not acted
if abort:
break
t += 1
power += 1
def day15_1(data):
return day15_solve(data, True)
def day15_2(data):
return day15_solve(data, False)
def day15_adj(grid, targets, r, c):
# Find open adj targets
R = len(grid)
C = len(grid[0])
adj_pos = set()
in_range = set()
D = [(0, 1), (1, 0), (0, -1), (-1, 0)]
for rr, cc in targets:
for dr, dc in D:
rrr, ccc = rr + dr, cc + dc
if (rrr, ccc) == (r, c):
in_range.add((rr, cc))
if 0 <= rrr < R and 0 <= ccc < C and grid[rrr][ccc] == ".":
adj_pos.add((rrr, ccc))
return adj_pos, in_range
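# day15_adj returns two sets: adj_pos, the open '.' squares orthogonally
# adjacent to any target unit (candidate destinations for a move), and
# in_range, the targets already adjacent to (r, c) (candidates to attack).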
def day15_turn(grid, healths, r, c, power):
R = len(grid)
C = len(grid[0])
D = [(0, 1), (1, 0), (0, -1), (-1, 0)]
curr = grid[r][c]
if curr == ".":
# died already
# print(r,c, "died")
return grid, healths, True, False
targets = set()
    # find targets
for rr, row in enumerate(grid):
for cc, p in enumerate(row):
if p != "." and p != "#" and p != curr:
targets.add((rr, cc))
if len(targets) == 0:
# print("no targets")
return grid, healths, False, False
# Find open adj targets
adj_pos, in_range = day15_adj(grid, targets, r, c)
moved = False
if not in_range:
# Move
reach = set()
seen = set()
q = [(0, (r, c), [])]
# print(adj_pos)
while q:
steps, pos, path = q.pop(0)
# print(pos)
if len(path) > 0:
if (pos, path[0]) in seen:
# print(pos, path)
continue
seen.add((pos, path[0]))
rr, cc = pos
if pos in adj_pos:
reach.add((steps, pos, path[0]))
for dr, dc in D:
rrr, ccc = rr + dr, cc + dc
if grid[rrr][ccc] == ".":
q.append((steps + 1, (rrr, ccc), path + [(rrr, ccc)]))
if reach:
moved = True
min_steps = min(reach)[0]
nearest = [c[2] for c in reach if c[0] == min_steps]
grid[r][c] = "."
health = healths[(r, c)]
healths[(r, c)] = 0
# print(r,c, reach, nearest, min(nearest))
r, c = min(nearest)
healths[(r, c)] = health
grid[r][c] = curr
# Find open adj targets
adj_pos, in_range = day15_adj(grid, targets, r, c)
# day26_print(grid)
# Attack
# print("range", r,c, in_range)
elf_died = False
if in_range:
ts = [(healths[(rr, cc)], rr, cc) for rr, cc in in_range]
hp, rr, cc = min(ts)
# print(r,c,rr,cc,hp)
p = power
if grid[r][c] == "G":
p = 3
hp -= p
hp = max([0, hp])
healths[rr, cc] = hp
if hp == 0:
if grid[rr][cc] == "E":
elf_died = True
grid[rr][cc] = "."
return grid, healths, True, elf_died
""" DAY 16 """
class Inst16:
@staticmethod
def addr(regs, a, b, c):
regs[c] = regs[a] + regs[b]
return regs
@staticmethod
def addi(regs, a, b, c):
regs[c] = regs[a] + b
return regs
@staticmethod
def mulr(regs, a, b, c):
regs[c] = regs[a] * regs[b]
return regs
@staticmethod
def muli(regs, a, b, c):
regs[c] = regs[a] * b
return regs
@staticmethod
def banr(regs, a, b, c):
regs[c] = regs[a] & regs[b]
return regs
@staticmethod
def bani(regs, a, b, c):
regs[c] = regs[a] & b
return regs
@staticmethod
def borr(regs, a, b, c):
regs[c] = regs[a] | regs[b]
return regs
@staticmethod
def bori(regs, a, b, c):
regs[c] = regs[a] | b
return regs
@staticmethod
def setr(regs, a, _, c):
regs[c] = regs[a]
return regs
@staticmethod
def seti(regs, a, _, c):
regs[c] = a
return regs
@staticmethod
def gtir(regs, a, b, c):
if a > regs[b]:
regs[c] = 1
else:
regs[c] = 0
return regs
@staticmethod
def gtri(regs, a, b, c):
if regs[a] > b:
regs[c] = 1
else:
regs[c] = 0
return regs
@staticmethod
def gtrr(regs, a, b, c):
if regs[a] > regs[b]:
regs[c] = 1
else:
regs[c] = 0
return regs
@staticmethod
def eqir(regs, a, b, c):
if a == regs[b]:
regs[c] = 1
else:
regs[c] = 0
return regs
@staticmethod
def eqri(regs, a, b, c):
if regs[a] == b:
regs[c] = 1
else:
regs[c] = 0
return regs
@staticmethod
def eqrr(regs, a, b, c):
if regs[a] == regs[b]:
regs[c] = 1
else:
regs[c] = 0
return regs
ops = [
addr.__func__,
addi.__func__,
mulr.__func__,
muli.__func__,
banr.__func__,
bani.__func__,
borr.__func__,
bori.__func__,
setr.__func__,
seti.__func__,
gtir.__func__,
gtri.__func__,
gtrr.__func__,
eqir.__func__,
eqri.__func__,
eqrr.__func__
]
def day16_parse_input(data):
samples = []
i = 0
while i < len(data):
if data[i] == "":
break
# Before: [3, 2, 1, 1]
# 9 2 1 2
# After: [3, 2, 2, 1]
before = [int(x) for x in re.findall(
r"Before: \[(\d+), (\d+), (\d+), (\d+)\]", data[i])[0]]
i += 1
inst = [int(x) for x in re.findall(
r"(\d+) (\d+) (\d+) (\d+)", data[i])[0]]
i += 1
after = [int(x) for x in re.findall(
r"After: \[(\d+), (\d+), (\d+), (\d+)\]", data[i])[0]]
i += 1
i += 1
samples.append((before, inst, after))
program = []
while i < len(data):
if data[i] != "":
# 9 2 1 2
inst = [int(x) for x in re.findall(
r"(\d+) (\d+) (\d+) (\d+)", data[i])[0]]
program.append(inst)
i += 1
return samples, program
def day16_check_op(fun, before, inst, after):
result = fun(before[:], inst[1], inst[2], inst[3])
return all([result[i] == after[i] for i in range(len(result))])
def day16_check_sample(before, inst, after):
ops = Inst16.ops
counter = 0
for op in ops:
if day16_check_op(op, before, inst, after):
counter += 1
return counter
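# For the sample shown in day16_parse_input above -- Before [3, 2, 1, 1],
# instruction "9 2 1 2", After [3, 2, 2, 1] -- this returns 3: the instruction
# behaves like mulr, addi and seti (which is, as I recall, exactly the
# puzzle's worked example).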
def day16_update_mapping(mapping, before, inst, after):
ops = Inst16.ops
matched = []
for op in ops:
if day16_check_op(op, before, inst, after):
matched.append(op)
opcode = inst[0]
prev_map = mapping[opcode]
mapping[opcode] = [op for op in prev_map if op in matched]
def day16_solve1(samples):
counter = 0
for before, inst, after in samples:
if day16_check_sample(before, inst, after) >= 3:
counter += 1
return counter
def day16_calculate_mapping(samples):
mapping = [Inst16.ops for i in range(len(Inst16.ops))]
for before, inst, after in samples:
day16_update_mapping(mapping, before, inst, after)
while True:
for i, _ in enumerate(mapping):
if len(mapping[i]) == 1:
op = mapping[i][0]
for j, _ in enumerate(mapping):
if i != j:
mapping[j] = [op2 for op2 in mapping[j] if op2 != op]
if all([len(mapping[i]) == 1 for i in range(len(mapping))]):
break
mapping = [m[0] for m in mapping]
return mapping
def day16_solve2(samples, program):
mapping = day16_calculate_mapping(samples)
regs = [0, 0, 0, 0]
for line in program:
op = mapping[line[0]]
regs = op(regs, line[1], line[2], line[3])
return regs[0]
def day16_1(data):
#data = read_input(2018, 1601)
samples, _ = day16_parse_input(data)
return day16_solve1(samples)
def day16_2(data):
#data = read_input(2018, 1601)
samples, program = day16_parse_input(data)
return day16_solve2(samples, program)
""" DAY 17 """
class Bitmap():
def __init__(self, width, height):
self._bfType = 19778 # Bitmap signature
self._bfReserved1 = 0
self._bfReserved2 = 0
self._bcPlanes = 1
self._bcSize = 12
self._bcBitCount = 24
self._bfOffBits = 26
self._bcWidth = width
self._bcHeight = height
self._bfSize = 26 + self._bcWidth * 3 * self._bcHeight
self.clear()
def clear(self):
self._graphics = [(0, 0, 0)] * self._bcWidth * self._bcHeight
def setPixel(self, x, y, color):
if isinstance(color, tuple):
if x < 0 or y < 0 or x > self._bcWidth - 1 or y > self._bcHeight - 1:
raise ValueError('Coords out of range')
if len(color) != 3:
raise ValueError('Color must be a tuple of 3 elems')
self._graphics[y * self._bcWidth +
x] = (color[2], color[1], color[0])
else:
raise ValueError('Color must be a tuple of 3 elems')
def write(self, file):
with open(file, 'wb') as f:
f.write(pack('<HLHHL',
self._bfType,
self._bfSize,
self._bfReserved1,
self._bfReserved2,
self._bfOffBits)) # Writing BITMAPFILEHEADER
f.write(pack('<LHHHH',
self._bcSize,
self._bcWidth,
self._bcHeight,
self._bcPlanes,
self._bcBitCount)) # Writing BITMAPINFO
for px in self._graphics:
f.write(pack('<BBB', *px))
for _ in range(0, (self._bcWidth * 3) % 4):
f.write(pack('B', 0))
class Day17_Type:
clay = 0
sand = 1
water = 2
settled = 3
@staticmethod
def render_square(square):
if square == Day17_Type.clay:
return "#"
elif square == Day17_Type.sand:
return "."
elif square == Day17_Type.water:
return "|"
elif square == Day17_Type.settled:
return "~"
raise ValueError
@staticmethod
def render_square_rgb(square):
if square == Day17_Type.clay:
return (0, 0, 0)
elif square == Day17_Type.sand:
return (255, 255, 255)
elif square == Day17_Type.water:
return (102, 165, 255)
elif square == Day17_Type.settled:
return (0, 0, 255)
raise ValueError
bitmap_counter = 0
def day17_debug_ground_bmp(ground, y):
# pylint: disable=W0603
global bitmap_counter
# pylint: enable=W0603
half = int(100 / 2)
lower = max(y - half, 0)
size = (y + half + 1) - lower
b = Bitmap(436, size)
b.clear()
for i in range(lower, y + half + 1):
for j in range(len(ground[i])):
coord = y + half - i
b.setPixel(j, coord, Day17_Type.render_square_rgb(ground[i][j]))
b.write('test/file{:09d}.bmp'.format(bitmap_counter))
bitmap_counter += 1
def day17_debug_ground_ascii(ground, y):
for i in range(y - 10, y + 11):
line = ground[i]
print("".join([Day17_Type.render_square(s) for s in line]))
print()
time.sleep(.1)
def day17_debug_full_ground_bmp(ground):
size = len(ground)
b = Bitmap(436, size)
b.clear()
for i, _ in enumerate(ground):
for j, _ in enumerate(ground[i]):
coord = (size - 1) - i
b.setPixel(j, coord, Day17_Type.render_square_rgb(ground[i][j]))
b.write('test/a_full_ground.bmp')
def day17_parse_input(data):
slices = []
min_y = 9999999
max_y = 0
for line in data:
if line[0] == "x":
# x=501, y=3..7
ground_slice = [int(x) for x in re.findall(
r"x=(\d+), y=(\d+)..(\d+)", line)[0]]
slices.append(ground_slice)
if ground_slice[1] < min_y:
min_y = ground_slice[1]
if ground_slice[2] > max_y:
max_y = ground_slice[2]
else:
# y=501, x=3..7
ground_slice = [int(x) for x in re.findall(
r"y=(\d+), x=(\d+)..(\d+)", line)[0]]
for i in range(ground_slice[1], ground_slice[2] + 1):
ground_slice2 = (i, ground_slice[0], ground_slice[0])
slices.append(ground_slice2)
if ground_slice2[1] < min_y:
min_y = ground_slice2[1]
if ground_slice2[2] > max_y:
max_y = ground_slice2[2]
slices = sorted(slices, key=lambda s: s[0])
min_x = slices[0][0] - 2
max_x = slices[-1][0] + 2
ground = [[Day17_Type.sand for j in range(
min_x, max_x)] for i in range(max_y + 2)]
for s in slices:
for i in range(s[1], s[2] + 1):
ground[i][s[0] - min_x] = Day17_Type.clay
return ground, min_x, min_y
def day17_flow_water(ground, y, x):
stack = deque([(y, x)])
while stack:
i, j = stack.popleft()
if i + 1 >= len(ground) or i < 0 or j + 1 >= len(ground[0]) or j - 1 < 0:
continue
if ground[i][j] == Day17_Type.clay or ground[i][j] == Day17_Type.settled:
continue
if ground[i][j] == Day17_Type.sand:
ground[i][j] = Day17_Type.water
k = j - 1
left_wall = False
right_wall = False
while k > 0:
if ground[i][k] == Day17_Type.clay or ground[i][k] == Day17_Type.settled:
left_wall = True
break
if ground[i][k] == Day17_Type.sand:
break
k -= 1
k = j + 1
while k < len(ground[i]):
if ground[i][k] == Day17_Type.clay or ground[i][k] == Day17_Type.settled:
right_wall = True
break
if ground[i][k] == Day17_Type.sand:
break
k += 1
if left_wall and right_wall:
ground[i][j] = Day17_Type.settled
#day17_debug_ground_bmp(ground, i)
if ground[i + 1][j] == Day17_Type.clay or ground[i + 1][j] == Day17_Type.settled:
if ground[i][j - 1] != ground[i][j]:
stack.append((i, j - 1))
if ground[i][j + 1] != ground[i][j]:
stack.append((i, j + 1))
if ground[i][j] == Day17_Type.settled and ground[i - 1][j] == Day17_Type.water:
stack.append((i - 1, j))
if ground[i + 1][j] != ground[i][j]:
stack.append((i + 1, j))
def day17_solve(ground, min_x, min_y):
day17_flow_water(ground, 0, 500 - min_x)
# day17_debug_full_ground_bmp(ground)
counter_water = 0
counter_retained = 0
for i, _ in enumerate(ground):
for j, _ in enumerate(ground[i]):
if i >= min_y:
if ground[i][j] == Day17_Type.settled:
counter_water += 1
counter_retained += 1
if ground[i][j] == Day17_Type.water:
counter_water += 1
return counter_water, counter_retained
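# Puzzle example (as I recall): the small sample scan reaches 57 water tiles
# in part 1, of which 29 remain settled for part 2.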
def day17_1(data):
#data = read_input(2018, 1701)
ground, min_x, min_y = day17_parse_input(data)
return day17_solve(ground, min_x, min_y)[0]
def day17_2(data):
#data = read_input(2018, 1701)
ground, min_x, min_y = day17_parse_input(data)
return day17_solve(ground, min_x, min_y)[1]
""" DAY 18 """
class AcreContents:
open = "."
trees = "|"
lumberyard = "#"
@staticmethod
def count_states(state, adjacents):
return len([True for a in adjacents if a == state])
@staticmethod
def next_state(current, adjacents):
if current == AcreContents.open:
if AcreContents.count_states(AcreContents.trees, adjacents) >= 3:
return AcreContents.trees
elif current == AcreContents.trees:
if AcreContents.count_states(AcreContents.lumberyard, adjacents) >= 3:
return AcreContents.lumberyard
elif current == AcreContents.lumberyard:
if AcreContents.count_states(AcreContents.lumberyard, adjacents) == 0 or \
AcreContents.count_states(AcreContents.trees, adjacents) == 0:
return AcreContents.open
return current
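# Examples of the transition rules above, directly from the code:
#   AcreContents.next_state('.', list('|||.....')) -> '|'  (open acre, 3+ wooded neighbours)
#   AcreContents.next_state('#', list('........')) -> '.'  (lumberyard with no adjacent lumberyard/trees)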
def day18_debug_area(area):
for i, _ in enumerate(area):
out = ""
for j, _ in enumerate(area[i]):
out += area[i][j]
print(out)
print()
def day18_get_adjacent_cells(area, i_coord, j_coord):
# the adjacency matrix
adjacency = [(i, j) for i in (-1, 0, 1)
for j in (-1, 0, 1) if not i == j == 0]
result = []
for di, dj in adjacency:
# boundaries check
if 0 <= (i_coord + di) < len(area) and 0 <= j_coord + dj < len(area[0]):
result.append(area[i_coord + di][j_coord + dj])
return result
def day18_compute_change(area, i, j):
adjacents = day18_get_adjacent_cells(area, i, j)
return AcreContents.next_state(area[i][j], adjacents)
def day18_process(area, minutes):
prev_area = area
history = deque([])
memoization = {}
for t in range(minutes):
# day18_debug_area(prev_area)
history.append(prev_area)
memoization["".join(prev_area)] = t
new_area = []
for i, _ in enumerate(prev_area):
new_area.append("")
for j, _ in enumerate(prev_area[i]):
new_state = day18_compute_change(prev_area, i, j)
new_area[i] += new_state
new_area_key = "".join(new_area)
if new_area_key in memoization:
itera = memoization[new_area_key]
j = 0
while j < itera:
history.popleft()
j += 1
pos = (minutes - (t + 1)) % len(history)
prev_area = history[pos]
break
prev_area = new_area
return prev_area
def day18_1(data):
#data = read_input(2018, 1801)
area = data
new_area = day18_process(area, 10)
trees = len(
[True for line in new_area for s in line if s == AcreContents.trees])
lumberyards = len(
[True for line in new_area for s in line if s == AcreContents.lumberyard])
return trees * lumberyards
def day18_2(data):
#data = read_input(2018, 1801)
area = data
new_area = day18_process(area, 1000000000)
trees = len(
[True for line in new_area for s in line if s == AcreContents.trees])
lumberyards = len(
[True for line in new_area for s in line if s == AcreContents.lumberyard])
return trees * lumberyards
""" DAY 19 """
class Inst19(Inst16):
ops = {"addr": Inst16.addr,
"addi": Inst16.addi,
"mulr": Inst16.mulr,
"muli": Inst16.muli,
"banr": Inst16.banr,
"bani": Inst16.bani,
"borr": Inst16.borr,
"bori": Inst16.bori,
"setr": Inst16.setr,
"seti": Inst16.seti,
"gtir": Inst16.gtir,
"gtri": Inst16.gtri,
"gtrr": Inst16.gtrr,
"eqir": Inst16.eqir,
"eqri": Inst16.eqri,
"eqrr": Inst16.eqrr}
def day19_parse_input(data):
pointer = int(data[0][-1])
program = []
for line in data[1:]:
inst = [x for x in re.findall(r"(\w+) (\d+) (\d+) (\d+)", line)[0]]
inst[0] = Inst19.ops[inst[0]]
inst[1] = int(inst[1])
inst[2] = int(inst[2])
inst[3] = int(inst[3])
program.append(inst)
return pointer, program
def day19_run_program(pointer, program, start_0, day, part):
regs = [start_0, 0, 0, 0, 0, 0]
inst = regs[pointer]
counter = Counter()
last = 0
while 0 <= inst < len(program):
        # Fast-forward the first loop: give register 1 the value reg[2] * reg[5] right away
if day == 19 and inst == 4 and regs[1] / regs[2] == regs[5] and regs[2] < regs[3] / regs[5]:
regs[2] = int(regs[3] / regs[5])
regs[1] = regs[2] * regs[5]
        # Fast-forward the second loop: jump register 2 straight to the value of register 3
if day == 19 and inst == 9 and regs[2] < regs[3]:
regs[2] = regs[3]
if day == 21 and inst == 28:
if part == 1:
return regs[5]
value = regs[5]
if counter[value] == 0:
counter[value] = 1
last = value
else:
return last
regs[pointer] = inst
f, a, b, c = program[inst]
f(regs, a, b, c)
inst = regs[pointer]
inst += 1
return regs
def day19_1(data):
#data = read_input(2018, 1901)
pointer, program = day19_parse_input(data)
return day19_run_program(pointer, program, 0, 19, 1)[0]
def day19_2(data):
#data = read_input(2018, 1901)
pointer, program = day19_parse_input(data)
return day19_run_program(pointer, program, 1, 19, 2)[0]
""" DAY 20 """
class PathNode:
def __init__(self, pos, dist, path):
"constructor class to initiate this object"
# store data
self.pos = pos
self.dist = dist
self.path = path
def __repr__(self):
return "({0},{1}) - {2} - {3}".format(self.pos[0], self.pos[1], self.dist, self.path)
def day20_move(pos, direction):
x, y = pos
if direction == "N":
return (x, y + 1)
if direction == "S":
return (x, y - 1)
if direction == "W":
return (x - 1, y)
if direction == "E":
return (x + 1, y)
raise ValueError
def day20_update_locations(direction, current_locations, history):
for node in current_locations:
pos = day20_move(node.pos, direction)
node.pos = pos
node.dist += 1
key = "{0}_{1}".format(node.pos[0], node.pos[1])
if not key in history:
history[key] = node.dist
elif node.dist < history[key]:
history[key] = node.dist
node.path = node.path + direction
def day20_alternatives(index, path, locations, distances):
new_locations = []
current_locations = [PathNode(n.pos, n.dist, n.path) for n in locations]
while path[index] != ")":
if path[index] == "(":
current_locations, index = \
day20_alternatives(
index + 1, path, current_locations, distances)
elif path[index] == "|":
new_locations.extend(current_locations)
current_locations = [PathNode(n.pos, n.dist, n.path)
for n in locations]
else:
day20_update_locations(path[index], current_locations, distances)
index += 1
new_locations.extend(current_locations)
# Remove redundant paths
result = {}
for l in new_locations:
key = "{0}_{1}".format(l.pos[0], l.pos[1])
if key not in result:
result[key] = l
elif l.dist < result[key].dist:
result[key] = l
new_locations = result.values()
return new_locations, index
def day20_get_rooms_distances(path):
path = path[1:]
index = 0
distances = {}
current_locations = [PathNode((0, 0), 0, "")]
while path[index] != "$":
if path[index] == "(":
current_locations, index = \
day20_alternatives(
index + 1, path, current_locations, distances)
elif path[index] != ")":
day20_update_locations(path[index], current_locations, distances)
index += 1
return distances
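# Small check: for the regex "^WNE$" the walker records three rooms at
# distances 1, 2 and 3, so the furthest room is 3 doors away (matching, as I
# recall, the puzzle's smallest example).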
def day20_1(data):
#data = read_input(2018, 2001)
path = data[0]
distances = day20_get_rooms_distances(path).values()
best = sorted(distances, reverse=True, key=lambda v: v)[0]
return best
def day20_2(data):
#data = read_input(2018, 2001)
path = data[0]
distances = day20_get_rooms_distances(path).values()
return len([d for d in distances if d >= 1000])
""" DAY 21 """
def day21_1(data):
pointer, program = day19_parse_input(data)
return day19_run_program(pointer, program, 1, 21, 1)
def day21_2(data):
pointer, program = day19_parse_input(data)
return day19_run_program(pointer, program, 1, 21, 2)
""" DAY 22 """
class Day22_Type:
rocky = 0
wet = 1
narrow = 2
@staticmethod
def get_type(erosion_level):
return erosion_level % 3
@staticmethod
def repr(tool):
if tool == Day22_Type.rocky:
return "."
if tool == Day22_Type.wet:
return "="
if tool == Day22_Type.narrow:
return "|"
return None
class Day22_Tools:
gear = 0
torch = 1
neither = 2
@staticmethod
def allowed_tool(region_type, tool):
if region_type == Day22_Type.rocky:
return tool in (Day22_Tools.gear, Day22_Tools.torch)
if region_type == Day22_Type.wet:
return tool in (Day22_Tools.gear, Day22_Tools.neither)
if region_type == Day22_Type.narrow:
return tool in (Day22_Tools.torch, Day22_Tools.neither)
raise ValueError
@staticmethod
def repr(tool):
if tool == Day22_Tools.torch:
return "torch"
if tool == Day22_Tools.gear:
return "gear"
if tool == Day22_Tools.neither:
return "neither"
return None
tools = [gear, torch, neither]
class Day22_Path_Node:
def __init__(self, x, y, delta_x, delta_y, tool, minutes, path):
self.pos = (x, y)
self.delta_x = delta_x
self.delta_y = delta_y
self.tool = tool
self.minutes = minutes
self.path = path
def __lt__(self, other): # For x < y
return (self.minutes + abs(self.delta_x) + abs(self.delta_y)) < \
(other.minutes + abs(other.delta_x) + abs(other.delta_y))
def __eq__(self, other): # For x == y
return self.pos == other.pos and \
self.delta_x == other.delta_x and \
self.delta_y == other.delta_y and \
self.tool == other.tool and \
self.minutes == other.minutes
def __gt__(self, other): # For x > y
return not self == other and not self < other
def __le__(self, other): # For x <= y
return self == other or self < other
def __ne__(self, other): # For x != y OR x <> y
return not self == other
def __ge__(self, other): # For x >= y
return self == other or self > other
def __repr__(self):
return "[{4}] ({0},{1}) - with {2} in {3} minutes".format(
self.pos[0],
self.pos[1],
Day22_Tools.repr(self.tool),
self.minutes,
self.minutes + abs(self.delta_x) + abs(self.delta_y))
def day22_parse_input(data):
depth = int(data[0].split(" ")[1])
target = data[1].split(" ")[1]
target_coords = tuple([int(x) for x in target.split(",")])
return depth, target_coords
def day22_geologic_index(pos, target, depth, memory):
if memory[pos] != 0:
return memory[pos]
x, y = pos
value = 0
if x == y == 0 or pos == target:
value = 0
elif x == 0:
value = y * 48271
elif y == 0:
value = x * 16807
else:
left = day22_erosion_level((x - 1, y), target, depth, memory)
up = day22_erosion_level((x, y - 1), target, depth, memory)
value = left * up
memory[pos] = value
return value
def day22_erosion_level(pos, target, depth, memory):
index = day22_geologic_index(pos, target, depth, memory)
return (index + depth) % 20183
def day22_get_terrain(depth, target):
total = 0
terrain = {}
memory = Counter()
for x in range(0, target[0] + 1):
for y in range(0, target[1] + 1):
pos = (x, y)
erosion = day22_erosion_level(pos, target, depth, memory)
region_type = Day22_Type.get_type(erosion)
total += region_type
terrain[pos] = region_type
return total, (terrain, memory)
def day22_debug_map(terrain, depth, target, node):
for y in range(0, target[1] + 5):
line = ""
for x in range(0, target[0] + 5):
erosion = day22_erosion_level((x, y), target, depth, terrain[1])
terrain[0][(x, y)] = Day22_Type.get_type(erosion)
            if node.pos[0] == x and node.pos[1] == y:
line += Day22_Tools.repr(node.tool)[0]
elif x == target[0] and y == target[1]:
line += "T"
else:
                line += Day22_Type.repr(terrain[0][(x, y)])
print(line)
print()
time.sleep(.5)
def day22_total_risk(depth, target):
total, _ = day22_get_terrain(depth, target)
return total
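# Puzzle example (as I recall): depth 510 with target (10, 10) gives a total
# risk level of 114 for part 1, and the fastest route to the target in part 2
# takes 45 minutes.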
def day22_new_nodes(node, terrain, depth, target, delta_x, delta_y):
node_x, node_y = node.pos
new_x, new_y = node_x + delta_x, node_y + delta_y
if new_x < 0 or new_y < 0:
return []
if (new_x, new_y) not in terrain[0]:
erosion = day22_erosion_level(
(new_x, new_y), target, depth, terrain[1])
terrain[0][(new_x, new_y)] = Day22_Type.get_type(erosion)
new_terrain = terrain[0][(new_x, new_y)]
alternatives = []
for tool in Day22_Tools.tools:
if Day22_Tools.allowed_tool(new_terrain, tool):
if tool == node.tool:
alternatives = [
Day22_Path_Node(new_x,
new_y,
target[0] - new_x,
target[1] - new_y,
tool,
node.minutes + 1,
node.path + [node])
]
break
elif Day22_Tools.allowed_tool(terrain[0][node.pos], tool):
alternatives.append(
Day22_Path_Node(
node_x,
node_y,
target[0] - node_x,
target[1] - node_y,
tool,
node.minutes + 7,
node.path + [node])
)
return alternatives
def day22_manhattan_adjacency():
return [(i, j) for i in (-1, 0, 1) for j in (-1, 0, 1) if not i == j and (i == 0 or j == 0)]
def day22_find_path(depth, target):
_, terrain = day22_get_terrain(depth, target)
heap = [
Day22_Path_Node(0, 0, target[0], target[1], Day22_Tools.torch, 0, [])
]
heapq.heapify(heap)
visited = {}
while heap:
node = heapq.heappop(heap)
if node.pos == target and node.tool == Day22_Tools.torch:
return node
for delta_x, delta_y in day22_manhattan_adjacency():
for new_node in day22_new_nodes(node, terrain, depth, target, delta_x, delta_y):
new_node_x, new_node_y = new_node.pos
key = (new_node_x, new_node_y, new_node.tool)
if key not in visited or (visited[key] > new_node.minutes):
visited[key] = new_node.minutes
heapq.heappush(heap, new_node)
raise ValueError
def day22_1(data):
#data = read_input(2018, 2201)
depth, target = day22_parse_input(data)
return day22_total_risk(depth, target)
def day22_2(data):
#data = read_input(2018, 2201)
depth, target = day22_parse_input(data)
node = day22_find_path(depth, target)
# _, terrain = day22_get_terrain(depth, target)
# for n in node.path:
# day22_debug_map(terrain, depth, target, n)
return node.minutes
""" DAY 23 """
def day23_parse_input(data):
# pos=<-5920414,66954528,45418976>, r=94041555
bots = []
for line in data:
bot = [int(x) for x in re.findall(
r"pos=<(-?\d+),(-?\d+),(-?\d+)>, r=(\d+)", line)[0]]
bots.append(((bot[0], bot[1], bot[2]), bot[3]))
return sorted(bots, reverse=True, key=lambda b: b[1])
def day23_manhattan(bot, other_bot):
x, y, z = bot
other_x, other_y, other_z = other_bot
return abs(x - other_x) + abs(y - other_y) + abs(z - other_z)
def day23_bots_in_range(bots, bot):
in_range = []
bot_coord, signal = bot
for other_bot in bots:
other_bot_coord, _ = other_bot
if day23_manhattan(bot_coord, other_bot_coord) <= signal:
in_range.append(other_bot)
return in_range
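# Part 1 example (as I recall): in the 9-nanobot sample the strongest bot sits
# at (0, 0, 0) with r=4 and has 7 bots in range of its signal.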
def day23_bounds(bots):
min_x = min_y = min_z = sys.maxsize
max_x = max_y = max_z = 0
for coord, r in bots:
x, y, z = coord
min_x = min(x - r, min_x)
max_x = max(x + r, max_x)
min_y = min(y - r, min_y)
max_y = max(y + r, max_y)
min_z = min(z - r, min_z)
max_z = max(z + r, max_z)
return (min_x, max_x, min_y, max_y, min_z, max_z)
def day23_squared(v):
return v * v
def day23_intersect(zone, coord, r):
min_x, max_x, min_y, max_y, min_z, max_z = zone
x, y, z = coord
dist_squared = r * r
if x < min_x:
dist_squared -= day23_squared(x - min_x)
elif x > max_x:
dist_squared -= day23_squared(x - max_x)
if y < min_y:
dist_squared -= day23_squared(y - min_y)
elif y > max_y:
dist_squared -= day23_squared(y - max_y)
if z < min_z:
dist_squared -= day23_squared(z - min_z)
elif z > max_z:
dist_squared -= day23_squared(z - max_z)
return dist_squared > 0
def day23_corners(zone):
min_x, max_x, min_y, max_y, min_z, max_z = zone
for x in (min_x, max_x):
for y in (min_y, max_y):
for z in (min_z, max_z):
yield (x, y, z)
def day23_bots_in_reach(bots, zone):
if zone is None:
return []
min_x, max_x, min_y, max_y, min_z, max_z = zone
in_reach = deque([])
for bot in bots:
coord, r = bot
x, y, z = coord
# Bot is inside zone
if min_x <= x <= max_x and \
min_y <= y <= max_y and \
min_z <= z <= max_z:
in_reach.append(bot)
continue
# Sphere of bot's signal doesn't intersect zone
if not day23_intersect(zone, coord, r):
continue
# Is any corner of the zone within the bot's signal?
for corner in day23_corners(zone):
if day23_manhattan(coord, corner) <= r:
in_reach.append(bot)
break
return in_reach
def day23_get_subzones(zone):
min_x, max_x, min_y, max_y, min_z, max_z = zone
len_x = max_x - min_x
half_x = math.floor(len_x / 2)
len_y = max_y - min_y
half_y = math.floor(len_y / 2)
len_z = max_z - min_z
half_z = math.floor(len_z / 2)
if len_x == 0 or len_y == 0 or len_z == 0:
return [None]
zone_1 = (min_x, min_x + half_x, min_y,
min_y + half_y, min_z, min_z + half_z)
zone_2 = (min_x, min_x + half_x, min_y, min_y +
half_y, min_z + half_z + 1, max_z)
zone_3 = (min_x, min_x + half_x, min_y +
half_y + 1, max_y, min_z, min_z + half_z)
zone_4 = (min_x, min_x + half_x, min_y + half_y +
1, max_y, min_z + half_z + 1, max_z)
zone_5 = (min_x + half_x + 1, max_x, min_y,
min_y + half_y, min_z, min_z + half_z)
zone_6 = (min_x + half_x + 1, max_x, min_y,
min_y + half_y, min_z + half_z + 1, max_z)
zone_7 = (min_x + half_x + 1, max_x, min_y +
half_y + 1, max_y, min_z, min_z + half_z)
zone_8 = (min_x + half_x + 1, max_x, min_y +
half_y + 1, max_y, min_z + half_z + 1, max_z)
return (zone_1, zone_2, zone_3, zone_4, zone_5, zone_6, zone_7, zone_8)
def day23_thread_worker(bots, zone):
return (zone, day23_bots_in_reach(bots, zone))
def day23_bot_count_in_subzones(bots, zone, pool):
zones_to_process = day23_get_subzones(zone)
results = pool.map(functools.partial(
day23_thread_worker, bots), zones_to_process)
most_bots_count = max([len(r[1]) for r in results])
if most_bots_count == 0:
return []
# Negative count to be used on heapq
return [(-most_bots_count, r[0])
for r in results
if len(r[1]) == most_bots_count
]
def day23_zone_dimensions(zone):
min_x, max_x, min_y, max_y, min_z, max_z = zone
return (max_x - min_x, max_y - min_y, max_z - min_z)
def day23_get_locations_counts(bots, best_zones):
locations = deque([])
for zone, _ in best_zones:
min_x, max_x, min_y, max_y, min_z, max_z = zone
for x2 in range(min_x, max_x + 1):
for y2 in range(min_y, max_y + 1):
for z2 in range(min_z, max_z + 1):
test_coord = (x2, y2, z2)
counter = 0
for coord, r in bots:
if day23_manhattan(coord, test_coord) <= r:
counter += 1
locations.append((test_coord, counter))
return locations
def day23_calculate_best_distance(best_locations):
locations_distances = []
for location in best_locations:
locations_distances.append(day23_manhattan(location, (0, 0, 0)))
return sorted(locations_distances, reverse=True)[0]
def day23_best_location(bots, zone):
heap = [(0, zone)]
heapq.heapify(heap)
best_zones = []
max_bot_count = 0
# Create pool for parallel work
threads = 4
pool = mp.Pool(processes=threads)
while heap:
bot_count, zone = heapq.heappop(heap)
# Count is negative because of heapq
bot_count = -bot_count
        # Skip if this count is worse than what we've already encountered
if bot_count < max_bot_count:
continue
dim_x, dim_y, dim_z = day23_zone_dimensions(zone)
min_dim = 2
if min_dim in (dim_x, dim_y, dim_z):
if bot_count > max_bot_count:
max_bot_count = bot_count
best_zones.append((zone, bot_count))
else:
for new_zone in day23_bot_count_in_subzones(bots, zone, pool):
heapq.heappush(heap, new_zone)
# End thread pool
pool.close()
pool.join()
max_bot_count = max([z[1] for z in best_zones])
# Keep only the best zones
best_zones = [z for z in best_zones if z[1] == max_bot_count]
locations = day23_get_locations_counts(bots, best_zones)
max_bot_count = max([l[1] for l in locations])
# Keep only the best locations
locations = [l[0] for l in locations if l[1] == max_bot_count]
return day23_calculate_best_distance(locations)
def day23_1(data):
#data = read_input(2018, 2301)
bots = day23_parse_input(data)
return len(day23_bots_in_range(bots, bots[0]))
def day23_2(data):
#data = read_input(2018, 2302)
bots = day23_parse_input(data)
starting_zone = day23_bounds(bots)
return day23_best_location(bots, starting_zone)
""" DAY 24 """
class P_24(IntEnum):
Units = 0
Hit_points = 1
Immunity = 2
Weakness = 3
Attack_Power = 4
Attack_Type = 5
Initiative = 6
Id = 7
def day24_parse_group(line, id_):
# pylint: disable=C0301
# 4485 units each with 2961 hit points (immune to radiation; weak to fire, cold) with an attack that does 12 slashing damage at initiative 4
units, hit_points, immunity, weakness, attack_power, attack_type, initiative = \
re.findall(
r"(\d+) units each with (\d+) hit points (?:\((?:immune to ((?:(?:\w+)(?:, )?)+)(?:; )?)?(?:weak to ((?:(?:\w+)(?:, )?)+))?\) )?with an attack that does (\d+) (\w+) damage at initiative (\d+)", line)[0]
return (int(units), int(hit_points), immunity.split(", "), weakness.split(", "), int(attack_power), attack_type, int(initiative), id_)
# pylint: enable=C0301
def day24_parse_input(data):
groups = [[], []]
current_army = 0
id_counter = 0
for line in data:
if line == "Immune System:":
current_army = 0
id_counter = 0
continue
elif line == "Infection:":
current_army = 1
id_counter = 0
continue
elif line == "":
continue
group = day24_parse_group(
line, "{0}_{1}".format(current_army, id_counter))
id_counter += 1
groups[current_army].append(group)
return groups
def day24_debug_armies(armies):
print("Immune System:")
day24_debug_army(armies[0])
print("Infection:")
day24_debug_army(armies[1])
print()
def day24_debug_army(army):
for group in army:
if group[P_24.Units] > 0:
print("Group {0} contains {1} units".format(
group[P_24.Id], group[P_24.Units]))
def day24_debug_target(attack_id, defend_id, damage):
print("Group {0} would deal defending group {1} {2} damage"
.format(attack_id, defend_id, damage))
def day24_parse_id(id_):
parts = id_.split("_")
return (int(parts[0]), int(parts[1]))
def day24_effective_power(group):
return group[P_24.Units] * group[P_24.Attack_Power]
def day24_damage(attack_group, defend_group):
damage = day24_effective_power(attack_group)
attack_type = attack_group[P_24.Attack_Type]
immunity = defend_group[P_24.Immunity]
weakness = defend_group[P_24.Weakness]
if attack_type in immunity:
return 0
elif attack_type in weakness:
return 2 * damage
return damage
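# Damage rules above in one line: effective power = units * attack power; an
# attack type in the defender's immunity list deals 0, one in its weakness
# list deals double. E.g. a 10-unit group with attack power 4 using fire
# deals 80 damage to a group weak to fire.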
def day24_target_criteria(attack_group, defend_group):
return (day24_damage(attack_group, defend_group),
day24_effective_power(defend_group),
defend_group[P_24.Initiative])
def day24_selection_criteria(group):
return (day24_effective_power(group), group[P_24.Initiative])
def day24_select_target(group, opposing_army, targeted, include_zero):
available = [enemy
for enemy in opposing_army
if enemy[P_24.Units] > 0 and enemy[P_24.Id] not in targeted]
targets = sorted(available, reverse=True,
key=lambda g: day24_target_criteria(group, g))
if not targets:
return None
elif len(targets) > 1 and \
day24_target_criteria(group, targets[0]) == day24_target_criteria(group, targets[1]):
# Can't decide
return None
if day24_damage(group, targets[0]) == 0 and not include_zero:
return None
return targets[0][P_24.Id]
def day24_target_selection_phase(armies):
targeting_map = {}
for include_zero in (False, True):
for current_army in range(2):
opposing_army = (current_army + 1) % 2
army = sorted(armies[current_army], reverse=True,
key=day24_selection_criteria)
for group in army:
if group[P_24.Units] > 0 and not group[P_24.Id] in targeting_map.keys():
target = day24_select_target(group,
armies[opposing_army],
targeting_map.values(),
include_zero=include_zero)
if not target is None:
targeting_map[group[P_24.Id]] = target
return targeting_map
def day24_attack_phase(groups_by_initiative, armies, targeting_map):
kills = 0
for group_id in groups_by_initiative:
attack_army_id, attack_group_id = day24_parse_id(group_id)
attack_group = armies[attack_army_id][attack_group_id]
if group_id in targeting_map:
target_id = targeting_map[group_id]
target_army_id, target_group_id = day24_parse_id(target_id)
target_group = armies[target_army_id][target_group_id]
if target_group[P_24.Units] > 0:
damage = day24_damage(attack_group, target_group)
max_units = math.floor(
damage / (target_group[P_24.Hit_points]))
dead_units = min(max_units, target_group[P_24.Units])
kills += dead_units
armies[target_army_id][target_group_id] = \
(target_group[P_24.Units] - dead_units,
target_group[P_24.Hit_points],
target_group[P_24.Immunity],
target_group[P_24.Weakness],
target_group[P_24.Attack_Power],
target_group[P_24.Attack_Type],
target_group[P_24.Initiative],
target_group[P_24.Id])
return kills == 0
def day24_fight(armies):
groups_by_initiative = \
[group[P_24.Id]
for group in
sorted([group for army in armies for group in army],
reverse=True,
key=lambda g: g[P_24.Initiative])]
while any([group[P_24.Units] > 0 for group in armies[0]]) and \
any([group[P_24.Units] > 0 for group in armies[1]]):
# day24_debug_armies(armies)
targeting_map = day24_target_selection_phase(armies)
stalemate = day24_attack_phase(
groups_by_initiative, armies, targeting_map)
if stalemate:
break
return armies
def day24_add_boost(army, boost):
return [(target_group[P_24.Units],
target_group[P_24.Hit_points],
target_group[P_24.Immunity],
target_group[P_24.Weakness],
target_group[P_24.Attack_Power] + boost,
target_group[P_24.Attack_Type],
target_group[P_24.Initiative],
target_group[P_24.Id]) for target_group in army]
def day24_clone_army(army):
return [(target_group[P_24.Units],
target_group[P_24.Hit_points],
target_group[P_24.Immunity],
target_group[P_24.Weakness],
target_group[P_24.Attack_Power],
target_group[P_24.Attack_Type],
target_group[P_24.Initiative],
target_group[P_24.Id]) for target_group in army]
def day24_test_boost(armies, boost):
new_armies = [day24_clone_army(armies[0]), day24_clone_army(armies[1])]
new_armies[0] = day24_add_boost(armies[0], boost)
new_armies = day24_fight(new_armies)
remaining = sum([group[P_24.Units] for group in new_armies[0]])
other = sum([group[P_24.Units] for group in new_armies[1]])
return (boost, remaining, other)
def day24_boost_immune(armies):
remaining = 0
boost = 1
threads = 16
pool = mp.Pool(processes=threads)
while True:
process = range(boost, boost + threads + 1)
boost = boost + threads + 1
results = pool.map(functools.partial(
day24_test_boost, armies), process)
success = [result for result in results if result[2] == 0]
success = sorted(success, key=lambda x: x[0])
if success:
remaining = success[0][1]
break
pool.close()
pool.join()
return remaining
def day24_1(data):
#data = read_input(2018, 2401)
armies = day24_parse_input(data)
armies = day24_fight(armies)
total1 = sum([group[P_24.Units] for group in armies[0]])
total2 = sum([group[P_24.Units] for group in armies[1]])
return max(total1, total2)
def day24_2(data):
#data = read_input(2018, 2401)
armies = day24_parse_input(data)
return day24_boost_immune(armies)
""" DAY 25 """
def day25_manhattan(point1, point2):
(a1, b1, c1, d1), (a2, b2, c2, d2) = (point1, point2)
return abs(a1 - a2) + abs(b1 - b2) + abs(c1 - c2) + abs(d1 - d2)
def day25_constellations(points):
constellations = []
for point in points:
belonging = []
for i, _ in enumerate(constellations):
constellation = constellations[i]
belongs = False
for other in constellation:
if day25_manhattan(point, other) <= 3:
belongs = True
break
if belongs:
belonging.append(i)
if not belonging:
constellations.append(deque([point]))
else:
i = 0
c = constellations[belonging[i]]
c.append(point)
i += 1
while i < len(belonging):
c.extend(constellations[belonging[i]])
constellations[belonging[i]].clear()
i += 1
return [c for c in constellations if len(c) > 0]
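# Puzzle examples (as I recall): the four sample inputs form 2, 4, 3 and 8
# constellations respectively.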
def day25_parse_input(data):
# -2,-2,-2,2
points = deque([])
for line in data:
point = tuple([int(x) for x in re.findall(
r"(-?\d+),(-?\d+),(-?\d+),(-?\d+)", line)[0]])
points.append(point)
return points
def day25_1(data):
#data = read_input(2018, 2504)
points = day25_parse_input(data)
return len(day25_constellations(points))
""" MAIN FUNCTION """
if __name__ == "__main__":
main(sys.argv, globals(), 2018)
| true
|
1f30d62d0c5b05af5af698dfe84717214f221d35
|
Python
|
bradlyke/desi_utilities
|
/lineClass.py
|
UTF-8
| 3,900
| 3.1875
| 3
|
[] |
no_license
|
class Lines:
def __init__(self,line_name):
line_name = self.name_cleaner(line_name)
if line_name == 'cii':
line_name = 'ciir' #default to semi-forbidden, it's more common
elif line_name == 'oiii':
line_name = 'oiiir' #When calling one in the doublet, use more common
self.name = self.line_display(line_name)
self.restwave = self.line_list(line_name)
def name_cleaner(self,name):
name = name.strip(']').strip('[').lower().replace(' ','')
return name
def line_list(self,name):
lines = {
'lyb' : 1025.72,
'lya' : 1215.67,
'oi' : 1304.35,
'ciib' : 1335.30,
'siiv' : 1396.76,
'oiv' : 1402.06,
'civ' : 1549.06,
'heii' : 1640.42,
'ciii' : 1908.73,
'ciir' : 2326.44,
'feii' : 2626.92, #observed, not lab. lab is UV I
'mgii' : 2798.75,
'oii' : 3728.48,
'hd' : 4102.89,
'hg' : 4341.68,
'hb' : 4862.68,
'oiiib' : 4960.30,
'oiiir' : 5008.24,
'ha' : 6564.61
}
return lines[name]
def line_display(self,name):
line_names = {
'lyb' : 'Ly b',
'lya' : 'Ly a',
'oi' : 'O I',
'ciib' : 'C II',
'siiv' : 'Si IV',
'oiv' : 'O IV]',
'civ' : 'C IV',
'heii' : 'He II',
'ciii' : 'C III]',
'ciir' : 'C II]',
'feii' : 'Fe II',
'mgii' : 'Mg II',
'oii' : '[O II]',
'hd' : 'H d',
'hg' : 'H g',
'hb' : 'H b',
'oiiib' : '[O III]',
'oiiir' : '[O III]',
'ha' : 'H a'
}
return line_names[name]
def list_lines(self):
emlines = ['lyb','lya','oi','ciib','siiv','oiv','civ','heii','ciii',
'ciir','feii','mgii','oii','hd','hg','hb','oiiib',
'oiiir','ha']
return emlines
if __name__=='__main__':
import argparse
parser = argparse.ArgumentParser(description='Returns rest wavelength of common quasar broad emission lines')
parser.add_argument('-n', '--line_name', default='mgii', metavar='',
help="The emission line name")
parser.add_argument('-l', '--list_names', action='store_true',
help='List the lines included in class')
args = parser.parse_args()
line = Lines(args.line_name)
if args.list_names==True:
print('\n')
print(' Line Name | Rest Wavelength | Variable Name ')
print('----------------------------------------------------')
for elname in line.list_lines():
el = Lines(elname)
if elname == 'oiiib' or elname=='oiiir':
print(' {} d | {:.2f} Ang | {}'.format(el.name,el.restwave,elname))
else:
print(' {:7s} | {:.2f} Ang | {}'.format(el.name,el.restwave,elname))
print('--Note: d means these form a doublet.')
print('--Note: "cii" defaults to ciir and "oiii" defaults to oiiir.')
else:
print('\n Line Name : {}'.format(line.name))
print(' Rest Wavelength: {} Ang'.format(line.restwave))
#In another program, use like:
# from lineClass import Lines
# ...
# em = Lines('mgii')
# em.restwave -> (stores just the number)
# em.name -> (stores the display name with appropriate brackets)
| true
|
532c1e390505ec2640ecfbcff5c1bd37558fd48f
|
Python
|
astropd/mcpiMinecraftTools
|
/projectileGame.py
|
UTF-8
| 1,561
| 2.765625
| 3
|
[] |
no_license
|
from mcpi.minecraft import Minecraft
import time
mc = Minecraft.create("mc2.tokyocodingclub.com")
p1_name = 'TCC_10'
p2_name = 'TCC_05'
p1_id = mc.getPlayerEntityId(p1_name)
p2_id = mc.getPlayerEntityId(p2_name)
p1_health = 10
p2_health = 10
mc.postToChat('THE GAME HAS BEGUN')
mc.postToChat(p1_name + ' vs. ' + p2_name)
mc.postToChat('USE A BOW TO SHOOT YOUR OPPONENT')
mc.postToChat('EACH PLAYER HAS ' + str(p1_health) + ' HEALTH')
while True:
hits = mc.events.pollBlockHits()
for hit in hits:
print("You hit block ({},{},{})".format(hit.pos.x, hit.pos.y, hit.pos.z))
time.sleep(0.1)
for proj in mc.events.pollProjectileHits():
print(proj)
if proj.originName == p1_name and proj.targetName == p2_name:
if p2_health > 0:
p2_health -= 1
mc.postToChat(p1_name + ' has hit ' + p2_name)
mc.postToChat(p2_name + ' has ' + str(p2_health) + ' health left')
else:
mc.postToChat(p1_name + ' has hit ' + p2_name + ' and killed him!')
mc.entity.setTilePos(p2_id, 100, -200, 100)
if proj.originName == p2_name and proj.targetName == p1_name:
if p1_health > 0:
p1_health -= 1
mc.postToChat(p2_name + ' has hit ' + p1_name)
mc.postToChat(p1_name + ' has ' + str(p1_health) + ' health left')
else:
                mc.postToChat(p2_name + ' has hit ' + p1_name + ' and killed him!')
mc.entity.setTilePos(p1_id, 100, -200, 100)
| true
|
1cef71ee9684ee9f580dfb990ae386bcdf4cc793
|
Python
|
fluxt/cs440
|
/mp2/gomoku.py
|
UTF-8
| 4,806
| 3.203125
| 3
|
[] |
no_license
|
import time
import numpy as np
import reflex
import minimax
import userplay
import alphabeta
def get_initial_board():
return np.array([[0]*7]*7)
def get_init_alpha_board():
return np.array([['.']*7]*7)
def has_pattern_position(board, pattern):
size = len(pattern)
# check horizontal
for x in range(8 - size):
for y in range(7):
good = True
for i in range(size):
if board[x+i][y] != pattern[i]:
good = False
if good:
return True
# vertical
for x in range(7):
for y in range(8 - size):
good = True
for i in range(size):
if board[x][y+i] != pattern[i]:
good = False
if good:
return True
# top-left to bottom-right diagonal
for x in range(8 - size):
for y in range(8 - size):
good = True
for i in range(size):
if board[x+i][y+i] != pattern[i]:
good = False
if good:
return True
# top-right to bottom-left diagonal
for x in range(size-1, 7):
for y in range(8 - size):
good = True
for i in range(size):
if board[x-i][y+i] != pattern[i]:
good = False
if good:
return True
    return False
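# Example: has_pattern_position(board, [1]*5) is True exactly when player 1
# has five stones in a row horizontally, vertically, or on either diagonal.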
# returns 0 if game is not finished
# or 1/2 if that player has won
# or 3 if it is a draw
def get_game_status(game_board):
p1Win = has_pattern_position(game_board, [1]*5)
if p1Win:
return 1
p2Win = has_pattern_position(game_board, [2]*5)
if p2Win:
return 2
for x in range(7):
for y in range(7):
if not game_board[x][y]:
return 0
return 3
def print_char_board(board):
print("BOARD:")
for row in reversed(board.T):
print("".join(row))
def play_game(red, blue):
game_board = get_initial_board()
alphabet_board = get_init_alpha_board()
move_number = 0
while True:
#check red move
current_move = red.getMove(game_board)
# set player to red
game_board[current_move] = 1
alphabet_board[current_move] = chr(ord('a')+ move_number)
#print("Red's Move " + str(move_number))
#userplay.print_user_board(game_board)
# check for winner
game_status = get_game_status(game_board)
if game_status == 1:
print("RED WINS!")
if game_status == 3:
print("ITS A TIE!")
if game_status != 0:
print_char_board(alphabet_board)
return game_status
#check blue move
current_move = blue.getMove(game_board)
# set player to blue
game_board[current_move] = 2
alphabet_board[current_move] = chr(ord('A')+ move_number)
#print("Blue's Move " + str(move_number))
#userplay.print_user_board(game_board)
# check for winner
game_status = get_game_status(game_board)
if game_status == 2:
print("BLUE WINS!")
if game_status == 3:
print("ITS A TIE!")
if game_status != 0:
print_char_board(alphabet_board)
return game_status
move_number += 1
if __name__ == "__main__":
alpha_beta_red = alphabeta.AlphaBeta(1,3)
alpha_beta_blue = alphabeta.AlphaBeta(2,3)
minimax_red = minimax.MiniMax(1,3)
minimax_blue = minimax.MiniMax(2,3)
reflex_red = reflex.ReflexAgent(1)
reflex_blue = reflex.ReflexAgent(2)
print ("Alpha-Beta vs MiniMax")
start_1 = time.time()
play_game(alpha_beta_red, minimax_blue)
end_1 = time.time()
print("total runtime : {0:.3f} seconds".format(end_1-start_1))
print("Red nodes expanded: " + str(alpha_beta_red.get_nodes_expanded()))
alpha_beta_red.reset()
print("Blue nodes expanded: " + str(minimax_blue.get_nodes_expanded()))
minimax_blue.reset()
print ("MiniMax vs Alpha-Beta")
start_2 = time.time()
play_game(minimax_red, alpha_beta_blue)
end_2 = time.time()
print("total runtime : {0:.3f} seconds".format(end_2-start_2))
print("Red nodes expanded: " + str(minimax_red.get_nodes_expanded()))
minimax_red.reset()
print("Blue nodes expanded: " + str(alpha_beta_blue.get_nodes_expanded()))
alpha_beta_blue.reset()
print("Alpha-Beta vs Reflex")
start_3 = time.time()
play_game(alpha_beta_red, reflex_blue)
end_3 = time.time()
print("total runtime : {0:.3f} seconds".format(end_3-start_3))
print("Red nodes expanded: " + str(alpha_beta_red.get_nodes_expanded()))
alpha_beta_red.reset()
print("Reflex vs Alpha-Beta")
start_4 = time.time()
play_game(reflex_red, alpha_beta_blue)
end_4 = time.time()
print("total runtime : {0:.3f} seconds".format(end_4-start_4))
print("Blue nodes expanded: " + str(alpha_beta_blue.get_nodes_expanded()))
alpha_beta_blue.reset()
print("Reflex vs MiniMax")
start_5 = time.time()
play_game(reflex_red, minimax_blue)
end_5 = time.time()
print("total runtime : {0:.3f} seconds".format(end_5-start_5))
print("Blue nodes expanded: " + str(minimax_blue.get_nodes_expanded()))
minimax_blue.reset()
print("MiniMax vs Reflex")
start_6 = time.time()
play_game(minimax_red, reflex_blue)
end_6 = time.time()
print("total runtime : {0:.3f} seconds".format(end_6-start_6))
print("Red nodes expanded: " + str(minimax_red.get_nodes_expanded()))
minimax_red.reset()
| true
|
5deb9c7dfd71abced1ace5132fdf53a36fc030d9
|
Python
|
KhallilB/Tweet-Generator
|
/Code/practice_code/histograms.py
|
UTF-8
| 3,139
| 3.578125
| 4
|
[] |
no_license
|
import re
def histogram(source):
# initializing empty dictionary
histogram_dict = {}
# split the source into seperate words
# on whitespace then iterate over them
for word in source.split():
if word in histogram_dict:
histogram_dict[word] += 1
else:
            histogram_dict[word] = 1
return histogram_dict
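# Example: histogram("to be or not to be") returns
# {'to': 2, 'be': 2, 'or': 1, 'not': 1}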
def histogram_lists(source):
# initilize empty list
histogram_list = []
# split the source into seperate words
# on whitespace then iterate over them
text = source.split()
text.sort()
while len(text) > 0:
count = 0
match = True
index = len(text) - 1
word = text[index]
        while match and index >= 0:  # include index 0 so the first element is counted
if word == text[index]:
count += 1
index -= 1
else:
match = False
histogram_list.append([word, count])
del text[-(count):]
return histogram_list
def histogram_tuples(source):
# initilize empty list
histogram_tuple = []
# split the source into seperate words
# on whitespace then iterate over them
text = source.split()
text.sort()
while len(text) > 0:
count = 0
match = True
index = len(text) - 1
word = text[index]
        while match and index >= 0:  # include index 0 so the first element is counted
if word == text[index]:
count += 1
index -= 1
else:
match = False
histogram_tuple.append((word, count))
del text[-(count):]
return histogram_tuple
def histogram_counts(source):
histogram_count = {}
text = source.split()
text.sort()
while len(text) > 0:
count = 0
match = True
index = len(text) - 1
word = text[index]
while match and index >= 0:
if word == text[index]:
count += 1
index -= 1
else:
match = False
if count in histogram_count:
histogram_count[count].append(word)
else:
histogram_count[count] = [word]
del text[-(count):]
return histogram_count
# TODO: Create another function that removes punctuations
def remove_punctuation(pattern, source):
for pat in pattern:
return(re.findall(pat, source))
def unique_words(histogram):
unique_entries = len(list(histogram))
return unique_entries
# Make function that takes in the exception of dictionaries and tuples
def frequency(word, histogram):
if word in histogram:
return histogram[word]
else:
return "Error: Word not found"
if __name__ == '__main__':
file = open('gutenberg.txt', 'r')
source = file.read()
pattern = ['[^!.?-]+']
punc_translation = (" ".join(remove_punctuation(pattern, source)))
# histogram
text_histogram = histogram_counts(source=punc_translation)
text_unique_words = unique_words(histogram=text_histogram)
# his_word_frequency = frequency(word='crude', histogram=text_histogram)
print('We found {} unique words.'.format(text_unique_words))
print(text_histogram)
| true
|
8f5da17d8a3f54dd35cfccdd12b6a316ae4bdd1f
|
Python
|
liyunwei-3558/Perc_ErrCorrect
|
/DatInterface.py
|
UTF-8
| 3,566
| 2.59375
| 3
|
[] |
no_license
|
import numpy as np
import ErrCorrectClass as er
import ErrCorrectClassold as ero
from xml.dom.minidom import parse
import xml.dom.minidom
import cv2
class Processor:
def __init__(self, index, filepath='./dataset/'):
self.filepath = filepath
self.index = index
self.readJPEG()
self.width = self.image.shape[1]
self.height = self.image.shape[0]
self.ur = []
self.lr = []
self.ub = []
self.lb = []
print("w=%d, h=%d" % (self.width, self.height))
self.readXML()
# self.fit()
pass
def readJPEG(self):
self.image = cv2.imread(self.filepath + ("JPEGImage/" + str(self.index)) + '.jpg')
# cv2.namedWindow("dat",cv2.WINDOW_FREERATIO)
# cv2.imshow("dat",a)
# cv2.waitKey(0)
def readXML(self):
DOMTree = xml.dom.minidom.parse(self.filepath + ("annotations/" + str(self.index)) + '.xml')
collection = DOMTree.documentElement
obs = collection.getElementsByTagName("object")
for ob in obs:
name = ob.getElementsByTagName('name')[0]
colortag = name.childNodes[0].data
bboxs = ob.getElementsByTagName('bndbox')[0]
x_mid = int(bboxs.getElementsByTagName('xmin')[0].childNodes[0].data) + \
int(bboxs.getElementsByTagName('xmax')[0].childNodes[0].data)
x_mid = x_mid // 2
y_mid = int(bboxs.getElementsByTagName('ymin')[0].childNodes[0].data) + \
int(bboxs.getElementsByTagName('ymax')[0].childNodes[0].data)
y_mid = y_mid // 2
# print("x=%d, y=%d, color=%s" % (x_mid, y_mid, colortag))
if colortag == 'r':
if y_mid <= self.height / 2:
self.ur.append([x_mid, y_mid])
elif y_mid > self.height / 2:
self.lr.append([x_mid, y_mid])
elif colortag == 'b':
if y_mid <= self.height / 2:
self.ub.append([x_mid, y_mid])
elif y_mid > self.height / 2:
self.lb.append([x_mid, y_mid])
# print(self.ur)
def fit(self):
# Upper Red
ur = np.array(self.ur)
x = ur[:, 0]
y = ur[:, 1]
A = ero.Regressor(y, x, np.exp(-10))
A.calc_M(min(3,(len(x)+1)//2), len(x))
# print(A.get_real_W())
# A.visualize(self.image)
A.visualize()
# Upper Blue
ub = np.array(self.ub)
x = ub[:, 0]
y = ub[:, 1]
B = ero.Regressor(y, x, np.exp(-10))
B.calc_M(min(3, (len(x) + 1) // 2), len(x))
# print(A.get_real_W())
B.visualize(colors=(255,50,50))
# B.visualize(self.image,(255,50,50))
cv2.imwrite("./fig"+str(self.index)+"_mask.png", self.image[:591,:])
def fit_mask(self):
mask = np.zeros_like(self.image)
# Upper Red
ur = np.array(self.ur)
x = ur[:, 0]
y = ur[:, 1]
A = ero.Regressor(y, x, np.exp(-10))
A.calc_M(min(3,(len(x)+1)//2), len(x))
# print(A.get_real_W())
# A.visualize(self.image)
A.visualize(mask)
# Upper Blue
ub = np.array(self.ub)
x = ub[:, 0]
y = ub[:, 1]
B = er.Regressor(y, x, np.exp(-10))
B.calc_M(min(3, (len(x) + 1) // 2), len(x))
# print(A.get_real_W())
# B.visualize(colors=(255,50,50))
B.visualize(mask,(255,50,50))
cv2.imwrite("./fig"+str(self.index)+"_mask.png", mask[:591,:])
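# Example usage (a sketch; assumes ./dataset/JPEGImage/3.jpg and
# ./dataset/annotations/3.xml exist):
#   p = Processor(3)
#   p.fit_mask()   # writes ./fig3_mask.png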
| true
|
119cfbad55cb9050a1df193b46e1442be040e6e8
|
Python
|
muzigit/PythonNotes
|
/section3_高级特性/action3.py
|
UTF-8
| 1,690
| 4.5625
| 5
|
[] |
no_license
|
# List comprehensions
# Example: generate [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
r = range(1, 11)
l = list(r)
for i in l:
print(i)
# Example: generate [1*1, 2*2, 3*3, ..., 10*10]
# Method 1: with an ordinary loop
L = []
for i in range(1, 11):
L.append(i * i)
# L.append(str(i) + '*' + str(i))
print(L)
# A list comprehension can replace the code above with a single line
# When writing one, put the element to generate, x * x, first, followed by the for clause
L = [x * x for x in range(1, 11)]
print(L)
# The for clause can be followed by a boolean expression
L1 = [x * x for x in range(1, 11) if x % 2 == 0]
# Here the boolean expression keeps only the even numbers
print(L1)
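# Note: an if written after the for filters items, while a conditional expression placed
# before the for transforms them, e.g. [x if x % 2 == 0 else -x for x in range(1, 11)]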
# Use two for clauses to build every combination of the two strings
L2 = [m + n for m in 'ABC' for n in 'XYZ']
print(L2)
# Using list comprehensions in practice
# List all entries in the current directory
import os
L3 = [file for file in os.listdir('.')]  # os.listdir lists both files and directories
print(L3)
# A list comprehension can also receive two variables at once
L = {'a': 'A', 'b': 'B', 'c': 'C'}
L11 = [k + '=' + v for k, v in L.items()]
print(L11)
# Convert every string in the list to lower case
L = ['Hello', 'World', 'Apple', 'Google']
L12 = [s.lower() for s in L]
print(L12)
print('---------- Exercise ----------')
# If the list contains both strings and integers, the list comprehension raises an error
# because non-string values have no lower() method:
L = ['Hello', 'World', 18, 'Apple', None]
# [s.lower() for s in L]
# Modify the list comprehension by adding an if clause so that it runs correctly:
L13 = [s.lower() for s in L if isinstance(s, str)]
# Test:
print(L13)
if L13 == ['hello', 'world', 'apple']:
    print('Test passed!')
else:
    print('Test failed!')
| true
|
1d8df64b6ce646b0fb0b40f2134fc28fb93c0471
|
Python
|
clydemcqueen/ukf
|
/scripts/plot_ukf_1d_filter.py
|
UTF-8
| 1,643
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
"""
Plot the output from test_1d_drag_filter()
"""
import sys
import matplotlib.pyplot as plt
def main():
if len(sys.argv) != 2:
print('Usage: plot_ukf_1d_filter.py filename')
exit(1)
f = open(sys.argv[1], 'r')
print('Expecting t, actual_x, actual_vx, actual_ax, z, x.x, x.vx, x.ax, K.x, K.vx, K.ax')
print(f.readline())
actual_xs = []
actual_vxs = []
actual_axs = []
zs = []
xs = []
vxs = []
axs = []
kxs = []
kvxs = []
kaxs = []
for line in f:
fields = line.split(',')
actual_xs.append(float(fields[1]))
actual_vxs.append(float(fields[2]))
actual_axs.append(float(fields[3]))
zs.append(float(fields[4]))
xs.append(float(fields[5]))
vxs.append(float(fields[6]))
axs.append(float(fields[7]))
kxs.append(float(fields[8]))
kvxs.append(float(fields[9]))
kaxs.append(float(fields[10]))
fig, (plot_x, plot_vx, plot_ax, plot_k) = plt.subplots(4, 1)
plot_x.plot(actual_xs, label='actual_x')
plot_x.plot(zs, marker='x', ls='', label='z')
plot_x.plot(xs, label='x')
plot_vx.plot(actual_vxs, label='actual_vx')
plot_vx.plot(vxs, label='vx')
plot_ax.plot(actual_axs, label='actual_ax')
plot_ax.plot(axs, label='ax')
plot_k.plot(kxs, label='kalman x')
plot_k.plot(kvxs, label='kalman vx')
plot_k.plot(kaxs, label='kalman ax')
plot_x.legend()
plot_vx.legend()
plot_ax.legend()
plot_k.legend()
print('Click plot to exit')
plt.waitforbuttonpress()
if __name__ == '__main__':
main()
| true
|
99f02bacc0447cb6e2f79a4ab4f273e8c0461c8b
|
Python
|
zardosht/isar
|
/tmp/opencv_ellipse.py
|
UTF-8
| 6,450
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
import time
import cv2
import numpy as np
# Colors (B, G, R)
from isar.scene import sceneutil
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
def create_blank(width, height, color=(0, 0, 0)):
"""Create new image(numpy array) filled with certain color in BGR"""
image = np.zeros((height, width, 3), np.uint8)
# Fill image with color
image[:] = color
return image
def draw_half_circle_rounded(image):
height, width = image.shape[0:2]
# Ellipse parameters
radius = 100
center = (int(width / 2), int(height / 2))
axes = (radius, radius)
angle = 0
startAngle = 180
endAngle = 360
thickness = 10
cv2.ellipse(image, center, axes, -90, 0, 355, BLACK, -1)
def draw_timer_as_chart():
text = "Hello Timer"
current_time = 0
duration = 20
font = cv2.FONT_HERSHEY_SIMPLEX
line_type = cv2.LINE_AA
font_scale = 0.5
text_thickness = 2
height, width = image.shape[0:2]
position = (int(width / 2), int(height / 2))
text_color = BLACK
text_size, _ = cv2.getTextSize(str(duration), font, font_scale, text_thickness)
radius = int(3 * text_size[1])
width = 2 * radius + 2 * text_thickness
height = 2 * radius + 2 * text_thickness
chart_image = np.zeros((height, width, 3), np.uint8)
chart_image[:] = (128, 128, 128)
center = (int(width / 2), int(height / 2))
# a circle for total duration
cv2.ellipse(chart_image, center, (radius, radius), -90, 0, 360, (255, 0, 0), -1)
cv2.imshow('timer as chart', image)
cv2.waitKey(1)
for i in range(duration + 1):
current_time = i
# a circle segment on top of that for current time
end_angle = int(current_time * (360 / duration))
cv2.ellipse(chart_image, center, (radius, radius), -90, 0, end_angle, (0, 0, 255), -1)
sceneutil.draw_image_on(image, chart_image, position,
position_is_topleft=False,
position_is_bottom_left=True)
cv2.putText(image, text,
(position[0], position[1] + text_thickness + text_size[1]),
font, font_scale, text_color, text_thickness, line_type)
cv2.imshow('timer as chart', image)
cv2.waitKey(1)
time.sleep(1)
def draw_timer_as_time():
text = "Hello Timer"
current_time = 0
duration = 20
font = cv2.FONT_HERSHEY_SIMPLEX
line_type = cv2.LINE_AA
font_scale = 0.5
text_thickness = 1
height, width = image.shape[0:2]
position = (int(width / 2), int(height / 2))
text_color = BLACK
cv2.imshow('timer as time', image)
cv2.waitKey(1)
remaining_time = duration - current_time
minutes = remaining_time // 60
seconds = remaining_time % 60
remaining_time_text = str(minutes).zfill(2) + ":" + str(seconds).zfill(2)
text_size, _ = cv2.getTextSize(remaining_time_text, font, font_scale, text_thickness)
for i in range(duration + 1):
current_time = i
remaining_time = duration - current_time
minutes = remaining_time // 60
seconds = remaining_time % 60
remaining_time_text = str(minutes).zfill(2) + ":" + str(seconds).zfill(2)
width = text_size[0] + 2 * text_thickness
height = text_size[1] + 4 * text_thickness
time_image = np.zeros((height, width, 3), np.uint8)
time_image[:] = (128, 128, 128)
cv2.putText(time_image, remaining_time_text,
(text_thickness, text_thickness + text_size[1]),
font, font_scale, text_color, text_thickness, line_type)
sceneutil.draw_image_on(image, time_image, position, position_is_topleft=False, position_is_bottom_left=True)
cv2.putText(image, text,
(position[0], position[1] + text_thickness + text_size[1]),
font, font_scale, text_color, text_thickness, line_type)
cv2.imshow('timer as time', image)
cv2.waitKey(1)
time.sleep(1)
def draw_timer_as_fraction():
text = "Hello Timer"
current_time = 0
duration = 20
font = cv2.FONT_HERSHEY_SIMPLEX
line_type = cv2.LINE_AA
font_scale = 0.5
text_thickness = 1
height, width = image.shape[0:2]
position = (int(width / 2), int(height / 2))
text_color = BLACK
cv2.imshow('timer as fraction', image)
cv2.waitKey(1)
remaining_time = duration - current_time
minutes = remaining_time // 60
seconds = remaining_time % 60
remaining_time_text = str(minutes).zfill(2) + ":" + str(seconds).zfill(2)
text_size, _ = cv2.getTextSize(remaining_time_text, font, font_scale, text_thickness)
for i in range(duration + 1):
current_time = i
text_size, _ = cv2.getTextSize(str(duration), font, font_scale, text_thickness)
width = text_size[0] + 2 * text_thickness
height = 2 * text_size[1] + 10 * text_thickness
fraction_image = np.zeros((height, width, 3), np.uint8)
fraction_image[:] = (128, 128, 128)
cv2.putText(fraction_image, str(current_time).zfill(len(str(duration))),
(text_thickness, text_thickness + text_size[1]),
font, font_scale, text_color, text_thickness, line_type)
cv2.line(fraction_image,
(text_thickness, 4 * text_thickness + text_size[1]),
(2 * text_thickness + text_size[0], 4 * text_thickness + text_size[1]),
text_color, text_thickness)
cv2.putText(fraction_image, str(duration),
(text_thickness, 6 * text_thickness + 2 * text_size[1]),
font, font_scale, text_color, text_thickness, line_type)
sceneutil.draw_image_on(image, fraction_image, position,
position_is_topleft=False,
position_is_bottom_left=True)
cv2.putText(image, text,
(position[0], position[1] + text_thickness + text_size[1]),
font, font_scale, text_color, text_thickness, line_type)
cv2.imshow('timer as fraction', image)
cv2.waitKey(1)
time.sleep(1)
# Create new blank 300x150 white image
width, height = 500, 500
image = create_blank(width, height, color=WHITE)
# draw_half_circle_rounded(image)
# draw_timer_as_chart()
# draw_timer_as_time()
draw_timer_as_fraction()
cv2.imshow('half_circle_rounded.jpg', image)
cv2.waitKey()
| true
|
0dc8afaa49f44c99f77f1a8639b790dc03e10c5b
|
Python
|
katsuya0719/MaterialDB
|
/libs/util.py
|
UTF-8
| 993
| 3.234375
| 3
|
[] |
no_license
|
def getDirection(angle):
if angle<11.25 or angle>=348.75:
return "N"
elif angle<33.75 and angle>=11.25:
return "NNE"
elif angle<56.25 and angle>=33.75:
return "NE"
elif angle<78.75 and angle>=56.25:
return "ENE"
elif angle<101.25 and angle>=78.75:
return "E"
elif angle<123.75 and angle>=101.25:
return "ESE"
elif angle<146.25 and angle>=123.75:
return "SE"
elif angle<168.75 and angle>=146.25:
return "SSE"
elif angle<191.25 and angle>=168.75:
return "S"
elif angle<213.75 and angle>=191.25:
return "SSW"
elif angle<236.25 and angle>=213.75:
return "SW"
elif angle<258.75 and angle>=236.25:
return "WSW"
elif angle<281.25 and angle>=258.75:
return "W"
elif angle<303.75 and angle>=281.25:
return "WNW"
elif angle<326.25 and angle>=303.75:
return "NW"
elif angle<348.75 and angle>=326.25:
return "NNW"
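# Example: getDirection(45) -> "NE", getDirection(100) -> "E", getDirection(350) -> "N"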
| true
|
156b55e6453c9f975de82aeb6340970345c3f98a
|
Python
|
MESragelden/leetCode
|
/First Unique Number.py
|
UTF-8
| 1,325
| 3.5625
| 4
|
[] |
no_license
|
class FirstUnique:
def __init__(self, nums):
self.listOFUnique =[]
self.d = dict()
for ele in nums:
self.add(ele)
    def showFirstUnique(self) -> int:
        # Return (and print) the first value that is still unique, or -1 if none remains.
        if len(self.listOFUnique) == 0:
            print(-1)
            return -1
        print(self.listOFUnique[0])
        return self.listOFUnique[0]
def add(self, value: int) -> None:
if value in self.d:
if self.d[value]==1:
self.d[value]+=1
self.listOFUnique.remove(value)
else :
self.listOFUnique.append(value)
self.d[value] = 1
# Your FirstUnique object will be instantiated and called as such:
#obj = FirstUnique(nums)
# param_1 = obj.showFirstUnique()
# obj.add(value)
firstUnique = FirstUnique([2,3,5])
firstUnique.showFirstUnique() # return 2
firstUnique.add(5); # the queue is now [2,3,5,5]
firstUnique.showFirstUnique(); # return 2
firstUnique.add(2); # the queue is now [2,3,5,5,2]
firstUnique.showFirstUnique(); # return 3
firstUnique.add(3); # the queue is now [2,3,5,5,2,3]
firstUnique.showFirstUnique(); # return -1
| true
|
89d3b30cf2aff516ed3da2f66193110a9401d395
|
Python
|
alimabean/Scripts
|
/scraper.py
|
UTF-8
| 663
| 3.359375
| 3
|
[] |
no_license
|
from bs4 import BeautifulSoup
import re
import urllib2
#function to find links within a given link
def myscrape(url, case):
#makes sure the url is a string
newu = str(url)
#add url components to the given case
newc = 'http://' + str(case) + '/'
page = urllib2.urlopen(newu).read()
soup = BeautifulSoup(page, 'lxml')
soup.prettify()
links = set()
for link in soup.findAll('a',href=re.compile(newc)):
links.add(link['href'])
return links
if __name__=="__main__":
url = raw_input("Enter url: ")
case = raw_input("Enter a case: ")
work = myscrape(url,case)
for data in work:
print data
| true
|
b7fac54df3b70b9c57e4b296a3b986ff9c1ff674
|
Python
|
emrkyk/Data-Science-Projects
|
/ML - LightGBM - Hitters Dataset/Hitters_LightGBM.py
|
UTF-8
| 7,851
| 2.984375
| 3
|
[] |
no_license
|
# =============================
# HITTERS LIGHTGBM
# =============================
# Summary of Dataset
# AtBat: Number of times at bat in 1986
# Hits: Number of hits in 1986
# HmRun: Number of home runs in 1986
# Runs: Number of runs in 1986
# RBI: Number of runs batted in in 1986
# Walks: Number of walks in 1986
# Years: Number of years in the major leagues
# CAtBat: Number of times at bat during his career
# CHits: Number of hits during his career
# CHmRun: Number of home runs during his career
# CRuns: Number of runs during his career
# CRBI: Number of runs batted in during his career
# CWalks: Number of walks during his career
# League: A factor with levels A and N indicating player's league at the end of 1986
# Division: A factor with levels E and W indicating player's division at the end of 1986
# PutOuts: Number of put outs in 1986
# Assists: Number of assists in 1986
# Errors: Number of errors in 1986
# Salary: 1987 annual salary on opening day in thousands of dollars ====>>> TARGET VARIABLE
# NewLeague: A factor with levels A and N indicating player's league at the beginning of 1987
import pandas as pd
import numpy as np
from helpers.helpers import *
from helpers.eda import *
from helpers.data_prep import *
import seaborn as sns
import pickle
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
import warnings
from lightgbm import LGBMRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter("ignore", category=ConvergenceWarning)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
pd.set_option("display.expand_frame_repr", False)
data = pd.read_csv("Datasets/hitters.csv")
df = data.copy()
df.head()
df.info()
check_df(df)
# -----------------
# # Missing Values - Eksik Gözlemler
# -----------------
missing_values_table(df)
# n_miss ratio
# Salary 59 18.320
# VARIABLES!
cat_cols, cat_but_car, num_cols, num_but_cat = grab_col_names(df)
# ---------------------------
# Outliers / Aykırı Gözlemler
# ---------------------------
df.describe([0.05, 0.25, 0.50, 0.75, 0.90, 0.95, 0.99]).T
for col in num_cols:
print(col, check_outlier(df, col))
# ---------------------------
# FEATURE ENGINEERING
# ---------------------------
corr = df.corr()
plt.figure(figsize=(18, 10))
sns.heatmap(corr, annot=True)
plt.show()
check_df(df) # 20
df.head()
df.loc[(df['Years'] < 5), 'Experience'] = 'inexperienced'
df.loc[(df['Years'] >= 5) & (df['Years'] < 10), 'Experience'] = 'experienced'
df.loc[(df['Years'] >= 10), 'Experience'] = 'senior'
df["Ratio_CAtBat"] = df["AtBat"] / df["CAtBat"]
df["Ratio_CHits"] = df["Hits"] / df["CHits"]
df["Ratio_CHmRun"] = df["HmRun"] / df["CHmRun"]
df["Ratio_Cruns"] = df["Runs"] / df["CRuns"]
df["Ratio_CRBI"] = df["RBI"] / df["CRBI"]
df["Ratio_CWalks"] = df["Walks"] / df["CWalks"]
df['CAtBat_average'] = df['CAtBat'] / df['Years']
df['CHits_average'] = df['CHits'] / df['Years']
df['CHmRun_average'] = df['CHmRun'] / df['Years']
df['CRun_average'] = df['CRuns'] / df['Years']
df['CRBI_average'] = df['CRBI'] / df['Years']
df['CWalks_average'] = df['CWalks'] / df['Years']
df["General Performance"] = df["PutOuts"] + df["Assists"] - df["Errors"]
df.loc[(df["Years"] > df["Years"].mean()) & (
df["General Performance"] > df["General Performance"].mean()), "PERFORMANCE"] = "Good"
df.loc[(df["Years"] < df["Years"].mean()) & (
df["General Performance"] > df["General Performance"].mean()), "PERFORMANCE"] = "Good"
# assumption: above-average years but below-average general performance -> "Reasonable"
df.loc[(df["Years"] > df["Years"].mean()) & (
        df["General Performance"] < df["General Performance"].mean()), "PERFORMANCE"] = "Reasonable"
df.loc[(df["Years"] < df["Years"].mean()) & (
df["General Performance"] < df["General Performance"].mean()), "PERFORMANCE"] = "Bad"
df.columns = [col.upper() for col in df.columns]
df.head()
df.shape
# --------------------------
# LABEL ENCODING # Conversions related to representation of variables
# --------------------------
# Expressing categorical variables ===> numerical
# Neden? Bazı fonk. kategorik tipte değişkenler yerine bunları sayısal olarak temsil edebilecek bir versiyonunu ister.
# Özellikle 2 sınıflı değişkenleri labellarını değiştiriyoruz, binary encoding de denebilir.
binary_cols = [col for col in df.columns if df[col].dtype == 'O' and df[col].nunique() == 2]
# ['LEAGUE', 'DIVISION', 'NEWLEAGUE']
for col in binary_cols:
df = label_encoder(df, col)
check_df(df)
# --------------------------
# ONE-HOT ENCODING
# --------------------------
# İkiden fazla sınıfa sahip olan kategorik değişkenlerin 1-0 olarak encode edilmesi.
# Sadece 2 sınıfı olan değişkenlere de uygulanabilir.
onehot_e_cols = [col for col in df.columns if 10 >= len(df[col].unique()) > 2]
df = one_hot_encoder(df, onehot_e_cols)
df.head()
df.shape
check_df(df)
# --------------------------
# MINMAXSCALER
# --------------------------
from sklearn.preprocessing import MinMaxScaler
scal_cols = [col for col in df.columns if df[col].nunique() > 20
and df[col].dtype != 'O'
and col not in "SALARY"]
scaler = MinMaxScaler(feature_range=(0, 1))
df[scal_cols] = scaler.fit_transform(df[scal_cols])
df.head()
df.shape
check_df(df)
df.dropna(inplace=True)
y = df["SALARY"] # dependent variable
X = df.drop(["SALARY"], axis=1) # independent variable
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=17)
#######################################
# LightGBM: Model & Tahmin
#######################################
lgb_model = LGBMRegressor().fit(X_train, y_train)
y_pred = lgb_model.predict(X_test)
np.sqrt(mean_squared_error(y_test, y_pred)) # 313
#######################################
# Model Tuning
#######################################
lgb_model = LGBMRegressor()
lgbm_params = {"learning_rate": [0.01, 0.1],
"n_estimators": [500, 1000],
"max_depth": [3, 5, 8],
"colsample_bytree": [1, 0.8, 0.5]}
lgbm_cv_model = GridSearchCV(lgb_model,
lgbm_params,
cv=10,
n_jobs=-1,
verbose=2).fit(X_train, y_train)
lgbm_cv_model.best_params_
#######################################
# Final Model
#######################################
lgbm_tuned = LGBMRegressor(**lgbm_cv_model.best_params_).fit(X_train, y_train)
y_pred = lgbm_tuned.predict(X_test)
np.sqrt(mean_squared_error(y_test, y_pred)) # 302
#######################################
# Feature Importance
#######################################
def plot_importance(model, features, num=len(X), save=False):
feature_imp = pd.DataFrame({'Value': model.feature_importances_, 'Feature': features.columns})
plt.figure(figsize=(10, 10))
sns.set(font_scale=1)
sns.barplot(x="Value", y="Feature", data=feature_imp.sort_values(by="Value",
ascending=False)[0:num])
plt.title('Features')
plt.tight_layout()
plt.show()
if save:
plt.savefig('importances.png')
plot_importance(lgbm_tuned, X_train)
| true
|
b66838a210f215659439388356cf4a25b340739c
|
Python
|
tracemap/tracemap-backend
|
/api/neo4j/tracemapUserAdapter.py
|
UTF-8
| 14,675
| 2.734375
| 3
|
[] |
no_license
|
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.exceptions import CypherError
import json
import time
import os
class TracemapUserAdapter:
def __init__(self):
uri = os.environ.get('NEO4J_URI')
self.driver = GraphDatabase.driver(uri, auth=(os.environ.get('NEO4J_USER'),
os.environ.get('NEO4J_PASSWORD')))
def __request_database(self, query: str) -> object:
"""
Takes a query and returns the neo4j data response of that query
:param query: the query string
:returns: database response data object
"""
with self.driver.session() as session:
with session.begin_transaction() as transaction:
try:
response_data = transaction.run(query).data()
return response_data
except CypherError as e:
return {'error': self.__check_database_error_code(e.code)}
@staticmethod
def __check_database_error_code(code):
return {
'Neo.ClientError.Schema.ConstraintValidationFailed': 'Constraint Error'
}.get(code, "unhandled error %s" % code)
def get_user_status(self, email: str) -> object:
"""
Check the status of a user by its email.
:param email: the users email
:returns: a dict containing boolean values for
'exists', 'beta_subscribed', 'newsletter_subscribed', 'registered'
"""
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
query += "RETURN {username: u.username, newsletter_subscribed: u.newsletter_subscribed, beta_subscribed: u.beta_subscribed} as user_status"
response = self.__request_database(query)
if response:
user_data = response[0]['user_status']
return {
'exists': True,
'registered': bool(user_data['username']),
'newsletter_subscribed': user_data['newsletter_subscribed'],
'beta_subscribed': user_data['beta_subscribed']
}
else:
return {
'exists': False,
'registered': False,
'newsletter_subscribed': False,
'beta_subscribed': False
}
def add_user(self, email: str) -> bool:
"""
Create the basic user object in the database.
This user is just identified by its email and
has no access to the tool if the password and username
properties are not set.
:param email: the users email
:returns: True on success, False if user for this email already exists
"""
query = "CREATE (u:TracemapUser {email: '%s'})" % email
response = self.__request_database(query)
if 'error' in response:
return False
else:
return True
def set_user_username_password(self, username: str, email: str, password_hash: str) -> bool:
"""
Adds a user to the database
:param username: the users name
:param email: the users email
:param password_hash: a hash of that users password
:returns: True on success, False if user does not exist
"""
query = "MATCH (u:TracemapUser {email: '%s'}) " % email
query += "SET u.username = '%s', u.password_hash = '%s' " % (username, password_hash)
query += "RETURN u"
response = self.__request_database(query)
return bool(response)
def get_user_username(self, email: str) -> str:
"""
Returns the email and username of a user identified by the email
:param email: the users email
:returns: the username string or an empty string if user does not exist
"""
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' RETURN u.username" % email
database_response = self.__request_database(query)
if database_response:
return database_response[0]['u.username']
else:
return ""
def get_user_password_hash(self, email: str) -> str:
"""
Get the password_hash of a user identified by the email.
:param email: the users email
:returns: the users password_hash or an empty string if user does not exist
"""
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' RETURN u.password_hash" % email
database_response = self.__request_database(query)
if database_response and 'u.password_hash' in database_response[0]:
return database_response[0]['u.password_hash']
else:
return ""
def set_user_session_token(self, email: str, session_token: str) -> bool:
"""
Saves the users session_token and adds a unix timestamp
for determining the age of the token at any later time
:param email: the users email
:param session_token: the users session_token
:returns: True on success
"""
timestamp = time.time()
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
query += "SET u.session_token = '%s' " % session_token
query += "SET u.session_timestamp = %s" % timestamp
self.__request_database(query)
return True
def get_user_session_token(self, email: str) -> str:
"""
Get the session_token of a user if there is a valid one.
:param email: the users email
:returns: session_token string if there is a valid one, empty string if not
"""
two_hours = 60 * 120
timestamp = time.time()
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
query += "RETURN u.session_timestamp, u.session_token"
database_response = self.__request_database(query)
if database_response:
old_timestamp = database_response[0]['u.session_timestamp']
if (not old_timestamp) or (old_timestamp < timestamp - two_hours):
# delete token and return error: expired
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
query += "REMOVE u.session_token, u.session_timestamp"
self.__request_database(query)
return ""
else:
# renew timestamp and return session_token
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
query += "SET u.session_timestamp = %s " % timestamp
self.__request_database(query)
return database_response[0]['u.session_token']
else:
# return error: no token
return ""
def set_user_reset_token(self, email: str, reset_token: str) -> bool:
"""
Saves the users reset_token and adds a unix timestamp
for determining the age of the token at any later time
:param email: the users email
:param reset_token: the users reset_token
:returns: True on success, False if user does not exist
"""
timestamp = time.time()
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
query += "SET u.reset_token = '%s' " % reset_token
query += "SET u.reset_timestamp = %s " % timestamp
query += "RETURN u"
response = self.__request_database(query)
return bool(response)
def get_user_reset_token(self, email: str) -> object:
"""
Get the reset_token of a user if there is a valid one.
:param email: the users email
:returns: reset_token if there is one, human readable error for display in the browser if not.
"""
one_day = 60 * 60 * 24
timestamp = time.time()
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
query += "RETURN u.reset_timestamp, u.reset_token"
database_response = self.__request_database(query)
if database_response:
reset_token = database_response[0]['u.reset_token']
old_timestamp = database_response[0]['u.reset_timestamp']
if not reset_token:
return {
'error': 'The reset token does not exist. Please request a password reset at https://tracemap.info.'
}
else:
                if old_timestamp < timestamp - one_day:
                    # delete the expired token, then return the expired error message
                    query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
                    query += "REMOVE u.reset_token, u.reset_timestamp"
                    self.__request_database(query)
                    return {
                        'error': 'The link is expired. Please request a new password reset at https://tracemap.info.'
                    }
                else:
                    return {
                        'token': reset_token
                    }
else:
# return undefined error
return {
'error': 'User does not exist.'
}
def delete_user(self, email: str) -> bool:
"""
Delete TracemapUser nodes registration properties (especially password_hash and username).
Dont delete the whole node because of subscriptions.
:param email: the users email
:returns: True on success
"""
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
query += "REMOVE u.username, u.password_hash, u.session_token, u.session_timestamp"
self.__request_database(query)
return True
def set_user_password_hash(self, email: str, new_password_hash: str) -> bool:
"""
Change a users password_hash to a new value.
:param email: the users email
:param new_password_hash: the new passwords hash
:returns: True on success, False if user does not exist
"""
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
query += "SET u.password_hash = '%s' " % new_password_hash
query += "RETURN u"
response = self.__request_database(query)
return bool(response)
def set_user_subscription_status(self, email: str, newsletter_subscribed: bool, beta_subscribed: bool) -> bool:
"""
Sets the user subscription status to 0 for unconfirmed subscriptions.
:param email: the users email
:param newsletter_subscribed: has the user subscribed to the newsletter?
:param beta_subscribed: has the user subscribed to the beta?
:returns: True on success, False on Error
"""
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
if newsletter_subscribed:
query += "SET u.newsletter_subscribed = 0 "
if beta_subscribed:
query += "SET u.beta_subscribed = 0 "
query += "RETURN u"
response = self.__request_database(query)
return bool(response)
def confirm_user_subscription_status(self, email: str) -> bool:
"""
Confirms the subscription but changing the value of the subscription properties
from 0 to True.
:param email: the users email
:returns: True for success, False if user does not exist
"""
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
query += "RETURN {newsletter_subscribed: u.newsletter_subscribed, beta_subscribed: u.beta_subscribed} as subscription_status"
response = self.__request_database(query)
if response:
subscription_status = response[0]['subscription_status']
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
if subscription_status['newsletter_subscribed'] == 0:
query += "SET u.newsletter_subscribed = True "
if subscription_status['beta_subscribed'] == 0:
query += "SET u.beta_subscribed = True "
query += "REMOVE u.confirmation_token, u.confirmation_timestamp"
self.__request_database(query)
return True
else:
return False
def set_user_confirmation_token(self, email: str, confirmation_token: str) -> bool:
"""
Set a users confirmation token used for identifying the users subscription confirmation link.
:param email: the users email
:param confirmation_token: the generated confirmation_token
:returns: True on success, False if user does not exist
"""
timestamp = time.time()
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
query += "SET u.confirmation_token = '%s' " % confirmation_token
query += "SET u.confirmation_timestamp = %s " % timestamp
query += "RETURN u"
response = self.__request_database(query)
return bool(response)
    def get_user_confirmation_token(self, email: str) -> object:
"""
Get the confirmation_token of a user if there is a valid one.
:param email: the users email
:returns: confirmation_token if there is one, human readable error for display in the browser if not.
"""
one_day = 60 * 60 * 24
now_timestamp = time.time()
query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
query += "RETURN u.confirmation_token, u.confirmation_timestamp"
database_response = self.__request_database(query)
if database_response:
confirmation_token = database_response[0]['u.confirmation_token']
confirmation_timestamp = database_response[0]['u.confirmation_timestamp']
if not confirmation_timestamp:
# return error: no token found
return {
'error': 'Something went wrong. Please try to subscribe again at https://tracemap.info'
}
else:
                if confirmation_timestamp < now_timestamp - one_day:
                    # delete the expired token, then return the expired error
                    query = "MATCH (u:TracemapUser) WHERE u.email = '%s' " % email
                    query += "REMOVE u.confirmation_token, u.confirmation_timestamp"
                    self.__request_database(query)
                    return {
                        'error': 'This confirmation link is not valid anymore. Please subscribe again at https://tracemap.info'
                    }
                else:
                    return {
                        'token': confirmation_token
                    }
else:
# return undefined error
return {
'error': 'User does not exist.'
}
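# Example usage (a sketch; assumes NEO4J_URI, NEO4J_USER and NEO4J_PASSWORD are set,
# and 'user@example.com' is a hypothetical address):
#   adapter = TracemapUserAdapter()
#   adapter.add_user('user@example.com')
#   adapter.set_user_username_password('alice', 'user@example.com', '<password hash>')
#   print(adapter.get_user_status('user@example.com'))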
| true
|
08c185a74a01bc502a26bc58ad4ead7db0d9d777
|
Python
|
gabylorenzi/python-word-analysis
|
/hw2.py
|
UTF-8
| 3,027
| 3.796875
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 00:00:00 2020
@author: gabylorenzi
"""
#%% 1
#DONE
def longest_path_length(d):
"""Returns the length of the longest path in d."""
currMax = 0
for k,v in d.items():
path = []
while(True):
if (k,v) not in path:
path.append((k,v))
else:
break
if v in d.keys():
k = v
v = d[k]
l = len(path)
if l > currMax:
currMax = l
else:
l = len(path)
if l > currMax:
currMax = l
break
return currMax
#%% 2
def large_value_keys(d, N): #DONE
"""Returns a list containing various keys in d.
d is a dictionary who values are ints. N is an int.
The list contains keys k whose corresponding value d[k] is bigger than N.
The keys are arranged in order of largest value to smallest value.
"""
#sort dictionary before hand
#sortedDict = {k:v for k,v in sorted(d.items(), key=lambda item: item[1])}
sortedDict = {r:d[r] for r in sorted(d, key=d.get, reverse = True)}
L = [k for (k,v) in sortedDict.items() if sortedDict[k] > N]
return L
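# Example: large_value_keys({'a': 5, 'b': 2, 'c': 9}, 3) -> ['c', 'a']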
#%% 3
#DONE
def count_words(filename):
"""Creates a dictionary from a .txt file counting word occurrences.
For each word in the text file, there's a key.
The corresponding value is the number of occurrences of the word.
https://docs.python.org/3.7/library/stdtypes.html#string-methods
Capitals are converted to lower case
so that 'The' does not show up as a key.
Dashes are replaced with spaces
so that 'them-at' does not show up as a key.
Both the short dash (-) and long dash (–) are dealt with.
Non-alphabetic characters are stripped from words
so that “espionage?” does not show up as a key.
"""
with open(filename) as f:
novel = f.read()
#novel = "Hey its gaby# and i am gaby gaby gaby the $$$$ a toad 1234"
#so novel is just a long string of the entire novel
#Capitals are converted to lower case
novel = novel.lower()
    # dashes are replaced with spaces (str.replace returns a new string, so reassign)
    novel = novel.replace("-", " ")
    novel = novel.replace("–", " ")
    novel = novel.replace("—", " ")
#non alphabetic characters are stripped from words
badchar = '——”‘!@#$%^&*()_“+=-;<:""''>,./?`~[}{]|1234567890'
for char in badchar:
novel = novel.replace(char," ")
#strip
# char for char in novel if is not char.isalpha()
words = novel.split()
frequency = {}
for word in words:
if word not in frequency:
frequency[word] = 0
frequency[word] +=1
#print(frequency)
return frequency
# d = count_words('863-0.txt')
# print(d['the'])
# print(2843)
# print(d['i'])
# print(1704)
# print(len({k for k in d if d[k] == 1}))
# print(2665)
# print(large_value_keys(d, 600))
| true
|
61c77b1aea076055b686cfbff1c4bd573e4cac6a
|
Python
|
deni64/python-scripts
|
/python-scripts/8cw/покупка домa.py
|
UTF-8
| 572
| 3.390625
| 3
|
[] |
no_license
|
class human:
def __init__(self, name, age, money, home):
self.name = name
self.age = age
self.money = 0
self.home = 0
def present(self):
print(f'my name is {self.name} and i am {self.age} years old')
def earn_money(self):
self.money += 1000
    def get_home(self, house):
        # assumption: buying succeeds once the saved money covers the house price
        if self.money >= house.price:
            self.home += 1
            print('hoorah I have just bought a new house')
        else:
            print('I dont have enough money to buy a new house, I need to work more')
class house:
    # assumption: the unfinished class just stores the asking price
    def __init__(self, price):
        self.price = price
| true
|
e6feb29ca6cdccddade2156b69fcbb50577613b5
|
Python
|
otisgbangba/python-lessons
|
/Harder/picker.py
|
UTF-8
| 327
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
# Pops names off a list in random sequence
from random import randint
import time
remaining_names = ['sue', 'mary', 'hari', 'fred', 'eric', 'menlo']
while len(remaining_names) > 0:
pop_index = randint(0, len(remaining_names) - 1)
popped_name = remaining_names.pop(pop_index)
print(popped_name)
time.sleep(1)
| true
|
a1b8316ba8c17cf75568542632a2b034961b3463
|
Python
|
allocateam/opendc
|
/simulator/opendc-experiments/opendc-experiments-allocateam/tools/plot/metrics/job_makespan.py
|
UTF-8
| 980
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
from .metric import Metric, metric_path
import pandas as pd
import math
class JobMakespanMetric(Metric):
def __init__(self, plot, scenarios):
super().__init__(plot, scenarios)
self.name = "job_makespan"
self.x_axis_label = "Job makespan (seconds)"
def get_data(self, scenario):
job_df = pd.read_parquet(metric_path("job-lifecycle", scenario))
task_df = pd.read_parquet(metric_path("task-lifecycle", scenario))
for job_id in job_df.job_id.unique():
tasks = task_df[task_df.job_id == job_id]
# job makespan: time elapsed from first-task submission of job until last completion of task from job
first_task_submission_time = tasks.submission_time.min()
last_task_finish_time = tasks.finish_time.max()
makespan = (last_task_finish_time - first_task_submission_time) // 1000
if math.isnan(makespan):
continue
yield makespan
| true
|
72298c4a1793826d4681dd3c517923f4f8828356
|
Python
|
mecampbellsoup/procrastinate
|
/procrastinate/retry.py
|
UTF-8
| 3,747
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
"""
A retry strategy class lets procrastinate know what to do when a job fails: should it
try again? And when?
"""
import datetime
from typing import Iterable, Optional, Type, Union
import attr
from procrastinate import exceptions, utils
class BaseRetryStrategy:
"""
If you want to implement your own retry strategy, you can inherit from this class.
Child classes only need to implement `get_schedule_in`.
"""
def get_retry_exception(
self, exception: Exception, attempts: int
) -> Optional[exceptions.JobRetry]:
schedule_in = self.get_schedule_in(exception=exception, attempts=attempts)
if schedule_in is None:
return None
schedule_at = utils.utcnow() + datetime.timedelta(seconds=schedule_in)
return exceptions.JobRetry(schedule_at.replace(microsecond=0))
def get_schedule_in(self, *, exception: Exception, attempts: int) -> Optional[int]:
"""
Parameters
----------
attempts:
The number of previous attempts for the current job. The first time
a job is run, ``attempts`` will be 0.
Returns
-------
``Optional[int]``
If a job should not be retried, this function should return None.
Otherwise, it should return the duration after which to schedule the
new job run, *in seconds*.
"""
raise NotImplementedError()
@attr.dataclass(kw_only=True)
class RetryStrategy(BaseRetryStrategy):
"""
The RetryStrategy class should handle classic retry strategies.
You can mix and match several waiting strategies. The formula is::
        total_wait = wait + linear_wait * attempts + exponential_wait ** (attempts + 1)
Parameters
----------
max_attempts:
The maximum number of attempts the job should be retried
wait:
Use this if you want to use a constant backoff.
Give a number of seconds as argument, it will be used to compute the backoff.
(e.g. if 3, then successive runs will wait 3, 3, 3, 3, 3 seconds)
linear_wait:
Use this if you want to use a linear backoff.
Give a number of seconds as argument, it will be used to compute the backoff.
(e.g. if 3, then successive runs will wait 0, 3, 6, 9, 12 seconds)
exponential_wait:
Use this if you want to use an exponential backoff.
Give a number of seconds as argument, it will be used to compute the backoff.
(e.g. if 3, then successive runs will wait 3, 9, 27, 81, 243 seconds)
retry_exceptions:
Define the exception types you want to retry on.
If you don't, jobs will be retried on any type of exceptions
"""
max_attempts: Optional[int] = None
wait: int = 0
linear_wait: int = 0
exponential_wait: int = 0
retry_exceptions: Optional[Iterable[Type[Exception]]] = None
def get_schedule_in(self, *, exception: Exception, attempts: int) -> Optional[int]:
if self.max_attempts and attempts >= self.max_attempts:
return None
# isinstance's 2nd param must be a tuple, not an arbitrary iterable
if self.retry_exceptions and not isinstance(
exception, tuple(self.retry_exceptions)
):
return None
wait: int = self.wait
wait += self.linear_wait * attempts
wait += self.exponential_wait ** (attempts + 1)
return wait
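# Worked example of the backoff formula above (a sketch): with wait=1, linear_wait=2
# and exponential_wait=3, attempts=0 gives 1 + 0 + 3**1 = 4 s, attempts=1 gives
# 1 + 2 + 3**2 = 12 s, and attempts=2 gives 1 + 4 + 3**3 = 32 s.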
RetryValue = Union[bool, int, RetryStrategy]
def get_retry_strategy(retry: RetryValue) -> Optional[RetryStrategy]:
if not retry:
return None
if retry is True:
return RetryStrategy()
if isinstance(retry, int):
return RetryStrategy(max_attempts=retry)
return retry
| true
|
0f8c264fef79f69fe81afd32a893918b6484e37e
|
Python
|
Kxfusion/PyProjects
|
/PyProject5-5.py
|
UTF-8
| 922
| 3.796875
| 4
|
[] |
no_license
|
def Converter(Str, Ints, N):
Added = 0
Base = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0')
Ints[len(Ints)-1] = Ints[len(Ints)-1]*10 + int(Str[N])
if(N != len(Str) - 1):
for y in range(10):
if(Str[N+1] == Base[y]):
Ints, Added = Converter(Str, Ints, N+1)
Added += 1
return Ints, Added
Base = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0')
print("Give a set of random numbers")
Num = input()
Nums = []
Used = 0
for n in range(len(Num)):
if(Used > 0):
Used -= 1
continue
for x in range(10):
if(Num[n] == Base[x]):
Nums.append(int(Num[n]))
if(n != len(Num) - 1):
for y in range(10):
if(Num[n+1] == Base[y]):
Nums, Used = Converter(Num, Nums, n+1)
break
Nums = sorted(Nums)
print("Numbers in order: ")
print(Nums)
| true
|
f64c86449b4073c92a4f794bfb44c7b82df92a64
|
Python
|
boscoj2008/blocklib
|
/blocklib/candidate_blocks_generator.py
|
UTF-8
| 2,983
| 3.0625
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
"""Class that implement candidate block generations."""
from typing import Dict, Sequence, Tuple, Type, List, Optional
from .configuration import get_config
from .pprlindex import PPRLIndex
from .pprlpsig import PPRLIndexPSignature
from .pprllambdafold import PPRLIndexLambdaFold
from .validation import validate_signature_config
PPRLSTATES = {
"p-sig": PPRLIndexPSignature,
"lambda-fold": PPRLIndexLambdaFold,
} # type: Dict[str, Type[PPRLIndex]]
class CandidateBlockingResult:
"""Object for holding candidate blocking results."""
def __init__(self, blocks: Dict, state: PPRLIndex):
"""
Initialise a blocking result object.
:param blocks: A dictionary where key is set of 1 bits in bloom filter and value is a list of record IDs
:param state: A PPRLIndex state that contains configuration of blocking
"""
self.blocks = blocks
self.state = state
def generate_candidate_blocks(data: Sequence[Tuple[str, ...]], signature_config: Dict, header: Optional[List[str]] = None,
verbose: bool = False):
"""
:param data: list of tuples E.g. ('0', 'Kenneth Bain', '1964/06/17', 'M')
:param signature_config:
A description of how the signatures should be generated.
Schema for the signature config is found in
``docs/schema/signature-config-schema.json``
:param header: column names (optional)
Program should throw exception if block features are string but header is None
:param verbose: print additional information to std out.
    :return: A CandidateBlockingResult holding the reversed index (signature -> record IDs)
        and the internal PPRLIndex state used to generate it.
"""
# validate config of blocking
validate_signature_config(signature_config)
# extract algorithm and its config
algorithm = signature_config.get('type', 'not specified')
config = signature_config.get('config', 'not specified')
# check if blocking features are column index or feature name
blocking_features = get_config(config, 'blocking-features')
feature_type = type(blocking_features[0])
error_msg = 'All feature types should be the same - either feature name or feature index'
assert all(type(x) == feature_type for x in blocking_features[1:]), error_msg
# header should not be None if blocking features are string
if feature_type == str:
assert header, 'Header must not be None if blocking features are string'
if algorithm in PPRLSTATES:
state = PPRLSTATES[algorithm](config)
reversed_index = state.build_reversed_index(data, verbose, header)
state.summarize_reversed_index(reversed_index)
# make candidate blocking result object
candidate_block_obj = CandidateBlockingResult(reversed_index, state)
else:
raise NotImplementedError('The algorithm {} is not supported yet'.format(algorithm))
return candidate_block_obj
| true
|
8354feed55f0cab05a6738d2af4b62462d1b7b38
|
Python
|
Tharani2518/python-programming
|
/Numdivby5and11.py
|
UTF-8
| 235
| 3.96875
| 4
|
[] |
no_license
|
number = int(input(" Please Enter any Positive Integer : "))
if((number % 5 == 0) and (number % 11 == 0)):
print("Given Number is Divisible by 5 and 11",number)
else:
print("Given Number is Not Divisible by 5 and 11",number)
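# Example: entering 55 prints the "Divisible by 5 and 11" message; 50 prints the "Not Divisible" one.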
| true
|
410102283d361cb272da43539da4dedbeba57aa8
|
Python
|
hartantosantoso7/Belajar-Python
|
/program_for_loop.py
|
UTF-8
| 582
| 4.40625
| 4
|
[] |
no_license
|
# build a program using a for loop, a list and range
banyak = int(input("How many records? "))
nama = []
umur = []
for data in range(0, banyak):
print(f"data {data}")
print("==========================")
    input_nama = input("Name: ")
    input_umur = int(input("Age: "))
print("==========================")
nama.append(input_nama)
umur.append(input_umur)
for n in range(0, len(nama)):
data_nama = nama[n]
data_umur = umur[n]
    print(f"{data_nama} is {data_umur} years old")
print("==========================")
print(nama)
print(umur)
| true
|
38b85a2ab960bcb151f3cf2ec8a7e301b2bb40b0
|
Python
|
cytsinghua/qa_4_gaokao
|
/data/modified_test.py
|
UTF-8
| 653
| 2.515625
| 3
|
[] |
no_license
|
import os, sys, json
labels = []
with open('./data/modified_test.csv') as f:
for line in f:
labels.append(1 if line.strip()[-2:] == '11' else 0)
with open('./data/processed_test_data.json') as f, open('./data/processed_modified_test_data.json', 'w') as f1:
start = 0
for line in f:
data = json.loads(line)
old_label = data['labels']
new_label = labels[start : start+len(data['labels'])]
data['labels'] = [1 if new_label[i] == 1 else old_label[i] for i in range(len(old_label))]
start += len(data['labels'])
f1.write(json.dumps(data, ensure_ascii=False) + '\n')
| true
|
0e768d76c71bb3f53f357b3682be010d09363752
|
Python
|
AndrewYY/modsaber-python
|
/modsaber.py
|
UTF-8
| 3,166
| 2.578125
| 3
|
[] |
no_license
|
''' simple beatmods mod installer. run as admin if you need to. '''
import io
import json
import os
import subprocess
import time
import urllib.request
import winreg
import zipfile
# constants
beatmods_url = 'https://beatmods.com'
api_version = '1'
user_agent = 'beatmods-python/0.2.0'
## get the beat saber install directory
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\Steam App 620980') as key:
installdir = winreg.QueryValueEx(key, 'InstallLocation')[0]
try:
version = open(os.path.join(installdir, 'BeatSaberVersion.txt')).read()
except:
version = ''
## grab the mod database from the server
# build the url
req_url = f'{beatmods_url}/api/v{api_version}/mod?status=approved'
# make the web request
print("Fetching Mod Database")
req = urllib.request.Request(req_url)
req.add_header('User-Agent', user_agent)
r = urllib.request.urlopen(req)
mods = json.loads(r.read())
mod_dict = {mod['name']:mod for mod in mods}
## filter by mod
categories = {}
for mod in mods:
category = mod['category']
if category not in categories:
categories[category] = {'mods':[]}
categories[category]['mods'].append(mod)
ordered_mods = []
print("mods:")
i = 1
for key, value in categories.items():
print()
print(key)
for mod in value['mods']:
ordered_mods.append(mod)
print(f'{i}.\t{mod["name"]}' + ('(required)' if mod['required'] else ''))
i += 1
print()
indices = input('Input mod numbers by space: ').split(' ')
## grab all selected mods and dependencies
selected_mods = {key:value for key, value in mod_dict.items() if value['required']}
for index in indices:
try:
index = int(index) - 1
mod = ordered_mods[index]
except (ValueError, IndexError):
print(index, 'is not a valid index')
else:
# add the mod
selected_mods[mod['name']] = mod
# add dependencies
for dependency in mod['dependencies']:
depname = dependency['name']
depver = dependency['version']
selected_mods[depname] = mod_dict[depname]
print('downloading and installing:', list(selected_mods.keys()))
## download mods
print(f'installing in "{installdir}"')
for mod in selected_mods.values():
print(f"downloading {mod['name']}...")
mod_path = [download['url'] for download in mod['downloads'] if download['type'] in ['universal', 'steam']][0]
req = urllib.request.Request(f'{beatmods_url}{mod_path}')
req.add_header('User-Agent', user_agent)
r = urllib.request.urlopen(req)
data = r.read()
bytestream = io.BytesIO(data)
zip = zipfile.ZipFile(bytestream)
zip.extractall(installdir)
# set the creation date to the same as inside the zip
for f in zip.infolist():
name, date_time = f.filename, f.date_time
date_time = time.mktime(date_time + (0, 0, -1))
os.utime(os.path.join(installdir, name), (date_time, date_time))
## install mods
print("Patching...")
ipa_location = os.path.join(installdir, 'IPA.exe')
exe_location = os.path.join(installdir, 'Beat Saber.exe')
subprocess.run([ipa_location, exe_location])
| true
|
33103ce26f33dea0df0c8a59f809a0a2e6cace41
|
Python
|
fkohlgrueber/KeyFun
|
/Constants.py
|
UTF-8
| 918
| 2.640625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Constants contains several functions and constants useful in other modules.
"""
__author__ = 'Felix'
import pyHook
def generate_key_id_name_mappings():
name_to_id = {}
# copy mapping contained in pyHook
for key in pyHook.HookConstants.vk_to_id:
name_to_id[key] = pyHook.HookConstants.vk_to_id[key]
# add entries for numbers and letters
for i in range(0x41, 0x5A + 1) + range(0x30, 0x39 + 1):
name_to_id['VK_' + chr(i)] = i
# append XBUTTONs and Numpad return key
name_to_id['VK_XBUTTON1'] = 0x05
name_to_id['VK_XBUTTON2'] = 0x06
name_to_id['VK_NRETURN'] = 0x88
# invert the mapping
id_to_name = dict([(v, k) for k, v in name_to_id.items()])
return name_to_id, id_to_name
# generate the bindings from key names to key ids and from key ids to key names.
key_name_to_id, key_id_to_name = generate_key_id_name_mappings()
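# Example: key_name_to_id['VK_A'] == 0x41 and key_id_to_name[0x41] == 'VK_A'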
| true
|
1840adbacfa931334d255781379638ef15b6d64c
|
Python
|
carlopeano/python_work
|
/chap_9/my_car.py
|
UTF-8
| 920
| 3.1875
| 3
|
[] |
no_license
|
# from car import Car
# my_new_car = Car('audi', 'a4', 2019)
# print(my_new_car.get_descriptive_name())
# my_new_car.read_odometer()
# my_new_car.odometer_reading = 23
# my_new_car.read_odometer()
# my_new_car.update_odometer(24)
# my_new_car.read_odometer()
# my_new_car.update_odometer(23_500)
# my_new_car.read_odometer()
# my_new_car.increment_odometer(100)
# my_new_car.read_odometer()
# import car
# my_beetle = car.Car('volkswagen', 'beetle', 2019)
# print(my_beetle.get_descriptive_name())
# my_tesla = car.Car('tesla', 'roadster', 2019)
# print(my_tesla.get_descriptive_name())
from car import Car
from electric_car import ElectricCar
my_beetle = Car('volkswagen', 'beetle', 2019)
print(my_beetle.get_descriptive_name())
my_tesla = ElectricCar('tesla', 'roadster',2019)
print(my_tesla.get_descriptive_name())
from electric_car import ElectricCar as EC
print(f"\n{my_tesla.get_descriptive_name()}")
| true
|
840751cf7b1a89f9783c684e91eb906c71774f33
|
Python
|
juniorppb/arquivos-python
|
/ExerciciosPythonMundo3/04.Tuplas03.py
|
UTF-8
| 128
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
pessoa = ('Wellyton', 37, 'M', 82,3)
print(pessoa)
print('-'*20)
pessoa = ('Wellyton', 37, 'M', 82,3)
del(pessoa)
print(pessoa)  # raises NameError: 'pessoa' no longer exists after del
| true
|
585a114ff4c3d4b903457a77ea43d1c37989071d
|
Python
|
YJL33/CSES
|
/1094.py
|
UTF-8
| 235
| 2.984375
| 3
|
[] |
no_license
|
"""
see https://cses.fi/problemset/task/1094
"""
x = int(input())  # array length (read but not otherwise needed)
A = list(map(int, input().split()))
prev = A[0]
res = 0
for i in range(1,len(A)):
prev = max(prev, A[i-1])
if A[i]<prev:
res += prev-A[i]
print(res)
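# Example: with n = 5 and A = [3, 2, 5, 1, 7] the answer is 5
# (raise the 2 to 3 for cost 1 and the 1 to 5 for cost 4).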
| true
|
a912a0ec47a95b11cd04bda35eba9882caa81196
|
Python
|
ptobgui300/CSULA_CubeSat_SeniorDesign
|
/CubeSat/ComputerVision/DetectMarker.py
|
UTF-8
| 2,703
| 2.625
| 3
|
[] |
no_license
|
# Note : flip camera for Raspberry pi
import numpy as np
import cv2
import cv2.aruco as aruco
import sys, time, math
#--- Define Tag
class CubeArucoDetect :
def detectMarker():
id_to_find = 24
marker_size = 10 #- [cm]
#--- Get the camera calibration path
calib_path = ""
camera_matrix = np.loadtxt(calib_path+'cameraMatrix_raspi.txt', delimiter=',')
camera_distortion = np.loadtxt(calib_path+'cameraDistortion_raspi.txt', delimiter=',')
#--- Define the aruco dictionary
aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_ARUCO_ORIGINAL)
parameters = aruco.DetectorParameters_create()
#--- Capture the videocamera (this may also be a video or a picture)
cap = cv2.VideoCapture(0)
#-- Set the camera size as the one it was calibrated with
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
#-- Font for the text in the image
font = cv2.FONT_HERSHEY_PLAIN
while True:
#-- Read the camera frame
ret, frame = cap.read()
#-- Convert in gray scale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #-- remember, OpenCV stores color images in Blue, Green, Red
#-- Find all the aruco markers in the image
corners, ids, rejected = aruco.detectMarkers(image=gray, dictionary=aruco_dict, parameters=parameters,
cameraMatrix=camera_matrix, distCoeff=camera_distortion)
if ids is not None and ids[0] == id_to_find:
#-- ret = [rvec, tvec, ?]
#-- array of rotation and position of each marker in camera frame
#-- rvec = [[rvec_1], [rvec_2], ...] attitude of the marker respect to camera frame
#-- tvec = [[tvec_1], [tvec_2], ...] position of the marker in camera frame
ret = aruco.estimatePoseSingleMarkers(corners, marker_size, camera_matrix, camera_distortion)
#-- Unpack the output, get only the first
#-- Draw the detected marker and put a reference frame over it
aruco.drawDetectedMarkers(frame, corners)
print("found / detected marker ! .. . . .!")
#--- Display the frame
cv2.imshow('frame', frame)
#--- use 'q' to quit
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
a = CubeArucoDetect.detectMarker()
| true
|
60d4dc67cfbe532832190e290a81ef91a63a10d4
|
Python
|
sarelg/stuff
|
/Cluster/figures.py
|
UTF-8
| 491
| 2.5625
| 3
|
[] |
no_license
|
from diff_expo_plot import draw
import matplotlib.pyplot as plt
plt.figure(1)
plt.title('1018-52672-0359 - high std to 1/sn')
draw(1018,52672,359)
plt.figure(2)
plt.title('5942-56210-0308 - high std to 1/sn')
draw(5942,56210,308)
plt.figure(3)
plt.title('2207-53558-227 - high std to 1/sn')
draw(2207,53558,227)
plt.figure(4)
plt.title('0266-51630-0227 - low std to 1/sn')
draw(266,51630,227)
plt.figure(5)
plt.title('4776-55652-0334 - low std to 1/sn')
draw(4776,55652,334)
plt.show()
| true
|
a42cbad495562a4c9e27916e9e6a77a05073debe
|
Python
|
Ramironacho/PythonSe
|
/methods/practica.py
|
UTF-8
| 472
| 3.65625
| 4
|
[] |
no_license
|
def tax(states, income):
net_income = 0
federal_tax = 0.1 * income
if states == 'nyc':
state_tax = 0.11 * income
net_income = income - (federal_tax + state_tax)
if states == 'la':
state_tax = 0.5 * income
net_income = income - (federal_tax + state_tax)
if states == 'boston':
state_tax = 0.15 * income
net_income = income - (federal_tax + state_tax)
return net_income
x = tax('nyc', 1000)
print(x)
| true
|
ae90b56935eb2c801c584e786b077c384a46d660
|
Python
|
genkitaiyaki/calc_exercise
|
/models/formula.py
|
UTF-8
| 392
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
class Formula:
"""
計算式を管理するためのクラス
"""
def __init__(
self,
_left: int,
_right: int,
_operator: str
) -> None:
self._left = _left
self._right = _right
self._operator = _operator
def to_string(self) -> str:
return f'{str(self._left)} {self._operator} {str(self._right)} ='
| true
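A short usage note for the Formula class above (assuming it is imported or defined in scope; the operands are arbitrary):

f = Formula(3, 4, '+')
print(f.to_string())        # -> '3 + 4 ='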
|
82c8421f844423573ff710eb717cf2e9aa973249
|
Python
|
amarotta1/FinalDise-oDeSistemas
|
/Patrones/Abstract Factory Banco/FactoryBlack.py
|
UTF-8
| 518
| 2.515625
| 3
|
[] |
no_license
|
from AbstractFactory import AbstractFactory
from CreditoBlack import CreditoBlack
from CuentaBlack import CuentaBlack
from DebitoBlack import DebitoBlack
class FactoryBlack(AbstractFactory):
def crear_cuenta(self):
print('Creando una cuenta Black')
return CuentaBlack()
def crear_credito(self):
print('Creando una tarjeta de credito Black')
return CreditoBlack()
def crear_debito(self):
print('Creando una tarjeta de debito Black')
return DebitoBlack()
| true
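A typical call site for the abstract factory above, assuming the CuentaBlack/CreditoBlack/DebitoBlack modules it imports are available on the path (a sketch, not part of the original file):

factory = FactoryBlack()
cuenta = factory.crear_cuenta()      # prints 'Creando una cuenta Black', returns a CuentaBlack
credito = factory.crear_credito()    # prints 'Creando una tarjeta de credito Black'
debito = factory.crear_debito()      # prints 'Creando una tarjeta de debito Black'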
|
281e5ca112eb28e49b7e9b85813433b39392ff0c
|
Python
|
JTong666/machine_learning
|
/machine_learning/树回归/test.py
|
UTF-8
| 117
| 2.6875
| 3
|
[] |
no_license
|
import numpy as np
a = [[1, 2],
[3, 4],
[5, 6]]
a = np.mat(a)
print(np.mean(a[:, -1]))
print(np.mean(a))
| true
|
22e23beb12a50712bd366526aaf06b401fa4d70d
|
Python
|
yingCMU/tensor_flow
|
/nn_cnn/max_pooling.py
|
UTF-8
| 797
| 3.03125
| 3
|
[] |
no_license
|
import tensorflow as tf
# The tf.nn.max_pool() function performs max pooling with the ksize parameter as the size of the filter and the strides parameter as the length of the stride. 2x2 filters with a stride of 2x2 are common in practice.
#
# The ksize and strides parameters are structured as 4-element lists, with each element corresponding to a dimension of the input tensor ([batch, height, width, channels]). For both ksize and strides, the batch and channel dimensions are typically set to 1.
# Note: `input`, `weight`, and `bias` are assumed to be defined earlier in the lesson this snippet comes from.
conv_layer = tf.nn.conv2d(input, weight, strides=[1, 2, 2, 1], padding='SAME')
conv_layer = tf.nn.bias_add(conv_layer, bias)
conv_layer = tf.nn.relu(conv_layer)
# Apply Max Pooling
conv_layer = tf.nn.max_pool(
conv_layer,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
| true
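With 'SAME' padding, each spatial output dimension of the pooling above is ceil(input / stride). A quick arithmetic check for a hypothetical 28x28 input and the 2x2 stride used in the snippet:

import math
in_h, in_w, stride = 28, 28, 2       # example input size, not from the snippet itself
out_h = math.ceil(in_h / stride)     # 'SAME' padding: ceil(input / stride)
out_w = math.ceil(in_w / stride)
print(out_h, out_w)                  # -> 14 14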
|
a4ac3c21fcebd40bb40a0c73b751f21c7a7adfe6
|
Python
|
chua24/CanteenRecommendation
|
/FS6_Cheong_Chong_Chua_Source Code.py
|
UTF-8
| 14,239
| 3.203125
| 3
|
[] |
no_license
|
import pygame,sys
import time
import random
import math
from collections import OrderedDict
from operator import itemgetter
#-------------------- Welcome Message --------------------
def welcomeMsg():
print ("Welcome to the Ask Panda system! Please enter your option!")
print ("1: Search by Location")
print ("2: Search by Price")
print ("3: Search by Food/Cuisine")
print ("4: Sort by Rank")
print ("5: Search by Hours")
print ("6: Exit")
#-------------------- All Options --------------------
def allOption():
if option == LOCATION:
if __name__ == '__main__':
main()
print()
continu()
elif option == PRICE:
getPrice(restinfo)
print()
continu()
elif option == CUISINE:
searchFood(restinfo)
print()
continu()
elif option == RANK:
sortrank(canteenlist)
print()
continu()
elif option == HOURS:
print (searchhours(canteenlist))
print()
continu()
#-------------------- Prompt Continue --------------------
def continu():
print()
try:
print("Would you want to continue asking Panda?")
print("1: Continue")
print("2: Exit")
CONTINUE = 1
OUT = 2
userInput = CONTINUE
while userInput != OUT:
userInput= int(input("Choose an option: "))
if userInput == CONTINUE:
print()
welcomeMsg()
option = int (input ("Option: "))
if option == LOCATION:
if __name__ == '__main__':
main()
continu()
elif option == PRICE:
getPrice(restinfo)
continu()
elif option == CUISINE:
searchFood(restinfo)
continu()
elif option == RANK:
sortrank(canteenlist)
continu()
elif option == HOURS:
print(searchhours(canteenlist))
continu()
elif option == EXIT:
print("Thank you for playing this game! Panda hopes to see you again!")
sys.exit()
elif userInput == OUT:
print("Thank you for playing this game! Panda hopes to see you again!")
sys.exit()
except ValueError:
print("Input Error! Please enter a number!")
continu()
#-------------------- 1: Search by Location / Mouseclick --------------------
places = {"Starbucks":(311,266), "McDonald":(361,287),"Bakery":(382,310),\
"Each-A-Cup":(377,333),"Peach Garden":(317,239),"Paik's Bibimbap":(366,362),\
"Koufu":(254,674),"Quad Cafe":(249,404),"Ananda Kitchen":(1050,326),\
"NIE Canteen":(338,79),"Pioneer Canteen":(793,788),"Canteen 2":(756,513)}
def MouseClick():
finish = False
while finish == False:
for event in pygame.event.get():
if event.type == pygame.QUIT:
finish = True
if event.type == pygame.MOUSEBUTTONDOWN:
(mouseX, mouseY) = pygame.mouse.get_pos()
newD = {}
for i,j in places.items():
newD.update({i : round(calculate_distance(mouseX, mouseY,j[0],j[1]),2)}) #add to newD the distance j[0] = xpos j[1] = ypos and round to 2 digits
sorted_places = {k: v for k, v in sorted(newD.items(), key=lambda x: x[1])} #sort by the distance (key) for each location
od = OrderedDict(enumerate(sorted_places)) #to give sorted_places all indexes
sortedNames = list(od.values())[0:5] #to list the values of the top 5 sorted_places
sortedValues = []
for k in sortedNames:
distanceKM = round((sorted_places.get(k)*3.0277)/1000,2) #get the value and convert it to real location
sortedValues.append(distanceKM)
finalResult = zip(sortedNames, sortedValues) #combine the restaurant and the new value
print ("Top 5 places closest to you:")
n = 1 #number the recommendations
for tup in finalResult:
print(n,".", tup[0], "is", str(tup[1]) + "km away from your current location")
n += 1
finish = True
return (mouseX, mouseY)
def label_map(screen):
font = pygame.font.SysFont("monospace", 20 , bold = True)
label1 = font.render("Starbucks", True, (255,255,255) , (0,0,0))
label2 = font.render("McDonald", True, (255,255,255) , (0,0,0))
label3 = font.render("Bakery", True, (255,255,255) , (0,0,0))
label4 = font.render("Each-A-Cup", True, (255,255,255) , (0,0,0))
label5 = font.render("Peach Garden", True, (255,255,255) , (0,0,0))
label6 = font.render("Paik's Bibimbap", True, (255,255,255) , (0,0,0))
label7 = font.render("Koufu", True, (255,255,255) , (0,0,0))
label8 = font.render("Quad Cafe", True, (255,255,255) , (0,0,0))
label9 = font.render("Ananda Kitchen", True, (255,255,255) , (0,0,0))
label10 = font.render("NIE Canteen", True, (255,255,255) , (0,0,0))
label11 = font.render("Pioneer Canteen", True, (255,255,255) , (0,0,0))
label12 = font.render("Canteen 2", True, (255,255,255) , (0,0,0))
screen.blit(label1, (311,266))
screen.blit(label2, (361,287))
screen.blit(label3, (382,310))
screen.blit(label4, (377,333))
screen.blit(label5, (317,239))
screen.blit(label6, (366,362))
screen.blit(label7, (254,674))
screen.blit(label8, (249,404))
screen.blit(label9, (1050,326))
screen.blit(label10, (338,79))
screen.blit(label11, (793,788))
screen.blit(label12, (756,513))
def calculate_distance(buttonX,buttonY,x,y):
distance = math.sqrt((buttonX-x)**2 + (buttonY-y)**2)
return distance
def get_user_location():
# make necessary initializations for Width, Height
W = 1221
H = 862
#initialize display window, call it screen
screen = pygame.display.set_mode((W, H))
# read image file and rescale it to the window size
screenIm = pygame.image.load("NTU Campus.png")
screenIm = pygame.transform.scale(screenIm, (W, H))
#add the image over the screen object
screen.blit(screenIm, (0, 0))
# add the text over the screen object
label_map(screen)
# will update the contents of the entire display window
pygame.display.flip()
# get outputs of Mouseclick event handler
buttonX, buttonY = MouseClick()
def main():
pygame.init()
get_user_location()
#-------------------- Restaurant Dictionary --------------------
restinfo = {'Mcdonalds': {'burger' : '4', 'fries': '3','fried chicken':'4'},
'Koufu': {'pasta': '5', 'japanese': '5','yong tau foo': '4'},
'Bakery': {'bread' : '2', 'waffle': '2', 'cake': '3'},
'Each-A-Cup': {'bubble tea': '2'},
'Canteen 2': {'chinese': '8', 'ayam penyet': '5', 'xiao long bao':'7', 'mixed rice':'4'},
'Ananda Kitchen': {'chicken': '6' , 'naan' : '3', 'indian': '6'},
'Quad Cafe': {'korean': '5', 'yong tau foo': '4', 'mixed rice' :'4'},
'Pioneer Canteen': {'thai': '5' ,'mixed rice': '4'},
'NIE': {'ban chor mee': '3', 'mixed rice': '3'},
"Paik's Bibimbap": {'bibimbap': '9', 'korean': '10'},
'Peach Garden': {'chinese': '15'}
}
mcdonald = {"name": "Mcdonalds",
"hours": "Mon to Sat: 7am to 12am | Sun & PH: 10am to 10pm",
"rank": 1}
starbucks = {
"name": "Starbucks",
"hours": "Mon to Fri: 7am to 10pm | Sat & Sun: 7am to 5pm | PH: Closed",
"rank": 2}
bakerycuisine = {"name": "Bakery Cuisine",
"hours": "Mon to Fri: 7am to 9pm | Sat, Sun & PH: 9am to 7pm",
"rank": 7}
eachacup = {"name": "Each A Cup",
"hours": "Mon to Fri: 9am to 9pm | Sat & Sun: 9am to 6pm | PH: Closed",
"rank": 6}
canteen2 = {"name": "Canteen 2",
'hours': "Daily: 7am - 9pm",
'rank': 4}
ananda = {"name": "Ananda Kitchen",
'hours': "Daily: 12pm - 1030pm",
'rank': 10}
quad = {"name": "Quad Cafe",
'hours': "Mon to Fri: 7am - 9pm | Sat: 7am - 3pm | Sun & PH: Closed",
'rank': 9}
pioneer = {"name": "Pioneer Canteen",
'hours': "Daily: 7am - 9pm",
'rank': 8}
nie = {"name": "NIE Canteen",
'hours': "Daily: 7am - 9pm",
'rank': 5}
paiks = {"name": "Paiks Bibimbap",
'hours': "Mon to Fri: 10am - 9pm | Sat: 10am - 3pm | Sun & PH: Closed",
'rank': 12}
peachgarden = {"name": "Peach Garden",
'hours': "Daily: 11am to 2.30pm / 5pm to 10pm",
'rank': 11}
koufu = {"name": "Koufu Canteen",
'hours': "Mon to Fri: 7am to 9pm | Sat: 7am to 3pm | PH: Open (except Sun)",
'rank': 3}
canteenlist = [mcdonald, starbucks, bakerycuisine, eachacup, canteen2, ananda, quad, \
pioneer, nie, paiks, peachgarden,koufu ]
#-------------------- 2: Search by Price --------------------
def getPrice(fooddict):
print()
userInput = input("Please enter your maximum budget: ")
restlist = []
for rest in fooddict: # iterates key (restaurant) in dictionary
for food in fooddict[rest]: # iterates key (food) in dictionary (restaurant)
x = fooddict[rest][food] # assign x to the values (price) in dictionary
#print ('x = ' , x)
if int(userInput) >= int(x):
if rest not in restlist: # if rest is in restlist, will not append
restlist.append(rest)
if len(restlist) == 0:
print("There is no food within the budget.")
else:
print()
print("Restaurants within your budget:")
for li in restlist:
print(li)
#-------------------- 3: Search by Food --------------------
def searchFood(diction):
print()
userInput = str(input("Type your preferred food/cuisine: "))
listofrest= []
for rest in diction: # iterates key (restaurant) in dictionary
if userInput in diction[rest]:
listofrest.append(rest) # adds to the list
if len(listofrest)==0:
print ("Panda apologizes. Food is not available in any of the canteens.")
else:
print()
print ("Available in:")
for li in listofrest:
print (li)
#-------------------- 4: Sort by Rank --------------------
def sortrank(li):
finish = True
while finish:
ranklist = sorted(canteenlist, key = itemgetter('rank')) #sorted according to rank
ranknames = [n['name'] for n in ranklist]
rankno = [i['rank'] for i in ranklist]
finallist = zip(ranknames,rankno)
print()
for tup in finallist:
print("Rank", tup[1], ":" , tup[0])
finish = False
#-------------------- 5: Search by Hours --------------------
def searchhours(li):
print()
print ("List of Canteens:")
for can in canteenlist:
print (can['name'])
print()
while True:
userInput = input("Enter a canteen in the list: ")
for canteen in li:
if str(userInput) == str(canteen['name']):
return canteen['hours']
break
else:
print ("Please input the canteens on the list.")
return searchhours(li)
#-------------------- Main Block Includes Pygame Startmenu GUI --------------------
pygame.init()
screen = pygame.display.set_mode((500,500))
pygame.display.set_caption('F&B Recommendation Game') #for the window caption
font = pygame.font.SysFont("Goudy Stout", 30)
text2 = font.render("The", True, (200,200,0))
text = font.render("ASK PANDA", True, (200,200,0))
text3 = font.render("Game", True, (200,200,0))
font = pygame.font.SysFont("franklin gothic", 30)
buttontext = font.render("Press to Start!", True, (255,255,255))
LOCATION = 1
PRICE = 2
CUISINE = 3
RANK = 4
HOURS = 5
EXIT = 6
option = LOCATION # initialise option
startmenu = True
while startmenu: #to pop up start menu
for event in pygame.event.get():
if event.type == EXIT:
pygame.quit()
sys.exit()
screen.fill((0, 0, 20))
screen.blit(text,
(250 - text.get_width() // 2, 200 - text.get_height() // 2))
screen.blit(text2,
(340 - text.get_width() // 2,130))
screen.blit(text3,
(320 - text.get_width() // 2,230))
mouse = pygame.mouse.get_pos() #mouse position
click = pygame.mouse.get_pressed() #detect click
if 150+200 > mouse[0] > 150 and 350+50 > mouse[1] > 350: #if mouse is within the boundaries of the button
pygame.draw.rect(screen, (0,255,0) ,(150,350,200,50)) #button color becomes brighter (x,y, width, length)
if click[0] == 1: #if there is a click in the button, goes to Alloption function
while option != EXIT:
welcomeMsg()
print()
option = int (input ("What option would you want to ask Panda to do: "))
allOption()
if option == EXIT:
print("Thank you for playing this game! Panda hopes to see you again!")
sys.exit()
startmenu = False
else:
pygame.draw.rect(screen, (0,100,0) ,(150,350,200,50)) #button stays the same color (x,y, width, length)
screen.blit(buttontext, (150+25, 350+15))
screenIma = pygame.image.load("panda.png")
screenIma = pygame.transform.scale(screenIma, (150, 150))
screen.blit(screenIma, (340, 300))
pygame.display.flip()
| true
|
5714f00db6f0c65aeb9567bc2035eb98e873547d
|
Python
|
Bruna-Pianco/Activities01-Python
|
/QuantoTempo.py
|
UTF-8
| 128
| 3.25
| 3
|
[] |
no_license
|
tempo1 = int(input())
tempo2 = int(input())
tempo3 = int(input())
soma = tempo1 + tempo2 + tempo3
print (f'{soma} minutos')
| true
|
8a84443432e9b71b4d83cdb4d858f0b86438a9a8
|
Python
|
bassboink/package_demo
|
/module.py
|
UTF-8
| 66
| 2.515625
| 3
|
[] |
no_license
|
def demo_module(name):
print("String received: " + str(name))
| true
|
9c2a91ee8893086ebf48155b4179beddf9fc85c2
|
Python
|
jibinsamreji/Python
|
/Python_Basics/tuples.py
|
UTF-8
| 203
| 4.03125
| 4
|
[] |
no_license
|
# Tuples unlike Lists in Python, are immutable i.e, unchangeable
# Tuple is a data structure type in Python, similar to list
numbers = (1001, 201, 3)
print(numbers[0])
numbers[0] = 10    # raises TypeError: 'tuple' object does not support item assignment
print(numbers[0])  # never reached because of the error above
| true
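A small follow-up to the snippet above: the failed item assignment raises TypeError, which can be made explicit by catching it:

numbers = (1001, 201, 3)
try:
    numbers[0] = 10
except TypeError as error:
    print(error)                     # tuples are immutable, so item assignment fails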
|
69cf2c9540b544c2f547f306db499b8c25ad42dd
|
Python
|
onaio/tasking
|
/tasking/serializers/base.py
|
UTF-8
| 1,537
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""
Base Serializers
"""
from rest_framework import serializers
from tasking.common_tags import TARGET_DOES_NOT_EXIST
from tasking.utils import get_allowed_contenttypes
class ContentTypeFieldSerializer(serializers.ModelSerializer):
"""
Serializer class that provides a contenty_type field
"""
target_content_type = serializers.PrimaryKeyRelatedField(
many=False, queryset=get_allowed_contenttypes()
)
class GenericForeignKeySerializer(ContentTypeFieldSerializer):
"""
Serializer class that provides fields and methods for dealing
with generic foreign keys
"""
target_id = serializers.IntegerField(source="target_object_id")
def validate(self, attrs):
"""
Validate target id
"""
if self.instance is not None:
# we are doing an update
target_id = attrs.get("target_object_id", self.instance.target_object_id)
target_model_contenttype = attrs.get(
"target_content_type", self.instance.target_content_type
)
else:
# we are creating a new object
target_id = attrs.get("target_object_id")
target_model_contenttype = attrs.get("target_content_type")
target_model_class = target_model_contenttype.model_class()
try:
target_model_class.objects.get(pk=target_id)
except target_model_class.DoesNotExist:
raise serializers.ValidationError({"target_id": TARGET_DOES_NOT_EXIST})
return attrs
| true
|
e1ae2a931534ee89c6ee4ffe6a89afc0b1bf9d37
|
Python
|
TanmayKumar-EngStud/CryptographyPy
|
/Modern Cryptosystem/DEStrail3.py
|
UTF-8
| 717
| 3.046875
| 3
|
[] |
no_license
|
from os import system
system('clear')
plainText = "HIJACKINGS"
key = "HELLO"
def BlocksCreation(plainText):
block=[]
while len(plainText)%8 != 0:
plainText+=" "
for i in range(0,len(plainText),8):
block.append(plainText[i:i+8])
return block
def keyRefining(key):
    # pad a short key with "X" up to 8 characters, or truncate to the first 8
    if len(key) < 8:
        while len(key) != 8:
            key += "X"          # str has no append(); rebuild the string instead
        return key
    else:
        return key[0:8]
def HexA(plainText): #Working well
plainText = BlocksCreation(plainText)
result =[]
ans = ""
for plain in plainText:
ans+= hex(ord(plain[0]))[2:]
for i in range(1,len(plain)):
ans+=" "
ans+= hex(ord(plain[i]))[2:]
result.append(ans)
ans = ""
return result
print(HexA(plainText))
| true
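A worked example of the 8-character blocking that BlocksCreation performs above (same plaintext as in the script):

text = "HIJACKINGS"                          # 10 characters
while len(text) % 8 != 0:
    text += " "                              # padded with spaces to 16 characters
blocks = [text[i:i + 8] for i in range(0, len(text), 8)]
print(blocks)                                # -> ['HIJACKIN', 'GS      ']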
|
c159de08ea781d6b8e9020b2febf774f5ed5f300
|
Python
|
Samuel1418/ExerciciosPhyton
|
/Exemplo Notebook.py
|
UTF-8
| 2,189
| 2.96875
| 3
|
[] |
no_license
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class Fiestra(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Ejemplo GtkNotebook")
notebook= Gtk.Notebook()
self.add(notebook)
paxina1= Gtk.Box()
paxina1.set_border_width(10)
paxina1.add(Gtk.Label("Paxina por defecto"))
notebook.append_page(paxina1, Gtk.Label("Titulo"))
paxina2=Panel()
notebook.append_page(paxina2, Gtk.Label("Botons"))
paxina3 = Panel()
notebook.append_page(paxina3, Gtk.Label("Botons 2"))
paxina4 = Gtk.Box()
paxina4.set_border_width(10)
paxina4.add(Gtk.Label("Icon"))
notebook.append_page(paxina4, Gtk.Image.new_from_icon_name("help-about",Gtk.IconSize.MENU))
self.connect("destroy", Gtk.main_quit)
self.show_all()
class Panel(Gtk.Grid):
def __init__(self):
Gtk.Grid.__init__(self)
boton1 = Gtk.Button(label="Boton 1")
boton2 = Gtk.Button(label="Boton 2")
boton3 = Gtk.Button(label="Boton 3")
boton4 = Gtk.Button(label="Boton 4")
boton5 = Gtk.Button(label="Boton 5")
boton6 = Gtk.Button(label="Boton 6")
boton7 = Gtk.Button(label="Boton 7")
boton8 = Gtk.Button(label="Boton 8")
boton9 = Gtk.Button(label="Boton 9")
self.add(boton1)
self.attach(boton2, 1, 0, 2, 1)
self.attach_next_to(boton3, boton1, Gtk.PositionType.BOTTOM, 1, 2)
self.attach_next_to(boton4, boton2, Gtk.PositionType.BOTTOM, 2, 1)
#grid.attach_next_to(boton5, boton4, Gtk.PositionType.BOTTOM, 1, 1)
#grid.attach_next_to(boton6, boton5, Gtk.PositionType.RIGHT, 1, 1)
#comentamos el boton 5 y 6 para poner en su sitio esta caja con tres botones
caixa= Gtk.Box()
caixa.pack_start(boton7, True,True, 0)
caixa.pack_start(boton8, True, True, 0)
caixa.pack_start(boton9, True, True, 0)
self.attach_next_to(caixa,boton4, Gtk.PositionType.BOTTOM,1,1)
self.connect("destroy", Gtk.main_quit)
self.show_all()
if __name__ == "__main__":
Fiestra()
Gtk.main()
| true
|
9ba5e4418e9556f5e896674c16ff9751815ee304
|
Python
|
SkyWorkerCK/Basic-DeepLearnning-Methods
|
/MLP_np.py
|
UTF-8
| 1,279
| 3.046875
| 3
|
[] |
no_license
|
import numpy as np
def sigmoid(z):
return 1.0/(1.0 + np.exp(-z))
def dsigmoid(z):
    return sigmoid(z) * (1 - sigmoid(z))    # derivative of the sigmoid
class MLP:
def __init__(self, sizes):
"""
:param sizes: [784, 30, 10]
"""
self.sizes = sizes
# sizes: [784, 30 , 10]
# w: [ch_out, ch_in]
# b: [ch_out]
self.weights = [np.random.randn(ch2, ch1) for ch1, ch2 in zip(sizes[:-1], sizes[1:])]  # pair consecutive layer sizes
# self.weights: [30, 784] [10, 30]
self.biases = [np.random.randn(ch) for ch in sizes[1:]]
# self.biases: [30, 10]
def forward(self, x):
for w, b in zip(self.weights, self.biases):
z = np.dot(w, x) + b
xx = sigmoid(z)
return xx
def backpropagate(self, x, y):
nabla_w = [np.zeros(w.shape) for w in self.weights]
nabla_b = [np.zeros(b.shape) for b in self.biases]
activations = [x]
zx = []
activation = x
for w, b in zip(self.weights, self.biases):
z = np.dot(w, activation) + b
activation = sigmoid(z)
zx.append(z)
activations.append(activation)
# Back-propagation
# 1. compute the gradient at the input layer
def main():
pass
if __name__ == '__main__':
main()
| true
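A quick shape check of the weight/bias initialisation described in the comments above ([ch_out, ch_in] per layer), using the sizes value from the docstring:

import numpy as np
sizes = [784, 30, 10]
weights = [np.random.randn(ch2, ch1) for ch1, ch2 in zip(sizes[:-1], sizes[1:])]
biases = [np.random.randn(ch) for ch in sizes[1:]]
print([w.shape for w in weights])    # -> [(30, 784), (10, 30)]
print([b.shape for b in biases])     # -> [(30,), (10,)]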
|
2a4eac5f269401354ae6b5af70efe14e4d770d6a
|
Python
|
RohilPrajapati/python_traning_assignment
|
/assignment4/QN2createDictfromlist.py
|
UTF-8
| 297
| 3.53125
| 4
|
[] |
no_license
|
# Students = ['jack', 'jill', 'david', 'silva', 'ronaldo']
# Marks = ['55', '56', '57', '66', '76']
Students = ['jack','jill','david','silva','ronaldo']
Marks = ['55','56','57','66','76']
marks = {}                                  # avoid shadowing the built-in name 'dict'
for i in range(len(Students)):
    marks.update({Students[i]: Marks[i]})
print(marks)
| true
|
fa50bb35f8c52e14a614f3a31855968fd7a99069
|
Python
|
lbrack1/crypto-tracker-bot
|
/sentiment.py
|
UTF-8
| 3,406
| 3.15625
| 3
|
[] |
no_license
|
#---------------------------------------------------------------------------
# Data Analysis - Copyright 2017, Leo Brack, All rights reserved.
#---------------------------------------------------------------------------
# This code takes data from the mysql data base and extracts the sentiment
#
#
# --------------------------------------------------------------------------
# Import modules
import MySQLdb
import sys
import datetime
import nltk
import re
import string
from nltk.tokenize import word_tokenize, RegexpTokenizer
from nltk.corpus import stopwords
# -------------------------------------------------------y-------------------
# Function to retrieve data from database
def get_data(x):
# Open a database connection
connection = MySQLdb.connect (host = "localhost", user = "leobrack", passwd = "password", db = "crypto_db")
# Prepare a cursor object using cursor() method
cursor = connection.cursor ()
#Get last x seconds of tweets from mysql
nowtime = datetime.datetime.now()
prevtime = nowtime - datetime.timedelta(seconds=x)
nowtimestr = nowtime.strftime('%Y-%m-%d %H:%M:%S.%f')
prevtimestr = prevtime.strftime('%Y-%m-%d %H:%M:%S.%f')
# Execute the SQL query using execute() method.
#cursor.execute ("select text from twitter where created_at between '" + nowtimestr + "' and '" + prevtimestr + "';")
# FOR DEVELOPMENT! Execute the SQL query using execute() method.
cursor.execute ("select text from raw_tweets where created_at between '2016-08-09 09:59:30' and '2018-09-01 13:50:32';")
# Fetch all of the rows from the query
text = cursor.fetchall ()
# Close the cursor object
cursor.close ()
# Close the connection
connection.close ()
return text
# --------------------------------------------------------------------------
text = get_data(5)
# --------------------------------------------------------------------------
# This can be put into twitter_streamer.py
# as preprocessing. Won't need to loop over tweets
# Import list of stop words
stop_words = set(stopwords.words('english'))
# Import tokenizer that takes care of punctuation
tokenizer = RegexpTokenizer(r'\w+')
# List to hold all words
all_words = []
table = string.maketrans("","")
# Loop over all tweets in section
for row in text:
#try:
# Remove useless stuff
result = re.sub(r"http\S+", "", row[0]) # Links
result1 = re.sub(r"@\S+", "", result) # Usernames
result2 = result1.translate(table, string.punctuation) # Punctuation
result3 = re.sub(r"RT+", "", result2) # "RT"
result4 = re.sub("\d+", " ", result3) # numbers
result5 = re.sub(r"\+", "", result4)
result6 = result5.lower() # Lowercase
# Tokenize (removes punctuation)
word_tokens = word_tokenize(result6)
# Removes stop words and recreates tweet
filtered_sentence = [w for w in word_tokens if w not in stop_words]
print(filtered_sentence)
# Adds every word to a list
for w in filtered_sentence:
all_words.append(w.lower())
#except:
# print "ERROR", row[0]
# Process list of words
all_words = nltk.FreqDist(all_words)
print(all_words.most_common(15))
# Exit the program
sys.exit()
| true
|
6270cc045f7eddf82e9e86650a6ba75d22305e3f
|
Python
|
rlafuente/lightwriter
|
/arduinoserial.py
|
UTF-8
| 3,464
| 3.703125
| 4
|
[] |
no_license
|
#!/usr/bin/python
# Script for sending text data to an Arduino board
# Used for the Lightwriter piece
# It accepts a single string as a parameter;
# This string should be piped in from the console using the
# lw-parsestring script, like this:
# python lw-parsestring "bright happy flowers" | python lw-sendstring
# This script uses a small, hastily put-together network protocol;
# It sends an int and Arduino is supposed to confirm it got it,
# by sending back the same value.
# The signals used and their values are:
# 0 - Write "0" value
# 1 - Write "1" value
# 2 - HELLO/OK signal (used in startup and after copying each character)
# 3 - Wrap-up and finish
LEDON = '0'
LEDOFF = '1'
HELLO = '2'
GOODBYE = '3'
import serial
import sys
position = 0 # keeps track of where we are in the string to send
sendok = False # toggle for knowing if it's ok to send data
finished = False # will be true when everything is parsed
string = sys.argv[1].strip("\n") # get input from stdin
# print string
# added timeout for making sure we aren't left hanging
ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
print
print "Attempting HELLO with Lightwriter... (if this doesn't change for 2 seconds, it crashed)"
# send HELLO signal - checks if arduino is alive
while 1:
ser.write(HELLO);
token = ser.readline()
if (token[0] == HELLO):
break
print "Token doesn't seem right..."
print " Lightwriter says hello, beginning transfer!"
print
while 1:
if sendok: # did we receive an ok for the arduino for going on?
if finished: # are we at the end?
print "Finishing..."
# wrap up and finish
ser.write(GOODBYE)
data = ser.readline()
ser.flush()
if (data[0] == GOODBYE):
print "All done, Lightwriter looks happy :)"
print
break
else:
print "Finished, but Lightwriter is complaining :/"
print
break
# if it's not finished, send next byte
print "Sending next byte (position " + str(position) + ", value "+ string[position ]+ "):"
ser.write(string[position]);
data = ser.readline()
# now check if the arduino got it
if (data[0] != LEDON and data[0] != LEDOFF): # is it the right signal?
print "Lightwriter says it didn't understand (says " + data[0] + "), sending data again..."
ser.write(string[position]);
data = ser.readline()
print "Lightwriter confirms it received " + data[0] + "..."
# set the flag to false so that we'll wait for the next arduino ok
sendok = False
# move one step ahead
position = position + 1
# test for next value, if it doesn't exist the string is over
# so send a null character (required by arduino to form a correct
# string) and finish
if (position == len(string)):
finished = True
# if HELLO is not ok, poke the arduino again to check for a response
ser.flush()
ser.write(HELLO);
data = ser.readline() # read incoming data
# print data
if (data[0] == HELLO): # check if we got anything from the arduino; we're expecting "2" = all ok
sendok = True # it's ok to send the next packet
print
print "Lightwriter says all OK so far :D"
else: # try sending it again
ser.flush()
print "Didn't receive confirmation, retrying...\n"
ser.write(HELLO);
data = ser.readline()
if (data[0] == HELLO): # did we get it right now?
sendok = True # if so, phew
else: # otherwise it's okay, try again
print "Lightwriter doesn't acknowledge reception :( let's pester it some more and see if it works"
| true
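The comments above describe a tiny echo-ACK protocol: send one signal byte and expect the Arduino to send the same value back. A hedged sketch of that handshake as a reusable helper, written in the script's Python 2 style (the helper name is made up here; a real serial device is needed to run the commented-out usage):

import serial

HELLO = '2'

def send_with_ack(port, value, retries=2):
    # write one signal byte and wait for the Arduino to echo it back; retry a couple of times
    for _ in range(retries):
        port.write(value)
        reply = port.readline()
        if reply and reply[0] == value:
            return True
    return False

# Usage sketch (port path as in the script above):
# port = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
# if send_with_ack(port, HELLO):
#     print "Lightwriter answered the HELLO"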
|
b9cc4f2552b8e9e041601f01b5a827224507f512
|
Python
|
rulojuka/listas
|
/mac0448/ep2/cliente_tcp.py
|
UTF-8
| 6,042
| 2.8125
| 3
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/python3
from socket import *
import threading
from threading import Thread
from time import sleep
import sys, ssl
lock = threading.Lock()
RECV_BUFFER = 2024
global writer
chatting = False
def envia(mensagem, sock):
lock.acquire()
sock.send( mensagem.encode('utf-8') )
lock.release()
class Heartbeat(object):
def __init__(self, sock, time):
self.on = True
self.beating = False
self.delay = time
self.sock = sock
def beat(self):
while self.on:
if( self.beating ):
envia("HB", self.sock)
sleep(self.delay)
class ListenerSocket(object):
def __init__(self):
self.on = True
def listen(self):
global chatting
global writer
while self.on:
chatfd, chataddr = listener.accept()
print (chataddr)
while 1:
data = chatfd.recv(RECV_BUFFER).decode('utf-8')
if (len(data) == 0):
continue
if (data.split()[0] == "CONN"):
chatting = True
buddyip = data.split()[1]
buddyport = (int)(data.split()[2])
buddy = data.split()[3]
writer = TCPWriter(buddyip,buddyport)
writer.connect()
print("You are connected to %s."% buddy)
print(chatting)
elif (data.split()[0] == "FILE"):
file_path = data.split()[1]
writer.send("SENDING %s" % file_path)
print("Enviando arquivo --%s--"% file_path)
writer.send_file( file_path )
sleep(0.1)
writer.send("SENT %s" % file_path)
continue
elif (data.split()[0] == "SENDING"):
print ("Comecou a receber arquivo.")
arq = open(data.split()[1], 'wb')
while 1:
data = chatfd.recv(RECV_BUFFER)
print("data eh --%s--" % data)
lista_split = data.split()
if( len(lista_split)>0 and lista_split[0] == b"SENT"):
break
if( not data or len(lista_split)==0 or lista_split[0] == "SENT"):
break
arq.write(data)
arq.close()
print ("Recebeu arquivo inteiro.")
continue
elif (data.split()[0] == "DISCONNECT"):
writer.disconnect()
break
else:
print (data)
class TCPWriter(object):
def __init__(self,buddy_ip,buddy_port):
self.ip = buddy_ip
self.port = buddy_port
self.socket = socket(AF_INET, SOCK_STREAM)
def connect(self):
global chatting
self.socket.connect((self.ip, self.port))
chatting = True
def disconnect(self):
global chatting
print("Seu chat foi encerrado.")
self.socket.close()
chatting = False
def send(self,message):
envia(message, self.socket)
def send_file(self, file_path):
arq = open(file_path, 'rb')
for line in arq.readlines():
lock.acquire()
self.socket.send( line )
lock.release()
arq.close()
print("Terminou de enviar o arquivo.")
if( len(sys.argv)<=1 or len(sys.argv)>4):
print( "Usage: ./cliente.py ip porta chat_port" )
sys.exit(0)
serverName = sys.argv[1]
serverPort = int(sys.argv[2])
chatPort = int(sys.argv[3])
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket = ssl.wrap_socket(clientSocket,
ca_certs="server.crt",
cert_reqs=ssl.CERT_REQUIRED)
clientSocket.connect((serverName,serverPort))
listener = socket(AF_INET, SOCK_STREAM)
listener.bind(('', chatPort))
listener.listen(5)
sender = socket(AF_INET, SOCK_STREAM)
#Comeca heartbeat
hb = Heartbeat(clientSocket, 1)
t = threading.Thread(target = hb.beat)
t.start()
#Comeca listener
lskt = ListenerSocket()
t2 = threading.Thread(target = lskt.listen)
t2.start()
usuario = "anonymous"
try:
while 1:
comando = input('Escreva a mensagem: ')
if (chatting):
if(comando.split()[0] == "FILE"):
writer.send(comando)
elif(comando.split()[0] == "DISCONNECT"):
writer.send(comando)
writer.disconnect()
else:
writer.send(comando)
else:
mensagem = ""
if( comando=="login" ):
usuario = input('Escreva seu nickname: ')
mensagem = "LOGIN " + usuario + " " + str(chatPort)
envia(mensagem, clientSocket)
data = clientSocket.recv(2048).decode('utf-8')
if (data.split()[0] == "OK"):
print("Login feito com sucesso")
else:
print("Login falhou")
usuario = "anonymous"
continue
hb.beating = True
elif( comando=="list" ):
mensagem = "LIST"
envia(mensagem, clientSocket)
data = clientSocket.recv(2048).decode('utf-8')
words = data.split('\n')
print("Lista de usuários:")
for word in words:
print (word)
elif( comando=="logout" ):
mensagem = "LOGOUT " + usuario
envia(mensagem, clientSocket)
hb.beating = False
elif( comando=="quit" or comando=="exit"):
hb.on = False
envia("CLOSE", clientSocket)
break
elif( comando == "chat"):
buddy = input('Escreva o nick do usuario com quem deseja conversar: ')
envia("CHAT " + usuario + " " + buddy, clientSocket)
data = clientSocket.recv(2048).decode('utf-8')
print (data)
if data.split()[0] == "NOK":
print("Failed: Cannot start chat")
continue
else:
print("You started a connection with %s" %buddy)
buddyip = data.split()[1]
buddyport = (int)(data.split()[2])
print (buddyip)
print (buddyport)
chatting = True
writer = TCPWriter(buddyip,buddyport)
writer.connect()
myip = clientSocket.getpeername()[0]
writer.send("CONN "+ myip + " " + str(chatPort) + " " + usuario)
except (KeyboardInterrupt, SystemExit):
print ('\nReceived keyboard interrupt, quitting program.')
hb.on = False
clientSocket.close()
hb.on = False
clientSocket.close()
| true
|
568a22740bacd7c9de22e6e789a8a4984c61a682
|
Python
|
TingtingBo/Scripts
|
/TIME-Resolved Analysis/extract_features.py
|
UTF-8
| 1,902
| 3
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
def extract_features (data,getmean=False):
data=data
# Calculate nr of features with gaussian sum formula
# because we don't take the diagonal as a feature
nr_electrodes = data.shape[1]
nr_features = int(((nr_electrodes - 1) ** 2 + (nr_electrodes - 1)) / 2)
# create empty dataframe for features
tofill = np.zeros((data.shape[0], nr_features))
if len(data.shape)==3:
timesteps=data.shape[0]
# fill rows with diagonals features
for t in range(0, timesteps):
tmp = []
for e in range(1, nr_electrodes):
tmp.extend(data[t].diagonal(e))
tofill[t, :] = tmp
if getmean == True:
tofill = np.mean(tofill, axis=1)
if len(data.shape) == 2:
timesteps = 1
# fill rows with diagonals features
tmp = []
for e in range(1, nr_electrodes):
tmp.extend(data.diagonal(e))
tofill = tmp
if getmean == True:
tofill = np.mean(tofill, axis=0)
return tofill
def extract_single_features(X_step,channels,selection):
if len(X_step.shape) == 3:
selected=[]
for i in range(0,len(selection)):
selected.append(np.where(channels==selection[i])[0][0])
X_step=X_step[:,selected,:] #[time,horizontal,vertical]
X_step=X_step[:,:,selected]
return X_step
if len(X_step.shape) == 2:
selected=[]
for i in range(0,len(selection)):
selected.append(np.where(channels==selection[i])[0][0])
X_step=X_step[selected,:] #[time,horizontal,vertical]
X_step=X_step[:,selected]
return X_step
def get_difference (data):
tofill= np.zeros((data.shape[0]-1, data.shape[1]))
for i in range(0,data.shape[0]-1):
j=i+1
tofill[i,:]=data[j]-data[i]
return tofill
| true
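The comment in extract_features above counts the upper-triangle connectivity features with the Gaussian sum formula. A quick arithmetic check for a hypothetical 64-electrode montage:

nr_electrodes = 64                   # example value, not from the script
nr_features = int(((nr_electrodes - 1) ** 2 + (nr_electrodes - 1)) / 2)
print(nr_features)                   # -> 2016, i.e. 63*64/2 pairs above the diagonal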
|
b8c20f9a369e1b9338ceaf9407e97566d8b82c68
|
Python
|
jaykumar-parmar/python-practice
|
/educative_io/ds/level_order_tree_traversal.py
|
UTF-8
| 886
| 3.453125
| 3
|
[] |
no_license
|
from my_tree.tree import BinaryTree
from my_tree.tree import BinaryTreeNode
from my_queue.my_queue import MyQueue
q = MyQueue()
def traversal(rootNode: BinaryTreeNode):
q.enqueue(rootNode)
my_str = level_order_tree_traversal(rootNode)
print(my_str)
def level_order_tree_traversal(node: BinaryTreeNode):
my_str = ""
if node:
my_str += str(node.value)
q.dequeue()
q.enqueue(node.leftNode)
q.enqueue(node.rightNode)
my_str += level_order_tree_traversal(q.peek())
return my_str
root = BinaryTreeNode(1)
root.leftNode = BinaryTreeNode(2)
root.leftNode.leftNode = BinaryTreeNode(4)
root.leftNode.rightNode = BinaryTreeNode(5)
root.rightNode = BinaryTreeNode(3)
root.rightNode.leftNode = BinaryTreeNode(6)
root.rightNode.rightNode = BinaryTreeNode(7)
b = BinaryTree(root)
traversal(root)
| true
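The recursive traversal above drives the recursion off q.peek() and stops at the first None child it meets, so it can terminate early on trees with missing children (a node with only a right child, for instance). A self-contained iterative sketch with collections.deque that skips missing children, using a minimal stand-in node class rather than the project's BinaryTreeNode:

from collections import deque

class _Node(object):                      # stand-in for BinaryTreeNode, for illustration only
    def __init__(self, value, left=None, right=None):
        self.value, self.leftNode, self.rightNode = value, left, right

def level_order(root):
    order, queue = [], deque([root])
    while queue:
        node = queue.popleft()
        if node is None:
            continue                      # skip missing children instead of stopping
        order.append(node.value)
        queue.append(node.leftNode)
        queue.append(node.rightNode)
    return order

root = _Node(1, _Node(2, _Node(4), _Node(5)), _Node(3, _Node(6), _Node(7)))
print(level_order(root))                  # -> [1, 2, 3, 4, 5, 6, 7]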
|