blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e70e0085c644f036b99106c70a2e9ba9ac30e75d | Python | warvariuc/nonograms | /nonograms/board.py | UTF-8 | 13,817 | 2.921875 | 3 | [] | no_license | import os
import itertools
from PyQt5 import QtCore, QtGui, QtWidgets
from .solver import PLACEHOLDER, FILLED, BLANK, solve_line
class Board():
    """Nonogram puzzle state: row/column clue numbers plus the cell grid.

    The board reports every change to a Qt item model (``self.model``) so
    attached views repaint.  Cell values are the solver constants
    PLACEHOLDER / FILLED / BLANK.
    """
    def __init__(self, model):
        self.model = model
        self.row_numbers = []   # per-row clue lists, left-padded with 0
        self.col_numbers = []   # per-column clue lists, left-padded with 0
        self.data = []          # grid of cell states, data[row][column]
        self.file_path = None   # path of the last loaded puzzle file
        self.clear()
    @property
    def row_count(self):
        """Number of puzzle rows."""
        return len(self.row_numbers)
    @property
    def col_count(self):
        """Number of puzzle columns."""
        return len(self.col_numbers)
    @property
    def max_row_num_count(self):
        """Width (in cells) of the row-clue area; all rows are padded equally."""
        return len(self.row_numbers[0]) if self.row_numbers else 0
    @property
    def max_col_num_count(self):
        """Height (in cells) of the column-clue area."""
        return len(self.col_numbers[0]) if self.col_numbers else 0
    def clear(self):
        """Reset every cell to PLACEHOLDER, notifying the model around it."""
        self.model.layoutAboutToBeChanged.emit()
        self.data = [[PLACEHOLDER] * self.col_count for _ in range(self.row_count)]
        self.model.layoutChanged.emit()
    def set_data(self, row, column, state):
        """Set one board cell (board coordinates) and emit dataChanged for
        the corresponding model index (offset by the clue areas)."""
        self.data[row][column] = state
        index = self.model.index(
            row + self.max_col_num_count, column + self.max_row_num_count)
        self.model.dataChanged.emit(index, index)
    def get_numbers(self, line):
        """Return the clue numbers (lengths of FILLED runs) for one solved
        line.  Raises TypeError on any unexpected cell value."""
        numbers = []
        for state, block in itertools.groupby(line):
            if state == FILLED:
                numbers.append(sum(1 for _ in block))
            elif state == BLANK:
                pass  # gaps contribute no clue number
            else:
                raise TypeError('Invalid cell value: %r' % state)
        return numbers
    def load(self, file_path):
        """Load a solved-board text file and derive the row/column clues."""
        if not os.path.isfile(file_path):
            return
        with open(file_path) as file:
            board = file.read().splitlines()
        row_numbers = [self.get_numbers(row) for row in board]
        col_numbers = [self.get_numbers([row[col_no] for row in board])
                       for col_no in range(len(board[0]))]
        # Left-pad every clue list with zeros so each area is rectangular.
        max_row_num_count = max(map(len, row_numbers))
        self.row_numbers = [
            [0] * (max_row_num_count - len(_row_numbers)) + _row_numbers
            for _row_numbers in row_numbers
        ]
        max_col_num_count = max(map(len, col_numbers))
        self.col_numbers = [
            [0] * (max_col_num_count - len(_col_numbers)) + _col_numbers
            for _col_numbers in col_numbers
        ]
        self.clear()
        self.file_path = file_path
    def load1(self, file_path):
        """Load a clue-number file in the format used by
        http://www.bestcrosswords.ru/jp/20003009-form.html

        Section 0 holds the column clues, section 1 the row clues; an
        empty line switches sections.
        """
        if not os.path.isfile(file_path):
            return
        with open(file_path) as file:
            section_no = 0
            vertical_numbers_lines = []
            horizontal_numbers_lines = []
            for line in file:
                line = line.strip()
                if line:
                    if section_no == 0:
                        vertical_numbers_lines.append(map(int, line.split()))
                    elif section_no == 1:
                        horizontal_numbers_lines.append(map(int, line.split()))
                else:
                    section_no += 1
        # The file stores clues column-wise; transpose into per-line lists.
        self.row_numbers = list(zip(*horizontal_numbers_lines))
        self.col_numbers = list(zip(*vertical_numbers_lines))
        self.clear()
        self.file_path = file_path
    def solve_row(self, row_no):
        """Run the line solver over one row and write back the result."""
        numbers = filter(None, self.row_numbers[row_no])  # drop the 0 padding
        line = solve_line(self.data[row_no], numbers)
        for col_no, state in enumerate(line):
            self.set_data(row_no, col_no, state)
    def solveColumn(self, col_no):
        """Run the line solver over one column and write back the result.

        NOTE: camelCase name kept for existing callers (BoardView).
        """
        numbers = filter(None, self.col_numbers[col_no])
        line = solve_line([row[col_no] for row in self.data], numbers)
        for row_no, state in enumerate(line):
            self.set_data(row_no, col_no, state)
    def save(self):
        """Write the solved grid next to the source file as ``*.nonogram``.

        Raises:
            Exception: if any cell is still unsolved (PLACEHOLDER).
        """
        for row in self.data:
            for state in row:
                if state not in (FILLED, BLANK):
                    raise Exception('The puzzle is not yet solved!')
        out_path = os.path.splitext(self.file_path)[0] + '.nonogram'
        with open(out_path, 'w', encoding='utf8') as file:
            for line in self.data:
                file.write(''.join(line) + '\n')
class BoardView(QtWidgets.QTableView):
    """Table view displaying the nonogram board plus its clue areas.

    All mouse interaction is intercepted on the viewport via eventFilter().
    """
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setModel(BoardModel(self))
        self.verticalHeader().hide()
        self.horizontalHeader().hide()
        self.item_delegate = BoardViewItemDelegate(self)
        self.setItemDelegate(self.item_delegate)
        self.setShowGrid(False)
        #self.setFocusPolicy(QtCore.Qt.NoFocus)
        #self.setStyleSheet('font: 10pt "Courier New"')
        self.viewport().installEventFilter(self)
        self.init_view(22) # default cell size
    def init_view(self, cell_size):
        """Resize all rows/columns to ``cell_size`` and scale the clue font."""
        # State being "painted" while the mouse is dragged; None = no drag.
        self.current_action = None
        self.cell_size = cell_size
        self.verticalHeader().setDefaultSectionSize(cell_size)
        self.horizontalHeader().setDefaultSectionSize(cell_size)
        for row_no in range(self.verticalHeader().count()):
            self.setRowHeight(row_no, cell_size)
        for column_no in range(self.horizontalHeader().count()):
            self.setColumnWidth(column_no, cell_size)
        numbers_font = QtWidgets.QApplication.font()
        fm = QtGui.QFontMetrics(numbers_font)
        # assume clue numbers have at most 2 digits
        factor = (cell_size - 6) / fm.width('99')
        if factor < 1 or factor > 1.25:
            numbers_font.setPointSizeF(numbers_font.pointSizeF() * factor)
        self.setFont(numbers_font)
    def switch_cell(self, mouse_event, state=None):
        """Set the board cell under the mouse.

        With state=None (mouse move) the current drag action is reused;
        with an explicit state, clicking a cell that already has that
        state toggles it back to PLACEHOLDER.
        """
        board = self.model().board
        row = self.rowAt(mouse_event.y())
        column = self.columnAt(mouse_event.x())
        board_row = row - board.max_col_num_count
        board_column = column - board.max_row_num_count
        if board_row < 0 or board_column < 0:
            # click landed in a clue area, not an editable cell
            return
        if state is None:
            if self.current_action is None:
                return
            state = self.current_action
        else:
            if state == board.data[board_row][board_column]:
                state = PLACEHOLDER
            self.current_action = state
        board.set_data(board_row, board_column, state)
    def eventFilter(self, target, event): # target - viewport
        """Handle clicks (fill/blank cells), double clicks (solve a line or
        the whole puzzle) and Ctrl+wheel (zoom)."""
        if event.type() == QtCore.QEvent.MouseButtonDblClick:
            if event.button() == QtCore.Qt.LeftButton:
                model = self.model()
                row_no = self.rowAt(event.y()) - model.board.max_col_num_count
                col_no = self.columnAt(event.x()) - model.board.max_row_num_count
                if row_no < 0 and col_no < 0:
                    # double click in the top-left corner: solve everything
                    QtWidgets.qApp.mainWindow.handlePuzzleSolve()
                else:
                    if col_no < 0:
                        model.board.solve_row(row_no)
                    elif row_no < 0:
                        model.board.solveColumn(col_no)
            return True
        if event.type() == QtCore.QEvent.MouseButtonPress:
            if event.button() == QtCore.Qt.LeftButton:
                # LeftClick -> box; Shift + LeftClick -> space
                state = (BLANK if event.modifiers() == QtCore.Qt.ShiftModifier
                         else FILLED)
                self.switch_cell(event, state)
                return True
            elif event.button() == QtCore.Qt.RightButton:
                model = self.model()
                row_no = self.rowAt(event.y()) - model.board.max_col_num_count
                col_no = self.columnAt(event.x()) - model.board.max_row_num_count
                if row_no >= 0 and col_no >= 0:
                    self.switch_cell(event, BLANK) # RightClick -> space
                    return True
        elif event.type() == QtCore.QEvent.MouseButtonRelease:
            if event.buttons() == QtCore.Qt.NoButton:
                # drag finished: stop painting cells on mouse move
                self.current_action = None
                return True
        elif event.type() == QtCore.QEvent.MouseMove:
            self.switch_cell(event)
            return True
        elif event.type() == QtCore.QEvent.Wheel:
            # zoom board
            if event.modifiers() == QtCore.Qt.ControlModifier:
                cell_size = self.cell_size + int(event.angleDelta().y())
                if cell_size > 10: # minimum cell size
                    self.init_view(cell_size)
                return True
        # standard event processing
        return super().eventFilter(target, event)
class BoardViewItemDelegate(QtWidgets.QStyledItemDelegate):
    """Paints board cells, clue numbers and grid borders for BoardView."""
    def __init__(self, parent):
        super().__init__(parent)
        # Brushes/pens for the clue areas, empty cells, boxes and spaces.
        self.numbers_brush = QtGui.QBrush(QtGui.QColor(211, 211, 211))
        self.border_pen = QtGui.QPen(QtGui.QColor(211, 211, 211))
        self.box_brush = QtGui.QBrush(QtGui.QColor(80, 80, 80))
        self.cell_brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
        self.numbers_pen = QtGui.QPen(QtCore.Qt.black)
        self.numbers_border_pen = QtGui.QPen(QtGui.QColor(136, 136, 136))
        self.space_pen = QtGui.QPen(QtCore.Qt.gray)
        self.space_brush = QtGui.QBrush(QtCore.Qt.white)
    def paint(self, painter, option, index,
              render_hint=QtGui.QPainter.TextAntialiasing|QtGui.QPainter.HighQualityAntialiasing):
        """Render one model cell: clue corner, clue number or board cell."""
        painter.setRenderHints(render_hint)
        model = index.model()
        board = model.board
        row = index.row()
        column = index.column()
        board_row = row - board.max_col_num_count
        board_column = column - board.max_row_num_count
        if board_row < 0 and board_column < 0:
            # top-left corner shared by both clue areas: plain background
            painter.fillRect(option.rect, self.cell_brush)
            return
        if board_row < 0:
            # cell in the column-numbers area
            number = board.col_numbers[board_column][row]
            self.draw_number(
                painter, option.rect, number, board_row, board_column)
        elif board_column < 0:
            # cell in the row-numbers area
            number = board.row_numbers[board_row][column]
            self.draw_number(
                painter, option.rect, number, board_row, board_column)
        else:
            # board cell
            cell_value = board.data[board_row][board_column]
            if cell_value == PLACEHOLDER:
                painter.fillRect(option.rect, self.cell_brush)
            elif cell_value == FILLED:
                self.draw_box(painter, option.rect.adjusted(0, 0, -1, -1))
            elif cell_value == BLANK:
                self.draw_space(painter, option.rect)
            self.draw_borders(painter, option.rect, board_row, board_column)
    def draw_borders(self, painter, rect, row, column):
        """Draw the cell grid lines; thicker lines every 5 cells."""
        pen = self.border_pen
        pen.setWidth(1)
        painter.setPen(pen)
        painter.drawLine(rect.topRight(), rect.bottomRight())
        painter.drawLine(rect.bottomLeft(), rect.bottomRight())
        # Draw wider border each 5 cells
        pen.setWidth(2)
        painter.setPen(pen)
        if (row + 1) % 5 == 1 and row > 0:
            painter.drawLine(rect.topLeft(), rect.topRight())
        if (column + 1) % 5 == 1 and column > 0:
            painter.drawLine(rect.topLeft(), rect.bottomLeft())
    def draw_box(self, painter, rect):
        """Draw a filled (box) cell.
        """
        painter.fillRect(rect, self.box_brush)
    def draw_space(self, painter, rect):
        """Draw a blanked (whited-out) cell: white background with an X.
        """
        painter.fillRect(rect, self.space_brush)
        painter.setPen(self.space_pen)
        padding = min(rect.width(), rect.height()) / 3
        rect = rect.adjusted(padding, padding, -padding, -padding)
        painter.drawLine(rect.topLeft(), rect.bottomRight())
        painter.drawLine(rect.bottomLeft(), rect.topRight())
    def draw_number(self, painter, rect, number, row, column,
                    align=QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter):
        """Draw one clue cell: background, borders and the number if non-zero
        (zeros are padding and stay blank)."""
        painter.fillRect(rect, self.numbers_brush)
        pen = self.numbers_border_pen
        pen.setWidth(1)
        painter.setPen(pen)
        painter.drawLine(rect.topRight(), rect.bottomRight())
        painter.drawLine(rect.bottomLeft(), rect.bottomRight())
        pen.setWidth(2)
        painter.setPen(pen)
        if (row + 1) % 5 == 1 and row > 0:
            painter.drawLine(rect.topLeft(), rect.topRight())
        if (column + 1) % 5 == 1 and column > 0:
            painter.drawLine(rect.topLeft(), rect.bottomLeft())
        if number:
            painter.setPen(self.numbers_pen)
            rect = rect.adjusted(0, 0, -3, 0)
            painter.drawText(rect, align, str(number))
class BoardModel(QtCore.QAbstractTableModel):
    """Table model exposing the Board grid plus its clue areas to the view."""
    def __init__(self, parent):
        super().__init__(parent)
        self.board = Board(self)
    def rowCount(self, parent_index):
        # board rows plus the column-clue rows above them
        return self.board.row_count + self.board.max_col_num_count
    def columnCount(self, parent_index):
        # board columns plus the row-clue columns to their left
        return self.board.col_count + self.board.max_row_num_count
    def getCellValue(self, index):
        # NOTE(review): indexes board.data with raw model coordinates, without
        # subtracting the clue-area offsets used elsewhere — confirm callers
        # pass board coordinates here.
        return self.board.data[index.row()][index.column()]
    def data(self, index, role):
        # All painting is done by the delegate, so no display data is needed.
        return None
| true |
485b9eef381abd9bf9fff460bb035d75b7d87eb9 | Python | alisherAbdullaev/ML-LogisticRegression | /LogReg.py | UTF-8 | 4,207 | 3.25 | 3 | [] | no_license | import numpy as np
from scipy.optimize import minimize
class LogReg:
    """Binary logistic regression fitted by direct minimisation of the
    negative log-likelihood with scipy.optimize.minimize.

    Attributes set on fit:
        N            -- number of training samples
        classes      -- the two class labels (np.unique order); classes[1]
                        is treated as the "positive" class
        coefficients -- [intercept, w1, ..., wp]
        loss         -- training negative log-likelihood at the optimum
        accuracy     -- training accuracy, rounded to 4 decimals
    """
    def __init__(self, X, y):
        #Class attributes
        self.X = np.array(X)
        self.y = np.array(y)
        #Number of training samples
        self.N = self.X.shape[0]
        #List of names of the two classes
        self.classes = np.unique(self.y)

        def NLL(beta):
            """Negative log-likelihood of *beta* on the training data."""
            ones = np.ones((self.N, 1))
            X_ = np.hstack((ones, self.X))
            z = np.dot(X_, beta).reshape(-1,)
            # sigmoid(-z) = P(class 0); pi is the likelihood of each true label
            p = (1 / (1 + np.exp(z)))
            pi = np.where(self.y == self.classes[1], 1 - p, p)
            return np.sum(-np.log(pi))

        #Best coefficients: minimise the NLL from an all-zero starting point
        beta_guess = np.zeros(self.X.shape[1] + 1)
        min_results = minimize(NLL, beta_guess)
        self.coefficients = min_results.x
        #Training loss for the optimal model
        self.loss = NLL(self.coefficients)
        #Training accuracy
        self.accuracy = round(self.score(X, y), 4)
    def predict_proba(self, X):
        """Return P(class 1) for each row of X.

        Fix: no longer overwrites self.X, which used to clobber the stored
        training data on every prediction call.
        """
        X = np.array(X)
        ones = np.ones((X.shape[0], 1))
        X_ = np.hstack((ones, X))
        z = np.dot(X_, self.coefficients).reshape(-1,)
        return 1 / (1 + np.exp(-z))
    def predict(self, X, t = 0.5):
        """Return predicted class labels for X at decision threshold t."""
        return np.where(self.predict_proba(X) < t, self.classes[0], self.classes[1])
    def score(self, X, y, t = 0.5):
        """Return the accuracy of predictions on (X, y) at threshold t."""
        y = np.array(y)
        return np.sum(self.predict(X, t) == y) / len(y)
    def summary(self):
        """Print a short report of the fitted model."""
        print('+-------------------------------+')
        print('|  Logistic Regression Summary  |')
        print('+-------------------------------+')
        print('Number of training observations: ' + str(self.N))
        print('Coefficient Estimated: ' + str(self.coefficients))
        print('Negative Log-likelihood: ' + str(np.around(self.loss, decimals = 4)))
        print('Accuracy: ' + str(self.accuracy))
    def precision_recall(self, X, y, t = 0.5):
        """Print per-class precision and recall on (X, y) at threshold t."""
        y = np.array(y)
        preds = self.predict(X, t)  # hoisted: was recomputed for every count
        #True positives for class 0
        X0 = np.sum((preds == self.classes[0]) & (y == self.classes[0]))
        #False positives for class 0
        X01 = np.sum((preds == self.classes[0]) & (y == self.classes[1]))
        #True positives for class 1
        X1 = np.sum((preds == self.classes[1]) & (y == self.classes[1]))
        #False positives for class 1
        X10 = np.sum((preds == self.classes[1]) & (y == self.classes[0]))
        precision0 = round(X0 / (X0 + X01), 4)
        recall0 = round(X0 / np.sum(y == self.classes[0]), 4)
        precision1 = round(X1 / (X1 + X10), 4)
        recall1 = round(X1 / np.sum(y == self.classes[1]), 4)
        print('Class: ' + str(self.classes[0]))
        print('  Precision = ' + str(precision0))
        print('  Recall    = ' + str(recall0))
        print('Class: ' + str(self.classes[1]))
        print('  Precision = ' + str(precision1))
        print('  Recall    = ' + str(recall1))
    def confusion_matrix(self, X, y, t = 0.5):
        """Return the 2x2 confusion matrix; rows = true class, cols = predicted.

        Fix: compares against the *y* argument — previously it used stale
        self.y, giving wrong counts whenever y differed from training labels.
        """
        y = np.array(y)
        conf = np.zeros(shape = (2, 2), dtype = 'int')
        res = np.where(self.predict_proba(X) > t, self.classes[1], self.classes[0])
        for i in range(len(res)):
            r = 0 if y[i] == self.classes[0] else 1
            c = 0 if res[i] == self.classes[0] else 1
            conf[r, c] += 1
        self.conf = conf
        return self.conf
| true |
35d72a72b03fe5340f33075274775f777a10470f | Python | Oh-Donggyu/RunandLearn_Algorism_Practice | /[210924 - BOJ] 9372 - 상근이의여행/김태현_T2066.py | UTF-8 | 257 | 2.921875 | 3 | [
"MIT"
] | permissive | import sys
# BOJ 9372 (Sang-geun's travel): the minimum number of flight routes needed
# to connect N countries is the size of a spanning tree, i.e. N - 1, so the
# edge list only has to be consumed, not processed.
T = int(sys.stdin.readline())
result = []
for i in range(T):
    N, M = map(int, sys.stdin.readline().split())
    result.append(N - 1)  # spanning tree of N nodes has N-1 edges
    for j in range(M):
        # read and discard the M edges; they do not affect the answer
        a, b = map(int, sys.stdin.readline().split())
# fix: print once after all test cases — the loop used to sit inside the
# test-case loop, re-printing earlier answers after every case
for r in result:
    print(r)
ea18f69abde65938d9c19996af3fa01f3d3e6869 | Python | JohnFrazier/qpdfnote | /qtpdfnote.py | UTF-8 | 6,529 | 2.578125 | 3 | [] | no_license | #!/usr/bin/python
import sys
from PyQt4 import QtGui
from PyQt4.QtCore import Qt, QEvent
import popplerqt4
import citations
# CLI usage text printed to stderr when no PDF path is supplied.
usage = """
Demo to load a PDF and display the first page.
Usage:
qtpdfnote.py file.pdf
"""
class Overlay(QtGui.QWidget):
    """Transparent widget painted on top of another widget.

    Subclasses override drawFigures() to render their own content; mouse
    events pass straight through (WA_TransparentForMouseEvents).
    """
    def __init__(self, parent = None):
        # NOTE(review): calls QTextEdit.__init__ although the class derives
        # from QWidget — works only while the __init__ signatures happen to
        # be compatible; confirm whether QWidget.__init__ was intended.
        QtGui.QTextEdit.__init__(self,parent)
        palette = QtGui.QPalette(self.palette())
        palette.setColor(palette.Base, Qt.transparent)
        self.setPalette(palette)
        self.setAttribute(Qt.WA_TransparentForMouseEvents)
    def paintEvent(self, event):
        """Qt paint hook: delegate all drawing to drawFigures()."""
        painter = QtGui.QPainter()
        painter.begin(self)
        self.drawFigures(painter)
        painter.end()
    def drawFigures(self, painter):
        """Hook for subclasses; the base class draws nothing."""
        pass
class OverlayEdit(Overlay):
    """Overlay that highlights a list of rectangles (self.figures)."""
    def __init__(self, parent=None):
        super(OverlayEdit, self).__init__(parent)
        self.figures=[]  # rectangles (QRect/QRectF) to highlight
    def drawFigures(self, painter):
        """Paint each rectangle with a translucent yellow fill."""
        fgcolor = QtGui.QColor(0,0,0,122)
        fgcolor.setNamedColor("yellow")  # replaces the RGBA set above
        fgcolor.setAlpha(127)
        edgecolor = QtGui.QColor(0,0,0,122)
        painter.setPen(edgecolor)
        painter.setBrush(fgcolor)
        for a in self.figures:
            painter.drawRect(a)
class DisplayInfo():
    """Snapshot of the X11 display's colour depth and application DPI."""
    def __init__(self, app):
        x11 = app.desktop().window().x11Info()
        self.depth = x11.depth()
        self.dpiX = x11.appDpiX()
        self.dpiY = x11.appDpiY()
class PdfState():
    """View state for one PDF document: file, page, zoom, rotation, hints."""
    def __init__(self, filename, page=1, zoom=1, rotate=None, hints=None):
        self.filename = filename
        self.page = page  # 1-based page number
        self.zoom = zoom
        # fix: a stray `self.rotate = rotate` after this if/else used to
        # overwrite the Rotate0 default with None whenever rotate was omitted
        if rotate:
            self.rotate = rotate
        else:
            self.rotate = popplerqt4.Poppler.Page.Rotate0
        if not hints:
            # default render hints: antialias graphics and text
            self.renderHints = [ popplerqt4.Poppler.Document.Antialiasing,
                                 popplerqt4.Poppler.Document.TextAntialiasing ]
        else:
            self.renderHints = hints
class Pdf():
    """Wrapper around a Poppler document plus its current view state."""
    def __init__(self, filename):
        self.state = PdfState(filename)
        self.doc = popplerqt4.Poppler.Document.load(filename)
        self.page = self.doc.page(self.state.page - 1)  # Poppler is 0-based
        self.pages = self.doc.numPages()
    def setOptions(self, state=None):
        """Apply a new PdfState, reloading the document if the file changed."""
        if state:
            if state.filename != self.state.filename:
                self.doc = popplerqt4.Poppler.Document.load(state.filename)
            self.state = state
        for r in self.state.renderHints:
            self.doc.setRenderHint(r)
        self.resetPage()  # fix: was a bare resetPage(), a NameError at runtime
    def decPage(self):
        """Go to the previous page; return True if the page changed."""
        if 1 < self.state.page:
            self.state.page -= 1
            self.resetPage()
            return True
        return False
    def incPage(self):
        """Go to the next page; return True if the page changed."""
        if self.pages > self.state.page:
            self.state.page += 1
            self.resetPage()
            return True
        return False
    def resetPage(self):
        """Re-fetch the Poppler page object for the current page number."""
        self.page = self.doc.page(self.state.page - 1)
    def incZoom(self):
        """Double the zoom factor and refresh the page."""
        self.state.zoom *= 2
        print(self.state.zoom)
        self.resetPage()
    def decZoom(self):
        """Halve the zoom factor and refresh the page."""
        self.state.zoom /= 2
        print(self.state.zoom)
        self.resetPage()
    def getPageImage(self):
        """Render the current page to a QImage.

        NOTE(review): renderToImage() is called without DPI or rotation
        arguments, so self.state.zoom/rotate are ignored — confirm intended.
        """
        return self.page.renderToImage()
    def getWordPos(self):
        # NOTE(review): appears dead/broken — Poppler's Page.text() expects a
        # region and returns a string, so iterating it and reading ``w.bbox``
        # would fail; getTextAreas() below is the working variant.
        result = []
        words = self.page.text()
        return [w.bbox.getRect() for w in words]
    def getTextAreas(self):
        """Bounding boxes of every text box on the current page."""
        return [w.boundingBox() for w in self.page.textList()]
    def getText(self):
        """Poppler TextBox list for the current page."""
        return self.page.textList()
class Window(QtGui.QMainWindow):
    """Main window: scrollable PDF page display with a highlight overlay
    and vi-style key bindings for paging and zooming.
    """
    def __init__(self, ctx, parent = None):
        QtGui.QMainWindow.__init__(self, parent)
        self.generic = QtGui.QWidget(self)
        self.pdflabel = QtGui.QLabel(self)
        self.pdfArea = QtGui.QScrollArea()
        self.ctx=ctx
        self.pal = QtGui.QPalette(self.palette())
        self.setWindowTitle('Pdf notes')
        self.pdflabel.setBackgroundRole(self.pal.Base)
        self.vbox = QtGui.QVBoxLayout(self.generic)
        self.vbox.addWidget(self.pdfArea)
        self.setCentralWidget(self.pdfArea)
        self.overlay = OverlayEdit(self.centralWidget())
        # key -> handler dispatch table consulted by keyPressEvent
        self.keybinds = {
            Qt.Key_Space: self.pgDnEvent,
            Qt.Key_J: self.pgDnEvent,
            Qt.Key_K: self.pgUpEvent,
            Qt.Key_Up: self.pgUpEvent,
            Qt.Key_Down: self.pgDnEvent,
            Qt.Key_Plus: self.zoomIncEvent,
            Qt.Key_Minus: self.zoomDecEvent}
        self.setPage()
    def zoomIncEvent(self, event):
        """Zoom in and repaint the current page."""
        self.ctx.pdf.incZoom()
        self.setPage()
        event.accept()
    def zoomDecEvent(self, event):
        """Zoom out and repaint the current page."""
        self.ctx.pdf.decZoom()
        self.setPage()
        event.accept()
    def pgDnEvent(self, event):
        """Advance one page and repaint."""
        self.ctx.pdf.incPage()
        self.setPage()
        event.accept()
    def pgUpEvent(self, event):
        """Go back one page and repaint."""
        self.ctx.pdf.decPage()
        self.setPage()
        event.accept()
    def keyPressEvent(self, event):
        """Dispatch bound keys via self.keybinds; ignore everything else."""
        if event.key() in self.keybinds.keys():
            return self.keybinds[event.key()](event)
        else:
            return event.ignore()
    def setPage(self):
        """Render the current PDF page into the label and refresh the
        overlay's text-area highlights."""
        self.pdflabel.setPixmap(QtGui.QPixmap.fromImage(
            self.ctx.pdf.getPageImage( )))
        self.pdfArea.setWidget(self.pdflabel)
        self.setFiguresPoints(self.ctx.pdf.getTextAreas())
    def setFiguresPoints(self, figures):
        """Hand the page's text rectangles to the overlay for highlighting."""
        pageSize = self.ctx.pdf.page.pageSize()
        #fpercent = [[f / p for f in fig for p in pageSize ]for fig in figures]
        fpercent = [ f for f in figures ]
        self.overlay.figures = fpercent
    def resizeEvent(self, event):
        """Keep the overlay the same size as the window."""
        self.overlay.resize(event.size())
        event.ignore()
    def quitEvent(self, event):
        """Post a Close event to this window."""
        QtGui.QApplication.postEvent(self,QEvent(QEvent.Close))
class Context():
    """Application context: Qt app, the loaded Pdf (or None), the main
    window and display information."""
    def __init__(self, filename=None):
        self.app = QtGui.QApplication(sys.argv)
        self.pdf = None
        if filename:
            self.pdf = Pdf(filename)
        self.window = Window(self)
        self.dpy = DisplayInfo(self.app)
        self.updateQueue = []
    def showWindow(self):
        """Show the main window."""
        self.window.show()
def main(argv = None):
    """Parse the command line, build the app context and show its window.

    Returns the Context on success, or None when no PDF path was given
    (after writing the usage text to stderr).
    """
    args = argv or sys.argv
    if len(args) < 2:
        sys.stderr.write(usage)
        return None
    ctx = Context(filename=args[-1])
    ctx.showWindow()
    return ctx
# Script entry point: build the context, then hand control to the Qt event
# loop; exit with status 1 if startup failed (no PDF given).
if __name__ == "__main__":
    ctx = main()
    if ctx:
        sys.exit(ctx.app.exec_())
    sys.exit(1)
| true |
2c9bed990084dd4da8bdfbb6a2a24f4c65c43faa | Python | codyscode/project-lava | /Deprecated/Ronjie/testScript.py | UTF-8 | 5,407 | 2.78125 | 3 | [] | no_license | """
Must Pip install:
pandas
seaborn
matplotlib
pathlib
"""
import matplotlib
matplotlib.use('Agg')  # headless backend; must be set before pyplot loads
import sys
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import os
import shutil
from pathlib import Path
from mpl_toolkits.axes_grid1 import ImageGrid  # fix: was misspelled 'mp1_toolkits'
import numpy as np

# Global matplotlib figure counter shared by the plotting helpers.
NUMPLOT = 0
"""
Checks to see if "Plots" Folder exists, creates one if it doesnt
"""
def checkDirectory():
    """Ensure a 'Plots' folder exists in the current working directory.

    Creates it (printing a notice) when missing; no-op otherwise.
    """
    foldername = 'Plots'
    final_directory = os.path.join(os.getcwd(), foldername)
    if not os.path.exists(final_directory):
        # exist_ok guards the race between the exists() check and creation
        os.makedirs(final_directory, exist_ok=True)
        print("Folder Created:", foldername)
"""
Creates SwarmPlot for desired Input Queue
Then moves plot images into "Plots" subdirectory
Takes in:
fd: panda database
input: Input Queue for graph
directory: folder to move content into
"""
def SwarmSubPlot( fd, input, baseName, directory):
    """Draw a swarm plot (Packet vs Output, hue Algorithm) for one input
    queue and move the resulting PNG into *directory*.

    fd        -- DataFrame with Input/Output/Packet/Algorithm columns
    input     -- input-queue number used to filter the frame
    baseName  -- stem of the source CSV, used in the title and file name
    directory -- destination folder for the image
    """
    global NUMPLOT
    # NOTE(review): NUMPLOT is declared global but never incremented, so
    # every call draws onto the same figure.
    plt.figure(NUMPLOT)
    sns.swarmplot(x="Output" , y= "Packet",hue ="Algorithm", dodge = True, data= fd[fd['Input']==input] )
    title = 'Plot ' + baseName +' Queue' + str(input)
    # NOTE(review): matplotlib Figure has no set_title (that is an Axes
    # method / Figure.suptitle) — this call likely raises AttributeError.
    plt.figure(NUMPLOT).set_title(title)
    # fig = figure.get_figure()
    filename = 'swarm_'+baseName+str(input)+".png"
    plt.figure(NUMPLOT).savefig(filename)
    CWD = os.getcwd()
    shutil.copy(os.path.join(CWD,filename), directory)
    os.remove(os.path.join(CWD,filename))
def scatterSubPlot( fd, input, baseName, directory):
    """Draw a scatter plot (Packet vs Input, hue Output) for the whole
    frame and move the PNG into *directory*.  *input* is unused.

    NOTE(review): seaborn.scatterplot returns an Axes, which has neither a
    .fig attribute nor .savefig, and plt.plot(fd) on a whole DataFrame is
    dubious — this function looks broken as written; confirm before use.
    """
    plt.figure  # NOTE(review): missing () — this line does nothing
    scatt = sns.scatterplot(x="Input" , y= "Packet",hue ="Output", dodge = True, data= fd)
    scatt.fig.suptitle(baseName)
    plt.plot(fd)
    filename = 'scatter_'+ baseName+".png"
    scatt.savefig(filename)
    CWD = os.getcwd()
    shutil.copy(os.path.join(CWD,filename), directory)
    os.remove(os.path.join(CWD,filename))
"""
Creates CatPlot for desired Algorithm
Then moves plot images into "Plots" subdirectory
Takes in:
fd: panda database
input: Input Queue for graph
directory: folder to move content into
"""
def catSubPlot( fd, fileNum, baseName, directory):
    """Draw a seaborn catplot (Packet vs Input, hue Output) for the whole
    frame, title it with *baseName*, and move the PNG into *directory*.
    The *fileNum* parameter is currently unused.
    """
    plt.figure  # NOTE(review): missing () — this line does nothing
    cat = sns.catplot(x="Input" , y= "Packet",hue ="Output", dodge = True, data= fd)
    cat.fig.suptitle(baseName)
    filename = 'cat_'+ baseName+".png"
    cat.savefig(filename)
    CWD = os.getcwd()
    shutil.copy(os.path.join(CWD,filename), directory)
    os.remove(os.path.join(CWD,filename))
"""
Creates Bar Graph for desired Input Queue
Then moves plot images into "Plots" subdirectory
Takes in:
fd: panda database
input: Input Queue for graph
"""
def barSubPlot( fd, input, fileCount,directory):
    """Draw a bar plot (Packet vs Output, hue Algorithm) for one input
    queue on figure number input+fileCount and move the PNG into
    *directory*.  Returns None (the set_title result is discarded).
    """
    figNum = input+ fileCount
    #print("Fignum is:", figNum)
    plt.figure(figNum)
    title = 'InputQueue' + str(input)
    outBar = sns.barplot(x="Output" , y= "Packet",hue ="Algorithm", dodge = True, data= fd[fd['Input']==input]).set_title(title)
    filename = 'bar_collection_'+str(input)+".png"
    plt.figure(figNum).savefig(filename)
    CWD = os.getcwd()
    shutil.copy(os.path.join(CWD,filename), directory)
    os.remove(os.path.join(CWD,filename))
"""
Runs through and creates Swarm plots for CSV file inputted
"""
def runSwarm(fileName):
    """Create swarm plots for input queues 1-8 from one CSV file.

    Per-queue failures are reported but do not stop the remaining queues
    (some queues may be absent from the data).
    """
    try:
        fd = pd.read_csv(fileName)  # hoisted: read the CSV once, not 8 times
    except Exception as exc:
        print("could not read", fileName, "-", exc)
        return
    baseName = Path(fileName).stem      # invariant across queues
    plots_dir = os.path.join(os.getcwd(), 'Plots')
    for i in range(1, 9):
        print("I value:", i)
        try:
            SwarmSubPlot(fd, i, baseName, plots_dir)
        except Exception as exc:
            # fix: was a silent `except Exception: pass` that hid all errors
            print("swarm plot failed for queue", i, "-", exc)
"""
Runs through and creates Swarm plots for CSV file inputted
"""
def runCat(fileName, fileNum):
    """Create a catplot for one CSV file and store it under ./Plots."""
    frame = pd.read_csv(fileName)
    stem = Path(fileName).stem
    target = os.path.join(os.getcwd(), 'Plots')
    catSubPlot(frame, fileNum, stem, target)
    del frame
def runScat(fileName, fileNum):
    """Create a scatter plot for one CSV file and store it under ./Plots."""
    frame = pd.read_csv(fileName)
    stem = Path(fileName).stem
    target = os.path.join(os.getcwd(), 'Plots')
    scatterSubPlot(frame, fileNum, stem, target)
    del frame
"""
Finds all .csv files from specified directory and creates a cat Plot for that run
"""
def singleRun(directoryPath):
    """Create cat and scatter plots for every CSV under *directoryPath*.

    Returns the number of CSV files processed.
    """
    numberFiles = 0
    for root, dirs, files in os.walk(directoryPath):
        for file in files:
            if file.endswith(".csv"):
                # fix: build the path from the walked root instead of
                # re-deriving it from sys.argv[1], so the parameter (and
                # nested sub-directories) are honoured
                filePath = os.path.join(root, file)
                print(os.path.splitext(filePath)[0])
                runCat(filePath, numberFiles)
                runScat(filePath, numberFiles)
                numberFiles += 1
    return numberFiles
"""
Reads in all csv files into database then runs
"""
def collectionRun(directoryPath, fileCount):
    """Read every CSV under *directoryPath* into one DataFrame and draw a
    bar plot per input queue (1-8).

    NOTE(review): this function looks unfinished/broken as written:
      * ``nparrange`` is not defined (presumably np.arange was meant);
      * ``outBar`` is indexed before ever being defined;
      * barSubPlot returns None, so ``outBar[i].imshow(image)`` would fail;
      * paths are rebuilt from sys.argv[1] instead of the walked root.
    """
    list = []  # NOTE(review): shadows the builtin ``list``
    for root,dirs,files in os.walk(directoryPath):
        for file in files:
            if file.endswith(".csv"):
                print(file)
                folderPath = os.path.join(os.getcwd(),sys.argv[1])
                filePath = os.path.join(folderPath,file)
                print(filePath)
                df = pd.read_csv(filePath)
                list.append(df)
    big_frame = pd.concat(list, axis = 0, ignore_index = True)
    image = nparrange(100).reshape((10,10))
    for i in range(1, 9):
        outBar[i]= barSubPlot(big_frame, i,fileCount, os.path.join(os.getcwd(), 'Plots'))
        outBar[i].imshow(image)
"""
might need to change to a dot graph
"""
directory = os.path.join(os.getcwd(),sys.argv[1])
checkDirectory()
num =singleRun(directory)
collectionRun(directory, num)
print(num) | true |
c562fe7b1a473be93c9d7bc256555dfef10c7f6c | Python | StevenSavant/CodeSamples | /Python/Freelance/Personality analysis Funtion Test.py | UTF-8 | 776 | 3.515625 | 4 | [] | no_license |
def FindPersonality
personality = "";
highest = max(loving,shy,adventerous,mean);
if loving == highest:
personality = "loving";
if highest == shy:
personality = " Soft Hearted";
if highest == adventerous:
personality = " Strong Hearted";
if highest == mean:
personality = " Tough Hearted";
elif shy == highest:
personality = "shy";
if highest == adventerous:
personality = " strong and silent";
if highest == mean:
personality = " a ticking time bomb";
elif adventerous == highest:
personality = "adventerous";
if highest == mean:
personality = "Really Tough";
else:
personality = "just plain mean!";
print("You " + name + " are " + personality);
| true |
92cd215601cf0ebecaf066a8dfd9d90b65890a39 | Python | asrayousuf/Eva | /src/loaders/load.py | UTF-8 | 12,348 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | """
This folder contains all util functions needed to load the dataset with
annotation.
Demo could be run with the command
python loaders/load.py
@Jaeho Bang
"""
import os
import time
import xml.etree.ElementTree as ET
import cv2
import numpy as np
import pandas as pd
from . import TaskManager
# Make this return a dictionary of label to data for the whole dataset
class Load:
    """Loads UA-DETRAC images and XML annotations into numpy arrays and
    pandas frames, deriving per-frame vehicle-type, speed, color and
    intersection labels.
    """
    def __init__(self, image_width=960, image_height=540):
        self.data_dict = {}
        self.label_dict = {}
        # Known label vocabularies for the annotation-derived filters
        self.vehicle_type_filters = ['car', 'van', 'bus', 'others']
        self.speed_filters = [40, 50, 60, 65, 70]
        self.intersection_filters = ["pt335", "pt342", "pt211", "pt208"]
        self.color_filters = ['white', 'black', 'silver', 'red']
        self.image_width = image_width
        self.image_height = image_height
        self.image_channels = 3
        self.task_manager = TaskManager.TaskManager()

    @staticmethod
    def image_eval(image_str):
        """Parse a whitespace-separated array dump back into a 540x960x3
        image array.

        NOTE(review): uses eval() on the string (only safe for trusted
        input) and hard-codes 540x960x3 rather than the instance size.
        """
        image_str = ' '.join(image_str.split())
        image_str = image_str.replace(" ", ",")
        image_str = image_str[0] + image_str[2:]
        evaled_image = np.array(eval(image_str))
        height = 540
        width = 960
        channels = 3
        return evaled_image.reshape(height, width, channels)

    @staticmethod
    def save(filename, panda_data):
        """Save a DataFrame as CSV under <project>/data/pandas/<filename>."""
        project_dir = os.path.dirname(
            os.path.dirname(os.path.abspath(__file__)))  # Eva / eva
        csv_folder = os.path.join(project_dir, "data", "pandas")
        if os.path.exists(csv_folder) is False:
            os.makedirs(csv_folder)
        csv_filename = os.path.join(csv_folder, filename)
        panda_data.to_csv(csv_filename, sep=",", index=None)

    def load(self, dir_dict):
        """Load train images + annotations (and optional test images).

        dir_dict keys: 'train_image', 'test_image' (may be None) and
        'train_anno'.  Speed and vehicle type come from the XML; color and
        intersection are computed from the images by the task manager.

        Returns [train_image_array, train_label_frame, test_frame_or_None].
        """
        train_image_dir = dir_dict['train_image']
        test_image_dir = dir_dict['test_image']
        train_anno_dir = dir_dict['train_anno']
        labels_list = ["vehicle_type", "color", "speed", "intersection"]
        if __debug__:
            print("Inside load, starting image loading...")
        train_img_array = self._load_images(train_image_dir)
        if __debug__:
            print(("Done loading train images.. shape of matrix is " + str(
                train_img_array.shape)))
        vehicle_type_labels, speed_labels, color_labels, intersection_labels \
            = self._load_XML(train_anno_dir, train_img_array)
        if __debug__:
            print(("Done loading the labels.. length of labels is " + str(
                len(vehicle_type_labels))))
        data_table = list(zip(vehicle_type_labels, color_labels, speed_labels,
                              intersection_labels))
        if __debug__:
            print(("data_table shape is ", str(len(data_table))))
        dt_train = pd.DataFrame(data=data_table, columns=labels_list)
        if __debug__:
            print("Done making panda table for train")
        dt_test = None
        if test_image_dir is not None:
            test_img_list = self._load_images(test_image_dir)
            if __debug__:
                print(("Done loading test images.. shape of matrix is " + str(
                    test_img_list.shape)))
            dt_test = pd.DataFrame(data=list(test_img_list), columns=['image'])
            if __debug__:
                print("Done making panda table for test")
        return [train_img_array, dt_train, dt_test]

    def _convert_speed(self, original_speed):
        """Scale a raw annotation speed into a rough estimate.

        TODO: the raw values' meaning is unknown; in the meantime the
        observed 0-20 range is extrapolated onto 0-100 (multiply by 5).
        """
        return original_speed * 5

    def _load_XML(self, directory, images):
        """Parse UA-DETRAC XML annotations under *directory*.

        Returns [vehicle_types, speeds, colors, intersections]: per-frame
        label lists aligned with *images* (None where a frame is empty).
        """
        car_labels = []
        speed_labels = []
        color_labels = []
        intersection_labels = []
        for root, subdirs, files in os.walk(directory):
            files.sort()
            for file in files:
                file_path = os.path.join(root, file)
                if ".swp" in file_path:
                    continue  # skip editor swap files
                tree = ET.parse(file_path)
                tree_root = tree.getroot()
                start_frame_num = 1
                start_frame = True
                for frame in tree_root.iter('frame'):
                    curr_frame_num = int(frame.attrib['num'])
                    # Pad for frames missing before the first annotated one
                    if start_frame and curr_frame_num != start_frame_num:
                        car_labels.append(
                            [None] * (curr_frame_num - start_frame_num))
                        speed_labels.append(
                            [None] * (curr_frame_num - start_frame_num))
                    car_per_frame = []
                    speed_per_frame = []
                    bboxes = []
                    for box in frame.iter('box'):
                        left = int(eval(box.attrib['left']))
                        top = int(eval(box.attrib['top']))
                        right = left + int(eval(box.attrib['width']))
                        bottom = top + int(eval(box.attrib['height']))
                        bboxes.append([left, top, right, bottom])
                    # curr_frame_num - 1: frames are 1-based, images 0-based
                    color_per_frame = self.task_manager.call_color(
                        images[curr_frame_num - 1], bboxes)
                    scene = file.replace(".xml",
                                         "")  # MVI_20011.xml -> MVI_20011
                    intersection_per_frame = \
                        self.task_manager.call_intersection(
                            images[curr_frame_num - 1], scene, bboxes)
                    for att in frame.iter('attribute'):
                        if (att.attrib['vehicle_type']):
                            car_per_frame.append(att.attrib['vehicle_type'])
                        if (att.attrib['speed']):
                            speed_per_frame.append(self._convert_speed(
                                float(att.attrib['speed'])))
                    assert (len(car_per_frame) == len(speed_per_frame))
                    assert (len(car_per_frame) == len(color_per_frame))
                    assert (len(car_per_frame) == len(intersection_per_frame))
                    # Empty frames are recorded as None ([] or None -> None)
                    car_labels.append(car_per_frame or None)
                    speed_labels.append(speed_per_frame or None)
                    color_labels.append(color_per_frame or None)
                    intersection_labels.append(intersection_per_frame or None)
                    start_frame = False
        return [car_labels, speed_labels, color_labels, intersection_labels]

    def _load_images(self, image_dir, downsize_rate=1, grayscale=False):
        """Load every .jpg under *image_dir* (sorted) into one uint8 array
        of shape (n, H/d, W/d, 3), or (n, H/d, W/d, 1) for grayscale.
        """
        print("image directory is ", image_dir)
        file_names = []
        for root, subdirs, files in os.walk(image_dir):
            files.sort()
            for file in files:
                if '.jpg' in file:
                    # fix: this append used to be duplicated, loading every
                    # frame into the array twice
                    file_names.append(os.path.join(root, file))
        print("Number of files added: ", len(file_names))
        channels = 1 if grayscale else self.image_channels
        img_table = np.ndarray(shape=(
            len(file_names), self.image_height // downsize_rate,
            self.image_width // downsize_rate, channels), dtype=np.uint8)
        for i, file_name in enumerate(file_names):
            if grayscale:
                img = cv2.imread(file_name, 0)
                # fix: cv2.resize takes (width, height); the grayscale path
                # had them swapped.  Also add the trailing channel axis so
                # the 2-D image fits the (H, W, 1) slot.
                img = cv2.resize(img, (self.image_width // downsize_rate,
                                       self.image_height // downsize_rate))
                img = img[:, :, np.newaxis]
            else:
                img = cv2.imread(file_name)
                img = cv2.resize(img, (self.image_width // downsize_rate,
                                       self.image_height // downsize_rate))
            img_table[i] = img
        return img_table

    def load_images_nn(self, image_dir, downsize_rate=1, grayscale=False):
        """Like _load_images but non-normalized int16 and without the .jpg
        filter (every file under *image_dir* is treated as an image).
        """
        file_names = []
        for root, subdirs, files in os.walk(image_dir):
            files.sort()
            for file in files:
                file_names.append(os.path.join(root, file))
        channels = 1 if grayscale else self.image_channels
        img_table = np.ndarray(shape=(
            len(file_names), self.image_height // downsize_rate,
            self.image_width // downsize_rate, channels), dtype=np.int16)
        for i, file_name in enumerate(file_names):
            if grayscale:
                img = cv2.imread(file_name, 0)
                img = cv2.resize(img, (self.image_width // downsize_rate,
                                       self.image_height // downsize_rate))
                # grayscale images are 2-D; add the channel axis
                img_table[i] = img[:, :, np.newaxis]
            else:
                img = cv2.imread(file_name, 1)
                img = cv2.resize(img, (self.image_width // downsize_rate,
                                       self.image_height // downsize_rate))
                # fix: the color path also appended np.newaxis, producing a
                # (H, W, 1, 3) array that cannot be assigned into (H, W, 3)
                img_table[i] = img
        return img_table
class LoadTest:
    """Small driver that exercises a Load instance against the sample set."""

    def __init__(self, load):
        # Loader whose .load(dir_dict) method is invoked by run().
        self.load = load

    def run(self):
        """Load the small UA-DETRAC sample data and save the train split."""
        started = time.time()
        project_root = os.path.dirname(
            os.path.dirname(os.path.abspath(__file__)))
        data_root = os.path.join(project_root, "data", "ua_detrac")
        train_image_dir = os.path.join(data_root, "small-data")
        train_anno_dir = os.path.join(data_root, "small-annotation")
        test_image_dir = None  # the sample set ships no test images
        dir_dict = {"train_image": train_image_dir,
                    "test_image": test_image_dir,
                    "train_anno": train_anno_dir}
        if __debug__:
            print(("train image dir: " + train_image_dir))
            print(("train annotation dir: " + train_anno_dir))
        dt_train, dt_test = self.load.load(dir_dict)
        Load().save("small.csv", dt_train)
        if __debug__:
            print(("--- Total Execution Time : %.3f seconds ---" % (
                time.time() - started)))
            print((dt_train.shape))
            if test_image_dir is not None:
                print((dt_test.shape))
# Script entry point: build a loader, then read the previously saved CSV
# back in and report its shape.
if __name__ == "__main__":
    load = Load()
    load_test = LoadTest(load)
    # load_test.run()
    panda_table = Load().load_from_csv("small.csv")
    a = 1 + 2
    if __debug__:
        print(("panda shape is " + str(panda_table.shape)))
| true |
17354dbe9da78706cdf026a391ab1f5665967af2 | Python | jcguevarag/Megamovie | /imgcompare.py | UTF-8 | 6,544 | 2.84375 | 3 | [] | no_license | # Import all necessary libraries. Skimage is shorthand for scikit-image
import skimage.io
import skimage.util
import skimage.color
import skimage.transform
import copy
import cv2
import numpy as np
import multiprocessing
from matplotlib import pyplot as plt
from configuration import Configuration
from datetime import datetime
from joblib import Parallel, delayed
def analyze(mat,c,b,kp1,kp2):
    """For one ORB keypoint match, try every rotation of image *b* around the
    matched point and return the best-scoring Configuration.

    mat is a (queryIdx, trainIdx) pair into kp1/kp2; c is the inverted
    constant image; b is the grayscale image being aligned.
    NOTE(review): this reads the module-level `matches` list (for the
    progress message) — it only works when called from the __main__ block.
    """
    THRESHOLD = 0.2 # The highest pixel value that will be considered as part of the corona
    REALLY_BIG = 10 # Placeholder value. Can be changed to anything above 1.
    ROTATE_BY = 90 # Angle rotation step. Change this to change the degrees you want to rotate it by.
    STORE_EVERY = 10 # Every STORE_EVERY configuration will be saved. (Currently unused here.)
    configurations = None
    # Integer pixel coordinates of the matched keypoint in each image.
    img1_points =[int(x) for x in kp1[mat[0]][0]]
    img2_points = [int(x) for x in kp2[mat[1]][0]]
    # Translation that maps image-1's keypoint onto image-2's keypoint.
    offset_x = img2_points[0] - img1_points[0]
    offset_y = img2_points[1] - img1_points[1]
    for j in range(0,360,ROTATE_BY): # Rotate up to 360 Degrees by the Angle Step
        print('We are on match: {} We are on degree: {}'.format(str(matches.index(mat) + 1),str(j)), 'The Time is: {}'.format(str(datetime.now())))
        f = copy.deepcopy(c)
        e = skimage.transform.rotate(b, j, center=img2_points, preserve_range=True)
        e = skimage.util.invert(e) # Prepare the image to be super-imposed on the constant image
        smallest_row = None # The smallest row where a corona pixel is detected using THRESHOLD
        smallest_col = (0,REALLY_BIG) # The smallest column where a corona pixel is detected using THRESHOLD
        greatest_row = (0,0) # The greatest row where a corona pixel is detected using THRESHOLD
        greatest_col = (0,0) # The greatest col where a corona pixel is detected using THRESHOLD
        for x in range(len(f)): # For every row in the constant image
            for y in range(len(f[x])): # For every column in that row
                # Out-of-bounds pixels are treated as white (1) on both sides.
                try:
                    val_a = f[x][y]
                except IndexError:
                    val_a = 1
                try:
                    val_b = e[x + offset_x][y + offset_y]
                except IndexError:
                    val_b = 1
                # Superimpose by taking the darker of the two pixels.
                val = min(val_a, val_b)
                if smallest_row is None and val < THRESHOLD:
                    smallest_row = (x,y)
                if val < THRESHOLD and y < smallest_col[1]:
                    smallest_col = (x,y)
                if val < THRESHOLD:
                    greatest_row = (x,y)
                if val < THRESHOLD and y > greatest_col[1]:
                    greatest_col = (x,y)
                f[x][y] = val # Record the super-imposed value into f
        config = Configuration(smallest_row,smallest_col,greatest_row,greatest_col,f,e,j) # Creates a new configuration
        # Keep only the best configuration (Configuration defines ordering).
        if configurations is None or config < configurations:
            configurations = config
    return configurations
if __name__ == '__main__':
    NUM_CORES = 6
    # img5 stays fixed ("a"); img6 ("b") is rotated/translated to fit it.
    orig_a = skimage.io.imread('img5.jpg')
    orig_b = skimage.io.imread('img6.jpg')
    a = skimage.color.rgb2gray(orig_a) # The static, already aligned image in grayscale
    b = skimage.color.rgb2gray(orig_b) # The modular image that we are trying to align in grayscale
    c = skimage.util.invert(a) # Invert the image so that black becomes white, and vice-versa
    # g = skimage.util.invert(b)
    configurations = None # Stores the best configuration
    orb = cv2.ORB_create()
    # find the keypoints and descriptors with ORB (comment previously said SIFT)
    kp1, des1 = orb.detectAndCompute(np.array(orig_a, dtype=np.uint8), None)
    kp2, des2 = orb.detectAndCompute(np.array(orig_b, dtype=np.uint8), None)
    # Free the full-color originals; only grayscale copies are needed now.
    del (orig_a)
    del (orig_b)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    # Convert keypoints/matches to plain tuples so they can be pickled and
    # shipped to joblib worker processes.
    kp1 = [(point.pt, point.size, point.angle, point.response, point.octave,
        point.class_id) for point in kp1]
    kp2 = [(point.pt, point.size, point.angle, point.response, point.octave,
        point.class_id) for point in kp2]
    matches = [(mat.queryIdx, mat.trainIdx) for mat in matches]
# def check_rotation(rotate):
# configurations = None
# for j in range(0,360,rotate): # Rotate up to 360 Degrees by the Angle Step
# print('We are on match: {} We are on degree: {}'.format(str(matches.index(mat) + 1),str(j)), 'The Time is: {}'.format(str(datetime.now())))
# f = copy.deepcopy(c)
# e = skimage.transform.rotate(b, j, center=img2_points, preserve_range=True)
# e = skimage.util.invert(e) # Prepare the image to be super-imposed on the constant image
# smallest_row = None # The smallest row where a corona pixel is detected using THRESHOLD
# smallest_col = (0,REALLY_BIG) # The smallest column where a corona pixel is detected using THRESHOLD
# greatest_row = (0,0) # The greatest row where a corona pixel is detected using THRESHOLD
# greatest_col = (0,0) # The greatest col where a corona pixel is detected using THRESHOLD
#
# for x in range(len(f)): # For every row in the constant image
# for y in range(len(f[x])): # For every column in that row
# try:
# val_a = f[x][y]
# except IndexError:
# val_a = 1
# try:
# val_b = e[x + offset_x][y + offset_y]
# except IndexError:
# val_b = 1
# val = min(val_a, val_b)
# if smallest_row is None and val < THRESHOLD:
# smallest_row = (x,y)
# if val < THRESHOLD and y < smallest_col[1]:
# smallest_col = (x,y)
# if val < THRESHOLD:
# greatest_row = (x,y)
# if val < THRESHOLD and y > greatest_col[1]:
# greatest_col = (x,y)
# f[x][y] = val # Record the super-imposed value into f
# config = Configuration(smallest_row,smallest_col,greatest_row,greatest_col,f,e,j) # Creates a new configuration
# if configurations is None or config < configurations:
# configurations = config
# return configurations
    # Fan analyze() out across all cores; each worker returns its best
    # Configuration for one keypoint match.
    stored = Parallel(n_jobs=multiprocessing.cpu_count())(delayed(analyze)(mat,c,b,kp1,kp2) for mat in matches)
1314af739939105f873d383cc5fdff5435c70d58 | Python | Guannan/mouse_robot_project | /motor/keypress_logger.py | UTF-8 | 663 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python
import pygame
from pygame.locals import *
pygame.init()
# A tiny 50x50, 16-bit window: pygame needs a display to deliver key events.
screen = pygame.display.set_mode((50, 50), 0, 16)
def driver ():
while 1:
event = pygame.event.poll()
if event.type == QUIT:
break
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
break
elif event.key == K_UP:
print 'Up Arrow Pressed'
elif event.key == K_DOWN:
print 'Down Arrow Pressed'
elif event.key == K_LEFT:
print 'Left Arrow Pressed'
elif event.key == K_RIGHT:
print 'Right Arrow Pressed'
| true |
0f85de234b94399ae1d3574fe5a863f07e81ebac | Python | MichalMarsalek/Advent-of-code | /2015/Day 6.py | UTF-8 | 846 | 3.328125 | 3 | [] | no_license | def solve(input):
input = input.replace("turn ", "").replace(",", " ").replace("through ", "")
part1 = sum(map(sum, turn_lights(input, bitwise)))
part2 = sum(map(sum, turn_lights(input, brightness)))
return part1, part2
def turn_lights(input, rule_f):
    """Apply each instruction line to a 1000x1000 grid and return the grid.

    Each line of *input* must be "<cmd> <x> <y> <X> <Y>" where cmd is "on",
    "off" or "toggle" and (x, y)-(X, Y) is an inclusive rectangle.
    rule_f(current_value, cmd) computes a cell's new value.

    Bug fix: the grid is now returned — the original had no return
    statement, so solve() crashed on `map(sum, None)`.
    """
    field = [[False for x in range(1000)] for x in range(1000)]
    for line in input.split("\n"):
        cmd, x, y, X, Y = line.split(" ")
        for xi in range(int(x), int(X)+1):
            for yi in range(int(y), int(Y)+1):
                field[yi][xi] = rule_f(field[yi][xi], cmd)
    return field
def bitwise(val, cmd):
    """Part-1 rule: "toggle" flips the light, otherwise "on"/"off" sets it."""
    if cmd == "toggle":
        return not val
    return cmd == "on"
def brightness(val, cmd):
    """Part-2 rule: brightness changes by -1/+1/+2, never dropping below 0."""
    deltas = {"off": -1, "on": 1, "toggle": 2}
    return max(0, val + deltas[cmd])
def run(day):
    """Read 'Day <day> input.txt' and print both puzzle answers.

    Fix: the original opened the file without closing it (and bound the
    handle twice via `input = input = open(...)`); a with-block releases it
    deterministically.
    """
    with open('Day ' + str(day) + ' input.txt') as f:
        print(solve(f.read()))
run(6)
| true |
6171bdfc06cfc6ca753d11b57513b47658bfb714 | Python | PiotrMakarewicz/MiniSocialNetwork | /generator/generator.py | UTF-8 | 12,254 | 2.859375 | 3 | [] | no_license | from neo4j import GraphDatabase, basic_auth, Result
from dotenv import load_dotenv
from faker import Faker
from datetime import date
from datetime import datetime as dt
import os
import random
import uuid
# Connection settings come from the environment (.env file via python-dotenv).
load_dotenv()
url = os.getenv("NEO4J_URL")
username = os.getenv("NEO4J_USER")
password = os.getenv("NEO4J_PASSWORD")
database = os.getenv("NEO4J_DATABASE")
port = os.getenv("PORT", 8080)
# One shared driver/session for the whole generation run; closed at the end.
driver = GraphDatabase.driver(url, auth=basic_auth(username, password))
db = driver.session(database=database)
# Single Faker instance shared by all create_fake_* helpers.
fake = Faker()
def create_fake_user() -> dict:
    """
    Build a dictionary mirroring the attributes of a User node.

    @return: a dict ready to be passed to add_user().
    """
    global fake
    return {
        'name': fake.name(),
        'creation_datetime': fake.date_time_between('-1y').strftime("%Y-%m-%dT%H:%M:%S"),
        'description': fake.paragraph(),
        # Roughly 1 in 101 users ends up as an admin.
        'role': random.choices(['admin', 'none'], [1, 100])[0],
        'password_hash': uuid.uuid4().hex,
        'avatar': 'https://via.placeholder.com/300/09f/fff.png',
    }
def create_fake_post(first_possible_creation_datetime: dt, with_photo: bool) -> dict:
    """
    Build a dictionary mirroring the attributes of a Post node.

    @param first_possible_creation_datetime: ISO string ("%Y-%m-%dT%H:%M:%S")
        giving the earliest moment the post may have been created (despite
        the dt annotation, callers pass a formatted string).
    @param with_photo: when True a placeholder photo URL is stored, otherwise
        photo_address is an empty string.
    @return: a dict ready to be passed to add_post().
    """
    global fake
    post = dict()
    earliest = dt.strptime(first_possible_creation_datetime, "%Y-%m-%dT%H:%M:%S")
    post['creation_datetime'] = fake.date_time_between(earliest).strftime("%Y-%m-%dT%H:%M:%S")
    # Half of the posts are "edited": choose between None and a datetime
    # drawn after the creation moment.
    candidate = fake.date_time_between_dates(
        dt.strptime(post['creation_datetime'], "%Y-%m-%dT%H:%M:%S"), None)
    updated = random.choice([None, candidate])
    post['update_datetime'] = (
        None if updated is None else updated.strftime("%Y-%m-%dT%H:%M:%S"))
    post['content'] = fake.paragraph()
    post['photo_address'] = (
        'https://via.placeholder.com/500/02f/a0f.png' if with_photo else '')
    return post
def create_fake_tag() -> dict:
    """
    Build a dictionary mirroring the attributes of a Tag node.

    @return: a dict with a single 'name' key, ready for add_tag().
    """
    global fake
    tag = dict()
    tag['name'] = fake.word()
    return tag
def add_user(user: dict) -> int:
    """
    Adds a new user and returns their ID.

    @param user: a dictionary with attributes equivalent to the ones of a
        User node in the database
    @return: a single number, the ID of the newly added user
    """
    # Use query parameters instead of string formatting: Faker produces
    # names like "O'Brien" whose apostrophe would break the Cypher literal,
    # and parameters also rule out injection.
    result = db.run(
        "CREATE (n:User {name: $name, creation_datetime: $creation_datetime, "
        "avatar: $avatar, description: $description, role: $role, "
        "password_hash: $password_hash}) RETURN id(n)",
        user)
    return result.data()[0]['id(n)']
def add_post(author_id: int, post: dict) -> int:
    """
    Adds a new post, creates the AUTHOR_OF relationship between the author
    node and the post node, and returns the ID of the new post.

    @param author_id: the ID of an author User node that already exists
    @param post: a dictionary with attributes equivalent to a Post node
    @return: a single number, the ID of the newly added post
    """
    # Parameterized Cypher: Faker paragraphs may contain quotes that would
    # break a formatted literal. A None update_datetime is now stored as a
    # real null instead of the string 'None'.
    result = db.run(
        "CREATE (n:Post {content: $content, creation_datetime: $creation_datetime, "
        "update_datetime: $update_datetime, photo_address: $photo_address}) "
        "RETURN id(n)",
        post)
    post_id = result.data()[0]['id(n)']
    db.run(
        "MATCH (u:User), (p:Post) WHERE id(u) = $author_id AND id(p) = $post_id "
        "CREATE (u)-[r:AUTHOR_OF]->(p)",
        {'author_id': author_id, 'post_id': post_id})
    return post_id
def add_tag(tag: dict) -> int:
    """
    Adds a new tag to the database.

    @param tag: a dictionary with attributes equivalent to a Tag node
    @return: a single number, the ID of the newly added tag
    """
    # Parameterized query: safe against quotes in generated words.
    result = db.run("CREATE (t:Tag {name: $name}) RETURN id(t)", tag)
    return result.data()[0]['id(t)']
def add_observes_between(observer_id: int, observed_id: int, since: dt) -> None:
    """
    Creates the OBSERVES relationship between two User nodes.

    @param observer_id: ID of an observer User node already in the database
    @param observed_id: ID of an observed User node already in the database
    @param since: ISO-formatted datetime string stored on the relationship
    """
    # Parameterized query: immune to quoting issues and injection.
    db.run(
        "MATCH (u:User), (v:User) WHERE id(u) = $observer AND id(v) = $observed "
        "CREATE (u)-[r:OBSERVES {since: $since}]->(v)",
        {'observer': observer_id, 'observed': observed_id, 'since': since})
def add_likes_between(user_id: int, post_id: int, datetime: dt) -> None:
    """
    Creates the LIKES relationship between the User node represented by
    user_id and the Post node represented by post_id.

    @param user_id: ID of a User node already existing in the database
    @param post_id: ID of a Post node already existing in the database
    @param datetime: ISO-formatted datetime string stored on the edge
    """
    # Parameterized query: immune to quoting issues and injection.
    db.run(
        "MATCH (u:User), (p:Post) WHERE id(u) = $user_id AND id(p) = $post_id "
        "CREATE (u)-[r:LIKES {datetime: $datetime}]->(p)",
        {'user_id': user_id, 'post_id': post_id, 'datetime': datetime})
def add_dislikes_between(user_id: int, post_id: int, datetime: dt) -> None:
    """
    Creates the DISLIKES relationship between the User node represented by
    user_id and the Post node represented by post_id.

    @param user_id: ID of a User node already existing in the database
    @param post_id: ID of a Post node already existing in the database
    @param datetime: ISO-formatted datetime string stored on the edge
    """
    # Parameterized query: immune to quoting issues and injection.
    db.run(
        "MATCH (u:User), (p:Post) WHERE id(u) = $user_id AND id(p) = $post_id "
        "CREATE (u)-[r:DISLIKES {datetime: $datetime}]->(p)",
        {'user_id': user_id, 'post_id': post_id, 'datetime': datetime})
def add_refers_to_between(referring_id: int, referred_id: int) -> None:
    """
    Creates the REFERS_TO relationship between two Post nodes.

    @param referring_id: ID of the referring Post node
    @param referred_id: ID of the referred (older) Post node
    """
    # Parameterized query for consistency with the other add_* helpers.
    db.run(
        "MATCH (p:Post), (q:Post) WHERE id(p) = $referring AND id(q) = $referred "
        "CREATE (p)-[r:REFERS_TO]->(q)",
        {'referring': referring_id, 'referred': referred_id})
def add_tagged_as_between(post_id: int, tag_id: int) -> None:
    """
    Creates the TAGGED_AS relationship between a Post node and a Tag node.

    @param post_id: ID of a Post node already existing in the database
    @param tag_id: ID of a Tag node already existing in the database
    """
    # Parameterized query for consistency with the other add_* helpers.
    db.run(
        "MATCH (p:Post), (t:Tag) WHERE id(p) = $post_id AND id(t) = $tag_id "
        "CREATE (p)-[r:TAGGED_AS]->(t)",
        {'post_id': post_id, 'tag_id': tag_id})
def add_user_for_testing():
    """
    Adds a fixed user that can later be used to test the program.

    # username: Userof Minisocialnetwork
    # password: password
    # password_hash: 5f4dcc3b5aa765d61d8327deb882cf99
    """
    global fake
    add_user({
        'name': "Userof Minisocialnetwork",
        'creation_datetime': fake.date_time_between('-1y').strftime("%Y-%m-%dT%H:%M:%S"),
        'description': "A user that can be later used to test the program",
        'role': random.choices(['admin', 'none'], [1, 100])[0],
        # MD5 of the word "password".
        'password_hash': "5f4dcc3b5aa765d61d8327deb882cf99",
        'avatar': 'https://via.placeholder.com/300/09f/fff.png',
    })
# Knobs controlling the size and shape of the generated graph.
db_params = {
    "users": 200,
    "min user posts": 2,
    "max user posts": 10,
    "min observed by user": 0,
    "max observed by user": 10,
    "min liked posts per user": 0,
    "max liked posts per user": 10,
    "min disliked posts per user": 0,
    "max disliked posts per user": 5,
    "tags": 30,
    "posts with photos freq": 0.1,  # probability a post carries a photo
    "min tags per post": 0,
    "max tags per post": 5,
    "referring post chance": 0.2  # probability a post references an older one
}
def generate_database():
    """Populate the graph with fake Users, Posts and Tags plus their
    OBSERVES / AUTHOR_OF / LIKES / DISLIKES / REFERS_TO / TAGGED_AS edges,
    sized according to db_params. Finishes by inserting the fixed test user.
    """
    # All entries in a form: "id: node-representing-dictionary"
    users = dict()
    posts = dict()
    tags = dict()

    ### Add User nodes
    for _ in range(db_params['users']):
        user = create_fake_user()
        user_id = add_user(user)
        users[user_id] = user

    ### Add OBSERVES relationships
    for user_id, user in users.items():
        other_user_ids = list(set(users.keys()).difference({user_id}))
        num_observed = random.randint(db_params['min observed by user'], db_params['max observed by user'])
        observed_ids = random.sample(other_user_ids, k=num_observed)
        for observed_id in observed_ids:
            observed = users[observed_id]
            # An observation can only start once both accounts exist.
            datetime_begin = user['creation_datetime'] if user['creation_datetime'] > observed['creation_datetime'] else observed['creation_datetime']
            since = fake.date_time_between(dt.strptime(datetime_begin, "%Y-%m-%dT%H:%M:%S")).strftime("%Y-%m-%dT%H:%M:%S")
            add_observes_between(user_id, observed_id, since)

    ### Add Post nodes and AUTHOR_OF relationships
    for user_id, user in users.items():
        num_posts = random.randint(db_params['min user posts'], db_params['max user posts'])
        for _ in range(num_posts):
            post = create_fake_post(user['creation_datetime'], with_photo=(random.uniform(0, 1) < db_params['posts with photos freq']))
            post_id = add_post(user_id, post)
            posts[post_id] = post

    ### Add LIKES and DISLIKES relationships
    for user_id, user in users.items():
        num_liked_posts = random.randint(db_params['min liked posts per user'], db_params['max liked posts per user'])
        num_disliked_posts = random.randint(db_params['min disliked posts per user'], db_params['max disliked posts per user'])
        # random.sample requires a sequence (sampling dict views / sets was
        # removed in Python 3.11), hence the explicit list() conversions.
        liked_posts_ids = random.sample(list(posts.keys()), k=num_liked_posts)
        # Dislikes are drawn from the posts the user did not like.
        disliked_posts_ids = random.sample(list(set(posts.keys()).difference(set(liked_posts_ids))), k=num_disliked_posts)
        for post_id in liked_posts_ids:
            post = posts[post_id]
            # A reaction can only happen after both the post and the user exist.
            datetime_begin = post['creation_datetime'] if post['creation_datetime'] > user['creation_datetime'] else user['creation_datetime']
            datetime = fake.date_time_between(dt.strptime(datetime_begin, "%Y-%m-%dT%H:%M:%S")).strftime("%Y-%m-%dT%H:%M:%S")
            add_likes_between(user_id, post_id, datetime)
        for post_id in disliked_posts_ids:
            post = posts[post_id]
            datetime_begin = post['creation_datetime'] if post['creation_datetime'] > user['creation_datetime'] else user['creation_datetime']
            datetime = fake.date_time_between(dt.strptime(datetime_begin, "%Y-%m-%dT%H:%M:%S")).strftime("%Y-%m-%dT%H:%M:%S")
            add_dislikes_between(user_id, post_id, datetime)

    ### Add REFERS_TO relationships
    def get_random_post_created_before(cutoff: str):
        # Bug fix: the parameter used to be named and annotated `datetime`,
        # but no name `datetime` is bound in this module (only `date` and
        # `dt` are imported), so evaluating the annotation raised NameError.
        nonlocal posts
        # Rejection-sample up to 25 times for a post older than `cutoff`.
        for _ in range(25):
            post_id, post = random.choice(list(posts.items()))
            if post['creation_datetime'] < cutoff:
                return post_id
        return None

    for referring_id, referring_post in posts.items():
        if random.uniform(0, 1) < db_params['referring post chance']:
            while True:
                referred_id = get_random_post_created_before(referring_post['creation_datetime'])
                if referred_id != referring_id:
                    break
            if referred_id is not None:
                add_refers_to_between(referring_id, referred_id)

    ### Add Tag nodes
    for _ in range(db_params['tags']):
        tag = create_fake_tag()
        tag_id = add_tag(tag)
        tags[tag_id] = tag

    ### Add TAGGED_AS relationships
    for post_id, post in posts.items():
        num_tags = random.randint(db_params["min tags per post"], db_params["max tags per post"])
        tag_ids = random.sample(list(tags.keys()), k=num_tags)
        for tag_id in tag_ids:
            add_tagged_as_between(post_id, tag_id)

    add_user_for_testing()
# Generate the fake graph when the script runs; always release the Neo4j
# session, even if generation fails partway through.
try:
    generate_database()
finally:
    db.close()
| true |
42d8d7171d334b38de91fc7f392293600248453b | Python | influence-usa/campaign-finance_state_PA | /utils/download.py | UTF-8 | 3,514 | 2.609375 | 3 | [
"MIT"
] | permissive | import os
import logging
import time
from multiprocessing.dummy import Pool as ThreadPool
from utils import set_up_logging
log = set_up_logging('download', loglevel=logging.DEBUG)
# GENERAL DOWNLOAD FUNCTIONS
def response_download(response, output_loc):
    """Stream a successful HTTP response body to output_loc.

    Returns the server-reported content-length ('N/A' when absent).
    Raises on a non-OK response or a write failure so the caller's retry
    loop in download() can kick in (previously a write failure was logged
    and silently reported as success).
    """
    if response.ok:
        try:
            with open(output_loc, 'wb') as output_file:
                # Stream in 64 KiB chunks; the bare iter_content() default
                # yields one byte per iteration, which is extremely slow.
                for chunk in response.iter_content(chunk_size=65536):
                    output_file.write(chunk)
            return response.headers.get('content-length', 'N/A')
        except Exception as e:
            log.error(e)
            raise
    else:
        log.error('response not okay: '+response.reason)
        raise Exception('didn''t work, trying again')
def log_result(result):
    """Log one outcome tuple produced by download().

    The tuple is ('success'|'failure', url, location, detail); anything
    else is a programming error.
    """
    status = result[0]
    if status == 'success':
        url, loc, content_length = result[1:]
        log.info(
            'success: {source} => {dest}({size})'.format(
                source=url, dest=loc, size=content_length))
    elif status == 'failure':
        url, loc, exception = result[1:]
        log.info(
            'failure: {source} => {dest}\n    {e}'.format(
                source=url, dest=loc, e=str(exception)))
    else:
        raise Exception
def download(val, get_response_loc_pair, options):
    """Download one item with up to five retries.

    get_response_loc_pair(val) must yield a (requests-style response,
    output path) pair. options recognises 'force' (re-download even when
    the cached copy matches). Returns a ('success'|'failure', url, loc,
    detail) tuple for log_result().
    """
    force = options.get('force', False)
    # Up to five attempts; each one opens a fresh response. (xrange: this
    # module targets Python 2.)
    for i in xrange(5):
        _response, _loc = get_response_loc_pair(val)
        _url = _response.url
        if is_not_cached(_response, _loc) or force:
            try:
                content_length = response_download(_response, _loc)
                return ('success', _url, _loc, content_length)
            except Exception:
                # Back off briefly before the next attempt.
                log.warn('{url} something went wrong, trying again '
                         '({code} - {reason})'.format(
                    url=_response.url,
                    code=_response.status_code,
                    reason=_response.reason))
                time.sleep(5)
        else:
            # A complete local copy exists and force is off.
            log.info('cached, not re-downloading')
            return('success', _url, _loc, 'cached')
    # All five attempts failed.
    return ('failure', _response.url, _loc, '[{code}] {reason}'.format(
        code=_response.status_code, reason=_response.reason))
def is_not_cached(response, output_loc):
    """Return True when output_loc is absent or its size differs from the
    server's content-length, i.e. a (re-)download is needed.

    Closes the response when the cached copy is judged complete. (A leftover
    no-op statement `response, output_loc` was removed.)
    """
    if os.path.exists(output_loc):
        downloaded_size = int(os.path.getsize(output_loc))
        log.debug(
            'found {output_loc}: {size}'.format(
                output_loc=output_loc,
                size=downloaded_size))
        # Missing content-length is treated as 0, which forces a re-download.
        size_on_server = int(response.headers.get('content-length', 0))
        if downloaded_size != size_on_server:
            log.debug(
                're-downloading {url}: {size}'.format(
                    url=response.url,
                    size=size_on_server))
            return True
        else:
            response.close()
            return False
    else:
        return True
def download_all(vals, get_response_loc_pair, options):
    """Download every item in vals, optionally through a thread pool.

    options recognises 'threaded' (bool) and 'thread_num' (pool size).
    """
    if not options.get('threaded', False):
        # Sequential fallback.
        for val in vals:
            log_result(download(val, get_response_loc_pair, options))
        return
    log.info("starting threaded download")
    pool = ThreadPool(options.get('thread_num', 4))
    for val in vals:
        log.debug("async start for {}".format(str(val)))
        pool.apply_async(download,
                         args=(val, get_response_loc_pair, options),
                         callback=log_result)
    pool.close()
    pool.join()
| true |
c56832641a3ff55aaa11bfd57fdc630788d3ef69 | Python | amrane99/CAI-Classification | /cai/models/classification/CNN.py | UTF-8 | 1,808 | 2.828125 | 3 | [
"MIT"
] | permissive | # ------------------------------------------------------------------------------
# This class represents different classification models.
# ------------------------------------------------------------------------------
import torch.nn as nn
import torch
from cai.models.model import Model
import torchvision.models as models
# Sigmoid layers are important for the BCELoss, to get multi-hot vectors
# for multi classification tasks.
class AlexNet(Model):
    r"""AlexNet backbone with its final layer replaced for multi-label
    image classification; outputs per-label sigmoid probabilities."""

    def __init__(self, num_labels):
        super(AlexNet, self).__init__()
        backbone = models.alexnet(pretrained=True)
        in_features = backbone.classifier[-1].in_features
        # Swap the last fully-connected layer for one sized to num_labels.
        backbone.classifier[-1] = nn.Linear(in_features, num_labels)
        backbone.eval()
        self.alexnet = backbone
        # Sigmoid (not softmax) so each label is scored independently, as
        # BCELoss expects for multi-label targets.
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        return self.sigmoid(self.alexnet(x))
class ResNet(Model):
    r"""ResNet-50 backbone for multi-label image classification.

    When feature_extraction is True the pretrained weights are frozen and
    only the replacement final layer remains trainable; otherwise the whole
    network fine-tunes.
    """

    def __init__(self, num_labels, feature_extraction):
        # Bug fix: the original called super(ResNet50, self).__init__(), but
        # the class is named ResNet, so construction raised NameError.
        super(ResNet, self).__init__()
        self.resnet = models.resnet50(pretrained=True)
        # Use feature extraction instead of finetuning.
        if feature_extraction:
            for param in self.resnet.parameters():
                param.requires_grad = False
        classifier_input = self.resnet.fc.in_features
        self.resnet.fc = nn.Linear(classifier_input, num_labels)
        self.resnet.eval()
        # Sigmoid so each label is scored independently (BCELoss).
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        yhat = self.resnet(x)
        yhat = self.sigmoid(yhat)
        return yhat
| true |
b57bc6dd1229b39c808f62e95ee3b320bb8d7d5e | Python | LichAmnesia/LeetCode | /python/96.py | UTF-8 | 701 | 2.8125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Author: Lich_Amnesia
# @Email: alwaysxiaop@gmail.com
# @Date: 2016-10-01 23:10:54
# @Last Modified time: 2016-10-01 23:16:05
# @FileName: 96.py
class Solution(object):
    def numTrees(self, n):
        """
        :type n: int
        :rtype: int

        Count structurally unique BSTs storing 1..n via bottom-up dynamic
        programming over the Catalan recurrence:
        dp[t] = sum over root positions of dp[left] * dp[right].
        """
        if n < 0:
            return 0
        dp = [0] * (n + 1)
        dp[0] = 1  # the empty tree
        for total in range(1, n + 1):
            for root in range(1, total + 1):
                dp[total] += dp[root - 1] * dp[total - root]
        return dp[n]
f502a8fc67070354450813f88660a8cdafc3ef70 | Python | ShwetaKale1708/Python | /hacker rank/runner up score.py | UTF-8 | 236 | 3.28125 | 3 | [] | no_license | #https://www.hackerrank.com/challenges/find-second-maximum-number-in-a-list/problem
# Read n (per the problem statement; the count itself is not needed) and the
# scores, then print the runner-up — the second-highest distinct score.
n = int(input())
scores = sorted(set(int(x) for x in input().split()))
# scores[-2] is the runner-up; with only one distinct score fall back to the
# maximum, matching the original index-arithmetic behaviour. This also drops
# the original's shadowing of the builtin `list` and the redundant
# max()/index() pass over an already-sorted list.
print(scores[-2] if len(scores) > 1 else scores[-1])
4e2d1b7d925925f45986338855cd53551c30ca2e | Python | ChocolateTan/Collector | /RSSCollector/rssconfig/urlinfo.py | UTF-8 | 1,893 | 2.609375 | 3 | [] | no_license | from enum import Enum
class UrlSource(Enum):
    """RSS feed endpoints supported by the collector; each member's value
    is the feed URL itself, so UrlSource(url) looks a source up by URL."""
    # Alternative 36kr feeds, kept for reference:
    # URL_36kr = "https://36kr.com/feed"
    URL_36kr = "https://36kr.com/feed-newsflash"
    # URL_36kr = "https://36kr.com/feed-article"
    URL_BLOG_GOOGLE = "https://blog.google/products/android/rss/"
    URL_MEITUAN = "https://tech.meituan.com/feed/"
    URL_TECHWEB = "http://www.techweb.com.cn/rss/hotnews.xml"
class UrlInfo(object):
    """Per-feed settings bundle: the UrlSource member, its RSS URL, the
    domain(s) a crawler may follow, and the strptime format of the feed's
    publication dates."""

    def __init__(self, sourceType, rss, allowedDomains, dateFormat):
        self.dateFormat = dateFormat          # strptime pattern for item dates
        self.allowedDomains = allowedDomains  # crawlable domain(s)
        self.rss = rss                        # feed URL
        self.sourceType = sourceType          # owning UrlSource member
# Shared RFC-822-style date pattern. NOTE(review): "+%f" makes strptime read
# the numeric zone offset (e.g. "+0800") as microseconds, so the timezone is
# effectively discarded — confirm whether that is intentional.
NORMAL_DATE_FORMAT = "%a, %d %b %Y %H:%M:%S +%f"

# One UrlInfo entry per supported feed, keyed by its UrlSource member.
URL_LIST = {
    UrlSource.URL_36kr: UrlInfo(
        UrlSource.URL_36kr,
        UrlSource.URL_36kr.value,
        "36kr.com",
        # 2023-04-14 16:05:18 +0800
        "%Y-%m-%d %H:%M:%S +%f"
    ),
    UrlSource.URL_BLOG_GOOGLE: UrlInfo(
        UrlSource.URL_BLOG_GOOGLE,
        UrlSource.URL_BLOG_GOOGLE.value,
        "blog.google",
        # Tue, 15 Nov 2022 14:00:00 +0000
        NORMAL_DATE_FORMAT
    ),
    UrlSource.URL_MEITUAN: UrlInfo(
        UrlSource.URL_MEITUAN,
        UrlSource.URL_MEITUAN.value,
        "tech.meituan.com",
        # Tue, 15 Nov 2022 14:00:00 +0000
        NORMAL_DATE_FORMAT
    ),
    UrlSource.URL_TECHWEB: UrlInfo(
        UrlSource.URL_TECHWEB,
        UrlSource.URL_TECHWEB.value,
        "www.techweb.com.cn",
        # Tue, 15 Nov 2022 14:00:00 +0000
        NORMAL_DATE_FORMAT
    )
}
def test_url():
    """URL_LIST must contain at least one configured feed."""
    assert URL_LIST
def test_find_url():
    """URL_LIST lookup via UrlSource(value) must round-trip to the member.

    Bug fix: the hard-coded "https://36kr.com/feed" is no longer any
    member's value (URL_36kr now points at /feed-newsflash), so
    UrlSource(url) raised ValueError. Use the member's current value.
    """
    url = UrlSource.URL_36kr.value
    source = UrlSource(url)
    urlInfo = URL_LIST[source]
    assert(urlInfo.sourceType is UrlSource.URL_36kr)
# Run the module's self-checks when executed directly.
if __name__ == '__main__':
    test_url()
    test_find_url()
| true |
5cdf669952e9e30ef8ef6f524d0b8fd36dc38094 | Python | yaobiqing0424/consistent_hash_py | /memcache_consistent_hash.py | UTF-8 | 3,987 | 2.71875 | 3 | [] | no_license | #!/usr/bin
# -*- encoding:utf-8 -*-
import zlib
from memcache import Client
from operator import itemgetter, attrgetter
mm_server = [{'host':'192.168.201.109', 'port':11211}, {'host':'192.168.1.96', 'port':11211}]
MMC_CONSISTENT_BUCKETS = 1024
MMC_CONSISTENT_POINTS = 160
class mmc_consistent:
    """Consistent-hash ring over memcached servers (Python 2).

    Each physical server contributes MMC_CONSISTENT_POINTS virtual points on
    a 32-bit CRC ring; lookups map a key's hash into one of
    MMC_CONSISTENT_BUCKETS precomputed buckets.
    NOTE(review): `state` is a class-level dict, so every instance shares
    (and mutates) the same ring — confirm whether per-instance state was
    intended.
    """
    state = {
        'num_server':0,
        'num_points':0,
        'points':[],
        'buckets_populated':0,
        'buckets':[]
    }

    def mmc_consistent_find(self, point):
        """Return the server owning `point` via binary search over the
        sorted virtual-point list (wrapping around the ring ends)."""
        #print point
        lo = 0
        hi = self.state['num_points'] - 1
        mid = 0
        while(1):
            # Wrap-around: anything before the first or after the last
            # point belongs to the first server on the ring.
            if point <= self.state['points'][lo]['point'] or point > self.state['points'][hi]['point']:
                #print 1;
                return self.state['points'][lo]['server']
            # Binary search. NOTE(review): `/` here is integer division only
            # under Python 2, which this file targets (print statements).
            mid = lo + (hi - lo) / 2;
            if not mid:
                mid = 0
            if point <= self.state['points'][mid]['point'] and point > self.state['points'][mid-1]['point']:
                return self.state['points'][mid]['server']
            if self.state['points'][mid]['point'] < point:
                lo = mid + 1;
            else:
                hi = mid - 1;

    def mmc_consistent_populate_buckets(self):
        """Sort the virtual points and precompute one server per bucket by
        sampling the ring at evenly spaced hash values."""
        step = 0xffffffff / MMC_CONSISTENT_BUCKETS
        self.state['points'] = sorted(self.state['points'], key=lambda point:point['point'])
        #self.state['points'] = sorted(self.state['points'], key = itemgetter('point'))
        #print self.state['points']
        #return
        for i in range(MMC_CONSISTENT_BUCKETS):
            self.state['buckets'].insert(i, self.mmc_consistent_find(step * i))
        self.state['buckets_populated'] = 1

    def mmc_hash(self,key):
        # CRC32 masked to an unsigned 32-bit value (crc32 may be negative).
        return zlib.crc32(key) & 0xffffffff

    # Adding a server: one physical server is expanded into many virtual
    # points on the ring (translated from the original Chinese comment).
    def mmc_consistent_add_server(self, server, weight):
        """Insert weight * MMC_CONSISTENT_POINTS virtual points for `server`
        (a {'host':..., 'port':...} dict) and invalidate the buckets."""
        #print state
        points = weight * MMC_CONSISTENT_POINTS
        for i in range(points):
            key = '%s:%d-%d' % (server['host'], server['port'], i)
            hash_result = self.mmc_hash(key)
            self.state['points'].insert(self.state['num_points']+i,{'server':server, 'point':hash_result})
        self.state['num_points'] += points
        self.state['num_server'] += 1
        self.state['buckets_populated'] = 0

    def mmc_consistent_compare(self, point_a, point_b):
        """Old-style comparator over virtual points (by 'point' value)."""
        if point_a['point'] <= point_b['point']:
            return -1
        if point_a['point'] > point_b['point']:
            return 1
        return 0
        #return state

    def mmc_consistent_find_server(self, key):
        """Map `key` to its server: hash into a bucket when several servers
        exist (rebuilding buckets lazily), else return the only server."""
        #print self.state
        if self.state['num_server'] > 1:
            if not self.state['buckets_populated']:
                self.mmc_consistent_populate_buckets()
            hash_result = self.mmc_hash(key)
            return self.state['buckets'][hash_result % MMC_CONSISTENT_BUCKETS]
        return self.state['points'][0]['server']
if __name__ == '__main__':
    def mm_find_key_host(mm_server, key):
        # Build a fresh ring from the configured servers and resolve `key`.
        mmc = mmc_consistent()
        for item in mm_server:
            mmc.mmc_consistent_add_server(server=item, weight=1)
        server = mmc.mmc_consistent_find_server(key)
        return server
    '''mmc = mmc_consistent()
    for item in mm_server:
        mmc.mmc_consistent_add_server(server=item, weight=1)'''
    #print mmc.state['points']
    # Verify: fetch each key from the server chosen by the consistent hash.
    key = ['test1','test2','test3','test4','test5','test6','test7','test8','test9']
    for item in key:
        server = mm_find_key_host(mm_server, item)
        print server
        mconfig = ['%s:%d' % (server['host'], server['port'])]
        mm = Client(mconfig)
        print mm.get(item)
        # Cross-check by letting python-memcached pick the server itself.
        mm_server_list = ['192.168.1.96:11211', '192.168.201.109:11211']
        mm = Client(mm_server_list)
        print mm.get(item)
| true |
03073867bd7aa364fc03739186630426f69ccbe5 | Python | jessiicacmoore/python-reinforcement-may13 | /exercise.py | UTF-8 | 666 | 4.3125 | 4 | [] | no_license | class Person:
def __init__(self, emotions):
self.mood = emotions
def __str__(self):
return f"Emotions: {self.mood}"
def get_mood(self):
for emotion, level in self.mood.items():
if level == 1:
print(f"I am feeling a low amount of {emotion}")
elif level == 2:
print(f"I am feeling a moderate amount of {emotion}")
elif level == 3:
print(f"I am feeling a high amount of {emotion}")
# Answer to the question below: `return` ends the whole function call
# immediately, so returning inside the loop exits get_mood() after the first
# matching emotion. print() just writes output and lets the loop continue.
# Sample emotional state used to exercise the Person class.
emotions = {
    'happiness': 3,
    'sadness': 1,
    'stress': 2
}
person1 = Person(emotions)
# print(person1)
# print(person1.mood)
person1.get_mood()
| true |
2273c7e99aaa93be5683172b35010c826a532bec | Python | ThiagoIvens/ChristianCode | /Cliente.py | UTF-8 | 3,103 | 3.015625 | 3 | [] | no_license | from threading import Thread
import time, socket
import datetime
import datetime
from datetime import timedelta
HOST = '127.0.0.1' # Endereco IP do Servidor
PORT_SERVIDOR = 1000 # Porta que o Servidor esta
PORT_USER = 2000 # Porta que o Cliente esta
def main():
    """Clock-synchronization client (Cristian-style algorithm).

    Flow: send our (skewed) local time t0 to the time server, then listen on
    PORT_USER for the server's reply "t0|t1|t2" where t1 is the server's
    receive time and t2 its send time.  With t3 = our receive time, the clock
    offset is ((t1 - t0) + (t2 - t3)) / 2, which is added to the current
    local time to print the corrected clock.
    """
    enviaProServidor()
    tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    orig = (HOST, PORT_USER)
    tcp.bind(orig)
    tcp.listen(1)
    con, servidor = tcp.accept()
    print('Concetado por', servidor)
    try:
        while True:
            msg = con.recv(1024)
            if not msg: break
            print (servidor, msg)
            # Message format: "H:M:S|H:M:S|H:M:S" = t0|t1|t2.
            msg = msg.decode().split("|")
            # t0: the time this client originally sent.
            t0 = msg[0].split(":")
            t0h = int(t0[0])
            t0min = int(t0[1])
            t0seg = int(t0[2])
            t0 = datetime.timedelta(hours=t0h, minutes=t0min, seconds=t0seg)
            # print(t0)
            # t1: the server's receive timestamp.
            t1 = msg[1].split(':')
            t1h = int(t1[0])
            t1min = int(t1[1])
            t1seg = int(t1[2])
            t1 = datetime.timedelta(hours=t1h, minutes=t1min, seconds=t1seg)
            # print(t1)
            # t2: the server's send timestamp.
            t2 = msg[2].split(':')
            t2h = int(t2[0])
            t2min = int(t2[1])
            t2seg = int(t2[2])
            t2 = datetime.timedelta(hours=t2h, minutes=t2min, seconds=t2seg)
            # print(t2)
            # t3: our local receive time, reduced to a time-of-day timedelta.
            t3 = datetime.datetime.now()
            # print("t3 -",t3.strftime("%H:%M:%S"))
            t3 = t3.strftime("%H:%M:%S").split(':')
            t3h = int(t3[0])
            t3min = int(t3[1])
            t3seg = int(t3[2])
            t3 = datetime.timedelta(hours=t3h, minutes=t3min, seconds=t3seg)
            t1_t0 = t1 - t0
            # print(t1_t0)
            t2_t3 = t2 - t3
            # print(t2_t3)
            # Estimated clock offset = ((t1 - t0) + (t2 - t3)) / 2.
            total = t1_t0 + t2_t3
            atraso = total/2
            # print(atraso, type(atraso))
            atual = datetime.datetime.now()
            # print(atual.strftime("%H:%M:%S"))
            '''
            atual = atual.strftime("%H:%M:%S").split(':')
            atual = datetime.timedelta(
                hours=int(atual[0]),
                minutes=int(atual[1]),
                seconds=int(atual[2])
            )
            '''
            # Corrected local clock = now + estimated offset.
            atualizada = atual + atraso
            print("Atualizada: ",atualizada.strftime("%H:%M:%S"))
    finally:
        print('Finalizando conexao com o servidor', servidor)
        con.close()
def enviaProServidor():
    """Send this client's (deliberately skewed) local time to the server.

    Connects to (HOST, PORT_SERVIDOR), sends the current wall-clock time as
    an "H:M:S" string with the minutes reduced by one (to simulate clock
    drift for the sync algorithm), then closes the connection.

    Fixes vs. original: removed the unused `sair = input` binding and renamed
    the local `min` which shadowed the builtin.
    """
    tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    dest = (HOST, PORT_SERVIDOR)
    tcp.connect(dest)
    horaAtual = pegarHora()
    # Extract "hh:mm:ss" from time.ctime()'s "Www Mmm dd hh:mm:ss yyyy".
    horaAtual = horaAtual.split()
    horaAtual = horaAtual[3].split(':')
    hora = int(horaAtual[0])
    minuto = int(horaAtual[1])
    seg = int(horaAtual[2])
    # Skew the reported time by one minute.
    # NOTE(review): at minute 0 this yields -1 (e.g. "10:-1:30") -- confirm
    # the server-side parser tolerates that, or wrap the subtraction.
    minuto -= 1
    # Build the "hour:minute:second" message and send it.
    msg = str(hora)+':'+str(minuto)+":"+str(seg)
    tcp.send(msg.encode('utf-8'))
    tcp.close()
def pegarHora():
    """Return the current local time in time.ctime()'s string form."""
    agora = time.ctime()
    return agora
# Run the clock-sync client when executed as a script.
if __name__ == "__main__":
    main()
aefd5aaa12fedcfc9f97c18517e40a67d46965b1 | Python | EuganeLebedev/Python_for_test | /quiz/quiz_lib.py | UTF-8 | 1,465 | 3.796875 | 4 | [] | no_license | #! /usr/bin/env python3
import random
"""
Проверка различных типов данных на примере игры в quizz
Добавлена проверка инициатора запуска
"""
# NOTE(review): the annotation says `question: str`, but when the module runs
# as a script the caller passes an int index -- the type hint looks stale;
# confirm the intended type.
def ask_answer(question: str):
    """Return the answer for *question*.

    In script mode (__main__) the canned answers tuple is indexed directly
    (self-test); when the module is imported, the user is prompted on stdin.
    """
    answers = ('синий','Солнце')
    if __name__ == '__main__':
        return answers[question]
    return input('Ответ: ')
def play_quiz():
    """Run a two-question quiz: show options, collect an answer, grade it.

    Question/answer text is in Russian and is printed verbatim.
    """
    dict_quiz_1 = {'question': 'цвет дневного неба?\n','correct_answer': 'синий','options': {'желтый', 'красный', 'черный', 'синий'}}
    dict_quiz_2 = {'question': 'Что светит ночью?\n','correct_answer': 'Солнце','options': {'Солнце', 'Луна'}}
    # (How do I build a collection of dicts?  A tuple of dicts works fine:)
    questions=(dict_quiz_1, dict_quiz_2)
    # (What is wrong with the next line?  `for i in questions` would iterate
    # the dicts themselves, while ask_answer() below needs an int index.)
    #for i in questions:
    for question in range(2):
        print(questions[question]['question'])
        # Options are a set, so their print order is unspecified.
        for option in questions[question]['options']:
            print(option)
        answer = ask_answer(question)
        print('\nОтвет:', answer)
        # Case-insensitive comparison against the correct answer.
        if answer.lower() == questions[question]['correct_answer'].lower():
            print('Правильно!\n')
        else:
            print('Не угадал!\n')
# Play the quiz when executed directly (ask_answer then uses canned answers).
if __name__ == '__main__':
    play_quiz()
| true |
a2dbb0c41a022b8d3499fbb4460cfc684de8f389 | Python | thewrongjames/chessapi | /tests/test_game/taking.py | UTF-8 | 467 | 3.71875 | 4 | [] | no_license | import chessapi
def test_taking(self):
    # NOTE(review): defined at module level but takes `self` -- this reads as
    # a unittest.TestCase method kept in its own file; it relies on
    # self.game / self.player_1 being set up by the test-case fixture.
    self.game.reset_board()
    # Place a white pawn in a position to take a black pawn.
    self.game.set_piece_at_position(
        (0, 5),
        chessapi.Pawn((0, 5), chessapi.WHITE, self.game)
    )
    # Take the black pawn.
    self.game.move((0, 5), (1, 6), self.player_1)
    # Assert that there is now a white piece at that position.
    self.assertEqual(self.game.piece_at_position((1, 6)).colour, chessapi.WHITE)
| true |
17093bba98fca76195875a92a83684762f924868 | Python | rahulkusuma1999/hackerank-problem-solving | /Time conversion.py | UTF-8 | 271 | 3.09375 | 3 | [] | no_license |
'''
problem Statement : https://www.hackerrank.com/challenges/time-conversion/problem
'''
# Convert a 12-hour "hh:mm:ssAM"/"hh:mm:ssPM" timestamp read from stdin to
# 24-hour military time.
raw = input().strip()
hour, minute, second = map(int, raw[:-2].split(':'))
meridiem = raw[-2:]
# 12 AM -> 00, 12 PM -> 12, any other PM hour gains 12.
hour = hour % 12 + (meridiem.upper() == 'PM') * 12
print(f"{hour:02d}:{minute:02d}:{second:02d}")
08ede2423f2797f382ac9d8d8337e57834c256fe | Python | ArchanGhosh/Indic-Translator | /ENG-BENGALI/attention_plot.py | UTF-8 | 569 | 3.0625 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def plot_attention(attention, sentence, predicted_sentence):
    """Render an attention-weight matrix as a heat map.

    Source tokens label the x axis, predicted tokens the y axis; each label
    list is padded with a leading '' as in the original, and major ticks are
    placed every cell.
    """
    figure = plt.figure(figsize=(10, 10))
    axes = figure.add_subplot(1, 1, 1)
    axes.matshow(attention, cmap='viridis')
    label_style = {'fontsize': 14}
    axes.set_xticklabels([''] + sentence, fontdict=label_style, rotation=90)
    axes.set_yticklabels([''] + predicted_sentence, fontdict=label_style)
    for axis in (axes.xaxis, axes.yaxis):
        axis.set_major_locator(ticker.MultipleLocator(1))
    plt.show()
6700231f6cc5646d106e5a069103f738f09f46a0 | Python | tscully49/celeb_baby_names | /relFreqNationally.py | UTF-8 | 397 | 3.171875 | 3 | [] | no_license | import pandas as pd
# Compute, for every year in [1880, 1988], each name's share of that year's
# total birth count, then write the augmented table out once.
#
# Bugs fixed vs. original:
#  * `name['rel_freq'] = ...` assigned to the Series copy yielded by
#    iterrows(), so the DataFrame was never modified and the output CSV had
#    no rel_freq column.  Replaced with a vectorized groupby/transform.
#  * df.to_csv() was re-run on every loop iteration; it now runs once.
df = pd.read_csv('data/NationalNames.csv')
in_range = df.Year.between(1880, 1988)  # same years as the original range(1880, 1989)
print("Status: computing per-year relative frequencies")
year_totals = df.loc[in_range].groupby('Year')['Count'].transform('sum')
df.loc[in_range, 'rel_freq'] = df.loc[in_range, 'Count'] / year_totals
df.to_csv('output/relFreqNationalNames.csv')
| true |
13755fd0eac6f90a1e11d66461f22eae39180e57 | Python | fapatipat/Twitrocity | /Twitrocity/gui/events.py | UTF-8 | 842 | 2.515625 | 3 | [] | no_license | import os, sys
import config,twitter
import wx
class EventsGui(wx.Frame):
    """Small wx window listing the entries in twitter.events."""
    def __init__(self):
        """Build the frame, fill the list box from twitter.events, add Close."""
        wx.Frame.__init__(self, None, title="Events", size=(350,200)) # initialize the wx frame
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.panel = wx.Panel(self)
        self.main_box = wx.BoxSizer(wx.VERTICAL)
        self.events_box = wx.BoxSizer(wx.VERTICAL)
        # NOTE(review): eventslist_label and events_box are created but never
        # attached to main_box (and main_box is never set as the panel sizer),
        # so the intended layout may not take effect -- verify in the UI.
        self.eventslist_label=wx.StaticText(self.panel, -1, "Events")
        self.eventslist = wx.ListBox(self.panel, -1)
        self.events_box.Add(self.eventslist, 0, wx.ALL, 10)
        # Append every known event to the list box, preserving order.
        for i in range(0,len(twitter.events)):
            self.eventslist.Insert(twitter.events[i],self.eventslist.GetCount())
        self.close = wx.Button(self.panel, wx.ID_CLOSE, "&Close")
        self.close.Bind(wx.EVT_BUTTON, self.OnClose)
        self.main_box.Add(self.close, 0, wx.ALL, 10)
        self.panel.Layout()
    def OnClose(self, event):
        """Destroy the window on close button / close event."""
        self.Destroy()
a9970c1e3621c1f3854c3b85d5cae1458151f7b7 | Python | nwam/GenreRecognition | /year_svr.py | UTF-8 | 2,831 | 2.6875 | 3 | [] | no_license | from helper import plots
import numpy as np
import matplotlib.pyplot as plt
import time
import sklearn
from sklearn.svm import SVR
from sklearn.metrics import confusion_matrix, mean_squared_error
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import optunity
import optunity.metrics
start_time = time.time()
np.set_printoptions(threshold=np.nan)
DATA_FILE = "data/YearPredictionMSD.txt"
OUTPUT_FILE = "year_confusion.csv"
PLOT_FILENAME = "year_confusion.png"
DELIM = ","
TRAIN_PERCENT = 0.9
N_PER_YEAR = 1000 # number of instances per class
YEAR_START = 1965
YEAR_END = 2010
YEARS = list(range(YEAR_START,YEAR_END+1))
y = [] # output
X = [] # input
y_test = []
y_train = []
X_test = []
X_train = []
# build a genre counter to have the same number of inputs per genre
year_count = {}
for year in YEARS:
year_count[year] = 0
# get x's and y's from data file
# file contains one instance on each line with the format
# y,x1,x2,...,xn
print("[gathering data]")
with open(DATA_FILE, "r") as f:
for line in f:
values = line.split(DELIM)
year = int(values[0])
if year not in YEARS:
continue
if year_count[year] < N_PER_YEAR:
year_count[year] = year_count[year] + 1
X_entry = []
for value in values[2:]:
X_entry.append(float(value))
if year_count[year] < N_PER_YEAR*TRAIN_PERCENT:
y_train.append(year)
X_train.append(X_entry)
else: # add to test
y_test.append(year)
X_test.append(X_entry)
print(year_count)
# preprocessing
print("[preprocessing data]")
X_train = sklearn.preprocessing.normalize(X_train)
X_test = sklearn.preprocessing.normalize(X_test)
# split data into train and test
#print("[splitting data]")
#split_index = int(len(y) * TRAIN_PERCENT)
#y_train = y[:split_index]
#X_train = X[:split_index]
#y_test = y[split_index:]
#X_test = X[split_index:]
#print(len(y_train), len(y_test))
# train the classifier (svm)
print("[training classifier]")
classifier = SVR(kernel='rbf', C=20000)
classifier.fit(X_train, y_train)
# test the classifier
print("[testing classifier]")
y_predict = classifier.predict(X_test)
y_predict = [round(y) for y in y_predict]
print("Accuracy: %.2f%%" % (100*classifier.score(X_test, y_test)))
print("MSE: %.2f years" % (mean_squared_error(y_test, y_predict)))
conf_mtx = confusion_matrix(y_test, y_predict, labels=YEARS)
np.savetxt(OUTPUT_FILE, conf_mtx, delimiter=",")
# make graphs
print("[creating cool visualizations]")
plt.figure()
plots.plot_confusion_matrix(conf_mtx, classes=YEARS ,normalize=True, savefile=PLOT_FILENAME, print_values=False)
# Time
print("Execution time: %.2f seconds" % (time.time()-start_time))
| true |
e01e36755cdd86f02430f2ff8aa475c1caac83fa | Python | zmk-areimann/POOL2018 | /venv/pool_gui.py | UTF-8 | 7,045 | 3.09375 | 3 | [] | no_license | import tkinter as tk
from tkinter import ttk, filedialog
import pickle
import os.path
from tkinter import messagebox
import pool_driver as pd
class PoolGUI:
    """Tkinter front-end for configuring and starting a pool experiment.

    The window shows a menu row (add/remove line, validate, start, load,
    save) above a table where each line is a pool selection (Combobox) and a
    duration in minutes (Entry).

    NOTE(review): n_lines, pools and the three widget lists are class-level
    attributes; the mutable lists are shared by every instance, so a second
    PoolGUI would see the first one's widgets.  Works for the single-window
    use in this script, but should move into __init__ if reused.
    """
    n_lines = 4 # initial line count
    pools = ["Pool A", "Pool B", "Pool C", "Rinse"] # pool selection options
    # lists to keep track of the table
    cbox_list =[] # comboboxes for pool selection
    ent_list = [] # entries for timing options
    lab_list = [] # number of entries
    def add_line(self):
        """Append one table row (number label, pool combobox, time entry)."""
        self.n_lines += 1 # increase lines count by 1
        # create new label
        lab = ttk.Label(self.frm_table, text=self.n_lines)
        lab.grid(row=self.n_lines+1, column=0)
        self.lab_list.append(lab)
        # create new combobox
        cbox = ttk.Combobox(self.frm_table, values=self.pools, state="readonly")
        cbox.current(0)
        cbox.grid(row=self.n_lines+1, column=1)
        self.cbox_list.append(cbox)
        # create new entry
        ent = ttk.Entry(self.frm_table)
        ent.grid(row=self.n_lines+1, column=2)
        self.ent_list.append(ent)
    def remove_line(self):
        """Remove the last table row, keeping at least one line."""
        if self.n_lines > 1: # more than one element in lines count? if yes:
            self.n_lines -= 1 # decrease lines count by 1
            # destroy all widgets of the line from the table
            self.lab_list[self.n_lines].destroy()
            self.cbox_list[self.n_lines].destroy()
            self.ent_list[self.n_lines].destroy()
            # remove the destroyed objects from the lists
            self.lab_list.pop()
            self.cbox_list.pop()
            self.ent_list.pop()
    def validate(self):
        """Check every time entry parses as int; color bad ones red.

        Enables the start button and returns True only when all entries are
        valid integers.
        NOTE(review): the bare `except` also swallows unrelated errors --
        narrowing it to ValueError would be safer.
        """
        # create red background style
        s = ttk.Style()
        s.configure("Red.TEntry", fieldbackground="red")
        # list for control
        tt=[]
        for i in range(self.n_lines):
            try: # can in convert to int? yes:
                tt.append(int(self.ent_list[i].get())) # append int value to tt!
                self.ent_list[i].configure(style="TEntry")
            except: # no:
                self.ent_list[i].configure(style="Red.TEntry")
        if len(tt) == self.n_lines: # all lines are int??
            self.btn_start.configure(state="active")
            return True
        else:
            self.btn_start.configure(state="disabled")
            return False
    def start(self):
        """Re-validate and, if clean, build a pool driver and run it."""
        if not self.validate():
            print("still errors")
        else:
            print("yeah, lets go")
            pool = pd.make_pool_driver(self.n_lines, self.cbox_list, self.ent_list, self.pools)
            pool.run_experiment()
    def load(self):
        """Load a pickled .opl configuration and rebuild the table from it.

        The pickle holds [combobox values, entry values, line count] as
        written by save().
        """
        home = os.path.expanduser('~')
        filename = filedialog.askopenfilename(initialdir=home, title="Select file",
                                              filetypes=[("Olympic Pool Files", "*.opl")])
        file = open(filename, "rb")
        obj = pickle.load(file)
        print(obj)
        file.close()
        # Tear down the current table widgets before rebuilding.
        for i in range(self.n_lines):
            self.cbox_list[i].destroy()
            self.ent_list[i].destroy()
            self.lab_list[i].destroy()
        self.cbox_list = []
        self.ent_list = []
        self.lab_list = []
        # create and place the table(comboboxes and entries)
        cb = obj[0]
        et = obj[1]
        self.n_lines = obj[2]
        for i in range(self.n_lines):
            # label for numbering
            lab = ttk.Label(self.frm_table, text=i+1)
            lab.grid(row=i+1, column=0)
            self.lab_list.append(lab) # <-- store the labels in the list!
            # combobox for selection of pool
            cbox = ttk.Combobox(self.frm_table, values=self.pools, state="readonly")
            cbox.set(cb[i])
            cbox.grid(row=i+1, column=1)
            self.cbox_list.append(cbox) # <-- store the comboboxes in list!
            # entry for selection of timing
            ent = ttk.Entry(self.frm_table)
            ent.insert(0, et[i])
            ent.grid(row=i+1, column=2)
            self.ent_list.append(ent) # <-- store the entries in list!
    def save(self):
        """Pickle the current table (values + line count) to a .opl file."""
        cbox = []
        ent = []
        n = self.n_lines
        for i in range(n):
            cbox.append(self.cbox_list[i].get())
            ent.append(self.ent_list[i].get())
        home = os.path.expanduser('~')
        filename = filedialog.asksaveasfilename(initialdir=home, title="Select file",
                                                filetypes=[("Olympic Pool Files", "*.opl")])
        file = open(filename, "wb")
        pickle.dump([cbox, ent, n], file)
        file.close()
    def __init__(self, f):
        """Build the whole UI inside the given root window *f*."""
        f.title("Pool 2018")
        # create and place main frame-widgets: menu and table
        self.frm_menu = ttk.Frame(f)
        self.frm_menu.grid(row=0, column=0, pady=(0, 10))
        self.frm_table = ttk.Frame(f)
        self.frm_table.grid(row=1, column=0)
        # create menu widgets
        self.btn_add = ttk.Button(self.frm_menu, text="+", command=self.add_line)
        self.btn_delete = ttk.Button(self.frm_menu, text="-", command=self.remove_line)
        self.btn_validate = ttk.Button(self.frm_menu, text="validate", command=self.validate)
        self.btn_start = ttk.Button(self.frm_menu, text="start", state="disabled", command=self.start)
        self.btn_load = ttk.Button(self.frm_menu, text="load", command=self.load)
        self.btn_save = ttk.Button(self.frm_menu, text="save", command=self.save)
        # place menu widgets
        self.btn_add.grid(row=0, column=0)
        self.btn_delete.grid(row=0, column=1)
        self.btn_validate.grid(row=0, column=2, padx=(20, 0))
        self.btn_start.grid(row=0, column=3)
        self.btn_load.grid(row=0, column=4, padx=(20, 0))
        self.btn_save.grid(row=0, column=5)
        # create table widgets
        self.lab_pool = ttk.Label(self.frm_table, text="Pool")
        self.lab_time = ttk.Label(self.frm_table, text="Time [min]")
        # place table widgets
        self.lab_pool.grid(row=0, column=1)
        self.lab_time.grid(row=0, column=2)
        # generate the grid
        for i in range(self.n_lines):
            # label for numbering
            lab = ttk.Label(self.frm_table, text=i+1)
            lab.grid(row=i+1, column=0)
            self.lab_list.append(lab) # <-- store the labels in the list!
            # combobox for selection of pool
            cbox = ttk.Combobox(self.frm_table, values=self.pools, state="readonly")
            cbox.current(0)
            cbox.grid(row=i+1, column=1)
            self.cbox_list.append(cbox) # <-- store the comboboxes in list!
            # entry for selection of timing
            ent = ttk.Entry(self.frm_table)
            ent.grid(row=i+1, column=2)
            self.ent_list.append(ent) # <-- store the entries in list!
# Build the main window and hand it to the GUI class.
root = tk.Tk()
GUI = PoolGUI(root)
def on_closing():
    # Confirm before destroying the window.
    if messagebox.askokcancel("Quit", "Do you want to quit?"):
        root.destroy()
# Route the window-manager close button through the confirmation dialog.
root.protocol("WM_DELETE_WINDOW", on_closing)
root.mainloop()
| true |
ee7aa0f30dba9a86c7fc3965c35b89c8aff7035e | Python | AnastasiyaSk/Coursera_week1 | /ex_mkad.py | UTF-8 | 737 | 3.75 | 4 | [] | no_license | # Длина Московской кольцевой автомобильной дороги — 109 километров.
# MKAD problem: the Moscow ring road is 109 km long.  Biker Vasya starts at
# kilometre 0 and rides for t hours at v km/h (v > 0 = positive direction,
# v < 0 = negative direction).  Print the kilometre mark where he stops.
# (Note: Python's % with a positive modulus is already non-negative, so both
# branches below reduce to distance % 109; the explicit split mirrors the
# original formulation.)
v = int(input())
t = int(input())
distance = v * t
if distance > 0:
    print(distance % 109)
else:
    print((109 + distance % 109) % 109)
| true |
45dba0f5b2e2a78ca629f1b7183c34fd7ef0a3d7 | Python | pkdism/hackerrank | /python/basic-data-types/second-maximum.py | UTF-8 | 150 | 2.96875 | 3 | [] | no_license | n = int(input())
# Second maximum: among the scores on the input line, print the largest
# value strictly smaller than the maximum.
scores = [int(x) for x in input().split()]
top = max(scores)
runner_up = -1000  # sentinel below the problem's score range
for score in scores:
    if score != top and score > runner_up:
        runner_up = score
print(runner_up)
| true |
27dc203d3d4994c8b58b2e14e6796e5d92e5a98f | Python | srijarkoroy/GuessWho | /src/detect.py | UTF-8 | 4,928 | 2.765625 | 3 | [
"MIT"
] | permissive | from scipy.spatial import distance as dist
from imutils.video import FileVideoStream
from imutils.video import VideoStream
from imutils import face_utils
import os
import argparse
import imutils
import time
import dlib
import cv2
from deepface import DeepFace
import pandas as pd
def eye_aspect_ratio(eye):
    """Eye aspect ratio (EAR) for a 6-point eye landmark sequence.

    EAR = (|p1-p5| + |p2-p4|) / (2 * |p0-p3|): the summed vertical landmark
    distances over twice the horizontal distance.  The value drops sharply
    when the eye closes, which the caller uses for blink detection.
    """
    vertical = dist.euclidean(eye[1], eye[5]) + dist.euclidean(eye[2], eye[4])
    horizontal = dist.euclidean(eye[0], eye[3])
    return vertical / (2.0 * horizontal)
BLINK_THRESHOLD = 0.19 # the threshold of the ear below which we assume that the eye is closed
CONSEC_FRAMES_NUMBER = 2 # minimal number of consecutive frames with a low enough ear value for a blink to be detected
# get arguments from a command line
ap = argparse.ArgumentParser(description='Eye blink detection')
ap.add_argument("-p", "--shape-predictor", required=True, help="path to facial landmark predictor")
ap.add_argument("-v", "--video", type=str, default="", help="path to input video file")
args = vars(ap.parse_args())
# initialize dlib's face detector (HOG-based) and facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
# choose indexes for the left and right eye
(left_s, left_e) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(right_s, right_e) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
# start the video stream or video reading from the file
# (no -v argument -> live webcam; otherwise read frames from the given file)
video_path = args["video"]
if video_path == "":
    vs = VideoStream(src=0).start()
    print("[INFO] starting video stream from built-in webcam...")
    fileStream = False
else:
    vs = FileVideoStream(video_path).start()
    print("[INFO] starting video stream from a file...")
    fileStream = True
time.sleep(1.0)
# Loop state: consecutive low-EAR frames, completed blinks, long-closure
# alert flag, timestamp when the eyes first closed, first frame, snapshot
# filename used for the DeepFace lookup.
counter = 0
total = 0
alert = False
start_time = 0
frame = vs.read()
filename = 'img.jpg'
# loop over the frames of video stream:
# grab the frame, resize it, convert it to grayscale
# and detect faces in the grayscale frame
while (not fileStream) or (frame is not None):
    frame = imutils.resize(frame, width=640)
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = detector(gray_frame, 0)
    ear = 0
    # loop over the face detections:
    # determine the facial landmarks,
    # convert the facial landmark (x, y)-coordinates to a numpy array,
    # then extract the left and right eye coordinates,
    # and use them to compute the average eye aspect ratio for both eyes
    for rect in rects:
        shape = predictor(gray_frame, rect)
        shape = face_utils.shape_to_np(shape)
        leftEye = shape[left_s:left_e]
        rightEye = shape[right_s:right_e]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        ear = (leftEAR + rightEAR) / 2.0
        # visualize each of the eyes
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        #cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        #cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
    # if the eye aspect ratio is below the threshold, increment counter
    # if the eyes are closed longer than for 2 secs, raise an alert
    if ear < BLINK_THRESHOLD:
        counter += 1
        if start_time == 0:
            start_time = time.time()
        else:
            end_time = time.time()
            if end_time - start_time > 2: alert = True
    else:
        # Eyes open again: count a blink if the closure lasted long enough,
        # then reset the closure state.
        if counter >= CONSEC_FRAMES_NUMBER:
            total += 1
        counter = 0
        start_time = 0
        alert = False
    # draw the total number of blinks and EAR value
    #cv2.putText(frame, "Blinks: {}".format(total), (10, 30),
    #cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    #cv2.putText(frame, "EAR: {:.2f}".format(ear), (500, 30),
    #cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    if alert:
        cv2.putText(frame, "ALERT!", (150, 30),
        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    # show the frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # After the first detected blink (liveness check), snapshot the frame and
    # look the face up in the local DeepFace database, then stop.
    if total>=1:
        cv2.imwrite(filename, frame)
        df = DeepFace.find(img_path = "img.jpg", db_path = "file/my_db")
        #print(df)
        if df.empty:
            print('no match')
        else:
            # The match path looks like <db>/<person-name>/<image>; the
            # directory name is taken as the person's identity.
            string=df['identity'][0]
            path=os.path.dirname(string)
            name=os.path.basename(path)
            print('Identified as :', name)
        time.sleep(2)
        break
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
    frame = vs.read()
cv2.destroyAllWindows()
vs.stop()
| true |
f007eeb55b3a6415cec4fcf3b9261d9febfdb5ed | Python | HaemanthSP/Reading_Complexity_Assignments | /assignment1/flesch_kincaid.py | UTF-8 | 2,982 | 3.203125 | 3 | [] | no_license | import os
import csv
import spacy
def count_syllables(word):
    """Estimate the syllable count of *word* with a simple heuristic.

    Counts the starts of vowel groups ('aeiouy'), drops a trailing silent
    'e' (but keeps '-le'), credits a leading vowel, and never returns less
    than 1 for a zero count.  Assumes a lowercase, non-empty word, as the
    caller supplies.  (Heuristic adapted from a well-known Stack Overflow
    answer on syllable counting.)
    """
    vowels = 'aeiouy'
    total = sum(
        1
        for prev, cur in zip(word, word[1:])
        if cur in vowels and prev not in vowels
    )
    if word.endswith('e') and not word.endswith('le'):
        total -= 1
    if word[0] in vowels:
        total += 1
    if not total:
        total += 1
    return total
def compute_metrics(data_dir):
    """
    Compute Flesch-Kincaid readability metrics for every .txt file in
    *data_dir*.

    Returns a list of dicts (one per file) with sentence/word/syllable
    counts, the two ratios, and the grade score
    0.39 * words_per_sentence + 11.8 * syllables_per_word - 15.59.

    NOTE(review): a file that spaCy segments into zero sentences (or zero
    non-punctuation words) raises ZeroDivisionError in the ratio lines.
    """
    nlp = spacy.load("en_core_web_sm")
    metrics = []
    for text_file in os.listdir(data_dir):
        # Skip if it is not a text file
        file_name, ext = os.path.splitext(text_file)
        if ext != '.txt':
            continue
        # Read the text from the file
        with open(os.path.join(data_dir, text_file)) as tf:
            text = tf.read().lower()
        word_count, sentece_count, syllabel_count = [0] * 3
        for sentence in nlp(str(text)).sents:
            sentece_count += 1
            for word in sentence:
                # Skip punctuation
                if word.is_punct:
                    continue
                word_count += 1
                syllabel_count += count_syllables(word.text)
        words_per_sentence = word_count / sentece_count
        syllables_per_word = syllabel_count / word_count
        # Compute Flesch kincaid measure
        grade = 0.39 * words_per_sentence + 11.8 * syllables_per_word - 15.59
        metrics.append({'File': file_name,
                        'N.Sentences': sentece_count,
                        'N.Words': word_count,
                        'N.Syllables': syllabel_count,
                        'Words.Per.Sentence': words_per_sentence,
                        'Syllables.Per.Word': syllables_per_word,
                        'Grade.Score': grade})
    return metrics
def write_to_csv(metrics, output_filename):
    """Write the per-file readability metrics to *output_filename* as CSV.

    Args:
        metrics: list of dicts as produced by compute_metrics(); each dict
            is expected to carry exactly the keys listed in *fieldnames*.
        output_filename: destination path for the CSV file.

    Fixes vs. original: corrected the `filednames` typo, and opened the file
    with newline='' as the csv module requires (otherwise Windows output
    gains a blank row after every record).
    """
    fieldnames = ['File', 'N.Sentences', 'N.Words', 'N.Syllables',
                  'Words.Per.Sentence', 'Syllables.Per.Word', 'Grade.Score']
    with open(output_filename, 'w', newline='') as out_file:
        dict_writer = csv.DictWriter(out_file, fieldnames)
        dict_writer.writeheader()
        dict_writer.writerows(metrics)
if __name__ == '__main__':
    import argparse
    # CLI: -I/--input-dir is the dataset directory, -O/--output-file the
    # destination CSV (defaults to results.csv).
    parser = argparse.ArgumentParser()
    parser.add_argument('-I', '--input-dir', help='Input directory')
    parser.add_argument('-O', '--output-file', help='Output file path',
                        default='results.csv')
    args = parser.parse_args()
    # Bail out early if the input directory does not exist.
    if not os.path.isdir(args.input_dir):
        print("No such directory exists !!!")
        exit()
    metrics = compute_metrics(args.input_dir)
    write_to_csv(metrics, args.output_file)
| true |
394d2552c23a5b9e26a275547b37aba8b9aad491 | Python | marfikus/skillfactory-pws-practic-b4-12 | /find_athlete.py | UTF-8 | 9,535 | 2.96875 | 3 | [] | no_license |
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import datetime as dt
DB_PATH = "sqlite:///sochi_athletes.sqlite3"  # SQLite file holding both tables
Base = declarative_base()  # shared declarative base for the ORM models below
class User(Base):
    """ORM model for the "user" table (people to compare against athletes)."""
    __tablename__ = "user"
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    first_name = sa.Column(sa.Text)
    last_name = sa.Column(sa.Text)
    gender = sa.Column(sa.Text)
    email = sa.Column(sa.Text)
    birthdate = sa.Column(sa.Text)  # "YYYY-MM-DD" text, parsed with strptime later
    height = sa.Column(sa.Float)
class Athelete(Base):
    """ORM model for the "athelete" table of Sochi athletes.

    NOTE(review): the class and table names keep the original misspelling of
    "athlete"; renaming would break the existing database schema.
    """
    __tablename__ = "athelete"
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    age = sa.Column(sa.Integer)
    birthdate = sa.Column(sa.Text)  # "YYYY-MM-DD" text, may be NULL
    gender = sa.Column(sa.Text)
    height = sa.Column(sa.Float)  # may be NULL for some athletes
    name = sa.Column(sa.Text)
    weight = sa.Column(sa.Integer)
    gold_medals = sa.Column(sa.Integer)
    silver_medals = sa.Column(sa.Integer)
    bronze_medals = sa.Column(sa.Integer)
    total_medals = sa.Column(sa.Integer)
    sport = sa.Column(sa.Text)
    country = sa.Column(sa.Text)
def connect_db():
    """Open the SQLite database, ensure the tables exist, return a session."""
    # Create the database engine.
    engine = sa.create_engine(DB_PATH)
    # Create the tables declared above (no-op if they already exist).
    Base.metadata.create_all(engine)
    # Create the session factory...
    Sessions = sessionmaker(engine)
    # ...and a session bound to it.
    session = Sessions()
    return session
def print_user(user):
    """Pretty-print one User row to stdout."""
    fields = [
        f" Id: {user.id}\n",
        f" Name: {user.first_name} {user.last_name}\n",
        f" Gender: {user.gender}\n",
        f" Email: {user.email}\n",
        f" Birthdate: {user.birthdate}\n",
        f" Height: {user.height}\n",
    ]
    print("".join(fields))
def print_athlete(athlete):
    """Pretty-print one Athelete row to stdout."""
    fields = [
        f" Id: {athlete.id}\n",
        f" Name: {athlete.name}\n",
        f" Age: {athlete.age}\n",
        f" Birthdate: {athlete.birthdate}\n",
        f" Gender: {athlete.gender}\n",
        f" Height: {athlete.height}\n",
        f" Weight: {athlete.weight}\n",
        f" Gold_medals: {athlete.gold_medals}\n",
        f" Silver_medals: {athlete.silver_medals}\n",
        f" Bronze_medals: {athlete.bronze_medals}\n",
        f" Total_medals: {athlete.total_medals}\n",
        f" Sport: {athlete.sport}\n",
        f" Country: {athlete.country}\n",
    ]
    print("".join(fields))
def find_nearby_athletes(session, user):
    """Find the athletes closest to *user* by height and by birthdate.

    First tries exact matches via two queries; for whichever attribute has
    no exact match, falls back to a linear scan over all athletes keeping
    the smallest absolute difference.  Returns a dict with the two athletes
    and the two minimal differences (0 means an exact match was found).
    """
    # Flags that steer the search algorithm below:
    athlete_nearby_height_is_found = False
    athlete_nearby_birthdate_is_found = False
    # Smallest user-vs-athlete height difference seen so far.
    # Needed for the scan below; 0 doubles as "not set yet".
    min_dif_heights = 0
    # Smallest user-vs-athlete birthdate difference seen so far.
    # Same convention: 0 means "not set yet".
    min_dif_birthdates = 0
    # Look for an exact height match:
    athlete_nearby_height = session.query(Athelete).filter(Athelete.height == user.height).first()
    # If such an athlete exists:
    if not athlete_nearby_height is None:
        # ...record that fact.
        athlete_nearby_height_is_found = True
    # Look for an exact birthdate match:
    athlete_nearby_birthdate = session.query(Athelete).filter(Athelete.birthdate == user.birthdate).first()
    # If such an athlete exists:
    if not athlete_nearby_birthdate is None:
        # ...record that fact.
        athlete_nearby_birthdate_is_found = True
    # If either exact lookup failed, iterate over every athlete and keep the
    # nearest value.  Admittedly crude for large tables, but it works.
    if not athlete_nearby_height_is_found or not athlete_nearby_birthdate_is_found:
        athletes = session.query(Athelete).all()
        # If no exact height match was found
        # if not athlete_nearby_height_is_found:
        # ...an earlier version seeded the minimum with the user's height
        # (guaranteed larger than any later computed difference, assuming a
        # consistent height scale):
        # min_dif_heights = user.height
        # That was replaced by the "== 0 means unset" check in the loop below.
        # If no exact birthdate match was found
        if not athlete_nearby_birthdate_is_found:
            # An earlier version seeded the minimum with the largest possible
            # timedelta (again only as a starting value):
            # min_dif_birthdates = dt.timedelta.max
            # Replaced by the "== 0 means unset" check in the loop below.
            # Parse the user's birthdate into a comparable datetime.
            user_birthdate = dt.datetime.strptime(user.birthdate, "%Y-%m-%d")
        for athlete in athletes:
            if not athlete_nearby_height_is_found:
                # Some athletes have no recorded height; skip them.
                if not athlete.height is None:
                    # Absolute height difference between user and athlete.
                    dif_heights = abs(user.height - athlete.height)
                    # If the running minimum is still unset (== 0) or this
                    # difference is smaller, record the new minimum and the
                    # athlete it belongs to:
                    if (min_dif_heights == 0) or (dif_heights < min_dif_heights):
                        min_dif_heights = dif_heights
                        athlete_nearby_height = athlete
            if not athlete_nearby_birthdate_is_found:
                # Guard against missing birthdates as well:
                if not athlete.birthdate is None:
                    # Parse the athlete's birthdate for comparison:
                    athlete_birthdate = dt.datetime.strptime(athlete.birthdate, "%Y-%m-%d")
                    # Absolute birthdate difference, compared the same way
                    # as the height difference above.
                    dif_birthdates = abs(user_birthdate - athlete_birthdate)
                    if (min_dif_birthdates == 0) or (dif_birthdates < min_dif_birthdates):
                        min_dif_birthdates = dif_birthdates
                        athlete_nearby_birthdate = athlete
    result = {
        "athlete_nearby_height": athlete_nearby_height,
        "dif_heights": min_dif_heights,
        "athlete_nearby_birthdate": athlete_nearby_birthdate,
        "dif_birthdates": min_dif_birthdates
    }
    return result
def main():
    """Interactive entry point: look up a user and print nearest athletes."""
    session = connect_db()
    # Ask for a user id and look it up in the database.
    user_id = input("Enter user id: ")
    user = session.query(User).filter(User.id == user_id).first()
    # No such user: report and exit.
    if user is None:
        print("No user with id:", user_id)
        return
    # Otherwise show the user's data.
    print("\nSelected user:")
    print_user(user)
    # Find the nearest athletes in the database.
    result = find_nearby_athletes(session, user)
    # If a nearest-by-height athlete was found, print it.
    if not result["athlete_nearby_height"] is None:
        print("Nearest athlete by height:")
        print_athlete(result["athlete_nearby_height"])
        print(f"Heights difference: {result['dif_heights']}\n")
    else:
        print("No nearest athlete by height\n")
    # If a nearest-by-birthdate athlete was found, print it.
    if not result["athlete_nearby_birthdate"] is None:
        print("Nearest athlete by birthdate:")
        print_athlete(result["athlete_nearby_birthdate"])
        print(f"Birthdates difference: {result['dif_birthdates']}\n")
    else:
        print("No nearest athlete by birthdate\n")
# Run the interactive lookup when executed as a script.
if __name__ == "__main__":
    main()
| true |
79841752415c2e59442c613a55cf2563a78fdc4d | Python | Mwangikimathi/python-basics | /objectOrientedProgramming/employee.py | UTF-8 | 854 | 3.984375 | 4 | [] | no_license | class Employee:
    name = "Mark"  # class-level default; shadowed by the instance attribute set in __init__
    def __init__(self, age, name, department, salary):
        """Initialize an employee record with age, name, department, salary."""
        self.age = age
        self.name = name
        self.department = department
        self.salary = salary
    def print_name(self):
        # NOTE(review): despite the name, this prints the employee's *age*.
        print(self.age)
def get_details(self):
print(self)
# s = "{} is {} years old and works in {} department and earns Kshs. {}".format(self.name, self.age, self.department, self.salary)
# return s
return f"{self.name} is {self.age} years old and works in {self.department} department and earns Kshs{self.salary}."
# Demo: build two employees and print details for the second one.
m1 = Employee(45, "Mark", "IT", 59000)
m2 = Employee(50, "John", "Procurement", 20000)
print("John's salary is " ,m2.salary)
print(m2.get_details())
# print(m1.print_name())
# print (type(m1))
# print(Employee.name)
# print(Employee.print_name())
2b3cf54fcbd1fffa1f9674fcfb14cc35e4bc2485 | Python | powei1990/DadFarm | /DHT_DB.py | UTF-8 | 917 | 2.671875 | 3 | [] | no_license | import time
import board
import adafruit_dht
import pymongo
import datetime
# Connect to the local MongoDB instance and select the weather collection.
client = pymongo.MongoClient('mongodb://localhost:27017/')
db = client["database"]
col = db["weathers1"]
# DHT22 temperature/humidity sensor on GPIO pin D24.
dhtDevice = adafruit_dht.DHT22(board.D24, use_pulseio=False)
# Sample the sensor forever, storing one document per successful reading.
while True:
    try:
        # Print the values to the serial port
        temperature_c = dhtDevice.temperature
        # NOTE(review): temperature_f is computed but never stored or printed.
        temperature_f = temperature_c * (9 / 5) + 32
        humidity = dhtDevice.humidity
        post={"temperature_c":temperature_c,
            "humidity":humidity,
            "date":datetime.datetime.now()}
        x=col.insert_one(post);
        print(x);
    except RuntimeError as error:
        # Errors happen fairly often, DHT's are hard to read, just keep going
        print(error.args[0])
        time.sleep(2.0)
        continue
    except Exception as error:
        # Any other failure: release the sensor and propagate.
        dhtDevice.exit()
        raise error
    # Wait one hour between successful samples.
    time.sleep(60*60)
c3c92c1969672cb0d57e8f5a3e58d24b5781a2d4 | Python | LegumeFederation/meta_iron | /meta_iron/directory.py | UTF-8 | 2,401 | 2.75 | 3 | [
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
'''Defines directory types and implements commands on directories
'''
# module imports
from . import cli, get_user_context_obj, logger
from .common import *
#
# private context function
#
_ctx = click.get_current_context
@cli.command()
@click.argument('directorytype', type=str, default='')
def init_directory_metadata(directorytype):
    '''Initialize a metadata file.

    Bug fixed: the function parameter was named ``dir`` while the click
    argument is ``directorytype``.  Click passes parsed arguments as keyword
    arguments keyed by the argument name, so invoking the command raised
    TypeError -- and the body referenced the then-undefined name
    ``directorytype``.

    :param directorytype: Optional directory type whose metadata file should
        be initialized; defaults to the empty string.
    '''
    metadata_obj = get_user_context_obj()['metadata_obj']
    metadata_obj.write_metadata_dict(directorytype, metadata_dict={})
@cli.command()
def show_directory_metadata():
    '''Prints contents of directory metadata file.
    Example:
        meta_iron -v show_directory_metadata
    '''
    metadata_obj = get_user_context_obj()['metadata_obj']
    # Bail out early when there is nothing to show.
    if not metadata_obj.metadata_found:
        logger.info('No metadata was found, use init_directory_metadata first')
        return
    logger.info('Metadata at this directory:')
    logger.info(metadata_obj.format_metadata(metadata_obj.directory_metadata))
    logger.info('\n')
@cli.command()
def show_flattened_directory_metadata():
    '''Prints contents of flattened metadata.
    Example:
        meta_iron -v show_flattened_directory_metadata
    '''
    metadata_obj = get_user_context_obj()['metadata_obj']
    # Guard clause: nothing to print without an initialized metadata file.
    if not metadata_obj.metadata_found:
        logger.info('No metadata was found, use init_directory_metadata first')
        return
    logger.info('Metadata at this directory:')
    logger.info(metadata_obj.format_metadata(metadata_obj.metadata, show_source=True))
    logger.info('\n')
@cli.command()
def write_flattened_directory_metadata():
    '''Writes metadata file.
    Example:
        meta_iron -v write_flattened_directory_metadata
    '''
    # NOTE(review): despite the command name and docstring, this body is
    # identical to show_flattened_directory_metadata -- it only logs the
    # flattened metadata and never writes any file.  Confirm intent.
    metadata_obj = get_user_context_obj()['metadata_obj']
    if not metadata_obj.metadata_found:
        logger.info('No metadata was found, use init_directory_metadata first')
    else:
        logger.info('Metadata at this directory:')
        logger.info(metadata_obj.format_metadata(metadata_obj.metadata, show_source=True))
        logger.info('\n')
1bb799dce01ceb91eeb230f7bc484b2a0e470a55 | Python | kantel/nodebox-pyobjc | /examples/Extended Application/matplotlib/examples/lines_bars_and_markers/multicolored_line.py | UTF-8 | 2,345 | 3.34375 | 3 | [
"MIT"
] | permissive | '''
==================
Multicolored lines
==================
This example shows how to make a multi-colored line. In this example, the line
is colored based on its derivative.
'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
# nodebox section
# When this file is executed inside NodeBox, __name__ is '__builtin__' and
# figures are rendered into the NodeBox canvas via temporary PNG files;
# otherwise pltshow() falls back to plt.show().
if __name__ == '__builtin__':
    # were in nodebox
    import os
    import tempfile
    W = 800
    inset = 20
    size(W, 600)  # NodeBox builtin: set canvas size
    plt.cla()
    plt.clf()
    plt.close('all')
    def tempimage():
        # Create a named, non-deleting temporary PNG and return its path.
        fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
        fname = fob.name
        fob.close()
        return fname
    imgx = 20
    imgy = 0
    def pltshow(plt, dpi=150):
        # Render the current figure to a temp PNG, draw it on the NodeBox
        # canvas, then advance the vertical cursor and grow the canvas.
        # (The parameter deliberately shadows the module-level plt.)
        global imgx, imgy
        temppath = tempimage()
        plt.savefig(temppath, dpi=dpi)
        dx,dy = imagesize(temppath)
        w = min(W,dx)
        image(temppath,imgx,imgy,width=w)
        imgy = imgy + dy + 20
        os.remove(temppath)
        size(W, HEIGHT+dy+40)  # HEIGHT: presumably a NodeBox global -- TODO confirm
else:
    def pltshow(mplpyplot):
        # Plain matplotlib environment: just show the figure.
        mplpyplot.show()
# nodebox section end
# Sample a sine curve; color each segment by the derivative at its midpoint.
x = np.linspace(0, 3 * np.pi, 500)
y = np.sin(x)
dydx = np.cos(0.5 * (x[:-1] + x[1:]))  # first derivative
# Create a set of line segments so that we can color them individually
# This creates the points as a N x 1 x 2 array so that we can stack points
# together easily to get the segments. The segments array for line collection
# needs to be (numlines) x (points per line) x 2 (for x and y)
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
fig, axs = plt.subplots(2, 1, sharex=True, sharey=True)
# Create a continuous norm to map from data points to colors
norm = plt.Normalize(dydx.min(), dydx.max())
lc = LineCollection(segments, cmap='viridis', norm=norm)
# Set the values used for colormapping
lc.set_array(dydx)
lc.set_linewidth(2)
line = axs[0].add_collection(lc)
fig.colorbar(line, ax=axs[0])
# Use a boundary norm instead
# (three discrete colors selected by the derivative's sign/magnitude)
cmap = ListedColormap(['r', 'g', 'b'])
norm = BoundaryNorm([-1, -0.5, 0.5, 1], cmap.N)
lc = LineCollection(segments, cmap=cmap, norm=norm)
lc.set_array(dydx)
lc.set_linewidth(2)
line = axs[1].add_collection(lc)
fig.colorbar(line, ax=axs[1])
# LineCollections do not auto-scale the axes, so set the limits explicitly.
axs[0].set_xlim(x.min(), x.max())
axs[0].set_ylim(-1.1, 1.1)
pltshow(plt)
| true |
72a66fc86b5c8c288a38b2a33ec6b8653fd25fff | Python | TaylorSMarks/FinGUI | /fingui/popup.py | UTF-8 | 910 | 3.546875 | 4 | [
"MIT"
] | permissive | import tk
class Popup(tk.Toplevel):
    '''
    A class for frameless floating popup windows. For example:
        from fingui import Entry, Popup
        p = Popup(Entry, 500, 500)
        p.content.set('Hello World!')
    '''
    def __init__(self, contentClass = None, x = None, y = None, *args, **kwargs):
        '''
        Pass in a class to have initialized with the Popup as its parent.
        *args and **kwargs are passed directly to the contentClass's __init__.
        x and y, if provided, are used to set the initial position of the Popup.
        '''
        tk.Toplevel.__init__(self)
        # overrideredirect removes the window-manager frame/decorations.
        self.overrideredirect(True)
        if contentClass:
            self.content = contentClass(self, *args, **kwargs)
        # Fix: compare against None explicitly so that coordinate 0 is not
        # silently ignored (0 is falsy, but (0, 0) is a valid position).
        if x is not None and y is not None:
            self.setPosition(x, y)

    def setPosition(self, x, y):
        ''' Sets the X and Y coordinates of the Popup. '''
        self.geometry('+{}+{}'.format(x, y))
7c14495f4afc655de1fb02e0041a8b9b7318014c | Python | svsamsonov/vr_sg_mcmc | /Code_logistic_regression/baselines.py | UTF-8 | 9,824 | 2.796875 | 3 | [] | no_license | import numpy as np
from numpy.fft import fft,ifft
import scipy.sparse as sparse
import scipy.stats as spstats
import copy
def standartize(X_train, X_test, intercept=True):
    """Whiten the training covariates and apply the same transform to the test set.

    The training covariates are (optionally) augmented with an intercept
    column, mean-centered and multiplied by ``Cov^{-1/2}`` so that
    ``X_train.T @ X_train`` becomes the identity matrix.  The test sample is
    transformed with the statistics (means, whitening matrix) computed on
    the training sample.  (Unused locals ``d`` and ``Sigma_half`` removed.)

    Args:
        X_train: array of shape (n_train, d), training covariates.
        X_test: array of shape (n_test, d), test covariates.
        intercept: if True, prepend a column of ones to both samples; the
            intercept column is not mean-centered.

    Returns:
        (X_train, X_test): whitened copies; the input arrays are not modified.
    """
    X_train = copy.deepcopy(X_train)
    X_test = copy.deepcopy(X_test)
    if intercept:  # adds intercept term
        X_train = np.concatenate((np.ones(X_train.shape[0]).reshape(X_train.shape[0], 1), X_train), axis=1)
        X_test = np.concatenate((np.ones(X_test.shape[0]).reshape(X_test.shape[0], 1), X_test), axis=1)
    # Centering the covariates
    means = np.mean(X_train, axis=0)
    if intercept:  # do not subtract the mean from the bias term
        means[0] = 0.0
    X_train -= means
    # Whitening: Cov^{-1/2} via the SVD of the (symmetric) covariance matrix.
    Cov_matr = np.dot(X_train.T, X_train)
    U, S, V_T = np.linalg.svd(Cov_matr, compute_uv=True)
    Sigma_minus_half = U @ np.diag(1. / np.sqrt(S)) @ V_T
    X_train = X_train @ Sigma_minus_half
    # The same for test sample
    X_test = (X_test - means) @ Sigma_minus_half
    return X_train, X_test
def set_bn(n):
    """Window width shared by the BM/OBM/SV variance estimators.

    Kept in one place so all estimators stay in sync; change it only here.
    A sample-size dependent alternative would be ``round(2 * n ** 0.33)``.
    """
    return 10
def set_function(f_type,traj,inds_arr,params):
    """Evaluate the requested test function on every trajectory.

    Args:
        f_type: one of "posterior_mean", "posterior_prob_point",
            "posterior_ll_point", "posterior_prob_mean",
            "posterior_prob_mean_probit", "posterior_prob_variance",
            "posterior_ll_mean", "success_prob_point", "success_prob_mean",
            "success_prob_varaince" (misspelling kept for compatibility).
        traj: list of trajectories, each of shape (n_steps, d).
        inds_arr: coordinate/point indices; only used by the point-wise
            f_types, ignored otherwise.
        params: dictionary with fields "X", "Y" (test covariates/labels).

    Returns:
        np.array of function values of the respective shapes.

    Raises:
        ValueError: if ``f_type`` is not one of the supported values.
    """
    if f_type == "posterior_mean":#params is ignored in this case
        f_vals = np.zeros((len(traj),len(traj[0]),len(inds_arr)),dtype = float)
        for traj_ind in range(len(traj)):
            for point_ind in range(len(inds_arr)):
                f_vals[traj_ind,:,point_ind] = set_f(traj[traj_ind],inds_arr[point_ind])
    elif f_type == "posterior_prob_point":
        f_vals = np.zeros((len(traj),len(traj[0]),len(inds_arr)),dtype = float)
        for traj_ind in range(len(traj)):
            for point_ind in range(len(inds_arr)):
                f_vals[traj_ind,:,point_ind] = set_f_point_prob(traj[traj_ind],params,inds_arr[point_ind])
    elif f_type == "posterior_ll_point":#evaluate log-probabilies at one point
        # NOTE(review): the loop ranges over len(params["X"]) but indexes
        # inds_arr[point_ind] -- confirm the two are meant to coincide.
        f_vals = np.zeros((len(traj),len(traj[0]),len(params["X"])),dtype = float)
        for traj_ind in range(len(traj)):
            for point_ind in range(len(params["X"])):
                f_vals[traj_ind,:,point_ind] = set_f_point_ll(traj[traj_ind],params,inds_arr[point_ind])
    elif f_type == "posterior_prob_mean":
        f_vals = np.zeros((len(traj),len(traj[0]),1),dtype = float)
        for traj_ind in range(len(traj)):
            f_vals[traj_ind,:,0] = set_f_average_prob(traj[traj_ind],params)
    elif f_type == "posterior_prob_mean_probit":
        f_vals = np.zeros((len(traj),len(traj[0]),1),dtype = float)
        for traj_ind in range(len(traj)):
            f_vals[traj_ind,:,0] = set_f_average_prob_probit(traj[traj_ind],params)
    elif f_type == "posterior_prob_variance":
        f_vals = np.zeros((len(traj),len(traj[0]),1),dtype = float)
        for traj_ind in range(len(traj)):
            f_vals[traj_ind,:,0] = set_f_average_var(traj[traj_ind],params)
    elif f_type == "posterior_ll_mean":#evaluate average log-probabilities over test set
        f_vals = np.zeros((len(traj),len(traj[0]),1),dtype = float)
        for traj_ind in range(len(traj)):
            f_vals[traj_ind,:,0] = set_f_average_ll(traj[traj_ind],params)
    elif f_type == "success_prob_point":#success probabilities at given points
        f_vals = np.zeros((len(traj),len(traj[0]),len(inds_arr)),dtype = float)
        for traj_ind in range(len(traj)):
            for point_ind in range(len(inds_arr)):
                f_vals[traj_ind,:,point_ind] = set_f_success_point(traj[traj_ind],params,inds_arr[point_ind])
    elif f_type == "success_prob_mean":#success probabilities averaged
        f_vals = np.zeros((len(traj),len(traj[0]),1),dtype = float)
        for traj_ind in range(len(traj)):
            f_vals[traj_ind,:,0] = set_f_success_mean(traj[traj_ind],params)
    elif f_type == "success_prob_varaince":#variance estimate for success probabilities
        # NOTE(review): set_f_success_variance is not defined in this module
        # (only set_f_average_var exists) -- this branch would raise NameError.
        f_vals = np.zeros((len(traj),len(traj[0]),1),dtype = float)
        for traj_ind in range(len(traj)):
            f_vals[traj_ind,:,0] = set_f_success_variance(traj[traj_ind],params)
    else:#smthing strange
        # Fix: raising a plain string is itself a TypeError in Python 3;
        # raise a proper exception instead.
        raise ValueError("Not implemented error in set_function: check f_type value")
    return f_vals
def set_f(X, ind):
    """Return an independent copy of column ``ind`` of ``X``.

    Arguments:
        X: np.array of shape (n, d);
        ind: int, 0 <= ind < d.
    """
    column = X[:, ind]
    return copy.deepcopy(column)
def set_f_point_prob(X, params, ind):
    """Sigmoid likelihood of the test label params["Y"][ind] for each row of X."""
    features = params["X"][ind, :]
    label = params["Y"][ind]
    sign = 1 - 2 * label
    margins = X @ features
    return 1.0 / (1.0 + np.exp(sign * margins))
def set_f_point_ll(X, params, ind):
    """Point-wise test log-probability log p(y|x) for the ind-th test pair.

    Args:
        params: dict with fields "X" (covariates) and "Y" (labels).
    """
    features = params["X"][ind, :]
    label = params["Y"][ind]
    scores = X @ features
    return -label * np.log(1 + np.exp(-scores)) - (1 - label) * np.log(1 + np.exp(scores))
def set_f_average_prob(X, params):
    """Average (over the test set) sigmoid likelihood of the test labels."""
    test_X = params["X"]
    test_Y = params["Y"]
    signs = (1 - 2 * test_Y).reshape(len(test_Y), 1)
    margins = np.dot(X, (signs * test_X).T)
    return np.mean(1.0 / (1.0 + np.exp(margins)), axis=1)
def set_f_average_prob_probit(X, params):
    """Average probit (Gaussian-CDF) likelihood of the test labels."""
    test_X = params["X"]
    test_Y = params["Y"]
    signs = (2 * test_Y - 1).reshape(len(test_Y), 1)
    return np.mean(spstats.norm.cdf(np.dot(X, (signs * test_X).T)), axis=1)
def set_f_average_var(X, params):
    """Variance (over the test set) of the sigmoid likelihoods."""
    test_X = params["X"]
    test_Y = params["Y"]
    signs = (1 - 2 * test_Y).reshape(len(test_Y), 1)
    probs = 1.0 / (1.0 + np.exp(np.dot(X, (signs * test_X).T)))
    return np.mean(probs ** 2, axis=1) - np.mean(probs, axis=1) ** 2
def set_f_average_ll(X, params):
    """Average test log-probability log p(y|x) over the whole test set."""
    test_X = params["X"]
    test_Y = params["Y"].reshape(1, len(params["Y"]))
    scores = np.dot(X, test_X.T)
    ll = -test_Y * np.log(1 + np.exp(-scores)) - (1 - test_Y) * np.log(1 + np.exp(scores))
    return np.mean(ll, axis=1)
def set_f_success_point(X, params, ind):
    """Success probability sigmoid(x . obs) for the ind-th test covariate vector."""
    features = params["X"][ind, :]
    margins = X @ features
    return 1.0 / (1.0 + np.exp(-margins))
def set_f_success_mean(X, params):
    """Success probability sigmoid(x . obs) averaged over the test covariates.

    (The unused local ``Y`` was removed: the labels play no role here.)
    """
    obs = params["X"]
    return np.mean(1.0 / (1.0 + np.exp(-np.dot(X, obs.T))), axis=1)
def qform_q(A, X, Y):
    """Row-wise quadratic form: for each i, compute <A x_i, y_i>.

    Arguments:
        A: np.array of shape (d, d).
        X, Y: np.arrays of shape (n, d).
    Returns:
        np.array of shape (n,).
    """
    transformed = X.dot(A)
    return (transformed * Y).sum(axis=1)
def PWP(x, W):
    """Slow (dense) multiplication by P W P, P being the centering projector.

    Args:
        x: np.array of shape (n,);
        W: bn-diagonal Toeplitz matrix of shape (n, n), e.g. in CSR format.
    Returns:
        np.array of shape (n,).
    """
    centered = x - np.mean(x)
    y = W @ centered
    return y - np.mean(y)
def mult_W(x, c):
    """Fast multiplication by the Toeplitz matrix W via circulant embedding + FFT.

    Args:
        x: np.array of shape (n,);
        c: first column of the (2n-1)-sized circulant embedding of W.
    Returns:
        np.array of shape (n,): the matrix-vector product W x.
    """
    n = len(x)
    padded = np.zeros(2 * n - 1)
    padded[:n] = x
    return np.real(ifft(fft(c) * fft(padded)))[:n]
def PWP_fast(x, c):
    """FFT-based version of PWP: center, multiply by W, re-center."""
    centered = x - np.mean(x)
    y = mult_W(centered, c)
    return y - np.mean(y)
def Spectral_var(Y, W):
    """Spectral estimate of the asymptotic variance of Y for the kernel W."""
    weighted = PWP_fast(Y, W)
    return np.dot(weighted, Y) / len(Y)
def simple_funcs(X, ind):
    """Simple row-wise test functions: ind 0 -> cos(sum of row),
    ind 1 -> cos(sum of squared row).  Any other ind yields None."""
    if ind == 0:
        return np.cos(np.sum(X, axis=1))
    if ind == 1:
        return np.cos(np.sum(X ** 2, axis=1))
def init_samples(X):
    """Build the sample matrix by applying set_f to every coordinate of X."""
    samples = np.zeros_like(X)
    for col_ind in range(X.shape[1]):
        samples[:, col_ind] = set_f(X, col_ind)
    return samples
def construct_ESVM_kernel(n):
    """First column of the circulant embedding of the trapezoidal ESVM kernel.

    (Dead locals ``trap_center`` and ``diag_elems`` were removed: only the
    two linear ramps are actually written into ``c``.)

    Arguments:
        n - int, size of the matrix;
    Returns:
        c - np.array of size (2n-1).
    """
    bn = set_bn(n)
    trap_left = np.linspace(0, 1, bn)
    trap_right = np.linspace(1, 0, bn)
    c = np.zeros(2 * n - 1, dtype=np.float64)
    # Flat top of the trapezoid around lag zero, ramps on both sides; the
    # tail entries mirror the head to make the embedding circulant.
    c[0:bn + 1] = 1.0
    c[bn + 1:2 * bn + 1] = trap_right
    c[-bn:] = 1.0
    c[-2 * bn:-bn] = trap_left
    return c
def split_dataset(X, Y, test_size):
    """Deterministically split (X, Y) into train and test parts.

    Args:
        test_size: number of pairs (X, Y) to put into the test part.
    Returns:
        (X_train, Y_train, X_test, Y_test).
    """
    # Fixed seed: the split is reproducible across calls.
    np.random.seed(1814)
    test_idx = np.random.choice(len(X), size=test_size, replace=False)
    keep = np.ones(len(Y), dtype=bool)
    keep[test_idx] = False
    X_test = copy.deepcopy(X[test_idx, :])
    Y_test = Y[test_idx]
    X_train = np.delete(X, test_idx, 0)
    Y_train = Y[keep]
    return X_train, Y_train, X_test, Y_test
| true |
c72fddd9b4fb739c2158295bdf5e35b85d984145 | Python | daniel-amos/SC-T-201-GSKI | /Timi/Timi_3/arr_class.py | UTF-8 | 1,582 | 3.90625 | 4 | [] | no_license | class ArrayList:
    def __init__(self):
        # NOTE(review): size starts at 3 although no user data is stored yet;
        # the other methods treat `size` as (element count + sentinel
        # padding), not a plain count -- confirm the intended invariant.
        self.size = 3
        self.capacity = 4
        # Backing storage, pre-filled with zeros.
        self.arr = [0] * self.capacity
    def print_array_list(self):
        # Prints the first (size - 1) slots comma-separated on one line;
        # the last printed slot gets no trailing comma.
        for ix in range(self.size - 1):
            if ix == self.size - 2:
                print("{}".format(self.arr[ix]), end="")
            else:
                print("{},".format(self.arr[ix]), end="")
        print("\n")
    def append(self, value):
        # Grow the backing list when full, then store the value.
        # NOTE(review): the value is written at index (size - 1), i.e. one
        # slot before the conventional append position, consistent with the
        # off-by-padding `size` invariant used elsewhere -- confirm.
        if self.size == self.capacity:
            self.resize()
        self.arr[self.size - 1] = value
        self.size += 1
    def get_at(self, ix):
        # Direct, unchecked read of slot ix of the backing list.
        return self.arr[ix]
    def get_last(self):
        # Slot at (size - 1): the position append() writes to next.
        return self.arr[self.size - 1]
def resize(self):
new_list = ([0] * self.capacity) * 2
for ix in range(self.size):
new_list[ix] = self.arr[ix]
self.arr = new_list
    def prepend(self, value):
        # NOTE(review): this shift is broken: at ix == capacity it writes
        # arr[capacity + 1], which raises IndexError when len(arr) ==
        # capacity, and the copy direction (arr[ix+1] = arr[ix]) never
        # fills slot 1 from slot 0.  Needs a rewrite once the intended
        # `size` invariant is confirmed.
        for ix in range(self.capacity, 0, -1):
            self.arr[ix + 1] = self.arr[ix]
        self.arr[0] = value
        self.size += 1
    def insert(self, value, index):
        # Shift everything right, then drop the value into `index`.
        # NOTE(review): same off-by-one/IndexError hazard as prepend():
        # arr[capacity + 1] is written when the backing list is exactly
        # `capacity` long.  Only works after resize() has over-allocated.
        for ix in range(self.capacity, 0, -1):
            self.arr[ix + 1] = self.arr[ix]
        self.arr[index] = value
        self.size += 1
    def remove(self, index):
        # NOTE(review): list.remove() deletes the first slot holding an
        # equal VALUE, not the slot at `index`; and range(index, 0,
        # self.capacity) has a positive step with start >= stop, so the
        # compaction loop below never executes.  Needs a rewrite.
        self.arr.remove(self.arr[index])
        for ix in range(index, 0, self.capacity):
            self.arr[ix - 1] = self.arr[ix]
        self.size -= 1
# Smoke-test drive of ArrayList: append, insert, remove, printing after each.
my_arr = ArrayList()
my_arr.append(5)
my_arr.print_array_list()
my_arr.append(8)
my_arr.print_array_list()
my_arr.insert(12, 2)
my_arr.print_array_list()
my_arr.remove(0)
my_arr.print_array_list()
| true |
e6d07ed7a48e8d176bb0a5cbd2bb9c33f9df0418 | Python | sruthi899/learn-python | /s.py | UTF-8 | 30 | 2.6875 | 3 | [] | no_license | x=int(input('enter'))
print(x) | true |
a98ee631e3a3b16b81ddaf4e74e9294becc5aa97 | Python | jcottongin/stock | /crypCompare | UTF-8 | 1,631 | 3.046875 | 3 | [] | no_license | #!/usr/bin/python3
#https://www.cryptocompare.com/coins/guides/how-to-use-our-api/
#api key
#https://www.youtube.com/watch?v=qq0gbTHBI9o
import cryptocompare
# Spot price of BTC in USD via the cryptocompare client library.
price = cryptocompare.get_price('BTC', 'USD')
print(price)
import requests
from datetime import datetime
# REST endpoint: fsym = base symbol, tsyms = comma-separated quote symbols.
url = "https://min-api.cryptocompare.com/data/price?fsym={}&tsyms={}"
##convert dollar to euro
def get_d2e(currency):
    """Fetch the USD -> *currency* conversion rate as decoded JSON.

    Returns False when the request or JSON decoding fails.

    NOTE(review): the original body referenced undefined names ('request'
    and 'coin'); based on the function name ("dollar to euro"), USD is
    assumed as the base symbol here -- confirm the intended base.
    """
    try:
        response = requests.get(url.format("USD", currency)).json()
        return response
    except Exception:
        return False
# Print one timestamped USD -> EUR quote (was an endless loop, now 1 pass).
# NOTE(review): this uses the cryptocompare library directly; the get_d2e()
# helper defined above is never called.
for counter in range(1):#while True:
    date_time=datetime.now()
    date_time=date_time.strftime("%m/%d/%Y %H/%M")
    currentPrice = cryptocompare.get_price("USD", "EUR")###, "BTC, EUR") # get btc and usd
    if currentPrice:
        print(date_time, "$", currentPrice)
## btc to usd
def get_price(coin, currency):
    """Fetch the *coin* price expressed in *currency* as decoded JSON.

    Returns False when the request or JSON decoding fails.
    """
    try:
        # Fix: the imported module is 'requests'; 'request' was a NameError.
        response = requests.get(url.format(coin, currency)).json()
        return response
    except Exception:
        return False
# Print five timestamped BTC -> USD quotes via the cryptocompare library.
# NOTE(review): the locally defined get_price(coin, currency) helper above
# is shadowed here by cryptocompare.get_price and never used.
for counter in range(5):#while True:
    date_time=datetime.now()
    date_time=date_time.strftime("%m/%d/%Y %H/%M")
    currentPrice = cryptocompare.get_price("BTC", "USD")###, "BTC, EUR") # get btc and usd
    if currentPrice:
        print(date_time, "$", currentPrice)
#get_price()
## btc to euro
def get_euro(coin, currency):
    """Fetch the *coin* price expressed in *currency* (intended: EUR) as JSON.

    Returns False when the request or JSON decoding fails.
    """
    try:
        # Fix: the imported module is 'requests'; 'request' was a NameError.
        response = requests.get(url.format(coin, currency)).json()
        return response
    except Exception:
        return False
# Print five timestamped BTC -> EUR quotes via the cryptocompare library.
for counter in range(5):#while True:
    date_time=datetime.now()
    date_time=date_time.strftime("%m/%d/%Y %H/%M")
    currentPrice = cryptocompare.get_price("BTC", "EUR")###, "BTC, EUR") # get btc and usd
    if currentPrice:
        print(date_time, "$", currentPrice)
#get_price() | true |
b9cff63944cc4a4579cec9a643a943e8cc3190ff | Python | Aasthaengg/IBMdataset | /Python_codes/p02577/s532752016.py | UTF-8 | 122 | 3.421875 | 3 | [] | no_license | num = input()
# An integer is divisible by 9 exactly when its digit sum is divisible by 9.
# Renamed the accumulator (it shadowed the builtin sum) and iterate the
# digits directly instead of indexing through range(len(num)).
digit_sum = 0
for digit in num:
    digit_sum += int(digit)
if digit_sum % 9 == 0:
    print("Yes")
else:
    print("No")
2f37856a79f523165e68c54552734b22b25177df | Python | mehaktawakley/Data-Structures-and-Algorithms | /Data Structures/LinkedList.py | UTF-8 | 1,990 | 3.828125 | 4 | [] | no_license | #Creating Node
class node:
    """A single cell of a singly linked list."""
    def __init__(self, dataval=None):
        # A freshly created node carries its payload and no successor yet.
        self.nextval = None
        self.dataval = dataval
#Singly Linked List
class slinkedlist():
    """Singly linked list built on top of `node`.

    The head node created in __init__ is a sentinel: it stores no user
    data, so the list contents start at ``self.head.nextval``.
    """
    def __init__(self):
        self.head = node()

    def append(self, data):
        """Add ``data`` at the end of the list."""
        NewNode = node(data)
        cur = self.head
        while cur.nextval != None:
            cur = cur.nextval
        cur.nextval = NewNode

    def length(self):
        """Return the number of data nodes (the sentinel is not counted)."""
        cur = self.head
        count = 0
        while cur.nextval != None:
            count += 1
            cur = cur.nextval
        return count

    def display(self):
        """Return the stored values as a plain Python list."""
        datalist = []
        cur = self.head
        while cur.nextval != None:
            cur = cur.nextval
            datalist.append(cur.dataval)
        return datalist

    def getdata(self, index):
        """Return the value at 0-based position ``index``."""
        if index >= self.length():
            return "Index out of range"
        i = 0
        cur = self.head
        while cur.nextval != None:
            cur = cur.nextval
            if i == index:
                return cur.dataval
            i = i+1

    def insert(self, index, data):
        """Insert ``data`` before position ``index``."""
        if index > self.length():
            return "Index out of range."
        elif index == self.length():
            self.append(data)
            return "Node Added"
        NewNode = node(data)
        i = 0
        cur = self.head
        # Fix: the link attribute is 'nextval' -- 'cur.next' raised
        # AttributeError, and assigning 'LastNode.next' silently created an
        # unrelated attribute so the new node was never linked in.
        while cur.nextval != None:
            LastNode = cur
            cur = cur.nextval
            if i == index:
                LastNode.nextval = NewNode
                NewNode.nextval = cur
                return "Node Added"
            i += 1

    def remove(self, index):
        """Unlink the node at 0-based position ``index``."""
        if index >= self.length():
            return "Index out of range."
        i = 0
        cur = self.head
        # Fix: 'nextval', not 'next' (same AttributeError as in insert()).
        while cur.nextval != None:
            LastNode = cur
            cur = cur.nextval
            if index == i:
                LastNode.nextval = cur.nextval
                return "Node Removed"
            i += 1
| true |
aa44e8845bb647aac0b3d76e8810805739cff82c | Python | jason790/crayimage | /crayimage/cosmicGAN/generator.py | UTF-8 | 10,002 | 2.609375 | 3 | [
"MIT"
] | permissive | from ..nn import Expression
from ..nn.layers import concat_conv
from lasagne import *
# Public API of this module.
# NOTE(review): BackgroundGenerator3 is defined below but not exported --
# confirm whether it should be added here.
__all__ = [
  'BackgroundGenerator',
  'ParticleGenerator',
  'SimpleParticleGenerator',
  'SimpleBackgroundGenerator'
]
class SimpleBackgroundGenerator(Expression):
  """Shallow generator: per-pixel remapping of uniform noise followed by a
  single wide convolution to model spatially correlated background noise."""
  def __init__(self, input_shape=(1, 132, 132)):
    self.input_shape = input_shape
    input_noise = layers.InputLayer(
      shape=(None,) + input_shape, input_var=None,
      name='input noise'
    )
    self.input_noise = input_noise
    # It is easier to sample a uniform distribution than e.g. a binomial
    # with n = 1023, so a learnable per-pixel transformation (a tiny NN
    # with 32 hidden sigmoid units, i.e. two 1x1 convolutions) redistributes
    # the input noise.
    remap_hidden = layers.Conv2DLayer(
      input_noise,
      num_filters=32, filter_size=(1, 1), pad='valid',
      nonlinearity=nonlinearities.sigmoid,
      name='redist 1'
    )
    remap_out = layers.Conv2DLayer(
      remap_hidden,
      num_filters=1, filter_size=(1, 1), pad='valid',
      nonlinearity=nonlinearities.linear,
      name='redist 2'
    )
    # A wider convolution models possible large noise structures.
    structure = layers.Conv2DLayer(
      remap_out,
      num_filters=8, filter_size=(5, 5), pad='valid',
      nonlinearity=nonlinearities.elu,
      name='conv 1'
    )
    readout = layers.Conv2DLayer(
      structure,
      num_filters=1, filter_size=(1, 1), pad='valid',
      nonlinearity=nonlinearities.linear,
      name='conv2'
    )
    super(SimpleBackgroundGenerator, self).__init__(readout)
class BackgroundGenerator3(Expression):
  """Three-level U-Net-style background generator: per-pixel noise
  redistribution, a 3-stage conv/pool encoder, and a mirrored decoder with
  skip connections (via concat_conv), followed by border slicing."""
  def __init__(self, input_shape=(1, 158, 158)):
    self.input_shape = input_shape
    input_noise = layers.InputLayer(
      shape=(None,) + input_shape, input_var=None,
      name='input noise'
    )
    self.input_noise = input_noise
    ### Since it is easier to just generate uniform distribution rather than
    ### binomial with n = 1023
    ### we just make a learnable custom transformation
    ### which is approximated with a small NN with 32 hidden sigmoid units
    ### applied to each pixel.
    ### which is essentially 2 convs with filter_size = (1, 1)
    redist1 = layers.Conv2DLayer(
      input_noise,
      num_filters=32, filter_size=(1, 1), pad='valid',
      nonlinearity=nonlinearities.sigmoid,
      name='redist 1'
    )
    redist2 = layers.Conv2DLayer(
      redist1,
      num_filters=1, filter_size=(1, 1), pad='valid',
      nonlinearity=nonlinearities.linear,
      name='redist 2'
    )
    ### now to model possible large noise structures
    # Encoder: three conv(3x3, 'valid') + max-pool stages, doubling filters.
    conv1 = layers.Conv2DLayer(
      redist2,
      num_filters=2, filter_size=(3, 3), pad='valid',
      nonlinearity=nonlinearities.elu,
      name='conv 1'
    )
    pool1 = layers.MaxPool2DLayer(
      conv1, pool_size=(2, 2),
      name='pool 1'
    )
    conv2 = layers.Conv2DLayer(
      pool1,
      num_filters=4, filter_size=(3, 3), pad='valid',
      nonlinearity=nonlinearities.elu,
      name='conv 2'
    )
    pool2 = layers.MaxPool2DLayer(
      conv2, pool_size=(2, 2),
      name='pool 2'
    )
    conv3 = layers.Conv2DLayer(
      pool2,
      num_filters=8, filter_size=(3, 3), pad='valid',
      nonlinearity=nonlinearities.elu,
      name='conv 3'
    )
    pool3 = layers.MaxPool2DLayer(
      conv3, pool_size=(2, 2),
      name='pool 3'
    )
    # Decoder: upscale and merge with the matching encoder stage.
    depool3 = layers.Upscale2DLayer(
      pool3, scale_factor=(2, 2),
      name='upscale 3'
    )
    deconv3 = concat_conv(
      depool3, pool2,
      num_filters=4, filter_size=(3, 3), pad='valid',
      nonlinearity=nonlinearities.elu,
      name='deconv 3'
    )
    depool2 = layers.Upscale2DLayer(
      deconv3, scale_factor=(2, 2),
      name='upscale 2'
    )
    deconv2 = concat_conv(
      pool1, depool2,
      num_filters=2, filter_size=(3, 3), pad='valid',
      nonlinearity=nonlinearities.elu,
      name='deconv 2'
    )
    depool1 = layers.Upscale2DLayer(
      deconv2, scale_factor=(2, 2),
      name='upscale 1'
    )
    deconv1 = concat_conv(
      redist2, depool1,
      num_filters=1, filter_size=(3, 3), pad='valid',
      nonlinearity=nonlinearities.elu,
      name='deconv 1'
    )
    # Trim one pixel from each spatial border (axes 2 and 3).
    slice1 = layers.SliceLayer(
      deconv1, indices=slice(1, -1), axis=2
    )
    slice2 = layers.SliceLayer(
      slice1, indices=slice(1, -1), axis=3
    )
    super(BackgroundGenerator3, self).__init__(slice2)
class BackgroundGenerator(Expression):
  """Two-level U-Net-style background generator (softplus activations):
  per-pixel noise redistribution, a 2-stage conv/pool encoder, a mirrored
  decoder with skip connections, and border slicing."""
  def __init__(self, input_shape=(1, 144, 144)):
    self.input_shape = input_shape
    input_noise = layers.InputLayer(
      shape=(None,) + input_shape, input_var=None,
      name='input noise'
    )
    self.input_noise = input_noise
    ### Since it is easier to just generate uniform distribution rather than
    ### binomial with n = 1023
    ### we just make a learnable custom transformation
    ### which is approximated with a small NN with 32 hidden sigmoid units
    ### applied to each pixel.
    ### which is essentially 2 convs with filter_size = (1, 1)
    redist1 = layers.Conv2DLayer(
      input_noise,
      num_filters=32, filter_size=(1, 1), pad='valid',
      nonlinearity=nonlinearities.sigmoid,
      name='redist 1'
    )
    redist2 = layers.Conv2DLayer(
      redist1,
      num_filters=1, filter_size=(1, 1), pad='valid',
      nonlinearity=nonlinearities.linear,
      name='redist 2'
    )
    ### now to model possible large noise structures
    conv1 = layers.Conv2DLayer(
      redist2,
      num_filters=4, filter_size=(3, 3), pad='valid',
      nonlinearity=nonlinearities.softplus,
      name='conv 1'
    )
    pool1 = layers.MaxPool2DLayer(
      conv1, pool_size=(2, 2),
      name='pool 1'
    )
    conv2 = layers.Conv2DLayer(
      pool1,
      num_filters=8, filter_size=(3, 3), pad='valid',
      nonlinearity=nonlinearities.softplus,
      name='conv 2'
    )
    pool2 = layers.MaxPool2DLayer(
      conv2, pool_size=(2, 2),
      name='pool 2'
    )
    # Decoder: upscale and merge with the matching encoder stage.
    depool2 = layers.Upscale2DLayer(
      pool2, scale_factor=(2, 2),
      name='upscale 2'
    )
    deconv2 = concat_conv(
      pool1, depool2,
      num_filters=4, filter_size=(3, 3), pad='valid',
      nonlinearity=nonlinearities.softplus,
      name='deconv 2'
    )
    depool1 = layers.Upscale2DLayer(
      deconv2, scale_factor=(2, 2),
      name='upscale 1'
    )
    deconv1 = concat_conv(
      redist2, depool1,
      num_filters=1, filter_size=(3, 3), pad='valid',
      nonlinearity=nonlinearities.linear,
      name='deconv 1'
    )
    # Trim one pixel from each spatial border (axes 2 and 3).
    slice1 = layers.SliceLayer(
      deconv1, indices=slice(1, -1), axis=2
    )
    slice2 = layers.SliceLayer(
      slice1, indices=slice(1, -1), axis=3
    )
    super(BackgroundGenerator, self).__init__(slice2)
class ParticleGenerator(Expression):
  """Generator that transforms a GEANT-simulated track image through a small
  U-Net (encoder/decoder with skip connections) plus a per-pixel readout,
  then averages the result with a separately supplied background image."""
  def __init__(self, input_shape=(1, 142, 142), noise_shape=(1, 128, 128)):
    input_geant = layers.InputLayer(
      shape=(None,) + input_shape, input_var=None,
      name='GEANT input'
    )
    self.input_geant = input_geant
    input_background = layers.InputLayer(
      shape=(None,) + noise_shape, input_var=None,
      name='background input'
    )
    self.input_background = input_background
    conv1 = layers.Conv2DLayer(
      input_geant,
      num_filters=8, filter_size=(3, 3), pad='valid',
      nonlinearity=nonlinearities.softplus,
      name='conv1'
    )
    pool1 = layers.MaxPool2DLayer(conv1, pool_size=(2, 2), name='pool1')
    conv2 = layers.Conv2DLayer(
      pool1, num_filters=16, filter_size=(3, 3), pad='valid',
      nonlinearity=nonlinearities.softplus,
      name='conv2'
    )
    pool2 = layers.MaxPool2DLayer(conv2, pool_size=(2, 2), name='pool2')
    # Decoder: upscale, merge with the matching encoder stage via concat_conv.
    depool2 = layers.Upscale2DLayer(pool2, scale_factor=(2, 2), name='depool2')
    u2 = concat_conv(
      pool1, depool2, pad='valid',
      num_filters=8, filter_size=(3, 3),
      nonlinearity=nonlinearities.softplus,
      name='deconv2'
    )
    depool1 = layers.Upscale2DLayer(u2, scale_factor=(2, 2), name='depool1')
    deconv1 = concat_conv(
      depool1, input_geant, pad='valid',
      num_filters=1, filter_size=(3, 3),
      nonlinearity=nonlinearities.softplus,
      name='deconv1'
    )
    # Per-pixel readout network (two 1x1 convolutions).
    readout1 = layers.Conv2DLayer(
      deconv1,
      num_filters=32, filter_size=(1, 1),
      nonlinearity=nonlinearities.sigmoid,
      name='readout1'
    )
    readout2 = layers.Conv2DLayer(
      readout1,
      num_filters=1, filter_size=(1, 1),
      nonlinearity=nonlinearities.linear,
      name='readout2'
    )
    # Center-crop to the common shape and average with the background.
    sum_l = layers.ElemwiseSumLayer(
      [readout2, input_background],
      cropping=[None, None, 'center', 'center']
    )
    norm_l = layers.ExpressionLayer(sum_l, lambda x: x / 2)
    super(ParticleGenerator, self).__init__(norm_l)
class SimpleParticleGenerator(Expression):
  """Shallow particle generator: a single smearing convolution over the
  GEANT input plus a per-pixel readout, averaged with a background image."""
  def __init__(self, input_shape=(1, 142, 142), noise_shape=(1, 128, 128)):
    input_geant = layers.InputLayer(
      shape=(None,) + input_shape, input_var=None,
      name='GEANT input'
    )
    self.input_geant = input_geant
    input_background = layers.InputLayer(
      shape=(None,) + noise_shape, input_var=None,
      name='background input'
    )
    self.input_background = input_background
    # 5x5 linear convolution smears the simulated track.
    smear = layers.Conv2DLayer(
      input_geant,
      num_filters=1, filter_size=(5, 5), pad='valid',
      nonlinearity=nonlinearities.linear,
      name='conv1'
    )
    # Per-pixel readout (two 1x1 convolutions).
    readout_hidden = layers.Conv2DLayer(
      smear, num_filters=16, filter_size=(1, 1), pad='valid',
      nonlinearity=nonlinearities.sigmoid,
      name='conv2'
    )
    readout = layers.Conv2DLayer(
      readout_hidden, num_filters=1, filter_size=(1, 1), pad='valid',
      nonlinearity=nonlinearities.elu,
      name='conv3'
    )
    # Center-crop to the common shape and average with the background.
    merged = layers.ElemwiseSumLayer(
      [readout, input_background],
      cropping=[None, None, 'center', 'center']
    )
    halved = layers.ExpressionLayer(
      merged, lambda x: x / 2
    )
    super(SimpleParticleGenerator, self).__init__(halved)
| true |
cbb57db5768b590bfb63acb8525f2bc9469dceb1 | Python | Ashwathguru/DATA-STRUCTURES | /HEAP/testHeap.py | UTF-8 | 1,048 | 3.109375 | 3 | [] | no_license | import heap
import PQ
import Airport
print("MAX HEAP IMPLEMENTATION ")
h=heap.MaxHeap([1,2,3,4,5])
h._buildheap()
h.printlist()
print("EXTRACTING MAX")
print(h.extract_max())
h.printlist()
print("Ascending Order")
h.maxHeap_sort()
h.printlist()
h.heap_add(6)
h.printlist()
print("Priority Queue")
print("Please Insert [ID,Priority,Arrival Time,Execution time,Deadline]")
l=[1,1,1,1,5]
pq=PQ.priorityQueue(l)
pq.heap_add([1,2,1,5,10])
pq.heap_add([2,3,1,5,11])
pq.heap_add([3,3,3,6,12])
pq.descOrder()
print(pq.data)
WT, tat = pq.waitingTime()
l2 = pq.chkDeadline(tat)
print(l2)
print("AIRPORT")
l1 = [20180808011100, 1, 'Landing']
air = Airport.Min_Heap(l1)
print("Please Insert in Format [Time Stamp,Flight No.,Takeoff/Landing]")
air.heap_add([20181208101100, 2, 'Takeoff'])
air.heap_add([20181208101130, 3, 'Landing'])
air.heap_add([20181208100030, 4, 'Takeoff'])
print(air.data)
ans= air.extracting_min()
print(ans)
print("Flight No.: ",ans[1])
print("Event:",ans[2])
print("Time: ",ans[0]) | true |
5490dc59f06826b64c52be519c45adde7518f2ac | Python | judithhouston/ess-notebooks | /make_config.py | UTF-8 | 2,003 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
import argparse
import os
if __name__ == '__main__':
    # Build the CLI: one optional positional path plus a --force flag.
    parser = argparse.ArgumentParser(
        description=
        'Makes local non-versioned dataconfig.py for working with ess notebook data. Test data directory found https://github.com/scipp/ess-notebooks-data.git assumed to be checked out locally. Generates valid dataconfig.py with the correct local paths set',
        prog='make_config')
    positional_args = parser.add_argument_group('Positional arguments')
    optional_args = parser.add_argument_group('Optional arguments')
    positional_args.add_argument(
        'Root',
        nargs='?',
        help=
        'The absolute path to root directory of the cloned ess-notebooks-data repository'
    )
    optional_args.add_argument('-f',
                               '--force',
                               action='store_true',
                               default=False,
                               help='Force overwrite of existing config')
    # run the arg parser
    arguments = parser.parse_args()
    # A pre-existing dataconfig.py is detected by simply trying to import it.
    existing_config = False
    try:
        import dataconfig  # noqa: F401
        existing_config = True
    except ImportError:
        pass
    # Validate the supplied root directory before writing anything.
    if not arguments.Root:
        raise ValueError('Must provide Root directory. See help')
    if not os.path.exists(arguments.Root):
        raise ValueError('Path {} does not exist on local machine'.format(
            arguments.Root))
    if not os.path.exists(os.path.join(arguments.Root, 'ess')):
        # Fix: dropped a no-op .format() call on a message with no placeholder.
        raise ValueError(
            'Bad path. Expected directory to contain ess directory')
    if existing_config and not arguments.force:
        # Fix: the message referred to config.py, but the file this script
        # detects and writes is dataconfig.py.
        raise RuntimeError(
            'dataconfig.py already exists. cannot overwrite without force option. see help.'
        )
    # Write the one-line config with the chosen data root.
    with open('dataconfig.py', 'w') as fh:
        fh.write('data_root="{}"\n'.format(arguments.Root))
    print('dataconfig.py written')
a1a48d913ea94cd2b171919ae466d32213176cab | Python | dabraude/PYSpeechLib | /src/algorithms/energy.py | UTF-8 | 420 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
def energy(framedData):
    """Compute the energy and log-energy of each frame.

    Parameters
    ----------
    framedData: numpy ndarray
        framed signal, one frame per row

    Returns
    -------
    (ndarray, ndarray)
        per-frame energy and its natural logarithm
    """
    eng = np.square(framedData).sum(axis=1)
    logEng = np.log(eng)
    return (eng, logEng)
eecd9611354bd7c7b8f28f0bee725b81e1d7bb07 | Python | yamato7503/python | /hoge2.py | UTF-8 | 159 | 3.375 | 3 | [] | no_license | class Hoge(object):
pass
def initialize(obj, a, b):
    """Attach attributes ``a`` and ``b`` to *obj* (free-function initializer)."""
    setattr(obj, 'a', a)
    setattr(obj, 'b', b)
# Demo: create a Hoge instance, attach attributes via initialize(), print them.
hoge = Hoge()
initialize(hoge, 10, 'hoge')
print (hoge.a)
print (hoge.b)
| true |
4c107a3e3ec12df8c52317241f7e25dc7c31fc8d | Python | danoliveiradev/PythonExercicios | /ex092.py | UTF-8 | 552 | 3.484375 | 3 | [
"MIT"
] | permissive | from datetime import date
# Build an employee record (Portuguese prompts) keyed by field name.
cadastro = {}
cadastro['nome'] = input('Nome: ').capitalize().strip()
anoNasc = int(input('Ano de Nascimento: '))
# Age from the birth year (ignores whether the birthday already passed).
cadastro['idade'] = date.today().year - anoNasc
cadastro['ctps'] = int(input('Carteira de Trabalho [0 não tem]: '))
if cadastro['ctps'] != 0:
    cadastro['anoContr'] = int(input('Ano de Contratação: '))
    cadastro['salario'] = float(input('Salário: R$ '))
    # Retirement age: 35 years of service counted from the hire year.
    cadastro['aposentadoria'] = (cadastro['anoContr'] + 35) - anoNasc
print('-='*20)
# Dump every collected field.
for k, v in cadastro.items():
    print(f'{k} tem o valor {v}')
| true |
b5a2c1892505cccfd2095403f7eef651f3d3a023 | Python | Pubudhi/SummerOfCode-1mwtt | /variables.py | UTF-8 | 297 | 3.453125 | 3 | [] | no_license | #variables
# Quote of the day : Don't repeat yourself.
# Demo of assigning and rebinding simple string variables.
myString = "hello"
print(myString)
name = "Pubudhi"
print('My name is ' + name)
print(name + ' is a really beautiful name!!!')  # fixed typo: 'beatiful'
composer = 'Mozart'
print(composer)
composer = 'Tchaikovsky'  # fixed typo: 'Techaikovsky'
print('But I prefer ' + composer + ' personally.')
4671dcdf0f3689fc4bb3b0b14baa621731f0c161 | Python | liu298/Database-Systems | /CS411-MP1/MP1/p2.py | UTF-8 | 1,965 | 2.875 | 3 | [] | no_license | import sys
def readlines():
    """Parse the relation description from stdin.

    Expected format: line 1 is a comma-separated attribute list, line 2 is
    a separator (ignored), and every following non-blank line is a
    functional dependency of the form "A,B->C,D".

    Returns (attrs, fds): the attribute-name list and a dict mapping a
    tuple of left-hand-side attributes to a list of right-hand-side
    attributes (duplicated LHS lines are merged).
    """
    rawLines = sys.stdin.readlines()
    attrs = rawLines[0].strip().split(",")
    fds = {}
    for rawLine in rawLines[2:]:
        if not rawLine.strip():
            continue
        lhs, rhs = rawLine.split("->")
        key = tuple(lhs.strip().split(","))
        fds.setdefault(key, []).extend(rhs.strip().split(","))
    return (attrs, fds)
def computeClosure(attr,fds):
    """Compute the attribute closure of *attr* under the FDs in *fds*.

    :type attr: list
    :type fds: dict
    :rtype: list

    Fixes over the original:
    - ``poten`` is materialized with ``list(range(...))``; Python 3's
      ``range`` object has no ``append``, which ``isUseful`` relies on.
    - ``prev`` is snapshotted with ``list(clo)``.  The old ``prev = clo``
      alias was mutated in place by ``isUseful`` (``clo += ...``), so the
      fixpoint test ``set(clo) == set(prev)`` could fire one pass too early.
    """
    clo = [at for at in attr]
    use = []
    junk = []
    poten = list(range(len(fds)))
    # all attributes that appear on any right-hand side
    values = []
    for val in fds.values():
        values.extend(val)
    values = list(set(values))
    while(len(use)+len(junk)!=len(fds)):
        prev = list(clo)  # snapshot, not alias (see docstring)
        for k in poten:
            (clo,use,junk,poten) = isUseful(k,clo,fds,values,use,junk,poten)
            if(len(use)+len(junk)==len(fds)):
                return clo
        if(set(clo)==set(prev)):
            # no progress in a full pass: fixpoint reached
            return clo
def isUseful(k,clo,fds,values,use,junk,poten):
    """Classify functional dependency number *k* against the closure *clo*.

    - LHS already in *clo*: apply the FD (grow *clo*), record *k* in *use*.
    - LHS can never be reached (missing attrs not derivable from any RHS):
      record *k* in *junk*.
    - Otherwise keep *k* in *poten* for a later pass.
    Returns the updated (clo, use, junk, poten) tuple.

    Fix: the original indexed ``fds.keys()[k]`` / ``fds.values()[k]``
    directly, which only works on Python 2; the views are now materialized
    with ``list(...)`` first.
    """
    fdKeys = list(fds.keys())
    fdVals = list(fds.values())
    if set(fdKeys[k]).issubset(set(clo)):
        clo += [att for att in fdVals[k]]
        clo = list(set(clo))  # de-duplicate
        use.append(k)
        return (clo,use,junk,poten)
    else:
        diff = set(fdKeys[k])-set(clo)
        if(not set(diff).issubset(set(values))):
            # missing LHS attributes never appear on any RHS: dead FD
            junk.append(k)
            return (clo,use,junk,poten)
        else:
            poten.append(k)
            poten = list(set(poten))
            return (clo,use,junk,poten)
def isBCNF(attrs,fds):
    """Return True iff every FD's left-hand side is a superkey.

    :type attrs: list
    :type fds: dict
    :rtype: bool
    """
    for lhs in fds:
        closure = set(computeClosure(list(lhs), fds))
        # A proper subset of the full attribute set means this LHS does
        # not determine the whole relation -> BCNF is violated.
        if closure < set(attrs):
            return False
    return True
if __name__ == "__main__":
    # Read the schema + FDs from stdin and report whether it is in BCNF.
    (attrs,fds) = readlines()
    if isBCNF(attrs,fds):
        sys.stdout.write("Yes")
    else:
        sys.stdout.write("No")
| true |
c4dfbccd8336c8d9fcd2ef85f6a3a1075562e8e3 | Python | jashburn8020/the-python-tutorial | /src/ch11/locale_test.py | UTF-8 | 2,170 | 3.640625 | 4 | [
"Apache-2.0"
] | permissive | """Output formatting using `locale`."""
import locale
from datetime import datetime
from typing import Generator
import pytest
@pytest.fixture(name="zh_cn")
def fixture_zh_cn() -> Generator[None, None, None]:
    """Set current locale to `zh_CN.UTF-8` (must be installed on the host)."""
    locale.setlocale(locale.LC_ALL, ("zh_CN", "UTF-8"))
    yield
    # locale.resetlocale() was deprecated in 3.11 and removed in 3.13;
    # restore the environment-default locale explicitly instead.
    locale.setlocale(locale.LC_ALL, "")
@pytest.fixture(name="de_de")
def fixture_de_de() -> Generator[None, None, None]:
    """Set current locale to `de_DE.UTF-8` (must be installed on the host)."""
    locale.setlocale(locale.LC_ALL, ("de_DE", "UTF-8"))
    yield
    # locale.resetlocale() was deprecated in 3.11 and removed in 3.13;
    # restore the environment-default locale explicitly instead.
    locale.setlocale(locale.LC_ALL, "")
def test_number_zh(zh_cn: None) -> None:
    """Format number using the Chinese locale (zh_CN grouping: 1,234,567)."""
    assert locale.format_string("Int: %d", 1234567, grouping=True) == "Int: 1,234,567"
    assert locale.atoi("1,234,567") == 1234567
    assert (
        locale.format_string("Float: %.2f", 12345.67, grouping=True)
        == "Float: 12,345.67"
    )
    # atof parses back using the same locale-specific separators
    assert locale.atof("12,345.67") == 12345.67
def test_number_de(de_de: None) -> None:
    """Format number using the German locale (dot grouping, comma decimal)."""
    assert locale.format_string("Int: %d", 1234567, grouping=True) == "Int: 1.234.567"
    assert locale.atoi("1.234.567") == 1234567
    assert (
        locale.format_string("Float: %.2f", 12345.67, grouping=True)
        == "Float: 12.345,67"
    )
    # atof parses back using the same locale-specific separators
    assert locale.atof("12.345,67") == 12345.67
def test_currency_zh(zh_cn: None) -> None:
    """Format number with currency using the Chinese locale (yuan prefix)."""
    assert locale.currency(12345.67, grouping=True) == "¥12,345.67"
def test_currency_de(de_de: None) -> None:
    """Format number with currency using the German locale (euro suffix)."""
    assert locale.currency(12345.67, grouping=True) == "12.345,67 €"
def test_datetime_zh(zh_cn: None) -> None:
    """Format date and time using the Chinese locale's %c representation."""
    date_time = datetime(2020, 7, 15, 13, 10, 5)
    assert date_time.strftime("%c") == "2020年07月15日 星期三 13时10分05秒"
def test_datetime_de(de_de: None) -> None:
    """Format date and time using the German locale's %c representation."""
    date_time = datetime(2020, 7, 15, 13, 10, 5)
    # NOTE(review): expected string carries a trailing space — libc-specific.
    assert date_time.strftime("%c") == "Mi 15 Jul 2020 13:10:05 "
| true |
eac13a5b7df9760805a39a2fbbcf65258c9e6974 | Python | iagger/Modelado-de-comunidades | /src/backend/api_rest.py | UTF-8 | 3,991 | 2.671875 | 3 | [] | no_license | import csv
from decimal import Decimal
from sanic import Sanic
from sanic.response import json as sanjson
from setup import PATHS
from artwork_similarity import *
from sanic.response import file
from sanic.response import text
import os
import json
from sanic_cors import CORS, cross_origin
# Instantiate the Sanic application
app = Sanic(name='api-rest')
# Allow CORS on all domains
CORS(app)
# REST endpoint: list every artwork in the catalogue.
@app.get('/artworks')
async def index(request):
    """Return all artworks as a JSON list of summary dicts (400 on failure)."""
    try:
        arts = []
        # "with" guarantees the CSV handle is closed (the original leaked it).
        with open(PATHS['ARTWORKS_DATA']) as data:
            reader = csv.DictReader(data, delimiter=',')
            for row in reader:
                arts.append({
                    "Title": row['Title'],
                    "Id": row['wd:paintingID'],
                    "Artist": row['Artist'],
                    "Category": row['Category'],
                    "Image": row['Image URL']
                })
        return sanjson(arts)
    except Exception:
        # Narrowed from a bare except so CancelledError/SystemExit propagate.
        return sanjson({"Message": "Artworks not found"}, status=400)
# REST endpoint: return the 5 artworks most similar to the one given by
# ?id=..., optionally weighting the similarity variables via the JSON body.
@app.post('/artworks/similarity/artworkID')
async def index(request):
    """Validate the optional weight vector (it must sum to exactly 1) and
    delegate to findSimilars; any failure yields a 400 response."""
    try:
        id = request.args.get("id")
        weightsReq = []
        data = request.json
        if data:
            for it in data:
                weightsReq.append(float(Decimal(data.get(it))))
            # Re-sum through Decimal(str(...)) to dodge float rounding noise.
            x = Decimal(0)
            for i in weightsReq:
                x += Decimal(str(i))
            if x != 1.0:
                return sanjson({"Message": "Invalid weights"}, status=400)
        # No body -> empty weight list (findSimilars applies its defaults).
        return sanjson(findSimilars(id, weightsReq))
    except Exception:
        # Narrowed from a bare except so CancelledError/SystemExit propagate.
        return sanjson({"Message": "Artwork ID not found"}, status=400)
# REST endpoint: serve the file that visualizes the communities (clusters).
@app.get('/artworks/similarity/clusters')
async def index(request):
    """Stream the cluster-visualization file; 400 when it does not exist."""
    try:
        return await file(PATHS['CLUSTER_VISUAL'])
    except Exception:
        # Narrowed from a bare except so CancelledError/SystemExit propagate.
        return sanjson({"Message": "There is no file for these clusters"}, status=400)
# REST endpoint: return every cluster JSON document in the clusters folder.
@app.get('/artworks/similarity/clustersHTML')
async def index(request):
    """Load each JSON file under PATHS['CLUSTERS_DIR'] into one JSON array."""
    try:
        out = []
        directory = PATHS['CLUSTERS_DIR']
        for filename in os.listdir(directory):
            with open(os.path.join(directory, filename)) as clusterFile:
                out.append(json.load(clusterFile))
        return sanjson(out)
    except Exception:
        # Narrowed from a bare except so CancelledError/SystemExit propagate.
        return sanjson({"Message": "Error displaying clusters"}, status=400)
# Helper: assemble the JSON for the requested artwork plus its similars.
def findSimilars(id, weightsReq):
    """Return a dict describing artwork *id* and its 5 most similar works,
    or None when *id* is absent from the CSV (as the original did by
    falling off the end of the loop).

    Fixes over the original: the CSV is read once into memory and the file
    handle is closed; the old code called ``data.seek(0)`` without
    recreating the DictReader, so the header line could be re-parsed as a
    data row on subsequent scans.
    """
    artworks = kMostSimilarArtworks(id, k=5, weights=weightsReq)
    with open(PATHS['ARTWORKS_DATA']) as data:
        rowById = {row['wd:paintingID']: row
                   for row in csv.DictReader(data, delimiter=',')}
    sims = []
    # each entry of artworks is a (similarity, paintingID) pair
    for similarity, paintingId in artworks:
        row = rowById.get(paintingId)
        if row is not None:
            sims.append({
                "Title": row['Title'],
                "Similarity": similarity,
                "Category": row['Category'],
                "Artist": row['Artist'],
                "Image": row['Image URL']
            })
    row = rowById.get(id)
    if row is not None:
        return {
            "Selected artwork": id,
            "Title": row['Title'],
            "Category": row['Category'],
            "Artist": row['Artist'],
            "Image": row['Image URL'],
            "Similar artworks":
                sims
        }
5edb8c9e2c1b53f98983061b42e31c16353f0393 | Python | he9mei/python_appium | /learning_pytest/test_01_m_k.py | UTF-8 | 656 | 2.9375 | 3 | [] | no_license | # 涉及知识点:
# 用例的写法
# 配合验证用例的执行
import pytest
class TestDemo:
    """Two toy pytest cases used to demonstrate -m (marker) and -k (keyword)
    selection."""
    @pytest.mark.testicon # works, but pytest warns: 'testicon' is an ad-hoc custom mark, not a registered one.
    def test_test1(self):
        """Selectable via -m "testicon"."""
        print("测试用例1-测试用例1")
    def test_login_test2(self):
        """Selectable via -k "login" (keyword match on the name)."""
        print("测试用例1-测试用例2")
if __name__=="__main__":
# pytest.main(["-s","-v","./test_01_m_k.py","-m","testicon"]) # test1用testicon标记,使用时:-m "testicon"
pytest.main(["-s", "-v", "./test_01_m_k.py", "-k", "login"]) # test2有关键字login,使用时:-k "login"
| true |
613e6deadecf47af5ba1342379fd30f299bef70a | Python | IanQS/701-Project | /code/rnnExperiments/ibcWord2VecTest.py | UTF-8 | 24,070 | 2.96875 | 3 | [] | no_license | # baselineRNN.py
# script designed to hold the functions to initially generate our RNN
# imports
import cPickle
from gensim.models.word2vec import Word2Vec
import numpy as np
from structClass import Struct
import random #for SGD
import sys
import treeUtil
import copy #for help with keeping track of chain rule paths
from nltk.stem.lancaster import LancasterStemmer
def datasetLoadIn(datasetFilename):
    """Load the pickled IBC dataset.

    Returns [liberalSent, conservSent, neutralSent]: three lists of parse
    trees, one list per ideological label.
    """
    # "with" guarantees the file is closed (the original leaked the handle).
    with open(datasetFilename, "rb") as datasetFile:
        [liberalSent, conservSent, neutralSent] = cPickle.load(datasetFile)
    return [liberalSent, conservSent, neutralSent]
# activation functions
def softMaxFunc(vec):
    """Return the softmax of *vec*.

    Shifts by the max entry before exponentiating; this is mathematically
    identical to exp(v)/sum(exp(v)) but avoids overflow (NaNs) in np.exp
    for large inputs.
    """
    shifted = np.exp(vec - np.max(vec))
    return shifted / np.sum(shifted)
def TanhActivFunc(vec):
    """Elementwise tanh activation used when composing phrase vectors."""
    #given a language vector, calculate the activation function of the language
    #vector
    return np.tanh(vec)
def derivTanhActivFunc(vec):
    """Elementwise derivative of tanh, i.e. sech(vec) squared."""
    sech = 1.0 / np.cosh(vec)
    return sech ** 2
def rectActivFunc(vec):
    """Softplus activation log(1 + exp(vec)) — a smooth ReLU.

    Bug fix: the original called the bare name ``log``, which is not
    defined anywhere in this module (NameError at runtime).  np.log1p is
    used instead, which is also more accurate for small exp(vec).
    """
    return np.log1p(np.exp(vec))
def derivRectActivFunc(vec):
    """Derivative of softplus: exp(v)/(1 + exp(v)), the logistic sigmoid."""
    expVec = np.exp(vec)
    return expVec / (1 + expVec)
# neural network class
class neuralNet(Struct):
    def __init__(self, numLabels, sentenceDim, vocabSize, vocabDict,
                 trainingSet, useWord2Vec=True, wordMatrixFilename=None):
        """Build the RNN classifier skeleton.

        Weight matrices start as zeros; initializedWeights() fills them in
        lazily on the first forwardProp/train/getAccuracy call.
        """
        #for the softmax layer
        self.softmaxWeightMat = np.zeros((numLabels, sentenceDim))
        #for the basic language layer (phrase-composition matrix)
        self.languageWeightMat = np.zeros((sentenceDim,sentenceDim))
        # one embedding column per vocabulary entry
        self.wordEmbedingMat = np.zeros((sentenceDim,vocabSize))
        self.vocabDict = vocabDict #to keep track of our vocabulary
        self.trainingSet = trainingSet #for training our data
        # side effect: attaches a one-hot .labelVec to every training tree
        self.labelDict = self.setLabels(trainingSet)
        self.sentenceDim = sentenceDim
        self.weightsInitialized = False
        self.lossFunction = None
        self.useWord2Vec = useWord2Vec
        self.wordMatrixFilename = wordMatrixFilename
        #helper for storing language activation function (set in forwardProp)
        self.langActivFunc = None
        self.derivLangActivFunc = None
    def makeWordMat(self, dim=300):
        # Use Word2Vec to make a matrix of the vocabulary
        # Dim is the size of vectors to use; however, if we use
        # the google corpus, we will want to use dim=300
        # Load google model (requires the ~3.5 GB GoogleNews binary on disk)
        model = Word2Vec.load_word2vec_format('../../data/GoogleNews-vectors-negative300.bin',
                                              binary=True)
        # NOTE: list-multiplication aliases one shared zero row; harmless
        # here because slots are only ever *replaced*, never mutated.
        all_words = [[0.] * dim] * len(self.vocabDict.keys()) # Dummy set-up
        missing = 0
        for word in self.vocabDict:
            idx = self.vocabDict[word]
            try:
                # hyphenated vocab entries fall back to their first segment
                all_words[idx] = model[word.split('-')[0]]
            except:
                # If the google corpus does not have a word, just leave its
                # vector at zero, these comprise < 3 % of the data.
                # NOTE(review): bare except also hides unrelated errors.
                missing += 1
        print "Number of Words missing:", missing, len(all_words)
        # transpose -> (dim x vocabSize), matching wordEmbedingMat's layout
        return np.array(all_words).transpose()
def setLabels(self,trainingSet):
#given a list of parse trees, create a label vector and assign to
#each parse tree
#first, get set of labels from training set
labelDict = {}
for i in xrange(len(trainingSet)):
if (trainingSet[i].label not in labelDict):
#add it in
labelDict[trainingSet[i].label] = len(labelDict)
#then get numpy to develop an identity matrix for this
labelMatrix = np.identity(len(labelDict))
#then attach label vectors to each parse tree
for i in xrange(len(trainingSet)):
#get column reference
labelVecCol = labelDict[trainingSet[i].label]
givenLabelVec = labelMatrix[:,labelVecCol]
#then transpose to assign as column vector
trainingSet[i].labelVec = np.array([givenLabelVec]).T
return labelDict
    def vectorizeSentenceTreeNonRec(self,sentenceTree):
        """Iterative (explicit-stack) version of vectorizeSentenceTree.

        Post-order traversal: each node is pushed with a visit counter; on
        the second visit its two children's vectors are popped and combined
        as langActivFunc(W . (c1 + c2)).  Every node caches its vector on
        node.langVec.  Returns the root's sentence vector.
        """
        solutions = []
        toExplore = [(sentenceTree, 0)]
        while(toExplore):
            currentNode, visitNum = toExplore.pop()
            if (isinstance(currentNode,treeUtil.leafObj)):
                #look up in our word embeding matrix
                wordIndex = self.vocabDict[currentNode.word]
                wordVec = self.wordEmbedingMat[:,wordIndex]
                #then adjust it for column usage
                wordColumnVec = np.array([wordVec]).T #for transpose
                currentNode.langVec = wordColumnVec #for reference
                solutions.append(wordColumnVec)
            else:
                if visitNum == 0:
                    # revisit this node after both children are processed
                    toExplore.append((currentNode, 1))
                    toExplore.append((currentNode.c1, 0))
                    toExplore.append((currentNode.c2, 0))
                else:
                    if len(solutions) < 2:
                        # should be unreachable for well-formed binary trees
                        print "your algo sucks"
                    else:
                        # pop order is reversed, but c1 + c2 is commutative
                        c1 = solutions.pop()
                        c2 = solutions.pop()
                        sentenceVec = self.langActivFunc(np.dot(
                                        self.languageWeightMat,
                                        c1 + c2))
                        #assign it and then return
                        currentNode.langVec = sentenceVec
                        solutions.append(sentenceVec)
        return solutions[0]
def vectorizeSentenceTree(self,sentenceTree):
#given a parse tree, vectorize the parse tree
if (isinstance(sentenceTree,treeUtil.leafObj)): #is a word,
#look up in our word embeding matrix
wordIndex = self.vocabDict[sentenceTree.word]
wordVec = self.wordEmbedingMat[:,wordIndex]
#then adjust it for column usage
wordColumnVec = np.array([wordVec]).T #for transpose
sentenceTree.langVec = wordColumnVec #for reference
return wordColumnVec
else: #we have a recursively defined object
leftChildVec = self.vectorizeSentenceTree(sentenceTree.c1)
rightChildVec = self.vectorizeSentenceTree(sentenceTree.c2)
inputVector = (np.dot(self.languageWeightMat,leftChildVec)
+ np.dot(self.languageWeightMat,rightChildVec))
sentenceVec = self.langActivFunc(inputVector)
#assign it and then return
sentenceTree.langVec = sentenceVec
return sentenceVec
def forwardProp(self, sentenceTree):
# given a sentence vector of sentenceDim dimensions, output our
# softmax layer
if (self.weightsInitialized == False):
#shoud initialize this
self.initializedWeights()
if (self.lossFunction == None):
self.lossFunction = self.defaultLossFunction()
if (self.langActivFunc == None):
#initialize as ReLU
self.langActivFunc = TanhActivFunc
self.derivLangActivFunc = derivTanhActivFunc
#first vectorize sentence
sentenceVec = self.vectorizeSentenceTreeNonRec(sentenceTree)
#then move the sentence through the softmax layer
inputVec = np.dot(self.softmaxWeightMat, sentenceVec)
#normalize before placing into function to ensure a reasonable
#representation in the distribution
givenSoftMaxVec = softMaxFunc(inputVec)
return givenSoftMaxVec
def predict(self, parseTree):
#given the sentence vector, predicts the one-hot vector associated
#with that sentence vector
probabilityPredVec = self.forwardProp(parseTree)
#then produce one hot vector of this
oneHotPredVec = np.zeros(probabilityPredVec.shape)
predictedLabelIndex = np.argmax(probabilityPredVec)
oneHotPredVec[predictedLabelIndex] = 1
return oneHotPredVec
def initializedWeights(self):
#helper for initializing our weights
self.weightsInitialized = True
self.softmaxWeightMat= np.random.rand(self.softmaxWeightMat.shape[0],
self.softmaxWeightMat.shape[1])
self.languageWeightMat = np.random.rand(self.languageWeightMat.shape[0],
self.languageWeightMat.shape[1])
if self.useWord2Vec:
# If prepared matrix is specified, assume we want to use it.
if self.wordMatrixFilename:
self.wordEmbedingMat = cPickle.load(open(self.wordMatrixFilename))
else:
self.wordEmbedingMat = self.makeWordMat()
else:
self.wordEmbedingMat = np.random.rand(self.wordEmbedingMat.shape[0],
self.wordEmbedingMat.shape[1])
    def setLossFunction(self, toSet):
        """Install a custom loss callable taking (outputY, targetY)."""
        self.lossFunctionInitialized = True
        self.lossFunction = toSet
    def defaultLossFunction(self):
        """Install cross-entropy as the loss.

        NOTE: sets self.lossFunction as a side effect and returns None —
        callers must NOT assign its return value to self.lossFunction.
        """
        def crossEntropy(outputY, targetY):
            # both the above come in as a list of lists
            assert(np.shape(outputY) == np.shape(targetY))
            return (-1 * np.sum(targetY * np.log(outputY)))
        self.lossFunction = crossEntropy
#functions designed to find word embeding gradient
    def getColumnGradientPaths(self,parseTree,wordNum):
        """Return every root-to-leaf node path whose leaf is occurrence(s)
        of vocabulary column *wordNum* — one chain-rule path per occurrence
        of the word in the tree."""
        colGradPathList = []
        givenPath = ()
        def getColumnGradientPathsWrapper(parseTree,wordNum,colGradPathList,
                givenPath):
            # extend the (immutable tuple) path, then recurse or record
            givenPath += (parseTree,)
            if (isinstance(parseTree,treeUtil.leafObj)):
                #check if it's our word (alpha holds the vocab column index)
                if (parseTree.alpha == wordNum):
                    #append it
                    colGradPathList.append(list(givenPath))
            else: #it is a phrase, look at left and right subpaths
                # tuples are immutable, so both branches share safely
                leftGivenPath = givenPath
                rightGivenPath = givenPath
                getColumnGradientPathsWrapper(parseTree.c1,wordNum,
                        colGradPathList,leftGivenPath)
                getColumnGradientPathsWrapper(parseTree.c2,wordNum,
                        colGradPathList,rightGivenPath)
        getColumnGradientPathsWrapper(parseTree,wordNum,colGradPathList,
                givenPath)
        return list(colGradPathList)
    def calculateColGradPath(self,gradientPath):
        """Chain-rule the derivative along one root-to-leaf *gradientPath*
        (as produced by getColumnGradientPaths) down to the word level."""
        if (len(gradientPath) == 1):
            #reached end of path: the leaf contributes a ones row vector
            givenLeafNode = gradientPath[0]
            assert(isinstance(givenLeafNode,treeUtil.leafObj))
            wordLevelDeriv = np.ones((1,self.sentenceDim))
            return wordLevelDeriv
        else:
            #we have a phrase level gradient: f'(W(c1+c2)) through W
            givenPhraseTree = gradientPath[0]
            outerLayerDeriv = self.derivLangActivFunc(
                    np.dot(self.languageWeightMat,
                    givenPhraseTree.c1.langVec + givenPhraseTree.c2.langVec))
            currentLayerDeriv = np.dot(outerLayerDeriv.T,self.languageWeightMat)
            # elementwise product with the remainder of the path (recursion)
            return currentLayerDeriv * self.calculateColGradPath(
                                            gradientPath[1:])
def findColGrad(self,givenSentenceTree,wordNum):
#main wrapper for finding a given column-level gradient
listOfGradientPaths = self.getColumnGradientPaths(givenSentenceTree,
wordNum)
colGradDeriv = 0 #we will add to this
for gradientPath in listOfGradientPaths:
colGradDeriv += self.calculateColGradPath(gradientPath)
return colGradDeriv
    def buildWordEmbedingGradient(self,
            givenSentenceTree,predictionVec,correctLabel):
        """Gradient of the cross-entropy loss w.r.t. the word-embedding
        matrix for one training tree; only columns of words that actually
        occur in the tree are non-zero."""
        # softmax-layer factor of the chain rule: (p - y)^T W_softmax
        softmaxLayerDeriv = np.dot((predictionVec - correctLabel).T,
                self.softmaxWeightMat).T
        #get the (unique) vocabulary column numbers used by this tree
        columnNumList = []
        for leaf in givenSentenceTree.get_leaves():
            columnNumList.append(leaf.alpha) #contains column reference number
        columnNumList = list(set(columnNumList)) #to get unique
        wordEmbedingGradMatrix = np.zeros((self.sentenceDim,
                                           len(self.vocabDict)))
        for columnNum in columnNumList:
            #find gradient for this column
            wordEmbedingGradMatrix[:,columnNum] = self.findColGrad(
                    givenSentenceTree,columnNum).flatten()
        # broadcast the softmax factor over the per-column gradients
        return softmaxLayerDeriv * wordEmbedingGradMatrix
#functions designed to find the language gradient
    def languageDerivRecursion(self,langGradientPath, depth=0):
        """Chain-rule the derivative of one path of phrase nodes down to
        the language weight matrix.

        NOTE(review): paths are truncated once ``depth > 3`` — presumably
        to bound recursion cost, but it silently drops gradient terms for
        deep parse trees; confirm this is intentional.
        """
        assert(len(langGradientPath) >= 1)
        if (len(langGradientPath) == 1 or (depth > 3)): #just need to take the derivative
            #with respect to the matrix
            givenPhrase = langGradientPath[0]
            functionInputVector = np.dot(self.languageWeightMat,
                    givenPhrase.c1.langVec + givenPhrase.c2.langVec)
            #take derivative at function level
            derivActivFuncOutput = self.derivLangActivFunc(functionInputVector)
            #by chain, take derivative wrt functionInputVector
            derivFunctionInputVector = (givenPhrase.c1.langVec
                                        + givenPhrase.c2.langVec)
            return derivActivFuncOutput * derivFunctionInputVector
        else: #must take with respect to subsequent path
            givenPhrase = langGradientPath[0]
            functionInputVector = np.dot(self.languageWeightMat,
                    givenPhrase.c1.langVec + givenPhrase.c2.langVec)
            derivActivFuncOutput = self.derivLangActivFunc(functionInputVector)
            #take derivative wrt next phrase in the path
            currentPathOutputDeriv = (
                np.dot(derivActivFuncOutput.T,self.languageWeightMat)).T
            return currentPathOutputDeriv * self.languageDerivRecursion(
                                                langGradientPath[1:], depth+1)
    def getLanguageChainRulePaths(self,sentenceTree):
        """List every root-to-phrase path in *sentenceTree*; each internal
        (non-leaf) node contributes one path, since each application of the
        language matrix is a separate chain-rule term."""
        listOfChainRulePaths = []
        givenPath = () #this is designed to keep track of our paths
        #to append to our list
        def getLanguageChainRulePathsWrapper(sentenceTree,listOfChainRulePaths,
                givenPath):
            # only phrase nodes depend on the language matrix; leaves stop
            if (not(isinstance(sentenceTree,treeUtil.leafObj))):
                #means that it is dependent on the language matrix
                givenPath += (sentenceTree,)
                listOfChainRulePaths.append(list(givenPath))
                #check if its left and right sides are dependent on the language
                #matrix
                if (not(isinstance(sentenceTree.c1,treeUtil.leafObj))):
                    leftGivenPath = givenPath
                    getLanguageChainRulePathsWrapper(sentenceTree.c1,
                            listOfChainRulePaths,leftGivenPath)
                if (not(isinstance(sentenceTree.c2,treeUtil.leafObj))):
                    rightGivenPath = givenPath
                    getLanguageChainRulePathsWrapper(sentenceTree.c2,
                            listOfChainRulePaths,rightGivenPath)
        #perform the wrapper
        getLanguageChainRulePathsWrapper(sentenceTree,listOfChainRulePaths,
                givenPath)
        return listOfChainRulePaths
    def buildLanguageWeightGradient(self,predictedLabel,correctLabel,
            givenSentenceTree):
        """Gradient of the cross-entropy loss w.r.t. the language weight
        matrix for one training tree: the softmax-layer factor times the
        sum of per-path derivatives (outer product)."""
        #first, account for the derivative at the softmax layer: (p - y)^T W
        softmaxLayerDeriv = np.dot((predictedLabel - correctLabel).T,
                self.softmaxWeightMat)
        #then, generate the sentence level derivative by performing gradient
        #chain rule to all paths to the language level matrix
        listOfChainRulePaths = self.getLanguageChainRulePaths(givenSentenceTree)
        #accumulate each path's contribution into one column vector
        languageLayerDeriv = np.zeros((self.sentenceDim,1))
        for langGradientPath in listOfChainRulePaths:
            languageLayerDeriv += self.languageDerivRecursion(langGradientPath)
        # outer product -> a (sentenceDim x sentenceDim) gradient matrix
        languageWeightGradient = np.dot(softmaxLayerDeriv.T,
                languageLayerDeriv.T)
        return languageWeightGradient
#main training algorithms
    def trainStochastically(self,numIterations,learningRate):
        """SGD on cross-entropy: one randomly sampled training tree per
        iteration; updates all three weight matrices in place."""
        for i in xrange(numIterations):
            #get predictor ind: sample one tree uniformly at random
            givenSentenceTree = random.sample(self.trainingSet,1)[0]
            predictionVec = self.forwardProp(givenSentenceTree)
            #get gradient of weights (langVec was cached by forwardProp)
            correctLabel = givenSentenceTree.labelVec
            softmaxMatGradient = ((predictionVec - correctLabel)
                                  * givenSentenceTree.langVec.transpose())
            languageWeightGradient = self.buildLanguageWeightGradient(
                    predictionVec,correctLabel,givenSentenceTree)
            wordEmbedingGradient = self.buildWordEmbedingGradient(
                    givenSentenceTree,predictionVec,correctLabel)
            #then update weights (plain gradient step, no decay)
            self.softmaxWeightMat -= learningRate * softmaxMatGradient
            self.languageWeightMat -= learningRate * languageWeightGradient
            self.wordEmbedingMat -= learningRate * wordEmbedingGradient
            # Only check every once in a while for sanity
            if i%100 == 0:
                print max(list(languageWeightGradient)[0])
                print "----"
                print self.getAccuracy(self.trainingSet)
                print "NEXT UPDATE"
    def trainManually(self,numIterations,learningRate):
        """Full-batch gradient descent: accumulate gradients over the whole
        training set before each weight update (contrast with
        trainStochastically)."""
        for i in xrange(numIterations):
            #initialize our gradient accumulators to zero
            softmaxMatGradient = np.zeros(self.softmaxWeightMat.shape)
            languageWeightGradient = np.zeros(self.languageWeightMat.shape)
            wordEmbedingGradient = np.zeros(self.wordEmbedingMat.shape)
            #run through each parse tree
            for parseTree in self.trainingSet:
                predictionVec = self.forwardProp(parseTree)
                #add to gradient of weights
                correctLabel = parseTree.labelVec
                softmaxMatGradient += ((predictionVec - correctLabel)
                                       * parseTree.langVec.transpose())
                languageWeightGradient += self.buildLanguageWeightGradient(
                        predictionVec,correctLabel,parseTree)
                # word-embedding updates are disabled below — presumably
                # for speed; TODO confirm before re-enabling.
                #wordEmbedingGradient += self.buildWordEmbedingGradient(
                #        parseTree,predictionVec,correctLabel)
            #then update weights
            self.softmaxWeightMat -= learningRate * softmaxMatGradient
            self.languageWeightMat -= learningRate * languageWeightGradient
            #self.wordEmbedingMat -= learningRate * wordEmbedingGradient
    def train(self,numIterations,learningRate,trainStochastically = False):
        """Entry point for training: dispatches to SGD or full-batch GD."""
        #check for initialization (idempotent lazy init)
        if (self.weightsInitialized == False):
            #initialize it
            self.initializedWeights()
        #then make training decision
        if (trainStochastically): #we will use the stochastic method
            self.trainStochastically(numIterations,learningRate)
        else:
            self.trainManually(numIterations,learningRate)
#diagnostic methods
def getAccuracy(self,parseTreeList):
if (self.weightsInitialized == False):
self.initializedWeights()
#helper to get accuracy on a given set of data
numCorrect = 0
#check num correct
for i in xrange(len(parseTreeList)):
#get probability prediction,
predictionVec = self.predict(parseTreeList[i])
if (np.array_equal(predictionVec,parseTreeList[i].labelVec)):
numCorrect += 1
return float(numCorrect) / len(parseTreeList)
# testing
def generateRandomVector(dimension):
    """Return a (dimension x 1) numpy matrix of uniform [0, 1] samples.

    Fixed to run under Python 3 (the original looped over ``xrange``,
    which no longer exists) and rewritten as a comprehension.
    """
    return np.matrix([[random.uniform(0, 1)] for _ in range(dimension)])
def generateLabel(numLabels,predVec):
    """Return a one-hot (numLabels x 1) integer label matrix.

    The label index is a simple threshold on the vector's entry sum:
    sum(predVec) >= 1.2 selects label 0, anything else selects label 1.
    Fixed to run under Python 3 (the original used ``xrange``); the nested
    append loops are replaced by a single comprehension, preserving the
    original integer dtype.
    """
    chosen = 0 if np.sum(predVec) >= 1.2 else 1
    column = [[1] if j == chosen else [0] for j in range(numLabels)]
    return np.matrix(column)
#test processses
def testForwardPropagation(numLabels,sentenceDim,vocabFilename,
        datasetFilename, wordMatrixFilename=None):
    """End-to-end smoke test: load vocab + trees, 50/50 split, train with
    SGD, and print accuracy on both halves."""
    #load vocabulary (NOTE(review): file handles below are never closed)
    vocabDict = cPickle.load(open(vocabFilename,"rb"))
    #load dataset
    parseTreeList = cPickle.load(open(datasetFilename,"rb"))
    random.shuffle(parseTreeList)
    #split data 50/50 (Python-2 integer division on len/2)
    print "Splitting Data"
    testSet = random.sample(parseTreeList,len(parseTreeList)/2)
    trainingSet = []
    for tree in parseTreeList:
        if (tree not in testSet):
            trainingSet.append(tree)
    #then forward propagate through the neural network
    practiceNeuralNet = neuralNet(numLabels,sentenceDim,len(vocabDict),
            vocabDict,trainingSet,
            wordMatrixFilename=wordMatrixFilename)
    print "Training Currently"
    # 3000 SGD iterations at learning rate 1
    practiceNeuralNet.train(3000,1,True)
    print "Accuracy on Training Set"
    print practiceNeuralNet.getAccuracy(practiceNeuralNet.trainingSet)
    print "Accuracy on The Test Set"
    # re-labels the test trees so each gets a .labelVec before scoring
    practiceNeuralNet.labelDict = practiceNeuralNet.setLabels(testSet)
    print practiceNeuralNet.getAccuracy(testSet)
# testForwardPropagation(3,300,"../data/ibcVocabulary.pkl",
#     "../data/alteredIBCData.pkl")
# NOTE: module-level side effect — importing this file kicks off training.
testForwardPropagation(3,300,"../../data/ibcVocabulary.pkl",
    "../../data/alteredIBCData.pkl")
| true |
6dce06e91e770823c231a24e75052c3d72fd6ed9 | Python | AKATSUKIKOJYO/MyPython | /Chapter03/P13.py | UTF-8 | 356 | 4.25 | 4 | [] | no_license | x = int(input("x= "))
y = int(input("y= "))
a = x + y
s = x - y
m = x * y
avg = (x + y) / 2
max_number = max(x,y)
min_number = min(x,y)
print("두수의 합: ", a)
print("두수의 차: ", s)
print("두수의 곱: ", m)
print("두수의 평균: ", avg)
print("두수중 큰 수: ", max_number)
print("두수중 작은 수: ", min_number)
| true |
70f523a0eaf800f290d9bf829f7ac34c51373a68 | Python | ibe-314/pycaptcha | /pycaptcha/recaptcha/audio_handler/recognizer.py | UTF-8 | 5,940 | 2.828125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import io
import json
from requests import Request
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
from pycaptcha.recaptcha.audio_handler.audio_data import AudioData, AudioSource
from pycaptcha.exceptions import UnknownValueError, RequestError
class Recognizer:
    def __init__(self):
        """Create a new ``Recognizer`` for converting audio to text.
        """
        self.operation_timeout = None # seconds after an internal operation (e.g., an API request) starts before it times out, or ``None`` for no timeout
    def record(self, source, duration=None, offset=None):
        """
        Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) starting at ``offset``
        (or at the beginning if not specified) into an ``AudioData`` instance, which it returns.
        If ``duration`` is not specified, then it will record until there is no more audio input.
        """
        assert isinstance(source, AudioSource), "Source must be an audio source"
        assert source.stream is not None, "Audio source must be entered before recording, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
        frames = io.BytesIO()
        # wall-clock length of one CHUNK of samples
        seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
        elapsed_time = 0
        offset_time = 0
        offset_reached = False
        while True: # loop for the total number of chunks needed
            if offset and not offset_reached:
                # still skipping audio before the requested offset
                offset_time += seconds_per_buffer
                if offset_time > offset:
                    offset_reached = True
            buffer = source.stream.read(source.CHUNK)
            if len(buffer) == 0:
                break
            if offset_reached or not offset:
                elapsed_time += seconds_per_buffer
                # NOTE: the break precedes the write, so the chunk that
                # crosses the duration boundary is deliberately discarded.
                if duration and elapsed_time > duration: break
                frames.write(buffer)
        frame_data = frames.getvalue()
        frames.close()
        return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
    def recognize_google(self, audio_data, key=None, language="en-US", show_all=False):
        """
        Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API.
        The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that
        works out of the box. This should generally be used for personal or testing purposes only, as it **may be
        revoked by Google at any time**. To obtain your own API key, simply following the steps on the `API Keys
        <http://www.chromium.org/developers/how-tos/api-keys>`__ page at the Chromium Developers site. In the Google
        Developers Console, Google Speech Recognition is listed as "Speech API". The recognition language is determined
        by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French),
        defaulting to US English. A list of supported language tags can be found in this `StackOverflow answer
        <http://stackoverflow.com/a/14302134>`__Returns the most likely transcription if ``show_all`` is false
        (the default). Otherwise, returns the raw API response as a JSON dictionary. Raises a ``speech_recognition.
        UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError``
        exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
        """
        assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
        assert key is None or isinstance(key, str), "``key`` must be ``None`` or a string"
        assert isinstance(language, str), "``language`` must be a string"
        flac_data = audio_data.get_flac_data(
            convert_rate=None if audio_data.sample_rate >= 8000 else 8000,  # audio samples must be at least 8 kHz
            convert_width=2  # audio samples must be 16-bit
        )
        # fall back to the shared Chromium demo key (may be revoked anytime)
        if key is None: key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"
        url = "http://www.google.com/speech-api/v2/recognize?{}".format(urlencode({
            "client": "chromium",
            "lang": language,
            "key": key,
        }))
        request = Request(url, data=flac_data, headers={"Content-Type": "audio/x-flac; rate={}".format(audio_data.sample_rate)})
        # obtain audio transcription results
        try:
            response = urlopen(request, timeout=self.operation_timeout)
        except HTTPError as e:
            raise RequestError("recognition request failed: {}".format(e.reason))
        except URLError as e:
            raise RequestError("recognition connection failed: {}".format(e.reason))
        response_text = response.read().decode("utf-8")
        # the API streams newline-delimited JSON; ignore any blank blocks
        # and keep the first block that actually carries a result
        actual_result = []
        for line in response_text.split("\n"):
            if not line: continue
            result = json.loads(line)["result"]
            if len(result) != 0:
                actual_result = result[0]
                break
        # return results
        if show_all:
            return actual_result
        if not isinstance(actual_result, dict) or len(
                actual_result.get("alternative", [])) == 0: raise UnknownValueError()
        # NOTE(review): "alternative" is a LIST of dicts, so this membership
        # test is always False and the else branch always runs — the intent
        # was presumably 'in actual_result["alternative"][0]'; confirm.
        if "confidence" in actual_result["alternative"]:
            # return alternative with highest confidence score
            best_hypothesis = max(actual_result["alternative"], key=lambda alternative: alternative["confidence"])
        else:
            # when there is no confidence available, we arbitrarily choose the first hypothesis.
            best_hypothesis = actual_result["alternative"][0]
        if "transcript" not in best_hypothesis: raise UnknownValueError()
        return best_hypothesis["transcript"]
| true |
07e0ec12bb5a7071b852bd2a9ea90a343ba5d4bf | Python | hitochan777/kata | /atcoder/abc178/C.py | UTF-8 | 213 | 3.25 | 3 | [] | no_license | N = int(input())
mod = 10 ** 9 + 7
def powmod(x, n, modulus=None):
    """Return (x ** n) % modulus via fast modular exponentiation.

    `modulus` defaults to the module-level prime `mod` (10**9 + 7), so
    existing two-argument call sites behave exactly as before.  The
    built-in three-argument pow() runs in O(log n) multiplications,
    replacing the original O(n) repeated-multiplication loop.
    """
    if modulus is None:
        modulus = mod  # module-level constant defined above
    return pow(x, n, modulus)
# Inclusion-exclusion over length-N digit strings: all strings minus those
# missing a 0, minus those missing a 9, plus those missing both.
ans = powmod(10, N) - 2 * powmod(9, N) + powmod(8, N)
print(ans % mod) | true |
494310566027c99b4f5727cb2362de21ff9abe7f | Python | PratikshaPP/Leetcode-Problem-Solving- | /arraypartition.py | UTF-8 | 428 | 2.890625 | 3 | [] | no_license | # Time Complexity : O(nlogn)
# Space Complexity : O(1)
# Did this code successfully run on Leetcode : Yes
class Solution(object):
    def arrayPairSum(self, nums):
        """
        Pair the 2n integers so that the sum of min(a, b) over all pairs
        is maximal: sort ascending and add every other element.

        :type nums: List[int]
        :rtype: int
        """
        # sorted() replaces the original in-place sort (no longer mutates
        # the caller's list) and nums[::2] picks each pair's minimum,
        # replacing the manual stride-2 index loop.
        return sum(sorted(nums)[::2])
| true |
6ca1b9921134887e5d868185a8ddbbed4b02c222 | Python | LuisRcap/Python | /HelloWorld/while-loop.py | UTF-8 | 342 | 3.578125 | 4 | [] | no_license | c = 0
while c < 5:
print(c)
c = c + 1
print("------------")
c = 0
while(c < 5):
print(c)
if(c == 3):
break
c += 1
print("------------")
c = 0
while(c < 5):
c += 1
if(c == 3):
continue
print(c)
print("------------")
c = 0
while(c < 5):
c += 1
if(c == 3):
pass
print(c) | true |
81a1a8c10f3c8b7f00884360fa0981ca1e2867f4 | Python | bimarakajati/Dasar-Pemrograman | /Tugas/coba/main.py | UTF-8 | 194 | 2.75 | 3 | [] | no_license | import pustaka
def main():
    """Prompt for a value and binary-search for it in a fixed sorted list."""
    A = [1,5,8,9,20,20,20,20,50]  # search space; already sorted ascending
    print('A =',A)
    # Prompt text is Indonesian: "Value to search for".
    B=int(input('Data yang ingin dicari : '))
    # NOTE(review): result reporting presumably happens inside
    # pustaka.BinarySearch - confirm in the pustaka module.
    pustaka.BinarySearch(A,B)
if __name__ == '__main__':
main() | true |
7a3a098805a9077c13338a2cf27e60d98a685778 | Python | dcramer/jinja1-djangosupport | /tests/test_lexer.py | UTF-8 | 1,754 | 2.953125 | 3 | [
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
"""
unit test for the lexer
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2007 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
RAW = '{% raw %}foo{% endraw %}|{%raw%}{{ bar }}|{% baz %}{% endraw %}'
BALANCING = '''{% for item in seq %}${{'foo': item}|upper}{% endfor %}'''
COMMENTS = '''\
<ul>
<!--- for item in seq -->
<li>{item}</li>
<!--- endfor -->
</ul>'''
BYTEFALLBACK = u'''{{ 'foo'|pprint }}|{{ 'bär'|pprint }}'''
def test_raw(env):
    """{% raw %} blocks must pass their contents through unparsed."""
    tmpl = env.from_string(RAW)
    assert tmpl.render() == 'foo|{{ bar }}|{% baz %}'
def test_comments():
    """Comment-style block delimiters (<!--- ... -->) parse and render."""
    from jinja import Environment
    env = Environment('<!--', '-->', '{', '}')
    tmpl = env.from_string(COMMENTS)
    assert tmpl.render(seq=range(3)) == ("<ul>\n  <li>0</li>\n  "
                                         "<li>1</li>\n  <li>2</li>\n</ul>")
def test_string_escapes(env):
    """String literals with escapes round-trip through the lexer."""
    for char in u'\0', u'\u2668', u'\xe4', u'\t', u'\r', u'\n':
        tmpl = env.from_string('{{ %s }}' % repr(char)[1:])
        assert tmpl.render() == char
    assert env.from_string('{{ "\N{HOT SPRINGS}" }}').render() == u'\u2668'
def test_bytefallback(env):
    """pprint falls back to a unicode repr for non-ASCII input."""
    tmpl = env.from_string(BYTEFALLBACK)
    assert tmpl.render() == u"'foo'|u'b\\xe4r'"
def test_operators(env):
    """Every operator (except brackets) lexes to its expected token type."""
    from jinja.lexer import operators
    for test, expect in operators.iteritems():  # Python 2 dict API
        if test in '([{}])':
            continue
        stream = env.lexer.tokenize('{{ %s }}' % test)
        stream.next()
        assert stream.current.type == expect
| true |
d3a0ab927dafcf6d92851899aa23160b51869652 | Python | AjitArora/code | /spiral_matrix.py | UTF-8 | 3,444 | 3.640625 | 4 | [] | no_license | class Directions:
def __init__(self):
self.left = 0
self.down = 1
self.right = 2
self.up = 3
class a:
    """Spiral-order traversal of the module-level `matrix`.

    NOTE(review): the four *_dir helpers read the GLOBAL `matrix`, not the
    argument passed to spiral_matrix (they only happen to agree because the
    caller passes the same global).  Output goes to stdout (Python 2 print).
    """
    def __init__(self):
        directions = Directions()
        # Maps a direction code to the name of the handler method below.
        self.event_map = {directions.left : 'left_dir',
                          directions.down : 'down_dir',
                          directions.right : 'right_dir',
                          directions.up : 'up_dir',
                          }
        # Traversal orders for direction == 0 (clockwise) and 1 (anticlockwise).
        self.clockwise_event_list = [directions.left, directions.down, directions.right, directions.up]
        self.anticlockwise_event_list = [directions.down, directions.left, directions.up, directions.right]
        self.answer = []  # accumulates the visited elements
    def left_dir(self, min_row, max_row, min_col, max_col, direction):
        # Walk one row left-to-right, then shrink the untraversed window.
        row = min_row if direction==0 else max_row-1
        j = min_col
        while j < max_col:
            self.answer.append(matrix[row][j])
            j+=1
        if direction == 0:
            self.min_row += 1
        else:
            self.max_row-=1
    def down_dir(self, min_row, max_row, min_col, max_col, direction):
        # Walk one column top-to-bottom.
        col = max_col if direction==0 else min_col+1
        i = min_row
        while i < max_row:
            self.answer.append(matrix[i][col-1])
            i +=1
        if direction == 0:
            self.max_col-=1
        else:
            self.min_col +=1
    def right_dir(self, min_row, max_row, min_col, max_col, direction):
        # Walk one row right-to-left.
        row = max_row if direction==0 else min_row+1
        j = max_col-1
        while j >= min_col:
            self.answer.append(matrix[row-1][j])
            j -=1
        if direction == 0:
            self.max_row-=1
        else:
            self.min_row += 1
    def up_dir(self, min_row, max_row, min_col, max_col, direction):
        # Walk one column bottom-to-top.
        col = min_col if direction==0 else max_col-1
        i = max_row-1
        while i>=min_row:
            self.answer.append(matrix[i][col])
            i-=1
        if direction == 0:
            self.min_col += 1
        else:
            self.max_col -= 1
    def spiral_matrix(self, matrix, direction, position):
        """Print the spiral; direction 0/1 = clockwise/anticlockwise,
        position 1..4 = starting corner (see trailing comment below)."""
        row = len(matrix)
        col = len(matrix[0])
        # Bounds of the not-yet-visited window, shrunk by the handlers.
        self.min_row = 0
        self.max_row = row
        self.min_col = 0
        self.max_col = col
        if direction == 1: # adjusting position, if 1, then first down; 2, then first right;...
            position = 1-position+1
        while self.min_row<self.max_row and self.min_col<self.max_col:
            if direction == 0:
                event = self.clockwise_event_list[position-1]
            else: # if direction == 1
                event = self.anticlockwise_event_list[position-1]
            # Dispatch to the handler named in event_map.
            method = getattr(self, self.event_map[event])
            method(self.min_row, self.max_row, self.min_col, self.max_col, direction)
            position += 1
            position %= 4
        print self.answer
# 4x4 demo matrix read (as a global) by the traversal helpers above.
matrix = [[1,2,3,4], [5,6,7,8], [9, 10,11, 12], [13,14,15,16]]
# Clockwise (direction 0) from each of the four starting corners...
a().spiral_matrix(matrix, 0, 1)
a().spiral_matrix(matrix, 0, 2)
a().spiral_matrix(matrix, 0, 3)
a().spiral_matrix(matrix, 0, 4)
# ...and anticlockwise (direction 1) likewise.
a().spiral_matrix(matrix, 1, 1)
a().spiral_matrix(matrix, 1, 2)
a().spiral_matrix(matrix, 1, 3)
a().spiral_matrix(matrix, 1, 4)
# 1 LT, 2 RT, 3 RB, 4 LB
# sampleArray = [[1,2,3,4],
# [5,6,7,8],
# [9,10,11,12],
# [13,14,15,16]]
#
#
# spiral = 1,2,3,4,8,12,16,15,14,13,9,5,6,7,11,10 | true |
dc7072a0d252100bc0ad4dd80e80f0ce27e3487f | Python | jwrth/xDBiT_toolbox | /ReadsToCounts/src/old_scripts/correct_xq.py | UTF-8 | 2,264 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
This tool is to modify the 'XQ' quality score in the tagged bam files.
It subtracts the number of padded Ns at the end of the 'XD' tag from the 'XQ' tag.
"""
# Library
import pysam
from argparse import ArgumentParser
import subprocess
from datetime import datetime, timedelta
# functions
def run_in_commandline(command):
    """Execute *command* through the shell and return its stripped stdout (bytes).

    NOTE(review): shell=True means the caller-built command string is
    interpreted by the shell - avoid feeding it untrusted paths.
    """
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    stdout_data, _ = proc.communicate()
    return stdout_data.strip()
# Setup input parser
parser = ArgumentParser()
# BUGFIX: the original passed "-i" "--input_bam" (adjacent string literals
# concatenate into the single option "-i--input_bam"); the comma is required
# to register both the short and the long flag.  Same for "-o".
parser.add_argument("-i", "--input_bam", action="store", dest="input_bam", default="-", help="Input bam file. Default to stdin.")
parser.add_argument("-o", "--output_bam", action="store", dest="output_bam", default="out.bam", help="Output bam file. Default out.bam.")
# Parse input
args = parser.parse_args()
input_bam = args.input_bam
output_bam = args.output_bam
infile = pysam.AlignmentFile(input_bam, 'rb', check_sq=False)
outfile = pysam.AlignmentFile(output_bam, 'wb', template=infile)
# Count reads up front so progress / ETA can be reported.
# NOTE(review): the shell string embeds a user-supplied path (see
# run_in_commandline, shell=True) - quote/validate input_bam if untrusted.
bam_length = run_in_commandline('samtools view ' + input_bam + ' | wc -l')
total_reads = int(bam_length.decode())
# start timing
start_time_filtering = datetime.now()
start_time = datetime.now()
stride = 100000  # report progress every `stride` reads
filename = input_bam.split('/')[-1]
print("Correction of XQ tags started...")
for idx, entry in enumerate(infile.fetch(until_eof=True)):
    if entry.has_tag('XQ'):
        xq = entry.get_tag('XQ')
        xd = entry.get_tag('XD')
        # New XQ = old XQ minus the number of padded N's at the end of XD.
        xd_N = len(xd) - len(xd.rstrip('N'))
        xq -= xd_N
        if xq == 0:
            # A zero score is encoded by removing the XQ tag entirely.
            entry.set_tag('XQ', None)
        else:
            entry.set_tag('XQ', xq)
    if (idx+1) % stride == 0:
        totaltime = datetime.now() - start_time_filtering
        stride_steptime = datetime.now() - start_time
        time_to_go = (total_reads - (idx+1))/stride * stride_steptime
        print("File " + filename + " - Reads " + str(idx+1) + "/" + str(total_reads) + " processed. Time for last " + str(stride) + ": " + str(stride_steptime) + ", Total time: " + str(totaltime) + ". Time remaining: " + str(time_to_go))
        start_time = datetime.now()
    outfile.write(entry)
infile.close()
outfile.close()
print("Finished.")
| true |
e31c9229fc1cc57d1d2c93e6427c568c723ebf65 | Python | spacocha/SmileTrain | /test/test_util_primer.py | UTF-8 | 1,079 | 2.828125 | 3 | [
"MIT"
] | permissive | import unittest
from SmileTrain import util_primer
from SmileTrain.test import fake_fh
class TestRemovePrimers(unittest.TestCase):
    '''tests for the remove primers utility'''
    def setUp(self):
        # One fastq record whose read starts with T + the AAAA primer.
        self.fastq = fake_fh('''@lolapolooza\nTAAAACATCATCATCAT\n+lolapolooza\n"#$%&'()*+,-./012\n''')
        self.primer = "AAAA"
        self.max_primer_diffs = 1
        self.primer_remover = util_primer.PrimerRemover(self.fastq, self.primer, self.max_primer_diffs)
    def test_correct_output(self):
        '''the primer remover should trim the match as expected'''
        record = self.primer_remover.next()
        self.assertEqual(record.id, 'lolapolooza')
        # Everything up to and including the primer match is trimmed off.
        self.assertEqual(str(record.seq), 'CATCATCATCAT')
        self.assertEqual(record.letter_annotations['phred_quality'], [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17])
        self.assertEqual(self.primer_remover.n_successes, 1)
class TestMistmatches(unittest.TestCase):
    # NOTE(review): class name has a typo ("Mistmatches"); kept as-is so
    # test discovery / references stay stable.
    def test_correct(self):
        """mismatches() should report its diff count for this alignment."""
self.assertEqual(util_primer.mismatches('TCAAAAGATGATGATGAT', 'AAAA', 15), (2, 0)) | true |
8672fffe8c6c8bfa8edddcde2d29d4da18474f1c | Python | renxk/Python004 | /Week01/requests/maoyan.py | UTF-8 | 2,767 | 2.78125 | 3 | [] | no_license | import requests
import random
from bs4 import BeautifulSoup
import os
import csv
user_agents = [
'Mozilla/5.0 (Linux; Android 6.0.1; Moto G (4)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Mobile Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15'
]
class maoyan_spider:
    """Scrapes the first page of maoyan.com's film list into a CSV file."""
    def __init__(self):
        self.url = 'https://maoyan.com/films?offset=0'
        self.pageSize = 30  # NOTE(review): never read in this class
        self.request_header = {}
        self.fileName = 'requests_movice.csv'  # output CSV (name kept as-is)
    def clearFile(self):
        """Recreate the CSV output file with a header row (in Chinese)."""
        if os.path.exists(self.fileName):
            os.remove(self.fileName)
        with open(self.fileName, 'w', encoding='utf-8', newline='') as fh:
            csv_writer = csv.writer(fh)
            csv_writer.writerow(['名称', '类型', '上映时间'])
    def getUserAgent(self):
        """Pick a random User-Agent from the module-level list."""
        return random.choice(user_agents)
    def getMaoYanHtml(self):
        """Fetch the listing page; returns (200, html) or (status, reason)."""
        # if os.path.exists('maoyan.html'):
        #     with open('maoyan.html', 'r') as fh:
        #         html = fh.read()
        #         return (200, html)
        self.request_header['User-Agent'] = self.getUserAgent()
        response = requests.get(self.url, headers = self.request_header);
        if (response.status_code == 200):
            # with open('maoyan.html', 'w') as fh:
            #     fh.write(response.text)
            return (200, response.text)
        else:
            return (response.status_code, response.reason)
    def parsingHtml(self, htmlContent = ''):
        """Extract name/type/date from each movie-info block and save it."""
        bs = BeautifulSoup(htmlContent, 'html.parser')
        moveList = bs.find_all('div', attrs={'class' : 'movie-info'})
        for item in moveList:
            # print(item)
            name = item.find("div", attrs={"class" : "title"}).text
            # NOTE(review): `type` and `time` shadow the builtin / a module name.
            type = item.find('div', attrs = {'class' : 'actors'}).text
            time = item.find('div', attrs = {'class' : 'show-info'}).text
            self.saveMoviesData(name=name, type=type, time=time)
    def saveMoviesData(self, name ='', type = '', time = ''):
        """Echo one movie to stdout and append it to the CSV file."""
        print('电影名称:' + name)
        print('电影类型:' + type)
        print('上映时间:' + time)
        print('=' * 20)
        with open(self.fileName, 'a', encoding='utf-8', newline='') as fh:
            csv_writer = csv.writer(fh)
            csv_writer.writerow([name, type, time])
    def manage(self):
        """Entry point: reset the CSV, fetch the page, parse or report errors."""
        self.clearFile()
        (code, result) = self.getMaoYanHtml()
        if code == 200:
            self.parsingHtml(result)
        else:
            print(result)
if __name__ == '__main__':
maoyan_spider().manage()
| true |
b617291c1afb1478b61f0d9d55adfc94e386ba55 | Python | arohigupta/algorithms-interviews | /convert_to_int.py | UTF-8 | 293 | 4.21875 | 4 | [] | no_license | # How to convert numeric String to int
string_input = "1111111"
# Pythonic way: the int() builtin parses a decimal string directly.
int_of_string = int(string_input)
print int_of_string + 1
# C-style alternative (manual digit accumulation) defined below:
def a_to_i(s):
    """C-style atoi: fold a string of decimal digits into an int."""
    value = 0
    for digit in s:
        value = value * 10 + (ord(digit) - ord('0'))
    return value
print a_to_i(string_input) + 1 | true |
96ac4dd36b3f20b8b16eef62562f670681f5b9f4 | Python | shukanov-artyom/studies | /Python/decorators/dec_wo_args.py | UTF-8 | 510 | 3.796875 | 4 | [] | no_license | class decoratorWithoutArgs(object):
    def __init__(self, f):
        '''
        Decorator initializer: stores the wrapped function.
        For decorators without arguments this code runs at decoration time
        (when @decoratorWithoutArgs is applied), not at call time.
        '''
        print("--decorating with decorator--")
        self.f = f
    def __call__(self, *args):
        '''Invoked for each call of the decorated function; note that the
        wrapped function's return value is discarded (returns None).'''
        print("--decorated call--")
        self.f(*args)
print("let's decorate function call")
# Decoration happens here: targetFunction is replaced by a
# decoratorWithoutArgs instance wrapping the original function.
@decoratorWithoutArgs
def targetFunction(a):
    print(a)
print("let's call target function")
targetFunction("final argument!") | true |
766681d72d8d35107510f856b43adaed25d2fd61 | Python | whglamrock/leetcode_series | /leetcode218 The Skyline Problem.py | UTF-8 | 1,949 | 3.671875 | 4 | [] | no_license |
from heapq import *
# The idea is for every x coordinate, we try to get a tallest height;
# if the height != previous height, add to the skyline list
# two pointers: one pointer iterate through all the x coordinates; another iterate through the buildings
# to push into live pq or pop.
# the following solution is O(NlogN) where N = len(buildings)
class Solution(object):
    """Sweep-line + max-heap solution for the skyline problem (LC 218)."""

    def getSkyline(self, buildings):
        """
        :type buildings: List[List[int]]  (each [left, right, height], sorted)
        :rtype: List[List[int]]           key points [x, height]
        """
        if not buildings:
            return []
        # Every left/right edge is a candidate x coordinate, in order.
        xs = sorted({edge for left, right, _ in buildings for edge in (left, right)})
        heap = []            # live buildings as [-height, right_edge]
        skyline = [[-1, 0]]  # sentinel so the first height comparison works
        idx, total = 0, len(buildings)
        for x in xs:
            # Activate every building whose left edge is at or before x.
            while idx < total and buildings[idx][0] <= x:
                heappush(heap, [-buildings[idx][2], buildings[idx][1]])
                idx += 1
            # Retire the tallest entries that have already ended at x;
            # dead shorter entries may linger - only the root matters.
            while heap and heap[0][1] <= x:
                heappop(heap)
            height = -heap[0][0] if heap else 0
            # Record a key point only when the visible height changes.
            if skyline[-1][1] != height:
                skyline.append([x, height])
        return skyline[1:]  # drop the sentinel
print Solution().getSkyline([[2, 9, 10], [3, 7, 15], [5, 12, 12,], [15, 20, 10,], [19, 24, 8]])
| true |
0d8f43d6d9f5d95c9306666344a4ee894dc4d80c | Python | AgronomicForecastingLab/sense | /versuch_oh-dubois.py | UTF-8 | 977 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
"""
compare own results against references
from the Ulaby example codes provided
http://mrs.eecs.umich.edu/codes/Module10_5/Module10_5.html
for the Oh92 model (PRISM1)
"""
import sys
import os
sys.path.append(os.path.abspath(os.path.dirname(__file__)) + os.sep + '..')
from sense.surface import Oh92
from sense.surface import Dubois95
from sense.util import f2lam
import matplotlib.pyplot as plt
import numpy as np
plt.close('all')
# NOTE: normally the imaginary part of the permittivity is negative!
eps = 15-0.j
freq = 5. # GHz
s = 0.02 # rms surface height, m
# Normalized roughness: wavenumber (2*pi/lambda) times rms height.
ks = (2.*np.pi/f2lam(freq))*s
theta = np.deg2rad(np.arange(0.,71.) )  # incidence angles 0..70 deg
# Plot both backscatter models over the full angle range.
O = Oh92(eps, ks, theta)
O.plot()
d = Dubois95(eps, ks, theta, f2lam(freq))
d.plot()
# Spot-check single angles; Oh92 values converted to dB.
o = Oh92(eps, ks, np.deg2rad(50))
print(10*np.log10(o.vv))
print(10*np.log10(o.hh))
print(10*np.log10(o.hv))
d = Dubois95(eps, ks, np.deg2rad(40), f2lam(freq))
print(d.vv)
print(d.hh)
| true |
8eace3665caa943ee497845b1233fe17f4feccc8 | Python | shenjicai/Raspberry-Pi-PICO_traing | /bsp/ws2812b.py | UTF-8 | 4,712 | 2.984375 | 3 | [] | no_license | import array, time, math
from machine import Pin
import rp2
LED_COUNT = 12 # number of LEDs in ring light
PIN_NUM = 18 # pin connected to ring light
brightness = 1.0 # 0.1 = darker, 1.0 = brightest
@rp2.asm_pio(sideset_init=rp2.PIO.OUT_LOW, out_shiftdir=rp2.PIO.SHIFT_LEFT,
             autopull=True, pull_thresh=24) # PIO configuration: 24-bit GRB words, MSB first
def ws2812():
    # WS2812 one-wire protocol: each bit is a high pulse whose width
    # encodes 0/1.  T1/T2/T3 are cycle counts at the 8 MHz state-machine
    # clock configured below - presumably chosen to hit the WS2812 timing
    # spec (confirm against the datasheet).
    T1 = 2
    T2 = 5
    T3 = 3
    wrap_target()
    label("bitloop")
    out(x, 1) .side(0) [T3 - 1]
    jmp(not_x, "do_zero") .side(1) [T1 - 1]
    jmp("bitloop") .side(1) [T2 - 1]
    label("do_zero")
    nop() .side(0) [T2 - 1]
    wrap()
state_mach = rp2.StateMachine(0, ws2812, freq=8_000_000, sideset_base=Pin(PIN_NUM))
state_mach.active(1)
pixel_array = array.array("I", [0 for _ in range(LED_COUNT)])
def update_pix(brightness_input=brightness): # dimming colors and updating state machine (state_mach)
    """Scale every pixel in the global pixel_array by brightness_input
    and push the resulting GRB words to the PIO state machine."""
    dimmer_array = array.array("I", [0 for _ in range(LED_COUNT)])
    for ii,cc in enumerate(pixel_array):
        r = int(((cc >> 8) & 0xFF) * brightness_input) # red byte, dimmed
        g = int(((cc >> 16) & 0xFF) * brightness_input) # green byte, dimmed
        b = int((cc & 0xFF) * brightness_input) # blue byte, dimmed
        dimmer_array[ii] = (g<<16) + (r<<8) + b # repack as 24-bit GRB
    state_mach.put(dimmer_array, 8) # shift words out to the LEDs
    time.sleep_ms(10)
def set_24bit(ii, color): # store one pixel's color into the global pixel_array
    # `color` is a '#rrggbb' string; stored packed as GRB.
    color = hex_to_rgb(color)
    pixel_array[ii] = (color[1]<<16) + (color[0]<<8) + color[2] # set 24-bit color
def hex_to_rgb(hex_val):
    """Convert '#rrggbb' (leading '#' optional) to an (r, g, b) int tuple."""
    digits = hex_val.lstrip('#')
    return tuple(int(digits[i:i + 2], 16) for i in (0, 2, 4))
def on(n, color = "#ffffff"):
    """Light pixel n (1..12) with `color` and refresh the strip.
    NOTE(review): 12 is hard-coded here instead of LED_COUNT."""
    if not ((n >= 1 and n <= 12) and isinstance(n, int)):
        print("arg error")
        return
    set_24bit((n - 1) % 12, color)
    update_pix()
def off(n, color = "#000000"):
    """Turn pixel n off (same as on() with a black default color)."""
    if not ((n >= 1 and n <= 12) and isinstance(n, int)):
        print("arg error")
        return
    set_24bit((n - 1) % 12, color)
    update_pix()
def on_all(color = "#ffffff"):
    """Set every pixel to `color` and refresh the strip."""
    for i in range(0,12):
        set_24bit(i, color)
    update_pix()
def off_all(color = "#000000"):
    """Turn every pixel off (black) and refresh the strip."""
    for i in range(0,12):
        set_24bit(i, color)
    update_pix()
def light_value(l):
    """Return a neutral-grey '#rrggbb' string for intensity *l*, clamped to 0-255."""
    level = max(0, min(255, l))
    return "#{0:02x}{0:02x}{0:02x}".format(level)
class PixelDisplay():
    """Off-screen frame buffer of 12 GRB-packed pixels; render() pushes it
    to the PIO state machine."""
    def __init__(self):
        # One 24-bit GRB word per LED (unsigned 32-bit storage).
        self.pixel_array = array.array("I", [0 for _ in range(12)])
    def set_color(self, n, color):
        """Set the color of pixel *n*.

        n -- 1...12 (wraps via modulo)
        color -- (r, g, b) tuple
        """
        self.pixel_array[(n - 1) % LED_COUNT] = (color[1]<<16) + (color[0]<<8) + color[2]
    def get_color(self, n):
        """Return pixel *n*'s color as an (r, g, b) tuple."""
        v = self.pixel_array[(n - 1) % LED_COUNT]
        return ((v >> 8) & 0xff, (v >> 16) & 0xff, v & 0xff)
    def fill(self, c):
        """Set every pixel to color tuple *c*."""
        for i in range(1, LED_COUNT + 1):
            self.set_color(i, c)
    def dim(self, brightness_input = 1, n = None):
        """Scale pixel *n* (or all pixels when n is None) by brightness_input.
        NOTE(review): unlike set_color, n is NOT taken modulo LED_COUNT here."""
        if n is not None:
            cc = self.pixel_array[n - 1]
            r = int(((cc >> 8) & 0xFF) * brightness_input) # red, dimmed
            g = int(((cc >> 16) & 0xFF) * brightness_input) # green, dimmed
            b = int((cc & 0xFF) * brightness_input) # blue, dimmed
            self.pixel_array[n - 1] = (g<<16) + (r<<8) + b # repack GRB
        else:
            for ii,cc in enumerate(self.pixel_array):
                r = int(((cc >> 8) & 0xFF) * brightness_input) # red, dimmed
                g = int(((cc >> 16) & 0xFF) * brightness_input) # green, dimmed
                b = int((cc & 0xFF) * brightness_input) # blue, dimmed
                self.pixel_array[ii] = (g<<16) + (r<<8) + b # repack GRB
    def rainbow(self, offset = 0):
        """Fill the ring with a color wheel, rotated by *offset* (0-255)."""
        for i in range(1, LED_COUNT + 1):
            rc_index = (i * 256 // LED_COUNT) + offset
            self.set_color(i, wheel(rc_index & 255))
    def render(self):
        """Push the buffered frame out to the LEDs."""
        state_mach.put(self.pixel_array, 8)
def wheel(pos):
    """Map 0-255 onto an (r, g, b) color along the r -> g -> b -> r wheel.

    Out-of-range positions yield black.
    """
    if not 0 <= pos <= 255:
        return (0, 0, 0)
    if pos < 85:
        return (255 - pos * 3, pos * 3, 0)
    if pos < 170:
        offset = pos - 85
        return (0, 255 - offset * 3, offset * 3)
    offset = pos - 170
    return (offset * 3, 0, 255 - offset * 3)
| true |
7bbec826cf92c5f1d71bdafe354bcfd0394e0483 | Python | contea95/1Day-1Commit-AlgorithmStudy | /BOJ/Python/3052.나머지/3052.py | UTF-8 | 168 | 3.0625 | 3 | [] | no_license | a = []
# Read 10 integers and report how many DISTINCT remainders mod 42 occur.
# A set replaces the original dict-with-bare-`except:` frequency counting:
# only the number of distinct keys was ever used, and the bare except
# could mask unrelated errors.
count = set()
for i in range(10):
    a.append(int(input()) % 42)
for i in a:
    count.add(i)
print(len(count))
| true |
f83a2b8eb6cd09dab6e5725bdb0175d9e299a6d6 | Python | yokolet/tranquil-beach-python | /tranquil-beach/test/other_test/test_palindrome_pairs.py | UTF-8 | 651 | 3 | 3 | [] | no_license | import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
import unittest
from other.palindrome_pairs import PalindromePairs
class TestPalindromePairs(unittest.TestCase):
    """Checks PalindromePairs.palindromePairs against LeetCode 336 samples."""
    def setUp(self):
        self.func = PalindromePairs()
    def test_1(self):
        """Mixed-length words, including reversed pairs and an 's' pivot."""
        words = ["abcd","dcba","lls","s","sssll"]
        expected = [[0,1],[1,0],[3,2],[2,4]]
        self.assertEqual(self.func.palindromePairs(words), expected)
    def test_2(self):
        """Only the bat/tab reversal forms palindrome pairs."""
        words = ["bat","tab","cat"]
        expected = [[0,1],[1,0]]
        self.assertEqual(self.func.palindromePairs(words), expected)
if __name__ == '__main__':
unittest.main()
| true |
a6f171053fc30d2aef7f8a29362938e207cf568c | Python | peterpt/pentest | /tools/simpleportscanner.py | UTF-8 | 1,647 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python
import socket
from multiprocessing.dummy import Pool as ThreadPool
import sys
from datetime import datetime
# Clear the screen
# subprocess.call('cls', shell=True)
# Ask for input
remoteServer = raw_input("Enter a remote host to scan: ")
remoteServerIP = socket.gethostbyname(remoteServer)
# Print a nice banner with information on which host we are about to scan
print "-" * 60
print "Please wait, scanning remote host", remoteServerIP
print "-" * 60
# Check what time the scan started
t1 = datetime.now()
# Using the range function to specify ports (here it will scans all ports between 1 and 1024)
# We also put in some error handling for catching errors
def scan(ports):
    # Probe one TCP port on the global remoteServerIP.
    # NOTE(review): `ports` is a single port number despite the plural name,
    # and no timeout is set, so connect/recv may block for a while.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    result = sock.connect_ex((remoteServerIP, ports))
    if result == 0:  # connect_ex returns 0 on a successful connection
        byte = str.encode("Server:\r\n")
        sock.send(byte)
        banner = sock.recv(1024)  # grab whatever the service sends first
        print "Port {}: Open".format(ports), " - ", banner
    sock.close()
# function to be mapped over
def scanParallel(ports, threads=4):
    # Fan scan() out over `ports` with a thread pool (I/O-bound work).
    # scan() returns None, so `results` is a list of Nones - kept for the
    # blocking map() semantics rather than its values.
    pool = ThreadPool(threads)
    results = pool.map(scan, ports)
    pool.close()
    pool.join()
    return results
if __name__ == "__main__":
ports =(20,21,22,23,53,69,80,88,110,123,135,137,138,139,143,161,389,443,445,464,512,513,631,860,1080,1433,1434,3124,3128,3306,3389,5800,5900,8080,10000)
results = scanParallel(ports, 4)
# Checking the time again
t2 = datetime.now()
# Calculates the difference of time, to see how long it took to run the script
total = t2 - t1
# Printing the information to screen
print 'Scanning Completed in: ', total
| true |
f453a471fd6de738b95e8462915b283d46e1f5aa | Python | edunham/toys | /lugpuzzles/bellnumber.py | UTF-8 | 758 | 3.359375 | 3 | [
"MIT"
] | permissive | from operator import mul
from fractions import Fraction
"""
$ pypy bellnumber.py
"""
def comb(n, k):
    # Exact n-choose-k: product of Fractions (n-i)/(i+1) is always an
    # integer, so int() is lossless.  Relies on Python 2's builtin reduce.
    return int( reduce(mul, (Fraction(n-i, i+1) for i in range(k)), 1) )
# kinda sorta uses http://mathworld.wolfram.com/BellNumber.html
# also used
# http://stackoverflow.com/questions/3025162/statistics-combinations-in-python
# to save you from having to install scipy. you're welcome.
# Bell numbers B(0)..B(10), used to seed the recurrence below.
bk = [1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975]
n = 10
# find the lowest n for which bell number is divisible by one million
while(True):
    n += 1
    sigma = 0
    # Recurrence: B(n) = sum over k of C(n-1, k) * B(k).
    for k in range(n):
        sigma += (bk[k]*comb((n-1), k))
    bk.append(sigma)
    if (sigma % 1000000 == 0):
        print "bell number " + str(n) + " is "+ str(sigma)
        exit()
| true |
b3170c693f0953df45cdacb95a5182726beaa9f0 | Python | rariyama/my_coder | /abc/181/b_trapezoid_sum.py | UTF-8 | 756 | 3.375 | 3 | [] | no_license | import unittest
from typing import List
'''
等差数列の和を求める。
a = (s+l)*len/2
lenはlとsの差で求める。
'''
class Solution():
    def solution(self, n: int, data: str):
        """Sum the integers of each inclusive range "s l" on the first *n*
        entries of *data* (despite the annotation, a list of strings).

        Uses the arithmetic-series formula (s + l) * (l - s + 1) // 2.
        Integer floor division replaces the original float `/ 2` + int():
        (s + l) and (l - s + 1) have opposite parity, so the product is
        always even and // is exact, whereas float division silently
        loses precision once the product exceeds 2**53.
        """
        ans = 0
        for i in range(n):
            s, l = map(int, data[i].split())
            ans += (s + l) * (l - s + 1) // 2
        return ans
class TestSolution(unittest.TestCase):
    """Sample case from ABC181 B (expected total 998244353)."""
    def setUp(self):
        self.test = {'data_len': 3, 'data': ['11 13', '17 47', '359 44683']}
        self.collect = 998244353
    def test_solution(self):
        res = Solution().solution(n=self.test['data_len'], data=self.test['data'])
        self.assertEqual(res, self.collect)
if __name__ == '__main__':
unittest.main()
| true |
20e116d65185762f05969a0ed9a4c39cfbdd7a3a | Python | ashishdev007/steganography_django | /backend/server/apps/steganography/allPixels.py | UTF-8 | 4,092 | 2.75 | 3 | [] | no_license |
"""
In this module encoding and decoding happens on the R,G, and B values of pixels if they meet the criteria
"""
from apps.steganography.utils.status import createStatus, getProgress, setProgressMultiProcessing, deleteStatus, getStatusObject
from django.db import connection
import time
from PIL import Image
import threading
import multiprocessing
from io import StringIO
import binascii
import math
def rgb2hex(r,g,b):
    """Format (r, g, b) byte values as a '#rrggbb' hex string."""
    return "#%02x%02x%02x" % (r, g, b)
def hex2rgb(hexcode):
    """Convert a '#rrggbb' string back to an (r, g, b) tuple of ints."""
    digits = hexcode[1:]  # drop the leading '#'
    return tuple(int(digits[pos:pos + 2], 16) for pos in (0, 2, 4))
def str2bin(message):
    """Return the bits of *message* (bytes) as a '0'/'1' string.

    bin() drops leading zero bits of the first byte; bin2str reverses
    this exactly, so the pair round-trips.
    """
    as_int = int(binascii.hexlify(message), 16)
    return bin(as_int)[2:]
def bin2str(binary):
    """Inverse of str2bin: rebuild text from a '0'/'1' bit string."""
    # NOTE(review): "%x" can yield an odd number of hex digits, which
    # unhexlify rejects - presumably callers always pass byte-aligned
    # payloads; confirm.
    message = binascii.unhexlify("%x" % (int('0b' + binary, 2)))
    try:
        message = message.decode("utf8")
    except UnicodeDecodeError:
        message = message.decode("ascii")
    except:
        # NOTE(review): bare except hides unrelated errors.
        print("Bit Array decode error!")
    finally:
        # NOTE(review): returning from finally swallows ANY exception
        # raised above (including a failing ascii fallback); `message`
        # may still be bytes here, in which case str() returns its repr.
        return str(message)
def enhanced_encode(hexcode, digit):
    """Embed up to three bits of *digit* into the low hex digits of '#rrggbb'.

    Only low digits currently in '0'..'5' are considered writable slots.
    Returns (new_hexcode, bits_consumed).
    """
    chars = list(hexcode[1:])
    consumed = 0
    for slot in (1, 3, 5):  # second hex digit of R, G and B
        if chars[slot] in ("0", "1", "2", "3", "4", "5"):
            if consumed >= len(digit):
                break  # message exhausted for this pixel
            chars[slot] = digit[consumed]
            consumed += 1
    return "#" + "".join(chars), consumed
def enhanced_decode(hexcode):
    """Read back the '0'/'1' bits hidden by enhanced_encode from '#rrggbb'."""
    low_digits = hexcode[1:]
    return "".join(ch for ch in (low_digits[1], low_digits[3], low_digits[5])
                   if ch in ("0", "1"))
def enhanced_hide(file, message, id):
    """Hide *message* (bytes) in the image at *file*, reporting progress
    for status record *id*; returns the modified PIL image, or an error
    string for unsupported modes."""
    # NOTE(review): presumably closed so spawned processes don't share the
    # Django DB connection - confirm.
    connection.close()
    img = Image.open(file)
    updatable = True
    lock = multiprocessing.Lock()
    # Payload bits followed by the 16-bit terminator 1111111111111110
    # that enhanced_retr scans for.
    binary = str(str2bin(message) + "1"*15 + "0")
    # NOTE: substring test - matches mode "RGB" as well as "RGBA".
    if img.mode in "RGBA":
        img = img.convert("RGBA")
        datas = list(img.getdata())
        digit = 0  # index of the next unwritten bit
        start = time.time()
        for i in range(0,len(datas)):
            item = datas[i]
            if (digit < len(binary)):
                progress = math.floor(digit*100/len(binary))
                # Fire at most one progress update per 20% bucket.
                updatable = True if progress % 20 != 0 else updatable
                if(updatable and progress % 20 == 0):
                    updatable = False
                    t1 = multiprocessing.Process(target=setProgressMultiProcessing, args=(id, progress, lock))
                    t1.start()
                # Write up to 3 bits into this pixel's low hex digits.
                (newpix, consumed) = enhanced_encode(rgb2hex(item[0], item[1], item[2]), binary[digit: digit+3])
                if newpix != None:
                    r,g,b = hex2rgb(newpix)
                    datas[i] = (r,g,b, item[3])  # keep the alpha channel
                    digit += consumed
                else:
                    break
        img.putdata(datas)
        deleteStatus(id)
        print("-------------------------")
        print("Done in ", time.time()-start)
        print("-------------------------")
        return img
    return "Incorrect Image mode, couldn't hide"
def enhanced_retr(file, id):
    """Recover a message hidden by enhanced_hide from the image at *file*,
    reporting progress for status record *id*."""
    img = Image.open(file)
    # status = getStatusObject(id)
    binary = StringIO()  # accumulates recovered bits
    answer = ""
    lock = multiprocessing.Lock()
    updatable = True
    # NOTE: substring test - matches mode "RGB" as well as "RGBA".
    if img.mode in "RGBA":
        img = img.convert("RGBA")
        datas = img.getdata()
        length = len(datas)
        complete = 0
        for item in datas:
            digit = enhanced_decode(rgb2hex(item[0], item[1], item[2]))
            progress = math.floor(complete*100/length)
            complete += 1
            # Fire at most one progress update per 5% bucket.
            updatable = True if progress % 5 != 0 else updatable
            if(updatable and progress % 5 == 0):
                updatable = False
                t1 = multiprocessing.Process(target=setProgressMultiProcessing, args=(id, progress, lock))
                t1.start()
            if digit != None:
                binary.write(digit)
                # Check whether the last 16 bits form the terminator.
                if(binary.tell() - 16 >= 0):
                    binary.seek(binary.tell() - 16)
                    if (binary.read(16) == "1111111111111110"):
                        print("Success!")
                        answer = binary.getvalue()
                        binary.close()
                        deleteStatus(id)
                        return bin2str(answer[:-16])  # strip the terminator
        # No terminator found: decode whatever was collected.
        answer = binary.getvalue()
        binary.close()
        deleteStatus(id)
        return bin2str(answer)
    # NOTE(review): typo "retrivev" in the user-facing message, left as-is
    # in this documentation-only pass.
    return "Incorrect Image mode, couldn't retrivev"
| true |
1d97b72f1c51890cb8cf9550001d708000480dc0 | Python | swplucky/prac | /dsa/Similar/arraySum.py | UTF-8 | 323 | 3.4375 | 3 | [] | no_license | def arraySum(arr):
    # Replace each element with the sum of all OTHER elements:
    # l[i] = sum of arr[0..i-1] (prefix), r[j] = sum of arr[j+1..n-1] (suffix).
    n = len(arr)
    l = [0]*n
    r = [0]*n
    for i in range(1,n):
        l[i] = l[i-1]+arr[i-1]
    for j in range(n-2,-1,-1):
        r[j] = r[j+1]+arr[j+1]
    for k in range(0,n):
        arr[k] = l[k] + r[k]  # equals total - arr[k]; mutates arr in place
    return arr
if __name__ == '__main__':
ar = [3,5,6,7,7]
print(arraySum(ar)) | true |
707da9d358bd41ac87cdb10a03399bec9a2f94a5 | Python | Jonjump/sdm | /domain/summary.py | UTF-8 | 1,259 | 2.6875 | 3 | [
"MIT"
] | permissive | from enum import Enum, unique
from typing import List
from . import Total, Money
@unique
class SummaryFields(Enum):
    """Donation attributes a Summary tree can be grouped by.

    Each member's value is the attribute name that groupByField() reads
    from a donation via getattr().
    """
    CURRENCY = "currency"
    SOURCE = "source"
    DATE = "date"
    TYPE = "type"
    WEEK = "week"
    MONTH = "month"
    DONOR = "donor"
def groupByField(donations, field):
    """Group *donations* into a dict keyed by each donation's value for
    *field* (a SummaryFields member whose .value names the attribute).

    Keys appear in order of first occurrence in *donations*.
    """
    grouped = {}
    for donation in donations:
        key = getattr(donation, field.value)
        # setdefault replaces the original membership-test-then-index
        # pattern (two dict lookups per item) with a single lookup.
        grouped.setdefault(key, []).append(donation)
    return grouped
def getZero(donations):
    # A zero Total in the currency of the first donation.
    # NOTE(review): assumes donations is non-empty (IndexError otherwise).
    return Total(Money(0, donations[0].money.currency), 0)
def addDonations(x, y):
    # Combine two donations' Money amounts.
    return x.money + y.money
def sumTotals(x, y):
    # Combine two objects' .total values.
    return x.total + y.total
class Summary(dict):
    """Recursive grouping tree: a dict of sub-Summaries keyed by the first
    field's values, each node carrying a Total over its donations."""
    def __init__(self, donations, fields: List[SummaryFields]):
        self.total = Total([x.money for x in donations], len(donations))
        if self._isNode(fields):
            # Consume the first field at this level; recurse with the rest.
            self.field = fields[0]
            fields = fields[1:]
            self._makeBranches(donations, fields)
    def _isNode(self, fields: List[SummaryFields]):
        # A node still has fields to group by; otherwise it is a leaf.
        return len(fields) != 0
    def _makeBranches(self, donations, fields):
        for key, group in groupByField(donations, self.field).items():
            self[key] = Summary(group, fields)
d3dbcc7a0eddfd65af4793ab501fb6f7417d8045 | Python | timkpaine/aat | /aat/tests/strategy/test_strategies/test_cancel_all.py | UTF-8 | 1,167 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | from aat import Strategy, Event, Order, OrderType, Side
class TestCancelAll(Strategy):
    """Strategy that places 5 limit sells, then cancels everything,
    asserting the open-order count along the way."""
    def __init__(self, *args, **kwargs) -> None:
        super(TestCancelAll, self).__init__(*args, **kwargs)
        self._count = 0  # orders placed so far
    async def onTrade(self, event: Event) -> None:
        if self._count < 5:
            # Price 1, size 10000000: far from market so the order rests open.
            await self.newOrder(
                Order(
                    1,
                    10000000,
                    Side.SELL,
                    self.instruments()[0],
                    order_type=OrderType.LIMIT,
                )
            )
            self._count += 1
            assert len(self.orders()) == self._count
        else:
            # After five resting orders, cancelAll must empty the book.
            await self.cancelAll()
            assert len(self.orders()) == 0
if __name__ == "__main__":
from aat import TradingEngine, parseConfig
cfg = parseConfig(
[
"--trading_type",
"backtest",
"--exchanges",
"aat.exchange:SyntheticExchange,1,1000",
"--strategies",
"aat.tests.strategy.test_strategies.test_cancel_all::TestCancelAll",
]
)
print(cfg)
t = TradingEngine(**cfg)
t.start()
| true |
709d5b95b8a08f6cd2c4ac0927e3d7742ff37d2e | Python | interskh/worldcup-watcher | /test/server.py | UTF-8 | 1,039 | 2.703125 | 3 | [] | no_license | import os
import SimpleHTTPServer
import SocketServer
index = 0
class MyHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Serves <N>.json next to this script on each GET, bumping N per request
    (Python 2).  NOTE(review): the module-level `index` variable is unused;
    the class attribute _index is the actual counter."""
    _index = 0
    def __init__(self, *args, **kwargs):
        SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(
            self, *args, **kwargs)
    def do_GET(self):
        # Resolve "<_index>.json" relative to this script's directory.
        file_name = ".".join([str(MyHandler._index), "json"])
        current_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), file_name)
        print current_path
        if os.path.exists(current_path):
            with open(current_path, "r") as f:
                self.send_response(200)
                self.send_header('Content-type', 'application/json')
                self.end_headers()
                self.wfile.write(f.read())
        else:
            self.send_error(404, '404')
        # Advance to the next fixture even on a 404.
        MyHandler._index += 1
if __name__ == '__main__':
httpd = SocketServer.TCPServer(("", 8597), MyHandler)
try:
httpd.serve_forever()
except KeyboardInterrupt:
httpd.socket.close()
| true |
f97754a3a3da3d11872f3ac18168d4d0bd208e3a | Python | erasadqadri/Deployment-of-ML-Model-with-Docker-and-Flask-Project-1 | /FlaskApp.py | UTF-8 | 983 | 2.875 | 3 | [] | no_license | """
Author: Asad Qadri
"""
from flask import Flask, request
import pandas as pd
import numpy as np
import pickle
import sklearn
app = Flask(__name__)
# NOTE(review): pickle.load executes arbitrary code on load - only safe if
# classifier.pkl is trusted.  The file handle is also never closed.
pickle_in = open("classifier.pkl", "rb")
classifier = pickle.load(pickle_in)
@app.route("/")
def welcome():
return "Welcome All"
@app.route("/predict")
def bank_note_authentication():
variance = request.args.get('variance')
skewness = request.args.get('skewness')
curtosis = request.args.get('curtosis')
entropy = request.args.get('entropy')
prediction = classifier.predict([[variance,skewness,curtosis,entropy]])
return "predicted class value is " + str(prediction)
@app.route("/predict_file", methods = ["POST"])
def bank_note_file_authentication():
    """Predict classes for every row of an uploaded CSV of bank-note features."""
    # The uploaded file is parsed straight into a DataFrame of features.
    uploaded_frame = pd.read_csv(request.files.get("files"))
    predicted = classifier.predict(uploaded_frame)
    return "predicted class value for TestFile is " + str(list(predicted))
if __name__ == '__main__':
    # Run Flask's built-in development server (not for production use).
    app.run()
b589a392253e7258ebdd89546886e4904c575ce2 | Python | malfaux/malfaux.github.com | /t/logo.py | UTF-8 | 1,062 | 2.625 | 3 | [] | no_license | #!/usr/bin/python
import Image, ImageDraw
import aggdraw
from math import sqrt,pow
# Draw a 128x128 grey/white yin-yang style logo with aggdraw and save it to
# /tmp/bbx.jpg. (Python 2 / PIL-era code: Image.fromstring was removed in
# modern Pillow.)
greypen = aggdraw.Pen("grey",0.5)
whitepen = aggdraw.Pen("white",0.5)
greybrush = aggdraw.Brush("grey")
whitebrush = aggdraw.Brush("white")
#img = Image.new("RGBA",(128,128))
drw = aggdraw.Draw("RGBA",(128,128),"white")
drw.setantialias(True)
#drw.rectangle((0,0,128,128),whitepen,whitebrush)
# Outer disc plus a half-disc fill.
drw.ellipse((0,0,128,128),greypen,whitebrush)
drw.pieslice((0,0,128,128),180,0,greypen,greybrush)
#drw.pieslice((0,64,128,64),0,180,greypen, greybrush)
# Inner half-disc, wedge and triangle forming the swirl.
drw.ellipse((0,32,64,96),whitepen,whitebrush)
drw.pieslice((0,0,128,128),0,30,greypen,greybrush)
drw.polygon([64,32,64,64,int(sqrt(pow(64,2)-pow(32,2)))+64,32], greypen,greybrush)
# The two "eyes" of the logo.
drw.rectangle((28,44,36,52),greypen,greybrush)
drw.ellipse((92,44,100,52),whitepen,whitebrush)
Image.fromstring('RGBA',(128,128),drw.tostring()).save('/tmp/bbx.jpg')
#drw.flush().save('/tmp/bbx.jpg');
#with open('/tmp/bbx.jpg','wb') as fh:
# fh.write(drw.tostring())
#drw.rectangle((64,32,128,64),outline="#000",fill="#000")
#img.save('/tmp/bbx.jpg');
| true |
f02b1932a6340d78ba603f4da7b34c422e377dee | Python | rameshgayam/eda_project | /q04_cor/build.py | UTF-8 | 298 | 2.890625 | 3 | [] | no_license | # %load q04_cor/build.py
# Default imports
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('data/house_prices_multivariate.csv')
# Write your code here
def cor(df):
    """Render a viridis heatmap of the pairwise correlations of df's columns."""
    correlations = df.corr()
    plt.figure(figsize=(12, 8))
    sns.heatmap(correlations, cmap='viridis')
| true |
997494d5bc8a990f4ed49ae5fb894daff5ca7c5e | Python | kmdn/datarec | /additional_investigation/cross_validation.py | UTF-8 | 4,188 | 2.953125 | 3 | [] | no_license | """
In this module for one exemplary model (Linear SVM on tfidf for abstracts) different evaluation
methods are compared, i.e. hold out evaluation and k-folds stratified cross validation.
"""
import pickle
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn import svm, metrics
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.multiclass import OneVsRestClassifier
import preprocessing
def _mean_cv_scores(features, labels, folds):
    """Mean accuracy/precision/recall/F1 of a fresh linear SVM over `folds` CV folds."""
    means = []
    for scoring in ('accuracy', 'precision_weighted', 'recall_weighted', 'f1_weighted'):
        clf = OneVsRestClassifier(svm.LinearSVC(C=1))
        means.append(cross_val_score(clf, features, labels, cv=folds, scoring=scoring).mean())
    return means


documentation_file_parameteropt = open("cross_validation_results.txt", "w+")

# Known dataset titles are removed from every query so the classifier cannot
# simply match on them.
titles = []
with open("Dataset_Titles.txt") as titles_file:
    for line in titles_file:
        titles.append(line.replace("\n", ""))

# Each input row is "<id>\t<query>\t<comma-separated dataset labels>".
preprocessed_data_list = []
with open("Abstracts_New_Database.txt") as dataframe:  # was left unclosed before
    for line in dataframe:
        query = str(line).split("\t")[1]
        for title in titles:
            query = query.replace(title, "")
        dataset = str(str(line).split("\t")[2]).replace("\n", "")
        dataset_list = dataset.split(", ")
        preprocessed_query = preprocessing.preprocess(query)
        preprocessed_data_list.append((dataset_list, preprocessed_query))

datasets, queries = zip(*preprocessed_data_list)
q_tfidf = preprocessing.tfidf(queries)

# Multi-label binarisation of the dataset labels; the fitted encoder is
# persisted for later reuse.
label_encoder = MultiLabelBinarizer()
label_encoder.fit(datasets)
datasets_encoded = label_encoder.transform(datasets)
with open("label_encoder_croos_validation.sav", "wb") as encoder_file:  # was left unclosed before
    pickle.dump(label_encoder, encoder_file)

# --- Hold-out evaluation (80/20 split) ---
print("Hold out validation")
d_train, d_test, q_train, q_test = train_test_split(datasets_encoded, q_tfidf, test_size=0.2)
svm_holdout = OneVsRestClassifier(svm.LinearSVC(C=1))
svm_holdout.fit(q_train, d_train)
holdout_pred = svm_holdout.predict(q_test)
documentation_file_parameteropt.write(
    "Holdout: Accuracy {}, Precision {}, Recall {}, F1 score {} \n".format(
        metrics.accuracy_score(d_test, holdout_pred),
        metrics.precision_score(d_test, holdout_pred, average='weighted'),
        metrics.recall_score(d_test, holdout_pred, average='weighted'),
        metrics.f1_score(d_test, holdout_pred, average='weighted')))

# --- k-fold cross validation; the k=5 and k=10 sections were copy-pasted
# duplicates before and are now a single loop. ---
for k in (5, 10):
    print("k folds stratified cross validation (k={})".format(k))
    cv_accuracy, cv_precision, cv_recall, cv_f1 = _mean_cv_scores(q_tfidf, datasets_encoded, k)
    documentation_file_parameteropt.write(
        "CV (k={}): Accuracy {}, Precision {}, Recall {}, F1 score {} \n".format(
            k, cv_accuracy, cv_precision, cv_recall, cv_f1))

documentation_file_parameteropt.close()
| true |
a4441419b5201e80bfeaad83001994f4c524ea1c | Python | ubccapico/educ-canvasapi-pythoncollection | /connectToCanvasCourseMigrationScripts/Uniquify_Titles.py | UTF-8 | 2,112 | 2.71875 | 3 | [
"MIT"
] | permissive | # uncompyle6 version 3.1.3
# Python bytecode 3.6 (3379)
# Decompiled from: Python 3.6.5 (v3.6.5:f59c0932b4, Mar 28 2018, 17:00:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: Uniquify_Titles.py
import Init, API_Calls as API, re
def getCoreNames(names, ugly_names):
    """Return the de-duplicated base titles of *ugly_names* that also occur in *names*.

    Canvas disambiguates reused page titles by appending "-<n>"; stripping
    that suffix recovers the original ("core") title.
    """
    canvas_extension = re.compile('-[0-9]+')
    plain_ugly_names = [canvas_extension.split(uname)[0] for uname in ugly_names]
    core_names = list(set(plain_ugly_names))
    if core_names != []:
        print('The following page titles are used for more than one page.')
        print(core_names)
    # Keep only core names that are real page titles. (Bug fix: the original
    # removed items from core_names while iterating over it, which silently
    # skips the element that follows every removal.)
    return [core_name for core_name in core_names if core_name in names]
def MasterUniquifyTitles():
    """Rename Canvas pages whose titles are reused, qualifying each with its module name."""
    print('****Making all page titles unique****')
    # Titles Canvas already disambiguated look like "<title>-<n>".
    reused_title_identifier = re.compile('.+?-[0-9]+')
    names = [page['title'] for page in API.getAllPages()]
    ugly_names = [name for name in names if reused_title_identifier.match(name)]
    core_names = getCoreNames(names, ugly_names)
    if core_names != []:
        canvas_extension = re.compile('-[0-9]+')
        module_identifier = re.compile('[^0-9]+[0-9]*')
        for module in API.getAllModules():
            for module_item in API.getModuleItems(module['id']):
                if module_item['type'] != 'Page':
                    continue
                stripped_title = canvas_extension.split(module_item['title'])[0]
                if stripped_title not in core_names:
                    continue
                # Qualify the reused title with the leading part of the module name.
                module_partial_name = module_identifier.findall(module['name'])[0]
                new_page_title = ('{} ({})').format(stripped_title, module_partial_name)
                print(('({}) used to be ({})').format(new_page_title, module_item['title']))
                page = API.updatePageTitle(module_item['page_url'], new_page_title)
    print('\n\n')
f30dd1329d5e4e9058230e264d711274922b20b7 | Python | EdoardoSarti/Anchors | /AlignMe_Anchors.cpp/scripts/extract_anchors_from_alignment.py | UTF-8 | 5,052 | 2.65625 | 3 | [] | no_license | #!/usr/bin/python
from Bio import AlignIO
from optparse import OptionParser
from os import path
# Command-line options. NOTE(review): the --below/--above and --average/--min/--max
# flags store consts like "below"/"average", but the comparisons later test for
# "--below_threshold"/"--average"; passing any of these flags explicitly therefore
# disables the corresponding branch — confirm and align the const values.
parser = OptionParser()
parser.add_option( "-f", "--alignement_file", dest="alignment_file", metavar="FILE",
                   help="file containing the alignment, \nNOTE: this is the ONLY required flag! others are optional")
parser.add_option( "-o", "--output_file", metavar="FILE", default="extracted.txt",
                   help="file for output of selected anchors")
parser.add_option( "-a", "--nr_allowed_gaps", default=0, type="int", dest="allowed_gaps",
                   help="number of gaps that are allowed in a given window for it to be considered")
parser.add_option( "-p", "--penalty", default=0, type="float", dest="penalty",
                   help="penalty for a gap within window")
parser.add_option( "-w", "--window_size", default=11, type="int",
                   help="size of the window for which averaged scales values are summed, NOTE: even numbered values will lead to odd numbered windows anyway")
parser.add_option( "-t", "--threshold", default=0, type="float",
                   help="")
parser.add_option( "-s", "--scale", default="../scales/HWvH.txt", dest="scale_file", metavar="FILE",
                   help="file containing (hydrophobicity) scale that is used for scoring windows")
parser.set_defaults( filter="--below_threshold")
parser.add_option( "--below_threshold", action="store_const",const="below", dest="filter",
                   help="select all windows with total scores below threshold for anchors (default behaviour)")
parser.add_option( "--above_threshold", action="store_const",const="above", dest="filter",
                   help="select all windows with total scores above threshold for anchors")
parser.set_defaults( merge="--average")
parser.add_option( "--average",action="store_const", const="average", dest="merge",
                   help="averages both values in aligned positions for score calculation (defaul behaviour)")
parser.add_option( "--min", action="store_const", const="min", dest="merge",
                   help="uses minimum value of aligned positions for score calculation")
parser.add_option( "--max", action="store_const", const="max", dest="merge",
                   help="uses maximum value of aligned positions for score calculation")
(options, args) = parser.parse_args()
print parser.parse_args()
if not options.alignment_file:
    print "\n\nERROR: no alignment file given! bailing out. \n\n"
    parser.print_help()
    exit()
if not path.isfile( options.scale_file):
    print "\n\nERROR: scale file not found, consider using '-s'\n\n"
    parser.print_help()
    exit()
half_size = int( options.window_size / 2)
# read alignment (pairwise, CLUSTAL format; sequences align[0] and align[1])
align = AlignIO.read( options.alignment_file ,"clustal")
# read scale: residue letter -> hydrophobicity value
scale = {}
with open( options.scale_file ) as f:
    for l in f:
        cols = l.split()
        scale[ cols[0]] = float( cols[1])
# open output (anchor list and per-position window-score profile)
w = open( options.output_file ,"w")
pro = open( "profile.txt", "w")
# array that contains anchor information as '1', '0' else
array = ['0'] * len( align[0])
# main loop over alignment
for i in range( 0, len( align[0])):
    # sum over window  (NOTE(review): `sum` shadows the builtin)
    sum = 0
    for j in range( max( 0, i - half_size), min( len(align[0]), i + half_size + 1)):
        a = align[0][j]
        b = align[1][j]
        if a != "-" and b != "-" and a != "." and b != ".":
            if options.merge == "--average":
                sum += 0.5 * ( scale[a] + scale[b])
            elif options.merge == "--min":
                sum += min( scale[a], scale[b])
            elif options.merge == "--max":
                sum += max( scale[a], scale[b])
        else:
            sum += options.penalty # gap penalty
    pro.write( str( sum) + "\n")
    # set array values according to sum, collecting anchored alignment positions
    if options.filter == "--below_threshold":
        if sum < options.threshold:
            for j in range( max( 0, i - half_size), min( len(align[0]), i + half_size + 1)):
                if align[0][j] != "-" and align[1][j] != "-":
                    array[j] = 1 # ANCHOR
    elif options.filter == "--above_threshold":
        if sum > options.threshold:
            for j in range( max( 0, i - half_size), min( len(align[0]), i + half_size + 1)):
                if align[0][j] != "-" and align[1][j] != "-":
                    array[j] = 1 # ANCHOR
pro.close()
# translate alignment positions into sequence positions and output
counter = 0
for val in array:
    if val == 1:
        # 1-based residue index = alignment index minus the gaps seen so far.
        seqA = align[0][:counter+1].format( "fasta")
        seqA = seqA[seqA.find("\n")+1:]
        # print seqA
        posA = counter - seqA.count("-") + 1
        seqB = align[1][:counter+1].format( "fasta")
        seqB = seqB[ seqB.find("\n")+1:]
        # print seqB
        posB = counter - seqB.count("-") + 1
        # print counter, posA, posB
        w.write( str( posA) + " " + str( posB) + " 0 \n")
    counter += 1
w.close()
| true |
538c0c55e94f27493a8824d7c3464d577d0c9bab | Python | Colinstarger/RECAP_FJC | /fjc_update.py | UTF-8 | 1,312 | 3.015625 | 3 | [] | no_license | #Update Functions
#Python3
import csv
my_path = "/Users/colinstarger/Downloads/LDDC_Temp/"
def makeDict(file_name):
    """Load a two-column CSV from my_path into a {first_col: second_col} dict.

    The first row is assumed to be a header and is skipped.
    """
    with open(my_path + file_name) as csv_data_file:
        rows = csv.reader(csv_data_file)
        next(rows)  # drop the header row
        return {record[0]: record[1] for record in rows}
def checkDicts(newDict, oldDict):
    """Return {key: 'new'|'update'} for entries added or changed in newDict.

    Deletions are not tracked: the new snapshot is assumed to be a superset
    of the old one.
    """
    changes = {}
    for key, value in newDict.items():
        if key not in oldDict:
            print("Found new entry", key, value)
            changes[key] = "new"
        elif oldDict[key] != value:
            print("Found updated entry", key, value)
            changes[key] = "update"
    return changes
def compare_save():
    """Diff the old/new CSV snapshots for one case type and write the changes.

    Edit the three file-name variables below to switch case types; the
    earlier configurations are kept commented out for convenience.
    """
    # old_file = "wirefraud_old.csv"
    # new_file = "wirefraud_new.csv"
    # save_file = "wirefraud_update.csv"
    # old_file = "bank_old.csv"
    # new_file = "bank_new.csv"
    # save_file = "bank_update.csv"
    old_file = "ce_old.csv"
    new_file = "ce_new.csv"
    save_file = "ce_update.csv"
    old_dict = makeDict(old_file)
    new_dict = makeDict(new_file)
    changes = checkDicts(new_dict, old_dict)
    with open(my_path + save_file, 'w') as csvfile:
        # Header row, then one "key,new|update" line per detected change.
        csvfile.write("%s,%s\n" % ("def_key", "fisc_yr"))
        for key, value in changes.items():
            csvfile.write("%s,%s\n" % (key, value))

# Run the comparison when the script is executed.
compare_save()
| true |
19eff805ebd6878bd78bd83d98336219e8092fa1 | Python | TAUrjc/Touchpad | /touchpad.py | UTF-8 | 1,284 | 2.75 | 3 | [] | no_license | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
import serial
import time
import pygame
#import thread
pygame.mixer.init()
# Edge flag (currently unused).
flanco = True
arduino = serial.Serial('/dev/ttyACM0', 9600, timeout = 3.0) # ttyACM0 may vary; check in the Arduino IDE, bottom right
# The three audio clips triggered by the pad: a looping base and two verses.
sound1 = pygame.mixer.Sound('base.ogg')
sound2 = pygame.mixer.Sound('verse2.ogg')
sound3 = pygame.mixer.Sound('verse3.ogg')
def sonido(lineaLeida):
    # Dispatch the command byte sent by the Arduino pad:
    #   '2' loops the base track forever, '3' and '4' play one verse each.
    # NOTE(review): pygame Sound objects may not expose a .name attribute,
    # so the error handlers below might themselves raise — confirm.
    if (lineaLeida == '2\r\n'):
        try:
            sound1.play(loops = -1);
            print "ESCRIBO DESPUES DE LA BASE"
        except pygame.error, message:
            print "base" + sound1.name
    elif lineaLeida == '3\r\n':
        try:
            sound2.play(loops = 0);
        except pygame.error, message:
            print "sound2" + sound2.name
    elif lineaLeida == '4\r\n':
        try:
            sound3.play(loops = 0);
        except pygame.error, message:
            print "sound3" + sound3.name
    else:
        print "Error de lectura (Leo cosas raras)"
while True:
    # Read one line from the Arduino and hand it to the sound dispatcher;
    # serial/OS errors are reported and the loop keeps running.
    try:
        lineaLeida = arduino.readline()
        if lineaLeida:
            try:
                sonido(lineaLeida)
                #thread.start_new_thread(sonido, (lineaLeida, ))
            except:
                print "error: fallo al lanzar el thread"
    except serial.serialutil.SerialException:
        print "Error del serial"
        pass
    except OSError:
        print "Error del OS"
        pass
| true |
ebbdbffc3b54335a0b74bda8974d14a042c8d036 | Python | Deleh/spiderss | /scripts/opml2spiderss.py | UTF-8 | 1,005 | 2.921875 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/usr/bin/env python
import argparse
import opml
import os
import sys
# Prints elements recursively for all outlines
def print_outline(outline, category):
    """Recursively walk an OPML outline, emitting a spiderss [[feed]] stanza per leaf."""
    if len(outline) == 0:
        # Leaf node: print one TOML feed entry.
        print('[[feed]]')
        print("category = '{}'".format(category))
        print("name = '{}'".format(outline.text))
        print("url = '{}'".format(outline.xmlUrl))
        print('scrape = false')
        print('')
        return
    # Inner node: its text becomes one path component of the category.
    nested_category = os.path.join(category, outline.text)
    for child in outline:
        print_outline(child, nested_category)
'''
Main
'''
def main():
    """Parse the OPML file named on the command line and emit spiderss TOML."""
    arg_parser = argparse.ArgumentParser(description = 'Read an OPML file and print spiderss TOML format to stdout.')
    arg_parser.add_argument('file', help = 'OPML input file')
    opml_path = arg_parser.parse_args().file
    try:
        for node in opml.parse(opml_path):
            print_outline(node, '')
    except Exception as e:
        # Any parse/IO failure is reported and mapped to a non-zero exit code.
        print('ERROR: {}'.format(e))
        sys.exit(1)

if __name__ == '__main__':
    main()
| true |
52373acfcaa8d86c4519fef89895aea468fa4a8e | Python | mandarspringboard/notes | /Python_notes/decorator_argument_last.py | UTF-8 | 1,099 | 3.6875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 4 13:53:05 2021
@author: aa
"""
from functools import partial
# Beazley and Jones, Python Cookbook. p.g.336
def attach_wrapper(obj, func=None):
    """Attach *func* to *obj* under func's own name.

    Supports the decorator-with-argument protocol: attach_wrapper(obj, func)
    attaches immediately, while attach_wrapper(obj) returns a one-argument
    decorator that is later called with the function to attach.
    """
    print(f'{obj=},{func=}')
    if func is not None:
        # Direct call: bind func onto obj and hand it back unchanged.
        setattr(obj, func.__name__, func)
        return func
    # Partial application: wait for the decorated function.
    return partial(attach_wrapper, obj)
# as per p.g. 341 in beazley jones the calling sequence for
# decorators with arguments is
#
# func= attach_wrapper(obj=someting) (func)
# ----------------------------
# the underlined is the first call to attach_wrapper
# since func is not defined, it is None
# the code goes to the first return and returns
# partial of attach_wrapper which accepts only one argument
# which is func
# this partial of attach_wrapper is called again
def host_func():
    print('Host function')
# attach_wrapper is first called with obj=host_func only, returning a partial;
# that partial is then applied to attached_func, binding it onto host_func
# as host_func.attached_func.
@attach_wrapper(obj=host_func)
def attached_func():
    print('Attached func')
| true |
57b600526a9f932184603188dd172351bc4e3511 | Python | jonathansick/starfisher | /starfisher/crowd.py | UTF-8 | 3,302 | 2.65625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
# encoding: utf-8
"""
Handle photometric crowding definitions.
"""
import abc
import os
import numpy as np
from starfisher.pathutils import starfish_dir
class BaseCrowdingTable(object):
    """Base class for crowding specification tables (used with synth).

    Parameters
    ----------
    path : str
        Path relative to starfish.
    dbin : length-2 tuple
        (x, y) size of crowding bins, in magnitudes.
    error_range : length-2 tuple
        (error_min, error_max) span of acceptable magnitude errors for an
        artificial star to be considered recovered.
    binsize : float
        Binsize of delta-magnitude histograms.
    error_method : int
        Method flag for applying errors to the synthetic CMD:
        0 for regular crowding table lookup, 2 for scatter table lookup.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, path,
                 dbin=(0.5, 0.5), error_range=(-1, 1.),
                 binsize=0.1, error_method=2):
        super(BaseCrowdingTable, self).__init__()
        self._path = path
        self._dbin = dbin
        self._error_range = error_range
        self._binsize = binsize
        self._error_method = error_method

    @property
    def path(self):
        """Path of the crowding table, relative to the starfish directory."""
        return self._path

    @property
    def full_path(self):
        """Absolute path of the crowding table."""
        return os.path.join(starfish_dir, self.path)

    @property
    def config_section(self):
        """Configuration lines: bin sizes, error range bounds, histogram binsize."""
        low_bin, high_bin = min(self._dbin), max(self._dbin)
        low_err, high_err = min(self._error_range), max(self._error_range)
        return [str(low_bin), str(high_bin),
                str(low_err), str(high_err),
                str(self._binsize)]

    @property
    def error_method(self):
        """Error-application method flag, as a string."""
        return str(self._error_method)
class ExtantCrowdingTable(BaseCrowdingTable):
    """Crowding table wrapper for a pre-built crowdfile.

    Parameters
    ----------
    path : str
        Path relative to starfish.
    **args : dict
        Arguments for :class:`BaseCrowdingTable`.
    """
    # No extra behaviour: the crowdfile already existing at *path* is used as-is.
    def __init__(self, path, **args):
        super(ExtantCrowdingTable, self).__init__(path, **args)
class MockNullCrowdingTable(BaseCrowdingTable):
    """Make a mock crowding table where stars have no photometric errors.

    Parameters
    ----------
    path : str
        Output path of the generated crowdfile, relative to starfish.
    n_bands : int
        Number of photometric bands.
    mag_range : length-2 tuple
        (min, max) magnitude range for the uniformly drawn input magnitudes.
    n_stars : int
        Number of artificial stars to generate.
    **kwargs : dict
        Arguments for :class:`BaseCrowdingTable`.
    """
    def __init__(self, path, n_bands, mag_range=(10., 35.), n_stars=100000,
                 **kwargs):
        super(MockNullCrowdingTable, self).__init__(path, **kwargs)
        self._n_bands = n_bands
        self._n_stars = n_stars
        self._range = mag_range
        self._write()

    def _write(self):
        """Generate the artificial-star table and save it to ``full_path``.

        Every star gets a uniformly distributed input magnitude per band and
        zero positional/error columns, i.e. perfect ("null") recovery.
        """
        # Record layout: ra, dec, then (mag_i, dmag_i) per band.
        dt = [('ra', float), ('dec', float)]
        # Bug fix: `xrange` is Python-2 only (NameError on Python 3);
        # `range` behaves identically here on both versions.
        for i in range(self._n_bands):
            dt.append(('mag{0:d}'.format(i), float))
            dt.append(('dmag{0:d}'.format(i), float))
        data = np.zeros(self._n_stars, dtype=np.dtype(dt))
        for i in range(self._n_bands):
            mag_label = "mag{0:d}".format(i)
            data[mag_label][:] = (max(self._range) - min(self._range)) \
                * np.random.random_sample(self._n_stars) + min(self._range)
        dirname = os.path.dirname(self.full_path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        np.savetxt(self.full_path, data, fmt='%.5e', delimiter=' ')
| true |
3661cccd9de19e6d7c2e027c51fb8ec819b5ccb3 | Python | Alpin1205/ZarAtmaIstatistigi | /ZarAtmaİstatistiği.py | UTF-8 | 1,066 | 3.484375 | 3 | [] | no_license | import random
binlik = 0     # running sum of 100 dice rolls within one experiment
ortbinlik = 0  # running sum of the per-experiment averages
üçelli = 0
üçorta = 0
değil = 0
uçukb = 0
uçukk = 0
kere = 0       # number of experiments performed
# Run 10000 experiments; each experiment averages 100 rolls of a fair d6.
for i in range(0,1000):
    for b in range(0,10):
        for p in range(0, 100):
            binlik += random.randint(1, 6)
        x = binlik / 100
        binlik = 0
        print(str(i) + str(b) +". defa ortalaması " +str(x))
        ortbinlik += x
        kere += 1
        # Tally which bands the experiment average falls into (bands overlap).
        if x <= 3.59 and x >= 3.50:
            üçelli += 1
        if x <= 3.55 and x >= 3.46:
            üçorta += 1
        if x <= 3:
            uçukk += 1
        if x >= 4:
            uçukb += 1
        else:
            # NOTE(review): this `else` is attached only to the `x >= 4`
            # check above, so `değil` counts every average below 4 —
            # confirm that is the intended meaning of "not matching".
            değil += 1
ortbinlik = ortbinlik / kere
print("""
Ortalama Sonucu:
Toplam ortalama: """+ str(ortbinlik) +"""
3.59 ile 3.50 arasındakiler: """+ str(üçelli) +"""
3.55 ile 3.46 arasındakiler: """+ str(üçorta) +"""
3 küsür olup şartı karşılamayanlar: """+ str(değil) +"""
3'ten küçük olanlar: """+ str(uçukk) +"""
4'ten büyük olanlar: """+ str(uçukb) +"""
""")
| true |
48584cdca2b0bf341b03afa5e707a18e0c0befdb | Python | danse-inelastic/inelastic-svn | /Tau/srcs/qeparser.py | UTF-8 | 10,352 | 2.515625 | 3 | [] | no_license | #! /usr/bin/python
import numpy as np
from atoms import Atoms
from vibrations import Vibrations
def parse_scf(outputfile):
    """Parse a Quantum ESPRESSO pw.x SCF output file into an Atoms unit cell.

    Scans *outputfile* for the header lines describing the lattice and the
    atoms, then builds and returns an :class:`Atoms` instance (its summary
    is printed via ``print_info`` before returning).

    NOTE: the 'mass' and 'positions' sections are assumed to appear after
    the atom/type counts in the file, since they use ``ntype``/``natom``.
    """
    unit_lvs = []   # direct lattice vectors
    unit_rlvs = []  # reciprocal lattice vectors (parsed but not passed on)
    mass = []
    symbol = []
    position = []
    # Read everything up front: several sections are parsed by inspecting
    # the lines *following* a header line. The handle is now closed
    # deterministically (it was previously leaked) and no longer shadows
    # the Python 2 builtin `file`.
    with open(outputfile, 'r') as scf_file:
        lines = scf_file.readlines()
    for index, line in enumerate(lines):
        if 'bravais-lattice index' in line:
            ibrav = int(line.split()[3])  # parsed but currently unused
        if 'lattice parameter' in line:
            lattice_constant = float(line.split()[4])  # parsed but currently unused
        if 'number of atoms/cell' in line:
            natom = int(line.split()[4])
        if 'number of atomic types' in line:
            ntype = int(line.split()[5])
        if 'crystal axes' in line:
            for i in range(0,3):
                unit_lvs.append([float(f) for f in lines[index+1+i].split()[3:6]])
            #unit_lvs = unit_lvs * lattice_constant
        if 'reciprocal axes:' in line:
            for i in range(0,3):
                unit_rlvs.append([float(f) for f in lines[index+1+i].split()[3:6]])
            #unit_rlvs = unit_rlvs * 2 * math.pi / lattice_constant
        if 'mass' in line:
            for i in range(0,ntype):
                symbol.append(lines[index+1+i].split()[0])
                mass.append(float(lines[index+1+i].split()[2]))
        if 'positions' in line:
            for i in range(0,natom):
                position.append([float(f) for f in lines[index+1+i].split()[6:9]])
    unitcell = Atoms(ntype, symbol, mass, natom, position, unit_lvs)
    unitcell.print_info()
    return unitcell
def parse_qpoints_from_dyn(dynfile):
    """Collect q-point grids from a set of QE ph.x dynamical-matrix files.

    ``<dynfile>0`` holds the q-grid dimensions and the irreducible q-points;
    ``<dynfile>1`` .. ``<dynfile>N`` each hold one star of q-points, from
    which the full list is gathered.

    Returns ([nq1, nq2, nq3], irreducible q-points, full q-point list).
    """
    with open(dynfile + '0', 'r') as header_file:
        header_lines = header_file.readlines()
    nq1, nq2, nq3 = [int(f) for f in header_lines[0].split()]
    Nq_indep = int(header_lines[1].split()[0])
    # The irreducible q-points follow directly after the two header lines.
    qpoints_indep = [[float(f) for f in header_lines[i + 2].split()]
                     for i in range(Nq_indep)]
    qpoints_full = []
    for i in range(Nq_indep):
        with open(dynfile + str(i + 1), 'r') as star_file:
            star_lines = star_file.readlines()
        for index, line in enumerate(star_lines):
            # Each 'axes' marker is followed (two lines later) by a q-point.
            if 'axes' in line:
                qpoints_full.append([float(f) for f in star_lines[index + 2].split()[3:6]])
    return [nq1, nq2, nq3], qpoints_indep, qpoints_full
def parse_dyn(dynfile):
    """Parse q-points, dynamical matrices and phonon modes from a QE .dyn file.

    NOTE(review): this function looks unfinished/broken as written — the
    ``Vibrations(...)`` construction below is commented out, so the final
    ``return phonon`` raises NameError; the file handle is never closed and
    shadows the builtin ``file``; and the bare ``print`` statement inside
    the omega branch is Python-2-only debug output.
    """
    file = open(dynfile,'r')
    lines = file.readlines()
    ntype = int(lines[2].split()[0])
    natom = int(lines[2].split()[1])
    qpoints = []
    dynmatrix = []
    eigenval = []
    eigenvec = []
    #numberq = lines.count('Dynamical Matrix in cartesian axes')
    numberq = 0
    for index, line in enumerate(lines):
        if 'Dynamical Matrix in cartesian axes' in line:
            numberq = numberq + 1
            qpoints.append([float(f) for f in lines[index+2].split()[3:6]])
            # Each (i, j) atom pair contributes a 3x3 block of complex
            # numbers written as real/imag pairs at a computed line offset.
            for i in range(0,natom):
                for j in range(0,natom):
                    for idir in range(0,3):
                        _itemp = index+3+i*natom*3+j*3+idir+(i+1)*natom+j
                        _val = [float(f) for f in lines[_itemp].split() ]
                        dynmatrix.append(complex(_val[0],_val[1]))
                        dynmatrix.append(complex(_val[2],_val[3]))
                        dynmatrix.append(complex(_val[4],_val[5]))
        if 'omega' in line:
            eigenval.append(float(lines[index].split()[6]))
            for i in range(0,natom):
                print index+1+i
                _val = [float(f) for f in lines[index+1+i].split()[1:7]]
                eigenvec.append(complex(_val[0],_val[1]))
                eigenvec.append(complex(_val[2],_val[3]))
                eigenvec.append(complex(_val[4],_val[5]))
    #phonon = Vibrations(numberq, qpoints, eigenval, eigenvec)
    return phonon
def parse_d3(d3file):
    """Obtain the third-order anharmonic tensor from QE d3.x output.

    Returns ``(qpoints, d3tensor)`` where ``qpoints`` is an array of the
    q-points found and ``d3tensor`` has shape
    ``(numberq, natom, natom, natom, 3, 3, 3)`` after the axis roll.
    """
    # Read the whole file once; tensor entries are addressed by computed
    # line offsets relative to each "q = (" marker. The handle is now
    # closed deterministically (it was previously leaked) and no longer
    # shadows the builtin `file`.
    with open(d3file, 'r') as d3_handle:
        lines = d3_handle.readlines()
    ntype = int(lines[2].split()[0])  # parsed for completeness; unused below
    natom = int(lines[2].split()[1])
    d3tensor = []
    qpoints = []
    numberq = 0
    for index, line in enumerate(lines):
        if 'q = (' in line:
            numberq = numberq + 1
            qpoints.append([float(f) for f in lines[index].split()[3:6]])
            # Each (mode, atom i, atom j, direction) block spans two lines
            # holding three complex numbers written as real/imag pairs.
            for mode in range(0, 3 * natom):
                for i in range(0, natom):
                    for j in range(0, natom):
                        for idir in range(0, 3):
                            _itemp = index+mode*(natom*natom*7 + 3)+\
                                     (i*natom+j)*7 + 6 + idir*natom
                            temp = lines[_itemp] + lines[_itemp + 1]
                            _val = [float(f) for f in temp.split()]
                            d3tensor.append(complex(_val[0], _val[1]))
                            d3tensor.append(complex(_val[2], _val[3]))
                            d3tensor.append(complex(_val[4], _val[5]))
    return np.array(qpoints), \
           np.rollaxis(np.array(d3tensor).reshape(numberq, natom, 3, natom, natom, 3, 3), 2, 5)
def parse_dyn_old(dynfile, outputfile):
    'Obtain dynamical matrix'
    # NOTE(review): this "old" variant appears dead/broken — see the notes
    # below; parse_dyn/parse_qpoints_from_dyn supersede it.
    # The number of q-points in the star is taken from the ph.x output file.
    file = open(outputfile, 'r')
    for line in file.readlines():
        if 'Number of q in the star =' in line:
            numberq = int(line.split()[7])
    file.close()
    file = open(dynfile,'r')
    line = file.readline()
    line = file.readline()
    line = file.readline()
    ntype = int(line.split()[0])
    natom = int(line.split()[1])
    ibrav = int(line.split()[2])
    celldm = line.split()[3:]
    symbol = [0] * ntype
    mass = [0] * ntype
    type = [0] * natom
    position = [[0] * 3] * natom
    qpoints = []
    eigenval = []
    eigenvec = []
    dynmatrix = []
    for i in range(0,ntype):
        line = file.readline()
        symbol[i] = (line.split()[1])
        mass[i] = line.split()[2]
    for i in range(0,natom):
        line = file.readline()
        type[i] = line.split()[1]
        position[i] = line.split()[2:]
    for q in range(0,numberq):
        line = file.readline()
        line = file.readline()
        line = file.readline()
        line = file.readline()
        qpoints.append([float(f) for f in line.split()[3:6]])
        line = file.readline()
        for ii in range(0, natom):
            for jj in range(0,natom):
                i1,j1 = file.readline().split()
                # NOTE(review): `is not` compares identity, not equality
                # (unsafe for ints); the same check is duplicated and `j1`
                # is never validated — probably meant int(j1) vs jj+1.
                if int(i1) is not ii+1: raise ValueError('wrong')
                if int(i1) is not ii+1: raise ValueError('wrong')
                for idir in range(0,3):
                    line = file.readline()
                    _val = [float(f) for f in line.split()]
                    dynmatrix.append(complex(_val[0],_val[1]))
                    dynmatrix.append(complex(_val[2],_val[3]))
                    dynmatrix.append(complex(_val[4],_val[5]))
    line = file.readline()
    line = file.readline()
    line = file.readline()
    line = file.readline()
    line = file.readline()
    line = file.readline()
    for ii in range(0,3 * natom):
        eigenval.append(float(file.readline().split()[6]))
        for jj in range(0,natom):
            line = file.readline()
            eigenvec.append(line.split()[1:7])
    line = file.readline()
    file.close()
    phonon = Vibrations(qpoints, eigenval, eigenvec, dynmatrix)
    # NOTE(review): `omega` and `eigenV` are undefined (the lists above are
    # named eigenval/eigenvec), so this return raises NameError; `phonon`
    # is built but never returned.
    return np.array(qpoints), \
        np.array(omega).reshape(3 * natom), \
        np.array(eigenV), \
        np.array(dynmatrix).reshape(numberq,natom,natom,3,3)
def parse_d3_old(d3file,outputfile):
    'Obtain third order anharmonic tensor from QE d3.x'
    # NOTE(review): this "old" variant appears dead/broken — it opens
    # `dynfile`, which is not a parameter (NameError; presumably d3file was
    # meant), and `d3tensor` is never initialised before being appended to.
    # parse_d3 supersedes it.
    file = open(outputfile, 'r')
    for line in file.readlines():
        if 'Number of q in the star =' in line:
            numberq = int(line.split()[7])
    file.close()
    file = open(dynfile,'r')
    line = file.readline()
    line = file.readline()
    line = file.readline()
    ntype = int(line.split()[0])
    natom = int(line.split()[1])
    ibrav = int(line.split()[2])
    celldm = line.split()[3:]
    symbol = [0] * ntype
    mass = [0] * ntype
    type = [0] * natom
    position = [[0] * 3] * natom
    qpoints = []
    dynmatrix = []
    for i in range(0,ntype):
        line = file.readline()
        symbol[i] = (line.split()[1])
        mass[i] = line.split()[2]
    for i in range(0,natom):
        line = file.readline()
        type[i] = line.split()[1]
        position[i] = line.split()[2:]
    for q in range(0,numberq):
        line = file.readline()
        line = file.readline()
        line = file.readline()
        line = file.readline()
        qpoints.append([float(f) for f in line.split()[3:6]])
        line = file.readline()
        for ii in range(0,3 * natom):
            line = file.readline()
            line = file.readline()
            # NOTE(review): `is not` on ints is an identity test, and a
            # failed match is *returned* (not raised) as a SyntaxError.
            if ii is not int(line.split()[1]) - 1:
                return SyntaxError('wrong match')
            line = file.readline()
            for j in range(0,natom):
                for k in range(0,natom):
                    line = file.readline()
                    for jdir in range(0,3):
                        jj = (j - 1) * 3 + jdir
                        kk = (k - 1) * 3
                        line = file.readline() + file.readline()
                        _val = [float(f) for f in line.split()]
                        d3tensor.append(complex(_val[0],_val[1]))
                        d3tensor.append(complex(_val[2],_val[3]))
                        d3tensor.append(complex(_val[4],_val[5]))
    file.close()
    #print 'qpoints are',qpoints
    #print 'ntype is ', ntype
    #print 'natom is ', natom
    #print 'd3 tensor is ', d3tensor
    return np.array(qpoints), \
        np.rollaxis(np.array(d3tensor).reshape(numberq,natom,3,natom,natom,3,3),2,5)
if __name__ == "__main__":
    # Ad-hoc manual test entry point (Python 2 print statement).
    print "Hello World";
    #qpoints, d3tensor = parse_d3('si.anh_X','si.d3X.out')
    parse_qpoints_from_dyn('si.dyn')
__author__="Xiaoli Tang"
__date__ ="$Nov 28, 2009 4:57:29 PM$"
| true |
a0d279df552dd75573483c1ec177adce7f481bb3 | Python | YikSanChan/pyflink-lightgbm-batch-inference | /vanilla_infer.py | UTF-8 | 450 | 2.859375 | 3 | [] | no_license | import lightgbm as lgb
from utils import load_data
from sklearn.metrics import mean_squared_error
if __name__ == "__main__":
gbm = lgb.Booster(model_file="model.txt")
print('Starting predicting...')
_, (X_test, y_test) = load_data()
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print(f'The RMSE of prediction is: {rmse_test}')
| true |
4fe24183cc32c22cc08ffdf52d7612787a7aee54 | Python | MarinaParr/python_course | /homeworks/homework5/task5.py | UTF-8 | 125 | 2.921875 | 3 | [] | no_license | import re
import sys
pattern = '([\W]+|_)'
text = sys.stdin.read()
result = re.sub(pattern, " ", text)
print(result)
| true |
e51aade61649615b1ed6234117f805f857b3f99d | Python | neineit/poisoningsvc | /LibPoisonOCSVM.py | UTF-8 | 5,062 | 2.5625 | 3 | [] | no_license | '''
Created on Mar 28, 2015
This package contains common methods used in poisoning one-class SVM
@author: Xiao, Huang
'''
from joblib import Memory, Parallel, delayed
import numpy as np
from numpy.linalg import norm, lstsq
from scipy.linalg import solve, eigvals
import sklearn.preprocessing as prep
from sklearn.metrics.pairwise import pairwise_kernels as Kernels
from sklearn.metrics import accuracy_score as acc
from sklearn.metrics import confusion_matrix
def objective(xc, clf, dataset, target='dec_val'):
"""
Objective function we need to maximise to poisoning purpose
The key idea here is to have a unified method to describe the
adversarial objective function.
:rtype float
:param xc: the malicious points with size mxd, which we inject into the training set
:param clf: classifier we try to poison, here is the OCSVM
:param dataset: a dict contains keys ['train', 'train_labels', 'test', 'test_labels']
:param type: objective value that we measure (to maximise)
:return: measured error, e.g., accuracy, MSE, objective value
"""
Xtr = dataset['train'] # train data is a must
if dataset.has_key("train_labels"): ytr = dataset['train_labels']
if dataset.has_key("test"): Xtt = dataset['test']
if dataset.has_key("test_labels"): ytt = dataset['test_labels']
# reshape the malicious points into ndarray
n, d = Xtr.shape
m = xc.size/d
xc = xc.reshape(m, d)
# append the malicious data to form a contaminated dataset
Xtr_ = np.r_['0,2', Xtr, xc]
# TODO: update SVC instead of retrain with xc
clf.fit(Xtr_) # <---- this is just a lazy update
if target is 'fval':
# objective values on untainted dataset
if type(clf).__name__ is "OCSVM":
K = Kernels(Xtr_, metric=clf.kernel, filter_params=True, gamma=clf.gamma, coef0=clf.coef0,
degree=clf.degree)
# new set of bounded SVs without the xc
bsv_ind_new = np.setdiff1d(clf.bsv_ind, [n+1])
alphas = clf.dual_coef_
slack_variables = clf.intercept_-clf.decision_function(Xtr_[bsv_ind_new, :])
# 1/2 |w|^2+1/vl sum\xi - rho
fval = 0.5*alphas.dot(K[np.ix_(clf.support_, clf.support_)]).dot(
alphas.T)+slack_variables.sum()-clf.intercept_
# TODO: we may support other type of objective function
return fval[0]
if target is 'clferr':
# classification error on test set
if Xtt is not None and ytt is not None:
y_clf = clf.predict_y(Xtt)
return 1-acc(ytt, y_clf)
else:
print 'You need give the test dataset!'
return None
if target is 'dec_val':
# decision value: w'*x - rho
return clf.decision_function(Xtr).sum()
if target is 'fnr':
# false negative rate (outliers are classified as normal)
if Xtt is not None and ytt is not None:
y_clf = clf.predict_y(Xtt)
cf_max = confusion_matrix(ytt, y_clf, labels=[1, -1])
return 1-float(cf_max[0, 0])/cf_max[0].sum()
else:
print 'You need give the test dataset!'
return None
if target is 'fpr':
# false positive rate (normal are classified as outliers)
if Xtt is not None and ytt is not None:
y_clf = clf.predict_y(Xtt)
cf_max = confusion_matrix(ytt, y_clf, labels=[1, -1])
return 1-float(cf_max[1, 1])/cf_max[1].sum()
else:
print 'You need give the test dataset!'
return None
def grad_w(xc, clf, dataset):
"""
gradient of objective W wrt. x_c
compute the gradient of xc in X
classifier must be trained on X first! We avoid retraining gradient while
computing the gradient!
:param xc:
:param clf: malicious point, only single-point gradient is supported
:param X: training set X
:return: gradient of xc
"""
# Initialize
X = dataset['train']
n, d = X.shape
xc = xc.reshape(1, d)
X_ = np.r_[X, xc]
# fit OCSVM on X_
clf.fit(X_)
# TODO: check correctness
# vector of gradient alpha
K_x = Kernels(X_, X_[clf.sv_ind], metric=clf.kernel, filter_params=True, gamma=clf.gamma, coef0=clf.coef0, degree=clf.degree)
K_sv = K_x[clf.sv_ind, :]
lhs = np.repeat(K_sv[0].reshape(1, K_sv[0].size), clf.sv_ind.size-1, axis=0)-K_sv[1:]
lhs = np.vstack((lhs, np.ones((1, clf.sv_ind.size))))
# numerical correction
lhs = lhs+1e-6*np.eye(lhs.shape[0])
rhs = np.zeros((clf.sv_ind.size, d))
# solve the linear system by lstsq
vs, residuals, rank, s = lstsq(lhs, rhs)
# vs = solve(lhs, rhs)
# correct the solution according to KKT(1)
# vs[0] = -vs[1:].sum(axis=0)
# print 'residuals: ', residuals
# print 'rank: %d lhs_rows: %d ' % (rank, clf.sv_ind.size-1)
random_sv = np.random.choice(clf.sv_ind, 1)
return (K_x[0:n, :] - np.repeat(K_x[random_sv].reshape(1, clf.sv_ind.size), n, axis=0)).dot(vs).sum(axis=0)
| true |
44b3a2c2505ff136835b638b1b2b5a8a3ea70ae2 | Python | igemsoftware/UCSD_Software_2014 | /re/make_whole_network.py | UTF-8 | 2,126 | 2.859375 | 3 | [] | no_license | """
//Title: SBML Network Generator
//Description:
Translate entire network model to SBML and store network file
in current working directory
*************************************************
@author: Fernando Contreras
@email: f2contre@gmail.com
@project: SBiDer
@institution: University of California, San Diego
*************************************************
"""
import sbml_database as sd
import SBML_Network as sn
#establish model object as a global variable
global model
"""
constructs the entire entire network from the data base
"""
def create_whole_network_sbml():
model = sn.Model()
"""
construct operon/chemical species component of network SBML file
"""
operon_dict = sd.get_sbml_operons()
miriam_dict = sd.get_sbml_miriam_ids()
for operon in operon_dict:
ope_spe = sn.QualitativeSpecies(operon,'plasmid',name=operon_dict[operon],miriam_id=miriam_dict[operon])
ope_spe.appendToQualSpecies(model)
species_dict = sd.get_sbml_species()
for species in species_dict:
chem_spe = sn.QualitativeSpecies(species,'chemical_species',name=species_dict[species])
chem_spe.appendToQualSpecies(model)
"""
required intermediate SBML statements for network model
"""
intermediate_step = sn.IntermediateStep(model)
"""
construct input/output transition component of network SBML file
"""
input_trans_dict = sd.get_sbml_input_species_edges()
input_operon = sd.get_sbml_input_operon_edges()
trans_logic = sd.get_sbml_input_logic()
for input in input_trans_dict:
in_trans_spe = sn.Transitions()
in_trans_spe.input_transition(model,input,input_trans_dict[input],input_operon[input],trans_logic[input])
output_trans_dict = sd.get_sbml_output_species_edges()
output_operon = sd.get_sbml_output_operon_edges()
for output in output_trans_dict:
out_trans_spe = sn.Transitions()
out_trans_spe.output_transition(model,output,output_trans_dict[output],output_operon[output])
"""
required closing SBML statements for network model
"""
close_model = sn.CloseModel(model)
model.writeSBML("SBider_Network")
| true |
3f2667a79872ffeedda306ee5703608be15d7679 | Python | MouvementMondial/MappingWithKnownPoses | /auswertungTraj.py | UTF-8 | 4,224 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 3 18:16:45 2018
@author: Thorsten
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
font = {'size' : 20}
plt.rc('font', **font)
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
path = 'D:/KITTI/odometry/dataset/04_export/'
path = 'C:/KITTI/2011_09_30/2011_09_30_0027_export/'
nr = 25
groundTruth = np.asmatrix(np.loadtxt(path+'groundTruth.txt',delimiter=','))
ax = plt.subplot(111)
plt.axis('equal')
plt.scatter([groundTruth[:,1]],[groundTruth[:,0]],
c='g',s=20,edgecolors='none', label = 'Trajektorie Ground Truth')
trajs = np.asmatrix(np.loadtxt(path+'trajs_0.25.txt',delimiter=','))
# mean trajs
meanX = np.mean(trajs[:,::2],axis=1)
meanY = np.mean(trajs[:,1::2],axis=1)
covSum1 = []
for ii in range(0,trajs.shape[0],10):
cov = np.cov(trajs[ii,1::2].tolist(),trajs[ii,::2].tolist())
covSum1.append(np.trace(cov))
nstd = 2
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
w, h = 2 * nstd * np.sqrt(vals)
ell = Ellipse(xy=(np.mean(trajs[ii,1::2]),np.mean(trajs[ii,::2])),
width=w, height=h,
angle=theta, color='blue')
ell.set_facecolor('none')
ax.add_artist(ell)
for ii in range(0,nr*2-1,2):
if ii == 0:
plt.scatter([trajs[::10,ii+1]],[trajs[::10,ii]],
c='b',s=3,edgecolors='none',label = 'Trajektorien ohne Filterung')
else:
plt.scatter([trajs[::10,ii+1]],[trajs[::10,ii]],
c='b',s=3,edgecolors='none')
plt.scatter([meanY],[meanX],c='b',s=20,edgecolors='none',label = 'Trajektorien SLAM Mittelwert')
trajectory = np.hstack((meanX,meanY))
errorPos1 = np.sqrt( np.multiply(trajectory[:,0]-groundTruth[:,0],trajectory[:,0]-groundTruth[:,0])
+np.multiply(trajectory[:,1]-groundTruth[:,1],trajectory[:,1]-groundTruth[:,1]))
trajs = np.asmatrix(np.loadtxt(path+'trajs_0.25_filter.txt',delimiter=','))
# mean trajs
meanX = np.mean(trajs[:,::2],axis=1)
meanY = np.mean(trajs[:,1::2],axis=1)
covSum2 = []
for ii in range(0,trajs.shape[0],10):
cov = np.cov(trajs[ii,1::2].tolist(),trajs[ii,::2].tolist())
covSum2.append(np.trace(cov))
nstd = 2
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
w, h = 2 * nstd * np.sqrt(vals)
ell = Ellipse(xy=(np.mean(trajs[ii,1::2]),np.mean(trajs[ii,::2])),
width=w, height=h,
angle=theta, color='magenta')
ell.set_facecolor('none')
ax.add_artist(ell)
for ii in range(0,nr*2-1,2):
plt.scatter([trajs[::10,ii+1]],[trajs[::10,ii]],
c='m',s=3,edgecolors='none')
plt.scatter([meanY],[meanX],c='m',s=20,edgecolors='none',label = 'Trajektorien mit Filterung')
trajectory = np.hstack((meanX,meanY))
errorPos2 = np.sqrt( np.multiply(trajectory[:,0]-groundTruth[:,0],trajectory[:,0]-groundTruth[:,0])
+np.multiply(trajectory[:,1]-groundTruth[:,1],trajectory[:,1]-groundTruth[:,1]))
plt.legend()
# traveled distance ground truth
temp = np.vstack((groundTruth[0,:],groundTruth))
temp = np.delete(temp,(temp.shape[0]-1), axis=0)
distanceGT = np.sqrt( np.multiply(temp[:,0]-groundTruth[:,0],temp[:,0]-groundTruth[:,0])
+np.multiply(temp[:,1]-groundTruth[:,1],temp[:,1]-groundTruth[:,1]))
distanceGT = np.transpose(np.cumsum(distanceGT))
plt.figure(2)
plt.title('Entwicklung Varianz')
plt.plot(distanceGT[::10,:],covSum1,c='b', label = 'Ohne Filterung')
plt.plot(distanceGT[::10,:],covSum2,c='m', label = 'Mit Filterung')
plt.xlabel('Distanz')
plt.ylabel('Varianz')
plt.legend()
plt.figure(3)
plt.plot(distanceGT,errorPos1,c='b', label = 'Ohne Filterung',linewidth=2)
plt.plot(distanceGT,errorPos2,c='m', label = 'Mit Filterung',linewidth=2)
plt.xlabel('Distanz Ground Truth [m]')
plt.ylabel('Abweichung [m]')
plt.grid()
plt.legend(loc='upper left')
"""
xx = trajs[269,::2]
yy = trajs[269,1::2]
plt.figure(2)
plt.subplot(311)
plt.hist(np.transpose(xx))
plt.subplot(312)
plt.hist(np.transpose(yy))
plt.subplot(313)
plt.hist2d(np.asarray(xx)[:,0],np.asarray(yy)[:,0])
"""
| true |
f171a800f668754d19757e98baa6c91095b4be1c | Python | Sakhile-Msibi/Rubiks | /sources/solver/CheckFaceColors.py | UTF-8 | 14,039 | 2.578125 | 3 | [] | no_license | import sys
from sources.solver.Rubik import Rubik
class CheckFaceColors:
def two(self, cube, colorOne, colorTwo):
if (cube.upper[0][1] == colorOne and cube.back[0][1] == colorTwo):
return ([['upper', colorOne, 0, 1],['back', colorTwo, 0, 1]])
elif (cube.upper[0][1] == colorTwo and cube.back[0][1] == colorOne):
return ([['upper', colorTwo, 0, 1],['back', colorOne, 0, 1]])
elif (cube.upper[1][2] == colorOne and cube.right[0][1] == colorTwo):
return ([['upper', colorOne, 1, 2],['right', colorTwo, 0, 1]])
elif (cube.upper[1][2] == colorTwo and cube.right[0][1] == colorOne):
return ([['upper', colorTwo, 1, 2],['right', colorOne, 0, 1]])
elif (cube.upper[2][1] == colorOne and cube.front[0][1] == colorTwo):
return ([['upper', colorOne, 2, 1],['front', colorTwo, 0, 1]])
elif (cube.upper[2][1] == colorTwo and cube.front[0][1] == colorOne):
return ([['upper', colorTwo, 2, 1],['front', colorOne, 0, 1]])
elif (cube.upper[1][0] == colorOne and cube.left[0][1] == colorTwo):
return ([['upper', colorOne, 1, 0],['left', colorTwo, 0, 1]])
elif (cube.upper[1][0] == colorTwo and cube.left[0][1] == colorOne):
return ([['upper', colorTwo, 1, 0],['left', colorOne, 0, 1]])
elif (cube.left[1][2] == colorOne and cube.front[1][0] == colorTwo):
return ([['left', colorOne, 1, 2],['front', colorTwo, 1, 0]])
elif (cube.left[1][2] == colorTwo and cube.front[1][0] == colorOne):
return ([['left', colorTwo, 1, 2],['front', colorOne, 1, 0]])
elif (cube.left[1][0] == colorOne and cube.back[1][2] == colorTwo):
return ([['left', colorOne, 1, 0],['back', colorTwo, 1, 2]])
elif (cube.left[1][0] == colorTwo and cube.back[1][2] == colorOne):
return ([['left', colorTwo, 1, 0],['back', colorOne, 1, 2]])
elif (cube.front[1][2] == colorOne and cube.right[1][0] == colorTwo):
return ([['front', colorOne, 1, 2],['right', colorTwo, 1, 0]])
elif (cube.front[1][2] == colorTwo and cube.right[1][0] == colorOne):
return ([['front', colorTwo, 1, 2],['right', colorOne, 1, 0]])
elif (cube.right[1][2] == colorOne and cube.back[1][0] == colorTwo):
return ([['right', colorOne, 1, 2],['back', colorTwo, 1, 0]])
elif (cube.right[1][2] == colorTwo and cube.back[1][0] == colorOne):
return ([['right', colorTwo, 1, 2],['back', colorOne, 1, 0]])
elif (cube.down[0][1] == colorOne and cube.front[2][1] == colorTwo):
return ([['down', colorOne, 0, 1],['front', colorTwo, 2, 1]])
elif (cube.down[0][1] == colorTwo and cube.front[2][1] == colorOne):
return ([['down', colorTwo, 0, 1],['front', colorOne, 2, 1]])
elif (cube.down[1][2] == colorOne and cube.right[2][1] == colorTwo):
return ([['down', colorOne, 1, 2],['right', colorTwo, 2, 1]])
elif (cube.down[1][2] == colorTwo and cube.right[2][1] == colorOne):
return ([['down', colorTwo, 1, 2],['right', colorOne, 2, 1]])
elif (cube.down[2][1] == colorOne and cube.back[2][1] == colorTwo):
return ([['down', colorOne, 2, 1],['back', colorTwo, 2, 1]])
elif (cube.down[2][1] == colorTwo and cube.back[2][1] == colorOne):
return ([['down', colorTwo, 2, 1],['back', colorOne, 2, 1]])
elif (cube.down[1][0] == colorOne and cube.left[2][1] == colorTwo):
return ([['down', colorOne, 1, 0],['left', colorTwo, 2, 1]])
elif (cube.down[1][0] == colorTwo and cube.left[2][1] == colorOne):
return ([['down', colorTwo, 1, 0],['left', colorOne, 2, 1]])
return (False)
def three(self, cube, colorOne, colorTwo, colorThree):
if (cube.upper[0][0] == colorOne and cube.left[0][0] == colorTwo and cube.back[0][2] == colorThree):
return (['upper', colorOne, 0, 0],['left', colorTwo, 0, 0], ['back', colorThree, 0, 2])
elif (cube.upper[0][0] == colorOne and cube.left[0][0] == colorThree and cube.back[0][2] == colorTwo):
return (['upper', colorOne, 0, 0],['left', colorThree, 0, 0], ['back', colorTwo, 0, 2])
elif (cube.upper[0][0] == colorTwo and cube.left[0][0] == colorThree and cube.back[0][2] == colorOne):
return (['upper', colorTwo, 0, 0],['left', colorThree, 0, 0], ['back', colorOne, 0, 2])
elif (cube.upper[0][0] == colorTwo and cube.left[0][0] == colorOne and cube.back[0][2] == colorThree):
return (['upper', colorTwo, 0, 0],['left', colorOne, 0, 0], ['back', colorThree, 0, 2])
elif (cube.upper[0][0] == colorThree and cube.left[0][0] == colorOne and cube.back[0][2] == colorTwo):
return (['upper', colorThree, 0, 0],['left', colorOne, 0, 0], ['back', colorTwo, 0, 2])
elif (cube.upper[0][0] == colorThree and cube.left[0][0] == colorTwo and cube.back[0][2] == colorOne):
return (['upper', colorThree, 0, 0],['left', colorTwo, 0, 0], ['back', colorOne, 0, 2])
elif (cube.upper[0][2] == colorOne and cube.right[0][2] == colorTwo and cube.back[0][0] == colorThree):
return (['upper', colorOne, 0, 2],['right', colorTwo, 0, 2], ['back', colorThree, 0, 0])
elif (cube.upper[0][2] == colorOne and cube.right[0][2] == colorThree and cube.back[0][0] == colorTwo):
return (['upper', colorOne, 0, 2],['right', colorThree, 0, 2], ['back', colorTwo, 0, 0])
elif (cube.upper[0][2] == colorTwo and cube.right[0][2] == colorThree and cube.back[0][0] == colorOne):
return (['upper', colorTwo, 0, 2],['right', colorThree, 0, 2], ['back', colorOne, 0, 0])
elif (cube.upper[0][2] == colorTwo and cube.right[0][2] == colorOne and cube.back[0][0] == colorThree):
return (['upper', colorTwo, 0, 2],['right', colorOne, 0, 2], ['back', colorThree, 0, 0])
elif (cube.upper[0][2] == colorThree and cube.right[0][2] == colorOne and cube.back[0][0] == colorTwo):
return (['upper', colorThree, 0, 2],['right', colorOne, 0, 2], ['back', colorTwo, 0, 0])
elif (cube.upper[0][2] == colorThree and cube.right[0][2] == colorTwo and cube.back[0][0] == colorOne):
return (['upper', colorThree, 0, 2],['right', colorTwo, 0, 2], ['back', colorOne, 0, 0])
elif (cube.upper[2][2] == colorOne and cube.right[0][0] == colorTwo and cube.front[0][2] == colorThree):
return (['upper', colorOne, 2, 2],['right', colorTwo, 0, 0], ['front', colorThree, 0, 2])
elif (cube.upper[2][2] == colorOne and cube.right[0][0] == colorThree and cube.front[0][2] == colorTwo):
return (['upper', colorOne, 2, 2],['right', colorThree, 0, 0], ['front', colorTwo, 0, 2])
elif (cube.upper[2][2] == colorTwo and cube.right[0][0] == colorThree and cube.front[0][2] == colorOne):
return (['upper', colorTwo, 2, 2],['right', colorThree, 0, 0], ['front', colorOne, 0, 2])
elif (cube.upper[2][2] == colorTwo and cube.right[0][0] == colorOne and cube.front[0][2] == colorThree):
return (['upper', colorTwo, 2, 2],['right', colorOne, 0, 0], ['front', colorThree, 0, 2])
elif (cube.upper[2][2] == colorThree and cube.right[0][0] == colorOne and cube.front[0][2] == colorTwo):
return (['upper', colorThree, 2, 2],['right', colorOne, 0, 0], ['front', colorTwo, 0, 2])
elif (cube.upper[2][2] == colorThree and cube.right[0][0] == colorTwo and cube.front[0][2] == colorOne):
return (['upper', colorThree, 2, 2],['right', colorTwo, 0, 0], ['front', colorOne, 0, 2])
elif (cube.upper[2][0] == colorOne and cube.left[0][2] == colorTwo and cube.front[0][0] == colorThree):
return (['upper', colorOne, 2, 0],['left', colorTwo, 0, 2], ['front', colorThree, 0, 0])
elif (cube.upper[2][0] == colorOne and cube.left[0][2] == colorThree and cube.front[0][0] == colorTwo):
return (['upper', colorOne, 2, 0],['left', colorThree, 0, 2], ['front', colorTwo, 0, 0])
elif (cube.upper[2][0] == colorTwo and cube.left[0][2] == colorThree and cube.front[0][0] == colorOne):
return (['upper', colorTwo, 2, 0],['left', colorThree, 0, 2], ['front', colorOne, 0, 0])
elif (cube.upper[2][0] == colorTwo and cube.left[0][2] == colorOne and cube.front[0][0] == colorThree):
return (['upper', colorTwo, 2, 0],['left', colorOne, 0, 2], ['front', colorThree, 0, 0])
elif (cube.upper[2][0] == colorThree and cube.left[0][2] == colorOne and cube.front[0][0] == colorTwo):
return (['upper', colorThree, 2, 0],['left', colorOne, 0, 2], ['front', colorTwo, 0, 0])
elif (cube.upper[2][0] == colorThree and cube.left[0][2] == colorTwo and cube.front[0][0] == colorOne):
return (['upper', colorThree, 2, 0],['left', colorTwo, 0, 2], ['front', colorOne, 0, 0])
elif (cube.down[0][0] == colorOne and cube.left[2][2] == colorTwo and cube.front[2][0] == colorThree):
return (['down', colorOne, 0, 0],['left', colorTwo, 2, 2], ['front', colorThree, 2, 0])
elif (cube.down[0][0] == colorOne and cube.left[2][2] == colorThree and cube.front[2][0] == colorTwo):
return (['down', colorOne, 0, 0],['left', colorThree, 2, 2], ['front', colorTwo, 2, 0])
elif (cube.down[0][0] == colorTwo and cube.left[2][2] == colorThree and cube.front[2][0] == colorOne):
return (['down', colorTwo, 0, 0],['left', colorThree, 2, 2], ['front', colorOne, 2, 0])
elif (cube.down[0][0] == colorTwo and cube.left[2][2] == colorOne and cube.front[2][0] == colorThree):
return (['down', colorTwo, 0, 0],['left', colorOne, 2, 2], ['front', colorThree, 2, 0])
elif (cube.down[0][0] == colorThree and cube.left[2][2] == colorOne and cube.front[2][0] == colorTwo):
return (['down', colorThree, 0, 0],['left', colorOne, 2, 2], ['front', colorTwo, 2, 0])
elif (cube.down[0][0] == colorThree and cube.left[2][2] == colorTwo and cube.front[2][0] == colorOne):
return (['down', colorThree, 0, 0],['left', colorTwo, 2, 2], ['front', colorOne, 2, 0])
elif (cube.down[0][2] == colorOne and cube.right[2][0] == colorTwo and cube.front[2][2] == colorThree):
return (['down', colorOne, 0, 2],['right', colorTwo, 2, 0], ['front', colorThree, 2, 2])
elif (cube.down[0][2] == colorOne and cube.right[2][0] == colorThree and cube.front[2][2] == colorTwo):
return (['down', colorOne, 0, 2],['right', colorThree, 2, 0], ['front', colorTwo, 2, 2])
elif (cube.down[0][2] == colorTwo and cube.right[2][0] == colorThree and cube.front[2][2] == colorOne):
return (['down', colorTwo, 0, 2],['right', colorThree, 2, 0], ['front', colorOne, 2, 2])
elif (cube.down[0][2] == colorTwo and cube.right[2][0] == colorOne and cube.front[2][2] == colorThree):
return (['down', colorTwo, 0, 2],['right', colorOne, 2, 0], ['front', colorThree, 2, 2])
elif (cube.down[0][2] == colorThree and cube.right[2][0] == colorOne and cube.front[2][2] == colorTwo):
return (['down', colorThree, 0, 2],['right', colorOne, 2, 0], ['front', colorTwo, 2, 2])
elif (cube.down[0][2] == colorThree and cube.right[2][0] == colorTwo and cube.front[2][2] == colorOne):
return (['down', colorThree, 0, 2],['right', colorTwo, 2, 0], ['front', colorOne, 2, 2])
elif (cube.down[2][2] == colorOne and cube.right[2][2] == colorTwo and cube.back[2][0] == colorThree):
return (['down', colorOne, 2, 2],['right', colorTwo, 2, 2], ['back', colorThree, 2, 0])
elif (cube.down[2][2] == colorOne and cube.right[2][2] == colorThree and cube.back[2][0] == colorTwo):
return (['down', colorOne, 2, 2],['right', colorThree, 2, 2], ['back', colorTwo, 2, 0])
elif (cube.down[2][2] == colorTwo and cube.right[2][2] == colorThree and cube.back[2][0] == colorOne):
return (['down', colorTwo, 2, 2],['right', colorThree, 2, 2], ['back', colorOne, 2, 0])
elif (cube.down[2][2] == colorTwo and cube.right[2][2] == colorOne and cube.back[2][0] == colorThree):
return (['down', colorTwo, 2, 2],['right', colorOne, 2, 2], ['back', colorThree, 2, 0])
elif (cube.down[2][2] == colorThree and cube.right[2][2] == colorOne and cube.back[2][0] == colorTwo):
return (['down', colorThree, 2, 2],['right', colorOne, 2, 2], ['back', colorTwo, 2, 0])
elif (cube.down[2][2] == colorThree and cube.right[2][2] == colorTwo and cube.back[2][0] == colorOne):
return (['down', colorThree, 2, 2],['right', colorTwo, 2, 2], ['back', colorOne, 2, 0])
elif (cube.down[2][0] == colorOne and cube.left[2][0] == colorTwo and cube.back[2][2] == colorThree):
return (['down', colorOne, 2, 2],['left', colorTwo, 2, 0], ['back', colorThree, 2, 2])
elif (cube.down[2][0] == colorOne and cube.left[2][0] == colorThree and cube.back[2][2] == colorTwo):
return (['down', colorOne, 2, 2],['left', colorThree, 2, 0], ['back', colorTwo, 2, 2])
elif (cube.down[2][0] == colorTwo and cube.left[2][0] == colorThree and cube.back[2][2] == colorOne):
return (['down', colorTwo, 2, 2],['left', colorThree, 2, 0], ['back', colorOne, 2, 2])
elif (cube.down[2][0] == colorTwo and cube.left[2][0] == colorOne and cube.back[2][2] == colorThree):
return (['down', colorTwo, 2, 2],['left', colorOne, 2, 0], ['back', colorThree, 2, 2])
elif (cube.down[2][0] == colorThree and cube.left[2][0] == colorOne and cube.back[2][2] == colorTwo):
return (['down', colorThree, 2, 2],['left', colorOne, 2, 0], ['back', colorTwo, 2, 2])
elif (cube.down[2][0] == colorThree and cube.left[2][0] == colorTwo and cube.back[2][2] == colorOne):
return (['down', colorThree, 2, 2],['left', colorTwo, 2, 0], ['back', colorOne, 2, 2])
return (False) | true |