blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f95c2bca675bfc1d4b5d0c9e92a736753638e6a7 | Python | vuamitom/Code-Exercises | /android_cache/entry.py | UTF-8 | 5,772 | 2.671875 | 3 | [] | no_license | import sys
from common import *
from response_info import *
import m509
"""
// A file containing stream 0 and stream 1 in the Simple cache consists of:
// - a SimpleFileHeader.
// - the key.
// - the data from stream 1.
// - a SimpleFileEOF record for stream 1.
// - the data from stream 0.
// - (optionally) the SHA256 of the key.
// - a SimpleFileEOF record for stream 0.
//
// Because stream 0 data (typically HTTP headers) is on the critical path of
// requests, on open, the cache reads the end of the record and does not
// read the SimpleFileHeader. If the key can be validated with a SHA256, then
// the stream 0 data can be returned to the caller without reading the
// SimpleFileHeader. If the key SHA256 is not present, then the cache must
// read the SimpleFileHeader to confirm key equality.
// A file containing stream 2 in the Simple cache consists of:
// - a SimpleFileHeader.
// - the key.
// - the data.
// - at the end, a SimpleFileEOF record.
"""
SIMPLE_EOF_SIZE = 8 + 4 + 4 + 4 + 4 # for some reason, there are last 5 bytes which I don't know what's for
SIMPLE_HEADER = 8 + 4 * 3
SIMPLE_FINAL_MAGICNO = 0xf4fa6f45970d41d8
SIMPLE_INI_MAGICNO = 0xfcfb6d1ba7725c30
FLAG_HAS_CRC32 = 1 << 0
FLAG_HAS_SHA256 = 1 << 1
def read_simple_file_header(data):
    """Decode a SimpleFileHeader from the first SIMPLE_HEADER (20) bytes.

    Returns a 4-tuple matching the C struct layout:
        uint64_t initial_magic_number;
        uint32_t version;
        uint32_t key_length;
        uint32_t key_hash;
    """
    bb = ByteBuffer(data)
    # readUInt8 appears to read an 8-byte integer (the uint64 magic) and
    # readUInt4 a 4-byte integer, matching SIMPLE_HEADER = 8 + 4 * 3
    # -- TODO confirm against the ByteBuffer implementation in `common`.
    return bb.readUInt8(), bb.readUInt4(), bb.readUInt4(), bb.readUInt4()
def read_simple_eof(data):
    """Decode a SimpleFileEOF record: (final_magic, flags, data_crc32, stream_size).

    C layout:
        struct NET_EXPORT_PRIVATE SimpleFileEOF {
            enum Flags {
                FLAG_HAS_CRC32 = (1U << 0),
                FLAG_HAS_KEY_SHA256 = (1U << 1),  // Preceding the record if present.
            };
            SimpleFileEOF();
            uint64_t final_magic_number;
            uint32_t flags;
            uint32_t data_crc32;
            // |stream_size| is only used in the EOF record for stream 0.
            uint32_t stream_size;
        };
    """
    bb = ByteBuffer(data)
    # 8-byte magic followed by three 4-byte fields (see SIMPLE_EOF_SIZE).
    return bb.readUInt8(), bb.readUInt4(), bb.readUInt4(), bb.readUInt4()
def read_stream01(data):
    """Parse a Simple cache file containing streams 0 and 1.

    Works backwards from the end of the file: stream-0 EOF record first,
    then stream 0 itself, the stream-1 EOF record, stream 1, and finally
    the key which sits just after the SimpleFileHeader at the front.
    NOTE: uses Python 2 print statements, like the rest of this module.
    """
    # header = read_simple_file_header(data)
    # read s0 eof
    s0_eof = read_simple_eof(data[-SIMPLE_EOF_SIZE:])
    magic, flag, crc, ssize = s0_eof
    s0_eof_offset = SIMPLE_EOF_SIZE
    assert magic == SIMPLE_FINAL_MAGICNO, 'Magic no. not match SimpleFinalMagicNo'
    if flag & FLAG_HAS_CRC32:
        # verify crc
        # print crc
        pass
    if flag & FLAG_HAS_SHA256:
        # A 32-byte SHA256 of the key precedes the stream-0 EOF record.
        s0_eof_offset += 32
    # Slice stream 0, then the stream-1 EOF record just before it.
    stream0 = data[-(s0_eof_offset + ssize): - s0_eof_offset]
    s1_eof = read_simple_eof(data[-(s0_eof_offset + ssize + SIMPLE_EOF_SIZE): -(s0_eof_offset+ssize)])
    magic1, flag1, crc1, ssize1 = s1_eof
    assert magic1 == SIMPLE_FINAL_MAGICNO
    stream1 = data[-(s0_eof_offset + ssize + SIMPLE_EOF_SIZE + ssize1): -(s0_eof_offset + ssize + SIMPLE_EOF_SIZE)]
    # The key occupies everything between the file header and stream 1.
    key = data[SIMPLE_HEADER:-(s0_eof_offset + ssize + SIMPLE_EOF_SIZE + ssize1)]
    file_header = read_simple_file_header(data[0:SIMPLE_HEADER])
    hmagic, hversion, hkeylen, hkeyhash = file_header
    assert hmagic == SIMPLE_INI_MAGICNO
    print 'Key = ' + key.tobytes()
    print 'Stream0 size = ' + str(ssize)
    parse_response_info(stream0)
    print stream0.tobytes()
    print 'Stream1 size = ' + str(ssize1)
    # NOTE(review): parsed header/streams are only printed; callers always
    # receive (None, None, None).
    return None, None, None
def parse_response_headers(data):
    """Split the NUL-delimited raw header block into a list of strings.

    The first NUL-terminated chunk is treated as the status line and
    printed; the remainder (which still begins with the NUL separator, so
    the first split element is empty) is split on NUL into header lines.
    """
    # pass raw
    current = 0  # NOTE(review): assigned but never used afterwards
    status = None
    for i, c in enumerate(data):
        if c =='\0':
            status = data[0:i].tobytes()
            current = i
            break
    # NOTE(review): if no NUL is found, `i` is the last index (or a
    # NameError on empty input) and `status` stays None.
    print status
    return data[i:].tobytes().split('\0')
def parse_response_info(data):
phead, payload = parse_pickle(data)
# verify crc
payload_size, crc = phead
# verify_crc(crc, payload)
bb = ByteBuffer(payload)
flags = bb.readUInt4()
version = flags & RESPONSE_INFO_VERSION_MASK
assert version < RESPONSE_INFO_MINIMUM_VERSION or version > RESPONSE_INFO_VERSION, 'Unexpected response info version'
data = payload
req_time = bb.readUInt8()
res_time = bb.readUInt8()
# read header
# probably we have picked the wrong start of stream0
# byright, offset should've been 4 + 16
remain = data[( 16):]
bb = ByteBuffer(remain)
slen, header_data = bb.readString()
print ' str len = ' + str(slen)
headers = parse_response_headers(header_data)
# again, for some reason i don't know
# offset += 4
remain = remain[(slen + 4):]
print '\n'.join(headers)
if flags & RESPONSE_INFO_HAS_CERT:
cl, cert = m509.read_cert(remain)
if cert is not None:
print cert.get_issuer()
print cert.get_subject()
remain = remain[cl:]
if flags & RESPONSE_INFO_HAS_CERT_STATUS:
print 'cert_status: '
# cert_status = None
remain = remain[4:]
# vary-data
if flags & RESPONSE_INFO_HAS_VARY_DATA:
vary_data = remain[0:16]
remain = remain[16:]
print [hex(ord(c)) for c in remain[20:40]]
print remain.tobytes()
# socket-address
bb = ByteBuffer(remain)
hl, host = bb.readString()
print str(hl)
print host
port = bb.readUInt2()
print 'host = ' + host + ':' + str(port)
# protocol version
# connection info
# key_exchange_info
#
if __name__ == '__main__':
    # Usage: python entry.py <simple-cache-entry-file>
    f = sys.argv[1]
    c = None
    with open(f, 'rb') as fi:
        c = fi.read()
    # memoryview gives cheap zero-copy slicing over the raw file bytes.
    data = memoryview(c)
    # print 'Total file size = ' + str(len(c))
    # pickle_header, payload = parse_pickle(data)
    # crc, size = pickle_header
    # print 'Payload size = ' + str(size)
    # verify_crc(crc, payload)
    header, s1, s0 = read_stream01(data)
| true |
39dec4c4812908caf52de55a67988ca0013d401f | Python | mborsetti/python-holidays | /holidays/countries/slovakia.py | UTF-8 | 2,335 | 2.78125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <dr.prodigy.github@gmail.com> (c) 2017-2023
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from datetime import timedelta as td
from dateutil.easter import easter
from holidays.constants import JAN, MAY, JUL, AUG, SEP, OCT, NOV, DEC
from holidays.holiday_base import HolidayBase
class Slovakia(HolidayBase):
    """Slovak public-holiday calendar.

    https://sk.wikipedia.org/wiki/Sviatok
    https://www.slov-lex.sk/pravne-predpisy/SK/ZZ/1993/241/20181011.html
    """

    country = "SK"

    # One-off holidays that do not recur every year.
    special_holidays = {
        2018: (
            (OCT, 30, "100. výročie prijatia Deklarácie slovenského národa"),
        )
    }

    def _populate(self, year):
        """Fill this calendar with all Slovak holidays for ``year``."""
        super()._populate(year)

        # Fixed-date holidays.
        self[date(year, JAN, 1)] = "Deň vzniku Slovenskej republiky"
        self[date(year, JAN, 6)] = (
            "Zjavenie Pána (Traja králi a"
            " vianočnýsviatok pravoslávnych"
            " kresťanov)"
        )

        # Easter-based (movable) holidays.
        easter_date = easter(year)
        self[easter_date + td(days=-2)] = "Veľký piatok"
        self[easter_date + td(days=+1)] = "Veľkonočný pondelok"

        self[date(year, MAY, 1)] = "Sviatok práce"
        # Victory-over-fascism day only observed from 1997 onwards.
        if year >= 1997:
            self[date(year, MAY, 8)] = "Deň víťazstva nad fašizmom"
        self[date(year, JUL, 5)] = "Sviatok svätého Cyrila a svätého Metoda"
        self[date(year, AUG, 29)] = (
            "Výročie Slovenského národného" " povstania"
        )
        self[date(year, SEP, 1)] = "Deň Ústavy Slovenskej republiky"
        self[date(year, SEP, 15)] = "Sedembolestná Panna Mária"
        self[date(year, NOV, 1)] = "Sviatok Všetkých svätých"
        # Struggle-for-freedom-and-democracy day added in 2001.
        if year >= 2001:
            self[date(year, NOV, 17)] = "Deň boja za slobodu a demokraciu"
        self[date(year, DEC, 24)] = "Štedrý deň"
        self[date(year, DEC, 25)] = "Prvý sviatok vianočný"
        self[date(year, DEC, 26)] = "Druhý sviatok vianočný"
class SK(Slovakia):
    """Alias for Slovakia using the ISO 3166-1 alpha-2 country code."""

    pass
class SVK(Slovakia):
    """Alias for Slovakia using the ISO 3166-1 alpha-3 country code."""

    pass
| true |
c29fb4aa5a4264a755bf519bf05e6165cc907ec2 | Python | AbeelLab/GraphClean | /GraphClean.py | UTF-8 | 1,693 | 2.703125 | 3 | [] | no_license | import FeatureExtractor
import re
import networkx as nx
import UseExistingClassifier
import FilterOverlaps
import argparse
def Overlap_From_Paf(paf_filepath):
    """Parse a PAF file into a list of (read1, read2) overlap tuples.

    Read ids are taken as the first run of digits in the query name
    (column 0) and target name (column 5). Only pairs with read1 < read2
    are kept, which drops self-overlaps and one direction of each
    symmetric overlap pair.
    """
    overlaps = []
    with open(paf_filepath) as paf:
        for raw_line in paf:
            columns = raw_line[:-1].split()
            first = int(re.search(r'\d+', columns[0]).group())
            second = int(re.search(r'\d+', columns[5]).group())
            if first < second:
                overlaps.append((first, second))
    return overlaps
if __name__ == '__main__':
    # Command-line entry point: detect and remove induced overlaps from a PAF file.
    parser = argparse.ArgumentParser("GraphClean detect remove induced overlaps in a paf file and remove them")
    parser.add_argument("PAF", help="Path to the input PAF file which contains overlaps")
    parser.add_argument("Output", help="Output files prefix")
    parser.add_argument("-m", "--model", help="Path to the model, models are inside models directory", default= "models/model-potato-c0.1")
    parser.add_argument("-t", "--threshold", help="Threshold on the probabilities of prediction", type=float, default=0.1)
    args = parser.parse_args()

    paf_filepath = args.PAF
    output = args.Output
    threshold = args.threshold
    model = args.model

    # Pipeline: parse overlaps -> build overlap graph -> extract features ->
    # classify overlaps with the trained model -> filter the PAF file.
    overlap_list = Overlap_From_Paf(paf_filepath)
    graph = nx.Graph(overlap_list)
    nx.write_edgelist(graph, output + '-graph-edge-list')  # persist graph for inspection
    FeatureExtractor.Extract_All_Features(overlap_list, graph, output)
    classification_results = UseExistingClassifier.classifyoverlaps(output, model, threshold)
    FilterOverlaps.filter(paf_filepath, overlap_list, classification_results, output)
    print("Done")
80143d7dae453b189e37be9fe2cec8af50df6f3a | Python | RobinMoRi/A211TG-neural-networks | /HAAR Wavelet/example_robin_haar.py | UTF-8 | 2,081 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 6 15:07:02 2020
@author: Robin Moreno Rinding
"""
import matplotlib.pyplot as plt
import numpy as np
class Signal:
    """Container pairing signal samples ``s`` with their sample positions ``x``."""

    def __init__(self, s, x):
        # s: signal values; x: corresponding sample indices.
        self.s = s
        self.x = x
def generateData():
    """Build a 128-sample synthetic test signal.

    The signal is a smooth cubic trend plus four sinusoids of increasing
    frequency (5, 10, 20 and 40 degrees per sample, amplitude 100 each).
    Vectorized with numpy instead of the original element-by-element loop;
    the per-sample arithmetic is identical, so the float64 results match.

    Returns:
        Signal: ``s`` holds the summed samples, ``x`` the indices 0..127.
    """
    j = np.power(2, 7)  # 128 samples — a power of two, as the Haar transform requires
    x = np.arange(j, dtype=float)
    # Cubic baseline trend.
    y = 100 + 3 * x - .003 * np.power(x, 2) + 0.0000013 * np.power(x, 3)
    # Four superimposed sinusoids (angles in degrees converted to radians).
    y1 = 100 * np.sin(5 * x * np.pi / 180)
    y2 = 100 * np.sin(10 * x * np.pi / 180)
    y3 = 100 * np.sin(20 * x * np.pi / 180)
    y4 = 100 * np.sin(40 * x * np.pi / 180)
    return Signal(y + y1 + y2 + y3 + y4, x)
#---------------- Fast In-Place Haar Wavelet Transform -----------------------
def haarTransform(s, sweepReset):
    """Fast in-place Haar wavelet transform; returns a copy of the result.

    During sweep number ``sweepReset`` every detail coefficient produced in
    that sweep is zeroed out (a crude denoising/compression step). The
    input sequence ``s`` is modified in place; its length must be a power
    of two.
    """
    sweeps = int(np.log2(len(s)))
    step = 1                      # distance between the two paired samples
    stride = 2                    # distance between consecutive pairs
    pairs = int(np.power(2, sweeps))
    for sweep in range(1, sweeps + 1):
        pairs = pairs // 2        # half as many pairs each sweep
        for k in range(pairs):
            left = s[k * stride]
            right = s[k * stride + step]
            s[k * stride] = (left + right) / 2          # average coefficient
            detail = (left - right) / 2                 # detail coefficient
            # Zero all detail coefficients produced during the reset sweep.
            s[k * stride + step] = 0 if sweep == sweepReset else detail
        step = stride
        stride = stride * 2       # move to the second half of the pairing
    return s.copy()
#------------ Fast In-Place Inverse Haar Wavelet Transform --------------------
def haarInverseTransform(s):
    """Fast in-place inverse Haar wavelet transform; returns a copy.

    Reverses haarTransform: from an average a = (x + y) / 2 and a detail
    c = (x - y) / 2, the originals are recovered as x = a + c, y = a - c.
    The input sequence ``s`` is modified in place; its length must be a
    power of two.
    """
    n = int(np.log2(len(s)))
    I = np.power(2, n - 1)
    J = 2 * I
    M = 1
    for L in range(n + 1, 1, -1):
        for K in range(0, (M)):
            a1 = (s[K * J] + s[K * J + I])
            c2 = (s[K * J] - s[K * J + I])
            s[K * J] = a1
            s[K * J + I] = c2
        J = I
        # Bug fix: was `np.int(I / 2)` — the np.int alias was deprecated in
        # NumPy 1.20 and removed in 1.24, raising AttributeError at runtime.
        I = I // 2
        M = 2 * M
    return s.copy()
# Demo: transform the synthetic signal, then reconstruct it and plot all three.
sig = generateData()  # Generate x and y from simulated signal
s0 = sig.s
s1 = haarTransform(s0.copy(), 4)      # forward transform, zeroing sweep-4 details
s2 = haarInverseTransform(s1.copy())  # reconstruction (lossy because of the reset sweep)
plt.plot(sig.x, s0, label='Initial Signal')
plt.plot(sig.x, s1, label='Fast In-Place Haar Wavelet Transform')
plt.plot(sig.x, s2, label='Fast In-Place Inverse Haar Wavelet Transform')
plt.legend(loc="upper left")
a019082ce7244f856e98931a01e9bd086d3c153d | Python | HomerMadriz/Automaton_Module | /afd_jlta.py | UTF-8 | 1,450 | 3.796875 | 4 | [] | no_license | """Creación de diccionario de Alfabeto"""
def create_alf(str_alf):
    """Map each alphabet symbol to its transition-matrix column index.

    The ';' terminator and trailing newline are skipped; every other
    character is assigned the next consecutive index starting at 0.
    """
    alphabet = {}
    index = 0
    for symbol in str_alf:
        if symbol in (";", "\n"):
            continue
        alphabet[symbol] = index
        index += 1
    return alphabet
"""Creación de conjunto de estados finales"""
def create_fstate(str_fstate):
    """Build the set of final states from a line like "13;\\n" -> {"1", "3"}.

    Uses set.discard instead of set.remove so that input lacking the ';'
    terminator or trailing newline (e.g. the last line of a file) no
    longer raises KeyError; well-formed input behaves exactly as before.
    """
    fstate = set(str_fstate)
    fstate.discard(";")
    fstate.discard("\n")
    return fstate
"""Creación de matriz de transición"""
def create_mxTran(f):
    """Build the transition matrix from the remaining lines of the file.

    Each line contributes one row; every character except the ';'
    terminator and the newline is parsed as a single-digit state number.
    """
    return [
        [int(ch) for ch in line if ch not in (";", "\n")]
        for line in f
    ]
def main():
    """Run the DFA described in 'input.txt' over its input string.

    Expected file layout: line 1 the input string, line 2 the alphabet,
    line 3 the initial state, line 4 the final states, remaining lines
    the transition matrix (one row per state).
    """
    # NOTE(review): the file handle is never closed; a with-block would be safer.
    f = open("input.txt")
    cadena = f.readline()  # input string
    dic_alf = create_alf(f.readline())  # alphabet -> column index map
    init_state = int(f.readline())  # initial state
    set_fst = create_fstate(f.readline())  # final states
    mxTran = create_mxTran(f)  # transition matrix
    lis_cad = [dic_alf[i] for i in cadena if i != "\n"]  # map symbols to column indices
    secuence = (str(init_state) + "/")  # visited-states trace
    current_state = init_state  # current state starts at the initial state
    for letter in lis_cad:
        next_state = mxTran[current_state][letter]  # follow the transition
        current_state = next_state
        # update current state
        secuence += (str(current_state) + "/")  # extend the trace
    if str(current_state) in set_fst:  # accept iff the last state is final
        print("Aceptada")
    else:
        print("No Aceptada")
    print("Secuencia de estados: " + secuence)


main()
577aa94d27ab1f038e8e82effaf3eea682ee59c3 | Python | SkillfulGuru/Webscraping-BeautifulSoup-NZX | /web4.py | UTF-8 | 338 | 2.515625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests

# Fetch the NZX main-board market page and save the raw HTML to disk.
myurl = 'https://www.nzx.com/markets/NZSX'
myweb_data = requests.get(myurl)
myweb_data.encoding = 'utf-8'  # force UTF-8 decoding before reading .text
# NOTE(review): the parsed soup is never used afterwards.
mysoup = BeautifulSoup(myweb_data.text, 'html.parser')
file = open("resp_text.html", "w+", encoding="utf-8")
file.write(myweb_data.text)
file.close()
993148332702639ebc6e7e15aac699198385e73a | Python | Oushesh/tennis-count | /score_count/Prototype/lucas_kanade.py | UTF-8 | 3,329 | 2.75 | 3 | [] | no_license | '''
Optical Flow is meant to find the
stationary regions
https://learnopencv.com/optical-flow-in-opencv/
https://developer.nvidia.com/blog/opencv-optical-flow-algorithms-with-nvidia-turing-gpus/
'''
import cv2
import numpy as np
#Python Lucas Kanade
def lucas_kanade_method(video_path):
    """Track sparse Shi-Tomasi corners through a video with pyramidal
    Lucas-Kanade optical flow, drawing the point tracks on each frame.

    Args:
        video_path (str): path to the input video file. Press ESC to stop.
    """
    # Read the video
    cap = cv2.VideoCapture(video_path)
    # Parameters for ShiTomasi corner detection
    feature_params = dict(maxCorners=10, qualityLevel=0.3, minDistance=7, blockSize=15)
    # Parameters for Lucas Kanade optical flow
    lk_params = dict(winSize=(50, 50),maxLevel=2,criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
    )
    # Create random colors
    color = np.random.randint(0, 255, (100, 3))
    # Take first frame and find corners in it
    ret, old_frame = cap.read()
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # Create a mask image for drawing purposes
    mask = np.zeros_like(old_frame)
    count = 0
    while True:
        # Read new frame
        ret, frame = cap.read()
        if not ret:
            break
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Calculate Optical Flow
        '''
        p1: nextPts
        status: 0: if no flow.
                1: if there is flow.
        err: error
        '''
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
        # Bounding box of the tracked points (candidate region of interest).
        max_x = int(max([point[0][0] for point in p1]))
        max_y = int(max([point[0][1] for point in p1]))
        min_x = int(min([point[0][0] for point in p1]))
        min_y = int(min([point[0][1] for point in p1]))
        #print (min_x,min_y,max_x,max_y)
        #Crop the region of interest here:
        #TODO: use the scientific theorem provided by the paper:
        #https://arxiv.org/pdf/1801.01430.pdf
        #Decide on how to put the threshold
        #
        #cv2.imwrite("cropped" + str(count) + ".jpg",frame[min_y-20:max_y+20,min_x-20:max_y+20])
        count+=1
        # Select good points (status == 1 means the point was tracked)
        good_new = p1[st == 1]
        good_old = p0[st == 1]
        # Draw the tracks
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
            frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
        # Display the demo
        img = cv2.add(frame, mask)
        cv2.imshow("frame", img)
        #cv2.imwrite("frame"+ str(count)+".jpg",frame)
        k = cv2.waitKey(25) & 0xFF
        if k == 27:  # ESC exits the loop
            break
        # Update the previous frame and previous points
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)
#Extension of Lukas Kanade Method:
#1. Playing with the block size of: 50,50 approximately the width height
#2. We can also add the credential from the Indian student paper --> modify it from here
#3. See how to get the coordinates where Lucas Kanade has motion: -->
#4. reduce the ROI for the EAST: detector
if __name__ == "__main__":
video_path = "video/RogerFedererDoha2021.mp4"
#video_path = "video/TheBestGameEver_MurrayvFederer_cut001.mp4"
lucas_kanade_method(video_path)
'''
python lucas_kanade.py
'''
| true |
3dcdbe49d1836a1fb129b8c85b8933f38f65f730 | Python | dclsky/selenium-1 | /unittest/calctest20170620.py | UTF-8 | 827 | 3.171875 | 3 | [] | no_license | from calculator20170620 import Count # 从calculator20170620导入Countl类
import unittest # 引入unittest模块
class TestCount(unittest.TestCase):  # TestCount inherits unittest's TestCase
    """Unit tests for the Count class."""

    def setUp(self):  # per-test initialisation, run before each test case
        print('test start')

    def test_add(self):
        j = Count(2,3)  # create an instance of Count
        # use unittest's assertEqual to check the return value of add()
        self.assertEqual(j.add(),5)

    def tearDown(self):  # counterpart of setUp(): cleanup after each test case
        print('test end')
if __name__ == "__main__":
unittest.main()
# __name__作为模块的内置属性,就是.py的调用方式;.py有两种调用方式:作为模块被调用和直接使用
# 如果__name__等于__main__就表示直接使用 | true |
f3286d9efd4dafeb1f4930147d6faf6908aca697 | Python | pranathivemuri/napari | /napari/_qt/widgets/qt_progress_bar.py | UTF-8 | 3,797 | 2.5625 | 3 | [
"BSD-3-Clause"
] | permissive | from qtpy import QtCore
from qtpy.QtWidgets import (
QApplication,
QFrame,
QHBoxLayout,
QLabel,
QProgressBar,
QVBoxLayout,
QWidget,
)
class ProgressBar(QWidget):
    """Progress-bar widget with a description label on the left and an ETA
    label on the right, separated from siblings by a thin horizontal line."""

    def __init__(self, parent=None) -> None:
        super().__init__(parent)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)

        self.pbar = QProgressBar()
        self.description_label = QLabel()
        self.eta_label = QLabel()

        # description | bar | eta on one row, separator line underneath.
        base_layout = QVBoxLayout()
        pbar_layout = QHBoxLayout()
        pbar_layout.addWidget(self.description_label)
        pbar_layout.addWidget(self.pbar)
        pbar_layout.addWidget(self.eta_label)
        base_layout.addLayout(pbar_layout)

        line = QFrame(self)
        line.setObjectName("QtCustomTitleBarLine")
        line.setFixedHeight(1)
        base_layout.addWidget(line)

        self.setLayout(base_layout)

    def setRange(self, min, max):
        """Set the progress bar's minimum and maximum values."""
        self.pbar.setRange(min, max)

    def _set_value(self, value):
        """Set the current progress value and repaint immediately."""
        self.pbar.setValue(value)
        # Force an immediate repaint so progress is visible mid-computation.
        QApplication.processEvents()

    def _get_value(self):
        """Return the current progress value."""
        return self.pbar.value()

    def _set_description(self, desc):
        """Set the description text and repaint immediately."""
        self.description_label.setText(desc)
        QApplication.processEvents()

    def _set_eta(self, eta):
        """Set the ETA label text."""
        self.eta_label.setText(eta)
class ProgressBarGroup(QWidget):
    """Vertical container grouping nested progress bars, with a single
    separator line at the bottom instead of one per bar."""

    def __init__(self, pbar, parent=None) -> None:
        super().__init__(parent)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)

        pbr_group_layout = QVBoxLayout()
        pbr_group_layout.addWidget(pbar)
        pbr_group_layout.setContentsMargins(0, 0, 0, 0)

        # One separator for the whole group.
        line = QFrame(self)
        line.setObjectName("QtCustomTitleBarLine")
        line.setFixedHeight(1)
        pbr_group_layout.addWidget(line)

        self.setLayout(pbr_group_layout)
def get_pbar(nest_under=None, **kwargs):
    """Adds ProgressBar to viewer Activity Dock and returns it.

    If nest_under is valid ProgressBar, nests new bar underneath
    parent in a ProgressBarGroup

    Parameters
    ----------
    nest_under : Optional[ProgressBar]
        parent ProgressBar to nest under, by default None

    Returns
    -------
    ProgressBar
        progress bar to associate with iterable
    """
    # Local import to avoid a circular import with the main window module.
    from ..qt_main_window import _QtMainWindow

    current_window = _QtMainWindow.current()
    if current_window is None:
        # No running viewer window; nothing to attach the bar to.
        return
    viewer_instance = current_window.qt_viewer
    pbar = ProgressBar(**kwargs)
    pbr_layout = viewer_instance.activityDock.widget().layout()

    if nest_under is None:
        pbr_layout.addWidget(pbar)
    else:
        # this is going to be nested, remove separators
        # as the group will have its own
        parent_pbar = nest_under._pbar
        current_pbars = [parent_pbar, pbar]
        remove_separators(current_pbars)

        parent_widg = parent_pbar.parent()
        if isinstance(parent_widg, ProgressBarGroup):
            # Parent is already grouped; reuse its layout.
            nested_layout = parent_widg.layout()
        else:
            # First nesting: wrap the parent bar in a new group.
            new_group = ProgressBarGroup(nest_under._pbar)
            nested_layout = new_group.layout()
            pbr_layout.addWidget(new_group)
        # Insert the new bar just above the group's separator line.
        new_pbar_index = nested_layout.count() - 1
        nested_layout.insertWidget(new_pbar_index, pbar)

    return pbar
def remove_separators(current_pbars):
    """Remove any existing line separators from current_pbars
    as they will get a separator from the group

    Parameters
    ----------
    current_pbars : List[ProgressBar]
        parent and new progress bar to remove separators from
    """
    for current_pbar in current_pbars:
        line_widg = current_pbar.findChild(QFrame, "QtCustomTitleBarLine")
        if line_widg:
            # Detach, hide, then schedule deletion so Qt frees the widget safely.
            current_pbar.layout().removeWidget(line_widg)
            line_widg.hide()
            line_widg.deleteLater()
| true |
5f8678e6061abc20399379a12c3ecf92e44d6958 | Python | vck002/my-first-code | /add.py | UTF-8 | 194 | 3.96875 | 4 | [] | no_license | #addition of numbers
a = 10
print("the value of a is =")
print(a)
b = 20
print("the value of b is =")
print(b)
c = 30
print("the value of c is =")
print(c)
sum = a+b+c
print("sum = ")
print(sum) | true |
5b4e7b91dd72574ce0a422aa0cc4990095255a20 | Python | amandabedard/capstone-2020 | /vuln-bot/chatApi.py | UTF-8 | 1,318 | 2.640625 | 3 | [] | no_license | import flask
from chatbot import init, chatWithBot
from chatSession import checkSession, updateSession
import sys
import uuid
import json
from flask import jsonify, request
app = flask.Flask(__name__)
def createChatDict(request, chat):
    """Populate the chat object from the incoming JSON request.

    Resumes an existing session when the request carries a "chatId",
    otherwise assigns a fresh UUID. The previous utterance (if any) is
    preserved in chat.lastUtt before the new one is stored.
    """
    # Checking to see if there's an ongoing chat session
    if "chatId" in request.json:
        checkSession(chat, request.json.get("chatId"))
    else:
        chat.chatId = str(uuid.uuid1())
    if chat.utterance != '':
        # Keep the previous utterance before overwriting it below.
        chat.lastUtt = chat.utterance
        chat.utterance = ''
    chat.utterance = request.json.get("utterance")
    print("chatAPI: utterance is %s" % chat.utterance)
    return chat
@app.route('/chat', methods=["POST"])
def chat():
    """Handle a chat POST: run the bot, persist the session, return JSON.

    Bug fix: the try/except had been commented out, leaving the 500-error
    payload as unreachable dead code after the first return. The handler
    now returns that payload on failure instead of leaking a traceback.
    """
    try:
        print("chatAPI: starting API")
        chat = init()
        chat = createChatDict(request, chat)
        chat = chatWithBot(chat)
        print(request.remote_addr)
        chat.ipAddr = request.remote_addr
        print(chat)
        updateSession(chat)
        res = {
            "status": 200,
            "response": chat.text,
            "metadata": str(chat)
        }
        return jsonify(res)
    except Exception:
        res = {
            "status": 500,
            "error": "API Error: %s" % sys.exc_info()[0]
        }
        return jsonify(res)


app.run()
68a7d647ad192ade047d966edbd4a8e7e9e2bfbd | Python | RainLeave/TourWeb | /accounts/MyCsrfMiddleware.py | UTF-8 | 2,346 | 2.578125 | 3 | [] | no_license | # from django.utils.deprecation import MiddlewareMixin
#
#
# # class MyCsrfMiddleware(MiddlewareMixin):
# #
# # def process_response(self, request, response):
# # response["Access-Control-Allow-Origin"] = "*"
# # if request.method == "OPTIONS":
# # response["Access-Control-Allow-Headers"] = "Content-Type"
# # response["Access-Control-Allow-Methods"] = "DELETE, PUT, POST, GET"
# # return response
#
#
# # 继承 MiddlewareMixin
# class MyCsrfMiddleware(MiddlewareMixin): # 继承 MiddlewareMixin
# def process_reponse(self, request, response):
# if request.method == "OPTIONS": # 如果操作的是删除指令这里在这里判断下面 return 返回
# response["Access-Control-Allow-methods"] = "DELETE, PUT, POST, GET"
# # 处理跨域的中间件,将所有的响应都能实现跨域
# response["Access-Control-Allow-Origin"] = "http://localhost:8083"
# response["Access-Control-Allow-Headers"] = "Content-Type"
# return response
class MiddlewareMixin(object):
    """Django-style middleware adapter.

    Subclasses may define process_request(request) and/or
    process_response(request, response); __call__ wires them around the
    wrapped get_response callable.
    """

    def __init__(self, get_response=None):
        super(MiddlewareMixin, self).__init__()
        self.get_response = get_response

    def __call__(self, request):
        # Pre-hook may short-circuit by returning a truthy response.
        pre_hook = getattr(self, 'process_request', None)
        response = pre_hook(request) if pre_hook is not None else None
        if not response:
            response = self.get_response(request)
        # Post-hook gets a chance to modify or replace the response.
        post_hook = getattr(self, 'process_response', None)
        if post_hook is not None:
            response = post_hook(request, response)
        return response
class CORSMiddleware(MiddlewareMixin):
    """Middleware that adds permissive CORS headers to every response."""

    def process_response(self,request,response):
        # Add response headers:
        # allow any origin to read our data
        response['Access-Control-Allow-Origin'] = "*"
        # allow requests to carry a Content-Type header
        response['Access-Control-Allow-Headers'] = "Content-Type"
        # allow DELETE and PUT (in addition to GET and POST)
        response['Access-Control-Allow-Methods'] = "DELETE,PUT, GET, POST"
        return response
"""
CORS策略已阻止从来源“ http:// localhost:8083”访问“
http://127.0.0.1:8000/test/?text=uni.request”
处的XMLHttpRequest:请求标头字段custom-header未被接受
在飞行前响应中由Access-Control-Allow-Headers允许。
""" | true |
9e755cdb331aac3b0a10a4924b023950f0c7a83a | Python | IhorTarkhan/sorting-algorithms-visualisation | /calculation/service/sorting/algoritms/AbstractSorter.py | UTF-8 | 1,165 | 2.953125 | 3 | [] | no_license | import copy
import time
import psutil
from calculation.service.sorting.SorterResult import SorterResult
def sorting_time(sort, initial_array):
    """Return the wall-clock seconds taken to sort a deep copy of initial_array.

    The input list is deep-copied so the caller's data is never mutated.
    Uses time.perf_counter() — a monotonic, high-resolution clock intended
    for interval measurement — instead of time.time(), whose resolution is
    coarse and which can jump backwards on clock adjustments.
    """
    array_copy = copy.deepcopy(initial_array)
    time_start = time.perf_counter()
    sort(array_copy)
    time_stop = time.perf_counter()
    different = time_stop - time_start
    return different
def sorting_size(sort, initial_array):
    """Estimate the memory used while sorting a deep copy of initial_array.

    NOTE(review): this samples *system-wide* used memory via
    psutil.virtual_memory() before and after the sort, so the result
    includes allocations from every other process on the machine; treat it
    as a rough indication only.
    """
    array_copy = copy.deepcopy(initial_array)
    before_use = psutil.virtual_memory().used
    sort(array_copy)
    after_use = psutil.virtual_memory().used
    different = after_use - before_use
    # abs() because system memory may have shrunk during the sort.
    return abs(different)
class AbstractSorter:
    """Wrapper over a sorting callable (Template Method pattern, via polymorphism).

    Measures the time and memory usage of the wrapped sorting algorithm
    without the caller needing to know how measurement is done.
    """

    def __init__(self, sort):
        # sort: a callable that sorts a list in place.
        self.sort = sort

    def benchmark(self, initial_array: list) -> SorterResult:
        """Measure time and memory used to sort a copy of initial_array."""
        time_used = sorting_time(self.sort, initial_array)
        memory_used = sorting_size(self.sort, initial_array)
        return SorterResult(time_used, memory_used)
| true |
a9673afa3dc06dee8dbc2894166833e379aa2115 | Python | hoon4233/Algo-study | /2021_spring/2021_05_07/오픈채팅방_JJ.py | UTF-8 | 706 | 3.46875 | 3 | [] | no_license | def solution(record):
nameTable = {}
answer = []
for each in record:
data = each.split()
# 최신 이름 저장
if data[0] == 'Enter' or data[0] == 'Change':
nameTable[data[1]] = data[2]
for each in record:
data = each.split()
# 최신 이름 변경하여 출력
if data[0] == 'Enter':
answer.append(nameTable[data[1]]+'님이 들어왔습니다.')
elif data[0] == 'Leave':
answer.append(nameTable[data[1]]+'님이 나갔습니다.')
return answer
print(solution(["Enter uid1234 Muzi", "Enter uid4567 Prodo","Leave uid1234","Enter uid1234 Prodo","Change uid4567 Ryan"])) | true |
158466f9d44eae4c5dc134f6083e7b3cb66ff5c2 | Python | henricsoares/tg-supervisorio-online | /tg-html/vent.py | UTF-8 | 814 | 2.65625 | 3 | [] | no_license | import MySQLdb
import serial
import time
import datetime

# Read three actuator states from a microcontroller over the serial port
# and log them, timestamped, into a MySQL table.
ser = serial.Serial("/dev/ttyS0", 9600)

# Configure the MySQL connection.
# NOTE(review): credentials are hard-coded; move them to configuration.
db = MySQLdb.connect("localhost", "root","34931123", "rasprush")
curs = db.cursor()
curs.execute('CREATE TABLE IF NOT EXISTS atuadores(time text, vent text, ilum text, irri text)')

# Current timestamp formatted for the `time` column.
date = str(datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y-%m-%d %H:%M:%S'))

ser.write(b'1')  # request a reading from the microcontroller
received = ser.readline()
dado1 = (str(received.decode("utf-8")))  # presumably ventilation (vent column) — TODO confirm
print(dado1)
received = ser.readline()
dado2 = (str(received.decode("utf-8")))  # presumably lighting (ilum column) — TODO confirm
print(dado2)
received = ser.readline()
dado3 = (str(received.decode("utf-8")))  # presumably irrigation (irri column) — TODO confirm
print(dado3)

# Parameterized insert keeps the values safely escaped.
insere_sql2 = 'INSERT INTO atuadores(time, vent, ilum, irri) VALUES (%s,%s,%s,%s)'
dados1 = (date,dado1,dado2,dado3)
curs.execute (insere_sql2,dados1)
db.commit()
| true |
a329f7f769cb27912316bbaae4136fea3ec66c62 | Python | sdasguptajr/Python_basic | /DataTypesTest.py | UTF-8 | 403 | 3.34375 | 3 | [] | no_license | x=5j
x="Rahul"
x=True
x=frozenset({"One","Two","Three"})
x={"firstname":"Rahul","lastname":"Arora"}
print(type(x))
print(isinstance(x,int))
print(2**10)
x=-100067576567234234234424243242424
print(type(x))
import random
print(random.randrange(1,20))
print(10==5)
from math import pi
print(pi)
print(type(pi))
a=1.111111111111111111111118
print(a)
print(5//2) | true |
2f6025badab67e3dff709daa2349d1007f96bf00 | Python | karinakozarova/Learning-Python | /basics/power.py | UTF-8 | 95 | 3.171875 | 3 | [
"MIT"
] | permissive | a = int(input())
b = int(input())
c = int(input())
# pow(a, b) is a ** b; pow(a, b, c) computes (a ** b) % c efficiently.
print(str(pow(a,b)))
print(str(pow(a,b,c)))
c050ef4f2a4a4d42e339b6fb7a52ea46e658a529 | Python | Icohedron/EdgeGamers-Events | /lib/scoreboard.py | UTF-8 | 19,707 | 3.453125 | 3 | [] | no_license | """
Library for scoreboard teams and objectives
"""
from collections import OrderedDict
from lib.container import Container
from lib.consts import Colors
class Objective(Container):
    """
    Represents a scoreboard objective

    Attributes:
        name (str): objective name (Minecraft limit: 16 characters)
        criteria (str): scoreboard criteria (e.g. "dummy", "stat.*")
        display_name (str): display name shown in-game (limit: 32 characters)
        remove_self (bool): whether cmd_term() removes the objective itself
        slots (list of DisplaySlot): display slots, each knowing whether to reset
        consts (dict): constant integer scores set on this objective at init
    """

    class DisplaySlot:
        """
        Scoreboard objective setdisplay slot
        """
        # Defines all valid setdisplay slots
        # List comprehension to get "sidebar.team.color" for all colors that isn't "reset"
        valid_slots = ("belowName", "list", "sidebar", *("sidebar.team." + color for color in Colors.ALL if color != "reset"))

        def __init__(self, value, reset=True):
            """
            Args:
                value (str): The specific slot that objective will be displayed on
                reset (bool): If the objective in the specified slot resets when it terminates
            """
            if value not in Objective.DisplaySlot.valid_slots:
                raise SyntaxError("Invalid setdisplay slot: {}".format(value))
            self.value = value
            self.reset = reset

        def __str__(self):
            return self.value

        def __repr__(self):
            return "({value}, reset={reset})".format(value=self.value, reset=self.reset)

    def __init__(self, name, criteria="_", display_name="", remove_self=True):
        """
        Args:
            name (str): objective name
            criteria (str): objective criteria, defaults to "dummy"
                Note that "_" is the same as "dummy"
            display_name (str): objective display name, defaults to an empty string
            remove_self (bool): if the objective is removed when calling Objective.cmd_term()
        """
        super().__init__()
        # Minecraft limits objective names to 16 and display names to 32 chars.
        if len(name) > 16:
            raise SyntaxError("The objective name '{}' cannot be larger than 16 characters".format(name))
        if len(display_name) > 32:
            # Bug fix: the message previously said "16 characters" although
            # the limit checked here is 32.
            raise SyntaxError("The objective display name '{}' cannot be larger than 32 characters".format(display_name))
        self.name = name
        # "_" is shorthand for the "dummy" criteria.
        if criteria == "_":
            self.criteria = "dummy"
        else:
            self.criteria = criteria
        self.display_name = display_name
        self.remove_self = remove_self
        self.slots = []
        self.consts = {}

    def setdisplay(self, *slots, reset_slot=True):
        """
        Args:
            *slots (str): Slots that objective will be displayed on
            reset_slot (bool): If the objective in the specified slot resets when it terminates
        """
        for slot in slots:
            self.slots.append(Objective.DisplaySlot(slot, reset_slot))

    def add_const(self, name, value):
        """
        Adds a constant value to an objective
        """
        # Accept ints directly; strings must be (possibly negative) integers.
        if isinstance(value, int):
            value = str(value)
        elif isinstance(value, str) and not (value.isdigit() or (value.startswith("-") and value[1:].isdigit())):
            raise ValueError("A constant '{0} = {1}' must be an integer".format(name, value))
        self.consts[name] = value

    def cmd_init(self):
        """
        Adds all objectives using:
            scoreboard objectives add
            scoreboard objectives setdisplay

        Adds all constants using:
            scoreboard players set NAME OBJ_NAME VALUE
        """
        cmd_add = ("scoreboard objectives add {name} {criteria} {disp_name}".format(
            name=self.name, criteria=self.criteria, disp_name=self.display_name)).strip()
        self.cmd_queue.put(cmd_add)
        # If slots is not empty
        for slot in self.slots:
            self.cmd_queue.put("scoreboard objectives setdisplay {slot} {name}".format(slot=slot.value, name=self.name))
        # adds regular constants
        for name, value in sorted(self.consts.items()):
            self.cmd_queue.put("scoreboard players set {name} {obj_name} {value}".format(
                name=name, obj_name=self.name, value=value))
        return self._cmd_output()

    def cmd_term(self):
        """
        Removes objectives and resets setdisplay slots using:
            scoreboard objectives remove
            scoreboard objectives setdisplay
        """
        # Does not reset the setdisplay slot because removing
        # the objective automatically resets the slot
        if self.remove_self:
            return "scoreboard objectives remove {}".format(self.name)
        for slot in self.slots:
            if slot.reset:
                self.cmd_queue.put("scoreboard objectives setdisplay {}".format(slot.value))
        return self._cmd_output()

    def __str__(self):
        return "Objective[{}]".format(self.name)

    def __repr__(self):
        return "Objective[name={name}, criteria={criteria}, display_name={disp_name}, display_slots={disp_slots}]".format(
            name=repr(self.name), criteria=repr(self.criteria), disp_name=repr(self.display_name), disp_slots=self.slots)
class Objectives(Container):
"""
General container for holding Objective objects
"""
    def __init__(self):
        super().__init__()
        # Registered objectives, keyed by name, in insertion order.
        self.objectives = OrderedDict()
def new(self, name, criteria="_", display_name="", remove_self=True):
"""
Default method to add a single objective (see Objective.__init__)
"""
objective = Objective(name, criteria, display_name, remove_self)
self.add(objective)
def new_str(self, text, initials=None, display=None, remove_self=True):
"""
Allows input of objectives from multi-line string (without tab spaces)
Note anything starting with "#" will be ignored
Args:
text: block of text
initials: initials that goes in front of all objectives
If the objective name is ".", it is completely replaced with the initials
display: display name that goes at the beginning of all objective display names
If the objective display name is ".", it is completely replaced
remove: Whether the objectives will be automatically removed or not
eg.
# a cool comment explaining what RRpl is
RRpl
RRas
RRconstants _ Oh man constants
some_const = 5
RRcs stat.useItem.minecraft.carrot_on_a_stick RR carrot stick
RRxd _ RR ecks dee
"""
# strips the lines to remove the newlines at the end and any other whitespace
# also only appends to list if the line is not empty, and doesn't start with #
lines = [line.strip() for line in text.splitlines() if line.strip() if not line.strip()[0] == "#"]
current_obj = None
for line in lines:
data = line.split(" ", 2)
# if the given line is setting a constant
if len(data) == 3 and data[1] == "=":
if current_obj is None:
raise SyntaxError("An objective must be defined before any constants can be set")
else:
name, value = data[0], data[2]
current_obj.add_const(name, value)
continue
# adds given initials
if initials is not None:
if data[0] == ".":
data[0] = initials
else:
data[0] = initials + data[0]
# adds given display name
if display is not None and len(data) == 3:
if data[2] == ".":
data[2] = display
else:
data[2] = display + " " + data[2]
objective = Objective(*data, remove_self=remove_self)
current_obj = objective
self.add(objective)
def add(self, *objectives):
"""
Adds any given number of Objective objects
Args:
*objectives (Objective): An arbitrary number of objective
"""
for objective in objectives:
if objective.name not in self.objectives:
self.objectives[objective.name] = objective
def merge(self, objectives):
"""
Gets and stores all objectives from a container
Args:
objectives (Objectives): An arbitrary objective container
"""
self.add(*objectives.get_objectives())
def get_names(self):
return list(self.objectives.items())
def get_objectives(self):
return list(self.objectives.values())
def cmd_init(self):
"""
Creates each objective
"""
for objective in self.get_objectives():
self.cmd_queue.put(objective.cmd_init())
return self._cmd_output()
def cmd_term(self):
"""
Removes each objective
"""
for objective in self.get_objectives():
self.cmd_queue.put(objective.cmd_term())
return self._cmd_output()
def __str__(self):
return "Objectives[{}]".format(str([str(objective) for objective in self.get_objectives()]))
def __repr__(self):
return "Objectives[{}]".format(str([repr(objective) for objective in self.get_objectives()]))
def __getitem__(self, name):
"""
Args:
key (str): name of the objective
"""
if name in self.objectives:
return self.objectives[name]
raise NameError("Objective name {} was not found in the container".format(name))
def __len__(self):
"""
Gets the number of Objective objects stored
"""
return len(self.objectives)
class Team(Container):
    """
    Representation for a single scoreboard team
    """
    # dictionary to store all valid values for options
    valid_options = {
        "friendlyfire": ("true", "false"),
        "color": Colors.ALL,
        "seeFriendlyInvisibles": ("true", "false"),
        "nametagVisibility": ("always", "never", "hideForOtherTeams", "hideForOwnTeam"),
        "deathMessageVisibility": ("always", "never", "hideForOtherTeams", "hideForOwnTeam"),
        "collisionRule": ("always", "never", "pushOwnTeam", "pushOtherTeams"),
    }

    def __init__(self, name, display_name="", **options):
        """
        General object to represent a scoreboard team

        It is expected to have team options stated at the beginning and unchanging.
        However, if they have to be changed, change the team.options dict

        Args:
            name (str): team name (at most 16 characters)
            display_name (str): team display name (at most 32 characters), defaults to empty string
            **options: values to hold all team options

        Raises:
            SyntaxError: When the team name or display name is too long, or
                an option name or option value is invalid
        """
        super().__init__()
        self.name = name
        self.display_name = display_name
        self.options = {}
        if len(name) > 16:
            raise SyntaxError("The team name '{}' cannot be larger than 16 characters".format(name))
        if len(display_name) > 32:
            # Bug fix: the message previously said "16 characters" while the limit checked here is 32.
            raise SyntaxError("The team display name '{}' cannot be larger than 32 characters".format(display_name))
        # checks whether the option is valid by seeing if the key is within the valid values dict
        for option, value in options.items():
            self.add_option(option, value)

    def add_option(self, option, option_value):
        """
        Adds a single option to the options dictionary

        Args:
            option (str): Team option
            option_value (str): Team option value

        Raises:
            SyntaxError: When the option name is invalid or when
                the option value is invalid
        """
        if option not in Team.valid_options:
            raise SyntaxError("Invalid team option name '{option}' for team '{name}'".format(option=option, name=self.name))
        if option_value not in Team.valid_options[option]:
            raise SyntaxError("Invalid option value for option '{option}' in team '{name}': '{op_value}'".format(
                option=option, name=self.name, op_value=option_value))
        self.options[option] = option_value

    def out_config(self):
        """
        Creates the following line if the team has a color:
            (6 spaces) "Team_Name": "Color code"
        Otherwise, returns an empty string
        see team_color: https://hastebin.com/igojiqoleq
        """
        if "color" in self.options:
            color = self.options["color"]
            color_code = Colors.CODE_DICT[color]
            # NOTE(review): docstring promises 6 leading spaces; confirm the
            # literal below carries the indentation the consumer expects.
            return ' "{0}": "{1}"'.format(self.name, color_code)
        return ""

    def cmd_init(self):
        """
        Creates "scoreboard teams add" and "scoreboard teams option" commands
        """
        team_cmd = ("scoreboard teams add {name} {disp_name}".format(name=self.name, disp_name=self.display_name)).strip()
        self.cmd_queue.put(team_cmd)
        # iterates through the options dictionary
        for option, option_value in self.options.items():
            option_cmd = "scoreboard teams option {name} {option} {value}".format(name=self.name, option=option, value=option_value)
            self.cmd_queue.put(option_cmd)
        return self._cmd_output()

    def cmd_term(self):
        """
        Creates the "scoreboard teams remove" command
        """
        return "scoreboard teams remove {}".format(self.name)

    def __str__(self):
        return "Team[{}]".format(self.name)

    def __repr__(self):
        return "Team[name={name}, display_name={disp_name}, options={options}]".format(
            name=repr(self.name), disp_name=repr(self.display_name), options=self.options)
class Teams(Container):
    """
    General container for holding Team objects, keyed by team name.
    Insertion order is preserved (OrderedDict).
    """
    def __init__(self):
        super().__init__()
        self.teams = OrderedDict()

    def new(self, name, display_name="", **options):
        """
        Default method to add a single team (see Team.__init__)
        """
        team = Team(name, display_name, **options)
        self.add(team)

    def new_str(self, text, initials=None, display=None):
        """
        Allows input of teams from multi-line string (without tab spaces)
        Note anything starting with "#" will be ignored

        Args:
            text (str): the full docstring input for parsing teams
            initials: initials that goes in front of all teams
                If the team name is ".", it is completely replaced with the initials
            display: display name that goes at the beginning of all team display names
                If the team display name is ".", it is completely replaced

        Raises:
            SyntaxError: Before any option is set, a team must be defined.
                This means a team name cannot be the same as an option

        Examples:
            # a cool comment explaining what RRg is
            RRg RR green
            color green
            nametagVisibility hideForOtherTeams
            friendlyfire false
            collisionRule pushOwnTeam

            RRb RR Blue
            color blue
            nametagVisibility hideForOtherTeams
            friendlyfire false
            collisionRule pushOwnTeam
        """
        # Strip each line; keep only non-empty lines that are not comments.
        lines = [line.strip() for line in text.splitlines()
                 if line.strip() and not line.strip().startswith("#")]
        # holds the current team
        current_team = None
        for line in lines:
            # splits a maximum of one time, making a list of 2 strings
            data = line.split(" ", 1)
            # "option value" lines configure the most recently declared team
            if len(data) == 2 and data[0] in Team.valid_options:
                if current_team is None:
                    raise SyntaxError("A team must be defined before any options can be set")
                current_team.add_option(*data)
                continue
            # otherwise, the line declares a new team; apply initials/display
            # prefixes ("." means full replacement)
            if initials is not None:
                data[0] = initials if data[0] == "." else initials + data[0]
            if display is not None and len(data) == 2:
                data[1] = display if data[1] == "." else display + " " + data[1]
            current_team = Team(*data)
            self.add(current_team)

    def add(self, *teams):
        """
        Adds any given number of Team objects; the first team with a given
        name wins, later duplicates are silently ignored.

        Args:
            *teams (Team): An arbitrary amount of Team objects
        """
        for team in teams:
            if team.name not in self.teams:
                self.teams[team.name] = team

    def merge(self, teams):
        """
        Gets and stores all teams from a container

        Args:
            teams (Teams): An arbitrary team container
        """
        self.add(*teams.get_teams())

    def get_names(self):
        """Returns the stored team names.

        Bug fix: previously returned (name, team) pairs via .items(),
        contradicting the method name (same defect as Objectives.get_names).
        """
        return list(self.teams.keys())

    def get_teams(self):
        """Returns the stored Team objects."""
        return list(self.teams.values())

    def cmd_init(self):
        """
        Creates each team and writes the color-config lines to out_teams.txt
        """
        output_lines = []
        for team in self.teams.values():
            self.cmd_queue.put(team.cmd_init())
            line = team.out_config()
            if line:
                output_lines.append(line)
        with open("out_teams.txt", "w") as file:
            for line in output_lines:
                file.write(line + "\n")
        return self._cmd_output()

    def cmd_term(self):
        """
        Removes each team
        """
        for team in self.teams.values():
            self.cmd_queue.put(team.cmd_term())
        return self._cmd_output()

    def __getitem__(self, name):
        """
        Args:
            name (str): name of the team

        Raises:
            NameError: if no team with that name is stored
        """
        if name in self.teams:
            return self.teams[name]
        raise NameError("Team name {} was not found in the container".format(name))

    def __str__(self):
        # Bug fix: previously iterated the dict itself (keys), stringifying
        # team *names* instead of Team objects (cf. Objectives.__str__).
        return "Teams[{}]".format(str([str(team) for team in self.teams.values()]))

    def __repr__(self):
        # Bug fix: iterate values (Team objects), not keys.
        return "Teams[{}]".format(str([repr(team) for team in self.teams.values()]))

    def __len__(self):
        """
        Gets the number of Team objects stored
        """
        return len(self.teams)
# Module-level singleton containers shared by importers of this module.
OBJECTIVES = Objectives()
TEAMS = Teams()
| true |
a6e14c3023da88951432c489750d7143646381db | Python | matt-kubica/vulnerable-forum | /vulnerable/server/src/database.py | UTF-8 | 3,334 | 2.640625 | 3 | [] | no_license | import psycopg2
import logging, os
from .models import User, Question, Answer
logging.basicConfig(level=logging.DEBUG)
# Connection settings come from the environment (docker-compose style),
# falling back to development defaults.
# NOTE(review): hard-coded fallback credentials ("admin"/"admin") and host
# "db" - confirm these never reach production.
connection_params = {
    'database': os.environ.get('POSTGRES_DB') or 'default',
    'user': os.environ.get('POSTGRES_USER') or 'admin',
    'password': os.environ.get('POSTGRES_PASSWORD') or 'admin',
    'host': 'db',
    'port': '5432',
}
# Single module-level connection shared by every query helper below.
connection = psycopg2.connect(**connection_params)
logging.debug('Successfully connected to database')
def add_user(email, username, password):
    """Insert a new user row and commit."""
    cur = connection.cursor()
    # Parameterized query: the previous str.format interpolation allowed SQL injection.
    cur.execute("INSERT INTO users (email, username, password) VALUES (%s, %s, %s);",
                (email, username, password))
    connection.commit()
def add_question(user_id, question_text):
    """Insert a new question authored by user_id and commit."""
    cur = connection.cursor()
    # Parameterized query: the previous str.format interpolation allowed SQL injection.
    cur.execute("INSERT INTO questions (user_id, question_text) VALUES (%s, %s);",
                (user_id, question_text))
    connection.commit()
def add_answer(question_id, user_id, answer_text):
    """Insert an answer to question_id from user_id and commit."""
    cur = connection.cursor()
    # Parameterized query: the previous str.format interpolation allowed SQL injection.
    cur.execute("INSERT INTO answers (question_id, user_id, answer_text) VALUES (%s, %s, %s);",
                (question_id, user_id, answer_text))
    connection.commit()
def get_user(**kwargs):
    """Look up a single user.

    Supported keyword combinations:
        email + username: match on both fields
        username: match on username only
    Any other combination (including email alone) returns None without querying.

    Returns:
        User or None
    """
    cur = connection.cursor()
    if 'email' in kwargs and 'username' in kwargs:
        # Parameterized queries: the previous str.format interpolation allowed SQL injection.
        cur.execute("SELECT * FROM users WHERE email=%s AND username=%s;",
                    (kwargs['email'], kwargs['username']))
        res = cur.fetchall()
        if res:
            return User(res[0][0], res[0][1], res[0][2], res[0][3])
        return None
    if 'username' in kwargs:
        cur.execute("SELECT * FROM users WHERE username=%s;", (kwargs['username'],))
        res = cur.fetchall()
        if res:
            return User(res[0][0], res[0][1], res[0][2], res[0][3])
        return None
    return None
def get_userID(**kwargs):
    """Return the id of the user named kwargs['username'], or None."""
    cur = connection.cursor()
    if 'username' in kwargs:
        # Parameterized query: the previous str.format interpolation allowed SQL injection.
        cur.execute("SELECT id FROM users WHERE username=%s;", (kwargs['username'],))
        res = cur.fetchall()
        return res[0][0] if res else None
    return None
def get_questionID(**kwargs):
    """Return the id of the question whose text is kwargs['question'], or None."""
    cur = connection.cursor()
    if 'question' in kwargs:
        # Parameterized query: the previous str.format interpolation allowed SQL injection.
        cur.execute("SELECT id FROM questions WHERE question_text=%s;", (kwargs['question'],))
        res = cur.fetchall()
        return res[0][0] if res else None
    # Explicit None (the original fell through a bare "return").
    return None
def get_question(**kwargs):
    """Return the Question with id kwargs['id'], or None."""
    cur = connection.cursor()
    if 'id' in kwargs:
        # Parameterized query: the previous str.format interpolation allowed SQL injection.
        cur.execute("SELECT * FROM questions WHERE id=%s;", (kwargs['id'],))
        res = cur.fetchall()
        if res:
            return Question(res[0][0], res[0][1], res[0][2])
    return None
def get_questions(**kwargs):
    """Return every Question in the table (kwargs are currently unused).

    Returns:
        list[Question]; empty list when the table is empty.
    """
    cur = connection.cursor()
    cur.execute("SELECT * FROM questions;")
    res = cur.fetchall()
    return [Question(row[0], row[1], row[2]) for row in res]
def get_answers(**kwargs):
    """Return all Answers for kwargs['question_id'].

    Returns:
        list[Answer]; empty list when there are no matches or no
        question_id keyword was supplied.
    """
    cur = connection.cursor()
    if 'question_id' in kwargs:
        # Parameterized query: the previous str.format interpolation allowed SQL injection.
        cur.execute("SELECT * FROM answers WHERE question_id=%s;", (kwargs['question_id'],))
        res = cur.fetchall()
        return [Answer(row[0], row[1], row[2], row[3]) for row in res]
    return []
040866bb7d36c1a8056cd9f9946c7cc30d390248 | Python | schirrecker/Geography-Quizz | /Geography v5.py | UTF-8 | 19,543 | 3.15625 | 3 | [] | no_license | import xlrd
import openpyxl
import random
import datetime
import os, os.path
import pickle
# -----------------------------------------------------
# openpyxl syntax:
# wb = openpyxl.Workbook()
# grab the active worksheet: ws = wb.active
# Data can be assigned directly to cells: ws['A1'] = 42
# Rows can also be appended: ws.append([1, 2, 3])
# Python types will automatically be converted
# import datetime
# ws['A2'] = datetime.datetime.now()
# Save the file - wb.save("sample.xlsx")
#-----------------------------------------------------
# ----------------------------------------------------
# xlrd syntax:
# workbook = xlrd.open_workbook("FILE.xlsx")
# worksheet = workbook.sheet_by_name("<NAME OF SHEET>")
# worksheet.cell(row, col).value)
# worksheet.row_values(row)
# worksheet.nrows
# worksheet.ncols
# Row = Country, Country Code, Continent, Capital,
# Population, Area, Coastline, Government, Currency
# ----------------------------------------------------
# ----------------------------------------------------
# Class definitions
# ----------------------------------------------------
class Player:
    """A quiz player: name, score, level and per-country knowledge state.

    knowledge maps country name -> list of six fact flags in the order
    [continent, capital, population, area, coastline, currency], where
    -1 = not asked yet, 1 = answered correctly, 0 = answered wrong
    (see InitKnowledge and Quiz).
    """
    def __init__(self, name, password="password"):
        self.name = name
        # NOTE(review): password is stored in plain text - confirm acceptable.
        self.password = password
        self.points = 0
        self.level = 1
        self.knowledge = {}
        self.CountriesToTest = []
        self.InitKnowledge()
        # self.knowledge = {"country": [-1, -1, -1, -1, -1, -1]}
        # -1 = not asked yet
        # 1 = correct
        # 0 = wrong

    def InitKnowledge(self):
        """Mark every fact of every country as not-asked (-1)."""
        for country in Countries:
            self.knowledge[country.name] = [-1, -1, -1, -1, -1, -1]

    def SaveKnowledgeData(self):
        """Write the facts this player has mastered to '<name>.xls' via openpyxl."""
        workbook = openpyxl.Workbook()
        worksheet = workbook.active
        worksheet.append(["Name", "Continent", "Capital", "Population", "Area", "Coastline", "Currency", "Ratio"])
        for country in Countries:
            row = [country.name]
            # Nb_facts counts how many of the six facts are known for this country.
            Nb_facts = 0
            if self.knowledge[country.name][0] == 1:
                row.append(country.continent)
                Nb_facts += 1
            else:
                row.append("still learning")
            if self.knowledge[country.name][1] == 1:
                row.append(country.capital)
                Nb_facts += 1
            else:
                row.append("still learning")
            if self.knowledge[country.name][2] == 1:
                row.append(int(country.population))
                Nb_facts += 1
            else:
                row.append("still learning")
            if self.knowledge[country.name][3] == 1:
                row.append(int(country.area))
                Nb_facts += 1
            else:
                row.append("still learning")
            if self.knowledge[country.name][4] == 1:
                row.append(int(country.coastline))
                Nb_facts += 1
            else:
                row.append("still learning")
            if self.knowledge[country.name][5] == 1:
                row.append(country.currency)
                Nb_facts += 1
            else:
                row.append("still learning")
            if Nb_facts > 0:
                row.append(str(int(100*Nb_facts/6))+"%")
            worksheet.append(row)
        # NOTE(review): openpyxl writes xlsx-format data; the ".xls" extension
        # is misleading - confirm consumers can still open the file.
        workbook.save(self.name + ".xls")
        print("Your current knowledge was saved in file " + self.name + '.xls')

    def GetKnowledgeData(self):
        # Stub: loading saved knowledge back from file is not implemented yet.
        pass

    def PrintKnowledge(self):
        """Print every individual fact the player has answered correctly."""
        for country in Countries:
            if self.knowledge[country.name][0] == 1:
                print (country.name + " - continent: " + country.continent)
            if self.knowledge[country.name][1] == 1:
                print (country.name + " - capital: " + country.capital)
            if self.knowledge[country.name][2] == 1:
                print (country.name + " - population: " + str(int(country.population)))
            if self.knowledge[country.name][3] == 1:
                print (country.name + " - area: " + str(int(country.area)))
            if self.knowledge[country.name][4] == 1:
                print (country.name + " - coastline: " + str(int(country.coastline)))
            if self.knowledge[country.name][5] == 1:
                print (country.name + " - currency: " + country.currency)

    def PrintKnowledgeStats(self):
        """Print a summary report card: per-fact totals and a histogram of
        how many countries have 0..6 facts answered correctly."""
        capitals, continents, populations, areas, coastlines, currencies = 0, 0, 0, 0, 0, 0
        # ratio[i] counts countries with exactly i correct facts (i in 0..6).
        ratio = [0, 0, 0, 0, 0, 0, 0]
        for country in Countries:
            i = 0
            if self.knowledge[country.name][0] == 1:
                continents += 1
                i += 1
            if self.knowledge[country.name][1] == 1:
                capitals += 1
                i += 1
            if self.knowledge[country.name][2] == 1:
                populations += 1
                i += 1
            if self.knowledge[country.name][3] == 1:
                areas += 1
                i += 1
            if self.knowledge[country.name][4] == 1:
                coastlines += 1
                i += 1
            if self.knowledge[country.name][5] == 1:
                currencies += 1
                i += 1
            ratio [i] += 1
        print()
        print ("Here is the report card for " + self.name + ":")
        print ("----------------------------" + "-" * len(self.name))
        print("You have " + str(self.points) + " points")
        print()
        print ("Capitals: " + str(capitals))
        print ("Continents: " + str(continents))
        print ("Population sizes: " + str(populations))
        print ("Areas: " + str(areas))
        print ("Coastlines: " + str(coastlines))
        print ("Currencies: " + str(currencies))
        print()
        for i in range(7):
            print ("Number of country with " + str(i) + " correct answers: " + str(ratio[i]))
        print()

    # test only countries that player doesn't know
    # 0: continent, 1: capital, 2: pop, 3: area, 4: coastline, 5: currency
    def UpdateCountriesToTest(self, level, continents):
        """Rebuild CountriesToTest with countries at the given difficulty level
        whose continent and capital are not both known, restricted to the given
        continents collection (membership test via `in`)."""
        self.CountriesToTest = []
        for country in CountriesByLevel[str(level)]:
            if self.knowledge[country.name][0] != 1 and self.knowledge[country.name][1] != 1:
                if country.continent in continents:
                    self.CountriesToTest.append(country)

    def QuizRound(self, Nb_Questions, level, continents):
        """Ask Nb_Questions full-country quizzes drawn from level/continents.

        NOTE(review): if every matching country is already known,
        CountriesToTest is empty and random.choice raises IndexError -
        confirm callers guard against this.
        """
        self.UpdateCountriesToTest(level, continents)
        for i in range(Nb_Questions):
            country = random.choice(self.CountriesToTest)
            print ("")
            self.Quiz(country)
            print ("")
        print ("You have " + str(self.points) + " points")

    def Quiz(self, country):
        """Ask all six fact questions about `country`, awarding one point per
        correct answer and recording the outcome in self.knowledge."""
        # Continent
        # ---------
        validInput = False
        while not validInput:
            try:
                txt = "In which continent is the country of " + country.name + " ? " + os.linesep
                txt = txt + "Continents are: " + " ,".join(Continents) + os.linesep
                answer = input(txt)
            except:
                print("Entry error, please try again")
            else:
                if answer in Continents:
                    validInput = True
                else:
                    print ("please enter from the list of continents")
        if country.CheckContinent(answer):
            print ("This is correct")
            self.points += 1
            self.knowledge[country.name][0] = 1
        else:
            print ("This is wrong")
            self.knowledge[country.name][0] = 0
            print (" The answer was: ", country.continent)
        # Capital
        # -------
        validInput = False
        while not validInput:
            try:
                answer = input("What is the capital of " + country.name + " ? ")
            except:
                print("Entry error, please try again")
            else:
                # Reject answers shorter than 2 chars or made only of spaces.
                if len(answer) > 1 and len(answer) != answer.count(" "):
                    validInput = True
                else:
                    print ("print enter a valid city")
        if country.CheckCapital(answer):
            print ("This is correct")
            self.points += 1
            self.knowledge[country.name][1] = 1
        else:
            print ("This is wrong")
            self.knowledge[country.name][1] = 0
            print ("Capital: ", country.capital)
        # Population
        # ----------
        validInput = False
        while not validInput:
            try:
                # The player answers in millions; convert to absolute count.
                answer = int(input("What is the population of " + country.name + " (in millions ?) "))*1000000
            except:
                print("Entry error, please enter a valid number")
            else:
                if answer > 0:
                    validInput = True
                else:
                    print("Please enter a valid number")
        if country.CheckPopulation(answer):
            print ("This is correct")
            self.points += 1
            self.knowledge[country.name][2] = 1
        else:
            print ("This is wrong")
            self.knowledge[country.name][2] = 0
            print ("Population: ", int(country.population))
        # Area
        # --------------
        validInput = False
        while not validInput:
            try:
                answer = input("What is the area of " + country.name + " ? ")
            except:
                print("Entry error, please try again")
            else:
                if len(answer) != answer.count(" "):
                    # NOTE(review): this int() runs outside the try, so a
                    # non-numeric answer raises ValueError - confirm intended.
                    answer = int(answer)
                    validInput = True
                else:
                    print("Please enter a valid number")
        if country.CheckArea(answer):
            print ("This is correct")
            self.points += 1
            self.knowledge[country.name][3] = 1
        else:
            print ("This is wrong")
            self.knowledge[country.name][3] = 0
            print ("Area: ", int(country.area))
        # Coastline
        # ---------
        validInput = False
        while not validInput:
            try:
                answer = input("Does " + country.name + " have a coastline ? (y or n)")
            except:
                print("Entry error, please try again")
            else:
                if answer.lower() in ["yes", "no", "y", "n"]:
                    validInput = True
                else:
                    print("Please enter a valid response")
        if country.CheckCoastline(answer):
            print ("This is correct")
            self.points += 1
            self.knowledge[country.name][4] = 1
        else:
            print ("This is wrong")
            self.knowledge[country.name][4] = 0
            print ("Coastline: ", int(country.coastline))
        # Currency
        # ---------
        validInput = False
        while not validInput:
            try:
                answer = input("What is the currency of " + country.name + " ? ")
            except:
                print("Entry error, please try again")
            else:
                if len(answer) != answer.count(" "):
                    validInput = True
                else:
                    print("Please enter a valid response")
        if country.CheckCurrency(answer):
            print ("This is correct")
            self.points += 1
            self.knowledge[country.name][5] = 1
        else:
            print ("This is wrong")
            self.knowledge[country.name][5] = 0
            print ("Currency: ", country.currency)
class Country:
    """Static facts about one country plus fuzzy answer checkers.

    The *_margin attributes are tolerances used by the Check* methods:
    fractional for numeric facts (see isclose), a positional character-hit
    ratio for string facts (see stringsMatch).
    """
    def __init__(self, name, continent, capital, population,
                 area, coastline,currency, level):
        self.name = name
        self.continent = continent
        self.continent_margin = .9
        self.capital = capital
        self.capital_margin = .8
        self.population = population
        self.population_margin = 0.3
        self.area = area
        self.area_margin = 0.5
        # NOTE(review): attribute name has a typo ("coastine") and is never
        # read by CheckCoastline - confirm before relying on it.
        self.coastine_margin = .7
        self.currency = currency
        self.currency_margin = .7
        self.level = int(level)

    def __repr__(self):
        # Multi-line human-readable fact sheet (population shown in millions).
        txt = self.name + os.linesep
        txt = txt + "Continent: " + self.continent + os.linesep
        txt = txt + "Capital: " + self.capital + os.linesep
        txt = txt + "Population: " + str(int(self.population/1000000)) + "M" + os.linesep
        txt = txt + "Area: " + str(self.area) + os.linesep
        txt = txt + "Coastline: " + str(self.coastline) + os.linesep
        txt = txt + "Currency: " + self.currency + os.linesep
        txt = txt + "Difficulty: " + str(self.level) + os.linesep
        return txt

    def CheckCapital(self, capital):
        """Fuzzy-match the proposed capital against the real one."""
        return stringsMatch(self.capital, capital, self.capital_margin)

    def CheckContinent(self, continent):
        """Fuzzy-match the proposed continent against the real one."""
        return stringsMatch(self.continent, continent, self.continent_margin)

    def CheckCurrency(self, currency):
        """Fuzzy-match the proposed currency against the real one."""
        return stringsMatch(self.currency, currency, self.currency_margin)

    def CheckPopulation(self, population):
        """Accept populations within +/-30% of the real value."""
        return isclose(self.population, population, self.population_margin)

    def CheckArea(self, area):
        """Accept areas with a tolerance band that widens for smaller countries.

        NOTE(review): margins > 1 make the lower bound of isclose negative, so
        for small countries effectively only an upper bound applies - confirm
        this generosity is intended.
        """
        if self.area < 1000:
            return area < 1000
        elif self.area < 10000:
            return isclose(self.area, area, 5)
        elif self.area < 100000:
            return isclose(self.area, area, 3)
        elif self.area < 500000:
            return isclose(self.area, area, .7)
        else:
            return isclose(self.area, area, self.area_margin)

    def CheckCoastline(self, coastline):
        """Check a yes/no answer against whether the country has any coastline."""
        coastline = coastline.lower()[0] # first letter y or n
        if (self.coastline == 0 and coastline == 'n') or (self.coastline != 0 and coastline == 'y'):
            return True
        else:
            return False
# ---------------------------------------------------
# Useful functions
# ---------------------------------------------------
def isclose(a, b, margin):
    """Return True if b lies strictly within a fractional `margin` of a,
    i.e. a*(1-margin) < b < a*(1+margin)."""
    return a * (1 - margin) < b < a * (1 + margin)
def stringsMatch(a, b, margin):
    """Fuzzy string comparison (case-insensitive).

    Exact matches pass immediately; otherwise count positions where the two
    strings share the same character and require hits >= margin * longer length.
    """
    x, y = a.lower(), b.lower()
    if x == y:
        return True
    hits = sum(1 for cx, cy in zip(x, y) if cx == cy)
    return hits >= margin * max(len(x), len(y))
def approx(a, n):
    """Truncate a toward zero to a multiple of 10**n."""
    scale = 10 ** n
    return int(a / scale) * scale
# -------------------------------------------------
# Load country data from file
# -------------------------------------------------
def LoadCountryData():
    """Read every country row from the module-level xlrd `worksheet`.

    Returns:
        (Countries, CountriesByLevel, Continents): the list of Country
        objects, a dict mapping difficulty level "1".."5" to its countries,
        and the de-duplicated list of continent names.
    """
    Continents = []
    Countries = []
    CountriesByLevel = {"1": [], "2": [], "3": [], "4": [], "5": []}
    # Row 0 is the header, so data rows start at 1.
    for row in range(1, worksheet.nrows):
        country = worksheet.row_values(row)
        Countries.append(Country(
            country[0], # name
            country[1], # continent
            country[2], # capital
            approx(country[3], 3), # population, truncated to thousands
            country[4], # area
            country[5], # coastline
            country[6], # currency
            country[8])) # difficulty level (column 7, government, is unused)
    # create levels: dictionary = {"level" : [list of countries]}
    # create continents: list of continents
    for country in Countries:
        CountriesByLevel[str(country.level)].append(country)
        Continents.append(country.continent)
    # NOTE(review): set() makes the continent ordering arbitrary between runs.
    Continents = list(set(Continents))
    return Countries, CountriesByLevel, Continents
# -------------------------------------------------
# Save player data to file
# -------------------------------------------------
def SavePlayerData():
    """Pickle the module-level Players list to the 'PlayerData' file."""
    global Players
    with open('PlayerData', 'wb') as PlayerFile:
        pickle.dump(Players, PlayerFile)
# -------------------------------------------------
# Load player data from file
# -------------------------------------------------
def LoadPlayerData():
    """Unpickle the Players list from 'PlayerData'; empty list on first run."""
    if not os.path.isfile('PlayerData'):
        print("No existing player data. This is the first game.")
        return []
    with open('PlayerData', 'rb') as PlayerFile:
        players = pickle.load(PlayerFile)
    print ("Player data loaded.")
    return players
# ----------------------------------------------------
# Load Excel file and its worksheets
# ----------------------------------------------------
workbook = xlrd.open_workbook("Countries.xlsx")
worksheet = workbook.sheet_by_name("Facts")
# NOTE(review): nrows includes the header row, so Nb_Countries is one more
# than the number of data rows - confirm consumers account for that.
Nb_Countries = worksheet.nrows
Nb_Facts = worksheet.ncols - 1
# -------------------------------------------------
# Load data
# -------------------------------------------------
Countries, CountriesByLevel, Continents = LoadCountryData()
Players = LoadPlayerData()
# Game tuning constants: questions per round and the allowed level range.
NB_QUESTIONS = 1
MAXLEVEL = 5
MINLEVEL = 1
print (str(worksheet.nrows) + " countries loaded.")
txt = "Existing players: "
for player in Players:
    txt = txt + player.name + " "
print (txt)
def CheckPlayer(name, Players):
    """
    Find an existing player by name (case-insensitive) or offer to create one.

    Args:
        name (str): player name typed by the user
        Players (list): list of existing Player objects (appended to on creation)

    Returns:
        The matching or newly created Player, or None if the user declines.
    """
    for p in Players:
        if name.lower() == p.name.lower():
            print ("Welcome back, " + name + os.linesep)
            return p
    # No match found: offer to create a new player.
    answer = input("Would you like to create this user? (y/n) ")
    # Bug fix: the old answer[0] indexing crashed on empty input; also
    # the decline path now returns None explicitly.
    if answer.strip().lower().startswith("y"):
        p = Player(name)
        Players.append(p)
        return p
    return None
# -------------------------------------------------
# Start game loop
# -------------------------------------------------
name = str(input("Enter player name: "))
player = CheckPlayer(name, Players)
if player is None:
    # CheckPlayer returns None when the user declines to create a new profile.
    raise SystemExit("No player selected - exiting.")
player.SaveKnowledgeData()
print ("Your are at level " + str(player.level))
print ("You currently have " + str(player.points) + " points")
done = False
while not done:
    try:
        play = input("Would you like to play ? (y/n)")
    except Exception:
        print ("Input error, try again")
    else:
        if play.lower() == "n":
            done = True
        else:
            level = ""
            while level not in range(MINLEVEL, MAXLEVEL+1):
                # Bug fix: a non-numeric level previously crashed with an
                # uncaught ValueError.
                try:
                    level = int(input("what level? (1 to 5) "))
                except ValueError:
                    print("Please enter a number between 1 and 5")
            test_continents = [c for c in Continents]
            test_continents.append("All")
            continent = ""
            while continent not in test_continents:
                continent = input("What continent? " + str(test_continents) + " ")
            if continent == "All":
                player.QuizRound(NB_QUESTIONS, int(level), Continents)
            else:
                # Bug fix: pass a one-element list, not a bare string.
                # QuizRound tests membership with "country.continent in
                # continents"; a bare string would do substring matching.
                player.QuizRound(NB_QUESTIONS, int(level), [continent])
# -----------------------
# Quit and Save Data
# -----------------------
player.PrintKnowledgeStats()
player.SaveKnowledgeData()
# Bug fix: the pickled player list was loaded at startup but never saved,
# so all progress was silently lost between sessions.
SavePlayerData()
| true |
4c7fb7a3af93022a6a80e7850679674a52b63b2f | Python | ionvision/frnn | /analysis/build_gifs.py | UTF-8 | 5,215 | 2.59375 | 3 | [] | no_license | import imageio
import glob
import scipy.ndimage as ndim
import scipy.misc as sm
import numpy as np
# Prepare method strings
# Prepare method strings
PATH_STRINGS = '/home/moliu/Documents/Papers/Supplementary/titles/'
# Pre-loaded caption images, one per compared method. The order here must
# match the column order produced by generate_instance_sequence.
s_titles = [
    ndim.imread(PATH_STRINGS + 'frnn.png'),
    ndim.imread(PATH_STRINGS + 'rladder.png'),
    ndim.imread(PATH_STRINGS + 'prednet.png'),
    ndim.imread(PATH_STRINGS + 'srivastava.png'),
    ndim.imread(PATH_STRINGS + 'mathieu.png'),
    ndim.imread(PATH_STRINGS + 'villegas.png'),
]
def generate_captions(strings, width):
    """Build a single 20px-high caption strip from per-method title images.

    Args:
        strings: list of loaded title images (grayscale or RGB uint8 arrays).
        width: pixel width of each prediction column the captions sit above.

    Returns:
        uint8 RGB array: captions side by side (15px gaps), left-padded by one
        empty column (width+15) to leave room for the ground-truth column.
    """
    titles = [255 * np.ones((20, width+15, 3), dtype=np.uint8) for _ in strings]
    # Prepare strings: promote grayscale (H, W) titles to 3-channel RGB.
    strings = [(np.stack([s, s, s], axis=2) if len(s.shape) == 2 else s) for s in strings]
    for i, s in enumerate(strings):
        s = sm.imresize(s, size=0.8)
        # Center the shrunken title inside its 20 x width cell.
        # NOTE(review): on Python 3 these divisions yield floats and slicing
        # would raise - this script presumably runs under Python 2; confirm.
        t_pad, l_pad = (20 - s.shape[0]) / 2, (width - s.shape[1]) / 2
        titles[i][t_pad:t_pad+s.shape[0], l_pad:l_pad+s.shape[1]] = s
    return np.pad(
        np.concatenate(titles, axis=1)[:, :-15],
        ((0, 0), (width+15, 0), (0, 0)),
        'constant', constant_values=255
    )
def preprocess_predictions(gt, predictions, width=80):
    """Normalize ground-truth frames and per-method predictions for display.

    Ensures 3-channel RGB frames, resizes predictions to the ground-truth
    size, substitutes black frames for methods with no output, prepends the
    5 seed frames, and pads every frame horizontally to `width` pixels.

    Returns:
        (gt, predictions): lists of uint8 RGB frames ready to composite.
    """
    # Append dimension if no channels
    gt = [(f if len(f.shape) == 3 else np.expand_dims(f, axis=2)) for f in gt]
    predictions = [[(f if len(f.shape) == 3 else np.expand_dims(f, axis=2)) for f in s] for s in predictions]
    # Replicate channel if grayscale
    gt = [(f if f.shape[2] == 3 else np.concatenate([f, f, f], axis=2)) for f in gt]
    predictions = [[(f if f.shape[2] == 3 else np.concatenate([f, f, f], axis=2)) for f in s] for s in predictions]
    # Reshape predictions to gt shape, prepare black frames, fill leading frames
    predictions = [[sm.imresize(f, gt[0].shape[:2]) for f in s] for s in predictions]
    predictions = [([np.zeros(gt[0].shape, dtype=np.uint8)] * 10 if len(s) == 0 else s) for s in predictions]
    # Prepend the last seed frame (gt[4]) five times so every sequence starts aligned.
    predictions = [[gt[4]] * 5 + s for s in predictions]
    # Pad frames to fit expected width
    # NOTE(review): integer/float division issue on Python 3 as elsewhere in
    # this script - presumably Python 2; confirm.
    padding = ((0, 0), ((width - gt[0].shape[1]) / 2,)*2, (0, 0))
    gt = [np.pad(f, padding, 'constant', constant_values=255) for f in gt]
    predictions = [[np.pad(f, padding, 'constant', constant_values=255) for f in s] for s in predictions]
    return gt, predictions
def generate_instance_sequence(path):
    """Compose one sequence: 15 frames, ground truth plus 6 method columns.

    Args:
        path: directory containing g*.png (ground truth) and
            <method>_*.png prediction frames.

    Returns:
        list of 15 uint8 RGB frames (7 columns separated by 15px gaps).
    """
    # List ground truth images: last 5 seed frames followed by 10 predicted steps.
    f_gt = sorted(glob.glob(path + 'g*.png'))
    f_gt = f_gt[-5:] + f_gt[:10]
    # List prediction images, one sorted list per method (order matches s_titles).
    f_methods = [
        sorted(glob.glob(path + 'frnn_*.png')), sorted(glob.glob(path + 'rladder_*.png')),
        sorted(glob.glob(path + 'prednet_*.png')), sorted(glob.glob(path + 'srivastava_*.png')),
        sorted(glob.glob(path + 'mathieu_*.png')), sorted(glob.glob(path + 'villegas_*.png'))
    ]
    # Read & preprocess frames
    f_gt, f_methods = [ndim.imread(f) for f in f_gt], [[ndim.imread(f) for f in m] for m in f_methods]
    f_gt, f_methods = preprocess_predictions(f_gt, f_methods)
    im_h, im_w = f_gt[0].shape[:2]
    # Fill frames with ground truth & predictions: gt in column 0, one
    # column per method afterwards, 15px white gutters between columns.
    frame = 255 * np.ones((im_h, im_w*7 + 15*6, 3), dtype=np.uint8)
    frames = [np.copy(frame) for _ in range(15)]
    for i, (fg, fm) in enumerate(zip(f_gt, zip(*f_methods))):
        frames[i][:im_h, :im_w] = fg
        for j, (f, title) in enumerate(zip(fm, ['frnn', 'rladder', 'prednet', 'Srivastava', 'mathieu', 'villegas'])):
            r = (j + 1) * (im_w + 15)
            frames[i][:im_h, r:r+im_w] = f
    # Return sequence frames
    return frames
def build_dataset(name, paths):
    """Render one comparison GIF.

    name  -- output gif file path
    paths -- sequence directories, each handed to generate_instance_sequence;
             their frames are stacked vertically under a caption row
    Writes 15 frames at 0.5s each via imageio.
    """
    titles = generate_captions(s_titles, 80)
    instances = [generate_instance_sequence(p) for p in paths]
    s_h, s_w = instances[0][0].shape[:2]
    # Merge sequences. Use uint8 like the instance frames themselves: the
    # original float32 canvas made every concatenated frame float, forcing
    # imageio into a lossy float->uint8 conversion on save.
    frame = 255 * np.ones((len(paths) * (s_h + 15) - 15, s_w, 3), dtype=np.uint8)
    frames = [np.copy(frame) for _ in range(15)]
    for i, f in enumerate(zip(*instances)):
        for j, m in enumerate(f):
            t = j * (s_h + 15)  # vertical offset of sequence j (15px gap)
            frames[i][t:t + s_h] = m
    imageio.mimsave(name, [np.concatenate((titles, f), axis=0) for f in frames], duration=0.5)
if __name__ == '__main__':
    # Input: per-sequence qualitative result images; output: comparison GIFs.
    PATH_IN = '/home/moliu/Documents/Papers/Supplementary/images/qualitative/'
    PATH_OUT = '/home/moliu/Documents/Papers/Supplementary/gifs/'
    # One GIF per dataset; each entry is a sequence directory containing the
    # ground-truth frames plus every method's prediction frames.
    build_dataset(PATH_OUT + 'mmnist.gif', [
        PATH_IN + 'mmnist_l1/s12/', PATH_IN + 'mmnist_l1/s11/', PATH_IN + 'mmnist_l1/s13/',
        PATH_IN + 'mmnist_l1/s17/', PATH_IN + 'mmnist_l1/s20/', PATH_IN + 'mmnist_l1/s21/',
        PATH_IN + 'mmnist_l1/s11_n/', PATH_IN + 'mmnist_l1/s5_n/',
    ])
    build_dataset(PATH_OUT + 'kth.gif', [
        PATH_IN + 'kth_l1/s31/', PATH_IN + 'kth_l1/s37/', PATH_IN + 'kth_l1/s77/',
        PATH_IN + 'kth_l1/s23/', PATH_IN + 'kth_l1/s43/', PATH_IN + 'kth_l1/s75/',
        PATH_IN + 'kth_l1/s97/', PATH_IN + 'kth_l1/s37_2/',
    ])
    build_dataset(PATH_OUT + 'ucf101.gif', [
        PATH_IN + 'ucf101_l1/s8/', PATH_IN + 'ucf101_l1/s9_last/', PATH_IN + 'ucf101_l1/s9_mean/',
        PATH_IN + 'ucf101_l1/s21/', PATH_IN + 'ucf101_l1/s37/', PATH_IN + 'ucf101_l1/s44/',
        PATH_IN + 'ucf101_l1/s28/', PATH_IN + 'ucf101_l1/s41/',
    ])
| true |
4e1a500b76fe761deab4b8d5c6e301a2e331117b | Python | arjunbrara123/Day-17-quiz-game-start | /question_model.py | UTF-8 | 524 | 3.296875 | 3 | [] | no_license | import random
class Question:
    """A quiz question: the prompt text, the correct answer and the shuffled
    multiple-choice options (the answer is always one of the options)."""

    def __init__(self, q_text, answer, choices):
        self.text = q_text
        self.answer = answer
        if isinstance(choices, str):
            # BUG FIX: the original compared type(choices) == 'str' (a string
            # literal, always False) and then assigned to self.choices[0]
            # before self.choices existed. Build the two-option list properly.
            self.choices = [choices, answer]
            random.shuffle(self.choices)
        elif choices[0] == 'True' or choices[0] == 'False':
            # Boolean questions always present the fixed True/False pair.
            self.choices = ['True', 'False']
        else:
            # Copy so the caller's list is not mutated by append/shuffle.
            self.choices = list(choices)
            self.choices.append(answer)
            random.shuffle(self.choices)
| true |
9aac233abc2aa75fd77a934b49722f065e1e8a8b | Python | zhoujf2010/MyMachineLearning | /ch8_svm/step2.py | UTF-8 | 5,260 | 2.75 | 3 | [] | no_license | # -*- coding:utf-8 -*-
'''
Created on 2017年5月30日
@author: Jeffrey Zhou
'''
'''
SVM对鸢尾花数据分类
自建SVM
'''
import pandas as pd
from sklearn import svm
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
def selectJrand(i, m):
    """Return a uniformly random index j in [0, m) with j != i
    (used to pick the second alpha in simplified SMO)."""
    while True:
        j = int(np.random.uniform(0, m))
        if j != i:
            return j
def clipAlpha(aj, H, L):
    """Clamp aj into [L, H]; when the interval is inverted (L > H), L wins,
    matching the original's apply-H-then-L order."""
    return max(L, min(aj, H))
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
    """Simplified SMO (Platt) optimiser for a linear-kernel SVM.

    dataMatIn   -- m training samples, convertible to an m x n matrix
    classLabels -- m labels in {-1, +1}
    C           -- box constraint: every alpha stays in [0, C]
    toler       -- tolerance on the KKT-violation check
    maxIter     -- stop after this many consecutive full passes with no change
    Returns (b, alphas): the scalar bias and the m x 1 alpha column matrix.
    """
    x = np.mat(dataMatIn);
    y = np.mat(classLabels).transpose()
    b = 0
    m, n = np.shape(x)
    alphas = np.mat(np.zeros((m, 1)))
    niter = 0
    while(niter < maxIter):
        alphaPairsChanges = 0
        for i in range(m):
            # f(x_i) under the current model, and its error against y_i
            fXi = float(np.multiply(alphas, y).T * (x * x[i, :].T)) + b
            Ei = fXi - float(y[i])
            # Only optimise alphas that violate the KKT conditions.
            if((y[i] * Ei < -toler) and (alphas[i] < C)) or ((y[i] * Ei > toler) and (alphas[i] > 0)):
                j = selectJrand(i, m) # randomly pick the second alpha
                fXj = float(np.multiply(alphas, y).T * (x * x[j, :].T)) + b
                Ej = fXj - float(y[j])
                alphaIold = alphas[i].copy();
                alphaJold = alphas[j].copy();
                # Compute the clip bounds so both alphas stay within [0, C].
                if (y[i] != y[j]):
                    L = max(0, alphas[j] - alphas[i])
                    H = min(C, C + alphas[j] - alphas[i])
                else:
                    L = max(0, alphas[j] + alphas[i] - C)
                    H = min(C, alphas[j] + alphas[i])
                if L == H:
                    #print ('L==H');
                    continue
                # eta: second derivative of the objective along the constraint.
                eta = 2.0 * x[i, :] * x[j, :].T - x[i, :] * x[i, :].T - x[j, :] * x[j, :].T
                if eta >= 0:
                    #print ('eta >=0');
                    continue
                # Move alpha_j, then move alpha_i by the same amount in the
                # opposite direction to keep the sum constraint satisfied.
                alphas[j] -= y[j] * (Ei - Ej) / eta
                alphas[j] = clipAlpha(alphas[j], H, L)
                if (abs(alphas[j] - alphaJold) < 0.00001):
                    #print ('j not movint enought');
                    continue
                alphas[i] += y[j] * y[i] * (alphaJold - alphas[j])
                # Recompute the bias from whichever alpha is strictly inside (0, C).
                b1 = b - Ei - y[i] * (alphas[i] - alphaIold) * x[i, :] * x[i, :].T - y[j] * (alphas[j] - alphaJold) * x[i, :] * x[j, :].T
                b2 = b - Ej - y[i] * (alphas[i] - alphaIold) * x[i, :] * x[j, :].T - y[j] * (alphas[j] - alphaJold) * x[j, :] * x[j, :].T
                if (0 < alphas[i]) and (C > alphas[i]): b = b1
                elif (0 < alphas[j]) and (C > alphas[j]): b = b2
                else : b = (b1 + b2) / 2.0
                alphaPairsChanges += 1
                #print ("iter :%d i:%d,paris changed %d" % (niter, i, alphaPairsChanges))
        # Count only consecutive change-free full passes toward maxIter.
        if (alphaPairsChanges == 0):niter += 1
        else :niter = 0
        #print ("iteration number:%d" % niter)
    return b, alphas
def loadDataSet(fileName):
    """Read a tab-separated file of "x1<TAB>x2<TAB>label" rows.

    Returns (dataMat, labelMat): dataMat is a list of [x1, x2] float pairs,
    labelMat the list of float labels.
    """
    dataMat = [];labelMat = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(fileName) as fr:
        for line in fr:
            lineArr = line.strip().split('\t')
            if lineArr == ['']:
                continue  # tolerate blank/trailing lines instead of crashing
            dataMat.append([float(lineArr[0]), float(lineArr[1])])
            labelMat.append(float(lineArr[2]))
    return dataMat, labelMat
def predict(alphas, b, y, x, dt):
    """Evaluate the SMO decision function for sample dt:
    f(dt) = sum_i alpha_i * y_i * <x_i, dt> + b (linear kernel)."""
    kernel_column = x * dt.T                 # <x_i, dt> for every training row
    weighted = np.multiply(alphas, y).T      # row vector of alpha_i * y_i
    return float(weighted * kernel_column) + b
if __name__ == '__main__':
    path = '..\\ch2_classification\\iris.data' # path to the iris data file
    data = pd.read_csv(path, header=None)
    x, y = data[[0,1,2,3]], data[4]
    y = pd.Categorical(y).codes
    x = x[[0, 1]] # keep only sepal length and sepal width
    # Collapse the three iris classes into two and relabel y as -1 / +1.
    y1 = y.copy()
    y1[y==0] = -1
    y1[y==1] = 1
    y1[y==2] = 1
    y = y1
    #x, y = loadDataSet('testSet.txt')
    # Fit sklearn's SVC as a reference model.
    mode = svm.SVC(C=0.6,kernel='linear', decision_function_shape='ovr')
    mode.fit(x,y)
    print(mode.dual_coef_, mode.intercept_)
    # test = mode.predict(x[0,:])
    # print(test)
    # print "iter :%d i:%d,paris changed %d" % (1, 2, 3)
    #
    #
    # Run the hand-rolled simplified SMO with the same C for comparison.
    b, alphas = smoSimple(x, y, 0.6, 0.001, 40)
    print (b, alphas[alphas > 0])
    #
    # # print the support vectors
    # shape(alphas[alphas > 0])
    # for i in range(100):
    #     if alphas[i] > 0:
    #         print dataMat[i], labelMat[i]
    # # plotting (disabled)
    # x1_min, x2_min = x.min()
    # x1_max, x2_max = x.max()
    # x1, x2 = np.mgrid[x1_min:x1_max:500j, x2_min:x2_max:500j]  # grid sample points
    # grid_test = np.stack((x1.flat, x2.flat), axis=1)  # test points
    # grid_hat = mode.predict(grid_test)  # predicted classes
    # grid_hat = grid_hat.reshape(x1.shape)  # reshape back to the grid
    #
    # cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
    # cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
    # plt.figure(facecolor='w')
    # plt.pcolormesh(x1, x2, grid_hat, cmap=cm_light)
    # plt.scatter(x[0], x[1], c=y, edgecolors='k', s=50, cmap=cm_dark)  # samples
    # plt.xlim(x1_min, x1_max)
    # plt.ylim(x2_min, x2_max)
    # plt.grid(b=True, ls=':')
    plt.show()
| true |
9fbabc6a1649ca02c9be32758b9a1f8739c23633 | Python | oadams/inflection-kws | /uam/asr1/local/prepare_universal_lexicon.py | UTF-8 | 3,690 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright 2017 Johns Hopkins University (Author: Matthew Wiesner)
# Apache 2.0
###############################################################################
#
# This script takes a kaldi formatted lexicon prepared by
#
# local/prepare_lexicon.pl (i.e. a lexicon that uses the X-SAMPA phoneset)
#
# and makes language specific modifications to further standardize the
# lexicons across languages. These modifications are based on language speficic
# diphthong and tone files that contain a mapping from diphthongs to other
# X-SAMPA phonemes, and from the Tone markers to a standardized tone marking
# (see universal_phone_maps/tones/README.txt for more info about tone).
#
# This script returns the resulting standardized lexicon.
#
###############################################################################
from __future__ import print_function
import argparse
import codecs
import os
def parse_input():
parser = argparse.ArgumentParser()
parser.add_argument("olexicon", help="Output kaldi format lexicon.")
parser.add_argument("lexicon", help="Kaldi format lexicon")
parser.add_argument("map", help="File with map")
return parser.parse_args()
def main():
args = parse_input()
# load map
dp_map = {}
tone_map = {}
try:
with codecs.open(args.map, "r", encoding="utf-8") as f:
for l in f:
phone, split_phones = l.split(None, 1)
if phone.startswith("_"):
tag = phone.replace("_", "")
tone_map[tag] = split_phones.replace("_", "").split()
else:
phone, split_phones = l.split(None, 1)
dp_map[phone] = split_phones.split()
except IOError:
print("The provided map does not exist. We proceed assuming all "
"phonemes are already in the standardized form ...")
# Process lexicon
lexicon_out = []
with codecs.open(args.lexicon, "r", encoding="utf-8") as f:
# Read in each line storing the word, and pronunciation. Split each
# pronunciation into its consituent phonemes. For each of these
# phonemes, recover any tags ("x_TAG"), and replace them if needed
# with a new tag symbol(s) if required by the tone mapping.
for l in f:
word, pron = l.split(None, 1)
new_pron = ""
for p in pron.split():
try:
p, tags = p.split("_", 1)
# Process tags
tags = tags.split("_")
new_tags = []
for t in tags:
try:
new_tags += tone_map[t]
except KeyError:
new_tags += t
except ValueError:
new_tags = []
# Process diphthongs
try:
new_phones = dp_map[p]
except KeyError:
new_phones = [p]
# Join tags and phones
for nph in new_phones:
new_pron += "_".join([nph] + new_tags) + " "
lexicon_out.append((word, new_pron))
# Write new lexicon. Check output path and create any necessary
# intermediate directories
#if (not os.path.exists(os.path.dirname(args.olexicon))):
# os.makedirs(os.path.dirname(args.olexicon))
with codecs.open(args.olexicon, "w", encoding="utf-8") as f:
for w, new_pron in lexicon_out:
print(u"{}\t{}".format(w, new_pron.strip()), file=f)
if __name__ == "__main__":
main()
| true |
e6b67933e4defc9ea3853f124a3a488d9f56c404 | Python | beiluo-horizon/Machine-Learning-Model | /RandomTree_Method.py | UTF-8 | 6,935 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 27 15:30:09 2019
@author: 81479
"""
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from result import result_process
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
class MY_RandomForest:
'''
封装SVM
'''
def __init__(self,normalize=True,random_state = None,max_features = 'auto',
n_estimators = 100,criterion = 'gini',oob_score = True):
'''
normalize 是否在SVM过程前进行标准化
random_state 随机种子
max_features 每棵树的特征最大值
n_estimators 森林中树的数量
criterion 可选 gini entropy
oob_score 是否选用现成的样本估计泛化精度
执行完后其添加的对象有:
self.best_param 字典形式的最佳参数
self.svm 训练完成的SVM模型
self.support_vec 训练得到的支持向量
self.predict_label 预测标签
'''
self.normalize_ = normalize
self.random_state_ = random_state
self.max_features_ = max_features
self.n_estimators_ = n_estimators
self.criterion_ = criterion
self.oob_score_ = oob_score
self.best_param = None
self.Scaler = None
def RF_gridsearchCV(self,data,label,param_grid = None,n_splits = 5):
'''
利用网格搜索+交叉验证找最佳参数
data 训练数据
label 训练标签
param_grid 网格搜索参数,字典形式
n_splits 训练时的折数
'''
normalize_ = self.normalize_
random_state_ = self.random_state_
oob_score_ = self.oob_score_
trainDataScale = data
scaler = None
if normalize_ is True:
scaler = preprocessing.StandardScaler().fit(data)
trainDataScale = scaler.transform(data)
feature_num = trainDataScale.shape[1]
if param_grid is None:
param_grid = {'n_estimators': [10, 30, 60, 100,1000],
'criterion': ['gini','entropy'],
'max_features':[1,int(0.3*feature_num),int(0.5*feature_num),int(0.7*feature_num)]
} #指数尺度找最佳参数
kFold = StratifiedKFold(n_splits=n_splits,random_state = 777) #设置折
grid_search = GridSearchCV(RandomForestClassifier(random_state=random_state_,oob_score=oob_score_,class_weight='balanced'),
param_grid, cv=kFold,n_jobs = -1) #网格搜索+交叉验证
grid_search.fit(trainDataScale,label) #用参数训练
best_score = grid_search.best_score_ #所有折数中的最好成绩
means = grid_search.cv_results_['mean_test_score']
means = np.mean(means)
self.mean_score = means
self.Scaler = scaler
self.best_param = grid_search.best_params_
self.TrainDataScaler = trainDataScale
return self.best_param,best_score
def fit (self,train_data,train_label):
'''
随机森林训练过程
如果在fit之前执行了SVM_gridsearchCV函数则不需要管max_features和n_estimators,criterion
train_data,train_label 训练用数据以及标签
'''
normalize_ = self.normalize_
random_state_ = self.random_state_
oob_score_ = self.oob_score_
max_features_ = self.max_features_
n_estimators_ = self.n_estimators_
criterion_ = self.criterion_
trainDataScale = train_data
if self.best_param is None: #没有找最优参数
if normalize_ is True:
scaler = preprocessing.StandardScaler().fit(train_data)
trainDataScale = scaler.transform(train_data)
self.Scaler = scaler
self.TrainDataScaler = trainDataScale
classifier = RandomForestClassifier(random_state=random_state_,
oob_score=oob_score_,
max_features=max_features_,
n_estimators=n_estimators_,
criterion = criterion_,
class_weight='balanced')
if self.best_param is not None:
if normalize_ is True:
scaler = self.Scaler
trainDataScale = scaler.transform(train_data)
classifier = RandomForestClassifier(**self.best_param,
oob_score=oob_score_,
random_state=random_state_,
class_weight='balanced')
classifier.fit(trainDataScale,train_label)
obb_score = classifier.oob_score_
self.RandomForest = classifier
self.obb_score = obb_score
return self
def predict (self,test_data):
'''
SVM的预测过程
test_data 需要预测的实时数据
'''
classifier = self.RandomForest
normalize_ = self.normalize_
scaler = self.Scaler
testDataScale = test_data
if normalize_ is True:
testDataScale = scaler.transform(test_data)
predict_label = classifier.predict(testDataScale)
self.predict_label = predict_label
return self.predict_label
if __name__ == '__main__':
trainData = np.loadtxt('./PEMFC_Data/trainData0.35.txt')
testData = np.loadtxt('./PEMFC_Data/testData0.65.txt')
trainLabel = np.loadtxt('./PEMFC_Data/trainLabel0.35.txt')
testLabelFC = np.loadtxt('./PEMFC_Data/testLabel0.65.txt')
RF = MY_RandomForest(normalize = False,random_state = None,max_features = 'auto',
n_estimators = 100,criterion = 'gini',oob_score = True)
best_para,best_score = RF.RF_gridsearchCV(trainData,trainLabel)
'''
best_para = {'C':1000,'gamma':10}
best_score = 0.994785
'''
RF.fit(trainData,trainLabel)
Predict_Label = RF.predict(testData)
result_process(testLabelFC,Predict_Label,Drow = True)
'''
平均召回率:0.8047189891762149
各类故障的召回率:[0.80806949 0.99759036 0.21937843 0.86956522 0.7400906 0.89158644
0.98183556 0.78277828 0.86569718 0.9955036 ]
''' | true |
faa634e2a894a48538da8bccc0d681f8801829b6 | Python | zaberfire/TermProject | /Asteroid.py | UTF-8 | 2,648 | 3.078125 | 3 | [] | no_license | import math
import random
from Tkinter import *
from PIL import Image, ImageTk
class Asteroid(object):
    """A rotating, bouncing asteroid sprite for a Tkinter/PIL game.

    `self.image` is kept as [current rotated PIL image, unrotated base PIL
    image, Tk PhotoImage of the current frame].
    """
    @staticmethod
    def init():
        # Load the shared sprite once before any Asteroid is constructed.
        Asteroid.image = Image.open("images/asteroids2.png")
    maxSpeed = 7   # max |velocity| component, px per tick
    minSize = 2    # smallest level; asteroids at this level no longer split
    maxSize = 7    # largest level used for random spawns and scaling
    def __init__(self, x, y, level = None):
        # level drives both the sprite scale and the hit "power".
        if level is None:
            level = random.randint(Asteroid.minSize, Asteroid.maxSize)
        self.level = level
        self.x = x
        self.y = y
        self.r = self.power = 7*self.level
        if self.r > 50: self.r = 50
        self.image = [Asteroid.image, ImageTk.PhotoImage(Asteroid.image)]
        PILimg = self.image[0]
        # Scale the sprite proportionally to level / maxSize.
        width = height = self.r * 3
        factor = (self.level*1.) / Asteroid.maxSize
        width = height = int(width * factor)
        PILimg = baseImg = PILimg.resize((width, height), Image.ANTIALIAS)
        self.r = width/2.  # radius now reflects the scaled sprite
        self.image = [PILimg, baseImg, ImageTk.PhotoImage(PILimg)]
        self.angle = 0
        # Nonzero spin so the asteroid always rotates.
        self.angleSpeed = random.randint(-10, 10)
        if self.angleSpeed == 0: self.angleSpeed += 5
        # Nonzero velocity so the asteroid always drifts.
        vx = random.randint(-Asteroid.maxSpeed, Asteroid.maxSpeed)
        vy = random.randint(-Asteroid.maxSpeed, Asteroid.maxSpeed)
        if vx == 0 and vy == 0:
            vx += 2
            vy += 2
        self.velocity = (vx, vy)
    def __repr__(self):
        vx, vy = self.velocity
        return "Asteroid at (%d, %d) going (%d, %d)" % (self.x, self.y, vx, vy)
    def update(self, data):
        """Advance one tick: rotate the sprite, move, and bounce off the
        field edges (data.fieldSizeW / data.fieldSizeH)."""
        # rotate asteroid (always from the unrotated base to avoid drift)
        self.angle += self.angleSpeed
        PILimg = self.image[0]
        baseImg = self.image[1]
        PILimg = baseImg.rotate(self.angle)
        self.image = [PILimg, baseImg, ImageTk.PhotoImage(PILimg)]
        vx, vy = self.velocity
        self.x += vx
        self.y += vy
        # Reflect the velocity component when a wall is crossed.
        if ((self.x + self.r > data.fieldSizeW) or \
            (self.x - self.r < 0)):
            self.velocity = (-1 * vx, vy)
        if ((self.y + self.r > data.fieldSizeH) or \
            (self.y - self.r < 0)):
            self.velocity = (vx, -1 * vy)
    def breakApart(self):
        """Split into two smaller asteroids at the same spot, or return []
        when already at the minimum level."""
        if self.level <= Asteroid.minSize:
            return []
        else:
            ast1 = Asteroid(self.x, self.y, self.level - 1)
            ast2 = Asteroid(self.x, self.y, self.level - 1)
            return [ast1, ast2]
    def draw(self, canvas, data):
        """Draw the current frame, offset by the camera scroll position."""
        x = self.x - data.scrollX
        y = self.y - data.scrollY
        canvas.create_image(x,y,image = self.image[2])
| true |
def twoSum(nums, target):
    """Return the indices [i, j] (i < j) of two entries of nums that sum to
    target, or [] when no pair exists.

    Single O(n) pass with a value->index map. Unlike the original this
    handles duplicates directly, never mutates the caller's list, and does
    not crash (unbound 'calon') when no solution exists.
    """
    seen = {}  # value -> index of its first occurrence
    for i, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            return [seen[complement], i]
        seen[value] = i
    return []
print(twoSum([3,3],6))  # duplicate-value case: expect [0, 1]
| true |
e7ed8ffb4fce774b1c980ae31c02ce55c6e89182 | Python | MuseumofModernArt/moma-utils | /reporting-tools/fixity_bdwidth.py | UTF-8 | 1,751 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import argparse, csv, urllib2, json, base64, getpass
from hurry.filesize import size, si
# CLI: read a granular ingest report CSV and, for each row with an ingest
# date, look the AIP up in Binder's API and record its size (Python 2).
parser = argparse.ArgumentParser(description="script that uses Binder's API to add AIP size to the granular ingest report")
parser.add_argument('-i', '--input', type=str, required=True, help='source data file.')
parser.add_argument('-o', '--output', type=str, required=False, help='where to put output')
parser.add_argument('-u', '--username', type=str, help='Binder username')
args = parser.parse_args()
if not (args.input):
    parser.error('you did not specify a report file')
if not (args.username):
    parser.error('you did not supply a username')
# Prompt for the Binder password instead of taking it on the command line.
password = getpass.getpass("Enter your password: ")
firstline = True
with open(args.input, 'rb') as csvfile:
    c = csv.writer(open(args.output, "wb"))
    c.writerow(["ingest date","size","UUID"])
    orig_report = csv.reader(csvfile, delimiter=',')
    for row in orig_report:
        # Skip the source report's header row.
        if firstline:
            firstline = False
            continue
        uuid = row[1]
        print "checking "+ uuid
        print "row check "+ row[0]
        # Only rows with a non-empty first column (ingest date) are queried.
        if row[0] != "":
            request = urllib2.Request("http://drmc.museum.moma.org/api/aips/"+uuid)
            # HTTP basic auth header built from username:password.
            base64string = base64.encodestring('%s:%s' % (args.username, password)).replace('\n', '')
            request.add_header("Authorization", "Basic %s" % base64string)
            try:
                result = urllib2.urlopen(request)
                start_date = row[4]
                end_date = row[5]
                # Drop the trailing time portion, keeping the date prefix.
                start_date_trimmed = start_date[:-10]
                end_date_trimmed = end_date[:-10]
                data = json.load(result)
                size = data['size']
                print start_date_trimmed, end_date_trimmed, size, uuid
                c.writerow([start_date_trimmed,end_date_trimmed,size,uuid])
            except urllib2.HTTPError, e:
                # AIP missing from Binder: report and move on.
                print "Could not find AIP! Error code "
                print e.args
8f0a0e04aacce7ed5c401e2061097be51729b3cc | Python | JoseTg1904/-LFP-Proyecto2_201700965 | /arbol.py | UTF-8 | 1,099 | 3.03125 | 3 | [] | no_license | class ArbolS():
def __init__(self,tamanio,nodos):
self.tamanio = 0
self.nodos = []
def agregar(self,valor,idenPadre,idenHijo):
if self.tamanio == 0:
self.nodos.append(Nodo(valor,idenPadre,[]))
self.tamanio += 1
else:
for val in self.nodos:
if val.identificador == idenPadre:
nodo = Nodo(valor,idenHijo,[])
val.hijos.insert(0,nodo)
self.nodos.append(nodo)
self.tamanio += 1
break
def generarGrafo(self):
dot = "digraph G{\nrankdir=TB\n"
for val in self.nodos:
dot += val.identificador +" [ label ="+'"'+val.valor+'" ]\n'
for val1 in val.hijos:
dot += val.identificador +"->"+ val1.identificador +"\n"
dot += "}"
return dot
class Nodo():
def __init__(self,valor,identificador,hijos):
self.valor = valor
self.identificador = identificador
self.hijos = hijos
| true |
673aabd60d7ab29649c29db3edd2908749ad1d98 | Python | MevlutArslan/neural-networks-from-scratch-book | /main/classes/neuron.py | UTF-8 | 591 | 3.453125 | 3 | [] | no_license | import numpy as np
class Neuron():
    """A single artificial neuron: output = dot(weights, inputs) + bias."""

    def __init__(self, inputs: list, weights: list, bias: float):
        self.inputs = inputs
        self.weights = weights
        self.bias = bias
        self.number_of_inputs = len(inputs)

    def calculate_output(self):
        """Weighted sum computed in pure Python, bias added at the end."""
        total = 0
        for idx in range(self.number_of_inputs):
            total += self.inputs[idx] * self.weights[idx]
        return total + self.bias

    def calculate_output_numpy(self):
        """Same weighted sum, delegated to numpy's dot product."""
        return np.dot(self.weights, self.inputs) + self.bias
ab824ed434136c4103b842b2012f0cb55ee0c87b | Python | Noba1anc3/Machine_Learning | /python_standard/lesson7-if条件.py | UTF-8 | 821 | 3.65625 | 4 | [] | no_license |
# coding: utf-8
# In[ ]:
'''
Comparison operators:
>   greater than
>=  greater than or equal
<   less than
<=  less than or equal
==  equal
!=  not equal
'''
# In[1]:
a = 1
b = 2
c = 3
d = 1
if a>b:
    print("right")
# In[2]:
if a>=d:
    print("right")
# In[3]:
if a==d:
    print("right")
# In[4]:
if a!=b:
    print("right")
# In[5]:
# Chained comparison: equivalent to (a < b) and (b < c).
if a<b<c:
    print("right")
# In[6]:
# Chained comparison: equivalent to (a < b) and (b > c).
if a<b>c:
    print("right")
# In[7]:
if 1 < 100:
    print("right")
# In[8]:
if 1 > 100:
    print("right")
else:
    print("wrong")
# In[9]:
# if / elif / else chain: only the first matching branch runs.
a=0
b = 2
c = 3
d = 1
if a == b:
    print("a==b")
elif a == c:
    print("a==c")
elif a == d:
    print("a==d")
else:
    print(a)
# In[11]:
a=1
if a == b:
    print("a==b")
elif a == c:
    print("a==c")
elif a == d:
    pass # do nothing for this branch
else:
    print(a)
# In[ ]:
# (empty notebook cell)
# In[ ]:
| true |
7ac1c4582f35815af242fd2736287395730c07b5 | Python | isikveren/demo | /python/6/6_9.py | UTF-8 | 255 | 3.453125 | 3 | [] | no_license | prompt = "\nTell me somthing, and I will repeat it it to you:"
prompt += "\nEnter 'quit' to end the program."
# NOTE(review): int(input(...)) raises ValueError on any non-numeric entry,
# including the advertised 'quit' — presumably leftover from the book's
# string version of this exercise; confirm the intended behaviour.
message = int(input(prompt))
# Count up from the entered number to 100, using `continue` to skip evens
# so only odd values are printed.
while message <=100:
    message += 1
    if message % 2 == 0:
        continue
    print(message)
print('\t')
3fc591b1898bb9b8ced1d1c034f11d6d5fad765d | Python | dudamarlena/self-balancing-robot | /memory/experience.py | UTF-8 | 635 | 3.265625 | 3 | [] | no_license | """ Module with Experience of agent getting in each state """
from typing import NamedTuple
import numpy as np
class Experience(NamedTuple):
    """One agent transition: (state, action, reward, done, next_state)."""
    state: np.ndarray
    action: np.ndarray
    reward: float
    done: bool
    next_state: np.ndarray

    @classmethod
    def create_experience(cls, state, action, reward, done, next_state) -> 'Experience':
        """Alternate constructor mirroring the tuple's field order."""
        return cls(state, action, reward, done, next_state)
| true |
51cace9854cc619c1fc4d79ee20150a3a23ff6aa | Python | guoqchen1001/microservice | /microservice/controllers/auth.py | UTF-8 | 2,718 | 2.515625 | 3 | [] | no_license | from flask_restful import abort,current_app,Resource
from functools import wraps
from .parsers import AuthParser
from ..models import User
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer
, BadSignature, SignatureExpired)
from .base import ErrorCode
class AuthApi(Resource):
    """User authentication: issues signed tokens and provides the
    auth_required decorator that protects other endpoints."""
    def post(self):
        """
        @api {post} /api/auth Obtain a token
        @apiVersion 1.0.0
        @apiName auth
        @apiGroup security
        @apiParam {string} user_no (required) user name
        @apiParam {string} password (required) password
        @apiSuccess (response) {string} Token the signed token
        @apiSuccessExample {json} Success-Response:
            {
                "Token": "1234567890"
            }
        @apiUse UserNotFoundError
        """
        args = AuthParser.post.parse_args()
        user = User.query.get(args['user_no'])
        # Same message for "no such user" and "bad password" so callers
        # cannot probe which user names exist.
        if not user:
            abort(400, message="用户名不存在或者密码错误", code=ErrorCode.user_not_found.value)
        if not user.check_password(args["password"]):
            abort(400, message="用户名不存在或密码错误", code=ErrorCode.user_not_found.value)
        # Timed serializer: tokens expire after TOKEN_EXPIRES_IN seconds
        # (600s fallback when the config value is unset/falsy).
        s = Serializer(
            current_app.config['SECRET_KEY'],
            expires_in=current_app.config['TOKEN_EXPIRES_IN'] or 600
        )
        return {'Token': s.dumps({"user_no": user.user_no})}
    @staticmethod
    def auth_required(func):
        # Decorator: validates the request's Token, loads the User and
        # injects it into the wrapped view as kwargs['user'].
        @wraps(func)
        def _warpper(*args, **kwargs):
            r_args = AuthParser.auth.parse_args()
            token = r_args['Token']
            if not token:
                abort(400, message="未检测到token", code=ErrorCode.signature_required.value)
            user_no = ""  # user id recovered from the token payload
            s = Serializer(current_app.config['SECRET_KEY'])
            try:
                data = s.loads(token)
                user_no = data['user_no']
            except SignatureExpired:
                abort(403, message="token已过期", code=ErrorCode.signature_expired.value)
            except BadSignature:
                abort(401, message="token不合法", code=ErrorCode.signature_required.value)
            user = User.query.get(user_no)
            if not user:
                abort(400, message="用户{}不存在".format(user_no), code=ErrorCode.user_not_found.value)
            if not user.isvalid():
                abort(400, message="用户{}无权限,请联系管理员开通".format(user_no), code=ErrorCode.permission_not_allowed.value)
            kwargs.update({'user': user})
            return func(*args, **kwargs)
        return _warpper
| true |
bd1dc930b3dd2e495359983dc7b7c0141e036c57 | Python | mdeeds/genie | /py/modelStrategy.py | UTF-8 | 3,078 | 2.84375 | 3 | [
"MIT"
] | permissive | import random
import tensorflow as tf
import numpy as np
import ticTacToe as game
class ModelStrategy:
    """Neural-network move picker: a small dense softmax model mapping a game
    state to a move distribution, with a per-state move cache and uniform
    exploration noise on every returned move."""
    stateSize = 0
    moveSize = 0
    model = None
    dictionaryModel = None      # model snapshot the cache below was built for
    moveNoise = 0.0
    moveDictionary = dict()     # str(state) -> predicted move vector
    thisGame = None

    def __init__(self, game, moveNoise=0.05):
        self.stateSize = game.getStateSize()
        self.moveSize = game.getMoveSize()
        self.moveNoise = moveNoise
        self.thisGame = game
        # state -> flatten -> two 12-unit ReLU layers -> softmax over moves
        input = tf.keras.layers.Input(shape=(game.getStateSize()))
        flat = tf.keras.layers.Flatten()(input)
        l1 = tf.keras.layers.Dense(units=12, activation='relu')(flat)
        l2 = tf.keras.layers.Dense(units=12, activation='relu')(l1)
        o = tf.keras.layers.Dense(
            units=game.getMoveSize(), activation='softmax')(l2)
        self.model = tf.keras.models.Model(inputs=input, outputs=o)
        self.model.compile(
            optimizer=tf.keras.optimizers.Adam(0.0001),
            loss=tf.keras.losses.MeanSquaredError(),
            # metrics=['accuracy'])
            metrics=[tf.keras.metrics.MeanSquaredError()]
        )
        self.model.summary()

    def applyNoise(self, move, noiseLevel):
        """Return move with uniform noise in [-noiseLevel/2, +noiseLevel/2)
        added to every element.

        BUG FIX: the original added noise to the loop variable only
        (`element += ...`), which never modified `move` at all; build the
        perturbed copy explicitly.
        """
        return [element + (random.random() - 0.5) * noiseLevel
                for element in move]

    def getMove(self, state):
        """Predicted (noisy) move vector for `state`, served from the cache
        when the cache matches the current model."""
        if self.model == self.dictionaryModel:
            if str(state) in self.moveDictionary:
                move = self.moveDictionary[str(state)]
                move = self.applyNoise(move, self.moveNoise)
                return move
        else:
            # Model changed since the cache was built: rebuild it over all
            # reachable states with one batched prediction.
            self.dictionaryModel = self.model
            self.moveDictionary = dict()
            states = self.thisGame.getPossibleStates()
            inputTensor = tf.constant(states, 'float32')
            moveTensor = self.model.predict(inputTensor)
            # BUG FIX: the original keyed every cached move under str(state)
            # (the argument), so only one entry ever survived; key each move
            # by its own state instead.
            for s, move in zip(states, moveTensor):
                self.moveDictionary[str(s)] = move
        inputTensor = tf.constant([state], 'float32')
        moveTensor = self.model.predict(inputTensor)
        move = moveTensor[0]
        self.moveDictionary[str(state)] = move
        move = self.applyNoise(move, self.moveNoise)
        return move

    def load(self, fname):
        """Replace the current model with one loaded from fname."""
        self.model = tf.keras.models.load_model(fname)

    def save(self, fname):
        """Persist the current model to fname."""
        self.model.save(fname)

    def train(self, states, moves):
        """Fit the model on (state, move) pairs for 5 epochs, checkpointing
        the best validation loss, then reload that best checkpoint."""
        checkpoint = tf.keras.callbacks.ModelCheckpoint(
            "checkpoint.hdf5", monitor='val_loss', verbose=1,
            save_best_only=True, save_weights_only=False)
        inputTensor = tf.constant(states, 'float32')
        outputTensor = tf.constant(moves, 'float32')
        history = self.model.fit(inputTensor, outputTensor, epochs=5, verbose=0,
                                 shuffle=True, validation_split=0.2, callbacks=[checkpoint])
        # load the most recently saved because the last might not be the best
        self.model = tf.keras.models.load_model("checkpoint.hdf5")
        return history
| true |
def getBonuses(performance):
    """Each employee starts with 1 bonus and earns +1 for every immediate
    neighbour they strictly out-perform (first/last have one neighbour).

    BUG FIX: the original indexed performance[i+1] unconditionally for the
    first element, crashing with IndexError on a single-element list.
    Handles the empty list as well.
    """
    if not performance:
        return []
    n = len(performance)
    bonus = [1] * n
    for i in range(n):
        if i > 0 and performance[i] > performance[i - 1]:
            bonus[i] += 1
        if i < n - 1 and performance[i] > performance[i + 1]:
            bonus[i] += 1
    return bonus
# Demo run matching the documented example below.
print(getBonuses([1, 2, 3, 2, 3, 5, 1]))
# [1, 2, 3, 1, 2, 3, 1]
#Input: [1, 2, 3, 2, 3, 5, 1]
#Output: [1, 2, 3, 1, 2, 3, 1]
6a5827f0be8c5b716b2a59475bb5c5c8390657a0 | Python | kimsungbo/Algorithms | /백준/유니온파인드/4195_친구네트워크.py | UTF-8 | 873 | 3.296875 | 3 | [] | no_license | # 4195 친구 네트워크
# 유니온 파인드에 집합의 크기를 구하는 기능을 넣는 문제
import sys
input = sys.stdin.readline
t = int(input())
def Find(parents, x):
    """Root of x's set, compressing every node on the walked path so it
    points directly at the root (same end state as the recursive version)."""
    root = x
    while parents[root] != root:
        root = parents[root]
    # Second pass: rewire the traversed chain straight to the root.
    while parents[x] != root:
        parents[x], x = root, parents[x]
    return root
def Union(parents, a, b, cnt):
    """Merge b's set into a's; cnt[root] tracks each set's member count."""
    root_a, root_b = Find(parents, a), Find(parents, b)
    if root_a == root_b:
        return
    parents[root_b] = root_a
    cnt[root_a] += cnt[root_b]
for i in range(t):
    # One test case: f friendship operations.
    f = int(input())
    parents = {}  # name -> parent name (union-find forest)
    cnt = {}      # root name -> size of its set
    for j in range(f):
        a, b = map(str, input().split())
        # Register unseen people as singleton sets of size 1.
        if a not in parents:
            parents.setdefault(a, a)
            cnt[a] = 1
        if b not in parents:
            parents.setdefault(b, b)
            cnt[b] = 1
        Union(parents, a, b, cnt)
        # Size of the friend network containing the freshly joined pair.
        print(cnt[Find(parents, a)])
747c4fa6fbab1e5c8ac55a6c63b097c8379da370 | Python | rajeshsvv/Lenovo_Back | /1 PYTHON/1 EDUREKA/EDUREKA NEW/40_File_Operations.py | UTF-8 | 574 | 3.140625 | 3 | [] | no_license | import os
# Create/truncate the file in write+read mode.
newfile=open("Edureka.txt","w+")
# newfile.close() # uncommenting this causes "I/O operation on closed file" below
# write mode
# for i in range(1,10):
#     newfile.write("\n Welcome to Python")
# Read Mode
# NOTE(review): opens "Edurekha.txt" (different spelling) — this raises
# FileNotFoundError unless the os.rename at the bottom ran previously.
newfile=open("Edurekha.txt","r")
for i in range(1,10):
    # print(newfile.read())
    print(newfile.read(300))  # read in 300-character chunks
print(newfile.mode)  # the open mode ('r')
print(newfile.name)  # the file name
print(newfile)
# print(newfile.softspace)
newfile.seek(100)    # jump to byte offset 100
print(newfile.read())
print(newfile.tell())  # current offset (end of file after the read)
# rename the file
# os.rename("Edureka.txt","Edurekha.txt")
| true |
48f22ec1d66d7e1a7b1aa88d5abaeab367448a22 | Python | s-owl/algossstudy | /harvey/11048.py | UTF-8 | 700 | 2.90625 | 3 | [] | no_license | import sys
def solv(l):
    """Maximum-sum path from (0,0) to (n-1,m-1) moving only right or down
    (Baekjoon 11048 dynamic programming)."""
    rows, cols = len(l), len(l[0])
    best = [row[:] for row in l]
    # Seed the first row/column: only one way to reach each of those cells.
    for c in range(1, cols):
        best[0][c] += best[0][c - 1]
    for r in range(1, rows):
        best[r][0] += best[r - 1][0]
    # Interior cells take the better of the two incoming directions.
    for r in range(1, rows):
        for c in range(1, cols):
            best[r][c] += max(best[r - 1][c], best[r][c - 1])
    return best[rows - 1][cols - 1]
if __name__ == '__main__':
    # Fast input: bind stdin's readline once for the whole run.
    read = lambda : sys.stdin.readline()
    rows, cols = map(int, read().split())
    l = [[0]*cols for _ in range(rows)]
    for i in range(rows):
        l[i] = list(map(int, read().split()))
    print(solv(l))
| true |
922281d44a2b8ae1dac92dc37b7b4e7775e20b34 | Python | keeyon2/Python-Practice | /test/test_two_arg.py | UTF-8 | 825 | 2.859375 | 3 | [] | no_license | import unittest
import sys
import os
from practicemodules import twoargprog
print "First Line in test"
# filename = "practicemodules/twoargprog"
# sys.path.insert(0, os.path.dirname(filename))
class TestTwoArgProgramFunctions(unittest.TestCase):
    """Unit tests for practicemodules.twoargprog (Python 2 style prints)."""
    def setUp(self):
        # self.twoargprog = practicemodules.twoargprog
        print "running Test"
    def test_adding_two_numbers(self):
        # Verifies 1 + 2 == 3 through the module under test.
        print "Running Test test_adding_two_numbers"
        warning = "add_two_numbers gave an incorrect addition value"
        self.assertEqual(twoargprog.add_two_numbers(1, 2), 3, warning)
    # def test_adding_mult_args(self):
    #     self.assertEqual(self.twoargprog.mult_arguments_addition(1,2,3,4),
    #         10, "mult_arguments_addition gave incorrect return value")
if __name__ == '__main__':
    unittest.main()  # discover and run the TestCase above
| true |
827438c995bf32905893bde8ed0b74018ae25d93 | Python | ntulsy/NTU_CZ3003_Extinguisher | /OS/Subscription/SMSSender.py | UTF-8 | 1,988 | 2.765625 | 3 | [] | no_license | from twilio.rest import TwilioRestClient
import threading
from Subscription import Subscription
import latlng
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def the_print(info):
print OKBLUE + "[SMS]" + ENDC, info
class SMSSender(threading.Thread):
def __init__(self, msg, gps):
threading.Thread.__init__(self)
self.account_sid = '__REMOVED__'
self.auth_token = '__REMOVED__'
self.client = TwilioRestClient(self.account_sid, self.auth_token)
self.msg = msg
self.gps = gps
def send_message(self, user, message):
""" send one SMS to user, with given message
Differentiate this method with run() which sends SMS to all users.
"""
try:
self.client.messages.create(body=message, to=user, from_='+19794315676')
except Exception:
print "send SMS to subscriber failed."
def run(self):
"""Trigger thread run and send message (given during construction) to all subscribed users
"""
the_print("thread starts. sender ready.")
subscriptions = Subscription.query.all()
for s in subscriptions:
if s.handphone_verified == 1:
the_print("prepare sms to " + s.handphone)
if self.gps is None:
self.send_message(s.handphone, self.msg)
continue
distance = latlng.distance(self.gps['lat'], self.gps['lng'], s.latitude, s.longtitude)
the_print("distance: " + str(distance) + "km")
if distance <= 10:
the_print("within distance, sent sms to " + s.handphone)
self.send_message(s.handphone, self.msg)
else:
the_print("too far, do not sent to" + s.handphone)
the_print("all SMS sent. thread finished.") | true |
6adea29b24b2ba3441753854e0bb65f379f3f8cb | Python | agl10/spline_fitting | /my_functions.py | UTF-8 | 19,219 | 3.125 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 12:53:45 2018
@author: andy
"""
from scipy.interpolate import CubicSpline
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import splprep, splev
import time
from sklearn.neighbors import NearestNeighbors
import networkx as nx
from mpl_toolkits.mplot3d import Axes3D
import math
def read_xyz(filename):
    """
    DESCRIPTION
    Read a whitespace-separated text file of 3D points into a numpy array.
    Each line must contain exactly three fields: x, y and z.

    :param filename: path to the text file.
    :return: numpy array, Nx3 array of x-y-z points.
    """
    # Read all lines in one pass inside a context manager.  The original
    # opened the file twice (once to count lines, once to parse) and never
    # closed the second handle, leaking it.
    with open(filename) as xyz:
        lines = xyz.readlines()

    xyz_coords = np.zeros((len(lines), 3))
    for i, line in enumerate(lines):
        # numpy coerces the string fields to float on assignment.
        x, y, z = line.split()
        xyz_coords[i, 0] = x
        xyz_coords[i, 1] = y
        xyz_coords[i, 2] = z
    return xyz_coords
def full_seam_finder_0(xyz_coords, decimation=10, num_of_neighbs=25, minimum_distance=.010, dtheta=.05,
                       z_order_asscending=True):
    """
    This is the instantiation of a full seam finder.
    :param xyz_coords: Mx3 numpy array of 3D points representing seams.
    :param decimation: positive integer, this is if we want to remove points from the given point cloud. If for example
                decimation=10, that means for every 10 points, only 1 will be kept.
    :param num_of_neighbs: positive integer, this tells us how many neighbors to look at during graph construction.
                The fewer necessary the faster everything is, but but including fewer you run the risk of missing connected
                regions. The default of 25 was found to work well.
    :param minimum_distance: In order to characterize two points as being part of the same seam we use a threshold
                distance. That is if the two points are less than the minimum_distance, then will end up being connected.
    :param dtheta: radians, in order to get a ordered set of points for each seam, we find the centroid of discrete
                groups of points all within delta-theta. The smaller this value, the more final points will be generated.
    :param z_order_asscending: bool, passed to reorder_seams_by_z to choose the seam ordering along z.
    :return: tuple (ordered_seams, thetas, radii, output) -- ordered_seams is a list of Nx3 numpy arrays
                (one ordered seam each); thetas/radii/output are the per-point polar diagnostics from
                simple_radial_seam_discretizer.
    """
    # DECIMATE THE POINTS
    # Perhaps the number of points is too great and not required for good estimation of seam location. Thus we can
    # here decimate the number of points.
    # (This loop is equivalent to xyz_coords[::decimation] -- kept as-is.)
    if decimation > 1:
        spaces = np.arange(0, xyz_coords.shape[0], decimation)
        xyz_intermed = np.zeros((spaces.shape[0], 3))
        for i, a_ind in enumerate(spaces):
            xyz_intermed[i, :] = xyz_coords[a_ind, :]
        xyz_coords = xyz_intermed
    # FIND NEAREST NEIGHBORS
    # Given input data is unorganized, we first find all the nearest neighbors, and distances between said neighbors.
    # This data will be used later to segment the points into separate seams.
    nrbs = NearestNeighbors(n_neighbors=num_of_neighbs, algorithm='auto').fit(xyz_coords)
    distances, indices = nrbs.kneighbors(xyz_coords)
    # FIND ALL SEPARATE SEAMS
    # "indices_list" will be a list of lists. Each sub list had integers, which are indices of the points (as
    # described in xyz_coords, ditances, and indices).
    indices_list = seam_separator(distances, indices, minimum_distance=minimum_distance)
    # REDUCE TO ORDERED SETS
    # Here we take all these large set of points, where each sub-set represents a seam, and for each sub-set we find
    # an ordered set of points to use as way-points for the robot.
    # Ordered seams will be a list of numpy arrays, where each numpy array will an Nx3 array.
    ordered_seams, thetas, radii, output = simple_radial_seam_discretizer(indices_list, xyz_coords, dtheta=dtheta)
    # Reorder the seams along the z-axis by their centroids.
    ordered_seams = reorder_seams_by_z(ordered_seams, asscending=z_order_asscending)
    return ordered_seams, thetas, radii, output
def reorder_seams_by_z(ordered_seams, asscending=True):
    """
    DESCRIPTION
    Return the seams reordered by the mean z of their points.

    :param ordered_seams: list of Nx3 numpy arrays, one per seam.
    :param asscending: when True the seam with the highest mean z comes
        first (the sort key is the negated centroid, as in the original).
    :return: new list containing the same array objects, reordered.
    """
    # Warn about NaNs, which would poison the mean-based sort key.
    for seam in ordered_seams:
        for z_val in seam[:, 2]:
            if math.isnan(z_val):
                print("\nNAN found in array ...")

    # Column 0: original position, column 1: mean z of that seam.
    key_table = np.zeros((len(ordered_seams), 2))
    for pos, seam in enumerate(ordered_seams):
        key_table[pos, 0] = pos
        key_table[pos, 1] = np.mean(seam[:, 2])

    # Sort rows by (possibly negated) centroid, then read the order back.
    sort_key = -key_table[:, 1] if asscending else key_table[:, 1]
    key_table = key_table[sort_key.argsort()]
    return [ordered_seams[int(pos)] for pos in key_table[:, 0]]
def seam_separator(distances, indices, minimum_distance=.010):
    """
    DESCRIPTION
    Partition the point cloud into connected components ("seams") using a
    proximity graph: every point is a node, and an edge joins two points
    whenever their precomputed neighbor distance is at most
    minimum_distance.  network-x then finds the connected components.

    :param distances: nXm numpy array; distances[i, j] is the distance from
        point i to its j-th neighbor, whose row index is indices[i, j].
    :param indices: nXm numpy array of neighbor row indices, aligned with
        `distances`.
    :param minimum_distance: scalar cutoff below which two points are
        considered part of the same seam.
    :return: list of lists; each sub-list holds the point indices of one
        connected component.
    """
    graph = nx.Graph()
    # One node per point in the cloud.
    graph.add_nodes_from(range(distances.shape[0]))

    # Connect every point to each of its sufficiently-close neighbors.
    for row in range(distances.shape[0]):
        for neighbor, dist in zip(indices[row, :], distances[row, :]):
            if dist <= minimum_distance:
                graph.add_edge(row, neighbor)

    # Each connected component is one seam; unpack the sets into lists.
    components = []
    for component in nx.connected_components(graph):
        components.append([node for node in set(component)])
    return components
def simple_radial_seam_discretizer(indices_list, xyz_coords, dtheta):
    """
    DESCRIPTION
    Replace each unorganized seam (point cloud) with an ordered list of
    centroid way-points.

    The seams are assumed to be roughly circular about the z-axis.  Every
    point is projected onto z=0 and assigned a polar angle; [0, 2*pi) is
    cut into bins of width dtheta and the points falling in each bin are
    replaced by their centroid.  Points whose angle denominator is too
    close to zero get the sentinel angle -100 and land in no bin.

    :param indices_list: list of lists of integer row indices into
        xyz_coords, one sub-list per seam.
    :param xyz_coords: Nx3 array, each row a 3D point (x, y, z).
    :param dtheta: scalar (radians), the angular bin width.
    :return: (ordered_seams, theta, radii, output) -- ordered_seams is a
        list of Kx3 centroid arrays; theta/radii hold each point's polar
        angle and radius; output holds the projected (x, y) pairs.
    """
    theta = np.zeros(xyz_coords.shape[0])
    radii = np.zeros(xyz_coords.shape[0])
    output = np.zeros((xyz_coords.shape[0], 2))
    for i, (x, y) in enumerate(zip(xyz_coords[:, 0], xyz_coords[:, 1])):
        # Radius and projection are identical in every quadrant, so they
        # are computed once here (the original repeated them per branch).
        radii[i] = np.sqrt(x * x + y * y)
        output[i, 0] = x
        output[i, 1] = y
        # Quadrant-by-quadrant angle; -100.0 flags a degenerate
        # denominator.  Note the original's differing epsilons (1e-5 for
        # x <= 0, 1e-4 for x > 0) -- preserved here.
        if x <= 0 and y >= 0:
            theta[i] = math.atan(-x / y) if math.fabs(y) > .00001 else -100.0
        elif x <= 0:
            theta[i] = math.pi / 2. + math.atan(-y / -x) if math.fabs(x) > .00001 else -100.0
        elif y >= 0:
            theta[i] = 3. * math.pi / 2. + math.atan(y / x) if math.fabs(x) > .0001 else -100.0
        else:
            theta[i] = math.pi + math.atan(x / -y) if math.fabs(y) > .0001 else -100.0

    # Bin edges: 0, dtheta, 2*dtheta, ..., capped with an explicit 2*pi.
    bin_edges = np.concatenate((np.arange(0., 2. * math.pi, dtheta), np.array([2 * math.pi])))

    ordered_seams = []
    for seam_indices in indices_list:
        seam_pts = xyz_coords[seam_indices, :]
        seam_theta = theta[seam_indices]
        centroids = np.zeros((bin_edges.shape[0] - 1, 3))
        empty_bins = []
        for b in range(bin_edges.shape[0] - 1):
            lo, hi = bin_edges[b], bin_edges[b + 1]
            # Points of this seam whose angle falls in [lo, hi); np.where
            # adds a leading axis that squeeze removes.
            members = np.squeeze(seam_pts[np.where((seam_theta >= lo) & (seam_theta < hi)), :], axis=0)
            if members.shape[0] > 0:
                centroids[b, :] = np.mean(members, axis=0)
            else:
                empty_bins.append(b)
        # Drop the bins that contained no points at all (avoids all-zero
        # rows / NaN centroids in the output).
        ordered_seams.append(np.delete(centroids, empty_bins, axis=0))
    return ordered_seams, theta, radii, output
def plot_3D_seams(indices_list, xyz_coords):
    """
    Scatter-plot every segmented seam in 3D, cycling through a fixed
    palette of colours/markers so neighbouring seams are distinguishable.

    :param indices_list: list of lists; each sub-list holds the integer row
        indices into xyz_coords that make up one seam.
    :param xyz_coords: np.array, nX3
    :return: N/A, shows a matplotlib figure
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # 12 colour/marker pairs; seams beyond that wrap around.
    palette = ['r', 'g', 'b', 'c', 'm', 'y', 'r', 'g', 'b', 'c', 'm', 'y']
    marks = ['^', '^', '^', '^', '^', '^', 'o', 'o', 'o', 'o', 'o', 'o']

    xv = xyz_coords[:, 0]
    yv = xyz_coords[:, 1]
    zv = xyz_coords[:, 2]

    for seam_no, sub_inds in enumerate(indices_list):
        style = seam_no % len(palette)
        ax.scatter(xv[sub_inds], yv[sub_inds], zv[sub_inds],
                   c=palette[style], marker=marks[style])

    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    plt.show()
def plot_3D_ordered_seams(xyz_coords_list):
    """
    Draw each ordered seam as a connected 3D polyline, cycling through a
    fixed palette of colours/markers.

    :param xyz_coords_list: list of Nx3 numpy arrays, one per seam.
    :return: N/A, shows a matplotlib figure
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # 12 colour/marker pairs; seams beyond that wrap around.
    palette = ['r', 'g', 'b', 'c', 'm', 'y', 'r', 'g', 'b', 'c', 'm', 'y']
    marks = ['^', '^', '^', '^', '^', '^', 'o', 'o', 'o', 'o', 'o', 'o']

    for seam_no, xyz_array in enumerate(xyz_coords_list):
        style = seam_no % len(palette)
        # Debug output preserved from the original implementation.
        print("\nPlotting method, looking at z-values ... ")
        print(xyz_array[:, 2])
        ax.plot(xyz_array[:, 0], xyz_array[:, 1], xyz_array[:, 2],
                c=palette[style], marker=marks[style])

    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    plt.show()
# Experimental driver: load one seam point cloud, run the full seam-finder
# pipeline, then fit a parametric spline to the ordered (x, y) points.
# Large portions are commented-out exploratory code kept for reference.
if __name__ == "__main__":
    print("\nReading coordinates")
    start_time = time.time()
    # file_name = '/home/atlas2/LonsEngRepos/glugun_sensor_python/src/Alpha/findSeamsInClouds/just_seams.xyz'
    file_name = '/home/andy/Documents/spline_fitting/one_seam.xyz'
    xyz_coords = read_xyz(file_name)
    total_run_time = time.time() - start_time
    print("... Time to read coordinates = " + str(total_run_time))
    print("... The shape of xyz_coords is " + str(xyz_coords.shape))
    # # TOO MANY lets drop some
    # spaces = np.arange(0, xyz_coords.shape[0], 15)
    # xyz_intermed = np.zeros((spaces.shape[0], 3))
    # for i, a_ind in enumerate(spaces):
    #     xyz_intermed[i, :] = xyz_coords[a_ind, :]
    # xyz_coords = xyz_intermed
    #
    # # http://scikit-learn.org/stable/modules/neighbors.html#unsupervised-nearest-neighbors
    # start_time = time.time()
    # nn = 25
    # print("\n\nGenerating n = " + str(nn) + " nearest neighbors")
    # nrbs = NearestNeighbors(n_neighbors=nn, algorithm='auto').fit(xyz_coords)
    # distances, indices = nrbs.kneighbors(xyz_coords)
    # total_run_time = time.time() - start_time
    # print("... Time to read find nearest neighbors = " + str(total_run_time))
    #
    #
    # #print("\n\nFinding seams ")
    # #array_of_connected_components = a_bs_seam_finder(distances, indices, minimum_distance=.005)
    # #print("... The unique seams are " + str(np.unique(array_of_connected_components)))
    #
    # print("\n\nGenerating Graph")
    # start_time = time.time()
    # indices_list = seam_separator(distances, indices, minimum_distance=.010)
    # total_run_time = time.time() - start_time
    # print("... Time to read build graph = " + str(total_run_time))
    #
    # #plot_3D_seams(indices_list, xyz_coords)
    #
    # # Now discretize seams
    # ordered_seams = simple_radial_seam_discretizer(indices_list, xyz_coords, dtheta=.05)
    #
    # #
    # reorder_seams_by_z(ordered_seams)
    #
    # Run the whole pipeline in one call (heavy decimation: keep 1 in 700).
    ordered_seams, thetas, radii, output = full_seam_finder_0(xyz_coords, decimation=700, num_of_neighbs=25, minimum_distance=.010, dtheta=.05,
                                     z_order_asscending=True)
    #print(sorted(thetas))
    # No-op bare expression (leftover from interactive debugging).
    radii
    print(thetas.shape)
    print(radii.shape)
    print(xyz_coords.shape)
    print(output.shape)
    #print(sorted(thetas))
    print(radii)
    # Build an (N, 3) table of [theta, x, y] and sort it by angle so the
    # spline is fit to points ordered around the seam.
    thetas = np.expand_dims(thetas, axis=1)
    print(thetas.shape)
    total = np.concatenate((thetas,output), axis=1)
    #print(total)
    total = total[total[:,0].argsort()]
    print(total)
    print(total[:,1:3])
    # NOTE(review): splprep expects a sequence of per-dimension arrays
    # (shape (ndim, npts)); total[:,1:3] is (npts, 2) -- confirm orientation.
    tck, u = splprep(total[:,1:3],s=0)
    #cs = CubicSpline(total[:,0], total[:,1:3], bc_type='periodic')
    #print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
    #ds/dx=0.0 ds/dy=1.0
    #xs = 2 * np.pi * np.linspace(0, 1, 100)
    #plt.figure(figsize=(6.5, 4))
    #plt.plot(y[:, 0], y[:, 1], 'o', label='data')
    #plt.plot(np.cos(xs), np.sin(xs), label='true')
    #plt.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
    #plt.axes().set_aspect('equal')
    #lt.legend(loc='center')
    #plt.show()
    #
    #ellipse_list = fitting_ellipse(ordered_seams)
    #
    plot_3D_ordered_seams(ordered_seams)
| true |
35762fd63d868d895e6c211f051cadd59e0ce4c3 | Python | kyoz/learn | /languages/python/1.learn_py_the_hard_way/ex23.py | UTF-8 | 572 | 2.9375 | 3 | [
"MIT"
] | permissive | import sys
# CLI arguments: this script's name, the encoding to test, and the
# encode/decode error policy (e.g. "strict", "replace").
script, input_encoding, error = sys.argv
def main(language_file, encoding, errors):
    """Echo every remaining line of language_file via print_line.

    :param language_file: an open text-file object to read line by line.
    :param encoding: codec name used to round-trip each line.
    :param errors: error policy for encode/decode (e.g. "strict").
    :return: None.

    The original recursed once per line, which hits Python's default
    recursion limit (~1000) on long files; this iterative loop is the
    equivalent without that failure mode.
    """
    while True:
        line = language_file.readline()
        if not line:
            return
        print_line(line, encoding, errors)
def print_line(line, encoding, errors):
    """Round-trip one stripped line through `encoding` and print both the
    raw bytes and the decoded string, separated by "<===>".
    """
    text = line.strip()
    as_bytes = text.encode(encoding, errors=errors)
    print(as_bytes, "<===>", as_bytes.decode(encoding, errors=errors))
# Open the language list as UTF-8 and re-print each line in the requested
# encoding.  NOTE(review): the handle is never closed -- acceptable for a
# short-lived script, but a `with` block would be cleaner.
languages = open('languages.txt', encoding="utf-8")
main(languages, input_encoding, error)
| true |
d0a4462d6cf31a90b2183b7bc85362c6935c9a6c | Python | kmod/icbd | /icbd/compiler/tests/34.py | UTF-8 | 2,347 | 3.953125 | 4 | [
"MIT"
] | permissive | """
closure tests
"""
# -----------------------------------------------------------------------
# Compiler test fixture (Python 2): exercises closure/scoping semantics --
# nested functions, free variables, `global` declarations that skip an
# enclosing scope, and default-argument capture.  The exact structure is
# intentional and must not be "cleaned up"; each numbered group is one
# independent test case.
# -----------------------------------------------------------------------
def f1(x):
    def g1():
        return 2
    def g2():
        y = 2
        def f1_2():
            return y + x + g1()
        return f1_2()
    return g2
print f1(1)()
# Mutually-recursive top-level functions (only the direct branch runs).
def f3(x):
    if 1:
        return x
    return g3(x)
def g3(x):
    return f3(x)
print f3(2)
# h4 is chosen at module-exec time by the `if 1:` -- late binding of a
# conditionally-defined global.
y = 1
def f4(x):
    return g4(x-h4(x))
def g4(x):
    if 1:
        return x
    return f4(x-1)
if 1:
    def h4(x):
        return x
else:
    def h4(x):
        return -x
h4(1)
f4(2)
def f5(x):
    return x + 1
def g5(x):
    def h5():
        return f5(x)
    return h5
g5(2)
def f6(x):
    def g6(y):
        if 0:
            g6(y-1)
        return x+y
    return g6
print f6(1)(2)
def f7():
    x = 1
    def g7():
        return x
    def h7():
        return g7()
    return h7
f7()()
def f8():
    def g8():
        pass
    def h8():
        g8()
    if 1:
        g8()
# It's ok for something to be both in the closure and the local scope
def f9():
    a = 1
    b = 2
    def g9():
        a
        def h9():
            a = 1
            a
            b
x12 = 2
def f12():
    x12 = 3
    def g12():
        # Tricky: this should bypass the above 'x12=3' definition and go to the global scope
        global x12
        def h12():
            # Even trickier: this one should also go up to globals
            print "h2", x12
        h12()
        return x12
    print g12()
f12()
# Same as above but without accessing the global var in the middle scope
# This should force x14 to be part of the global closure
x14 = 1
def f14():
    x14 = 2
    def g14():
        global x14
        def h14():
            print x14
        h14()
    g14()
f14()
# Super super tricky: jumps over the closure definition, up to the globals
x15 = 11
def f15():
    x15 = 12
    def g15():
        def h15():
            global x15
            def foo15():
                print x15
            foo15()
        h15()
        print x15
    g15()
f15()
print x15
# And just to make sure, this one should hit the local closure
x16 = 21
def f16():
    x16 = 22
    def g16():
        print x16
    g16()
f16()
# closures for default args
x17 = 0
y17 = 10
def f17(x17, y17):
    def h():
        global x17
        def g(_x=x17, _y=y17):
            print _x, _y
        return g
    return h()
g17 = f17(1, 11)
g17()
g17(2, 12)
g17(3)
x17, y17 = 100, 110
g17()
| true |
2fe6134fbc7d381aaa7f4fa7f03d7fabaad8983f | Python | eternaltc/test | /Test/Exception/except04_try_else.py | UTF-8 | 192 | 3.453125 | 3 | [] | no_license | try:
    # Prompts (in Chinese): read a dividend and a divisor as strings, then
    # attempt the division -- float() or the division itself may raise.
    a = input("请输入一个被除数:")
    b = input("请输入一个除数:")
    c = float(a)/float(b)
    # print(c)
# NOTE(review): BaseException also swallows SystemExit/KeyboardInterrupt;
# catching Exception is normally preferable outside teaching examples.
except BaseException as e:
    print(e)
else:
    # Runs only when no exception occurred, so c is guaranteed to be bound.
    print(c)
| true |
af3de83602000068790f2892c9b3dc3d9affb80a | Python | shahhaard47/ML-CourseProjects | /course_project/create_splits.py | UTF-8 | 1,251 | 2.96875 | 3 | [] | no_license | ## CS 675 Course project
## Author: Haard Shah
from read_files import readData, readLabels
import sys
import os
import random
# Output configuration: splits are written under <cwd>/train_data.
# DATA_NAME / OUTPUT_TRAIN_PREFIX are placeholders overwritten in __main__.
TRAIN_FOLDER = "train_data"
DATA_NAME = ""
TRAIN_PATH = os.path.join(os.getcwd(), TRAIN_FOLDER)
OUTPUT_TRAIN_PREFIX = ""
def createTrainDir():
    """Create the training-split output directory if it does not exist.

    Uses makedirs(exist_ok=True), which avoids the check-then-create race
    of the original exists()/makedirs() pair.
    """
    os.makedirs(TRAIN_PATH, exist_ok=True)
def split(percent, numLabels):
    """Return a uniformly random subset of row indices.

    Shuffles [0, numLabels) and keeps the first int(numLabels * percent)
    entries, so the result contains no duplicates and is in random order.

    :param percent: fraction in (0, 1) of rows to keep.
    :param numLabels: total number of rows.
    :return: list of int indices, length int(numLabels * percent).
    """
    candidates = list(range(numLabels))
    random.shuffle(candidates)
    keep = int(numLabels * percent)
    return candidates[:keep]
# Entry point: usage is `create_splits.py <labelsFile> <numSplits>`.
# Writes <base>.trainlabels.<i> files, each holding a random 80% subset of
# the labelled rows as "<label> <row-index>" lines.
if __name__ == '__main__':
    if (len(sys.argv) != 3):
        print("Usage:", sys.argv[0], "<labelsFile> <numSplits>")
        exit()
    createTrainDir()
    labelFile = sys.argv[1]
    numSplits = int(sys.argv[2])
    # Derive the output prefix from the label file's base name,
    # e.g. "data.labels" -> "data.trainlabels.<i>".
    DATA_NAME = labelFile.split('.')[0]
    OUTPUT_TRAIN_PREFIX = DATA_NAME + ".trainlabels."
    splitPercent = 0.80 # 80 train 20 validation
    allLabels = readLabels(labelFile)
    numLabels = len(allLabels)
    for i in range(numSplits):
        newSplit = split(splitPercent, numLabels)
        outputFileName = OUTPUT_TRAIN_PREFIX + str(i)
        f = open(outputFileName, 'w')
        for key in newSplit:
            f.write(str(allLabels[key]) + " " + str(key) + "\n")
        f.close()
| true |
39df6a7b14bdf4f7af30839d6304497defa4f7ac | Python | ammishra78/Imfeelinglucky | /scale_images.py | UTF-8 | 353 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PIL import Image
import glob, os
# Target thumbnail size in pixels (width, height).
size = 100, 100
all_images = glob.glob("Images/*.jpeg")
total = len(all_images)
# Resize every .jpeg under Images/ and save a 100x100 JPEG copy.
# NOTE(review): resize(size) forces exactly 100x100 and can distort the
# aspect ratio; Image.thumbnail would preserve it -- confirm intent.
for infile in all_images:
    file, ext = os.path.splitext(infile)
    im = Image.open(infile)
    im = im.resize(size)
    # Output path becomes e.g. "sm100x100Images/<name>.jpg"; that directory
    # must already exist or save() raises.
    im.save("sm100x100" + file + ".jpg", "JPEG")
    print(file)
| true |
29088eb4684e429ab70ef1471aa67a8f962356e3 | Python | WillisLiao/10-29nice | /q1.py | UTF-8 | 448 | 3.609375 | 4 | [] | no_license |
def monkey(a, b):
    """Run bear() for every integer from a+1 up to and including 11.

    `b` is accepted but unused (kept for interface compatibility).
    """
    for value in range(a + 1, 12):
        bear(value)
def bear(n):
    """Print every way to write n as a sum of two or more consecutive
    positive integers (one decomposition per line, e.g. "2+3+4"), or
    "NO" when no such decomposition exists.
    """
    # `found` replaces the original's `bool`/`var` pair (which shadowed
    # the builtin `bool`); `total` replaces a shadowed builtin `sum`.
    found = False
    for start in range(1, n):
        total = 0
        for stop in range(start, n):
            total += stop
            if total == n:
                found = True
                for k in range(start, stop):
                    print('{}+'.format(k), end="")
                print(stop)
    if not found:
        print("NO")
# Entry point: read two integers from stdin and drive the search.
monkey(a=int(input()),b=int(input()))
d804a25db6c636123aab9c9c7644d28934bf7db1 | Python | mmamoyco/python-console-game | /Game/LoginService.py | UTF-8 | 634 | 2.671875 | 3 | [] | no_license |
from UserService import UserService
from UserTO import UserTO
from DAO.UserDAO import UserDAO
class LoginService:
    # Thin service that checks a user's credentials against the DAO and,
    # on success, records the logged-in state.
    def __init__(self):
        self.__userService = UserService()
        self.__userDAO = UserDAO()
    # Return True if login succeeds, otherwise False.
    def login(self, user):
        # TODO ADD DAO
        # NOTE(review): passwords are compared in plaintext here -- confirm
        # they are hashed upstream before storage/comparison.
        # NOTE(review): find() presumably returns None for an unknown
        # username, which would raise AttributeError below -- verify.
        findedUser = self.__userDAO.find(user.getUsername())
        if findedUser.getPassword() == user.getPassword():
            """upgrade users login state"""
            self.__userDAO.updateLogin()
            user.setLoggedIn()
            return True
        else:
            return False
| true |
ae480a53e1520052426aaeefc01548b2ddaebc53 | Python | Mumbaikar007/Code | /SPCC shortcuts/firstfollow.py | UTF-8 | 2,212 | 3 | 3 | [] | no_license |
# Read the grammar from stdin: first the number of productions, then one
# production per line in the form "X->RHS" (e.g. "E->E+T").
numberOfProductions = int ( input() )
givenProductions = []
for _ in range (0, numberOfProductions):
    givenProductions.append(input())
print ( givenProductions )
terminals, nonTerminals = set (), set()
# Classify every symbol of every production: lowercase letters and the
# operator characters are terminals; '-' (half of the "->" arrow) is
# skipped; anything else is a non-terminal.  (The original did this with a
# side-effecting list comprehension, an anti-pattern replaced here by an
# explicit loop with identical behavior.)
for prod in givenProductions:
    for ch in prod:
        if 'a' <= ch <= 'z' or ch in ('+', '*', '(', ')', '/', '~'):
            terminals.add(ch)
        elif ch != '-':
            nonTerminals.add(ch)
print (terminals, nonTerminals)
# FIRST sets, one per non-terminal; 'e' denotes epsilon throughout.
first = { nt : set() for nt in nonTerminals }
# Compute FIRST(ch) recursively.  Productions are strings "X->RHS", so the
# right-hand side starts at index 2.  A non-empty first[ch] is treated as
# "already computed" (memoization via the shared dict).
def RecursiveFirst(ch):
    if (len(first[ch]) == 0):
        for prod in givenProductions:
            if prod[0] == ch:
                # flagEnded == 1 means this production's FIRST contribution
                # is complete (a terminal or non-nullable symbol was hit).
                flagEnded = 0
                for i in range (2, len(prod)):
                    if flagEnded == 1:
                        break
                    if prod[i] in terminals:
                        first[ch].add(prod[i])
                        flagEnded = 1
                        break
                    else :
                        if len(first[prod[i]]) == 0:
                            RecursiveFirst(prod[i])
                        if 'e' in first[prod[i]]:
                            # Nullable symbol: take its FIRST minus epsilon
                            # and keep scanning the next RHS symbol.
                            first[ch] |= first[prod[i]]
                            first[ch].remove('e')
                        else:
                            first[ch] |= first[prod[i]]
                            flagEnded = 1
                # Every RHS symbol was nullable: the production can derive
                # epsilon, so epsilon joins FIRST(ch).
                if flagEnded == 0:
                    first[ch].add('e')
# Compute FIRST for every non-terminal, then initialize the FOLLOW sets:
# the start symbol (LHS of the first production) gets the end marker '$'.
for ch in nonTerminals:
    RecursiveFirst (ch)
print ( first )
print ("*************************************")
follow = { nt : set() for nt in nonTerminals }
follow[givenProductions[0][0]].add('$')
# Compute FOLLOW(ch): scan every RHS occurrence of ch and collect the
# FIRST sets of what can come after it; when everything after is nullable,
# fall back to FOLLOW of the production's left-hand side.
def RecursiveFollow(ch):
    for prod in givenProductions:
        for i in range (2,len(prod)):
            if ( prod[i] == ch):
                # hopingForNext == 1 while every symbol after ch so far has
                # been nullable (its FIRST contained epsilon).
                hopingForNext = 1
                for j in range(i+1,len(prod)):
                    if prod[j] in terminals:
                        follow[ch].add(prod[j])
                        hopingForNext = 0
                        break
                    else:
                        if 'e' in first[prod[j]]:
                            follow[ch] |= first[prod[j]]
                            follow[ch].remove('e')
                        else :
                            follow[ch] |= first[prod[j]]
                            hopingForNext = 0
                            break
                if ( hopingForNext == 1):
                    # ch at the end (or followed only by nullables):
                    # FOLLOW(LHS) flows into FOLLOW(ch) -- unless ch *is*
                    # the LHS, which would recurse pointlessly.
                    if ch == prod[0]:
                        continue
                    # Side-effecting conditional expression: computes
                    # FOLLOW(LHS) first if it is still empty.
                    [RecursiveFollow(prod[0]) if len(follow[prod[0]]) == 0 else None]
                    follow[ch] |= follow[prod[0]]
# Two full passes so FOLLOW sets filled late in the first pass are
# re-propagated to symbols processed earlier (a crude fixed-point step).
# The original used side-effecting list comprehensions; plain loops do the
# same work idiomatically.
for ch in nonTerminals:
    RecursiveFollow(ch)
for ch in nonTerminals:
    RecursiveFollow(ch)
print(follow)
| true |
c12aa6c9ba3a23c8463ead12a4800f6063616421 | Python | hunterachieng/python_class | /bank.py | UTF-8 | 6,034 | 3.328125 | 3 | [] | no_license | from datetime import datetime
class Account:
    """A simple in-memory bank account.

    Supports deposits, fee-charged withdrawals, a single outstanding loan
    (with a percentage fee and a fixed limit), transfers to other accounts,
    and a printable transaction statement.  Every monetary method returns a
    human-readable status string.
    """

    account_type = "student"  # class-wide account category label

    def __init__(self, name, phone):
        self.name = name
        self.phone = phone
        self.balance = 0              # current account balance
        self.transaction_fee = 50     # flat fee charged on every withdrawal
        self.loan_amount = 0          # outstanding loan (principal + fee)
        self.loan_fees = 5            # loan fee, percent of the principal
        self.loan_limit = 50000       # maximum amount borrowable at once
        self.transactions = []        # statement entries, oldest first

    def _is_number(self, amount):
        """Return True if `amount` supports arithmetic (was given in figures)."""
        try:
            amount + 10
        except TypeError:
            return False
        return True

    def _record(self, amount, narration):
        """Append one statement entry with the current balance and timestamp."""
        self.transactions.append({"amount": amount, "balance": self.balance,
                                  "narration": narration, "time": datetime.now()})

    def deposit(self, amount):
        """Add `amount` to the balance; rejects non-numeric/non-positive input."""
        if not self._is_number(amount):
            return f"Please enter {amount} in figures"
        if amount <= 0:
            return f"Please deposit a valid amount"
        self.balance += amount
        self._record(amount, "You deposited")
        return f"Hello {self.name} you have deposited {amount} your new balance is {self.balance}"

    def withdraw(self, amount):
        """Withdraw `amount`; the flat transaction fee is charged on top."""
        if not self._is_number(amount):
            return f"Please enter {amount} in figures"
        if amount <= 0:
            return "Input valid amount"
        if amount > self.balance:
            return "Insufficient balance"
        # NOTE(review): the check above ignores the fee, so balance can go
        # slightly negative when amount + fee > balance -- preserved as-is.
        self.balance -= amount + self.transaction_fee
        self._record(amount, "You withdrew")
        return f"Hello {self.name}, you have successfully withdrawn {amount}. Your current balance is {self.balance}"

    def borrow(self, amount):
        """Take out a loan; a percentage fee is added to the recorded debt."""
        if not self._is_number(amount):
            return f"Please enter {amount} in figures"
        total_fees = self.loan_fees * amount / 100
        if amount <= 0:
            return "Please input a valid amount"
        elif self.loan_amount > 0:
            return "You have an outstanding loan balance"
        elif amount > self.loan_limit:
            return "You have exceded the limited borrowing amount"
        else:
            self.loan_amount += total_fees + amount
            self.balance += amount
            self._record(amount, "You borrowed")
            return f"Dear customer, you have received a loan amount of {amount} and your current balance is {self.balance}. Your total loan amount is {self.loan_amount}"

    def repay(self, amount):
        """Pay `amount` toward the loan; any overpayment is credited to balance."""
        if not self._is_number(amount):
            return f"Please enter {amount} in figures"
        if amount < self.loan_amount:
            self.loan_amount -= amount
            self._record(amount, "You repayed")
            return f" You have repayed {amount} of your loan. Your current loan balance is {self.loan_amount}"
        elif amount > self.loan_amount:
            balance = amount - self.loan_amount
            self.balance += balance
            self.loan_amount = 0
            self._record(amount, "You repayed")
            return f"Congratulations! You have repayed your loan! Your current loan balance is {self.loan_amount} and your current balance is {self.balance}"
        else:
            # amount == self.loan_amount: the loan is settled in full.
            # Bug fix: the original never reset self.loan_amount in this
            # branch, so an exactly-repaid loan stayed on the books forever.
            settled = self.loan_amount
            self.loan_amount = 0
            self._record(amount, "You repayed")
            return f" Congratulations!! You have fully your loan of {settled}. Your current loan balance is {self.loan_amount}"
        # (The original had a trailing `else` after the ==/</> chain; it was
        # unreachable and has been removed.)

    def transfer(self, amount, account):
        """Move `amount` to `account`, charging the sender a 5% transfer fee."""
        if not self._is_number(amount):
            return f"Please enter {amount} in figures"
        if amount <= 0:
            return f"Please transfer a valid amount"
        fee = amount * 0.05
        total = amount + fee
        if total > self.balance:
            return f"Your balance is {self.balance} you need {total} in order to transfer {amount}"
        self.balance -= total
        account.deposit(amount)
        self._record(amount, "You transfered")
        return f"Dear customer, you have transfered {amount} to accout {account.name}. Your new account balance is {self.balance}"

    def get_statement(self):
        """Print every recorded transaction, one formatted line per entry."""
        for transaction in self.transactions:
            amount = transaction["amount"]
            narration = transaction["narration"]
            balance = transaction["balance"]
            time = transaction["time"]
            date = time.strftime("%D")
            print(f" {date} .... {narration} .... {amount} ....Balance {balance}")
class MobileMoneyAccount(Account):
    """An Account tied to a mobile-money service provider, adding airtime purchase."""

    def __init__(self, name, phone, service_provider):
        Account.__init__(self, name, phone)
        self.service_provider = service_provider  # e.g. the telecom operator name

    def buy_airtime(self, amount):
        """Deduct `amount` from the balance as an airtime purchase.

        Returns a human-readable status string in every case.
        """
        try:
            amount + 10
        except TypeError:
            return f"Please enter {amount} in figures"
        if amount <= 0:
            return f"Please enter a valid amount"
        elif amount <= self.balance:
            self.balance -= amount
            transaction = {"amount": amount, "balance": self.balance,
                           "narration": "You bought Airtime", "time": datetime.now()}
            self.transactions.append(transaction)
            return f"You have bought {amount} airtime. Your current {self.service_provider} mobile money balance is {self.balance}"
        else:
            # Bug fix: the original fell through and silently returned None
            # when the balance was insufficient; report it explicitly.
            return f"Insufficient balance: your {self.service_provider} mobile money balance is {self.balance}"
| true |
b95260d5d085467fc219737d4b4b7a7ec8e18767 | Python | p-b-j/uscb-das-container-public | /das_decennial/programs/reader/spar_table.py | UTF-8 | 5,138 | 3.078125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | """
This module implements a few table reading classes.
TableWithGeocode is a class that has a repartitionData function, that performs repartitioning of DataFrame by geocode before
creating histograms.
SparseHistogramTable is a class that reads table and converts it from Spark DataFrame with rows corresponding to records
(person records, or household records, or housing unit records) into RDD with histograms (as sparse matrices)
keyed by geocode, using python mapping functions.
"""
from operator import add
import numpy as np
import scipy.sparse as ss
from pyspark.sql.functions import substring
from programs.reader.table import DenseHistogramTable, UnitFromPersonTable
class TableWithGeocode(DenseHistogramTable):
    """
    Class to implement common methods between person and unit tables or others with properties that
    are common, but not common enough to put into AbstractTable
    """
    # Temporary DataFrame column holding the geocode prefix used as the
    # partition key; dropped again after repartitioning.
    PARTITION_CODE_COLUMN = 'partitionCode'
    def repartitionData(self, data):
        """
        Perform table repartitioning in spark depending on settings in config.
        Partitions by a prefix of the first geography variable, using either a
        hash or a range partitioner per self.reader.range_partition; a
        non-positive num_reader_partitions leaves the data untouched.
        :param data: Spark DataFrame to repartition.
        :return: the (possibly repartitioned) DataFrame.
        """
        print(f"Repartitioning histogrammed dataframe with {data.rdd.getNumPartitions()} partitions and schema:")
        data.printSchema()
        print(f"Repartitioning to {self.reader.num_reader_partitions} partitions")
        if self.reader.num_reader_partitions > 0:
            # NOTE(review): Spark SQL substring positions are 1-based; pos=0
            # here still yields the leading prefix, but confirm it is intended.
            data = data.withColumn(self.PARTITION_CODE_COLUMN, substring(self.geography_variables[0], 0, self.reader.reader_partition_len))
            if not self.reader.range_partition:
                print(f"Using df hash partitioner by {self.PARTITION_CODE_COLUMN}")
                return data.repartition(self.reader.num_reader_partitions, self.PARTITION_CODE_COLUMN).drop(self.PARTITION_CODE_COLUMN)
            print(f"Using df range partitioner by {self.PARTITION_CODE_COLUMN}")
            return data.repartitionByRange(self.reader.num_reader_partitions, self.PARTITION_CODE_COLUMN).drop(self.PARTITION_CODE_COLUMN)
        return data
class SparseHistogramTable(TableWithGeocode):
    """
    This is a class that reads table and converts it from Spark DataFrame with rows corresponding to records
    (person records, or household records, or housing unit records) into RDD with histograms (as sparse matrices)
    keyed by geocode, using python mapping functions.
    """
    def to_by_geo(self, pair):
        # Split a (geography + histogram-coordinates) key into the geocode
        # part and the histogram index, flatten the index against the
        # histogram shape, and emit {flat_index: count} keyed by geocode.
        # Would prefer using dok_matrix but this bug isn't fixed in our version.
        # https://github.com/scipy/scipy/issues/7699
        blk_idx, val = pair
        blk = blk_idx[:len(self.geography_variables)]
        idx = blk_idx[len(self.geography_variables):]
        flat_idx = np.ravel_multi_index(idx, self.data_shape)
        # size = np.prod(self.data_shape)
        # tmp = ss.csr_matrix((1,size))
        # tmp[0,flat_idx] = val
        # return blk, tmp
        return blk, {flat_idx: val}
    def make_spar(self, d):
        # Turn a {flat_index: count} dict into a 1 x prod(data_shape)
        # sparse CSR row vector (built via dok for efficient assignment).
        size = np.prod(self.data_shape)
        spar = ss.dok_matrix((1, size), dtype=int)
        for k, v in d.items():
            spar[0, k] = v
        return spar.tocsr()
    def process(self, data):
        """
        args:
            a Spark dataframe containing CEF person records
        This function performs the following process:
            (1) Convert the data to an RDD.
            (2) row -> (geo_histogram,1)) (Map the row to a geo_histogram, a tuple of the
                geography variables plus the histogram variables.)
            (3) Reduce by key.
            (4) (geo_histogram,cnt) -> (geo, (histogram, cnt))
            (5) groupbykey: creates (geo, list of (histogram, cnt))
            (6) (geo, list of (histogram, cnt)) -> (geo, ndarray)
        returns: an rdd of (geo,numpy ndarray) pairs.
        """
        # Merge two per-geocode count dicts; keys are disjoint here because
        # the earlier reduceByKey already summed duplicate coordinates.
        def combdict(d1, d2):
            d1.update(d2)
            return d1
        data = self.repartitionData(data)
        #print(f"Rows (person) {das_utils.rddPartitionDistributionMoments(data.rdd)}")
        return (data.rdd.map(self.create_key_value_pair)
                .reduceByKey(add)
                .map(self.to_by_geo)
                .reduceByKey(combdict).mapValues(self.make_spar).repartition(self.reader.num_reader_partitions))
class UnitFromPersonRepartitioned(UnitFromPersonTable, TableWithGeocode):
    """
    Just add the DataFrame repartitioning before processing.

    Deprecated: kept only as a placeholder -- see the notes below for why
    repartitioning here is pointless and what to use instead.
    """
    # No point of repartitioning the data, SQL (.select, .distinct) in inherited .process() function
    # violate pre-existing partitioning returning 200 partitions. So does .groupByKey in RDD
    # This is an outdated class and should not really be used. Use SQLSparTable (or, if you need for some reason, SparTable)
    pass
    # def process(self, data):
    #     """
    #     Input:
    #         data: a Spark dataframe (df)
    #
    #     Output:
    #         a RDD with block by block counts of housing units and gqs by type
    #     """
    #
    #     data = self.repartitionData(data)
    #     return super().process(data)
| true |
c4a3acdcae220600cea2105eb23e1d8738171961 | Python | jakoblover/TDT4265-Computer-Vision-and-Deep-Learning | /Assignment 1/LogisticRegression.py | UTF-8 | 2,831 | 3.140625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
class LogisticRegression:
    """Binary logistic regression trained with batch gradient descent.

    Optionally applies L2 regularization, and stops early when the
    validation loss rises for two consecutive epochs. Per-epoch losses,
    accuracies and weight norms are recorded in the history lists below.
    """
    def __init__(self, learningRate=0.000001, n=1000, l2_reg=True, lambd=0.001):
        # learningRate: gradient-descent step size
        # n: maximum number of epochs
        # l2_reg / lambd: enable L2 regularization with strength lambd
        self.learningRate = learningRate
        self._lambda = lambd
        self.l2_reg = l2_reg
        self.n = n
        # Per-epoch cross-entropy loss for each split
        self.lossValsTraining = []
        self.lossValsValidation = []
        self.lossValsTest = []
        # Per-epoch accuracy (percent) for each split
        self.percentCorrectTraining = []
        self.percentCorrectValidation = []
        self.percentCorrectTest = []
        # Per-epoch squared L2 norm of the weight vector
        self.weightsLengths = []
    def _sigmoid(self, z):
        """Logistic function, elementwise."""
        return 1 / (1 + np.exp(-z))
    def _bias(self, X):
        """Prepend a column of ones to X (bias trick)."""
        return np.concatenate((np.ones((X.shape[0], 1)), X), axis=1)
    def _loss(self, h, y):
        """Mean cross-entropy of predictions h against labels y,
        plus the (lambda/2)*||w||^2 penalty when L2 is enabled."""
        if self.l2_reg:
            return -(y * np.log(h) + (1 - y) * np.log(1 - h)).mean() + (self._lambda/2)*np.sum(np.square(self.w))
        else: return -(y * np.log(h) + (1 - y) * np.log(1 - h)).mean()
    def _gradient(self, X, h, y):
        """Gradient of the loss w.r.t. self.w.

        NOTE(review): with l2_reg the penalty is also applied to the bias
        weight w[0]; the bias is usually left unregularized -- confirm.
        """
        if self.l2_reg:
            return (np.dot(X.T, (h - y)) / y.shape[0]) + (self._lambda*self.w)
        else: return np.dot(X.T, (h - y)) / y.shape[0]
    def fit(self, X, y, X_validation, Y_validation, X_test, Y_test):
        """Train on (X, y), tracking metrics on all three splits.

        Weights are (re)initialized to zero on every call. Stops early
        when the validation loss increased two epochs in a row.
        """
        #bias trick
        X = self._bias(X)
        X_validation = self._bias(X_validation)
        X_test = self._bias(X_test)
        #create weights matrix
        self.w = np.zeros(X.shape[1])
        for i in range(self.n):
            print("Epoch {0}/{1}".format(i+1,self.n))
            #Update step
            h = self._sigmoid(np.dot(X, self.w))
            self.w -= self.learningRate*self._gradient(X,h,y)
            #Record cost function values
            self.lossValsTraining.append(self._loss(h,y))
            self.lossValsValidation.append(self._loss(self._sigmoid(np.dot(X_validation, self.w)),Y_validation))
            self.lossValsTest.append(self._loss(self._sigmoid(np.dot(X_test, self.w)),Y_test))
            #Record percentage of correctly predicted images
            Y_hat = self.predict(X)
            self.percentCorrectTraining.append(self.accuracy(Y_hat, y))
            Y_hat = self.predict(X_validation)
            self.percentCorrectValidation.append(self.accuracy(Y_hat, Y_validation))
            Y_hat = self.predict(X_test)
            self.percentCorrectTest.append(self.accuracy(Y_hat, Y_test))
            #Record weights length
            self.weightsLengths.append(np.sum(np.square(self.w)))
            # Early stopping. Bug fix: compare the *last three* recorded
            # validation losses instead of indexing by the loop counter i --
            # the history lists persist across calls, so i-based indexing
            # read stale entries whenever fit() was called more than once.
            if len(self.lossValsValidation) > 3:
                if self.lossValsValidation[-3] < self.lossValsValidation[-2] < self.lossValsValidation[-1]:
                    return
    def predict(self, X):
        """Hard 0/1 predictions for inputs X (already bias-augmented)."""
        return self._sigmoid(np.dot(X, self.w)).round()
    def accuracy(self, Y_hat, Y):
        """Percentage of entries where Y_hat equals Y."""
        return 100*np.sum(Y_hat == Y)/np.size(Y_hat)
| true |
0399fffcc3fc75b2d404230a3954b83f6e4b0547 | Python | sutizi/sentiment-analysis | /clasificador.py | UTF-8 | 1,250 | 2.890625 | 3 | [] | no_license | #!usr/bin/env python3
import os
import pickle as c
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
def guardar(clf, name):
    """Serialize the classifier *clf* to the file *name* with pickle."""
    fp = open(name, 'wb')
    try:
        c.dump(clf, fp)
    finally:
        fp.close()
    print("El clasificador fue guardado")
def cargar_archivos():
    """Read every file under data_set/ and return three parallel lists.

    Each file's first line is treated as a header and skipped. Remaining
    lines are split on single spaces; field 1 is the text, field 2 the
    sentiment label and field 3 the score (trailing newline included).
    """
    direc = "data_set/"
    rutas = [direc + nombre for nombre in os.listdir(direc)]
    texto, sentimiento, rango = [], [], []
    for ruta in rutas:
        with open(ruta, "r") as fp:
            lineas = fp.readlines()[1:]
        for linea in lineas:
            campos = linea.split(' ')
            texto.append(campos[1])
            sentimiento.append(campos[2])
            rango.append(campos[3])
    return texto, sentimiento, rango
# --- Training pipeline (runs at import/execution time) ---
t, s, r = cargar_archivos()  # texts, sentiment labels, score column (r is unused below)
vectorizer = CountVectorizer()
features = vectorizer.fit_transform(t)  # sparse bag-of-words counts
features_nd = features.toarray()  # densified for train_test_split
# 80/20 train/test split with a fixed seed for reproducibility
f_train, f_test, l_train, l_test = train_test_split(features_nd, s, test_size = 0.2, random_state=1234)
clf = MultinomialNB()
clf = clf.fit(X=f_train, y=l_train)
preds = clf.predict(f_test)
print ("ACCURACY:", accuracy_score(l_test, preds))
# Persist the trained model for later use
guardar(clf, "clasificador.mdl")
| true |
1b7c2d0511ba066baef031dd2354ee2152dc6b72 | Python | roeybenhayun/statistical_ml | /database/assigenment3/Interface.py | UTF-8 | 18,621 | 2.953125 | 3 | [] | no_license | #!/usr/bin/python2.7
#
# Interface for the assignement
#
import psycopg2
def getOpenConnection(user='postgres', password='1234', dbname='postgres'):
    """Open and return a psycopg2 connection to a database on localhost."""
    dsn = "dbname='%s' user='%s' host='localhost' password='%s'" % (dbname, user, password)
    return psycopg2.connect(dsn)
def loadRatings(ratingstablename, ratingsfilepath, openconnection):
    """
    Create *ratingstablename* and bulk-load it from the ':'-separated
    ratings file at *ratingsfilepath*.

    The table is created with throwaway single-char columns (a, b, c)
    between the real ones so the file's separator characters parse as
    extra columns during COPY; those columns and the timestamp are
    dropped afterwards, leaving (UserID, MovieID, Rating).
    Assumes the connection is in autocommit mode.
    """
    print("In Load_Ratings Function")
    # added a b and c for the special charecter
    command = (
        """
        create table if not exists Ratings (
        UserID int,
        a char,
        MovieID int,
        b char,
        Rating numeric,
        c char,
        time int
        )
        """
    )
    query = str.replace(command,'Ratings', ratingstablename)
    try:
        cursor = openconnection.cursor()
        # Delete the table if exists.
        # Bug fix: drop the *requested* table, not the hard-coded name
        # "Ratings" -- the old code never removed a pre-existing table when
        # a different ratingstablename was passed in.
        cursor.execute("drop table if exists " + ratingstablename)
        cursor.execute(query)
        # no need for the commit since the autocommit is set to true
        print("Table created successfully")
        # Bug fix: close the data file when done (the old code leaked the
        # open file handle).
        with open(ratingsfilepath, 'r') as f:
            cursor.copy_from(f, ratingstablename, sep=":")
        # remove the unused columns from the table
        command = (""" alter table Ratings drop column a, drop column b, drop column c, drop column time """)
        query = str.replace(command,'Ratings', ratingstablename)
        cursor.execute(query)
    except (Exception, psycopg2.Error) as error:
        print("Error while connecting to PostgreSQL", error)
    finally:
        print("********Load_Ratings Completed********")
        print("PostgresSQL Connection is close")
def rangePartition(ratingstablename, numberofpartitions, openconnection):
    """
    Split the ratings into N tables range_part0..range_part(N-1) by rating,
    dividing [0.0, 5.0] into N equal sub-ranges.

    Boundaries are recorded in RangePartitionMetadata as rows of
    (Id, MinRatingInRange, MaxRatingInRange) for later use by rangeinsert().
    Partition 0 includes its lower bound; later partitions take rows whose
    rating is strictly greater than their lower bound.

    NOTE(review): SQL text is assembled with str.replace on table names and
    numeric literals -- fragile if a boundary value appears as a substring
    elsewhere in the query. The initial insert also selects from the
    literal name "Ratings" rather than *ratingstablename*; confirm callers
    always pass 'ratings'.

    :param ratingstablename: source ratings table name
    :param numberofpartitions: number N of range partitions to create
    :param openconnection: open psycopg2 connection (autocommit assumed)
    """
    N = numberofpartitions
    connection = openconnection
    try:
        cursor = connection.cursor()
        table_list = []
        # create table list according to the partition size
        for n in range(0,N):
            table_name = 'range_part'+str(n)
            #print (table_name)
            table_list.append(table_name)
        # Lower boundary of each partition: n * (5.0 / N)
        rating_range_list = []
        for n in range(0,N):
            rating_range = n * (5.0/N)
            #print(rating_range)
            rating_range_list.append(rating_range)
        #print(table_list)
        #print(rating_range_list)
        command = (
            """
            create table if not exists RangeParitionedTable (
            UserID int,
            MovieID int,
            Rating numeric
            )
            """)
        command2 = (
            """
            create table if not exists RangePartitionMetadata (
            Id int,
            MinRatingInRange numeric,
            MaxRatingInRange numeric
            )
            """)
        command3 = (
            """ insert into RangePartitionMetadata (Id,MinRatingInRange,MaxRatingInRange) values(_Id,_MinRatingInRange,_MaxRatingInRange) """
        )
        # create the metadata table - use this table to store the range partition information
        cursor.execute(command2)
        for n in range(0,N):
            table_name = table_list[n]
            query = str.replace(command,'RangeParitionedTable', table_name)
            cursor.execute(query)
        print("Executing command - start")
        command = (
            """
            insert into range_part0
            select userid,movieid,rating
            from Ratings
            where rating >=0.0 and rating <=5.0
            """)
        left_boundery = 0.0
        if (N==1):
            # Single partition: everything goes into range_part0
            print("N=1")
            cursor.execute(command)
            insert_query = str.replace(command3,'_Id',str(0))
            insert_query = str.replace(insert_query,'_MinRatingInRange',str(0.0))
            insert_query = str.replace(insert_query,'_MaxRatingInRange',str(5.0))
            cursor.execute(insert_query)
        else:
            # Partition 0 keeps the inclusive lower bound; the '>=' is then
            # narrowed to '>' for all subsequent partitions.
            print("N > 1")
            id = 0
            print(rating_range_list)
            right_boundery = rating_range_list[1]
            query = str.replace(command,'5.0',str(right_boundery))
            #print(query)
            print("Left = ",left_boundery, " Right = ", right_boundery)
            insert_query = str.replace(command3,'_Id',str(id))
            insert_query = str.replace(insert_query,'_MinRatingInRange',str(left_boundery))
            insert_query = str.replace(insert_query,'_MaxRatingInRange',str(right_boundery))
            cursor.execute(query)
            cursor.execute(insert_query)
            query = str.replace(query,table_list[0],table_list[1])
            query = str.replace(query,'>=','>')
            # Middle partitions: slide the (left, right) window by textually
            # substituting the boundary literals in the previous query.
            for n in range (2,N):
                print("Left = ",left_boundery, " Right = ", right_boundery)
                current_right_boundery = rating_range_list[n]
                query = str.replace(query,str(right_boundery), str(current_right_boundery))
                query = str.replace(query,str(left_boundery),str(right_boundery))
                #print(query)
                cursor.execute(query)
                query = str.replace(query,table_list[n-1],table_list[n])
                left_boundery = right_boundery
                right_boundery = current_right_boundery
                insert_query = str.replace(command3,'_Id',str(n-1))
                insert_query = str.replace(insert_query,'_MinRatingInRange',str(left_boundery))
                insert_query = str.replace(insert_query,'_MaxRatingInRange',str(right_boundery))
                #print(insert_query)
                cursor.execute(insert_query)
            # the last partition
            query = str.replace(query,str(right_boundery), '5.0')
            query = str.replace(query,str(left_boundery), str(right_boundery))
            # update last entry
            left_boundery=right_boundery
            right_boundery = 5.0
            insert_query = str.replace(command3,'_Id',str(N-1))
            insert_query = str.replace(insert_query,'_MinRatingInRange',str(left_boundery))
            insert_query = str.replace(insert_query,'_MaxRatingInRange',str(right_boundery))
            cursor.execute(query)
            cursor.execute(insert_query)
    except (Exception, psycopg2.Error) as error:
        print("Error while connecting to PostgreSQL", error)
    finally:
        if (connection):
            #cursor.close()
            #connection.close()
            print("********Range_Partition Completed********")
            print("PostgresSQL Connection is close")
def roundRobinPartition(ratingstablename, numberofpartitions, openconnection):
    """
    Distribute the ratings across N tables rrobin_part0..rrobin_part(N-1)
    in round-robin order (row k goes to partition k % N).

    The pair (NumberOfPartitions, NextPartitionToWrite) is stored in
    RoundRobinParitionMetadata so roundrobininsert() can continue the
    rotation later.

    NOTE(review): rows are copied one at a time with LIMIT 1 OFFSET k and
    no ORDER BY -- row order is unspecified in SQL, and this issues one
    query per row (O(n) round trips); confirm acceptable for the dataset.

    :param ratingstablename: source ratings table name (the SQL below reads
        from the literal "Ratings" -- assumes that is the table passed in)
    :param numberofpartitions: number N of partitions to create
    :param openconnection: open psycopg2 connection (autocommit assumed)
    """
    N = numberofpartitions
    connection = openconnection
    try:
        cursor = connection.cursor()
        table_list = []
        # create table list according to the partition size
        for n in range(0,N):
            table_name = 'rrobin_part'+str(n)
            #print (table_name)
            table_list.append(table_name)
        #print(table_list)
        command = (
            """
            create table if not exists RoundRobinParitionedTable (
            UserID int,
            MovieID int,
            Rating numeric
            )
            """
        )
        # Create the partitioned tables
        for n in range(0,N):
            table_name = table_list[n]
            query = str.replace(command,'RoundRobinParitionedTable', table_name)
            cursor.execute(query)
        command1 = (
            """
            insert into rrobin_part0
            select userid,movieid,rating
            from Ratings
            """
        )
        # use this table as a metadata table to store the next partition to write to
        #
        command2 = (
            """
            create table if not exists RoundRobinParitionMetadata (
            NumberOfPartitions int,
            NextPartitionToWrite int
            )
            """
        )
        # create the metadata table
        cursor.execute(command2)
        command3 = (""" insert into RoundRobinParitionMetadata (NumberOfPartitions,NextPartitionToWrite) values(_NumberOfPartitions,_NextPartitionToWrite) """)
        if (N==1):
            # Single partition: bulk copy everything into rrobin_part0.
            # NOTE(review): NextPartitionToWrite is recorded as N (=1) here,
            # not 0; roundrobininsert() special-cases NumberOfPartitions==1
            # so this appears harmless -- confirm.
            print("N=1")
            cursor.execute(command1)
            query = str.replace(command3,'_NumberOfPartitions', str(N))
            query = str.replace(query,'_NextPartitionToWrite', str(N))
            cursor.execute(query)
        else:
            print("N>1")
            # get the size of the table. not sure need it.
            cursor.execute("select count(*) from Ratings")
            result = cursor.fetchone()
            table_size = result[0]
            command = (
                """
                insert into RoundRobinParitionedTable
                select userid,movieid,rating
                from Ratings
                limit 1
                offset _offset
                """
            )
            NextPartitionToWrite = 0
            # Copy each row individually into partition (row index % N)
            for n in range(0,table_size):
                NextPartitionToWrite = n%N
                selected_table = table_list[n%N]
                query = str.replace(command,'RoundRobinParitionedTable', selected_table)
                query = str.replace(query,'_offset', str(n))
                #print(query)
                cursor.execute(query)
            # update the metadata table
            query = str.replace(command3,'_NumberOfPartitions', str(N))
            query = str.replace(query,'_NextPartitionToWrite', str((NextPartitionToWrite+1)%N))
            cursor.execute(query)
    except (Exception, psycopg2.Error) as error:
        print("Error while connecting to PostgreSQL", error)
    finally:
        if (connection):
            #cursor.close()
            #connection.close()
            print("********RoundRobin_Partition Completed********")
            print("PostgresSQL Connection is close")
def roundrobininsert(ratingstablename, userid, itemid, rating, openconnection):
    """
    Insert one rating into the main table and into the next round-robin
    partition, then advance NextPartitionToWrite in the metadata table.

    NOTE(review): SQL is built by textual substitution of the values into
    the statement -- vulnerable to SQL injection if the inputs are ever
    untrusted; parameterized queries (cursor.execute(sql, params)) would be
    safer.

    :param ratingstablename: name of the main ratings table
    :param userid: user id of the new rating
    :param itemid: movie id of the new rating
    :param rating: rating value
    :param openconnection: open psycopg2 connection (autocommit assumed)
    """
    user_id = userid
    movie_id = itemid
    connection = openconnection
    enable_execute = True
    print("In RoundRobin_Insert Function")
    # need to check the table string ?
    # first insert to the rating table
    #
    command = (""" insert into Ratings (UserID,MovieID,Rating) values(_UserID,_MovieID,_Rating) """)
    query = str.replace(command,'Ratings', ratingstablename)
    query = str.replace(query,'_UserID',str(user_id))
    query = str.replace(query,'_MovieID',str(movie_id))
    query = str.replace(query,'_Rating',str(rating))
    print(query)
    try:
        cursor = connection.cursor()
        if enable_execute == True:
            cursor.execute(query)
        NextPartitionToWrite = 0
        NumberOfPartitions = 1
        # get the round robin metadata table
        command = (""" select * from RoundRobinParitionMetadata""")
        if enable_execute == True:
            cursor.execute(command)
            result = cursor.fetchone()
            NumberOfPartitions = result[0]
            NextPartitionToWrite = result[1]
        command = (""" insert into range_partX (UserID,MovieID,Rating) values(_UserID,_MovieID,_Rating) """)
        # handle the one partition use case.
        if (NumberOfPartitions == 1):
            NextPartitionToWrite = 0
        # The 'range_partX' placeholder is swapped for the actual
        # rrobin_part<k> table name chosen from the metadata.
        query = str.replace(command,'range_partX', ('rrobin_part'+str(NextPartitionToWrite)))
        query = str.replace(query,'_UserID',str(user_id))
        query = str.replace(query,'_MovieID',str(movie_id))
        query = str.replace(query,'_Rating',str(rating))
        if enable_execute == True:
            cursor.execute(query)
        # remove prev row since we are care only with the last row
        cursor.execute("delete from RoundRobinParitionMetadata")
        # update the next partitionto write
        NextPartitionToWrite = (NextPartitionToWrite+1)%NumberOfPartitions
        # update the metadata table
        command3 = (""" insert into RoundRobinParitionMetadata (NumberOfPartitions,NextPartitionToWrite) values(_NumberOfPartitions,_NextPartitionToWrite) """)
        query = str.replace(command3,'_NumberOfPartitions', str(NumberOfPartitions))
        query = str.replace(query,'_NextPartitionToWrite', str(NextPartitionToWrite))
        if enable_execute == True:
            # update the metadata table
            cursor.execute(query)
    except (Exception, psycopg2.Error) as error:
        print("Error while connecting to PostgreSQL", error)
    finally:
        if (connection):
            #cursor.close()
            #connection.close()
            print("********RoundRobin_Insert Completed********")
            print("PostgresSQL Connection is close")
def rangeinsert(ratingstablename, userid, itemid, rating, openconnection):
    """
    Insert one rating into the main table and into the range partition
    whose (min, max) interval in RangePartitionMetadata contains it.

    Lookup order: strictly inside an interval -> that partition; exactly on
    a min boundary -> the previous partition; exactly on a max boundary ->
    that partition.

    NOTE(review): if the rating equals the global minimum stored as
    partition 0's MinRatingInRange, the min-boundary branch computes
    selected_partition = -1 and targets the non-existent table
    "range_part-1" -- confirm such ratings cannot occur.
    NOTE(review): values are textually substituted into the SQL; use
    parameterized queries if inputs can be untrusted.

    :param ratingstablename: name of the main ratings table
    :param userid: user id of the new rating
    :param itemid: movie id of the new rating
    :param rating: rating value
    :param openconnection: open psycopg2 connection (autocommit assumed)
    """
    user_id = userid
    movie_id = itemid
    connection = openconnection
    enable_execute = True
    print ("In Range_Insert Function")
    command = (""" insert into Ratings (UserID,MovieID,Rating) values(_UserID,_MovieID,_Rating) """)
    query = str.replace(command,'Ratings', ratingstablename)
    query = str.replace(query,'_UserID',str(user_id))
    query = str.replace(query,'_MovieID',str(movie_id))
    query = str.replace(query,'_Rating',str(rating))
    try:
        cursor = connection.cursor()
        # Update the rating table
        cursor.execute(query)
        # find to which parition we should insert the new record based on the rating
        # handle the following use cases:
        command1 = (""" select * from RangePartitionMetadata where rating > MinRatingInRange and rating < MaxRatingInRange""")
        query = str.replace(command1,'rating', str(rating))
        if enable_execute == True:
            selected_partition = 0
            MinRatingInRange = 0
            MaxRatingInRange = 0
            # Update the rating table
            cursor.execute(query)
            result = cursor.fetchone()
            if (result):
                print("Rating is between bounderies")
                Id = result[0]
                MinRatingInRange = result[1]
                MaxRatingInRange = result[2]
                selected_partition = Id
            else:
                command2 = (""" select * from RangePartitionMetadata where rating = MinRatingInRange""")
                query = str.replace(command2,'rating', str(rating))
                cursor.execute(query)
                result = cursor.fetchone()
                if (result):
                    print("Rating is on min boundery. Save the record in the previous partition")
                    Id = result[0]
                    MinRatingInRange = result[1]
                    MaxRatingInRange = result[2]
                    selected_partition = Id -1
                    # save the rating in the previous partition
                else:
                    command3 = (""" select * from RangePartitionMetadata where rating = MaxRatingInRange""")
                    query = str.replace(command3,'rating', str(rating))
                    cursor.execute(query)
                    result = cursor.fetchone()
                    if (result):
                        print("Rating is on max boundery. Save the record in the current partition")
                        Id = result[0]
                        MinRatingInRange = result[1]
                        MaxRatingInRange = result[2]
                        selected_partition = Id
            # now update the partitioned tabled
            table='range_part'+str(selected_partition)
            query = str.replace(command,'Ratings', table)
            query = str.replace(query,'_UserID',str(user_id))
            query = str.replace(query,'_MovieID',str(movie_id))
            query = str.replace(query,'_Rating',str(rating))
            cursor.execute(query)
    except (Exception, psycopg2.Error) as error:
        print("Error while connecting to PostgreSQL", error)
    finally:
        if (connection):
            #cursor.close()
            #connection.close()
            print("PostgresSQL Connection is close")
            print("********Range_Insert Completed********")
def createDB(dbname='dds_assignment'):
    """
    We create a DB by connecting to the default user and database of Postgres
    The function first checks if an existing database exists for a given name, else creates it.
    :return:None
    """
    # Connect to the default database
    con = getOpenConnection(dbname='postgres')
    # CREATE DATABASE cannot run inside a transaction, hence autocommit
    con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    cur = con.cursor()
    # Check if an existing database with the same name exists
    cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\'%s\'' % (dbname,))
    count = cur.fetchone()[0]
    if count == 0:
        cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database
    else:
        # Python 2 print statement -- this module targets Python 2.7
        print 'A database named {0} already exists'.format(dbname)
    # Clean up
    cur.close()
    con.close()
def deletepartitionsandexit(openconnection):
    """Drop every table in the public schema (with CASCADE).

    :param openconnection: open psycopg2 connection (autocommit assumed)
    """
    cur = openconnection.cursor()
    cur.execute("SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'")
    # Collect names first: dropping while iterating the same cursor would
    # invalidate the result set.
    l = []
    for row in cur:
        l.append(row[0])
    for tablename in l:
        cur.execute("drop table if exists {0} CASCADE".format(tablename))
    cur.close()
def deleteTables(ratingstablename, openconnection):
    """Drop the named table, or every public table when given 'ALL'.

    Rolls back on database/IO errors. Uses Python 2 except syntax -- this
    module targets Python 2.7.
    NOTE(review): if openconnection.cursor() itself raises, `cursor` is
    unbound when the finally clause runs, causing a NameError -- confirm
    whether that path matters.
    """
    try:
        cursor = openconnection.cursor()
        if ratingstablename.upper() == 'ALL':
            cursor.execute("SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'")
            tables = cursor.fetchall()
            for table_name in tables:
                cursor.execute('DROP TABLE %s CASCADE' % (table_name[0]))
        else:
            cursor.execute('DROP TABLE %s CASCADE' % (ratingstablename))
        openconnection.commit()
    except psycopg2.DatabaseError, e:
        if openconnection:
            openconnection.rollback()
        print 'Error %s' % e
    except IOError, e:
        if openconnection:
            openconnection.rollback()
        print 'Error %s' % e
    finally:
        if cursor:
            cursor.close()
ebedb3b80e419146e2e9fb79d4642337cc39f4c4 | Python | fbrute/lovy | /hymarch22/lovysplit/selectfiledialog.py | UTF-8 | 1,338 | 3 | 3 | [] | no_license | import tkinter as tk
from tkinter import ttk
from tkinter.filedialog import askopenfilename
from pathlib import Path
class SelectFileDialog(tk.Tk):
    """Small Tk window with buttons to open a file picker or quit.

    The chosen path is stored on the *class* attribute ``result`` (a
    pathlib.Path) so it remains readable after the window is destroyed.
    NOTE(review): the constructor calls self.mainloop(), so instantiating
    this class blocks until the window is closed; ``result`` is only set
    if the user actually picked a file.
    """
    def __init__(self):
        super().__init__()
        # create the root window
        self.geometry('200x100')
        self.resizable(False, False)
        self.title('Select a file')
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        # open button
        open_button = ttk.Button(
            self,
            text='Open Files',
            command=self.select_files
        )
        open_button.pack(expand=True)
        # quit button destroys the window, ending mainloop below
        close_button = ttk.Button(
            self,
            text='Quit Dialog',
            command=self.destroy
        )
        close_button.pack(expand=True)
        self.mainloop()
        # self.destroy()
    def select_files(self):
        """Show the native open-file dialog and record the chosen path."""
        filetypes = (
            ('text files', '*.xlsx'),
            ('All files', '*.*')
        )
        filename = askopenfilename(
            title="Open scenarios file",
            initialdir="~/Documents/trafin/lovy/data/retrostat",
            filetypes=filetypes
        )
        # Stored on the class so main() can read it after the dialog closes
        type(self).result = Path(filename)
def run():
    """Launch the file-selection dialog and return the chosen path.

    Bug fix: the original called main() but discarded its return value,
    so run() always returned None.
    """
    return main()
def main():
    """Open the modal file dialog and return the selected path.

    Constructing SelectFileDialog blocks inside its own mainloop() until
    the window is closed; the chosen path is then read from the class
    attribute ``result``.
    NOTE(review): ``result`` is only set by select_files(); quitting the
    dialog without picking a file raises AttributeError here.
    """
    SelectFileDialog()
    return(SelectFileDialog.result)
# Script entry point: run the dialog once and print the chosen path.
if __name__ == "__main__":
    print(main())
| true |
f84db34c8a05af6ec46e1dfb698bd48e6798fcb1 | Python | arcaputo3/algorithms | /algos_and_data_structures/nbit_addition.py | UTF-8 | 436 | 3.9375 | 4 | [] | no_license | # NBIT ADDITION: Adds two n length binary integers represented as arrays of 0's and 1's
# Input: Two binary arrays arr1, arr2
# Output: Addition of arr1 and arr2
def nbit_add(arr1, arr2):
    """Add two n-bit binary integers stored as bit arrays and return the
    (n+1)-bit sum, least-significant bit first.

    Bug fix: the original had a typo (`arr1[i] == 1 or arr1[i] == 1` never
    consulted arr2) and did not propagate carries, so e.g.
    [1,1,1] + [1,1,1] produced a wrong result. This version is a standard
    ripple-carry adder.

    :param arr1: list of 0/1 ints, LSB first
    :param arr2: list of 0/1 ints, same length as arr1
    :return: list of 0/1 ints of length n+1, LSB first
    """
    n = len(arr1)
    arr = [0] * (n + 1)
    carry = 0
    for i in range(n):
        total = arr1[i] + arr2[i] + carry
        arr[i] = total % 2     # sum bit
        carry = total // 2     # carry into the next position
    arr[n] = carry             # final carry-out becomes the top bit
    return arr
print(nbit_add([1,1,1],[1,1,1]))  # quick demo; runs whenever the module is executed
| true |
b10dab4ea9a799d1a5d3e0d11d5bb70f4d1f3a32 | Python | ovsartem/json_navigator | /main.py | UTF-8 | 1,933 | 3.671875 | 4 | [
"MIT"
] | permissive | import json
def get_info(path):
    """
    Load and return the parsed contents of the JSON file at *path*.

    Bug fix: the original ignored *path* and always opened the hard-coded
    file "frienfs_list_Obama.json"; the parameter is now honored (existing
    callers already pass that same filename, so behavior is unchanged for
    them).
    """
    with open(path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    return data
def dictionary(element):
    """
    Interactively navigate a dict: list its keys, prompt the user for a
    1-based key number on stdin, and return the chosen value.

    If the chosen value is a list, its *first* element is returned instead
    of the list itself. NOTE(review): that skips _list() navigation for
    values nested in dicts -- confirm intended.
    """
    all_elements = list(element.keys())
    print("This object is a dictionary. Here are all keys.")
    for i in range(len(all_elements)):
        print(f"{i+1}) {all_elements[i]}")
    # "humber" typo is in the runtime prompt string; left as-is here
    choice = int(input("Type the humber of the key: "))
    if isinstance(element[all_elements[choice-1]], list):
        return element[all_elements[choice-1]][0]
    return element[all_elements[choice-1]]
def _list(element):
    """
    Interactively navigate a list (or tuple): show all elements, prompt
    for a 1-based element number on stdin, and return the chosen element.

    NOTE(review): the banner says "dictionary" but this handles lists --
    the message text is runtime output, left unchanged here.
    """
    print("This object is a dictionary. Here are all elements.")
    for i in range(len(element)):
        print(f"{i+1}) {element[i]}")
    choice = int(input("Type the humber of the element: "))
    return element[choice-1]
def other_elements(element):
    """
    Handle a leaf value (str/int/float): display it and return the 111
    sentinel that tells the caller the traversal has reached the bottom.
    """
    print(element)
    sentinel = 111
    return sentinel
def main(element):
    """
    Route *element* to the handler matching its type and return that
    handler's result: a nested value for containers, or the 111 sentinel
    for leaf values.
    """
    if isinstance(element, dict):
        handler = dictionary
    elif isinstance(element, (list, tuple)):
        handler = _list
    else:
        handler = other_elements
    return handler(element)
def _work():
    """
    Cycles the main function. Here you can change the path of json file.

    Repeatedly descends into the loaded JSON via main() until a leaf is
    reached (signalled by the 111 sentinel returned by other_elements).
    """
    print("This is json file navigator. You can leave whenever you want.")
    data = get_info("frienfs_list_Obama.json")
    stop = False
    while stop != True:
        data = main(data)
        # 111 is the leaf-value sentinel -- end of this navigation session
        if data == 111:
            stop = True
if __name__ == '__main__':
    # Top-level loop: run the navigator repeatedly until the user answers
    # "-" to the repeat prompt (any other answer repeats).
    start = True
    while start != False:
        _work()
        move = input("Do you want to repeat? +/: ")
        if move == "-":
            start = False
| true |
ab34c7e2adbd68769e2269fe86c006bc5f942c65 | Python | jadsonlucio/Machine-learning-ufal-course | /activities/week-3/src/countries_data_collection.py | UTF-8 | 1,247 | 2.734375 | 3 | [] | no_license | import requests
from time import sleep
API_URL = "https://restcountries.eu/rest/v2/name/"
def get_countries_info(country_names):
    """
    Fetch country metadata from the restcountries.eu REST API.

    :param country_names: iterable of country names (duplicates are
        fetched only once)
    :return: dict mapping each name to the API's country object (a dict);
        names whose HTTP request failed map to {}; ambiguous names with no
        exact/translated match map to the raw response list.
    """
    responses = {}
    for country_name in country_names:
        if country_name not in responses:
            print(f"{API_URL}{country_name}")
            response = requests.get(f"{API_URL}{country_name}")
            if response.status_code == 200:
                response = response.json()
                if len(response) == 1:
                    # Single unambiguous match
                    responses[country_name] = response[0]
                else:
                    # Several candidates: pick the exact-name or translated one
                    for obj in response:
                        if obj["name"] == country_name or (country_name in obj["translations"]):
                            responses[country_name] = obj
                            break
                    else:
                        # No candidate matched; keep the whole list for inspection
                        print(f"error {country_name}, {response}")
                        responses[country_name] = response
            else:
                print(response.status_code)
                print(country_name)
                print(response.text)
                responses[country_name] = {}
            # Throttle to be polite to the public API
            sleep(0.5)
    return responses
def
| true |
7efd66dac8ba66100304ecfddf3ae98df0268cb8 | Python | embarktrucks/sour | /sour/common/utils/enum.py | UTF-8 | 819 | 3.265625 | 3 | [
"Zlib"
class enum(object):
    """Lightweight enumeration with a two-way name/value mapping.

    Positional items are numbered from 0 in order; keyword items take the
    given value. Each member is also exposed as an attribute:
    ``e = enum('A', 'B', C=5); e.A == 0; e.C == 5``.

    Bug fix: removed the dead class-level ``__by_values = {}`` -- a mutable
    class attribute that was always shadowed by the instance dict created
    in __init__ and misleadingly suggested state shared across instances.
    """
    def __init__(self, *items, **kwitems):
        # name -> value and value -> name lookup tables (per instance)
        self.__by_names = {}
        self.__by_values = {}
        for value, item in enumerate(items):
            self.__by_names[item] = value
            self.__by_values[value] = item
            setattr(self, item, value)
        for item, value in kwitems.items():
            self.__by_names[item] = value
            self.__by_values[value] = item
            setattr(self, item, value)
    def by_value(self, value):
        """Return the member name for *value*, or None if unknown."""
        return self.__by_values.get(value, None)
    def has_value(self, value):
        """Return True if some member has this *value*."""
        return value in self.__by_values
    def by_name(self, item):
        """Return the value for member name *item*, or None if unknown."""
        return self.__by_names.get(item, None)
    def contains(self, item):
        """Return True if *item* is a member name."""
        return item in self.__by_names
| true |
0cf64e667dd6a7e9d75d72d095f9c3bf410a37b3 | Python | dikopylov/Coursera.ML | /Week 2/Similarity-basedClassifier/knn.py | UTF-8 | 1,367 | 2.734375 | 3 | [] | no_license | from sklearn.model_selection import KFold, cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import scale
import pandas
# --- kNN model selection on the wine dataset (runs at import/execution) ---
# NOTE(review): `max`, `k`, `file` and `knn` shadow builtins / the module
# concept; also the grade files are opened in append mode, so repeated runs
# accumulate lines.
data = pandas.read_csv('../wine.data', header=None)
data_class = data[0]  # column 0 is the class label
data_attribute = data.drop([0], axis=1)  # remaining columns are features
kf = KFold(n_splits=5, shuffle=True, random_state=42)
max = 0
k = 0;
file = open("gradeBeforeScale.txt", "a")
# Sweep k = 1..49 on the raw (unscaled) features
for i in range(1, 50):
    model = KNeighborsClassifier(n_neighbors=i)
    knn = model.fit(data_attribute, data_class)
    # Compute quality (cross-validated accuracy)
    grade = cross_val_score(estimator=knn, X=data_attribute, y=data_class, cv=kf)
    if max < grade.mean():
        max = grade.mean()
        k = i
    file.write(str(grade) + ' ' + str(round(grade.mean(), 2)) + '\n')
file.close()
file = open("KBeforeScale.txt", "w")
file.write(str(k))
file.close()
# Repeat the sweep after standardizing the features.
# NOTE(review): `max` and `k` are NOT reset here, so KAfterScale.txt holds
# the best k across *both* sweeps, which may be the pre-scale one -- confirm
# whether a per-sweep best was intended.
data_attribute = scale(data_attribute)
file = open("gradeAfterScale.txt", "a")
for i in range(1, 50):
    model = KNeighborsClassifier(n_neighbors=i)
    knn = model.fit(data_attribute, data_class)
    grade = cross_val_score(estimator=knn, X=data_attribute, y=data_class, cv=kf)
    if max < grade.mean():
        max = grade.mean()
        k = i
    file.write(str(grade) + ' ' + str(round(grade.mean(), 2)) + '\n')
file.close()
file = open("KAfterScale.txt", "w")
file.write(str(k))
file.close()
73b14248d96ff055d9f472b1200b28a134afddd9 | Python | marekhanus/spja | /labs/04/tasks.py | UTF-8 | 7,310 | 4.28125 | 4 | [] | no_license | import math
class Vector:
"""
Implement the methods below to create an immutable 3D vector class.
Each implemented method will award you half a point.
Magic methods cheatsheet: https://rszalski.github.io/magicmethods
"""
"""
Implement a constructor that takes three coordinates (x, y, z) and stores
them as attributes with the same names in the Vector.
Default value for all coordinates should be 0.
Example:
v = Vector(1.2, 3.5, 4.1)
v.x # 1.2
v = Vector(z=1) # == Vector(0, 0, 1)
"""
def __init__(self, x=0.0, y=0.0, z=0.0):
self.x = x
self.y = y
self.z = z
"""
Implement method `length` that returns the length of the vector
(https://chortle.ccsu.edu/VectorLessons/vch04/vch04_8.html).
Example:
Vector(2, 3, 4).length() # 5.38...
"""
def length(self):
return math.sqrt(self.x * self.x + self.y * self.y + self.z * self.z)
"""
Implement vector addition and subtraction using `+` and `-` operators.
Both operators should return a new vector and not modify its operands.
If the second operand isn't a vector, raise ValueError.
Example:
Vector(1, 2, 3) + Vector(4, 5, 6) # Vector(5, 7, 8)
Vector(1, 2, 3) - Vector(4, 5, 6) # Vector(-3, -3, -3)
Hint:
You can use isinstance(object, class) to check whether `object` is an instance of `class`.
"""
def __add__(self, other):
if not isinstance(other, Vector):
raise ValueError
return Vector(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
if not isinstance(other, Vector):
raise ValueError
return Vector(self.x - other.x, self.y - other.y, self.z - other.z)
"""
Implement vector negation using the unary `-` operator.
Return a new vector, don't modify the input vector.
Example:
-Vector(1, 2, 3) # Vector(-1, -2, -3)
"""
def __neg__(self):
return Vector(- self.x, - self.y, - self.z)
"""
Implement multiplication and division by scalar using `*` and `/` operators.
Both operators should return a new Vector and not modify the input vector.
If the second operand isn't `int` or `float`, raise ValueError.
Example:
Vector(1, 2, 3) * 4 # Vector(4, 8, 12)
Vector(2, 4, 6) / 2 # Vector(1, 2, 3)
Hint:
Division with the `/` operator uses the magic method `__truediv__` in Python 3.
"""
def __mul__(self, other: float):
if not isinstance(other, int) and not isinstance(other, float):
raise ValueError
return Vector(self.x * other, self.y * other, self.z * other)
def __truediv__(self, other: float):
if not isinstance(other, int) and not isinstance(other, float):
raise ValueError
return Vector(self.x / other, self.y / other, self.z / other)
"""
Implement the `==` comparison operator for Vector that returns True if both vectors have the same attributes.
If the second operand isn't a vector, return False.
Example:
Vector(1, 1, 1) == Vector(1, 1, 1) # True
Vector(1, 1, 1) == Vector(2, 1, 1) # False
Vector(1, 2, 3) == 5 # False
"""
def __eq__(self, other):
if not isinstance(other, Vector):
return False
if self.x != other.x or self.y != other.y or self.z != other.z:
return False
return True
"""
Implement *property* `unit` that will return the unit vector of this vector
(vector with the same direction and length one).
If the vector has length zero, return a zero vector (Vector(0, 0, 0)).
Example:
Vector(0, 8, 0).unit # Vector(0, 1, 0)
"""
@property
def unit(self):
length = self.length()
if length == 0:
return Vector(0.0, 0.0, 0.0)
return self / length
"""
Implement string representation of Vector in the form `(x, y, z)`.
Example:
str(Vector(1, 2, 3)) # (1, 2, 3)
print(Vector(0, 0, 0)) # (0, 0, 0)
"""
def __str__(self):
return "({}, {}, {})".format(self.x, self.y, self.z)
"""
Implement indexing for the vector, both for reading and writing.
If the index is out of range (> 2), raise IndexError.
Example:
v = Vector(1, 2, 3)
v[0] # 1
v[2] # 3
v[1] = 5 # v.y == 5
v[10] # raises IndexError
"""
def __setitem__(self, key, value):
"""
if key == 0:
self.x = value
elif key == 1:
self.y = value
elif key == 2:
self.z = value
else:
raise IndexError
"""
keys = ['x', 'y', 'z']
setattr(self, keys[key], value)
def __getitem__(self, key):
"""
if key == 0:
return self.x
elif key == 1:
return self.y
elif key == 2:
return self.z
else:
raise IndexError
"""
keys = ['x', 'y', 'z']
return getattr(self, keys[key])
"""
Implement the iterator protocol for the vector.
Hint:
Use `yield`.
Example:
v = Vector(1, 2, 3)
for x in v:
print(x) # prints 1, 2, 3
"""
def __iter__(self):
yield self.x
yield self.y
yield self.z
class LowerCaseDecorator:
    """
    Decorator (wrapper) around a writable file object that lowercases all
    text passed to ``write`` and ``writelines`` before forwarding it to
    the wrapped file. Implements the previously-unwritten exercise spec.

    Example:
        with open("file.txt", "w") as f:
            decorated = LowerCaseDecorator(f)
            decorated.write("Hello World\n")
            decorated.writelines(["Nice to MEET\n", "YOU"])
        # file.txt now contains:
        # hello world
        # nice to meet
        # you
    """

    def __init__(self, wrapped):
        # The underlying file-like object all writes are forwarded to.
        self._wrapped = wrapped

    def write(self, text):
        """Write *text* to the wrapped file in lower case."""
        return self._wrapped.write(text.lower())

    def writelines(self, lines):
        """Write every string in *lines* to the wrapped file in lower case."""
        self._wrapped.writelines(line.lower() for line in lines)
class BonusObservable:
    """
    Minimal observer-pattern subject (implements the previously-unwritten
    exercise spec).

    ``subscribe`` registers a callable and returns a zero-argument function
    that unsubscribes it; ``notify`` calls every current subscriber with
    whatever arguments it was given.
    """

    def __init__(self):
        # Registered subscriber callables, in subscription order.
        self._subscribers = []

    def subscribe(self, subscriber):
        """
        Add subscriber to collection of subscribers.
        Return a function that will remove this subscriber from the collection when called.
        """
        self._subscribers.append(subscriber)

        def unsubscribe():
            # Safe to call more than once: a second removal is a no-op.
            if subscriber in self._subscribers:
                self._subscribers.remove(subscriber)

        return unsubscribe

    # Backward-compatibility alias: the original class (mis)named this
    # method "subcribe", while its own docstring used "subscribe".
    subcribe = subscribe

    def notify(self, *args, **kwargs):
        """
        Pass all parameters given to this function to all stored subscribers by calling them.
        """
        # Iterate over a copy so a subscriber may unsubscribe during notify.
        for subscriber in list(self._subscribers):
            subscriber(*args, **kwargs)
| true |
31456343538c2604bcd6a3d18c7fc0e5de1c9f5f | Python | CgnRLAgent/cog_ml_tasks | /gym_cog_ml_tasks/envs/copy_tasks/copy_repeat_env.py | UTF-8 | 4,731 | 3.5625 | 4 | [
"GPL-3.0-only"
] | permissive | """
simple copy-repeat task:
    Copy the input sequence multiple times, reversing it every other repetition, as output. For example:
(repeat time: 3)
Input: ABCDE
Ideal output: ABCDEEBCDAABCDE
At each time step a character is observed, and the agent should respond a char.
The action(output) is chosen from a char set e.g. {A,B,C,D,E}.
After the last input char is observed, an empty symbol will be observed for each step before the episode is end.
The episode ends when the agent respond R*X times, where X is the input seq length and R is the repeat time.
AUTHOR: Zenggo
DATE: 04.2020
"""
from gym import Env
from gym.spaces import Discrete
from gym.utils import colorize, seeding
import numpy as np
import sys
import string
class Copy_Repeat_ENV(Env):
    # Pool of candidate characters; observation/action indices map into it.
    ALPHABET = list(string.ascii_uppercase[:26])
    def __init__(self, n_char=5, size=6, repeat=3):
        """
        :param n_char: number of different chars in inputs, e.g. 3 => {A,B,C}
        :param size: the length of input sequence
        :param repeat: the expected repeat times of the target output
        """
        self.n_char = n_char
        self.size = size
        self.repeat = repeat
        # observation (characters)
        self.observation_space = Discrete(n_char+1) # +1: empty symbol, whose index is n_char
        # action
        self.action_space = Discrete(n_char)
        # states of an episode
        self.position = None
        self.last_action = None
        self.last_reward = None
        self.episode_total_reward = None
        self.input_str = None
        self.target_str = None
        self.output_str = None
        self.np_random = None
        self.seed()
        self.reset()
    @property
    def input_length(self):
        # Length of the current episode's input sequence.
        return len(self.input_str)
    @property
    def target_length(self):
        # The agent must answer `repeat` copies of the input.
        return self.input_length * self.repeat
    def seed(self, seed=None):
        """Seed the NumPy RNG (gym convention); returns the list of used seeds."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def reset(self):
        """Start a new episode and return the first observation (a char index)."""
        self.position = 0
        self.last_action = None
        self.last_reward = None
        self.episode_total_reward = 0.0
        self.input_str, self.target_str = self._generate_input_target()
        self.output_str = ''
        obs_char, obs_idx = self._get_observation()
        return obs_idx
    def step(self, action):
        """Consume one action (a character index) and advance the episode.

        Reward is +1.0 when the action matches the target character at the
        current position, -1.0 otherwise. The episode is done once the agent
        has responded target_length times; `info` carries the expected
        action under "target_act". After the episode ends, obs is None.
        """
        assert self.action_space.contains(action)
        assert 0 <= self.position < self.target_length
        target_act = self.ALPHABET.index(self.target_str[self.position])
        reward = 1.0 if action == target_act else -1.0
        self.last_action = action
        self.last_reward = reward
        self.episode_total_reward += reward
        self.output_str += self.ALPHABET[action]
        self.position += 1
        if self.position < self.target_length:
            done = False
            _, obs = self._get_observation()
        else:
            done = True
            obs = None
        info = {"target_act": target_act}
        return obs, reward, done, info
    def render(self, mode='human'):
        """Pretty-print episode state to stdout (correct output chars in green,
        wrong ones in red). The `mode` argument is currently ignored."""
        outfile = sys.stdout # TODO: other mode
        pos = self.position - 1
        o_str = ""
        if pos > -1:
            for i, c in enumerate(self.output_str):
                color = 'green' if self.target_str[i] == c else 'red'
                o_str += colorize(c, color, highlight=True)
        outfile.write("=" * 20 + "\n")
        outfile.write("Length : " + str(self.input_length) + "\n")
        outfile.write("T-Length : " + str(len(self.target_str)) + "\n")
        outfile.write("Input : " + self.input_str + "\n")
        outfile.write("Target : " + self.target_str + "\n")
        outfile.write("Output : " + o_str + "\n")
        if self.position > 0:
            outfile.write("-" * 20 + "\n")
            outfile.write("Current reward: %.2f\n" % self.last_reward)
            outfile.write("Cumulative reward: %.2f\n" % self.episode_total_reward)
        outfile.write("\n")
        return
    def _generate_input_target(self):
        """Sample a random input string and build its repeated/reversed target."""
        input_str = ""
        for i in range(self.size):
            c = self.np_random.choice(self.ALPHABET[:self.n_char])
            input_str += c
        target_str = ""
        for i in range(self.repeat):
            if i % 2 == 1:
                # Every other repetition is reversed.
                target_str += input_str[::-1]
            else:
                target_str += input_str
        return input_str, target_str
    def _get_observation(self, pos=None):
        """Return (char, index) observed at *pos* (defaults to current position);
        past the end of the input the empty symbol (index n_char) is observed."""
        if pos is None:
            pos = self.position
        if pos >= self.input_length:
            obs_char = ''
            obs_idx = self.n_char
        else:
            obs_char = self.input_str[pos]
            obs_idx = self.ALPHABET.index(obs_char)
        return obs_char, obs_idx
de48841a8611853a6679c69c1cbe0ebce15c8e6f | Python | mahir-d/Solved-LeetCode-problems | /insert-delete-getrandom-o1/insert-delete-getrandom-o1.py | UTF-8 | 1,336 | 4.1875 | 4 | [] | no_license | class RandomizedSet:
def __init__(self):
"""
Initialize your data structure here.
"""
self.my_dict = dict()
self.my_arr = []
def insert(self, val: int) -> bool:
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
"""
if val in self.my_dict:
return False
self.my_arr.append(val)
self.my_dict[val] = len(self.my_arr) - 1
return True
def remove(self, val: int) -> bool:
"""
Removes a value from the set. Returns true if the set contained the specified element.
"""
if val not in self.my_dict:
return False
del self.my_dict[val]
return True
def getRandom(self) -> int:
"""
Get a random element from the set.
"""
while True:
random_num = random.choice(self.my_arr)
if random_num in self.my_dict:
return random_num
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom() | true |
62a80b379d81fdc8237ee78b0bff133a41f38c97 | Python | sabrina-boby/practice_some-python | /for_loop-2.py | UTF-8 | 96 | 3.296875 | 3 | [] | no_license |
# Read an upper bound n and print the sum 1 + 2 + ... + n.
n = int(input("enter tha last number "))
# Built-in sum() over a range replaces the manual accumulator loop and
# avoids shadowing the built-in name `sum`.
total = sum(range(1, n + 1))
print(total)
9bbc6e4f7744627c84690eeb31eb1383b98aa6a9 | Python | maiacodes/school-shit | /Challanges/c13.py | UTF-8 | 109 | 3.765625 | 4 | [] | no_license | num = input("Enter a number under 20? ")
# Anything above 19 is rejected; otherwise thank the user.
print("Too high!" if int(num) > 19 else "Thank you")
6570ba01b57cbea01d54db715d751a9f48dfbc92 | Python | darkhader/LTU15 | /20201/image-processing/BaiTapLon/format.py | UTF-8 | 793 | 3.015625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import cv2 as cv2
import argparse
import os
def main():
    """Convert the image given by --input_file into the --format type.

    The converted file is written to ./data/format.<format>; the output
    format is inferred by OpenCV from that file extension.
    """
    argparser = argparse.ArgumentParser()
    argparser.add_argument(
        '--input_file',
        default='./data/lenna.png',
        help='Image to convert format')
    argparser.add_argument(
        '--format',
        default='jpg',
        help='Format type: jpg, png, ...')
    args, extra_args = argparser.parse_known_args()
    input_file = args.input_file
    print('Input file: ' + input_file)
    if not os.path.isfile(input_file):
        print('Error: File does not exist!')
        return
    # Open image
    img = cv2.imread(input_file)
    # cv2.imread returns None (instead of raising) for unreadable/corrupt
    # images; bail out with a clear message rather than crashing in imwrite.
    if img is None:
        print('Error: Could not read image!')
        return
    # Save as format
    cv2.imwrite('./data/format.' + args.format, img)
main()
| true |
5febea2331437d649358ab7d04923c209606698a | Python | monsterone/automation_wg | /framework/testdemo/test_driver_add_fix.py | UTF-8 | 5,363 | 2.625 | 3 | [] | no_license | from time import sleep
from selenium import webdriver
from selenium.webdriver import ActionChains
# http://47.108.71.92
# UI smoke test (Selenium): log into the device-management console and open
# the "add device type" dialog. The triple-quoted blocks below are disabled
# test variants kept by the original author.
driver = webdriver.Chrome()
# driver = webdriver.Firefox()
driver.implicitly_wait(10)
driver.maximize_window()
driver.get('http://192.168.1.192:9000/index.html')
driver.find_element_by_xpath('/html/body/div/div/form/div[2]/div/div[1]/input').send_keys('admin')
driver.find_element_by_xpath('html/body/div/div/form/div[3]/div/div/input').send_keys('123456')
driver.find_element_by_xpath('/html/body/div/div/form/div[4]/div/button').click()
sleep(1)
# Click the "add device type" button
driver.find_element_by_css_selector('#device-type-container > div.top-option-area > div.btn-area > button > span').click()
# ################### case: normal successful submit =====================
'''
#输入设备类型名称
driver.find_element_by_xpath('//*[@id="device-type-container"]/div[3]/div/div[2]/form/div[1]/div/div/input').send_keys("hello")
#选择父、子设备
driver.find_element_by_xpath('//*[@id="device-type-container"]/div[3]/div/div[2]/form/div[2]/div/div/div/input').click()
#父设备
# element=driver.find_element_by_xpath('/html/body/div[3]/div[1]/div[1]/ul/li[1]/span')
#子设备
# element=driver.find_element_by_xpath('/html/body/div[3]/div[1]/div[1]/ul/li[2]/span')
#父设备新css######
element=driver.find_element_by_css_selector('li.el-select-dropdown__item:nth-child(1) > span:nth-child(1)')
#子设备新
# element=driver.find_element_by_css_selector('li.el-select-dropdown__item:nth-child(2) > span:nth-child(1)')
ActionChains(driver).move_to_element(element).click(element).perform()
#登录按钮
# driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/section/div[1]/div[3]/div/div[3]/span/button[1]').click()
# driver.find_element_by_css_selector('.save-button').click()
q1=driver.find_element_by_css_selector('.save-button')
ActionChains(driver).move_to_element(q1).click(q1).perform()
#获取登录成功文本值
# text1=driver.find_element_by_css_selector('.el-message__content').text
# print(text1)
'''
# ############# case: required values left blank =====================
'''
##1.设备不选
#输入设备类型名称
driver.find_element_by_xpath('//*[@id="device-type-container"]/div[3]/div/div[2]/form/div[1]/div/div/input').send_keys("kkkk")
#登录按钮
driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/section/div[1]/div[3]/div/div[3]/span/button[1]').click()
#提示值
text2 = driver.find_element_by_css_selector('.el-form-item__error').text
print(text2) #请选择所属类型
##2.设备名不填
#选择父、子设备
driver.find_element_by_xpath('//*[@id="device-type-container"]/div[3]/div/div[2]/form/div[2]/div/div/div/input').click()
#父设备
element=driver.find_element_by_xpath('/html/body/div[3]/div[1]/div[1]/ul/li[1]/span')
#子设备
# element=driver.find_element_by_xpath('/html/body/div[3]/div[1]/div[1]/ul/li[2]/span')
ActionChains(driver).move_to_element(element).click(element).perform()
#登录按钮
driver.find_element_by_css_selector('.save-button').click()
#提示值
text2 = driver.find_element_by_css_selector('.el-form-item__error').text
print(text2) #请输入设备类型名称
'''
# element1=driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/section/div[1]/div[2]/div[2]/div/div[3]/table/tbody/tr/td[3]/div/div/svg[1]')
# element1=driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/section/div[1]/div[2]/div[2]/div/div[3]/table/tbody/tr/td[3]/div/div/*[name()="svg"][1]')
# ActionChains(driver).click(element1).perform()
# element2=driver.find_element_by_xpath('//div[@class="option-area"]/*[name()="svg"][1]')
# ActionChains(driver).click(element2).perform()
#
# driver.find_element_by_xpath('//*[@id="device-type-container"]/div[3]/div/div[2]/form/div[1]/div/div/input').clear()
# sleep(2)
# clear the selected type
# p1=driver.find_element_by_css_selector('.el-select__caret')
# ActionChains(driver).move_to_element(p1).click(p1).perform()
# choose parent / child device
# driver.find_element_by_xpath('//*[@id="device-type-container"]/div[3]/div/div[2]/form/div[2]/div/div/div/input').click()
# parent device (not usable when editing)
# element=driver.find_element_by_xpath('/html/body/div[3]/div[1]/div[1]/ul/li[1]/span')
# child device
# element=driver.find_element_by_xpath('/html/body/div[3]/div[1]/div[1]/ul/li[2]/span')
# # parent device (new layout)
# # element=driver.find_element_by_xpath('/html/body/div[4]/div[1]/div[1]/ul/li[1]/span')
# # child device (new layout)
# element=driver.find_element_by_xpath('/html/body/div[4]/div[1]/div[1]/ul/li[2]/span')
#css
# parent device, new css ######
# # element=driver.find_element_by_css_selector('li.el-select-dropdown__item:nth-child(1) > span:nth-child(1)')
# # child device (new css)
# element=driver.find_element_by_css_selector('li.el-select-dropdown__item:nth-child(2) > span:nth-child(1)')
#
# ActionChains(driver).move_to_element(element).click(element).perform()
#
# # login (save) button
# driver.find_element_by_css_selector('.save-button').click()
'''
#登录按钮
driver.find_element_by_css_selector('.save-button').click()
# driver.find_element_by_xpath('//*[@id="device-type-container"]/div[3]/div/div[2]/form/div[1]/div/div/input').clear()
# sleep(2)
# # b1=driver.find_element_by_css_selector('td.el-table_1_column_1 > div:nth-child(1)').text
# b1=driver.find_element_by_css_selector('td.el-table_1_column_2 > div:nth-child(1)').text
# print(b1)
'''
| true |
3e65845c72f2143f2d7bf33e6dc3b52edda375bd | Python | anhsirksai/python-raxcli | /raxcli/utils.py | UTF-8 | 1,780 | 2.9375 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2013 Rackspace
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Names exported by `from raxcli.utils import *`.
__all__ = [
    'get_enum_as_dict'
]
def get_enum_as_dict(cls, reverse=False, friendly_names=False):
    """
    Convert an "enum" class to a dict whose keys are the enum names and
    whose values are the enum values.

    @param cls: Enum class to operate on.
    @type cls: C{class}
    @param reverse: True to swap key and value, so the dict maps enum
    value -> enum name.
    @type reverse: C{bool}
    @param friendly_names: True to make the enum name value "user friendly"
    (underscores replaced by spaces, title-cased).
    @type friendly_names: C{bool}
    """
    entries = {}
    for attr_name, value in cls.__dict__.items():
        # Skip dunder entries and anything not "constant-cased"
        # (first character must already be uppercase).
        if attr_name.startswith('__') or attr_name[0] != attr_name[0].upper():
            continue
        label = (attr_name.replace('_', ' ').lower().title()
                 if friendly_names else attr_name)
        if reverse:
            entries[value] = label
        else:
            entries[label] = value
    return entries
| true |
3c3e08536b743e59b246d9af6856da379e3ff0fa | Python | guatty/job_offer_board_assignation | /test2.py | UTF-8 | 2,252 | 2.765625 | 3 | [] | no_license | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import pandas as pd
df = pd.read_csv('data/preprocessed_campaigns.csv')
# One output row per (campaign id, creation) group; the five per-action
# amounts are pivoted into dedicated amount_action_<k> columns.
columns_name = ['id', 'title', 'category', 'country', 'cpc', 'name', 'keywords', 'description', 'job_type', 'job', 'job_board_id', 'amount_action_0', 'amount_action_1', 'amount_action_2', 'amount_action_3', 'amount_action_4', 'budgetmax', 'creation']
new_df = pd.DataFrame(columns=columns_name)


def _amount_for_action(group, action):
    """Return the group's 'amount_action' for *action*, or 0 when absent."""
    match = group[group['action'] == action]
    return 0 if match.empty else match['amount_action'].iloc[0]


# The five copy-pasted amount_action_0..4 branches of the original are
# collapsed into a single parameterised lookup.
for i, (_, group) in enumerate(df.groupby(['id', 'creation'])):
    if i % 1000 == 0:
        # Progress indicator for long runs.
        print(i)
    line = []
    for column in columns_name:
        if column.startswith('amount_action_'):
            line.append(_amount_for_action(group, int(column.rsplit('_', 1)[1])))
        else:
            # Non-pivoted columns are constant within a group; take the first.
            line.append(group[column].iloc[0])
    new_df.loc[i] = line
new_df.to_csv('data/cleaned_preprocessed_campaigns.csv')
| true |
0152abb595804c2829ed8b02b072f3209593d63b | Python | huangno27/learn | /早期/learning python/xy.py | UTF-8 | 201 | 3.421875 | 3 | [] | no_license | print("----ak-----")
# Simple guess-the-number game: the secret number is 8.
guess = int(input("不妨猜一下心里的数字"))
if guess == 8:
    print("我擦")
    print("厉害了")
else:
    print("猜错了,不是这个")
print("over")
| true |
926b83867f2144e108b55f125c0a8b308808c067 | Python | SnakeOnex/twitter-word-embeddings | /input_data.py | UTF-8 | 5,387 | 3.234375 | 3 | [] | no_license | import numpy
from collections import deque
class InputData:
    """Word2vec-style corpus reader.

    Builds a vocabulary from a whitespace-separated text file (one sentence
    per line), produces (centre, context) skip-gram training pairs, and
    draws negative samples from a unigram^0.75 distribution.
    """
    def __init__(self, file_name, min_count):
        """
        :param file_name: path to the corpus file.
        :param min_count: words occurring fewer than this many times are dropped.
        """
        self.input_file_name = file_name
        # Build word2id/id2word/word_frequency, dropping rare words.
        self.get_words(min_count)
        # Buffer of pre-generated (centre, context) pairs.
        self.word_pair_catch = deque()
        # Build self.sample_table for negative sampling.
        self.init_sample_table()
        print('Word count: %d' % len(self.word2id))
        print('Sentence Length: %d' % (self.sentence_length))

    def get_words(self, min_count):
        """Scan the corpus once and build the vocabulary.

        Populates word2id, id2word, word_frequency (id -> count),
        sentence_length (total number of kept tokens), sentence_count and
        word_count. Leaves self.input_file open, positioned at EOF.
        """
        self.input_file = open(self.input_file_name)
        # Total number of tokens in the corpus (rare words removed below).
        self.sentence_length = 0
        # Number of lines (sentences) in the corpus.
        self.sentence_count = 0
        # word -> raw count, over the whole corpus.
        word_frequency = dict()
        for line in self.input_file:
            self.sentence_count += 1
            line = line.strip().split(' ')
            self.sentence_length += len(line)
            for w in line:
                word_frequency[w] = word_frequency.get(w, 0) + 1
        self.word2id = dict()
        self.id2word = dict()
        wid = 0
        # id -> count, restricted to words kept after min_count filtering.
        self.word_frequency = dict()
        for w, c in word_frequency.items():
            if c < min_count:
                # Rare word dropped: its tokens no longer count towards
                # the corpus length used by evaluate_pair_count.
                self.sentence_length -= c
                continue
            self.word2id[w] = wid
            self.id2word[wid] = w
            self.word_frequency[wid] = c
            wid += 1
        # Vocabulary size (number of unique kept words).
        self.word_count = len(self.word2id)

    def init_sample_table(self):
        """Precompute the negative-sampling table.

        Each word id appears in the table proportionally to its frequency
        raised to 0.75 (the word2vec smoothing), so uniform draws from the
        table follow the negative-sampling distribution.
        """
        sample_table_size = 1e8
        pow_frequency = numpy.array(list(self.word_frequency.values()))**0.75
        words_pow = sum(pow_frequency)
        ratio = pow_frequency / words_pow
        count = numpy.round(ratio * sample_table_size).astype(numpy.int64)
        # numpy.repeat builds the ~1e8-entry table in C; the original grew a
        # Python list element by element, which was extremely slow and used
        # several times the memory.
        self.sample_table = numpy.repeat(numpy.arange(len(count)), count)

    def get_batch_pairs(self, batch_size, window_size):
        """Return batch_size (centre_id, context_id) skip-gram pairs.

        Sentences are read lazily from the corpus file; at EOF the file is
        reopened so the stream of pairs never runs dry.
        """
        while len(self.word_pair_catch) < batch_size:
            sentence = self.input_file.readline()
            if sentence is None or sentence == '':
                # EOF: restart from the top of the corpus.
                self.input_file = open(self.input_file_name)
                sentence = self.input_file.readline()
            word_ids = []
            for word in sentence.strip().split(' '):
                try:
                    word_ids.append(self.word2id[word])
                except KeyError:
                    # Out-of-vocabulary (rare) word: skip it.
                    continue
            for i, u in enumerate(word_ids):
                window_start = max(i - window_size, 0)
                for j, v in enumerate(word_ids[window_start:i + window_size]):
                    assert u < self.word_count
                    assert v < self.word_count
                    # Skip pairing the centre word with itself. The original
                    # compared i == j, but j indexes the *slice*, so whenever
                    # the window was clipped on the left (i > window_size)
                    # the self-pair was kept and a valid context dropped.
                    if window_start + j == i:
                        continue
                    self.word_pair_catch.append((u, v))
        batch_pairs = []
        for _ in range(batch_size):
            batch_pairs.append(self.word_pair_catch.popleft())
        return batch_pairs

    def get_neg_v_neg_sampling(self, pos_word_pair, count):
        """Draw `count` negative word ids for every positive pair."""
        neg_v = numpy.random.choice(
            self.sample_table, size=(len(pos_word_pair), count)).tolist()
        return neg_v

    def evaluate_pair_count(self, window_size):
        """Estimate the number of training pairs generated in one epoch."""
        return self.sentence_length * (2 * window_size - 1) - (
            self.sentence_count - 1) * (1 + window_size) * window_size
def test():
    """Smoke test: build an InputData over the local tweets corpus."""
    InputData('./tweets.txt', 2)
# Run the smoke test when this module is executed directly.
if __name__ == '__main__':
    test()
| true |
bc93803fd574cee158ee61070b01e3dffc9fc105 | Python | bblwg2020/RCNN | /train_step3.py | UTF-8 | 1,251 | 2.546875 | 3 | [
"MIT"
] | permissive | from __future__ import division
from data.dataset_factory import DatasetFactory
from models.model_factory import ModelsFactory
from options.train_options import TrainOptions
import numpy as np
class Train:
    """Step 3 of the R-CNN pipeline: fit one SVM per class.

    All work happens in the constructor: it parses the training options,
    loads the "SVMDataset", splits it into per-class features/labels and
    trains, saves and sanity-checks two SvmModel instances ("A" and "B").
    """
    def __init__(self):
        self._opt = TrainOptions().parse()
        self._dataset_train = DatasetFactory.get_by_name("SVMDataset", self._opt)
        self._dataset_train_size = len(self._dataset_train)
        print('#train images = %d' % self._dataset_train_size)
        # get_datas() presumably returns (features, labels) for each of the
        # two classes — TODO confirm against SVMDataset's implementation.
        self.classA_features, self.classA_labels, self.classB_features, self.classB_labels = self._dataset_train.get_datas()
        self._modelA = ModelsFactory.get_by_name("SvmModel", self._opt, is_train=True)
        self._modelB = ModelsFactory.get_by_name("SvmModel", self._opt, is_train=True)
        self._train(self._modelA, self.classA_features, self.classA_labels, "A")
        self._train(self._modelB, self.classB_features, self.classB_labels, "B")
    def _train(self, model, features, labels, name):
        """Fit *model* on (features, labels), save it under *name*, then
        print the ground-truth labels next to the model's predictions on
        the training set as a quick sanity check."""
        model.train(features, labels)
        model.save(name)
        pred = model.predict(features)
        print (labels)
        print (pred)
# Kick off SVM training when executed as a script.
if __name__ == "__main__":
    Train()
| true |
c783cf5535d8cd5e2b2dfc60594291b542c8d4df | Python | RedLicorice/crypto-forecast | /lib/trading/exchange.py | UTF-8 | 13,175 | 2.84375 | 3 | [] | no_license | #
# This class handles Exchange-related operations.
# For methods where DB access is needed, a 'session' parameter is required.
# Please note that created orders need to be added to the session manually
#
# Margin wallets hold the lent positions, they don't count for the sake of equities
from lib.trading.models import Asset, Order, OrderLog, OrderType, OrderStatus
from lib.log import logger
from sqlalchemy import and_, or_
class Exchange:
    """Simulated exchange supporting spot, margin-long and margin-short orders.

    Wherever DB access is needed a session is obtained from the injected
    factory; created orders and logs are added and committed here. Margin
    wallets hold the lent positions and do not count towards equity.

    Fee model (Kraken-like): margin positions pay a fixed fee at open plus
    a rolling fee per day the position stays open; spot trades pay a fixed
    percentage on every transaction.
    """
    MARGIN_SHORT_FIXED_FEE = 0.0001 # 0.01% fee at position open
    MARGIN_SHORT_DAILY_FEE = 0.0002 * 5 # Kraken applies 0.02% fee every 4 hours period after the first so 24/4 - 1
    MARGIN_LONG_FIXED_FEE = 0.0001 # 0.01% fee at position open
    MARGIN_LONG_DAILY_FEE = 0.0001 * 5 # Kraken applies 0.02% fee every 4 hours period after the first so 24/4 - 1
    SPOT_FIXED_FEE = 0.0026 # 0.16-0.26% fee at every spot transaction (maker-taker)

    def __init__(self, sessionFactory):
        """:param sessionFactory: zero-argument callable returning a new DB session."""
        self.createSession = sessionFactory

    def get_or_create_asset(self, symbol, **kwargs):
        """Return the Asset row for *symbol*, creating and committing it if absent."""
        session = self.createSession()
        asset = session.query(Asset).filter(Asset.symbol == symbol).first()
        if not asset:
            kwargs.update({'symbol': symbol})
            asset = Asset(**kwargs)
            session.add(asset)
            session.commit()
        return asset

    def get_asset(self, symbol):
        """Return the Asset row for *symbol*, or None if it does not exist."""
        session = self.createSession()
        asset = session.query(Asset).filter(Asset.symbol == symbol).first()
        return asset

    def can_open_order(self, asset: Asset, o: Order):
        """Return True when *asset* satisfies allowance/balance rules to open *o*.

        BUGFIX: the SHORT and SPOT branches originally compared the built-in
        `type` (never equal to an OrderType constant) instead of `o.type`,
        so both checks were silently skipped and every short/spot order was
        accepted regardless of allowance or balance.
        """
        # Margin trades require a FIAT balance covering the collateral value;
        # exchanges also impose a hard cap (allowance) on margin orders.
        if o.type == OrderType.LONG:
            # Each user has an allowance limit on the FIAT borrowable from
            # the pool; it varies with trading volume but is fixed here.
            if asset.long_allowance < o.open_price * o.coins:
                return False
            # The account should cover the borrowed FIAT value (the usual
            # 1.5x requirement relaxed to 1.0x). The opening fee is not
            # deducted here; the balance is just a warranty.
            if asset.fiat < o.open_price * o.coins:
                return False
        elif o.type == OrderType.SHORT:
            # Allowance limit on the coins borrowable from the pool.
            if asset.short_allowance < o.coins:
                return False
            # The account should cover the borrowed coins' value (relaxed
            # from 1.5x to 1.0x); the opening fee is not deducted here.
            if asset.fiat < o.coins * o.open_price:
                return False
        elif o.type == OrderType.SPOT:
            # Spot fee is paid upfront on the opening transaction, so the
            # full price plus fee must be owned.
            if asset.fiat < o.open_price * o.coins + self.get_open_fee(o):
                return False
        return True

    def can_close_order(self, asset: Asset, o: Order):
        """Return True when *asset* can absorb any loss from closing *o*."""
        if o.type == OrderType.LONG:
            # To close a margin long we return the FIAT borrowed at open,
            # plus the fixed (opening) and rolling (closing) fees.
            debt_fiat = o.coins * o.open_price
            debt_fiat += self.get_open_fee(o) + self.get_close_fee(o)
            # The debt is paid by selling the purchased coins at close price.
            sell_fiat = o.coins * o.close_price
            # If the sale does not cover the debt, the difference comes out
            # of our own funds.
            if sell_fiat < debt_fiat:
                loss = debt_fiat - sell_fiat
                if asset.fiat < loss:
                    return False
        elif o.type == OrderType.SHORT:
            # To close a margin short we buy back the borrowed coins plus
            # the fixed (opening) and rolling (closing) fees, at close price.
            debt_collateral = o.coins + self.get_open_fee(o) + self.get_close_fee(o)
            debt_fiat = debt_collateral * o.close_price # cost of buying back the debt
            # We initially sold the collateral at OPEN price.
            sell_fiat = o.coins * o.open_price
            # If the initial sale is lower than the buy-back cost, the
            # difference comes out of our own funds.
            if sell_fiat < debt_fiat:
                loss = debt_fiat - sell_fiat
                if asset.fiat < loss:
                    return False
        elif o.type == OrderType.SPOT:
            if asset.coins < o.coins:
                return False
            if asset.fiat < self.get_close_fee(o):
                return False
        return True

    def get_open_fee(self, o: Order):
        """Fixed fee charged when *o* is opened.

        NOTE(review): implicitly returns None for unknown order types —
        confirm every caller passes a LONG/SHORT/SPOT order.
        """
        if o.type == OrderType.LONG:
            # Fee is calculated on the FIAT debt.
            fiat_debt = o.coins * o.open_price
            return fiat_debt * self.MARGIN_LONG_FIXED_FEE # Fixed fee
        elif o.type == OrderType.SHORT:
            # Fee is calculated on the COLLATERAL (coin) debt.
            return o.coins * self.MARGIN_SHORT_FIXED_FEE
        elif o.type == OrderType.SPOT:
            # Percentage of the opening transaction's value.
            return self.SPOT_FIXED_FEE * (o.coins * o.open_price)

    def get_close_fee(self, o: Order, days=None):
        """Rolling/closing fee for *o*.

        :param days: position age in days; when omitted, the age at
            o.closed_at is used.
        """
        position_days = o.get_age_in_days(o.closed_at) if not days else days
        if o.type == OrderType.LONG:
            # Daily interest on the FIAT debt, times the position age.
            fiat_debt = o.coins * o.open_price
            daily_interest = fiat_debt * self.MARGIN_LONG_DAILY_FEE
            return daily_interest * position_days
        elif o.type == OrderType.SHORT:
            # Daily interest on the COLLATERAL debt, times the position age.
            daily_interest = o.coins * self.MARGIN_SHORT_DAILY_FEE
            return daily_interest * position_days
        elif o.type == OrderType.SPOT:
            # Percentage of the closing transaction's value.
            return self.SPOT_FIXED_FEE * (o.coins * o.close_price)

    def get_orders_between(self, asset, begin, end, type):
        """Orders of *type* for *asset* opened in [begin, end]; still-open
        orders are included regardless of their close date."""
        session = self.createSession()
        return session.query(Order).filter(
            and_(
                Order.open_at >= begin,
                Order.symbol == asset.symbol,
                Order.type == type,
                or_(Order.closed_at <= end, Order.status == OrderStatus.OPEN)
            )
        ).all()

    def get_operations_between(self, asset, begin, end, type):
        """Order-log entries of *type* for *asset* timestamped in [begin, end]."""
        session = self.createSession()
        return session.query(OrderLog).filter(
            and_(
                OrderLog.timestamp >= begin,
                OrderLog.timestamp <= end,
                OrderLog.type == type,
                OrderLog.symbol == asset.symbol
            )
        ).all()

    def get_open_short(self, asset):
        """All currently open SHORT orders for *asset*."""
        session = self.createSession()
        return session.query(Order).filter(and_(Order.symbol == asset.symbol, Order.type == OrderType.SHORT, Order.status == OrderStatus.OPEN)).all()

    def get_open_long(self, asset):
        """All currently open LONG orders for *asset*."""
        session = self.createSession()
        return session.query(Order).filter(and_(Order.symbol == asset.symbol, Order.type == OrderType.LONG, Order.status == OrderStatus.OPEN)).all()

    def get_open_spot(self, asset):
        """All currently open SPOT orders for *asset*."""
        session = self.createSession()
        return session.query(Order).filter(and_(Order.symbol == asset.symbol, Order.type == OrderType.SPOT, Order.status == OrderStatus.OPEN)).all()

    def open_order(self, day, type: str, asset: Asset, coins, price, stop_loss=0.01):
        """Open an order of *type* for *asset*; returns (order, log), or
        None when allowance/balance constraints fail.

        NOTE(review): the stop level is always set *above* the open price,
        which suits SHORT positions; for LONG positions a stop below the
        open price may be intended — confirm.
        """
        session = self.createSession()
        # Create an order instance
        o = Order(
            symbol=asset.symbol,
            type=type,
            status=OrderStatus.OPEN,
            coins=coins,
            open_price=price,
            #close_price=None,
            last_price=price,
            stop_loss=price + (price * stop_loss) if stop_loss else None,
            open_at=day,
            #closed_at=None,
        )
        log = OrderLog(
            symbol=asset.symbol,
            type=o.type,
            status=o.status,
            price=price,
            timestamp=day
        )
        # Fail if order can't be placed
        if not self.can_open_order(asset, o):
            logger.debug("[Day {}] Cannot open order for {}".format(day, asset.symbol))
            return None
        if o.type == OrderType.LONG:
            # Deduct order from allowance, which is in fiat for margin longs
            asset.long_allowance -= o.open_price * o.coins
            # In margin long orders we purchase coins using FIAT lent from
            # our broker (so subject to allowance)
            asset.margin_coins += o.coins
            # Increase long orders count
            asset.long_orders += 1
        elif o.type == OrderType.SHORT:
            # Deduct order from allowance, which is in coin for margin short
            asset.short_allowance -= o.coins
            # In margin short orders we sell coins lent from our broker
            # (subject to allowance) at open price
            asset.margin_fiat += o.coins * o.open_price
            # Increase short orders count
            asset.short_orders += 1
        elif o.type == OrderType.SPOT:
            # Deduct order buy price + fee from FIAT wallet
            asset.fiat -= o.open_price * o.coins + self.get_open_fee(o)
            # Add purchased coins to balance
            asset.coins += o.coins
            # Increase spot orders count
            asset.spot_orders += 1
        session.add(o)
        session.add(log)
        session.commit()
        return o, log

    def close_order(self, day, asset: Asset, o: Order, price):
        """Close *o* at *price* on *day*; returns (order, log), or None when
        the order is already closed.

        BUGFIX: the SHORT branch now restores the coin allowance and
        decrements the open-short counter, mirroring what open_order
        changed (the LONG and SPOT branches already performed the
        symmetric updates).
        """
        session = self.createSession()
        if o.status == OrderStatus.CLOSED:
            logger.error('Order {} is already closed'.format(o.id))
            return
        o.close_price = price
        o.closed_at = day
        o.status = OrderStatus.CLOSED
        log = OrderLog(
            symbol=asset.symbol,
            type=o.type,
            status=o.status,
            price=price,
            timestamp=day
        )
        if o.type == OrderType.LONG:
            # Sell the coins purchased at open, at close price.
            asset.margin_coins -= o.coins
            sell_fiat = o.coins * o.close_price
            # Debt: the FIAT borrowed at open plus fixed and rolling fees.
            debt_fiat = o.coins * o.open_price + self.get_open_fee(o) + self.get_close_fee(o)
            # Profit is what's left after paying back the debt; if negative
            # it is deducted from the fiat wallet.
            profit = sell_fiat - debt_fiat
            asset.fiat += profit
            # Restore allowance, which is in fiat for margin longs.
            asset.long_allowance += o.open_price * o.coins
            # Decrease long orders count.
            asset.long_orders -= 1
            o.profit = profit
            o.open_fee = self.get_open_fee(o)
            o.close_fee = self.get_close_fee(o)
        elif o.type == OrderType.SHORT:
            # Debt: the coins borrowed at open plus fixed and rolling fees.
            debt_collateral = o.coins + self.get_open_fee(o) + self.get_close_fee(o)
            # Cost of buying the debt back at close price.
            debt_fiat = debt_collateral * o.close_price
            # Proceeds of the initial sale at open price.
            sell_fiat = o.coins * o.open_price
            asset.margin_fiat -= sell_fiat
            profit = sell_fiat - debt_fiat
            asset.fiat += profit
            # BUGFIX: restore the coin allowance and decrement the
            # open-short counter (both were missing in the original).
            asset.short_allowance += o.coins
            asset.short_orders -= 1
            o.profit = profit
            o.open_fee = self.get_open_fee(o)
            o.close_fee = self.get_close_fee(o)
        elif o.type == OrderType.SPOT:
            # Deduct sold coins from balance.
            asset.coins -= o.coins
            # Trading fee is deducted from the sale proceeds.
            asset.fiat += o.close_price * o.coins - self.get_close_fee(o)
            # Decrease spot orders count.
            asset.spot_orders -= 1
            o.profit = o.coins * (o.close_price - o.open_price) - (self.get_open_fee(o) + self.get_close_fee(o))
            o.open_fee = self.get_open_fee(o)
            o.close_fee = self.get_close_fee(o)
        session.add(o)
        session.add(log)
        session.commit()
        return o, log
| true |
f51c8d719fb9bf69880ae88212fe06310093d98a | Python | naquiroz/CSE-151B-PA4 | /pa4/dataset_factory.py | UTF-8 | 2,968 | 2.5625 | 3 | [] | no_license | ################################################################################
# CSE 253: Programming Assignment 4
# Code snippet by Ajit Kumar, Savyasachi
# Fall 2020
################################################################################
import csv
import os
from pycocotools.coco import COCO
from torch.utils.data import DataLoader
from .coco_dataset import CocoDataset, collate_fn
from .vocab import load_vocab
# Builds your datasets here based on the configuration.
# You are not required to modify this code but you are allowed to.
def get_datasets(config_data):
    """Build the train/val/test dataloaders described by ``config_data``.

    Returns a tuple of (test COCO handle, vocabulary, train loader,
    val loader, test loader).
    """
    ds_cfg = config_data['dataset']
    images_root_dir = ds_cfg['images_root_dir']
    # Per-split image directories live under a common root.
    split_roots = {
        split: os.path.join(images_root_dir, split)
        for split in ('train', 'val', 'test')
    }
    train_annotation_file = ds_cfg['training_annotation_file_path']
    test_annotation_file = ds_cfg['test_annotation_file_path']
    # COCO handles: the train annotations also back the validation split.
    coco = COCO(train_annotation_file)
    coco_test = COCO(test_annotation_file)
    # The vocabulary is built from the training captions only.
    vocabulary = load_vocab(train_annotation_file, ds_cfg['vocabulary_threshold'])
    train_data_loader = get_coco_dataloader(
        ds_cfg['training_ids_file_path'],
        split_roots['train'],
        train_annotation_file,
        coco,
        vocabulary,
        config_data,
    )
    val_data_loader = get_coco_dataloader(
        ds_cfg['validation_ids_file_path'],
        split_roots['val'],
        train_annotation_file,
        coco,
        vocabulary,
        config_data,
    )
    test_data_loader = get_coco_dataloader(
        ds_cfg['test_ids_file_path'],
        split_roots['test'],
        test_annotation_file,
        coco_test,
        vocabulary,
        config_data,
    )
    return coco_test, vocabulary, train_data_loader, val_data_loader, test_data_loader
def get_coco_dataloader(
    img_ids_file_path,
    imgs_root_dir,
    annotation_file_path,
    coco_obj,
    vocabulary,
    config_data,
):
    """Create a DataLoader over the annotations of the images listed in a CSV.

    The CSV is expected to carry all image ids on its first row.
    """
    with open(img_ids_file_path, 'r') as f:
        rows = list(csv.reader(f))
    # Only the first CSV row carries ids; they are stored as strings.
    img_ids = [int(raw_id) for raw_id in rows[0]]
    # Expand each image id into the ids of all of its annotations.
    ann_ids = []
    for img_id in img_ids:
        for annotation in coco_obj.imgToAnns[img_id]:
            ann_ids.append(annotation['id'])
    ds_cfg = config_data['dataset']
    dataset = CocoDataset(
        root=imgs_root_dir,
        json=annotation_file_path,
        ids=ann_ids,
        vocab=vocabulary,
        img_size=ds_cfg['img_size'],
    )
    return DataLoader(
        dataset=dataset,
        batch_size=ds_cfg['batch_size'],
        shuffle=True,
        num_workers=ds_cfg['num_workers'],
        collate_fn=collate_fn,
        pin_memory=True,
    )
| true |
1ae7a7ff3ab58e6382f29becda0b5c4fbc4b11ae | Python | ja-vpaw/stepik-autotests | /selenium_course/lesson2/lesson2_3_step4.py | UTF-8 | 510 | 2.625 | 3 | [] | no_license | from selenium_course.common_lib.calc import calc_x
from selenium import webdriver
# Target page: clicking the button pops a JS alert that must be accepted
# before the task value becomes visible.
link = "http://suninjuly.github.io/alert_accept.html"
browser = webdriver.Chrome()
browser.get(link)
# The first button raises the confirmation alert.
button = browser.find_element_by_tag_name("button")
button.click()
# Accept the alert so the page reveals the generated number.
confirm = browser.switch_to.alert
confirm.accept()
# Read the generated value from #input_value (as text).
x = browser.find_element_by_id('input_value')
x = x.text
# calc_x (imported at the top of the file) computes the expected answer.
y = calc_x(x)
# NOTE(review): `input` shadows the builtin; harmless here because the
# builtin input() is not used afterwards.
input = browser.find_element_by_id('answer')
input.send_keys(y)
# Submit the answer via the (re-located) button.
button = browser.find_element_by_tag_name("button")
button.click()
| true |
5300e5e207d29edcc42e2d0124cd7bd7fbc346b2 | Python | ali-moments/cryptography-in-python | /decrypt.py | UTF-8 | 288 | 2.875 | 3 | [
"CC0-1.0"
] | permissive | import pyAesCrypt
# Interactive AES file decryption using pyAesCrypt.
print("<Decrypt>")
# Chunk size used by pyAesCrypt when streaming the file (64 KiB).
bufferSize = 64 * 1024
# NOTE(review): `file` shadows a builtin name; kept as-is.
file = input("File Name : ")
password = input("Password : ")
try:
    # Writes the plaintext next to the input as "<name>_decrypted".
    pyAesCrypt.decryptFile(file,file+"_decrypted",password,bufferSize)
    print("File Decrypted !")
except Exception as error:
    # Wrong password / missing file: report the error and exit non-zero.
    print(error)
    exit(1)
| true |
73fe1e1f48b48b589de060054139c6816db5f460 | Python | muxuanliang/Coding-Questions | /Util/quicksort.py | UTF-8 | 557 | 3.59375 | 4 | [] | no_license | # sort a list of numbers
def quicksort(lst):
    """Return a sorted copy of lst, using the first element as pivot."""
    if not len(lst) > 1:
        return lst
    pivot, rest = lst[0], lst[1:]
    smaller, larger = partition(rest, pivot)
    return quicksort(smaller) + [pivot] + quicksort(larger)
# In-place two-way split; handles any length, including the empty list.
def partition(lst, pivot):
    """Rearrange lst in place so values < pivot come first.

    Returns (values_below_pivot, remaining_values) as two new lists.
    """
    boundary = 0
    for scan in range(len(lst)):
        if lst[scan] < pivot:
            lst[boundary], lst[scan] = lst[scan], lst[boundary]
            boundary += 1
    return lst[:boundary], lst[boundary:]
# Ad-hoc smoke test (note: Python 2 print-statement syntax).
if __name__ == '__main__':
    print quicksort([6,9,2,9,10,6])
print quicksort([2,1]) | true |
01c24d6d2b3df244e0b88d9b3ab3aba9d86f1726 | Python | NWood-Git/leet_code | /82_remove_duplicates_from_sorted_linked_list_ii.py | UTF-8 | 3,497 | 3.96875 | 4 | [] | no_license | # 82. Remove Duplicates from Sorted List II
# Difficulty - Medium
# https://leetcode.com/problems/remove-duplicates-from-sorted-list-ii/
# Description:
# Given a sorted linked list, delete all nodes that have duplicate numbers, leaving only distinct numbers from the original list.
# Return the linked list sorted as well.
# Example 1: Input: 1->2->3->3->4->4->5
# Output: 1->2->5
# Example 2: Input: 1->1->1->2->3
# Output: 2->3
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node used by the LeetCode solutions in this file."""

    def __init__(self, val=0, next=None):
        self.val = val    # node payload
        self.next = next  # following node, or None at the tail

    def print_ll(self):
        """Print the list from this node as 'v1 -> v2 -> ' plus a newline."""
        node = self
        pieces = [str(node.val)]
        while node.next:
            node = node.next
            pieces.append(str(node.val))
        print("".join(piece + " -> " for piece in pieces))
def deleteDuplicates(head: "ListNode") -> "ListNode":
    """Remove every value that appears more than once in a sorted linked list.

    Single pass, O(n) time and O(1) extra space.  The original kept a set of
    seen values, special-cased lists of length 0/1/2 separately, and
    re-checked set membership on loop exit; this rewrite keeps only nodes
    whose value occurs exactly once, with no auxiliary storage.

    Args:
        head: First node of a sorted list, or None.
    Returns:
        Head of the filtered list (None when nothing survives).
    """
    new_head = None  # first surviving node
    tail = None      # last surviving node linked so far
    node = head
    while node:
        if node.next is not None and node.next.val == node.val:
            # Skip the whole run of equal values; none of them survive.
            run_val = node.val
            while node is not None and node.val == run_val:
                node = node.next
        else:
            # Unique value: append it to the output list.
            if tail is None:
                new_head = node
            else:
                tail.next = node
            tail = node
            node = node.next
    if tail is not None:
        tail.next = None  # detach any skipped duplicates after the last keeper
    return new_head
#
# Success - Details
# Runtime: 32 ms, faster than 97.45% of Python3 online submissions for Remove Duplicates from Sorted List II.
# Memory Usage: 13.8 MB, less than 8.00% of Python3 online submissions for Remove Duplicates from Sorted List II.
# Runtime: 40 ms, faster than 67.47% of Python3 online submissions for Remove Duplicates from Sorted List II.
# Memory Usage: 13.9 MB, less than 8.00% of Python3 online submissions for Remove Duplicates from Sorted List II.
# Runtime: 52 ms, faster than 8.22% of Python3 online submissions for Remove Duplicates from Sorted List II.
# Memory Usage: 13.8 MB, less than 8.00% of Python3 online submissions for Remove Duplicates from Sorted List II.
# Runtime: 60 ms, faster than 6.15% of Python3 online submissions for Remove Duplicates from Sorted List II.
# Memory Usage: 14.1 MB, less than 8.00% of Python3 online submissions for Remove Duplicates from Sorted List II.
# Runtime: 76 ms, faster than 5.54% of Python3 online submissions for Remove Duplicates from Sorted List II.
# Memory Usage: 13.8 MB, less than 8.00% of Python3 online submissions for Remove Duplicates from Sorted List II.
'''
e0 = ListNode(1)
e1 = ListNode(1)
e0.next = e1
e2 = ListNode(2)
e1.next = e2
e3 = ListNode(3)
e2.next = e3
e4 = ListNode(3)
e3.next = e4
e5 = ListNode(4)
e4.next = e5
e6 = ListNode(4)
e5.next = e6
e7 = ListNode(5)
e6.next = e7
e0.print_ll()
new = deleteDuplicates(e0)
new.print_ll()
# print(e0)
# print(e1)
# print(new)
'''
'''
e0 = ListNode(1)
e1 = ListNode(2)
e0.next = e1
e2 = ListNode(2)
e1.next = e2
e0.print_ll()
new = deleteDuplicates(e0)
new.print_ll()
'''
'''
e0 = ListNode(1)
e1 = ListNode(1)
e0.next = e1
e2 = ListNode(1)
e1.next = e2
e0.print_ll()
new = deleteDuplicates(e0)
new.print_ll()
''' | true |
4162c36a78565843f9113be4230e481d5d0b105f | Python | Harshit898/PythonWS | /load.py | UTF-8 | 3,715 | 2.734375 | 3 | [] | no_license | from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
import random
import ssl
import json
import sqlite3
# Ignore SSL certificate errors when talking to the scraped sites.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Fetch the PMJAY hospital-search page.
url = 'https://pmjay.gov.in/pagination.php'
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")
# Database: recreate the three tables from scratch on every run.
conn = sqlite3.connect('hospitals.sqlite')
cur = conn.cursor()
cur.executescript('''DROP TABLE IF EXISTS Hospitals;
DROP TABLE IF EXISTS District_Hospitals;
DROP TABLE IF EXISTS Locations;
CREATE TABLE IF NOT EXISTS Hospitals (Hospital_Name TEXT, Address TEXT, District TEXT, State TEXT);
CREATE TABLE IF NOT EXISTS District_Hospitals (District TEXT, Hospital_Name TEXT, Address TEXT, State TEXT);
CREATE TABLE IF NOT EXISTS Locations(Hospital_Name TEXT, gdata TEXT);
'''
)
# API Credentials
# SECURITY NOTE(review): hard-coded API keys are checked into the repo; they
# should be moved to environment variables and rotated.
serviceurl = "https://maps.googleapis.com/maps/api/geocode/json?key=AIzaSyAS19_Zro5RV71XsOYG0m-7UVHKNiYqRG8"
# Retrieve all of the states.
# NOTE(review): each loop pass overwrites `states` instead of appending, so
# only the last tag's value (minus its first/last character) survives; the
# value is unused below because `state` is hard-coded -- TODO confirm intent.
states = []
tags = soup('object')
for tag in tags:
    states = tag.get('value')
states = states[1:-1]
rand_idx = 'Assam'  # leftover from an earlier revision; unused
state = 'Haryana'
# Query the hospital table for the chosen state.
url = 'https://pmjay.gov.in/pagination.php?search%5Bstate%5D=' + state + '&search%5Bdistrict%5D=&Search='
data = []
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")
# Flatten the result table: header cells (th) first, then body cells (td).
tags = soup('th')
for item in tags:
    data.append(item.text)
tags = soup('td')
for item in tags:
    data.append(item.text)
# Reshape the flat cell list into rows of 4 columns
# (name, address, state, district); row 0 is the table header.
hospital_data = [[None for _ in range(4)] for _ in range(len(data) // 4)]
for i in range(0, len(data) // 4):
    for j in range(4):
        hospital_data[i][j] = data[4 * i + j]
# Store every hospital row, skipping the header row at index 0.
for i in range(1, len(hospital_data)):
    name = hospital_data[i][0]
    address = hospital_data[i][1]
    state = hospital_data[i][2]
    district = hospital_data[i][3]
    cur.execute('''INSERT INTO Hospitals (Hospital_Name, Address,State,District)
    VALUES (?,?,?,?)''', (name, address, state, district))
# Collect the district of every data row.  BUG FIX: the original started at
# index 0 and could randomly pick the header row's "District" label.
districtlist = []
for i in range(1, len(hospital_data)):
    districtlist.append(hospital_data[i][3])
#rand_idx = random.randrange(len(districtlist))
# BUG FIX: the original assigned the bound function itself
# (`dis = random.choice`), which then failed as an SQL parameter below; we
# want a randomly chosen district name.
dis = random.choice(districtlist)
cur.execute('''
INSERT INTO District_Hospitals (District, Hospital_Name, Address,State)
SELECT District, Hospital_Name, Address, State FROM Hospitals
WHERE District = ?
''', (dis,))
# Build "name, address, district, state" strings for the chosen district.
location = []
for i in range(1, len(hospital_data)):
    if hospital_data[i][3] == dis:
        location.append(
            hospital_data[i][0] + ", " + hospital_data[i][1] + ", " + hospital_data[i][3] + ", " + hospital_data[i][2])
# Forward-geocode each address with positionstack.
geodata = dict()
base_url = "http://api.positionstack.com/v1/forward?access_key=e87e0b95fe3f68e4de19530c3ea94cdf&query="
for i in location:
    add = i
    url = base_url + add.replace(" ", "+")
    uh = urlopen(url, context=ctx)
    data = uh.read().decode()
    g = i.split(',')
    try:
        js = json.loads(data)
        latitude = str(js["data"][0]["latitude"])
        longitude = str(js["data"][0]["longitude"])
        coor = latitude + ", " + longitude
        geodata[g[0]] = coor
    except Exception:
        # Malformed or empty geocoder response for this address.
        print("Failed to retrieve coordinates for " + g[0])
# Persist the coordinates and print them for inspection.
for i in geodata.keys():
    print(i, geodata[i])
    cur.execute('''
INSERT INTO Locations (Hospital_Name, gdata)
VALUES (?, ?)''', (i, geodata[i]))
conn.commit()
cur.close()
| true |
30a442d9ce9103c4eaabc298fc17c4184140022c | Python | moxlev/AtCoder | /abc/abc044B.py | UTF-8 | 228 | 2.984375 | 3 | [] | no_license | from collections import Counter
def main():
    """ABC044 B: print "Yes" iff every character of the input line occurs an even number of times."""
    word = input()
    odd_counts = [count for count in Counter(word).values() if count % 2 != 0]
    print("Yes" if not odd_counts else "No")
# Entry point when run as a script (AtCoder executes the file directly).
if __name__ == '__main__':
    main()
| true |
76a4cb6bc78a507ba50b8b081899f166405e9673 | Python | deepgbits/calculationEngine | /src/division.py | UTF-8 | 193 | 3.3125 | 3 | [
"MIT"
] | permissive | import math
def div(a, b):
    """Divide a by b.

    Returns the quotient, or the string "Error" when b is zero (sentinel
    kept for backward compatibility with existing callers).
    """
    if b == 0:
        return "Error"
    return a / b
| true |
e8f682af80b2cc5e2d03f95a9b408fb0ba86abe2 | Python | nomanshafqat/CE888-Data-Science-and-Decision-Making-Labs | /project3/playTestGame.py | UTF-8 | 1,664 | 3.34375 | 3 | [] | no_license | '''Created by nomanshafqat at 2020-04-07'''
import time
from UCT import OthelloState, UCT, get_state
# Plays one evaluation game between the two configured UCT players.
def UCTPlayTestGame(writer=None, classifier=None, expert_clf=None):
    """Play one Othello game between two UCT players (both itermax=1000).

    When ``state.playerJustMoved == 2`` the next move is chosen with
    ``classifier``, otherwise with ``expert_clf``.  Returns "expert" or
    "apprentice" naming the winning side, or 0 on a draw.  ``writer`` is
    accepted but currently unused.
    """
    state = OthelloState(8)  # square Othello board of the given size
    time_start=time.time()
    expert=2  # player number treated as the "expert" side below
    while (state.GetMoves() != []):
        print(".", end=" ")  # progress marker, one dot per ply
        if state.playerJustMoved == expert:
            m = UCT(rootstate=state, itermax=1000, verbose=False, classifier=classifier)
        else:
            m = UCT(rootstate=state, itermax=1000, verbose=False,
                    classifier=expert_clf)
        state.DoMove(m)
    print("\n time (s)= ",time.time()-time_start)
    # GetResult is from the last mover's perspective: 1.0 = win, 0.0 = loss,
    # anything else counts as a draw.
    if state.GetResult(state.playerJustMoved) == 1.0:
        if state.playerJustMoved==expert:
            print("Player " + str(state.playerJustMoved)+ " expert" , " wins!")
            return "expert"
        else:
            print("Player " + str(state.playerJustMoved)+ " apprentice" , " wins!")
            return "apprentice"
    elif state.GetResult(state.playerJustMoved) == 0.0:
        if state.playerJustMoved==expert:
            print("Player " + str(state.playerJustMoved)+ " apprentice" , " wins!")
            return "apprentice"
        else:
            print("Player " + str(state.playerJustMoved)+ " expert" , " wins!")
            return "expert"
    else:
        print("Nobody wins!")
        return 0
| true |
7e9c858e75970452c4de6406f82174c94eae7bfa | Python | Superpowergalaxy/AirBearingTable | /motor_control_cal.py | UTF-8 | 3,077 | 3.671875 | 4 | [] | no_license | #! /usr/bin/python
import os #stdlib; imported for system interaction (currently unused)
import time #sleep-based pacing of the ESC protocol
time.sleep(1)
import pigpio #GPIO/PWM access on the Raspberry Pi
ESC=4 #BCM pin number the ESC signal wire is connected to
pi = pigpio.pi();
pi.set_servo_pulsewidth(ESC, 0)  # start with no pulse (ESC disarmed)
time.sleep(5)
max_value = 2000 #full-throttle pulse width (us); adjust to your ESC
min_value = 700 #minimum pulse width (us); adjust to your ESC
stop_value = 1500  # neutral/stop pulse width (us)
def calibrate(): #ESC calibration/arming menu ('c' = full calibrate, 'n' = arm only)
    """Interactively calibrate or arm the ESC (Python 2: uses raw_input)."""
    print("calibrate with 'c' or normal start with 'n'")
    inp = raw_input()
    if inp == "n":
        # Normal start: just send the neutral pulse to arm the ESC.
        pi.set_servo_pulsewidth(ESC, stop_value)
        print("wait for it, wiat for it")
        time.sleep(2)
        print("please be patient")
        time.sleep(2)
    elif inp == "c":
        # Full throttle-range calibration: the ESC must see the max pulse at
        # power-on and then the min pulse (standard ESC calibration protocol).
        pi.set_servo_pulsewidth(ESC, 0)
        print("Disconnect the battery and press Enter")
        inp = raw_input()
        if inp == '':
            pi.set_servo_pulsewidth(ESC, max_value)
            print("Connect the battery NOW.. you will here two beeps, then wait for a gradual falling tone then press Enter")
            inp = raw_input()
            if inp == '':
                pi.set_servo_pulsewidth(ESC, min_value)
                print "Wierd eh! Special tone"
                time.sleep(7)
                print "Wait for it ...."
                time.sleep (5)
                print "Im working on it, DONT WORRY JUST WAIT....."
                pi.set_servo_pulsewidth(ESC, 0)
                time.sleep(2)
                print "Arming ESC now..."
                pi.set_servo_pulsewidth(ESC,stop_value)
                time.sleep(1)
    else:
        print ("WHAT DID I SAID!! calibrate with 'c' or normal start with 'n'")
def run():
    """Interactive throttle control loop (Python 2: uses raw_input)."""
    print ("Starting press'x' to restart")
    time.sleep(1)
    speed = stop_value # start at neutral; valid pulse range is about 700-2000 us
    print "Controls - a to decrease speed & d to increase speed OR q to decrease a lot of speed & e to increase a lot of speed"
    print "\n s for stop"
    while True:
        # Re-send the current pulse width each pass, then read a command key.
        pi.set_servo_pulsewidth(ESC, speed)
        inp = raw_input()
        if inp == "q":
            speed -= 100 # coarse decrement
            print "speed = %d" % speed
        elif inp == "e":
            speed += 100 # coarse increment
            print "speed = %d" % speed
        elif inp == "d":
            speed += 10 # fine increment
            print "speed = %d" % speed
        elif inp == "a":
            speed -= 10 # fine decrement
            print "speed = %d" % speed
        elif inp == "s":
            stop() # zero the pulse and release pigpio
            break
        else:
            print "WHAT DID I SAID!! Press a,q,d,e for speed and 's' for stop"
def stop(): #Zero the ESC pulse and disconnect from the pigpio daemon.
    pi.set_servo_pulsewidth(ESC, 0)
    pi.stop()
# Calibrate first, then let the user confirm ('y') or recalibrate ('r')
# before entering the interactive throttle loop.
if __name__ == "__main__":
    calibrate()
    print("y is rightly calibrated, r to recalibrate ")
    while True:
        inp = raw_input()
        if inp == "y":
            break
        elif inp == "r":
            calibrate()
            print("y is rightly calibrated, R to recalibrate ");
        else:
            print " %s is not an option. \n only y and r accepted" % inp
    run()
| true |
deecbf6cffd6c4801c425c11bf0183b800785cb6 | Python | nickr1977/Learning | /ex6.py | UTF-8 | 769 | 4.59375 | 5 | [] | no_license | # A variable for the amount of people and a text string describing it.
# LPTHW exercise 6: f-strings and embedding strings inside strings.
types_of_people = 10
x = f"There are {types_of_people} types of people."
# Two fragments interpolated into a second sentence.
binary = "binary"
do_not = "don't"
y = f"Those who know {binary} and those who {do_not}."
# Print both sentences.
print(x)
print(y)
# Embed the already-formatted strings inside further f-strings.
print(f"I said: {x}")
print(f"I also said: '{y}'")
# Deferred formatting: the placeholder is filled later with .format().
hilarious = False
joke_evaluation = "Isn't that joke so funny?! {}"
# .format() stringifies the bool, so this prints "... False".
print(joke_evaluation.format(hilarious))
# Plain string concatenation with +.
w = "This is the left side of..."
e = "a string with a right side."
# Prints the two halves joined into one sentence.
print(w + e)
| true |
e49788c8447fe785299af2e47d85b9f86098132c | Python | TheInventorist/Material-Programacion | /Guias de programacion basica/Soluciones/Python/06-Archivos/06/modules.py | UTF-8 | 828 | 3.375 | 3 | [
"MIT"
] | permissive | def leerArchivo(nombreArchivo):
    """Read a text file and return its lines with trailing newlines stripped."""
    contenido = []
    f = open(nombreArchivo, "r")
    for line in f:
        contenido.append(line)
    f.close()
    # Keep only the text before each line's newline.
    reworkedList = []
    for item in contenido:
        reworked = item.split("\n")
        reworkedList.append(reworked[0])
    return reworkedList
def estructurarDatos(lista1, lista2):
    """Pair the two lists element-wise into [[a, b], ...] rows.

    Indexes lista2 by lista1's positions, so lista2 must be at least as long
    as lista1 (an IndexError propagates otherwise, as before).
    """
    return [[lista1[i], lista2[i]] for i in range(len(lista1))]
def cargarData():
    """Load RUTs and names from their fixed files and pair them by position."""
    ruts = leerArchivo("Ruts.txt")
    nombres = leerArchivo("Nombres.txt")
    return estructurarDatos(ruts, nombres)
def buscaNombres(estructura, rut):
    """Return the name paired with `rut` in the [rut, name] rows, or 0 if absent."""
    for registro in estructura:
        if registro[0] == rut:
            return registro[1]
    return 0
| true |
62137adf033e3dbaec49ca17a1396b488706a40c | Python | lemillion12/sdpd-beaglebone-black-pir-sensor | /server.py | UTF-8 | 452 | 2.90625 | 3 | [] | no_license | import socket
def Main():
    """Accept one TCP client on 10.42.0.1:6666 and print everything it sends."""
    host = '10.42.0.1'
    port = 6666
    TestServer = socket.socket()
    TestServer.bind((host, port))
    print("Server started!")
    TestServer.listen(1)
    c, addr = TestServer.accept()
    print("Connection from: " + str(addr))
    while True:
        # BUG FIX: recv() returns bytes; the original wrapped it in str(),
        # so the empty b'' that signals a closed connection became the
        # truthy string "b''" and the loop never terminated.
        data = c.recv(1024)
        if not data:
            break
        print("from connected user: " + data.decode("utf-8", "replace"))
    c.close()
    TestServer.close()  # also release the listening socket
# Start the single-connection demo server when run directly.
if __name__== '__main__':
Main() | true |
58bbd4ba204553d3a2874d2a79b9c2055568b11b | Python | hengruizhang98/dgl | /examples/mxnet/gat/train.py | UTF-8 | 5,453 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | """
Graph Attention Networks in DGL using SPMV optimization.
Multiple heads are also batched together for faster training.
References
----------
Paper: https://arxiv.org/abs/1710.10903
Author's code: https://github.com/PetarV-/GAT
Pytorch implementation: https://github.com/Diego999/pyGAT
"""
import argparse
import networkx as nx
import time
import mxnet as mx
from mxnet import gluon
import numpy as np
import dgl
from dgl.data import register_data_args
from dgl.data import CoraGraphDataset, CiteseerGraphDataset, PubmedGraphDataset
from gat import GAT
from utils import EarlyStopping
def elu(data):
    """Exponential linear unit activation, applied via MXNet's LeakyReLU op."""
    return mx.nd.LeakyReLU(data, act_type='elu')
def evaluate(model, features, labels, mask):
    """Return the classification accuracy of `model` on the rows selected by `mask`."""
    logits = model(features)[mask].asnumpy().squeeze()
    expected = labels[mask].asnumpy().squeeze()
    predicted = np.argmax(logits, axis=1)
    correct = np.sum(np.where(predicted == expected, 1, 0))
    return correct / len(expected)
def main(args):
    """Load the citation dataset named by ``args``, train a GAT model on it, and report test accuracy."""
# load and preprocess dataset
if args.dataset == 'cora':
data = CoraGraphDataset()
elif args.dataset == 'citeseer':
data = CiteseerGraphDataset()
elif args.dataset == 'pubmed':
data = PubmedGraphDataset()
else:
raise ValueError('Unknown dataset: {}'.format(args.dataset))
g = data[0]
if args.gpu < 0:
cuda = False
ctx = mx.cpu(0)
else:
cuda = True
ctx = mx.gpu(args.gpu)
g = g.to(ctx)
features = g.ndata['feat']
labels = mx.nd.array(g.ndata['label'], dtype="float32", ctx=ctx)
mask = g.ndata['train_mask']
mask = mx.nd.array(np.nonzero(mask.asnumpy())[0], ctx=ctx)
val_mask = g.ndata['val_mask']
val_mask = mx.nd.array(np.nonzero(val_mask.asnumpy())[0], ctx=ctx)
test_mask = g.ndata['test_mask']
test_mask = mx.nd.array(np.nonzero(test_mask.asnumpy())[0], ctx=ctx)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
# create model
heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
model = GAT(g,
args.num_layers,
in_feats,
args.num_hidden,
n_classes,
heads,
elu,
args.in_drop,
args.attn_drop,
args.alpha,
args.residual)
if args.early_stop:
stopper = EarlyStopping(patience=100)
model.initialize(ctx=ctx)
# use optimizer
trainer = gluon.Trainer(model.collect_params(), 'adam', {'learning_rate': args.lr})
dur = []
for epoch in range(args.epochs):
if epoch >= 3:
t0 = time.time()
# forward
with mx.autograd.record():
logits = model(features)
loss = mx.nd.softmax_cross_entropy(logits[mask].squeeze(), labels[mask].squeeze())
loss.backward()
trainer.step(mask.shape[0])
if epoch >= 3:
dur.append(time.time() - t0)
print("Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f} | ETputs(KTEPS) {:.2f}".format(
epoch, loss.asnumpy()[0], np.mean(dur), n_edges / np.mean(dur) / 1000))
val_accuracy = evaluate(model, features, labels, val_mask)
print("Validation Accuracy {:.4f}".format(val_accuracy))
if args.early_stop:
if stopper.step(val_accuracy, model):
break
print()
if args.early_stop:
model.load_parameters('model.param')
test_accuracy = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(test_accuracy))
# Command-line interface: hyperparameters for the GAT training run.
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='GAT')
register_data_args(parser)
parser.add_argument("--gpu", type=int, default=-1,
help="which GPU to use. Set -1 to use CPU.")
parser.add_argument("--epochs", type=int, default=200,
help="number of training epochs")
parser.add_argument("--num-heads", type=int, default=8,
help="number of hidden attention heads")
parser.add_argument("--num-out-heads", type=int, default=1,
help="number of output attention heads")
parser.add_argument("--num-layers", type=int, default=1,
help="number of hidden layers")
parser.add_argument("--num-hidden", type=int, default=8,
help="number of hidden units")
parser.add_argument("--residual", action="store_true", default=False,
help="use residual connection")
parser.add_argument("--in-drop", type=float, default=.6,
help="input feature dropout")
parser.add_argument("--attn-drop", type=float, default=.6,
help="attention dropout")
parser.add_argument("--lr", type=float, default=0.005,
help="learning rate")
parser.add_argument('--weight-decay', type=float, default=5e-4,
help="weight decay")
parser.add_argument('--alpha', type=float, default=0.2,
help="the negative slop of leaky relu")
parser.add_argument('--early-stop', action='store_true', default=False,
help="indicates whether to use early stop or not")
args = parser.parse_args()
print(args)
main(args)
| true |
347b521625574100e063aca18b5a43ef52839697 | Python | TDK211299/machine-learning-aug-2019 | /KNN & Face Recog/face_detect.py | UTF-8 | 886 | 2.671875 | 3 | [] | no_license | import cv2
import numpy as np
# Capture face crops from the default webcam and save them for later training.
camera = cv2.VideoCapture(0)
face_detector = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
BASE_DIR = "./data/"
name = input("Enter your name : ")
faces_data = []
cnt = 0
while True:
    ret,img = camera.read()
    if ret==False:
        # Frame grab failed; try again.
        continue
    # Haar cascade: scaleFactor=1.3, minNeighbors=5.
    faces= face_detector.detectMultiScale(img,1.3,5)
    if(len(faces)==0):
        print("0 face detected")
        continue
    # Use only the first detected face.
    x,y,w,h = faces[0]
    cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),5)
    # Crop the face patch and normalize it to 100x100.
    cropped_face = img[y:y+h,x:x+w]
    cropped_face = cv2.resize(cropped_face,(100,100))
    cv2.imshow("Title",img)
    cv2.imshow("Cropped Face",cropped_face)
    key = cv2.waitKey(1) & 0xFF
    if key==ord('q'):
        # Quit on 'q'.
        break
    cnt += 1
    # Keep every 10th frame's face.
    if cnt%10==0:
        faces_data.append(cropped_face)
        print("Saving pic ",(cnt/10))
camera.release()
cv2.destroyAllWindows()
# Persist the collected crops as ./data/<name>.npy
faces_data = np.asarray(faces_data)
np.save(BASE_DIR+name+".npy",faces_data)
| true |
81d8369328dfbd3c7e769e59a571b45532c8461a | Python | AoiKuiyuyou/AoikPourTable | /src/aoikpourtable/count_io.py | UTF-8 | 3,211 | 2.703125 | 3 | [] | no_license | # coding: utf-8
#
from __future__ import absolute_import
from datetime import datetime
import sys
from .uri_util import uri_get_path
from .uri_util import uri_query_to_args
#
IS_PY2 = (sys.version_info[0] == 2)
#
def count_lines(uri, query, args, cmd_args):
    """
    Count factory that counts lines of a file.

    The count honors `cmd_args['start_row_ordinal']` (rows before it are
    excluded from the count) and `cmd_args['end_row_ordinal']` (counting
    stops before that ordinal).  For stdin input (`uri == '-'`) no counting
    is done because stdin can only be read once.

    @param uri: Input URI, or '-' for stdin.
    @param query: Input query (unused by this counter).
    @param args: Input arguments string; may carry an `encoding` option.
    @param cmd_args: Command arguments dict.
    @return: Count info dict, in the format:
        {
            'count': ...,
            'duration': ...,
            'rate': ...,
        }
    """
    start_row_ordinal = cmd_args['start_row_ordinal']
    end_row_ordinal = cmd_args['end_row_ordinal']
    # Parse the arguments string; `encoding` selects the input text encoding.
    args_dict = uri_query_to_args(args, flatten=True)
    encoding = args_dict.pop('encoding', 'utf-8')
    if uri == '-':
        # Use stdin as input file.
        input_file = sys.stdin
    else:
        input_file_path = uri_get_path(uri)
        # Python 2's open() has no `encoding` parameter.
        if IS_PY2:
            input_file = open(input_file_path, mode='r')
        else:
            input_file = open(input_file_path, mode='r', encoding=encoding)
    count_dura = None
    count_rate = None
    try:
        if input_file is sys.stdin:
            # Stdin data can only be read once; counting would consume it.
            count = None
        else:
            count_start_time = datetime.utcnow()
            count = 0
            # Counting is half-open: stop before `end_row_ordinal`.
            if end_row_ordinal:
                end_row_ordinal_inclusive = end_row_ordinal - 1
            else:
                end_row_ordinal_inclusive = None
            # NOTE(review): `end_row_ordinal == 1` yields an inclusive bound
            # of 0, which is falsy and disables the early stop -- confirm
            # whether ordinal 1 should mean "count nothing".
            for line in input_file:
                count += 1
                if end_row_ordinal_inclusive \
                        and count >= end_row_ordinal_inclusive:
                    break
            count_dura = \
                (datetime.utcnow() - count_start_time).total_seconds()
            # Rate is rows/second; undefined when the clock did not advance.
            if count_dura:
                count_rate = count / count_dura
            else:
                count_rate = None
    finally:
        # BUG FIX: the original leaked the file handle (it only seeked back
        # to the start); close it whenever we opened it ourselves.
        if input_file is not sys.stdin:
            input_file.close()
    if start_row_ordinal:
        if count is not None:
            # Rows before the starting ordinal do not count.
            count -= start_row_ordinal - 1
            if count < 0:
                count = 0
    return {
        'count': count,
        'duration': count_dura,
        'rate': count_rate,
    }
| true |
b6c4ebb23c110c861cd7f663711ef62518497aed | Python | hramos21/MTH-497I | /MTH497Master/assay.py | UTF-8 | 3,266 | 3.015625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 10:03:27 2020
@author: hecto
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#from phase import phase1
def singleWell(location):
    """Plot cycle number (column B) vs concentration for the single well in column C."""
    print('Here are the graphs for - '+location)
    # Column B holds the cycle numbers, column C one well's readings.
    x=pd.read_excel(location, usecols="B")
    g=pd.read_excel(location, usecols="C")
    # Plot the single well.
    plt.title('Graph for Single Well - '+location)
    plt.xlabel('Cycle number')
    plt.ylabel('Concentration')
    plt.plot(x,g)
    plt.show()
    return
def allWells(location):
    """Plot every well (columns C through CT) against cycle number."""
    # Columns C:CT hold all 96 well columns of the plate export.
    x=pd.read_excel(location, usecols="B")
    g2=pd.read_excel(location, usecols="C:CT")
    plt.title('Graph for all wells - '+location)
    plt.xlabel('Cycle number')
    plt.ylabel('Concentration')
    plt.plot(x,g2)
    plt.show()
    return
def groupWells(location):
    """Plot the wells as 4 groups of 24 columns, one color per group."""
    x=pd.read_excel(location, usecols="B")
    # Split the well columns into the 4 groups of 24.
    y1=pd.read_excel(location, usecols="C:Z")
    y2=pd.read_excel(location, usecols="AA:AX")
    y3=pd.read_excel(location, usecols="AY:BV")
    y4=pd.read_excel(location, usecols="BW:CT")
    plt.xlabel('Cycle number')
    plt.ylabel('Concentration')
    plt.title('Graph for 4 groups - '+location)
    # One color per group.
    plt.plot(x,y1,color='red')
    plt.plot(x,y2,color='green')
    plt.plot(x,y3,color='yellow')
    plt.plot(x,y4,color='blue')
    #plt.xlim(0,100)
    #plt.ylim(2000,4000)
    plt.show()
    return
def concenWells(location):
    """Plot the 8 concentrations; each spans 12 well columns (one triplicate per group)."""
    x=pd.read_excel(location, usecols="B")
    g2=pd.read_excel(location, usecols="C:CT")
    nar = g2.to_numpy() # work on the raw ndarray for fancy column indexing
    # One color per concentration.
    colors = ["red", "yellow", "blue", "green", "orange", "purple", "pink", "teal", "goldenrod"]
    plt.title('Graph for 8 concentrations - '+location)
    plt.xlabel('Cycle number')
    plt.ylabel('Concentration')
    for k in range(8):
        # Columns of concentration k: a triplicate in each of the 4 groups
        # of 24 columns (group offsets 0, 24, 48, 72).
        indices = [k*3+0, k*3+1, k*3+2, k*3+24, k*3+25, k*3+26, k*3+48,
                   k*3+49, k*3+50, k*3+72, k*3+73, k*3+74]
        plt.plot(x, nar[:, indices], color = colors[k] )
    plt.show()
    return
def tripWells(location):
    """Average each triplicate of adjacent well columns and plot the means."""
    x=pd.read_excel(location, usecols="B")
    g2=pd.read_excel(location, usecols="C:CT")
    nar = g2.to_numpy()
    # One output column per triplicate of input columns.
    wells = np.zeros((nar.shape[0], int(nar.shape[1]/3)))
    # Walk the wells three columns at a time.
    for j in range(0, nar.shape[1], 3):
        triplicate = nar[:, j: j + 3] # all rows of one 3-column slice
        means = np.mean(triplicate, axis=1) # row-wise mean of the triplicate
        meansupright = np.vstack(means) # reshape into a column vector
        # Store the averaged triplicate as output column j/3.
        wells[:, int(j/3): int(j/3) + 1] = meansupright
    plt.title('Graph for Triplicates - '+location)
    plt.xlabel('Cycle number')
    plt.ylabel('Concentration')
    plt.plot(x, wells)
    plt.show()
return | true |
f99c73b8106945c738850b9444caa79acf7c9864 | Python | estherica/wonderland | /modules/main_code.py | UTF-8 | 195 | 2.578125 | 3 | [] | no_license | from random import randint
from time import sleep
from modules.defim import menu,calculating,dogs_age
# Show the menu, pause briefly, then run the two demo calculations.
menu()
sleep(3)
print("wow!\n\n")
calculating(3,6)
# Convert the typed age to an int before passing it on.
dogs_age(int(input("Enter dog's age")))
| true |
3776956c360cae1663f00ef342a12995d3f27683 | Python | christopher-roy29/KMeansClustering-ImageDenoising | /task1.py | UTF-8 | 3,232 | 3.265625 | 3 | [] | no_license |
import utils
import numpy as np
import json
import time
def kmeans(img, k):
    """
    Implement kmeans clustering on the given image.

    Steps:
    (1) Candidate centers are the 100 most frequent pixel values, and every
        pair of them is used as an initialization.
    (2) For each initialization, alternate assignment / center update until
        the centers stop changing (bounded by a hard iteration cap).
    (3) Keep the initialization with the smallest summed distance.

    NOTE(review): the pair-based initialization hard-wires two clusters, so
    ``k`` is effectively ignored beyond k == 2 -- confirm before reusing.

    Arg: Input image;
         Number of K.
    Return: Clustering center values;
            Clustering labels of all pixels;
            Minimum summation of distance between each pixel and its center.
    """
    img = np.array(img)
    x_img, y_img = img.shape
    flat = img.reshape(x_img * y_img)
    dist_list = np.array([])
    centroid_list = np.array([])
    # Candidate centers: the 100 pixel values with the highest counts.
    counts, bins = np.histogram(img, range(256))
    top_values = counts.argsort()[-100:][::-1]
    # All unordered pairs of candidate centers.
    pairs = []
    for c, elem in enumerate(top_values):
        for i in range(c + 1, top_values.size):
            pairs.append([elem, top_values[i]])
    for pair in pairs:
        centroid = np.array(pair, dtype='float64')
        if centroid[0] == centroid[1]:
            continue
        dist = None
        # BUG FIX: the original convergence test compared two booleans
        # (`new_centroid.all() == centroid.all()`), so every run stopped
        # after a single iteration.  Iterate until the centers are stable,
        # with a hard cap as a safety net.
        for _ in range(300):
            dist = np.sqrt((flat - centroid[:, np.newaxis]) ** 2)
            nearest_centroid = np.argmin(dist, axis=0).reshape((x_img, y_img))
            new_centroid = centroid.copy()
            for idx in range(centroid.shape[0]):
                members = img[nearest_centroid == idx]
                # Empty clusters keep their old center (avoids NaN means).
                if members.size:
                    new_centroid[idx] = members.mean(axis=0)
            if np.array_equal(new_centroid, centroid):
                break
            centroid = new_centroid
        min_dist = np.array(dist.min(axis=0), dtype='float64')
        dist_list = np.append(dist_list, min_dist.sum(axis=0))
        centroid_list = np.append(centroid_list, centroid)
    # Pick the initialization with the smallest total distance and rebuild
    # its pixel labeling from the (integer-cast) centers.
    min_index = np.argmin(dist_list)
    centroid_list = centroid_list.reshape(-1, 2)
    centroid = centroid_list[min_index, :].astype(int)
    dist = np.sqrt((flat - centroid[:, np.newaxis]) ** 2)
    nearest_centroid = np.argmin(dist, axis=0).reshape((x_img, y_img))
    return centroid.tolist(), nearest_centroid, int(dist_list[min_index])
def visualize(centers, labels):
    """
    Convert the image to segmentation map replacing each pixel value with its center.
    Arg: Clustering center values (sequence of ints, one per cluster);
         Clustering labels of all pixels (array of center indices).
    Return: Segmentation map as a uint8 array with the same shape as labels.
    """
    # BUG FIX: the original did two sequential in-place assignments
    #     labels[labels == 0] = centers[0]; labels[labels == 1] = centers[1]
    # so whenever centers[0] == 1 those pixels were matched and overwritten
    # again by the second assignment.  A single index lookup maps every label
    # to its center value at once, and also works for any number of centers.
    return np.asarray(centers, dtype=np.uint8)[labels]
if __name__ == "__main__":
    # Load the source image, segment it into k clusters, and time the run.
    img = utils.read_image('lenna.png')
    k = 2
    t0 = time.time()
    centers, labels, sumdistance = kmeans(img, k)
    result = visualize(centers, labels)
    elapsed = time.time() - t0
    print(elapsed)
    centers = list(centers)
    # Persist the run summary and the segmentation map.
    payload = {"centers": centers, "distance": sumdistance, "time": elapsed}
    with open('results/task1.json', "w") as jsonFile:
        json.dump(payload, jsonFile)
    utils.write_image(result, 'results/task1_result.jpg')
| true |
9f5313fc45085efacbaeef66e199fc3a893509c6 | Python | asolwa/solawa_rudnicki | /anro5/scripts/jcmd.py | UTF-8 | 708 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
import sys
import rospy
from anro4.srv import Interpol
def interpolate(j1, j2, j3, t):
    """Request an interpolated move to joint targets (j1, j2, j3) over t
    seconds via the Interpol_control ROS service, printing the reply."""
    # Block until the service is advertised before calling it.
    rospy.wait_for_service('Interpol_control')
    try:
        call_service = rospy.ServiceProxy('Interpol_control', Interpol)
        reply = call_service(j1, j2, j3, t)
        print(reply)
    except rospy.ServiceException as e:
        print("Service call failed: %s" % e)
def usage():
    """Return a one-line usage string for this script."""
    template = "%s [j1.pos j2.pos j3.pos time]"
    return template % sys.argv[0]
if __name__ == "__main__":
    # Expect exactly four positional arguments:
    # three joint positions followed by a move duration.
    if len(sys.argv) == 5:
        j1 = float(sys.argv[1])
        j2 = float(sys.argv[2])
        j3 = float(sys.argv[3])
        t = float(sys.argv[4])
    else:
        # Wrong argument count: show usage and exit with an error status.
        # (print statement: this script targets Python 2.)
        print usage()
        sys.exit(1)
    interpolate(j1, j2, j3, t)
| true |
b26e340cc980d253b242c1a42a4d5e4100a1fd3f | Python | jesusa2624/PYTHON-BASCIO | /tuplas.py | UTF-8 | 543 | 4 | 4 | [] | no_license | #declarar tupla
# declare a tuple
mi_tupla = ()
mi_tupla = (1, 2, 3)
# create a one-element tuple (the trailing comma is required,
# otherwise the parentheses are just grouping)
mi_tupla = (1,)
# access tuple elements by index
mi_tupla = (1, 2, 3)
mi_tupla[0]  # 1
mi_tupla[1]  # 2
mi_tupla[2]  # 3
# re-assign a tuple by concatenation; tuples are immutable, so "+=" builds
# a brand-new tuple rather than modifying the old one in place
mi_tupla = (1, 2, 3)
mi_otra_tupla = (4, 5, 6)
# BUG FIX: the original read "mi_tupla =+ mi_otra_tupla", which parses as
# assignment of unary plus applied to a tuple and raises TypeError;
# the augmented assignment "+=" was clearly intended.
mi_tupla += mi_otra_tupla
# tuple methods
mi_tupla = (1, 1, 1, 2, 2, 3)
mi_tupla.count(1)  # 3 -- the value 1 appears three times in the tuple
mi_tupla.index(3)  # 5 -- index of the first occurrence of the value
mi_tupla.index(1)  # 0
mi_tupla.index(2)  # 3
61b7b822b00f09e112bf48aa61bbfd852086778d | Python | shangrex/Novel_Recommend_System | /src/script/run_poet_cnt_spa.py | UTF-8 | 1,391 | 2.640625 | 3 | [] | no_license | '''
Use Spacy (Word Embedding) to Recommend Poet
'''
import spacy
from spacy.lang.zh.examples import sentences
from sklearn.metrics.pairwise import cosine_similarity
import argparse
import pandas as pd
import numpy as np
import pickle
from tqdm import tqdm

nlp = spacy.load("zh_core_web_lg")
nlp.enable_pipe("senter")

parser = argparse.ArgumentParser(description='fill arguement')
parser.add_argument('--txt', type=str, required=True,
                    help='the word for searching')
parser.add_argument('--topk', type=int, required=False,
                    default=100,
                    help='# of results')
args = parser.parse_args()
topk = args.topk

# Pre-computed spaCy document vectors, one entry per poem; each entry's
# index 3 holds the embedding (presumably aligned row-for-row with poet.csv
# -- TODO confirm against the script that produced the pickle).
# "with" closes the file handle even if unpickling fails.
with open('data/pretrain/spa_embedding.pkl', 'rb') as f:
    spa_emb = pickle.load(f)
poet = pd.read_csv('data/poet.csv')

doc = nlp(args.txt)

# Score every poem by cosine similarity between its embedding and the query.
rst = []
for i in tqdm(range(len(spa_emb))):
    ftmp = cosine_similarity([spa_emb[i][3]], [doc.vector])
    rst.append([ftmp[0][0], poet['title'].iloc[i], poet['author'].iloc[i], poet['paragraphs'].iloc[i]])

rst = sorted(rst, key=lambda i: i[0], reverse=True)

# Count how often each poem (identified by title + author) appears among the
# top-k most similar entries.
cnt_rst = {}
for i in range(topk):
    key = rst[i][1] + rst[i][2]
    # BUG FIX: the original tested "rst[i][1] in cnt_rst" (title only) while
    # storing under title+author, so the membership test never matched and
    # every count was reset to 1 instead of being incremented.
    if key in cnt_rst:
        cnt_rst[key] += 1
    else:
        cnt_rst[key] = 1

cnt_rst = sorted(cnt_rst.items(), key=lambda i: i[1], reverse=True)
for i in cnt_rst:
    print(i)
14221f1172aa447cf0f0259c9707f4dca1eeb1f4 | Python | husigntospeech/SeniorProjectSignLanguage | /SignLanguageToSpeechServer/backend/server.py | UTF-8 | 2,834 | 2.953125 | 3 | [] | no_license | import base64
import logging
import os
import uuid
import shutil
from open_cv_handler import OpenCVHandler
from lib.websocket_server import WebsocketServer
TEMP_FOLDER_PATH = 'temp'
def on_client_message(client, server, message):
    """Handle one websocket message from a client.

    Two message shapes are supported, distinguished by the character at
    position 1:
      * "<letter> <uid>"  -- a correction: refile (or, for '!', discard)
        the cropped image identified by uid;
      * anything else     -- a Base64-encoded image to translate.
    """
    print('Got message.')
    if message[1] == ' ':
        corrected_translation = message[0]
        uid = message[2:]  # image uid
        if corrected_translation != '!':
            moveImageToCorrectLocation(corrected_translation, uid)
        else:
            # '!' means the crop was bogus; just delete it.
            victim_path = '%s/cropped_image_%s.jpg' % (TEMP_FOLDER_PATH, uid)
            remove_image_from_server(victim_path)
    else:
        raw_image = base64.decodestring(message)
        # Each image gets a universally unique id because every client is
        # serviced by the server in a SEPARATE thread.
        uid = uuid.uuid4().hex
        image_path = '%s/image_%s' % (TEMP_FOLDER_PATH, uid)
        write_image_to_server(raw_image, image_path)
        print('Getting text translation.')
        cv_handler = OpenCVHandler()
        # Translate via OpenCV, then speak the result aloud.
        text_trans = cv_handler.get_text_translation_from_image(image_path, uid)
        cv_handler.play_audio_translation_from_text(text_trans)
        print('Sending back translation.')
        server.send_message(client, '%s %s' % (text_trans, uid))
        # Clean up the raw upload; the cropped copy is kept for corrections.
        remove_image_from_server(image_path)
    print('Done.\n\n')
def write_image_to_server(decoded_image_string, image_path):
    """Write already-decoded image bytes to image_path on disk.

    decoded_image_string -- raw image bytes (Base64 already decoded)
    image_path           -- destination file path
    """
    print('Writing image to server.')
    # BUG FIX: the original used open()/write()/close() with no try/finally,
    # leaking the file handle if write() raised; "with" guarantees the handle
    # is closed on every path.
    with open(image_path, 'wb') as f:
        f.write(decoded_image_string)
def remove_image_from_server(image_path):
    """Delete a previously written temporary image file from disk."""
    print('Removing written image from server.')
    # os.remove raises OSError if the path is already gone, surfacing
    # cleanup failures to the caller exactly as before.
    os.remove(image_path)
def moveImageToCorrectLocation(translation, uid):
    """File the cropped image identified by uid under the folder for its
    (human-corrected) sign letter, e.g. letters/sign_a/."""
    print('Moving cropped image to correct location.')
    image_name = 'cropped_image_%s.jpg' % (uid)
    source = '%s/%s' % (TEMP_FOLDER_PATH, image_name)
    destination = '%s/%s' % ('letters/sign_%s' % (translation), image_name)
    shutil.move(source, destination)
def main():
    # raw_input / print statements: this module targets Python 2.
    ip = raw_input('Enter ip address: ')
    print 'Starting the server.'
    # Serve websocket clients on port 8080; each connected client is
    # handled on its own thread by WebsocketServer.
    server = WebsocketServer(8080, host=ip, loglevel=logging.INFO)
    server.set_fn_message_received(on_client_message)
    server.run_forever()  # blocks until the process is killed


if __name__ == '__main__':
    main()
| true |
973f9090c60444d4fc18a1126812e933f2578eb3 | Python | jpieper/legtool | /legtool/gait/leg_ik.py | UTF-8 | 7,931 | 2.84375 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2014 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Inverse kinematics for 3dof legs.'''
import math
from ..tf.tf import Point3D
class Configuration(object):
    """Per-leg geometry, joint limits, and servo mapping for a 3-DOF leg.

    All *_deg attributes are joint angles in degrees, *_length_mm are
    segment lengths in millimeters, *_sign flips a joint's rotation
    direction, and *_ident is the servo identifier used in command
    dictionaries.
    """

    # Coxa: the hip joint that swings the leg horizontally.
    coxa_min_deg = None
    coxa_idle_deg = None
    coxa_max_deg = None
    coxa_length_mm = None
    coxa_sign = 1
    coxa_ident = None

    # Femur: the upper leg segment.
    femur_min_deg = None
    femur_idle_deg = None
    femur_max_deg = None
    femur_length_mm = None
    femur_sign = 1
    femur_ident = None

    # Tibia: the lower leg segment.
    tibia_min_deg = None
    tibia_idle_deg = None
    tibia_max_deg = None
    tibia_length_mm = None
    tibia_sign = 1
    tibia_ident = None

    # Maximum servo rotation speed, in degrees per second.
    servo_speed_dps = 360.0

    @staticmethod
    def get_attributes():
        """Return the names of all plain data attributes (skips dunders,
        methods, and staticmethods)."""
        # NOTE: dict.iteritems() means this module targets Python 2.
        return [key for key, value in Configuration.__dict__.iteritems()
                if (not key.startswith('__') and
                    not callable(value) and
                    not isinstance(value, staticmethod))]

    def write_settings(self, config, group_name):
        """Write every attribute into section group_name of config.

        config is presumably a ConfigParser instance -- add_section/set
        match its API (TODO confirm at the call site).
        """
        config.add_section(group_name)
        for x in Configuration.get_attributes():
            config.set(group_name, x, getattr(self, x))

    @staticmethod
    def read_settings(config, group_name):
        """Build a Configuration from section group_name of config,
        leaving attributes absent from the file at their class defaults."""
        result = Configuration()
        for x in Configuration.get_attributes():
            if config.has_option(group_name, x):
                # sign/ident values are integers; everything else is a float
                if x.endswith('sign') or x.endswith('ident'):
                    value = config.getint(group_name, x)
                else:
                    value = config.getfloat(group_name, x)
                setattr(result, x, value)
        return result
class JointAngles(object):
    """A solved set of joint angles for one leg, paired with the leg's
    Configuration (which supplies the servo identifiers)."""

    config = None
    # Sign conventions: coxa positive rotates clockwise viewed from the
    # top; femur/tibia positive rotate the segment upward.
    coxa_deg = None
    femur_deg = None
    tibia_deg = None

    def command_dict(self):
        """Return a dictionary mapping servo identifiers to commands in
        degrees -- the same format the servo_controller module uses."""
        cfg = self.config
        return {
            cfg.coxa_ident: self.coxa_deg,
            cfg.femur_ident: self.femur_deg,
            cfg.tibia_ident: self.tibia_deg,
        }
def lizard_3dof_ik(point_mm, config):
    '''Given a target end position in 3D coordinate space, return the
    required joint angles for a 3 degree of freedom lizard style
    leg.

    +y is away from the shoulder
    +x is clockwise from shoulder
    +z is up

    point_mm -- Point3D target foot position in millimeters, expressed
                in the shoulder-relative frame described above
    config   -- Configuration holding segment lengths, signs, idle
                offsets, and joint limits

    If no solution is possible (target out of reach, or any joint limit
    would be violated), return None.  Otherwise return a JointAngles.
    '''

    # Solve for the coxa first, as it has only a single solution.
    coxa_deg = (config.coxa_sign *
                math.degrees(math.atan2(point_mm.x, point_mm.y)) +
                config.coxa_idle_deg)

    if (coxa_deg < config.coxa_min_deg or
        coxa_deg > config.coxa_max_deg):
        return None

    # x-coordinate of femur/tibia pair after rotating to 0 coxa
    true_x = (math.sqrt(point_mm.x ** 2 + point_mm.y ** 2) -
              config.coxa_length_mm)
    im = math.sqrt(point_mm.z ** 2 + true_x ** 2)

    # The femur/tibia pair makes a triangle whose third side, im, is the
    # hypotenuse of the right triangle formed by z and true_x:
    #
    #        femur
    #      *-------*
    #      |\     /
    #      | \   /  tibia
    #    z |  \im
    #      |   \
    #      *----*
    #      true_x
    #
    # im = math.sqrt(z ** 2 + true_x ** 2)
    #
    # Then, we can use the law of cosines to find the angle opposite
    # im, which is the angle between the femur and tibia.
    #
    #   im ** 2 = a ** 2 + b ** 2 + 2 * a * b * cos(C)
    #
    # Solving for C yields:
    #
    #   C = acos((im ** 2 - a ** 2 - b ** 2) / (2 * a * b))

    tibia_cos = ((im ** 2 -
                  config.tibia_length_mm ** 2 -
                  config.femur_length_mm ** 2) /
                 (2 * config.tibia_length_mm * config.femur_length_mm))
    if tibia_cos < -1.0 or tibia_cos > 1.0:
        # Target lies outside the annulus the femur/tibia pair can reach.
        return None

    # For our purposes, a 0 tibia angle should equate to a right angle
    # with the femur, so subtract off 90 degrees.
    tibia_deg = (config.tibia_sign *
                 math.degrees(0.5 * math.pi - math.acos(tibia_cos)) +
                 config.tibia_idle_deg)

    if (tibia_deg < config.tibia_min_deg or
        tibia_deg > config.tibia_max_deg):
        return None

    # To solve for the femur angle, we first get the angle opposite
    # true_x, then the angle opposite the tibia.
    true_x_deg = math.degrees(math.atan2(true_x, -point_mm.z))

    # Then the angle opposite the tibia is also found the via the law
    # of cosines.
    #
    #   tibia ** 2 = femur ** 2 + im ** 2 + 2 * femur * im * cos(femur_im)
    #
    #   femur_im = acos ( (tibia ** 2 - im ** 2 - femur ** 2) /
    #                     (2 * femur * im) )

    femur_im_cos = -(config.tibia_length_mm ** 2 -
                     config.femur_length_mm ** 2 -
                     im ** 2) / (2 * config.femur_length_mm * im)
    if femur_im_cos < -1.0 or femur_im_cos > 1.0:
        return None

    femur_im_deg = math.degrees(math.acos(femur_im_cos))

    femur_deg = (config.femur_sign * ((femur_im_deg + true_x_deg) - 90.0) +
                 config.femur_idle_deg)

    if (femur_deg < config.femur_min_deg or
        femur_deg > config.femur_max_deg):
        return None

    result = JointAngles()
    result.config = config
    result.coxa_deg = coxa_deg
    result.femur_deg = femur_deg
    result.tibia_deg = tibia_deg

    return result
class LizardIk(object):
    """Inverse-kinematics helper wrapping lizard_3dof_ik for one leg."""

    def __init__(self, config):
        self.config = config

    def do_ik(self, point_mm):
        """Return JointAngles for the given foot position, or None if it
        is unreachable or violates a joint limit."""
        return lizard_3dof_ik(point_mm, self.config)

    def worst_case_speed_mm_s(self, point_mm, direction_mm=None):
        '''Return the worst case linear velocity the end effector can
        achieve at point_mm (optionally constrained to direction_mm),
        or None if the point or its probed neighbors are unreachable.'''
        step = 0.01
        nominal = self.do_ik(point_mm)
        if nominal is None:
            return None

        servo_step = step * self.config.servo_speed_dps
        result = None

        def update(result, advanced_servo_deg, nominal_servo_deg):
            # BUG FIX: the original did a bare "return" (i.e. returned None)
            # when the joint angle did not change, discarding the minimum
            # accumulated so far.  An unchanged joint imposes no speed
            # limit, so the current result must be passed through.
            if advanced_servo_deg == nominal_servo_deg:
                return result
            this_speed = (servo_step /
                          abs(advanced_servo_deg - nominal_servo_deg))
            if result is None or this_speed < result:
                result = this_speed
            return result

        if direction_mm:
            normalized = direction_mm.scaled(1.0 / direction_mm.length())
            consider = [normalized.scaled(step)]
        else:
            # Probe each cartesian axis independently.  (Parenthesized the
            # tuple list: the bare form was Python-2-only syntax.)
            consider = [Point3D(*val) for val in
                        ((step, 0., 0.), (0., step, 0.), (0., 0., step))]

        for advance in consider:
            advanced = self.do_ik(point_mm + advance)

            if advanced is None:
                return None

            result = update(result, advanced.coxa_deg, nominal.coxa_deg)
            result = update(result, advanced.femur_deg, nominal.femur_deg)
            result = update(result, advanced.tibia_deg, nominal.tibia_deg)

        return result

    def servo_speed_dps(self):
        """Return the configured servo speed in degrees per second."""
        return self.config.servo_speed_dps

    def largest_change_deg(self, result1, result2):
        """Return the largest per-joint angular difference between two
        JointAngles solutions."""
        return max(abs(result1.coxa_deg - result2.coxa_deg),
                   abs(result1.femur_deg - result2.femur_deg),
                   abs(result1.tibia_deg - result2.tibia_deg))
| true |
eb863025f9446b826849a0ac7ad25b7a6c58ae9c | Python | shub-kris/coursework | /Probabilistic Machine Learning/Assignment_03/game.py | UTF-8 | 4,592 | 3.140625 | 3 | [] | no_license | import numpy as np
import random
from board import Board, cls
from MC_agent import MCAgent
from human_agent import HumanAgent
from random_agent import RandomAgent
import matplotlib.pyplot as plt
import time
# for usage with jupyter notebook
from IPython.display import clear_output
class Game:
    """A game of Battleship between two configurable agents.

    Each player owns a Board of the given size; an agent may be "human"
    (console input), "MC"/"MC2" (Monte-Carlo), or anything else for the
    random agent.
    """

    def __init__(self, size, ships, nb_samples=1000, player1="human", player2="random"):
        """Create both boards and instantiate the two player agents.

        size       -- board edge length
        ships      -- list of ship lengths to place
        nb_samples -- Monte-Carlo sample count (MC agents only)
        player1/2  -- agent type selector (see class docstring)
        """
        self.board_player1 = Board(size)
        self.board_player2 = Board(size)
        self.size = size
        self.ships = ships
        if player1 == "human":
            self.player1 = HumanAgent()
        elif player1 == "MC":
            self.player1 = MCAgent(ships=ships, size=size, nb_samples=nb_samples)
        elif player1 == "MC2":
            # NOTE(review): identical to the "MC" branch -- confirm whether a
            # different agent class was intended for "MC2".
            self.player1 = MCAgent(ships=ships, size=size, nb_samples=nb_samples)
        else:
            self.player1 = RandomAgent(size=size)
        if player2 == "human":
            self.player2 = HumanAgent()
        elif player2 == "MC":
            # NOTE(review): player2 gets ships.copy() while player1 shares the
            # original list -- confirm the asymmetry is intentional.
            self.player2 = MCAgent(ships=ships.copy(), size=size, nb_samples=nb_samples)
        elif player2 == "MC2":
            self.player2 = MCAgent(ships=ships.copy(), size=size, nb_samples=nb_samples)
        else:
            self.player2 = RandomAgent(size=size)

    def print_gamestate(self):
        """Render both players' observation grids side by side on stdout."""
        show = np.empty((self.size + 1, 2 * (self.size + 1) + 3), dtype=str)
        show_player2 = self.board_player2.show_board()
        show_player1 = self.board_player1.show_board()
        # Player 1 observes board 2 (left); player 2 observes board 1 (right).
        show[0 : self.size + 1, 0 : self.size + 1] = show_player2
        show[0 : self.size + 1, self.size + 4 : 2 * (self.size + 1) + 3] = show_player1
        print("")
        print("Player1's observations" + " " * 3 + "Player2's observations")
        print("")
        for line in show:
            print(*line)

    def initialize_game(self):
        """Place each player's ships: manually for humans, randomly otherwise."""
        if isinstance(self.player1, HumanAgent):
            self.board_player1.manual_initialization(self.ships)
        else:
            self.board_player1.random_initialization(self.ships)
        if isinstance(self.player2, HumanAgent):
            self.board_player2.manual_initialization(self.ships)
        else:
            self.board_player2.random_initialization(self.ships)

    def one_turn(self):
        """Play one full turn: player 1 observes, then player 2, then redraw."""
        print("Enter the coordinates of your next observation:")
        while True:
            try:
                i, j, scores1 = self.player1.select_observation()
                observation, sunken_ship = self.board_player2.observe(i, j)
                if not sunken_ship is None:
                    i1, j1, l, h = sunken_ship
                    # BUG FIX: message read "Player 1 player has sunk ..."
                    print(
                        "Player 1 has sunk ship at ("
                        + str(i1)
                        + ","
                        + str(j1)
                        + ") with length "
                        + str(l)
                        + "!"
                    )
                break
            except Exception:
                # BUG FIX: was a bare "except:", which also swallowed
                # SystemExit/KeyboardInterrupt; catch only real errors so
                # Ctrl-C still exits during the retry loop.
                print("Player 1 - Invalid observation. Try again.")
        self.player1.update_observations(i, j, observation, sunken_ship)
        while True:
            try:
                # handles the case i or j are empty
                i, j, scores2 = self.player2.select_observation()
                observation, sunken_ship = self.board_player1.observe(i, j)
                if not sunken_ship is None:
                    i2, j2, l, h = sunken_ship
                    print(
                        "Player 2 has sunk ship at ("
                        + str(i2)
                        + ","
                        + str(j2)
                        + ") with length "
                        + str(l)
                        + "!"
                    )
                break
            except Exception:
                # BUG FIX: narrowed from a bare "except:" as above, and fixed
                # the message spacing ("Player 2-" -> "Player 2 -").
                print("Player 2 - Invalid observation. Try again.")
        self.player2.update_observations(i, j, observation, sunken_ship)
        clear_output(wait=True)
        self.print_gamestate()
        cls()

    def game_over(self):
        """Report and record the result once a fleet is destroyed.

        Sets self.winner to True (player 1), False (player 2), or None
        (draw), and returns True when the game has ended, False otherwise.
        """
        if self.board_player2.ships == []:
            if self.board_player1.ships == []:
                print("Game over! It's a draw!")
                self.winner = None
                return True
            else:
                print("Game over! Player 1 won!")
                self.winner = True
                return True
        elif self.board_player1.ships == []:
            print("Game over! Player 2 won!")
            self.winner = False
            return True
        return False
| true |
5eecb80fd9a5c2bed77de5e351f2a8f8e0246100 | Python | juneadkhan/InterviewPractice | /validPallindrome.py | UTF-8 | 385 | 3.9375 | 4 | [] | no_license | """
Given a string s, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.
Input: s = "A man, a plan, a canal: Panama"
Output: true
Explanation: "amanaplanacanalpanama" is a palindrome.
"""
# O(n) Time, O(1) extra space (two-pointer scan, no filtered copy)
def isPalindrome(s: str) -> bool:
    """Return True if s reads the same forwards and backwards when only
    alphanumeric characters are considered and case is ignored."""
    left, right = 0, len(s) - 1
    while left < right:
        if not s[left].isalnum():
            left += 1
        elif not s[right].isalnum():
            right -= 1
        elif s[left].lower() != s[right].lower():
            return False
        else:
            left += 1
            right -= 1
    return True